/* Output routines for GCC for Renesas / SuperH SH.
   Copyright (C) 1993-2021 Free Software Foundation, Inc.
   Contributed by Steve Chamberlain (sac@cygnus.com).
   Improved by Jim Wilson (wilson@cygnus.com).

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#define IN_TARGET_CODE 1

#include "config.h"
#define INCLUDE_VECTOR
#include "system.h"
#include "coretypes.h"
#include "stringpool.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "insn-attr.h"
#include "langhooks.h"
#include "sched-int.h"
#include "tm-constrs.h"
#include "tree-pass.h"

/* This file should be included last.  */
#include "target-def.h"
int code_for_indirect_jump_scratch = CODE_FOR_indirect_jump_scratch;

#define CONST_OK_FOR_ADD(size) CONST_OK_FOR_I08 (size)
#define GEN_MOV (*(gen_movsi))
#define GEN_ADD3 (*(gen_addsi3))
#define GEN_SUB3 (*(gen_subsi3))
/* Used to simplify the logic below.  Find the attributes wherever
   they may be.  */
#define SH_ATTRIBUTES(decl) \
  (TYPE_P (decl)) ? TYPE_ATTRIBUTES (decl) \
		  : DECL_ATTRIBUTES (decl) \
		  ? (DECL_ATTRIBUTES (decl)) \
		  : TYPE_ATTRIBUTES (TREE_TYPE (decl))
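
/* For example (an illustrative sketch, not part of the original sources):
   for a FUNCTION_DECL the macro yields its DECL_ATTRIBUTES when non-NULL
   and falls back to the attributes of the decl's type otherwise, so a
   caller can write

     tree attrs = SH_ATTRIBUTES (current_function_decl);
     if (lookup_attribute ("interrupt_handler", attrs) != NULL_TREE)
       ...

   without caring whether the attribute was attached to the decl or to
   its type.  */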
/* Set to true by expand_prologue() when the function is an
   interrupt handler.  */
bool current_function_interrupt;

tree sh_deferred_function_attributes;
tree *sh_deferred_function_attributes_tail = &sh_deferred_function_attributes;
/* Global variables for machine-dependent things.  */

/* Which cpu are we scheduling for.  */
enum processor_type sh_cpu;

/* Definitions used in ready queue reordering for first scheduling pass.  */

/* Reg weights arrays for modes SFmode and SImode, indexed by insn LUID.  */
static short *regmode_weight[2];

/* Total SFmode and SImode weights of scheduled insns.  */
static int curr_regmode_pressure[2];

/* Number of r0 life regions.  */
static int r0_life_regions;

/* If true, skip cycles for Q -> R movement.  */
static int skip_cycles = 0;

/* Cached value of can_issue_more.  This is cached in sh_variable_issue hook
   and returned from sh_reorder2.  */
static short cached_can_issue_more;

/* Unique number for UNSPEC_BBR pattern.  */
static unsigned int unspec_bbr_uid = 1;
/* Provides the class number of the smallest class containing
   reg number.  */
enum reg_class regno_reg_class[FIRST_PSEUDO_REGISTER] =
{
  R0_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  FP0_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  TARGET_REGS, TARGET_REGS, TARGET_REGS, TARGET_REGS,
  TARGET_REGS, TARGET_REGS, TARGET_REGS, TARGET_REGS,
  DF_REGS, DF_REGS, DF_REGS, DF_REGS,
  DF_REGS, DF_REGS, DF_REGS, DF_REGS,
  NO_REGS, GENERAL_REGS, PR_REGS, T_REGS,
  MAC_REGS, MAC_REGS, FPUL_REGS, FPSCR_REGS,
  GENERAL_REGS, GENERAL_REGS,
};
char sh_register_names[FIRST_PSEUDO_REGISTER] \
  [MAX_REGISTER_NAME_LENGTH + 1] = SH_REGISTER_NAMES_INITIALIZER;

char sh_additional_register_names[ADDREGNAMES_SIZE] \
  [MAX_ADDITIONAL_REGISTER_NAME_LENGTH + 1]
  = SH_ADDITIONAL_REGISTER_NAMES_INITIALIZER;

int assembler_dialect;
static void split_branches (rtx_insn *);
static int branch_dest (rtx);
static void print_slot (rtx_sequence *);
static rtx_code_label *add_constant (rtx, machine_mode, rtx);
static void dump_table (rtx_insn *, rtx_insn *);
static bool broken_move (rtx_insn *);
static bool mova_p (rtx_insn *);
static rtx_insn *find_barrier (int, rtx_insn *, rtx_insn *);
static bool noncall_uses_reg (rtx, rtx_insn *, rtx *);
static rtx_insn *gen_block_redirect (rtx_insn *, int, int);
static void sh_reorg (void);
static void sh_option_override (void);
static void sh_override_options_after_change (void);
static void output_stack_adjust (int, rtx, int, HARD_REG_SET *, bool);
static rtx_insn *emit_frame_insn (rtx);
static rtx push (int);
static void pop (int);
static void push_regs (HARD_REG_SET *mask, bool interrupt_handler);
static int calc_live_regs (HARD_REG_SET *);
static HOST_WIDE_INT rounded_frame_size (int);
static bool sh_frame_pointer_required (void);
static void sh_emit_mode_set (int, int, int, HARD_REG_SET);
static int sh_mode_needed (int, rtx_insn *);
static int sh_mode_after (int, int, rtx_insn *);
static int sh_mode_entry (int);
static int sh_mode_exit (int);
static int sh_mode_priority (int entity, int n);

static rtx mark_constant_pool_use (rtx);
static tree sh_handle_interrupt_handler_attribute (tree *, tree, tree,
						   int, bool *);
static tree sh_handle_resbank_handler_attribute (tree *, tree,
						 tree, int, bool *);
static tree sh2a_handle_function_vector_handler_attribute (tree *, tree,
							   tree, int, bool *);
static tree sh_handle_sp_switch_attribute (tree *, tree, tree, int, bool *);
static tree sh_handle_trap_exit_attribute (tree *, tree, tree, int, bool *);
static tree sh_handle_renesas_attribute (tree *, tree, tree, int, bool *);
static void sh_print_operand (FILE *, rtx, int);
static void sh_print_operand_address (FILE *, machine_mode, rtx);
static bool sh_print_operand_punct_valid_p (unsigned char code);
static bool sh_asm_output_addr_const_extra (FILE *file, rtx x);
static void sh_output_function_epilogue (FILE *);
static void sh_insert_attributes (tree, tree *);
static const char *sh_check_pch_target_flags (int);
static int sh_register_move_cost (machine_mode, reg_class_t, reg_class_t);
static int sh_adjust_cost (rtx_insn *, int, rtx_insn *, int, unsigned int);
static int sh_issue_rate (void);
static int sh_dfa_new_cycle (FILE *, int, rtx_insn *, int, int, int *sort_p);
static short find_set_regmode_weight (rtx, machine_mode);
static short find_insn_regmode_weight (rtx, machine_mode);
static void find_regmode_weight (basic_block, machine_mode);
static int find_r0_life_regions (basic_block);
static void sh_md_init_global (FILE *, int, int);
static void sh_md_finish_global (FILE *, int);
static int rank_for_reorder (const void *, const void *);
static void swap_reorder (rtx_insn **, int);
static void ready_reorder (rtx_insn **, int);
static bool high_pressure (machine_mode);
static int sh_reorder (FILE *, int, rtx_insn **, int *, int);
static int sh_reorder2 (FILE *, int, rtx_insn **, int *, int);
static void sh_md_init (FILE *, int, int);
static int sh_variable_issue (FILE *, int, rtx_insn *, int);

static bool sh_function_ok_for_sibcall (tree, tree);

static bool sh_can_follow_jump (const rtx_insn *, const rtx_insn *);
static bool sh_ms_bitfield_layout_p (const_tree);

static void sh_init_builtins (void);
static tree sh_builtin_decl (unsigned, bool);
static rtx sh_expand_builtin (tree, rtx, rtx, machine_mode, int);
static void sh_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				HOST_WIDE_INT, tree);
static void sh_file_start (void);
static bool sh_assemble_integer (rtx, unsigned int, int);
static bool flow_dependent_p (rtx_insn *, rtx_insn *);
static void flow_dependent_p_1 (rtx, const_rtx, void *);
static int shiftcosts (rtx);
static int and_xor_ior_costs (rtx, int);
static int addsubcosts (rtx);
static int multcosts (rtx);
static bool unspec_caller_rtx_p (rtx);
static bool sh_cannot_copy_insn_p (rtx_insn *);
static bool sh_cannot_force_const_mem_p (machine_mode, rtx);
static bool sh_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static int sh_address_cost (rtx, machine_mode, addr_space_t, bool);
static int sh_pr_n_sets (void);
static rtx sh_allocate_initial_value (rtx);
static reg_class_t sh_preferred_reload_class (rtx, reg_class_t);
static reg_class_t sh_secondary_reload (bool, rtx, reg_class_t,
					machine_mode,
					struct secondary_reload_info *);
static bool sh_legitimate_address_p (machine_mode, rtx, bool);
static rtx sh_legitimize_address (rtx, rtx, machine_mode);
static rtx sh_delegitimize_address (rtx);
static bool sh_cannot_substitute_mem_equiv_p (rtx);
static bool sh_legitimize_address_displacement (rtx *, rtx *,
						poly_int64, machine_mode);
static int scavenge_reg (HARD_REG_SET *s);

static rtx sh_struct_value_rtx (tree, int);
static rtx sh_function_value (const_tree, const_tree, bool);
static bool sh_function_value_regno_p (const unsigned int);
static rtx sh_libcall_value (machine_mode, const_rtx);
static bool sh_return_in_memory (const_tree, const_tree);
static rtx sh_builtin_saveregs (void);
static void sh_setup_incoming_varargs (cumulative_args_t,
				       const function_arg_info &, int *, int);
static bool sh_strict_argument_naming (cumulative_args_t);
static bool sh_pretend_outgoing_varargs_named (cumulative_args_t);
static void sh_atomic_assign_expand_fenv (tree *, tree *, tree *);
static tree sh_build_builtin_va_list (void);
static void sh_va_start (tree, rtx);
static tree sh_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *);
static bool sh_promote_prototypes (const_tree);
static machine_mode sh_promote_function_mode (const_tree type,
					      machine_mode,
					      int *punsignedp,
					      const_tree funtype,
					      int for_return);
static bool sh_pass_by_reference (cumulative_args_t,
				  const function_arg_info &);
static bool sh_callee_copies (cumulative_args_t, const function_arg_info &);
static int sh_arg_partial_bytes (cumulative_args_t, const function_arg_info &);
static void sh_function_arg_advance (cumulative_args_t,
				     const function_arg_info &);
static rtx sh_function_arg (cumulative_args_t, const function_arg_info &);
static int sh_dwarf_calling_convention (const_tree);
static void sh_encode_section_info (tree, rtx, int);
static bool sh2a_function_vector_p (tree);
static void sh_trampoline_init (rtx, tree, rtx);
static rtx sh_trampoline_adjust_address (rtx);
static void sh_conditional_register_usage (void);
static bool sh_legitimate_constant_p (machine_mode, rtx);
static int mov_insn_size (machine_mode, bool);
static int mov_insn_alignment_mask (machine_mode, bool);
static bool sh_use_by_pieces_infrastructure_p (unsigned HOST_WIDE_INT,
					       unsigned int,
					       enum by_pieces_operation,
					       bool);
static bool sequence_insn_p (rtx_insn *);
static void sh_canonicalize_comparison (int *, rtx *, rtx *, bool);
static void sh_canonicalize_comparison (enum rtx_code &, rtx &, rtx &,
					machine_mode, bool);
static bool sh_legitimate_combined_insn (rtx_insn *insn);

static bool sh_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2);

static void sh_init_sync_libfuncs (void) ATTRIBUTE_UNUSED;
static unsigned int sh_hard_regno_nregs (unsigned int, machine_mode);
static bool sh_hard_regno_mode_ok (unsigned int, machine_mode);
static bool sh_modes_tieable_p (machine_mode, machine_mode);
static bool sh_can_change_mode_class (machine_mode, machine_mode, reg_class_t);
static const struct attribute_spec sh_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
       affects_type_identity, handler, exclude } */
  { "interrupt_handler", 0, 0, true, false, false, false,
    sh_handle_interrupt_handler_attribute, NULL },
  { "sp_switch", 1, 1, true, false, false, false,
    sh_handle_sp_switch_attribute, NULL },
  { "trap_exit", 1, 1, true, false, false, false,
    sh_handle_trap_exit_attribute, NULL },
  { "renesas", 0, 0, false, true, false, false,
    sh_handle_renesas_attribute, NULL },
  { "trapa_handler", 0, 0, true, false, false, false,
    sh_handle_interrupt_handler_attribute, NULL },
  { "nosave_low_regs", 0, 0, true, false, false, false,
    sh_handle_interrupt_handler_attribute, NULL },
  { "resbank", 0, 0, true, false, false, false,
    sh_handle_resbank_handler_attribute, NULL },
  { "function_vector", 1, 1, true, false, false, false,
    sh2a_handle_function_vector_handler_attribute, NULL },
  { NULL, 0, 0, false, false, false, false, NULL, NULL }
};
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE sh_attribute_table

/* The next two are used for debug info when compiling with -gdwarf.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.uaword\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.ualong\t"

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE sh_option_override

#undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE \
  sh_override_options_after_change

#undef TARGET_PRINT_OPERAND
#define TARGET_PRINT_OPERAND sh_print_operand
#undef TARGET_PRINT_OPERAND_ADDRESS
#define TARGET_PRINT_OPERAND_ADDRESS sh_print_operand_address
#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P sh_print_operand_punct_valid_p
#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA sh_asm_output_addr_const_extra

#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE sh_output_function_epilogue

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK sh_output_mi_thunk

#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK \
  hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START sh_file_start
#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true

#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER sh_assemble_integer

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST sh_register_move_cost

#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES sh_insert_attributes

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST sh_adjust_cost

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE sh_issue_rate
/* The next 5 hooks have been implemented for reenabling sched1.  With the
   help of these macros we are limiting the movement of insns in sched1 to
   reduce the register pressure.  The overall idea is to keep count of SImode
   and SFmode regs required by already scheduled insns.  When these counts
   cross some threshold values, give priority to insns that free registers.
   The insn that frees registers is most likely to be the insn with lowest
   LUID (original insn order); but such an insn might be there in the stalled
   queue (Q) instead of the ready queue (R).  To solve this, we skip cycles
   up to a max of 8 cycles so that such insns may move from Q -> R.

   The hooks are described below:

   TARGET_SCHED_INIT_GLOBAL: Added a new target hook in the generic
   scheduler; it is called inside the sched_init function just after
   find_insn_reg_weights function call.  It is used to calculate the SImode
   and SFmode weights of insns of basic blocks; much similar to what
   find_insn_reg_weights does.
   TARGET_SCHED_FINISH_GLOBAL: Corresponding cleanup hook.

   TARGET_SCHED_DFA_NEW_CYCLE: Skip cycles if high register pressure is
   indicated by TARGET_SCHED_REORDER2; doing this may move insns from
   Q -> R.

   TARGET_SCHED_REORDER: If the register pressure for SImode or SFmode is
   high, reorder the ready queue so that the insn with lowest LUID will be
   issued next.

   TARGET_SCHED_REORDER2: If the register pressure is high, indicate to
   TARGET_SCHED_DFA_NEW_CYCLE to skip cycles.

   TARGET_SCHED_VARIABLE_ISSUE: Cache the value of can_issue_more so that it
   can be returned from TARGET_SCHED_REORDER2.

   TARGET_SCHED_INIT: Reset the register pressure counting variables.  */
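
/* An illustrative summary of that flow (a hedged sketch; the real logic
   lives in the sh_* hook implementations further down in this file):
   sh_reorder2 notices that the running pressure crossed a threshold and
   caches that fact, sh_dfa_new_cycle then requests up to 8 extra cycles
   so stalled insns can migrate from Q to R, and sh_reorder sorts R so
   the lowest-LUID insn - the likely register-freeing one - issues
   first.  */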
#undef TARGET_SCHED_DFA_NEW_CYCLE
#define TARGET_SCHED_DFA_NEW_CYCLE sh_dfa_new_cycle

#undef TARGET_SCHED_INIT_GLOBAL
#define TARGET_SCHED_INIT_GLOBAL sh_md_init_global

#undef TARGET_SCHED_FINISH_GLOBAL
#define TARGET_SCHED_FINISH_GLOBAL sh_md_finish_global

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE sh_variable_issue

#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER sh_reorder

#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 sh_reorder2

#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT sh_md_init

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS sh_delegitimize_address

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS sh_legitimize_address

#undef TARGET_CAN_FOLLOW_JUMP
#define TARGET_CAN_FOLLOW_JUMP sh_can_follow_jump

#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P sh_ms_bitfield_layout_p

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS sh_init_builtins
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL sh_builtin_decl
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN sh_expand_builtin

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL sh_function_ok_for_sibcall

#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P sh_cannot_copy_insn_p
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS sh_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST sh_address_cost
#undef TARGET_ALLOCATE_INITIAL_VALUE
#define TARGET_ALLOCATE_INITIAL_VALUE sh_allocate_initial_value

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG sh_reorg

#undef TARGET_DWARF_REGISTER_SPAN
#define TARGET_DWARF_REGISTER_SPAN sh_dwarf_register_span

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES sh_promote_prototypes
#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE sh_promote_function_mode

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE sh_function_value
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P sh_function_value_regno_p
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE sh_libcall_value
#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX sh_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY sh_return_in_memory

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS sh_builtin_saveregs
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS sh_setup_incoming_varargs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING sh_strict_argument_naming
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED sh_pretend_outgoing_varargs_named
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE sh_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES sh_callee_copies
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES sh_arg_partial_bytes
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG sh_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE sh_function_arg_advance

#undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
#define TARGET_ATOMIC_ASSIGN_EXPAND_FENV sh_atomic_assign_expand_fenv

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST sh_build_builtin_va_list
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START sh_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR sh_gimplify_va_arg_expr

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P sh_vector_mode_supported_p

#undef TARGET_CHECK_PCH_TARGET_FLAGS
#define TARGET_CHECK_PCH_TARGET_FLAGS sh_check_pch_target_flags

#undef TARGET_DWARF_CALLING_CONVENTION
#define TARGET_DWARF_CALLING_CONVENTION sh_dwarf_calling_convention

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED sh_frame_pointer_required

#undef TARGET_MODE_EMIT
#define TARGET_MODE_EMIT sh_emit_mode_set

#undef TARGET_MODE_NEEDED
#define TARGET_MODE_NEEDED sh_mode_needed

#undef TARGET_MODE_AFTER
#define TARGET_MODE_AFTER sh_mode_after

#undef TARGET_MODE_ENTRY
#define TARGET_MODE_ENTRY sh_mode_entry

#undef TARGET_MODE_EXIT
#define TARGET_MODE_EXIT sh_mode_exit

#undef TARGET_MODE_PRIORITY
#define TARGET_MODE_PRIORITY sh_mode_priority

/* Return regmode weight for insn.  */
#define INSN_REGMODE_WEIGHT(INSN, MODE)\
  regmode_weight[((MODE) == SImode) ? 0 : 1][INSN_UID (INSN)]

/* Return current register pressure for regmode.  */
#define CURR_REGMODE_PRESSURE(MODE)\
  curr_regmode_pressure[((MODE) == SImode) ? 0 : 1]
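
/* Usage sketch (illustrative): when an insn is scheduled, its weight is
   added to the running pressure, which the reordering hooks then compare
   against a per-mode threshold, roughly

     CURR_REGMODE_PRESSURE (SImode) += INSN_REGMODE_WEIGHT (insn, SImode);
     if (CURR_REGMODE_PRESSURE (SImode) > SIMODE_MAX_WEIGHT)
       ...register pressure is considered high...

   where SIMODE_MAX_WEIGHT is the threshold constant used by the
   scheduling hooks in this file.  */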
#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO sh_encode_section_info

#undef TARGET_LRA_P
#define TARGET_LRA_P sh_lra_p

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD sh_secondary_reload

#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS sh_preferred_reload_class

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE sh_conditional_register_usage

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P sh_legitimate_address_p

#undef TARGET_CANNOT_SUBSTITUTE_MEM_EQUIV_P
#define TARGET_CANNOT_SUBSTITUTE_MEM_EQUIV_P sh_cannot_substitute_mem_equiv_p

#undef TARGET_LEGITIMIZE_ADDRESS_DISPLACEMENT
#define TARGET_LEGITIMIZE_ADDRESS_DISPLACEMENT \
  sh_legitimize_address_displacement

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT sh_trampoline_init
#undef TARGET_TRAMPOLINE_ADJUST_ADDRESS
#define TARGET_TRAMPOLINE_ADJUST_ADDRESS sh_trampoline_adjust_address

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P sh_legitimate_constant_p

#undef TARGET_CANONICALIZE_COMPARISON
#define TARGET_CANONICALIZE_COMPARISON sh_canonicalize_comparison

#undef TARGET_LEGITIMATE_COMBINED_INSN
#define TARGET_LEGITIMATE_COMBINED_INSN sh_legitimate_combined_insn

#undef TARGET_FIXED_CONDITION_CODE_REGS
#define TARGET_FIXED_CONDITION_CODE_REGS sh_fixed_condition_code_regs

#undef TARGET_USE_BY_PIECES_INFRASTRUCTURE_P
#define TARGET_USE_BY_PIECES_INFRASTRUCTURE_P \
  sh_use_by_pieces_infrastructure_p

/* Machine-specific symbol_ref flags.  */
#define SYMBOL_FLAG_FUNCVEC_FUNCTION (SYMBOL_FLAG_MACH_DEP << 0)

/* The tas.b instruction sets the 7th bit in the byte, i.e. 0x80.  This value
   is used by optabs.c atomic op expansion code as well as in sync.md.  */
#undef TARGET_ATOMIC_TEST_AND_SET_TRUEVAL
#define TARGET_ATOMIC_TEST_AND_SET_TRUEVAL 0x80
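
/* Illustrative consequence (a sketch, not from the original sources):
   after

     unsigned char lock;
     bool was_set = __atomic_test_and_set (&lock, __ATOMIC_ACQUIRE);

   the byte holds 0x80 rather than 1, and the middle end compares the
   old value against this 0x80 TRUEVAL instead of 1 when deciding what
   to return.  */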
#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM sh_cannot_force_const_mem_p

#undef TARGET_HARD_REGNO_NREGS
#define TARGET_HARD_REGNO_NREGS sh_hard_regno_nregs
#undef TARGET_HARD_REGNO_MODE_OK
#define TARGET_HARD_REGNO_MODE_OK sh_hard_regno_mode_ok

#undef TARGET_MODES_TIEABLE_P
#define TARGET_MODES_TIEABLE_P sh_modes_tieable_p

#undef TARGET_CAN_CHANGE_MODE_CLASS
#define TARGET_CAN_CHANGE_MODE_CLASS sh_can_change_mode_class

#undef TARGET_CONSTANT_ALIGNMENT
#define TARGET_CONSTANT_ALIGNMENT constant_alignment_word_strings

#undef TARGET_HAVE_SPECULATION_SAFE_VALUE
#define TARGET_HAVE_SPECULATION_SAFE_VALUE speculation_safe_value_not_needed
struct gcc_target targetm = TARGET_INITIALIZER;

/* Information on the currently selected atomic model.
   This is initialized in sh_option_override.  */
static sh_atomic_model selected_atomic_model_;

const sh_atomic_model&
selected_atomic_model (void)
{
  return selected_atomic_model_;
}
static sh_atomic_model
parse_validate_atomic_model_option (const char* str)
{
  const char* model_names[sh_atomic_model::num_models];
  model_names[sh_atomic_model::none] = "none";
  model_names[sh_atomic_model::soft_gusa] = "soft-gusa";
  model_names[sh_atomic_model::hard_llcs] = "hard-llcs";
  model_names[sh_atomic_model::soft_tcb] = "soft-tcb";
  model_names[sh_atomic_model::soft_imask] = "soft-imask";

  const char* model_cdef_names[sh_atomic_model::num_models];
  model_cdef_names[sh_atomic_model::none] = "NONE";
  model_cdef_names[sh_atomic_model::soft_gusa] = "SOFT_GUSA";
  model_cdef_names[sh_atomic_model::hard_llcs] = "HARD_LLCS";
  model_cdef_names[sh_atomic_model::soft_tcb] = "SOFT_TCB";
  model_cdef_names[sh_atomic_model::soft_imask] = "SOFT_IMASK";

  sh_atomic_model ret;
  ret.type = sh_atomic_model::none;
  ret.name = model_names[sh_atomic_model::none];
  ret.cdef_name = model_cdef_names[sh_atomic_model::none];
  ret.strict = false;
  ret.tcb_gbr_offset = -1;

  /* Handle empty string as 'none'.  */
  if (str == NULL || *str == '\0')
    return ret;

#define err_ret(...) do { error (__VA_ARGS__); return ret; } while (0)

  std::vector<std::string> tokens;
  for (std::stringstream ss (str); ss.good (); )
    {
      tokens.push_back (std::string ());
      std::getline (ss, tokens.back (), ',');
    }

  if (tokens.empty ())
    err_ret ("invalid atomic model option");

  /* The first token must be the atomic model name.  */
  {
    for (size_t i = 0; i < sh_atomic_model::num_models; ++i)
      if (tokens.front () == model_names[i])
	{
	  ret.type = (sh_atomic_model::enum_type)i;
	  ret.name = model_names[i];
	  ret.cdef_name = model_cdef_names[i];
	  goto got_mode_name;
	}

    err_ret ("invalid atomic model name \"%s\"", tokens.front ().c_str ());
got_mode_name:;
  }

  /* Go through the remaining tokens.  */
  for (size_t i = 1; i < tokens.size (); ++i)
    {
      if (tokens[i] == "strict")
	ret.strict = true;
      else if (!tokens[i].compare (0, strlen ("gbr-offset="), "gbr-offset="))
	{
	  std::string offset_str = tokens[i].substr (strlen ("gbr-offset="));
	  ret.tcb_gbr_offset = integral_argument (offset_str.c_str ());
	  if (offset_str.empty () || ret.tcb_gbr_offset == -1)
	    err_ret ("could not parse gbr-offset value \"%s\" in atomic model "
		     "option", offset_str.c_str ());
	}
      else
	err_ret ("unknown parameter \"%s\" in atomic model option",
		 tokens[i].c_str ());
    }

  /* Check that the selection makes sense.  */
  if (ret.type == sh_atomic_model::soft_gusa && !TARGET_SH3)
    err_ret ("atomic model %s is only available on SH3 and SH4 targets",
	     ret.name);

  if (ret.type == sh_atomic_model::hard_llcs && !TARGET_SH4A)
    err_ret ("atomic model %s is only available on SH4A targets", ret.name);

  if (ret.type == sh_atomic_model::soft_tcb && ret.tcb_gbr_offset == -1)
    err_ret ("atomic model %s requires gbr-offset parameter", ret.name);

  if (ret.type == sh_atomic_model::soft_tcb
      && (ret.tcb_gbr_offset < 0 || ret.tcb_gbr_offset > 1020
	  || (ret.tcb_gbr_offset & 3) != 0))
    err_ret ("invalid gbr-offset value \"%d\" for atomic model %s; it must be "
	     "a multiple of 4 in the range 0-1020", ret.tcb_gbr_offset,
	     ret.name);

  if (ret.type == sh_atomic_model::soft_imask && TARGET_USERMODE)
    err_ret ("cannot use atomic model %s in user mode", ret.name);

  return ret;

#undef err_ret
}
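
/* For reference, the accepted strings look like the following
   (illustrative command line examples, not an exhaustive list):

     -matomic-model=soft-gusa
     -matomic-model=soft-tcb,gbr-offset=32
     -matomic-model=hard-llcs,strict

   i.e. one of the model names above, optionally followed by the comma
   separated parameters "strict" and "gbr-offset=<n>", where <n> must be
   a multiple of 4 in the range 0-1020 for the soft-tcb model.  */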
/* Register SH specific RTL passes.  */
extern opt_pass* make_pass_sh_treg_combine (gcc::context* ctx, bool split_insns,
					    const char* name);
extern opt_pass* make_pass_sh_optimize_sett_clrt (gcc::context* ctx,
						  const char* name);
static void
register_sh_passes (void)
{
/* Running the sh_treg_combine pass after ce1 generates better code when
   comparisons are combined and reg-reg moves are introduced, because
   reg-reg moves will be eliminated afterwards.  However, there are quite
   a few cases where combine will be unable to fold comparison related insns,
   thus for now don't do it.
  register_pass (make_pass_sh_treg_combine (g, false, "sh_treg_combine1"),
		 PASS_POS_INSERT_AFTER, "ce1", 1);
*/

  /* Run sh_treg_combine pass after combine but before register allocation.  */
  register_pass (make_pass_sh_treg_combine (g, true, "sh_treg_combine2"),
		 PASS_POS_INSERT_AFTER, "split1", 1);

  /* Run sh_treg_combine pass after register allocation and basic block
     reordering as this sometimes creates new opportunities.  */
  register_pass (make_pass_sh_treg_combine (g, true, "sh_treg_combine3"),
		 PASS_POS_INSERT_AFTER, "split3", 1);

  /* Optimize sett and clrt insns, by e.g. removing them if the T bit value
     is known after a conditional branch.
     This must be done after basic blocks and branch conditions have
     stabilized and won't be changed by further passes.  */
  register_pass (make_pass_sh_optimize_sett_clrt (g, "sh_optimize_sett_clrt"),
		 PASS_POS_INSERT_BEFORE, "sched2", 1);
}
/* Implement TARGET_OPTION_OVERRIDE macro.  Validate and override
   various options, and do some machine dependent initialization.  */
static void
sh_option_override (void)
{
  int regno;

  SUBTARGET_OVERRIDE_OPTIONS;

  sh_cpu = PROCESSOR_SH1;
  assembler_dialect = 0;
  if (TARGET_SH2)
    sh_cpu = PROCESSOR_SH2;
  if (TARGET_SH2E)
    sh_cpu = PROCESSOR_SH2E;
  if (TARGET_SH2A)
    sh_cpu = PROCESSOR_SH2A;
  if (TARGET_SH3)
    sh_cpu = PROCESSOR_SH3;
  if (TARGET_SH3E)
    sh_cpu = PROCESSOR_SH3E;
  if (TARGET_SH4)
    {
      assembler_dialect = 1;
      sh_cpu = PROCESSOR_SH4;
    }
  if (TARGET_SH4A)
    {
      assembler_dialect = 1;
      sh_cpu = PROCESSOR_SH4A;
    }

  /* User/privileged mode is supported only on SH3* and SH4*.
     Disable it for everything else.  */
  if (!TARGET_SH3 && TARGET_USERMODE)
    TARGET_USERMODE = false;

  if (! strcmp (sh_div_str, "call-div1"))
    sh_div_strategy = SH_DIV_CALL_DIV1;
  else if (! strcmp (sh_div_str, "call-fp") && TARGET_FPU_ANY)
    sh_div_strategy = SH_DIV_CALL_FP;
  else if (! strcmp (sh_div_str, "call-table") && TARGET_DYNSHIFT)
    sh_div_strategy = SH_DIV_CALL_TABLE;
  else
    {
      /* Pick one that makes most sense for the target in general.
	 It is not much good to use different functions depending on -Os,
	 since then we'll end up with two different functions when some of
	 the code is compiled for size, and some for speed.  */

      /* SH4 tends to emphasize speed.  */
      if (TARGET_HARD_SH4)
	sh_div_strategy = SH_DIV_CALL_TABLE;
      /* These have their own way of doing things.  */
      else if (TARGET_SH2A)
	sh_div_strategy = SH_DIV_INTRINSIC;
      /* SH1 .. SH3 cores often go into small-footprint systems, so
	 default to the smallest implementation available.  */
      else
	sh_div_strategy = SH_DIV_CALL_DIV1;
    }

  if (sh_divsi3_libfunc[0])
    ; /* User supplied - leave it alone.  */
  else if (TARGET_DIVIDE_CALL_FP)
    sh_divsi3_libfunc = "__sdivsi3_i4";
  else if (TARGET_DIVIDE_CALL_TABLE)
    sh_divsi3_libfunc = "__sdivsi3_i4i";
  else
    sh_divsi3_libfunc = "__sdivsi3";

  if (sh_branch_cost == -1)
    {
      /* The SH1 does not have delay slots, hence we get a pipeline stall
	 at every branch.  The SH4 is superscalar, so the single delay slot
	 is not sufficient to keep both pipelines filled.
	 In any case, set the default branch cost to '2', as it results in
	 slightly overall smaller code and also enables some if conversions
	 that are required for matching special T bit related insns.  */
      sh_branch_cost = 2;
    }

  /* Set -mzdcbranch for SH4 / SH4A if not otherwise specified by the user.  */
  if (! global_options_set.x_TARGET_ZDCBRANCH && TARGET_HARD_SH4)
    TARGET_ZDCBRANCH = 1;

  /* FDPIC code is a special form of PIC, and the vast majority of code
     generation constraints that apply to PIC also apply to FDPIC, so we
     set flag_pic to avoid the need to check TARGET_FDPIC everywhere
     flag_pic is checked.  */
  if (TARGET_FDPIC && !flag_pic)
    flag_pic = 1;

  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (! VALID_REGISTER_P (regno))
      sh_register_names[regno][0] = '\0';

  for (regno = 0; regno < ADDREGNAMES_SIZE; regno++)
    if (! VALID_REGISTER_P (ADDREGNAMES_REGNO (regno)))
      sh_additional_register_names[regno][0] = '\0';

  if (flag_pic && ! TARGET_PREFERGOT)
    flag_no_function_cse = 1;

  if (targetm.small_register_classes_for_mode_p (VOIDmode))
    {
      /* Never run scheduling before reload, since that can
	 break global alloc, and generates slower code anyway due
	 to the pressure on R0.  */
      /* Enable sched1 for SH4 if the user explicitly requests.
	 When sched1 is enabled, the ready queue will be reordered by
	 the target hooks if pressure is high.  We cannot do this for
	 PIC, SH3 and lower as they give spill failures for R0.  */
      if (!TARGET_HARD_SH4 || flag_pic)
	flag_schedule_insns = 0;
      /* ??? Current exception handling places basic block boundaries
	 after call_insns.  It causes the high pressure on R0 and gives
	 spill failures for R0 in reload.  See PR 22553 and the thread
	 on gcc-patches
	 <http://gcc.gnu.org/ml/gcc-patches/2005-10/msg00816.html>.  */
      else if (flag_exceptions)
	{
	  if (flag_schedule_insns && global_options_set.x_flag_schedule_insns)
	    warning (0, "ignoring %<-fschedule-insns%> because of exception "
			"handling bug");
	  flag_schedule_insns = 0;
	}
      else if (flag_schedule_insns
	       && !global_options_set.x_flag_schedule_insns)
	flag_schedule_insns = 0;
    }

  /* Unwind info is not correct around the CFG unless either a frame
     pointer is present or M_A_O_A is set.  Fixing this requires rewriting
     unwind info generation to be aware of the CFG and propagating states
     around edges.  */
  if ((flag_unwind_tables || flag_asynchronous_unwind_tables
       || flag_exceptions || flag_non_call_exceptions)
      && flag_omit_frame_pointer && !TARGET_ACCUMULATE_OUTGOING_ARGS)
    {
      warning (0, "unwind tables currently require either a frame pointer "
	       "or %<-maccumulate-outgoing-args%> for correctness");
      TARGET_ACCUMULATE_OUTGOING_ARGS = 1;
    }

  if (flag_unsafe_math_optimizations)
    {
      /* Enable fsca insn for SH4A if not otherwise specified by the user.  */
      if (global_options_set.x_TARGET_FSCA == 0
	  && (TARGET_SH4A_FP || TARGET_FPU_SH4_300))
	TARGET_FSCA = 1;

      /* Enable fsrra insn for SH4A if not otherwise specified by the user.  */
      if (global_options_set.x_TARGET_FSRRA == 0
	  && (TARGET_SH4A_FP || TARGET_FPU_SH4_300))
	TARGET_FSRRA = 1;
    }

  /* Allow fsrra insn only if -funsafe-math-optimizations and
     -ffinite-math-only is enabled.  */
  TARGET_FSRRA = TARGET_FSRRA
		 && flag_unsafe_math_optimizations
		 && flag_finite_math_only;

  /* If the -mieee option was not explicitly set by the user, turn it on
     unless -ffinite-math-only was specified.  See also PR 33135.  */
  if (! global_options_set.x_TARGET_IEEE)
    TARGET_IEEE = ! flag_finite_math_only;

  if (sh_fixed_range_str)
    sh_fix_range (sh_fixed_range_str);

  /* This target defaults to strict volatile bitfields.  */
  if (flag_strict_volatile_bitfields < 0 && abi_version_at_least (2))
    flag_strict_volatile_bitfields = 1;

  sh_override_options_after_change ();

  /* Parse atomic model option and make sure it is valid for the current
     target CPU.  */
  selected_atomic_model_
    = parse_validate_atomic_model_option (sh_atomic_model_str);

  register_sh_passes ();
}
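
/* For reference (illustrative): the division strategy handled above is
   selected on the command line with -mdiv=, e.g. -mdiv=call-div1,
   -mdiv=call-fp or -mdiv=call-table; any other value falls through to
   the per-CPU defaults chosen in the else branch.  */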
/* Implement targetm.override_options_after_change.  */
static void
sh_override_options_after_change (void)
{
  /* Adjust loop, jump and function alignment values (in bytes), if those
     were not specified by the user using -falign-loops, -falign-jumps
     and -falign-functions options.
     32 bit alignment is better for speed, because instructions can be
     fetched as a pair from a longword boundary.  For size use 16 bit
     alignment to get more compact code.
     Aligning all jumps increases the code size, even if it might
     result in slightly faster code.  Thus, it is set to the smallest
     alignment possible if not specified by the user.  */
  if (flag_align_loops && !str_align_loops)
    str_align_loops = optimize_size ? "2" : "4";

  /* Parse values so that we can compare for current value.  */
  parse_alignment_opts ();
  if (flag_align_jumps && !str_align_jumps)
    str_align_jumps = "2";
  else if (align_jumps.levels[0].get_value () < 2)
    str_align_jumps = "2";

  if (flag_align_functions && !str_align_functions)
    str_align_functions = optimize_size ? "2" : "4";

  /* The linker relaxation code breaks when a function contains
     alignments that are larger than that at the start of a
     compilation unit.  */
  if (TARGET_RELAX)
    {
      /* Parse values so that we can compare for current value.  */
      parse_alignment_opts ();
      int min_align = MAX (align_loops.levels[0].get_value (),
			   align_jumps.levels[0].get_value ());

      /* Also take possible .long constants / mova tables into account.  */
      if (min_align < 4)
	min_align = 4;
      if (align_functions.levels[0].get_value () < min_align)
	{
	  char *r = XNEWVEC (char, 16);
	  sprintf (r, "%d", min_align);
	  str_align_functions = r;
	}
    }
}
/* Print the operand address in x to the stream.  */
static void
sh_print_operand_address (FILE *stream, machine_mode /*mode*/, rtx x)
{
  switch (GET_CODE (x))
    {
    case REG:
    case SUBREG:
      fprintf (stream, "@%s", reg_names[true_regnum (x)]);
      break;

    case PLUS:
      {
	rtx base = XEXP (x, 0);
	rtx index = XEXP (x, 1);

	switch (GET_CODE (index))
	  {
	  case CONST_INT:
	    fprintf (stream, "@(%d,%s)", (int) INTVAL (index),
		     reg_names[true_regnum (base)]);
	    break;

	  case REG:
	  case SUBREG:
	    {
	      int base_num = true_regnum (base);
	      int index_num = true_regnum (index);

	      /* If base or index is R0, make sure that it comes first.
		 Usually one of them will be R0, but the order might be wrong.
		 If neither base nor index are R0 it's an error and we just
		 pass it on to the assembler.  This avoids silent wrong code
		 bugs.  */
	      if (base_num == 0 && index_num != 0)
		std::swap (base_num, index_num);

	      fprintf (stream, "@(%s,%s)", reg_names[index_num],
		       reg_names[base_num]);
	      break;
	    }

	  default:
	    gcc_unreachable ();
	  }
      }
      break;

    case PRE_DEC:
      fprintf (stream, "@-%s", reg_names[true_regnum (XEXP (x, 0))]);
      break;

    case POST_INC:
      fprintf (stream, "@%s+", reg_names[true_regnum (XEXP (x, 0))]);
      break;

    default:
      x = mark_constant_pool_use (x);
      output_addr_const (stream, x);
      break;
    }
}
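
/* For example (an illustrative sketch), the cases above produce the
   usual SH assembler operand syntax:

     (reg:SI r1)                       -> "@r1"
     (plus:SI (reg r2) (const_int 4))  -> "@(4,r2)"
     (plus:SI (reg r0) (reg r3))       -> "@(r0,r3)"  (R0 printed first)
     (pre_dec:SI (reg r15))            -> "@-r15"
     (post_inc:SI (reg r15))           -> "@r15+"  */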
/* Print operand x (an rtx) in assembler syntax to file stream
   according to modifier code.

   '.'  print a .s if insn needs delay slot
   ','  print LOCAL_LABEL_PREFIX
   '@'  print trap, rte or rts depending upon pragma interruptness
   '#'  output a nop if there is nothing to put in the delay slot
   '''  print likelihood suffix (/u for unlikely).
   '>'  print branch target if -fverbose-asm
   'O'  print a constant without the #
   'R'  print the LSW of a dp value - changes if in little endian
   'S'  print the MSW of a dp value - changes if in little endian
   'T'  print the next word of a dp value - same as 'R' in big endian mode.
   'M'  print .b / .w / .l / .s / .d suffix if operand is a MEM.
   'N'  print 'r63' if the operand is (const_int 0).
   'd'  print a V2SF reg as dN instead of fpN.
   'm'  print a pair `base,offset' or `base,index', for LD and ST.
   'U'  Likewise for {LD,ST}{HI,LO}.
   'V'  print the position of a single bit set.
   'W'  print the position of a single bit cleared.
   't'  print a memory address which is a register.
   'u'  prints the lowest 16 bits of CONST_INT, as an unsigned value.
   'o'  output an operator.  */
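
/* For example (an illustrative sketch, not from the original sources):
   for a DFmode operand in the register pair dr4 = { fr4, fr5 }, "%S0"
   prints "fr4" (the MSW; FP pairs are always big endian) and "%R0"
   prints "fr5"; for a general register pair the LSW/MSW choice instead
   follows SH_REG_LSW_OFFSET / SH_REG_MSW_OFFSET, i.e. the target's
   endianness.  */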
static void
sh_print_operand (FILE *stream, rtx x, int code)
{
  int regno;
  machine_mode mode;

  switch (code)
    {
      tree trapa_attr;

    case '.':
      if (final_sequence
	  && ! INSN_ANNULLED_BRANCH_P (final_sequence->insn (0))
	  && get_attr_length (final_sequence->insn (1)))
	fprintf (stream, ASSEMBLER_DIALECT ? "/s" : ".s");
      break;

    case ',':
      fprintf (stream, "%s", LOCAL_LABEL_PREFIX);
      break;

    case '@':
      trapa_attr = lookup_attribute ("trap_exit",
				     DECL_ATTRIBUTES (current_function_decl));
      if (trapa_attr)
	fprintf (stream, "trapa #%ld",
		 (long) TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (trapa_attr))));
      else if (sh_cfun_interrupt_handler_p ())
	{
	  if (sh_cfun_resbank_handler_p ())
	    fprintf (stream, "resbank\n");
	  fprintf (stream, "rte");
	}
      else
	fprintf (stream, "rts");
      break;

    case '#':
      /* Output a nop if there's nothing in the delay slot.  */
      if (dbr_sequence_length () == 0)
	fprintf (stream, "\n\tnop");
      break;

    case '\'':
      {
	rtx note = find_reg_note (current_output_insn, REG_BR_PROB, 0);

	if (note
	    && profile_probability::from_reg_br_prob_note (XINT (note, 0))
	       < profile_probability::even ())
	  fputs ("/u", stream);
	break;
      }

    case '>':
      if (flag_verbose_asm && JUMP_LABEL (current_output_insn))
	{
	  fputs ("\t! target: ", stream);
	  output_addr_const (stream, JUMP_LABEL (current_output_insn));
	}
      break;

    case 'O':
      x = mark_constant_pool_use (x);
      output_addr_const (stream, x);
      break;

    /* N.B.: %R / %S / %T adjust memory addresses by four.
       While they can be used to access 64 bit parts of a larger value
       held in general purpose registers, that won't work with memory -
       neither for fp registers, since the frxx names are used.  */
    case 'R':
      if (REG_P (x) || GET_CODE (x) == SUBREG)
	{
	  regno = true_regnum (x);
	  regno += FP_REGISTER_P (regno) ? 1 : SH_REG_LSW_OFFSET;
	  fputs (reg_names[regno], (stream));
	}
      else if (MEM_P (x))
	{
	  x = adjust_address (x, SImode, 4 * SH_REG_LSW_OFFSET);
	  sh_print_operand_address (stream, GET_MODE (x), XEXP (x, 0));
	}
      else
	{
	  rtx sub = NULL_RTX;

	  mode = GET_MODE (x);
	  if (mode == VOIDmode)
	    mode = DImode;
	  if (GET_MODE_SIZE (mode) >= 8)
	    sub = simplify_subreg (SImode, x, mode, 4 * SH_REG_LSW_OFFSET);
	  if (sub)
	    sh_print_operand (stream, sub, 0);
	  else
	    output_operand_lossage ("invalid operand to %%R");
	}
      break;

    case 'S':
      if (REG_P (x) || GET_CODE (x) == SUBREG)
	{
	  regno = true_regnum (x);
	  regno += FP_REGISTER_P (regno) ? 0 : SH_REG_MSW_OFFSET;
	  fputs (reg_names[regno], (stream));
	}
      else if (MEM_P (x))
	{
	  x = adjust_address (x, SImode, 4 * SH_REG_MSW_OFFSET);
	  sh_print_operand_address (stream, GET_MODE (x), XEXP (x, 0));
	}
      else
	{
	  rtx sub = NULL_RTX;

	  mode = GET_MODE (x);
	  if (mode == VOIDmode)
	    mode = DImode;
	  if (GET_MODE_SIZE (mode) >= 8)
	    sub = simplify_subreg (SImode, x, mode, 4 * SH_REG_MSW_OFFSET);
	  if (sub)
	    sh_print_operand (stream, sub, 0);
	  else
	    output_operand_lossage ("invalid operand to %%S");
	}
      break;

    case 'T':
      /* Next word of a double.  */
      switch (GET_CODE (x))
	{
	case REG:
	  fputs (reg_names[REGNO (x) + 1], (stream));
	  break;

	case MEM:
	  {
	    machine_mode mode = GET_MODE (x);
	    if (GET_CODE (XEXP (x, 0)) != PRE_DEC
		&& GET_CODE (XEXP (x, 0)) != POST_INC)
	      x = adjust_address (x, SImode, 4);
	    sh_print_operand_address (stream, mode, XEXP (x, 0));
	  }
	  break;

	default:
	  break;
	}
      break;

    case 't':
      gcc_assert (MEM_P (x));
      x = XEXP (x, 0);
      switch (GET_CODE (x))
	{
	case REG:
	case SUBREG:
	  sh_print_operand (stream, x, 0);
	  break;

	default:
	  break;
	}
      break;

    case 'o':
      switch (GET_CODE (x))
	{
	case PLUS:  fputs ("add", stream); break;
	case MINUS: fputs ("sub", stream); break;
	case MULT:  fputs ("mul", stream); break;
	case DIV:   fputs ("div", stream); break;
	case EQ:    fputs ("eq",  stream); break;
	case NE:    fputs ("ne",  stream); break;
	case GT:  case LT:  fputs ("gt",  stream); break;
	case GE:  case LE:  fputs ("ge",  stream); break;
	case GTU: case LTU: fputs ("gtu", stream); break;
	case GEU: case LEU: fputs ("geu", stream); break;
	default:
	  break;
	}
      break;

    case 'M':
      if (MEM_P (x))
	{
	  switch (GET_MODE (x))
	    {
	    case E_QImode: fputs (".b", stream); break;
	    case E_HImode: fputs (".w", stream); break;
	    case E_SImode: fputs (".l", stream); break;
	    case E_SFmode: fputs (".s", stream); break;
	    case E_DFmode: fputs (".d", stream); break;
	    default: gcc_unreachable ();
	    }
	}
      break;

    case 'm':
      gcc_assert (MEM_P (x));
      x = XEXP (x, 0);
      /* Fall through.  */
    case 'U':
      switch (GET_CODE (x))
	{
	case REG:
	case SUBREG:
	  sh_print_operand (stream, x, 0);
	  fputs (", 0", stream);
	  break;

	case PLUS:
	  sh_print_operand (stream, XEXP (x, 0), 0);
	  fputs (", ", stream);
	  sh_print_operand (stream, XEXP (x, 1), 0);
	  break;

	default:
	  gcc_unreachable ();
	}
      break;

    case 'V':
      {
	int num = exact_log2 (INTVAL (x));
	gcc_assert (num >= 0);
	fprintf (stream, "#%d", num);
      }
      break;

    case 'W':
      {
	int num = exact_log2 (~INTVAL (x));
	gcc_assert (num >= 0);
	fprintf (stream, "#%d", num);
      }
      break;

    case 'd':
      gcc_assert (REG_P (x) && GET_MODE (x) == V2SFmode);

      fprintf ((stream), "d%s", reg_names[REGNO (x)] + 1);
      break;

    case 'N':
      if (x == CONST0_RTX (GET_MODE (x)))
	{
	  fprintf ((stream), "r63");
	  break;
	}
      goto default_output;
    case 'u':
      if (CONST_INT_P (x))
	{
	  fprintf ((stream), "%u", (unsigned) INTVAL (x) & (0x10000 - 1));
	  break;
	}
      /* Fall through.  */

    default_output:
    default:
      regno = 0;
      mode = GET_MODE (x);

      switch (GET_CODE (x))
	{
	case TRUNCATE:
	  {
	    rtx inner = XEXP (x, 0);
	    int offset = 0;
	    machine_mode inner_mode;

	    /* We might see SUBREGs with vector mode registers inside.  */
	    if (GET_CODE (inner) == SUBREG
		&& (GET_MODE_SIZE (GET_MODE (inner))
		    == GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
		&& subreg_lowpart_p (inner))
	      inner = SUBREG_REG (inner);
	    if (CONST_INT_P (inner))
	      {
		x = GEN_INT (trunc_int_for_mode (INTVAL (inner), GET_MODE (x)));
		goto default_output;
	      }
	    inner_mode = GET_MODE (inner);
	    if (GET_CODE (inner) == SUBREG
		&& (GET_MODE_SIZE (GET_MODE (inner))
		    < GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
		&& REG_P (SUBREG_REG (inner)))
	      {
		offset = subreg_regno_offset (REGNO (SUBREG_REG (inner)),
					      GET_MODE (SUBREG_REG (inner)),
					      SUBREG_BYTE (inner),
					      GET_MODE (inner));
		inner = SUBREG_REG (inner);
	      }
	    if (!REG_P (inner) || GET_MODE_SIZE (inner_mode) > 8)
	      abort ();
	    /* Floating point register pairs are always big endian;
	       general purpose registers are 64 bit wide.  */
	    regno = REGNO (inner);
	    regno = (hard_regno_nregs (regno, inner_mode)
		     - hard_regno_nregs (regno, mode))
		    + offset;
	    x = inner;
	    goto reg;
	  }
	case SIGN_EXTEND:
	  x = XEXP (x, 0);
	  goto reg;
	case SUBREG:
	  gcc_assert (SUBREG_BYTE (x) == 0
		      && REG_P (SUBREG_REG (x)));

	  x = SUBREG_REG (x);
	  /* Fall through.  */

	reg:
	case REG:
	  regno += REGNO (x);
	  if (FP_REGISTER_P (regno)
	      && mode == V16SFmode)
	    fprintf ((stream), "mtrx%s", reg_names[regno] + 2);
	  else if (FP_REGISTER_P (REGNO (x))
		   && mode == V4SFmode)
	    fprintf ((stream), "fv%s", reg_names[regno] + 2);
	  else if (REG_P (x)
		   && mode == V2SFmode)
	    fprintf ((stream), "fp%s", reg_names[regno] + 2);
	  else if (FP_REGISTER_P (REGNO (x))
		   && GET_MODE_SIZE (mode) > 4)
	    fprintf ((stream), "d%s", reg_names[regno] + 1);
	  else
	    fputs (reg_names[regno], (stream));
	  break;

	case MEM:
	  output_address (GET_MODE (x), XEXP (x, 0));
	  break;

	default:
	  fputc ('#', stream);
	  output_addr_const (stream, x);
	  break;
	}
      break;
    }
}
static bool
sh_print_operand_punct_valid_p (unsigned char code)
{
  return (code == '.' || code == '#' || code == '@' || code == ','
	  || code == '$' || code == '\'' || code == '>');
}
/* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA.  */
static bool
sh_asm_output_addr_const_extra (FILE *file, rtx x)
{
  if (GET_CODE (x) == UNSPEC)
    {
      switch (XINT (x, 1))
	{
	case UNSPEC_PIC:
	  /* GLOBAL_OFFSET_TABLE or local symbols, no suffix.  */
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  break;
	case UNSPEC_GOT:
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("@GOT", file);
	  break;
	case UNSPEC_GOTOFF:
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("@GOTOFF", file);
	  break;
	case UNSPEC_PLT:
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("@PLT", file);
	  break;
	case UNSPEC_GOTPLT:
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("@GOTPLT", file);
	  break;
	case UNSPEC_PCREL:
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("@PCREL", file);
	  break;
	case UNSPEC_DTPOFF:
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("@DTPOFF", file);
	  break;
	case UNSPEC_GOTTPOFF:
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("@GOTTPOFF", file);
	  break;
	case UNSPEC_TPOFF:
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("@TPOFF", file);
	  break;
	case UNSPEC_CALLER:
	  {
	    char name[32];
	    /* LPCS stands for Label for PIC Call Site.  */
	    targetm.asm_out.generate_internal_label (name, "LPCS",
						     INTVAL (XVECEXP (x, 0, 0)));
	    assemble_name (file, name);
	  }
	  break;
	case UNSPEC_SYMOFF:
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputc ('-', file);
	  if (GET_CODE (XVECEXP (x, 0, 1)) == CONST)
	    {
	      fputc ('(', file);
	      output_addr_const (file, XVECEXP (x, 0, 1));
	      fputc (')', file);
	    }
	  else
	    output_addr_const (file, XVECEXP (x, 0, 1));
	  break;
	case UNSPEC_PCREL_SYMOFF:
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("-(", file);
	  output_addr_const (file, XVECEXP (x, 0, 1));
	  fputs ("-.)", file);
	  break;
	case UNSPEC_GOTFUNCDESC:
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("@GOTFUNCDESC", file);
	  break;
	case UNSPEC_GOTOFFFUNCDESC:
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("@GOTOFFFUNCDESC", file);
	  break;
	default:
	  return false;
	}
      return true;
    }
  else
    return false;
}
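
/* For example (illustrative): a symbol_ref "foo" wrapped in UNSPEC_GOT
   prints as "foo@GOT", UNSPEC_GOTOFF as "foo@GOTOFF", UNSPEC_SYMOFF of
   "foo" and "bar" as "foo-bar", and UNSPEC_PCREL_SYMOFF as "foo-(bar-.)",
   matching the relocation syntax the SH assembler expects.  */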
/* Encode symbol attributes of a SYMBOL_REF into its
   SYMBOL_REF_FLAGS.  */
static void
sh_encode_section_info (tree decl, rtx rtl, int first)
{
  default_encode_section_info (decl, rtl, first);

  if (TREE_CODE (decl) == FUNCTION_DECL
      && sh2a_function_vector_p (decl) && TARGET_SH2A)
    SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FUNCVEC_FUNCTION;
}
/* Prepare operands for a move define_expand; specifically, one of the
   operands must be in a register.  */
void
prepare_move_operands (rtx operands[], machine_mode mode)
{
  if ((mode == SImode || mode == DImode)
      && flag_pic
      && ! ((mode == Pmode || mode == ptr_mode)
	    && tls_symbolic_operand (operands[1], Pmode) != TLS_MODEL_NONE))
    {
      rtx temp;
      if (SYMBOLIC_CONST_P (operands[1]))
	{
	  if (MEM_P (operands[0]))
	    operands[1] = force_reg (Pmode, operands[1]);
	  else
	    {
	      temp = (!can_create_pseudo_p ()
		      ? operands[0]
		      : gen_reg_rtx (Pmode));
	      operands[1] = legitimize_pic_address (operands[1], mode, temp);
	    }
	}
      else if (GET_CODE (operands[1]) == CONST
	       && GET_CODE (XEXP (operands[1], 0)) == PLUS
	       && SYMBOLIC_CONST_P (XEXP (XEXP (operands[1], 0), 0)))
	{
	  temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
	  temp = legitimize_pic_address (XEXP (XEXP (operands[1], 0), 0),
					 mode, temp);
	  operands[1] = expand_binop (mode, add_optab, temp,
				      XEXP (XEXP (operands[1], 0), 1),
				      (!can_create_pseudo_p ()
				       ? temp
				       : gen_reg_rtx (Pmode)),
				      0, OPTAB_LIB_WIDEN);
	}
    }

  if (! reload_in_progress && ! reload_completed)
    {
      /* Copy the source to a register if both operands aren't registers.  */
      if (! register_operand (operands[0], mode)
	  && ! register_operand (operands[1], mode))
	operands[1] = copy_to_mode_reg (mode, operands[1]);

      if (MEM_P (operands[0]) && ! memory_operand (operands[0], mode))
	{
	  /* This is like change_address_1 (operands[0], mode, 0, 1) ,
	     except that we can't use that function because it is static.  */
	  rtx new_rtx = change_address (operands[0], mode, 0);
	  MEM_COPY_ATTRIBUTES (new_rtx, operands[0]);
	  operands[0] = new_rtx;
	}

      /* This case can happen while generating code to move the result
	 of a library call to the target.  Reject `st r0,@(rX,rY)' because
	 reload will fail to find a spill register for rX, since r0 is already
	 being used for the source.  */
      else if (refers_to_regno_p (R0_REG, operands[1])
	       && MEM_P (operands[0])
	       && GET_CODE (XEXP (operands[0], 0)) == PLUS
	       && REG_P (XEXP (XEXP (operands[0], 0), 1)))
	operands[1] = copy_to_mode_reg (mode, operands[1]);

      /* When the displacement addressing is used, RA will assign r0 to
	 the pseudo register operand for the QI/HImode load/store.
	 This tends to make a long live range for R0 and might cause
	 anomalous register spills in some case with LRA.  See PR
	 target/55212.
	 We split possible load/store to two move insns via r0 so as to
	 shorten R0 live range.  It will make some codes worse but will
	 win on average for LRA.
	 Also when base+index addressing is used and the index term is
	 a subreg, LRA assumes that more hard registers can be available
	 in some situation.  It isn't the case for SH in the problematic
	 case.  We can pre-allocate R0 for that index term to avoid
	 the issue.  See PR target/66591.  */
      else if (sh_lra_p ()
	       && ! TARGET_SH2A
	       && ((REG_P (operands[0]) && MEM_P (operands[1]))
		   || (REG_P (operands[1]) && MEM_P (operands[0]))))
	{
	  bool load_p = REG_P (operands[0]);
	  rtx reg = operands[load_p ? 0 : 1];
	  rtx adr = XEXP (operands[load_p ? 1 : 0], 0);

	  if ((mode == QImode || mode == HImode)
	      && REGNO (reg) >= FIRST_PSEUDO_REGISTER
	      && GET_CODE (adr) == PLUS
	      && REG_P (XEXP (adr, 0))
	      && (REGNO (XEXP (adr, 0)) >= FIRST_PSEUDO_REGISTER)
	      && CONST_INT_P (XEXP (adr, 1))
	      && INTVAL (XEXP (adr, 1)) != 0
	      && sh_legitimate_index_p (mode, XEXP (adr, 1), false, true))
	    {
	      rtx r0_rtx = gen_rtx_REG (mode, R0_REG);
	      emit_move_insn (r0_rtx, operands[1]);
	      operands[1] = r0_rtx;
	    }
	  if (REGNO (reg) >= FIRST_PSEUDO_REGISTER
	      && GET_CODE (adr) == PLUS
	      && REG_P (XEXP (adr, 0))
	      && (REGNO (XEXP (adr, 0)) >= FIRST_PSEUDO_REGISTER)
	      && SUBREG_P (XEXP (adr, 1))
	      && REG_P (SUBREG_REG (XEXP (adr, 1))))
	    {
	      rtx r0_rtx = gen_rtx_REG (GET_MODE (XEXP (adr, 1)), R0_REG);
	      emit_move_insn (r0_rtx, XEXP (adr, 1));
	      XEXP (adr, 1) = r0_rtx;
	    }
	}
    }

  if (mode == Pmode || mode == ptr_mode)
    {
      rtx op0 = operands[0];
      rtx op1 = operands[1];
      rtx opc;
      if (GET_CODE (op1) == CONST
	  && GET_CODE (XEXP (op1, 0)) == PLUS
	  && (tls_symbolic_operand (XEXP (XEXP (op1, 0), 0), Pmode)
	      != TLS_MODEL_NONE))
	{
	  opc = XEXP (XEXP (op1, 0), 1);
	  op1 = XEXP (XEXP (op1, 0), 0);
	}
      else
	opc = NULL_RTX;

      enum tls_model tls_kind;

      if (! reload_in_progress && ! reload_completed
	  && (tls_kind = tls_symbolic_operand (op1, Pmode)) != TLS_MODEL_NONE)
	{
	  rtx tga_op1, tga_ret, tmp, tmp2;

	  if (! flag_pic
	      && (tls_kind == TLS_MODEL_GLOBAL_DYNAMIC
		  || tls_kind == TLS_MODEL_LOCAL_DYNAMIC
		  || tls_kind == TLS_MODEL_INITIAL_EXEC))
	    {
	      static int got_labelno;
	      /* Don't schedule insns for getting GOT address when
		 the first scheduling is enabled, to avoid spill
		 failures.  */
	      if (flag_schedule_insns)
		emit_insn (gen_blockage ());
	      emit_insn (gen_GOTaddr2picreg (GEN_INT (++got_labelno)));
	      emit_use (gen_rtx_REG (SImode, PIC_REG));
	      if (flag_schedule_insns)
		emit_insn (gen_blockage ());
	    }

	  switch (tls_kind)
	    {
	    case TLS_MODEL_GLOBAL_DYNAMIC:
	      tga_ret = gen_rtx_REG (Pmode, R0_REG);
	      if (TARGET_FDPIC)
		emit_move_insn (gen_rtx_REG (Pmode, PIC_REG),
				sh_get_fdpic_reg_initial_val ());
	      emit_call_insn (gen_tls_global_dynamic (tga_ret, op1));
	      tmp = gen_reg_rtx (Pmode);
	      emit_move_insn (tmp, tga_ret);
	      op1 = tmp;
	      break;

	    case TLS_MODEL_LOCAL_DYNAMIC:
	      tga_ret = gen_rtx_REG (Pmode, R0_REG);
	      if (TARGET_FDPIC)
		emit_move_insn (gen_rtx_REG (Pmode, PIC_REG),
				sh_get_fdpic_reg_initial_val ());
	      emit_call_insn (gen_tls_local_dynamic (tga_ret, op1));

	      tmp = gen_reg_rtx (Pmode);
	      emit_move_insn (tmp, tga_ret);

	      if (register_operand (op0, Pmode))
		tmp2 = op0;
	      else
		tmp2 = gen_reg_rtx (Pmode);

	      emit_insn (gen_symDTPOFF2reg (tmp2, op1, tmp));
	      op1 = tmp2;
	      break;

	    case TLS_MODEL_INITIAL_EXEC:
	      tga_op1 = !can_create_pseudo_p () ? op0 : gen_reg_rtx (Pmode);
	      tmp = gen_sym2GOTTPOFF (op1);
	      if (TARGET_FDPIC)
		emit_move_insn (gen_rtx_REG (Pmode, PIC_REG),
				sh_get_fdpic_reg_initial_val ());
	      emit_insn (gen_tls_initial_exec (tga_op1, tmp));
	      op1 = tga_op1;
	      break;

	    case TLS_MODEL_LOCAL_EXEC:
	      tmp2 = gen_reg_rtx (Pmode);
	      emit_insn (gen_store_gbr (tmp2));
	      tmp = gen_reg_rtx (Pmode);
	      emit_insn (gen_symTPOFF2reg (tmp, op1));

	      if (register_operand (op0, Pmode))
		op1 = op0;
	      else
		op1 = gen_reg_rtx (Pmode);

	      emit_insn (gen_addsi3 (op1, tmp, tmp2));
	      break;

	    default:
	      gcc_unreachable ();
	    }
	  if (opc)
	    emit_insn (gen_addsi3 (op1, op1, force_reg (SImode, opc)));
	  operands[1] = op1;
	}
    }

  if (SH_OFFSETS_MUST_BE_WITHIN_SECTIONS_P)
    {
      rtx base, offset;
      split_const (operands[1], &base, &offset);

      if (GET_CODE (base) == SYMBOL_REF
	  && !offset_within_block_p (base, INTVAL (offset)))
	{
	  rtx tmp = can_create_pseudo_p () ? gen_reg_rtx (mode) : operands[0];
	  emit_move_insn (tmp, base);
	  if (!arith_operand (offset, mode))
	    offset = force_reg (mode, offset);
	  emit_insn (gen_add3_insn (operands[0], tmp, offset));
	}
    }
}
/* Implement the canonicalize_comparison target hook for the combine
   pass.  For the target hook this function is invoked via
   sh_canonicalize_comparison.  This function is also re-used to
   canonicalize comparisons in cbranch pattern expanders.  */
static void
sh_canonicalize_comparison (enum rtx_code& cmp, rtx& op0, rtx& op1,
			    machine_mode mode,
			    bool op0_preserve_value)
{
  /* When invoked from within the combine pass the mode is not specified,
     so try to get it from one of the operands.  */
  if (mode == VOIDmode)
    mode = GET_MODE (op0);
  if (mode == VOIDmode)
    mode = GET_MODE (op1);

  // We need to have a mode to do something useful here.
  if (mode == VOIDmode)
    return;

  // Currently, we don't deal with floats here.
  if (GET_MODE_CLASS (mode) == MODE_FLOAT)
    return;

  // Make sure that the constant operand is the second operand.
  if (CONST_INT_P (op0) && !CONST_INT_P (op1))
    {
      if (op0_preserve_value)
	return;

      std::swap (op0, op1);
      cmp = swap_condition (cmp);
    }

  if (CONST_INT_P (op1))
    {
      /* Try to adjust the constant operand in such a way that available
	 comparison insns can be utilized better and the constant can be
	 loaded with a 'mov #imm,Rm' insn.  This avoids a load from the
	 constant pool.  */
      const HOST_WIDE_INT val = INTVAL (op1);

      /* x > -1	          --> x >= 0
	 x > 0xFFFFFF7F   --> x >= 0xFFFFFF80
	 x <= -1          --> x < 0
	 x <= 0xFFFFFF7F  --> x < 0xFFFFFF80  */
      if ((val == -1 || val == -0x81) && (cmp == GT || cmp == LE))
	{
	  cmp = cmp == GT ? GE : LT;
	  op1 = gen_int_mode (val + 1, mode);
	}

      /* x >= 1     --> x > 0
	 x >= 0x80  --> x > 0x7F
	 x < 1      --> x <= 0
	 x < 0x80   --> x <= 0x7F  */
      else if ((val == 1 || val == 0x80) && (cmp == GE || cmp == LT))
	{
	  cmp = cmp == GE ? GT : LE;
	  op1 = gen_int_mode (val - 1, mode);
	}

      /* unsigned x >= 1  --> x != 0
	 unsigned x < 1   --> x == 0  */
      else if (val == 1 && (cmp == GEU || cmp == LTU))
	{
	  cmp = cmp == GEU ? NE : EQ;
	  op1 = CONST0_RTX (mode);
	}

      /* unsigned x >= 0x80  --> unsigned x > 0x7F
	 unsigned x < 0x80   --> unsigned x < 0x7F  */
      else if (val == 0x80 && (cmp == GEU || cmp == LTU))
	{
	  cmp = cmp == GEU ? GTU : LEU;
	  op1 = gen_int_mode (val - 1, mode);
	}

      /* unsigned x > 0   --> x != 0
	 unsigned x <= 0  --> x == 0  */
      else if (val == 0 && (cmp == GTU || cmp == LEU))
	cmp = cmp == GTU ? NE : EQ;

      /* unsigned x > 0x7FFFFFFF   --> signed x < 0
	 unsigned x <= 0x7FFFFFFF  --> signed x >= 0  */
      else if (mode == SImode && (cmp == GTU || cmp == LEU)
	       && val == 0x7FFFFFFF)
	{
	  cmp = cmp == GTU ? LT : GE;
	  op1 = CONST0_RTX (mode);
	}

      /* unsigned x >= 0x80000000  --> signed x < 0
	 unsigned x < 0x80000000   --> signed x >= 0  */
      else if (mode == SImode && (cmp == GEU || cmp == LTU)
	       && (unsigned HOST_WIDE_INT)val
		  == ((unsigned HOST_WIDE_INT)0x7FFFFFFF + 1))
	{
	  cmp = cmp == GEU ? LT : GE;
	  op1 = CONST0_RTX (mode);
	}
    }
}
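/* Worked example of the rewrites above (illustrative): with SImode
   operands, 'unsigned x >= 0x80000000' (GEU) becomes 'signed x < 0',
   so the large constant never has to be loaded from the constant pool;
   likewise 'x > -1' (GT) becomes 'x >= 0' (GE), which maps directly onto
   a cmp/pz test.  */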
/* This function implements the canonicalize_comparison target hook.
   This wrapper around the internally used sh_canonicalize_comparison
   function is needed to do the enum rtx_code <-> int conversion.
   Target hooks cannot use enum rtx_code in their definitions.  */
static void
sh_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
			    bool op0_preserve_value)
{
  enum rtx_code tmp_code = (enum rtx_code)*code;
  sh_canonicalize_comparison (tmp_code, *op0, *op1,
			      VOIDmode, op0_preserve_value);
  *code = (int)tmp_code;
}
/* This function implements the legitimate_combined_insn target hook,
   which the combine pass uses to early reject combined insns, before
   it tries to recog the insn and determine its cost.  */
static bool
sh_legitimate_combined_insn (rtx_insn* insn)
{
  /* Reject combinations of memory loads and zero extensions, as these
     interfere with other combine patterns such as zero extracts and bit
     tests.  The SH2A movu.{b|w} insns are formed later in the
     'sh_optimize_extu_exts' pass after combine/split1.  */
  rtx p = PATTERN (insn);
  if (GET_CODE (p) == SET
      && REG_P (XEXP (p, 0)) && GET_MODE (XEXP (p, 0)) == SImode
      && GET_CODE (XEXP (p, 1)) == ZERO_EXTEND
      && MEM_P (XEXP (XEXP (p, 1), 0)))
    return false;

  return true;
}

static bool
sh_fixed_condition_code_regs (unsigned int* p1, unsigned int* p2)
{
  *p1 = T_REG;
  *p2 = INVALID_REGNUM;
  return true;
}
/* Try to calculate the branch distance of a conditional branch in bytes.

   FIXME: Because of PR 59189 we can't use the CFG here.  Instead just
   walk from this insn into the next (fall-through) basic block and see if
   we hit the label.  */
unsigned int
sh_cbranch_distance (rtx_insn* _cbranch_insn, unsigned int max_dist)
{
  rtx_jump_insn* cbranch_insn = safe_as_a<rtx_jump_insn*> (_cbranch_insn);

  if (dump_file)
    {
      fprintf (dump_file, "sh_cbranch_distance insn = \n");
      print_rtl_single (dump_file, cbranch_insn);
    }

  unsigned int dist = 0;

  for (rtx_insn* i = next_nonnote_insn (cbranch_insn);
       i != NULL && dist < max_dist; i = next_nonnote_insn (i))
    {
      const unsigned int i_len = get_attr_length (i);
      dist += i_len;

      if (dump_file)
	fprintf (dump_file, "  insn %d  length = %u  dist = %u\n",
		 INSN_UID (i), i_len, dist);

      if (rtx_code_label* l = dyn_cast<rtx_code_label*> (i))
	{
	  if (l == cbranch_insn->jump_target ())
	    {
	      if (dump_file)
		fprintf (dump_file, "  cbranch dist = %u\n", dist);
	      return dist;
	    }
	}
    }

  if (dump_file)
    fprintf (dump_file, "  cbranch dist = unknown\n");

  return unknown_cbranch_distance;
}
enum rtx_code
prepare_cbranch_operands (rtx *operands, machine_mode mode,
			  enum rtx_code comparison)
{
  gcc_assert (can_create_pseudo_p ());

  if (comparison == LAST_AND_UNUSED_RTX_CODE)
    comparison = GET_CODE (operands[0]);

  sh_canonicalize_comparison (comparison, operands[1], operands[2],
			      mode, false);

  rtx op1 = operands[1];
  operands[1] = force_reg (mode, op1);

  /* When we are handling DImode comparisons, we want to keep constants so
     that we can optimize the component comparisons; however, memory loads
     are better issued as a whole so that they can be scheduled well.
     SImode equality comparisons allow I08 constants, but only when they
     compare r0.  Hence, if operands[1] has to be loaded from somewhere else
     into a register, that register might as well be r0, and we allow the
     constant.  If it is already in a register, this is likely to be
     allocated to a different hard register, thus we load the constant into
     a register unless it is zero.  */
  if (!REG_P (operands[2])
      && (!CONST_INT_P (operands[2])
	  || (mode == SImode && operands[2] != CONST0_RTX (SImode)
	      && ((comparison != EQ && comparison != NE)
		  || (REG_P (op1) && REGNO (op1) != R0_REG)
		  || !satisfies_constraint_I08 (operands[2])))))
    operands[2] = force_reg (mode, operands[2]);

  return comparison;
}
static void
expand_cbranchsi4 (rtx *operands, enum rtx_code comparison,
		   profile_probability probability)
{
  rtx (*branch_expander) (rtx) = gen_branch_true;

  comparison = prepare_cbranch_operands (operands, SImode, comparison);
  switch (comparison)
    {
    case NE: case LT: case LE: case LTU: case LEU:
      comparison = reverse_condition (comparison);
      branch_expander = gen_branch_false;
      break;
    default: ;
    }
  emit_insn (gen_rtx_SET (get_t_reg_rtx (),
			  gen_rtx_fmt_ee (comparison, SImode,
					  operands[1], operands[2])));
  rtx_insn *jump = emit_jump_insn (branch_expander (operands[3]));
  if (probability.initialized_p ())
    add_reg_br_prob_note (jump, probability);
}

void
expand_cbranchsi4 (rtx *operands, enum rtx_code comparison)
{
  expand_cbranchsi4 (operands, comparison,
		     profile_probability::uninitialized ());
}
/* ??? How should we distribute probabilities when more than one branch
   is generated.  So far we only have some ad-hoc observations:
   - If the operands are random, they are likely to differ in both parts.
   - If comparing items in a hash chain, the operands are random or equal;
     operation should be EQ or NE.
   - If items are searched in an ordered tree from the root, we can expect
     the highpart to be unequal about half of the time; operation should be
     an inequality comparison, operands non-constant, and overall probability
     about 50%.  Likewise for quicksort.
   - Range checks will often be made against constants.  Even if we assume for
     simplicity an even distribution of the non-constant operand over a
     sub-range here, the same probability could be generated with differently
     wide sub-ranges - as long as the ratio of the part of the subrange that
     is before the threshold to the part that comes after the threshold stays
     the same.  Thus, we can't really tell anything here;
     assuming random distribution is at least simple.  */
bool
expand_cbranchdi4 (rtx *operands, enum rtx_code comparison)
{
  enum rtx_code msw_taken, msw_skip, lsw_taken;
  rtx_code_label *skip_label = NULL;
  rtx op1h, op1l, op2h, op2l;
  int num_branches;
  profile_probability prob, rev_prob;
  profile_probability msw_taken_prob = profile_probability::uninitialized (),
		      msw_skip_prob = profile_probability::uninitialized (),
		      lsw_taken_prob = profile_probability::uninitialized ();

  comparison = prepare_cbranch_operands (operands, DImode, comparison);
  op1h = gen_highpart_mode (SImode, DImode, operands[1]);
  op2h = gen_highpart_mode (SImode, DImode, operands[2]);
  op1l = gen_lowpart (SImode, operands[1]);
  op2l = gen_lowpart (SImode, operands[2]);
  msw_taken = msw_skip = lsw_taken = LAST_AND_UNUSED_RTX_CODE;
  prob = split_branch_probability;
  rev_prob = prob.invert ();
  switch (comparison)
    {
    case EQ:
      msw_skip = NE;
      lsw_taken = EQ;
      if (prob.initialized_p ())
	{
	  /* FIXME: This is not optimal.  We do not really know the probability
	     that values differ by MSW only, but we should probably distribute
	     probabilities more evenly.  */
	  msw_skip_prob = rev_prob;
	  lsw_taken_prob = prob > profile_probability::never ()
			   ? profile_probability::guessed_always ()
			   : profile_probability::guessed_never ();
	}
      break;
    case NE:
      msw_taken = NE;
      msw_taken_prob = prob;
      lsw_taken = NE;
      lsw_taken_prob = profile_probability::guessed_never ();
      break;
    case GTU: case GT:
      msw_taken = comparison;
      if (CONST_INT_P (op2l) && INTVAL (op2l) == -1)
	break;
      if (comparison != GTU || op2h != CONST0_RTX (SImode))
	msw_skip = swap_condition (msw_taken);
      lsw_taken = GTU;
      break;
    case GEU: case GE:
      if (op2l == CONST0_RTX (SImode))
	msw_taken = comparison;
      else
	{
	  msw_taken = comparison == GE ? GT : GTU;
	  msw_skip = swap_condition (msw_taken);
	  lsw_taken = GEU;
	}
      break;
    case LTU: case LT:
      msw_taken = comparison;
      if (op2l == CONST0_RTX (SImode))
	break;
      msw_skip = swap_condition (msw_taken);
      lsw_taken = LTU;
      break;
    case LEU: case LE:
      if (CONST_INT_P (op2l) && INTVAL (op2l) == -1)
	msw_taken = comparison;
      else
	{
	  lsw_taken = LEU;
	  if (comparison == LE)
	    msw_taken = LT;
	  else if (op2h != CONST0_RTX (SImode))
	    msw_taken = LTU;
	  else
	    {
	      msw_skip = swap_condition (LTU);
	      break;
	    }
	  msw_skip = swap_condition (msw_taken);
	}
      break;
    default: return false;
    }
  num_branches = ((msw_taken != LAST_AND_UNUSED_RTX_CODE)
		  + (msw_skip != LAST_AND_UNUSED_RTX_CODE)
		  + (lsw_taken != LAST_AND_UNUSED_RTX_CODE));
  if (comparison != EQ && comparison != NE && num_branches > 1)
    {
      if (!CONSTANT_P (operands[2])
	  && prob.initialized_p ()
	  && prob.to_reg_br_prob_base () >= (int) (REG_BR_PROB_BASE * 3 / 8U)
	  && prob.to_reg_br_prob_base () <= (int) (REG_BR_PROB_BASE * 5 / 8U))
	{
	  msw_taken_prob = prob.apply_scale (1, 2);
	  msw_skip_prob = rev_prob.apply_scale (REG_BR_PROB_BASE,
						rev_prob.to_reg_br_prob_base ()
						+ REG_BR_PROB_BASE);
	  lsw_taken_prob = prob;
	}
      else
	{
	  msw_taken_prob = prob;
	  msw_skip_prob = profile_probability::guessed_always ();
	  /* ??? If we have a constant op2h, should we use that when
	     calculating lsw_taken_prob?  */
	  lsw_taken_prob = prob;
	}
    }
  operands[1] = op1h;
  operands[2] = op2h;

  if (msw_taken != LAST_AND_UNUSED_RTX_CODE)
    expand_cbranchsi4 (operands, msw_taken, msw_taken_prob);
  if (msw_skip != LAST_AND_UNUSED_RTX_CODE)
    {
      rtx taken_label = operands[3];

      /* Operands were possibly modified, but msw_skip doesn't expect this.
	 Always use the original ones.  */
      if (msw_taken != LAST_AND_UNUSED_RTX_CODE)
	{
	  operands[1] = op1h;
	  operands[2] = op2h;
	}

      operands[3] = skip_label = gen_label_rtx ();
      expand_cbranchsi4 (operands, msw_skip, msw_skip_prob);
      operands[3] = taken_label;
    }
  operands[1] = op1l;
  operands[2] = op2l;
  if (lsw_taken != LAST_AND_UNUSED_RTX_CODE)
    expand_cbranchsi4 (operands, lsw_taken, lsw_taken_prob);
  if (msw_skip != LAST_AND_UNUSED_RTX_CODE)
    emit_label (skip_label);
  return true;
}
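/* Illustrative numbers for the probability split above, assuming
   REG_BR_PROB_BASE == 10000 and an overall taken probability of 50%
   (prob == 5000, rev_prob == 5000):

     msw_taken_prob = 5000 * 1/2 = 2500
     msw_skip_prob  = 5000 * 10000 / (5000 + 10000) = 3333
     lsw_taken_prob = 5000

   i.e. the two MSW branches and the LSW branch together still approximate
   the original 50% taken probability.  */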
/* Given an operand, return 1 if the evaluated operand plugged into an
   if_then_else will result in a branch_true, 0 if branch_false, or
   -1 if neither applies.  The truth table goes like this:

       op   | cmpval | code    | result
   ---------+--------+---------+--------------------
      T (0) |   0    |  EQ (1) |  0 = 0 ^ (0 == 1)
      T (0) |   1    |  EQ (1) |  1 = 0 ^ (1 == 1)
      T (0) |   0    |  NE (0) |  1 = 0 ^ (0 == 0)
      T (0) |   1    |  NE (0) |  0 = 0 ^ (1 == 0)
     !T (1) |   0    |  EQ (1) |  1 = 1 ^ (0 == 1)
     !T (1) |   1    |  EQ (1) |  0 = 1 ^ (1 == 1)
     !T (1) |   0    |  NE (0) |  0 = 1 ^ (0 == 0)
     !T (1) |   1    |  NE (0) |  1 = 1 ^ (1 == 0)  */
int
sh_eval_treg_value (rtx op)
{
  if (t_reg_operand (op, GET_MODE (op)))
    return 1;
  if (negt_reg_operand (op, GET_MODE (op)))
    return 0;

  rtx_code code = GET_CODE (op);
  if ((code != EQ && code != NE) || !CONST_INT_P (XEXP (op, 1)))
    return -1;

  int cmpop = code == EQ ? 1 : 0;
  int cmpval = INTVAL (XEXP (op, 1));
  if (cmpval != 0 && cmpval != 1)
    return -1;

  int t;
  if (t_reg_operand (XEXP (op, 0), GET_MODE (XEXP (op, 0))))
    t = 0;
  else if (negt_reg_operand (XEXP (op, 0), GET_MODE (XEXP (op, 0))))
    t = 1;
  else
    return -1;

  return t ^ (cmpval == cmpop);
}
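/* Example (illustrative): for op == (eq (reg T) (const_int 1)) we get
   cmpop == 1, cmpval == 1 and t == 0, so the result is 0 ^ (1 == 1) == 1,
   i.e. the expression behaves like a plain branch_true - the
   T(0)/cmpval(1)/EQ(1) row of the table above.  */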
/* Emit INSN, possibly in a PARALLEL with an USE/CLOBBER of FPSCR bits in case
   of floating-point comparisons.  */
static void
sh_emit_set_t_insn (rtx insn, machine_mode mode)
{
  if (TARGET_FPU_ANY && GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (insn) != PARALLEL)
    {
      insn = gen_rtx_PARALLEL (VOIDmode,
	  gen_rtvec (3, insn,
	      gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, FPSCR_STAT_REG)),
	      gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, FPSCR_MODES_REG))));
    }
  emit_insn (insn);
}
/* Prepare the operands for an scc instruction; make sure that the
   compare has been done and the result is in T_REG.  */
void
sh_emit_scc_to_t (enum rtx_code code, rtx op0, rtx op1)
{
  rtx t_reg = get_t_reg_rtx ();
  enum rtx_code oldcode = code;

  /* First need a compare insn.  */
  switch (code)
    {
    case NE:
      /* It isn't possible to handle this case.  */
      gcc_unreachable ();
    case LT:
      code = GT;
      break;
    case LE:
      code = GE;
      break;
    case LTU:
      code = GTU;
      break;
    case LEU:
      code = GEU;
      break;
    default:
      break;
    }
  if (code != oldcode)
    std::swap (op0, op1);

  machine_mode mode = GET_MODE (op0);
  if (mode == VOIDmode)
    mode = GET_MODE (op1);

  op0 = force_reg (mode, op0);
  if ((code != EQ && code != NE
       && (op1 != const0_rtx
	   || code == GTU || code == GEU || code == LTU || code == LEU))
      || (mode == DImode && op1 != const0_rtx)
      || (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT))
    op1 = force_reg (mode, op1);

  sh_emit_set_t_insn (gen_rtx_SET (t_reg,
				   gen_rtx_fmt_ee (code, SImode, op0, op1)),
		      mode);
}
/* Called from the md file, set up the operands of a compare instruction.  */
void
sh_emit_compare_and_branch (rtx *operands, machine_mode mode)
{
  enum rtx_code code = GET_CODE (operands[0]);
  enum rtx_code branch_code;
  rtx op0 = operands[1];
  rtx op1 = operands[2];
  rtx insn;
  bool need_ccmpeq = false;

  if (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      op0 = force_reg (mode, op0);
      op1 = force_reg (mode, op1);
    }
  else
    {
      if (code != EQ || mode == DImode)
	{
	  /* Force args into regs, since we can't use constants here.  */
	  op0 = force_reg (mode, op0);
	  if (op1 != const0_rtx || code == GTU || code == GEU)
	    op1 = force_reg (mode, op1);
	}
    }

  if (GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      if (code == LT
	  || (code == LE && TARGET_IEEE && TARGET_SH2E)
	  || (code == GE && !(TARGET_IEEE && TARGET_SH2E)))
	{
	  std::swap (op0, op1);
	  code = swap_condition (code);
	}

      /* GE becomes fcmp/gt+fcmp/eq, for SH2E and TARGET_IEEE only.  */
      if (code == GE)
	{
	  gcc_assert (TARGET_IEEE && TARGET_SH2E);
	  need_ccmpeq = true;
	  code = GT;
	}

      /* Now we can have EQ, NE, GT, LE.  NE and LE are then transformed
	 to EQ/GT respectively.  */
      gcc_assert (code == EQ || code == GT || code == NE || code == LE);
    }

  switch (code)
    {
    case EQ:
    case GT:
    case GE:
    case GTU:
    case GEU:
      branch_code = code;
      break;
    case NE:
    case LT:
    case LE:
    case LTU:
    case LEU:
      branch_code = reverse_condition (code);
      break;
    default:
      gcc_unreachable ();
    }

  insn = gen_rtx_SET (get_t_reg_rtx (),
		      gen_rtx_fmt_ee (branch_code, SImode, op0, op1));

  sh_emit_set_t_insn (insn, mode);
  if (need_ccmpeq)
    sh_emit_set_t_insn (gen_ieee_ccmpeqsf_t (op0, op1), mode);

  if (branch_code == code)
    emit_jump_insn (gen_branch_true (operands[3]));
  else
    emit_jump_insn (gen_branch_false (operands[3]));
}
void
sh_emit_compare_and_set (rtx *operands, machine_mode mode)
{
  enum rtx_code code = GET_CODE (operands[1]);
  rtx op0 = operands[2];
  rtx op1 = operands[3];
  rtx_code_label *lab = NULL;
  bool invert = false;

  op0 = force_reg (mode, op0);
  if ((code != EQ && code != NE
       && (op1 != const0_rtx
	   || code == GTU || code == GEU || code == LTU || code == LEU))
      || (mode == DImode && op1 != const0_rtx)
      || (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT))
    op1 = force_reg (mode, op1);

  if (GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      if (code == LT || code == LE)
	{
	  std::swap (op0, op1);
	  code = swap_condition (code);
	}
      if (code == GE)
	{
	  if (TARGET_IEEE)
	    {
	      lab = gen_label_rtx ();
	      sh_emit_scc_to_t (EQ, op0, op1);
	      emit_jump_insn (gen_branch_true (lab));
	      code = GT;
	    }
	  else
	    {
	      code = LT;
	      invert = true;
	    }
	}
    }

  if (code == NE)
    {
      code = EQ;
      invert = true;
    }

  sh_emit_scc_to_t (code, op0, op1);
  if (lab)
    emit_label (lab);
  if (invert)
    emit_insn (gen_movnegt (operands[0], get_t_reg_rtx ()));
  else
    emit_move_insn (operands[0], get_t_reg_rtx ());
}
/* Functions to output assembly code.  */

/* Return a sequence of instructions to perform DI or DF move.

   Since the SH cannot move a DI or DF in one instruction, we have
   to take care when we see overlapping source and dest registers.  */
const char *
output_movedouble (rtx insn ATTRIBUTE_UNUSED, rtx operands[],
		   machine_mode mode)
{
  rtx dst = operands[0];
  rtx src = operands[1];

  if (MEM_P (dst)
      && GET_CODE (XEXP (dst, 0)) == PRE_DEC)
    return     "mov.l %T1,%0" "\n"
	   "	mov.l %1,%0";

  if (register_operand (dst, mode)
      && register_operand (src, mode))
    {
      if (REGNO (src) == MACH_REG)
	return     "sts mach,%S0" "\n"
	       "	sts macl,%R0";

      /* When mov.d r1,r2 do r2->r3 then r1->r2;
	 when mov.d r1,r0 do r1->r0 then r2->r1.  */
      if (REGNO (src) + 1 == REGNO (dst))
	return     "mov %T1,%T0" "\n"
	       "	mov %1,%0";
      else
	return     "mov %1,%0" "\n"
	       "	mov %T1,%T0";
    }
  else if (CONST_INT_P (src))
    {
      if (INTVAL (src) < 0)
	output_asm_insn ("mov #-1,%S0", operands);
      else
	output_asm_insn ("mov #0,%S0", operands);

      return "mov %1,%R0";
    }
  else if (MEM_P (src))
    {
      int ptrreg = -1;
      int dreg = REGNO (dst);
      rtx inside = XEXP (src, 0);

      switch (GET_CODE (inside))
	{
	case REG:
	  ptrreg = REGNO (inside);
	  break;

	case SUBREG:
	  ptrreg = subreg_regno (inside);
	  break;

	case PLUS:
	  ptrreg = REGNO (XEXP (inside, 0));
	  /* ??? A r0+REG address shouldn't be possible here, because it isn't
	     an offsettable address.  Unfortunately, offsettable addresses use
	     QImode to check the offset, and a QImode offsettable address
	     requires r0 for the other operand, which is not currently
	     supported, so we can't use the 'o' constraint.
	     Thus we must check for and handle r0+REG addresses here.
	     We punt for now, since this is likely very rare.  */
	  gcc_assert (!REG_P (XEXP (inside, 1)));
	  break;

	case LABEL_REF:
	  return     "mov.l %1,%0" "\n"
		 "	mov.l %1+4,%T0";

	case POST_INC:
	  return     "mov.l %1,%0" "\n"
		 "	mov.l %1,%T0";

	default:
	  gcc_unreachable ();
	}

      /* Work out the safe way to copy.  Copy into the second half first.  */
      if (dreg == ptrreg)
	return     "mov.l %T1,%T0" "\n"
	       "	mov.l %1,%0";
    }

  return     "mov.l %1,%0" "\n"
	 "	mov.l %T1,%T0";
}
/* Print an instruction which would have gone into a delay slot after
   another instruction, but couldn't because the other instruction expanded
   into a sequence where putting the slot insn at the end wouldn't work.  */
static void
print_slot (rtx_sequence *seq)
{
  final_scan_insn (seq->insn (1), asm_out_file, optimize, 1, NULL);

  seq->insn (1)->set_deleted ();
}
const char *
output_far_jump (rtx_insn *insn, rtx op)
{
  struct { rtx lab, reg, op; } this_jmp;
  rtx_code_label *braf_base_lab = NULL;
  const char *jump;
  int far;
  int offset = branch_dest (insn) - INSN_ADDRESSES (INSN_UID (insn));
  rtx_insn *prev;

  this_jmp.lab = gen_label_rtx ();

  if (TARGET_SH2
      && offset >= -32764
      && offset - get_attr_length (insn) <= 32766
      && ! CROSSING_JUMP_P (insn))
    {
      far = 0;
      jump =   "mov.w %O0,%1" "\n"
	     "	braf %1";
    }
  else
    {
      far = 1;
      if (flag_pic)
	{
	  if (TARGET_SH2)
	    jump =   "mov.l %O0,%1" "\n"
		   "	braf %1";
	  else
	    jump =   "mov.l r0,@-r15" "\n"
		   "	mova %O0,r0" "\n"
		   "	mov.l @r0,%1" "\n"
		   "	add r0,%1" "\n"
		   "	mov.l @r15+,r0" "\n"
		   "	jmp @%1";
	}
      else
	jump =   "mov.l %O0,%1" "\n"
	       "	jmp @%1";
    }
  /* If we have a scratch register available, use it.  */
  if (NONJUMP_INSN_P ((prev = prev_nonnote_insn (insn)))
      && INSN_CODE (prev) == CODE_FOR_indirect_jump_scratch)
    {
      this_jmp.reg = SET_DEST (XVECEXP (PATTERN (prev), 0, 0));
      if (REGNO (this_jmp.reg) == R0_REG && flag_pic && ! TARGET_SH2)
	jump =   "mov.l r1,@-r15" "\n"
	       "	mova %O0,r0" "\n"
	       "	mov.l @r0,r1" "\n"
	       "	add r1,r0" "\n"
	       "	mov.l @r15+,r1" "\n"
	       "	jmp @%1";
      output_asm_insn (jump, &this_jmp.lab);
      if (dbr_sequence_length ())
	print_slot (final_sequence);
      else
	output_asm_insn ("nop", 0);
    }
  else
    {
      /* Output the delay slot insn first if any.  */
      if (dbr_sequence_length ())
	print_slot (final_sequence);

      this_jmp.reg = gen_rtx_REG (SImode, 13);
      output_asm_insn ("mov.l r13,@-r15", 0);
      output_asm_insn (jump, &this_jmp.lab);
      output_asm_insn ("mov.l @r15+,r13", 0);
    }
  if (far && flag_pic && TARGET_SH2)
    {
      braf_base_lab = gen_label_rtx ();
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (braf_base_lab));
    }
  if (far)
    output_asm_insn (".align 2", 0);
  (*targetm.asm_out.internal_label) (asm_out_file, "L",
				     CODE_LABEL_NUMBER (this_jmp.lab));
  this_jmp.op = op;
  if (far && flag_pic)
    {
      if (TARGET_SH2)
	this_jmp.lab = braf_base_lab;
      output_asm_insn (".long %O2-%O0", &this_jmp.lab);
    }
  else
    output_asm_insn (far ? ".long %O2" : ".word %O2-%O0", &this_jmp.lab);
  return "";
}
/* Local label counter, used for constants in the pool and inside
   pattern branches.  */
static int lf = 100;

/* Output code for ordinary branches.  */
const char *
output_branch (int logic, rtx_insn *insn, rtx *operands)
{
  switch (get_attr_length (insn))
    {
    case 6:
      /* This can happen if filling the delay slot has caused a forward
	 branch to exceed its range (we could reverse it, but only
	 when we know we won't overextend other branches; this should
	 best be handled by relaxation).
	 It can also happen when other condbranches hoist delay slot insn
	 from their destination, thus leading to code size increase.
	 But the branch will still be in the range -4092..+4098 bytes.  */
      if (! TARGET_RELAX)
	{
	  int label = lf++;
	  /* The call to print_slot will clobber the operands.  */
	  rtx op0 = operands[0];

	  /* If the instruction in the delay slot is annulled (true), then
	     there is no delay slot where we can put it now.  The only safe
	     place for it is after the label.  final will do that by default.  */

	  if (final_sequence
	      && ! INSN_ANNULLED_BRANCH_P (final_sequence->insn (0))
	      && get_attr_length (final_sequence->insn (1)))
	    {
	      asm_fprintf (asm_out_file, "\tb%s%ss\t%LLF%d\n", logic ? "f" : "t",
			   ASSEMBLER_DIALECT ? "/" : ".", label);
	      print_slot (final_sequence);
	    }
	  else
	    asm_fprintf (asm_out_file, "\tb%s\t%LLF%d\n", logic ? "f" : "t",
			 label);

	  output_asm_insn ("bra\t%l0", &op0);
	  fprintf (asm_out_file, "\tnop\n");
	  (*targetm.asm_out.internal_label) (asm_out_file, "LF", label);

	  return "";
	}
      /* FALLTHRU */
      /* When relaxing, handle this like a short branch.  The linker
	 will fix it up if it still doesn't fit after relaxation.  */
    case 2:
      return logic ? "bt%.\t%l0" : "bf%.\t%l0";

      /* These are for SH2e, in which we have to account for the
	 extra nop because of the hardware bug in annulled branches.  */
    case 8:
      if (! TARGET_RELAX)
	{
	  int label = lf++;

	  gcc_assert (!final_sequence
		      || !(INSN_ANNULLED_BRANCH_P
			   (XVECEXP (final_sequence, 0, 0))));
	  asm_fprintf (asm_out_file, "b%s%ss\t%LLF%d\n",
		       logic ? "f" : "t",
		       ASSEMBLER_DIALECT ? "/" : ".", label);
	  fprintf (asm_out_file, "\tnop\n");
	  output_asm_insn ("bra\t%l0", operands);
	  fprintf (asm_out_file, "\tnop\n");
	  (*targetm.asm_out.internal_label) (asm_out_file, "LF", label);

	  return "";
	}
      /* FALLTHRU */
    case 4:
      {
	char buffer[10];

	sprintf (buffer, "b%s%ss\t%%l0",
		 logic ? "t" : "f",
		 ASSEMBLER_DIALECT ? "/" : ".");
	output_asm_insn (buffer, &operands[0]);
	return "nop";
      }

    default:
      /* There should be no longer branches now - that would
	 indicate that something has destroyed the branches set
	 up in machine_dependent_reorg.  */
      gcc_unreachable ();
    }
}
/* Output a code sequence for INSN using TEMPL with OPERANDS; but before,
   fill in operands 9 as a label to the successor insn.
   We try to use jump threading where possible.
   IF CODE matches the comparison in the IF_THEN_ELSE of a following jump,
   we assume the jump is taken.  I.e. EQ means follow jmp and bf, NE means
   follow jmp and bt, if the address is in range.  */
const char *
output_branchy_insn (enum rtx_code code, const char *templ,
		     rtx_insn *insn, rtx *operands)
{
  rtx_insn *next_insn = NEXT_INSN (insn);

  if (next_insn && JUMP_P (next_insn) && condjump_p (next_insn))
    {
      rtx src = SET_SRC (PATTERN (next_insn));
      if (GET_CODE (src) == IF_THEN_ELSE && GET_CODE (XEXP (src, 0)) != code)
	{
	  /* Following branch not taken */
	  rtx_code_label *lab = gen_label_rtx ();
	  emit_label_after (lab, next_insn);
	  INSN_ADDRESSES_NEW (lab,
			      INSN_ADDRESSES (INSN_UID (next_insn))
			      + get_attr_length (next_insn));
	  operands[9] = lab;
	  return templ;
	}
      else
	{
	  int offset = (branch_dest (next_insn)
			- INSN_ADDRESSES (INSN_UID (next_insn)) + 4);
	  if (offset >= -252 && offset <= 258)
	    {
	      if (GET_CODE (src) == IF_THEN_ELSE)
		/* branch_true */
		src = XEXP (src, 1);
	      operands[9] = src;
	      return templ;
	    }
	}
    }
  rtx_code_label *lab = gen_label_rtx ();
  emit_label_after (lab, insn);
  INSN_ADDRESSES_NEW (lab,
		      INSN_ADDRESSES (INSN_UID (insn))
		      + get_attr_length (insn));
  operands[9] = lab;
  return templ;
}
const char *
output_ieee_ccmpeq (rtx_insn *insn, rtx *operands)
{
  return output_branchy_insn (NE,	"bt %l9" "\n"
				  "	fcmp/eq %1,%0",
			      insn, operands);
}
/* Output the start of the assembler file.  */
static void
sh_file_start (void)
{
  default_file_start ();

  if (TARGET_ELF)
    /* We need to show the text section with the proper
       attributes as in TEXT_SECTION_ASM_OP, before dwarf2out
       emits it without attributes in TEXT_SECTION_ASM_OP, else GAS
       will complain.  We can teach GAS specifically about the
       default attributes for our choice of text section, but
       then we would have to change GAS again if/when we change
       the text section name.  */
    fprintf (asm_out_file, "%s\n", TEXT_SECTION_ASM_OP);
  else
    /* Switch to the data section so that the coffsem symbol
       isn't in the text section.  */
    switch_to_section (data_section);

  if (TARGET_LITTLE_ENDIAN)
    fputs ("\t.little\n", asm_out_file);
}
/* Implementation of TARGET_ASM_INTEGER for SH.  Pointers to functions
   need to be output as pointers to function descriptors for
   FDPIC.  */
static bool
sh_assemble_integer (rtx value, unsigned int size, int aligned_p)
{
  if (TARGET_FDPIC && size == UNITS_PER_WORD
      && GET_CODE (value) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (value))
    {
      fputs ("\t.long\t", asm_out_file);
      output_addr_const (asm_out_file, value);
      fputs ("@FUNCDESC\n", asm_out_file);
      return true;
    }
  return default_assemble_integer (value, size, aligned_p);
}
/* Check if PAT includes UNSPEC_CALLER unspec pattern.  */
static bool
unspec_caller_rtx_p (rtx pat)
{
  rtx base, offset;
  split_const (pat, &base, &offset);

  if (GET_CODE (base) == UNSPEC)
    {
      if (XINT (base, 1) == UNSPEC_CALLER)
	return true;
      for (int i = 0; i < XVECLEN (base, 0); i++)
	if (unspec_caller_rtx_p (XVECEXP (base, 0, i)))
	  return true;
    }
  return false;
}
/* Indicate that INSN cannot be duplicated.  This is true for insns
   that generate a unique label.  */
static bool
sh_cannot_copy_insn_p (rtx_insn *insn)
{
  if (!reload_completed || !flag_pic)
    return false;

  if (!NONJUMP_INSN_P (insn))
    return false;
  if (asm_noperands (insn) >= 0)
    return false;

  rtx pat = PATTERN (insn);

  if (GET_CODE (pat) == CLOBBER || GET_CODE (pat) == USE)
    return false;

  if (TARGET_FDPIC && GET_CODE (pat) == PARALLEL)
    {
      rtx t = XVECEXP (pat, 0, XVECLEN (pat, 0) - 1);
      if (GET_CODE (t) == USE && unspec_caller_rtx_p (XEXP (t, 0)))
	return true;
    }

  if (GET_CODE (pat) != SET)
    return false;
  pat = SET_SRC (pat);

  if (unspec_caller_rtx_p (pat))
    return true;

  return false;
}
/* Number of instructions used to make an arithmetic right shift by N.  */
static const char ashiftrt_insns[] =
  { 0,1,2,3,4,5,8,8,8,8,8,8,8,8,8,8,2,3,4,5,8,8,8,8,8,8,8,8,8,8,8,2};
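/* For instance, ashiftrt_insns[4] == 4: a right shift by 4 is done as four
   single-bit 'shar' insns.  The entries of 8 mark counts where calling the
   __ashiftrt_r4_<n> helper routine is assumed cheaper than an inline
   sequence; the 2 at index 16 corresponds to the ashrsi2_16 pattern used
   by expand_ashiftrt below.  */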
/* Description of a logical left or right shift, when expanded to a sequence
   of 1/2/8/16 shifts.
   Notice that one bit right shifts clobber the T bit.  One bit left shifts
   are done with an 'add Rn,Rm' insn and thus do not clobber the T bit.  */
enum
{
  ASHL_CLOBBERS_T = 1 << 0,
  LSHR_CLOBBERS_T = 1 << 1
};

struct ashl_lshr_sequence
{
  char insn_count;
  signed char amount[6];
  char clobbers_t;
};
static const struct ashl_lshr_sequence ashl_lshr_seq[32] =
{
  { 0, { 0 },		    0 },		// 0
  { 1, { 1 },		    LSHR_CLOBBERS_T },
  { 1, { 2 },		    0 },
  { 2, { 2, 1 },	    LSHR_CLOBBERS_T },
  { 2, { 2, 2 },	    0 },		// 4
  { 3, { 2, 1, 2 },	    LSHR_CLOBBERS_T },
  { 3, { 2, 2, 2 },	    0 },
  { 4, { 2, 2, 1, 2 },	    LSHR_CLOBBERS_T },
  { 1, { 8 },		    0 },		// 8
  { 2, { 8, 1 },	    LSHR_CLOBBERS_T },
  { 2, { 8, 2 },	    0 },
  { 3, { 8, 1, 2 },	    LSHR_CLOBBERS_T },
  { 3, { 8, 2, 2 },	    0 },		// 12
  { 4, { 8, 2, 1, 2 },	    LSHR_CLOBBERS_T },
  { 3, { 8, -2, 8 },	    0 },
  { 3, { 8, -1, 8 },	    ASHL_CLOBBERS_T },
  { 1, { 16 },		    0 },		// 16
  { 2, { 16, 1 },	    LSHR_CLOBBERS_T },
  { 2, { 16, 2 },	    0 },
  { 3, { 16, 1, 2 },	    LSHR_CLOBBERS_T },
  { 3, { 16, 2, 2 },	    0 },		// 20
  { 4, { 16, 2, 1, 2 },	    LSHR_CLOBBERS_T },
  { 3, { 16, -2, 8 },	    0 },
  { 3, { 16, -1, 8 },	    ASHL_CLOBBERS_T },
  { 2, { 16, 8 },	    0 },		// 24
  { 3, { 16, 1, 8 },	    LSHR_CLOBBERS_T },
  { 3, { 16, 8, 2 },	    0 },
  { 4, { 16, 8, 1, 2 },	    LSHR_CLOBBERS_T },
  { 4, { 16, 8, 2, 2 },	    0 },		// 28
  { 4, { 16, -1, -2, 16 },  ASHL_CLOBBERS_T },
  { 3, { 16, -2, 16 },	    0 },

  /* For a right shift by 31 a 2 insn shll-movt sequence can be used.
     For a left shift by 31 a 2 insn and-rotl sequences can be used.
     However, the shift-and combiner code needs this entry here to be in
     terms of real shift insns.  */
  { 3, { 16, -1, 16 },	    ASHL_CLOBBERS_T }
};
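/* Reading the table (illustrative): the entry for a shift by 6,
   { 3, { 2, 2, 2 }, 0 }, expands into three 2-bit shift insns and leaves
   the T bit alone, while the entry for 7, { 4, { 2, 2, 1, 2 },
   LSHR_CLOBBERS_T }, needs four insns and clobbers T when shifting right,
   because of the single-bit step.  */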
/* Individual shift amounts for shift amounts < 16, up to three highmost
   bits might be clobbered.  This is typically used when combined with some
   kind of sign or zero extension.  */
static const struct ashl_lshr_sequence ext_ashl_lshr_seq[32] =
{
  { 0, { 0 },		    0 },		// 0
  { 1, { 1 },		    LSHR_CLOBBERS_T },
  { 1, { 2 },		    0 },
  { 2, { 2, 1 },	    LSHR_CLOBBERS_T },
  { 2, { 2, 2 },	    0 },		// 4
  { 3, { 2, 1, 2 },	    LSHR_CLOBBERS_T },
  { 2, { 8, -2 },	    0 },
  { 2, { 8, -1 },	    ASHL_CLOBBERS_T },
  { 1, { 8 },		    0 },		// 8
  { 2, { 8, 1 },	    LSHR_CLOBBERS_T },
  { 2, { 8, 2 },	    0 },
  { 3, { 8, 1, 2 },	    LSHR_CLOBBERS_T },
  { 3, { 8, 2, 2 },	    0 },		// 12
  { 3, { 16, -2, -1 },	    ASHL_CLOBBERS_T },
  { 2, { 16, -2 },	    0 },
  { 2, { 16, -1 },	    ASHL_CLOBBERS_T },
  { 1, { 16 },		    0 },		// 16
  { 2, { 16, 1 },	    LSHR_CLOBBERS_T },
  { 2, { 16, 2 },	    0 },
  { 3, { 16, 1, 2 },	    LSHR_CLOBBERS_T },
  { 3, { 16, 2, 2 },	    0 },		// 20
  { 4, { 16, 2, 1, 2 },	    LSHR_CLOBBERS_T },
  { 3, { 16, -2, 8 },	    0 },
  { 3, { 16, -1, 8 },	    ASHL_CLOBBERS_T },
  { 2, { 16, 8 },	    0 },		// 24
  { 3, { 16, 1, 8 },	    LSHR_CLOBBERS_T },
  { 3, { 16, 8, 2 },	    0 },
  { 4, { 16, 8, 1, 2 },	    LSHR_CLOBBERS_T },
  { 4, { 16, 8, 2, 2 },	    0 },		// 28
  { 4, { 16, -1, -2, 16 },  ASHL_CLOBBERS_T },
  { 3, { 16, -2, 16 },	    0 },
  { 3, { 16, -1, 16 },	    ASHL_CLOBBERS_T }
};
/* Return true if a shift left consisting of 1/2/8/16 shift instructions
   will clobber the T bit.  */
bool
sh_ashlsi_clobbers_t_reg_p (rtx shift_amount)
{
  gcc_assert (CONST_INT_P (shift_amount));

  const int shift_amount_i = INTVAL (shift_amount) & 31;

  /* Special case for shift count of 31: use and-rotl sequence.  */
  if (shift_amount_i == 31)
    return true;

  return (ashl_lshr_seq[shift_amount_i].clobbers_t
	  & ASHL_CLOBBERS_T) != 0;
}
/* Return true if a logical right shift consisting of 1/2/8/16 shift
   instructions will clobber the T bit.  */
bool
sh_lshrsi_clobbers_t_reg_p (rtx shift_amount)
{
  gcc_assert (CONST_INT_P (shift_amount));

  /* For right shifts the constant might be negative.  */
  const int shift_amount_i = std::abs (INTVAL (shift_amount)) & 31;

  /* Special case for shift count of 31: use shll-movt sequence.  */
  if (shift_amount_i == 31)
    return true;

  return (ashl_lshr_seq[shift_amount_i].clobbers_t
	  & LSHR_CLOBBERS_T) != 0;
}
/* Return true if it is potentially beneficial to use a dynamic shift
   instruction (shad / shar) instead of a combination of 1/2/8/16
   shift instructions for the specified shift count.
   If dynamic shifts are not available, always return false.  */
bool
sh_dynamicalize_shift_p (rtx count)
{
  gcc_assert (CONST_INT_P (count));

  /* For right shifts the constant might be negative.  */
  const int shift_amount_i = std::abs (INTVAL (count)) & 31;
  int insn_count;

  /* For left and right shifts, there are shorter 2 insn sequences for
     shift amounts of 31.  */
  if (shift_amount_i == 31)
    insn_count = 2;
  else
    insn_count = ashl_lshr_seq[shift_amount_i].insn_count;

  return TARGET_DYNSHIFT && (insn_count > 1 + SH_DYNAMIC_SHIFT_COST);
}
/* Assuming we have a value that has been sign-extended by at least one bit,
   can we use the ext_shift_amounts with the last shift turned to an
   arithmetic shift to shift it by N without data loss, and quicker than by
   other means?  */
#define EXT_SHIFT_SIGNED(n) (((n) | 8) == 15)
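/* Illustrative: (((n) | 8) == 15) holds exactly for n == 7 and n == 15,
   i.e. the shift counts where the final step of the extended shift
   sequences can be replaced by a sign-preserving arithmetic shift.  */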
/* Return the cost of a shift.  */
static inline int
shiftcosts (rtx x)
{
  if (GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
    {
      if (GET_MODE (x) == DImode
	  && CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) == 1)
	return 2;

      /* Everything else is invalid, because there is no pattern for it.  */
      return -1;
    }
  /* If shift by a non constant, then this will be expensive.  */
  if (!CONST_INT_P (XEXP (x, 1)))
    return SH_DYNAMIC_SHIFT_COST;

  /* Otherwise, return the true cost in instructions.  Cope with out of range
     shift counts more or less arbitrarily.  */
  int value = INTVAL (XEXP (x, 1)) & 31;

  if (GET_CODE (x) == ASHIFTRT)
    {
      int cost = ashiftrt_insns[value];
      /* If dynamic shifts are available and profitable in this case, then we
	 put the constant in a reg and use shad.  */
      if (cost > 1 + SH_DYNAMIC_SHIFT_COST)
	cost = 1 + SH_DYNAMIC_SHIFT_COST;
      return cost;
    }
  else
    return ashl_lshr_seq[value].insn_count;
}
/* Return the cost of an AND/XOR/IOR operation.  */
static inline int
and_xor_ior_costs (rtx x, int code)
{
  /* On SH1-4 we have only max. SImode operations.
     Double the cost for modes > SImode.  */
  const int cost_scale = GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD
			 ? 2 : 1;

  /* A logical operation with two registers is a single cycle
     instruction.  */
  if (!CONST_INT_P (XEXP (x, 1)))
    return 1 * cost_scale;

  int i = INTVAL (XEXP (x, 1));

  /* These constants are single cycle extu.[bw] instructions.  */
  if ((i == 0xff || i == 0xffff) && code == AND)
    return 1 * cost_scale;
  /* Constants that can be used in an instruction as an immediate are
     a single cycle, but this requires r0, so make it a little more
     expensive.  */
  if (CONST_OK_FOR_K08 (i))
    return 2 * cost_scale;
  /* Constants that can be loaded with a mov immediate need one more cycle.
     This case is probably unnecessary.  */
  if (CONST_OK_FOR_I08 (i))
    return 2 * cost_scale;
  /* Any other constant requires an additional 2 cycle pc-relative load.
     This case is probably unnecessary.  */
  return 3 * cost_scale;
}
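/* Illustrative SImode costs (cost_scale == 1) resulting from the rules
   above:

     and  r1,r2        -> 1  (register operand)
     and  #0xff,rN     -> 1  (single cycle extu.b equivalent)
     and  #0x80,r0     -> 2  (K08 immediate, but ties up r0)
     and  #0x12345,rN  -> 3  (needs a constant pool load first)  */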
/* Return the cost of an addition or a subtraction.  */
static inline int
addsubcosts (rtx x)
{
  if (GET_MODE (x) == SImode)
    {
      /* The addc or subc patterns will eventually become one or two
	 instructions.  Below are some costs for some of the patterns
	 which combine would reject because the costs of the individual
	 insns in the patterns are lower.

	 FIXME: It would be much easier if we had something like insn cost
	 attributes and the cost calculation machinery used those attributes
	 in the first place.  This would eliminate redundant recog-like C
	 code to calculate costs of complex patterns.  */
      rtx op0 = XEXP (x, 0);
      rtx op1 = XEXP (x, 1);

      if (GET_CODE (x) == PLUS)
	{
	  if (GET_CODE (op0) == AND
	      && XEXP (op0, 1) == const1_rtx
	      && (GET_CODE (op1) == PLUS
		  || (GET_CODE (op1) == MULT && XEXP (op1, 1) == const2_rtx)))
	    return 1;

	  if (GET_CODE (op0) == MULT && XEXP (op0, 1) == const2_rtx
	      && GET_CODE (op1) == LSHIFTRT
	      && CONST_INT_P (XEXP (op1, 1)) && INTVAL (XEXP (op1, 1)) == 31)
	    return 1;
	}
      /* Let's assume that adding the result of an insn that stores into
	 the T bit is cheap.  */
      if (treg_set_expr (op1, SImode))
	return 1;
      if (treg_set_expr (op0, SImode))
	return 1;
    }

  /* On SH1-4 we have only max. SImode operations.
     Double the cost for modes > SImode.  */
  const int cost_scale = GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD
			 ? 2 : 1;

  /* Adding a register is a single cycle insn.  */
  if (REG_P (XEXP (x, 1))
      || GET_CODE (XEXP (x, 1)) == SUBREG)
    return 1 * cost_scale;

  /* Likewise for small constants.  */
  if (CONST_INT_P (XEXP (x, 1))
      && CONST_OK_FOR_ADD (INTVAL (XEXP (x, 1))))
    return 1 * cost_scale;

  /* Any other constant requires a 2 cycle pc-relative load plus an
     addition.  */
  return 3 * cost_scale;
}
/* Return the cost of a multiply.  */
static inline int
multcosts (rtx x ATTRIBUTE_UNUSED)
{
  if (sh_multcost >= 0)
    return sh_multcost;

  if (TARGET_SH2)
    {
      /* We have a mul insn, so we can never take more than the mul and the
	 read of the mac reg, but count more because of the latency and extra
	 reg usage.  */
      if (optimize_size)
	return 2;
      return 3;
    }

  /* If we're aiming at small code, then just count the number of
     insns in a multiply call sequence.  */
  if (optimize_size)
    return 5;

  /* Otherwise count all the insns in the routine we'd be calling too.  */
  return 20;
}
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */
static bool
sh_rtx_costs (rtx x, machine_mode mode ATTRIBUTE_UNUSED, int outer_code,
	      int opno ATTRIBUTE_UNUSED,
	      int *total, bool speed ATTRIBUTE_UNUSED)
{
  int code = GET_CODE (x);

  switch (code)
    {
      /* The lower-subreg pass decides whether to split multi-word regs
	 into individual regs by looking at the cost for a SET of certain
	 modes with the following patterns:
	   (set (reg) (reg))
	   (set (reg) (const_int 0))
	 On machines that support vector-move operations a multi-word move
	 is the same cost as individual reg move.  On SH there is no
	 vector-move, so we have to provide the correct cost in the number
	 of move insns to load/store the reg of the mode in question.  */
    case SET:
      if (sh_movt_set_dest (x) != NULL || sh_movrt_set_dest (x) != NULL)
	{
	  *total = COSTS_N_INSNS (1);
	  return true;
	}

      if (register_operand (SET_DEST (x), VOIDmode)
	  && (register_operand (SET_SRC (x), VOIDmode)
	      || satisfies_constraint_Z (SET_SRC (x))))
	{
	  const machine_mode mode = GET_MODE (SET_DEST (x));
	  *total = COSTS_N_INSNS (GET_MODE_SIZE (mode)
				  / mov_insn_size (mode, TARGET_SH2A));
	  return true;
	}
      return false;

    /* The cost of a mem access is mainly the cost of the address mode.  */
    case MEM:
      *total = sh_address_cost (XEXP (x, 0), GET_MODE (x), MEM_ADDR_SPACE (x),
				true);
      return true;

    case IF_THEN_ELSE:
      /* This case is required for the if_then_else negc pattern.  */
      if (treg_set_expr (XEXP (x, 0), SImode))
	{
	  *total = COSTS_N_INSNS (1);
	  return true;
	}
      else
	return false;

    /* Zero extracts of single bits are usually combine patterns for the
       tst insns.  */
    case ZERO_EXTRACT:
      if (GET_CODE (XEXP (x, 0)) == XOR
	  && arith_reg_operand (XEXP (XEXP (x, 0), 0), VOIDmode)
	  && XEXP (x, 1) == const1_rtx
	  && CONST_INT_P (XEXP (x, 2))
	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	  /* Check that the xor constant overlaps with the extracted bit.  */
	  && (INTVAL (XEXP (XEXP (x, 0), 1)) & (1LL << INTVAL (XEXP (x, 2)))))
	{
	  *total = 1; //COSTS_N_INSNS (1);
	  return true;
	}

      /* div0s variant.  */
      if (GET_CODE (XEXP (x, 0)) == XOR
	  && GET_CODE (XEXP (XEXP (x, 0), 0)) == XOR
	  && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
	{
	  *total = 1;
	  return true;
	}
      return false;

    /* The cost of a sign or zero extend depends on whether the source is a
       reg or a mem.  In case of a mem take the address into account.  */
    case SIGN_EXTEND:
      if (arith_reg_operand (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
	{
	  *total = COSTS_N_INSNS (1);
	  return true;
	}
      if (MEM_P (XEXP (x, 0)))
	{
	  *total = sh_address_cost (XEXP (XEXP (x, 0), 0),
				    GET_MODE (XEXP (x, 0)),
				    MEM_ADDR_SPACE (XEXP (x, 0)), true);
	  return true;
	}
      return false;

    case ZERO_EXTEND:
      if (arith_reg_operand (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
	{
	  *total = COSTS_N_INSNS (1);
	  return true;
	}
      else if (TARGET_SH2A && MEM_P (XEXP (x, 0))
	       && (GET_MODE (XEXP (x, 0)) == QImode
		   || GET_MODE (XEXP (x, 0)) == HImode))
	{
	  /* Handle SH2A's movu.b and movu.w insn.  */
	  *total = sh_address_cost (XEXP (XEXP (x, 0), 0),
				    GET_MODE (XEXP (x, 0)),
				    MEM_ADDR_SPACE (XEXP (x, 0)), true);
	  return true;
	}
      return false;

    /* mems for SFmode and DFmode can be inside a parallel due to
       the way the fpscr is handled.  */
    case PARALLEL:
      for (int i = 0; i < XVECLEN (x, 0); i++)
	{
	  rtx xx = XVECEXP (x, 0, i);
	  if (GET_CODE (xx) == SET && MEM_P (XEXP (xx, 0)))
	    {
	      *total = sh_address_cost (XEXP (XEXP (xx, 0), 0),
					GET_MODE (XEXP (xx, 0)),
					MEM_ADDR_SPACE (XEXP (xx, 0)), true);
	      return true;
	    }
	  if (GET_CODE (xx) == SET && MEM_P (XEXP (xx, 1)))
	    {
	      *total = sh_address_cost (XEXP (XEXP (xx, 1), 0),
					GET_MODE (XEXP (xx, 1)),
					MEM_ADDR_SPACE (XEXP (xx, 1)), true);
	      return true;
	    }
	}

      if (sh_1el_vec (x, VOIDmode))
	*total = outer_code != SET;
      else if (sh_rep_vec (x, VOIDmode))
	*total = ((GET_MODE_UNIT_SIZE (GET_MODE (x)) + 3) / 4
		  + (outer_code != SET));
      else
	*total = COSTS_N_INSNS (3) + (outer_code != SET);
      return true;

    case CONST_INT:
      if (CONST_OK_FOR_I08 (INTVAL (x)))
	*total = 0;
      else if ((outer_code == AND || outer_code == IOR || outer_code == XOR)
	       && CONST_OK_FOR_K08 (INTVAL (x)))
	*total = 1;
      /* prepare_cmp_insn will force costly constants int registers before
	 the cbranch[sd]i4 patterns can see them, so preserve potentially
	 interesting ones not covered by I08 above.  */
      else if (outer_code == COMPARE
	       && ((unsigned HOST_WIDE_INT) INTVAL (x)
		   == (unsigned HOST_WIDE_INT) 0x7fffffff + 1
		   || INTVAL (x) == 0x7fffffff
		   || INTVAL (x) == 0x80 || INTVAL (x) == -0x81))
	*total = 1;
      else
	*total = 8;
      return true;

    case EQ:
      /* An and with a constant compared against zero is
	 most likely going to be a TST #imm, R0 instruction.  */
      if (XEXP (x, 1) == const0_rtx
	  && ((GET_CODE (XEXP (x, 0)) == AND
	       || (SUBREG_P (XEXP (x, 0))
		   && GET_CODE (SUBREG_REG (XEXP (x, 0))) == AND))
	      || GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT))
	{
	  *total = 1;
	  return true;
	}

      else if (XEXP (x, 1) == const0_rtx
	       && GET_CODE (XEXP (x, 0)) == AND
	       && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	       && GET_CODE (XEXP (XEXP (x, 0), 0)) == ASHIFT
	       && arith_reg_operand (XEXP (XEXP (XEXP (x, 0), 0), 0), SImode)
	       && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1)))
	{
	  *total = 1;
	  return true;
	}
      else
	return false;

    case SMIN:
    case SMAX:
      /* This is most likely a clips.b or clips.w insn that is being made up
	 by combine.  */
      if (TARGET_SH2A
	  && (GET_CODE (XEXP (x, 0)) == SMAX || GET_CODE (XEXP (x, 0)) == SMIN)
	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	  && REG_P (XEXP (XEXP (x, 0), 0))
	  && CONST_INT_P (XEXP (x, 1)))
	{
	  *total = COSTS_N_INSNS (1);
	  return true;
	}
      else
	return false;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 8;
      return true;

    case CONST_DOUBLE:
      /* prepare_cmp_insn will force costly constants int registers before
	 the cbranchdi4 pattern can see them, so preserve potentially
	 interesting ones.  */
      if (outer_code == COMPARE && GET_MODE (x) == DImode)
	*total = 1;
      else
	*total = 10;
      return true;

    case CONST_VECTOR:
      /* FIXME: This looks broken.  Only the last statement has any effect.
	 Probably this could be folded with the PARALLEL case?  */
      if (x == CONST0_RTX (GET_MODE (x)))
	*total = 0;
      else if (sh_1el_vec (x, VOIDmode))
	*total = outer_code != SET;
      if (sh_rep_vec (x, VOIDmode))
	*total = ((GET_MODE_UNIT_SIZE (GET_MODE (x)) + 3) / 4
		  + (outer_code != SET));
      *total = COSTS_N_INSNS (3) + (outer_code != SET);
      return true;

    case PLUS:
    case MINUS:
      *total = COSTS_N_INSNS (addsubcosts (x));
      return true;

    case AND:
      /* Check for (and (not (reg)) (const_int 1)) which is a tst insn.  */
      if (GET_CODE (XEXP (x, 0)) == NOT && XEXP (x, 1) == const1_rtx)
	{
	  *total = COSTS_N_INSNS (1);
	  return true;
	}
      /* Fall through.  */

    case XOR:
    case IOR:
      *total = COSTS_N_INSNS (and_xor_ior_costs (x, code));
      return true;

    case MULT:
      *total = COSTS_N_INSNS (multcosts (x));
      return true;

    case LT:
    case GE:
      /* div0s sign comparison.  */
      if (GET_CODE (XEXP (x, 0)) == XOR
	  && REG_P ((XEXP (XEXP (x, 0), 0)))
	  && REG_P ((XEXP (XEXP (x, 0), 1)))
	  && satisfies_constraint_Z (XEXP (x, 1)))
	{
	  *total = COSTS_N_INSNS (1);
	  return true;
	}
      else
	return false;

    case LSHIFTRT:
      /* div0s sign comparison.  */
      if (GET_CODE (XEXP (x, 0)) == XOR
	  && REG_P ((XEXP (XEXP (x, 0), 0)))
	  && REG_P ((XEXP (XEXP (x, 0), 1)))
	  && CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) == 31)
	{
	  *total = COSTS_N_INSNS (1);
	  return true;
	}
      /* FALLTHRU */
    case ASHIFT:
    case ASHIFTRT:
      {
	int cost = shiftcosts (x);
	if (cost < 0)
	  return false;
	*total = COSTS_N_INSNS (cost);
	return true;
      }

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      *total = COSTS_N_INSNS (20);
      return true;

    default:
      return false;
    }
}
/* Determine the size of the fundamental move insn that will be used
   for the specified mode.  */
static inline int
mov_insn_size (machine_mode mode, bool consider_sh2a)
{
  const int mode_sz = GET_MODE_SIZE (mode);

  if ((consider_sh2a && TARGET_SH2A_DOUBLE && mode == DFmode)
      || (TARGET_FMOVD && mode == DFmode))
    return mode_sz;
  else
    {
      /* The max. available mode for actual move insns is SImode.
	 Larger accesses will be split into multiple loads/stores.  */
      const int max_mov_sz = GET_MODE_SIZE (SImode);
      return mode_sz >= max_mov_sz ? max_mov_sz : mode_sz;
    }
}
/* Determine the maximum possible displacement for a move insn for the
   specified mode.  */
int
sh_max_mov_insn_displacement (machine_mode mode, bool consider_sh2a)
{
  /* The 4 byte displacement move insns are the same as the 2 byte
     versions but take a 12 bit displacement.  All we need to do is to
     scale the max. displacement value accordingly.  */
  const int disp_scale = consider_sh2a ? (4095 / 15) : 1;

  /* SH2A supports FPU move insns with 12 bit displacements.
     Other variants do not support any kind of displacements for
     FPU move insns.  */
  if (! consider_sh2a && TARGET_FPU_ANY && GET_MODE_CLASS (mode) == MODE_FLOAT)
    return 0;
  else
    {
      const int mov_insn_sz = mov_insn_size (mode, consider_sh2a);
      const int mode_sz = GET_MODE_SIZE (mode);
      int r = 15 * mov_insn_sz * disp_scale;

      /* If the mov insn will be split into multiple loads/stores, the
	 maximum possible displacement is a bit smaller.  */
      if (mode_sz > mov_insn_sz)
	r -= mode_sz - mov_insn_sz;
      return r;
    }
}
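/* Worked example (illustrative): for SImode without SH2A displacement
   insns, mov_insn_size is 4 and disp_scale is 1, so the limit is
   15 * 4 = 60 bytes.  A DImode access is split into two SImode moves, so
   the limit shrinks by 8 - 4 to 56, keeping the displacement of the
   second, offset part in range.  */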
/* Determine the alignment mask for a move insn of the
   specified mode.  */
static inline int
mov_insn_alignment_mask (machine_mode mode, bool consider_sh2a)
{
  const int mov_insn_sz = mov_insn_size (mode, consider_sh2a);
  return mov_insn_sz > 0 ? (mov_insn_sz - 1) : 0;
}
/* Return the displacement value of a displacement address.  */
HOST_WIDE_INT
sh_disp_addr_displacement (rtx x)
{
  gcc_assert (satisfies_constraint_Sdd (x));
  return INTVAL (XEXP (XEXP (x, 0), 1));
}
/* Compute the cost of an address.  */
static int
sh_address_cost (rtx x, machine_mode mode,
		 addr_space_t as ATTRIBUTE_UNUSED, bool speed ATTRIBUTE_UNUSED)
{
  /* 'GBR + 0'.  Account one more because of R0 restriction.  */
  if (REG_P (x) && REGNO (x) == GBR_REG)
    return 2;

  /* Simple reg, post-inc, pre-dec addressing.  */
  if (REG_P (x) || GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_DEC)
    return 1;

  /* 'reg + disp' addressing.  */
  if (GET_CODE (x) == PLUS
      && REG_P (XEXP (x, 0)) && CONST_INT_P (XEXP (x, 1)))
    {
      /* 'GBR + disp'.  Account one more because of R0 restriction.  */
      if (REGNO (XEXP (x, 0)) == GBR_REG
	  && gbr_displacement (XEXP (x, 1), mode))
	return 2;

      const HOST_WIDE_INT offset = INTVAL (XEXP (x, 1));

      if (offset == 0)
	return 1;

      /* The displacement would fit into a 2 byte move insn.
	 HImode and QImode loads/stores with displacement put pressure on
	 R0 which will most likely require another reg copy.  Thus account
	 a higher cost for that.  */
      if (offset > 0 && offset <= sh_max_mov_insn_displacement (mode, false))
	return (mode == HImode || mode == QImode) ? 2 : 1;

      /* The displacement would fit into a 4 byte move insn (SH2A).  */
      if (TARGET_SH2A
	  && offset > 0 && offset <= sh_max_mov_insn_displacement (mode, true))
	return 2;

      /* The displacement is probably out of range and will require extra
	 calculations.  */
      return 3;
    }

  /* 'reg + reg' addressing.  Account a slightly higher cost because of
     increased pressure on R0.  */
  if (GET_CODE (x) == PLUS && ! CONSTANT_P (XEXP (x, 1)))
    return 3;

  /* Not sure what it is - probably expensive.  */
  return 10;
}
/* Code to expand a shift.  */
static void
gen_ashift (int type, int n, rtx reg)
{
  rtx n_rtx;

  /* Negative values here come from the shift_amounts array.  */
  if (n < 0)
    {
      if (type == ASHIFT)
	type = LSHIFTRT;
      else
	type = ASHIFT;
      n = -n;
    }

  n_rtx = GEN_INT (n);
  gcc_assert (satisfies_constraint_P27 (n_rtx));

  switch (type)
    {
    case ASHIFTRT:
      emit_insn (gen_ashrsi3_k (reg, reg, n_rtx));
      break;
    case LSHIFTRT:
      if (n == 1)
	emit_insn (gen_shlr (reg, reg));
      else
	emit_insn (gen_lshrsi3_k (reg, reg, n_rtx));
      break;
    case ASHIFT:
      emit_insn (gen_ashlsi3_k (reg, reg, n_rtx));
      break;
    default:
      gcc_unreachable ();
    }
}
/* Code to expand a HImode shift.  */
static void
gen_ashift_hi (int type, int n, rtx reg)
{
  /* Negative values here come from the shift_amounts array.  */
  if (n < 0)
    {
      if (type == ASHIFT)
	type = LSHIFTRT;
      else
	type = ASHIFT;
      n = -n;
    }

  switch (type)
    {
    case ASHIFTRT:
    case LSHIFTRT:
      /* We don't have HImode right shift operations because using the
	 ordinary 32 bit shift instructions for that doesn't generate proper
	 zero/sign extension.
	 gen_ashift_hi is only called in contexts where we know that the
	 sign extension works out correctly.  */
      {
	int offset = 0;
	if (GET_CODE (reg) == SUBREG)
	  {
	    offset = SUBREG_BYTE (reg);
	    reg = SUBREG_REG (reg);
	  }
	gen_ashift (type, n, gen_rtx_SUBREG (SImode, reg, offset));
	break;
      }
    case ASHIFT:
      emit_insn (gen_ashlhi3_k (reg, reg, GEN_INT (n)));
      break;
    }
}
/* Output RTL to split a constant shift into its component SH constant
   shift instructions.  */
void
gen_shifty_op (int code, rtx *operands)
{
  int value = INTVAL (operands[2]);
  int max, i;

  /* Truncate the shift count in case it is out of bounds.  */
  value = value & 31;

  if (value == 31)
    {
      if (code == LSHIFTRT)
	{
	  emit_insn (gen_rotlsi3_1 (operands[0], operands[0]));
	  emit_insn (gen_movt (operands[0], get_t_reg_rtx ()));
	  return;
	}
      else if (code == ASHIFT)
	{
	  /* There is a two instruction sequence for 31 bit left shifts,
	     but it requires r0.  */
	  if (REG_P (operands[0]) && REGNO (operands[0]) == 0)
	    {
	      emit_insn (gen_andsi3 (operands[0], operands[0], const1_rtx));
	      emit_insn (gen_rotlsi3_31 (operands[0], operands[0]));
	      return;
	    }
	}
    }
  else if (value == 0)
    {
      /* This can happen even when optimizing, if there were subregs before
	 reload.  Don't output a nop here, as this is never optimized away;
	 use a no-op move instead.  */
      emit_insn (gen_rtx_SET (operands[0], operands[0]));
      return;
    }

  max = ashl_lshr_seq[value].insn_count;
  for (i = 0; i < max; i++)
    gen_ashift (code, ashl_lshr_seq[value].amount[i], operands[0]);
}
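/* Illustrative expansion: for a left shift with operands[2] == 6,
   ashl_lshr_seq[6] is { 3, { 2, 2, 2 }, 0 }, so the loop above emits
   three gen_ashift (ASHIFT, 2, ...) calls, i.e. three shll2 insns,
   instead of a single dynamic shift.  */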
/* Same as gen_shifty_op, but optimized for values where the topmost bits
   don't matter.  */
void
gen_shifty_hi_op (int code, rtx *operands)
{
  int value = INTVAL (operands[2]);
  int max, i;
  void (*gen_fun) (int, int, rtx);

  /* This operation is used by and_shl for SImode values with a few
     high bits known to be cleared.  */
  value &= 31;
  if (value == 0)
    {
      emit_insn (gen_nop ());
      return;
    }

  gen_fun = GET_MODE (operands[0]) == HImode ? gen_ashift_hi : gen_ashift;
  if (code == ASHIFT)
    {
      max = ext_ashl_lshr_seq[value].insn_count;
      for (i = 0; i < max; i++)
	gen_fun (code, ext_ashl_lshr_seq[value].amount[i], operands[0]);
    }
  else
    /* When shifting right, emit the shifts in reverse order, so that
       solitary negative values come first.  */
    for (i = ext_ashl_lshr_seq[value].insn_count - 1; i >= 0; i--)
      gen_fun (code, ext_ashl_lshr_seq[value].amount[i], operands[0]);
}
/* Output RTL for an arithmetic right shift.
   ??? Rewrite to use super-optimizer sequences.  */
bool
expand_ashiftrt (rtx *operands)
{
  rtx wrk;
  char func[18];
  int value;

  if (TARGET_DYNSHIFT)
    {
      if (!CONST_INT_P (operands[2]))
	{
	  rtx count = copy_to_mode_reg (SImode, operands[2]);
	  emit_insn (gen_negsi2 (count, count));
	  emit_insn (gen_ashrsi3_d (operands[0], operands[1], count));
	  return true;
	}
      else if (ashiftrt_insns[INTVAL (operands[2]) & 31]
	       > 1 + SH_DYNAMIC_SHIFT_COST)
	{
	  rtx count
	    = force_reg (SImode, GEN_INT (- (INTVAL (operands[2]) & 31)));
	  emit_insn (gen_ashrsi3_d (operands[0], operands[1], count));
	  return true;
	}
    }
  if (!CONST_INT_P (operands[2]))
    return false;

  value = INTVAL (operands[2]) & 31;

  if (value == 31)
    {
      /* If we are called from abs expansion, arrange things so that we
	 can use a single MT instruction that doesn't clobber the source,
	 if LICM can hoist out the load of the constant zero.  */
      if (currently_expanding_to_rtl)
	{
	  emit_insn (gen_cmpgtsi_t (force_reg (SImode, CONST0_RTX (SImode)),
				    operands[1]));
	  emit_insn (gen_mov_neg_si_t (operands[0], get_t_reg_rtx ()));
	  return true;
	}
      emit_insn (gen_ashrsi2_31 (operands[0], operands[1]));
      return true;
    }
  else if (value >= 16 && value <= 19)
    {
      wrk = gen_reg_rtx (SImode);
      emit_insn (gen_ashrsi2_16 (wrk, operands[1]));
      value -= 16;
      while (value--)
	gen_ashift (ASHIFTRT, 1, wrk);
      emit_move_insn (operands[0], wrk);
      return true;
    }
  /* Expand a short sequence inline, longer call a magic routine.  */
  else if (value <= 5)
    {
      wrk = gen_reg_rtx (SImode);
      emit_move_insn (wrk, operands[1]);
      while (value--)
	gen_ashift (ASHIFTRT, 1, wrk);
      emit_move_insn (operands[0], wrk);
      return true;
    }

  wrk = gen_reg_rtx (Pmode);

  /* Load the value into an arg reg and call a helper.  */
  emit_move_insn (gen_rtx_REG (SImode, 4), operands[1]);
  sprintf (func, "__ashiftrt_r4_%d", value);
  rtx lab = function_symbol (wrk, func, SFUNC_STATIC).lab;
  emit_insn (gen_ashrsi3_n (GEN_INT (value), wrk, lab));
  emit_move_insn (operands[0], gen_rtx_REG (SImode, 4));
  return true;
}
3886 /* Try to find a good way to implement the combiner pattern
3887 [(set (match_operand:SI 0 "register_operand" "r")
3888 (and:SI (ashift:SI (match_operand:SI 1 "register_operand" "r")
3889 (match_operand:SI 2 "const_int_operand" "n"))
3890 (match_operand:SI 3 "const_int_operand" "n"))) .
3891 LEFT_RTX is operand 2 in the above pattern, and MASK_RTX is operand 3.
3892 return 0 for simple right / left or left/right shift combination.
3893 return 1 for a combination of shifts with zero_extend.
3894 return 2 for a combination of shifts with an AND that needs r0.
3895 return 3 for a combination of shifts with an AND that needs an extra
3896 scratch register, when the three highmost bits of the AND mask are clear.
3897 return 4 for a combination of shifts with an AND that needs an extra
3898 scratch register, when any of the three highmost bits of the AND mask
3900 If ATTRP is set, store an initial right shift width in ATTRP[0],
3901 and the instruction length in ATTRP[1] . These values are not valid
3903 When ATTRP is set and returning 1, ATTRP[2] gets set to the index into
3904 shift_amounts for the last shift value that is to be used before the
int
shl_and_kind (rtx left_rtx, rtx mask_rtx, int *attrp)
{
  unsigned HOST_WIDE_INT mask, lsb, mask2, lsb2;
  int left = INTVAL (left_rtx), right;
  int best = 0;
  int cost, best_cost = 10000;
  int best_right = 0, best_len = 0;
  int i;
  int can_ext;

  if (left < 0 || left > 31)
    return 0;
  if (CONST_INT_P (mask_rtx))
    mask = (unsigned HOST_WIDE_INT) INTVAL (mask_rtx) >> left;
  else
    mask = (unsigned HOST_WIDE_INT) GET_MODE_MASK (SImode) >> left;
  /* Can this be expressed as a right shift / left shift pair?  */
  lsb = ((mask ^ (mask - 1)) >> 1) + 1;
  right = exact_log2 (lsb);
  mask2 = ~(mask + lsb - 1);
  lsb2 = ((mask2 ^ (mask2 - 1)) >> 1) + 1;
  /* mask has no zeroes but trailing zeroes <==> ! mask2  */
  if (! mask2)
    best_cost = ashl_lshr_seq[right].insn_count
		+ ashl_lshr_seq[right + left].insn_count;
  /* mask has no trailing zeroes <==> ! right  */
  else if (! right && mask2 == ~(lsb2 - 1))
    {
      int late_right = exact_log2 (lsb2);
      best_cost = ashl_lshr_seq[left + late_right].insn_count
		  + ashl_lshr_seq[late_right].insn_count;
    }
  /* Try to use zero extend.  */
  if (mask2 == ~(lsb2 - 1))
    {
      int width, first;

      for (width = 8; width <= 16; width += 8)
	{
	  /* Can we zero-extend right away?  */
	  if (lsb2 == (unsigned HOST_WIDE_INT) 1 << width)
	    {
	      cost = 1 + ext_ashl_lshr_seq[right].insn_count
		     + ext_ashl_lshr_seq[left + right].insn_count;
	      if (cost < best_cost)
		{
		  best = 1;
		  best_cost = cost;
		  best_right = right;
		  best_len = cost;
		  if (attrp)
		    attrp[2] = -1;
		}
	      continue;
	    }
	  /* ??? Could try to put zero extend into initial right shift,
	     or even shift a bit left before the right shift.  */
	  /* Determine value of first part of left shift, to get to the
	     zero extend cut-off point.  */
	  first = width - exact_log2 (lsb2) + right;
	  if (first >= 0 && right + left - first >= 0)
	    {
	      cost = ext_ashl_lshr_seq[right].insn_count
		     + ext_ashl_lshr_seq[first].insn_count + 1
		     + ext_ashl_lshr_seq[right + left - first].insn_count;

	      if (cost < best_cost)
		{
		  best = 1;
		  best_cost = cost;
		  best_right = right;
		  best_len = cost;
		  if (attrp)
		    attrp[2] = first;
		}
	    }
	}
    }
  /* Try to use r0 AND pattern */
  for (i = 0; i <= 2; i++)
    {
      if (i > right)
	break;
      if (! CONST_OK_FOR_K08 (mask >> i))
	continue;
      cost = (i != 0) + 2 + ext_ashl_lshr_seq[left + i].insn_count;
      if (cost < best_cost)
	{
	  best = 2;
	  best_cost = cost;
	  best_right = i;
	  best_len = cost - 1;
	}
    }
  /* Try to use a scratch register to hold the AND operand.  */
  can_ext = ((mask << left) & ((unsigned HOST_WIDE_INT) 3 << 30)) == 0;
  for (i = 0; i <= 2; i++)
    {
      if (i > right)
	break;
      cost = (i != 0) + (CONST_OK_FOR_I08 (mask >> i) ? 2 : 3)
	     + (can_ext
		? ext_ashl_lshr_seq
		: ashl_lshr_seq)[left + i].insn_count;
      if (cost < best_cost)
	{
	  best = 4 - can_ext;
	  best_cost = cost;
	  best_right = i;
	  best_len = cost - 1 - ! CONST_OK_FOR_I08 (mask >> i);
	}
    }

  if (attrp)
    {
      attrp[0] = best_right;
      attrp[1] = best_len;
    }
  return best;
}
/* This is used in length attributes of the unnamed instructions
   corresponding to shl_and_kind return values of 1 and 2.  */
int
shl_and_length (rtx insn)
{
  rtx set_src, left_rtx, mask_rtx;
  int attributes[3];

  set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
  left_rtx = XEXP (XEXP (set_src, 0), 1);
  mask_rtx = XEXP (set_src, 1);
  shl_and_kind (left_rtx, mask_rtx, attributes);
  return attributes[1];
}
/* This is used in length attribute of the and_shl_scratch instruction.  */
int
shl_and_scr_length (rtx insn)
{
  rtx set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
  int len = ashl_lshr_seq[INTVAL (XEXP (set_src, 1)) & 31].insn_count;
  rtx op = XEXP (set_src, 0);
  len += ashl_lshr_seq[INTVAL (XEXP (op, 1)) & 31].insn_count + 1;
  op = XEXP (XEXP (op, 0), 0);
  return len + ashl_lshr_seq[INTVAL (XEXP (op, 1)) & 31].insn_count;
}
/* Generate rtl for instructions for which shl_and_kind advised a particular
   method of generating them, i.e. returned zero.  */
bool
gen_shl_and (rtx dest, rtx left_rtx, rtx mask_rtx, rtx source)
{
  int attributes[3];
  unsigned HOST_WIDE_INT mask;
  int kind = shl_and_kind (left_rtx, mask_rtx, attributes);
  int right, total_shift;
  void (*shift_gen_fun) (int, rtx *) = gen_shifty_hi_op;

  right = attributes[0];
  total_shift = INTVAL (left_rtx) + right;
  mask = (unsigned HOST_WIDE_INT) INTVAL (mask_rtx) >> total_shift;
  switch (kind)
    {
    default:
      return true;
    case 1:
      {
	int first = attributes[2];
	rtx operands[3];

	if (first < 0)
	  {
	    emit_insn ((mask << right) <= 0xff
		       ? gen_zero_extendqisi2 (dest,
					       gen_lowpart (QImode, source))
		       : gen_zero_extendhisi2 (dest,
					       gen_lowpart (HImode, source)));
	    source = dest;
	  }
	if (source != dest)
	  emit_insn (gen_movsi (dest, source));
	operands[0] = dest;
	if (right)
	  {
	    operands[2] = GEN_INT (right);
	    gen_shifty_hi_op (LSHIFTRT, operands);
	  }
	if (first > 0)
	  {
	    operands[2] = GEN_INT (first);
	    gen_shifty_hi_op (ASHIFT, operands);
	    total_shift -= first;
	    mask <<= first;
	  }
	if (first >= 0)
	  emit_insn (mask <= 0xff
		     ? gen_zero_extendqisi2 (dest, gen_lowpart (QImode, dest))
		     : gen_zero_extendhisi2 (dest, gen_lowpart (HImode, dest)));
	if (total_shift > 0)
	  {
	    operands[2] = GEN_INT (total_shift);
	    gen_shifty_hi_op (ASHIFT, operands);
	  }
	break;
      }
    case 4:
      shift_gen_fun = gen_shifty_op;
      /* FALLTHRU */
    case 3:
      /* If the topmost bit that matters is set, set the topmost bits
	 that don't matter.  This way, we might be able to get a shorter
	 signed constant.  */
      if (mask & ((HOST_WIDE_INT) 1 << (31 - total_shift)))
	mask |= (HOST_WIDE_INT) ((HOST_WIDE_INT_M1U) << (31 - total_shift));
      /* FALLTHRU */
    case 2:
      /* Don't expand fine-grained when combining, because that will
	 make the pattern fail.  */
      if (currently_expanding_to_rtl
	  || reload_in_progress || reload_completed)
	{
	  rtx operands[3];

	  /* Cases 3 and 4 should be handled by this split
	     only while combining  */
	  gcc_assert (kind <= 2);
	  if (right)
	    {
	      emit_insn (gen_lshrsi3 (dest, source, GEN_INT (right)));
	      source = dest;
	    }
	  emit_insn (gen_andsi3 (dest, source, GEN_INT (mask)));
	  if (total_shift)
	    {
	      operands[0] = dest;
	      operands[2] = GEN_INT (total_shift);
	      shift_gen_fun (ASHIFT, operands);
	    }
	  break;
	}
      else
	{
	  int neg = 0;
	  if (kind != 4 && total_shift < 16)
	    {
	      neg = -ext_ashl_lshr_seq[total_shift].amount[1];
	      if (neg > 0)
		neg -= ext_ashl_lshr_seq[total_shift].amount[2];
	      else
		neg = 0;
	    }
	  emit_insn (gen_and_shl_scratch (dest, source,
					  GEN_INT (right),
					  GEN_INT (mask),
					  GEN_INT (total_shift + neg),
					  GEN_INT (neg)));
	  emit_insn (gen_movsi (dest, dest));
	  break;
	}
    }
  return false;
}
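
/* Illustrative example (added comment, not in the original sources):
   for (x << 2) & 0x3fc, shl_and_kind sees left = 2 and, after the implicit
   right shift, mask = 0xff, so it returns kind 1 with right = 0: zero
   extend from 8 bits, then shift left by 2 - exactly the sequence that
   case 1 above emits via gen_zero_extendqisi2 and gen_shifty_hi_op.  */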
/* Try to find a good way to implement the combiner pattern
  [(set (match_operand:SI 0 "register_operand" "=r")
	(sign_extract:SI (ashift:SI (match_operand:SI 1 "register_operand" "r")
				    (match_operand:SI 2 "const_int_operand" "n")
			 (match_operand:SI 3 "const_int_operand" "n")
			 (const_int 0)))
   (clobber (reg:SI T_REG))]
  LEFT_RTX is operand 2 in the above pattern, and SIZE_RTX is operand 3.
  return 0 for simple left / right shift combination.
  return 1 for left shift / 8 bit sign extend / left shift.
  return 2 for left shift / 16 bit sign extend / left shift.
  return 3 for left shift / 8 bit sign extend / shift / sign extend.
  return 4 for left shift / 16 bit sign extend / shift / sign extend.
  return 5 for left shift / 16 bit sign extend / right shift
  return 6 for < 8 bit sign extend / left shift.
  return 7 for < 8 bit sign extend / left shift / single right shift.
  If COSTP is nonzero, assign the calculated cost to *COSTP.  */
int
shl_sext_kind (rtx left_rtx, rtx size_rtx, int *costp)
{
  int left, size, insize, ext;
  int cost = 0, best_cost;
  int kind;

  left = INTVAL (left_rtx);
  size = INTVAL (size_rtx);
  insize = size - left;
  gcc_assert (insize > 0);
  /* Default to left / right shift.  */
  kind = 0;
  best_cost = ashl_lshr_seq[32 - insize].insn_count
	      + ashl_lshr_seq[32 - size].insn_count;
  if (size <= 16)
    {
      /* 16 bit shift / sign extend / 16 bit shift */
      cost = ashl_lshr_seq[16 - insize].insn_count + 1
	     + ashl_lshr_seq[16 - size].insn_count;
      /* If ashiftrt_insns[16 - size] is 8, this choice will be overridden
	 below, by alternative 3 or something even better.  */
      if (cost < best_cost)
	{
	  kind = 5;
	  best_cost = cost;
	}
    }
  /* Try a plain sign extend between two shifts.  */
  for (ext = 16; ext >= insize; ext -= 8)
    {
      if (ext <= size)
	{
	  cost = ext_ashl_lshr_seq[ext - insize].insn_count + 1
		 + ashl_lshr_seq[size - ext].insn_count;
	  if (cost < best_cost)
	    {
	      kind = ext / (unsigned) 8;
	      best_cost = cost;
	    }
	}
      /* Check if we can do a sloppy shift with a final signed shift
	 restoring the sign.  */
      if (EXT_SHIFT_SIGNED (size - ext))
	cost = ext_ashl_lshr_seq[ext - insize].insn_count
	       + ext_ashl_lshr_seq[size - ext].insn_count + 1;
      /* If not, maybe it's still cheaper to do the second shift sloppy,
	 and do a final sign extend?  */
      else if (size <= 16)
	cost = ext_ashl_lshr_seq[ext - insize].insn_count + 1
	       + ext_ashl_lshr_seq[size > ext ? size - ext
					       : ext - size].insn_count
	       + 1;
      else
	continue;
      if (cost < best_cost)
	{
	  kind = ext / (unsigned) 8 + 2;
	  best_cost = cost;
	}
    }
  /* Check if we can sign extend in r0 */
  if (insize < 8)
    {
      cost = 3 + ashl_lshr_seq[left].insn_count;
      if (cost < best_cost)
	{
	  kind = 6;
	  best_cost = cost;
	}
      /* Try the same with a final signed shift.  */
      if (left < 31)
	{
	  cost = 3 + ext_ashl_lshr_seq[left + 1].insn_count + 1;
	  if (cost < best_cost)
	    {
	      kind = 7;
	      best_cost = cost;
	    }
	}
    }
  if (TARGET_DYNSHIFT)
    {
      /* Try to use a dynamic shift.  */
      cost = ashl_lshr_seq[32 - insize].insn_count + 1 + SH_DYNAMIC_SHIFT_COST;
      if (cost < best_cost)
	{
	  kind = 0;
	  best_cost = cost;
	}
    }
  if (costp)
    *costp = cost;
  return kind;
}
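
/* Illustrative example (added comment, not in the original sources):
   for left = 2 and size = 10, insize = 8, so the plain-sign-extend loop
   tries ext = 16 and ext = 8; ext = 8 yields
   ext_ashl_lshr_seq[0].insn_count + 1 + ashl_lshr_seq[2].insn_count,
   i.e. an 8-bit sign extend followed by a 2-bit left shift (kind 1).  */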
/* Function to be used in the length attribute of the instructions
   implementing this pattern.  */
int
shl_sext_length (rtx insn)
{
  rtx set_src, left_rtx, size_rtx;
  int cost;

  set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
  left_rtx = XEXP (XEXP (set_src, 0), 1);
  size_rtx = XEXP (set_src, 1);
  shl_sext_kind (left_rtx, size_rtx, &cost);
  return cost;
}
/* Generate rtl for this pattern */
bool
gen_shl_sext (rtx dest, rtx left_rtx, rtx size_rtx, rtx source)
{
  int kind;
  int left, size, insize, cost;
  rtx operands[3];

  kind = shl_sext_kind (left_rtx, size_rtx, &cost);
  left = INTVAL (left_rtx);
  size = INTVAL (size_rtx);
  insize = size - left;
  switch (kind)
    {
    case 1:
    case 2:
    case 3:
    case 4:
      {
	int ext = kind & 1 ? 8 : 16;
	int shift2 = size - ext;

	/* Don't expand fine-grained when combining, because that will
	   make the pattern fail.  */
	if (! currently_expanding_to_rtl
	    && ! reload_in_progress && ! reload_completed)
	  {
	    emit_insn (gen_shl_sext_ext (dest, source, left_rtx, size_rtx));
	    emit_insn (gen_movsi (dest, source));
	    break;
	  }
	if (dest != source)
	  emit_insn (gen_movsi (dest, source));
	operands[0] = dest;
	if (ext - insize)
	  {
	    operands[2] = GEN_INT (ext - insize);
	    gen_shifty_hi_op (ASHIFT, operands);
	  }
	emit_insn (kind & 1
		   ? gen_extendqisi2 (dest, gen_lowpart (QImode, dest))
		   : gen_extendhisi2 (dest, gen_lowpart (HImode, dest)));
	if (kind <= 2)
	  {
	    if (shift2)
	      {
		operands[2] = GEN_INT (shift2);
		gen_shifty_op (ASHIFT, operands);
	      }
	  }
	else
	  {
	    if (shift2 > 0)
	      {
		if (EXT_SHIFT_SIGNED (shift2))
		  {
		    operands[2] = GEN_INT (shift2 + 1);
		    gen_shifty_op (ASHIFT, operands);
		    operands[2] = const1_rtx;
		    gen_shifty_op (ASHIFTRT, operands);
		    break;
		  }
		operands[2] = GEN_INT (shift2);
		gen_shifty_hi_op (ASHIFT, operands);
	      }
	    else if (shift2)
	      {
		operands[2] = GEN_INT (-shift2);
		gen_shifty_hi_op (LSHIFTRT, operands);
	      }
	    emit_insn (size <= 8
		       ? gen_extendqisi2 (dest, gen_lowpart (QImode, dest))
		       : gen_extendhisi2 (dest, gen_lowpart (HImode, dest)));
	  }
	break;
      }
    case 5:
      {
	int i = 16 - size;
	if (! currently_expanding_to_rtl
	    && ! reload_in_progress && ! reload_completed)
	  emit_insn (gen_shl_sext_ext (dest, source, left_rtx, size_rtx));
	else
	  {
	    operands[0] = dest;
	    operands[2] = GEN_INT (16 - insize);
	    gen_shifty_hi_op (ASHIFT, operands);
	    emit_insn (gen_extendhisi2 (dest, gen_lowpart (HImode, dest)));
	  }
	/* Don't use gen_ashrsi3 because it generates new pseudos.  */
	while (--i >= 0)
	  gen_ashift (ASHIFTRT, 1, dest);
	break;
      }
    case 6:
    case 7:
      /* Don't expand fine-grained when combining, because that will
	 make the pattern fail.  */
      if (! currently_expanding_to_rtl
	  && ! reload_in_progress && ! reload_completed)
	{
	  emit_insn (gen_shl_sext_ext (dest, source, left_rtx, size_rtx));
	  emit_insn (gen_movsi (dest, source));
	  break;
	}
      emit_insn (gen_andsi3 (dest, source, GEN_INT ((1 << insize) - 1)));
      emit_insn (gen_xorsi3 (dest, dest, GEN_INT (1 << (insize - 1))));
      emit_insn (gen_addsi3 (dest, dest, GEN_INT (HOST_WIDE_INT_M1U
						  << (insize - 1))));
      operands[0] = dest;
      operands[2] = kind == 7 ? GEN_INT (left + 1) : left_rtx;
      gen_shifty_op (ASHIFT, operands);
      if (kind == 7)
	emit_insn (gen_ashrsi3_k (dest, dest, const1_rtx));
      break;
    default:
      return true;
    }
  return false;
}
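
/* Illustrative note (added comment, not in the original sources): the
   and/xor/add sequence in cases 6 and 7 sign-extends an INSIZE-bit field
   without a dedicated instruction.  E.g. for insize = 5 and field value
   v = 0x1f: (v & 0x1f) ^ 0x10 = 0x0f, and 0x0f + (-0x10) = -1, which is
   0x1f correctly sign-extended from 5 bits.  */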
typedef struct label_ref_list_d
{
  rtx_code_label *label;
  struct label_ref_list_d *next;
} *label_ref_list_t;

static object_allocator<label_ref_list_d> label_ref_list_d_pool
  ("label references list");
/* The SH cannot load a large constant into a register, constants have to
   come from a pc relative load.  The reference of a pc relative load
   instruction must be less than 1k in front of the instruction.  This
   means that we often have to dump a constant inside a function, and
   generate code to branch around it.

   It is important to minimize this, since the branches will slow things
   down and make things bigger.

   Worst case code looks like:

   mov.l L1,rn
   ..
   mov.l L2,rn
   bra   L3
   nop
   align
   L1:   .long value
   L2:   .long value
   L3:
   ..

   We fix this by performing a scan before scheduling, which notices which
   instructions need to have their operands fetched from the constant table
   and builds the table.

   The algorithm is:

   scan, find an instruction which needs a pcrel move.  Look forward, find the
   last barrier which is within MAX_COUNT bytes of the requirement.
   If there isn't one, make one.  Process all the instructions between
   the find and the barrier.

   In the above example, we can tell that L3 is within 1k of L1, so
   the first move can be shrunk from the 3 insn+constant sequence into
   just 1 insn, and the constant moved to L3 to make:

   mov.l L1,rn
   ..
   mov.l L3,rn
   bra   L4
   nop
   align
   L3:   .long value
   L4:   .long value

   Then the second move becomes the target for the shortening process.  */
typedef struct
{
  rtx value;			/* Value in table.  */
  rtx_code_label *label;	/* Label of value.  */
  label_ref_list_t wend;	/* End of window.  */
  machine_mode mode;		/* Mode of value.  */

  /* True if this constant is accessed as part of a post-increment
     sequence.  Note that HImode constants are never accessed in this way.  */
  bool part_of_sequence_p;
} pool_node;

/* The maximum number of constants that can fit into one pool, since
   constants in the range 0..510 are at least 2 bytes long, and in the
   range from there to 1018 at least 4 bytes.  */

#define MAX_POOL_SIZE 372
static pool_node pool_vector[MAX_POOL_SIZE];
static int pool_size;
static rtx_code_label *pool_window_label;
static int pool_window_last;

static int max_labelno_before_reorg;
/* ??? If we need a constant in HImode which is the truncated value of a
   constant we need in SImode, we could combine the two entries thus saving
   two bytes.  Is this common enough to be worth the effort of implementing
   it?  */

/* ??? This stuff should be done at the same time that we shorten branches.
   As it is now, we must assume that all branches are the maximum size, and
   this causes us to almost always output constant pools sooner than
   necessary.  */
4516 static rtx_code_label
*
4517 add_constant (rtx x
, machine_mode mode
, rtx last_value
)
4519 rtx_code_label
*lab
, *new_rtx
;
4520 label_ref_list_t ref
, newref
;
4522 /* First see if we've already got it. */
4523 for (int i
= 0; i
< pool_size
; i
++)
4525 if (x
->code
== pool_vector
[i
].value
->code
4526 && mode
== pool_vector
[i
].mode
)
4528 if (x
->code
== CODE_LABEL
)
4530 if (XINT (x
, 3) != XINT (pool_vector
[i
].value
, 3))
4533 if (rtx_equal_p (x
, pool_vector
[i
].value
))
4538 || ! rtx_equal_p (last_value
, pool_vector
[i
-1].value
))
4540 new_rtx
= gen_label_rtx ();
4541 LABEL_REFS (new_rtx
) = pool_vector
[i
].label
;
4542 pool_vector
[i
].label
= lab
= new_rtx
;
4544 if (lab
&& pool_window_label
)
4546 newref
= label_ref_list_d_pool
.allocate ();
4547 newref
->label
= pool_window_label
;
4548 ref
= pool_vector
[pool_window_last
].wend
;
4550 pool_vector
[pool_window_last
].wend
= newref
;
4553 pool_window_label
= new_rtx
;
4554 pool_window_last
= i
;
4560 /* Need a new one. */
4561 pool_vector
[pool_size
].value
= x
;
4562 if (last_value
&& rtx_equal_p (last_value
, pool_vector
[pool_size
- 1].value
))
4565 pool_vector
[pool_size
- 1].part_of_sequence_p
= true;
4568 lab
= gen_label_rtx ();
4569 pool_vector
[pool_size
].mode
= mode
;
4570 pool_vector
[pool_size
].label
= lab
;
4571 pool_vector
[pool_size
].wend
= NULL
;
4572 pool_vector
[pool_size
].part_of_sequence_p
= (lab
== 0);
4573 if (lab
&& pool_window_label
)
4575 newref
= label_ref_list_d_pool
.allocate ();
4576 newref
->label
= pool_window_label
;
4577 ref
= pool_vector
[pool_window_last
].wend
;
4579 pool_vector
[pool_window_last
].wend
= newref
;
4582 pool_window_label
= lab
;
4583 pool_window_last
= pool_size
;
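
/* Illustrative note (added comment, not in the original sources): the
   pool_window_label / pool_window_last pair chains each new label to the
   previously emitted one, which lets dump_table emit a
   consttable_window_end after the last use of a window.  A typical caller
   (see sh_reorg below) does
     lab = add_constant (src, mode, 0);
   and then rewrites SRC as a pc-relative mem referencing LAB.  */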
/* Output the literal table.  START, if nonzero, is the first instruction
   this table is needed for, and also indicates that there is at least one
   casesi_worker_2 instruction; We have to emit the operand3 labels from
   these insns at a 4-byte aligned position.  BARRIER is the barrier
   after which we are to place the table.  */
static void
dump_table (rtx_insn *start, rtx_insn *barrier)
{
  rtx_insn *scan = barrier;
  bool need_align = true;
  rtx_code_label *lab;
  label_ref_list_t ref;
  bool have_df = false;

  /* Do two passes, first time dump out the HI sized constants.  */

  for (int i = 0; i < pool_size; i++)
    {
      pool_node *p = &pool_vector[i];

      if (p->mode == HImode)
	{
	  if (need_align)
	    {
	      scan = emit_insn_after (gen_align_2 (), scan);
	      need_align = false;
	    }
	  for (lab = p->label; lab;
	       lab = safe_as_a <rtx_code_label *> (LABEL_REFS (lab)))
	    scan = emit_label_after (lab, scan);
	  scan = emit_insn_after (gen_consttable_2 (p->value, const0_rtx),
				  scan);
	  for (ref = p->wend; ref; ref = ref->next)
	    {
	      lab = ref->label;
	      scan = emit_insn_after (gen_consttable_window_end (lab), scan);
	    }
	}
      else if (p->mode == DFmode)
	have_df = true;
    }

  need_align = true;

  if (start)
    {
      scan = emit_insn_after (gen_align_4 (), scan);
      need_align = false;
      for (; start != barrier; start = NEXT_INSN (start))
	if (NONJUMP_INSN_P (start)
	    && recog_memoized (start) == CODE_FOR_casesi_worker_2)
	  {
	    rtx src = SET_SRC (XVECEXP (PATTERN (start), 0, 0));
	    rtx lab = XEXP (XVECEXP (src, 0, 3), 0);

	    scan = emit_label_after (as_a <rtx_insn *> (lab), scan);
	  }
    }
  if (TARGET_FMOVD && TARGET_ALIGN_DOUBLE && have_df)
    {
      rtx_insn *align_insn = NULL;

      scan = emit_label_after (gen_label_rtx (), scan);
      scan = emit_insn_after (gen_align_log (GEN_INT (3)), scan);
      need_align = false;

      for (int i = 0; i < pool_size; i++)
	{
	  pool_node *p = &pool_vector[i];

	  switch (p->mode)
	    {
	    case E_HImode:
	      break;
	    case E_SImode:
	    case E_SFmode:
	      if (align_insn && !p->part_of_sequence_p)
		{
		  for (lab = p->label; lab;
		       lab = safe_as_a <rtx_code_label *> (LABEL_REFS (lab)))
		    emit_label_before (lab, align_insn);
		  emit_insn_before (gen_consttable_4 (p->value, const0_rtx),
				    align_insn);
		  for (ref = p->wend; ref; ref = ref->next)
		    {
		      lab = ref->label;
		      emit_insn_before (gen_consttable_window_end (lab),
					align_insn);
		    }
		  delete_insn (align_insn);
		  align_insn = NULL;
		  continue;
		}
	      else
		{
		  for (lab = p->label; lab;
		       lab = safe_as_a <rtx_code_label *> (LABEL_REFS (lab)))
		    scan = emit_label_after (lab, scan);
		  scan = emit_insn_after (gen_consttable_4 (p->value,
							    const0_rtx), scan);
		  need_align = ! need_align;
		}
	      break;
	    case E_DFmode:
	      if (need_align)
		{
		  scan = emit_insn_after (gen_align_log (GEN_INT (3)), scan);
		  align_insn = scan;
		  need_align = false;
		}
	      /* FALLTHRU */
	    case E_DImode:
	      for (lab = p->label; lab;
		   lab = safe_as_a <rtx_code_label *> (LABEL_REFS (lab)))
		scan = emit_label_after (lab, scan);
	      scan = emit_insn_after (gen_consttable_8 (p->value, const0_rtx),
				      scan);
	      break;
	    default:
	      gcc_unreachable ();
	    }

	  if (p->mode != HImode)
	    {
	      for (ref = p->wend; ref; ref = ref->next)
		{
		  lab = ref->label;
		  scan = emit_insn_after (gen_consttable_window_end (lab),
					  scan);
		}
	    }
	}

      pool_size = 0;
    }

  for (int i = 0; i < pool_size; i++)
    {
      pool_node *p = &pool_vector[i];

      switch (p->mode)
	{
	case E_HImode:
	  break;
	case E_SImode:
	case E_SFmode:
	  if (need_align)
	    {
	      need_align = false;
	      scan = emit_label_after (gen_label_rtx (), scan);
	      scan = emit_insn_after (gen_align_4 (), scan);
	    }
	  for (lab = p->label; lab;
	       lab = safe_as_a <rtx_code_label *> (LABEL_REFS (lab)))
	    scan = emit_label_after (lab, scan);
	  scan = emit_insn_after (gen_consttable_4 (p->value, const0_rtx),
				  scan);
	  break;
	case E_DFmode:
	case E_DImode:
	  if (need_align)
	    {
	      need_align = false;
	      scan = emit_label_after (gen_label_rtx (), scan);
	      scan = emit_insn_after (gen_align_4 (), scan);
	    }
	  for (lab = p->label; lab;
	       lab = safe_as_a <rtx_code_label *> (LABEL_REFS (lab)))
	    scan = emit_label_after (lab, scan);
	  scan = emit_insn_after (gen_consttable_8 (p->value, const0_rtx),
				  scan);
	  break;
	default:
	  gcc_unreachable ();
	}

      if (p->mode != HImode)
	{
	  for (ref = p->wend; ref; ref = ref->next)
	    {
	      lab = ref->label;
	      scan = emit_insn_after (gen_consttable_window_end (lab), scan);
	    }
	}
    }

  scan = emit_insn_after (gen_consttable_end (), scan);
  scan = emit_barrier_after (scan);
  pool_size = 0;
  pool_window_label = NULL;
  pool_window_last = 0;
}
#define MOVA_LABELREF(mova) XVECEXP (SET_SRC (PATTERN (mova)), 0, 0)

/* Nonzero if the insn is a move instruction which needs to be fixed.  */

/* ??? For a DImode/DFmode moves, we don't need to fix it if each half of the
   CONST_DOUBLE input value is CONST_OK_FOR_I08.  For a SFmode move, we don't
   need to fix it if the input value is CONST_OK_FOR_I08.  */
static bool
broken_move (rtx_insn *insn)
{
  if (NONJUMP_INSN_P (insn))
    {
      rtx pat = PATTERN (insn);
      if (GET_CODE (pat) == PARALLEL)
	pat = XVECEXP (pat, 0, 0);
      if (GET_CODE (pat) == SET
	  /* We can load any 8-bit value if we don't care what the high
	     order bits end up as.  */
	  && GET_MODE (SET_DEST (pat)) != QImode
	  && (CONSTANT_P (SET_SRC (pat))
	      || (GET_CODE (SET_SRC (pat)) == UNSPEC_VOLATILE
		  && XINT (SET_SRC (pat), 1) == UNSPECV_SP_SWITCH_B)
	      /* Match mova_const.  */
	      || (GET_CODE (SET_SRC (pat)) == UNSPEC
		  && XINT (SET_SRC (pat), 1) == UNSPEC_MOVA
		  && GET_CODE (XVECEXP (SET_SRC (pat), 0, 0)) == CONST))
	  && ! (TARGET_SH2E
		&& GET_CODE (SET_SRC (pat)) == CONST_DOUBLE
		&& (fp_zero_operand (SET_SRC (pat))
		    || fp_one_operand (SET_SRC (pat)))
		/* In general we don't know the current setting of fpscr, so
		   disable fldi.
		   There is an exception if this was a register-register move
		   before reload - and hence it was ascertained that we have
		   single precision setting - and in a post-reload optimization
		   we changed this to do a constant load.  In that case
		   we don't have an r0 clobber, hence we must use fldi.  */
		&& (TARGET_FMOVD
		    || (GET_CODE (XEXP (XVECEXP (PATTERN (insn), 0, 2), 0))
			== SCRATCH))
		&& REG_P (SET_DEST (pat))
		&& FP_REGISTER_P (REGNO (SET_DEST (pat))))
	  && ! (TARGET_SH2A
		&& GET_MODE (SET_DEST (pat)) == SImode
		&& (satisfies_constraint_I20 (SET_SRC (pat))
		    || satisfies_constraint_I28 (SET_SRC (pat))))
	  && ! satisfies_constraint_I08 (SET_SRC (pat)))
	return true;
    }

  return false;
}
/* Return true if the specified insn is a mova insn.  */
static bool
mova_p (rtx_insn *insn)
{
  return (NONJUMP_INSN_P (insn)
	  && GET_CODE (PATTERN (insn)) == SET
	  && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
	  && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_MOVA
	  /* Don't match mova_const.  */
	  && GET_CODE (MOVA_LABELREF (insn)) == LABEL_REF);
}
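
/* Illustrative example (added comment, not in the original sources):
   a mova insn as matched above looks like
     (set (reg:SI 0) (unspec [(label_ref L)] UNSPEC_MOVA))
   whereas mova_const wraps a (const ...) instead of the label_ref.  */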
/* Fix up a mova from a switch that went out of range.  */
static void
fixup_mova (rtx_insn *mova)
{
  PUT_MODE (XEXP (MOVA_LABELREF (mova), 0), QImode);
  if (! flag_pic)
    {
      SET_SRC (PATTERN (mova)) = MOVA_LABELREF (mova);
      INSN_CODE (mova) = -1;
    }
  else
    {
      rtx_insn *worker = mova;
      rtx_code_label *lab = gen_label_rtx ();
      rtx wpat, wpat0, wpat1, wsrc, target, base, diff;

      do
	{
	  worker = NEXT_INSN (worker);
	  gcc_assert (worker
		      && !LABEL_P (worker)
		      && !JUMP_P (worker));
	} while (NOTE_P (worker)
		 || recog_memoized (worker) != CODE_FOR_casesi_worker_1);
      wpat = PATTERN (worker);
      wpat0 = XVECEXP (wpat, 0, 0);
      wpat1 = XVECEXP (wpat, 0, 1);
      wsrc = SET_SRC (wpat0);
      PATTERN (worker) = (gen_casesi_worker_2
			  (SET_DEST (wpat0), XVECEXP (wsrc, 0, 1),
			   XEXP (XVECEXP (wsrc, 0, 2), 0), lab,
			   XEXP (wpat1, 0)));
      INSN_CODE (worker) = -1;
      target = XVECEXP (SET_SRC (PATTERN (mova)), 0, 0);
      base = gen_rtx_LABEL_REF (Pmode, lab);
      diff = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, target, base), UNSPEC_SYMOFF);
      SET_SRC (PATTERN (mova)) = gen_rtx_CONST (Pmode, diff);
      INSN_CODE (mova) = -1;
    }
}
/* NEW_MOVA is a mova we've just encountered while scanning forward.  Update
   *num_mova, and check if the new mova is not nested within the first one.
   return 0 if *first_mova was replaced, 1 if new_mova was replaced,
   2 if new_mova has been assigned to *first_mova, -1 otherwise..  */
static int
untangle_mova (int *num_mova, rtx_insn **first_mova, rtx_insn *new_mova)
{
  int n_addr = 0; /* Initialization to shut up spurious warning.  */
  int f_target, n_target = 0; /* Likewise.  */

  if (optimize)
    {
      /* If NEW_MOVA has no address yet, it will be handled later.  */
      if (INSN_ADDRESSES_SIZE() <= (unsigned) INSN_UID (new_mova))
	return -1;

      n_addr = INSN_ADDRESSES (INSN_UID (new_mova));
      n_target = INSN_ADDRESSES (INSN_UID (XEXP (MOVA_LABELREF (new_mova), 0)));
      if (n_addr > n_target || n_addr + 1022 < n_target)
	{
	  /* Change the mova into a load.
	     broken_move will then return true for it.  */
	  fixup_mova (new_mova);
	  return 1;
	}
    }
  if (!(*num_mova)++)
    {
      *first_mova = new_mova;
      return 2;
    }
  if (!optimize
      || ((f_target
	   = INSN_ADDRESSES (INSN_UID (XEXP (MOVA_LABELREF (*first_mova), 0))))
	  >= n_target))
    return -1;

  (*num_mova)--;
  if (f_target - INSN_ADDRESSES (INSN_UID (*first_mova))
      > n_target - n_addr)
    {
      fixup_mova (*first_mova);
      return 0;
    }
  else
    {
      fixup_mova (new_mova);
      return 1;
    }
}
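
/* Illustrative note (added comment, not in the original sources): the range
   test above mirrors the mova addressing constraints - the target must lie
   after the mova (n_addr <= n_target) and within 1022 bytes
   (n_addr + 1022 >= n_target); anything else gets fixed up into a plain
   constant-pool load.  */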
/* Find the last barrier from insn FROM which is close enough to hold the
   constant pool.  If we can't find one, then create one near the end of
   the range.  */
static rtx_insn *
find_barrier (int num_mova, rtx_insn *mova, rtx_insn *from)
{
  int count_si = 0;
  int count_hi = 0;
  int found_hi = 0;
  int found_si = 0;
  int hi_align = 2;
  int si_align = 2;
  int leading_mova = num_mova;
  rtx_insn *barrier_before_mova = NULL;
  rtx_insn *found_barrier = NULL;
  rtx_insn *good_barrier = NULL;
  int si_limit;
  int hi_limit;
  rtx_insn *orig = from;
  rtx_insn *last_got = NULL;
  rtx_insn *last_symoff = NULL;

  /* For HImode: range is 510, add 4 because pc counts from address of
     second instruction after this one, subtract 2 for the jump instruction
     that we may need to emit before the table, subtract 2 for the instruction
     that fills the jump delay slot (in very rare cases, reorg will take an
     instruction from after the constant pool or will leave the delay slot
     empty).  This gives 510.
     For SImode: range is 1020, add 4 because pc counts from address of
     second instruction after this one, subtract 2 in case pc is 2 byte
     aligned, subtract 2 for the jump instruction that we may need to emit
     before the table, subtract 2 for the instruction that fills the jump
     delay slot.  This gives 1018.  */

  /* The branch will always be shortened now that the reference address for
     forward branches is the successor address, thus we need no longer make
     adjustments to the [sh]i_limit for -O0.  */

  si_limit = 1018;
  hi_limit = 510;

  while (from && count_si < si_limit && count_hi < hi_limit)
    {
      int inc = get_attr_length (from);
      int new_align = 1;

      /* If this is a label that existed at the time of the compute_alignments
	 call, determine the alignment.  N.B.  When find_barrier recurses for
	 an out-of-reach mova, we might see labels at the start of previously
	 inserted constant tables.  */
      if (LABEL_P (from)
	  && CODE_LABEL_NUMBER (from) <= max_labelno_before_reorg)
	{
	  if (optimize)
	    new_align = 1 << label_to_alignment (from).levels[0].log;
	  else if (BARRIER_P (prev_nonnote_insn (from)))
	    new_align = 1 << barrier_align (from);
	  else
	    new_align = 1;
	  inc = 0;
	}
      /* In case we are scanning a constant table because of recursion, check
	 for explicit alignments.  If the table is long, we might be forced
	 to emit the new table in front of it; the length of the alignment
	 might be the last straw.  */
      else if (NONJUMP_INSN_P (from)
	       && GET_CODE (PATTERN (from)) == UNSPEC_VOLATILE
	       && XINT (PATTERN (from), 1) == UNSPECV_ALIGN)
	new_align = INTVAL (XVECEXP (PATTERN (from), 0, 0));
      /* When we find the end of a constant table, paste the new constant
	 at the end.  That is better than putting it in front because
	 this way, we don't need extra alignment for adding a 4-byte-aligned
	 mov(a) label to a 2/4 or 8/4 byte aligned table.  */
      else if (NONJUMP_INSN_P (from)
	       && GET_CODE (PATTERN (from)) == UNSPEC_VOLATILE
	       && XINT (PATTERN (from), 1) == UNSPECV_CONST_END)
	return from;

      if (BARRIER_P (from))
	{
	  rtx_insn *next;

	  found_barrier = from;

	  /* If we are at the end of the function, or in front of an alignment
	     instruction, we need not insert an extra alignment.  We prefer
	     this kind of barrier.  */
	  if (barrier_align (from) > 2)
	    good_barrier = from;

	  /* If we are at the end of a hot/cold block, dump the constants
	     here.  */
	  next = NEXT_INSN (from);
	  if (next
	      && NOTE_P (next)
	      && NOTE_KIND (next) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
	    break;
	}

      if (broken_move (from))
	{
	  rtx pat, src, dst;
	  machine_mode mode;

	  pat = PATTERN (from);
	  if (GET_CODE (pat) == PARALLEL)
	    pat = XVECEXP (pat, 0, 0);
	  src = SET_SRC (pat);
	  dst = SET_DEST (pat);
	  mode = GET_MODE (dst);

	  /* GOT pcrelat setting comes in pair of
	     mova	.L8,r0
	     mov.l	.L8,r12
	     instructions.  (plus add r0,r12).
	     Remember if we see one without the other.  */
	  if (GET_CODE (src) == UNSPEC && PIC_ADDR_P (XVECEXP (src, 0, 0)))
	    last_got = last_got ? NULL : from;
	  else if (PIC_ADDR_P (src))
	    last_got = last_got ? NULL : from;

	  /* We must explicitly check the mode, because sometimes the
	     front end will generate code to load unsigned constants into
	     HImode targets without properly sign extending them.  */
	  if (mode == HImode
	      || (mode == SImode && satisfies_constraint_I16 (src)
		  && REGNO (dst) != FPUL_REG))
	    {
	      found_hi += 2;
	      /* We put the short constants before the long constants, so
		 we must count the length of short constants in the range
		 for the long constants.  */
	      /* ??? This isn't optimal, but is easy to do.  */
	      si_limit -= 2;
	    }
	  else
	    {
	      /* We dump DF/DI constants before SF/SI ones, because
		 the limit is the same, but the alignment requirements
		 are higher.  We may waste up to 4 additional bytes
		 for alignment, and the DF/DI constant may have
		 another SF/SI constant placed before it.  */
	      while (si_align > 2 && found_si + si_align - 2 > count_si)
		si_align >>= 1;
	      if (found_si > count_si)
		count_si = found_si;
	      found_si += GET_MODE_SIZE (mode);
	      if (num_mova)
		si_limit -= GET_MODE_SIZE (mode);
	    }
	}

      if (mova_p (from))
	{
	  switch (untangle_mova (&num_mova, &mova, from))
	    {
	      case 1:
		if (flag_pic)
		  {
		    rtx src = SET_SRC (PATTERN (from));
		    if (GET_CODE (src) == CONST
			&& GET_CODE (XEXP (src, 0)) == UNSPEC
			&& XINT (XEXP (src, 0), 1) == UNSPEC_SYMOFF)
		      last_symoff = from;
		  }
		break;
	      case 0:	return find_barrier (0, 0, mova);
	      case 2:
		{
		  leading_mova = 0;
		  barrier_before_mova
		    = good_barrier ? good_barrier : found_barrier;
		}
	      default:	break;
	    }
	  if (found_si > count_si)
	    count_si = found_si;
	}
      else if (JUMP_TABLE_DATA_P (from)
	       && GET_CODE (PATTERN (from)) == ADDR_DIFF_VEC)
	{
	  if ((num_mova > 1 && GET_MODE (prev_nonnote_insn (from)) == VOIDmode)
	      || (num_mova
		  && (prev_nonnote_insn (from)
		      == XEXP (MOVA_LABELREF (mova), 0))))
	    num_mova--;
	  if (barrier_align (next_real_insn (from)) == align_jumps.levels[0].log)
	    {
	      /* We have just passed the barrier in front of the
		 ADDR_DIFF_VEC, which is stored in found_barrier.  Since
		 the ADDR_DIFF_VEC is accessed as data, just like our pool
		 constants, this is a good opportunity to accommodate what
		 we have gathered so far.
		 If we waited any longer, we could end up at a barrier in
		 front of code, which gives worse cache usage for separated
		 instruction / data caches.  */
	      good_barrier = found_barrier;
	      break;
	    }
	  else
	    {
	      rtx body = PATTERN (from);
	      inc = XVECLEN (body, 1) * GET_MODE_SIZE (GET_MODE (body));
	    }
	}
      /* For the SH1, we generate alignments even after jumps-around-jumps.  */
      else if (JUMP_P (from)
	       && ! TARGET_SH2
	       && ! optimize_size)
	new_align = 4;

      /* There is a possibility that a bf is transformed into a bf/s by the
	 delay slot scheduler.  */
      if (JUMP_P (from)
	  && get_attr_type (from) == TYPE_CBRANCH
	  && ! sequence_insn_p (from))
	inc += 2;

      if (found_si)
	{
	  count_si += inc;
	  if (new_align > si_align)
	    {
	      si_limit -= (count_si - 1) & (new_align - si_align);
	      si_align = new_align;
	    }
	  count_si = (count_si + new_align - 1) & -new_align;
	}
      if (found_hi)
	{
	  count_hi += inc;
	  if (new_align > hi_align)
	    {
	      hi_limit -= (count_hi - 1) & (new_align - hi_align);
	      hi_align = new_align;
	    }
	  count_hi = (count_hi + new_align - 1) & -new_align;
	}
      from = NEXT_INSN (from);
    }

  if (num_mova)
    {
      if (leading_mova)
	{
	  /* Try as we might, the leading mova is out of range.  Change
	     it into a load (which will become a pcload) and retry.  */
	  fixup_mova (mova);
	  return find_barrier (0, 0, mova);
	}
      else
	{
	  /* Insert the constant pool table before the mova instruction,
	     to prevent the mova label reference from going out of range.  */
	  from = mova;
	  good_barrier = found_barrier = barrier_before_mova;
	}
    }

  if (found_barrier)
    {
      if (good_barrier && next_real_insn (found_barrier))
	found_barrier = good_barrier;
    }
  else
    {
      /* We didn't find a barrier in time to dump our stuff,
	 so we'll make one.  */
      rtx_code_label *label = gen_label_rtx ();

      /* Don't emit a constant table in the middle of insns for
	 casesi_worker_2.  This is a bit overkill but is enough
	 because casesi_worker_2 wouldn't appear so frequently.  */
      if (last_symoff)
	from = last_symoff;

      /* If we exceeded the range, then we must back up over the last
	 instruction we looked at.  Otherwise, we just need to undo the
	 NEXT_INSN at the end of the loop.  */
      if (PREV_INSN (from) != orig
	  && (count_hi > hi_limit || count_si > si_limit))
	from = PREV_INSN (PREV_INSN (from));
      else
	from = PREV_INSN (from);

      /* Don't emit a constant table int the middle of global pointer setting,
	 since that that would move the addressing base GOT into another table.
	 We need the first mov instruction before the _GLOBAL_OFFSET_TABLE_
	 in the pool anyway, so just move up the whole constant pool.

	 However, avoid doing so when the last single GOT mov is the starting
	 insn itself. Going past above the start insn would create a negative
	 offset, causing errors.  */
      if (last_got && last_got != orig)
	from = PREV_INSN (last_got);

      /* Don't insert the constant pool table at the position which
	 may be the landing pad.  */
      else if (flag_exceptions
	       && CALL_P (from)
	       && find_reg_note (from, REG_EH_REGION, NULL_RTX))
	from = PREV_INSN (from);

      /* Walk back to be just before any jump or label.
	 Putting it before a label reduces the number of times the branch
	 around the constant pool table will be hit.  Putting it before
	 a jump makes it more likely that the bra delay slot will be
	 filled.  */
      while (NOTE_P (from) || JUMP_P (from) || LABEL_P (from))
	from = PREV_INSN (from);

      if (CALL_P (from))
	{
	  bool sibcall_p = SIBLING_CALL_P (from);

	  /* If FROM was a sibling call, then we know that control
	     will not return.  In fact, we were guaranteed to hit
	     a barrier before another real insn.

	     The jump around the constant pool is unnecessary.  It
	     costs space, but more importantly it confuses dwarf2cfi
	     generation.  */
	  if (sibcall_p)
	    return emit_barrier_after (from);
	}

      from = emit_jump_insn_after (gen_jump (label), from);
      JUMP_LABEL (from) = label;
      LABEL_NUSES (label) = 1;
      found_barrier = emit_barrier_after (from);
      emit_label_after (label, found_barrier);
    }

  return found_barrier;
}
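
/* Illustrative note (added comment, not in the original sources): the
   limits used above follow the derivation in the head comment:
   SImode: 1020 + 4 - 2 - 2 - 2 = 1018; HImode: 510 + 4 - 2 - 2 = 510.  */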
/* If the instruction INSN is implemented by a special function, and we can
   positively find the register that is used to call the sfunc, and this
   register is not used anywhere else in this instruction - except as the
   destination of a set, return this register; else, return 0.  */
static rtx
sfunc_uses_reg (rtx_insn *insn)
{
  int i;
  rtx pattern, part, reg_part, reg;

  if (!NONJUMP_INSN_P (insn))
    return NULL_RTX;
  pattern = PATTERN (insn);
  if (GET_CODE (pattern) != PARALLEL || get_attr_type (insn) != TYPE_SFUNC)
    return NULL_RTX;

  for (reg_part = NULL_RTX, i = XVECLEN (pattern, 0) - 1; i >= 1; i--)
    {
      part = XVECEXP (pattern, 0, i);
      if (GET_CODE (part) == USE && GET_MODE (XEXP (part, 0)) == SImode)
	reg_part = part;
    }
  if (! reg_part)
    return NULL_RTX;
  reg = XEXP (reg_part, 0);
  for (int i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
    {
      part = XVECEXP (pattern, 0, i);
      if (part == reg_part || GET_CODE (part) == CLOBBER)
	continue;
      if (reg_mentioned_p (reg, ((GET_CODE (part) == SET
				  && REG_P (SET_DEST (part)))
				 ? SET_SRC (part) : part)))
	return NULL_RTX;
    }
  return reg;
}
/* See if the only way in which INSN uses REG is by calling it, or by
   setting it while calling it.  Set *SET to a SET rtx if the register
   is set by INSN.  */
static bool
noncall_uses_reg (rtx reg, rtx_insn *insn, rtx *set)
{
  *set = NULL_RTX;

  rtx reg2 = sfunc_uses_reg (insn);
  if (reg2 && REGNO (reg2) == REGNO (reg))
    {
      rtx pattern = single_set (insn);
      if (pattern
	  && REG_P (SET_DEST (pattern))
	  && REGNO (reg) == REGNO (SET_DEST (pattern)))
	*set = pattern;
      return false;
    }
  if (!CALL_P (insn))
    {
      /* We don't use rtx_equal_p because we don't care if the mode is
	 different.  */
      rtx pattern = single_set (insn);
      if (pattern
	  && REG_P (SET_DEST (pattern))
	  && REGNO (reg) == REGNO (SET_DEST (pattern)))
	{
	  rtx par, part;
	  int i;

	  *set = pattern;
	  par = PATTERN (insn);
	  if (GET_CODE (par) == PARALLEL)
	    for (i = XVECLEN (par, 0) - 1; i >= 0; i--)
	      {
		part = XVECEXP (par, 0, i);
		if (GET_CODE (part) != SET && reg_mentioned_p (reg, part))
		  return true;
	      }
	  return reg_mentioned_p (reg, SET_SRC (pattern));
	}

      return true;
    }

  rtx pattern = PATTERN (insn);

  if (GET_CODE (pattern) == PARALLEL)
    {
      for (int i = XVECLEN (pattern, 0) - 1; i >= 1; i--)
	if (reg_mentioned_p (reg, XVECEXP (pattern, 0, i)))
	  return true;
      pattern = XVECEXP (pattern, 0, 0);
    }

  if (GET_CODE (pattern) == SET)
    {
      if (reg_mentioned_p (reg, SET_DEST (pattern)))
	{
	  /* We don't use rtx_equal_p, because we don't care if the
	     mode is different.  */
	  if (!REG_P (SET_DEST (pattern))
	      || REGNO (reg) != REGNO (SET_DEST (pattern)))
	    return true;

	  *set = pattern;
	}

      pattern = SET_SRC (pattern);
    }

  if (GET_CODE (pattern) != CALL
      || !MEM_P (XEXP (pattern, 0))
      || ! rtx_equal_p (reg, XEXP (XEXP (pattern, 0), 0)))
    return true;

  return false;
}
/* Given a X, a pattern of an insn or a part of it, return a mask of used
   general registers.  Bits 0..15 mean that the respective registers
   are used as inputs in the instruction.  Bits 16..31 mean that the
   registers 0..15, respectively, are used as outputs, or are clobbered.
   IS_DEST should be set to 16 if X is the destination of a SET, else to 0.  */
static int
regs_used (rtx x, int is_dest)
{
  enum rtx_code code;
  const char *fmt;
  int used = 0;

  if (! x)
    return used;
  code = GET_CODE (x);
  switch (code)
    {
    case REG:
      if (REGNO (x) < 16)
	return (((1 << hard_regno_nregs (0, GET_MODE (x))) - 1)
		<< (REGNO (x) + is_dest));
      return 0;
    case SUBREG:
      {
	rtx y = SUBREG_REG (x);

	if (!REG_P (y))
	  break;
	if (REGNO (y) < 16)
	  return (((1 << hard_regno_nregs (0, GET_MODE (x))) - 1)
		  << (REGNO (y) +
		      subreg_regno_offset (REGNO (y),
					   GET_MODE (y),
					   SUBREG_BYTE (x),
					   GET_MODE (x)) + is_dest));
	return 0;
      }
    case SET:
      return regs_used (SET_SRC (x), 0) | regs_used (SET_DEST (x), 16);
    case RETURN:
      /* If there was a return value, it must have been indicated with USE.  */
      return 0x00ffff00;
    case CLOBBER:
      is_dest = 1;
      break;
    case MEM:
      is_dest = 0;
      break;
    case CALL:
      used |= 0x00ff00f0;
      break;
    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);

  for (int i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
	{
	  for (int j = XVECLEN (x, i) - 1; j >= 0; j--)
	    used |= regs_used (XVECEXP (x, i, j), is_dest);
	}
      else if (fmt[i] == 'e')
	used |= regs_used (XEXP (x, i), is_dest);
    }
  return used;
}
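
/* Illustrative example (added comment, not in the original sources):
   for (set (reg:SI 1) (plus:SI (reg:SI 2) (reg:SI 3))), regs_used returns
   (1 << (1 + 16)) | (1 << 2) | (1 << 3) = 0x2000c: r1 is an output,
   r2 and r3 are inputs.  */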
/* Create an instruction that prevents redirection of a conditional branch
   to the destination of the JUMP with address ADDR.
   If the branch needs to be implemented as an indirect jump, try to find
   a scratch register for it.
   If NEED_BLOCK is 0, don't do anything unless we need a scratch register.
   If any preceding insn that doesn't fit into a delay slot is good enough,
   pass 1.  Pass 2 if a definite blocking insn is needed.
   -1 is used internally to avoid deep recursion.
   If a blocking instruction is made or recognized, return it.  */
static rtx_insn *
gen_block_redirect (rtx_insn *jump, int addr, int need_block)
{
  int dead = 0;
  rtx_insn *prev = prev_nonnote_insn (jump);

  /* First, check if we already have an instruction that satisfies our need.  */
  if (prev && NONJUMP_INSN_P (prev) && ! prev->deleted ())
    {
      if (INSN_CODE (prev) == CODE_FOR_indirect_jump_scratch)
	return prev;
      if (GET_CODE (PATTERN (prev)) == USE
	  || GET_CODE (PATTERN (prev)) == CLOBBER
	  || get_attr_in_delay_slot (prev) == IN_DELAY_SLOT_YES)
	prev = jump;
      else if ((need_block &= ~1) < 0)
	return prev;
      else if (recog_memoized (prev) == CODE_FOR_block_branch_redirect)
	need_block = 0;
    }
  if (GET_CODE (PATTERN (jump)) == RETURN)
    {
      if (! need_block)
	return prev;
      /* Reorg even does nasty things with return insns that cause branches
	 to go out of range - see find_end_label and callers.  */
      return emit_insn_before (gen_block_branch_redirect (const0_rtx) , jump);
    }
  /* We can't use JUMP_LABEL here because it might be undefined
     when not optimizing.  */
  rtx dest = XEXP (SET_SRC (PATTERN (jump)), 0);
  /* If the branch is out of range, try to find a scratch register for it.  */
  if (optimize
      && (INSN_ADDRESSES (INSN_UID (dest)) - addr + (unsigned) 4092
	  > 4092 + 4098))
    {
      rtx_insn *scan;
      /* Don't look for the stack pointer as a scratch register,
	 it would cause trouble if an interrupt occurred.  */
      unsigned attempt = 0x7fff, used;
      int jump_left = flag_expensive_optimizations + 1;

      /* It is likely that the most recent eligible instruction is wanted for
	 the delay slot.  Therefore, find out which registers it uses, and
	 try to avoid using them.  */
      for (scan = jump; (scan = PREV_INSN (scan)); )
	{
	  if (scan->deleted ())
	    continue;
	  rtx_code code = GET_CODE (scan);
	  if (code == CODE_LABEL || code == JUMP_INSN)
	    break;
	  if (code == INSN
	      && GET_CODE (PATTERN (scan)) != USE
	      && GET_CODE (PATTERN (scan)) != CLOBBER
	      && get_attr_in_delay_slot (scan) == IN_DELAY_SLOT_YES)
	    {
	      attempt &= ~regs_used (PATTERN (scan), 0);
	      break;
	    }
	}
      for (used = dead = 0, scan = JUMP_LABEL_AS_INSN (jump);
	   (scan = NEXT_INSN (scan)); )
	{
	  if (scan->deleted ())
	    continue;
	  rtx_code code = GET_CODE (scan);
	  if (INSN_P (scan))
	    {
	      used |= regs_used (PATTERN (scan), 0);
	      if (code == CALL_INSN)
		used |= regs_used (CALL_INSN_FUNCTION_USAGE (scan), 0);
	      dead |= (used >> 16) & ~used;
	      if (dead & attempt)
		{
		  dead &= attempt;
		  break;
		}
	      if (code == JUMP_INSN)
		{
		  if (jump_left-- && simplejump_p (scan))
		    scan = JUMP_LABEL_AS_INSN (scan);
		  else
		    break;
		}
	    }
	}
      /* Mask out the stack pointer again, in case it was
	 the only 'free' register we have found.  */
      dead &= 0x7fff;
    }
  /* If the immediate destination is still in range, check for possible
     threading with a jump beyond the delay slot insn.
     Don't check if we are called recursively; the jump has been or will be
     checked in a different invocation then.  */
  else if (optimize && need_block >= 0)
    {
      rtx_insn *next = next_active_insn (as_a <rtx_insn *> (dest));
      next = next_active_insn (next);
      if (next && JUMP_P (next)
	  && GET_CODE (PATTERN (next)) == SET
	  && recog_memoized (next) == CODE_FOR_jump_compact)
	{
	  dest = JUMP_LABEL (next);
	  if (dest
	      && (INSN_ADDRESSES (INSN_UID (dest)) - addr + (unsigned) 4092
		  > 4092 + 4098))
	    gen_block_redirect (next, INSN_ADDRESSES (INSN_UID (next)), -1);
	}
    }

  if (dead)
    {
      rtx reg = gen_rtx_REG (SImode, exact_log2 (dead & -dead));

      /* It would be nice if we could convert the jump into an indirect
	 jump / far branch right now, and thus exposing all constituent
	 instructions to further optimization.  However, reorg uses
	 simplejump_p to determine if there is an unconditional jump where
	 it should try to schedule instructions from the target of the
	 branch; simplejump_p fails for indirect jumps even if they have
	 a JUMP_LABEL.  */
      rtx_insn *insn = emit_insn_before (gen_indirect_jump_scratch
					 (reg, GEN_INT (unspec_bbr_uid++)),
					 jump);
      /* ??? We would like this to have the scope of the jump, but that
	 scope will change when a delay slot insn of an inner scope is added.
	 Hence, after delay slot scheduling, we'll have to expect
	 NOTE_INSN_BLOCK_END notes between the indirect_jump_scratch and
	 the jump.  */

      INSN_LOCATION (insn) = INSN_LOCATION (jump);
      INSN_CODE (insn) = CODE_FOR_indirect_jump_scratch;
      return insn;
    }
  else if (need_block)
    /* We can't use JUMP_LABEL here because it might be undefined
       when not optimizing.  */
    return emit_insn_before (gen_block_branch_redirect
			     (GEN_INT (unspec_bbr_uid++)),
			     jump);
  return prev;
}
#define CONDJUMP_MIN -252
#define CONDJUMP_MAX 262

struct far_branch
{
  /* A label (to be placed) in front of the jump
     that jumps to our ultimate destination.  */
  rtx_insn *near_label;
  /* Where we are going to insert it if we cannot move the jump any farther,
     or the jump itself if we have picked up an existing jump.  */
  rtx_insn *insert_place;
  /* The ultimate destination.  */
  rtx_insn *far_label;
  struct far_branch *prev;
  /* If the branch has already been created, its address;
     else the address of its first prospective user.  */
  int address;
};

enum mdep_reorg_phase_e mdep_reorg_phase;
static void
gen_far_branch (struct far_branch *bp)
{
  rtx_insn *insn = bp->insert_place;
  rtx_jump_insn *jump;
  rtx_code_label *label = gen_label_rtx ();

  emit_label_after (label, insn);
  if (bp->far_label)
    {
      jump = emit_jump_insn_after (gen_jump (bp->far_label), insn);
      LABEL_NUSES (bp->far_label)++;
    }
  else
    jump = emit_jump_insn_after (gen_return (), insn);

  /* Emit a barrier so that reorg knows that any following instructions
     are not reachable via a fall-through path.
     But don't do this when not optimizing, since we wouldn't suppress the
     alignment for the barrier then, and could end up with out-of-range
     pc-relative loads.  */
  if (optimize)
    emit_barrier_after (jump);
  emit_label_after (bp->near_label, insn);

  if (bp->far_label)
    JUMP_LABEL (jump) = bp->far_label;
  else
    {
      rtx pat = PATTERN (jump);
      gcc_assert (ANY_RETURN_P (pat));
      JUMP_LABEL (jump) = pat;
    }

  bool ok = invert_jump (as_a <rtx_jump_insn *> (insn), label, 1);
  gcc_assert (ok);

  /* If we are branching around a jump (rather than a return), prevent
     reorg from using an insn from the jump target as the delay slot insn -
     when reorg did this, it pessimized code (we rather hide the delay slot)
     and it could cause branches to go out of range.  */
  if (bp->far_label)
    (emit_insn_after
     (gen_stuff_delay_slot
      (GEN_INT (unspec_bbr_uid++),
       GEN_INT (recog_memoized (insn) == CODE_FOR_branch_false)),
      insn));
  /* Prevent reorg from undoing our splits.  */
  gen_block_redirect (jump, bp->address += 2, 2);
}
/* Fix up ADDR_DIFF_VECs.  */
static void
fixup_addr_diff_vecs (rtx_insn *first)
{
  rtx_insn *insn;

  for (insn = first; insn; insn = NEXT_INSN (insn))
    {
      rtx vec_lab, pat, prevpat, x, braf_label;
      rtx_insn *prev;

      if (! JUMP_TABLE_DATA_P (insn)
	  || GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
	continue;
      pat = PATTERN (insn);
      vec_lab = XEXP (XEXP (pat, 0), 0);

      /* Search the matching casesi_jump_2.  */
      for (prev = as_a <rtx_insn *> (vec_lab); ; prev = PREV_INSN (prev))
	{
	  if (!JUMP_P (prev))
	    continue;
	  prevpat = PATTERN (prev);
	  if (GET_CODE (prevpat) != PARALLEL || XVECLEN (prevpat, 0) != 2)
	    continue;
	  x = XVECEXP (prevpat, 0, 1);
	  if (GET_CODE (x) != USE)
	    continue;
	  x = XEXP (x, 0);
	  if (GET_CODE (x) == LABEL_REF && XEXP (x, 0) == vec_lab)
	    break;
	}
      /* FIXME: This is a bug in the optimizer, but it seems harmless
	 to just avoid panicing.  */
      if (!prev)
	continue;

      /* Emit the reference label of the braf where it belongs, right after
	 the casesi_jump_2 (i.e. braf).  */
      braf_label = XEXP (XEXP (SET_SRC (XVECEXP (prevpat, 0, 0)), 1), 0);
      emit_label_after (as_a <rtx_insn *> (braf_label), prev);

      /* Fix up the ADDR_DIF_VEC to be relative
	 to the reference address of the braf.  */
      XEXP (XEXP (pat, 0), 0) = braf_label;
    }
}
/* BARRIER_OR_LABEL is either a BARRIER or a CODE_LABEL immediately following
   a barrier.  Return the base 2 logarithm of the desired alignment.  */
int
barrier_align (rtx_insn *barrier_or_label)
{
  if (! barrier_or_label)
    return 0;

  if (LABEL_P (barrier_or_label)
      && NEXT_INSN (barrier_or_label)
      && JUMP_TABLE_DATA_P (NEXT_INSN (barrier_or_label)))
    return 2;

  if (BARRIER_P (barrier_or_label)
      && PREV_INSN (barrier_or_label)
      && JUMP_TABLE_DATA_P (PREV_INSN (barrier_or_label)))
    {
      rtx pat = PATTERN (PREV_INSN (barrier_or_label));
      /* If this is a very small table, we want to keep the alignment after
	 the table to the minimum for proper code alignment.  */
      return ((optimize_size
	       || ((unsigned) XVECLEN (pat, 1) * GET_MODE_SIZE (GET_MODE (pat))
		   <= (unsigned) 1 << (CACHE_LOG - 2)))
	      ? 1 : align_jumps.levels[0].log);
    }

  rtx_insn *next = next_active_insn (barrier_or_label);

  if (! next)
    return 0;

  rtx pat = PATTERN (next);

  if (GET_CODE (pat) == UNSPEC_VOLATILE && XINT (pat, 1) == UNSPECV_ALIGN)
    /* This is a barrier in front of a constant table.  */
    return 0;

  if (optimize_size)
    return 0;

  if (! TARGET_SH2 || ! optimize)
    return align_jumps.levels[0].log;

  /* When fixing up pcloads, a constant table might be inserted just before
     the basic block that ends with the barrier.  Thus, we can't trust the
     instruction lengths before that.  */
  if (mdep_reorg_phase > SH_FIXUP_PCLOAD)
    {
      /* Check if there is an immediately preceding branch to the insn beyond
	 the barrier.  We must weight the cost of discarding useful information
	 from the current cache line when executing this branch and there is
	 an alignment, against that of fetching unneeded insn in front of the
	 branch target when there is no alignment.  */

      /* There are two delay_slot cases to consider.  One is the simple case
	 where the preceding branch is to the insn beyond the barrier (simple
	 delay slot filling), and the other is where the preceding branch has
	 a delay slot that is a duplicate of the insn after the barrier
	 (fill_eager_delay_slots) and the branch is to the insn after the insn
	 after the barrier.  */

      int slot, credit;
      bool jump_to_next = false;

      /* Skip to the insn before the JUMP_INSN before the barrier under
	 investigation.  */
      rtx_insn *prev = prev_real_insn (prev_active_insn (barrier_or_label));

      for (slot = 2, credit = (1 << (CACHE_LOG - 2)) + 2;
	   credit >= 0 && prev && NONJUMP_INSN_P (prev);
	   prev = prev_real_insn (prev))
	{
	  jump_to_next = false;
	  if (GET_CODE (PATTERN (prev)) == USE
	      || GET_CODE (PATTERN (prev)) == CLOBBER)
	    continue;
	  if (rtx_sequence *prev_seq = dyn_cast <rtx_sequence *> (PATTERN (prev)))
	    {
	      prev = prev_seq->insn (1);
	      if (INSN_UID (prev) == INSN_UID (next))
		{
		  /* Delay slot was filled with insn at jump target.  */
		  jump_to_next = true;
		  continue;
		}
	    }

	  if (slot &&
	      get_attr_in_delay_slot (prev) == IN_DELAY_SLOT_YES)
	    slot = 0;
	  credit -= get_attr_length (prev);
	}
      if (prev && jump_to_label_p (prev))
	{
	  rtx_insn *x;
	  if (jump_to_next
	      || next_real_insn (JUMP_LABEL_AS_INSN (prev)) == next
	      /* If relax_delay_slots() decides NEXT was redundant
		 with some previous instruction, it will have
		 redirected PREV's jump to the following insn.  */
	      || JUMP_LABEL (prev) == next_nonnote_insn (next)
	      /* There is no upper bound on redundant instructions
		 that might have been skipped, but we must not put an
		 alignment where none had been before.  */
	      || (x = (NEXT_INSN (NEXT_INSN (PREV_INSN (prev)))),
		  (INSN_P (x)
		   && (INSN_CODE (x) == CODE_FOR_block_branch_redirect
		       || INSN_CODE (x) == CODE_FOR_indirect_jump_scratch
		       || INSN_CODE (x) == CODE_FOR_stuff_delay_slot))))
	    {
	      rtx pat = PATTERN (prev);
	      if (GET_CODE (pat) == PARALLEL)
		pat = XVECEXP (pat, 0, 0);
	      if (credit - slot >= (GET_CODE (SET_SRC (pat)) == PC ? 2 : 0))
		return 0;
	    }
	}
    }

  return align_jumps.levels[0].log;
}
/* If we are inside a phony loop, almost any kind of label can turn up as the
   first one in the loop.  Aligning a braf label causes incorrect switch
   destination addresses; we can detect braf labels because they are
   followed by a BARRIER.
   Applying loop alignment to small constant or switch tables is a waste
   of space, so we suppress this too.  */
int
sh_loop_align (rtx_insn *label)
{
  rtx_insn *next = label;

  if (! optimize || optimize_size)
    return 0;

  do
    next = next_nonnote_insn (next);
  while (next && LABEL_P (next));

  if (! next
      || ! INSN_P (next)
      || recog_memoized (next) == CODE_FOR_consttable_2)
    return 0;

  return align_loops.levels[0].log;
}
5883 /* Do a final pass over the function, just before delayed branch
5888 rtx_insn
*first
, *insn
, *mova
= NULL
;
5890 rtx r0_rtx
= gen_rtx_REG (Pmode
, 0);
5891 rtx r0_inc_rtx
= gen_rtx_POST_INC (Pmode
, r0_rtx
);
5893 first
= get_insns ();
5894 max_labelno_before_reorg
= max_label_num ();
5896 /* We must split call insns before introducing `mova's. If we're
5897 optimizing, they'll have already been split. Otherwise, make
5898 sure we don't split them too late. */
5900 split_all_insns_noflow ();
5902 /* If relaxing, generate pseudo-ops to associate function calls with
5903 the symbols they call. It does no harm to not generate these
5904 pseudo-ops. However, when we can generate them, it enables the
5905 linker to potentially relax the jsr to a bsr, and eliminate the
5906 register load and, possibly, the constant pool entry. */
5908 mdep_reorg_phase
= SH_INSERT_USES_LABELS
;
5911 /* Remove all REG_LABEL_OPERAND notes. We want to use them for our
5912 own purposes. This works because none of the remaining passes
5913 need to look at them.
5915 ??? But it may break in the future. We should use a machine
5916 dependent REG_NOTE, or some other approach entirely. */
5917 for (insn
= first
; insn
; insn
= NEXT_INSN (insn
))
5923 while ((note
= find_reg_note (insn
, REG_LABEL_OPERAND
,
5925 remove_note (insn
, note
);
5929 for (insn
= first
; insn
; insn
= NEXT_INSN (insn
))
5931 rtx pattern
, reg
, set
, dies
;
5932 rtx_code_label
*label
;
5933 rtx_insn
*link
, *scan
;
5934 int rescan
= 0, foundinsn
= 0;
5938 pattern
= PATTERN (insn
);
5940 if (GET_CODE (pattern
) == PARALLEL
)
5941 pattern
= XVECEXP (pattern
, 0, 0);
5942 if (GET_CODE (pattern
) == SET
)
5943 pattern
= SET_SRC (pattern
);
5945 if (GET_CODE (pattern
) != CALL
5946 || !MEM_P (XEXP (pattern
, 0)))
5949 reg
= XEXP (XEXP (pattern
, 0), 0);
5953 reg
= sfunc_uses_reg (insn
);
5961 /* Try scanning backward to find where the register is set. */
5963 for (scan
= PREV_INSN (insn
);
5964 scan
&& !LABEL_P (scan
);
5965 scan
= PREV_INSN (scan
))
5967 if (! INSN_P (scan
))
5970 if (! reg_mentioned_p (reg
, scan
))
5973 if (noncall_uses_reg (reg
, scan
, &set
))
5986 /* The register is set at LINK. */
5988 /* We can only optimize the function call if the register is
5989 being set to a symbol. In theory, we could sometimes
5990 optimize calls to a constant location, but the assembler
5991 and linker do not support that at present. */
5992 if (GET_CODE (SET_SRC (set
)) != SYMBOL_REF
5993 && GET_CODE (SET_SRC (set
)) != LABEL_REF
)
5996 /* Scan forward from LINK to the place where REG dies, and
5997 make sure that the only insns which use REG are
5998 themselves function calls. */
6000 /* ??? This doesn't work for call targets that were allocated
6001 by reload, since there may not be a REG_DEAD note for the
6005 for (scan
= NEXT_INSN (link
); scan
; scan
= NEXT_INSN (scan
))
6009 /* Don't try to trace forward past a CODE_LABEL if we haven't
6010 seen INSN yet. Ordinarily, we will only find the setting insn
6011 if it is in the same basic block. However,
6012 cross-jumping can insert code labels in between the load and
6013 the call, and can result in situations where a single call
6014 insn may have two targets depending on where we came from. */
6016 if (LABEL_P (scan
) && ! foundinsn
)
6019 if (! INSN_P (scan
))
6022 /* Don't try to trace forward past a JUMP. To optimize
6023 safely, we would have to check that all the
6024 instructions at the jump destination did not use REG. */
6029 if (! reg_mentioned_p (reg
, scan
))
6032 if (noncall_uses_reg (reg
, scan
, &scanset
))
6039 && (CALL_P (scan
) || sfunc_uses_reg (scan
)))
6041 /* There is a function call to this register other
6042 than the one we are checking. If we optimize
6043 this call, we need to rescan again below. */
6047 /* ??? We shouldn't have to worry about SCANSET here.
6048 We should just be able to check for a REG_DEAD note
6049 on a function call. However, the REG_DEAD notes are
6050 apparently not dependable around libcalls; c-torture
6051 execute/920501-2 is a test case. If SCANSET is set,
6052 then this insn sets the register, so it must have
6053 died earlier. Unfortunately, this will only handle
6054 the cases in which the register is, in fact, set in a
6057 /* ??? We shouldn't have to use FOUNDINSN here.
6058 This dates back to when we used LOG_LINKS to find
6059 the most recent insn which sets the register. */
6063 || find_reg_note (scan
, REG_DEAD
, reg
)))
6072 /* Either there was a branch, or some insn used REG
6073 other than as a function call address. */
6077 /* Create a code label, and put it in a REG_LABEL_OPERAND note
6078 on the insn which sets the register, and on each call insn
6079 which uses the register. In final_prescan_insn we look for
6080 the REG_LABEL_OPERAND notes, and output the appropriate label
6083 label
= gen_label_rtx ();
6084 add_reg_note (link
, REG_LABEL_OPERAND
, label
);
6085 add_reg_note (insn
, REG_LABEL_OPERAND
, label
);
6093 scan
= NEXT_INSN (scan
);
6096 && reg_mentioned_p (reg
, scan
))
6097 || ((reg2
= sfunc_uses_reg (scan
))
6098 && REGNO (reg2
) == REGNO (reg
))))
6099 add_reg_note (scan
, REG_LABEL_OPERAND
, label
);
6101 while (scan
!= dies
);
6107 fixup_addr_diff_vecs (first
);
6111 mdep_reorg_phase
= SH_SHORTEN_BRANCHES0
;
6112 shorten_branches (first
);
6115 /* Scan the function looking for move instructions which have to be
6116 changed to pc-relative loads and insert the literal tables. */
6117 mdep_reorg_phase
= SH_FIXUP_PCLOAD
;
6118 for (insn
= first
, num_mova
= 0; insn
; insn
= NEXT_INSN (insn
))
6122 /* ??? basic block reordering can move a switch table dispatch
6123 below the switch table. Check if that has happened.
6124 We only have the addresses available when optimizing; but then,
6125 this check shouldn't be needed when not optimizing. */
6126 if (!untangle_mova (&num_mova
, &mova
, insn
))
6132 else if (JUMP_TABLE_DATA_P (insn
)
6133 && GET_CODE (PATTERN (insn
)) == ADDR_DIFF_VEC
6135 /* ??? loop invariant motion can also move a mova out of a
6136 loop. Since loop does this code motion anyway, maybe we
6137 should wrap UNSPEC_MOVA into a CONST, so that reload can
6140 && GET_MODE (prev_nonnote_insn (insn
)) == VOIDmode
)
6141 || (prev_nonnote_insn (insn
)
6142 == XEXP (MOVA_LABELREF (mova
), 0))))
6149 /* Some code might have been inserted between the mova and
6150 its ADDR_DIFF_VEC. Check if the mova is still in range. */
6151 for (scan
= mova
, total
= 0; scan
!= insn
; scan
= NEXT_INSN (scan
))
6152 total
+= get_attr_length (scan
);
6154 /* range of mova is 1020, add 4 because pc counts from address of
6155 second instruction after this one, subtract 2 in case pc is 2
6156 byte aligned. Possible alignment needed for the ADDR_DIFF_VEC
6157 cancels out with alignment effects of the mova itself. */
6160 /* Change the mova into a load, and restart scanning
6161 there. broken_move will then return true for mova. */
      if (broken_move (insn)
	  || (NONJUMP_INSN_P (insn)
	      && recog_memoized (insn) == CODE_FOR_casesi_worker_2))
	{
	  rtx_insn *scan;
	  /* Scan ahead looking for a barrier to stick the constant table
	     behind.  */
	  rtx_insn *barrier = find_barrier (num_mova, mova, insn);
	  rtx_insn *last_float_move = NULL;
	  rtx last_float = 0, *last_float_addr = NULL;
	  int need_aligned_label = 0;

	  if (num_mova && ! mova_p (mova))
	    {
	      /* find_barrier had to change the first mova into a
		 pcload; thus, we have to start with this new pcload.  */
	      insn = mova;
	      num_mova = 0;
	    }
	  /* Now find all the moves between the points and modify them.  */
	  for (scan = insn; scan != barrier; scan = NEXT_INSN (scan))
	    {
	      if (LABEL_P (scan))
		last_float = 0;
	      if (NONJUMP_INSN_P (scan)
		  && recog_memoized (scan) == CODE_FOR_casesi_worker_2)
		need_aligned_label = 1;
	      if (broken_move (scan))
		{
		  rtx *patp = &PATTERN (scan), pat = *patp;
		  rtx src, dst;
		  rtx lab;
		  rtx newsrc;
		  machine_mode mode;

		  if (GET_CODE (pat) == PARALLEL)
		    patp = &XVECEXP (pat, 0, 0), pat = *patp;
		  src = SET_SRC (pat);
		  dst = SET_DEST (pat);
		  mode = GET_MODE (dst);

		  if (mode == SImode && satisfies_constraint_I16 (src)
		      && REGNO (dst) != FPUL_REG)
		    {
		      int offset = 0;

		      mode = HImode;
		      while (GET_CODE (dst) == SUBREG)
			{
			  offset += subreg_regno_offset (REGNO (SUBREG_REG (dst)),
							 GET_MODE (SUBREG_REG (dst)),
							 SUBREG_BYTE (dst),
							 GET_MODE (dst));
			  dst = SUBREG_REG (dst);
			}
		      dst = gen_rtx_REG (HImode, REGNO (dst) + offset);
		    }
		  if (REG_P (dst) && FP_ANY_REGISTER_P (REGNO (dst)))
		    {
		      /* This must be an insn that clobbers r0.  */
		      rtx *clobberp = &XVECEXP (PATTERN (scan), 0,
						XVECLEN (PATTERN (scan), 0)
						- 1);
		      rtx clobber = *clobberp;

		      gcc_assert (GET_CODE (clobber) == CLOBBER
				  && rtx_equal_p (XEXP (clobber, 0), r0_rtx));

		      if (last_float
			  && reg_set_between_p (r0_rtx, last_float_move, scan))
			last_float = 0;
		      lab = add_constant (src, mode, last_float);
		      if (lab)
			emit_insn_before (gen_mova (lab), scan);
		      else
			{
			  /* There will be a REG_UNUSED note for r0 on
			     LAST_FLOAT_MOVE; we have to change it to REG_INC,
			     lest reorg:mark_target_live_regs will not
			     consider r0 to be used, and we end up with delay
			     slot insn in front of SCAN that clobbers r0.  */
			  rtx note
			    = find_regno_note (last_float_move, REG_UNUSED, 0);

			  /* If we are not optimizing, then there may not be
			     a note.  */
			  if (note)
			    PUT_REG_NOTE_KIND (note, REG_INC);

			  *last_float_addr = r0_inc_rtx;
			}
		      last_float_move = scan;
		      last_float = src;
		      newsrc = gen_const_mem (mode,
					      (((TARGET_SH4 && ! TARGET_FMOVD)
						|| REGNO (dst) == FPUL_REG)
					       ? r0_rtx
					       : r0_inc_rtx));
		      last_float_addr = &XEXP (newsrc, 0);

		      /* Remove the clobber of r0.  */
		      *clobberp = gen_rtx_CLOBBER (GET_MODE (clobber),
						   gen_rtx_SCRATCH (Pmode));
		    }
		  /* This is a mova needing a label.  Create it.  */
		  else if (GET_CODE (src) == UNSPEC
			   && XINT (src, 1) == UNSPEC_MOVA
			   && GET_CODE (XVECEXP (src, 0, 0)) == CONST)
		    {
		      lab = add_constant (XVECEXP (src, 0, 0), mode, 0);
		      newsrc = gen_rtx_LABEL_REF (VOIDmode, lab);
		      newsrc = gen_rtx_UNSPEC (SImode,
					       gen_rtvec (1, newsrc),
					       UNSPEC_MOVA);
		    }
		  else if (GET_CODE (src) == UNSPEC_VOLATILE
			   && XINT (src, 1) == UNSPECV_SP_SWITCH_B)
		    {
		      newsrc = XVECEXP (src, 0, 0);
		      XVECEXP (src, 0, 0) = gen_const_mem (mode, newsrc);
		      INSN_CODE (scan) = -1;
		      continue;
		    }
		  else
		    {
		      lab = add_constant (src, mode, 0);
		      newsrc = gen_rtx_LABEL_REF (VOIDmode, lab);
		      newsrc = gen_const_mem (mode, newsrc);
		    }
		  *patp = gen_rtx_SET (dst, newsrc);
		  INSN_CODE (scan) = -1;
		}
	    }
	  dump_table (need_aligned_label ? insn : 0, barrier);
	  insn = barrier;
	}
    }
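  /* To illustrate the effect of the loop above (the insns here are only an
     example, not output of this code): a "broken" move whose constant
     source cannot be encoded as an immediate ends up as a pc-relative load

	 mov.l   .L123,r1
	 ...
	 .L123:  .long   <constant>

     where the .long entry lives in the constant table that dump_table just
     emitted behind the barrier found by find_barrier.  */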
  label_ref_list_d_pool.release ();
  for (insn = first; insn; insn = NEXT_INSN (insn))
    PUT_MODE (insn, VOIDmode);

  mdep_reorg_phase = SH_SHORTEN_BRANCHES1;
  INSN_ADDRESSES_FREE ();
  split_branches (first);

  /* The INSN_REFERENCES_ARE_DELAYED in sh.h is problematic because it
     also has an effect on the register that holds the address of the sfunc.
     Insert an extra dummy insn in front of each sfunc that pretends to
     use this register.  */
  if (flag_delayed_branch)
    {
      for (insn = first; insn; insn = NEXT_INSN (insn))
	{
	  rtx reg = sfunc_uses_reg (insn);

	  if (! reg)
	    continue;
	  emit_insn_before (gen_use_sfunc_addr (reg), insn);
	}
    }
  mdep_reorg_phase = SH_AFTER_MDEP_REORG;
}
/* Return the UID of the insn that follows the specified label.  */
static int
get_dest_uid (rtx_insn *label, int max_uid)
{
  rtx_insn *dest = next_real_insn (label);

  if (! dest)
    /* This can happen for an undefined label.  */
    return 0;

  int dest_uid = INSN_UID (dest);
  /* If this is a newly created branch redirection blocking instruction,
     we cannot index the branch_uid or insn_addresses arrays with its
     uid.  But then, we won't need to, because the actual destination is
     the following branch.  */
  while (dest_uid >= max_uid)
    {
      dest = NEXT_INSN (dest);
      dest_uid = INSN_UID (dest);
    }
  if (JUMP_P (dest) && GET_CODE (PATTERN (dest)) == RETURN)
    return 0;
  return dest_uid;
}
/* Split condbranches that are out of range.  Also add clobbers for
   scratch registers that are needed in far jumps.
   We do this before delay slot scheduling, so that it can take our
   newly created instructions into account.  It also allows us to
   find branches with common targets more easily.  */
static void
split_branches (rtx_insn *first)
{
  rtx_insn *insn;
  struct far_branch **uid_branch, *far_branch_list = 0;
  int max_uid = get_max_uid ();
  int ok;

  /* Find out which branches are out of range.  */
  shorten_branches (first);

  uid_branch = (struct far_branch **) alloca (max_uid * sizeof *uid_branch);
  memset ((char *) uid_branch, 0, max_uid * sizeof *uid_branch);

  for (insn = first; insn; insn = NEXT_INSN (insn))
    if (! INSN_P (insn))
      continue;
    else if (insn->deleted ())
      {
	/* Shorten_branches would split this instruction again,
	   so transform it into a note.  */
	SET_INSN_DELETED (insn);
      }
    else if (JUMP_P (insn))
      {
	enum attr_type type = get_attr_type (insn);
	if (type == TYPE_CBRANCH)
	  {
	    rtx_insn *next, *beyond;

	    if (get_attr_length (insn) > 4)
	      {
		rtx src = SET_SRC (PATTERN (insn));
		rtx_insn *olabel
		  = safe_as_a <rtx_insn *> (XEXP (XEXP (src, 1), 0));
		int addr = INSN_ADDRESSES (INSN_UID (insn));
		rtx_insn *label = 0;
		int dest_uid = get_dest_uid (olabel, max_uid);
		struct far_branch *bp = uid_branch[dest_uid];

		/* redirect_jump needs a valid JUMP_LABEL, and it might delete
		   the label if the LABEL_NUSES count drops to zero.  There is
		   always a jump_optimize pass that sets these values, but it
		   proceeds to delete unreferenced code, and then if not
		   optimizing, to un-delete the deleted instructions, thus
		   leaving labels with too low uses counts.  */
		if (! optimize)
		  {
		    JUMP_LABEL (insn) = olabel;
		    LABEL_NUSES (olabel)++;
		  }
		if (! bp)
		  {
		    bp = (struct far_branch *) alloca (sizeof *bp);
		    uid_branch[dest_uid] = bp;
		    bp->prev = far_branch_list;
		    far_branch_list = bp;
		    bp->far_label = as_a <rtx_insn *> (
				      XEXP (XEXP (SET_SRC (PATTERN (insn)), 1),
					    0));
		    LABEL_NUSES (bp->far_label)++;
		  }
		else
		  {
		    label = bp->near_label;
		    if (! label && bp->address - addr >= CONDJUMP_MIN)
		      {
			rtx_insn *block = bp->insert_place;

			if (GET_CODE (PATTERN (block)) == RETURN)
			  block = PREV_INSN (block);
			else
			  block = gen_block_redirect (block,
						      bp->address, 2);
			label = emit_label_after (gen_label_rtx (),
						  PREV_INSN (block));
			bp->near_label = label;
		      }
		    else if (label && ! NEXT_INSN (label))
		      {
			if (addr + 2 - bp->address <= CONDJUMP_MAX)
			  bp->insert_place = insn;
			else
			  gen_far_branch (bp);
		      }
		  }
		if (! label
		    || (NEXT_INSN (label) && bp->address - addr < CONDJUMP_MIN))
		  {
		    bp->near_label = label = gen_label_rtx ();
		    bp->insert_place = insn;
		    bp->address = addr;
		  }
		ok = redirect_jump (as_a <rtx_jump_insn *> (insn), label, 0);
		gcc_assert (ok);
	      }
	    else
	      {
		/* get_attr_length (insn) == 2 */
		/* Check if we have a pattern where reorg wants to redirect
		   the branch to a label from an unconditional branch that
		   is too far away.  */
		/* We can't use JUMP_LABEL here because it might be undefined
		   when not optimizing.  */
		/* A syntax error might cause beyond to be NULL_RTX.  */
		rtx temp = XEXP (XEXP (SET_SRC (PATTERN (insn)), 1), 0);
		beyond = next_active_insn (as_a <rtx_insn *> (temp));

		if (beyond
		    && (JUMP_P (beyond)
			|| ((beyond = next_active_insn (beyond))
			    && JUMP_P (beyond)))
		    && GET_CODE (PATTERN (beyond)) == SET
		    && recog_memoized (beyond) == CODE_FOR_jump_compact
		    && ((INSN_ADDRESSES
			 (INSN_UID (XEXP (SET_SRC (PATTERN (beyond)), 0)))
			 - INSN_ADDRESSES (INSN_UID (insn)) + (unsigned) 252)
			> 252 + 258 + 2))
		  gen_block_redirect (beyond,
				      INSN_ADDRESSES (INSN_UID (beyond)), 1);
	      }

	    next = next_active_insn (insn);

	    if (next
		&& (JUMP_P (next)
		    || ((next = next_active_insn (next))
			&& JUMP_P (next)))
		&& GET_CODE (PATTERN (next)) == SET
		&& recog_memoized (next) == CODE_FOR_jump_compact
		&& ((INSN_ADDRESSES
		     (INSN_UID (XEXP (SET_SRC (PATTERN (next)), 0)))
		     - INSN_ADDRESSES (INSN_UID (insn)) + (unsigned) 252)
		    > 252 + 258 + 2))
	      gen_block_redirect (next, INSN_ADDRESSES (INSN_UID (next)), 1);
	  }
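	/* A note on the magic numbers in the two distance checks above: a
	   conditional branch reaches roughly -252 .. +258 bytes (CONDJUMP_MIN
	   / CONDJUMP_MAX), so 252 + 258 spans the full reach of a condbranch
	   and the extra 2 allows for the branch insn itself; a jump whose
	   destination lies beyond that needs a blocking insn so that reorg
	   does not try to redirect the condbranch at it.  */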
	else if (type == TYPE_JUMP || type == TYPE_RETURN)
	  {
	    int addr = INSN_ADDRESSES (INSN_UID (insn));
	    rtx_insn *far_label = 0;
	    int dest_uid = 0;
	    struct far_branch *bp;

	    if (type == TYPE_JUMP)
	      {
		if (CROSSING_JUMP_P (insn))
		  {
		    emit_insn_before (gen_block_branch_redirect (const0_rtx),
				      insn);
		    continue;
		  }

		far_label = as_a <rtx_insn *> (
			      XEXP (SET_SRC (PATTERN (insn)), 0));
		dest_uid = get_dest_uid (far_label, max_uid);
		if (! dest_uid)
		  {
		    /* Parse errors can lead to labels outside
		       the insn stream.  */
		    if (! NEXT_INSN (far_label))
		      continue;

		    if (! optimize)
		      {
			JUMP_LABEL (insn) = far_label;
			LABEL_NUSES (far_label)++;
		      }
		    redirect_jump (as_a <rtx_jump_insn *> (insn), ret_rtx, 1);
		    far_label = 0;
		  }
	      }
	    bp = uid_branch[dest_uid];
	    if (! bp)
	      {
		bp = (struct far_branch *) alloca (sizeof *bp);
		uid_branch[dest_uid] = bp;
		bp->prev = far_branch_list;
		far_branch_list = bp;
		bp->near_label = 0;
		bp->far_label = far_label;
		if (far_label)
		  LABEL_NUSES (far_label)++;
	      }
	    else if (bp->near_label && ! NEXT_INSN (bp->near_label))
	      {
		if (addr - bp->address <= CONDJUMP_MAX)
		  emit_label_after (bp->near_label, PREV_INSN (insn));
		else
		  {
		    gen_far_branch (bp);
		    bp->near_label = 0;
		  }
	      }
	    else
	      bp->near_label = 0;
	    bp->address = addr;
	    bp->insert_place = insn;
	    if (! far_label)
	      emit_insn_before (gen_block_branch_redirect (const0_rtx), insn);
	    else
	      gen_block_redirect (insn, addr, bp->near_label ? 2 : 0);
	  }
      }
  /* Generate all pending far branches,
     and free our references to the far labels.  */
  while (far_branch_list)
    {
      if (far_branch_list->near_label
	  && ! NEXT_INSN (far_branch_list->near_label))
	gen_far_branch (far_branch_list);
      if (optimize
	  && far_branch_list->far_label
	  && ! --LABEL_NUSES (far_branch_list->far_label))
	delete_insn (far_branch_list->far_label);
      far_branch_list = far_branch_list->prev;
    }

  /* Instruction length information is no longer valid due to the new
     instructions that have been generated.  */
  init_insn_lengths ();
}
/* Dump out instruction addresses, which is useful for debugging the
   constant pool table stuff.

   If relaxing, output the label and pseudo-ops used to link together
   calls and the instruction which set the registers.

   ??? The addresses printed by this routine for insns are nonsense for
   insns which are inside of a sequence where none of the inner insns have
   variable length.  This is because the second pass of shorten_branches
   does not bother to update them.  */
void
final_prescan_insn (rtx_insn *insn, rtx *opvec ATTRIBUTE_UNUSED,
		    int noperands ATTRIBUTE_UNUSED)
{
  if (TARGET_DUMPISIZE)
    fprintf (asm_out_file, "\n! at %04x\n", INSN_ADDRESSES (INSN_UID (insn)));

  if (TARGET_RELAX)
    {
      if (rtx note = find_reg_note (insn, REG_LABEL_OPERAND, NULL_RTX))
	{
	  rtx pattern = PATTERN (insn);
	  if (GET_CODE (pattern) == PARALLEL)
	    pattern = XVECEXP (pattern, 0, 0);
	  switch (GET_CODE (pattern))
	    {
	    case SET:
	      if (GET_CODE (SET_SRC (pattern)) != CALL
		  && get_attr_type (insn) != TYPE_SFUNC)
		{
		  targetm.asm_out.internal_label
		    (asm_out_file, "L", CODE_LABEL_NUMBER (XEXP (note, 0)));
		  break;
		}
	      /* FALLTHROUGH */
	    case CALL:
	      asm_fprintf (asm_out_file, "\t.uses %LL%d\n",
			   CODE_LABEL_NUMBER (XEXP (note, 0)));
	      break;

	    default:
	      break;
	    }
	}
    }
}
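/* An illustrative sketch of the relaxation output above (not literal
   output of this code): the internal label lands at the insn that sets the
   call-address register, and each call site gets a .uses pseudo-op
   referring back to it:

   L5:	mov.l	.L7,r1		! address load, carries the label
	...
	.uses	L5
	jsr	@r1		! call insn with the REG_LABEL_OPERAND note

   so the linker can relax the call.  */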
/* Dump out any constants accumulated in the final pass.  These will
   only be labels.  */
const char *
output_jump_label_table (void)
{
  if (pool_size)
    {
      fprintf (asm_out_file, "\t.align 2\n");
      for (int i = 0; i < pool_size; i++)
	{
	  pool_node *p = &pool_vector[i];

	  (*targetm.asm_out.internal_label) (asm_out_file, "L",
					     CODE_LABEL_NUMBER (p->label));
	  output_asm_insn (".long	%O0", &p->value);
	}
      pool_size = 0;
    }

  return "";
}
/* A full frame looks like:

   arg-5
   arg-4
   [ if current_function_anonymous_args
   arg-3
   arg-2
   arg-1
   arg-0 ]
   saved-fp
   saved-r10
   saved-r11
   saved-r12
   saved-pr
   local-n
   ..
   local-1
   local-0        <- fp points here.

   Number of bytes pushed for anonymous args, used to pass information
   between expand_prologue and expand_epilogue.

   Adjust the stack by SIZE bytes.  REG holds the rtl of the register to be
   adjusted.  If epilogue_p is zero, this is for a prologue; otherwise, it's
   for an epilogue and a negative value means that it's for a sibcall
   epilogue.  If LIVE_REGS_MASK is nonzero, it points to a HARD_REG_SET of
   all the registers that are about to be restored, and hence dead.  */
static void
output_stack_adjust (int size, rtx reg, int epilogue_p,
		     HARD_REG_SET *live_regs_mask, bool frame_p)
{
  rtx_insn *(*emit_fn) (rtx) = frame_p ? &emit_frame_insn : &emit_insn;
  if (size)
    {
      HOST_WIDE_INT align = STACK_BOUNDARY / BITS_PER_UNIT;

#if 0
      /* This test is bogus, as output_stack_adjust is used to re-align the
	 stack.  */
      gcc_assert (!(size % align));
#endif

      if (CONST_OK_FOR_ADD (size))
	emit_fn (GEN_ADD3 (reg, reg, GEN_INT (size)));
      /* Try to do it with two partial adjustments; however, we must make
	 sure that the stack is properly aligned at all times, in case
	 an interrupt occurs between the two partial adjustments.  */
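      /* For example, with ALIGN == 4 and SIZE == 192 (too big for one
	 8-bit add constant): 192 / 2 & -4 == 96 and 192 - 96 == 96, so
	 two adds of 96 are emitted and the stack pointer stays 4-byte
	 aligned after each step.  */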
      else if (CONST_OK_FOR_ADD (size / 2 & -align)
	       && CONST_OK_FOR_ADD (size - (size / 2 & -align)))
	{
	  emit_fn (GEN_ADD3 (reg, reg, GEN_INT (size / 2 & -align)));
	  emit_fn (GEN_ADD3 (reg, reg, GEN_INT (size - (size / 2 & -align))));
	}
      else
	{
	  rtx const_reg;
	  rtx_insn *insn;
	  int temp = epilogue_p ? 7 : 1;
	  int i;

	  /* If TEMP is invalid, we could temporarily save a general
	     register to MACL.  However, there is currently no need
	     to handle this case, so just die when we see it.  */
	  if (epilogue_p < 0
	      || current_function_interrupt
	      || ! call_used_regs[temp] || fixed_regs[temp])
	    temp = -1;
	  if (temp < 0 && ! current_function_interrupt && epilogue_p >= 0)
	    {
	      HARD_REG_SET temps = (regs_invalidated_by_call
				    & ~fixed_reg_set);
	      if (epilogue_p > 0)
		{
		  int nreg = 0;
		  if (crtl->return_rtx)
		    {
		      machine_mode mode;
		      mode = GET_MODE (crtl->return_rtx);
		      if (BASE_RETURN_VALUE_REG (mode) == FIRST_RET_REG)
			nreg = hard_regno_nregs (FIRST_RET_REG, mode);
		    }
		  for (i = 0; i < nreg; i++)
		    CLEAR_HARD_REG_BIT (temps, FIRST_RET_REG + i);
		  if (crtl->calls_eh_return)
		    {
		      CLEAR_HARD_REG_BIT (temps, EH_RETURN_STACKADJ_REGNO);
		      for (i = 0; i <= 3; i++)
			CLEAR_HARD_REG_BIT (temps, EH_RETURN_DATA_REGNO (i));
		    }
		}
	      if (epilogue_p <= 0)
		{
		  for (i = FIRST_PARM_REG;
		       i < FIRST_PARM_REG + NPARM_REGS (SImode); i++)
		    CLEAR_HARD_REG_BIT (temps, i);
		  if (cfun->static_chain_decl != NULL)
		    CLEAR_HARD_REG_BIT (temps, STATIC_CHAIN_REGNUM);
		}
	      temp = scavenge_reg (&temps);
	    }
	  if (temp < 0 && live_regs_mask)
	    {
	      HARD_REG_SET temps;

	      temps = *live_regs_mask;
	      CLEAR_HARD_REG_BIT (temps, REGNO (reg));
	      temp = scavenge_reg (&temps);
	    }
	  if (temp < 0)
	    {
	      rtx adj_reg, tmp_reg, mem;

	      /* If we reached here, the most likely case is the (sibcall)
		 epilogue.  Put a special push/pop sequence for such case as
		 the last resort.  This looks lengthy but would not be problem
		 because it seems to be very rare.  */
	      gcc_assert (epilogue_p);

	      /* ??? There is still the slight possibility that r4 or
		 r5 have been reserved as fixed registers or assigned
		 as global registers, and they change during an
		 interrupt.  There are possible ways to handle this:

		 - If we are adjusting the frame pointer (r14), we can do
		   with a single temp register and an ordinary push / pop
		   on the stack.
		 - Grab any call-used or call-saved registers (i.e. not
		   fixed or globals) for the temps we need.  We might
		   also grab r14 if we are adjusting the stack pointer.
		   If we can't find enough available registers, issue
		   a diagnostic and die - the user must have reserved
		   way too many registers.
		 But since all this is rather unlikely to happen and
		 would require extra testing, we just die if r4 / r5
		 are not available.  */
	      gcc_assert (!fixed_regs[4] && !fixed_regs[5]
			  && !global_regs[4] && !global_regs[5]);

	      adj_reg = gen_rtx_REG (GET_MODE (reg), 4);
	      tmp_reg = gen_rtx_REG (GET_MODE (reg), 5);
	      emit_move_insn (gen_tmp_stack_mem (Pmode, reg), adj_reg);
	      emit_insn (GEN_MOV (adj_reg, GEN_INT (size)));
	      emit_insn (GEN_ADD3 (adj_reg, adj_reg, reg));
	      mem = gen_tmp_stack_mem (Pmode, gen_rtx_PRE_DEC (Pmode, adj_reg));
	      emit_move_insn (mem, tmp_reg);
	      emit_move_insn (tmp_reg, gen_tmp_stack_mem (Pmode, reg));
	      mem = gen_tmp_stack_mem (Pmode, gen_rtx_PRE_DEC (Pmode, adj_reg));
	      emit_move_insn (mem, tmp_reg);
	      emit_move_insn (reg, adj_reg);
	      mem = gen_tmp_stack_mem (Pmode, gen_rtx_POST_INC (Pmode, reg));
	      emit_move_insn (adj_reg, mem);
	      mem = gen_tmp_stack_mem (Pmode, gen_rtx_POST_INC (Pmode, reg));
	      emit_move_insn (tmp_reg, mem);
	      /* Tell flow the insns that pop r4/r5 aren't dead.  */
	      emit_use (tmp_reg);
	      emit_use (adj_reg);
	      return;
	    }
	  const_reg = gen_rtx_REG (GET_MODE (reg), temp);

	  /* If SIZE is negative, subtract the positive value.
	     This sometimes allows a constant pool entry to be shared
	     between prologue and epilogue code.  */
	  if (size < 0)
	    {
	      emit_insn (GEN_MOV (const_reg, GEN_INT (-size)));
	      insn = emit_fn (GEN_SUB3 (reg, reg, const_reg));
	    }
	  else
	    {
	      emit_insn (GEN_MOV (const_reg, GEN_INT (size)));
	      insn = emit_fn (GEN_ADD3 (reg, reg, const_reg));
	    }
	  add_reg_note (insn, REG_FRAME_RELATED_EXPR,
			gen_rtx_SET (reg, gen_rtx_PLUS (SImode, reg,
							GEN_INT (size))));
	}
    }
}
/* Emit the specified insn and mark it as frame related.  */
static rtx_insn *
emit_frame_insn (rtx x)
{
  rtx_insn *insn = emit_insn (x);
  RTX_FRAME_RELATED_P (insn) = 1;
  return insn;
}
/* Output RTL to push register RN onto the stack.  */
static rtx
push (int rn)
{
  rtx x;
  if (rn == FPUL_REG)
    x = gen_push_fpul ();
  else if (rn == FPSCR_REG)
    x = gen_push_fpscr ();
  else if (TARGET_FPU_DOUBLE && TARGET_FMOVD
	   && ! TARGET_FPU_SINGLE && FP_OR_XD_REGISTER_P (rn))
    {
      if (FP_REGISTER_P (rn) && (rn - FIRST_FP_REG) & 1)
	return NULL_RTX;
      x = gen_push_4 (gen_rtx_REG (DFmode, rn));
    }
  else if (TARGET_SH2E && FP_REGISTER_P (rn))
    x = gen_push_e (gen_rtx_REG (SFmode, rn));
  else
    x = gen_push (gen_rtx_REG (SImode, rn));

  x = emit_frame_insn (x);
  add_reg_note (x, REG_INC, gen_rtx_REG (SImode, STACK_POINTER_REGNUM));
  return x;
}
/* Output RTL to pop register RN from the stack.  */
static void
pop (int rn)
{
  rtx x, sp_reg, reg;
  if (rn == FPUL_REG)
    x = gen_pop_fpul ();
  else if (rn == FPSCR_REG)
    x = gen_pop_fpscr ();
  else if (TARGET_FPU_DOUBLE && TARGET_FMOVD
	   && ! TARGET_FPU_SINGLE && FP_OR_XD_REGISTER_P (rn))
    {
      if (FP_REGISTER_P (rn) && (rn - FIRST_FP_REG) & 1)
	return;
      x = gen_pop_4 (gen_rtx_REG (DFmode, rn));
    }
  else if (TARGET_SH2E && FP_REGISTER_P (rn))
    x = gen_pop_e (gen_rtx_REG (SFmode, rn));
  else
    x = gen_pop (gen_rtx_REG (SImode, rn));

  x = emit_insn (x);

  sp_reg = gen_rtx_REG (SImode, STACK_POINTER_REGNUM);
  reg = copy_rtx (GET_CODE (PATTERN (x)) == PARALLEL
		  ? SET_DEST (XVECEXP (PATTERN (x), 0, 0))
		  : SET_DEST (PATTERN (x)));
  add_reg_note (x, REG_CFA_RESTORE, reg);
  add_reg_note (x, REG_CFA_ADJUST_CFA,
		gen_rtx_SET (sp_reg,
			     plus_constant (SImode, sp_reg,
					    GET_MODE_SIZE (GET_MODE (reg)))));
  add_reg_note (x, REG_INC, gen_rtx_REG (SImode, STACK_POINTER_REGNUM));
  RTX_FRAME_RELATED_P (x) = 1;
}
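/* A note on the notes above: REG_CFA_RESTORE records that the register has
   been restored from its save slot, and REG_CFA_ADJUST_CFA records the
   stack pointer increment implied by the post-increment load (e.g. SP + 4
   for an SImode pop), so the DWARF unwinder keeps an accurate CFA.  */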
/* Generate code to push the regs specified in the mask.  */
static void
push_regs (HARD_REG_SET *mask, bool interrupt_handler)
{
  bool skip_fpscr = false;

  /* Push PR last; this gives better latencies after the prologue, and
     candidates for the return delay slot when there are no general
     registers pushed.  */
  for (int i = interrupt_handler ? LAST_BANKED_REG + 1 : 0;
       i < FIRST_PSEUDO_REGISTER; i++)
    {
      /* If this is an interrupt handler, and the SZ bit varies,
	 and we have to push any floating point register, we need
	 to switch to the correct precision first.  */
      if (i == FIRST_FP_REG && interrupt_handler && TARGET_FMOVD
	  && hard_reg_set_intersect_p (*mask, reg_class_contents[DF_REGS]))
	{
	  push (FPSCR_REG);
	  fpscr_set_from_mem (NORMAL_MODE (FP_MODE), ~*mask);
	  skip_fpscr = true;
	}
      if (i != PR_REG
	  && (i != FPSCR_REG || ! skip_fpscr)
	  && TEST_HARD_REG_BIT (*mask, i))
	{
	  /* If the ISR has RESBANK attribute assigned, don't push any of
	     the following registers - R0-R14, MACH, MACL and GBR.  */
	  if (! (sh_cfun_resbank_handler_p ()
		 && ((i >= FIRST_GENERAL_REG && i < LAST_GENERAL_REG)
		     || i == MACH_REG
		     || i == MACL_REG
		     || i == GBR_REG)))
	    push (i);
	}
    }

  /* Push banked registers last to improve delay slot opportunities.  */
  if (interrupt_handler)
    {
      bool use_movml = false;

      if (TARGET_SH2A)
	{
	  unsigned int count = 0;

	  for (int i = FIRST_BANKED_REG; i <= LAST_BANKED_REG; i++)
	    if (TEST_HARD_REG_BIT (*mask, i))
	      count++;
	    else
	      break;

	  /* Use movml when all banked registers are pushed.  */
	  if (count == LAST_BANKED_REG - FIRST_BANKED_REG + 1)
	    use_movml = true;
	}

      if (sh_cfun_resbank_handler_p ())
	; /* Do nothing.  */
      else if (use_movml)
	{
	  rtx x, mem, reg, set;
	  rtx sp_reg = gen_rtx_REG (SImode, STACK_POINTER_REGNUM);

	  /* We must avoid scheduling multiple store insn with another
	     insns.  */
	  emit_insn (gen_blockage ());
	  x = gen_movml_push_banked (sp_reg);
	  x = emit_frame_insn (x);
	  for (int i = FIRST_BANKED_REG; i <= LAST_BANKED_REG; i++)
	    {
	      mem = gen_rtx_MEM (SImode, plus_constant (Pmode, sp_reg, i * 4));
	      reg = gen_rtx_REG (SImode, i);
	      add_reg_note (x, REG_CFA_OFFSET, gen_rtx_SET (mem, reg));
	    }

	  set = gen_rtx_SET (sp_reg, plus_constant (Pmode, sp_reg, - 32));
	  add_reg_note (x, REG_CFA_ADJUST_CFA, set);
	  emit_insn (gen_blockage ());
	}
      else
	for (int i = FIRST_BANKED_REG; i <= LAST_BANKED_REG; i++)
	  if (TEST_HARD_REG_BIT (*mask, i))
	    push (i);
    }

  /* Don't push PR register for an ISR with RESBANK attribute assigned.  */
  if (TEST_HARD_REG_BIT (*mask, PR_REG) && !sh_cfun_resbank_handler_p ())
    push (PR_REG);
}
/* Work out the registers which need to be saved, both as a mask and a
   count of saved words.  Return the count.

   If doing a pragma interrupt function, then push all regs used by the
   function, and if we call another function (we can tell by looking at PR),
   make sure that all the regs it clobbers are safe too.  */
static int
calc_live_regs (HARD_REG_SET *live_regs_mask)
{
  unsigned int reg;
  tree attrs;
  bool interrupt_or_trapa_handler, trapa_handler, interrupt_handler;
  bool nosave_low_regs;

  attrs = DECL_ATTRIBUTES (current_function_decl);
  interrupt_or_trapa_handler = sh_cfun_interrupt_handler_p ();
  trapa_handler = lookup_attribute ("trapa_handler", attrs) != NULL_TREE;
  interrupt_handler = interrupt_or_trapa_handler && ! trapa_handler;
  nosave_low_regs = lookup_attribute ("nosave_low_regs", attrs) != NULL_TREE;

  CLEAR_HARD_REG_SET (*live_regs_mask);
  if (TARGET_FPU_DOUBLE && TARGET_FMOVD && interrupt_handler
      && df_regs_ever_live_p (FPSCR_REG))
    target_flags &= ~MASK_FPU_SINGLE;
  /* If we can save a lot of saves by switching to double mode, do that.  */
  else if (TARGET_FPU_DOUBLE && TARGET_FMOVD && TARGET_FPU_SINGLE)
    for (int count = 0, reg = FIRST_FP_REG; reg <= LAST_FP_REG; reg += 2)
      if (df_regs_ever_live_p (reg) && df_regs_ever_live_p (reg+1)
	  && (! call_used_regs[reg]
	      || interrupt_handler)
	  && ++count > 2)
	{
	  target_flags &= ~MASK_FPU_SINGLE;
	  break;
	}

  rtx pr_initial = has_hard_reg_initial_val (Pmode, PR_REG);
  bool pr_live = (pr_initial
		  ? (!REG_P (pr_initial)
		     || REGNO (pr_initial) != (PR_REG))
		  : df_regs_ever_live_p (PR_REG));
  /* For Shcompact, if not optimizing, we end up with a memory reference
     using the return address pointer for __builtin_return_address even
     though there is no actual need to put the PR register on the stack.  */
  pr_live |= df_regs_ever_live_p (RETURN_ADDRESS_POINTER_REGNUM);

  /* Force PR to be live if the prologue has to call the SHmedia
     argument decoder or register saver.  */
  bool has_call = pr_live;

  int count;
  for (count = 0, reg = FIRST_PSEUDO_REGISTER; reg-- != 0; )
    {
      if (reg == PR_REG
	  ? pr_live
	  : interrupt_handler
	  ? (/* Need to save all the regs ever live.  */
	     (df_regs_ever_live_p (reg)
	      || (call_used_regs[reg]
		  && (! fixed_regs[reg] || reg == MACH_REG || reg == MACL_REG
		      || reg == PIC_OFFSET_TABLE_REGNUM)
		  && has_call))
	     && reg != STACK_POINTER_REGNUM && reg != ARG_POINTER_REGNUM
	     && reg != RETURN_ADDRESS_POINTER_REGNUM
	     && reg != T_REG && reg != GBR_REG
	     && reg != FPSCR_MODES_REG && reg != FPSCR_STAT_REG
	     /* Push fpscr only on targets which have FPU */
	     && (reg != FPSCR_REG || TARGET_FPU_ANY))
	  : (/* Only push those regs which are used and need to be saved.  */
	     (false)
	     || (df_regs_ever_live_p (reg)
		 && ((!call_used_regs[reg]
		      && !(reg != PIC_OFFSET_TABLE_REGNUM
			   && fixed_regs[reg]
			   && call_used_or_fixed_reg_p (reg)))
		     || (trapa_handler && reg == FPSCR_REG && TARGET_FPU_ANY)))
	     || (crtl->calls_eh_return
		 && (reg == EH_RETURN_DATA_REGNO (0)
		     || reg == EH_RETURN_DATA_REGNO (1)
		     || reg == EH_RETURN_DATA_REGNO (2)
		     || reg == EH_RETURN_DATA_REGNO (3)))
	     || ((reg == MACL_REG || reg == MACH_REG)
		 && df_regs_ever_live_p (reg)
		 && sh_cfun_attr_renesas_p ())))
	{
	  SET_HARD_REG_BIT (*live_regs_mask, reg);
	  count += GET_MODE_SIZE (REGISTER_NATURAL_MODE (reg));

	  if (TARGET_FPU_DOUBLE && TARGET_FMOVD
	      && GET_MODE_CLASS (REGISTER_NATURAL_MODE (reg)) == MODE_FLOAT)
	    {
	      if (FP_REGISTER_P (reg))
		{
		  if (! TARGET_FPU_SINGLE && ! df_regs_ever_live_p (reg ^ 1))
		    {
		      SET_HARD_REG_BIT (*live_regs_mask, (reg ^ 1));
		      count += GET_MODE_SIZE (REGISTER_NATURAL_MODE (reg ^ 1));
		    }
		}
	      else if (XD_REGISTER_P (reg))
		{
		  /* Must switch to double mode to access these registers.  */
		  target_flags &= ~MASK_FPU_SINGLE;
		}
	    }
	}
      if (nosave_low_regs && reg == R8_REG)
	break;
    }

  return count;
}
/* Code to generate prologue and epilogue sequences */

/* PUSHED is the number of bytes that are being pushed on the
   stack for register saves.  Return the frame size, padded
   appropriately so that the stack stays properly aligned.  */
static HOST_WIDE_INT
rounded_frame_size (int pushed)
{
  HOST_WIDE_INT size = get_frame_size ();
  HOST_WIDE_INT align = STACK_BOUNDARY / BITS_PER_UNIT;

  if (ACCUMULATE_OUTGOING_ARGS)
    size += crtl->outgoing_args_size;
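  /* For example: SIZE == 10, PUSHED == 8, ALIGN == 8 gives
     ((10 + 8 + 7) & -8) - 8 == 24 - 8 == 16, i.e. the 10 bytes of locals
     are padded to 16 so that pushed regs plus frame stay 8-byte
     aligned.  */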
  return ((size + pushed + align - 1) & -align) - pushed;
}
/* Expand code for the function prologue.  */
void
sh_expand_prologue (void)
{
  int save_flags = target_flags;
  tree sp_switch_attr
    = lookup_attribute ("sp_switch", DECL_ATTRIBUTES (current_function_decl));

  current_function_interrupt = sh_cfun_interrupt_handler_p ();

  /* We have pretend args if we had an object sent partially in registers
     and partially on the stack, e.g. a large structure.  */
  int pretend_args = crtl->args.pretend_args_size;
  if (TARGET_VARARGS_PRETEND_ARGS (current_function_decl)
      && (NPARM_REGS (SImode)
	  > crtl->args.info.arg_count[(int) SH_ARG_INT]))
    pretend_args = 0;

  output_stack_adjust (-pretend_args, stack_pointer_rtx, 0, NULL, true);
  int stack_usage = pretend_args;

  /* Emit the code for SETUP_VARARGS.  */
  if (cfun->stdarg)
    {
      if (TARGET_VARARGS_PRETEND_ARGS (current_function_decl))
	{
	  /* Push arg regs as if they'd been provided by caller in stack.  */
	  for (int i = 0; i < NPARM_REGS (SImode); i++)
	    {
	      int rn = NPARM_REGS (SImode) + FIRST_PARM_REG - i - 1;

	      if (i >= (NPARM_REGS (SImode)
			- crtl->args.info.arg_count[(int) SH_ARG_INT]))
		break;
	      push (rn);
	      stack_usage += GET_MODE_SIZE (SImode);
	    }
	}
    }

  /* If we're supposed to switch stacks at function entry, do so now.  */
  if (sp_switch_attr)
    {
      rtx lab, newsrc;
      /* The argument specifies a variable holding the address of the
	 stack the interrupt function should switch to/from at entry/exit.  */
      tree arg = TREE_VALUE ( TREE_VALUE (sp_switch_attr));
      const char* s = ggc_strdup (TREE_STRING_POINTER (arg));
      rtx sp_switch = gen_rtx_SYMBOL_REF (Pmode, s);

      lab = add_constant (sp_switch, SImode, 0);
      newsrc = gen_rtx_LABEL_REF (VOIDmode, lab);

      emit_insn (gen_sp_switch_1 (newsrc));
    }

  HARD_REG_SET live_regs_mask;
  int d = calc_live_regs (&live_regs_mask);
  /* ??? Maybe we could save some switching if we can move a mode switch
     that already happens to be at the function start into the prologue.  */
  if (target_flags != save_flags && ! current_function_interrupt)
    emit_insn (gen_toggle_sz ());

  push_regs (&live_regs_mask, current_function_interrupt);
  stack_usage += d;

  if (flag_pic && !TARGET_FDPIC
      && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
    emit_insn (gen_GOTaddr2picreg (const0_rtx));

  if (target_flags != save_flags && ! current_function_interrupt)
    emit_insn (gen_toggle_sz ());

  target_flags = save_flags;

  output_stack_adjust (-rounded_frame_size (d),
		       stack_pointer_rtx, 0, NULL, true);
  stack_usage += rounded_frame_size (d);

  if (frame_pointer_needed)
    emit_frame_insn (GEN_MOV (hard_frame_pointer_rtx, stack_pointer_rtx));

  /* If we are profiling, make sure no instructions are scheduled before
     the call to mcount.  Similarly if some call instructions are swapped
     before frame related insns, it'll confuse the unwinder because
     currently SH has no unwind info for function epilogues.  */
  if (crtl->profile || flag_exceptions || flag_unwind_tables)
    emit_insn (gen_blockage ());

  if (flag_stack_usage_info)
    current_function_static_stack_size = stack_usage;
}
/* Expand code for the function epilogue.  */
void
sh_expand_epilogue (bool sibcall_p)
{
  int save_flags = target_flags;
  bool fpscr_deferred = false;
  int e = sibcall_p ? -1 : 1;

  HARD_REG_SET live_regs_mask;
  int d = calc_live_regs (&live_regs_mask);

  int save_size = d;
  int frame_size = rounded_frame_size (d);

  if (frame_pointer_needed)
    {
      /* We must avoid scheduling the epilogue with previous basic blocks.
	 See PR/18032 and PR/40313.  */
      emit_insn (gen_blockage ());
      output_stack_adjust (frame_size, hard_frame_pointer_rtx, e,
			   &live_regs_mask, true);

      /* We must avoid moving the stack pointer adjustment past code
	 which reads from the local frame, else an interrupt could
	 occur after the SP adjustment and clobber data in the local
	 frame.  */
      emit_insn (gen_blockage ());
      emit_frame_insn (GEN_MOV (stack_pointer_rtx, hard_frame_pointer_rtx));
    }
  else if (frame_size)
    {
      /* We must avoid moving the stack pointer adjustment past code
	 which reads from the local frame, else an interrupt could
	 occur after the SP adjustment and clobber data in the local
	 frame.  */
      emit_insn (gen_blockage ());
      output_stack_adjust (frame_size, stack_pointer_rtx, e,
			   &live_regs_mask, true);
    }

  /* Pop all the registers.  */

  if (target_flags != save_flags && ! current_function_interrupt)
    emit_insn (gen_toggle_sz ());

  {
    int last_reg;

    save_size = 0;
    /* For an ISR with RESBANK attribute assigned, don't pop PR
       register.  */
    if (TEST_HARD_REG_BIT (live_regs_mask, PR_REG)
	&& !sh_cfun_resbank_handler_p ())
      {
	if (!frame_pointer_needed)
	  emit_insn (gen_blockage ());
	pop (PR_REG);
      }

    /* Banked registers are popped first to avoid being scheduled in the
       delay slot.  RTE switches banks before the ds instruction.  */
    if (current_function_interrupt)
      {
	bool use_movml = false;

	if (TARGET_SH2A)
	  {
	    unsigned int count = 0;

	    for (int i = FIRST_BANKED_REG; i <= LAST_BANKED_REG; i++)
	      if (TEST_HARD_REG_BIT (live_regs_mask, i))
		count++;
	      else
		break;

	    /* Use movml when all banked registers are popped.  */
	    if (count == LAST_BANKED_REG - FIRST_BANKED_REG + 1)
	      use_movml = true;
	  }

	if (sh_cfun_resbank_handler_p ())
	  ; /* Do nothing.  */
	else if (use_movml)
	  {
	    rtx sp_reg = gen_rtx_REG (SImode, STACK_POINTER_REGNUM);

	    /* We must avoid scheduling multiple load insn with another
	       insns.  */
	    emit_insn (gen_blockage ());
	    emit_insn (gen_movml_pop_banked (sp_reg));
	    emit_insn (gen_blockage ());
	  }
	else
	  for (int i = LAST_BANKED_REG; i >= FIRST_BANKED_REG; i--)
	    if (TEST_HARD_REG_BIT (live_regs_mask, i))
	      pop (i);

	last_reg = FIRST_PSEUDO_REGISTER - LAST_BANKED_REG - 1;
      }
    else
      last_reg = FIRST_PSEUDO_REGISTER;

    for (int i = 0; i < last_reg; i++)
      {
	int j = (FIRST_PSEUDO_REGISTER - 1) - i;

	if (j == FPSCR_REG && current_function_interrupt && TARGET_FMOVD
	    && hard_reg_set_intersect_p (live_regs_mask,
					 reg_class_contents[DF_REGS]))
	  fpscr_deferred = true;
	/* For an ISR with RESBANK attribute assigned, don't pop
	   following registers, R0-R14, MACH, MACL and GBR.  */
	else if (j != PR_REG && TEST_HARD_REG_BIT (live_regs_mask, j)
		 && ! (sh_cfun_resbank_handler_p ()
		       && ((j >= FIRST_GENERAL_REG
			    && j < LAST_GENERAL_REG)
			   || j == MACH_REG
			   || j == MACL_REG
			   || j == GBR_REG)))
	  pop (j);

	if (j == FIRST_FP_REG && fpscr_deferred)
	  pop (FPSCR_REG);
      }
  }
  if (target_flags != save_flags && ! current_function_interrupt)
    emit_insn (gen_toggle_sz ());
  target_flags = save_flags;

  output_stack_adjust (crtl->args.pretend_args_size + save_size,
		       stack_pointer_rtx, e, NULL, true);

  if (crtl->calls_eh_return)
    emit_insn (GEN_ADD3 (stack_pointer_rtx, stack_pointer_rtx,
			 EH_RETURN_STACKADJ_RTX));

  /* Switch back to the normal stack if necessary.  */
  if (lookup_attribute ("sp_switch", DECL_ATTRIBUTES (current_function_decl)))
    emit_insn (gen_sp_switch_2 ());

  /* Tell flow the insn that pops PR isn't dead.  */
  if (TEST_HARD_REG_BIT (live_regs_mask, PR_REG))
    emit_use (gen_rtx_REG (SImode, PR_REG));
}
/* Emit code to change the current function's return address to RA.
   TEMP is available as a scratch register, if needed.  */
void
sh_set_return_address (rtx ra, rtx tmp)
{
  HARD_REG_SET live_regs_mask;
  int d = calc_live_regs (&live_regs_mask);

  /* If pr_reg isn't live, we can set it directly.  */
  if (! TEST_HARD_REG_BIT (live_regs_mask, PR_REG))
    {
      rtx rr = gen_rtx_REG (SImode, PR_REG);
      emit_insn (GEN_MOV (rr, ra));
      /* Tell flow the register for return isn't dead.  */
      emit_use (rr);
      return;
    }

  int pr_offset = rounded_frame_size (d);

  emit_insn (GEN_MOV (tmp, GEN_INT (pr_offset)));

  if (frame_pointer_needed)
    emit_insn (GEN_ADD3 (tmp, tmp, hard_frame_pointer_rtx));
  else
    emit_insn (GEN_ADD3 (tmp, tmp, stack_pointer_rtx));

  tmp = gen_frame_mem (Pmode, tmp);
  emit_insn (GEN_MOV (tmp, ra));
  /* Tell this store isn't dead.  */
  emit_use (tmp);
}
/* Clear variables at function end.  */
static void
sh_output_function_epilogue (FILE *)
{
}

static rtx
sh_builtin_saveregs (void)
{
  /* First unnamed integer register.  */
  int first_intreg = crtl->args.info.arg_count[(int) SH_ARG_INT];
  /* Number of integer registers we need to save.  */
  int n_intregs = MAX (0, NPARM_REGS (SImode) - first_intreg);
  /* First unnamed SFmode float reg */
  int first_floatreg = crtl->args.info.arg_count[(int) SH_ARG_FLOAT];
  /* Number of SFmode float regs to save.  */
  int n_floatregs = MAX (0, NPARM_REGS (SFmode) - first_floatreg);
  rtx regbuf, fpregs;
  int bufsize, regno;
  alias_set_type alias_set;

  if (!TARGET_FPU_ANY)
    {
      error ("%<__builtin_saveregs%> not supported by this subtarget");
      return const0_rtx;
    }

  /* Allocate block of memory for the regs.  */
  /* ??? If n_intregs + n_floatregs == 0, should we allocate at least 1 byte?
     Or can assign_stack_local accept a 0 SIZE argument?  */
  bufsize = (n_intregs * UNITS_PER_WORD) + (n_floatregs * UNITS_PER_WORD);

  if (n_floatregs & 1)
    {
      rtx addr;

      regbuf = assign_stack_local (BLKmode, bufsize + UNITS_PER_WORD, 0);
      addr = copy_to_mode_reg (Pmode, XEXP (regbuf, 0));
      emit_insn (gen_iorsi3 (addr, addr, GEN_INT (UNITS_PER_WORD)));
      regbuf = change_address (regbuf, BLKmode, addr);
    }
  else if (STACK_BOUNDARY < 64 && TARGET_FPU_DOUBLE && n_floatregs)
    {
      rtx addr, mask;

      regbuf = assign_stack_local (BLKmode, bufsize + UNITS_PER_WORD, 0);
      addr = copy_to_mode_reg (Pmode, plus_constant (Pmode,
						     XEXP (regbuf, 0), 4));
      mask = copy_to_mode_reg (Pmode, GEN_INT (-8));
      emit_insn (gen_andsi3 (addr, addr, mask));
      regbuf = change_address (regbuf, BLKmode, addr);
    }
  else
    regbuf = assign_stack_local (BLKmode, bufsize, TARGET_FPU_DOUBLE ? 64 : 0);
  alias_set = get_varargs_alias_set ();
  set_mem_alias_set (regbuf, alias_set);

  /* Save int args.
     This is optimized to only save the regs that are necessary.  Explicitly
     named args need not be saved.  */
  if (n_intregs > 0)
    move_block_from_reg (BASE_ARG_REG (SImode) + first_intreg,
			 adjust_address (regbuf, BLKmode,
					 n_floatregs * UNITS_PER_WORD),
			 n_intregs);

  /* Save float args.
     This is optimized to only save the regs that are necessary.  Explicitly
     named args need not be saved.
     We explicitly build a pointer to the buffer because it halves the insn
     count when not optimizing (otherwise the pointer is built for each reg
     saved).
     We emit the moves in reverse order so that we can use predecrement.  */

  fpregs = copy_to_mode_reg (Pmode,
			     plus_constant (Pmode, XEXP (regbuf, 0),
					    n_floatregs * UNITS_PER_WORD));
  if (TARGET_FPU_DOUBLE)
    {
      rtx mem;
      for (regno = NPARM_REGS (DFmode) - 2; regno >= first_floatreg; regno -= 2)
	{
	  emit_insn (gen_addsi3 (fpregs, fpregs,
				 GEN_INT (-2 * UNITS_PER_WORD)));
	  mem = change_address (regbuf, DFmode, fpregs);
	  emit_move_insn (mem,
			  gen_rtx_REG (DFmode, BASE_ARG_REG (DFmode) + regno));
	}
      regno = first_floatreg;
      if (regno & 1)
	{
	  emit_insn (gen_addsi3 (fpregs, fpregs, GEN_INT (-UNITS_PER_WORD)));
	  mem = change_address (regbuf, SFmode, fpregs);
	  emit_move_insn (mem,
			  gen_rtx_REG (SFmode, BASE_ARG_REG (SFmode)
				       + regno - SH_REG_MSW_OFFSET));
	}
    }
  else
    for (regno = NPARM_REGS (SFmode) - 1; regno >= first_floatreg; regno--)
      {
	rtx mem;

	emit_insn (gen_addsi3 (fpregs, fpregs, GEN_INT (-UNITS_PER_WORD)));
	mem = change_address (regbuf, SFmode, fpregs);
	emit_move_insn (mem,
			gen_rtx_REG (SFmode, BASE_ARG_REG (SFmode) + regno));
      }

  /* Return the address of the regbuf.  */
  return XEXP (regbuf, 0);
}
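/* The resulting buffer layout (an illustrative sketch): float args are
   stored first, filled downwards by the predecrement loops above, and the
   integer args follow at offset n_floatregs * UNITS_PER_WORD:

	XEXP (regbuf, 0) -> [ fp arg save area ][ int arg save area ]  */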
/* Define the `__builtin_va_list' type for the ABI.  */
static tree
sh_build_builtin_va_list (void)
{
  tree f_next_o, f_next_o_limit, f_next_fp, f_next_fp_limit, f_next_stack;
  tree record, type_decl;

  if ((! TARGET_SH2E && ! TARGET_SH4)
      || TARGET_HITACHI || sh_cfun_attr_renesas_p ())
    return ptr_type_node;

  record = (*lang_hooks.types.make_type) (RECORD_TYPE);
  type_decl = build_decl (BUILTINS_LOCATION,
			  TYPE_DECL, get_identifier ("__va_list_tag"), record);

  f_next_o = build_decl (BUILTINS_LOCATION,
			 FIELD_DECL, get_identifier ("__va_next_o"),
			 ptr_type_node);
  f_next_o_limit = build_decl (BUILTINS_LOCATION,
			       FIELD_DECL,
			       get_identifier ("__va_next_o_limit"),
			       ptr_type_node);
  f_next_fp = build_decl (BUILTINS_LOCATION,
			  FIELD_DECL, get_identifier ("__va_next_fp"),
			  ptr_type_node);
  f_next_fp_limit = build_decl (BUILTINS_LOCATION,
				FIELD_DECL,
				get_identifier ("__va_next_fp_limit"),
				ptr_type_node);
  f_next_stack = build_decl (BUILTINS_LOCATION,
			     FIELD_DECL, get_identifier ("__va_next_stack"),
			     ptr_type_node);

  DECL_FIELD_CONTEXT (f_next_o) = record;
  DECL_FIELD_CONTEXT (f_next_o_limit) = record;
  DECL_FIELD_CONTEXT (f_next_fp) = record;
  DECL_FIELD_CONTEXT (f_next_fp_limit) = record;
  DECL_FIELD_CONTEXT (f_next_stack) = record;

  TYPE_STUB_DECL (record) = type_decl;
  TYPE_NAME (record) = type_decl;
  TYPE_FIELDS (record) = f_next_o;
  DECL_CHAIN (f_next_o) = f_next_o_limit;
  DECL_CHAIN (f_next_o_limit) = f_next_fp;
  DECL_CHAIN (f_next_fp) = f_next_fp_limit;
  DECL_CHAIN (f_next_fp_limit) = f_next_stack;

  layout_type (record);

  return record;
}
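/* Roughly speaking, the record built above corresponds to the following C
   (an illustration only):

     struct __va_list_tag
     {
       void *__va_next_o;	  // next integer register slot
       void *__va_next_o_limit;	  // end of integer register save area
       void *__va_next_fp;	  // next float register slot
       void *__va_next_fp_limit;  // end of float register save area
       void *__va_next_stack;	  // next argument on the stack
     };  */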
/* Implement `va_start' for varargs and stdarg.  */
static void
sh_va_start (tree valist, rtx nextarg)
{
  tree f_next_o, f_next_o_limit, f_next_fp, f_next_fp_limit, f_next_stack;
  tree next_o, next_o_limit, next_fp, next_fp_limit, next_stack;
  tree t, u;
  int nfp, nint;

  if ((! TARGET_SH2E && ! TARGET_SH4)
      || TARGET_HITACHI || sh_cfun_attr_renesas_p ())
    {
      std_expand_builtin_va_start (valist, nextarg);
      return;
    }

  f_next_o = TYPE_FIELDS (va_list_type_node);
  f_next_o_limit = DECL_CHAIN (f_next_o);
  f_next_fp = DECL_CHAIN (f_next_o_limit);
  f_next_fp_limit = DECL_CHAIN (f_next_fp);
  f_next_stack = DECL_CHAIN (f_next_fp_limit);

  next_o = build3 (COMPONENT_REF, TREE_TYPE (f_next_o), valist, f_next_o,
		   NULL_TREE);
  next_o_limit = build3 (COMPONENT_REF, TREE_TYPE (f_next_o_limit),
			 valist, f_next_o_limit, NULL_TREE);
  next_fp = build3 (COMPONENT_REF, TREE_TYPE (f_next_fp), valist, f_next_fp,
		    NULL_TREE);
  next_fp_limit = build3 (COMPONENT_REF, TREE_TYPE (f_next_fp_limit),
			  valist, f_next_fp_limit, NULL_TREE);
  next_stack = build3 (COMPONENT_REF, TREE_TYPE (f_next_stack),
		       valist, f_next_stack, NULL_TREE);

  /* Call __builtin_saveregs.  */
  u = make_tree (sizetype, expand_builtin_saveregs ());
  u = fold_convert (ptr_type_node, u);
  t = build2 (MODIFY_EXPR, ptr_type_node, next_fp, u);
  TREE_SIDE_EFFECTS (t) = 1;
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

  nfp = crtl->args.info.arg_count[SH_ARG_FLOAT];
  if (nfp < 8)
    nfp = 8 - nfp;
  else
    nfp = 0;
  u = fold_build_pointer_plus_hwi (u, UNITS_PER_WORD * nfp);
  t = build2 (MODIFY_EXPR, ptr_type_node, next_fp_limit, u);
  TREE_SIDE_EFFECTS (t) = 1;
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

  t = build2 (MODIFY_EXPR, ptr_type_node, next_o, u);
  TREE_SIDE_EFFECTS (t) = 1;
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

  nint = crtl->args.info.arg_count[SH_ARG_INT];
  if (nint < 4)
    nint = 4 - nint;
  else
    nint = 0;
  u = fold_build_pointer_plus_hwi (u, UNITS_PER_WORD * nint);
  t = build2 (MODIFY_EXPR, ptr_type_node, next_o_limit, u);
  TREE_SIDE_EFFECTS (t) = 1;
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

  u = make_tree (ptr_type_node, nextarg);
  t = build2 (MODIFY_EXPR, ptr_type_node, next_stack, u);
  TREE_SIDE_EFFECTS (t) = 1;
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
}
/* TYPE is a RECORD_TYPE.  If there is only a single nonzero-sized
   member, return it.  */
static tree
find_sole_member (tree type)
{
  tree field, member = NULL_TREE;

  for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
    {
      if (TREE_CODE (field) != FIELD_DECL)
	continue;
      if (!DECL_SIZE (field))
	return NULL_TREE;
      if (integer_zerop (DECL_SIZE (field)))
	continue;
      if (member)
	return NULL_TREE;
      member = field;
    }
  return member;
}
7652 sh_gimplify_va_arg_expr (tree valist
, tree type
, gimple_seq
*pre_p
,
7653 gimple_seq
*post_p ATTRIBUTE_UNUSED
)
7656 tree addr
, lab_over
= NULL
, result
= NULL
;
7659 const bool pass_by_ref
7660 = !VOID_TYPE_P (type
) && must_pass_va_arg_in_stack (type
);
7663 type
= build_pointer_type (type
);
7665 HOST_WIDE_INT size
= int_size_in_bytes (type
);
7666 HOST_WIDE_INT rsize
= (size
+ UNITS_PER_WORD
- 1) & -UNITS_PER_WORD
;
7667 tree pptr_type_node
= build_pointer_type (ptr_type_node
);
7669 if ((TARGET_SH2E
|| TARGET_SH4
)
7670 && ! (TARGET_HITACHI
|| sh_cfun_attr_renesas_p ()))
7672 tree f_next_o
, f_next_o_limit
, f_next_fp
, f_next_fp_limit
, f_next_stack
;
7673 tree next_o
, next_o_limit
, next_fp
, next_fp_limit
, next_stack
;
7677 f_next_o
= TYPE_FIELDS (va_list_type_node
);
7678 f_next_o_limit
= DECL_CHAIN (f_next_o
);
7679 f_next_fp
= DECL_CHAIN (f_next_o_limit
);
7680 f_next_fp_limit
= DECL_CHAIN (f_next_fp
);
7681 f_next_stack
= DECL_CHAIN (f_next_fp_limit
);
7683 next_o
= build3 (COMPONENT_REF
, TREE_TYPE (f_next_o
), valist
, f_next_o
,
7685 next_o_limit
= build3 (COMPONENT_REF
, TREE_TYPE (f_next_o_limit
),
7686 valist
, f_next_o_limit
, NULL_TREE
);
7687 next_fp
= build3 (COMPONENT_REF
, TREE_TYPE (f_next_fp
),
7688 valist
, f_next_fp
, NULL_TREE
);
7689 next_fp_limit
= build3 (COMPONENT_REF
, TREE_TYPE (f_next_fp_limit
),
7690 valist
, f_next_fp_limit
, NULL_TREE
);
7691 next_stack
= build3 (COMPONENT_REF
, TREE_TYPE (f_next_stack
),
7692 valist
, f_next_stack
, NULL_TREE
);
7694 /* Structures with a single member with a distinct mode are passed
7695 like their member. This is relevant if the latter has a REAL_TYPE
7696 or COMPLEX_TYPE type. */
7698 while (TREE_CODE (eff_type
) == RECORD_TYPE
7699 && (member
= find_sole_member (eff_type
))
7700 && (TREE_CODE (TREE_TYPE (member
)) == REAL_TYPE
7701 || TREE_CODE (TREE_TYPE (member
)) == COMPLEX_TYPE
7702 || TREE_CODE (TREE_TYPE (member
)) == RECORD_TYPE
))
7704 tree field_type
= TREE_TYPE (member
);
7706 if (TYPE_MODE (eff_type
) == TYPE_MODE (field_type
))
7707 eff_type
= field_type
;
7710 gcc_assert ((TYPE_ALIGN (eff_type
)
7711 < GET_MODE_ALIGNMENT (TYPE_MODE (field_type
)))
7712 || (TYPE_ALIGN (eff_type
)
7713 > GET_MODE_BITSIZE (TYPE_MODE (field_type
))));
      bool pass_as_float;
      if (TARGET_FPU_DOUBLE)
	{
	  pass_as_float = ((TREE_CODE (eff_type) == REAL_TYPE && size <= 8)
			   || (TREE_CODE (eff_type) == COMPLEX_TYPE
			       && TREE_CODE (TREE_TYPE (eff_type)) == REAL_TYPE
			       && size <= 16));
	}
      else
	{
	  pass_as_float = (TREE_CODE (eff_type) == REAL_TYPE && size == 4);
	}

      addr = create_tmp_var (pptr_type_node);
      lab_false = create_artificial_label (UNKNOWN_LOCATION);
      lab_over = create_artificial_label (UNKNOWN_LOCATION);

      valist = build_simple_mem_ref (addr);

      if (pass_as_float)
	{
	  tree next_fp_tmp = create_tmp_var (TREE_TYPE (f_next_fp));
	  tree cmp;
	  bool is_double = size == 8 && TREE_CODE (eff_type) == REAL_TYPE;

	  tmp = build1 (ADDR_EXPR, pptr_type_node, unshare_expr (next_fp));
	  gimplify_assign (unshare_expr (addr), tmp, pre_p);

	  gimplify_assign (unshare_expr (next_fp_tmp), valist, pre_p);
	  tmp = next_fp_limit;
	  if (size > 4 && !is_double)
	    tmp = fold_build_pointer_plus_hwi (unshare_expr (tmp), 4 - size);
	  tmp = build2 (GE_EXPR, boolean_type_node,
			unshare_expr (next_fp_tmp), unshare_expr (tmp));
	  cmp = build3 (COND_EXPR, void_type_node, tmp,
			build1 (GOTO_EXPR, void_type_node,
				unshare_expr (lab_false)), NULL_TREE);
	  if (!is_double)
	    gimplify_and_add (cmp, pre_p);

	  if (TYPE_ALIGN (eff_type) > BITS_PER_WORD
	      || (is_double || size == 16))
	    {
	      tmp = fold_convert (sizetype, next_fp_tmp);
	      tmp = build2 (BIT_AND_EXPR, sizetype, tmp,
			    size_int (UNITS_PER_WORD));
	      tmp = fold_build_pointer_plus (unshare_expr (next_fp_tmp), tmp);
	      gimplify_assign (unshare_expr (next_fp_tmp), tmp, pre_p);
	    }
	  if (is_double)
	    gimplify_and_add (cmp, pre_p);

#ifdef FUNCTION_ARG_SCmode_WART
	  if (TYPE_MODE (eff_type) == SCmode
	      && TARGET_SH4 && TARGET_LITTLE_ENDIAN)
	    {
	      tree subtype = TREE_TYPE (eff_type);
	      tree real, imag;

	      imag
		= std_gimplify_va_arg_expr (next_fp_tmp, subtype, pre_p, NULL);
	      imag = get_initialized_tmp_var (imag, pre_p, NULL);

	      real
		= std_gimplify_va_arg_expr (next_fp_tmp, subtype, pre_p, NULL);
	      real = get_initialized_tmp_var (real, pre_p, NULL);

	      result = build2 (COMPLEX_EXPR, eff_type, real, imag);
	      if (type != eff_type)
		result = build1 (VIEW_CONVERT_EXPR, type, result);
	      result = get_initialized_tmp_var (result, pre_p, NULL);
	    }
#endif /* FUNCTION_ARG_SCmode_WART */

	  tmp = build1 (GOTO_EXPR, void_type_node, unshare_expr (lab_over));
	  gimplify_and_add (tmp, pre_p);

	  tmp = build1 (LABEL_EXPR, void_type_node, unshare_expr (lab_false));
	  gimplify_and_add (tmp, pre_p);

	  tmp = build1 (ADDR_EXPR, pptr_type_node, unshare_expr (next_stack));
	  gimplify_assign (unshare_expr (addr), tmp, pre_p);
	  gimplify_assign (unshare_expr (next_fp_tmp),
			   unshare_expr (valist), pre_p);

	  gimplify_assign (unshare_expr (valist),
			   unshare_expr (next_fp_tmp), post_p);
	  valist = next_fp_tmp;
	}
      else
	{
	  tmp = fold_build_pointer_plus_hwi (unshare_expr (next_o), rsize);
	  tmp = build2 (GT_EXPR, boolean_type_node, tmp,
			unshare_expr (next_o_limit));
	  tmp = build3 (COND_EXPR, void_type_node, tmp,
			build1 (GOTO_EXPR, void_type_node,
				unshare_expr (lab_false)),
			NULL_TREE);
	  gimplify_and_add (tmp, pre_p);

	  tmp = build1 (ADDR_EXPR, pptr_type_node, unshare_expr (next_o));
	  gimplify_assign (unshare_expr (addr), tmp, pre_p);

	  tmp = build1 (GOTO_EXPR, void_type_node, unshare_expr (lab_over));
	  gimplify_and_add (tmp, pre_p);

	  tmp = build1 (LABEL_EXPR, void_type_node, unshare_expr (lab_false));
	  gimplify_and_add (tmp, pre_p);

	  if (size > 4 && ! (TARGET_SH4 || TARGET_SH2A))
	    gimplify_assign (unshare_expr (next_o),
			     unshare_expr (next_o_limit), pre_p);

	  tmp = build1 (ADDR_EXPR, pptr_type_node, unshare_expr (next_stack));
	  gimplify_assign (unshare_expr (addr), tmp, pre_p);
	}

      if (!result)
	{
	  tmp = build1 (LABEL_EXPR, void_type_node, unshare_expr (lab_over));
	  gimplify_and_add (tmp, pre_p);
	}
    }

  /* ??? In va-sh.h, there had been code to make values larger than
     size 8 indirect.  This does not match the FUNCTION_ARG macros.  */

  tmp = std_gimplify_va_arg_expr (valist, type, pre_p, NULL);
  if (result)
    {
      gimplify_assign (result, tmp, pre_p);
      result = build1 (NOP_EXPR, TREE_TYPE (result), result);
      tmp = build1 (LABEL_EXPR, void_type_node, unshare_expr (lab_over));
      gimplify_and_add (tmp, pre_p);
    }
  else
    result = tmp;

  if (pass_by_ref)
    result = build_va_arg_indirect_ref (result);

  return result;
}
/* 64 bit floating point memory transfers are paired single precision loads
   or stores.  So DWARF information needs fixing in little endian (unless
   PR=SZ=1 in FPSCR).  */
rtx
sh_dwarf_register_span (rtx reg)
{
  unsigned regno = REGNO (reg);

  if (WORDS_BIG_ENDIAN || GET_MODE (reg) != DFmode)
    return NULL_RTX;

  return
    gen_rtx_PARALLEL (VOIDmode,
		      gen_rtvec (2,
				 gen_rtx_REG (SFmode, regno + 1),
				 gen_rtx_REG (SFmode, regno)));
}
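/* For example, in little endian mode a DFmode value in dr0 is transferred
   as two SFmode moves, so the span above describes it to DWARF as the
   register pair fr1/fr0 rather than one 8-byte register.  */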
static machine_mode
sh_promote_function_mode (const_tree type, machine_mode mode,
			  int *punsignedp, const_tree funtype,
			  int for_return)
{
  if (sh_promote_prototypes (funtype))
    return promote_mode (type, mode, punsignedp);
  else
    return default_promote_function_mode (type, mode, punsignedp, funtype,
					  for_return);
}

static bool
sh_promote_prototypes (const_tree type)
{
  if (TARGET_HITACHI)
    return false;
  if (! type)
    return true;
  return ! sh_attr_renesas_p (type);
}
static bool
sh_pass_by_reference (cumulative_args_t cum_v, const function_arg_info &arg)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);

  if (targetm.calls.must_pass_in_stack (arg))
    return true;

  /* ??? std_gimplify_va_arg_expr passes NULL for cum.  That function
     wants to know about pass-by-reference semantics for incoming
     arguments.  */
  if (! cum)
    return false;

  return false;
}

static bool
sh_callee_copies (cumulative_args_t cum, const function_arg_info &arg)
{
  /* ??? How can it possibly be correct to return true only on the
     caller side of the equation?  Is there someplace else in the
     sh backend that's magically producing the copies?  */
  return (get_cumulative_args (cum)->outgoing
	  && ((arg.mode == BLKmode
	       ? TYPE_ALIGN (arg.type)
	       : GET_MODE_ALIGNMENT (arg.mode))
	      % SH_MIN_ALIGN_FOR_CALLEE_COPY == 0));
}
static enum sh_arg_class
get_sh_arg_class (machine_mode mode)
{
  if (TARGET_FPU_ANY && mode == SFmode)
    return SH_ARG_FLOAT;

  if (TARGET_FPU_DOUBLE
      && (GET_MODE_CLASS (mode) == MODE_FLOAT
	  || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT))
    return SH_ARG_FLOAT;

  return SH_ARG_INT;
}
/* Round a register number up to a proper boundary for an arg of mode
   MODE.
   The SH doesn't care about double alignment, so we only
   round doubles to even regs when asked to explicitly.  */
static int
sh_round_reg (const CUMULATIVE_ARGS& cum, machine_mode mode)
{
  /* FIXME: This used to be a macro and has been copy pasted into this
     function as is.  Make this more readable.  */
  return
    (((TARGET_ALIGN_DOUBLE
       || (TARGET_FPU_DOUBLE
	   && (mode == DFmode || mode == DCmode)
	   && cum.arg_count[(int) SH_ARG_FLOAT] < NPARM_REGS (mode)))
      && GET_MODE_UNIT_SIZE (mode) > UNITS_PER_WORD)
     ? (cum.arg_count[(int) get_sh_arg_class (mode)]
	+ (cum.arg_count[(int) get_sh_arg_class (mode)] & 1))
     : cum.arg_count[(int) get_sh_arg_class (mode)]);
}
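/* For example: with three SFmode args already assigned (arg_count == 3),
   an incoming DFmode arg that must be double-aligned gets
   3 + (3 & 1) == 4, i.e. it is rounded up to the next even register pair
   and one SFmode register is skipped.  */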
/* Return true if arg of the specified mode should be passed in a register
   or false otherwise.  */
static bool
sh_pass_in_reg_p (const CUMULATIVE_ARGS& cum, machine_mode mode,
		  const_tree type)
{
  /* FIXME: This used to be a macro and has been copy pasted into this
     function as is.  Make this more readable.  */
  return ((type == 0
	   || (! TREE_ADDRESSABLE (type)
	       && (! (TARGET_HITACHI || cum.renesas_abi)
		   || ! (AGGREGATE_TYPE_P (type)
			 || (!TARGET_FPU_ANY
			     && (GET_MODE_CLASS (mode) == MODE_FLOAT
				 && GET_MODE_SIZE (mode)
				    > GET_MODE_SIZE (SFmode)))))))
	  && ! cum.force_mem
	  && (TARGET_SH2E
	      ? ((mode) == BLKmode
		 ? ((cum.arg_count[(int) SH_ARG_INT] * UNITS_PER_WORD
		     + int_size_in_bytes (type))
		    <= NPARM_REGS (SImode) * UNITS_PER_WORD)
		 : ((sh_round_reg (cum, mode)
		     + sh_hard_regno_nregs (BASE_ARG_REG (mode), mode))
		    <= NPARM_REGS (mode)))
	      : sh_round_reg (cum, mode) < NPARM_REGS (mode)));
}
static int
sh_arg_partial_bytes (cumulative_args_t cum_v, const function_arg_info &arg)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  int words = 0;

  if (sh_pass_in_reg_p (*cum, arg.mode, arg.type)
      && !TARGET_FPU_DOUBLE
      && (sh_round_reg (*cum, arg.mode)
	  + CEIL (arg.promoted_size_in_bytes (), UNITS_PER_WORD)
	  > NPARM_REGS (arg.mode)))
    words = NPARM_REGS (arg.mode) - sh_round_reg (*cum, arg.mode);

  return words * UNITS_PER_WORD;
}
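/* For example: with NPARM_REGS == 4 and three argument registers already
   used, an 8-byte argument yields sh_round_reg == 3 and CEIL (8, 4) == 2;
   since 3 + 2 > 4, WORDS == 4 - 3 == 1, so one word is passed in the last
   register and the remaining four bytes go on the stack.  */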
/* Define where to put the arguments to a function.
   Value is zero to push the argument on the stack,
   or a hard register in which to store the argument.

   CUM is a variable of type CUMULATIVE_ARGS which gives info about
    the preceding args and about the function being called.
   ARG is a description of the argument.

   On SH the first args are normally in registers
   and the rest are pushed.  Any arg that starts within the first
   NPARM_REGS words is at least partially passed in a register unless
   its data type forbids.  */
static rtx
sh_function_arg (cumulative_args_t ca_v, const function_arg_info &arg)
{
  CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
  machine_mode mode = arg.mode;

  if (arg.end_marker_p ())
    return ca->renesas_abi ? const1_rtx : const0_rtx;

  if (sh_pass_in_reg_p (*ca, mode, arg.type)
      && (arg.named || ! (TARGET_HITACHI || ca->renesas_abi)))
    {
      int regno;

      if (mode == SCmode && TARGET_SH4 && TARGET_LITTLE_ENDIAN
	  && (! FUNCTION_ARG_SCmode_WART || (sh_round_reg (*ca, mode) & 1)))
	{
	  rtx r1 = gen_rtx_EXPR_LIST (VOIDmode,
				      gen_rtx_REG (SFmode,
						   BASE_ARG_REG (mode)
						   + (sh_round_reg (*ca, mode)
						      ^ 1)),
				      const0_rtx);
	  rtx r2 = gen_rtx_EXPR_LIST (VOIDmode,
				      gen_rtx_REG (SFmode,
						   BASE_ARG_REG (mode)
						   + ((sh_round_reg (*ca, mode)
						       + 1) ^ 1)),
				      GEN_INT (4));
	  return gen_rtx_PARALLEL(SCmode, gen_rtvec(2, r1, r2));
	}

      /* If the alignment of a DF value causes an SF register to be
	 skipped, we will use that skipped register for the next SF
	 value.  */
      if ((TARGET_HITACHI || ca->renesas_abi)
	  && ca->free_single_fp_reg
	  && mode == SFmode)
	return gen_rtx_REG (mode, ca->free_single_fp_reg);

      regno = (BASE_ARG_REG (mode) + sh_round_reg (*ca, mode))
	       ^ (mode == SFmode && TARGET_SH4
		  && TARGET_LITTLE_ENDIAN
		  && ! TARGET_HITACHI && ! ca->renesas_abi);
      return gen_rtx_REG (mode, regno);
    }

  return NULL_RTX;
}
/* Update the data in CUM to advance over argument ARG.  */
static void
sh_function_arg_advance (cumulative_args_t ca_v,
			 const function_arg_info &arg)
{
  CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);

  if (ca->force_mem)
    ca->force_mem = false;

  if ((TARGET_HITACHI || ca->renesas_abi) && TARGET_FPU_DOUBLE)
    {
      /* Note that we've used the skipped register.  */
      if (arg.mode == SFmode && ca->free_single_fp_reg)
	{
	  ca->free_single_fp_reg = 0;
	  return;
	}
      /* When we have a DF after an SF, there's an SF register that gets
	 skipped in order to align the DF value.  We note this skipped
	 register, because the next SF value will use it, and not the
	 SF that follows the DF.  */
      if (arg.mode == DFmode
	  && sh_round_reg (*ca, DFmode) != sh_round_reg (*ca, SFmode))
	ca->free_single_fp_reg = (sh_round_reg (*ca, SFmode)
				  + BASE_ARG_REG (arg.mode));
    }

  if (! ((TARGET_SH4 || TARGET_SH2A) || ca->renesas_abi)
      || sh_pass_in_reg_p (*ca, arg.mode, arg.type))
    (ca->arg_count[(int) get_sh_arg_class (arg.mode)]
     = (sh_round_reg (*ca, arg.mode)
	+ CEIL (arg.promoted_size_in_bytes (), UNITS_PER_WORD)));
}
/* The Renesas calling convention doesn't quite fit into this scheme since
   the address is passed like an invisible argument, but one that is always
   passed in memory.  */
static rtx
sh_struct_value_rtx (tree fndecl, int incoming ATTRIBUTE_UNUSED)
{
  if (TARGET_HITACHI || sh_attr_renesas_p (fndecl))
    return NULL_RTX;
  return gen_rtx_REG (Pmode, 2);
}
/* Worker function for TARGET_FUNCTION_VALUE.

   For the SH, this is like LIBCALL_VALUE, except that we must change the
   mode like PROMOTE_MODE does.
   ??? PROMOTE_MODE is ignored for non-scalar types.  The set of types
   tested here has to be kept in sync with the one in
   explow.c:promote_mode.  */
static rtx
sh_function_value (const_tree valtype,
		   const_tree fn_decl_or_type,
		   bool outgoing ATTRIBUTE_UNUSED)
{
  if (fn_decl_or_type
      && !DECL_P (fn_decl_or_type))
    fn_decl_or_type = NULL;

  return gen_rtx_REG (
	   ((GET_MODE_CLASS (TYPE_MODE (valtype)) == MODE_INT
	     && GET_MODE_SIZE (TYPE_MODE (valtype)) < 4
	     && (TREE_CODE (valtype) == INTEGER_TYPE
		 || TREE_CODE (valtype) == ENUMERAL_TYPE
		 || TREE_CODE (valtype) == BOOLEAN_TYPE
		 || TREE_CODE (valtype) == REAL_TYPE
		 || TREE_CODE (valtype) == OFFSET_TYPE))
	    && sh_promote_prototypes (fn_decl_or_type)
	    ? SImode : TYPE_MODE (valtype)),
	   BASE_RETURN_VALUE_REG (TYPE_MODE (valtype)));
}
/* Worker function for TARGET_LIBCALL_VALUE.  */
static rtx
sh_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
{
  return gen_rtx_REG (mode, BASE_RETURN_VALUE_REG (mode));
}

/* Return true if N is a possible register number of function value.  */
static bool
sh_function_value_regno_p (const unsigned int regno)
{
  return regno == FIRST_RET_REG || (TARGET_SH2E && regno == FIRST_FP_RET_REG);
}

/* Worker function for TARGET_RETURN_IN_MEMORY.  */
static bool
sh_return_in_memory (const_tree type, const_tree fndecl)
{
  return TYPE_MODE (type) == BLKmode
	 || ((TARGET_HITACHI || sh_attr_renesas_p (fndecl))
	     && TREE_CODE (type) == RECORD_TYPE);
}
/* We actually emit the code in sh_expand_prologue.  We used to use
   a static variable to flag that we need to emit this code, but that
   doesn't work when inlining, when functions are deferred and then emitted
   later.  Fortunately, we already have two flags that are part of struct
   function that tell if a function uses varargs or stdarg.  */
static void
sh_setup_incoming_varargs (cumulative_args_t ca,
			   const function_arg_info &arg,
			   int *pretend_arg_size,
			   int second_time ATTRIBUTE_UNUSED)
{
  gcc_assert (cfun->stdarg);
  if (TARGET_VARARGS_PRETEND_ARGS (current_function_decl))
    {
      int named_parm_regs, anon_parm_regs;

      named_parm_regs = (sh_round_reg (*get_cumulative_args (ca), arg.mode)
			 + CEIL (arg.promoted_size_in_bytes (),
				 UNITS_PER_WORD));
      anon_parm_regs = NPARM_REGS (SImode) - named_parm_regs;
      if (anon_parm_regs > 0)
	*pretend_arg_size = anon_parm_regs * 4;
    }
}

static bool
sh_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
{
  return false;
}

static bool
sh_pretend_outgoing_varargs_named (cumulative_args_t ca_v)
{
  CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);

  return ! (TARGET_HITACHI || ca->renesas_abi);
}
/* Define the offset between two registers, one to be eliminated, and
   the other its replacement, at the start of a routine.  */
int
initial_elimination_offset (int from, int to)
{
  const int regs_saved_rounding = 0;
  int save_flags = target_flags;
  HARD_REG_SET live_regs_mask;

  int regs_saved = calc_live_regs (&live_regs_mask);

  int total_auto_space = rounded_frame_size (regs_saved) - regs_saved_rounding;
  target_flags = save_flags;

  int total_saved_regs_space = regs_saved + regs_saved_rounding;

  if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    return total_saved_regs_space + total_auto_space;

  if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    return total_saved_regs_space + total_auto_space;

  /* Initial gap between fp and sp is 0.  */
  if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    return 0;

  if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    return rounded_frame_size (0);

  if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    return rounded_frame_size (0);

  gcc_assert (from == RETURN_ADDRESS_POINTER_REGNUM
	      && (to == HARD_FRAME_POINTER_REGNUM
		  || to == STACK_POINTER_REGNUM));
  return total_auto_space;
}
/* Parse the -mfixed-range= option string.  */
void
sh_fix_range (const char *const_str)
{
  /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
     REG2 are either register names or register numbers.  The effect
     of this option is to mark the registers in the range from REG1 to
     REG2 as ``fixed'' so they won't be used by the compiler.  */

  char* str = strcpy ((char*)alloca (strlen (const_str) + 1), const_str);

  while (1)
    {
      char* dash = strchr (str, '-');
      if (!dash)
	{
	  warning (0, "value of %<-mfixed-range%> must have form REG1-REG2");
	  return;
	}
      *dash = '\0';
      char* comma = strchr (dash + 1, ',');
      if (comma)
	*comma = '\0';

      int first = decode_reg_name (str);
      if (first < 0)
	{
	  warning (0, "unknown register name: %s", str);
	  return;
	}

      int last = decode_reg_name (dash + 1);
      if (last < 0)
	{
	  warning (0, "unknown register name: %s", dash + 1);
	  return;
	}

      *dash = '-';

      if (first > last)
	{
	  warning (0, "%s-%s is an empty range", str, dash + 1);
	  return;
	}

      for (int i = first; i <= last; ++i)
	fixed_regs[i] = call_used_regs[i] = 1;

      if (!comma)
	break;

      *comma = ',';
      str = comma + 1;
    }
}
/* Insert any deferred function attributes from earlier pragmas.  */
static void
sh_insert_attributes (tree node, tree *attributes)
{
  if (TREE_CODE (node) != FUNCTION_DECL)
    return;

  /* We are only interested in fields.  */
  if (!DECL_P (node))
    return;

  /* Append the attributes to the deferred attributes.  */
  *sh_deferred_function_attributes_tail = *attributes;
  tree attrs = sh_deferred_function_attributes;
  if (!attrs)
    return;

  /* Some attributes imply or require the interrupt attribute.  */
  if (!lookup_attribute ("interrupt_handler", attrs)
      && !lookup_attribute ("interrupt_handler", DECL_ATTRIBUTES (node)))
    {
      /* If we have a trapa_handler, but no interrupt_handler attribute,
	 insert an interrupt_handler attribute.  */
      if (lookup_attribute ("trapa_handler", attrs) != NULL_TREE)
	/* We can't use sh_pr_interrupt here because that's not in the
	   java frontend.  */
	attrs
	  = tree_cons (get_identifier("interrupt_handler"), NULL_TREE, attrs);
      /* However, for sp_switch, trap_exit, nosave_low_regs and resbank,
	 if the interrupt attribute is missing, we ignore the attribute
	 and warn.  */
      else if (lookup_attribute ("sp_switch", attrs)
	       || lookup_attribute ("trap_exit", attrs)
	       || lookup_attribute ("nosave_low_regs", attrs)
	       || lookup_attribute ("resbank", attrs))
	{
	  tree *tail;

	  for (tail = attributes; attrs; attrs = TREE_CHAIN (attrs))
	    {
	      if (is_attribute_p ("sp_switch", TREE_PURPOSE (attrs))
		  || is_attribute_p ("trap_exit", TREE_PURPOSE (attrs))
		  || is_attribute_p ("nosave_low_regs", TREE_PURPOSE (attrs))
		  || is_attribute_p ("resbank", TREE_PURPOSE (attrs)))
		warning (OPT_Wattributes,
			 "%qE attribute only applies to interrupt functions",
			 TREE_PURPOSE (attrs));
	      else
		{
		  *tail = tree_cons (TREE_PURPOSE (attrs), NULL_TREE,
				     NULL_TREE);
		  tail = &TREE_CHAIN (*tail);
		}
	    }
	  attrs = *attributes;
	}
    }

  /* Install the processed list.  */
  *attributes = attrs;

  /* Clear deferred attributes.  */
  sh_deferred_function_attributes = NULL_TREE;
  sh_deferred_function_attributes_tail = &sh_deferred_function_attributes;
}
/*------------------------------------------------------------------------------
  Target specific attributes
  Supported attributes are:

   interrupt_handler
	Specifies this function is an interrupt handler.

   trapa_handler
	Like interrupt_handler, but don't save all registers.

   sp_switch
	Specifies an alternate stack for an interrupt handler to run on.

   trap_exit
	Use a trapa to exit an interrupt function instead of rte.

   nosave_low_regs
	Don't save r0..r7 in an interrupt handler function.
	This is useful on SH3* and SH4*, which have a separate set of low
	regs for user and privileged modes.
	This is mainly to be used for non-reentrant interrupt handlers (i.e.
	those that run with interrupts disabled and thus can't be
	interrupted themselves).

   renesas
	Use Renesas calling/layout conventions (functions and structures).

   resbank
	In case of an interrupt handler function, use a register bank to
	save registers R0-R14, MACH, MACL, GBR and PR.
	This is available only on SH2A targets.

   function_vector
	Declares a function to be called using the TBR relative addressing
	mode.  Takes an argument that specifies the slot number in the table
	where this function can be looked up by the JSR/N @@(disp8,TBR) insn.
*/
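
/* Usage illustration (user code, not part of this file; the identifiers
   and the trap number are made up):

     void __attribute__ ((interrupt_handler))
     isr_plain (void);

     void __attribute__ ((interrupt_handler, sp_switch ("alt_stack"),
			  trap_exit (11)))
     isr_switched (void);

     void __attribute__ ((interrupt_handler, resbank))	// SH2A only
     isr_banked (void);
*/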
/* Handle a 'resbank' attribute.  */
static tree
sh_handle_resbank_handler_attribute (tree * node, tree name,
				     tree args ATTRIBUTE_UNUSED,
				     int flags ATTRIBUTE_UNUSED,
				     bool * no_add_attrs)
{
  if (!TARGET_SH2A)
    {
      warning (OPT_Wattributes, "%qE attribute is supported only for SH2A",
	       name);
      *no_add_attrs = true;
    }
  if (TREE_CODE (*node) != FUNCTION_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
	       name);
      *no_add_attrs = true;
    }

  return NULL_TREE;
}

/* Handle an "interrupt_handler" attribute; arguments as in
   struct attribute_spec.handler.  */
static tree
sh_handle_interrupt_handler_attribute (tree *node, tree name,
				       tree args ATTRIBUTE_UNUSED,
				       int flags ATTRIBUTE_UNUSED,
				       bool *no_add_attrs)
{
  if (TREE_CODE (*node) != FUNCTION_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
	       name);
      *no_add_attrs = true;
    }

  return NULL_TREE;
}
/* Handle a 'function_vector' attribute; arguments as in
   struct attribute_spec.handler.  */
static tree
sh2a_handle_function_vector_handler_attribute (tree * node, tree name,
					       tree args ATTRIBUTE_UNUSED,
					       int flags ATTRIBUTE_UNUSED,
					       bool * no_add_attrs)
{
  if (!TARGET_SH2A)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to SH2A",
	       name);
      *no_add_attrs = true;
    }
  else if (TREE_CODE (*node) != FUNCTION_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
	       name);
      *no_add_attrs = true;
    }
  else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
    {
      /* The argument must be a constant integer.  */
      warning (OPT_Wattributes,
	       "%qE attribute argument not an integer constant",
	       name);
      *no_add_attrs = true;
    }
  else if (TREE_INT_CST_LOW (TREE_VALUE (args)) > 255)
    {
      /* The argument value must be between 0 and 255.  */
      warning (OPT_Wattributes,
	       "%qE attribute argument should be between 0 to 255",
	       name);
      *no_add_attrs = true;
    }
  return NULL_TREE;
}
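
/* Usage illustration (user code; the slot number is made up):

     void tbr_func (void) __attribute__ ((function_vector (42)));

   On SH2A a call to tbr_func can then be emitted as a JSR/N
   @@(disp8,TBR) through table slot 42; the handler above rejects slot
   numbers outside 0..255.  */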
/* Returns true if the given call symbol X refers to a function that has
   been assigned the attribute 'function_vector'.  */
bool
sh2a_is_function_vector_call (rtx x)
{
  if (GET_CODE (x) == SYMBOL_REF
      && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
    {
      tree tr = SYMBOL_REF_DECL (x);

      if (sh2a_function_vector_p (tr))
	return true;
    }

  return false;
}

/* Returns the function vector number, if the attribute
   'function_vector' is assigned, otherwise returns zero.  */
int
sh2a_get_function_vector_number (rtx x)
{
  if ((GET_CODE (x) == SYMBOL_REF)
      && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
    {
      tree t = SYMBOL_REF_DECL (x);

      if (TREE_CODE (t) != FUNCTION_DECL)
	return 0;

      for (tree list = SH_ATTRIBUTES (t); list; list = TREE_CHAIN (list))
	if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
	  return TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list)));

      return 0;
    }
  else
    return 0;
}
/* Handle an "sp_switch" attribute; arguments as in
   struct attribute_spec.handler.  */
static tree
sh_handle_sp_switch_attribute (tree *node, tree name, tree args,
			       int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
  if (TREE_CODE (*node) != FUNCTION_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
	       name);
      *no_add_attrs = true;
    }
  else if (TREE_CODE (TREE_VALUE (args)) != STRING_CST)
    {
      /* The argument must be a constant string.  */
      warning (OPT_Wattributes, "%qE attribute argument not a string constant",
	       name);
      *no_add_attrs = true;
    }

  return NULL_TREE;
}

/* Handle a "trap_exit" attribute; arguments as in
   struct attribute_spec.handler.  */
static tree
sh_handle_trap_exit_attribute (tree *node, tree name, tree args,
			       int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
  if (TREE_CODE (*node) != FUNCTION_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
	       name);
      *no_add_attrs = true;
    }
  /* The argument specifies a trap number to be used in a trapa instruction
     at function exit (instead of an rte instruction).  */
  else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
    {
      /* The argument must be a constant integer.  */
      warning (OPT_Wattributes, "%qE attribute argument not an "
	       "integer constant", name);
      *no_add_attrs = true;
    }

  return NULL_TREE;
}

static tree
sh_handle_renesas_attribute (tree *node ATTRIBUTE_UNUSED,
			     tree name ATTRIBUTE_UNUSED,
			     tree args ATTRIBUTE_UNUSED,
			     int flags ATTRIBUTE_UNUSED,
			     bool *no_add_attrs ATTRIBUTE_UNUSED)
{
  return NULL_TREE;
}
/* True if __attribute__((renesas)) or -mrenesas.  */
bool
sh_attr_renesas_p (const_tree td)
{
  if (TARGET_HITACHI)
    return true;
  if (td == NULL_TREE)
    return false;
  if (DECL_P (td))
    td = TREE_TYPE (td);
  if (td == error_mark_node)
    return false;
  return lookup_attribute ("renesas", TYPE_ATTRIBUTES (td)) != NULL_TREE;
}

/* True if __attribute__((renesas)) or -mrenesas, for the current
   function.  */
bool
sh_cfun_attr_renesas_p (void)
{
  return sh_attr_renesas_p (current_function_decl);
}

/* Returns true if the current function has the "interrupt_handler"
   attribute set.  */
bool
sh_cfun_interrupt_handler_p (void)
{
  return (lookup_attribute ("interrupt_handler",
			    DECL_ATTRIBUTES (current_function_decl))
	  != NULL_TREE);
}

/* Returns true if FUNC has been assigned the attribute
   "function_vector".  */
bool
sh2a_function_vector_p (tree func)
{
  if (TREE_CODE (func) != FUNCTION_DECL)
    return false;

  for (tree list = SH_ATTRIBUTES (func); list; list = TREE_CHAIN (list))
    if (is_attribute_p ("function_vector", get_attribute_name (list)))
      return true;

  return false;
}

/* Returns true if given tree has the "resbank" attribute set.  */
bool
sh_cfun_resbank_handler_p (void)
{
  return ((lookup_attribute ("resbank",
			     DECL_ATTRIBUTES (current_function_decl))
	   != NULL_TREE)
	  && (lookup_attribute ("interrupt_handler",
				DECL_ATTRIBUTES (current_function_decl))
	      != NULL_TREE) && TARGET_SH2A);
}

/* Returns true if the current function has a "trap_exit" attribute set.  */
bool
sh_cfun_trap_exit_p (void)
{
  return lookup_attribute ("trap_exit", DECL_ATTRIBUTES (current_function_decl))
	 != NULL_TREE;
}
/* Implement TARGET_CHECK_PCH_TARGET_FLAGS.  */
static const char *
sh_check_pch_target_flags (int old_flags)
{
  if ((old_flags ^ target_flags) & (MASK_SH1 | MASK_SH2 | MASK_SH3
				    | MASK_SH_E | MASK_HARD_SH4
				    | MASK_FPU_SINGLE | MASK_SH4))
    return _("created and used with different architectures / ABIs");
  if ((old_flags ^ target_flags) & MASK_HITACHI)
    return _("created and used with different ABIs");
  if ((old_flags ^ target_flags) & MASK_LITTLE_ENDIAN)
    return _("created and used with different endianness");
  return NULL;
}
/* Predicates used by the templates.  */

/* Returns true if OP is MACL, MACH or PR.  The input must be a REG rtx.
   Used only in general_movsrc_operand.  */
bool
system_reg_operand (rtx op, machine_mode mode ATTRIBUTE_UNUSED)
{
  switch (REGNO (op))
    {
    case PR_REG:
    case MACL_REG:
    case MACH_REG:
      return true;
    }
  return false;
}

/* Returns true if OP is a floating point value with value 0.0.  */
bool
fp_zero_operand (rtx op)
{
  if (GET_MODE (op) != SFmode)
    return false;

  const REAL_VALUE_TYPE* r = CONST_DOUBLE_REAL_VALUE (op);
  return real_equal (r, &dconst0) && ! REAL_VALUE_MINUS_ZERO (*r);
}

/* Returns true if OP is a floating point value with value 1.0.  */
bool
fp_one_operand (rtx op)
{
  if (GET_MODE (op) != SFmode)
    return false;

  return real_equal (CONST_DOUBLE_REAL_VALUE (op), &dconst1);
}

/* Return the TLS type for TLS symbols.  */
enum tls_model
tls_symbolic_operand (rtx op, machine_mode mode ATTRIBUTE_UNUSED)
{
  if (GET_CODE (op) != SYMBOL_REF)
    return TLS_MODEL_NONE;
  return SYMBOL_REF_TLS_MODEL (op);
}
/* Return the destination address of a branch.  */
static int
branch_dest (rtx branch)
{
  rtx dest = SET_SRC (PATTERN (branch));

  if (GET_CODE (dest) == IF_THEN_ELSE)
    dest = XEXP (dest, 1);

  return INSN_ADDRESSES (INSN_UID (XEXP (dest, 0)));
}
/* Return nonzero if REG is not used after INSN.
   We assume REG is a reload reg, and therefore does
   not live past labels.  It may live past calls or jumps though.  */
bool
reg_unused_after (rtx reg, rtx_insn *insn)
{
  /* If the reg is set by this instruction, then it is safe for our
     case.  Disregard the case where this is a store to memory, since
     we are checking a register used in the store address.  */
  rtx set = single_set (insn);
  if (set && !MEM_P (SET_DEST (set))
      && reg_overlap_mentioned_p (reg, SET_DEST (set)))
    return true;

  while ((insn = NEXT_INSN (insn)))
    {
      if (!INSN_P (insn))
	continue;

      rtx_code code = GET_CODE (insn);

#if 0
      /* If this is a label that existed before reload, then the register
	 is dead here.  However, if this is a label added by reorg, then
	 the register may still be live here.  We can't tell the difference,
	 so we just ignore labels completely.  */
      if (code == CODE_LABEL)
	return 1;
      /* else */
#endif

      if (code == JUMP_INSN)
	return false;

      /* If this is a sequence, we must handle them all at once.
	 We could have for instance a call that sets the target register,
	 and an insn in a delay slot that uses the register.  In this case,
	 we must return 0.  */
      else if (code == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
	{
	  rtx_sequence *seq = as_a <rtx_sequence *> (PATTERN (insn));
	  bool retval = false;

	  for (int i = 0; i < seq->len (); i++)
	    {
	      rtx_insn *this_insn = seq->insn (i);
	      rtx set = single_set (this_insn);

	      if (CALL_P (this_insn))
		code = CALL_INSN;
	      else if (JUMP_P (this_insn))
		{
		  if (INSN_ANNULLED_BRANCH_P (this_insn))
		    return false;
		  code = JUMP_INSN;
		}

	      if (set && reg_overlap_mentioned_p (reg, SET_SRC (set)))
		return false;
	      if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
		{
		  if (!MEM_P (SET_DEST (set)))
		    retval = true;
		  else
		    return false;
		}
	      if (set == NULL_RTX
		  && reg_overlap_mentioned_p (reg, PATTERN (this_insn)))
		return false;
	    }
	  if (retval)
	    return true;
	  else if (code == JUMP_INSN)
	    return false;
	}

      rtx set = single_set (insn);
      if (set && reg_overlap_mentioned_p (reg, SET_SRC (set)))
	return false;
      if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
	return !MEM_P (SET_DEST (set));
      if (set == NULL && reg_overlap_mentioned_p (reg, PATTERN (insn)))
	return false;

      if (code == CALL_INSN && call_used_regs[REGNO (reg)])
	return true;
    }
  return true;
}
static GTY(()) rtx t_reg_rtx;
rtx
get_t_reg_rtx (void)
{
  if (! t_reg_rtx)
    t_reg_rtx = gen_rtx_REG (SImode, T_REG);
  return t_reg_rtx;
}

static GTY(()) tree fpscr_values;

static void
emit_fpu_switch (rtx scratch, int index)
{
  if (fpscr_values == NULL)
    {
      tree t = build_index_type (integer_one_node);
      t = build_array_type (integer_type_node, t);
      t = build_decl (BUILTINS_LOCATION,
		      VAR_DECL, get_identifier ("__fpscr_values"), t);
      DECL_ARTIFICIAL (t) = 1;
      DECL_IGNORED_P (t) = 1;
      DECL_EXTERNAL (t) = 1;
      TREE_STATIC (t) = 1;
      TREE_PUBLIC (t) = 1;
      TREE_USED (t) = 1;

      fpscr_values = t;
    }

  rtx src = DECL_RTL (fpscr_values);
  if (!can_create_pseudo_p ())
    {
      emit_move_insn (scratch, XEXP (src, 0));
      if (index != 0)
	emit_insn (gen_addsi3 (scratch, scratch, GEN_INT (index * 4)));
      src = adjust_automodify_address (src, SImode, scratch, index * 4);
    }
  else
    src = adjust_address (src, SImode, index * 4);

  emit_insn (gen_lds_fpscr (src));
}
static rtx get_free_reg (HARD_REG_SET);

/* This function returns a register to use to load the address to load
   the fpscr from.  Currently it always returns r1 or r7, but when we are
   able to use pseudo registers after combine, or have a better mechanism
   for choosing a register, it should be done here.  */
/* REGS_LIVE is the liveness information for the point for which we
   need this allocation.  In some bare-bones exit blocks, r1 is live at the
   start.  We can even have all of r0..r3 being live:
   __complex__ long long f (double d) { if (d == 0) return 2; else return 3; }
   INSN before which new insns are placed with will clobber the register
   we return.  If a basic block consists only of setting the return value
   register to a pseudo and using that register, the return value is not
   live before or after this block, yet we'll insert our insns right in
   the middle.  */
static rtx
get_free_reg (HARD_REG_SET regs_live)
{
  if (! TEST_HARD_REG_BIT (regs_live, 1))
    return gen_rtx_REG (Pmode, 1);

  /* Hard reg 1 is live; since this is a small register classes target,
     there shouldn't be anything but a jump before the function end.  */
  gcc_assert (!TEST_HARD_REG_BIT (regs_live, 7));
  return gen_rtx_REG (Pmode, 7);
}

/* This function will set the fpscr from memory.
   MODE is the mode we are setting it to.  */
void
fpscr_set_from_mem (int mode, HARD_REG_SET regs_live)
{
  enum attr_fp_mode fp_mode = (enum attr_fp_mode) mode;
  enum attr_fp_mode norm_mode = ACTUAL_NORMAL_MODE (FP_MODE);

  rtx addr_reg = !can_create_pseudo_p () ? get_free_reg (regs_live) : NULL_RTX;
  emit_fpu_switch (addr_reg, fp_mode == norm_mode);
}
/* Is the given character a logical line separator for the assembler?  */
#ifndef IS_ASM_LOGICAL_LINE_SEPARATOR
#define IS_ASM_LOGICAL_LINE_SEPARATOR(C, STR) ((C) == ';')
#endif

static bool
sequence_insn_p (rtx_insn *insn)
{
  rtx_insn* prev = PREV_INSN (insn);
  if (prev == NULL)
    return false;

  rtx_insn* next = NEXT_INSN (prev);
  if (next == NULL)
    return false;

  return INSN_P (next) && GET_CODE (PATTERN (next)) == SEQUENCE;
}
int
sh_insn_length_adjustment (rtx_insn *insn)
{
  /* Instructions with unfilled delay slots take up an extra two bytes for
     the nop in the delay slot.  */
  if (((NONJUMP_INSN_P (insn)
	&& GET_CODE (PATTERN (insn)) != USE
	&& GET_CODE (PATTERN (insn)) != CLOBBER)
       || CALL_P (insn) || JUMP_P (insn))
      && ! sequence_insn_p (insn)
      && get_attr_needs_delay_slot (insn) == NEEDS_DELAY_SLOT_YES)
    return 2;

  /* Increase the insn length of a cbranch without a delay slot insn to
     force a delay slot which will be stuffed with a nop.  */
  if (TARGET_CBRANCH_FORCE_DELAY_SLOT && TARGET_SH2
      && JUMP_P (insn) && get_attr_type (insn) == TYPE_CBRANCH
      && ! sequence_insn_p (insn))
    return 2;

  /* sh-dsp parallel processing insn take four bytes instead of two.  */

  if (NONJUMP_INSN_P (insn))
    {
      int sum = 0;
      rtx body = PATTERN (insn);
      const char *templ;
      char c;
      bool maybe_label = true;

      if (GET_CODE (body) == ASM_INPUT)
	templ = XSTR (body, 0);
      else if (asm_noperands (body) >= 0)
	templ
	  = decode_asm_operands (body, NULL, NULL, NULL, NULL, NULL);
      else
	return 0;
      do
	{
	  int ppi_adjust = 0;

	  do
	    c = *templ++;
	  while (c == ' ' || c == '\t');
	  /* all sh-dsp parallel-processing insns start with p.
	     The only non-ppi sh insn starting with p is pref.
	     The only ppi starting with pr is prnd.  */
	  if ((c == 'p' || c == 'P') && strncasecmp ("re", templ, 2))
	    ppi_adjust = 2;
	  /* The repeat pseudo-insn expands to three insns, a total of
	     six bytes in size.  */
	  else if ((c == 'r' || c == 'R')
		   && ! strncasecmp ("epeat", templ, 5))
	    ppi_adjust = 4;
	  while (c && c != '\n'
		 && ! IS_ASM_LOGICAL_LINE_SEPARATOR (c, templ))
	    {
	      /* If this is a label, it is obviously not a ppi insn.  */
	      if (c == ':' && maybe_label)
		{
		  ppi_adjust = 0;
		  break;
		}
	      else if (c == '\'' || c == '"')
		maybe_label = false;
	      c = *templ++;
	    }
	  sum += ppi_adjust;
	  maybe_label = c != ':';
	}
      while (c);
      return sum;
    }
  return 0;
}
/* Return TRUE for a valid displacement for the REG+disp addressing
   with MODE.  */
bool
sh_legitimate_index_p (machine_mode mode, rtx op, bool consider_sh2a,
		       bool allow_zero)
{
  if (! CONST_INT_P (op))
    return false;

  const HOST_WIDE_INT offset = INTVAL (op);
  const int max_disp = sh_max_mov_insn_displacement (mode, consider_sh2a);
  const int align_mask = mov_insn_alignment_mask (mode, consider_sh2a);

  /* If the mode does not support any displacement, always return false.
     Even though an index of '0' is actually always valid, it will cause
     troubles when e.g. a DFmode move is split into two SFmode moves,
     where one SFmode move will have index '0' and the other move will
     have index '4'.  */
  if (!allow_zero && max_disp < 1)
    return false;

  return offset >= 0 && offset <= max_disp && (offset & align_mask) == 0;
}
/* Recognize an RTL expression that is a valid memory address for
   an instruction.
   The MODE argument is the machine mode for the MEM expression
   that wants to use this address.
   Allow  REG
	  REG+disp
	  REG+r0
	  REG++
	  --REG
	  GBR
	  GBR+disp  */
static bool
sh_legitimate_address_p (machine_mode mode, rtx x, bool strict)
{
  if (REG_P (x) && REGNO (x) == GBR_REG)
    return true;

  if (MAYBE_BASE_REGISTER_RTX_P (x, strict))
    return true;
  else if ((GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_DEC)
	   && MAYBE_BASE_REGISTER_RTX_P (XEXP (x, 0), strict))
    return true;
  else if (GET_CODE (x) == PLUS)
    {
      rtx xop0 = XEXP (x, 0);
      rtx xop1 = XEXP (x, 1);

      if (REG_P (xop0) && REGNO (xop0) == GBR_REG)
	return gbr_displacement (xop1, mode);

      if (GET_MODE_SIZE (mode) <= 8
	  && MAYBE_BASE_REGISTER_RTX_P (xop0, strict)
	  && sh_legitimate_index_p (mode, xop1, TARGET_SH2A, false))
	return true;

      if (GET_MODE_SIZE (mode) <= 4
	  || (TARGET_FPU_DOUBLE && TARGET_FMOVD && mode == DFmode))
	{
	  if (MAYBE_BASE_REGISTER_RTX_P (xop1, strict)
	      && MAYBE_INDEX_REGISTER_RTX_P (xop0, strict))
	    return true;
	  if (MAYBE_INDEX_REGISTER_RTX_P (xop1, strict)
	      && MAYBE_BASE_REGISTER_RTX_P (xop0, strict))
	    return true;
	}
    }

  return false;
}
/* Return TRUE if X references a SYMBOL_REF or LABEL_REF whose symbol
   isn't protected by a PIC unspec.  */
bool
nonpic_symbol_mentioned_p (rtx x)
{
  if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF
      || GET_CODE (x) == PC)
    return true;

  /* We don't want to look into the possible MEM location of a
     CONST_DOUBLE, since we're not going to use it, in general.  */
  if (GET_CODE (x) == CONST_DOUBLE)
    return false;

  if (GET_CODE (x) == UNSPEC
      && (XINT (x, 1) == UNSPEC_PIC
	  || XINT (x, 1) == UNSPEC_GOT
	  || XINT (x, 1) == UNSPEC_GOTOFF
	  || XINT (x, 1) == UNSPEC_GOTPLT
	  || XINT (x, 1) == UNSPEC_GOTTPOFF
	  || XINT (x, 1) == UNSPEC_DTPOFF
	  || XINT (x, 1) == UNSPEC_TPOFF
	  || XINT (x, 1) == UNSPEC_PLT
	  || XINT (x, 1) == UNSPEC_PCREL
	  || XINT (x, 1) == UNSPEC_SYMOFF
	  || XINT (x, 1) == UNSPEC_PCREL_SYMOFF
	  || XINT (x, 1) == UNSPEC_GOTFUNCDESC
	  || XINT (x, 1) == UNSPEC_GOTOFFFUNCDESC))
    return false;

  const char* fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (int i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
	{
	  for (int j = XVECLEN (x, i) - 1; j >= 0; j--)
	    if (nonpic_symbol_mentioned_p (XVECEXP (x, i, j)))
	      return true;
	}
      else if (fmt[i] == 'e' && nonpic_symbol_mentioned_p (XEXP (x, i)))
	return true;
    }

  return false;
}
/* Convert a non-PIC address in `orig' to a PIC address using @GOT or
   @GOTOFF in `reg'.  */
rtx
legitimize_pic_address (rtx orig, machine_mode mode ATTRIBUTE_UNUSED, rtx reg)
{
  if (tls_symbolic_operand (orig, Pmode) != TLS_MODEL_NONE)
    return orig;

  if (GET_CODE (orig) == LABEL_REF
      || (GET_CODE (orig) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (orig)))
    {
      if (reg == NULL_RTX)
	reg = gen_reg_rtx (Pmode);

      if (TARGET_FDPIC
	  && GET_CODE (orig) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (orig))
	{
	  /* Weak functions may be NULL which doesn't work with
	     GOTOFFFUNCDESC because the runtime offset is not known.  */
	  if (SYMBOL_REF_WEAK (orig))
	    emit_insn (gen_symGOTFUNCDESC2reg (reg, orig));
	  else
	    emit_insn (gen_symGOTOFFFUNCDESC2reg (reg, orig));
	}
      else if (TARGET_FDPIC
	       && (GET_CODE (orig) == LABEL_REF
		   || (GET_CODE (orig) == SYMBOL_REF && SYMBOL_REF_DECL (orig)
		       && (TREE_READONLY (SYMBOL_REF_DECL (orig))
			   || SYMBOL_REF_EXTERNAL_P (orig)
			   || DECL_SECTION_NAME(SYMBOL_REF_DECL (orig))))))
	/* In FDPIC, GOTOFF can only be used for writable data.  */
	emit_insn (gen_symGOT2reg (reg, orig));
      else
	emit_insn (gen_symGOTOFF2reg (reg, orig));
      return reg;
    }
  else if (GET_CODE (orig) == SYMBOL_REF)
    {
      if (reg == NULL_RTX)
	reg = gen_reg_rtx (Pmode);

      if (TARGET_FDPIC && SYMBOL_REF_FUNCTION_P (orig))
	emit_insn (gen_symGOTFUNCDESC2reg (reg, orig));
      else
	emit_insn (gen_symGOT2reg (reg, orig));
      return reg;
    }
  return orig;
}
/* Given a (logical) mode size and an offset in bytes, try to find the
   appropriate displacement value for a mov insn.  On SH the displacements
   are limited to max. 60 bytes for SImode, max. 30 bytes in HImode and max.
   15 bytes in QImode.  To compensate this we create a new base address by
   adding an adjustment value to it.

   If the originally requested offset is greater than 127 we prefer using
   values 124..127 over 128..131 to increase opportunities to use the
   add #imm, Rn insn.

   In some cases it is possible that a requested offset might seem unaligned
   or inappropriate for the mode size, like offset = 2 and mode size = 4.
   This is compensated by adjusting the base address so that the effective
   address of the displacement move insn will be aligned.

   This is not the best possible way of rebasing the base address, as it
   does not look at other present displacement addressings around it.
   In some cases this can create more base address adjustments than would
   actually be necessary.  */
struct disp_adjust
{
  rtx offset_adjust;
  rtx mov_disp;
};

static struct disp_adjust
sh_find_mov_disp_adjust (machine_mode mode, HOST_WIDE_INT offset)
{
  struct disp_adjust res = { NULL_RTX, NULL_RTX };

  /* Do not try to use SH2A's large displacements here, because this would
     effectively disable the small displacement insns.  */
  const int mode_sz = GET_MODE_SIZE (mode);
  const int mov_insn_sz = mov_insn_size (mode, false);
  const int max_disp = sh_max_mov_insn_displacement (mode, false);
  const int max_disp_next = max_disp + mov_insn_sz;
  HOST_WIDE_INT align_modifier = offset > 127 ? mov_insn_sz : 0;
  HOST_WIDE_INT offset_adjust;

  /* In some cases this actually does happen and we must check for it.  */
  if (mode_sz < 1 || mode_sz > 8 || max_disp < 1)
    return res;

  /* Keeps the previous behavior for QImode displacement addressing.
     This just decides how the offset is re-based.  Removing this special
     case will result in slightly bigger code on average, but it's not that
     bad actually.  */
  if (mov_insn_sz == 1)
    align_modifier = 0;

  offset_adjust = ((offset + align_modifier) & ~max_disp) - align_modifier;

  if (mode_sz + offset - offset_adjust <= max_disp_next)
    {
      res.offset_adjust = GEN_INT (offset_adjust);
      res.mov_disp = GEN_INT (offset - offset_adjust);
    }

  return res;
}
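
/* A worked example of the limits used above (classic non-SH2A mov
   encodings, where the 4-bit displacement field is scaled by the access
   size):

     QImode: 1 * 15 = 15 bytes max
     HImode: 2 * 15 = 30 bytes max
     SImode: 4 * 15 = 60 bytes max

   E.g. for mode = SImode and offset = 68 the code above picks
   offset_adjust = 64 and mov_disp = 4, so the access can be done as
   add #64,Rn followed by a mov.l with displacement 4.  */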
/* Try to modify an illegitimate address and make it legitimate.
   If we find one, return the new, valid address.
   Otherwise, return the original address.  */
static rtx
sh_legitimize_address (rtx x, rtx oldx, machine_mode mode)
{
  if (flag_pic)
    x = legitimize_pic_address (oldx, mode, NULL_RTX);

  if ((TARGET_FPU_DOUBLE && mode == DFmode)
      || (TARGET_SH2E && mode == SFmode))
    return x;

  if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1))
      && BASE_REGISTER_RTX_P (XEXP (x, 0)))
    {
      struct disp_adjust adj = sh_find_mov_disp_adjust (mode,
							INTVAL (XEXP (x, 1)));

      if (adj.offset_adjust != NULL_RTX && adj.mov_disp != NULL_RTX)
	{
	  rtx sum = expand_binop (Pmode, add_optab, XEXP (x, 0),
				  adj.offset_adjust, NULL_RTX, 0,
				  OPTAB_LIB_WIDEN);
	  return gen_rtx_PLUS (Pmode, sum, adj.mov_disp);
	}
    }
  return x;
}
/* Attempt to replace *p, which is an address that needs reloading, with
   a valid memory address for an operand of mode MODE.
   Like for sh_legitimize_address, for the SH we try to get a normal form
   of the address.  That will allow inheritance of the address reloads.  */
bool
sh_legitimize_reload_address (rtx *p, machine_mode mode, int opnum,
			      int itype)
{
  enum reload_type type = (enum reload_type) itype;
  const int mode_sz = GET_MODE_SIZE (mode);

  if (sh_lra_p ())
    return false;

  if (GET_CODE (*p) == PLUS && CONST_INT_P (XEXP (*p, 1))
      && MAYBE_BASE_REGISTER_RTX_P (XEXP (*p, 0), true))
    {
      const HOST_WIDE_INT offset = INTVAL (XEXP (*p, 1));
      struct disp_adjust adj = sh_find_mov_disp_adjust (mode, offset);

      if (TARGET_SH2A && mode == DFmode && (offset & 0x7))
	{
	  push_reload (*p, NULL_RTX, p, NULL,
		       BASE_REG_CLASS, Pmode, VOIDmode, 0, 0, opnum, type);
	  return true;
	}

      if (TARGET_SH2E && mode == SFmode)
	{
	  *p = copy_rtx (*p);
	  push_reload (*p, NULL_RTX, p, NULL,
		       BASE_REG_CLASS, Pmode, VOIDmode, 0, 0, opnum, type);
	  return true;
	}

      /* FIXME: Do not allow to legitimize QImode and HImode displacement
	 moves because then reload has a problem figuring the constraint
	 that the move insn target/source reg must be R0.
	 Or maybe some handling is wrong in sh_secondary_reload for this
	 to work properly? */
      if ((mode_sz == 4 || mode_sz == 8)
	  && ! (TARGET_SH4 && mode == DFmode)
	  && adj.offset_adjust != NULL_RTX && adj.mov_disp != NULL_RTX)
	{
	  rtx sum = gen_rtx_PLUS (Pmode, XEXP (*p, 0), adj.offset_adjust);
	  *p = gen_rtx_PLUS (Pmode, sum, adj.mov_disp);
	  push_reload (sum, NULL_RTX, &XEXP (*p, 0), NULL,
		       BASE_REG_CLASS, Pmode, VOIDmode, 0, 0, opnum, type);
	  return true;
	}
    }

  /* We must re-recognize what we created before.  */
  if (GET_CODE (*p) == PLUS
      && (mode_sz == 4 || mode_sz == 8)
      && GET_CODE (XEXP (*p, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (*p, 0), 1))
      && MAYBE_BASE_REGISTER_RTX_P (XEXP (XEXP (*p, 0), 0), true)
      && CONST_INT_P (XEXP (*p, 1))
      && ! (TARGET_SH2E && mode == SFmode))
    {
      /* Because this address is so complex, we know it must have
	 been created by LEGITIMIZE_RELOAD_ADDRESS before; thus,
	 it is already unshared, and needs no further unsharing.  */
      push_reload (XEXP (*p, 0), NULL_RTX, &XEXP (*p, 0), NULL,
		   BASE_REG_CLASS, Pmode, VOIDmode, 0, 0, opnum, type);
      return true;
    }

  return false;
}
/* In the name of slightly smaller debug output, and to cater to
   general assembler lossage, recognize various UNSPEC sequences
   and turn them back into a direct symbol reference.  */
static rtx
sh_delegitimize_address (rtx orig_x)
{
  orig_x = delegitimize_mem_from_attrs (orig_x);

  rtx x = orig_x;
  if (MEM_P (x))
    x = XEXP (x, 0);
  if (GET_CODE (x) == CONST)
    {
      rtx y = XEXP (x, 0);
      if (GET_CODE (y) == UNSPEC)
	{
	  if (XINT (y, 1) == UNSPEC_GOT
	      || XINT (y, 1) == UNSPEC_GOTOFF
	      || XINT (y, 1) == UNSPEC_SYMOFF)
	    return XVECEXP (y, 0, 0);
	  else if (XINT (y, 1) == UNSPEC_PCREL_SYMOFF)
	    {
	      if (GET_CODE (XVECEXP (y, 0, 0)) == CONST)
		{
		  rtx symplt = XEXP (XVECEXP (y, 0, 0), 0);

		  if (GET_CODE (symplt) == UNSPEC
		      && (XINT (symplt, 1) == UNSPEC_PLT
			  || XINT (symplt, 1) == UNSPEC_PCREL))
		    return XVECEXP (symplt, 0, 0);
		}
	    }
	}
    }

  return orig_x;
}
/* Mark the use of a constant in the literal table.  If the constant
   has multiple labels, make it unique.  */
static rtx
mark_constant_pool_use (rtx x)
{
  if (x == NULL_RTX)
    return x;

  switch (GET_CODE (x))
    {
    case LABEL_REF:
      x = XEXP (x, 0);
      /* FALLTHRU */
    case CODE_LABEL:
      break;
    default:
      return x;
    }

  /* Get the first label in the list of labels for the same constant
     and delete the other labels in the list.  */
  rtx_insn* lab = as_a <rtx_insn*> (x);
  for (rtx_insn* insn = PREV_INSN (lab); insn; insn = PREV_INSN (insn))
    {
      if (!LABEL_P (insn)
	  || LABEL_REFS (insn) != NEXT_INSN (insn))
	break;
      lab = insn;
    }

  for (rtx insn = LABEL_REFS (lab); insn; insn = LABEL_REFS (insn))
    as_a<rtx_insn *> (insn)->set_deleted ();

  /* Mark constants in a window.  */
  for (rtx_insn* insn = NEXT_INSN (as_a <rtx_insn *> (x)); insn;
       insn = NEXT_INSN (insn))
    {
      if (!NONJUMP_INSN_P (insn))
	continue;

      rtx pattern = PATTERN (insn);
      if (GET_CODE (pattern) != UNSPEC_VOLATILE)
	continue;

      switch (XINT (pattern, 1))
	{
	case UNSPECV_CONST2:
	case UNSPECV_CONST4:
	case UNSPECV_CONST8:
	  XVECEXP (pattern, 0, 1) = const1_rtx;
	  break;
	case UNSPECV_WINDOW_END:
	  if (XVECEXP (pattern, 0, 0) == x)
	    return lab;
	  break;
	case UNSPECV_CONST_END:
	  return lab;
	default:
	  break;
	}
    }

  return lab;
}
/* Return true if it's possible to redirect BRANCH1 to the destination
   of an unconditional jump BRANCH2.  We only want to do this if the
   resulting branch will have a short displacement.  */
static bool
sh_can_follow_jump (const rtx_insn *branch1, const rtx_insn *branch2)
{
  /* Don't follow if BRANCH2 is possible to be a jump crossing between
     hot and cold partitions.  */
  if (flag_reorder_blocks_and_partition
      && simplejump_p (branch2)
      && CROSSING_JUMP_P (branch2))
    return false;

  if (flag_expensive_optimizations && simplejump_p (branch2))
    {
      rtx dest = XEXP (SET_SRC (single_set (branch2)), 0);
      rtx_insn *insn;
      int distance;

      for (distance = 0, insn = NEXT_INSN (branch1);
	   insn && distance < 256;
	   insn = PREV_INSN (insn))
	{
	  if (insn == dest)
	    return true;
	  else
	    distance += get_attr_length (insn);
	}
      for (distance = 0, insn = NEXT_INSN (branch1);
	   insn && distance < 256;
	   insn = NEXT_INSN (insn))
	{
	  if (insn == dest)
	    return true;
	  else
	    distance += get_attr_length (insn);
	}
    }
  return false;
}
/* Return nonzero if register old_reg can be renamed to register new_reg.  */
bool
sh_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED,
			 unsigned int new_reg)
{
  /* Interrupt functions can only use registers that have already been
     saved by the prologue, even if they would normally be
     call-clobbered.  */
  if (sh_cfun_interrupt_handler_p () && !df_regs_ever_live_p (new_reg))
    return false;

  return true;
}
/* Function to update the integer COST
   based on the relationship between INSN that is dependent on
   DEP_INSN through the dependence LINK.  The default is to make no
   adjustment to COST.  This can be used for example to specify to
   the scheduler that an output- or anti-dependence does not incur
   the same cost as a data-dependence.  The return value should be
   the new value for COST.  */
static int
sh_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
		unsigned int)
{
  rtx reg, use_pat;

  if (dep_type == 0)
    {
      if (recog_memoized (insn) < 0
	  || recog_memoized (dep_insn) < 0)
	return cost;

      rtx dep_set = single_set (dep_insn);

      /* The latency that we specify in the scheduling description refers
	 to the actual output, not to an auto-increment register; for that,
	 the latency is one.  */
      if (dep_set && MEM_P (SET_SRC (dep_set)) && cost > 1)
	{
	  rtx set = single_set (insn);

	  if (set
	      && !reg_mentioned_p (SET_DEST (dep_set), SET_SRC (set))
	      && (!MEM_P (SET_DEST (set))
		  || !reg_mentioned_p (SET_DEST (dep_set),
				       XEXP (SET_DEST (set), 0))))
	    cost = 1;
	}
      /* The only input for a call that is timing-critical is the
	 function's address.  */
      if (CALL_P (insn))
	{
	  rtx call = get_call_rtx_from (insn);
	  if (call
	      /* sibcalli_thunk uses a symbol_ref in an unspec.  */
	      && (GET_CODE (XEXP (XEXP (call, 0), 0)) == UNSPEC
		  || ! reg_set_p (XEXP (XEXP (call, 0), 0), dep_insn)))
	    cost -= TARGET_SH4_300 ? 3 : 6;
	}
      /* Likewise, the most timing critical input for an sfuncs call
	 is the function address.  However, sfuncs typically start
	 using their arguments pretty quickly.
	 Assume a four cycle delay for SH4 before they are needed.
	 Cached ST40-300 calls are quicker, so assume only a one
	 cycle delay there.
	 ??? Maybe we should encode the delays till input registers
	 are needed by sfuncs into the sfunc call insn.  */
      /* All sfunc calls are parallels with at least four components.
	 Exploit this to avoid unnecessary calls to sfunc_uses_reg.  */
      else if (GET_CODE (PATTERN (insn)) == PARALLEL
	       && XVECLEN (PATTERN (insn), 0) >= 4
	       && (reg = sfunc_uses_reg (insn)))
	{
	  if (! reg_set_p (reg, dep_insn))
	    cost -= TARGET_SH4_300 ? 1 : 4;
	}
      if (TARGET_HARD_SH4 && !TARGET_SH4_300)
	{
	  attr_type dep_type = get_attr_type (dep_insn);
	  attr_type type;
	  if (dep_type == TYPE_FLOAD || dep_type == TYPE_PCFLOAD)
	    cost--;
	  else if ((dep_type == TYPE_LOAD_SI || dep_type == TYPE_PCLOAD_SI)
		   && (type = get_attr_type (insn)) != TYPE_CALL
		   && type != TYPE_SFUNC)
	    cost--;
	  /* When the preceding instruction loads the shift amount of
	     the following SHAD/SHLD, the latency of the load is increased
	     by 1 cycle.  */
	  if (get_attr_type (insn) == TYPE_DYN_SHIFT
	      && get_attr_any_int_load (dep_insn) == ANY_INT_LOAD_YES
	      && reg_overlap_mentioned_p (SET_DEST (dep_set),
					  XEXP (SET_SRC (single_set (insn)),
						1)))
	    cost++;
	  /* When an LS group instruction with a latency of less than
	     3 cycles is followed by a double-precision floating-point
	     instruction, FIPR, or FTRV, the latency of the first
	     instruction is increased to 3 cycles.  */
	  else if (cost < 3
		   && get_attr_insn_class (dep_insn) == INSN_CLASS_LS_GROUP
		   && get_attr_dfp_comp (insn) == DFP_COMP_YES)
	    cost = 3;
	  /* The lsw register of a double-precision computation is ready one
	     cycle earlier.  */
	  else if (reload_completed
		   && get_attr_dfp_comp (dep_insn) == DFP_COMP_YES
		   && (use_pat = single_set (insn))
		   && ! regno_use_in (REGNO (SET_DEST (single_set (dep_insn))),
				      SET_SRC (use_pat)))
	    cost -= 1;

	  if (get_attr_any_fp_comp (dep_insn) == ANY_FP_COMP_YES
	      && get_attr_late_fp_use (insn) == LATE_FP_USE_YES)
	    cost -= 1;
	}
      else if (TARGET_SH4_300)
	{
	  /* Stores need their input register two cycles later.  */
	  attr_type type;
	  if (dep_set && cost >= 1
	      && ((type = get_attr_type (insn)) == TYPE_STORE
		  || type == TYPE_PSTORE
		  || type == TYPE_FSTORE || type == TYPE_MAC_MEM))
	    {
	      rtx set = single_set (insn);

	      if (!reg_mentioned_p (SET_SRC (set), XEXP (SET_DEST (set), 0))
		  && rtx_equal_p (SET_SRC (set), SET_DEST (dep_set)))
		{
		  cost -= 2;
		  /* But don't reduce the cost below 1 if the address depends
		     on a side effect of dep_insn.  */
		  if (cost < 1
		      && modified_in_p (XEXP (SET_DEST (set), 0), dep_insn))
		    cost = 1;
		}
	    }
	}
    }
  /* An anti-dependence penalty of two applies if the first insn is a double
     precision fadd / fsub / fmul.  */
  else if (!TARGET_SH4_300
	   && dep_type == REG_DEP_ANTI
	   && recog_memoized (dep_insn) >= 0
	   && (get_attr_type (dep_insn) == TYPE_DFP_ARITH
	       || get_attr_type (dep_insn) == TYPE_DFP_MUL)
	   /* A lot of alleged anti-flow dependences are fake,
	      so check this one is real.  */
	   && flow_dependent_p (dep_insn, insn))
    cost = 2;

  return cost;
}
/* Check if INSN is flow-dependent on DEP_INSN.  Can also be used to check
   if DEP_INSN is anti-flow dependent on INSN.  */
static bool
flow_dependent_p (rtx_insn *insn, rtx_insn *dep_insn)
{
  rtx tmp = PATTERN (insn);

  note_stores (dep_insn, flow_dependent_p_1, &tmp);
  return tmp == NULL_RTX;
}

/* A helper function for flow_dependent_p called through note_stores.  */
static void
flow_dependent_p_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  rtx * pinsn = (rtx *) data;

  if (*pinsn && reg_referenced_p (x, *pinsn))
    *pinsn = NULL_RTX;
}

/* For use by sh_allocate_initial_value.  Note that sh.md contains some
   'special function' patterns (type sfunc) that clobber pr, but that
   do not look like function calls to leaf_function_p.  Hence we must
   do this extra check.  */
static int
sh_pr_n_sets (void)
{
  return DF_REG_DEF_COUNT (PR_REG);
}
/* Return where to allocate pseudo for a given hard register initial
   value.  */
static rtx
sh_allocate_initial_value (rtx hard_reg)
{
  if (REGNO (hard_reg) == PR_REG)
    {
      if (crtl->is_leaf && ! sh_pr_n_sets ())
	return hard_reg;
      else
	return gen_frame_mem (Pmode, return_address_pointer_rtx);
    }

  return NULL_RTX;
}

/* This function returns "2" to indicate dual issue for the SH4
   processor.  To be used by the DFA pipeline description.  */
static int
sh_issue_rate (void)
{
  if (TARGET_SUPERSCALAR)
    return 2;
  else
    return 1;
}
/* Functions for ready queue reordering for sched1.  */

/* Get weight for mode for a set x.  */
static short
find_set_regmode_weight (rtx x, machine_mode mode)
{
  if (GET_CODE (x) == CLOBBER && register_operand (SET_DEST (x), mode))
    return 1;
  if (GET_CODE (x) == SET && register_operand (SET_DEST (x), mode))
    {
      if (REG_P (SET_DEST (x)))
	{
	  if (!reg_mentioned_p (SET_DEST (x), SET_SRC (x)))
	    return 1;
	  else
	    return 0;
	}
      return 1;
    }
  return 0;
}

/* Get regmode weight for insn.  */
static short
find_insn_regmode_weight (rtx insn, machine_mode mode)
{
  /* Increment weight for each register born here.  */
  rtx x = PATTERN (insn);
  short reg_weight = find_set_regmode_weight (x, mode);
  if (GET_CODE (x) == PARALLEL)
    {
      int j;
      for (j = XVECLEN (x, 0) - 1; j >= 0; j--)
	{
	  x = XVECEXP (PATTERN (insn), 0, j);
	  reg_weight += find_set_regmode_weight (x, mode);
	}
    }
  /* Decrement weight for each register that dies here.  */
  for (x = REG_NOTES (insn); x; x = XEXP (x, 1))
    {
      if (REG_NOTE_KIND (x) == REG_DEAD || REG_NOTE_KIND (x) == REG_UNUSED)
	{
	  rtx note = XEXP (x, 0);
	  if (REG_P (note) && GET_MODE (note) == mode)
	    reg_weight--;
	}
    }
  return reg_weight;
}
/* Calculate regmode weights for all insns of a basic block.  */
static void
find_regmode_weight (basic_block b, machine_mode mode)
{
  rtx_insn *insn, *next_tail, *head, *tail;

  get_ebb_head_tail (b, b, &head, &tail);
  next_tail = NEXT_INSN (tail);

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      /* Handle register life information.  */
      if (!INSN_P (insn))
	continue;

      if (mode == SFmode)
	INSN_REGMODE_WEIGHT (insn, mode) =
	  find_insn_regmode_weight (insn, mode)
	  + 2 * find_insn_regmode_weight (insn, DFmode);
      else if (mode == SImode)
	INSN_REGMODE_WEIGHT (insn, mode) =
	  find_insn_regmode_weight (insn, mode)
	  + 2 * find_insn_regmode_weight (insn, DImode);
    }
}
/* Comparison function for ready queue sorting.  */
static int
rank_for_reorder (const void *x, const void *y)
{
  rtx_insn *tmp = *(rtx_insn * const *) y;
  rtx_insn *tmp2 = *(rtx_insn * const *) x;

  /* The insn in a schedule group should be issued the first.  */
  if (SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
    return SCHED_GROUP_P (tmp2) ? 1 : -1;

  /* If insns are equally good, sort by INSN_LUID (original insn order);
     this minimizes instruction movement, thus minimizing sched's effect on
     register pressure.  */
  return INSN_LUID (tmp) - INSN_LUID (tmp2);
}

/* Resort the array A in which only element at index N may be out of order.  */
static void
swap_reorder (rtx_insn **a, int n)
{
  rtx_insn *insn = a[n - 1];
  int i = n - 2;

  while (i >= 0 && rank_for_reorder (a + i, &insn) >= 0)
    {
      a[i + 1] = a[i];
      i -= 1;
    }
  a[i + 1] = insn;
}

/* Sort the ready list by ascending priority.  */
static void
ready_reorder (rtx_insn **ready, int nready)
{
  if (nready == 2)
    swap_reorder (ready, nready);
  else if (nready > 2)
    qsort (ready, nready, sizeof (rtx_insn *), rank_for_reorder);
}
/* Count life regions of r0 for a block.  */
static int
find_r0_life_regions (basic_block b)
{
  bool live;
  int set;
  int death = 0;

  if (REGNO_REG_SET_P (df_get_live_in (b), R0_REG))
    {
      set = 1;
      live = true;
    }
  else
    {
      set = 0;
      live = false;
    }

  rtx_insn* insn = BB_HEAD (b);
  rtx_insn* end = BB_END (b);
  rtx r0_reg = gen_rtx_REG (SImode, R0_REG);
  while (1)
    {
      if (INSN_P (insn))
	{
	  if (find_regno_note (insn, REG_DEAD, R0_REG))
	    {
	      death++;
	      live = false;
	    }

	  rtx pset;
	  if (!live
	      && (pset = single_set (insn))
	      && reg_overlap_mentioned_p (r0_reg, SET_DEST (pset))
	      && !find_regno_note (insn, REG_UNUSED, R0_REG))
	    {
	      set++;
	      live = true;
	    }
	}
      if (insn == end)
	break;
      insn = NEXT_INSN (insn);
    }
  return set - death;
}
/* Calculate regmode weights for all insns of all basic blocks.  */
static void
sh_md_init_global (FILE *dump ATTRIBUTE_UNUSED,
		   int verbose ATTRIBUTE_UNUSED,
		   int old_max_uid)
{
  basic_block b;

  regmode_weight[0] = (short *) xcalloc (old_max_uid, sizeof (short));
  regmode_weight[1] = (short *) xcalloc (old_max_uid, sizeof (short));
  r0_life_regions = 0;

  FOR_EACH_BB_REVERSE_FN (b, cfun)
    {
      find_regmode_weight (b, SImode);
      find_regmode_weight (b, SFmode);
      if (!reload_completed)
	r0_life_regions += find_r0_life_regions (b);
    }

  CURR_REGMODE_PRESSURE (SImode) = 0;
  CURR_REGMODE_PRESSURE (SFmode) = 0;
}

static void
sh_md_finish_global (FILE *dump ATTRIBUTE_UNUSED,
		     int verbose ATTRIBUTE_UNUSED)
{
  if (regmode_weight[0])
    {
      free (regmode_weight[0]);
      regmode_weight[0] = NULL;
    }
  if (regmode_weight[1])
    {
      free (regmode_weight[1]);
      regmode_weight[1] = NULL;
    }
}
/* Cache the can_issue_more so that we can return it from reorder2.  Also,
   keep count of register pressures on SImode and SFmode.  */
static int
sh_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
		   int sched_verbose ATTRIBUTE_UNUSED,
		   rtx_insn *insn,
		   int can_issue_more)
{
  if (GET_CODE (PATTERN (insn)) != USE
      && GET_CODE (PATTERN (insn)) != CLOBBER)
    cached_can_issue_more = can_issue_more - 1;
  else
    cached_can_issue_more = can_issue_more;

  if (reload_completed)
    return cached_can_issue_more;

  CURR_REGMODE_PRESSURE (SImode) += INSN_REGMODE_WEIGHT (insn, SImode);
  CURR_REGMODE_PRESSURE (SFmode) += INSN_REGMODE_WEIGHT (insn, SFmode);

  return cached_can_issue_more;
}

static void
sh_md_init (FILE *dump ATTRIBUTE_UNUSED,
	    int verbose ATTRIBUTE_UNUSED,
	    int veclen ATTRIBUTE_UNUSED)
{
  CURR_REGMODE_PRESSURE (SImode) = 0;
  CURR_REGMODE_PRESSURE (SFmode) = 0;
}
/* Some magic numbers.  */
/* Pressure on register r0 can lead to spill failures, so avoid sched1 for
   functions that already have high pressure on r0.  */
#define R0_MAX_LIFE_REGIONS 2
/* Register pressure thresholds for SImode and SFmode registers.  */
#define SIMODE_MAX_WEIGHT 5
#define SFMODE_MAX_WEIGHT 10

/* Return true if the pressure is high for MODE.  */
static bool
high_pressure (machine_mode mode)
{
  /* Pressure on register r0 can lead to spill failures, so avoid sched1 for
     functions that already have high pressure on r0.  */
  if (r0_life_regions >= R0_MAX_LIFE_REGIONS)
    return true;

  if (mode == SFmode)
    return (CURR_REGMODE_PRESSURE (SFmode) > SFMODE_MAX_WEIGHT);
  else
    return (CURR_REGMODE_PRESSURE (SImode) > SIMODE_MAX_WEIGHT);
}
/* Reorder ready queue if register pressure is high.  */
static int
sh_reorder (FILE *dump ATTRIBUTE_UNUSED,
	    int sched_verbose ATTRIBUTE_UNUSED,
	    rtx_insn **ready,
	    int *n_readyp,
	    int clock_var ATTRIBUTE_UNUSED)
{
  if (reload_completed)
    return sh_issue_rate ();

  if (high_pressure (SFmode) || high_pressure (SImode))
    {
      ready_reorder (ready, *n_readyp);
    }

  return sh_issue_rate ();
}

/* Skip cycles if the current register pressure is high.  */
static int
sh_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
	     int sched_verbose ATTRIBUTE_UNUSED,
	     rtx_insn **ready ATTRIBUTE_UNUSED,
	     int *n_readyp ATTRIBUTE_UNUSED,
	     int clock_var ATTRIBUTE_UNUSED)
{
  if (reload_completed)
    return cached_can_issue_more;

  if (high_pressure (SFmode) || high_pressure (SImode))
    skip_cycles = 1;

  return cached_can_issue_more;
}
10000 #define MAX_SKIPS 8
10003 sh_dfa_new_cycle (FILE *sched_dump ATTRIBUTE_UNUSED
,
10004 int sched_verbose ATTRIBUTE_UNUSED
,
10005 rtx_insn
*insn ATTRIBUTE_UNUSED
,
10006 int last_clock_var
,
10010 if (reload_completed
)
10015 if ((clock_var
- last_clock_var
) < MAX_SKIPS
)
10020 /* If this is the last cycle we are skipping, allow reordering of R. */
10021 if ((clock_var
- last_clock_var
) == MAX_SKIPS
)
static bool
sh_ms_bitfield_layout_p (const_tree record_type ATTRIBUTE_UNUSED)
{
  return TARGET_HITACHI || sh_attr_renesas_p (record_type);
}

/*
   On the SH1..SH4, the trampoline looks like
   2 0002 D202			mov.l	l2,r2
   1 0000 D301			mov.l	l1,r3
   3 0004 422B			jmp	@r2
   4 0006 0009			nop
   5 0008 00000000	l1:	.long	area
   6 000c 00000000	l2:	.long	function

   FDPIC needs a form that includes a function descriptor and
   code to load the GOT register:
   0 0000 00000000		.long	l0
   1 0004 00000000		.long	gotval
   2 0008 D302		l0:	mov.l	l1,r3
   3 000a D203			mov.l	l2,r2
   4 000c 6122			mov.l	@r2,r1
   5 000e 5C21			mov.l	@(4,r2),r12
   6 0010 412B			jmp	@r1
   7 0012 0009			nop
   8 0014 00000000	l1:	.long	area
   9 0018 00000000	l2:	.long	function

   SH5 (compact) uses r1 instead of r3 for the static chain.  */
/* Emit insns to store a value at memory address + offset.  */
static void
sh_emit_storesi (rtx addr, HOST_WIDE_INT offset, rtx value)
{
  gcc_assert ((offset & 3) == 0);
  emit_move_insn (offset == 0
		  ? change_address (addr, SImode, NULL_RTX)
		  : adjust_address (addr, SImode, offset), value);
}

/* Emit insns to store w0 at addr + offset and w1 at addr + offset + 2.  */
static void
sh_emit_storehi (rtx addr, HOST_WIDE_INT offset, uint16_t w0, uint16_t w1)
{
  sh_emit_storesi (addr, offset, gen_int_mode (TARGET_LITTLE_ENDIAN
					       ? (w0 | (w1 << 16))
					       : (w1 | (w0 << 16)), SImode));
}
/* Emit RTL insns to initialize the variable parts of a trampoline.
   FNADDR is an RTX for the address of the function's pure code.
   CXT is an RTX for the static chain value for the function.  */
static void
sh_trampoline_init (rtx tramp_mem, tree fndecl, rtx cxt)
{
  rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
  rtx tramp = force_reg (Pmode, XEXP (tramp_mem, 0));

  if (TARGET_FDPIC)
    {
      rtx a = force_reg (Pmode, plus_constant (Pmode, XEXP (tramp_mem, 0), 8));

      sh_emit_storesi (tramp_mem, 0, a);
      sh_emit_storesi (tramp_mem, 4, sh_get_fdpic_reg_initial_val ());

      sh_emit_storehi (tramp_mem, 8, 0xd302, 0xd203);
      sh_emit_storehi (tramp_mem, 12, 0x6122, 0x5c21);
      sh_emit_storehi (tramp_mem, 16, 0x412b, 0x0009);

      sh_emit_storesi (tramp_mem, 20, cxt);
      sh_emit_storesi (tramp_mem, 24, fnaddr);
    }
  else
    {
      sh_emit_storehi (tramp_mem, 0, 0xd202, 0xd301);
      sh_emit_storehi (tramp_mem, 4, 0x422b, 0x0009);

      sh_emit_storesi (tramp_mem, 8, cxt);
      sh_emit_storesi (tramp_mem, 12, fnaddr);
    }
  if (TARGET_HARD_SH4)
    {
      if (!TARGET_INLINE_IC_INVALIDATE
	  || (!(TARGET_SH4A || TARGET_SH4_300) && TARGET_USERMODE))
	emit_library_call (function_symbol (NULL, "__ic_invalidate",
					    FUNCTION_ORDINARY).sym,
			   LCT_NORMAL, VOIDmode, tramp, SImode);
      else
	emit_insn (gen_ic_invalidate_line (tramp));
    }
}
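
/* Illustration (GNU C user code, not part of this file): taking the
   address of a nested function forces a trampoline like the one
   initialized above, since the function pointer must carry the static
   chain:

     int apply (int (*f) (int), int x) { return f (x); }

     int outer (int k)
     {
       int add_k (int v) { return v + k; }
       return apply (add_k, 1);
     }
*/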
/* On SH5, trampolines are SHmedia code, so add 1 to the address.  */
static rtx
sh_trampoline_adjust_address (rtx tramp)
{
  return tramp;
}

/* If PIC, we cannot make sibling calls to global functions
   because the PLT requires r12 to be live.  */
static bool
sh_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
{
  return (1
	  && ! sh_cfun_interrupt_handler_p ()
	  && (! flag_pic || TARGET_FDPIC
	      || (decl && ! (TREE_PUBLIC (decl) || DECL_WEAK (decl)))
	      || (decl && DECL_VISIBILITY (decl) != VISIBILITY_DEFAULT)));
}
/* Expand to appropriate sym*_label2reg for SYM and SIBCALL_P.  */
void
sh_expand_sym_label2reg (rtx reg, rtx sym, rtx lab, bool sibcall_p)
{
  const_tree decl = SYMBOL_REF_DECL (sym);
  bool is_weak = (decl && DECL_P (decl) && DECL_WEAK (decl));

  if (!is_weak && SYMBOL_REF_LOCAL_P (sym))
    emit_insn (gen_sym_label2reg (reg, sym, lab));
  else if (sibcall_p && SYMBOL_REF_LOCAL_P (sym))
    emit_insn (gen_symPCREL_label2reg (reg, sym, lab));
  else
    emit_insn (gen_symPLT_label2reg (reg, sym, lab));
}
/* Machine specific built-in functions.  */

struct builtin_description
{
  bool (* const is_enabled) (void);
  const enum insn_code icode;
  const char *const name;
  int signature;
  tree fndecl;
};

/* This function can be used if there are any built-ins that are not for
   SHmedia.  It's commented out to avoid the defined-but-unused warning.  */
static bool
sh1_builtin_p (void)
{
  return TARGET_SH1;
}

/* describe number and signedness of arguments; arg[0] == result
   (1: unsigned, 2: signed, 4: don't care, 8: pointer, 0: no argument) */
/* 9: 64-bit pointer, 10: 32-bit pointer */
static const char signature_args[][4] =
{
#define SH_BLTIN_V2SI2 0
  { 4, 4 },
#define SH_BLTIN_V4HI2 1
  { 4, 4 },
#define SH_BLTIN_V2SI3 2
  { 4, 4, 4 },
#define SH_BLTIN_V4HI3 3
  { 4, 4, 4 },
#define SH_BLTIN_V8QI3 4
  { 4, 4, 4 },
#define SH_BLTIN_MAC_HISI 5
  { 1, 4, 4, 1 },
#define SH_BLTIN_SH_HI 6
  { 4, 4, 1 },
#define SH_BLTIN_SH_SI 7
  { 4, 4, 1 },
#define SH_BLTIN_V4HI2V2SI 8
  { 4, 4, 4 },
#define SH_BLTIN_V4HI2V8QI 9
  { 4, 4, 4 },
#define SH_BLTIN_SISF 10
  { 4, 2 },
#define SH_BLTIN_LDUA_L 11
  { 2, 10 },
#define SH_BLTIN_LDUA_Q 12
  { 1, 10 },
#define SH_BLTIN_STUA_L 13
  { 0, 10, 2 },
#define SH_BLTIN_STUA_Q 14
  { 0, 10, 1 },
#define SH_BLTIN_LDUA_L64 15
  { 2, 9 },
#define SH_BLTIN_LDUA_Q64 16
  { 1, 9 },
#define SH_BLTIN_STUA_L64 17
  { 0, 9, 2 },
#define SH_BLTIN_STUA_Q64 18
  { 0, 9, 1 },
#define SH_BLTIN_NUM_SHARED_SIGNATURES 19
#define SH_BLTIN_2 19
#define SH_BLTIN_SU 19
  { 1, 2 },
#define SH_BLTIN_3 20
#define SH_BLTIN_SUS 20
  { 2, 2, 1 },
#define SH_BLTIN_PSSV 21
  { 0, 8, 2, 2 },
#define SH_BLTIN_XXUU 22
#define SH_BLTIN_UUUU 22
  { 1, 1, 1, 1 },
#define SH_BLTIN_PV 23
  { 0, 8 },
#define SH_BLTIN_VP 24
  { 8, 0 },
#define SH_BLTIN_UV 25
  { 1, 0 },
#define SH_BLTIN_VU 26
  { 0, 1 },
};

/* mcmv: operands considered unsigned.  */
/* mmulsum_wq, msad_ubq: result considered unsigned long long.  */
/* mperm: control value considered unsigned int.  */
/* mshalds, mshard, mshards, mshlld, mshlrd: shift count is unsigned int.  */
/* mshards_q: returns signed short.  */
/* nsb: takes long long arg, returns unsigned char.  */
static struct builtin_description bdesc[] =
{
  { sh1_builtin_p,
    CODE_FOR_sts_fpscr, "__builtin_sh_get_fpscr", SH_BLTIN_UV, 0 },
  { sh1_builtin_p,
    CODE_FOR_set_fpscr, "__builtin_sh_set_fpscr", SH_BLTIN_VU, 0 },
};

static tree sh_builtin_get_fpscr;
static tree sh_builtin_set_fpscr;
static void
sh_init_builtins (void)
{
  tree shared[SH_BLTIN_NUM_SHARED_SIGNATURES];
  memset (shared, 0, sizeof shared);

  for (unsigned int di = 0; di < ARRAY_SIZE (bdesc); ++di)
    {
      builtin_description* d = &bdesc[di];

      if (!d->is_enabled ())
	continue;

      tree type, arg_type = NULL_TREE;
      int signature = d->signature;

      if (signature < SH_BLTIN_NUM_SHARED_SIGNATURES && shared[signature])
	type = shared[signature];
      else
	{
	  int has_result = signature_args[signature][0] != 0;
	  tree args[3];

	  if (! TARGET_FPU_ANY
	      && FLOAT_MODE_P (insn_data[d->icode].operand[0].mode))
	    continue;
	  for (unsigned int i = 0; i < ARRAY_SIZE (args); i++)
	    args[i] = NULL_TREE;
	  for (int i = 3; ; i--)
	    {
	      int arg = signature_args[signature][i];
	      int opno = i - 1 + has_result;

	      if (arg & 8)
		arg_type = ptr_type_node;
	      else if (arg)
		arg_type = (*lang_hooks.types.type_for_mode)
		  (insn_data[d->icode].operand[opno].mode, (arg & 1));
	      else if (i)
		continue;
	      else
		arg_type = void_type_node;
	      if (i == 0)
		break;
	      args[i-1] = arg_type;
	    }
	  type = build_function_type_list (arg_type, args[0], args[1],
					   args[2], NULL_TREE);
	  if (signature < SH_BLTIN_NUM_SHARED_SIGNATURES)
	    shared[signature] = type;
	}
      d->fndecl =
	add_builtin_function (d->name, type, d - bdesc, BUILT_IN_MD,
			      NULL, NULL_TREE);
      /* Recode {sts,set}_fpscr decls for sh_atomic_assign_expand_fenv.  */
      if (d->icode == CODE_FOR_sts_fpscr)
	sh_builtin_get_fpscr = d->fndecl;
      else if (d->icode == CODE_FOR_set_fpscr)
	sh_builtin_set_fpscr = d->fndecl;
    }
}
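
/* Usage illustration (user code; assumes an FPU target where the builtins
   above are registered):

     unsigned int fpscr = __builtin_sh_get_fpscr ();
     __builtin_sh_set_fpscr (fpscr & ~3u);

   The mask shown here is illustrative only; which FPSCR bits it clears
   depends on the exact FPSCR layout of the target.  */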
/* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV.  */

static void
sh_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
{
  const unsigned SH_FE_INVALID = 64;
  const unsigned SH_FE_DIVBYZERO = 32;
  const unsigned SH_FE_OVERFLOW = 16;
  const unsigned SH_FE_UNDERFLOW = 8;
  const unsigned SH_FE_INEXACT = 4;
  const unsigned HOST_WIDE_INT SH_FE_ALL_EXCEPT = (SH_FE_INVALID
                                                   | SH_FE_DIVBYZERO
                                                   | SH_FE_OVERFLOW
                                                   | SH_FE_UNDERFLOW
                                                   | SH_FE_INEXACT);
  const unsigned HOST_WIDE_INT SH_FE_EXCEPT_SHIFT = 5;
  tree fenv_var, mask, ld_fenv, masked_fenv;
  tree new_fenv_var, reload_fenv, restore_fnenv;
  tree update_call, atomic_feraiseexcept, hold_fnclex;

  if (! TARGET_FPU_ANY)
    return;

  /* Generate the equivalent of :
       unsigned int fenv_var;
       fenv_var = __builtin_sh_get_fpscr ();

       unsigned int masked_fenv;
       masked_fenv = fenv_var & mask;

       __builtin_sh_set_fpscr (masked_fenv);  */

  fenv_var = create_tmp_var_raw (unsigned_type_node);
  mask = build_int_cst (unsigned_type_node,
                        ~((SH_FE_ALL_EXCEPT << SH_FE_EXCEPT_SHIFT)
                          | SH_FE_ALL_EXCEPT));
  ld_fenv = build2 (MODIFY_EXPR, unsigned_type_node,
                    fenv_var, build_call_expr (sh_builtin_get_fpscr, 0));
  masked_fenv = build2 (BIT_AND_EXPR, unsigned_type_node, fenv_var, mask);
  hold_fnclex = build_call_expr (sh_builtin_set_fpscr, 1, masked_fenv);
  fenv_var = build4 (TARGET_EXPR, unsigned_type_node, fenv_var,
                     build2 (COMPOUND_EXPR, void_type_node, masked_fenv,
                             ld_fenv),
                     NULL_TREE, NULL_TREE);
  *hold = build2 (COMPOUND_EXPR, void_type_node, fenv_var, hold_fnclex);

  /* Store the value of masked_fenv to clear the exceptions:
       __builtin_sh_set_fpscr (masked_fenv);  */

  *clear = build_call_expr (sh_builtin_set_fpscr, 1, masked_fenv);

  /* Generate the equivalent of :
       unsigned int new_fenv_var;
       new_fenv_var = __builtin_sh_get_fpscr ();

       __builtin_sh_set_fpscr (fenv_var);

       __atomic_feraiseexcept (new_fenv_var);  */

  new_fenv_var = create_tmp_var_raw (unsigned_type_node);
  reload_fenv = build2 (MODIFY_EXPR, unsigned_type_node, new_fenv_var,
                        build_call_expr (sh_builtin_get_fpscr, 0));
  restore_fnenv = build_call_expr (sh_builtin_set_fpscr, 1, fenv_var);
  atomic_feraiseexcept = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
  update_call = build_call_expr (atomic_feraiseexcept, 1,
                                 fold_convert (integer_type_node,
                                               new_fenv_var));
  *update = build2 (COMPOUND_EXPR, void_type_node,
                    build2 (COMPOUND_EXPR, void_type_node,
                            reload_fenv, restore_fnenv), update_call);
}
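
/* As an illustration of the three sequences built above (a plain C sketch,
   using only the __builtin_sh_get_fpscr / __builtin_sh_set_fpscr builtins
   registered in sh_init_builtins; SH_FE_ALL_EXCEPT is 64|32|16|8|4 == 0x7c):

     unsigned int fenv_var, masked_fenv, new_fenv_var;

     // hold: save FPSCR, then mask out exception enable and flag bits.
     fenv_var = __builtin_sh_get_fpscr ();
     masked_fenv = fenv_var & ~((0x7c << 5) | 0x7c);
     __builtin_sh_set_fpscr (masked_fenv);

     // clear: re-write the masked value to discard newly raised flags.
     __builtin_sh_set_fpscr (masked_fenv);

     // update: restore the original FPSCR and re-raise what happened.
     new_fenv_var = __builtin_sh_get_fpscr ();
     __builtin_sh_set_fpscr (fenv_var);
     __atomic_feraiseexcept ((int) new_fenv_var);  */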
/* Implements target hook vector_mode_supported_p.  */
static bool
sh_vector_mode_supported_p (machine_mode mode ATTRIBUTE_UNUSED)
{
  return false;
}

static bool
sh_frame_pointer_required (void)
{
  /* If needed override this in other tm.h files to cope with various OS
     lossage requiring a frame pointer.  */
  if (SUBTARGET_FRAME_POINTER_REQUIRED)
    return true;

  if (crtl->profile)
    return true;

  return false;
}

/* Implements target hook dwarf_calling_convention.  Return an enum
   of dwarf_calling_convention.  */
static int
sh_dwarf_calling_convention (const_tree func)
{
  if (sh_attr_renesas_p (func))
    return DW_CC_GNU_renesas_sh;

  return DW_CC_normal;
}

/* Returns the sh builtin decl for CODE.  */
static tree
sh_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
{
  if (code >= ARRAY_SIZE (bdesc))
    return error_mark_node;

  if (!bdesc[code].is_enabled ())
    return error_mark_node;

  return bdesc[code].fndecl;
}
/* Expand an expression EXP that calls a built-in function,
   with result going to TARGET if that's convenient
   (and in mode MODE if that's convenient).
   SUBTARGET may be used as the target for computing one of EXP's operands.
   IGNORE is nonzero if the value is to be ignored.  */
static rtx
sh_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
                   machine_mode mode ATTRIBUTE_UNUSED, int ignore)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_MD_FUNCTION_CODE (fndecl);
  const struct builtin_description *d = &bdesc[fcode];
  enum insn_code icode = d->icode;
  int signature = d->signature;
  int nop = 0;
  rtx op[4];

  if (signature_args[signature][0])
    {
      if (ignore)
        return NULL_RTX;

      machine_mode tmode = insn_data[icode].operand[0].mode;
      if (! target || GET_MODE (target) != tmode
          || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
        target = gen_reg_rtx (tmode);
      op[nop++] = target;
    }
  else
    target = NULL_RTX;

  for (int i = 1; i <= 3; i++, nop++)
    {
      if (! signature_args[signature][i])
        break;
      tree arg = CALL_EXPR_ARG (exp, i - 1);
      if (arg == error_mark_node)
        return const0_rtx;

      machine_mode opmode;
      tree optype;
      if (signature_args[signature][i] & 8)
        {
          opmode = ptr_mode;
          optype = ptr_type_node;
        }
      else
        {
          opmode = insn_data[icode].operand[nop].mode;
          optype = (*lang_hooks.types.type_for_mode) (opmode, 0);
        }

      machine_mode argmode = TYPE_MODE (TREE_TYPE (arg));
      if (argmode != opmode)
        arg = build1 (NOP_EXPR, optype, arg);
      op[nop] = expand_expr (arg, NULL_RTX, opmode, EXPAND_NORMAL);
      if (! (*insn_data[icode].operand[nop].predicate) (op[nop], opmode))
        op[nop] = copy_to_mode_reg (opmode, op[nop]);
    }

  rtx pat = NULL_RTX;

  switch (nop)
    {
    case 1:
      pat = (*insn_data[d->icode].genfun) (op[0]);
      break;
    case 2:
      pat = (*insn_data[d->icode].genfun) (op[0], op[1]);
      break;
    case 3:
      pat = (*insn_data[d->icode].genfun) (op[0], op[1], op[2]);
      break;
    case 4:
      pat = (*insn_data[d->icode].genfun) (op[0], op[1], op[2], op[3]);
      break;
    default:
      gcc_unreachable ();
    }
  if (! pat)
    return NULL_RTX;
  emit_insn (pat);
  return target;
}
/* Implement TARGET_HARD_REGNO_NREGS.  On the SH all but the XD regs are
   UNITS_PER_WORD bits wide.  */

static unsigned int
sh_hard_regno_nregs (unsigned int regno, machine_mode mode)
{
  if (XD_REGISTER_P (regno))
    return CEIL (GET_MODE_SIZE (mode), 2 * UNITS_PER_WORD);
  return CEIL (GET_MODE_SIZE (mode), UNITS_PER_WORD);
}
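
/* For example (illustrative arithmetic only): with UNITS_PER_WORD == 4,
   a DFmode value (8 bytes) occupies CEIL (8, 8) == 1 XD register but
   CEIL (8, 4) == 2 general registers, while an SImode value occupies a
   single register in either case.  */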
/* Implement TARGET_HARD_REGNO_MODE_OK.

   We can allow any mode in any general register.  The special registers
   only allow SImode.  Don't allow any mode in the PR.

   We cannot hold DCmode values in the XD registers because alter_reg
   handles subregs of them incorrectly.  We could work around this by
   spacing the XD registers like the DR registers, but this would require
   additional memory in every compilation to hold larger register vectors.
   We could hold SFmode / SCmode values in XD registers, but that
   would require a tertiary reload when reloading from / to memory,
   and a secondary reload to reload from / to general regs; that
   seems to be a losing proposition.

   We want to allow TImode FP regs so that when V4SFmode is loaded as TImode,
   it won't be ferried through GP registers first.  */
static bool
sh_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
{
  if (SPECIAL_REGISTER_P (regno))
    return mode == SImode;

  if (regno == FPUL_REG)
    return (mode == SImode || mode == SFmode);

  if (FP_REGISTER_P (regno) && mode == SFmode)
    return true;

  if (mode == V2SFmode)
    {
      if (((FP_REGISTER_P (regno) && (regno - FIRST_FP_REG) % 2 == 0)
           || GENERAL_REGISTER_P (regno)))
        return true;
      else
        return false;
    }

  if (mode == V4SFmode)
    {
      if ((FP_REGISTER_P (regno) && (regno - FIRST_FP_REG) % 4 == 0)
          || GENERAL_REGISTER_P (regno))
        return true;
      else
        return false;
    }

  if (mode == V16SFmode)
    return regno == FIRST_XD_REG;

  if (FP_REGISTER_P (regno))
    {
      if (mode == SFmode
          || mode == SImode
          || ((TARGET_SH2E) && mode == SCmode)
          || (((TARGET_FPU_DOUBLE && mode == DFmode) || mode == DCmode)
              && ((regno - FIRST_FP_REG) & 1) == 0)
          || (TARGET_SH4 && mode == TImode
              && ((regno - FIRST_FP_REG) & 3) == 0))
        return true;
      else
        return false;
    }

  if (XD_REGISTER_P (regno))
    return mode == DFmode;

  if (regno == PR_REG)
    return mode == SImode;

  if (regno == FPSCR_REG)
    return mode == SImode;

  return true;
}
/* Implement TARGET_MODES_TIEABLE_P.

   If TARGET_HARD_REGNO_MODE_OK could produce different values for MODE1
   and MODE2, for any hard reg, then this must be false for correct output.
   That's the case for xd registers: we don't hold SFmode values in
   them, so we can't tie an SFmode pseudo with one in another
   floating-point mode.  */
static bool
sh_modes_tieable_p (machine_mode mode1, machine_mode mode2)
{
  return (mode1 == mode2
          || (GET_MODE_CLASS (mode1) == GET_MODE_CLASS (mode2)
              && (mode1 != SFmode && mode2 != SFmode)));
}
/* Specify the modes required to caller save a given hard regno.
   choose_hard_reg_mode chooses mode based on TARGET_HARD_REGNO_MODE_OK
   and returns ?Imode for float regs when sh_hard_regno_mode_ok
   permits integer modes on them.  That makes LRA's split process
   unhappy.  See PR55212.  */
machine_mode
sh_hard_regno_caller_save_mode (unsigned int regno, unsigned int nregs,
                                machine_mode mode)
{
  if (FP_REGISTER_P (regno)
      && (mode == SFmode
          || mode == SCmode
          || ((mode == DFmode || mode == DCmode)
              && ((regno - FIRST_FP_REG) & 1) == 0)))
    return mode;

  return choose_hard_reg_mode (regno, nregs, NULL);
}
/* Implement TARGET_CAN_CHANGE_MODE_CLASS.  */
static bool
sh_can_change_mode_class (machine_mode from, machine_mode to,
                          reg_class_t rclass)
{
  /* We want to enable the use of SUBREGs as a means to
     VEC_SELECT a single element of a vector.  */

  /* This effectively disallows using GENERAL_REGS for SFmode vector subregs.
     This can be problematic when SFmode vector subregs need to be accessed
     on the stack with displacement addressing, as it happens with -O0.
     Thus we disallow the mode change for -O0.  */
  if (to == SFmode && VECTOR_MODE_P (from) && GET_MODE_INNER (from) == SFmode)
    return optimize ? !reg_classes_intersect_p (GENERAL_REGS, rclass) : true;

  if (GET_MODE_SIZE (from) != GET_MODE_SIZE (to))
    {
      if (TARGET_LITTLE_ENDIAN)
        {
          if (GET_MODE_SIZE (to) < 8 || GET_MODE_SIZE (from) < 8)
            return !reg_classes_intersect_p (DF_REGS, rclass);
        }
      else
        {
          if (GET_MODE_SIZE (from) < 8)
            return !reg_classes_intersect_p (DF_REGS, rclass);
        }
    }
  return true;
}
/* Return true if registers in machine mode MODE will likely be
   allocated to registers in small register classes.  */
bool
sh_small_register_classes_for_mode_p (machine_mode mode ATTRIBUTE_UNUSED)
{
  return true;
}

/* If ADDRESS refers to a CODE_LABEL, add NUSES to the number of times
   that label is used.  */
void
sh_mark_label (rtx address, int nuses)
{
  if (GOTOFF_P (address))
    {
      /* Extract the label or symbol.  */
      address = XEXP (address, 0);
      if (GET_CODE (address) == PLUS)
        address = XEXP (address, 0);
      address = XVECEXP (address, 0, 0);
    }
  if (GET_CODE (address) == LABEL_REF
      && LABEL_P (XEXP (address, 0)))
    LABEL_NUSES (XEXP (address, 0)) += nuses;
}
/* Compute extra cost of moving data between one register class
   and another.

   If SECONDARY*_RELOAD_CLASS says something about the src/dst pair, regclass
   uses this information.  Hence, the general register <-> floating point
   register information here is not used for SFmode.  */
static int
sh_register_move_cost (machine_mode mode,
                       reg_class_t srcclass, reg_class_t dstclass)
{
  if (dstclass == T_REGS || dstclass == PR_REGS)
    return 10;

  if (dstclass == MAC_REGS && srcclass == MAC_REGS)
    return 4;

  if (mode == SImode && TARGET_FMOVD
      && REGCLASS_HAS_FP_REG (srcclass)
      && REGCLASS_HAS_FP_REG (dstclass))
    return 4;

  if (REGCLASS_HAS_FP_REG (dstclass) && srcclass == T_REGS)
    return ((TARGET_HARD_SH4 && !optimize_size) ? 10 : 7);

  if ((REGCLASS_HAS_FP_REG (dstclass) && srcclass == MAC_REGS)
      || (dstclass == MAC_REGS && REGCLASS_HAS_FP_REG (srcclass)))
    return 9;

  if ((REGCLASS_HAS_FP_REG (dstclass)
       && REGCLASS_HAS_GENERAL_REG (srcclass))
      || (REGCLASS_HAS_GENERAL_REG (dstclass)
          && REGCLASS_HAS_FP_REG (srcclass)))
    {
      /* Discourage trying to use fp regs for a pointer.  This also
         discourages fp regs with SImode because Pmode is an alias
         of SImode on this target.  See PR target/48596.  */
      int addend = (mode == Pmode) ? 40 : 0;

      return ((TARGET_FMOVD ? 8 : 12) + addend)
             * ((GET_MODE_SIZE (mode) + 7) / 8U);
    }

  if ((dstclass == FPUL_REGS
       && REGCLASS_HAS_GENERAL_REG (srcclass))
      || (srcclass == FPUL_REGS
          && REGCLASS_HAS_GENERAL_REG (dstclass)))
    return 5;

  if ((dstclass == FPUL_REGS
       && (srcclass == PR_REGS || srcclass == MAC_REGS || srcclass == T_REGS))
      || (srcclass == FPUL_REGS
          && (dstclass == PR_REGS || dstclass == MAC_REGS)))
    return 7;

  if ((srcclass == FPSCR_REGS && ! REGCLASS_HAS_GENERAL_REG (dstclass))
      || (dstclass == FPSCR_REGS && ! REGCLASS_HAS_GENERAL_REG (srcclass)))
    return 4;

  if (TARGET_FMOVD
      && ! REGCLASS_HAS_GENERAL_REG (srcclass)
      && ! REGCLASS_HAS_GENERAL_REG (dstclass))
    return 2 * ((GET_MODE_SIZE (mode) + 7) / 8U);

  return 2 * ((GET_MODE_SIZE (mode) + 3) / 4U);
}
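
/* Worked example (illustrative, not from the original source): moving a
   DFmode value between general and FP register classes without
   TARGET_FMOVD costs (12 + 0) * ((8 + 7) / 8) == 12, while moving a
   Pmode pointer into an FP register costs (12 + 40) * 1 == 52 because
   of the discouraging addend above.  */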
static rtx_insn *
emit_load_ptr (rtx reg, rtx addr)
{
  rtx mem = gen_const_mem (ptr_mode, addr);

  if (Pmode != ptr_mode)
    mem = gen_rtx_SIGN_EXTEND (Pmode, mem);
  return emit_move_insn (reg, mem);
}
static void
sh_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
                    HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
                    tree function)
{
  const char *fnname = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (thunk_fndecl));
  CUMULATIVE_ARGS cum;
  int structure_value_byref = 0;
  rtx this_rtx, this_value, sibcall, funexp;
  rtx_insn *insns;
  tree funtype = TREE_TYPE (function);
  int simple_add = CONST_OK_FOR_ADD (delta);
  int did_load = 0;
  rtx scratch0, scratch1, scratch2;

  reload_completed = 1;
  epilogue_completed = 1;
  crtl->uses_only_leaf_regs = 1;

  emit_note (NOTE_INSN_PROLOGUE_END);

  /* Find the "this" pointer.  We have such a wide range of ABIs for the
     SH that it's best to do this completely machine independently.
     "this" is passed as first argument, unless a structure return pointer
     comes first, in which case "this" comes second.  */
  INIT_CUMULATIVE_ARGS (cum, funtype, NULL_RTX, 0, 1);
#ifndef PCC_STATIC_STRUCT_RETURN
  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    structure_value_byref = 1;
#endif /* not PCC_STATIC_STRUCT_RETURN */
  if (structure_value_byref && sh_struct_value_rtx (function, 0) == 0)
    {
      tree ptype = build_pointer_type (TREE_TYPE (funtype));

      function_arg_info ptr_arg (ptype, Pmode, /*named=*/true);
      sh_function_arg_advance (pack_cumulative_args (&cum), ptr_arg);
    }
  function_arg_info ptr_arg (ptr_type_node, Pmode, /*named=*/true);
  this_rtx = sh_function_arg (pack_cumulative_args (&cum), ptr_arg);

  /* For SHcompact, we only have r0 for a scratch register: r1 is the
     static chain pointer (even if you can't have nested virtual functions
     right now, someone might implement them sometime), and the rest of the
     registers are used for argument passing, are callee-saved, or reserved.  */
  /* We need to check call_used_regs / fixed_regs in case -fcall-saved-reg /
     -ffixed-reg has been used.  */
  if (! call_used_or_fixed_reg_p (0) || fixed_regs[0])
    error ("r0 needs to be available as a call-clobbered register");
  scratch0 = scratch1 = scratch2 = gen_rtx_REG (Pmode, 0);

  if (call_used_or_fixed_reg_p (1) && ! fixed_regs[1])
    scratch1 = gen_rtx_REG (ptr_mode, 1);
  /* N.B., if not TARGET_HITACHI, register 2 is used to pass the pointer
     pointing where to return struct values.  */
  if (call_used_or_fixed_reg_p (3) && ! fixed_regs[3])
    scratch2 = gen_rtx_REG (Pmode, 3);

  this_value = plus_constant (Pmode, this_rtx, delta);
  if (vcall_offset
      && (simple_add || scratch0 != scratch1)
      && strict_memory_address_p (ptr_mode, this_value))
    {
      emit_load_ptr (scratch0, this_value);
      did_load = 1;
    }

  if (!delta)
    ; /* Do nothing.  */
  else if (simple_add)
    emit_move_insn (this_rtx, this_value);
  else
    {
      emit_move_insn (scratch1, GEN_INT (delta));
      emit_insn (gen_add2_insn (this_rtx, scratch1));
    }

  if (vcall_offset)
    {
      rtx offset_addr;

      if (!did_load)
        emit_load_ptr (scratch0, this_rtx);

      offset_addr = plus_constant (Pmode, scratch0, vcall_offset);
      if (strict_memory_address_p (ptr_mode, offset_addr))
        ; /* Do nothing.  */
      else if (scratch0 != scratch1)
        {
          /* scratch0 != scratch1, and we have indexed loads.  Get better
             schedule by loading the offset into r1 and using an indexed
             load - then the load of r1 can issue before the load from
             (this_rtx + delta) finishes.  */
          emit_move_insn (scratch1, GEN_INT (vcall_offset));
          offset_addr = gen_rtx_PLUS (Pmode, scratch0, scratch1);
        }
      else if (CONST_OK_FOR_ADD (vcall_offset))
        {
          emit_insn (gen_add2_insn (scratch0, GEN_INT (vcall_offset)));
          offset_addr = scratch0;
        }
      else
        gcc_unreachable (); /* FIXME */
      emit_load_ptr (scratch0, offset_addr);

      if (Pmode != ptr_mode)
        scratch0 = gen_rtx_TRUNCATE (ptr_mode, scratch0);
      emit_insn (gen_add2_insn (this_rtx, scratch0));
    }

  /* Generate a tail call to the target function.  */
  if (! TREE_USED (function))
    {
      assemble_external (function);
      TREE_USED (function) = 1;
    }
  funexp = XEXP (DECL_RTL (function), 0);
  /* If the function is overridden, so is the thunk, hence we don't
     need GOT addressing even if this is a public symbol.  */
#if 1
  if (TARGET_SH1 && ! flag_weak)
    sibcall = gen_sibcalli_thunk (funexp, const0_rtx);
  else
#endif
  if (TARGET_SH2 && flag_pic)
    {
      if (TARGET_FDPIC)
        {
          sibcall = gen_sibcall_pcrel_fdpic (funexp, const0_rtx);
          XEXP (XVECEXP (sibcall, 0, 3), 0) = scratch2;
        }
      else
        {
          sibcall = gen_sibcall_pcrel (funexp, const0_rtx);
          XEXP (XVECEXP (sibcall, 0, 2), 0) = scratch2;
        }
    }
  else
    {
      emit_move_insn (scratch2, funexp);
      funexp = gen_rtx_MEM (FUNCTION_MODE, scratch2);
      sibcall = gen_sibcall (funexp, const0_rtx, NULL_RTX);
    }
  sibcall = emit_call_insn (sibcall);
  SIBLING_CALL_P (sibcall) = 1;
  use_reg (&CALL_INSN_FUNCTION_USAGE (sibcall), this_rtx);

  /* Run just enough of rest_of_compilation to do scheduling and get
     the insns emitted.  */

  insns = get_insns ();

  if (optimize > 0)
    {
      if (! cfun->cfg)
        init_flow (cfun);
      split_all_insns_noflow ();
    }

  sh_reorg ();
  shorten_branches (insns);
  assemble_start_function (thunk_fndecl, fnname);
  final_start_function (insns, file, 1);
  final (insns, file, 1);
  final_end_function ();
  assemble_end_function (thunk_fndecl, fnname);

  reload_completed = 0;
  epilogue_completed = 0;
}
/* Return an RTX pair for the address and call site label of a function
   NAME of kind KIND, placing the result in TARGET if not NULL.  For
   SFUNC_STATIC, if FDPIC, the LAB member of result will be set to
   (const_int 0) if jsr should be used, or a label_ref if bsrf should
   be used.  For FDPIC, both SFUNC_GOT and SFUNC_STATIC will return the
   address of the function itself, not a function descriptor, so they
   can only be used with functions not using the FDPIC register that
   are known to be called directly without a PLT entry.  */

function_symbol_result
function_symbol (rtx target, const char *name, sh_function_kind kind)
{
  /* If this is not an ordinary function, the name usually comes from a
     string literal or an sprintf buffer.  Make sure we use the same
     string consistently, so that cse will be able to unify address loads.  */
  if (kind != FUNCTION_ORDINARY)
    name = IDENTIFIER_POINTER (get_identifier (name));
  rtx sym = gen_rtx_SYMBOL_REF (Pmode, name);
  rtx lab = const0_rtx;
  SYMBOL_REF_FLAGS (sym) = SYMBOL_FLAG_FUNCTION;
  if (flag_pic)
    switch (kind)
      {
      case FUNCTION_ORDINARY:
        break;
      case SFUNC_GOT:
        {
          rtx reg = target ? target : gen_reg_rtx (Pmode);

          emit_insn (gen_symGOT2reg (reg, sym));
          sym = reg;
          break;
        }
      case SFUNC_STATIC:
        {
          rtx reg = target ? target : gen_reg_rtx (Pmode);

          if (TARGET_FDPIC)
            {
              /* We use PC-relative calls, since GOTOFF can only refer
                 to writable data.  This works along with sh_sfunc_call.  */
              lab = PATTERN (gen_call_site ());
              emit_insn (gen_sym_label2reg (reg, sym, lab));
            }
          else
            {
              /* ??? To allow cse to work, we use GOTOFF relocations.
                 We could add combiner patterns to transform this into
                 straight pc-relative calls with sym2PIC / bsrf when
                 label load and function call are still 1:1 and in the
                 same basic block during combine.  */
              emit_insn (gen_symGOTOFF2reg (reg, sym));
            }
          sym = reg;
          break;
        }
      }
  if (target && sym != target)
    {
      emit_move_insn (target, sym);
      return function_symbol_result (target, lab);
    }
  return function_symbol_result (sym, lab);
}
/* Find the number of the first general purpose register in S that
   is set, or -1 if there is none.  */
static int
scavenge_reg (HARD_REG_SET *s)
{
  for (int r = FIRST_GENERAL_REG; r <= LAST_GENERAL_REG; r++)
    if (TEST_HARD_REG_BIT (*s, r))
      return r;
  return -1;
}
rtx
sh_get_pr_initial_val (void)
{
  /* If we haven't finished rtl generation, there might be a nonlocal label
     that we haven't seen yet.
     ??? get_hard_reg_initial_val fails if it is called after register
     allocation has started, unless it has been called before for the
     same register.  And even then, we end in trouble if we didn't use
     the register in the same basic block before.  So call
     get_hard_reg_initial_val now and wrap it in an unspec if we might
     need to replace it.  */
  /* ??? We also must do this for TARGET_SH1 in general, because otherwise
     combine can put the pseudo returned by get_hard_reg_initial_val into
     instructions that need a general purpose register, which will fail to
     be recognized when the pseudo becomes allocated to PR.  */
  rtx val = get_hard_reg_initial_val (Pmode, PR_REG);
  return gen_rtx_UNSPEC (SImode, gen_rtvec (1, val), UNSPEC_RA);
}
bool
sh_expand_t_scc (rtx operands[])
{
  enum rtx_code code = GET_CODE (operands[1]);
  rtx target = operands[0];
  rtx op0 = operands[2];
  rtx op1 = operands[3];
  rtx result = target;

  if (!REG_P (op0) || REGNO (op0) != T_REG
      || !CONST_INT_P (op1))
    return false;
  if (!REG_P (result))
    result = gen_reg_rtx (SImode);
  HOST_WIDE_INT val = INTVAL (op1);
  if ((code == EQ && val == 1) || (code == NE && val == 0))
    emit_insn (gen_movt (result, get_t_reg_rtx ()));
  else if ((code == EQ && val == 0) || (code == NE && val == 1))
    emit_insn (gen_movnegt (result, get_t_reg_rtx ()));
  else if (code == EQ || code == NE)
    emit_insn (gen_move_insn (result, GEN_INT (code == NE)));
  else
    return false;
  if (result != target)
    emit_move_insn (target, result);
  return true;
}
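
/* For example (illustrative): for "reg = (T == 1)" or "reg = (T != 0)"
   this emits a single movt; for "reg = (T == 0)" or "reg = (T != 1)" a
   movnegt; and when T is compared against any other constant the result
   is known at compile time (T is always 0 or 1), so a plain constant
   move is emitted instead.  */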
/* INSN is an sfunc; return the rtx that describes the address used.  */
static rtx
extract_sfunc_addr (rtx insn)
{
  rtx pattern = PATTERN (insn);
  const int len = XVECLEN (pattern, 0);
  for (int i = 0; i < len; i++)
    {
      rtx part = XVECEXP (pattern, 0, i);
      if (GET_CODE (part) == USE && GET_MODE (XEXP (part, 0)) == Pmode
          && GENERAL_REGISTER_P (true_regnum (XEXP (part, 0))))
        return XEXP (part, 0);
    }
  gcc_assert (GET_CODE (XVECEXP (pattern, 0, 0)) == UNSPEC_VOLATILE);
  return XVECEXP (XVECEXP (pattern, 0, 0), 0, 1);
}
/* Verify that the register in use_sfunc_addr still agrees with the address
   used in the sfunc.  This prevents fill_slots_from_thread from changing
   use_sfunc_addr.
   INSN is the use_sfunc_addr instruction, and REG is the register it
   guards.  */
bool
check_use_sfunc_addr (rtx_insn *insn, rtx reg)
{
  /* Search for the sfunc.  It should really come right after INSN.  */
  while ((insn = NEXT_INSN (insn)))
    {
      if (LABEL_P (insn) || JUMP_P (insn))
        break;
      if (! INSN_P (insn))
        continue;

      if (rtx_sequence *seq = dyn_cast<rtx_sequence *> (PATTERN (insn)))
        insn = seq->insn (0);
      if (GET_CODE (PATTERN (insn)) != PARALLEL
          || get_attr_type (insn) != TYPE_SFUNC)
        continue;
      return rtx_equal_p (extract_sfunc_addr (insn), reg);
    }
  gcc_unreachable ();
}
/* This function returns a constant rtx that represents 2**15 / pi in
   SFmode.  It's used to scale a fixed-point signed 16.16-bit fraction
   of a full circle back to an SFmode value, i.e. 0x10000 maps to 2*pi.  */
static GTY(()) rtx sh_fsca_sf2int_rtx;

rtx
sh_fsca_sf2int (void)
{
  if (! sh_fsca_sf2int_rtx)
    {
      REAL_VALUE_TYPE rv;

      real_from_string (&rv, "10430.378350470453");
      sh_fsca_sf2int_rtx = const_double_from_real_value (rv, SFmode);
    }

  return sh_fsca_sf2int_rtx;
}

/* This function returns a constant rtx that represents pi / 2**15 in
   SFmode.  It's used to scale SFmode angles, in radians, to a
   fixed-point signed 16.16-bit fraction of a full circle, i.e. 2*pi
   maps to 0x10000.  */
static GTY(()) rtx sh_fsca_int2sf_rtx;

rtx
sh_fsca_int2sf (void)
{
  if (! sh_fsca_int2sf_rtx)
    {
      REAL_VALUE_TYPE rv;

      real_from_string (&rv, "9.587379924285257e-5");
      sh_fsca_int2sf_rtx = const_double_from_real_value (rv, SFmode);
    }

  return sh_fsca_int2sf_rtx;
}
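
/* A quick sanity check of the two scale factors (illustrative arithmetic
   only): 2**15 / pi == 32768 / 3.14159265358979... ==
   10430.3783504704527..., and pi / 2**15 == 9.58737992428525...e-5,
   matching the string constants passed to real_from_string above.  */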
/* Initialize the CUMULATIVE_ARGS structure.  */
void
sh_init_cumulative_args (CUMULATIVE_ARGS *pcum,
                         tree fntype,
                         rtx libname ATTRIBUTE_UNUSED,
                         tree fndecl,
                         signed int n_named_args,
                         machine_mode mode)
{
  pcum->arg_count[(int) SH_ARG_FLOAT] = 0;
  pcum->free_single_fp_reg = 0;
  pcum->outgoing = n_named_args != -1;

  /* FIXME: Should we check TARGET_HITACHI here ???  */
  pcum->renesas_abi = sh_attr_renesas_p (fntype);

  if (fntype)
    {
      pcum->force_mem = ((TARGET_HITACHI || pcum->renesas_abi)
                         && aggregate_value_p (TREE_TYPE (fntype), fndecl));
      pcum->prototype_p = prototype_p (fntype);
      pcum->arg_count[(int) SH_ARG_INT] = false;
    }
  else
    {
      pcum->arg_count[(int) SH_ARG_INT] = 0;
      pcum->prototype_p = false;
      if (mode != VOIDmode)
        {
          /* If the default ABI is the Renesas ABI then all library
             calls must assume that the library will be using the
             Renesas ABI.  So if the function would return its result
             in memory then we must force the address of this memory
             block onto the stack.  Ideally we would like to call
             targetm.calls.return_in_memory() here but we do not have
             the TYPE or the FNDECL available so we synthesize the
             contents of that function as best we can.  */
          pcum->force_mem =
            ((TARGET_DEFAULT & MASK_HITACHI)
             && (mode == BLKmode
                 || (GET_MODE_SIZE (mode) > 4
                     && !(mode == DFmode
                          && TARGET_FPU_DOUBLE))));
        }
      else
        pcum->force_mem = false;
    }
}
rtx
sh_gen_truncate (machine_mode mode, rtx x, int need_sign_ext)
{
  enum rtx_code code = TRUNCATE;

  if (GET_CODE (x) == ZERO_EXTEND || GET_CODE (x) == SIGN_EXTEND)
    {
      rtx inner = XEXP (x, 0);
      machine_mode inner_mode = GET_MODE (inner);

      if (inner_mode == mode)
        return inner;
      else if (GET_MODE_SIZE (inner_mode) >= GET_MODE_SIZE (mode))
        x = inner;
      else if (GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (mode)
               && (! need_sign_ext || GET_CODE (x) == SIGN_EXTEND))
        {
          code = GET_CODE (x);
          x = inner;
        }
    }
  return gen_rtx_fmt_e (code, mode, x);
}
/* Load and store depend on the highpart of the address.  However,
   set_attr_alternative does not give well-defined results before reload,
   so we must look at the rtl ourselves to see if any of the feeding
   registers is used in a memref.

   Return true iff INSN contains a MEM.  */
bool
sh_contains_memref_p (rtx insn)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, PATTERN (insn), NONCONST)
    if (MEM_P (*iter))
      return true;
  return false;
}

/* Return true iff INSN loads a banked register.  */
bool
sh_loads_bankedreg_p (rtx insn)
{
  if (GET_CODE (PATTERN (insn)) == SET)
    {
      rtx op = SET_DEST (PATTERN (insn));
      if (REG_P (op) && BANKED_REGISTER_P (REGNO (op)))
        return true;
    }
  return false;
}

/* Implement TARGET_PREFERRED_RELOAD_CLASS.  */
static reg_class_t
sh_preferred_reload_class (rtx x ATTRIBUTE_UNUSED, reg_class_t rclass)
{
  return rclass;
}
/* Implement TARGET_SECONDARY_RELOAD.  */
static reg_class_t
sh_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
                     machine_mode mode, secondary_reload_info *sri)
{
  enum reg_class rclass = (enum reg_class) rclass_i;

  if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == PLUS
      && REG_P (XEXP (XEXP (x, 0), 0))
      && REGNO (XEXP (XEXP (x, 0), 0)) == GBR_REG)
    return rclass == R0_REGS ? NO_REGS : R0_REGS;

  if (MEM_P (x) && REG_P (XEXP (x, 0)) && REGNO (XEXP (x, 0)) == GBR_REG)
    return rclass == R0_REGS ? NO_REGS : R0_REGS;

  if (REG_P (x) && REGNO (x) == GBR_REG)
    return NO_REGS;

  if (in_p)
    {
      if (REGCLASS_HAS_FP_REG (rclass)
          && immediate_operand ((x), mode)
          && ! ((fp_zero_operand (x) || fp_one_operand (x)) && mode == SFmode))
        switch (mode)
          {
          case E_SFmode:
            sri->icode = CODE_FOR_reload_insf__frn;
            return NO_REGS;
          case E_DFmode:
            sri->icode = CODE_FOR_reload_indf__frn;
            return NO_REGS;
          case E_SImode:
            /* ??? If we knew that we are in the appropriate mode -
               single precision - we could use a reload pattern directly.  */
            return FPUL_REGS;
          default:
            abort ();
          }
      if (rclass == FPUL_REGS
          && ((REG_P (x) && (REGNO (x) == MACL_REG || REGNO (x) == MACH_REG
                             || REGNO (x) == T_REG))
              || GET_CODE (x) == PLUS))
        return GENERAL_REGS;
      if (rclass == FPUL_REGS && immediate_operand (x, mode))
        {
          if (satisfies_constraint_I08 (x) || fp_zero_operand (x))
            return GENERAL_REGS;
          else if (mode == SFmode)
            return FP_REGS;
          sri->icode = CODE_FOR_reload_insi__i_fpul;
          return NO_REGS;
        }
      if (rclass == FPSCR_REGS
          && ((REG_P (x) && REGNO (x) >= FIRST_PSEUDO_REGISTER)
              || (MEM_P (x) && GET_CODE (XEXP (x, 0)) == PLUS)))
        return GENERAL_REGS;
    } /* end of input-only processing.  */

  if (((REGCLASS_HAS_FP_REG (rclass)
        && (REG_P (x)
            && (GENERAL_OR_AP_REGISTER_P (REGNO (x))
                || (FP_REGISTER_P (REGNO (x)) && mode == SImode
                    && TARGET_FMOVD))))
       || (REGCLASS_HAS_GENERAL_REG (rclass)
           && REG_P (x)
           && FP_REGISTER_P (REGNO (x))))
      && (mode == SFmode || mode == SImode))
    return FPUL_REGS;
  if ((rclass == FPUL_REGS
       || (REGCLASS_HAS_FP_REG (rclass) && mode == SImode))
      && (MEM_P (x)
          || (REG_P (x)
              && (REGNO (x) >= FIRST_PSEUDO_REGISTER
                  || REGNO (x) == T_REG
                  || system_reg_operand (x, VOIDmode)))))
    {
      if (rclass == FPUL_REGS)
        return GENERAL_REGS;
      return NO_REGS;  // LRA wants NO_REGS here, it used to be FPUL_REGS;
    }

  if ((rclass == MAC_REGS || rclass == PR_REGS)
      && REG_P (x) && ! GENERAL_REGISTER_P (REGNO (x))
      && rclass != REGNO_REG_CLASS (REGNO (x)))
    return GENERAL_REGS;

  /* If here fall back to loading FPUL register through general registers.
     This case can happen when movsi_ie insn is picked initially to
     load/store the FPUL register from/to another register, and then the
     other register is allocated on the stack.  */
  if (rclass == FPUL_REGS && true_regnum (x) == -1)
    return GENERAL_REGS;

  /* Force mov.b / mov.w displacement addressing insn to use R0 as
     the other operand.
     On SH2A could also just leave it alone here, which would result in a
     4 byte move insn being generated instead.  However, for this to work
     the insns must have the appropriate alternatives.  */
  if ((mode == QImode || mode == HImode) && rclass != R0_REGS
      && satisfies_constraint_Sdd (x)
      && sh_disp_addr_displacement (x)
         <= sh_max_mov_insn_displacement (mode, false))
    return R0_REGS;

  /* When reload is trying to address a QImode or HImode subreg on the stack,
     force any subreg byte into R0_REGS, as this is going to become a
     displacement address.
     We could restrict this to SUBREG_BYTE (x) > 0, but if the actual reg
     is on the stack, the memref to it might already require a displacement
     and that has to be added to the final address.  At this point we don't
     know the cumulative displacement so we assume the worst case.  */
  if ((mode == QImode || mode == HImode) && rclass != R0_REGS
      && GET_CODE (x) == SUBREG && true_regnum (x) == -1)
    return R0_REGS;

  return NO_REGS;
}
/* Return true if SUBST can't safely replace its equivalent during RA.  */
static bool
sh_cannot_substitute_mem_equiv_p (rtx)
{
  /* If SUBST is mem[base+index] or QI/HImode mem[base+disp], the insn
     uses R0 and may cause spill failure when R0 is already used.
     We have to return true for that case at least.
     Moreover, SH has a strong affinity for R0 and too few hard registers
     to make the equiv substitution win in size or speed on average
     working sets.  The pseudos produced to hold the equiv values can't
     get good hard registers in the bad cases and end up as memory
     save/restore insns, which make the code worse.  */
  return true;
}
/* Implement TARGET_LEGITIMIZE_ADDRESS_DISPLACEMENT.  */
static bool
sh_legitimize_address_displacement (rtx *offset1, rtx *offset2,
                                    poly_int64 orig_offset,
                                    machine_mode mode)
{
  if ((TARGET_FPU_DOUBLE && mode == DFmode)
      || (TARGET_SH2E && mode == SFmode))
    return false;

  struct disp_adjust adj = sh_find_mov_disp_adjust (mode, orig_offset);
  if (adj.offset_adjust != NULL_RTX && adj.mov_disp != NULL_RTX)
    {
      *offset1 = adj.offset_adjust;
      *offset2 = adj.mov_disp;
      return true;
    }

  return false;
}
/* Return true if movsf insn should be split with an additional
   register.  */
bool
sh_movsf_ie_ra_split_p (rtx op0, rtx op1, rtx op2)
{
  if (rtx_equal_p (op0, op1))
    return false;

  if (GET_CODE (op1) == CONST_DOUBLE
      && ! satisfies_constraint_G (op1)
      && ! satisfies_constraint_H (op1)
      && REG_P (op0)
      && REG_P (op2))
    return true;
  if (REG_P (op0) && FP_REGISTER_P (REGNO (op0))
      && REG_P (op1) && GENERAL_REGISTER_P (REGNO (op1))
      && REG_P (op2) && (REGNO (op2) == FPUL_REG))
    return true;
  if (REG_P (op1) && FP_REGISTER_P (REGNO (op1))
      && REG_P (op0) && GENERAL_REGISTER_P (REGNO (op0))
      && REG_P (op2) && (REGNO (op2) == FPUL_REG))
    return true;
  return false;
}
static void
sh_conditional_register_usage (void)
{
  for (int regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (! VALID_REGISTER_P (regno))
      fixed_regs[regno] = 1;
  /* R8 and R9 are call-clobbered on SH5, but not on earlier SH ABIs.  */
  if (flag_pic)
    fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
  if (TARGET_FDPIC)
    {
      fixed_regs[PIC_REG] = 1;
      call_used_regs[PIC_REG] = 1;
    }
  /* Renesas saves and restores mac registers on call.  */
  if (TARGET_HITACHI && ! TARGET_NOMACSAVE)
    {
      call_used_regs[MACH_REG] = 0;
      call_used_regs[MACL_REG] = 0;
    }

  for (int regno = FIRST_GENERAL_REG; regno <= LAST_GENERAL_REG; regno++)
    if (! fixed_regs[regno] && call_used_regs[regno])
      SET_HARD_REG_BIT (reg_class_contents[SIBCALL_REGS], regno);

  call_used_regs[FPSCR_MODES_REG] = 0;
  call_used_regs[FPSCR_STAT_REG] = 0;
}
/* Implement TARGET_LEGITIMATE_CONSTANT_P

   can_store_by_pieces constructs VOIDmode CONST_DOUBLEs.  */
static bool
sh_legitimate_constant_p (machine_mode mode, rtx x)
{
  if (SH_OFFSETS_MUST_BE_WITHIN_SECTIONS_P)
    {
      rtx base, offset;
      split_const (x, &base, &offset);

      if (GET_CODE (base) == SYMBOL_REF
          && !offset_within_block_p (base, INTVAL (offset)))
        return false;
    }

  if (TARGET_FDPIC
      && (SYMBOLIC_CONST_P (x)
          || (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
              && SYMBOLIC_CONST_P (XEXP (XEXP (x, 0), 0)))))
    return false;

  return GET_CODE (x) != CONST_DOUBLE
         || mode == DFmode || mode == SFmode
         || mode == DImode || GET_MODE (x) == VOIDmode;
}
enum sh_divide_strategy_e sh_div_strategy = SH_DIV_STRATEGY_DEFAULT;

void
sh_init_sync_libfuncs (void)
{
  init_sync_libfuncs (UNITS_PER_WORD);
}
/* Return true if it is appropriate to emit `ret' instructions in the
   body of a function.  */
bool
sh_can_use_simple_return_p (void)
{
  if (! reload_completed || frame_pointer_needed)
    return false;

  /* Moving prologue around doesn't reduce the size.  */
  if (optimize_function_for_size_p (cfun))
    return false;

  /* Finally, allow for pr save.  */
  HARD_REG_SET live_regs_mask;
  int d = calc_live_regs (&live_regs_mask);

  if (rounded_frame_size (d) > 4)
    return false;

  return true;
}
/*------------------------------------------------------------------------------
  Address mode optimization support code
*/

typedef HOST_WIDE_INT disp_t;
static const disp_t MIN_DISP = HOST_WIDE_INT_MIN;
static const disp_t MAX_DISP = HOST_WIDE_INT_MAX;
static const disp_t INVALID_DISP = MAX_DISP;

/* A memory reference which is described by a base register and a
   displacement.  */
class base_reg_disp
{
public:
  base_reg_disp (rtx br, disp_t d);

  bool is_reg (void) const;
  bool is_disp (void) const;
  rtx reg (void) const;
  disp_t disp (void) const;

private:
  rtx reg_;
  disp_t disp_;
};

inline
base_reg_disp::base_reg_disp (rtx br, disp_t d)
: reg_ (br), disp_ (d)
{
}

inline bool
base_reg_disp::is_reg (void) const
{
  return reg_ != NULL_RTX && disp_ != INVALID_DISP;
}

inline bool
base_reg_disp::is_disp (void) const
{
  return reg_ == NULL_RTX && disp_ != INVALID_DISP;
}

inline rtx
base_reg_disp::reg (void) const
{
  return reg_;
}

inline disp_t
base_reg_disp::disp (void) const
{
  return disp_;
}
/* Find the base register and calculate the displacement for a given
   address rtx 'x'.  */
static base_reg_disp
sh_find_base_reg_disp (rtx_insn *insn, rtx x, disp_t disp = 0,
                       rtx base_reg = NULL)
{
  if (REG_P (x))
    {
      if (REGNO (x) == GBR_REG)
        return base_reg_disp (x, disp);

      /* We've reached a hard-reg.  This is probably the point where
         function args are copied to pseudos.  Do not go any further and
         stick to the pseudo.  If the original mem addr was in a hard reg
         from the beginning, it will become the base reg.  */
      if (REGNO (x) < FIRST_PSEUDO_REGISTER)
        return base_reg_disp (base_reg != NULL ? base_reg : x, disp);

      /* Find the def of the reg and trace it.  If there are more than one
         defs and they are not the same, assume it's not safe to proceed.  */
      rtx_insn *last_i = NULL;
      rtx last_set = NULL;
      for (df_ref d = DF_REG_DEF_CHAIN (REGNO (x)); d != NULL;
           d = DF_REF_NEXT_REG (d))
        {
          rtx set = const_cast<rtx> (set_of (x, DF_REF_INSN (d)));

          /* Accept multiple defs, as long as they are equal.  */
          if (last_set == NULL || rtx_equal_p (last_set, set))
            {
              last_i = DF_REF_INSN (d);
              last_set = set;
            }
          else
            {
              last_i = NULL;
              last_set = NULL;
              break;
            }
        }

      if (last_set != NULL && last_i != NULL)
        return sh_find_base_reg_disp (last_i, XEXP (last_set, 1), disp,
                                      XEXP (last_set, 0));

      /* When here, no previous insn was found that sets the reg.
         The input reg is already the base reg.  */
      return base_reg_disp (x, disp);
    }

  else if (GET_CODE (x) == PLUS)
    {
      base_reg_disp left_val = sh_find_base_reg_disp (insn, XEXP (x, 0));
      base_reg_disp right_val = sh_find_base_reg_disp (insn, XEXP (x, 1));

      /* Either left or right val must be a reg.
         We don't handle the case of 'reg + reg' here.  */
      if (left_val.is_reg () && right_val.is_disp ())
        return base_reg_disp (left_val.reg (), left_val.disp ()
                              + right_val.disp () + disp);
      else if (right_val.is_reg () && left_val.is_disp ())
        return base_reg_disp (right_val.reg (), right_val.disp ()
                              + left_val.disp () + disp);
      else
        return base_reg_disp (base_reg, disp);
    }

  else if (CONST_INT_P (x))
    return base_reg_disp (NULL, disp + INTVAL (x));

  /* Didn't find anything useful.  */
  return base_reg_disp (base_reg, disp);
}
/* Given an insn and a memory operand, try to find an equivalent GBR
   based memory address and return the corresponding new memory address.
   Return NULL_RTX if not found.  */
rtx
sh_find_equiv_gbr_addr (rtx_insn *insn, rtx mem)
{
  if (!MEM_P (mem) || gbr_address_mem (mem, GET_MODE (mem)))
    return NULL_RTX;

  /* Leave post/pre inc/dec or any other side effect addresses alone.  */
  if (side_effects_p (XEXP (mem, 0)))
    return NULL_RTX;

  /* When not optimizing there might be no dataflow available.  */
  if (df == NULL)
    return NULL_RTX;

  base_reg_disp gbr_disp = sh_find_base_reg_disp (insn, XEXP (mem, 0));

  if (gbr_disp.is_reg () && REGNO (gbr_disp.reg ()) == GBR_REG)
    {
      /* If GBR is marked as call clobbered we bail out if we see a call.
         FIXME: Actually should check if this mem refers to the gbr value
         before or after the call.  If there is a store_gbr preceding this
         mem, it's safe to use GBR for this mem.

         If GBR is not marked as call clobbered, but there is some other
         def than a call, it's probably a load_gbr upon which we also
         bail out to be on the safe side.
         FIXME: Should check if we have a use-after-def case, such as
         the call case above.  */
      for (df_ref d = DF_REG_DEF_CHAIN (GBR_REG); d != NULL;
           d = DF_REF_NEXT_REG (d))
        {
          if (CALL_P (DF_REF_INSN (d)))
            {
              if (TEST_HARD_REG_BIT (regs_invalidated_by_call, GBR_REG))
                return NULL_RTX;
              else
                continue;
            }
          else
            return NULL_RTX;
        }

      rtx disp = GEN_INT (gbr_disp.disp ());
      if (gbr_displacement (disp, GET_MODE (mem)))
        return gen_rtx_PLUS (SImode, gen_rtx_REG (SImode, GBR_REG), disp);
    }

  return NULL_RTX;
}
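
/* Illustrative example (assumed code, not from the original source): if a
   pseudo was set up from GBR earlier in the function, e.g.

       stc    gbr,r1          ! r1 = gbr
       ...
       mov.l  @(8,r1),r0      ! address traces back to gbr + 8

   the load can be rewritten to use GBR directly, "mov.l @(8,gbr),r0",
   as long as the displacement satisfies gbr_displacement.  */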
/*------------------------------------------------------------------------------
  Manual insn combine support code.
*/

/* Return true if the specified insn contains any UNSPECs or
   UNSPEC_VOLATILEs.  */
bool
sh_unspec_insn_p (rtx x)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (i, array, x, ALL)
    if (*i != NULL
        && (GET_CODE (*i) == UNSPEC || GET_CODE (*i) == UNSPEC_VOLATILE))
      return true;

  return false;
}

/* Return true if the register operands of the specified insn are modified
   between the specified from and to insns (exclusive of those two).  */
bool
sh_insn_operands_modified_between_p (rtx_insn *operands_insn,
                                     const rtx_insn *from,
                                     const rtx_insn *to)
{
  /* FIXME: Return true for multiple sets for now.  */
  rtx s = single_set (operands_insn);
  if (s == NULL_RTX)
    return true;

  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (i, array, SET_SRC (s), ALL)
    if (*i != NULL
        && ((REG_P (*i) || SUBREG_P (*i)) && reg_set_between_p (*i, from, to)))
      return true;

  return false;
}
/* Given an insn, determine whether it's a 'nott' insn, i.e. an insn that
   negates the T bit and stores the result in the T bit.  */
bool
sh_is_nott_insn (const rtx_insn *i)
{
  return i != NULL && GET_CODE (PATTERN (i)) == SET
         && t_reg_operand (XEXP (PATTERN (i), 0), VOIDmode)
         && negt_reg_operand (XEXP (PATTERN (i), 1), VOIDmode);
}

rtx
sh_movt_set_dest (const rtx_insn *i)
{
  return i == NULL ? NULL : sh_movt_set_dest (PATTERN (i));
}

rtx
sh_movt_set_dest (const_rtx pat)
{
  return GET_CODE (pat) == SET
         && arith_reg_dest (XEXP (pat, 0), SImode)
         && t_reg_operand (XEXP (pat, 1), VOIDmode) ? XEXP (pat, 0) : NULL;
}

/* Given an insn, check whether it's a 'movrt' kind of insn, i.e. an insn
   that stores the negated T bit in a register, and return the destination
   register rtx, or null.  */
rtx
sh_movrt_set_dest (const rtx_insn *i)
{
  return i == NULL ? NULL : sh_movrt_set_dest (PATTERN (i));
}

rtx
sh_movrt_set_dest (const_rtx pat)
{
  /* The negc movrt replacement is inside a parallel.  */
  if (GET_CODE (pat) == PARALLEL)
    pat = XVECEXP (pat, 0, 0);

  return GET_CODE (pat) == SET
         && arith_reg_dest (XEXP (pat, 0), SImode)
         && negt_reg_operand (XEXP (pat, 1), VOIDmode) ? XEXP (pat, 0) : NULL;
}
/* Given an insn and a reg number, tell whether the reg dies or is unused
   after the insn.  */
bool
sh_reg_dead_or_unused_after_insn (const rtx_insn *i, int regno)
{
  return find_regno_note (i, REG_DEAD, regno) != NULL
         || find_regno_note (i, REG_UNUSED, regno) != NULL;
}

/* Given an insn and a reg number, remove reg dead or reg unused notes to
   mark it as being used after the insn.  */
void
sh_remove_reg_dead_or_unused_notes (rtx_insn *i, int regno)
{
  if (rtx n = find_regno_note (i, REG_DEAD, regno))
    remove_note (i, n);
  if (rtx n = find_regno_note (i, REG_UNUSED, regno))
    remove_note (i, n);
}
/* Given an insn check if it contains any post/pre inc/dec mem operands and
   add the REG_INC notes accordingly.
   FIXME: This function is very similar to lra.c (add_auto_inc_notes).
   FIXME: This function is currently used by peephole2 patterns because
          the peephole2 pass does not preserve REG_INC notes.  If the notes
          are dropped the following passes will do wrong things.  */
rtx_insn *
sh_check_add_incdec_notes (rtx_insn *i)
{
  struct for_each_inc_dec_clb
  {
    static int func (rtx mem ATTRIBUTE_UNUSED, rtx op ATTRIBUTE_UNUSED,
                     rtx dest, rtx src ATTRIBUTE_UNUSED,
                     rtx srcoff ATTRIBUTE_UNUSED, void* arg)
    {
      gcc_assert (REG_P (dest));

      rtx_insn* i = (rtx_insn*)arg;
      if (find_regno_note (i, REG_INC, REGNO (dest)) == NULL)
        add_reg_note (i, REG_INC, dest);

      return 0;
    }
  };

  for_each_inc_dec (PATTERN (i), for_each_inc_dec_clb::func, i);
  return i;
}
/* Given a move insn destination and a source, make sure that the move source
   operand is not a post-inc mem load with the same address reg as the
   destination.  Returns the modified source operand with the post-inc removed
   if necessary.  */
rtx
sh_remove_overlapping_post_inc (rtx dst, rtx src)
{
  if (!MEM_P (src))
    return src;

  rtx addr = XEXP (src, 0);

  if (GET_CODE (addr) == POST_INC
      && reg_overlap_mentioned_p (XEXP (addr, 0), dst))
    return replace_equiv_address (src, XEXP (addr, 0));

  gcc_assert (GET_CODE (addr) != POST_MODIFY);
  return src;
}

/* Emit a move insn that is safe to be used in peephole patterns.  */
rtx_insn *
sh_peephole_emit_move_insn (rtx dst, rtx src)
{
  return sh_check_add_incdec_notes (
        emit_move_insn (dst, sh_remove_overlapping_post_inc (dst, src)));
}
/* Given an op rtx and an insn, try to find out whether the result of the
   specified op consists only of logical operations on T bit stores.  */
bool
sh_is_logical_t_store_expr (rtx op, rtx_insn *insn)
{
  if (!logical_operator (op, SImode))
    return false;

  rtx ops[2] = { XEXP (op, 0), XEXP (op, 1) };
  int op_is_t_count = 0;

  for (int i = 0; i < 2; ++i)
    {
      if (t_reg_operand (ops[i], VOIDmode)
          || negt_reg_operand (ops[i], VOIDmode))
        op_is_t_count++;

      else
        {
          set_of_reg op_set = sh_find_set_of_reg
            (ops[i], insn, prev_nonnote_nondebug_insn_bb);
          if (op_set.set_src == NULL_RTX)
            continue;

          if (t_reg_operand (op_set.set_src, VOIDmode)
              || negt_reg_operand (op_set.set_src, VOIDmode)
              || sh_is_logical_t_store_expr (op_set.set_src, op_set.insn))
            op_is_t_count++;
        }
    }

  return op_is_t_count == 2;
}
11916 insn, try to figure out whether the sign/zero extension can be replaced
11917 by a simple reg-reg copy. If so, the replacement reg rtx is returned,
11918 NULL_RTX otherwise. */
11920 sh_try_omit_signzero_extend (rtx extended_op
, rtx_insn
* insn
)
11922 if (REG_P (extended_op
))
11923 extended_op
= extended_op
;
11924 else if (GET_CODE (extended_op
) == SUBREG
&& REG_P (SUBREG_REG (extended_op
)))
11925 extended_op
= SUBREG_REG (extended_op
);
11929 /* Reg moves must be of the same mode. */
11930 if (GET_MODE (extended_op
) != SImode
)
11933 set_of_reg s
= sh_find_set_of_reg (extended_op
, insn
,
11934 prev_nonnote_nondebug_insn_bb
);
11935 if (s
.set_src
== NULL_RTX
)
11938 if (t_reg_operand (s
.set_src
, VOIDmode
)
11939 || negt_reg_operand (s
.set_src
, VOIDmode
))
11940 return extended_op
;
11942 /* If the zero extended reg was formed by a logical operation, check the
11943 operands of the logical operation. If both originated from T bit
11944 stores the zero extension can be eliminated. */
11945 else if (sh_is_logical_t_store_expr (s
.set_src
, s
.insn
))
11946 return extended_op
;
11951 /* Given the current insn, which is assumed to be a movrt_negc insn, try to
11952 figure out whether it should be converted into a movt-xor sequence in
11953 the movrt_negc splitter.
11954 Returns true if insns have been modified and the splitter has succeeded. */
11956 sh_split_movrt_negc_to_movt_xor (rtx_insn
* curr_insn
, rtx operands
[])
11958 /* In cases such as
11963 we can replace the T bit clobbering negc with a movt-xor sequence and
11964 eliminate the redundant comparison.
11965 Because the xor insn depends on register allocation results, allow this
11966 only before reload. */
11967 if (!can_create_pseudo_p ())
11970 set_of_reg t_before_negc
= sh_find_set_of_reg
11971 (get_t_reg_rtx (), curr_insn
, prev_nonnote_nondebug_insn_bb
);
11972 set_of_reg t_after_negc
= sh_find_set_of_reg
11973 (get_t_reg_rtx (), curr_insn
, next_nonnote_nondebug_insn_bb
);
11975 if (t_before_negc
.set_rtx
!= NULL_RTX
&& t_after_negc
.set_rtx
!= NULL_RTX
11976 && rtx_equal_p (t_before_negc
.set_rtx
, t_after_negc
.set_rtx
)
11977 && !reg_used_between_p (get_t_reg_rtx (), curr_insn
, t_after_negc
.insn
)
11978 && !sh_insn_operands_modified_between_p (t_before_negc
.insn
,
11979 t_before_negc
.insn
,
11981 && !modified_between_p (get_t_reg_rtx (), curr_insn
, t_after_negc
.insn
)
11982 && !sh_unspec_insn_p (t_after_negc
.insn
)
11983 && !volatile_insn_p (PATTERN (t_after_negc
.insn
))
11984 && !side_effects_p (PATTERN (t_after_negc
.insn
))
11985 && !may_trap_or_fault_p (PATTERN (t_after_negc
.insn
)))
11987 emit_insn (gen_movrt_xor (operands
[0], get_t_reg_rtx ()));
11988 set_insn_deleted (t_after_negc
.insn
);
/* Given a reg and the current insn, see if the value of the reg originated
   from a sign or zero extension and return the discovered information.  */
sh_extending_set_of_reg
sh_find_extending_set_of_reg (rtx reg, rtx_insn *curr_insn)
{
  if (reg == NULL)
    return sh_extending_set_of_reg (curr_insn);

  if (SUBREG_P (reg))
    reg = SUBREG_REG (reg);

  if (!REG_P (reg))
    return sh_extending_set_of_reg (curr_insn);

  /* FIXME: Also search the predecessor basic blocks.  It seems that checking
     only the adjacent predecessor blocks would cover most of the cases.
     Also try to look through the first extension that we hit.  There are some
     cases, where a zero_extend is followed by an (implicit) sign_extend, and
     it fails to see the sign_extend.  */
  sh_extending_set_of_reg result = sh_find_set_of_reg
    (reg, curr_insn, prev_nonnote_nondebug_insn_bb, true);

  if (result.set_src != NULL)
    {
      if (GET_CODE (result.set_src) == SIGN_EXTEND
          || GET_CODE (result.set_src) == ZERO_EXTEND)
        {
          if (dump_file)
            fprintf (dump_file, "sh_find_extending_set_of_reg: reg %d is "
                     "explicitly sign/zero extended in insn %d\n",
                     REGNO (reg), INSN_UID (result.insn));
          result.from_mode = GET_MODE (XEXP (result.set_src, 0));
          result.ext_code = GET_CODE (result.set_src);
        }
      else if (MEM_P (result.set_src)
               && (GET_MODE (result.set_src) == QImode
                   || GET_MODE (result.set_src) == HImode)
               && !sh_unspec_insn_p (result.insn))
        {
          /* On SH QIHImode memory loads always sign extend.  However, in
             some cases where it seems that the higher bits are not
             interesting, the loads will not be expanded as sign extending
             insns, but as QIHImode loads into QIHImode regs.  We report that
             the reg has been sign extended by the mem load.  When it is used
             as such, we must convert the mem load into a sign extending insn,
             see also sh_extending_set_of_reg::use_as_extended_reg.  */
          if (dump_file)
            fprintf (dump_file, "sh_find_extending_set_of_reg: reg %d is "
                     "implicitly sign extended in insn %d\n",
                     REGNO (reg), INSN_UID (result.insn));
          result.from_mode = GET_MODE (result.set_src);
          result.ext_code = SIGN_EXTEND;
        }
    }

  return result;
}
/* Given a reg that is known to be sign or zero extended at some insn,
   take the appropriate measures so that the extended value can be used as
   a reg at the specified insn and return the resulting reg rtx.  */
rtx
sh_extending_set_of_reg::use_as_extended_reg (rtx_insn *use_at_insn) const
{
  gcc_assert (insn != NULL && set_src != NULL && set_rtx != NULL);
  gcc_assert (ext_code == SIGN_EXTEND || ext_code == ZERO_EXTEND);
  gcc_assert (from_mode == QImode || from_mode == HImode);

  if (MEM_P (set_src) && ext_code == SIGN_EXTEND)
    {
      if (dump_file)
        fprintf (dump_file,
                 "use_as_extended_reg: converting non-extending mem load in "
                 "insn %d into sign-extending load\n", INSN_UID (insn));

      rtx r = gen_reg_rtx (SImode);
      rtx_insn *i0;
      if (from_mode == QImode)
        i0 = sh_check_add_incdec_notes (
               emit_insn_after (gen_extendqisi2 (r, set_src), insn));
      else if (from_mode == HImode)
        i0 = sh_check_add_incdec_notes (
               emit_insn_after (gen_extendhisi2 (r, set_src), insn));
      else
        gcc_unreachable ();

      emit_insn_after (
        gen_move_insn (XEXP (set_rtx, 0),
                       gen_lowpart (GET_MODE (set_src), r)), i0);
      set_insn_deleted (insn);
      return r;
    }
  else
    {
      rtx extension_dst = XEXP (set_rtx, 0);
      if (GET_MODE (extension_dst) != SImode)
        extension_dst = simplify_gen_subreg (SImode, extension_dst,
                                             GET_MODE (extension_dst), 0);
      if (modified_between_p (extension_dst, insn, use_at_insn))
        {
          if (dump_file)
            fprintf (dump_file,
                     "use_as_extended_reg: dest reg %d of extending insn %d is "
                     "modified, inserting a reg-reg copy\n",
                     REGNO (extension_dst), INSN_UID (insn));

          rtx r = gen_reg_rtx (SImode);
          emit_insn_after (gen_move_insn (r, extension_dst), insn);
          return r;
        }
      else
        {
          sh_remove_reg_dead_or_unused_notes (insn, REGNO (extension_dst));
          return extension_dst;
        }
    }
}
12116 if ((ext_code
== SIGN_EXTEND
|| ext_code
== ZERO_EXTEND
)
12117 && (from_mode
== QImode
|| from_mode
== HImode
)
12118 && set_src
!= NULL
)
12119 return arith_reg_operand (XEXP (set_src
, 0), from_mode
);
12125 sh_extending_set_of_reg::use_as_unextended_reg (rtx_insn
* use_at_insn
) const
12127 gcc_assert (can_use_as_unextended_reg ());
12129 rtx r
= XEXP (set_src
, 0);
12130 rtx r0
= simplify_gen_subreg (SImode
, r
, from_mode
, 0);
12132 if (modified_between_p (r
, insn
, use_at_insn
))
12134 rtx r1
= gen_reg_rtx (SImode
);
12135 emit_insn_after (gen_move_insn (r1
, r0
), insn
);
12140 sh_remove_reg_dead_or_unused_notes (insn
, SUBREG_P (r
)
12141 ? REGNO (SUBREG_REG (r
))
12147 /* Given the current insn, which is assumed to be the *tst<mode>_t_subregs insn,
12148 perform the necessary checks on the operands and split it accordingly. */
12150 sh_split_tst_subregs (rtx_insn
* curr_insn
, machine_mode subreg_mode
,
12151 int subreg_offset
, rtx operands
[])
12153 gcc_assert (subreg_mode
== QImode
|| subreg_mode
== HImode
);
12155 sh_extending_set_of_reg eop0
= sh_find_extending_set_of_reg (operands
[0],
12157 sh_extending_set_of_reg eop1
= sh_find_extending_set_of_reg (operands
[1],
12160 /* If one of the operands is known to be zero extended, that's already
12161 sufficient to mask out the unwanted high bits. */
12162 if (eop0
.ext_code
== ZERO_EXTEND
&& eop0
.from_mode
== subreg_mode
)
12164 emit_insn (gen_tstsi_t (eop0
.use_as_extended_reg (curr_insn
),
12168 if (eop1
.ext_code
== ZERO_EXTEND
&& eop1
.from_mode
== subreg_mode
)
12170 emit_insn (gen_tstsi_t (operands
[0],
12171 eop1
.use_as_extended_reg (curr_insn
)));
12175 /* None of the operands seem to be zero extended.
12176 If both are sign extended it's OK, too. */
12177 if (eop0
.ext_code
== SIGN_EXTEND
&& eop1
.ext_code
== SIGN_EXTEND
12178 && eop0
.from_mode
== subreg_mode
&& eop1
.from_mode
== subreg_mode
)
12180 emit_insn (gen_tstsi_t (eop0
.use_as_extended_reg (curr_insn
),
12181 eop1
.use_as_extended_reg (curr_insn
)));
12185 /* Otherwise we have to insert a zero extension on one of the operands to
12186 mask out the unwanted high bits.
12187 Prefer the operand that has no known extension. */
12188 if (eop0
.ext_code
!= UNKNOWN
&& eop1
.ext_code
== UNKNOWN
)
12189 std::swap (operands
[0], operands
[1]);
12191 rtx tmp0
= gen_reg_rtx (SImode
);
12192 rtx tmp1
= simplify_gen_subreg (subreg_mode
, operands
[0],
12193 GET_MODE (operands
[0]), subreg_offset
);
12194 emit_insn (subreg_mode
== QImode
12195 ? gen_zero_extendqisi2 (tmp0
, tmp1
)
12196 : gen_zero_extendhisi2 (tmp0
, tmp1
));
12197 emit_insn (gen_tstsi_t (tmp0
, operands
[1]));
/* A helper class to increment/decrement a counter variable each time a
   function is entered/left.  */
class scope_counter
{
public:
  scope_counter (int& counter) : m_counter (counter) { ++m_counter; }

  ~scope_counter (void)
  {
    --m_counter;
    gcc_assert (m_counter >= 0);
  }

  int count (void) const { return m_counter; }

private:
  int& m_counter;
};
/* Given an rtx x, determine whether the expression can be used to create
   an insn that calculates x and stores the result in the T bit.
   This is used by the 'treg_set_expr' predicate to construct insns sequences
   where T bit results are fed into other insns, such as addc, subc, negc
   insns.

   FIXME: The patterns that expand 'treg_set_expr' operands tend to
   distinguish between 'positive' and 'negative' forms.  For now this has to
   be done in the preparation code.  We could also introduce
   'pos_treg_set_expr' and 'neg_treg_set_expr' predicates for that and write
   two different patterns for the 'positive' and 'negative' forms.  However,
   the total amount of lines of code seems to be about the same and the
   '{pos|neg}_treg_set_expr' predicates would be more expensive, because the
   recog function would need to look inside the expression by temporarily
   splitting it.  */
static int sh_recog_treg_set_expr_reent_count = 0;

bool
sh_recog_treg_set_expr (rtx op, machine_mode mode)
{
  scope_counter recursion (sh_recog_treg_set_expr_reent_count);

  /* Limit the recursion count to avoid nested expressions which we can't
     resolve to a single treg set insn.  */
  if (recursion.count () > 1)
    return false;

  /* Early accept known possible operands before doing recog.  */
  if (op == const0_rtx || op == const1_rtx || t_reg_operand (op, mode)
      || negt_reg_operand (op, mode))
    return true;

  /* Early reject impossible operands before doing recog.
     There are some (set ((t) (subreg ...))) patterns, but we must be careful
     not to allow any invalid reg-reg or mem-reg moves, or else other passes
     such as lower-subreg will bail out.  Some insns such as SH4A movua are
     done with UNSPEC, so we must reject those, too, or else it would result
     in an invalid reg -> treg move.  */
  if (CONST_INT_P (op) || register_operand (op, mode)
      || memory_operand (op, mode) || sh_unspec_insn_p (op))
    return false;

  if (!can_create_pseudo_p ())
    return false;

  /* expand_debug_locations may call this to compute rtx costs at
     a very early stage.  In that case, don't make new insns here to
     avoid codegen differences with -g.  */
  if (currently_expanding_to_rtl)
    return false;

  /* We are going to invoke recog in a re-entrant way and thus
     have to capture its current state and restore it afterwards.  */
  recog_data_d prev_recog_data = recog_data;

  rtx_insn* i = make_insn_raw (gen_rtx_SET (get_t_reg_rtx (), op));
  SET_PREV_INSN (i) = NULL;
  SET_NEXT_INSN (i) = NULL;

  /* If the comparison op doesn't have a result mode, set it to SImode.  */
  machine_mode prev_op_mode = GET_MODE (op);
  if (COMPARISON_P (op) && prev_op_mode == VOIDmode)
    PUT_MODE (op, SImode);

  int result = recog (PATTERN (i), i, 0);

  /* It seems there is no insn like that.  Create a negated version and
     try again.  If we hit a negated form, we'll allow that and append a
     nott sequence when splitting out the insns.  Insns that do the split
     can then remove the trailing nott if they know how to deal with it.  */
  if (result < 0 && COMPARISON_P (op))
    {
      machine_mode cmp_mode = GET_MODE (XEXP (op, 0));
      if (cmp_mode == VOIDmode)
	cmp_mode = GET_MODE (XEXP (op, 1));

      rtx_code prev_code = GET_CODE (op);
      PUT_CODE (op, reverse_condition (GET_CODE (op)));
      result = recog (PATTERN (i), i, 0);
      PUT_CODE (op, prev_code);
    }

  PUT_MODE (op, prev_op_mode);
  recog_data = prev_recog_data;
  return result >= 0;
}
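
/* Illustrative example: SH has a cmp/gt instruction but no cmp/le, so an
   expression such as (le (reg) (reg)) will not recog directly.  The retry
   above reverses it to (gt (reg) (reg)), which does match, and the negation
   is compensated later by a trailing nott when the expression is split
   (see sh_split_treg_set_expr below).  */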

/* Returns true when recog of a 'treg_set_expr' is currently in progress.
   This can be used as a condition for insn/split patterns to allow certain
   T bit setting patterns only to be matched as sub expressions of other
   patterns.  */
bool
sh_in_recog_treg_set_expr (void)
{
  return sh_recog_treg_set_expr_reent_count > 0;
}
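
/* Usage sketch (the pattern name is hypothetical): an insn/split pattern in
   sh.md can restrict itself to this context through its condition, e.g.

     (define_insn_and_split "*treg_subexpr_example"
       [ ... ]
       "TARGET_SH1 && sh_in_recog_treg_set_expr ()"
       ...)

   so it is only considered while nested inside a 'treg_set_expr'
   recognition or split.  */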

/* Given an rtx x, which is assumed to be some expression that has been
   matched by the 'treg_set_expr' predicate before, split and emit the
   insns that are necessary to calculate the expression and store the result
   in the T bit.
   The splitting is done recursively similar to 'try_split' in emit-rtl.c.
   Unfortunately we can't use 'try_split' here directly, as it tries to invoke
   'delete_insn' which then causes the DF parts to bail out, because we
   currently are inside another gen_split* function and would invoke
   'try_split' in a reentrant way.  */
static std::pair<rtx_insn*, rtx_insn*>
sh_try_split_insn_simple (rtx_insn* i, rtx_insn* curr_insn, int n = 0)
{
  if (dump_file)
    {
      fprintf (dump_file, "sh_try_split_insn_simple n = %d i = \n", n);
      print_rtl_single (dump_file, i);
      fprintf (dump_file, "\n");
    }

  rtx_insn* seq = split_insns (PATTERN (i), curr_insn);

  if (seq == NULL)
    return std::make_pair (i, i);

  /* Avoid infinite splitter loops if any insn of the result matches
     the original pattern.  */
  for (rtx_insn* s = seq; s != NULL; s = NEXT_INSN (s))
    if (INSN_P (s) && rtx_equal_p (PATTERN (s), PATTERN (i)))
      return std::make_pair (i, i);

  unshare_all_rtl_in_chain (seq);

  /* 'seq' is now a replacement for 'i'.  Assuming that 'i' is an insn in
     a linked list, replace the single insn with the new insns.  */
  rtx_insn* seqlast = seq;
  while (NEXT_INSN (seqlast) != NULL)
    seqlast = NEXT_INSN (seqlast);

  if (rtx_insn* iprev = PREV_INSN (i))
    SET_NEXT_INSN (iprev) = seq;
  if (rtx_insn* inext = NEXT_INSN (i))
    SET_PREV_INSN (inext) = seqlast;

  SET_PREV_INSN (seq) = PREV_INSN (i);
  SET_NEXT_INSN (seqlast) = NEXT_INSN (i);

  SET_PREV_INSN (i) = NULL;
  SET_NEXT_INSN (i) = NULL;

  /* Recursively split all insns.  */
  for (i = seq; ; i = NEXT_INSN (i))
    {
      std::pair<rtx_insn*, rtx_insn*> ii =
	  sh_try_split_insn_simple (i, curr_insn, n + 1);
      if (i == seq)
	seq = ii.first;
      if (i == seqlast)
	{
	  seqlast = ii.second;
	  break;
	}
      i = ii.second;
    }

  return std::make_pair (seq, seqlast);
}
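
/* The splice performed above can be pictured as follows (illustration
   only):

     before:  iprev -> i -> inext
     after:   iprev -> seq -> ... -> seqlast -> inext

   'i' is merely unlinked (its PREV/NEXT pointers set to NULL) instead of
   being deleted, since delete_insn cannot be used in this re-entrant
   context.  */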

sh_treg_insns
sh_split_treg_set_expr (rtx x, rtx_insn* curr_insn)
{
  if (t_reg_operand (x, VOIDmode))
    return sh_treg_insns ();

  scope_counter in_treg_set_expr (sh_recog_treg_set_expr_reent_count);

  rtx_insn* i = make_insn_raw (gen_rtx_SET (get_t_reg_rtx (), x));
  SET_PREV_INSN (i) = NULL;
  SET_NEXT_INSN (i) = NULL;

  if (dump_file)
    {
      fprintf (dump_file, "split_treg_set_expr insn:\n");
      print_rtl (dump_file, i);
      fprintf (dump_file, "\n");
    }

  /* If the insn is not found, we will try a negated form and append
     a nott sequence.  */
  bool append_nott = false;

  /* We are going to invoke recog/split_insns in a re-entrant way and thus
     have to capture its current state and restore it afterwards.  */
  recog_data_d prev_recog_data = recog_data;

  if (negt_reg_operand (x, GET_MODE (x)))
    {
      /* This is a normal movt followed by a nott.  It will be converted
	 into a movrt after initial expansion.  */
      XEXP (PATTERN (i), 1) = get_t_reg_rtx ();
      append_nott = true;
    }
  else
    {
      /* If the comparison op doesn't have a mode set, set it to SImode.  */
      if (COMPARISON_P (x) && GET_MODE (x) == VOIDmode)
	PUT_MODE (x, SImode);

      int insn_code = recog (PATTERN (i), i, 0);

      if (insn_code < 0 && COMPARISON_P (x))
	{
	  machine_mode cmp_mode = GET_MODE (XEXP (x, 0));
	  if (cmp_mode == VOIDmode)
	    cmp_mode = GET_MODE (XEXP (x, 1));

	  PUT_CODE (x, reverse_condition (GET_CODE (x)));
	  insn_code = recog (PATTERN (i), i, 0);
	  append_nott = true;
	}

      gcc_assert (insn_code >= 0);
    }

  /* Try to recursively split the insn.  Some insns might refuse to split
     any further while we are in the treg_set_expr splitting phase.  They
     will be emitted as part of the outer insn and then split again.  */
  std::pair<rtx_insn*, rtx_insn*> insnlist =
	sh_try_split_insn_simple (i, curr_insn);

  /* Restore recog state.  */
  recog_data = prev_recog_data;

  rtx_insn* nott_insn = sh_is_nott_insn (insnlist.second)
			? insnlist.second
			: NULL;
  if (dump_file)
    {
      fprintf (dump_file, "split_treg_set_expr insnlist:\n");
      print_rtl (dump_file, insnlist.first);
      fprintf (dump_file, "\n");

      if (nott_insn != NULL)
	fprintf (dump_file, "trailing nott insn %d\n", INSN_UID (nott_insn));
    }

  emit_insn (insnlist.first);

  if (nott_insn != NULL && append_nott)
    {
      if (dump_file)
	fprintf (dump_file, "removing trailing nott\n");
      remove_insn (nott_insn);
      nott_insn = NULL;
      append_nott = false;
    }

  if (append_nott)
    nott_insn = emit_insn (gen_nott (get_t_reg_rtx ()));

  rtx_insn* first_insn = get_insns ();

  if (dump_file)
    {
      fprintf (dump_file, "resulting insns:\n");
      print_rtl (dump_file, first_insn);
      fprintf (dump_file, "\n");
    }

  return sh_treg_insns (first_insn, nott_insn);
}
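
/* Usage sketch (illustrative; the operand index is an assumption, not a
   verbatim quote of sh.md): a splitter that feeds a T bit result into an
   addc-style insn would do roughly

     sh_treg_insns ti = sh_split_treg_set_expr (operands[2], curr_insn);
     ... emit the consuming insn ...

   where a consumer that can work with the inverted T bit may drop the
   trailing nott recorded in 'ti' instead of executing it.  */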

/*------------------------------------------------------------------------------
  Mode switching support code.
*/

static void
sh_emit_mode_set (int entity ATTRIBUTE_UNUSED, int mode,
		  int prev_mode, HARD_REG_SET regs_live ATTRIBUTE_UNUSED)
{
  if ((TARGET_SH4A_FP || TARGET_FPU_SH4_300)
      && prev_mode != FP_MODE_NONE && prev_mode != mode)
    {
      emit_insn (gen_toggle_pr ());
      if (TARGET_FMOVD)
	emit_insn (gen_toggle_sz ());
    }
  else if (mode != FP_MODE_NONE)
    {
      rtx tmp = gen_reg_rtx (SImode);
      emit_insn (gen_sts_fpscr (tmp));
      rtx i = NULL;

      const unsigned HOST_WIDE_INT fpbits =
	  TARGET_FMOVD ? (FPSCR_PR | FPSCR_SZ) : FPSCR_PR;

      if (prev_mode != FP_MODE_NONE && prev_mode != mode)
	i = gen_xorsi3 (tmp, tmp, force_reg (SImode, GEN_INT (fpbits)));
      else if (mode == FP_MODE_SINGLE)
	i = gen_andsi3 (tmp, tmp, force_reg (SImode, GEN_INT (~fpbits)));
      else if (mode == FP_MODE_DOUBLE)
	i = gen_iorsi3 (tmp, tmp, force_reg (SImode, GEN_INT (fpbits)));
      else
	gcc_unreachable ();

      emit_insn (i);
      emit_insn (gen_lds_fpscr (tmp));
    }
}
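
/* Worked example (follows directly from the code above): with TARGET_FMOVD,
   fpbits = FPSCR_PR | FPSCR_SZ, and the three paths compute

     single <-> double:  fpscr ^=  fpbits   (xorsi3)
     enter single     :  fpscr &= ~fpbits   (andsi3)
     enter double     :  fpscr |=  fpbits   (iorsi3)

   on the value read via sts_fpscr, before writing it back via
   lds_fpscr.  */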

static int
sh_mode_needed (int entity ATTRIBUTE_UNUSED, rtx_insn *insn)
{
  return recog_memoized (insn) >= 0 ? get_attr_fp_mode (insn) : FP_MODE_NONE;
}

static int
sh_mode_after (int entity ATTRIBUTE_UNUSED, int mode, rtx_insn *insn)
{
  if (TARGET_HITACHI && recog_memoized (insn) >= 0
      && get_attr_fp_set (insn) != FP_SET_NONE)
    return (int) get_attr_fp_set (insn);
  else
    return mode;
}

static int
sh_mode_entry (int entity ATTRIBUTE_UNUSED)
{
  return NORMAL_MODE (entity);
}

static int
sh_mode_exit (int entity ATTRIBUTE_UNUSED)
{
  return sh_cfun_attr_renesas_p () ? FP_MODE_NONE : NORMAL_MODE (entity);
}

static int
sh_mode_priority (int entity ATTRIBUTE_UNUSED, int n)
{
  return ((TARGET_FPU_SINGLE != 0) ^ (n) ? FP_MODE_SINGLE : FP_MODE_DOUBLE);
}
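
/* Worked example: for TARGET_FPU_SINGLE the expression is 1 ^ n, so n == 0
   yields FP_MODE_SINGLE and n == 1 yields FP_MODE_DOUBLE; without
   TARGET_FPU_SINGLE it is 0 ^ n and the order is reversed.  I.e. the
   target's default FP mode always gets the highest priority.  */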

/*------------------------------------------------------------------------------
  Misc
*/

/* Return true if we use LRA instead of the reload pass.  */
bool
sh_lra_p (void)
{
  return sh_lra_flag;
}

/* Implement TARGET_USE_BY_PIECES_INFRASTRUCTURE_P.  */
static bool
sh_use_by_pieces_infrastructure_p (unsigned HOST_WIDE_INT size,
				   unsigned int align,
				   enum by_pieces_operation op,
				   bool speed_p)
{
  switch (op)
    {
    case MOVE_BY_PIECES:
      return by_pieces_ninsns (size, align, MOVE_MAX_PIECES + 1, op)
	     < (!speed_p ? 2 : (align >= 32) ? 16 : 2);
    case STORE_BY_PIECES:
    case SET_BY_PIECES:
      return by_pieces_ninsns (size, align, STORE_MAX_PIECES + 1, op)
	     < (!speed_p ? 2 : (align >= 32) ? 16 : 2);
    default:
      return default_use_by_pieces_infrastructure_p (size, align,
						     op, speed_p);
    }
}
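
/* Worked example (illustrative): when optimizing for speed with at least
   32-bit alignment, a copy is expanded inline as long as it takes fewer
   than 16 insns; with smaller alignment, or when optimizing for size, the
   limit is 2, i.e. effectively only single-insn copies stay inline and
   larger blocks go through the library call.  */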

static bool
sh_cannot_force_const_mem_p (machine_mode mode ATTRIBUTE_UNUSED,
			     rtx x ATTRIBUTE_UNUSED)
{
  return TARGET_FDPIC;
}

/* Emit insns to load the function address from FUNCDESC (an FDPIC
   function descriptor) into r1 and the GOT address into r12,
   returning an rtx for r1.  */
rtx
sh_load_function_descriptor (rtx funcdesc)
{
  rtx r1 = gen_rtx_REG (Pmode, R1_REG);
  rtx pic_reg = gen_rtx_REG (Pmode, PIC_REG);
  rtx fnaddr = gen_rtx_MEM (Pmode, funcdesc);
  rtx gotaddr = gen_rtx_MEM (Pmode, plus_constant (Pmode, funcdesc, 4));

  emit_move_insn (r1, fnaddr);
  /* The ABI requires the entry point address to be loaded first, so
     prevent the load from being moved after that of the GOT
     address.  */
  emit_insn (gen_blockage ());
  emit_move_insn (pic_reg, gotaddr);

  return r1;
}
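
/* Illustrative sketch (the struct name is hypothetical; the layout follows
   the loads above): an FDPIC function descriptor is a two-word object of
   the form

     struct fdpic_funcdesc
     {
       void *entry_point;  // offset 0, loaded into r1
       void *got_value;    // offset 4, loaded into r12 (PIC_REG)
     };

   Calling through it means loading r1/r12 as done above and then jumping
   through r1.  */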

/* Return an rtx holding the initial value of the FDPIC register (the
   FDPIC pointer passed in from the caller).  */
rtx
sh_get_fdpic_reg_initial_val (void)
{
  return get_hard_reg_initial_val (Pmode, PIC_REG);
}