1 /* Output routines for GCC for Renesas / SuperH SH.
2 Copyright (C) 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
3 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
4 Contributed by Steve Chamberlain (sac@cygnus.com).
5 Improved by Jim Wilson (wilson@cygnus.com).
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3, or (at your option)
12 any later version.
13
14 GCC is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "insn-config.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "flags.h"
31 #include "expr.h"
32 #include "optabs.h"
33 #include "function.h"
34 #include "regs.h"
35 #include "hard-reg-set.h"
36 #include "output.h"
37 #include "insn-attr.h"
38 #include "toplev.h"
39 #include "recog.h"
40 #include "c-pragma.h"
41 #include "integrate.h"
42 #include "dwarf2.h"
43 #include "tm_p.h"
44 #include "target.h"
45 #include "target-def.h"
46 #include "real.h"
47 #include "langhooks.h"
48 #include "basic-block.h"
49 #include "df.h"
50 #include "cfglayout.h"
51 #include "intl.h"
52 #include "sched-int.h"
53 #include "ggc.h"
54 #include "tree-gimple.h"
55 #include "cfgloop.h"
56 #include "alloc-pool.h"
57 #include "tm-constrs.h"
58
59
60 int code_for_indirect_jump_scratch = CODE_FOR_indirect_jump_scratch;
61
62 #define MSW (TARGET_LITTLE_ENDIAN ? 1 : 0)
63 #define LSW (TARGET_LITTLE_ENDIAN ? 0 : 1)
64
65 /* These are some macros to abstract register modes. */
66 #define CONST_OK_FOR_ADD(size) \
67 (TARGET_SHMEDIA ? CONST_OK_FOR_I10 (size) : CONST_OK_FOR_I08 (size))
68 #define GEN_MOV (*(TARGET_SHMEDIA64 ? gen_movdi : gen_movsi))
69 #define GEN_ADD3 (*(TARGET_SHMEDIA64 ? gen_adddi3 : gen_addsi3))
70 #define GEN_SUB3 (*(TARGET_SHMEDIA64 ? gen_subdi3 : gen_subsi3))
71
72 /* Set to 1 by expand_prologue() when the function is an interrupt handler. */
73 int current_function_interrupt;
74
75 tree sh_deferred_function_attributes;
76 tree *sh_deferred_function_attributes_tail = &sh_deferred_function_attributes;
77
78 /* Global variables for machine-dependent things. */
79
80 /* Which cpu are we scheduling for. */
81 enum processor_type sh_cpu;
82
83 /* Definitions used in ready queue reordering for first scheduling pass. */
84
85 /* Reg weights arrays for modes SFmode and SImode, indexed by insn LUID. */
86 static short *regmode_weight[2];
87
88 /* Total SFmode and SImode weights of scheduled insns. */
89 static int curr_regmode_pressure[2];
90
91 /* Number of r0 life regions. */
92 static int r0_life_regions;
93
94 /* If true, skip cycles for Q -> R movement. */
95 static int skip_cycles = 0;
96
97 /* Cached value of can_issue_more. This is cached in sh_variable_issue hook
98 and returned from sh_reorder2. */
99 static short cached_can_issue_more;
100
101 /* Saved operands from the last compare to use when we generate an scc
102 or bcc insn. */
103
104 rtx sh_compare_op0;
105 rtx sh_compare_op1;
106
107 /* Provides the class number of the smallest class containing
108 reg number. */
109
110 enum reg_class regno_reg_class[FIRST_PSEUDO_REGISTER] =
111 {
112 R0_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
113 GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
114 GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
115 GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
116 GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
117 GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
118 GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
119 GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
120 GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
121 GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
122 GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
123 GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
124 GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
125 GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
126 GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
127 GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
128 FP0_REGS,FP_REGS, FP_REGS, FP_REGS,
129 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
130 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
131 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
132 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
133 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
134 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
135 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
136 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
137 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
138 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
139 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
140 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
141 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
142 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
143 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
144 TARGET_REGS, TARGET_REGS, TARGET_REGS, TARGET_REGS,
145 TARGET_REGS, TARGET_REGS, TARGET_REGS, TARGET_REGS,
146 DF_REGS, DF_REGS, DF_REGS, DF_REGS,
147 DF_REGS, DF_REGS, DF_REGS, DF_REGS,
148 NO_REGS, GENERAL_REGS, PR_REGS, T_REGS,
149 MAC_REGS, MAC_REGS, FPUL_REGS, FPSCR_REGS,
150 GENERAL_REGS, GENERAL_REGS,
151 };
152
153 char sh_register_names[FIRST_PSEUDO_REGISTER] \
154 [MAX_REGISTER_NAME_LENGTH + 1] = SH_REGISTER_NAMES_INITIALIZER;
155
156 char sh_additional_register_names[ADDREGNAMES_SIZE] \
157 [MAX_ADDITIONAL_REGISTER_NAME_LENGTH + 1]
158 = SH_ADDITIONAL_REGISTER_NAMES_INITIALIZER;
159
160 int assembler_dialect;
161
162 static bool shmedia_space_reserved_for_target_registers;
163
164 static bool sh_handle_option (size_t, const char *, int);
165 static void split_branches (rtx);
166 static int branch_dest (rtx);
167 static void force_into (rtx, rtx);
168 static void print_slot (rtx);
169 static rtx add_constant (rtx, enum machine_mode, rtx);
170 static void dump_table (rtx, rtx);
171 static int hi_const (rtx);
172 static int broken_move (rtx);
173 static int mova_p (rtx);
174 static rtx find_barrier (int, rtx, rtx);
175 static int noncall_uses_reg (rtx, rtx, rtx *);
176 static rtx gen_block_redirect (rtx, int, int);
177 static void sh_reorg (void);
178 static void output_stack_adjust (int, rtx, int, HARD_REG_SET *);
179 static rtx frame_insn (rtx);
180 static rtx push (int);
181 static void pop (int);
182 static void push_regs (HARD_REG_SET *, int);
183 static int calc_live_regs (HARD_REG_SET *);
184 static HOST_WIDE_INT rounded_frame_size (int);
185 static rtx mark_constant_pool_use (rtx);
186 const struct attribute_spec sh_attribute_table[];
187 static tree sh_handle_interrupt_handler_attribute (tree *, tree, tree, int, bool *);
188 static tree sh_handle_sp_switch_attribute (tree *, tree, tree, int, bool *);
189 static tree sh_handle_trap_exit_attribute (tree *, tree, tree, int, bool *);
190 static tree sh_handle_renesas_attribute (tree *, tree, tree, int, bool *);
191 static void sh_output_function_epilogue (FILE *, HOST_WIDE_INT);
192 static void sh_insert_attributes (tree, tree *);
193 static const char *sh_check_pch_target_flags (int);
194 static int sh_adjust_cost (rtx, rtx, rtx, int);
195 static int sh_issue_rate (void);
196 static int sh_dfa_new_cycle (FILE *, int, rtx, int, int, int *sort_p);
197 static short find_set_regmode_weight (rtx, enum machine_mode);
198 static short find_insn_regmode_weight (rtx, enum machine_mode);
199 static void find_regmode_weight (basic_block, enum machine_mode);
200 static int find_r0_life_regions (basic_block);
201 static void sh_md_init_global (FILE *, int, int);
202 static void sh_md_finish_global (FILE *, int);
203 static int rank_for_reorder (const void *, const void *);
204 static void swap_reorder (rtx *, int);
205 static void ready_reorder (rtx *, int);
206 static short high_pressure (enum machine_mode);
207 static int sh_reorder (FILE *, int, rtx *, int *, int);
208 static int sh_reorder2 (FILE *, int, rtx *, int *, int);
209 static void sh_md_init (FILE *, int, int);
210 static int sh_variable_issue (FILE *, int, rtx, int);
211
212 static bool sh_function_ok_for_sibcall (tree, tree);
213
214 static bool sh_cannot_modify_jumps_p (void);
215 static int sh_target_reg_class (void);
216 static bool sh_optimize_target_register_callee_saved (bool);
217 static bool sh_ms_bitfield_layout_p (tree);
218
219 static void sh_init_builtins (void);
220 static void sh_media_init_builtins (void);
221 static rtx sh_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
222 static void sh_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT, tree);
223 static void sh_file_start (void);
224 static int flow_dependent_p (rtx, rtx);
225 static void flow_dependent_p_1 (rtx, const_rtx, void *);
226 static int shiftcosts (rtx);
227 static int andcosts (rtx);
228 static int addsubcosts (rtx);
229 static int multcosts (rtx);
230 static bool unspec_caller_rtx_p (rtx);
231 static bool sh_cannot_copy_insn_p (rtx);
232 static bool sh_rtx_costs (rtx, int, int, int *);
233 static int sh_address_cost (rtx);
234 static int sh_pr_n_sets (void);
235 static rtx sh_allocate_initial_value (rtx);
236 static int shmedia_target_regs_stack_space (HARD_REG_SET *);
237 static int shmedia_reserve_space_for_target_registers_p (int, HARD_REG_SET *);
238 static int shmedia_target_regs_stack_adjust (HARD_REG_SET *);
239 static int scavenge_reg (HARD_REG_SET *s);
240 struct save_schedule_s;
241 static struct save_entry_s *sh5_schedule_saves (HARD_REG_SET *,
242 struct save_schedule_s *, int);
243
244 static rtx sh_struct_value_rtx (tree, int);
245 static bool sh_return_in_memory (tree, tree);
246 static rtx sh_builtin_saveregs (void);
247 static void sh_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode, tree, int *, int);
248 static bool sh_strict_argument_naming (CUMULATIVE_ARGS *);
249 static bool sh_pretend_outgoing_varargs_named (CUMULATIVE_ARGS *);
250 static tree sh_build_builtin_va_list (void);
251 static tree sh_gimplify_va_arg_expr (tree, tree, tree *, tree *);
252 static bool sh_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
253 tree, bool);
254 static bool sh_callee_copies (CUMULATIVE_ARGS *, enum machine_mode,
255 tree, bool);
256 static int sh_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
257 tree, bool);
258 static int sh_dwarf_calling_convention (tree);
259
260 \f
261 /* Initialize the GCC target structure. */
262 #undef TARGET_ATTRIBUTE_TABLE
263 #define TARGET_ATTRIBUTE_TABLE sh_attribute_table
264
265 /* The next two are used for debug info when compiling with -gdwarf. */
266 #undef TARGET_ASM_UNALIGNED_HI_OP
267 #define TARGET_ASM_UNALIGNED_HI_OP "\t.uaword\t"
268 #undef TARGET_ASM_UNALIGNED_SI_OP
269 #define TARGET_ASM_UNALIGNED_SI_OP "\t.ualong\t"
270
271 /* These are NULLed out on non-SH5 in OVERRIDE_OPTIONS. */
272 #undef TARGET_ASM_UNALIGNED_DI_OP
273 #define TARGET_ASM_UNALIGNED_DI_OP "\t.uaquad\t"
274 #undef TARGET_ASM_ALIGNED_DI_OP
275 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
276
277 #undef TARGET_ASM_FUNCTION_EPILOGUE
278 #define TARGET_ASM_FUNCTION_EPILOGUE sh_output_function_epilogue
279
280 #undef TARGET_ASM_OUTPUT_MI_THUNK
281 #define TARGET_ASM_OUTPUT_MI_THUNK sh_output_mi_thunk
282
283 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
284 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
285
286 #undef TARGET_ASM_FILE_START
287 #define TARGET_ASM_FILE_START sh_file_start
288 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
289 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
290
291 #undef TARGET_DEFAULT_TARGET_FLAGS
292 #define TARGET_DEFAULT_TARGET_FLAGS TARGET_DEFAULT
293 #undef TARGET_HANDLE_OPTION
294 #define TARGET_HANDLE_OPTION sh_handle_option
295
296 #undef TARGET_INSERT_ATTRIBUTES
297 #define TARGET_INSERT_ATTRIBUTES sh_insert_attributes
298
299 #undef TARGET_SCHED_ADJUST_COST
300 #define TARGET_SCHED_ADJUST_COST sh_adjust_cost
301
302 #undef TARGET_SCHED_ISSUE_RATE
303 #define TARGET_SCHED_ISSUE_RATE sh_issue_rate
304
305 /* The next 5 hooks have been implemented for reenabling sched1. With the
306 help of these macros we are limiting the movement of insns in sched1 to
307 reduce the register pressure. The overall idea is to keep count of SImode
308 and SFmode regs required by already scheduled insns. When these counts
309 cross certain threshold values, we give priority to insns that free registers.
310 The insn that frees registers is most likely to be the insn with the lowest
311 LUID (original insn order), but such an insn might be sitting in the stalled
312 queue (Q) instead of the ready queue (R). To solve this, we skip cycles,
313 up to a maximum of 8, so that such insns may move from Q -> R.
314
315 The hooks are described below:
316
317 TARGET_SCHED_INIT_GLOBAL: A new target hook added to the generic
318 scheduler; it is called inside the sched_init function just after the
319 find_insn_reg_weights function call. It is used to calculate the SImode
320 and SFmode weights of the insns in each basic block, much as
321 find_insn_reg_weights does.
322 TARGET_SCHED_FINISH_GLOBAL: Corresponding cleanup hook.
323
324 TARGET_SCHED_DFA_NEW_CYCLE: Skip cycles if high register pressure is
325 indicated by TARGET_SCHED_REORDER2; doing this may move insns from
326 (Q)->(R).
327
328 TARGET_SCHED_REORDER: If the register pressure for SImode or SFmode is
329 high, reorder the ready queue so that the insn with the lowest LUID will
330 be issued next.
331
332 TARGET_SCHED_REORDER2: If the register pressure is high, indicate to
333 TARGET_SCHED_DFA_NEW_CYCLE to skip cycles.
334
335 TARGET_SCHED_VARIABLE_ISSUE: Cache the value of can_issue_more so that it
336 can be returned from TARGET_SCHED_REORDER2.
337
338 TARGET_SCHED_INIT: Reset the register pressure counting variables. */
339
340 #undef TARGET_SCHED_DFA_NEW_CYCLE
341 #define TARGET_SCHED_DFA_NEW_CYCLE sh_dfa_new_cycle
342
343 #undef TARGET_SCHED_INIT_GLOBAL
344 #define TARGET_SCHED_INIT_GLOBAL sh_md_init_global
345
346 #undef TARGET_SCHED_FINISH_GLOBAL
347 #define TARGET_SCHED_FINISH_GLOBAL sh_md_finish_global
348
349 #undef TARGET_SCHED_VARIABLE_ISSUE
350 #define TARGET_SCHED_VARIABLE_ISSUE sh_variable_issue
351
352 #undef TARGET_SCHED_REORDER
353 #define TARGET_SCHED_REORDER sh_reorder
354
355 #undef TARGET_SCHED_REORDER2
356 #define TARGET_SCHED_REORDER2 sh_reorder2
357
358 #undef TARGET_SCHED_INIT
359 #define TARGET_SCHED_INIT sh_md_init
360
361 #undef TARGET_CANNOT_MODIFY_JUMPS_P
362 #define TARGET_CANNOT_MODIFY_JUMPS_P sh_cannot_modify_jumps_p
363 #undef TARGET_BRANCH_TARGET_REGISTER_CLASS
364 #define TARGET_BRANCH_TARGET_REGISTER_CLASS sh_target_reg_class
365 #undef TARGET_BRANCH_TARGET_REGISTER_CALLEE_SAVED
366 #define TARGET_BRANCH_TARGET_REGISTER_CALLEE_SAVED \
367 sh_optimize_target_register_callee_saved
368
369 #undef TARGET_MS_BITFIELD_LAYOUT_P
370 #define TARGET_MS_BITFIELD_LAYOUT_P sh_ms_bitfield_layout_p
371
372 #undef TARGET_INIT_BUILTINS
373 #define TARGET_INIT_BUILTINS sh_init_builtins
374 #undef TARGET_EXPAND_BUILTIN
375 #define TARGET_EXPAND_BUILTIN sh_expand_builtin
376
377 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
378 #define TARGET_FUNCTION_OK_FOR_SIBCALL sh_function_ok_for_sibcall
379
380 #undef TARGET_CANNOT_COPY_INSN_P
381 #define TARGET_CANNOT_COPY_INSN_P sh_cannot_copy_insn_p
382 #undef TARGET_RTX_COSTS
383 #define TARGET_RTX_COSTS sh_rtx_costs
384 #undef TARGET_ADDRESS_COST
385 #define TARGET_ADDRESS_COST sh_address_cost
386 #undef TARGET_ALLOCATE_INITIAL_VALUE
387 #define TARGET_ALLOCATE_INITIAL_VALUE sh_allocate_initial_value
388
389 #undef TARGET_MACHINE_DEPENDENT_REORG
390 #define TARGET_MACHINE_DEPENDENT_REORG sh_reorg
391
392 #ifdef HAVE_AS_TLS
393 #undef TARGET_HAVE_TLS
394 #define TARGET_HAVE_TLS true
395 #endif
396
397 #undef TARGET_PROMOTE_PROTOTYPES
398 #define TARGET_PROMOTE_PROTOTYPES sh_promote_prototypes
399 #undef TARGET_PROMOTE_FUNCTION_ARGS
400 #define TARGET_PROMOTE_FUNCTION_ARGS sh_promote_prototypes
401 #undef TARGET_PROMOTE_FUNCTION_RETURN
402 #define TARGET_PROMOTE_FUNCTION_RETURN sh_promote_prototypes
403
404 #undef TARGET_STRUCT_VALUE_RTX
405 #define TARGET_STRUCT_VALUE_RTX sh_struct_value_rtx
406 #undef TARGET_RETURN_IN_MEMORY
407 #define TARGET_RETURN_IN_MEMORY sh_return_in_memory
408
409 #undef TARGET_EXPAND_BUILTIN_SAVEREGS
410 #define TARGET_EXPAND_BUILTIN_SAVEREGS sh_builtin_saveregs
411 #undef TARGET_SETUP_INCOMING_VARARGS
412 #define TARGET_SETUP_INCOMING_VARARGS sh_setup_incoming_varargs
413 #undef TARGET_STRICT_ARGUMENT_NAMING
414 #define TARGET_STRICT_ARGUMENT_NAMING sh_strict_argument_naming
415 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
416 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED sh_pretend_outgoing_varargs_named
417 #undef TARGET_MUST_PASS_IN_STACK
418 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
419 #undef TARGET_PASS_BY_REFERENCE
420 #define TARGET_PASS_BY_REFERENCE sh_pass_by_reference
421 #undef TARGET_CALLEE_COPIES
422 #define TARGET_CALLEE_COPIES sh_callee_copies
423 #undef TARGET_ARG_PARTIAL_BYTES
424 #define TARGET_ARG_PARTIAL_BYTES sh_arg_partial_bytes
425
426 #undef TARGET_BUILD_BUILTIN_VA_LIST
427 #define TARGET_BUILD_BUILTIN_VA_LIST sh_build_builtin_va_list
428 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
429 #define TARGET_GIMPLIFY_VA_ARG_EXPR sh_gimplify_va_arg_expr
430
431 #undef TARGET_VECTOR_MODE_SUPPORTED_P
432 #define TARGET_VECTOR_MODE_SUPPORTED_P sh_vector_mode_supported_p
433
434 #undef TARGET_CHECK_PCH_TARGET_FLAGS
435 #define TARGET_CHECK_PCH_TARGET_FLAGS sh_check_pch_target_flags
436
437 #undef TARGET_DWARF_CALLING_CONVENTION
438 #define TARGET_DWARF_CALLING_CONVENTION sh_dwarf_calling_convention
439
440 /* Return regmode weight for insn. */
441 #define INSN_REGMODE_WEIGHT(INSN, MODE) regmode_weight[((MODE) == SImode) ? 0 : 1][INSN_UID (INSN)]
442
443 /* Return current register pressure for regmode. */
444 #define CURR_REGMODE_PRESSURE(MODE) curr_regmode_pressure[((MODE) == SImode) ? 0 : 1]
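/* Illustrative sketch (added for exposition, not part of the original file):
   roughly how the two macros above are meant to combine. Assuming INSN is an
   insn the scheduler has just issued, the SImode bookkeeping amounts to
   something like

     CURR_REGMODE_PRESSURE (SImode) += INSN_REGMODE_WEIGHT (insn, SImode);
     if (CURR_REGMODE_PRESSURE (SImode) > simode_threshold)
       ... have TARGET_SCHED_REORDER / TARGET_SCHED_REORDER2 prefer
           register-freeing insns and ask TARGET_SCHED_DFA_NEW_CYCLE to
           skip cycles ...

   The real bookkeeping lives in the sh_md_*, sh_reorder* and high_pressure
   functions declared above; `simode_threshold' is a placeholder here, not an
   identifier from this file.  */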
445
446 #ifdef SYMBIAN
447
448 #undef TARGET_ENCODE_SECTION_INFO
449 #define TARGET_ENCODE_SECTION_INFO sh_symbian_encode_section_info
450 #undef TARGET_STRIP_NAME_ENCODING
451 #define TARGET_STRIP_NAME_ENCODING sh_symbian_strip_name_encoding
452 #undef TARGET_CXX_IMPORT_EXPORT_CLASS
453 #define TARGET_CXX_IMPORT_EXPORT_CLASS symbian_import_export_class
454
455 #endif /* SYMBIAN */
456
457 #undef TARGET_SECONDARY_RELOAD
458 #define TARGET_SECONDARY_RELOAD sh_secondary_reload
459
460 struct gcc_target targetm = TARGET_INITIALIZER;
461 \f
462 /* Implement TARGET_HANDLE_OPTION. */
463
464 static bool
465 sh_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED,
466 int value ATTRIBUTE_UNUSED)
467 {
468 switch (code)
469 {
470 case OPT_m1:
471 target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH1;
472 return true;
473
474 case OPT_m2:
475 target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2;
476 return true;
477
478 case OPT_m2a:
479 target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2A;
480 return true;
481
482 case OPT_m2a_nofpu:
483 target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2A_NOFPU;
484 return true;
485
486 case OPT_m2a_single:
487 target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2A_SINGLE;
488 return true;
489
490 case OPT_m2a_single_only:
491 target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2A_SINGLE_ONLY;
492 return true;
493
494 case OPT_m2e:
495 target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2E;
496 return true;
497
498 case OPT_m3:
499 target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH3;
500 return true;
501
502 case OPT_m3e:
503 target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH3E;
504 return true;
505
506 case OPT_m4:
507 case OPT_m4_100:
508 case OPT_m4_200:
509 case OPT_m4_300:
510 target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4;
511 return true;
512
513 case OPT_m4_nofpu:
514 case OPT_m4_100_nofpu:
515 case OPT_m4_200_nofpu:
516 case OPT_m4_300_nofpu:
517 case OPT_m4_340:
518 case OPT_m4_400:
519 case OPT_m4_500:
520 target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4_NOFPU;
521 return true;
522
523 case OPT_m4_single:
524 case OPT_m4_100_single:
525 case OPT_m4_200_single:
526 case OPT_m4_300_single:
527 target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4_SINGLE;
528 return true;
529
530 case OPT_m4_single_only:
531 case OPT_m4_100_single_only:
532 case OPT_m4_200_single_only:
533 case OPT_m4_300_single_only:
534 target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4_SINGLE_ONLY;
535 return true;
536
537 case OPT_m4a:
538 target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4A;
539 return true;
540
541 case OPT_m4a_nofpu:
542 case OPT_m4al:
543 target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4A_NOFPU;
544 return true;
545
546 case OPT_m4a_single:
547 target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4A_SINGLE;
548 return true;
549
550 case OPT_m4a_single_only:
551 target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4A_SINGLE_ONLY;
552 return true;
553
554 case OPT_m5_32media:
555 target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_32MEDIA;
556 return true;
557
558 case OPT_m5_32media_nofpu:
559 target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_32MEDIA_NOFPU;
560 return true;
561
562 case OPT_m5_64media:
563 target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_64MEDIA;
564 return true;
565
566 case OPT_m5_64media_nofpu:
567 target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_64MEDIA_NOFPU;
568 return true;
569
570 case OPT_m5_compact:
571 target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_COMPACT;
572 return true;
573
574 case OPT_m5_compact_nofpu:
575 target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_COMPACT_NOFPU;
576 return true;
577
578 default:
579 return true;
580 }
581 }
582 \f
583 /* Print the operand address in x to the stream. */
584
585 void
586 print_operand_address (FILE *stream, rtx x)
587 {
588 switch (GET_CODE (x))
589 {
590 case REG:
591 case SUBREG:
592 fprintf (stream, "@%s", reg_names[true_regnum (x)]);
593 break;
594
595 case PLUS:
596 {
597 rtx base = XEXP (x, 0);
598 rtx index = XEXP (x, 1);
599
600 switch (GET_CODE (index))
601 {
602 case CONST_INT:
603 fprintf (stream, "@(%d,%s)", (int) INTVAL (index),
604 reg_names[true_regnum (base)]);
605 break;
606
607 case REG:
608 case SUBREG:
609 {
610 int base_num = true_regnum (base);
611 int index_num = true_regnum (index);
612
613 fprintf (stream, "@(r0,%s)",
614 reg_names[MAX (base_num, index_num)]);
615 break;
616 }
617
618 default:
619 gcc_unreachable ();
620 }
621 }
622 break;
623
624 case PRE_DEC:
625 fprintf (stream, "@-%s", reg_names[true_regnum (XEXP (x, 0))]);
626 break;
627
628 case POST_INC:
629 fprintf (stream, "@%s+", reg_names[true_regnum (XEXP (x, 0))]);
630 break;
631
632 default:
633 x = mark_constant_pool_use (x);
634 output_addr_const (stream, x);
635 break;
636 }
637 }
638
639 /* Print operand x (an rtx) in assembler syntax to file stream
640 according to modifier code.
641
642 '.' print a .s if insn needs delay slot
643 ',' print LOCAL_LABEL_PREFIX
644 '@' print trapa, rte or rts depending upon the function's interrupt attributes
645 '#' output a nop if there is nothing to put in the delay slot
646 ''' print likelihood suffix (/u for unlikely).
647 '>' print branch target if -fverbose-asm
648 'O' print a constant without the #
649 'R' print the LSW of a dp value - changes if in little endian
650 'S' print the MSW of a dp value - changes if in little endian
651 'T' print the next word of a dp value - same as 'R' in big endian mode.
652 'M' SHMEDIA: print an `x' if `m' will print `base,index'.
653 otherwise: print .b / .w / .l / .s / .d suffix if operand is a MEM.
654 'N' print 'r63' if the operand is (const_int 0).
655 'd' print a V2SF reg as dN instead of fpN.
656 'm' print a pair `base,offset' or `base,index', for LD and ST.
657 'U' Likewise for {LD,ST}{HI,LO}.
658 'u' prints the lowest 16 bits of CONST_INT, as an unsigned value.
659 'o' output an operator. */
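/* Illustrative examples (added for exposition, not from the original sources),
   based on the MSW/LSW macros at the top of this file. Assume operand 0 is a
   DImode value held in the register pair r4/r5:

     little endian target (LSW == 0, MSW == 1):
       %R0 -> "r4"   (least significant word)
       %S0 -> "r5"   (most significant word)
       %T0 -> "r5"   (the word after %0; coincides with %S0 here)

     big endian target (LSW == 1, MSW == 0):
       %R0 -> "r5",  %S0 -> "r4",  %T0 -> "r5"  (coincides with %R0)

   Likewise, if operand 1 is (const_int 0x12345678), %u1 prints "22136",
   i.e. 0x5678, the low 16 bits taken as an unsigned value.  */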
660
661 void
662 print_operand (FILE *stream, rtx x, int code)
663 {
664 int regno;
665 enum machine_mode mode;
666
667 switch (code)
668 {
669 tree trapa_attr;
670
671 case '.':
672 if (final_sequence
673 && ! INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))
674 && get_attr_length (XVECEXP (final_sequence, 0, 1)))
675 fprintf (stream, ASSEMBLER_DIALECT ? "/s" : ".s");
676 break;
677 case ',':
678 fprintf (stream, "%s", LOCAL_LABEL_PREFIX);
679 break;
680 case '@':
681 trapa_attr = lookup_attribute ("trap_exit",
682 DECL_ATTRIBUTES (current_function_decl));
683 if (trapa_attr)
684 fprintf (stream, "trapa #%ld",
685 (long) TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (trapa_attr))));
686 else if (sh_cfun_interrupt_handler_p ())
687 fprintf (stream, "rte");
688 else
689 fprintf (stream, "rts");
690 break;
691 case '#':
692 /* Output a nop if there's nothing in the delay slot. */
693 if (dbr_sequence_length () == 0)
694 fprintf (stream, "\n\tnop");
695 break;
696 case '\'':
697 {
698 rtx note = find_reg_note (current_output_insn, REG_BR_PROB, 0);
699
700 if (note && INTVAL (XEXP (note, 0)) * 2 < REG_BR_PROB_BASE)
701 fputs ("/u", stream);
702 break;
703 }
704 case '>':
705 if (flag_verbose_asm && JUMP_LABEL (current_output_insn))
706 {
707 fputs ("\t! target: ", stream);
708 output_addr_const (stream, JUMP_LABEL (current_output_insn));
709 }
710 break;
711 case 'O':
712 x = mark_constant_pool_use (x);
713 output_addr_const (stream, x);
714 break;
715 /* N.B.: %R / %S / %T adjust memory addresses by four.
716 For SHMEDIA, that means they can be used to access the first and
717 second 32 bit part of a 64 bit (or larger) value that
718 might be held in floating point registers or memory.
719 While they can be used to access 64 bit parts of a larger value
720 held in general purpose registers, that won't work for memory operands,
721 nor for fp registers, since the frxx names are used. */
722 case 'R':
723 if (REG_P (x) || GET_CODE (x) == SUBREG)
724 {
725 regno = true_regnum (x);
726 regno += FP_REGISTER_P (regno) ? 1 : LSW;
727 fputs (reg_names[regno], (stream));
728 }
729 else if (MEM_P (x))
730 {
731 x = adjust_address (x, SImode, 4 * LSW);
732 print_operand_address (stream, XEXP (x, 0));
733 }
734 else
735 {
736 rtx sub = NULL_RTX;
737
738 mode = GET_MODE (x);
739 if (mode == VOIDmode)
740 mode = DImode;
741 if (GET_MODE_SIZE (mode) >= 8)
742 sub = simplify_subreg (SImode, x, mode, 4 * LSW);
743 if (sub)
744 print_operand (stream, sub, 0);
745 else
746 output_operand_lossage ("invalid operand to %%R");
747 }
748 break;
749 case 'S':
750 if (REG_P (x) || GET_CODE (x) == SUBREG)
751 {
752 regno = true_regnum (x);
753 regno += FP_REGISTER_P (regno) ? 0 : MSW;
754 fputs (reg_names[regno], (stream));
755 }
756 else if (MEM_P (x))
757 {
758 x = adjust_address (x, SImode, 4 * MSW);
759 print_operand_address (stream, XEXP (x, 0));
760 }
761 else
762 {
763 rtx sub = NULL_RTX;
764
765 mode = GET_MODE (x);
766 if (mode == VOIDmode)
767 mode = DImode;
768 if (GET_MODE_SIZE (mode) >= 8)
769 sub = simplify_subreg (SImode, x, mode, 4 * MSW);
770 if (sub)
771 print_operand (stream, sub, 0);
772 else
773 output_operand_lossage ("invalid operand to %%S");
774 }
775 break;
776 case 'T':
777 /* Next word of a double. */
778 switch (GET_CODE (x))
779 {
780 case REG:
781 fputs (reg_names[REGNO (x) + 1], (stream));
782 break;
783 case MEM:
784 if (GET_CODE (XEXP (x, 0)) != PRE_DEC
785 && GET_CODE (XEXP (x, 0)) != POST_INC)
786 x = adjust_address (x, SImode, 4);
787 print_operand_address (stream, XEXP (x, 0));
788 break;
789 default:
790 break;
791 }
792 break;
793 case 'o':
794 switch (GET_CODE (x))
795 {
796 case PLUS: fputs ("add", stream); break;
797 case MINUS: fputs ("sub", stream); break;
798 case MULT: fputs ("mul", stream); break;
799 case DIV: fputs ("div", stream); break;
800 case EQ: fputs ("eq", stream); break;
801 case NE: fputs ("ne", stream); break;
802 case GT: case LT: fputs ("gt", stream); break;
803 case GE: case LE: fputs ("ge", stream); break;
804 case GTU: case LTU: fputs ("gtu", stream); break;
805 case GEU: case LEU: fputs ("geu", stream); break;
806 default:
807 break;
808 }
809 break;
810 case 'M':
811 if (TARGET_SHMEDIA)
812 {
813 if (GET_CODE (x) == MEM
814 && GET_CODE (XEXP (x, 0)) == PLUS
815 && (GET_CODE (XEXP (XEXP (x, 0), 1)) == REG
816 || GET_CODE (XEXP (XEXP (x, 0), 1)) == SUBREG))
817 fputc ('x', stream);
818 }
819 else
820 {
821 if (GET_CODE (x) == MEM)
822 {
823 switch (GET_MODE (x))
824 {
825 case QImode: fputs (".b", stream); break;
826 case HImode: fputs (".w", stream); break;
827 case SImode: fputs (".l", stream); break;
828 case SFmode: fputs (".s", stream); break;
829 case DFmode: fputs (".d", stream); break;
830 default: gcc_unreachable ();
831 }
832 }
833 }
834 break;
835
836 case 'm':
837 gcc_assert (GET_CODE (x) == MEM);
838 x = XEXP (x, 0);
839 /* Fall through. */
840 case 'U':
841 switch (GET_CODE (x))
842 {
843 case REG:
844 case SUBREG:
845 print_operand (stream, x, 0);
846 fputs (", 0", stream);
847 break;
848
849 case PLUS:
850 print_operand (stream, XEXP (x, 0), 0);
851 fputs (", ", stream);
852 print_operand (stream, XEXP (x, 1), 0);
853 break;
854
855 default:
856 gcc_unreachable ();
857 }
858 break;
859
860 case 'd':
861 gcc_assert (GET_CODE (x) == REG && GET_MODE (x) == V2SFmode);
862
863 fprintf ((stream), "d%s", reg_names[REGNO (x)] + 1);
864 break;
865
866 case 'N':
867 if (x == CONST0_RTX (GET_MODE (x)))
868 {
869 fprintf ((stream), "r63");
870 break;
871 }
872 goto default_output;
873 case 'u':
874 if (GET_CODE (x) == CONST_INT)
875 {
876 fprintf ((stream), "%u", (unsigned) INTVAL (x) & (0x10000 - 1));
877 break;
878 }
879 /* Fall through. */
880
881 default_output:
882 default:
883 regno = 0;
884 mode = GET_MODE (x);
885
886 switch (GET_CODE (x))
887 {
888 case TRUNCATE:
889 {
890 rtx inner = XEXP (x, 0);
891 int offset = 0;
892 enum machine_mode inner_mode;
893
894 /* We might see SUBREGs with vector mode registers inside. */
895 if (GET_CODE (inner) == SUBREG
896 && (GET_MODE_SIZE (GET_MODE (inner))
897 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
898 && subreg_lowpart_p (inner))
899 inner = SUBREG_REG (inner);
900 if (GET_CODE (inner) == CONST_INT)
901 {
902 x = GEN_INT (trunc_int_for_mode (INTVAL (inner), GET_MODE (x)));
903 goto default_output;
904 }
905 inner_mode = GET_MODE (inner);
906 if (GET_CODE (inner) == SUBREG
907 && (GET_MODE_SIZE (GET_MODE (inner))
908 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
909 && GET_CODE (SUBREG_REG (inner)) == REG)
910 {
911 offset = subreg_regno_offset (REGNO (SUBREG_REG (inner)),
912 GET_MODE (SUBREG_REG (inner)),
913 SUBREG_BYTE (inner),
914 GET_MODE (inner));
915 inner = SUBREG_REG (inner);
916 }
917 if (GET_CODE (inner) != REG || GET_MODE_SIZE (inner_mode) > 8)
918 abort ();
919 /* Floating point register pairs are always big endian;
920 general purpose registers are 64 bit wide. */
921 regno = REGNO (inner);
922 regno = (HARD_REGNO_NREGS (regno, inner_mode)
923 - HARD_REGNO_NREGS (regno, mode))
924 + offset;
925 x = inner;
926 goto reg;
927 }
928 case SIGN_EXTEND:
929 x = XEXP (x, 0);
930 goto reg;
931 /* FIXME: We need this on SHmedia32 because reload generates
932 some sign-extended HI or QI loads into DImode registers
933 but, because Pmode is SImode, the address ends up with a
934 subreg:SI of the DImode register. Maybe reload should be
935 fixed so as to apply alter_subreg to such loads? */
936 case IF_THEN_ELSE:
937 gcc_assert (trapping_target_operand (x, VOIDmode));
938 x = XEXP (XEXP (x, 2), 0);
939 goto default_output;
940 case SUBREG:
941 gcc_assert (SUBREG_BYTE (x) == 0
942 && GET_CODE (SUBREG_REG (x)) == REG);
943
944 x = SUBREG_REG (x);
945 /* Fall through. */
946
947 reg:
948 case REG:
949 regno += REGNO (x);
950 if (FP_REGISTER_P (regno)
951 && mode == V16SFmode)
952 fprintf ((stream), "mtrx%s", reg_names[regno] + 2);
953 else if (FP_REGISTER_P (REGNO (x))
954 && mode == V4SFmode)
955 fprintf ((stream), "fv%s", reg_names[regno] + 2);
956 else if (GET_CODE (x) == REG
957 && mode == V2SFmode)
958 fprintf ((stream), "fp%s", reg_names[regno] + 2);
959 else if (FP_REGISTER_P (REGNO (x))
960 && GET_MODE_SIZE (mode) > 4)
961 fprintf ((stream), "d%s", reg_names[regno] + 1);
962 else
963 fputs (reg_names[regno], (stream));
964 break;
965
966 case MEM:
967 output_address (XEXP (x, 0));
968 break;
969
970 case CONST:
971 if (TARGET_SHMEDIA
972 && (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
973 || GET_CODE (XEXP (x, 0)) == ZERO_EXTEND)
974 && (GET_MODE (XEXP (x, 0)) == DImode
975 || GET_MODE (XEXP (x, 0)) == SImode)
976 && GET_CODE (XEXP (XEXP (x, 0), 0)) == TRUNCATE
977 && GET_MODE (XEXP (XEXP (x, 0), 0)) == HImode)
978 {
979 rtx val = XEXP (XEXP (XEXP (x, 0), 0), 0);
980 rtx val2 = val;
981 bool nested_expr = false;
982
983 fputc ('(', stream);
984 if (GET_CODE (val) == ASHIFTRT)
985 {
986 fputc ('(', stream);
987 val2 = XEXP (val, 0);
988 }
989 if (GET_CODE (val2) == CONST
990 || GET_RTX_CLASS (GET_CODE (val2)) != RTX_OBJ)
991 {
992 fputc ('(', stream);
993 nested_expr = true;
994 }
995 output_addr_const (stream, val2);
996 if (nested_expr)
997 fputc (')', stream);
998 if (GET_CODE (val) == ASHIFTRT)
999 {
1000 fputs (" >> ", stream);
1001 output_addr_const (stream, XEXP (val, 1));
1002 fputc (')', stream);
1003 }
1004 fputs (" & 65535)", stream);
1005 break;
1006 }
1007
1008 /* Fall through. */
1009 default:
1010 if (TARGET_SH1)
1011 fputc ('#', stream);
1012 output_addr_const (stream, x);
1013 break;
1014 }
1015 break;
1016 }
1017 }
1018 \f
1019 /* Like force_operand, but guarantees that VALUE ends up in TARGET. */
1020 static void
1021 force_into (rtx value, rtx target)
1022 {
1023 value = force_operand (value, target);
1024 if (! rtx_equal_p (value, target))
1025 emit_insn (gen_move_insn (target, value));
1026 }
1027
1028 /* Emit code to perform a block move. Choose the best method.
1029
1030 OPERANDS[0] is the destination.
1031 OPERANDS[1] is the source.
1032 OPERANDS[2] is the size.
1033 OPERANDS[3] is the alignment safe to use. */
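/* For example (illustrative only): a constant 20 byte copy known to be 4 byte
   aligned reaches this expander with operands[2] == (const_int 20) and
   operands[3] == (const_int 4); when TARGET_HARD_SH4 is not set, the code
   below loads the destination address into r4, the source address into r5,
   and emits a call to the library routine __movmemSI20.  */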
1034
1035 int
1036 expand_block_move (rtx *operands)
1037 {
1038 int align = INTVAL (operands[3]);
1039 int constp = (GET_CODE (operands[2]) == CONST_INT);
1040 int bytes = (constp ? INTVAL (operands[2]) : 0);
1041
1042 if (! constp)
1043 return 0;
1044
1045 /* If we could use mov.l to move words and dest is word-aligned, we
1046 can use movua.l for loads and still generate a relatively short
1047 and efficient sequence. */
1048 if (TARGET_SH4A_ARCH && align < 4
1049 && MEM_ALIGN (operands[0]) >= 32
1050 && can_move_by_pieces (bytes, 32))
1051 {
1052 rtx dest = copy_rtx (operands[0]);
1053 rtx src = copy_rtx (operands[1]);
1054 /* We could use different pseudos for each copied word, but
1055 since movua can only load into r0, it's kind of
1056 pointless. */
1057 rtx temp = gen_reg_rtx (SImode);
1058 rtx src_addr = copy_addr_to_reg (XEXP (src, 0));
1059 int copied = 0;
1060
1061 while (copied + 4 <= bytes)
1062 {
1063 rtx to = adjust_address (dest, SImode, copied);
1064 rtx from = adjust_automodify_address (src, BLKmode,
1065 src_addr, copied);
1066
1067 set_mem_size (from, GEN_INT (4));
1068 emit_insn (gen_movua (temp, from));
1069 emit_move_insn (src_addr, plus_constant (src_addr, 4));
1070 emit_move_insn (to, temp);
1071 copied += 4;
1072 }
1073
1074 if (copied < bytes)
1075 move_by_pieces (adjust_address (dest, BLKmode, copied),
1076 adjust_automodify_address (src, BLKmode,
1077 src_addr, copied),
1078 bytes - copied, align, 0);
1079
1080 return 1;
1081 }
1082
1083 /* If it isn't a constant number of bytes, or if it doesn't have 4 byte
1084 alignment, or if it isn't a multiple of 4 bytes, then fail. */
1085 if (align < 4 || (bytes % 4 != 0))
1086 return 0;
1087
1088 if (TARGET_HARD_SH4)
1089 {
1090 if (bytes < 12)
1091 return 0;
1092 else if (bytes == 12)
1093 {
1094 rtx func_addr_rtx = gen_reg_rtx (Pmode);
1095 rtx r4 = gen_rtx_REG (SImode, 4);
1096 rtx r5 = gen_rtx_REG (SImode, 5);
1097
1098 function_symbol (func_addr_rtx, "__movmemSI12_i4", SFUNC_STATIC);
1099 force_into (XEXP (operands[0], 0), r4);
1100 force_into (XEXP (operands[1], 0), r5);
1101 emit_insn (gen_block_move_real_i4 (func_addr_rtx));
1102 return 1;
1103 }
1104 else if (! TARGET_SMALLCODE)
1105 {
1106 const char *entry_name;
1107 rtx func_addr_rtx = gen_reg_rtx (Pmode);
1108 int dwords;
1109 rtx r4 = gen_rtx_REG (SImode, 4);
1110 rtx r5 = gen_rtx_REG (SImode, 5);
1111 rtx r6 = gen_rtx_REG (SImode, 6);
1112
1113 entry_name = (bytes & 4 ? "__movmem_i4_odd" : "__movmem_i4_even");
1114 function_symbol (func_addr_rtx, entry_name, SFUNC_STATIC);
1115 force_into (XEXP (operands[0], 0), r4);
1116 force_into (XEXP (operands[1], 0), r5);
1117
1118 dwords = bytes >> 3;
1119 emit_insn (gen_move_insn (r6, GEN_INT (dwords - 1)));
1120 emit_insn (gen_block_lump_real_i4 (func_addr_rtx));
1121 return 1;
1122 }
1123 else
1124 return 0;
1125 }
1126 if (bytes < 64)
1127 {
1128 char entry[30];
1129 rtx func_addr_rtx = gen_reg_rtx (Pmode);
1130 rtx r4 = gen_rtx_REG (SImode, 4);
1131 rtx r5 = gen_rtx_REG (SImode, 5);
1132
1133 sprintf (entry, "__movmemSI%d", bytes);
1134 function_symbol (func_addr_rtx, entry, SFUNC_STATIC);
1135 force_into (XEXP (operands[0], 0), r4);
1136 force_into (XEXP (operands[1], 0), r5);
1137 emit_insn (gen_block_move_real (func_addr_rtx));
1138 return 1;
1139 }
1140
1141 /* This is the same number of bytes as a memcpy call, but to a different,
1142 less common function name, so this will occasionally use more space. */
1143 if (! TARGET_SMALLCODE)
1144 {
1145 rtx func_addr_rtx = gen_reg_rtx (Pmode);
1146 int final_switch, while_loop;
1147 rtx r4 = gen_rtx_REG (SImode, 4);
1148 rtx r5 = gen_rtx_REG (SImode, 5);
1149 rtx r6 = gen_rtx_REG (SImode, 6);
1150
1151 function_symbol (func_addr_rtx, "__movmem", SFUNC_STATIC);
1152 force_into (XEXP (operands[0], 0), r4);
1153 force_into (XEXP (operands[1], 0), r5);
1154
1155 /* r6 controls the size of the move. 16 is decremented from it
1156 for each 64 bytes moved. Then the negative value left over is used
1157 as an index into a list of move instructions. e.g., a 72 byte move
1158 would be set up with size(r6) = 14, for one iteration through the
1159 big while loop, and a switch of -2 for the last part. */
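/* A second worked example (illustrative): for a 136 byte move, bytes / 4 == 34,
   so final_switch == 16 - (34 % 16) == 14 and while_loop == (34 / 16 - 1) * 16
   == 16, giving r6 == 30. Two trips through the 64 byte loop (128 bytes)
   leave r6 at -2, which again selects the 8 byte tail in the switch.  */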
1160
1161 final_switch = 16 - ((bytes / 4) % 16);
1162 while_loop = ((bytes / 4) / 16 - 1) * 16;
1163 emit_insn (gen_move_insn (r6, GEN_INT (while_loop + final_switch)));
1164 emit_insn (gen_block_lump_real (func_addr_rtx));
1165 return 1;
1166 }
1167
1168 return 0;
1169 }
1170
1171 /* Prepare operands for a move define_expand; specifically, one of the
1172 operands must be in a register. */
1173
1174 int
1175 prepare_move_operands (rtx operands[], enum machine_mode mode)
1176 {
1177 if ((mode == SImode || mode == DImode)
1178 && flag_pic
1179 && ! ((mode == Pmode || mode == ptr_mode)
1180 && tls_symbolic_operand (operands[1], Pmode) != 0))
1181 {
1182 rtx temp;
1183 if (SYMBOLIC_CONST_P (operands[1]))
1184 {
1185 if (GET_CODE (operands[0]) == MEM)
1186 operands[1] = force_reg (Pmode, operands[1]);
1187 else if (TARGET_SHMEDIA
1188 && GET_CODE (operands[1]) == LABEL_REF
1189 && target_reg_operand (operands[0], mode))
1190 /* It's ok. */;
1191 else
1192 {
1193 temp = (!can_create_pseudo_p ()
1194 ? operands[0]
1195 : gen_reg_rtx (Pmode));
1196 operands[1] = legitimize_pic_address (operands[1], mode, temp);
1197 }
1198 }
1199 else if (GET_CODE (operands[1]) == CONST
1200 && GET_CODE (XEXP (operands[1], 0)) == PLUS
1201 && SYMBOLIC_CONST_P (XEXP (XEXP (operands[1], 0), 0)))
1202 {
1203 temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
1204 temp = legitimize_pic_address (XEXP (XEXP (operands[1], 0), 0),
1205 mode, temp);
1206 operands[1] = expand_binop (mode, add_optab, temp,
1207 XEXP (XEXP (operands[1], 0), 1),
1208 (!can_create_pseudo_p ()
1209 ? temp
1210 : gen_reg_rtx (Pmode)),
1211 0, OPTAB_LIB_WIDEN);
1212 }
1213 }
1214
1215 if (! reload_in_progress && ! reload_completed)
1216 {
1217 /* Copy the source to a register if neither operand is a register. */
1218 if (! register_operand (operands[0], mode)
1219 && ! sh_register_operand (operands[1], mode))
1220 operands[1] = copy_to_mode_reg (mode, operands[1]);
1221
1222 if (GET_CODE (operands[0]) == MEM && ! memory_operand (operands[0], mode))
1223 {
1224 /* This is like change_address_1 (operands[0], mode, 0, 1),
1225 except that we can't use that function because it is static. */
1226 rtx new = change_address (operands[0], mode, 0);
1227 MEM_COPY_ATTRIBUTES (new, operands[0]);
1228 operands[0] = new;
1229 }
1230
1231 /* This case can happen while generating code to move the result
1232 of a library call to the target. Reject `st r0,@(rX,rY)' because
1233 reload will fail to find a spill register for rX, since r0 is already
1234 being used for the source. */
1235 else if (TARGET_SH1
1236 && refers_to_regno_p (R0_REG, R0_REG + 1, operands[1], (rtx *)0)
1237 && GET_CODE (operands[0]) == MEM
1238 && GET_CODE (XEXP (operands[0], 0)) == PLUS
1239 && GET_CODE (XEXP (XEXP (operands[0], 0), 1)) == REG)
1240 operands[1] = copy_to_mode_reg (mode, operands[1]);
1241 }
1242
1243 if (mode == Pmode || mode == ptr_mode)
1244 {
1245 rtx op0, op1, opc;
1246 enum tls_model tls_kind;
1247
1248 op0 = operands[0];
1249 op1 = operands[1];
1250 if (GET_CODE (op1) == CONST
1251 && GET_CODE (XEXP (op1, 0)) == PLUS
1252 && tls_symbolic_operand (XEXP (XEXP (op1, 0), 0), Pmode))
1253 {
1254 opc = XEXP (XEXP (op1, 0), 1);
1255 op1 = XEXP (XEXP (op1, 0), 0);
1256 }
1257 else
1258 opc = NULL_RTX;
1259
1260 if ((tls_kind = tls_symbolic_operand (op1, Pmode)))
1261 {
1262 rtx tga_op1, tga_ret, tmp, tmp2;
1263
1264 switch (tls_kind)
1265 {
1266 case TLS_MODEL_GLOBAL_DYNAMIC:
1267 tga_ret = gen_rtx_REG (Pmode, R0_REG);
1268 emit_call_insn (gen_tls_global_dynamic (tga_ret, op1));
1269 op1 = tga_ret;
1270 break;
1271
1272 case TLS_MODEL_LOCAL_DYNAMIC:
1273 tga_ret = gen_rtx_REG (Pmode, R0_REG);
1274 emit_call_insn (gen_tls_local_dynamic (tga_ret, op1));
1275
1276 tmp = gen_reg_rtx (Pmode);
1277 emit_move_insn (tmp, tga_ret);
1278
1279 if (register_operand (op0, Pmode))
1280 tmp2 = op0;
1281 else
1282 tmp2 = gen_reg_rtx (Pmode);
1283
1284 emit_insn (gen_symDTPOFF2reg (tmp2, op1, tmp));
1285 op1 = tmp2;
1286 break;
1287
1288 case TLS_MODEL_INITIAL_EXEC:
1289 if (! flag_pic)
1290 {
1291 /* Don't schedule insns for getting the GOT address when
1292 the first scheduling pass is enabled, to avoid spill
1293 failures for R0. */
1294 if (flag_schedule_insns)
1295 emit_insn (gen_blockage ());
1296 emit_insn (gen_GOTaddr2picreg ());
1297 emit_insn (gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode,
1298 PIC_REG)));
1299 if (flag_schedule_insns)
1300 emit_insn (gen_blockage ());
1301 }
1302 tga_op1 = !can_create_pseudo_p () ? op0 : gen_reg_rtx (Pmode);
1303 tmp = gen_sym2GOTTPOFF (op1);
1304 emit_insn (gen_tls_initial_exec (tga_op1, tmp));
1305 op1 = tga_op1;
1306 break;
1307
1308 case TLS_MODEL_LOCAL_EXEC:
1309 tmp2 = gen_reg_rtx (Pmode);
1310 emit_insn (gen_load_gbr (tmp2));
1311 tmp = gen_reg_rtx (Pmode);
1312 emit_insn (gen_symTPOFF2reg (tmp, op1));
1313
1314 if (register_operand (op0, Pmode))
1315 op1 = op0;
1316 else
1317 op1 = gen_reg_rtx (Pmode);
1318
1319 emit_insn (gen_addsi3 (op1, tmp, tmp2));
1320 break;
1321
1322 default:
1323 gcc_unreachable ();
1324 }
1325 if (opc)
1326 emit_insn (gen_addsi3 (op1, op1, force_reg (SImode, opc)));
1327 operands[1] = op1;
1328 }
1329 }
1330
1331 return 0;
1332 }
1333
1334 enum rtx_code
1335 prepare_cbranch_operands (rtx *operands, enum machine_mode mode,
1336 enum rtx_code comparison)
1337 {
1338 rtx op1;
1339 rtx scratch = NULL_RTX;
1340
1341 if (comparison == CODE_FOR_nothing)
1342 comparison = GET_CODE (operands[0]);
1343 else
1344 scratch = operands[4];
1345 if (GET_CODE (operands[1]) == CONST_INT
1346 && GET_CODE (operands[2]) != CONST_INT)
1347 {
1348 rtx tmp = operands[1];
1349
1350 operands[1] = operands[2];
1351 operands[2] = tmp;
1352 comparison = swap_condition (comparison);
1353 }
1354 if (GET_CODE (operands[2]) == CONST_INT)
1355 {
1356 HOST_WIDE_INT val = INTVAL (operands[2]);
1357 if ((val == -1 || val == -0x81)
1358 && (comparison == GT || comparison == LE))
1359 {
1360 comparison = (comparison == GT) ? GE : LT;
1361 operands[2] = gen_int_mode (val + 1, mode);
1362 }
1363 else if ((val == 1 || val == 0x80)
1364 && (comparison == GE || comparison == LT))
1365 {
1366 comparison = (comparison == GE) ? GT : LE;
1367 operands[2] = gen_int_mode (val - 1, mode);
1368 }
1369 else if (val == 1 && (comparison == GEU || comparison == LTU))
1370 {
1371 comparison = (comparison == GEU) ? NE : EQ;
1372 operands[2] = CONST0_RTX (mode);
1373 }
1374 else if (val == 0x80 && (comparison == GEU || comparison == LTU))
1375 {
1376 comparison = (comparison == GEU) ? GTU : LEU;
1377 operands[2] = gen_int_mode (val - 1, mode);
1378 }
1379 else if (val == 0 && (comparison == GTU || comparison == LEU))
1380 comparison = (comparison == GTU) ? NE : EQ;
1381 else if (mode == SImode
1382 && ((val == 0x7fffffff
1383 && (comparison == GTU || comparison == LEU))
1384 || ((unsigned HOST_WIDE_INT) val
1385 == (unsigned HOST_WIDE_INT) 0x7fffffff + 1
1386 && (comparison == GEU || comparison == LTU))))
1387 {
1388 comparison = (comparison == GTU || comparison == GEU) ? LT : GE;
1389 operands[2] = CONST0_RTX (mode);
1390 }
1391 }
1392 op1 = operands[1];
1393 if (can_create_pseudo_p ())
1394 operands[1] = force_reg (mode, op1);
1395 /* When we are handling DImode comparisons, we want to keep constants so
1396 that we can optimize the component comparisons; however, memory loads
1397 are better issued as a whole so that they can be scheduled well.
1398 SImode equality comparisons allow I08 constants, but only when they
1399 compare r0. Hence, if operands[1] has to be loaded from somewhere else
1400 into a register, that register might as well be r0, and we allow the
1401 constant. If it is already in a register, this is likely to be
1402 allocated to a different hard register, thus we load the constant into
1403 a register unless it is zero. */
1404 if (!REG_P (operands[2])
1405 && (GET_CODE (operands[2]) != CONST_INT
1406 || (mode == SImode && operands[2] != CONST0_RTX (SImode)
1407 && ((comparison != EQ && comparison != NE)
1408 || (REG_P (op1) && REGNO (op1) != R0_REG)
1409 || !satisfies_constraint_I08 (operands[2])))))
1410 {
1411 if (scratch && GET_MODE (scratch) == mode)
1412 {
1413 emit_move_insn (scratch, operands[2]);
1414 operands[2] = scratch;
1415 }
1416 else if (can_create_pseudo_p ())
1417 operands[2] = force_reg (mode, operands[2]);
1418 }
1419 return comparison;
1420 }
1421
1422 void
1423 expand_cbranchsi4 (rtx *operands, enum rtx_code comparison, int probability)
1424 {
1425 rtx (*branch_expander) (rtx) = gen_branch_true;
1426 rtx jump;
1427
1428 comparison = prepare_cbranch_operands (operands, SImode, comparison);
1429 switch (comparison)
1430 {
1431 case NE: case LT: case LE: case LTU: case LEU:
1432 comparison = reverse_condition (comparison);
1433 branch_expander = gen_branch_false;
1434 default: ;
1435 }
1436 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, T_REG),
1437 gen_rtx_fmt_ee (comparison, SImode,
1438 operands[1], operands[2])));
1439 jump = emit_jump_insn (branch_expander (operands[3]));
1440 if (probability >= 0)
1441 REG_NOTES (jump)
1442 = gen_rtx_EXPR_LIST (REG_BR_PROB, GEN_INT (probability),
1443 REG_NOTES (jump));
1444
1445 }
1446
1447 /* ??? How should we distribute probabilities when more than one branch
1448 is generated? So far we only have some ad-hoc observations:
1449 - If the operands are random, they are likely to differ in both parts.
1450 - If comparing items in a hash chain, the operands are random or equal;
1451 operation should be EQ or NE.
1452 - If items are searched in an ordered tree from the root, we can expect
1453 the highpart to be unequal about half of the time; operation should be
1454 an inequality comparison, operands non-constant, and overall probability
1455 about 50%. Likewise for quicksort.
1456 - Range checks will often be made against constants. Even if we assume for
1457 simplicity an even distribution of the non-constant operand over a
1458 sub-range here, the same probability could be generated with differently
1459 wide sub-ranges - as long as the ratio of the part of the subrange that
1460 is before the threshold to the part that comes after the threshold stays
1461 the same. Thus, we can't really tell anything here;
1462 assuming random distribution is at least simple.
1463 */
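/* As an illustration of the decomposition done below (added for exposition):
   a signed DImode "op1 > op2" with a non-constant op2 is split into
   msw_taken == GT, msw_skip == LT and lsw_taken == GTU, i.e. roughly

     if (op1h >  op2h) goto taken;    high words decide it (signed)
     if (op1h <  op2h) goto skip;
     if (op1l >u op2l) goto taken;    high words equal: unsigned low words
   skip:

   so up to three SImode branches, and hence up to three branch probabilities,
   may be needed for a single DImode comparison.  */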
1464
1465 bool
1466 expand_cbranchdi4 (rtx *operands, enum rtx_code comparison)
1467 {
1468 enum rtx_code msw_taken, msw_skip, lsw_taken;
1469 rtx skip_label = NULL_RTX;
1470 rtx op1h, op1l, op2h, op2l;
1471 int num_branches;
1472 int prob, rev_prob;
1473 int msw_taken_prob = -1, msw_skip_prob = -1, lsw_taken_prob = -1;
1474 rtx scratch = operands[4];
1475
1476 comparison = prepare_cbranch_operands (operands, DImode, comparison);
1477 op1h = gen_highpart_mode (SImode, DImode, operands[1]);
1478 op2h = gen_highpart_mode (SImode, DImode, operands[2]);
1479 op1l = gen_lowpart (SImode, operands[1]);
1480 op2l = gen_lowpart (SImode, operands[2]);
1481 msw_taken = msw_skip = lsw_taken = CODE_FOR_nothing;
1482 prob = split_branch_probability;
1483 rev_prob = REG_BR_PROB_BASE - prob;
1484 switch (comparison)
1485 {
1486 /* ??? Should we use the cmpeqdi_t pattern for equality comparisons?
1487 That costs 1 cycle more when the first branch can be predicted taken,
1488 but saves us mispredicts because only one branch needs prediction.
1489 It also enables generating the cmpeqdi_t-1 pattern. */
1490 case EQ:
1491 if (TARGET_CMPEQDI_T)
1492 {
1493 emit_insn (gen_cmpeqdi_t (operands[1], operands[2]));
1494 emit_jump_insn (gen_branch_true (operands[3]));
1495 return true;
1496 }
1497 msw_skip = NE;
1498 lsw_taken = EQ;
1499 if (prob >= 0)
1500 {
1501 /* If we had more precision, we'd use rev_prob - (rev_prob >> 32) .
1502 */
1503 msw_skip_prob = rev_prob;
1504 if (REG_BR_PROB_BASE <= 65535)
1505 lsw_taken_prob = prob ? REG_BR_PROB_BASE : 0;
1506 else
1507 {
1508 gcc_assert (HOST_BITS_PER_WIDEST_INT >= 64);
1509 lsw_taken_prob
1510 = (prob
1511 ? (REG_BR_PROB_BASE
1512 - ((HOST_WIDEST_INT) REG_BR_PROB_BASE * rev_prob
1513 / ((HOST_WIDEST_INT) prob << 32)))
1514 : 0);
1515 }
1516 }
1517 break;
1518 case NE:
1519 if (TARGET_CMPEQDI_T)
1520 {
1521 emit_insn (gen_cmpeqdi_t (operands[1], operands[2]));
1522 emit_jump_insn (gen_branch_false (operands[3]));
1523 return true;
1524 }
1525 msw_taken = NE;
1526 msw_taken_prob = prob;
1527 lsw_taken = NE;
1528 lsw_taken_prob = 0;
1529 break;
1530 case GTU: case GT:
1531 msw_taken = comparison;
1532 if (GET_CODE (op2l) == CONST_INT && INTVAL (op2l) == -1)
1533 break;
1534 if (comparison != GTU || op2h != CONST0_RTX (SImode))
1535 msw_skip = swap_condition (msw_taken);
1536 lsw_taken = GTU;
1537 break;
1538 case GEU: case GE:
1539 if (op2l == CONST0_RTX (SImode))
1540 msw_taken = comparison;
1541 else
1542 {
1543 msw_taken = comparison == GE ? GT : GTU;
1544 msw_skip = swap_condition (msw_taken);
1545 lsw_taken = GEU;
1546 }
1547 break;
1548 case LTU: case LT:
1549 msw_taken = comparison;
1550 if (op2l == CONST0_RTX (SImode))
1551 break;
1552 msw_skip = swap_condition (msw_taken);
1553 lsw_taken = LTU;
1554 break;
1555 case LEU: case LE:
1556 if (GET_CODE (op2l) == CONST_INT && INTVAL (op2l) == -1)
1557 msw_taken = comparison;
1558 else
1559 {
1560 lsw_taken = LEU;
1561 if (comparison == LE)
1562 msw_taken = LT;
1563 else if (op2h != CONST0_RTX (SImode))
1564 msw_taken = LTU;
1565 else
1566 break;
1567 msw_skip = swap_condition (msw_taken);
1568 }
1569 break;
1570 default: return false;
1571 }
1572 num_branches = ((msw_taken != CODE_FOR_nothing)
1573 + (msw_skip != CODE_FOR_nothing)
1574 + (lsw_taken != CODE_FOR_nothing));
1575 if (comparison != EQ && comparison != NE && num_branches > 1)
1576 {
1577 if (!CONSTANT_P (operands[2])
1578 && prob >= (int) (REG_BR_PROB_BASE * 3 / 8U)
1579 && prob <= (int) (REG_BR_PROB_BASE * 5 / 8U))
1580 {
1581 msw_taken_prob = prob / 2U;
1582 msw_skip_prob
1583 = REG_BR_PROB_BASE * rev_prob / (REG_BR_PROB_BASE + rev_prob);
1584 lsw_taken_prob = prob;
1585 }
1586 else
1587 {
1588 msw_taken_prob = prob;
1589 msw_skip_prob = REG_BR_PROB_BASE;
1590 /* ??? If we have a constant op2h, should we use that when
1591 calculating lsw_taken_prob? */
1592 lsw_taken_prob = prob;
1593 }
1594 }
1595 operands[1] = op1h;
1596 operands[2] = op2h;
1597 operands[4] = NULL_RTX;
1598 if (reload_completed
1599 && ! arith_reg_or_0_operand (op2h, SImode) && true_regnum (op1h)
1600 && (msw_taken != CODE_FOR_nothing || msw_skip != CODE_FOR_nothing))
1601 {
1602 emit_move_insn (scratch, operands[2]);
1603 operands[2] = scratch;
1604 }
1605 if (msw_taken != CODE_FOR_nothing)
1606 expand_cbranchsi4 (operands, msw_taken, msw_taken_prob);
1607 if (msw_skip != CODE_FOR_nothing)
1608 {
1609 rtx taken_label = operands[3];
1610
1611 operands[3] = skip_label = gen_label_rtx ();
1612 expand_cbranchsi4 (operands, msw_skip, msw_skip_prob);
1613 operands[3] = taken_label;
1614 }
1615 operands[1] = op1l;
1616 operands[2] = op2l;
1617 if (lsw_taken != CODE_FOR_nothing)
1618 {
1619 if (reload_completed
1620 && ! arith_reg_or_0_operand (op2l, SImode) && true_regnum (op1l))
1621 operands[4] = scratch;
1622 expand_cbranchsi4 (operands, lsw_taken, lsw_taken_prob);
1623 }
1624 if (msw_skip != CODE_FOR_nothing)
1625 emit_label (skip_label);
1626 return true;
1627 }
1628
1629 /* Prepare the operands for an scc instruction; make sure that the
1630 compare has been done. */
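/* For instance (illustrative): the SH compare instructions only test
   eq / gt / ge / gtu / geu, so a request for LT is converted to GT below and
   sh_compare_op0 / sh_compare_op1 are swapped; "a < b" is thus computed as
   "b > a", leaving the result in the T register.  */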
1631 rtx
1632 prepare_scc_operands (enum rtx_code code)
1633 {
1634 rtx t_reg = gen_rtx_REG (SImode, T_REG);
1635 enum rtx_code oldcode = code;
1636 enum machine_mode mode;
1637
1638 /* First need a compare insn. */
1639 switch (code)
1640 {
1641 case NE:
1642 /* It isn't possible to handle this case. */
1643 gcc_unreachable ();
1644 case LT:
1645 code = GT;
1646 break;
1647 case LE:
1648 code = GE;
1649 break;
1650 case LTU:
1651 code = GTU;
1652 break;
1653 case LEU:
1654 code = GEU;
1655 break;
1656 default:
1657 break;
1658 }
1659 if (code != oldcode)
1660 {
1661 rtx tmp = sh_compare_op0;
1662 sh_compare_op0 = sh_compare_op1;
1663 sh_compare_op1 = tmp;
1664 }
1665
1666 mode = GET_MODE (sh_compare_op0);
1667 if (mode == VOIDmode)
1668 mode = GET_MODE (sh_compare_op1);
1669
1670 sh_compare_op0 = force_reg (mode, sh_compare_op0);
1671 if ((code != EQ && code != NE
1672 && (sh_compare_op1 != const0_rtx
1673 || code == GTU || code == GEU || code == LTU || code == LEU))
1674 || (mode == DImode && sh_compare_op1 != const0_rtx)
1675 || (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT))
1676 sh_compare_op1 = force_reg (mode, sh_compare_op1);
1677
1678 if ((TARGET_SH4 || TARGET_SH2A) && GET_MODE_CLASS (mode) == MODE_FLOAT)
1679 (mode == SFmode ? emit_sf_insn : emit_df_insn)
1680 (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2,
1681 gen_rtx_SET (VOIDmode, t_reg,
1682 gen_rtx_fmt_ee (code, SImode,
1683 sh_compare_op0, sh_compare_op1)),
1684 gen_rtx_USE (VOIDmode, get_fpscr_rtx ()))));
1685 else
1686 emit_insn (gen_rtx_SET (VOIDmode, t_reg,
1687 gen_rtx_fmt_ee (code, SImode,
1688 sh_compare_op0, sh_compare_op1)));
1689
1690 return t_reg;
1691 }
1692
1693 /* Called from the md file, set up the operands of a compare instruction. */
1694
1695 void
1696 from_compare (rtx *operands, int code)
1697 {
1698 enum machine_mode mode = GET_MODE (sh_compare_op0);
1699 rtx insn;
1700 if (mode == VOIDmode)
1701 mode = GET_MODE (sh_compare_op1);
1702 if (code != EQ
1703 || mode == DImode
1704 || (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT))
1705 {
1706 /* Force args into regs, since we can't use constants here. */
1707 sh_compare_op0 = force_reg (mode, sh_compare_op0);
1708 if (sh_compare_op1 != const0_rtx
1709 || code == GTU || code == GEU
1710 || (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT))
1711 sh_compare_op1 = force_reg (mode, sh_compare_op1);
1712 }
1713 if (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT && code == GE)
1714 {
1715 from_compare (operands, GT);
1716 insn = gen_ieee_ccmpeqsf_t (sh_compare_op0, sh_compare_op1);
1717 }
1718 else
1719 insn = gen_rtx_SET (VOIDmode,
1720 gen_rtx_REG (SImode, T_REG),
1721 gen_rtx_fmt_ee (code, SImode,
1722 sh_compare_op0, sh_compare_op1));
1723 if ((TARGET_SH4 || TARGET_SH2A) && GET_MODE_CLASS (mode) == MODE_FLOAT)
1724 {
1725 insn = gen_rtx_PARALLEL (VOIDmode,
1726 gen_rtvec (2, insn,
1727 gen_rtx_USE (VOIDmode, get_fpscr_rtx ())));
1728 (mode == SFmode ? emit_sf_insn : emit_df_insn) (insn);
1729 }
1730 else
1731 emit_insn (insn);
1732 }
1733 \f
1734 /* Functions to output assembly code. */
1735
1736 /* Return a sequence of instructions to perform DI or DF move.
1737
1738 Since the SH cannot move a DI or DF in one instruction, we have
1739 to take care when we see overlapping source and dest registers. */
1740
1741 const char *
1742 output_movedouble (rtx insn ATTRIBUTE_UNUSED, rtx operands[],
1743 enum machine_mode mode)
1744 {
1745 rtx dst = operands[0];
1746 rtx src = operands[1];
1747
1748 if (GET_CODE (dst) == MEM
1749 && GET_CODE (XEXP (dst, 0)) == PRE_DEC)
1750 return "mov.l %T1,%0\n\tmov.l %1,%0";
1751
1752 if (register_operand (dst, mode)
1753 && register_operand (src, mode))
1754 {
1755 if (REGNO (src) == MACH_REG)
1756 return "sts mach,%S0\n\tsts macl,%R0";
1757
1758 /* When mov.d r1,r2 do r2->r3 then r1->r2;
1759 when mov.d r1,r0 do r1->r0 then r2->r1. */
1760
1761 if (REGNO (src) + 1 == REGNO (dst))
1762 return "mov %T1,%T0\n\tmov %1,%0";
1763 else
1764 return "mov %1,%0\n\tmov %T1,%T0";
1765 }
1766 else if (GET_CODE (src) == CONST_INT)
1767 {
1768 if (INTVAL (src) < 0)
1769 output_asm_insn ("mov #-1,%S0", operands);
1770 else
1771 output_asm_insn ("mov #0,%S0", operands);
1772
1773 return "mov %1,%R0";
1774 }
1775 else if (GET_CODE (src) == MEM)
1776 {
1777 int ptrreg = -1;
1778 int dreg = REGNO (dst);
1779 rtx inside = XEXP (src, 0);
1780
1781 switch (GET_CODE (inside))
1782 {
1783 case REG:
1784 ptrreg = REGNO (inside);
1785 break;
1786
1787 case SUBREG:
1788 ptrreg = subreg_regno (inside);
1789 break;
1790
1791 case PLUS:
1792 ptrreg = REGNO (XEXP (inside, 0));
1793 /* ??? An r0+REG address shouldn't be possible here, because it isn't
1794 an offsettable address. Unfortunately, offsettable addresses use
1795 QImode to check the offset, and a QImode offsettable address
1796 requires r0 for the other operand, which is not currently
1797 supported, so we can't use the 'o' constraint.
1798 Thus we must check for and handle r0+REG addresses here.
1799 We punt for now, since this is likely very rare. */
1800 gcc_assert (GET_CODE (XEXP (inside, 1)) != REG);
1801 break;
1802
1803 case LABEL_REF:
1804 return "mov.l %1,%0\n\tmov.l %1+4,%T0";
1805 case POST_INC:
1806 return "mov.l %1,%0\n\tmov.l %1,%T0";
1807 default:
1808 gcc_unreachable ();
1809 }
1810
1811 /* Work out the safe way to copy. Copy into the second half first. */
1812 if (dreg == ptrreg)
1813 return "mov.l %T1,%T0\n\tmov.l %1,%0";
1814 }
1815
1816 return "mov.l %1,%0\n\tmov.l %T1,%T0";
1817 }
1818
1819 /* Print an instruction which would have gone into a delay slot after
1820 another instruction, but couldn't because the other instruction expanded
1821 into a sequence where putting the slot insn at the end wouldn't work. */
1822
1823 static void
1824 print_slot (rtx insn)
1825 {
1826 final_scan_insn (XVECEXP (insn, 0, 1), asm_out_file, optimize, 1, NULL);
1827
1828 INSN_DELETED_P (XVECEXP (insn, 0, 1)) = 1;
1829 }
1830
1831 const char *
1832 output_far_jump (rtx insn, rtx op)
1833 {
1834 struct { rtx lab, reg, op; } this;
1835 rtx braf_base_lab = NULL_RTX;
1836 const char *jump;
1837 int far;
1838 int offset = branch_dest (insn) - INSN_ADDRESSES (INSN_UID (insn));
1839 rtx prev;
1840
1841 this.lab = gen_label_rtx ();
1842
1843 if (TARGET_SH2
1844 && offset >= -32764
1845 && offset - get_attr_length (insn) <= 32766)
1846 {
1847 far = 0;
1848 jump = "mov.w %O0,%1; braf %1";
1849 }
1850 else
1851 {
1852 far = 1;
1853 if (flag_pic)
1854 {
1855 if (TARGET_SH2)
1856 jump = "mov.l %O0,%1; braf %1";
1857 else
1858 jump = "mov.l r0,@-r15; mova %O0,r0; mov.l @r0,%1; add r0,%1; mov.l @r15+,r0; jmp @%1";
1859 }
1860 else
1861 jump = "mov.l %O0,%1; jmp @%1";
1862 }
1863 /* If we have a scratch register available, use it. */
1864 if (GET_CODE ((prev = prev_nonnote_insn (insn))) == INSN
1865 && INSN_CODE (prev) == CODE_FOR_indirect_jump_scratch)
1866 {
1867 this.reg = SET_DEST (XVECEXP (PATTERN (prev), 0, 0));
1868 if (REGNO (this.reg) == R0_REG && flag_pic && ! TARGET_SH2)
1869 jump = "mov.l r1,@-r15; mova %O0,r0; mov.l @r0,r1; add r1,r0; mov.l @r15+,r1; jmp @%1";
1870 output_asm_insn (jump, &this.lab);
1871 if (dbr_sequence_length ())
1872 print_slot (final_sequence);
1873 else
1874 output_asm_insn ("nop", 0);
1875 }
1876 else
1877 {
1878 /* Output the delay slot insn first if any. */
1879 if (dbr_sequence_length ())
1880 print_slot (final_sequence);
1881
1882 this.reg = gen_rtx_REG (SImode, 13);
1883 /* We must keep the stack aligned to 8-byte boundaries on SH5.
1884 Fortunately, MACL is fixed and call-clobbered, and we never
1885 need its value across jumps, so save r13 in it instead of in
1886 the stack. */
1887 if (TARGET_SH5)
1888 output_asm_insn ("lds r13, macl", 0);
1889 else
1890 output_asm_insn ("mov.l r13,@-r15", 0);
1891 output_asm_insn (jump, &this.lab);
1892 if (TARGET_SH5)
1893 output_asm_insn ("sts macl, r13", 0);
1894 else
1895 output_asm_insn ("mov.l @r15+,r13", 0);
1896 }
1897 if (far && flag_pic && TARGET_SH2)
1898 {
1899 braf_base_lab = gen_label_rtx ();
1900 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1901 CODE_LABEL_NUMBER (braf_base_lab));
1902 }
1903 if (far)
1904 output_asm_insn (".align 2", 0);
1905 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (this.lab));
1906 this.op = op;
1907 if (far && flag_pic)
1908 {
1909 if (TARGET_SH2)
1910 this.lab = braf_base_lab;
1911 output_asm_insn (".long %O2-%O0", &this.lab);
1912 }
1913 else
1914 output_asm_insn (far ? ".long %O2" : ".word %O2-%O0", &this.lab);
1915 return "";
1916 }
1917
1918 /* Local label counter, used for constants in the pool and inside
1919 pattern branches. */
1920
1921 static int lf = 100;
1922
1923 /* Output code for ordinary branches. */
1924
1925 const char *
1926 output_branch (int logic, rtx insn, rtx *operands)
1927 {
1928 switch (get_attr_length (insn))
1929 {
1930 case 6:
1931 /* This can happen if filling the delay slot has caused a forward
1932 branch to exceed its range (we could reverse it, but only
1933 when we know we won't overextend other branches; this should
1934 best be handled by relaxation).
1935 It can also happen when other condbranches hoist delay slot insns
1936 from their destinations, thus leading to code size increase.
1937 But the branch will still be in the range -4092..+4098 bytes. */
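/* A rough sketch of what the non-relaxed path below emits (assuming the
   delay slot insn, if any, is not annulled and so can stay in the
   inverted branch's slot):

       bf.s   .LFnn            ! or bt.s -- the inverted condition
       <delay slot insn>
       bra    <original target>
       nop
    .LFnn:

   i.e. a short inverted conditional branch skips over an unconditional
   bra whose larger range reaches the real destination.  */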
1938
1939 if (! TARGET_RELAX)
1940 {
1941 int label = lf++;
1942 /* The call to print_slot will clobber the operands. */
1943 rtx op0 = operands[0];
1944
1945 /* If the instruction in the delay slot is annulled (true), then
1946 there is no delay slot where we can put it now. The only safe
1947 place for it is after the label. final will do that by default. */
1948
1949 if (final_sequence
1950 && ! INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))
1951 && get_attr_length (XVECEXP (final_sequence, 0, 1)))
1952 {
1953 asm_fprintf (asm_out_file, "\tb%s%ss\t%LLF%d\n", logic ? "f" : "t",
1954 ASSEMBLER_DIALECT ? "/" : ".", label);
1955 print_slot (final_sequence);
1956 }
1957 else
1958 asm_fprintf (asm_out_file, "\tb%s\t%LLF%d\n", logic ? "f" : "t", label);
1959
1960 output_asm_insn ("bra\t%l0", &op0);
1961 fprintf (asm_out_file, "\tnop\n");
1962 (*targetm.asm_out.internal_label) (asm_out_file, "LF", label);
1963
1964 return "";
1965 }
1966 /* When relaxing, handle this like a short branch. The linker
1967 will fix it up if it still doesn't fit after relaxation. */
1968 case 2:
1969 return logic ? "bt%.\t%l0" : "bf%.\t%l0";
1970
1971 /* These are for SH2e, in which we have to account for the
1972 extra nop because of the hardware bug in annulled branches. */
1973 case 8:
1974 if (! TARGET_RELAX)
1975 {
1976 int label = lf++;
1977
1978 gcc_assert (!final_sequence
1979 || !(INSN_ANNULLED_BRANCH_P
1980 (XVECEXP (final_sequence, 0, 0))));
1981 asm_fprintf (asm_out_file, "b%s%ss\t%LLF%d\n",
1982 logic ? "f" : "t",
1983 ASSEMBLER_DIALECT ? "/" : ".", label);
1984 fprintf (asm_out_file, "\tnop\n");
1985 output_asm_insn ("bra\t%l0", operands);
1986 fprintf (asm_out_file, "\tnop\n");
1987 (*targetm.asm_out.internal_label) (asm_out_file, "LF", label);
1988
1989 return "";
1990 }
1991 /* When relaxing, fall through. */
1992 case 4:
1993 {
1994 char buffer[10];
1995
1996 sprintf (buffer, "b%s%ss\t%%l0",
1997 logic ? "t" : "f",
1998 ASSEMBLER_DIALECT ? "/" : ".");
1999 output_asm_insn (buffer, &operands[0]);
2000 return "nop";
2001 }
2002
2003 default:
2004 /* There should be no longer branches now - that would
2005 indicate that something has destroyed the branches set
2006 up in machine_dependent_reorg. */
2007 gcc_unreachable ();
2008 }
2009 }
2010
2011 /* Output a code sequence for INSN using TEMPLATE with OPERANDS; but before,
2012 fill in operands[9] with a label for the successor insn.
2013 We try to use jump threading where possible.
2014 If CODE matches the comparison in the IF_THEN_ELSE of a following jump,
2015 we assume the jump is taken. I.e. EQ means follow jmp and bf, NE means
2016 follow jmp and bt, if the address is in range. */
2017 const char *
2018 output_branchy_insn (enum rtx_code code, const char *template,
2019 rtx insn, rtx *operands)
2020 {
2021 rtx next_insn = NEXT_INSN (insn);
2022
2023 if (next_insn && GET_CODE (next_insn) == JUMP_INSN && condjump_p (next_insn))
2024 {
2025 rtx src = SET_SRC (PATTERN (next_insn));
2026 if (GET_CODE (src) == IF_THEN_ELSE && GET_CODE (XEXP (src, 0)) != code)
2027 {
2028 /* Following branch not taken */
2029 operands[9] = gen_label_rtx ();
2030 emit_label_after (operands[9], next_insn);
2031 INSN_ADDRESSES_NEW (operands[9],
2032 INSN_ADDRESSES (INSN_UID (next_insn))
2033 + get_attr_length (next_insn));
2034 return template;
2035 }
2036 else
2037 {
2038 int offset = (branch_dest (next_insn)
2039 - INSN_ADDRESSES (INSN_UID (next_insn)) + 4);
2040 if (offset >= -252 && offset <= 258)
2041 {
2042 if (GET_CODE (src) == IF_THEN_ELSE)
2043 /* branch_true */
2044 src = XEXP (src, 1);
2045 operands[9] = src;
2046 return template;
2047 }
2048 }
2049 }
2050 operands[9] = gen_label_rtx ();
2051 emit_label_after (operands[9], insn);
2052 INSN_ADDRESSES_NEW (operands[9],
2053 INSN_ADDRESSES (INSN_UID (insn))
2054 + get_attr_length (insn));
2055 return template;
2056 }
2057
2058 const char *
2059 output_ieee_ccmpeq (rtx insn, rtx *operands)
2060 {
2061 return output_branchy_insn (NE, "bt\t%l9\n\tfcmp/eq\t%1,%0",
2062 insn, operands);
2063 }
2064 \f
2065 /* Output the start of the assembler file. */
2066
2067 static void
2068 sh_file_start (void)
2069 {
2070 default_file_start ();
2071
2072 #ifdef SYMBIAN
2073 /* Declare the .directive section before it is used. */
2074 fputs ("\t.section .directive, \"SM\", @progbits, 1\n", asm_out_file);
2075 fputs ("\t.asciz \"#<SYMEDIT>#\\n\"\n", asm_out_file);
2076 #endif
2077
2078 if (TARGET_ELF)
2079 /* We need to show the text section with the proper
2080 attributes as in TEXT_SECTION_ASM_OP, before dwarf2out
2081 emits it without attributes in TEXT_SECTION_ASM_OP, else GAS
2082 will complain. We can teach GAS specifically about the
2083 default attributes for our choice of text section, but
2084 then we would have to change GAS again if/when we change
2085 the text section name. */
2086 fprintf (asm_out_file, "%s\n", TEXT_SECTION_ASM_OP);
2087 else
2088 /* Switch to the data section so that the coffsem symbol
2089 isn't in the text section. */
2090 switch_to_section (data_section);
2091
2092 if (TARGET_LITTLE_ENDIAN)
2093 fputs ("\t.little\n", asm_out_file);
2094
2095 if (!TARGET_ELF)
2096 {
2097 if (TARGET_SHCOMPACT)
2098 fputs ("\t.mode\tSHcompact\n", asm_out_file);
2099 else if (TARGET_SHMEDIA)
2100 fprintf (asm_out_file, "\t.mode\tSHmedia\n\t.abi\t%i\n",
2101 TARGET_SHMEDIA64 ? 64 : 32);
2102 }
2103 }
2104 \f
2105 /* Check if PAT includes UNSPEC_CALLER unspec pattern. */
2106
2107 static bool
2108 unspec_caller_rtx_p (rtx pat)
2109 {
2110 switch (GET_CODE (pat))
2111 {
2112 case CONST:
2113 return unspec_caller_rtx_p (XEXP (pat, 0));
2114 case PLUS:
2115 case MINUS:
2116 if (unspec_caller_rtx_p (XEXP (pat, 0)))
2117 return true;
2118 return unspec_caller_rtx_p (XEXP (pat, 1));
2119 case UNSPEC:
2120 if (XINT (pat, 1) == UNSPEC_CALLER)
2121 return true;
2122 default:
2123 break;
2124 }
2125
2126 return false;
2127 }
2128
2129 /* Indicate that INSN cannot be duplicated. This is true for an insn
2130 that generates a unique label. */
2131
2132 static bool
2133 sh_cannot_copy_insn_p (rtx insn)
2134 {
2135 rtx pat;
2136
2137 if (!reload_completed || !flag_pic)
2138 return false;
2139
2140 if (GET_CODE (insn) != INSN)
2141 return false;
2142 if (asm_noperands (insn) >= 0)
2143 return false;
2144
2145 pat = PATTERN (insn);
2146 if (GET_CODE (pat) != SET)
2147 return false;
2148 pat = SET_SRC (pat);
2149
2150 if (unspec_caller_rtx_p (pat))
2151 return true;
2152
2153 return false;
2154 }
2155 \f
2156 /* Actual number of instructions used to make a shift by N. */
2157 static const char ashiftrt_insns[] =
2158 { 0,1,2,3,4,5,8,8,8,8,8,8,8,8,8,8,2,3,4,5,8,8,8,8,8,8,8,8,8,8,8,2};
2159
2160 /* Left shift and logical right shift are the same. */
2161 static const char shift_insns[] =
2162 { 0,1,1,2,2,3,3,4,1,2,2,3,3,4,3,3,1,2,2,3,3,4,3,3,2,3,3,4,4,4,3,3};
2163
2164 /* Individual shift amounts needed to get the above length sequences.
2165 One bit right shifts clobber the T bit, so when possible, put one bit
2166 shifts in the middle of the sequence, so the ends are eligible for
2167 branch delay slots. */
2168 static const short shift_amounts[32][5] = {
2169 {0}, {1}, {2}, {2, 1},
2170 {2, 2}, {2, 1, 2}, {2, 2, 2}, {2, 2, 1, 2},
2171 {8}, {8, 1}, {8, 2}, {8, 1, 2},
2172 {8, 2, 2}, {8, 2, 1, 2}, {8, -2, 8}, {8, -1, 8},
2173 {16}, {16, 1}, {16, 2}, {16, 1, 2},
2174 {16, 2, 2}, {16, 2, 1, 2}, {16, -2, 8}, {16, -1, 8},
2175 {16, 8}, {16, 1, 8}, {16, 8, 2}, {16, 8, 1, 2},
2176 {16, 8, 2, 2}, {16, -1, -2, 16}, {16, -2, 16}, {16, -1, 16}};
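/* A worked example of reading these tables (added for illustration): a
   constant shift by 13 costs shift_insns[13] == 4 insns, and
   shift_amounts[13] == {8, 2, 1, 2} splits it into shifts by 8, 2, 1 and 2
   bits (8 + 2 + 1 + 2 == 13), with the single-bit shift kept away from the
   ends of the sequence as described above.  */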
2177
2178 /* Likewise, but for shift amounts < 16, up to three highmost bits
2179 might be clobbered. This is typically used when combined with some
2180 kind of sign or zero extension. */
2181
2182 static const char ext_shift_insns[] =
2183 { 0,1,1,2,2,3,2,2,1,2,2,3,3,3,2,2,1,2,2,3,3,4,3,3,2,3,3,4,4,4,3,3};
2184
2185 static const short ext_shift_amounts[32][4] = {
2186 {0}, {1}, {2}, {2, 1},
2187 {2, 2}, {2, 1, 2}, {8, -2}, {8, -1},
2188 {8}, {8, 1}, {8, 2}, {8, 1, 2},
2189 {8, 2, 2}, {16, -2, -1}, {16, -2}, {16, -1},
2190 {16}, {16, 1}, {16, 2}, {16, 1, 2},
2191 {16, 2, 2}, {16, 2, 1, 2}, {16, -2, 8}, {16, -1, 8},
2192 {16, 8}, {16, 1, 8}, {16, 8, 2}, {16, 8, 1, 2},
2193 {16, 8, 2, 2}, {16, -1, -2, 16}, {16, -2, 16}, {16, -1, 16}};
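/* For instance, ext_shift_amounts[6] == {8, -2}: shift left by 8, then by
   -2, i.e. logically right by 2 (gen_ashift below flips the direction for
   negative counts).  The net result is a left shift by 6 in only
   ext_shift_insns[6] == 2 insns, at the price of zeroing some of the three
   highmost bits, which is acceptable in the contexts where this table is
   used.  */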
2194
2195 /* Assuming we have a value that has been sign-extended by at least one bit,
2196 can we use the ext_shift_amounts with the last shift turned to an arithmetic shift
2197 to shift it by N without data loss, and quicker than by other means? */
2198 #define EXT_SHIFT_SIGNED(n) (((n) | 8) == 15)
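/* Note: (n | 8) == 15 holds exactly for n == 7 and n == 15, so only those
   two shift counts qualify for the trailing arithmetic shift trick.  */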
2199
2200 /* This is used in length attributes in sh.md to help compute the length
2201 of arbitrary constant shift instructions. */
2202
2203 int
2204 shift_insns_rtx (rtx insn)
2205 {
2206 rtx set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
2207 int shift_count = INTVAL (XEXP (set_src, 1));
2208 enum rtx_code shift_code = GET_CODE (set_src);
2209
2210 switch (shift_code)
2211 {
2212 case ASHIFTRT:
2213 return ashiftrt_insns[shift_count];
2214 case LSHIFTRT:
2215 case ASHIFT:
2216 return shift_insns[shift_count];
2217 default:
2218 gcc_unreachable ();
2219 }
2220 }
2221
2222 /* Return the cost of a shift. */
2223
2224 static inline int
2225 shiftcosts (rtx x)
2226 {
2227 int value;
2228
2229 if (TARGET_SHMEDIA)
2230 return 1;
2231
2232 if (GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
2233 {
2234 if (GET_MODE (x) == DImode
2235 && GET_CODE (XEXP (x, 1)) == CONST_INT
2236 && INTVAL (XEXP (x, 1)) == 1)
2237 return 2;
2238
2239 /* Everything else is invalid, because there is no pattern for it. */
2240 return MAX_COST;
2241 }
2242 /* If shifting by a non-constant, then this will be expensive. */
2243 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
2244 return SH_DYNAMIC_SHIFT_COST;
2245
2246 value = INTVAL (XEXP (x, 1));
2247
2248 /* Otherwise, return the true cost in instructions. */
2249 if (GET_CODE (x) == ASHIFTRT)
2250 {
2251 int cost = ashiftrt_insns[value];
2252 /* If SH3, then we put the constant in a reg and use shad. */
2253 if (cost > 1 + SH_DYNAMIC_SHIFT_COST)
2254 cost = 1 + SH_DYNAMIC_SHIFT_COST;
2255 return cost;
2256 }
2257 else
2258 return shift_insns[value];
2259 }
2260
2261 /* Return the cost of an AND operation. */
2262
2263 static inline int
2264 andcosts (rtx x)
2265 {
2266 int i;
2267
2268 /* ANDing with a register takes a single-cycle 'and' instruction. */
2269 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
2270 return 1;
2271
2272 i = INTVAL (XEXP (x, 1));
2273
2274 if (TARGET_SHMEDIA)
2275 {
2276 if (satisfies_constraint_I10 (XEXP (x, 1))
2277 || satisfies_constraint_J16 (XEXP (x, 1)))
2278 return 1;
2279 else
2280 return 1 + rtx_cost (XEXP (x, 1), AND);
2281 }
2282
2283 /* These constants can be handled with single-cycle extu.[bw] instructions. */
2284 if (i == 0xff || i == 0xffff)
2285 return 1;
2286 /* Constants that can be used in an and immediate instruction in a single
2287 cycle, but this requires r0, so make it a little more expensive. */
2288 if (CONST_OK_FOR_K08 (i))
2289 return 2;
2290 /* Constants that can be loaded with a mov immediate and an and.
2291 This case is probably unnecessary. */
2292 if (CONST_OK_FOR_I08 (i))
2293 return 2;
2294 /* Any other constant requires a 2 cycle pc-relative load plus an and.
2295 This case is probably unnecessary. */
2296 return 3;
2297 }
2298
2299 /* Return the cost of an addition or a subtraction. */
2300
2301 static inline int
2302 addsubcosts (rtx x)
2303 {
2304 /* Adding a register is a single cycle insn. */
2305 if (GET_CODE (XEXP (x, 1)) == REG
2306 || GET_CODE (XEXP (x, 1)) == SUBREG)
2307 return 1;
2308
2309 /* Likewise for small constants. */
2310 if (GET_CODE (XEXP (x, 1)) == CONST_INT
2311 && CONST_OK_FOR_ADD (INTVAL (XEXP (x, 1))))
2312 return 1;
2313
2314 if (TARGET_SHMEDIA)
2315 switch (GET_CODE (XEXP (x, 1)))
2316 {
2317 case CONST:
2318 case LABEL_REF:
2319 case SYMBOL_REF:
2320 return TARGET_SHMEDIA64 ? 5 : 3;
2321
2322 case CONST_INT:
2323 if (CONST_OK_FOR_I16 (INTVAL (XEXP (x, 1))))
2324 return 2;
2325 else if (CONST_OK_FOR_I16 (INTVAL (XEXP (x, 1)) >> 16))
2326 return 3;
2327 else if (CONST_OK_FOR_I16 ((INTVAL (XEXP (x, 1)) >> 16) >> 16))
2328 return 4;
2329
2330 /* Fall through. */
2331 default:
2332 return 5;
2333 }
2334
2335 /* Any other constant requires a 2 cycle pc-relative load plus an
2336 addition. */
2337 return 3;
2338 }
2339
2340 /* Return the cost of a multiply. */
2341 static inline int
2342 multcosts (rtx x ATTRIBUTE_UNUSED)
2343 {
2344 if (sh_multcost >= 0)
2345 return sh_multcost;
2346 if (TARGET_SHMEDIA)
2347 /* ??? We have a mul insn, but it has a latency of three, and doesn't
2348 accept constants. Ideally, we would use a cost of one or two and
2349 add the cost of the operand, but disregard the latter when inside loops
2350 and loop invariant code motion is still to follow.
2351 Using a multiply first and splitting it later if it's a loss
2352 doesn't work because of different sign / zero extension semantics
2353 of multiplies vs. shifts. */
2354 return TARGET_SMALLCODE ? 2 : 3;
2355
2356 if (TARGET_SH2)
2357 {
2358 /* We have a mul insn, so we can never take more than the mul and the
2359 read of the mac reg, but count more because of the latency and extra
2360 reg usage. */
2361 if (TARGET_SMALLCODE)
2362 return 2;
2363 return 3;
2364 }
2365
2366 /* If we're aiming at small code, then just count the number of
2367 insns in a multiply call sequence. */
2368 if (TARGET_SMALLCODE)
2369 return 5;
2370
2371 /* Otherwise count all the insns in the routine we'd be calling too. */
2372 return 20;
2373 }
2374
2375 /* Compute a (partial) cost for rtx X. Return true if the complete
2376 cost has been computed, and false if subexpressions should be
2377 scanned. In either case, *TOTAL contains the cost result. */
2378
2379 static bool
2380 sh_rtx_costs (rtx x, int code, int outer_code, int *total)
2381 {
2382 switch (code)
2383 {
2384 case CONST_INT:
2385 if (TARGET_SHMEDIA)
2386 {
2387 if (INTVAL (x) == 0)
2388 *total = 0;
2389 else if (outer_code == AND && and_operand ((x), DImode))
2390 *total = 0;
2391 else if ((outer_code == IOR || outer_code == XOR
2392 || outer_code == PLUS)
2393 && CONST_OK_FOR_I10 (INTVAL (x)))
2394 *total = 0;
2395 else if (CONST_OK_FOR_I16 (INTVAL (x)))
2396 *total = COSTS_N_INSNS (outer_code != SET);
2397 else if (CONST_OK_FOR_I16 (INTVAL (x) >> 16))
2398 *total = COSTS_N_INSNS ((outer_code != SET) + 1);
2399 else if (CONST_OK_FOR_I16 ((INTVAL (x) >> 16) >> 16))
2400 *total = COSTS_N_INSNS ((outer_code != SET) + 2);
2401 else
2402 *total = COSTS_N_INSNS ((outer_code != SET) + 3);
2403 return true;
2404 }
2405 if (CONST_OK_FOR_I08 (INTVAL (x)))
2406 *total = 0;
2407 else if ((outer_code == AND || outer_code == IOR || outer_code == XOR)
2408 && CONST_OK_FOR_K08 (INTVAL (x)))
2409 *total = 1;
2410 /* prepare_cmp_insn will force costly constants into registers before
2411 the cbranch[sd]i4 patterns can see them, so preserve potentially
2412 interesting ones not covered by I08 above. */
2413 else if (outer_code == COMPARE
2414 && ((unsigned HOST_WIDE_INT) INTVAL (x)
2415 == (unsigned HOST_WIDE_INT) 0x7fffffff + 1
2416 || INTVAL (x) == 0x7fffffff
2417 || INTVAL (x) == 0x80 || INTVAL (x) == -0x81))
2418 *total = 1;
2419 else
2420 *total = 8;
2421 return true;
2422
2423 case CONST:
2424 case LABEL_REF:
2425 case SYMBOL_REF:
2426 if (TARGET_SHMEDIA64)
2427 *total = COSTS_N_INSNS (4);
2428 else if (TARGET_SHMEDIA32)
2429 *total = COSTS_N_INSNS (2);
2430 else
2431 *total = 5;
2432 return true;
2433
2434 case CONST_DOUBLE:
2435 if (TARGET_SHMEDIA)
2436 *total = COSTS_N_INSNS (4);
2437 /* prepare_cmp_insn will force costly constants into registers before
2438 the cbranchdi4 pattern can see them, so preserve potentially
2439 interesting ones. */
2440 else if (outer_code == COMPARE && GET_MODE (x) == DImode)
2441 *total = 1;
2442 else
2443 *total = 10;
2444 return true;
2445 case CONST_VECTOR:
2446 if (x == CONST0_RTX (GET_MODE (x)))
2447 *total = 0;
2448 else if (sh_1el_vec (x, VOIDmode))
2449 *total = outer_code != SET;
2450 if (sh_rep_vec (x, VOIDmode))
2451 *total = ((GET_MODE_UNIT_SIZE (GET_MODE (x)) + 3) / 4
2452 + (outer_code != SET));
2453 *total = COSTS_N_INSNS (3) + (outer_code != SET);
2454 return true;
2455
2456 case PLUS:
2457 case MINUS:
2458 *total = COSTS_N_INSNS (addsubcosts (x));
2459 return true;
2460
2461 case AND:
2462 *total = COSTS_N_INSNS (andcosts (x));
2463 return true;
2464
2465 case MULT:
2466 *total = COSTS_N_INSNS (multcosts (x));
2467 return true;
2468
2469 case ASHIFT:
2470 case ASHIFTRT:
2471 case LSHIFTRT:
2472 *total = COSTS_N_INSNS (shiftcosts (x));
2473 return true;
2474
2475 case DIV:
2476 case UDIV:
2477 case MOD:
2478 case UMOD:
2479 *total = COSTS_N_INSNS (20);
2480 return true;
2481
2482 case PARALLEL:
2483 if (sh_1el_vec (x, VOIDmode))
2484 *total = outer_code != SET;
2485 if (sh_rep_vec (x, VOIDmode))
2486 *total = ((GET_MODE_UNIT_SIZE (GET_MODE (x)) + 3) / 4
2487 + (outer_code != SET));
2488 *total = COSTS_N_INSNS (3) + (outer_code != SET);
2489 return true;
2490
2491 case FLOAT:
2492 case FIX:
2493 *total = 100;
2494 return true;
2495
2496 default:
2497 return false;
2498 }
2499 }
2500
2501 /* Compute the cost of an address. For the SH, all valid addresses are
2502 the same cost. Use a slightly higher cost for reg + reg addressing,
2503 since it increases pressure on r0. */
2504
2505 static int
2506 sh_address_cost (rtx X)
2507 {
2508 return (GET_CODE (X) == PLUS
2509 && ! CONSTANT_P (XEXP (X, 1))
2510 && ! TARGET_SHMEDIA ? 1 : 0);
2511 }
2512
2513 /* Code to expand a shift. */
2514
2515 void
2516 gen_ashift (int type, int n, rtx reg)
2517 {
2518 /* Negative values here come from the shift_amounts array. */
2519 if (n < 0)
2520 {
2521 if (type == ASHIFT)
2522 type = LSHIFTRT;
2523 else
2524 type = ASHIFT;
2525 n = -n;
2526 }
2527
2528 switch (type)
2529 {
2530 case ASHIFTRT:
2531 emit_insn (gen_ashrsi3_k (reg, reg, GEN_INT (n)));
2532 break;
2533 case LSHIFTRT:
2534 if (n == 1)
2535 emit_insn (gen_lshrsi3_m (reg, reg, GEN_INT (n)));
2536 else
2537 emit_insn (gen_lshrsi3_k (reg, reg, GEN_INT (n)));
2538 break;
2539 case ASHIFT:
2540 emit_insn (gen_ashlsi3_std (reg, reg, GEN_INT (n)));
2541 break;
2542 }
2543 }
2544
2545 /* Same for HImode */
2546
2547 void
2548 gen_ashift_hi (int type, int n, rtx reg)
2549 {
2550 /* Negative values here come from the shift_amounts array. */
2551 if (n < 0)
2552 {
2553 if (type == ASHIFT)
2554 type = LSHIFTRT;
2555 else
2556 type = ASHIFT;
2557 n = -n;
2558 }
2559
2560 switch (type)
2561 {
2562 case ASHIFTRT:
2563 case LSHIFTRT:
2564 /* We don't have HImode right shift operations because using the
2565 ordinary 32 bit shift instructions for that doesn't generate proper
2566 zero/sign extension.
2567 gen_ashift_hi is only called in contexts where we know that the
2568 sign extension works out correctly. */
2569 {
2570 int offset = 0;
2571 if (GET_CODE (reg) == SUBREG)
2572 {
2573 offset = SUBREG_BYTE (reg);
2574 reg = SUBREG_REG (reg);
2575 }
2576 gen_ashift (type, n, gen_rtx_SUBREG (SImode, reg, offset));
2577 break;
2578 }
2579 case ASHIFT:
2580 emit_insn (gen_ashlhi3_k (reg, reg, GEN_INT (n)));
2581 break;
2582 }
2583 }
2584
2585 /* Output RTL to split a constant shift into its component SH constant
2586 shift instructions. */
2587
2588 void
2589 gen_shifty_op (int code, rtx *operands)
2590 {
2591 int value = INTVAL (operands[2]);
2592 int max, i;
2593
2594 /* Truncate the shift count in case it is out of bounds. */
2595 value = value & 0x1f;
2596
2597 if (value == 31)
2598 {
2599 if (code == LSHIFTRT)
2600 {
2601 emit_insn (gen_rotlsi3_1 (operands[0], operands[0]));
2602 emit_insn (gen_movt (operands[0]));
2603 return;
2604 }
2605 else if (code == ASHIFT)
2606 {
2607 /* There is a two instruction sequence for 31 bit left shifts,
2608 but it requires r0. */
2609 if (GET_CODE (operands[0]) == REG && REGNO (operands[0]) == 0)
2610 {
2611 emit_insn (gen_andsi3 (operands[0], operands[0], const1_rtx));
2612 emit_insn (gen_rotlsi3_31 (operands[0], operands[0]));
2613 return;
2614 }
2615 }
2616 }
2617 else if (value == 0)
2618 {
2619 /* This can happen even when optimizing, if there were subregs before
2620 reload. Don't output a nop here, as this is never optimized away;
2621 use a no-op move instead. */
2622 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[0]));
2623 return;
2624 }
2625
2626 max = shift_insns[value];
2627 for (i = 0; i < max; i++)
2628 gen_ashift (code, shift_amounts[value][i], operands[0]);
2629 }
2630
2631 /* Same as above, but optimized for values where the topmost bits don't
2632 matter. */
2633
2634 void
2635 gen_shifty_hi_op (int code, rtx *operands)
2636 {
2637 int value = INTVAL (operands[2]);
2638 int max, i;
2639 void (*gen_fun) (int, int, rtx);
2640
2641 /* This operation is used by and_shl for SImode values with a few
2642 high bits known to be cleared. */
2643 value &= 31;
2644 if (value == 0)
2645 {
2646 emit_insn (gen_nop ());
2647 return;
2648 }
2649
2650 gen_fun = GET_MODE (operands[0]) == HImode ? gen_ashift_hi : gen_ashift;
2651 if (code == ASHIFT)
2652 {
2653 max = ext_shift_insns[value];
2654 for (i = 0; i < max; i++)
2655 gen_fun (code, ext_shift_amounts[value][i], operands[0]);
2656 }
2657 else
2658 /* When shifting right, emit the shifts in reverse order, so that
2659 solitary negative values come first. */
2660 for (i = ext_shift_insns[value] - 1; i >= 0; i--)
2661 gen_fun (code, ext_shift_amounts[value][i], operands[0]);
2662 }
2663
2664 /* Output RTL for an arithmetic right shift. */
2665
2666 /* ??? Rewrite to use super-optimizer sequences. */
2667
2668 int
2669 expand_ashiftrt (rtx *operands)
2670 {
2671 rtx wrk;
2672 char func[18];
2673 int value;
2674
2675 if (TARGET_SH3)
2676 {
2677 if (GET_CODE (operands[2]) != CONST_INT)
2678 {
2679 rtx count = copy_to_mode_reg (SImode, operands[2]);
2680 emit_insn (gen_negsi2 (count, count));
2681 emit_insn (gen_ashrsi3_d (operands[0], operands[1], count));
2682 return 1;
2683 }
2684 else if (ashiftrt_insns[INTVAL (operands[2]) & 31]
2685 > 1 + SH_DYNAMIC_SHIFT_COST)
2686 {
2687 rtx count
2688 = force_reg (SImode, GEN_INT (- (INTVAL (operands[2]) & 31)));
2689 emit_insn (gen_ashrsi3_d (operands[0], operands[1], count));
2690 return 1;
2691 }
2692 }
2693 if (GET_CODE (operands[2]) != CONST_INT)
2694 return 0;
2695
2696 value = INTVAL (operands[2]) & 31;
2697
2698 if (value == 31)
2699 {
2700 /* If we are called from abs expansion, arrange things so that we
2701 can use a single MT instruction that doesn't clobber the source,
2702 if LICM can hoist out the load of the constant zero. */
2703 if (currently_expanding_to_rtl)
2704 {
2705 emit_insn (gen_cmpgtsi_t (force_reg (SImode, CONST0_RTX (SImode)),
2706 operands[1]));
2707 emit_insn (gen_mov_neg_si_t (operands[0]));
2708 return 1;
2709 }
2710 emit_insn (gen_ashrsi2_31 (operands[0], operands[1]));
2711 return 1;
2712 }
2713 else if (value >= 16 && value <= 19)
2714 {
2715 wrk = gen_reg_rtx (SImode);
2716 emit_insn (gen_ashrsi2_16 (wrk, operands[1]));
2717 value -= 16;
2718 while (value--)
2719 gen_ashift (ASHIFTRT, 1, wrk);
2720 emit_move_insn (operands[0], wrk);
2721 return 1;
2722 }
2723 /* Expand a short sequence inline; for longer sequences, call a magic routine. */
2724 else if (value <= 5)
2725 {
2726 wrk = gen_reg_rtx (SImode);
2727 emit_move_insn (wrk, operands[1]);
2728 while (value--)
2729 gen_ashift (ASHIFTRT, 1, wrk);
2730 emit_move_insn (operands[0], wrk);
2731 return 1;
2732 }
2733
2734 wrk = gen_reg_rtx (Pmode);
2735
2736 /* Load the value into an arg reg and call a helper. */
2737 emit_move_insn (gen_rtx_REG (SImode, 4), operands[1]);
2738 sprintf (func, "__ashiftrt_r4_%d", value);
2739 function_symbol (wrk, func, SFUNC_STATIC);
2740 emit_insn (gen_ashrsi3_n (GEN_INT (value), wrk));
2741 emit_move_insn (operands[0], gen_rtx_REG (SImode, 4));
2742 return 1;
2743 }
2744
2745 int
2746 sh_dynamicalize_shift_p (rtx count)
2747 {
2748 return shift_insns[INTVAL (count)] > 1 + SH_DYNAMIC_SHIFT_COST;
2749 }
2750
2751 /* Try to find a good way to implement the combiner pattern
2752 [(set (match_operand:SI 0 "register_operand" "r")
2753 (and:SI (ashift:SI (match_operand:SI 1 "register_operand" "r")
2754 (match_operand:SI 2 "const_int_operand" "n"))
2755 (match_operand:SI 3 "const_int_operand" "n"))) .
2756 LEFT_RTX is operand 2 in the above pattern, and MASK_RTX is operand 3.
2757 return 0 for simple right / left or left/right shift combination.
2758 return 1 for a combination of shifts with zero_extend.
2759 return 2 for a combination of shifts with an AND that needs r0.
2760 return 3 for a combination of shifts with an AND that needs an extra
2761 scratch register, when the three highmost bits of the AND mask are clear.
2762 return 4 for a combination of shifts with an AND that needs an extra
2763 scratch register, when any of the three highmost bits of the AND mask
2764 is set.
2765 If ATTRP is set, store an initial right shift width in ATTRP[0],
2766 and the instruction length in ATTRP[1]. These values are not valid
2767 when returning 0.
2768 When ATTRP is set and returning 1, ATTRP[2] gets set to the index into
2769 shift_amounts for the last shift value that is to be used before the
2770 sign extend. */
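/* A worked example (illustrative only, not from the original sources):
   with LEFT_RTX == 2 and MASK_RTX == 0x3fc, the pattern computes
   (x << 2) & 0x3fc, which equals (x & 0xff) << 2; this should come out as
   kind 1, i.e. a zero extension (extu.b) followed by a 2-bit left shift --
   two insns instead of the three a plain shift pair would need.  */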
2771 int
2772 shl_and_kind (rtx left_rtx, rtx mask_rtx, int *attrp)
2773 {
2774 unsigned HOST_WIDE_INT mask, lsb, mask2, lsb2;
2775 int left = INTVAL (left_rtx), right;
2776 int best = 0;
2777 int cost, best_cost = 10000;
2778 int best_right = 0, best_len = 0;
2779 int i;
2780 int can_ext;
2781
2782 if (left < 0 || left > 31)
2783 return 0;
2784 if (GET_CODE (mask_rtx) == CONST_INT)
2785 mask = (unsigned HOST_WIDE_INT) INTVAL (mask_rtx) >> left;
2786 else
2787 mask = (unsigned HOST_WIDE_INT) GET_MODE_MASK (SImode) >> left;
2788 /* Can this be expressed as a right shift / left shift pair? */
2789 lsb = ((mask ^ (mask - 1)) >> 1) + 1;
2790 right = exact_log2 (lsb);
2791 mask2 = ~(mask + lsb - 1);
2792 lsb2 = ((mask2 ^ (mask2 - 1)) >> 1) + 1;
2793 /* The only zeroes in mask are trailing zeroes <==> ! mask2 */
2794 if (! mask2)
2795 best_cost = shift_insns[right] + shift_insns[right + left];
2796 /* mask has no trailing zeroes <==> ! right */
2797 else if (! right && mask2 == ~(lsb2 - 1))
2798 {
2799 int late_right = exact_log2 (lsb2);
2800 best_cost = shift_insns[left + late_right] + shift_insns[late_right];
2801 }
2802 /* Try to use zero extend. */
2803 if (mask2 == ~(lsb2 - 1))
2804 {
2805 int width, first;
2806
2807 for (width = 8; width <= 16; width += 8)
2808 {
2809 /* Can we zero-extend right away? */
2810 if (lsb2 == (unsigned HOST_WIDE_INT) 1 << width)
2811 {
2812 cost
2813 = 1 + ext_shift_insns[right] + ext_shift_insns[left + right];
2814 if (cost < best_cost)
2815 {
2816 best = 1;
2817 best_cost = cost;
2818 best_right = right;
2819 best_len = cost;
2820 if (attrp)
2821 attrp[2] = -1;
2822 }
2823 continue;
2824 }
2825 /* ??? Could try to put zero extend into initial right shift,
2826 or even shift a bit left before the right shift. */
2827 /* Determine value of first part of left shift, to get to the
2828 zero extend cut-off point. */
2829 first = width - exact_log2 (lsb2) + right;
2830 if (first >= 0 && right + left - first >= 0)
2831 {
2832 cost = ext_shift_insns[right] + ext_shift_insns[first] + 1
2833 + ext_shift_insns[right + left - first];
2834 if (cost < best_cost)
2835 {
2836 best = 1;
2837 best_cost = cost;
2838 best_right = right;
2839 best_len = cost;
2840 if (attrp)
2841 attrp[2] = first;
2842 }
2843 }
2844 }
2845 }
2846 /* Try to use r0 AND pattern */
2847 for (i = 0; i <= 2; i++)
2848 {
2849 if (i > right)
2850 break;
2851 if (! CONST_OK_FOR_K08 (mask >> i))
2852 continue;
2853 cost = (i != 0) + 2 + ext_shift_insns[left + i];
2854 if (cost < best_cost)
2855 {
2856 best = 2;
2857 best_cost = cost;
2858 best_right = i;
2859 best_len = cost - 1;
2860 }
2861 }
2862 /* Try to use a scratch register to hold the AND operand. */
2863 can_ext = ((mask << left) & ((unsigned HOST_WIDE_INT) 3 << 30)) == 0;
2864 for (i = 0; i <= 2; i++)
2865 {
2866 if (i > right)
2867 break;
2868 cost = (i != 0) + (CONST_OK_FOR_I08 (mask >> i) ? 2 : 3)
2869 + (can_ext ? ext_shift_insns : shift_insns)[left + i];
2870 if (cost < best_cost)
2871 {
2872 best = 4 - can_ext;
2873 best_cost = cost;
2874 best_right = i;
2875 best_len = cost - 1 - ! CONST_OK_FOR_I08 (mask >> i);
2876 }
2877 }
2878
2879 if (attrp)
2880 {
2881 attrp[0] = best_right;
2882 attrp[1] = best_len;
2883 }
2884 return best;
2885 }
2886
2887 /* This is used in length attributes of the unnamed instructions
2888 corresponding to shl_and_kind return values of 1 and 2. */
2889 int
2890 shl_and_length (rtx insn)
2891 {
2892 rtx set_src, left_rtx, mask_rtx;
2893 int attributes[3];
2894
2895 set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
2896 left_rtx = XEXP (XEXP (set_src, 0), 1);
2897 mask_rtx = XEXP (set_src, 1);
2898 shl_and_kind (left_rtx, mask_rtx, attributes);
2899 return attributes[1];
2900 }
2901
2902 /* This is used in the length attribute of the and_shl_scratch instruction. */
2903
2904 int
2905 shl_and_scr_length (rtx insn)
2906 {
2907 rtx set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
2908 int len = shift_insns[INTVAL (XEXP (set_src, 1))];
2909 rtx op = XEXP (set_src, 0);
2910 len += shift_insns[INTVAL (XEXP (op, 1))] + 1;
2911 op = XEXP (XEXP (op, 0), 0);
2912 return len + shift_insns[INTVAL (XEXP (op, 1))];
2913 }
2914
2915 /* Generate rtl for instructions for which shl_and_kind advised a particular
2916 method of generating them, i.e. returned zero. */
2917
2918 int
2919 gen_shl_and (rtx dest, rtx left_rtx, rtx mask_rtx, rtx source)
2920 {
2921 int attributes[3];
2922 unsigned HOST_WIDE_INT mask;
2923 int kind = shl_and_kind (left_rtx, mask_rtx, attributes);
2924 int right, total_shift;
2925 void (*shift_gen_fun) (int, rtx *) = gen_shifty_hi_op;
2926
2927 right = attributes[0];
2928 total_shift = INTVAL (left_rtx) + right;
2929 mask = (unsigned HOST_WIDE_INT) INTVAL (mask_rtx) >> total_shift;
2930 switch (kind)
2931 {
2932 default:
2933 return -1;
2934 case 1:
2935 {
2936 int first = attributes[2];
2937 rtx operands[3];
2938
2939 if (first < 0)
2940 {
2941 emit_insn ((mask << right) <= 0xff
2942 ? gen_zero_extendqisi2 (dest,
2943 gen_lowpart (QImode, source))
2944 : gen_zero_extendhisi2 (dest,
2945 gen_lowpart (HImode, source)));
2946 source = dest;
2947 }
2948 if (source != dest)
2949 emit_insn (gen_movsi (dest, source));
2950 operands[0] = dest;
2951 if (right)
2952 {
2953 operands[2] = GEN_INT (right);
2954 gen_shifty_hi_op (LSHIFTRT, operands);
2955 }
2956 if (first > 0)
2957 {
2958 operands[2] = GEN_INT (first);
2959 gen_shifty_hi_op (ASHIFT, operands);
2960 total_shift -= first;
2961 mask <<= first;
2962 }
2963 if (first >= 0)
2964 emit_insn (mask <= 0xff
2965 ? gen_zero_extendqisi2 (dest, gen_lowpart (QImode, dest))
2966 : gen_zero_extendhisi2 (dest, gen_lowpart (HImode, dest)));
2967 if (total_shift > 0)
2968 {
2969 operands[2] = GEN_INT (total_shift);
2970 gen_shifty_hi_op (ASHIFT, operands);
2971 }
2972 break;
2973 }
2974 case 4:
2975 shift_gen_fun = gen_shifty_op;
2976 case 3:
2977 /* If the topmost bit that matters is set, set the topmost bits
2978 that don't matter. This way, we might be able to get a shorter
2979 signed constant. */
2980 if (mask & ((HOST_WIDE_INT) 1 << (31 - total_shift)))
2981 mask |= (HOST_WIDE_INT) ~0 << (31 - total_shift);
2982 case 2:
2983 /* Don't expand fine-grained when combining, because that will
2984 make the pattern fail. */
2985 if (currently_expanding_to_rtl
2986 || reload_in_progress || reload_completed)
2987 {
2988 rtx operands[3];
2989
2990 /* Cases 3 and 4 should be handled by this split
2991 only while combining */
2992 gcc_assert (kind <= 2);
2993 if (right)
2994 {
2995 emit_insn (gen_lshrsi3 (dest, source, GEN_INT (right)));
2996 source = dest;
2997 }
2998 emit_insn (gen_andsi3 (dest, source, GEN_INT (mask)));
2999 if (total_shift)
3000 {
3001 operands[0] = dest;
3002 operands[1] = dest;
3003 operands[2] = GEN_INT (total_shift);
3004 shift_gen_fun (ASHIFT, operands);
3005 }
3006 break;
3007 }
3008 else
3009 {
3010 int neg = 0;
3011 if (kind != 4 && total_shift < 16)
3012 {
3013 neg = -ext_shift_amounts[total_shift][1];
3014 if (neg > 0)
3015 neg -= ext_shift_amounts[total_shift][2];
3016 else
3017 neg = 0;
3018 }
3019 emit_insn (gen_and_shl_scratch (dest, source,
3020 GEN_INT (right),
3021 GEN_INT (mask),
3022 GEN_INT (total_shift + neg),
3023 GEN_INT (neg)));
3024 emit_insn (gen_movsi (dest, dest));
3025 break;
3026 }
3027 }
3028 return 0;
3029 }
3030
3031 /* Try to find a good way to implement the combiner pattern
3032 [(set (match_operand:SI 0 "register_operand" "=r")
3033 (sign_extract:SI (ashift:SI (match_operand:SI 1 "register_operand" "r")
3034 (match_operand:SI 2 "const_int_operand" "n")
3035 (match_operand:SI 3 "const_int_operand" "n")
3036 (const_int 0)))
3037 (clobber (reg:SI T_REG))]
3038 LEFT_RTX is operand 2 in the above pattern, and SIZE_RTX is operand 3.
3039 return 0 for simple left / right shift combination.
3040 return 1 for left shift / 8 bit sign extend / left shift.
3041 return 2 for left shift / 16 bit sign extend / left shift.
3042 return 3 for left shift / 8 bit sign extend / shift / sign extend.
3043 return 4 for left shift / 16 bit sign extend / shift / sign extend.
3044 return 5 for left shift / 16 bit sign extend / right shift
3045 return 6 for < 8 bit sign extend / left shift.
3046 return 7 for < 8 bit sign extend / left shift / single right shift.
3047 If COSTP is nonzero, assign the calculated cost to *COSTP. */
3048
3049 int
3050 shl_sext_kind (rtx left_rtx, rtx size_rtx, int *costp)
3051 {
3052 int left, size, insize, ext;
3053 int cost = 0, best_cost;
3054 int kind;
3055
3056 left = INTVAL (left_rtx);
3057 size = INTVAL (size_rtx);
3058 insize = size - left;
3059 gcc_assert (insize > 0);
3060 /* Default to left / right shift. */
3061 kind = 0;
3062 best_cost = shift_insns[32 - insize] + ashiftrt_insns[32 - size];
3063 if (size <= 16)
3064 {
3065 /* 16 bit shift / sign extend / 16 bit shift */
3066 cost = shift_insns[16 - insize] + 1 + ashiftrt_insns[16 - size];
3067 /* If ashiftrt_insns[16 - size] is 8, this choice will be overridden
3068 below, by alternative 3 or something even better. */
3069 if (cost < best_cost)
3070 {
3071 kind = 5;
3072 best_cost = cost;
3073 }
3074 }
3075 /* Try a plain sign extend between two shifts. */
3076 for (ext = 16; ext >= insize; ext -= 8)
3077 {
3078 if (ext <= size)
3079 {
3080 cost = ext_shift_insns[ext - insize] + 1 + shift_insns[size - ext];
3081 if (cost < best_cost)
3082 {
3083 kind = ext / (unsigned) 8;
3084 best_cost = cost;
3085 }
3086 }
3087 /* Check if we can do a sloppy shift with a final signed shift
3088 restoring the sign. */
3089 if (EXT_SHIFT_SIGNED (size - ext))
3090 cost = ext_shift_insns[ext - insize] + ext_shift_insns[size - ext] + 1;
3091 /* If not, maybe it's still cheaper to do the second shift sloppy,
3092 and do a final sign extend? */
3093 else if (size <= 16)
3094 cost = ext_shift_insns[ext - insize] + 1
3095 + ext_shift_insns[size > ext ? size - ext : ext - size] + 1;
3096 else
3097 continue;
3098 if (cost < best_cost)
3099 {
3100 kind = ext / (unsigned) 8 + 2;
3101 best_cost = cost;
3102 }
3103 }
3104 /* Check if we can sign extend in r0 */
3105 if (insize < 8)
3106 {
3107 cost = 3 + shift_insns[left];
3108 if (cost < best_cost)
3109 {
3110 kind = 6;
3111 best_cost = cost;
3112 }
3113 /* Try the same with a final signed shift. */
3114 if (left < 31)
3115 {
3116 cost = 3 + ext_shift_insns[left + 1] + 1;
3117 if (cost < best_cost)
3118 {
3119 kind = 7;
3120 best_cost = cost;
3121 }
3122 }
3123 }
3124 if (TARGET_SH3)
3125 {
3126 /* Try to use a dynamic shift. */
3127 cost = shift_insns[32 - insize] + 1 + SH_DYNAMIC_SHIFT_COST;
3128 if (cost < best_cost)
3129 {
3130 kind = 0;
3131 best_cost = cost;
3132 }
3133 }
3134 if (costp)
3135 *costp = cost;
3136 return kind;
3137 }
3138
3139 /* Function to be used in the length attribute of the instructions
3140 implementing this pattern. */
3141
3142 int
3143 shl_sext_length (rtx insn)
3144 {
3145 rtx set_src, left_rtx, size_rtx;
3146 int cost;
3147
3148 set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
3149 left_rtx = XEXP (XEXP (set_src, 0), 1);
3150 size_rtx = XEXP (set_src, 1);
3151 shl_sext_kind (left_rtx, size_rtx, &cost);
3152 return cost;
3153 }
3154
3155 /* Generate rtl for this pattern */
3156
3157 int
3158 gen_shl_sext (rtx dest, rtx left_rtx, rtx size_rtx, rtx source)
3159 {
3160 int kind;
3161 int left, size, insize, cost;
3162 rtx operands[3];
3163
3164 kind = shl_sext_kind (left_rtx, size_rtx, &cost);
3165 left = INTVAL (left_rtx);
3166 size = INTVAL (size_rtx);
3167 insize = size - left;
3168 switch (kind)
3169 {
3170 case 1:
3171 case 2:
3172 case 3:
3173 case 4:
3174 {
3175 int ext = kind & 1 ? 8 : 16;
3176 int shift2 = size - ext;
3177
3178 /* Don't expand fine-grained when combining, because that will
3179 make the pattern fail. */
3180 if (! currently_expanding_to_rtl
3181 && ! reload_in_progress && ! reload_completed)
3182 {
3183 emit_insn (gen_shl_sext_ext (dest, source, left_rtx, size_rtx));
3184 emit_insn (gen_movsi (dest, source));
3185 break;
3186 }
3187 if (dest != source)
3188 emit_insn (gen_movsi (dest, source));
3189 operands[0] = dest;
3190 if (ext - insize)
3191 {
3192 operands[2] = GEN_INT (ext - insize);
3193 gen_shifty_hi_op (ASHIFT, operands);
3194 }
3195 emit_insn (kind & 1
3196 ? gen_extendqisi2 (dest, gen_lowpart (QImode, dest))
3197 : gen_extendhisi2 (dest, gen_lowpart (HImode, dest)));
3198 if (kind <= 2)
3199 {
3200 if (shift2)
3201 {
3202 operands[2] = GEN_INT (shift2);
3203 gen_shifty_op (ASHIFT, operands);
3204 }
3205 }
3206 else
3207 {
3208 if (shift2 > 0)
3209 {
3210 if (EXT_SHIFT_SIGNED (shift2))
3211 {
3212 operands[2] = GEN_INT (shift2 + 1);
3213 gen_shifty_op (ASHIFT, operands);
3214 operands[2] = const1_rtx;
3215 gen_shifty_op (ASHIFTRT, operands);
3216 break;
3217 }
3218 operands[2] = GEN_INT (shift2);
3219 gen_shifty_hi_op (ASHIFT, operands);
3220 }
3221 else if (shift2)
3222 {
3223 operands[2] = GEN_INT (-shift2);
3224 gen_shifty_hi_op (LSHIFTRT, operands);
3225 }
3226 emit_insn (size <= 8
3227 ? gen_extendqisi2 (dest, gen_lowpart (QImode, dest))
3228 : gen_extendhisi2 (dest, gen_lowpart (HImode, dest)));
3229 }
3230 break;
3231 }
3232 case 5:
3233 {
3234 int i = 16 - size;
3235 if (! currently_expanding_to_rtl
3236 && ! reload_in_progress && ! reload_completed)
3237 emit_insn (gen_shl_sext_ext (dest, source, left_rtx, size_rtx));
3238 else
3239 {
3240 operands[0] = dest;
3241 operands[2] = GEN_INT (16 - insize);
3242 gen_shifty_hi_op (ASHIFT, operands);
3243 emit_insn (gen_extendhisi2 (dest, gen_lowpart (HImode, dest)));
3244 }
3245 /* Don't use gen_ashrsi3 because it generates new pseudos. */
3246 while (--i >= 0)
3247 gen_ashift (ASHIFTRT, 1, dest);
3248 break;
3249 }
3250 case 6:
3251 case 7:
3252 /* Don't expand fine-grained when combining, because that will
3253 make the pattern fail. */
3254 if (! currently_expanding_to_rtl
3255 && ! reload_in_progress && ! reload_completed)
3256 {
3257 emit_insn (gen_shl_sext_ext (dest, source, left_rtx, size_rtx));
3258 emit_insn (gen_movsi (dest, source));
3259 break;
3260 }
3261 emit_insn (gen_andsi3 (dest, source, GEN_INT ((1 << insize) - 1)));
3262 emit_insn (gen_xorsi3 (dest, dest, GEN_INT (1 << (insize - 1))));
3263 emit_insn (gen_addsi3 (dest, dest, GEN_INT (-1 << (insize - 1))));
3264 operands[0] = dest;
3265 operands[2] = kind == 7 ? GEN_INT (left + 1) : left_rtx;
3266 gen_shifty_op (ASHIFT, operands);
3267 if (kind == 7)
3268 emit_insn (gen_ashrsi3_k (dest, dest, const1_rtx));
3269 break;
3270 default:
3271 return -1;
3272 }
3273 return 0;
3274 }
3275
3276 /* Prefix a symbol_ref name with "datalabel". */
3277
3278 rtx
3279 gen_datalabel_ref (rtx sym)
3280 {
3281 const char *str;
3282
3283 if (GET_CODE (sym) == LABEL_REF)
3284 return gen_rtx_CONST (GET_MODE (sym),
3285 gen_rtx_UNSPEC (GET_MODE (sym),
3286 gen_rtvec (1, sym),
3287 UNSPEC_DATALABEL));
3288
3289 gcc_assert (GET_CODE (sym) == SYMBOL_REF);
3290
3291 str = XSTR (sym, 0);
3292 /* Share all SYMBOL_REF strings with the same value - that is important
3293 for cse. */
3294 str = IDENTIFIER_POINTER (get_identifier (str));
3295 XSTR (sym, 0) = str;
3296
3297 return sym;
3298 }
3299
3300 \f
3301 static alloc_pool label_ref_list_pool;
3302
3303 typedef struct label_ref_list_d
3304 {
3305 rtx label;
3306 struct label_ref_list_d *next;
3307 } *label_ref_list_t;
3308
3309 /* The SH cannot load a large constant into a register; constants have to
3310 come from a pc relative load. The reference of a pc relative load
3311 instruction must be less than 1k in front of the instruction. This
3312 means that we often have to dump a constant inside a function, and
3313 generate code to branch around it.
3314
3315 It is important to minimize this, since the branches will slow things
3316 down and make things bigger.
3317
3318 Worst case code looks like:
3319
3320 mov.l L1,rn
3321 bra L2
3322 nop
3323 align
3324 L1: .long value
3325 L2:
3326 ..
3327
3328 mov.l L3,rn
3329 bra L4
3330 nop
3331 align
3332 L3: .long value
3333 L4:
3334 ..
3335
3336 We fix this by performing a scan before scheduling, which notices which
3337 instructions need to have their operands fetched from the constant table
3338 and builds the table.
3339
3340 The algorithm is:
3341
3342 scan, find an instruction which needs a pcrel move. Look forward, find the
3343 last barrier which is within MAX_COUNT bytes of the requirement.
3344 If there isn't one, make one. Process all the instructions between
3345 the find and the barrier.
3346
3347 In the above example, we can tell that L3 is within 1k of L1, so
3348 the first move can be shrunk from the 3 insn+constant sequence into
3349 just 1 insn, and the constant moved to L3 to make:
3350
3351 mov.l L1,rn
3352 ..
3353 mov.l L3,rn
3354 bra L4
3355 nop
3356 align
3357 L3:.long value
3358 L4:.long value
3359
3360 Then the second move becomes the target for the shortening process. */
3361
3362 typedef struct
3363 {
3364 rtx value; /* Value in table. */
3365 rtx label; /* Label of value. */
3366 label_ref_list_t wend; /* End of window. */
3367 enum machine_mode mode; /* Mode of value. */
3368
3369 /* True if this constant is accessed as part of a post-increment
3370 sequence. Note that HImode constants are never accessed in this way. */
3371 bool part_of_sequence_p;
3372 } pool_node;
3373
3374 /* The maximum number of constants that can fit into one pool, since
3375 constants in the range 0..510 are at least 2 bytes long, and in the
3376 range from there to 1018 at least 4 bytes. */
3377
3378 #define MAX_POOL_SIZE 372
3379 static pool_node pool_vector[MAX_POOL_SIZE];
3380 static int pool_size;
3381 static rtx pool_window_label;
3382 static int pool_window_last;
3383
3384 static int max_labelno_before_reorg;
3385
3386 /* ??? If we need a constant in HImode which is the truncated value of a
3387 constant we need in SImode, we could combine the two entries thus saving
3388 two bytes. Is this common enough to be worth the effort of implementing
3389 it? */
3390
3391 /* ??? This stuff should be done at the same time that we shorten branches.
3392 As it is now, we must assume that all branches are the maximum size, and
3393 this causes us to almost always output constant pools sooner than
3394 necessary. */
3395
3396 /* Add a constant to the pool and return its label. */
3397
3398 static rtx
3399 add_constant (rtx x, enum machine_mode mode, rtx last_value)
3400 {
3401 int i;
3402 rtx lab, new;
3403 label_ref_list_t ref, newref;
3404
3405 /* First see if we've already got it. */
3406 for (i = 0; i < pool_size; i++)
3407 {
3408 if (x->code == pool_vector[i].value->code
3409 && mode == pool_vector[i].mode)
3410 {
3411 if (x->code == CODE_LABEL)
3412 {
3413 if (XINT (x, 3) != XINT (pool_vector[i].value, 3))
3414 continue;
3415 }
3416 if (rtx_equal_p (x, pool_vector[i].value))
3417 {
3418 lab = new = 0;
3419 if (! last_value
3420 || ! i
3421 || ! rtx_equal_p (last_value, pool_vector[i-1].value))
3422 {
3423 new = gen_label_rtx ();
3424 LABEL_REFS (new) = pool_vector[i].label;
3425 pool_vector[i].label = lab = new;
3426 }
3427 if (lab && pool_window_label)
3428 {
3429 newref = (label_ref_list_t) pool_alloc (label_ref_list_pool);
3430 newref->label = pool_window_label;
3431 ref = pool_vector[pool_window_last].wend;
3432 newref->next = ref;
3433 pool_vector[pool_window_last].wend = newref;
3434 }
3435 if (new)
3436 pool_window_label = new;
3437 pool_window_last = i;
3438 return lab;
3439 }
3440 }
3441 }
3442
3443 /* Need a new one. */
3444 pool_vector[pool_size].value = x;
3445 if (last_value && rtx_equal_p (last_value, pool_vector[pool_size - 1].value))
3446 {
3447 lab = 0;
3448 pool_vector[pool_size - 1].part_of_sequence_p = true;
3449 }
3450 else
3451 lab = gen_label_rtx ();
3452 pool_vector[pool_size].mode = mode;
3453 pool_vector[pool_size].label = lab;
3454 pool_vector[pool_size].wend = NULL;
3455 pool_vector[pool_size].part_of_sequence_p = (lab == 0);
3456 if (lab && pool_window_label)
3457 {
3458 newref = (label_ref_list_t) pool_alloc (label_ref_list_pool);
3459 newref->label = pool_window_label;
3460 ref = pool_vector[pool_window_last].wend;
3461 newref->next = ref;
3462 pool_vector[pool_window_last].wend = newref;
3463 }
3464 if (lab)
3465 pool_window_label = lab;
3466 pool_window_last = pool_size;
3467 pool_size++;
3468 return lab;
3469 }
3470
3471 /* Output the literal table. START, if nonzero, is the first instruction
3472 this table is needed for, and also indicates that there is at least one
3473 casesi_worker_2 instruction; we have to emit the operand3 labels from
3474 these insns at a 4-byte aligned position. BARRIER is the barrier
3475 after which we are to place the table. */
3476
3477 static void
3478 dump_table (rtx start, rtx barrier)
3479 {
3480 rtx scan = barrier;
3481 int i;
3482 int need_align = 1;
3483 rtx lab;
3484 label_ref_list_t ref;
3485 int have_df = 0;
3486
3487 /* Do two passes; on the first pass, dump out the HImode sized constants. */
3488
3489 for (i = 0; i < pool_size; i++)
3490 {
3491 pool_node *p = &pool_vector[i];
3492
3493 if (p->mode == HImode)
3494 {
3495 if (need_align)
3496 {
3497 scan = emit_insn_after (gen_align_2 (), scan);
3498 need_align = 0;
3499 }
3500 for (lab = p->label; lab; lab = LABEL_REFS (lab))
3501 scan = emit_label_after (lab, scan);
3502 scan = emit_insn_after (gen_consttable_2 (p->value, const0_rtx),
3503 scan);
3504 for (ref = p->wend; ref; ref = ref->next)
3505 {
3506 lab = ref->label;
3507 scan = emit_insn_after (gen_consttable_window_end (lab), scan);
3508 }
3509 }
3510 else if (p->mode == DFmode)
3511 have_df = 1;
3512 }
3513
3514 need_align = 1;
3515
3516 if (start)
3517 {
3518 scan = emit_insn_after (gen_align_4 (), scan);
3519 need_align = 0;
3520 for (; start != barrier; start = NEXT_INSN (start))
3521 if (GET_CODE (start) == INSN
3522 && recog_memoized (start) == CODE_FOR_casesi_worker_2)
3523 {
3524 rtx src = SET_SRC (XVECEXP (PATTERN (start), 0, 0));
3525 rtx lab = XEXP (XVECEXP (src, 0, 3), 0);
3526
3527 scan = emit_label_after (lab, scan);
3528 }
3529 }
3530 if (TARGET_FMOVD && TARGET_ALIGN_DOUBLE && have_df)
3531 {
3532 rtx align_insn = NULL_RTX;
3533
3534 scan = emit_label_after (gen_label_rtx (), scan);
3535 scan = emit_insn_after (gen_align_log (GEN_INT (3)), scan);
3536 need_align = 0;
3537
3538 for (i = 0; i < pool_size; i++)
3539 {
3540 pool_node *p = &pool_vector[i];
3541
3542 switch (p->mode)
3543 {
3544 case HImode:
3545 break;
3546 case SImode:
3547 case SFmode:
3548 if (align_insn && !p->part_of_sequence_p)
3549 {
3550 for (lab = p->label; lab; lab = LABEL_REFS (lab))
3551 emit_label_before (lab, align_insn);
3552 emit_insn_before (gen_consttable_4 (p->value, const0_rtx),
3553 align_insn);
3554 for (ref = p->wend; ref; ref = ref->next)
3555 {
3556 lab = ref->label;
3557 emit_insn_before (gen_consttable_window_end (lab),
3558 align_insn);
3559 }
3560 delete_insn (align_insn);
3561 align_insn = NULL_RTX;
3562 continue;
3563 }
3564 else
3565 {
3566 for (lab = p->label; lab; lab = LABEL_REFS (lab))
3567 scan = emit_label_after (lab, scan);
3568 scan = emit_insn_after (gen_consttable_4 (p->value,
3569 const0_rtx), scan);
3570 need_align = ! need_align;
3571 }
3572 break;
3573 case DFmode:
3574 if (need_align)
3575 {
3576 scan = emit_insn_after (gen_align_log (GEN_INT (3)), scan);
3577 align_insn = scan;
3578 need_align = 0;
3579 }
3580 case DImode:
3581 for (lab = p->label; lab; lab = LABEL_REFS (lab))
3582 scan = emit_label_after (lab, scan);
3583 scan = emit_insn_after (gen_consttable_8 (p->value, const0_rtx),
3584 scan);
3585 break;
3586 default:
3587 gcc_unreachable ();
3588 }
3589
3590 if (p->mode != HImode)
3591 {
3592 for (ref = p->wend; ref; ref = ref->next)
3593 {
3594 lab = ref->label;
3595 scan = emit_insn_after (gen_consttable_window_end (lab),
3596 scan);
3597 }
3598 }
3599 }
3600
3601 pool_size = 0;
3602 }
3603
3604 for (i = 0; i < pool_size; i++)
3605 {
3606 pool_node *p = &pool_vector[i];
3607
3608 switch (p->mode)
3609 {
3610 case HImode:
3611 break;
3612 case SImode:
3613 case SFmode:
3614 if (need_align)
3615 {
3616 need_align = 0;
3617 scan = emit_label_after (gen_label_rtx (), scan);
3618 scan = emit_insn_after (gen_align_4 (), scan);
3619 }
3620 for (lab = p->label; lab; lab = LABEL_REFS (lab))
3621 scan = emit_label_after (lab, scan);
3622 scan = emit_insn_after (gen_consttable_4 (p->value, const0_rtx),
3623 scan);
3624 break;
3625 case DFmode:
3626 case DImode:
3627 if (need_align)
3628 {
3629 need_align = 0;
3630 scan = emit_label_after (gen_label_rtx (), scan);
3631 scan = emit_insn_after (gen_align_4 (), scan);
3632 }
3633 for (lab = p->label; lab; lab = LABEL_REFS (lab))
3634 scan = emit_label_after (lab, scan);
3635 scan = emit_insn_after (gen_consttable_8 (p->value, const0_rtx),
3636 scan);
3637 break;
3638 default:
3639 gcc_unreachable ();
3640 }
3641
3642 if (p->mode != HImode)
3643 {
3644 for (ref = p->wend; ref; ref = ref->next)
3645 {
3646 lab = ref->label;
3647 scan = emit_insn_after (gen_consttable_window_end (lab), scan);
3648 }
3649 }
3650 }
3651
3652 scan = emit_insn_after (gen_consttable_end (), scan);
3653 scan = emit_barrier_after (scan);
3654 pool_size = 0;
3655 pool_window_label = NULL_RTX;
3656 pool_window_last = 0;
3657 }
3658
3659 /* Return nonzero if constant would be an ok source for a
3660 mov.w instead of a mov.l. */
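/* A PC-relative mov.w load sign-extends the fetched 16-bit value to 32 bits,
   hence the signed 16-bit range tested below; such constants need only a
   2-byte entry in the constant pool instead of a 4-byte one.  */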
3661
3662 static int
3663 hi_const (rtx src)
3664 {
3665 return (GET_CODE (src) == CONST_INT
3666 && INTVAL (src) >= -32768
3667 && INTVAL (src) <= 32767);
3668 }
3669
3670 #define MOVA_LABELREF(mova) XVECEXP (SET_SRC (PATTERN (mova)), 0, 0)
3671
3672 /* Nonzero if the insn is a move instruction which needs to be fixed. */
3673
3674 /* ??? For DImode/DFmode moves, we don't need to fix it if each half of the
3675    CONST_DOUBLE input value is CONST_OK_FOR_I08.  For an SFmode move, we don't
3676 need to fix it if the input value is CONST_OK_FOR_I08. */
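/* Here "needs to be fixed" means the constant cannot be loaded with a
   simple immediate move, so sh_reorg must place it in a constant table
   and turn the insn into a pc-relative load.  */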
3677
3678 static int
3679 broken_move (rtx insn)
3680 {
3681 if (GET_CODE (insn) == INSN)
3682 {
3683 rtx pat = PATTERN (insn);
3684 if (GET_CODE (pat) == PARALLEL)
3685 pat = XVECEXP (pat, 0, 0);
3686 if (GET_CODE (pat) == SET
3687 /* We can load any 8-bit value if we don't care what the high
3688 order bits end up as. */
3689 && GET_MODE (SET_DEST (pat)) != QImode
3690 && (CONSTANT_P (SET_SRC (pat))
3691 /* Match mova_const. */
3692 || (GET_CODE (SET_SRC (pat)) == UNSPEC
3693 && XINT (SET_SRC (pat), 1) == UNSPEC_MOVA
3694 && GET_CODE (XVECEXP (SET_SRC (pat), 0, 0)) == CONST))
3695 && ! (TARGET_SH2E
3696 && GET_CODE (SET_SRC (pat)) == CONST_DOUBLE
3697 && (fp_zero_operand (SET_SRC (pat))
3698 || fp_one_operand (SET_SRC (pat)))
3699 /* ??? If this is a -m4 or -m4-single compilation, in general
3700 we don't know the current setting of fpscr, so disable fldi.
3701 There is an exception if this was a register-register move
3702 before reload - and hence it was ascertained that we have
3703 single precision setting - and in a post-reload optimization
3704 we changed this to do a constant load. In that case
3705 we don't have an r0 clobber, hence we must use fldi. */
3706 && (! TARGET_SH4 || TARGET_FMOVD
3707 || (GET_CODE (XEXP (XVECEXP (PATTERN (insn), 0, 2), 0))
3708 == SCRATCH))
3709 && GET_CODE (SET_DEST (pat)) == REG
3710 && FP_REGISTER_P (REGNO (SET_DEST (pat))))
3711 && ! (TARGET_SH2A
3712 && GET_MODE (SET_DEST (pat)) == SImode
3713 && satisfies_constraint_I20 (SET_SRC (pat)))
3714 && ! satisfies_constraint_I08 (SET_SRC (pat)))
3715 return 1;
3716 }
3717
3718 return 0;
3719 }
3720
3721 static int
3722 mova_p (rtx insn)
3723 {
3724 return (GET_CODE (insn) == INSN
3725 && GET_CODE (PATTERN (insn)) == SET
3726 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
3727 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_MOVA
3728 /* Don't match mova_const. */
3729 && GET_CODE (MOVA_LABELREF (insn)) == LABEL_REF);
3730 }
3731
3732 /* Fix up a mova from a switch that went out of range. */
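/* Roughly: for non-PIC, the mova's source is replaced by a plain reference
   to the label, so broken_move will see it and it becomes a constant-pool
   (pc-relative) load.  For PIC, the matching casesi_worker_1 is rewritten
   to casesi_worker_2 and the mova instead loads a label difference wrapped
   in UNSPEC_PIC.  */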
3733 static void
3734 fixup_mova (rtx mova)
3735 {
3736 PUT_MODE (XEXP (MOVA_LABELREF (mova), 0), QImode);
3737 if (! flag_pic)
3738 {
3739 SET_SRC (PATTERN (mova)) = MOVA_LABELREF (mova);
3740 INSN_CODE (mova) = -1;
3741 }
3742 else
3743 {
3744 rtx worker = mova;
3745 rtx lab = gen_label_rtx ();
3746 rtx wpat, wpat0, wpat1, wsrc, diff;
3747
3748 do
3749 {
3750 worker = NEXT_INSN (worker);
3751 gcc_assert (worker
3752 && GET_CODE (worker) != CODE_LABEL
3753 && GET_CODE (worker) != JUMP_INSN);
3754 } while (GET_CODE (worker) == NOTE
3755 || recog_memoized (worker) != CODE_FOR_casesi_worker_1);
3756 wpat = PATTERN (worker);
3757 wpat0 = XVECEXP (wpat, 0, 0);
3758 wpat1 = XVECEXP (wpat, 0, 1);
3759 wsrc = SET_SRC (wpat0);
3760 PATTERN (worker) = (gen_casesi_worker_2
3761 (SET_DEST (wpat0), XVECEXP (wsrc, 0, 1),
3762 XEXP (XVECEXP (wsrc, 0, 2), 0), lab,
3763 XEXP (wpat1, 0)));
3764 INSN_CODE (worker) = -1;
3765 diff = gen_rtx_MINUS (Pmode, XVECEXP (SET_SRC (PATTERN (mova)), 0, 0),
3766 gen_rtx_LABEL_REF (Pmode, lab));
3767 diff = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, diff), UNSPEC_PIC);
3768 SET_SRC (PATTERN (mova)) = gen_rtx_CONST (Pmode, diff);
3769 INSN_CODE (mova) = -1;
3770 }
3771 }
3772
3773 /* NEW_MOVA is a mova we've just encountered while scanning forward. Update
3774 *num_mova, and check that the new mova is not nested within the first one.
3775 Return 0 if *first_mova was replaced, 1 if new_mova was replaced,
3776 2 if new_mova has been assigned to *first_mova, -1 otherwise. */
3777 static int
3778 untangle_mova (int *num_mova, rtx *first_mova, rtx new_mova)
3779 {
3780 int n_addr = 0; /* Initialization to shut up spurious warning. */
3781 int f_target, n_target = 0; /* Likewise. */
3782
3783 if (optimize)
3784 {
3785 n_addr = INSN_ADDRESSES (INSN_UID (new_mova));
3786 n_target = INSN_ADDRESSES (INSN_UID (XEXP (MOVA_LABELREF (new_mova), 0)));
3787 if (n_addr > n_target || n_addr + 1022 < n_target)
3788 {
3789 /* Change the mova into a load.
3790 broken_move will then return true for it. */
3791 fixup_mova (new_mova);
3792 return 1;
3793 }
3794 }
3795 if (!(*num_mova)++)
3796 {
3797 *first_mova = new_mova;
3798 return 2;
3799 }
3800 if (!optimize
3801 || ((f_target
3802 = INSN_ADDRESSES (INSN_UID (XEXP (MOVA_LABELREF (*first_mova), 0))))
3803 >= n_target))
3804 return -1;
3805
3806 (*num_mova)--;
3807 if (f_target - INSN_ADDRESSES (INSN_UID (*first_mova))
3808 > n_target - n_addr)
3809 {
3810 fixup_mova (*first_mova);
3811 return 0;
3812 }
3813 else
3814 {
3815 fixup_mova (new_mova);
3816 return 1;
3817 }
3818 }
3819
3820 /* Find the last barrier from insn FROM which is close enough to hold the
3821 constant pool. If we can't find one, then create one near the end of
3822 the range. */
3823
3824 static rtx
3825 find_barrier (int num_mova, rtx mova, rtx from)
3826 {
3827 int count_si = 0;
3828 int count_hi = 0;
3829 int found_hi = 0;
3830 int found_si = 0;
3831 int found_di = 0;
3832 int hi_align = 2;
3833 int si_align = 2;
3834 int leading_mova = num_mova;
3835 rtx barrier_before_mova = 0, found_barrier = 0, good_barrier = 0;
3836 int si_limit;
3837 int hi_limit;
3838
3839 /* For HImode: range is 510, add 4 because pc counts from address of
3840 second instruction after this one, subtract 2 for the jump instruction
3841 that we may need to emit before the table, subtract 2 for the instruction
3842 that fills the jump delay slot (in very rare cases, reorg will take an
3843 instruction from after the constant pool or will leave the delay slot
3844 empty). This gives 510.
3845 For SImode: range is 1020, add 4 because pc counts from address of
3846 second instruction after this one, subtract 2 in case pc is 2 byte
3847 aligned, subtract 2 for the jump instruction that we may need to emit
3848 before the table, subtract 2 for the instruction that fills the jump
3849 delay slot. This gives 1018. */
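   /* In other words: hi_limit = 510 + 4 - 2 - 2 = 510 and
      si_limit = 1020 + 4 - 2 - 2 - 2 = 1018, matching the assignments
      below.  */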
3850
3851 /* The branch will always be shortened now that the reference address for
3852 forward branches is the successor address, thus we need no longer make
3853 adjustments to the [sh]i_limit for -O0. */
3854
3855 si_limit = 1018;
3856 hi_limit = 510;
3857
3858 while (from && count_si < si_limit && count_hi < hi_limit)
3859 {
3860 int inc = get_attr_length (from);
3861 int new_align = 1;
3862
3863 /* If this is a label that existed at the time of the compute_alignments
3864 call, determine the alignment. N.B. When find_barrier recurses for
3865 an out-of-reach mova, we might see labels at the start of previously
3866 inserted constant tables. */
3867 if (GET_CODE (from) == CODE_LABEL
3868 && CODE_LABEL_NUMBER (from) <= max_labelno_before_reorg)
3869 {
3870 if (optimize)
3871 new_align = 1 << label_to_alignment (from);
3872 else if (GET_CODE (prev_nonnote_insn (from)) == BARRIER)
3873 new_align = 1 << barrier_align (from);
3874 else
3875 new_align = 1;
3876 inc = 0;
3877 }
3878 /* In case we are scanning a constant table because of recursion, check
3879 for explicit alignments. If the table is long, we might be forced
3880 to emit the new table in front of it; the length of the alignment
3881 might be the last straw. */
3882 else if (GET_CODE (from) == INSN
3883 && GET_CODE (PATTERN (from)) == UNSPEC_VOLATILE
3884 && XINT (PATTERN (from), 1) == UNSPECV_ALIGN)
3885 new_align = INTVAL (XVECEXP (PATTERN (from), 0, 0));
3886 /* When we find the end of a constant table, paste the new constant
3887 at the end. That is better than putting it in front because
3888 this way, we don't need extra alignment for adding a 4-byte-aligned
3889 mov(a) label to a 2/4 or 8/4 byte aligned table. */
3890 else if (GET_CODE (from) == INSN
3891 && GET_CODE (PATTERN (from)) == UNSPEC_VOLATILE
3892 && XINT (PATTERN (from), 1) == UNSPECV_CONST_END)
3893 return from;
3894
3895 if (GET_CODE (from) == BARRIER)
3896 {
3897
3898 found_barrier = from;
3899
3900 /* If we are at the end of the function, or in front of an alignment
3901 instruction, we need not insert an extra alignment. We prefer
3902 this kind of barrier. */
3903 if (barrier_align (from) > 2)
3904 good_barrier = from;
3905 }
3906
3907 if (broken_move (from))
3908 {
3909 rtx pat, src, dst;
3910 enum machine_mode mode;
3911
3912 pat = PATTERN (from);
3913 if (GET_CODE (pat) == PARALLEL)
3914 pat = XVECEXP (pat, 0, 0);
3915 src = SET_SRC (pat);
3916 dst = SET_DEST (pat);
3917 mode = GET_MODE (dst);
3918
3919 /* We must explicitly check the mode, because sometimes the
3920 front end will generate code to load unsigned constants into
3921 HImode targets without properly sign extending them. */
3922 if (mode == HImode
3923 || (mode == SImode && hi_const (src) && REGNO (dst) != FPUL_REG))
3924 {
3925 found_hi += 2;
3926 /* We put the short constants before the long constants, so
3927 we must count the length of short constants in the range
3928 for the long constants. */
3929 /* ??? This isn't optimal, but is easy to do. */
3930 si_limit -= 2;
3931 }
3932 else
3933 {
3934 /* We dump DF/DI constants before SF/SI ones, because
3935 the limit is the same, but the alignment requirements
3936 are higher. We may waste up to 4 additional bytes
3937 for alignment, and the DF/DI constant may have
3938 another SF/SI constant placed before it. */
3939 if (TARGET_SHCOMPACT
3940 && ! found_di
3941 && (mode == DFmode || mode == DImode))
3942 {
3943 found_di = 1;
3944 si_limit -= 8;
3945 }
3946 while (si_align > 2 && found_si + si_align - 2 > count_si)
3947 si_align >>= 1;
3948 if (found_si > count_si)
3949 count_si = found_si;
3950 found_si += GET_MODE_SIZE (mode);
3951 if (num_mova)
3952 si_limit -= GET_MODE_SIZE (mode);
3953 }
3954 }
3955
3956 if (mova_p (from))
3957 {
3958 switch (untangle_mova (&num_mova, &mova, from))
3959 {
3960 case 0: return find_barrier (0, 0, mova);
3961 case 2:
3962 {
3963 leading_mova = 0;
3964 barrier_before_mova
3965 = good_barrier ? good_barrier : found_barrier;
3966 }
3967 default: break;
3968 }
3969 if (found_si > count_si)
3970 count_si = found_si;
3971 }
3972 else if (GET_CODE (from) == JUMP_INSN
3973 && (GET_CODE (PATTERN (from)) == ADDR_VEC
3974 || GET_CODE (PATTERN (from)) == ADDR_DIFF_VEC))
3975 {
3976 if ((num_mova > 1 && GET_MODE (prev_nonnote_insn (from)) == VOIDmode)
3977 || (num_mova
3978 && (prev_nonnote_insn (from)
3979 == XEXP (MOVA_LABELREF (mova), 0))))
3980 num_mova--;
3981 if (barrier_align (next_real_insn (from)) == align_jumps_log)
3982 {
3983 /* We have just passed the barrier in front of the
3984 ADDR_DIFF_VEC, which is stored in found_barrier. Since
3985 the ADDR_DIFF_VEC is accessed as data, just like our pool
3986 constants, this is a good opportunity to accommodate what
3987 we have gathered so far.
3988 If we waited any longer, we could end up at a barrier in
3989 front of code, which gives worse cache usage for separated
3990 instruction / data caches. */
3991 good_barrier = found_barrier;
3992 break;
3993 }
3994 else
3995 {
3996 rtx body = PATTERN (from);
3997 inc = XVECLEN (body, 1) * GET_MODE_SIZE (GET_MODE (body));
3998 }
3999 }
4000 /* For the SH1, we generate alignments even after jumps-around-jumps. */
4001 else if (GET_CODE (from) == JUMP_INSN
4002 && ! TARGET_SH2
4003 && ! TARGET_SMALLCODE)
4004 new_align = 4;
4005
4006 if (found_si)
4007 {
4008 count_si += inc;
4009 if (new_align > si_align)
4010 {
4011 si_limit -= (count_si - 1) & (new_align - si_align);
4012 si_align = new_align;
4013 }
4014 count_si = (count_si + new_align - 1) & -new_align;
4015 }
4016 if (found_hi)
4017 {
4018 count_hi += inc;
4019 if (new_align > hi_align)
4020 {
4021 hi_limit -= (count_hi - 1) & (new_align - hi_align);
4022 hi_align = new_align;
4023 }
4024 count_hi = (count_hi + new_align - 1) & -new_align;
4025 }
4026 from = NEXT_INSN (from);
4027 }
4028
4029 if (num_mova)
4030 {
4031 if (leading_mova)
4032 {
4033 /* Try as we might, the leading mova is out of range. Change
4034 it into a load (which will become a pcload) and retry. */
4035 fixup_mova (mova);
4036 return find_barrier (0, 0, mova);
4037 }
4038 else
4039 {
4040 /* Insert the constant pool table before the mova instruction,
4041 to prevent the mova label reference from going out of range. */
4042 from = mova;
4043 good_barrier = found_barrier = barrier_before_mova;
4044 }
4045 }
4046
4047 if (found_barrier)
4048 {
4049 if (good_barrier && next_real_insn (found_barrier))
4050 found_barrier = good_barrier;
4051 }
4052 else
4053 {
4054 /* We didn't find a barrier in time to dump our stuff,
4055 so we'll make one. */
4056 rtx label = gen_label_rtx ();
4057
4058 /* If we exceeded the range, then we must back up over the last
4059 instruction we looked at. Otherwise, we just need to undo the
4060 NEXT_INSN at the end of the loop. */
4061 if (count_hi > hi_limit || count_si > si_limit)
4062 from = PREV_INSN (PREV_INSN (from));
4063 else
4064 from = PREV_INSN (from);
4065
4066 /* Walk back to be just before any jump or label.
4067 Putting it before a label reduces the number of times the branch
4068 around the constant pool table will be hit. Putting it before
4069 a jump makes it more likely that the bra delay slot will be
4070 filled. */
4071 while (GET_CODE (from) == JUMP_INSN || GET_CODE (from) == NOTE
4072 || GET_CODE (from) == CODE_LABEL)
4073 from = PREV_INSN (from);
4074
4075 from = emit_jump_insn_after (gen_jump (label), from);
4076 JUMP_LABEL (from) = label;
4077 LABEL_NUSES (label) = 1;
4078 found_barrier = emit_barrier_after (from);
4079 emit_label_after (label, found_barrier);
4080 }
4081
4082 return found_barrier;
4083 }
4084
4085 /* If the instruction INSN is implemented by a special function, and we can
4086 positively find the register that is used to call the sfunc, and this
4087 register is not used anywhere else in this instruction, except as the
4088 destination of a set, return this register; else, return 0. */
4089 rtx
4090 sfunc_uses_reg (rtx insn)
4091 {
4092 int i;
4093 rtx pattern, part, reg_part, reg;
4094
4095 if (GET_CODE (insn) != INSN)
4096 return 0;
4097 pattern = PATTERN (insn);
4098 if (GET_CODE (pattern) != PARALLEL || get_attr_type (insn) != TYPE_SFUNC)
4099 return 0;
4100
4101 for (reg_part = 0, i = XVECLEN (pattern, 0) - 1; i >= 1; i--)
4102 {
4103 part = XVECEXP (pattern, 0, i);
4104 if (GET_CODE (part) == USE && GET_MODE (XEXP (part, 0)) == SImode)
4105 reg_part = part;
4106 }
4107 if (! reg_part)
4108 return 0;
4109 reg = XEXP (reg_part, 0);
4110 for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
4111 {
4112 part = XVECEXP (pattern, 0, i);
4113 if (part == reg_part || GET_CODE (part) == CLOBBER)
4114 continue;
4115 if (reg_mentioned_p (reg, ((GET_CODE (part) == SET
4116 && GET_CODE (SET_DEST (part)) == REG)
4117 ? SET_SRC (part) : part)))
4118 return 0;
4119 }
4120 return reg;
4121 }
4122
4123 /* See if the only way in which INSN uses REG is by calling it, or by
4124 setting it while calling it. Set *SET to a SET rtx if the register
4125 is set by INSN. */
4126
4127 static int
4128 noncall_uses_reg (rtx reg, rtx insn, rtx *set)
4129 {
4130 rtx pattern, reg2;
4131
4132 *set = NULL_RTX;
4133
4134 reg2 = sfunc_uses_reg (insn);
4135 if (reg2 && REGNO (reg2) == REGNO (reg))
4136 {
4137 pattern = single_set (insn);
4138 if (pattern
4139 && GET_CODE (SET_DEST (pattern)) == REG
4140 && REGNO (reg) == REGNO (SET_DEST (pattern)))
4141 *set = pattern;
4142 return 0;
4143 }
4144 if (GET_CODE (insn) != CALL_INSN)
4145 {
4146 /* We don't use rtx_equal_p because we don't care if the mode is
4147 different. */
4148 pattern = single_set (insn);
4149 if (pattern
4150 && GET_CODE (SET_DEST (pattern)) == REG
4151 && REGNO (reg) == REGNO (SET_DEST (pattern)))
4152 {
4153 rtx par, part;
4154 int i;
4155
4156 *set = pattern;
4157 par = PATTERN (insn);
4158 if (GET_CODE (par) == PARALLEL)
4159 for (i = XVECLEN (par, 0) - 1; i >= 0; i--)
4160 {
4161 part = XVECEXP (par, 0, i);
4162 if (GET_CODE (part) != SET && reg_mentioned_p (reg, part))
4163 return 1;
4164 }
4165 return reg_mentioned_p (reg, SET_SRC (pattern));
4166 }
4167
4168 return 1;
4169 }
4170
4171 pattern = PATTERN (insn);
4172
4173 if (GET_CODE (pattern) == PARALLEL)
4174 {
4175 int i;
4176
4177 for (i = XVECLEN (pattern, 0) - 1; i >= 1; i--)
4178 if (reg_mentioned_p (reg, XVECEXP (pattern, 0, i)))
4179 return 1;
4180 pattern = XVECEXP (pattern, 0, 0);
4181 }
4182
4183 if (GET_CODE (pattern) == SET)
4184 {
4185 if (reg_mentioned_p (reg, SET_DEST (pattern)))
4186 {
4187 /* We don't use rtx_equal_p, because we don't care if the
4188 mode is different. */
4189 if (GET_CODE (SET_DEST (pattern)) != REG
4190 || REGNO (reg) != REGNO (SET_DEST (pattern)))
4191 return 1;
4192
4193 *set = pattern;
4194 }
4195
4196 pattern = SET_SRC (pattern);
4197 }
4198
4199 if (GET_CODE (pattern) != CALL
4200 || GET_CODE (XEXP (pattern, 0)) != MEM
4201 || ! rtx_equal_p (reg, XEXP (XEXP (pattern, 0), 0)))
4202 return 1;
4203
4204 return 0;
4205 }
4206
4207 /* Given X, a pattern of an insn or a part of it, return a mask of used
4208 general registers. Bits 0..15 mean that the respective registers
4209 are used as inputs in the instruction. Bits 16..31 mean that the
4210 registers 0..15, respectively, are used as outputs, or are clobbered.
4211 IS_DEST should be set to 16 if X is the destination of a SET, else to 0. */
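/* Example (assuming SImode occupies a single hard register here): for
   (set (reg:SI 4) (plus:SI (reg:SI 4) (reg:SI 5))) the result is
   (1 << 4) | (1 << 5) for the inputs plus (1 << (16 + 4)) for the output,
   i.e. 0x00100030.  */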
4212 int
4213 regs_used (rtx x, int is_dest)
4214 {
4215 enum rtx_code code;
4216 const char *fmt;
4217 int i, used = 0;
4218
4219 if (! x)
4220 return used;
4221 code = GET_CODE (x);
4222 switch (code)
4223 {
4224 case REG:
4225 if (REGNO (x) < 16)
4226 return (((1 << HARD_REGNO_NREGS (0, GET_MODE (x))) - 1)
4227 << (REGNO (x) + is_dest));
4228 return 0;
4229 case SUBREG:
4230 {
4231 rtx y = SUBREG_REG (x);
4232
4233 if (GET_CODE (y) != REG)
4234 break;
4235 if (REGNO (y) < 16)
4236 return (((1 << HARD_REGNO_NREGS (0, GET_MODE (x))) - 1)
4237 << (REGNO (y) +
4238 subreg_regno_offset (REGNO (y),
4239 GET_MODE (y),
4240 SUBREG_BYTE (x),
4241 GET_MODE (x)) + is_dest));
4242 return 0;
4243 }
4244 case SET:
4245 return regs_used (SET_SRC (x), 0) | regs_used (SET_DEST (x), 16);
4246 case RETURN:
4247 /* If there was a return value, it must have been indicated with USE. */
4248 return 0x00ffff00;
4249 case CLOBBER:
4250 is_dest = 1;
4251 break;
4252 case MEM:
4253 is_dest = 0;
4254 break;
4255 case CALL:
4256 used |= 0x00ff00f0;
4257 break;
4258 default:
4259 break;
4260 }
4261
4262 fmt = GET_RTX_FORMAT (code);
4263
4264 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4265 {
4266 if (fmt[i] == 'E')
4267 {
4268 register int j;
4269 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4270 used |= regs_used (XVECEXP (x, i, j), is_dest);
4271 }
4272 else if (fmt[i] == 'e')
4273 used |= regs_used (XEXP (x, i), is_dest);
4274 }
4275 return used;
4276 }
4277
4278 /* Create an instruction that prevents redirection of a conditional branch
4279 to the destination of the JUMP with address ADDR.
4280 If the branch needs to be implemented as an indirect jump, try to find
4281 a scratch register for it.
4282 NEED_BLOCK says how strong a blocking insn we need: if it is 0, don't do
4283 anything unless we need a scratch register; if it is 1, any preceding insn
4284 that doesn't fit into a delay slot is good enough; if it is 2, a definite
4285 blocking insn is needed.  -1 is used internally to avoid deep recursion.
4286 If a blocking instruction is made or recognized, return it. */
4287
4288 static rtx
4289 gen_block_redirect (rtx jump, int addr, int need_block)
4290 {
4291 int dead = 0;
4292 rtx prev = prev_nonnote_insn (jump);
4293 rtx dest;
4294
4295 /* First, check if we already have an instruction that satisfies our need. */
4296 if (prev && GET_CODE (prev) == INSN && ! INSN_DELETED_P (prev))
4297 {
4298 if (INSN_CODE (prev) == CODE_FOR_indirect_jump_scratch)
4299 return prev;
4300 if (GET_CODE (PATTERN (prev)) == USE
4301 || GET_CODE (PATTERN (prev)) == CLOBBER
4302 || get_attr_in_delay_slot (prev) == IN_DELAY_SLOT_YES)
4303 prev = jump;
4304 else if ((need_block &= ~1) < 0)
4305 return prev;
4306 else if (recog_memoized (prev) == CODE_FOR_block_branch_redirect)
4307 need_block = 0;
4308 }
4309 if (GET_CODE (PATTERN (jump)) == RETURN)
4310 {
4311 if (! need_block)
4312 return prev;
4313 /* Reorg even does nasty things with return insns that cause branches
4314 to go out of range - see find_end_label and callers. */
4315 return emit_insn_before (gen_block_branch_redirect (const0_rtx) , jump);
4316 }
4317 /* We can't use JUMP_LABEL here because it might be undefined
4318 when not optimizing. */
4319 dest = XEXP (SET_SRC (PATTERN (jump)), 0);
4320 /* If the branch is out of range, try to find a scratch register for it. */
4321 if (optimize
4322 && (INSN_ADDRESSES (INSN_UID (dest)) - addr + (unsigned) 4092
4323 > 4092 + 4098))
4324 {
4325 rtx scan;
4326 /* Don't look for the stack pointer as a scratch register,
4327 it would cause trouble if an interrupt occurred. */
4328 unsigned try = 0x7fff, used;
4329 int jump_left = flag_expensive_optimizations + 1;
4330
4331 /* It is likely that the most recent eligible instruction is wanted for
4332 the delay slot. Therefore, find out which registers it uses, and
4333 try to avoid using them. */
4334
4335 for (scan = jump; (scan = PREV_INSN (scan)); )
4336 {
4337 enum rtx_code code;
4338
4339 if (INSN_DELETED_P (scan))
4340 continue;
4341 code = GET_CODE (scan);
4342 if (code == CODE_LABEL || code == JUMP_INSN)
4343 break;
4344 if (code == INSN
4345 && GET_CODE (PATTERN (scan)) != USE
4346 && GET_CODE (PATTERN (scan)) != CLOBBER
4347 && get_attr_in_delay_slot (scan) == IN_DELAY_SLOT_YES)
4348 {
4349 try &= ~regs_used (PATTERN (scan), 0);
4350 break;
4351 }
4352 }
4353 for (used = dead = 0, scan = JUMP_LABEL (jump);
4354 (scan = NEXT_INSN (scan)); )
4355 {
4356 enum rtx_code code;
4357
4358 if (INSN_DELETED_P (scan))
4359 continue;
4360 code = GET_CODE (scan);
4361 if (INSN_P (scan))
4362 {
4363 used |= regs_used (PATTERN (scan), 0);
4364 if (code == CALL_INSN)
4365 used |= regs_used (CALL_INSN_FUNCTION_USAGE (scan), 0);
4366 dead |= (used >> 16) & ~used;
4367 if (dead & try)
4368 {
4369 dead &= try;
4370 break;
4371 }
4372 if (code == JUMP_INSN)
4373 {
4374 if (jump_left-- && simplejump_p (scan))
4375 scan = JUMP_LABEL (scan);
4376 else
4377 break;
4378 }
4379 }
4380 }
4381 /* Mask out the stack pointer again, in case it was
4382 the only 'free' register we have found. */
4383 dead &= 0x7fff;
4384 }
4385 /* If the immediate destination is still in range, check for possible
4386 threading with a jump beyond the delay slot insn.
4387 Don't check if we are called recursively; the jump has been or will be
4388 checked in a different invocation in that case. */
4389
4390 else if (optimize && need_block >= 0)
4391 {
4392 rtx next = next_active_insn (next_active_insn (dest));
4393 if (next && GET_CODE (next) == JUMP_INSN
4394 && GET_CODE (PATTERN (next)) == SET
4395 && recog_memoized (next) == CODE_FOR_jump_compact)
4396 {
4397 dest = JUMP_LABEL (next);
4398 if (dest
4399 && (INSN_ADDRESSES (INSN_UID (dest)) - addr + (unsigned) 4092
4400 > 4092 + 4098))
4401 gen_block_redirect (next, INSN_ADDRESSES (INSN_UID (next)), -1);
4402 }
4403 }
4404
4405 if (dead)
4406 {
4407 rtx reg = gen_rtx_REG (SImode, exact_log2 (dead & -dead));
4408
4409 /* It would be nice if we could convert the jump into an indirect
4410 jump / far branch right now, and thus expose all constituent
4411 instructions to further optimization. However, reorg uses
4412 simplejump_p to determine if there is an unconditional jump where
4413 it should try to schedule instructions from the target of the
4414 branch; simplejump_p fails for indirect jumps even if they have
4415 a JUMP_LABEL. */
4416 rtx insn = emit_insn_before (gen_indirect_jump_scratch
4417 (reg, GEN_INT (INSN_UID (JUMP_LABEL (jump))))
4418 , jump);
4419 /* ??? We would like this to have the scope of the jump, but that
4420 scope will change when a delay slot insn of an inner scope is added.
4421 Hence, after delay slot scheduling, we'll have to expect
4422 NOTE_INSN_BLOCK_END notes between the indirect_jump_scratch and
4423 the jump. */
4424
4425 INSN_LOCATOR (insn) = INSN_LOCATOR (jump);
4426 INSN_CODE (insn) = CODE_FOR_indirect_jump_scratch;
4427 return insn;
4428 }
4429 else if (need_block)
4430 /* We can't use JUMP_LABEL here because it might be undefined
4431 when not optimizing. */
4432 return emit_insn_before (gen_block_branch_redirect
4433 (GEN_INT (INSN_UID (XEXP (SET_SRC (PATTERN (jump)), 0))))
4434 , jump);
4435 return prev;
4436 }
4437
4438 #define CONDJUMP_MIN -252
4439 #define CONDJUMP_MAX 262
4440 struct far_branch
4441 {
4442 /* A label (to be placed) in front of the jump
4443 that jumps to our ultimate destination. */
4444 rtx near_label;
4445 /* Where we are going to insert it if we cannot move the jump any farther,
4446 or the jump itself if we have picked up an existing jump. */
4447 rtx insert_place;
4448 /* The ultimate destination. */
4449 rtx far_label;
4450 struct far_branch *prev;
4451 /* If the branch has already been created, its address;
4452 else the address of its first prospective user. */
4453 int address;
4454 };
4455
4456 static void gen_far_branch (struct far_branch *);
4457 enum mdep_reorg_phase_e mdep_reorg_phase;
4458 static void
4459 gen_far_branch (struct far_branch *bp)
4460 {
4461 rtx insn = bp->insert_place;
4462 rtx jump;
4463 rtx label = gen_label_rtx ();
4464 int ok;
4465
4466 emit_label_after (label, insn);
4467 if (bp->far_label)
4468 {
4469 jump = emit_jump_insn_after (gen_jump (bp->far_label), insn);
4470 LABEL_NUSES (bp->far_label)++;
4471 }
4472 else
4473 jump = emit_jump_insn_after (gen_return (), insn);
4474 /* Emit a barrier so that reorg knows that any following instructions
4475 are not reachable via a fall-through path.
4476 But don't do this when not optimizing, since we wouldn't suppress the
4477 alignment for the barrier then, and could end up with out-of-range
4478 pc-relative loads. */
4479 if (optimize)
4480 emit_barrier_after (jump);
4481 emit_label_after (bp->near_label, insn);
4482 JUMP_LABEL (jump) = bp->far_label;
4483 ok = invert_jump (insn, label, 1);
4484 gcc_assert (ok);
4485
4486 /* If we are branching around a jump (rather than a return), prevent
4487 reorg from using an insn from the jump target as the delay slot insn -
4488 when reorg did this, it pessimized code (we'd rather hide the delay slot)
4489 and it could cause branches to go out of range. */
4490 if (bp->far_label)
4491 (emit_insn_after
4492 (gen_stuff_delay_slot
4493 (GEN_INT (INSN_UID (XEXP (SET_SRC (PATTERN (jump)), 0))),
4494 GEN_INT (recog_memoized (insn) == CODE_FOR_branch_false)),
4495 insn));
4496 /* Prevent reorg from undoing our splits. */
4497 gen_block_redirect (jump, bp->address += 2, 2);
4498 }
4499
4500 /* Fix up ADDR_DIFF_VECs. */
4501 void
4502 fixup_addr_diff_vecs (rtx first)
4503 {
4504 rtx insn;
4505
4506 for (insn = first; insn; insn = NEXT_INSN (insn))
4507 {
4508 rtx vec_lab, pat, prev, prevpat, x, braf_label;
4509
4510 if (GET_CODE (insn) != JUMP_INSN
4511 || GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
4512 continue;
4513 pat = PATTERN (insn);
4514 vec_lab = XEXP (XEXP (pat, 0), 0);
4515
4516 /* Search the matching casesi_jump_2. */
4517 for (prev = vec_lab; ; prev = PREV_INSN (prev))
4518 {
4519 if (GET_CODE (prev) != JUMP_INSN)
4520 continue;
4521 prevpat = PATTERN (prev);
4522 if (GET_CODE (prevpat) != PARALLEL || XVECLEN (prevpat, 0) != 2)
4523 continue;
4524 x = XVECEXP (prevpat, 0, 1);
4525 if (GET_CODE (x) != USE)
4526 continue;
4527 x = XEXP (x, 0);
4528 if (GET_CODE (x) == LABEL_REF && XEXP (x, 0) == vec_lab)
4529 break;
4530 }
4531 /* FIXME: This is a bug in the optimizer, but it seems harmless
4532 to just avoid panicking. */
4533 if (!prev)
4534 continue;
4535
4536 /* Emit the reference label of the braf where it belongs, right after
4537 the casesi_jump_2 (i.e. braf). */
4538 braf_label = XEXP (XEXP (SET_SRC (XVECEXP (prevpat, 0, 0)), 1), 0);
4539 emit_label_after (braf_label, prev);
4540
4541 /* Fix up the ADDR_DIFF_VEC to be relative
4542 to the reference address of the braf. */
4543 XEXP (XEXP (pat, 0), 0) = braf_label;
4544 }
4545 }
4546
4547 /* BARRIER_OR_LABEL is either a BARRIER or a CODE_LABEL immediately following
4548 a barrier. Return the base 2 logarithm of the desired alignment. */
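/* E.g. a result of 2 requests 1 << 2 = 4-byte alignment; a result of 0
   means no extra alignment is needed.  */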
4549 int
4550 barrier_align (rtx barrier_or_label)
4551 {
4552 rtx next = next_real_insn (barrier_or_label), pat, prev;
4553 int slot, credit, jump_to_next = 0;
4554
4555 if (! next)
4556 return 0;
4557
4558 pat = PATTERN (next);
4559
4560 if (GET_CODE (pat) == ADDR_DIFF_VEC)
4561 return 2;
4562
4563 if (GET_CODE (pat) == UNSPEC_VOLATILE && XINT (pat, 1) == UNSPECV_ALIGN)
4564 /* This is a barrier in front of a constant table. */
4565 return 0;
4566
4567 prev = prev_real_insn (barrier_or_label);
4568 if (GET_CODE (PATTERN (prev)) == ADDR_DIFF_VEC)
4569 {
4570 pat = PATTERN (prev);
4571 /* If this is a very small table, we want to keep the alignment after
4572 the table to the minimum for proper code alignment. */
4573 return ((TARGET_SMALLCODE
4574 || ((unsigned) XVECLEN (pat, 1) * GET_MODE_SIZE (GET_MODE (pat))
4575 <= (unsigned) 1 << (CACHE_LOG - 2)))
4576 ? 1 << TARGET_SHMEDIA : align_jumps_log);
4577 }
4578
4579 if (TARGET_SMALLCODE)
4580 return 0;
4581
4582 if (! TARGET_SH2 || ! optimize)
4583 return align_jumps_log;
4584
4585 /* When fixing up pcloads, a constant table might be inserted just before
4586 the basic block that ends with the barrier. Thus, we can't trust the
4587 instruction lengths before that. */
4588 if (mdep_reorg_phase > SH_FIXUP_PCLOAD)
4589 {
4590 /* Check if there is an immediately preceding branch to the insn beyond
4591 the barrier.  We must weigh the cost of discarding useful information
4592 from the current cache line when executing this branch and there is
4593 an alignment, against that of fetching unneeded insns in front of the
4594 branch target when there is no alignment. */
4595
4596 /* There are two delay_slot cases to consider. One is the simple case
4597 where the preceding branch is to the insn beyond the barrier (simple
4598 delay slot filling), and the other is where the preceding branch has
4599 a delay slot that is a duplicate of the insn after the barrier
4600 (fill_eager_delay_slots) and the branch is to the insn after the insn
4601 after the barrier. */
4602
4603 /* PREV is presumed to be the JUMP_INSN for the barrier under
4604 investigation. Skip to the insn before it. */
4605 prev = prev_real_insn (prev);
4606
4607 for (slot = 2, credit = (1 << (CACHE_LOG - 2)) + 2;
4608 credit >= 0 && prev && GET_CODE (prev) == INSN;
4609 prev = prev_real_insn (prev))
4610 {
4611 jump_to_next = 0;
4612 if (GET_CODE (PATTERN (prev)) == USE
4613 || GET_CODE (PATTERN (prev)) == CLOBBER)
4614 continue;
4615 if (GET_CODE (PATTERN (prev)) == SEQUENCE)
4616 {
4617 prev = XVECEXP (PATTERN (prev), 0, 1);
4618 if (INSN_UID (prev) == INSN_UID (next))
4619 {
4620 /* Delay slot was filled with insn at jump target. */
4621 jump_to_next = 1;
4622 continue;
4623 }
4624 }
4625
4626 if (slot &&
4627 get_attr_in_delay_slot (prev) == IN_DELAY_SLOT_YES)
4628 slot = 0;
4629 credit -= get_attr_length (prev);
4630 }
4631 if (prev
4632 && GET_CODE (prev) == JUMP_INSN
4633 && JUMP_LABEL (prev))
4634 {
4635 rtx x;
4636 if (jump_to_next
4637 || next_real_insn (JUMP_LABEL (prev)) == next
4638 /* If relax_delay_slots() decides NEXT was redundant
4639 with some previous instruction, it will have
4640 redirected PREV's jump to the following insn. */
4641 || JUMP_LABEL (prev) == next_nonnote_insn (next)
4642 /* There is no upper bound on redundant instructions
4643 that might have been skipped, but we must not put an
4644 alignment where none had been before. */
4645 || (x = (NEXT_INSN (NEXT_INSN (PREV_INSN (prev)))),
4646 (INSN_P (x)
4647 && (INSN_CODE (x) == CODE_FOR_block_branch_redirect
4648 || INSN_CODE (x) == CODE_FOR_indirect_jump_scratch
4649 || INSN_CODE (x) == CODE_FOR_stuff_delay_slot))))
4650 {
4651 rtx pat = PATTERN (prev);
4652 if (GET_CODE (pat) == PARALLEL)
4653 pat = XVECEXP (pat, 0, 0);
4654 if (credit - slot >= (GET_CODE (SET_SRC (pat)) == PC ? 2 : 0))
4655 return 0;
4656 }
4657 }
4658 }
4659
4660 return align_jumps_log;
4661 }
4662
4663 /* If we are inside a phony loop, almost any kind of label can turn up as the
4664 first one in the loop. Aligning a braf label causes incorrect switch
4665 destination addresses; we can detect braf labels because they are
4666 followed by a BARRIER.
4667 Applying loop alignment to small constant or switch tables is a waste
4668 of space, so we suppress this too. */
4669 int
4670 sh_loop_align (rtx label)
4671 {
4672 rtx next = label;
4673
4674 do
4675 next = next_nonnote_insn (next);
4676 while (next && GET_CODE (next) == CODE_LABEL);
4677
4678 if (! next
4679 || ! INSN_P (next)
4680 || GET_CODE (PATTERN (next)) == ADDR_DIFF_VEC
4681 || recog_memoized (next) == CODE_FOR_consttable_2)
4682 return 0;
4683
4684 return align_loops_log;
4685 }
4686
4687 /* Do a final pass over the function, just before delayed branch
4688 scheduling. */
4689
4690 static void
4691 sh_reorg (void)
4692 {
4693 rtx first, insn, mova = NULL_RTX;
4694 int num_mova;
4695 rtx r0_rtx = gen_rtx_REG (Pmode, 0);
4696 rtx r0_inc_rtx = gen_rtx_POST_INC (Pmode, r0_rtx);
4697
4698 first = get_insns ();
4699 max_labelno_before_reorg = max_label_num ();
4700
4701 /* We must split call insns before introducing `mova's. If we're
4702 optimizing, they'll have already been split. Otherwise, make
4703 sure we don't split them too late. */
4704 if (! optimize)
4705 split_all_insns_noflow ();
4706
4707 if (TARGET_SHMEDIA)
4708 return;
4709
4710 /* If relaxing, generate pseudo-ops to associate function calls with
4711 the symbols they call. It does no harm to not generate these
4712 pseudo-ops.  However, when we can generate them, it enables the
4713 linker to potentially relax the jsr to a bsr, and eliminate the
4714 register load and, possibly, the constant pool entry. */
4715
4716 mdep_reorg_phase = SH_INSERT_USES_LABELS;
4717 if (TARGET_RELAX)
4718 {
4719 /* Remove all REG_LABEL notes. We want to use them for our own
4720 purposes. This works because none of the remaining passes
4721 need to look at them.
4722
4723 ??? But it may break in the future. We should use a machine
4724 dependent REG_NOTE, or some other approach entirely. */
4725 for (insn = first; insn; insn = NEXT_INSN (insn))
4726 {
4727 if (INSN_P (insn))
4728 {
4729 rtx note;
4730
4731 while ((note = find_reg_note (insn, REG_LABEL, NULL_RTX)) != 0)
4732 remove_note (insn, note);
4733 }
4734 }
4735
4736 for (insn = first; insn; insn = NEXT_INSN (insn))
4737 {
4738 rtx pattern, reg, link, set, scan, dies, label;
4739 int rescan = 0, foundinsn = 0;
4740
4741 if (GET_CODE (insn) == CALL_INSN)
4742 {
4743 pattern = PATTERN (insn);
4744
4745 if (GET_CODE (pattern) == PARALLEL)
4746 pattern = XVECEXP (pattern, 0, 0);
4747 if (GET_CODE (pattern) == SET)
4748 pattern = SET_SRC (pattern);
4749
4750 if (GET_CODE (pattern) != CALL
4751 || GET_CODE (XEXP (pattern, 0)) != MEM)
4752 continue;
4753
4754 reg = XEXP (XEXP (pattern, 0), 0);
4755 }
4756 else
4757 {
4758 reg = sfunc_uses_reg (insn);
4759 if (! reg)
4760 continue;
4761 }
4762
4763 if (GET_CODE (reg) != REG)
4764 continue;
4765
4766 /* Try scanning backward to find where the register is set. */
4767 link = NULL;
4768 for (scan = PREV_INSN (insn);
4769 scan && GET_CODE (scan) != CODE_LABEL;
4770 scan = PREV_INSN (scan))
4771 {
4772 if (! INSN_P (scan))
4773 continue;
4774
4775 if (! reg_mentioned_p (reg, scan))
4776 continue;
4777
4778 if (noncall_uses_reg (reg, scan, &set))
4779 break;
4780
4781 if (set)
4782 {
4783 link = scan;
4784 break;
4785 }
4786 }
4787
4788 if (! link)
4789 continue;
4790
4791 /* The register is set at LINK. */
4792
4793 /* We can only optimize the function call if the register is
4794 being set to a symbol. In theory, we could sometimes
4795 optimize calls to a constant location, but the assembler
4796 and linker do not support that at present. */
4797 if (GET_CODE (SET_SRC (set)) != SYMBOL_REF
4798 && GET_CODE (SET_SRC (set)) != LABEL_REF)
4799 continue;
4800
4801 /* Scan forward from LINK to the place where REG dies, and
4802 make sure that the only insns which use REG are
4803 themselves function calls. */
4804
4805 /* ??? This doesn't work for call targets that were allocated
4806 by reload, since there may not be a REG_DEAD note for the
4807 register. */
4808
4809 dies = NULL_RTX;
4810 for (scan = NEXT_INSN (link); scan; scan = NEXT_INSN (scan))
4811 {
4812 rtx scanset;
4813
4814 /* Don't try to trace forward past a CODE_LABEL if we haven't
4815 seen INSN yet. Ordinarily, we will only find the setting insn
4816 if it is in the same basic block. However,
4817 cross-jumping can insert code labels in between the load and
4818 the call, and can result in situations where a single call
4819 insn may have two targets depending on where we came from. */
4820
4821 if (GET_CODE (scan) == CODE_LABEL && ! foundinsn)
4822 break;
4823
4824 if (! INSN_P (scan))
4825 continue;
4826
4827 /* Don't try to trace forward past a JUMP. To optimize
4828 safely, we would have to check that all the
4829 instructions at the jump destination did not use REG. */
4830
4831 if (GET_CODE (scan) == JUMP_INSN)
4832 break;
4833
4834 if (! reg_mentioned_p (reg, scan))
4835 continue;
4836
4837 if (noncall_uses_reg (reg, scan, &scanset))
4838 break;
4839
4840 if (scan == insn)
4841 foundinsn = 1;
4842
4843 if (scan != insn
4844 && (GET_CODE (scan) == CALL_INSN || sfunc_uses_reg (scan)))
4845 {
4846 /* There is a function call to this register other
4847 than the one we are checking. If we optimize
4848 this call, we need to rescan again below. */
4849 rescan = 1;
4850 }
4851
4852 /* ??? We shouldn't have to worry about SCANSET here.
4853 We should just be able to check for a REG_DEAD note
4854 on a function call. However, the REG_DEAD notes are
4855 apparently not dependable around libcalls; c-torture
4856 execute/920501-2 is a test case. If SCANSET is set,
4857 then this insn sets the register, so it must have
4858 died earlier. Unfortunately, this will only handle
4859 the cases in which the register is, in fact, set in a
4860 later insn. */
4861
4862 /* ??? We shouldn't have to use FOUNDINSN here.
4863 This dates back to when we used LOG_LINKS to find
4864 the most recent insn which sets the register. */
4865
4866 if (foundinsn
4867 && (scanset
4868 || find_reg_note (scan, REG_DEAD, reg)))
4869 {
4870 dies = scan;
4871 break;
4872 }
4873 }
4874
4875 if (! dies)
4876 {
4877 /* Either there was a branch, or some insn used REG
4878 other than as a function call address. */
4879 continue;
4880 }
4881
4882 /* Create a code label, and put it in a REG_LABEL note on
4883 the insn which sets the register, and on each call insn
4884 which uses the register. In final_prescan_insn we look
4885 for the REG_LABEL notes, and output the appropriate label
4886 or pseudo-op. */
4887
4888 label = gen_label_rtx ();
4889 REG_NOTES (link) = gen_rtx_INSN_LIST (REG_LABEL, label,
4890 REG_NOTES (link));
4891 REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_LABEL, label,
4892 REG_NOTES (insn));
4893 if (rescan)
4894 {
4895 scan = link;
4896 do
4897 {
4898 rtx reg2;
4899
4900 scan = NEXT_INSN (scan);
4901 if (scan != insn
4902 && ((GET_CODE (scan) == CALL_INSN
4903 && reg_mentioned_p (reg, scan))
4904 || ((reg2 = sfunc_uses_reg (scan))
4905 && REGNO (reg2) == REGNO (reg))))
4906 REG_NOTES (scan)
4907 = gen_rtx_INSN_LIST (REG_LABEL, label, REG_NOTES (scan));
4908 }
4909 while (scan != dies);
4910 }
4911 }
4912 }
4913
4914 if (TARGET_SH2)
4915 fixup_addr_diff_vecs (first);
4916
4917 if (optimize)
4918 {
4919 mdep_reorg_phase = SH_SHORTEN_BRANCHES0;
4920 shorten_branches (first);
4921 }
4922
4923 /* Scan the function looking for move instructions which have to be
4924 changed to pc-relative loads and insert the literal tables. */
4925 label_ref_list_pool = create_alloc_pool ("label references list",
4926 sizeof (struct label_ref_list_d),
4927 30);
4928 mdep_reorg_phase = SH_FIXUP_PCLOAD;
4929 for (insn = first, num_mova = 0; insn; insn = NEXT_INSN (insn))
4930 {
4931 if (mova_p (insn))
4932 {
4933 /* ??? basic block reordering can move a switch table dispatch
4934 below the switch table. Check if that has happened.
4935 We only have the addresses available when optimizing; but then,
4936 this check shouldn't be needed when not optimizing. */
4937 if (!untangle_mova (&num_mova, &mova, insn))
4938 {
4939 insn = mova;
4940 num_mova = 0;
4941 }
4942 }
4943 else if (GET_CODE (insn) == JUMP_INSN
4944 && GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
4945 && num_mova
4946 /* ??? loop invariant motion can also move a mova out of a
4947 loop. Since loop does this code motion anyway, maybe we
4948 should wrap UNSPEC_MOVA into a CONST, so that reload can
4949 move it back. */
4950 && ((num_mova > 1
4951 && GET_MODE (prev_nonnote_insn (insn)) == VOIDmode)
4952 || (prev_nonnote_insn (insn)
4953 == XEXP (MOVA_LABELREF (mova), 0))))
4954 {
4955 rtx scan;
4956 int total;
4957
4958 num_mova--;
4959
4960 /* Some code might have been inserted between the mova and
4961 its ADDR_DIFF_VEC. Check if the mova is still in range. */
4962 for (scan = mova, total = 0; scan != insn; scan = NEXT_INSN (scan))
4963 total += get_attr_length (scan);
4964
4965 /* range of mova is 1020, add 4 because pc counts from address of
4966 second instruction after this one, subtract 2 in case pc is 2
4967 byte aligned. Possible alignment needed for the ADDR_DIFF_VEC
4968 cancels out with alignment effects of the mova itself. */
4969 if (total > 1022)
4970 {
4971 /* Change the mova into a load, and restart scanning
4972 there. broken_move will then return true for mova. */
4973 fixup_mova (mova);
4974 insn = mova;
4975 }
4976 }
4977 if (broken_move (insn)
4978 || (GET_CODE (insn) == INSN
4979 && recog_memoized (insn) == CODE_FOR_casesi_worker_2))
4980 {
4981 rtx scan;
4982 /* Scan ahead looking for a barrier to stick the constant table
4983 behind. */
4984 rtx barrier = find_barrier (num_mova, mova, insn);
4985 rtx last_float_move = NULL_RTX, last_float = 0, *last_float_addr = NULL;
4986 int need_aligned_label = 0;
4987
4988 if (num_mova && ! mova_p (mova))
4989 {
4990 /* find_barrier had to change the first mova into a
4991 pcload; thus, we have to start with this new pcload. */
4992 insn = mova;
4993 num_mova = 0;
4994 }
4995 /* Now find all the moves between the points and modify them. */
4996 for (scan = insn; scan != barrier; scan = NEXT_INSN (scan))
4997 {
4998 if (GET_CODE (scan) == CODE_LABEL)
4999 last_float = 0;
5000 if (GET_CODE (scan) == INSN
5001 && recog_memoized (scan) == CODE_FOR_casesi_worker_2)
5002 need_aligned_label = 1;
5003 if (broken_move (scan))
5004 {
5005 rtx *patp = &PATTERN (scan), pat = *patp;
5006 rtx src, dst;
5007 rtx lab;
5008 rtx newsrc;
5009 enum machine_mode mode;
5010
5011 if (GET_CODE (pat) == PARALLEL)
5012 patp = &XVECEXP (pat, 0, 0), pat = *patp;
5013 src = SET_SRC (pat);
5014 dst = SET_DEST (pat);
5015 mode = GET_MODE (dst);
5016
5017 if (mode == SImode && hi_const (src)
5018 && REGNO (dst) != FPUL_REG)
5019 {
5020 int offset = 0;
5021
5022 mode = HImode;
5023 while (GET_CODE (dst) == SUBREG)
5024 {
5025 offset += subreg_regno_offset (REGNO (SUBREG_REG (dst)),
5026 GET_MODE (SUBREG_REG (dst)),
5027 SUBREG_BYTE (dst),
5028 GET_MODE (dst));
5029 dst = SUBREG_REG (dst);
5030 }
5031 dst = gen_rtx_REG (HImode, REGNO (dst) + offset);
5032 }
5033 if (GET_CODE (dst) == REG && FP_ANY_REGISTER_P (REGNO (dst)))
5034 {
5035 /* This must be an insn that clobbers r0. */
5036 rtx *clobberp = &XVECEXP (PATTERN (scan), 0,
5037 XVECLEN (PATTERN (scan), 0)
5038 - 1);
5039 rtx clobber = *clobberp;
5040
5041 gcc_assert (GET_CODE (clobber) == CLOBBER
5042 && rtx_equal_p (XEXP (clobber, 0), r0_rtx));
5043
5044 if (last_float
5045 && reg_set_between_p (r0_rtx, last_float_move, scan))
5046 last_float = 0;
5047 if (last_float
5048 && TARGET_SHCOMPACT
5049 && GET_MODE_SIZE (mode) != 4
5050 && GET_MODE_SIZE (GET_MODE (last_float)) == 4)
5051 last_float = 0;
5052 lab = add_constant (src, mode, last_float);
5053 if (lab)
5054 emit_insn_before (gen_mova (lab), scan);
5055 else
5056 {
5057 /* There will be a REG_UNUSED note for r0 on
5058 LAST_FLOAT_MOVE; we have to change it to REG_INC, since
5059 otherwise reorg:mark_target_live_regs will not
5060 consider r0 to be used and we could end up with a delay
5061 slot insn in front of SCAN that clobbers r0. */
5062 rtx note
5063 = find_regno_note (last_float_move, REG_UNUSED, 0);
5064
5065 /* If we are not optimizing, then there may not be
5066 a note. */
5067 if (note)
5068 PUT_MODE (note, REG_INC);
5069
5070 *last_float_addr = r0_inc_rtx;
5071 }
5072 last_float_move = scan;
5073 last_float = src;
5074 newsrc = gen_const_mem (mode,
5075 (((TARGET_SH4 && ! TARGET_FMOVD)
5076 || REGNO (dst) == FPUL_REG)
5077 ? r0_inc_rtx
5078 : r0_rtx));
5079 last_float_addr = &XEXP (newsrc, 0);
5080
5081 /* Remove the clobber of r0. */
5082 *clobberp = gen_rtx_CLOBBER (GET_MODE (clobber),
5083 gen_rtx_SCRATCH (Pmode));
5084 }
5085 /* This is a mova needing a label. Create it. */
5086 else if (GET_CODE (src) == UNSPEC
5087 && XINT (src, 1) == UNSPEC_MOVA
5088 && GET_CODE (XVECEXP (src, 0, 0)) == CONST)
5089 {
5090 lab = add_constant (XVECEXP (src, 0, 0), mode, 0);
5091 newsrc = gen_rtx_LABEL_REF (VOIDmode, lab);
5092 newsrc = gen_rtx_UNSPEC (SImode,
5093 gen_rtvec (1, newsrc),
5094 UNSPEC_MOVA);
5095 }
5096 else
5097 {
5098 lab = add_constant (src, mode, 0);
5099 newsrc = gen_rtx_LABEL_REF (VOIDmode, lab);
5100 newsrc = gen_const_mem (mode, newsrc);
5101 }
5102 *patp = gen_rtx_SET (VOIDmode, dst, newsrc);
5103 INSN_CODE (scan) = -1;
5104 }
5105 }
5106 dump_table (need_aligned_label ? insn : 0, barrier);
5107 insn = barrier;
5108 }
5109 }
5110 free_alloc_pool (label_ref_list_pool);
5111 for (insn = first; insn; insn = NEXT_INSN (insn))
5112 PUT_MODE (insn, VOIDmode);
5113
5114 mdep_reorg_phase = SH_SHORTEN_BRANCHES1;
5115 INSN_ADDRESSES_FREE ();
5116 split_branches (first);
5117
5118 /* The INSN_REFERENCES_ARE_DELAYED in sh.h is problematic because it
5119 also has an effect on the register that holds the address of the sfunc.
5120 Insert an extra dummy insn in front of each sfunc that pretends to
5121 use this register. */
5122 if (flag_delayed_branch)
5123 {
5124 for (insn = first; insn; insn = NEXT_INSN (insn))
5125 {
5126 rtx reg = sfunc_uses_reg (insn);
5127
5128 if (! reg)
5129 continue;
5130 emit_insn_before (gen_use_sfunc_addr (reg), insn);
5131 }
5132 }
5133 #if 0
5134 /* fpscr is not actually a user variable, but we pretend it is for the
5135 sake of the previous optimization passes, since we want it handled like
5136 one. However, we don't have any debugging information for it, so turn
5137 it into a non-user variable now. */
5138 if (TARGET_SH4)
5139 REG_USERVAR_P (get_fpscr_rtx ()) = 0;
5140 #endif
5141 mdep_reorg_phase = SH_AFTER_MDEP_REORG;
5142 }
5143
5144 int
5145 get_dest_uid (rtx label, int max_uid)
5146 {
5147 rtx dest = next_real_insn (label);
5148 int dest_uid;
5149 if (! dest)
5150 /* This can happen for an undefined label. */
5151 return 0;
5152 dest_uid = INSN_UID (dest);
5153 /* If this is a newly created branch redirection blocking instruction,
5154 we cannot index the branch_uid or insn_addresses arrays with its
5155 uid. But then, we won't need to, because the actual destination is
5156 the following branch. */
5157 while (dest_uid >= max_uid)
5158 {
5159 dest = NEXT_INSN (dest);
5160 dest_uid = INSN_UID (dest);
5161 }
5162 if (GET_CODE (dest) == JUMP_INSN && GET_CODE (PATTERN (dest)) == RETURN)
5163 return 0;
5164 return dest_uid;
5165 }
5166
5167 /* Split condbranches that are out of range. Also add clobbers for
5168 scratch registers that are needed in far jumps.
5169 We do this before delay slot scheduling, so that it can take our
5170 newly created instructions into account. It also allows us to
5171 find branches with common targets more easily. */
5172
5173 static void
5174 split_branches (rtx first)
5175 {
5176 rtx insn;
5177 struct far_branch **uid_branch, *far_branch_list = 0;
5178 int max_uid = get_max_uid ();
5179 int ok;
5180
5181 /* Find out which branches are out of range. */
5182 shorten_branches (first);
5183
5184 uid_branch = (struct far_branch **) alloca (max_uid * sizeof *uid_branch);
5185 memset ((char *) uid_branch, 0, max_uid * sizeof *uid_branch);
5186
5187 for (insn = first; insn; insn = NEXT_INSN (insn))
5188 if (! INSN_P (insn))
5189 continue;
5190 else if (INSN_DELETED_P (insn))
5191 {
5192 /* Shorten_branches would split this instruction again,
5193 so transform it into a note. */
5194 SET_INSN_DELETED (insn);
5195 }
5196 else if (GET_CODE (insn) == JUMP_INSN
5197 /* Don't mess with ADDR_DIFF_VEC */
5198 && (GET_CODE (PATTERN (insn)) == SET
5199 || GET_CODE (PATTERN (insn)) == RETURN))
5200 {
5201 enum attr_type type = get_attr_type (insn);
5202 if (type == TYPE_CBRANCH)
5203 {
5204 rtx next, beyond;
5205
5206 if (get_attr_length (insn) > 4)
5207 {
5208 rtx src = SET_SRC (PATTERN (insn));
5209 rtx olabel = XEXP (XEXP (src, 1), 0);
5210 int addr = INSN_ADDRESSES (INSN_UID (insn));
5211 rtx label = 0;
5212 int dest_uid = get_dest_uid (olabel, max_uid);
5213 struct far_branch *bp = uid_branch[dest_uid];
5214
5215 /* redirect_jump needs a valid JUMP_LABEL, and it might delete
5216 the label if the LABEL_NUSES count drops to zero. There is
5217 always a jump_optimize pass that sets these values, but it
5218 proceeds to delete unreferenced code, and then if not
5219 optimizing, to un-delete the deleted instructions, thus
5220 leaving labels with use counts that are too low. */
5221 if (! optimize)
5222 {
5223 JUMP_LABEL (insn) = olabel;
5224 LABEL_NUSES (olabel)++;
5225 }
5226 if (! bp)
5227 {
5228 bp = (struct far_branch *) alloca (sizeof *bp);
5229 uid_branch[dest_uid] = bp;
5230 bp->prev = far_branch_list;
5231 far_branch_list = bp;
5232 bp->far_label
5233 = XEXP (XEXP (SET_SRC (PATTERN (insn)), 1), 0);
5234 LABEL_NUSES (bp->far_label)++;
5235 }
5236 else
5237 {
5238 label = bp->near_label;
5239 if (! label && bp->address - addr >= CONDJUMP_MIN)
5240 {
5241 rtx block = bp->insert_place;
5242
5243 if (GET_CODE (PATTERN (block)) == RETURN)
5244 block = PREV_INSN (block);
5245 else
5246 block = gen_block_redirect (block,
5247 bp->address, 2);
5248 label = emit_label_after (gen_label_rtx (),
5249 PREV_INSN (block));
5250 bp->near_label = label;
5251 }
5252 else if (label && ! NEXT_INSN (label))
5253 {
5254 if (addr + 2 - bp->address <= CONDJUMP_MAX)
5255 bp->insert_place = insn;
5256 else
5257 gen_far_branch (bp);
5258 }
5259 }
5260 if (! label
5261 || (NEXT_INSN (label) && bp->address - addr < CONDJUMP_MIN))
5262 {
5263 bp->near_label = label = gen_label_rtx ();
5264 bp->insert_place = insn;
5265 bp->address = addr;
5266 }
5267 ok = redirect_jump (insn, label, 1);
5268 gcc_assert (ok);
5269 }
5270 else
5271 {
5272 /* get_attr_length (insn) == 2 */
5273 /* Check if we have a pattern where reorg wants to redirect
5274 the branch to a label from an unconditional branch that
5275 is too far away. */
5276 /* We can't use JUMP_LABEL here because it might be undefined
5277 when not optimizing. */
5278 /* A syntax error might cause beyond to be NULL_RTX. */
5279 beyond
5280 = next_active_insn (XEXP (XEXP (SET_SRC (PATTERN (insn)), 1),
5281 0));
5282
5283 if (beyond
5284 && (GET_CODE (beyond) == JUMP_INSN
5285 || ((beyond = next_active_insn (beyond))
5286 && GET_CODE (beyond) == JUMP_INSN))
5287 && GET_CODE (PATTERN (beyond)) == SET
5288 && recog_memoized (beyond) == CODE_FOR_jump_compact
5289 && ((INSN_ADDRESSES
5290 (INSN_UID (XEXP (SET_SRC (PATTERN (beyond)), 0)))
5291 - INSN_ADDRESSES (INSN_UID (insn)) + (unsigned) 252)
5292 > 252 + 258 + 2))
5293 gen_block_redirect (beyond,
5294 INSN_ADDRESSES (INSN_UID (beyond)), 1);
5295 }
5296
5297 next = next_active_insn (insn);
5298
5299 if ((GET_CODE (next) == JUMP_INSN
5300 || ((next = next_active_insn (next))
5301 && GET_CODE (next) == JUMP_INSN))
5302 && GET_CODE (PATTERN (next)) == SET
5303 && recog_memoized (next) == CODE_FOR_jump_compact
5304 && ((INSN_ADDRESSES
5305 (INSN_UID (XEXP (SET_SRC (PATTERN (next)), 0)))
5306 - INSN_ADDRESSES (INSN_UID (insn)) + (unsigned) 252)
5307 > 252 + 258 + 2))
5308 gen_block_redirect (next, INSN_ADDRESSES (INSN_UID (next)), 1);
5309 }
5310 else if (type == TYPE_JUMP || type == TYPE_RETURN)
5311 {
5312 int addr = INSN_ADDRESSES (INSN_UID (insn));
5313 rtx far_label = 0;
5314 int dest_uid = 0;
5315 struct far_branch *bp;
5316
5317 if (type == TYPE_JUMP)
5318 {
5319 far_label = XEXP (SET_SRC (PATTERN (insn)), 0);
5320 dest_uid = get_dest_uid (far_label, max_uid);
5321 if (! dest_uid)
5322 {
5323 /* Parse errors can lead to labels outside
5324 the insn stream. */
5325 if (! NEXT_INSN (far_label))
5326 continue;
5327
5328 if (! optimize)
5329 {
5330 JUMP_LABEL (insn) = far_label;
5331 LABEL_NUSES (far_label)++;
5332 }
5333 redirect_jump (insn, NULL_RTX, 1);
5334 far_label = 0;
5335 }
5336 }
5337 bp = uid_branch[dest_uid];
5338 if (! bp)
5339 {
5340 bp = (struct far_branch *) alloca (sizeof *bp);
5341 uid_branch[dest_uid] = bp;
5342 bp->prev = far_branch_list;
5343 far_branch_list = bp;
5344 bp->near_label = 0;
5345 bp->far_label = far_label;
5346 if (far_label)
5347 LABEL_NUSES (far_label)++;
5348 }
5349 else if (bp->near_label && ! NEXT_INSN (bp->near_label))
5350 if (addr - bp->address <= CONDJUMP_MAX)
5351 emit_label_after (bp->near_label, PREV_INSN (insn));
5352 else
5353 {
5354 gen_far_branch (bp);
5355 bp->near_label = 0;
5356 }
5357 else
5358 bp->near_label = 0;
5359 bp->address = addr;
5360 bp->insert_place = insn;
5361 if (! far_label)
5362 emit_insn_before (gen_block_branch_redirect (const0_rtx), insn);
5363 else
5364 gen_block_redirect (insn, addr, bp->near_label ? 2 : 0);
5365 }
5366 }
5367 /* Generate all pending far branches,
5368 and free our references to the far labels. */
5369 while (far_branch_list)
5370 {
5371 if (far_branch_list->near_label
5372 && ! NEXT_INSN (far_branch_list->near_label))
5373 gen_far_branch (far_branch_list);
5374 if (optimize
5375 && far_branch_list->far_label
5376 && ! --LABEL_NUSES (far_branch_list->far_label))
5377 delete_insn (far_branch_list->far_label);
5378 far_branch_list = far_branch_list->prev;
5379 }
5380
5381 /* Instruction length information is no longer valid due to the new
5382 instructions that have been generated. */
5383 init_insn_lengths ();
5384 }
5385
5386 /* Dump out instruction addresses, which is useful for debugging the
5387 constant pool table stuff.
5388
5389 If relaxing, output the label and pseudo-ops used to link together
5390 calls and the instruction which set the registers. */
5391
5392 /* ??? The addresses printed by this routine for insns are nonsense for
5393 insns which are inside of a sequence where none of the inner insns have
5394 variable length. This is because the second pass of shorten_branches
5395 does not bother to update them. */
5396
5397 void
5398 final_prescan_insn (rtx insn, rtx *opvec ATTRIBUTE_UNUSED,
5399 int noperands ATTRIBUTE_UNUSED)
5400 {
5401 if (TARGET_DUMPISIZE)
5402 fprintf (asm_out_file, "\n! at %04x\n", INSN_ADDRESSES (INSN_UID (insn)));
5403
5404 if (TARGET_RELAX)
5405 {
5406 rtx note;
5407
5408 note = find_reg_note (insn, REG_LABEL, NULL_RTX);
5409 if (note)
5410 {
5411 rtx pattern;
5412
5413 pattern = PATTERN (insn);
5414 if (GET_CODE (pattern) == PARALLEL)
5415 pattern = XVECEXP (pattern, 0, 0);
5416 switch (GET_CODE (pattern))
5417 {
5418 case SET:
5419 if (GET_CODE (SET_SRC (pattern)) != CALL
5420 && get_attr_type (insn) != TYPE_SFUNC)
5421 {
5422 targetm.asm_out.internal_label
5423 (asm_out_file, "L", CODE_LABEL_NUMBER (XEXP (note, 0)));
5424 break;
5425 }
5426 /* else FALLTHROUGH */
5427 case CALL:
5428 asm_fprintf (asm_out_file, "\t.uses %LL%d\n",
5429 CODE_LABEL_NUMBER (XEXP (note, 0)));
5430 break;
5431
5432 default:
5433 gcc_unreachable ();
5434 }
5435 }
5436 }
5437 }
5438
5439 /* Dump out any constants accumulated in the final pass. These will
5440 only be labels. */
5441
5442 const char *
5443 output_jump_label_table (void)
5444 {
5445 int i;
5446
5447 if (pool_size)
5448 {
5449 fprintf (asm_out_file, "\t.align 2\n");
5450 for (i = 0; i < pool_size; i++)
5451 {
5452 pool_node *p = &pool_vector[i];
5453
5454 (*targetm.asm_out.internal_label) (asm_out_file, "L",
5455 CODE_LABEL_NUMBER (p->label));
5456 output_asm_insn (".long %O0", &p->value);
5457 }
5458 pool_size = 0;
5459 }
5460
5461 return "";
5462 }
5463 \f
5464 /* A full frame looks like:
5465
5466 arg-5
5467 arg-4
5468 [ if current_function_anonymous_args
5469 arg-3
5470 arg-2
5471 arg-1
5472 arg-0 ]
5473 saved-fp
5474 saved-r10
5475 saved-r11
5476 saved-r12
5477 saved-pr
5478 local-n
5479 ..
5480 local-1
5481 local-0 <- fp points here. */
5482
5483 /* Number of bytes pushed for anonymous args, used to pass information
5484 between expand_prologue and expand_epilogue. */
5485
5486 /* Adjust the stack by SIZE bytes. REG holds the rtl of the register to be
5487 adjusted. If epilogue_p is zero, this is for a prologue; otherwise, it's
5488 for an epilogue and a negative value means that it's for a sibcall
5489 epilogue. If LIVE_REGS_MASK is nonzero, it points to a HARD_REG_SET of
5490 all the registers that are about to be restored, and hence dead. */
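/* For illustration: the prologue calls this with a negative SIZE and
   EPILOGUE_P == 0 to allocate stack space, e.g.

     output_stack_adjust (-rounded_frame_size (d) + d_rounding,
                          stack_pointer_rtx, 0, NULL);

   whereas the epilogue passes a positive SIZE with EPILOGUE_P == 1
   (or -1 for a sibcall epilogue) and the mask of live registers.  */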
5491
5492 static void
5493 output_stack_adjust (int size, rtx reg, int epilogue_p,
5494 HARD_REG_SET *live_regs_mask)
5495 {
5496 rtx (*emit_fn) (rtx) = epilogue_p ? &emit_insn : &frame_insn;
5497 if (size)
5498 {
5499 HOST_WIDE_INT align = STACK_BOUNDARY / BITS_PER_UNIT;
5500
5501 /* This test is bogus, as output_stack_adjust is used to re-align the
5502 stack. */
5503 #if 0
5504 gcc_assert (!(size % align));
5505 #endif
5506
5507 if (CONST_OK_FOR_ADD (size))
5508 emit_fn (GEN_ADD3 (reg, reg, GEN_INT (size)));
5509 /* Try to do it with two partial adjustments; however, we must make
5510 sure that the stack is properly aligned at all times, in case
5511 an interrupt occurs between the two partial adjustments. */
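/* Illustrative example, assuming a non-SHmedia target with an 8-bit
   signed add immediate and an 8-byte stack boundary: for SIZE == -192,
   a single add does not fit, but SIZE / 2 & -ALIGN == -96 and the
   remaining -96 both fit, and the intermediate stack pointer value
   stays 8-byte aligned.  */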
5512 else if (CONST_OK_FOR_ADD (size / 2 & -align)
5513 && CONST_OK_FOR_ADD (size - (size / 2 & -align)))
5514 {
5515 emit_fn (GEN_ADD3 (reg, reg, GEN_INT (size / 2 & -align)));
5516 emit_fn (GEN_ADD3 (reg, reg, GEN_INT (size - (size / 2 & -align))));
5517 }
5518 else
5519 {
5520 rtx const_reg;
5521 rtx insn;
5522 int temp = epilogue_p ? 7 : (TARGET_SH5 ? 0 : 1);
5523 int i;
5524
5525 /* If TEMP is invalid, we could temporarily save a general
5526 register to MACL. However, there is currently no need
5527 to handle this case, so just die when we see it. */
5528 if (epilogue_p < 0
5529 || current_function_interrupt
5530 || ! call_really_used_regs[temp] || fixed_regs[temp])
5531 temp = -1;
5532 if (temp < 0 && ! current_function_interrupt
5533 && (TARGET_SHMEDIA || epilogue_p >= 0))
5534 {
5535 HARD_REG_SET temps;
5536 COPY_HARD_REG_SET (temps, call_used_reg_set);
5537 AND_COMPL_HARD_REG_SET (temps, call_fixed_reg_set);
5538 if (epilogue_p > 0)
5539 {
5540 int nreg = 0;
5541 if (current_function_return_rtx)
5542 {
5543 enum machine_mode mode;
5544 mode = GET_MODE (current_function_return_rtx);
5545 if (BASE_RETURN_VALUE_REG (mode) == FIRST_RET_REG)
5546 nreg = HARD_REGNO_NREGS (FIRST_RET_REG, mode);
5547 }
5548 for (i = 0; i < nreg; i++)
5549 CLEAR_HARD_REG_BIT (temps, FIRST_RET_REG + i);
5550 if (current_function_calls_eh_return)
5551 {
5552 CLEAR_HARD_REG_BIT (temps, EH_RETURN_STACKADJ_REGNO);
5553 for (i = 0; i <= 3; i++)
5554 CLEAR_HARD_REG_BIT (temps, EH_RETURN_DATA_REGNO (i));
5555 }
5556 }
5557 if (TARGET_SHMEDIA && epilogue_p < 0)
5558 for (i = FIRST_TARGET_REG; i <= LAST_TARGET_REG; i++)
5559 CLEAR_HARD_REG_BIT (temps, i);
5560 if (epilogue_p <= 0)
5561 {
5562 for (i = FIRST_PARM_REG;
5563 i < FIRST_PARM_REG + NPARM_REGS (SImode); i++)
5564 CLEAR_HARD_REG_BIT (temps, i);
5565 if (cfun->static_chain_decl != NULL)
5566 CLEAR_HARD_REG_BIT (temps, STATIC_CHAIN_REGNUM);
5567 }
5568 temp = scavenge_reg (&temps);
5569 }
5570 if (temp < 0 && live_regs_mask)
5571 {
5572 HARD_REG_SET temps;
5573
5574 COPY_HARD_REG_SET (temps, *live_regs_mask);
5575 CLEAR_HARD_REG_BIT (temps, REGNO (reg));
5576 temp = scavenge_reg (&temps);
5577 }
5578 if (temp < 0)
5579 {
5580 rtx adj_reg, tmp_reg, mem;
5581
5582 /* If we reached here, the most likely case is the (sibcall)
5583 epilogue for non-SHmedia. Put a special push/pop sequence
5584 for such a case as the last resort. This looks lengthy but
5585 should not be a problem because it seems to be very
5586 rare. */
5587
5588 gcc_assert (!TARGET_SHMEDIA && epilogue_p);
5589
5590
5591 /* ??? There is still the slight possibility that r4 or
5592 r5 have been reserved as fixed registers or assigned
5593 as global registers, and they change during an
5594 interrupt. There are possible ways to handle this:
5595
5596 - If we are adjusting the frame pointer (r14), we can do
5597 with a single temp register and an ordinary push / pop
5598 on the stack.
5599 - Grab any call-used or call-saved registers (i.e. not
5600 fixed or globals) for the temps we need. We might
5601 also grab r14 if we are adjusting the stack pointer.
5602 If we can't find enough available registers, issue
5603 a diagnostic and die - the user must have reserved
5604 way too many registers.
5605 But since all this is rather unlikely to happen and
5606 would require extra testing, we just die if r4 / r5
5607 are not available. */
5608 gcc_assert (!fixed_regs[4] && !fixed_regs[5]
5609 && !global_regs[4] && !global_regs[5]);
5610
5611 adj_reg = gen_rtx_REG (GET_MODE (reg), 4);
5612 tmp_reg = gen_rtx_REG (GET_MODE (reg), 5);
5613 emit_move_insn (gen_tmp_stack_mem (Pmode, reg), adj_reg);
5614 emit_insn (GEN_MOV (adj_reg, GEN_INT (size)));
5615 emit_insn (GEN_ADD3 (adj_reg, adj_reg, reg));
5616 mem = gen_tmp_stack_mem (Pmode, gen_rtx_PRE_DEC (Pmode, adj_reg));
5617 emit_move_insn (mem, tmp_reg);
5618 emit_move_insn (tmp_reg, gen_tmp_stack_mem (Pmode, reg));
5619 mem = gen_tmp_stack_mem (Pmode, gen_rtx_PRE_DEC (Pmode, adj_reg));
5620 emit_move_insn (mem, tmp_reg);
5621 emit_move_insn (reg, adj_reg);
5622 mem = gen_tmp_stack_mem (Pmode, gen_rtx_POST_INC (Pmode, reg));
5623 emit_move_insn (adj_reg, mem);
5624 mem = gen_tmp_stack_mem (Pmode, gen_rtx_POST_INC (Pmode, reg));
5625 emit_move_insn (tmp_reg, mem);
5626 /* Tell flow the insns that pop r4/r5 aren't dead. */
5627 emit_insn (gen_rtx_USE (VOIDmode, tmp_reg));
5628 emit_insn (gen_rtx_USE (VOIDmode, adj_reg));
5629 return;
5630 }
5631 const_reg = gen_rtx_REG (GET_MODE (reg), temp);
5632
5633 /* If SIZE is negative, subtract the positive value.
5634 This sometimes allows a constant pool entry to be shared
5635 between prologue and epilogue code. */
5636 if (size < 0)
5637 {
5638 emit_insn (GEN_MOV (const_reg, GEN_INT (-size)));
5639 insn = emit_fn (GEN_SUB3 (reg, reg, const_reg));
5640 }
5641 else
5642 {
5643 emit_insn (GEN_MOV (const_reg, GEN_INT (size)));
5644 insn = emit_fn (GEN_ADD3 (reg, reg, const_reg));
5645 }
5646 if (! epilogue_p)
5647 REG_NOTES (insn)
5648 = (gen_rtx_EXPR_LIST
5649 (REG_FRAME_RELATED_EXPR,
5650 gen_rtx_SET (VOIDmode, reg,
5651 gen_rtx_PLUS (SImode, reg, GEN_INT (size))),
5652 REG_NOTES (insn)));
5653 }
5654 }
5655 }
5656
5657 static rtx
5658 frame_insn (rtx x)
5659 {
5660 x = emit_insn (x);
5661 RTX_FRAME_RELATED_P (x) = 1;
5662 return x;
5663 }
5664
5665 /* Output RTL to push register RN onto the stack. */
5666
5667 static rtx
5668 push (int rn)
5669 {
5670 rtx x;
5671 if (rn == FPUL_REG)
5672 x = gen_push_fpul ();
5673 else if (rn == FPSCR_REG)
5674 x = gen_push_fpscr ();
5675 else if ((TARGET_SH4 || TARGET_SH2A_DOUBLE) && TARGET_FMOVD && ! TARGET_FPU_SINGLE
5676 && FP_OR_XD_REGISTER_P (rn))
5677 {
5678 if (FP_REGISTER_P (rn) && (rn - FIRST_FP_REG) & 1)
5679 return NULL_RTX;
5680 x = gen_push_4 (gen_rtx_REG (DFmode, rn));
5681 }
5682 else if (TARGET_SH2E && FP_REGISTER_P (rn))
5683 x = gen_push_e (gen_rtx_REG (SFmode, rn));
5684 else
5685 x = gen_push (gen_rtx_REG (SImode, rn));
5686
5687 x = frame_insn (x);
5688 REG_NOTES (x)
5689 = gen_rtx_EXPR_LIST (REG_INC,
5690 gen_rtx_REG (SImode, STACK_POINTER_REGNUM), 0);
5691 return x;
5692 }
5693
5694 /* Output RTL to pop register RN from the stack. */
5695
5696 static void
5697 pop (int rn)
5698 {
5699 rtx x;
5700 if (rn == FPUL_REG)
5701 x = gen_pop_fpul ();
5702 else if (rn == FPSCR_REG)
5703 x = gen_pop_fpscr ();
5704 else if ((TARGET_SH4 || TARGET_SH2A_DOUBLE) && TARGET_FMOVD && ! TARGET_FPU_SINGLE
5705 && FP_OR_XD_REGISTER_P (rn))
5706 {
5707 if (FP_REGISTER_P (rn) && (rn - FIRST_FP_REG) & 1)
5708 return;
5709 x = gen_pop_4 (gen_rtx_REG (DFmode, rn));
5710 }
5711 else if (TARGET_SH2E && FP_REGISTER_P (rn))
5712 x = gen_pop_e (gen_rtx_REG (SFmode, rn));
5713 else
5714 x = gen_pop (gen_rtx_REG (SImode, rn));
5715
5716 x = emit_insn (x);
5717 REG_NOTES (x)
5718 = gen_rtx_EXPR_LIST (REG_INC,
5719 gen_rtx_REG (SImode, STACK_POINTER_REGNUM), 0);
5720 }
5721
5722 /* Generate code to push the regs specified in the mask. */
5723
5724 static void
5725 push_regs (HARD_REG_SET *mask, int interrupt_handler)
5726 {
5727 int i = interrupt_handler ? LAST_BANKED_REG + 1 : 0;
5728 int skip_fpscr = 0;
5729
5730 /* Push PR last; this gives better latencies after the prologue, and
5731 candidates for the return delay slot when there are no general
5732 registers pushed. */
5733 for (; i < FIRST_PSEUDO_REGISTER; i++)
5734 {
5735 /* If this is an interrupt handler, and the SZ bit varies,
5736 and we have to push any floating point register, we need
5737 to switch to the correct precision first. */
5738 if (i == FIRST_FP_REG && interrupt_handler && TARGET_FMOVD
5739 && hard_reg_set_intersect_p (*mask, reg_class_contents[DF_REGS]))
5740 {
5741 HARD_REG_SET unsaved;
5742
5743 push (FPSCR_REG);
5744 COMPL_HARD_REG_SET (unsaved, *mask);
5745 fpscr_set_from_mem (NORMAL_MODE (FP_MODE), unsaved);
5746 skip_fpscr = 1;
5747 }
5748 if (i != PR_REG
5749 && (i != FPSCR_REG || ! skip_fpscr)
5750 && TEST_HARD_REG_BIT (*mask, i))
5751 push (i);
5752 }
5753
5754 /* Push banked registers last to improve delay slot opportunities. */
5755 if (interrupt_handler)
5756 for (i = FIRST_BANKED_REG; i <= LAST_BANKED_REG; i++)
5757 if (TEST_HARD_REG_BIT (*mask, i))
5758 push (i);
5759
5760 if (TEST_HARD_REG_BIT (*mask, PR_REG))
5761 push (PR_REG);
5762 }
5763
5764 /* Calculate how much extra space is needed to save all callee-saved
5765 target registers.
5766 LIVE_REGS_MASK is the register mask calculated by calc_live_regs. */
5767
5768 static int
5769 shmedia_target_regs_stack_space (HARD_REG_SET *live_regs_mask)
5770 {
5771 int reg;
5772 int stack_space = 0;
5773 int interrupt_handler = sh_cfun_interrupt_handler_p ();
5774
5775 for (reg = LAST_TARGET_REG; reg >= FIRST_TARGET_REG; reg--)
5776 if ((! call_really_used_regs[reg] || interrupt_handler)
5777 && ! TEST_HARD_REG_BIT (*live_regs_mask, reg))
5778 /* Leave space to save this target register on the stack,
5779 in case target register allocation wants to use it. */
5780 stack_space += GET_MODE_SIZE (REGISTER_NATURAL_MODE (reg));
5781 return stack_space;
5782 }
5783
5784 /* Decide whether we should reserve space for callee-save target registers,
5785 in case target register allocation wants to use them. REGS_SAVED is
5786 the space, in bytes, that is already required for register saves.
5787 LIVE_REGS_MASK is the register mask calculated by calc_live_regs. */
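/* In other words (illustrative reading of the test below): reserve the
   extra space only when it is no larger than the bytes already being
   spent on register saves, and never when optimizing for size.  */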
5788
5789 static int
5790 shmedia_reserve_space_for_target_registers_p (int regs_saved,
5791 HARD_REG_SET *live_regs_mask)
5792 {
5793 if (optimize_size)
5794 return 0;
5795 return shmedia_target_regs_stack_space (live_regs_mask) <= regs_saved;
5796 }
5797
5798 /* Decide how much space to reserve for callee-save target registers
5799 in case target register allocation wants to use them.
5800 LIVE_REGS_MASK is the register mask calculated by calc_live_regs. */
5801
5802 static int
5803 shmedia_target_regs_stack_adjust (HARD_REG_SET *live_regs_mask)
5804 {
5805 if (shmedia_space_reserved_for_target_registers)
5806 return shmedia_target_regs_stack_space (live_regs_mask);
5807 else
5808 return 0;
5809 }
5810
5811 /* Work out the registers which need to be saved, both as a mask and a
5812 count of saved words. Return the count.
5813
5814 If doing a pragma interrupt function, then push all regs used by the
5815 function, and if we call another function (we can tell by looking at PR),
5816 make sure that all the regs it clobbers are safe too. */
5817
5818 static int
5819 calc_live_regs (HARD_REG_SET *live_regs_mask)
5820 {
5821 unsigned int reg;
5822 int count;
5823 tree attrs;
5824 bool interrupt_or_trapa_handler, trapa_handler, interrupt_handler;
5825 bool nosave_low_regs;
5826 int pr_live, has_call;
5827
5828 attrs = DECL_ATTRIBUTES (current_function_decl);
5829 interrupt_or_trapa_handler = sh_cfun_interrupt_handler_p ();
5830 trapa_handler = lookup_attribute ("trapa_handler", attrs) != NULL_TREE;
5831 interrupt_handler = interrupt_or_trapa_handler && ! trapa_handler;
5832 nosave_low_regs = lookup_attribute ("nosave_low_regs", attrs) != NULL_TREE;
5833
5834 CLEAR_HARD_REG_SET (*live_regs_mask);
5835 if ((TARGET_SH4 || TARGET_SH2A_DOUBLE) && TARGET_FMOVD && interrupt_handler
5836 && df_regs_ever_live_p (FPSCR_REG))
5837 target_flags &= ~MASK_FPU_SINGLE;
5838 /* If switching to double mode lets us avoid a lot of saves, do that. */
5839 else if ((TARGET_SH4 || TARGET_SH2A_DOUBLE) && TARGET_FMOVD && TARGET_FPU_SINGLE)
5840 for (count = 0, reg = FIRST_FP_REG; reg <= LAST_FP_REG; reg += 2)
5841 if (df_regs_ever_live_p (reg) && df_regs_ever_live_p (reg+1)
5842 && (! call_really_used_regs[reg]
5843 || interrupt_handler)
5844 && ++count > 2)
5845 {
5846 target_flags &= ~MASK_FPU_SINGLE;
5847 break;
5848 }
5849 /* PR_MEDIA_REG is a general purpose register, thus global_alloc already
5850 knows how to use it. That means the pseudo originally allocated for
5851 the initial value can become the PR_MEDIA_REG hard register, as seen for
5852 execute/20010122-1.c:test9. */
5853 if (TARGET_SHMEDIA)
5854 /* ??? This function is called from initial_elimination_offset, hence we
5855 can't use the result of sh_media_register_for_return here. */
5856 pr_live = sh_pr_n_sets ();
5857 else
5858 {
5859 rtx pr_initial = has_hard_reg_initial_val (Pmode, PR_REG);
5860 pr_live = (pr_initial
5861 ? (GET_CODE (pr_initial) != REG
5862 || REGNO (pr_initial) != (PR_REG))
5863 : df_regs_ever_live_p (PR_REG));
5864 /* For SHcompact, if not optimizing, we end up with a memory reference
5865 using the return address pointer for __builtin_return_address even
5866 though there is no actual need to put the PR register on the stack. */
5867 pr_live |= df_regs_ever_live_p (RETURN_ADDRESS_POINTER_REGNUM);
5868 }
5869 /* Force PR to be live if the prologue has to call the SHmedia
5870 argument decoder or register saver. */
5871 if (TARGET_SHCOMPACT
5872 && ((current_function_args_info.call_cookie
5873 & ~ CALL_COOKIE_RET_TRAMP (1))
5874 || current_function_has_nonlocal_label))
5875 pr_live = 1;
5876 has_call = TARGET_SHMEDIA ? ! leaf_function_p () : pr_live;
5877 for (count = 0, reg = FIRST_PSEUDO_REGISTER; reg-- != 0; )
5878 {
5879 if (reg == (TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG)
5880 ? pr_live
5881 : interrupt_handler
5882 ? (/* Need to save all the regs ever live. */
5883 (df_regs_ever_live_p (reg)
5884 || (call_really_used_regs[reg]
5885 && (! fixed_regs[reg] || reg == MACH_REG || reg == MACL_REG
5886 || reg == PIC_OFFSET_TABLE_REGNUM)
5887 && has_call)
5888 || (TARGET_SHMEDIA && has_call
5889 && REGISTER_NATURAL_MODE (reg) == SImode
5890 && (GENERAL_REGISTER_P (reg) || TARGET_REGISTER_P (reg))))
5891 && reg != STACK_POINTER_REGNUM && reg != ARG_POINTER_REGNUM
5892 && reg != RETURN_ADDRESS_POINTER_REGNUM
5893 && reg != T_REG && reg != GBR_REG
5894 /* Push fpscr only on targets which have an FPU. */
5895 && (reg != FPSCR_REG || TARGET_FPU_ANY))
5896 : (/* Only push those regs which are used and need to be saved. */
5897 (TARGET_SHCOMPACT
5898 && flag_pic
5899 && current_function_args_info.call_cookie
5900 && reg == PIC_OFFSET_TABLE_REGNUM)
5901 || (df_regs_ever_live_p (reg)
5902 && (!call_really_used_regs[reg]
5903 || (trapa_handler && reg == FPSCR_REG && TARGET_FPU_ANY)))
5904 || (current_function_calls_eh_return
5905 && (reg == EH_RETURN_DATA_REGNO (0)
5906 || reg == EH_RETURN_DATA_REGNO (1)
5907 || reg == EH_RETURN_DATA_REGNO (2)
5908 || reg == EH_RETURN_DATA_REGNO (3)))
5909 || ((reg == MACL_REG || reg == MACH_REG)
5910 && df_regs_ever_live_p (reg)
5911 && sh_cfun_attr_renesas_p ())
5912 ))
5913 {
5914 SET_HARD_REG_BIT (*live_regs_mask, reg);
5915 count += GET_MODE_SIZE (REGISTER_NATURAL_MODE (reg));
5916
5917 if ((TARGET_SH4 || TARGET_SH2A_DOUBLE || TARGET_SH5) && TARGET_FMOVD
5918 && GET_MODE_CLASS (REGISTER_NATURAL_MODE (reg)) == MODE_FLOAT)
5919 {
5920 if (FP_REGISTER_P (reg))
5921 {
5922 if (! TARGET_FPU_SINGLE && ! df_regs_ever_live_p (reg ^ 1))
5923 {
5924 SET_HARD_REG_BIT (*live_regs_mask, (reg ^ 1));
5925 count += GET_MODE_SIZE (REGISTER_NATURAL_MODE (reg ^ 1));
5926 }
5927 }
5928 else if (XD_REGISTER_P (reg))
5929 {
5930 /* Must switch to double mode to access these registers. */
5931 target_flags &= ~MASK_FPU_SINGLE;
5932 }
5933 }
5934 }
5935 if (nosave_low_regs && reg == R8_REG)
5936 break;
5937 }
5938 /* If we have a target register optimization pass after prologue / epilogue
5939 threading, we need to assume all target registers will be live even if
5940 they aren't now. */
5941 if (flag_branch_target_load_optimize2
5942 && TARGET_SAVE_ALL_TARGET_REGS
5943 && shmedia_space_reserved_for_target_registers)
5944 for (reg = LAST_TARGET_REG; reg >= FIRST_TARGET_REG; reg--)
5945 if ((! call_really_used_regs[reg] || interrupt_handler)
5946 && ! TEST_HARD_REG_BIT (*live_regs_mask, reg))
5947 {
5948 SET_HARD_REG_BIT (*live_regs_mask, reg);
5949 count += GET_MODE_SIZE (REGISTER_NATURAL_MODE (reg));
5950 }
5951 /* If this is an interrupt handler, we don't have any call-clobbered
5952 registers we can conveniently use for target register save/restore.
5953 Make sure we save at least one general purpose register when we need
5954 to save target registers. */
5955 if (interrupt_handler
5956 && hard_reg_set_intersect_p (*live_regs_mask,
5957 reg_class_contents[TARGET_REGS])
5958 && ! hard_reg_set_intersect_p (*live_regs_mask,
5959 reg_class_contents[GENERAL_REGS]))
5960 {
5961 SET_HARD_REG_BIT (*live_regs_mask, R0_REG);
5962 count += GET_MODE_SIZE (REGISTER_NATURAL_MODE (R0_REG));
5963 }
5964
5965 return count;
5966 }
5967
5968 /* Code to generate prologue and epilogue sequences */
5969
5970 /* PUSHED is the number of bytes that are being pushed on the
5971 stack for register saves. Return the frame size, padded
5972 appropriately so that the stack stays properly aligned. */
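/* Illustrative example, assuming a 4-byte stack boundary: with 10 bytes
   of locals (get_frame_size ()) and PUSHED == 20, the result is
   ((10 + 20 + 3) & -4) - 20 == 12, so PUSHED plus the returned frame
   size is 32, a multiple of the alignment.  */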
5973 static HOST_WIDE_INT
5974 rounded_frame_size (int pushed)
5975 {
5976 HOST_WIDE_INT size = get_frame_size ();
5977 HOST_WIDE_INT align = STACK_BOUNDARY / BITS_PER_UNIT;
5978
5979 return ((size + pushed + align - 1) & -align) - pushed;
5980 }
5981
5982 /* Choose a call-clobbered target-branch register that remains
5983 unchanged along the whole function. We set it up as the return
5984 value in the prologue. */
5985 int
5986 sh_media_register_for_return (void)
5987 {
5988 int regno;
5989 int tr0_used;
5990
5991 if (! current_function_is_leaf)
5992 return -1;
5993 if (lookup_attribute ("interrupt_handler",
5994 DECL_ATTRIBUTES (current_function_decl)))
5995 return -1;
5996 if (sh_cfun_interrupt_handler_p ())
5997 return -1;
5998
5999 tr0_used = flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM);
6000
6001 for (regno = FIRST_TARGET_REG + tr0_used; regno <= LAST_TARGET_REG; regno++)
6002 if (call_really_used_regs[regno] && ! df_regs_ever_live_p (regno))
6003 return regno;
6004
6005 return -1;
6006 }
6007
6008 /* The maximum registers we need to save are:
6009 - 62 general purpose registers (r15 is stack pointer, r63 is zero)
6010 - 32 floating point registers (for each pair, we save none,
6011 one single precision value, or a double precision value).
6012 - 8 target registers
6013 - add 1 entry for a delimiter. */
6014 #define MAX_SAVED_REGS (62+32+8)
6015
6016 typedef struct save_entry_s
6017 {
6018 unsigned char reg;
6019 unsigned char mode;
6020 short offset;
6021 } save_entry;
6022
6023 #define MAX_TEMPS 4
6024
6025 /* There will be a delimiter entry with VOIDmode both at the start and the
6026 end of a filled in schedule. The end delimiter has the offset of the
6027 save with the smallest (i.e. most negative) offset. */
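/* Sketch of a filled-in schedule (illustrative): entries[0] is the
   start delimiter { reg = -1, mode = VOIDmode, offset = OFFSET_BASE },
   each following entry records one save at a strictly decreasing
   offset, and the entry after the last save is the end delimiter,
   again with VOIDmode, holding the lowest offset used.  */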
6028 typedef struct save_schedule_s
6029 {
6030 save_entry entries[MAX_SAVED_REGS + 2];
6031 int temps[MAX_TEMPS+1];
6032 } save_schedule;
6033
6034 /* Fill in SCHEDULE according to LIVE_REGS_MASK. If RESTORE is nonzero,
6035 use reverse order. Returns the last entry written to (not counting
6036 the delimiter). OFFSET_BASE is a number to be added to all offset
6037 entries. */
6038
6039 static save_entry *
6040 sh5_schedule_saves (HARD_REG_SET *live_regs_mask, save_schedule *schedule,
6041 int offset_base)
6042 {
6043 int align, i;
6044 save_entry *entry = schedule->entries;
6045 int tmpx = 0;
6046 int offset;
6047
6048 if (! current_function_interrupt)
6049 for (i = FIRST_GENERAL_REG; tmpx < MAX_TEMPS && i <= LAST_GENERAL_REG; i++)
6050 if (call_really_used_regs[i] && ! fixed_regs[i] && i != PR_MEDIA_REG
6051 && ! FUNCTION_ARG_REGNO_P (i)
6052 && i != FIRST_RET_REG
6053 && ! (cfun->static_chain_decl != NULL && i == STATIC_CHAIN_REGNUM)
6054 && ! (current_function_calls_eh_return
6055 && (i == EH_RETURN_STACKADJ_REGNO
6056 || ((unsigned) i >= EH_RETURN_DATA_REGNO (0)
6057 && (unsigned) i <= EH_RETURN_DATA_REGNO (3)))))
6058 schedule->temps[tmpx++] = i;
6059 entry->reg = -1;
6060 entry->mode = VOIDmode;
6061 entry->offset = offset_base;
6062 entry++;
6063 /* We loop twice: first, we save the 8-byte aligned registers at the
6064 higher addresses, which are known to be aligned. Then, we
6065 proceed to saving 32-bit registers that don't need 8-byte
6066 alignment.
6067 If this is an interrupt function, all registers that need saving
6068 need to be saved in full. Moreover, we need to postpone saving
6069 target registers until we have saved some general purpose registers
6070 we can then use as scratch registers. */
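/* E.g. (illustrative): the ALIGN == 1 pass below places the DImode and
   DFmode saves, whose size is a multiple of the stack boundary, at the
   higher (less negative) offsets; the ALIGN == 0 pass then fills in the
   remaining SImode / SFmode saves below them.  */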
6071 offset = offset_base;
6072 for (align = 1; align >= 0; align--)
6073 {
6074 for (i = FIRST_PSEUDO_REGISTER - 1; i >= 0; i--)
6075 if (TEST_HARD_REG_BIT (*live_regs_mask, i))
6076 {
6077 enum machine_mode mode = REGISTER_NATURAL_MODE (i);
6078 int reg = i;
6079
6080 if (current_function_interrupt)
6081 {
6082 if (TARGET_REGISTER_P (i))
6083 continue;
6084 if (GENERAL_REGISTER_P (i))
6085 mode = DImode;
6086 }
6087 if (mode == SFmode && (i % 2) == 1
6088 && ! TARGET_FPU_SINGLE && FP_REGISTER_P (i)
6089 && (TEST_HARD_REG_BIT (*live_regs_mask, (i ^ 1))))
6090 {
6091 mode = DFmode;
6092 i--;
6093 reg--;
6094 }
6095
6096 /* If we're doing the aligned pass and this is not aligned,
6097 or we're doing the unaligned pass and this is aligned,
6098 skip it. */
6099 if ((GET_MODE_SIZE (mode) % (STACK_BOUNDARY / BITS_PER_UNIT) == 0)
6100 != align)
6101 continue;
6102
6103 if (current_function_interrupt
6104 && GENERAL_REGISTER_P (i)
6105 && tmpx < MAX_TEMPS)
6106 schedule->temps[tmpx++] = i;
6107
6108 offset -= GET_MODE_SIZE (mode);
6109 entry->reg = i;
6110 entry->mode = mode;
6111 entry->offset = offset;
6112 entry++;
6113 }
6114 if (align && current_function_interrupt)
6115 for (i = LAST_TARGET_REG; i >= FIRST_TARGET_REG; i--)
6116 if (TEST_HARD_REG_BIT (*live_regs_mask, i))
6117 {
6118 offset -= GET_MODE_SIZE (DImode);
6119 entry->reg = i;
6120 entry->mode = DImode;
6121 entry->offset = offset;
6122 entry++;
6123 }
6124 }
6125 entry->reg = -1;
6126 entry->mode = VOIDmode;
6127 entry->offset = offset;
6128 schedule->temps[tmpx] = -1;
6129 return entry - 1;
6130 }
6131
6132 void
6133 sh_expand_prologue (void)
6134 {
6135 HARD_REG_SET live_regs_mask;
6136 int d, i;
6137 int d_rounding = 0;
6138 int save_flags = target_flags;
6139 int pretend_args;
6140 tree sp_switch_attr
6141 = lookup_attribute ("sp_switch", DECL_ATTRIBUTES (current_function_decl));
6142
6143 current_function_interrupt = sh_cfun_interrupt_handler_p ();
6144
6145 /* We have pretend args if we had an object sent partially in registers
6146 and partially on the stack, e.g. a large structure. */
6147 pretend_args = current_function_pretend_args_size;
6148 if (TARGET_VARARGS_PRETEND_ARGS (current_function_decl)
6149 && (NPARM_REGS(SImode)
6150 > current_function_args_info.arg_count[(int) SH_ARG_INT]))
6151 pretend_args = 0;
6152 output_stack_adjust (-pretend_args
6153 - current_function_args_info.stack_regs * 8,
6154 stack_pointer_rtx, 0, NULL);
6155
6156 if (TARGET_SHCOMPACT && flag_pic && current_function_args_info.call_cookie)
6157 /* We're going to use the PIC register to load the address of the
6158 incoming-argument decoder and/or of the return trampoline from
6159 the GOT, so make sure the PIC register is preserved and
6160 initialized. */
6161 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
6162
6163 if (TARGET_SHCOMPACT
6164 && (current_function_args_info.call_cookie & ~ CALL_COOKIE_RET_TRAMP(1)))
6165 {
6166 int reg;
6167
6168 /* First, make all registers with incoming arguments that will
6169 be pushed onto the stack live, so that register renaming
6170 doesn't overwrite them. */
6171 for (reg = 0; reg < NPARM_REGS (SImode); reg++)
6172 if (CALL_COOKIE_STACKSEQ_GET (current_function_args_info.call_cookie)
6173 >= NPARM_REGS (SImode) - reg)
6174 for (; reg < NPARM_REGS (SImode); reg++)
6175 emit_insn (gen_shcompact_preserve_incoming_args
6176 (gen_rtx_REG (SImode, FIRST_PARM_REG + reg)));
6177 else if (CALL_COOKIE_INT_REG_GET
6178 (current_function_args_info.call_cookie, reg) == 1)
6179 emit_insn (gen_shcompact_preserve_incoming_args
6180 (gen_rtx_REG (SImode, FIRST_PARM_REG + reg)));
6181
6182 emit_move_insn (gen_rtx_REG (Pmode, MACL_REG),
6183 stack_pointer_rtx);
6184 emit_move_insn (gen_rtx_REG (SImode, R0_REG),
6185 GEN_INT (current_function_args_info.call_cookie));
6186 emit_move_insn (gen_rtx_REG (SImode, MACH_REG),
6187 gen_rtx_REG (SImode, R0_REG));
6188 }
6189 else if (TARGET_SHMEDIA)
6190 {
6191 int tr = sh_media_register_for_return ();
6192
6193 if (tr >= 0)
6194 emit_move_insn (gen_rtx_REG (DImode, tr),
6195 gen_rtx_REG (DImode, PR_MEDIA_REG));
6196 }
6197
6198 /* Emit the code for SETUP_VARARGS. */
6199 if (current_function_stdarg)
6200 {
6201 if (TARGET_VARARGS_PRETEND_ARGS (current_function_decl))
6202 {
6203 /* Push arg regs as if they'd been provided by the caller on the stack. */
6204 for (i = 0; i < NPARM_REGS(SImode); i++)
6205 {
6206 int rn = NPARM_REGS(SImode) + FIRST_PARM_REG - i - 1;
6207 rtx insn;
6208
6209 if (i >= (NPARM_REGS(SImode)
6210 - current_function_args_info.arg_count[(int) SH_ARG_INT]
6211 ))
6212 break;
6213 insn = push (rn);
6214 RTX_FRAME_RELATED_P (insn) = 0;
6215 }
6216 }
6217 }
6218
6219 /* If we're supposed to switch stacks at function entry, do so now. */
6220 if (sp_switch_attr)
6221 {
6222 /* The argument specifies a variable holding the address of the
6223 stack the interrupt function should switch to/from at entry/exit. */
6224 const char *s
6225 = ggc_strdup (TREE_STRING_POINTER (TREE_VALUE (sp_switch_attr)));
6226 rtx sp_switch = gen_rtx_SYMBOL_REF (Pmode, s);
6227
6228 emit_insn (gen_sp_switch_1 (sp_switch));
6229 }
6230
6231 d = calc_live_regs (&live_regs_mask);
6232 /* ??? Maybe we could save some switching if we can move a mode switch
6233 that already happens to be at the function start into the prologue. */
6234 if (target_flags != save_flags && ! current_function_interrupt)
6235 emit_insn (gen_toggle_sz ());
6236
6237 if (TARGET_SH5)
6238 {
6239 int offset_base, offset;
6240 rtx r0 = NULL_RTX;
6241 int offset_in_r0 = -1;
6242 int sp_in_r0 = 0;
6243 int tregs_space = shmedia_target_regs_stack_adjust (&live_regs_mask);
6244 int total_size, save_size;
6245 save_schedule schedule;
6246 save_entry *entry;
6247 int *tmp_pnt;
6248
6249 if (call_really_used_regs[R0_REG] && ! fixed_regs[R0_REG]
6250 && ! current_function_interrupt)
6251 r0 = gen_rtx_REG (Pmode, R0_REG);
6252
6253 /* D is the actual number of bytes that we need for saving registers;
6254 however, in initial_elimination_offset we have committed to using
6255 an additional TREGS_SPACE amount of bytes. In order to keep both
6256 addresses to arguments supplied by the caller and local variables
6257 valid, we must keep this gap. Place it between the incoming
6258 arguments and the actually saved registers in a bid to optimize
6259 locality of reference. */
6260 total_size = d + tregs_space;
6261 total_size += rounded_frame_size (total_size);
6262 save_size = total_size - rounded_frame_size (d);
6263 if (save_size % (STACK_BOUNDARY / BITS_PER_UNIT))
6264 d_rounding = ((STACK_BOUNDARY / BITS_PER_UNIT)
6265 - save_size % (STACK_BOUNDARY / BITS_PER_UNIT));
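/* Illustrative numbers, assuming get_frame_size () == 0 and an 8-byte
   stack boundary: with D == 12 and TREGS_SPACE == 8, TOTAL_SIZE becomes
   24, SAVE_SIZE becomes 20, and D_ROUNDING is 4, so the register-save
   adjustment below is -24 and the later frame adjustment is 0.  */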
6266
6267 /* If adjusting the stack in a single step costs nothing extra, do so.
6268 I.e. either if a single addi is enough, or we need a movi anyway,
6269 and we don't exceed the maximum offset range (the test for the
6270 latter is conservative for simplicity). */
6271 if (TARGET_SHMEDIA
6272 && (CONST_OK_FOR_I10 (-total_size)
6273 || (! CONST_OK_FOR_I10 (-(save_size + d_rounding))
6274 && total_size <= 2044)))
6275 d_rounding = total_size - save_size;
6276
6277 offset_base = d + d_rounding;
6278
6279 output_stack_adjust (-(save_size + d_rounding), stack_pointer_rtx,
6280 0, NULL);
6281
6282 sh5_schedule_saves (&live_regs_mask, &schedule, offset_base);
6283 tmp_pnt = schedule.temps;
6284 for (entry = &schedule.entries[1]; entry->mode != VOIDmode; entry++)
6285 {
6286 enum machine_mode mode = entry->mode;
6287 unsigned int reg = entry->reg;
6288 rtx reg_rtx, mem_rtx, pre_dec = NULL_RTX;
6289 rtx orig_reg_rtx;
6290
6291 offset = entry->offset;
6292
6293 reg_rtx = gen_rtx_REG (mode, reg);
6294
6295 mem_rtx = gen_frame_mem (mode,
6296 gen_rtx_PLUS (Pmode,
6297 stack_pointer_rtx,
6298 GEN_INT (offset)));
6299
6300 GO_IF_LEGITIMATE_ADDRESS (mode, XEXP (mem_rtx, 0), try_pre_dec);
6301
6302 gcc_assert (r0);
6303 mem_rtx = NULL_RTX;
6304
6305 try_pre_dec:
6306 do
6307 if (HAVE_PRE_DECREMENT
6308 && (offset_in_r0 - offset == GET_MODE_SIZE (mode)
6309 || mem_rtx == NULL_RTX
6310 || reg == PR_REG || SPECIAL_REGISTER_P (reg)))
6311 {
6312 pre_dec = gen_frame_mem (mode, gen_rtx_PRE_DEC (Pmode, r0));
6313
6314 GO_IF_LEGITIMATE_ADDRESS (mode, XEXP (pre_dec, 0),
6315 pre_dec_ok);
6316
6317 pre_dec = NULL_RTX;
6318
6319 break;
6320
6321 pre_dec_ok:
6322 mem_rtx = NULL_RTX;
6323 offset += GET_MODE_SIZE (mode);
6324 }
6325 while (0);
6326
6327 if (mem_rtx != NULL_RTX)
6328 goto addr_ok;
6329
6330 if (offset_in_r0 == -1)
6331 {
6332 emit_move_insn (r0, GEN_INT (offset));
6333 offset_in_r0 = offset;
6334 }
6335 else if (offset != offset_in_r0)
6336 {
6337 emit_move_insn (r0,
6338 gen_rtx_PLUS
6339 (Pmode, r0,
6340 GEN_INT (offset - offset_in_r0)));
6341 offset_in_r0 += offset - offset_in_r0;
6342 }
6343
6344 if (pre_dec != NULL_RTX)
6345 {
6346 if (! sp_in_r0)
6347 {
6348 emit_move_insn (r0,
6349 gen_rtx_PLUS
6350 (Pmode, r0, stack_pointer_rtx));
6351 sp_in_r0 = 1;
6352 }
6353
6354 offset -= GET_MODE_SIZE (mode);
6355 offset_in_r0 -= GET_MODE_SIZE (mode);
6356
6357 mem_rtx = pre_dec;
6358 }
6359 else if (sp_in_r0)
6360 mem_rtx = gen_frame_mem (mode, r0);
6361 else
6362 mem_rtx = gen_frame_mem (mode,
6363 gen_rtx_PLUS (Pmode,
6364 stack_pointer_rtx,
6365 r0));
6366
6367 /* We must not use an r0-based address for target-branch
6368 registers or for special registers without pre-dec
6369 memory addresses, since we store their values in r0
6370 first. */
6371 gcc_assert (!TARGET_REGISTER_P (reg)
6372 && ((reg != PR_REG && !SPECIAL_REGISTER_P (reg))
6373 || mem_rtx == pre_dec));
6374
6375 addr_ok:
6376 orig_reg_rtx = reg_rtx;
6377 if (TARGET_REGISTER_P (reg)
6378 || ((reg == PR_REG || SPECIAL_REGISTER_P (reg))
6379 && mem_rtx != pre_dec))
6380 {
6381 rtx tmp_reg = gen_rtx_REG (GET_MODE (reg_rtx), *tmp_pnt);
6382
6383 emit_move_insn (tmp_reg, reg_rtx);
6384
6385 if (REGNO (tmp_reg) == R0_REG)
6386 {
6387 offset_in_r0 = -1;
6388 sp_in_r0 = 0;
6389 gcc_assert (!refers_to_regno_p
6390 (R0_REG, R0_REG+1, mem_rtx, (rtx *) 0));
6391 }
6392
6393 if (*++tmp_pnt <= 0)
6394 tmp_pnt = schedule.temps;
6395
6396 reg_rtx = tmp_reg;
6397 }
6398 {
6399 rtx insn;
6400
6401 /* Mark as interesting for the dwarf CFI generator. */
6402 insn = emit_move_insn (mem_rtx, reg_rtx);
6403 RTX_FRAME_RELATED_P (insn) = 1;
6404 /* If we use an intermediate register for the save, we can't
6405 describe this exactly in CFI as a copy of the to-be-saved
6406 register into the temporary register followed by a store of the
6407 temporary register to the stack, because the temporary register can
6408 have a different natural size than the to-be-saved register.
6409 Thus, we gloss over the intermediate copy and pretend we do
6410 a direct save from the to-be-saved register. */
6411 if (REGNO (reg_rtx) != reg)
6412 {
6413 rtx set, note_rtx;
6414
6415 set = gen_rtx_SET (VOIDmode, mem_rtx, orig_reg_rtx);
6416 note_rtx = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, set,
6417 REG_NOTES (insn));
6418 REG_NOTES (insn) = note_rtx;
6419 }
6420
6421 if (TARGET_SHCOMPACT && (offset_in_r0 != -1))
6422 {
6423 rtx reg_rtx = gen_rtx_REG (mode, reg);
6424 rtx set, note_rtx;
6425 rtx mem_rtx = gen_frame_mem (mode,
6426 gen_rtx_PLUS (Pmode,
6427 stack_pointer_rtx,
6428 GEN_INT (offset)));
6429
6430 set = gen_rtx_SET (VOIDmode, mem_rtx, reg_rtx);
6431 note_rtx = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, set,
6432 REG_NOTES (insn));
6433 REG_NOTES (insn) = note_rtx;
6434 }
6435 }
6436 }
6437
6438 gcc_assert (entry->offset == d_rounding);
6439 }
6440 else
6441 push_regs (&live_regs_mask, current_function_interrupt);
6442
6443 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
6444 emit_insn (gen_GOTaddr2picreg ());
6445
6446 if (SHMEDIA_REGS_STACK_ADJUST ())
6447 {
6448 /* This must NOT go through the PLT, otherwise mach and macl
6449 may be clobbered. */
6450 function_symbol (gen_rtx_REG (Pmode, R0_REG),
6451 (TARGET_FPU_ANY
6452 ? "__GCC_push_shmedia_regs"
6453 : "__GCC_push_shmedia_regs_nofpu"), SFUNC_GOT);
6454 emit_insn (gen_shmedia_save_restore_regs_compact
6455 (GEN_INT (-SHMEDIA_REGS_STACK_ADJUST ())));
6456 }
6457
6458 if (target_flags != save_flags && ! current_function_interrupt)
6459 emit_insn (gen_toggle_sz ());
6460
6461 target_flags = save_flags;
6462
6463 output_stack_adjust (-rounded_frame_size (d) + d_rounding,
6464 stack_pointer_rtx, 0, NULL);
6465
6466 if (frame_pointer_needed)
6467 frame_insn (GEN_MOV (hard_frame_pointer_rtx, stack_pointer_rtx));
6468
6469 if (TARGET_SHCOMPACT
6470 && (current_function_args_info.call_cookie & ~ CALL_COOKIE_RET_TRAMP(1)))
6471 {
6472 /* This must NOT go through the PLT, otherwise mach and macl
6473 may be clobbered. */
6474 function_symbol (gen_rtx_REG (Pmode, R0_REG),
6475 "__GCC_shcompact_incoming_args", SFUNC_GOT);
6476 emit_insn (gen_shcompact_incoming_args ());
6477 }
6478 }
6479
6480 void
6481 sh_expand_epilogue (bool sibcall_p)
6482 {
6483 HARD_REG_SET live_regs_mask;
6484 int d, i;
6485 int d_rounding = 0;
6486
6487 int save_flags = target_flags;
6488 int frame_size, save_size;
6489 int fpscr_deferred = 0;
6490 int e = sibcall_p ? -1 : 1;
6491
6492 d = calc_live_regs (&live_regs_mask);
6493
6494 save_size = d;
6495 frame_size = rounded_frame_size (d);
6496
6497 if (TARGET_SH5)
6498 {
6499 int tregs_space = shmedia_target_regs_stack_adjust (&live_regs_mask);
6500 int total_size;
6501 if (d % (STACK_BOUNDARY / BITS_PER_UNIT))
6502 d_rounding = ((STACK_BOUNDARY / BITS_PER_UNIT)
6503 - d % (STACK_BOUNDARY / BITS_PER_UNIT));
6504
6505 total_size = d + tregs_space;
6506 total_size += rounded_frame_size (total_size);
6507 save_size = total_size - frame_size;
6508
6509 /* If adjusting the stack in a single step costs nothing extra, do so.
6510 I.e. either if a single addi is enough, or we need a movi anyway,
6511 and we don't exceed the maximum offset range (the test for the
6512 latter is conservative for simplicity). */
6513 if (TARGET_SHMEDIA
6514 && ! frame_pointer_needed
6515 && (CONST_OK_FOR_I10 (total_size)
6516 || (! CONST_OK_FOR_I10 (save_size + d_rounding)
6517 && total_size <= 2044)))
6518 d_rounding = frame_size;
6519
6520 frame_size -= d_rounding;
6521 }
6522
6523 if (frame_pointer_needed)
6524 {
6525 /* We must avoid scheduling the epilogue with previous basic blocks
6526 when exception handling is enabled. See PR/18032. */
6527 if (flag_exceptions)
6528 emit_insn (gen_blockage ());
6529 output_stack_adjust (frame_size, hard_frame_pointer_rtx, e,
6530 &live_regs_mask);
6531
6532 /* We must avoid moving the stack pointer adjustment past code
6533 which reads from the local frame, else an interrupt could
6534 occur after the SP adjustment and clobber data in the local
6535 frame. */
6536 emit_insn (gen_blockage ());
6537 emit_insn (GEN_MOV (stack_pointer_rtx, hard_frame_pointer_rtx));
6538 }
6539 else if (frame_size)
6540 {
6541 /* We must avoid moving the stack pointer adjustment past code
6542 which reads from the local frame, else an interrupt could
6543 occur after the SP adjustment and clobber data in the local
6544 frame. */
6545 emit_insn (gen_blockage ());
6546 output_stack_adjust (frame_size, stack_pointer_rtx, e, &live_regs_mask);
6547 }
6548
6549 if (SHMEDIA_REGS_STACK_ADJUST ())
6550 {
6551 function_symbol (gen_rtx_REG (Pmode, R0_REG),
6552 (TARGET_FPU_ANY
6553 ? "__GCC_pop_shmedia_regs"
6554 : "__GCC_pop_shmedia_regs_nofpu"), SFUNC_GOT);
6555 /* This must NOT go through the PLT, otherwise mach and macl
6556 may be clobbered. */
6557 emit_insn (gen_shmedia_save_restore_regs_compact
6558 (GEN_INT (SHMEDIA_REGS_STACK_ADJUST ())));
6559 }
6560
6561 /* Pop all the registers. */
6562
6563 if (target_flags != save_flags && ! current_function_interrupt)
6564 emit_insn (gen_toggle_sz ());
6565 if (TARGET_SH5)
6566 {
6567 int offset_base, offset;
6568 int offset_in_r0 = -1;
6569 int sp_in_r0 = 0;
6570 rtx r0 = gen_rtx_REG (Pmode, R0_REG);
6571 save_schedule schedule;
6572 save_entry *entry;
6573 int *tmp_pnt;
6574
6575 entry = sh5_schedule_saves (&live_regs_mask, &schedule, d_rounding);
6576 offset_base = -entry[1].offset + d_rounding;
6577 tmp_pnt = schedule.temps;
6578 for (; entry->mode != VOIDmode; entry--)
6579 {
6580 enum machine_mode mode = entry->mode;
6581 int reg = entry->reg;
6582 rtx reg_rtx, mem_rtx, post_inc = NULL_RTX, insn;
6583
6584 offset = offset_base + entry->offset;
6585 reg_rtx = gen_rtx_REG (mode, reg);
6586
6587 mem_rtx = gen_frame_mem (mode,
6588 gen_rtx_PLUS (Pmode,
6589 stack_pointer_rtx,
6590 GEN_INT (offset)));
6591
6592 GO_IF_LEGITIMATE_ADDRESS (mode, XEXP (mem_rtx, 0), try_post_inc);
6593
6594 mem_rtx = NULL_RTX;
6595
6596 try_post_inc:
6597 do
6598 if (HAVE_POST_INCREMENT
6599 && (offset == offset_in_r0
6600 || (offset + GET_MODE_SIZE (mode) != d + d_rounding
6601 && mem_rtx == NULL_RTX)
6602 || reg == PR_REG || SPECIAL_REGISTER_P (reg)))
6603 {
6604 post_inc = gen_frame_mem (mode, gen_rtx_POST_INC (Pmode, r0));
6605
6606 GO_IF_LEGITIMATE_ADDRESS (mode, XEXP (post_inc, 0),
6607 post_inc_ok);
6608
6609 post_inc = NULL_RTX;
6610
6611 break;
6612
6613 post_inc_ok:
6614 mem_rtx = NULL_RTX;
6615 }
6616 while (0);
6617
6618 if (mem_rtx != NULL_RTX)
6619 goto addr_ok;
6620
6621 if (offset_in_r0 == -1)
6622 {
6623 emit_move_insn (r0, GEN_INT (offset));
6624 offset_in_r0 = offset;
6625 }
6626 else if (offset != offset_in_r0)
6627 {
6628 emit_move_insn (r0,
6629 gen_rtx_PLUS
6630 (Pmode, r0,
6631 GEN_INT (offset - offset_in_r0)));
6632 offset_in_r0 += offset - offset_in_r0;
6633 }
6634
6635 if (post_inc != NULL_RTX)
6636 {
6637 if (! sp_in_r0)
6638 {
6639 emit_move_insn (r0,
6640 gen_rtx_PLUS
6641 (Pmode, r0, stack_pointer_rtx));
6642 sp_in_r0 = 1;
6643 }
6644
6645 mem_rtx = post_inc;
6646
6647 offset_in_r0 += GET_MODE_SIZE (mode);
6648 }
6649 else if (sp_in_r0)
6650 mem_rtx = gen_frame_mem (mode, r0);
6651 else
6652 mem_rtx = gen_frame_mem (mode,
6653 gen_rtx_PLUS (Pmode,
6654 stack_pointer_rtx,
6655 r0));
6656
6657 gcc_assert ((reg != PR_REG && !SPECIAL_REGISTER_P (reg))
6658 || mem_rtx == post_inc);
6659
6660 addr_ok:
6661 if ((reg == PR_REG || SPECIAL_REGISTER_P (reg))
6662 && mem_rtx != post_inc)
6663 {
6664 insn = emit_move_insn (r0, mem_rtx);
6665 mem_rtx = r0;
6666 }
6667 else if (TARGET_REGISTER_P (reg))
6668 {
6669 rtx tmp_reg = gen_rtx_REG (mode, *tmp_pnt);
6670
6671 /* Give the scheduler a bit of freedom by using up to
6672 MAX_TEMPS registers in a round-robin fashion. */
6673 insn = emit_move_insn (tmp_reg, mem_rtx);
6674 mem_rtx = tmp_reg;
6675 if (*++tmp_pnt < 0)
6676 tmp_pnt = schedule.temps;
6677 }
6678
6679 insn = emit_move_insn (reg_rtx, mem_rtx);
6680 }
6681
6682 gcc_assert (entry->offset + offset_base == d + d_rounding);
6683 }
6684 else /* ! TARGET_SH5 */
6685 {
6686 int last_reg;
6687
6688 save_size = 0;
6689 if (TEST_HARD_REG_BIT (live_regs_mask, PR_REG))
6690 {
6691 if (!frame_pointer_needed)
6692 emit_insn (gen_blockage ());
6693 pop (PR_REG);
6694 }
6695
6696 /* Banked registers are popped first to avoid being scheduled in the
6697 delay slot. RTE switches banks before the delay slot instruction. */
6698 if (current_function_interrupt)
6699 {
6700 for (i = FIRST_BANKED_REG; i <= LAST_BANKED_REG; i++)
6701 if (TEST_HARD_REG_BIT (live_regs_mask, i))
6702 pop (LAST_BANKED_REG - i);
6703
6704 last_reg = FIRST_PSEUDO_REGISTER - LAST_BANKED_REG - 1;
6705 }
6706 else
6707 last_reg = FIRST_PSEUDO_REGISTER;
6708
6709 for (i = 0; i < last_reg; i++)
6710 {
6711 int j = (FIRST_PSEUDO_REGISTER - 1) - i;
6712
6713 if (j == FPSCR_REG && current_function_interrupt && TARGET_FMOVD
6714 && hard_reg_set_intersect_p (live_regs_mask,
6715 reg_class_contents[DF_REGS]))
6716 fpscr_deferred = 1;
6717 else if (j != PR_REG && TEST_HARD_REG_BIT (live_regs_mask, j))
6718 pop (j);
6719
6720 if (j == FIRST_FP_REG && fpscr_deferred)
6721 pop (FPSCR_REG);
6722 }
6723 }
6724 if (target_flags != save_flags && ! current_function_interrupt)
6725 emit_insn (gen_toggle_sz ());
6726 target_flags = save_flags;
6727
6728 output_stack_adjust (current_function_pretend_args_size
6729 + save_size + d_rounding
6730 + current_function_args_info.stack_regs * 8,
6731 stack_pointer_rtx, e, NULL);
6732
6733 if (current_function_calls_eh_return)
6734 emit_insn (GEN_ADD3 (stack_pointer_rtx, stack_pointer_rtx,
6735 EH_RETURN_STACKADJ_RTX));
6736
6737 /* Switch back to the normal stack if necessary. */
6738 if (lookup_attribute ("sp_switch", DECL_ATTRIBUTES (current_function_decl)))
6739 emit_insn (gen_sp_switch_2 ());
6740
6741 /* Tell flow the insn that pops PR isn't dead. */
6742 /* PR_REG will never be live in SHmedia mode, and we don't need to
6743 USE PR_MEDIA_REG, since it will be explicitly copied to TR0_REG
6744 by the return pattern. */
6745 if (TEST_HARD_REG_BIT (live_regs_mask, PR_REG))
6746 emit_insn (gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, PR_REG)));
6747 }
6748
6749 static int sh_need_epilogue_known = 0;
6750
6751 int
6752 sh_need_epilogue (void)
6753 {
6754 if (! sh_need_epilogue_known)
6755 {
6756 rtx epilogue;
6757
6758 start_sequence ();
6759 sh_expand_epilogue (0);
6760 epilogue = get_insns ();
6761 end_sequence ();
6762 sh_need_epilogue_known = (epilogue == NULL ? -1 : 1);
6763 }
6764 return sh_need_epilogue_known > 0;
6765 }
6766
6767 /* Emit code to change the current function's return address to RA.
6768 TEMP is available as a scratch register, if needed. */
6769
6770 void
6771 sh_set_return_address (rtx ra, rtx tmp)
6772 {
6773 HARD_REG_SET live_regs_mask;
6774 int d;
6775 int pr_reg = TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG;
6776 int pr_offset;
6777
6778 d = calc_live_regs (&live_regs_mask);
6779
6780 /* If pr_reg isn't live, we can set it (or the register given in
6781 sh_media_register_for_return) directly. */
6782 if (! TEST_HARD_REG_BIT (live_regs_mask, pr_reg))
6783 {
6784 rtx rr;
6785
6786 if (TARGET_SHMEDIA)
6787 {
6788 int rr_regno = sh_media_register_for_return ();
6789
6790 if (rr_regno < 0)
6791 rr_regno = pr_reg;
6792
6793 rr = gen_rtx_REG (DImode, rr_regno);
6794 }
6795 else
6796 rr = gen_rtx_REG (SImode, pr_reg);
6797
6798 emit_insn (GEN_MOV (rr, ra));
6799 /* Tell flow the register for return isn't dead. */
6800 emit_insn (gen_rtx_USE (VOIDmode, rr));
6801 return;
6802 }
6803
6804 if (TARGET_SH5)
6805 {
6806 int offset;
6807 save_schedule schedule;
6808 save_entry *entry;
6809
6810 entry = sh5_schedule_saves (&live_regs_mask, &schedule, 0);
6811 offset = entry[1].offset;
6812 for (; entry->mode != VOIDmode; entry--)
6813 if (entry->reg == pr_reg)
6814 goto found;
6815
6816 /* We couldn't find the PR register. */
6817 gcc_unreachable ();
6818
6819 found:
6820 offset = entry->offset - offset;
6821 pr_offset = (rounded_frame_size (d) + offset
6822 + SHMEDIA_REGS_STACK_ADJUST ());
6823 }
6824 else
6825 pr_offset = rounded_frame_size (d);
6826
6827 emit_insn (GEN_MOV (tmp, GEN_INT (pr_offset)));
6828 emit_insn (GEN_ADD3 (tmp, tmp, hard_frame_pointer_rtx));
6829
6830 tmp = gen_frame_mem (Pmode, tmp);
6831 emit_insn (GEN_MOV (tmp, ra));
6832 }
6833
6834 /* Clear variables at function end. */
6835
6836 static void
6837 sh_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
6838 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
6839 {
6840 sh_need_epilogue_known = 0;
6841 }
6842
6843 static rtx
6844 sh_builtin_saveregs (void)
6845 {
6846 /* First unnamed integer register. */
6847 int first_intreg = current_function_args_info.arg_count[(int) SH_ARG_INT];
6848 /* Number of integer registers we need to save. */
6849 int n_intregs = MAX (0, NPARM_REGS (SImode) - first_intreg);
6850 /* First unnamed SFmode float reg. */
6851 int first_floatreg = current_function_args_info.arg_count[(int) SH_ARG_FLOAT];
6852 /* Number of SFmode float regs to save. */
6853 int n_floatregs = MAX (0, NPARM_REGS (SFmode) - first_floatreg);
6854 rtx regbuf, fpregs;
6855 int bufsize, regno;
6856 alias_set_type alias_set;
6857
6858 if (TARGET_SH5)
6859 {
6860 if (n_intregs)
6861 {
6862 int pushregs = n_intregs;
6863
6864 while (pushregs < NPARM_REGS (SImode) - 1
6865 && (CALL_COOKIE_INT_REG_GET
6866 (current_function_args_info.call_cookie,
6867 NPARM_REGS (SImode) - pushregs)
6868 == 1))
6869 {
6870 current_function_args_info.call_cookie
6871 &= ~ CALL_COOKIE_INT_REG (NPARM_REGS (SImode)
6872 - pushregs, 1);
6873 pushregs++;
6874 }
6875
6876 if (pushregs == NPARM_REGS (SImode))
6877 current_function_args_info.call_cookie
6878 |= (CALL_COOKIE_INT_REG (0, 1)
6879 | CALL_COOKIE_STACKSEQ (pushregs - 1));
6880 else
6881 current_function_args_info.call_cookie
6882 |= CALL_COOKIE_STACKSEQ (pushregs);
6883
6884 current_function_pretend_args_size += 8 * n_intregs;
6885 }
6886 if (TARGET_SHCOMPACT)
6887 return const0_rtx;
6888 }
6889
6890 if (! TARGET_SH2E && ! TARGET_SH4 && ! TARGET_SH5)
6891 {
6892 error ("__builtin_saveregs not supported by this subtarget");
6893 return const0_rtx;
6894 }
6895
6896 if (TARGET_SHMEDIA)
6897 n_floatregs = 0;
6898
6899 /* Allocate block of memory for the regs. */
6900 /* ??? If n_intregs + n_floatregs == 0, should we allocate at least 1 byte?
6901 Or can assign_stack_local accept a 0 SIZE argument? */
6902 bufsize = (n_intregs * UNITS_PER_WORD) + (n_floatregs * UNITS_PER_WORD);
6903
6904 if (TARGET_SHMEDIA)
6905 regbuf = gen_frame_mem (BLKmode, gen_rtx_REG (Pmode, ARG_POINTER_REGNUM));
6906 else if (n_floatregs & 1)
6907 {
6908 rtx addr;
6909
6910 regbuf = assign_stack_local (BLKmode, bufsize + UNITS_PER_WORD, 0);
6911 addr = copy_to_mode_reg (Pmode, XEXP (regbuf, 0));
6912 emit_insn (gen_iorsi3 (addr, addr, GEN_INT (UNITS_PER_WORD)));
6913 regbuf = change_address (regbuf, BLKmode, addr);
6914 }
6915 else if (STACK_BOUNDARY < 64 && TARGET_FPU_DOUBLE && n_floatregs)
6916 {
6917 rtx addr, mask;
6918
6919 regbuf = assign_stack_local (BLKmode, bufsize + UNITS_PER_WORD, 0);
6920 addr = copy_to_mode_reg (Pmode, plus_constant (XEXP (regbuf, 0), 4));
6921 mask = copy_to_mode_reg (Pmode, GEN_INT (-8));
6922 emit_insn (gen_andsi3 (addr, addr, mask));
6923 regbuf = change_address (regbuf, BLKmode, addr);
6924 }
6925 else
6926 regbuf = assign_stack_local (BLKmode, bufsize, TARGET_FPU_DOUBLE ? 64 : 0);
6927 alias_set = get_varargs_alias_set ();
6928 set_mem_alias_set (regbuf, alias_set);
6929
6930 /* Save int args.
6931 This is optimized to only save the regs that are necessary. Explicitly
6932 named args need not be saved. */
6933 if (n_intregs > 0)
6934 move_block_from_reg (BASE_ARG_REG (SImode) + first_intreg,
6935 adjust_address (regbuf, BLKmode,
6936 n_floatregs * UNITS_PER_WORD),
6937 n_intregs);
6938
6939 if (TARGET_SHMEDIA)
6940 /* Return the address of the regbuf. */
6941 return XEXP (regbuf, 0);
6942
6943 /* Save float args.
6944 This is optimized to only save the regs that are necessary. Explicitly
6945 named args need not be saved.
6946 We explicitly build a pointer to the buffer because it halves the insn
6947 count when not optimizing (otherwise the pointer is built for each reg
6948 saved).
6949 We emit the moves in reverse order so that we can use predecrement. */
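/* Layout reminder (illustrative): the float saves occupy the first
   N_FLOATREGS words of REGBUF and the int saves the following
   N_INTREGS words, so FPREGS starts at the end of the float area and
   is pre-decremented before each store below.  */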
6950
6951 fpregs = copy_to_mode_reg (Pmode,
6952 plus_constant (XEXP (regbuf, 0),
6953 n_floatregs * UNITS_PER_WORD));
6954 if (TARGET_SH4 || TARGET_SH2A_DOUBLE)
6955 {
6956 rtx mem;
6957 for (regno = NPARM_REGS (DFmode) - 2; regno >= first_floatreg; regno -= 2)
6958 {
6959 emit_insn (gen_addsi3 (fpregs, fpregs,
6960 GEN_INT (-2 * UNITS_PER_WORD)));
6961 mem = change_address (regbuf, DFmode, fpregs);
6962 emit_move_insn (mem,
6963 gen_rtx_REG (DFmode, BASE_ARG_REG (DFmode) + regno));
6964 }
6965 regno = first_floatreg;
6966 if (regno & 1)
6967 {
6968 emit_insn (gen_addsi3 (fpregs, fpregs, GEN_INT (-UNITS_PER_WORD)));
6969 mem = change_address (regbuf, SFmode, fpregs);
6970 emit_move_insn (mem,
6971 gen_rtx_REG (SFmode, BASE_ARG_REG (SFmode) + regno
6972 - (TARGET_LITTLE_ENDIAN != 0)));
6973 }
6974 }
6975 else
6976 for (regno = NPARM_REGS (SFmode) - 1; regno >= first_floatreg; regno--)
6977 {
6978 rtx mem;
6979
6980 emit_insn (gen_addsi3 (fpregs, fpregs, GEN_INT (-UNITS_PER_WORD)));
6981 mem = change_address (regbuf, SFmode, fpregs);
6982 emit_move_insn (mem,
6983 gen_rtx_REG (SFmode, BASE_ARG_REG (SFmode) + regno));
6984 }
6985
6986 /* Return the address of the regbuf. */
6987 return XEXP (regbuf, 0);
6988 }
6989
6990 /* Define the `__builtin_va_list' type for the ABI. */
6991
6992 static tree
6993 sh_build_builtin_va_list (void)
6994 {
6995 tree f_next_o, f_next_o_limit, f_next_fp, f_next_fp_limit, f_next_stack;
6996 tree record;
6997
6998 if (TARGET_SH5 || (! TARGET_SH2E && ! TARGET_SH4)
6999 || TARGET_HITACHI || sh_cfun_attr_renesas_p ())
7000 return ptr_type_node;
7001
7002 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
7003
7004 f_next_o = build_decl (FIELD_DECL, get_identifier ("__va_next_o"),
7005 ptr_type_node);
7006 f_next_o_limit = build_decl (FIELD_DECL,
7007 get_identifier ("__va_next_o_limit"),
7008 ptr_type_node);
7009 f_next_fp = build_decl (FIELD_DECL, get_identifier ("__va_next_fp"),
7010 ptr_type_node);
7011 f_next_fp_limit = build_decl (FIELD_DECL,
7012 get_identifier ("__va_next_fp_limit"),
7013 ptr_type_node);
7014 f_next_stack = build_decl (FIELD_DECL, get_identifier ("__va_next_stack"),
7015 ptr_type_node);
7016
7017 DECL_FIELD_CONTEXT (f_next_o) = record;
7018 DECL_FIELD_CONTEXT (f_next_o_limit) = record;
7019 DECL_FIELD_CONTEXT (f_next_fp) = record;
7020 DECL_FIELD_CONTEXT (f_next_fp_limit) = record;
7021 DECL_FIELD_CONTEXT (f_next_stack) = record;
7022
7023 TYPE_FIELDS (record) = f_next_o;
7024 TREE_CHAIN (f_next_o) = f_next_o_limit;
7025 TREE_CHAIN (f_next_o_limit) = f_next_fp;
7026 TREE_CHAIN (f_next_fp) = f_next_fp_limit;
7027 TREE_CHAIN (f_next_fp_limit) = f_next_stack;
7028
7029 layout_type (record);
7030
7031 return record;
7032 }
7033
7034 /* Implement `va_start' for varargs and stdarg. */
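/* For the non-SH5, non-Renesas SH2E/SH4 case handled below, the fields
   end up describing the register save buffer built by
   __builtin_saveregs (descriptive sketch): __va_next_fp points at the
   start of the saved float registers, __va_next_fp_limit at their end
   (which is also __va_next_o, the start of the saved integer
   registers), __va_next_o_limit at the end of those, and
   __va_next_stack at the first anonymous argument passed on the
   stack.  */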
7035
7036 void
7037 sh_va_start (tree valist, rtx nextarg)
7038 {
7039 tree f_next_o, f_next_o_limit, f_next_fp, f_next_fp_limit, f_next_stack;
7040 tree next_o, next_o_limit, next_fp, next_fp_limit, next_stack;
7041 tree t, u;
7042 int nfp, nint;
7043
7044 if (TARGET_SH5)
7045 {
7046 expand_builtin_saveregs ();
7047 std_expand_builtin_va_start (valist, nextarg);
7048 return;
7049 }
7050
7051 if ((! TARGET_SH2E && ! TARGET_SH4)
7052 || TARGET_HITACHI || sh_cfun_attr_renesas_p ())
7053 {
7054 std_expand_builtin_va_start (valist, nextarg);
7055 return;
7056 }
7057
7058 f_next_o = TYPE_FIELDS (va_list_type_node);
7059 f_next_o_limit = TREE_CHAIN (f_next_o);
7060 f_next_fp = TREE_CHAIN (f_next_o_limit);
7061 f_next_fp_limit = TREE_CHAIN (f_next_fp);
7062 f_next_stack = TREE_CHAIN (f_next_fp_limit);
7063
7064 next_o = build3 (COMPONENT_REF, TREE_TYPE (f_next_o), valist, f_next_o,
7065 NULL_TREE);
7066 next_o_limit = build3 (COMPONENT_REF, TREE_TYPE (f_next_o_limit),
7067 valist, f_next_o_limit, NULL_TREE);
7068 next_fp = build3 (COMPONENT_REF, TREE_TYPE (f_next_fp), valist, f_next_fp,
7069 NULL_TREE);
7070 next_fp_limit = build3 (COMPONENT_REF, TREE_TYPE (f_next_fp_limit),
7071 valist, f_next_fp_limit, NULL_TREE);
7072 next_stack = build3 (COMPONENT_REF, TREE_TYPE (f_next_stack),
7073 valist, f_next_stack, NULL_TREE);
7074
7075 /* Call __builtin_saveregs. */
7076 u = make_tree (sizetype, expand_builtin_saveregs ());
7077 u = fold_convert (ptr_type_node, u);
7078 t = build2 (GIMPLE_MODIFY_STMT, ptr_type_node, next_fp, u);
7079 TREE_SIDE_EFFECTS (t) = 1;
7080 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7081
7082 nfp = current_function_args_info.arg_count[SH_ARG_FLOAT];
7083 if (nfp < 8)
7084 nfp = 8 - nfp;
7085 else
7086 nfp = 0;
7087 u = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, u,
7088 size_int (UNITS_PER_WORD * nfp));
7089 t = build2 (GIMPLE_MODIFY_STMT, ptr_type_node, next_fp_limit, u);
7090 TREE_SIDE_EFFECTS (t) = 1;
7091 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7092
7093 t = build2 (GIMPLE_MODIFY_STMT, ptr_type_node, next_o, u);
7094 TREE_SIDE_EFFECTS (t) = 1;
7095 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7096
7097 nint = current_function_args_info.arg_count[SH_ARG_INT];
7098 if (nint < 4)
7099 nint = 4 - nint;
7100 else
7101 nint = 0;
7102 u = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, u,
7103 size_int (UNITS_PER_WORD * nint));
7104 t = build2 (GIMPLE_MODIFY_STMT, ptr_type_node, next_o_limit, u);
7105 TREE_SIDE_EFFECTS (t) = 1;
7106 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7107
7108 u = make_tree (ptr_type_node, nextarg);
7109 t = build2 (GIMPLE_MODIFY_STMT, ptr_type_node, next_stack, u);
7110 TREE_SIDE_EFFECTS (t) = 1;
7111 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7112 }
7113
7114 /* TYPE is a RECORD_TYPE. If there is only a single nonzero-sized
7115 member, return it. */
7116 static tree
7117 find_sole_member (tree type)
7118 {
7119 tree field, member = NULL_TREE;
7120
7121 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
7122 {
7123 if (TREE_CODE (field) != FIELD_DECL)
7124 continue;
7125 if (!DECL_SIZE (field))
7126 return NULL_TREE;
7127 if (integer_zerop (DECL_SIZE (field)))
7128 continue;
7129 if (member)
7130 return NULL_TREE;
7131 member = field;
7132 }
7133 return member;
7134 }
7135 /* Implement `va_arg'. */
7136
7137 static tree
7138 sh_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p,
7139 tree *post_p ATTRIBUTE_UNUSED)
7140 {
7141 HOST_WIDE_INT size, rsize;
7142 tree tmp, pptr_type_node;
7143 tree addr, lab_over = NULL, result = NULL;
7144 int pass_by_ref = targetm.calls.must_pass_in_stack (TYPE_MODE (type), type);
7145 tree eff_type;
7146
7147 if (pass_by_ref)
7148 type = build_pointer_type (type);
7149
7150 size = int_size_in_bytes (type);
7151 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
7152 pptr_type_node = build_pointer_type (ptr_type_node);
7153
7154 if (! TARGET_SH5 && (TARGET_SH2E || TARGET_SH4)
7155 && ! (TARGET_HITACHI || sh_cfun_attr_renesas_p ()))
7156 {
7157 tree f_next_o, f_next_o_limit, f_next_fp, f_next_fp_limit, f_next_stack;
7158 tree next_o, next_o_limit, next_fp, next_fp_limit, next_stack;
7159 int pass_as_float;
7160 tree lab_false;
7161 tree member;
7162
7163 f_next_o = TYPE_FIELDS (va_list_type_node);
7164 f_next_o_limit = TREE_CHAIN (f_next_o);
7165 f_next_fp = TREE_CHAIN (f_next_o_limit);
7166 f_next_fp_limit = TREE_CHAIN (f_next_fp);
7167 f_next_stack = TREE_CHAIN (f_next_fp_limit);
7168
7169 next_o = build3 (COMPONENT_REF, TREE_TYPE (f_next_o), valist, f_next_o,
7170 NULL_TREE);
7171 next_o_limit = build3 (COMPONENT_REF, TREE_TYPE (f_next_o_limit),
7172 valist, f_next_o_limit, NULL_TREE);
7173 next_fp = build3 (COMPONENT_REF, TREE_TYPE (f_next_fp),
7174 valist, f_next_fp, NULL_TREE);
7175 next_fp_limit = build3 (COMPONENT_REF, TREE_TYPE (f_next_fp_limit),
7176 valist, f_next_fp_limit, NULL_TREE);
7177 next_stack = build3 (COMPONENT_REF, TREE_TYPE (f_next_stack),
7178 valist, f_next_stack, NULL_TREE);
7179
7180 /* Structures with a single member with a distinct mode are passed
7181 like their member. This is relevant if the latter has a REAL_TYPE
7182 or COMPLEX_TYPE type. */
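/* E.g. (illustrative): a struct whose only nonzero-sized member is a
   double, and which itself has DFmode, is treated as a double here;
   a struct with two data members is left alone.  */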
7183 eff_type = type;
7184 while (TREE_CODE (eff_type) == RECORD_TYPE
7185 && (member = find_sole_member (eff_type))
7186 && (TREE_CODE (TREE_TYPE (member)) == REAL_TYPE
7187 || TREE_CODE (TREE_TYPE (member)) == COMPLEX_TYPE
7188 || TREE_CODE (TREE_TYPE (member)) == RECORD_TYPE))
7189 {
7190 tree field_type = TREE_TYPE (member);
7191
7192 if (TYPE_MODE (eff_type) == TYPE_MODE (field_type))
7193 eff_type = field_type;
7194 else
7195 {
7196 gcc_assert ((TYPE_ALIGN (eff_type)
7197 < GET_MODE_ALIGNMENT (TYPE_MODE (field_type)))
7198 || (TYPE_ALIGN (eff_type)
7199 > GET_MODE_BITSIZE (TYPE_MODE (field_type))));
7200 break;
7201 }
7202 }
7203
7204 if (TARGET_SH4)
7205 {
7206 pass_as_float = ((TREE_CODE (eff_type) == REAL_TYPE && size <= 8)
7207 || (TREE_CODE (eff_type) == COMPLEX_TYPE
7208 && TREE_CODE (TREE_TYPE (eff_type)) == REAL_TYPE
7209 && size <= 16));
7210 }
7211 else
7212 {
7213 pass_as_float = (TREE_CODE (eff_type) == REAL_TYPE && size == 4);
7214 }
7215
7216 addr = create_tmp_var (pptr_type_node, NULL);
7217 lab_false = create_artificial_label ();
7218 lab_over = create_artificial_label ();
7219
7220 valist = build1 (INDIRECT_REF, ptr_type_node, addr);
7221
7222 if (pass_as_float)
7223 {
7224 tree next_fp_tmp = create_tmp_var (TREE_TYPE (f_next_fp), NULL);
7225 tree cmp;
7226 bool is_double = size == 8 && TREE_CODE (eff_type) == REAL_TYPE;
7227
7228 tmp = build1 (ADDR_EXPR, pptr_type_node, next_fp);
7229 tmp = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, tmp);
7230 gimplify_and_add (tmp, pre_p);
7231
7232 tmp = build2 (GIMPLE_MODIFY_STMT, ptr_type_node, next_fp_tmp, valist);
7233 gimplify_and_add (tmp, pre_p);
7234 tmp = next_fp_limit;
7235 if (size > 4 && !is_double)
7236 tmp = build2 (POINTER_PLUS_EXPR, TREE_TYPE (tmp), tmp,
7237 size_int (4 - size));
7238 tmp = build2 (GE_EXPR, boolean_type_node, next_fp_tmp, tmp);
7239 cmp = build3 (COND_EXPR, void_type_node, tmp,
7240 build1 (GOTO_EXPR, void_type_node, lab_false),
7241 NULL_TREE);
7242 if (!is_double)
7243 gimplify_and_add (cmp, pre_p);
7244
7245 if (TYPE_ALIGN (eff_type) > BITS_PER_WORD
7246 || (is_double || size == 16))
7247 {
7248 tmp = fold_convert (sizetype, next_fp_tmp);
7249 tmp = build2 (BIT_AND_EXPR, sizetype, tmp,
7250 size_int (UNITS_PER_WORD));
7251 tmp = build2 (POINTER_PLUS_EXPR, ptr_type_node,
7252 next_fp_tmp, tmp);
7253 tmp = build2 (GIMPLE_MODIFY_STMT, ptr_type_node,
7254 next_fp_tmp, tmp);
7255 gimplify_and_add (tmp, pre_p);
7256 }
7257 if (is_double)
7258 gimplify_and_add (cmp, pre_p);
7259
7260 #ifdef FUNCTION_ARG_SCmode_WART
7261 if (TYPE_MODE (eff_type) == SCmode
7262 && TARGET_SH4 && TARGET_LITTLE_ENDIAN)
7263 {
7264 tree subtype = TREE_TYPE (eff_type);
7265 tree real, imag;
7266
7267 imag
7268 = std_gimplify_va_arg_expr (next_fp_tmp, subtype, pre_p, NULL);
7269 imag = get_initialized_tmp_var (imag, pre_p, NULL);
7270
7271 real
7272 = std_gimplify_va_arg_expr (next_fp_tmp, subtype, pre_p, NULL);
7273 real = get_initialized_tmp_var (real, pre_p, NULL);
7274
7275 result = build2 (COMPLEX_EXPR, type, real, imag);
7276 result = get_initialized_tmp_var (result, pre_p, NULL);
7277 }
7278 #endif /* FUNCTION_ARG_SCmode_WART */
7279
7280 tmp = build1 (GOTO_EXPR, void_type_node, lab_over);
7281 gimplify_and_add (tmp, pre_p);
7282
7283 tmp = build1 (LABEL_EXPR, void_type_node, lab_false);
7284 gimplify_and_add (tmp, pre_p);
7285
7286 tmp = build1 (ADDR_EXPR, pptr_type_node, next_stack);
7287 tmp = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, tmp);
7288 gimplify_and_add (tmp, pre_p);
7289 tmp = build2 (GIMPLE_MODIFY_STMT, ptr_type_node, next_fp_tmp, valist);
7290 gimplify_and_add (tmp, pre_p);
7291
7292 tmp = build2 (GIMPLE_MODIFY_STMT, ptr_type_node, valist, next_fp_tmp);
7293 gimplify_and_add (tmp, post_p);
7294 valist = next_fp_tmp;
7295 }
7296 else
7297 {
7298 tmp = build2 (POINTER_PLUS_EXPR, ptr_type_node, next_o,
7299 size_int (rsize));
7300 tmp = build2 (GT_EXPR, boolean_type_node, tmp, next_o_limit);
7301 tmp = build3 (COND_EXPR, void_type_node, tmp,
7302 build1 (GOTO_EXPR, void_type_node, lab_false),
7303 NULL_TREE);
7304 gimplify_and_add (tmp, pre_p);
7305
7306 tmp = build1 (ADDR_EXPR, pptr_type_node, next_o);
7307 tmp = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, tmp);
7308 gimplify_and_add (tmp, pre_p);
7309
7310 tmp = build1 (GOTO_EXPR, void_type_node, lab_over);
7311 gimplify_and_add (tmp, pre_p);
7312
7313 tmp = build1 (LABEL_EXPR, void_type_node, lab_false);
7314 gimplify_and_add (tmp, pre_p);
7315
7316 if (size > 4 && ! TARGET_SH4)
7317 {
7318 tmp = build2 (GIMPLE_MODIFY_STMT, ptr_type_node,
7319 next_o, next_o_limit);
7320 gimplify_and_add (tmp, pre_p);
7321 }
7322
7323 tmp = build1 (ADDR_EXPR, pptr_type_node, next_stack);
7324 tmp = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, tmp);
7325 gimplify_and_add (tmp, pre_p);
7326 }
7327
7328 if (!result)
7329 {
7330 tmp = build1 (LABEL_EXPR, void_type_node, lab_over);
7331 gimplify_and_add (tmp, pre_p);
7332 }
7333 }
7334
7335 /* ??? In va-sh.h, there had been code to make values larger than
7336 size 8 indirect. This does not match the FUNCTION_ARG macros. */
7337
7338 tmp = std_gimplify_va_arg_expr (valist, type, pre_p, NULL);
7339 if (result)
7340 {
7341 tmp = build2 (GIMPLE_MODIFY_STMT, void_type_node, result, tmp);
7342 gimplify_and_add (tmp, pre_p);
7343
7344 tmp = build1 (LABEL_EXPR, void_type_node, lab_over);
7345 gimplify_and_add (tmp, pre_p);
7346 }
7347 else
7348 result = tmp;
7349
7350 if (pass_by_ref)
7351 result = build_va_arg_indirect_ref (result);
7352
7353 return result;
7354 }
7355
7356 bool
7357 sh_promote_prototypes (tree type)
7358 {
7359 if (TARGET_HITACHI)
7360 return 0;
7361 if (! type)
7362 return 1;
7363 return ! sh_attr_renesas_p (type);
7364 }
7365
7366 /* Whether an argument must be passed by reference. On SHcompact, we
7367 pretend arguments wider than 32 bits that would have been passed in
7368 registers are passed by reference, so that an SHmedia trampoline
7369 loads them into the full 64-bit registers. */
7370
7371 static int
7372 shcompact_byref (CUMULATIVE_ARGS *cum, enum machine_mode mode,
7373 tree type, bool named)
7374 {
7375 unsigned HOST_WIDE_INT size;
7376
7377 if (type)
7378 size = int_size_in_bytes (type);
7379 else
7380 size = GET_MODE_SIZE (mode);
7381
7382 if (cum->arg_count[SH_ARG_INT] < NPARM_REGS (SImode)
7383 && (!named
7384 || GET_SH_ARG_CLASS (mode) == SH_ARG_INT
7385 || (GET_SH_ARG_CLASS (mode) == SH_ARG_FLOAT
7386 && cum->arg_count[SH_ARG_FLOAT] >= NPARM_REGS (SFmode)))
7387 && size > 4
7388 && !SHCOMPACT_FORCE_ON_STACK (mode, type)
7389 && !SH5_WOULD_BE_PARTIAL_NREGS (*cum, mode, type, named))
7390 return size;
7391 else
7392 return 0;
7393 }
7394
7395 static bool
7396 sh_pass_by_reference (CUMULATIVE_ARGS *cum, enum machine_mode mode,
7397 tree type, bool named)
7398 {
7399 if (targetm.calls.must_pass_in_stack (mode, type))
7400 return true;
7401
7402 /* ??? std_gimplify_va_arg_expr passes NULL for cum. That function
7403 wants to know about pass-by-reference semantics for incoming
7404 arguments. */
7405 if (! cum)
7406 return false;
7407
7408 if (TARGET_SHCOMPACT)
7409 {
7410 cum->byref = shcompact_byref (cum, mode, type, named);
7411 return cum->byref != 0;
7412 }
7413
7414 return false;
7415 }
7416
7417 static bool
7418 sh_callee_copies (CUMULATIVE_ARGS *cum, enum machine_mode mode,
7419 tree type, bool named ATTRIBUTE_UNUSED)
7420 {
7421 /* ??? How can it possibly be correct to return true only on the
7422 caller side of the equation? Is there someplace else in the
7423 sh backend that's magically producing the copies? */
7424 return (cum->outgoing
7425 && ((mode == BLKmode ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode))
7426 % SH_MIN_ALIGN_FOR_CALLEE_COPY == 0));
7427 }
7428
7429 static int
7430 sh_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
7431 tree type, bool named ATTRIBUTE_UNUSED)
7432 {
7433 int words = 0;
7434
7435 if (!TARGET_SH5
7436 && PASS_IN_REG_P (*cum, mode, type)
7437 && !(TARGET_SH4 || TARGET_SH2A_DOUBLE)
7438 && (ROUND_REG (*cum, mode)
7439 + (mode != BLKmode
7440 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
7441 : ROUND_ADVANCE (int_size_in_bytes (type)))
7442 > NPARM_REGS (mode)))
7443 words = NPARM_REGS (mode) - ROUND_REG (*cum, mode);
7444
7445 else if (!TARGET_SHCOMPACT
7446 && SH5_WOULD_BE_PARTIAL_NREGS (*cum, mode, type, named))
7447 words = NPARM_REGS (SImode) - cum->arg_count[SH_ARG_INT];
7448
7449 return words * UNITS_PER_WORD;
7450 }
7451
7452
7453 /* Define where to put the arguments to a function.
7454 Value is zero to push the argument on the stack,
7455 or a hard register in which to store the argument.
7456
7457 MODE is the argument's machine mode.
7458 TYPE is the data type of the argument (as a tree).
7459 This is null for libcalls where that information may
7460 not be available.
7461 CUM is a variable of type CUMULATIVE_ARGS which gives info about
7462 the preceding args and about the function being called.
7463 NAMED is nonzero if this argument is a named parameter
7464 (otherwise it is an extra parameter matching an ellipsis).
7465
7466 On SH the first args are normally in registers
7467 and the rest are pushed. Any arg that starts within the first
7468 NPARM_REGS words is at least partially passed in a register unless
7469 its data type forbids. */
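/* For illustration only (assuming the usual SH register numbering, where
   FIRST_PARM_REG corresponds to r4): for the first named SImode argument
   of an ordinary SH1..SH4 function, the code below returns
   gen_rtx_REG (SImode, 4), i.e. the argument is passed in r4.  */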
7470
7471
7472 rtx
7473 sh_function_arg (CUMULATIVE_ARGS *ca, enum machine_mode mode,
7474 tree type, int named)
7475 {
7476 if (! TARGET_SH5 && mode == VOIDmode)
7477 return GEN_INT (ca->renesas_abi ? 1 : 0);
7478
7479 if (! TARGET_SH5
7480 && PASS_IN_REG_P (*ca, mode, type)
7481 && (named || ! (TARGET_HITACHI || ca->renesas_abi)))
7482 {
7483 int regno;
7484
7485 if (mode == SCmode && TARGET_SH4 && TARGET_LITTLE_ENDIAN
7486 && (! FUNCTION_ARG_SCmode_WART || (ROUND_REG (*ca, mode) & 1)))
7487 {
7488 rtx r1 = gen_rtx_EXPR_LIST (VOIDmode,
7489 gen_rtx_REG (SFmode,
7490 BASE_ARG_REG (mode)
7491 + (ROUND_REG (*ca, mode) ^ 1)),
7492 const0_rtx);
7493 rtx r2 = gen_rtx_EXPR_LIST (VOIDmode,
7494 gen_rtx_REG (SFmode,
7495 BASE_ARG_REG (mode)
7496 + ((ROUND_REG (*ca, mode) + 1) ^ 1)),
7497 GEN_INT (4));
7498 return gen_rtx_PARALLEL (SCmode, gen_rtvec (2, r1, r2));
7499 }
7500
7501 /* If the alignment of a DF value causes an SF register to be
7502 skipped, we will use that skipped register for the next SF
7503 value. */
7504 if ((TARGET_HITACHI || ca->renesas_abi)
7505 && ca->free_single_fp_reg
7506 && mode == SFmode)
7507 return gen_rtx_REG (mode, ca->free_single_fp_reg);
7508
7509 regno = (BASE_ARG_REG (mode) + ROUND_REG (*ca, mode))
7510 ^ (mode == SFmode && TARGET_SH4
7511 && TARGET_LITTLE_ENDIAN != 0
7512 && ! TARGET_HITACHI && ! ca->renesas_abi);
7513 return gen_rtx_REG (mode, regno);
7514
7515 }
7516
7517 if (TARGET_SH5)
7518 {
7519 if (mode == VOIDmode && TARGET_SHCOMPACT)
7520 return GEN_INT (ca->call_cookie);
7521
7522 /* The following test assumes unnamed arguments are promoted to
7523 DFmode. */
7524 if (mode == SFmode && ca->free_single_fp_reg)
7525 return SH5_PROTOTYPED_FLOAT_ARG (*ca, mode, ca->free_single_fp_reg);
7526
7527 if ((GET_SH_ARG_CLASS (mode) == SH_ARG_FLOAT)
7528 && (named || ! ca->prototype_p)
7529 && ca->arg_count[(int) SH_ARG_FLOAT] < NPARM_REGS (SFmode))
7530 {
7531 if (! ca->prototype_p && TARGET_SHMEDIA)
7532 return SH5_PROTOTYPELESS_FLOAT_ARG (*ca, mode);
7533
7534 return SH5_PROTOTYPED_FLOAT_ARG (*ca, mode,
7535 FIRST_FP_PARM_REG
7536 + ca->arg_count[(int) SH_ARG_FLOAT]);
7537 }
7538
7539 if (ca->arg_count[(int) SH_ARG_INT] < NPARM_REGS (SImode)
7540 && (! TARGET_SHCOMPACT
7541 || (! SHCOMPACT_FORCE_ON_STACK (mode, type)
7542 && ! SH5_WOULD_BE_PARTIAL_NREGS (*ca, mode,
7543 type, named))))
7544 {
7545 return gen_rtx_REG (mode, (FIRST_PARM_REG
7546 + ca->arg_count[(int) SH_ARG_INT]));
7547 }
7548
7549 return 0;
7550 }
7551
7552 return 0;
7553 }
7554
7555 /* Update the data in CUM to advance over an argument
7556 of mode MODE and data type TYPE.
7557 (TYPE is null for libcalls where that information may not be
7558 available.) */
7559
7560 void
7561 sh_function_arg_advance (CUMULATIVE_ARGS *ca, enum machine_mode mode,
7562 tree type, int named)
7563 {
7564 if (ca->force_mem)
7565 ca->force_mem = 0;
7566 else if (TARGET_SH5)
7567 {
7568 tree type2 = (ca->byref && type
7569 ? TREE_TYPE (type)
7570 : type);
7571 enum machine_mode mode2 = (ca->byref && type
7572 ? TYPE_MODE (type2)
7573 : mode);
7574 int dwords = ((ca->byref
7575 ? ca->byref
7576 : mode2 == BLKmode
7577 ? int_size_in_bytes (type2)
7578 : GET_MODE_SIZE (mode2)) + 7) / 8;
7579 int numregs = MIN (dwords, NPARM_REGS (SImode)
7580 - ca->arg_count[(int) SH_ARG_INT]);
7581
7582 if (numregs)
7583 {
7584 ca->arg_count[(int) SH_ARG_INT] += numregs;
7585 if (TARGET_SHCOMPACT
7586 && SHCOMPACT_FORCE_ON_STACK (mode2, type2))
7587 {
7588 ca->call_cookie
7589 |= CALL_COOKIE_INT_REG (ca->arg_count[(int) SH_ARG_INT]
7590 - numregs, 1);
7591 /* N.B. We want this also for outgoing. */
7592 ca->stack_regs += numregs;
7593 }
7594 else if (ca->byref)
7595 {
7596 if (! ca->outgoing)
7597 ca->stack_regs += numregs;
7598 ca->byref_regs += numregs;
7599 ca->byref = 0;
7600 do
7601 ca->call_cookie
7602 |= CALL_COOKIE_INT_REG (ca->arg_count[(int) SH_ARG_INT]
7603 - numregs, 2);
7604 while (--numregs);
7605 ca->call_cookie
7606 |= CALL_COOKIE_INT_REG (ca->arg_count[(int) SH_ARG_INT]
7607 - 1, 1);
7608 }
7609 else if (dwords > numregs)
7610 {
7611 int pushregs = numregs;
7612
7613 if (TARGET_SHCOMPACT)
7614 ca->stack_regs += numregs;
7615 while (pushregs < NPARM_REGS (SImode) - 1
7616 && (CALL_COOKIE_INT_REG_GET
7617 (ca->call_cookie,
7618 NPARM_REGS (SImode) - pushregs)
7619 == 1))
7620 {
7621 ca->call_cookie
7622 &= ~ CALL_COOKIE_INT_REG (NPARM_REGS (SImode)
7623 - pushregs, 1);
7624 pushregs++;
7625 }
7626 if (numregs == NPARM_REGS (SImode))
7627 ca->call_cookie
7628 |= CALL_COOKIE_INT_REG (0, 1)
7629 | CALL_COOKIE_STACKSEQ (numregs - 1);
7630 else
7631 ca->call_cookie
7632 |= CALL_COOKIE_STACKSEQ (numregs);
7633 }
7634 }
7635 if (GET_SH_ARG_CLASS (mode2) == SH_ARG_FLOAT
7636 && (named || ! ca->prototype_p))
7637 {
7638 if (mode2 == SFmode && ca->free_single_fp_reg)
7639 ca->free_single_fp_reg = 0;
7640 else if (ca->arg_count[(int) SH_ARG_FLOAT]
7641 < NPARM_REGS (SFmode))
7642 {
7643 int numfpregs
7644 = MIN ((GET_MODE_SIZE (mode2) + 7) / 8 * 2,
7645 NPARM_REGS (SFmode)
7646 - ca->arg_count[(int) SH_ARG_FLOAT]);
7647
7648 ca->arg_count[(int) SH_ARG_FLOAT] += numfpregs;
7649
7650 if (TARGET_SHCOMPACT && ! ca->prototype_p)
7651 {
7652 if (ca->outgoing && numregs > 0)
7653 do
7654 {
7655 ca->call_cookie
7656 |= (CALL_COOKIE_INT_REG
7657 (ca->arg_count[(int) SH_ARG_INT]
7658 - numregs + ((numfpregs - 2) / 2),
7659 4 + (ca->arg_count[(int) SH_ARG_FLOAT]
7660 - numfpregs) / 2));
7661 }
7662 while (numfpregs -= 2);
7663 }
7664 else if (mode2 == SFmode && (named)
7665 && (ca->arg_count[(int) SH_ARG_FLOAT]
7666 < NPARM_REGS (SFmode)))
7667 ca->free_single_fp_reg
7668 = FIRST_FP_PARM_REG - numfpregs
7669 + ca->arg_count[(int) SH_ARG_FLOAT] + 1;
7670 }
7671 }
7672 return;
7673 }
7674
7675 if ((TARGET_HITACHI || ca->renesas_abi) && TARGET_FPU_DOUBLE)
7676 {
7677 /* Note that we've used the skipped register. */
7678 if (mode == SFmode && ca->free_single_fp_reg)
7679 {
7680 ca->free_single_fp_reg = 0;
7681 return;
7682 }
7683 /* When we have a DF after an SF, there's an SF register that gets
7684 skipped in order to align the DF value. We note this skipped
7685 register, because the next SF value will use it, and not the
7686 SF that follows the DF. */
7687 if (mode == DFmode
7688 && ROUND_REG (*ca, DFmode) != ROUND_REG (*ca, SFmode))
7689 {
7690 ca->free_single_fp_reg = (ROUND_REG (*ca, SFmode)
7691 + BASE_ARG_REG (mode));
7692 }
7693 }
7694
7695 if (! ((TARGET_SH4 || TARGET_SH2A) || ca->renesas_abi)
7696 || PASS_IN_REG_P (*ca, mode, type))
7697 (ca->arg_count[(int) GET_SH_ARG_CLASS (mode)]
7698 = (ROUND_REG (*ca, mode)
7699 + (mode == BLKmode
7700 ? ROUND_ADVANCE (int_size_in_bytes (type))
7701 : ROUND_ADVANCE (GET_MODE_SIZE (mode)))));
7702 }
7703
7704 /* The Renesas calling convention doesn't quite fit into this scheme since
7705 the address is passed like an invisible argument, but one that is always
7706 passed in memory. */
7707 static rtx
7708 sh_struct_value_rtx (tree fndecl, int incoming ATTRIBUTE_UNUSED)
7709 {
7710 if (TARGET_HITACHI || sh_attr_renesas_p (fndecl))
7711 return 0;
7712 return gen_rtx_REG (Pmode, 2);
7713 }
7714
7715 /* Worker function for TARGET_RETURN_IN_MEMORY. */
7716
7717 static bool
7718 sh_return_in_memory (tree type, tree fndecl)
7719 {
7720 if (TARGET_SH5)
7721 {
7722 if (TYPE_MODE (type) == BLKmode)
7723 return ((unsigned HOST_WIDE_INT) int_size_in_bytes (type)) > 8;
7724 else
7725 return GET_MODE_SIZE (TYPE_MODE (type)) > 8;
7726 }
7727 else
7728 {
7729 return (TYPE_MODE (type) == BLKmode
7730 || ((TARGET_HITACHI || sh_attr_renesas_p (fndecl))
7731 && TREE_CODE (type) == RECORD_TYPE));
7732 }
7733 }
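/* For instance, following the checks above: on SH5 a 16-byte struct is
   returned in memory while one that fits in 8 bytes is returned in
   registers; on SH1..SH4 any BLKmode value is returned in memory, and
   with the Renesas / Hitachi ABI every RECORD_TYPE is as well.  */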
7734
7735 /* We actually emit the code in sh_expand_prologue. We used to use
7736 a static variable to flag that we need to emit this code, but that
7737 doesn't work when inlining, when functions are deferred and then emitted
7738 later. Fortunately, we already have two flags that are part of struct
7739 function that tell if a function uses varargs or stdarg. */
7740 static void
7741 sh_setup_incoming_varargs (CUMULATIVE_ARGS *ca,
7742 enum machine_mode mode,
7743 tree type,
7744 int *pretend_arg_size,
7745 int second_time ATTRIBUTE_UNUSED)
7746 {
7747 gcc_assert (current_function_stdarg);
7748 if (TARGET_VARARGS_PRETEND_ARGS (current_function_decl))
7749 {
7750 int named_parm_regs, anon_parm_regs;
7751
7752 named_parm_regs = (ROUND_REG (*ca, mode)
7753 + (mode == BLKmode
7754 ? ROUND_ADVANCE (int_size_in_bytes (type))
7755 : ROUND_ADVANCE (GET_MODE_SIZE (mode))));
7756 anon_parm_regs = NPARM_REGS (SImode) - named_parm_regs;
7757 if (anon_parm_regs > 0)
7758 *pretend_arg_size = anon_parm_regs * 4;
7759 }
7760 }
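/* For example, if the named parameters occupy only one of the integer
   argument registers (r4..r7 on SH1..SH4), ANON_PARM_REGS is 3 and
   *PRETEND_ARG_SIZE becomes 12; sh_expand_prologue then saves those
   registers so va_arg can find the anonymous arguments.  */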
7761
7762 static bool
7763 sh_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
7764 {
7765 return TARGET_SH5;
7766 }
7767
7768 static bool
7769 sh_pretend_outgoing_varargs_named (CUMULATIVE_ARGS *ca)
7770 {
7771 return ! (TARGET_HITACHI || ca->renesas_abi) && ! TARGET_SH5;
7772 }
7773
7774
7775 /* Define the offset between two registers, one to be eliminated, and
7776 the other its replacement, at the start of a routine. */
7777
7778 int
7779 initial_elimination_offset (int from, int to)
7780 {
7781 int regs_saved;
7782 int regs_saved_rounding = 0;
7783 int total_saved_regs_space;
7784 int total_auto_space;
7785 int save_flags = target_flags;
7786 int copy_flags;
7787 HARD_REG_SET live_regs_mask;
7788
7789 shmedia_space_reserved_for_target_registers = false;
7790 regs_saved = calc_live_regs (&live_regs_mask);
7791 regs_saved += SHMEDIA_REGS_STACK_ADJUST ();
7792
7793 if (shmedia_reserve_space_for_target_registers_p (regs_saved, &live_regs_mask))
7794 {
7795 shmedia_space_reserved_for_target_registers = true;
7796 regs_saved += shmedia_target_regs_stack_adjust (&live_regs_mask);
7797 }
7798
7799 if (TARGET_SH5 && regs_saved % (STACK_BOUNDARY / BITS_PER_UNIT))
7800 regs_saved_rounding = ((STACK_BOUNDARY / BITS_PER_UNIT)
7801 - regs_saved % (STACK_BOUNDARY / BITS_PER_UNIT));
7802
7803 total_auto_space = rounded_frame_size (regs_saved) - regs_saved_rounding;
7804 copy_flags = target_flags;
7805 target_flags = save_flags;
7806
7807 total_saved_regs_space = regs_saved + regs_saved_rounding;
7808
7809 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
7810 return total_saved_regs_space + total_auto_space
7811 + current_function_args_info.byref_regs * 8;
7812
7813 if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
7814 return total_saved_regs_space + total_auto_space
7815 + current_function_args_info.byref_regs * 8;
7816
7817 /* Initial gap between fp and sp is 0. */
7818 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
7819 return 0;
7820
7821 if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
7822 return rounded_frame_size (0);
7823
7824 if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
7825 return rounded_frame_size (0);
7826
7827 gcc_assert (from == RETURN_ADDRESS_POINTER_REGNUM
7828 && (to == HARD_FRAME_POINTER_REGNUM
7829 || to == STACK_POINTER_REGNUM));
7830 if (TARGET_SH5)
7831 {
7832 int n = total_saved_regs_space;
7833 int pr_reg = TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG;
7834 save_schedule schedule;
7835 save_entry *entry;
7836
7837 n += total_auto_space;
7838
7839 /* If it wasn't saved, there's not much we can do. */
7840 if (! TEST_HARD_REG_BIT (live_regs_mask, pr_reg))
7841 return n;
7842
7843 target_flags = copy_flags;
7844
7845 sh5_schedule_saves (&live_regs_mask, &schedule, n);
7846 for (entry = &schedule.entries[1]; entry->mode != VOIDmode; entry++)
7847 if (entry->reg == pr_reg)
7848 {
7849 target_flags = save_flags;
7850 return entry->offset;
7851 }
7852 gcc_unreachable ();
7853 }
7854 else
7855 return total_auto_space;
7856 }
7857 \f
7858 /* Insert any deferred function attributes from earlier pragmas. */
7859 static void
7860 sh_insert_attributes (tree node, tree *attributes)
7861 {
7862 tree attrs;
7863
7864 if (TREE_CODE (node) != FUNCTION_DECL)
7865 return;
7866
7867 /* We are only interested in fields. */
7868 if (!DECL_P (node))
7869 return;
7870
7871 /* Append the attributes to the deferred attributes. */
7872 *sh_deferred_function_attributes_tail = *attributes;
7873 attrs = sh_deferred_function_attributes;
7874 if (!attrs)
7875 return;
7876
7877 /* Some attributes imply or require the interrupt attribute. */
7878 if (!lookup_attribute ("interrupt_handler", attrs)
7879 && !lookup_attribute ("interrupt_handler", DECL_ATTRIBUTES (node)))
7880 {
7881 /* If we have a trapa_handler, but no interrupt_handler attribute,
7882 insert an interrupt_handler attribute. */
7883 if (lookup_attribute ("trapa_handler", attrs) != NULL_TREE)
7884 /* We can't use sh_pr_interrupt here because that's not in the
7885 java frontend. */
7886 attrs
7887 = tree_cons (get_identifier("interrupt_handler"), NULL_TREE, attrs);
7888 /* However, for sp_switch, trap_exit and nosave_low_regs, if the
7889 interrupt attribute is missing, we ignore the attribute and warn. */
7890 else if (lookup_attribute ("sp_switch", attrs)
7891 || lookup_attribute ("trap_exit", attrs)
7892 || lookup_attribute ("nosave_low_regs", attrs))
7893 {
7894 tree *tail;
7895
7896 for (tail = attributes; attrs; attrs = TREE_CHAIN (attrs))
7897 {
7898 if (is_attribute_p ("sp_switch", TREE_PURPOSE (attrs))
7899 || is_attribute_p ("trap_exit", TREE_PURPOSE (attrs))
7900 || is_attribute_p ("nosave_low_regs", TREE_PURPOSE (attrs)))
7901 warning (OPT_Wattributes,
7902 "%qs attribute only applies to interrupt functions",
7903 IDENTIFIER_POINTER (TREE_PURPOSE (attrs)));
7904 else
7905 {
7906 *tail = tree_cons (TREE_PURPOSE (attrs), NULL_TREE,
7907 NULL_TREE);
7908 tail = &TREE_CHAIN (*tail);
7909 }
7910 }
7911 attrs = *attributes;
7912 }
7913 }
7914
7915 /* Install the processed list. */
7916 *attributes = attrs;
7917
7918 /* Clear deferred attributes. */
7919 sh_deferred_function_attributes = NULL_TREE;
7920 sh_deferred_function_attributes_tail = &sh_deferred_function_attributes;
7921
7922 return;
7923 }
7924
7925 /* Supported attributes:
7926
7927 interrupt_handler -- specifies this function is an interrupt handler.
7928
7929 trapa_handler -- like above, but don't save all registers.
7930
7931 sp_switch -- specifies an alternate stack for an interrupt handler
7932 to run on.
7933
7934 trap_exit -- use a trapa to exit an interrupt function instead of
7935 an rte instruction.
7936
7937 nosave_low_regs -- don't save r0..r7 in an interrupt handler.
7938 This is useful on the SH3 and upwards,
7939 which have a separate set of low regs for User and Supervisor modes.
7940 This should only be used for the lowest level of interrupts. Higher levels
7941 of interrupts must save the registers in case they themselves are
7942 interrupted.
7943
7944 renesas -- use Renesas calling/layout conventions (functions and
7945 structures).
7946
7947 */
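/* Illustrative uses of these attributes in user code; the handler names
   and the alternate-stack variable are made up:

     void *alt_stack;
     void __attribute__ ((interrupt_handler)) isr (void);
     void __attribute__ ((interrupt_handler, sp_switch ("alt_stack"),
                          trap_exit (11), nosave_low_regs)) low_isr (void);
     void __attribute__ ((trapa_handler)) trapa_isr (void);  */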
7948
7949 const struct attribute_spec sh_attribute_table[] =
7950 {
7951 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
7952 { "interrupt_handler", 0, 0, true, false, false, sh_handle_interrupt_handler_attribute },
7953 { "sp_switch", 1, 1, true, false, false, sh_handle_sp_switch_attribute },
7954 { "trap_exit", 1, 1, true, false, false, sh_handle_trap_exit_attribute },
7955 { "renesas", 0, 0, false, true, false, sh_handle_renesas_attribute },
7956 { "trapa_handler", 0, 0, true, false, false, sh_handle_interrupt_handler_attribute },
7957 { "nosave_low_regs", 0, 0, true, false, false, sh_handle_interrupt_handler_attribute },
7958 #ifdef SYMBIAN
7959 /* Symbian support adds three new attributes:
7960 dllexport - for exporting a function/variable that will live in a dll
7961 dllimport - for importing a function/variable from a dll
7962
7963 Microsoft allows multiple declspecs in one __declspec, separating
7964 them with spaces. We do NOT support this. Instead, use __declspec
7965 multiple times. */
7966 { "dllimport", 0, 0, true, false, false, sh_symbian_handle_dll_attribute },
7967 { "dllexport", 0, 0, true, false, false, sh_symbian_handle_dll_attribute },
7968 #endif
7969 { NULL, 0, 0, false, false, false, NULL }
7970 };
7971
7972 /* Handle an "interrupt_handler" attribute; arguments as in
7973 struct attribute_spec.handler. */
7974 static tree
7975 sh_handle_interrupt_handler_attribute (tree *node, tree name,
7976 tree args ATTRIBUTE_UNUSED,
7977 int flags ATTRIBUTE_UNUSED,
7978 bool *no_add_attrs)
7979 {
7980 if (TREE_CODE (*node) != FUNCTION_DECL)
7981 {
7982 warning (OPT_Wattributes, "%qs attribute only applies to functions",
7983 IDENTIFIER_POINTER (name));
7984 *no_add_attrs = true;
7985 }
7986 else if (TARGET_SHCOMPACT)
7987 {
7988 error ("attribute interrupt_handler is not compatible with -m5-compact");
7989 *no_add_attrs = true;
7990 }
7991
7992 return NULL_TREE;
7993 }
7994
7995 /* Handle an "sp_switch" attribute; arguments as in
7996 struct attribute_spec.handler. */
7997 static tree
7998 sh_handle_sp_switch_attribute (tree *node, tree name, tree args,
7999 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
8000 {
8001 if (TREE_CODE (*node) != FUNCTION_DECL)
8002 {
8003 warning (OPT_Wattributes, "%qs attribute only applies to functions",
8004 IDENTIFIER_POINTER (name));
8005 *no_add_attrs = true;
8006 }
8007 else if (TREE_CODE (TREE_VALUE (args)) != STRING_CST)
8008 {
8009 /* The argument must be a constant string. */
8010 warning (OPT_Wattributes, "%qs attribute argument not a string constant",
8011 IDENTIFIER_POINTER (name));
8012 *no_add_attrs = true;
8013 }
8014
8015 return NULL_TREE;
8016 }
8017
8018 /* Handle a "trap_exit" attribute; arguments as in
8019 struct attribute_spec.handler. */
8020 static tree
8021 sh_handle_trap_exit_attribute (tree *node, tree name, tree args,
8022 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
8023 {
8024 if (TREE_CODE (*node) != FUNCTION_DECL)
8025 {
8026 warning (OPT_Wattributes, "%qs attribute only applies to functions",
8027 IDENTIFIER_POINTER (name));
8028 *no_add_attrs = true;
8029 }
8030 /* The argument specifies a trap number to be used in a trapa instruction
8031 at function exit (instead of an rte instruction). */
8032 else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
8033 {
8034 /* The argument must be a constant integer. */
8035 warning (OPT_Wattributes, "%qs attribute argument not an "
8036 "integer constant", IDENTIFIER_POINTER (name));
8037 *no_add_attrs = true;
8038 }
8039
8040 return NULL_TREE;
8041 }
8042
8043 static tree
8044 sh_handle_renesas_attribute (tree *node ATTRIBUTE_UNUSED,
8045 tree name ATTRIBUTE_UNUSED,
8046 tree args ATTRIBUTE_UNUSED,
8047 int flags ATTRIBUTE_UNUSED,
8048 bool *no_add_attrs ATTRIBUTE_UNUSED)
8049 {
8050 return NULL_TREE;
8051 }
8052
8053 /* True if __attribute__((renesas)) or -mrenesas. */
8054 int
8055 sh_attr_renesas_p (tree td)
8056 {
8057 if (TARGET_HITACHI)
8058 return 1;
8059 if (td == 0)
8060 return 0;
8061 if (DECL_P (td))
8062 td = TREE_TYPE (td);
8063 if (td == error_mark_node)
8064 return 0;
8065 return (lookup_attribute ("renesas", TYPE_ATTRIBUTES (td))
8066 != NULL_TREE);
8067 }
8068
8069 /* True if __attribute__((renesas)) or -mrenesas, for the current
8070 function. */
8071 int
8072 sh_cfun_attr_renesas_p (void)
8073 {
8074 return sh_attr_renesas_p (current_function_decl);
8075 }
8076
8077 int
8078 sh_cfun_interrupt_handler_p (void)
8079 {
8080 return (lookup_attribute ("interrupt_handler",
8081 DECL_ATTRIBUTES (current_function_decl))
8082 != NULL_TREE);
8083 }
8084
8085 /* Implement TARGET_CHECK_PCH_TARGET_FLAGS. */
8086
8087 static const char *
8088 sh_check_pch_target_flags (int old_flags)
8089 {
8090 if ((old_flags ^ target_flags) & (MASK_SH1 | MASK_SH2 | MASK_SH3
8091 | MASK_SH_E | MASK_HARD_SH4
8092 | MASK_FPU_SINGLE | MASK_SH4))
8093 return _("created and used with different architectures / ABIs");
8094 if ((old_flags ^ target_flags) & MASK_HITACHI)
8095 return _("created and used with different ABIs");
8096 if ((old_flags ^ target_flags) & MASK_LITTLE_ENDIAN)
8097 return _("created and used with different endianness");
8098 return NULL;
8099 }
8100 \f
8101 /* Predicates used by the templates. */
8102
8103 /* Returns 1 if OP is MACL, MACH or PR. The input must be a REG rtx.
8104 Used only in general_movsrc_operand. */
8105
8106 int
8107 system_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8108 {
8109 switch (REGNO (op))
8110 {
8111 case PR_REG:
8112 case MACL_REG:
8113 case MACH_REG:
8114 return 1;
8115 }
8116 return 0;
8117 }
8118
8119 /* Nonzero if OP is a floating point value with value 0.0. */
8120
8121 int
8122 fp_zero_operand (rtx op)
8123 {
8124 REAL_VALUE_TYPE r;
8125
8126 if (GET_MODE (op) != SFmode)
8127 return 0;
8128
8129 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
8130 return REAL_VALUES_EQUAL (r, dconst0) && ! REAL_VALUE_MINUS_ZERO (r);
8131 }
8132
8133 /* Nonzero if OP is a floating point value with value 1.0. */
8134
8135 int
8136 fp_one_operand (rtx op)
8137 {
8138 REAL_VALUE_TYPE r;
8139
8140 if (GET_MODE (op) != SFmode)
8141 return 0;
8142
8143 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
8144 return REAL_VALUES_EQUAL (r, dconst1);
8145 }
8146
8147 /* For -m4 and -m4-single-only, mode switching is used. If we are
8148 compiling without -mfmovd, movsf_ie isn't taken into account for
8149 mode switching. We could check in machine_dependent_reorg for
8150 cases where we know we are in single precision mode, but there is
8151 no interface to find that out during reload, so we must avoid
8152 choosing an fldi alternative during reload and thus failing to
8153 allocate a scratch register for the constant loading. */
8154 int
8155 fldi_ok (void)
8156 {
8157 return ! TARGET_SH4 || TARGET_FMOVD || reload_completed;
8158 }
8159
8160 int
8161 tertiary_reload_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8162 {
8163 enum rtx_code code = GET_CODE (op);
8164 return code == MEM || (TARGET_SH4 && code == CONST_DOUBLE);
8165 }
8166
8167 /* Return the TLS type for TLS symbols, 0 otherwise. */
8168 int
8169 tls_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8170 {
8171 if (GET_CODE (op) != SYMBOL_REF)
8172 return 0;
8173 return SYMBOL_REF_TLS_MODEL (op);
8174 }
8175 \f
8176 /* Return the destination address of a branch. */
8177
8178 static int
8179 branch_dest (rtx branch)
8180 {
8181 rtx dest = SET_SRC (PATTERN (branch));
8182 int dest_uid;
8183
8184 if (GET_CODE (dest) == IF_THEN_ELSE)
8185 dest = XEXP (dest, 1);
8186 dest = XEXP (dest, 0);
8187 dest_uid = INSN_UID (dest);
8188 return INSN_ADDRESSES (dest_uid);
8189 }
8190 \f
8191 /* Return nonzero if REG is not used after INSN.
8192 We assume REG is a reload reg, and therefore does
8193 not live past labels. It may live past calls or jumps though. */
8194 int
8195 reg_unused_after (rtx reg, rtx insn)
8196 {
8197 enum rtx_code code;
8198 rtx set;
8199
8200 /* If the reg is set by this instruction, then it is safe for our
8201 case. Disregard the case where this is a store to memory, since
8202 we are checking a register used in the store address. */
8203 set = single_set (insn);
8204 if (set && GET_CODE (SET_DEST (set)) != MEM
8205 && reg_overlap_mentioned_p (reg, SET_DEST (set)))
8206 return 1;
8207
8208 while ((insn = NEXT_INSN (insn)))
8209 {
8210 rtx set;
8211 if (!INSN_P (insn))
8212 continue;
8213
8214 code = GET_CODE (insn);
8215
8216 #if 0
8217 /* If this is a label that existed before reload, then the register
8218 is dead here. However, if this is a label added by reorg, then
8219 the register may still be live here. We can't tell the difference,
8220 so we just ignore labels completely. */
8221 if (code == CODE_LABEL)
8222 return 1;
8223 /* else */
8224 #endif
8225
8226 if (code == JUMP_INSN)
8227 return 0;
8228
8229 /* If this is a sequence, we must handle them all at once.
8230 We could have for instance a call that sets the target register,
8231 and an insn in a delay slot that uses the register. In this case,
8232 we must return 0. */
8233 else if (code == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
8234 {
8235 int i;
8236 int retval = 0;
8237
8238 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
8239 {
8240 rtx this_insn = XVECEXP (PATTERN (insn), 0, i);
8241 rtx set = single_set (this_insn);
8242
8243 if (GET_CODE (this_insn) == CALL_INSN)
8244 code = CALL_INSN;
8245 else if (GET_CODE (this_insn) == JUMP_INSN)
8246 {
8247 if (INSN_ANNULLED_BRANCH_P (this_insn))
8248 return 0;
8249 code = JUMP_INSN;
8250 }
8251
8252 if (set && reg_overlap_mentioned_p (reg, SET_SRC (set)))
8253 return 0;
8254 if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
8255 {
8256 if (GET_CODE (SET_DEST (set)) != MEM)
8257 retval = 1;
8258 else
8259 return 0;
8260 }
8261 if (set == 0
8262 && reg_overlap_mentioned_p (reg, PATTERN (this_insn)))
8263 return 0;
8264 }
8265 if (retval == 1)
8266 return 1;
8267 else if (code == JUMP_INSN)
8268 return 0;
8269 }
8270
8271 set = single_set (insn);
8272 if (set && reg_overlap_mentioned_p (reg, SET_SRC (set)))
8273 return 0;
8274 if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
8275 return GET_CODE (SET_DEST (set)) != MEM;
8276 if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
8277 return 0;
8278
8279 if (code == CALL_INSN && call_really_used_regs[REGNO (reg)])
8280 return 1;
8281 }
8282 return 1;
8283 }
8284 \f
8285 #include "ggc.h"
8286
8287 static GTY(()) rtx fpscr_rtx;
8288 rtx
8289 get_fpscr_rtx (void)
8290 {
8291 if (! fpscr_rtx)
8292 {
8293 fpscr_rtx = gen_rtx_REG (PSImode, FPSCR_REG);
8294 REG_USERVAR_P (fpscr_rtx) = 1;
8295 mark_user_reg (fpscr_rtx);
8296 }
8297 if (! reload_completed || mdep_reorg_phase != SH_AFTER_MDEP_REORG)
8298 mark_user_reg (fpscr_rtx);
8299 return fpscr_rtx;
8300 }
8301
8302 static GTY(()) tree fpscr_values;
8303
8304 static void
8305 emit_fpu_switch (rtx scratch, int index)
8306 {
8307 rtx dst, src;
8308
8309 if (fpscr_values == NULL)
8310 {
8311 tree t;
8312
8313 t = build_index_type (integer_one_node);
8314 t = build_array_type (integer_type_node, t);
8315 t = build_decl (VAR_DECL, get_identifier ("__fpscr_values"), t);
8316 DECL_ARTIFICIAL (t) = 1;
8317 DECL_IGNORED_P (t) = 1;
8318 DECL_EXTERNAL (t) = 1;
8319 TREE_STATIC (t) = 1;
8320 TREE_PUBLIC (t) = 1;
8321 TREE_USED (t) = 1;
8322
8323 fpscr_values = t;
8324 }
8325
8326 src = DECL_RTL (fpscr_values);
8327 if (!can_create_pseudo_p ())
8328 {
8329 emit_move_insn (scratch, XEXP (src, 0));
8330 if (index != 0)
8331 emit_insn (gen_addsi3 (scratch, scratch, GEN_INT (index * 4)));
8332 src = adjust_automodify_address (src, PSImode, scratch, index * 4);
8333 }
8334 else
8335 src = adjust_address (src, PSImode, index * 4);
8336
8337 dst = get_fpscr_rtx ();
8338 emit_move_insn (dst, src);
8339 }
8340
8341 void
8342 emit_sf_insn (rtx pat)
8343 {
8344 emit_insn (pat);
8345 }
8346
8347 void
8348 emit_df_insn (rtx pat)
8349 {
8350 emit_insn (pat);
8351 }
8352
8353 void
8354 expand_sf_unop (rtx (*fun) (rtx, rtx, rtx), rtx *operands)
8355 {
8356 emit_sf_insn ((*fun) (operands[0], operands[1], get_fpscr_rtx ()));
8357 }
8358
8359 void
8360 expand_sf_binop (rtx (*fun) (rtx, rtx, rtx, rtx), rtx *operands)
8361 {
8362 emit_sf_insn ((*fun) (operands[0], operands[1], operands[2],
8363 get_fpscr_rtx ()));
8364 }
8365
8366 void
8367 expand_df_unop (rtx (*fun) (rtx, rtx, rtx), rtx *operands)
8368 {
8369 emit_df_insn ((*fun) (operands[0], operands[1], get_fpscr_rtx ()));
8370 }
8371
8372 void
8373 expand_df_binop (rtx (*fun) (rtx, rtx, rtx, rtx), rtx *operands)
8374 {
8375 emit_df_insn ((*fun) (operands[0], operands[1], operands[2],
8376 get_fpscr_rtx ()));
8377 }
8378 \f
8379 static rtx get_free_reg (HARD_REG_SET);
8380
8381 /* This function returns a register to use for loading the address from
8382 which to load the fpscr. Currently it always returns r1 or r7, but when we are
8383 able to use pseudo registers after combine, or have a better mechanism
8384 for choosing a register, it should be done here. */
8385 /* REGS_LIVE is the liveness information for the point for which we
8386 need this allocation. In some bare-bones exit blocks, r1 is live at the
8387 start. We can even have all of r0..r3 being live:
8388 __complex__ long long f (double d) { if (d == 0) return 2; else return 3; }
8389 The INSN before which new insns are placed will clobber the register
8390 we return. If a basic block consists only of setting the return value
8391 register to a pseudo and using that register, the return value is not
8392 live before or after this block, yet we'll insert our insns right in
8393 the middle. */
8394
8395 static rtx
8396 get_free_reg (HARD_REG_SET regs_live)
8397 {
8398 if (! TEST_HARD_REG_BIT (regs_live, 1))
8399 return gen_rtx_REG (Pmode, 1);
8400
8401 /* Hard reg 1 is live; since this is a SMALL_REGISTER_CLASSES target,
8402 there shouldn't be anything but a jump before the function end. */
8403 gcc_assert (!TEST_HARD_REG_BIT (regs_live, 7));
8404 return gen_rtx_REG (Pmode, 7);
8405 }
8406
8407 /* This function will set the fpscr from memory.
8408 MODE is the mode we are setting it to. */
8409 void
8410 fpscr_set_from_mem (int mode, HARD_REG_SET regs_live)
8411 {
8412 enum attr_fp_mode fp_mode = mode;
8413 enum attr_fp_mode norm_mode = ACTUAL_NORMAL_MODE (FP_MODE);
8414 rtx addr_reg;
8415
8416 addr_reg = !can_create_pseudo_p () ? get_free_reg (regs_live) : NULL_RTX;
8417 emit_fpu_switch (addr_reg, fp_mode == norm_mode);
8418 }
8419
8420 /* Is the given character a logical line separator for the assembler? */
8421 #ifndef IS_ASM_LOGICAL_LINE_SEPARATOR
8422 #define IS_ASM_LOGICAL_LINE_SEPARATOR(C) ((C) == ';')
8423 #endif
8424
8425 int
8426 sh_insn_length_adjustment (rtx insn)
8427 {
8428 /* Instructions with unfilled delay slots take up an extra two bytes for
8429 the nop in the delay slot. */
8430 if (((GET_CODE (insn) == INSN
8431 && GET_CODE (PATTERN (insn)) != USE
8432 && GET_CODE (PATTERN (insn)) != CLOBBER)
8433 || GET_CODE (insn) == CALL_INSN
8434 || (GET_CODE (insn) == JUMP_INSN
8435 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
8436 && GET_CODE (PATTERN (insn)) != ADDR_VEC))
8437 && GET_CODE (PATTERN (NEXT_INSN (PREV_INSN (insn)))) != SEQUENCE
8438 && get_attr_needs_delay_slot (insn) == NEEDS_DELAY_SLOT_YES)
8439 return 2;
8440
8441 /* SH2e has a bug that prevents the use of annulled branches, so if
8442 the delay slot is not filled, we'll have to put a NOP in it. */
8443 if (sh_cpu == CPU_SH2E
8444 && GET_CODE (insn) == JUMP_INSN
8445 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
8446 && GET_CODE (PATTERN (insn)) != ADDR_VEC
8447 && get_attr_type (insn) == TYPE_CBRANCH
8448 && GET_CODE (PATTERN (NEXT_INSN (PREV_INSN (insn)))) != SEQUENCE)
8449 return 2;
8450
8451 /* sh-dsp parallel processing insns take four bytes instead of two. */
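   /* For example, an inline asm such as asm ("padd x0,y0,a0") -- an
      illustrative sh-dsp parallel add; the exact operand combination is
      hypothetical -- picks up a 2-byte adjustment from the scan below,
      while asm ("pref @r1") and labels do not.  */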
8452
8453 if (GET_CODE (insn) == INSN)
8454 {
8455 int sum = 0;
8456 rtx body = PATTERN (insn);
8457 const char *template;
8458 char c;
8459 int maybe_label = 1;
8460
8461 if (GET_CODE (body) == ASM_INPUT)
8462 template = XSTR (body, 0);
8463 else if (asm_noperands (body) >= 0)
8464 template
8465 = decode_asm_operands (body, NULL, NULL, NULL, NULL, NULL);
8466 else
8467 return 0;
8468 do
8469 {
8470 int ppi_adjust = 0;
8471
8472 do
8473 c = *template++;
8474 while (c == ' ' || c == '\t');
8475 /* all sh-dsp parallel-processing insns start with p.
8476 The only non-ppi sh insn starting with p is pref.
8477 The only ppi starting with pr is prnd. */
8478 if ((c == 'p' || c == 'P') && strncasecmp ("re", template, 2))
8479 ppi_adjust = 2;
8480 /* The repeat pseudo-insn expands to three insns, a total of
8481 six bytes in size. */
8482 else if ((c == 'r' || c == 'R')
8483 && ! strncasecmp ("epeat", template, 5))
8484 ppi_adjust = 4;
8485 while (c && c != '\n' && ! IS_ASM_LOGICAL_LINE_SEPARATOR (c))
8486 {
8487 /* If this is a label, it is obviously not a ppi insn. */
8488 if (c == ':' && maybe_label)
8489 {
8490 ppi_adjust = 0;
8491 break;
8492 }
8493 else if (c == '\'' || c == '"')
8494 maybe_label = 0;
8495 c = *template++;
8496 }
8497 sum += ppi_adjust;
8498 maybe_label = c != ':';
8499 }
8500 while (c);
8501 return sum;
8502 }
8503 return 0;
8504 }
8505 \f
8506 /* Return TRUE if X references a SYMBOL_REF or LABEL_REF whose symbol
8507 isn't protected by a PIC unspec. */
8508 int
8509 nonpic_symbol_mentioned_p (rtx x)
8510 {
8511 register const char *fmt;
8512 register int i;
8513
8514 if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF
8515 || GET_CODE (x) == PC)
8516 return 1;
8517
8518 /* We don't want to look into the possible MEM location of a
8519 CONST_DOUBLE, since we're not going to use it, in general. */
8520 if (GET_CODE (x) == CONST_DOUBLE)
8521 return 0;
8522
8523 if (GET_CODE (x) == UNSPEC
8524 && (XINT (x, 1) == UNSPEC_PIC
8525 || XINT (x, 1) == UNSPEC_GOT
8526 || XINT (x, 1) == UNSPEC_GOTOFF
8527 || XINT (x, 1) == UNSPEC_GOTPLT
8528 || XINT (x, 1) == UNSPEC_GOTTPOFF
8529 || XINT (x, 1) == UNSPEC_DTPOFF
8530 || XINT (x, 1) == UNSPEC_PLT))
8531 return 0;
8532
8533 fmt = GET_RTX_FORMAT (GET_CODE (x));
8534 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8535 {
8536 if (fmt[i] == 'E')
8537 {
8538 register int j;
8539
8540 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8541 if (nonpic_symbol_mentioned_p (XVECEXP (x, i, j)))
8542 return 1;
8543 }
8544 else if (fmt[i] == 'e' && nonpic_symbol_mentioned_p (XEXP (x, i)))
8545 return 1;
8546 }
8547
8548 return 0;
8549 }
8550
8551 /* Convert a non-PIC address in `orig' to a PIC address using @GOT or
8552 @GOTOFF in `reg'. */
8553 rtx
8554 legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
8555 rtx reg)
8556 {
8557 if (tls_symbolic_operand (orig, Pmode))
8558 return orig;
8559
8560 if (GET_CODE (orig) == LABEL_REF
8561 || (GET_CODE (orig) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (orig)))
8562 {
8563 if (reg == 0)
8564 reg = gen_reg_rtx (Pmode);
8565
8566 emit_insn (gen_symGOTOFF2reg (reg, orig));
8567 return reg;
8568 }
8569 else if (GET_CODE (orig) == SYMBOL_REF)
8570 {
8571 if (reg == 0)
8572 reg = gen_reg_rtx (Pmode);
8573
8574 emit_insn (gen_symGOT2reg (reg, orig));
8575 return reg;
8576 }
8577 return orig;
8578 }
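/* In short: label references and local symbols are loaded @GOTOFF via
   gen_symGOTOFF2reg, other symbols go through the GOT via gen_symGOT2reg,
   and TLS symbols and everything else are returned unchanged.  */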
8579
8580 /* Mark the use of a constant in the literal table. If the constant
8581 has multiple labels, make it unique. */
8582 static rtx
8583 mark_constant_pool_use (rtx x)
8584 {
8585 rtx insn, lab, pattern;
8586
8587 if (x == NULL)
8588 return x;
8589
8590 switch (GET_CODE (x))
8591 {
8592 case LABEL_REF:
8593 x = XEXP (x, 0);
8594 case CODE_LABEL:
8595 break;
8596 default:
8597 return x;
8598 }
8599
8600 /* Get the first label in the list of labels for the same constant
8601 and delete the other labels in the list. */
8602 lab = x;
8603 for (insn = PREV_INSN (x); insn; insn = PREV_INSN (insn))
8604 {
8605 if (GET_CODE (insn) != CODE_LABEL
8606 || LABEL_REFS (insn) != NEXT_INSN (insn))
8607 break;
8608 lab = insn;
8609 }
8610
8611 for (insn = LABEL_REFS (lab); insn; insn = LABEL_REFS (insn))
8612 INSN_DELETED_P (insn) = 1;
8613
8614 /* Mark constants in a window. */
8615 for (insn = NEXT_INSN (x); insn; insn = NEXT_INSN (insn))
8616 {
8617 if (GET_CODE (insn) != INSN)
8618 continue;
8619
8620 pattern = PATTERN (insn);
8621 if (GET_CODE (pattern) != UNSPEC_VOLATILE)
8622 continue;
8623
8624 switch (XINT (pattern, 1))
8625 {
8626 case UNSPECV_CONST2:
8627 case UNSPECV_CONST4:
8628 case UNSPECV_CONST8:
8629 XVECEXP (pattern, 0, 1) = const1_rtx;
8630 break;
8631 case UNSPECV_WINDOW_END:
8632 if (XVECEXP (pattern, 0, 0) == x)
8633 return lab;
8634 break;
8635 case UNSPECV_CONST_END:
8636 return lab;
8637 default:
8638 break;
8639 }
8640 }
8641
8642 return lab;
8643 }
8644 \f
8645 /* Return true if it's possible to redirect BRANCH1 to the destination
8646 of an unconditional jump BRANCH2. We only want to do this if the
8647 resulting branch will have a short displacement. */
8648 int
8649 sh_can_redirect_branch (rtx branch1, rtx branch2)
8650 {
8651 if (flag_expensive_optimizations && simplejump_p (branch2))
8652 {
8653 rtx dest = XEXP (SET_SRC (single_set (branch2)), 0);
8654 rtx insn;
8655 int distance;
8656
8657 for (distance = 0, insn = NEXT_INSN (branch1);
8658 insn && distance < 256;
8659 insn = PREV_INSN (insn))
8660 {
8661 if (insn == dest)
8662 return 1;
8663 else
8664 distance += get_attr_length (insn);
8665 }
8666 for (distance = 0, insn = NEXT_INSN (branch1);
8667 insn && distance < 256;
8668 insn = NEXT_INSN (insn))
8669 {
8670 if (insn == dest)
8671 return 1;
8672 else
8673 distance += get_attr_length (insn);
8674 }
8675 }
8676 return 0;
8677 }
8678
8679 /* Return nonzero if register old_reg can be renamed to register new_reg. */
8680 int
8681 sh_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED,
8682 unsigned int new_reg)
8683 {
8684 /* Interrupt functions can only use registers that have already been
8685 saved by the prologue, even if they would normally be
8686 call-clobbered. */
8687
8688 if (sh_cfun_interrupt_handler_p () && !df_regs_ever_live_p (new_reg))
8689 return 0;
8690
8691 return 1;
8692 }
8693
8694 /* Function to update the integer COST
8695 based on the relationship between INSN that is dependent on
8696 DEP_INSN through the dependence LINK. The default is to make no
8697 adjustment to COST. This can be used for example to specify to
8698 the scheduler that an output- or anti-dependence does not incur
8699 the same cost as a data-dependence. The return value should be
8700 the new value for COST. */
8701 static int
8702 sh_adjust_cost (rtx insn, rtx link ATTRIBUTE_UNUSED, rtx dep_insn, int cost)
8703 {
8704 rtx reg, use_pat;
8705
8706 if (TARGET_SHMEDIA)
8707 {
8708 /* On SHmedia, if the dependence is an anti-dependence or
8709 output-dependence, there is no cost. */
8710 if (REG_NOTE_KIND (link) != 0)
8711 {
8712 /* However, dependencies between target register loads and
8713 uses of the register in a subsequent block that are separated
8714 by a conditional branch are not modelled - we have to make do with
8715 the anti-dependency between the target register load and the
8716 conditional branch that ends the current block. */
8717 if (REG_NOTE_KIND (link) == REG_DEP_ANTI
8718 && GET_CODE (PATTERN (dep_insn)) == SET
8719 && (get_attr_type (dep_insn) == TYPE_PT_MEDIA
8720 || get_attr_type (dep_insn) == TYPE_PTABS_MEDIA)
8721 && get_attr_type (insn) == TYPE_CBRANCH_MEDIA)
8722 {
8723 int orig_cost = cost;
8724 rtx note = find_reg_note (insn, REG_BR_PROB, 0);
8725 rtx target = ((! note
8726 || INTVAL (XEXP (note, 0)) * 2 < REG_BR_PROB_BASE)
8727 ? insn : JUMP_LABEL (insn));
8728 /* On the likely path, the branch costs 1, on the unlikely path,
8729 it costs 3. */
8730 cost--;
8731 do
8732 target = next_active_insn (target);
8733 while (target && ! flow_dependent_p (target, dep_insn)
8734 && --cost > 0);
8735 /* If two branches are executed in immediate succession, with the
8736 first branch properly predicted, this causes a stall at the
8737 second branch, hence we won't need the target for the
8738 second branch for two cycles after the launch of the first
8739 branch. */
8740 if (cost > orig_cost - 2)
8741 cost = orig_cost - 2;
8742 }
8743 else
8744 cost = 0;
8745 }
8746
8747 else if (get_attr_is_mac_media (insn)
8748 && get_attr_is_mac_media (dep_insn))
8749 cost = 1;
8750
8751 else if (! reload_completed
8752 && GET_CODE (PATTERN (insn)) == SET
8753 && GET_CODE (SET_SRC (PATTERN (insn))) == FLOAT
8754 && GET_CODE (PATTERN (dep_insn)) == SET
8755 && fp_arith_reg_operand (SET_SRC (PATTERN (dep_insn)), VOIDmode)
8756 && cost < 4)
8757 cost = 4;
8758 /* Schedule the ptabs for a casesi_jump_media in preference to stuff
8759 that is needed at the target. */
8760 else if (get_attr_type (insn) == TYPE_JUMP_MEDIA
8761 && ! flow_dependent_p (insn, dep_insn))
8762 cost--;
8763 }
8764 else if (REG_NOTE_KIND (link) == 0)
8765 {
8766 enum attr_type type;
8767 rtx dep_set;
8768
8769 if (recog_memoized (insn) < 0
8770 || recog_memoized (dep_insn) < 0)
8771 return cost;
8772
8773 dep_set = single_set (dep_insn);
8774
8775 /* The latency that we specify in the scheduling description refers
8776 to the actual output, not to an auto-increment register; for that,
8777 the latency is one. */
8778 if (dep_set && MEM_P (SET_SRC (dep_set)) && cost > 1)
8779 {
8780 rtx set = single_set (insn);
8781
8782 if (set
8783 && !reg_mentioned_p (SET_DEST (dep_set), SET_SRC (set))
8784 && (!MEM_P (SET_DEST (set))
8785 || !reg_mentioned_p (SET_DEST (dep_set),
8786 XEXP (SET_DEST (set), 0))))
8787 cost = 1;
8788 }
8789 /* The only input for a call that is timing-critical is the
8790 function's address. */
8791 if (GET_CODE (insn) == CALL_INSN)
8792 {
8793 rtx call = PATTERN (insn);
8794
8795 if (GET_CODE (call) == PARALLEL)
8796 call = XVECEXP (call, 0 ,0);
8797 if (GET_CODE (call) == SET)
8798 call = SET_SRC (call);
8799 if (GET_CODE (call) == CALL && GET_CODE (XEXP (call, 0)) == MEM
8800 /* sibcalli_thunk uses a symbol_ref in an unspec. */
8801 && (GET_CODE (XEXP (XEXP (call, 0), 0)) == UNSPEC
8802 || ! reg_set_p (XEXP (XEXP (call, 0), 0), dep_insn)))
8803 cost -= TARGET_SH4_300 ? 3 : 6;
8804 }
8805 /* Likewise, the most timing-critical input for an sfunc call
8806 is the function address. However, sfuncs typically start
8807 using their arguments pretty quickly.
8808 Assume a four cycle delay for SH4 before they are needed.
8809 Cached ST40-300 calls are quicker, so assume only a one
8810 cycle delay there.
8811 ??? Maybe we should encode the delays till input registers
8812 are needed by sfuncs into the sfunc call insn. */
8813 /* All sfunc calls are parallels with at least four components.
8814 Exploit this to avoid unnecessary calls to sfunc_uses_reg. */
8815 else if (GET_CODE (PATTERN (insn)) == PARALLEL
8816 && XVECLEN (PATTERN (insn), 0) >= 4
8817 && (reg = sfunc_uses_reg (insn)))
8818 {
8819 if (! reg_set_p (reg, dep_insn))
8820 cost -= TARGET_SH4_300 ? 1 : 4;
8821 }
8822 if (TARGET_HARD_SH4 && !TARGET_SH4_300)
8823 {
8824 enum attr_type dep_type = get_attr_type (dep_insn);
8825
8826 if (dep_type == TYPE_FLOAD || dep_type == TYPE_PCFLOAD)
8827 cost--;
8828 else if ((dep_type == TYPE_LOAD_SI || dep_type == TYPE_PCLOAD_SI)
8829 && (type = get_attr_type (insn)) != TYPE_CALL
8830 && type != TYPE_SFUNC)
8831 cost--;
8832 /* When the preceding instruction loads the shift amount of
8833 the following SHAD/SHLD, the latency of the load is increased
8834 by 1 cycle. */
8835 if (get_attr_type (insn) == TYPE_DYN_SHIFT
8836 && get_attr_any_int_load (dep_insn) == ANY_INT_LOAD_YES
8837 && reg_overlap_mentioned_p (SET_DEST (dep_set),
8838 XEXP (SET_SRC (single_set (insn)),
8839 1)))
8840 cost++;
8841 /* When an LS group instruction with a latency of less than
8842 3 cycles is followed by a double-precision floating-point
8843 instruction, FIPR, or FTRV, the latency of the first
8844 instruction is increased to 3 cycles. */
8845 else if (cost < 3
8846 && get_attr_insn_class (dep_insn) == INSN_CLASS_LS_GROUP
8847 && get_attr_dfp_comp (insn) == DFP_COMP_YES)
8848 cost = 3;
8849 /* The lsw register of a double-precision computation is ready one
8850 cycle earlier. */
8851 else if (reload_completed
8852 && get_attr_dfp_comp (dep_insn) == DFP_COMP_YES
8853 && (use_pat = single_set (insn))
8854 && ! regno_use_in (REGNO (SET_DEST (single_set (dep_insn))),
8855 SET_SRC (use_pat)))
8856 cost -= 1;
8857
8858 if (get_attr_any_fp_comp (dep_insn) == ANY_FP_COMP_YES
8859 && get_attr_late_fp_use (insn) == LATE_FP_USE_YES)
8860 cost -= 1;
8861 }
8862 else if (TARGET_SH4_300)
8863 {
8864 /* Stores need their input register two cycles later. */
8865 if (dep_set && cost >= 1
8866 && ((type = get_attr_type (insn)) == TYPE_STORE
8867 || type == TYPE_PSTORE
8868 || type == TYPE_FSTORE || type == TYPE_MAC_MEM))
8869 {
8870 rtx set = single_set (insn);
8871
8872 if (!reg_mentioned_p (SET_SRC (set), XEXP (SET_DEST (set), 0))
8873 && rtx_equal_p (SET_SRC (set), SET_DEST (dep_set)))
8874 {
8875 cost -= 2;
8876 /* But don't reduce the cost below 1 if the address depends
8877 on a side effect of dep_insn. */
8878 if (cost < 1
8879 && modified_in_p (XEXP (SET_DEST (set), 0), dep_insn))
8880 cost = 1;
8881 }
8882 }
8883 }
8884 }
8885 /* An anti-dependence penalty of two applies if the first insn is a double
8886 precision fadd / fsub / fmul. */
8887 else if (!TARGET_SH4_300
8888 && REG_NOTE_KIND (link) == REG_DEP_ANTI
8889 && recog_memoized (dep_insn) >= 0
8890 && (get_attr_type (dep_insn) == TYPE_DFP_ARITH
8891 || get_attr_type (dep_insn) == TYPE_DFP_MUL)
8892 /* A lot of alleged anti-flow dependences are fake,
8893 so check this one is real. */
8894 && flow_dependent_p (dep_insn, insn))
8895 cost = 2;
8896
8897 return cost;
8898 }
8899
8900 /* Check if INSN is flow-dependent on DEP_INSN. Can also be used to check
8901 if DEP_INSN is anti-flow dependent on INSN. */
8902 static int
8903 flow_dependent_p (rtx insn, rtx dep_insn)
8904 {
8905 rtx tmp = PATTERN (insn);
8906
8907 note_stores (PATTERN (dep_insn), flow_dependent_p_1, &tmp);
8908 return tmp == NULL_RTX;
8909 }
8910
8911 /* A helper function for flow_dependent_p called through note_stores. */
8912 static void
8913 flow_dependent_p_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
8914 {
8915 rtx * pinsn = (rtx *) data;
8916
8917 if (*pinsn && reg_referenced_p (x, *pinsn))
8918 *pinsn = NULL_RTX;
8919 }
8920
8921 /* For use by sh_allocate_initial_value. Note that sh.md contains some
8922 'special function' patterns (type sfunc) that clobber pr, but that
8923 do not look like function calls to leaf_function_p. Hence we must
8924 do this extra check. */
8925 static int
8926 sh_pr_n_sets (void)
8927 {
8928 return DF_REG_DEF_COUNT (TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG);
8929 }
8930
8931 /* Return where to allocate pseudo for a given hard register initial
8932 value. */
8933 static rtx
8934 sh_allocate_initial_value (rtx hard_reg)
8935 {
8936 rtx x;
8937
8938 if (REGNO (hard_reg) == (TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG))
8939 {
8940 if (current_function_is_leaf
8941 && ! sh_pr_n_sets ()
8942 && ! (TARGET_SHCOMPACT
8943 && ((current_function_args_info.call_cookie
8944 & ~ CALL_COOKIE_RET_TRAMP (1))
8945 || current_function_has_nonlocal_label)))
8946 x = hard_reg;
8947 else
8948 x = gen_frame_mem (Pmode, return_address_pointer_rtx);
8949 }
8950 else
8951 x = NULL_RTX;
8952
8953 return x;
8954 }
8955
8956 /* This function returns "2" to indicate dual issue for the SH4
8957 processor. To be used by the DFA pipeline description. */
8958 static int
8959 sh_issue_rate (void)
8960 {
8961 if (TARGET_SUPERSCALAR)
8962 return 2;
8963 else
8964 return 1;
8965 }
8966
8967 /* Functions for ready queue reordering for sched1. */
8968
8969 /* Get weight for mode for a set x. */
8970 static short
8971 find_set_regmode_weight (rtx x, enum machine_mode mode)
8972 {
8973 if (GET_CODE (x) == CLOBBER && register_operand (SET_DEST (x), mode))
8974 return 1;
8975 if (GET_CODE (x) == SET && register_operand (SET_DEST (x), mode))
8976 {
8977 if (GET_CODE (SET_DEST (x)) == REG)
8978 {
8979 if (!reg_mentioned_p (SET_DEST (x), SET_SRC (x)))
8980 return 1;
8981 else
8982 return 0;
8983 }
8984 return 1;
8985 }
8986 return 0;
8987 }
8988
8989 /* Get regmode weight for insn. */
8990 static short
8991 find_insn_regmode_weight (rtx insn, enum machine_mode mode)
8992 {
8993 short reg_weight = 0;
8994 rtx x;
8995
8996 /* Increment weight for each register born here. */
8997 x = PATTERN (insn);
8998 reg_weight += find_set_regmode_weight (x, mode);
8999 if (GET_CODE (x) == PARALLEL)
9000 {
9001 int j;
9002 for (j = XVECLEN (x, 0) - 1; j >= 0; j--)
9003 {
9004 x = XVECEXP (PATTERN (insn), 0, j);
9005 reg_weight += find_set_regmode_weight (x, mode);
9006 }
9007 }
9008 /* Decrement weight for each register that dies here. */
9009 for (x = REG_NOTES (insn); x; x = XEXP (x, 1))
9010 {
9011 if (REG_NOTE_KIND (x) == REG_DEAD || REG_NOTE_KIND (x) == REG_UNUSED)
9012 {
9013 rtx note = XEXP (x, 0);
9014 if (GET_CODE (note) == REG && GET_MODE (note) == mode)
9015 reg_weight--;
9016 }
9017 }
9018 return reg_weight;
9019 }
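/* For illustration (hypothetical insn): a single set such as
     (set (reg:SF r) (plus:SF (reg:SF a) (reg:SF b)))
   where both A and B carry REG_DEAD notes gets an SFmode weight of
   1 - 2 = -1: one SFmode register is born here and two die.  */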
9020
9021 /* Calculate regmode weights for all insns of a basic block. */
9022 static void
9023 find_regmode_weight (basic_block b, enum machine_mode mode)
9024 {
9025 rtx insn, next_tail, head, tail;
9026
9027 get_ebb_head_tail (b, b, &head, &tail);
9028 next_tail = NEXT_INSN (tail);
9029
9030 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
9031 {
9032 /* Handle register life information. */
9033 if (!INSN_P (insn))
9034 continue;
9035
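/* A DFmode value occupies two SFmode registers and a DImode value two
   SImode registers, so insns using the wider mode count double towards
   the pressure for the narrower mode. */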
9036 if (mode == SFmode)
9037 INSN_REGMODE_WEIGHT (insn, mode) =
9038 find_insn_regmode_weight (insn, mode) + 2 * find_insn_regmode_weight (insn, DFmode);
9039 else if (mode == SImode)
9040 INSN_REGMODE_WEIGHT (insn, mode) =
9041 find_insn_regmode_weight (insn, mode) + 2 * find_insn_regmode_weight (insn, DImode);
9042 }
9043 }
9044
9045 /* Comparison function for ready queue sorting. */
9046 static int
9047 rank_for_reorder (const void *x, const void *y)
9048 {
9049 rtx tmp = *(const rtx *) y;
9050 rtx tmp2 = *(const rtx *) x;
9051
9052 /* An insn in a schedule group should be issued first. */
9053 if (SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
9054 return SCHED_GROUP_P (tmp2) ? 1 : -1;
9055
9056 /* If insns are equally good, sort by INSN_LUID (original insn order); this
9057 minimizes instruction movement, thus minimizing sched's effect on
9058 register pressure. */
9059 return INSN_LUID (tmp) - INSN_LUID (tmp2);
9060 }
9061
9062 /* Resort the array A, in which only the last element (A[N-1]) may be out of order. */
9063 static void
9064 swap_reorder (rtx *a, int n)
9065 {
9066 rtx insn = a[n - 1];
9067 int i = n - 2;
9068
9069 while (i >= 0 && rank_for_reorder (a + i, &insn) >= 0)
9070 {
9071 a[i + 1] = a[i];
9072 i -= 1;
9073 }
9074 a[i + 1] = insn;
9075 }
9076
9077 #define SCHED_REORDER(READY, N_READY) \
9078 do \
9079 { \
9080 if ((N_READY) == 2) \
9081 swap_reorder (READY, N_READY); \
9082 else if ((N_READY) > 2) \
9083 qsort (READY, N_READY, sizeof (rtx), rank_for_reorder); \
9084 } \
9085 while (0)
9086
9087 /* Sort the ready list READY by ascending priority, using the SCHED_REORDER
9088 macro. */
9089 static void
9090 ready_reorder (rtx *ready, int nready)
9091 {
9092 SCHED_REORDER (ready, nready);
9093 }
9094
9095 /* Count life regions of r0 for a block. */
9096 static int
9097 find_r0_life_regions (basic_block b)
9098 {
9099 rtx end, insn;
9100 rtx pset;
9101 rtx r0_reg;
9102 int live;
9103 int set;
9104 int death = 0;
9105
9106 if (REGNO_REG_SET_P (df_get_live_in (b), R0_REG))
9107 {
9108 set = 1;
9109 live = 1;
9110 }
9111 else
9112 {
9113 set = 0;
9114 live = 0;
9115 }
9116
9117 insn = BB_HEAD (b);
9118 end = BB_END (b);
9119 r0_reg = gen_rtx_REG (SImode, R0_REG);
9120 while (1)
9121 {
9122 if (INSN_P (insn))
9123 {
9124 if (find_regno_note (insn, REG_DEAD, R0_REG))
9125 {
9126 death++;
9127 live = 0;
9128 }
9129 if (!live
9130 && (pset = single_set (insn))
9131 && reg_overlap_mentioned_p (r0_reg, SET_DEST (pset))
9132 && !find_regno_note (insn, REG_UNUSED, R0_REG))
9133 {
9134 set++;
9135 live = 1;
9136 }
9137 }
9138 if (insn == end)
9139 break;
9140 insn = NEXT_INSN (insn);
9141 }
9142 return set - death;
9143 }
9144
9145 /* Calculate regmode weights for all insns of all basic blocks. */
9146 static void
9147 sh_md_init_global (FILE *dump ATTRIBUTE_UNUSED,
9148 int verbose ATTRIBUTE_UNUSED,
9149 int old_max_uid)
9150 {
9151 basic_block b;
9152
9153 regmode_weight[0] = (short *) xcalloc (old_max_uid, sizeof (short));
9154 regmode_weight[1] = (short *) xcalloc (old_max_uid, sizeof (short));
9155 r0_life_regions = 0;
9156
9157 FOR_EACH_BB_REVERSE (b)
9158 {
9159 find_regmode_weight (b, SImode);
9160 find_regmode_weight (b, SFmode);
9161 if (!reload_completed)
9162 r0_life_regions += find_r0_life_regions (b);
9163 }
9164
9165 CURR_REGMODE_PRESSURE (SImode) = 0;
9166 CURR_REGMODE_PRESSURE (SFmode) = 0;
9167
9168 }
9169
9170 /* Cleanup. */
9171 static void
9172 sh_md_finish_global (FILE *dump ATTRIBUTE_UNUSED,
9173 int verbose ATTRIBUTE_UNUSED)
9174 {
9175 if (regmode_weight[0])
9176 {
9177 free (regmode_weight[0]);
9178 regmode_weight[0] = NULL;
9179 }
9180 if (regmode_weight[1])
9181 {
9182 free (regmode_weight[1]);
9183 regmode_weight[1] = NULL;
9184 }
9185 }
9186
9187 /* Cache can_issue_more so that we can return it from reorder2.  Also,
9188 keep track of the SImode and SFmode register pressure. */
9189 static int
9190 sh_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
9191 int sched_verbose ATTRIBUTE_UNUSED,
9192 rtx insn,
9193 int can_issue_more)
9194 {
9195 if (GET_CODE (PATTERN (insn)) != USE
9196 && GET_CODE (PATTERN (insn)) != CLOBBER)
9197 cached_can_issue_more = can_issue_more - 1;
9198 else
9199 cached_can_issue_more = can_issue_more;
9200
9201 if (reload_completed)
9202 return cached_can_issue_more;
9203
9204 CURR_REGMODE_PRESSURE (SImode) += INSN_REGMODE_WEIGHT (insn, SImode);
9205 CURR_REGMODE_PRESSURE (SFmode) += INSN_REGMODE_WEIGHT (insn, SFmode);
9206
9207 return cached_can_issue_more;
9208 }
9209
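/* Reset the SImode and SFmode pressure counters at the start of scheduling
   each block of insns. */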
9210 static void
9211 sh_md_init (FILE *dump ATTRIBUTE_UNUSED,
9212 int verbose ATTRIBUTE_UNUSED,
9213 int veclen ATTRIBUTE_UNUSED)
9214 {
9215 CURR_REGMODE_PRESSURE (SImode) = 0;
9216 CURR_REGMODE_PRESSURE (SFmode) = 0;
9217 }
9218
9219 /* Some magic numbers. */
9220 /* Pressure on register r0 can lead to spill failures, so avoid sched1 for
9221 functions that already have high pressure on r0. */
9222 #define R0_MAX_LIFE_REGIONS 2
9223 /* Register pressure thresholds for SImode and SFmode registers. */
9224 #define SIMODE_MAX_WEIGHT 5
9225 #define SFMODE_MAX_WEIGHT 10
9226
9227 /* Return true if the pressure is high for MODE. */
9228 static short
9229 high_pressure (enum machine_mode mode)
9230 {
9231 /* Pressure on register r0 can lead to spill failures, so avoid sched1 for
9232 functions that already have high pressure on r0. */
9233 if (r0_life_regions >= R0_MAX_LIFE_REGIONS)
9234 return 1;
9235
9236 if (mode == SFmode)
9237 return (CURR_REGMODE_PRESSURE (SFmode) > SFMODE_MAX_WEIGHT);
9238 else
9239 return (CURR_REGMODE_PRESSURE (SImode) > SIMODE_MAX_WEIGHT);
9240 }
9241
9242 /* Reorder ready queue if register pressure is high. */
9243 static int
9244 sh_reorder (FILE *dump ATTRIBUTE_UNUSED,
9245 int sched_verbose ATTRIBUTE_UNUSED,
9246 rtx *ready,
9247 int *n_readyp,
9248 int clock_var ATTRIBUTE_UNUSED)
9249 {
9250 if (reload_completed)
9251 return sh_issue_rate ();
9252
9253 if (high_pressure (SFmode) || high_pressure (SImode))
9254 {
9255 ready_reorder (ready, *n_readyp);
9256 }
9257
9258 return sh_issue_rate ();
9259 }
9260
9261 /* Skip cycles if the current register pressure is high. */
9262 static int
9263 sh_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
9264 int sched_verbose ATTRIBUTE_UNUSED,
9265 rtx *ready ATTRIBUTE_UNUSED,
9266 int *n_readyp ATTRIBUTE_UNUSED,
9267 int clock_var ATTRIBUTE_UNUSED)
9268 {
9269 if (reload_completed)
9270 return cached_can_issue_more;
9271
9272 if (high_pressure (SFmode) || high_pressure (SImode))
9273 skip_cycles = 1;
9274
9275 return cached_can_issue_more;
9276 }
9277
9278 /* Skip cycles without sorting the ready queue.  This will move insns from
9279 Q -> R.  If this is the last cycle we are skipping, allow sorting of the
9280 ready queue by sh_reorder. */
9281
9282 /* Generally, skipping this many cycles is sufficient for all insns to move
9283 from Q -> R. */
9284 #define MAX_SKIPS 8
9285
9286 static int
9287 sh_dfa_new_cycle (FILE *sched_dump ATTRIBUTE_UNUSED,
9288 int sched_verbose ATTRIBUTE_UNUSED,
9289 rtx insn ATTRIBUTE_UNUSED,
9290 int last_clock_var,
9291 int clock_var,
9292 int *sort_p)
9293 {
9294 if (reload_completed)
9295 return 0;
9296
9297 if (skip_cycles)
9298 {
9299 if ((clock_var - last_clock_var) < MAX_SKIPS)
9300 {
9301 *sort_p = 0;
9302 return 1;
9303 }
9304 /* If this is the last cycle we are skipping, allow reordering of R. */
9305 if ((clock_var - last_clock_var) == MAX_SKIPS)
9306 {
9307 *sort_p = 1;
9308 return 1;
9309 }
9310 }
9311
9312 skip_cycles = 0;
9313
9314 return 0;
9315 }
9316
9317 /* SHmedia requires registers for branches, so we can't generate new
9318 branches past reload. */
9319 static bool
9320 sh_cannot_modify_jumps_p (void)
9321 {
9322 return (TARGET_SHMEDIA && (reload_in_progress || reload_completed));
9323 }
9324
9325 static int
9326 sh_target_reg_class (void)
9327 {
9328 return TARGET_SHMEDIA ? TARGET_REGS : NO_REGS;
9329 }
9330
9331 static bool
9332 sh_optimize_target_register_callee_saved (bool after_prologue_epilogue_gen)
9333 {
9334 HARD_REG_SET dummy;
9335 #if 0
9336 rtx insn;
9337 #endif
9338
9339 if (! shmedia_space_reserved_for_target_registers)
9340 return 0;
9341 if (after_prologue_epilogue_gen && ! TARGET_SAVE_ALL_TARGET_REGS)
9342 return 0;
9343 if (calc_live_regs (&dummy) >= 6 * 8)
9344 return 1;
9345 return 0;
9346 }
9347
9348 static bool
9349 sh_ms_bitfield_layout_p (tree record_type ATTRIBUTE_UNUSED)
9350 {
9351 return (TARGET_SH5 || TARGET_HITACHI || sh_attr_renesas_p (record_type));
9352 }
9353 \f
9354 /*
9355 On the SH1..SH4, the trampoline looks like
9356 2 0002 D202 mov.l l2,r2
9357 1 0000 D301 mov.l l1,r3
9358 3 0004 422B jmp @r2
9359 4 0006 0009 nop
9360 5 0008 00000000 l1: .long area
9361 6 000c 00000000 l2: .long function
9362
9363 SH5 (compact) uses r1 instead of r3 for the static chain. */
9364
9365
9366 /* Emit RTL insns to initialize the variable parts of a trampoline.
9367 FNADDR is an RTX for the address of the function's pure code.
9368 CXT is an RTX for the static chain value for the function. */
9369
9370 void
9371 sh_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
9372 {
9373 rtx tramp_mem = gen_frame_mem (BLKmode, tramp);
9374
9375 if (TARGET_SHMEDIA64)
9376 {
9377 rtx tramp_templ;
9378 int fixed_len;
9379
9380 rtx movi1 = GEN_INT (0xcc000010);
9381 rtx shori1 = GEN_INT (0xc8000010);
9382 rtx src, dst;
9383
9384 /* The following trampoline works within a +- 128 KB range for cxt:
9385 ptb/u cxt,tr1; movi fnaddr >> 48,r0; shori fnaddr >> 32,r0;
9386 shori fnaddr >> 16,r0; shori fnaddr,r0; ptabs/l r0,tr0
9387 gettr tr1,r1; blink tr0,r63 */
9388 /* Address rounding makes it hard to compute the exact bounds of the
9389 offset for this trampoline, but we have a rather generous offset
9390 range, so frame_offset should do fine as an upper bound. */
9391 if (cxt == virtual_stack_vars_rtx && frame_offset < 0x20000)
9392 {
9393 /* ??? could optimize this trampoline initialization
9394 by writing DImode words with two insns each. */
9395 rtx mask = force_reg (DImode, GEN_INT (0x3fffc00));
9396 rtx insn = gen_rtx_MINUS (DImode, cxt, tramp);
9397 insn = gen_rtx_ASHIFT (DImode, insn, GEN_INT (10-2));
9398 insn = gen_rtx_AND (DImode, insn, mask);
9399 /* Or in ptb/u .,tr1 pattern */
9400 insn = gen_rtx_IOR (DImode, insn, gen_int_mode (0xec000010, SImode));
9401 insn = force_operand (insn, NULL_RTX);
9402 insn = gen_lowpart (SImode, insn);
9403 emit_move_insn (change_address (tramp_mem, SImode, NULL_RTX), insn);
9404 insn = gen_rtx_LSHIFTRT (DImode, fnaddr, GEN_INT (38));
9405 insn = gen_rtx_AND (DImode, insn, mask);
9406 insn = force_operand (gen_rtx_IOR (DImode, movi1, insn), NULL_RTX);
9407 insn = gen_lowpart (SImode, insn);
9408 emit_move_insn (adjust_address (tramp_mem, SImode, 4), insn);
9409 insn = gen_rtx_LSHIFTRT (DImode, fnaddr, GEN_INT (22));
9410 insn = gen_rtx_AND (DImode, insn, mask);
9411 insn = force_operand (gen_rtx_IOR (DImode, shori1, insn), NULL_RTX);
9412 insn = gen_lowpart (SImode, insn);
9413 emit_move_insn (adjust_address (tramp_mem, SImode, 8), insn);
9414 insn = gen_rtx_LSHIFTRT (DImode, fnaddr, GEN_INT (6));
9415 insn = gen_rtx_AND (DImode, insn, mask);
9416 insn = force_operand (gen_rtx_IOR (DImode, shori1, insn), NULL_RTX);
9417 insn = gen_lowpart (SImode, insn);
9418 emit_move_insn (adjust_address (tramp_mem, SImode, 12), insn);
9419 insn = gen_rtx_ASHIFT (DImode, fnaddr, GEN_INT (10));
9420 insn = gen_rtx_AND (DImode, insn, mask);
9421 insn = force_operand (gen_rtx_IOR (DImode, shori1, insn), NULL_RTX);
9422 insn = gen_lowpart (SImode, insn);
9423 emit_move_insn (adjust_address (tramp_mem, SImode, 16), insn);
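/* The remaining three words are the fixed opcodes from the sequence listed
   above: ptabs/l r0,tr0; gettr tr1,r1; blink tr0,r63. */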
9424 emit_move_insn (adjust_address (tramp_mem, SImode, 20),
9425 GEN_INT (0x6bf10600));
9426 emit_move_insn (adjust_address (tramp_mem, SImode, 24),
9427 GEN_INT (0x4415fc10));
9428 emit_move_insn (adjust_address (tramp_mem, SImode, 28),
9429 GEN_INT (0x4401fff0));
9430 emit_insn (gen_ic_invalidate_line (tramp));
9431 return;
9432 }
9433 tramp_templ = gen_rtx_SYMBOL_REF (Pmode, "__GCC_nested_trampoline");
9434 fixed_len = TRAMPOLINE_SIZE - 2 * GET_MODE_SIZE (Pmode);
9435
9436 tramp_templ = gen_datalabel_ref (tramp_templ);
9437 dst = tramp_mem;
9438 src = gen_const_mem (BLKmode, tramp_templ);
9439 set_mem_align (dst, 256);
9440 set_mem_align (src, 64);
9441 emit_block_move (dst, src, GEN_INT (fixed_len), BLOCK_OP_NORMAL);
9442
9443 emit_move_insn (adjust_address (tramp_mem, Pmode, fixed_len), fnaddr);
9444 emit_move_insn (adjust_address (tramp_mem, Pmode,
9445 fixed_len + GET_MODE_SIZE (Pmode)),
9446 cxt);
9447 emit_insn (gen_ic_invalidate_line (tramp));
9448 return;
9449 }
9450 else if (TARGET_SHMEDIA)
9451 {
9452 /* movi fnaddr >> 16,r1; shori fnaddr,r1; ptabs/l r1,tr0
9453 movi cxt >> 16,r1; shori cxt,r1; blink tr0,r63 */
9454 rtx quad0 = gen_reg_rtx (DImode), cxtload = gen_reg_rtx (DImode);
9455 rtx quad1 = gen_reg_rtx (DImode), quad2 = gen_reg_rtx (DImode);
9456 /* movi 0,r1: 0xcc000010 and shori 0,r1: 0xc8000010, concatenated,
9457 rotated right by 10, with the higher 16 bits of every 32 selected. */
9458 rtx movishori
9459 = force_reg (V2HImode, (simplify_gen_subreg
9460 (V2HImode, GEN_INT (0x4330432), SImode, 0)));
9461 rtx ptabs = force_reg (DImode, GEN_INT (0x6bf10600));
9462 rtx blink = force_reg (DImode, GEN_INT (0x4401fff0));
9463
9464 tramp = force_reg (Pmode, tramp);
9465 fnaddr = force_reg (SImode, fnaddr);
9466 cxt = force_reg (SImode, cxt);
9467 emit_insn (gen_mshflo_w_x (gen_rtx_SUBREG (V4HImode, quad0, 0),
9468 gen_rtx_SUBREG (V2HImode, fnaddr, 0),
9469 movishori));
9470 emit_insn (gen_rotrdi3_mextr (quad0, quad0,
9471 GEN_INT (TARGET_LITTLE_ENDIAN ? 24 : 56)));
9472 emit_insn (gen_ashldi3_media (quad0, quad0, const2_rtx));
9473 emit_move_insn (change_address (tramp_mem, DImode, NULL_RTX), quad0);
9474 emit_insn (gen_mshflo_w_x (gen_rtx_SUBREG (V4HImode, cxtload, 0),
9475 gen_rtx_SUBREG (V2HImode, cxt, 0),
9476 movishori));
9477 emit_insn (gen_rotrdi3_mextr (cxtload, cxtload,
9478 GEN_INT (TARGET_LITTLE_ENDIAN ? 24 : 56)));
9479 emit_insn (gen_ashldi3_media (cxtload, cxtload, const2_rtx));
9480 if (TARGET_LITTLE_ENDIAN)
9481 {
9482 emit_insn (gen_mshflo_l_di (quad1, ptabs, cxtload));
9483 emit_insn (gen_mextr4 (quad2, cxtload, blink));
9484 }
9485 else
9486 {
9487 emit_insn (gen_mextr4 (quad1, cxtload, ptabs));
9488 emit_insn (gen_mshflo_l_di (quad2, blink, cxtload));
9489 }
9490 emit_move_insn (adjust_address (tramp_mem, DImode, 8), quad1);
9491 emit_move_insn (adjust_address (tramp_mem, DImode, 16), quad2);
9492 emit_insn (gen_ic_invalidate_line (tramp));
9493 return;
9494 }
9495 else if (TARGET_SHCOMPACT)
9496 {
9497 emit_insn (gen_initialize_trampoline (tramp, cxt, fnaddr));
9498 return;
9499 }
9500 emit_move_insn (change_address (tramp_mem, SImode, NULL_RTX),
9501 gen_int_mode (TARGET_LITTLE_ENDIAN ? 0xd301d202 : 0xd202d301,
9502 SImode));
9503 emit_move_insn (adjust_address (tramp_mem, SImode, 4),
9504 gen_int_mode (TARGET_LITTLE_ENDIAN ? 0x0009422b : 0x422b0009,
9505 SImode));
9506 emit_move_insn (adjust_address (tramp_mem, SImode, 8), cxt);
9507 emit_move_insn (adjust_address (tramp_mem, SImode, 12), fnaddr);
9508 if (TARGET_HARVARD)
9509 {
9510 if (!TARGET_INLINE_IC_INVALIDATE
9511 || (!(TARGET_SH4A_ARCH || TARGET_SH4_300) && TARGET_USERMODE))
9512 emit_library_call (function_symbol (NULL, "__ic_invalidate",
9513 FUNCTION_ORDINARY),
9514 0, VOIDmode, 1, tramp, SImode);
9515 else
9516 emit_insn (gen_ic_invalidate_line (tramp));
9517 }
9518 }
9519
9520 /* FIXME: This is overly conservative. A SHcompact function that
9521 receives arguments ``by reference'' will have them stored in its
9522 own stack frame, so it must not pass pointers or references to
9523 these arguments to other functions by means of sibling calls. */
9524 /* If PIC, we cannot make sibling calls to global functions
9525 because the PLT requires r12 to be live. */
9526 static bool
9527 sh_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
9528 {
9529 return (1
9530 && (! TARGET_SHCOMPACT
9531 || current_function_args_info.stack_regs == 0)
9532 && ! sh_cfun_interrupt_handler_p ()
9533 && (! flag_pic
9534 || (decl && ! TREE_PUBLIC (decl))
9535 || (decl && DECL_VISIBILITY (decl) != VISIBILITY_DEFAULT)));
9536 }
9537 \f
9538 /* Machine specific built-in functions. */
9539
9540 struct builtin_description
9541 {
9542 const enum insn_code icode;
9543 const char *const name;
9544 int signature;
9545 };
9546
9547 /* Describe the number and signedness of arguments; arg[0] == result
9548 (1: unsigned, 2: signed, 4: don't care, 8: pointer, 0: no argument). */
9549 /* 9: 64-bit pointer, 10: 32-bit pointer */
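/* For example, SH_BLTIN_MAC_HISI == { 1, 4, 4, 1 }: an unsigned result,
   two "don't care" operands, and an unsigned third operand. */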
9550 static const char signature_args[][4] =
9551 {
9552 #define SH_BLTIN_V2SI2 0
9553 { 4, 4 },
9554 #define SH_BLTIN_V4HI2 1
9555 { 4, 4 },
9556 #define SH_BLTIN_V2SI3 2
9557 { 4, 4, 4 },
9558 #define SH_BLTIN_V4HI3 3
9559 { 4, 4, 4 },
9560 #define SH_BLTIN_V8QI3 4
9561 { 4, 4, 4 },
9562 #define SH_BLTIN_MAC_HISI 5
9563 { 1, 4, 4, 1 },
9564 #define SH_BLTIN_SH_HI 6
9565 { 4, 4, 1 },
9566 #define SH_BLTIN_SH_SI 7
9567 { 4, 4, 1 },
9568 #define SH_BLTIN_V4HI2V2SI 8
9569 { 4, 4, 4 },
9570 #define SH_BLTIN_V4HI2V8QI 9
9571 { 4, 4, 4 },
9572 #define SH_BLTIN_SISF 10
9573 { 4, 2 },
9574 #define SH_BLTIN_LDUA_L 11
9575 { 2, 10 },
9576 #define SH_BLTIN_LDUA_Q 12
9577 { 1, 10 },
9578 #define SH_BLTIN_STUA_L 13
9579 { 0, 10, 2 },
9580 #define SH_BLTIN_STUA_Q 14
9581 { 0, 10, 1 },
9582 #define SH_BLTIN_LDUA_L64 15
9583 { 2, 9 },
9584 #define SH_BLTIN_LDUA_Q64 16
9585 { 1, 9 },
9586 #define SH_BLTIN_STUA_L64 17
9587 { 0, 9, 2 },
9588 #define SH_BLTIN_STUA_Q64 18
9589 { 0, 9, 1 },
9590 #define SH_BLTIN_NUM_SHARED_SIGNATURES 19
9591 #define SH_BLTIN_2 19
9592 #define SH_BLTIN_SU 19
9593 { 1, 2 },
9594 #define SH_BLTIN_3 20
9595 #define SH_BLTIN_SUS 20
9596 { 2, 2, 1 },
9597 #define SH_BLTIN_PSSV 21
9598 { 0, 8, 2, 2 },
9599 #define SH_BLTIN_XXUU 22
9600 #define SH_BLTIN_UUUU 22
9601 { 1, 1, 1, 1 },
9602 #define SH_BLTIN_PV 23
9603 { 0, 8 },
9604 };
9605 /* mcmv: operands considered unsigned. */
9606 /* mmulsum_wq, msad_ubq: result considered unsigned long long. */
9607 /* mperm: control value considered unsigned int. */
9608 /* mshalds, mshard, mshards, mshlld, mshlrd: shift count is unsigned int. */
9609 /* mshards_q: returns signed short. */
9610 /* nsb: takes long long arg, returns unsigned char. */
9611 static const struct builtin_description bdesc[] =
9612 {
9613 { CODE_FOR_absv2si2, "__builtin_absv2si2", SH_BLTIN_V2SI2 },
9614 { CODE_FOR_absv4hi2, "__builtin_absv4hi2", SH_BLTIN_V4HI2 },
9615 { CODE_FOR_addv2si3, "__builtin_addv2si3", SH_BLTIN_V2SI3 },
9616 { CODE_FOR_addv4hi3, "__builtin_addv4hi3", SH_BLTIN_V4HI3 },
9617 { CODE_FOR_ssaddv2si3,"__builtin_ssaddv2si3", SH_BLTIN_V2SI3 },
9618 { CODE_FOR_usaddv8qi3,"__builtin_usaddv8qi3", SH_BLTIN_V8QI3 },
9619 { CODE_FOR_ssaddv4hi3,"__builtin_ssaddv4hi3", SH_BLTIN_V4HI3 },
9620 { CODE_FOR_alloco_i, "__builtin_sh_media_ALLOCO", SH_BLTIN_PV },
9621 { CODE_FOR_negcmpeqv8qi,"__builtin_sh_media_MCMPEQ_B", SH_BLTIN_V8QI3 },
9622 { CODE_FOR_negcmpeqv2si,"__builtin_sh_media_MCMPEQ_L", SH_BLTIN_V2SI3 },
9623 { CODE_FOR_negcmpeqv4hi,"__builtin_sh_media_MCMPEQ_W", SH_BLTIN_V4HI3 },
9624 { CODE_FOR_negcmpgtuv8qi,"__builtin_sh_media_MCMPGT_UB", SH_BLTIN_V8QI3 },
9625 { CODE_FOR_negcmpgtv2si,"__builtin_sh_media_MCMPGT_L", SH_BLTIN_V2SI3 },
9626 { CODE_FOR_negcmpgtv4hi,"__builtin_sh_media_MCMPGT_W", SH_BLTIN_V4HI3 },
9627 { CODE_FOR_mcmv, "__builtin_sh_media_MCMV", SH_BLTIN_UUUU },
9628 { CODE_FOR_mcnvs_lw, "__builtin_sh_media_MCNVS_LW", SH_BLTIN_3 },
9629 { CODE_FOR_mcnvs_wb, "__builtin_sh_media_MCNVS_WB", SH_BLTIN_V4HI2V8QI },
9630 { CODE_FOR_mcnvs_wub, "__builtin_sh_media_MCNVS_WUB", SH_BLTIN_V4HI2V8QI },
9631 { CODE_FOR_mextr1, "__builtin_sh_media_MEXTR1", SH_BLTIN_V8QI3 },
9632 { CODE_FOR_mextr2, "__builtin_sh_media_MEXTR2", SH_BLTIN_V8QI3 },
9633 { CODE_FOR_mextr3, "__builtin_sh_media_MEXTR3", SH_BLTIN_V8QI3 },
9634 { CODE_FOR_mextr4, "__builtin_sh_media_MEXTR4", SH_BLTIN_V8QI3 },
9635 { CODE_FOR_mextr5, "__builtin_sh_media_MEXTR5", SH_BLTIN_V8QI3 },
9636 { CODE_FOR_mextr6, "__builtin_sh_media_MEXTR6", SH_BLTIN_V8QI3 },
9637 { CODE_FOR_mextr7, "__builtin_sh_media_MEXTR7", SH_BLTIN_V8QI3 },
9638 { CODE_FOR_mmacfx_wl, "__builtin_sh_media_MMACFX_WL", SH_BLTIN_MAC_HISI },
9639 { CODE_FOR_mmacnfx_wl,"__builtin_sh_media_MMACNFX_WL", SH_BLTIN_MAC_HISI },
9640 { CODE_FOR_mulv2si3, "__builtin_mulv2si3", SH_BLTIN_V2SI3 },
9641 { CODE_FOR_mulv4hi3, "__builtin_mulv4hi3", SH_BLTIN_V4HI3 },
9642 { CODE_FOR_mmulfx_l, "__builtin_sh_media_MMULFX_L", SH_BLTIN_V2SI3 },
9643 { CODE_FOR_mmulfx_w, "__builtin_sh_media_MMULFX_W", SH_BLTIN_V4HI3 },
9644 { CODE_FOR_mmulfxrp_w,"__builtin_sh_media_MMULFXRP_W", SH_BLTIN_V4HI3 },
9645 { CODE_FOR_mmulhi_wl, "__builtin_sh_media_MMULHI_WL", SH_BLTIN_V4HI2V2SI },
9646 { CODE_FOR_mmullo_wl, "__builtin_sh_media_MMULLO_WL", SH_BLTIN_V4HI2V2SI },
9647 { CODE_FOR_mmulsum_wq,"__builtin_sh_media_MMULSUM_WQ", SH_BLTIN_XXUU },
9648 { CODE_FOR_mperm_w, "__builtin_sh_media_MPERM_W", SH_BLTIN_SH_HI },
9649 { CODE_FOR_msad_ubq, "__builtin_sh_media_MSAD_UBQ", SH_BLTIN_XXUU },
9650 { CODE_FOR_mshalds_l, "__builtin_sh_media_MSHALDS_L", SH_BLTIN_SH_SI },
9651 { CODE_FOR_mshalds_w, "__builtin_sh_media_MSHALDS_W", SH_BLTIN_SH_HI },
9652 { CODE_FOR_ashrv2si3, "__builtin_ashrv2si3", SH_BLTIN_SH_SI },
9653 { CODE_FOR_ashrv4hi3, "__builtin_ashrv4hi3", SH_BLTIN_SH_HI },
9654 { CODE_FOR_mshards_q, "__builtin_sh_media_MSHARDS_Q", SH_BLTIN_SUS },
9655 { CODE_FOR_mshfhi_b, "__builtin_sh_media_MSHFHI_B", SH_BLTIN_V8QI3 },
9656 { CODE_FOR_mshfhi_l, "__builtin_sh_media_MSHFHI_L", SH_BLTIN_V2SI3 },
9657 { CODE_FOR_mshfhi_w, "__builtin_sh_media_MSHFHI_W", SH_BLTIN_V4HI3 },
9658 { CODE_FOR_mshflo_b, "__builtin_sh_media_MSHFLO_B", SH_BLTIN_V8QI3 },
9659 { CODE_FOR_mshflo_l, "__builtin_sh_media_MSHFLO_L", SH_BLTIN_V2SI3 },
9660 { CODE_FOR_mshflo_w, "__builtin_sh_media_MSHFLO_W", SH_BLTIN_V4HI3 },
9661 { CODE_FOR_ashlv2si3, "__builtin_ashlv2si3", SH_BLTIN_SH_SI },
9662 { CODE_FOR_ashlv4hi3, "__builtin_ashlv4hi3", SH_BLTIN_SH_HI },
9663 { CODE_FOR_lshrv2si3, "__builtin_lshrv2si3", SH_BLTIN_SH_SI },
9664 { CODE_FOR_lshrv4hi3, "__builtin_lshrv4hi3", SH_BLTIN_SH_HI },
9665 { CODE_FOR_subv2si3, "__builtin_subv2si3", SH_BLTIN_V2SI3 },
9666 { CODE_FOR_subv4hi3, "__builtin_subv4hi3", SH_BLTIN_V4HI3 },
9667 { CODE_FOR_sssubv2si3,"__builtin_sssubv2si3", SH_BLTIN_V2SI3 },
9668 { CODE_FOR_ussubv8qi3,"__builtin_ussubv8qi3", SH_BLTIN_V8QI3 },
9669 { CODE_FOR_sssubv4hi3,"__builtin_sssubv4hi3", SH_BLTIN_V4HI3 },
9670 { CODE_FOR_fcosa_s, "__builtin_sh_media_FCOSA_S", SH_BLTIN_SISF },
9671 { CODE_FOR_fsina_s, "__builtin_sh_media_FSINA_S", SH_BLTIN_SISF },
9672 { CODE_FOR_fipr, "__builtin_sh_media_FIPR_S", SH_BLTIN_3 },
9673 { CODE_FOR_ftrv, "__builtin_sh_media_FTRV_S", SH_BLTIN_3 },
9674 { CODE_FOR_mac_media, "__builtin_sh_media_FMAC_S", SH_BLTIN_3 },
9675 { CODE_FOR_sqrtdf2, "__builtin_sh_media_FSQRT_D", SH_BLTIN_2 },
9676 { CODE_FOR_sqrtsf2, "__builtin_sh_media_FSQRT_S", SH_BLTIN_2 },
9677 { CODE_FOR_fsrra_s, "__builtin_sh_media_FSRRA_S", SH_BLTIN_2 },
9678 { CODE_FOR_ldhi_l, "__builtin_sh_media_LDHI_L", SH_BLTIN_LDUA_L },
9679 { CODE_FOR_ldhi_q, "__builtin_sh_media_LDHI_Q", SH_BLTIN_LDUA_Q },
9680 { CODE_FOR_ldlo_l, "__builtin_sh_media_LDLO_L", SH_BLTIN_LDUA_L },
9681 { CODE_FOR_ldlo_q, "__builtin_sh_media_LDLO_Q", SH_BLTIN_LDUA_Q },
9682 { CODE_FOR_sthi_l, "__builtin_sh_media_STHI_L", SH_BLTIN_STUA_L },
9683 { CODE_FOR_sthi_q, "__builtin_sh_media_STHI_Q", SH_BLTIN_STUA_Q },
9684 { CODE_FOR_stlo_l, "__builtin_sh_media_STLO_L", SH_BLTIN_STUA_L },
9685 { CODE_FOR_stlo_q, "__builtin_sh_media_STLO_Q", SH_BLTIN_STUA_Q },
9686 { CODE_FOR_ldhi_l64, "__builtin_sh_media_LDHI_L", SH_BLTIN_LDUA_L64 },
9687 { CODE_FOR_ldhi_q64, "__builtin_sh_media_LDHI_Q", SH_BLTIN_LDUA_Q64 },
9688 { CODE_FOR_ldlo_l64, "__builtin_sh_media_LDLO_L", SH_BLTIN_LDUA_L64 },
9689 { CODE_FOR_ldlo_q64, "__builtin_sh_media_LDLO_Q", SH_BLTIN_LDUA_Q64 },
9690 { CODE_FOR_sthi_l64, "__builtin_sh_media_STHI_L", SH_BLTIN_STUA_L64 },
9691 { CODE_FOR_sthi_q64, "__builtin_sh_media_STHI_Q", SH_BLTIN_STUA_Q64 },
9692 { CODE_FOR_stlo_l64, "__builtin_sh_media_STLO_L", SH_BLTIN_STUA_L64 },
9693 { CODE_FOR_stlo_q64, "__builtin_sh_media_STLO_Q", SH_BLTIN_STUA_Q64 },
9694 { CODE_FOR_nsb, "__builtin_sh_media_NSB", SH_BLTIN_SU },
9695 { CODE_FOR_byterev, "__builtin_sh_media_BYTEREV", SH_BLTIN_2 },
9696 { CODE_FOR_prefetch, "__builtin_sh_media_PREFO", SH_BLTIN_PSSV },
9697 };
9698
9699 static void
9700 sh_media_init_builtins (void)
9701 {
9702 tree shared[SH_BLTIN_NUM_SHARED_SIGNATURES];
9703 const struct builtin_description *d;
9704
9705 memset (shared, 0, sizeof shared);
9706 for (d = bdesc; d - bdesc < (int) ARRAY_SIZE (bdesc); d++)
9707 {
9708 tree type, arg_type = 0;
9709 int signature = d->signature;
9710 int i;
9711
9712 if (signature < SH_BLTIN_NUM_SHARED_SIGNATURES && shared[signature])
9713 type = shared[signature];
9714 else
9715 {
9716 int has_result = signature_args[signature][0] != 0;
9717
9718 if ((signature_args[signature][1] & 8)
9719 && (((signature_args[signature][1] & 1) && TARGET_SHMEDIA32)
9720 || ((signature_args[signature][1] & 2) && TARGET_SHMEDIA64)))
9721 continue;
9722 if (! TARGET_FPU_ANY
9723 && FLOAT_MODE_P (insn_data[d->icode].operand[0].mode))
9724 continue;
9725 type = void_list_node;
9726 for (i = 3; ; i--)
9727 {
9728 int arg = signature_args[signature][i];
9729 int opno = i - 1 + has_result;
9730
9731 if (arg & 8)
9732 arg_type = ptr_type_node;
9733 else if (arg)
9734 arg_type = (*lang_hooks.types.type_for_mode)
9735 (insn_data[d->icode].operand[opno].mode,
9736 (arg & 1));
9737 else if (i)
9738 continue;
9739 else
9740 arg_type = void_type_node;
9741 if (i == 0)
9742 break;
9743 type = tree_cons (NULL_TREE, arg_type, type);
9744 }
9745 type = build_function_type (arg_type, type);
9746 if (signature < SH_BLTIN_NUM_SHARED_SIGNATURES)
9747 shared[signature] = type;
9748 }
9749 add_builtin_function (d->name, type, d - bdesc, BUILT_IN_MD,
9750 NULL, NULL_TREE);
9751 }
9752 }
9753
9754 /* Implements target hook vector_mode_supported_p. */
9755 bool
9756 sh_vector_mode_supported_p (enum machine_mode mode)
9757 {
9758 if (TARGET_FPU_ANY
9759 && ((mode == V2SFmode)
9760 || (mode == V4SFmode)
9761 || (mode == V16SFmode)))
9762 return true;
9763
9764 else if (TARGET_SHMEDIA
9765 && ((mode == V8QImode)
9766 || (mode == V2HImode)
9767 || (mode == V4HImode)
9768 || (mode == V2SImode)))
9769 return true;
9770
9771 return false;
9772 }
9773
9774 /* Implements target hook dwarf_calling_convention. Return an enum
9775 of dwarf_calling_convention. */
9776 int
9777 sh_dwarf_calling_convention (tree func)
9778 {
9779 if (sh_attr_renesas_p (func))
9780 return DW_CC_GNU_renesas_sh;
9781
9782 return DW_CC_normal;
9783 }
9784
9785 static void
9786 sh_init_builtins (void)
9787 {
9788 if (TARGET_SHMEDIA)
9789 sh_media_init_builtins ();
9790 }
9791
9792 /* Expand an expression EXP that calls a built-in function,
9793 with result going to TARGET if that's convenient
9794 (and in mode MODE if that's convenient).
9795 SUBTARGET may be used as the target for computing one of EXP's operands.
9796 IGNORE is nonzero if the value is to be ignored. */
9797
9798 static rtx
9799 sh_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
9800 enum machine_mode mode ATTRIBUTE_UNUSED, int ignore)
9801 {
9802 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
9803 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
9804 const struct builtin_description *d = &bdesc[fcode];
9805 enum insn_code icode = d->icode;
9806 int signature = d->signature;
9807 enum machine_mode tmode = VOIDmode;
9808 int nop = 0, i;
9809 rtx op[4];
9810 rtx pat = 0;
9811
9812 if (signature_args[signature][0])
9813 {
9814 if (ignore)
9815 return 0;
9816
9817 tmode = insn_data[icode].operand[0].mode;
9818 if (! target
9819 || GET_MODE (target) != tmode
9820 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
9821 target = gen_reg_rtx (tmode);
9822 op[nop++] = target;
9823 }
9824 else
9825 target = 0;
9826
9827 for (i = 1; i <= 3; i++, nop++)
9828 {
9829 tree arg;
9830 enum machine_mode opmode, argmode;
9831 tree optype;
9832
9833 if (! signature_args[signature][i])
9834 break;
9835 arg = CALL_EXPR_ARG (exp, i - 1);
9836 if (arg == error_mark_node)
9837 return const0_rtx;
9838 if (signature_args[signature][i] & 8)
9839 {
9840 opmode = ptr_mode;
9841 optype = ptr_type_node;
9842 }
9843 else
9844 {
9845 opmode = insn_data[icode].operand[nop].mode;
9846 optype = (*lang_hooks.types.type_for_mode) (opmode, 0);
9847 }
9848 argmode = TYPE_MODE (TREE_TYPE (arg));
9849 if (argmode != opmode)
9850 arg = build1 (NOP_EXPR, optype, arg);
9851 op[nop] = expand_expr (arg, NULL_RTX, opmode, 0);
9852 if (! (*insn_data[icode].operand[nop].predicate) (op[nop], opmode))
9853 op[nop] = copy_to_mode_reg (opmode, op[nop]);
9854 }
9855
9856 switch (nop)
9857 {
9858 case 1:
9859 pat = (*insn_data[d->icode].genfun) (op[0]);
9860 break;
9861 case 2:
9862 pat = (*insn_data[d->icode].genfun) (op[0], op[1]);
9863 break;
9864 case 3:
9865 pat = (*insn_data[d->icode].genfun) (op[0], op[1], op[2]);
9866 break;
9867 case 4:
9868 pat = (*insn_data[d->icode].genfun) (op[0], op[1], op[2], op[3]);
9869 break;
9870 default:
9871 gcc_unreachable ();
9872 }
9873 if (! pat)
9874 return 0;
9875 emit_insn (pat);
9876 return target;
9877 }
9878
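/* Expand a unary V2SF operation: apply CODE to each SFmode element of OP1
   in turn (element 0, then element 1), storing the results in OP0. */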
9879 void
9880 sh_expand_unop_v2sf (enum rtx_code code, rtx op0, rtx op1)
9881 {
9882 rtx sel0 = const0_rtx;
9883 rtx sel1 = const1_rtx;
9884 rtx (*fn) (rtx, rtx, rtx, rtx, rtx) = gen_unary_sf_op;
9885 rtx op = gen_rtx_fmt_e (code, SFmode, op1);
9886
9887 emit_insn ((*fn) (op0, op1, op, sel0, sel0));
9888 emit_insn ((*fn) (op0, op1, op, sel1, sel1));
9889 }
9890
9891 void
9892 sh_expand_binop_v2sf (enum rtx_code code, rtx op0, rtx op1, rtx op2)
9893 {
9894 rtx sel0 = const0_rtx;
9895 rtx sel1 = const1_rtx;
9896 rtx (*fn) (rtx, rtx, rtx, rtx, rtx, rtx, rtx, rtx)
9897 = gen_binary_sf_op;
9898 rtx op = gen_rtx_fmt_ee (code, SFmode, op1, op2);
9899
9900 emit_insn ((*fn) (op0, op1, op2, op, sel0, sel0, sel0, sel1));
9901 emit_insn ((*fn) (op0, op1, op2, op, sel1, sel1, sel1, sel0));
9902 }
9903
9904 /* Return true if a mode change from FROM to TO is invalid for registers
9905 in CLASS. */
9906 bool
9907 sh_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
9908 enum reg_class class)
9909 {
9910 /* We want to enable the use of SUBREGs as a means to
9911 VEC_SELECT a single element of a vector. */
9912 if (to == SFmode && VECTOR_MODE_P (from) && GET_MODE_INNER (from) == SFmode)
9913 return (reg_classes_intersect_p (GENERAL_REGS, class));
9914
9915 if (GET_MODE_SIZE (from) != GET_MODE_SIZE (to))
9916 {
9917 if (TARGET_LITTLE_ENDIAN)
9918 {
9919 if (GET_MODE_SIZE (to) < 8 || GET_MODE_SIZE (from) < 8)
9920 return reg_classes_intersect_p (DF_REGS, class);
9921 }
9922 else
9923 {
9924 if (GET_MODE_SIZE (from) < 8)
9925 return reg_classes_intersect_p (DF_HI_REGS, class);
9926 }
9927 }
9928 return 0;
9929 }
9930
9931
9932 /* If ADDRESS refers to a CODE_LABEL, add NUSES to the number of times
9933 that label is used. */
9934
9935 void
9936 sh_mark_label (rtx address, int nuses)
9937 {
9938 if (GOTOFF_P (address))
9939 {
9940 /* Extract the label or symbol. */
9941 address = XEXP (address, 0);
9942 if (GET_CODE (address) == PLUS)
9943 address = XEXP (address, 0);
9944 address = XVECEXP (address, 0, 0);
9945 }
9946 if (GET_CODE (address) == LABEL_REF
9947 && GET_CODE (XEXP (address, 0)) == CODE_LABEL)
9948 LABEL_NUSES (XEXP (address, 0)) += nuses;
9949 }
9950
9951 /* Compute extra cost of moving data between one register class
9952 and another. */
9953
9954 /* If SECONDARY*_RELOAD_CLASS says something about the src/dst pair, regclass
9955 uses this information. Hence, the general register <-> floating point
9956 register information here is not used for SFmode. */
9957
9958 int
9959 sh_register_move_cost (enum machine_mode mode,
9960 enum reg_class srcclass, enum reg_class dstclass)
9961 {
9962 if (dstclass == T_REGS || dstclass == PR_REGS)
9963 return 10;
9964
9965 if (dstclass == MAC_REGS && srcclass == MAC_REGS)
9966 return 4;
9967
9968 if (mode == SImode && ! TARGET_SHMEDIA && TARGET_FMOVD
9969 && REGCLASS_HAS_FP_REG (srcclass)
9970 && REGCLASS_HAS_FP_REG (dstclass))
9971 return 4;
9972
9973 if (REGCLASS_HAS_FP_REG (dstclass) && srcclass == T_REGS)
9974 return ((TARGET_HARD_SH4 && !optimize_size) ? 10 : 7);
9975
9976 if ((REGCLASS_HAS_FP_REG (dstclass) && srcclass == MAC_REGS)
9977 || (dstclass == MAC_REGS && REGCLASS_HAS_FP_REG (srcclass)))
9978 return 9;
9979
9980 if ((REGCLASS_HAS_FP_REG (dstclass)
9981 && REGCLASS_HAS_GENERAL_REG (srcclass))
9982 || (REGCLASS_HAS_GENERAL_REG (dstclass)
9983 && REGCLASS_HAS_FP_REG (srcclass)))
9984 return ((TARGET_SHMEDIA ? 4 : TARGET_FMOVD ? 8 : 12)
9985 * ((GET_MODE_SIZE (mode) + 7) / 8U));
9986
9987 if ((dstclass == FPUL_REGS
9988 && REGCLASS_HAS_GENERAL_REG (srcclass))
9989 || (srcclass == FPUL_REGS
9990 && REGCLASS_HAS_GENERAL_REG (dstclass)))
9991 return 5;
9992
9993 if ((dstclass == FPUL_REGS
9994 && (srcclass == PR_REGS || srcclass == MAC_REGS || srcclass == T_REGS))
9995 || (srcclass == FPUL_REGS
9996 && (dstclass == PR_REGS || dstclass == MAC_REGS)))
9997 return 7;
9998
9999 if ((srcclass == TARGET_REGS && ! REGCLASS_HAS_GENERAL_REG (dstclass))
10000 || ((dstclass) == TARGET_REGS && ! REGCLASS_HAS_GENERAL_REG (srcclass)))
10001 return 20;
10002
10003 /* ??? ptabs faults on (value & 0x3) == 0x3 */
10004 if (TARGET_SHMEDIA
10005 && ((srcclass) == TARGET_REGS || (srcclass) == SIBCALL_REGS))
10006 {
10007 if (sh_gettrcost >= 0)
10008 return sh_gettrcost;
10009 else if (!TARGET_PT_FIXED)
10010 return 100;
10011 }
10012
10013 if ((srcclass == FPSCR_REGS && ! REGCLASS_HAS_GENERAL_REG (dstclass))
10014 || (dstclass == FPSCR_REGS && ! REGCLASS_HAS_GENERAL_REG (srcclass)))
10015 return 4;
10016
10017 if (TARGET_SHMEDIA
10018 || (TARGET_FMOVD
10019 && ! REGCLASS_HAS_GENERAL_REG (srcclass)
10020 && ! REGCLASS_HAS_GENERAL_REG (dstclass)))
10021 return 2 * ((GET_MODE_SIZE (mode) + 7) / 8U);
10022
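/* Otherwise assume a cost of two per SImode-sized (4 byte) chunk moved. */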
10023 return 2 * ((GET_MODE_SIZE (mode) + 3) / 4U);
10024 }
10025
10026 static rtx emit_load_ptr (rtx, rtx);
10027
10028 static rtx
10029 emit_load_ptr (rtx reg, rtx addr)
10030 {
10031 rtx mem = gen_const_mem (ptr_mode, addr);
10032
10033 if (Pmode != ptr_mode)
10034 mem = gen_rtx_SIGN_EXTEND (Pmode, mem);
10035 return emit_move_insn (reg, mem);
10036 }
10037
10038 static void
10039 sh_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
10040 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
10041 tree function)
10042 {
10043 CUMULATIVE_ARGS cum;
10044 int structure_value_byref = 0;
10045 rtx this, this_value, sibcall, insns, funexp;
10046 tree funtype = TREE_TYPE (function);
10047 int simple_add = CONST_OK_FOR_ADD (delta);
10048 int did_load = 0;
10049 rtx scratch0, scratch1, scratch2;
10050 unsigned i;
10051
10052 reload_completed = 1;
10053 epilogue_completed = 1;
10054 current_function_uses_only_leaf_regs = 1;
10055
10056 emit_note (NOTE_INSN_PROLOGUE_END);
10057
10058 /* Find the "this" pointer. We have such a wide range of ABIs for the
10059 SH that it's best to do this completely machine independently.
10060 "this" is passed as first argument, unless a structure return pointer
10061 comes first, in which case "this" comes second. */
10062 INIT_CUMULATIVE_ARGS (cum, funtype, NULL_RTX, 0, 1);
10063 #ifndef PCC_STATIC_STRUCT_RETURN
10064 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
10065 structure_value_byref = 1;
10066 #endif /* not PCC_STATIC_STRUCT_RETURN */
10067 if (structure_value_byref && sh_struct_value_rtx (function, 0) == 0)
10068 {
10069 tree ptype = build_pointer_type (TREE_TYPE (funtype));
10070
10071 FUNCTION_ARG_ADVANCE (cum, Pmode, ptype, 1);
10072 }
10073 this = FUNCTION_ARG (cum, Pmode, ptr_type_node, 1);
10074
10075 /* For SHcompact, we only have r0 for a scratch register: r1 is the
10076 static chain pointer (even if you can't have nested virtual functions
10077 right now, someone might implement them sometime), and the rest of the
10078 registers are used for argument passing, are callee-saved, or reserved. */
10079 /* We need to check call_used_regs / fixed_regs in case -fcall-saved-reg /
10080 -ffixed-reg has been used. */
10081 if (! call_used_regs[0] || fixed_regs[0])
10082 error ("r0 needs to be available as a call-clobbered register");
10083 scratch0 = scratch1 = scratch2 = gen_rtx_REG (Pmode, 0);
10084 if (! TARGET_SH5)
10085 {
10086 if (call_used_regs[1] && ! fixed_regs[1])
10087 scratch1 = gen_rtx_REG (ptr_mode, 1);
10088 /* N.B., if not TARGET_HITACHI, register 2 is used to pass the pointer
10089 to the location where struct values are to be returned. */
10090 if (call_used_regs[3] && ! fixed_regs[3])
10091 scratch2 = gen_rtx_REG (Pmode, 3);
10092 }
10093 else if (TARGET_SHMEDIA)
10094 {
10095 for (i = FIRST_GENERAL_REG; i <= LAST_GENERAL_REG; i++)
10096 if (i != REGNO (scratch0) &&
10097 call_used_regs[i] && ! fixed_regs[i] && ! FUNCTION_ARG_REGNO_P (i))
10098 {
10099 scratch1 = gen_rtx_REG (ptr_mode, i);
10100 break;
10101 }
10102 if (scratch1 == scratch0)
10103 error ("Need a second call-clobbered general purpose register");
10104 for (i = FIRST_TARGET_REG; i <= LAST_TARGET_REG; i++)
10105 if (call_used_regs[i] && ! fixed_regs[i])
10106 {
10107 scratch2 = gen_rtx_REG (Pmode, i);
10108 break;
10109 }
10110 if (scratch2 == scratch0)
10111 error ("Need a call-clobbered target register");
10112 }
10113
10114 this_value = plus_constant (this, delta);
10115 if (vcall_offset
10116 && (simple_add || scratch0 != scratch1)
10117 && strict_memory_address_p (ptr_mode, this_value))
10118 {
10119 emit_load_ptr (scratch0, this_value);
10120 did_load = 1;
10121 }
10122
10123 if (!delta)
10124 ; /* Do nothing. */
10125 else if (simple_add)
10126 emit_move_insn (this, this_value);
10127 else
10128 {
10129 emit_move_insn (scratch1, GEN_INT (delta));
10130 emit_insn (gen_add2_insn (this, scratch1));
10131 }
10132
10133 if (vcall_offset)
10134 {
10135 rtx offset_addr;
10136
10137 if (!did_load)
10138 emit_load_ptr (scratch0, this);
10139
10140 offset_addr = plus_constant (scratch0, vcall_offset);
10141 if (strict_memory_address_p (ptr_mode, offset_addr))
10142 ; /* Do nothing. */
10143 else if (! TARGET_SH5 && scratch0 != scratch1)
10144 {
10145 /* scratch0 != scratch1, and we have indexed loads. Get better
10146 schedule by loading the offset into r1 and using an indexed
10147 load - then the load of r1 can issue before the load from
10148 (this + delta) finishes. */
10149 emit_move_insn (scratch1, GEN_INT (vcall_offset));
10150 offset_addr = gen_rtx_PLUS (Pmode, scratch0, scratch1);
10151 }
10152 else if (CONST_OK_FOR_ADD (vcall_offset))
10153 {
10154 emit_insn (gen_add2_insn (scratch0, GEN_INT (vcall_offset)));
10155 offset_addr = scratch0;
10156 }
10157 else if (scratch0 != scratch1)
10158 {
10159 emit_move_insn (scratch1, GEN_INT (vcall_offset));
10160 emit_insn (gen_add2_insn (scratch0, scratch1));
10161 offset_addr = scratch0;
10162 }
10163 else
10164 gcc_unreachable (); /* FIXME */
10165 emit_load_ptr (scratch0, offset_addr);
10166
10167 if (Pmode != ptr_mode)
10168 scratch0 = gen_rtx_TRUNCATE (ptr_mode, scratch0);
10169 emit_insn (gen_add2_insn (this, scratch0));
10170 }
10171
10172 /* Generate a tail call to the target function. */
10173 if (! TREE_USED (function))
10174 {
10175 assemble_external (function);
10176 TREE_USED (function) = 1;
10177 }
10178 funexp = XEXP (DECL_RTL (function), 0);
10179 /* If the function is overridden, so is the thunk, hence we don't
10180 need GOT addressing even if this is a public symbol. */
10181 #if 0
10182 if (TARGET_SH1 && ! flag_weak)
10183 sibcall = gen_sibcalli_thunk (funexp, const0_rtx);
10184 else
10185 #endif
10186 if (TARGET_SH2 && flag_pic)
10187 {
10188 sibcall = gen_sibcall_pcrel (funexp, const0_rtx);
10189 XEXP (XVECEXP (sibcall, 0, 2), 0) = scratch2;
10190 }
10191 else
10192 {
10193 if (TARGET_SHMEDIA && flag_pic)
10194 {
10195 funexp = gen_sym2PIC (funexp);
10196 PUT_MODE (funexp, Pmode);
10197 }
10198 emit_move_insn (scratch2, funexp);
10199 funexp = gen_rtx_MEM (FUNCTION_MODE, scratch2);
10200 sibcall = gen_sibcall (funexp, const0_rtx, NULL_RTX);
10201 }
10202 sibcall = emit_call_insn (sibcall);
10203 SIBLING_CALL_P (sibcall) = 1;
10204 use_reg (&CALL_INSN_FUNCTION_USAGE (sibcall), this);
10205 emit_barrier ();
10206
10207 /* Run just enough of rest_of_compilation to do scheduling and get
10208 the insns emitted. Note that use_thunk calls
10209 assemble_start_function and assemble_end_function. */
10210
10211 insn_locators_alloc ();
10212 insns = get_insns ();
10213
10214 #if 0
10215 if (optimize > 0)
10216 {
10217 /* Initialize the bitmap obstacks. */
10218 bitmap_obstack_initialize (NULL);
10219 bitmap_obstack_initialize (&reg_obstack);
10220 if (! cfun->cfg)
10221 init_flow ();
10222 rtl_register_cfg_hooks ();
10223 init_rtl_bb_info (ENTRY_BLOCK_PTR);
10224 init_rtl_bb_info (EXIT_BLOCK_PTR);
10225 ENTRY_BLOCK_PTR->flags |= BB_RTL;
10226 EXIT_BLOCK_PTR->flags |= BB_RTL;
10227 find_basic_blocks (insns);
10228
10229 if (flag_schedule_insns_after_reload)
10230 {
10231 life_analysis (PROP_FINAL);
10232
10233 split_all_insns (1);
10234
10235 schedule_insns ();
10236 }
10237 /* We must split jmp insn in PIC case. */
10238 else if (flag_pic)
10239 split_all_insns_noflow ();
10240 }
10241 #else
10242 if (optimize > 0)
10243 {
10244 if (! cfun->cfg)
10245 init_flow ();
10246 split_all_insns_noflow ();
10247 }
10248 #endif
10249
10250 sh_reorg ();
10251
10252 if (optimize > 0 && flag_delayed_branch)
10253 dbr_schedule (insns);
10254
10255 shorten_branches (insns);
10256 final_start_function (insns, file, 1);
10257 final (insns, file, 1);
10258 final_end_function ();
10259
10260 reload_completed = 0;
10261 epilogue_completed = 0;
10262 }
10263
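/* Return an rtx (a SYMBOL_REF, or a register holding its value) for the
   function NAME of kind KIND; if TARGET is nonnull, the value is copied
   into it.  For example, the trampoline code above uses
   function_symbol (NULL, "__ic_invalidate", FUNCTION_ORDINARY) to obtain
   the address of the cache invalidation helper. */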
10264 rtx
10265 function_symbol (rtx target, const char *name, enum sh_function_kind kind)
10266 {
10267 rtx sym;
10268
10269 /* If this is not an ordinary function, the name usually comes from a
10270 string literal or an sprintf buffer. Make sure we use the same
10271 string consistently, so that cse will be able to unify address loads. */
10272 if (kind != FUNCTION_ORDINARY)
10273 name = IDENTIFIER_POINTER (get_identifier (name));
10274 sym = gen_rtx_SYMBOL_REF (Pmode, name);
10275 SYMBOL_REF_FLAGS (sym) = SYMBOL_FLAG_FUNCTION;
10276 if (flag_pic)
10277 switch (kind)
10278 {
10279 case FUNCTION_ORDINARY:
10280 break;
10281 case SFUNC_GOT:
10282 {
10283 rtx reg = target ? target : gen_reg_rtx (Pmode);
10284
10285 emit_insn (gen_symGOT2reg (reg, sym));
10286 sym = reg;
10287 break;
10288 }
10289 case SFUNC_STATIC:
10290 {
10291 /* ??? To allow cse to work, we use GOTOFF relocations.
10292 We could add combiner patterns to transform this into
10293 straight pc-relative calls with sym2PIC / bsrf when
10294 label load and function call are still 1:1 and in the
10295 same basic block during combine. */
10296 rtx reg = target ? target : gen_reg_rtx (Pmode);
10297
10298 emit_insn (gen_symGOTOFF2reg (reg, sym));
10299 sym = reg;
10300 break;
10301 }
10302 }
10303 if (target && sym != target)
10304 {
10305 emit_move_insn (target, sym);
10306 return target;
10307 }
10308 return sym;
10309 }
10310
10311 /* Find the number of a general purpose register in S. */
10312 static int
10313 scavenge_reg (HARD_REG_SET *s)
10314 {
10315 int r;
10316 for (r = FIRST_GENERAL_REG; r <= LAST_GENERAL_REG; r++)
10317 if (TEST_HARD_REG_BIT (*s, r))
10318 return r;
10319 return -1;
10320 }
10321
10322 rtx
10323 sh_get_pr_initial_val (void)
10324 {
10325 rtx val;
10326
10327 /* ??? Unfortunately, get_hard_reg_initial_val doesn't always work for the
10328 PR register on SHcompact, because it might be clobbered by the prologue.
10329 We check first if that is known to be the case. */
10330 if (TARGET_SHCOMPACT
10331 && ((current_function_args_info.call_cookie
10332 & ~ CALL_COOKIE_RET_TRAMP (1))
10333 || current_function_has_nonlocal_label))
10334 return gen_frame_mem (SImode, return_address_pointer_rtx);
10335
10336 /* If we haven't finished rtl generation, there might be a nonlocal label
10337 that we haven't seen yet.
10338 ??? get_hard_reg_initial_val fails if it is called after register
10339 allocation has started, unless it has been called before for the
10340 same register.  And even then, we end up in trouble if we didn't use
10341 the register in the same basic block before. So call
10342 get_hard_reg_initial_val now and wrap it in an unspec if we might
10343 need to replace it. */
10344 /* ??? We also must do this for TARGET_SH1 in general, because otherwise
10345 combine can put the pseudo returned by get_hard_reg_initial_val into
10346 instructions that need a general purpose register, which will fail to
10347 be recognized when the pseudo becomes allocated to PR. */
10348 val
10349 = get_hard_reg_initial_val (Pmode, TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG);
10350 if (TARGET_SH1)
10351 return gen_rtx_UNSPEC (SImode, gen_rtvec (1, val), UNSPEC_RA);
10352 return val;
10353 }
10354
10355 int
10356 sh_expand_t_scc (enum rtx_code code, rtx target)
10357 {
10358 rtx result = target;
10359 HOST_WIDE_INT val;
10360
10361 if (GET_CODE (sh_compare_op0) != REG || REGNO (sh_compare_op0) != T_REG
10362 || GET_CODE (sh_compare_op1) != CONST_INT)
10363 return 0;
10364 if (GET_CODE (result) != REG)
10365 result = gen_reg_rtx (SImode);
10366 val = INTVAL (sh_compare_op1);
10367 if ((code == EQ && val == 1) || (code == NE && val == 0))
10368 emit_insn (gen_movt (result));
10369 else if ((code == EQ && val == 0) || (code == NE && val == 1))
10370 {
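/* result = result - result - T = -T (the CLOBBER tells dataflow that the
   previous contents of result are dead); adding 1 then yields 1 - T,
   i.e. the logical negation of the T bit. */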
10371 emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
10372 emit_insn (gen_subc (result, result, result));
10373 emit_insn (gen_addsi3 (result, result, const1_rtx));
10374 }
10375 else if (code == EQ || code == NE)
10376 emit_insn (gen_move_insn (result, GEN_INT (code == NE)));
10377 else
10378 return 0;
10379 if (result != target)
10380 emit_move_insn (target, result);
10381 return 1;
10382 }
10383
10384 /* INSN is an sfunc; return the rtx that describes the address used. */
10385 static rtx
10386 extract_sfunc_addr (rtx insn)
10387 {
10388 rtx pattern, part = NULL_RTX;
10389 int len, i;
10390
10391 pattern = PATTERN (insn);
10392 len = XVECLEN (pattern, 0);
10393 for (i = 0; i < len; i++)
10394 {
10395 part = XVECEXP (pattern, 0, i);
10396 if (GET_CODE (part) == USE && GET_MODE (XEXP (part, 0)) == Pmode
10397 && GENERAL_REGISTER_P (true_regnum (XEXP (part, 0))))
10398 return XEXP (part, 0);
10399 }
10400 gcc_assert (GET_CODE (XVECEXP (pattern, 0, 0)) == UNSPEC_VOLATILE);
10401 return XVECEXP (XVECEXP (pattern, 0, 0), 0, 1);
10402 }
10403
10404 /* Verify that the register in use_sfunc_addr still agrees with the address
10405 used in the sfunc. This prevents fill_slots_from_thread from changing
10406 use_sfunc_addr.
10407 INSN is the use_sfunc_addr instruction, and REG is the register it
10408 guards. */
10409 int
10410 check_use_sfunc_addr (rtx insn, rtx reg)
10411 {
10412 /* Search for the sfunc. It should really come right after INSN. */
10413 while ((insn = NEXT_INSN (insn)))
10414 {
10415 if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
10416 break;
10417 if (! INSN_P (insn))
10418 continue;
10419
10420 if (GET_CODE (PATTERN (insn)) == SEQUENCE)
10421 insn = XVECEXP (PATTERN (insn), 0, 0);
10422 if (GET_CODE (PATTERN (insn)) != PARALLEL
10423 || get_attr_type (insn) != TYPE_SFUNC)
10424 continue;
10425 return rtx_equal_p (extract_sfunc_addr (insn), reg);
10426 }
10427 gcc_unreachable ();
10428 }
10429
10430 /* This function returns a constant rtx that represents 2**15 / pi in
10431 SFmode.  It's used to scale SFmode angles, in radians, to a
10432 fixed-point signed 16.16-bit fraction of a full circle (i.e., 2*pi
10433 maps to 0x10000). */
10434
10435 static GTY(()) rtx sh_fsca_sf2int_rtx;
10436
10437 rtx
10438 sh_fsca_sf2int (void)
10439 {
10440 if (! sh_fsca_sf2int_rtx)
10441 {
10442 REAL_VALUE_TYPE rv;
10443
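/* 2**15 / pi = 0x10000 / (2 * pi) ~= 10430.378350470453, so multiplying
   an angle in radians by this constant maps 2*pi onto 0x10000. */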
10444 real_from_string (&rv, "10430.378350470453");
10445 sh_fsca_sf2int_rtx = const_double_from_real_value (rv, SFmode);
10446 }
10447
10448 return sh_fsca_sf2int_rtx;
10449 }
10450
10451 /* This function returns a constant rtx that represents 2**15 / pi in
10452 DFmode.  It's used to scale DFmode angles, in radians, to a
10453 fixed-point signed 16.16-bit fraction of a full circle (i.e., 2*pi
10454 maps to 0x10000). */
10455
10456 static GTY(()) rtx sh_fsca_df2int_rtx;
10457
10458 rtx
10459 sh_fsca_df2int (void)
10460 {
10461 if (! sh_fsca_df2int_rtx)
10462 {
10463 REAL_VALUE_TYPE rv;
10464
10465 real_from_string (&rv, "10430.378350470453");
10466 sh_fsca_df2int_rtx = const_double_from_real_value (rv, DFmode);
10467 }
10468
10469 return sh_fsca_df2int_rtx;
10470 }
10471
10472 /* This function returns a constant rtx that represents pi / 2**15 in
10473 SFmode.  It's used to scale a fixed-point signed 16.16-bit fraction
10474 of a full circle back to an SFmode value (i.e., 0x10000 maps to
10475 2*pi). */
10476
10477 static GTY(()) rtx sh_fsca_int2sf_rtx;
10478
10479 rtx
10480 sh_fsca_int2sf (void)
10481 {
10482 if (! sh_fsca_int2sf_rtx)
10483 {
10484 REAL_VALUE_TYPE rv;
10485
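/* pi / 2**15 = (2 * pi) / 0x10000 ~= 9.587379924285257e-5, the inverse
   of the scale factor used by sh_fsca_sf2int above. */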
10486 real_from_string (&rv, "9.587379924285257e-5");
10487 sh_fsca_int2sf_rtx = const_double_from_real_value (rv, SFmode);
10488 }
10489
10490 return sh_fsca_int2sf_rtx;
10491 }
10492
10493 /* Initialize the CUMULATIVE_ARGS structure. */
10494
10495 void
10496 sh_init_cumulative_args (CUMULATIVE_ARGS * pcum,
10497 tree fntype,
10498 rtx libname ATTRIBUTE_UNUSED,
10499 tree fndecl,
10500 signed int n_named_args,
10501 enum machine_mode mode)
10502 {
10503 pcum->arg_count [(int) SH_ARG_FLOAT] = 0;
10504 pcum->free_single_fp_reg = 0;
10505 pcum->stack_regs = 0;
10506 pcum->byref_regs = 0;
10507 pcum->byref = 0;
10508 pcum->outgoing = (n_named_args == -1) ? 0 : 1;
10509
10510 /* XXX - Should we check TARGET_HITACHI here ??? */
10511 pcum->renesas_abi = sh_attr_renesas_p (fntype) ? 1 : 0;
10512
10513 if (fntype)
10514 {
10515 pcum->force_mem = ((TARGET_HITACHI || pcum->renesas_abi)
10516 && aggregate_value_p (TREE_TYPE (fntype), fndecl));
10517 pcum->prototype_p = TYPE_ARG_TYPES (fntype) ? TRUE : FALSE;
10518 pcum->arg_count [(int) SH_ARG_INT]
10519 = TARGET_SH5 && aggregate_value_p (TREE_TYPE (fntype), fndecl);
10520
10521 pcum->call_cookie
10522 = CALL_COOKIE_RET_TRAMP (TARGET_SHCOMPACT
10523 && pcum->arg_count [(int) SH_ARG_INT] == 0
10524 && (TYPE_MODE (TREE_TYPE (fntype)) == BLKmode
10525 ? int_size_in_bytes (TREE_TYPE (fntype))
10526 : GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (fntype)))) > 4
10527 && (BASE_RETURN_VALUE_REG (TYPE_MODE (TREE_TYPE (fntype)))
10528 == FIRST_RET_REG));
10529 }
10530 else
10531 {
10532 pcum->arg_count [(int) SH_ARG_INT] = 0;
10533 pcum->prototype_p = FALSE;
10534 if (mode != VOIDmode)
10535 {
10536 pcum->call_cookie =
10537 CALL_COOKIE_RET_TRAMP (TARGET_SHCOMPACT
10538 && GET_MODE_SIZE (mode) > 4
10539 && BASE_RETURN_VALUE_REG (mode) == FIRST_RET_REG);
10540
10541 /* If the default ABI is the Renesas ABI then all library
10542 calls must assume that the library will be using the
10543 Renesas ABI. So if the function would return its result
10544 in memory then we must force the address of this memory
10545 block onto the stack. Ideally we would like to call
10546 targetm.calls.return_in_memory() here but we do not have
10547 the TYPE or the FNDECL available so we synthesize the
10548 contents of that function as best we can. */
10549 pcum->force_mem =
10550 (TARGET_DEFAULT & MASK_HITACHI)
10551 && (mode == BLKmode
10552 || (GET_MODE_SIZE (mode) > 4
10553 && !(mode == DFmode
10554 && TARGET_FPU_DOUBLE)));
10555 }
10556 else
10557 {
10558 pcum->call_cookie = 0;
10559 pcum->force_mem = FALSE;
10560 }
10561 }
10562 }
10563
10564 /* Replace any occurrence of FROM(n) in X with TO(n).  The function does
10565 not descend into CONST_DOUBLEs when replacing.
10566
10567 Note that copying is not done so X must not be shared unless all copies
10568 are to be modified.
10569
10570 This is like replace_rtx, except that we operate on N_REPLACEMENTS
10571 replacements simultaneously - FROM(n) is replacements[n*2] and TO(n) is
10572 replacements[n*2+1] - and that we take mode changes into account.
10573
10574 If a replacement is ambiguous, return NULL_RTX.
10575
10576 If MODIFY is zero, don't modify any rtl in place,
10577 just return zero or nonzero for failure / success. */
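/* For instance, with N_REPLACEMENTS == 1, REPLACEMENTS[0] == (reg:SI 1)
   and REPLACEMENTS[1] == (reg:SI 5), every use of r1 within X becomes a
   use of r5; the register numbers here are chosen purely for
   illustration. */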
10578
10579 rtx
10580 replace_n_hard_rtx (rtx x, rtx *replacements, int n_replacements, int modify)
10581 {
10582 int i, j;
10583 const char *fmt;
10584
10585 /* The following prevents loops from occurring when we change a MEM inside
10586 a CONST_DOUBLE into the same CONST_DOUBLE. */
10587 if (x != 0 && GET_CODE (x) == CONST_DOUBLE)
10588 return x;
10589
10590 for (i = n_replacements - 1; i >= 0 ; i--)
10591 if (x == replacements[i*2] && GET_MODE (x) == GET_MODE (replacements[i*2+1]))
10592 return replacements[i*2+1];
10593
10594 /* Allow this function to make replacements in EXPR_LISTs. */
10595 if (x == 0)
10596 return 0;
10597
10598 if (GET_CODE (x) == SUBREG)
10599 {
10600 rtx new = replace_n_hard_rtx (SUBREG_REG (x), replacements,
10601 n_replacements, modify);
10602
10603 if (GET_CODE (new) == CONST_INT)
10604 {
10605 x = simplify_subreg (GET_MODE (x), new,
10606 GET_MODE (SUBREG_REG (x)),
10607 SUBREG_BYTE (x));
10608 if (! x)
10609 abort ();
10610 }
10611 else if (modify)
10612 SUBREG_REG (x) = new;
10613
10614 return x;
10615 }
10616 else if (GET_CODE (x) == REG)
10617 {
10618 unsigned regno = REGNO (x);
10619 unsigned nregs = (regno < FIRST_PSEUDO_REGISTER
10620 ? HARD_REGNO_NREGS (regno, GET_MODE (x)) : 1);
10621 rtx result = NULL_RTX;
10622
10623 for (i = n_replacements - 1; i >= 0; i--)
10624 {
10625 rtx from = replacements[i*2];
10626 rtx to = replacements[i*2+1];
10627 unsigned from_regno, from_nregs, to_regno, new_regno;
10628
10629 if (GET_CODE (from) != REG)
10630 continue;
10631 from_regno = REGNO (from);
10632 from_nregs = (from_regno < FIRST_PSEUDO_REGISTER
10633 ? HARD_REGNO_NREGS (from_regno, GET_MODE (from)) : 1);
10634 if (regno < from_regno + from_nregs && regno + nregs > from_regno)
10635 {
10636 if (regno < from_regno
10637 || regno + nregs > from_regno + from_nregs
10638 || GET_CODE (to) != REG
10639 || result)
10640 return NULL_RTX;
10641 to_regno = REGNO (to);
10642 if (to_regno < FIRST_PSEUDO_REGISTER)
10643 {
10644 new_regno = regno + to_regno - from_regno;
10645 if ((unsigned) HARD_REGNO_NREGS (new_regno, GET_MODE (x))
10646 != nregs)
10647 return NULL_RTX;
10648 result = gen_rtx_REG (GET_MODE (x), new_regno);
10649 }
10650 else if (GET_MODE (x) <= GET_MODE (to))
10651 result = gen_lowpart_common (GET_MODE (x), to);
10652 else
10653 result = gen_lowpart_SUBREG (GET_MODE (x), to);
10654 }
10655 }
10656 return result ? result : x;
10657 }
10658 else if (GET_CODE (x) == ZERO_EXTEND)
10659 {
10660 rtx new = replace_n_hard_rtx (XEXP (x, 0), replacements,
10661 n_replacements, modify);
10662
10663 if (GET_CODE (new) == CONST_INT)
10664 {
10665 x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
10666 new, GET_MODE (XEXP (x, 0)));
10667 if (! x)
10668 abort ();
10669 }
10670 else if (modify)
10671 XEXP (x, 0) = new;
10672
10673 return x;
10674 }
10675
10676 fmt = GET_RTX_FORMAT (GET_CODE (x));
10677 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
10678 {
10679 rtx new;
10680
10681 if (fmt[i] == 'e')
10682 {
10683 new = replace_n_hard_rtx (XEXP (x, i), replacements,
10684 n_replacements, modify);
10685 if (!new)
10686 return NULL_RTX;
10687 if (modify)
10688 XEXP (x, i) = new;
10689 }
10690 else if (fmt[i] == 'E')
10691 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
10692 {
10693 new = replace_n_hard_rtx (XVECEXP (x, i, j), replacements,
10694 n_replacements, modify);
10695 if (!new)
10696 return NULL_RTX;
10697 if (modify)
10698 XVECEXP (x, i, j) = new;
10699 }
10700 }
10701
10702 return x;
10703 }
10704
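/* Generate a TRUNCATE of X to MODE, looking through a ZERO_EXTEND or
   SIGN_EXTEND so that the truncation (or, for a narrower inner operand, a
   re-extension) is applied directly to the original operand.  A nonzero
   NEED_SIGN_EXT means only a SIGN_EXTEND may be reused for a narrower
   inner operand. */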
10705 rtx
10706 sh_gen_truncate (enum machine_mode mode, rtx x, int need_sign_ext)
10707 {
10708 enum rtx_code code = TRUNCATE;
10709
10710 if (GET_CODE (x) == ZERO_EXTEND || GET_CODE (x) == SIGN_EXTEND)
10711 {
10712 rtx inner = XEXP (x, 0);
10713 enum machine_mode inner_mode = GET_MODE (inner);
10714
10715 if (inner_mode == mode)
10716 return inner;
10717 else if (GET_MODE_SIZE (inner_mode) >= GET_MODE_SIZE (mode))
10718 x = inner;
10719 else if (GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (mode)
10720 && (! need_sign_ext || GET_CODE (x) == SIGN_EXTEND))
10721 {
10722 code = GET_CODE (x);
10723 x = inner;
10724 }
10725 }
10726 return gen_rtx_fmt_e (code, mode, x);
10727 }
10728
10729 /* Called via for_each_rtx after reload, to clean up truncates of
10730 registers that span multiple actual hard registers. */
10731 int
10732 shmedia_cleanup_truncate (rtx *p, void *n_changes)
10733 {
10734 rtx x = *p, reg;
10735
10736 if (GET_CODE (x) != TRUNCATE)
10737 return 0;
10738 reg = XEXP (x, 0);
10739 if (GET_MODE_SIZE (GET_MODE (reg)) > 8 && GET_CODE (reg) == REG)
10740 {
10741 enum machine_mode reg_mode = GET_MODE (reg);
10742 XEXP (x, 0) = simplify_subreg (DImode, reg, reg_mode,
10743 subreg_lowpart_offset (DImode, reg_mode));
10744 *(int*) n_changes += 1;
10745 return -1;
10746 }
10747 return 0;
10748 }
10749
10750 /* Load and store insns depend on the highpart of the address. However,
10751 set_attr_alternative does not give well-defined results before reload,
10752 so we must look at the rtl ourselves to see if any of the feeding
10753 registers is used in a memref. */
10754
10755 /* Called by sh_contains_memref_p via for_each_rtx. */
10756 static int
10757 sh_contains_memref_p_1 (rtx *loc, void *data ATTRIBUTE_UNUSED)
10758 {
10759 return (GET_CODE (*loc) == MEM);
10760 }
10761
10762 /* Return nonzero iff INSN contains a MEM. */
10763 int
10764 sh_contains_memref_p (rtx insn)
10765 {
10766 return for_each_rtx (&PATTERN (insn), &sh_contains_memref_p_1, NULL);
10767 }
10768
10769 /* Return nonzero iff INSN loads a banked register. */
10770 int
10771 sh_loads_bankedreg_p (rtx insn)
10772 {
10773 if (GET_CODE (PATTERN (insn)) == SET)
10774 {
10775 rtx op = SET_DEST (PATTERN (insn));
10776 if (REG_P (op) && BANKED_REGISTER_P (REGNO (op)))
10777 return 1;
10778 }
10779
10780 return 0;
10781 }
10782
10783 /* FNADDR is the MEM expression from a call expander. Return an address
10784 to use in an SHmedia insn pattern. */
10785 rtx
10786 shmedia_prepare_call_address (rtx fnaddr, int is_sibcall)
10787 {
10788 int is_sym;
10789
10790 fnaddr = XEXP (fnaddr, 0);
10791 is_sym = GET_CODE (fnaddr) == SYMBOL_REF;
10792 if (flag_pic && is_sym)
10793 {
10794 if (! SYMBOL_REF_LOCAL_P (fnaddr))
10795 {
10796 rtx reg = gen_reg_rtx (Pmode);
10797
10798 /* We must not use GOTPLT for sibcalls, because PIC_REG
10799 must be restored before the PLT code gets to run. */
10800 if (is_sibcall)
10801 emit_insn (gen_symGOT2reg (reg, fnaddr));
10802 else
10803 emit_insn (gen_symGOTPLT2reg (reg, fnaddr));
10804 fnaddr = reg;
10805 }
10806 else
10807 {
10808 fnaddr = gen_sym2PIC (fnaddr);
10809 PUT_MODE (fnaddr, Pmode);
10810 }
10811 }
10812 /* If ptabs might trap, make this visible to the rest of the compiler.
10813 We generally assume that symbols pertain to valid locations, but
10814 it is possible to generate invalid symbols with asm or linker tricks.
10815 In a list of functions where each returns its successor, an invalid
10816 symbol might denote an empty list. */
10817 if (!TARGET_PT_FIXED
10818 && (!is_sym || TARGET_INVALID_SYMBOLS)
10819 && (!REG_P (fnaddr) || ! TARGET_REGISTER_P (REGNO (fnaddr))))
10820 {
10821 rtx tr = gen_reg_rtx (PDImode);
10822
10823 emit_insn (gen_ptabs (tr, fnaddr));
10824 fnaddr = tr;
10825 }
10826 else if (! target_reg_operand (fnaddr, Pmode))
10827 fnaddr = copy_to_mode_reg (Pmode, fnaddr);
10828 return fnaddr;
10829 }
10830
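/* Secondary reload hook for SH: return the class of registers needed
   as an intermediate when moving X into (IN_P true) or out of a
   register of class CLASS in MODE.  Where a dedicated reload pattern
   exists, set SRI->icode to it and return NO_REGS instead.  */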
10831 enum reg_class
10832 sh_secondary_reload (bool in_p, rtx x, enum reg_class class,
10833 enum machine_mode mode, secondary_reload_info *sri)
10834 {
10835 if (in_p)
10836 {
10837 if (REGCLASS_HAS_FP_REG (class)
10838 && ! TARGET_SHMEDIA
10839 && immediate_operand ((x), mode)
10840 && ! ((fp_zero_operand (x) || fp_one_operand (x))
10841 && mode == SFmode && fldi_ok ()))
10842 switch (mode)
10843 {
10844 case SFmode:
10845 sri->icode = CODE_FOR_reload_insf__frn;
10846 return NO_REGS;
10847 case DFmode:
10848 sri->icode = CODE_FOR_reload_indf__frn;
10849 return NO_REGS;
10850 case SImode:
10851 /* ??? If we knew that we were in the appropriate mode -
10852 single precision - we could use a reload pattern directly. */
10853 return FPUL_REGS;
10854 default:
10855 abort ();
10856 }
10857 if (class == FPUL_REGS
10858 && ((GET_CODE (x) == REG
10859 && (REGNO (x) == MACL_REG || REGNO (x) == MACH_REG
10860 || REGNO (x) == T_REG))
10861 || GET_CODE (x) == PLUS))
10862 return GENERAL_REGS;
10863 if (class == FPUL_REGS && immediate_operand (x, mode))
10864 {
10865 if (satisfies_constraint_I08 (x))
10866 return GENERAL_REGS;
10867 sri->icode = CODE_FOR_reload_insi__i_fpul;
10868 return NO_REGS;
10869 }
10870 if (class == FPSCR_REGS
10871 && ((GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER)
10872 || (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == PLUS)))
10873 return GENERAL_REGS;
10874 if (REGCLASS_HAS_FP_REG (class)
10875 && TARGET_SHMEDIA
10876 && immediate_operand (x, mode)
10877 && x != CONST0_RTX (GET_MODE (x))
10878 && GET_MODE (x) != V4SFmode)
10879 return GENERAL_REGS;
10880 if ((mode == QImode || mode == HImode)
10881 && TARGET_SHMEDIA && inqhi_operand (x, mode))
10882 {
10883 sri->icode = ((mode == QImode)
10884 ? CODE_FOR_reload_inqi : CODE_FOR_reload_inhi);
10885 return NO_REGS;
10886 }
10887 if (TARGET_SHMEDIA && class == GENERAL_REGS
10888 && (GET_CODE (x) == LABEL_REF || PIC_DIRECT_ADDR_P (x)))
10889 return TARGET_REGS;
10890 } /* End of input-only processing. */
10891
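/* The checks below apply to both input and output reloads.  */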
10892 if (((REGCLASS_HAS_FP_REG (class)
10893 && (GET_CODE (x) == REG
10894 && (GENERAL_OR_AP_REGISTER_P (REGNO (x))
10895 || (FP_REGISTER_P (REGNO (x)) && mode == SImode
10896 && TARGET_FMOVD))))
10897 || (REGCLASS_HAS_GENERAL_REG (class)
10898 && GET_CODE (x) == REG
10899 && FP_REGISTER_P (REGNO (x))))
10900 && ! TARGET_SHMEDIA
10901 && (mode == SFmode || mode == SImode))
10902 return FPUL_REGS;
10903 if ((class == FPUL_REGS
10904 || (REGCLASS_HAS_FP_REG (class)
10905 && ! TARGET_SHMEDIA && mode == SImode))
10906 && (GET_CODE (x) == MEM
10907 || (GET_CODE (x) == REG
10908 && (REGNO (x) >= FIRST_PSEUDO_REGISTER
10909 || REGNO (x) == T_REG
10910 || system_reg_operand (x, VOIDmode)))))
10911 {
10912 if (class == FPUL_REGS)
10913 return GENERAL_REGS;
10914 return FPUL_REGS;
10915 }
10916 if ((class == TARGET_REGS
10917 || (TARGET_SHMEDIA && class == SIBCALL_REGS))
10918 && !satisfies_constraint_Csy (x)
10919 && (GET_CODE (x) != REG || ! GENERAL_REGISTER_P (REGNO (x))))
10920 return GENERAL_REGS;
10921 if ((class == MAC_REGS || class == PR_REGS)
10922 && GET_CODE (x) == REG && ! GENERAL_REGISTER_P (REGNO (x))
10923 && class != REGNO_REG_CLASS (REGNO (x)))
10924 return GENERAL_REGS;
10925 if (class != GENERAL_REGS && GET_CODE (x) == REG
10926 && TARGET_REGISTER_P (REGNO (x)))
10927 return GENERAL_REGS;
10928 return NO_REGS;
10929 }
10930
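/* Integer division strategy in effect; initialized to the configuration
   default, and may be changed when the division-related options are
   processed elsewhere in this file.  */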
10931 enum sh_divide_strategy_e sh_div_strategy = SH_DIV_STRATEGY_DEFAULT;
10932
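/* gengtype-generated garbage collection tables for this file.  */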
10933 #include "gt-sh.h"