1 /* Subroutines for insn-output.c for Motorola 68000 family.
2 Copyright (C) 1987, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
3 2001, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
24 #include "coretypes.h"
30 #include "hard-reg-set.h"
31 #include "insn-config.h"
32 #include "conditions.h"
34 #include "insn-attr.h"
36 #include "diagnostic-core.h"
41 #include "target-def.h"
45 /* ??? Need to add a dependency between m68k.o and sched-int.h. */
46 #include "sched-int.h"
47 #include "insn-codes.h"
/* Class of each hard register: registers 0-7 (d0-d7) are DATA_REGS,
   8-15 (a0-a7) are ADDR_REGS and 16-23 (fp0-fp7) are FP_REGS, matching
   the 0-16-24 register ranges used by m68k_compute_frame_layout.
   NOTE(review): the initializer braces and any trailing entries were
   lost when this chunk was extracted -- restore from the original.  */
51 enum reg_class regno_reg_class
[] =
53 DATA_REGS
, DATA_REGS
, DATA_REGS
, DATA_REGS
,
54 DATA_REGS
, DATA_REGS
, DATA_REGS
, DATA_REGS
,
55 ADDR_REGS
, ADDR_REGS
, ADDR_REGS
, ADDR_REGS
,
56 ADDR_REGS
, ADDR_REGS
, ADDR_REGS
, ADDR_REGS
,
57 FP_REGS
, FP_REGS
, FP_REGS
, FP_REGS
,
58 FP_REGS
, FP_REGS
, FP_REGS
, FP_REGS
,
63 /* The minimum number of integer registers that we want to save with the
64 movem instruction. Using two movel instructions instead of a single
65 moveml is about 15% faster for the 68020 and 68030 at no expense in
67 #define MIN_MOVEM_REGS 3
69 /* The minimum number of floating point registers that we want to save
70 with the fmovem instruction. */
71 #define MIN_FMOVEM_REGS 1
73 /* Structure describing stack frame layout. */
76 /* Stack pointer to frame pointer offset. */
79 /* Offset of FPU registers. */
80 HOST_WIDE_INT foffset
;
82 /* Frame size in bytes (rounded up). */
85 /* Data and address register. */
/* Bit I of REG_MASK is set when register D0_REG + I must be saved;
   filled in by m68k_compute_frame_layout.  */
87 unsigned int reg_mask
;
/* Bit I of FPU_MASK is set when register FP0_REG + I must be saved.  */
91 unsigned int fpu_mask
;
93 /* Offsets relative to ARG_POINTER. */
94 HOST_WIDE_INT frame_pointer_offset
;
95 HOST_WIDE_INT stack_pointer_offset
;
97 /* Function which the above information refers to. */
/* NOTE(review): members referenced elsewhere in this file (size,
   offset, reg_no, fpu_no, funcdef_no) are missing from this extracted
   text -- confirm the struct body against the original file.  */
101 /* Current frame information calculated by m68k_compute_frame_layout(). */
102 static struct m68k_frame current_frame
;
104 /* Structure describing an m68k address.
106 If CODE is UNKNOWN, the address is BASE + INDEX * SCALE + OFFSET,
107 with null fields evaluating to 0. Here:
109 - BASE satisfies m68k_legitimate_base_reg_p
110 - INDEX satisfies m68k_legitimate_index_reg_p
111 - OFFSET satisfies m68k_legitimate_constant_address_p
113 INDEX is either HImode or SImode. The other fields are SImode.
115 If CODE is PRE_DEC, the address is -(BASE). If CODE is POST_INC,
116 the address is (BASE)+. */
117 struct m68k_address
{
125 static int m68k_sched_adjust_cost (rtx
, rtx
, rtx
, int);
126 static int m68k_sched_issue_rate (void);
127 static int m68k_sched_variable_issue (FILE *, int, rtx
, int);
128 static void m68k_sched_md_init_global (FILE *, int, int);
129 static void m68k_sched_md_finish_global (FILE *, int);
130 static void m68k_sched_md_init (FILE *, int, int);
131 static void m68k_sched_dfa_pre_advance_cycle (void);
132 static void m68k_sched_dfa_post_advance_cycle (void);
133 static int m68k_sched_first_cycle_multipass_dfa_lookahead (void);
135 static bool m68k_can_eliminate (const int, const int);
136 static void m68k_conditional_register_usage (void);
137 static bool m68k_legitimate_address_p (enum machine_mode
, rtx
, bool);
138 static bool m68k_handle_option (struct gcc_options
*, struct gcc_options
*,
139 const struct cl_decoded_option
*, location_t
);
140 static void m68k_option_override (void);
141 static rtx
find_addr_reg (rtx
);
142 static const char *singlemove_string (rtx
*);
143 static void m68k_output_mi_thunk (FILE *, tree
, HOST_WIDE_INT
,
144 HOST_WIDE_INT
, tree
);
145 static rtx
m68k_struct_value_rtx (tree
, int);
146 static tree
m68k_handle_fndecl_attribute (tree
*node
, tree name
,
147 tree args
, int flags
,
149 static void m68k_compute_frame_layout (void);
150 static bool m68k_save_reg (unsigned int regno
, bool interrupt_handler
);
151 static bool m68k_ok_for_sibcall_p (tree
, tree
);
152 static bool m68k_tls_symbol_p (rtx
);
153 static rtx
m68k_legitimize_address (rtx
, rtx
, enum machine_mode
);
154 static bool m68k_rtx_costs (rtx
, int, int, int *, bool);
155 #if M68K_HONOR_TARGET_STRICT_ALIGNMENT
156 static bool m68k_return_in_memory (const_tree
, const_tree
);
158 static void m68k_output_dwarf_dtprel (FILE *, int, rtx
) ATTRIBUTE_UNUSED
;
159 static void m68k_trampoline_init (rtx
, tree
, rtx
);
160 static int m68k_return_pops_args (tree
, tree
, int);
161 static rtx
m68k_delegitimize_address (rtx
);
162 static void m68k_function_arg_advance (CUMULATIVE_ARGS
*, enum machine_mode
,
164 static rtx
m68k_function_arg (CUMULATIVE_ARGS
*, enum machine_mode
,
168 /* Specify the identification number of the library being built */
169 const char *m68k_library_id_string
= "_current_shared_library_a5_offset_";
171 /* Initialize the GCC target structure. */
173 #if INT_OP_GROUP == INT_OP_DOT_WORD
174 #undef TARGET_ASM_ALIGNED_HI_OP
175 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
178 #if INT_OP_GROUP == INT_OP_NO_DOT
179 #undef TARGET_ASM_BYTE_OP
180 #define TARGET_ASM_BYTE_OP "\tbyte\t"
181 #undef TARGET_ASM_ALIGNED_HI_OP
182 #define TARGET_ASM_ALIGNED_HI_OP "\tshort\t"
183 #undef TARGET_ASM_ALIGNED_SI_OP
184 #define TARGET_ASM_ALIGNED_SI_OP "\tlong\t"
187 #if INT_OP_GROUP == INT_OP_DC
188 #undef TARGET_ASM_BYTE_OP
189 #define TARGET_ASM_BYTE_OP "\tdc.b\t"
190 #undef TARGET_ASM_ALIGNED_HI_OP
191 #define TARGET_ASM_ALIGNED_HI_OP "\tdc.w\t"
192 #undef TARGET_ASM_ALIGNED_SI_OP
193 #define TARGET_ASM_ALIGNED_SI_OP "\tdc.l\t"
196 #undef TARGET_ASM_UNALIGNED_HI_OP
197 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
198 #undef TARGET_ASM_UNALIGNED_SI_OP
199 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
201 #undef TARGET_ASM_OUTPUT_MI_THUNK
202 #define TARGET_ASM_OUTPUT_MI_THUNK m68k_output_mi_thunk
203 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
204 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
206 #undef TARGET_ASM_FILE_START_APP_OFF
207 #define TARGET_ASM_FILE_START_APP_OFF true
209 #undef TARGET_LEGITIMIZE_ADDRESS
210 #define TARGET_LEGITIMIZE_ADDRESS m68k_legitimize_address
212 #undef TARGET_SCHED_ADJUST_COST
213 #define TARGET_SCHED_ADJUST_COST m68k_sched_adjust_cost
215 #undef TARGET_SCHED_ISSUE_RATE
216 #define TARGET_SCHED_ISSUE_RATE m68k_sched_issue_rate
218 #undef TARGET_SCHED_VARIABLE_ISSUE
219 #define TARGET_SCHED_VARIABLE_ISSUE m68k_sched_variable_issue
221 #undef TARGET_SCHED_INIT_GLOBAL
222 #define TARGET_SCHED_INIT_GLOBAL m68k_sched_md_init_global
224 #undef TARGET_SCHED_FINISH_GLOBAL
225 #define TARGET_SCHED_FINISH_GLOBAL m68k_sched_md_finish_global
227 #undef TARGET_SCHED_INIT
228 #define TARGET_SCHED_INIT m68k_sched_md_init
230 #undef TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE
231 #define TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE m68k_sched_dfa_pre_advance_cycle
233 #undef TARGET_SCHED_DFA_POST_ADVANCE_CYCLE
234 #define TARGET_SCHED_DFA_POST_ADVANCE_CYCLE m68k_sched_dfa_post_advance_cycle
236 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
237 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
238 m68k_sched_first_cycle_multipass_dfa_lookahead
240 #undef TARGET_HANDLE_OPTION
241 #define TARGET_HANDLE_OPTION m68k_handle_option
243 #undef TARGET_OPTION_OVERRIDE
244 #define TARGET_OPTION_OVERRIDE m68k_option_override
246 #undef TARGET_RTX_COSTS
247 #define TARGET_RTX_COSTS m68k_rtx_costs
249 #undef TARGET_ATTRIBUTE_TABLE
250 #define TARGET_ATTRIBUTE_TABLE m68k_attribute_table
252 #undef TARGET_PROMOTE_PROTOTYPES
253 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
255 #undef TARGET_STRUCT_VALUE_RTX
256 #define TARGET_STRUCT_VALUE_RTX m68k_struct_value_rtx
258 #undef TARGET_CANNOT_FORCE_CONST_MEM
259 #define TARGET_CANNOT_FORCE_CONST_MEM m68k_illegitimate_symbolic_constant_p
261 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
262 #define TARGET_FUNCTION_OK_FOR_SIBCALL m68k_ok_for_sibcall_p
264 #if M68K_HONOR_TARGET_STRICT_ALIGNMENT
265 #undef TARGET_RETURN_IN_MEMORY
266 #define TARGET_RETURN_IN_MEMORY m68k_return_in_memory
270 #undef TARGET_HAVE_TLS
271 #define TARGET_HAVE_TLS (true)
273 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
274 #define TARGET_ASM_OUTPUT_DWARF_DTPREL m68k_output_dwarf_dtprel
277 #undef TARGET_LEGITIMATE_ADDRESS_P
278 #define TARGET_LEGITIMATE_ADDRESS_P m68k_legitimate_address_p
280 #undef TARGET_CAN_ELIMINATE
281 #define TARGET_CAN_ELIMINATE m68k_can_eliminate
283 #undef TARGET_CONDITIONAL_REGISTER_USAGE
284 #define TARGET_CONDITIONAL_REGISTER_USAGE m68k_conditional_register_usage
286 #undef TARGET_TRAMPOLINE_INIT
287 #define TARGET_TRAMPOLINE_INIT m68k_trampoline_init
289 #undef TARGET_RETURN_POPS_ARGS
290 #define TARGET_RETURN_POPS_ARGS m68k_return_pops_args
292 #undef TARGET_DELEGITIMIZE_ADDRESS
293 #define TARGET_DELEGITIMIZE_ADDRESS m68k_delegitimize_address
295 #undef TARGET_FUNCTION_ARG
296 #define TARGET_FUNCTION_ARG m68k_function_arg
298 #undef TARGET_FUNCTION_ARG_ADVANCE
299 #define TARGET_FUNCTION_ARG_ADVANCE m68k_function_arg_advance
301 static const struct attribute_spec m68k_attribute_table
[] =
303 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
304 affects_type_identity } */
305 { "interrupt", 0, 0, true, false, false, m68k_handle_fndecl_attribute
,
307 { "interrupt_handler", 0, 0, true, false, false,
308 m68k_handle_fndecl_attribute
, false },
309 { "interrupt_thread", 0, 0, true, false, false,
310 m68k_handle_fndecl_attribute
, false },
311 { NULL
, 0, 0, false, false, false, NULL
, false }
314 struct gcc_target targetm
= TARGET_INITIALIZER
;
316 /* Base flags for 68k ISAs. */
317 #define FL_FOR_isa_00 FL_ISA_68000
318 #define FL_FOR_isa_10 (FL_FOR_isa_00 | FL_ISA_68010)
319 /* FL_68881 controls the default setting of -m68881. gcc has traditionally
320 generated 68881 code for 68020 and 68030 targets unless explicitly told
322 #define FL_FOR_isa_20 (FL_FOR_isa_10 | FL_ISA_68020 \
323 | FL_BITFIELD | FL_68881)
324 #define FL_FOR_isa_40 (FL_FOR_isa_20 | FL_ISA_68040)
325 #define FL_FOR_isa_cpu32 (FL_FOR_isa_10 | FL_ISA_68020)
327 /* Base flags for ColdFire ISAs. */
328 #define FL_FOR_isa_a (FL_COLDFIRE | FL_ISA_A)
329 #define FL_FOR_isa_aplus (FL_FOR_isa_a | FL_ISA_APLUS | FL_CF_USP)
330 /* Note ISA_B doesn't necessarily include USP (user stack pointer) support. */
331 #define FL_FOR_isa_b (FL_FOR_isa_a | FL_ISA_B | FL_CF_HWDIV)
332 /* ISA_C is not upwardly compatible with ISA_B. */
333 #define FL_FOR_isa_c (FL_FOR_isa_a | FL_ISA_C | FL_CF_USP)
337 /* Traditional 68000 instruction sets. */
343 /* ColdFire instruction set variants. */
351 /* Information about one of the -march, -mcpu or -mtune arguments. */
352 struct m68k_target_selection
354 /* The argument being described. */
357 /* For -mcpu, this is the device selected by the option.
358 For -mtune and -march, it is a representative device
359 for the microarchitecture or ISA respectively. */
360 enum target_device device
;
362 /* The M68K_DEVICE fields associated with DEVICE. See the comment
363 in m68k-devices.def for details. FAMILY is only valid for -mcpu. */
365 enum uarch_type microarch
;
370 /* A list of all devices in m68k-devices.def. Used for -mcpu selection. */
371 static const struct m68k_target_selection all_devices
[] =
373 #define M68K_DEVICE(NAME,ENUM_VALUE,FAMILY,MULTILIB,MICROARCH,ISA,FLAGS) \
374 { NAME, ENUM_VALUE, FAMILY, u##MICROARCH, ISA, FLAGS | FL_FOR_##ISA },
375 #include "m68k-devices.def"
377 { NULL
, unk_device
, NULL
, unk_arch
, isa_max
, 0 }
380 /* A list of all ISAs, mapping each one to a representative device.
381 Used for -march selection. */
382 static const struct m68k_target_selection all_isas
[] =
384 { "68000", m68000
, NULL
, u68000
, isa_00
, FL_FOR_isa_00
},
385 { "68010", m68010
, NULL
, u68010
, isa_10
, FL_FOR_isa_10
},
386 { "68020", m68020
, NULL
, u68020
, isa_20
, FL_FOR_isa_20
},
387 { "68030", m68030
, NULL
, u68030
, isa_20
, FL_FOR_isa_20
},
388 { "68040", m68040
, NULL
, u68040
, isa_40
, FL_FOR_isa_40
},
389 { "68060", m68060
, NULL
, u68060
, isa_40
, FL_FOR_isa_40
},
390 { "cpu32", cpu32
, NULL
, ucpu32
, isa_20
, FL_FOR_isa_cpu32
},
391 { "isaa", mcf5206e
, NULL
, ucfv2
, isa_a
, (FL_FOR_isa_a
393 { "isaaplus", mcf5271
, NULL
, ucfv2
, isa_aplus
, (FL_FOR_isa_aplus
395 { "isab", mcf5407
, NULL
, ucfv4
, isa_b
, FL_FOR_isa_b
},
396 { "isac", unk_device
, NULL
, ucfv4
, isa_c
, (FL_FOR_isa_c
398 { NULL
, unk_device
, NULL
, unk_arch
, isa_max
, 0 }
401 /* A list of all microarchitectures, mapping each one to a representative
402 device. Used for -mtune selection. */
403 static const struct m68k_target_selection all_microarchs
[] =
405 { "68000", m68000
, NULL
, u68000
, isa_00
, FL_FOR_isa_00
},
406 { "68010", m68010
, NULL
, u68010
, isa_10
, FL_FOR_isa_10
},
407 { "68020", m68020
, NULL
, u68020
, isa_20
, FL_FOR_isa_20
},
408 { "68020-40", m68020
, NULL
, u68020_40
, isa_20
, FL_FOR_isa_20
},
409 { "68020-60", m68020
, NULL
, u68020_60
, isa_20
, FL_FOR_isa_20
},
410 { "68030", m68030
, NULL
, u68030
, isa_20
, FL_FOR_isa_20
},
411 { "68040", m68040
, NULL
, u68040
, isa_40
, FL_FOR_isa_40
},
412 { "68060", m68060
, NULL
, u68060
, isa_40
, FL_FOR_isa_40
},
413 { "cpu32", cpu32
, NULL
, ucpu32
, isa_20
, FL_FOR_isa_cpu32
},
414 { "cfv1", mcf51qe
, NULL
, ucfv1
, isa_c
, FL_FOR_isa_c
},
415 { "cfv2", mcf5206
, NULL
, ucfv2
, isa_a
, FL_FOR_isa_a
},
416 { "cfv3", mcf5307
, NULL
, ucfv3
, isa_a
, (FL_FOR_isa_a
418 { "cfv4", mcf5407
, NULL
, ucfv4
, isa_b
, FL_FOR_isa_b
},
419 { "cfv4e", mcf547x
, NULL
, ucfv4e
, isa_b
, (FL_FOR_isa_b
423 { NULL
, unk_device
, NULL
, unk_arch
, isa_max
, 0 }
426 /* The entries associated with the -mcpu, -march and -mtune settings,
427 or null for options that have not been used. */
428 const struct m68k_target_selection
*m68k_cpu_entry
;
429 const struct m68k_target_selection
*m68k_arch_entry
;
430 const struct m68k_target_selection
*m68k_tune_entry
;
432 /* Which CPU we are generating code for. */
433 enum target_device m68k_cpu
;
435 /* Which microarchitecture to tune for. */
436 enum uarch_type m68k_tune
;
438 /* Which FPU to use. */
439 enum fpu_type m68k_fpu
;
441 /* The set of FL_* flags that apply to the target processor. */
442 unsigned int m68k_cpu_flags
;
444 /* The set of FL_* flags that apply to the processor to be tuned for. */
445 unsigned int m68k_tune_flags
;
447 /* Asm templates for calling or jumping to an arbitrary symbolic address,
448 or NULL if such calls or jumps are not supported. The address is held
450 const char *m68k_symbolic_call
;
451 const char *m68k_symbolic_jump
;
453 /* Enum variable that corresponds to m68k_symbolic_call values. */
454 enum M68K_SYMBOLIC_CALL m68k_symbolic_call_var
;
457 /* See whether TABLE has an entry with name NAME. Return true and
458 store the entry in *ENTRY if so, otherwise return false and
459 leave *ENTRY alone. */
462 m68k_find_selection (const struct m68k_target_selection
**entry
,
463 const struct m68k_target_selection
*table
,
/* Linear scan; TABLE is terminated by an entry whose name is NULL.  */
468 for (i
= 0; table
[i
].name
; i
++)
469 if (strcmp (table
[i
].name
, name
) == 0)
/* NOTE(review): the statements that store TABLE + I into *ENTRY and
   the true/false returns were dropped by extraction -- see the
   original file.  */
477 /* Implement TARGET_HANDLE_OPTION. */
480 m68k_handle_option (struct gcc_options
*opts
, struct gcc_options
*opts_set
,
481 const struct cl_decoded_option
*decoded
,
482 location_t loc ATTRIBUTE_UNUSED
)
484 size_t code
= decoded
->opt_index
;
485 const char *arg
= decoded
->arg
;
486 int value
= decoded
->value
;
488 gcc_assert (opts
== &global_options
);
489 gcc_assert (opts_set
== &global_options_set
);
494 return m68k_find_selection (&m68k_arch_entry
, all_isas
, arg
);
497 return m68k_find_selection (&m68k_cpu_entry
, all_devices
, arg
);
500 return m68k_find_selection (&m68k_tune_entry
, all_microarchs
, arg
);
503 return m68k_find_selection (&m68k_cpu_entry
, all_devices
, "5206");
506 return m68k_find_selection (&m68k_cpu_entry
, all_devices
, "5206e");
509 return m68k_find_selection (&m68k_cpu_entry
, all_devices
, "528x");
512 return m68k_find_selection (&m68k_cpu_entry
, all_devices
, "5307");
515 return m68k_find_selection (&m68k_cpu_entry
, all_devices
, "5407");
518 return m68k_find_selection (&m68k_cpu_entry
, all_devices
, "547x");
522 return m68k_find_selection (&m68k_cpu_entry
, all_devices
, "68000");
525 return m68k_find_selection (&m68k_cpu_entry
, all_devices
, "68010");
529 return m68k_find_selection (&m68k_cpu_entry
, all_devices
, "68020");
532 return (m68k_find_selection (&m68k_tune_entry
, all_microarchs
,
534 && m68k_find_selection (&m68k_cpu_entry
, all_devices
, "68020"));
537 return (m68k_find_selection (&m68k_tune_entry
, all_microarchs
,
539 && m68k_find_selection (&m68k_cpu_entry
, all_devices
, "68020"));
542 return m68k_find_selection (&m68k_cpu_entry
, all_devices
, "68030");
545 return m68k_find_selection (&m68k_cpu_entry
, all_devices
, "68040");
548 return m68k_find_selection (&m68k_cpu_entry
, all_devices
, "68060");
551 return m68k_find_selection (&m68k_cpu_entry
, all_devices
, "68302");
555 return m68k_find_selection (&m68k_cpu_entry
, all_devices
, "68332");
557 case OPT_mshared_library_id_
:
558 if (value
> MAX_LIBRARY_ID
)
559 error ("-mshared-library-id=%s is not between 0 and %d",
560 arg
, MAX_LIBRARY_ID
);
564 asprintf (&tmp
, "%d", (value
* -4) - 4);
565 m68k_library_id_string
= tmp
;
574 /* Implement TARGET_OPTION_OVERRIDE. */
577 m68k_option_override (void)
579 const struct m68k_target_selection
*entry
;
580 unsigned long target_mask
;
588 -march=ARCH should generate code that runs any processor
589 implementing architecture ARCH. -mcpu=CPU should override -march
590 and should generate code that runs on processor CPU, making free
591 use of any instructions that CPU understands. -mtune=UARCH applies
592 on top of -mcpu or -march and optimizes the code for UARCH. It does
593 not change the target architecture. */
596 /* Complain if the -march setting is for a different microarchitecture,
597 or includes flags that the -mcpu setting doesn't. */
599 && (m68k_arch_entry
->microarch
!= m68k_cpu_entry
->microarch
600 || (m68k_arch_entry
->flags
& ~m68k_cpu_entry
->flags
) != 0))
601 warning (0, "-mcpu=%s conflicts with -march=%s",
602 m68k_cpu_entry
->name
, m68k_arch_entry
->name
);
604 entry
= m68k_cpu_entry
;
607 entry
= m68k_arch_entry
;
610 entry
= all_devices
+ TARGET_CPU_DEFAULT
;
612 m68k_cpu_flags
= entry
->flags
;
614 /* Use the architecture setting to derive default values for
618 /* ColdFire is lenient about alignment. */
619 if (!TARGET_COLDFIRE
)
620 target_mask
|= MASK_STRICT_ALIGNMENT
;
622 if ((m68k_cpu_flags
& FL_BITFIELD
) != 0)
623 target_mask
|= MASK_BITFIELD
;
624 if ((m68k_cpu_flags
& FL_CF_HWDIV
) != 0)
625 target_mask
|= MASK_CF_HWDIV
;
626 if ((m68k_cpu_flags
& (FL_68881
| FL_CF_FPU
)) != 0)
627 target_mask
|= MASK_HARD_FLOAT
;
628 target_flags
|= target_mask
& ~target_flags_explicit
;
630 /* Set the directly-usable versions of the -mcpu and -mtune settings. */
631 m68k_cpu
= entry
->device
;
634 m68k_tune
= m68k_tune_entry
->microarch
;
635 m68k_tune_flags
= m68k_tune_entry
->flags
;
637 #ifdef M68K_DEFAULT_TUNE
638 else if (!m68k_cpu_entry
&& !m68k_arch_entry
)
640 enum target_device dev
;
641 dev
= all_microarchs
[M68K_DEFAULT_TUNE
].device
;
642 m68k_tune_flags
= all_devices
[dev
]->flags
;
647 m68k_tune
= entry
->microarch
;
648 m68k_tune_flags
= entry
->flags
;
651 /* Set the type of FPU. */
652 m68k_fpu
= (!TARGET_HARD_FLOAT
? FPUTYPE_NONE
653 : (m68k_cpu_flags
& FL_COLDFIRE
) != 0 ? FPUTYPE_COLDFIRE
656 /* Sanity check to ensure that msep-data and mid-shared-library are not
657 * both specified together. Doing so simply doesn't make sense.
659 if (TARGET_SEP_DATA
&& TARGET_ID_SHARED_LIBRARY
)
660 error ("cannot specify both -msep-data and -mid-shared-library");
662 /* If we're generating code for a separate A5 relative data segment,
663 * we've got to enable -fPIC as well. This might be relaxable to
664 * -fpic but it hasn't been tested properly.
666 if (TARGET_SEP_DATA
|| TARGET_ID_SHARED_LIBRARY
)
669 /* -mpcrel -fPIC uses 32-bit pc-relative displacements. Raise an
670 error if the target does not support them. */
671 if (TARGET_PCREL
&& !TARGET_68020
&& flag_pic
== 2)
672 error ("-mpcrel -fPIC is not currently supported on selected cpu");
674 /* ??? A historic way of turning on pic, or is this intended to
675 be an embedded thing that doesn't have the same name binding
676 significance that it does on hosted ELF systems? */
677 if (TARGET_PCREL
&& flag_pic
== 0)
682 m68k_symbolic_call_var
= M68K_SYMBOLIC_CALL_JSR
;
684 m68k_symbolic_jump
= "jra %a0";
686 else if (TARGET_ID_SHARED_LIBRARY
)
687 /* All addresses must be loaded from the GOT. */
689 else if (TARGET_68020
|| TARGET_ISAB
|| TARGET_ISAC
)
692 m68k_symbolic_call_var
= M68K_SYMBOLIC_CALL_BSR_C
;
694 m68k_symbolic_call_var
= M68K_SYMBOLIC_CALL_BSR_P
;
697 /* No unconditional long branch */;
698 else if (TARGET_PCREL
)
699 m68k_symbolic_jump
= "bra%.l %c0";
701 m68k_symbolic_jump
= "bra%.l %p0";
702 /* Turn off function cse if we are doing PIC. We always want
703 function call to be done as `bsr foo@PLTPC'. */
704 /* ??? It's traditional to do this for -mpcrel too, but it isn't
705 clear how intentional that is. */
706 flag_no_function_cse
= 1;
709 switch (m68k_symbolic_call_var
)
711 case M68K_SYMBOLIC_CALL_JSR
:
712 m68k_symbolic_call
= "jsr %a0";
715 case M68K_SYMBOLIC_CALL_BSR_C
:
716 m68k_symbolic_call
= "bsr%.l %c0";
719 case M68K_SYMBOLIC_CALL_BSR_P
:
720 m68k_symbolic_call
= "bsr%.l %p0";
723 case M68K_SYMBOLIC_CALL_NONE
:
724 gcc_assert (m68k_symbolic_call
== NULL
);
731 #ifndef ASM_OUTPUT_ALIGN_WITH_NOP
732 if (align_labels
> 2)
734 warning (0, "-falign-labels=%d is not supported", align_labels
);
739 warning (0, "-falign-loops=%d is not supported", align_loops
);
744 SUBTARGET_OVERRIDE_OPTIONS
;
746 /* Setup scheduling options. */
748 m68k_sched_cpu
= CPU_CFV1
;
750 m68k_sched_cpu
= CPU_CFV2
;
752 m68k_sched_cpu
= CPU_CFV3
;
754 m68k_sched_cpu
= CPU_CFV4
;
757 m68k_sched_cpu
= CPU_UNKNOWN
;
758 flag_schedule_insns
= 0;
759 flag_schedule_insns_after_reload
= 0;
760 flag_modulo_sched
= 0;
763 if (m68k_sched_cpu
!= CPU_UNKNOWN
)
765 if ((m68k_cpu_flags
& (FL_CF_EMAC
| FL_CF_EMAC_B
)) != 0)
766 m68k_sched_mac
= MAC_CF_EMAC
;
767 else if ((m68k_cpu_flags
& FL_CF_MAC
) != 0)
768 m68k_sched_mac
= MAC_CF_MAC
;
770 m68k_sched_mac
= MAC_NO
;
774 /* Generate a macro of the form __mPREFIX_cpu_NAME, where PREFIX is the
775 given argument and NAME is the argument passed to -mcpu. Return NULL
776 if -mcpu was not passed. */
779 m68k_cpp_cpu_ident (const char *prefix
)
783 return concat ("__m", prefix
, "_cpu_", m68k_cpu_entry
->name
, NULL
);
786 /* Generate a macro of the form __mPREFIX_family_NAME, where PREFIX is the
787 given argument and NAME is the name of the representative device for
788 the -mcpu argument's family. Return NULL if -mcpu was not passed. */
791 m68k_cpp_cpu_family (const char *prefix
)
795 return concat ("__m", prefix
, "_family_", m68k_cpu_entry
->family
, NULL
);
798 /* Return m68k_fk_interrupt_handler if FUNC has an "interrupt" or
799 "interrupt_handler" attribute and interrupt_thread if FUNC has an
800 "interrupt_thread" attribute. Otherwise, return
801 m68k_fk_normal_function. */
803 enum m68k_function_kind
804 m68k_get_function_kind (tree func
)
808 gcc_assert (TREE_CODE (func
) == FUNCTION_DECL
);
810 a
= lookup_attribute ("interrupt", DECL_ATTRIBUTES (func
));
812 return m68k_fk_interrupt_handler
;
814 a
= lookup_attribute ("interrupt_handler", DECL_ATTRIBUTES (func
));
816 return m68k_fk_interrupt_handler
;
818 a
= lookup_attribute ("interrupt_thread", DECL_ATTRIBUTES (func
));
820 return m68k_fk_interrupt_thread
;
822 return m68k_fk_normal_function
;
825 /* Handle an attribute requiring a FUNCTION_DECL; arguments as in
826 struct attribute_spec.handler. */
828 m68k_handle_fndecl_attribute (tree
*node
, tree name
,
829 tree args ATTRIBUTE_UNUSED
,
830 int flags ATTRIBUTE_UNUSED
,
/* Reject the attribute when it is applied to anything other than a
   function declaration.  */
833 if (TREE_CODE (*node
) != FUNCTION_DECL
)
835 warning (OPT_Wattributes
, "%qE attribute only applies to functions",
837 *no_add_attrs
= true;
/* Only one of the interrupt-style attributes may appear on a given
   function.  */
840 if (m68k_get_function_kind (*node
) != m68k_fk_normal_function
)
842 error ("multiple interrupt attributes not allowed");
843 *no_add_attrs
= true;
/* NOTE(review): the first half of the condition below was dropped by
   extraction; presumably it tests the target selection, since
   interrupt_thread is rejected for non-fido -- confirm against the
   original file.  */
847 && !strcmp (IDENTIFIER_POINTER (name
), "interrupt_thread"))
849 error ("interrupt_thread is available only on fido");
850 *no_add_attrs
= true;
/* Fill in CURRENT_FRAME (rounded frame size, saved-register masks and
   counts, and register-save offsets) for the function currently being
   compiled.  */
857 m68k_compute_frame_layout (void)
861 enum m68k_function_kind func_kind
=
862 m68k_get_function_kind (current_function_decl
);
863 bool interrupt_handler
= func_kind
== m68k_fk_interrupt_handler
;
864 bool interrupt_thread
= func_kind
== m68k_fk_interrupt_thread
;
866 /* Only compute the frame once per function.
867 Don't cache information until reload has been completed. */
868 if (current_frame
.funcdef_no
== current_function_funcdef_no
/* Round the frame size up to a multiple of 4 bytes.  */
872 current_frame
.size
= (get_frame_size () + 3) & -4;
876 /* Interrupt thread does not need to save any register. */
877 if (!interrupt_thread
)
/* Registers 0-15 are the data and address registers.  */
878 for (regno
= 0; regno
< 16; regno
++)
879 if (m68k_save_reg (regno
, interrupt_handler
))
881 mask
|= 1 << (regno
- D0_REG
);
/* Each saved data/address register occupies 4 bytes.  */
884 current_frame
.offset
= saved
* 4;
885 current_frame
.reg_no
= saved
;
886 current_frame
.reg_mask
= mask
;
888 current_frame
.foffset
= 0;
890 if (TARGET_HARD_FLOAT
)
892 /* Interrupt thread does not need to save any register. */
893 if (!interrupt_thread
)
/* Registers 16-23 are the floating-point registers.  */
894 for (regno
= 16; regno
< 24; regno
++)
895 if (m68k_save_reg (regno
, interrupt_handler
))
897 mask
|= 1 << (regno
- FP0_REG
);
900 current_frame
.foffset
= saved
* TARGET_FP_REG_SIZE
;
901 current_frame
.offset
+= current_frame
.foffset
;
903 current_frame
.fpu_no
= saved
;
904 current_frame
.fpu_mask
= mask
;
906 /* Remember what function this frame refers to. */
907 current_frame
.funcdef_no
= current_function_funcdef_no
;
910 /* Worker function for TARGET_CAN_ELIMINATE. */
913 m68k_can_eliminate (const int from ATTRIBUTE_UNUSED
, const int to
)
915 return (to
== STACK_POINTER_REGNUM
? ! frame_pointer_needed
: true);
/* Return the offset between the two named elimination registers, as
   required by INITIAL_ELIMINATION_OFFSET.  */
919 m68k_initial_elimination_offset (int from
, int to
)
922 /* The arg pointer points 8 bytes before the start of the arguments,
923 as defined by FIRST_PARM_OFFSET. This makes it coincident with the
924 frame pointer in most frames. */
925 argptr_offset
= frame_pointer_needed
? 0 : UNITS_PER_WORD
;
926 if (from
== ARG_POINTER_REGNUM
&& to
== FRAME_POINTER_REGNUM
)
927 return argptr_offset
;
/* Make sure CURRENT_FRAME is up to date before reading it.  */
929 m68k_compute_frame_layout ();
931 gcc_assert (to
== STACK_POINTER_REGNUM
);
934 case ARG_POINTER_REGNUM
:
935 return current_frame
.offset
+ current_frame
.size
- argptr_offset
;
936 case FRAME_POINTER_REGNUM
:
937 return current_frame
.offset
+ current_frame
.size
;
943 /* Refer to the array `regs_ever_live' to determine which registers
944 to save; `regs_ever_live[I]' is nonzero if register number I
945 is ever used in the function. This function is responsible for
946 knowing which registers should not be saved even if used.
947 Return true if we need to save REGNO. */
950 m68k_save_reg (unsigned int regno
, bool interrupt_handler
)
/* The PIC register needs special treatment when PIC code is being
   generated.  */
952 if (flag_pic
&& regno
== PIC_REG
)
954 if (crtl
->saves_all_registers
)
956 if (crtl
->uses_pic_offset_table
)
958 /* Reload may introduce constant pool references into a function
959 that thitherto didn't need a PIC register. Note that the test
960 above will not catch that case because we will only set
961 crtl->uses_pic_offset_table when emitting
962 the address reloads. */
963 if (crtl
->uses_const_pool
)
/* Functions that call eh_return must preserve the EH return data
   registers.  */
967 if (crtl
->calls_eh_return
)
972 unsigned int test
= EH_RETURN_DATA_REGNO (i
);
973 if (test
== INVALID_REGNUM
)
980 /* Fixed regs we never touch. */
981 if (fixed_regs
[regno
])
984 /* The frame pointer (if it is such) is handled specially. */
985 if (regno
== FRAME_POINTER_REGNUM
&& frame_pointer_needed
)
988 /* Interrupt handlers must also save call_used_regs
989 if they are live or when calling nested functions. */
990 if (interrupt_handler
)
992 if (df_regs_ever_live_p (regno
))
995 if (!current_function_is_leaf
&& call_used_regs
[regno
])
999 /* Never need to save registers that aren't touched. */
1000 if (!df_regs_ever_live_p (regno
))
1003 /* Otherwise save everything that isn't call-clobbered. */
1004 return !call_used_regs
[regno
];
1007 /* Emit RTL for a MOVEM or FMOVEM instruction. BASE + OFFSET represents
1008 the lowest memory address. COUNT is the number of registers to be
1009 moved, with register REGNO + I being moved if bit I of MASK is set.
1010 STORE_P specifies the direction of the move and ADJUST_STACK_P says
1011 whether or not this is pre-decrement (if STORE_P) or post-increment
1012 (if !STORE_P) operation. */
1015 m68k_emit_movem (rtx base
, HOST_WIDE_INT offset
,
1016 unsigned int count
, unsigned int regno
,
1017 unsigned int mask
, bool store_p
, bool adjust_stack_p
)
1020 rtx body
, addr
, src
, operands
[2];
1021 enum machine_mode mode
;
/* The PARALLEL holds one SET per moved register, plus one extra SET
   for the stack adjustment when ADJUST_STACK_P is true.  */
1023 body
= gen_rtx_PARALLEL (VOIDmode
, rtvec_alloc (adjust_stack_p
+ count
));
/* Every register in the group is moved in the raw mode of REGNO.  */
1024 mode
= reg_raw_mode
[regno
];
/* Stack adjustment: COUNT * register size, negative when storing
   (pre-decrement) and positive when loading (post-increment).  */
1029 src
= plus_constant (base
, (count
1030 * GET_MODE_SIZE (mode
)
1031 * (HOST_WIDE_INT
) (store_p
? -1 : 1)));
1032 XVECEXP (body
, 0, i
++) = gen_rtx_SET (VOIDmode
, base
, src
);
/* Walk MASK from bit 0 upwards, emitting one SET per set bit.
   OPERANDS[0] is the destination and OPERANDS[1] the source, so the
   !STORE_P/STORE_P indexing picks memory-or-register accordingly.  */
1035 for (; mask
!= 0; mask
>>= 1, regno
++)
1038 addr
= plus_constant (base
, offset
);
1039 operands
[!store_p
] = gen_frame_mem (mode
, addr
);
1040 operands
[store_p
] = gen_rtx_REG (mode
, regno
);
1041 XVECEXP (body
, 0, i
++)
1042 = gen_rtx_SET (VOIDmode
, operands
[0], operands
[1]);
1043 offset
+= GET_MODE_SIZE (mode
);
/* All PARALLEL slots must have been filled.  */
1045 gcc_assert (i
== XVECLEN (body
, 0));
1047 return emit_insn (body
);
1050 /* Make INSN a frame-related instruction. */
1053 m68k_set_frame_related (rtx insn
)
1058 RTX_FRAME_RELATED_P (insn
) = 1;
1059 body
= PATTERN (insn
);
1060 if (GET_CODE (body
) == PARALLEL
)
1061 for (i
= 0; i
< XVECLEN (body
, 0); i
++)
1062 RTX_FRAME_RELATED_P (XVECEXP (body
, 0, i
)) = 1;
1065 /* Emit RTL for the "prologue" define_expand. */
1068 m68k_expand_prologue (void)
1070 HOST_WIDE_INT fsize_with_regs
;
1071 rtx limit
, src
, dest
;
1073 m68k_compute_frame_layout ();
1075 /* If the stack limit is a symbol, we can check it here,
1076 before actually allocating the space. */
1077 if (crtl
->limit_stack
1078 && GET_CODE (stack_limit_rtx
) == SYMBOL_REF
)
1080 limit
= plus_constant (stack_limit_rtx
, current_frame
.size
+ 4);
1081 if (!LEGITIMATE_CONSTANT_P (limit
))
1083 emit_move_insn (gen_rtx_REG (Pmode
, D0_REG
), limit
);
1084 limit
= gen_rtx_REG (Pmode
, D0_REG
);
1086 emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode
,
1087 stack_pointer_rtx
, limit
),
1088 stack_pointer_rtx
, limit
,
1092 fsize_with_regs
= current_frame
.size
;
1093 if (TARGET_COLDFIRE
)
1095 /* ColdFire's move multiple instructions do not allow pre-decrement
1096 addressing. Add the size of movem saves to the initial stack
1097 allocation instead. */
1098 if (current_frame
.reg_no
>= MIN_MOVEM_REGS
)
1099 fsize_with_regs
+= current_frame
.reg_no
* GET_MODE_SIZE (SImode
);
1100 if (current_frame
.fpu_no
>= MIN_FMOVEM_REGS
)
1101 fsize_with_regs
+= current_frame
.fpu_no
* GET_MODE_SIZE (DFmode
);
1104 if (frame_pointer_needed
)
1106 if (fsize_with_regs
== 0 && TUNE_68040
)
1108 /* On the 68040, two separate moves are faster than link.w 0. */
1109 dest
= gen_frame_mem (Pmode
,
1110 gen_rtx_PRE_DEC (Pmode
, stack_pointer_rtx
));
1111 m68k_set_frame_related (emit_move_insn (dest
, frame_pointer_rtx
));
1112 m68k_set_frame_related (emit_move_insn (frame_pointer_rtx
,
1113 stack_pointer_rtx
));
1115 else if (fsize_with_regs
< 0x8000 || TARGET_68020
)
1116 m68k_set_frame_related
1117 (emit_insn (gen_link (frame_pointer_rtx
,
1118 GEN_INT (-4 - fsize_with_regs
))));
1121 m68k_set_frame_related
1122 (emit_insn (gen_link (frame_pointer_rtx
, GEN_INT (-4))));
1123 m68k_set_frame_related
1124 (emit_insn (gen_addsi3 (stack_pointer_rtx
,
1126 GEN_INT (-fsize_with_regs
))));
1129 /* If the frame pointer is needed, emit a special barrier that
1130 will prevent the scheduler from moving stores to the frame
1131 before the stack adjustment. */
1132 emit_insn (gen_stack_tie (stack_pointer_rtx
, frame_pointer_rtx
));
1134 else if (fsize_with_regs
!= 0)
1135 m68k_set_frame_related
1136 (emit_insn (gen_addsi3 (stack_pointer_rtx
,
1138 GEN_INT (-fsize_with_regs
))));
1140 if (current_frame
.fpu_mask
)
1142 gcc_assert (current_frame
.fpu_no
>= MIN_FMOVEM_REGS
);
1144 m68k_set_frame_related
1145 (m68k_emit_movem (stack_pointer_rtx
,
1146 current_frame
.fpu_no
* -GET_MODE_SIZE (XFmode
),
1147 current_frame
.fpu_no
, FP0_REG
,
1148 current_frame
.fpu_mask
, true, true));
1153 /* If we're using moveml to save the integer registers,
1154 the stack pointer will point to the bottom of the moveml
1155 save area. Find the stack offset of the first FP register. */
1156 if (current_frame
.reg_no
< MIN_MOVEM_REGS
)
1159 offset
= current_frame
.reg_no
* GET_MODE_SIZE (SImode
);
1160 m68k_set_frame_related
1161 (m68k_emit_movem (stack_pointer_rtx
, offset
,
1162 current_frame
.fpu_no
, FP0_REG
,
1163 current_frame
.fpu_mask
, true, false));
1167 /* If the stack limit is not a symbol, check it here.
1168 This has the disadvantage that it may be too late... */
1169 if (crtl
->limit_stack
)
1171 if (REG_P (stack_limit_rtx
))
1172 emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode
, stack_pointer_rtx
,
1174 stack_pointer_rtx
, stack_limit_rtx
,
1177 else if (GET_CODE (stack_limit_rtx
) != SYMBOL_REF
)
1178 warning (0, "stack limit expression is not supported");
1181 if (current_frame
.reg_no
< MIN_MOVEM_REGS
)
1183 /* Store each register separately in the same order moveml does. */
1186 for (i
= 16; i
-- > 0; )
1187 if (current_frame
.reg_mask
& (1 << i
))
1189 src
= gen_rtx_REG (SImode
, D0_REG
+ i
);
1190 dest
= gen_frame_mem (SImode
,
1191 gen_rtx_PRE_DEC (Pmode
, stack_pointer_rtx
));
1192 m68k_set_frame_related (emit_insn (gen_movsi (dest
, src
)));
1197 if (TARGET_COLDFIRE
)
1198 /* The required register save space has already been allocated.
1199 The first register should be stored at (%sp). */
1200 m68k_set_frame_related
1201 (m68k_emit_movem (stack_pointer_rtx
, 0,
1202 current_frame
.reg_no
, D0_REG
,
1203 current_frame
.reg_mask
, true, false));
1205 m68k_set_frame_related
1206 (m68k_emit_movem (stack_pointer_rtx
,
1207 current_frame
.reg_no
* -GET_MODE_SIZE (SImode
),
1208 current_frame
.reg_no
, D0_REG
,
1209 current_frame
.reg_mask
, true, true));
1212 if (!TARGET_SEP_DATA
1213 && crtl
->uses_pic_offset_table
)
1214 emit_insn (gen_load_got (pic_offset_table_rtx
));
1217 /* Return true if a simple (return) instruction is sufficient for this
1218 instruction (i.e. if no epilogue is needed). */
1221 m68k_use_return_insn (void)
1223 if (!reload_completed
|| frame_pointer_needed
|| get_frame_size () != 0)
1226 m68k_compute_frame_layout ();
1227 return current_frame
.offset
== 0;
1230 /* Emit RTL for the "epilogue" or "sibcall_epilogue" define_expand;
1231 SIBCALL_P says which.
1233 The function epilogue should not depend on the current stack pointer!
1234 It should use the frame pointer only, if there is a frame pointer.
1235 This is mandatory because of alloca; we also take advantage of it to
1236 omit stack adjustments before returning. */
1239 m68k_expand_epilogue (bool sibcall_p
)
1241 HOST_WIDE_INT fsize
, fsize_with_regs
;
1242 bool big
, restore_from_sp
;
1244 m68k_compute_frame_layout ();
1246 fsize
= current_frame
.size
;
1248 restore_from_sp
= false;
1250 /* FIXME : current_function_is_leaf below is too strong.
1251 What we really need to know there is if there could be pending
1252 stack adjustment needed at that point. */
1253 restore_from_sp
= (!frame_pointer_needed
1254 || (!cfun
->calls_alloca
1255 && current_function_is_leaf
));
1257 /* fsize_with_regs is the size we need to adjust the sp when
1258 popping the frame. */
1259 fsize_with_regs
= fsize
;
1260 if (TARGET_COLDFIRE
&& restore_from_sp
)
1262 /* ColdFire's move multiple instructions do not allow post-increment
1263 addressing. Add the size of movem loads to the final deallocation
1265 if (current_frame
.reg_no
>= MIN_MOVEM_REGS
)
1266 fsize_with_regs
+= current_frame
.reg_no
* GET_MODE_SIZE (SImode
);
1267 if (current_frame
.fpu_no
>= MIN_FMOVEM_REGS
)
1268 fsize_with_regs
+= current_frame
.fpu_no
* GET_MODE_SIZE (DFmode
);
1271 if (current_frame
.offset
+ fsize
>= 0x8000
1273 && (current_frame
.reg_mask
|| current_frame
.fpu_mask
))
1276 && (current_frame
.reg_no
>= MIN_MOVEM_REGS
1277 || current_frame
.fpu_no
>= MIN_FMOVEM_REGS
))
1279 /* ColdFire's move multiple instructions do not support the
1280 (d8,Ax,Xi) addressing mode, so we're as well using a normal
1281 stack-based restore. */
1282 emit_move_insn (gen_rtx_REG (Pmode
, A1_REG
),
1283 GEN_INT (-(current_frame
.offset
+ fsize
)));
1284 emit_insn (gen_addsi3 (stack_pointer_rtx
,
1285 gen_rtx_REG (Pmode
, A1_REG
),
1286 frame_pointer_rtx
));
1287 restore_from_sp
= true;
1291 emit_move_insn (gen_rtx_REG (Pmode
, A1_REG
), GEN_INT (-fsize
));
1297 if (current_frame
.reg_no
< MIN_MOVEM_REGS
)
1299 /* Restore each register separately in the same order moveml does. */
1301 HOST_WIDE_INT offset
;
1303 offset
= current_frame
.offset
+ fsize
;
1304 for (i
= 0; i
< 16; i
++)
1305 if (current_frame
.reg_mask
& (1 << i
))
1311 /* Generate the address -OFFSET(%fp,%a1.l). */
1312 addr
= gen_rtx_REG (Pmode
, A1_REG
);
1313 addr
= gen_rtx_PLUS (Pmode
, addr
, frame_pointer_rtx
);
1314 addr
= plus_constant (addr
, -offset
);
1316 else if (restore_from_sp
)
1317 addr
= gen_rtx_POST_INC (Pmode
, stack_pointer_rtx
);
1319 addr
= plus_constant (frame_pointer_rtx
, -offset
);
1320 emit_move_insn (gen_rtx_REG (SImode
, D0_REG
+ i
),
1321 gen_frame_mem (SImode
, addr
));
1322 offset
-= GET_MODE_SIZE (SImode
);
1325 else if (current_frame
.reg_mask
)
1328 m68k_emit_movem (gen_rtx_PLUS (Pmode
,
1329 gen_rtx_REG (Pmode
, A1_REG
),
1331 -(current_frame
.offset
+ fsize
),
1332 current_frame
.reg_no
, D0_REG
,
1333 current_frame
.reg_mask
, false, false);
1334 else if (restore_from_sp
)
1335 m68k_emit_movem (stack_pointer_rtx
, 0,
1336 current_frame
.reg_no
, D0_REG
,
1337 current_frame
.reg_mask
, false,
1340 m68k_emit_movem (frame_pointer_rtx
,
1341 -(current_frame
.offset
+ fsize
),
1342 current_frame
.reg_no
, D0_REG
,
1343 current_frame
.reg_mask
, false, false);
1346 if (current_frame
.fpu_no
> 0)
1349 m68k_emit_movem (gen_rtx_PLUS (Pmode
,
1350 gen_rtx_REG (Pmode
, A1_REG
),
1352 -(current_frame
.foffset
+ fsize
),
1353 current_frame
.fpu_no
, FP0_REG
,
1354 current_frame
.fpu_mask
, false, false);
1355 else if (restore_from_sp
)
1357 if (TARGET_COLDFIRE
)
1361 /* If we used moveml to restore the integer registers, the
1362 stack pointer will still point to the bottom of the moveml
1363 save area. Find the stack offset of the first FP
1365 if (current_frame
.reg_no
< MIN_MOVEM_REGS
)
1368 offset
= current_frame
.reg_no
* GET_MODE_SIZE (SImode
);
1369 m68k_emit_movem (stack_pointer_rtx
, offset
,
1370 current_frame
.fpu_no
, FP0_REG
,
1371 current_frame
.fpu_mask
, false, false);
1374 m68k_emit_movem (stack_pointer_rtx
, 0,
1375 current_frame
.fpu_no
, FP0_REG
,
1376 current_frame
.fpu_mask
, false, true);
1379 m68k_emit_movem (frame_pointer_rtx
,
1380 -(current_frame
.foffset
+ fsize
),
1381 current_frame
.fpu_no
, FP0_REG
,
1382 current_frame
.fpu_mask
, false, false);
1385 if (frame_pointer_needed
)
1386 emit_insn (gen_unlink (frame_pointer_rtx
));
1387 else if (fsize_with_regs
)
1388 emit_insn (gen_addsi3 (stack_pointer_rtx
,
1390 GEN_INT (fsize_with_regs
)));
1392 if (crtl
->calls_eh_return
)
1393 emit_insn (gen_addsi3 (stack_pointer_rtx
,
1395 EH_RETURN_STACKADJ_RTX
));
1398 emit_jump_insn (gen_rtx_RETURN (VOIDmode
));
1401 /* Return true if X is a valid comparison operator for the dbcc
1404 Note it rejects floating point comparison operators.
1405 (In the future we could use Fdbcc).
1407 It also rejects some comparisons when CC_NO_OVERFLOW is set. */
1410 valid_dbcc_comparison_p_2 (rtx x
, enum machine_mode mode ATTRIBUTE_UNUSED
)
1412 switch (GET_CODE (x
))
1414 case EQ
: case NE
: case GTU
: case LTU
:
1418 /* Reject some when CC_NO_OVERFLOW is set. This may be over
1420 case GT
: case LT
: case GE
: case LE
:
1421 return ! (cc_prev_status
.flags
& CC_NO_OVERFLOW
);
1427 /* Return nonzero if flags are currently in the 68881 flag register. */
1429 flags_in_68881 (void)
1431 /* We could add support for these in the future */
1432 return cc_status
.flags
& CC_IN_68881
;
1435 /* Return true if PARALLEL contains register REGNO. */
1437 m68k_reg_present_p (const_rtx parallel
, unsigned int regno
)
1441 if (REG_P (parallel
) && REGNO (parallel
) == regno
)
1444 if (GET_CODE (parallel
) != PARALLEL
)
1447 for (i
= 0; i
< XVECLEN (parallel
, 0); ++i
)
1451 x
= XEXP (XVECEXP (parallel
, 0, i
), 0);
1452 if (REG_P (x
) && REGNO (x
) == regno
)
1459 /* Implement TARGET_FUNCTION_OK_FOR_SIBCALL_P. */
1462 m68k_ok_for_sibcall_p (tree decl
, tree exp
)
1464 enum m68k_function_kind kind
;
1466 /* We cannot use sibcalls for nested functions because we use the
1467 static chain register for indirect calls. */
1468 if (CALL_EXPR_STATIC_CHAIN (exp
))
1471 if (!VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun
->decl
))))
1473 /* Check that the return value locations are the same. For
1474 example that we aren't returning a value from the sibling in
1475 a D0 register but then need to transfer it to a A0 register. */
1479 cfun_value
= FUNCTION_VALUE (TREE_TYPE (DECL_RESULT (cfun
->decl
)),
1481 call_value
= FUNCTION_VALUE (TREE_TYPE (exp
), decl
);
1483 /* Check that the values are equal or that the result the callee
1484 function returns is superset of what the current function returns. */
1485 if (!(rtx_equal_p (cfun_value
, call_value
)
1486 || (REG_P (cfun_value
)
1487 && m68k_reg_present_p (call_value
, REGNO (cfun_value
)))))
1491 kind
= m68k_get_function_kind (current_function_decl
);
1492 if (kind
== m68k_fk_normal_function
)
1493 /* We can always sibcall from a normal function, because it's
1494 undefined if it is calling an interrupt function. */
1497 /* Otherwise we can only sibcall if the function kind is known to be
1499 if (decl
&& m68k_get_function_kind (decl
) == kind
)
1505 /* On the m68k all args are always pushed. */
1508 m68k_function_arg (CUMULATIVE_ARGS
*cum ATTRIBUTE_UNUSED
,
1509 enum machine_mode mode ATTRIBUTE_UNUSED
,
1510 const_tree type ATTRIBUTE_UNUSED
,
1511 bool named ATTRIBUTE_UNUSED
)
1517 m68k_function_arg_advance (CUMULATIVE_ARGS
*cum
, enum machine_mode mode
,
1518 const_tree type
, bool named ATTRIBUTE_UNUSED
)
1520 *cum
+= (mode
!= BLKmode
1521 ? (GET_MODE_SIZE (mode
) + 3) & ~3
1522 : (int_size_in_bytes (type
) + 3) & ~3);
1525 /* Convert X to a legitimate function call memory reference and return the
1529 m68k_legitimize_call_address (rtx x
)
1531 gcc_assert (MEM_P (x
));
1532 if (call_operand (XEXP (x
, 0), VOIDmode
))
1534 return replace_equiv_address (x
, force_reg (Pmode
, XEXP (x
, 0)));
1537 /* Likewise for sibling calls. */
1540 m68k_legitimize_sibcall_address (rtx x
)
1542 gcc_assert (MEM_P (x
));
1543 if (sibcall_operand (XEXP (x
, 0), VOIDmode
))
1546 emit_move_insn (gen_rtx_REG (Pmode
, STATIC_CHAIN_REGNUM
), XEXP (x
, 0));
1547 return replace_equiv_address (x
, gen_rtx_REG (Pmode
, STATIC_CHAIN_REGNUM
));
1550 /* Convert X to a legitimate address and return it if successful. Otherwise
1553 For the 68000, we handle X+REG by loading X into a register R and
1554 using R+REG. R will go in an address reg and indexing will be used.
1555 However, if REG is a broken-out memory address or multiplication,
1556 nothing needs to be done because REG can certainly go in an address reg. */
1559 m68k_legitimize_address (rtx x
, rtx oldx
, enum machine_mode mode
)
1561 if (m68k_tls_symbol_p (x
))
1562 return m68k_legitimize_tls_address (x
);
1564 if (GET_CODE (x
) == PLUS
)
1566 int ch
= (x
) != (oldx
);
1569 #define COPY_ONCE(Y) if (!copied) { Y = copy_rtx (Y); copied = ch = 1; }
1571 if (GET_CODE (XEXP (x
, 0)) == MULT
)
1574 XEXP (x
, 0) = force_operand (XEXP (x
, 0), 0);
1576 if (GET_CODE (XEXP (x
, 1)) == MULT
)
1579 XEXP (x
, 1) = force_operand (XEXP (x
, 1), 0);
1583 if (GET_CODE (XEXP (x
, 1)) == REG
1584 && GET_CODE (XEXP (x
, 0)) == REG
)
1586 if (TARGET_COLDFIRE_FPU
&& GET_MODE_CLASS (mode
) == MODE_FLOAT
)
1589 x
= force_operand (x
, 0);
1593 if (memory_address_p (mode
, x
))
1596 if (GET_CODE (XEXP (x
, 0)) == REG
1597 || (GET_CODE (XEXP (x
, 0)) == SIGN_EXTEND
1598 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == REG
1599 && GET_MODE (XEXP (XEXP (x
, 0), 0)) == HImode
))
1601 rtx temp
= gen_reg_rtx (Pmode
);
1602 rtx val
= force_operand (XEXP (x
, 1), 0);
1603 emit_move_insn (temp
, val
);
1606 if (TARGET_COLDFIRE_FPU
&& GET_MODE_CLASS (mode
) == MODE_FLOAT
1607 && GET_CODE (XEXP (x
, 0)) == REG
)
1608 x
= force_operand (x
, 0);
1610 else if (GET_CODE (XEXP (x
, 1)) == REG
1611 || (GET_CODE (XEXP (x
, 1)) == SIGN_EXTEND
1612 && GET_CODE (XEXP (XEXP (x
, 1), 0)) == REG
1613 && GET_MODE (XEXP (XEXP (x
, 1), 0)) == HImode
))
1615 rtx temp
= gen_reg_rtx (Pmode
);
1616 rtx val
= force_operand (XEXP (x
, 0), 0);
1617 emit_move_insn (temp
, val
);
1620 if (TARGET_COLDFIRE_FPU
&& GET_MODE_CLASS (mode
) == MODE_FLOAT
1621 && GET_CODE (XEXP (x
, 1)) == REG
)
1622 x
= force_operand (x
, 0);
1630 /* Output a dbCC; jCC sequence. Note we do not handle the
1631 floating point version of this sequence (Fdbcc). We also
1632 do not handle alternative conditions when CC_NO_OVERFLOW is
1633 set. It is assumed that valid_dbcc_comparison_p and flags_in_68881 will
1634 kick those out before we get here. */
1637 output_dbcc_and_branch (rtx
*operands
)
1639 switch (GET_CODE (operands
[3]))
1642 output_asm_insn ("dbeq %0,%l1\n\tjeq %l2", operands
);
1646 output_asm_insn ("dbne %0,%l1\n\tjne %l2", operands
);
1650 output_asm_insn ("dbgt %0,%l1\n\tjgt %l2", operands
);
1654 output_asm_insn ("dbhi %0,%l1\n\tjhi %l2", operands
);
1658 output_asm_insn ("dblt %0,%l1\n\tjlt %l2", operands
);
1662 output_asm_insn ("dbcs %0,%l1\n\tjcs %l2", operands
);
1666 output_asm_insn ("dbge %0,%l1\n\tjge %l2", operands
);
1670 output_asm_insn ("dbcc %0,%l1\n\tjcc %l2", operands
);
1674 output_asm_insn ("dble %0,%l1\n\tjle %l2", operands
);
1678 output_asm_insn ("dbls %0,%l1\n\tjls %l2", operands
);
1685 /* If the decrement is to be done in SImode, then we have
1686 to compensate for the fact that dbcc decrements in HImode. */
1687 switch (GET_MODE (operands
[0]))
1690 output_asm_insn ("clr%.w %0\n\tsubq%.l #1,%0\n\tjpl %l1", operands
);
1702 output_scc_di (rtx op
, rtx operand1
, rtx operand2
, rtx dest
)
1705 enum rtx_code op_code
= GET_CODE (op
);
1707 /* This does not produce a useful cc. */
1710 /* The m68k cmp.l instruction requires operand1 to be a reg as used
1711 below. Swap the operands and change the op if these requirements
1712 are not fulfilled. */
1713 if (GET_CODE (operand2
) == REG
&& GET_CODE (operand1
) != REG
)
1717 operand1
= operand2
;
1719 op_code
= swap_condition (op_code
);
1721 loperands
[0] = operand1
;
1722 if (GET_CODE (operand1
) == REG
)
1723 loperands
[1] = gen_rtx_REG (SImode
, REGNO (operand1
) + 1);
1725 loperands
[1] = adjust_address (operand1
, SImode
, 4);
1726 if (operand2
!= const0_rtx
)
1728 loperands
[2] = operand2
;
1729 if (GET_CODE (operand2
) == REG
)
1730 loperands
[3] = gen_rtx_REG (SImode
, REGNO (operand2
) + 1);
1732 loperands
[3] = adjust_address (operand2
, SImode
, 4);
1734 loperands
[4] = gen_label_rtx ();
1735 if (operand2
!= const0_rtx
)
1736 output_asm_insn ("cmp%.l %2,%0\n\tjne %l4\n\tcmp%.l %3,%1", loperands
);
1739 if (TARGET_68020
|| TARGET_COLDFIRE
|| ! ADDRESS_REG_P (loperands
[0]))
1740 output_asm_insn ("tst%.l %0", loperands
);
1742 output_asm_insn ("cmp%.w #0,%0", loperands
);
1744 output_asm_insn ("jne %l4", loperands
);
1746 if (TARGET_68020
|| TARGET_COLDFIRE
|| ! ADDRESS_REG_P (loperands
[1]))
1747 output_asm_insn ("tst%.l %1", loperands
);
1749 output_asm_insn ("cmp%.w #0,%1", loperands
);
1752 loperands
[5] = dest
;
1757 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "L",
1758 CODE_LABEL_NUMBER (loperands
[4]));
1759 output_asm_insn ("seq %5", loperands
);
1763 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "L",
1764 CODE_LABEL_NUMBER (loperands
[4]));
1765 output_asm_insn ("sne %5", loperands
);
1769 loperands
[6] = gen_label_rtx ();
1770 output_asm_insn ("shi %5\n\tjra %l6", loperands
);
1771 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "L",
1772 CODE_LABEL_NUMBER (loperands
[4]));
1773 output_asm_insn ("sgt %5", loperands
);
1774 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "L",
1775 CODE_LABEL_NUMBER (loperands
[6]));
1779 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "L",
1780 CODE_LABEL_NUMBER (loperands
[4]));
1781 output_asm_insn ("shi %5", loperands
);
1785 loperands
[6] = gen_label_rtx ();
1786 output_asm_insn ("scs %5\n\tjra %l6", loperands
);
1787 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "L",
1788 CODE_LABEL_NUMBER (loperands
[4]));
1789 output_asm_insn ("slt %5", loperands
);
1790 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "L",
1791 CODE_LABEL_NUMBER (loperands
[6]));
1795 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "L",
1796 CODE_LABEL_NUMBER (loperands
[4]));
1797 output_asm_insn ("scs %5", loperands
);
1801 loperands
[6] = gen_label_rtx ();
1802 output_asm_insn ("scc %5\n\tjra %l6", loperands
);
1803 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "L",
1804 CODE_LABEL_NUMBER (loperands
[4]));
1805 output_asm_insn ("sge %5", loperands
);
1806 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "L",
1807 CODE_LABEL_NUMBER (loperands
[6]));
1811 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "L",
1812 CODE_LABEL_NUMBER (loperands
[4]));
1813 output_asm_insn ("scc %5", loperands
);
1817 loperands
[6] = gen_label_rtx ();
1818 output_asm_insn ("sls %5\n\tjra %l6", loperands
);
1819 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "L",
1820 CODE_LABEL_NUMBER (loperands
[4]));
1821 output_asm_insn ("sle %5", loperands
);
1822 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "L",
1823 CODE_LABEL_NUMBER (loperands
[6]));
1827 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "L",
1828 CODE_LABEL_NUMBER (loperands
[4]));
1829 output_asm_insn ("sls %5", loperands
);
1839 output_btst (rtx
*operands
, rtx countop
, rtx dataop
, rtx insn
, int signpos
)
1841 operands
[0] = countop
;
1842 operands
[1] = dataop
;
1844 if (GET_CODE (countop
) == CONST_INT
)
1846 register int count
= INTVAL (countop
);
1847 /* If COUNT is bigger than size of storage unit in use,
1848 advance to the containing unit of same size. */
1849 if (count
> signpos
)
1851 int offset
= (count
& ~signpos
) / 8;
1852 count
= count
& signpos
;
1853 operands
[1] = dataop
= adjust_address (dataop
, QImode
, offset
);
1855 if (count
== signpos
)
1856 cc_status
.flags
= CC_NOT_POSITIVE
| CC_Z_IN_NOT_N
;
1858 cc_status
.flags
= CC_NOT_NEGATIVE
| CC_Z_IN_NOT_N
;
1860 /* These three statements used to use next_insns_test_no...
1861 but it appears that this should do the same job. */
1863 && next_insn_tests_no_inequality (insn
))
1866 && next_insn_tests_no_inequality (insn
))
1869 && next_insn_tests_no_inequality (insn
))
1871 /* Try to use `movew to ccr' followed by the appropriate branch insn.
1872 On some m68k variants unfortunately that's slower than btst.
1873 On 68000 and higher, that should also work for all HImode operands. */
1874 if (TUNE_CPU32
|| TARGET_COLDFIRE
|| optimize_size
)
1876 if (count
== 3 && DATA_REG_P (operands
[1])
1877 && next_insn_tests_no_inequality (insn
))
1879 cc_status
.flags
= CC_NOT_NEGATIVE
| CC_Z_IN_NOT_N
| CC_NO_OVERFLOW
;
1880 return "move%.w %1,%%ccr";
1882 if (count
== 2 && DATA_REG_P (operands
[1])
1883 && next_insn_tests_no_inequality (insn
))
1885 cc_status
.flags
= CC_NOT_NEGATIVE
| CC_INVERTED
| CC_NO_OVERFLOW
;
1886 return "move%.w %1,%%ccr";
1888 /* count == 1 followed by bvc/bvs and
1889 count == 0 followed by bcc/bcs are also possible, but need
1890 m68k-specific CC_Z_IN_NOT_V and CC_Z_IN_NOT_C flags. */
1893 cc_status
.flags
= CC_NOT_NEGATIVE
;
1895 return "btst %0,%1";
1898 /* Return true if X is a legitimate base register. STRICT_P says
1899 whether we need strict checking. */
1902 m68k_legitimate_base_reg_p (rtx x
, bool strict_p
)
1904 /* Allow SUBREG everywhere we allow REG. This results in better code. */
1905 if (!strict_p
&& GET_CODE (x
) == SUBREG
)
1910 ? REGNO_OK_FOR_BASE_P (REGNO (x
))
1911 : REGNO_OK_FOR_BASE_NONSTRICT_P (REGNO (x
))));
1914 /* Return true if X is a legitimate index register. STRICT_P says
1915 whether we need strict checking. */
1918 m68k_legitimate_index_reg_p (rtx x
, bool strict_p
)
1920 if (!strict_p
&& GET_CODE (x
) == SUBREG
)
1925 ? REGNO_OK_FOR_INDEX_P (REGNO (x
))
1926 : REGNO_OK_FOR_INDEX_NONSTRICT_P (REGNO (x
))));
1929 /* Return true if X is a legitimate index expression for a (d8,An,Xn) or
1930 (bd,An,Xn) addressing mode. Fill in the INDEX and SCALE fields of
1931 ADDRESS if so. STRICT_P says whether we need strict checking. */
1934 m68k_decompose_index (rtx x
, bool strict_p
, struct m68k_address
*address
)
1938 /* Check for a scale factor. */
1940 if ((TARGET_68020
|| TARGET_COLDFIRE
)
1941 && GET_CODE (x
) == MULT
1942 && GET_CODE (XEXP (x
, 1)) == CONST_INT
1943 && (INTVAL (XEXP (x
, 1)) == 2
1944 || INTVAL (XEXP (x
, 1)) == 4
1945 || (INTVAL (XEXP (x
, 1)) == 8
1946 && (TARGET_COLDFIRE_FPU
|| !TARGET_COLDFIRE
))))
1948 scale
= INTVAL (XEXP (x
, 1));
1952 /* Check for a word extension. */
1953 if (!TARGET_COLDFIRE
1954 && GET_CODE (x
) == SIGN_EXTEND
1955 && GET_MODE (XEXP (x
, 0)) == HImode
)
1958 if (m68k_legitimate_index_reg_p (x
, strict_p
))
1960 address
->scale
= scale
;
1968 /* Return true if X is an illegitimate symbolic constant. */
1971 m68k_illegitimate_symbolic_constant_p (rtx x
)
1975 if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P
)
1977 split_const (x
, &base
, &offset
);
1978 if (GET_CODE (base
) == SYMBOL_REF
1979 && !offset_within_block_p (base
, INTVAL (offset
)))
1982 return m68k_tls_reference_p (x
, false);
1985 /* Return true if X is a legitimate constant address that can reach
1986 bytes in the range [X, X + REACH). STRICT_P says whether we need
1990 m68k_legitimate_constant_address_p (rtx x
, unsigned int reach
, bool strict_p
)
1994 if (!CONSTANT_ADDRESS_P (x
))
1998 && !(strict_p
&& TARGET_PCREL
)
1999 && symbolic_operand (x
, VOIDmode
))
2002 if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P
&& reach
> 1)
2004 split_const (x
, &base
, &offset
);
2005 if (GET_CODE (base
) == SYMBOL_REF
2006 && !offset_within_block_p (base
, INTVAL (offset
) + reach
- 1))
2010 return !m68k_tls_reference_p (x
, false);
2013 /* Return true if X is a LABEL_REF for a jump table. Assume that unplaced
2014 labels will become jump tables. */
2017 m68k_jump_table_ref_p (rtx x
)
2019 if (GET_CODE (x
) != LABEL_REF
)
2023 if (!NEXT_INSN (x
) && !PREV_INSN (x
))
2026 x
= next_nonnote_insn (x
);
2027 return x
&& JUMP_TABLE_DATA_P (x
);
2030 /* Return true if X is a legitimate address for values of mode MODE.
2031 STRICT_P says whether strict checking is needed. If the address
2032 is valid, describe its components in *ADDRESS. */
2035 m68k_decompose_address (enum machine_mode mode
, rtx x
,
2036 bool strict_p
, struct m68k_address
*address
)
2040 memset (address
, 0, sizeof (*address
));
2042 if (mode
== BLKmode
)
2045 reach
= GET_MODE_SIZE (mode
);
2047 /* Check for (An) (mode 2). */
2048 if (m68k_legitimate_base_reg_p (x
, strict_p
))
2054 /* Check for -(An) and (An)+ (modes 3 and 4). */
2055 if ((GET_CODE (x
) == PRE_DEC
|| GET_CODE (x
) == POST_INC
)
2056 && m68k_legitimate_base_reg_p (XEXP (x
, 0), strict_p
))
2058 address
->code
= GET_CODE (x
);
2059 address
->base
= XEXP (x
, 0);
2063 /* Check for (d16,An) (mode 5). */
2064 if (GET_CODE (x
) == PLUS
2065 && GET_CODE (XEXP (x
, 1)) == CONST_INT
2066 && IN_RANGE (INTVAL (XEXP (x
, 1)), -0x8000, 0x8000 - reach
)
2067 && m68k_legitimate_base_reg_p (XEXP (x
, 0), strict_p
))
2069 address
->base
= XEXP (x
, 0);
2070 address
->offset
= XEXP (x
, 1);
2074 /* Check for GOT loads. These are (bd,An,Xn) addresses if
2075 TARGET_68020 && flag_pic == 2, otherwise they are (d16,An)
2077 if (GET_CODE (x
) == PLUS
2078 && XEXP (x
, 0) == pic_offset_table_rtx
)
2080 /* As we are processing a PLUS, do not unwrap RELOC32 symbols --
2081 they are invalid in this context. */
2082 if (m68k_unwrap_symbol (XEXP (x
, 1), false) != XEXP (x
, 1))
2084 address
->base
= XEXP (x
, 0);
2085 address
->offset
= XEXP (x
, 1);
2090 /* The ColdFire FPU only accepts addressing modes 2-5. */
2091 if (TARGET_COLDFIRE_FPU
&& GET_MODE_CLASS (mode
) == MODE_FLOAT
)
2094 /* Check for (xxx).w and (xxx).l. Also, in the TARGET_PCREL case,
2095 check for (d16,PC) or (bd,PC,Xn) with a suppressed index register.
2096 All these modes are variations of mode 7. */
2097 if (m68k_legitimate_constant_address_p (x
, reach
, strict_p
))
2099 address
->offset
= x
;
2103 /* Check for (d8,PC,Xn), a mode 7 form. This case is needed for
2106 ??? do_tablejump creates these addresses before placing the target
2107 label, so we have to assume that unplaced labels are jump table
2108 references. It seems unlikely that we would ever generate indexed
2109 accesses to unplaced labels in other cases. */
2110 if (GET_CODE (x
) == PLUS
2111 && m68k_jump_table_ref_p (XEXP (x
, 1))
2112 && m68k_decompose_index (XEXP (x
, 0), strict_p
, address
))
2114 address
->offset
= XEXP (x
, 1);
2118 /* Everything hereafter deals with (d8,An,Xn.SIZE*SCALE) or
2119 (bd,An,Xn.SIZE*SCALE) addresses. */
2123 /* Check for a nonzero base displacement. */
2124 if (GET_CODE (x
) == PLUS
2125 && m68k_legitimate_constant_address_p (XEXP (x
, 1), reach
, strict_p
))
2127 address
->offset
= XEXP (x
, 1);
2131 /* Check for a suppressed index register. */
2132 if (m68k_legitimate_base_reg_p (x
, strict_p
))
2138 /* Check for a suppressed base register. Do not allow this case
2139 for non-symbolic offsets as it effectively gives gcc freedom
2140 to treat data registers as base registers, which can generate
2143 && symbolic_operand (address
->offset
, VOIDmode
)
2144 && m68k_decompose_index (x
, strict_p
, address
))
2149 /* Check for a nonzero base displacement. */
2150 if (GET_CODE (x
) == PLUS
2151 && GET_CODE (XEXP (x
, 1)) == CONST_INT
2152 && IN_RANGE (INTVAL (XEXP (x
, 1)), -0x80, 0x80 - reach
))
2154 address
->offset
= XEXP (x
, 1);
2159 /* We now expect the sum of a base and an index. */
2160 if (GET_CODE (x
) == PLUS
)
2162 if (m68k_legitimate_base_reg_p (XEXP (x
, 0), strict_p
)
2163 && m68k_decompose_index (XEXP (x
, 1), strict_p
, address
))
2165 address
->base
= XEXP (x
, 0);
2169 if (m68k_legitimate_base_reg_p (XEXP (x
, 1), strict_p
)
2170 && m68k_decompose_index (XEXP (x
, 0), strict_p
, address
))
2172 address
->base
= XEXP (x
, 1);
2179 /* Return true if X is a legitimate address for values of mode MODE.
2180 STRICT_P says whether strict checking is needed. */
2183 m68k_legitimate_address_p (enum machine_mode mode
, rtx x
, bool strict_p
)
2185 struct m68k_address address
;
2187 return m68k_decompose_address (mode
, x
, strict_p
, &address
);
2190 /* Return true if X is a memory, describing its address in ADDRESS if so.
2191 Apply strict checking if called during or after reload. */
2194 m68k_legitimate_mem_p (rtx x
, struct m68k_address
*address
)
2197 && m68k_decompose_address (GET_MODE (x
), XEXP (x
, 0),
2198 reload_in_progress
|| reload_completed
,
2202 /* Return true if X matches the 'Q' constraint. It must be a memory
2203 with a base address and no constant offset or index. */
2206 m68k_matches_q_p (rtx x
)
2208 struct m68k_address address
;
2210 return (m68k_legitimate_mem_p (x
, &address
)
2211 && address
.code
== UNKNOWN
2217 /* Return true if X matches the 'U' constraint. It must be a base address
2218 with a constant offset and no index. */
2221 m68k_matches_u_p (rtx x
)
2223 struct m68k_address address
;
2225 return (m68k_legitimate_mem_p (x
, &address
)
2226 && address
.code
== UNKNOWN
2232 /* Return GOT pointer. */
2237 if (pic_offset_table_rtx
== NULL_RTX
)
2238 pic_offset_table_rtx
= gen_rtx_REG (Pmode
, PIC_REG
);
2240 crtl
->uses_pic_offset_table
= 1;
2242 return pic_offset_table_rtx
;
/* M68K relocations, used to distinguish GOT and TLS relocations in UNSPEC
   wrappers.  */
enum m68k_reloc { RELOC_GOT, RELOC_TLSGD, RELOC_TLSLDM, RELOC_TLSLDO,
		  RELOC_TLSIE, RELOC_TLSLE };

/* True for every relocation kind except plain GOT.  */
#define TLS_RELOC_P(RELOC) ((RELOC) != RELOC_GOT)
2252 /* Wrap symbol X into unspec representing relocation RELOC.
2253 BASE_REG - register that should be added to the result.
2254 TEMP_REG - if non-null, temporary register. */
2257 m68k_wrap_symbol (rtx x
, enum m68k_reloc reloc
, rtx base_reg
, rtx temp_reg
)
2261 use_x_p
= (base_reg
== pic_offset_table_rtx
) ? TARGET_XGOT
: TARGET_XTLS
;
2263 if (TARGET_COLDFIRE
&& use_x_p
)
2264 /* When compiling with -mx{got, tls} switch the code will look like this:
2266 move.l <X>@<RELOC>,<TEMP_REG>
2267 add.l <BASE_REG>,<TEMP_REG> */
2269 /* Wrap X in UNSPEC_??? to tip m68k_output_addr_const_extra
2270 to put @RELOC after reference. */
2271 x
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (2, x
, GEN_INT (reloc
)),
2273 x
= gen_rtx_CONST (Pmode
, x
);
2275 if (temp_reg
== NULL
)
2277 gcc_assert (can_create_pseudo_p ());
2278 temp_reg
= gen_reg_rtx (Pmode
);
2281 emit_move_insn (temp_reg
, x
);
2282 emit_insn (gen_addsi3 (temp_reg
, temp_reg
, base_reg
));
2287 x
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (2, x
, GEN_INT (reloc
)),
2289 x
= gen_rtx_CONST (Pmode
, x
);
2291 x
= gen_rtx_PLUS (Pmode
, base_reg
, x
);
2297 /* Helper for m68k_unwrap_symbol.
2298 Also, if unwrapping was successful (that is if (ORIG != <return value>)),
2299 sets *RELOC_PTR to relocation type for the symbol. */
2302 m68k_unwrap_symbol_1 (rtx orig
, bool unwrap_reloc32_p
,
2303 enum m68k_reloc
*reloc_ptr
)
2305 if (GET_CODE (orig
) == CONST
)
2308 enum m68k_reloc dummy
;
2312 if (reloc_ptr
== NULL
)
2315 /* Handle an addend. */
2316 if ((GET_CODE (x
) == PLUS
|| GET_CODE (x
) == MINUS
)
2317 && CONST_INT_P (XEXP (x
, 1)))
2320 if (GET_CODE (x
) == UNSPEC
)
2322 switch (XINT (x
, 1))
2324 case UNSPEC_RELOC16
:
2325 orig
= XVECEXP (x
, 0, 0);
2326 *reloc_ptr
= (enum m68k_reloc
) INTVAL (XVECEXP (x
, 0, 1));
2329 case UNSPEC_RELOC32
:
2330 if (unwrap_reloc32_p
)
2332 orig
= XVECEXP (x
, 0, 0);
2333 *reloc_ptr
= (enum m68k_reloc
) INTVAL (XVECEXP (x
, 0, 1));
2346 /* Unwrap symbol from UNSPEC_RELOC16 and, if unwrap_reloc32_p,
2347 UNSPEC_RELOC32 wrappers. */
2350 m68k_unwrap_symbol (rtx orig
, bool unwrap_reloc32_p
)
2352 return m68k_unwrap_symbol_1 (orig
, unwrap_reloc32_p
, NULL
);
2355 /* Helper for m68k_final_prescan_insn. */
2358 m68k_final_prescan_insn_1 (rtx
*x_ptr
, void *data ATTRIBUTE_UNUSED
)
2362 if (m68k_unwrap_symbol (x
, true) != x
)
2363 /* For rationale of the below, see comment in m68k_final_prescan_insn. */
2367 gcc_assert (GET_CODE (x
) == CONST
);
2370 if (GET_CODE (plus
) == PLUS
|| GET_CODE (plus
) == MINUS
)
2375 unspec
= XEXP (plus
, 0);
2376 gcc_assert (GET_CODE (unspec
) == UNSPEC
);
2377 addend
= XEXP (plus
, 1);
2378 gcc_assert (CONST_INT_P (addend
));
2380 /* We now have all the pieces, rearrange them. */
2382 /* Move symbol to plus. */
2383 XEXP (plus
, 0) = XVECEXP (unspec
, 0, 0);
2385 /* Move plus inside unspec. */
2386 XVECEXP (unspec
, 0, 0) = plus
;
2388 /* Move unspec to top level of const. */
2389 XEXP (x
, 0) = unspec
;
2398 /* Prescan insn before outputing assembler for it. */
2401 m68k_final_prescan_insn (rtx insn ATTRIBUTE_UNUSED
,
2402 rtx
*operands
, int n_operands
)
2406 /* Combine and, possibly, other optimizations may do good job
2408 (const (unspec [(symbol)]))
2410 (const (plus (unspec [(symbol)])
2412 The problem with this is emitting @TLS or @GOT decorations.
2413 The decoration is emitted when processing (unspec), so the
2414 result would be "#symbol@TLSLE+N" instead of "#symbol+N@TLSLE".
2416 It seems that the easiest solution to this is to convert such
2418 (const (unspec [(plus (symbol)
2420 Note, that the top level of operand remains intact, so we don't have
2421 to patch up anything outside of the operand. */
2423 for (i
= 0; i
< n_operands
; ++i
)
2429 for_each_rtx (&op
, m68k_final_prescan_insn_1
, NULL
);
2433 /* Move X to a register and add REG_EQUAL note pointing to ORIG.
2434 If REG is non-null, use it; generate new pseudo otherwise. */
2437 m68k_move_to_reg (rtx x
, rtx orig
, rtx reg
)
2441 if (reg
== NULL_RTX
)
2443 gcc_assert (can_create_pseudo_p ());
2444 reg
= gen_reg_rtx (Pmode
);
2447 insn
= emit_move_insn (reg
, x
);
2448 /* Put a REG_EQUAL note on this insn, so that it can be optimized
2450 set_unique_reg_note (insn
, REG_EQUAL
, orig
);
2455 /* Does the same as m68k_wrap_symbol, but returns a memory reference to
2459 m68k_wrap_symbol_into_got_ref (rtx x
, enum m68k_reloc reloc
, rtx temp_reg
)
2461 x
= m68k_wrap_symbol (x
, reloc
, m68k_get_gp (), temp_reg
);
2463 x
= gen_rtx_MEM (Pmode
, x
);
2464 MEM_READONLY_P (x
) = 1;
2469 /* Legitimize PIC addresses. If the address is already
2470 position-independent, we return ORIG. Newly generated
2471 position-independent addresses go to REG. If we need more
2472 than one register, we lose.
2474 An address is legitimized by making an indirect reference
2475 through the Global Offset Table with the name of the symbol
2478 The assembler and linker are responsible for placing the
2479 address of the symbol in the GOT. The function prologue
2480 is responsible for initializing a5 to the starting address
2483 The assembler is also responsible for translating a symbol name
2484 into a constant displacement from the start of the GOT.
2486 A quick example may make things a little clearer:
2488 When not generating PIC code to store the value 12345 into _foo
2489 we would generate the following code:
2493 When generating PIC two transformations are made. First, the compiler
2494 loads the address of foo into a register. So the first transformation makes:
2499 The code in movsi will intercept the lea instruction and call this
2500 routine which will transform the instructions into:
2502 movel a5@(_foo:w), a0
2506 That (in a nutshell) is how *all* symbol and label references are
2510 legitimize_pic_address (rtx orig
, enum machine_mode mode ATTRIBUTE_UNUSED
,
2515 /* First handle a simple SYMBOL_REF or LABEL_REF */
2516 if (GET_CODE (orig
) == SYMBOL_REF
|| GET_CODE (orig
) == LABEL_REF
)
2520 pic_ref
= m68k_wrap_symbol_into_got_ref (orig
, RELOC_GOT
, reg
);
2521 pic_ref
= m68k_move_to_reg (pic_ref
, orig
, reg
);
2523 else if (GET_CODE (orig
) == CONST
)
2527 /* Make sure this has not already been legitimized. */
2528 if (m68k_unwrap_symbol (orig
, true) != orig
)
2533 /* legitimize both operands of the PLUS */
2534 gcc_assert (GET_CODE (XEXP (orig
, 0)) == PLUS
);
2536 base
= legitimize_pic_address (XEXP (XEXP (orig
, 0), 0), Pmode
, reg
);
2537 orig
= legitimize_pic_address (XEXP (XEXP (orig
, 0), 1), Pmode
,
2538 base
== reg
? 0 : reg
);
2540 if (GET_CODE (orig
) == CONST_INT
)
2541 pic_ref
= plus_constant (base
, INTVAL (orig
));
2543 pic_ref
= gen_rtx_PLUS (Pmode
, base
, orig
);
2549 /* The __tls_get_addr symbol. */
2550 static GTY(()) rtx m68k_tls_get_addr
;
2552 /* Return SYMBOL_REF for __tls_get_addr. */
2555 m68k_get_tls_get_addr (void)
2557 if (m68k_tls_get_addr
== NULL_RTX
)
2558 m68k_tls_get_addr
= init_one_libfunc ("__tls_get_addr");
2560 return m68k_tls_get_addr
;
2563 /* Return libcall result in A0 instead of usual D0. */
2564 static bool m68k_libcall_value_in_a0_p
= false;
2566 /* Emit instruction sequence that calls __tls_get_addr. X is
2567 the TLS symbol we are referencing and RELOC is the symbol type to use
2568 (either TLSGD or TLSLDM). EQV is the REG_EQUAL note for the sequence
2569 emitted. A pseudo register with result of __tls_get_addr call is
2573 m68k_call_tls_get_addr (rtx x
, rtx eqv
, enum m68k_reloc reloc
)
2579 /* Emit the call sequence. */
2582 /* FIXME: Unfortunately, emit_library_call_value does not
2583 consider (plus (%a5) (const (unspec))) to be a good enough
2584 operand for push, so it forces it into a register. The bad
2585 thing about this is that combiner, due to copy propagation and other
2586 optimizations, sometimes can not later fix this. As a consequence,
2587 additional register may be allocated resulting in a spill.
2588 For reference, see args processing loops in
2589 calls.c:emit_library_call_value_1.
2590 For testcase, see gcc.target/m68k/tls-{gd, ld}.c */
2591 x
= m68k_wrap_symbol (x
, reloc
, m68k_get_gp (), NULL_RTX
);
2593 /* __tls_get_addr() is not a libcall, but emitting a libcall_value
2594 is the simpliest way of generating a call. The difference between
2595 __tls_get_addr() and libcall is that the result is returned in D0
2596 instead of A0. To workaround this, we use m68k_libcall_value_in_a0_p
2597 which temporarily switches returning the result to A0. */
2599 m68k_libcall_value_in_a0_p
= true;
2600 a0
= emit_library_call_value (m68k_get_tls_get_addr (), NULL_RTX
, LCT_PURE
,
2601 Pmode
, 1, x
, Pmode
);
2602 m68k_libcall_value_in_a0_p
= false;
2604 insns
= get_insns ();
2607 gcc_assert (can_create_pseudo_p ());
2608 dest
= gen_reg_rtx (Pmode
);
2609 emit_libcall_block (insns
, dest
, a0
, eqv
);
2614 /* The __tls_get_addr symbol. */
2615 static GTY(()) rtx m68k_read_tp
;
2617 /* Return SYMBOL_REF for __m68k_read_tp. */
2620 m68k_get_m68k_read_tp (void)
2622 if (m68k_read_tp
== NULL_RTX
)
2623 m68k_read_tp
= init_one_libfunc ("__m68k_read_tp");
2625 return m68k_read_tp
;
2628 /* Emit instruction sequence that calls __m68k_read_tp.
2629 A pseudo register with result of __m68k_read_tp call is returned. */
2632 m68k_call_m68k_read_tp (void)
2641 /* __m68k_read_tp() is not a libcall, but emitting a libcall_value
2642 is the simpliest way of generating a call. The difference between
2643 __m68k_read_tp() and libcall is that the result is returned in D0
2644 instead of A0. To workaround this, we use m68k_libcall_value_in_a0_p
2645 which temporarily switches returning the result to A0. */
2647 /* Emit the call sequence. */
2648 m68k_libcall_value_in_a0_p
= true;
2649 a0
= emit_library_call_value (m68k_get_m68k_read_tp (), NULL_RTX
, LCT_PURE
,
2651 m68k_libcall_value_in_a0_p
= false;
2652 insns
= get_insns ();
2655 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2656 share the m68k_read_tp result with other IE/LE model accesses. */
2657 eqv
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, const1_rtx
), UNSPEC_RELOC32
);
2659 gcc_assert (can_create_pseudo_p ());
2660 dest
= gen_reg_rtx (Pmode
);
2661 emit_libcall_block (insns
, dest
, a0
, eqv
);
2666 /* Return a legitimized address for accessing TLS SYMBOL_REF X.
2667 For explanations on instructions sequences see TLS/NPTL ABI for m68k and
2671 m68k_legitimize_tls_address (rtx orig
)
2673 switch (SYMBOL_REF_TLS_MODEL (orig
))
2675 case TLS_MODEL_GLOBAL_DYNAMIC
:
2676 orig
= m68k_call_tls_get_addr (orig
, orig
, RELOC_TLSGD
);
2679 case TLS_MODEL_LOCAL_DYNAMIC
:
2685 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2686 share the LDM result with other LD model accesses. */
2687 eqv
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, const0_rtx
),
2690 a0
= m68k_call_tls_get_addr (orig
, eqv
, RELOC_TLSLDM
);
2692 x
= m68k_wrap_symbol (orig
, RELOC_TLSLDO
, a0
, NULL_RTX
);
2694 if (can_create_pseudo_p ())
2695 x
= m68k_move_to_reg (x
, orig
, NULL_RTX
);
2701 case TLS_MODEL_INITIAL_EXEC
:
2706 a0
= m68k_call_m68k_read_tp ();
2708 x
= m68k_wrap_symbol_into_got_ref (orig
, RELOC_TLSIE
, NULL_RTX
);
2709 x
= gen_rtx_PLUS (Pmode
, x
, a0
);
2711 if (can_create_pseudo_p ())
2712 x
= m68k_move_to_reg (x
, orig
, NULL_RTX
);
2718 case TLS_MODEL_LOCAL_EXEC
:
2723 a0
= m68k_call_m68k_read_tp ();
2725 x
= m68k_wrap_symbol (orig
, RELOC_TLSLE
, a0
, NULL_RTX
);
2727 if (can_create_pseudo_p ())
2728 x
= m68k_move_to_reg (x
, orig
, NULL_RTX
);
2741 /* Return true if X is a TLS symbol. */
2744 m68k_tls_symbol_p (rtx x
)
2746 if (!TARGET_HAVE_TLS
)
2749 if (GET_CODE (x
) != SYMBOL_REF
)
2752 return SYMBOL_REF_TLS_MODEL (x
) != 0;
2755 /* Helper for m68k_tls_referenced_p. */
2758 m68k_tls_reference_p_1 (rtx
*x_ptr
, void *data ATTRIBUTE_UNUSED
)
2760 /* Note: this is not the same as m68k_tls_symbol_p. */
2761 if (GET_CODE (*x_ptr
) == SYMBOL_REF
)
2762 return SYMBOL_REF_TLS_MODEL (*x_ptr
) != 0 ? 1 : 0;
2764 /* Don't recurse into legitimate TLS references. */
2765 if (m68k_tls_reference_p (*x_ptr
, true))
2771 /* If !LEGITIMATE_P, return true if X is a TLS symbol reference,
2772 though illegitimate one.
2773 If LEGITIMATE_P, return true if X is a legitimate TLS symbol reference. */
2776 m68k_tls_reference_p (rtx x
, bool legitimate_p
)
2778 if (!TARGET_HAVE_TLS
)
2782 return for_each_rtx (&x
, m68k_tls_reference_p_1
, NULL
) == 1 ? true : false;
2785 enum m68k_reloc reloc
= RELOC_GOT
;
2787 return (m68k_unwrap_symbol_1 (x
, true, &reloc
) != x
2788 && TLS_RELOC_P (reloc
));
2794 #define USE_MOVQ(i) ((unsigned) ((i) + 128) <= 255)
2796 /* Return the type of move that should be used for integer I. */
2799 m68k_const_method (HOST_WIDE_INT i
)
2806 /* The ColdFire doesn't have byte or word operations. */
2807 /* FIXME: This may not be useful for the m68060 either. */
2808 if (!TARGET_COLDFIRE
)
2810 /* if -256 < N < 256 but N is not in range for a moveq
2811 N^ff will be, so use moveq #N^ff, dreg; not.b dreg. */
2812 if (USE_MOVQ (i
^ 0xff))
2814 /* Likewise, try with not.w */
2815 if (USE_MOVQ (i
^ 0xffff))
2817 /* This is the only value where neg.w is useful */
2822 /* Try also with swap. */
2824 if (USE_MOVQ ((u
>> 16) | (u
<< 16)))
2829 /* Try using MVZ/MVS with an immediate value to load constants. */
2830 if (i
>= 0 && i
<= 65535)
2832 if (i
>= -32768 && i
<= 32767)
2836 /* Otherwise, use move.l */
2840 /* Return the cost of moving constant I into a data register. */
2843 const_int_cost (HOST_WIDE_INT i
)
2845 switch (m68k_const_method (i
))
2848 /* Constants between -128 and 127 are cheap due to moveq. */
2856 /* Constants easily generated by moveq + not.b/not.w/neg.w/swap. */
2866 m68k_rtx_costs (rtx x
, int code
, int outer_code
, int *total
,
2867 bool speed ATTRIBUTE_UNUSED
)
2872 /* Constant zero is super cheap due to clr instruction. */
2873 if (x
== const0_rtx
)
2876 *total
= const_int_cost (INTVAL (x
));
2886 /* Make 0.0 cheaper than other floating constants to
2887 encourage creating tstsf and tstdf insns. */
2888 if (outer_code
== COMPARE
2889 && (x
== CONST0_RTX (SFmode
) || x
== CONST0_RTX (DFmode
)))
2895 /* These are vaguely right for a 68020. */
2896 /* The costs for long multiply have been adjusted to work properly
2897 in synth_mult on the 68020, relative to an average of the time
2898 for add and the time for shift, taking away a little more because
2899 sometimes move insns are needed. */
2900 /* div?.w is relatively cheaper on 68000 counted in COSTS_N_INSNS
2905 : (TUNE_CFV2 && TUNE_EMAC) ? 3 \
2906 : (TUNE_CFV2 && TUNE_MAC) ? 4 \
2908 : TARGET_COLDFIRE ? 3 : 13)
2913 : TUNE_68000_10 ? 5 \
2914 : (TUNE_CFV2 && TUNE_EMAC) ? 3 \
2915 : (TUNE_CFV2 && TUNE_MAC) ? 2 \
2917 : TARGET_COLDFIRE ? 2 : 8)
2920 (TARGET_CF_HWDIV ? 11 \
2921 : TUNE_68000_10 || TARGET_COLDFIRE ? 12 : 27)
2924 /* An lea costs about three times as much as a simple add. */
2925 if (GET_MODE (x
) == SImode
2926 && GET_CODE (XEXP (x
, 1)) == REG
2927 && GET_CODE (XEXP (x
, 0)) == MULT
2928 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == REG
2929 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
2930 && (INTVAL (XEXP (XEXP (x
, 0), 1)) == 2
2931 || INTVAL (XEXP (XEXP (x
, 0), 1)) == 4
2932 || INTVAL (XEXP (XEXP (x
, 0), 1)) == 8))
2934 /* lea an@(dx:l:i),am */
2935 *total
= COSTS_N_INSNS (TARGET_COLDFIRE
? 2 : 3);
2945 *total
= COSTS_N_INSNS(1);
2950 if (GET_CODE (XEXP (x
, 1)) == CONST_INT
)
2952 if (INTVAL (XEXP (x
, 1)) < 16)
2953 *total
= COSTS_N_INSNS (2) + INTVAL (XEXP (x
, 1)) / 2;
2955 /* We're using clrw + swap for these cases. */
2956 *total
= COSTS_N_INSNS (4) + (INTVAL (XEXP (x
, 1)) - 16) / 2;
2959 *total
= COSTS_N_INSNS (10); /* Worst case. */
2962 /* A shift by a big integer takes an extra instruction. */
2963 if (GET_CODE (XEXP (x
, 1)) == CONST_INT
2964 && (INTVAL (XEXP (x
, 1)) == 16))
2966 *total
= COSTS_N_INSNS (2); /* clrw;swap */
2969 if (GET_CODE (XEXP (x
, 1)) == CONST_INT
2970 && !(INTVAL (XEXP (x
, 1)) > 0
2971 && INTVAL (XEXP (x
, 1)) <= 8))
2973 *total
= COSTS_N_INSNS (TARGET_COLDFIRE
? 1 : 3); /* lsr #i,dn */
2979 if ((GET_CODE (XEXP (x
, 0)) == ZERO_EXTEND
2980 || GET_CODE (XEXP (x
, 0)) == SIGN_EXTEND
)
2981 && GET_MODE (x
) == SImode
)
2982 *total
= COSTS_N_INSNS (MULW_COST
);
2983 else if (GET_MODE (x
) == QImode
|| GET_MODE (x
) == HImode
)
2984 *total
= COSTS_N_INSNS (MULW_COST
);
2986 *total
= COSTS_N_INSNS (MULL_COST
);
2993 if (GET_MODE (x
) == QImode
|| GET_MODE (x
) == HImode
)
2994 *total
= COSTS_N_INSNS (DIVW_COST
); /* div.w */
2995 else if (TARGET_CF_HWDIV
)
2996 *total
= COSTS_N_INSNS (18);
2998 *total
= COSTS_N_INSNS (43); /* div.l */
3002 if (outer_code
== COMPARE
)
3011 /* Return an instruction to move CONST_INT OPERANDS[1] into data register
3015 output_move_const_into_data_reg (rtx
*operands
)
3019 i
= INTVAL (operands
[1]);
3020 switch (m68k_const_method (i
))
3023 return "mvzw %1,%0";
3025 return "mvsw %1,%0";
3027 return "moveq %1,%0";
3030 operands
[1] = GEN_INT (i
^ 0xff);
3031 return "moveq %1,%0\n\tnot%.b %0";
3034 operands
[1] = GEN_INT (i
^ 0xffff);
3035 return "moveq %1,%0\n\tnot%.w %0";
3038 return "moveq #-128,%0\n\tneg%.w %0";
3043 operands
[1] = GEN_INT ((u
<< 16) | (u
>> 16));
3044 return "moveq %1,%0\n\tswap %0";
3047 return "move%.l %1,%0";
3053 /* Return true if I can be handled by ISA B's mov3q instruction. */
3056 valid_mov3q_const (HOST_WIDE_INT i
)
3058 return TARGET_ISAB
&& (i
== -1 || IN_RANGE (i
, 1, 7));
3061 /* Return an instruction to move CONST_INT OPERANDS[1] into OPERANDS[0].
3062 I is the value of OPERANDS[1]. */
3065 output_move_simode_const (rtx
*operands
)
3071 src
= INTVAL (operands
[1]);
3073 && (DATA_REG_P (dest
) || MEM_P (dest
))
3074 /* clr insns on 68000 read before writing. */
3075 && ((TARGET_68010
|| TARGET_COLDFIRE
)
3076 || !(MEM_P (dest
) && MEM_VOLATILE_P (dest
))))
3078 else if (GET_MODE (dest
) == SImode
&& valid_mov3q_const (src
))
3079 return "mov3q%.l %1,%0";
3080 else if (src
== 0 && ADDRESS_REG_P (dest
))
3081 return "sub%.l %0,%0";
3082 else if (DATA_REG_P (dest
))
3083 return output_move_const_into_data_reg (operands
);
3084 else if (ADDRESS_REG_P (dest
) && IN_RANGE (src
, -0x8000, 0x7fff))
3086 if (valid_mov3q_const (src
))
3087 return "mov3q%.l %1,%0";
3088 return "move%.w %1,%0";
3090 else if (MEM_P (dest
)
3091 && GET_CODE (XEXP (dest
, 0)) == PRE_DEC
3092 && REGNO (XEXP (XEXP (dest
, 0), 0)) == STACK_POINTER_REGNUM
3093 && IN_RANGE (src
, -0x8000, 0x7fff))
3095 if (valid_mov3q_const (src
))
3096 return "mov3q%.l %1,%-";
3099 return "move%.l %1,%0";
3103 output_move_simode (rtx
*operands
)
3105 if (GET_CODE (operands
[1]) == CONST_INT
)
3106 return output_move_simode_const (operands
);
3107 else if ((GET_CODE (operands
[1]) == SYMBOL_REF
3108 || GET_CODE (operands
[1]) == CONST
)
3109 && push_operand (operands
[0], SImode
))
3111 else if ((GET_CODE (operands
[1]) == SYMBOL_REF
3112 || GET_CODE (operands
[1]) == CONST
)
3113 && ADDRESS_REG_P (operands
[0]))
3114 return "lea %a1,%0";
3115 return "move%.l %1,%0";
3119 output_move_himode (rtx
*operands
)
3121 if (GET_CODE (operands
[1]) == CONST_INT
)
3123 if (operands
[1] == const0_rtx
3124 && (DATA_REG_P (operands
[0])
3125 || GET_CODE (operands
[0]) == MEM
)
3126 /* clr insns on 68000 read before writing. */
3127 && ((TARGET_68010
|| TARGET_COLDFIRE
)
3128 || !(GET_CODE (operands
[0]) == MEM
3129 && MEM_VOLATILE_P (operands
[0]))))
3131 else if (operands
[1] == const0_rtx
3132 && ADDRESS_REG_P (operands
[0]))
3133 return "sub%.l %0,%0";
3134 else if (DATA_REG_P (operands
[0])
3135 && INTVAL (operands
[1]) < 128
3136 && INTVAL (operands
[1]) >= -128)
3137 return "moveq %1,%0";
3138 else if (INTVAL (operands
[1]) < 0x8000
3139 && INTVAL (operands
[1]) >= -0x8000)
3140 return "move%.w %1,%0";
3142 else if (CONSTANT_P (operands
[1]))
3143 return "move%.l %1,%0";
3144 return "move%.w %1,%0";
3148 output_move_qimode (rtx
*operands
)
3150 /* 68k family always modifies the stack pointer by at least 2, even for
3151 byte pushes. The 5200 (ColdFire) does not do this. */
3153 /* This case is generated by pushqi1 pattern now. */
3154 gcc_assert (!(GET_CODE (operands
[0]) == MEM
3155 && GET_CODE (XEXP (operands
[0], 0)) == PRE_DEC
3156 && XEXP (XEXP (operands
[0], 0), 0) == stack_pointer_rtx
3157 && ! ADDRESS_REG_P (operands
[1])
3158 && ! TARGET_COLDFIRE
));
3160 /* clr and st insns on 68000 read before writing. */
3161 if (!ADDRESS_REG_P (operands
[0])
3162 && ((TARGET_68010
|| TARGET_COLDFIRE
)
3163 || !(GET_CODE (operands
[0]) == MEM
&& MEM_VOLATILE_P (operands
[0]))))
3165 if (operands
[1] == const0_rtx
)
3167 if ((!TARGET_COLDFIRE
|| DATA_REG_P (operands
[0]))
3168 && GET_CODE (operands
[1]) == CONST_INT
3169 && (INTVAL (operands
[1]) & 255) == 255)
3175 if (GET_CODE (operands
[1]) == CONST_INT
3176 && DATA_REG_P (operands
[0])
3177 && INTVAL (operands
[1]) < 128
3178 && INTVAL (operands
[1]) >= -128)
3179 return "moveq %1,%0";
3180 if (operands
[1] == const0_rtx
&& ADDRESS_REG_P (operands
[0]))
3181 return "sub%.l %0,%0";
3182 if (GET_CODE (operands
[1]) != CONST_INT
&& CONSTANT_P (operands
[1]))
3183 return "move%.l %1,%0";
3184 /* 68k family (including the 5200 ColdFire) does not support byte moves to
3185 from address registers. */
3186 if (ADDRESS_REG_P (operands
[0]) || ADDRESS_REG_P (operands
[1]))
3187 return "move%.w %1,%0";
3188 return "move%.b %1,%0";
3192 output_move_stricthi (rtx
*operands
)
3194 if (operands
[1] == const0_rtx
3195 /* clr insns on 68000 read before writing. */
3196 && ((TARGET_68010
|| TARGET_COLDFIRE
)
3197 || !(GET_CODE (operands
[0]) == MEM
&& MEM_VOLATILE_P (operands
[0]))))
3199 return "move%.w %1,%0";
3203 output_move_strictqi (rtx
*operands
)
3205 if (operands
[1] == const0_rtx
3206 /* clr insns on 68000 read before writing. */
3207 && ((TARGET_68010
|| TARGET_COLDFIRE
)
3208 || !(GET_CODE (operands
[0]) == MEM
&& MEM_VOLATILE_P (operands
[0]))))
3210 return "move%.b %1,%0";
3213 /* Return the best assembler insn template
3214 for moving operands[1] into operands[0] as a fullword. */
3217 singlemove_string (rtx
*operands
)
3219 if (GET_CODE (operands
[1]) == CONST_INT
)
3220 return output_move_simode_const (operands
);
3221 return "move%.l %1,%0";
3225 /* Output assembler or rtl code to perform a doubleword move insn
3226 with operands OPERANDS.
3227 Pointers to 3 helper functions should be specified:
3228 HANDLE_REG_ADJUST to adjust a register by a small value,
3229 HANDLE_COMPADR to compute an address and
3230 HANDLE_MOVSI to move 4 bytes. */
3233 handle_move_double (rtx operands
[2],
3234 void (*handle_reg_adjust
) (rtx
, int),
3235 void (*handle_compadr
) (rtx
[2]),
3236 void (*handle_movsi
) (rtx
[2]))
3240 REGOP
, OFFSOP
, MEMOP
, PUSHOP
, POPOP
, CNSTOP
, RNDOP
3245 rtx addreg0
= 0, addreg1
= 0;
3246 int dest_overlapped_low
= 0;
3247 int size
= GET_MODE_SIZE (GET_MODE (operands
[0]));
3252 /* First classify both operands. */
3254 if (REG_P (operands
[0]))
3256 else if (offsettable_memref_p (operands
[0]))
3258 else if (GET_CODE (XEXP (operands
[0], 0)) == POST_INC
)
3260 else if (GET_CODE (XEXP (operands
[0], 0)) == PRE_DEC
)
3262 else if (GET_CODE (operands
[0]) == MEM
)
3267 if (REG_P (operands
[1]))
3269 else if (CONSTANT_P (operands
[1]))
3271 else if (offsettable_memref_p (operands
[1]))
3273 else if (GET_CODE (XEXP (operands
[1], 0)) == POST_INC
)
3275 else if (GET_CODE (XEXP (operands
[1], 0)) == PRE_DEC
)
3277 else if (GET_CODE (operands
[1]) == MEM
)
3282 /* Check for the cases that the operand constraints are not supposed
3283 to allow to happen. Generating code for these cases is
3285 gcc_assert (optype0
!= RNDOP
&& optype1
!= RNDOP
);
3287 /* If one operand is decrementing and one is incrementing
3288 decrement the former register explicitly
3289 and change that operand into ordinary indexing. */
3291 if (optype0
== PUSHOP
&& optype1
== POPOP
)
3293 operands
[0] = XEXP (XEXP (operands
[0], 0), 0);
3295 handle_reg_adjust (operands
[0], -size
);
3297 if (GET_MODE (operands
[1]) == XFmode
)
3298 operands
[0] = gen_rtx_MEM (XFmode
, operands
[0]);
3299 else if (GET_MODE (operands
[0]) == DFmode
)
3300 operands
[0] = gen_rtx_MEM (DFmode
, operands
[0]);
3302 operands
[0] = gen_rtx_MEM (DImode
, operands
[0]);
3305 if (optype0
== POPOP
&& optype1
== PUSHOP
)
3307 operands
[1] = XEXP (XEXP (operands
[1], 0), 0);
3309 handle_reg_adjust (operands
[1], -size
);
3311 if (GET_MODE (operands
[1]) == XFmode
)
3312 operands
[1] = gen_rtx_MEM (XFmode
, operands
[1]);
3313 else if (GET_MODE (operands
[1]) == DFmode
)
3314 operands
[1] = gen_rtx_MEM (DFmode
, operands
[1]);
3316 operands
[1] = gen_rtx_MEM (DImode
, operands
[1]);
3320 /* If an operand is an unoffsettable memory ref, find a register
3321 we can increment temporarily to make it refer to the second word. */
3323 if (optype0
== MEMOP
)
3324 addreg0
= find_addr_reg (XEXP (operands
[0], 0));
3326 if (optype1
== MEMOP
)
3327 addreg1
= find_addr_reg (XEXP (operands
[1], 0));
3329 /* Ok, we can do one word at a time.
3330 Normally we do the low-numbered word first,
3331 but if either operand is autodecrementing then we
3332 do the high-numbered word first.
3334 In either case, set up in LATEHALF the operands to use
3335 for the high-numbered word and in some cases alter the
3336 operands in OPERANDS to be suitable for the low-numbered word. */
3340 if (optype0
== REGOP
)
3342 latehalf
[0] = gen_rtx_REG (SImode
, REGNO (operands
[0]) + 2);
3343 middlehalf
[0] = gen_rtx_REG (SImode
, REGNO (operands
[0]) + 1);
3345 else if (optype0
== OFFSOP
)
3347 middlehalf
[0] = adjust_address (operands
[0], SImode
, 4);
3348 latehalf
[0] = adjust_address (operands
[0], SImode
, size
- 4);
3352 middlehalf
[0] = adjust_address (operands
[0], SImode
, 0);
3353 latehalf
[0] = adjust_address (operands
[0], SImode
, 0);
3356 if (optype1
== REGOP
)
3358 latehalf
[1] = gen_rtx_REG (SImode
, REGNO (operands
[1]) + 2);
3359 middlehalf
[1] = gen_rtx_REG (SImode
, REGNO (operands
[1]) + 1);
3361 else if (optype1
== OFFSOP
)
3363 middlehalf
[1] = adjust_address (operands
[1], SImode
, 4);
3364 latehalf
[1] = adjust_address (operands
[1], SImode
, size
- 4);
3366 else if (optype1
== CNSTOP
)
3368 if (GET_CODE (operands
[1]) == CONST_DOUBLE
)
3373 REAL_VALUE_FROM_CONST_DOUBLE (r
, operands
[1]);
3374 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r
, l
);
3375 operands
[1] = GEN_INT (l
[0]);
3376 middlehalf
[1] = GEN_INT (l
[1]);
3377 latehalf
[1] = GEN_INT (l
[2]);
3381 /* No non-CONST_DOUBLE constant should ever appear
3383 gcc_assert (!CONSTANT_P (operands
[1]));
3388 middlehalf
[1] = adjust_address (operands
[1], SImode
, 0);
3389 latehalf
[1] = adjust_address (operands
[1], SImode
, 0);
3393 /* size is not 12: */
3395 if (optype0
== REGOP
)
3396 latehalf
[0] = gen_rtx_REG (SImode
, REGNO (operands
[0]) + 1);
3397 else if (optype0
== OFFSOP
)
3398 latehalf
[0] = adjust_address (operands
[0], SImode
, size
- 4);
3400 latehalf
[0] = adjust_address (operands
[0], SImode
, 0);
3402 if (optype1
== REGOP
)
3403 latehalf
[1] = gen_rtx_REG (SImode
, REGNO (operands
[1]) + 1);
3404 else if (optype1
== OFFSOP
)
3405 latehalf
[1] = adjust_address (operands
[1], SImode
, size
- 4);
3406 else if (optype1
== CNSTOP
)
3407 split_double (operands
[1], &operands
[1], &latehalf
[1]);
3409 latehalf
[1] = adjust_address (operands
[1], SImode
, 0);
3412 /* If insn is effectively movd N(sp),-(sp) then we will do the
3413 high word first. We should use the adjusted operand 1 (which is N+4(sp))
3414 for the low word as well, to compensate for the first decrement of sp. */
3415 if (optype0
== PUSHOP
3416 && REGNO (XEXP (XEXP (operands
[0], 0), 0)) == STACK_POINTER_REGNUM
3417 && reg_overlap_mentioned_p (stack_pointer_rtx
, operands
[1]))
3418 operands
[1] = middlehalf
[1] = latehalf
[1];
3420 /* For (set (reg:DI N) (mem:DI ... (reg:SI N) ...)),
3421 if the upper part of reg N does not appear in the MEM, arrange to
3422 emit the move late-half first. Otherwise, compute the MEM address
3423 into the upper part of N and use that as a pointer to the memory
3425 if (optype0
== REGOP
3426 && (optype1
== OFFSOP
|| optype1
== MEMOP
))
3428 rtx testlow
= gen_rtx_REG (SImode
, REGNO (operands
[0]));
3430 if (reg_overlap_mentioned_p (testlow
, XEXP (operands
[1], 0))
3431 && reg_overlap_mentioned_p (latehalf
[0], XEXP (operands
[1], 0)))
3433 /* If both halves of dest are used in the src memory address,
3434 compute the address into latehalf of dest.
3435 Note that this can't happen if the dest is two data regs. */
3437 xops
[0] = latehalf
[0];
3438 xops
[1] = XEXP (operands
[1], 0);
3440 handle_compadr (xops
);
3441 if (GET_MODE (operands
[1]) == XFmode
)
3443 operands
[1] = gen_rtx_MEM (XFmode
, latehalf
[0]);
3444 middlehalf
[1] = adjust_address (operands
[1], DImode
, size
- 8);
3445 latehalf
[1] = adjust_address (operands
[1], DImode
, size
- 4);
3449 operands
[1] = gen_rtx_MEM (DImode
, latehalf
[0]);
3450 latehalf
[1] = adjust_address (operands
[1], DImode
, size
- 4);
3454 && reg_overlap_mentioned_p (middlehalf
[0],
3455 XEXP (operands
[1], 0)))
3457 /* Check for two regs used by both source and dest.
3458 Note that this can't happen if the dest is all data regs.
3459 It can happen if the dest is d6, d7, a0.
3460 But in that case, latehalf is an addr reg, so
3461 the code at compadr does ok. */
3463 if (reg_overlap_mentioned_p (testlow
, XEXP (operands
[1], 0))
3464 || reg_overlap_mentioned_p (latehalf
[0], XEXP (operands
[1], 0)))
3467 /* JRV says this can't happen: */
3468 gcc_assert (!addreg0
&& !addreg1
);
3470 /* Only the middle reg conflicts; simply put it last. */
3471 handle_movsi (operands
);
3472 handle_movsi (latehalf
);
3473 handle_movsi (middlehalf
);
3477 else if (reg_overlap_mentioned_p (testlow
, XEXP (operands
[1], 0)))
3478 /* If the low half of dest is mentioned in the source memory
3479 address, the arrange to emit the move late half first. */
3480 dest_overlapped_low
= 1;
3483 /* If one or both operands autodecrementing,
3484 do the two words, high-numbered first. */
3486 /* Likewise, the first move would clobber the source of the second one,
3487 do them in the other order. This happens only for registers;
3488 such overlap can't happen in memory unless the user explicitly
3489 sets it up, and that is an undefined circumstance. */
3491 if (optype0
== PUSHOP
|| optype1
== PUSHOP
3492 || (optype0
== REGOP
&& optype1
== REGOP
3493 && ((middlehalf
[1] && REGNO (operands
[0]) == REGNO (middlehalf
[1]))
3494 || REGNO (operands
[0]) == REGNO (latehalf
[1])))
3495 || dest_overlapped_low
)
3497 /* Make any unoffsettable addresses point at high-numbered word. */
3499 handle_reg_adjust (addreg0
, size
- 4);
3501 handle_reg_adjust (addreg1
, size
- 4);
3504 handle_movsi (latehalf
);
3506 /* Undo the adds we just did. */
3508 handle_reg_adjust (addreg0
, -4);
3510 handle_reg_adjust (addreg1
, -4);
3514 handle_movsi (middlehalf
);
3517 handle_reg_adjust (addreg0
, -4);
3519 handle_reg_adjust (addreg1
, -4);
3522 /* Do low-numbered word. */
3524 handle_movsi (operands
);
3528 /* Normal case: do the two words, low-numbered first. */
3530 m68k_final_prescan_insn (NULL
, operands
, 2);
3531 handle_movsi (operands
);
3533 /* Do the middle one of the three words for long double */
3537 handle_reg_adjust (addreg0
, 4);
3539 handle_reg_adjust (addreg1
, 4);
3541 m68k_final_prescan_insn (NULL
, middlehalf
, 2);
3542 handle_movsi (middlehalf
);
3545 /* Make any unoffsettable addresses point at high-numbered word. */
3547 handle_reg_adjust (addreg0
, 4);
3549 handle_reg_adjust (addreg1
, 4);
3552 m68k_final_prescan_insn (NULL
, latehalf
, 2);
3553 handle_movsi (latehalf
);
3555 /* Undo the adds we just did. */
3557 handle_reg_adjust (addreg0
, -(size
- 4));
3559 handle_reg_adjust (addreg1
, -(size
- 4));
3564 /* Output assembler code to adjust REG by N. */
3566 output_reg_adjust (rtx reg
, int n
)
3570 gcc_assert (GET_MODE (reg
) == SImode
3571 && -12 <= n
&& n
!= 0 && n
<= 12);
3576 s
= "add%.l #12,%0";
3580 s
= "addq%.l #8,%0";
3584 s
= "addq%.l #4,%0";
3588 s
= "sub%.l #12,%0";
3592 s
= "subq%.l #8,%0";
3596 s
= "subq%.l #4,%0";
3604 output_asm_insn (s
, ®
);
3607 /* Emit rtl code to adjust REG by N. */
3609 emit_reg_adjust (rtx reg1
, int n
)
3613 gcc_assert (GET_MODE (reg1
) == SImode
3614 && -12 <= n
&& n
!= 0 && n
<= 12);
3616 reg1
= copy_rtx (reg1
);
3617 reg2
= copy_rtx (reg1
);
3620 emit_insn (gen_subsi3 (reg1
, reg2
, GEN_INT (-n
)));
3622 emit_insn (gen_addsi3 (reg1
, reg2
, GEN_INT (n
)));
3627 /* Output assembler to load address OPERANDS[0] to register OPERANDS[1]. */
3629 output_compadr (rtx operands
[2])
3631 output_asm_insn ("lea %a1,%0", operands
);
3634 /* Output the best assembler insn for moving operands[1] into operands[0]
3637 output_movsi (rtx operands
[2])
3639 output_asm_insn (singlemove_string (operands
), operands
);
3642 /* Copy OP and change its mode to MODE. */
3644 copy_operand (rtx op
, enum machine_mode mode
)
3646 /* ??? This looks really ugly. There must be a better way
3647 to change a mode on the operand. */
3648 if (GET_MODE (op
) != VOIDmode
)
3651 op
= gen_rtx_REG (mode
, REGNO (op
));
3655 PUT_MODE (op
, mode
);
3662 /* Emit rtl code for moving operands[1] into operands[0] as a fullword. */
3664 emit_movsi (rtx operands
[2])
3666 operands
[0] = copy_operand (operands
[0], SImode
);
3667 operands
[1] = copy_operand (operands
[1], SImode
);
3669 emit_insn (gen_movsi (operands
[0], operands
[1]));
3672 /* Output assembler code to perform a doubleword move insn
3673 with operands OPERANDS. */
3675 output_move_double (rtx
*operands
)
3677 handle_move_double (operands
,
3678 output_reg_adjust
, output_compadr
, output_movsi
);
3683 /* Output rtl code to perform a doubleword move insn
3684 with operands OPERANDS. */
3686 m68k_emit_move_double (rtx operands
[2])
3688 handle_move_double (operands
, emit_reg_adjust
, emit_movsi
, emit_movsi
);
3691 /* Ensure mode of ORIG, a REG rtx, is MODE. Returns either ORIG or a
3692 new rtx with the correct mode. */
3695 force_mode (enum machine_mode mode
, rtx orig
)
3697 if (mode
== GET_MODE (orig
))
3700 if (REGNO (orig
) >= FIRST_PSEUDO_REGISTER
)
3703 return gen_rtx_REG (mode
, REGNO (orig
));
3707 fp_reg_operand (rtx op
, enum machine_mode mode ATTRIBUTE_UNUSED
)
3709 return reg_renumber
&& FP_REG_P (op
);
3712 /* Emit insns to move operands[1] into operands[0].
3714 Return 1 if we have written out everything that needs to be done to
3715 do the move. Otherwise, return 0 and the caller will emit the move
3718 Note SCRATCH_REG may not be in the proper mode depending on how it
3719 will be used. This routine is responsible for creating a new copy
3720 of SCRATCH_REG in the proper mode. */
3723 emit_move_sequence (rtx
*operands
, enum machine_mode mode
, rtx scratch_reg
)
3725 register rtx operand0
= operands
[0];
3726 register rtx operand1
= operands
[1];
3730 && reload_in_progress
&& GET_CODE (operand0
) == REG
3731 && REGNO (operand0
) >= FIRST_PSEUDO_REGISTER
)
3732 operand0
= reg_equiv_mem
[REGNO (operand0
)];
3733 else if (scratch_reg
3734 && reload_in_progress
&& GET_CODE (operand0
) == SUBREG
3735 && GET_CODE (SUBREG_REG (operand0
)) == REG
3736 && REGNO (SUBREG_REG (operand0
)) >= FIRST_PSEUDO_REGISTER
)
3738 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
3739 the code which tracks sets/uses for delete_output_reload. */
3740 rtx temp
= gen_rtx_SUBREG (GET_MODE (operand0
),
3741 reg_equiv_mem
[REGNO (SUBREG_REG (operand0
))],
3742 SUBREG_BYTE (operand0
));
3743 operand0
= alter_subreg (&temp
);
3747 && reload_in_progress
&& GET_CODE (operand1
) == REG
3748 && REGNO (operand1
) >= FIRST_PSEUDO_REGISTER
)
3749 operand1
= reg_equiv_mem
[REGNO (operand1
)];
3750 else if (scratch_reg
3751 && reload_in_progress
&& GET_CODE (operand1
) == SUBREG
3752 && GET_CODE (SUBREG_REG (operand1
)) == REG
3753 && REGNO (SUBREG_REG (operand1
)) >= FIRST_PSEUDO_REGISTER
)
3755 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
3756 the code which tracks sets/uses for delete_output_reload. */
3757 rtx temp
= gen_rtx_SUBREG (GET_MODE (operand1
),
3758 reg_equiv_mem
[REGNO (SUBREG_REG (operand1
))],
3759 SUBREG_BYTE (operand1
));
3760 operand1
= alter_subreg (&temp
);
3763 if (scratch_reg
&& reload_in_progress
&& GET_CODE (operand0
) == MEM
3764 && ((tem
= find_replacement (&XEXP (operand0
, 0)))
3765 != XEXP (operand0
, 0)))
3766 operand0
= gen_rtx_MEM (GET_MODE (operand0
), tem
);
3767 if (scratch_reg
&& reload_in_progress
&& GET_CODE (operand1
) == MEM
3768 && ((tem
= find_replacement (&XEXP (operand1
, 0)))
3769 != XEXP (operand1
, 0)))
3770 operand1
= gen_rtx_MEM (GET_MODE (operand1
), tem
);
3772 /* Handle secondary reloads for loads/stores of FP registers where
3773 the address is symbolic by using the scratch register */
3774 if (fp_reg_operand (operand0
, mode
)
3775 && ((GET_CODE (operand1
) == MEM
3776 && ! memory_address_p (DFmode
, XEXP (operand1
, 0)))
3777 || ((GET_CODE (operand1
) == SUBREG
3778 && GET_CODE (XEXP (operand1
, 0)) == MEM
3779 && !memory_address_p (DFmode
, XEXP (XEXP (operand1
, 0), 0)))))
3782 if (GET_CODE (operand1
) == SUBREG
)
3783 operand1
= XEXP (operand1
, 0);
3785 /* SCRATCH_REG will hold an address. We want
3786 it in SImode regardless of what mode it was originally given
3788 scratch_reg
= force_mode (SImode
, scratch_reg
);
3790 /* D might not fit in 14 bits either; for such cases load D into
3792 if (!memory_address_p (Pmode
, XEXP (operand1
, 0)))
3794 emit_move_insn (scratch_reg
, XEXP (XEXP (operand1
, 0), 1));
3795 emit_move_insn (scratch_reg
, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1
, 0)),
3797 XEXP (XEXP (operand1
, 0), 0),
3801 emit_move_insn (scratch_reg
, XEXP (operand1
, 0));
3802 emit_insn (gen_rtx_SET (VOIDmode
, operand0
,
3803 gen_rtx_MEM (mode
, scratch_reg
)));
3806 else if (fp_reg_operand (operand1
, mode
)
3807 && ((GET_CODE (operand0
) == MEM
3808 && ! memory_address_p (DFmode
, XEXP (operand0
, 0)))
3809 || ((GET_CODE (operand0
) == SUBREG
)
3810 && GET_CODE (XEXP (operand0
, 0)) == MEM
3811 && !memory_address_p (DFmode
, XEXP (XEXP (operand0
, 0), 0))))
3814 if (GET_CODE (operand0
) == SUBREG
)
3815 operand0
= XEXP (operand0
, 0);
3817 /* SCRATCH_REG will hold an address and maybe the actual data. We want
3818 it in SIMODE regardless of what mode it was originally given
3820 scratch_reg
= force_mode (SImode
, scratch_reg
);
3822 /* D might not fit in 14 bits either; for such cases load D into
3824 if (!memory_address_p (Pmode
, XEXP (operand0
, 0)))
3826 emit_move_insn (scratch_reg
, XEXP (XEXP (operand0
, 0), 1));
3827 emit_move_insn (scratch_reg
, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0
,
3830 XEXP (XEXP (operand0
, 0),
3835 emit_move_insn (scratch_reg
, XEXP (operand0
, 0));
3836 emit_insn (gen_rtx_SET (VOIDmode
, gen_rtx_MEM (mode
, scratch_reg
),
3840 /* Handle secondary reloads for loads of FP registers from constant
3841 expressions by forcing the constant into memory.
3843 use scratch_reg to hold the address of the memory location.
3845 The proper fix is to change PREFERRED_RELOAD_CLASS to return
3846 NO_REGS when presented with a const_int and an register class
3847 containing only FP registers. Doing so unfortunately creates
3848 more problems than it solves. Fix this for 2.5. */
3849 else if (fp_reg_operand (operand0
, mode
)
3850 && CONSTANT_P (operand1
)
3855 /* SCRATCH_REG will hold an address and maybe the actual data. We want
3856 it in SIMODE regardless of what mode it was originally given
3858 scratch_reg
= force_mode (SImode
, scratch_reg
);
3860 /* Force the constant into memory and put the address of the
3861 memory location into scratch_reg. */
3862 xoperands
[0] = scratch_reg
;
3863 xoperands
[1] = XEXP (force_const_mem (mode
, operand1
), 0);
3864 emit_insn (gen_rtx_SET (mode
, scratch_reg
, xoperands
[1]));
3866 /* Now load the destination register. */
3867 emit_insn (gen_rtx_SET (mode
, operand0
,
3868 gen_rtx_MEM (mode
, scratch_reg
)));
3872 /* Now have insn-emit do whatever it normally does. */
3876 /* Split one or more DImode RTL references into pairs of SImode
3877 references. The RTL can be REG, offsettable MEM, integer constant, or
3878 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
3879 split and "num" is its length. lo_half and hi_half are output arrays
3880 that parallel "operands". */
3883 split_di (rtx operands
[], int num
, rtx lo_half
[], rtx hi_half
[])
3887 rtx op
= operands
[num
];
3889 /* simplify_subreg refuses to split volatile memory addresses,
3890 but we still have to handle it. */
3891 if (GET_CODE (op
) == MEM
)
3893 lo_half
[num
] = adjust_address (op
, SImode
, 4);
3894 hi_half
[num
] = adjust_address (op
, SImode
, 0);
3898 lo_half
[num
] = simplify_gen_subreg (SImode
, op
,
3899 GET_MODE (op
) == VOIDmode
3900 ? DImode
: GET_MODE (op
), 4);
3901 hi_half
[num
] = simplify_gen_subreg (SImode
, op
,
3902 GET_MODE (op
) == VOIDmode
3903 ? DImode
: GET_MODE (op
), 0);
3908 /* Split X into a base and a constant offset, storing them in *BASE
3909 and *OFFSET respectively. */
3912 m68k_split_offset (rtx x
, rtx
*base
, HOST_WIDE_INT
*offset
)
3915 if (GET_CODE (x
) == PLUS
&& GET_CODE (XEXP (x
, 1)) == CONST_INT
)
3917 *offset
+= INTVAL (XEXP (x
, 1));
3923 /* Return true if PATTERN is a PARALLEL suitable for a movem or fmovem
3924 instruction. STORE_P says whether the move is a load or store.
3926 If the instruction uses post-increment or pre-decrement addressing,
3927 AUTOMOD_BASE is the base register and AUTOMOD_OFFSET is the total
3928 adjustment. This adjustment will be made by the first element of
3929 PARALLEL, with the loads or stores starting at element 1. If the
3930 instruction does not use post-increment or pre-decrement addressing,
3931 AUTOMOD_BASE is null, AUTOMOD_OFFSET is 0, and the loads or stores
3932 start at element 0. */
3935 m68k_movem_pattern_p (rtx pattern
, rtx automod_base
,
3936 HOST_WIDE_INT automod_offset
, bool store_p
)
3938 rtx base
, mem_base
, set
, mem
, reg
, last_reg
;
3939 HOST_WIDE_INT offset
, mem_offset
;
3941 enum reg_class rclass
;
3943 len
= XVECLEN (pattern
, 0);
3944 first
= (automod_base
!= NULL
);
3948 /* Stores must be pre-decrement and loads must be post-increment. */
3949 if (store_p
!= (automod_offset
< 0))
3952 /* Work out the base and offset for lowest memory location. */
3953 base
= automod_base
;
3954 offset
= (automod_offset
< 0 ? automod_offset
: 0);
3958 /* Allow any valid base and offset in the first access. */
3965 for (i
= first
; i
< len
; i
++)
3967 /* We need a plain SET. */
3968 set
= XVECEXP (pattern
, 0, i
);
3969 if (GET_CODE (set
) != SET
)
3972 /* Check that we have a memory location... */
3973 mem
= XEXP (set
, !store_p
);
3974 if (!MEM_P (mem
) || !memory_operand (mem
, VOIDmode
))
3977 /* ...with the right address. */
3980 m68k_split_offset (XEXP (mem
, 0), &base
, &offset
);
3981 /* The ColdFire instruction only allows (An) and (d16,An) modes.
3982 There are no mode restrictions for 680x0 besides the
3983 automodification rules enforced above. */
3985 && !m68k_legitimate_base_reg_p (base
, reload_completed
))
3990 m68k_split_offset (XEXP (mem
, 0), &mem_base
, &mem_offset
);
3991 if (!rtx_equal_p (base
, mem_base
) || offset
!= mem_offset
)
3995 /* Check that we have a register of the required mode and class. */
3996 reg
= XEXP (set
, store_p
);
3998 || !HARD_REGISTER_P (reg
)
3999 || GET_MODE (reg
) != reg_raw_mode
[REGNO (reg
)])
4004 /* The register must belong to RCLASS and have a higher number
4005 than the register in the previous SET. */
4006 if (!TEST_HARD_REG_BIT (reg_class_contents
[rclass
], REGNO (reg
))
4007 || REGNO (last_reg
) >= REGNO (reg
))
4012 /* Work out which register class we need. */
4013 if (INT_REGNO_P (REGNO (reg
)))
4014 rclass
= GENERAL_REGS
;
4015 else if (FP_REGNO_P (REGNO (reg
)))
4022 offset
+= GET_MODE_SIZE (GET_MODE (reg
));
4025 /* If we have an automodification, check whether the final offset is OK. */
4026 if (automod_base
&& offset
!= (automod_offset
< 0 ? 0 : automod_offset
))
4029 /* Reject unprofitable cases. */
4030 if (len
< first
+ (rclass
== FP_REGS
? MIN_FMOVEM_REGS
: MIN_MOVEM_REGS
))
4036 /* Return the assembly code template for a movem or fmovem instruction
4037 whose pattern is given by PATTERN. Store the template's operands
4040 If the instruction uses post-increment or pre-decrement addressing,
4041 AUTOMOD_OFFSET is the total adjustment, otherwise it is 0. STORE_P
4042 is true if this is a store instruction. */
4045 m68k_output_movem (rtx
*operands
, rtx pattern
,
4046 HOST_WIDE_INT automod_offset
, bool store_p
)
4051 gcc_assert (GET_CODE (pattern
) == PARALLEL
);
4053 first
= (automod_offset
!= 0);
4054 for (i
= first
; i
< XVECLEN (pattern
, 0); i
++)
4056 /* When using movem with pre-decrement addressing, register X + D0_REG
4057 is controlled by bit 15 - X. For all other addressing modes,
4058 register X + D0_REG is controlled by bit X. Confusingly, the
4059 register mask for fmovem is in the opposite order to that for
4063 gcc_assert (MEM_P (XEXP (XVECEXP (pattern
, 0, i
), !store_p
)));
4064 gcc_assert (REG_P (XEXP (XVECEXP (pattern
, 0, i
), store_p
)));
4065 regno
= REGNO (XEXP (XVECEXP (pattern
, 0, i
), store_p
));
4066 if (automod_offset
< 0)
4068 if (FP_REGNO_P (regno
))
4069 mask
|= 1 << (regno
- FP0_REG
);
4071 mask
|= 1 << (15 - (regno
- D0_REG
));
4075 if (FP_REGNO_P (regno
))
4076 mask
|= 1 << (7 - (regno
- FP0_REG
));
4078 mask
|= 1 << (regno
- D0_REG
);
4083 if (automod_offset
== 0)
4084 operands
[0] = XEXP (XEXP (XVECEXP (pattern
, 0, first
), !store_p
), 0);
4085 else if (automod_offset
< 0)
4086 operands
[0] = gen_rtx_PRE_DEC (Pmode
, SET_DEST (XVECEXP (pattern
, 0, 0)));
4088 operands
[0] = gen_rtx_POST_INC (Pmode
, SET_DEST (XVECEXP (pattern
, 0, 0)));
4089 operands
[1] = GEN_INT (mask
);
4090 if (FP_REGNO_P (REGNO (XEXP (XVECEXP (pattern
, 0, first
), store_p
))))
4093 return "fmovem %1,%a0";
4095 return "fmovem %a0,%1";
4100 return "movem%.l %1,%a0";
4102 return "movem%.l %a0,%1";
4106 /* Return a REG that occurs in ADDR with coefficient 1.
4107 ADDR can be effectively incremented by incrementing REG. */
4110 find_addr_reg (rtx addr
)
4112 while (GET_CODE (addr
) == PLUS
)
4114 if (GET_CODE (XEXP (addr
, 0)) == REG
)
4115 addr
= XEXP (addr
, 0);
4116 else if (GET_CODE (XEXP (addr
, 1)) == REG
)
4117 addr
= XEXP (addr
, 1);
4118 else if (CONSTANT_P (XEXP (addr
, 0)))
4119 addr
= XEXP (addr
, 1);
4120 else if (CONSTANT_P (XEXP (addr
, 1)))
4121 addr
= XEXP (addr
, 0);
4125 gcc_assert (GET_CODE (addr
) == REG
);
4129 /* Output assembler code to perform a 32-bit 3-operand add. */
4132 output_addsi3 (rtx
*operands
)
4134 if (! operands_match_p (operands
[0], operands
[1]))
4136 if (!ADDRESS_REG_P (operands
[1]))
4138 rtx tmp
= operands
[1];
4140 operands
[1] = operands
[2];
4144 /* These insns can result from reloads to access
4145 stack slots over 64k from the frame pointer. */
4146 if (GET_CODE (operands
[2]) == CONST_INT
4147 && (INTVAL (operands
[2]) < -32768 || INTVAL (operands
[2]) > 32767))
4148 return "move%.l %2,%0\n\tadd%.l %1,%0";
4149 if (GET_CODE (operands
[2]) == REG
)
4150 return MOTOROLA
? "lea (%1,%2.l),%0" : "lea %1@(0,%2:l),%0";
4151 return MOTOROLA
? "lea (%c2,%1),%0" : "lea %1@(%c2),%0";
4153 if (GET_CODE (operands
[2]) == CONST_INT
)
4155 if (INTVAL (operands
[2]) > 0
4156 && INTVAL (operands
[2]) <= 8)
4157 return "addq%.l %2,%0";
4158 if (INTVAL (operands
[2]) < 0
4159 && INTVAL (operands
[2]) >= -8)
4161 operands
[2] = GEN_INT (- INTVAL (operands
[2]));
4162 return "subq%.l %2,%0";
4164 /* On the CPU32 it is faster to use two addql instructions to
4165 add a small integer (8 < N <= 16) to a register.
4166 Likewise for subql. */
4167 if (TUNE_CPU32
&& REG_P (operands
[0]))
4169 if (INTVAL (operands
[2]) > 8
4170 && INTVAL (operands
[2]) <= 16)
4172 operands
[2] = GEN_INT (INTVAL (operands
[2]) - 8);
4173 return "addq%.l #8,%0\n\taddq%.l %2,%0";
4175 if (INTVAL (operands
[2]) < -8
4176 && INTVAL (operands
[2]) >= -16)
4178 operands
[2] = GEN_INT (- INTVAL (operands
[2]) - 8);
4179 return "subq%.l #8,%0\n\tsubq%.l %2,%0";
4182 if (ADDRESS_REG_P (operands
[0])
4183 && INTVAL (operands
[2]) >= -0x8000
4184 && INTVAL (operands
[2]) < 0x8000)
4187 return "add%.w %2,%0";
4189 return MOTOROLA
? "lea (%c2,%0),%0" : "lea %0@(%c2),%0";
4192 return "add%.l %2,%0";
4195 /* Store in cc_status the expressions that the condition codes will
4196 describe after execution of an instruction whose pattern is EXP.
4197 Do not alter them if the instruction would not alter the cc's. */
4199 /* On the 68000, all the insns to store in an address register fail to
4200 set the cc's. However, in some cases these instructions can make it
4201 possibly invalid to use the saved cc's. In those cases we clear out
4202 some or all of the saved cc's so they won't be used. */
4205 notice_update_cc (rtx exp
, rtx insn
)
4207 if (GET_CODE (exp
) == SET
)
4209 if (GET_CODE (SET_SRC (exp
)) == CALL
)
4211 else if (ADDRESS_REG_P (SET_DEST (exp
)))
4213 if (cc_status
.value1
&& modified_in_p (cc_status
.value1
, insn
))
4214 cc_status
.value1
= 0;
4215 if (cc_status
.value2
&& modified_in_p (cc_status
.value2
, insn
))
4216 cc_status
.value2
= 0;
4218 /* fmoves to memory or data registers do not set the condition
4219 codes. Normal moves _do_ set the condition codes, but not in
4220 a way that is appropriate for comparison with 0, because -0.0
4221 would be treated as a negative nonzero number. Note that it
4222 isn't appropriate to conditionalize this restriction on
4223 HONOR_SIGNED_ZEROS because that macro merely indicates whether
4224 we care about the difference between -0.0 and +0.0. */
4225 else if (!FP_REG_P (SET_DEST (exp
))
4226 && SET_DEST (exp
) != cc0_rtx
4227 && (FP_REG_P (SET_SRC (exp
))
4228 || GET_CODE (SET_SRC (exp
)) == FIX
4229 || FLOAT_MODE_P (GET_MODE (SET_DEST (exp
)))))
4231 /* A pair of move insns doesn't produce a useful overall cc. */
4232 else if (!FP_REG_P (SET_DEST (exp
))
4233 && !FP_REG_P (SET_SRC (exp
))
4234 && GET_MODE_SIZE (GET_MODE (SET_SRC (exp
))) > 4
4235 && (GET_CODE (SET_SRC (exp
)) == REG
4236 || GET_CODE (SET_SRC (exp
)) == MEM
4237 || GET_CODE (SET_SRC (exp
)) == CONST_DOUBLE
))
4239 else if (SET_DEST (exp
) != pc_rtx
)
4241 cc_status
.flags
= 0;
4242 cc_status
.value1
= SET_DEST (exp
);
4243 cc_status
.value2
= SET_SRC (exp
);
4246 else if (GET_CODE (exp
) == PARALLEL
4247 && GET_CODE (XVECEXP (exp
, 0, 0)) == SET
)
4249 rtx dest
= SET_DEST (XVECEXP (exp
, 0, 0));
4250 rtx src
= SET_SRC (XVECEXP (exp
, 0, 0));
4252 if (ADDRESS_REG_P (dest
))
4254 else if (dest
!= pc_rtx
)
4256 cc_status
.flags
= 0;
4257 cc_status
.value1
= dest
;
4258 cc_status
.value2
= src
;
4263 if (cc_status
.value2
!= 0
4264 && ADDRESS_REG_P (cc_status
.value2
)
4265 && GET_MODE (cc_status
.value2
) == QImode
)
4267 if (cc_status
.value2
!= 0)
4268 switch (GET_CODE (cc_status
.value2
))
4270 case ASHIFT
: case ASHIFTRT
: case LSHIFTRT
:
4271 case ROTATE
: case ROTATERT
:
4272 /* These instructions always clear the overflow bit, and set
4273 the carry to the bit shifted out. */
4274 cc_status
.flags
|= CC_OVERFLOW_UNUSABLE
| CC_NO_CARRY
;
4277 case PLUS
: case MINUS
: case MULT
:
4278 case DIV
: case UDIV
: case MOD
: case UMOD
: case NEG
:
4279 if (GET_MODE (cc_status
.value2
) != VOIDmode
)
4280 cc_status
.flags
|= CC_NO_OVERFLOW
;
4283 /* (SET r1 (ZERO_EXTEND r2)) on this machine
4284 ends with a move insn moving r2 in r2's mode.
4285 Thus, the cc's are set for r2.
4286 This can set N bit spuriously. */
4287 cc_status
.flags
|= CC_NOT_NEGATIVE
;
4292 if (cc_status
.value1
&& GET_CODE (cc_status
.value1
) == REG
4294 && reg_overlap_mentioned_p (cc_status
.value1
, cc_status
.value2
))
4295 cc_status
.value2
= 0;
4296 if (((cc_status
.value1
&& FP_REG_P (cc_status
.value1
))
4297 || (cc_status
.value2
&& FP_REG_P (cc_status
.value2
))))
4298 cc_status
.flags
= CC_IN_68881
;
4299 if (cc_status
.value2
&& GET_CODE (cc_status
.value2
) == COMPARE
4300 && GET_MODE_CLASS (GET_MODE (XEXP (cc_status
.value2
, 0))) == MODE_FLOAT
)
4302 cc_status
.flags
= CC_IN_68881
;
4303 if (!FP_REG_P (XEXP (cc_status
.value2
, 0)))
4304 cc_status
.flags
|= CC_REVERSED
;
4309 output_move_const_double (rtx
*operands
)
4311 int code
= standard_68881_constant_p (operands
[1]);
4315 static char buf
[40];
4317 sprintf (buf
, "fmovecr #0x%x,%%0", code
& 0xff);
4320 return "fmove%.d %1,%0";
4324 output_move_const_single (rtx
*operands
)
4326 int code
= standard_68881_constant_p (operands
[1]);
4330 static char buf
[40];
4332 sprintf (buf
, "fmovecr #0x%x,%%0", code
& 0xff);
4335 return "fmove%.s %f1,%0";
4338 /* Return nonzero if X, a CONST_DOUBLE, has a value that we can get
4339 from the "fmovecr" instruction.
4340 The value, anded with 0xff, gives the code to use in fmovecr
4341 to get the desired constant. */
4343 /* This code has been fixed for cross-compilation. */
4345 static int inited_68881_table
= 0;
4347 static const char *const strings_68881
[7] = {
4357 static const int codes_68881
[7] = {
4367 REAL_VALUE_TYPE values_68881
[7];
4369 /* Set up values_68881 array by converting the decimal values
4370 strings_68881 to binary. */
4373 init_68881_table (void)
4377 enum machine_mode mode
;
4380 for (i
= 0; i
< 7; i
++)
4384 r
= REAL_VALUE_ATOF (strings_68881
[i
], mode
);
4385 values_68881
[i
] = r
;
4387 inited_68881_table
= 1;
4391 standard_68881_constant_p (rtx x
)
4396 /* fmovecr must be emulated on the 68040 and 68060, so it shouldn't be
4397 used at all on those chips. */
4401 if (! inited_68881_table
)
4402 init_68881_table ();
4404 REAL_VALUE_FROM_CONST_DOUBLE (r
, x
);
4406 /* Use REAL_VALUES_IDENTICAL instead of REAL_VALUES_EQUAL so that -0.0
4408 for (i
= 0; i
< 6; i
++)
4410 if (REAL_VALUES_IDENTICAL (r
, values_68881
[i
]))
4411 return (codes_68881
[i
]);
4414 if (GET_MODE (x
) == SFmode
)
4417 if (REAL_VALUES_EQUAL (r
, values_68881
[6]))
4418 return (codes_68881
[6]);
4420 /* larger powers of ten in the constants ram are not used
4421 because they are not equal to a `double' C constant. */
4425 /* If X is a floating-point constant, return the logarithm of X base 2,
4426 or 0 if X is not a power of 2. */
4429 floating_exact_log2 (rtx x
)
4431 REAL_VALUE_TYPE r
, r1
;
4434 REAL_VALUE_FROM_CONST_DOUBLE (r
, x
);
4436 if (REAL_VALUES_LESS (r
, dconst1
))
4439 exp
= real_exponent (&r
);
4440 real_2expN (&r1
, exp
, DFmode
);
4441 if (REAL_VALUES_EQUAL (r1
, r
))
4447 /* A C compound statement to output to stdio stream STREAM the
4448 assembler syntax for an instruction operand X. X is an RTL
4451 CODE is a value that can be used to specify one of several ways
4452 of printing the operand. It is used when identical operands
4453 must be printed differently depending on the context. CODE
4454 comes from the `%' specification that was used to request
4455 printing of the operand. If the specification was just `%DIGIT'
4456 then CODE is 0; if the specification was `%LTR DIGIT' then CODE
4457 is the ASCII code for LTR.
4459 If X is a register, this macro should print the register's name.
4460 The names can be found in an array `reg_names' whose type is
4461 `char *[]'. `reg_names' is initialized from `REGISTER_NAMES'.
4463 When the machine description has a specification `%PUNCT' (a `%'
4464 followed by a punctuation character), this macro is called with
4465 a null pointer for X and the punctuation character for CODE.
4467 The m68k specific codes are:
4469 '.' for dot needed in Motorola-style opcode names.
4470 '-' for an operand pushing on the stack:
4471 sp@-, -(sp) or -(%sp) depending on the style of syntax.
4472 '+' for an operand pushing on the stack:
4473 sp@+, (sp)+ or (%sp)+ depending on the style of syntax.
4474 '@' for a reference to the top word on the stack:
4475 sp@, (sp) or (%sp) depending on the style of syntax.
4476 '#' for an immediate operand prefix (# in MIT and Motorola syntax
4477 but & in SGS syntax).
4478 '!' for the cc register (used in an `and to cc' insn).
4479 '$' for the letter `s' in an op code, but only on the 68040.
4480 '&' for the letter `d' in an op code, but only on the 68040.
4481 '/' for register prefix needed by longlong.h.
4482 '?' for m68k_library_id_string
4484 'b' for byte insn (no effect, on the Sun; this is for the ISI).
4485 'd' to force memory addressing to be absolute, not relative.
4486 'f' for float insn (print a CONST_DOUBLE as a float rather than in hex)
4487 'x' for float insn (print a CONST_DOUBLE as a float rather than in hex),
4488 or print pair of registers as rx:ry.
4489 'p' print an address with @PLTPC attached, but only if the operand
4490 is not locally-bound. */
4493 print_operand (FILE *file
, rtx op
, int letter
)
4498 fprintf (file
, ".");
4500 else if (letter
== '#')
4501 asm_fprintf (file
, "%I");
4502 else if (letter
== '-')
4503 asm_fprintf (file
, MOTOROLA
? "-(%Rsp)" : "%Rsp@-");
4504 else if (letter
== '+')
4505 asm_fprintf (file
, MOTOROLA
? "(%Rsp)+" : "%Rsp@+");
4506 else if (letter
== '@')
4507 asm_fprintf (file
, MOTOROLA
? "(%Rsp)" : "%Rsp@");
4508 else if (letter
== '!')
4509 asm_fprintf (file
, "%Rfpcr");
4510 else if (letter
== '$')
4513 fprintf (file
, "s");
4515 else if (letter
== '&')
4518 fprintf (file
, "d");
4520 else if (letter
== '/')
4521 asm_fprintf (file
, "%R");
4522 else if (letter
== '?')
4523 asm_fprintf (file
, m68k_library_id_string
);
4524 else if (letter
== 'p')
4526 output_addr_const (file
, op
);
4527 if (!(GET_CODE (op
) == SYMBOL_REF
&& SYMBOL_REF_LOCAL_P (op
)))
4528 fprintf (file
, "@PLTPC");
4530 else if (GET_CODE (op
) == REG
)
4533 /* Print out the second register name of a register pair.
4534 I.e., R (6) => 7. */
4535 fputs (M68K_REGNAME(REGNO (op
) + 1), file
);
4537 fputs (M68K_REGNAME(REGNO (op
)), file
);
4539 else if (GET_CODE (op
) == MEM
)
4541 output_address (XEXP (op
, 0));
4542 if (letter
== 'd' && ! TARGET_68020
4543 && CONSTANT_ADDRESS_P (XEXP (op
, 0))
4544 && !(GET_CODE (XEXP (op
, 0)) == CONST_INT
4545 && INTVAL (XEXP (op
, 0)) < 0x8000
4546 && INTVAL (XEXP (op
, 0)) >= -0x8000))
4547 fprintf (file
, MOTOROLA
? ".l" : ":l");
4549 else if (GET_CODE (op
) == CONST_DOUBLE
&& GET_MODE (op
) == SFmode
)
4553 REAL_VALUE_FROM_CONST_DOUBLE (r
, op
);
4554 REAL_VALUE_TO_TARGET_SINGLE (r
, l
);
4555 asm_fprintf (file
, "%I0x%lx", l
& 0xFFFFFFFF);
4557 else if (GET_CODE (op
) == CONST_DOUBLE
&& GET_MODE (op
) == XFmode
)
4561 REAL_VALUE_FROM_CONST_DOUBLE (r
, op
);
4562 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r
, l
);
4563 asm_fprintf (file
, "%I0x%lx%08lx%08lx", l
[0] & 0xFFFFFFFF,
4564 l
[1] & 0xFFFFFFFF, l
[2] & 0xFFFFFFFF);
4566 else if (GET_CODE (op
) == CONST_DOUBLE
&& GET_MODE (op
) == DFmode
)
4570 REAL_VALUE_FROM_CONST_DOUBLE (r
, op
);
4571 REAL_VALUE_TO_TARGET_DOUBLE (r
, l
);
4572 asm_fprintf (file
, "%I0x%lx%08lx", l
[0] & 0xFFFFFFFF, l
[1] & 0xFFFFFFFF);
4576 /* Use `print_operand_address' instead of `output_addr_const'
4577 to ensure that we print relevant PIC stuff. */
4578 asm_fprintf (file
, "%I");
4580 && (GET_CODE (op
) == SYMBOL_REF
|| GET_CODE (op
) == CONST
))
4581 print_operand_address (file
, op
);
4583 output_addr_const (file
, op
);
4587 /* Return string for TLS relocation RELOC. */
4590 m68k_get_reloc_decoration (enum m68k_reloc reloc
)
4592 /* To my knowledge, !MOTOROLA assemblers don't support TLS. */
4593 gcc_assert (MOTOROLA
|| reloc
== RELOC_GOT
);
4600 if (flag_pic
== 1 && TARGET_68020
)
4641 /* m68k implementation of OUTPUT_ADDR_CONST_EXTRA. */
4644 m68k_output_addr_const_extra (FILE *file
, rtx x
)
4646 if (GET_CODE (x
) == UNSPEC
)
4648 switch (XINT (x
, 1))
4650 case UNSPEC_RELOC16
:
4651 case UNSPEC_RELOC32
:
4652 output_addr_const (file
, XVECEXP (x
, 0, 0));
4653 fputs (m68k_get_reloc_decoration
4654 ((enum m68k_reloc
) INTVAL (XVECEXP (x
, 0, 1))), file
);
4665 /* M68K implementation of TARGET_ASM_OUTPUT_DWARF_DTPREL. */
4668 m68k_output_dwarf_dtprel (FILE *file
, int size
, rtx x
)
4670 gcc_assert (size
== 4);
4671 fputs ("\t.long\t", file
);
4672 output_addr_const (file
, x
);
4673 fputs ("@TLSLDO+0x8000", file
);
4676 /* In the name of slightly smaller debug output, and to cater to
4677 general assembler lossage, recognize various UNSPEC sequences
4678 and turn them back into a direct symbol reference. */
4681 m68k_delegitimize_address (rtx orig_x
)
4684 struct m68k_address addr
;
4687 orig_x
= delegitimize_mem_from_attrs (orig_x
);
4692 if (GET_CODE (x
) != PLUS
|| GET_MODE (x
) != Pmode
)
4695 if (!m68k_decompose_address (GET_MODE (x
), x
, false, &addr
)
4696 || addr
.offset
== NULL_RTX
4697 || GET_CODE (addr
.offset
) != CONST
)
4700 unspec
= XEXP (addr
.offset
, 0);
4701 if (GET_CODE (unspec
) == PLUS
&& CONST_INT_P (XEXP (unspec
, 1)))
4702 unspec
= XEXP (unspec
, 0);
4703 if (GET_CODE (unspec
) != UNSPEC
4704 || (XINT (unspec
, 1) != UNSPEC_RELOC16
4705 && XINT (unspec
, 1) != UNSPEC_RELOC32
))
4707 x
= XVECEXP (unspec
, 0, 0);
4708 gcc_assert (GET_CODE (x
) == SYMBOL_REF
|| GET_CODE (x
) == LABEL_REF
);
4709 if (unspec
!= XEXP (addr
.offset
, 0))
4710 x
= gen_rtx_PLUS (Pmode
, x
, XEXP (XEXP (addr
.offset
, 0), 1));
4713 rtx idx
= addr
.index
;
4714 if (addr
.scale
!= 1)
4715 idx
= gen_rtx_MULT (Pmode
, idx
, GEN_INT (addr
.scale
));
4716 x
= gen_rtx_PLUS (Pmode
, idx
, x
);
4719 x
= gen_rtx_PLUS (Pmode
, addr
.base
, x
);
4721 x
= replace_equiv_address_nv (orig_x
, x
);
4726 /* A C compound statement to output to stdio stream STREAM the
4727 assembler syntax for an instruction operand that is a memory
4728 reference whose address is ADDR. ADDR is an RTL expression.
4730 Note that this contains a kludge that knows that the only reason
4731 we have an address (plus (label_ref...) (reg...)) when not generating
4732 PIC code is in the insn before a tablejump, and we know that m68k.md
4733 generates a label LInnn: on such an insn.
4735 It is possible for PIC to generate a (plus (label_ref...) (reg...))
4736 and we handle that just like we would a (plus (symbol_ref...) (reg...)).
4738 This routine is responsible for distinguishing between -fpic and -fPIC
4739 style relocations in an address. When generating -fpic code the
4740 offset is output in word mode (e.g. movel a5@(_foo:w), a0). When generating
4741 -fPIC code the offset is output in long mode (e.g. movel a5@(_foo:l), a0) */
4744 print_operand_address (FILE *file
, rtx addr
)
4746 struct m68k_address address
;
4748 if (!m68k_decompose_address (QImode
, addr
, true, &address
))
4751 if (address
.code
== PRE_DEC
)
4752 fprintf (file
, MOTOROLA
? "-(%s)" : "%s@-",
4753 M68K_REGNAME (REGNO (address
.base
)));
4754 else if (address
.code
== POST_INC
)
4755 fprintf (file
, MOTOROLA
? "(%s)+" : "%s@+",
4756 M68K_REGNAME (REGNO (address
.base
)));
4757 else if (!address
.base
&& !address
.index
)
4759 /* A constant address. */
4760 gcc_assert (address
.offset
== addr
);
4761 if (GET_CODE (addr
) == CONST_INT
)
4763 /* (xxx).w or (xxx).l. */
4764 if (IN_RANGE (INTVAL (addr
), -0x8000, 0x7fff))
4765 fprintf (file
, MOTOROLA
? "%d.w" : "%d:w", (int) INTVAL (addr
));
4767 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, INTVAL (addr
));
4769 else if (TARGET_PCREL
)
4771 /* (d16,PC) or (bd,PC,Xn) (with suppressed index register). */
4773 output_addr_const (file
, addr
);
4774 asm_fprintf (file
, flag_pic
== 1 ? ":w,%Rpc)" : ":l,%Rpc)");
4778 /* (xxx).l. We need a special case for SYMBOL_REF if the symbol
4779 name ends in `.<letter>', as the last 2 characters can be
4780 mistaken as a size suffix. Put the name in parentheses. */
4781 if (GET_CODE (addr
) == SYMBOL_REF
4782 && strlen (XSTR (addr
, 0)) > 2
4783 && XSTR (addr
, 0)[strlen (XSTR (addr
, 0)) - 2] == '.')
4786 output_addr_const (file
, addr
);
4790 output_addr_const (file
, addr
);
4797 /* If ADDR is a (d8,pc,Xn) address, this is the number of the
4798 label being accessed, otherwise it is -1. */
4799 labelno
= (address
.offset
4801 && GET_CODE (address
.offset
) == LABEL_REF
4802 ? CODE_LABEL_NUMBER (XEXP (address
.offset
, 0))
4806 /* Print the "offset(base" component. */
4808 asm_fprintf (file
, "%LL%d(%Rpc,", labelno
);
4812 output_addr_const (file
, address
.offset
);
4816 fputs (M68K_REGNAME (REGNO (address
.base
)), file
);
4818 /* Print the ",index" component, if any. */
4823 fprintf (file
, "%s.%c",
4824 M68K_REGNAME (REGNO (address
.index
)),
4825 GET_MODE (address
.index
) == HImode
? 'w' : 'l');
4826 if (address
.scale
!= 1)
4827 fprintf (file
, "*%d", address
.scale
);
4831 else /* !MOTOROLA */
4833 if (!address
.offset
&& !address
.index
)
4834 fprintf (file
, "%s@", M68K_REGNAME (REGNO (address
.base
)));
4837 /* Print the "base@(offset" component. */
4839 asm_fprintf (file
, "%Rpc@(%LL%d", labelno
);
4843 fputs (M68K_REGNAME (REGNO (address
.base
)), file
);
4844 fprintf (file
, "@(");
4846 output_addr_const (file
, address
.offset
);
4848 /* Print the ",index" component, if any. */
4851 fprintf (file
, ",%s:%c",
4852 M68K_REGNAME (REGNO (address
.index
)),
4853 GET_MODE (address
.index
) == HImode
? 'w' : 'l');
4854 if (address
.scale
!= 1)
4855 fprintf (file
, ":%d", address
.scale
);
4863 /* Check for cases where a clr insns can be omitted from code using
4864 strict_low_part sets. For example, the second clrl here is not needed:
4865 clrl d0; movw a0@+,d0; use d0; clrl d0; movw a0@+; use d0; ...
4867 MODE is the mode of this STRICT_LOW_PART set. FIRST_INSN is the clear
4868 insn we are checking for redundancy. TARGET is the register set by the
4872 strict_low_part_peephole_ok (enum machine_mode mode
, rtx first_insn
,
4877 while ((p
= PREV_INSN (p
)))
4879 if (NOTE_INSN_BASIC_BLOCK_P (p
))
4885 /* If it isn't an insn, then give up. */
4889 if (reg_set_p (target
, p
))
4891 rtx set
= single_set (p
);
4894 /* If it isn't an easy to recognize insn, then give up. */
4898 dest
= SET_DEST (set
);
4900 /* If this sets the entire target register to zero, then our
4901 first_insn is redundant. */
4902 if (rtx_equal_p (dest
, target
)
4903 && SET_SRC (set
) == const0_rtx
)
4905 else if (GET_CODE (dest
) == STRICT_LOW_PART
4906 && GET_CODE (XEXP (dest
, 0)) == REG
4907 && REGNO (XEXP (dest
, 0)) == REGNO (target
)
4908 && (GET_MODE_SIZE (GET_MODE (XEXP (dest
, 0)))
4909 <= GET_MODE_SIZE (mode
)))
4910 /* This is a strict low part set which modifies less than
4911 we are using, so it is safe. */
4921 /* Operand predicates for implementing asymmetric pc-relative addressing
4922 on m68k. The m68k supports pc-relative addressing (mode 7, register 2)
4923 when used as a source operand, but not as a destination operand.
4925 We model this by restricting the meaning of the basic predicates
4926 (general_operand, memory_operand, etc) to forbid the use of this
4927 addressing mode, and then define the following predicates that permit
4928 this addressing mode. These predicates can then be used for the
4929 source operands of the appropriate instructions.
4931 n.b. While it is theoretically possible to change all machine patterns
4932 to use this addressing more where permitted by the architecture,
4933 it has only been implemented for "common" cases: SImode, HImode, and
4934 QImode operands, and only for the principle operations that would
4935 require this addressing mode: data movement and simple integer operations.
4937 In parallel with these new predicates, two new constraint letters
4938 were defined: 'S' and 'T'. 'S' is the -mpcrel analog of 'm'.
4939 'T' replaces 's' in the non-pcrel case. It is a no-op in the pcrel case.
4940 In the pcrel case 's' is only valid in combination with 'a' registers.
4941 See addsi3, subsi3, cmpsi, and movsi patterns for a better understanding
4942 of how these constraints are used.
4944 The use of these predicates is strictly optional, though patterns that
4945 don't will cause an extra reload register to be allocated where one
4948 lea (abc:w,%pc),%a0 ; need to reload address
4949 moveq &1,%d1 ; since write to pc-relative space
4950 movel %d1,%a0@ ; is not allowed
4952 lea (abc:w,%pc),%a1 ; no need to reload address here
4953 movel %a1@,%d0 ; since "movel (abc:w,%pc),%d0" is ok
4955 For more info, consult tiemann@cygnus.com.
4958 All of the ugliness with predicates and constraints is due to the
4959 simple fact that the m68k does not allow a pc-relative addressing
4960 mode as a destination. gcc does not distinguish between source and
4961 destination addresses. Hence, if we claim that pc-relative address
4962 modes are valid, e.g. TARGET_LEGITIMATE_ADDRESS_P accepts them, then we
4963 end up with invalid code. To get around this problem, we left
4964 pc-relative modes as invalid addresses, and then added special
4965 predicates and constraints to accept them.
4967 A cleaner way to handle this is to modify gcc to distinguish
4968 between source and destination addresses. We can then say that
4969 pc-relative is a valid source address but not a valid destination
4970 address, and hopefully avoid a lot of the predicate and constraint
4971 hackery. Unfortunately, this would be a pretty big change. It would
4972 be a useful change for a number of ports, but there aren't any current
4973 plans to undertake this.
4975 ***************************************************************************/
4979 output_andsi3 (rtx
*operands
)
4982 if (GET_CODE (operands
[2]) == CONST_INT
4983 && (INTVAL (operands
[2]) | 0xffff) == -1
4984 && (DATA_REG_P (operands
[0])
4985 || offsettable_memref_p (operands
[0]))
4986 && !TARGET_COLDFIRE
)
4988 if (GET_CODE (operands
[0]) != REG
)
4989 operands
[0] = adjust_address (operands
[0], HImode
, 2);
4990 operands
[2] = GEN_INT (INTVAL (operands
[2]) & 0xffff);
4991 /* Do not delete a following tstl %0 insn; that would be incorrect. */
4993 if (operands
[2] == const0_rtx
)
4995 return "and%.w %2,%0";
4997 if (GET_CODE (operands
[2]) == CONST_INT
4998 && (logval
= exact_log2 (~ INTVAL (operands
[2]) & 0xffffffff)) >= 0
4999 && (DATA_REG_P (operands
[0])
5000 || offsettable_memref_p (operands
[0])))
5002 if (DATA_REG_P (operands
[0]))
5003 operands
[1] = GEN_INT (logval
);
5006 operands
[0] = adjust_address (operands
[0], SImode
, 3 - (logval
/ 8));
5007 operands
[1] = GEN_INT (logval
% 8);
5009 /* This does not set condition codes in a standard way. */
5011 return "bclr %1,%0";
5013 return "and%.l %2,%0";
5017 output_iorsi3 (rtx
*operands
)
5019 register int logval
;
5020 if (GET_CODE (operands
[2]) == CONST_INT
5021 && INTVAL (operands
[2]) >> 16 == 0
5022 && (DATA_REG_P (operands
[0])
5023 || offsettable_memref_p (operands
[0]))
5024 && !TARGET_COLDFIRE
)
5026 if (GET_CODE (operands
[0]) != REG
)
5027 operands
[0] = adjust_address (operands
[0], HImode
, 2);
5028 /* Do not delete a following tstl %0 insn; that would be incorrect. */
5030 if (INTVAL (operands
[2]) == 0xffff)
5031 return "mov%.w %2,%0";
5032 return "or%.w %2,%0";
5034 if (GET_CODE (operands
[2]) == CONST_INT
5035 && (logval
= exact_log2 (INTVAL (operands
[2]) & 0xffffffff)) >= 0
5036 && (DATA_REG_P (operands
[0])
5037 || offsettable_memref_p (operands
[0])))
5039 if (DATA_REG_P (operands
[0]))
5040 operands
[1] = GEN_INT (logval
);
5043 operands
[0] = adjust_address (operands
[0], SImode
, 3 - (logval
/ 8));
5044 operands
[1] = GEN_INT (logval
% 8);
5047 return "bset %1,%0";
5049 return "or%.l %2,%0";
5053 output_xorsi3 (rtx
*operands
)
5055 register int logval
;
5056 if (GET_CODE (operands
[2]) == CONST_INT
5057 && INTVAL (operands
[2]) >> 16 == 0
5058 && (offsettable_memref_p (operands
[0]) || DATA_REG_P (operands
[0]))
5059 && !TARGET_COLDFIRE
)
5061 if (! DATA_REG_P (operands
[0]))
5062 operands
[0] = adjust_address (operands
[0], HImode
, 2);
5063 /* Do not delete a following tstl %0 insn; that would be incorrect. */
5065 if (INTVAL (operands
[2]) == 0xffff)
5067 return "eor%.w %2,%0";
5069 if (GET_CODE (operands
[2]) == CONST_INT
5070 && (logval
= exact_log2 (INTVAL (operands
[2]) & 0xffffffff)) >= 0
5071 && (DATA_REG_P (operands
[0])
5072 || offsettable_memref_p (operands
[0])))
5074 if (DATA_REG_P (operands
[0]))
5075 operands
[1] = GEN_INT (logval
);
5078 operands
[0] = adjust_address (operands
[0], SImode
, 3 - (logval
/ 8));
5079 operands
[1] = GEN_INT (logval
% 8);
5082 return "bchg %1,%0";
5084 return "eor%.l %2,%0";
5087 /* Return the instruction that should be used for a call to address X,
5088 which is known to be in operand 0. */
5093 if (symbolic_operand (x
, VOIDmode
))
5094 return m68k_symbolic_call
;
5099 /* Likewise sibling calls. */
5102 output_sibcall (rtx x
)
5104 if (symbolic_operand (x
, VOIDmode
))
5105 return m68k_symbolic_jump
;
5111 m68k_output_mi_thunk (FILE *file
, tree thunk ATTRIBUTE_UNUSED
,
5112 HOST_WIDE_INT delta
, HOST_WIDE_INT vcall_offset
,
5115 rtx this_slot
, offset
, addr
, mem
, insn
, tmp
;
5117 /* Avoid clobbering the struct value reg by using the
5118 static chain reg as a temporary. */
5119 tmp
= gen_rtx_REG (Pmode
, STATIC_CHAIN_REGNUM
);
5121 /* Pretend to be a post-reload pass while generating rtl. */
5122 reload_completed
= 1;
5124 /* The "this" pointer is stored at 4(%sp). */
5125 this_slot
= gen_rtx_MEM (Pmode
, plus_constant (stack_pointer_rtx
, 4));
5127 /* Add DELTA to THIS. */
5130 /* Make the offset a legitimate operand for memory addition. */
5131 offset
= GEN_INT (delta
);
5132 if ((delta
< -8 || delta
> 8)
5133 && (TARGET_COLDFIRE
|| USE_MOVQ (delta
)))
5135 emit_move_insn (gen_rtx_REG (Pmode
, D0_REG
), offset
);
5136 offset
= gen_rtx_REG (Pmode
, D0_REG
);
5138 emit_insn (gen_add3_insn (copy_rtx (this_slot
),
5139 copy_rtx (this_slot
), offset
));
5142 /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
5143 if (vcall_offset
!= 0)
5145 /* Set the static chain register to *THIS. */
5146 emit_move_insn (tmp
, this_slot
);
5147 emit_move_insn (tmp
, gen_rtx_MEM (Pmode
, tmp
));
5149 /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
5150 addr
= plus_constant (tmp
, vcall_offset
);
5151 if (!m68k_legitimate_address_p (Pmode
, addr
, true))
5153 emit_insn (gen_rtx_SET (VOIDmode
, tmp
, addr
));
5157 /* Load the offset into %d0 and add it to THIS. */
5158 emit_move_insn (gen_rtx_REG (Pmode
, D0_REG
),
5159 gen_rtx_MEM (Pmode
, addr
));
5160 emit_insn (gen_add3_insn (copy_rtx (this_slot
),
5161 copy_rtx (this_slot
),
5162 gen_rtx_REG (Pmode
, D0_REG
)));
5165 /* Jump to the target function. Use a sibcall if direct jumps are
5166 allowed, otherwise load the address into a register first. */
5167 mem
= DECL_RTL (function
);
5168 if (!sibcall_operand (XEXP (mem
, 0), VOIDmode
))
5170 gcc_assert (flag_pic
);
5172 if (!TARGET_SEP_DATA
)
5174 /* Use the static chain register as a temporary (call-clobbered)
5175 GOT pointer for this function. We can use the static chain
5176 register because it isn't live on entry to the thunk. */
5177 SET_REGNO (pic_offset_table_rtx
, STATIC_CHAIN_REGNUM
);
5178 emit_insn (gen_load_got (pic_offset_table_rtx
));
5180 legitimize_pic_address (XEXP (mem
, 0), Pmode
, tmp
);
5181 mem
= replace_equiv_address (mem
, tmp
);
5183 insn
= emit_call_insn (gen_sibcall (mem
, const0_rtx
));
5184 SIBLING_CALL_P (insn
) = 1;
5186 /* Run just enough of rest_of_compilation. */
5187 insn
= get_insns ();
5188 split_all_insns_noflow ();
5189 final_start_function (insn
, file
, 1);
5190 final (insn
, file
, 1);
5191 final_end_function ();
5193 /* Clean up the vars set above. */
5194 reload_completed
= 0;
5196 /* Restore the original PIC register. */
5198 SET_REGNO (pic_offset_table_rtx
, PIC_REG
);
5201 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
5204 m68k_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED
,
5205 int incoming ATTRIBUTE_UNUSED
)
5207 return gen_rtx_REG (Pmode
, M68K_STRUCT_VALUE_REGNUM
);
5210 /* Return nonzero if register old_reg can be renamed to register new_reg. */
5212 m68k_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED
,
5213 unsigned int new_reg
)
5216 /* Interrupt functions can only use registers that have already been
5217 saved by the prologue, even if they would normally be
5220 if ((m68k_get_function_kind (current_function_decl
)
5221 == m68k_fk_interrupt_handler
)
5222 && !df_regs_ever_live_p (new_reg
))
5228 /* Value is true if hard register REGNO can hold a value of machine-mode
5229 MODE. On the 68000, we let the cpu registers can hold any mode, but
5230 restrict the 68881 registers to floating-point modes. */
5233 m68k_regno_mode_ok (int regno
, enum machine_mode mode
)
5235 if (DATA_REGNO_P (regno
))
5237 /* Data Registers, can hold aggregate if fits in. */
5238 if (regno
+ GET_MODE_SIZE (mode
) / 4 <= 8)
5241 else if (ADDRESS_REGNO_P (regno
))
5243 if (regno
+ GET_MODE_SIZE (mode
) / 4 <= 16)
5246 else if (FP_REGNO_P (regno
))
5248 /* FPU registers, hold float or complex float of long double or
5250 if ((GET_MODE_CLASS (mode
) == MODE_FLOAT
5251 || GET_MODE_CLASS (mode
) == MODE_COMPLEX_FLOAT
)
5252 && GET_MODE_UNIT_SIZE (mode
) <= TARGET_FP_REG_SIZE
)
5258 /* Implement SECONDARY_RELOAD_CLASS. */
5261 m68k_secondary_reload_class (enum reg_class rclass
,
5262 enum machine_mode mode
, rtx x
)
5266 regno
= true_regnum (x
);
5268 /* If one operand of a movqi is an address register, the other
5269 operand must be a general register or constant. Other types
5270 of operand must be reloaded through a data register. */
5271 if (GET_MODE_SIZE (mode
) == 1
5272 && reg_classes_intersect_p (rclass
, ADDR_REGS
)
5273 && !(INT_REGNO_P (regno
) || CONSTANT_P (x
)))
5276 /* PC-relative addresses must be loaded into an address register first. */
5278 && !reg_class_subset_p (rclass
, ADDR_REGS
)
5279 && symbolic_operand (x
, VOIDmode
))
5285 /* Implement PREFERRED_RELOAD_CLASS. */
5288 m68k_preferred_reload_class (rtx x
, enum reg_class rclass
)
5290 enum reg_class secondary_class
;
5292 /* If RCLASS might need a secondary reload, try restricting it to
5293 a class that doesn't. */
5294 secondary_class
= m68k_secondary_reload_class (rclass
, GET_MODE (x
), x
);
5295 if (secondary_class
!= NO_REGS
5296 && reg_class_subset_p (secondary_class
, rclass
))
5297 return secondary_class
;
5299 /* Prefer to use moveq for in-range constants. */
5300 if (GET_CODE (x
) == CONST_INT
5301 && reg_class_subset_p (DATA_REGS
, rclass
)
5302 && IN_RANGE (INTVAL (x
), -0x80, 0x7f))
5305 /* ??? Do we really need this now? */
5306 if (GET_CODE (x
) == CONST_DOUBLE
5307 && GET_MODE_CLASS (GET_MODE (x
)) == MODE_FLOAT
)
5309 if (TARGET_HARD_FLOAT
&& reg_class_subset_p (FP_REGS
, rclass
))
5318 /* Return floating point values in a 68881 register. This makes 68881 code
5319 a little bit faster. It also makes -msoft-float code incompatible with
5320 hard-float code, so people have to be careful not to mix the two.
5321 For ColdFire it was decided the ABI incompatibility is undesirable.
5322 If there is need for a hard-float ABI it is probably worth doing it
5323 properly and also passing function arguments in FP registers. */
5325 m68k_libcall_value (enum machine_mode mode
)
5332 return gen_rtx_REG (mode
, FP0_REG
);
5338 return gen_rtx_REG (mode
, m68k_libcall_value_in_a0_p
? A0_REG
: D0_REG
);
5341 /* Location in which function value is returned.
5342 NOTE: Due to differences in ABIs, don't call this function directly,
5343 use FUNCTION_VALUE instead. */
5345 m68k_function_value (const_tree valtype
, const_tree func ATTRIBUTE_UNUSED
)
5347 enum machine_mode mode
;
5349 mode
= TYPE_MODE (valtype
);
5355 return gen_rtx_REG (mode
, FP0_REG
);
5361 /* If the function returns a pointer, push that into %a0. */
5362 if (func
&& POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (func
))))
5363 /* For compatibility with the large body of existing code which
5364 does not always properly declare external functions returning
5365 pointer types, the m68k/SVR4 convention is to copy the value
5366 returned for pointer functions from a0 to d0 in the function
5367 epilogue, so that callers that have neglected to properly
5368 declare the callee can still find the correct return value in
5370 return gen_rtx_PARALLEL
5373 gen_rtx_EXPR_LIST (VOIDmode
,
5374 gen_rtx_REG (mode
, A0_REG
),
5376 gen_rtx_EXPR_LIST (VOIDmode
,
5377 gen_rtx_REG (mode
, D0_REG
),
5379 else if (POINTER_TYPE_P (valtype
))
5380 return gen_rtx_REG (mode
, A0_REG
);
5382 return gen_rtx_REG (mode
, D0_REG
);
5385 /* Worker function for TARGET_RETURN_IN_MEMORY. */
5386 #if M68K_HONOR_TARGET_STRICT_ALIGNMENT
5388 m68k_return_in_memory (const_tree type
, const_tree fntype ATTRIBUTE_UNUSED
)
5390 enum machine_mode mode
= TYPE_MODE (type
);
5392 if (mode
== BLKmode
)
5395 /* If TYPE's known alignment is less than the alignment of MODE that
5396 would contain the structure, then return in memory. We need to
5397 do so to maintain the compatibility between code compiled with
5398 -mstrict-align and that compiled with -mno-strict-align. */
5399 if (AGGREGATE_TYPE_P (type
)
5400 && TYPE_ALIGN (type
) < GET_MODE_ALIGNMENT (mode
))
5407 /* CPU to schedule the program for. */
5408 enum attr_cpu m68k_sched_cpu
;
5410 /* MAC to schedule the program for. */
5411 enum attr_mac m68k_sched_mac
;
5419 /* Integer register. */
5425 /* Implicit mem reference (e.g. stack). */
5428 /* Memory without offset or indexing. EA modes 2, 3 and 4. */
5431 /* Memory with offset but without indexing. EA mode 5. */
5434 /* Memory with indexing. EA mode 6. */
5437 /* Memory referenced by absolute address. EA mode 7. */
5440 /* Immediate operand that doesn't require extension word. */
5443 /* Immediate 16 bit operand. */
5446 /* Immediate 32 bit operand. */
5450 /* Return type of memory ADDR_RTX refers to. */
5451 static enum attr_op_type
5452 sched_address_type (enum machine_mode mode
, rtx addr_rtx
)
5454 struct m68k_address address
;
5456 if (symbolic_operand (addr_rtx
, VOIDmode
))
5457 return OP_TYPE_MEM7
;
5459 if (!m68k_decompose_address (mode
, addr_rtx
,
5460 reload_completed
, &address
))
5462 gcc_assert (!reload_completed
);
5463 /* Reload will likely fix the address to be in the register. */
5464 return OP_TYPE_MEM234
;
5467 if (address
.scale
!= 0)
5468 return OP_TYPE_MEM6
;
5470 if (address
.base
!= NULL_RTX
)
5472 if (address
.offset
== NULL_RTX
)
5473 return OP_TYPE_MEM234
;
5475 return OP_TYPE_MEM5
;
5478 gcc_assert (address
.offset
!= NULL_RTX
);
5480 return OP_TYPE_MEM7
;
5483 /* Return X or Y (depending on OPX_P) operand of INSN. */
5485 sched_get_operand (rtx insn
, bool opx_p
)
5489 if (recog_memoized (insn
) < 0)
5492 extract_constrain_insn_cached (insn
);
5495 i
= get_attr_opx (insn
);
5497 i
= get_attr_opy (insn
);
5499 if (i
>= recog_data
.n_operands
)
5502 return recog_data
.operand
[i
];
5505 /* Return type of INSN's operand X (if OPX_P) or operand Y (if !OPX_P).
5506 If ADDRESS_P is true, return type of memory location operand refers to. */
5507 static enum attr_op_type
5508 sched_attr_op_type (rtx insn
, bool opx_p
, bool address_p
)
5512 op
= sched_get_operand (insn
, opx_p
);
5516 gcc_assert (!reload_completed
);
5521 return sched_address_type (QImode
, op
);
5523 if (memory_operand (op
, VOIDmode
))
5524 return sched_address_type (GET_MODE (op
), XEXP (op
, 0));
5526 if (register_operand (op
, VOIDmode
))
5528 if ((!reload_completed
&& FLOAT_MODE_P (GET_MODE (op
)))
5529 || (reload_completed
&& FP_REG_P (op
)))
5535 if (GET_CODE (op
) == CONST_INT
)
5541 /* Check for quick constants. */
5542 switch (get_attr_type (insn
))
5545 if (IN_RANGE (ival
, 1, 8) || IN_RANGE (ival
, -8, -1))
5546 return OP_TYPE_IMM_Q
;
5548 gcc_assert (!reload_completed
);
5552 if (USE_MOVQ (ival
))
5553 return OP_TYPE_IMM_Q
;
5555 gcc_assert (!reload_completed
);
5559 if (valid_mov3q_const (ival
))
5560 return OP_TYPE_IMM_Q
;
5562 gcc_assert (!reload_completed
);
5569 if (IN_RANGE (ival
, -0x8000, 0x7fff))
5570 return OP_TYPE_IMM_W
;
5572 return OP_TYPE_IMM_L
;
5575 if (GET_CODE (op
) == CONST_DOUBLE
)
5577 switch (GET_MODE (op
))
5580 return OP_TYPE_IMM_W
;
5584 return OP_TYPE_IMM_L
;
5591 if (GET_CODE (op
) == CONST
5592 || symbolic_operand (op
, VOIDmode
)
5595 switch (GET_MODE (op
))
5598 return OP_TYPE_IMM_Q
;
5601 return OP_TYPE_IMM_W
;
5604 return OP_TYPE_IMM_L
;
5607 if (symbolic_operand (m68k_unwrap_symbol (op
, false), VOIDmode
))
5609 return OP_TYPE_IMM_W
;
5611 return OP_TYPE_IMM_L
;
5615 gcc_assert (!reload_completed
);
5617 if (FLOAT_MODE_P (GET_MODE (op
)))
5623 /* Implement opx_type attribute.
5624 Return type of INSN's operand X.
5625 If ADDRESS_P is true, return type of memory location operand refers to. */
5627 m68k_sched_attr_opx_type (rtx insn
, int address_p
)
5629 switch (sched_attr_op_type (insn
, true, address_p
!= 0))
5635 return OPX_TYPE_FPN
;
5638 return OPX_TYPE_MEM1
;
5640 case OP_TYPE_MEM234
:
5641 return OPX_TYPE_MEM234
;
5644 return OPX_TYPE_MEM5
;
5647 return OPX_TYPE_MEM6
;
5650 return OPX_TYPE_MEM7
;
5653 return OPX_TYPE_IMM_Q
;
5656 return OPX_TYPE_IMM_W
;
5659 return OPX_TYPE_IMM_L
;
5666 /* Implement opy_type attribute.
5667 Return type of INSN's operand Y.
5668 If ADDRESS_P is true, return type of memory location operand refers to. */
5670 m68k_sched_attr_opy_type (rtx insn
, int address_p
)
5672 switch (sched_attr_op_type (insn
, false, address_p
!= 0))
5678 return OPY_TYPE_FPN
;
5681 return OPY_TYPE_MEM1
;
5683 case OP_TYPE_MEM234
:
5684 return OPY_TYPE_MEM234
;
5687 return OPY_TYPE_MEM5
;
5690 return OPY_TYPE_MEM6
;
5693 return OPY_TYPE_MEM7
;
5696 return OPY_TYPE_IMM_Q
;
5699 return OPY_TYPE_IMM_W
;
5702 return OPY_TYPE_IMM_L
;
5709 /* Return size of INSN as int. */
5711 sched_get_attr_size_int (rtx insn
)
5715 switch (get_attr_type (insn
))
5718 /* There should be no references to m68k_sched_attr_size for 'ignore'
5732 switch (get_attr_opx_type (insn
))
5738 case OPX_TYPE_MEM234
:
5739 case OPY_TYPE_IMM_Q
:
5744 /* Here we assume that most absolute references are short. */
5746 case OPY_TYPE_IMM_W
:
5750 case OPY_TYPE_IMM_L
:
5758 switch (get_attr_opy_type (insn
))
5764 case OPY_TYPE_MEM234
:
5765 case OPY_TYPE_IMM_Q
:
5770 /* Here we assume that most absolute references are short. */
5772 case OPY_TYPE_IMM_W
:
5776 case OPY_TYPE_IMM_L
:
5786 gcc_assert (!reload_completed
);
5794 /* Return size of INSN as attribute enum value. */
5796 m68k_sched_attr_size (rtx insn
)
5798 switch (sched_get_attr_size_int (insn
))
5814 /* Return operand X or Y (depending on OPX_P) of INSN,
5815 if it is a MEM, or NULL otherwise. */
5816 static enum attr_op_type
5817 sched_get_opxy_mem_type (rtx insn
, bool opx_p
)
5821 switch (get_attr_opx_type (insn
))
5826 case OPX_TYPE_IMM_Q
:
5827 case OPX_TYPE_IMM_W
:
5828 case OPX_TYPE_IMM_L
:
5832 case OPX_TYPE_MEM234
:
5835 return OP_TYPE_MEM1
;
5838 return OP_TYPE_MEM6
;
5846 switch (get_attr_opy_type (insn
))
5851 case OPY_TYPE_IMM_Q
:
5852 case OPY_TYPE_IMM_W
:
5853 case OPY_TYPE_IMM_L
:
5857 case OPY_TYPE_MEM234
:
5860 return OP_TYPE_MEM1
;
5863 return OP_TYPE_MEM6
;
5871 /* Implement op_mem attribute. */
5873 m68k_sched_attr_op_mem (rtx insn
)
5875 enum attr_op_type opx
;
5876 enum attr_op_type opy
;
5878 opx
= sched_get_opxy_mem_type (insn
, true);
5879 opy
= sched_get_opxy_mem_type (insn
, false);
5881 if (opy
== OP_TYPE_RN
&& opx
== OP_TYPE_RN
)
5884 if (opy
== OP_TYPE_RN
&& opx
== OP_TYPE_MEM1
)
5886 switch (get_attr_opx_access (insn
))
5902 if (opy
== OP_TYPE_RN
&& opx
== OP_TYPE_MEM6
)
5904 switch (get_attr_opx_access (insn
))
5920 if (opy
== OP_TYPE_MEM1
&& opx
== OP_TYPE_RN
)
5923 if (opy
== OP_TYPE_MEM1
&& opx
== OP_TYPE_MEM1
)
5925 switch (get_attr_opx_access (insn
))
5931 gcc_assert (!reload_completed
);
5936 if (opy
== OP_TYPE_MEM1
&& opx
== OP_TYPE_MEM6
)
5938 switch (get_attr_opx_access (insn
))
5944 gcc_assert (!reload_completed
);
5949 if (opy
== OP_TYPE_MEM6
&& opx
== OP_TYPE_RN
)
5952 if (opy
== OP_TYPE_MEM6
&& opx
== OP_TYPE_MEM1
)
5954 switch (get_attr_opx_access (insn
))
5960 gcc_assert (!reload_completed
);
5965 gcc_assert (opy
== OP_TYPE_MEM6
&& opx
== OP_TYPE_MEM6
);
5966 gcc_assert (!reload_completed
);
5970 /* Jump instructions types. Indexed by INSN_UID.
5971 The same rtl insn can be expanded into different asm instructions
5972 depending on the cc0_status. To properly determine type of jump
5973 instructions we scan instruction stream and map jumps types to this
5975 static enum attr_type
*sched_branch_type
;
5977 /* Return the type of the jump insn. */
5979 m68k_sched_branch_type (rtx insn
)
5981 enum attr_type type
;
5983 type
= sched_branch_type
[INSN_UID (insn
)];
5985 gcc_assert (type
!= 0);
5990 /* Data for ColdFire V4 index bypass.
5991 Producer modifies register that is used as index in consumer with
5995 /* Producer instruction. */
5998 /* Consumer instruction. */
6001 /* Scale of indexed memory access within consumer.
6002 Or zero if bypass should not be effective at the moment. */
6004 } sched_cfv4_bypass_data
;
6006 /* An empty state that is used in m68k_sched_adjust_cost. */
6007 static state_t sched_adjust_cost_state
;
6009 /* Implement adjust_cost scheduler hook.
6010 Return adjusted COST of dependency LINK between DEF_INSN and INSN. */
6012 m68k_sched_adjust_cost (rtx insn
, rtx link ATTRIBUTE_UNUSED
, rtx def_insn
,
6017 if (recog_memoized (def_insn
) < 0
6018 || recog_memoized (insn
) < 0)
6021 if (sched_cfv4_bypass_data
.scale
== 1)
6022 /* Handle ColdFire V4 bypass for indexed address with 1x scale. */
6024 /* haifa-sched.c: insn_cost () calls bypass_p () just before
6025 targetm.sched.adjust_cost (). Hence, we can be relatively sure
6026 that the data in sched_cfv4_bypass_data is up to date. */
6027 gcc_assert (sched_cfv4_bypass_data
.pro
== def_insn
6028 && sched_cfv4_bypass_data
.con
== insn
);
6033 sched_cfv4_bypass_data
.pro
= NULL
;
6034 sched_cfv4_bypass_data
.con
= NULL
;
6035 sched_cfv4_bypass_data
.scale
= 0;
6038 gcc_assert (sched_cfv4_bypass_data
.pro
== NULL
6039 && sched_cfv4_bypass_data
.con
== NULL
6040 && sched_cfv4_bypass_data
.scale
== 0);
6042 /* Don't try to issue INSN earlier than DFA permits.
6043 This is especially useful for instructions that write to memory,
6044 as their true dependence (default) latency is better to be set to 0
6045 to workaround alias analysis limitations.
6046 This is, in fact, a machine independent tweak, so, probably,
6047 it should be moved to haifa-sched.c: insn_cost (). */
6048 delay
= min_insn_conflict_delay (sched_adjust_cost_state
, def_insn
, insn
);
6055 /* Return maximal number of insns that can be scheduled on a single cycle. */
6057 m68k_sched_issue_rate (void)
6059 switch (m68k_sched_cpu
)
6075 /* Maximal length of instruction for current CPU.
6076 E.g. it is 3 for any ColdFire core. */
6077 static int max_insn_size
;
6079 /* Data to model instruction buffer of CPU. */
6082 /* True if instruction buffer model is modeled for current CPU. */
6085 /* Size of the instruction buffer in words. */
6088 /* Number of filled words in the instruction buffer. */
6091 /* Additional information about instruction buffer for CPUs that have
6092 a buffer of instruction records, rather then a plain buffer
6093 of instruction words. */
6094 struct _sched_ib_records
6096 /* Size of buffer in records. */
6099 /* Array to hold data on adjustements made to the size of the buffer. */
6102 /* Index of the above array. */
6106 /* An insn that reserves (marks empty) one word in the instruction buffer. */
6110 static struct _sched_ib sched_ib
;
6112 /* ID of memory unit. */
6113 static int sched_mem_unit_code
;
6115 /* Implementation of the targetm.sched.variable_issue () hook.
6116 It is called after INSN was issued. It returns the number of insns
6117 that can possibly get scheduled on the current cycle.
6118 It is used here to determine the effect of INSN on the instruction
6121 m68k_sched_variable_issue (FILE *sched_dump ATTRIBUTE_UNUSED
,
6122 int sched_verbose ATTRIBUTE_UNUSED
,
6123 rtx insn
, int can_issue_more
)
6127 if (recog_memoized (insn
) >= 0 && get_attr_type (insn
) != TYPE_IGNORE
)
6129 switch (m68k_sched_cpu
)
6133 insn_size
= sched_get_attr_size_int (insn
);
6137 insn_size
= sched_get_attr_size_int (insn
);
6139 /* ColdFire V3 and V4 cores have instruction buffers that can
6140 accumulate up to 8 instructions regardless of instructions'
6141 sizes. So we should take care not to "prefetch" 24 one-word
6142 or 12 two-words instructions.
6143 To model this behavior we temporarily decrease size of the
6144 buffer by (max_insn_size - insn_size) for next 7 instructions. */
6148 adjust
= max_insn_size
- insn_size
;
6149 sched_ib
.size
-= adjust
;
6151 if (sched_ib
.filled
> sched_ib
.size
)
6152 sched_ib
.filled
= sched_ib
.size
;
6154 sched_ib
.records
.adjust
[sched_ib
.records
.adjust_index
] = adjust
;
6157 ++sched_ib
.records
.adjust_index
;
6158 if (sched_ib
.records
.adjust_index
== sched_ib
.records
.n_insns
)
6159 sched_ib
.records
.adjust_index
= 0;
6161 /* Undo adjustement we did 7 instructions ago. */
6163 += sched_ib
.records
.adjust
[sched_ib
.records
.adjust_index
];
6168 gcc_assert (!sched_ib
.enabled_p
);
6176 gcc_assert (insn_size
<= sched_ib
.filled
);
6179 else if (GET_CODE (PATTERN (insn
)) == ASM_INPUT
6180 || asm_noperands (PATTERN (insn
)) >= 0)
6181 insn_size
= sched_ib
.filled
;
6185 sched_ib
.filled
-= insn_size
;
6187 return can_issue_more
;
6190 /* Return how many instructions should scheduler lookahead to choose the
/* Return how many instructions should scheduler lookahead to choose the
   best one.  One less than the issue rate: the current insn fills the
   remaining slot.
   (Reconstructed: the extracted text had lines split and dropped.)  */
static int
m68k_sched_first_cycle_multipass_dfa_lookahead (void)
{
  return m68k_sched_issue_rate () - 1;
}
6198 /* Implementation of targetm.sched.init_global () hook.
6199 It is invoked once per scheduling pass and is used here
6200 to initialize scheduler constants. */
6202 m68k_sched_md_init_global (FILE *sched_dump ATTRIBUTE_UNUSED
,
6203 int sched_verbose ATTRIBUTE_UNUSED
,
6204 int n_insns ATTRIBUTE_UNUSED
)
6206 /* Init branch types. */
6210 sched_branch_type
= XCNEWVEC (enum attr_type
, get_max_uid () + 1);
6212 for (insn
= get_insns (); insn
!= NULL_RTX
; insn
= NEXT_INSN (insn
))
6215 /* !!! FIXME: Implement real scan here. */
6216 sched_branch_type
[INSN_UID (insn
)] = TYPE_BCC
;
6220 #ifdef ENABLE_CHECKING
6221 /* Check that all instructions have DFA reservations and
6222 that all instructions can be issued from a clean state. */
6227 state
= alloca (state_size ());
6229 for (insn
= get_insns (); insn
!= NULL_RTX
; insn
= NEXT_INSN (insn
))
6231 if (INSN_P (insn
) && recog_memoized (insn
) >= 0)
6233 gcc_assert (insn_has_dfa_reservation_p (insn
));
6235 state_reset (state
);
6236 if (state_transition (state
, insn
) >= 0)
6243 /* Setup target cpu. */
6245 /* ColdFire V4 has a set of features to keep its instruction buffer full
6246 (e.g., a separate memory bus for instructions) and, hence, we do not model
6247 buffer for this CPU. */
6248 sched_ib
.enabled_p
= (m68k_sched_cpu
!= CPU_CFV4
);
6250 switch (m68k_sched_cpu
)
6253 sched_ib
.filled
= 0;
6260 sched_ib
.records
.n_insns
= 0;
6261 sched_ib
.records
.adjust
= NULL
;
6266 sched_ib
.records
.n_insns
= 8;
6267 sched_ib
.records
.adjust
= XNEWVEC (int, sched_ib
.records
.n_insns
);
6274 sched_mem_unit_code
= get_cpu_unit_code ("cf_mem1");
6276 sched_adjust_cost_state
= xmalloc (state_size ());
6277 state_reset (sched_adjust_cost_state
);
6280 emit_insn (gen_ib ());
6281 sched_ib
.insn
= get_insns ();
6285 /* Scheduling pass is now finished. Free/reset static variables. */
6287 m68k_sched_md_finish_global (FILE *dump ATTRIBUTE_UNUSED
,
6288 int verbose ATTRIBUTE_UNUSED
)
6290 sched_ib
.insn
= NULL
;
6292 free (sched_adjust_cost_state
);
6293 sched_adjust_cost_state
= NULL
;
6295 sched_mem_unit_code
= 0;
6297 free (sched_ib
.records
.adjust
);
6298 sched_ib
.records
.adjust
= NULL
;
6299 sched_ib
.records
.n_insns
= 0;
6302 free (sched_branch_type
);
6303 sched_branch_type
= NULL
;
6306 /* Implementation of targetm.sched.init () hook.
6307 It is invoked each time scheduler starts on the new block (basic block or
6308 extended basic block). */
6310 m68k_sched_md_init (FILE *sched_dump ATTRIBUTE_UNUSED
,
6311 int sched_verbose ATTRIBUTE_UNUSED
,
6312 int n_insns ATTRIBUTE_UNUSED
)
6314 switch (m68k_sched_cpu
)
6322 sched_ib
.size
= sched_ib
.records
.n_insns
* max_insn_size
;
6324 memset (sched_ib
.records
.adjust
, 0,
6325 sched_ib
.records
.n_insns
* sizeof (*sched_ib
.records
.adjust
));
6326 sched_ib
.records
.adjust_index
= 0;
6330 gcc_assert (!sched_ib
.enabled_p
);
6338 if (sched_ib
.enabled_p
)
6339 /* haifa-sched.c: schedule_block () calls advance_cycle () just before
6340 the first cycle. Workaround that. */
6341 sched_ib
.filled
= -2;
6344 /* Implementation of targetm.sched.dfa_pre_advance_cycle () hook.
6345 It is invoked just before current cycle finishes and is used here
6346 to track if instruction buffer got its two words this cycle. */
6348 m68k_sched_dfa_pre_advance_cycle (void)
6350 if (!sched_ib
.enabled_p
)
6353 if (!cpu_unit_reservation_p (curr_state
, sched_mem_unit_code
))
6355 sched_ib
.filled
+= 2;
6357 if (sched_ib
.filled
> sched_ib
.size
)
6358 sched_ib
.filled
= sched_ib
.size
;
6362 /* Implementation of targetm.sched.dfa_post_advance_cycle () hook.
6363 It is invoked just after new cycle begins and is used here
6364 to setup number of filled words in the instruction buffer so that
6365 instructions which won't have all their words prefetched would be
6366 stalled for a cycle. */
6368 m68k_sched_dfa_post_advance_cycle (void)
6372 if (!sched_ib
.enabled_p
)
6375 /* Setup number of prefetched instruction words in the instruction
6377 i
= max_insn_size
- sched_ib
.filled
;
6381 if (state_transition (curr_state
, sched_ib
.insn
) >= 0)
6386 /* Return X or Y (depending on OPX_P) operand of INSN,
6387 if it is an integer register, or NULL overwise. */
6389 sched_get_reg_operand (rtx insn
, bool opx_p
)
6395 if (get_attr_opx_type (insn
) == OPX_TYPE_RN
)
6397 op
= sched_get_operand (insn
, true);
6398 gcc_assert (op
!= NULL
);
6400 if (!reload_completed
&& !REG_P (op
))
6406 if (get_attr_opy_type (insn
) == OPY_TYPE_RN
)
6408 op
= sched_get_operand (insn
, false);
6409 gcc_assert (op
!= NULL
);
6411 if (!reload_completed
&& !REG_P (op
))
6419 /* Return true, if X or Y (depending on OPX_P) operand of INSN
6422 sched_mem_operand_p (rtx insn
, bool opx_p
)
6424 switch (sched_get_opxy_mem_type (insn
, opx_p
))
6435 /* Return X or Y (depending on OPX_P) operand of INSN,
6436 if it is a MEM, or NULL overwise. */
6438 sched_get_mem_operand (rtx insn
, bool must_read_p
, bool must_write_p
)
6458 if (opy_p
&& sched_mem_operand_p (insn
, false))
6459 return sched_get_operand (insn
, false);
6461 if (opx_p
&& sched_mem_operand_p (insn
, true))
6462 return sched_get_operand (insn
, true);
6468 /* Return non-zero if PRO modifies register used as part of
6471 m68k_sched_address_bypass_p (rtx pro
, rtx con
)
6476 pro_x
= sched_get_reg_operand (pro
, true);
6480 con_mem_read
= sched_get_mem_operand (con
, true, false);
6481 gcc_assert (con_mem_read
!= NULL
);
6483 if (reg_mentioned_p (pro_x
, con_mem_read
))
6489 /* Helper function for m68k_sched_indexed_address_bypass_p.
6490 if PRO modifies register used as index in CON,
6491 return scale of indexed memory access in CON. Return zero otherwise. */
6493 sched_get_indexed_address_scale (rtx pro
, rtx con
)
6497 struct m68k_address address
;
6499 reg
= sched_get_reg_operand (pro
, true);
6503 mem
= sched_get_mem_operand (con
, true, false);
6504 gcc_assert (mem
!= NULL
&& MEM_P (mem
));
6506 if (!m68k_decompose_address (GET_MODE (mem
), XEXP (mem
, 0), reload_completed
,
6510 if (REGNO (reg
) == REGNO (address
.index
))
6512 gcc_assert (address
.scale
!= 0);
6513 return address
.scale
;
6519 /* Return non-zero if PRO modifies register used
6520 as index with scale 2 or 4 in CON. */
6522 m68k_sched_indexed_address_bypass_p (rtx pro
, rtx con
)
6524 gcc_assert (sched_cfv4_bypass_data
.pro
== NULL
6525 && sched_cfv4_bypass_data
.con
== NULL
6526 && sched_cfv4_bypass_data
.scale
== 0);
6528 switch (sched_get_indexed_address_scale (pro
, con
))
6531 /* We can't have a variable latency bypass, so
6532 remember to adjust the insn cost in adjust_cost hook. */
6533 sched_cfv4_bypass_data
.pro
= pro
;
6534 sched_cfv4_bypass_data
.con
= con
;
6535 sched_cfv4_bypass_data
.scale
= 1;
6547 /* We generate a two-instructions program at M_TRAMP :
6548 movea.l &CHAIN_VALUE,%a0
6550 where %a0 can be modified by changing STATIC_CHAIN_REGNUM. */
6553 m68k_trampoline_init (rtx m_tramp
, tree fndecl
, rtx chain_value
)
6555 rtx fnaddr
= XEXP (DECL_RTL (fndecl
), 0);
6558 gcc_assert (ADDRESS_REGNO_P (STATIC_CHAIN_REGNUM
));
6560 mem
= adjust_address (m_tramp
, HImode
, 0);
6561 emit_move_insn (mem
, GEN_INT(0x207C + ((STATIC_CHAIN_REGNUM
-8) << 9)));
6562 mem
= adjust_address (m_tramp
, SImode
, 2);
6563 emit_move_insn (mem
, chain_value
);
6565 mem
= adjust_address (m_tramp
, HImode
, 6);
6566 emit_move_insn (mem
, GEN_INT(0x4EF9));
6567 mem
= adjust_address (m_tramp
, SImode
, 8);
6568 emit_move_insn (mem
, fnaddr
);
6570 FINALIZE_TRAMPOLINE (XEXP (m_tramp
, 0));
6573 /* On the 68000, the RTS insn cannot pop anything.
6574 On the 68010, the RTD insn may be used to pop them if the number
6575 of args is fixed, but if the number is variable then the caller
6576 must pop them all. RTD can't be used for library calls now
6577 because the library is compiled with the Unix compiler.
6578 Use of RTD is a selectable option, since it is incompatible with
6579 standard Unix calling sequences. If the option is not selected,
6580 the caller must always pop the args. */
6583 m68k_return_pops_args (tree fundecl
, tree funtype
, int size
)
6587 || TREE_CODE (fundecl
) != IDENTIFIER_NODE
)
6588 && (!stdarg_p (funtype
)))
6592 /* Make sure everything's fine if we *don't* have a given processor.
6593 This assumes that putting a register in fixed_regs will keep the
6594 compiler's mitts completely off it. We don't bother to zero it out
6595 of register classes. */
6598 m68k_conditional_register_usage (void)
6602 if (!TARGET_HARD_FLOAT
)
6604 COPY_HARD_REG_SET (x
, reg_class_contents
[(int)FP_REGS
]);
6605 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
6606 if (TEST_HARD_REG_BIT (x
, i
))
6607 fixed_regs
[i
] = call_used_regs
[i
] = 1;
6610 fixed_regs
[PIC_REG
] = call_used_regs
[PIC_REG
] = 1;
6613 #include "gt-m68k.h"