]> git.ipfire.org Git - thirdparty/gcc.git/blame - gcc/config/m68k/m68k.c
bootstrap-ubsan.mk (POSTSTAGE1_LDFLAGS): Add -ldl.
[thirdparty/gcc.git] / gcc / config / m68k / m68k.c
CommitLineData
79e68feb 1/* Subroutines for insn-output.c for Motorola 68000 family.
d1e082c2 2 Copyright (C) 1987-2013 Free Software Foundation, Inc.
79e68feb 3
7ec022b2 4This file is part of GCC.
79e68feb 5
7ec022b2 6GCC is free software; you can redistribute it and/or modify
79e68feb 7it under the terms of the GNU General Public License as published by
2f83c7d6 8the Free Software Foundation; either version 3, or (at your option)
79e68feb
RS
9any later version.
10
7ec022b2 11GCC is distributed in the hope that it will be useful,
79e68feb
RS
12but WITHOUT ANY WARRANTY; without even the implied warranty of
13MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14GNU General Public License for more details.
15
16You should have received a copy of the GNU General Public License
2f83c7d6
NC
17along with GCC; see the file COPYING3. If not see
18<http://www.gnu.org/licenses/>. */
79e68feb 19
79e68feb 20#include "config.h"
f5220a5d 21#include "system.h"
4977bab6
ZW
22#include "coretypes.h"
23#include "tm.h"
da932f04 24#include "tree.h"
79e68feb 25#include "rtl.h"
49ad7cfa 26#include "function.h"
79e68feb
RS
27#include "regs.h"
28#include "hard-reg-set.h"
79e68feb
RS
29#include "insn-config.h"
30#include "conditions.h"
79e68feb
RS
31#include "output.h"
32#include "insn-attr.h"
1d8eaa6b 33#include "recog.h"
718f9c0f 34#include "diagnostic-core.h"
6d5f49b2
RH
35#include "expr.h"
36#include "reload.h"
5505f548 37#include "tm_p.h"
672a6f42
NB
38#include "target.h"
39#include "target-def.h"
2cc07db4 40#include "debug.h"
79e68feb 41#include "flags.h"
6fb5fa3c 42#include "df.h"
b8c96320
MK
43/* ??? Need to add a dependency between m68k.o and sched-int.h. */
44#include "sched-int.h"
45#include "insn-codes.h"
75df395f 46#include "ggc.h"
96e45421 47#include "opts.h"
8b281334 48#include "optabs.h"
79e68feb 49
a4e9467d
RZ
50enum reg_class regno_reg_class[] =
51{
52 DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
53 DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
54 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
55 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
56 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
57 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
58 ADDR_REGS
59};
60
61
a40ed0f3
KH
62/* The minimum number of integer registers that we want to save with the
63 movem instruction. Using two movel instructions instead of a single
64 moveml is about 15% faster for the 68020 and 68030 at no expense in
65 code size. */
66#define MIN_MOVEM_REGS 3
67
68/* The minimum number of floating point registers that we want to save
69 with the fmovem instruction. */
70#define MIN_FMOVEM_REGS 1
71
ff482c8d 72/* Structure describing stack frame layout. */
3d74bc09
BI
73struct m68k_frame
74{
75 /* Stack pointer to frame pointer offset. */
48ed72a4 76 HOST_WIDE_INT offset;
3d74bc09
BI
77
78 /* Offset of FPU registers. */
79 HOST_WIDE_INT foffset;
80
81 /* Frame size in bytes (rounded up). */
48ed72a4 82 HOST_WIDE_INT size;
3d74bc09
BI
83
84 /* Data and address register. */
48ed72a4
PB
85 int reg_no;
86 unsigned int reg_mask;
3d74bc09
BI
87
88 /* FPU registers. */
48ed72a4
PB
89 int fpu_no;
90 unsigned int fpu_mask;
3d74bc09
BI
91
92 /* Offsets relative to ARG_POINTER. */
48ed72a4
PB
93 HOST_WIDE_INT frame_pointer_offset;
94 HOST_WIDE_INT stack_pointer_offset;
3d74bc09
BI
95
96 /* Function which the above information refers to. */
97 int funcdef_no;
48ed72a4
PB
98};
99
3d74bc09
BI
100/* Current frame information calculated by m68k_compute_frame_layout(). */
101static struct m68k_frame current_frame;
102
fc2241eb
RS
103/* Structure describing an m68k address.
104
105 If CODE is UNKNOWN, the address is BASE + INDEX * SCALE + OFFSET,
106 with null fields evaluating to 0. Here:
107
108 - BASE satisfies m68k_legitimate_base_reg_p
109 - INDEX satisfies m68k_legitimate_index_reg_p
110 - OFFSET satisfies m68k_legitimate_constant_address_p
111
112 INDEX is either HImode or SImode. The other fields are SImode.
113
114 If CODE is PRE_DEC, the address is -(BASE). If CODE is POST_INC,
115 the address is (BASE)+. */
116struct m68k_address {
117 enum rtx_code code;
118 rtx base;
119 rtx index;
120 rtx offset;
121 int scale;
122};
123
b8c96320 124static int m68k_sched_adjust_cost (rtx, rtx, rtx, int);
96fcacb7 125static int m68k_sched_issue_rate (void);
b8c96320
MK
126static int m68k_sched_variable_issue (FILE *, int, rtx, int);
127static void m68k_sched_md_init_global (FILE *, int, int);
128static void m68k_sched_md_finish_global (FILE *, int);
129static void m68k_sched_md_init (FILE *, int, int);
130static void m68k_sched_dfa_pre_advance_cycle (void);
131static void m68k_sched_dfa_post_advance_cycle (void);
96fcacb7 132static int m68k_sched_first_cycle_multipass_dfa_lookahead (void);
b8c96320 133
7b5cbb57 134static bool m68k_can_eliminate (const int, const int);
5efd84c5 135static void m68k_conditional_register_usage (void);
c6c3dba9 136static bool m68k_legitimate_address_p (enum machine_mode, rtx, bool);
c5387660 137static void m68k_option_override (void);
03e69b12 138static void m68k_override_options_after_change (void);
8a4a2253
BI
139static rtx find_addr_reg (rtx);
140static const char *singlemove_string (rtx *);
8a4a2253
BI
141static void m68k_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
142 HOST_WIDE_INT, tree);
8636be86 143static rtx m68k_struct_value_rtx (tree, int);
48ed72a4
PB
144static tree m68k_handle_fndecl_attribute (tree *node, tree name,
145 tree args, int flags,
146 bool *no_add_attrs);
3d74bc09 147static void m68k_compute_frame_layout (void);
48ed72a4 148static bool m68k_save_reg (unsigned int regno, bool interrupt_handler);
f7e70894 149static bool m68k_ok_for_sibcall_p (tree, tree);
75df395f 150static bool m68k_tls_symbol_p (rtx);
506d7b68 151static rtx m68k_legitimize_address (rtx, rtx, enum machine_mode);
68f932c4 152static bool m68k_rtx_costs (rtx, int, int, int, int *, bool);
1c445f03 153#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
511e41e5 154static bool m68k_return_in_memory (const_tree, const_tree);
1c445f03 155#endif
75df395f 156static void m68k_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
e0601576 157static void m68k_trampoline_init (rtx, tree, rtx);
079e7538 158static int m68k_return_pops_args (tree, tree, int);
7b0f476d 159static rtx m68k_delegitimize_address (rtx);
d5cc9181 160static void m68k_function_arg_advance (cumulative_args_t, enum machine_mode,
13d3961c 161 const_tree, bool);
d5cc9181 162static rtx m68k_function_arg (cumulative_args_t, enum machine_mode,
13d3961c 163 const_tree, bool);
fbbf66e7 164static bool m68k_cannot_force_const_mem (enum machine_mode mode, rtx x);
cb69db4f 165static bool m68k_output_addr_const_extra (FILE *, rtx);
8b281334 166static void m68k_init_sync_libfuncs (void) ATTRIBUTE_UNUSED;
/* Initialize the GCC target structure.  */

/* Select the assembler integer directives according to which directive
   group this target's assembler understands.  */
#if INT_OP_GROUP == INT_OP_DOT_WORD
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
#endif

#if INT_OP_GROUP == INT_OP_NO_DOT
#undef TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tbyte\t"
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tshort\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tlong\t"
#endif

#if INT_OP_GROUP == INT_OP_DC
#undef TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tdc.b\t"
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tdc.w\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tdc.l\t"
#endif

/* The m68k assemblers make no aligned/unaligned distinction.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK m68k_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_ASM_FILE_START_APP_OFF
#define TARGET_ASM_FILE_START_APP_OFF true

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS m68k_legitimize_address

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST m68k_sched_adjust_cost

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE m68k_sched_issue_rate

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE m68k_sched_variable_issue

#undef TARGET_SCHED_INIT_GLOBAL
#define TARGET_SCHED_INIT_GLOBAL m68k_sched_md_init_global

#undef TARGET_SCHED_FINISH_GLOBAL
#define TARGET_SCHED_FINISH_GLOBAL m68k_sched_md_finish_global

#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT m68k_sched_md_init

#undef TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE
#define TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE m68k_sched_dfa_pre_advance_cycle

#undef TARGET_SCHED_DFA_POST_ADVANCE_CYCLE
#define TARGET_SCHED_DFA_POST_ADVANCE_CYCLE m68k_sched_dfa_post_advance_cycle

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
  m68k_sched_first_cycle_multipass_dfa_lookahead

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE m68k_option_override

#undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m68k_override_options_after_change

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS m68k_rtx_costs

#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE m68k_attribute_table

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX m68k_struct_value_rtx

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM m68k_cannot_force_const_mem

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL m68k_ok_for_sibcall_p

#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY m68k_return_in_memory
#endif

#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS (true)

#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL m68k_output_dwarf_dtprel
#endif

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P m68k_legitimate_address_p

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE m68k_can_eliminate

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE m68k_conditional_register_usage

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT m68k_trampoline_init

#undef TARGET_RETURN_POPS_ARGS
#define TARGET_RETURN_POPS_ARGS m68k_return_pops_args

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS m68k_delegitimize_address

#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG m68k_function_arg

#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE m68k_function_arg_advance

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P m68k_legitimate_constant_p

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA m68k_output_addr_const_extra

/* The value stored by TAS.  */
#undef TARGET_ATOMIC_TEST_AND_SET_TRUEVAL
#define TARGET_ATOMIC_TEST_AND_SET_TRUEVAL 128
307
48ed72a4
PB
308static const struct attribute_spec m68k_attribute_table[] =
309{
62d784f7
KT
310 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
311 affects_type_identity } */
312 { "interrupt", 0, 0, true, false, false, m68k_handle_fndecl_attribute,
313 false },
314 { "interrupt_handler", 0, 0, true, false, false,
315 m68k_handle_fndecl_attribute, false },
316 { "interrupt_thread", 0, 0, true, false, false,
317 m68k_handle_fndecl_attribute, false },
318 { NULL, 0, 0, false, false, false, NULL, false }
48ed72a4
PB
319};
320
f6897b10 321struct gcc_target targetm = TARGET_INITIALIZER;
672a6f42 322\f
900ec02d
JB
/* Base flags for 68k ISAs.  */
#define FL_FOR_isa_00 FL_ISA_68000
#define FL_FOR_isa_10 (FL_FOR_isa_00 | FL_ISA_68010)
/* FL_68881 controls the default setting of -m68881.  gcc has traditionally
   generated 68881 code for 68020 and 68030 targets unless explicitly told
   not to.  */
#define FL_FOR_isa_20 (FL_FOR_isa_10 | FL_ISA_68020 \
		       | FL_BITFIELD | FL_68881 | FL_CAS)
#define FL_FOR_isa_40 (FL_FOR_isa_20 | FL_ISA_68040)
#define FL_FOR_isa_cpu32 (FL_FOR_isa_10 | FL_ISA_68020)

/* Base flags for ColdFire ISAs.  */
#define FL_FOR_isa_a (FL_COLDFIRE | FL_ISA_A)
#define FL_FOR_isa_aplus (FL_FOR_isa_a | FL_ISA_APLUS | FL_CF_USP)
/* Note ISA_B doesn't necessarily include USP (user stack pointer) support.  */
#define FL_FOR_isa_b (FL_FOR_isa_a | FL_ISA_B | FL_CF_HWDIV)
/* ISA_C is not upwardly compatible with ISA_B.  */
#define FL_FOR_isa_c (FL_FOR_isa_a | FL_ISA_C | FL_CF_USP)

/* Enumerates every instruction-set variant this back end knows about;
   isa_max is a sentinel used to terminate selection tables.  */
enum m68k_isa
{
  /* Traditional 68000 instruction sets.  */
  isa_00,
  isa_10,
  isa_20,
  isa_40,
  isa_cpu32,
  /* ColdFire instruction set variants.  */
  isa_a,
  isa_aplus,
  isa_b,
  isa_c,
  isa_max
};
357
358/* Information about one of the -march, -mcpu or -mtune arguments. */
359struct m68k_target_selection
360{
361 /* The argument being described. */
362 const char *name;
363
364 /* For -mcpu, this is the device selected by the option.
365 For -mtune and -march, it is a representative device
366 for the microarchitecture or ISA respectively. */
367 enum target_device device;
368
369 /* The M68K_DEVICE fields associated with DEVICE. See the comment
370 in m68k-devices.def for details. FAMILY is only valid for -mcpu. */
371 const char *family;
372 enum uarch_type microarch;
373 enum m68k_isa isa;
374 unsigned long flags;
375};
376
377/* A list of all devices in m68k-devices.def. Used for -mcpu selection. */
378static const struct m68k_target_selection all_devices[] =
379{
380#define M68K_DEVICE(NAME,ENUM_VALUE,FAMILY,MULTILIB,MICROARCH,ISA,FLAGS) \
381 { NAME, ENUM_VALUE, FAMILY, u##MICROARCH, ISA, FLAGS | FL_FOR_##ISA },
382#include "m68k-devices.def"
383#undef M68K_DEVICE
384 { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
385};
386
387/* A list of all ISAs, mapping each one to a representative device.
388 Used for -march selection. */
389static const struct m68k_target_selection all_isas[] =
390{
47c94d21
JM
391#define M68K_ISA(NAME,DEVICE,MICROARCH,ISA,FLAGS) \
392 { NAME, DEVICE, NULL, u##MICROARCH, ISA, FLAGS },
393#include "m68k-isas.def"
394#undef M68K_ISA
900ec02d
JB
395 { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
396};
397
398/* A list of all microarchitectures, mapping each one to a representative
399 device. Used for -mtune selection. */
400static const struct m68k_target_selection all_microarchs[] =
401{
47c94d21
JM
402#define M68K_MICROARCH(NAME,DEVICE,MICROARCH,ISA,FLAGS) \
403 { NAME, DEVICE, NULL, u##MICROARCH, ISA, FLAGS },
404#include "m68k-microarchs.def"
405#undef M68K_MICROARCH
900ec02d
JB
406 { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
407};
408\f
409/* The entries associated with the -mcpu, -march and -mtune settings,
410 or null for options that have not been used. */
411const struct m68k_target_selection *m68k_cpu_entry;
412const struct m68k_target_selection *m68k_arch_entry;
413const struct m68k_target_selection *m68k_tune_entry;
414
415/* Which CPU we are generating code for. */
416enum target_device m68k_cpu;
417
418/* Which microarchitecture to tune for. */
419enum uarch_type m68k_tune;
420
421/* Which FPU to use. */
422enum fpu_type m68k_fpu;
4af06170 423
900ec02d
JB
424/* The set of FL_* flags that apply to the target processor. */
425unsigned int m68k_cpu_flags;
29ca003a 426
03b3e271
KH
427/* The set of FL_* flags that apply to the processor to be tuned for. */
428unsigned int m68k_tune_flags;
429
29ca003a
RS
430/* Asm templates for calling or jumping to an arbitrary symbolic address,
431 or NULL if such calls or jumps are not supported. The address is held
432 in operand 0. */
433const char *m68k_symbolic_call;
434const char *m68k_symbolic_jump;
c47b0cb4
MK
435
436/* Enum variable that corresponds to m68k_symbolic_call values. */
437enum M68K_SYMBOLIC_CALL m68k_symbolic_call_var;
438
900ec02d 439\f
c5387660 440/* Implement TARGET_OPTION_OVERRIDE. */
ef1dbfb0 441
c5387660
JM
442static void
443m68k_option_override (void)
ef1dbfb0 444{
900ec02d
JB
445 const struct m68k_target_selection *entry;
446 unsigned long target_mask;
447
47c94d21
JM
448 if (global_options_set.x_m68k_arch_option)
449 m68k_arch_entry = &all_isas[m68k_arch_option];
450
451 if (global_options_set.x_m68k_cpu_option)
452 m68k_cpu_entry = &all_devices[(int) m68k_cpu_option];
453
454 if (global_options_set.x_m68k_tune_option)
455 m68k_tune_entry = &all_microarchs[(int) m68k_tune_option];
456
900ec02d
JB
457 /* User can choose:
458
459 -mcpu=
460 -march=
461 -mtune=
462
463 -march=ARCH should generate code that runs any processor
464 implementing architecture ARCH. -mcpu=CPU should override -march
465 and should generate code that runs on processor CPU, making free
466 use of any instructions that CPU understands. -mtune=UARCH applies
9f5ed61a 467 on top of -mcpu or -march and optimizes the code for UARCH. It does
900ec02d
JB
468 not change the target architecture. */
469 if (m68k_cpu_entry)
470 {
471 /* Complain if the -march setting is for a different microarchitecture,
472 or includes flags that the -mcpu setting doesn't. */
473 if (m68k_arch_entry
474 && (m68k_arch_entry->microarch != m68k_cpu_entry->microarch
475 || (m68k_arch_entry->flags & ~m68k_cpu_entry->flags) != 0))
476 warning (0, "-mcpu=%s conflicts with -march=%s",
477 m68k_cpu_entry->name, m68k_arch_entry->name);
478
479 entry = m68k_cpu_entry;
480 }
481 else
482 entry = m68k_arch_entry;
483
484 if (!entry)
485 entry = all_devices + TARGET_CPU_DEFAULT;
486
487 m68k_cpu_flags = entry->flags;
488
489 /* Use the architecture setting to derive default values for
490 certain flags. */
491 target_mask = 0;
8785d88c
KH
492
493 /* ColdFire is lenient about alignment. */
494 if (!TARGET_COLDFIRE)
495 target_mask |= MASK_STRICT_ALIGNMENT;
496
900ec02d
JB
497 if ((m68k_cpu_flags & FL_BITFIELD) != 0)
498 target_mask |= MASK_BITFIELD;
499 if ((m68k_cpu_flags & FL_CF_HWDIV) != 0)
500 target_mask |= MASK_CF_HWDIV;
501 if ((m68k_cpu_flags & (FL_68881 | FL_CF_FPU)) != 0)
502 target_mask |= MASK_HARD_FLOAT;
503 target_flags |= target_mask & ~target_flags_explicit;
504
505 /* Set the directly-usable versions of the -mcpu and -mtune settings. */
506 m68k_cpu = entry->device;
507 if (m68k_tune_entry)
03b3e271
KH
508 {
509 m68k_tune = m68k_tune_entry->microarch;
510 m68k_tune_flags = m68k_tune_entry->flags;
511 }
900ec02d
JB
512#ifdef M68K_DEFAULT_TUNE
513 else if (!m68k_cpu_entry && !m68k_arch_entry)
03b3e271
KH
514 {
515 enum target_device dev;
516 dev = all_microarchs[M68K_DEFAULT_TUNE].device;
517 m68k_tune_flags = all_devices[dev]->flags;
518 }
900ec02d
JB
519#endif
520 else
03b3e271
KH
521 {
522 m68k_tune = entry->microarch;
523 m68k_tune_flags = entry->flags;
524 }
900ec02d
JB
525
526 /* Set the type of FPU. */
527 m68k_fpu = (!TARGET_HARD_FLOAT ? FPUTYPE_NONE
528 : (m68k_cpu_flags & FL_COLDFIRE) != 0 ? FPUTYPE_COLDFIRE
529 : FPUTYPE_68881);
530
a2ef3db7
BI
531 /* Sanity check to ensure that msep-data and mid-sahred-library are not
532 * both specified together. Doing so simply doesn't make sense.
533 */
534 if (TARGET_SEP_DATA && TARGET_ID_SHARED_LIBRARY)
535 error ("cannot specify both -msep-data and -mid-shared-library");
536
537 /* If we're generating code for a separate A5 relative data segment,
538 * we've got to enable -fPIC as well. This might be relaxable to
539 * -fpic but it hasn't been tested properly.
540 */
541 if (TARGET_SEP_DATA || TARGET_ID_SHARED_LIBRARY)
542 flag_pic = 2;
543
abe92a04
RS
544 /* -mpcrel -fPIC uses 32-bit pc-relative displacements. Raise an
545 error if the target does not support them. */
546 if (TARGET_PCREL && !TARGET_68020 && flag_pic == 2)
547 error ("-mpcrel -fPIC is not currently supported on selected cpu");
adf2ac37
RH
548
549 /* ??? A historic way of turning on pic, or is this intended to
550 be an embedded thing that doesn't have the same name binding
551 significance that it does on hosted ELF systems? */
552 if (TARGET_PCREL && flag_pic == 0)
553 flag_pic = 1;
554
29ca003a
RS
555 if (!flag_pic)
556 {
c47b0cb4
MK
557 m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_JSR;
558
29ca003a 559 m68k_symbolic_jump = "jra %a0";
29ca003a
RS
560 }
561 else if (TARGET_ID_SHARED_LIBRARY)
562 /* All addresses must be loaded from the GOT. */
563 ;
4e2b26aa 564 else if (TARGET_68020 || TARGET_ISAB || TARGET_ISAC)
29ca003a
RS
565 {
566 if (TARGET_PCREL)
c47b0cb4 567 m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_C;
4e2b26aa 568 else
c47b0cb4
MK
569 m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_P;
570
4e2b26aa
NS
571 if (TARGET_ISAC)
572 /* No unconditional long branch */;
573 else if (TARGET_PCREL)
da398bb5 574 m68k_symbolic_jump = "bra%.l %c0";
29ca003a 575 else
da398bb5 576 m68k_symbolic_jump = "bra%.l %p0";
29ca003a
RS
577 /* Turn off function cse if we are doing PIC. We always want
578 function call to be done as `bsr foo@PLTPC'. */
579 /* ??? It's traditional to do this for -mpcrel too, but it isn't
580 clear how intentional that is. */
581 flag_no_function_cse = 1;
582 }
adf2ac37 583
c47b0cb4
MK
584 switch (m68k_symbolic_call_var)
585 {
586 case M68K_SYMBOLIC_CALL_JSR:
c47b0cb4 587 m68k_symbolic_call = "jsr %a0";
c47b0cb4
MK
588 break;
589
590 case M68K_SYMBOLIC_CALL_BSR_C:
da398bb5 591 m68k_symbolic_call = "bsr%.l %c0";
c47b0cb4
MK
592 break;
593
594 case M68K_SYMBOLIC_CALL_BSR_P:
da398bb5 595 m68k_symbolic_call = "bsr%.l %p0";
c47b0cb4
MK
596 break;
597
598 case M68K_SYMBOLIC_CALL_NONE:
599 gcc_assert (m68k_symbolic_call == NULL);
600 break;
601
602 default:
603 gcc_unreachable ();
604 }
605
aaca7021
RZ
606#ifndef ASM_OUTPUT_ALIGN_WITH_NOP
607 if (align_labels > 2)
608 {
609 warning (0, "-falign-labels=%d is not supported", align_labels);
610 align_labels = 0;
611 }
612 if (align_loops > 2)
613 {
614 warning (0, "-falign-loops=%d is not supported", align_loops);
615 align_loops = 0;
616 }
617#endif
618
8e22f79f
AS
619 if (stack_limit_rtx != NULL_RTX && !TARGET_68020)
620 {
621 warning (0, "-fstack-limit- options are not supported on this cpu");
622 stack_limit_rtx = NULL_RTX;
623 }
624
adf2ac37 625 SUBTARGET_OVERRIDE_OPTIONS;
c47b0cb4
MK
626
627 /* Setup scheduling options. */
826fadba
MK
628 if (TUNE_CFV1)
629 m68k_sched_cpu = CPU_CFV1;
630 else if (TUNE_CFV2)
631 m68k_sched_cpu = CPU_CFV2;
632 else if (TUNE_CFV3)
633 m68k_sched_cpu = CPU_CFV3;
96fcacb7
MK
634 else if (TUNE_CFV4)
635 m68k_sched_cpu = CPU_CFV4;
c47b0cb4
MK
636 else
637 {
638 m68k_sched_cpu = CPU_UNKNOWN;
639 flag_schedule_insns = 0;
640 flag_schedule_insns_after_reload = 0;
641 flag_modulo_sched = 0;
642 }
826fadba
MK
643
644 if (m68k_sched_cpu != CPU_UNKNOWN)
645 {
646 if ((m68k_cpu_flags & (FL_CF_EMAC | FL_CF_EMAC_B)) != 0)
647 m68k_sched_mac = MAC_CF_EMAC;
648 else if ((m68k_cpu_flags & FL_CF_MAC) != 0)
649 m68k_sched_mac = MAC_CF_MAC;
650 else
651 m68k_sched_mac = MAC_NO;
652 }
ef1dbfb0 653}
7eb4f044 654
03e69b12
MP
655/* Implement TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE. */
656
657static void
658m68k_override_options_after_change (void)
659{
660 if (m68k_sched_cpu == CPU_UNKNOWN)
661 {
662 flag_schedule_insns = 0;
663 flag_schedule_insns_after_reload = 0;
664 flag_modulo_sched = 0;
665 }
666}
667
7eb4f044
NS
668/* Generate a macro of the form __mPREFIX_cpu_NAME, where PREFIX is the
669 given argument and NAME is the argument passed to -mcpu. Return NULL
670 if -mcpu was not passed. */
671
672const char *
673m68k_cpp_cpu_ident (const char *prefix)
674{
675 if (!m68k_cpu_entry)
676 return NULL;
677 return concat ("__m", prefix, "_cpu_", m68k_cpu_entry->name, NULL);
678}
679
680/* Generate a macro of the form __mPREFIX_family_NAME, where PREFIX is the
681 given argument and NAME is the name of the representative device for
682 the -mcpu argument's family. Return NULL if -mcpu was not passed. */
683
684const char *
685m68k_cpp_cpu_family (const char *prefix)
686{
687 if (!m68k_cpu_entry)
688 return NULL;
689 return concat ("__m", prefix, "_family_", m68k_cpu_entry->family, NULL);
690}
79e68feb 691\f
2bccb817
KH
692/* Return m68k_fk_interrupt_handler if FUNC has an "interrupt" or
693 "interrupt_handler" attribute and interrupt_thread if FUNC has an
694 "interrupt_thread" attribute. Otherwise, return
695 m68k_fk_normal_function. */
a4242737
KH
696
697enum m68k_function_kind
698m68k_get_function_kind (tree func)
48ed72a4
PB
699{
700 tree a;
701
fa157b28
NS
702 gcc_assert (TREE_CODE (func) == FUNCTION_DECL);
703
2bccb817
KH
704 a = lookup_attribute ("interrupt", DECL_ATTRIBUTES (func));
705 if (a != NULL_TREE)
706 return m68k_fk_interrupt_handler;
707
48ed72a4 708 a = lookup_attribute ("interrupt_handler", DECL_ATTRIBUTES (func));
a4242737
KH
709 if (a != NULL_TREE)
710 return m68k_fk_interrupt_handler;
711
712 a = lookup_attribute ("interrupt_thread", DECL_ATTRIBUTES (func));
713 if (a != NULL_TREE)
714 return m68k_fk_interrupt_thread;
715
716 return m68k_fk_normal_function;
48ed72a4
PB
717}
718
719/* Handle an attribute requiring a FUNCTION_DECL; arguments as in
720 struct attribute_spec.handler. */
721static tree
722m68k_handle_fndecl_attribute (tree *node, tree name,
723 tree args ATTRIBUTE_UNUSED,
724 int flags ATTRIBUTE_UNUSED,
725 bool *no_add_attrs)
726{
727 if (TREE_CODE (*node) != FUNCTION_DECL)
728 {
29d08eba
JM
729 warning (OPT_Wattributes, "%qE attribute only applies to functions",
730 name);
48ed72a4
PB
731 *no_add_attrs = true;
732 }
733
a4242737
KH
734 if (m68k_get_function_kind (*node) != m68k_fk_normal_function)
735 {
736 error ("multiple interrupt attributes not allowed");
737 *no_add_attrs = true;
738 }
739
740 if (!TARGET_FIDOA
741 && !strcmp (IDENTIFIER_POINTER (name), "interrupt_thread"))
742 {
743 error ("interrupt_thread is available only on fido");
744 *no_add_attrs = true;
745 }
746
48ed72a4
PB
747 return NULL_TREE;
748}
860c4900
BI
749
750static void
3d74bc09 751m68k_compute_frame_layout (void)
860c4900
BI
752{
753 int regno, saved;
a40ed0f3 754 unsigned int mask;
a4242737
KH
755 enum m68k_function_kind func_kind =
756 m68k_get_function_kind (current_function_decl);
757 bool interrupt_handler = func_kind == m68k_fk_interrupt_handler;
758 bool interrupt_thread = func_kind == m68k_fk_interrupt_thread;
860c4900 759
3d74bc09
BI
760 /* Only compute the frame once per function.
761 Don't cache information until reload has been completed. */
762 if (current_frame.funcdef_no == current_function_funcdef_no
763 && reload_completed)
764 return;
765
766 current_frame.size = (get_frame_size () + 3) & -4;
860c4900 767
a40ed0f3 768 mask = saved = 0;
a4242737
KH
769
770 /* Interrupt thread does not need to save any register. */
771 if (!interrupt_thread)
772 for (regno = 0; regno < 16; regno++)
773 if (m68k_save_reg (regno, interrupt_handler))
774 {
775 mask |= 1 << (regno - D0_REG);
776 saved++;
777 }
3d74bc09
BI
778 current_frame.offset = saved * 4;
779 current_frame.reg_no = saved;
780 current_frame.reg_mask = mask;
860c4900 781
57047680 782 current_frame.foffset = 0;
a40ed0f3 783 mask = saved = 0;
dcc21c4c 784 if (TARGET_HARD_FLOAT)
860c4900 785 {
a4242737
KH
786 /* Interrupt thread does not need to save any register. */
787 if (!interrupt_thread)
788 for (regno = 16; regno < 24; regno++)
789 if (m68k_save_reg (regno, interrupt_handler))
790 {
791 mask |= 1 << (regno - FP0_REG);
792 saved++;
793 }
dcc21c4c 794 current_frame.foffset = saved * TARGET_FP_REG_SIZE;
3d74bc09 795 current_frame.offset += current_frame.foffset;
860c4900 796 }
57047680
GN
797 current_frame.fpu_no = saved;
798 current_frame.fpu_mask = mask;
3d74bc09
BI
799
800 /* Remember what function this frame refers to. */
801 current_frame.funcdef_no = current_function_funcdef_no;
860c4900
BI
802}
803
7b5cbb57
AS
804/* Worker function for TARGET_CAN_ELIMINATE. */
805
806bool
807m68k_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
808{
809 return (to == STACK_POINTER_REGNUM ? ! frame_pointer_needed : true);
810}
811
860c4900
BI
812HOST_WIDE_INT
813m68k_initial_elimination_offset (int from, int to)
814{
42b67c06
PB
815 int argptr_offset;
816 /* The arg pointer points 8 bytes before the start of the arguments,
817 as defined by FIRST_PARM_OFFSET. This makes it coincident with the
818 frame pointer in most frames. */
819 argptr_offset = frame_pointer_needed ? 0 : UNITS_PER_WORD;
860c4900 820 if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
42b67c06 821 return argptr_offset;
860c4900 822
3d74bc09 823 m68k_compute_frame_layout ();
860c4900 824
4761e388
NS
825 gcc_assert (to == STACK_POINTER_REGNUM);
826 switch (from)
827 {
a0a7fbc9 828 case ARG_POINTER_REGNUM:
42b67c06 829 return current_frame.offset + current_frame.size - argptr_offset;
4761e388
NS
830 case FRAME_POINTER_REGNUM:
831 return current_frame.offset + current_frame.size;
832 default:
833 gcc_unreachable ();
834 }
860c4900
BI
835}
836
97c55091
GN
837/* Refer to the array `regs_ever_live' to determine which registers
838 to save; `regs_ever_live[I]' is nonzero if register number I
839 is ever used in the function. This function is responsible for
840 knowing which registers should not be saved even if used.
841 Return true if we need to save REGNO. */
842
48ed72a4
PB
843static bool
844m68k_save_reg (unsigned int regno, bool interrupt_handler)
2cff4a6e 845{
4ab870f5 846 if (flag_pic && regno == PIC_REG)
b86ba8a3 847 {
e3b5732b 848 if (crtl->saves_all_registers)
afcb440c 849 return true;
e3b5732b 850 if (crtl->uses_pic_offset_table)
b86ba8a3 851 return true;
6357eb0d
RS
852 /* Reload may introduce constant pool references into a function
853 that thitherto didn't need a PIC register. Note that the test
854 above will not catch that case because we will only set
e3b5732b 855 crtl->uses_pic_offset_table when emitting
6357eb0d 856 the address reloads. */
e3b5732b 857 if (crtl->uses_const_pool)
6357eb0d 858 return true;
b86ba8a3 859 }
2cff4a6e 860
e3b5732b 861 if (crtl->calls_eh_return)
2cff4a6e
AS
862 {
863 unsigned int i;
864 for (i = 0; ; i++)
865 {
866 unsigned int test = EH_RETURN_DATA_REGNO (i);
867 if (test == INVALID_REGNUM)
868 break;
869 if (test == regno)
48ed72a4 870 return true;
2cff4a6e
AS
871 }
872 }
873
48ed72a4
PB
874 /* Fixed regs we never touch. */
875 if (fixed_regs[regno])
876 return false;
877
878 /* The frame pointer (if it is such) is handled specially. */
879 if (regno == FRAME_POINTER_REGNUM && frame_pointer_needed)
880 return false;
881
882 /* Interrupt handlers must also save call_used_regs
883 if they are live or when calling nested functions. */
884 if (interrupt_handler)
a0a7fbc9 885 {
6fb5fa3c 886 if (df_regs_ever_live_p (regno))
a0a7fbc9 887 return true;
48ed72a4 888
416ff32e 889 if (!crtl->is_leaf && call_used_regs[regno])
a0a7fbc9
AS
890 return true;
891 }
48ed72a4
PB
892
893 /* Never need to save registers that aren't touched. */
6fb5fa3c 894 if (!df_regs_ever_live_p (regno))
48ed72a4
PB
895 return false;
896
b2e08ed4 897 /* Otherwise save everything that isn't call-clobbered. */
48ed72a4 898 return !call_used_regs[regno];
2cff4a6e
AS
899}
900
a40ed0f3
KH
/* Emit RTL for a MOVEM or FMOVEM instruction.  BASE + OFFSET represents
   the lowest memory address.  COUNT is the number of registers to be
   moved, with register REGNO + I being moved if bit I of MASK is set.
   STORE_P specifies the direction of the move and ADJUST_STACK_P says
   whether or not this is pre-decrement (if STORE_P) or post-increment
   (if !STORE_P) operation.  Returns the insn emitted.  */

static rtx
m68k_emit_movem (rtx base, HOST_WIDE_INT offset,
		 unsigned int count, unsigned int regno,
		 unsigned int mask, bool store_p, bool adjust_stack_p)
{
  int i;
  rtx body, addr, src, operands[2];
  enum machine_mode mode;

  /* The PARALLEL holds one SET per moved register, plus optionally the
     stack-pointer adjustment itself as the first element.  */
  body = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (adjust_stack_p + count));
  mode = reg_raw_mode[regno];
  i = 0;

  if (adjust_stack_p)
    {
      /* Pre-decrement stores move BASE down by the total block size;
	 post-increment loads move it up.  */
      src = plus_constant (Pmode, base,
			   (count
			    * GET_MODE_SIZE (mode)
			    * (HOST_WIDE_INT) (store_p ? -1 : 1)));
      XVECEXP (body, 0, i++) = gen_rtx_SET (VOIDmode, base, src);
    }

  for (; mask != 0; mask >>= 1, regno++)
    if (mask & 1)
      {
	addr = plus_constant (Pmode, base, offset);
	/* operands[0] is the destination, operands[1] the source; the
	   boolean index trick makes STORE_P pick which one is memory.  */
	operands[!store_p] = gen_frame_mem (mode, addr);
	operands[store_p] = gen_rtx_REG (mode, regno);
	XVECEXP (body, 0, i++)
	  = gen_rtx_SET (VOIDmode, operands[0], operands[1]);
	offset += GET_MODE_SIZE (mode);
      }
  gcc_assert (i == XVECLEN (body, 0));

  return emit_insn (body);
}
944
945/* Make INSN a frame-related instruction. */
79e68feb 946
08c148a8 947static void
a40ed0f3
KH
948m68k_set_frame_related (rtx insn)
949{
950 rtx body;
951 int i;
952
953 RTX_FRAME_RELATED_P (insn) = 1;
954 body = PATTERN (insn);
955 if (GET_CODE (body) == PARALLEL)
956 for (i = 0; i < XVECLEN (body, 0); i++)
957 RTX_FRAME_RELATED_P (XVECEXP (body, 0, i)) = 1;
958}
959
/* Emit RTL for the "prologue" define_expand: allocate the frame, save
   the call-saved integer and FP registers, and set up the PIC register
   if the function needs one.  */

void
m68k_expand_prologue (void)
{
  HOST_WIDE_INT fsize_with_regs;
  rtx limit, src, dest;

  m68k_compute_frame_layout ();

  if (flag_stack_usage_info)
    current_function_static_stack_size
      = current_frame.size + current_frame.offset;

  /* If the stack limit is a symbol, we can check it here,
     before actually allocating the space.  */
  if (crtl->limit_stack
      && GET_CODE (stack_limit_rtx) == SYMBOL_REF)
    {
      limit = plus_constant (Pmode, stack_limit_rtx, current_frame.size + 4);
      if (!m68k_legitimate_constant_p (Pmode, limit))
	{
	  /* The limit is not directly usable as an operand; stage it
	     through %d0 first.  */
	  emit_move_insn (gen_rtx_REG (Pmode, D0_REG), limit);
	  limit = gen_rtx_REG (Pmode, D0_REG);
	}
      emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode,
					    stack_pointer_rtx, limit),
			       stack_pointer_rtx, limit,
			       const1_rtx));
    }

  fsize_with_regs = current_frame.size;
  if (TARGET_COLDFIRE)
    {
      /* ColdFire's move multiple instructions do not allow pre-decrement
	 addressing.  Add the size of movem saves to the initial stack
	 allocation instead.  */
      if (current_frame.reg_no >= MIN_MOVEM_REGS)
	fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
      if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
	fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
    }

  if (frame_pointer_needed)
    {
      if (fsize_with_regs == 0 && TUNE_68040)
	{
	  /* On the 68040, two separate moves are faster than link.w 0.  */
	  dest = gen_frame_mem (Pmode,
				gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
	  m68k_set_frame_related (emit_move_insn (dest, frame_pointer_rtx));
	  m68k_set_frame_related (emit_move_insn (frame_pointer_rtx,
						  stack_pointer_rtx));
	}
      else if (fsize_with_regs < 0x8000 || TARGET_68020)
	/* The frame fits in link.w's 16-bit displacement (or the CPU
	   has link.l).  */
	m68k_set_frame_related
	  (emit_insn (gen_link (frame_pointer_rtx,
				GEN_INT (-4 - fsize_with_regs))));
      else
	{
	  /* Frame too big for link.w: link with zero size, then adjust
	     the stack pointer separately.  */
	  m68k_set_frame_related
	    (emit_insn (gen_link (frame_pointer_rtx, GEN_INT (-4))));
	  m68k_set_frame_related
	    (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-fsize_with_regs))));
	}

      /* If the frame pointer is needed, emit a special barrier that
	 will prevent the scheduler from moving stores to the frame
	 before the stack adjustment.  */
      emit_insn (gen_stack_tie (stack_pointer_rtx, frame_pointer_rtx));
    }
  else if (fsize_with_regs != 0)
    m68k_set_frame_related
      (emit_insn (gen_addsi3 (stack_pointer_rtx,
			      stack_pointer_rtx,
			      GEN_INT (-fsize_with_regs))));

  if (current_frame.fpu_mask)
    {
      gcc_assert (current_frame.fpu_no >= MIN_FMOVEM_REGS);
      if (TARGET_68881)
	/* 68881: fmovem with pre-decrement, XFmode slots.  */
	m68k_set_frame_related
	  (m68k_emit_movem (stack_pointer_rtx,
			    current_frame.fpu_no * -GET_MODE_SIZE (XFmode),
			    current_frame.fpu_no, FP0_REG,
			    current_frame.fpu_mask, true, true));
      else
	{
	  int offset;

	  /* If we're using moveml to save the integer registers,
	     the stack pointer will point to the bottom of the moveml
	     save area.  Find the stack offset of the first FP register.  */
	  if (current_frame.reg_no < MIN_MOVEM_REGS)
	    offset = 0;
	  else
	    offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
	  m68k_set_frame_related
	    (m68k_emit_movem (stack_pointer_rtx, offset,
			      current_frame.fpu_no, FP0_REG,
			      current_frame.fpu_mask, true, false));
	}
    }

  /* If the stack limit is not a symbol, check it here.
     This has the disadvantage that it may be too late...  */
  if (crtl->limit_stack)
    {
      if (REG_P (stack_limit_rtx))
	emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode, stack_pointer_rtx,
					      stack_limit_rtx),
				 stack_pointer_rtx, stack_limit_rtx,
				 const1_rtx));

      else if (GET_CODE (stack_limit_rtx) != SYMBOL_REF)
	warning (0, "stack limit expression is not supported");
    }

  if (current_frame.reg_no < MIN_MOVEM_REGS)
    {
      /* Store each register separately in the same order moveml does.  */
      int i;

      for (i = 16; i-- > 0; )
	if (current_frame.reg_mask & (1 << i))
	  {
	    src = gen_rtx_REG (SImode, D0_REG + i);
	    dest = gen_frame_mem (SImode,
				  gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
	    m68k_set_frame_related (emit_insn (gen_movsi (dest, src)));
	  }
    }
  else
    {
      if (TARGET_COLDFIRE)
	/* The required register save space has already been allocated.
	   The first register should be stored at (%sp).  */
	m68k_set_frame_related
	  (m68k_emit_movem (stack_pointer_rtx, 0,
			    current_frame.reg_no, D0_REG,
			    current_frame.reg_mask, true, false));
      else
	m68k_set_frame_related
	  (m68k_emit_movem (stack_pointer_rtx,
			    current_frame.reg_no * -GET_MODE_SIZE (SImode),
			    current_frame.reg_no, D0_REG,
			    current_frame.reg_mask, true, true));
    }

  if (!TARGET_SEP_DATA
      && crtl->uses_pic_offset_table)
    emit_insn (gen_load_got (pic_offset_table_rtx));
}
1115\f
413ac1b2
RS
1116/* Return true if a simple (return) instruction is sufficient for this
1117 instruction (i.e. if no epilogue is needed). */
79e68feb 1118
3d74bc09 1119bool
a2bda628 1120m68k_use_return_insn (void)
79e68feb 1121{
79e68feb 1122 if (!reload_completed || frame_pointer_needed || get_frame_size () != 0)
3d74bc09 1123 return false;
125ed86f 1124
a0a7fbc9 1125 m68k_compute_frame_layout ();
413ac1b2 1126 return current_frame.offset == 0;
79e68feb
RS
1127}
1128
/* Emit RTL for the "epilogue" or "sibcall_epilogue" define_expand;
   SIBCALL_P says which.

   The function epilogue should not depend on the current stack pointer!
   It should use the frame pointer only, if there is a frame pointer.
   This is mandatory because of alloca; we also take advantage of it to
   omit stack adjustments before returning.  */

void
m68k_expand_epilogue (bool sibcall_p)
{
  HOST_WIDE_INT fsize, fsize_with_regs;
  /* BIG means the frame offset does not fit a 16-bit displacement and
     we restore via an (%fp,%a1.l) index; RESTORE_FROM_SP means we can
     pop registers directly off the stack pointer.  */
  bool big, restore_from_sp;

  m68k_compute_frame_layout ();

  fsize = current_frame.size;
  big = false;
  restore_from_sp = false;

  /* FIXME : crtl->is_leaf below is too strong.
     What we really need to know there is if there could be pending
     stack adjustment needed at that point.  */
  restore_from_sp = (!frame_pointer_needed
		     || (!cfun->calls_alloca && crtl->is_leaf));

  /* fsize_with_regs is the size we need to adjust the sp when
     popping the frame.  */
  fsize_with_regs = fsize;
  if (TARGET_COLDFIRE && restore_from_sp)
    {
      /* ColdFire's move multiple instructions do not allow post-increment
	 addressing.  Add the size of movem loads to the final deallocation
	 instead.  */
      if (current_frame.reg_no >= MIN_MOVEM_REGS)
	fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
      if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
	fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
    }

  if (current_frame.offset + fsize >= 0x8000
      && !restore_from_sp
      && (current_frame.reg_mask || current_frame.fpu_mask))
    {
      if (TARGET_COLDFIRE
	  && (current_frame.reg_no >= MIN_MOVEM_REGS
	      || current_frame.fpu_no >= MIN_FMOVEM_REGS))
	{
	  /* ColdFire's move multiple instructions do not support the
	     (d8,Ax,Xi) addressing mode, so we're as well using a normal
	     stack-based restore.  */
	  emit_move_insn (gen_rtx_REG (Pmode, A1_REG),
			  GEN_INT (-(current_frame.offset + fsize)));
	  emit_insn (gen_addsi3 (stack_pointer_rtx,
				 gen_rtx_REG (Pmode, A1_REG),
				 frame_pointer_rtx));
	  restore_from_sp = true;
	}
      else
	{
	  /* Put the large offset in %a1 so restores can index through
	     it; FSIZE is folded into that offset.  */
	  emit_move_insn (gen_rtx_REG (Pmode, A1_REG), GEN_INT (-fsize));
	  fsize = 0;
	  big = true;
	}
    }

  if (current_frame.reg_no < MIN_MOVEM_REGS)
    {
      /* Restore each register separately in the same order moveml does.  */
      int i;
      HOST_WIDE_INT offset;

      offset = current_frame.offset + fsize;
      for (i = 0; i < 16; i++)
	if (current_frame.reg_mask & (1 << i))
	  {
	    rtx addr;

	    if (big)
	      {
		/* Generate the address -OFFSET(%fp,%a1.l).  */
		addr = gen_rtx_REG (Pmode, A1_REG);
		addr = gen_rtx_PLUS (Pmode, addr, frame_pointer_rtx);
		addr = plus_constant (Pmode, addr, -offset);
	      }
	    else if (restore_from_sp)
	      addr = gen_rtx_POST_INC (Pmode, stack_pointer_rtx);
	    else
	      addr = plus_constant (Pmode, frame_pointer_rtx, -offset);
	    emit_move_insn (gen_rtx_REG (SImode, D0_REG + i),
			    gen_frame_mem (SImode, addr));
	    offset -= GET_MODE_SIZE (SImode);
	  }
    }
  else if (current_frame.reg_mask)
    {
      if (big)
	m68k_emit_movem (gen_rtx_PLUS (Pmode,
				       gen_rtx_REG (Pmode, A1_REG),
				       frame_pointer_rtx),
			 -(current_frame.offset + fsize),
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false, false);
      else if (restore_from_sp)
	m68k_emit_movem (stack_pointer_rtx, 0,
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false,
			 !TARGET_COLDFIRE);
      else
	m68k_emit_movem (frame_pointer_rtx,
			 -(current_frame.offset + fsize),
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false, false);
    }

  if (current_frame.fpu_no > 0)
    {
      if (big)
	m68k_emit_movem (gen_rtx_PLUS (Pmode,
				       gen_rtx_REG (Pmode, A1_REG),
				       frame_pointer_rtx),
			 -(current_frame.foffset + fsize),
			 current_frame.fpu_no, FP0_REG,
			 current_frame.fpu_mask, false, false);
      else if (restore_from_sp)
	{
	  if (TARGET_COLDFIRE)
	    {
	      int offset;

	      /* If we used moveml to restore the integer registers, the
		 stack pointer will still point to the bottom of the moveml
		 save area.  Find the stack offset of the first FP
		 register.  */
	      if (current_frame.reg_no < MIN_MOVEM_REGS)
		offset = 0;
	      else
		offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
	      m68k_emit_movem (stack_pointer_rtx, offset,
			       current_frame.fpu_no, FP0_REG,
			       current_frame.fpu_mask, false, false);
	    }
	  else
	    m68k_emit_movem (stack_pointer_rtx, 0,
			     current_frame.fpu_no, FP0_REG,
			     current_frame.fpu_mask, false, true);
	}
      else
	m68k_emit_movem (frame_pointer_rtx,
			 -(current_frame.foffset + fsize),
			 current_frame.fpu_no, FP0_REG,
			 current_frame.fpu_mask, false, false);
    }

  if (frame_pointer_needed)
    emit_insn (gen_unlink (frame_pointer_rtx));
  else if (fsize_with_regs)
    emit_insn (gen_addsi3 (stack_pointer_rtx,
			   stack_pointer_rtx,
			   GEN_INT (fsize_with_regs)));

  if (crtl->calls_eh_return)
    emit_insn (gen_addsi3 (stack_pointer_rtx,
			   stack_pointer_rtx,
			   EH_RETURN_STACKADJ_RTX));

  if (!sibcall_p)
    emit_jump_insn (ret_rtx);
}
1298\f
8a4a2253 1299/* Return true if X is a valid comparison operator for the dbcc
64a184e9
RS
1300 instruction.
1301
1302 Note it rejects floating point comparison operators.
1303 (In the future we could use Fdbcc).
1304
1305 It also rejects some comparisons when CC_NO_OVERFLOW is set. */
1306
1307int
41b6a5e2 1308valid_dbcc_comparison_p_2 (rtx x, enum machine_mode mode ATTRIBUTE_UNUSED)
64a184e9 1309{
64a184e9
RS
1310 switch (GET_CODE (x))
1311 {
64a184e9
RS
1312 case EQ: case NE: case GTU: case LTU:
1313 case GEU: case LEU:
1314 return 1;
1315
1316 /* Reject some when CC_NO_OVERFLOW is set. This may be over
1317 conservative */
1318 case GT: case LT: case GE: case LE:
1319 return ! (cc_prev_status.flags & CC_NO_OVERFLOW);
1320 default:
1321 return 0;
1322 }
1323}
1324
a0ab749a 1325/* Return nonzero if flags are currently in the 68881 flag register. */
6a0f85e3 1326int
8a4a2253 1327flags_in_68881 (void)
6a0f85e3
TG
1328{
1329 /* We could add support for these in the future */
1330 return cc_status.flags & CC_IN_68881;
1331}
1332
db5e2d51
MK
1333/* Return true if PARALLEL contains register REGNO. */
1334static bool
1335m68k_reg_present_p (const_rtx parallel, unsigned int regno)
1336{
1337 int i;
1338
1339 if (REG_P (parallel) && REGNO (parallel) == regno)
1340 return true;
1341
1342 if (GET_CODE (parallel) != PARALLEL)
1343 return false;
1344
1345 for (i = 0; i < XVECLEN (parallel, 0); ++i)
1346 {
1347 const_rtx x;
1348
1349 x = XEXP (XVECEXP (parallel, 0, i), 0);
1350 if (REG_P (x) && REGNO (x) == regno)
1351 return true;
1352 }
1353
1354 return false;
1355}
1356
fa157b28 1357/* Implement TARGET_FUNCTION_OK_FOR_SIBCALL_P. */
f7e70894
RS
1358
1359static bool
fa157b28 1360m68k_ok_for_sibcall_p (tree decl, tree exp)
f7e70894 1361{
fa157b28
NS
1362 enum m68k_function_kind kind;
1363
1364 /* We cannot use sibcalls for nested functions because we use the
1365 static chain register for indirect calls. */
1366 if (CALL_EXPR_STATIC_CHAIN (exp))
1367 return false;
1368
db5e2d51
MK
1369 if (!VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
1370 {
1371 /* Check that the return value locations are the same. For
1372 example that we aren't returning a value from the sibling in
1373 a D0 register but then need to transfer it to a A0 register. */
1374 rtx cfun_value;
1375 rtx call_value;
1376
1377 cfun_value = FUNCTION_VALUE (TREE_TYPE (DECL_RESULT (cfun->decl)),
1378 cfun->decl);
1379 call_value = FUNCTION_VALUE (TREE_TYPE (exp), decl);
1380
1381 /* Check that the values are equal or that the result the callee
1382 function returns is superset of what the current function returns. */
1383 if (!(rtx_equal_p (cfun_value, call_value)
1384 || (REG_P (cfun_value)
1385 && m68k_reg_present_p (call_value, REGNO (cfun_value)))))
1386 return false;
1387 }
1388
fa157b28
NS
1389 kind = m68k_get_function_kind (current_function_decl);
1390 if (kind == m68k_fk_normal_function)
1391 /* We can always sibcall from a normal function, because it's
1392 undefined if it is calling an interrupt function. */
1393 return true;
1394
1395 /* Otherwise we can only sibcall if the function kind is known to be
1396 the same. */
1397 if (decl && m68k_get_function_kind (decl) == kind)
1398 return true;
1399
1400 return false;
f7e70894
RS
1401}
1402
13d3961c
NF
1403/* On the m68k all args are always pushed. */
1404
1405static rtx
d5cc9181 1406m68k_function_arg (cumulative_args_t cum ATTRIBUTE_UNUSED,
13d3961c
NF
1407 enum machine_mode mode ATTRIBUTE_UNUSED,
1408 const_tree type ATTRIBUTE_UNUSED,
1409 bool named ATTRIBUTE_UNUSED)
1410{
1411 return NULL_RTX;
1412}
1413
1414static void
d5cc9181 1415m68k_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
13d3961c
NF
1416 const_tree type, bool named ATTRIBUTE_UNUSED)
1417{
d5cc9181
JR
1418 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
1419
13d3961c
NF
1420 *cum += (mode != BLKmode
1421 ? (GET_MODE_SIZE (mode) + 3) & ~3
1422 : (int_size_in_bytes (type) + 3) & ~3);
1423}
1424
29ca003a
RS
1425/* Convert X to a legitimate function call memory reference and return the
1426 result. */
a2ef3db7 1427
29ca003a
RS
1428rtx
1429m68k_legitimize_call_address (rtx x)
1430{
1431 gcc_assert (MEM_P (x));
1432 if (call_operand (XEXP (x, 0), VOIDmode))
1433 return x;
1434 return replace_equiv_address (x, force_reg (Pmode, XEXP (x, 0)));
a2ef3db7
BI
1435}
1436
f7e70894
RS
1437/* Likewise for sibling calls. */
1438
1439rtx
1440m68k_legitimize_sibcall_address (rtx x)
1441{
1442 gcc_assert (MEM_P (x));
1443 if (sibcall_operand (XEXP (x, 0), VOIDmode))
1444 return x;
1445
1446 emit_move_insn (gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM), XEXP (x, 0));
1447 return replace_equiv_address (x, gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM));
1448}
1449
/* Convert X to a legitimate address and return it if successful.  Otherwise
   return X.

   For the 68000, we handle X+REG by loading X into a register R and
   using R+REG.  R will go in an address reg and indexing will be used.
   However, if REG is a broken-out memory address or multiplication,
   nothing needs to be done because REG can certainly go in an address reg.  */

static rtx
m68k_legitimize_address (rtx x, rtx oldx, enum machine_mode mode)
{
  if (m68k_tls_symbol_p (x))
    return m68k_legitimize_tls_address (x);

  if (GET_CODE (x) == PLUS)
    {
      /* CH tracks whether X differs from what the caller handed us;
	 COPIED ensures we unshare X at most once before mutating it
	 in place (mutating a shared rtx would corrupt other users).  */
      int ch = (x) != (oldx);
      int copied = 0;

#define COPY_ONCE(Y) if (!copied) { Y = copy_rtx (Y); copied = ch = 1; }

      /* Break out multiplications (index scaling) into registers.  */
      if (GET_CODE (XEXP (x, 0)) == MULT)
	{
	  COPY_ONCE (x);
	  XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
	}
      if (GET_CODE (XEXP (x, 1)) == MULT)
	{
	  COPY_ONCE (x);
	  XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
	}
      if (ch)
	{
	  if (GET_CODE (XEXP (x, 1)) == REG
	      && GET_CODE (XEXP (x, 0)) == REG)
	    {
	      /* REG+REG is fine, except that ColdFire FPU loads cannot
		 use an indexed address.  */
	      if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
		{
		  COPY_ONCE (x);
		  x = force_operand (x, 0);
		}
	      return x;
	    }
	  if (memory_address_p (mode, x))
	    return x;
	}
      /* One side is a (possibly sign-extended HImode) register: load
	 the other side into a fresh register and index off it.  */
      if (GET_CODE (XEXP (x, 0)) == REG
	  || (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
	      && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
	      && GET_MODE (XEXP (XEXP (x, 0), 0)) == HImode))
	{
	  rtx temp = gen_reg_rtx (Pmode);
	  rtx val = force_operand (XEXP (x, 1), 0);
	  emit_move_insn (temp, val);
	  COPY_ONCE (x);
	  XEXP (x, 1) = temp;
	  if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
	      && GET_CODE (XEXP (x, 0)) == REG)
	    x = force_operand (x, 0);
	}
      else if (GET_CODE (XEXP (x, 1)) == REG
	       || (GET_CODE (XEXP (x, 1)) == SIGN_EXTEND
		   && GET_CODE (XEXP (XEXP (x, 1), 0)) == REG
		   && GET_MODE (XEXP (XEXP (x, 1), 0)) == HImode))
	{
	  rtx temp = gen_reg_rtx (Pmode);
	  rtx val = force_operand (XEXP (x, 0), 0);
	  emit_move_insn (temp, val);
	  COPY_ONCE (x);
	  XEXP (x, 0) = temp;
	  if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
	      && GET_CODE (XEXP (x, 1)) == REG)
	    x = force_operand (x, 0);
	}
    }

  return x;
}
1528
1529
64a184e9
RS
1530/* Output a dbCC; jCC sequence. Note we do not handle the
1531 floating point version of this sequence (Fdbcc). We also
1532 do not handle alternative conditions when CC_NO_OVERFLOW is
6a0f85e3
TG
1533 set. It is assumed that valid_dbcc_comparison_p and flags_in_68881 will
1534 kick those out before we get here. */
64a184e9 1535
1d8eaa6b 1536void
8a4a2253 1537output_dbcc_and_branch (rtx *operands)
64a184e9 1538{
64a184e9
RS
1539 switch (GET_CODE (operands[3]))
1540 {
1541 case EQ:
da398bb5 1542 output_asm_insn ("dbeq %0,%l1\n\tjeq %l2", operands);
e6d98cb0 1543 break;
64a184e9
RS
1544
1545 case NE:
da398bb5 1546 output_asm_insn ("dbne %0,%l1\n\tjne %l2", operands);
e6d98cb0 1547 break;
64a184e9
RS
1548
1549 case GT:
da398bb5 1550 output_asm_insn ("dbgt %0,%l1\n\tjgt %l2", operands);
e6d98cb0 1551 break;
64a184e9
RS
1552
1553 case GTU:
da398bb5 1554 output_asm_insn ("dbhi %0,%l1\n\tjhi %l2", operands);
e6d98cb0 1555 break;
64a184e9
RS
1556
1557 case LT:
da398bb5 1558 output_asm_insn ("dblt %0,%l1\n\tjlt %l2", operands);
e6d98cb0 1559 break;
64a184e9
RS
1560
1561 case LTU:
da398bb5 1562 output_asm_insn ("dbcs %0,%l1\n\tjcs %l2", operands);
e6d98cb0 1563 break;
64a184e9
RS
1564
1565 case GE:
da398bb5 1566 output_asm_insn ("dbge %0,%l1\n\tjge %l2", operands);
e6d98cb0 1567 break;
64a184e9
RS
1568
1569 case GEU:
da398bb5 1570 output_asm_insn ("dbcc %0,%l1\n\tjcc %l2", operands);
e6d98cb0 1571 break;
64a184e9
RS
1572
1573 case LE:
da398bb5 1574 output_asm_insn ("dble %0,%l1\n\tjle %l2", operands);
e6d98cb0 1575 break;
64a184e9
RS
1576
1577 case LEU:
da398bb5 1578 output_asm_insn ("dbls %0,%l1\n\tjls %l2", operands);
e6d98cb0 1579 break;
64a184e9
RS
1580
1581 default:
4761e388 1582 gcc_unreachable ();
64a184e9
RS
1583 }
1584
1585 /* If the decrement is to be done in SImode, then we have
7a1929e1 1586 to compensate for the fact that dbcc decrements in HImode. */
64a184e9
RS
1587 switch (GET_MODE (operands[0]))
1588 {
1589 case SImode:
da398bb5 1590 output_asm_insn ("clr%.w %0\n\tsubq%.l #1,%0\n\tjpl %l1", operands);
64a184e9
RS
1591 break;
1592
1593 case HImode:
1594 break;
1595
1596 default:
4761e388 1597 gcc_unreachable ();
64a184e9
RS
1598 }
1599}
1600
/* Output assembly to set DEST from a DImode comparison OP between
   OPERAND1 and OPERAND2: compare the two halves word by word, then
   emit the appropriate scc instruction(s) around internal labels.  */

const char *
output_scc_di (rtx op, rtx operand1, rtx operand2, rtx dest)
{
  rtx loperands[7];
  enum rtx_code op_code = GET_CODE (op);

  /* This does not produce a useful cc.  */
  CC_STATUS_INIT;

  /* The m68k cmp.l instruction requires operand1 to be a reg as used
     below.  Swap the operands and change the op if these requirements
     are not fulfilled.  */
  if (GET_CODE (operand2) == REG && GET_CODE (operand1) != REG)
    {
      rtx tmp = operand1;

      operand1 = operand2;
      operand2 = tmp;
      op_code = swap_condition (op_code);
    }
  /* loperands[0]/[1] are the high/low words of operand1; likewise
     [2]/[3] for operand2 when it is not the constant zero.  */
  loperands[0] = operand1;
  if (GET_CODE (operand1) == REG)
    loperands[1] = gen_rtx_REG (SImode, REGNO (operand1) + 1);
  else
    loperands[1] = adjust_address (operand1, SImode, 4);
  if (operand2 != const0_rtx)
    {
      loperands[2] = operand2;
      if (GET_CODE (operand2) == REG)
	loperands[3] = gen_rtx_REG (SImode, REGNO (operand2) + 1);
      else
	loperands[3] = adjust_address (operand2, SImode, 4);
    }
  /* loperands[4] is the label reached when the high words decide.  */
  loperands[4] = gen_label_rtx ();
  if (operand2 != const0_rtx)
    output_asm_insn ("cmp%.l %2,%0\n\tjne %l4\n\tcmp%.l %3,%1", loperands);
  else
    {
      /* Comparing against zero: tst is not available for address
	 registers before the 68020, fall back to cmp.w #0.  */
      if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[0]))
	output_asm_insn ("tst%.l %0", loperands);
      else
	output_asm_insn ("cmp%.w #0,%0", loperands);

      output_asm_insn ("jne %l4", loperands);

      if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[1]))
	output_asm_insn ("tst%.l %1", loperands);
      else
	output_asm_insn ("cmp%.w #0,%1", loperands);
    }

  loperands[5] = dest;

  /* For the strict signed/unsigned orderings, the high-word comparison
     uses the signed condition and the low-word comparison the unsigned
     one; loperands[6] labels the join point.  */
  switch (op_code)
    {
    case EQ:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("seq %5", loperands);
      break;

    case NE:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sne %5", loperands);
      break;

    case GT:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("shi %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sgt %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case GTU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("shi %5", loperands);
      break;

    case LT:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("scs %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("slt %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case LTU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("scs %5", loperands);
      break;

    case GE:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("scc %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sge %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case GEU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("scc %5", loperands);
      break;

    case LE:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("sls %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sle %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case LEU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sls %5", loperands);
      break;

    default:
      gcc_unreachable ();
    }
  return "";
}
1737
/* Output a bit-test instruction for bit COUNTOP of DATAOP, storing the
   chosen operands into OPERANDS.  SIGNPOS is the sign-bit position of
   the access unit (e.g. 7 for byte tests).  INSN is the current insn,
   used to peek at the following test.  Returns the asm template; also
   updates cc_status to describe the flags the chosen sequence sets.  */

const char *
output_btst (rtx *operands, rtx countop, rtx dataop, rtx insn, int signpos)
{
  operands[0] = countop;
  operands[1] = dataop;

  if (GET_CODE (countop) == CONST_INT)
    {
      register int count = INTVAL (countop);
      /* If COUNT is bigger than size of storage unit in use,
	 advance to the containing unit of same size.  */
      if (count > signpos)
	{
	  int offset = (count & ~signpos) / 8;
	  count = count & signpos;
	  operands[1] = dataop = adjust_address (dataop, QImode, offset);
	}
      if (count == signpos)
	cc_status.flags = CC_NOT_POSITIVE | CC_Z_IN_NOT_N;
      else
	cc_status.flags = CC_NOT_NEGATIVE | CC_Z_IN_NOT_N;

      /* These three statements used to use next_insns_test_no...
	 but it appears that this should do the same job.  */
      if (count == 31
	  && next_insn_tests_no_inequality (insn))
	return "tst%.l %1";
      if (count == 15
	  && next_insn_tests_no_inequality (insn))
	return "tst%.w %1";
      if (count == 7
	  && next_insn_tests_no_inequality (insn))
	return "tst%.b %1";
      /* Try to use `movew to ccr' followed by the appropriate branch insn.
	 On some m68k variants unfortunately that's slower than btst.
	 On 68000 and higher, that should also work for all HImode operands. */
      if (TUNE_CPU32 || TARGET_COLDFIRE || optimize_size)
	{
	  if (count == 3 && DATA_REG_P (operands[1])
	      && next_insn_tests_no_inequality (insn))
	    {
	      cc_status.flags = CC_NOT_NEGATIVE | CC_Z_IN_NOT_N | CC_NO_OVERFLOW;
	      return "move%.w %1,%%ccr";
	    }
	  if (count == 2 && DATA_REG_P (operands[1])
	      && next_insn_tests_no_inequality (insn))
	    {
	      cc_status.flags = CC_NOT_NEGATIVE | CC_INVERTED | CC_NO_OVERFLOW;
	      return "move%.w %1,%%ccr";
	    }
	  /* count == 1 followed by bvc/bvs and
	     count == 0 followed by bcc/bcs are also possible, but need
	     m68k-specific CC_Z_IN_NOT_V and CC_Z_IN_NOT_C flags. */
	}

      cc_status.flags = CC_NOT_NEGATIVE;
    }
  return "btst %0,%1";
}
79e68feb 1797\f
fc2241eb
RS
1798/* Return true if X is a legitimate base register. STRICT_P says
1799 whether we need strict checking. */
1800
1801bool
1802m68k_legitimate_base_reg_p (rtx x, bool strict_p)
1803{
1804 /* Allow SUBREG everywhere we allow REG. This results in better code. */
1805 if (!strict_p && GET_CODE (x) == SUBREG)
1806 x = SUBREG_REG (x);
1807
1808 return (REG_P (x)
1809 && (strict_p
1810 ? REGNO_OK_FOR_BASE_P (REGNO (x))
bf32249e 1811 : REGNO_OK_FOR_BASE_NONSTRICT_P (REGNO (x))));
fc2241eb
RS
1812}
1813
1814/* Return true if X is a legitimate index register. STRICT_P says
1815 whether we need strict checking. */
1816
1817bool
1818m68k_legitimate_index_reg_p (rtx x, bool strict_p)
1819{
1820 if (!strict_p && GET_CODE (x) == SUBREG)
1821 x = SUBREG_REG (x);
1822
1823 return (REG_P (x)
1824 && (strict_p
1825 ? REGNO_OK_FOR_INDEX_P (REGNO (x))
bf32249e 1826 : REGNO_OK_FOR_INDEX_NONSTRICT_P (REGNO (x))));
fc2241eb
RS
1827}
1828
1829/* Return true if X is a legitimate index expression for a (d8,An,Xn) or
1830 (bd,An,Xn) addressing mode. Fill in the INDEX and SCALE fields of
1831 ADDRESS if so. STRICT_P says whether we need strict checking. */
1832
1833static bool
1834m68k_decompose_index (rtx x, bool strict_p, struct m68k_address *address)
1835{
1836 int scale;
1837
1838 /* Check for a scale factor. */
1839 scale = 1;
1840 if ((TARGET_68020 || TARGET_COLDFIRE)
1841 && GET_CODE (x) == MULT
1842 && GET_CODE (XEXP (x, 1)) == CONST_INT
1843 && (INTVAL (XEXP (x, 1)) == 2
1844 || INTVAL (XEXP (x, 1)) == 4
1845 || (INTVAL (XEXP (x, 1)) == 8
1846 && (TARGET_COLDFIRE_FPU || !TARGET_COLDFIRE))))
1847 {
1848 scale = INTVAL (XEXP (x, 1));
1849 x = XEXP (x, 0);
1850 }
1851
1852 /* Check for a word extension. */
1853 if (!TARGET_COLDFIRE
1854 && GET_CODE (x) == SIGN_EXTEND
1855 && GET_MODE (XEXP (x, 0)) == HImode)
1856 x = XEXP (x, 0);
1857
1858 if (m68k_legitimate_index_reg_p (x, strict_p))
1859 {
1860 address->scale = scale;
1861 address->index = x;
1862 return true;
1863 }
1864
1865 return false;
1866}
1867
7ffb5e78
RS
1868/* Return true if X is an illegitimate symbolic constant. */
1869
1870bool
1871m68k_illegitimate_symbolic_constant_p (rtx x)
1872{
1873 rtx base, offset;
1874
1875 if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P)
1876 {
1877 split_const (x, &base, &offset);
1878 if (GET_CODE (base) == SYMBOL_REF
1879 && !offset_within_block_p (base, INTVAL (offset)))
1880 return true;
1881 }
75df395f 1882 return m68k_tls_reference_p (x, false);
7ffb5e78
RS
1883}
1884
/* Implement TARGET_CANNOT_FORCE_CONST_MEM.  */

static bool
m68k_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  /* Constants that cannot be expressed legitimately (section-crossing
     symbol+offset pairs, TLS references) must not be put in the
     constant pool.  */
  return m68k_illegitimate_symbolic_constant_p (x);
}
1892
fc2241eb
RS
1893/* Return true if X is a legitimate constant address that can reach
1894 bytes in the range [X, X + REACH). STRICT_P says whether we need
1895 strict checking. */
1896
1897static bool
1898m68k_legitimate_constant_address_p (rtx x, unsigned int reach, bool strict_p)
1899{
1900 rtx base, offset;
1901
1902 if (!CONSTANT_ADDRESS_P (x))
1903 return false;
1904
1905 if (flag_pic
1906 && !(strict_p && TARGET_PCREL)
1907 && symbolic_operand (x, VOIDmode))
1908 return false;
1909
1910 if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P && reach > 1)
1911 {
1912 split_const (x, &base, &offset);
1913 if (GET_CODE (base) == SYMBOL_REF
1914 && !offset_within_block_p (base, INTVAL (offset) + reach - 1))
1915 return false;
1916 }
1917
75df395f 1918 return !m68k_tls_reference_p (x, false);
fc2241eb
RS
1919}
1920
1921/* Return true if X is a LABEL_REF for a jump table. Assume that unplaced
1922 labels will become jump tables. */
1923
1924static bool
1925m68k_jump_table_ref_p (rtx x)
1926{
1927 if (GET_CODE (x) != LABEL_REF)
1928 return false;
1929
1930 x = XEXP (x, 0);
1931 if (!NEXT_INSN (x) && !PREV_INSN (x))
1932 return true;
1933
1934 x = next_nonnote_insn (x);
1935 return x && JUMP_TABLE_DATA_P (x);
1936}
1937
/* Return true if X is a legitimate address for values of mode MODE.
   STRICT_P says whether strict checking is needed.  If the address
   is valid, describe its components in *ADDRESS.  The m68k addressing
   modes are tried roughly from cheapest to most complex.  */

static bool
m68k_decompose_address (enum machine_mode mode, rtx x,
			bool strict_p, struct m68k_address *address)
{
  unsigned int reach;

  memset (address, 0, sizeof (*address));

  /* REACH is the number of bytes the access must be able to address
     past X; BLKmode has no fixed size, so use 1.  */
  if (mode == BLKmode)
    reach = 1;
  else
    reach = GET_MODE_SIZE (mode);

  /* Check for (An) (mode 2).  */
  if (m68k_legitimate_base_reg_p (x, strict_p))
    {
      address->base = x;
      return true;
    }

  /* Check for -(An) and (An)+ (modes 3 and 4).  */
  if ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_INC)
      && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
    {
      address->code = GET_CODE (x);
      address->base = XEXP (x, 0);
      return true;
    }

  /* Check for (d16,An) (mode 5).  The upper bound on the displacement
     is reduced by REACH so the whole access stays in range.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && IN_RANGE (INTVAL (XEXP (x, 1)), -0x8000, 0x8000 - reach)
      && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
    {
      address->base = XEXP (x, 0);
      address->offset = XEXP (x, 1);
      return true;
    }

  /* Check for GOT loads.  These are (bd,An,Xn) addresses if
     TARGET_68020 && flag_pic == 2, otherwise they are (d16,An)
     addresses.  */
  if (GET_CODE (x) == PLUS
      && XEXP (x, 0) == pic_offset_table_rtx)
    {
      /* As we are processing a PLUS, do not unwrap RELOC32 symbols --
	 they are invalid in this context.  */
      if (m68k_unwrap_symbol (XEXP (x, 1), false) != XEXP (x, 1))
	{
	  address->base = XEXP (x, 0);
	  address->offset = XEXP (x, 1);
	  return true;
	}
    }

  /* The ColdFire FPU only accepts addressing modes 2-5.  */
  if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
    return false;

  /* Check for (xxx).w and (xxx).l.  Also, in the TARGET_PCREL case,
     check for (d16,PC) or (bd,PC,Xn) with a suppressed index register.
     All these modes are variations of mode 7.  */
  if (m68k_legitimate_constant_address_p (x, reach, strict_p))
    {
      address->offset = x;
      return true;
    }

  /* Check for (d8,PC,Xn), a mode 7 form.  This case is needed for
     tablejumps.

     ??? do_tablejump creates these addresses before placing the target
     label, so we have to assume that unplaced labels are jump table
     references.  It seems unlikely that we would ever generate indexed
     accesses to unplaced labels in other cases.  */
  if (GET_CODE (x) == PLUS
      && m68k_jump_table_ref_p (XEXP (x, 1))
      && m68k_decompose_index (XEXP (x, 0), strict_p, address))
    {
      address->offset = XEXP (x, 1);
      return true;
    }

  /* Everything hereafter deals with (d8,An,Xn.SIZE*SCALE) or
     (bd,An,Xn.SIZE*SCALE) addresses.  */

  if (TARGET_68020)
    {
      /* Check for a nonzero base displacement.  */
      if (GET_CODE (x) == PLUS
	  && m68k_legitimate_constant_address_p (XEXP (x, 1), reach, strict_p))
	{
	  address->offset = XEXP (x, 1);
	  x = XEXP (x, 0);
	}

      /* Check for a suppressed index register.  */
      if (m68k_legitimate_base_reg_p (x, strict_p))
	{
	  address->base = x;
	  return true;
	}

      /* Check for a suppressed base register.  Do not allow this case
	 for non-symbolic offsets as it effectively gives gcc freedom
	 to treat data registers as base registers, which can generate
	 worse code.  */
      if (address->offset
	  && symbolic_operand (address->offset, VOIDmode)
	  && m68k_decompose_index (x, strict_p, address))
	return true;
    }
  else
    {
      /* Check for a nonzero base displacement.  Pre-68020 only allows
	 an 8-bit displacement here.  */
      if (GET_CODE (x) == PLUS
	  && GET_CODE (XEXP (x, 1)) == CONST_INT
	  && IN_RANGE (INTVAL (XEXP (x, 1)), -0x80, 0x80 - reach))
	{
	  address->offset = XEXP (x, 1);
	  x = XEXP (x, 0);
	}
    }

  /* We now expect the sum of a base and an index; try both operand
     orders.  */
  if (GET_CODE (x) == PLUS)
    {
      if (m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p)
	  && m68k_decompose_index (XEXP (x, 1), strict_p, address))
	{
	  address->base = XEXP (x, 0);
	  return true;
	}

      if (m68k_legitimate_base_reg_p (XEXP (x, 1), strict_p)
	  && m68k_decompose_index (XEXP (x, 0), strict_p, address))
	{
	  address->base = XEXP (x, 1);
	  return true;
	}
    }

  return false;
}
2086
2087/* Return true if X is a legitimate address for values of mode MODE.
2088 STRICT_P says whether strict checking is needed. */
2089
2090bool
2091m68k_legitimate_address_p (enum machine_mode mode, rtx x, bool strict_p)
2092{
2093 struct m68k_address address;
2094
2095 return m68k_decompose_address (mode, x, strict_p, &address);
2096}
2097
2098/* Return true if X is a memory, describing its address in ADDRESS if so.
2099 Apply strict checking if called during or after reload. */
2100
2101static bool
2102m68k_legitimate_mem_p (rtx x, struct m68k_address *address)
2103{
2104 return (MEM_P (x)
2105 && m68k_decompose_address (GET_MODE (x), XEXP (x, 0),
2106 reload_in_progress || reload_completed,
2107 address));
2108}
2109
1a627b35
RS
2110/* Implement TARGET_LEGITIMATE_CONSTANT_P. */
2111
2112bool
2113m68k_legitimate_constant_p (enum machine_mode mode, rtx x)
2114{
2115 return mode != XFmode && !m68k_illegitimate_symbolic_constant_p (x);
2116}
2117
fc2241eb
RS
2118/* Return true if X matches the 'Q' constraint. It must be a memory
2119 with a base address and no constant offset or index. */
2120
2121bool
2122m68k_matches_q_p (rtx x)
2123{
2124 struct m68k_address address;
2125
2126 return (m68k_legitimate_mem_p (x, &address)
2127 && address.code == UNKNOWN
2128 && address.base
2129 && !address.offset
2130 && !address.index);
2131}
2132
2133/* Return true if X matches the 'U' constraint. It must be a base address
2134 with a constant offset and no index. */
2135
2136bool
2137m68k_matches_u_p (rtx x)
2138{
2139 struct m68k_address address;
2140
2141 return (m68k_legitimate_mem_p (x, &address)
2142 && address.code == UNKNOWN
2143 && address.base
2144 && address.offset
2145 && !address.index);
2146}
2147
75df395f
MK
2148/* Return GOT pointer. */
2149
2150static rtx
2151m68k_get_gp (void)
2152{
2153 if (pic_offset_table_rtx == NULL_RTX)
2154 pic_offset_table_rtx = gen_rtx_REG (Pmode, PIC_REG);
2155
2156 crtl->uses_pic_offset_table = 1;
2157
2158 return pic_offset_table_rtx;
2159}
2160
/* M68K relocations, used to distinguish GOT and TLS relocations in UNSPEC
   wrappers.  */
enum m68k_reloc { RELOC_GOT, RELOC_TLSGD, RELOC_TLSLDM, RELOC_TLSLDO,
		  RELOC_TLSIE, RELOC_TLSLE };

/* True if RELOC is any of the TLS relocation kinds, i.e. anything other
   than a plain GOT reference.  */
#define TLS_RELOC_P(RELOC) ((RELOC) != RELOC_GOT)
2167
/* Wrap symbol X into unspec representing relocation RELOC.
   BASE_REG - register that should be added to the result.
   TEMP_REG - if non-null, temporary register.  */

static rtx
m68k_wrap_symbol (rtx x, enum m68k_reloc reloc, rtx base_reg, rtx temp_reg)
{
  bool use_x_p;

  /* -mxgot governs GOT references (base is the PIC register),
     -mxtls governs TLS references.  */
  use_x_p = (base_reg == pic_offset_table_rtx) ? TARGET_XGOT : TARGET_XTLS;

  if (TARGET_COLDFIRE && use_x_p)
    /* When compiling with -mx{got, tls} switch the code will look like this:

	move.l <X>@<RELOC>,<TEMP_REG>
	add.l <BASE_REG>,<TEMP_REG>  */
    {
      /* Wrap X in UNSPEC_??? to tip m68k_output_addr_const_extra
	 to put @RELOC after reference.  */
      x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)),
			  UNSPEC_RELOC32);
      x = gen_rtx_CONST (Pmode, x);

      if (temp_reg == NULL)
	{
	  gcc_assert (can_create_pseudo_p ());
	  temp_reg = gen_reg_rtx (Pmode);
	}

      /* Emit the two-insn sequence and return the register holding
	 the final address.  */
      emit_move_insn (temp_reg, x);
      emit_insn (gen_addsi3 (temp_reg, temp_reg, base_reg));
      x = temp_reg;
    }
  else
    {
      /* 16-bit relocation: the address is (BASE_REG + <X>@<RELOC>).  */
      x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)),
			  UNSPEC_RELOC16);
      x = gen_rtx_CONST (Pmode, x);

      x = gen_rtx_PLUS (Pmode, base_reg, x);
    }

  return x;
}
2212
/* Helper for m68k_unwrap_symbol.
   Also, if unwrapping was successful (that is if (ORIG != <return value>)),
   sets *RELOC_PTR to relocation type for the symbol.  */

static rtx
m68k_unwrap_symbol_1 (rtx orig, bool unwrap_reloc32_p,
		      enum m68k_reloc *reloc_ptr)
{
  if (GET_CODE (orig) == CONST)
    {
      rtx x;
      enum m68k_reloc dummy;

      x = XEXP (orig, 0);

      /* Let the code below store the reloc somewhere harmless when the
	 caller does not care about it.  */
      if (reloc_ptr == NULL)
	reloc_ptr = &dummy;

      /* Handle an addend.  */
      if ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS)
	  && CONST_INT_P (XEXP (x, 1)))
	x = XEXP (x, 0);

      if (GET_CODE (x) == UNSPEC)
	{
	  switch (XINT (x, 1))
	    {
	    case UNSPEC_RELOC16:
	      orig = XVECEXP (x, 0, 0);
	      *reloc_ptr = (enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1));
	      break;

	    case UNSPEC_RELOC32:
	      /* RELOC32 wrappers are only stripped when explicitly
		 requested.  */
	      if (unwrap_reloc32_p)
		{
		  orig = XVECEXP (x, 0, 0);
		  *reloc_ptr = (enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1));
		}
	      break;

	    default:
	      break;
	    }
	}
    }

  return orig;
}
2261
/* Unwrap symbol from UNSPEC_RELOC16 and, if unwrap_reloc32_p,
   UNSPEC_RELOC32 wrappers.  Returns ORIG unchanged when it is not
   wrapped.  */

rtx
m68k_unwrap_symbol (rtx orig, bool unwrap_reloc32_p)
{
  return m68k_unwrap_symbol_1 (orig, unwrap_reloc32_p, NULL);
}
2270
/* Helper for m68k_final_prescan_insn.  Called via for_each_rtx on each
   subexpression of an operand.  Returns -1 to stop recursing below a
   rewritten CONST, 0 to keep walking.  */

static int
m68k_final_prescan_insn_1 (rtx *x_ptr, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *x_ptr;

  if (m68k_unwrap_symbol (x, true) != x)
    /* For rationale of the below, see comment in m68k_final_prescan_insn.  */
    {
      rtx plus;

      gcc_assert (GET_CODE (x) == CONST);
      plus = XEXP (x, 0);

      if (GET_CODE (plus) == PLUS || GET_CODE (plus) == MINUS)
	{
	  rtx unspec;
	  rtx addend;

	  unspec = XEXP (plus, 0);
	  gcc_assert (GET_CODE (unspec) == UNSPEC);
	  addend = XEXP (plus, 1);
	  gcc_assert (CONST_INT_P (addend));

	  /* We now have all the pieces, rearrange them.  */

	  /* Move symbol to plus.  */
	  XEXP (plus, 0) = XVECEXP (unspec, 0, 0);

	  /* Move plus inside unspec.  */
	  XVECEXP (unspec, 0, 0) = plus;

	  /* Move unspec to top level of const.  */
	  XEXP (x, 0) = unspec;
	}

      /* The subtree has been rewritten in place; don't descend into it.  */
      return -1;
    }

  return 0;
}
2313
2314/* Prescan insn before outputing assembler for it. */
2315
2316void
2317m68k_final_prescan_insn (rtx insn ATTRIBUTE_UNUSED,
2318 rtx *operands, int n_operands)
2319{
2320 int i;
2321
2322 /* Combine and, possibly, other optimizations may do good job
2323 converting
2324 (const (unspec [(symbol)]))
2325 into
2326 (const (plus (unspec [(symbol)])
2327 (const_int N))).
2328 The problem with this is emitting @TLS or @GOT decorations.
2329 The decoration is emitted when processing (unspec), so the
2330 result would be "#symbol@TLSLE+N" instead of "#symbol+N@TLSLE".
2331
2332 It seems that the easiest solution to this is to convert such
2333 operands to
2334 (const (unspec [(plus (symbol)
2335 (const_int N))])).
2336 Note, that the top level of operand remains intact, so we don't have
2337 to patch up anything outside of the operand. */
2338
2339 for (i = 0; i < n_operands; ++i)
2340 {
2341 rtx op;
2342
2343 op = operands[i];
2344
2345 for_each_rtx (&op, m68k_final_prescan_insn_1, NULL);
2346 }
2347}
2348
2349/* Move X to a register and add REG_EQUAL note pointing to ORIG.
2350 If REG is non-null, use it; generate new pseudo otherwise. */
2351
2352static rtx
2353m68k_move_to_reg (rtx x, rtx orig, rtx reg)
2354{
2355 rtx insn;
2356
2357 if (reg == NULL_RTX)
2358 {
2359 gcc_assert (can_create_pseudo_p ());
2360 reg = gen_reg_rtx (Pmode);
2361 }
2362
2363 insn = emit_move_insn (reg, x);
2364 /* Put a REG_EQUAL note on this insn, so that it can be optimized
2365 by loop. */
2366 set_unique_reg_note (insn, REG_EQUAL, orig);
2367
2368 return reg;
2369}
2370
2371/* Does the same as m68k_wrap_symbol, but returns a memory reference to
2372 GOT slot. */
2373
2374static rtx
2375m68k_wrap_symbol_into_got_ref (rtx x, enum m68k_reloc reloc, rtx temp_reg)
2376{
2377 x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), temp_reg);
2378
2379 x = gen_rtx_MEM (Pmode, x);
2380 MEM_READONLY_P (x) = 1;
2381
2382 return x;
2383}
2384
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.

   An address is legitimized by making an indirect reference
   through the Global Offset Table with the name of the symbol
   used as an offset.

   The assembler and linker are responsible for placing the
   address of the symbol in the GOT.  The function prologue
   is responsible for initializing a5 to the starting address
   of the GOT.

   The assembler is also responsible for translating a symbol name
   into a constant displacement from the start of the GOT.

   A quick example may make things a little clearer:

   When not generating PIC code to store the value 12345 into _foo
   we would generate the following code:

	movel #12345, _foo

   When generating PIC two transformations are made.  First, the compiler
   loads the address of foo into a register.  So the first transformation makes:

	lea _foo, a0
	movel #12345, a0@

   The code in movsi will intercept the lea instruction and call this
   routine which will transform the instructions into:

	movel a5@(_foo:w), a0
	movel #12345, a0@


   That (in a nutshell) is how *all* symbol and label references are
   handled.  */

rtx
legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
			rtx reg)
{
  rtx pic_ref = orig;

  /* First handle a simple SYMBOL_REF or LABEL_REF */
  if (GET_CODE (orig) == SYMBOL_REF || GET_CODE (orig) == LABEL_REF)
    {
      gcc_assert (reg);

      /* Load the symbol's address from its GOT slot into REG.  */
      pic_ref = m68k_wrap_symbol_into_got_ref (orig, RELOC_GOT, reg);
      pic_ref = m68k_move_to_reg (pic_ref, orig, reg);
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      /* Make sure this has not already been legitimized.  */
      if (m68k_unwrap_symbol (orig, true) != orig)
	return orig;

      gcc_assert (reg);

      /* legitimize both operands of the PLUS */
      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      /* Recurse on each operand; the second call may not reuse REG if
	 the first result already lives there.  */
      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
				     base == reg ? 0 : reg);

      if (GET_CODE (orig) == CONST_INT)
	pic_ref = plus_constant (Pmode, base, INTVAL (orig));
      else
	pic_ref = gen_rtx_PLUS (Pmode, base, orig);
    }

  return pic_ref;
}
2464
75df395f
MK
2465/* The __tls_get_addr symbol. */
2466static GTY(()) rtx m68k_tls_get_addr;
2467
2468/* Return SYMBOL_REF for __tls_get_addr. */
2469
2470static rtx
2471m68k_get_tls_get_addr (void)
2472{
2473 if (m68k_tls_get_addr == NULL_RTX)
2474 m68k_tls_get_addr = init_one_libfunc ("__tls_get_addr");
2475
2476 return m68k_tls_get_addr;
2477}
2478
/* Return libcall result in A0 instead of usual D0.  */
static bool m68k_libcall_value_in_a0_p = false;

/* Emit instruction sequence that calls __tls_get_addr.  X is
   the TLS symbol we are referencing and RELOC is the symbol type to use
   (either TLSGD or TLSLDM).  EQV is the REG_EQUAL note for the sequence
   emitted.  A pseudo register with result of __tls_get_addr call is
   returned.  */

static rtx
m68k_call_tls_get_addr (rtx x, rtx eqv, enum m68k_reloc reloc)
{
  rtx a0;
  rtx insns;
  rtx dest;

  /* Emit the call sequence.  */
  start_sequence ();

  /* FIXME: Unfortunately, emit_library_call_value does not
     consider (plus (%a5) (const (unspec))) to be a good enough
     operand for push, so it forces it into a register.  The bad
     thing about this is that combiner, due to copy propagation and other
     optimizations, sometimes can not later fix this.  As a consequence,
     additional register may be allocated resulting in a spill.
     For reference, see args processing loops in
     calls.c:emit_library_call_value_1.
     For testcase, see gcc.target/m68k/tls-{gd, ld}.c  */
  x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), NULL_RTX);

  /* __tls_get_addr() is not a libcall, but emitting a libcall_value
     is the simpliest way of generating a call.  The difference between
     __tls_get_addr() and libcall is that the result is returned in D0
     instead of A0.  To workaround this, we use m68k_libcall_value_in_a0_p
     which temporarily switches returning the result to A0.  */

  m68k_libcall_value_in_a0_p = true;
  a0 = emit_library_call_value (m68k_get_tls_get_addr (), NULL_RTX, LCT_PURE,
				Pmode, 1, x, Pmode);
  m68k_libcall_value_in_a0_p = false;

  insns = get_insns ();
  end_sequence ();

  /* Wrap the whole sequence in a libcall block so the result can be
     CSEd against EQV.  */
  gcc_assert (can_create_pseudo_p ());
  dest = gen_reg_rtx (Pmode);
  emit_libcall_block (insns, dest, a0, eqv);

  return dest;
}
2529
2530/* The __tls_get_addr symbol. */
2531static GTY(()) rtx m68k_read_tp;
2532
2533/* Return SYMBOL_REF for __m68k_read_tp. */
2534
2535static rtx
2536m68k_get_m68k_read_tp (void)
2537{
2538 if (m68k_read_tp == NULL_RTX)
2539 m68k_read_tp = init_one_libfunc ("__m68k_read_tp");
2540
2541 return m68k_read_tp;
2542}
2543
/* Emit instruction sequence that calls __m68k_read_tp.
   A pseudo register with result of __m68k_read_tp call is returned.  */

static rtx
m68k_call_m68k_read_tp (void)
{
  rtx a0;
  rtx eqv;
  rtx insns;
  rtx dest;

  start_sequence ();

  /* __m68k_read_tp() is not a libcall, but emitting a libcall_value
     is the simpliest way of generating a call.  The difference between
     __m68k_read_tp() and libcall is that the result is returned in D0
     instead of A0.  To workaround this, we use m68k_libcall_value_in_a0_p
     which temporarily switches returning the result to A0.  */

  /* Emit the call sequence.  */
  m68k_libcall_value_in_a0_p = true;
  a0 = emit_library_call_value (m68k_get_m68k_read_tp (), NULL_RTX, LCT_PURE,
				Pmode, 0);
  m68k_libcall_value_in_a0_p = false;
  insns = get_insns ();
  end_sequence ();

  /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
     share the m68k_read_tp result with other IE/LE model accesses.  */
  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const1_rtx), UNSPEC_RELOC32);

  gcc_assert (can_create_pseudo_p ());
  dest = gen_reg_rtx (Pmode);
  emit_libcall_block (insns, dest, a0, eqv);

  return dest;
}
2581
/* Return a legitimized address for accessing TLS SYMBOL_REF X.
   For explanations on instructions sequences see TLS/NPTL ABI for m68k and
   ColdFire.  */

rtx
m68k_legitimize_tls_address (rtx orig)
{
  switch (SYMBOL_REF_TLS_MODEL (orig))
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      /* GD: a single __tls_get_addr call per symbol; the symbol itself
	 serves as the REG_EQUAL note.  */
      orig = m68k_call_tls_get_addr (orig, orig, RELOC_TLSGD);
      break;

    case TLS_MODEL_LOCAL_DYNAMIC:
      {
	rtx eqv;
	rtx a0;
	rtx x;

	/* Attach a unique REG_EQUIV, to allow the RTL optimizers to
	   share the LDM result with other LD model accesses.  */
	eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
			      UNSPEC_RELOC32);

	a0 = m68k_call_tls_get_addr (orig, eqv, RELOC_TLSLDM);

	/* Add the symbol's module-relative offset to the module base.  */
	x = m68k_wrap_symbol (orig, RELOC_TLSLDO, a0, NULL_RTX);

	if (can_create_pseudo_p ())
	  x = m68k_move_to_reg (x, orig, NULL_RTX);

	orig = x;
	break;
      }

    case TLS_MODEL_INITIAL_EXEC:
      {
	rtx a0;
	rtx x;

	a0 = m68k_call_m68k_read_tp ();

	/* Load the symbol's TP-relative offset from the GOT and add the
	   thread pointer.  */
	x = m68k_wrap_symbol_into_got_ref (orig, RELOC_TLSIE, NULL_RTX);
	x = gen_rtx_PLUS (Pmode, x, a0);

	if (can_create_pseudo_p ())
	  x = m68k_move_to_reg (x, orig, NULL_RTX);

	orig = x;
	break;
      }

    case TLS_MODEL_LOCAL_EXEC:
      {
	rtx a0;
	rtx x;

	a0 = m68k_call_m68k_read_tp ();

	/* The offset is a link-time constant; just add it to TP.  */
	x = m68k_wrap_symbol (orig, RELOC_TLSLE, a0, NULL_RTX);

	if (can_create_pseudo_p ())
	  x = m68k_move_to_reg (x, orig, NULL_RTX);

	orig = x;
	break;
      }

    default:
      gcc_unreachable ();
    }

  return orig;
}
2656
2657/* Return true if X is a TLS symbol. */
2658
2659static bool
2660m68k_tls_symbol_p (rtx x)
2661{
2662 if (!TARGET_HAVE_TLS)
2663 return false;
2664
2665 if (GET_CODE (x) != SYMBOL_REF)
2666 return false;
2667
2668 return SYMBOL_REF_TLS_MODEL (x) != 0;
2669}
2670
2671/* Helper for m68k_tls_referenced_p. */
2672
2673static int
2674m68k_tls_reference_p_1 (rtx *x_ptr, void *data ATTRIBUTE_UNUSED)
2675{
2676 /* Note: this is not the same as m68k_tls_symbol_p. */
2677 if (GET_CODE (*x_ptr) == SYMBOL_REF)
2678 return SYMBOL_REF_TLS_MODEL (*x_ptr) != 0 ? 1 : 0;
2679
2680 /* Don't recurse into legitimate TLS references. */
2681 if (m68k_tls_reference_p (*x_ptr, true))
2682 return -1;
2683
2684 return 0;
2685}
2686
2687/* If !LEGITIMATE_P, return true if X is a TLS symbol reference,
2688 though illegitimate one.
2689 If LEGITIMATE_P, return true if X is a legitimate TLS symbol reference. */
2690
2691bool
2692m68k_tls_reference_p (rtx x, bool legitimate_p)
2693{
2694 if (!TARGET_HAVE_TLS)
2695 return false;
2696
2697 if (!legitimate_p)
2698 return for_each_rtx (&x, m68k_tls_reference_p_1, NULL) == 1 ? true : false;
2699 else
2700 {
2701 enum m68k_reloc reloc = RELOC_GOT;
2702
2703 return (m68k_unwrap_symbol_1 (x, true, &reloc) != x
2704 && TLS_RELOC_P (reloc));
2705 }
2706}
2707

/* True if constant I fits the signed 8-bit immediate range of moveq
   (-128 ... 127).  */
#define USE_MOVQ(i)	((unsigned) ((i) + 128) <= 255)
0ce6f9fb 2711
bda2a571
RS
2712/* Return the type of move that should be used for integer I. */
2713
c47b0cb4
MK
2714M68K_CONST_METHOD
2715m68k_const_method (HOST_WIDE_INT i)
0ce6f9fb 2716{
0ce6f9fb
RK
2717 unsigned u;
2718
6910dd70 2719 if (USE_MOVQ (i))
0ce6f9fb 2720 return MOVQ;
24092242 2721
c16eadc7 2722 /* The ColdFire doesn't have byte or word operations. */
97c55091 2723 /* FIXME: This may not be useful for the m68060 either. */
85dbf7e2 2724 if (!TARGET_COLDFIRE)
24092242
RK
2725 {
2726 /* if -256 < N < 256 but N is not in range for a moveq
7a1929e1 2727 N^ff will be, so use moveq #N^ff, dreg; not.b dreg. */
24092242
RK
2728 if (USE_MOVQ (i ^ 0xff))
2729 return NOTB;
2730 /* Likewise, try with not.w */
2731 if (USE_MOVQ (i ^ 0xffff))
2732 return NOTW;
2733 /* This is the only value where neg.w is useful */
2734 if (i == -65408)
2735 return NEGW;
24092242 2736 }
28bad6d1 2737
5e04daf3
PB
2738 /* Try also with swap. */
2739 u = i;
2740 if (USE_MOVQ ((u >> 16) | (u << 16)))
2741 return SWAP;
2742
986e74d5 2743 if (TARGET_ISAB)
28bad6d1 2744 {
72edf146 2745 /* Try using MVZ/MVS with an immediate value to load constants. */
28bad6d1
PB
2746 if (i >= 0 && i <= 65535)
2747 return MVZ;
2748 if (i >= -32768 && i <= 32767)
2749 return MVS;
2750 }
2751
0ce6f9fb
RK
2752 /* Otherwise, use move.l */
2753 return MOVL;
2754}
2755
bda2a571
RS
2756/* Return the cost of moving constant I into a data register. */
2757
3c50106f 2758static int
bda2a571 2759const_int_cost (HOST_WIDE_INT i)
0ce6f9fb 2760{
c47b0cb4 2761 switch (m68k_const_method (i))
0ce6f9fb 2762 {
a0a7fbc9
AS
2763 case MOVQ:
2764 /* Constants between -128 and 127 are cheap due to moveq. */
2765 return 0;
2766 case MVZ:
2767 case MVS:
2768 case NOTB:
2769 case NOTW:
2770 case NEGW:
2771 case SWAP:
2772 /* Constants easily generated by moveq + not.b/not.w/neg.w/swap. */
2773 return 1;
2774 case MOVL:
2775 return 2;
2776 default:
2777 gcc_unreachable ();
0ce6f9fb
RK
2778 }
2779}
2780
/* Compute the cost of rtx X with outer code OUTER_CODE, storing the
   result in *TOTAL.  Return true when the cost is final, false to let
   the generic code recurse into the operands.  */

static bool
m68k_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
		int *total, bool speed ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case CONST_INT:
      /* Constant zero is super cheap due to clr instruction.  */
      if (x == const0_rtx)
	*total = 0;
      else
	*total = const_int_cost (INTVAL (x));
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 3;
      return true;

    case CONST_DOUBLE:
      /* Make 0.0 cheaper than other floating constants to
	 encourage creating tstsf and tstdf insns.  */
      if (outer_code == COMPARE
	  && (x == CONST0_RTX (SFmode) || x == CONST0_RTX (DFmode)))
	*total = 4;
      else
	*total = 5;
      return true;

    /* These are vaguely right for a 68020.  */
    /* The costs for long multiply have been adjusted to work properly
       in synth_mult on the 68020, relative to an average of the time
       for add and the time for shift, taking away a little more because
       sometimes move insns are needed.  */
    /* div?.w is relatively cheaper on 68000 counted in COSTS_N_INSNS
       terms.  */
#define MULL_COST				\
  (TUNE_68060 ? 2				\
   : TUNE_68040 ? 5				\
   : (TUNE_CFV2 && TUNE_EMAC) ? 3		\
   : (TUNE_CFV2 && TUNE_MAC) ? 4		\
   : TUNE_CFV2 ? 8				\
   : TARGET_COLDFIRE ? 3 : 13)

#define MULW_COST				\
  (TUNE_68060 ? 2				\
   : TUNE_68040 ? 3				\
   : TUNE_68000_10 ? 5				\
   : (TUNE_CFV2 && TUNE_EMAC) ? 3		\
   : (TUNE_CFV2 && TUNE_MAC) ? 2		\
   : TUNE_CFV2 ? 8				\
   : TARGET_COLDFIRE ? 2 : 8)

#define DIVW_COST				\
  (TARGET_CF_HWDIV ? 11				\
   : TUNE_68000_10 || TARGET_COLDFIRE ? 12 : 27)

    case PLUS:
      /* An lea costs about three times as much as a simple add.  */
      if (GET_MODE (x) == SImode
	  && GET_CODE (XEXP (x, 1)) == REG
	  && GET_CODE (XEXP (x, 0)) == MULT
	  && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
	  && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
	  && (INTVAL (XEXP (XEXP (x, 0), 1)) == 2
	      || INTVAL (XEXP (XEXP (x, 0), 1)) == 4
	      || INTVAL (XEXP (XEXP (x, 0), 1)) == 8))
	{
	  /* lea an@(dx:l:i),am */
	  *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 2 : 3);
	  return true;
	}
      return false;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      if (TUNE_68060)
	{
	  *total = COSTS_N_INSNS(1);
	  return true;
	}
      if (TUNE_68000_10)
	{
	  if (GET_CODE (XEXP (x, 1)) == CONST_INT)
	    {
	      if (INTVAL (XEXP (x, 1)) < 16)
		*total = COSTS_N_INSNS (2) + INTVAL (XEXP (x, 1)) / 2;
	      else
		/* We're using clrw + swap for these cases.  */
		*total = COSTS_N_INSNS (4) + (INTVAL (XEXP (x, 1)) - 16) / 2;
	    }
	  else
	    *total = COSTS_N_INSNS (10); /* Worst case.  */
	  return true;
	}
      /* A shift by a big integer takes an extra instruction.  */
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && (INTVAL (XEXP (x, 1)) == 16))
	{
	  *total = COSTS_N_INSNS (2);	 /* clrw;swap */
	  return true;
	}
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && !(INTVAL (XEXP (x, 1)) > 0
	       && INTVAL (XEXP (x, 1)) <= 8))
	{
	  *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 1 : 3);	 /* lsr #i,dn */
	  return true;
	}
      return false;

    case MULT:
      /* Widening 16x16->32 multiplies use the cheaper word cost.  */
      if ((GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
	   || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
	  && GET_MODE (x) == SImode)
	*total = COSTS_N_INSNS (MULW_COST);
      else if (GET_MODE (x) == QImode || GET_MODE (x) == HImode)
	*total = COSTS_N_INSNS (MULW_COST);
      else
	*total = COSTS_N_INSNS (MULL_COST);
      return true;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      if (GET_MODE (x) == QImode || GET_MODE (x) == HImode)
	*total = COSTS_N_INSNS (DIVW_COST);	/* div.w */
      else if (TARGET_CF_HWDIV)
	*total = COSTS_N_INSNS (18);
      else
	*total = COSTS_N_INSNS (43);		/* div.l */
      return true;

    case ZERO_EXTRACT:
      /* Bit-field extraction solely for a comparison is folded into the
	 compare, so count it as free there.  */
      if (outer_code == COMPARE)
	*total = 0;
      return false;

    default:
      return false;
    }
}
2926
/* Return an instruction to move CONST_INT OPERANDS[1] into data register
   OPERANDS[0].  May rewrite OPERANDS[1] to the intermediate immediate
   used by a two-insn sequence.  */

static const char *
output_move_const_into_data_reg (rtx *operands)
{
  HOST_WIDE_INT i;

  i = INTVAL (operands[1]);
  switch (m68k_const_method (i))
    {
    case MVZ:
      return "mvzw %1,%0";
    case MVS:
      return "mvsw %1,%0";
    case MOVQ:
      return "moveq %1,%0";
    case NOTB:
      /* Two-insn sequence; tracked condition codes no longer describe
	 the final value, so reset the cc state.  */
      CC_STATUS_INIT;
      operands[1] = GEN_INT (i ^ 0xff);
      return "moveq %1,%0\n\tnot%.b %0";
    case NOTW:
      CC_STATUS_INIT;
      operands[1] = GEN_INT (i ^ 0xffff);
      return "moveq %1,%0\n\tnot%.w %0";
    case NEGW:
      CC_STATUS_INIT;
      return "moveq #-128,%0\n\tneg%.w %0";
    case SWAP:
      {
	unsigned u = i;

	/* Load the halfword-swapped value, then swap it back.  */
	operands[1] = GEN_INT ((u << 16) | (u >> 16));
	return "moveq %1,%0\n\tswap %0";
      }
    case MOVL:
      return "move%.l %1,%0";
    default:
      gcc_unreachable ();
    }
}
2968
bda2a571 2969/* Return true if I can be handled by ISA B's mov3q instruction. */
5e04daf3 2970
bda2a571
RS
2971bool
2972valid_mov3q_const (HOST_WIDE_INT i)
2973{
2974 return TARGET_ISAB && (i == -1 || IN_RANGE (i, 1, 7));
5e04daf3
PB
2975}
2976
/* Return an instruction to move CONST_INT OPERANDS[1] into OPERANDS[0].
   I is the value of OPERANDS[1].  The cases are ordered from cheapest
   to most general; earlier tests take precedence.  */

static const char *
output_move_simode_const (rtx *operands)
{
  rtx dest;
  HOST_WIDE_INT src;

  dest = operands[0];
  src = INTVAL (operands[1]);
  if (src == 0
      && (DATA_REG_P (dest) || MEM_P (dest))
      /* clr insns on 68000 read before writing.  This makes them
	 unsafe on volatile memory, so fall through to a plain move
	 in that case on pre-68010 parts.  */
      && ((TARGET_68010 || TARGET_COLDFIRE)
	  || !(MEM_P (dest) && MEM_VOLATILE_P (dest))))
    return "clr%.l %0";
  else if (GET_MODE (dest) == SImode && valid_mov3q_const (src))
    /* ISA B 3-bit quick move.  */
    return "mov3q%.l %1,%0";
  else if (src == 0 && ADDRESS_REG_P (dest))
    /* clr cannot target an address register; subtract it from itself.  */
    return "sub%.l %0,%0";
  else if (DATA_REG_P (dest))
    /* Data registers have several constant-loading shortcuts.  */
    return output_move_const_into_data_reg (operands);
  else if (ADDRESS_REG_P (dest) && IN_RANGE (src, -0x8000, 0x7fff))
    {
      if (valid_mov3q_const (src))
	return "mov3q%.l %1,%0";
      /* move.w to an address register sign-extends to 32 bits.  */
      return "move%.w %1,%0";
    }
  else if (MEM_P (dest)
	   && GET_CODE (XEXP (dest, 0)) == PRE_DEC
	   && REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
	   && IN_RANGE (src, -0x8000, 0x7fff))
    {
      /* Pushing a small constant: pea sign-extends its 16-bit
	 displacement, giving a full 32-bit push.  */
      if (valid_mov3q_const (src))
	return "mov3q%.l %1,%-";
      return "pea %a1";
    }
  /* General case: 32-bit immediate move.  */
  return "move%.l %1,%0";
}
3017
5505f548 3018const char *
8a4a2253 3019output_move_simode (rtx *operands)
f4e80198
RK
3020{
3021 if (GET_CODE (operands[1]) == CONST_INT)
3022 return output_move_simode_const (operands);
3023 else if ((GET_CODE (operands[1]) == SYMBOL_REF
3024 || GET_CODE (operands[1]) == CONST)
3025 && push_operand (operands[0], SImode))
3026 return "pea %a1";
3027 else if ((GET_CODE (operands[1]) == SYMBOL_REF
3028 || GET_CODE (operands[1]) == CONST)
3029 && ADDRESS_REG_P (operands[0]))
3030 return "lea %a1,%0";
3031 return "move%.l %1,%0";
3032}
3033
/* Return the assembler template for a halfword (HImode) move of
   OPERANDS[1] into OPERANDS[0].  */

const char *
output_move_himode (rtx *operands)
{
  if (GET_CODE (operands[1]) == CONST_INT)
    {
      if (operands[1] == const0_rtx
	  && (DATA_REG_P (operands[0])
	      || GET_CODE (operands[0]) == MEM)
	  /* clr insns on 68000 read before writing, so avoid them for
	     volatile memory on pre-68010 parts.  */
	  && ((TARGET_68010 || TARGET_COLDFIRE)
	      || !(GET_CODE (operands[0]) == MEM
		   && MEM_VOLATILE_P (operands[0]))))
	return "clr%.w %0";
      else if (operands[1] == const0_rtx
	       && ADDRESS_REG_P (operands[0]))
	/* clr cannot target an address register.  */
	return "sub%.l %0,%0";
      else if (DATA_REG_P (operands[0])
	       && INTVAL (operands[1]) < 128
	       && INTVAL (operands[1]) >= -128)
	/* Constant fits in a moveq immediate.  */
	return "moveq %1,%0";
      else if (INTVAL (operands[1]) < 0x8000
	       && INTVAL (operands[1]) >= -0x8000)
	return "move%.w %1,%0";
    }
  else if (CONSTANT_P (operands[1]))
    /* Non-CONST_INT constants (e.g. symbolic) get a full-word move.  */
    return "move%.l %1,%0";
  return "move%.w %1,%0";
}
3062
/* Return the assembler template for a byte (QImode) move of
   OPERANDS[1] into OPERANDS[0].  */

const char *
output_move_qimode (rtx *operands)
{
  /* 68k family always modifies the stack pointer by at least 2, even for
     byte pushes.  The 5200 (ColdFire) does not do this.  */

  /* This case is generated by pushqi1 pattern now.  */
  gcc_assert (!(GET_CODE (operands[0]) == MEM
		&& GET_CODE (XEXP (operands[0], 0)) == PRE_DEC
		&& XEXP (XEXP (operands[0], 0), 0) == stack_pointer_rtx
		&& ! ADDRESS_REG_P (operands[1])
		&& ! TARGET_COLDFIRE));

  /* clr and st insns on 68000 read before writing.  */
  if (!ADDRESS_REG_P (operands[0])
      && ((TARGET_68010 || TARGET_COLDFIRE)
	  || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
    {
      if (operands[1] == const0_rtx)
	return "clr%.b %0";
      /* st sets all 8 bits, so it can materialize the constant 0xff.
	 On ColdFire, st is restricted to data registers.  */
      if ((!TARGET_COLDFIRE || DATA_REG_P (operands[0]))
	  && GET_CODE (operands[1]) == CONST_INT
	  && (INTVAL (operands[1]) & 255) == 255)
	{
	  /* st does not update the condition codes like a move would.  */
	  CC_STATUS_INIT;
	  return "st %0";
	}
    }
  if (GET_CODE (operands[1]) == CONST_INT
      && DATA_REG_P (operands[0])
      && INTVAL (operands[1]) < 128
      && INTVAL (operands[1]) >= -128)
    return "moveq %1,%0";
  if (operands[1] == const0_rtx && ADDRESS_REG_P (operands[0]))
    return "sub%.l %0,%0";
  if (GET_CODE (operands[1]) != CONST_INT && CONSTANT_P (operands[1]))
    return "move%.l %1,%0";
  /* 68k family (including the 5200 ColdFire) does not support byte moves to
     from address registers.  Use a word move instead.  */
  if (ADDRESS_REG_P (operands[0]) || ADDRESS_REG_P (operands[1]))
    return "move%.w %1,%0";
  return "move%.b %1,%0";
}
3106
5505f548 3107const char *
8a4a2253 3108output_move_stricthi (rtx *operands)
9b55bf04
RK
3109{
3110 if (operands[1] == const0_rtx
3197c489
RS
3111 /* clr insns on 68000 read before writing. */
3112 && ((TARGET_68010 || TARGET_COLDFIRE)
9b55bf04
RK
3113 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3114 return "clr%.w %0";
3115 return "move%.w %1,%0";
3116}
3117
5505f548 3118const char *
8a4a2253 3119output_move_strictqi (rtx *operands)
9b55bf04
RK
3120{
3121 if (operands[1] == const0_rtx
3197c489
RS
3122 /* clr insns on 68000 read before writing. */
3123 && ((TARGET_68010 || TARGET_COLDFIRE)
9b55bf04
RK
3124 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3125 return "clr%.b %0";
3126 return "move%.b %1,%0";
3127}
3128
79e68feb
RS
3129/* Return the best assembler insn template
3130 for moving operands[1] into operands[0] as a fullword. */
3131
5505f548 3132static const char *
8a4a2253 3133singlemove_string (rtx *operands)
79e68feb 3134{
02ed0c07
RK
3135 if (GET_CODE (operands[1]) == CONST_INT)
3136 return output_move_simode_const (operands);
3137 return "move%.l %1,%0";
79e68feb
RS
3138}
3139
2505bc97 3140
/* Output assembler or rtl code to perform a doubleword move insn
   with operands OPERANDS.
   Pointers to 3 helper functions should be specified:
   HANDLE_REG_ADJUST to adjust a register by a small value,
   HANDLE_COMPADR to compute an address and
   HANDLE_MOVSI to move 4 bytes.

   The same driver serves both final assembly output and RTL expansion;
   only the callbacks differ.  Handles 8-byte (DImode/DFmode) and
   12-byte (XFmode) values, one SImode word at a time, ordering the
   word moves to cope with autoincrement/autodecrement addressing and
   with overlap between the destination registers and the source
   address.  */

static void
handle_move_double (rtx operands[2],
		    void (*handle_reg_adjust) (rtx, int),
		    void (*handle_compadr) (rtx [2]),
		    void (*handle_movsi) (rtx [2]))
{
  enum
    {
      REGOP, OFFSOP, MEMOP, PUSHOP, POPOP, CNSTOP, RNDOP
    } optype0, optype1;
  rtx latehalf[2];
  rtx middlehalf[2];
  rtx xops[2];
  rtx addreg0 = 0, addreg1 = 0;
  int dest_overlapped_low = 0;
  int size = GET_MODE_SIZE (GET_MODE (operands[0]));

  middlehalf[0] = 0;
  middlehalf[1] = 0;

  /* First classify both operands.  */

  if (REG_P (operands[0]))
    optype0 = REGOP;
  else if (offsettable_memref_p (operands[0]))
    optype0 = OFFSOP;
  else if (GET_CODE (XEXP (operands[0], 0)) == POST_INC)
    optype0 = POPOP;
  else if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC)
    optype0 = PUSHOP;
  else if (GET_CODE (operands[0]) == MEM)
    optype0 = MEMOP;
  else
    optype0 = RNDOP;

  if (REG_P (operands[1]))
    optype1 = REGOP;
  else if (CONSTANT_P (operands[1]))
    optype1 = CNSTOP;
  else if (offsettable_memref_p (operands[1]))
    optype1 = OFFSOP;
  else if (GET_CODE (XEXP (operands[1], 0)) == POST_INC)
    optype1 = POPOP;
  else if (GET_CODE (XEXP (operands[1], 0)) == PRE_DEC)
    optype1 = PUSHOP;
  else if (GET_CODE (operands[1]) == MEM)
    optype1 = MEMOP;
  else
    optype1 = RNDOP;

  /* Check for the cases that the operand constraints are not supposed
     to allow to happen.  Generating code for these cases is
     painful.  */
  gcc_assert (optype0 != RNDOP && optype1 != RNDOP);

  /* If one operand is decrementing and one is incrementing
     decrement the former register explicitly
     and change that operand into ordinary indexing.  */

  if (optype0 == PUSHOP && optype1 == POPOP)
    {
      operands[0] = XEXP (XEXP (operands[0], 0), 0);

      handle_reg_adjust (operands[0], -size);

      /* NOTE(review): the first test looks at operands[1]'s mode while
	 the second looks at operands[0]'s — presumably both operands
	 share the same mode at this point, so the result is the same
	 either way; confirm before relying on it.  */
      if (GET_MODE (operands[1]) == XFmode)
	operands[0] = gen_rtx_MEM (XFmode, operands[0]);
      else if (GET_MODE (operands[0]) == DFmode)
	operands[0] = gen_rtx_MEM (DFmode, operands[0]);
      else
	operands[0] = gen_rtx_MEM (DImode, operands[0]);
      optype0 = OFFSOP;
    }
  if (optype0 == POPOP && optype1 == PUSHOP)
    {
      operands[1] = XEXP (XEXP (operands[1], 0), 0);

      handle_reg_adjust (operands[1], -size);

      if (GET_MODE (operands[1]) == XFmode)
	operands[1] = gen_rtx_MEM (XFmode, operands[1]);
      else if (GET_MODE (operands[1]) == DFmode)
	operands[1] = gen_rtx_MEM (DFmode, operands[1]);
      else
	operands[1] = gen_rtx_MEM (DImode, operands[1]);
      optype1 = OFFSOP;
    }

  /* If an operand is an unoffsettable memory ref, find a register
     we can increment temporarily to make it refer to the second word.  */

  if (optype0 == MEMOP)
    addreg0 = find_addr_reg (XEXP (operands[0], 0));

  if (optype1 == MEMOP)
    addreg1 = find_addr_reg (XEXP (operands[1], 0));

  /* Ok, we can do one word at a time.
     Normally we do the low-numbered word first,
     but if either operand is autodecrementing then we
     do the high-numbered word first.

     In either case, set up in LATEHALF the operands to use
     for the high-numbered word and in some cases alter the
     operands in OPERANDS to be suitable for the low-numbered word.  */

  if (size == 12)
    {
      /* Three-word (XFmode) move: also set up MIDDLEHALF.  */
      if (optype0 == REGOP)
	{
	  latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 2);
	  middlehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
	}
      else if (optype0 == OFFSOP)
	{
	  middlehalf[0] = adjust_address (operands[0], SImode, 4);
	  latehalf[0] = adjust_address (operands[0], SImode, size - 4);
	}
      else
	{
	  /* Unoffsettable memory: same address for every word; the
	     address register found above is stepped between moves.  */
	  middlehalf[0] = adjust_address (operands[0], SImode, 0);
	  latehalf[0] = adjust_address (operands[0], SImode, 0);
	}

      if (optype1 == REGOP)
	{
	  latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 2);
	  middlehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
	}
      else if (optype1 == OFFSOP)
	{
	  middlehalf[1] = adjust_address (operands[1], SImode, 4);
	  latehalf[1] = adjust_address (operands[1], SImode, size - 4);
	}
      else if (optype1 == CNSTOP)
	{
	  if (GET_CODE (operands[1]) == CONST_DOUBLE)
	    {
	      /* Split a long-double constant into its three target
		 words.  */
	      REAL_VALUE_TYPE r;
	      long l[3];

	      REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
	      REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
	      operands[1] = GEN_INT (l[0]);
	      middlehalf[1] = GEN_INT (l[1]);
	      latehalf[1] = GEN_INT (l[2]);
	    }
	  else
	    {
	      /* No non-CONST_DOUBLE constant should ever appear
		 here.  */
	      gcc_assert (!CONSTANT_P (operands[1]));
	    }
	}
      else
	{
	  middlehalf[1] = adjust_address (operands[1], SImode, 0);
	  latehalf[1] = adjust_address (operands[1], SImode, 0);
	}
    }
  else
    /* size is not 12: */
    {
      if (optype0 == REGOP)
	latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
      else if (optype0 == OFFSOP)
	latehalf[0] = adjust_address (operands[0], SImode, size - 4);
      else
	latehalf[0] = adjust_address (operands[0], SImode, 0);

      if (optype1 == REGOP)
	latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
      else if (optype1 == OFFSOP)
	latehalf[1] = adjust_address (operands[1], SImode, size - 4);
      else if (optype1 == CNSTOP)
	split_double (operands[1], &operands[1], &latehalf[1]);
      else
	latehalf[1] = adjust_address (operands[1], SImode, 0);
    }

  /* If insn is effectively movd N(sp),-(sp) then we will do the
     high word first.  We should use the adjusted operand 1 (which is N+4(sp))
     for the low word as well, to compensate for the first decrement of sp.  */
  if (optype0 == PUSHOP
      && REGNO (XEXP (XEXP (operands[0], 0), 0)) == STACK_POINTER_REGNUM
      && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
    operands[1] = middlehalf[1] = latehalf[1];

  /* For (set (reg:DI N) (mem:DI ... (reg:SI N) ...)),
     if the upper part of reg N does not appear in the MEM, arrange to
     emit the move late-half first.  Otherwise, compute the MEM address
     into the upper part of N and use that as a pointer to the memory
     operand.  */
  if (optype0 == REGOP
      && (optype1 == OFFSOP || optype1 == MEMOP))
    {
      rtx testlow = gen_rtx_REG (SImode, REGNO (operands[0]));

      if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
	  && reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
	{
	  /* If both halves of dest are used in the src memory address,
	     compute the address into latehalf of dest.
	     Note that this can't happen if the dest is two data regs.  */
	compadr:
	  xops[0] = latehalf[0];
	  xops[1] = XEXP (operands[1], 0);

	  handle_compadr (xops);
	  if (GET_MODE (operands[1]) == XFmode)
	    {
	      operands[1] = gen_rtx_MEM (XFmode, latehalf[0]);
	      middlehalf[1] = adjust_address (operands[1], DImode, size - 8);
	      latehalf[1] = adjust_address (operands[1], DImode, size - 4);
	    }
	  else
	    {
	      operands[1] = gen_rtx_MEM (DImode, latehalf[0]);
	      latehalf[1] = adjust_address (operands[1], DImode, size - 4);
	    }
	}
      else if (size == 12
	       && reg_overlap_mentioned_p (middlehalf[0],
					   XEXP (operands[1], 0)))
	{
	  /* Check for two regs used by both source and dest.
	     Note that this can't happen if the dest is all data regs.
	     It can happen if the dest is d6, d7, a0.
	     But in that case, latehalf is an addr reg, so
	     the code at compadr does ok.  */

	  if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
	      || reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
	    goto compadr;

	  /* JRV says this can't happen: */
	  gcc_assert (!addreg0 && !addreg1);

	  /* Only the middle reg conflicts; simply put it last.  */
	  handle_movsi (operands);
	  handle_movsi (latehalf);
	  handle_movsi (middlehalf);

	  return;
	}
      else if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0)))
	/* If the low half of dest is mentioned in the source memory
	   address, the arrange to emit the move late half first.  */
	dest_overlapped_low = 1;
    }

  /* If one or both operands autodecrementing,
     do the two words, high-numbered first.  */

  /* Likewise, the first move would clobber the source of the second one,
     do them in the other order.  This happens only for registers;
     such overlap can't happen in memory unless the user explicitly
     sets it up, and that is an undefined circumstance.  */

  if (optype0 == PUSHOP || optype1 == PUSHOP
      || (optype0 == REGOP && optype1 == REGOP
	  && ((middlehalf[1] && REGNO (operands[0]) == REGNO (middlehalf[1]))
	      || REGNO (operands[0]) == REGNO (latehalf[1])))
      || dest_overlapped_low)
    {
      /* Make any unoffsettable addresses point at high-numbered word.  */
      if (addreg0)
	handle_reg_adjust (addreg0, size - 4);
      if (addreg1)
	handle_reg_adjust (addreg1, size - 4);

      /* Do that word.  */
      handle_movsi (latehalf);

      /* Undo the adds we just did.  */
      if (addreg0)
	handle_reg_adjust (addreg0, -4);
      if (addreg1)
	handle_reg_adjust (addreg1, -4);

      if (size == 12)
	{
	  handle_movsi (middlehalf);

	  if (addreg0)
	    handle_reg_adjust (addreg0, -4);
	  if (addreg1)
	    handle_reg_adjust (addreg1, -4);
	}

      /* Do low-numbered word.  */

      handle_movsi (operands);
      return;
    }

  /* Normal case: do the two words, low-numbered first.  */

  m68k_final_prescan_insn (NULL, operands, 2);
  handle_movsi (operands);

  /* Do the middle one of the three words for long double */
  if (size == 12)
    {
      if (addreg0)
	handle_reg_adjust (addreg0, 4);
      if (addreg1)
	handle_reg_adjust (addreg1, 4);

      m68k_final_prescan_insn (NULL, middlehalf, 2);
      handle_movsi (middlehalf);
    }

  /* Make any unoffsettable addresses point at high-numbered word.  */
  if (addreg0)
    handle_reg_adjust (addreg0, 4);
  if (addreg1)
    handle_reg_adjust (addreg1, 4);

  /* Do that word.  */
  m68k_final_prescan_insn (NULL, latehalf, 2);
  handle_movsi (latehalf);

  /* Undo the adds we just did.  */
  if (addreg0)
    handle_reg_adjust (addreg0, -(size - 4));
  if (addreg1)
    handle_reg_adjust (addreg1, -(size - 4));

  return;
}
3479
3480/* Output assembler code to adjust REG by N. */
3481static void
3482output_reg_adjust (rtx reg, int n)
3483{
3484 const char *s;
3485
3486 gcc_assert (GET_MODE (reg) == SImode
3487 && -12 <= n && n != 0 && n <= 12);
3488
3489 switch (n)
2505bc97 3490 {
c47b0cb4
MK
3491 case 12:
3492 s = "add%.l #12,%0";
3493 break;
3494
3495 case 8:
3496 s = "addq%.l #8,%0";
3497 break;
3498
3499 case 4:
3500 s = "addq%.l #4,%0";
3501 break;
3502
3503 case -12:
3504 s = "sub%.l #12,%0";
3505 break;
3506
3507 case -8:
3508 s = "subq%.l #8,%0";
3509 break;
3510
3511 case -4:
3512 s = "subq%.l #4,%0";
3513 break;
3514
3515 default:
3516 gcc_unreachable ();
3517 s = NULL;
2505bc97 3518 }
c47b0cb4
MK
3519
3520 output_asm_insn (s, &reg);
3521}
3522
3523/* Emit rtl code to adjust REG by N. */
3524static void
3525emit_reg_adjust (rtx reg1, int n)
3526{
3527 rtx reg2;
3528
3529 gcc_assert (GET_MODE (reg1) == SImode
3530 && -12 <= n && n != 0 && n <= 12);
3531
3532 reg1 = copy_rtx (reg1);
3533 reg2 = copy_rtx (reg1);
3534
3535 if (n < 0)
3536 emit_insn (gen_subsi3 (reg1, reg2, GEN_INT (-n)));
3537 else if (n > 0)
3538 emit_insn (gen_addsi3 (reg1, reg2, GEN_INT (n)));
3539 else
3540 gcc_unreachable ();
3541}
3542
/* Output assembler to load the address OPERANDS[1] into register
   OPERANDS[0].  (The code emits "lea %a1,%0", i.e. operand 1 is the
   address and operand 0 the destination register.)  */
static void
output_compadr (rtx operands[2])
{
  output_asm_insn ("lea %a1,%0", operands);
}
3549
/* Output the best assembler insn for moving operands[1] into operands[0]
   as a fullword.  Thin wrapper over singlemove_string for use as a
   handle_move_double callback.  */
static void
output_movsi (rtx operands[2])
{
  output_asm_insn (singlemove_string (operands), operands);
}
3557
3558/* Copy OP and change its mode to MODE. */
3559static rtx
3560copy_operand (rtx op, enum machine_mode mode)
3561{
3562 /* ??? This looks really ugly. There must be a better way
3563 to change a mode on the operand. */
3564 if (GET_MODE (op) != VOIDmode)
2505bc97 3565 {
c47b0cb4
MK
3566 if (REG_P (op))
3567 op = gen_rtx_REG (mode, REGNO (op));
2505bc97 3568 else
c47b0cb4
MK
3569 {
3570 op = copy_rtx (op);
3571 PUT_MODE (op, mode);
3572 }
2505bc97 3573 }
79e68feb 3574
c47b0cb4
MK
3575 return op;
3576}
3577
3578/* Emit rtl code for moving operands[1] into operands[0] as a fullword. */
3579static void
3580emit_movsi (rtx operands[2])
3581{
3582 operands[0] = copy_operand (operands[0], SImode);
3583 operands[1] = copy_operand (operands[1], SImode);
3584
3585 emit_insn (gen_movsi (operands[0], operands[1]));
3586}
3587
/* Output assembler code to perform a doubleword move insn
   with operands OPERANDS.  Drives handle_move_double with the
   assembler-emitting callbacks; always returns the empty template
   because the output is produced via output_asm_insn.  */
const char *
output_move_double (rtx *operands)
{
  handle_move_double (operands,
		      output_reg_adjust, output_compadr, output_movsi);

  return "";
}
3598
/* Output rtl code to perform a doubleword move insn
   with operands OPERANDS.  Drives handle_move_double with the
   RTL-emitting callbacks; note the address-computation hook is also
   emit_movsi, since in RTL the address load is just an SImode move.  */
void
m68k_emit_move_double (rtx operands[2])
{
  handle_move_double (operands, emit_reg_adjust, emit_movsi, emit_movsi);
}
dcc21c4c
PB
3606
3607/* Ensure mode of ORIG, a REG rtx, is MODE. Returns either ORIG or a
3608 new rtx with the correct mode. */
3609
3610static rtx
3611force_mode (enum machine_mode mode, rtx orig)
3612{
3613 if (mode == GET_MODE (orig))
3614 return orig;
3615
3616 if (REGNO (orig) >= FIRST_PSEUDO_REGISTER)
3617 abort ();
3618
3619 return gen_rtx_REG (mode, REGNO (orig));
3620}
3621
/* Return nonzero if OP is a floating-point register.  The reg_renumber
   check makes this always false before register allocation has run —
   presumably so the secondary-reload paths below only trigger during
   reload; confirm against callers.  MODE is unused.  */
static int
fp_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return reg_renumber && FP_REG_P (op);
}
3627
/* Emit insns to move operands[1] into operands[0].

   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.

   Note SCRATCH_REG may not be in the proper mode depending on how it
   will be used.  This routine is responsible for creating a new copy
   of SCRATCH_REG in the proper mode.  */

int
emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
{
  register rtx operand0 = operands[0];
  register rtx operand1 = operands[1];
  register rtx tem;

  /* During reload, replace pseudos that did not get hard registers by
     their stack-slot equivalents, so the secondary-reload code below
     sees the real memory operands.  */
  if (scratch_reg
      && reload_in_progress && GET_CODE (operand0) == REG
      && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
    operand0 = reg_equiv_mem (REGNO (operand0));
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand0) == SUBREG
	   && GET_CODE (SUBREG_REG (operand0)) == REG
	   && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
    {
     /* We must not alter SUBREG_BYTE (operand0) since that would confuse
	the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
				 reg_equiv_mem (REGNO (SUBREG_REG (operand0))),
				 SUBREG_BYTE (operand0));
      operand0 = alter_subreg (&temp, true);
    }

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand1) == REG
      && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
    operand1 = reg_equiv_mem (REGNO (operand1));
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand1) == SUBREG
	   && GET_CODE (SUBREG_REG (operand1)) == REG
	   && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
    {
      /* We must not alter SUBREG_BYTE (operand0) since that would confuse
	 the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
				 reg_equiv_mem (REGNO (SUBREG_REG (operand1))),
				 SUBREG_BYTE (operand1));
      operand1 = alter_subreg (&temp, true);
    }

  /* Substitute any reload replacements recorded for the memory
     addresses.  */
  if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
      && ((tem = find_replacement (&XEXP (operand0, 0)))
	  != XEXP (operand0, 0)))
    operand0 = gen_rtx_MEM (GET_MODE (operand0), tem);
  if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
      && ((tem = find_replacement (&XEXP (operand1, 0)))
	  != XEXP (operand1, 0)))
    operand1 = gen_rtx_MEM (GET_MODE (operand1), tem);

  /* Handle secondary reloads for loads/stores of FP registers where
     the address is symbolic by using the scratch register */
  if (fp_reg_operand (operand0, mode)
      && ((GET_CODE (operand1) == MEM
	   && ! memory_address_p (DFmode, XEXP (operand1, 0)))
	  || ((GET_CODE (operand1) == SUBREG
	       && GET_CODE (XEXP (operand1, 0)) == MEM
	       && !memory_address_p (DFmode, XEXP (XEXP (operand1, 0), 0)))))
      && scratch_reg)
    {
      if (GET_CODE (operand1) == SUBREG)
	operand1 = XEXP (operand1, 0);

      /* SCRATCH_REG will hold an address.  We want
	 it in SImode regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (SImode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand1, 0)))
	{
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
	  emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
						       Pmode,
						       XEXP (XEXP (operand1, 0), 0),
						       scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand1, 0));
      /* Load through the scratch address.  */
      emit_insn (gen_rtx_SET (VOIDmode, operand0,
			      gen_rtx_MEM (mode, scratch_reg)));
      return 1;
    }
  else if (fp_reg_operand (operand1, mode)
	   && ((GET_CODE (operand0) == MEM
		&& ! memory_address_p (DFmode, XEXP (operand0, 0)))
	       || ((GET_CODE (operand0) == SUBREG)
		   && GET_CODE (XEXP (operand0, 0)) == MEM
		   && !memory_address_p (DFmode, XEXP (XEXP (operand0, 0), 0))))
	   && scratch_reg)
    {
      /* Mirror image of the load case above: store an FP register to
	 a symbolic address via the scratch register.  */
      if (GET_CODE (operand0) == SUBREG)
	operand0 = XEXP (operand0, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in SIMODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (SImode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand0, 0)))
	{
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
	  emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0,
								       0)),
						       Pmode,
						       XEXP (XEXP (operand0, 0),
							     0),
						       scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand0, 0));
      emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_MEM (mode, scratch_reg),
			      operand1));
      return 1;
    }
  /* Handle secondary reloads for loads of FP registers from constant
     expressions by forcing the constant into memory.

     use scratch_reg to hold the address of the memory location.

     The proper fix is to change PREFERRED_RELOAD_CLASS to return
     NO_REGS when presented with a const_int and an register class
     containing only FP registers.  Doing so unfortunately creates
     more problems than it solves.   Fix this for 2.5.  */
  else if (fp_reg_operand (operand0, mode)
	   && CONSTANT_P (operand1)
	   && scratch_reg)
    {
      rtx xoperands[2];

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in SIMODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (SImode, scratch_reg);

      /* Force the constant into memory and put the address of the
	 memory location into scratch_reg.  */
      xoperands[0] = scratch_reg;
      xoperands[1] = XEXP (force_const_mem (mode, operand1), 0);
      emit_insn (gen_rtx_SET (mode, scratch_reg, xoperands[1]));

      /* Now load the destination register.  */
      emit_insn (gen_rtx_SET (mode, operand0,
			      gen_rtx_MEM (mode, scratch_reg)));
      return 1;
    }

  /* Now have insn-emit do whatever it normally does.  */
  return 0;
}
3791
01e304f8
RZ
3792/* Split one or more DImode RTL references into pairs of SImode
3793 references. The RTL can be REG, offsettable MEM, integer constant, or
3794 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
3795 split and "num" is its length. lo_half and hi_half are output arrays
3796 that parallel "operands". */
3797
3798void
3799split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
3800{
3801 while (num--)
3802 {
3803 rtx op = operands[num];
3804
3805 /* simplify_subreg refuses to split volatile memory addresses,
3806 but we still have to handle it. */
3807 if (GET_CODE (op) == MEM)
3808 {
3809 lo_half[num] = adjust_address (op, SImode, 4);
3810 hi_half[num] = adjust_address (op, SImode, 0);
3811 }
3812 else
3813 {
3814 lo_half[num] = simplify_gen_subreg (SImode, op,
3815 GET_MODE (op) == VOIDmode
3816 ? DImode : GET_MODE (op), 4);
3817 hi_half[num] = simplify_gen_subreg (SImode, op,
3818 GET_MODE (op) == VOIDmode
3819 ? DImode : GET_MODE (op), 0);
3820 }
3821 }
3822}
3823
a40ed0f3
KH
3824/* Split X into a base and a constant offset, storing them in *BASE
3825 and *OFFSET respectively. */
3826
3827static void
3828m68k_split_offset (rtx x, rtx *base, HOST_WIDE_INT *offset)
3829{
3830 *offset = 0;
3831 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
3832 {
3833 *offset += INTVAL (XEXP (x, 1));
3834 x = XEXP (x, 0);
3835 }
3836 *base = x;
3837}
3838
/* Return true if PATTERN is a PARALLEL suitable for a movem or fmovem
   instruction.  STORE_P says whether the move is a load or store.

   If the instruction uses post-increment or pre-decrement addressing,
   AUTOMOD_BASE is the base register and AUTOMOD_OFFSET is the total
   adjustment.  This adjustment will be made by the first element of
   PARALLEL, with the loads or stores starting at element 1.  If the
   instruction does not use post-increment or pre-decrement addressing,
   AUTOMOD_BASE is null, AUTOMOD_OFFSET is 0, and the loads or stores
   start at element 0.  */

bool
m68k_movem_pattern_p (rtx pattern, rtx automod_base,
		      HOST_WIDE_INT automod_offset, bool store_p)
{
  rtx base, mem_base, set, mem, reg, last_reg;
  HOST_WIDE_INT offset, mem_offset;
  int i, first, len;
  enum reg_class rclass;

  len = XVECLEN (pattern, 0);
  /* Element 0 performs the automodification, if any; the moves follow.  */
  first = (automod_base != NULL);

  if (automod_base)
    {
      /* Stores must be pre-decrement and loads must be post-increment.  */
      if (store_p != (automod_offset < 0))
	return false;

      /* Work out the base and offset for lowest memory location.  */
      base = automod_base;
      offset = (automod_offset < 0 ? automod_offset : 0);
    }
  else
    {
      /* Allow any valid base and offset in the first access.  */
      base = NULL;
      offset = 0;
    }

  last_reg = NULL;
  rclass = NO_REGS;
  for (i = first; i < len; i++)
    {
      /* We need a plain SET.  */
      set = XVECEXP (pattern, 0, i);
      if (GET_CODE (set) != SET)
	return false;

      /* Check that we have a memory location...  */
      mem = XEXP (set, !store_p);
      if (!MEM_P (mem) || !memory_operand (mem, VOIDmode))
	return false;

      /* ...with the right address.  */
      if (base == NULL)
	{
	  m68k_split_offset (XEXP (mem, 0), &base, &offset);
	  /* The ColdFire instruction only allows (An) and (d16,An) modes.
	     There are no mode restrictions for 680x0 besides the
	     automodification rules enforced above.  */
	  if (TARGET_COLDFIRE
	      && !m68k_legitimate_base_reg_p (base, reload_completed))
	    return false;
	}
      else
	{
	  /* Each later access must hit the next consecutive location:
	     same base register, expected running offset.  */
	  m68k_split_offset (XEXP (mem, 0), &mem_base, &mem_offset);
	  if (!rtx_equal_p (base, mem_base) || offset != mem_offset)
	    return false;
	}

      /* Check that we have a register of the required mode and class.  */
      reg = XEXP (set, store_p);
      if (!REG_P (reg)
	  || !HARD_REGISTER_P (reg)
	  || GET_MODE (reg) != reg_raw_mode[REGNO (reg)])
	return false;

      if (last_reg)
	{
	  /* The register must belong to RCLASS and have a higher number
	     than the register in the previous SET.  */
	  if (!TEST_HARD_REG_BIT (reg_class_contents[rclass], REGNO (reg))
	      || REGNO (last_reg) >= REGNO (reg))
	    return false;
	}
      else
	{
	  /* Work out which register class we need from the first
	     register seen.  */
	  if (INT_REGNO_P (REGNO (reg)))
	    rclass = GENERAL_REGS;
	  else if (FP_REGNO_P (REGNO (reg)))
	    rclass = FP_REGS;
	  else
	    return false;
	}

      last_reg = reg;
      offset += GET_MODE_SIZE (GET_MODE (reg));
    }

  /* If we have an automodification, check whether the final offset is OK.  */
  if (automod_base && offset != (automod_offset < 0 ? 0 : automod_offset))
    return false;

  /* Reject unprofitable cases: too few registers to beat plain moves.  */
  if (len < first + (rclass == FP_REGS ? MIN_FMOVEM_REGS : MIN_MOVEM_REGS))
    return false;

  return true;
}
3951
/* Return the assembly code template for a movem or fmovem instruction
   whose pattern is given by PATTERN.  Store the template's operands
   in OPERANDS.

   If the instruction uses post-increment or pre-decrement addressing,
   AUTOMOD_OFFSET is the total adjustment, otherwise it is 0.  STORE_P
   is true if this is a store instruction.  */

const char *
m68k_output_movem (rtx *operands, rtx pattern,
		   HOST_WIDE_INT automod_offset, bool store_p)
{
  unsigned int mask;
  int i, first;

  gcc_assert (GET_CODE (pattern) == PARALLEL);
  mask = 0;
  /* Skip the automodification SET, if there is one.  */
  first = (automod_offset != 0);
  for (i = first; i < XVECLEN (pattern, 0); i++)
    {
      /* When using movem with pre-decrement addressing, register X + D0_REG
	 is controlled by bit 15 - X.  For all other addressing modes,
	 register X + D0_REG is controlled by bit X.  Confusingly, the
	 register mask for fmovem is in the opposite order to that for
	 movem.  */
      unsigned int regno;

      gcc_assert (MEM_P (XEXP (XVECEXP (pattern, 0, i), !store_p)));
      gcc_assert (REG_P (XEXP (XVECEXP (pattern, 0, i), store_p)));
      regno = REGNO (XEXP (XVECEXP (pattern, 0, i), store_p));
      if (automod_offset < 0)
	{
	  /* Pre-decrement: bit order is reversed for movem but not
	     for fmovem.  */
	  if (FP_REGNO_P (regno))
	    mask |= 1 << (regno - FP0_REG);
	  else
	    mask |= 1 << (15 - (regno - D0_REG));
	}
      else
	{
	  /* Other modes: bit order is reversed for fmovem but not
	     for movem.  */
	  if (FP_REGNO_P (regno))
	    mask |= 1 << (7 - (regno - FP0_REG));
	  else
	    mask |= 1 << (regno - D0_REG);
	}
    }
  CC_STATUS_INIT;

  /* Operand 0 is the effective address; operand 1 the register mask.  */
  if (automod_offset == 0)
    operands[0] = XEXP (XEXP (XVECEXP (pattern, 0, first), !store_p), 0);
  else if (automod_offset < 0)
    operands[0] = gen_rtx_PRE_DEC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
  else
    operands[0] = gen_rtx_POST_INC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
  operands[1] = GEN_INT (mask);
  if (FP_REGNO_P (REGNO (XEXP (XVECEXP (pattern, 0, first), store_p))))
    {
      if (store_p)
	return "fmovem %1,%a0";
      else
	return "fmovem %a0,%1";
    }
  else
    {
      if (store_p)
	return "movem%.l %1,%a0";
      else
	return "movem%.l %a0,%1";
    }
}
4021
79e68feb
RS
4022/* Return a REG that occurs in ADDR with coefficient 1.
4023 ADDR can be effectively incremented by incrementing REG. */
4024
4025static rtx
8a4a2253 4026find_addr_reg (rtx addr)
79e68feb
RS
4027{
4028 while (GET_CODE (addr) == PLUS)
4029 {
4030 if (GET_CODE (XEXP (addr, 0)) == REG)
4031 addr = XEXP (addr, 0);
4032 else if (GET_CODE (XEXP (addr, 1)) == REG)
4033 addr = XEXP (addr, 1);
4034 else if (CONSTANT_P (XEXP (addr, 0)))
4035 addr = XEXP (addr, 1);
4036 else if (CONSTANT_P (XEXP (addr, 1)))
4037 addr = XEXP (addr, 0);
4038 else
4761e388 4039 gcc_unreachable ();
79e68feb 4040 }
4761e388
NS
4041 gcc_assert (GET_CODE (addr) == REG);
4042 return addr;
79e68feb 4043}
9ee3c687 4044
/* Output assembler code to perform a 32-bit 3-operand add.
   OPERANDS[0] is the destination, OPERANDS[1] and OPERANDS[2] the
   addends.  Returns the assembly template; may modify OPERANDS in
   place (swapping addends, negating/adjusting immediates).  */

const char *
output_addsi3 (rtx *operands)
{
  if (! operands_match_p (operands[0], operands[1]))
    {
      /* Three-address form: only "lea" can do this, and it needs an
	 address register as the base, so put the address register (if
	 any) in operands[1].  */
      if (!ADDRESS_REG_P (operands[1]))
	{
	  rtx tmp = operands[1];

	  operands[1] = operands[2];
	  operands[2] = tmp;
	}

      /* These insns can result from reloads to access
	 stack slots over 64k from the frame pointer.  */
      if (GET_CODE (operands[2]) == CONST_INT
	  && (INTVAL (operands[2]) < -32768 || INTVAL (operands[2]) > 32767))
	return "move%.l %2,%0\n\tadd%.l %1,%0";
      if (GET_CODE (operands[2]) == REG)
	return MOTOROLA ? "lea (%1,%2.l),%0" : "lea %1@(0,%2:l),%0";
      return MOTOROLA ? "lea (%c2,%1),%0" : "lea %1@(%c2),%0";
    }
  if (GET_CODE (operands[2]) == CONST_INT)
    {
      /* addq/subq handle immediates in [1, 8].  */
      if (INTVAL (operands[2]) > 0
	  && INTVAL (operands[2]) <= 8)
	return "addq%.l %2,%0";
      if (INTVAL (operands[2]) < 0
	  && INTVAL (operands[2]) >= -8)
	{
	  operands[2] = GEN_INT (- INTVAL (operands[2]));
	  return "subq%.l %2,%0";
	}
      /* On the CPU32 it is faster to use two addql instructions to
	 add a small integer (8 < N <= 16) to a register.
	 Likewise for subql.  */
      if (TUNE_CPU32 && REG_P (operands[0]))
	{
	  if (INTVAL (operands[2]) > 8
	      && INTVAL (operands[2]) <= 16)
	    {
	      operands[2] = GEN_INT (INTVAL (operands[2]) - 8);
	      return "addq%.l #8,%0\n\taddq%.l %2,%0";
	    }
	  if (INTVAL (operands[2]) < -8
	      && INTVAL (operands[2]) >= -16)
	    {
	      operands[2] = GEN_INT (- INTVAL (operands[2]) - 8);
	      return "subq%.l #8,%0\n\tsubq%.l %2,%0";
	    }
	}
      /* For an address-register destination, a 16-bit immediate can go
	 through add.w or lea.  */
      if (ADDRESS_REG_P (operands[0])
	  && INTVAL (operands[2]) >= -0x8000
	  && INTVAL (operands[2]) < 0x8000)
	{
	  if (TUNE_68040)
	    return "add%.w %2,%0";
	  else
	    return MOTOROLA ? "lea (%c2,%0),%0" : "lea %0@(%c2),%0";
	}
    }
  return "add%.l %2,%0";
}
79e68feb
RS
4110\f
/* Store in cc_status the expressions that the condition codes will
   describe after execution of an instruction whose pattern is EXP.
   Do not alter them if the instruction would not alter the cc's.  */

/* On the 68000, all the insns to store in an address register fail to
   set the cc's.  However, in some cases these instructions can make it
   possibly invalid to use the saved cc's.  In those cases we clear out
   some or all of the saved cc's so they won't be used.  */

void
notice_update_cc (rtx exp, rtx insn)
{
  if (GET_CODE (exp) == SET)
    {
      if (GET_CODE (SET_SRC (exp)) == CALL)
	CC_STATUS_INIT;
      else if (ADDRESS_REG_P (SET_DEST (exp)))
	{
	  /* Address-register sets don't touch the cc's, but they may
	     invalidate a saved value that mentions the register.  */
	  if (cc_status.value1 && modified_in_p (cc_status.value1, insn))
	    cc_status.value1 = 0;
	  if (cc_status.value2 && modified_in_p (cc_status.value2, insn))
	    cc_status.value2 = 0;
	}
      /* fmoves to memory or data registers do not set the condition
	 codes.  Normal moves _do_ set the condition codes, but not in
	 a way that is appropriate for comparison with 0, because -0.0
	 would be treated as a negative nonzero number.  Note that it
	 isn't appropriate to conditionalize this restriction on
	 HONOR_SIGNED_ZEROS because that macro merely indicates whether
	 we care about the difference between -0.0 and +0.0.  */
      else if (!FP_REG_P (SET_DEST (exp))
	       && SET_DEST (exp) != cc0_rtx
	       && (FP_REG_P (SET_SRC (exp))
		   || GET_CODE (SET_SRC (exp)) == FIX
		   || FLOAT_MODE_P (GET_MODE (SET_DEST (exp)))))
	CC_STATUS_INIT;
      /* A pair of move insns doesn't produce a useful overall cc.  */
      else if (!FP_REG_P (SET_DEST (exp))
	       && !FP_REG_P (SET_SRC (exp))
	       && GET_MODE_SIZE (GET_MODE (SET_SRC (exp))) > 4
	       && (GET_CODE (SET_SRC (exp)) == REG
		   || GET_CODE (SET_SRC (exp)) == MEM
		   || GET_CODE (SET_SRC (exp)) == CONST_DOUBLE))
	CC_STATUS_INIT;
      else if (SET_DEST (exp) != pc_rtx)
	{
	  /* Record the destination/source pair the cc's now reflect.  */
	  cc_status.flags = 0;
	  cc_status.value1 = SET_DEST (exp);
	  cc_status.value2 = SET_SRC (exp);
	}
    }
  else if (GET_CODE (exp) == PARALLEL
	   && GET_CODE (XVECEXP (exp, 0, 0)) == SET)
    {
      /* For a PARALLEL, only the first SET is considered relevant.  */
      rtx dest = SET_DEST (XVECEXP (exp, 0, 0));
      rtx src = SET_SRC (XVECEXP (exp, 0, 0));

      if (ADDRESS_REG_P (dest))
	CC_STATUS_INIT;
      else if (dest != pc_rtx)
	{
	  cc_status.flags = 0;
	  cc_status.value1 = dest;
	  cc_status.value2 = src;
	}
    }
  else
    CC_STATUS_INIT;
  if (cc_status.value2 != 0
      && ADDRESS_REG_P (cc_status.value2)
      && GET_MODE (cc_status.value2) == QImode)
    CC_STATUS_INIT;
  if (cc_status.value2 != 0)
    switch (GET_CODE (cc_status.value2))
      {
      case ASHIFT: case ASHIFTRT: case LSHIFTRT:
      case ROTATE: case ROTATERT:
	/* These instructions always clear the overflow bit, and set
	   the carry to the bit shifted out.  */
	cc_status.flags |= CC_OVERFLOW_UNUSABLE | CC_NO_CARRY;
	break;

      case PLUS: case MINUS: case MULT:
      case DIV: case UDIV: case MOD: case UMOD: case NEG:
	if (GET_MODE (cc_status.value2) != VOIDmode)
	  cc_status.flags |= CC_NO_OVERFLOW;
	break;
      case ZERO_EXTEND:
	/* (SET r1 (ZERO_EXTEND r2)) on this machine
	   ends with a move insn moving r2 in r2's mode.
	   Thus, the cc's are set for r2.
	   This can set N bit spuriously.  */
	cc_status.flags |= CC_NOT_NEGATIVE;
	/* Fall through.  */

      default:
	break;
      }
  /* If the cc source mentions the cc destination register, the saved
     source is stale.  */
  if (cc_status.value1 && GET_CODE (cc_status.value1) == REG
      && cc_status.value2
      && reg_overlap_mentioned_p (cc_status.value1, cc_status.value2))
    cc_status.value2 = 0;
  /* Check for PRE_DEC in dest modifying a register used in src.  */
  if (cc_status.value1 && GET_CODE (cc_status.value1) == MEM
      && GET_CODE (XEXP (cc_status.value1, 0)) == PRE_DEC
      && cc_status.value2
      && reg_overlap_mentioned_p (XEXP (XEXP (cc_status.value1, 0), 0),
				  cc_status.value2))
    cc_status.value2 = 0;
  if (((cc_status.value1 && FP_REG_P (cc_status.value1))
       || (cc_status.value2 && FP_REG_P (cc_status.value2))))
    cc_status.flags = CC_IN_68881;
  if (cc_status.value2 && GET_CODE (cc_status.value2) == COMPARE
      && GET_MODE_CLASS (GET_MODE (XEXP (cc_status.value2, 0))) == MODE_FLOAT)
    {
      cc_status.flags = CC_IN_68881;
      if (!FP_REG_P (XEXP (cc_status.value2, 0))
	  && FP_REG_P (XEXP (cc_status.value2, 1)))
	cc_status.flags |= CC_REVERSED;
    }
}
4231\f
5505f548 4232const char *
8a4a2253 4233output_move_const_double (rtx *operands)
79e68feb 4234{
1a8965c4 4235 int code = standard_68881_constant_p (operands[1]);
79e68feb 4236
1a8965c4 4237 if (code != 0)
79e68feb 4238 {
1a8965c4 4239 static char buf[40];
79e68feb 4240
3b4b85c9 4241 sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff);
1a8965c4 4242 return buf;
79e68feb 4243 }
1a8965c4 4244 return "fmove%.d %1,%0";
79e68feb
RS
4245}
4246
5505f548 4247const char *
8a4a2253 4248output_move_const_single (rtx *operands)
79e68feb 4249{
1a8965c4 4250 int code = standard_68881_constant_p (operands[1]);
79e68feb 4251
1a8965c4 4252 if (code != 0)
79e68feb 4253 {
1a8965c4 4254 static char buf[40];
79e68feb 4255
3b4b85c9 4256 sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff);
1a8965c4 4257 return buf;
79e68feb 4258 }
1a8965c4 4259 return "fmove%.s %f1,%0";
79e68feb
RS
4260}
4261
/* Return nonzero if X, a CONST_DOUBLE, has a value that we can get
   from the "fmovecr" instruction.
   The value, anded with 0xff, gives the code to use in fmovecr
   to get the desired constant.  */

/* This code has been fixed for cross-compilation.  */

/* Nonzero once values_68881 has been filled in by init_68881_table.  */
static int inited_68881_table = 0;

/* Decimal spellings of the 68881 ROM constants we recognize.  The
   first six are parsed in SFmode and the last in DFmode; see
   init_68881_table.  */
static const char *const strings_68881[7] = {
  "0.0",
  "1.0",
  "10.0",
  "100.0",
  "10000.0",
  "1e8",
  "1e16"
};

/* fmovecr ROM offsets for the corresponding strings_68881 entries.  */
static const int codes_68881[7] = {
  0x0f,
  0x32,
  0x33,
  0x34,
  0x35,
  0x36,
  0x37
};

/* Binary values of strings_68881, computed by init_68881_table.  */
REAL_VALUE_TYPE values_68881[7];
4292
4293/* Set up values_68881 array by converting the decimal values
7a1929e1 4294 strings_68881 to binary. */
c1cfb2ae
RS
4295
4296void
8a4a2253 4297init_68881_table (void)
c1cfb2ae
RS
4298{
4299 int i;
4300 REAL_VALUE_TYPE r;
4301 enum machine_mode mode;
4302
16d82c3c 4303 mode = SFmode;
c1cfb2ae
RS
4304 for (i = 0; i < 7; i++)
4305 {
4306 if (i == 6)
16d82c3c 4307 mode = DFmode;
c1cfb2ae
RS
4308 r = REAL_VALUE_ATOF (strings_68881[i], mode);
4309 values_68881[i] = r;
4310 }
4311 inited_68881_table = 1;
4312}
79e68feb
RS
4313
/* Return the fmovecr code (nonzero) if CONST_DOUBLE X matches one of
   the 68881 on-chip ROM constants, or 0 if it does not (or if fmovecr
   should not be used on the tuned-for CPU).  */

int
standard_68881_constant_p (rtx x)
{
  REAL_VALUE_TYPE r;
  int i;

  /* fmovecr must be emulated on the 68040 and 68060, so it shouldn't be
     used at all on those chips.  */
  if (TUNE_68040_60)
    return 0;

  if (! inited_68881_table)
    init_68881_table ();

  REAL_VALUE_FROM_CONST_DOUBLE (r, x);

  /* Use REAL_VALUES_IDENTICAL instead of REAL_VALUES_EQUAL so that -0.0
     is rejected.  */
  for (i = 0; i < 6; i++)
    {
      if (REAL_VALUES_IDENTICAL (r, values_68881[i]))
	return (codes_68881[i]);
    }

  /* Entry 6 (1e16) is only usable in double precision.  */
  if (GET_MODE (x) == SFmode)
    return 0;

  if (REAL_VALUES_EQUAL (r, values_68881[6]))
    return (codes_68881[6]);

  /* larger powers of ten in the constants ram are not used
     because they are not equal to a `double' C constant.  */
  return 0;
}
4348
4349/* If X is a floating-point constant, return the logarithm of X base 2,
4350 or 0 if X is not a power of 2. */
4351
4352int
8a4a2253 4353floating_exact_log2 (rtx x)
79e68feb 4354{
c1cfb2ae 4355 REAL_VALUE_TYPE r, r1;
eaff3bf8 4356 int exp;
79e68feb 4357
c1cfb2ae 4358 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
79e68feb 4359
eaff3bf8 4360 if (REAL_VALUES_LESS (r, dconst1))
79e68feb
RS
4361 return 0;
4362
eaff3bf8 4363 exp = real_exponent (&r);
6ef9a246 4364 real_2expN (&r1, exp, DFmode);
eaff3bf8
RH
4365 if (REAL_VALUES_EQUAL (r1, r))
4366 return exp;
4367
79e68feb
RS
4368 return 0;
4369}
4370\f
79e68feb
RS
4371/* A C compound statement to output to stdio stream STREAM the
4372 assembler syntax for an instruction operand X. X is an RTL
4373 expression.
4374
4375 CODE is a value that can be used to specify one of several ways
4376 of printing the operand. It is used when identical operands
4377 must be printed differently depending on the context. CODE
4378 comes from the `%' specification that was used to request
4379 printing of the operand. If the specification was just `%DIGIT'
4380 then CODE is 0; if the specification was `%LTR DIGIT' then CODE
4381 is the ASCII code for LTR.
4382
4383 If X is a register, this macro should print the register's name.
4384 The names can be found in an array `reg_names' whose type is
4385 `char *[]'. `reg_names' is initialized from `REGISTER_NAMES'.
4386
4387 When the machine description has a specification `%PUNCT' (a `%'
4388 followed by a punctuation character), this macro is called with
4389 a null pointer for X and the punctuation character for CODE.
4390
4391 The m68k specific codes are:
4392
4393 '.' for dot needed in Motorola-style opcode names.
4394 '-' for an operand pushing on the stack:
4395 sp@-, -(sp) or -(%sp) depending on the style of syntax.
4396 '+' for an operand pushing on the stack:
4397 sp@+, (sp)+ or (%sp)+ depending on the style of syntax.
4398 '@' for a reference to the top word on the stack:
4399 sp@, (sp) or (%sp) depending on the style of syntax.
4400 '#' for an immediate operand prefix (# in MIT and Motorola syntax
5ee084df 4401 but & in SGS syntax).
79e68feb
RS
4402 '!' for the cc register (used in an `and to cc' insn).
4403 '$' for the letter `s' in an op code, but only on the 68040.
4404 '&' for the letter `d' in an op code, but only on the 68040.
2ac5f14a 4405 '/' for register prefix needed by longlong.h.
a40ed0f3 4406 '?' for m68k_library_id_string
79e68feb
RS
4407
4408 'b' for byte insn (no effect, on the Sun; this is for the ISI).
4409 'd' to force memory addressing to be absolute, not relative.
4410 'f' for float insn (print a CONST_DOUBLE as a float rather than in hex)
79e68feb
RS
4411 'x' for float insn (print a CONST_DOUBLE as a float rather than in hex),
4412 or print pair of registers as rx:ry.
29ca003a
RS
4413 'p' print an address with @PLTPC attached, but only if the operand
4414 is not locally-bound. */
79e68feb
RS
4415
/* Output operand OP to FILE, formatted according to the code LETTER.
   The supported m68k-specific codes are listed in the comment above.
   A null OP with a punctuation LETTER emits fixed text only.  */

void
print_operand (FILE *file, rtx op, int letter)
{
  if (letter == '.')
    {
      /* Size-suffix dot appears only in Motorola syntax.  */
      if (MOTOROLA)
	fprintf (file, ".");
    }
  else if (letter == '#')
    asm_fprintf (file, "%I");
  else if (letter == '-')
    asm_fprintf (file, MOTOROLA ? "-(%Rsp)" : "%Rsp@-");
  else if (letter == '+')
    asm_fprintf (file, MOTOROLA ? "(%Rsp)+" : "%Rsp@+");
  else if (letter == '@')
    asm_fprintf (file, MOTOROLA ? "(%Rsp)" : "%Rsp@");
  else if (letter == '!')
    asm_fprintf (file, "%Rfpcr");
  else if (letter == '$')
    {
      /* 's' opcode letter, 68040 only.  */
      if (TARGET_68040)
	fprintf (file, "s");
    }
  else if (letter == '&')
    {
      /* 'd' opcode letter, 68040 only.  */
      if (TARGET_68040)
	fprintf (file, "d");
    }
  else if (letter == '/')
    asm_fprintf (file, "%R");
  else if (letter == '?')
    asm_fprintf (file, m68k_library_id_string);
  else if (letter == 'p')
    {
      output_addr_const (file, op);
      /* Only non-local symbols go through the PLT.  */
      if (!(GET_CODE (op) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (op)))
	fprintf (file, "@PLTPC");
    }
  else if (GET_CODE (op) == REG)
    {
      if (letter == 'R')
	/* Print out the second register name of a register pair.
	   I.e., R (6) => 7.  */
	fputs (M68K_REGNAME(REGNO (op) + 1), file);
      else
	fputs (M68K_REGNAME(REGNO (op)), file);
    }
  else if (GET_CODE (op) == MEM)
    {
      output_address (XEXP (op, 0));
      /* 'd': force absolute-long addressing when the constant address
	 is not representable as a 16-bit absolute-short.  */
      if (letter == 'd' && ! TARGET_68020
	  && CONSTANT_ADDRESS_P (XEXP (op, 0))
	  && !(GET_CODE (XEXP (op, 0)) == CONST_INT
	       && INTVAL (XEXP (op, 0)) < 0x8000
	       && INTVAL (XEXP (op, 0)) >= -0x8000))
	fprintf (file, MOTOROLA ? ".l" : ":l");
    }
  else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == SFmode)
    {
      /* Emit the single-precision value as a 32-bit hex immediate.  */
      REAL_VALUE_TYPE r;
      long l;
      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, l);
      asm_fprintf (file, "%I0x%lx", l & 0xFFFFFFFF);
    }
  else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == XFmode)
    {
      /* Extended precision: three 32-bit words.  */
      REAL_VALUE_TYPE r;
      long l[3];
      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
      asm_fprintf (file, "%I0x%lx%08lx%08lx", l[0] & 0xFFFFFFFF,
		   l[1] & 0xFFFFFFFF, l[2] & 0xFFFFFFFF);
    }
  else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == DFmode)
    {
      /* Double precision: two 32-bit words.  */
      REAL_VALUE_TYPE r;
      long l[2];
      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_DOUBLE (r, l);
      asm_fprintf (file, "%I0x%lx%08lx", l[0] & 0xFFFFFFFF, l[1] & 0xFFFFFFFF);
    }
  else
    {
      /* Use `print_operand_address' instead of `output_addr_const'
	 to ensure that we print relevant PIC stuff.  */
      asm_fprintf (file, "%I");
      if (TARGET_PCREL
	  && (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == CONST))
	print_operand_address (file, op);
      else
	output_addr_const (file, op);
    }
}
4510
75df395f
MK
4511/* Return string for TLS relocation RELOC. */
4512
4513static const char *
4514m68k_get_reloc_decoration (enum m68k_reloc reloc)
4515{
4516 /* To my knowledge, !MOTOROLA assemblers don't support TLS. */
4517 gcc_assert (MOTOROLA || reloc == RELOC_GOT);
4518
4519 switch (reloc)
4520 {
4521 case RELOC_GOT:
4522 if (MOTOROLA)
4523 {
4524 if (flag_pic == 1 && TARGET_68020)
4525 return "@GOT.w";
4526 else
4527 return "@GOT";
4528 }
4529 else
4530 {
4531 if (TARGET_68020)
4532 {
4533 switch (flag_pic)
4534 {
4535 case 1:
4536 return ":w";
4537 case 2:
4538 return ":l";
4539 default:
4540 return "";
4541 }
4542 }
4543 }
4544
4545 case RELOC_TLSGD:
4546 return "@TLSGD";
4547
4548 case RELOC_TLSLDM:
4549 return "@TLSLDM";
4550
4551 case RELOC_TLSLDO:
4552 return "@TLSLDO";
4553
4554 case RELOC_TLSIE:
4555 return "@TLSIE";
4556
4557 case RELOC_TLSLE:
4558 return "@TLSLE";
4559
4560 default:
4561 gcc_unreachable ();
4562 }
4563}
4564
cb69db4f 4565/* m68k implementation of TARGET_OUTPUT_ADDR_CONST_EXTRA. */
884316ff 4566
cb69db4f 4567static bool
884316ff
JM
4568m68k_output_addr_const_extra (FILE *file, rtx x)
4569{
75df395f
MK
4570 if (GET_CODE (x) == UNSPEC)
4571 {
4572 switch (XINT (x, 1))
4573 {
4574 case UNSPEC_RELOC16:
4575 case UNSPEC_RELOC32:
4576 output_addr_const (file, XVECEXP (x, 0, 0));
f878882b
AS
4577 fputs (m68k_get_reloc_decoration
4578 ((enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1))), file);
75df395f 4579 return true;
884316ff 4580
75df395f
MK
4581 default:
4582 break;
4583 }
4584 }
4585
4586 return false;
4587}
4588
4589/* M68K implementation of TARGET_ASM_OUTPUT_DWARF_DTPREL. */
4590
4591static void
4592m68k_output_dwarf_dtprel (FILE *file, int size, rtx x)
4593{
4594 gcc_assert (size == 4);
4595 fputs ("\t.long\t", file);
4596 output_addr_const (file, x);
4597 fputs ("@TLSLDO+0x8000", file);
884316ff
JM
4598}
4599
7b0f476d
AS
/* In the name of slightly smaller debug output, and to cater to
   general assembler lossage, recognize various UNSPEC sequences
   and turn them back into a direct symbol reference.  */

static rtx
m68k_delegitimize_address (rtx orig_x)
{
  rtx x;
  struct m68k_address addr;
  rtx unspec;

  orig_x = delegitimize_mem_from_attrs (orig_x);
  x = orig_x;
  if (MEM_P (x))
    x = XEXP (x, 0);

  if (GET_CODE (x) != PLUS || GET_MODE (x) != Pmode)
    return orig_x;

  /* Only addresses whose constant term is a CONST wrapper are
     candidates for containing a reloc UNSPEC.  */
  if (!m68k_decompose_address (GET_MODE (x), x, false, &addr)
      || addr.offset == NULL_RTX
      || GET_CODE (addr.offset) != CONST)
    return orig_x;

  /* Strip an addend around the UNSPEC, if present.  */
  unspec = XEXP (addr.offset, 0);
  if (GET_CODE (unspec) == PLUS && CONST_INT_P (XEXP (unspec, 1)))
    unspec = XEXP (unspec, 0);
  if (GET_CODE (unspec) != UNSPEC
      || (XINT (unspec, 1) != UNSPEC_RELOC16
	  && XINT (unspec, 1) != UNSPEC_RELOC32))
    return orig_x;
  x = XVECEXP (unspec, 0, 0);
  gcc_assert (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF);
  /* Re-attach the addend that wrapped the UNSPEC.  */
  if (unspec != XEXP (addr.offset, 0))
    x = gen_rtx_PLUS (Pmode, x, XEXP (XEXP (addr.offset, 0), 1));
  /* Rebuild the rest of the decomposed address around the bare
     symbol: scaled index first, then the base register.  */
  if (addr.index)
    {
      rtx idx = addr.index;
      if (addr.scale != 1)
	idx = gen_rtx_MULT (Pmode, idx, GEN_INT (addr.scale));
      x = gen_rtx_PLUS (Pmode, idx, x);
    }
  if (addr.base)
    x = gen_rtx_PLUS (Pmode, addr.base, x);
  if (MEM_P (orig_x))
    x = replace_equiv_address_nv (orig_x, x);
  return x;
}
4648
79e68feb
RS
4649\f
4650/* A C compound statement to output to stdio stream STREAM the
4651 assembler syntax for an instruction operand that is a memory
4652 reference whose address is ADDR. ADDR is an RTL expression.
4653
4654 Note that this contains a kludge that knows that the only reason
4655 we have an address (plus (label_ref...) (reg...)) when not generating
4656 PIC code is in the insn before a tablejump, and we know that m68k.md
4657 generates a label LInnn: on such an insn.
4658
4659 It is possible for PIC to generate a (plus (label_ref...) (reg...))
4660 and we handle that just like we would a (plus (symbol_ref...) (reg...)).
4661
79e68feb
RS
4662 This routine is responsible for distinguishing between -fpic and -fPIC
4663 style relocations in an address. When generating -fpic code the
112cdef5
KH
4664 offset is output in word mode (e.g. movel a5@(_foo:w), a0). When generating
4665 -fPIC code the offset is output in long mode (e.g. movel a5@(_foo:l), a0) */
79e68feb
RS
4666
/* Output to FILE the assembler syntax (MIT or Motorola, per MOTOROLA)
   for the memory address ADDR.  See the comment above for the PIC
   and tablejump label conventions this relies on.  */

void
print_operand_address (FILE *file, rtx addr)
{
  struct m68k_address address;

  if (!m68k_decompose_address (QImode, addr, true, &address))
    gcc_unreachable ();

  if (address.code == PRE_DEC)
    fprintf (file, MOTOROLA ? "-(%s)" : "%s@-",
	     M68K_REGNAME (REGNO (address.base)));
  else if (address.code == POST_INC)
    fprintf (file, MOTOROLA ? "(%s)+" : "%s@+",
	     M68K_REGNAME (REGNO (address.base)));
  else if (!address.base && !address.index)
    {
      /* A constant address.  */
      gcc_assert (address.offset == addr);
      if (GET_CODE (addr) == CONST_INT)
	{
	  /* (xxx).w or (xxx).l.  */
	  if (IN_RANGE (INTVAL (addr), -0x8000, 0x7fff))
	    fprintf (file, MOTOROLA ? "%d.w" : "%d:w", (int) INTVAL (addr));
	  else
	    fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (addr));
	}
      else if (TARGET_PCREL)
	{
	  /* (d16,PC) or (bd,PC,Xn) (with suppressed index register).  */
	  fputc ('(', file);
	  output_addr_const (file, addr);
	  asm_fprintf (file, flag_pic == 1 ? ":w,%Rpc)" : ":l,%Rpc)");
	}
      else
	{
	  /* (xxx).l.  We need a special case for SYMBOL_REF if the symbol
	     name ends in `.<letter>', as the last 2 characters can be
	     mistaken as a size suffix.  Put the name in parentheses.  */
	  if (GET_CODE (addr) == SYMBOL_REF
	      && strlen (XSTR (addr, 0)) > 2
	      && XSTR (addr, 0)[strlen (XSTR (addr, 0)) - 2] == '.')
	    {
	      putc ('(', file);
	      output_addr_const (file, addr);
	      putc (')', file);
	    }
	  else
	    output_addr_const (file, addr);
	}
    }
  else
    {
      int labelno;

      /* If ADDR is a (d8,pc,Xn) address, this is the number of the
	 label being accessed, otherwise it is -1.  */
      labelno = (address.offset
		 && !address.base
		 && GET_CODE (address.offset) == LABEL_REF
		 ? CODE_LABEL_NUMBER (XEXP (address.offset, 0))
		 : -1);
      if (MOTOROLA)
	{
	  /* Print the "offset(base" component.  */
	  if (labelno >= 0)
	    asm_fprintf (file, "%LL%d(%Rpc,", labelno);
	  else
	    {
	      if (address.offset)
		output_addr_const (file, address.offset);

	      putc ('(', file);
	      if (address.base)
		fputs (M68K_REGNAME (REGNO (address.base)), file);
	    }
	  /* Print the ",index" component, if any.  */
	  if (address.index)
	    {
	      if (address.base)
		putc (',', file);
	      fprintf (file, "%s.%c",
		       M68K_REGNAME (REGNO (address.index)),
		       GET_MODE (address.index) == HImode ? 'w' : 'l');
	      if (address.scale != 1)
		fprintf (file, "*%d", address.scale);
	    }
	  putc (')', file);
	}
      else /* !MOTOROLA */
	{
	  if (!address.offset && !address.index)
	    fprintf (file, "%s@", M68K_REGNAME (REGNO (address.base)));
	  else
	    {
	      /* Print the "base@(offset" component.  */
	      if (labelno >= 0)
		asm_fprintf (file, "%Rpc@(%LL%d", labelno);
	      else
		{
		  if (address.base)
		    fputs (M68K_REGNAME (REGNO (address.base)), file);
		  fprintf (file, "@(");
		  if (address.offset)
		    output_addr_const (file, address.offset);
		}
	      /* Print the ",index" component, if any.  */
	      if (address.index)
		{
		  fprintf (file, ",%s:%c",
			   M68K_REGNAME (REGNO (address.index)),
			   GET_MODE (address.index) == HImode ? 'w' : 'l');
		  if (address.scale != 1)
		    fprintf (file, ":%d", address.scale);
		}
	      putc (')', file);
	    }
	}
    }
}
af13f02d
JW
4786\f
4787/* Check for cases where a clr insns can be omitted from code using
4788 strict_low_part sets. For example, the second clrl here is not needed:
4789 clrl d0; movw a0@+,d0; use d0; clrl d0; movw a0@+; use d0; ...
4790
4791 MODE is the mode of this STRICT_LOW_PART set. FIRST_INSN is the clear
4792 insn we are checking for redundancy. TARGET is the register set by the
4793 clear insn. */
4794
8a4a2253
BI
bool
strict_low_part_peephole_ok (enum machine_mode mode, rtx first_insn,
			     rtx target)
{
  /* Scan backwards from FIRST_INSN within the basic block, looking for
     an earlier insn that proves the clear is redundant.  */
  rtx p = first_insn;

  while ((p = PREV_INSN (p)))
    {
      /* Don't look past the start of the basic block.  */
      if (NOTE_INSN_BASIC_BLOCK_P (p))
	return false;

      if (NOTE_P (p))
	continue;

      /* If it isn't an insn, then give up.  */
      if (!INSN_P (p))
	return false;

      if (reg_set_p (target, p))
	{
	  rtx set = single_set (p);
	  rtx dest;

	  /* If it isn't an easy to recognize insn, then give up.  */
	  if (! set)
	    return false;

	  dest = SET_DEST (set);

	  /* If this sets the entire target register to zero, then our
	     first_insn is redundant.  */
	  if (rtx_equal_p (dest, target)
	      && SET_SRC (set) == const0_rtx)
	    return true;
	  else if (GET_CODE (dest) == STRICT_LOW_PART
		   && GET_CODE (XEXP (dest, 0)) == REG
		   && REGNO (XEXP (dest, 0)) == REGNO (target)
		   && (GET_MODE_SIZE (GET_MODE (XEXP (dest, 0)))
		       <= GET_MODE_SIZE (mode)))
	    /* This is a strict low part set which modifies less than
	       we are using, so it is safe.  */
	    ;
	  else
	    /* Any other write to TARGET invalidates the peephole.  */
	    return false;
	}
    }

  return false;
}
67cd4f83 4844
2c8ec431
DL
4845/* Operand predicates for implementing asymmetric pc-relative addressing
4846 on m68k. The m68k supports pc-relative addressing (mode 7, register 2)
dab66575 4847 when used as a source operand, but not as a destination operand.
2c8ec431
DL
4848
4849 We model this by restricting the meaning of the basic predicates
4850 (general_operand, memory_operand, etc) to forbid the use of this
4851 addressing mode, and then define the following predicates that permit
4852 this addressing mode. These predicates can then be used for the
4853 source operands of the appropriate instructions.
4854
 4855 n.b. While it is theoretically possible to change all machine patterns
 4856 to use this addressing mode where permitted by the architecture,
 4857 it has only been implemented for "common" cases: SImode, HImode, and
 4858 QImode operands, and only for the principal operations that would
 4859 require this addressing mode: data movement and simple integer operations.
4860
4861 In parallel with these new predicates, two new constraint letters
4862 were defined: 'S' and 'T'. 'S' is the -mpcrel analog of 'm'.
4863 'T' replaces 's' in the non-pcrel case. It is a no-op in the pcrel case.
4864 In the pcrel case 's' is only valid in combination with 'a' registers.
4865 See addsi3, subsi3, cmpsi, and movsi patterns for a better understanding
4866 of how these constraints are used.
4867
4868 The use of these predicates is strictly optional, though patterns that
4869 don't will cause an extra reload register to be allocated where one
4870 was not necessary:
4871
4872 lea (abc:w,%pc),%a0 ; need to reload address
4873 moveq &1,%d1 ; since write to pc-relative space
4874 movel %d1,%a0@ ; is not allowed
4875 ...
4876 lea (abc:w,%pc),%a1 ; no need to reload address here
4877 movel %a1@,%d0 ; since "movel (abc:w,%pc),%d0" is ok
4878
4879 For more info, consult tiemann@cygnus.com.
4880
4881
4882 All of the ugliness with predicates and constraints is due to the
4883 simple fact that the m68k does not allow a pc-relative addressing
4884 mode as a destination. gcc does not distinguish between source and
4885 destination addresses. Hence, if we claim that pc-relative address
331d9186 4886 modes are valid, e.g. TARGET_LEGITIMATE_ADDRESS_P accepts them, then we
2c8ec431
DL
4887 end up with invalid code. To get around this problem, we left
4888 pc-relative modes as invalid addresses, and then added special
4889 predicates and constraints to accept them.
4890
4891 A cleaner way to handle this is to modify gcc to distinguish
4892 between source and destination addresses. We can then say that
4893 pc-relative is a valid source address but not a valid destination
4894 address, and hopefully avoid a lot of the predicate and constraint
4895 hackery. Unfortunately, this would be a pretty big change. It would
4896 be a useful change for a number of ports, but there aren't any current
4897 plans to undertake this.
4898
4899 ***************************************************************************/
4900
4901
5505f548 4902const char *
8a4a2253 4903output_andsi3 (rtx *operands)
29ae8a3c
RK
4904{
4905 int logval;
4906 if (GET_CODE (operands[2]) == CONST_INT
25c99d8f 4907 && (INTVAL (operands[2]) | 0xffff) == -1
29ae8a3c
RK
4908 && (DATA_REG_P (operands[0])
4909 || offsettable_memref_p (operands[0]))
9425fb04 4910 && !TARGET_COLDFIRE)
29ae8a3c
RK
4911 {
4912 if (GET_CODE (operands[0]) != REG)
b72f00af 4913 operands[0] = adjust_address (operands[0], HImode, 2);
1d8eaa6b 4914 operands[2] = GEN_INT (INTVAL (operands[2]) & 0xffff);
29ae8a3c
RK
4915 /* Do not delete a following tstl %0 insn; that would be incorrect. */
4916 CC_STATUS_INIT;
4917 if (operands[2] == const0_rtx)
4918 return "clr%.w %0";
4919 return "and%.w %2,%0";
4920 }
4921 if (GET_CODE (operands[2]) == CONST_INT
c4406f74 4922 && (logval = exact_log2 (~ INTVAL (operands[2]) & 0xffffffff)) >= 0
29ae8a3c
RK
4923 && (DATA_REG_P (operands[0])
4924 || offsettable_memref_p (operands[0])))
4925 {
4926 if (DATA_REG_P (operands[0]))
a0a7fbc9 4927 operands[1] = GEN_INT (logval);
29ae8a3c
RK
4928 else
4929 {
b72f00af 4930 operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
1d8eaa6b 4931 operands[1] = GEN_INT (logval % 8);
29ae8a3c
RK
4932 }
4933 /* This does not set condition codes in a standard way. */
4934 CC_STATUS_INIT;
4935 return "bclr %1,%0";
4936 }
4937 return "and%.l %2,%0";
4938}
4939
5505f548 4940const char *
8a4a2253 4941output_iorsi3 (rtx *operands)
29ae8a3c
RK
4942{
4943 register int logval;
4944 if (GET_CODE (operands[2]) == CONST_INT
4945 && INTVAL (operands[2]) >> 16 == 0
4946 && (DATA_REG_P (operands[0])
4947 || offsettable_memref_p (operands[0]))
9425fb04 4948 && !TARGET_COLDFIRE)
29ae8a3c
RK
4949 {
4950 if (GET_CODE (operands[0]) != REG)
b72f00af 4951 operands[0] = adjust_address (operands[0], HImode, 2);
29ae8a3c
RK
4952 /* Do not delete a following tstl %0 insn; that would be incorrect. */
4953 CC_STATUS_INIT;
4954 if (INTVAL (operands[2]) == 0xffff)
4955 return "mov%.w %2,%0";
4956 return "or%.w %2,%0";
4957 }
4958 if (GET_CODE (operands[2]) == CONST_INT
c4406f74 4959 && (logval = exact_log2 (INTVAL (operands[2]) & 0xffffffff)) >= 0
29ae8a3c
RK
4960 && (DATA_REG_P (operands[0])
4961 || offsettable_memref_p (operands[0])))
4962 {
4963 if (DATA_REG_P (operands[0]))
b72f00af 4964 operands[1] = GEN_INT (logval);
29ae8a3c
RK
4965 else
4966 {
b72f00af 4967 operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
1d8eaa6b 4968 operands[1] = GEN_INT (logval % 8);
29ae8a3c
RK
4969 }
4970 CC_STATUS_INIT;
4971 return "bset %1,%0";
4972 }
4973 return "or%.l %2,%0";
4974}
4975
5505f548 4976const char *
8a4a2253 4977output_xorsi3 (rtx *operands)
29ae8a3c
RK
4978{
4979 register int logval;
4980 if (GET_CODE (operands[2]) == CONST_INT
4981 && INTVAL (operands[2]) >> 16 == 0
4982 && (offsettable_memref_p (operands[0]) || DATA_REG_P (operands[0]))
9425fb04 4983 && !TARGET_COLDFIRE)
29ae8a3c
RK
4984 {
4985 if (! DATA_REG_P (operands[0]))
b72f00af 4986 operands[0] = adjust_address (operands[0], HImode, 2);
29ae8a3c
RK
4987 /* Do not delete a following tstl %0 insn; that would be incorrect. */
4988 CC_STATUS_INIT;
4989 if (INTVAL (operands[2]) == 0xffff)
4990 return "not%.w %0";
4991 return "eor%.w %2,%0";
4992 }
4993 if (GET_CODE (operands[2]) == CONST_INT
c4406f74 4994 && (logval = exact_log2 (INTVAL (operands[2]) & 0xffffffff)) >= 0
29ae8a3c
RK
4995 && (DATA_REG_P (operands[0])
4996 || offsettable_memref_p (operands[0])))
4997 {
4998 if (DATA_REG_P (operands[0]))
b72f00af 4999 operands[1] = GEN_INT (logval);
29ae8a3c
RK
5000 else
5001 {
b72f00af 5002 operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
1d8eaa6b 5003 operands[1] = GEN_INT (logval % 8);
29ae8a3c
RK
5004 }
5005 CC_STATUS_INIT;
5006 return "bchg %1,%0";
5007 }
5008 return "eor%.l %2,%0";
5009}
7c262518 5010
29ca003a
RS
5011/* Return the instruction that should be used for a call to address X,
5012 which is known to be in operand 0. */
5013
5014const char *
5015output_call (rtx x)
5016{
5017 if (symbolic_operand (x, VOIDmode))
5018 return m68k_symbolic_call;
5019 else
5020 return "jsr %a0";
5021}
5022
f7e70894
RS
5023/* Likewise sibling calls. */
5024
5025const char *
5026output_sibcall (rtx x)
5027{
5028 if (symbolic_operand (x, VOIDmode))
5029 return m68k_symbolic_jump;
5030 else
5031 return "jmp %a0";
5032}
5033
c590b625 5034static void
8a4a2253 5035m68k_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
4ab870f5 5036 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8a4a2253 5037 tree function)
483ab821 5038{
e0601576
RH
5039 rtx this_slot, offset, addr, mem, insn, tmp;
5040
5041 /* Avoid clobbering the struct value reg by using the
5042 static chain reg as a temporary. */
5043 tmp = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
4ab870f5
RS
5044
5045 /* Pretend to be a post-reload pass while generating rtl. */
4ab870f5 5046 reload_completed = 1;
4ab870f5
RS
5047
5048 /* The "this" pointer is stored at 4(%sp). */
0a81f074
RS
5049 this_slot = gen_rtx_MEM (Pmode, plus_constant (Pmode,
5050 stack_pointer_rtx, 4));
4ab870f5
RS
5051
5052 /* Add DELTA to THIS. */
5053 if (delta != 0)
5050d266 5054 {
4ab870f5
RS
5055 /* Make the offset a legitimate operand for memory addition. */
5056 offset = GEN_INT (delta);
5057 if ((delta < -8 || delta > 8)
5058 && (TARGET_COLDFIRE || USE_MOVQ (delta)))
5059 {
5060 emit_move_insn (gen_rtx_REG (Pmode, D0_REG), offset);
5061 offset = gen_rtx_REG (Pmode, D0_REG);
5062 }
5063 emit_insn (gen_add3_insn (copy_rtx (this_slot),
5064 copy_rtx (this_slot), offset));
5050d266 5065 }
c590b625 5066
4ab870f5
RS
5067 /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
5068 if (vcall_offset != 0)
5069 {
5070 /* Set the static chain register to *THIS. */
e0601576
RH
5071 emit_move_insn (tmp, this_slot);
5072 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
4ab870f5
RS
5073
5074 /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
0a81f074 5075 addr = plus_constant (Pmode, tmp, vcall_offset);
4ab870f5
RS
5076 if (!m68k_legitimate_address_p (Pmode, addr, true))
5077 {
e0601576
RH
5078 emit_insn (gen_rtx_SET (VOIDmode, tmp, addr));
5079 addr = tmp;
4ab870f5 5080 }
c590b625 5081
4ab870f5
RS
5082 /* Load the offset into %d0 and add it to THIS. */
5083 emit_move_insn (gen_rtx_REG (Pmode, D0_REG),
5084 gen_rtx_MEM (Pmode, addr));
5085 emit_insn (gen_add3_insn (copy_rtx (this_slot),
5086 copy_rtx (this_slot),
5087 gen_rtx_REG (Pmode, D0_REG)));
5088 }
29ca003a 5089
4ab870f5
RS
5090 /* Jump to the target function. Use a sibcall if direct jumps are
5091 allowed, otherwise load the address into a register first. */
5092 mem = DECL_RTL (function);
5093 if (!sibcall_operand (XEXP (mem, 0), VOIDmode))
5094 {
5095 gcc_assert (flag_pic);
c590b625 5096
4ab870f5
RS
5097 if (!TARGET_SEP_DATA)
5098 {
5099 /* Use the static chain register as a temporary (call-clobbered)
5100 GOT pointer for this function. We can use the static chain
5101 register because it isn't live on entry to the thunk. */
6fb5fa3c 5102 SET_REGNO (pic_offset_table_rtx, STATIC_CHAIN_REGNUM);
4ab870f5
RS
5103 emit_insn (gen_load_got (pic_offset_table_rtx));
5104 }
e0601576
RH
5105 legitimize_pic_address (XEXP (mem, 0), Pmode, tmp);
5106 mem = replace_equiv_address (mem, tmp);
4ab870f5
RS
5107 }
5108 insn = emit_call_insn (gen_sibcall (mem, const0_rtx));
5109 SIBLING_CALL_P (insn) = 1;
5110
5111 /* Run just enough of rest_of_compilation. */
5112 insn = get_insns ();
5113 split_all_insns_noflow ();
5114 final_start_function (insn, file, 1);
5115 final (insn, file, 1);
5116 final_end_function ();
5117
5118 /* Clean up the vars set above. */
5119 reload_completed = 0;
4ab870f5
RS
5120
5121 /* Restore the original PIC register. */
5122 if (flag_pic)
6fb5fa3c 5123 SET_REGNO (pic_offset_table_rtx, PIC_REG);
483ab821 5124}
8636be86
KH
5125
5126/* Worker function for TARGET_STRUCT_VALUE_RTX. */
5127
5128static rtx
5129m68k_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
5130 int incoming ATTRIBUTE_UNUSED)
5131{
5132 return gen_rtx_REG (Pmode, M68K_STRUCT_VALUE_REGNUM);
5133}
cfca21cb
PB
5134
5135/* Return nonzero if register old_reg can be renamed to register new_reg. */
5136int
5137m68k_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED,
5138 unsigned int new_reg)
5139{
5140
5141 /* Interrupt functions can only use registers that have already been
5142 saved by the prologue, even if they would normally be
5143 call-clobbered. */
5144
a4242737
KH
5145 if ((m68k_get_function_kind (current_function_decl)
5146 == m68k_fk_interrupt_handler)
6fb5fa3c 5147 && !df_regs_ever_live_p (new_reg))
cfca21cb
PB
5148 return 0;
5149
5150 return 1;
5151}
70028b61 5152
ffa2596e
RS
5153/* Value is true if hard register REGNO can hold a value of machine-mode
5154 MODE. On the 68000, we let the cpu registers can hold any mode, but
5155 restrict the 68881 registers to floating-point modes. */
5156
70028b61
PB
5157bool
5158m68k_regno_mode_ok (int regno, enum machine_mode mode)
5159{
36e04090 5160 if (DATA_REGNO_P (regno))
70028b61 5161 {
a0a7fbc9
AS
5162 /* Data Registers, can hold aggregate if fits in. */
5163 if (regno + GET_MODE_SIZE (mode) / 4 <= 8)
5164 return true;
70028b61 5165 }
36e04090 5166 else if (ADDRESS_REGNO_P (regno))
70028b61 5167 {
a0a7fbc9
AS
5168 if (regno + GET_MODE_SIZE (mode) / 4 <= 16)
5169 return true;
70028b61 5170 }
36e04090 5171 else if (FP_REGNO_P (regno))
70028b61
PB
5172 {
5173 /* FPU registers, hold float or complex float of long double or
a0a7fbc9
AS
5174 smaller. */
5175 if ((GET_MODE_CLASS (mode) == MODE_FLOAT
5176 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
dcc21c4c 5177 && GET_MODE_UNIT_SIZE (mode) <= TARGET_FP_REG_SIZE)
a0a7fbc9 5178 return true;
70028b61
PB
5179 }
5180 return false;
5181}
dcc21c4c 5182
ffa2596e
RS
5183/* Implement SECONDARY_RELOAD_CLASS. */
5184
5185enum reg_class
5186m68k_secondary_reload_class (enum reg_class rclass,
5187 enum machine_mode mode, rtx x)
5188{
5189 int regno;
5190
5191 regno = true_regnum (x);
5192
5193 /* If one operand of a movqi is an address register, the other
5194 operand must be a general register or constant. Other types
5195 of operand must be reloaded through a data register. */
5196 if (GET_MODE_SIZE (mode) == 1
5197 && reg_classes_intersect_p (rclass, ADDR_REGS)
5198 && !(INT_REGNO_P (regno) || CONSTANT_P (x)))
5199 return DATA_REGS;
5200
5201 /* PC-relative addresses must be loaded into an address register first. */
5202 if (TARGET_PCREL
5203 && !reg_class_subset_p (rclass, ADDR_REGS)
5204 && symbolic_operand (x, VOIDmode))
5205 return ADDR_REGS;
5206
5207 return NO_REGS;
5208}
5209
5210/* Implement PREFERRED_RELOAD_CLASS. */
5211
5212enum reg_class
5213m68k_preferred_reload_class (rtx x, enum reg_class rclass)
5214{
5215 enum reg_class secondary_class;
5216
5217 /* If RCLASS might need a secondary reload, try restricting it to
5218 a class that doesn't. */
5219 secondary_class = m68k_secondary_reload_class (rclass, GET_MODE (x), x);
5220 if (secondary_class != NO_REGS
5221 && reg_class_subset_p (secondary_class, rclass))
5222 return secondary_class;
5223
5224 /* Prefer to use moveq for in-range constants. */
5225 if (GET_CODE (x) == CONST_INT
5226 && reg_class_subset_p (DATA_REGS, rclass)
5227 && IN_RANGE (INTVAL (x), -0x80, 0x7f))
5228 return DATA_REGS;
5229
5230 /* ??? Do we really need this now? */
5231 if (GET_CODE (x) == CONST_DOUBLE
5232 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
5233 {
5234 if (TARGET_HARD_FLOAT && reg_class_subset_p (FP_REGS, rclass))
5235 return FP_REGS;
5236
5237 return NO_REGS;
5238 }
5239
5240 return rclass;
5241}
5242
dcc21c4c
PB
5243/* Return floating point values in a 68881 register. This makes 68881 code
5244 a little bit faster. It also makes -msoft-float code incompatible with
5245 hard-float code, so people have to be careful not to mix the two.
c0220ea4 5246 For ColdFire it was decided the ABI incompatibility is undesirable.
dcc21c4c
PB
5247 If there is need for a hard-float ABI it is probably worth doing it
5248 properly and also passing function arguments in FP registers. */
5249rtx
5250m68k_libcall_value (enum machine_mode mode)
5251{
5252 switch (mode) {
5253 case SFmode:
5254 case DFmode:
5255 case XFmode:
5256 if (TARGET_68881)
8d989403 5257 return gen_rtx_REG (mode, FP0_REG);
dcc21c4c
PB
5258 break;
5259 default:
5260 break;
5261 }
75df395f
MK
5262
5263 return gen_rtx_REG (mode, m68k_libcall_value_in_a0_p ? A0_REG : D0_REG);
dcc21c4c
PB
5264}
5265
db5e2d51
MK
5266/* Location in which function value is returned.
5267 NOTE: Due to differences in ABIs, don't call this function directly,
5268 use FUNCTION_VALUE instead. */
dcc21c4c 5269rtx
586de218 5270m68k_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED)
dcc21c4c
PB
5271{
5272 enum machine_mode mode;
5273
5274 mode = TYPE_MODE (valtype);
5275 switch (mode) {
5276 case SFmode:
5277 case DFmode:
5278 case XFmode:
5279 if (TARGET_68881)
8d989403 5280 return gen_rtx_REG (mode, FP0_REG);
dcc21c4c
PB
5281 break;
5282 default:
5283 break;
5284 }
5285
576c9028
KH
5286 /* If the function returns a pointer, push that into %a0. */
5287 if (func && POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (func))))
5288 /* For compatibility with the large body of existing code which
5289 does not always properly declare external functions returning
5290 pointer types, the m68k/SVR4 convention is to copy the value
5291 returned for pointer functions from a0 to d0 in the function
5292 epilogue, so that callers that have neglected to properly
5293 declare the callee can still find the correct return value in
5294 d0. */
5295 return gen_rtx_PARALLEL
5296 (mode,
5297 gen_rtvec (2,
5298 gen_rtx_EXPR_LIST (VOIDmode,
5299 gen_rtx_REG (mode, A0_REG),
5300 const0_rtx),
5301 gen_rtx_EXPR_LIST (VOIDmode,
5302 gen_rtx_REG (mode, D0_REG),
5303 const0_rtx)));
5304 else if (POINTER_TYPE_P (valtype))
5305 return gen_rtx_REG (mode, A0_REG);
dcc21c4c 5306 else
576c9028 5307 return gen_rtx_REG (mode, D0_REG);
dcc21c4c 5308}
1c445f03
NS
5309
/* Worker function for TARGET_RETURN_IN_MEMORY.  */
#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
static bool
m68k_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  enum machine_mode mode = TYPE_MODE (type);

  /* BLKmode values always go in memory.  */
  if (mode == BLKmode)
    return true;

  /* If TYPE's known alignment is less than the alignment of MODE that
     would contain the structure, then return in memory.  We need to
     do so to maintain the compatibility between code compiled with
     -mstrict-align and that compiled with -mno-strict-align.  */
  if (AGGREGATE_TYPE_P (type)
      && TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (mode))
    return true;

  return false;
}
#endif
c47b0cb4
MK
5331
5332/* CPU to schedule the program for. */
5333enum attr_cpu m68k_sched_cpu;
5334
826fadba
MK
5335/* MAC to schedule the program for. */
5336enum attr_mac m68k_sched_mac;
5337
c47b0cb4
MK
5338/* Operand type. */
5339enum attr_op_type
5340 {
5341 /* No operand. */
5342 OP_TYPE_NONE,
5343
96fcacb7
MK
5344 /* Integer register. */
5345 OP_TYPE_RN,
5346
5347 /* FP register. */
5348 OP_TYPE_FPN,
c47b0cb4
MK
5349
5350 /* Implicit mem reference (e.g. stack). */
5351 OP_TYPE_MEM1,
5352
5353 /* Memory without offset or indexing. EA modes 2, 3 and 4. */
5354 OP_TYPE_MEM234,
5355
5356 /* Memory with offset but without indexing. EA mode 5. */
5357 OP_TYPE_MEM5,
5358
5359 /* Memory with indexing. EA mode 6. */
5360 OP_TYPE_MEM6,
5361
5362 /* Memory referenced by absolute address. EA mode 7. */
5363 OP_TYPE_MEM7,
5364
5365 /* Immediate operand that doesn't require extension word. */
5366 OP_TYPE_IMM_Q,
5367
5368 /* Immediate 16 bit operand. */
5369 OP_TYPE_IMM_W,
5370
5371 /* Immediate 32 bit operand. */
5372 OP_TYPE_IMM_L
5373 };
5374
c47b0cb4
MK
5375/* Return type of memory ADDR_RTX refers to. */
5376static enum attr_op_type
5377sched_address_type (enum machine_mode mode, rtx addr_rtx)
5378{
5379 struct m68k_address address;
5380
96fcacb7
MK
5381 if (symbolic_operand (addr_rtx, VOIDmode))
5382 return OP_TYPE_MEM7;
5383
c47b0cb4
MK
5384 if (!m68k_decompose_address (mode, addr_rtx,
5385 reload_completed, &address))
5386 {
96fcacb7 5387 gcc_assert (!reload_completed);
c47b0cb4
MK
5388 /* Reload will likely fix the address to be in the register. */
5389 return OP_TYPE_MEM234;
5390 }
5391
5392 if (address.scale != 0)
5393 return OP_TYPE_MEM6;
5394
5395 if (address.base != NULL_RTX)
5396 {
5397 if (address.offset == NULL_RTX)
5398 return OP_TYPE_MEM234;
5399
5400 return OP_TYPE_MEM5;
5401 }
5402
5403 gcc_assert (address.offset != NULL_RTX);
5404
5405 return OP_TYPE_MEM7;
5406}
5407
96fcacb7
MK
5408/* Return X or Y (depending on OPX_P) operand of INSN. */
5409static rtx
5410sched_get_operand (rtx insn, bool opx_p)
5411{
5412 int i;
5413
5414 if (recog_memoized (insn) < 0)
5415 gcc_unreachable ();
5416
5417 extract_constrain_insn_cached (insn);
5418
5419 if (opx_p)
5420 i = get_attr_opx (insn);
5421 else
5422 i = get_attr_opy (insn);
5423
5424 if (i >= recog_data.n_operands)
5425 return NULL;
5426
5427 return recog_data.operand[i];
5428}
5429
5430/* Return type of INSN's operand X (if OPX_P) or operand Y (if !OPX_P).
5431 If ADDRESS_P is true, return type of memory location operand refers to. */
c47b0cb4 5432static enum attr_op_type
96fcacb7 5433sched_attr_op_type (rtx insn, bool opx_p, bool address_p)
c47b0cb4 5434{
96fcacb7
MK
5435 rtx op;
5436
5437 op = sched_get_operand (insn, opx_p);
5438
5439 if (op == NULL)
5440 {
5441 gcc_assert (!reload_completed);
5442 return OP_TYPE_RN;
5443 }
c47b0cb4
MK
5444
5445 if (address_p)
5446 return sched_address_type (QImode, op);
5447
5448 if (memory_operand (op, VOIDmode))
5449 return sched_address_type (GET_MODE (op), XEXP (op, 0));
5450
5451 if (register_operand (op, VOIDmode))
96fcacb7
MK
5452 {
5453 if ((!reload_completed && FLOAT_MODE_P (GET_MODE (op)))
5454 || (reload_completed && FP_REG_P (op)))
5455 return OP_TYPE_FPN;
5456
5457 return OP_TYPE_RN;
5458 }
c47b0cb4
MK
5459
5460 if (GET_CODE (op) == CONST_INT)
5461 {
96fcacb7
MK
5462 int ival;
5463
5464 ival = INTVAL (op);
5465
5466 /* Check for quick constants. */
5467 switch (get_attr_type (insn))
5468 {
5469 case TYPE_ALUQ_L:
5470 if (IN_RANGE (ival, 1, 8) || IN_RANGE (ival, -8, -1))
5471 return OP_TYPE_IMM_Q;
5472
5473 gcc_assert (!reload_completed);
5474 break;
5475
5476 case TYPE_MOVEQ_L:
5477 if (USE_MOVQ (ival))
5478 return OP_TYPE_IMM_Q;
5479
5480 gcc_assert (!reload_completed);
5481 break;
5482
5483 case TYPE_MOV3Q_L:
5484 if (valid_mov3q_const (ival))
5485 return OP_TYPE_IMM_Q;
5486
5487 gcc_assert (!reload_completed);
5488 break;
5489
5490 default:
5491 break;
5492 }
5493
5494 if (IN_RANGE (ival, -0x8000, 0x7fff))
c47b0cb4
MK
5495 return OP_TYPE_IMM_W;
5496
5497 return OP_TYPE_IMM_L;
5498 }
5499
5500 if (GET_CODE (op) == CONST_DOUBLE)
5501 {
5502 switch (GET_MODE (op))
5503 {
5504 case SFmode:
5505 return OP_TYPE_IMM_W;
5506
5507 case VOIDmode:
5508 case DFmode:
5509 return OP_TYPE_IMM_L;
5510
5511 default:
5512 gcc_unreachable ();
5513 }
5514 }
5515
00b2ef14
MK
5516 if (GET_CODE (op) == CONST
5517 || symbolic_operand (op, VOIDmode)
c47b0cb4
MK
5518 || LABEL_P (op))
5519 {
5520 switch (GET_MODE (op))
5521 {
5522 case QImode:
5523 return OP_TYPE_IMM_Q;
5524
5525 case HImode:
5526 return OP_TYPE_IMM_W;
5527
5528 case SImode:
5529 return OP_TYPE_IMM_L;
5530
5531 default:
75df395f
MK
5532 if (symbolic_operand (m68k_unwrap_symbol (op, false), VOIDmode))
5533 /* Just a guess. */
c47b0cb4
MK
5534 return OP_TYPE_IMM_W;
5535
5536 return OP_TYPE_IMM_L;
5537 }
5538 }
5539
96fcacb7 5540 gcc_assert (!reload_completed);
c47b0cb4 5541
96fcacb7
MK
5542 if (FLOAT_MODE_P (GET_MODE (op)))
5543 return OP_TYPE_FPN;
c47b0cb4 5544
96fcacb7 5545 return OP_TYPE_RN;
c47b0cb4
MK
5546}
5547
5548/* Implement opx_type attribute.
5549 Return type of INSN's operand X.
5550 If ADDRESS_P is true, return type of memory location operand refers to. */
5551enum attr_opx_type
5552m68k_sched_attr_opx_type (rtx insn, int address_p)
5553{
c47b0cb4
MK
5554 switch (sched_attr_op_type (insn, true, address_p != 0))
5555 {
96fcacb7
MK
5556 case OP_TYPE_RN:
5557 return OPX_TYPE_RN;
5558
5559 case OP_TYPE_FPN:
5560 return OPX_TYPE_FPN;
c47b0cb4
MK
5561
5562 case OP_TYPE_MEM1:
5563 return OPX_TYPE_MEM1;
5564
5565 case OP_TYPE_MEM234:
5566 return OPX_TYPE_MEM234;
5567
5568 case OP_TYPE_MEM5:
5569 return OPX_TYPE_MEM5;
5570
5571 case OP_TYPE_MEM6:
5572 return OPX_TYPE_MEM6;
5573
5574 case OP_TYPE_MEM7:
5575 return OPX_TYPE_MEM7;
5576
5577 case OP_TYPE_IMM_Q:
5578 return OPX_TYPE_IMM_Q;
5579
5580 case OP_TYPE_IMM_W:
5581 return OPX_TYPE_IMM_W;
5582
5583 case OP_TYPE_IMM_L:
5584 return OPX_TYPE_IMM_L;
5585
5586 default:
5587 gcc_unreachable ();
c47b0cb4
MK
5588 }
5589}
5590
5591/* Implement opy_type attribute.
5592 Return type of INSN's operand Y.
5593 If ADDRESS_P is true, return type of memory location operand refers to. */
5594enum attr_opy_type
5595m68k_sched_attr_opy_type (rtx insn, int address_p)
5596{
c47b0cb4
MK
5597 switch (sched_attr_op_type (insn, false, address_p != 0))
5598 {
96fcacb7
MK
5599 case OP_TYPE_RN:
5600 return OPY_TYPE_RN;
5601
5602 case OP_TYPE_FPN:
5603 return OPY_TYPE_FPN;
c47b0cb4
MK
5604
5605 case OP_TYPE_MEM1:
5606 return OPY_TYPE_MEM1;
5607
5608 case OP_TYPE_MEM234:
5609 return OPY_TYPE_MEM234;
5610
5611 case OP_TYPE_MEM5:
5612 return OPY_TYPE_MEM5;
5613
5614 case OP_TYPE_MEM6:
5615 return OPY_TYPE_MEM6;
5616
5617 case OP_TYPE_MEM7:
5618 return OPY_TYPE_MEM7;
5619
5620 case OP_TYPE_IMM_Q:
5621 return OPY_TYPE_IMM_Q;
5622
5623 case OP_TYPE_IMM_W:
5624 return OPY_TYPE_IMM_W;
5625
5626 case OP_TYPE_IMM_L:
5627 return OPY_TYPE_IMM_L;
5628
5629 default:
5630 gcc_unreachable ();
c47b0cb4
MK
5631 }
5632}
5633
96fcacb7
MK
5634/* Return size of INSN as int. */
5635static int
5636sched_get_attr_size_int (rtx insn)
c47b0cb4
MK
5637{
5638 int size;
5639
96fcacb7 5640 switch (get_attr_type (insn))
c47b0cb4 5641 {
96fcacb7
MK
5642 case TYPE_IGNORE:
5643 /* There should be no references to m68k_sched_attr_size for 'ignore'
5644 instructions. */
5645 gcc_unreachable ();
5646 return 0;
5647
5648 case TYPE_MUL_L:
c47b0cb4
MK
5649 size = 2;
5650 break;
5651
5652 default:
5653 size = 1;
5654 break;
5655 }
5656
5657 switch (get_attr_opx_type (insn))
5658 {
5659 case OPX_TYPE_NONE:
96fcacb7
MK
5660 case OPX_TYPE_RN:
5661 case OPX_TYPE_FPN:
c47b0cb4
MK
5662 case OPX_TYPE_MEM1:
5663 case OPX_TYPE_MEM234:
5664 case OPY_TYPE_IMM_Q:
5665 break;
5666
5667 case OPX_TYPE_MEM5:
5668 case OPX_TYPE_MEM6:
5669 /* Here we assume that most absolute references are short. */
5670 case OPX_TYPE_MEM7:
5671 case OPY_TYPE_IMM_W:
5672 ++size;
5673 break;
5674
5675 case OPY_TYPE_IMM_L:
5676 size += 2;
5677 break;
5678
5679 default:
5680 gcc_unreachable ();
5681 }
5682
5683 switch (get_attr_opy_type (insn))
5684 {
5685 case OPY_TYPE_NONE:
96fcacb7
MK
5686 case OPY_TYPE_RN:
5687 case OPY_TYPE_FPN:
c47b0cb4
MK
5688 case OPY_TYPE_MEM1:
5689 case OPY_TYPE_MEM234:
5690 case OPY_TYPE_IMM_Q:
5691 break;
5692
5693 case OPY_TYPE_MEM5:
5694 case OPY_TYPE_MEM6:
5695 /* Here we assume that most absolute references are short. */
5696 case OPY_TYPE_MEM7:
5697 case OPY_TYPE_IMM_W:
5698 ++size;
5699 break;
5700
5701 case OPY_TYPE_IMM_L:
5702 size += 2;
5703 break;
5704
5705 default:
5706 gcc_unreachable ();
5707 }
5708
5709 if (size > 3)
5710 {
96fcacb7 5711 gcc_assert (!reload_completed);
c47b0cb4
MK
5712
5713 size = 3;
5714 }
5715
5716 return size;
5717}
5718
96fcacb7
MK
5719/* Return size of INSN as attribute enum value. */
5720enum attr_size
5721m68k_sched_attr_size (rtx insn)
5722{
5723 switch (sched_get_attr_size_int (insn))
5724 {
5725 case 1:
5726 return SIZE_1;
5727
5728 case 2:
5729 return SIZE_2;
5730
5731 case 3:
5732 return SIZE_3;
5733
5734 default:
5735 gcc_unreachable ();
96fcacb7
MK
5736 }
5737}
5738
5739/* Return operand X or Y (depending on OPX_P) of INSN,
5740 if it is a MEM, or NULL overwise. */
5741static enum attr_op_type
5742sched_get_opxy_mem_type (rtx insn, bool opx_p)
5743{
5744 if (opx_p)
5745 {
5746 switch (get_attr_opx_type (insn))
5747 {
5748 case OPX_TYPE_NONE:
5749 case OPX_TYPE_RN:
5750 case OPX_TYPE_FPN:
5751 case OPX_TYPE_IMM_Q:
5752 case OPX_TYPE_IMM_W:
5753 case OPX_TYPE_IMM_L:
5754 return OP_TYPE_RN;
5755
5756 case OPX_TYPE_MEM1:
5757 case OPX_TYPE_MEM234:
5758 case OPX_TYPE_MEM5:
5759 case OPX_TYPE_MEM7:
5760 return OP_TYPE_MEM1;
5761
5762 case OPX_TYPE_MEM6:
5763 return OP_TYPE_MEM6;
5764
5765 default:
5766 gcc_unreachable ();
96fcacb7
MK
5767 }
5768 }
5769 else
5770 {
5771 switch (get_attr_opy_type (insn))
5772 {
5773 case OPY_TYPE_NONE:
5774 case OPY_TYPE_RN:
5775 case OPY_TYPE_FPN:
5776 case OPY_TYPE_IMM_Q:
5777 case OPY_TYPE_IMM_W:
5778 case OPY_TYPE_IMM_L:
5779 return OP_TYPE_RN;
5780
5781 case OPY_TYPE_MEM1:
5782 case OPY_TYPE_MEM234:
5783 case OPY_TYPE_MEM5:
5784 case OPY_TYPE_MEM7:
5785 return OP_TYPE_MEM1;
5786
5787 case OPY_TYPE_MEM6:
5788 return OP_TYPE_MEM6;
5789
5790 default:
5791 gcc_unreachable ();
96fcacb7
MK
5792 }
5793 }
5794}
5795
c47b0cb4
MK
5796/* Implement op_mem attribute. */
5797enum attr_op_mem
5798m68k_sched_attr_op_mem (rtx insn)
5799{
96fcacb7
MK
5800 enum attr_op_type opx;
5801 enum attr_op_type opy;
c47b0cb4 5802
96fcacb7
MK
5803 opx = sched_get_opxy_mem_type (insn, true);
5804 opy = sched_get_opxy_mem_type (insn, false);
c47b0cb4 5805
96fcacb7 5806 if (opy == OP_TYPE_RN && opx == OP_TYPE_RN)
c47b0cb4
MK
5807 return OP_MEM_00;
5808
96fcacb7 5809 if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM1)
c47b0cb4
MK
5810 {
5811 switch (get_attr_opx_access (insn))
5812 {
5813 case OPX_ACCESS_R:
5814 return OP_MEM_10;
5815
5816 case OPX_ACCESS_W:
5817 return OP_MEM_01;
5818
5819 case OPX_ACCESS_RW:
5820 return OP_MEM_11;
5821
5822 default:
96fcacb7 5823 gcc_unreachable ();
c47b0cb4
MK
5824 }
5825 }
5826
96fcacb7 5827 if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM6)
c47b0cb4
MK
5828 {
5829 switch (get_attr_opx_access (insn))
5830 {
5831 case OPX_ACCESS_R:
5832 return OP_MEM_I0;
5833
5834 case OPX_ACCESS_W:
5835 return OP_MEM_0I;
5836
5837 case OPX_ACCESS_RW:
5838 return OP_MEM_I1;
5839
5840 default:
96fcacb7 5841 gcc_unreachable ();
c47b0cb4
MK
5842 }
5843 }
5844
96fcacb7 5845 if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_RN)
c47b0cb4
MK
5846 return OP_MEM_10;
5847
96fcacb7 5848 if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM1)
c47b0cb4
MK
5849 {
5850 switch (get_attr_opx_access (insn))
5851 {
5852 case OPX_ACCESS_W:
5853 return OP_MEM_11;
5854
5855 default:
96fcacb7
MK
5856 gcc_assert (!reload_completed);
5857 return OP_MEM_11;
c47b0cb4
MK
5858 }
5859 }
5860
96fcacb7 5861 if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM6)
c47b0cb4
MK
5862 {
5863 switch (get_attr_opx_access (insn))
5864 {
5865 case OPX_ACCESS_W:
5866 return OP_MEM_1I;
5867
5868 default:
96fcacb7
MK
5869 gcc_assert (!reload_completed);
5870 return OP_MEM_1I;
c47b0cb4
MK
5871 }
5872 }
5873
96fcacb7 5874 if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_RN)
c47b0cb4
MK
5875 return OP_MEM_I0;
5876
96fcacb7 5877 if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM1)
c47b0cb4
MK
5878 {
5879 switch (get_attr_opx_access (insn))
5880 {
5881 case OPX_ACCESS_W:
5882 return OP_MEM_I1;
5883
5884 default:
96fcacb7
MK
5885 gcc_assert (!reload_completed);
5886 return OP_MEM_I1;
c47b0cb4
MK
5887 }
5888 }
5889
96fcacb7
MK
5890 gcc_assert (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM6);
5891 gcc_assert (!reload_completed);
5892 return OP_MEM_I1;
c47b0cb4
MK
5893}
5894
96fcacb7
MK
5895/* Data for ColdFire V4 index bypass.
5896 Producer modifies register that is used as index in consumer with
5897 specified scale. */
5898static struct
b8c96320 5899{
96fcacb7
MK
5900 /* Producer instruction. */
5901 rtx pro;
826fadba 5902
96fcacb7
MK
5903 /* Consumer instruction. */
5904 rtx con;
b8c96320 5905
96fcacb7
MK
5906 /* Scale of indexed memory access within consumer.
5907 Or zero if bypass should not be effective at the moment. */
5908 int scale;
5909} sched_cfv4_bypass_data;
b8c96320
MK
5910
5911/* An empty state that is used in m68k_sched_adjust_cost. */
5912static state_t sched_adjust_cost_state;
5913
5914/* Implement adjust_cost scheduler hook.
5915 Return adjusted COST of dependency LINK between DEF_INSN and INSN. */
5916static int
5917m68k_sched_adjust_cost (rtx insn, rtx link ATTRIBUTE_UNUSED, rtx def_insn,
5918 int cost)
5919{
5920 int delay;
5921
5922 if (recog_memoized (def_insn) < 0
5923 || recog_memoized (insn) < 0)
5924 return cost;
5925
96fcacb7
MK
5926 if (sched_cfv4_bypass_data.scale == 1)
5927 /* Handle ColdFire V4 bypass for indexed address with 1x scale. */
5928 {
5929 /* haifa-sched.c: insn_cost () calls bypass_p () just before
5930 targetm.sched.adjust_cost (). Hence, we can be relatively sure
5931 that the data in sched_cfv4_bypass_data is up to date. */
5932 gcc_assert (sched_cfv4_bypass_data.pro == def_insn
5933 && sched_cfv4_bypass_data.con == insn);
5934
5935 if (cost < 3)
5936 cost = 3;
5937
5938 sched_cfv4_bypass_data.pro = NULL;
5939 sched_cfv4_bypass_data.con = NULL;
5940 sched_cfv4_bypass_data.scale = 0;
5941 }
5942 else
5943 gcc_assert (sched_cfv4_bypass_data.pro == NULL
5944 && sched_cfv4_bypass_data.con == NULL
5945 && sched_cfv4_bypass_data.scale == 0);
5946
b8c96320
MK
5947 /* Don't try to issue INSN earlier than DFA permits.
5948 This is especially useful for instructions that write to memory,
5949 as their true dependence (default) latency is better to be set to 0
5950 to workaround alias analysis limitations.
5951 This is, in fact, a machine independent tweak, so, probably,
5952 it should be moved to haifa-sched.c: insn_cost (). */
b8c96320
MK
5953 delay = min_insn_conflict_delay (sched_adjust_cost_state, def_insn, insn);
5954 if (delay > cost)
5955 cost = delay;
5956
5957 return cost;
5958}
5959
96fcacb7
MK
5960/* Return maximal number of insns that can be scheduled on a single cycle. */
5961static int
5962m68k_sched_issue_rate (void)
5963{
5964 switch (m68k_sched_cpu)
5965 {
5966 case CPU_CFV1:
5967 case CPU_CFV2:
5968 case CPU_CFV3:
5969 return 1;
5970
5971 case CPU_CFV4:
5972 return 2;
5973
5974 default:
5975 gcc_unreachable ();
5976 return 0;
5977 }
5978}
5979
/* Maximal length of instruction for current CPU.
   E.g. it is 3 for any ColdFire core.  */
static int max_insn_size;

/* Data to model instruction buffer of CPU.  */
struct _sched_ib
{
  /* True if the instruction buffer is modeled for the current CPU.  */
  bool enabled_p;

  /* Size of the instruction buffer in words.  */
  int size;

  /* Number of filled words in the instruction buffer.  */
  int filled;

  /* Additional information about instruction buffer for CPUs that have
     a buffer of instruction records, rather than a plain buffer
     of instruction words.  */
  struct _sched_ib_records
  {
    /* Size of buffer in records.  */
    int n_insns;

    /* Array to hold data on adjustments made to the size of the buffer.  */
    int *adjust;

    /* Index of the above array.  */
    int adjust_index;
  } records;

  /* An insn that reserves (marks empty) one word in the instruction buffer.  */
  rtx insn;
};

/* Singleton instance of the instruction buffer model.  */
static struct _sched_ib sched_ib;

/* ID of memory unit ("cf_mem1", looked up in
   m68k_sched_md_init_global ()).  */
static int sched_mem_unit_code;
6019
/* Implementation of the targetm.sched.variable_issue () hook.
   It is called after INSN was issued.  It returns the number of insns
   that can possibly get scheduled on the current cycle.
   It is used here to determine the effect of INSN on the instruction
   buffer.  */
static int
m68k_sched_variable_issue (FILE *sched_dump ATTRIBUTE_UNUSED,
			   int sched_verbose ATTRIBUTE_UNUSED,
			   rtx insn, int can_issue_more)
{
  /* Number of buffer words INSN consumes.  */
  int insn_size;

  if (recog_memoized (insn) >= 0 && get_attr_type (insn) != TYPE_IGNORE)
    {
      switch (m68k_sched_cpu)
	{
	case CPU_CFV1:
	case CPU_CFV2:
	  /* Plain word buffer: consume the insn's size in words.  */
	  insn_size = sched_get_attr_size_int (insn);
	  break;

	case CPU_CFV3:
	  insn_size = sched_get_attr_size_int (insn);

	  /* ColdFire V3 and V4 cores have instruction buffers that can
	     accumulate up to 8 instructions regardless of instructions'
	     sizes.  So we should take care not to "prefetch" 24 one-word
	     or 12 two-words instructions.
	     To model this behavior we temporarily decrease size of the
	     buffer by (max_insn_size - insn_size) for next 7 instructions.  */
	  {
	    int adjust;

	    adjust = max_insn_size - insn_size;
	    sched_ib.size -= adjust;

	    if (sched_ib.filled > sched_ib.size)
	      sched_ib.filled = sched_ib.size;

	    /* Remember the adjustment in the ring buffer so it can be
	       undone n_insns issues later.  */
	    sched_ib.records.adjust[sched_ib.records.adjust_index] = adjust;
	  }

	  ++sched_ib.records.adjust_index;
	  if (sched_ib.records.adjust_index == sched_ib.records.n_insns)
	    sched_ib.records.adjust_index = 0;

	  /* Undo adjustment we did 7 instructions ago.  */
	  sched_ib.size
	    += sched_ib.records.adjust[sched_ib.records.adjust_index];

	  break;

	case CPU_CFV4:
	  /* No buffer model for V4 (see m68k_sched_md_init_global).  */
	  gcc_assert (!sched_ib.enabled_p);
	  insn_size = 0;
	  break;

	default:
	  gcc_unreachable ();
	}

      if (insn_size > sched_ib.filled)
	/* Scheduling for register pressure does not always take DFA into
	   account.  Workaround instruction buffer not being filled enough.  */
	{
	  gcc_assert (sched_pressure == SCHED_PRESSURE_WEIGHTED);
	  insn_size = sched_ib.filled;
	}

      --can_issue_more;
    }
  else if (GET_CODE (PATTERN (insn)) == ASM_INPUT
	   || asm_noperands (PATTERN (insn)) >= 0)
    /* An inline asm of unknown size is assumed to drain the whole
       buffer.  */
    insn_size = sched_ib.filled;
  else
    insn_size = 0;

  sched_ib.filled -= insn_size;

  return can_issue_more;
}
6101
/* Return how many instructions should scheduler lookahead to choose the
   best one.  */
static int
m68k_sched_first_cycle_multipass_dfa_lookahead (void)
{
  int issue_rate = m68k_sched_issue_rate ();

  /* Look at one insn less than the issue rate.  */
  return issue_rate - 1;
}
6109
/* Implementation of targetm.sched.init_global () hook.
   It is invoked once per scheduling pass and is used here
   to initialize scheduler constants.  */
static void
m68k_sched_md_init_global (FILE *sched_dump ATTRIBUTE_UNUSED,
			   int sched_verbose ATTRIBUTE_UNUSED,
			   int n_insns ATTRIBUTE_UNUSED)
{
#ifdef ENABLE_CHECKING
  /* Check that all instructions have DFA reservations and
     that all instructions can be issued from a clean state.  */
  {
    rtx insn;
    state_t state;

    state = alloca (state_size ());

    for (insn = get_insns (); insn != NULL_RTX; insn = NEXT_INSN (insn))
      {
	if (INSN_P (insn) && recog_memoized (insn) >= 0)
	  {
	    gcc_assert (insn_has_dfa_reservation_p (insn));

	    state_reset (state);
	    if (state_transition (state, insn) >= 0)
	      gcc_unreachable ();
	  }
      }
  }
#endif

  /* Setup target cpu.  */

  /* ColdFire V4 has a set of features to keep its instruction buffer full
     (e.g., a separate memory bus for instructions) and, hence, we do not model
     buffer for this CPU.  */
  sched_ib.enabled_p = (m68k_sched_cpu != CPU_CFV4);

  switch (m68k_sched_cpu)
    {
    case CPU_CFV4:
      sched_ib.filled = 0;

      /* FALLTHRU */

    case CPU_CFV1:
    case CPU_CFV2:
      /* Word buffer; no per-record adjustment array needed.  */
      max_insn_size = 3;
      sched_ib.records.n_insns = 0;
      sched_ib.records.adjust = NULL;
      break;

    case CPU_CFV3:
      /* Record buffer of 8 entries (see m68k_sched_variable_issue).  */
      max_insn_size = 3;
      sched_ib.records.n_insns = 8;
      sched_ib.records.adjust = XNEWVEC (int, sched_ib.records.n_insns);
      break;

    default:
      gcc_unreachable ();
    }

  sched_mem_unit_code = get_cpu_unit_code ("cf_mem1");

  /* State used by m68k_sched_adjust_cost (); freed in
     m68k_sched_md_finish_global ().  */
  sched_adjust_cost_state = xmalloc (state_size ());
  state_reset (sched_adjust_cost_state);

  /* Materialize the insn that reserves one word of the instruction
     buffer (used in m68k_sched_dfa_post_advance_cycle).  */
  start_sequence ();
  emit_insn (gen_ib ());
  sched_ib.insn = get_insns ();
  end_sequence ();
}
6182
6183/* Scheduling pass is now finished. Free/reset static variables. */
6184static void
6185m68k_sched_md_finish_global (FILE *dump ATTRIBUTE_UNUSED,
6186 int verbose ATTRIBUTE_UNUSED)
6187{
826fadba 6188 sched_ib.insn = NULL;
b8c96320
MK
6189
6190 free (sched_adjust_cost_state);
6191 sched_adjust_cost_state = NULL;
6192
6193 sched_mem_unit_code = 0;
826fadba
MK
6194
6195 free (sched_ib.records.adjust);
6196 sched_ib.records.adjust = NULL;
6197 sched_ib.records.n_insns = 0;
6198 max_insn_size = 0;
b8c96320
MK
6199}
6200
7ecb00a6 6201/* Implementation of targetm.sched.init () hook.
b8c96320
MK
6202 It is invoked each time scheduler starts on the new block (basic block or
6203 extended basic block). */
6204static void
6205m68k_sched_md_init (FILE *sched_dump ATTRIBUTE_UNUSED,
6206 int sched_verbose ATTRIBUTE_UNUSED,
6207 int n_insns ATTRIBUTE_UNUSED)
6208{
826fadba
MK
6209 switch (m68k_sched_cpu)
6210 {
6211 case CPU_CFV1:
6212 case CPU_CFV2:
6213 sched_ib.size = 6;
6214 break;
6215
6216 case CPU_CFV3:
6217 sched_ib.size = sched_ib.records.n_insns * max_insn_size;
6218
6219 memset (sched_ib.records.adjust, 0,
6220 sched_ib.records.n_insns * sizeof (*sched_ib.records.adjust));
6221 sched_ib.records.adjust_index = 0;
6222 break;
6223
96fcacb7
MK
6224 case CPU_CFV4:
6225 gcc_assert (!sched_ib.enabled_p);
6226 sched_ib.size = 0;
6227 break;
6228
826fadba
MK
6229 default:
6230 gcc_unreachable ();
6231 }
6232
96fcacb7
MK
6233 if (sched_ib.enabled_p)
6234 /* haifa-sched.c: schedule_block () calls advance_cycle () just before
6235 the first cycle. Workaround that. */
6236 sched_ib.filled = -2;
b8c96320
MK
6237}
6238
6239/* Implementation of targetm.sched.dfa_pre_advance_cycle () hook.
6240 It is invoked just before current cycle finishes and is used here
6241 to track if instruction buffer got its two words this cycle. */
6242static void
6243m68k_sched_dfa_pre_advance_cycle (void)
6244{
96fcacb7
MK
6245 if (!sched_ib.enabled_p)
6246 return;
6247
b8c96320
MK
6248 if (!cpu_unit_reservation_p (curr_state, sched_mem_unit_code))
6249 {
826fadba 6250 sched_ib.filled += 2;
b8c96320 6251
826fadba
MK
6252 if (sched_ib.filled > sched_ib.size)
6253 sched_ib.filled = sched_ib.size;
b8c96320
MK
6254 }
6255}
6256
6257/* Implementation of targetm.sched.dfa_post_advance_cycle () hook.
6258 It is invoked just after new cycle begins and is used here
6259 to setup number of filled words in the instruction buffer so that
6260 instructions which won't have all their words prefetched would be
6261 stalled for a cycle. */
6262static void
6263m68k_sched_dfa_post_advance_cycle (void)
6264{
6265 int i;
b8c96320 6266
96fcacb7
MK
6267 if (!sched_ib.enabled_p)
6268 return;
6269
b8c96320
MK
6270 /* Setup number of prefetched instruction words in the instruction
6271 buffer. */
826fadba
MK
6272 i = max_insn_size - sched_ib.filled;
6273
6274 while (--i >= 0)
b8c96320 6275 {
826fadba 6276 if (state_transition (curr_state, sched_ib.insn) >= 0)
5f3b7d7c
MK
6277 /* Pick up scheduler state. */
6278 ++sched_ib.filled;
b8c96320
MK
6279 }
6280}
96fcacb7
MK
6281
6282/* Return X or Y (depending on OPX_P) operand of INSN,
6283 if it is an integer register, or NULL overwise. */
6284static rtx
6285sched_get_reg_operand (rtx insn, bool opx_p)
6286{
6287 rtx op = NULL;
6288
6289 if (opx_p)
6290 {
6291 if (get_attr_opx_type (insn) == OPX_TYPE_RN)
6292 {
6293 op = sched_get_operand (insn, true);
6294 gcc_assert (op != NULL);
6295
6296 if (!reload_completed && !REG_P (op))
6297 return NULL;
6298 }
6299 }
6300 else
6301 {
6302 if (get_attr_opy_type (insn) == OPY_TYPE_RN)
6303 {
6304 op = sched_get_operand (insn, false);
6305 gcc_assert (op != NULL);
6306
6307 if (!reload_completed && !REG_P (op))
6308 return NULL;
6309 }
6310 }
6311
6312 return op;
6313}
6314
6315/* Return true, if X or Y (depending on OPX_P) operand of INSN
6316 is a MEM. */
6317static bool
6318sched_mem_operand_p (rtx insn, bool opx_p)
6319{
6320 switch (sched_get_opxy_mem_type (insn, opx_p))
6321 {
6322 case OP_TYPE_MEM1:
6323 case OP_TYPE_MEM6:
6324 return true;
6325
6326 default:
6327 return false;
6328 }
6329}
6330
6331/* Return X or Y (depending on OPX_P) operand of INSN,
6332 if it is a MEM, or NULL overwise. */
6333static rtx
6334sched_get_mem_operand (rtx insn, bool must_read_p, bool must_write_p)
6335{
6336 bool opx_p;
6337 bool opy_p;
6338
6339 opx_p = false;
6340 opy_p = false;
6341
6342 if (must_read_p)
6343 {
6344 opx_p = true;
6345 opy_p = true;
6346 }
6347
6348 if (must_write_p)
6349 {
6350 opx_p = true;
6351 opy_p = false;
6352 }
6353
6354 if (opy_p && sched_mem_operand_p (insn, false))
6355 return sched_get_operand (insn, false);
6356
6357 if (opx_p && sched_mem_operand_p (insn, true))
6358 return sched_get_operand (insn, true);
6359
6360 gcc_unreachable ();
6361 return NULL;
6362}
6363
6364/* Return non-zero if PRO modifies register used as part of
6365 address in CON. */
6366int
6367m68k_sched_address_bypass_p (rtx pro, rtx con)
6368{
6369 rtx pro_x;
6370 rtx con_mem_read;
6371
6372 pro_x = sched_get_reg_operand (pro, true);
6373 if (pro_x == NULL)
6374 return 0;
6375
6376 con_mem_read = sched_get_mem_operand (con, true, false);
6377 gcc_assert (con_mem_read != NULL);
6378
6379 if (reg_mentioned_p (pro_x, con_mem_read))
6380 return 1;
6381
6382 return 0;
6383}
6384
6385/* Helper function for m68k_sched_indexed_address_bypass_p.
6386 if PRO modifies register used as index in CON,
6387 return scale of indexed memory access in CON. Return zero overwise. */
6388static int
6389sched_get_indexed_address_scale (rtx pro, rtx con)
6390{
6391 rtx reg;
6392 rtx mem;
6393 struct m68k_address address;
6394
6395 reg = sched_get_reg_operand (pro, true);
6396 if (reg == NULL)
6397 return 0;
6398
6399 mem = sched_get_mem_operand (con, true, false);
6400 gcc_assert (mem != NULL && MEM_P (mem));
6401
6402 if (!m68k_decompose_address (GET_MODE (mem), XEXP (mem, 0), reload_completed,
6403 &address))
6404 gcc_unreachable ();
6405
6406 if (REGNO (reg) == REGNO (address.index))
6407 {
6408 gcc_assert (address.scale != 0);
6409 return address.scale;
6410 }
6411
6412 return 0;
6413}
6414
6415/* Return non-zero if PRO modifies register used
6416 as index with scale 2 or 4 in CON. */
6417int
6418m68k_sched_indexed_address_bypass_p (rtx pro, rtx con)
6419{
6420 gcc_assert (sched_cfv4_bypass_data.pro == NULL
6421 && sched_cfv4_bypass_data.con == NULL
6422 && sched_cfv4_bypass_data.scale == 0);
6423
6424 switch (sched_get_indexed_address_scale (pro, con))
6425 {
6426 case 1:
6427 /* We can't have a variable latency bypass, so
6428 remember to adjust the insn cost in adjust_cost hook. */
6429 sched_cfv4_bypass_data.pro = pro;
6430 sched_cfv4_bypass_data.con = con;
6431 sched_cfv4_bypass_data.scale = 1;
6432 return 0;
6433
6434 case 2:
6435 case 4:
6436 return 1;
6437
6438 default:
6439 return 0;
6440 }
6441}
75df395f 6442
/* We generate a two-instructions program at M_TRAMP :
	movea.l &CHAIN_VALUE,%a0
	jmp FNADDR
   where %a0 can be modified by changing STATIC_CHAIN_REGNUM.  */

static void
m68k_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
{
  rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
  rtx mem;

  gcc_assert (ADDRESS_REGNO_P (STATIC_CHAIN_REGNUM));

  /* 0x207C is the opcode word of "movea.l #imm32,%a0"; the destination
     address-register field (bits 9-11) is patched with
     STATIC_CHAIN_REGNUM - 8.  The 32-bit immediate (CHAIN_VALUE)
     follows at offset 2.  */
  mem = adjust_address (m_tramp, HImode, 0);
  emit_move_insn (mem, GEN_INT(0x207C + ((STATIC_CHAIN_REGNUM-8) << 9)));
  mem = adjust_address (m_tramp, SImode, 2);
  emit_move_insn (mem, chain_value);

  /* 0x4EF9 is the opcode word of "jmp (xxx).L"; the 32-bit absolute
     target address (FNADDR) follows at offset 8.  */
  mem = adjust_address (m_tramp, HImode, 6);
  emit_move_insn (mem, GEN_INT(0x4EF9));
  mem = adjust_address (m_tramp, SImode, 8);
  emit_move_insn (mem, fnaddr);

  /* Target-configuration hook to finish the trampoline (e.g. flush
     caches over the written code).  */
  FINALIZE_TRAMPOLINE (XEXP (m_tramp, 0));
}
6468
079e7538
NF
6469/* On the 68000, the RTS insn cannot pop anything.
6470 On the 68010, the RTD insn may be used to pop them if the number
6471 of args is fixed, but if the number is variable then the caller
6472 must pop them all. RTD can't be used for library calls now
6473 because the library is compiled with the Unix compiler.
6474 Use of RTD is a selectable option, since it is incompatible with
6475 standard Unix calling sequences. If the option is not selected,
6476 the caller must always pop the args. */
6477
6478static int
6479m68k_return_pops_args (tree fundecl, tree funtype, int size)
6480{
6481 return ((TARGET_RTD
6482 && (!fundecl
6483 || TREE_CODE (fundecl) != IDENTIFIER_NODE)
f38958e8 6484 && (!stdarg_p (funtype)))
079e7538
NF
6485 ? size : 0);
6486}
6487
5efd84c5
NF
6488/* Make sure everything's fine if we *don't* have a given processor.
6489 This assumes that putting a register in fixed_regs will keep the
6490 compiler's mitts completely off it. We don't bother to zero it out
6491 of register classes. */
6492
6493static void
6494m68k_conditional_register_usage (void)
6495{
6496 int i;
6497 HARD_REG_SET x;
6498 if (!TARGET_HARD_FLOAT)
6499 {
6500 COPY_HARD_REG_SET (x, reg_class_contents[(int)FP_REGS]);
6501 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
6502 if (TEST_HARD_REG_BIT (x, i))
6503 fixed_regs[i] = call_used_regs[i] = 1;
6504 }
6505 if (flag_pic)
6506 fixed_regs[PIC_REG] = call_used_regs[PIC_REG] = 1;
6507}
6508
/* Register out-of-line __sync_* library functions for atomic
   operations up to UNITS_PER_WORD bytes wide.  */
static void
m68k_init_sync_libfuncs (void)
{
  init_sync_libfuncs (UNITS_PER_WORD);
}
6514
175aed00
AS
6515/* Implements EPILOGUE_USES. All registers are live on exit from an
6516 interrupt routine. */
6517bool
6518m68k_epilogue_uses (int regno ATTRIBUTE_UNUSED)
6519{
6520 return (reload_completed
6521 && (m68k_get_function_kind (current_function_decl)
6522 == m68k_fk_interrupt_handler));
6523}
6524
75df395f 6525#include "gt-m68k.h"