]> git.ipfire.org Git - thirdparty/gcc.git/blame - gcc/config/m68k/m68k.c
re PR tree-optimization/38156 (gcc.dg/tree-ssa/update-unswitch-1.c fails when compile...
[thirdparty/gcc.git] / gcc / config / m68k / m68k.c
CommitLineData
79e68feb 1/* Subroutines for insn-output.c for Motorola 68000 family.
8636be86 2 Copyright (C) 1987, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
6ae89ea8 3 2001, 2003, 2004, 2005, 2006, 2007, 2008
4592bdcb 4 Free Software Foundation, Inc.
79e68feb 5
7ec022b2 6This file is part of GCC.
79e68feb 7
7ec022b2 8GCC is free software; you can redistribute it and/or modify
79e68feb 9it under the terms of the GNU General Public License as published by
2f83c7d6 10the Free Software Foundation; either version 3, or (at your option)
79e68feb
RS
11any later version.
12
7ec022b2 13GCC is distributed in the hope that it will be useful,
79e68feb
RS
14but WITHOUT ANY WARRANTY; without even the implied warranty of
15MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16GNU General Public License for more details.
17
18You should have received a copy of the GNU General Public License
2f83c7d6
NC
19along with GCC; see the file COPYING3. If not see
20<http://www.gnu.org/licenses/>. */
79e68feb 21
79e68feb 22#include "config.h"
f5220a5d 23#include "system.h"
4977bab6
ZW
24#include "coretypes.h"
25#include "tm.h"
da932f04 26#include "tree.h"
79e68feb 27#include "rtl.h"
49ad7cfa 28#include "function.h"
79e68feb
RS
29#include "regs.h"
30#include "hard-reg-set.h"
31#include "real.h"
32#include "insn-config.h"
33#include "conditions.h"
79e68feb
RS
34#include "output.h"
35#include "insn-attr.h"
1d8eaa6b 36#include "recog.h"
f5220a5d 37#include "toplev.h"
6d5f49b2
RH
38#include "expr.h"
39#include "reload.h"
5505f548 40#include "tm_p.h"
672a6f42
NB
41#include "target.h"
42#include "target-def.h"
2cc07db4 43#include "debug.h"
79e68feb 44#include "flags.h"
6fb5fa3c 45#include "df.h"
b8c96320
MK
46/* ??? Need to add a dependency between m68k.o and sched-int.h. */
47#include "sched-int.h"
48#include "insn-codes.h"
79e68feb 49
a4e9467d
RZ
50enum reg_class regno_reg_class[] =
51{
52 DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
53 DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
54 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
55 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
56 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
57 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
58 ADDR_REGS
59};
/* Saving integer registers with a single moveml only pays off once at
   least this many registers are involved; below that, two movel
   instructions are about 15% faster on the 68020 and 68030 at no cost
   in code size.  */
#define MIN_MOVEM_REGS 3

/* Likewise the smallest number of floating-point registers for which
   an fmovem is preferred over individual moves.  */
#define MIN_FMOVEM_REGS 1
ff482c8d 72/* Structure describing stack frame layout. */
3d74bc09
BI
73struct m68k_frame
74{
75 /* Stack pointer to frame pointer offset. */
48ed72a4 76 HOST_WIDE_INT offset;
3d74bc09
BI
77
78 /* Offset of FPU registers. */
79 HOST_WIDE_INT foffset;
80
81 /* Frame size in bytes (rounded up). */
48ed72a4 82 HOST_WIDE_INT size;
3d74bc09
BI
83
84 /* Data and address register. */
48ed72a4
PB
85 int reg_no;
86 unsigned int reg_mask;
3d74bc09
BI
87
88 /* FPU registers. */
48ed72a4
PB
89 int fpu_no;
90 unsigned int fpu_mask;
3d74bc09
BI
91
92 /* Offsets relative to ARG_POINTER. */
48ed72a4
PB
93 HOST_WIDE_INT frame_pointer_offset;
94 HOST_WIDE_INT stack_pointer_offset;
3d74bc09
BI
95
96 /* Function which the above information refers to. */
97 int funcdef_no;
48ed72a4
PB
98};
99
3d74bc09
BI
100/* Current frame information calculated by m68k_compute_frame_layout(). */
101static struct m68k_frame current_frame;
102
fc2241eb
RS
103/* Structure describing an m68k address.
104
105 If CODE is UNKNOWN, the address is BASE + INDEX * SCALE + OFFSET,
106 with null fields evaluating to 0. Here:
107
108 - BASE satisfies m68k_legitimate_base_reg_p
109 - INDEX satisfies m68k_legitimate_index_reg_p
110 - OFFSET satisfies m68k_legitimate_constant_address_p
111
112 INDEX is either HImode or SImode. The other fields are SImode.
113
114 If CODE is PRE_DEC, the address is -(BASE). If CODE is POST_INC,
115 the address is (BASE)+. */
116struct m68k_address {
117 enum rtx_code code;
118 rtx base;
119 rtx index;
120 rtx offset;
121 int scale;
122};
123
b8c96320 124static int m68k_sched_adjust_cost (rtx, rtx, rtx, int);
96fcacb7 125static int m68k_sched_issue_rate (void);
b8c96320
MK
126static int m68k_sched_variable_issue (FILE *, int, rtx, int);
127static void m68k_sched_md_init_global (FILE *, int, int);
128static void m68k_sched_md_finish_global (FILE *, int);
129static void m68k_sched_md_init (FILE *, int, int);
130static void m68k_sched_dfa_pre_advance_cycle (void);
131static void m68k_sched_dfa_post_advance_cycle (void);
96fcacb7 132static int m68k_sched_first_cycle_multipass_dfa_lookahead (void);
b8c96320 133
4af06170 134static bool m68k_handle_option (size_t, const char *, int);
8a4a2253
BI
135static rtx find_addr_reg (rtx);
136static const char *singlemove_string (rtx *);
45849738 137#ifdef M68K_TARGET_COFF
c18a5b6c 138static void m68k_coff_asm_named_section (const char *, unsigned int, tree);
45849738 139#endif /* M68K_TARGET_COFF */
8a4a2253
BI
140static void m68k_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
141 HOST_WIDE_INT, tree);
8636be86 142static rtx m68k_struct_value_rtx (tree, int);
48ed72a4
PB
143static tree m68k_handle_fndecl_attribute (tree *node, tree name,
144 tree args, int flags,
145 bool *no_add_attrs);
3d74bc09 146static void m68k_compute_frame_layout (void);
48ed72a4 147static bool m68k_save_reg (unsigned int regno, bool interrupt_handler);
f7e70894 148static bool m68k_ok_for_sibcall_p (tree, tree);
f40751dd 149static bool m68k_rtx_costs (rtx, int, int, int *, bool);
1c445f03 150#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
511e41e5 151static bool m68k_return_in_memory (const_tree, const_tree);
1c445f03 152#endif
/* Specify the identification number of the library being built.  */
const char *m68k_library_id_string = "_current_shared_library_a5_offset_";

/* Nonzero if the last compare/test insn had FP operands.  The
   sCC expanders peek at this to determine what to do for the
   68060, which has no fsCC instructions.  */
int m68k_last_compare_had_fp_operands;
/* Initialize the GCC target structure.  */

/* Select the assembler directives used for emitting integer data,
   keyed off the assembler dialect chosen by INT_OP_GROUP.  */
#if INT_OP_GROUP == INT_OP_DOT_WORD
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
#endif

#if INT_OP_GROUP == INT_OP_NO_DOT
#undef TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tbyte\t"
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tshort\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tlong\t"
#endif

#if INT_OP_GROUP == INT_OP_DC
#undef TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tdc.b\t"
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tdc.w\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tdc.l\t"
#endif

/* Unaligned data uses the same directives as aligned data.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
/* Wire the m68k implementations into the target hook vector.  */

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK m68k_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_ASM_FILE_START_APP_OFF
#define TARGET_ASM_FILE_START_APP_OFF true

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST m68k_sched_adjust_cost

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE m68k_sched_issue_rate

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE m68k_sched_variable_issue

#undef TARGET_SCHED_INIT_GLOBAL
#define TARGET_SCHED_INIT_GLOBAL m68k_sched_md_init_global

#undef TARGET_SCHED_FINISH_GLOBAL
#define TARGET_SCHED_FINISH_GLOBAL m68k_sched_md_finish_global

#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT m68k_sched_md_init

#undef TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE
#define TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE m68k_sched_dfa_pre_advance_cycle

#undef TARGET_SCHED_DFA_POST_ADVANCE_CYCLE
#define TARGET_SCHED_DFA_POST_ADVANCE_CYCLE m68k_sched_dfa_post_advance_cycle

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
  m68k_sched_first_cycle_multipass_dfa_lookahead

#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION m68k_handle_option

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS m68k_rtx_costs

#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE m68k_attribute_table

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX m68k_struct_value_rtx

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM m68k_illegitimate_symbolic_constant_p

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL m68k_ok_for_sibcall_p

#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY m68k_return_in_memory
#endif
48ed72a4
PB
255static const struct attribute_spec m68k_attribute_table[] =
256{
257 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
2bccb817 258 { "interrupt", 0, 0, true, false, false, m68k_handle_fndecl_attribute },
48ed72a4 259 { "interrupt_handler", 0, 0, true, false, false, m68k_handle_fndecl_attribute },
a4242737 260 { "interrupt_thread", 0, 0, true, false, false, m68k_handle_fndecl_attribute },
48ed72a4
PB
261 { NULL, 0, 0, false, false, false, NULL }
262};
263
f6897b10 264struct gcc_target targetm = TARGET_INITIALIZER;
672a6f42 265\f
/* Base flags for 68k ISAs.  */
#define FL_FOR_isa_00    FL_ISA_68000
#define FL_FOR_isa_10    (FL_FOR_isa_00 | FL_ISA_68010)
/* FL_68881 controls the default setting of -m68881.  gcc has traditionally
   generated 68881 code for 68020 and 68030 targets unless explicitly told
   not to.  */
#define FL_FOR_isa_20    (FL_FOR_isa_10 | FL_ISA_68020 \
			  | FL_BITFIELD | FL_68881)
#define FL_FOR_isa_40    (FL_FOR_isa_20 | FL_ISA_68040)
#define FL_FOR_isa_cpu32 (FL_FOR_isa_10 | FL_ISA_68020)

/* Base flags for ColdFire ISAs.  */
#define FL_FOR_isa_a     (FL_COLDFIRE | FL_ISA_A)
#define FL_FOR_isa_aplus (FL_FOR_isa_a | FL_ISA_APLUS | FL_CF_USP)
/* Note ISA_B doesn't necessarily include USP (user stack pointer) support.  */
#define FL_FOR_isa_b     (FL_FOR_isa_a | FL_ISA_B | FL_CF_HWDIV)
/* ISA_C is not upwardly compatible with ISA_B.  */
#define FL_FOR_isa_c     (FL_FOR_isa_a | FL_ISA_C | FL_CF_USP)

/* Identifiers for the instruction-set variants this port knows about.  */
enum m68k_isa
{
  /* Traditional 68000 instruction sets.  */
  isa_00,
  isa_10,
  isa_20,
  isa_40,
  isa_cpu32,
  /* ColdFire instruction set variants.  */
  isa_a,
  isa_aplus,
  isa_b,
  isa_c,
  isa_max
};
301/* Information about one of the -march, -mcpu or -mtune arguments. */
302struct m68k_target_selection
303{
304 /* The argument being described. */
305 const char *name;
306
307 /* For -mcpu, this is the device selected by the option.
308 For -mtune and -march, it is a representative device
309 for the microarchitecture or ISA respectively. */
310 enum target_device device;
311
312 /* The M68K_DEVICE fields associated with DEVICE. See the comment
313 in m68k-devices.def for details. FAMILY is only valid for -mcpu. */
314 const char *family;
315 enum uarch_type microarch;
316 enum m68k_isa isa;
317 unsigned long flags;
318};
319
320/* A list of all devices in m68k-devices.def. Used for -mcpu selection. */
321static const struct m68k_target_selection all_devices[] =
322{
323#define M68K_DEVICE(NAME,ENUM_VALUE,FAMILY,MULTILIB,MICROARCH,ISA,FLAGS) \
324 { NAME, ENUM_VALUE, FAMILY, u##MICROARCH, ISA, FLAGS | FL_FOR_##ISA },
325#include "m68k-devices.def"
326#undef M68K_DEVICE
327 { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
328};
329
330/* A list of all ISAs, mapping each one to a representative device.
331 Used for -march selection. */
332static const struct m68k_target_selection all_isas[] =
333{
334 { "68000", m68000, NULL, u68000, isa_00, FL_FOR_isa_00 },
335 { "68010", m68010, NULL, u68010, isa_10, FL_FOR_isa_10 },
336 { "68020", m68020, NULL, u68020, isa_20, FL_FOR_isa_20 },
337 { "68030", m68030, NULL, u68030, isa_20, FL_FOR_isa_20 },
338 { "68040", m68040, NULL, u68040, isa_40, FL_FOR_isa_40 },
339 { "68060", m68060, NULL, u68060, isa_40, FL_FOR_isa_40 },
340 { "cpu32", cpu32, NULL, ucpu32, isa_20, FL_FOR_isa_cpu32 },
341 { "isaa", mcf5206e, NULL, ucfv2, isa_a, (FL_FOR_isa_a
342 | FL_CF_HWDIV) },
343 { "isaaplus", mcf5271, NULL, ucfv2, isa_aplus, (FL_FOR_isa_aplus
344 | FL_CF_HWDIV) },
345 { "isab", mcf5407, NULL, ucfv4, isa_b, FL_FOR_isa_b },
8c5c99dc
MK
346 { "isac", unk_device, NULL, ucfv4, isa_c, (FL_FOR_isa_c
347 | FL_CF_HWDIV) },
900ec02d
JB
348 { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
349};
350
351/* A list of all microarchitectures, mapping each one to a representative
352 device. Used for -mtune selection. */
353static const struct m68k_target_selection all_microarchs[] =
354{
355 { "68000", m68000, NULL, u68000, isa_00, FL_FOR_isa_00 },
356 { "68010", m68010, NULL, u68010, isa_10, FL_FOR_isa_10 },
357 { "68020", m68020, NULL, u68020, isa_20, FL_FOR_isa_20 },
358 { "68020-40", m68020, NULL, u68020_40, isa_20, FL_FOR_isa_20 },
359 { "68020-60", m68020, NULL, u68020_60, isa_20, FL_FOR_isa_20 },
360 { "68030", m68030, NULL, u68030, isa_20, FL_FOR_isa_20 },
361 { "68040", m68040, NULL, u68040, isa_40, FL_FOR_isa_40 },
362 { "68060", m68060, NULL, u68060, isa_40, FL_FOR_isa_40 },
363 { "cpu32", cpu32, NULL, ucpu32, isa_20, FL_FOR_isa_cpu32 },
8c5c99dc 364 { "cfv1", mcf51qe, NULL, ucfv1, isa_c, FL_FOR_isa_c },
900ec02d
JB
365 { "cfv2", mcf5206, NULL, ucfv2, isa_a, FL_FOR_isa_a },
366 { "cfv3", mcf5307, NULL, ucfv3, isa_a, (FL_FOR_isa_a
367 | FL_CF_HWDIV) },
368 { "cfv4", mcf5407, NULL, ucfv4, isa_b, FL_FOR_isa_b },
369 { "cfv4e", mcf547x, NULL, ucfv4e, isa_b, (FL_FOR_isa_b
370 | FL_CF_USP
371 | FL_CF_EMAC
372 | FL_CF_FPU) },
373 { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
374};
375\f
376/* The entries associated with the -mcpu, -march and -mtune settings,
377 or null for options that have not been used. */
378const struct m68k_target_selection *m68k_cpu_entry;
379const struct m68k_target_selection *m68k_arch_entry;
380const struct m68k_target_selection *m68k_tune_entry;
381
382/* Which CPU we are generating code for. */
383enum target_device m68k_cpu;
384
385/* Which microarchitecture to tune for. */
386enum uarch_type m68k_tune;
387
388/* Which FPU to use. */
389enum fpu_type m68k_fpu;
4af06170 390
900ec02d
JB
391/* The set of FL_* flags that apply to the target processor. */
392unsigned int m68k_cpu_flags;
29ca003a 393
03b3e271
KH
394/* The set of FL_* flags that apply to the processor to be tuned for. */
395unsigned int m68k_tune_flags;
396
29ca003a
RS
397/* Asm templates for calling or jumping to an arbitrary symbolic address,
398 or NULL if such calls or jumps are not supported. The address is held
399 in operand 0. */
400const char *m68k_symbolic_call;
401const char *m68k_symbolic_jump;
c47b0cb4
MK
402
403/* Enum variable that corresponds to m68k_symbolic_call values. */
404enum M68K_SYMBOLIC_CALL m68k_symbolic_call_var;
405
900ec02d
JB
406\f
407/* See whether TABLE has an entry with name NAME. Return true and
408 store the entry in *ENTRY if so, otherwise return false and
409 leave *ENTRY alone. */
410
411static bool
412m68k_find_selection (const struct m68k_target_selection **entry,
413 const struct m68k_target_selection *table,
414 const char *name)
415{
416 size_t i;
417
418 for (i = 0; table[i].name; i++)
419 if (strcmp (table[i].name, name) == 0)
420 {
421 *entry = table + i;
422 return true;
423 }
424 return false;
425}
4af06170
RS
426
427/* Implement TARGET_HANDLE_OPTION. */
428
429static bool
430m68k_handle_option (size_t code, const char *arg, int value)
431{
432 switch (code)
433 {
900ec02d
JB
434 case OPT_march_:
435 return m68k_find_selection (&m68k_arch_entry, all_isas, arg);
436
437 case OPT_mcpu_:
438 return m68k_find_selection (&m68k_cpu_entry, all_devices, arg);
439
440 case OPT_mtune_:
441 return m68k_find_selection (&m68k_tune_entry, all_microarchs, arg);
442
4af06170 443 case OPT_m5200:
900ec02d 444 return m68k_find_selection (&m68k_cpu_entry, all_devices, "5206");
4af06170
RS
445
446 case OPT_m5206e:
900ec02d 447 return m68k_find_selection (&m68k_cpu_entry, all_devices, "5206e");
4af06170
RS
448
449 case OPT_m528x:
900ec02d 450 return m68k_find_selection (&m68k_cpu_entry, all_devices, "528x");
4af06170
RS
451
452 case OPT_m5307:
900ec02d 453 return m68k_find_selection (&m68k_cpu_entry, all_devices, "5307");
4af06170
RS
454
455 case OPT_m5407:
900ec02d 456 return m68k_find_selection (&m68k_cpu_entry, all_devices, "5407");
4af06170 457
dcc21c4c 458 case OPT_mcfv4e:
900ec02d 459 return m68k_find_selection (&m68k_cpu_entry, all_devices, "547x");
dcc21c4c 460
4af06170
RS
461 case OPT_m68000:
462 case OPT_mc68000:
900ec02d 463 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68000");
4af06170 464
3197c489 465 case OPT_m68010:
900ec02d 466 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68010");
3197c489 467
4af06170
RS
468 case OPT_m68020:
469 case OPT_mc68020:
900ec02d 470 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68020");
4af06170
RS
471
472 case OPT_m68020_40:
900ec02d
JB
473 return (m68k_find_selection (&m68k_tune_entry, all_microarchs,
474 "68020-40")
475 && m68k_find_selection (&m68k_cpu_entry, all_devices, "68020"));
4af06170
RS
476
477 case OPT_m68020_60:
900ec02d
JB
478 return (m68k_find_selection (&m68k_tune_entry, all_microarchs,
479 "68020-60")
480 && m68k_find_selection (&m68k_cpu_entry, all_devices, "68020"));
4af06170
RS
481
482 case OPT_m68030:
900ec02d 483 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68030");
4af06170
RS
484
485 case OPT_m68040:
900ec02d 486 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68040");
4af06170
RS
487
488 case OPT_m68060:
900ec02d 489 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68060");
4af06170
RS
490
491 case OPT_m68302:
900ec02d 492 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68302");
4af06170
RS
493
494 case OPT_m68332:
495 case OPT_mcpu32:
900ec02d 496 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68332");
4af06170
RS
497
498 case OPT_mshared_library_id_:
499 if (value > MAX_LIBRARY_ID)
500 error ("-mshared-library-id=%s is not between 0 and %d",
501 arg, MAX_LIBRARY_ID);
502 else
5ead67f6
KG
503 {
504 char *tmp;
505 asprintf (&tmp, "%d", (value * -4) - 4);
506 m68k_library_id_string = tmp;
507 }
4af06170
RS
508 return true;
509
510 default:
511 return true;
512 }
513}
514
ef1dbfb0
RK
515/* Sometimes certain combinations of command options do not make
516 sense on a particular target machine. You can define a macro
517 `OVERRIDE_OPTIONS' to take account of this. This macro, if
518 defined, is executed once just after all the command options have
519 been parsed.
520
521 Don't use this macro to turn on various extra optimizations for
522 `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
523
524void
8a4a2253 525override_options (void)
ef1dbfb0 526{
900ec02d
JB
527 const struct m68k_target_selection *entry;
528 unsigned long target_mask;
529
530 /* User can choose:
531
532 -mcpu=
533 -march=
534 -mtune=
535
536 -march=ARCH should generate code that runs any processor
537 implementing architecture ARCH. -mcpu=CPU should override -march
538 and should generate code that runs on processor CPU, making free
539 use of any instructions that CPU understands. -mtune=UARCH applies
9f5ed61a 540 on top of -mcpu or -march and optimizes the code for UARCH. It does
900ec02d
JB
541 not change the target architecture. */
542 if (m68k_cpu_entry)
543 {
544 /* Complain if the -march setting is for a different microarchitecture,
545 or includes flags that the -mcpu setting doesn't. */
546 if (m68k_arch_entry
547 && (m68k_arch_entry->microarch != m68k_cpu_entry->microarch
548 || (m68k_arch_entry->flags & ~m68k_cpu_entry->flags) != 0))
549 warning (0, "-mcpu=%s conflicts with -march=%s",
550 m68k_cpu_entry->name, m68k_arch_entry->name);
551
552 entry = m68k_cpu_entry;
553 }
554 else
555 entry = m68k_arch_entry;
556
557 if (!entry)
558 entry = all_devices + TARGET_CPU_DEFAULT;
559
560 m68k_cpu_flags = entry->flags;
561
562 /* Use the architecture setting to derive default values for
563 certain flags. */
564 target_mask = 0;
8785d88c
KH
565
566 /* ColdFire is lenient about alignment. */
567 if (!TARGET_COLDFIRE)
568 target_mask |= MASK_STRICT_ALIGNMENT;
569
900ec02d
JB
570 if ((m68k_cpu_flags & FL_BITFIELD) != 0)
571 target_mask |= MASK_BITFIELD;
572 if ((m68k_cpu_flags & FL_CF_HWDIV) != 0)
573 target_mask |= MASK_CF_HWDIV;
574 if ((m68k_cpu_flags & (FL_68881 | FL_CF_FPU)) != 0)
575 target_mask |= MASK_HARD_FLOAT;
576 target_flags |= target_mask & ~target_flags_explicit;
577
578 /* Set the directly-usable versions of the -mcpu and -mtune settings. */
579 m68k_cpu = entry->device;
580 if (m68k_tune_entry)
03b3e271
KH
581 {
582 m68k_tune = m68k_tune_entry->microarch;
583 m68k_tune_flags = m68k_tune_entry->flags;
584 }
900ec02d
JB
585#ifdef M68K_DEFAULT_TUNE
586 else if (!m68k_cpu_entry && !m68k_arch_entry)
03b3e271
KH
587 {
588 enum target_device dev;
589 dev = all_microarchs[M68K_DEFAULT_TUNE].device;
590 m68k_tune_flags = all_devices[dev]->flags;
591 }
900ec02d
JB
592#endif
593 else
03b3e271
KH
594 {
595 m68k_tune = entry->microarch;
596 m68k_tune_flags = entry->flags;
597 }
900ec02d
JB
598
599 /* Set the type of FPU. */
600 m68k_fpu = (!TARGET_HARD_FLOAT ? FPUTYPE_NONE
601 : (m68k_cpu_flags & FL_COLDFIRE) != 0 ? FPUTYPE_COLDFIRE
602 : FPUTYPE_68881);
603
a2ef3db7
BI
604 /* Sanity check to ensure that msep-data and mid-sahred-library are not
605 * both specified together. Doing so simply doesn't make sense.
606 */
607 if (TARGET_SEP_DATA && TARGET_ID_SHARED_LIBRARY)
608 error ("cannot specify both -msep-data and -mid-shared-library");
609
610 /* If we're generating code for a separate A5 relative data segment,
611 * we've got to enable -fPIC as well. This might be relaxable to
612 * -fpic but it hasn't been tested properly.
613 */
614 if (TARGET_SEP_DATA || TARGET_ID_SHARED_LIBRARY)
615 flag_pic = 2;
616
abe92a04
RS
617 /* -mpcrel -fPIC uses 32-bit pc-relative displacements. Raise an
618 error if the target does not support them. */
619 if (TARGET_PCREL && !TARGET_68020 && flag_pic == 2)
620 error ("-mpcrel -fPIC is not currently supported on selected cpu");
adf2ac37
RH
621
622 /* ??? A historic way of turning on pic, or is this intended to
623 be an embedded thing that doesn't have the same name binding
624 significance that it does on hosted ELF systems? */
625 if (TARGET_PCREL && flag_pic == 0)
626 flag_pic = 1;
627
29ca003a
RS
628 if (!flag_pic)
629 {
c47b0cb4
MK
630 m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_JSR;
631
29ca003a 632 m68k_symbolic_jump = "jra %a0";
29ca003a
RS
633 }
634 else if (TARGET_ID_SHARED_LIBRARY)
635 /* All addresses must be loaded from the GOT. */
636 ;
4e2b26aa 637 else if (TARGET_68020 || TARGET_ISAB || TARGET_ISAC)
29ca003a
RS
638 {
639 if (TARGET_PCREL)
c47b0cb4 640 m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_C;
4e2b26aa 641 else
c47b0cb4
MK
642 m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_P;
643
4e2b26aa
NS
644 if (TARGET_ISAC)
645 /* No unconditional long branch */;
646 else if (TARGET_PCREL)
da398bb5 647 m68k_symbolic_jump = "bra%.l %c0";
29ca003a 648 else
da398bb5 649 m68k_symbolic_jump = "bra%.l %p0";
29ca003a
RS
650 /* Turn off function cse if we are doing PIC. We always want
651 function call to be done as `bsr foo@PLTPC'. */
652 /* ??? It's traditional to do this for -mpcrel too, but it isn't
653 clear how intentional that is. */
654 flag_no_function_cse = 1;
655 }
adf2ac37 656
c47b0cb4
MK
657 switch (m68k_symbolic_call_var)
658 {
659 case M68K_SYMBOLIC_CALL_JSR:
c47b0cb4 660 m68k_symbolic_call = "jsr %a0";
c47b0cb4
MK
661 break;
662
663 case M68K_SYMBOLIC_CALL_BSR_C:
da398bb5 664 m68k_symbolic_call = "bsr%.l %c0";
c47b0cb4
MK
665 break;
666
667 case M68K_SYMBOLIC_CALL_BSR_P:
da398bb5 668 m68k_symbolic_call = "bsr%.l %p0";
c47b0cb4
MK
669 break;
670
671 case M68K_SYMBOLIC_CALL_NONE:
672 gcc_assert (m68k_symbolic_call == NULL);
673 break;
674
675 default:
676 gcc_unreachable ();
677 }
678
aaca7021
RZ
679#ifndef ASM_OUTPUT_ALIGN_WITH_NOP
680 if (align_labels > 2)
681 {
682 warning (0, "-falign-labels=%d is not supported", align_labels);
683 align_labels = 0;
684 }
685 if (align_loops > 2)
686 {
687 warning (0, "-falign-loops=%d is not supported", align_loops);
688 align_loops = 0;
689 }
690#endif
691
adf2ac37 692 SUBTARGET_OVERRIDE_OPTIONS;
c47b0cb4
MK
693
694 /* Setup scheduling options. */
826fadba
MK
695 if (TUNE_CFV1)
696 m68k_sched_cpu = CPU_CFV1;
697 else if (TUNE_CFV2)
698 m68k_sched_cpu = CPU_CFV2;
699 else if (TUNE_CFV3)
700 m68k_sched_cpu = CPU_CFV3;
96fcacb7
MK
701 else if (TUNE_CFV4)
702 m68k_sched_cpu = CPU_CFV4;
c47b0cb4
MK
703 else
704 {
705 m68k_sched_cpu = CPU_UNKNOWN;
706 flag_schedule_insns = 0;
707 flag_schedule_insns_after_reload = 0;
708 flag_modulo_sched = 0;
709 }
826fadba
MK
710
711 if (m68k_sched_cpu != CPU_UNKNOWN)
712 {
713 if ((m68k_cpu_flags & (FL_CF_EMAC | FL_CF_EMAC_B)) != 0)
714 m68k_sched_mac = MAC_CF_EMAC;
715 else if ((m68k_cpu_flags & FL_CF_MAC) != 0)
716 m68k_sched_mac = MAC_CF_MAC;
717 else
718 m68k_sched_mac = MAC_NO;
719 }
ef1dbfb0 720}
7eb4f044
NS
721
722/* Generate a macro of the form __mPREFIX_cpu_NAME, where PREFIX is the
723 given argument and NAME is the argument passed to -mcpu. Return NULL
724 if -mcpu was not passed. */
725
726const char *
727m68k_cpp_cpu_ident (const char *prefix)
728{
729 if (!m68k_cpu_entry)
730 return NULL;
731 return concat ("__m", prefix, "_cpu_", m68k_cpu_entry->name, NULL);
732}
733
734/* Generate a macro of the form __mPREFIX_family_NAME, where PREFIX is the
735 given argument and NAME is the name of the representative device for
736 the -mcpu argument's family. Return NULL if -mcpu was not passed. */
737
738const char *
739m68k_cpp_cpu_family (const char *prefix)
740{
741 if (!m68k_cpu_entry)
742 return NULL;
743 return concat ("__m", prefix, "_family_", m68k_cpu_entry->family, NULL);
744}
79e68feb 745\f
2bccb817
KH
746/* Return m68k_fk_interrupt_handler if FUNC has an "interrupt" or
747 "interrupt_handler" attribute and interrupt_thread if FUNC has an
748 "interrupt_thread" attribute. Otherwise, return
749 m68k_fk_normal_function. */
a4242737
KH
750
751enum m68k_function_kind
752m68k_get_function_kind (tree func)
48ed72a4
PB
753{
754 tree a;
755
fa157b28
NS
756 gcc_assert (TREE_CODE (func) == FUNCTION_DECL);
757
2bccb817
KH
758 a = lookup_attribute ("interrupt", DECL_ATTRIBUTES (func));
759 if (a != NULL_TREE)
760 return m68k_fk_interrupt_handler;
761
48ed72a4 762 a = lookup_attribute ("interrupt_handler", DECL_ATTRIBUTES (func));
a4242737
KH
763 if (a != NULL_TREE)
764 return m68k_fk_interrupt_handler;
765
766 a = lookup_attribute ("interrupt_thread", DECL_ATTRIBUTES (func));
767 if (a != NULL_TREE)
768 return m68k_fk_interrupt_thread;
769
770 return m68k_fk_normal_function;
48ed72a4
PB
771}
772
773/* Handle an attribute requiring a FUNCTION_DECL; arguments as in
774 struct attribute_spec.handler. */
775static tree
776m68k_handle_fndecl_attribute (tree *node, tree name,
777 tree args ATTRIBUTE_UNUSED,
778 int flags ATTRIBUTE_UNUSED,
779 bool *no_add_attrs)
780{
781 if (TREE_CODE (*node) != FUNCTION_DECL)
782 {
5c498b10 783 warning (OPT_Wattributes, "%qs attribute only applies to functions",
48ed72a4
PB
784 IDENTIFIER_POINTER (name));
785 *no_add_attrs = true;
786 }
787
a4242737
KH
788 if (m68k_get_function_kind (*node) != m68k_fk_normal_function)
789 {
790 error ("multiple interrupt attributes not allowed");
791 *no_add_attrs = true;
792 }
793
794 if (!TARGET_FIDOA
795 && !strcmp (IDENTIFIER_POINTER (name), "interrupt_thread"))
796 {
797 error ("interrupt_thread is available only on fido");
798 *no_add_attrs = true;
799 }
800
48ed72a4
PB
801 return NULL_TREE;
802}
860c4900
BI
803
804static void
3d74bc09 805m68k_compute_frame_layout (void)
860c4900
BI
806{
807 int regno, saved;
a40ed0f3 808 unsigned int mask;
a4242737
KH
809 enum m68k_function_kind func_kind =
810 m68k_get_function_kind (current_function_decl);
811 bool interrupt_handler = func_kind == m68k_fk_interrupt_handler;
812 bool interrupt_thread = func_kind == m68k_fk_interrupt_thread;
860c4900 813
3d74bc09
BI
814 /* Only compute the frame once per function.
815 Don't cache information until reload has been completed. */
816 if (current_frame.funcdef_no == current_function_funcdef_no
817 && reload_completed)
818 return;
819
820 current_frame.size = (get_frame_size () + 3) & -4;
860c4900 821
a40ed0f3 822 mask = saved = 0;
a4242737
KH
823
824 /* Interrupt thread does not need to save any register. */
825 if (!interrupt_thread)
826 for (regno = 0; regno < 16; regno++)
827 if (m68k_save_reg (regno, interrupt_handler))
828 {
829 mask |= 1 << (regno - D0_REG);
830 saved++;
831 }
3d74bc09
BI
832 current_frame.offset = saved * 4;
833 current_frame.reg_no = saved;
834 current_frame.reg_mask = mask;
860c4900 835
57047680 836 current_frame.foffset = 0;
a40ed0f3 837 mask = saved = 0;
dcc21c4c 838 if (TARGET_HARD_FLOAT)
860c4900 839 {
a4242737
KH
840 /* Interrupt thread does not need to save any register. */
841 if (!interrupt_thread)
842 for (regno = 16; regno < 24; regno++)
843 if (m68k_save_reg (regno, interrupt_handler))
844 {
845 mask |= 1 << (regno - FP0_REG);
846 saved++;
847 }
dcc21c4c 848 current_frame.foffset = saved * TARGET_FP_REG_SIZE;
3d74bc09 849 current_frame.offset += current_frame.foffset;
860c4900 850 }
57047680
GN
851 current_frame.fpu_no = saved;
852 current_frame.fpu_mask = mask;
3d74bc09
BI
853
854 /* Remember what function this frame refers to. */
855 current_frame.funcdef_no = current_function_funcdef_no;
860c4900
BI
856}
857
858HOST_WIDE_INT
859m68k_initial_elimination_offset (int from, int to)
860{
42b67c06
PB
861 int argptr_offset;
862 /* The arg pointer points 8 bytes before the start of the arguments,
863 as defined by FIRST_PARM_OFFSET. This makes it coincident with the
864 frame pointer in most frames. */
865 argptr_offset = frame_pointer_needed ? 0 : UNITS_PER_WORD;
860c4900 866 if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
42b67c06 867 return argptr_offset;
860c4900 868
3d74bc09 869 m68k_compute_frame_layout ();
860c4900 870
4761e388
NS
871 gcc_assert (to == STACK_POINTER_REGNUM);
872 switch (from)
873 {
a0a7fbc9 874 case ARG_POINTER_REGNUM:
42b67c06 875 return current_frame.offset + current_frame.size - argptr_offset;
4761e388
NS
876 case FRAME_POINTER_REGNUM:
877 return current_frame.offset + current_frame.size;
878 default:
879 gcc_unreachable ();
880 }
860c4900
BI
881}
882
/* Refer to the array `regs_ever_live' to determine which registers
   to save; `regs_ever_live[I]' is nonzero if register number I
   is ever used in the function.  This function is responsible for
   knowing which registers should not be saved even if used.
   Return true if we need to save REGNO.  */

static bool
m68k_save_reg (unsigned int regno, bool interrupt_handler)
{
  /* The PIC register must be preserved whenever the function uses the
     GOT, saves all registers, or might gain constant-pool references
     during reload.  */
  if (flag_pic && regno == PIC_REG)
    {
      if (crtl->saves_all_registers)
	return true;
      if (crtl->uses_pic_offset_table)
	return true;
      /* Reload may introduce constant pool references into a function
	 that thitherto didn't need a PIC register.  Note that the test
	 above will not catch that case because we will only set
	 crtl->uses_pic_offset_table when emitting
	 the address reloads.  */
      if (crtl->uses_const_pool)
	return true;
    }

  /* The EH data registers must survive into the landing pad when this
     function calls __builtin_eh_return.  */
  if (crtl->calls_eh_return)
    {
      unsigned int i;
      for (i = 0; ; i++)
	{
	  unsigned int test = EH_RETURN_DATA_REGNO (i);
	  if (test == INVALID_REGNUM)
	    break;
	  if (test == regno)
	    return true;
	}
    }

  /* Fixed regs we never touch.  */
  if (fixed_regs[regno])
    return false;

  /* The frame pointer (if it is such) is handled specially.  */
  if (regno == FRAME_POINTER_REGNUM && frame_pointer_needed)
    return false;

  /* Interrupt handlers must also save call_used_regs
     if they are live or when calling nested functions.  */
  if (interrupt_handler)
    {
      if (df_regs_ever_live_p (regno))
	return true;

      if (!current_function_is_leaf && call_used_regs[regno])
	return true;
    }

  /* Never need to save registers that aren't touched.  */
  if (!df_regs_ever_live_p (regno))
    return false;

  /* Otherwise save everything that isn't call-clobbered.  */
  return !call_used_regs[regno];
}
946
/* Emit RTL for a MOVEM or FMOVEM instruction.  BASE + OFFSET represents
   the lowest memory address.  COUNT is the number of registers to be
   moved, with register REGNO + I being moved if bit I of MASK is set.
   STORE_P specifies the direction of the move and ADJUST_STACK_P says
   whether or not this is pre-decrement (if STORE_P) or post-increment
   (if !STORE_P) operation.  Returns the emitted insn.  */

static rtx
m68k_emit_movem (rtx base, HOST_WIDE_INT offset,
		 unsigned int count, unsigned int regno,
		 unsigned int mask, bool store_p, bool adjust_stack_p)
{
  int i;
  rtx body, addr, src, operands[2];
  enum machine_mode mode;

  /* One SET per moved register, plus one extra SET for the stack
     adjustment when ADJUST_STACK_P.  */
  body = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (adjust_stack_p + count));
  mode = reg_raw_mode[regno];
  i = 0;

  if (adjust_stack_p)
    {
      /* Move BASE by the total transfer size: down for a store
	 (pre-decrement), up for a load (post-increment).  */
      src = plus_constant (base, (count
				  * GET_MODE_SIZE (mode)
				  * (HOST_WIDE_INT) (store_p ? -1 : 1)));
      XVECEXP (body, 0, i++) = gen_rtx_SET (VOIDmode, base, src);
    }

  /* Walk MASK from bit 0 upwards; operands[0] is always the SET
     destination, so indexing with !STORE_P/STORE_P swaps the memory
     and register roles according to the direction.  */
  for (; mask != 0; mask >>= 1, regno++)
    if (mask & 1)
      {
	addr = plus_constant (base, offset);
	operands[!store_p] = gen_frame_mem (mode, addr);
	operands[store_p] = gen_rtx_REG (mode, regno);
	XVECEXP (body, 0, i++)
	  = gen_rtx_SET (VOIDmode, operands[0], operands[1]);
	offset += GET_MODE_SIZE (mode);
      }
  gcc_assert (i == XVECLEN (body, 0));

  return emit_insn (body);
}
989
990/* Make INSN a frame-related instruction. */
79e68feb 991
08c148a8 992static void
a40ed0f3
KH
993m68k_set_frame_related (rtx insn)
994{
995 rtx body;
996 int i;
997
998 RTX_FRAME_RELATED_P (insn) = 1;
999 body = PATTERN (insn);
1000 if (GET_CODE (body) == PARALLEL)
1001 for (i = 0; i < XVECLEN (body, 0); i++)
1002 RTX_FRAME_RELATED_P (XVECEXP (body, 0, i)) = 1;
1003}
1004
1005/* Emit RTL for the "prologue" define_expand. */
1006
1007void
1008m68k_expand_prologue (void)
79e68feb 1009{
860c4900 1010 HOST_WIDE_INT fsize_with_regs;
a40ed0f3 1011 rtx limit, src, dest, insn;
3d74bc09 1012
a40ed0f3 1013 m68k_compute_frame_layout ();
3d74bc09 1014
a157febd
GK
1015 /* If the stack limit is a symbol, we can check it here,
1016 before actually allocating the space. */
e3b5732b 1017 if (crtl->limit_stack
a157febd 1018 && GET_CODE (stack_limit_rtx) == SYMBOL_REF)
a40ed0f3
KH
1019 {
1020 limit = plus_constant (stack_limit_rtx, current_frame.size + 4);
1021 if (!LEGITIMATE_CONSTANT_P (limit))
1022 {
1023 emit_move_insn (gen_rtx_REG (Pmode, D0_REG), limit);
1024 limit = gen_rtx_REG (Pmode, D0_REG);
1025 }
1026 emit_insn (gen_cmpsi (stack_pointer_rtx, limit));
1027 emit_insn (gen_conditional_trap (gen_rtx_LTU (VOIDmode,
1028 cc0_rtx, const0_rtx),
1029 const1_rtx));
1030 }
79e68feb 1031
a89e3f21 1032 fsize_with_regs = current_frame.size;
dcc21c4c
PB
1033 if (TARGET_COLDFIRE)
1034 {
a40ed0f3
KH
1035 /* ColdFire's move multiple instructions do not allow pre-decrement
1036 addressing. Add the size of movem saves to the initial stack
1037 allocation instead. */
1038 if (current_frame.reg_no >= MIN_MOVEM_REGS)
1039 fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
1040 if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
1041 fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
dcc21c4c 1042 }
860c4900 1043
79e68feb
RS
1044 if (frame_pointer_needed)
1045 {
a40ed0f3 1046 if (fsize_with_regs == 0 && TUNE_68040)
79e68feb 1047 {
a40ed0f3
KH
1048 /* On the 68040, two separate moves are faster than link.w 0. */
1049 dest = gen_frame_mem (Pmode,
1050 gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
1051 m68k_set_frame_related (emit_move_insn (dest, frame_pointer_rtx));
1052 m68k_set_frame_related (emit_move_insn (frame_pointer_rtx,
1053 stack_pointer_rtx));
79e68feb 1054 }
a40ed0f3
KH
1055 else if (fsize_with_regs < 0x8000 || TARGET_68020)
1056 m68k_set_frame_related
1057 (emit_insn (gen_link (frame_pointer_rtx,
1058 GEN_INT (-4 - fsize_with_regs))));
d9e88af0 1059 else
a40ed0f3
KH
1060 {
1061 m68k_set_frame_related
1062 (emit_insn (gen_link (frame_pointer_rtx, GEN_INT (-4))));
1063 m68k_set_frame_related
1064 (emit_insn (gen_addsi3 (stack_pointer_rtx,
1065 stack_pointer_rtx,
1066 GEN_INT (-fsize_with_regs))));
1067 }
96fcacb7
MK
1068
1069 /* If the frame pointer is needed, emit a special barrier that
1070 will prevent the scheduler from moving stores to the frame
1071 before the stack adjustment. */
1072 emit_insn (gen_stack_tie (stack_pointer_rtx, frame_pointer_rtx));
d9e88af0 1073 }
a40ed0f3
KH
1074 else if (fsize_with_regs != 0)
1075 m68k_set_frame_related
1076 (emit_insn (gen_addsi3 (stack_pointer_rtx,
1077 stack_pointer_rtx,
1078 GEN_INT (-fsize_with_regs))));
860c4900 1079
57047680 1080 if (current_frame.fpu_mask)
79e68feb 1081 {
a40ed0f3 1082 gcc_assert (current_frame.fpu_no >= MIN_FMOVEM_REGS);
dcc21c4c 1083 if (TARGET_68881)
a40ed0f3
KH
1084 m68k_set_frame_related
1085 (m68k_emit_movem (stack_pointer_rtx,
1086 current_frame.fpu_no * -GET_MODE_SIZE (XFmode),
1087 current_frame.fpu_no, FP0_REG,
1088 current_frame.fpu_mask, true, true));
dcc21c4c
PB
1089 else
1090 {
1091 int offset;
1092
a40ed0f3
KH
1093 /* If we're using moveml to save the integer registers,
1094 the stack pointer will point to the bottom of the moveml
1095 save area. Find the stack offset of the first FP register. */
1096 if (current_frame.reg_no < MIN_MOVEM_REGS)
dcc21c4c
PB
1097 offset = 0;
1098 else
a40ed0f3
KH
1099 offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
1100 m68k_set_frame_related
1101 (m68k_emit_movem (stack_pointer_rtx, offset,
1102 current_frame.fpu_no, FP0_REG,
1103 current_frame.fpu_mask, true, false));
f277471f 1104 }
79e68feb 1105 }
99df2465 1106
01bbf777 1107 /* If the stack limit is not a symbol, check it here.
a157febd 1108 This has the disadvantage that it may be too late... */
e3b5732b 1109 if (crtl->limit_stack)
a157febd
GK
1110 {
1111 if (REG_P (stack_limit_rtx))
a40ed0f3
KH
1112 {
1113 emit_insn (gen_cmpsi (stack_pointer_rtx, stack_limit_rtx));
1114 emit_insn (gen_conditional_trap (gen_rtx_LTU (VOIDmode,
1115 cc0_rtx, const0_rtx),
1116 const1_rtx));
1117 }
a157febd 1118 else if (GET_CODE (stack_limit_rtx) != SYMBOL_REF)
d4ee4d25 1119 warning (0, "stack limit expression is not supported");
a157febd 1120 }
01bbf777 1121
a40ed0f3 1122 if (current_frame.reg_no < MIN_MOVEM_REGS)
79e68feb 1123 {
a40ed0f3 1124 /* Store each register separately in the same order moveml does. */
79e68feb
RS
1125 int i;
1126
a40ed0f3
KH
1127 for (i = 16; i-- > 0; )
1128 if (current_frame.reg_mask & (1 << i))
078e983e 1129 {
a40ed0f3
KH
1130 src = gen_rtx_REG (SImode, D0_REG + i);
1131 dest = gen_frame_mem (SImode,
1132 gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
1133 m68k_set_frame_related (emit_insn (gen_movsi (dest, src)));
078e983e 1134 }
79e68feb 1135 }
a40ed0f3 1136 else
79e68feb 1137 {
9425fb04 1138 if (TARGET_COLDFIRE)
a40ed0f3
KH
1139 /* The required register save space has already been allocated.
1140 The first register should be stored at (%sp). */
1141 m68k_set_frame_related
1142 (m68k_emit_movem (stack_pointer_rtx, 0,
1143 current_frame.reg_no, D0_REG,
1144 current_frame.reg_mask, true, false));
afaff477 1145 else
a40ed0f3
KH
1146 m68k_set_frame_related
1147 (m68k_emit_movem (stack_pointer_rtx,
1148 current_frame.reg_no * -GET_MODE_SIZE (SImode),
1149 current_frame.reg_no, D0_REG,
1150 current_frame.reg_mask, true, true));
79e68feb 1151 }
a40ed0f3
KH
1152
1153 if (flag_pic
1154 && !TARGET_SEP_DATA
e3b5732b 1155 && crtl->uses_pic_offset_table)
6fb5fa3c 1156 insn = emit_insn (gen_load_got (pic_offset_table_rtx));
79e68feb
RS
1157}
1158\f
413ac1b2
RS
1159/* Return true if a simple (return) instruction is sufficient for this
1160 instruction (i.e. if no epilogue is needed). */
79e68feb 1161
3d74bc09 1162bool
a2bda628 1163m68k_use_return_insn (void)
79e68feb 1164{
79e68feb 1165 if (!reload_completed || frame_pointer_needed || get_frame_size () != 0)
3d74bc09 1166 return false;
125ed86f 1167
a0a7fbc9 1168 m68k_compute_frame_layout ();
413ac1b2 1169 return current_frame.offset == 0;
79e68feb
RS
1170}
1171
/* Emit RTL for the "epilogue" or "sibcall_epilogue" define_expand;
   SIBCALL_P says which.

   The function epilogue should not depend on the current stack pointer!
   It should use the frame pointer only, if there is a frame pointer.
   This is mandatory because of alloca; we also take advantage of it to
   omit stack adjustments before returning.  */

void
m68k_expand_epilogue (bool sibcall_p)
{
  HOST_WIDE_INT fsize, fsize_with_regs;
  bool big, restore_from_sp;

  m68k_compute_frame_layout ();

  fsize = current_frame.size;
  big = false;
  /* NOTE(review): this initialization is immediately overwritten by the
     assignment below.  */
  restore_from_sp = false;

  /* FIXME : current_function_is_leaf below is too strong.
     What we really need to know there is if there could be pending
     stack adjustment needed at that point.  */
  restore_from_sp = (!frame_pointer_needed
		     || (!cfun->calls_alloca
			 && current_function_is_leaf));

  /* fsize_with_regs is the size we need to adjust the sp when
     popping the frame.  */
  fsize_with_regs = fsize;
  if (TARGET_COLDFIRE && restore_from_sp)
    {
      /* ColdFire's move multiple instructions do not allow post-increment
	 addressing.  Add the size of movem loads to the final deallocation
	 instead.  */
      if (current_frame.reg_no >= MIN_MOVEM_REGS)
	fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
      if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
	fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
    }

  /* Restore offsets that do not fit a 16-bit displacement need either a
     stack-based restore (ColdFire) or an index register in %a1 ("big"
     mode, generating -OFFSET(%fp,%a1.l) addresses below).  */
  if (current_frame.offset + fsize >= 0x8000
      && !restore_from_sp
      && (current_frame.reg_mask || current_frame.fpu_mask))
    {
      if (TARGET_COLDFIRE
	  && (current_frame.reg_no >= MIN_MOVEM_REGS
	      || current_frame.fpu_no >= MIN_FMOVEM_REGS))
	{
	  /* ColdFire's move multiple instructions do not support the
	     (d8,Ax,Xi) addressing mode, so we're as well using a normal
	     stack-based restore.  */
	  emit_move_insn (gen_rtx_REG (Pmode, A1_REG),
			  GEN_INT (-(current_frame.offset + fsize)));
	  emit_insn (gen_addsi3 (stack_pointer_rtx,
				 gen_rtx_REG (Pmode, A1_REG),
				 frame_pointer_rtx));
	  restore_from_sp = true;
	}
      else
	{
	  emit_move_insn (gen_rtx_REG (Pmode, A1_REG), GEN_INT (-fsize));
	  fsize = 0;
	  big = true;
	}
    }

  if (current_frame.reg_no < MIN_MOVEM_REGS)
    {
      /* Restore each register separately in the same order moveml does.  */
      int i;
      HOST_WIDE_INT offset;

      offset = current_frame.offset + fsize;
      for (i = 0; i < 16; i++)
	if (current_frame.reg_mask & (1 << i))
	  {
	    rtx addr;

	    if (big)
	      {
		/* Generate the address -OFFSET(%fp,%a1.l).  */
		addr = gen_rtx_REG (Pmode, A1_REG);
		addr = gen_rtx_PLUS (Pmode, addr, frame_pointer_rtx);
		addr = plus_constant (addr, -offset);
	      }
	    else if (restore_from_sp)
	      addr = gen_rtx_POST_INC (Pmode, stack_pointer_rtx);
	    else
	      addr = plus_constant (frame_pointer_rtx, -offset);
	    emit_move_insn (gen_rtx_REG (SImode, D0_REG + i),
			    gen_frame_mem (SImode, addr));
	    offset -= GET_MODE_SIZE (SImode);
	  }
    }
  else if (current_frame.reg_mask)
    {
      /* Restore the integer registers with a single moveml.  */
      if (big)
	m68k_emit_movem (gen_rtx_PLUS (Pmode,
				       gen_rtx_REG (Pmode, A1_REG),
				       frame_pointer_rtx),
			 -(current_frame.offset + fsize),
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false, false);
      else if (restore_from_sp)
	m68k_emit_movem (stack_pointer_rtx, 0,
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false,
			 !TARGET_COLDFIRE);
      else
	m68k_emit_movem (frame_pointer_rtx,
			 -(current_frame.offset + fsize),
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false, false);
    }

  if (current_frame.fpu_no > 0)
    {
      /* Restore the FP registers with a single fmovem.  */
      if (big)
	m68k_emit_movem (gen_rtx_PLUS (Pmode,
				       gen_rtx_REG (Pmode, A1_REG),
				       frame_pointer_rtx),
			 -(current_frame.foffset + fsize),
			 current_frame.fpu_no, FP0_REG,
			 current_frame.fpu_mask, false, false);
      else if (restore_from_sp)
	{
	  if (TARGET_COLDFIRE)
	    {
	      int offset;

	      /* If we used moveml to restore the integer registers, the
		 stack pointer will still point to the bottom of the moveml
		 save area.  Find the stack offset of the first FP
		 register.  */
	      if (current_frame.reg_no < MIN_MOVEM_REGS)
		offset = 0;
	      else
		offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
	      m68k_emit_movem (stack_pointer_rtx, offset,
			       current_frame.fpu_no, FP0_REG,
			       current_frame.fpu_mask, false, false);
	    }
	  else
	    m68k_emit_movem (stack_pointer_rtx, 0,
			     current_frame.fpu_no, FP0_REG,
			     current_frame.fpu_mask, false, true);
	}
      else
	m68k_emit_movem (frame_pointer_rtx,
			 -(current_frame.foffset + fsize),
			 current_frame.fpu_no, FP0_REG,
			 current_frame.fpu_mask, false, false);
    }

  /* Deallocate the frame: unlk restores both %fp and %sp; otherwise
     pop whatever the movem loads did not already pop.  */
  if (frame_pointer_needed)
    emit_insn (gen_unlink (frame_pointer_rtx));
  else if (fsize_with_regs)
    emit_insn (gen_addsi3 (stack_pointer_rtx,
			   stack_pointer_rtx,
			   GEN_INT (fsize_with_regs)));

  if (crtl->calls_eh_return)
    emit_insn (gen_addsi3 (stack_pointer_rtx,
			   stack_pointer_rtx,
			   EH_RETURN_STACKADJ_RTX));

  if (!sibcall_p)
    emit_jump_insn (gen_rtx_RETURN (VOIDmode));
}
1342\f
8a4a2253 1343/* Return true if X is a valid comparison operator for the dbcc
64a184e9
RS
1344 instruction.
1345
1346 Note it rejects floating point comparison operators.
1347 (In the future we could use Fdbcc).
1348
1349 It also rejects some comparisons when CC_NO_OVERFLOW is set. */
1350
1351int
41b6a5e2 1352valid_dbcc_comparison_p_2 (rtx x, enum machine_mode mode ATTRIBUTE_UNUSED)
64a184e9 1353{
64a184e9
RS
1354 switch (GET_CODE (x))
1355 {
64a184e9
RS
1356 case EQ: case NE: case GTU: case LTU:
1357 case GEU: case LEU:
1358 return 1;
1359
1360 /* Reject some when CC_NO_OVERFLOW is set. This may be over
1361 conservative */
1362 case GT: case LT: case GE: case LE:
1363 return ! (cc_prev_status.flags & CC_NO_OVERFLOW);
1364 default:
1365 return 0;
1366 }
1367}
1368
/* Return nonzero if flags are currently in the 68881 flag register.
   The nonzero value is the CC_IN_68881 bit itself, not 1.  */
int
flags_in_68881 (void)
{
  /* We could add support for these in the future */
  return cc_status.flags & CC_IN_68881;
}
1376
fa157b28 1377/* Implement TARGET_FUNCTION_OK_FOR_SIBCALL_P. */
f7e70894
RS
1378
1379static bool
fa157b28 1380m68k_ok_for_sibcall_p (tree decl, tree exp)
f7e70894 1381{
fa157b28
NS
1382 enum m68k_function_kind kind;
1383
1384 /* We cannot use sibcalls for nested functions because we use the
1385 static chain register for indirect calls. */
1386 if (CALL_EXPR_STATIC_CHAIN (exp))
1387 return false;
1388
1389 kind = m68k_get_function_kind (current_function_decl);
1390 if (kind == m68k_fk_normal_function)
1391 /* We can always sibcall from a normal function, because it's
1392 undefined if it is calling an interrupt function. */
1393 return true;
1394
1395 /* Otherwise we can only sibcall if the function kind is known to be
1396 the same. */
1397 if (decl && m68k_get_function_kind (decl) == kind)
1398 return true;
1399
1400 return false;
f7e70894
RS
1401}
1402
29ca003a
RS
1403/* Convert X to a legitimate function call memory reference and return the
1404 result. */
a2ef3db7 1405
29ca003a
RS
1406rtx
1407m68k_legitimize_call_address (rtx x)
1408{
1409 gcc_assert (MEM_P (x));
1410 if (call_operand (XEXP (x, 0), VOIDmode))
1411 return x;
1412 return replace_equiv_address (x, force_reg (Pmode, XEXP (x, 0)));
a2ef3db7
BI
1413}
1414
f7e70894
RS
1415/* Likewise for sibling calls. */
1416
1417rtx
1418m68k_legitimize_sibcall_address (rtx x)
1419{
1420 gcc_assert (MEM_P (x));
1421 if (sibcall_operand (XEXP (x, 0), VOIDmode))
1422 return x;
1423
1424 emit_move_insn (gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM), XEXP (x, 0));
1425 return replace_equiv_address (x, gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM));
1426}
1427
64a184e9
RS
1428/* Output a dbCC; jCC sequence. Note we do not handle the
1429 floating point version of this sequence (Fdbcc). We also
1430 do not handle alternative conditions when CC_NO_OVERFLOW is
6a0f85e3
TG
1431 set. It is assumed that valid_dbcc_comparison_p and flags_in_68881 will
1432 kick those out before we get here. */
64a184e9 1433
1d8eaa6b 1434void
8a4a2253 1435output_dbcc_and_branch (rtx *operands)
64a184e9 1436{
64a184e9
RS
1437 switch (GET_CODE (operands[3]))
1438 {
1439 case EQ:
da398bb5 1440 output_asm_insn ("dbeq %0,%l1\n\tjeq %l2", operands);
e6d98cb0 1441 break;
64a184e9
RS
1442
1443 case NE:
da398bb5 1444 output_asm_insn ("dbne %0,%l1\n\tjne %l2", operands);
e6d98cb0 1445 break;
64a184e9
RS
1446
1447 case GT:
da398bb5 1448 output_asm_insn ("dbgt %0,%l1\n\tjgt %l2", operands);
e6d98cb0 1449 break;
64a184e9
RS
1450
1451 case GTU:
da398bb5 1452 output_asm_insn ("dbhi %0,%l1\n\tjhi %l2", operands);
e6d98cb0 1453 break;
64a184e9
RS
1454
1455 case LT:
da398bb5 1456 output_asm_insn ("dblt %0,%l1\n\tjlt %l2", operands);
e6d98cb0 1457 break;
64a184e9
RS
1458
1459 case LTU:
da398bb5 1460 output_asm_insn ("dbcs %0,%l1\n\tjcs %l2", operands);
e6d98cb0 1461 break;
64a184e9
RS
1462
1463 case GE:
da398bb5 1464 output_asm_insn ("dbge %0,%l1\n\tjge %l2", operands);
e6d98cb0 1465 break;
64a184e9
RS
1466
1467 case GEU:
da398bb5 1468 output_asm_insn ("dbcc %0,%l1\n\tjcc %l2", operands);
e6d98cb0 1469 break;
64a184e9
RS
1470
1471 case LE:
da398bb5 1472 output_asm_insn ("dble %0,%l1\n\tjle %l2", operands);
e6d98cb0 1473 break;
64a184e9
RS
1474
1475 case LEU:
da398bb5 1476 output_asm_insn ("dbls %0,%l1\n\tjls %l2", operands);
e6d98cb0 1477 break;
64a184e9
RS
1478
1479 default:
4761e388 1480 gcc_unreachable ();
64a184e9
RS
1481 }
1482
1483 /* If the decrement is to be done in SImode, then we have
7a1929e1 1484 to compensate for the fact that dbcc decrements in HImode. */
64a184e9
RS
1485 switch (GET_MODE (operands[0]))
1486 {
1487 case SImode:
da398bb5 1488 output_asm_insn ("clr%.w %0\n\tsubq%.l #1,%0\n\tjpl %l1", operands);
64a184e9
RS
1489 break;
1490
1491 case HImode:
1492 break;
1493
1494 default:
4761e388 1495 gcc_unreachable ();
64a184e9
RS
1496 }
1497}
1498
/* Output assembly for a set-on-condition over DImode operands: compare
   OPERAND1 against OPERAND2 under comparison OP and set byte DEST from
   the result.  Emits a comparison of the high words, a jne around the
   low-word comparison, then the appropriate sCC instruction(s); ordered
   comparisons need a second label and two sCC forms because the high
   and low words require different condition codes.  */

const char *
output_scc_di (rtx op, rtx operand1, rtx operand2, rtx dest)
{
  rtx loperands[7];
  enum rtx_code op_code = GET_CODE (op);

  /* This does not produce a useful cc.  */
  CC_STATUS_INIT;

  /* The m68k cmp.l instruction requires operand1 to be a reg as used
     below.  Swap the operands and change the op if these requirements
     are not fulfilled.  */
  if (GET_CODE (operand2) == REG && GET_CODE (operand1) != REG)
    {
      rtx tmp = operand1;

      operand1 = operand2;
      operand2 = tmp;
      op_code = swap_condition (op_code);
    }
  /* loperands[0]/[1] are the high/low words of operand1;
     loperands[2]/[3] likewise for operand2 when it is not zero.  */
  loperands[0] = operand1;
  if (GET_CODE (operand1) == REG)
    loperands[1] = gen_rtx_REG (SImode, REGNO (operand1) + 1);
  else
    loperands[1] = adjust_address (operand1, SImode, 4);
  if (operand2 != const0_rtx)
    {
      loperands[2] = operand2;
      if (GET_CODE (operand2) == REG)
	loperands[3] = gen_rtx_REG (SImode, REGNO (operand2) + 1);
      else
	loperands[3] = adjust_address (operand2, SImode, 4);
    }
  /* Label 4 is the join point reached when the high words differ.  */
  loperands[4] = gen_label_rtx ();
  if (operand2 != const0_rtx)
    output_asm_insn ("cmp%.l %2,%0\n\tjne %l4\n\tcmp%.l %3,%1", loperands);
  else
    {
      /* Comparing against zero: tst.l works everywhere except on
	 address registers before the 68020, where cmp.w #0 is used.  */
      if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[0]))
	output_asm_insn ("tst%.l %0", loperands);
      else
	output_asm_insn ("cmp%.w #0,%0", loperands);

      output_asm_insn ("jne %l4", loperands);

      if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[1]))
	output_asm_insn ("tst%.l %1", loperands);
      else
	output_asm_insn ("cmp%.w #0,%1", loperands);
    }

  loperands[5] = dest;

  switch (op_code)
    {
    case EQ:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("seq %5", loperands);
      break;

    case NE:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sne %5", loperands);
      break;

    case GT:
      /* Low words compare unsigned (shi); high words signed (sgt).  */
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("shi %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sgt %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case GTU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("shi %5", loperands);
      break;

    case LT:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("scs %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("slt %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case LTU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("scs %5", loperands);
      break;

    case GE:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("scc %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sge %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case GEU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("scc %5", loperands);
      break;

    case LE:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("sls %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sle %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case LEU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sls %5", loperands);
      break;

    default:
      gcc_unreachable ();
    }
  return "";
}
1635
/* Output a bit-test instruction.  COUNTOP is the bit number and DATAOP
   the value being tested; they become OPERANDS[0] and OPERANDS[1].
   INSN is the current insn, consulted to see whether the following test
   only needs (in)equality.  SIGNPOS appears to be the bit index of the
   sign bit within the addressed storage unit (e.g. 7 for byte
   accesses) -- assumption from its use below; confirm at callers.
   Sets cc_status to describe the resulting flags and returns the
   assembler template.  */

const char *
output_btst (rtx *operands, rtx countop, rtx dataop, rtx insn, int signpos)
{
  operands[0] = countop;
  operands[1] = dataop;

  if (GET_CODE (countop) == CONST_INT)
    {
      register int count = INTVAL (countop);
      /* If COUNT is bigger than size of storage unit in use,
	 advance to the containing unit of same size.  */
      if (count > signpos)
	{
	  int offset = (count & ~signpos) / 8;
	  count = count & signpos;
	  operands[1] = dataop = adjust_address (dataop, QImode, offset);
	}
      if (count == signpos)
	cc_status.flags = CC_NOT_POSITIVE | CC_Z_IN_NOT_N;
      else
	cc_status.flags = CC_NOT_NEGATIVE | CC_Z_IN_NOT_N;

      /* These three statements used to use next_insns_test_no...
	 but it appears that this should do the same job.  */
      if (count == 31
	  && next_insn_tests_no_inequality (insn))
	return "tst%.l %1";
      if (count == 15
	  && next_insn_tests_no_inequality (insn))
	return "tst%.w %1";
      if (count == 7
	  && next_insn_tests_no_inequality (insn))
	return "tst%.b %1";
      /* Try to use `movew to ccr' followed by the appropriate branch insn.
	 On some m68k variants unfortunately that's slower than btst.
	 On 68000 and higher, that should also work for all HImode operands.  */
      if (TUNE_CPU32 || TARGET_COLDFIRE || optimize_size)
	{
	  if (count == 3 && DATA_REG_P (operands[1])
	      && next_insn_tests_no_inequality (insn))
	    {
	      cc_status.flags = CC_NOT_NEGATIVE | CC_Z_IN_NOT_N | CC_NO_OVERFLOW;
	      return "move%.w %1,%%ccr";
	    }
	  if (count == 2 && DATA_REG_P (operands[1])
	      && next_insn_tests_no_inequality (insn))
	    {
	      cc_status.flags = CC_NOT_NEGATIVE | CC_INVERTED | CC_NO_OVERFLOW;
	      return "move%.w %1,%%ccr";
	    }
	  /* count == 1 followed by bvc/bvs and
	     count == 0 followed by bcc/bcs are also possible, but need
	     m68k-specific CC_Z_IN_NOT_V and CC_Z_IN_NOT_C flags.  */
	}

      cc_status.flags = CC_NOT_NEGATIVE;
    }
  return "btst %0,%1";
}
79e68feb 1695\f
fc2241eb
RS
1696/* Return true if X is a legitimate base register. STRICT_P says
1697 whether we need strict checking. */
1698
1699bool
1700m68k_legitimate_base_reg_p (rtx x, bool strict_p)
1701{
1702 /* Allow SUBREG everywhere we allow REG. This results in better code. */
1703 if (!strict_p && GET_CODE (x) == SUBREG)
1704 x = SUBREG_REG (x);
1705
1706 return (REG_P (x)
1707 && (strict_p
1708 ? REGNO_OK_FOR_BASE_P (REGNO (x))
bf32249e 1709 : REGNO_OK_FOR_BASE_NONSTRICT_P (REGNO (x))));
fc2241eb
RS
1710}
1711
1712/* Return true if X is a legitimate index register. STRICT_P says
1713 whether we need strict checking. */
1714
1715bool
1716m68k_legitimate_index_reg_p (rtx x, bool strict_p)
1717{
1718 if (!strict_p && GET_CODE (x) == SUBREG)
1719 x = SUBREG_REG (x);
1720
1721 return (REG_P (x)
1722 && (strict_p
1723 ? REGNO_OK_FOR_INDEX_P (REGNO (x))
bf32249e 1724 : REGNO_OK_FOR_INDEX_NONSTRICT_P (REGNO (x))));
fc2241eb
RS
1725}
1726
/* Return true if X is a legitimate index expression for a (d8,An,Xn) or
   (bd,An,Xn) addressing mode.  Fill in the INDEX and SCALE fields of
   ADDRESS if so.  STRICT_P says whether we need strict checking.  */

static bool
m68k_decompose_index (rtx x, bool strict_p, struct m68k_address *address)
{
  int scale;

  /* Check for a scale factor.  Scaled indexing needs a 68020 or
     ColdFire; scale 8 is excluded on ColdFire without an FPU.  */
  scale = 1;
  if ((TARGET_68020 || TARGET_COLDFIRE)
      && GET_CODE (x) == MULT
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && (INTVAL (XEXP (x, 1)) == 2
	  || INTVAL (XEXP (x, 1)) == 4
	  || (INTVAL (XEXP (x, 1)) == 8
	      && (TARGET_COLDFIRE_FPU || !TARGET_COLDFIRE))))
    {
      scale = INTVAL (XEXP (x, 1));
      x = XEXP (x, 0);
    }

  /* Check for a word extension (the Xn.w index form).  */
  if (!TARGET_COLDFIRE
      && GET_CODE (x) == SIGN_EXTEND
      && GET_MODE (XEXP (x, 0)) == HImode)
    x = XEXP (x, 0);

  if (m68k_legitimate_index_reg_p (x, strict_p))
    {
      address->scale = scale;
      address->index = x;
      return true;
    }

  return false;
}
1765
7ffb5e78
RS
1766/* Return true if X is an illegitimate symbolic constant. */
1767
1768bool
1769m68k_illegitimate_symbolic_constant_p (rtx x)
1770{
1771 rtx base, offset;
1772
1773 if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P)
1774 {
1775 split_const (x, &base, &offset);
1776 if (GET_CODE (base) == SYMBOL_REF
1777 && !offset_within_block_p (base, INTVAL (offset)))
1778 return true;
1779 }
1780 return false;
1781}
1782
/* Return true if X is a legitimate constant address that can reach
   bytes in the range [X, X + REACH).  STRICT_P says whether we need
   strict checking.  */

static bool
m68k_legitimate_constant_address_p (rtx x, unsigned int reach, bool strict_p)
{
  rtx base, offset;

  if (!CONSTANT_ADDRESS_P (x))
    return false;

  /* Symbolic addresses are not directly usable under PIC, except in
     strict PC-relative mode.  */
  if (flag_pic
      && !(strict_p && TARGET_PCREL)
      && symbolic_operand (x, VOIDmode))
    return false;

  /* When offsets must stay within sections, every accessed byte of a
     symbol+offset address must lie in the symbol's block.  */
  if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P && reach > 1)
    {
      split_const (x, &base, &offset);
      if (GET_CODE (base) == SYMBOL_REF
	  && !offset_within_block_p (base, INTVAL (offset) + reach - 1))
	return false;
    }

  return true;
}
1810
1811/* Return true if X is a LABEL_REF for a jump table. Assume that unplaced
1812 labels will become jump tables. */
1813
1814static bool
1815m68k_jump_table_ref_p (rtx x)
1816{
1817 if (GET_CODE (x) != LABEL_REF)
1818 return false;
1819
1820 x = XEXP (x, 0);
1821 if (!NEXT_INSN (x) && !PREV_INSN (x))
1822 return true;
1823
1824 x = next_nonnote_insn (x);
1825 return x && JUMP_TABLE_DATA_P (x);
1826}
1827
/* Return true if X is a legitimate address for values of mode MODE.
   STRICT_P says whether strict checking is needed.  If the address
   is valid, describe its components in *ADDRESS.

   The checks below are ordered from the cheapest/most common m68k
   addressing modes to the most general ones; the first match wins,
   so the order is semantically significant.  */

static bool
m68k_decompose_address (enum machine_mode mode, rtx x,
			bool strict_p, struct m68k_address *address)
{
  unsigned int reach;

  memset (address, 0, sizeof (*address));

  /* REACH is how many bytes past X the access may touch; BLKmode has
     no fixed size, so only the first byte needs to be reachable.  */
  if (mode == BLKmode)
    reach = 1;
  else
    reach = GET_MODE_SIZE (mode);

  /* Check for (An) (mode 2).  */
  if (m68k_legitimate_base_reg_p (x, strict_p))
    {
      address->base = x;
      return true;
    }

  /* Check for -(An) and (An)+ (modes 3 and 4).  */
  if ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_INC)
      && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
    {
      address->code = GET_CODE (x);
      address->base = XEXP (x, 0);
      return true;
    }

  /* Check for (d16,An) (mode 5).  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && IN_RANGE (INTVAL (XEXP (x, 1)), -0x8000, 0x8000 - reach)
      && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
    {
      address->base = XEXP (x, 0);
      address->offset = XEXP (x, 1);
      return true;
    }

  /* Check for GOT loads.  These are (bd,An,Xn) addresses if
     TARGET_68020 && flag_pic == 2, otherwise they are (d16,An)
     addresses.  */
  if (flag_pic
      && GET_CODE (x) == PLUS
      && XEXP (x, 0) == pic_offset_table_rtx
      && (GET_CODE (XEXP (x, 1)) == SYMBOL_REF
	  || GET_CODE (XEXP (x, 1)) == LABEL_REF))
    {
      address->base = XEXP (x, 0);
      address->offset = XEXP (x, 1);
      return true;
    }

  /* The ColdFire FPU only accepts addressing modes 2-5.  */
  if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
    return false;

  /* Check for (xxx).w and (xxx).l.  Also, in the TARGET_PCREL case,
     check for (d16,PC) or (bd,PC,Xn) with a suppressed index register.
     All these modes are variations of mode 7.  */
  if (m68k_legitimate_constant_address_p (x, reach, strict_p))
    {
      address->offset = x;
      return true;
    }

  /* Check for (d8,PC,Xn), a mode 7 form.  This case is needed for
     tablejumps.

     ??? do_tablejump creates these addresses before placing the target
     label, so we have to assume that unplaced labels are jump table
     references.  It seems unlikely that we would ever generate indexed
     accesses to unplaced labels in other cases.  */
  if (GET_CODE (x) == PLUS
      && m68k_jump_table_ref_p (XEXP (x, 1))
      && m68k_decompose_index (XEXP (x, 0), strict_p, address))
    {
      address->offset = XEXP (x, 1);
      return true;
    }

  /* Everything hereafter deals with (d8,An,Xn.SIZE*SCALE) or
     (bd,An,Xn.SIZE*SCALE) addresses.  */

  if (TARGET_68020)
    {
      /* Check for a nonzero base displacement.  */
      if (GET_CODE (x) == PLUS
	  && m68k_legitimate_constant_address_p (XEXP (x, 1), reach, strict_p))
	{
	  address->offset = XEXP (x, 1);
	  x = XEXP (x, 0);
	}

      /* Check for a suppressed index register.  */
      if (m68k_legitimate_base_reg_p (x, strict_p))
	{
	  address->base = x;
	  return true;
	}

      /* Check for a suppressed base register.  Do not allow this case
	 for non-symbolic offsets as it effectively gives gcc freedom
	 to treat data registers as base registers, which can generate
	 worse code.  */
      if (address->offset
	  && symbolic_operand (address->offset, VOIDmode)
	  && m68k_decompose_index (x, strict_p, address))
	return true;
    }
  else
    {
      /* Check for a nonzero base displacement.  */
      if (GET_CODE (x) == PLUS
	  && GET_CODE (XEXP (x, 1)) == CONST_INT
	  && IN_RANGE (INTVAL (XEXP (x, 1)), -0x80, 0x80 - reach))
	{
	  address->offset = XEXP (x, 1);
	  x = XEXP (x, 0);
	}
    }

  /* We now expect the sum of a base and an index.  */
  if (GET_CODE (x) == PLUS)
    {
      if (m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p)
	  && m68k_decompose_index (XEXP (x, 1), strict_p, address))
	{
	  address->base = XEXP (x, 0);
	  return true;
	}

      if (m68k_legitimate_base_reg_p (XEXP (x, 1), strict_p)
	  && m68k_decompose_index (XEXP (x, 0), strict_p, address))
	{
	  address->base = XEXP (x, 1);
	  return true;
	}
    }
  return false;
}
1974
1975/* Return true if X is a legitimate address for values of mode MODE.
1976 STRICT_P says whether strict checking is needed. */
1977
1978bool
1979m68k_legitimate_address_p (enum machine_mode mode, rtx x, bool strict_p)
1980{
1981 struct m68k_address address;
1982
1983 return m68k_decompose_address (mode, x, strict_p, &address);
1984}
1985
1986/* Return true if X is a memory, describing its address in ADDRESS if so.
1987 Apply strict checking if called during or after reload. */
1988
1989static bool
1990m68k_legitimate_mem_p (rtx x, struct m68k_address *address)
1991{
1992 return (MEM_P (x)
1993 && m68k_decompose_address (GET_MODE (x), XEXP (x, 0),
1994 reload_in_progress || reload_completed,
1995 address));
1996}
1997
1998/* Return true if X matches the 'Q' constraint. It must be a memory
1999 with a base address and no constant offset or index. */
2000
2001bool
2002m68k_matches_q_p (rtx x)
2003{
2004 struct m68k_address address;
2005
2006 return (m68k_legitimate_mem_p (x, &address)
2007 && address.code == UNKNOWN
2008 && address.base
2009 && !address.offset
2010 && !address.index);
2011}
2012
2013/* Return true if X matches the 'U' constraint. It must be a base address
2014 with a constant offset and no index. */
2015
2016bool
2017m68k_matches_u_p (rtx x)
2018{
2019 struct m68k_address address;
2020
2021 return (m68k_legitimate_mem_p (x, &address)
2022 && address.code == UNKNOWN
2023 && address.base
2024 && address.offset
2025 && !address.index);
2026}
2027
79e68feb
RS
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.

   An address is legitimized by making an indirect reference
   through the Global Offset Table with the name of the symbol
   used as an offset.

   The assembler and linker are responsible for placing the
   address of the symbol in the GOT.  The function prologue
   is responsible for initializing a5 to the starting address
   of the GOT.

   The assembler is also responsible for translating a symbol name
   into a constant displacement from the start of the GOT.

   A quick example may make things a little clearer:

   When not generating PIC code to store the value 12345 into _foo
   we would generate the following code:

	movel #12345, _foo

   When generating PIC two transformations are made.  First, the compiler
   loads the address of foo into a register.  So the first transformation makes:

	lea	_foo, a0
	movel   #12345, a0@

   The code in movsi will intercept the lea instruction and call this
   routine which will transform the instructions into:

	movel   a5@(_foo:w), a0
	movel   #12345, a0@


   That (in a nutshell) is how *all* symbol and label references are
   handled.

   ORIG is the address to legitimize; MODE is unused; REG receives the
   legitimized address (it must be non-null for any case that needs a
   scratch register).  Emits the GOT-load insns as a side effect.  */

rtx
legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
			rtx reg)
{
  rtx pic_ref = orig;

  /* First handle a simple SYMBOL_REF or LABEL_REF */
  if (GET_CODE (orig) == SYMBOL_REF || GET_CODE (orig) == LABEL_REF)
    {
      gcc_assert (reg);

      if (TARGET_COLDFIRE && TARGET_XGOT)
	/* When compiling with -mxgot switch the code for the above
	   example will look like this:

	   movel a5, a0
	   addl _foo@GOT, a0
	   movel a0@, a0
	   movel #12345, a0@ */
	{
	  rtx pic_offset;

	  /* Wrap ORIG in UNSPEC_GOTOFF to tip m68k_output_addr_const_extra
	     to put @GOT after reference.  */
	  pic_offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, orig),
				       UNSPEC_GOTOFF);
	  pic_offset = gen_rtx_CONST (Pmode, pic_offset);
	  emit_move_insn (reg, pic_offset);
	  emit_insn (gen_addsi3 (reg, reg, pic_offset_table_rtx));
	  pic_ref = gen_rtx_MEM (Pmode, reg);
	}
      else
	pic_ref = gen_rtx_MEM (Pmode,
			       gen_rtx_PLUS (Pmode,
					     pic_offset_table_rtx, orig));
      crtl->uses_pic_offset_table = 1;
      /* The GOT entry itself is read-only.  */
      MEM_READONLY_P (pic_ref) = 1;
      emit_move_insn (reg, pic_ref);
      return reg;
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      /* Make sure this has not already been legitimized.  */
      if (GET_CODE (XEXP (orig, 0)) == PLUS
	  && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
	return orig;

      gcc_assert (reg);

      /* legitimize both operands of the PLUS */
      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      /* Recurse on each operand; the second recursion may only reuse
	 REG if the first one did not consume it.  */
      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
				     base == reg ? 0 : reg);

      if (GET_CODE (orig) == CONST_INT)
	return plus_constant (base, INTVAL (orig));
      pic_ref = gen_rtx_PLUS (Pmode, base, orig);
      /* Likewise, should we set special REG_NOTEs here?  */
    }
  return pic_ref;
}
2133
2134\f
0ce6f9fb 2135
a0a7fbc9 2136#define USE_MOVQ(i) ((unsigned) ((i) + 128) <= 255)
0ce6f9fb 2137
bda2a571
RS
2138/* Return the type of move that should be used for integer I. */
2139
c47b0cb4
MK
2140M68K_CONST_METHOD
2141m68k_const_method (HOST_WIDE_INT i)
0ce6f9fb 2142{
0ce6f9fb
RK
2143 unsigned u;
2144
6910dd70 2145 if (USE_MOVQ (i))
0ce6f9fb 2146 return MOVQ;
24092242 2147
c16eadc7 2148 /* The ColdFire doesn't have byte or word operations. */
97c55091 2149 /* FIXME: This may not be useful for the m68060 either. */
85dbf7e2 2150 if (!TARGET_COLDFIRE)
24092242
RK
2151 {
2152 /* if -256 < N < 256 but N is not in range for a moveq
7a1929e1 2153 N^ff will be, so use moveq #N^ff, dreg; not.b dreg. */
24092242
RK
2154 if (USE_MOVQ (i ^ 0xff))
2155 return NOTB;
2156 /* Likewise, try with not.w */
2157 if (USE_MOVQ (i ^ 0xffff))
2158 return NOTW;
2159 /* This is the only value where neg.w is useful */
2160 if (i == -65408)
2161 return NEGW;
24092242 2162 }
28bad6d1 2163
5e04daf3
PB
2164 /* Try also with swap. */
2165 u = i;
2166 if (USE_MOVQ ((u >> 16) | (u << 16)))
2167 return SWAP;
2168
986e74d5 2169 if (TARGET_ISAB)
28bad6d1 2170 {
72edf146 2171 /* Try using MVZ/MVS with an immediate value to load constants. */
28bad6d1
PB
2172 if (i >= 0 && i <= 65535)
2173 return MVZ;
2174 if (i >= -32768 && i <= 32767)
2175 return MVS;
2176 }
2177
0ce6f9fb
RK
2178 /* Otherwise, use move.l */
2179 return MOVL;
2180}
2181
bda2a571
RS
2182/* Return the cost of moving constant I into a data register. */
2183
3c50106f 2184static int
bda2a571 2185const_int_cost (HOST_WIDE_INT i)
0ce6f9fb 2186{
c47b0cb4 2187 switch (m68k_const_method (i))
0ce6f9fb 2188 {
a0a7fbc9
AS
2189 case MOVQ:
2190 /* Constants between -128 and 127 are cheap due to moveq. */
2191 return 0;
2192 case MVZ:
2193 case MVS:
2194 case NOTB:
2195 case NOTW:
2196 case NEGW:
2197 case SWAP:
2198 /* Constants easily generated by moveq + not.b/not.w/neg.w/swap. */
2199 return 1;
2200 case MOVL:
2201 return 2;
2202 default:
2203 gcc_unreachable ();
0ce6f9fb
RK
2204 }
2205}
2206
/* Compute the cost of rtx expression X, whose code is CODE and whose
   parent has code OUTER_CODE.  On success store the cost in *TOTAL and
   return true; return false to let the caller cost the sub-expressions
   itself.  SPEED is ignored: the same estimates are used for both
   size and speed optimization.  */

static bool
m68k_rtx_costs (rtx x, int code, int outer_code, int *total,
		bool speed ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case CONST_INT:
      /* Constant zero is super cheap due to clr instruction.  */
      if (x == const0_rtx)
	*total = 0;
      else
	*total = const_int_cost (INTVAL (x));
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 3;
      return true;

    case CONST_DOUBLE:
      /* Make 0.0 cheaper than other floating constants to
	 encourage creating tstsf and tstdf insns.  */
      if (outer_code == COMPARE
	  && (x == CONST0_RTX (SFmode) || x == CONST0_RTX (DFmode)))
	*total = 4;
      else
	*total = 5;
      return true;

    /* These are vaguely right for a 68020.  */
    /* The costs for long multiply have been adjusted to work properly
       in synth_mult on the 68020, relative to an average of the time
       for add and the time for shift, taking away a little more because
       sometimes move insns are needed.  */
    /* div?.w is relatively cheaper on 68000 counted in COSTS_N_INSNS
       terms.  */
#define MULL_COST				\
  (TUNE_68060 ? 2				\
   : TUNE_68040 ? 5				\
   : (TUNE_CFV2 && TUNE_EMAC) ? 3		\
   : (TUNE_CFV2 && TUNE_MAC) ? 4		\
   : TUNE_CFV2 ? 8				\
   : TARGET_COLDFIRE ? 3 : 13)

#define MULW_COST				\
  (TUNE_68060 ? 2				\
   : TUNE_68040 ? 3				\
   : TUNE_68000_10 ? 5				\
   : (TUNE_CFV2 && TUNE_EMAC) ? 3		\
   : (TUNE_CFV2 && TUNE_MAC) ? 2		\
   : TUNE_CFV2 ? 8				\
   : TARGET_COLDFIRE ? 2 : 8)

#define DIVW_COST				\
  (TARGET_CF_HWDIV ? 11				\
   : TUNE_68000_10 || TARGET_COLDFIRE ? 12 : 27)

    case PLUS:
      /* An lea costs about three times as much as a simple add.  */
      if (GET_MODE (x) == SImode
	  && GET_CODE (XEXP (x, 1)) == REG
	  && GET_CODE (XEXP (x, 0)) == MULT
	  && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
	  && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
	  && (INTVAL (XEXP (XEXP (x, 0), 1)) == 2
	      || INTVAL (XEXP (XEXP (x, 0), 1)) == 4
	      || INTVAL (XEXP (XEXP (x, 0), 1)) == 8))
	{
	    /* lea an@(dx:l:i),am */
	    *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 2 : 3);
	    return true;
	}
      return false;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      if (TUNE_68060)
	{
          *total = COSTS_N_INSNS(1);
	  return true;
	}
      if (TUNE_68000_10)
	{
	  if (GET_CODE (XEXP (x, 1)) == CONST_INT)
	    {
	      if (INTVAL (XEXP (x, 1)) < 16)
		*total = COSTS_N_INSNS (2) + INTVAL (XEXP (x, 1)) / 2;
	      else
		/* We're using clrw + swap for these cases.  */
		*total = COSTS_N_INSNS (4) + (INTVAL (XEXP (x, 1)) - 16) / 2;
	    }
	  else
	    *total = COSTS_N_INSNS (10); /* Worst case.  */
	  return true;
	}
      /* A shift by a big integer takes an extra instruction.  */
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && (INTVAL (XEXP (x, 1)) == 16))
	{
	  *total = COSTS_N_INSNS (2);	 /* clrw;swap */
	  return true;
	}
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && !(INTVAL (XEXP (x, 1)) > 0
	       && INTVAL (XEXP (x, 1)) <= 8))
	{
	  *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 1 : 3);	 /* lsr #i,dn */
	  return true;
	}
      return false;

    case MULT:
      if ((GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
	   || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
	  && GET_MODE (x) == SImode)
	*total = COSTS_N_INSNS (MULW_COST);
      else if (GET_MODE (x) == QImode || GET_MODE (x) == HImode)
	*total = COSTS_N_INSNS (MULW_COST);
      else
	*total = COSTS_N_INSNS (MULL_COST);
      return true;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      if (GET_MODE (x) == QImode || GET_MODE (x) == HImode)
	*total = COSTS_N_INSNS (DIVW_COST);	/* div.w */
      else if (TARGET_CF_HWDIV)
	*total = COSTS_N_INSNS (18);
      else
	*total = COSTS_N_INSNS (43);		/* div.l */
      return true;

    default:
      return false;
    }
}
2347
88512ba0 2348/* Return an instruction to move CONST_INT OPERANDS[1] into data register
bda2a571
RS
2349 OPERANDS[0]. */
2350
2351static const char *
8a4a2253 2352output_move_const_into_data_reg (rtx *operands)
0ce6f9fb 2353{
bda2a571 2354 HOST_WIDE_INT i;
0ce6f9fb
RK
2355
2356 i = INTVAL (operands[1]);
c47b0cb4 2357 switch (m68k_const_method (i))
0ce6f9fb 2358 {
28bad6d1 2359 case MVZ:
28bad6d1 2360 return "mvzw %1,%0";
1cbae84f
PB
2361 case MVS:
2362 return "mvsw %1,%0";
a0a7fbc9 2363 case MOVQ:
0ce6f9fb 2364 return "moveq %1,%0";
a0a7fbc9 2365 case NOTB:
66e07510 2366 CC_STATUS_INIT;
1d8eaa6b 2367 operands[1] = GEN_INT (i ^ 0xff);
0ce6f9fb 2368 return "moveq %1,%0\n\tnot%.b %0";
a0a7fbc9 2369 case NOTW:
66e07510 2370 CC_STATUS_INIT;
1d8eaa6b 2371 operands[1] = GEN_INT (i ^ 0xffff);
0ce6f9fb 2372 return "moveq %1,%0\n\tnot%.w %0";
a0a7fbc9 2373 case NEGW:
66e07510 2374 CC_STATUS_INIT;
3b4b85c9 2375 return "moveq #-128,%0\n\tneg%.w %0";
a0a7fbc9 2376 case SWAP:
0ce6f9fb
RK
2377 {
2378 unsigned u = i;
2379
1d8eaa6b 2380 operands[1] = GEN_INT ((u << 16) | (u >> 16));
0ce6f9fb 2381 return "moveq %1,%0\n\tswap %0";
0ce6f9fb 2382 }
a0a7fbc9 2383 case MOVL:
bda2a571 2384 return "move%.l %1,%0";
a0a7fbc9 2385 default:
bda2a571 2386 gcc_unreachable ();
0ce6f9fb
RK
2387 }
2388}
2389
bda2a571 2390/* Return true if I can be handled by ISA B's mov3q instruction. */
5e04daf3 2391
bda2a571
RS
2392bool
2393valid_mov3q_const (HOST_WIDE_INT i)
2394{
2395 return TARGET_ISAB && (i == -1 || IN_RANGE (i, 1, 7));
5e04daf3
PB
2396}
2397
bda2a571
RS
2398/* Return an instruction to move CONST_INT OPERANDS[1] into OPERANDS[0].
2399 I is the value of OPERANDS[1]. */
5e04daf3 2400
bda2a571 2401static const char *
8a4a2253 2402output_move_simode_const (rtx *operands)
02ed0c07 2403{
bda2a571
RS
2404 rtx dest;
2405 HOST_WIDE_INT src;
2406
2407 dest = operands[0];
2408 src = INTVAL (operands[1]);
2409 if (src == 0
2410 && (DATA_REG_P (dest) || MEM_P (dest))
3197c489
RS
2411 /* clr insns on 68000 read before writing. */
2412 && ((TARGET_68010 || TARGET_COLDFIRE)
bda2a571 2413 || !(MEM_P (dest) && MEM_VOLATILE_P (dest))))
02ed0c07 2414 return "clr%.l %0";
bda2a571 2415 else if (GET_MODE (dest) == SImode && valid_mov3q_const (src))
a0a7fbc9 2416 return "mov3q%.l %1,%0";
bda2a571 2417 else if (src == 0 && ADDRESS_REG_P (dest))
38198304 2418 return "sub%.l %0,%0";
bda2a571 2419 else if (DATA_REG_P (dest))
02ed0c07 2420 return output_move_const_into_data_reg (operands);
bda2a571 2421 else if (ADDRESS_REG_P (dest) && IN_RANGE (src, -0x8000, 0x7fff))
5e04daf3 2422 {
bda2a571 2423 if (valid_mov3q_const (src))
5e04daf3
PB
2424 return "mov3q%.l %1,%0";
2425 return "move%.w %1,%0";
2426 }
bda2a571
RS
2427 else if (MEM_P (dest)
2428 && GET_CODE (XEXP (dest, 0)) == PRE_DEC
2429 && REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
2430 && IN_RANGE (src, -0x8000, 0x7fff))
5e04daf3 2431 {
bda2a571 2432 if (valid_mov3q_const (src))
5e04daf3
PB
2433 return "mov3q%.l %1,%-";
2434 return "pea %a1";
2435 }
02ed0c07
RK
2436 return "move%.l %1,%0";
2437}
2438
5505f548 2439const char *
8a4a2253 2440output_move_simode (rtx *operands)
f4e80198
RK
2441{
2442 if (GET_CODE (operands[1]) == CONST_INT)
2443 return output_move_simode_const (operands);
2444 else if ((GET_CODE (operands[1]) == SYMBOL_REF
2445 || GET_CODE (operands[1]) == CONST)
2446 && push_operand (operands[0], SImode))
2447 return "pea %a1";
2448 else if ((GET_CODE (operands[1]) == SYMBOL_REF
2449 || GET_CODE (operands[1]) == CONST)
2450 && ADDRESS_REG_P (operands[0]))
2451 return "lea %a1,%0";
2452 return "move%.l %1,%0";
2453}
2454
5505f548 2455const char *
8a4a2253 2456output_move_himode (rtx *operands)
f4e80198
RK
2457{
2458 if (GET_CODE (operands[1]) == CONST_INT)
2459 {
2460 if (operands[1] == const0_rtx
2461 && (DATA_REG_P (operands[0])
2462 || GET_CODE (operands[0]) == MEM)
3197c489
RS
2463 /* clr insns on 68000 read before writing. */
2464 && ((TARGET_68010 || TARGET_COLDFIRE)
f4e80198
RK
2465 || !(GET_CODE (operands[0]) == MEM
2466 && MEM_VOLATILE_P (operands[0]))))
2467 return "clr%.w %0";
38198304
AS
2468 else if (operands[1] == const0_rtx
2469 && ADDRESS_REG_P (operands[0]))
2470 return "sub%.l %0,%0";
f4e80198
RK
2471 else if (DATA_REG_P (operands[0])
2472 && INTVAL (operands[1]) < 128
2473 && INTVAL (operands[1]) >= -128)
a0a7fbc9 2474 return "moveq %1,%0";
f4e80198
RK
2475 else if (INTVAL (operands[1]) < 0x8000
2476 && INTVAL (operands[1]) >= -0x8000)
2477 return "move%.w %1,%0";
2478 }
2479 else if (CONSTANT_P (operands[1]))
2480 return "move%.l %1,%0";
f4e80198
RK
2481 return "move%.w %1,%0";
2482}
2483
5505f548 2484const char *
8a4a2253 2485output_move_qimode (rtx *operands)
f4e80198 2486{
102701ff 2487 /* 68k family always modifies the stack pointer by at least 2, even for
c16eadc7 2488 byte pushes. The 5200 (ColdFire) does not do this. */
4761e388 2489
a0a7fbc9 2490 /* This case is generated by pushqi1 pattern now. */
4761e388
NS
2491 gcc_assert (!(GET_CODE (operands[0]) == MEM
2492 && GET_CODE (XEXP (operands[0], 0)) == PRE_DEC
2493 && XEXP (XEXP (operands[0], 0), 0) == stack_pointer_rtx
2494 && ! ADDRESS_REG_P (operands[1])
2495 && ! TARGET_COLDFIRE));
f4e80198 2496
3197c489 2497 /* clr and st insns on 68000 read before writing. */
f4e80198 2498 if (!ADDRESS_REG_P (operands[0])
3197c489 2499 && ((TARGET_68010 || TARGET_COLDFIRE)
f4e80198
RK
2500 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
2501 {
2502 if (operands[1] == const0_rtx)
2503 return "clr%.b %0";
9425fb04 2504 if ((!TARGET_COLDFIRE || DATA_REG_P (operands[0]))
f4e80198
RK
2505 && GET_CODE (operands[1]) == CONST_INT
2506 && (INTVAL (operands[1]) & 255) == 255)
2507 {
2508 CC_STATUS_INIT;
2509 return "st %0";
2510 }
2511 }
2512 if (GET_CODE (operands[1]) == CONST_INT
2513 && DATA_REG_P (operands[0])
2514 && INTVAL (operands[1]) < 128
2515 && INTVAL (operands[1]) >= -128)
a0a7fbc9 2516 return "moveq %1,%0";
38198304
AS
2517 if (operands[1] == const0_rtx && ADDRESS_REG_P (operands[0]))
2518 return "sub%.l %0,%0";
f4e80198
RK
2519 if (GET_CODE (operands[1]) != CONST_INT && CONSTANT_P (operands[1]))
2520 return "move%.l %1,%0";
c16eadc7 2521 /* 68k family (including the 5200 ColdFire) does not support byte moves to
37834fc8
JL
2522 from address registers. */
2523 if (ADDRESS_REG_P (operands[0]) || ADDRESS_REG_P (operands[1]))
f4e80198
RK
2524 return "move%.w %1,%0";
2525 return "move%.b %1,%0";
2526}
2527
5505f548 2528const char *
8a4a2253 2529output_move_stricthi (rtx *operands)
9b55bf04
RK
2530{
2531 if (operands[1] == const0_rtx
3197c489
RS
2532 /* clr insns on 68000 read before writing. */
2533 && ((TARGET_68010 || TARGET_COLDFIRE)
9b55bf04
RK
2534 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
2535 return "clr%.w %0";
2536 return "move%.w %1,%0";
2537}
2538
5505f548 2539const char *
8a4a2253 2540output_move_strictqi (rtx *operands)
9b55bf04
RK
2541{
2542 if (operands[1] == const0_rtx
3197c489
RS
2543 /* clr insns on 68000 read before writing. */
2544 && ((TARGET_68010 || TARGET_COLDFIRE)
9b55bf04
RK
2545 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
2546 return "clr%.b %0";
2547 return "move%.b %1,%0";
2548}
2549
79e68feb
RS
2550/* Return the best assembler insn template
2551 for moving operands[1] into operands[0] as a fullword. */
2552
5505f548 2553static const char *
8a4a2253 2554singlemove_string (rtx *operands)
79e68feb 2555{
02ed0c07
RK
2556 if (GET_CODE (operands[1]) == CONST_INT)
2557 return output_move_simode_const (operands);
2558 return "move%.l %1,%0";
79e68feb
RS
2559}
2560
2505bc97 2561
c47b0cb4
MK
2562/* Output assembler or rtl code to perform a doubleword move insn
2563 with operands OPERANDS.
2564 Pointers to 3 helper functions should be specified:
2565 HANDLE_REG_ADJUST to adjust a register by a small value,
2566 HANDLE_COMPADR to compute an address and
2567 HANDLE_MOVSI to move 4 bytes. */
79e68feb 2568
c47b0cb4
MK
2569static void
2570handle_move_double (rtx operands[2],
2571 void (*handle_reg_adjust) (rtx, int),
2572 void (*handle_compadr) (rtx [2]),
2573 void (*handle_movsi) (rtx [2]))
79e68feb 2574{
2505bc97
RS
2575 enum
2576 {
2577 REGOP, OFFSOP, MEMOP, PUSHOP, POPOP, CNSTOP, RNDOP
2578 } optype0, optype1;
79e68feb 2579 rtx latehalf[2];
2505bc97 2580 rtx middlehalf[2];
7f98eeb6 2581 rtx xops[2];
79e68feb 2582 rtx addreg0 = 0, addreg1 = 0;
7f98eeb6 2583 int dest_overlapped_low = 0;
184916bc 2584 int size = GET_MODE_SIZE (GET_MODE (operands[0]));
2505bc97
RS
2585
2586 middlehalf[0] = 0;
2587 middlehalf[1] = 0;
79e68feb
RS
2588
2589 /* First classify both operands. */
2590
2591 if (REG_P (operands[0]))
2592 optype0 = REGOP;
2593 else if (offsettable_memref_p (operands[0]))
2594 optype0 = OFFSOP;
2595 else if (GET_CODE (XEXP (operands[0], 0)) == POST_INC)
2596 optype0 = POPOP;
2597 else if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC)
2598 optype0 = PUSHOP;
2599 else if (GET_CODE (operands[0]) == MEM)
2600 optype0 = MEMOP;
2601 else
2602 optype0 = RNDOP;
2603
2604 if (REG_P (operands[1]))
2605 optype1 = REGOP;
2606 else if (CONSTANT_P (operands[1]))
2607 optype1 = CNSTOP;
2608 else if (offsettable_memref_p (operands[1]))
2609 optype1 = OFFSOP;
2610 else if (GET_CODE (XEXP (operands[1], 0)) == POST_INC)
2611 optype1 = POPOP;
2612 else if (GET_CODE (XEXP (operands[1], 0)) == PRE_DEC)
2613 optype1 = PUSHOP;
2614 else if (GET_CODE (operands[1]) == MEM)
2615 optype1 = MEMOP;
2616 else
2617 optype1 = RNDOP;
2618
4761e388
NS
2619 /* Check for the cases that the operand constraints are not supposed
2620 to allow to happen. Generating code for these cases is
2621 painful. */
2622 gcc_assert (optype0 != RNDOP && optype1 != RNDOP);
79e68feb
RS
2623
2624 /* If one operand is decrementing and one is incrementing
2625 decrement the former register explicitly
2626 and change that operand into ordinary indexing. */
2627
2628 if (optype0 == PUSHOP && optype1 == POPOP)
2629 {
2630 operands[0] = XEXP (XEXP (operands[0], 0), 0);
c47b0cb4
MK
2631
2632 handle_reg_adjust (operands[0], -size);
2633
2505bc97 2634 if (GET_MODE (operands[1]) == XFmode)
1d8eaa6b 2635 operands[0] = gen_rtx_MEM (XFmode, operands[0]);
2505bc97 2636 else if (GET_MODE (operands[0]) == DFmode)
1d8eaa6b 2637 operands[0] = gen_rtx_MEM (DFmode, operands[0]);
2505bc97 2638 else
1d8eaa6b 2639 operands[0] = gen_rtx_MEM (DImode, operands[0]);
79e68feb
RS
2640 optype0 = OFFSOP;
2641 }
2642 if (optype0 == POPOP && optype1 == PUSHOP)
2643 {
2644 operands[1] = XEXP (XEXP (operands[1], 0), 0);
c47b0cb4
MK
2645
2646 handle_reg_adjust (operands[1], -size);
2647
2505bc97 2648 if (GET_MODE (operands[1]) == XFmode)
1d8eaa6b 2649 operands[1] = gen_rtx_MEM (XFmode, operands[1]);
2505bc97 2650 else if (GET_MODE (operands[1]) == DFmode)
1d8eaa6b 2651 operands[1] = gen_rtx_MEM (DFmode, operands[1]);
2505bc97 2652 else
1d8eaa6b 2653 operands[1] = gen_rtx_MEM (DImode, operands[1]);
79e68feb
RS
2654 optype1 = OFFSOP;
2655 }
2656
2657 /* If an operand is an unoffsettable memory ref, find a register
2658 we can increment temporarily to make it refer to the second word. */
2659
2660 if (optype0 == MEMOP)
2661 addreg0 = find_addr_reg (XEXP (operands[0], 0));
2662
2663 if (optype1 == MEMOP)
2664 addreg1 = find_addr_reg (XEXP (operands[1], 0));
2665
2666 /* Ok, we can do one word at a time.
2667 Normally we do the low-numbered word first,
2668 but if either operand is autodecrementing then we
2669 do the high-numbered word first.
2670
2671 In either case, set up in LATEHALF the operands to use
2672 for the high-numbered word and in some cases alter the
2673 operands in OPERANDS to be suitable for the low-numbered word. */
2674
2505bc97
RS
2675 if (size == 12)
2676 {
2677 if (optype0 == REGOP)
2678 {
1d8eaa6b
AS
2679 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 2);
2680 middlehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2505bc97
RS
2681 }
2682 else if (optype0 == OFFSOP)
2683 {
b72f00af
RK
2684 middlehalf[0] = adjust_address (operands[0], SImode, 4);
2685 latehalf[0] = adjust_address (operands[0], SImode, size - 4);
2505bc97
RS
2686 }
2687 else
2688 {
c47b0cb4
MK
2689 middlehalf[0] = adjust_address (operands[0], SImode, 0);
2690 latehalf[0] = adjust_address (operands[0], SImode, 0);
2505bc97
RS
2691 }
2692
2693 if (optype1 == REGOP)
2694 {
1d8eaa6b
AS
2695 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 2);
2696 middlehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
2505bc97
RS
2697 }
2698 else if (optype1 == OFFSOP)
2699 {
b72f00af
RK
2700 middlehalf[1] = adjust_address (operands[1], SImode, 4);
2701 latehalf[1] = adjust_address (operands[1], SImode, size - 4);
2505bc97
RS
2702 }
2703 else if (optype1 == CNSTOP)
2704 {
2705 if (GET_CODE (operands[1]) == CONST_DOUBLE)
2706 {
2707 REAL_VALUE_TYPE r;
2708 long l[3];
2709
2710 REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
2711 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
2712 operands[1] = GEN_INT (l[0]);
2713 middlehalf[1] = GEN_INT (l[1]);
2714 latehalf[1] = GEN_INT (l[2]);
2715 }
4761e388 2716 else
2505bc97 2717 {
4761e388
NS
2718 /* No non-CONST_DOUBLE constant should ever appear
2719 here. */
2720 gcc_assert (!CONSTANT_P (operands[1]));
2505bc97
RS
2721 }
2722 }
2723 else
2724 {
c47b0cb4
MK
2725 middlehalf[1] = adjust_address (operands[1], SImode, 0);
2726 latehalf[1] = adjust_address (operands[1], SImode, 0);
2505bc97
RS
2727 }
2728 }
79e68feb 2729 else
2505bc97
RS
2730 /* size is not 12: */
2731 {
2732 if (optype0 == REGOP)
1d8eaa6b 2733 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2505bc97 2734 else if (optype0 == OFFSOP)
b72f00af 2735 latehalf[0] = adjust_address (operands[0], SImode, size - 4);
2505bc97 2736 else
c47b0cb4 2737 latehalf[0] = adjust_address (operands[0], SImode, 0);
2505bc97
RS
2738
2739 if (optype1 == REGOP)
1d8eaa6b 2740 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
2505bc97 2741 else if (optype1 == OFFSOP)
b72f00af 2742 latehalf[1] = adjust_address (operands[1], SImode, size - 4);
2505bc97
RS
2743 else if (optype1 == CNSTOP)
2744 split_double (operands[1], &operands[1], &latehalf[1]);
2745 else
c47b0cb4 2746 latehalf[1] = adjust_address (operands[1], SImode, 0);
2505bc97 2747 }
79e68feb
RS
2748
2749 /* If insn is effectively movd N(sp),-(sp) then we will do the
2750 high word first. We should use the adjusted operand 1 (which is N+4(sp))
2751 for the low word as well, to compensate for the first decrement of sp. */
2752 if (optype0 == PUSHOP
2753 && REGNO (XEXP (XEXP (operands[0], 0), 0)) == STACK_POINTER_REGNUM
2754 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
c88aeaf8 2755 operands[1] = middlehalf[1] = latehalf[1];
79e68feb 2756
7f98eeb6
RS
2757 /* For (set (reg:DI N) (mem:DI ... (reg:SI N) ...)),
2758 if the upper part of reg N does not appear in the MEM, arrange to
2759 emit the move late-half first. Otherwise, compute the MEM address
2760 into the upper part of N and use that as a pointer to the memory
2761 operand. */
2762 if (optype0 == REGOP
2763 && (optype1 == OFFSOP || optype1 == MEMOP))
2764 {
1d8eaa6b 2765 rtx testlow = gen_rtx_REG (SImode, REGNO (operands[0]));
3a58400f
RS
2766
2767 if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
d7e8d581 2768 && reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
7f98eeb6
RS
2769 {
2770 /* If both halves of dest are used in the src memory address,
3a58400f
RS
2771 compute the address into latehalf of dest.
2772 Note that this can't happen if the dest is two data regs. */
4761e388 2773 compadr:
7f98eeb6
RS
2774 xops[0] = latehalf[0];
2775 xops[1] = XEXP (operands[1], 0);
c47b0cb4
MK
2776
2777 handle_compadr (xops);
2778 if (GET_MODE (operands[1]) == XFmode)
7f98eeb6 2779 {
1d8eaa6b 2780 operands[1] = gen_rtx_MEM (XFmode, latehalf[0]);
b72f00af
RK
2781 middlehalf[1] = adjust_address (operands[1], DImode, size - 8);
2782 latehalf[1] = adjust_address (operands[1], DImode, size - 4);
7f98eeb6
RS
2783 }
2784 else
2785 {
1d8eaa6b 2786 operands[1] = gen_rtx_MEM (DImode, latehalf[0]);
b72f00af 2787 latehalf[1] = adjust_address (operands[1], DImode, size - 4);
7f98eeb6
RS
2788 }
2789 }
2790 else if (size == 12
d7e8d581
RS
2791 && reg_overlap_mentioned_p (middlehalf[0],
2792 XEXP (operands[1], 0)))
7f98eeb6 2793 {
3a58400f
RS
2794 /* Check for two regs used by both source and dest.
2795 Note that this can't happen if the dest is all data regs.
2796 It can happen if the dest is d6, d7, a0.
2797 But in that case, latehalf is an addr reg, so
2798 the code at compadr does ok. */
2799
2800 if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
d7e8d581
RS
2801 || reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
2802 goto compadr;
7f98eeb6
RS
2803
2804 /* JRV says this can't happen: */
4761e388 2805 gcc_assert (!addreg0 && !addreg1);
7f98eeb6 2806
7a1929e1 2807 /* Only the middle reg conflicts; simply put it last. */
c47b0cb4
MK
2808 handle_movsi (operands);
2809 handle_movsi (latehalf);
2810 handle_movsi (middlehalf);
2811
2812 return;
7f98eeb6 2813 }
2fb8a81d 2814 else if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0)))
7f98eeb6
RS
2815 /* If the low half of dest is mentioned in the source memory
2816 address, the arrange to emit the move late half first. */
2817 dest_overlapped_low = 1;
2818 }
2819
79e68feb
RS
2820 /* If one or both operands autodecrementing,
2821 do the two words, high-numbered first. */
2822
2823 /* Likewise, the first move would clobber the source of the second one,
2824 do them in the other order. This happens only for registers;
2825 such overlap can't happen in memory unless the user explicitly
2826 sets it up, and that is an undefined circumstance. */
2827
2828 if (optype0 == PUSHOP || optype1 == PUSHOP
2829 || (optype0 == REGOP && optype1 == REGOP
2505bc97 2830 && ((middlehalf[1] && REGNO (operands[0]) == REGNO (middlehalf[1]))
7f98eeb6
RS
2831 || REGNO (operands[0]) == REGNO (latehalf[1])))
2832 || dest_overlapped_low)
79e68feb
RS
2833 {
2834 /* Make any unoffsettable addresses point at high-numbered word. */
2835 if (addreg0)
c47b0cb4 2836 handle_reg_adjust (addreg0, size - 4);
79e68feb 2837 if (addreg1)
c47b0cb4 2838 handle_reg_adjust (addreg1, size - 4);
79e68feb
RS
2839
2840 /* Do that word. */
c47b0cb4 2841 handle_movsi (latehalf);
79e68feb
RS
2842
2843 /* Undo the adds we just did. */
2844 if (addreg0)
c47b0cb4 2845 handle_reg_adjust (addreg0, -4);
79e68feb 2846 if (addreg1)
c47b0cb4 2847 handle_reg_adjust (addreg1, -4);
79e68feb 2848
2505bc97
RS
2849 if (size == 12)
2850 {
c47b0cb4
MK
2851 handle_movsi (middlehalf);
2852
2505bc97 2853 if (addreg0)
c47b0cb4 2854 handle_reg_adjust (addreg0, -4);
2505bc97 2855 if (addreg1)
c47b0cb4 2856 handle_reg_adjust (addreg1, -4);
2505bc97
RS
2857 }
2858
79e68feb 2859 /* Do low-numbered word. */
c47b0cb4
MK
2860
2861 handle_movsi (operands);
2862 return;
79e68feb
RS
2863 }
2864
2865 /* Normal case: do the two words, low-numbered first. */
2866
c47b0cb4 2867 handle_movsi (operands);
79e68feb 2868
2505bc97
RS
2869 /* Do the middle one of the three words for long double */
2870 if (size == 12)
2871 {
2872 if (addreg0)
c47b0cb4 2873 handle_reg_adjust (addreg0, 4);
2505bc97 2874 if (addreg1)
c47b0cb4 2875 handle_reg_adjust (addreg1, 4);
2505bc97 2876
c47b0cb4 2877 handle_movsi (middlehalf);
2505bc97
RS
2878 }
2879
79e68feb
RS
2880 /* Make any unoffsettable addresses point at high-numbered word. */
2881 if (addreg0)
c47b0cb4 2882 handle_reg_adjust (addreg0, 4);
79e68feb 2883 if (addreg1)
c47b0cb4 2884 handle_reg_adjust (addreg1, 4);
79e68feb
RS
2885
2886 /* Do that word. */
c47b0cb4 2887 handle_movsi (latehalf);
79e68feb
RS
2888
2889 /* Undo the adds we just did. */
2890 if (addreg0)
c47b0cb4
MK
2891 handle_reg_adjust (addreg0, -(size - 4));
2892 if (addreg1)
2893 handle_reg_adjust (addreg1, -(size - 4));
2894
2895 return;
2896}
2897
2898/* Output assembler code to adjust REG by N. */
2899static void
2900output_reg_adjust (rtx reg, int n)
2901{
2902 const char *s;
2903
2904 gcc_assert (GET_MODE (reg) == SImode
2905 && -12 <= n && n != 0 && n <= 12);
2906
2907 switch (n)
2505bc97 2908 {
c47b0cb4
MK
2909 case 12:
2910 s = "add%.l #12,%0";
2911 break;
2912
2913 case 8:
2914 s = "addq%.l #8,%0";
2915 break;
2916
2917 case 4:
2918 s = "addq%.l #4,%0";
2919 break;
2920
2921 case -12:
2922 s = "sub%.l #12,%0";
2923 break;
2924
2925 case -8:
2926 s = "subq%.l #8,%0";
2927 break;
2928
2929 case -4:
2930 s = "subq%.l #4,%0";
2931 break;
2932
2933 default:
2934 gcc_unreachable ();
2935 s = NULL;
2505bc97 2936 }
c47b0cb4
MK
2937
2938 output_asm_insn (s, &reg);
2939}
2940
2941/* Emit rtl code to adjust REG by N. */
2942static void
2943emit_reg_adjust (rtx reg1, int n)
2944{
2945 rtx reg2;
2946
2947 gcc_assert (GET_MODE (reg1) == SImode
2948 && -12 <= n && n != 0 && n <= 12);
2949
2950 reg1 = copy_rtx (reg1);
2951 reg2 = copy_rtx (reg1);
2952
2953 if (n < 0)
2954 emit_insn (gen_subsi3 (reg1, reg2, GEN_INT (-n)));
2955 else if (n > 0)
2956 emit_insn (gen_addsi3 (reg1, reg2, GEN_INT (n)));
2957 else
2958 gcc_unreachable ();
2959}
2960
2961/* Output assembler to load address OPERANDS[0] to register OPERANDS[1]. */
2962static void
2963output_compadr (rtx operands[2])
2964{
2965 output_asm_insn ("lea %a1,%0", operands);
2966}
2967
2968/* Output the best assembler insn for moving operands[1] into operands[0]
2969 as a fullword. */
2970static void
2971output_movsi (rtx operands[2])
2972{
2973 output_asm_insn (singlemove_string (operands), operands);
2974}
2975
2976/* Copy OP and change its mode to MODE. */
2977static rtx
2978copy_operand (rtx op, enum machine_mode mode)
2979{
2980 /* ??? This looks really ugly. There must be a better way
2981 to change a mode on the operand. */
2982 if (GET_MODE (op) != VOIDmode)
2505bc97 2983 {
c47b0cb4
MK
2984 if (REG_P (op))
2985 op = gen_rtx_REG (mode, REGNO (op));
2505bc97 2986 else
c47b0cb4
MK
2987 {
2988 op = copy_rtx (op);
2989 PUT_MODE (op, mode);
2990 }
2505bc97 2991 }
79e68feb 2992
c47b0cb4
MK
2993 return op;
2994}
2995
2996/* Emit rtl code for moving operands[1] into operands[0] as a fullword. */
2997static void
2998emit_movsi (rtx operands[2])
2999{
3000 operands[0] = copy_operand (operands[0], SImode);
3001 operands[1] = copy_operand (operands[1], SImode);
3002
3003 emit_insn (gen_movsi (operands[0], operands[1]));
3004}
3005
3006/* Output assembler code to perform a doubleword move insn
3007 with operands OPERANDS. */
3008const char *
3009output_move_double (rtx *operands)
3010{
3011 handle_move_double (operands,
3012 output_reg_adjust, output_compadr, output_movsi);
3013
79e68feb
RS
3014 return "";
3015}
3016
c47b0cb4
MK
3017/* Output rtl code to perform a doubleword move insn
3018 with operands OPERANDS. */
3019void
3020m68k_emit_move_double (rtx operands[2])
3021{
3022 handle_move_double (operands, emit_reg_adjust, emit_movsi, emit_movsi);
3023}
dcc21c4c
PB
3024
3025/* Ensure mode of ORIG, a REG rtx, is MODE. Returns either ORIG or a
3026 new rtx with the correct mode. */
3027
3028static rtx
3029force_mode (enum machine_mode mode, rtx orig)
3030{
3031 if (mode == GET_MODE (orig))
3032 return orig;
3033
3034 if (REGNO (orig) >= FIRST_PSEUDO_REGISTER)
3035 abort ();
3036
3037 return gen_rtx_REG (mode, REGNO (orig));
3038}
3039
/* Return nonzero if OP is a floating-point register, checked only once
   register renumbering data exists (reg_renumber non-null).  MODE is
   ignored.  NOTE(review): assumes OP is a REG rtx -- FP_REG_P examines
   its register number; confirm against callers.  */
static int
fp_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return reg_renumber && FP_REG_P (op);
}
3045
/* Emit insns to move operands[1] into operands[0].

   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.

   Note SCRATCH_REG may not be in the proper mode depending on how it
   will be used.  This routine is responsible for creating a new copy
   of SCRATCH_REG in the proper mode.

   NOTE(review): the structure (and the "14 bits" comments below) look
   inherited from another port's secondary-reload code -- on m68k the
   displacement remark should just read "the address is not directly
   usable"; confirm against history.  */

int
emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
{
  register rtx operand0 = operands[0];
  register rtx operand1 = operands[1];
  register rtx tem;

  /* During reload, replace a pseudo destination (or a SUBREG of one)
     with its equivalent memory location.  */
  if (scratch_reg
      && reload_in_progress && GET_CODE (operand0) == REG
      && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
    operand0 = reg_equiv_mem[REGNO (operand0)];
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand0) == SUBREG
	   && GET_CODE (SUBREG_REG (operand0)) == REG
	   && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
    {
     /* We must not alter SUBREG_BYTE (operand0) since that would confuse
	the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
				 reg_equiv_mem [REGNO (SUBREG_REG (operand0))],
				 SUBREG_BYTE (operand0));
      operand0 = alter_subreg (&temp);
    }

  /* Likewise for a pseudo source.  */
  if (scratch_reg
      && reload_in_progress && GET_CODE (operand1) == REG
      && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
    operand1 = reg_equiv_mem[REGNO (operand1)];
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand1) == SUBREG
	   && GET_CODE (SUBREG_REG (operand1)) == REG
	   && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
    {
     /* We must not alter SUBREG_BYTE (operand0) since that would confuse
	the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
				 reg_equiv_mem [REGNO (SUBREG_REG (operand1))],
				 SUBREG_BYTE (operand1));
      operand1 = alter_subreg (&temp);
    }

  /* Pick up any address replacements reload has recorded for MEM
     operands.  */
  if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
      && ((tem = find_replacement (&XEXP (operand0, 0)))
	  != XEXP (operand0, 0)))
    operand0 = gen_rtx_MEM (GET_MODE (operand0), tem);
  if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
      && ((tem = find_replacement (&XEXP (operand1, 0)))
	  != XEXP (operand1, 0)))
    operand1 = gen_rtx_MEM (GET_MODE (operand1), tem);

  /* Handle secondary reloads for loads/stores of FP registers where
     the address is symbolic by using the scratch register */
  if (fp_reg_operand (operand0, mode)
      && ((GET_CODE (operand1) == MEM
	   && ! memory_address_p (DFmode, XEXP (operand1, 0)))
	  || ((GET_CODE (operand1) == SUBREG
	       && GET_CODE (XEXP (operand1, 0)) == MEM
	       && !memory_address_p (DFmode, XEXP (XEXP (operand1, 0), 0)))))
      && scratch_reg)
    {
      /* FP register load: compute the source address into SCRATCH_REG,
	 then load through it.  */
      if (GET_CODE (operand1) == SUBREG)
	operand1 = XEXP (operand1, 0);

      /* SCRATCH_REG will hold an address.  We want
	 it in SImode regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (SImode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand1, 0)))
	{
	  /* Rebuild the (op base scratch) address with the displacement
	     already loaded into the scratch register.  */
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
	  emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
						       Pmode,
						       XEXP (XEXP (operand1, 0), 0),
						       scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand1, 0));
      emit_insn (gen_rtx_SET (VOIDmode, operand0,
			      gen_rtx_MEM (mode, scratch_reg)));
      return 1;
    }
  else if (fp_reg_operand (operand1, mode)
	   && ((GET_CODE (operand0) == MEM
		&& ! memory_address_p (DFmode, XEXP (operand0, 0)))
	       || ((GET_CODE (operand0) == SUBREG)
		   && GET_CODE (XEXP (operand0, 0)) == MEM
		   && !memory_address_p (DFmode, XEXP (XEXP (operand0, 0), 0))))
	   && scratch_reg)
    {
      /* Mirror case: FP register store through SCRATCH_REG.  */
      if (GET_CODE (operand0) == SUBREG)
	operand0 = XEXP (operand0, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in SIMODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (SImode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand0, 0)))
	{
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
	  emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0,
								       0)),
						       Pmode,
						       XEXP (XEXP (operand0, 0),
								   0),
						       scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand0, 0));
      emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_MEM (mode, scratch_reg),
			      operand1));
      return 1;
    }
  /* Handle secondary reloads for loads of FP registers from constant
     expressions by forcing the constant into memory.

     use scratch_reg to hold the address of the memory location.

     The proper fix is to change PREFERRED_RELOAD_CLASS to return
     NO_REGS when presented with a const_int and an register class
     containing only FP registers.  Doing so unfortunately creates
     more problems than it solves.   Fix this for 2.5.  */
  else if (fp_reg_operand (operand0, mode)
	   && CONSTANT_P (operand1)
	   && scratch_reg)
    {
      rtx xoperands[2];

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in SIMODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (SImode, scratch_reg);

      /* Force the constant into memory and put the address of the
	 memory location into scratch_reg.  */
      xoperands[0] = scratch_reg;
      xoperands[1] = XEXP (force_const_mem (mode, operand1), 0);
      emit_insn (gen_rtx_SET (mode, scratch_reg, xoperands[1]));

      /* Now load the destination register.  */
      emit_insn (gen_rtx_SET (mode, operand0,
			      gen_rtx_MEM (mode, scratch_reg)));
      return 1;
    }

  /* Now have insn-emit do whatever it normally does.  */
  return 0;
}
3209
01e304f8
RZ
3210/* Split one or more DImode RTL references into pairs of SImode
3211 references. The RTL can be REG, offsettable MEM, integer constant, or
3212 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
3213 split and "num" is its length. lo_half and hi_half are output arrays
3214 that parallel "operands". */
3215
3216void
3217split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
3218{
3219 while (num--)
3220 {
3221 rtx op = operands[num];
3222
3223 /* simplify_subreg refuses to split volatile memory addresses,
3224 but we still have to handle it. */
3225 if (GET_CODE (op) == MEM)
3226 {
3227 lo_half[num] = adjust_address (op, SImode, 4);
3228 hi_half[num] = adjust_address (op, SImode, 0);
3229 }
3230 else
3231 {
3232 lo_half[num] = simplify_gen_subreg (SImode, op,
3233 GET_MODE (op) == VOIDmode
3234 ? DImode : GET_MODE (op), 4);
3235 hi_half[num] = simplify_gen_subreg (SImode, op,
3236 GET_MODE (op) == VOIDmode
3237 ? DImode : GET_MODE (op), 0);
3238 }
3239 }
3240}
3241
a40ed0f3
KH
3242/* Split X into a base and a constant offset, storing them in *BASE
3243 and *OFFSET respectively. */
3244
3245static void
3246m68k_split_offset (rtx x, rtx *base, HOST_WIDE_INT *offset)
3247{
3248 *offset = 0;
3249 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
3250 {
3251 *offset += INTVAL (XEXP (x, 1));
3252 x = XEXP (x, 0);
3253 }
3254 *base = x;
3255}
3256
/* Return true if PATTERN is a PARALLEL suitable for a movem or fmovem
   instruction.  STORE_P says whether the move is a load or store.

   If the instruction uses post-increment or pre-decrement addressing,
   AUTOMOD_BASE is the base register and AUTOMOD_OFFSET is the total
   adjustment.  This adjustment will be made by the first element of
   PARALLEL, with the loads or stores starting at element 1.  If the
   instruction does not use post-increment or pre-decrement addressing,
   AUTOMOD_BASE is null, AUTOMOD_OFFSET is 0, and the loads or stores
   start at element 0.  */

bool
m68k_movem_pattern_p (rtx pattern, rtx automod_base,
		      HOST_WIDE_INT automod_offset, bool store_p)
{
  rtx base, mem_base, set, mem, reg, last_reg;
  HOST_WIDE_INT offset, mem_offset;
  int i, first, len;
  enum reg_class rclass;

  len = XVECLEN (pattern, 0);
  first = (automod_base != NULL);

  if (automod_base)
    {
      /* Stores must be pre-decrement and loads must be post-increment.  */
      if (store_p != (automod_offset < 0))
	return false;

      /* Work out the base and offset for lowest memory location.  */
      base = automod_base;
      offset = (automod_offset < 0 ? automod_offset : 0);
    }
  else
    {
      /* Allow any valid base and offset in the first access.  */
      base = NULL;
      offset = 0;
    }

  last_reg = NULL;
  rclass = NO_REGS;
  for (i = first; i < len; i++)
    {
      /* We need a plain SET.  */
      set = XVECEXP (pattern, 0, i);
      if (GET_CODE (set) != SET)
	return false;

      /* Check that we have a memory location...  */
      mem = XEXP (set, !store_p);
      if (!MEM_P (mem) || !memory_operand (mem, VOIDmode))
	return false;

      /* ...with the right address.  */
      if (base == NULL)
	{
	  m68k_split_offset (XEXP (mem, 0), &base, &offset);
	  /* The ColdFire instruction only allows (An) and (d16,An) modes.
	     There are no mode restrictions for 680x0 besides the
	     automodification rules enforced above.  */
	  if (TARGET_COLDFIRE
	      && !m68k_legitimate_base_reg_p (base, reload_completed))
	    return false;
	}
      else
	{
	  /* Subsequent accesses must continue at the running OFFSET,
	     i.e. the memory locations must be consecutive.  */
	  m68k_split_offset (XEXP (mem, 0), &mem_base, &mem_offset);
	  if (!rtx_equal_p (base, mem_base) || offset != mem_offset)
	    return false;
	}

      /* Check that we have a register of the required mode and class.  */
      reg = XEXP (set, store_p);
      if (!REG_P (reg)
	  || !HARD_REGISTER_P (reg)
	  || GET_MODE (reg) != reg_raw_mode[REGNO (reg)])
	return false;

      if (last_reg)
	{
	  /* The register must belong to RCLASS and have a higher number
	     than the register in the previous SET.  */
	  if (!TEST_HARD_REG_BIT (reg_class_contents[rclass], REGNO (reg))
	      || REGNO (last_reg) >= REGNO (reg))
	    return false;
	}
      else
	{
	  /* Work out which register class we need.  */
	  if (INT_REGNO_P (REGNO (reg)))
	    rclass = GENERAL_REGS;
	  else if (FP_REGNO_P (REGNO (reg)))
	    rclass = FP_REGS;
	  else
	    return false;
	}

      /* Advance past this register's slot in memory.  */
      last_reg = reg;
      offset += GET_MODE_SIZE (GET_MODE (reg));
    }

  /* If we have an automodification, check whether the final offset is OK.  */
  if (automod_base && offset != (automod_offset < 0 ? 0 : automod_offset))
    return false;

  /* Reject unprofitable cases.  */
  if (len < first + (rclass == FP_REGS ? MIN_FMOVEM_REGS : MIN_MOVEM_REGS))
    return false;

  return true;
}
3369
/* Return the assembly code template for a movem or fmovem instruction
   whose pattern is given by PATTERN.  Store the template's operands
   in OPERANDS.

   If the instruction uses post-increment or pre-decrement addressing,
   AUTOMOD_OFFSET is the total adjustment, otherwise it is 0.  STORE_P
   is true if this is a store instruction.  */

const char *
m68k_output_movem (rtx *operands, rtx pattern,
		   HOST_WIDE_INT automod_offset, bool store_p)
{
  unsigned int mask;
  int i, first;

  gcc_assert (GET_CODE (pattern) == PARALLEL);
  mask = 0;
  /* With automodification, element 0 of PARALLEL adjusts the base
     register; the loads or stores start at element 1.  */
  first = (automod_offset != 0);
  for (i = first; i < XVECLEN (pattern, 0); i++)
    {
      /* When using movem with pre-decrement addressing, register X + D0_REG
	 is controlled by bit 15 - X.  For all other addressing modes,
	 register X + D0_REG is controlled by bit X.  Confusingly, the
	 register mask for fmovem is in the opposite order to that for
	 movem.  */
      unsigned int regno;

      gcc_assert (MEM_P (XEXP (XVECEXP (pattern, 0, i), !store_p)));
      gcc_assert (REG_P (XEXP (XVECEXP (pattern, 0, i), store_p)));
      regno = REGNO (XEXP (XVECEXP (pattern, 0, i), store_p));
      if (automod_offset < 0)
	{
	  if (FP_REGNO_P (regno))
	    mask |= 1 << (regno - FP0_REG);
	  else
	    mask |= 1 << (15 - (regno - D0_REG));
	}
      else
	{
	  if (FP_REGNO_P (regno))
	    mask |= 1 << (7 - (regno - FP0_REG));
	  else
	    mask |= 1 << (regno - D0_REG);
	}
    }
  /* Forget any cached condition-code state.  */
  CC_STATUS_INIT;

  /* Operand 0 is the (possibly automodified) address; operand 1 the
     register mask built above.  */
  if (automod_offset == 0)
    operands[0] = XEXP (XEXP (XVECEXP (pattern, 0, first), !store_p), 0);
  else if (automod_offset < 0)
    operands[0] = gen_rtx_PRE_DEC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
  else
    operands[0] = gen_rtx_POST_INC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
  operands[1] = GEN_INT (mask);
  if (FP_REGNO_P (REGNO (XEXP (XVECEXP (pattern, 0, first), store_p))))
    {
      if (store_p)
	return "fmovem %1,%a0";
      else
	return "fmovem %a0,%1";
    }
  else
    {
      if (store_p)
	return "movem%.l %1,%a0";
      else
	return "movem%.l %a0,%1";
    }
}
3439
79e68feb
RS
3440/* Return a REG that occurs in ADDR with coefficient 1.
3441 ADDR can be effectively incremented by incrementing REG. */
3442
3443static rtx
8a4a2253 3444find_addr_reg (rtx addr)
79e68feb
RS
3445{
3446 while (GET_CODE (addr) == PLUS)
3447 {
3448 if (GET_CODE (XEXP (addr, 0)) == REG)
3449 addr = XEXP (addr, 0);
3450 else if (GET_CODE (XEXP (addr, 1)) == REG)
3451 addr = XEXP (addr, 1);
3452 else if (CONSTANT_P (XEXP (addr, 0)))
3453 addr = XEXP (addr, 1);
3454 else if (CONSTANT_P (XEXP (addr, 1)))
3455 addr = XEXP (addr, 0);
3456 else
4761e388 3457 gcc_unreachable ();
79e68feb 3458 }
4761e388
NS
3459 gcc_assert (GET_CODE (addr) == REG);
3460 return addr;
79e68feb 3461}
9ee3c687 3462
/* Output assembler code to perform a 32-bit 3-operand add.  */

const char *
output_addsi3 (rtx *operands)
{
  if (! operands_match_p (operands[0], operands[1]))
    {
      /* Three distinct operands: use lea, which needs the address
	 register (if any) in the base position.  */
      if (!ADDRESS_REG_P (operands[1]))
	{
	  rtx tmp = operands[1];

	  operands[1] = operands[2];
	  operands[2] = tmp;
	}

      /* These insns can result from reloads to access
	 stack slots over 64k from the frame pointer.  */
      if (GET_CODE (operands[2]) == CONST_INT
	  && (INTVAL (operands[2]) < -32768 || INTVAL (operands[2]) > 32767))
	return "move%.l %2,%0\n\tadd%.l %1,%0";
      if (GET_CODE (operands[2]) == REG)
	return MOTOROLA ? "lea (%1,%2.l),%0" : "lea %1@(0,%2:l),%0";
      return MOTOROLA ? "lea (%c2,%1),%0" : "lea %1@(%c2),%0";
    }
  /* Two-operand form: pick the cheapest immediate add available.  */
  if (GET_CODE (operands[2]) == CONST_INT)
    {
      /* addq/subq take quick immediates 1..8.  */
      if (INTVAL (operands[2]) > 0
	  && INTVAL (operands[2]) <= 8)
	return "addq%.l %2,%0";
      if (INTVAL (operands[2]) < 0
	  && INTVAL (operands[2]) >= -8)
	{
	  operands[2] = GEN_INT (- INTVAL (operands[2]));
	  return "subq%.l %2,%0";
	}
      /* On the CPU32 it is faster to use two addql instructions to
	 add a small integer (8 < N <= 16) to a register.
	 Likewise for subql.  */
      if (TUNE_CPU32 && REG_P (operands[0]))
	{
	  if (INTVAL (operands[2]) > 8
	      && INTVAL (operands[2]) <= 16)
	    {
	      operands[2] = GEN_INT (INTVAL (operands[2]) - 8);
	      return "addq%.l #8,%0\n\taddq%.l %2,%0";
	    }
	  if (INTVAL (operands[2]) < -8
	      && INTVAL (operands[2]) >= -16)
	    {
	      operands[2] = GEN_INT (- INTVAL (operands[2]) - 8);
	      return "subq%.l #8,%0\n\tsubq%.l %2,%0";
	    }
	}
      /* 16-bit immediates into an address register: add.w sign-extends,
	 and lea is preferred off the 68040.  */
      if (ADDRESS_REG_P (operands[0])
	  && INTVAL (operands[2]) >= -0x8000
	  && INTVAL (operands[2]) < 0x8000)
	{
	  if (TUNE_68040)
	    return "add%.w %2,%0";
	  else
	    return MOTOROLA ? "lea (%c2,%0),%0" : "lea %0@(%c2),%0";
	}
    }
  return "add%.l %2,%0";
}
79e68feb
RS
3528\f
/* Store in cc_status the expressions that the condition codes will
   describe after execution of an instruction whose pattern is EXP.
   Do not alter them if the instruction would not alter the cc's.  */

/* On the 68000, all the insns to store in an address register fail to
   set the cc's.  However, in some cases these instructions can make it
   possibly invalid to use the saved cc's.  In those cases we clear out
   some or all of the saved cc's so they won't be used.  */

void
notice_update_cc (rtx exp, rtx insn)
{
  if (GET_CODE (exp) == SET)
    {
      if (GET_CODE (SET_SRC (exp)) == CALL)
	CC_STATUS_INIT;
      else if (ADDRESS_REG_P (SET_DEST (exp)))
	{
	  /* Moves into address registers leave the cc's alone but may
	     invalidate the values they describe.  */
	  if (cc_status.value1 && modified_in_p (cc_status.value1, insn))
	    cc_status.value1 = 0;
	  if (cc_status.value2 && modified_in_p (cc_status.value2, insn))
	    cc_status.value2 = 0;
	}
      /* fmoves to memory or data registers do not set the condition
	 codes.  Normal moves _do_ set the condition codes, but not in
	 a way that is appropriate for comparison with 0, because -0.0
	 would be treated as a negative nonzero number.  Note that it
	 isn't appropriate to conditionalize this restriction on
	 HONOR_SIGNED_ZEROS because that macro merely indicates whether
	 we care about the difference between -0.0 and +0.0.  */
      else if (!FP_REG_P (SET_DEST (exp))
	       && SET_DEST (exp) != cc0_rtx
	       && (FP_REG_P (SET_SRC (exp))
		   || GET_CODE (SET_SRC (exp)) == FIX
		   || FLOAT_MODE_P (GET_MODE (SET_DEST (exp)))))
	CC_STATUS_INIT;
      /* A pair of move insns doesn't produce a useful overall cc.  */
      else if (!FP_REG_P (SET_DEST (exp))
	       && !FP_REG_P (SET_SRC (exp))
	       && GET_MODE_SIZE (GET_MODE (SET_SRC (exp))) > 4
	       && (GET_CODE (SET_SRC (exp)) == REG
		   || GET_CODE (SET_SRC (exp)) == MEM
		   || GET_CODE (SET_SRC (exp)) == CONST_DOUBLE))
	CC_STATUS_INIT;
      else if (SET_DEST (exp) != pc_rtx)
	{
	  /* Ordinary SET: record dest/src as described by the cc's.  */
	  cc_status.flags = 0;
	  cc_status.value1 = SET_DEST (exp);
	  cc_status.value2 = SET_SRC (exp);
	}
    }
  else if (GET_CODE (exp) == PARALLEL
	   && GET_CODE (XVECEXP (exp, 0, 0)) == SET)
    {
      /* Only the first SET of a PARALLEL is considered.  */
      rtx dest = SET_DEST (XVECEXP (exp, 0, 0));
      rtx src = SET_SRC (XVECEXP (exp, 0, 0));

      if (ADDRESS_REG_P (dest))
	CC_STATUS_INIT;
      else if (dest != pc_rtx)
	{
	  cc_status.flags = 0;
	  cc_status.value1 = dest;
	  cc_status.value2 = src;
	}
    }
  else
    CC_STATUS_INIT;
  if (cc_status.value2 != 0
      && ADDRESS_REG_P (cc_status.value2)
      && GET_MODE (cc_status.value2) == QImode)
    CC_STATUS_INIT;
  if (cc_status.value2 != 0)
    switch (GET_CODE (cc_status.value2))
      {
      case ASHIFT: case ASHIFTRT: case LSHIFTRT:
      case ROTATE: case ROTATERT:
	/* These instructions always clear the overflow bit, and set
	   the carry to the bit shifted out.  */
	cc_status.flags |= CC_OVERFLOW_UNUSABLE | CC_NO_CARRY;
	break;

      case PLUS: case MINUS: case MULT:
      case DIV: case UDIV: case MOD: case UMOD: case NEG:
	if (GET_MODE (cc_status.value2) != VOIDmode)
	  cc_status.flags |= CC_NO_OVERFLOW;
	break;
      case ZERO_EXTEND:
	/* (SET r1 (ZERO_EXTEND r2)) on this machine
	   ends with a move insn moving r2 in r2's mode.
	   Thus, the cc's are set for r2.
	   This can set N bit spuriously.  */
	cc_status.flags |= CC_NOT_NEGATIVE;
	/* Fall through -- the default case does nothing.  */

      default:
	break;
      }
  if (cc_status.value1 && GET_CODE (cc_status.value1) == REG
      && cc_status.value2
      && reg_overlap_mentioned_p (cc_status.value1, cc_status.value2))
    cc_status.value2 = 0;
  if (((cc_status.value1 && FP_REG_P (cc_status.value1))
       || (cc_status.value2 && FP_REG_P (cc_status.value2))))
    cc_status.flags = CC_IN_68881;
  if (cc_status.value2 && GET_CODE (cc_status.value2) == COMPARE
      && GET_MODE_CLASS (GET_MODE (XEXP (cc_status.value2, 0))) == MODE_FLOAT)
    {
      /* Float comparisons live in the 68881 cc's; if the first operand
	 ended up outside an FP register the compare was emitted with
	 its operands swapped, so mark the cc's reversed.  */
      cc_status.flags = CC_IN_68881;
      if (!FP_REG_P (XEXP (cc_status.value2, 0)))
	cc_status.flags |= CC_REVERSED;
    }
}
3641\f
5505f548 3642const char *
8a4a2253 3643output_move_const_double (rtx *operands)
79e68feb 3644{
1a8965c4 3645 int code = standard_68881_constant_p (operands[1]);
79e68feb 3646
1a8965c4 3647 if (code != 0)
79e68feb 3648 {
1a8965c4 3649 static char buf[40];
79e68feb 3650
3b4b85c9 3651 sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff);
1a8965c4 3652 return buf;
79e68feb 3653 }
1a8965c4 3654 return "fmove%.d %1,%0";
79e68feb
RS
3655}
3656
5505f548 3657const char *
8a4a2253 3658output_move_const_single (rtx *operands)
79e68feb 3659{
1a8965c4 3660 int code = standard_68881_constant_p (operands[1]);
79e68feb 3661
1a8965c4 3662 if (code != 0)
79e68feb 3663 {
1a8965c4 3664 static char buf[40];
79e68feb 3665
3b4b85c9 3666 sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff);
1a8965c4 3667 return buf;
79e68feb 3668 }
1a8965c4 3669 return "fmove%.s %f1,%0";
79e68feb
RS
3670}
3671
3672/* Return nonzero if X, a CONST_DOUBLE, has a value that we can get
3673 from the "fmovecr" instruction.
3674 The value, anded with 0xff, gives the code to use in fmovecr
3675 to get the desired constant. */
3676
7a1929e1 3677/* This code has been fixed for cross-compilation. */
c1cfb2ae
RS
3678
3679static int inited_68881_table = 0;
3680
5505f548 3681static const char *const strings_68881[7] = {
c1cfb2ae
RS
3682 "0.0",
3683 "1.0",
3684 "10.0",
3685 "100.0",
3686 "10000.0",
3687 "1e8",
3688 "1e16"
a0a7fbc9 3689};
c1cfb2ae 3690
8b60264b 3691static const int codes_68881[7] = {
c1cfb2ae
RS
3692 0x0f,
3693 0x32,
3694 0x33,
3695 0x34,
3696 0x35,
3697 0x36,
3698 0x37
a0a7fbc9 3699};
c1cfb2ae
RS
3700
3701REAL_VALUE_TYPE values_68881[7];
3702
3703/* Set up values_68881 array by converting the decimal values
7a1929e1 3704 strings_68881 to binary. */
c1cfb2ae
RS
3705
3706void
8a4a2253 3707init_68881_table (void)
c1cfb2ae
RS
3708{
3709 int i;
3710 REAL_VALUE_TYPE r;
3711 enum machine_mode mode;
3712
16d82c3c 3713 mode = SFmode;
c1cfb2ae
RS
3714 for (i = 0; i < 7; i++)
3715 {
3716 if (i == 6)
16d82c3c 3717 mode = DFmode;
c1cfb2ae
RS
3718 r = REAL_VALUE_ATOF (strings_68881[i], mode);
3719 values_68881[i] = r;
3720 }
3721 inited_68881_table = 1;
3722}
79e68feb
RS
3723
/* Return the fmovecr code (nonzero) if X, a CONST_DOUBLE, is one of
   the constants in the 68881 ROM table, else 0.  */
int
standard_68881_constant_p (rtx x)
{
  REAL_VALUE_TYPE r;
  int i;

  /* fmovecr must be emulated on the 68040 and 68060, so it shouldn't be
     used at all on those chips.  */
  if (TUNE_68040_60)
    return 0;

  /* Lazily build the binary constant table.  */
  if (! inited_68881_table)
    init_68881_table ();

  REAL_VALUE_FROM_CONST_DOUBLE (r, x);

  /* Use REAL_VALUES_IDENTICAL instead of REAL_VALUES_EQUAL so that -0.0
     is rejected.  */
  for (i = 0; i < 6; i++)
    {
      if (REAL_VALUES_IDENTICAL (r, values_68881[i]))
	return (codes_68881[i]);
    }

  /* Entry 6 (1e16) is not exactly representable in SFmode, so only
     consider it for wider modes.  */
  if (GET_MODE (x) == SFmode)
    return 0;

  if (REAL_VALUES_EQUAL (r, values_68881[6]))
    return (codes_68881[6]);

  /* larger powers of ten in the constants ram are not used
     because they are not equal to a `double' C constant.  */
  return 0;
}
3758
/* If X is a floating-point constant, return the logarithm of X base 2,
   or 0 if X is not a power of 2.  Values below 1.0 are never matched,
   so the return value 0 is unambiguous.

   NOTE(review): this relies on real_exponent and real_2expN agreeing
   on the exponent convention (mantissa normalization) -- confirm
   against real.c before changing either call.  */

int
floating_exact_log2 (rtx x)
{
  REAL_VALUE_TYPE r, r1;
  int exp;

  REAL_VALUE_FROM_CONST_DOUBLE (r, x);

  if (REAL_VALUES_LESS (r, dconst1))
    return 0;

  /* Reconstruct 2^exp and compare: equal iff X is an exact power.  */
  exp = real_exponent (&r);
  real_2expN (&r1, exp, DFmode);
  if (REAL_VALUES_EQUAL (r1, r))
    return exp;

  return 0;
}
3780\f
79e68feb
RS
3781/* A C compound statement to output to stdio stream STREAM the
3782 assembler syntax for an instruction operand X. X is an RTL
3783 expression.
3784
3785 CODE is a value that can be used to specify one of several ways
3786 of printing the operand. It is used when identical operands
3787 must be printed differently depending on the context. CODE
3788 comes from the `%' specification that was used to request
3789 printing of the operand. If the specification was just `%DIGIT'
3790 then CODE is 0; if the specification was `%LTR DIGIT' then CODE
3791 is the ASCII code for LTR.
3792
3793 If X is a register, this macro should print the register's name.
3794 The names can be found in an array `reg_names' whose type is
3795 `char *[]'. `reg_names' is initialized from `REGISTER_NAMES'.
3796
3797 When the machine description has a specification `%PUNCT' (a `%'
3798 followed by a punctuation character), this macro is called with
3799 a null pointer for X and the punctuation character for CODE.
3800
3801 The m68k specific codes are:
3802
3803 '.' for dot needed in Motorola-style opcode names.
3804 '-' for an operand pushing on the stack:
3805 sp@-, -(sp) or -(%sp) depending on the style of syntax.
3806 '+' for an operand pushing on the stack:
3807 sp@+, (sp)+ or (%sp)+ depending on the style of syntax.
3808 '@' for a reference to the top word on the stack:
3809 sp@, (sp) or (%sp) depending on the style of syntax.
3810 '#' for an immediate operand prefix (# in MIT and Motorola syntax
5ee084df 3811 but & in SGS syntax).
79e68feb
RS
3812 '!' for the cc register (used in an `and to cc' insn).
3813 '$' for the letter `s' in an op code, but only on the 68040.
3814 '&' for the letter `d' in an op code, but only on the 68040.
2ac5f14a 3815 '/' for register prefix needed by longlong.h.
a40ed0f3 3816 '?' for m68k_library_id_string
79e68feb
RS
3817
3818 'b' for byte insn (no effect, on the Sun; this is for the ISI).
3819 'd' to force memory addressing to be absolute, not relative.
3820 'f' for float insn (print a CONST_DOUBLE as a float rather than in hex)
79e68feb
RS
3821 'x' for float insn (print a CONST_DOUBLE as a float rather than in hex),
3822 or print pair of registers as rx:ry.
29ca003a
RS
3823 'p' print an address with @PLTPC attached, but only if the operand
3824 is not locally-bound. */
79e68feb
RS
3825
void
print_operand (FILE *file, rtx op, int letter)
{
  /* Punctuation codes first: these take no operand (OP may be null).
     See the block comment above for the full list of letters.  */
  if (letter == '.')
    {
      /* Motorola syntax uses a dot in opcode size suffixes (e.g. move.l);
         MIT syntax does not.  */
      if (MOTOROLA)
	fprintf (file, ".");
    }
  else if (letter == '#')
    asm_fprintf (file, "%I");
  else if (letter == '-')
    asm_fprintf (file, MOTOROLA ? "-(%Rsp)" : "%Rsp@-");
  else if (letter == '+')
    asm_fprintf (file, MOTOROLA ? "(%Rsp)+" : "%Rsp@+");
  else if (letter == '@')
    asm_fprintf (file, MOTOROLA ? "(%Rsp)" : "%Rsp@");
  else if (letter == '!')
    asm_fprintf (file, "%Rfpcr");
  else if (letter == '$')
    {
      /* 's' suffix, 68040 only (e.g. fsmove).  */
      if (TARGET_68040)
	fprintf (file, "s");
    }
  else if (letter == '&')
    {
      /* 'd' suffix, 68040 only (e.g. fdmove).  */
      if (TARGET_68040)
	fprintf (file, "d");
    }
  else if (letter == '/')
    asm_fprintf (file, "%R");
  else if (letter == '?')
    asm_fprintf (file, m68k_library_id_string);
  else if (letter == 'p')
    {
      /* Address with @PLTPC appended, unless the symbol is known to be
         locally bound and thus needs no PLT indirection.  */
      output_addr_const (file, op);
      if (!(GET_CODE (op) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (op)))
	fprintf (file, "@PLTPC");
    }
  else if (GET_CODE (op) == REG)
    {
      if (letter == 'R')
	/* Print out the second register name of a register pair.
	   I.e., R (6) => 7.  */
	fputs (M68K_REGNAME(REGNO (op) + 1), file);
      else
	fputs (M68K_REGNAME(REGNO (op)), file);
    }
  else if (GET_CODE (op) == MEM)
    {
      output_address (XEXP (op, 0));
      /* 'd' forces a constant address to absolute-long addressing on
         pre-68020 parts, except for small constants that fit the
         16-bit short form.  */
      if (letter == 'd' && ! TARGET_68020
	  && CONSTANT_ADDRESS_P (XEXP (op, 0))
	  && !(GET_CODE (XEXP (op, 0)) == CONST_INT
	       && INTVAL (XEXP (op, 0)) < 0x8000
	       && INTVAL (XEXP (op, 0)) >= -0x8000))
	fprintf (file, MOTOROLA ? ".l" : ":l");
    }
  else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == SFmode)
    {
      /* Single-precision float immediate: one 32-bit word in hex.  */
      REAL_VALUE_TYPE r;
      long l;
      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, l);
      asm_fprintf (file, "%I0x%lx", l & 0xFFFFFFFF);
    }
  else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == XFmode)
    {
      /* Extended-precision float immediate: three 32-bit words in hex.
         The masking keeps output correct on 64-bit `long' hosts.  */
      REAL_VALUE_TYPE r;
      long l[3];
      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
      asm_fprintf (file, "%I0x%lx%08lx%08lx", l[0] & 0xFFFFFFFF,
		   l[1] & 0xFFFFFFFF, l[2] & 0xFFFFFFFF);
    }
  else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == DFmode)
    {
      /* Double-precision float immediate: two 32-bit words in hex.  */
      REAL_VALUE_TYPE r;
      long l[2];
      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_DOUBLE (r, l);
      asm_fprintf (file, "%I0x%lx%08lx", l[0] & 0xFFFFFFFF, l[1] & 0xFFFFFFFF);
    }
  else
    {
      /* Use `print_operand_address' instead of `output_addr_const'
	 to ensure that we print relevant PIC stuff.  */
      asm_fprintf (file, "%I");
      if (TARGET_PCREL
	  && (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == CONST))
	print_operand_address (file, op);
      else
	output_addr_const (file, op);
    }
}
3920
884316ff
JM
3921/* m68k implementation of OUTPUT_ADDR_CONST_EXTRA. */
3922
3923bool
3924m68k_output_addr_const_extra (FILE *file, rtx x)
3925{
3926 if (GET_CODE (x) != UNSPEC || XINT (x, 1) != UNSPEC_GOTOFF)
3927 return false;
3928
3929 output_addr_const (file, XVECEXP (x, 0, 0));
3930 /* ??? What is the non-MOTOROLA syntax? */
3931 fputs ("@GOT", file);
3932 return true;
3933}
3934
79e68feb
RS
3935\f
3936/* A C compound statement to output to stdio stream STREAM the
3937 assembler syntax for an instruction operand that is a memory
3938 reference whose address is ADDR. ADDR is an RTL expression.
3939
3940 Note that this contains a kludge that knows that the only reason
3941 we have an address (plus (label_ref...) (reg...)) when not generating
3942 PIC code is in the insn before a tablejump, and we know that m68k.md
3943 generates a label LInnn: on such an insn.
3944
3945 It is possible for PIC to generate a (plus (label_ref...) (reg...))
3946 and we handle that just like we would a (plus (symbol_ref...) (reg...)).
3947
79e68feb
RS
3948 This routine is responsible for distinguishing between -fpic and -fPIC
3949 style relocations in an address. When generating -fpic code the
112cdef5
KH
3950 offset is output in word mode (e.g. movel a5@(_foo:w), a0). When generating
3951 -fPIC code the offset is output in long mode (e.g. movel a5@(_foo:l), a0) */
79e68feb
RS
3952
void
print_operand_address (FILE *file, rtx addr)
{
  struct m68k_address address;

  /* Decompose ADDR into base/index/offset/autoinc components; any
     address reaching here must be representable.  */
  if (!m68k_decompose_address (QImode, addr, true, &address))
    gcc_unreachable ();

  /* Predecrement: -(an) in Motorola syntax, an@- in MIT syntax.  */
  if (address.code == PRE_DEC)
    fprintf (file, MOTOROLA ? "-(%s)" : "%s@-",
	     M68K_REGNAME (REGNO (address.base)));
  /* Postincrement: (an)+ in Motorola syntax, an@+ in MIT syntax.  */
  else if (address.code == POST_INC)
    fprintf (file, MOTOROLA ? "(%s)+" : "%s@+",
	     M68K_REGNAME (REGNO (address.base)));
  else if (!address.base && !address.index)
    {
      /* A constant address.  */
      gcc_assert (address.offset == addr);
      if (GET_CODE (addr) == CONST_INT)
	{
	  /* (xxx).w or (xxx).l.  */
	  if (IN_RANGE (INTVAL (addr), -0x8000, 0x7fff))
	    fprintf (file, MOTOROLA ? "%d.w" : "%d:w", (int) INTVAL (addr));
	  else
	    fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (addr));
	}
      else if (TARGET_PCREL)
	{
	  /* (d16,PC) or (bd,PC,Xn) (with suppressed index register).
	     -fpic uses the 16-bit form, -fPIC the 32-bit form.  */
	  fputc ('(', file);
	  output_addr_const (file, addr);
	  asm_fprintf (file, flag_pic == 1 ? ":w,%Rpc)" : ":l,%Rpc)");
	}
      else
	{
	  /* (xxx).l.  We need a special case for SYMBOL_REF if the symbol
	     name ends in `.<letter>', as the last 2 characters can be
	     mistaken as a size suffix.  Put the name in parentheses.  */
	  if (GET_CODE (addr) == SYMBOL_REF
	      && strlen (XSTR (addr, 0)) > 2
	      && XSTR (addr, 0)[strlen (XSTR (addr, 0)) - 2] == '.')
	    {
	      putc ('(', file);
	      output_addr_const (file, addr);
	      putc (')', file);
	    }
	  else
	    output_addr_const (file, addr);
	}
    }
  else
    {
      int labelno;

      /* If ADDR is a (d8,pc,Xn) address, this is the number of the
	 label being accessed, otherwise it is -1.  */
      labelno = (address.offset
		 && !address.base
		 && GET_CODE (address.offset) == LABEL_REF
		 ? CODE_LABEL_NUMBER (XEXP (address.offset, 0))
		 : -1);
      if (MOTOROLA)
	{
	  /* Print the "offset(base" component.  */
	  if (labelno >= 0)
	    asm_fprintf (file, "%LL%d(%Rpc,", labelno);
	  else
	    {
	      if (address.offset)
		{
		  output_addr_const (file, address.offset);
		  /* GOT-relative offsets get an @GOT marker; with -fpic
		     on 68020+ the 16-bit ".w" form is enough.  */
		  if (flag_pic && address.base == pic_offset_table_rtx)
		    {
		      fprintf (file, "@GOT");
		      if (flag_pic == 1 && TARGET_68020)
			fprintf (file, ".w");
		    }
		}
	      putc ('(', file);
	      if (address.base)
		fputs (M68K_REGNAME (REGNO (address.base)), file);
	    }
	  /* Print the ",index" component, if any.  */
	  if (address.index)
	    {
	      if (address.base)
		putc (',', file);
	      fprintf (file, "%s.%c",
		       M68K_REGNAME (REGNO (address.index)),
		       GET_MODE (address.index) == HImode ? 'w' : 'l');
	      if (address.scale != 1)
		fprintf (file, "*%d", address.scale);
	    }
	  putc (')', file);
	}
      else /* !MOTOROLA */
	{
	  /* MIT syntax: base@(offset,index:size:scale).  */
	  if (!address.offset && !address.index)
	    fprintf (file, "%s@", M68K_REGNAME (REGNO (address.base)));
	  else
	    {
	      /* Print the "base@(offset" component.  */
	      if (labelno >= 0)
		asm_fprintf (file, "%Rpc@(%LL%d", labelno);
	      else
		{
		  if (address.base)
		    fputs (M68K_REGNAME (REGNO (address.base)), file);
		  fprintf (file, "@(");
		  if (address.offset)
		    {
		      output_addr_const (file, address.offset);
		      if (address.base == pic_offset_table_rtx && TARGET_68020)
			switch (flag_pic)
			  {
			  case 1:
			    fprintf (file, ":w"); break;
			  case 2:
			    fprintf (file, ":l"); break;
			  default:
			    break;
			  }
		    }
		}
	      /* Print the ",index" component, if any.  */
	      if (address.index)
		{
		  fprintf (file, ",%s:%c",
			   M68K_REGNAME (REGNO (address.index)),
			   GET_MODE (address.index) == HImode ? 'w' : 'l');
		  if (address.scale != 1)
		    fprintf (file, ":%d", address.scale);
		}
	      putc (')', file);
	    }
	}
    }
}
af13f02d
JW
4091\f
4092/* Check for cases where a clr insns can be omitted from code using
4093 strict_low_part sets. For example, the second clrl here is not needed:
4094 clrl d0; movw a0@+,d0; use d0; clrl d0; movw a0@+; use d0; ...
4095
4096 MODE is the mode of this STRICT_LOW_PART set. FIRST_INSN is the clear
4097 insn we are checking for redundancy. TARGET is the register set by the
4098 clear insn. */
4099
8a4a2253
BI
bool
strict_low_part_peephole_ok (enum machine_mode mode, rtx first_insn,
			     rtx target)
{
  rtx p = first_insn;

  /* Scan backwards from FIRST_INSN for an earlier full clear of TARGET
     that has not been invalidated in between.  */
  while ((p = PREV_INSN (p)))
    {
      /* Stop at a basic-block boundary: we cannot reason about what
         reaches this point from other blocks.  */
      if (NOTE_INSN_BASIC_BLOCK_P (p))
	return false;

      if (NOTE_P (p))
	continue;

      /* If it isn't an insn, then give up.  */
      if (!INSN_P (p))
	return false;

      if (reg_set_p (target, p))
	{
	  rtx set = single_set (p);
	  rtx dest;

	  /* If it isn't an easy to recognize insn, then give up.  */
	  if (! set)
	    return false;

	  dest = SET_DEST (set);

	  /* If this sets the entire target register to zero, then our
	     first_insn is redundant.  */
	  if (rtx_equal_p (dest, target)
	      && SET_SRC (set) == const0_rtx)
	    return true;
	  else if (GET_CODE (dest) == STRICT_LOW_PART
		   && GET_CODE (XEXP (dest, 0)) == REG
		   && REGNO (XEXP (dest, 0)) == REGNO (target)
		   && (GET_MODE_SIZE (GET_MODE (XEXP (dest, 0)))
		       <= GET_MODE_SIZE (mode)))
	    /* This is a strict low part set which modifies less than
	       we are using, so it is safe.  */
	    ;
	  else
	    return false;
	}
    }

  /* Ran off the start of the insn chain without finding a clear.  */
  return false;
}
67cd4f83 4149
2c8ec431
DL
4150/* Operand predicates for implementing asymmetric pc-relative addressing
4151 on m68k. The m68k supports pc-relative addressing (mode 7, register 2)
dab66575 4152 when used as a source operand, but not as a destination operand.
2c8ec431
DL
4153
4154 We model this by restricting the meaning of the basic predicates
4155 (general_operand, memory_operand, etc) to forbid the use of this
4156 addressing mode, and then define the following predicates that permit
4157 this addressing mode. These predicates can then be used for the
4158 source operands of the appropriate instructions.
4159
4160 n.b. While it is theoretically possible to change all machine patterns
4161 to use this addressing more where permitted by the architecture,
4162 it has only been implemented for "common" cases: SImode, HImode, and
4163 QImode operands, and only for the principle operations that would
4164 require this addressing mode: data movement and simple integer operations.
4165
4166 In parallel with these new predicates, two new constraint letters
4167 were defined: 'S' and 'T'. 'S' is the -mpcrel analog of 'm'.
4168 'T' replaces 's' in the non-pcrel case. It is a no-op in the pcrel case.
4169 In the pcrel case 's' is only valid in combination with 'a' registers.
4170 See addsi3, subsi3, cmpsi, and movsi patterns for a better understanding
4171 of how these constraints are used.
4172
4173 The use of these predicates is strictly optional, though patterns that
4174 don't will cause an extra reload register to be allocated where one
4175 was not necessary:
4176
4177 lea (abc:w,%pc),%a0 ; need to reload address
4178 moveq &1,%d1 ; since write to pc-relative space
4179 movel %d1,%a0@ ; is not allowed
4180 ...
4181 lea (abc:w,%pc),%a1 ; no need to reload address here
4182 movel %a1@,%d0 ; since "movel (abc:w,%pc),%d0" is ok
4183
4184 For more info, consult tiemann@cygnus.com.
4185
4186
4187 All of the ugliness with predicates and constraints is due to the
4188 simple fact that the m68k does not allow a pc-relative addressing
4189 mode as a destination. gcc does not distinguish between source and
4190 destination addresses. Hence, if we claim that pc-relative address
4191 modes are valid, e.g. GO_IF_LEGITIMATE_ADDRESS accepts them, then we
4192 end up with invalid code. To get around this problem, we left
4193 pc-relative modes as invalid addresses, and then added special
4194 predicates and constraints to accept them.
4195
4196 A cleaner way to handle this is to modify gcc to distinguish
4197 between source and destination addresses. We can then say that
4198 pc-relative is a valid source address but not a valid destination
4199 address, and hopefully avoid a lot of the predicate and constraint
4200 hackery. Unfortunately, this would be a pretty big change. It would
4201 be a useful change for a number of ports, but there aren't any current
4202 plans to undertake this.
4203
4204 ***************************************************************************/
4205
4206
/* Output the assembler template for a 32-bit AND in OPERANDS, picking a
   cheaper word-sized "and"/"clr" or a single-bit "bclr" when the mask
   allows it.  May rewrite OPERANDS in place to the narrowed forms.  */
const char *
output_andsi3 (rtx *operands)
{
  int logval;
  /* Mask keeps the whole high word (all upper 16 bits set): operate on
     the low word only.  Not done on ColdFire.  */
  if (GET_CODE (operands[2]) == CONST_INT
      && (INTVAL (operands[2]) | 0xffff) == -1
      && (DATA_REG_P (operands[0])
	  || offsettable_memref_p (operands[0]))
      && !TARGET_COLDFIRE)
    {
      if (GET_CODE (operands[0]) != REG)
	operands[0] = adjust_address (operands[0], HImode, 2);
      operands[2] = GEN_INT (INTVAL (operands[2]) & 0xffff);
      /* Do not delete a following tstl %0 insn; that would be incorrect.  */
      CC_STATUS_INIT;
      if (operands[2] == const0_rtx)
	return "clr%.w %0";
      return "and%.w %2,%0";
    }
  /* Mask clears exactly one bit (its complement is a power of two):
     use bclr on that bit.  operands[1] is reused as the bit number.  */
  if (GET_CODE (operands[2]) == CONST_INT
      && (logval = exact_log2 (~ INTVAL (operands[2]))) >= 0
      && (DATA_REG_P (operands[0])
	  || offsettable_memref_p (operands[0])))
    {
      if (DATA_REG_P (operands[0]))
	operands[1] = GEN_INT (logval);
      else
	{
	  /* For memory, address the byte containing the bit.  */
	  operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
	  operands[1] = GEN_INT (logval % 8);
	}
      /* This does not set condition codes in a standard way.  */
      CC_STATUS_INIT;
      return "bclr %1,%0";
    }
  return "and%.l %2,%0";
}
4244
/* Output the assembler template for a 32-bit OR in OPERANDS, picking a
   cheaper word-sized "or"/"mov" or a single-bit "bset" when the value
   allows it.  May rewrite OPERANDS in place to the narrowed forms.  */
const char *
output_iorsi3 (rtx *operands)
{
  register int logval;
  /* Value fits in 16 bits: only the low word changes, so use a word
     operation on the low half.  Not done on ColdFire.  */
  if (GET_CODE (operands[2]) == CONST_INT
      && INTVAL (operands[2]) >> 16 == 0
      && (DATA_REG_P (operands[0])
	  || offsettable_memref_p (operands[0]))
      && !TARGET_COLDFIRE)
    {
      if (GET_CODE (operands[0]) != REG)
	operands[0] = adjust_address (operands[0], HImode, 2);
      /* Do not delete a following tstl %0 insn; that would be incorrect.  */
      CC_STATUS_INIT;
      /* OR with all-ones just stores the constant.  */
      if (INTVAL (operands[2]) == 0xffff)
	return "mov%.w %2,%0";
      return "or%.w %2,%0";
    }
  /* Value sets exactly one bit: use bset on that bit.  operands[1] is
     reused as the bit number.  */
  if (GET_CODE (operands[2]) == CONST_INT
      && (logval = exact_log2 (INTVAL (operands[2]))) >= 0
      && (DATA_REG_P (operands[0])
	  || offsettable_memref_p (operands[0])))
    {
      if (DATA_REG_P (operands[0]))
	operands[1] = GEN_INT (logval);
      else
	{
	  /* For memory, address the byte containing the bit.  */
	  operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
	  operands[1] = GEN_INT (logval % 8);
	}
      CC_STATUS_INIT;
      return "bset %1,%0";
    }
  return "or%.l %2,%0";
}
4280
/* Output the assembler template for a 32-bit XOR in OPERANDS, picking a
   cheaper word-sized "eor"/"not" or a single-bit "bchg" when the value
   allows it.  May rewrite OPERANDS in place to the narrowed forms.  */
const char *
output_xorsi3 (rtx *operands)
{
  register int logval;
  /* Value fits in 16 bits: only the low word changes, so use a word
     operation on the low half.  Not done on ColdFire.  */
  if (GET_CODE (operands[2]) == CONST_INT
      && INTVAL (operands[2]) >> 16 == 0
      && (offsettable_memref_p (operands[0]) || DATA_REG_P (operands[0]))
      && !TARGET_COLDFIRE)
    {
      if (! DATA_REG_P (operands[0]))
	operands[0] = adjust_address (operands[0], HImode, 2);
      /* Do not delete a following tstl %0 insn; that would be incorrect.  */
      CC_STATUS_INIT;
      /* XOR with all-ones is a complement.  */
      if (INTVAL (operands[2]) == 0xffff)
	return "not%.w %0";
      return "eor%.w %2,%0";
    }
  /* Value flips exactly one bit: use bchg on that bit.  operands[1] is
     reused as the bit number.  */
  if (GET_CODE (operands[2]) == CONST_INT
      && (logval = exact_log2 (INTVAL (operands[2]))) >= 0
      && (DATA_REG_P (operands[0])
	  || offsettable_memref_p (operands[0])))
    {
      if (DATA_REG_P (operands[0]))
	operands[1] = GEN_INT (logval);
      else
	{
	  /* For memory, address the byte containing the bit.  */
	  operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
	  operands[1] = GEN_INT (logval % 8);
	}
      CC_STATUS_INIT;
      return "bchg %1,%0";
    }
  return "eor%.l %2,%0";
}
7c262518 4315
29ca003a
RS
4316/* Return the instruction that should be used for a call to address X,
4317 which is known to be in operand 0. */
4318
4319const char *
4320output_call (rtx x)
4321{
4322 if (symbolic_operand (x, VOIDmode))
4323 return m68k_symbolic_call;
4324 else
4325 return "jsr %a0";
4326}
4327
f7e70894
RS
4328/* Likewise sibling calls. */
4329
4330const char *
4331output_sibcall (rtx x)
4332{
4333 if (symbolic_operand (x, VOIDmode))
4334 return m68k_symbolic_jump;
4335 else
4336 return "jmp %a0";
4337}
4338
45849738
BI
4339#ifdef M68K_TARGET_COFF
4340
4341/* Output assembly to switch to section NAME with attribute FLAGS. */
4342
4343static void
c18a5b6c
MM
4344m68k_coff_asm_named_section (const char *name, unsigned int flags,
4345 tree decl ATTRIBUTE_UNUSED)
45849738
BI
4346{
4347 char flagchar;
4348
4349 if (flags & SECTION_WRITE)
4350 flagchar = 'd';
4351 else
4352 flagchar = 'x';
4353
4354 fprintf (asm_out_file, "\t.section\t%s,\"%c\"\n", name, flagchar);
4355}
4356
4357#endif /* M68K_TARGET_COFF */
4358
/* Output a thunk for a virtual call: adjust the incoming `this' pointer
   (stored at 4(%sp)) by DELTA and, if VCALL_OFFSET is nonzero, by
   *(*this + VCALL_OFFSET), then tail-call FUNCTION.  The thunk's RTL is
   generated and finalized entirely within this function.  */
static void
m68k_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
		      HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
		      tree function)
{
  rtx this_slot, offset, addr, mem, insn;

  /* Pretend to be a post-reload pass while generating rtl.  */
  reload_completed = 1;

  /* The "this" pointer is stored at 4(%sp).  */
  this_slot = gen_rtx_MEM (Pmode, plus_constant (stack_pointer_rtx, 4));

  /* Add DELTA to THIS.  */
  if (delta != 0)
    {
      /* Make the offset a legitimate operand for memory addition.
         Large offsets are staged through %d0 first.  */
      offset = GEN_INT (delta);
      if ((delta < -8 || delta > 8)
	  && (TARGET_COLDFIRE || USE_MOVQ (delta)))
	{
	  emit_move_insn (gen_rtx_REG (Pmode, D0_REG), offset);
	  offset = gen_rtx_REG (Pmode, D0_REG);
	}
      emit_insn (gen_add3_insn (copy_rtx (this_slot),
				copy_rtx (this_slot), offset));
    }

  /* If needed, add *(*THIS + VCALL_OFFSET) to THIS.  */
  if (vcall_offset != 0)
    {
      /* Set the static chain register to *THIS.  */
      emit_move_insn (static_chain_rtx, this_slot);
      emit_move_insn (static_chain_rtx, gen_rtx_MEM (Pmode, static_chain_rtx));

      /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET.  */
      addr = plus_constant (static_chain_rtx, vcall_offset);
      if (!m68k_legitimate_address_p (Pmode, addr, true))
	{
	  emit_insn (gen_rtx_SET (VOIDmode, static_chain_rtx, addr));
	  addr = static_chain_rtx;
	}

      /* Load the offset into %d0 and add it to THIS.  */
      emit_move_insn (gen_rtx_REG (Pmode, D0_REG),
		      gen_rtx_MEM (Pmode, addr));
      emit_insn (gen_add3_insn (copy_rtx (this_slot),
				copy_rtx (this_slot),
				gen_rtx_REG (Pmode, D0_REG)));
    }

  /* Jump to the target function.  Use a sibcall if direct jumps are
     allowed, otherwise load the address into a register first.  */
  mem = DECL_RTL (function);
  if (!sibcall_operand (XEXP (mem, 0), VOIDmode))
    {
      gcc_assert (flag_pic);

      if (!TARGET_SEP_DATA)
	{
	  /* Use the static chain register as a temporary (call-clobbered)
	     GOT pointer for this function.  We can use the static chain
	     register because it isn't live on entry to the thunk.  */
	  SET_REGNO (pic_offset_table_rtx, STATIC_CHAIN_REGNUM);
	  emit_insn (gen_load_got (pic_offset_table_rtx));
	}
      legitimize_pic_address (XEXP (mem, 0), Pmode, static_chain_rtx);
      mem = replace_equiv_address (mem, static_chain_rtx);
    }
  insn = emit_call_insn (gen_sibcall (mem, const0_rtx));
  SIBLING_CALL_P (insn) = 1;

  /* Run just enough of rest_of_compilation.  */
  insn = get_insns ();
  split_all_insns_noflow ();
  final_start_function (insn, file, 1);
  final (insn, file, 1);
  final_end_function ();

  /* Clean up the vars set above.  */
  reload_completed = 0;

  /* Restore the original PIC register.  */
  if (flag_pic)
    SET_REGNO (pic_offset_table_rtx, PIC_REG);
  free_after_compilation (cfun);
}
8636be86
KH
4446
4447/* Worker function for TARGET_STRUCT_VALUE_RTX. */
4448
4449static rtx
4450m68k_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
4451 int incoming ATTRIBUTE_UNUSED)
4452{
4453 return gen_rtx_REG (Pmode, M68K_STRUCT_VALUE_REGNUM);
4454}
cfca21cb
PB
4455
4456/* Return nonzero if register old_reg can be renamed to register new_reg. */
4457int
4458m68k_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED,
4459 unsigned int new_reg)
4460{
4461
4462 /* Interrupt functions can only use registers that have already been
4463 saved by the prologue, even if they would normally be
4464 call-clobbered. */
4465
a4242737
KH
4466 if ((m68k_get_function_kind (current_function_decl)
4467 == m68k_fk_interrupt_handler)
6fb5fa3c 4468 && !df_regs_ever_live_p (new_reg))
cfca21cb
PB
4469 return 0;
4470
4471 return 1;
4472}
70028b61 4473
ffa2596e
RS
4474/* Value is true if hard register REGNO can hold a value of machine-mode
4475 MODE. On the 68000, we let the cpu registers can hold any mode, but
4476 restrict the 68881 registers to floating-point modes. */
4477
70028b61
PB
4478bool
4479m68k_regno_mode_ok (int regno, enum machine_mode mode)
4480{
36e04090 4481 if (DATA_REGNO_P (regno))
70028b61 4482 {
a0a7fbc9
AS
4483 /* Data Registers, can hold aggregate if fits in. */
4484 if (regno + GET_MODE_SIZE (mode) / 4 <= 8)
4485 return true;
70028b61 4486 }
36e04090 4487 else if (ADDRESS_REGNO_P (regno))
70028b61 4488 {
a0a7fbc9
AS
4489 if (regno + GET_MODE_SIZE (mode) / 4 <= 16)
4490 return true;
70028b61 4491 }
36e04090 4492 else if (FP_REGNO_P (regno))
70028b61
PB
4493 {
4494 /* FPU registers, hold float or complex float of long double or
a0a7fbc9
AS
4495 smaller. */
4496 if ((GET_MODE_CLASS (mode) == MODE_FLOAT
4497 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
dcc21c4c 4498 && GET_MODE_UNIT_SIZE (mode) <= TARGET_FP_REG_SIZE)
a0a7fbc9 4499 return true;
70028b61
PB
4500 }
4501 return false;
4502}
dcc21c4c 4503
ffa2596e
RS
4504/* Implement SECONDARY_RELOAD_CLASS. */
4505
enum reg_class
m68k_secondary_reload_class (enum reg_class rclass,
			     enum machine_mode mode, rtx x)
{
  int regno;

  regno = true_regnum (x);

  /* If one operand of a movqi is an address register, the other
     operand must be a general register or constant.  Other types
     of operand must be reloaded through a data register.  */
  if (GET_MODE_SIZE (mode) == 1
      && reg_classes_intersect_p (rclass, ADDR_REGS)
      && !(INT_REGNO_P (regno) || CONSTANT_P (x)))
    return DATA_REGS;

  /* PC-relative addresses must be loaded into an address register first.  */
  if (TARGET_PCREL
      && !reg_class_subset_p (rclass, ADDR_REGS)
      && symbolic_operand (x, VOIDmode))
    return ADDR_REGS;

  /* No secondary reload needed.  */
  return NO_REGS;
}
4530
4531/* Implement PREFERRED_RELOAD_CLASS. */
4532
enum reg_class
m68k_preferred_reload_class (rtx x, enum reg_class rclass)
{
  enum reg_class secondary_class;

  /* If RCLASS might need a secondary reload, try restricting it to
     a class that doesn't.  */
  secondary_class = m68k_secondary_reload_class (rclass, GET_MODE (x), x);
  if (secondary_class != NO_REGS
      && reg_class_subset_p (secondary_class, rclass))
    return secondary_class;

  /* Prefer to use moveq for in-range constants.  (moveq takes an 8-bit
     signed immediate, hence the -0x80..0x7f range.)  */
  if (GET_CODE (x) == CONST_INT
      && reg_class_subset_p (DATA_REGS, rclass)
      && IN_RANGE (INTVAL (x), -0x80, 0x7f))
    return DATA_REGS;

  /* ??? Do we really need this now?  */
  if (GET_CODE (x) == CONST_DOUBLE
      && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
    {
      if (TARGET_HARD_FLOAT && reg_class_subset_p (FP_REGS, rclass))
	return FP_REGS;

      return NO_REGS;
    }

  return rclass;
}
4563
dcc21c4c
PB
4564/* Return floating point values in a 68881 register. This makes 68881 code
4565 a little bit faster. It also makes -msoft-float code incompatible with
4566 hard-float code, so people have to be careful not to mix the two.
c0220ea4 4567 For ColdFire it was decided the ABI incompatibility is undesirable.
dcc21c4c
PB
4568 If there is need for a hard-float ABI it is probably worth doing it
4569 properly and also passing function arguments in FP registers. */
4570rtx
4571m68k_libcall_value (enum machine_mode mode)
4572{
4573 switch (mode) {
4574 case SFmode:
4575 case DFmode:
4576 case XFmode:
4577 if (TARGET_68881)
8d989403 4578 return gen_rtx_REG (mode, FP0_REG);
dcc21c4c
PB
4579 break;
4580 default:
4581 break;
4582 }
8d989403 4583 return gen_rtx_REG (mode, D0_REG);
dcc21c4c
PB
4584}
4585
/* Choose the register(s) in which a function returns a value of type
   VALTYPE; FUNC is the function declaration when known.  Floating-point
   values go in %fp0 on 68881 targets, pointers in %a0 (mirrored in %d0),
   everything else in %d0.  */
rtx
m68k_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED)
{
  enum machine_mode mode;

  mode = TYPE_MODE (valtype);
  switch (mode) {
  case SFmode:
  case DFmode:
  case XFmode:
    if (TARGET_68881)
      return gen_rtx_REG (mode, FP0_REG);
    break;
  default:
    break;
  }

  /* If the function returns a pointer, push that into %a0.  */
  if (func && POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (func))))
    /* For compatibility with the large body of existing code which
       does not always properly declare external functions returning
       pointer types, the m68k/SVR4 convention is to copy the value
       returned for pointer functions from a0 to d0 in the function
       epilogue, so that callers that have neglected to properly
       declare the callee can still find the correct return value in
       d0.  */
    return gen_rtx_PARALLEL
      (mode,
       gen_rtvec (2,
		  gen_rtx_EXPR_LIST (VOIDmode,
				     gen_rtx_REG (mode, A0_REG),
				     const0_rtx),
		  gen_rtx_EXPR_LIST (VOIDmode,
				     gen_rtx_REG (mode, D0_REG),
				     const0_rtx)));
  else if (POINTER_TYPE_P (valtype))
    return gen_rtx_REG (mode, A0_REG);
  else
    return gen_rtx_REG (mode, D0_REG);
}
1c445f03
NS
4626
4627/* Worker function for TARGET_RETURN_IN_MEMORY. */
4628#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
static bool
m68k_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  enum machine_mode mode = TYPE_MODE (type);

  /* Values with no natural machine mode always go in memory.  */
  if (mode == BLKmode)
    return true;

  /* If TYPE's known alignment is less than the alignment of MODE that
     would contain the structure, then return in memory.  We need to
     do so to maintain the compatibility between code compiled with
     -mstrict-align and that compiled with -mno-strict-align.  */
  if (AGGREGATE_TYPE_P (type)
      && TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (mode))
    return true;

  return false;
}
4647#endif
c47b0cb4
MK
4648
/* CPU to schedule the program for.  Read by the scheduler support code
   below; NOTE(review): presumably initialized from the -mcpu/-mtune
   selection during option handling -- confirm against override code.  */
enum attr_cpu m68k_sched_cpu;

/* MAC to schedule the program for.  */
enum attr_mac m68k_sched_mac;
4654
c47b0cb4
MK
/* Operand type.  Classifies an instruction operand for scheduling
   purposes; the OP_TYPE_MEM* values correspond to m68k effective
   address modes (see sched_address_type below).  */
enum attr_op_type
  {
    /* No operand.  */
    OP_TYPE_NONE,

    /* Integer register.  */
    OP_TYPE_RN,

    /* FP register.  */
    OP_TYPE_FPN,

    /* Implicit mem reference (e.g. stack).  */
    OP_TYPE_MEM1,

    /* Memory without offset or indexing.  EA modes 2, 3 and 4.  */
    OP_TYPE_MEM234,

    /* Memory with offset but without indexing.  EA mode 5.  */
    OP_TYPE_MEM5,

    /* Memory with indexing.  EA mode 6.  */
    OP_TYPE_MEM6,

    /* Memory referenced by absolute address.  EA mode 7.  */
    OP_TYPE_MEM7,

    /* Immediate operand that doesn't require extension word.  */
    OP_TYPE_IMM_Q,

    /* Immediate 16 bit operand.  */
    OP_TYPE_IMM_W,

    /* Immediate 32 bit operand.  */
    OP_TYPE_IMM_L
  };
4691
c47b0cb4
MK
/* Return type of memory ADDR_RTX refers to, as one of the OP_TYPE_MEM*
   classifications.  MODE is the mode of the access.  */
static enum attr_op_type
sched_address_type (enum machine_mode mode, rtx addr_rtx)
{
  struct m68k_address address;

  /* Symbolic addresses are absolute references (EA mode 7).  */
  if (symbolic_operand (addr_rtx, VOIDmode))
    return OP_TYPE_MEM7;

  if (!m68k_decompose_address (mode, addr_rtx,
			       reload_completed, &address))
    {
      gcc_assert (!reload_completed);

      /* Reload will likely fix the address to be in the register.  */
      return OP_TYPE_MEM234;
    }

  /* Any scaled index means EA mode 6.  */
  if (address.scale != 0)
    return OP_TYPE_MEM6;

  if (address.base != NULL_RTX)
    {
      if (address.offset == NULL_RTX)
	return OP_TYPE_MEM234;

      return OP_TYPE_MEM5;
    }

  /* No base and no index: must be an absolute (offset-only) address.  */
  gcc_assert (address.offset != NULL_RTX);

  return OP_TYPE_MEM7;
}
4724
96fcacb7
MK
/* Return X or Y (depending on OPX_P) operand of INSN, as identified by
   the insn's 'opx'/'opy' attributes, or NULL if the attribute indexes
   past the insn's operand list.  */
static rtx
sched_get_operand (rtx insn, bool opx_p)
{
  int i;

  /* Callers must only pass recognizable insns.  */
  if (recog_memoized (insn) < 0)
    gcc_unreachable ();

  extract_constrain_insn_cached (insn);

  if (opx_p)
    i = get_attr_opx (insn);
  else
    i = get_attr_opy (insn);

  if (i >= recog_data.n_operands)
    return NULL;

  return recog_data.operand[i];
}
4746
/* Return type of INSN's operand X (if OPX_P) or operand Y (if !OPX_P).
   If ADDRESS_P is true, return type of memory location operand refers to.  */
static enum attr_op_type
sched_attr_op_type (rtx insn, bool opx_p, bool address_p)
{
  rtx op;

  op = sched_get_operand (insn, opx_p);

  if (op == NULL)
    {
      /* Before reload some operands may be missing; default to a
	 register classification.  */
      gcc_assert (!reload_completed);
      return OP_TYPE_RN;
    }

  if (address_p)
    /* OP itself is the address here; the QImode access mode is
       presumably a conservative placeholder — the real mode is not
       known at this point.  */
    return sched_address_type (QImode, op);

  if (memory_operand (op, VOIDmode))
    return sched_address_type (GET_MODE (op), XEXP (op, 0));

  if (register_operand (op, VOIDmode))
    {
      /* Before reload judge by mode; after reload by the actual
	 hard register class.  */
      if ((!reload_completed && FLOAT_MODE_P (GET_MODE (op)))
	  || (reload_completed && FP_REG_P (op)))
	return OP_TYPE_FPN;

      return OP_TYPE_RN;
    }

  if (GET_CODE (op) == CONST_INT)
    {
      int ival;

      ival = INTVAL (op);

      /* Check for quick constants.  */
      switch (get_attr_type (insn))
	{
	case TYPE_ALUQ_L:
	  if (IN_RANGE (ival, 1, 8) || IN_RANGE (ival, -8, -1))
	    return OP_TYPE_IMM_Q;

	  gcc_assert (!reload_completed);
	  break;

	case TYPE_MOVEQ_L:
	  if (USE_MOVQ (ival))
	    return OP_TYPE_IMM_Q;

	  gcc_assert (!reload_completed);
	  break;

	case TYPE_MOV3Q_L:
	  if (valid_mov3q_const (ival))
	    return OP_TYPE_IMM_Q;

	  gcc_assert (!reload_completed);
	  break;

	default:
	  break;
	}

      /* Not a quick constant: a 16-bit immediate needs one extension
	 word, anything wider needs two.  */
      if (IN_RANGE (ival, -0x8000, 0x7fff))
	return OP_TYPE_IMM_W;

      return OP_TYPE_IMM_L;
    }

  if (GET_CODE (op) == CONST_DOUBLE)
    {
      switch (GET_MODE (op))
	{
	case SFmode:
	  return OP_TYPE_IMM_W;

	case VOIDmode:
	case DFmode:
	  return OP_TYPE_IMM_L;

	default:
	  gcc_unreachable ();
	}
    }

  if (symbolic_operand (op, VOIDmode)
      || LABEL_P (op))
    {
      switch (GET_MODE (op))
	{
	case QImode:
	  return OP_TYPE_IMM_Q;

	case HImode:
	  return OP_TYPE_IMM_W;

	case SImode:
	  return OP_TYPE_IMM_L;

	default:
	  if (GET_CODE (op) == SYMBOL_REF)
	    /* ??? Just a guess.  Probably we can guess better using length
	       attribute of the instructions.  */
	    return OP_TYPE_IMM_W;

	  return OP_TYPE_IMM_L;
	}
    }

  /* Anything else should only appear before reload; classify FP-mode
     operands as FP, the rest as integer registers.  */
  gcc_assert (!reload_completed);

  if (FLOAT_MODE_P (GET_MODE (op)))
    return OP_TYPE_FPN;

  return OP_TYPE_RN;
}
4864
/* Implement opx_type attribute.
   Return type of INSN's operand X.
   If ADDRESS_P is true, return type of memory location operand refers to.
   This is a 1:1 translation from the internal OP_TYPE_* classification
   to the generated OPX_TYPE_* attribute enum.  */
enum attr_opx_type
m68k_sched_attr_opx_type (rtx insn, int address_p)
{
  switch (sched_attr_op_type (insn, true, address_p != 0))
    {
    case OP_TYPE_RN:
      return OPX_TYPE_RN;

    case OP_TYPE_FPN:
      return OPX_TYPE_FPN;

    case OP_TYPE_MEM1:
      return OPX_TYPE_MEM1;

    case OP_TYPE_MEM234:
      return OPX_TYPE_MEM234;

    case OP_TYPE_MEM5:
      return OPX_TYPE_MEM5;

    case OP_TYPE_MEM6:
      return OPX_TYPE_MEM6;

    case OP_TYPE_MEM7:
      return OPX_TYPE_MEM7;

    case OP_TYPE_IMM_Q:
      return OPX_TYPE_IMM_Q;

    case OP_TYPE_IMM_W:
      return OPX_TYPE_IMM_W;

    case OP_TYPE_IMM_L:
      return OPX_TYPE_IMM_L;

    default:
      /* OP_TYPE_NONE has no OPX counterpart and should not occur.  */
      gcc_unreachable ();
      return 0;
    }
}
4908
/* Implement opy_type attribute.
   Return type of INSN's operand Y.
   If ADDRESS_P is true, return type of memory location operand refers to.
   Mirror of m68k_sched_attr_opx_type for the Y operand.  */
enum attr_opy_type
m68k_sched_attr_opy_type (rtx insn, int address_p)
{
  switch (sched_attr_op_type (insn, false, address_p != 0))
    {
    case OP_TYPE_RN:
      return OPY_TYPE_RN;

    case OP_TYPE_FPN:
      return OPY_TYPE_FPN;

    case OP_TYPE_MEM1:
      return OPY_TYPE_MEM1;

    case OP_TYPE_MEM234:
      return OPY_TYPE_MEM234;

    case OP_TYPE_MEM5:
      return OPY_TYPE_MEM5;

    case OP_TYPE_MEM6:
      return OPY_TYPE_MEM6;

    case OP_TYPE_MEM7:
      return OPY_TYPE_MEM7;

    case OP_TYPE_IMM_Q:
      return OPY_TYPE_IMM_Q;

    case OP_TYPE_IMM_W:
      return OPY_TYPE_IMM_W;

    case OP_TYPE_IMM_L:
      return OPY_TYPE_IMM_L;

    default:
      /* OP_TYPE_NONE has no OPY counterpart and should not occur.  */
      gcc_unreachable ();
      return 0;
    }
}
4952
96fcacb7
MK
4953/* Return size of INSN as int. */
4954static int
4955sched_get_attr_size_int (rtx insn)
c47b0cb4
MK
4956{
4957 int size;
4958
96fcacb7 4959 switch (get_attr_type (insn))
c47b0cb4 4960 {
96fcacb7
MK
4961 case TYPE_IGNORE:
4962 /* There should be no references to m68k_sched_attr_size for 'ignore'
4963 instructions. */
4964 gcc_unreachable ();
4965 return 0;
4966
4967 case TYPE_MUL_L:
c47b0cb4
MK
4968 size = 2;
4969 break;
4970
4971 default:
4972 size = 1;
4973 break;
4974 }
4975
4976 switch (get_attr_opx_type (insn))
4977 {
4978 case OPX_TYPE_NONE:
96fcacb7
MK
4979 case OPX_TYPE_RN:
4980 case OPX_TYPE_FPN:
c47b0cb4
MK
4981 case OPX_TYPE_MEM1:
4982 case OPX_TYPE_MEM234:
4983 case OPY_TYPE_IMM_Q:
4984 break;
4985
4986 case OPX_TYPE_MEM5:
4987 case OPX_TYPE_MEM6:
4988 /* Here we assume that most absolute references are short. */
4989 case OPX_TYPE_MEM7:
4990 case OPY_TYPE_IMM_W:
4991 ++size;
4992 break;
4993
4994 case OPY_TYPE_IMM_L:
4995 size += 2;
4996 break;
4997
4998 default:
4999 gcc_unreachable ();
5000 }
5001
5002 switch (get_attr_opy_type (insn))
5003 {
5004 case OPY_TYPE_NONE:
96fcacb7
MK
5005 case OPY_TYPE_RN:
5006 case OPY_TYPE_FPN:
c47b0cb4
MK
5007 case OPY_TYPE_MEM1:
5008 case OPY_TYPE_MEM234:
5009 case OPY_TYPE_IMM_Q:
5010 break;
5011
5012 case OPY_TYPE_MEM5:
5013 case OPY_TYPE_MEM6:
5014 /* Here we assume that most absolute references are short. */
5015 case OPY_TYPE_MEM7:
5016 case OPY_TYPE_IMM_W:
5017 ++size;
5018 break;
5019
5020 case OPY_TYPE_IMM_L:
5021 size += 2;
5022 break;
5023
5024 default:
5025 gcc_unreachable ();
5026 }
5027
5028 if (size > 3)
5029 {
96fcacb7 5030 gcc_assert (!reload_completed);
c47b0cb4
MK
5031
5032 size = 3;
5033 }
5034
5035 return size;
5036}
5037
96fcacb7
MK
5038/* Return size of INSN as attribute enum value. */
5039enum attr_size
5040m68k_sched_attr_size (rtx insn)
5041{
5042 switch (sched_get_attr_size_int (insn))
5043 {
5044 case 1:
5045 return SIZE_1;
5046
5047 case 2:
5048 return SIZE_2;
5049
5050 case 3:
5051 return SIZE_3;
5052
5053 default:
5054 gcc_unreachable ();
5055 return 0;
5056 }
5057}
5058
/* Return a coarse classification of INSN's X (if OPX_P) or Y operand:
   OP_TYPE_RN for registers and immediates, OP_TYPE_MEM6 for indexed
   memory, OP_TYPE_MEM1 for any other memory access.  (The previous
   comment here described a different function returning an rtx.)  */
static enum attr_op_type
sched_get_opxy_mem_type (rtx insn, bool opx_p)
{
  if (opx_p)
    {
      switch (get_attr_opx_type (insn))
	{
	case OPX_TYPE_NONE:
	case OPX_TYPE_RN:
	case OPX_TYPE_FPN:
	case OPX_TYPE_IMM_Q:
	case OPX_TYPE_IMM_W:
	case OPX_TYPE_IMM_L:
	  return OP_TYPE_RN;

	case OPX_TYPE_MEM1:
	case OPX_TYPE_MEM234:
	case OPX_TYPE_MEM5:
	case OPX_TYPE_MEM7:
	  return OP_TYPE_MEM1;

	case OPX_TYPE_MEM6:
	  return OP_TYPE_MEM6;

	default:
	  gcc_unreachable ();
	  return 0;
	}
    }
  else
    {
      switch (get_attr_opy_type (insn))
	{
	case OPY_TYPE_NONE:
	case OPY_TYPE_RN:
	case OPY_TYPE_FPN:
	case OPY_TYPE_IMM_Q:
	case OPY_TYPE_IMM_W:
	case OPY_TYPE_IMM_L:
	  return OP_TYPE_RN;

	case OPY_TYPE_MEM1:
	case OPY_TYPE_MEM234:
	case OPY_TYPE_MEM5:
	case OPY_TYPE_MEM7:
	  return OP_TYPE_MEM1;

	case OPY_TYPE_MEM6:
	  return OP_TYPE_MEM6;

	default:
	  gcc_unreachable ();
	  return 0;
	}
    }
}
5117
c47b0cb4
MK
/* Implement op_mem attribute.
   Classify INSN's combined memory behavior.  In the OP_MEM_XY naming,
   the first position describes reads and the second writes: '0' none,
   '1' plain memory, 'I' indexed memory.  Operand Y is read-only;
   operand X's read/write direction comes from the opx_access
   attribute.  */
enum attr_op_mem
m68k_sched_attr_op_mem (rtx insn)
{
  enum attr_op_type opx;
  enum attr_op_type opy;

  opx = sched_get_opxy_mem_type (insn, true);
  opy = sched_get_opxy_mem_type (insn, false);

  /* No memory operands at all.  */
  if (opy == OP_TYPE_RN && opx == OP_TYPE_RN)
    return OP_MEM_00;

  if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM1)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_R:
	  return OP_MEM_10;

	case OPX_ACCESS_W:
	  return OP_MEM_01;

	case OPX_ACCESS_RW:
	  return OP_MEM_11;

	default:
	  gcc_unreachable ();
	  return 0;
	}
    }

  if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM6)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_R:
	  return OP_MEM_I0;

	case OPX_ACCESS_W:
	  return OP_MEM_0I;

	case OPX_ACCESS_RW:
	  return OP_MEM_I1;

	default:
	  gcc_unreachable ();
	  return 0;
	}
    }

  if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_RN)
    return OP_MEM_10;

  if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM1)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_W:
	  return OP_MEM_11;

	default:
	  /* Two-memory combinations other than read+write should only
	     appear before reload; fall back to the same class.  */
	  gcc_assert (!reload_completed);
	  return OP_MEM_11;
	}
    }

  if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM6)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_W:
	  return OP_MEM_1I;

	default:
	  gcc_assert (!reload_completed);
	  return OP_MEM_1I;
	}
    }

  if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_RN)
    return OP_MEM_I0;

  if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM1)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_W:
	  return OP_MEM_I1;

	default:
	  gcc_assert (!reload_completed);
	  return OP_MEM_I1;
	}
    }

  /* Only the indexed/indexed combination remains.  */
  gcc_assert (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM6);
  gcc_assert (!reload_completed);
  return OP_MEM_I1;
}
5218
/* Jump instructions types.  Indexed by INSN_UID.
   The same rtl insn can be expanded into different asm instructions
   depending on the cc0_status.  To properly determine type of jump
   instructions we scan instruction stream and map jumps types to this
   array.  Allocated in m68k_sched_md_init_global, freed in
   m68k_sched_md_finish_global.  */
static enum attr_type *sched_branch_type;

/* Return the type of the jump insn.
   INSN must have been recorded in sched_branch_type during
   m68k_sched_md_init_global.  */
enum attr_type
m68k_sched_branch_type (rtx insn)
{
  enum attr_type type;

  type = sched_branch_type[INSN_UID (insn)];

  /* A zero entry means the insn was never scanned as a jump.  */
  gcc_assert (type != 0);

  return type;
}
b8c96320 5238
96fcacb7
MK
/* Data for ColdFire V4 index bypass.
   Producer modifies register that is used as index in consumer with
   specified scale.  Filled in by m68k_sched_indexed_address_bypass_p
   and consumed (then cleared) by m68k_sched_adjust_cost.  */
static struct
{
  /* Producer instruction.  */
  rtx pro;

  /* Consumer instruction.  */
  rtx con;

  /* Scale of indexed memory access within consumer.
     Or zero if bypass should not be effective at the moment.  */
  int scale;
} sched_cfv4_bypass_data;

/* An empty state that is used in m68k_sched_adjust_cost.  */
static state_t sched_adjust_cost_state;
5257
/* Implement adjust_cost scheduler hook.
   Return adjusted COST of dependency LINK between DEF_INSN and INSN.  */
static int
m68k_sched_adjust_cost (rtx insn, rtx link ATTRIBUTE_UNUSED, rtx def_insn,
			int cost)
{
  int delay;

  /* Leave unrecognizable insns (e.g. asms) alone.  */
  if (recog_memoized (def_insn) < 0
      || recog_memoized (insn) < 0)
    return cost;

  if (sched_cfv4_bypass_data.scale == 1)
    /* Handle ColdFire V4 bypass for indexed address with 1x scale.  */
    {
      /* haifa-sched.c: insn_cost () calls bypass_p () just before
	 targetm.sched.adjust_cost ().  Hence, we can be relatively sure
	 that the data in sched_cfv4_bypass_data is up to date.  */
      gcc_assert (sched_cfv4_bypass_data.pro == def_insn
		  && sched_cfv4_bypass_data.con == insn);

      /* 1x-scale index bypass has a fixed minimum latency of 3.  */
      if (cost < 3)
	cost = 3;

      /* Consume the one-shot bypass record.  */
      sched_cfv4_bypass_data.pro = NULL;
      sched_cfv4_bypass_data.con = NULL;
      sched_cfv4_bypass_data.scale = 0;
    }
  else
    gcc_assert (sched_cfv4_bypass_data.pro == NULL
		&& sched_cfv4_bypass_data.con == NULL
		&& sched_cfv4_bypass_data.scale == 0);

  /* Don't try to issue INSN earlier than DFA permits.
     This is especially useful for instructions that write to memory,
     as their true dependence (default) latency is better to be set to 0
     to workaround alias analysis limitations.
     This is, in fact, a machine independent tweak, so, probably,
     it should be moved to haifa-sched.c: insn_cost ().  */
  delay = min_insn_conflict_delay (sched_adjust_cost_state, def_insn, insn);
  if (delay > cost)
    cost = delay;

  return cost;
}
5303
96fcacb7
MK
5304/* Return maximal number of insns that can be scheduled on a single cycle. */
5305static int
5306m68k_sched_issue_rate (void)
5307{
5308 switch (m68k_sched_cpu)
5309 {
5310 case CPU_CFV1:
5311 case CPU_CFV2:
5312 case CPU_CFV3:
5313 return 1;
5314
5315 case CPU_CFV4:
5316 return 2;
5317
5318 default:
5319 gcc_unreachable ();
5320 return 0;
5321 }
5322}
5323
826fadba
MK
/* Maximal length of instruction for current CPU.
   E.g. it is 3 for any ColdFire core.  */
static int max_insn_size;

/* Data to model instruction buffer of CPU.  */
struct _sched_ib
{
  /* True if instruction buffer model is modeled for current CPU.  */
  bool enabled_p;

  /* Size of the instruction buffer in words.  */
  int size;

  /* Number of filled words in the instruction buffer.  */
  int filled;

  /* Additional information about instruction buffer for CPUs that have
     a buffer of instruction records, rather than a plain buffer
     of instruction words.  */
  struct _sched_ib_records
  {
    /* Size of buffer in records.  */
    int n_insns;

    /* Array to hold data on adjustments made to the size of the buffer.
       Used as a circular buffer of the last n_insns adjustments.  */
    int *adjust;

    /* Index of the above array.  */
    int adjust_index;
  } records;

  /* An insn that reserves (marks empty) one word in the instruction buffer.  */
  rtx insn;
};

/* Singleton instance of the instruction-buffer model.  */
static struct _sched_ib sched_ib;

/* ID of memory unit.  */
static int sched_mem_unit_code;
5363
/* Implementation of the targetm.sched.variable_issue () hook.
   It is called after INSN was issued.  It returns the number of insns
   that can possibly get scheduled on the current cycle.
   It is used here to determine the effect of INSN on the instruction
   buffer.  */
static int
m68k_sched_variable_issue (FILE *sched_dump ATTRIBUTE_UNUSED,
			   int sched_verbose ATTRIBUTE_UNUSED,
			   rtx insn, int can_issue_more)
{
  int insn_size;

  if (recog_memoized (insn) >= 0 && get_attr_type (insn) != TYPE_IGNORE)
    {
      switch (m68k_sched_cpu)
	{
	case CPU_CFV1:
	case CPU_CFV2:
	  insn_size = sched_get_attr_size_int (insn);
	  break;

	case CPU_CFV3:
	  insn_size = sched_get_attr_size_int (insn);

	  /* ColdFire V3 and V4 cores have instruction buffers that can
	     accumulate up to 8 instructions regardless of instructions'
	     sizes.  So we should take care not to "prefetch" 24 one-word
	     or 12 two-words instructions.
	     To model this behavior we temporarily decrease size of the
	     buffer by (max_insn_size - insn_size) for next 7 instructions.  */
	  {
	    int adjust;

	    adjust = max_insn_size - insn_size;
	    sched_ib.size -= adjust;

	    if (sched_ib.filled > sched_ib.size)
	      sched_ib.filled = sched_ib.size;

	    sched_ib.records.adjust[sched_ib.records.adjust_index] = adjust;
	  }

	  /* Advance the circular adjustment buffer.  */
	  ++sched_ib.records.adjust_index;
	  if (sched_ib.records.adjust_index == sched_ib.records.n_insns)
	    sched_ib.records.adjust_index = 0;

	  /* Undo adjustment we did 7 instructions ago.  */
	  sched_ib.size
	    += sched_ib.records.adjust[sched_ib.records.adjust_index];

	  break;

	case CPU_CFV4:
	  /* V4 does not model the buffer; see md_init_global.  */
	  gcc_assert (!sched_ib.enabled_p);
	  insn_size = 0;
	  break;

	default:
	  gcc_unreachable ();
	}

      gcc_assert (insn_size <= sched_ib.filled);
      --can_issue_more;
    }
  else if (GET_CODE (PATTERN (insn)) == ASM_INPUT
	   || asm_noperands (PATTERN (insn)) >= 0)
    /* An asm conservatively drains the whole buffer.  */
    insn_size = sched_ib.filled;
  else
    insn_size = 0;

  sched_ib.filled -= insn_size;

  return can_issue_more;
}
5438
96fcacb7
MK
/* Return how many instructions should scheduler lookahead to choose the
   best one.  One less than the issue rate (so zero on single-issue
   cores).  */
static int
m68k_sched_first_cycle_multipass_dfa_lookahead (void)
{
  int rate = m68k_sched_issue_rate ();

  return rate - 1;
}
5446
/* Implementation of targetm.sched.md_init_global () hook.
   It is invoked once per scheduling pass and is used here
   to initialize scheduler constants.  */
static void
m68k_sched_md_init_global (FILE *sched_dump ATTRIBUTE_UNUSED,
			   int sched_verbose ATTRIBUTE_UNUSED,
			   int n_insns ATTRIBUTE_UNUSED)
{
  /* Init branch types.  */
  {
    rtx insn;

    /* Zero-initialized so m68k_sched_branch_type can assert on
       unscanned jumps.  */
    sched_branch_type = XCNEWVEC (enum attr_type, get_max_uid () + 1);

    for (insn = get_insns (); insn != NULL_RTX; insn = NEXT_INSN (insn))
      {
	if (JUMP_P (insn))
	  /* !!! FIXME: Implement real scan here.  */
	  sched_branch_type[INSN_UID (insn)] = TYPE_BCC;
      }
  }

#ifdef ENABLE_CHECKING
  /* Check that all instructions have DFA reservations and
     that all instructions can be issued from a clean state.  */
  {
    rtx insn;
    state_t state;

    state = alloca (state_size ());

    for (insn = get_insns (); insn != NULL_RTX; insn = NEXT_INSN (insn))
      {
	if (INSN_P (insn) && recog_memoized (insn) >= 0)
	  {
	    gcc_assert (insn_has_dfa_reservation_p (insn));

	    state_reset (state);
	    if (state_transition (state, insn) >= 0)
	      gcc_unreachable ();
	  }
      }
  }
#endif

  /* Setup target cpu.  */

  /* ColdFire V4 has a set of features to keep its instruction buffer full
     (e.g., a separate memory bus for instructions) and, hence, we do not model
     buffer for this CPU.  */
  sched_ib.enabled_p = (m68k_sched_cpu != CPU_CFV4);

  switch (m68k_sched_cpu)
    {
    case CPU_CFV4:
      sched_ib.filled = 0;

      /* FALLTHRU */

    case CPU_CFV1:
    case CPU_CFV2:
      max_insn_size = 3;
      sched_ib.records.n_insns = 0;
      sched_ib.records.adjust = NULL;
      break;

    case CPU_CFV3:
      max_insn_size = 3;
      sched_ib.records.n_insns = 8;
      sched_ib.records.adjust = XNEWVEC (int, sched_ib.records.n_insns);
      break;

    default:
      gcc_unreachable ();
    }

  sched_mem_unit_code = get_cpu_unit_code ("cf_mem1");

  sched_adjust_cost_state = xmalloc (state_size ());
  state_reset (sched_adjust_cost_state);

  /* Build the buffer-reservation pseudo-insn used by
     m68k_sched_dfa_post_advance_cycle.  */
  start_sequence ();
  emit_insn (gen_ib ());
  sched_ib.insn = get_insns ();
  end_sequence ();
}
5533
/* Scheduling pass is now finished.  Free/reset static variables
   allocated in m68k_sched_md_init_global.  */
static void
m68k_sched_md_finish_global (FILE *dump ATTRIBUTE_UNUSED,
			     int verbose ATTRIBUTE_UNUSED)
{
  /* The pseudo-insn belongs to a discarded sequence; just drop the
     reference.  */
  sched_ib.insn = NULL;

  free (sched_adjust_cost_state);
  sched_adjust_cost_state = NULL;

  sched_mem_unit_code = 0;

  free (sched_ib.records.adjust);
  sched_ib.records.adjust = NULL;
  sched_ib.records.n_insns = 0;
  max_insn_size = 0;

  free (sched_branch_type);
  sched_branch_type = NULL;
}
5554
/* Implementation of targetm.sched.md_init () hook.
   It is invoked each time scheduler starts on the new block (basic block or
   extended basic block).  Resets the instruction-buffer model to its
   per-block initial state.  */
static void
m68k_sched_md_init (FILE *sched_dump ATTRIBUTE_UNUSED,
		    int sched_verbose ATTRIBUTE_UNUSED,
		    int n_insns ATTRIBUTE_UNUSED)
{
  switch (m68k_sched_cpu)
    {
    case CPU_CFV1:
    case CPU_CFV2:
      /* V1/V2 have a plain 6-word buffer.  */
      sched_ib.size = 6;
      break;

    case CPU_CFV3:
      /* V3 buffers whole instruction records; start from the maximum
	 word capacity and clear the adjustment history.  */
      sched_ib.size = sched_ib.records.n_insns * max_insn_size;

      memset (sched_ib.records.adjust, 0,
	      sched_ib.records.n_insns * sizeof (*sched_ib.records.adjust));
      sched_ib.records.adjust_index = 0;
      break;

    case CPU_CFV4:
      gcc_assert (!sched_ib.enabled_p);
      sched_ib.size = 0;
      break;

    default:
      gcc_unreachable ();
    }

  if (sched_ib.enabled_p)
    /* haifa-sched.c: schedule_block () calls advance_cycle () just before
       the first cycle.  Workaround that.  */
    sched_ib.filled = -2;
}
5592
/* Implementation of targetm.sched.dfa_pre_advance_cycle () hook.
   It is invoked just before current cycle finishes and is used here
   to track if instruction buffer got its two words this cycle.  */
static void
m68k_sched_dfa_pre_advance_cycle (void)
{
  if (!sched_ib.enabled_p)
    return;

  /* If the memory unit was idle this cycle, the fetch pipeline
     delivered two words into the buffer.  */
  if (!cpu_unit_reservation_p (curr_state, sched_mem_unit_code))
    {
      sched_ib.filled += 2;

      if (sched_ib.filled > sched_ib.size)
	sched_ib.filled = sched_ib.size;
    }
}
5610
/* Implementation of targetm.sched.dfa_post_advance_cycle () hook.
   It is invoked just after new cycle begins and is used here
   to setup number of filled words in the instruction buffer so that
   instructions which won't have all their words prefetched would be
   stalled for a cycle.  */
static void
m68k_sched_dfa_post_advance_cycle (void)
{
  int i;

  if (!sched_ib.enabled_p)
    return;

  /* Setup number of prefetched instruction words in the instruction
     buffer.  Each issue of the reservation pseudo-insn marks one
     buffer word as unavailable in the DFA state.  */
  i = max_insn_size - sched_ib.filled;

  while (--i >= 0)
    {
      if (state_transition (curr_state, sched_ib.insn) >= 0)
	gcc_unreachable ();
    }
}
96fcacb7
MK
5634
5635/* Return X or Y (depending on OPX_P) operand of INSN,
5636 if it is an integer register, or NULL overwise. */
5637static rtx
5638sched_get_reg_operand (rtx insn, bool opx_p)
5639{
5640 rtx op = NULL;
5641
5642 if (opx_p)
5643 {
5644 if (get_attr_opx_type (insn) == OPX_TYPE_RN)
5645 {
5646 op = sched_get_operand (insn, true);
5647 gcc_assert (op != NULL);
5648
5649 if (!reload_completed && !REG_P (op))
5650 return NULL;
5651 }
5652 }
5653 else
5654 {
5655 if (get_attr_opy_type (insn) == OPY_TYPE_RN)
5656 {
5657 op = sched_get_operand (insn, false);
5658 gcc_assert (op != NULL);
5659
5660 if (!reload_completed && !REG_P (op))
5661 return NULL;
5662 }
5663 }
5664
5665 return op;
5666}
5667
5668/* Return true, if X or Y (depending on OPX_P) operand of INSN
5669 is a MEM. */
5670static bool
5671sched_mem_operand_p (rtx insn, bool opx_p)
5672{
5673 switch (sched_get_opxy_mem_type (insn, opx_p))
5674 {
5675 case OP_TYPE_MEM1:
5676 case OP_TYPE_MEM6:
5677 return true;
5678
5679 default:
5680 return false;
5681 }
5682}
5683
5684/* Return X or Y (depending on OPX_P) operand of INSN,
5685 if it is a MEM, or NULL overwise. */
5686static rtx
5687sched_get_mem_operand (rtx insn, bool must_read_p, bool must_write_p)
5688{
5689 bool opx_p;
5690 bool opy_p;
5691
5692 opx_p = false;
5693 opy_p = false;
5694
5695 if (must_read_p)
5696 {
5697 opx_p = true;
5698 opy_p = true;
5699 }
5700
5701 if (must_write_p)
5702 {
5703 opx_p = true;
5704 opy_p = false;
5705 }
5706
5707 if (opy_p && sched_mem_operand_p (insn, false))
5708 return sched_get_operand (insn, false);
5709
5710 if (opx_p && sched_mem_operand_p (insn, true))
5711 return sched_get_operand (insn, true);
5712
5713 gcc_unreachable ();
5714 return NULL;
5715}
5716
/* Return non-zero if PRO modifies register used as part of
   address in CON.  Used by bypass predicates in the machine
   description.  */
int
m68k_sched_address_bypass_p (rtx pro, rtx con)
{
  rtx pro_x;
  rtx con_mem_read;

  /* The produced value is PRO's X (destination) operand.  */
  pro_x = sched_get_reg_operand (pro, true);
  if (pro_x == NULL)
    return 0;

  con_mem_read = sched_get_mem_operand (con, true, false);
  gcc_assert (con_mem_read != NULL);

  if (reg_mentioned_p (pro_x, con_mem_read))
    return 1;

  return 0;
}
5737
5738/* Helper function for m68k_sched_indexed_address_bypass_p.
5739 if PRO modifies register used as index in CON,
5740 return scale of indexed memory access in CON. Return zero overwise. */
5741static int
5742sched_get_indexed_address_scale (rtx pro, rtx con)
5743{
5744 rtx reg;
5745 rtx mem;
5746 struct m68k_address address;
5747
5748 reg = sched_get_reg_operand (pro, true);
5749 if (reg == NULL)
5750 return 0;
5751
5752 mem = sched_get_mem_operand (con, true, false);
5753 gcc_assert (mem != NULL && MEM_P (mem));
5754
5755 if (!m68k_decompose_address (GET_MODE (mem), XEXP (mem, 0), reload_completed,
5756 &address))
5757 gcc_unreachable ();
5758
5759 if (REGNO (reg) == REGNO (address.index))
5760 {
5761 gcc_assert (address.scale != 0);
5762 return address.scale;
5763 }
5764
5765 return 0;
5766}
5767
/* Return non-zero if PRO modifies register used
   as index with scale 2 or 4 in CON.  The 1x-scale case cannot be
   expressed as a simple bypass, so it is recorded in
   sched_cfv4_bypass_data for m68k_sched_adjust_cost to handle.  */
int
m68k_sched_indexed_address_bypass_p (rtx pro, rtx con)
{
  /* Any previous record must already have been consumed.  */
  gcc_assert (sched_cfv4_bypass_data.pro == NULL
	      && sched_cfv4_bypass_data.con == NULL
	      && sched_cfv4_bypass_data.scale == 0);

  switch (sched_get_indexed_address_scale (pro, con))
    {
    case 1:
      /* We can't have a variable latency bypass, so
	 remember to adjust the insn cost in adjust_cost hook.  */
      sched_cfv4_bypass_data.pro = pro;
      sched_cfv4_bypass_data.con = con;
      sched_cfv4_bypass_data.scale = 1;
      return 0;

    case 2:
    case 4:
      return 1;

    default:
      return 0;
    }
}