]> git.ipfire.org Git - thirdparty/gcc.git/blame - gcc/config/m68k/m68k.c
Don't mark statements modified when we are in ipa mode
[thirdparty/gcc.git] / gcc / config / m68k / m68k.c
CommitLineData
/* Subroutines for insn-output.c for Motorola 68000 family.
   Copyright (C) 1987, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
   2001, 2003, 2004, 2005, 2006, 2007
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
79e68feb 22#include "config.h"
f5220a5d 23#include "system.h"
4977bab6
ZW
24#include "coretypes.h"
25#include "tm.h"
da932f04 26#include "tree.h"
79e68feb 27#include "rtl.h"
49ad7cfa 28#include "function.h"
79e68feb
RS
29#include "regs.h"
30#include "hard-reg-set.h"
31#include "real.h"
32#include "insn-config.h"
33#include "conditions.h"
79e68feb
RS
34#include "output.h"
35#include "insn-attr.h"
1d8eaa6b 36#include "recog.h"
f5220a5d 37#include "toplev.h"
6d5f49b2
RH
38#include "expr.h"
39#include "reload.h"
5505f548 40#include "tm_p.h"
672a6f42
NB
41#include "target.h"
42#include "target-def.h"
2cc07db4 43#include "debug.h"
79e68feb 44#include "flags.h"
6fb5fa3c 45#include "df.h"
b8c96320
MK
46/* ??? Need to add a dependency between m68k.o and sched-int.h. */
47#include "sched-int.h"
48#include "insn-codes.h"
79e68feb 49
a4e9467d
RZ
50enum reg_class regno_reg_class[] =
51{
52 DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
53 DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
54 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
55 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
56 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
57 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
58 ADDR_REGS
59};
60
61
/* The minimum number of integer registers that we want to save with the
   movem instruction.  Using two movel instructions instead of a single
   moveml is about 15% faster for the 68020 and 68030 at no expense in
   code size.  */
#define MIN_MOVEM_REGS 3

/* The minimum number of floating point registers that we want to save
   with the fmovem instruction.  */
#define MIN_FMOVEM_REGS 1
ff482c8d 72/* Structure describing stack frame layout. */
3d74bc09
BI
73struct m68k_frame
74{
75 /* Stack pointer to frame pointer offset. */
48ed72a4 76 HOST_WIDE_INT offset;
3d74bc09
BI
77
78 /* Offset of FPU registers. */
79 HOST_WIDE_INT foffset;
80
81 /* Frame size in bytes (rounded up). */
48ed72a4 82 HOST_WIDE_INT size;
3d74bc09
BI
83
84 /* Data and address register. */
48ed72a4
PB
85 int reg_no;
86 unsigned int reg_mask;
3d74bc09
BI
87
88 /* FPU registers. */
48ed72a4
PB
89 int fpu_no;
90 unsigned int fpu_mask;
3d74bc09
BI
91
92 /* Offsets relative to ARG_POINTER. */
48ed72a4
PB
93 HOST_WIDE_INT frame_pointer_offset;
94 HOST_WIDE_INT stack_pointer_offset;
3d74bc09
BI
95
96 /* Function which the above information refers to. */
97 int funcdef_no;
48ed72a4
PB
98};
99
3d74bc09
BI
100/* Current frame information calculated by m68k_compute_frame_layout(). */
101static struct m68k_frame current_frame;
102
fc2241eb
RS
103/* Structure describing an m68k address.
104
105 If CODE is UNKNOWN, the address is BASE + INDEX * SCALE + OFFSET,
106 with null fields evaluating to 0. Here:
107
108 - BASE satisfies m68k_legitimate_base_reg_p
109 - INDEX satisfies m68k_legitimate_index_reg_p
110 - OFFSET satisfies m68k_legitimate_constant_address_p
111
112 INDEX is either HImode or SImode. The other fields are SImode.
113
114 If CODE is PRE_DEC, the address is -(BASE). If CODE is POST_INC,
115 the address is (BASE)+. */
116struct m68k_address {
117 enum rtx_code code;
118 rtx base;
119 rtx index;
120 rtx offset;
121 int scale;
122};
123
b8c96320 124static int m68k_sched_adjust_cost (rtx, rtx, rtx, int);
96fcacb7 125static int m68k_sched_issue_rate (void);
b8c96320
MK
126static int m68k_sched_variable_issue (FILE *, int, rtx, int);
127static void m68k_sched_md_init_global (FILE *, int, int);
128static void m68k_sched_md_finish_global (FILE *, int);
129static void m68k_sched_md_init (FILE *, int, int);
130static void m68k_sched_dfa_pre_advance_cycle (void);
131static void m68k_sched_dfa_post_advance_cycle (void);
96fcacb7 132static int m68k_sched_first_cycle_multipass_dfa_lookahead (void);
b8c96320 133
4af06170 134static bool m68k_handle_option (size_t, const char *, int);
8a4a2253
BI
135static rtx find_addr_reg (rtx);
136static const char *singlemove_string (rtx *);
45849738 137#ifdef M68K_TARGET_COFF
c18a5b6c 138static void m68k_coff_asm_named_section (const char *, unsigned int, tree);
45849738 139#endif /* M68K_TARGET_COFF */
8a4a2253
BI
140static void m68k_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
141 HOST_WIDE_INT, tree);
8636be86 142static rtx m68k_struct_value_rtx (tree, int);
48ed72a4
PB
143static tree m68k_handle_fndecl_attribute (tree *node, tree name,
144 tree args, int flags,
145 bool *no_add_attrs);
3d74bc09 146static void m68k_compute_frame_layout (void);
48ed72a4 147static bool m68k_save_reg (unsigned int regno, bool interrupt_handler);
f7e70894 148static bool m68k_ok_for_sibcall_p (tree, tree);
8a4a2253 149static bool m68k_rtx_costs (rtx, int, int, int *);
1c445f03
NS
150#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
151static bool m68k_return_in_memory (tree, tree);
152#endif
79e68feb
RS
153\f
154
a2ef3db7 155/* Specify the identification number of the library being built */
4af06170 156const char *m68k_library_id_string = "_current_shared_library_a5_offset_";
ef1dbfb0 157
2b3600ac
JL
158/* Nonzero if the last compare/test insn had FP operands. The
159 sCC expanders peek at this to determine what to do for the
160 68060, which has no fsCC instructions. */
161int m68k_last_compare_had_fp_operands;
672a6f42
NB
162\f
163/* Initialize the GCC target structure. */
301d03af
RS
164
165#if INT_OP_GROUP == INT_OP_DOT_WORD
166#undef TARGET_ASM_ALIGNED_HI_OP
167#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
168#endif
169
170#if INT_OP_GROUP == INT_OP_NO_DOT
171#undef TARGET_ASM_BYTE_OP
172#define TARGET_ASM_BYTE_OP "\tbyte\t"
173#undef TARGET_ASM_ALIGNED_HI_OP
174#define TARGET_ASM_ALIGNED_HI_OP "\tshort\t"
175#undef TARGET_ASM_ALIGNED_SI_OP
176#define TARGET_ASM_ALIGNED_SI_OP "\tlong\t"
177#endif
178
179#if INT_OP_GROUP == INT_OP_DC
180#undef TARGET_ASM_BYTE_OP
181#define TARGET_ASM_BYTE_OP "\tdc.b\t"
182#undef TARGET_ASM_ALIGNED_HI_OP
183#define TARGET_ASM_ALIGNED_HI_OP "\tdc.w\t"
184#undef TARGET_ASM_ALIGNED_SI_OP
185#define TARGET_ASM_ALIGNED_SI_OP "\tdc.l\t"
186#endif
187
188#undef TARGET_ASM_UNALIGNED_HI_OP
189#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
190#undef TARGET_ASM_UNALIGNED_SI_OP
191#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
192
c590b625
RH
193#undef TARGET_ASM_OUTPUT_MI_THUNK
194#define TARGET_ASM_OUTPUT_MI_THUNK m68k_output_mi_thunk
bdabc150 195#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
3101faab 196#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
c590b625 197
1bc7c5b6
ZW
198#undef TARGET_ASM_FILE_START_APP_OFF
199#define TARGET_ASM_FILE_START_APP_OFF true
200
b8c96320
MK
201#undef TARGET_SCHED_ADJUST_COST
202#define TARGET_SCHED_ADJUST_COST m68k_sched_adjust_cost
203
96fcacb7
MK
204#undef TARGET_SCHED_ISSUE_RATE
205#define TARGET_SCHED_ISSUE_RATE m68k_sched_issue_rate
206
b8c96320
MK
207#undef TARGET_SCHED_VARIABLE_ISSUE
208#define TARGET_SCHED_VARIABLE_ISSUE m68k_sched_variable_issue
209
210#undef TARGET_SCHED_INIT_GLOBAL
211#define TARGET_SCHED_INIT_GLOBAL m68k_sched_md_init_global
212
213#undef TARGET_SCHED_FINISH_GLOBAL
214#define TARGET_SCHED_FINISH_GLOBAL m68k_sched_md_finish_global
215
216#undef TARGET_SCHED_INIT
217#define TARGET_SCHED_INIT m68k_sched_md_init
218
219#undef TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE
220#define TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE m68k_sched_dfa_pre_advance_cycle
221
222#undef TARGET_SCHED_DFA_POST_ADVANCE_CYCLE
223#define TARGET_SCHED_DFA_POST_ADVANCE_CYCLE m68k_sched_dfa_post_advance_cycle
224
96fcacb7
MK
225#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
226#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
227 m68k_sched_first_cycle_multipass_dfa_lookahead
228
4af06170
RS
229#undef TARGET_HANDLE_OPTION
230#define TARGET_HANDLE_OPTION m68k_handle_option
231
3c50106f
RH
232#undef TARGET_RTX_COSTS
233#define TARGET_RTX_COSTS m68k_rtx_costs
234
48ed72a4
PB
235#undef TARGET_ATTRIBUTE_TABLE
236#define TARGET_ATTRIBUTE_TABLE m68k_attribute_table
237
8636be86 238#undef TARGET_PROMOTE_PROTOTYPES
586de218 239#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
8636be86
KH
240
241#undef TARGET_STRUCT_VALUE_RTX
242#define TARGET_STRUCT_VALUE_RTX m68k_struct_value_rtx
243
7ffb5e78
RS
244#undef TARGET_CANNOT_FORCE_CONST_MEM
245#define TARGET_CANNOT_FORCE_CONST_MEM m68k_illegitimate_symbolic_constant_p
246
f7e70894
RS
247#undef TARGET_FUNCTION_OK_FOR_SIBCALL
248#define TARGET_FUNCTION_OK_FOR_SIBCALL m68k_ok_for_sibcall_p
249
1c445f03
NS
250#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
251#undef TARGET_RETURN_IN_MEMORY
252#define TARGET_RETURN_IN_MEMORY m68k_return_in_memory
253#endif
254
48ed72a4
PB
255static const struct attribute_spec m68k_attribute_table[] =
256{
257 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
2bccb817 258 { "interrupt", 0, 0, true, false, false, m68k_handle_fndecl_attribute },
48ed72a4 259 { "interrupt_handler", 0, 0, true, false, false, m68k_handle_fndecl_attribute },
a4242737 260 { "interrupt_thread", 0, 0, true, false, false, m68k_handle_fndecl_attribute },
48ed72a4
PB
261 { NULL, 0, 0, false, false, false, NULL }
262};
263
f6897b10 264struct gcc_target targetm = TARGET_INITIALIZER;
672a6f42 265\f
900ec02d
JB
266/* Base flags for 68k ISAs. */
267#define FL_FOR_isa_00 FL_ISA_68000
268#define FL_FOR_isa_10 (FL_FOR_isa_00 | FL_ISA_68010)
269/* FL_68881 controls the default setting of -m68881. gcc has traditionally
270 generated 68881 code for 68020 and 68030 targets unless explicitly told
271 not to. */
272#define FL_FOR_isa_20 (FL_FOR_isa_10 | FL_ISA_68020 \
273 | FL_BITFIELD | FL_68881)
274#define FL_FOR_isa_40 (FL_FOR_isa_20 | FL_ISA_68040)
275#define FL_FOR_isa_cpu32 (FL_FOR_isa_10 | FL_ISA_68020)
276
277/* Base flags for ColdFire ISAs. */
278#define FL_FOR_isa_a (FL_COLDFIRE | FL_ISA_A)
279#define FL_FOR_isa_aplus (FL_FOR_isa_a | FL_ISA_APLUS | FL_CF_USP)
280/* Note ISA_B doesn't necessarily include USP (user stack pointer) support. */
281#define FL_FOR_isa_b (FL_FOR_isa_a | FL_ISA_B | FL_CF_HWDIV)
4e2b26aa 282/* ISA_C is not upwardly compatible with ISA_B. */
8c5c99dc 283#define FL_FOR_isa_c (FL_FOR_isa_a | FL_ISA_C | FL_CF_USP)
900ec02d
JB
284
285enum m68k_isa
286{
287 /* Traditional 68000 instruction sets. */
288 isa_00,
289 isa_10,
290 isa_20,
291 isa_40,
292 isa_cpu32,
293 /* ColdFire instruction set variants. */
294 isa_a,
295 isa_aplus,
296 isa_b,
297 isa_c,
298 isa_max
299};
300
301/* Information about one of the -march, -mcpu or -mtune arguments. */
302struct m68k_target_selection
303{
304 /* The argument being described. */
305 const char *name;
306
307 /* For -mcpu, this is the device selected by the option.
308 For -mtune and -march, it is a representative device
309 for the microarchitecture or ISA respectively. */
310 enum target_device device;
311
312 /* The M68K_DEVICE fields associated with DEVICE. See the comment
313 in m68k-devices.def for details. FAMILY is only valid for -mcpu. */
314 const char *family;
315 enum uarch_type microarch;
316 enum m68k_isa isa;
317 unsigned long flags;
318};
319
320/* A list of all devices in m68k-devices.def. Used for -mcpu selection. */
321static const struct m68k_target_selection all_devices[] =
322{
323#define M68K_DEVICE(NAME,ENUM_VALUE,FAMILY,MULTILIB,MICROARCH,ISA,FLAGS) \
324 { NAME, ENUM_VALUE, FAMILY, u##MICROARCH, ISA, FLAGS | FL_FOR_##ISA },
325#include "m68k-devices.def"
326#undef M68K_DEVICE
327 { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
328};
329
330/* A list of all ISAs, mapping each one to a representative device.
331 Used for -march selection. */
332static const struct m68k_target_selection all_isas[] =
333{
334 { "68000", m68000, NULL, u68000, isa_00, FL_FOR_isa_00 },
335 { "68010", m68010, NULL, u68010, isa_10, FL_FOR_isa_10 },
336 { "68020", m68020, NULL, u68020, isa_20, FL_FOR_isa_20 },
337 { "68030", m68030, NULL, u68030, isa_20, FL_FOR_isa_20 },
338 { "68040", m68040, NULL, u68040, isa_40, FL_FOR_isa_40 },
339 { "68060", m68060, NULL, u68060, isa_40, FL_FOR_isa_40 },
340 { "cpu32", cpu32, NULL, ucpu32, isa_20, FL_FOR_isa_cpu32 },
341 { "isaa", mcf5206e, NULL, ucfv2, isa_a, (FL_FOR_isa_a
342 | FL_CF_HWDIV) },
343 { "isaaplus", mcf5271, NULL, ucfv2, isa_aplus, (FL_FOR_isa_aplus
344 | FL_CF_HWDIV) },
345 { "isab", mcf5407, NULL, ucfv4, isa_b, FL_FOR_isa_b },
8c5c99dc
MK
346 { "isac", unk_device, NULL, ucfv4, isa_c, (FL_FOR_isa_c
347 | FL_CF_HWDIV) },
900ec02d
JB
348 { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
349};
350
351/* A list of all microarchitectures, mapping each one to a representative
352 device. Used for -mtune selection. */
353static const struct m68k_target_selection all_microarchs[] =
354{
355 { "68000", m68000, NULL, u68000, isa_00, FL_FOR_isa_00 },
356 { "68010", m68010, NULL, u68010, isa_10, FL_FOR_isa_10 },
357 { "68020", m68020, NULL, u68020, isa_20, FL_FOR_isa_20 },
358 { "68020-40", m68020, NULL, u68020_40, isa_20, FL_FOR_isa_20 },
359 { "68020-60", m68020, NULL, u68020_60, isa_20, FL_FOR_isa_20 },
360 { "68030", m68030, NULL, u68030, isa_20, FL_FOR_isa_20 },
361 { "68040", m68040, NULL, u68040, isa_40, FL_FOR_isa_40 },
362 { "68060", m68060, NULL, u68060, isa_40, FL_FOR_isa_40 },
363 { "cpu32", cpu32, NULL, ucpu32, isa_20, FL_FOR_isa_cpu32 },
8c5c99dc 364 { "cfv1", mcf51qe, NULL, ucfv1, isa_c, FL_FOR_isa_c },
900ec02d
JB
365 { "cfv2", mcf5206, NULL, ucfv2, isa_a, FL_FOR_isa_a },
366 { "cfv3", mcf5307, NULL, ucfv3, isa_a, (FL_FOR_isa_a
367 | FL_CF_HWDIV) },
368 { "cfv4", mcf5407, NULL, ucfv4, isa_b, FL_FOR_isa_b },
369 { "cfv4e", mcf547x, NULL, ucfv4e, isa_b, (FL_FOR_isa_b
370 | FL_CF_USP
371 | FL_CF_EMAC
372 | FL_CF_FPU) },
373 { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
374};
375\f
376/* The entries associated with the -mcpu, -march and -mtune settings,
377 or null for options that have not been used. */
378const struct m68k_target_selection *m68k_cpu_entry;
379const struct m68k_target_selection *m68k_arch_entry;
380const struct m68k_target_selection *m68k_tune_entry;
381
382/* Which CPU we are generating code for. */
383enum target_device m68k_cpu;
384
385/* Which microarchitecture to tune for. */
386enum uarch_type m68k_tune;
387
388/* Which FPU to use. */
389enum fpu_type m68k_fpu;
4af06170 390
900ec02d
JB
391/* The set of FL_* flags that apply to the target processor. */
392unsigned int m68k_cpu_flags;
29ca003a 393
03b3e271
KH
394/* The set of FL_* flags that apply to the processor to be tuned for. */
395unsigned int m68k_tune_flags;
396
29ca003a
RS
397/* Asm templates for calling or jumping to an arbitrary symbolic address,
398 or NULL if such calls or jumps are not supported. The address is held
399 in operand 0. */
400const char *m68k_symbolic_call;
401const char *m68k_symbolic_jump;
c47b0cb4
MK
402
403/* Enum variable that corresponds to m68k_symbolic_call values. */
404enum M68K_SYMBOLIC_CALL m68k_symbolic_call_var;
405
900ec02d
JB
406\f
407/* See whether TABLE has an entry with name NAME. Return true and
408 store the entry in *ENTRY if so, otherwise return false and
409 leave *ENTRY alone. */
410
411static bool
412m68k_find_selection (const struct m68k_target_selection **entry,
413 const struct m68k_target_selection *table,
414 const char *name)
415{
416 size_t i;
417
418 for (i = 0; table[i].name; i++)
419 if (strcmp (table[i].name, name) == 0)
420 {
421 *entry = table + i;
422 return true;
423 }
424 return false;
425}
4af06170
RS
426
427/* Implement TARGET_HANDLE_OPTION. */
428
429static bool
430m68k_handle_option (size_t code, const char *arg, int value)
431{
432 switch (code)
433 {
900ec02d
JB
434 case OPT_march_:
435 return m68k_find_selection (&m68k_arch_entry, all_isas, arg);
436
437 case OPT_mcpu_:
438 return m68k_find_selection (&m68k_cpu_entry, all_devices, arg);
439
440 case OPT_mtune_:
441 return m68k_find_selection (&m68k_tune_entry, all_microarchs, arg);
442
4af06170 443 case OPT_m5200:
900ec02d 444 return m68k_find_selection (&m68k_cpu_entry, all_devices, "5206");
4af06170
RS
445
446 case OPT_m5206e:
900ec02d 447 return m68k_find_selection (&m68k_cpu_entry, all_devices, "5206e");
4af06170
RS
448
449 case OPT_m528x:
900ec02d 450 return m68k_find_selection (&m68k_cpu_entry, all_devices, "528x");
4af06170
RS
451
452 case OPT_m5307:
900ec02d 453 return m68k_find_selection (&m68k_cpu_entry, all_devices, "5307");
4af06170
RS
454
455 case OPT_m5407:
900ec02d 456 return m68k_find_selection (&m68k_cpu_entry, all_devices, "5407");
4af06170 457
dcc21c4c 458 case OPT_mcfv4e:
900ec02d 459 return m68k_find_selection (&m68k_cpu_entry, all_devices, "547x");
dcc21c4c 460
4af06170
RS
461 case OPT_m68000:
462 case OPT_mc68000:
900ec02d 463 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68000");
4af06170 464
3197c489 465 case OPT_m68010:
900ec02d 466 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68010");
3197c489 467
4af06170
RS
468 case OPT_m68020:
469 case OPT_mc68020:
900ec02d 470 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68020");
4af06170
RS
471
472 case OPT_m68020_40:
900ec02d
JB
473 return (m68k_find_selection (&m68k_tune_entry, all_microarchs,
474 "68020-40")
475 && m68k_find_selection (&m68k_cpu_entry, all_devices, "68020"));
4af06170
RS
476
477 case OPT_m68020_60:
900ec02d
JB
478 return (m68k_find_selection (&m68k_tune_entry, all_microarchs,
479 "68020-60")
480 && m68k_find_selection (&m68k_cpu_entry, all_devices, "68020"));
4af06170
RS
481
482 case OPT_m68030:
900ec02d 483 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68030");
4af06170
RS
484
485 case OPT_m68040:
900ec02d 486 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68040");
4af06170
RS
487
488 case OPT_m68060:
900ec02d 489 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68060");
4af06170
RS
490
491 case OPT_m68302:
900ec02d 492 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68302");
4af06170
RS
493
494 case OPT_m68332:
495 case OPT_mcpu32:
900ec02d 496 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68332");
4af06170
RS
497
498 case OPT_mshared_library_id_:
499 if (value > MAX_LIBRARY_ID)
500 error ("-mshared-library-id=%s is not between 0 and %d",
501 arg, MAX_LIBRARY_ID);
502 else
503 asprintf ((char **) &m68k_library_id_string, "%d", (value * -4) - 4);
504 return true;
505
506 default:
507 return true;
508 }
509}
510
ef1dbfb0
RK
511/* Sometimes certain combinations of command options do not make
512 sense on a particular target machine. You can define a macro
513 `OVERRIDE_OPTIONS' to take account of this. This macro, if
514 defined, is executed once just after all the command options have
515 been parsed.
516
517 Don't use this macro to turn on various extra optimizations for
518 `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
519
520void
8a4a2253 521override_options (void)
ef1dbfb0 522{
900ec02d
JB
523 const struct m68k_target_selection *entry;
524 unsigned long target_mask;
525
526 /* User can choose:
527
528 -mcpu=
529 -march=
530 -mtune=
531
532 -march=ARCH should generate code that runs any processor
533 implementing architecture ARCH. -mcpu=CPU should override -march
534 and should generate code that runs on processor CPU, making free
535 use of any instructions that CPU understands. -mtune=UARCH applies
9f5ed61a 536 on top of -mcpu or -march and optimizes the code for UARCH. It does
900ec02d
JB
537 not change the target architecture. */
538 if (m68k_cpu_entry)
539 {
540 /* Complain if the -march setting is for a different microarchitecture,
541 or includes flags that the -mcpu setting doesn't. */
542 if (m68k_arch_entry
543 && (m68k_arch_entry->microarch != m68k_cpu_entry->microarch
544 || (m68k_arch_entry->flags & ~m68k_cpu_entry->flags) != 0))
545 warning (0, "-mcpu=%s conflicts with -march=%s",
546 m68k_cpu_entry->name, m68k_arch_entry->name);
547
548 entry = m68k_cpu_entry;
549 }
550 else
551 entry = m68k_arch_entry;
552
553 if (!entry)
554 entry = all_devices + TARGET_CPU_DEFAULT;
555
556 m68k_cpu_flags = entry->flags;
557
558 /* Use the architecture setting to derive default values for
559 certain flags. */
560 target_mask = 0;
8785d88c
KH
561
562 /* ColdFire is lenient about alignment. */
563 if (!TARGET_COLDFIRE)
564 target_mask |= MASK_STRICT_ALIGNMENT;
565
900ec02d
JB
566 if ((m68k_cpu_flags & FL_BITFIELD) != 0)
567 target_mask |= MASK_BITFIELD;
568 if ((m68k_cpu_flags & FL_CF_HWDIV) != 0)
569 target_mask |= MASK_CF_HWDIV;
570 if ((m68k_cpu_flags & (FL_68881 | FL_CF_FPU)) != 0)
571 target_mask |= MASK_HARD_FLOAT;
572 target_flags |= target_mask & ~target_flags_explicit;
573
574 /* Set the directly-usable versions of the -mcpu and -mtune settings. */
575 m68k_cpu = entry->device;
576 if (m68k_tune_entry)
03b3e271
KH
577 {
578 m68k_tune = m68k_tune_entry->microarch;
579 m68k_tune_flags = m68k_tune_entry->flags;
580 }
900ec02d
JB
581#ifdef M68K_DEFAULT_TUNE
582 else if (!m68k_cpu_entry && !m68k_arch_entry)
03b3e271
KH
583 {
584 enum target_device dev;
585 dev = all_microarchs[M68K_DEFAULT_TUNE].device;
586 m68k_tune_flags = all_devices[dev]->flags;
587 }
900ec02d
JB
588#endif
589 else
03b3e271
KH
590 {
591 m68k_tune = entry->microarch;
592 m68k_tune_flags = entry->flags;
593 }
900ec02d
JB
594
595 /* Set the type of FPU. */
596 m68k_fpu = (!TARGET_HARD_FLOAT ? FPUTYPE_NONE
597 : (m68k_cpu_flags & FL_COLDFIRE) != 0 ? FPUTYPE_COLDFIRE
598 : FPUTYPE_68881);
599
a2ef3db7
BI
600 /* Sanity check to ensure that msep-data and mid-sahred-library are not
601 * both specified together. Doing so simply doesn't make sense.
602 */
603 if (TARGET_SEP_DATA && TARGET_ID_SHARED_LIBRARY)
604 error ("cannot specify both -msep-data and -mid-shared-library");
605
606 /* If we're generating code for a separate A5 relative data segment,
607 * we've got to enable -fPIC as well. This might be relaxable to
608 * -fpic but it hasn't been tested properly.
609 */
610 if (TARGET_SEP_DATA || TARGET_ID_SHARED_LIBRARY)
611 flag_pic = 2;
612
abe92a04
RS
613 /* -mpcrel -fPIC uses 32-bit pc-relative displacements. Raise an
614 error if the target does not support them. */
615 if (TARGET_PCREL && !TARGET_68020 && flag_pic == 2)
616 error ("-mpcrel -fPIC is not currently supported on selected cpu");
adf2ac37
RH
617
618 /* ??? A historic way of turning on pic, or is this intended to
619 be an embedded thing that doesn't have the same name binding
620 significance that it does on hosted ELF systems? */
621 if (TARGET_PCREL && flag_pic == 0)
622 flag_pic = 1;
623
29ca003a
RS
624 if (!flag_pic)
625 {
c47b0cb4
MK
626 m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_JSR;
627
29ca003a 628 m68k_symbolic_jump = "jra %a0";
29ca003a
RS
629 }
630 else if (TARGET_ID_SHARED_LIBRARY)
631 /* All addresses must be loaded from the GOT. */
632 ;
4e2b26aa 633 else if (TARGET_68020 || TARGET_ISAB || TARGET_ISAC)
29ca003a
RS
634 {
635 if (TARGET_PCREL)
c47b0cb4 636 m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_C;
4e2b26aa 637 else
c47b0cb4
MK
638 m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_P;
639
4e2b26aa
NS
640 if (TARGET_ISAC)
641 /* No unconditional long branch */;
642 else if (TARGET_PCREL)
da398bb5 643 m68k_symbolic_jump = "bra%.l %c0";
29ca003a 644 else
da398bb5 645 m68k_symbolic_jump = "bra%.l %p0";
29ca003a
RS
646 /* Turn off function cse if we are doing PIC. We always want
647 function call to be done as `bsr foo@PLTPC'. */
648 /* ??? It's traditional to do this for -mpcrel too, but it isn't
649 clear how intentional that is. */
650 flag_no_function_cse = 1;
651 }
adf2ac37 652
c47b0cb4
MK
653 switch (m68k_symbolic_call_var)
654 {
655 case M68K_SYMBOLIC_CALL_JSR:
c47b0cb4 656 m68k_symbolic_call = "jsr %a0";
c47b0cb4
MK
657 break;
658
659 case M68K_SYMBOLIC_CALL_BSR_C:
da398bb5 660 m68k_symbolic_call = "bsr%.l %c0";
c47b0cb4
MK
661 break;
662
663 case M68K_SYMBOLIC_CALL_BSR_P:
da398bb5 664 m68k_symbolic_call = "bsr%.l %p0";
c47b0cb4
MK
665 break;
666
667 case M68K_SYMBOLIC_CALL_NONE:
668 gcc_assert (m68k_symbolic_call == NULL);
669 break;
670
671 default:
672 gcc_unreachable ();
673 }
674
aaca7021
RZ
675#ifndef ASM_OUTPUT_ALIGN_WITH_NOP
676 if (align_labels > 2)
677 {
678 warning (0, "-falign-labels=%d is not supported", align_labels);
679 align_labels = 0;
680 }
681 if (align_loops > 2)
682 {
683 warning (0, "-falign-loops=%d is not supported", align_loops);
684 align_loops = 0;
685 }
686#endif
687
adf2ac37 688 SUBTARGET_OVERRIDE_OPTIONS;
c47b0cb4
MK
689
690 /* Setup scheduling options. */
826fadba
MK
691 if (TUNE_CFV1)
692 m68k_sched_cpu = CPU_CFV1;
693 else if (TUNE_CFV2)
694 m68k_sched_cpu = CPU_CFV2;
695 else if (TUNE_CFV3)
696 m68k_sched_cpu = CPU_CFV3;
96fcacb7
MK
697 else if (TUNE_CFV4)
698 m68k_sched_cpu = CPU_CFV4;
c47b0cb4
MK
699 else
700 {
701 m68k_sched_cpu = CPU_UNKNOWN;
702 flag_schedule_insns = 0;
703 flag_schedule_insns_after_reload = 0;
704 flag_modulo_sched = 0;
705 }
826fadba
MK
706
707 if (m68k_sched_cpu != CPU_UNKNOWN)
708 {
709 if ((m68k_cpu_flags & (FL_CF_EMAC | FL_CF_EMAC_B)) != 0)
710 m68k_sched_mac = MAC_CF_EMAC;
711 else if ((m68k_cpu_flags & FL_CF_MAC) != 0)
712 m68k_sched_mac = MAC_CF_MAC;
713 else
714 m68k_sched_mac = MAC_NO;
715 }
ef1dbfb0 716}
7eb4f044
NS
717
718/* Generate a macro of the form __mPREFIX_cpu_NAME, where PREFIX is the
719 given argument and NAME is the argument passed to -mcpu. Return NULL
720 if -mcpu was not passed. */
721
722const char *
723m68k_cpp_cpu_ident (const char *prefix)
724{
725 if (!m68k_cpu_entry)
726 return NULL;
727 return concat ("__m", prefix, "_cpu_", m68k_cpu_entry->name, NULL);
728}
729
730/* Generate a macro of the form __mPREFIX_family_NAME, where PREFIX is the
731 given argument and NAME is the name of the representative device for
732 the -mcpu argument's family. Return NULL if -mcpu was not passed. */
733
734const char *
735m68k_cpp_cpu_family (const char *prefix)
736{
737 if (!m68k_cpu_entry)
738 return NULL;
739 return concat ("__m", prefix, "_family_", m68k_cpu_entry->family, NULL);
740}
79e68feb 741\f
2bccb817
KH
742/* Return m68k_fk_interrupt_handler if FUNC has an "interrupt" or
743 "interrupt_handler" attribute and interrupt_thread if FUNC has an
744 "interrupt_thread" attribute. Otherwise, return
745 m68k_fk_normal_function. */
a4242737
KH
746
747enum m68k_function_kind
748m68k_get_function_kind (tree func)
48ed72a4
PB
749{
750 tree a;
751
fa157b28
NS
752 gcc_assert (TREE_CODE (func) == FUNCTION_DECL);
753
2bccb817
KH
754 a = lookup_attribute ("interrupt", DECL_ATTRIBUTES (func));
755 if (a != NULL_TREE)
756 return m68k_fk_interrupt_handler;
757
48ed72a4 758 a = lookup_attribute ("interrupt_handler", DECL_ATTRIBUTES (func));
a4242737
KH
759 if (a != NULL_TREE)
760 return m68k_fk_interrupt_handler;
761
762 a = lookup_attribute ("interrupt_thread", DECL_ATTRIBUTES (func));
763 if (a != NULL_TREE)
764 return m68k_fk_interrupt_thread;
765
766 return m68k_fk_normal_function;
48ed72a4
PB
767}
768
769/* Handle an attribute requiring a FUNCTION_DECL; arguments as in
770 struct attribute_spec.handler. */
771static tree
772m68k_handle_fndecl_attribute (tree *node, tree name,
773 tree args ATTRIBUTE_UNUSED,
774 int flags ATTRIBUTE_UNUSED,
775 bool *no_add_attrs)
776{
777 if (TREE_CODE (*node) != FUNCTION_DECL)
778 {
5c498b10 779 warning (OPT_Wattributes, "%qs attribute only applies to functions",
48ed72a4
PB
780 IDENTIFIER_POINTER (name));
781 *no_add_attrs = true;
782 }
783
a4242737
KH
784 if (m68k_get_function_kind (*node) != m68k_fk_normal_function)
785 {
786 error ("multiple interrupt attributes not allowed");
787 *no_add_attrs = true;
788 }
789
790 if (!TARGET_FIDOA
791 && !strcmp (IDENTIFIER_POINTER (name), "interrupt_thread"))
792 {
793 error ("interrupt_thread is available only on fido");
794 *no_add_attrs = true;
795 }
796
48ed72a4
PB
797 return NULL_TREE;
798}
860c4900
BI
799
800static void
3d74bc09 801m68k_compute_frame_layout (void)
860c4900
BI
802{
803 int regno, saved;
a40ed0f3 804 unsigned int mask;
a4242737
KH
805 enum m68k_function_kind func_kind =
806 m68k_get_function_kind (current_function_decl);
807 bool interrupt_handler = func_kind == m68k_fk_interrupt_handler;
808 bool interrupt_thread = func_kind == m68k_fk_interrupt_thread;
860c4900 809
3d74bc09
BI
810 /* Only compute the frame once per function.
811 Don't cache information until reload has been completed. */
812 if (current_frame.funcdef_no == current_function_funcdef_no
813 && reload_completed)
814 return;
815
816 current_frame.size = (get_frame_size () + 3) & -4;
860c4900 817
a40ed0f3 818 mask = saved = 0;
a4242737
KH
819
820 /* Interrupt thread does not need to save any register. */
821 if (!interrupt_thread)
822 for (regno = 0; regno < 16; regno++)
823 if (m68k_save_reg (regno, interrupt_handler))
824 {
825 mask |= 1 << (regno - D0_REG);
826 saved++;
827 }
3d74bc09
BI
828 current_frame.offset = saved * 4;
829 current_frame.reg_no = saved;
830 current_frame.reg_mask = mask;
860c4900 831
57047680 832 current_frame.foffset = 0;
a40ed0f3 833 mask = saved = 0;
dcc21c4c 834 if (TARGET_HARD_FLOAT)
860c4900 835 {
a4242737
KH
836 /* Interrupt thread does not need to save any register. */
837 if (!interrupt_thread)
838 for (regno = 16; regno < 24; regno++)
839 if (m68k_save_reg (regno, interrupt_handler))
840 {
841 mask |= 1 << (regno - FP0_REG);
842 saved++;
843 }
dcc21c4c 844 current_frame.foffset = saved * TARGET_FP_REG_SIZE;
3d74bc09 845 current_frame.offset += current_frame.foffset;
860c4900 846 }
57047680
GN
847 current_frame.fpu_no = saved;
848 current_frame.fpu_mask = mask;
3d74bc09
BI
849
850 /* Remember what function this frame refers to. */
851 current_frame.funcdef_no = current_function_funcdef_no;
860c4900
BI
852}
853
854HOST_WIDE_INT
855m68k_initial_elimination_offset (int from, int to)
856{
42b67c06
PB
857 int argptr_offset;
858 /* The arg pointer points 8 bytes before the start of the arguments,
859 as defined by FIRST_PARM_OFFSET. This makes it coincident with the
860 frame pointer in most frames. */
861 argptr_offset = frame_pointer_needed ? 0 : UNITS_PER_WORD;
860c4900 862 if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
42b67c06 863 return argptr_offset;
860c4900 864
3d74bc09 865 m68k_compute_frame_layout ();
860c4900 866
4761e388
NS
867 gcc_assert (to == STACK_POINTER_REGNUM);
868 switch (from)
869 {
a0a7fbc9 870 case ARG_POINTER_REGNUM:
42b67c06 871 return current_frame.offset + current_frame.size - argptr_offset;
4761e388
NS
872 case FRAME_POINTER_REGNUM:
873 return current_frame.offset + current_frame.size;
874 default:
875 gcc_unreachable ();
876 }
860c4900
BI
877}
878
97c55091
GN
/* Refer to the array `regs_ever_live' to determine which registers
   to save; `regs_ever_live[I]' is nonzero if register number I
   is ever used in the function.  This function is responsible for
   knowing which registers should not be saved even if used.
   Return true if we need to save REGNO.

   The checks below are order-sensitive: the PIC and EH-return cases
   can force a save of a register that the later fixed/call-used
   tests would reject.  */

static bool
m68k_save_reg (unsigned int regno, bool interrupt_handler)
{
  if (flag_pic && regno == PIC_REG)
    {
      if (crtl->saves_all_registers)
	return true;
      if (crtl->uses_pic_offset_table)
	return true;
      /* Reload may introduce constant pool references into a function
	 that thitherto didn't need a PIC register.  Note that the test
	 above will not catch that case because we will only set
	 crtl->uses_pic_offset_table when emitting
	 the address reloads.  */
      if (crtl->uses_const_pool)
	return true;
    }

  /* Functions that call __builtin_eh_return must preserve every
     EH data register so the unwinder can store into them.  */
  if (crtl->calls_eh_return)
    {
      unsigned int i;
      for (i = 0; ; i++)
	{
	  unsigned int test = EH_RETURN_DATA_REGNO (i);
	  if (test == INVALID_REGNUM)
	    break;
	  if (test == regno)
	    return true;
	}
    }

  /* Fixed regs we never touch.  */
  if (fixed_regs[regno])
    return false;

  /* The frame pointer (if it is such) is handled specially.  */
  if (regno == FRAME_POINTER_REGNUM && frame_pointer_needed)
    return false;

  /* Interrupt handlers must also save call_used_regs
     if they are live or when calling nested functions.  */
  if (interrupt_handler)
    {
      if (df_regs_ever_live_p (regno))
	return true;

      /* A non-leaf handler cannot know what its callees clobber.  */
      if (!current_function_is_leaf && call_used_regs[regno])
	return true;
    }

  /* Never need to save registers that aren't touched.  */
  if (!df_regs_ever_live_p (regno))
    return false;

  /* Otherwise save everything that isn't call-clobbered.  */
  return !call_used_regs[regno];
}
942
a40ed0f3
KH
/* Emit RTL for a MOVEM or FMOVEM instruction.  BASE + OFFSET represents
   the lowest memory address.  COUNT is the number of registers to be
   moved, with register REGNO + I being moved if bit I of MASK is set.
   STORE_P specifies the direction of the move and ADJUST_STACK_P says
   whether or not this is pre-decrement (if STORE_P) or post-increment
   (if !STORE_P) operation.

   The insn is built as a PARALLEL: an optional stack-pointer
   adjustment SET first, then one SET per moved register.  Returns the
   emitted insn.  */

static rtx
m68k_emit_movem (rtx base, HOST_WIDE_INT offset,
		 unsigned int count, unsigned int regno,
		 unsigned int mask, bool store_p, bool adjust_stack_p)
{
  int i;
  rtx body, addr, src, operands[2];
  enum machine_mode mode;

  body = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (adjust_stack_p + count));
  /* All registers in MASK share the save mode of the first one.  */
  mode = reg_raw_mode[regno];
  i = 0;

  if (adjust_stack_p)
    {
      /* Stores move BASE down by the block size, loads move it up.  */
      src = plus_constant (base, (count
				  * GET_MODE_SIZE (mode)
				  * (HOST_WIDE_INT) (store_p ? -1 : 1)));
      XVECEXP (body, 0, i++) = gen_rtx_SET (VOIDmode, base, src);
    }

  for (; mask != 0; mask >>= 1, regno++)
    if (mask & 1)
      {
	addr = plus_constant (base, offset);
	/* operands[0] is the destination: memory when storing,
	   register when loading.  */
	operands[!store_p] = gen_frame_mem (mode, addr);
	operands[store_p] = gen_rtx_REG (mode, regno);
	XVECEXP (body, 0, i++)
	  = gen_rtx_SET (VOIDmode, operands[0], operands[1]);
	offset += GET_MODE_SIZE (mode);
      }
  gcc_assert (i == XVECLEN (body, 0));

  return emit_insn (body);
}
985
986/* Make INSN a frame-related instruction. */
79e68feb 987
08c148a8 988static void
a40ed0f3
KH
989m68k_set_frame_related (rtx insn)
990{
991 rtx body;
992 int i;
993
994 RTX_FRAME_RELATED_P (insn) = 1;
995 body = PATTERN (insn);
996 if (GET_CODE (body) == PARALLEL)
997 for (i = 0; i < XVECLEN (body, 0); i++)
998 RTX_FRAME_RELATED_P (XVECEXP (body, 0, i)) = 1;
999}
1000
/* Emit RTL for the "prologue" define_expand.

   Emission order: optional early stack-limit check, frame allocation
   (link/adds), FP register saves, optional late stack-limit check,
   integer register saves, and finally the GOT load for PIC.  */

void
m68k_expand_prologue (void)
{
  HOST_WIDE_INT fsize_with_regs;
  rtx limit, src, dest, insn;

  m68k_compute_frame_layout ();

  /* If the stack limit is a symbol, we can check it here,
     before actually allocating the space.  */
  if (crtl->limit_stack
      && GET_CODE (stack_limit_rtx) == SYMBOL_REF)
    {
      limit = plus_constant (stack_limit_rtx, current_frame.size + 4);
      if (!LEGITIMATE_CONSTANT_P (limit))
	{
	  /* Materialize the limit in %d0 when it cannot be used as an
	     immediate.  */
	  emit_move_insn (gen_rtx_REG (Pmode, D0_REG), limit);
	  limit = gen_rtx_REG (Pmode, D0_REG);
	}
      emit_insn (gen_cmpsi (stack_pointer_rtx, limit));
      emit_insn (gen_conditional_trap (gen_rtx_LTU (VOIDmode,
						    cc0_rtx, const0_rtx),
				       const1_rtx));
    }

  fsize_with_regs = current_frame.size;
  if (TARGET_COLDFIRE)
    {
      /* ColdFire's move multiple instructions do not allow pre-decrement
	 addressing.  Add the size of movem saves to the initial stack
	 allocation instead.  */
      if (current_frame.reg_no >= MIN_MOVEM_REGS)
	fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
      if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
	fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
    }

  if (frame_pointer_needed)
    {
      if (fsize_with_regs == 0 && TUNE_68040)
	{
	  /* On the 68040, two separate moves are faster than link.w 0.  */
	  dest = gen_frame_mem (Pmode,
				gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
	  m68k_set_frame_related (emit_move_insn (dest, frame_pointer_rtx));
	  m68k_set_frame_related (emit_move_insn (frame_pointer_rtx,
						  stack_pointer_rtx));
	}
      else if (fsize_with_regs < 0x8000 || TARGET_68020)
	/* link.w (or the 68020 32-bit link) can allocate the whole
	   frame in one instruction.  */
	m68k_set_frame_related
	  (emit_insn (gen_link (frame_pointer_rtx,
				GEN_INT (-4 - fsize_with_regs))));
      else
	{
	  /* Frame too big for link.w: link with 0 and adjust %sp
	     separately.  */
	  m68k_set_frame_related
	    (emit_insn (gen_link (frame_pointer_rtx, GEN_INT (-4))));
	  m68k_set_frame_related
	    (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-fsize_with_regs))));
	}

      /* If the frame pointer is needed, emit a special barrier that
	 will prevent the scheduler from moving stores to the frame
	 before the stack adjustment.  */
      emit_insn (gen_stack_tie (stack_pointer_rtx, frame_pointer_rtx));
    }
  else if (fsize_with_regs != 0)
    m68k_set_frame_related
      (emit_insn (gen_addsi3 (stack_pointer_rtx,
			      stack_pointer_rtx,
			      GEN_INT (-fsize_with_regs))));

  if (current_frame.fpu_mask)
    {
      gcc_assert (current_frame.fpu_no >= MIN_FMOVEM_REGS);
      if (TARGET_68881)
	/* 68881 fmovem can pre-decrement; save below the current %sp.  */
	m68k_set_frame_related
	  (m68k_emit_movem (stack_pointer_rtx,
			    current_frame.fpu_no * -GET_MODE_SIZE (XFmode),
			    current_frame.fpu_no, FP0_REG,
			    current_frame.fpu_mask, true, true));
      else
	{
	  int offset;

	  /* If we're using moveml to save the integer registers,
	     the stack pointer will point to the bottom of the moveml
	     save area.  Find the stack offset of the first FP register.  */
	  if (current_frame.reg_no < MIN_MOVEM_REGS)
	    offset = 0;
	  else
	    offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
	  m68k_set_frame_related
	    (m68k_emit_movem (stack_pointer_rtx, offset,
			      current_frame.fpu_no, FP0_REG,
			      current_frame.fpu_mask, true, false));
	}
    }

  /* If the stack limit is not a symbol, check it here.
     This has the disadvantage that it may be too late...  */
  if (crtl->limit_stack)
    {
      if (REG_P (stack_limit_rtx))
	{
	  emit_insn (gen_cmpsi (stack_pointer_rtx, stack_limit_rtx));
	  emit_insn (gen_conditional_trap (gen_rtx_LTU (VOIDmode,
							cc0_rtx, const0_rtx),
					   const1_rtx));
	}
      else if (GET_CODE (stack_limit_rtx) != SYMBOL_REF)
	warning (0, "stack limit expression is not supported");
    }

  if (current_frame.reg_no < MIN_MOVEM_REGS)
    {
      /* Store each register separately in the same order moveml does.  */
      int i;

      for (i = 16; i-- > 0; )
	if (current_frame.reg_mask & (1 << i))
	  {
	    src = gen_rtx_REG (SImode, D0_REG + i);
	    dest = gen_frame_mem (SImode,
				  gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
	    m68k_set_frame_related (emit_insn (gen_movsi (dest, src)));
	  }
    }
  else
    {
      if (TARGET_COLDFIRE)
	/* The required register save space has already been allocated.
	   The first register should be stored at (%sp).  */
	m68k_set_frame_related
	  (m68k_emit_movem (stack_pointer_rtx, 0,
			    current_frame.reg_no, D0_REG,
			    current_frame.reg_mask, true, false));
      else
	m68k_set_frame_related
	  (m68k_emit_movem (stack_pointer_rtx,
			    current_frame.reg_no * -GET_MODE_SIZE (SImode),
			    current_frame.reg_no, D0_REG,
			    current_frame.reg_mask, true, true));
    }

  /* Load the GOT pointer when this function actually uses it and the
     ABI does not provide it some other way.  */
  if (flag_pic
      && !TARGET_SEP_DATA
      && crtl->uses_pic_offset_table)
    insn = emit_insn (gen_load_got (pic_offset_table_rtx));
}
1154\f
413ac1b2
RS
1155/* Return true if a simple (return) instruction is sufficient for this
1156 instruction (i.e. if no epilogue is needed). */
79e68feb 1157
3d74bc09 1158bool
a2bda628 1159m68k_use_return_insn (void)
79e68feb 1160{
79e68feb 1161 if (!reload_completed || frame_pointer_needed || get_frame_size () != 0)
3d74bc09 1162 return false;
125ed86f 1163
a0a7fbc9 1164 m68k_compute_frame_layout ();
413ac1b2 1165 return current_frame.offset == 0;
79e68feb
RS
1166}
1167
f7e70894
RS
/* Emit RTL for the "epilogue" or "sibcall_epilogue" define_expand;
   SIBCALL_P says which.

   The function epilogue should not depend on the current stack pointer!
   It should use the frame pointer only, if there is a frame pointer.
   This is mandatory because of alloca; we also take advantage of it to
   omit stack adjustments before returning.  */

void
m68k_expand_epilogue (bool sibcall_p)
{
  HOST_WIDE_INT fsize, fsize_with_regs;
  bool big, restore_from_sp;

  m68k_compute_frame_layout ();

  fsize = current_frame.size;
  /* "big" means the restore displacement does not fit in a 16-bit
     frame-pointer offset, so %a1 is loaded with -fsize and used in
     (d8,%fp,%a1.l) addresses below.  */
  big = false;
  restore_from_sp = false;

  /* FIXME : current_function_is_leaf below is too strong.
     What we really need to know there is if there could be pending
     stack adjustment needed at that point.  */
  restore_from_sp = (!frame_pointer_needed
		     || (!cfun->calls_alloca
			 && current_function_is_leaf));

  /* fsize_with_regs is the size we need to adjust the sp when
     popping the frame.  */
  fsize_with_regs = fsize;
  if (TARGET_COLDFIRE && restore_from_sp)
    {
      /* ColdFire's move multiple instructions do not allow post-increment
	 addressing.  Add the size of movem loads to the final deallocation
	 instead.  */
      if (current_frame.reg_no >= MIN_MOVEM_REGS)
	fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
      if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
	fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
    }

  if (current_frame.offset + fsize >= 0x8000
      && !restore_from_sp
      && (current_frame.reg_mask || current_frame.fpu_mask))
    {
      if (TARGET_COLDFIRE
	  && (current_frame.reg_no >= MIN_MOVEM_REGS
	      || current_frame.fpu_no >= MIN_FMOVEM_REGS))
	{
	  /* ColdFire's move multiple instructions do not support the
	     (d8,Ax,Xi) addressing mode, so we're as well using a normal
	     stack-based restore.  */
	  emit_move_insn (gen_rtx_REG (Pmode, A1_REG),
			  GEN_INT (-(current_frame.offset + fsize)));
	  emit_insn (gen_addsi3 (stack_pointer_rtx,
				 gen_rtx_REG (Pmode, A1_REG),
				 frame_pointer_rtx));
	  restore_from_sp = true;
	}
      else
	{
	  emit_move_insn (gen_rtx_REG (Pmode, A1_REG), GEN_INT (-fsize));
	  fsize = 0;
	  big = true;
	}
    }

  if (current_frame.reg_no < MIN_MOVEM_REGS)
    {
      /* Restore each register separately in the same order moveml does.  */
      int i;
      HOST_WIDE_INT offset;

      offset = current_frame.offset + fsize;
      for (i = 0; i < 16; i++)
	if (current_frame.reg_mask & (1 << i))
	  {
	    rtx addr;

	    if (big)
	      {
		/* Generate the address -OFFSET(%fp,%a1.l).  */
		addr = gen_rtx_REG (Pmode, A1_REG);
		addr = gen_rtx_PLUS (Pmode, addr, frame_pointer_rtx);
		addr = plus_constant (addr, -offset);
	      }
	    else if (restore_from_sp)
	      addr = gen_rtx_POST_INC (Pmode, stack_pointer_rtx);
	    else
	      addr = plus_constant (frame_pointer_rtx, -offset);
	    emit_move_insn (gen_rtx_REG (SImode, D0_REG + i),
			    gen_frame_mem (SImode, addr));
	    offset -= GET_MODE_SIZE (SImode);
	  }
    }
  else if (current_frame.reg_mask)
    {
      /* Bulk restore of the integer registers with one movem.  */
      if (big)
	m68k_emit_movem (gen_rtx_PLUS (Pmode,
				       gen_rtx_REG (Pmode, A1_REG),
				       frame_pointer_rtx),
			 -(current_frame.offset + fsize),
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false, false);
      else if (restore_from_sp)
	m68k_emit_movem (stack_pointer_rtx, 0,
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false,
			 !TARGET_COLDFIRE);
      else
	m68k_emit_movem (frame_pointer_rtx,
			 -(current_frame.offset + fsize),
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false, false);
    }

  if (current_frame.fpu_no > 0)
    {
      if (big)
	m68k_emit_movem (gen_rtx_PLUS (Pmode,
				       gen_rtx_REG (Pmode, A1_REG),
				       frame_pointer_rtx),
			 -(current_frame.foffset + fsize),
			 current_frame.fpu_no, FP0_REG,
			 current_frame.fpu_mask, false, false);
      else if (restore_from_sp)
	{
	  if (TARGET_COLDFIRE)
	    {
	      int offset;

	      /* If we used moveml to restore the integer registers, the
		 stack pointer will still point to the bottom of the moveml
		 save area.  Find the stack offset of the first FP
		 register.  */
	      if (current_frame.reg_no < MIN_MOVEM_REGS)
		offset = 0;
	      else
		offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
	      m68k_emit_movem (stack_pointer_rtx, offset,
			       current_frame.fpu_no, FP0_REG,
			       current_frame.fpu_mask, false, false);
	    }
	  else
	    m68k_emit_movem (stack_pointer_rtx, 0,
			     current_frame.fpu_no, FP0_REG,
			     current_frame.fpu_mask, false, true);
	}
      else
	m68k_emit_movem (frame_pointer_rtx,
			 -(current_frame.foffset + fsize),
			 current_frame.fpu_no, FP0_REG,
			 current_frame.fpu_mask, false, false);
    }

  /* Deallocate the frame: unlk restores both %fp and %sp at once.  */
  if (frame_pointer_needed)
    emit_insn (gen_unlink (frame_pointer_rtx));
  else if (fsize_with_regs)
    emit_insn (gen_addsi3 (stack_pointer_rtx,
			   stack_pointer_rtx,
			   GEN_INT (fsize_with_regs)));

  if (crtl->calls_eh_return)
    emit_insn (gen_addsi3 (stack_pointer_rtx,
			   stack_pointer_rtx,
			   EH_RETURN_STACKADJ_RTX));

  if (!sibcall_p)
    emit_jump_insn (gen_rtx_RETURN (VOIDmode));
}
1338\f
8a4a2253 1339/* Return true if X is a valid comparison operator for the dbcc
64a184e9
RS
1340 instruction.
1341
1342 Note it rejects floating point comparison operators.
1343 (In the future we could use Fdbcc).
1344
1345 It also rejects some comparisons when CC_NO_OVERFLOW is set. */
1346
1347int
41b6a5e2 1348valid_dbcc_comparison_p_2 (rtx x, enum machine_mode mode ATTRIBUTE_UNUSED)
64a184e9 1349{
64a184e9
RS
1350 switch (GET_CODE (x))
1351 {
64a184e9
RS
1352 case EQ: case NE: case GTU: case LTU:
1353 case GEU: case LEU:
1354 return 1;
1355
1356 /* Reject some when CC_NO_OVERFLOW is set. This may be over
1357 conservative */
1358 case GT: case LT: case GE: case LE:
1359 return ! (cc_prev_status.flags & CC_NO_OVERFLOW);
1360 default:
1361 return 0;
1362 }
1363}
1364
a0ab749a 1365/* Return nonzero if flags are currently in the 68881 flag register. */
6a0f85e3 1366int
8a4a2253 1367flags_in_68881 (void)
6a0f85e3
TG
1368{
1369 /* We could add support for these in the future */
1370 return cc_status.flags & CC_IN_68881;
1371}
1372
fa157b28 1373/* Implement TARGET_FUNCTION_OK_FOR_SIBCALL_P. */
f7e70894
RS
1374
1375static bool
fa157b28 1376m68k_ok_for_sibcall_p (tree decl, tree exp)
f7e70894 1377{
fa157b28
NS
1378 enum m68k_function_kind kind;
1379
1380 /* We cannot use sibcalls for nested functions because we use the
1381 static chain register for indirect calls. */
1382 if (CALL_EXPR_STATIC_CHAIN (exp))
1383 return false;
1384
1385 kind = m68k_get_function_kind (current_function_decl);
1386 if (kind == m68k_fk_normal_function)
1387 /* We can always sibcall from a normal function, because it's
1388 undefined if it is calling an interrupt function. */
1389 return true;
1390
1391 /* Otherwise we can only sibcall if the function kind is known to be
1392 the same. */
1393 if (decl && m68k_get_function_kind (decl) == kind)
1394 return true;
1395
1396 return false;
f7e70894
RS
1397}
1398
29ca003a
RS
1399/* Convert X to a legitimate function call memory reference and return the
1400 result. */
a2ef3db7 1401
29ca003a
RS
1402rtx
1403m68k_legitimize_call_address (rtx x)
1404{
1405 gcc_assert (MEM_P (x));
1406 if (call_operand (XEXP (x, 0), VOIDmode))
1407 return x;
1408 return replace_equiv_address (x, force_reg (Pmode, XEXP (x, 0)));
a2ef3db7
BI
1409}
1410
f7e70894
RS
1411/* Likewise for sibling calls. */
1412
1413rtx
1414m68k_legitimize_sibcall_address (rtx x)
1415{
1416 gcc_assert (MEM_P (x));
1417 if (sibcall_operand (XEXP (x, 0), VOIDmode))
1418 return x;
1419
1420 emit_move_insn (gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM), XEXP (x, 0));
1421 return replace_equiv_address (x, gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM));
1422}
1423
64a184e9
RS
/* Output a dbCC; jCC sequence.  Note we do not handle the
   floating point version of this sequence (Fdbcc).  We also
   do not handle alternative conditions when CC_NO_OVERFLOW is
   set.  It is assumed that valid_dbcc_comparison_p and flags_in_68881 will
   kick those out before we get here.

   operands[0] is the loop counter, operands[1] the loop-top label,
   operands[2] the exit label, operands[3] the RTL comparison.  */

void
output_dbcc_and_branch (rtx *operands)
{
  /* Unsigned conditions map to the hi/cs/cc/ls mnemonics below.  */
  switch (GET_CODE (operands[3]))
    {
    case EQ:
      output_asm_insn ("dbeq %0,%l1\n\tjeq %l2", operands);
      break;

    case NE:
      output_asm_insn ("dbne %0,%l1\n\tjne %l2", operands);
      break;

    case GT:
      output_asm_insn ("dbgt %0,%l1\n\tjgt %l2", operands);
      break;

    case GTU:
      output_asm_insn ("dbhi %0,%l1\n\tjhi %l2", operands);
      break;

    case LT:
      output_asm_insn ("dblt %0,%l1\n\tjlt %l2", operands);
      break;

    case LTU:
      output_asm_insn ("dbcs %0,%l1\n\tjcs %l2", operands);
      break;

    case GE:
      output_asm_insn ("dbge %0,%l1\n\tjge %l2", operands);
      break;

    case GEU:
      output_asm_insn ("dbcc %0,%l1\n\tjcc %l2", operands);
      break;

    case LE:
      output_asm_insn ("dble %0,%l1\n\tjle %l2", operands);
      break;

    case LEU:
      output_asm_insn ("dbls %0,%l1\n\tjls %l2", operands);
      break;

    default:
      gcc_unreachable ();
    }

  /* If the decrement is to be done in SImode, then we have
     to compensate for the fact that dbcc decrements in HImode.  */
  switch (GET_MODE (operands[0]))
    {
    case SImode:
      output_asm_insn ("clr%.w %0\n\tsubq%.l #1,%0\n\tjpl %l1", operands);
      break;

    case HImode:
      break;

    default:
      gcc_unreachable ();
    }
}
1494
/* Output assembly to set the QImode destination DEST according to the
   comparison OP of the DImode values OPERAND1 and OPERAND2.  The
   64-bit values are compared as two SImode halves; loperands[4] labels
   the point reached when the high words already decide the result.  */

const char *
output_scc_di (rtx op, rtx operand1, rtx operand2, rtx dest)
{
  rtx loperands[7];
  enum rtx_code op_code = GET_CODE (op);

  /* This does not produce a useful cc.  */
  CC_STATUS_INIT;

  /* The m68k cmp.l instruction requires operand1 to be a reg as used
     below.  Swap the operands and change the op if these requirements
     are not fulfilled.  */
  if (GET_CODE (operand2) == REG && GET_CODE (operand1) != REG)
    {
      rtx tmp = operand1;

      operand1 = operand2;
      operand2 = tmp;
      op_code = swap_condition (op_code);
    }
  /* loperands[0]/[1] are the high/low SImode halves of OPERAND1;
     loperands[2]/[3] likewise for OPERAND2 (when it is not zero).  */
  loperands[0] = operand1;
  if (GET_CODE (operand1) == REG)
    loperands[1] = gen_rtx_REG (SImode, REGNO (operand1) + 1);
  else
    loperands[1] = adjust_address (operand1, SImode, 4);
  if (operand2 != const0_rtx)
    {
      loperands[2] = operand2;
      if (GET_CODE (operand2) == REG)
	loperands[3] = gen_rtx_REG (SImode, REGNO (operand2) + 1);
      else
	loperands[3] = adjust_address (operand2, SImode, 4);
    }
  loperands[4] = gen_label_rtx ();
  if (operand2 != const0_rtx)
    output_asm_insn ("cmp%.l %2,%0\n\tjne %l4\n\tcmp%.l %3,%1", loperands);
  else
    {
      /* Comparison against zero: tst.l is unavailable for address
	 registers before the 68020, hence the cmp.w fallback.  */
      if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[0]))
	output_asm_insn ("tst%.l %0", loperands);
      else
	output_asm_insn ("cmp%.w #0,%0", loperands);

      output_asm_insn ("jne %l4", loperands);

      if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[1]))
	output_asm_insn ("tst%.l %1", loperands);
      else
	output_asm_insn ("cmp%.w #0,%1", loperands);
    }

  loperands[5] = dest;

  /* For the strict orderings a second label (loperands[6]) skips the
     signed sCC used when the high words differ.  */
  switch (op_code)
    {
    case EQ:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("seq %5", loperands);
      break;

    case NE:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sne %5", loperands);
      break;

    case GT:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("shi %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sgt %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case GTU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("shi %5", loperands);
      break;

    case LT:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("scs %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("slt %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case LTU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("scs %5", loperands);
      break;

    case GE:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("scc %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sge %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case GEU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("scc %5", loperands);
      break;

    case LE:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("sls %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sle %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case LEU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sls %5", loperands);
      break;

    default:
      gcc_unreachable ();
    }
  return "";
}
1631
/* Output a bit-test instruction for bit COUNTOP of DATAOP, where
   SIGNPOS is the sign-bit position of the operand's storage unit
   (7, 15 or 31).  INSN is the btst insn itself, used to inspect the
   following insn.  Returns the assembler template; also updates
   cc_status to describe the flags the chosen instruction sets.  */

const char *
output_btst (rtx *operands, rtx countop, rtx dataop, rtx insn, int signpos)
{
  operands[0] = countop;
  operands[1] = dataop;

  if (GET_CODE (countop) == CONST_INT)
    {
      register int count = INTVAL (countop);
      /* If COUNT is bigger than size of storage unit in use,
	 advance to the containing unit of same size.  */
      if (count > signpos)
	{
	  int offset = (count & ~signpos) / 8;
	  count = count & signpos;
	  operands[1] = dataop = adjust_address (dataop, QImode, offset);
	}
      if (count == signpos)
	cc_status.flags = CC_NOT_POSITIVE | CC_Z_IN_NOT_N;
      else
	cc_status.flags = CC_NOT_NEGATIVE | CC_Z_IN_NOT_N;

      /* These three statements used to use next_insns_test_no...
	 but it appears that this should do the same job.  */
      if (count == 31
	  && next_insn_tests_no_inequality (insn))
	return "tst%.l %1";
      if (count == 15
	  && next_insn_tests_no_inequality (insn))
	return "tst%.w %1";
      if (count == 7
	  && next_insn_tests_no_inequality (insn))
	return "tst%.b %1";
      /* Try to use `movew to ccr' followed by the appropriate branch insn.
         On some m68k variants unfortunately that's slower than btst.
         On 68000 and higher, that should also work for all HImode operands. */
      if (TUNE_CPU32 || TARGET_COLDFIRE || optimize_size)
	{
	  if (count == 3 && DATA_REG_P (operands[1])
	      && next_insn_tests_no_inequality (insn))
	    {
	      cc_status.flags = CC_NOT_NEGATIVE | CC_Z_IN_NOT_N | CC_NO_OVERFLOW;
	      return "move%.w %1,%%ccr";
	    }
	  if (count == 2 && DATA_REG_P (operands[1])
	      && next_insn_tests_no_inequality (insn))
	    {
	      cc_status.flags = CC_NOT_NEGATIVE | CC_INVERTED | CC_NO_OVERFLOW;
	      return "move%.w %1,%%ccr";
	    }
	  /* count == 1 followed by bvc/bvs and
	     count == 0 followed by bcc/bcs are also possible, but need
	     m68k-specific CC_Z_IN_NOT_V and CC_Z_IN_NOT_C flags. */
	}

      cc_status.flags = CC_NOT_NEGATIVE;
    }
  return "btst %0,%1";
}
79e68feb 1691\f
fc2241eb
RS
1692/* Return true if X is a legitimate base register. STRICT_P says
1693 whether we need strict checking. */
1694
1695bool
1696m68k_legitimate_base_reg_p (rtx x, bool strict_p)
1697{
1698 /* Allow SUBREG everywhere we allow REG. This results in better code. */
1699 if (!strict_p && GET_CODE (x) == SUBREG)
1700 x = SUBREG_REG (x);
1701
1702 return (REG_P (x)
1703 && (strict_p
1704 ? REGNO_OK_FOR_BASE_P (REGNO (x))
bf32249e 1705 : REGNO_OK_FOR_BASE_NONSTRICT_P (REGNO (x))));
fc2241eb
RS
1706}
1707
1708/* Return true if X is a legitimate index register. STRICT_P says
1709 whether we need strict checking. */
1710
1711bool
1712m68k_legitimate_index_reg_p (rtx x, bool strict_p)
1713{
1714 if (!strict_p && GET_CODE (x) == SUBREG)
1715 x = SUBREG_REG (x);
1716
1717 return (REG_P (x)
1718 && (strict_p
1719 ? REGNO_OK_FOR_INDEX_P (REGNO (x))
bf32249e 1720 : REGNO_OK_FOR_INDEX_NONSTRICT_P (REGNO (x))));
fc2241eb
RS
1721}
1722
/* Return true if X is a legitimate index expression for a (d8,An,Xn) or
   (bd,An,Xn) addressing mode.  Fill in the INDEX and SCALE fields of
   ADDRESS if so.  STRICT_P says whether we need strict checking.  */

static bool
m68k_decompose_index (rtx x, bool strict_p, struct m68k_address *address)
{
  int scale;

  /* Check for a scale factor.  Only the 68020+ and ColdFire support
     scaled indexing, and plain ColdFire lacks the *8 scale.  */
  scale = 1;
  if ((TARGET_68020 || TARGET_COLDFIRE)
      && GET_CODE (x) == MULT
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && (INTVAL (XEXP (x, 1)) == 2
	  || INTVAL (XEXP (x, 1)) == 4
	  || (INTVAL (XEXP (x, 1)) == 8
	      && (TARGET_COLDFIRE_FPU || !TARGET_COLDFIRE))))
    {
      scale = INTVAL (XEXP (x, 1));
      x = XEXP (x, 0);
    }

  /* Check for a word extension (an Xn.w index register).  */
  if (!TARGET_COLDFIRE
      && GET_CODE (x) == SIGN_EXTEND
      && GET_MODE (XEXP (x, 0)) == HImode)
    x = XEXP (x, 0);

  if (m68k_legitimate_index_reg_p (x, strict_p))
    {
      address->scale = scale;
      address->index = x;
      return true;
    }

  return false;
}
1761
7ffb5e78
RS
1762/* Return true if X is an illegitimate symbolic constant. */
1763
1764bool
1765m68k_illegitimate_symbolic_constant_p (rtx x)
1766{
1767 rtx base, offset;
1768
1769 if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P)
1770 {
1771 split_const (x, &base, &offset);
1772 if (GET_CODE (base) == SYMBOL_REF
1773 && !offset_within_block_p (base, INTVAL (offset)))
1774 return true;
1775 }
1776 return false;
1777}
1778
fc2241eb
RS
/* Return true if X is a legitimate constant address that can reach
   bytes in the range [X, X + REACH).  STRICT_P says whether we need
   strict checking.  */

static bool
m68k_legitimate_constant_address_p (rtx x, unsigned int reach, bool strict_p)
{
  rtx base, offset;

  if (!CONSTANT_ADDRESS_P (x))
    return false;

  /* Symbolic addresses are not directly addressable under PIC,
     except for the strict TARGET_PCREL case.  */
  if (flag_pic
      && !(strict_p && TARGET_PCREL)
      && symbolic_operand (x, VOIDmode))
    return false;

  /* When offsets must stay within their section, the whole accessed
     range [X, X + REACH) must fall inside the symbol's block.  */
  if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P && reach > 1)
    {
      split_const (x, &base, &offset);
      if (GET_CODE (base) == SYMBOL_REF
	  && !offset_within_block_p (base, INTVAL (offset) + reach - 1))
	return false;
    }

  return true;
}
1806
1807/* Return true if X is a LABEL_REF for a jump table. Assume that unplaced
1808 labels will become jump tables. */
1809
1810static bool
1811m68k_jump_table_ref_p (rtx x)
1812{
1813 if (GET_CODE (x) != LABEL_REF)
1814 return false;
1815
1816 x = XEXP (x, 0);
1817 if (!NEXT_INSN (x) && !PREV_INSN (x))
1818 return true;
1819
1820 x = next_nonnote_insn (x);
1821 return x && JUMP_TABLE_DATA_P (x);
1822}
1823
/* Return true if X is a legitimate address for values of mode MODE.
   STRICT_P says whether strict checking is needed.  If the address
   is valid, describe its components in *ADDRESS.  */

static bool
m68k_decompose_address (enum machine_mode mode, rtx x,
			bool strict_p, struct m68k_address *address)
{
  unsigned int reach;

  memset (address, 0, sizeof (*address));

  /* REACH is how many bytes past X the access must be able to address;
     BLKmode has no meaningful size, so only the first byte matters.  */
  if (mode == BLKmode)
    reach = 1;
  else
    reach = GET_MODE_SIZE (mode);

  /* Check for (An) (mode 2).  */
  if (m68k_legitimate_base_reg_p (x, strict_p))
    {
      address->base = x;
      return true;
    }

  /* Check for -(An) and (An)+ (modes 3 and 4).  */
  if ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_INC)
      && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
    {
      address->code = GET_CODE (x);
      address->base = XEXP (x, 0);
      return true;
    }

  /* Check for (d16,An) (mode 5).  The upper bound on the displacement
     is tightened by REACH so the last byte is still addressable.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && IN_RANGE (INTVAL (XEXP (x, 1)), -0x8000, 0x8000 - reach)
      && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
    {
      address->base = XEXP (x, 0);
      address->offset = XEXP (x, 1);
      return true;
    }

  /* Check for GOT loads.  These are (bd,An,Xn) addresses if
     TARGET_68020 && flag_pic == 2, otherwise they are (d16,An)
     addresses.  */
  if (flag_pic
      && GET_CODE (x) == PLUS
      && XEXP (x, 0) == pic_offset_table_rtx
      && (GET_CODE (XEXP (x, 1)) == SYMBOL_REF
	  || GET_CODE (XEXP (x, 1)) == LABEL_REF))
    {
      address->base = XEXP (x, 0);
      address->offset = XEXP (x, 1);
      return true;
    }

  /* The ColdFire FPU only accepts addressing modes 2-5.  */
  if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
    return false;

  /* Check for (xxx).w and (xxx).l.  Also, in the TARGET_PCREL case,
     check for (d16,PC) or (bd,PC,Xn) with a suppressed index register.
     All these modes are variations of mode 7.  */
  if (m68k_legitimate_constant_address_p (x, reach, strict_p))
    {
      address->offset = x;
      return true;
    }

  /* Check for (d8,PC,Xn), a mode 7 form.  This case is needed for
     tablejumps.

     ??? do_tablejump creates these addresses before placing the target
     label, so we have to assume that unplaced labels are jump table
     references.  It seems unlikely that we would ever generate indexed
     accesses to unplaced labels in other cases.  */
  if (GET_CODE (x) == PLUS
      && m68k_jump_table_ref_p (XEXP (x, 1))
      && m68k_decompose_index (XEXP (x, 0), strict_p, address))
    {
      address->offset = XEXP (x, 1);
      return true;
    }

  /* Everything hereafter deals with (d8,An,Xn.SIZE*SCALE) or
     (bd,An,Xn.SIZE*SCALE) addresses.  */

  if (TARGET_68020)
    {
      /* Check for a nonzero base displacement.  If found, strip it off
	 and continue decomposing the remaining term.  */
      if (GET_CODE (x) == PLUS
	  && m68k_legitimate_constant_address_p (XEXP (x, 1), reach, strict_p))
	{
	  address->offset = XEXP (x, 1);
	  x = XEXP (x, 0);
	}

      /* Check for a suppressed index register.  */
      if (m68k_legitimate_base_reg_p (x, strict_p))
	{
	  address->base = x;
	  return true;
	}

      /* Check for a suppressed base register.  Do not allow this case
	 for non-symbolic offsets as it effectively gives gcc freedom
	 to treat data registers as base registers, which can generate
	 worse code.  */
      if (address->offset
	  && symbolic_operand (address->offset, VOIDmode)
	  && m68k_decompose_index (x, strict_p, address))
	return true;
    }
  else
    {
      /* Check for a nonzero base displacement (only an 8-bit range is
	 available without the 68020's full extension word).  */
      if (GET_CODE (x) == PLUS
	  && GET_CODE (XEXP (x, 1)) == CONST_INT
	  && IN_RANGE (INTVAL (XEXP (x, 1)), -0x80, 0x80 - reach))
	{
	  address->offset = XEXP (x, 1);
	  x = XEXP (x, 0);
	}
    }

  /* We now expect the sum of a base and an index.  */
  if (GET_CODE (x) == PLUS)
    {
      if (m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p)
	  && m68k_decompose_index (XEXP (x, 1), strict_p, address))
	{
	  address->base = XEXP (x, 0);
	  return true;
	}

      if (m68k_legitimate_base_reg_p (XEXP (x, 1), strict_p)
	  && m68k_decompose_index (XEXP (x, 0), strict_p, address))
	{
	  address->base = XEXP (x, 1);
	  return true;
	}
    }
  return false;
}
1970
1971/* Return true if X is a legitimate address for values of mode MODE.
1972 STRICT_P says whether strict checking is needed. */
1973
1974bool
1975m68k_legitimate_address_p (enum machine_mode mode, rtx x, bool strict_p)
1976{
1977 struct m68k_address address;
1978
1979 return m68k_decompose_address (mode, x, strict_p, &address);
1980}
1981
1982/* Return true if X is a memory, describing its address in ADDRESS if so.
1983 Apply strict checking if called during or after reload. */
1984
1985static bool
1986m68k_legitimate_mem_p (rtx x, struct m68k_address *address)
1987{
1988 return (MEM_P (x)
1989 && m68k_decompose_address (GET_MODE (x), XEXP (x, 0),
1990 reload_in_progress || reload_completed,
1991 address));
1992}
1993
1994/* Return true if X matches the 'Q' constraint. It must be a memory
1995 with a base address and no constant offset or index. */
1996
1997bool
1998m68k_matches_q_p (rtx x)
1999{
2000 struct m68k_address address;
2001
2002 return (m68k_legitimate_mem_p (x, &address)
2003 && address.code == UNKNOWN
2004 && address.base
2005 && !address.offset
2006 && !address.index);
2007}
2008
2009/* Return true if X matches the 'U' constraint. It must be a base address
2010 with a constant offset and no index. */
2011
2012bool
2013m68k_matches_u_p (rtx x)
2014{
2015 struct m68k_address address;
2016
2017 return (m68k_legitimate_mem_p (x, &address)
2018 && address.code == UNKNOWN
2019 && address.base
2020 && address.offset
2021 && !address.index);
2022}
2023
79e68feb
RS
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.

   An address is legitimized by making an indirect reference
   through the Global Offset Table with the name of the symbol
   used as an offset.

   The assembler and linker are responsible for placing the
   address of the symbol in the GOT.  The function prologue
   is responsible for initializing a5 to the starting address
   of the GOT.

   The assembler is also responsible for translating a symbol name
   into a constant displacement from the start of the GOT.

   A quick example may make things a little clearer:

   When not generating PIC code to store the value 12345 into _foo
   we would generate the following code:

	movel #12345, _foo

   When generating PIC two transformations are made.  First, the compiler
   loads the address of foo into a register.  So the first transformation makes:

	lea	_foo, a0
	movel   #12345, a0@

   The code in movsi will intercept the lea instruction and call this
   routine which will transform the instructions into:

	movel	a5@(_foo:w), a0
	movel	#12345, a0@


   That (in a nutshell) is how *all* symbol and label references are
   handled.  */

rtx
legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
			rtx reg)
{
  rtx pic_ref = orig;

  /* First handle a simple SYMBOL_REF or LABEL_REF */
  if (GET_CODE (orig) == SYMBOL_REF || GET_CODE (orig) == LABEL_REF)
    {
      /* A scratch register is required to build the GOT reference.  */
      gcc_assert (reg);

      if (TARGET_COLDFIRE && TARGET_XGOT)
	/* When compiling with -mxgot switch the code for the above
	   example will look like this:

	   movel a5, a0
	   addl _foo@GOT, a0
	   movel a0@, a0
	   movel #12345, a0@ */
	{
	  rtx pic_offset;

	  /* Wrap ORIG in UNSPEC_GOTOFF to tip m68k_output_addr_const_extra
	     to put @GOT after reference.  */
	  pic_offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, orig),
				       UNSPEC_GOTOFF);
	  pic_offset = gen_rtx_CONST (Pmode, pic_offset);
	  emit_move_insn (reg, pic_offset);
	  emit_insn (gen_addsi3 (reg, reg, pic_offset_table_rtx));
	  pic_ref = gen_rtx_MEM (Pmode, reg);
	}
      else
	pic_ref = gen_rtx_MEM (Pmode,
			       gen_rtx_PLUS (Pmode,
					     pic_offset_table_rtx, orig));
      crtl->uses_pic_offset_table = 1;
      /* GOT entries are constant after linking, so the load may be CSEd.  */
      MEM_READONLY_P (pic_ref) = 1;
      emit_move_insn (reg, pic_ref);
      return reg;
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      /* Make sure this has not already been legitimized.  */
      if (GET_CODE (XEXP (orig, 0)) == PLUS
	  && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
	return orig;

      gcc_assert (reg);

      /* legitimize both operands of the PLUS */
      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      /* Recurse on each operand; the second recursion must not clobber
	 REG if it already holds the legitimized BASE.  */
      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
				     base == reg ? 0 : reg);

      if (GET_CODE (orig) == CONST_INT)
	return plus_constant (base, INTVAL (orig));
      pic_ref = gen_rtx_PLUS (Pmode, base, orig);
      /* Likewise, should we set special REG_NOTEs here?  */
    }
  return pic_ref;
}
2129
2130\f
0ce6f9fb 2131
/* True if I fits the signed 8-bit immediate range of the moveq insn.  */
#define USE_MOVQ(i)	((unsigned) ((i) + 128) <= 255)

/* Return the type of move that should be used for integer I.  */

M68K_CONST_METHOD
m68k_const_method (HOST_WIDE_INT i)
{
  unsigned u;

  if (USE_MOVQ (i))
    return MOVQ;

  /* The ColdFire doesn't have byte or word operations.  */
  /* FIXME: This may not be useful for the m68060 either.  */
  if (!TARGET_COLDFIRE)
    {
      /* if -256 < N < 256 but N is not in range for a moveq
	 N^ff will be, so use moveq #N^ff, dreg; not.b dreg.  */
      if (USE_MOVQ (i ^ 0xff))
	return NOTB;
      /* Likewise, try with not.w */
      if (USE_MOVQ (i ^ 0xffff))
	return NOTW;
      /* This is the only value where neg.w is useful */
      if (i == -65408)
	return NEGW;
    }

  /* Try also with swap.  */
  u = i;
  if (USE_MOVQ ((u >> 16) | (u << 16)))
    return SWAP;

  if (TARGET_ISAB)
    {
      /* Try using MVZ/MVS with an immediate value to load constants.
	 MVZ zero-extends a 16-bit immediate, MVS sign-extends one.  */
      if (i >= 0 && i <= 65535)
	return MVZ;
      if (i >= -32768 && i <= 32767)
	return MVS;
    }

  /* Otherwise, use move.l */
  return MOVL;
}
2177
bda2a571
RS
2178/* Return the cost of moving constant I into a data register. */
2179
3c50106f 2180static int
bda2a571 2181const_int_cost (HOST_WIDE_INT i)
0ce6f9fb 2182{
c47b0cb4 2183 switch (m68k_const_method (i))
0ce6f9fb 2184 {
a0a7fbc9
AS
2185 case MOVQ:
2186 /* Constants between -128 and 127 are cheap due to moveq. */
2187 return 0;
2188 case MVZ:
2189 case MVS:
2190 case NOTB:
2191 case NOTW:
2192 case NEGW:
2193 case SWAP:
2194 /* Constants easily generated by moveq + not.b/not.w/neg.w/swap. */
2195 return 1;
2196 case MOVL:
2197 return 2;
2198 default:
2199 gcc_unreachable ();
0ce6f9fb
RK
2200 }
2201}
2202
/* Compute the cost of expression X, storing it in *TOTAL.  Return true
   when CODE has been handled here, false to fall back to the default
   costing.  OUTER_CODE is the rtx code of X's parent expression.  */

static bool
m68k_rtx_costs (rtx x, int code, int outer_code, int *total)
{
  switch (code)
    {
    case CONST_INT:
      /* Constant zero is super cheap due to clr instruction.  */
      if (x == const0_rtx)
	*total = 0;
      else
	*total = const_int_cost (INTVAL (x));
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 3;
      return true;

    case CONST_DOUBLE:
      /* Make 0.0 cheaper than other floating constants to
	 encourage creating tstsf and tstdf insns.  */
      if (outer_code == COMPARE
	  && (x == CONST0_RTX (SFmode) || x == CONST0_RTX (DFmode)))
	*total = 4;
      else
	*total = 5;
      return true;

    /* These are vaguely right for a 68020.  */
    /* The costs for long multiply have been adjusted to work properly
       in synth_mult on the 68020, relative to an average of the time
       for add and the time for shift, taking away a little more because
       sometimes move insns are needed.  */
    /* div?.w is relatively cheaper on 68000 counted in COSTS_N_INSNS
       terms.  */
#define MULL_COST				\
  (TUNE_68060 ? 2				\
   : TUNE_68040 ? 5				\
   : (TUNE_CFV2 && TUNE_EMAC) ? 3		\
   : (TUNE_CFV2 && TUNE_MAC) ? 4		\
   : TUNE_CFV2 ? 8				\
   : TARGET_COLDFIRE ? 3 : 13)

#define MULW_COST				\
  (TUNE_68060 ? 2				\
   : TUNE_68040 ? 3				\
   : TUNE_68000_10 ? 5				\
   : (TUNE_CFV2 && TUNE_EMAC) ? 3		\
   : (TUNE_CFV2 && TUNE_MAC) ? 2		\
   : TUNE_CFV2 ? 8				\
   : TARGET_COLDFIRE ? 2 : 8)

#define DIVW_COST				\
  (TARGET_CF_HWDIV ? 11				\
   : TUNE_68000_10 || TARGET_COLDFIRE ? 12 : 27)

    case PLUS:
      /* An lea costs about three times as much as a simple add.  */
      if (GET_MODE (x) == SImode
	  && GET_CODE (XEXP (x, 1)) == REG
	  && GET_CODE (XEXP (x, 0)) == MULT
	  && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
	  && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
	  && (INTVAL (XEXP (XEXP (x, 0), 1)) == 2
	      || INTVAL (XEXP (XEXP (x, 0), 1)) == 4
	      || INTVAL (XEXP (XEXP (x, 0), 1)) == 8))
	{
	  /* lea an@(dx:l:i),am */
	  *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 2 : 3);
	  return true;
	}
      return false;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      if (TUNE_68060)
	{
	  *total = COSTS_N_INSNS(1);
	  return true;
	}
      if (TUNE_68000_10)
	{
	  /* On the early parts shift time grows with the shift count.  */
	  if (GET_CODE (XEXP (x, 1)) == CONST_INT)
	    {
	      if (INTVAL (XEXP (x, 1)) < 16)
		*total = COSTS_N_INSNS (2) + INTVAL (XEXP (x, 1)) / 2;
	      else
		/* We're using clrw + swap for these cases.  */
		*total = COSTS_N_INSNS (4) + (INTVAL (XEXP (x, 1)) - 16) / 2;
	    }
	  else
	    *total = COSTS_N_INSNS (10); /* Worst case.  */
	  return true;
	}
      /* A shift by a big integer takes an extra instruction.  */
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && (INTVAL (XEXP (x, 1)) == 16))
	{
	  *total = COSTS_N_INSNS (2);	 /* clrw;swap */
	  return true;
	}
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && !(INTVAL (XEXP (x, 1)) > 0
	       && INTVAL (XEXP (x, 1)) <= 8))
	{
	  *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 1 : 3);	 /* lsr #i,dn */
	  return true;
	}
      return false;

    case MULT:
      /* Widening 16x16->32 multiplies and narrow-mode multiplies use
	 the cheaper word-multiply cost.  */
      if ((GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
	   || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
	  && GET_MODE (x) == SImode)
	*total = COSTS_N_INSNS (MULW_COST);
      else if (GET_MODE (x) == QImode || GET_MODE (x) == HImode)
	*total = COSTS_N_INSNS (MULW_COST);
      else
	*total = COSTS_N_INSNS (MULL_COST);
      return true;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      if (GET_MODE (x) == QImode || GET_MODE (x) == HImode)
	*total = COSTS_N_INSNS (DIVW_COST);	/* div.w */
      else if (TARGET_CF_HWDIV)
	*total = COSTS_N_INSNS (18);
      else
	*total = COSTS_N_INSNS (43);		/* div.l */
      return true;

    default:
      return false;
    }
}
2342
/* Return an instruction to move CONST_INT OPERANDS[1] into data register
   OPERANDS[0].  May rewrite OPERANDS[1] in place so the returned template
   emits the transformed immediate.  */

static const char *
output_move_const_into_data_reg (rtx *operands)
{
  HOST_WIDE_INT i;

  i = INTVAL (operands[1]);
  switch (m68k_const_method (i))
    {
    case MVZ:
      /* ISA B: zero-extend a 16-bit immediate.  */
      return "mvzw %1,%0";
    case MVS:
      /* ISA B: sign-extend a 16-bit immediate.  */
      return "mvsw %1,%0";
    case MOVQ:
      return "moveq %1,%0";
    case NOTB:
      /* Two-insn sequence: CC_STATUS_INIT discards any tracked
	 condition-code state.  */
      CC_STATUS_INIT;
      operands[1] = GEN_INT (i ^ 0xff);
      return "moveq %1,%0\n\tnot%.b %0";
    case NOTW:
      CC_STATUS_INIT;
      operands[1] = GEN_INT (i ^ 0xffff);
      return "moveq %1,%0\n\tnot%.w %0";
    case NEGW:
      CC_STATUS_INIT;
      /* -65408 == neg.w of (moveq #-128).  */
      return "moveq #-128,%0\n\tneg%.w %0";
    case SWAP:
      {
	unsigned u = i;

	/* Emit the halfword-swapped value, then swap it back.  */
	operands[1] = GEN_INT ((u << 16) | (u >> 16));
	return "moveq %1,%0\n\tswap %0";
      }
    case MOVL:
      return "move%.l %1,%0";
    default:
      gcc_unreachable ();
    }
}
2384
bda2a571 2385/* Return true if I can be handled by ISA B's mov3q instruction. */
5e04daf3 2386
bda2a571
RS
2387bool
2388valid_mov3q_const (HOST_WIDE_INT i)
2389{
2390 return TARGET_ISAB && (i == -1 || IN_RANGE (i, 1, 7));
5e04daf3
PB
2391}
2392
bda2a571
RS
/* Return an instruction to move CONST_INT OPERANDS[1] into OPERANDS[0],
   choosing the cheapest encoding the target supports.  */

static const char *
output_move_simode_const (rtx *operands)
{
  rtx dest;
  HOST_WIDE_INT src;

  dest = operands[0];
  src = INTVAL (operands[1]);
  if (src == 0
      && (DATA_REG_P (dest) || MEM_P (dest))
      /* clr insns on 68000 read before writing.  */
      && ((TARGET_68010 || TARGET_COLDFIRE)
	  || !(MEM_P (dest) && MEM_VOLATILE_P (dest))))
    return "clr%.l %0";
  else if (GET_MODE (dest) == SImode && valid_mov3q_const (src))
    return "mov3q%.l %1,%0";
  else if (src == 0 && ADDRESS_REG_P (dest))
    return "sub%.l %0,%0";
  else if (DATA_REG_P (dest))
    return output_move_const_into_data_reg (operands);
  else if (ADDRESS_REG_P (dest) && IN_RANGE (src, -0x8000, 0x7fff))
    {
      /* A word move into an address register sign-extends to 32 bits.  */
      if (valid_mov3q_const (src))
	return "mov3q%.l %1,%0";
      return "move%.w %1,%0";
    }
  else if (MEM_P (dest)
	   && GET_CODE (XEXP (dest, 0)) == PRE_DEC
	   && REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
	   && IN_RANGE (src, -0x8000, 0x7fff))
    {
      /* Pushing a small constant: pea sign-extends its word operand.  */
      if (valid_mov3q_const (src))
	return "mov3q%.l %1,%-";
      return "pea %a1";
    }
  return "move%.l %1,%0";
}
2433
5505f548 2434const char *
8a4a2253 2435output_move_simode (rtx *operands)
f4e80198
RK
2436{
2437 if (GET_CODE (operands[1]) == CONST_INT)
2438 return output_move_simode_const (operands);
2439 else if ((GET_CODE (operands[1]) == SYMBOL_REF
2440 || GET_CODE (operands[1]) == CONST)
2441 && push_operand (operands[0], SImode))
2442 return "pea %a1";
2443 else if ((GET_CODE (operands[1]) == SYMBOL_REF
2444 || GET_CODE (operands[1]) == CONST)
2445 && ADDRESS_REG_P (operands[0]))
2446 return "lea %a1,%0";
2447 return "move%.l %1,%0";
2448}
2449
5505f548 2450const char *
8a4a2253 2451output_move_himode (rtx *operands)
f4e80198
RK
2452{
2453 if (GET_CODE (operands[1]) == CONST_INT)
2454 {
2455 if (operands[1] == const0_rtx
2456 && (DATA_REG_P (operands[0])
2457 || GET_CODE (operands[0]) == MEM)
3197c489
RS
2458 /* clr insns on 68000 read before writing. */
2459 && ((TARGET_68010 || TARGET_COLDFIRE)
f4e80198
RK
2460 || !(GET_CODE (operands[0]) == MEM
2461 && MEM_VOLATILE_P (operands[0]))))
2462 return "clr%.w %0";
38198304
AS
2463 else if (operands[1] == const0_rtx
2464 && ADDRESS_REG_P (operands[0]))
2465 return "sub%.l %0,%0";
f4e80198
RK
2466 else if (DATA_REG_P (operands[0])
2467 && INTVAL (operands[1]) < 128
2468 && INTVAL (operands[1]) >= -128)
a0a7fbc9 2469 return "moveq %1,%0";
f4e80198
RK
2470 else if (INTVAL (operands[1]) < 0x8000
2471 && INTVAL (operands[1]) >= -0x8000)
2472 return "move%.w %1,%0";
2473 }
2474 else if (CONSTANT_P (operands[1]))
2475 return "move%.l %1,%0";
f4e80198
RK
2476 return "move%.w %1,%0";
2477}
2478
/* Return the assembler template for a QImode move of OPERANDS[1]
   into OPERANDS[0].  */

const char *
output_move_qimode (rtx *operands)
{
  /* 68k family always modifies the stack pointer by at least 2, even for
     byte pushes.  The 5200 (ColdFire) does not do this.  */

  /* This case is generated by pushqi1 pattern now.  */
  gcc_assert (!(GET_CODE (operands[0]) == MEM
		&& GET_CODE (XEXP (operands[0], 0)) == PRE_DEC
		&& XEXP (XEXP (operands[0], 0), 0) == stack_pointer_rtx
		&& ! ADDRESS_REG_P (operands[1])
		&& ! TARGET_COLDFIRE));

  /* clr and st insns on 68000 read before writing.  */
  if (!ADDRESS_REG_P (operands[0])
      && ((TARGET_68010 || TARGET_COLDFIRE)
	  || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
    {
      if (operands[1] == const0_rtx)
	return "clr%.b %0";
      /* st sets all bits of a byte; usable for the constant 0xff.  */
      if ((!TARGET_COLDFIRE || DATA_REG_P (operands[0]))
	  && GET_CODE (operands[1]) == CONST_INT
	  && (INTVAL (operands[1]) & 255) == 255)
	{
	  CC_STATUS_INIT;
	  return "st %0";
	}
    }
  if (GET_CODE (operands[1]) == CONST_INT
      && DATA_REG_P (operands[0])
      && INTVAL (operands[1]) < 128
      && INTVAL (operands[1]) >= -128)
    return "moveq %1,%0";
  if (operands[1] == const0_rtx && ADDRESS_REG_P (operands[0]))
    return "sub%.l %0,%0";
  if (GET_CODE (operands[1]) != CONST_INT && CONSTANT_P (operands[1]))
    return "move%.l %1,%0";
  /* 68k family (including the 5200 ColdFire) does not support byte moves to
     from address registers.  */
  if (ADDRESS_REG_P (operands[0]) || ADDRESS_REG_P (operands[1]))
    return "move%.w %1,%0";
  return "move%.b %1,%0";
}
2522
5505f548 2523const char *
8a4a2253 2524output_move_stricthi (rtx *operands)
9b55bf04
RK
2525{
2526 if (operands[1] == const0_rtx
3197c489
RS
2527 /* clr insns on 68000 read before writing. */
2528 && ((TARGET_68010 || TARGET_COLDFIRE)
9b55bf04
RK
2529 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
2530 return "clr%.w %0";
2531 return "move%.w %1,%0";
2532}
2533
5505f548 2534const char *
8a4a2253 2535output_move_strictqi (rtx *operands)
9b55bf04
RK
2536{
2537 if (operands[1] == const0_rtx
3197c489
RS
2538 /* clr insns on 68000 read before writing. */
2539 && ((TARGET_68010 || TARGET_COLDFIRE)
9b55bf04
RK
2540 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
2541 return "clr%.b %0";
2542 return "move%.b %1,%0";
2543}
2544
79e68feb
RS
2545/* Return the best assembler insn template
2546 for moving operands[1] into operands[0] as a fullword. */
2547
5505f548 2548static const char *
8a4a2253 2549singlemove_string (rtx *operands)
79e68feb 2550{
02ed0c07
RK
2551 if (GET_CODE (operands[1]) == CONST_INT)
2552 return output_move_simode_const (operands);
2553 return "move%.l %1,%0";
79e68feb
RS
2554}
2555
2505bc97 2556
c47b0cb4
MK
2557/* Output assembler or rtl code to perform a doubleword move insn
2558 with operands OPERANDS.
2559 Pointers to 3 helper functions should be specified:
2560 HANDLE_REG_ADJUST to adjust a register by a small value,
2561 HANDLE_COMPADR to compute an address and
2562 HANDLE_MOVSI to move 4 bytes. */
79e68feb 2563
c47b0cb4
MK
2564static void
2565handle_move_double (rtx operands[2],
2566 void (*handle_reg_adjust) (rtx, int),
2567 void (*handle_compadr) (rtx [2]),
2568 void (*handle_movsi) (rtx [2]))
79e68feb 2569{
2505bc97
RS
2570 enum
2571 {
2572 REGOP, OFFSOP, MEMOP, PUSHOP, POPOP, CNSTOP, RNDOP
2573 } optype0, optype1;
79e68feb 2574 rtx latehalf[2];
2505bc97 2575 rtx middlehalf[2];
7f98eeb6 2576 rtx xops[2];
79e68feb 2577 rtx addreg0 = 0, addreg1 = 0;
7f98eeb6 2578 int dest_overlapped_low = 0;
184916bc 2579 int size = GET_MODE_SIZE (GET_MODE (operands[0]));
2505bc97
RS
2580
2581 middlehalf[0] = 0;
2582 middlehalf[1] = 0;
79e68feb
RS
2583
2584 /* First classify both operands. */
2585
2586 if (REG_P (operands[0]))
2587 optype0 = REGOP;
2588 else if (offsettable_memref_p (operands[0]))
2589 optype0 = OFFSOP;
2590 else if (GET_CODE (XEXP (operands[0], 0)) == POST_INC)
2591 optype0 = POPOP;
2592 else if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC)
2593 optype0 = PUSHOP;
2594 else if (GET_CODE (operands[0]) == MEM)
2595 optype0 = MEMOP;
2596 else
2597 optype0 = RNDOP;
2598
2599 if (REG_P (operands[1]))
2600 optype1 = REGOP;
2601 else if (CONSTANT_P (operands[1]))
2602 optype1 = CNSTOP;
2603 else if (offsettable_memref_p (operands[1]))
2604 optype1 = OFFSOP;
2605 else if (GET_CODE (XEXP (operands[1], 0)) == POST_INC)
2606 optype1 = POPOP;
2607 else if (GET_CODE (XEXP (operands[1], 0)) == PRE_DEC)
2608 optype1 = PUSHOP;
2609 else if (GET_CODE (operands[1]) == MEM)
2610 optype1 = MEMOP;
2611 else
2612 optype1 = RNDOP;
2613
4761e388
NS
2614 /* Check for the cases that the operand constraints are not supposed
2615 to allow to happen. Generating code for these cases is
2616 painful. */
2617 gcc_assert (optype0 != RNDOP && optype1 != RNDOP);
79e68feb
RS
2618
2619 /* If one operand is decrementing and one is incrementing
2620 decrement the former register explicitly
2621 and change that operand into ordinary indexing. */
2622
2623 if (optype0 == PUSHOP && optype1 == POPOP)
2624 {
2625 operands[0] = XEXP (XEXP (operands[0], 0), 0);
c47b0cb4
MK
2626
2627 handle_reg_adjust (operands[0], -size);
2628
2505bc97 2629 if (GET_MODE (operands[1]) == XFmode)
1d8eaa6b 2630 operands[0] = gen_rtx_MEM (XFmode, operands[0]);
2505bc97 2631 else if (GET_MODE (operands[0]) == DFmode)
1d8eaa6b 2632 operands[0] = gen_rtx_MEM (DFmode, operands[0]);
2505bc97 2633 else
1d8eaa6b 2634 operands[0] = gen_rtx_MEM (DImode, operands[0]);
79e68feb
RS
2635 optype0 = OFFSOP;
2636 }
2637 if (optype0 == POPOP && optype1 == PUSHOP)
2638 {
2639 operands[1] = XEXP (XEXP (operands[1], 0), 0);
c47b0cb4
MK
2640
2641 handle_reg_adjust (operands[1], -size);
2642
2505bc97 2643 if (GET_MODE (operands[1]) == XFmode)
1d8eaa6b 2644 operands[1] = gen_rtx_MEM (XFmode, operands[1]);
2505bc97 2645 else if (GET_MODE (operands[1]) == DFmode)
1d8eaa6b 2646 operands[1] = gen_rtx_MEM (DFmode, operands[1]);
2505bc97 2647 else
1d8eaa6b 2648 operands[1] = gen_rtx_MEM (DImode, operands[1]);
79e68feb
RS
2649 optype1 = OFFSOP;
2650 }
2651
2652 /* If an operand is an unoffsettable memory ref, find a register
2653 we can increment temporarily to make it refer to the second word. */
2654
2655 if (optype0 == MEMOP)
2656 addreg0 = find_addr_reg (XEXP (operands[0], 0));
2657
2658 if (optype1 == MEMOP)
2659 addreg1 = find_addr_reg (XEXP (operands[1], 0));
2660
2661 /* Ok, we can do one word at a time.
2662 Normally we do the low-numbered word first,
2663 but if either operand is autodecrementing then we
2664 do the high-numbered word first.
2665
2666 In either case, set up in LATEHALF the operands to use
2667 for the high-numbered word and in some cases alter the
2668 operands in OPERANDS to be suitable for the low-numbered word. */
2669
2505bc97
RS
2670 if (size == 12)
2671 {
2672 if (optype0 == REGOP)
2673 {
1d8eaa6b
AS
2674 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 2);
2675 middlehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2505bc97
RS
2676 }
2677 else if (optype0 == OFFSOP)
2678 {
b72f00af
RK
2679 middlehalf[0] = adjust_address (operands[0], SImode, 4);
2680 latehalf[0] = adjust_address (operands[0], SImode, size - 4);
2505bc97
RS
2681 }
2682 else
2683 {
c47b0cb4
MK
2684 middlehalf[0] = adjust_address (operands[0], SImode, 0);
2685 latehalf[0] = adjust_address (operands[0], SImode, 0);
2505bc97
RS
2686 }
2687
2688 if (optype1 == REGOP)
2689 {
1d8eaa6b
AS
2690 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 2);
2691 middlehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
2505bc97
RS
2692 }
2693 else if (optype1 == OFFSOP)
2694 {
b72f00af
RK
2695 middlehalf[1] = adjust_address (operands[1], SImode, 4);
2696 latehalf[1] = adjust_address (operands[1], SImode, size - 4);
2505bc97
RS
2697 }
2698 else if (optype1 == CNSTOP)
2699 {
2700 if (GET_CODE (operands[1]) == CONST_DOUBLE)
2701 {
2702 REAL_VALUE_TYPE r;
2703 long l[3];
2704
2705 REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
2706 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
2707 operands[1] = GEN_INT (l[0]);
2708 middlehalf[1] = GEN_INT (l[1]);
2709 latehalf[1] = GEN_INT (l[2]);
2710 }
4761e388 2711 else
2505bc97 2712 {
4761e388
NS
2713 /* No non-CONST_DOUBLE constant should ever appear
2714 here. */
2715 gcc_assert (!CONSTANT_P (operands[1]));
2505bc97
RS
2716 }
2717 }
2718 else
2719 {
c47b0cb4
MK
2720 middlehalf[1] = adjust_address (operands[1], SImode, 0);
2721 latehalf[1] = adjust_address (operands[1], SImode, 0);
2505bc97
RS
2722 }
2723 }
79e68feb 2724 else
2505bc97
RS
2725 /* size is not 12: */
2726 {
2727 if (optype0 == REGOP)
1d8eaa6b 2728 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2505bc97 2729 else if (optype0 == OFFSOP)
b72f00af 2730 latehalf[0] = adjust_address (operands[0], SImode, size - 4);
2505bc97 2731 else
c47b0cb4 2732 latehalf[0] = adjust_address (operands[0], SImode, 0);
2505bc97
RS
2733
2734 if (optype1 == REGOP)
1d8eaa6b 2735 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
2505bc97 2736 else if (optype1 == OFFSOP)
b72f00af 2737 latehalf[1] = adjust_address (operands[1], SImode, size - 4);
2505bc97
RS
2738 else if (optype1 == CNSTOP)
2739 split_double (operands[1], &operands[1], &latehalf[1]);
2740 else
c47b0cb4 2741 latehalf[1] = adjust_address (operands[1], SImode, 0);
2505bc97 2742 }
79e68feb
RS
2743
2744 /* If insn is effectively movd N(sp),-(sp) then we will do the
2745 high word first. We should use the adjusted operand 1 (which is N+4(sp))
2746 for the low word as well, to compensate for the first decrement of sp. */
2747 if (optype0 == PUSHOP
2748 && REGNO (XEXP (XEXP (operands[0], 0), 0)) == STACK_POINTER_REGNUM
2749 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
c88aeaf8 2750 operands[1] = middlehalf[1] = latehalf[1];
79e68feb 2751
7f98eeb6
RS
2752 /* For (set (reg:DI N) (mem:DI ... (reg:SI N) ...)),
2753 if the upper part of reg N does not appear in the MEM, arrange to
2754 emit the move late-half first. Otherwise, compute the MEM address
2755 into the upper part of N and use that as a pointer to the memory
2756 operand. */
2757 if (optype0 == REGOP
2758 && (optype1 == OFFSOP || optype1 == MEMOP))
2759 {
1d8eaa6b 2760 rtx testlow = gen_rtx_REG (SImode, REGNO (operands[0]));
3a58400f
RS
2761
2762 if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
d7e8d581 2763 && reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
7f98eeb6
RS
2764 {
2765 /* If both halves of dest are used in the src memory address,
3a58400f
RS
2766 compute the address into latehalf of dest.
2767 Note that this can't happen if the dest is two data regs. */
4761e388 2768 compadr:
7f98eeb6
RS
2769 xops[0] = latehalf[0];
2770 xops[1] = XEXP (operands[1], 0);
c47b0cb4
MK
2771
2772 handle_compadr (xops);
2773 if (GET_MODE (operands[1]) == XFmode)
7f98eeb6 2774 {
1d8eaa6b 2775 operands[1] = gen_rtx_MEM (XFmode, latehalf[0]);
b72f00af
RK
2776 middlehalf[1] = adjust_address (operands[1], DImode, size - 8);
2777 latehalf[1] = adjust_address (operands[1], DImode, size - 4);
7f98eeb6
RS
2778 }
2779 else
2780 {
1d8eaa6b 2781 operands[1] = gen_rtx_MEM (DImode, latehalf[0]);
b72f00af 2782 latehalf[1] = adjust_address (operands[1], DImode, size - 4);
7f98eeb6
RS
2783 }
2784 }
2785 else if (size == 12
d7e8d581
RS
2786 && reg_overlap_mentioned_p (middlehalf[0],
2787 XEXP (operands[1], 0)))
7f98eeb6 2788 {
3a58400f
RS
2789 /* Check for two regs used by both source and dest.
2790 Note that this can't happen if the dest is all data regs.
2791 It can happen if the dest is d6, d7, a0.
2792 But in that case, latehalf is an addr reg, so
2793 the code at compadr does ok. */
2794
2795 if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
d7e8d581
RS
2796 || reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
2797 goto compadr;
7f98eeb6
RS
2798
2799 /* JRV says this can't happen: */
4761e388 2800 gcc_assert (!addreg0 && !addreg1);
7f98eeb6 2801
7a1929e1 2802 /* Only the middle reg conflicts; simply put it last. */
c47b0cb4
MK
2803 handle_movsi (operands);
2804 handle_movsi (latehalf);
2805 handle_movsi (middlehalf);
2806
2807 return;
7f98eeb6 2808 }
2fb8a81d 2809 else if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0)))
7f98eeb6
RS
2810 /* If the low half of dest is mentioned in the source memory
2811 address, the arrange to emit the move late half first. */
2812 dest_overlapped_low = 1;
2813 }
2814
79e68feb
RS
2815 /* If one or both operands autodecrementing,
2816 do the two words, high-numbered first. */
2817
2818 /* Likewise, the first move would clobber the source of the second one,
2819 do them in the other order. This happens only for registers;
2820 such overlap can't happen in memory unless the user explicitly
2821 sets it up, and that is an undefined circumstance. */
2822
2823 if (optype0 == PUSHOP || optype1 == PUSHOP
2824 || (optype0 == REGOP && optype1 == REGOP
2505bc97 2825 && ((middlehalf[1] && REGNO (operands[0]) == REGNO (middlehalf[1]))
7f98eeb6
RS
2826 || REGNO (operands[0]) == REGNO (latehalf[1])))
2827 || dest_overlapped_low)
79e68feb
RS
2828 {
2829 /* Make any unoffsettable addresses point at high-numbered word. */
2830 if (addreg0)
c47b0cb4 2831 handle_reg_adjust (addreg0, size - 4);
79e68feb 2832 if (addreg1)
c47b0cb4 2833 handle_reg_adjust (addreg1, size - 4);
79e68feb
RS
2834
2835 /* Do that word. */
c47b0cb4 2836 handle_movsi (latehalf);
79e68feb
RS
2837
2838 /* Undo the adds we just did. */
2839 if (addreg0)
c47b0cb4 2840 handle_reg_adjust (addreg0, -4);
79e68feb 2841 if (addreg1)
c47b0cb4 2842 handle_reg_adjust (addreg1, -4);
79e68feb 2843
2505bc97
RS
2844 if (size == 12)
2845 {
c47b0cb4
MK
2846 handle_movsi (middlehalf);
2847
2505bc97 2848 if (addreg0)
c47b0cb4 2849 handle_reg_adjust (addreg0, -4);
2505bc97 2850 if (addreg1)
c47b0cb4 2851 handle_reg_adjust (addreg1, -4);
2505bc97
RS
2852 }
2853
79e68feb 2854 /* Do low-numbered word. */
c47b0cb4
MK
2855
2856 handle_movsi (operands);
2857 return;
79e68feb
RS
2858 }
2859
2860 /* Normal case: do the two words, low-numbered first. */
2861
c47b0cb4 2862 handle_movsi (operands);
79e68feb 2863
2505bc97
RS
2864 /* Do the middle one of the three words for long double */
2865 if (size == 12)
2866 {
2867 if (addreg0)
c47b0cb4 2868 handle_reg_adjust (addreg0, 4);
2505bc97 2869 if (addreg1)
c47b0cb4 2870 handle_reg_adjust (addreg1, 4);
2505bc97 2871
c47b0cb4 2872 handle_movsi (middlehalf);
2505bc97
RS
2873 }
2874
79e68feb
RS
2875 /* Make any unoffsettable addresses point at high-numbered word. */
2876 if (addreg0)
c47b0cb4 2877 handle_reg_adjust (addreg0, 4);
79e68feb 2878 if (addreg1)
c47b0cb4 2879 handle_reg_adjust (addreg1, 4);
79e68feb
RS
2880
2881 /* Do that word. */
c47b0cb4 2882 handle_movsi (latehalf);
79e68feb
RS
2883
2884 /* Undo the adds we just did. */
2885 if (addreg0)
c47b0cb4
MK
2886 handle_reg_adjust (addreg0, -(size - 4));
2887 if (addreg1)
2888 handle_reg_adjust (addreg1, -(size - 4));
2889
2890 return;
2891}
2892
2893/* Output assembler code to adjust REG by N. */
2894static void
2895output_reg_adjust (rtx reg, int n)
2896{
2897 const char *s;
2898
2899 gcc_assert (GET_MODE (reg) == SImode
2900 && -12 <= n && n != 0 && n <= 12);
2901
2902 switch (n)
2505bc97 2903 {
c47b0cb4
MK
2904 case 12:
2905 s = "add%.l #12,%0";
2906 break;
2907
2908 case 8:
2909 s = "addq%.l #8,%0";
2910 break;
2911
2912 case 4:
2913 s = "addq%.l #4,%0";
2914 break;
2915
2916 case -12:
2917 s = "sub%.l #12,%0";
2918 break;
2919
2920 case -8:
2921 s = "subq%.l #8,%0";
2922 break;
2923
2924 case -4:
2925 s = "subq%.l #4,%0";
2926 break;
2927
2928 default:
2929 gcc_unreachable ();
2930 s = NULL;
2505bc97 2931 }
c47b0cb4
MK
2932
2933 output_asm_insn (s, &reg);
2934}
2935
2936/* Emit rtl code to adjust REG by N. */
2937static void
2938emit_reg_adjust (rtx reg1, int n)
2939{
2940 rtx reg2;
2941
2942 gcc_assert (GET_MODE (reg1) == SImode
2943 && -12 <= n && n != 0 && n <= 12);
2944
2945 reg1 = copy_rtx (reg1);
2946 reg2 = copy_rtx (reg1);
2947
2948 if (n < 0)
2949 emit_insn (gen_subsi3 (reg1, reg2, GEN_INT (-n)));
2950 else if (n > 0)
2951 emit_insn (gen_addsi3 (reg1, reg2, GEN_INT (n)));
2952 else
2953 gcc_unreachable ();
2954}
2955
/* Output assembler to load the address OPERANDS[1] into register
   OPERANDS[0] (the original comment had the operands reversed; the
   "lea" template below takes the address from %1 into %0).  */
static void
output_compadr (rtx operands[2])
{
  output_asm_insn ("lea %a1,%0", operands);
}
2962
/* Output the best assembler insn for moving operands[1] into operands[0]
   as a fullword.  Delegates template selection to singlemove_string.  */
static void
output_movsi (rtx operands[2])
{
  output_asm_insn (singlemove_string (operands), operands);
}
2970
2971/* Copy OP and change its mode to MODE. */
2972static rtx
2973copy_operand (rtx op, enum machine_mode mode)
2974{
2975 /* ??? This looks really ugly. There must be a better way
2976 to change a mode on the operand. */
2977 if (GET_MODE (op) != VOIDmode)
2505bc97 2978 {
c47b0cb4
MK
2979 if (REG_P (op))
2980 op = gen_rtx_REG (mode, REGNO (op));
2505bc97 2981 else
c47b0cb4
MK
2982 {
2983 op = copy_rtx (op);
2984 PUT_MODE (op, mode);
2985 }
2505bc97 2986 }
79e68feb 2987
c47b0cb4
MK
2988 return op;
2989}
2990
2991/* Emit rtl code for moving operands[1] into operands[0] as a fullword. */
2992static void
2993emit_movsi (rtx operands[2])
2994{
2995 operands[0] = copy_operand (operands[0], SImode);
2996 operands[1] = copy_operand (operands[1], SImode);
2997
2998 emit_insn (gen_movsi (operands[0], operands[1]));
2999}
3000
/* Output assembler code to perform a doubleword move insn
   with operands OPERANDS.  This is the text-emitting front end of
   handle_move_double; the rtl-emitting counterpart is
   m68k_emit_move_double below.  */
const char *
output_move_double (rtx *operands)
{
  handle_move_double (operands,
		      output_reg_adjust, output_compadr, output_movsi);

  /* All output was done via output_asm_insn; nothing left for final.  */
  return "";
}
3011
/* Output rtl code to perform a doubleword move insn
   with operands OPERANDS.  Mirror of output_move_double, but emits
   insns instead of assembler text.  */
void
m68k_emit_move_double (rtx operands[2])
{
  handle_move_double (operands, emit_reg_adjust, emit_movsi, emit_movsi);
}
dcc21c4c
PB
3019
3020/* Ensure mode of ORIG, a REG rtx, is MODE. Returns either ORIG or a
3021 new rtx with the correct mode. */
3022
3023static rtx
3024force_mode (enum machine_mode mode, rtx orig)
3025{
3026 if (mode == GET_MODE (orig))
3027 return orig;
3028
3029 if (REGNO (orig) >= FIRST_PSEUDO_REGISTER)
3030 abort ();
3031
3032 return gen_rtx_REG (mode, REGNO (orig));
3033}
3034
/* Return nonzero if OP is a hard floating-point register.  Before
   register allocation (reg_renumber == 0) this always returns 0.
   MODE is unused.  */
static int
fp_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return reg_renumber && FP_REG_P (op);
}
3040
/* Emit insns to move operands[1] into operands[0].

   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.

   Note SCRATCH_REG may not be in the proper mode depending on how it
   will be used.  This routine is responsible for creating a new copy
   of SCRATCH_REG in the proper mode.  */

int
emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
{
  register rtx operand0 = operands[0];
  register rtx operand1 = operands[1];
  register rtx tem;

  /* During reload, a pseudo that did not get a hard register is
     accessed through its stack slot; substitute the equivalent MEM.  */
  if (scratch_reg
      && reload_in_progress && GET_CODE (operand0) == REG
      && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
    operand0 = reg_equiv_mem[REGNO (operand0)];
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand0) == SUBREG
	   && GET_CODE (SUBREG_REG (operand0)) == REG
	   && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
    {
     /* We must not alter SUBREG_BYTE (operand0) since that would confuse
	the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
				 reg_equiv_mem [REGNO (SUBREG_REG (operand0))],
				 SUBREG_BYTE (operand0));
      operand0 = alter_subreg (&temp);
    }

  /* Same substitution for the source operand.  */
  if (scratch_reg
      && reload_in_progress && GET_CODE (operand1) == REG
      && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
    operand1 = reg_equiv_mem[REGNO (operand1)];
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand1) == SUBREG
	   && GET_CODE (SUBREG_REG (operand1)) == REG
	   && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
    {
      /* We must not alter SUBREG_BYTE (operand0) since that would confuse
	 the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
				 reg_equiv_mem [REGNO (SUBREG_REG (operand1))],
				 SUBREG_BYTE (operand1));
      operand1 = alter_subreg (&temp);
    }

  /* Apply any pending reload replacements recorded for the addresses.  */
  if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
      && ((tem = find_replacement (&XEXP (operand0, 0)))
	  != XEXP (operand0, 0)))
    operand0 = gen_rtx_MEM (GET_MODE (operand0), tem);
  if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
      && ((tem = find_replacement (&XEXP (operand1, 0)))
	  != XEXP (operand1, 0)))
    operand1 = gen_rtx_MEM (GET_MODE (operand1), tem);

  /* Handle secondary reloads for loads/stores of FP registers where
     the address is symbolic by using the scratch register */
  if (fp_reg_operand (operand0, mode)
      && ((GET_CODE (operand1) == MEM
	   && ! memory_address_p (DFmode, XEXP (operand1, 0)))
	  || ((GET_CODE (operand1) == SUBREG
	       && GET_CODE (XEXP (operand1, 0)) == MEM
	       && !memory_address_p (DFmode, XEXP (XEXP (operand1, 0), 0)))))
      && scratch_reg)
    {
      if (GET_CODE (operand1) == SUBREG)
	operand1 = XEXP (operand1, 0);

      /* SCRATCH_REG will hold an address.  We want
	 it in SImode regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (SImode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand1, 0)))
	{
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
	  emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
						       Pmode,
						       XEXP (XEXP (operand1, 0), 0),
						       scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand1, 0));
      emit_insn (gen_rtx_SET (VOIDmode, operand0,
			      gen_rtx_MEM (mode, scratch_reg)));
      return 1;
    }
  else if (fp_reg_operand (operand1, mode)
	   && ((GET_CODE (operand0) == MEM
		&& ! memory_address_p (DFmode, XEXP (operand0, 0)))
	       || ((GET_CODE (operand0) == SUBREG)
		   && GET_CODE (XEXP (operand0, 0)) == MEM
		   && !memory_address_p (DFmode, XEXP (XEXP (operand0, 0), 0))))
	   && scratch_reg)
    {
      if (GET_CODE (operand0) == SUBREG)
	operand0 = XEXP (operand0, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in SIMODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (SImode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand0, 0)))
	{
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
	  emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0,
								       0)),
						       Pmode,
						       XEXP (XEXP (operand0, 0),
							     0),
						       scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand0, 0));
      emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_MEM (mode, scratch_reg),
			      operand1));
      return 1;
    }
  /* Handle secondary reloads for loads of FP registers from constant
     expressions by forcing the constant into memory.

     use scratch_reg to hold the address of the memory location.

     The proper fix is to change PREFERRED_RELOAD_CLASS to return
     NO_REGS when presented with a const_int and an register class
     containing only FP registers.  Doing so unfortunately creates
     more problems than it solves.   Fix this for 2.5.  */
  else if (fp_reg_operand (operand0, mode)
	   && CONSTANT_P (operand1)
	   && scratch_reg)
    {
      rtx xoperands[2];

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in SIMODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (SImode, scratch_reg);

      /* Force the constant into memory and put the address of the
	 memory location into scratch_reg.  */
      xoperands[0] = scratch_reg;
      xoperands[1] = XEXP (force_const_mem (mode, operand1), 0);
      emit_insn (gen_rtx_SET (mode, scratch_reg, xoperands[1]));

      /* Now load the destination register.  */
      emit_insn (gen_rtx_SET (mode, operand0,
			      gen_rtx_MEM (mode, scratch_reg)));
      return 1;
    }

  /* Now have insn-emit do whatever it normally does.  */
  return 0;
}
3204
01e304f8
RZ
3205/* Split one or more DImode RTL references into pairs of SImode
3206 references. The RTL can be REG, offsettable MEM, integer constant, or
3207 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
3208 split and "num" is its length. lo_half and hi_half are output arrays
3209 that parallel "operands". */
3210
3211void
3212split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
3213{
3214 while (num--)
3215 {
3216 rtx op = operands[num];
3217
3218 /* simplify_subreg refuses to split volatile memory addresses,
3219 but we still have to handle it. */
3220 if (GET_CODE (op) == MEM)
3221 {
3222 lo_half[num] = adjust_address (op, SImode, 4);
3223 hi_half[num] = adjust_address (op, SImode, 0);
3224 }
3225 else
3226 {
3227 lo_half[num] = simplify_gen_subreg (SImode, op,
3228 GET_MODE (op) == VOIDmode
3229 ? DImode : GET_MODE (op), 4);
3230 hi_half[num] = simplify_gen_subreg (SImode, op,
3231 GET_MODE (op) == VOIDmode
3232 ? DImode : GET_MODE (op), 0);
3233 }
3234 }
3235}
3236
a40ed0f3
KH
3237/* Split X into a base and a constant offset, storing them in *BASE
3238 and *OFFSET respectively. */
3239
3240static void
3241m68k_split_offset (rtx x, rtx *base, HOST_WIDE_INT *offset)
3242{
3243 *offset = 0;
3244 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
3245 {
3246 *offset += INTVAL (XEXP (x, 1));
3247 x = XEXP (x, 0);
3248 }
3249 *base = x;
3250}
3251
/* Return true if PATTERN is a PARALLEL suitable for a movem or fmovem
   instruction.  STORE_P says whether the move is a load or store.

   If the instruction uses post-increment or pre-decrement addressing,
   AUTOMOD_BASE is the base register and AUTOMOD_OFFSET is the total
   adjustment.  This adjustment will be made by the first element of
   PARALLEL, with the loads or stores starting at element 1.  If the
   instruction does not use post-increment or pre-decrement addressing,
   AUTOMOD_BASE is null, AUTOMOD_OFFSET is 0, and the loads or stores
   start at element 0.  */

bool
m68k_movem_pattern_p (rtx pattern, rtx automod_base,
		      HOST_WIDE_INT automod_offset, bool store_p)
{
  rtx base, mem_base, set, mem, reg, last_reg;
  HOST_WIDE_INT offset, mem_offset;
  int i, first, len;
  enum reg_class rclass;

  len = XVECLEN (pattern, 0);
  /* Element 0 is the automodification SET when there is one.  */
  first = (automod_base != NULL);

  if (automod_base)
    {
      /* Stores must be pre-decrement and loads must be post-increment.  */
      if (store_p != (automod_offset < 0))
	return false;

      /* Work out the base and offset for lowest memory location.  */
      base = automod_base;
      offset = (automod_offset < 0 ? automod_offset : 0);
    }
  else
    {
      /* Allow any valid base and offset in the first access.  */
      base = NULL;
      offset = 0;
    }

  last_reg = NULL;
  rclass = NO_REGS;
  for (i = first; i < len; i++)
    {
      /* We need a plain SET.  */
      set = XVECEXP (pattern, 0, i);
      if (GET_CODE (set) != SET)
	return false;

      /* Check that we have a memory location...  */
      mem = XEXP (set, !store_p);
      if (!MEM_P (mem) || !memory_operand (mem, VOIDmode))
	return false;

      /* ...with the right address.  */
      if (base == NULL)
	{
	  m68k_split_offset (XEXP (mem, 0), &base, &offset);
	  /* The ColdFire instruction only allows (An) and (d16,An) modes.
	     There are no mode restrictions for 680x0 besides the
	     automodification rules enforced above.  */
	  if (TARGET_COLDFIRE
	      && !m68k_legitimate_base_reg_p (base, reload_completed))
	    return false;
	}
      else
	{
	  /* Each access must be contiguous with the previous one.  */
	  m68k_split_offset (XEXP (mem, 0), &mem_base, &mem_offset);
	  if (!rtx_equal_p (base, mem_base) || offset != mem_offset)
	    return false;
	}

      /* Check that we have a register of the required mode and class.  */
      reg = XEXP (set, store_p);
      if (!REG_P (reg)
	  || !HARD_REGISTER_P (reg)
	  || GET_MODE (reg) != reg_raw_mode[REGNO (reg)])
	return false;

      if (last_reg)
	{
	  /* The register must belong to RCLASS and have a higher number
	     than the register in the previous SET.  */
	  if (!TEST_HARD_REG_BIT (reg_class_contents[rclass], REGNO (reg))
	      || REGNO (last_reg) >= REGNO (reg))
	    return false;
	}
      else
	{
	  /* Work out which register class we need.  */
	  if (INT_REGNO_P (REGNO (reg)))
	    rclass = GENERAL_REGS;
	  else if (FP_REGNO_P (REGNO (reg)))
	    rclass = FP_REGS;
	  else
	    return false;
	}

      last_reg = reg;
      offset += GET_MODE_SIZE (GET_MODE (reg));
    }

  /* If we have an automodification, check whether the final offset is OK.  */
  if (automod_base && offset != (automod_offset < 0 ? 0 : automod_offset))
    return false;

  /* Reject unprofitable cases.  */
  if (len < first + (rclass == FP_REGS ? MIN_FMOVEM_REGS : MIN_MOVEM_REGS))
    return false;

  return true;
}
3364
/* Return the assembly code template for a movem or fmovem instruction
   whose pattern is given by PATTERN.  Store the template's operands
   in OPERANDS.

   If the instruction uses post-increment or pre-decrement addressing,
   AUTOMOD_OFFSET is the total adjustment, otherwise it is 0.  STORE_P
   is true if this is a store instruction.  */

const char *
m68k_output_movem (rtx *operands, rtx pattern,
		   HOST_WIDE_INT automod_offset, bool store_p)
{
  unsigned int mask;
  int i, first;

  gcc_assert (GET_CODE (pattern) == PARALLEL);
  mask = 0;
  first = (automod_offset != 0);
  for (i = first; i < XVECLEN (pattern, 0); i++)
    {
      /* When using movem with pre-decrement addressing, register X + D0_REG
	 is controlled by bit 15 - X.  For all other addressing modes,
	 register X + D0_REG is controlled by bit X.  Confusingly, the
	 register mask for fmovem is in the opposite order to that for
	 movem.  */
      unsigned int regno;

      gcc_assert (MEM_P (XEXP (XVECEXP (pattern, 0, i), !store_p)));
      gcc_assert (REG_P (XEXP (XVECEXP (pattern, 0, i), store_p)));
      regno = REGNO (XEXP (XVECEXP (pattern, 0, i), store_p));
      if (automod_offset < 0)
	{
	  if (FP_REGNO_P (regno))
	    mask |= 1 << (regno - FP0_REG);
	  else
	    mask |= 1 << (15 - (regno - D0_REG));
	}
      else
	{
	  if (FP_REGNO_P (regno))
	    mask |= 1 << (7 - (regno - FP0_REG));
	  else
	    mask |= 1 << (regno - D0_REG);
	}
    }
  /* The multi-register move leaves cc_status stale.  */
  CC_STATUS_INIT;

  /* Operand 0 is the address (or automodified base register);
     operand 1 is the register mask.  */
  if (automod_offset == 0)
    operands[0] = XEXP (XEXP (XVECEXP (pattern, 0, first), !store_p), 0);
  else if (automod_offset < 0)
    operands[0] = gen_rtx_PRE_DEC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
  else
    operands[0] = gen_rtx_POST_INC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
  operands[1] = GEN_INT (mask);
  if (FP_REGNO_P (REGNO (XEXP (XVECEXP (pattern, 0, first), store_p))))
    {
      if (store_p)
	return "fmovem %1,%a0";
      else
	return "fmovem %a0,%1";
    }
  else
    {
      if (store_p)
	return "movem%.l %1,%a0";
      else
	return "movem%.l %a0,%1";
    }
}
3434
79e68feb
RS
3435/* Return a REG that occurs in ADDR with coefficient 1.
3436 ADDR can be effectively incremented by incrementing REG. */
3437
3438static rtx
8a4a2253 3439find_addr_reg (rtx addr)
79e68feb
RS
3440{
3441 while (GET_CODE (addr) == PLUS)
3442 {
3443 if (GET_CODE (XEXP (addr, 0)) == REG)
3444 addr = XEXP (addr, 0);
3445 else if (GET_CODE (XEXP (addr, 1)) == REG)
3446 addr = XEXP (addr, 1);
3447 else if (CONSTANT_P (XEXP (addr, 0)))
3448 addr = XEXP (addr, 1);
3449 else if (CONSTANT_P (XEXP (addr, 1)))
3450 addr = XEXP (addr, 0);
3451 else
4761e388 3452 gcc_unreachable ();
79e68feb 3453 }
4761e388
NS
3454 gcc_assert (GET_CODE (addr) == REG);
3455 return addr;
79e68feb 3456}
9ee3c687 3457
/* Output assembler code to perform a 32-bit 3-operand add.
   Chooses between add/addq/subq/lea templates depending on operand
   kinds, immediate range and the tuning target (CPU32, 68040).  */

const char *
output_addsi3 (rtx *operands)
{
  if (! operands_match_p (operands[0], operands[1]))
    {
      /* 3-operand case: only lea can do it, and lea needs the address
	 register as the base, so put it in operands[1].  */
      if (!ADDRESS_REG_P (operands[1]))
	{
	  rtx tmp = operands[1];

	  operands[1] = operands[2];
	  operands[2] = tmp;
	}

      /* These insns can result from reloads to access
	 stack slots over 64k from the frame pointer.  */
      if (GET_CODE (operands[2]) == CONST_INT
	  && (INTVAL (operands[2]) < -32768 || INTVAL (operands[2]) > 32767))
	return "move%.l %2,%0\n\tadd%.l %1,%0";
      if (GET_CODE (operands[2]) == REG)
	return MOTOROLA ? "lea (%1,%2.l),%0" : "lea %1@(0,%2:l),%0";
      return MOTOROLA ? "lea (%c2,%1),%0" : "lea %1@(%c2),%0";
    }
  if (GET_CODE (operands[2]) == CONST_INT)
    {
      /* addq/subq take immediates 1..8.  */
      if (INTVAL (operands[2]) > 0
	  && INTVAL (operands[2]) <= 8)
	return "addq%.l %2,%0";
      if (INTVAL (operands[2]) < 0
	  && INTVAL (operands[2]) >= -8)
	{
	  operands[2] = GEN_INT (- INTVAL (operands[2]));
	  return "subq%.l %2,%0";
	}
      /* On the CPU32 it is faster to use two addql instructions to
	 add a small integer (8 < N <= 16) to a register.
	 Likewise for subql.  */
      if (TUNE_CPU32 && REG_P (operands[0]))
	{
	  if (INTVAL (operands[2]) > 8
	      && INTVAL (operands[2]) <= 16)
	    {
	      operands[2] = GEN_INT (INTVAL (operands[2]) - 8);
	      return "addq%.l #8,%0\n\taddq%.l %2,%0";
	    }
	  if (INTVAL (operands[2]) < -8
	      && INTVAL (operands[2]) >= -16)
	    {
	      operands[2] = GEN_INT (- INTVAL (operands[2]) - 8);
	      return "subq%.l #8,%0\n\tsubq%.l %2,%0";
	    }
	}
      /* 16-bit immediates into an address register: add.w or lea,
	 depending on what is faster for the tuning target.  */
      if (ADDRESS_REG_P (operands[0])
	  && INTVAL (operands[2]) >= -0x8000
	  && INTVAL (operands[2]) < 0x8000)
	{
	  if (TUNE_68040)
	    return "add%.w %2,%0";
	  else
	    return MOTOROLA ? "lea (%c2,%0),%0" : "lea %0@(%c2),%0";
	}
    }
  return "add%.l %2,%0";
}
79e68feb
RS
3523\f
/* Store in cc_status the expressions that the condition codes will
   describe after execution of an instruction whose pattern is EXP.
   Do not alter them if the instruction would not alter the cc's.  */

/* On the 68000, all the insns to store in an address register fail to
   set the cc's.  However, in some cases these instructions can make it
   possibly invalid to use the saved cc's.  In those cases we clear out
   some or all of the saved cc's so they won't be used.  */

void
notice_update_cc (rtx exp, rtx insn)
{
  if (GET_CODE (exp) == SET)
    {
      if (GET_CODE (SET_SRC (exp)) == CALL)
	CC_STATUS_INIT;
      else if (ADDRESS_REG_P (SET_DEST (exp)))
	{
	  /* Moves into an address register leave the cc's alone, but
	     invalidate any saved value the insn overwrites.  */
	  if (cc_status.value1 && modified_in_p (cc_status.value1, insn))
	    cc_status.value1 = 0;
	  if (cc_status.value2 && modified_in_p (cc_status.value2, insn))
	    cc_status.value2 = 0;
	}
      /* fmoves to memory or data registers do not set the condition
	 codes.  Normal moves _do_ set the condition codes, but not in
	 a way that is appropriate for comparison with 0, because -0.0
	 would be treated as a negative nonzero number.  Note that it
	 isn't appropriate to conditionalize this restriction on
	 HONOR_SIGNED_ZEROS because that macro merely indicates whether
	 we care about the difference between -0.0 and +0.0.  */
      else if (!FP_REG_P (SET_DEST (exp))
	       && SET_DEST (exp) != cc0_rtx
	       && (FP_REG_P (SET_SRC (exp))
		   || GET_CODE (SET_SRC (exp)) == FIX
		   || FLOAT_MODE_P (GET_MODE (SET_DEST (exp)))))
	CC_STATUS_INIT;
      /* A pair of move insns doesn't produce a useful overall cc.  */
      else if (!FP_REG_P (SET_DEST (exp))
	       && !FP_REG_P (SET_SRC (exp))
	       && GET_MODE_SIZE (GET_MODE (SET_SRC (exp))) > 4
	       && (GET_CODE (SET_SRC (exp)) == REG
		   || GET_CODE (SET_SRC (exp)) == MEM
		   || GET_CODE (SET_SRC (exp)) == CONST_DOUBLE))
	CC_STATUS_INIT;
      else if (SET_DEST (exp) != pc_rtx)
	{
	  cc_status.flags = 0;
	  cc_status.value1 = SET_DEST (exp);
	  cc_status.value2 = SET_SRC (exp);
	}
    }
  else if (GET_CODE (exp) == PARALLEL
	   && GET_CODE (XVECEXP (exp, 0, 0)) == SET)
    {
      /* Only the first SET of a PARALLEL is considered.  */
      rtx dest = SET_DEST (XVECEXP (exp, 0, 0));
      rtx src = SET_SRC (XVECEXP (exp, 0, 0));

      if (ADDRESS_REG_P (dest))
	CC_STATUS_INIT;
      else if (dest != pc_rtx)
	{
	  cc_status.flags = 0;
	  cc_status.value1 = dest;
	  cc_status.value2 = src;
	}
    }
  else
    CC_STATUS_INIT;
  /* QImode uses of address registers are done with full-width
     register operations, so the cc's don't describe the QImode value.  */
  if (cc_status.value2 != 0
      && ADDRESS_REG_P (cc_status.value2)
      && GET_MODE (cc_status.value2) == QImode)
    CC_STATUS_INIT;
  if (cc_status.value2 != 0)
    switch (GET_CODE (cc_status.value2))
      {
      case ASHIFT: case ASHIFTRT: case LSHIFTRT:
      case ROTATE: case ROTATERT:
	/* These instructions always clear the overflow bit, and set
	   the carry to the bit shifted out.  */
	/* ??? We don't currently have a way to signal carry not valid,
	   nor do we check for it in the branch insns.  */
	CC_STATUS_INIT;
	break;

      case PLUS: case MINUS: case MULT:
      case DIV: case UDIV: case MOD: case UMOD: case NEG:
	if (GET_MODE (cc_status.value2) != VOIDmode)
	  cc_status.flags |= CC_NO_OVERFLOW;
	break;
      case ZERO_EXTEND:
	/* (SET r1 (ZERO_EXTEND r2)) on this machine
	   ends with a move insn moving r2 in r2's mode.
	   Thus, the cc's are set for r2.
	   This can set N bit spuriously.  */
	cc_status.flags |= CC_NOT_NEGATIVE;
	/* Falls through to default (no break in the original; the
	   default case only breaks, so behavior is unchanged).  */

      default:
	break;
      }
  if (cc_status.value1 && GET_CODE (cc_status.value1) == REG
      && cc_status.value2
      && reg_overlap_mentioned_p (cc_status.value1, cc_status.value2))
    cc_status.value2 = 0;
  if (((cc_status.value1 && FP_REG_P (cc_status.value1))
       || (cc_status.value2 && FP_REG_P (cc_status.value2))))
    cc_status.flags = CC_IN_68881;
  if (cc_status.value2 && GET_CODE (cc_status.value2) == COMPARE
      && GET_MODE_CLASS (GET_MODE (XEXP (cc_status.value2, 0))) == MODE_FLOAT)
    {
      cc_status.flags = CC_IN_68881;
      if (!FP_REG_P (XEXP (cc_status.value2, 0)))
	cc_status.flags |= CC_REVERSED;
    }
}
3638\f
5505f548 3639const char *
8a4a2253 3640output_move_const_double (rtx *operands)
79e68feb 3641{
1a8965c4 3642 int code = standard_68881_constant_p (operands[1]);
79e68feb 3643
1a8965c4 3644 if (code != 0)
79e68feb 3645 {
1a8965c4 3646 static char buf[40];
79e68feb 3647
3b4b85c9 3648 sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff);
1a8965c4 3649 return buf;
79e68feb 3650 }
1a8965c4 3651 return "fmove%.d %1,%0";
79e68feb
RS
3652}
3653
5505f548 3654const char *
8a4a2253 3655output_move_const_single (rtx *operands)
79e68feb 3656{
1a8965c4 3657 int code = standard_68881_constant_p (operands[1]);
79e68feb 3658
1a8965c4 3659 if (code != 0)
79e68feb 3660 {
1a8965c4 3661 static char buf[40];
79e68feb 3662
3b4b85c9 3663 sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff);
1a8965c4 3664 return buf;
79e68feb 3665 }
1a8965c4 3666 return "fmove%.s %f1,%0";
79e68feb
RS
3667}
3668
3669/* Return nonzero if X, a CONST_DOUBLE, has a value that we can get
3670 from the "fmovecr" instruction.
3671 The value, anded with 0xff, gives the code to use in fmovecr
3672 to get the desired constant. */
3673
/* This code has been fixed for cross-compilation.  */

/* Set once init_68881_table has filled in values_68881.  */
static int inited_68881_table = 0;

/* Decimal spellings of the seven constants held in the 68881's
   on-chip constant ROM; index-parallel with codes_68881 and
   values_68881.  */
static const char *const strings_68881[7] = {
  "0.0",
  "1.0",
  "10.0",
  "100.0",
  "10000.0",
  "1e8",
  "1e16"
};

/* fmovecr ROM offsets for the constants above.  */
static const int codes_68881[7] = {
  0x0f,
  0x32,
  0x33,
  0x34,
  0x35,
  0x36,
  0x37
};

REAL_VALUE_TYPE values_68881[7];

/* Set up values_68881 array by converting the decimal values
   strings_68881 to binary.  */

void
init_68881_table (void)
{
  int i;
  REAL_VALUE_TYPE r;
  enum machine_mode mode;

  /* The first six constants are exactly representable in SFmode;
     1e16 needs DFmode.  */
  mode = SFmode;
  for (i = 0; i < 7; i++)
    {
      if (i == 6)
	mode = DFmode;
      r = REAL_VALUE_ATOF (strings_68881[i], mode);
      values_68881[i] = r;
    }
  inited_68881_table = 1;
}
79e68feb
RS
3720
/* Return nonzero if X, a CONST_DOUBLE, is one of the 68881's ROM
   constants; the value, anded with 0xff, is the fmovecr code for it.
   Return 0 if X is not available via fmovecr.  */
int
standard_68881_constant_p (rtx x)
{
  REAL_VALUE_TYPE r;
  int i;

  /* fmovecr must be emulated on the 68040 and 68060, so it shouldn't be
     used at all on those chips.  */
  if (TUNE_68040_60)
    return 0;

  /* Lazily build the lookup table of ROM constant values.  */
  if (! inited_68881_table)
    init_68881_table ();

  REAL_VALUE_FROM_CONST_DOUBLE (r, x);

  /* Use REAL_VALUES_IDENTICAL instead of REAL_VALUES_EQUAL so that -0.0
     is rejected.  */
  for (i = 0; i < 6; i++)
    {
      if (REAL_VALUES_IDENTICAL (r, values_68881[i]))
	return (codes_68881[i]);
    }

  /* 1e16 (index 6) is not exactly representable in SFmode, so only
     check it for wider modes.  */
  if (GET_MODE (x) == SFmode)
    return 0;

  if (REAL_VALUES_EQUAL (r, values_68881[6]))
    return (codes_68881[6]);

  /* larger powers of ten in the constants ram are not used
     because they are not equal to a `double' C constant.  */
  return 0;
}
3755
/* If X is a floating-point constant, return the logarithm of X base 2,
   or 0 if X is not a power of 2.  Values less than 1 (including all
   negative values) also return 0.  */

int
floating_exact_log2 (rtx x)
{
  REAL_VALUE_TYPE r, r1;
  int exp;

  REAL_VALUE_FROM_CONST_DOUBLE (r, x);

  if (REAL_VALUES_LESS (r, dconst1))
    return 0;

  /* X is a power of two iff it equals 2**exponent exactly.  */
  exp = real_exponent (&r);
  real_2expN (&r1, exp, DFmode);
  if (REAL_VALUES_EQUAL (r1, r))
    return exp;

  return 0;
}
3777\f
79e68feb
RS
/* A C compound statement to output to stdio stream STREAM the
   assembler syntax for an instruction operand X.  X is an RTL
   expression.

   CODE is a value that can be used to specify one of several ways
   of printing the operand.  It is used when identical operands
   must be printed differently depending on the context.  CODE
   comes from the `%' specification that was used to request
   printing of the operand.  If the specification was just `%DIGIT'
   then CODE is 0; if the specification was `%LTR DIGIT' then CODE
   is the ASCII code for LTR.

   If X is a register, this macro should print the register's name.
   The names can be found in an array `reg_names' whose type is
   `char *[]'.  `reg_names' is initialized from `REGISTER_NAMES'.

   When the machine description has a specification `%PUNCT' (a `%'
   followed by a punctuation character), this macro is called with
   a null pointer for X and the punctuation character for CODE.

   The m68k specific codes are:

   '.' for dot needed in Motorola-style opcode names.
   '-' for an operand pushing on the stack:
       sp@-, -(sp) or -(%sp) depending on the style of syntax.
   '+' for an operand pushing on the stack:
       sp@+, (sp)+ or (%sp)+ depending on the style of syntax.
   '@' for a reference to the top word on the stack:
       sp@, (sp) or (%sp) depending on the style of syntax.
   '#' for an immediate operand prefix (# in MIT and Motorola syntax
       but & in SGS syntax).
   '!' for the cc register (used in an `and to cc' insn).
   '$' for the letter `s' in an op code, but only on the 68040.
   '&' for the letter `d' in an op code, but only on the 68040.
   '/' for register prefix needed by longlong.h.
   '?' for m68k_library_id_string

   'b' for byte insn (no effect, on the Sun; this is for the ISI).
   'd' to force memory addressing to be absolute, not relative.
   'f' for float insn (print a CONST_DOUBLE as a float rather than in hex)
   'x' for float insn (print a CONST_DOUBLE as a float rather than in hex),
       or print pair of registers as rx:ry.
   'p' print an address with @PLTPC attached, but only if the operand
       is not locally-bound.  */

void
print_operand (FILE *file, rtx op, int letter)
{
  /* Punctuation codes first: these are called with OP == NULL.  */
  if (letter == '.')
    {
      if (MOTOROLA)
	fprintf (file, ".");
    }
  else if (letter == '#')
    asm_fprintf (file, "%I");
  else if (letter == '-')
    asm_fprintf (file, MOTOROLA ? "-(%Rsp)" : "%Rsp@-");
  else if (letter == '+')
    asm_fprintf (file, MOTOROLA ? "(%Rsp)+" : "%Rsp@+");
  else if (letter == '@')
    asm_fprintf (file, MOTOROLA ? "(%Rsp)" : "%Rsp@");
  else if (letter == '!')
    asm_fprintf (file, "%Rfpcr");
  else if (letter == '$')
    {
      if (TARGET_68040)
	fprintf (file, "s");
    }
  else if (letter == '&')
    {
      if (TARGET_68040)
	fprintf (file, "d");
    }
  else if (letter == '/')
    asm_fprintf (file, "%R");
  else if (letter == '?')
    asm_fprintf (file, m68k_library_id_string);
  else if (letter == 'p')
    {
      output_addr_const (file, op);
      if (!(GET_CODE (op) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (op)))
	fprintf (file, "@PLTPC");
    }
  else if (GET_CODE (op) == REG)
    {
      if (letter == 'R')
	/* Print out the second register name of a register pair.
	   I.e., R (6) => 7.  */
	fputs (M68K_REGNAME(REGNO (op) + 1), file);
      else
	fputs (M68K_REGNAME(REGNO (op)), file);
    }
  else if (GET_CODE (op) == MEM)
    {
      output_address (XEXP (op, 0));
      /* 'd': force a long absolute size suffix unless the address is a
	 16-bit constant that the assembler would encode short anyway.  */
      if (letter == 'd' && ! TARGET_68020
	  && CONSTANT_ADDRESS_P (XEXP (op, 0))
	  && !(GET_CODE (XEXP (op, 0)) == CONST_INT
	       && INTVAL (XEXP (op, 0)) < 0x8000
	       && INTVAL (XEXP (op, 0)) >= -0x8000))
	fprintf (file, MOTOROLA ? ".l" : ":l");
    }
  /* Floating-point constants, dispatched on mode.  */
  else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == SFmode)
    {
      REAL_VALUE_TYPE r;
      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      ASM_OUTPUT_FLOAT_OPERAND (letter, file, r);
    }
  else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == XFmode)
    {
      REAL_VALUE_TYPE r;
      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      ASM_OUTPUT_LONG_DOUBLE_OPERAND (file, r);
    }
  else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == DFmode)
    {
      REAL_VALUE_TYPE r;
      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      ASM_OUTPUT_DOUBLE_OPERAND (file, r);
    }
  else
    {
      /* Use `print_operand_address' instead of `output_addr_const'
	 to ensure that we print relevant PIC stuff.  */
      asm_fprintf (file, "%I");
      if (TARGET_PCREL
	  && (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == CONST))
	print_operand_address (file, op);
      else
	output_addr_const (file, op);
    }
}
3910
884316ff
JM
3911/* m68k implementation of OUTPUT_ADDR_CONST_EXTRA. */
3912
3913bool
3914m68k_output_addr_const_extra (FILE *file, rtx x)
3915{
3916 if (GET_CODE (x) != UNSPEC || XINT (x, 1) != UNSPEC_GOTOFF)
3917 return false;
3918
3919 output_addr_const (file, XVECEXP (x, 0, 0));
3920 /* ??? What is the non-MOTOROLA syntax? */
3921 fputs ("@GOT", file);
3922 return true;
3923}
3924
79e68feb
RS
3925\f
/* A C compound statement to output to stdio stream STREAM the
   assembler syntax for an instruction operand that is a memory
   reference whose address is ADDR.  ADDR is an RTL expression.

   Note that this contains a kludge that knows that the only reason
   we have an address (plus (label_ref...) (reg...)) when not generating
   PIC code is in the insn before a tablejump, and we know that m68k.md
   generates a label LInnn: on such an insn.

   It is possible for PIC to generate a (plus (label_ref...) (reg...))
   and we handle that just like we would a (plus (symbol_ref...) (reg...)).

   This routine is responsible for distinguishing between -fpic and -fPIC
   style relocations in an address.  When generating -fpic code the
   offset is output in word mode (e.g. movel a5@(_foo:w), a0).  When generating
   -fPIC code the offset is output in long mode (e.g. movel a5@(_foo:l), a0) */

void
print_operand_address (FILE *file, rtx addr)
{
  struct m68k_address address;

  /* Decompose ADDR into base/index/offset; it must be a valid address.  */
  if (!m68k_decompose_address (QImode, addr, true, &address))
    gcc_unreachable ();

  if (address.code == PRE_DEC)
    fprintf (file, MOTOROLA ? "-(%s)" : "%s@-",
	     M68K_REGNAME (REGNO (address.base)));
  else if (address.code == POST_INC)
    fprintf (file, MOTOROLA ? "(%s)+" : "%s@+",
	     M68K_REGNAME (REGNO (address.base)));
  else if (!address.base && !address.index)
    {
      /* A constant address.  */
      gcc_assert (address.offset == addr);
      if (GET_CODE (addr) == CONST_INT)
	{
	  /* (xxx).w or (xxx).l.  */
	  if (IN_RANGE (INTVAL (addr), -0x8000, 0x7fff))
	    fprintf (file, MOTOROLA ? "%d.w" : "%d:w", (int) INTVAL (addr));
	  else
	    fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (addr));
	}
      else if (TARGET_PCREL)
	{
	  /* (d16,PC) or (bd,PC,Xn) (with suppressed index register).  */
	  fputc ('(', file);
	  output_addr_const (file, addr);
	  asm_fprintf (file, flag_pic == 1 ? ":w,%Rpc)" : ":l,%Rpc)");
	}
      else
	{
	  /* (xxx).l.  We need a special case for SYMBOL_REF if the symbol
	     name ends in `.<letter>', as the last 2 characters can be
	     mistaken as a size suffix.  Put the name in parentheses.  */
	  if (GET_CODE (addr) == SYMBOL_REF
	      && strlen (XSTR (addr, 0)) > 2
	      && XSTR (addr, 0)[strlen (XSTR (addr, 0)) - 2] == '.')
	    {
	      putc ('(', file);
	      output_addr_const (file, addr);
	      putc (')', file);
	    }
	  else
	    output_addr_const (file, addr);
	}
    }
  else
    {
      int labelno;

      /* If ADDR is a (d8,pc,Xn) address, this is the number of the
	 label being accessed, otherwise it is -1.  */
      labelno = (address.offset
		 && !address.base
		 && GET_CODE (address.offset) == LABEL_REF
		 ? CODE_LABEL_NUMBER (XEXP (address.offset, 0))
		 : -1);
      if (MOTOROLA)
	{
	  /* Print the "offset(base" component.  */
	  if (labelno >= 0)
	    asm_fprintf (file, "%LL%d(%Rpc,", labelno);
	  else
	    {
	      if (address.offset)
		{
		  output_addr_const (file, address.offset);
		  if (flag_pic && address.base == pic_offset_table_rtx)
		    {
		      fprintf (file, "@GOT");
		      if (flag_pic == 1 && TARGET_68020)
			fprintf (file, ".w");
		    }
		}
	      putc ('(', file);
	      if (address.base)
		fputs (M68K_REGNAME (REGNO (address.base)), file);
	    }
	  /* Print the ",index" component, if any.  */
	  if (address.index)
	    {
	      if (address.base)
		putc (',', file);
	      fprintf (file, "%s.%c",
		       M68K_REGNAME (REGNO (address.index)),
		       GET_MODE (address.index) == HImode ? 'w' : 'l');
	      if (address.scale != 1)
		fprintf (file, "*%d", address.scale);
	    }
	  putc (')', file);
	}
      else /* !MOTOROLA */
	{
	  if (!address.offset && !address.index)
	    fprintf (file, "%s@", M68K_REGNAME (REGNO (address.base)));
	  else
	    {
	      /* Print the "base@(offset" component.  */
	      if (labelno >= 0)
		asm_fprintf (file, "%Rpc@(%LL%d", labelno);
	      else
		{
		  if (address.base)
		    fputs (M68K_REGNAME (REGNO (address.base)), file);
		  fprintf (file, "@(");
		  if (address.offset)
		    {
		      output_addr_const (file, address.offset);
		      if (address.base == pic_offset_table_rtx && TARGET_68020)
			switch (flag_pic)
			  {
			  case 1:
			    fprintf (file, ":w"); break;
			  case 2:
			    fprintf (file, ":l"); break;
			  default:
			    break;
			  }
		    }
		}
	      /* Print the ",index" component, if any.  */
	      if (address.index)
		{
		  fprintf (file, ",%s:%c",
			   M68K_REGNAME (REGNO (address.index)),
			   GET_MODE (address.index) == HImode ? 'w' : 'l');
		  if (address.scale != 1)
		    fprintf (file, ":%d", address.scale);
		}
	      putc (')', file);
	    }
	}
    }
}
af13f02d
JW
4081\f
/* Check for cases where a clr insns can be omitted from code using
   strict_low_part sets.  For example, the second clrl here is not needed:
   clrl d0; movw a0@+,d0; use d0; clrl d0; movw a0@+; use d0; ...

   MODE is the mode of this STRICT_LOW_PART set.  FIRST_INSN is the clear
   insn we are checking for redundancy.  TARGET is the register set by the
   clear insn.  */

bool
strict_low_part_peephole_ok (enum machine_mode mode, rtx first_insn,
			     rtx target)
{
  rtx p = first_insn;

  /* Walk backwards looking for an earlier full clear of TARGET that is
     still in effect; any intervening set wider than MODE invalidates it.  */
  while ((p = PREV_INSN (p)))
    {
      /* Don't scan across a basic block boundary.  */
      if (NOTE_INSN_BASIC_BLOCK_P (p))
	return false;

      if (NOTE_P (p))
	continue;

      /* If it isn't an insn, then give up.  */
      if (!INSN_P (p))
	return false;

      if (reg_set_p (target, p))
	{
	  rtx set = single_set (p);
	  rtx dest;

	  /* If it isn't an easy to recognize insn, then give up.  */
	  if (! set)
	    return false;

	  dest = SET_DEST (set);

	  /* If this sets the entire target register to zero, then our
	     first_insn is redundant.  */
	  if (rtx_equal_p (dest, target)
	      && SET_SRC (set) == const0_rtx)
	    return true;
	  else if (GET_CODE (dest) == STRICT_LOW_PART
		   && GET_CODE (XEXP (dest, 0)) == REG
		   && REGNO (XEXP (dest, 0)) == REGNO (target)
		   && (GET_MODE_SIZE (GET_MODE (XEXP (dest, 0)))
		       <= GET_MODE_SIZE (mode)))
	    /* This is a strict low part set which modifies less than
	       we are using, so it is safe.  */
	    ;
	  else
	    return false;
	}
    }

  return false;
}
67cd4f83 4139
2c8ec431
DL
4140/* Operand predicates for implementing asymmetric pc-relative addressing
4141 on m68k. The m68k supports pc-relative addressing (mode 7, register 2)
dab66575 4142 when used as a source operand, but not as a destination operand.
2c8ec431
DL
4143
4144 We model this by restricting the meaning of the basic predicates
4145 (general_operand, memory_operand, etc) to forbid the use of this
4146 addressing mode, and then define the following predicates that permit
4147 this addressing mode. These predicates can then be used for the
4148 source operands of the appropriate instructions.
4149
4150 n.b. While it is theoretically possible to change all machine patterns
   to use this addressing mode where permitted by the architecture,
4152 it has only been implemented for "common" cases: SImode, HImode, and
   QImode operands, and only for the principal operations that would
4154 require this addressing mode: data movement and simple integer operations.
4155
4156 In parallel with these new predicates, two new constraint letters
4157 were defined: 'S' and 'T'. 'S' is the -mpcrel analog of 'm'.
4158 'T' replaces 's' in the non-pcrel case. It is a no-op in the pcrel case.
4159 In the pcrel case 's' is only valid in combination with 'a' registers.
4160 See addsi3, subsi3, cmpsi, and movsi patterns for a better understanding
4161 of how these constraints are used.
4162
4163 The use of these predicates is strictly optional, though patterns that
4164 don't will cause an extra reload register to be allocated where one
4165 was not necessary:
4166
4167 lea (abc:w,%pc),%a0 ; need to reload address
4168 moveq &1,%d1 ; since write to pc-relative space
4169 movel %d1,%a0@ ; is not allowed
4170 ...
4171 lea (abc:w,%pc),%a1 ; no need to reload address here
4172 movel %a1@,%d0 ; since "movel (abc:w,%pc),%d0" is ok
4173
4174 For more info, consult tiemann@cygnus.com.
4175
4176
4177 All of the ugliness with predicates and constraints is due to the
4178 simple fact that the m68k does not allow a pc-relative addressing
4179 mode as a destination. gcc does not distinguish between source and
4180 destination addresses. Hence, if we claim that pc-relative address
4181 modes are valid, e.g. GO_IF_LEGITIMATE_ADDRESS accepts them, then we
4182 end up with invalid code. To get around this problem, we left
4183 pc-relative modes as invalid addresses, and then added special
4184 predicates and constraints to accept them.
4185
4186 A cleaner way to handle this is to modify gcc to distinguish
4187 between source and destination addresses. We can then say that
4188 pc-relative is a valid source address but not a valid destination
4189 address, and hopefully avoid a lot of the predicate and constraint
4190 hackery. Unfortunately, this would be a pretty big change. It would
4191 be a useful change for a number of ports, but there aren't any current
4192 plans to undertake this.
4193
4194 ***************************************************************************/
4195
4196
/* Output the assembler template for an SImode AND, choosing the cheapest
   encoding: a word-sized and%.w/clr%.w when the constant's high word is
   all-ones (not on ColdFire), a bclr when the constant clears exactly one
   bit, or a full and%.l.  May rewrite operands[0..2] in place to match
   the returned template.  */
const char *
output_andsi3 (rtx *operands)
{
  int logval;
  /* High word all-ones: only the low word changes, so a word op suffices.  */
  if (GET_CODE (operands[2]) == CONST_INT
      && (INTVAL (operands[2]) | 0xffff) == -1
      && (DATA_REG_P (operands[0])
	  || offsettable_memref_p (operands[0]))
      && !TARGET_COLDFIRE)
    {
      if (GET_CODE (operands[0]) != REG)
	operands[0] = adjust_address (operands[0], HImode, 2);
      operands[2] = GEN_INT (INTVAL (operands[2]) & 0xffff);
      /* Do not delete a following tstl %0 insn; that would be incorrect.  */
      CC_STATUS_INIT;
      if (operands[2] == const0_rtx)
	return "clr%.w %0";
      return "and%.w %2,%0";
    }
  /* A constant with exactly one zero bit can be done with bclr;
     LOGVAL is the bit number of that zero bit.  */
  if (GET_CODE (operands[2]) == CONST_INT
      && (logval = exact_log2 (~ INTVAL (operands[2]))) >= 0
      && (DATA_REG_P (operands[0])
	  || offsettable_memref_p (operands[0])))
    {
      if (DATA_REG_P (operands[0]))
	operands[1] = GEN_INT (logval);
      else
	{
	  /* For memory, address the byte containing the bit.  */
	  operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
	  operands[1] = GEN_INT (logval % 8);
	}
      /* This does not set condition codes in a standard way.  */
      CC_STATUS_INIT;
      return "bclr %1,%0";
    }
  return "and%.l %2,%0";
}
4234
5505f548 4235const char *
8a4a2253 4236output_iorsi3 (rtx *operands)
29ae8a3c
RK
4237{
4238 register int logval;
4239 if (GET_CODE (operands[2]) == CONST_INT
4240 && INTVAL (operands[2]) >> 16 == 0
4241 && (DATA_REG_P (operands[0])
4242 || offsettable_memref_p (operands[0]))
9425fb04 4243 && !TARGET_COLDFIRE)
29ae8a3c
RK
4244 {
4245 if (GET_CODE (operands[0]) != REG)
b72f00af 4246 operands[0] = adjust_address (operands[0], HImode, 2);
29ae8a3c
RK
4247 /* Do not delete a following tstl %0 insn; that would be incorrect. */
4248 CC_STATUS_INIT;
4249 if (INTVAL (operands[2]) == 0xffff)
4250 return "mov%.w %2,%0";
4251 return "or%.w %2,%0";
4252 }
4253 if (GET_CODE (operands[2]) == CONST_INT
4254 && (logval = exact_log2 (INTVAL (operands[2]))) >= 0
4255 && (DATA_REG_P (operands[0])
4256 || offsettable_memref_p (operands[0])))
4257 {
4258 if (DATA_REG_P (operands[0]))
b72f00af 4259 operands[1] = GEN_INT (logval);
29ae8a3c
RK
4260 else
4261 {
b72f00af 4262 operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
1d8eaa6b 4263 operands[1] = GEN_INT (logval % 8);
29ae8a3c
RK
4264 }
4265 CC_STATUS_INIT;
4266 return "bset %1,%0";
4267 }
4268 return "or%.l %2,%0";
4269}
4270
5505f548 4271const char *
8a4a2253 4272output_xorsi3 (rtx *operands)
29ae8a3c
RK
4273{
4274 register int logval;
4275 if (GET_CODE (operands[2]) == CONST_INT
4276 && INTVAL (operands[2]) >> 16 == 0
4277 && (offsettable_memref_p (operands[0]) || DATA_REG_P (operands[0]))
9425fb04 4278 && !TARGET_COLDFIRE)
29ae8a3c
RK
4279 {
4280 if (! DATA_REG_P (operands[0]))
b72f00af 4281 operands[0] = adjust_address (operands[0], HImode, 2);
29ae8a3c
RK
4282 /* Do not delete a following tstl %0 insn; that would be incorrect. */
4283 CC_STATUS_INIT;
4284 if (INTVAL (operands[2]) == 0xffff)
4285 return "not%.w %0";
4286 return "eor%.w %2,%0";
4287 }
4288 if (GET_CODE (operands[2]) == CONST_INT
4289 && (logval = exact_log2 (INTVAL (operands[2]))) >= 0
4290 && (DATA_REG_P (operands[0])
4291 || offsettable_memref_p (operands[0])))
4292 {
4293 if (DATA_REG_P (operands[0]))
b72f00af 4294 operands[1] = GEN_INT (logval);
29ae8a3c
RK
4295 else
4296 {
b72f00af 4297 operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
1d8eaa6b 4298 operands[1] = GEN_INT (logval % 8);
29ae8a3c
RK
4299 }
4300 CC_STATUS_INIT;
4301 return "bchg %1,%0";
4302 }
4303 return "eor%.l %2,%0";
4304}
7c262518 4305
29ca003a
RS
4306/* Return the instruction that should be used for a call to address X,
4307 which is known to be in operand 0. */
4308
4309const char *
4310output_call (rtx x)
4311{
4312 if (symbolic_operand (x, VOIDmode))
4313 return m68k_symbolic_call;
4314 else
4315 return "jsr %a0";
4316}
4317
f7e70894
RS
4318/* Likewise sibling calls. */
4319
4320const char *
4321output_sibcall (rtx x)
4322{
4323 if (symbolic_operand (x, VOIDmode))
4324 return m68k_symbolic_jump;
4325 else
4326 return "jmp %a0";
4327}
4328
45849738
BI
#ifdef M68K_TARGET_COFF

/* Output assembly to switch to section NAME with attribute FLAGS.  */

static void
m68k_coff_asm_named_section (const char *name, unsigned int flags,
			     tree decl ATTRIBUTE_UNUSED)
{
  /* Writable sections are marked 'd' (data), all others 'x'.  */
  char attr = (flags & SECTION_WRITE) ? 'd' : 'x';

  fprintf (asm_out_file, "\t.section\t%s,\"%c\"\n", name, attr);
}

#endif /* M68K_TARGET_COFF */
4348
/* Output the assembler code for a thunk to FUNCTION: adjust the incoming
   "this" pointer (stored at 4(%sp)) by DELTA and, if VCALL_OFFSET is
   nonzero, by *(*this + VCALL_OFFSET), then tail-call FUNCTION.  The
   thunk's code is generated as RTL and run through the final pass here,
   outside the normal compilation pipeline.  */
static void
m68k_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
		      HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
		      tree function)
{
  rtx this_slot, offset, addr, mem, insn;

  /* Pretend to be a post-reload pass while generating rtl.  */
  reload_completed = 1;

  /* The "this" pointer is stored at 4(%sp).  */
  this_slot = gen_rtx_MEM (Pmode, plus_constant (stack_pointer_rtx, 4));

  /* Add DELTA to THIS.  */
  if (delta != 0)
    {
      /* Make the offset a legitimate operand for memory addition.  */
      offset = GEN_INT (delta);
      if ((delta < -8 || delta > 8)
	  && (TARGET_COLDFIRE || USE_MOVQ (delta)))
	{
	  /* Out-of-range immediate: load it into %d0 first.  */
	  emit_move_insn (gen_rtx_REG (Pmode, D0_REG), offset);
	  offset = gen_rtx_REG (Pmode, D0_REG);
	}
      emit_insn (gen_add3_insn (copy_rtx (this_slot),
				copy_rtx (this_slot), offset));
    }

  /* If needed, add *(*THIS + VCALL_OFFSET) to THIS.  */
  if (vcall_offset != 0)
    {
      /* Set the static chain register to *THIS.  */
      emit_move_insn (static_chain_rtx, this_slot);
      emit_move_insn (static_chain_rtx, gen_rtx_MEM (Pmode, static_chain_rtx));

      /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET.  */
      addr = plus_constant (static_chain_rtx, vcall_offset);
      if (!m68k_legitimate_address_p (Pmode, addr, true))
	{
	  emit_insn (gen_rtx_SET (VOIDmode, static_chain_rtx, addr));
	  addr = static_chain_rtx;
	}

      /* Load the offset into %d0 and add it to THIS.  */
      emit_move_insn (gen_rtx_REG (Pmode, D0_REG),
		      gen_rtx_MEM (Pmode, addr));
      emit_insn (gen_add3_insn (copy_rtx (this_slot),
				copy_rtx (this_slot),
				gen_rtx_REG (Pmode, D0_REG)));
    }

  /* Jump to the target function.  Use a sibcall if direct jumps are
     allowed, otherwise load the address into a register first.  */
  mem = DECL_RTL (function);
  if (!sibcall_operand (XEXP (mem, 0), VOIDmode))
    {
      gcc_assert (flag_pic);

      if (!TARGET_SEP_DATA)
	{
	  /* Use the static chain register as a temporary (call-clobbered)
	     GOT pointer for this function.  We can use the static chain
	     register because it isn't live on entry to the thunk.  */
	  SET_REGNO (pic_offset_table_rtx, STATIC_CHAIN_REGNUM);
	  emit_insn (gen_load_got (pic_offset_table_rtx));
	}
      legitimize_pic_address (XEXP (mem, 0), Pmode, static_chain_rtx);
      mem = replace_equiv_address (mem, static_chain_rtx);
    }
  insn = emit_call_insn (gen_sibcall (mem, const0_rtx));
  SIBLING_CALL_P (insn) = 1;

  /* Run just enough of rest_of_compilation.  */
  insn = get_insns ();
  split_all_insns_noflow ();
  final_start_function (insn, file, 1);
  final (insn, file, 1);
  final_end_function ();

  /* Clean up the vars set above.  */
  reload_completed = 0;

  /* Restore the original PIC register.  */
  if (flag_pic)
    SET_REGNO (pic_offset_table_rtx, PIC_REG);
  free_after_compilation (cfun);
}
8636be86
KH
4436
/* Worker function for TARGET_STRUCT_VALUE_RTX.  Return the fixed
   register in which the address of an aggregate return value is
   passed; the same register is used for incoming and outgoing.  */

static rtx
m68k_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
		       int incoming ATTRIBUTE_UNUSED)
{
  return gen_rtx_REG (Pmode, M68K_STRUCT_VALUE_REGNUM);
}
cfca21cb
PB
4445
4446/* Return nonzero if register old_reg can be renamed to register new_reg. */
4447int
4448m68k_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED,
4449 unsigned int new_reg)
4450{
4451
4452 /* Interrupt functions can only use registers that have already been
4453 saved by the prologue, even if they would normally be
4454 call-clobbered. */
4455
a4242737
KH
4456 if ((m68k_get_function_kind (current_function_decl)
4457 == m68k_fk_interrupt_handler)
6fb5fa3c 4458 && !df_regs_ever_live_p (new_reg))
cfca21cb
PB
4459 return 0;
4460
4461 return 1;
4462}
70028b61 4463
ffa2596e
RS
4464/* Value is true if hard register REGNO can hold a value of machine-mode
4465 MODE. On the 68000, we let the cpu registers can hold any mode, but
4466 restrict the 68881 registers to floating-point modes. */
4467
70028b61
PB
4468bool
4469m68k_regno_mode_ok (int regno, enum machine_mode mode)
4470{
36e04090 4471 if (DATA_REGNO_P (regno))
70028b61 4472 {
a0a7fbc9
AS
4473 /* Data Registers, can hold aggregate if fits in. */
4474 if (regno + GET_MODE_SIZE (mode) / 4 <= 8)
4475 return true;
70028b61 4476 }
36e04090 4477 else if (ADDRESS_REGNO_P (regno))
70028b61 4478 {
a0a7fbc9
AS
4479 if (regno + GET_MODE_SIZE (mode) / 4 <= 16)
4480 return true;
70028b61 4481 }
36e04090 4482 else if (FP_REGNO_P (regno))
70028b61
PB
4483 {
4484 /* FPU registers, hold float or complex float of long double or
a0a7fbc9
AS
4485 smaller. */
4486 if ((GET_MODE_CLASS (mode) == MODE_FLOAT
4487 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
dcc21c4c 4488 && GET_MODE_UNIT_SIZE (mode) <= TARGET_FP_REG_SIZE)
a0a7fbc9 4489 return true;
70028b61
PB
4490 }
4491 return false;
4492}
dcc21c4c 4493
ffa2596e
RS
/* Implement SECONDARY_RELOAD_CLASS.  Return the class of an intermediate
   register needed to move X into a register of class RCLASS in MODE,
   or NO_REGS if no intermediate is required.  */

enum reg_class
m68k_secondary_reload_class (enum reg_class rclass,
			     enum machine_mode mode, rtx x)
{
  int regno;

  regno = true_regnum (x);

  /* If one operand of a movqi is an address register, the other
     operand must be a general register or constant.  Other types
     of operand must be reloaded through a data register.  */
  if (GET_MODE_SIZE (mode) == 1
      && reg_classes_intersect_p (rclass, ADDR_REGS)
      && !(INT_REGNO_P (regno) || CONSTANT_P (x)))
    return DATA_REGS;

  /* PC-relative addresses must be loaded into an address register first.  */
  if (TARGET_PCREL
      && !reg_class_subset_p (rclass, ADDR_REGS)
      && symbolic_operand (x, VOIDmode))
    return ADDR_REGS;

  return NO_REGS;
}
4520
/* Implement PREFERRED_RELOAD_CLASS.  Given that X is to be reloaded into
   a register of class RCLASS, return the class actually preferred —
   possibly a subclass that avoids a secondary reload or allows a
   cheaper instruction.  */

enum reg_class
m68k_preferred_reload_class (rtx x, enum reg_class rclass)
{
  enum reg_class secondary_class;

  /* If RCLASS might need a secondary reload, try restricting it to
     a class that doesn't.  */
  secondary_class = m68k_secondary_reload_class (rclass, GET_MODE (x), x);
  if (secondary_class != NO_REGS
      && reg_class_subset_p (secondary_class, rclass))
    return secondary_class;

  /* Prefer to use moveq for in-range constants.  */
  if (GET_CODE (x) == CONST_INT
      && reg_class_subset_p (DATA_REGS, rclass)
      && IN_RANGE (INTVAL (x), -0x80, 0x7f))
    return DATA_REGS;

  /* ??? Do we really need this now?  */
  if (GET_CODE (x) == CONST_DOUBLE
      && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
    {
      if (TARGET_HARD_FLOAT && reg_class_subset_p (FP_REGS, rclass))
	return FP_REGS;

      return NO_REGS;
    }

  return rclass;
}
4553
dcc21c4c
PB
4554/* Return floating point values in a 68881 register. This makes 68881 code
4555 a little bit faster. It also makes -msoft-float code incompatible with
4556 hard-float code, so people have to be careful not to mix the two.
c0220ea4 4557 For ColdFire it was decided the ABI incompatibility is undesirable.
dcc21c4c
PB
4558 If there is need for a hard-float ABI it is probably worth doing it
4559 properly and also passing function arguments in FP registers. */
4560rtx
4561m68k_libcall_value (enum machine_mode mode)
4562{
4563 switch (mode) {
4564 case SFmode:
4565 case DFmode:
4566 case XFmode:
4567 if (TARGET_68881)
8d989403 4568 return gen_rtx_REG (mode, FP0_REG);
dcc21c4c
PB
4569 break;
4570 default:
4571 break;
4572 }
8d989403 4573 return gen_rtx_REG (mode, D0_REG);
dcc21c4c
PB
4574}
4575
/* Return the rtx for the location where a function returns a value of
   type VALTYPE.  Floats go in %fp0 when a 68881 is present; pointers go
   in %a0 (mirrored into %d0, see below); everything else in %d0.  */
rtx
m68k_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED)
{
  enum machine_mode mode;

  mode = TYPE_MODE (valtype);
  switch (mode) {
  case SFmode:
  case DFmode:
  case XFmode:
    if (TARGET_68881)
      return gen_rtx_REG (mode, FP0_REG);
    break;
  default:
    break;
  }

  /* If the function returns a pointer, push that into %a0.  */
  if (func && POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (func))))
    /* For compatibility with the large body of existing code which
       does not always properly declare external functions returning
       pointer types, the m68k/SVR4 convention is to copy the value
       returned for pointer functions from a0 to d0 in the function
       epilogue, so that callers that have neglected to properly
       declare the callee can still find the correct return value in
       d0.  */
    return gen_rtx_PARALLEL
      (mode,
       gen_rtvec (2,
		  gen_rtx_EXPR_LIST (VOIDmode,
				     gen_rtx_REG (mode, A0_REG),
				     const0_rtx),
		  gen_rtx_EXPR_LIST (VOIDmode,
				     gen_rtx_REG (mode, D0_REG),
				     const0_rtx)));
  else if (POINTER_TYPE_P (valtype))
    return gen_rtx_REG (mode, A0_REG);
  else
    return gen_rtx_REG (mode, D0_REG);
}
1c445f03
NS
4616
/* Worker function for TARGET_RETURN_IN_MEMORY.  */
#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
static bool
m68k_return_in_memory (tree type, tree fntype ATTRIBUTE_UNUSED)
{
  enum machine_mode mode = TYPE_MODE (type);

  /* BLKmode values always live in memory.  */
  if (mode == BLKmode)
    return true;

  /* If TYPE's known alignment is less than the alignment of MODE that
     would contain the structure, then return in memory.  We need to
     do so to maintain the compatibility between code compiled with
     -mstrict-align and that compiled with -mno-strict-align.  */
  return (AGGREGATE_TYPE_P (type)
	  && TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (mode));
}
#endif
c47b0cb4
MK
4638
/* State and operand classification used by the instruction scheduling
   hooks below (see sched_address_type and friends).  */

/* CPU to schedule the program for.  */
enum attr_cpu m68k_sched_cpu;

/* MAC to schedule the program for.  */
enum attr_mac m68k_sched_mac;

/* Operand type.  The OP_TYPE_MEM* values mirror the 68k effective
   address (EA) modes noted on each entry.  */
enum attr_op_type
  {
    /* No operand.  */
    OP_TYPE_NONE,

    /* Integer register.  */
    OP_TYPE_RN,

    /* FP register.  */
    OP_TYPE_FPN,

    /* Implicit mem reference (e.g. stack).  */
    OP_TYPE_MEM1,

    /* Memory without offset or indexing.  EA modes 2, 3 and 4.  */
    OP_TYPE_MEM234,

    /* Memory with offset but without indexing.  EA mode 5.  */
    OP_TYPE_MEM5,

    /* Memory with indexing.  EA mode 6.  */
    OP_TYPE_MEM6,

    /* Memory referenced by absolute address.  EA mode 7.  */
    OP_TYPE_MEM7,

    /* Immediate operand that doesn't require extension word.  */
    OP_TYPE_IMM_Q,

    /* Immediate 16 bit operand.  */
    OP_TYPE_IMM_W,

    /* Immediate 32 bit operand.  */
    OP_TYPE_IMM_L
  };
4681
c47b0cb4
MK
/* Return type of memory ADDR_RTX refers to.  Classifies the address
   into one of the OP_TYPE_MEM* categories by decomposing it into
   base/index/offset parts.  */
static enum attr_op_type
sched_address_type (enum machine_mode mode, rtx addr_rtx)
{
  struct m68k_address address;

  /* Symbolic references are absolute addresses (EA mode 7).  */
  if (symbolic_operand (addr_rtx, VOIDmode))
    return OP_TYPE_MEM7;

  if (!m68k_decompose_address (mode, addr_rtx,
			       reload_completed, &address))
    {
      gcc_assert (!reload_completed);
      /* Reload will likely fix the address to be in the register.  */
      return OP_TYPE_MEM234;
    }

  /* A scaled index implies full indexed addressing.  */
  if (address.scale != 0)
    return OP_TYPE_MEM6;

  if (address.base != NULL_RTX)
    {
      if (address.offset == NULL_RTX)
	return OP_TYPE_MEM234;

      return OP_TYPE_MEM5;
    }

  /* No base and no index: must be a bare offset, i.e. absolute.  */
  gcc_assert (address.offset != NULL_RTX);

  return OP_TYPE_MEM7;
}
4714
96fcacb7
MK
4715/* Return X or Y (depending on OPX_P) operand of INSN. */
4716static rtx
4717sched_get_operand (rtx insn, bool opx_p)
4718{
4719 int i;
4720
4721 if (recog_memoized (insn) < 0)
4722 gcc_unreachable ();
4723
4724 extract_constrain_insn_cached (insn);
4725
4726 if (opx_p)
4727 i = get_attr_opx (insn);
4728 else
4729 i = get_attr_opy (insn);
4730
4731 if (i >= recog_data.n_operands)
4732 return NULL;
4733
4734 return recog_data.operand[i];
4735}
4736
4737/* Return type of INSN's operand X (if OPX_P) or operand Y (if !OPX_P).
4738 If ADDRESS_P is true, return type of memory location operand refers to. */
c47b0cb4 4739static enum attr_op_type
96fcacb7 4740sched_attr_op_type (rtx insn, bool opx_p, bool address_p)
c47b0cb4 4741{
96fcacb7
MK
4742 rtx op;
4743
4744 op = sched_get_operand (insn, opx_p);
4745
4746 if (op == NULL)
4747 {
4748 gcc_assert (!reload_completed);
4749 return OP_TYPE_RN;
4750 }
c47b0cb4
MK
4751
4752 if (address_p)
4753 return sched_address_type (QImode, op);
4754
4755 if (memory_operand (op, VOIDmode))
4756 return sched_address_type (GET_MODE (op), XEXP (op, 0));
4757
4758 if (register_operand (op, VOIDmode))
96fcacb7
MK
4759 {
4760 if ((!reload_completed && FLOAT_MODE_P (GET_MODE (op)))
4761 || (reload_completed && FP_REG_P (op)))
4762 return OP_TYPE_FPN;
4763
4764 return OP_TYPE_RN;
4765 }
c47b0cb4
MK
4766
4767 if (GET_CODE (op) == CONST_INT)
4768 {
96fcacb7
MK
4769 int ival;
4770
4771 ival = INTVAL (op);
4772
4773 /* Check for quick constants. */
4774 switch (get_attr_type (insn))
4775 {
4776 case TYPE_ALUQ_L:
4777 if (IN_RANGE (ival, 1, 8) || IN_RANGE (ival, -8, -1))
4778 return OP_TYPE_IMM_Q;
4779
4780 gcc_assert (!reload_completed);
4781 break;
4782
4783 case TYPE_MOVEQ_L:
4784 if (USE_MOVQ (ival))
4785 return OP_TYPE_IMM_Q;
4786
4787 gcc_assert (!reload_completed);
4788 break;
4789
4790 case TYPE_MOV3Q_L:
4791 if (valid_mov3q_const (ival))
4792 return OP_TYPE_IMM_Q;
4793
4794 gcc_assert (!reload_completed);
4795 break;
4796
4797 default:
4798 break;
4799 }
4800
4801 if (IN_RANGE (ival, -0x8000, 0x7fff))
c47b0cb4
MK
4802 return OP_TYPE_IMM_W;
4803
4804 return OP_TYPE_IMM_L;
4805 }
4806
4807 if (GET_CODE (op) == CONST_DOUBLE)
4808 {
4809 switch (GET_MODE (op))
4810 {
4811 case SFmode:
4812 return OP_TYPE_IMM_W;
4813
4814 case VOIDmode:
4815 case DFmode:
4816 return OP_TYPE_IMM_L;
4817
4818 default:
4819 gcc_unreachable ();
4820 }
4821 }
4822
4823 if (symbolic_operand (op, VOIDmode)
4824 || LABEL_P (op))
4825 {
4826 switch (GET_MODE (op))
4827 {
4828 case QImode:
4829 return OP_TYPE_IMM_Q;
4830
4831 case HImode:
4832 return OP_TYPE_IMM_W;
4833
4834 case SImode:
4835 return OP_TYPE_IMM_L;
4836
4837 default:
4838 if (GET_CODE (op) == SYMBOL_REF)
4839 /* ??? Just a guess. Probably we can guess better using length
4840 attribute of the instructions. */
4841 return OP_TYPE_IMM_W;
4842
4843 return OP_TYPE_IMM_L;
4844 }
4845 }
4846
96fcacb7 4847 gcc_assert (!reload_completed);
c47b0cb4 4848
96fcacb7
MK
4849 if (FLOAT_MODE_P (GET_MODE (op)))
4850 return OP_TYPE_FPN;
c47b0cb4 4851
96fcacb7 4852 return OP_TYPE_RN;
c47b0cb4
MK
4853}
4854
4855/* Implement opx_type attribute.
4856 Return type of INSN's operand X.
4857 If ADDRESS_P is true, return type of memory location operand refers to. */
4858enum attr_opx_type
4859m68k_sched_attr_opx_type (rtx insn, int address_p)
4860{
c47b0cb4
MK
4861 switch (sched_attr_op_type (insn, true, address_p != 0))
4862 {
96fcacb7
MK
4863 case OP_TYPE_RN:
4864 return OPX_TYPE_RN;
4865
4866 case OP_TYPE_FPN:
4867 return OPX_TYPE_FPN;
c47b0cb4
MK
4868
4869 case OP_TYPE_MEM1:
4870 return OPX_TYPE_MEM1;
4871
4872 case OP_TYPE_MEM234:
4873 return OPX_TYPE_MEM234;
4874
4875 case OP_TYPE_MEM5:
4876 return OPX_TYPE_MEM5;
4877
4878 case OP_TYPE_MEM6:
4879 return OPX_TYPE_MEM6;
4880
4881 case OP_TYPE_MEM7:
4882 return OPX_TYPE_MEM7;
4883
4884 case OP_TYPE_IMM_Q:
4885 return OPX_TYPE_IMM_Q;
4886
4887 case OP_TYPE_IMM_W:
4888 return OPX_TYPE_IMM_W;
4889
4890 case OP_TYPE_IMM_L:
4891 return OPX_TYPE_IMM_L;
4892
4893 default:
4894 gcc_unreachable ();
4895 return 0;
4896 }
4897}
4898
4899/* Implement opy_type attribute.
4900 Return type of INSN's operand Y.
4901 If ADDRESS_P is true, return type of memory location operand refers to. */
4902enum attr_opy_type
4903m68k_sched_attr_opy_type (rtx insn, int address_p)
4904{
c47b0cb4
MK
4905 switch (sched_attr_op_type (insn, false, address_p != 0))
4906 {
96fcacb7
MK
4907 case OP_TYPE_RN:
4908 return OPY_TYPE_RN;
4909
4910 case OP_TYPE_FPN:
4911 return OPY_TYPE_FPN;
c47b0cb4
MK
4912
4913 case OP_TYPE_MEM1:
4914 return OPY_TYPE_MEM1;
4915
4916 case OP_TYPE_MEM234:
4917 return OPY_TYPE_MEM234;
4918
4919 case OP_TYPE_MEM5:
4920 return OPY_TYPE_MEM5;
4921
4922 case OP_TYPE_MEM6:
4923 return OPY_TYPE_MEM6;
4924
4925 case OP_TYPE_MEM7:
4926 return OPY_TYPE_MEM7;
4927
4928 case OP_TYPE_IMM_Q:
4929 return OPY_TYPE_IMM_Q;
4930
4931 case OP_TYPE_IMM_W:
4932 return OPY_TYPE_IMM_W;
4933
4934 case OP_TYPE_IMM_L:
4935 return OPY_TYPE_IMM_L;
4936
4937 default:
4938 gcc_unreachable ();
4939 return 0;
4940 }
4941}
4942
96fcacb7
MK
4943/* Return size of INSN as int. */
4944static int
4945sched_get_attr_size_int (rtx insn)
c47b0cb4
MK
4946{
4947 int size;
4948
96fcacb7 4949 switch (get_attr_type (insn))
c47b0cb4 4950 {
96fcacb7
MK
4951 case TYPE_IGNORE:
4952 /* There should be no references to m68k_sched_attr_size for 'ignore'
4953 instructions. */
4954 gcc_unreachable ();
4955 return 0;
4956
4957 case TYPE_MUL_L:
c47b0cb4
MK
4958 size = 2;
4959 break;
4960
4961 default:
4962 size = 1;
4963 break;
4964 }
4965
4966 switch (get_attr_opx_type (insn))
4967 {
4968 case OPX_TYPE_NONE:
96fcacb7
MK
4969 case OPX_TYPE_RN:
4970 case OPX_TYPE_FPN:
c47b0cb4
MK
4971 case OPX_TYPE_MEM1:
4972 case OPX_TYPE_MEM234:
4973 case OPY_TYPE_IMM_Q:
4974 break;
4975
4976 case OPX_TYPE_MEM5:
4977 case OPX_TYPE_MEM6:
4978 /* Here we assume that most absolute references are short. */
4979 case OPX_TYPE_MEM7:
4980 case OPY_TYPE_IMM_W:
4981 ++size;
4982 break;
4983
4984 case OPY_TYPE_IMM_L:
4985 size += 2;
4986 break;
4987
4988 default:
4989 gcc_unreachable ();
4990 }
4991
4992 switch (get_attr_opy_type (insn))
4993 {
4994 case OPY_TYPE_NONE:
96fcacb7
MK
4995 case OPY_TYPE_RN:
4996 case OPY_TYPE_FPN:
c47b0cb4
MK
4997 case OPY_TYPE_MEM1:
4998 case OPY_TYPE_MEM234:
4999 case OPY_TYPE_IMM_Q:
5000 break;
5001
5002 case OPY_TYPE_MEM5:
5003 case OPY_TYPE_MEM6:
5004 /* Here we assume that most absolute references are short. */
5005 case OPY_TYPE_MEM7:
5006 case OPY_TYPE_IMM_W:
5007 ++size;
5008 break;
5009
5010 case OPY_TYPE_IMM_L:
5011 size += 2;
5012 break;
5013
5014 default:
5015 gcc_unreachable ();
5016 }
5017
5018 if (size > 3)
5019 {
96fcacb7 5020 gcc_assert (!reload_completed);
c47b0cb4
MK
5021
5022 size = 3;
5023 }
5024
5025 return size;
5026}
5027
96fcacb7
MK
5028/* Return size of INSN as attribute enum value. */
5029enum attr_size
5030m68k_sched_attr_size (rtx insn)
5031{
5032 switch (sched_get_attr_size_int (insn))
5033 {
5034 case 1:
5035 return SIZE_1;
5036
5037 case 2:
5038 return SIZE_2;
5039
5040 case 3:
5041 return SIZE_3;
5042
5043 default:
5044 gcc_unreachable ();
5045 return 0;
5046 }
5047}
5048
5049/* Return operand X or Y (depending on OPX_P) of INSN,
5050 if it is a MEM, or NULL overwise. */
5051static enum attr_op_type
5052sched_get_opxy_mem_type (rtx insn, bool opx_p)
5053{
5054 if (opx_p)
5055 {
5056 switch (get_attr_opx_type (insn))
5057 {
5058 case OPX_TYPE_NONE:
5059 case OPX_TYPE_RN:
5060 case OPX_TYPE_FPN:
5061 case OPX_TYPE_IMM_Q:
5062 case OPX_TYPE_IMM_W:
5063 case OPX_TYPE_IMM_L:
5064 return OP_TYPE_RN;
5065
5066 case OPX_TYPE_MEM1:
5067 case OPX_TYPE_MEM234:
5068 case OPX_TYPE_MEM5:
5069 case OPX_TYPE_MEM7:
5070 return OP_TYPE_MEM1;
5071
5072 case OPX_TYPE_MEM6:
5073 return OP_TYPE_MEM6;
5074
5075 default:
5076 gcc_unreachable ();
5077 return 0;
5078 }
5079 }
5080 else
5081 {
5082 switch (get_attr_opy_type (insn))
5083 {
5084 case OPY_TYPE_NONE:
5085 case OPY_TYPE_RN:
5086 case OPY_TYPE_FPN:
5087 case OPY_TYPE_IMM_Q:
5088 case OPY_TYPE_IMM_W:
5089 case OPY_TYPE_IMM_L:
5090 return OP_TYPE_RN;
5091
5092 case OPY_TYPE_MEM1:
5093 case OPY_TYPE_MEM234:
5094 case OPY_TYPE_MEM5:
5095 case OPY_TYPE_MEM7:
5096 return OP_TYPE_MEM1;
5097
5098 case OPY_TYPE_MEM6:
5099 return OP_TYPE_MEM6;
5100
5101 default:
5102 gcc_unreachable ();
5103 return 0;
5104 }
5105 }
5106}
5107
c47b0cb4
MK
5108/* Implement op_mem attribute. */
5109enum attr_op_mem
5110m68k_sched_attr_op_mem (rtx insn)
5111{
96fcacb7
MK
5112 enum attr_op_type opx;
5113 enum attr_op_type opy;
c47b0cb4 5114
96fcacb7
MK
5115 opx = sched_get_opxy_mem_type (insn, true);
5116 opy = sched_get_opxy_mem_type (insn, false);
c47b0cb4 5117
96fcacb7 5118 if (opy == OP_TYPE_RN && opx == OP_TYPE_RN)
c47b0cb4
MK
5119 return OP_MEM_00;
5120
96fcacb7 5121 if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM1)
c47b0cb4
MK
5122 {
5123 switch (get_attr_opx_access (insn))
5124 {
5125 case OPX_ACCESS_R:
5126 return OP_MEM_10;
5127
5128 case OPX_ACCESS_W:
5129 return OP_MEM_01;
5130
5131 case OPX_ACCESS_RW:
5132 return OP_MEM_11;
5133
5134 default:
96fcacb7
MK
5135 gcc_unreachable ();
5136 return 0;
c47b0cb4
MK
5137 }
5138 }
5139
96fcacb7 5140 if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM6)
c47b0cb4
MK
5141 {
5142 switch (get_attr_opx_access (insn))
5143 {
5144 case OPX_ACCESS_R:
5145 return OP_MEM_I0;
5146
5147 case OPX_ACCESS_W:
5148 return OP_MEM_0I;
5149
5150 case OPX_ACCESS_RW:
5151 return OP_MEM_I1;
5152
5153 default:
96fcacb7
MK
5154 gcc_unreachable ();
5155 return 0;
c47b0cb4
MK
5156 }
5157 }
5158
96fcacb7 5159 if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_RN)
c47b0cb4
MK
5160 return OP_MEM_10;
5161
96fcacb7 5162 if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM1)
c47b0cb4
MK
5163 {
5164 switch (get_attr_opx_access (insn))
5165 {
5166 case OPX_ACCESS_W:
5167 return OP_MEM_11;
5168
5169 default:
96fcacb7
MK
5170 gcc_assert (!reload_completed);
5171 return OP_MEM_11;
c47b0cb4
MK
5172 }
5173 }
5174
96fcacb7 5175 if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM6)
c47b0cb4
MK
5176 {
5177 switch (get_attr_opx_access (insn))
5178 {
5179 case OPX_ACCESS_W:
5180 return OP_MEM_1I;
5181
5182 default:
96fcacb7
MK
5183 gcc_assert (!reload_completed);
5184 return OP_MEM_1I;
c47b0cb4
MK
5185 }
5186 }
5187
96fcacb7 5188 if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_RN)
c47b0cb4
MK
5189 return OP_MEM_I0;
5190
96fcacb7 5191 if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM1)
c47b0cb4
MK
5192 {
5193 switch (get_attr_opx_access (insn))
5194 {
5195 case OPX_ACCESS_W:
5196 return OP_MEM_I1;
5197
5198 default:
96fcacb7
MK
5199 gcc_assert (!reload_completed);
5200 return OP_MEM_I1;
c47b0cb4
MK
5201 }
5202 }
5203
96fcacb7
MK
5204 gcc_assert (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM6);
5205 gcc_assert (!reload_completed);
5206 return OP_MEM_I1;
c47b0cb4
MK
5207}
5208
5209/* Jump instructions types. Indexed by INSN_UID.
5210 The same rtl insn can be expanded into different asm instructions
5211 depending on the cc0_status. To properly determine type of jump
5212 instructions we scan instruction stream and map jumps types to this
5213 array. */
5214static enum attr_type *sched_branch_type;
5215
5216/* Return the type of the jump insn. */
5217enum attr_type
5218m68k_sched_branch_type (rtx insn)
5219{
5220 enum attr_type type;
5221
5222 type = sched_branch_type[INSN_UID (insn)];
5223
5224 gcc_assert (type != 0);
5225
5226 return type;
5227}
b8c96320 5228
96fcacb7
MK
5229/* Data for ColdFire V4 index bypass.
5230 Producer modifies register that is used as index in consumer with
5231 specified scale. */
5232static struct
b8c96320 5233{
96fcacb7
MK
5234 /* Producer instruction. */
5235 rtx pro;
826fadba 5236
96fcacb7
MK
5237 /* Consumer instruction. */
5238 rtx con;
b8c96320 5239
96fcacb7
MK
5240 /* Scale of indexed memory access within consumer.
5241 Or zero if bypass should not be effective at the moment. */
5242 int scale;
5243} sched_cfv4_bypass_data;
b8c96320
MK
5244
5245/* An empty state that is used in m68k_sched_adjust_cost. */
5246static state_t sched_adjust_cost_state;
5247
5248/* Implement adjust_cost scheduler hook.
5249 Return adjusted COST of dependency LINK between DEF_INSN and INSN. */
5250static int
5251m68k_sched_adjust_cost (rtx insn, rtx link ATTRIBUTE_UNUSED, rtx def_insn,
5252 int cost)
5253{
5254 int delay;
5255
5256 if (recog_memoized (def_insn) < 0
5257 || recog_memoized (insn) < 0)
5258 return cost;
5259
96fcacb7
MK
5260 if (sched_cfv4_bypass_data.scale == 1)
5261 /* Handle ColdFire V4 bypass for indexed address with 1x scale. */
5262 {
5263 /* haifa-sched.c: insn_cost () calls bypass_p () just before
5264 targetm.sched.adjust_cost (). Hence, we can be relatively sure
5265 that the data in sched_cfv4_bypass_data is up to date. */
5266 gcc_assert (sched_cfv4_bypass_data.pro == def_insn
5267 && sched_cfv4_bypass_data.con == insn);
5268
5269 if (cost < 3)
5270 cost = 3;
5271
5272 sched_cfv4_bypass_data.pro = NULL;
5273 sched_cfv4_bypass_data.con = NULL;
5274 sched_cfv4_bypass_data.scale = 0;
5275 }
5276 else
5277 gcc_assert (sched_cfv4_bypass_data.pro == NULL
5278 && sched_cfv4_bypass_data.con == NULL
5279 && sched_cfv4_bypass_data.scale == 0);
5280
b8c96320
MK
5281 /* Don't try to issue INSN earlier than DFA permits.
5282 This is especially useful for instructions that write to memory,
5283 as their true dependence (default) latency is better to be set to 0
5284 to workaround alias analysis limitations.
5285 This is, in fact, a machine independent tweak, so, probably,
5286 it should be moved to haifa-sched.c: insn_cost (). */
b8c96320
MK
5287 delay = min_insn_conflict_delay (sched_adjust_cost_state, def_insn, insn);
5288 if (delay > cost)
5289 cost = delay;
5290
5291 return cost;
5292}
5293
96fcacb7
MK
5294/* Return maximal number of insns that can be scheduled on a single cycle. */
5295static int
5296m68k_sched_issue_rate (void)
5297{
5298 switch (m68k_sched_cpu)
5299 {
5300 case CPU_CFV1:
5301 case CPU_CFV2:
5302 case CPU_CFV3:
5303 return 1;
5304
5305 case CPU_CFV4:
5306 return 2;
5307
5308 default:
5309 gcc_unreachable ();
5310 return 0;
5311 }
5312}
5313
826fadba
MK
5314/* Maximal length of instruction for current CPU.
5315 E.g. it is 3 for any ColdFire core. */
5316static int max_insn_size;
5317
5318/* Data to model instruction buffer of CPU. */
5319struct _sched_ib
5320{
96fcacb7
MK
5321 /* True if instruction buffer model is modeled for current CPU. */
5322 bool enabled_p;
5323
826fadba
MK
5324 /* Size of the instruction buffer in words. */
5325 int size;
5326
5327 /* Number of filled words in the instruction buffer. */
5328 int filled;
5329
5330 /* Additional information about instruction buffer for CPUs that have
5331 a buffer of instruction records, rather then a plain buffer
5332 of instruction words. */
5333 struct _sched_ib_records
5334 {
5335 /* Size of buffer in records. */
5336 int n_insns;
b8c96320 5337
826fadba
MK
5338 /* Array to hold data on adjustements made to the size of the buffer. */
5339 int *adjust;
b8c96320 5340
826fadba
MK
5341 /* Index of the above array. */
5342 int adjust_index;
5343 } records;
5344
5345 /* An insn that reserves (marks empty) one word in the instruction buffer. */
5346 rtx insn;
5347};
5348
5349static struct _sched_ib sched_ib;
b8c96320
MK
5350
5351/* ID of memory unit. */
5352static int sched_mem_unit_code;
5353
5354/* Implementation of the targetm.sched.variable_issue () hook.
5355 It is called after INSN was issued. It returns the number of insns
5356 that can possibly get scheduled on the current cycle.
5357 It is used here to determine the effect of INSN on the instruction
5358 buffer. */
5359static int
5360m68k_sched_variable_issue (FILE *sched_dump ATTRIBUTE_UNUSED,
5361 int sched_verbose ATTRIBUTE_UNUSED,
5362 rtx insn, int can_issue_more)
5363{
5364 int insn_size;
5365
96fcacb7 5366 if (recog_memoized (insn) >= 0 && get_attr_type (insn) != TYPE_IGNORE)
b8c96320 5367 {
826fadba
MK
5368 switch (m68k_sched_cpu)
5369 {
5370 case CPU_CFV1:
5371 case CPU_CFV2:
96fcacb7 5372 insn_size = sched_get_attr_size_int (insn);
826fadba
MK
5373 break;
5374
5375 case CPU_CFV3:
96fcacb7 5376 insn_size = sched_get_attr_size_int (insn);
826fadba
MK
5377
5378 /* ColdFire V3 and V4 cores have instruction buffers that can
5379 accumulate up to 8 instructions regardless of instructions'
5380 sizes. So we should take care not to "prefetch" 24 one-word
5381 or 12 two-words instructions.
5382 To model this behavior we temporarily decrease size of the
5383 buffer by (max_insn_size - insn_size) for next 7 instructions. */
5384 {
5385 int adjust;
5386
5387 adjust = max_insn_size - insn_size;
5388 sched_ib.size -= adjust;
5389
5390 if (sched_ib.filled > sched_ib.size)
5391 sched_ib.filled = sched_ib.size;
5392
5393 sched_ib.records.adjust[sched_ib.records.adjust_index] = adjust;
5394 }
5395
5396 ++sched_ib.records.adjust_index;
5397 if (sched_ib.records.adjust_index == sched_ib.records.n_insns)
5398 sched_ib.records.adjust_index = 0;
5399
5400 /* Undo adjustement we did 7 instructions ago. */
5401 sched_ib.size
5402 += sched_ib.records.adjust[sched_ib.records.adjust_index];
5403
5404 break;
b8c96320 5405
96fcacb7
MK
5406 case CPU_CFV4:
5407 gcc_assert (!sched_ib.enabled_p);
5408 insn_size = 0;
5409 break;
5410
826fadba
MK
5411 default:
5412 gcc_unreachable ();
5413 }
b8c96320 5414
826fadba 5415 gcc_assert (insn_size <= sched_ib.filled);
b8c96320
MK
5416 --can_issue_more;
5417 }
5418 else if (GET_CODE (PATTERN (insn)) == ASM_INPUT
5419 || asm_noperands (PATTERN (insn)) >= 0)
826fadba 5420 insn_size = sched_ib.filled;
b8c96320
MK
5421 else
5422 insn_size = 0;
5423
826fadba 5424 sched_ib.filled -= insn_size;
b8c96320
MK
5425
5426 return can_issue_more;
5427}
5428
96fcacb7
MK
/* Return how many instructions the scheduler should look ahead when
   choosing the best one to issue.  */
static int
m68k_sched_first_cycle_multipass_dfa_lookahead (void)
{
  return m68k_sched_issue_rate () - 1;
}
5436
5437/* Implementation of targetm.sched.md_init_global () hook.
5438 It is invoked once per scheduling pass and is used here
5439 to initialize scheduler constants. */
5440static void
5441m68k_sched_md_init_global (FILE *sched_dump ATTRIBUTE_UNUSED,
5442 int sched_verbose ATTRIBUTE_UNUSED,
5443 int n_insns ATTRIBUTE_UNUSED)
5444{
5445 /* Init branch types. */
5446 {
5447 rtx insn;
5448
5449 sched_branch_type = xcalloc (get_max_uid () + 1,
5450 sizeof (*sched_branch_type));
5451
5452 for (insn = get_insns (); insn != NULL_RTX; insn = NEXT_INSN (insn))
5453 {
5454 if (JUMP_P (insn))
5455 /* !!! FIXME: Implement real scan here. */
5456 sched_branch_type[INSN_UID (insn)] = TYPE_BCC;
5457 }
5458 }
5459
96fcacb7
MK
5460#ifdef ENABLE_CHECKING
5461 /* Check that all instructions have DFA reservations and
5462 that all instructions can be issued from a clean state. */
5463 {
5464 rtx insn;
5465 state_t state;
b8c96320 5466
96fcacb7 5467 state = alloca (state_size ());
b8c96320 5468
96fcacb7
MK
5469 for (insn = get_insns (); insn != NULL_RTX; insn = NEXT_INSN (insn))
5470 {
5471 if (INSN_P (insn) && recog_memoized (insn) >= 0)
5472 {
5473 gcc_assert (insn_has_dfa_reservation_p (insn));
b8c96320 5474
96fcacb7
MK
5475 state_reset (state);
5476 if (state_transition (state, insn) >= 0)
5477 gcc_unreachable ();
5478 }
5479 }
5480 }
5481#endif
b8c96320
MK
5482
5483 /* Setup target cpu. */
96fcacb7
MK
5484
5485 /* ColdFire V4 has a set of features to keep its instruction buffer full
5486 (e.g., a separate memory bus for instructions) and, hence, we do not model
5487 buffer for this CPU. */
5488 sched_ib.enabled_p = (m68k_sched_cpu != CPU_CFV4);
5489
b8c96320
MK
5490 switch (m68k_sched_cpu)
5491 {
96fcacb7
MK
5492 case CPU_CFV4:
5493 sched_ib.filled = 0;
5494
5495 /* FALLTHRU */
5496
826fadba
MK
5497 case CPU_CFV1:
5498 case CPU_CFV2:
5499 max_insn_size = 3;
5500 sched_ib.records.n_insns = 0;
5501 sched_ib.records.adjust = NULL;
5502 break;
5503
5504 case CPU_CFV3:
5505 max_insn_size = 3;
5506 sched_ib.records.n_insns = 8;
5507 sched_ib.records.adjust = xmalloc (sched_ib.records.n_insns
5508 * sizeof (*sched_ib.records.adjust));
b8c96320
MK
5509 break;
5510
5511 default:
5512 gcc_unreachable ();
5513 }
5514
826fadba
MK
5515 sched_mem_unit_code = get_cpu_unit_code ("cf_mem1");
5516
b8c96320
MK
5517 sched_adjust_cost_state = xmalloc (state_size ());
5518 state_reset (sched_adjust_cost_state);
5519
5520 start_sequence ();
5521 emit_insn (gen_ib ());
826fadba 5522 sched_ib.insn = get_insns ();
b8c96320
MK
5523 end_sequence ();
5524}
5525
5526/* Scheduling pass is now finished. Free/reset static variables. */
5527static void
5528m68k_sched_md_finish_global (FILE *dump ATTRIBUTE_UNUSED,
5529 int verbose ATTRIBUTE_UNUSED)
5530{
826fadba 5531 sched_ib.insn = NULL;
b8c96320
MK
5532
5533 free (sched_adjust_cost_state);
5534 sched_adjust_cost_state = NULL;
5535
5536 sched_mem_unit_code = 0;
826fadba
MK
5537
5538 free (sched_ib.records.adjust);
5539 sched_ib.records.adjust = NULL;
5540 sched_ib.records.n_insns = 0;
5541 max_insn_size = 0;
b8c96320
MK
5542
5543 free (sched_branch_type);
5544 sched_branch_type = NULL;
5545}
5546
5547/* Implementation of targetm.sched.md_init () hook.
5548 It is invoked each time scheduler starts on the new block (basic block or
5549 extended basic block). */
5550static void
5551m68k_sched_md_init (FILE *sched_dump ATTRIBUTE_UNUSED,
5552 int sched_verbose ATTRIBUTE_UNUSED,
5553 int n_insns ATTRIBUTE_UNUSED)
5554{
826fadba
MK
5555 switch (m68k_sched_cpu)
5556 {
5557 case CPU_CFV1:
5558 case CPU_CFV2:
5559 sched_ib.size = 6;
5560 break;
5561
5562 case CPU_CFV3:
5563 sched_ib.size = sched_ib.records.n_insns * max_insn_size;
5564
5565 memset (sched_ib.records.adjust, 0,
5566 sched_ib.records.n_insns * sizeof (*sched_ib.records.adjust));
5567 sched_ib.records.adjust_index = 0;
5568 break;
5569
96fcacb7
MK
5570 case CPU_CFV4:
5571 gcc_assert (!sched_ib.enabled_p);
5572 sched_ib.size = 0;
5573 break;
5574
826fadba
MK
5575 default:
5576 gcc_unreachable ();
5577 }
5578
96fcacb7
MK
5579 if (sched_ib.enabled_p)
5580 /* haifa-sched.c: schedule_block () calls advance_cycle () just before
5581 the first cycle. Workaround that. */
5582 sched_ib.filled = -2;
b8c96320
MK
5583}
5584
5585/* Implementation of targetm.sched.dfa_pre_advance_cycle () hook.
5586 It is invoked just before current cycle finishes and is used here
5587 to track if instruction buffer got its two words this cycle. */
5588static void
5589m68k_sched_dfa_pre_advance_cycle (void)
5590{
96fcacb7
MK
5591 if (!sched_ib.enabled_p)
5592 return;
5593
b8c96320
MK
5594 if (!cpu_unit_reservation_p (curr_state, sched_mem_unit_code))
5595 {
826fadba 5596 sched_ib.filled += 2;
b8c96320 5597
826fadba
MK
5598 if (sched_ib.filled > sched_ib.size)
5599 sched_ib.filled = sched_ib.size;
b8c96320
MK
5600 }
5601}
5602
5603/* Implementation of targetm.sched.dfa_post_advance_cycle () hook.
5604 It is invoked just after new cycle begins and is used here
5605 to setup number of filled words in the instruction buffer so that
5606 instructions which won't have all their words prefetched would be
5607 stalled for a cycle. */
5608static void
5609m68k_sched_dfa_post_advance_cycle (void)
5610{
5611 int i;
b8c96320 5612
96fcacb7
MK
5613 if (!sched_ib.enabled_p)
5614 return;
5615
b8c96320
MK
5616 /* Setup number of prefetched instruction words in the instruction
5617 buffer. */
826fadba
MK
5618 i = max_insn_size - sched_ib.filled;
5619
5620 while (--i >= 0)
b8c96320 5621 {
826fadba 5622 if (state_transition (curr_state, sched_ib.insn) >= 0)
b8c96320
MK
5623 gcc_unreachable ();
5624 }
5625}
96fcacb7
MK
5626
5627/* Return X or Y (depending on OPX_P) operand of INSN,
5628 if it is an integer register, or NULL overwise. */
5629static rtx
5630sched_get_reg_operand (rtx insn, bool opx_p)
5631{
5632 rtx op = NULL;
5633
5634 if (opx_p)
5635 {
5636 if (get_attr_opx_type (insn) == OPX_TYPE_RN)
5637 {
5638 op = sched_get_operand (insn, true);
5639 gcc_assert (op != NULL);
5640
5641 if (!reload_completed && !REG_P (op))
5642 return NULL;
5643 }
5644 }
5645 else
5646 {
5647 if (get_attr_opy_type (insn) == OPY_TYPE_RN)
5648 {
5649 op = sched_get_operand (insn, false);
5650 gcc_assert (op != NULL);
5651
5652 if (!reload_completed && !REG_P (op))
5653 return NULL;
5654 }
5655 }
5656
5657 return op;
5658}
5659
5660/* Return true, if X or Y (depending on OPX_P) operand of INSN
5661 is a MEM. */
5662static bool
5663sched_mem_operand_p (rtx insn, bool opx_p)
5664{
5665 switch (sched_get_opxy_mem_type (insn, opx_p))
5666 {
5667 case OP_TYPE_MEM1:
5668 case OP_TYPE_MEM6:
5669 return true;
5670
5671 default:
5672 return false;
5673 }
5674}
5675
5676/* Return X or Y (depending on OPX_P) operand of INSN,
5677 if it is a MEM, or NULL overwise. */
5678static rtx
5679sched_get_mem_operand (rtx insn, bool must_read_p, bool must_write_p)
5680{
5681 bool opx_p;
5682 bool opy_p;
5683
5684 opx_p = false;
5685 opy_p = false;
5686
5687 if (must_read_p)
5688 {
5689 opx_p = true;
5690 opy_p = true;
5691 }
5692
5693 if (must_write_p)
5694 {
5695 opx_p = true;
5696 opy_p = false;
5697 }
5698
5699 if (opy_p && sched_mem_operand_p (insn, false))
5700 return sched_get_operand (insn, false);
5701
5702 if (opx_p && sched_mem_operand_p (insn, true))
5703 return sched_get_operand (insn, true);
5704
5705 gcc_unreachable ();
5706 return NULL;
5707}
5708
5709/* Return non-zero if PRO modifies register used as part of
5710 address in CON. */
5711int
5712m68k_sched_address_bypass_p (rtx pro, rtx con)
5713{
5714 rtx pro_x;
5715 rtx con_mem_read;
5716
5717 pro_x = sched_get_reg_operand (pro, true);
5718 if (pro_x == NULL)
5719 return 0;
5720
5721 con_mem_read = sched_get_mem_operand (con, true, false);
5722 gcc_assert (con_mem_read != NULL);
5723
5724 if (reg_mentioned_p (pro_x, con_mem_read))
5725 return 1;
5726
5727 return 0;
5728}
5729
5730/* Helper function for m68k_sched_indexed_address_bypass_p.
5731 if PRO modifies register used as index in CON,
5732 return scale of indexed memory access in CON. Return zero overwise. */
5733static int
5734sched_get_indexed_address_scale (rtx pro, rtx con)
5735{
5736 rtx reg;
5737 rtx mem;
5738 struct m68k_address address;
5739
5740 reg = sched_get_reg_operand (pro, true);
5741 if (reg == NULL)
5742 return 0;
5743
5744 mem = sched_get_mem_operand (con, true, false);
5745 gcc_assert (mem != NULL && MEM_P (mem));
5746
5747 if (!m68k_decompose_address (GET_MODE (mem), XEXP (mem, 0), reload_completed,
5748 &address))
5749 gcc_unreachable ();
5750
5751 if (REGNO (reg) == REGNO (address.index))
5752 {
5753 gcc_assert (address.scale != 0);
5754 return address.scale;
5755 }
5756
5757 return 0;
5758}
5759
5760/* Return non-zero if PRO modifies register used
5761 as index with scale 2 or 4 in CON. */
5762int
5763m68k_sched_indexed_address_bypass_p (rtx pro, rtx con)
5764{
5765 gcc_assert (sched_cfv4_bypass_data.pro == NULL
5766 && sched_cfv4_bypass_data.con == NULL
5767 && sched_cfv4_bypass_data.scale == 0);
5768
5769 switch (sched_get_indexed_address_scale (pro, con))
5770 {
5771 case 1:
5772 /* We can't have a variable latency bypass, so
5773 remember to adjust the insn cost in adjust_cost hook. */
5774 sched_cfv4_bypass_data.pro = pro;
5775 sched_cfv4_bypass_data.con = con;
5776 sched_cfv4_bypass_data.scale = 1;
5777 return 0;
5778
5779 case 2:
5780 case 4:
5781 return 1;
5782
5783 default:
5784 return 0;
5785 }
5786}