]> git.ipfire.org Git - thirdparty/gcc.git/blame - gcc/config/m68k/m68k.c
re PR libobjc/36610 (objc_msg_sendv is broken for targets which pass argument via...
[thirdparty/gcc.git] / gcc / config / m68k / m68k.c
CommitLineData
79e68feb 1/* Subroutines for insn-output.c for Motorola 68000 family.
8636be86 2 Copyright (C) 1987, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
7b0f476d 3 2001, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4592bdcb 4 Free Software Foundation, Inc.
79e68feb 5
7ec022b2 6This file is part of GCC.
79e68feb 7
7ec022b2 8GCC is free software; you can redistribute it and/or modify
79e68feb 9it under the terms of the GNU General Public License as published by
2f83c7d6 10the Free Software Foundation; either version 3, or (at your option)
79e68feb
RS
11any later version.
12
7ec022b2 13GCC is distributed in the hope that it will be useful,
79e68feb
RS
14but WITHOUT ANY WARRANTY; without even the implied warranty of
15MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16GNU General Public License for more details.
17
18You should have received a copy of the GNU General Public License
2f83c7d6
NC
19along with GCC; see the file COPYING3. If not see
20<http://www.gnu.org/licenses/>. */
79e68feb 21
79e68feb 22#include "config.h"
f5220a5d 23#include "system.h"
4977bab6
ZW
24#include "coretypes.h"
25#include "tm.h"
da932f04 26#include "tree.h"
79e68feb 27#include "rtl.h"
49ad7cfa 28#include "function.h"
79e68feb
RS
29#include "regs.h"
30#include "hard-reg-set.h"
79e68feb
RS
31#include "insn-config.h"
32#include "conditions.h"
79e68feb
RS
33#include "output.h"
34#include "insn-attr.h"
1d8eaa6b 35#include "recog.h"
718f9c0f 36#include "diagnostic-core.h"
f5220a5d 37#include "toplev.h"
6d5f49b2
RH
38#include "expr.h"
39#include "reload.h"
5505f548 40#include "tm_p.h"
672a6f42
NB
41#include "target.h"
42#include "target-def.h"
2cc07db4 43#include "debug.h"
79e68feb 44#include "flags.h"
6fb5fa3c 45#include "df.h"
b8c96320
MK
46/* ??? Need to add a dependency between m68k.o and sched-int.h. */
47#include "sched-int.h"
48#include "insn-codes.h"
75df395f 49#include "ggc.h"
79e68feb 50
a4e9467d
RZ
51enum reg_class regno_reg_class[] =
52{
53 DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
54 DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
55 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
56 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
57 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
58 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
59 ADDR_REGS
60};
61
62
/* The minimum number of integer registers that we want to save with the
   movem instruction.  Using two movel instructions instead of a single
   moveml is about 15% faster for the 68020 and 68030 at no expense in
   code size.  */
#define MIN_MOVEM_REGS 3

/* The minimum number of floating point registers that we want to save
   with the fmovem instruction.  */
#define MIN_FMOVEM_REGS 1
72
ff482c8d 73/* Structure describing stack frame layout. */
3d74bc09
BI
74struct m68k_frame
75{
76 /* Stack pointer to frame pointer offset. */
48ed72a4 77 HOST_WIDE_INT offset;
3d74bc09
BI
78
79 /* Offset of FPU registers. */
80 HOST_WIDE_INT foffset;
81
82 /* Frame size in bytes (rounded up). */
48ed72a4 83 HOST_WIDE_INT size;
3d74bc09
BI
84
85 /* Data and address register. */
48ed72a4
PB
86 int reg_no;
87 unsigned int reg_mask;
3d74bc09
BI
88
89 /* FPU registers. */
48ed72a4
PB
90 int fpu_no;
91 unsigned int fpu_mask;
3d74bc09
BI
92
93 /* Offsets relative to ARG_POINTER. */
48ed72a4
PB
94 HOST_WIDE_INT frame_pointer_offset;
95 HOST_WIDE_INT stack_pointer_offset;
3d74bc09
BI
96
97 /* Function which the above information refers to. */
98 int funcdef_no;
48ed72a4
PB
99};
100
3d74bc09
BI
101/* Current frame information calculated by m68k_compute_frame_layout(). */
102static struct m68k_frame current_frame;
103
fc2241eb
RS
104/* Structure describing an m68k address.
105
106 If CODE is UNKNOWN, the address is BASE + INDEX * SCALE + OFFSET,
107 with null fields evaluating to 0. Here:
108
109 - BASE satisfies m68k_legitimate_base_reg_p
110 - INDEX satisfies m68k_legitimate_index_reg_p
111 - OFFSET satisfies m68k_legitimate_constant_address_p
112
113 INDEX is either HImode or SImode. The other fields are SImode.
114
115 If CODE is PRE_DEC, the address is -(BASE). If CODE is POST_INC,
116 the address is (BASE)+. */
117struct m68k_address {
118 enum rtx_code code;
119 rtx base;
120 rtx index;
121 rtx offset;
122 int scale;
123};
124
b8c96320 125static int m68k_sched_adjust_cost (rtx, rtx, rtx, int);
96fcacb7 126static int m68k_sched_issue_rate (void);
b8c96320
MK
127static int m68k_sched_variable_issue (FILE *, int, rtx, int);
128static void m68k_sched_md_init_global (FILE *, int, int);
129static void m68k_sched_md_finish_global (FILE *, int);
130static void m68k_sched_md_init (FILE *, int, int);
131static void m68k_sched_dfa_pre_advance_cycle (void);
132static void m68k_sched_dfa_post_advance_cycle (void);
96fcacb7 133static int m68k_sched_first_cycle_multipass_dfa_lookahead (void);
b8c96320 134
7b5cbb57 135static bool m68k_can_eliminate (const int, const int);
c6c3dba9 136static bool m68k_legitimate_address_p (enum machine_mode, rtx, bool);
4af06170 137static bool m68k_handle_option (size_t, const char *, int);
c5387660 138static void m68k_option_override (void);
8a4a2253
BI
139static rtx find_addr_reg (rtx);
140static const char *singlemove_string (rtx *);
8a4a2253
BI
141static void m68k_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
142 HOST_WIDE_INT, tree);
8636be86 143static rtx m68k_struct_value_rtx (tree, int);
48ed72a4
PB
144static tree m68k_handle_fndecl_attribute (tree *node, tree name,
145 tree args, int flags,
146 bool *no_add_attrs);
3d74bc09 147static void m68k_compute_frame_layout (void);
48ed72a4 148static bool m68k_save_reg (unsigned int regno, bool interrupt_handler);
f7e70894 149static bool m68k_ok_for_sibcall_p (tree, tree);
75df395f 150static bool m68k_tls_symbol_p (rtx);
506d7b68 151static rtx m68k_legitimize_address (rtx, rtx, enum machine_mode);
f40751dd 152static bool m68k_rtx_costs (rtx, int, int, int *, bool);
1c445f03 153#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
511e41e5 154static bool m68k_return_in_memory (const_tree, const_tree);
1c445f03 155#endif
75df395f 156static void m68k_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
e0601576 157static void m68k_trampoline_init (rtx, tree, rtx);
079e7538 158static int m68k_return_pops_args (tree, tree, int);
7b0f476d 159static rtx m68k_delegitimize_address (rtx);
13d3961c
NF
160static void m68k_function_arg_advance (CUMULATIVE_ARGS *, enum machine_mode,
161 const_tree, bool);
162static rtx m68k_function_arg (CUMULATIVE_ARGS *, enum machine_mode,
163 const_tree, bool);
79e68feb
RS
164\f
165
/* Specify the identification number of the library being built.
   Overridden by -mshared-library-id (see m68k_handle_option).  */
const char *m68k_library_id_string = "_current_shared_library_a5_offset_";
672a6f42
NB
168\f
169/* Initialize the GCC target structure. */
301d03af
RS
170
171#if INT_OP_GROUP == INT_OP_DOT_WORD
172#undef TARGET_ASM_ALIGNED_HI_OP
173#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
174#endif
175
176#if INT_OP_GROUP == INT_OP_NO_DOT
177#undef TARGET_ASM_BYTE_OP
178#define TARGET_ASM_BYTE_OP "\tbyte\t"
179#undef TARGET_ASM_ALIGNED_HI_OP
180#define TARGET_ASM_ALIGNED_HI_OP "\tshort\t"
181#undef TARGET_ASM_ALIGNED_SI_OP
182#define TARGET_ASM_ALIGNED_SI_OP "\tlong\t"
183#endif
184
185#if INT_OP_GROUP == INT_OP_DC
186#undef TARGET_ASM_BYTE_OP
187#define TARGET_ASM_BYTE_OP "\tdc.b\t"
188#undef TARGET_ASM_ALIGNED_HI_OP
189#define TARGET_ASM_ALIGNED_HI_OP "\tdc.w\t"
190#undef TARGET_ASM_ALIGNED_SI_OP
191#define TARGET_ASM_ALIGNED_SI_OP "\tdc.l\t"
192#endif
193
194#undef TARGET_ASM_UNALIGNED_HI_OP
195#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
196#undef TARGET_ASM_UNALIGNED_SI_OP
197#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
198
c590b625
RH
199#undef TARGET_ASM_OUTPUT_MI_THUNK
200#define TARGET_ASM_OUTPUT_MI_THUNK m68k_output_mi_thunk
bdabc150 201#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
3101faab 202#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
c590b625 203
1bc7c5b6
ZW
204#undef TARGET_ASM_FILE_START_APP_OFF
205#define TARGET_ASM_FILE_START_APP_OFF true
206
506d7b68
PB
207#undef TARGET_LEGITIMIZE_ADDRESS
208#define TARGET_LEGITIMIZE_ADDRESS m68k_legitimize_address
209
b8c96320
MK
210#undef TARGET_SCHED_ADJUST_COST
211#define TARGET_SCHED_ADJUST_COST m68k_sched_adjust_cost
212
96fcacb7
MK
213#undef TARGET_SCHED_ISSUE_RATE
214#define TARGET_SCHED_ISSUE_RATE m68k_sched_issue_rate
215
b8c96320
MK
216#undef TARGET_SCHED_VARIABLE_ISSUE
217#define TARGET_SCHED_VARIABLE_ISSUE m68k_sched_variable_issue
218
219#undef TARGET_SCHED_INIT_GLOBAL
220#define TARGET_SCHED_INIT_GLOBAL m68k_sched_md_init_global
221
222#undef TARGET_SCHED_FINISH_GLOBAL
223#define TARGET_SCHED_FINISH_GLOBAL m68k_sched_md_finish_global
224
225#undef TARGET_SCHED_INIT
226#define TARGET_SCHED_INIT m68k_sched_md_init
227
228#undef TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE
229#define TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE m68k_sched_dfa_pre_advance_cycle
230
231#undef TARGET_SCHED_DFA_POST_ADVANCE_CYCLE
232#define TARGET_SCHED_DFA_POST_ADVANCE_CYCLE m68k_sched_dfa_post_advance_cycle
233
96fcacb7
MK
234#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
235#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
236 m68k_sched_first_cycle_multipass_dfa_lookahead
237
4af06170
RS
238#undef TARGET_HANDLE_OPTION
239#define TARGET_HANDLE_OPTION m68k_handle_option
240
c5387660
JM
241#undef TARGET_OPTION_OVERRIDE
242#define TARGET_OPTION_OVERRIDE m68k_option_override
243
3c50106f
RH
244#undef TARGET_RTX_COSTS
245#define TARGET_RTX_COSTS m68k_rtx_costs
246
48ed72a4
PB
247#undef TARGET_ATTRIBUTE_TABLE
248#define TARGET_ATTRIBUTE_TABLE m68k_attribute_table
249
8636be86 250#undef TARGET_PROMOTE_PROTOTYPES
586de218 251#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
8636be86
KH
252
253#undef TARGET_STRUCT_VALUE_RTX
254#define TARGET_STRUCT_VALUE_RTX m68k_struct_value_rtx
255
7ffb5e78
RS
256#undef TARGET_CANNOT_FORCE_CONST_MEM
257#define TARGET_CANNOT_FORCE_CONST_MEM m68k_illegitimate_symbolic_constant_p
258
f7e70894
RS
259#undef TARGET_FUNCTION_OK_FOR_SIBCALL
260#define TARGET_FUNCTION_OK_FOR_SIBCALL m68k_ok_for_sibcall_p
261
1c445f03
NS
262#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
263#undef TARGET_RETURN_IN_MEMORY
264#define TARGET_RETURN_IN_MEMORY m68k_return_in_memory
265#endif
266
75df395f
MK
267#ifdef HAVE_AS_TLS
268#undef TARGET_HAVE_TLS
269#define TARGET_HAVE_TLS (true)
270
271#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
272#define TARGET_ASM_OUTPUT_DWARF_DTPREL m68k_output_dwarf_dtprel
273#endif
274
c6c3dba9
PB
275#undef TARGET_LEGITIMATE_ADDRESS_P
276#define TARGET_LEGITIMATE_ADDRESS_P m68k_legitimate_address_p
277
7b5cbb57
AS
278#undef TARGET_CAN_ELIMINATE
279#define TARGET_CAN_ELIMINATE m68k_can_eliminate
280
e0601576
RH
281#undef TARGET_TRAMPOLINE_INIT
282#define TARGET_TRAMPOLINE_INIT m68k_trampoline_init
283
079e7538
NF
284#undef TARGET_RETURN_POPS_ARGS
285#define TARGET_RETURN_POPS_ARGS m68k_return_pops_args
286
7b0f476d
AS
287#undef TARGET_DELEGITIMIZE_ADDRESS
288#define TARGET_DELEGITIMIZE_ADDRESS m68k_delegitimize_address
289
13d3961c
NF
290#undef TARGET_FUNCTION_ARG
291#define TARGET_FUNCTION_ARG m68k_function_arg
292
293#undef TARGET_FUNCTION_ARG_ADVANCE
294#define TARGET_FUNCTION_ARG_ADVANCE m68k_function_arg_advance
295
48ed72a4
PB
296static const struct attribute_spec m68k_attribute_table[] =
297{
298 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
2bccb817 299 { "interrupt", 0, 0, true, false, false, m68k_handle_fndecl_attribute },
48ed72a4 300 { "interrupt_handler", 0, 0, true, false, false, m68k_handle_fndecl_attribute },
a4242737 301 { "interrupt_thread", 0, 0, true, false, false, m68k_handle_fndecl_attribute },
48ed72a4
PB
302 { NULL, 0, 0, false, false, false, NULL }
303};
304
f6897b10 305struct gcc_target targetm = TARGET_INITIALIZER;
672a6f42 306\f
/* Base flags for 68k ISAs.  */
#define FL_FOR_isa_00    FL_ISA_68000
#define FL_FOR_isa_10    (FL_FOR_isa_00 | FL_ISA_68010)
/* FL_68881 controls the default setting of -m68881.  gcc has traditionally
   generated 68881 code for 68020 and 68030 targets unless explicitly told
   not to.  */
#define FL_FOR_isa_20    (FL_FOR_isa_10 | FL_ISA_68020 \
			  | FL_BITFIELD | FL_68881)
#define FL_FOR_isa_40    (FL_FOR_isa_20 | FL_ISA_68040)
#define FL_FOR_isa_cpu32 (FL_FOR_isa_10 | FL_ISA_68020)

/* Base flags for ColdFire ISAs.  */
#define FL_FOR_isa_a     (FL_COLDFIRE | FL_ISA_A)
#define FL_FOR_isa_aplus (FL_FOR_isa_a | FL_ISA_APLUS | FL_CF_USP)
/* Note ISA_B doesn't necessarily include USP (user stack pointer) support.  */
#define FL_FOR_isa_b     (FL_FOR_isa_a | FL_ISA_B | FL_CF_HWDIV)
/* ISA_C is not upwardly compatible with ISA_B.  */
#define FL_FOR_isa_c     (FL_FOR_isa_a | FL_ISA_C | FL_CF_USP)

/* Enumeration of all instruction-set variants handled by this port.  */
enum m68k_isa
{
  /* Traditional 68000 instruction sets.  */
  isa_00,
  isa_10,
  isa_20,
  isa_40,
  isa_cpu32,
  /* ColdFire instruction set variants.  */
  isa_a,
  isa_aplus,
  isa_b,
  isa_c,
  isa_max
};
341
342/* Information about one of the -march, -mcpu or -mtune arguments. */
343struct m68k_target_selection
344{
345 /* The argument being described. */
346 const char *name;
347
348 /* For -mcpu, this is the device selected by the option.
349 For -mtune and -march, it is a representative device
350 for the microarchitecture or ISA respectively. */
351 enum target_device device;
352
353 /* The M68K_DEVICE fields associated with DEVICE. See the comment
354 in m68k-devices.def for details. FAMILY is only valid for -mcpu. */
355 const char *family;
356 enum uarch_type microarch;
357 enum m68k_isa isa;
358 unsigned long flags;
359};
360
361/* A list of all devices in m68k-devices.def. Used for -mcpu selection. */
362static const struct m68k_target_selection all_devices[] =
363{
364#define M68K_DEVICE(NAME,ENUM_VALUE,FAMILY,MULTILIB,MICROARCH,ISA,FLAGS) \
365 { NAME, ENUM_VALUE, FAMILY, u##MICROARCH, ISA, FLAGS | FL_FOR_##ISA },
366#include "m68k-devices.def"
367#undef M68K_DEVICE
368 { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
369};
370
371/* A list of all ISAs, mapping each one to a representative device.
372 Used for -march selection. */
373static const struct m68k_target_selection all_isas[] =
374{
375 { "68000", m68000, NULL, u68000, isa_00, FL_FOR_isa_00 },
376 { "68010", m68010, NULL, u68010, isa_10, FL_FOR_isa_10 },
377 { "68020", m68020, NULL, u68020, isa_20, FL_FOR_isa_20 },
378 { "68030", m68030, NULL, u68030, isa_20, FL_FOR_isa_20 },
379 { "68040", m68040, NULL, u68040, isa_40, FL_FOR_isa_40 },
380 { "68060", m68060, NULL, u68060, isa_40, FL_FOR_isa_40 },
381 { "cpu32", cpu32, NULL, ucpu32, isa_20, FL_FOR_isa_cpu32 },
382 { "isaa", mcf5206e, NULL, ucfv2, isa_a, (FL_FOR_isa_a
383 | FL_CF_HWDIV) },
384 { "isaaplus", mcf5271, NULL, ucfv2, isa_aplus, (FL_FOR_isa_aplus
385 | FL_CF_HWDIV) },
386 { "isab", mcf5407, NULL, ucfv4, isa_b, FL_FOR_isa_b },
8c5c99dc
MK
387 { "isac", unk_device, NULL, ucfv4, isa_c, (FL_FOR_isa_c
388 | FL_CF_HWDIV) },
900ec02d
JB
389 { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
390};
391
392/* A list of all microarchitectures, mapping each one to a representative
393 device. Used for -mtune selection. */
394static const struct m68k_target_selection all_microarchs[] =
395{
396 { "68000", m68000, NULL, u68000, isa_00, FL_FOR_isa_00 },
397 { "68010", m68010, NULL, u68010, isa_10, FL_FOR_isa_10 },
398 { "68020", m68020, NULL, u68020, isa_20, FL_FOR_isa_20 },
399 { "68020-40", m68020, NULL, u68020_40, isa_20, FL_FOR_isa_20 },
400 { "68020-60", m68020, NULL, u68020_60, isa_20, FL_FOR_isa_20 },
401 { "68030", m68030, NULL, u68030, isa_20, FL_FOR_isa_20 },
402 { "68040", m68040, NULL, u68040, isa_40, FL_FOR_isa_40 },
403 { "68060", m68060, NULL, u68060, isa_40, FL_FOR_isa_40 },
404 { "cpu32", cpu32, NULL, ucpu32, isa_20, FL_FOR_isa_cpu32 },
8c5c99dc 405 { "cfv1", mcf51qe, NULL, ucfv1, isa_c, FL_FOR_isa_c },
900ec02d
JB
406 { "cfv2", mcf5206, NULL, ucfv2, isa_a, FL_FOR_isa_a },
407 { "cfv3", mcf5307, NULL, ucfv3, isa_a, (FL_FOR_isa_a
408 | FL_CF_HWDIV) },
409 { "cfv4", mcf5407, NULL, ucfv4, isa_b, FL_FOR_isa_b },
410 { "cfv4e", mcf547x, NULL, ucfv4e, isa_b, (FL_FOR_isa_b
411 | FL_CF_USP
412 | FL_CF_EMAC
413 | FL_CF_FPU) },
414 { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
415};
416\f
417/* The entries associated with the -mcpu, -march and -mtune settings,
418 or null for options that have not been used. */
419const struct m68k_target_selection *m68k_cpu_entry;
420const struct m68k_target_selection *m68k_arch_entry;
421const struct m68k_target_selection *m68k_tune_entry;
422
423/* Which CPU we are generating code for. */
424enum target_device m68k_cpu;
425
426/* Which microarchitecture to tune for. */
427enum uarch_type m68k_tune;
428
429/* Which FPU to use. */
430enum fpu_type m68k_fpu;
4af06170 431
900ec02d
JB
432/* The set of FL_* flags that apply to the target processor. */
433unsigned int m68k_cpu_flags;
29ca003a 434
03b3e271
KH
435/* The set of FL_* flags that apply to the processor to be tuned for. */
436unsigned int m68k_tune_flags;
437
29ca003a
RS
438/* Asm templates for calling or jumping to an arbitrary symbolic address,
439 or NULL if such calls or jumps are not supported. The address is held
440 in operand 0. */
441const char *m68k_symbolic_call;
442const char *m68k_symbolic_jump;
c47b0cb4
MK
443
444/* Enum variable that corresponds to m68k_symbolic_call values. */
445enum M68K_SYMBOLIC_CALL m68k_symbolic_call_var;
446
900ec02d
JB
447\f
448/* See whether TABLE has an entry with name NAME. Return true and
449 store the entry in *ENTRY if so, otherwise return false and
450 leave *ENTRY alone. */
451
452static bool
453m68k_find_selection (const struct m68k_target_selection **entry,
454 const struct m68k_target_selection *table,
455 const char *name)
456{
457 size_t i;
458
459 for (i = 0; table[i].name; i++)
460 if (strcmp (table[i].name, name) == 0)
461 {
462 *entry = table + i;
463 return true;
464 }
465 return false;
466}
4af06170
RS
467
468/* Implement TARGET_HANDLE_OPTION. */
469
470static bool
471m68k_handle_option (size_t code, const char *arg, int value)
472{
473 switch (code)
474 {
900ec02d
JB
475 case OPT_march_:
476 return m68k_find_selection (&m68k_arch_entry, all_isas, arg);
477
478 case OPT_mcpu_:
479 return m68k_find_selection (&m68k_cpu_entry, all_devices, arg);
480
481 case OPT_mtune_:
482 return m68k_find_selection (&m68k_tune_entry, all_microarchs, arg);
483
4af06170 484 case OPT_m5200:
900ec02d 485 return m68k_find_selection (&m68k_cpu_entry, all_devices, "5206");
4af06170
RS
486
487 case OPT_m5206e:
900ec02d 488 return m68k_find_selection (&m68k_cpu_entry, all_devices, "5206e");
4af06170
RS
489
490 case OPT_m528x:
900ec02d 491 return m68k_find_selection (&m68k_cpu_entry, all_devices, "528x");
4af06170
RS
492
493 case OPT_m5307:
900ec02d 494 return m68k_find_selection (&m68k_cpu_entry, all_devices, "5307");
4af06170
RS
495
496 case OPT_m5407:
900ec02d 497 return m68k_find_selection (&m68k_cpu_entry, all_devices, "5407");
4af06170 498
dcc21c4c 499 case OPT_mcfv4e:
900ec02d 500 return m68k_find_selection (&m68k_cpu_entry, all_devices, "547x");
dcc21c4c 501
4af06170
RS
502 case OPT_m68000:
503 case OPT_mc68000:
900ec02d 504 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68000");
4af06170 505
3197c489 506 case OPT_m68010:
900ec02d 507 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68010");
3197c489 508
4af06170
RS
509 case OPT_m68020:
510 case OPT_mc68020:
900ec02d 511 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68020");
4af06170
RS
512
513 case OPT_m68020_40:
900ec02d
JB
514 return (m68k_find_selection (&m68k_tune_entry, all_microarchs,
515 "68020-40")
516 && m68k_find_selection (&m68k_cpu_entry, all_devices, "68020"));
4af06170
RS
517
518 case OPT_m68020_60:
900ec02d
JB
519 return (m68k_find_selection (&m68k_tune_entry, all_microarchs,
520 "68020-60")
521 && m68k_find_selection (&m68k_cpu_entry, all_devices, "68020"));
4af06170
RS
522
523 case OPT_m68030:
900ec02d 524 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68030");
4af06170
RS
525
526 case OPT_m68040:
900ec02d 527 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68040");
4af06170
RS
528
529 case OPT_m68060:
900ec02d 530 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68060");
4af06170
RS
531
532 case OPT_m68302:
900ec02d 533 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68302");
4af06170
RS
534
535 case OPT_m68332:
536 case OPT_mcpu32:
900ec02d 537 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68332");
4af06170
RS
538
539 case OPT_mshared_library_id_:
540 if (value > MAX_LIBRARY_ID)
541 error ("-mshared-library-id=%s is not between 0 and %d",
542 arg, MAX_LIBRARY_ID);
543 else
5ead67f6
KG
544 {
545 char *tmp;
546 asprintf (&tmp, "%d", (value * -4) - 4);
547 m68k_library_id_string = tmp;
548 }
4af06170
RS
549 return true;
550
551 default:
552 return true;
553 }
554}
555
c5387660 556/* Implement TARGET_OPTION_OVERRIDE. */
ef1dbfb0 557
c5387660
JM
558static void
559m68k_option_override (void)
ef1dbfb0 560{
900ec02d
JB
561 const struct m68k_target_selection *entry;
562 unsigned long target_mask;
563
564 /* User can choose:
565
566 -mcpu=
567 -march=
568 -mtune=
569
570 -march=ARCH should generate code that runs any processor
571 implementing architecture ARCH. -mcpu=CPU should override -march
572 and should generate code that runs on processor CPU, making free
573 use of any instructions that CPU understands. -mtune=UARCH applies
9f5ed61a 574 on top of -mcpu or -march and optimizes the code for UARCH. It does
900ec02d
JB
575 not change the target architecture. */
576 if (m68k_cpu_entry)
577 {
578 /* Complain if the -march setting is for a different microarchitecture,
579 or includes flags that the -mcpu setting doesn't. */
580 if (m68k_arch_entry
581 && (m68k_arch_entry->microarch != m68k_cpu_entry->microarch
582 || (m68k_arch_entry->flags & ~m68k_cpu_entry->flags) != 0))
583 warning (0, "-mcpu=%s conflicts with -march=%s",
584 m68k_cpu_entry->name, m68k_arch_entry->name);
585
586 entry = m68k_cpu_entry;
587 }
588 else
589 entry = m68k_arch_entry;
590
591 if (!entry)
592 entry = all_devices + TARGET_CPU_DEFAULT;
593
594 m68k_cpu_flags = entry->flags;
595
596 /* Use the architecture setting to derive default values for
597 certain flags. */
598 target_mask = 0;
8785d88c
KH
599
600 /* ColdFire is lenient about alignment. */
601 if (!TARGET_COLDFIRE)
602 target_mask |= MASK_STRICT_ALIGNMENT;
603
900ec02d
JB
604 if ((m68k_cpu_flags & FL_BITFIELD) != 0)
605 target_mask |= MASK_BITFIELD;
606 if ((m68k_cpu_flags & FL_CF_HWDIV) != 0)
607 target_mask |= MASK_CF_HWDIV;
608 if ((m68k_cpu_flags & (FL_68881 | FL_CF_FPU)) != 0)
609 target_mask |= MASK_HARD_FLOAT;
610 target_flags |= target_mask & ~target_flags_explicit;
611
612 /* Set the directly-usable versions of the -mcpu and -mtune settings. */
613 m68k_cpu = entry->device;
614 if (m68k_tune_entry)
03b3e271
KH
615 {
616 m68k_tune = m68k_tune_entry->microarch;
617 m68k_tune_flags = m68k_tune_entry->flags;
618 }
900ec02d
JB
619#ifdef M68K_DEFAULT_TUNE
620 else if (!m68k_cpu_entry && !m68k_arch_entry)
03b3e271
KH
621 {
622 enum target_device dev;
623 dev = all_microarchs[M68K_DEFAULT_TUNE].device;
624 m68k_tune_flags = all_devices[dev]->flags;
625 }
900ec02d
JB
626#endif
627 else
03b3e271
KH
628 {
629 m68k_tune = entry->microarch;
630 m68k_tune_flags = entry->flags;
631 }
900ec02d
JB
632
633 /* Set the type of FPU. */
634 m68k_fpu = (!TARGET_HARD_FLOAT ? FPUTYPE_NONE
635 : (m68k_cpu_flags & FL_COLDFIRE) != 0 ? FPUTYPE_COLDFIRE
636 : FPUTYPE_68881);
637
a2ef3db7
BI
638 /* Sanity check to ensure that msep-data and mid-sahred-library are not
639 * both specified together. Doing so simply doesn't make sense.
640 */
641 if (TARGET_SEP_DATA && TARGET_ID_SHARED_LIBRARY)
642 error ("cannot specify both -msep-data and -mid-shared-library");
643
644 /* If we're generating code for a separate A5 relative data segment,
645 * we've got to enable -fPIC as well. This might be relaxable to
646 * -fpic but it hasn't been tested properly.
647 */
648 if (TARGET_SEP_DATA || TARGET_ID_SHARED_LIBRARY)
649 flag_pic = 2;
650
abe92a04
RS
651 /* -mpcrel -fPIC uses 32-bit pc-relative displacements. Raise an
652 error if the target does not support them. */
653 if (TARGET_PCREL && !TARGET_68020 && flag_pic == 2)
654 error ("-mpcrel -fPIC is not currently supported on selected cpu");
adf2ac37
RH
655
656 /* ??? A historic way of turning on pic, or is this intended to
657 be an embedded thing that doesn't have the same name binding
658 significance that it does on hosted ELF systems? */
659 if (TARGET_PCREL && flag_pic == 0)
660 flag_pic = 1;
661
29ca003a
RS
662 if (!flag_pic)
663 {
c47b0cb4
MK
664 m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_JSR;
665
29ca003a 666 m68k_symbolic_jump = "jra %a0";
29ca003a
RS
667 }
668 else if (TARGET_ID_SHARED_LIBRARY)
669 /* All addresses must be loaded from the GOT. */
670 ;
4e2b26aa 671 else if (TARGET_68020 || TARGET_ISAB || TARGET_ISAC)
29ca003a
RS
672 {
673 if (TARGET_PCREL)
c47b0cb4 674 m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_C;
4e2b26aa 675 else
c47b0cb4
MK
676 m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_P;
677
4e2b26aa
NS
678 if (TARGET_ISAC)
679 /* No unconditional long branch */;
680 else if (TARGET_PCREL)
da398bb5 681 m68k_symbolic_jump = "bra%.l %c0";
29ca003a 682 else
da398bb5 683 m68k_symbolic_jump = "bra%.l %p0";
29ca003a
RS
684 /* Turn off function cse if we are doing PIC. We always want
685 function call to be done as `bsr foo@PLTPC'. */
686 /* ??? It's traditional to do this for -mpcrel too, but it isn't
687 clear how intentional that is. */
688 flag_no_function_cse = 1;
689 }
adf2ac37 690
c47b0cb4
MK
691 switch (m68k_symbolic_call_var)
692 {
693 case M68K_SYMBOLIC_CALL_JSR:
c47b0cb4 694 m68k_symbolic_call = "jsr %a0";
c47b0cb4
MK
695 break;
696
697 case M68K_SYMBOLIC_CALL_BSR_C:
da398bb5 698 m68k_symbolic_call = "bsr%.l %c0";
c47b0cb4
MK
699 break;
700
701 case M68K_SYMBOLIC_CALL_BSR_P:
da398bb5 702 m68k_symbolic_call = "bsr%.l %p0";
c47b0cb4
MK
703 break;
704
705 case M68K_SYMBOLIC_CALL_NONE:
706 gcc_assert (m68k_symbolic_call == NULL);
707 break;
708
709 default:
710 gcc_unreachable ();
711 }
712
aaca7021
RZ
713#ifndef ASM_OUTPUT_ALIGN_WITH_NOP
714 if (align_labels > 2)
715 {
716 warning (0, "-falign-labels=%d is not supported", align_labels);
717 align_labels = 0;
718 }
719 if (align_loops > 2)
720 {
721 warning (0, "-falign-loops=%d is not supported", align_loops);
722 align_loops = 0;
723 }
724#endif
725
adf2ac37 726 SUBTARGET_OVERRIDE_OPTIONS;
c47b0cb4
MK
727
728 /* Setup scheduling options. */
826fadba
MK
729 if (TUNE_CFV1)
730 m68k_sched_cpu = CPU_CFV1;
731 else if (TUNE_CFV2)
732 m68k_sched_cpu = CPU_CFV2;
733 else if (TUNE_CFV3)
734 m68k_sched_cpu = CPU_CFV3;
96fcacb7
MK
735 else if (TUNE_CFV4)
736 m68k_sched_cpu = CPU_CFV4;
c47b0cb4
MK
737 else
738 {
739 m68k_sched_cpu = CPU_UNKNOWN;
740 flag_schedule_insns = 0;
741 flag_schedule_insns_after_reload = 0;
742 flag_modulo_sched = 0;
743 }
826fadba
MK
744
745 if (m68k_sched_cpu != CPU_UNKNOWN)
746 {
747 if ((m68k_cpu_flags & (FL_CF_EMAC | FL_CF_EMAC_B)) != 0)
748 m68k_sched_mac = MAC_CF_EMAC;
749 else if ((m68k_cpu_flags & FL_CF_MAC) != 0)
750 m68k_sched_mac = MAC_CF_MAC;
751 else
752 m68k_sched_mac = MAC_NO;
753 }
ef1dbfb0 754}
7eb4f044
NS
755
756/* Generate a macro of the form __mPREFIX_cpu_NAME, where PREFIX is the
757 given argument and NAME is the argument passed to -mcpu. Return NULL
758 if -mcpu was not passed. */
759
760const char *
761m68k_cpp_cpu_ident (const char *prefix)
762{
763 if (!m68k_cpu_entry)
764 return NULL;
765 return concat ("__m", prefix, "_cpu_", m68k_cpu_entry->name, NULL);
766}
767
768/* Generate a macro of the form __mPREFIX_family_NAME, where PREFIX is the
769 given argument and NAME is the name of the representative device for
770 the -mcpu argument's family. Return NULL if -mcpu was not passed. */
771
772const char *
773m68k_cpp_cpu_family (const char *prefix)
774{
775 if (!m68k_cpu_entry)
776 return NULL;
777 return concat ("__m", prefix, "_family_", m68k_cpu_entry->family, NULL);
778}
79e68feb 779\f
2bccb817
KH
780/* Return m68k_fk_interrupt_handler if FUNC has an "interrupt" or
781 "interrupt_handler" attribute and interrupt_thread if FUNC has an
782 "interrupt_thread" attribute. Otherwise, return
783 m68k_fk_normal_function. */
a4242737
KH
784
785enum m68k_function_kind
786m68k_get_function_kind (tree func)
48ed72a4
PB
787{
788 tree a;
789
fa157b28
NS
790 gcc_assert (TREE_CODE (func) == FUNCTION_DECL);
791
2bccb817
KH
792 a = lookup_attribute ("interrupt", DECL_ATTRIBUTES (func));
793 if (a != NULL_TREE)
794 return m68k_fk_interrupt_handler;
795
48ed72a4 796 a = lookup_attribute ("interrupt_handler", DECL_ATTRIBUTES (func));
a4242737
KH
797 if (a != NULL_TREE)
798 return m68k_fk_interrupt_handler;
799
800 a = lookup_attribute ("interrupt_thread", DECL_ATTRIBUTES (func));
801 if (a != NULL_TREE)
802 return m68k_fk_interrupt_thread;
803
804 return m68k_fk_normal_function;
48ed72a4
PB
805}
806
807/* Handle an attribute requiring a FUNCTION_DECL; arguments as in
808 struct attribute_spec.handler. */
809static tree
810m68k_handle_fndecl_attribute (tree *node, tree name,
811 tree args ATTRIBUTE_UNUSED,
812 int flags ATTRIBUTE_UNUSED,
813 bool *no_add_attrs)
814{
815 if (TREE_CODE (*node) != FUNCTION_DECL)
816 {
29d08eba
JM
817 warning (OPT_Wattributes, "%qE attribute only applies to functions",
818 name);
48ed72a4
PB
819 *no_add_attrs = true;
820 }
821
a4242737
KH
822 if (m68k_get_function_kind (*node) != m68k_fk_normal_function)
823 {
824 error ("multiple interrupt attributes not allowed");
825 *no_add_attrs = true;
826 }
827
828 if (!TARGET_FIDOA
829 && !strcmp (IDENTIFIER_POINTER (name), "interrupt_thread"))
830 {
831 error ("interrupt_thread is available only on fido");
832 *no_add_attrs = true;
833 }
834
48ed72a4
PB
835 return NULL_TREE;
836}
860c4900
BI
837
838static void
3d74bc09 839m68k_compute_frame_layout (void)
860c4900
BI
840{
841 int regno, saved;
a40ed0f3 842 unsigned int mask;
a4242737
KH
843 enum m68k_function_kind func_kind =
844 m68k_get_function_kind (current_function_decl);
845 bool interrupt_handler = func_kind == m68k_fk_interrupt_handler;
846 bool interrupt_thread = func_kind == m68k_fk_interrupt_thread;
860c4900 847
3d74bc09
BI
848 /* Only compute the frame once per function.
849 Don't cache information until reload has been completed. */
850 if (current_frame.funcdef_no == current_function_funcdef_no
851 && reload_completed)
852 return;
853
854 current_frame.size = (get_frame_size () + 3) & -4;
860c4900 855
a40ed0f3 856 mask = saved = 0;
a4242737
KH
857
858 /* Interrupt thread does not need to save any register. */
859 if (!interrupt_thread)
860 for (regno = 0; regno < 16; regno++)
861 if (m68k_save_reg (regno, interrupt_handler))
862 {
863 mask |= 1 << (regno - D0_REG);
864 saved++;
865 }
3d74bc09
BI
866 current_frame.offset = saved * 4;
867 current_frame.reg_no = saved;
868 current_frame.reg_mask = mask;
860c4900 869
57047680 870 current_frame.foffset = 0;
a40ed0f3 871 mask = saved = 0;
dcc21c4c 872 if (TARGET_HARD_FLOAT)
860c4900 873 {
a4242737
KH
874 /* Interrupt thread does not need to save any register. */
875 if (!interrupt_thread)
876 for (regno = 16; regno < 24; regno++)
877 if (m68k_save_reg (regno, interrupt_handler))
878 {
879 mask |= 1 << (regno - FP0_REG);
880 saved++;
881 }
dcc21c4c 882 current_frame.foffset = saved * TARGET_FP_REG_SIZE;
3d74bc09 883 current_frame.offset += current_frame.foffset;
860c4900 884 }
57047680
GN
885 current_frame.fpu_no = saved;
886 current_frame.fpu_mask = mask;
3d74bc09
BI
887
888 /* Remember what function this frame refers to. */
889 current_frame.funcdef_no = current_function_funcdef_no;
860c4900
BI
890}
891
7b5cbb57
AS
892/* Worker function for TARGET_CAN_ELIMINATE. */
893
894bool
895m68k_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
896{
897 return (to == STACK_POINTER_REGNUM ? ! frame_pointer_needed : true);
898}
899
860c4900
BI
900HOST_WIDE_INT
901m68k_initial_elimination_offset (int from, int to)
902{
42b67c06
PB
903 int argptr_offset;
904 /* The arg pointer points 8 bytes before the start of the arguments,
905 as defined by FIRST_PARM_OFFSET. This makes it coincident with the
906 frame pointer in most frames. */
907 argptr_offset = frame_pointer_needed ? 0 : UNITS_PER_WORD;
860c4900 908 if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
42b67c06 909 return argptr_offset;
860c4900 910
3d74bc09 911 m68k_compute_frame_layout ();
860c4900 912
4761e388
NS
913 gcc_assert (to == STACK_POINTER_REGNUM);
914 switch (from)
915 {
a0a7fbc9 916 case ARG_POINTER_REGNUM:
42b67c06 917 return current_frame.offset + current_frame.size - argptr_offset;
4761e388
NS
918 case FRAME_POINTER_REGNUM:
919 return current_frame.offset + current_frame.size;
920 default:
921 gcc_unreachable ();
922 }
860c4900
BI
923}
924
97c55091
GN
925/* Refer to the array `regs_ever_live' to determine which registers
926 to save; `regs_ever_live[I]' is nonzero if register number I
927 is ever used in the function. This function is responsible for
928 knowing which registers should not be saved even if used.
929 Return true if we need to save REGNO. */
930
48ed72a4
PB
931static bool
932m68k_save_reg (unsigned int regno, bool interrupt_handler)
2cff4a6e 933{
4ab870f5 934 if (flag_pic && regno == PIC_REG)
b86ba8a3 935 {
e3b5732b 936 if (crtl->saves_all_registers)
afcb440c 937 return true;
e3b5732b 938 if (crtl->uses_pic_offset_table)
b86ba8a3 939 return true;
6357eb0d
RS
940 /* Reload may introduce constant pool references into a function
941 that thitherto didn't need a PIC register. Note that the test
942 above will not catch that case because we will only set
e3b5732b 943 crtl->uses_pic_offset_table when emitting
6357eb0d 944 the address reloads. */
e3b5732b 945 if (crtl->uses_const_pool)
6357eb0d 946 return true;
b86ba8a3 947 }
2cff4a6e 948
e3b5732b 949 if (crtl->calls_eh_return)
2cff4a6e
AS
950 {
951 unsigned int i;
952 for (i = 0; ; i++)
953 {
954 unsigned int test = EH_RETURN_DATA_REGNO (i);
955 if (test == INVALID_REGNUM)
956 break;
957 if (test == regno)
48ed72a4 958 return true;
2cff4a6e
AS
959 }
960 }
961
48ed72a4
PB
962 /* Fixed regs we never touch. */
963 if (fixed_regs[regno])
964 return false;
965
966 /* The frame pointer (if it is such) is handled specially. */
967 if (regno == FRAME_POINTER_REGNUM && frame_pointer_needed)
968 return false;
969
970 /* Interrupt handlers must also save call_used_regs
971 if they are live or when calling nested functions. */
972 if (interrupt_handler)
a0a7fbc9 973 {
6fb5fa3c 974 if (df_regs_ever_live_p (regno))
a0a7fbc9 975 return true;
48ed72a4 976
a0a7fbc9
AS
977 if (!current_function_is_leaf && call_used_regs[regno])
978 return true;
979 }
48ed72a4
PB
980
981 /* Never need to save registers that aren't touched. */
6fb5fa3c 982 if (!df_regs_ever_live_p (regno))
48ed72a4
PB
983 return false;
984
b2e08ed4 985 /* Otherwise save everything that isn't call-clobbered. */
48ed72a4 986 return !call_used_regs[regno];
2cff4a6e
AS
987}
988
a40ed0f3
KH
989/* Emit RTL for a MOVEM or FMOVEM instruction. BASE + OFFSET represents
990 the lowest memory address. COUNT is the number of registers to be
991 moved, with register REGNO + I being moved if bit I of MASK is set.
992 STORE_P specifies the direction of the move and ADJUST_STACK_P says
993 whether or not this is pre-decrement (if STORE_P) or post-increment
994 (if !STORE_P) operation. */
995
996static rtx
997m68k_emit_movem (rtx base, HOST_WIDE_INT offset,
998 unsigned int count, unsigned int regno,
999 unsigned int mask, bool store_p, bool adjust_stack_p)
1000{
1001 int i;
1002 rtx body, addr, src, operands[2];
1003 enum machine_mode mode;
1004
1005 body = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (adjust_stack_p + count));
1006 mode = reg_raw_mode[regno];
1007 i = 0;
1008
1009 if (adjust_stack_p)
1010 {
1011 src = plus_constant (base, (count
1012 * GET_MODE_SIZE (mode)
1013 * (HOST_WIDE_INT) (store_p ? -1 : 1)));
1014 XVECEXP (body, 0, i++) = gen_rtx_SET (VOIDmode, base, src);
1015 }
1016
1017 for (; mask != 0; mask >>= 1, regno++)
1018 if (mask & 1)
1019 {
1020 addr = plus_constant (base, offset);
1021 operands[!store_p] = gen_frame_mem (mode, addr);
1022 operands[store_p] = gen_rtx_REG (mode, regno);
1023 XVECEXP (body, 0, i++)
1024 = gen_rtx_SET (VOIDmode, operands[0], operands[1]);
1025 offset += GET_MODE_SIZE (mode);
1026 }
1027 gcc_assert (i == XVECLEN (body, 0));
1028
1029 return emit_insn (body);
1030}
1031
1032/* Make INSN a frame-related instruction. */
79e68feb 1033
08c148a8 1034static void
a40ed0f3
KH
1035m68k_set_frame_related (rtx insn)
1036{
1037 rtx body;
1038 int i;
1039
1040 RTX_FRAME_RELATED_P (insn) = 1;
1041 body = PATTERN (insn);
1042 if (GET_CODE (body) == PARALLEL)
1043 for (i = 0; i < XVECLEN (body, 0); i++)
1044 RTX_FRAME_RELATED_P (XVECEXP (body, 0, i)) = 1;
1045}
1046
1047/* Emit RTL for the "prologue" define_expand. */
1048
1049void
1050m68k_expand_prologue (void)
79e68feb 1051{
860c4900 1052 HOST_WIDE_INT fsize_with_regs;
2dc8bd76 1053 rtx limit, src, dest;
3d74bc09 1054
a40ed0f3 1055 m68k_compute_frame_layout ();
3d74bc09 1056
a157febd
GK
1057 /* If the stack limit is a symbol, we can check it here,
1058 before actually allocating the space. */
e3b5732b 1059 if (crtl->limit_stack
a157febd 1060 && GET_CODE (stack_limit_rtx) == SYMBOL_REF)
a40ed0f3
KH
1061 {
1062 limit = plus_constant (stack_limit_rtx, current_frame.size + 4);
1063 if (!LEGITIMATE_CONSTANT_P (limit))
1064 {
1065 emit_move_insn (gen_rtx_REG (Pmode, D0_REG), limit);
1066 limit = gen_rtx_REG (Pmode, D0_REG);
1067 }
f90b7a5a
PB
1068 emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode,
1069 stack_pointer_rtx, limit),
1070 stack_pointer_rtx, limit,
1071 const1_rtx));
a40ed0f3 1072 }
79e68feb 1073
a89e3f21 1074 fsize_with_regs = current_frame.size;
dcc21c4c
PB
1075 if (TARGET_COLDFIRE)
1076 {
a40ed0f3
KH
1077 /* ColdFire's move multiple instructions do not allow pre-decrement
1078 addressing. Add the size of movem saves to the initial stack
1079 allocation instead. */
1080 if (current_frame.reg_no >= MIN_MOVEM_REGS)
1081 fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
1082 if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
1083 fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
dcc21c4c 1084 }
860c4900 1085
79e68feb
RS
1086 if (frame_pointer_needed)
1087 {
a40ed0f3 1088 if (fsize_with_regs == 0 && TUNE_68040)
79e68feb 1089 {
a40ed0f3
KH
1090 /* On the 68040, two separate moves are faster than link.w 0. */
1091 dest = gen_frame_mem (Pmode,
1092 gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
1093 m68k_set_frame_related (emit_move_insn (dest, frame_pointer_rtx));
1094 m68k_set_frame_related (emit_move_insn (frame_pointer_rtx,
1095 stack_pointer_rtx));
79e68feb 1096 }
a40ed0f3
KH
1097 else if (fsize_with_regs < 0x8000 || TARGET_68020)
1098 m68k_set_frame_related
1099 (emit_insn (gen_link (frame_pointer_rtx,
1100 GEN_INT (-4 - fsize_with_regs))));
d9e88af0 1101 else
a40ed0f3
KH
1102 {
1103 m68k_set_frame_related
1104 (emit_insn (gen_link (frame_pointer_rtx, GEN_INT (-4))));
1105 m68k_set_frame_related
1106 (emit_insn (gen_addsi3 (stack_pointer_rtx,
1107 stack_pointer_rtx,
1108 GEN_INT (-fsize_with_regs))));
1109 }
96fcacb7
MK
1110
1111 /* If the frame pointer is needed, emit a special barrier that
1112 will prevent the scheduler from moving stores to the frame
1113 before the stack adjustment. */
1114 emit_insn (gen_stack_tie (stack_pointer_rtx, frame_pointer_rtx));
d9e88af0 1115 }
a40ed0f3
KH
1116 else if (fsize_with_regs != 0)
1117 m68k_set_frame_related
1118 (emit_insn (gen_addsi3 (stack_pointer_rtx,
1119 stack_pointer_rtx,
1120 GEN_INT (-fsize_with_regs))));
860c4900 1121
57047680 1122 if (current_frame.fpu_mask)
79e68feb 1123 {
a40ed0f3 1124 gcc_assert (current_frame.fpu_no >= MIN_FMOVEM_REGS);
dcc21c4c 1125 if (TARGET_68881)
a40ed0f3
KH
1126 m68k_set_frame_related
1127 (m68k_emit_movem (stack_pointer_rtx,
1128 current_frame.fpu_no * -GET_MODE_SIZE (XFmode),
1129 current_frame.fpu_no, FP0_REG,
1130 current_frame.fpu_mask, true, true));
dcc21c4c
PB
1131 else
1132 {
1133 int offset;
1134
a40ed0f3
KH
1135 /* If we're using moveml to save the integer registers,
1136 the stack pointer will point to the bottom of the moveml
1137 save area. Find the stack offset of the first FP register. */
1138 if (current_frame.reg_no < MIN_MOVEM_REGS)
dcc21c4c
PB
1139 offset = 0;
1140 else
a40ed0f3
KH
1141 offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
1142 m68k_set_frame_related
1143 (m68k_emit_movem (stack_pointer_rtx, offset,
1144 current_frame.fpu_no, FP0_REG,
1145 current_frame.fpu_mask, true, false));
f277471f 1146 }
79e68feb 1147 }
99df2465 1148
01bbf777 1149 /* If the stack limit is not a symbol, check it here.
a157febd 1150 This has the disadvantage that it may be too late... */
e3b5732b 1151 if (crtl->limit_stack)
a157febd
GK
1152 {
1153 if (REG_P (stack_limit_rtx))
f90b7a5a
PB
1154 emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode, stack_pointer_rtx,
1155 stack_limit_rtx),
1156 stack_pointer_rtx, stack_limit_rtx,
1157 const1_rtx));
1158
a157febd 1159 else if (GET_CODE (stack_limit_rtx) != SYMBOL_REF)
d4ee4d25 1160 warning (0, "stack limit expression is not supported");
a157febd 1161 }
01bbf777 1162
a40ed0f3 1163 if (current_frame.reg_no < MIN_MOVEM_REGS)
79e68feb 1164 {
a40ed0f3 1165 /* Store each register separately in the same order moveml does. */
79e68feb
RS
1166 int i;
1167
a40ed0f3
KH
1168 for (i = 16; i-- > 0; )
1169 if (current_frame.reg_mask & (1 << i))
078e983e 1170 {
a40ed0f3
KH
1171 src = gen_rtx_REG (SImode, D0_REG + i);
1172 dest = gen_frame_mem (SImode,
1173 gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
1174 m68k_set_frame_related (emit_insn (gen_movsi (dest, src)));
078e983e 1175 }
79e68feb 1176 }
a40ed0f3 1177 else
79e68feb 1178 {
9425fb04 1179 if (TARGET_COLDFIRE)
a40ed0f3
KH
1180 /* The required register save space has already been allocated.
1181 The first register should be stored at (%sp). */
1182 m68k_set_frame_related
1183 (m68k_emit_movem (stack_pointer_rtx, 0,
1184 current_frame.reg_no, D0_REG,
1185 current_frame.reg_mask, true, false));
afaff477 1186 else
a40ed0f3
KH
1187 m68k_set_frame_related
1188 (m68k_emit_movem (stack_pointer_rtx,
1189 current_frame.reg_no * -GET_MODE_SIZE (SImode),
1190 current_frame.reg_no, D0_REG,
1191 current_frame.reg_mask, true, true));
79e68feb 1192 }
a40ed0f3 1193
75df395f 1194 if (!TARGET_SEP_DATA
e3b5732b 1195 && crtl->uses_pic_offset_table)
2dc8bd76 1196 emit_insn (gen_load_got (pic_offset_table_rtx));
79e68feb
RS
1197}
1198\f
413ac1b2
RS
1199/* Return true if a simple (return) instruction is sufficient for this
1200 instruction (i.e. if no epilogue is needed). */
79e68feb 1201
3d74bc09 1202bool
a2bda628 1203m68k_use_return_insn (void)
79e68feb 1204{
79e68feb 1205 if (!reload_completed || frame_pointer_needed || get_frame_size () != 0)
3d74bc09 1206 return false;
125ed86f 1207
a0a7fbc9 1208 m68k_compute_frame_layout ();
413ac1b2 1209 return current_frame.offset == 0;
79e68feb
RS
1210}
1211
f7e70894
RS
1212/* Emit RTL for the "epilogue" or "sibcall_epilogue" define_expand;
1213 SIBCALL_P says which.
79e68feb
RS
1214
1215 The function epilogue should not depend on the current stack pointer!
1216 It should use the frame pointer only, if there is a frame pointer.
1217 This is mandatory because of alloca; we also take advantage of it to
1218 omit stack adjustments before returning. */
1219
a40ed0f3 1220void
f7e70894 1221m68k_expand_epilogue (bool sibcall_p)
08c148a8 1222{
3d74bc09 1223 HOST_WIDE_INT fsize, fsize_with_regs;
a40ed0f3 1224 bool big, restore_from_sp;
3d74bc09 1225
a0a7fbc9 1226 m68k_compute_frame_layout ();
3d74bc09 1227
3d74bc09 1228 fsize = current_frame.size;
a40ed0f3
KH
1229 big = false;
1230 restore_from_sp = false;
3d74bc09 1231
a40ed0f3 1232 /* FIXME : current_function_is_leaf below is too strong.
c67ddce5 1233 What we really need to know there is if there could be pending
7a1929e1 1234 stack adjustment needed at that point. */
a40ed0f3 1235 restore_from_sp = (!frame_pointer_needed
e3b5732b 1236 || (!cfun->calls_alloca
a40ed0f3 1237 && current_function_is_leaf));
860c4900
BI
1238
1239 /* fsize_with_regs is the size we need to adjust the sp when
97c55091 1240 popping the frame. */
860c4900 1241 fsize_with_regs = fsize;
dcc21c4c
PB
1242 if (TARGET_COLDFIRE && restore_from_sp)
1243 {
a40ed0f3
KH
1244 /* ColdFire's move multiple instructions do not allow post-increment
1245 addressing. Add the size of movem loads to the final deallocation
1246 instead. */
1247 if (current_frame.reg_no >= MIN_MOVEM_REGS)
1248 fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
1249 if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
1250 fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
dcc21c4c 1251 }
860c4900 1252
3d74bc09 1253 if (current_frame.offset + fsize >= 0x8000
a40ed0f3 1254 && !restore_from_sp
3d74bc09 1255 && (current_frame.reg_mask || current_frame.fpu_mask))
79e68feb 1256 {
a40ed0f3
KH
1257 if (TARGET_COLDFIRE
1258 && (current_frame.reg_no >= MIN_MOVEM_REGS
1259 || current_frame.fpu_no >= MIN_FMOVEM_REGS))
1260 {
1261 /* ColdFire's move multiple instructions do not support the
1262 (d8,Ax,Xi) addressing mode, so we're as well using a normal
1263 stack-based restore. */
1264 emit_move_insn (gen_rtx_REG (Pmode, A1_REG),
1265 GEN_INT (-(current_frame.offset + fsize)));
1266 emit_insn (gen_addsi3 (stack_pointer_rtx,
1267 gen_rtx_REG (Pmode, A1_REG),
1268 frame_pointer_rtx));
1269 restore_from_sp = true;
1270 }
1271 else
1272 {
1273 emit_move_insn (gen_rtx_REG (Pmode, A1_REG), GEN_INT (-fsize));
1274 fsize = 0;
1275 big = true;
1276 }
79e68feb 1277 }
79e68feb 1278
a40ed0f3
KH
1279 if (current_frame.reg_no < MIN_MOVEM_REGS)
1280 {
1281 /* Restore each register separately in the same order moveml does. */
79e68feb 1282 int i;
a40ed0f3 1283 HOST_WIDE_INT offset;
79e68feb 1284
a40ed0f3 1285 offset = current_frame.offset + fsize;
3d74bc09
BI
1286 for (i = 0; i < 16; i++)
1287 if (current_frame.reg_mask & (1 << i))
79e68feb 1288 {
a40ed0f3
KH
1289 rtx addr;
1290
1291 if (big)
79e68feb 1292 {
a40ed0f3
KH
1293 /* Generate the address -OFFSET(%fp,%a1.l). */
1294 addr = gen_rtx_REG (Pmode, A1_REG);
1295 addr = gen_rtx_PLUS (Pmode, addr, frame_pointer_rtx);
1296 addr = plus_constant (addr, -offset);
79e68feb 1297 }
a40ed0f3
KH
1298 else if (restore_from_sp)
1299 addr = gen_rtx_POST_INC (Pmode, stack_pointer_rtx);
1300 else
1301 addr = plus_constant (frame_pointer_rtx, -offset);
1302 emit_move_insn (gen_rtx_REG (SImode, D0_REG + i),
1303 gen_frame_mem (SImode, addr));
1304 offset -= GET_MODE_SIZE (SImode);
1305 }
79e68feb 1306 }
3d74bc09 1307 else if (current_frame.reg_mask)
79e68feb 1308 {
a40ed0f3
KH
1309 if (big)
1310 m68k_emit_movem (gen_rtx_PLUS (Pmode,
1311 gen_rtx_REG (Pmode, A1_REG),
1312 frame_pointer_rtx),
1313 -(current_frame.offset + fsize),
1314 current_frame.reg_no, D0_REG,
1315 current_frame.reg_mask, false, false);
1316 else if (restore_from_sp)
1317 m68k_emit_movem (stack_pointer_rtx, 0,
1318 current_frame.reg_no, D0_REG,
1319 current_frame.reg_mask, false,
1320 !TARGET_COLDFIRE);
1321 else
1322 m68k_emit_movem (frame_pointer_rtx,
1323 -(current_frame.offset + fsize),
1324 current_frame.reg_no, D0_REG,
1325 current_frame.reg_mask, false, false);
79e68feb 1326 }
a40ed0f3
KH
1327
1328 if (current_frame.fpu_no > 0)
79e68feb
RS
1329 {
1330 if (big)
a40ed0f3
KH
1331 m68k_emit_movem (gen_rtx_PLUS (Pmode,
1332 gen_rtx_REG (Pmode, A1_REG),
1333 frame_pointer_rtx),
1334 -(current_frame.foffset + fsize),
1335 current_frame.fpu_no, FP0_REG,
1336 current_frame.fpu_mask, false, false);
6910dd70 1337 else if (restore_from_sp)
79e68feb 1338 {
dcc21c4c
PB
1339 if (TARGET_COLDFIRE)
1340 {
1341 int offset;
1342
a40ed0f3
KH
1343 /* If we used moveml to restore the integer registers, the
1344 stack pointer will still point to the bottom of the moveml
1345 save area. Find the stack offset of the first FP
1346 register. */
1347 if (current_frame.reg_no < MIN_MOVEM_REGS)
dcc21c4c
PB
1348 offset = 0;
1349 else
a40ed0f3
KH
1350 offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
1351 m68k_emit_movem (stack_pointer_rtx, offset,
1352 current_frame.fpu_no, FP0_REG,
1353 current_frame.fpu_mask, false, false);
dcc21c4c 1354 }
884b74f0 1355 else
a40ed0f3
KH
1356 m68k_emit_movem (stack_pointer_rtx, 0,
1357 current_frame.fpu_no, FP0_REG,
1358 current_frame.fpu_mask, false, true);
79e68feb
RS
1359 }
1360 else
a40ed0f3
KH
1361 m68k_emit_movem (frame_pointer_rtx,
1362 -(current_frame.foffset + fsize),
1363 current_frame.fpu_no, FP0_REG,
1364 current_frame.fpu_mask, false, false);
79e68feb 1365 }
a40ed0f3 1366
79e68feb 1367 if (frame_pointer_needed)
a40ed0f3 1368 emit_insn (gen_unlink (frame_pointer_rtx));
860c4900 1369 else if (fsize_with_regs)
a40ed0f3
KH
1370 emit_insn (gen_addsi3 (stack_pointer_rtx,
1371 stack_pointer_rtx,
1372 GEN_INT (fsize_with_regs)));
1373
e3b5732b 1374 if (crtl->calls_eh_return)
a40ed0f3
KH
1375 emit_insn (gen_addsi3 (stack_pointer_rtx,
1376 stack_pointer_rtx,
1377 EH_RETURN_STACKADJ_RTX));
1378
f7e70894 1379 if (!sibcall_p)
49570723 1380 emit_jump_insn (gen_rtx_RETURN (VOIDmode));
79e68feb
RS
1381}
1382\f
8a4a2253 1383/* Return true if X is a valid comparison operator for the dbcc
64a184e9
RS
1384 instruction.
1385
1386 Note it rejects floating point comparison operators.
1387 (In the future we could use Fdbcc).
1388
1389 It also rejects some comparisons when CC_NO_OVERFLOW is set. */
1390
1391int
41b6a5e2 1392valid_dbcc_comparison_p_2 (rtx x, enum machine_mode mode ATTRIBUTE_UNUSED)
64a184e9 1393{
64a184e9
RS
1394 switch (GET_CODE (x))
1395 {
64a184e9
RS
1396 case EQ: case NE: case GTU: case LTU:
1397 case GEU: case LEU:
1398 return 1;
1399
1400 /* Reject some when CC_NO_OVERFLOW is set. This may be over
1401 conservative */
1402 case GT: case LT: case GE: case LE:
1403 return ! (cc_prev_status.flags & CC_NO_OVERFLOW);
1404 default:
1405 return 0;
1406 }
1407}
1408
a0ab749a 1409/* Return nonzero if flags are currently in the 68881 flag register. */
6a0f85e3 1410int
8a4a2253 1411flags_in_68881 (void)
6a0f85e3
TG
1412{
1413 /* We could add support for these in the future */
1414 return cc_status.flags & CC_IN_68881;
1415}
1416
db5e2d51
MK
1417/* Return true if PARALLEL contains register REGNO. */
1418static bool
1419m68k_reg_present_p (const_rtx parallel, unsigned int regno)
1420{
1421 int i;
1422
1423 if (REG_P (parallel) && REGNO (parallel) == regno)
1424 return true;
1425
1426 if (GET_CODE (parallel) != PARALLEL)
1427 return false;
1428
1429 for (i = 0; i < XVECLEN (parallel, 0); ++i)
1430 {
1431 const_rtx x;
1432
1433 x = XEXP (XVECEXP (parallel, 0, i), 0);
1434 if (REG_P (x) && REGNO (x) == regno)
1435 return true;
1436 }
1437
1438 return false;
1439}
1440
fa157b28 1441/* Implement TARGET_FUNCTION_OK_FOR_SIBCALL_P. */
f7e70894
RS
1442
1443static bool
fa157b28 1444m68k_ok_for_sibcall_p (tree decl, tree exp)
f7e70894 1445{
fa157b28
NS
1446 enum m68k_function_kind kind;
1447
1448 /* We cannot use sibcalls for nested functions because we use the
1449 static chain register for indirect calls. */
1450 if (CALL_EXPR_STATIC_CHAIN (exp))
1451 return false;
1452
db5e2d51
MK
1453 if (!VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
1454 {
1455 /* Check that the return value locations are the same. For
1456 example that we aren't returning a value from the sibling in
1457 a D0 register but then need to transfer it to a A0 register. */
1458 rtx cfun_value;
1459 rtx call_value;
1460
1461 cfun_value = FUNCTION_VALUE (TREE_TYPE (DECL_RESULT (cfun->decl)),
1462 cfun->decl);
1463 call_value = FUNCTION_VALUE (TREE_TYPE (exp), decl);
1464
1465 /* Check that the values are equal or that the result the callee
1466 function returns is superset of what the current function returns. */
1467 if (!(rtx_equal_p (cfun_value, call_value)
1468 || (REG_P (cfun_value)
1469 && m68k_reg_present_p (call_value, REGNO (cfun_value)))))
1470 return false;
1471 }
1472
fa157b28
NS
1473 kind = m68k_get_function_kind (current_function_decl);
1474 if (kind == m68k_fk_normal_function)
1475 /* We can always sibcall from a normal function, because it's
1476 undefined if it is calling an interrupt function. */
1477 return true;
1478
1479 /* Otherwise we can only sibcall if the function kind is known to be
1480 the same. */
1481 if (decl && m68k_get_function_kind (decl) == kind)
1482 return true;
1483
1484 return false;
f7e70894
RS
1485}
1486
13d3961c
NF
1487/* On the m68k all args are always pushed. */
1488
1489static rtx
1490m68k_function_arg (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
1491 enum machine_mode mode ATTRIBUTE_UNUSED,
1492 const_tree type ATTRIBUTE_UNUSED,
1493 bool named ATTRIBUTE_UNUSED)
1494{
1495 return NULL_RTX;
1496}
1497
1498static void
1499m68k_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
1500 const_tree type, bool named ATTRIBUTE_UNUSED)
1501{
1502 *cum += (mode != BLKmode
1503 ? (GET_MODE_SIZE (mode) + 3) & ~3
1504 : (int_size_in_bytes (type) + 3) & ~3);
1505}
1506
29ca003a
RS
1507/* Convert X to a legitimate function call memory reference and return the
1508 result. */
a2ef3db7 1509
29ca003a
RS
1510rtx
1511m68k_legitimize_call_address (rtx x)
1512{
1513 gcc_assert (MEM_P (x));
1514 if (call_operand (XEXP (x, 0), VOIDmode))
1515 return x;
1516 return replace_equiv_address (x, force_reg (Pmode, XEXP (x, 0)));
a2ef3db7
BI
1517}
1518
f7e70894
RS
1519/* Likewise for sibling calls. */
1520
1521rtx
1522m68k_legitimize_sibcall_address (rtx x)
1523{
1524 gcc_assert (MEM_P (x));
1525 if (sibcall_operand (XEXP (x, 0), VOIDmode))
1526 return x;
1527
1528 emit_move_insn (gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM), XEXP (x, 0));
1529 return replace_equiv_address (x, gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM));
1530}
1531
506d7b68
PB
1532/* Convert X to a legitimate address and return it if successful. Otherwise
1533 return X.
1534
1535 For the 68000, we handle X+REG by loading X into a register R and
1536 using R+REG. R will go in an address reg and indexing will be used.
1537 However, if REG is a broken-out memory address or multiplication,
1538 nothing needs to be done because REG can certainly go in an address reg. */
1539
ab7256e4 1540static rtx
506d7b68
PB
1541m68k_legitimize_address (rtx x, rtx oldx, enum machine_mode mode)
1542{
75df395f
MK
1543 if (m68k_tls_symbol_p (x))
1544 return m68k_legitimize_tls_address (x);
1545
506d7b68
PB
1546 if (GET_CODE (x) == PLUS)
1547 {
1548 int ch = (x) != (oldx);
1549 int copied = 0;
1550
1551#define COPY_ONCE(Y) if (!copied) { Y = copy_rtx (Y); copied = ch = 1; }
1552
1553 if (GET_CODE (XEXP (x, 0)) == MULT)
1554 {
1555 COPY_ONCE (x);
1556 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
1557 }
1558 if (GET_CODE (XEXP (x, 1)) == MULT)
1559 {
1560 COPY_ONCE (x);
1561 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
1562 }
1563 if (ch)
1564 {
1565 if (GET_CODE (XEXP (x, 1)) == REG
1566 && GET_CODE (XEXP (x, 0)) == REG)
1567 {
1568 if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
1569 {
1570 COPY_ONCE (x);
1571 x = force_operand (x, 0);
1572 }
1573 return x;
1574 }
1575 if (memory_address_p (mode, x))
1576 return x;
1577 }
1578 if (GET_CODE (XEXP (x, 0)) == REG
1579 || (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
1580 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
1581 && GET_MODE (XEXP (XEXP (x, 0), 0)) == HImode))
1582 {
1583 rtx temp = gen_reg_rtx (Pmode);
1584 rtx val = force_operand (XEXP (x, 1), 0);
1585 emit_move_insn (temp, val);
1586 COPY_ONCE (x);
1587 XEXP (x, 1) = temp;
1588 if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
1589 && GET_CODE (XEXP (x, 0)) == REG)
1590 x = force_operand (x, 0);
1591 }
1592 else if (GET_CODE (XEXP (x, 1)) == REG
1593 || (GET_CODE (XEXP (x, 1)) == SIGN_EXTEND
1594 && GET_CODE (XEXP (XEXP (x, 1), 0)) == REG
1595 && GET_MODE (XEXP (XEXP (x, 1), 0)) == HImode))
1596 {
1597 rtx temp = gen_reg_rtx (Pmode);
1598 rtx val = force_operand (XEXP (x, 0), 0);
1599 emit_move_insn (temp, val);
1600 COPY_ONCE (x);
1601 XEXP (x, 0) = temp;
1602 if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
1603 && GET_CODE (XEXP (x, 1)) == REG)
1604 x = force_operand (x, 0);
1605 }
1606 }
1607
1608 return x;
1609}
1610
1611
64a184e9
RS
1612/* Output a dbCC; jCC sequence. Note we do not handle the
1613 floating point version of this sequence (Fdbcc). We also
1614 do not handle alternative conditions when CC_NO_OVERFLOW is
6a0f85e3
TG
1615 set. It is assumed that valid_dbcc_comparison_p and flags_in_68881 will
1616 kick those out before we get here. */
64a184e9 1617
1d8eaa6b 1618void
8a4a2253 1619output_dbcc_and_branch (rtx *operands)
64a184e9 1620{
64a184e9
RS
1621 switch (GET_CODE (operands[3]))
1622 {
1623 case EQ:
da398bb5 1624 output_asm_insn ("dbeq %0,%l1\n\tjeq %l2", operands);
e6d98cb0 1625 break;
64a184e9
RS
1626
1627 case NE:
da398bb5 1628 output_asm_insn ("dbne %0,%l1\n\tjne %l2", operands);
e6d98cb0 1629 break;
64a184e9
RS
1630
1631 case GT:
da398bb5 1632 output_asm_insn ("dbgt %0,%l1\n\tjgt %l2", operands);
e6d98cb0 1633 break;
64a184e9
RS
1634
1635 case GTU:
da398bb5 1636 output_asm_insn ("dbhi %0,%l1\n\tjhi %l2", operands);
e6d98cb0 1637 break;
64a184e9
RS
1638
1639 case LT:
da398bb5 1640 output_asm_insn ("dblt %0,%l1\n\tjlt %l2", operands);
e6d98cb0 1641 break;
64a184e9
RS
1642
1643 case LTU:
da398bb5 1644 output_asm_insn ("dbcs %0,%l1\n\tjcs %l2", operands);
e6d98cb0 1645 break;
64a184e9
RS
1646
1647 case GE:
da398bb5 1648 output_asm_insn ("dbge %0,%l1\n\tjge %l2", operands);
e6d98cb0 1649 break;
64a184e9
RS
1650
1651 case GEU:
da398bb5 1652 output_asm_insn ("dbcc %0,%l1\n\tjcc %l2", operands);
e6d98cb0 1653 break;
64a184e9
RS
1654
1655 case LE:
da398bb5 1656 output_asm_insn ("dble %0,%l1\n\tjle %l2", operands);
e6d98cb0 1657 break;
64a184e9
RS
1658
1659 case LEU:
da398bb5 1660 output_asm_insn ("dbls %0,%l1\n\tjls %l2", operands);
e6d98cb0 1661 break;
64a184e9
RS
1662
1663 default:
4761e388 1664 gcc_unreachable ();
64a184e9
RS
1665 }
1666
1667 /* If the decrement is to be done in SImode, then we have
7a1929e1 1668 to compensate for the fact that dbcc decrements in HImode. */
64a184e9
RS
1669 switch (GET_MODE (operands[0]))
1670 {
1671 case SImode:
da398bb5 1672 output_asm_insn ("clr%.w %0\n\tsubq%.l #1,%0\n\tjpl %l1", operands);
64a184e9
RS
1673 break;
1674
1675 case HImode:
1676 break;
1677
1678 default:
4761e388 1679 gcc_unreachable ();
64a184e9
RS
1680 }
1681}
1682
5505f548 1683const char *
4761e388 1684output_scc_di (rtx op, rtx operand1, rtx operand2, rtx dest)
c59c3b1c
RK
1685{
1686 rtx loperands[7];
d9832fd2 1687 enum rtx_code op_code = GET_CODE (op);
c59c3b1c 1688
f710504c 1689 /* This does not produce a useful cc. */
906a2d3c
RK
1690 CC_STATUS_INIT;
1691
d9832fd2
RK
1692 /* The m68k cmp.l instruction requires operand1 to be a reg as used
1693 below. Swap the operands and change the op if these requirements
1694 are not fulfilled. */
1695 if (GET_CODE (operand2) == REG && GET_CODE (operand1) != REG)
1696 {
1697 rtx tmp = operand1;
1698
1699 operand1 = operand2;
1700 operand2 = tmp;
1701 op_code = swap_condition (op_code);
1702 }
c59c3b1c
RK
1703 loperands[0] = operand1;
1704 if (GET_CODE (operand1) == REG)
1d8eaa6b 1705 loperands[1] = gen_rtx_REG (SImode, REGNO (operand1) + 1);
c59c3b1c 1706 else
b72f00af 1707 loperands[1] = adjust_address (operand1, SImode, 4);
c59c3b1c
RK
1708 if (operand2 != const0_rtx)
1709 {
1710 loperands[2] = operand2;
1711 if (GET_CODE (operand2) == REG)
1d8eaa6b 1712 loperands[3] = gen_rtx_REG (SImode, REGNO (operand2) + 1);
c59c3b1c 1713 else
b72f00af 1714 loperands[3] = adjust_address (operand2, SImode, 4);
c59c3b1c 1715 }
428511bb 1716 loperands[4] = gen_label_rtx ();
c59c3b1c 1717 if (operand2 != const0_rtx)
da398bb5 1718 output_asm_insn ("cmp%.l %2,%0\n\tjne %l4\n\tcmp%.l %3,%1", loperands);
392582fa 1719 else
4a8c52e0 1720 {
9425fb04 1721 if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[0]))
4a8c52e0
AS
1722 output_asm_insn ("tst%.l %0", loperands);
1723 else
a0a7fbc9 1724 output_asm_insn ("cmp%.w #0,%0", loperands);
4a8c52e0 1725
da398bb5 1726 output_asm_insn ("jne %l4", loperands);
4a8c52e0 1727
9425fb04 1728 if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[1]))
4a8c52e0
AS
1729 output_asm_insn ("tst%.l %1", loperands);
1730 else
3b4b85c9 1731 output_asm_insn ("cmp%.w #0,%1", loperands);
4a8c52e0
AS
1732 }
1733
c59c3b1c 1734 loperands[5] = dest;
3b4b85c9 1735
d9832fd2 1736 switch (op_code)
c59c3b1c
RK
1737 {
1738 case EQ:
4977bab6 1739 (*targetm.asm_out.internal_label) (asm_out_file, "L",
a0a7fbc9 1740 CODE_LABEL_NUMBER (loperands[4]));
c59c3b1c
RK
1741 output_asm_insn ("seq %5", loperands);
1742 break;
1743
1744 case NE:
4977bab6 1745 (*targetm.asm_out.internal_label) (asm_out_file, "L",
a0a7fbc9 1746 CODE_LABEL_NUMBER (loperands[4]));
c59c3b1c
RK
1747 output_asm_insn ("sne %5", loperands);
1748 break;
1749
1750 case GT:
428511bb 1751 loperands[6] = gen_label_rtx ();
da398bb5 1752 output_asm_insn ("shi %5\n\tjra %l6", loperands);
4977bab6 1753 (*targetm.asm_out.internal_label) (asm_out_file, "L",
a0a7fbc9 1754 CODE_LABEL_NUMBER (loperands[4]));
c59c3b1c 1755 output_asm_insn ("sgt %5", loperands);
4977bab6 1756 (*targetm.asm_out.internal_label) (asm_out_file, "L",
a0a7fbc9 1757 CODE_LABEL_NUMBER (loperands[6]));
c59c3b1c
RK
1758 break;
1759
1760 case GTU:
4977bab6 1761 (*targetm.asm_out.internal_label) (asm_out_file, "L",
a0a7fbc9 1762 CODE_LABEL_NUMBER (loperands[4]));
c59c3b1c
RK
1763 output_asm_insn ("shi %5", loperands);
1764 break;
1765
1766 case LT:
428511bb 1767 loperands[6] = gen_label_rtx ();
da398bb5 1768 output_asm_insn ("scs %5\n\tjra %l6", loperands);
4977bab6 1769 (*targetm.asm_out.internal_label) (asm_out_file, "L",
a0a7fbc9 1770 CODE_LABEL_NUMBER (loperands[4]));
c59c3b1c 1771 output_asm_insn ("slt %5", loperands);
4977bab6 1772 (*targetm.asm_out.internal_label) (asm_out_file, "L",
a0a7fbc9 1773 CODE_LABEL_NUMBER (loperands[6]));
c59c3b1c
RK
1774 break;
1775
1776 case LTU:
4977bab6 1777 (*targetm.asm_out.internal_label) (asm_out_file, "L",
a0a7fbc9 1778 CODE_LABEL_NUMBER (loperands[4]));
c59c3b1c
RK
1779 output_asm_insn ("scs %5", loperands);
1780 break;
1781
1782 case GE:
428511bb 1783 loperands[6] = gen_label_rtx ();
da398bb5 1784 output_asm_insn ("scc %5\n\tjra %l6", loperands);
4977bab6 1785 (*targetm.asm_out.internal_label) (asm_out_file, "L",
a0a7fbc9 1786 CODE_LABEL_NUMBER (loperands[4]));
c59c3b1c 1787 output_asm_insn ("sge %5", loperands);
4977bab6 1788 (*targetm.asm_out.internal_label) (asm_out_file, "L",
a0a7fbc9 1789 CODE_LABEL_NUMBER (loperands[6]));
c59c3b1c
RK
1790 break;
1791
1792 case GEU:
4977bab6 1793 (*targetm.asm_out.internal_label) (asm_out_file, "L",
a0a7fbc9 1794 CODE_LABEL_NUMBER (loperands[4]));
c59c3b1c
RK
1795 output_asm_insn ("scc %5", loperands);
1796 break;
1797
1798 case LE:
428511bb 1799 loperands[6] = gen_label_rtx ();
da398bb5 1800 output_asm_insn ("sls %5\n\tjra %l6", loperands);
4977bab6 1801 (*targetm.asm_out.internal_label) (asm_out_file, "L",
a0a7fbc9 1802 CODE_LABEL_NUMBER (loperands[4]));
c59c3b1c 1803 output_asm_insn ("sle %5", loperands);
4977bab6 1804 (*targetm.asm_out.internal_label) (asm_out_file, "L",
a0a7fbc9 1805 CODE_LABEL_NUMBER (loperands[6]));
c59c3b1c
RK
1806 break;
1807
1808 case LEU:
4977bab6 1809 (*targetm.asm_out.internal_label) (asm_out_file, "L",
a0a7fbc9 1810 CODE_LABEL_NUMBER (loperands[4]));
c59c3b1c
RK
1811 output_asm_insn ("sls %5", loperands);
1812 break;
1813
1814 default:
4761e388 1815 gcc_unreachable ();
c59c3b1c
RK
1816 }
1817 return "";
1818}
1819
5505f548 1820const char *
8a4a2253 1821output_btst (rtx *operands, rtx countop, rtx dataop, rtx insn, int signpos)
79e68feb
RS
1822{
1823 operands[0] = countop;
1824 operands[1] = dataop;
1825
1826 if (GET_CODE (countop) == CONST_INT)
1827 {
1828 register int count = INTVAL (countop);
1829 /* If COUNT is bigger than size of storage unit in use,
1830 advance to the containing unit of same size. */
1831 if (count > signpos)
1832 {
1833 int offset = (count & ~signpos) / 8;
1834 count = count & signpos;
b72f00af 1835 operands[1] = dataop = adjust_address (dataop, QImode, offset);
79e68feb
RS
1836 }
1837 if (count == signpos)
1838 cc_status.flags = CC_NOT_POSITIVE | CC_Z_IN_NOT_N;
1839 else
1840 cc_status.flags = CC_NOT_NEGATIVE | CC_Z_IN_NOT_N;
1841
1842 /* These three statements used to use next_insns_test_no...
1843 but it appears that this should do the same job. */
1844 if (count == 31
1845 && next_insn_tests_no_inequality (insn))
1846 return "tst%.l %1";
1847 if (count == 15
1848 && next_insn_tests_no_inequality (insn))
1849 return "tst%.w %1";
1850 if (count == 7
1851 && next_insn_tests_no_inequality (insn))
1852 return "tst%.b %1";
5083912d
PDM
1853 /* Try to use `movew to ccr' followed by the appropriate branch insn.
1854 On some m68k variants unfortunately that's slower than btst.
1855 On 68000 and higher, that should also work for all HImode operands. */
1856 if (TUNE_CPU32 || TARGET_COLDFIRE || optimize_size)
1857 {
1858 if (count == 3 && DATA_REG_P (operands[1])
1859 && next_insn_tests_no_inequality (insn))
1860 {
1861 cc_status.flags = CC_NOT_NEGATIVE | CC_Z_IN_NOT_N | CC_NO_OVERFLOW;
1862 return "move%.w %1,%%ccr";
1863 }
1864 if (count == 2 && DATA_REG_P (operands[1])
1865 && next_insn_tests_no_inequality (insn))
1866 {
1867 cc_status.flags = CC_NOT_NEGATIVE | CC_INVERTED | CC_NO_OVERFLOW;
1868 return "move%.w %1,%%ccr";
1869 }
1870 /* count == 1 followed by bvc/bvs and
1871 count == 0 followed by bcc/bcs are also possible, but need
1872 m68k-specific CC_Z_IN_NOT_V and CC_Z_IN_NOT_C flags. */
1873 }
79e68feb
RS
1874
1875 cc_status.flags = CC_NOT_NEGATIVE;
1876 }
1877 return "btst %0,%1";
1878}
79e68feb 1879\f
fc2241eb
RS
1880/* Return true if X is a legitimate base register. STRICT_P says
1881 whether we need strict checking. */
1882
1883bool
1884m68k_legitimate_base_reg_p (rtx x, bool strict_p)
1885{
1886 /* Allow SUBREG everywhere we allow REG. This results in better code. */
1887 if (!strict_p && GET_CODE (x) == SUBREG)
1888 x = SUBREG_REG (x);
1889
1890 return (REG_P (x)
1891 && (strict_p
1892 ? REGNO_OK_FOR_BASE_P (REGNO (x))
bf32249e 1893 : REGNO_OK_FOR_BASE_NONSTRICT_P (REGNO (x))));
fc2241eb
RS
1894}
1895
1896/* Return true if X is a legitimate index register. STRICT_P says
1897 whether we need strict checking. */
1898
1899bool
1900m68k_legitimate_index_reg_p (rtx x, bool strict_p)
1901{
1902 if (!strict_p && GET_CODE (x) == SUBREG)
1903 x = SUBREG_REG (x);
1904
1905 return (REG_P (x)
1906 && (strict_p
1907 ? REGNO_OK_FOR_INDEX_P (REGNO (x))
bf32249e 1908 : REGNO_OK_FOR_INDEX_NONSTRICT_P (REGNO (x))));
fc2241eb
RS
1909}
1910
/* Return true if X is a legitimate index expression for a (d8,An,Xn) or
   (bd,An,Xn) addressing mode.  Fill in the INDEX and SCALE fields of
   ADDRESS if so.  STRICT_P says whether we need strict checking.  */

static bool
m68k_decompose_index (rtx x, bool strict_p, struct m68k_address *address)
{
  int scale;

  /* Check for a scale factor.  Scaled indexing requires a 68020-class
     CPU or ColdFire; scale 8 is excluded on ColdFire without an FPU.  */
  scale = 1;
  if ((TARGET_68020 || TARGET_COLDFIRE)
      && GET_CODE (x) == MULT
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && (INTVAL (XEXP (x, 1)) == 2
	  || INTVAL (XEXP (x, 1)) == 4
	  || (INTVAL (XEXP (x, 1)) == 8
	      && (TARGET_COLDFIRE_FPU || !TARGET_COLDFIRE))))
    {
      scale = INTVAL (XEXP (x, 1));
      x = XEXP (x, 0);
    }

  /* Check for a word extension: Xn.w index form, not available
     on ColdFire.  */
  if (!TARGET_COLDFIRE
      && GET_CODE (x) == SIGN_EXTEND
      && GET_MODE (XEXP (x, 0)) == HImode)
    x = XEXP (x, 0);

  if (m68k_legitimate_index_reg_p (x, strict_p))
    {
      address->scale = scale;
      address->index = x;
      return true;
    }

  return false;
}
1949
7ffb5e78
RS
1950/* Return true if X is an illegitimate symbolic constant. */
1951
1952bool
1953m68k_illegitimate_symbolic_constant_p (rtx x)
1954{
1955 rtx base, offset;
1956
1957 if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P)
1958 {
1959 split_const (x, &base, &offset);
1960 if (GET_CODE (base) == SYMBOL_REF
1961 && !offset_within_block_p (base, INTVAL (offset)))
1962 return true;
1963 }
75df395f 1964 return m68k_tls_reference_p (x, false);
7ffb5e78
RS
1965}
1966
/* Return true if X is a legitimate constant address that can reach
   bytes in the range [X, X + REACH).  STRICT_P says whether we need
   strict checking.  */

static bool
m68k_legitimate_constant_address_p (rtx x, unsigned int reach, bool strict_p)
{
  rtx base, offset;

  if (!CONSTANT_ADDRESS_P (x))
    return false;

  /* Under PIC, direct symbolic addresses are not usable, except when
     the strict PC-relative model (TARGET_PCREL) is in effect.  */
  if (flag_pic
      && !(strict_p && TARGET_PCREL)
      && symbolic_operand (x, VOIDmode))
    return false;

  /* When offsets must stay within a section, the whole accessed range
     [X, X + REACH) has to lie inside the symbol's block.  */
  if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P && reach > 1)
    {
      split_const (x, &base, &offset);
      if (GET_CODE (base) == SYMBOL_REF
	  && !offset_within_block_p (base, INTVAL (offset) + reach - 1))
	return false;
    }

  /* TLS symbols always need a legitimization sequence first.  */
  return !m68k_tls_reference_p (x, false);
}
1994
1995/* Return true if X is a LABEL_REF for a jump table. Assume that unplaced
1996 labels will become jump tables. */
1997
1998static bool
1999m68k_jump_table_ref_p (rtx x)
2000{
2001 if (GET_CODE (x) != LABEL_REF)
2002 return false;
2003
2004 x = XEXP (x, 0);
2005 if (!NEXT_INSN (x) && !PREV_INSN (x))
2006 return true;
2007
2008 x = next_nonnote_insn (x);
2009 return x && JUMP_TABLE_DATA_P (x);
2010}
2011
/* Return true if X is a legitimate address for values of mode MODE.
   STRICT_P says whether strict checking is needed.  If the address
   is valid, describe its components in *ADDRESS.

   The "mode N" comments below refer to the numbered m68k effective
   addressing modes.  The checks are ordered from the simplest and most
   widely supported forms to the 68020-only indexed forms.  */

static bool
m68k_decompose_address (enum machine_mode mode, rtx x,
			bool strict_p, struct m68k_address *address)
{
  unsigned int reach;

  memset (address, 0, sizeof (*address));

  /* REACH is how many bytes past X the access may touch; offset range
     checks below must leave room for it.  */
  if (mode == BLKmode)
    reach = 1;
  else
    reach = GET_MODE_SIZE (mode);

  /* Check for (An) (mode 2).  */
  if (m68k_legitimate_base_reg_p (x, strict_p))
    {
      address->base = x;
      return true;
    }

  /* Check for -(An) and (An)+ (modes 3 and 4).  */
  if ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_INC)
      && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
    {
      address->code = GET_CODE (x);
      address->base = XEXP (x, 0);
      return true;
    }

  /* Check for (d16,An) (mode 5).  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && IN_RANGE (INTVAL (XEXP (x, 1)), -0x8000, 0x8000 - reach)
      && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
    {
      address->base = XEXP (x, 0);
      address->offset = XEXP (x, 1);
      return true;
    }

  /* Check for GOT loads.  These are (bd,An,Xn) addresses if
     TARGET_68020 && flag_pic == 2, otherwise they are (d16,An)
     addresses.  */
  if (GET_CODE (x) == PLUS
      && XEXP (x, 0) == pic_offset_table_rtx)
    {
      /* As we are processing a PLUS, do not unwrap RELOC32 symbols --
	 they are invalid in this context.  */
      if (m68k_unwrap_symbol (XEXP (x, 1), false) != XEXP (x, 1))
	{
	  address->base = XEXP (x, 0);
	  address->offset = XEXP (x, 1);
	  return true;
	}
    }

  /* The ColdFire FPU only accepts addressing modes 2-5.  */
  if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
    return false;

  /* Check for (xxx).w and (xxx).l.  Also, in the TARGET_PCREL case,
     check for (d16,PC) or (bd,PC,Xn) with a suppressed index register.
     All these modes are variations of mode 7.  */
  if (m68k_legitimate_constant_address_p (x, reach, strict_p))
    {
      address->offset = x;
      return true;
    }

  /* Check for (d8,PC,Xn), a mode 7 form.  This case is needed for
     tablejumps.

     ??? do_tablejump creates these addresses before placing the target
     label, so we have to assume that unplaced labels are jump table
     references.  It seems unlikely that we would ever generate indexed
     accesses to unplaced labels in other cases.  */
  if (GET_CODE (x) == PLUS
      && m68k_jump_table_ref_p (XEXP (x, 1))
      && m68k_decompose_index (XEXP (x, 0), strict_p, address))
    {
      address->offset = XEXP (x, 1);
      return true;
    }

  /* Everything hereafter deals with (d8,An,Xn.SIZE*SCALE) or
     (bd,An,Xn.SIZE*SCALE) addresses.  */

  if (TARGET_68020)
    {
      /* Check for a nonzero base displacement.  */
      if (GET_CODE (x) == PLUS
	  && m68k_legitimate_constant_address_p (XEXP (x, 1), reach, strict_p))
	{
	  address->offset = XEXP (x, 1);
	  x = XEXP (x, 0);
	}

      /* Check for a suppressed index register.  */
      if (m68k_legitimate_base_reg_p (x, strict_p))
	{
	  address->base = x;
	  return true;
	}

      /* Check for a suppressed base register.  Do not allow this case
	 for non-symbolic offsets as it effectively gives gcc freedom
	 to treat data registers as base registers, which can generate
	 worse code.  */
      if (address->offset
	  && symbolic_operand (address->offset, VOIDmode)
	  && m68k_decompose_index (x, strict_p, address))
	return true;
    }
  else
    {
      /* Check for a nonzero base displacement.  Pre-68020 only has an
	 8-bit displacement in the indexed forms.  */
      if (GET_CODE (x) == PLUS
	  && GET_CODE (XEXP (x, 1)) == CONST_INT
	  && IN_RANGE (INTVAL (XEXP (x, 1)), -0x80, 0x80 - reach))
	{
	  address->offset = XEXP (x, 1);
	  x = XEXP (x, 0);
	}
    }

  /* We now expect the sum of a base and an index; try both orders of
     the PLUS operands.  */
  if (GET_CODE (x) == PLUS)
    {
      if (m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p)
	  && m68k_decompose_index (XEXP (x, 1), strict_p, address))
	{
	  address->base = XEXP (x, 0);
	  return true;
	}

      if (m68k_legitimate_base_reg_p (XEXP (x, 1), strict_p)
	  && m68k_decompose_index (XEXP (x, 0), strict_p, address))
	{
	  address->base = XEXP (x, 1);
	  return true;
	}
    }

  return false;
}
2160
2161/* Return true if X is a legitimate address for values of mode MODE.
2162 STRICT_P says whether strict checking is needed. */
2163
2164bool
2165m68k_legitimate_address_p (enum machine_mode mode, rtx x, bool strict_p)
2166{
2167 struct m68k_address address;
2168
2169 return m68k_decompose_address (mode, x, strict_p, &address);
2170}
2171
2172/* Return true if X is a memory, describing its address in ADDRESS if so.
2173 Apply strict checking if called during or after reload. */
2174
2175static bool
2176m68k_legitimate_mem_p (rtx x, struct m68k_address *address)
2177{
2178 return (MEM_P (x)
2179 && m68k_decompose_address (GET_MODE (x), XEXP (x, 0),
2180 reload_in_progress || reload_completed,
2181 address));
2182}
2183
2184/* Return true if X matches the 'Q' constraint. It must be a memory
2185 with a base address and no constant offset or index. */
2186
2187bool
2188m68k_matches_q_p (rtx x)
2189{
2190 struct m68k_address address;
2191
2192 return (m68k_legitimate_mem_p (x, &address)
2193 && address.code == UNKNOWN
2194 && address.base
2195 && !address.offset
2196 && !address.index);
2197}
2198
2199/* Return true if X matches the 'U' constraint. It must be a base address
2200 with a constant offset and no index. */
2201
2202bool
2203m68k_matches_u_p (rtx x)
2204{
2205 struct m68k_address address;
2206
2207 return (m68k_legitimate_mem_p (x, &address)
2208 && address.code == UNKNOWN
2209 && address.base
2210 && address.offset
2211 && !address.index);
2212}
2213
75df395f
MK
2214/* Return GOT pointer. */
2215
2216static rtx
2217m68k_get_gp (void)
2218{
2219 if (pic_offset_table_rtx == NULL_RTX)
2220 pic_offset_table_rtx = gen_rtx_REG (Pmode, PIC_REG);
2221
2222 crtl->uses_pic_offset_table = 1;
2223
2224 return pic_offset_table_rtx;
2225}
2226
/* M68K relocations, used to distinguish GOT and TLS relocations in UNSPEC
   wrappers.  RELOC_GOT is the plain GOT reference; the rest are the TLS
   access models (global-dynamic, local-dynamic module/offset,
   initial-exec, local-exec).  */
enum m68k_reloc { RELOC_GOT, RELOC_TLSGD, RELOC_TLSLDM, RELOC_TLSLDO,
		  RELOC_TLSIE, RELOC_TLSLE };

/* True for every relocation kind except the plain GOT reference.  */
#define TLS_RELOC_P(RELOC) ((RELOC) != RELOC_GOT)
2233
/* Wrap symbol X into unspec representing relocation RELOC.
   BASE_REG - register that should be added to the result.
   TEMP_REG - if non-null, temporary register.

   Returns either a (plus BASE_REG (const (unspec ...))) expression
   (16-bit relocation) or, for ColdFire with -mxgot/-mxtls, a register
   holding the 32-bit-relocated address (insns are emitted here).  */

static rtx
m68k_wrap_symbol (rtx x, enum m68k_reloc reloc, rtx base_reg, rtx temp_reg)
{
  bool use_x_p;

  /* -mxgot governs GOT accesses (BASE_REG is the PIC register),
     -mxtls governs TLS accesses.  */
  use_x_p = (base_reg == pic_offset_table_rtx) ? TARGET_XGOT : TARGET_XTLS;

  if (TARGET_COLDFIRE && use_x_p)
    /* When compiling with -mx{got, tls} switch the code will look like this:

	move.l <X>@<RELOC>,<TEMP_REG>
	add.l <BASE_REG>,<TEMP_REG>  */
    {
      /* Wrap X in UNSPEC_??? to tip m68k_output_addr_const_extra
	 to put @RELOC after reference.  */
      x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)),
			  UNSPEC_RELOC32);
      x = gen_rtx_CONST (Pmode, x);

      if (temp_reg == NULL)
	{
	  gcc_assert (can_create_pseudo_p ());
	  temp_reg = gen_reg_rtx (Pmode);
	}

      emit_move_insn (temp_reg, x);
      emit_insn (gen_addsi3 (temp_reg, temp_reg, base_reg));
      x = temp_reg;
    }
  else
    {
      /* 16-bit relocation: build the address expression directly,
	 no insns emitted.  */
      x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)),
			  UNSPEC_RELOC16);
      x = gen_rtx_CONST (Pmode, x);

      x = gen_rtx_PLUS (Pmode, base_reg, x);
    }

  return x;
}
2278
/* Helper for m68k_unwrap_symbol.
   Also, if unwrapping was successful (that is if (ORIG != <return value>)),
   sets *RELOC_PTR to relocation type for the symbol.

   Recognizes (const (unspec ...)) and (const ((plus|minus) (unspec) N))
   forms; returns ORIG unchanged for anything else.  */

static rtx
m68k_unwrap_symbol_1 (rtx orig, bool unwrap_reloc32_p,
		      enum m68k_reloc *reloc_ptr)
{
  if (GET_CODE (orig) == CONST)
    {
      rtx x;
      enum m68k_reloc dummy;

      x = XEXP (orig, 0);

      /* Allow callers to pass NULL when they don't care about the
	 relocation kind.  */
      if (reloc_ptr == NULL)
	reloc_ptr = &dummy;

      /* Handle an addend.  */
      if ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS)
	  && CONST_INT_P (XEXP (x, 1)))
	x = XEXP (x, 0);

      if (GET_CODE (x) == UNSPEC)
	{
	  switch (XINT (x, 1))
	    {
	    case UNSPEC_RELOC16:
	      orig = XVECEXP (x, 0, 0);
	      *reloc_ptr = (enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1));
	      break;

	    case UNSPEC_RELOC32:
	      /* RELOC32 wrappers are only peeled off when the caller
		 asked for it.  */
	      if (unwrap_reloc32_p)
		{
		  orig = XVECEXP (x, 0, 0);
		  *reloc_ptr = (enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1));
		}
	      break;

	    default:
	      break;
	    }
	}
    }

  return orig;
}
2327
2328/* Unwrap symbol from UNSPEC_RELOC16 and, if unwrap_reloc32_p,
2329 UNSPEC_RELOC32 wrappers. */
2330
2331rtx
2332m68k_unwrap_symbol (rtx orig, bool unwrap_reloc32_p)
2333{
2334 return m68k_unwrap_symbol_1 (orig, unwrap_reloc32_p, NULL);
2335}
2336
/* Helper for m68k_final_prescan_insn: for_each_rtx callback.
   Rewrites (const (plus (unspec [SYM]) N)) in place into
   (const (unspec [(plus SYM N)])) so that @TLS/@GOT decorations are
   emitted after the full symbol+addend expression.  Returns -1 to tell
   for_each_rtx not to recurse into a rewritten subexpression, 0 to
   continue walking.  */

static int
m68k_final_prescan_insn_1 (rtx *x_ptr, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *x_ptr;

  if (m68k_unwrap_symbol (x, true) != x)
    /* For rationale of the below, see comment in m68k_final_prescan_insn.  */
    {
      rtx plus;

      gcc_assert (GET_CODE (x) == CONST);
      plus = XEXP (x, 0);

      if (GET_CODE (plus) == PLUS || GET_CODE (plus) == MINUS)
	{
	  rtx unspec;
	  rtx addend;

	  unspec = XEXP (plus, 0);
	  gcc_assert (GET_CODE (unspec) == UNSPEC);
	  addend = XEXP (plus, 1);
	  gcc_assert (CONST_INT_P (addend));

	  /* We now have all the pieces, rearrange them.  */

	  /* Move symbol to plus.  */
	  XEXP (plus, 0) = XVECEXP (unspec, 0, 0);

	  /* Move plus inside unspec.  */
	  XVECEXP (unspec, 0, 0) = plus;

	  /* Move unspec to top level of const.  */
	  XEXP (x, 0) = unspec;
	}

      return -1;
    }

  return 0;
}
2379
2380/* Prescan insn before outputing assembler for it. */
2381
2382void
2383m68k_final_prescan_insn (rtx insn ATTRIBUTE_UNUSED,
2384 rtx *operands, int n_operands)
2385{
2386 int i;
2387
2388 /* Combine and, possibly, other optimizations may do good job
2389 converting
2390 (const (unspec [(symbol)]))
2391 into
2392 (const (plus (unspec [(symbol)])
2393 (const_int N))).
2394 The problem with this is emitting @TLS or @GOT decorations.
2395 The decoration is emitted when processing (unspec), so the
2396 result would be "#symbol@TLSLE+N" instead of "#symbol+N@TLSLE".
2397
2398 It seems that the easiest solution to this is to convert such
2399 operands to
2400 (const (unspec [(plus (symbol)
2401 (const_int N))])).
2402 Note, that the top level of operand remains intact, so we don't have
2403 to patch up anything outside of the operand. */
2404
2405 for (i = 0; i < n_operands; ++i)
2406 {
2407 rtx op;
2408
2409 op = operands[i];
2410
2411 for_each_rtx (&op, m68k_final_prescan_insn_1, NULL);
2412 }
2413}
2414
2415/* Move X to a register and add REG_EQUAL note pointing to ORIG.
2416 If REG is non-null, use it; generate new pseudo otherwise. */
2417
2418static rtx
2419m68k_move_to_reg (rtx x, rtx orig, rtx reg)
2420{
2421 rtx insn;
2422
2423 if (reg == NULL_RTX)
2424 {
2425 gcc_assert (can_create_pseudo_p ());
2426 reg = gen_reg_rtx (Pmode);
2427 }
2428
2429 insn = emit_move_insn (reg, x);
2430 /* Put a REG_EQUAL note on this insn, so that it can be optimized
2431 by loop. */
2432 set_unique_reg_note (insn, REG_EQUAL, orig);
2433
2434 return reg;
2435}
2436
2437/* Does the same as m68k_wrap_symbol, but returns a memory reference to
2438 GOT slot. */
2439
2440static rtx
2441m68k_wrap_symbol_into_got_ref (rtx x, enum m68k_reloc reloc, rtx temp_reg)
2442{
2443 x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), temp_reg);
2444
2445 x = gen_rtx_MEM (Pmode, x);
2446 MEM_READONLY_P (x) = 1;
2447
2448 return x;
2449}
2450
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.

   An address is legitimized by making an indirect reference
   through the Global Offset Table with the name of the symbol
   used as an offset.

   The assembler and linker are responsible for placing the
   address of the symbol in the GOT.  The function prologue
   is responsible for initializing a5 to the starting address
   of the GOT.

   The assembler is also responsible for translating a symbol name
   into a constant displacement from the start of the GOT.

   A quick example may make things a little clearer:

   When not generating PIC code to store the value 12345 into _foo
   we would generate the following code:

	movel #12345, _foo

   When generating PIC two transformations are made.  First, the compiler
   loads the address of foo into a register.  So the first transformation makes:

	lea	_foo, a0
	movel   #12345, a0@

   The code in movsi will intercept the lea instruction and call this
   routine which will transform the instructions into:

	movel	a5@(_foo:w), a0
	movel	#12345, a0@


   That (in a nutshell) is how *all* symbol and label references are
   handled.  */

rtx
legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
			rtx reg)
{
  rtx pic_ref = orig;

  /* First handle a simple SYMBOL_REF or LABEL_REF */
  if (GET_CODE (orig) == SYMBOL_REF || GET_CODE (orig) == LABEL_REF)
    {
      /* A scratch register is required to build the GOT load.  */
      gcc_assert (reg);

      pic_ref = m68k_wrap_symbol_into_got_ref (orig, RELOC_GOT, reg);
      pic_ref = m68k_move_to_reg (pic_ref, orig, reg);
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      /* Make sure this has not already been legitimized.  */
      if (m68k_unwrap_symbol (orig, true) != orig)
	return orig;

      gcc_assert (reg);

      /* legitimize both operands of the PLUS */
      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
				     base == reg ? 0 : reg);

      /* Fold a constant addend directly; otherwise keep the PLUS.  */
      if (GET_CODE (orig) == CONST_INT)
	pic_ref = plus_constant (base, INTVAL (orig));
      else
	pic_ref = gen_rtx_PLUS (Pmode, base, orig);
    }

  return pic_ref;
}
2530
75df395f
MK
2531/* The __tls_get_addr symbol. */
2532static GTY(()) rtx m68k_tls_get_addr;
2533
2534/* Return SYMBOL_REF for __tls_get_addr. */
2535
2536static rtx
2537m68k_get_tls_get_addr (void)
2538{
2539 if (m68k_tls_get_addr == NULL_RTX)
2540 m68k_tls_get_addr = init_one_libfunc ("__tls_get_addr");
2541
2542 return m68k_tls_get_addr;
2543}
2544
/* Return libcall result in A0 instead of usual D0.  Read by the
   libcall-value hook elsewhere in this file while the call sequence
   below is being generated.  */
static bool m68k_libcall_value_in_a0_p = false;

/* Emit instruction sequence that calls __tls_get_addr.  X is
   the TLS symbol we are referencing and RELOC is the symbol type to use
   (either TLSGD or TLSLDM).  EQV is the REG_EQUAL note for the sequence
   emitted.  A pseudo register with result of __tls_get_addr call is
   returned.  */

static rtx
m68k_call_tls_get_addr (rtx x, rtx eqv, enum m68k_reloc reloc)
{
  rtx a0;
  rtx insns;
  rtx dest;

  /* Emit the call sequence.  */
  start_sequence ();

  /* FIXME: Unfortunately, emit_library_call_value does not
     consider (plus (%a5) (const (unspec))) to be a good enough
     operand for push, so it forces it into a register.  The bad
     thing about this is that combiner, due to copy propagation and other
     optimizations, sometimes can not later fix this.  As a consequence,
     additional register may be allocated resulting in a spill.
     For reference, see args processing loops in
     calls.c:emit_library_call_value_1.
     For testcase, see gcc.target/m68k/tls-{gd, ld}.c  */
  x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), NULL_RTX);

  /* __tls_get_addr() is not a libcall, but emitting a libcall_value
     is the simpliest way of generating a call.  The difference between
     __tls_get_addr() and libcall is that the result is returned in D0
     instead of A0.  To workaround this, we use m68k_libcall_value_in_a0_p
     which temporarily switches returning the result to A0.  */

  m68k_libcall_value_in_a0_p = true;
  a0 = emit_library_call_value (m68k_get_tls_get_addr (), NULL_RTX, LCT_PURE,
				Pmode, 1, x, Pmode);
  m68k_libcall_value_in_a0_p = false;

  insns = get_insns ();
  end_sequence ();

  /* Wrap the sequence in a libcall block so the optimizers can treat
     the whole call as a single value-producing operation (EQV).  */
  gcc_assert (can_create_pseudo_p ());
  dest = gen_reg_rtx (Pmode);
  emit_libcall_block (insns, dest, a0, eqv);

  return dest;
}
2595
2596/* The __tls_get_addr symbol. */
2597static GTY(()) rtx m68k_read_tp;
2598
2599/* Return SYMBOL_REF for __m68k_read_tp. */
2600
2601static rtx
2602m68k_get_m68k_read_tp (void)
2603{
2604 if (m68k_read_tp == NULL_RTX)
2605 m68k_read_tp = init_one_libfunc ("__m68k_read_tp");
2606
2607 return m68k_read_tp;
2608}
2609
/* Emit instruction sequence that calls __m68k_read_tp.
   A pseudo register with result of __m68k_read_tp call is returned.  */

static rtx
m68k_call_m68k_read_tp (void)
{
  rtx a0;
  rtx eqv;
  rtx insns;
  rtx dest;

  start_sequence ();

  /* __m68k_read_tp() is not a libcall, but emitting a libcall_value
     is the simpliest way of generating a call.  The difference between
     __m68k_read_tp() and libcall is that the result is returned in D0
     instead of A0.  To workaround this, we use m68k_libcall_value_in_a0_p
     which temporarily switches returning the result to A0.  */

  /* Emit the call sequence.  */
  m68k_libcall_value_in_a0_p = true;
  a0 = emit_library_call_value (m68k_get_m68k_read_tp (), NULL_RTX, LCT_PURE,
				Pmode, 0);
  m68k_libcall_value_in_a0_p = false;
  insns = get_insns ();
  end_sequence ();

  /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
     share the m68k_read_tp result with other IE/LE model accesses.  */
  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const1_rtx), UNSPEC_RELOC32);

  gcc_assert (can_create_pseudo_p ());
  dest = gen_reg_rtx (Pmode);
  emit_libcall_block (insns, dest, a0, eqv);

  return dest;
}
2647
/* Return a legitimized address for accessing TLS SYMBOL_REF X.
   For explanations on instructions sequences see TLS/NPTL ABI for m68k and
   ColdFire.

   Dispatches on the TLS access model recorded on the SYMBOL_REF:
   GD calls __tls_get_addr per symbol; LD calls it once per module and
   adds a per-symbol offset; IE loads the offset from the GOT and adds
   the thread pointer; LE adds a link-time offset to the thread
   pointer.  */

rtx
m68k_legitimize_tls_address (rtx orig)
{
  switch (SYMBOL_REF_TLS_MODEL (orig))
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      orig = m68k_call_tls_get_addr (orig, orig, RELOC_TLSGD);
      break;

    case TLS_MODEL_LOCAL_DYNAMIC:
      {
	rtx eqv;
	rtx a0;
	rtx x;

	/* Attach a unique REG_EQUIV, to allow the RTL optimizers to
	   share the LDM result with other LD model accesses.  */
	eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
			      UNSPEC_RELOC32);

	a0 = m68k_call_tls_get_addr (orig, eqv, RELOC_TLSLDM);

	x = m68k_wrap_symbol (orig, RELOC_TLSLDO, a0, NULL_RTX);

	if (can_create_pseudo_p ())
	  x = m68k_move_to_reg (x, orig, NULL_RTX);

	orig = x;
	break;
      }

    case TLS_MODEL_INITIAL_EXEC:
      {
	rtx a0;
	rtx x;

	a0 = m68k_call_m68k_read_tp ();

	x = m68k_wrap_symbol_into_got_ref (orig, RELOC_TLSIE, NULL_RTX);
	x = gen_rtx_PLUS (Pmode, x, a0);

	if (can_create_pseudo_p ())
	  x = m68k_move_to_reg (x, orig, NULL_RTX);

	orig = x;
	break;
      }

    case TLS_MODEL_LOCAL_EXEC:
      {
	rtx a0;
	rtx x;

	a0 = m68k_call_m68k_read_tp ();

	x = m68k_wrap_symbol (orig, RELOC_TLSLE, a0, NULL_RTX);

	if (can_create_pseudo_p ())
	  x = m68k_move_to_reg (x, orig, NULL_RTX);

	orig = x;
	break;
      }

    default:
      gcc_unreachable ();
    }

  return orig;
}
2722
2723/* Return true if X is a TLS symbol. */
2724
2725static bool
2726m68k_tls_symbol_p (rtx x)
2727{
2728 if (!TARGET_HAVE_TLS)
2729 return false;
2730
2731 if (GET_CODE (x) != SYMBOL_REF)
2732 return false;
2733
2734 return SYMBOL_REF_TLS_MODEL (x) != 0;
2735}
2736
2737/* Helper for m68k_tls_referenced_p. */
2738
2739static int
2740m68k_tls_reference_p_1 (rtx *x_ptr, void *data ATTRIBUTE_UNUSED)
2741{
2742 /* Note: this is not the same as m68k_tls_symbol_p. */
2743 if (GET_CODE (*x_ptr) == SYMBOL_REF)
2744 return SYMBOL_REF_TLS_MODEL (*x_ptr) != 0 ? 1 : 0;
2745
2746 /* Don't recurse into legitimate TLS references. */
2747 if (m68k_tls_reference_p (*x_ptr, true))
2748 return -1;
2749
2750 return 0;
2751}
2752
2753/* If !LEGITIMATE_P, return true if X is a TLS symbol reference,
2754 though illegitimate one.
2755 If LEGITIMATE_P, return true if X is a legitimate TLS symbol reference. */
2756
2757bool
2758m68k_tls_reference_p (rtx x, bool legitimate_p)
2759{
2760 if (!TARGET_HAVE_TLS)
2761 return false;
2762
2763 if (!legitimate_p)
2764 return for_each_rtx (&x, m68k_tls_reference_p_1, NULL) == 1 ? true : false;
2765 else
2766 {
2767 enum m68k_reloc reloc = RELOC_GOT;
2768
2769 return (m68k_unwrap_symbol_1 (x, true, &reloc) != x
2770 && TLS_RELOC_P (reloc));
2771 }
2772}
2773
79e68feb 2774\f
0ce6f9fb 2775
/* True if I fits in the signed 8-bit immediate of a moveq.  */
#define USE_MOVQ(i)	((unsigned) ((i) + 128) <= 255)

/* Return the type of move that should be used for integer I:
   the cheapest single-register idiom that can materialize I.  */

M68K_CONST_METHOD
m68k_const_method (HOST_WIDE_INT i)
{
  unsigned u;

  if (USE_MOVQ (i))
    return MOVQ;

  /* The ColdFire doesn't have byte or word operations.  */
  /* FIXME: This may not be useful for the m68060 either.  */
  if (!TARGET_COLDFIRE)
    {
      /* if -256 < N < 256 but N is not in range for a moveq
	 N^ff will be, so use moveq #N^ff, dreg; not.b dreg.  */
      if (USE_MOVQ (i ^ 0xff))
	return NOTB;
      /* Likewise, try with not.w */
      if (USE_MOVQ (i ^ 0xffff))
	return NOTW;
      /* This is the only value where neg.w is useful */
      if (i == -65408)
	return NEGW;
    }

  /* Try also with swap: moveq the halfword-swapped value, then swap.  */
  u = i;
  if (USE_MOVQ ((u >> 16) | (u << 16)))
    return SWAP;

  if (TARGET_ISAB)
    {
      /* Try using MVZ/MVS with an immediate value to load constants.  */
      if (i >= 0 && i <= 65535)
	return MVZ;
      if (i >= -32768 && i <= 32767)
	return MVS;
    }

  /* Otherwise, use move.l */
  return MOVL;
}
2821
/* Return the cost of moving constant I into a data register.
   The cost is expressed in the same relative units used elsewhere in
   m68k_rtx_costs: 0 for a single moveq, 1 for moveq plus one fix-up
   instruction, 2 for a full move.l immediate.  */

static int
const_int_cost (HOST_WIDE_INT i)
{
  switch (m68k_const_method (i))
    {
    case MOVQ:
      /* Constants between -128 and 127 are cheap due to moveq.  */
      return 0;
    case MVZ:
    case MVS:
    case NOTB:
    case NOTW:
    case NEGW:
    case SWAP:
      /* Constants easily generated by moveq + not.b/not.w/neg.w/swap.  */
      return 1;
    case MOVL:
      return 2;
    default:
      gcc_unreachable ();
    }
}
2846
3c50106f 2847static bool
f40751dd
JH
2848m68k_rtx_costs (rtx x, int code, int outer_code, int *total,
2849 bool speed ATTRIBUTE_UNUSED)
3c50106f
RH
2850{
2851 switch (code)
2852 {
2853 case CONST_INT:
2854 /* Constant zero is super cheap due to clr instruction. */
2855 if (x == const0_rtx)
2856 *total = 0;
2857 else
bda2a571 2858 *total = const_int_cost (INTVAL (x));
3c50106f
RH
2859 return true;
2860
2861 case CONST:
2862 case LABEL_REF:
2863 case SYMBOL_REF:
2864 *total = 3;
2865 return true;
2866
2867 case CONST_DOUBLE:
2868 /* Make 0.0 cheaper than other floating constants to
2869 encourage creating tstsf and tstdf insns. */
2870 if (outer_code == COMPARE
2871 && (x == CONST0_RTX (SFmode) || x == CONST0_RTX (DFmode)))
2872 *total = 4;
2873 else
2874 *total = 5;
2875 return true;
2876
2877 /* These are vaguely right for a 68020. */
2878 /* The costs for long multiply have been adjusted to work properly
2879 in synth_mult on the 68020, relative to an average of the time
2880 for add and the time for shift, taking away a little more because
2881 sometimes move insns are needed. */
a0a7fbc9
AS
2882 /* div?.w is relatively cheaper on 68000 counted in COSTS_N_INSNS
2883 terms. */
fe95f2f7
JB
2884#define MULL_COST \
2885 (TUNE_68060 ? 2 \
2886 : TUNE_68040 ? 5 \
03b3e271
KH
2887 : (TUNE_CFV2 && TUNE_EMAC) ? 3 \
2888 : (TUNE_CFV2 && TUNE_MAC) ? 4 \
2889 : TUNE_CFV2 ? 8 \
fe95f2f7
JB
2890 : TARGET_COLDFIRE ? 3 : 13)
2891
2892#define MULW_COST \
2893 (TUNE_68060 ? 2 \
2894 : TUNE_68040 ? 3 \
03b3e271
KH
2895 : TUNE_68000_10 ? 5 \
2896 : (TUNE_CFV2 && TUNE_EMAC) ? 3 \
2897 : (TUNE_CFV2 && TUNE_MAC) ? 2 \
2898 : TUNE_CFV2 ? 8 \
fe95f2f7
JB
2899 : TARGET_COLDFIRE ? 2 : 8)
2900
2901#define DIVW_COST \
2902 (TARGET_CF_HWDIV ? 11 \
2903 : TUNE_68000_10 || TARGET_COLDFIRE ? 12 : 27)
3c50106f
RH
2904
2905 case PLUS:
2906 /* An lea costs about three times as much as a simple add. */
2907 if (GET_MODE (x) == SImode
2908 && GET_CODE (XEXP (x, 1)) == REG
2909 && GET_CODE (XEXP (x, 0)) == MULT
2910 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
2911 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2912 && (INTVAL (XEXP (XEXP (x, 0), 1)) == 2
2913 || INTVAL (XEXP (XEXP (x, 0), 1)) == 4
2914 || INTVAL (XEXP (XEXP (x, 0), 1)) == 8))
eb849993
BI
2915 {
2916 /* lea an@(dx:l:i),am */
2917 *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 2 : 3);
2918 return true;
2919 }
3c50106f
RH
2920 return false;
2921
2922 case ASHIFT:
2923 case ASHIFTRT:
2924 case LSHIFTRT:
fe95f2f7 2925 if (TUNE_68060)
3c50106f
RH
2926 {
2927 *total = COSTS_N_INSNS(1);
2928 return true;
2929 }
fe95f2f7 2930 if (TUNE_68000_10)
3c50106f
RH
2931 {
2932 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
2933 {
2934 if (INTVAL (XEXP (x, 1)) < 16)
2935 *total = COSTS_N_INSNS (2) + INTVAL (XEXP (x, 1)) / 2;
2936 else
2937 /* We're using clrw + swap for these cases. */
2938 *total = COSTS_N_INSNS (4) + (INTVAL (XEXP (x, 1)) - 16) / 2;
2939 }
2940 else
a0a7fbc9 2941 *total = COSTS_N_INSNS (10); /* Worst case. */
3c50106f
RH
2942 return true;
2943 }
2944 /* A shift by a big integer takes an extra instruction. */
2945 if (GET_CODE (XEXP (x, 1)) == CONST_INT
2946 && (INTVAL (XEXP (x, 1)) == 16))
2947 {
2948 *total = COSTS_N_INSNS (2); /* clrw;swap */
2949 return true;
2950 }
2951 if (GET_CODE (XEXP (x, 1)) == CONST_INT
2952 && !(INTVAL (XEXP (x, 1)) > 0
2953 && INTVAL (XEXP (x, 1)) <= 8))
2954 {
eb849993 2955 *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 1 : 3); /* lsr #i,dn */
3c50106f
RH
2956 return true;
2957 }
2958 return false;
2959
2960 case MULT:
2961 if ((GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
2962 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
2963 && GET_MODE (x) == SImode)
2964 *total = COSTS_N_INSNS (MULW_COST);
2965 else if (GET_MODE (x) == QImode || GET_MODE (x) == HImode)
2966 *total = COSTS_N_INSNS (MULW_COST);
2967 else
2968 *total = COSTS_N_INSNS (MULL_COST);
2969 return true;
2970
2971 case DIV:
2972 case UDIV:
2973 case MOD:
2974 case UMOD:
2975 if (GET_MODE (x) == QImode || GET_MODE (x) == HImode)
2976 *total = COSTS_N_INSNS (DIVW_COST); /* div.w */
eb849993
BI
2977 else if (TARGET_CF_HWDIV)
2978 *total = COSTS_N_INSNS (18);
3c50106f
RH
2979 else
2980 *total = COSTS_N_INSNS (43); /* div.l */
2981 return true;
2982
f90b7a5a
PB
2983 case ZERO_EXTRACT:
2984 if (outer_code == COMPARE)
2985 *total = 0;
2986 return false;
2987
3c50106f
RH
2988 default:
2989 return false;
2990 }
2991}
2992
/* Return an instruction to move CONST_INT OPERANDS[1] into data register
   OPERANDS[0].

   Chooses the cheapest encoding as classified by m68k_const_method:
   a ColdFire mvz/mvs, a moveq, a moveq plus a fixup instruction, or a
   full move.l.  For the fixup cases OPERANDS[1] is rewritten in place
   to the intermediate moveq immediate.  */

static const char *
output_move_const_into_data_reg (rtx *operands)
{
  HOST_WIDE_INT i;

  i = INTVAL (operands[1]);
  switch (m68k_const_method (i))
    {
    case MVZ:
      /* ColdFire ISA B zero-extending 16-bit move.  */
      return "mvzw %1,%0";
    case MVS:
      /* ColdFire ISA B sign-extending 16-bit move.  */
      return "mvsw %1,%0";
    case MOVQ:
      /* Value fits in moveq's signed 8-bit immediate.  */
      return "moveq %1,%0";
    case NOTB:
      /* moveq the byte-complement, then not.b; the not clobbers the
	 condition codes, so invalidate the cc tracking state.  */
      CC_STATUS_INIT;
      operands[1] = GEN_INT (i ^ 0xff);
      return "moveq %1,%0\n\tnot%.b %0";
    case NOTW:
      /* Same trick with a word-wide complement.  */
      CC_STATUS_INIT;
      operands[1] = GEN_INT (i ^ 0xffff);
      return "moveq %1,%0\n\tnot%.w %0";
    case NEGW:
      /* i == 0x80 (128): moveq #-128 then negate the low word.  */
      CC_STATUS_INIT;
      return "moveq #-128,%0\n\tneg%.w %0";
    case SWAP:
      {
	/* The halfword-swapped value fits in moveq; emit moveq + swap.
	   Use an unsigned copy so the halves rotate without
	   sign-extension artifacts.  */
	unsigned u = i;

	operands[1] = GEN_INT ((u << 16) | (u >> 16));
	return "moveq %1,%0\n\tswap %0";
      }
    case MOVL:
      /* No shortcut applies; full 32-bit immediate move.  */
      return "move%.l %1,%0";
    default:
      gcc_unreachable ();
    }
}
3034
bda2a571 3035/* Return true if I can be handled by ISA B's mov3q instruction. */
5e04daf3 3036
bda2a571
RS
3037bool
3038valid_mov3q_const (HOST_WIDE_INT i)
3039{
3040 return TARGET_ISAB && (i == -1 || IN_RANGE (i, 1, 7));
5e04daf3
PB
3041}
3042
/* Return an instruction to move CONST_INT OPERANDS[1] into OPERANDS[0].

   The branch ordering is deliberate: cheaper destination-specific
   encodings (clr, mov3q, sub, moveq variants, pea) are tried before
   falling back to a full move.l.  */

static const char *
output_move_simode_const (rtx *operands)
{
  rtx dest;
  HOST_WIDE_INT src;

  dest = operands[0];
  src = INTVAL (operands[1]);
  if (src == 0
      && (DATA_REG_P (dest) || MEM_P (dest))
      /* clr insns on 68000 read before writing.  */
      && ((TARGET_68010 || TARGET_COLDFIRE)
	  || !(MEM_P (dest) && MEM_VOLATILE_P (dest))))
    return "clr%.l %0";
  else if (GET_MODE (dest) == SImode && valid_mov3q_const (src))
    /* ISA B three-bit quick move.  */
    return "mov3q%.l %1,%0";
  else if (src == 0 && ADDRESS_REG_P (dest))
    /* Address registers cannot be cleared with clr; subtract from self.  */
    return "sub%.l %0,%0";
  else if (DATA_REG_P (dest))
    return output_move_const_into_data_reg (operands);
  else if (ADDRESS_REG_P (dest) && IN_RANGE (src, -0x8000, 0x7fff))
    {
      /* movea.w sign-extends, so any 16-bit signed value works.  */
      if (valid_mov3q_const (src))
	return "mov3q%.l %1,%0";
      return "move%.w %1,%0";
    }
  else if (MEM_P (dest)
	   && GET_CODE (XEXP (dest, 0)) == PRE_DEC
	   && REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
	   && IN_RANGE (src, -0x8000, 0x7fff))
    {
      /* A push of a 16-bit constant: pea computes the sign-extended
	 value as an "address" and pushes it in one instruction.  */
      if (valid_mov3q_const (src))
	return "mov3q%.l %1,%-";
      return "pea %a1";
    }
  return "move%.l %1,%0";
}
3083
5505f548 3084const char *
8a4a2253 3085output_move_simode (rtx *operands)
f4e80198
RK
3086{
3087 if (GET_CODE (operands[1]) == CONST_INT)
3088 return output_move_simode_const (operands);
3089 else if ((GET_CODE (operands[1]) == SYMBOL_REF
3090 || GET_CODE (operands[1]) == CONST)
3091 && push_operand (operands[0], SImode))
3092 return "pea %a1";
3093 else if ((GET_CODE (operands[1]) == SYMBOL_REF
3094 || GET_CODE (operands[1]) == CONST)
3095 && ADDRESS_REG_P (operands[0]))
3096 return "lea %a1,%0";
3097 return "move%.l %1,%0";
3098}
3099
5505f548 3100const char *
8a4a2253 3101output_move_himode (rtx *operands)
f4e80198
RK
3102{
3103 if (GET_CODE (operands[1]) == CONST_INT)
3104 {
3105 if (operands[1] == const0_rtx
3106 && (DATA_REG_P (operands[0])
3107 || GET_CODE (operands[0]) == MEM)
3197c489
RS
3108 /* clr insns on 68000 read before writing. */
3109 && ((TARGET_68010 || TARGET_COLDFIRE)
f4e80198
RK
3110 || !(GET_CODE (operands[0]) == MEM
3111 && MEM_VOLATILE_P (operands[0]))))
3112 return "clr%.w %0";
38198304
AS
3113 else if (operands[1] == const0_rtx
3114 && ADDRESS_REG_P (operands[0]))
3115 return "sub%.l %0,%0";
f4e80198
RK
3116 else if (DATA_REG_P (operands[0])
3117 && INTVAL (operands[1]) < 128
3118 && INTVAL (operands[1]) >= -128)
a0a7fbc9 3119 return "moveq %1,%0";
f4e80198
RK
3120 else if (INTVAL (operands[1]) < 0x8000
3121 && INTVAL (operands[1]) >= -0x8000)
3122 return "move%.w %1,%0";
3123 }
3124 else if (CONSTANT_P (operands[1]))
3125 return "move%.l %1,%0";
f4e80198
RK
3126 return "move%.w %1,%0";
3127}
3128
/* Return the assembler template for a byte (QImode) move of
   OPERANDS[1] into OPERANDS[0].  May set OPERANDS up for cc tracking
   invalidation (the st case).  */

const char *
output_move_qimode (rtx *operands)
{
  /* 68k family always modifies the stack pointer by at least 2, even for
     byte pushes.  The 5200 (ColdFire) does not do this.  */

  /* This case is generated by pushqi1 pattern now.  */
  gcc_assert (!(GET_CODE (operands[0]) == MEM
		&& GET_CODE (XEXP (operands[0], 0)) == PRE_DEC
		&& XEXP (XEXP (operands[0], 0), 0) == stack_pointer_rtx
		&& ! ADDRESS_REG_P (operands[1])
		&& ! TARGET_COLDFIRE));

  /* clr and st insns on 68000 read before writing.  */
  if (!ADDRESS_REG_P (operands[0])
      && ((TARGET_68010 || TARGET_COLDFIRE)
	  || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
    {
      if (operands[1] == const0_rtx)
	return "clr%.b %0";
      /* All-ones byte can be stored with st ("set if true"), but st
	 leaves the condition codes in an unknown-to-us state.  */
      if ((!TARGET_COLDFIRE || DATA_REG_P (operands[0]))
	  && GET_CODE (operands[1]) == CONST_INT
	  && (INTVAL (operands[1]) & 255) == 255)
	{
	  CC_STATUS_INIT;
	  return "st %0";
	}
    }
  /* Small constants into a data register: moveq.  */
  if (GET_CODE (operands[1]) == CONST_INT
      && DATA_REG_P (operands[0])
      && INTVAL (operands[1]) < 128
      && INTVAL (operands[1]) >= -128)
    return "moveq %1,%0";
  if (operands[1] == const0_rtx && ADDRESS_REG_P (operands[0]))
    return "sub%.l %0,%0";
  if (GET_CODE (operands[1]) != CONST_INT && CONSTANT_P (operands[1]))
    return "move%.l %1,%0";
  /* 68k family (including the 5200 ColdFire) does not support byte moves to
     from address registers.  Fall back to a word move in that case.  */
  if (ADDRESS_REG_P (operands[0]) || ADDRESS_REG_P (operands[1]))
    return "move%.w %1,%0";
  return "move%.b %1,%0";
}
3172
5505f548 3173const char *
8a4a2253 3174output_move_stricthi (rtx *operands)
9b55bf04
RK
3175{
3176 if (operands[1] == const0_rtx
3197c489
RS
3177 /* clr insns on 68000 read before writing. */
3178 && ((TARGET_68010 || TARGET_COLDFIRE)
9b55bf04
RK
3179 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3180 return "clr%.w %0";
3181 return "move%.w %1,%0";
3182}
3183
5505f548 3184const char *
8a4a2253 3185output_move_strictqi (rtx *operands)
9b55bf04
RK
3186{
3187 if (operands[1] == const0_rtx
3197c489
RS
3188 /* clr insns on 68000 read before writing. */
3189 && ((TARGET_68010 || TARGET_COLDFIRE)
9b55bf04
RK
3190 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3191 return "clr%.b %0";
3192 return "move%.b %1,%0";
3193}
3194
79e68feb
RS
3195/* Return the best assembler insn template
3196 for moving operands[1] into operands[0] as a fullword. */
3197
5505f548 3198static const char *
8a4a2253 3199singlemove_string (rtx *operands)
79e68feb 3200{
02ed0c07
RK
3201 if (GET_CODE (operands[1]) == CONST_INT)
3202 return output_move_simode_const (operands);
3203 return "move%.l %1,%0";
79e68feb
RS
3204}
3205
2505bc97 3206
/* Output assembler or rtl code to perform a doubleword move insn
   with operands OPERANDS.
   Pointers to 3 helper functions should be specified:
   HANDLE_REG_ADJUST to adjust a register by a small value,
   HANDLE_COMPADR to compute an address and
   HANDLE_MOVSI to move 4 bytes.

   Handles 8-byte (DImode/DFmode) and 12-byte (XFmode) moves by
   decomposing them into 2 or 3 SImode moves, choosing an order that
   respects autoincrement/autodecrement addressing and overlap between
   the destination registers and the source address.  */

static void
handle_move_double (rtx operands[2],
		    void (*handle_reg_adjust) (rtx, int),
		    void (*handle_compadr) (rtx [2]),
		    void (*handle_movsi) (rtx [2]))
{
  /* Operand classification: register, offsettable memory, other memory,
     push (pre-decrement), pop (post-increment), constant, or invalid.  */
  enum
    {
      REGOP, OFFSOP, MEMOP, PUSHOP, POPOP, CNSTOP, RNDOP
    } optype0, optype1;
  rtx latehalf[2];	/* High-numbered (last) word of each operand.  */
  rtx middlehalf[2];	/* Middle word, used only when size == 12.  */
  rtx xops[2];
  rtx addreg0 = 0, addreg1 = 0;	/* Regs to bump for unoffsettable MEMs.  */
  int dest_overlapped_low = 0;
  int size = GET_MODE_SIZE (GET_MODE (operands[0]));

  middlehalf[0] = 0;
  middlehalf[1] = 0;

  /* First classify both operands. */

  if (REG_P (operands[0]))
    optype0 = REGOP;
  else if (offsettable_memref_p (operands[0]))
    optype0 = OFFSOP;
  else if (GET_CODE (XEXP (operands[0], 0)) == POST_INC)
    optype0 = POPOP;
  else if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC)
    optype0 = PUSHOP;
  else if (GET_CODE (operands[0]) == MEM)
    optype0 = MEMOP;
  else
    optype0 = RNDOP;

  if (REG_P (operands[1]))
    optype1 = REGOP;
  else if (CONSTANT_P (operands[1]))
    optype1 = CNSTOP;
  else if (offsettable_memref_p (operands[1]))
    optype1 = OFFSOP;
  else if (GET_CODE (XEXP (operands[1], 0)) == POST_INC)
    optype1 = POPOP;
  else if (GET_CODE (XEXP (operands[1], 0)) == PRE_DEC)
    optype1 = PUSHOP;
  else if (GET_CODE (operands[1]) == MEM)
    optype1 = MEMOP;
  else
    optype1 = RNDOP;

  /* Check for the cases that the operand constraints are not supposed
     to allow to happen.  Generating code for these cases is
     painful.  */
  gcc_assert (optype0 != RNDOP && optype1 != RNDOP);

  /* If one operand is decrementing and one is incrementing
     decrement the former register explicitly
     and change that operand into ordinary indexing.  */

  if (optype0 == PUSHOP && optype1 == POPOP)
    {
      operands[0] = XEXP (XEXP (operands[0], 0), 0);

      handle_reg_adjust (operands[0], -size);

      /* NOTE(review): the DFmode test below checks operands[0], which
	 at this point is the address register (SImode), so the DFmode
	 arm appears unreachable and non-XF sources always get DImode —
	 presumably harmless, but confirm before relying on it.  */
      if (GET_MODE (operands[1]) == XFmode)
	operands[0] = gen_rtx_MEM (XFmode, operands[0]);
      else if (GET_MODE (operands[0]) == DFmode)
	operands[0] = gen_rtx_MEM (DFmode, operands[0]);
      else
	operands[0] = gen_rtx_MEM (DImode, operands[0]);
      optype0 = OFFSOP;
    }
  if (optype0 == POPOP && optype1 == PUSHOP)
    {
      operands[1] = XEXP (XEXP (operands[1], 0), 0);

      handle_reg_adjust (operands[1], -size);

      if (GET_MODE (operands[1]) == XFmode)
	operands[1] = gen_rtx_MEM (XFmode, operands[1]);
      else if (GET_MODE (operands[1]) == DFmode)
	operands[1] = gen_rtx_MEM (DFmode, operands[1]);
      else
	operands[1] = gen_rtx_MEM (DImode, operands[1]);
      optype1 = OFFSOP;
    }

  /* If an operand is an unoffsettable memory ref, find a register
     we can increment temporarily to make it refer to the second word.  */

  if (optype0 == MEMOP)
    addreg0 = find_addr_reg (XEXP (operands[0], 0));

  if (optype1 == MEMOP)
    addreg1 = find_addr_reg (XEXP (operands[1], 0));

  /* Ok, we can do one word at a time.
     Normally we do the low-numbered word first,
     but if either operand is autodecrementing then we
     do the high-numbered word first.

     In either case, set up in LATEHALF the operands to use
     for the high-numbered word and in some cases alter the
     operands in OPERANDS to be suitable for the low-numbered word.  */

  if (size == 12)
    {
      if (optype0 == REGOP)
	{
	  latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 2);
	  middlehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
	}
      else if (optype0 == OFFSOP)
	{
	  middlehalf[0] = adjust_address (operands[0], SImode, 4);
	  latehalf[0] = adjust_address (operands[0], SImode, size - 4);
	}
      else
	{
	  /* Unoffsettable MEM: same address for all three parts; the
	     address register is bumped between moves instead.  */
	  middlehalf[0] = adjust_address (operands[0], SImode, 0);
	  latehalf[0] = adjust_address (operands[0], SImode, 0);
	}

      if (optype1 == REGOP)
	{
	  latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 2);
	  middlehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
	}
      else if (optype1 == OFFSOP)
	{
	  middlehalf[1] = adjust_address (operands[1], SImode, 4);
	  latehalf[1] = adjust_address (operands[1], SImode, size - 4);
	}
      else if (optype1 == CNSTOP)
	{
	  if (GET_CODE (operands[1]) == CONST_DOUBLE)
	    {
	      /* Split an XFmode constant into its three target words.  */
	      REAL_VALUE_TYPE r;
	      long l[3];

	      REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
	      REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
	      operands[1] = GEN_INT (l[0]);
	      middlehalf[1] = GEN_INT (l[1]);
	      latehalf[1] = GEN_INT (l[2]);
	    }
	  else
	    {
	      /* No non-CONST_DOUBLE constant should ever appear
		 here.  */
	      gcc_assert (!CONSTANT_P (operands[1]));
	    }
	}
      else
	{
	  middlehalf[1] = adjust_address (operands[1], SImode, 0);
	  latehalf[1] = adjust_address (operands[1], SImode, 0);
	}
    }
  else
    /* size is not 12: */
    {
      if (optype0 == REGOP)
	latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
      else if (optype0 == OFFSOP)
	latehalf[0] = adjust_address (operands[0], SImode, size - 4);
      else
	latehalf[0] = adjust_address (operands[0], SImode, 0);

      if (optype1 == REGOP)
	latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
      else if (optype1 == OFFSOP)
	latehalf[1] = adjust_address (operands[1], SImode, size - 4);
      else if (optype1 == CNSTOP)
	split_double (operands[1], &operands[1], &latehalf[1]);
      else
	latehalf[1] = adjust_address (operands[1], SImode, 0);
    }

  /* If insn is effectively movd N(sp),-(sp) then we will do the
     high word first.  We should use the adjusted operand 1 (which is N+4(sp))
     for the low word as well, to compensate for the first decrement of sp.  */
  if (optype0 == PUSHOP
      && REGNO (XEXP (XEXP (operands[0], 0), 0)) == STACK_POINTER_REGNUM
      && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
    operands[1] = middlehalf[1] = latehalf[1];

  /* For (set (reg:DI N) (mem:DI ... (reg:SI N) ...)),
     if the upper part of reg N does not appear in the MEM, arrange to
     emit the move late-half first.  Otherwise, compute the MEM address
     into the upper part of N and use that as a pointer to the memory
     operand.  */
  if (optype0 == REGOP
      && (optype1 == OFFSOP || optype1 == MEMOP))
    {
      rtx testlow = gen_rtx_REG (SImode, REGNO (operands[0]));

      if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
	  && reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
	{
	  /* If both halves of dest are used in the src memory address,
	     compute the address into latehalf of dest.
	     Note that this can't happen if the dest is two data regs.  */
	compadr:
	  xops[0] = latehalf[0];
	  xops[1] = XEXP (operands[1], 0);

	  handle_compadr (xops);
	  if (GET_MODE (operands[1]) == XFmode)
	    {
	      operands[1] = gen_rtx_MEM (XFmode, latehalf[0]);
	      middlehalf[1] = adjust_address (operands[1], DImode, size - 8);
	      latehalf[1] = adjust_address (operands[1], DImode, size - 4);
	    }
	  else
	    {
	      operands[1] = gen_rtx_MEM (DImode, latehalf[0]);
	      latehalf[1] = adjust_address (operands[1], DImode, size - 4);
	    }
	}
      else if (size == 12
	       && reg_overlap_mentioned_p (middlehalf[0],
					   XEXP (operands[1], 0)))
	{
	  /* Check for two regs used by both source and dest.
	     Note that this can't happen if the dest is all data regs.
	     It can happen if the dest is d6, d7, a0.
	     But in that case, latehalf is an addr reg, so
	     the code at compadr does ok.  */

	  if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
	      || reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
	    goto compadr;

	  /* JRV says this can't happen: */
	  gcc_assert (!addreg0 && !addreg1);

	  /* Only the middle reg conflicts; simply put it last.  */
	  handle_movsi (operands);
	  handle_movsi (latehalf);
	  handle_movsi (middlehalf);

	  return;
	}
      else if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0)))
	/* If the low half of dest is mentioned in the source memory
	   address, the arrange to emit the move late half first.  */
	dest_overlapped_low = 1;
    }

  /* If one or both operands autodecrementing,
     do the two words, high-numbered first.  */

  /* Likewise,  the first move would clobber the source of the second one,
     do them in the other order.  This happens only for registers;
     such overlap can't happen in memory unless the user explicitly
     sets it up, and that is an undefined circumstance.  */

  if (optype0 == PUSHOP || optype1 == PUSHOP
      || (optype0 == REGOP && optype1 == REGOP
	  && ((middlehalf[1] && REGNO (operands[0]) == REGNO (middlehalf[1]))
	      || REGNO (operands[0]) == REGNO (latehalf[1])))
      || dest_overlapped_low)
    {
      /* Make any unoffsettable addresses point at high-numbered word.  */
      if (addreg0)
	handle_reg_adjust (addreg0, size - 4);
      if (addreg1)
	handle_reg_adjust (addreg1, size - 4);

      /* Do that word.  */
      handle_movsi (latehalf);

      /* Undo the adds we just did.  */
      if (addreg0)
	handle_reg_adjust (addreg0, -4);
      if (addreg1)
	handle_reg_adjust (addreg1, -4);

      if (size == 12)
	{
	  handle_movsi (middlehalf);

	  if (addreg0)
	    handle_reg_adjust (addreg0, -4);
	  if (addreg1)
	    handle_reg_adjust (addreg1, -4);
	}

      /* Do low-numbered word.  */

      handle_movsi (operands);
      return;
    }

  /* Normal case: do the two words, low-numbered first.  */

  handle_movsi (operands);

  /* Do the middle one of the three words for long double */
  if (size == 12)
    {
      if (addreg0)
	handle_reg_adjust (addreg0, 4);
      if (addreg1)
	handle_reg_adjust (addreg1, 4);

      handle_movsi (middlehalf);
    }

  /* Make any unoffsettable addresses point at high-numbered word.  */
  if (addreg0)
    handle_reg_adjust (addreg0, 4);
  if (addreg1)
    handle_reg_adjust (addreg1, 4);

  /* Do that word.  */
  handle_movsi (latehalf);

  /* Undo the adds we just did.  */
  if (addreg0)
    handle_reg_adjust (addreg0, -(size - 4));
  if (addreg1)
    handle_reg_adjust (addreg1, -(size - 4));

  return;
}
3542
3543/* Output assembler code to adjust REG by N. */
3544static void
3545output_reg_adjust (rtx reg, int n)
3546{
3547 const char *s;
3548
3549 gcc_assert (GET_MODE (reg) == SImode
3550 && -12 <= n && n != 0 && n <= 12);
3551
3552 switch (n)
2505bc97 3553 {
c47b0cb4
MK
3554 case 12:
3555 s = "add%.l #12,%0";
3556 break;
3557
3558 case 8:
3559 s = "addq%.l #8,%0";
3560 break;
3561
3562 case 4:
3563 s = "addq%.l #4,%0";
3564 break;
3565
3566 case -12:
3567 s = "sub%.l #12,%0";
3568 break;
3569
3570 case -8:
3571 s = "subq%.l #8,%0";
3572 break;
3573
3574 case -4:
3575 s = "subq%.l #4,%0";
3576 break;
3577
3578 default:
3579 gcc_unreachable ();
3580 s = NULL;
2505bc97 3581 }
c47b0cb4
MK
3582
3583 output_asm_insn (s, &reg);
3584}
3585
3586/* Emit rtl code to adjust REG by N. */
3587static void
3588emit_reg_adjust (rtx reg1, int n)
3589{
3590 rtx reg2;
3591
3592 gcc_assert (GET_MODE (reg1) == SImode
3593 && -12 <= n && n != 0 && n <= 12);
3594
3595 reg1 = copy_rtx (reg1);
3596 reg2 = copy_rtx (reg1);
3597
3598 if (n < 0)
3599 emit_insn (gen_subsi3 (reg1, reg2, GEN_INT (-n)));
3600 else if (n > 0)
3601 emit_insn (gen_addsi3 (reg1, reg2, GEN_INT (n)));
3602 else
3603 gcc_unreachable ();
3604}
3605
3606/* Output assembler to load address OPERANDS[0] to register OPERANDS[1]. */
3607static void
3608output_compadr (rtx operands[2])
3609{
3610 output_asm_insn ("lea %a1,%0", operands);
3611}
3612
3613/* Output the best assembler insn for moving operands[1] into operands[0]
3614 as a fullword. */
3615static void
3616output_movsi (rtx operands[2])
3617{
3618 output_asm_insn (singlemove_string (operands), operands);
3619}
3620
3621/* Copy OP and change its mode to MODE. */
3622static rtx
3623copy_operand (rtx op, enum machine_mode mode)
3624{
3625 /* ??? This looks really ugly. There must be a better way
3626 to change a mode on the operand. */
3627 if (GET_MODE (op) != VOIDmode)
2505bc97 3628 {
c47b0cb4
MK
3629 if (REG_P (op))
3630 op = gen_rtx_REG (mode, REGNO (op));
2505bc97 3631 else
c47b0cb4
MK
3632 {
3633 op = copy_rtx (op);
3634 PUT_MODE (op, mode);
3635 }
2505bc97 3636 }
79e68feb 3637
c47b0cb4
MK
3638 return op;
3639}
3640
3641/* Emit rtl code for moving operands[1] into operands[0] as a fullword. */
3642static void
3643emit_movsi (rtx operands[2])
3644{
3645 operands[0] = copy_operand (operands[0], SImode);
3646 operands[1] = copy_operand (operands[1], SImode);
3647
3648 emit_insn (gen_movsi (operands[0], operands[1]));
3649}
3650
3651/* Output assembler code to perform a doubleword move insn
3652 with operands OPERANDS. */
3653const char *
3654output_move_double (rtx *operands)
3655{
3656 handle_move_double (operands,
3657 output_reg_adjust, output_compadr, output_movsi);
3658
79e68feb
RS
3659 return "";
3660}
3661
c47b0cb4
MK
3662/* Output rtl code to perform a doubleword move insn
3663 with operands OPERANDS. */
3664void
3665m68k_emit_move_double (rtx operands[2])
3666{
3667 handle_move_double (operands, emit_reg_adjust, emit_movsi, emit_movsi);
3668}
dcc21c4c
PB
3669
3670/* Ensure mode of ORIG, a REG rtx, is MODE. Returns either ORIG or a
3671 new rtx with the correct mode. */
3672
3673static rtx
3674force_mode (enum machine_mode mode, rtx orig)
3675{
3676 if (mode == GET_MODE (orig))
3677 return orig;
3678
3679 if (REGNO (orig) >= FIRST_PSEUDO_REGISTER)
3680 abort ();
3681
3682 return gen_rtx_REG (mode, REGNO (orig));
3683}
3684
3685static int
3686fp_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3687{
3688 return reg_renumber && FP_REG_P (op);
3689}
3690
/* Emit insns to move operands[1] into operands[0].

   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.

   Note SCRATCH_REG may not be in the proper mode depending on how it
   will be used.  This routine is responsible for creating a new copy
   of SCRATCH_REG in the proper mode.

   NOTE(review): the "14 bits" wording below looks inherited from
   another backend's copy of this routine — confirm the actual range
   constraint for m68k before tightening it.  */

int
emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
{
  register rtx operand0 = operands[0];
  register rtx operand1 = operands[1];
  register rtx tem;

  /* During reload, replace pseudo destinations with their stack-slot
     memory equivalents so the secondary-reload logic below sees the
     real addresses.  */
  if (scratch_reg
      && reload_in_progress && GET_CODE (operand0) == REG
      && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
    operand0 = reg_equiv_mem[REGNO (operand0)];
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand0) == SUBREG
	   && GET_CODE (SUBREG_REG (operand0)) == REG
	   && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
    {
      /* We must not alter SUBREG_BYTE (operand0) since that would confuse
	 the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
				 reg_equiv_mem [REGNO (SUBREG_REG (operand0))],
				 SUBREG_BYTE (operand0));
      operand0 = alter_subreg (&temp);
    }

  /* Same substitution for the source.  */
  if (scratch_reg
      && reload_in_progress && GET_CODE (operand1) == REG
      && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
    operand1 = reg_equiv_mem[REGNO (operand1)];
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand1) == SUBREG
	   && GET_CODE (SUBREG_REG (operand1)) == REG
	   && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
    {
      /* We must not alter SUBREG_BYTE (operand0) since that would confuse
	 the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
				 reg_equiv_mem [REGNO (SUBREG_REG (operand1))],
				 SUBREG_BYTE (operand1));
      operand1 = alter_subreg (&temp);
    }

  /* Substitute any pending reload replacements inside MEM addresses.  */
  if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
      && ((tem = find_replacement (&XEXP (operand0, 0)))
	  != XEXP (operand0, 0)))
    operand0 = gen_rtx_MEM (GET_MODE (operand0), tem);
  if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
      && ((tem = find_replacement (&XEXP (operand1, 0)))
	  != XEXP (operand1, 0)))
    operand1 = gen_rtx_MEM (GET_MODE (operand1), tem);

  /* Handle secondary reloads for loads/stores of FP registers where
     the address is symbolic by using the scratch register */
  if (fp_reg_operand (operand0, mode)
      && ((GET_CODE (operand1) == MEM
	   && ! memory_address_p (DFmode, XEXP (operand1, 0)))
	  || ((GET_CODE (operand1) == SUBREG
	       && GET_CODE (XEXP (operand1, 0)) == MEM
	       && !memory_address_p (DFmode, XEXP (XEXP (operand1, 0), 0)))))
      && scratch_reg)
    {
      if (GET_CODE (operand1) == SUBREG)
	operand1 = XEXP (operand1, 0);

      /* SCRATCH_REG will hold an address.  We want
	 it in SImode regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (SImode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand1, 0)))
	{
	  /* Load the displacement, then rebuild the address using the
	     scratch register as the displacement term.  */
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
	  emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
						       Pmode,
						       XEXP (XEXP (operand1, 0), 0),
						       scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand1, 0));
      emit_insn (gen_rtx_SET (VOIDmode, operand0,
			      gen_rtx_MEM (mode, scratch_reg)));
      return 1;
    }
  else if (fp_reg_operand (operand1, mode)
	   && ((GET_CODE (operand0) == MEM
		&& ! memory_address_p (DFmode, XEXP (operand0, 0)))
	       || ((GET_CODE (operand0) == SUBREG)
		   && GET_CODE (XEXP (operand0, 0)) == MEM
		   && !memory_address_p (DFmode, XEXP (XEXP (operand0, 0), 0))))
	   && scratch_reg)
    {
      /* Mirror case: FP register store to a symbolic address.  */
      if (GET_CODE (operand0) == SUBREG)
	operand0 = XEXP (operand0, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in SIMODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (SImode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand0, 0)))
	{
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
	  emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0,
								       0)),
						       Pmode,
						       XEXP (XEXP (operand0, 0),
							     0),
						       scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand0, 0));
      emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_MEM (mode, scratch_reg),
			      operand1));
      return 1;
    }
  /* Handle secondary reloads for loads of FP registers from constant
     expressions by forcing the constant into memory.

     use scratch_reg to hold the address of the memory location.

     The proper fix is to change PREFERRED_RELOAD_CLASS to return
     NO_REGS when presented with a const_int and an register class
     containing only FP registers.  Doing so unfortunately creates
     more problems than it solves.   Fix this for 2.5.  */
  else if (fp_reg_operand (operand0, mode)
	   && CONSTANT_P (operand1)
	   && scratch_reg)
    {
      rtx xoperands[2];

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in SIMODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (SImode, scratch_reg);

      /* Force the constant into memory and put the address of the
	 memory location into scratch_reg.  */
      xoperands[0] = scratch_reg;
      xoperands[1] = XEXP (force_const_mem (mode, operand1), 0);
      emit_insn (gen_rtx_SET (mode, scratch_reg, xoperands[1]));

      /* Now load the destination register.  */
      emit_insn (gen_rtx_SET (mode, operand0,
			      gen_rtx_MEM (mode, scratch_reg)));
      return 1;
    }

  /* Now have insn-emit do whatever it normally does.  */
  return 0;
}
3854
01e304f8
RZ
3855/* Split one or more DImode RTL references into pairs of SImode
3856 references. The RTL can be REG, offsettable MEM, integer constant, or
3857 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
3858 split and "num" is its length. lo_half and hi_half are output arrays
3859 that parallel "operands". */
3860
3861void
3862split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
3863{
3864 while (num--)
3865 {
3866 rtx op = operands[num];
3867
3868 /* simplify_subreg refuses to split volatile memory addresses,
3869 but we still have to handle it. */
3870 if (GET_CODE (op) == MEM)
3871 {
3872 lo_half[num] = adjust_address (op, SImode, 4);
3873 hi_half[num] = adjust_address (op, SImode, 0);
3874 }
3875 else
3876 {
3877 lo_half[num] = simplify_gen_subreg (SImode, op,
3878 GET_MODE (op) == VOIDmode
3879 ? DImode : GET_MODE (op), 4);
3880 hi_half[num] = simplify_gen_subreg (SImode, op,
3881 GET_MODE (op) == VOIDmode
3882 ? DImode : GET_MODE (op), 0);
3883 }
3884 }
3885}
3886
a40ed0f3
KH
3887/* Split X into a base and a constant offset, storing them in *BASE
3888 and *OFFSET respectively. */
3889
3890static void
3891m68k_split_offset (rtx x, rtx *base, HOST_WIDE_INT *offset)
3892{
3893 *offset = 0;
3894 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
3895 {
3896 *offset += INTVAL (XEXP (x, 1));
3897 x = XEXP (x, 0);
3898 }
3899 *base = x;
3900}
3901
/* Return true if PATTERN is a PARALLEL suitable for a movem or fmovem
   instruction.  STORE_P says whether the move is a load or store.

   If the instruction uses post-increment or pre-decrement addressing,
   AUTOMOD_BASE is the base register and AUTOMOD_OFFSET is the total
   adjustment.  This adjustment will be made by the first element of
   PARALLEL, with the loads or stores starting at element 1.  If the
   instruction does not use post-increment or pre-decrement addressing,
   AUTOMOD_BASE is null, AUTOMOD_OFFSET is 0, and the loads or stores
   start at element 0.  */

bool
m68k_movem_pattern_p (rtx pattern, rtx automod_base,
		      HOST_WIDE_INT automod_offset, bool store_p)
{
  rtx base, mem_base, set, mem, reg, last_reg;
  HOST_WIDE_INT offset, mem_offset;
  int i, first, len;
  enum reg_class rclass;

  len = XVECLEN (pattern, 0);
  first = (automod_base != NULL);

  if (automod_base)
    {
      /* Stores must be pre-decrement and loads must be post-increment.  */
      if (store_p != (automod_offset < 0))
	return false;

      /* Work out the base and offset for lowest memory location.  */
      base = automod_base;
      offset = (automod_offset < 0 ? automod_offset : 0);
    }
  else
    {
      /* Allow any valid base and offset in the first access.  */
      base = NULL;
      offset = 0;
    }

  last_reg = NULL;
  rclass = NO_REGS;
  for (i = first; i < len; i++)
    {
      /* We need a plain SET.  */
      set = XVECEXP (pattern, 0, i);
      if (GET_CODE (set) != SET)
	return false;

      /* Check that we have a memory location...  */
      mem = XEXP (set, !store_p);
      if (!MEM_P (mem) || !memory_operand (mem, VOIDmode))
	return false;

      /* ...with the right address.  The first iteration establishes
	 BASE/OFFSET; later iterations must match the running offset
	 so that the accesses are consecutive.  */
      if (base == NULL)
	{
	  m68k_split_offset (XEXP (mem, 0), &base, &offset);
	  /* The ColdFire instruction only allows (An) and (d16,An) modes.
	     There are no mode restrictions for 680x0 besides the
	     automodification rules enforced above.  */
	  if (TARGET_COLDFIRE
	      && !m68k_legitimate_base_reg_p (base, reload_completed))
	    return false;
	}
      else
	{
	  m68k_split_offset (XEXP (mem, 0), &mem_base, &mem_offset);
	  if (!rtx_equal_p (base, mem_base) || offset != mem_offset)
	    return false;
	}

      /* Check that we have a register of the required mode and class.  */
      reg = XEXP (set, store_p);
      if (!REG_P (reg)
	  || !HARD_REGISTER_P (reg)
	  || GET_MODE (reg) != reg_raw_mode[REGNO (reg)])
	return false;

      if (last_reg)
	{
	  /* The register must belong to RCLASS and have a higher number
	     than the register in the previous SET.  */
	  if (!TEST_HARD_REG_BIT (reg_class_contents[rclass], REGNO (reg))
	      || REGNO (last_reg) >= REGNO (reg))
	    return false;
	}
      else
	{
	  /* Work out which register class we need.  All registers in
	     the PARALLEL must come from the same class.  */
	  if (INT_REGNO_P (REGNO (reg)))
	    rclass = GENERAL_REGS;
	  else if (FP_REGNO_P (REGNO (reg)))
	    rclass = FP_REGS;
	  else
	    return false;
	}

      last_reg = reg;
      offset += GET_MODE_SIZE (GET_MODE (reg));
    }

  /* If we have an automodification, check whether the final offset is OK.  */
  if (automod_base && offset != (automod_offset < 0 ? 0 : automod_offset))
    return false;

  /* Reject unprofitable cases.  */
  if (len < first + (rclass == FP_REGS ? MIN_FMOVEM_REGS : MIN_MOVEM_REGS))
    return false;

  return true;
}
4014
/* Return the assembly code template for a movem or fmovem instruction
   whose pattern is given by PATTERN.  Store the template's operands
   in OPERANDS: operand 0 becomes the memory address and operand 1 the
   register mask immediate.

   If the instruction uses post-increment or pre-decrement addressing,
   AUTOMOD_OFFSET is the total adjustment, otherwise it is 0.  STORE_P
   is true if this is a store instruction.  */

const char *
m68k_output_movem (rtx *operands, rtx pattern,
		   HOST_WIDE_INT automod_offset, bool store_p)
{
  unsigned int mask;
  int i, first;

  gcc_assert (GET_CODE (pattern) == PARALLEL);
  mask = 0;
  first = (automod_offset != 0);
  for (i = first; i < XVECLEN (pattern, 0); i++)
    {
      /* When using movem with pre-decrement addressing, register X + D0_REG
	 is controlled by bit 15 - X.  For all other addressing modes,
	 register X + D0_REG is controlled by bit X.  Confusingly, the
	 register mask for fmovem is in the opposite order to that for
	 movem.  */
      unsigned int regno;

      gcc_assert (MEM_P (XEXP (XVECEXP (pattern, 0, i), !store_p)));
      gcc_assert (REG_P (XEXP (XVECEXP (pattern, 0, i), store_p)));
      regno = REGNO (XEXP (XVECEXP (pattern, 0, i), store_p));
      if (automod_offset < 0)
	{
	  if (FP_REGNO_P (regno))
	    mask |= 1 << (regno - FP0_REG);
	  else
	    mask |= 1 << (15 - (regno - D0_REG));
	}
      else
	{
	  if (FP_REGNO_P (regno))
	    mask |= 1 << (7 - (regno - FP0_REG));
	  else
	    mask |= 1 << (regno - D0_REG);
	}
    }
  /* The multi-register move invalidates any tracked condition codes.  */
  CC_STATUS_INIT;

  /* Operand 0: the address, expressed with the proper automodification
     code when AUTOMOD_OFFSET says one is in effect.  */
  if (automod_offset == 0)
    operands[0] = XEXP (XEXP (XVECEXP (pattern, 0, first), !store_p), 0);
  else if (automod_offset < 0)
    operands[0] = gen_rtx_PRE_DEC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
  else
    operands[0] = gen_rtx_POST_INC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
  operands[1] = GEN_INT (mask);
  if (FP_REGNO_P (REGNO (XEXP (XVECEXP (pattern, 0, first), store_p))))
    {
      if (store_p)
	return "fmovem %1,%a0";
      else
	return "fmovem %a0,%1";
    }
  else
    {
      if (store_p)
	return "movem%.l %1,%a0";
      else
	return "movem%.l %a0,%1";
    }
}
4084
79e68feb
RS
4085/* Return a REG that occurs in ADDR with coefficient 1.
4086 ADDR can be effectively incremented by incrementing REG. */
4087
4088static rtx
8a4a2253 4089find_addr_reg (rtx addr)
79e68feb
RS
4090{
4091 while (GET_CODE (addr) == PLUS)
4092 {
4093 if (GET_CODE (XEXP (addr, 0)) == REG)
4094 addr = XEXP (addr, 0);
4095 else if (GET_CODE (XEXP (addr, 1)) == REG)
4096 addr = XEXP (addr, 1);
4097 else if (CONSTANT_P (XEXP (addr, 0)))
4098 addr = XEXP (addr, 1);
4099 else if (CONSTANT_P (XEXP (addr, 1)))
4100 addr = XEXP (addr, 0);
4101 else
4761e388 4102 gcc_unreachable ();
79e68feb 4103 }
4761e388
NS
4104 gcc_assert (GET_CODE (addr) == REG);
4105 return addr;
79e68feb 4106}
9ee3c687 4107
/* Output assembler code to perform a 32-bit 3-operand add.
   Returns the instruction template; may rewrite operands[1]/operands[2]
   in place to match the template chosen.  */

const char *
output_addsi3 (rtx *operands)
{
  if (! operands_match_p (operands[0], operands[1]))
    {
      /* Three-address form: use lea, which requires the base to be an
	 address register, so swap the sources if needed.  */
      if (!ADDRESS_REG_P (operands[1]))
	{
	  rtx tmp = operands[1];

	  operands[1] = operands[2];
	  operands[2] = tmp;
	}

      /* These insns can result from reloads to access
	 stack slots over 64k from the frame pointer.  */
      if (GET_CODE (operands[2]) == CONST_INT
	  && (INTVAL (operands[2]) < -32768 || INTVAL (operands[2]) > 32767))
	return "move%.l %2,%0\n\tadd%.l %1,%0";
      if (GET_CODE (operands[2]) == REG)
	return MOTOROLA ? "lea (%1,%2.l),%0" : "lea %1@(0,%2:l),%0";
      return MOTOROLA ? "lea (%c2,%1),%0" : "lea %1@(%c2),%0";
    }
  if (GET_CODE (operands[2]) == CONST_INT)
    {
      /* addq/subq handle immediates in [1,8].  */
      if (INTVAL (operands[2]) > 0
	  && INTVAL (operands[2]) <= 8)
	return "addq%.l %2,%0";
      if (INTVAL (operands[2]) < 0
	  && INTVAL (operands[2]) >= -8)
	{
	  operands[2] = GEN_INT (- INTVAL (operands[2]));
	  return "subq%.l %2,%0";
	}
      /* On the CPU32 it is faster to use two addql instructions to
	 add a small integer (8 < N <= 16) to a register.
	 Likewise for subql.  */
      if (TUNE_CPU32 && REG_P (operands[0]))
	{
	  if (INTVAL (operands[2]) > 8
	      && INTVAL (operands[2]) <= 16)
	    {
	      operands[2] = GEN_INT (INTVAL (operands[2]) - 8);
	      return "addq%.l #8,%0\n\taddq%.l %2,%0";
	    }
	  if (INTVAL (operands[2]) < -8
	      && INTVAL (operands[2]) >= -16)
	    {
	      operands[2] = GEN_INT (- INTVAL (operands[2]) - 8);
	      return "subq%.l #8,%0\n\tsubq%.l %2,%0";
	    }
	}
      /* 16-bit immediate add into an address register (sign-extended),
	 except on the 68040 where lea is preferred over add.w.  */
      if (ADDRESS_REG_P (operands[0])
	  && INTVAL (operands[2]) >= -0x8000
	  && INTVAL (operands[2]) < 0x8000)
	{
	  if (TUNE_68040)
	    return "add%.w %2,%0";
	  else
	    return MOTOROLA ? "lea (%c2,%0),%0" : "lea %0@(%c2),%0";
	}
    }
  return "add%.l %2,%0";
}
79e68feb
RS
4173\f
4174/* Store in cc_status the expressions that the condition codes will
4175 describe after execution of an instruction whose pattern is EXP.
4176 Do not alter them if the instruction would not alter the cc's. */
4177
4178/* On the 68000, all the insns to store in an address register fail to
4179 set the cc's. However, in some cases these instructions can make it
4180 possibly invalid to use the saved cc's. In those cases we clear out
4181 some or all of the saved cc's so they won't be used. */
4182
1d8eaa6b 4183void
8a4a2253 4184notice_update_cc (rtx exp, rtx insn)
79e68feb 4185{
1a8965c4 4186 if (GET_CODE (exp) == SET)
79e68feb
RS
4187 {
4188 if (GET_CODE (SET_SRC (exp)) == CALL)
a0a7fbc9 4189 CC_STATUS_INIT;
79e68feb
RS
4190 else if (ADDRESS_REG_P (SET_DEST (exp)))
4191 {
f5963e61 4192 if (cc_status.value1 && modified_in_p (cc_status.value1, insn))
79e68feb 4193 cc_status.value1 = 0;
f5963e61 4194 if (cc_status.value2 && modified_in_p (cc_status.value2, insn))
79e68feb
RS
4195 cc_status.value2 = 0;
4196 }
f6ab62e8
RS
4197 /* fmoves to memory or data registers do not set the condition
4198 codes. Normal moves _do_ set the condition codes, but not in
4199 a way that is appropriate for comparison with 0, because -0.0
4200 would be treated as a negative nonzero number. Note that it
88512ba0 4201 isn't appropriate to conditionalize this restriction on
f6ab62e8
RS
4202 HONOR_SIGNED_ZEROS because that macro merely indicates whether
4203 we care about the difference between -0.0 and +0.0. */
79e68feb
RS
4204 else if (!FP_REG_P (SET_DEST (exp))
4205 && SET_DEST (exp) != cc0_rtx
4206 && (FP_REG_P (SET_SRC (exp))
4207 || GET_CODE (SET_SRC (exp)) == FIX
f6ab62e8 4208 || FLOAT_MODE_P (GET_MODE (SET_DEST (exp)))))
a0a7fbc9 4209 CC_STATUS_INIT;
79e68feb
RS
4210 /* A pair of move insns doesn't produce a useful overall cc. */
4211 else if (!FP_REG_P (SET_DEST (exp))
4212 && !FP_REG_P (SET_SRC (exp))
4213 && GET_MODE_SIZE (GET_MODE (SET_SRC (exp))) > 4
4214 && (GET_CODE (SET_SRC (exp)) == REG
4215 || GET_CODE (SET_SRC (exp)) == MEM
4216 || GET_CODE (SET_SRC (exp)) == CONST_DOUBLE))
a0a7fbc9 4217 CC_STATUS_INIT;
e1dff52a 4218 else if (SET_DEST (exp) != pc_rtx)
79e68feb
RS
4219 {
4220 cc_status.flags = 0;
e1dff52a
KH
4221 cc_status.value1 = SET_DEST (exp);
4222 cc_status.value2 = SET_SRC (exp);
79e68feb
RS
4223 }
4224 }
4225 else if (GET_CODE (exp) == PARALLEL
4226 && GET_CODE (XVECEXP (exp, 0, 0)) == SET)
4227 {
e1dff52a
KH
4228 rtx dest = SET_DEST (XVECEXP (exp, 0, 0));
4229 rtx src = SET_SRC (XVECEXP (exp, 0, 0));
4230
4231 if (ADDRESS_REG_P (dest))
79e68feb 4232 CC_STATUS_INIT;
e1dff52a 4233 else if (dest != pc_rtx)
79e68feb
RS
4234 {
4235 cc_status.flags = 0;
e1dff52a
KH
4236 cc_status.value1 = dest;
4237 cc_status.value2 = src;
79e68feb
RS
4238 }
4239 }
4240 else
4241 CC_STATUS_INIT;
4242 if (cc_status.value2 != 0
4243 && ADDRESS_REG_P (cc_status.value2)
4244 && GET_MODE (cc_status.value2) == QImode)
4245 CC_STATUS_INIT;
1a8965c4 4246 if (cc_status.value2 != 0)
79e68feb
RS
4247 switch (GET_CODE (cc_status.value2))
4248 {
996a5f59 4249 case ASHIFT: case ASHIFTRT: case LSHIFTRT:
79e68feb 4250 case ROTATE: case ROTATERT:
a126dc3a
RH
4251 /* These instructions always clear the overflow bit, and set
4252 the carry to the bit shifted out. */
1afac9a6 4253 cc_status.flags |= CC_OVERFLOW_UNUSABLE | CC_NO_CARRY;
a126dc3a
RH
4254 break;
4255
4256 case PLUS: case MINUS: case MULT:
4257 case DIV: case UDIV: case MOD: case UMOD: case NEG:
79e68feb
RS
4258 if (GET_MODE (cc_status.value2) != VOIDmode)
4259 cc_status.flags |= CC_NO_OVERFLOW;
4260 break;
4261 case ZERO_EXTEND:
4262 /* (SET r1 (ZERO_EXTEND r2)) on this machine
4263 ends with a move insn moving r2 in r2's mode.
4264 Thus, the cc's are set for r2.
7a1929e1 4265 This can set N bit spuriously. */
79e68feb 4266 cc_status.flags |= CC_NOT_NEGATIVE;
1d8eaa6b
AS
4267
4268 default:
4269 break;
79e68feb
RS
4270 }
4271 if (cc_status.value1 && GET_CODE (cc_status.value1) == REG
4272 && cc_status.value2
4273 && reg_overlap_mentioned_p (cc_status.value1, cc_status.value2))
4274 cc_status.value2 = 0;
4275 if (((cc_status.value1 && FP_REG_P (cc_status.value1))
1a8965c4 4276 || (cc_status.value2 && FP_REG_P (cc_status.value2))))
79e68feb 4277 cc_status.flags = CC_IN_68881;
67595cbb
RZ
4278 if (cc_status.value2 && GET_CODE (cc_status.value2) == COMPARE
4279 && GET_MODE_CLASS (GET_MODE (XEXP (cc_status.value2, 0))) == MODE_FLOAT)
4280 {
4281 cc_status.flags = CC_IN_68881;
4282 if (!FP_REG_P (XEXP (cc_status.value2, 0)))
4283 cc_status.flags |= CC_REVERSED;
4284 }
79e68feb
RS
4285}
4286\f
5505f548 4287const char *
8a4a2253 4288output_move_const_double (rtx *operands)
79e68feb 4289{
1a8965c4 4290 int code = standard_68881_constant_p (operands[1]);
79e68feb 4291
1a8965c4 4292 if (code != 0)
79e68feb 4293 {
1a8965c4 4294 static char buf[40];
79e68feb 4295
3b4b85c9 4296 sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff);
1a8965c4 4297 return buf;
79e68feb 4298 }
1a8965c4 4299 return "fmove%.d %1,%0";
79e68feb
RS
4300}
4301
5505f548 4302const char *
8a4a2253 4303output_move_const_single (rtx *operands)
79e68feb 4304{
1a8965c4 4305 int code = standard_68881_constant_p (operands[1]);
79e68feb 4306
1a8965c4 4307 if (code != 0)
79e68feb 4308 {
1a8965c4 4309 static char buf[40];
79e68feb 4310
3b4b85c9 4311 sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff);
1a8965c4 4312 return buf;
79e68feb 4313 }
1a8965c4 4314 return "fmove%.s %f1,%0";
79e68feb
RS
4315}
4316
/* Return nonzero if X, a CONST_DOUBLE, has a value that we can get
   from the "fmovecr" instruction.
   The value, anded with 0xff, gives the code to use in fmovecr
   to get the desired constant.  */

/* This code has been fixed for cross-compilation.  */

/* Nonzero once values_68881 has been filled in.  */
static int inited_68881_table = 0;

/* Decimal spellings of the 68881 ROM constants we recognize.  */
static const char *const strings_68881[7] = {
  "0.0",
  "1.0",
  "10.0",
  "100.0",
  "10000.0",
  "1e8",
  "1e16"
};

/* fmovecr ROM offsets corresponding 1:1 to strings_68881.  */
static const int codes_68881[7] = {
  0x0f,
  0x32,
  0x33,
  0x34,
  0x35,
  0x36,
  0x37
};

/* Binary values of strings_68881, computed lazily by init_68881_table.  */
REAL_VALUE_TYPE values_68881[7];

/* Set up values_68881 array by converting the decimal values
   strings_68881 to binary.  */

void
init_68881_table (void)
{
  int i;
  REAL_VALUE_TYPE r;
  enum machine_mode mode;

  /* The first six constants are exact in SFmode; the last ("1e16")
     is converted in DFmode.  */
  mode = SFmode;
  for (i = 0; i < 7; i++)
    {
      if (i == 6)
	mode = DFmode;
      r = REAL_VALUE_ATOF (strings_68881[i], mode);
      values_68881[i] = r;
    }
  inited_68881_table = 1;
}
79e68feb
RS
4368
/* Return the fmovecr ROM code for the CONST_DOUBLE X, or 0 if X is not
   one of the recognized 68881 ROM constants (see strings_68881 above).  */

int
standard_68881_constant_p (rtx x)
{
  REAL_VALUE_TYPE r;
  int i;

  /* fmovecr must be emulated on the 68040 and 68060, so it shouldn't be
     used at all on those chips.  */
  if (TUNE_68040_60)
    return 0;

  if (! inited_68881_table)
    init_68881_table ();

  REAL_VALUE_FROM_CONST_DOUBLE (r, x);

  /* Use REAL_VALUES_IDENTICAL instead of REAL_VALUES_EQUAL so that -0.0
     is rejected.  */
  for (i = 0; i < 6; i++)
    {
      if (REAL_VALUES_IDENTICAL (r, values_68881[i]))
	return (codes_68881[i]);
    }

  /* The remaining table entry (1e16) is not exactly representable in
     SFmode, so only consider it for wider modes.  */
  if (GET_MODE (x) == SFmode)
    return 0;

  if (REAL_VALUES_EQUAL (r, values_68881[6]))
    return (codes_68881[6]);

  /* larger powers of ten in the constants ram are not used
     because they are not equal to a `double' C constant.  */
  return 0;
}
4403
/* If X is a floating-point constant, return the logarithm of X base 2,
   or 0 if X is not a power of 2.  Values less than 1 (including zero
   and negatives) also yield 0.  */

int
floating_exact_log2 (rtx x)
{
  REAL_VALUE_TYPE r, r1;
  int exp;

  REAL_VALUE_FROM_CONST_DOUBLE (r, x);

  if (REAL_VALUES_LESS (r, dconst1))
    return 0;

  /* X is a power of two iff it equals 2^exponent(X) exactly.  */
  exp = real_exponent (&r);
  real_2expN (&r1, exp, DFmode);
  if (REAL_VALUES_EQUAL (r1, r))
    return exp;

  return 0;
}
4425\f
79e68feb
RS
/* A C compound statement to output to stdio stream STREAM the
   assembler syntax for an instruction operand X.  X is an RTL
   expression.

   CODE is a value that can be used to specify one of several ways
   of printing the operand.  It is used when identical operands
   must be printed differently depending on the context.  CODE
   comes from the `%' specification that was used to request
   printing of the operand.  If the specification was just `%DIGIT'
   then CODE is 0; if the specification was `%LTR DIGIT' then CODE
   is the ASCII code for LTR.

   If X is a register, this macro should print the register's name.
   The names can be found in an array `reg_names' whose type is
   `char *[]'.  `reg_names' is initialized from `REGISTER_NAMES'.

   When the machine description has a specification `%PUNCT' (a `%'
   followed by a punctuation character), this macro is called with
   a null pointer for X and the punctuation character for CODE.

   The m68k specific codes are:

   '.' for dot needed in Motorola-style opcode names.
   '-' for an operand pushing on the stack:
       sp@-, -(sp) or -(%sp) depending on the style of syntax.
   '+' for an operand pushing on the stack:
       sp@+, (sp)+ or (%sp)+ depending on the style of syntax.
   '@' for a reference to the top word on the stack:
       sp@, (sp) or (%sp) depending on the style of syntax.
   '#' for an immediate operand prefix (# in MIT and Motorola syntax
       but & in SGS syntax).
   '!' for the cc register (used in an `and to cc' insn).
   '$' for the letter `s' in an op code, but only on the 68040.
   '&' for the letter `d' in an op code, but only on the 68040.
   '/' for register prefix needed by longlong.h.
   '?' for m68k_library_id_string

   'b' for byte insn (no effect, on the Sun; this is for the ISI).
   'd' to force memory addressing to be absolute, not relative.
   'f' for float insn (print a CONST_DOUBLE as a float rather than in hex)
   'x' for float insn (print a CONST_DOUBLE as a float rather than in hex),
       or print pair of registers as rx:ry.
   'p' print an address with @PLTPC attached, but only if the operand
       is not locally-bound.  */

void
print_operand (FILE *file, rtx op, int letter)
{
  /* Punctuation codes are handled first; for these OP may be null.  */
  if (letter == '.')
    {
      if (MOTOROLA)
	fprintf (file, ".");
    }
  else if (letter == '#')
    asm_fprintf (file, "%I");
  else if (letter == '-')
    asm_fprintf (file, MOTOROLA ? "-(%Rsp)" : "%Rsp@-");
  else if (letter == '+')
    asm_fprintf (file, MOTOROLA ? "(%Rsp)+" : "%Rsp@+");
  else if (letter == '@')
    asm_fprintf (file, MOTOROLA ? "(%Rsp)" : "%Rsp@");
  else if (letter == '!')
    asm_fprintf (file, "%Rfpcr");
  else if (letter == '$')
    {
      if (TARGET_68040)
	fprintf (file, "s");
    }
  else if (letter == '&')
    {
      if (TARGET_68040)
	fprintf (file, "d");
    }
  else if (letter == '/')
    asm_fprintf (file, "%R");
  else if (letter == '?')
    asm_fprintf (file, m68k_library_id_string);
  else if (letter == 'p')
    {
      output_addr_const (file, op);
      if (!(GET_CODE (op) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (op)))
	fprintf (file, "@PLTPC");
    }
  else if (GET_CODE (op) == REG)
    {
      if (letter == 'R')
	/* Print out the second register name of a register pair.
	   I.e., R (6) => 7.  */
	fputs (M68K_REGNAME(REGNO (op) + 1), file);
      else
	fputs (M68K_REGNAME(REGNO (op)), file);
    }
  else if (GET_CODE (op) == MEM)
    {
      output_address (XEXP (op, 0));
      /* 'd': force an absolute (.l) constant address on pre-68020
	 parts unless it fits in a 16-bit displacement.  */
      if (letter == 'd' && ! TARGET_68020
	  && CONSTANT_ADDRESS_P (XEXP (op, 0))
	  && !(GET_CODE (XEXP (op, 0)) == CONST_INT
	       && INTVAL (XEXP (op, 0)) < 0x8000
	       && INTVAL (XEXP (op, 0)) >= -0x8000))
	fprintf (file, MOTOROLA ? ".l" : ":l");
    }
  else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == SFmode)
    {
      /* Print a single-precision constant as a 32-bit hex immediate.  */
      REAL_VALUE_TYPE r;
      long l;
      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, l);
      asm_fprintf (file, "%I0x%lx", l & 0xFFFFFFFF);
    }
  else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == XFmode)
    {
      /* Extended precision: three 32-bit words.  */
      REAL_VALUE_TYPE r;
      long l[3];
      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
      asm_fprintf (file, "%I0x%lx%08lx%08lx", l[0] & 0xFFFFFFFF,
		   l[1] & 0xFFFFFFFF, l[2] & 0xFFFFFFFF);
    }
  else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == DFmode)
    {
      /* Double precision: two 32-bit words.  */
      REAL_VALUE_TYPE r;
      long l[2];
      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_DOUBLE (r, l);
      asm_fprintf (file, "%I0x%lx%08lx", l[0] & 0xFFFFFFFF, l[1] & 0xFFFFFFFF);
    }
  else
    {
      /* Use `print_operand_address' instead of `output_addr_const'
	 to ensure that we print relevant PIC stuff.  */
      asm_fprintf (file, "%I");
      if (TARGET_PCREL
	  && (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == CONST))
	print_operand_address (file, op);
      else
	output_addr_const (file, op);
    }
}
4565
75df395f
MK
4566/* Return string for TLS relocation RELOC. */
4567
4568static const char *
4569m68k_get_reloc_decoration (enum m68k_reloc reloc)
4570{
4571 /* To my knowledge, !MOTOROLA assemblers don't support TLS. */
4572 gcc_assert (MOTOROLA || reloc == RELOC_GOT);
4573
4574 switch (reloc)
4575 {
4576 case RELOC_GOT:
4577 if (MOTOROLA)
4578 {
4579 if (flag_pic == 1 && TARGET_68020)
4580 return "@GOT.w";
4581 else
4582 return "@GOT";
4583 }
4584 else
4585 {
4586 if (TARGET_68020)
4587 {
4588 switch (flag_pic)
4589 {
4590 case 1:
4591 return ":w";
4592 case 2:
4593 return ":l";
4594 default:
4595 return "";
4596 }
4597 }
4598 }
4599
4600 case RELOC_TLSGD:
4601 return "@TLSGD";
4602
4603 case RELOC_TLSLDM:
4604 return "@TLSLDM";
4605
4606 case RELOC_TLSLDO:
4607 return "@TLSLDO";
4608
4609 case RELOC_TLSIE:
4610 return "@TLSIE";
4611
4612 case RELOC_TLSLE:
4613 return "@TLSLE";
4614
4615 default:
4616 gcc_unreachable ();
4617 }
4618}
4619
884316ff
JM
4620/* m68k implementation of OUTPUT_ADDR_CONST_EXTRA. */
4621
4622bool
4623m68k_output_addr_const_extra (FILE *file, rtx x)
4624{
75df395f
MK
4625 if (GET_CODE (x) == UNSPEC)
4626 {
4627 switch (XINT (x, 1))
4628 {
4629 case UNSPEC_RELOC16:
4630 case UNSPEC_RELOC32:
4631 output_addr_const (file, XVECEXP (x, 0, 0));
f878882b
AS
4632 fputs (m68k_get_reloc_decoration
4633 ((enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1))), file);
75df395f 4634 return true;
884316ff 4635
75df395f
MK
4636 default:
4637 break;
4638 }
4639 }
4640
4641 return false;
4642}
4643
4644/* M68K implementation of TARGET_ASM_OUTPUT_DWARF_DTPREL. */
4645
4646static void
4647m68k_output_dwarf_dtprel (FILE *file, int size, rtx x)
4648{
4649 gcc_assert (size == 4);
4650 fputs ("\t.long\t", file);
4651 output_addr_const (file, x);
4652 fputs ("@TLSLDO+0x8000", file);
884316ff
JM
4653}
4654
7b0f476d
AS
/* In the name of slightly smaller debug output, and to cater to
   general assembler lossage, recognize various UNSPEC sequences
   and turn them back into a direct symbol reference.  Returns the
   delegitimized rtx, or ORIG_X unchanged if no pattern matched.  */

static rtx
m68k_delegitimize_address (rtx orig_x)
{
  rtx x, y;
  rtx addend = NULL_RTX;
  rtx result;

  orig_x = delegitimize_mem_from_attrs (orig_x);
  if (! MEM_P (orig_x))
    return orig_x;

  x = XEXP (orig_x, 0);

  /* Look for (plus PIC_REG (const ...)), the shape our PIC
     legitimization produces.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST
      && REG_P (XEXP (x, 0))
      && REGNO (XEXP (x, 0)) == PIC_REG)
    {
      /* Y remembers the node under CONST so we can tell PLUS from
	 MINUS when rebuilding the addend below.  */
      y = x = XEXP (XEXP (x, 1), 0);

      /* Handle an addend.  */
      if ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS)
	  && CONST_INT_P (XEXP (x, 1)))
	{
	  addend = XEXP (x, 1);
	  x = XEXP (x, 0);
	}

      if (GET_CODE (x) == UNSPEC
	  && (XINT (x, 1) == UNSPEC_RELOC16
	      || XINT (x, 1) == UNSPEC_RELOC32))
	{
	  /* The symbol is operand 0 of the reloc UNSPEC.  */
	  result = XVECEXP (x, 0, 0);
	  if (addend)
	    {
	      if (GET_CODE (y) == PLUS)
		result = gen_rtx_PLUS (Pmode, result, addend);
	      else
		result = gen_rtx_MINUS (Pmode, result, addend);
	      result = gen_rtx_CONST (Pmode, result);
	    }
	  return result;
	}
    }

  return orig_x;
}
4706
79e68feb
RS
4707\f
/* A C compound statement to output to stdio stream STREAM the
   assembler syntax for an instruction operand that is a memory
   reference whose address is ADDR.  ADDR is an RTL expression.

   Note that this contains a kludge that knows that the only reason
   we have an address (plus (label_ref...) (reg...)) when not generating
   PIC code is in the insn before a tablejump, and we know that m68k.md
   generates a label LInnn: on such an insn.

   It is possible for PIC to generate a (plus (label_ref...) (reg...))
   and we handle that just like we would a (plus (symbol_ref...) (reg...)).

   This routine is responsible for distinguishing between -fpic and -fPIC
   style relocations in an address.  When generating -fpic code the
   offset is output in word mode (e.g. movel a5@(_foo:w), a0).  When generating
   -fPIC code the offset is output in long mode (e.g. movel a5@(_foo:l), a0) */

void
print_operand_address (FILE *file, rtx addr)
{
  struct m68k_address address;

  if (!m68k_decompose_address (QImode, addr, true, &address))
    gcc_unreachable ();

  if (address.code == PRE_DEC)
    fprintf (file, MOTOROLA ? "-(%s)" : "%s@-",
	     M68K_REGNAME (REGNO (address.base)));
  else if (address.code == POST_INC)
    fprintf (file, MOTOROLA ? "(%s)+" : "%s@+",
	     M68K_REGNAME (REGNO (address.base)));
  else if (!address.base && !address.index)
    {
      /* A constant address.  */
      gcc_assert (address.offset == addr);
      if (GET_CODE (addr) == CONST_INT)
	{
	  /* (xxx).w or (xxx).l.  */
	  if (IN_RANGE (INTVAL (addr), -0x8000, 0x7fff))
	    fprintf (file, MOTOROLA ? "%d.w" : "%d:w", (int) INTVAL (addr));
	  else
	    fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (addr));
	}
      else if (TARGET_PCREL)
	{
	  /* (d16,PC) or (bd,PC,Xn) (with suppressed index register).  */
	  fputc ('(', file);
	  output_addr_const (file, addr);
	  asm_fprintf (file, flag_pic == 1 ? ":w,%Rpc)" : ":l,%Rpc)");
	}
      else
	{
	  /* (xxx).l.  We need a special case for SYMBOL_REF if the symbol
	     name ends in `.<letter>', as the last 2 characters can be
	     mistaken as a size suffix.  Put the name in parentheses.  */
	  if (GET_CODE (addr) == SYMBOL_REF
	      && strlen (XSTR (addr, 0)) > 2
	      && XSTR (addr, 0)[strlen (XSTR (addr, 0)) - 2] == '.')
	    {
	      putc ('(', file);
	      output_addr_const (file, addr);
	      putc (')', file);
	    }
	  else
	    output_addr_const (file, addr);
	}
    }
  else
    {
      int labelno;

      /* If ADDR is a (d8,pc,Xn) address, this is the number of the
	 label being accessed, otherwise it is -1.  */
      labelno = (address.offset
		 && !address.base
		 && GET_CODE (address.offset) == LABEL_REF
		 ? CODE_LABEL_NUMBER (XEXP (address.offset, 0))
		 : -1);
      if (MOTOROLA)
	{
	  /* Print the "offset(base" component.  */
	  if (labelno >= 0)
	    asm_fprintf (file, "%LL%d(%Rpc,", labelno);
	  else
	    {
	      if (address.offset)
		output_addr_const (file, address.offset);

	      putc ('(', file);
	      if (address.base)
		fputs (M68K_REGNAME (REGNO (address.base)), file);
	    }
	  /* Print the ",index" component, if any.  */
	  if (address.index)
	    {
	      if (address.base)
		putc (',', file);
	      fprintf (file, "%s.%c",
		       M68K_REGNAME (REGNO (address.index)),
		       GET_MODE (address.index) == HImode ? 'w' : 'l');
	      if (address.scale != 1)
		fprintf (file, "*%d", address.scale);
	    }
	  putc (')', file);
	}
      else /* !MOTOROLA */
	{
	  if (!address.offset && !address.index)
	    fprintf (file, "%s@", M68K_REGNAME (REGNO (address.base)));
	  else
	    {
	      /* Print the "base@(offset" component.  */
	      if (labelno >= 0)
		asm_fprintf (file, "%Rpc@(%LL%d", labelno);
	      else
		{
		  if (address.base)
		    fputs (M68K_REGNAME (REGNO (address.base)), file);
		  fprintf (file, "@(");
		  if (address.offset)
		    output_addr_const (file, address.offset);
		}
	      /* Print the ",index" component, if any.  */
	      if (address.index)
		{
		  fprintf (file, ",%s:%c",
			   M68K_REGNAME (REGNO (address.index)),
			   GET_MODE (address.index) == HImode ? 'w' : 'l');
		  if (address.scale != 1)
		    fprintf (file, ":%d", address.scale);
		}
	      putc (')', file);
	    }
	}
    }
}
af13f02d
JW
4844\f
4845/* Check for cases where a clr insns can be omitted from code using
4846 strict_low_part sets. For example, the second clrl here is not needed:
4847 clrl d0; movw a0@+,d0; use d0; clrl d0; movw a0@+; use d0; ...
4848
4849 MODE is the mode of this STRICT_LOW_PART set. FIRST_INSN is the clear
4850 insn we are checking for redundancy. TARGET is the register set by the
4851 clear insn. */
4852
8a4a2253
BI
4853bool
4854strict_low_part_peephole_ok (enum machine_mode mode, rtx first_insn,
4855 rtx target)
af13f02d 4856{
39250081 4857 rtx p = first_insn;
af13f02d 4858
39250081 4859 while ((p = PREV_INSN (p)))
af13f02d 4860 {
39250081
RZ
4861 if (NOTE_INSN_BASIC_BLOCK_P (p))
4862 return false;
4863
4864 if (NOTE_P (p))
4865 continue;
4866
af13f02d 4867 /* If it isn't an insn, then give up. */
39250081 4868 if (!INSN_P (p))
8a4a2253 4869 return false;
af13f02d
JW
4870
4871 if (reg_set_p (target, p))
4872 {
4873 rtx set = single_set (p);
4874 rtx dest;
4875
4876 /* If it isn't an easy to recognize insn, then give up. */
4877 if (! set)
8a4a2253 4878 return false;
af13f02d
JW
4879
4880 dest = SET_DEST (set);
4881
4882 /* If this sets the entire target register to zero, then our
4883 first_insn is redundant. */
4884 if (rtx_equal_p (dest, target)
4885 && SET_SRC (set) == const0_rtx)
8a4a2253 4886 return true;
af13f02d
JW
4887 else if (GET_CODE (dest) == STRICT_LOW_PART
4888 && GET_CODE (XEXP (dest, 0)) == REG
4889 && REGNO (XEXP (dest, 0)) == REGNO (target)
4890 && (GET_MODE_SIZE (GET_MODE (XEXP (dest, 0)))
4891 <= GET_MODE_SIZE (mode)))
4892 /* This is a strict low part set which modifies less than
4893 we are using, so it is safe. */
4894 ;
4895 else
8a4a2253 4896 return false;
af13f02d 4897 }
af13f02d
JW
4898 }
4899
8a4a2253 4900 return false;
af13f02d 4901}
67cd4f83 4902
2c8ec431
DL
4903/* Operand predicates for implementing asymmetric pc-relative addressing
4904 on m68k. The m68k supports pc-relative addressing (mode 7, register 2)
dab66575 4905 when used as a source operand, but not as a destination operand.
2c8ec431
DL
4906
4907 We model this by restricting the meaning of the basic predicates
4908 (general_operand, memory_operand, etc) to forbid the use of this
4909 addressing mode, and then define the following predicates that permit
4910 this addressing mode. These predicates can then be used for the
4911 source operands of the appropriate instructions.
4912
4913 n.b. While it is theoretically possible to change all machine patterns
 4914 to use this addressing mode where permitted by the architecture,
 4915 it has only been implemented for "common" cases: SImode, HImode, and
 4916 QImode operands, and only for the principal operations that would
4917 require this addressing mode: data movement and simple integer operations.
4918
4919 In parallel with these new predicates, two new constraint letters
4920 were defined: 'S' and 'T'. 'S' is the -mpcrel analog of 'm'.
4921 'T' replaces 's' in the non-pcrel case. It is a no-op in the pcrel case.
4922 In the pcrel case 's' is only valid in combination with 'a' registers.
4923 See addsi3, subsi3, cmpsi, and movsi patterns for a better understanding
4924 of how these constraints are used.
4925
4926 The use of these predicates is strictly optional, though patterns that
4927 don't will cause an extra reload register to be allocated where one
4928 was not necessary:
4929
4930 lea (abc:w,%pc),%a0 ; need to reload address
4931 moveq &1,%d1 ; since write to pc-relative space
4932 movel %d1,%a0@ ; is not allowed
4933 ...
4934 lea (abc:w,%pc),%a1 ; no need to reload address here
4935 movel %a1@,%d0 ; since "movel (abc:w,%pc),%d0" is ok
4936
4937 For more info, consult tiemann@cygnus.com.
4938
4939
4940 All of the ugliness with predicates and constraints is due to the
4941 simple fact that the m68k does not allow a pc-relative addressing
4942 mode as a destination. gcc does not distinguish between source and
4943 destination addresses. Hence, if we claim that pc-relative address
331d9186 4944 modes are valid, e.g. TARGET_LEGITIMATE_ADDRESS_P accepts them, then we
2c8ec431
DL
4945 end up with invalid code. To get around this problem, we left
4946 pc-relative modes as invalid addresses, and then added special
4947 predicates and constraints to accept them.
4948
4949 A cleaner way to handle this is to modify gcc to distinguish
4950 between source and destination addresses. We can then say that
4951 pc-relative is a valid source address but not a valid destination
4952 address, and hopefully avoid a lot of the predicate and constraint
4953 hackery. Unfortunately, this would be a pretty big change. It would
4954 be a useful change for a number of ports, but there aren't any current
4955 plans to undertake this.
4956
4957 ***************************************************************************/
4958
4959
5505f548 4960const char *
8a4a2253 4961output_andsi3 (rtx *operands)
29ae8a3c
RK
4962{
4963 int logval;
4964 if (GET_CODE (operands[2]) == CONST_INT
25c99d8f 4965 && (INTVAL (operands[2]) | 0xffff) == -1
29ae8a3c
RK
4966 && (DATA_REG_P (operands[0])
4967 || offsettable_memref_p (operands[0]))
9425fb04 4968 && !TARGET_COLDFIRE)
29ae8a3c
RK
4969 {
4970 if (GET_CODE (operands[0]) != REG)
b72f00af 4971 operands[0] = adjust_address (operands[0], HImode, 2);
1d8eaa6b 4972 operands[2] = GEN_INT (INTVAL (operands[2]) & 0xffff);
29ae8a3c
RK
4973 /* Do not delete a following tstl %0 insn; that would be incorrect. */
4974 CC_STATUS_INIT;
4975 if (operands[2] == const0_rtx)
4976 return "clr%.w %0";
4977 return "and%.w %2,%0";
4978 }
4979 if (GET_CODE (operands[2]) == CONST_INT
c4406f74 4980 && (logval = exact_log2 (~ INTVAL (operands[2]) & 0xffffffff)) >= 0
29ae8a3c
RK
4981 && (DATA_REG_P (operands[0])
4982 || offsettable_memref_p (operands[0])))
4983 {
4984 if (DATA_REG_P (operands[0]))
a0a7fbc9 4985 operands[1] = GEN_INT (logval);
29ae8a3c
RK
4986 else
4987 {
b72f00af 4988 operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
1d8eaa6b 4989 operands[1] = GEN_INT (logval % 8);
29ae8a3c
RK
4990 }
4991 /* This does not set condition codes in a standard way. */
4992 CC_STATUS_INIT;
4993 return "bclr %1,%0";
4994 }
4995 return "and%.l %2,%0";
4996}
4997
5505f548 4998const char *
8a4a2253 4999output_iorsi3 (rtx *operands)
29ae8a3c
RK
5000{
5001 register int logval;
5002 if (GET_CODE (operands[2]) == CONST_INT
5003 && INTVAL (operands[2]) >> 16 == 0
5004 && (DATA_REG_P (operands[0])
5005 || offsettable_memref_p (operands[0]))
9425fb04 5006 && !TARGET_COLDFIRE)
29ae8a3c
RK
5007 {
5008 if (GET_CODE (operands[0]) != REG)
b72f00af 5009 operands[0] = adjust_address (operands[0], HImode, 2);
29ae8a3c
RK
5010 /* Do not delete a following tstl %0 insn; that would be incorrect. */
5011 CC_STATUS_INIT;
5012 if (INTVAL (operands[2]) == 0xffff)
5013 return "mov%.w %2,%0";
5014 return "or%.w %2,%0";
5015 }
5016 if (GET_CODE (operands[2]) == CONST_INT
c4406f74 5017 && (logval = exact_log2 (INTVAL (operands[2]) & 0xffffffff)) >= 0
29ae8a3c
RK
5018 && (DATA_REG_P (operands[0])
5019 || offsettable_memref_p (operands[0])))
5020 {
5021 if (DATA_REG_P (operands[0]))
b72f00af 5022 operands[1] = GEN_INT (logval);
29ae8a3c
RK
5023 else
5024 {
b72f00af 5025 operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
1d8eaa6b 5026 operands[1] = GEN_INT (logval % 8);
29ae8a3c
RK
5027 }
5028 CC_STATUS_INIT;
5029 return "bset %1,%0";
5030 }
5031 return "or%.l %2,%0";
5032}
5033
5505f548 5034const char *
8a4a2253 5035output_xorsi3 (rtx *operands)
29ae8a3c
RK
5036{
5037 register int logval;
5038 if (GET_CODE (operands[2]) == CONST_INT
5039 && INTVAL (operands[2]) >> 16 == 0
5040 && (offsettable_memref_p (operands[0]) || DATA_REG_P (operands[0]))
9425fb04 5041 && !TARGET_COLDFIRE)
29ae8a3c
RK
5042 {
5043 if (! DATA_REG_P (operands[0]))
b72f00af 5044 operands[0] = adjust_address (operands[0], HImode, 2);
29ae8a3c
RK
5045 /* Do not delete a following tstl %0 insn; that would be incorrect. */
5046 CC_STATUS_INIT;
5047 if (INTVAL (operands[2]) == 0xffff)
5048 return "not%.w %0";
5049 return "eor%.w %2,%0";
5050 }
5051 if (GET_CODE (operands[2]) == CONST_INT
c4406f74 5052 && (logval = exact_log2 (INTVAL (operands[2]) & 0xffffffff)) >= 0
29ae8a3c
RK
5053 && (DATA_REG_P (operands[0])
5054 || offsettable_memref_p (operands[0])))
5055 {
5056 if (DATA_REG_P (operands[0]))
b72f00af 5057 operands[1] = GEN_INT (logval);
29ae8a3c
RK
5058 else
5059 {
b72f00af 5060 operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
1d8eaa6b 5061 operands[1] = GEN_INT (logval % 8);
29ae8a3c
RK
5062 }
5063 CC_STATUS_INIT;
5064 return "bchg %1,%0";
5065 }
5066 return "eor%.l %2,%0";
5067}
7c262518 5068
29ca003a
RS
5069/* Return the instruction that should be used for a call to address X,
5070 which is known to be in operand 0. */
5071
5072const char *
5073output_call (rtx x)
5074{
5075 if (symbolic_operand (x, VOIDmode))
5076 return m68k_symbolic_call;
5077 else
5078 return "jsr %a0";
5079}
5080
f7e70894
RS
5081/* Likewise sibling calls. */
5082
5083const char *
5084output_sibcall (rtx x)
5085{
5086 if (symbolic_operand (x, VOIDmode))
5087 return m68k_symbolic_jump;
5088 else
5089 return "jmp %a0";
5090}
5091
/* Worker for TARGET_ASM_OUTPUT_MI_THUNK.  Emit the assembly for a thunk
   that adjusts the incoming "this" pointer by DELTA (and, if nonzero, by
   *(*this + VCALL_OFFSET)) and then tail-calls FUNCTION.  The adjustment
   is performed directly on the stack slot holding "this" so that no
   argument register is disturbed.  */
static void
m68k_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
		      HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
		      tree function)
{
  rtx this_slot, offset, addr, mem, insn, tmp;

  /* Avoid clobbering the struct value reg by using the
     static chain reg as a temporary.  */
  tmp = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);

  /* Pretend to be a post-reload pass while generating rtl.  */
  reload_completed = 1;

  /* The "this" pointer is stored at 4(%sp).  */
  this_slot = gen_rtx_MEM (Pmode, plus_constant (stack_pointer_rtx, 4));

  /* Add DELTA to THIS.  */
  if (delta != 0)
    {
      /* Make the offset a legitimate operand for memory addition.  */
      offset = GEN_INT (delta);
      if ((delta < -8 || delta > 8)
	  && (TARGET_COLDFIRE || USE_MOVQ (delta)))
	{
	  /* Out-of-range immediates go through %d0 first.  */
	  emit_move_insn (gen_rtx_REG (Pmode, D0_REG), offset);
	  offset = gen_rtx_REG (Pmode, D0_REG);
	}
      emit_insn (gen_add3_insn (copy_rtx (this_slot),
				copy_rtx (this_slot), offset));
    }

  /* If needed, add *(*THIS + VCALL_OFFSET) to THIS.  */
  if (vcall_offset != 0)
    {
      /* Set the static chain register to *THIS.  */
      emit_move_insn (tmp, this_slot);
      emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));

      /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET.  */
      addr = plus_constant (tmp, vcall_offset);
      if (!m68k_legitimate_address_p (Pmode, addr, true))
	{
	  emit_insn (gen_rtx_SET (VOIDmode, tmp, addr));
	  addr = tmp;
	}

      /* Load the offset into %d0 and add it to THIS.  */
      emit_move_insn (gen_rtx_REG (Pmode, D0_REG),
		      gen_rtx_MEM (Pmode, addr));
      emit_insn (gen_add3_insn (copy_rtx (this_slot),
				copy_rtx (this_slot),
				gen_rtx_REG (Pmode, D0_REG)));
    }

  /* Jump to the target function.  Use a sibcall if direct jumps are
     allowed, otherwise load the address into a register first.  */
  mem = DECL_RTL (function);
  if (!sibcall_operand (XEXP (mem, 0), VOIDmode))
    {
      gcc_assert (flag_pic);

      if (!TARGET_SEP_DATA)
	{
	  /* Use the static chain register as a temporary (call-clobbered)
	     GOT pointer for this function.  We can use the static chain
	     register because it isn't live on entry to the thunk.  */
	  SET_REGNO (pic_offset_table_rtx, STATIC_CHAIN_REGNUM);
	  emit_insn (gen_load_got (pic_offset_table_rtx));
	}
      legitimize_pic_address (XEXP (mem, 0), Pmode, tmp);
      mem = replace_equiv_address (mem, tmp);
    }
  insn = emit_call_insn (gen_sibcall (mem, const0_rtx));
  SIBLING_CALL_P (insn) = 1;

  /* Run just enough of rest_of_compilation.  */
  insn = get_insns ();
  split_all_insns_noflow ();
  final_start_function (insn, file, 1);
  final (insn, file, 1);
  final_end_function ();

  /* Clean up the vars set above.  */
  reload_completed = 0;

  /* Restore the original PIC register.  */
  if (flag_pic)
    SET_REGNO (pic_offset_table_rtx, PIC_REG);
}
8636be86
KH
5182
5183/* Worker function for TARGET_STRUCT_VALUE_RTX. */
5184
5185static rtx
5186m68k_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
5187 int incoming ATTRIBUTE_UNUSED)
5188{
5189 return gen_rtx_REG (Pmode, M68K_STRUCT_VALUE_REGNUM);
5190}
cfca21cb
PB
5191
5192/* Return nonzero if register old_reg can be renamed to register new_reg. */
5193int
5194m68k_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED,
5195 unsigned int new_reg)
5196{
5197
5198 /* Interrupt functions can only use registers that have already been
5199 saved by the prologue, even if they would normally be
5200 call-clobbered. */
5201
a4242737
KH
5202 if ((m68k_get_function_kind (current_function_decl)
5203 == m68k_fk_interrupt_handler)
6fb5fa3c 5204 && !df_regs_ever_live_p (new_reg))
cfca21cb
PB
5205 return 0;
5206
5207 return 1;
5208}
70028b61 5209
ffa2596e
RS
5210/* Value is true if hard register REGNO can hold a value of machine-mode
5211 MODE. On the 68000, we let the cpu registers can hold any mode, but
5212 restrict the 68881 registers to floating-point modes. */
5213
70028b61
PB
5214bool
5215m68k_regno_mode_ok (int regno, enum machine_mode mode)
5216{
36e04090 5217 if (DATA_REGNO_P (regno))
70028b61 5218 {
a0a7fbc9
AS
5219 /* Data Registers, can hold aggregate if fits in. */
5220 if (regno + GET_MODE_SIZE (mode) / 4 <= 8)
5221 return true;
70028b61 5222 }
36e04090 5223 else if (ADDRESS_REGNO_P (regno))
70028b61 5224 {
a0a7fbc9
AS
5225 if (regno + GET_MODE_SIZE (mode) / 4 <= 16)
5226 return true;
70028b61 5227 }
36e04090 5228 else if (FP_REGNO_P (regno))
70028b61
PB
5229 {
5230 /* FPU registers, hold float or complex float of long double or
a0a7fbc9
AS
5231 smaller. */
5232 if ((GET_MODE_CLASS (mode) == MODE_FLOAT
5233 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
dcc21c4c 5234 && GET_MODE_UNIT_SIZE (mode) <= TARGET_FP_REG_SIZE)
a0a7fbc9 5235 return true;
70028b61
PB
5236 }
5237 return false;
5238}
dcc21c4c 5239
ffa2596e
RS
5240/* Implement SECONDARY_RELOAD_CLASS. */
5241
5242enum reg_class
5243m68k_secondary_reload_class (enum reg_class rclass,
5244 enum machine_mode mode, rtx x)
5245{
5246 int regno;
5247
5248 regno = true_regnum (x);
5249
5250 /* If one operand of a movqi is an address register, the other
5251 operand must be a general register or constant. Other types
5252 of operand must be reloaded through a data register. */
5253 if (GET_MODE_SIZE (mode) == 1
5254 && reg_classes_intersect_p (rclass, ADDR_REGS)
5255 && !(INT_REGNO_P (regno) || CONSTANT_P (x)))
5256 return DATA_REGS;
5257
5258 /* PC-relative addresses must be loaded into an address register first. */
5259 if (TARGET_PCREL
5260 && !reg_class_subset_p (rclass, ADDR_REGS)
5261 && symbolic_operand (x, VOIDmode))
5262 return ADDR_REGS;
5263
5264 return NO_REGS;
5265}
5266
5267/* Implement PREFERRED_RELOAD_CLASS. */
5268
5269enum reg_class
5270m68k_preferred_reload_class (rtx x, enum reg_class rclass)
5271{
5272 enum reg_class secondary_class;
5273
5274 /* If RCLASS might need a secondary reload, try restricting it to
5275 a class that doesn't. */
5276 secondary_class = m68k_secondary_reload_class (rclass, GET_MODE (x), x);
5277 if (secondary_class != NO_REGS
5278 && reg_class_subset_p (secondary_class, rclass))
5279 return secondary_class;
5280
5281 /* Prefer to use moveq for in-range constants. */
5282 if (GET_CODE (x) == CONST_INT
5283 && reg_class_subset_p (DATA_REGS, rclass)
5284 && IN_RANGE (INTVAL (x), -0x80, 0x7f))
5285 return DATA_REGS;
5286
5287 /* ??? Do we really need this now? */
5288 if (GET_CODE (x) == CONST_DOUBLE
5289 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
5290 {
5291 if (TARGET_HARD_FLOAT && reg_class_subset_p (FP_REGS, rclass))
5292 return FP_REGS;
5293
5294 return NO_REGS;
5295 }
5296
5297 return rclass;
5298}
5299
dcc21c4c
PB
5300/* Return floating point values in a 68881 register. This makes 68881 code
5301 a little bit faster. It also makes -msoft-float code incompatible with
5302 hard-float code, so people have to be careful not to mix the two.
c0220ea4 5303 For ColdFire it was decided the ABI incompatibility is undesirable.
dcc21c4c
PB
5304 If there is need for a hard-float ABI it is probably worth doing it
5305 properly and also passing function arguments in FP registers. */
5306rtx
5307m68k_libcall_value (enum machine_mode mode)
5308{
5309 switch (mode) {
5310 case SFmode:
5311 case DFmode:
5312 case XFmode:
5313 if (TARGET_68881)
8d989403 5314 return gen_rtx_REG (mode, FP0_REG);
dcc21c4c
PB
5315 break;
5316 default:
5317 break;
5318 }
75df395f
MK
5319
5320 return gen_rtx_REG (mode, m68k_libcall_value_in_a0_p ? A0_REG : D0_REG);
dcc21c4c
PB
5321}
5322
db5e2d51
MK
5323/* Location in which function value is returned.
5324 NOTE: Due to differences in ABIs, don't call this function directly,
5325 use FUNCTION_VALUE instead. */
dcc21c4c 5326rtx
586de218 5327m68k_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED)
dcc21c4c
PB
5328{
5329 enum machine_mode mode;
5330
5331 mode = TYPE_MODE (valtype);
5332 switch (mode) {
5333 case SFmode:
5334 case DFmode:
5335 case XFmode:
5336 if (TARGET_68881)
8d989403 5337 return gen_rtx_REG (mode, FP0_REG);
dcc21c4c
PB
5338 break;
5339 default:
5340 break;
5341 }
5342
576c9028
KH
5343 /* If the function returns a pointer, push that into %a0. */
5344 if (func && POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (func))))
5345 /* For compatibility with the large body of existing code which
5346 does not always properly declare external functions returning
5347 pointer types, the m68k/SVR4 convention is to copy the value
5348 returned for pointer functions from a0 to d0 in the function
5349 epilogue, so that callers that have neglected to properly
5350 declare the callee can still find the correct return value in
5351 d0. */
5352 return gen_rtx_PARALLEL
5353 (mode,
5354 gen_rtvec (2,
5355 gen_rtx_EXPR_LIST (VOIDmode,
5356 gen_rtx_REG (mode, A0_REG),
5357 const0_rtx),
5358 gen_rtx_EXPR_LIST (VOIDmode,
5359 gen_rtx_REG (mode, D0_REG),
5360 const0_rtx)));
5361 else if (POINTER_TYPE_P (valtype))
5362 return gen_rtx_REG (mode, A0_REG);
dcc21c4c 5363 else
576c9028 5364 return gen_rtx_REG (mode, D0_REG);
dcc21c4c 5365}
1c445f03
NS
5366
/* Worker function for TARGET_RETURN_IN_MEMORY.  */
#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
static bool
m68k_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  enum machine_mode mode = TYPE_MODE (type);

  if (mode == BLKmode)
    return true;

  /* If TYPE's known alignment is less than the alignment of MODE that
     would contain the structure, then return in memory.  We need to
     do so to maintain the compatibility between code compiled with
     -mstrict-align and that compiled with -mno-strict-align.  */
  return (AGGREGATE_TYPE_P (type)
	  && TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (mode));
}
#endif
c47b0cb4
MK
5388
/* CPU to schedule the program for.  */
enum attr_cpu m68k_sched_cpu;

/* MAC to schedule the program for.  */
enum attr_mac m68k_sched_mac;
5394
c47b0cb4
MK
/* Operand type, used by the scheduler to classify instruction operands.
   The MEM* values mirror the m68k effective-address (EA) modes.  */
enum attr_op_type
  {
    /* No operand.  */
    OP_TYPE_NONE,

    /* Integer register.  */
    OP_TYPE_RN,

    /* FP register.  */
    OP_TYPE_FPN,

    /* Implicit mem reference (e.g. stack).  */
    OP_TYPE_MEM1,

    /* Memory without offset or indexing.  EA modes 2, 3 and 4.  */
    OP_TYPE_MEM234,

    /* Memory with offset but without indexing.  EA mode 5.  */
    OP_TYPE_MEM5,

    /* Memory with indexing.  EA mode 6.  */
    OP_TYPE_MEM6,

    /* Memory referenced by absolute address.  EA mode 7.  */
    OP_TYPE_MEM7,

    /* Immediate operand that doesn't require extension word.  */
    OP_TYPE_IMM_Q,

    /* Immediate 16 bit operand.  */
    OP_TYPE_IMM_W,

    /* Immediate 32 bit operand.  */
    OP_TYPE_IMM_L
  };
5431
c47b0cb4
MK
5432/* Return type of memory ADDR_RTX refers to. */
5433static enum attr_op_type
5434sched_address_type (enum machine_mode mode, rtx addr_rtx)
5435{
5436 struct m68k_address address;
5437
96fcacb7
MK
5438 if (symbolic_operand (addr_rtx, VOIDmode))
5439 return OP_TYPE_MEM7;
5440
c47b0cb4
MK
5441 if (!m68k_decompose_address (mode, addr_rtx,
5442 reload_completed, &address))
5443 {
96fcacb7 5444 gcc_assert (!reload_completed);
c47b0cb4
MK
5445 /* Reload will likely fix the address to be in the register. */
5446 return OP_TYPE_MEM234;
5447 }
5448
5449 if (address.scale != 0)
5450 return OP_TYPE_MEM6;
5451
5452 if (address.base != NULL_RTX)
5453 {
5454 if (address.offset == NULL_RTX)
5455 return OP_TYPE_MEM234;
5456
5457 return OP_TYPE_MEM5;
5458 }
5459
5460 gcc_assert (address.offset != NULL_RTX);
5461
5462 return OP_TYPE_MEM7;
5463}
5464
96fcacb7
MK
5465/* Return X or Y (depending on OPX_P) operand of INSN. */
5466static rtx
5467sched_get_operand (rtx insn, bool opx_p)
5468{
5469 int i;
5470
5471 if (recog_memoized (insn) < 0)
5472 gcc_unreachable ();
5473
5474 extract_constrain_insn_cached (insn);
5475
5476 if (opx_p)
5477 i = get_attr_opx (insn);
5478 else
5479 i = get_attr_opy (insn);
5480
5481 if (i >= recog_data.n_operands)
5482 return NULL;
5483
5484 return recog_data.operand[i];
5485}
5486
/* Return type of INSN's operand X (if OPX_P) or operand Y (if !OPX_P).
   If ADDRESS_P is true, return type of memory location operand refers to.  */
static enum attr_op_type
sched_attr_op_type (rtx insn, bool opx_p, bool address_p)
{
  rtx op;

  op = sched_get_operand (insn, opx_p);

  if (op == NULL)
    {
      /* A missing operand can only happen before reload.  */
      gcc_assert (!reload_completed);
      return OP_TYPE_RN;
    }

  if (address_p)
    return sched_address_type (QImode, op);

  if (memory_operand (op, VOIDmode))
    return sched_address_type (GET_MODE (op), XEXP (op, 0));

  if (register_operand (op, VOIDmode))
    {
      /* Before reload, classify by mode; after reload, by the actual
	 hard register.  */
      if ((!reload_completed && FLOAT_MODE_P (GET_MODE (op)))
	  || (reload_completed && FP_REG_P (op)))
	return OP_TYPE_FPN;

      return OP_TYPE_RN;
    }

  if (GET_CODE (op) == CONST_INT)
    {
      int ival;

      ival = INTVAL (op);

      /* Check for quick constants.  */
      switch (get_attr_type (insn))
	{
	case TYPE_ALUQ_L:
	  /* addq/subq accept 1..8 (and the negated range here).  */
	  if (IN_RANGE (ival, 1, 8) || IN_RANGE (ival, -8, -1))
	    return OP_TYPE_IMM_Q;

	  gcc_assert (!reload_completed);
	  break;

	case TYPE_MOVEQ_L:
	  if (USE_MOVQ (ival))
	    return OP_TYPE_IMM_Q;

	  gcc_assert (!reload_completed);
	  break;

	case TYPE_MOV3Q_L:
	  if (valid_mov3q_const (ival))
	    return OP_TYPE_IMM_Q;

	  gcc_assert (!reload_completed);
	  break;

	default:
	  break;
	}

      /* Not a quick constant: word if it fits in 16 bits, else long.  */
      if (IN_RANGE (ival, -0x8000, 0x7fff))
	return OP_TYPE_IMM_W;

      return OP_TYPE_IMM_L;
    }

  if (GET_CODE (op) == CONST_DOUBLE)
    {
      switch (GET_MODE (op))
	{
	case SFmode:
	  return OP_TYPE_IMM_W;

	case VOIDmode:
	case DFmode:
	  return OP_TYPE_IMM_L;

	default:
	  gcc_unreachable ();
	}
    }

  if (GET_CODE (op) == CONST
      || symbolic_operand (op, VOIDmode)
      || LABEL_P (op))
    {
      switch (GET_MODE (op))
	{
	case QImode:
	  return OP_TYPE_IMM_Q;

	case HImode:
	  return OP_TYPE_IMM_W;

	case SImode:
	  return OP_TYPE_IMM_L;

	default:
	  if (symbolic_operand (m68k_unwrap_symbol (op, false), VOIDmode))
	    /* Just a guess.  */
	    return OP_TYPE_IMM_W;

	  return OP_TYPE_IMM_L;
	}
    }

  /* Anything else should only survive until reload.  */
  gcc_assert (!reload_completed);

  if (FLOAT_MODE_P (GET_MODE (op)))
    return OP_TYPE_FPN;

  return OP_TYPE_RN;
}
5604
/* Implement opx_type attribute.
   Return type of INSN's operand X, translated from the generic
   OP_TYPE_* classification into the generated OPX_TYPE_* enum.
   If ADDRESS_P is true, return type of memory location operand refers to.  */
enum attr_opx_type
m68k_sched_attr_opx_type (rtx insn, int address_p)
{
  switch (sched_attr_op_type (insn, true, address_p != 0))
    {
    case OP_TYPE_RN:
      return OPX_TYPE_RN;

    case OP_TYPE_FPN:
      return OPX_TYPE_FPN;

    case OP_TYPE_MEM1:
      return OPX_TYPE_MEM1;

    case OP_TYPE_MEM234:
      return OPX_TYPE_MEM234;

    case OP_TYPE_MEM5:
      return OPX_TYPE_MEM5;

    case OP_TYPE_MEM6:
      return OPX_TYPE_MEM6;

    case OP_TYPE_MEM7:
      return OPX_TYPE_MEM7;

    case OP_TYPE_IMM_Q:
      return OPX_TYPE_IMM_Q;

    case OP_TYPE_IMM_W:
      return OPX_TYPE_IMM_W;

    case OP_TYPE_IMM_L:
      return OPX_TYPE_IMM_L;

    default:
      /* OP_TYPE_NONE should never be produced for operand X.  */
      gcc_unreachable ();
    }
}
5647
/* Implement opy_type attribute.
   Return type of INSN's operand Y, translated from the generic
   OP_TYPE_* classification into the generated OPY_TYPE_* enum.
   If ADDRESS_P is true, return type of memory location operand refers to.  */
enum attr_opy_type
m68k_sched_attr_opy_type (rtx insn, int address_p)
{
  switch (sched_attr_op_type (insn, false, address_p != 0))
    {
    case OP_TYPE_RN:
      return OPY_TYPE_RN;

    case OP_TYPE_FPN:
      return OPY_TYPE_FPN;

    case OP_TYPE_MEM1:
      return OPY_TYPE_MEM1;

    case OP_TYPE_MEM234:
      return OPY_TYPE_MEM234;

    case OP_TYPE_MEM5:
      return OPY_TYPE_MEM5;

    case OP_TYPE_MEM6:
      return OPY_TYPE_MEM6;

    case OP_TYPE_MEM7:
      return OPY_TYPE_MEM7;

    case OP_TYPE_IMM_Q:
      return OPY_TYPE_IMM_Q;

    case OP_TYPE_IMM_W:
      return OPY_TYPE_IMM_W;

    case OP_TYPE_IMM_L:
      return OPY_TYPE_IMM_L;

    default:
      /* OP_TYPE_NONE should never be produced for operand Y.  */
      gcc_unreachable ();
    }
}
5690
96fcacb7
MK
5691/* Return size of INSN as int. */
5692static int
5693sched_get_attr_size_int (rtx insn)
c47b0cb4
MK
5694{
5695 int size;
5696
96fcacb7 5697 switch (get_attr_type (insn))
c47b0cb4 5698 {
96fcacb7
MK
5699 case TYPE_IGNORE:
5700 /* There should be no references to m68k_sched_attr_size for 'ignore'
5701 instructions. */
5702 gcc_unreachable ();
5703 return 0;
5704
5705 case TYPE_MUL_L:
c47b0cb4
MK
5706 size = 2;
5707 break;
5708
5709 default:
5710 size = 1;
5711 break;
5712 }
5713
5714 switch (get_attr_opx_type (insn))
5715 {
5716 case OPX_TYPE_NONE:
96fcacb7
MK
5717 case OPX_TYPE_RN:
5718 case OPX_TYPE_FPN:
c47b0cb4
MK
5719 case OPX_TYPE_MEM1:
5720 case OPX_TYPE_MEM234:
5721 case OPY_TYPE_IMM_Q:
5722 break;
5723
5724 case OPX_TYPE_MEM5:
5725 case OPX_TYPE_MEM6:
5726 /* Here we assume that most absolute references are short. */
5727 case OPX_TYPE_MEM7:
5728 case OPY_TYPE_IMM_W:
5729 ++size;
5730 break;
5731
5732 case OPY_TYPE_IMM_L:
5733 size += 2;
5734 break;
5735
5736 default:
5737 gcc_unreachable ();
5738 }
5739
5740 switch (get_attr_opy_type (insn))
5741 {
5742 case OPY_TYPE_NONE:
96fcacb7
MK
5743 case OPY_TYPE_RN:
5744 case OPY_TYPE_FPN:
c47b0cb4
MK
5745 case OPY_TYPE_MEM1:
5746 case OPY_TYPE_MEM234:
5747 case OPY_TYPE_IMM_Q:
5748 break;
5749
5750 case OPY_TYPE_MEM5:
5751 case OPY_TYPE_MEM6:
5752 /* Here we assume that most absolute references are short. */
5753 case OPY_TYPE_MEM7:
5754 case OPY_TYPE_IMM_W:
5755 ++size;
5756 break;
5757
5758 case OPY_TYPE_IMM_L:
5759 size += 2;
5760 break;
5761
5762 default:
5763 gcc_unreachable ();
5764 }
5765
5766 if (size > 3)
5767 {
96fcacb7 5768 gcc_assert (!reload_completed);
c47b0cb4
MK
5769
5770 size = 3;
5771 }
5772
5773 return size;
5774}
5775
96fcacb7
MK
5776/* Return size of INSN as attribute enum value. */
5777enum attr_size
5778m68k_sched_attr_size (rtx insn)
5779{
5780 switch (sched_get_attr_size_int (insn))
5781 {
5782 case 1:
5783 return SIZE_1;
5784
5785 case 2:
5786 return SIZE_2;
5787
5788 case 3:
5789 return SIZE_3;
5790
5791 default:
5792 gcc_unreachable ();
96fcacb7
MK
5793 }
5794}
5795
/* Return operand X or Y (depending on OPX_P) of INSN,
   if it is a MEM, or NULL overwise.
   Note: the result collapses the operand classification to three
   buckets: OP_TYPE_RN (not a memory access), OP_TYPE_MEM1 (plain
   memory access) and OP_TYPE_MEM6 (indexed memory access).  */
static enum attr_op_type
sched_get_opxy_mem_type (rtx insn, bool opx_p)
{
  if (opx_p)
    {
      switch (get_attr_opx_type (insn))
	{
	case OPX_TYPE_NONE:
	case OPX_TYPE_RN:
	case OPX_TYPE_FPN:
	case OPX_TYPE_IMM_Q:
	case OPX_TYPE_IMM_W:
	case OPX_TYPE_IMM_L:
	  return OP_TYPE_RN;

	case OPX_TYPE_MEM1:
	case OPX_TYPE_MEM234:
	case OPX_TYPE_MEM5:
	case OPX_TYPE_MEM7:
	  return OP_TYPE_MEM1;

	case OPX_TYPE_MEM6:
	  return OP_TYPE_MEM6;

	default:
	  gcc_unreachable ();
	}
    }
  else
    {
      switch (get_attr_opy_type (insn))
	{
	case OPY_TYPE_NONE:
	case OPY_TYPE_RN:
	case OPY_TYPE_FPN:
	case OPY_TYPE_IMM_Q:
	case OPY_TYPE_IMM_W:
	case OPY_TYPE_IMM_L:
	  return OP_TYPE_RN;

	case OPY_TYPE_MEM1:
	case OPY_TYPE_MEM234:
	case OPY_TYPE_MEM5:
	case OPY_TYPE_MEM7:
	  return OP_TYPE_MEM1;

	case OPY_TYPE_MEM6:
	  return OP_TYPE_MEM6;

	default:
	  gcc_unreachable ();
	}
    }
}
5852
c47b0cb4
MK
/* Implement op_mem attribute.
   Classify INSN's combined memory behavior.  The OP_MEM_xy encoding
   appears to use the first position for reads and the second for
   writes, with 'I' marking an indexed access -- NOTE(review): inferred
   from the access-kind switches below; confirm against the op_mem
   attribute definition in m68k.md.  */
enum attr_op_mem
m68k_sched_attr_op_mem (rtx insn)
{
  enum attr_op_type opx;
  enum attr_op_type opy;

  opx = sched_get_opxy_mem_type (insn, true);
  opy = sched_get_opxy_mem_type (insn, false);

  /* Neither operand touches memory.  */
  if (opy == OP_TYPE_RN && opx == OP_TYPE_RN)
    return OP_MEM_00;

  /* Only X touches memory (plain access): classify by access kind.  */
  if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM1)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_R:
	  return OP_MEM_10;

	case OPX_ACCESS_W:
	  return OP_MEM_01;

	case OPX_ACCESS_RW:
	  return OP_MEM_11;

	default:
	  gcc_unreachable ();
	}
    }

  /* Only X touches memory (indexed access).  */
  if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM6)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_R:
	  return OP_MEM_I0;

	case OPX_ACCESS_W:
	  return OP_MEM_0I;

	case OPX_ACCESS_RW:
	  return OP_MEM_I1;

	default:
	  gcc_unreachable ();
	}
    }

  /* Y reads memory, X does not.  */
  if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_RN)
    return OP_MEM_10;

  /* Both operands access memory; after reload X must be a pure write.  */
  if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM1)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_W:
	  return OP_MEM_11;

	default:
	  gcc_assert (!reload_completed);
	  return OP_MEM_11;
	}
    }

  if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM6)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_W:
	  return OP_MEM_1I;

	default:
	  gcc_assert (!reload_completed);
	  return OP_MEM_1I;
	}
    }

  /* Y performs an indexed read, X does not touch memory.  */
  if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_RN)
    return OP_MEM_I0;

  if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM1)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_W:
	  return OP_MEM_I1;

	default:
	  gcc_assert (!reload_completed);
	  return OP_MEM_I1;
	}
    }

  /* The only remaining combination: both operands indexed, which can
     only occur before reload.  */
  gcc_assert (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM6);
  gcc_assert (!reload_completed);
  return OP_MEM_I1;
}
5951
5952/* Jump instructions types. Indexed by INSN_UID.
5953 The same rtl insn can be expanded into different asm instructions
5954 depending on the cc0_status. To properly determine type of jump
5955 instructions we scan instruction stream and map jumps types to this
5956 array. */
5957static enum attr_type *sched_branch_type;
5958
5959/* Return the type of the jump insn. */
5960enum attr_type
5961m68k_sched_branch_type (rtx insn)
5962{
5963 enum attr_type type;
5964
5965 type = sched_branch_type[INSN_UID (insn)];
5966
5967 gcc_assert (type != 0);
5968
5969 return type;
5970}
b8c96320 5971
96fcacb7
MK
/* Data for ColdFire V4 index bypass.
   Producer modifies register that is used as index in consumer with
   specified scale.
   This latch is armed by m68k_sched_indexed_address_bypass_p (for the
   1x-scale case) and consumed/cleared by m68k_sched_adjust_cost.  */
static struct
{
  /* Producer instruction.  */
  rtx pro;

  /* Consumer instruction.  */
  rtx con;

  /* Scale of indexed memory access within consumer.
     Or zero if bypass should not be effective at the moment.  */
  int scale;
} sched_cfv4_bypass_data;
b8c96320
MK
5987
/* An empty state that is used in m68k_sched_adjust_cost.  */
static state_t sched_adjust_cost_state;

/* Implement adjust_cost scheduler hook.
   Return adjusted COST of dependency LINK between DEF_INSN and INSN.  */
static int
m68k_sched_adjust_cost (rtx insn, rtx link ATTRIBUTE_UNUSED, rtx def_insn,
			int cost)
{
  int delay;

  /* Unrecognized insns have no DFA data; leave the cost untouched.  */
  if (recog_memoized (def_insn) < 0
      || recog_memoized (insn) < 0)
    return cost;

  if (sched_cfv4_bypass_data.scale == 1)
    /* Handle ColdFire V4 bypass for indexed address with 1x scale.  */
    {
      /* haifa-sched.c: insn_cost () calls bypass_p () just before
	 targetm.sched.adjust_cost ().  Hence, we can be relatively sure
	 that the data in sched_cfv4_bypass_data is up to date.  */
      gcc_assert (sched_cfv4_bypass_data.pro == def_insn
		  && sched_cfv4_bypass_data.con == insn);

      /* Enforce the minimum latency for the 1x-scale index case that
	 could not be expressed as a define_bypass.  */
      if (cost < 3)
	cost = 3;

      /* Clear the latch armed by m68k_sched_indexed_address_bypass_p.  */
      sched_cfv4_bypass_data.pro = NULL;
      sched_cfv4_bypass_data.con = NULL;
      sched_cfv4_bypass_data.scale = 0;
    }
  else
    gcc_assert (sched_cfv4_bypass_data.pro == NULL
		&& sched_cfv4_bypass_data.con == NULL
		&& sched_cfv4_bypass_data.scale == 0);

  /* Don't try to issue INSN earlier than DFA permits.
     This is especially useful for instructions that write to memory,
     as their true dependence (default) latency is better to be set to 0
     to workaround alias analysis limitations.
     This is, in fact, a machine independent tweak, so, probably,
     it should be moved to haifa-sched.c: insn_cost ().  */
  delay = min_insn_conflict_delay (sched_adjust_cost_state, def_insn, insn);
  if (delay > cost)
    cost = delay;

  return cost;
}
6036
96fcacb7
MK
6037/* Return maximal number of insns that can be scheduled on a single cycle. */
6038static int
6039m68k_sched_issue_rate (void)
6040{
6041 switch (m68k_sched_cpu)
6042 {
6043 case CPU_CFV1:
6044 case CPU_CFV2:
6045 case CPU_CFV3:
6046 return 1;
6047
6048 case CPU_CFV4:
6049 return 2;
6050
6051 default:
6052 gcc_unreachable ();
6053 return 0;
6054 }
6055}
6056
826fadba
MK
/* Maximal length of instruction for current CPU.
   E.g. it is 3 for any ColdFire core.  */
static int max_insn_size;

/* Data to model instruction buffer of CPU.  */
struct _sched_ib
{
  /* True if instruction buffer model is modeled for current CPU.
     Cleared for ColdFire V4, which is not modeled (see
     m68k_sched_md_init_global).  */
  bool enabled_p;

  /* Size of the instruction buffer in words.  */
  int size;

  /* Number of filled words in the instruction buffer.
     May be temporarily set to -2 at the start of a block as a
     workaround for haifa-sched's initial advance_cycle () call
     (see m68k_sched_md_init).  */
  int filled;

  /* Additional information about instruction buffer for CPUs that have
     a buffer of instruction records, rather then a plain buffer
     of instruction words.  */
  struct _sched_ib_records
  {
    /* Size of buffer in records.  */
    int n_insns;

    /* Array to hold data on adjustements made to the size of the buffer.
       Used as a ring of the last n_insns adjustments (see
       m68k_sched_variable_issue).  */
    int *adjust;

    /* Index of the above array.  */
    int adjust_index;
  } records;

  /* An insn that reserves (marks empty) one word in the instruction buffer.
     Generated once per scheduling pass in m68k_sched_md_init_global.  */
  rtx insn;
};

/* Singleton buffer model; (re)initialized per pass in
   m68k_sched_md_init_global and per block in m68k_sched_md_init.  */
static struct _sched_ib sched_ib;
b8c96320
MK
6093
/* ID of memory unit.  Obtained from get_cpu_unit_code ("cf_mem1") in
   m68k_sched_md_init_global; queried in m68k_sched_dfa_pre_advance_cycle.  */
static int sched_mem_unit_code;
6096
/* Implementation of the targetm.sched.variable_issue () hook.
   It is called after INSN was issued.  It returns the number of insns
   that can possibly get scheduled on the current cycle.
   It is used here to determine the effect of INSN on the instruction
   buffer.  */
static int
m68k_sched_variable_issue (FILE *sched_dump ATTRIBUTE_UNUSED,
			   int sched_verbose ATTRIBUTE_UNUSED,
			   rtx insn, int can_issue_more)
{
  int insn_size;

  if (recog_memoized (insn) >= 0 && get_attr_type (insn) != TYPE_IGNORE)
    {
      switch (m68k_sched_cpu)
	{
	case CPU_CFV1:
	case CPU_CFV2:
	  /* Plain word buffer: the insn simply drains its own size.  */
	  insn_size = sched_get_attr_size_int (insn);
	  break;

	case CPU_CFV3:
	  insn_size = sched_get_attr_size_int (insn);

	  /* ColdFire V3 and V4 cores have instruction buffers that can
	     accumulate up to 8 instructions regardless of instructions'
	     sizes.  So we should take care not to "prefetch" 24 one-word
	     or 12 two-words instructions.
	     To model this behavior we temporarily decrease size of the
	     buffer by (max_insn_size - insn_size) for next 7 instructions.  */
	  {
	    int adjust;

	    adjust = max_insn_size - insn_size;
	    sched_ib.size -= adjust;

	    /* Shrinking the buffer can make it overfull; clamp.  */
	    if (sched_ib.filled > sched_ib.size)
	      sched_ib.filled = sched_ib.size;

	    /* Remember this adjustment in the ring so it can be undone
	       after n_insns more insns.  */
	    sched_ib.records.adjust[sched_ib.records.adjust_index] = adjust;
	  }

	  /* Advance the ring index, wrapping at n_insns.  */
	  ++sched_ib.records.adjust_index;
	  if (sched_ib.records.adjust_index == sched_ib.records.n_insns)
	    sched_ib.records.adjust_index = 0;

	  /* Undo adjustement we did 7 instructions ago.  */
	  sched_ib.size
	    += sched_ib.records.adjust[sched_ib.records.adjust_index];

	  break;

	case CPU_CFV4:
	  /* V4's buffer is not modeled (see m68k_sched_md_init_global).  */
	  gcc_assert (!sched_ib.enabled_p);
	  insn_size = 0;
	  break;

	default:
	  gcc_unreachable ();
	}

      /* The scheduler must never issue an insn whose words were not
	 all prefetched (dfa_post_advance_cycle enforces the stall).  */
      gcc_assert (insn_size <= sched_ib.filled);
      --can_issue_more;
    }
  else if (GET_CODE (PATTERN (insn)) == ASM_INPUT
	   || asm_noperands (PATTERN (insn)) >= 0)
    /* Inline asm has unknown size; conservatively drain the whole
       buffer.  */
    insn_size = sched_ib.filled;
  else
    insn_size = 0;

  sched_ib.filled -= insn_size;

  return can_issue_more;
}
6171
96fcacb7
MK
/* Return how many instructions should scheduler lookahead to choose the
   best one.  */
static int
m68k_sched_first_cycle_multipass_dfa_lookahead (void)
{
  /* One less than the issue rate: no lookahead on single-issue cores.  */
  int rate = m68k_sched_issue_rate ();

  return rate - 1;
}
6179
/* Implementation of targetm.sched.init_global () hook.
   It is invoked once per scheduling pass and is used here
   to initialize scheduler constants.  */
static void
m68k_sched_md_init_global (FILE *sched_dump ATTRIBUTE_UNUSED,
			   int sched_verbose ATTRIBUTE_UNUSED,
			   int n_insns ATTRIBUTE_UNUSED)
{
  /* Init branch types.  */
  {
    rtx insn;

    /* Zero-initialized so m68k_sched_branch_type can assert that every
       queried entry was actually classified.  */
    sched_branch_type = XCNEWVEC (enum attr_type, get_max_uid () + 1);

    for (insn = get_insns (); insn != NULL_RTX; insn = NEXT_INSN (insn))
      {
	if (JUMP_P (insn))
	  /* !!! FIXME: Implement real scan here.  */
	  sched_branch_type[INSN_UID (insn)] = TYPE_BCC;
      }
  }

#ifdef ENABLE_CHECKING
  /* Check that all instructions have DFA reservations and
     that all instructions can be issued from a clean state.  */
  {
    rtx insn;
    state_t state;

    state = alloca (state_size ());

    for (insn = get_insns (); insn != NULL_RTX; insn = NEXT_INSN (insn))
      {
	if (INSN_P (insn) && recog_memoized (insn) >= 0)
	  {
	    gcc_assert (insn_has_dfa_reservation_p (insn));

	    state_reset (state);
	    if (state_transition (state, insn) >= 0)
	      gcc_unreachable ();
	  }
      }
  }
#endif

  /* Setup target cpu.  */

  /* ColdFire V4 has a set of features to keep its instruction buffer full
     (e.g., a separate memory bus for instructions) and, hence, we do not model
     buffer for this CPU.  */
  sched_ib.enabled_p = (m68k_sched_cpu != CPU_CFV4);

  switch (m68k_sched_cpu)
    {
    case CPU_CFV4:
      sched_ib.filled = 0;

      /* FALLTHRU */

    case CPU_CFV1:
    case CPU_CFV2:
      /* Plain word buffer: no per-record adjustment ring needed.  */
      max_insn_size = 3;
      sched_ib.records.n_insns = 0;
      sched_ib.records.adjust = NULL;
      break;

    case CPU_CFV3:
      /* V3 buffers up to 8 instruction records; allocate the ring used
	 by m68k_sched_variable_issue to model that limit.  */
      max_insn_size = 3;
      sched_ib.records.n_insns = 8;
      sched_ib.records.adjust = XNEWVEC (int, sched_ib.records.n_insns);
      break;

    default:
      gcc_unreachable ();
    }

  sched_mem_unit_code = get_cpu_unit_code ("cf_mem1");

  /* Scratch DFA state used by m68k_sched_adjust_cost.  */
  sched_adjust_cost_state = xmalloc (state_size ());
  state_reset (sched_adjust_cost_state);

  /* Materialize the insn that reserves one buffer word in the DFA
     (presumably the "ib" pattern in the machine description --
     confirm against cf.md); used by dfa_post_advance_cycle.  */
  start_sequence ();
  emit_insn (gen_ib ());
  sched_ib.insn = get_insns ();
  end_sequence ();
}
6266
6267/* Scheduling pass is now finished. Free/reset static variables. */
6268static void
6269m68k_sched_md_finish_global (FILE *dump ATTRIBUTE_UNUSED,
6270 int verbose ATTRIBUTE_UNUSED)
6271{
826fadba 6272 sched_ib.insn = NULL;
b8c96320
MK
6273
6274 free (sched_adjust_cost_state);
6275 sched_adjust_cost_state = NULL;
6276
6277 sched_mem_unit_code = 0;
826fadba
MK
6278
6279 free (sched_ib.records.adjust);
6280 sched_ib.records.adjust = NULL;
6281 sched_ib.records.n_insns = 0;
6282 max_insn_size = 0;
b8c96320
MK
6283
6284 free (sched_branch_type);
6285 sched_branch_type = NULL;
6286}
6287
/* Implementation of targetm.sched.init () hook.
   It is invoked each time scheduler starts on the new block (basic block or
   extended basic block).  Resets the per-block state of the instruction
   buffer model.  */
static void
m68k_sched_md_init (FILE *sched_dump ATTRIBUTE_UNUSED,
		    int sched_verbose ATTRIBUTE_UNUSED,
		    int n_insns ATTRIBUTE_UNUSED)
{
  switch (m68k_sched_cpu)
    {
    case CPU_CFV1:
    case CPU_CFV2:
      /* Fixed six-word buffer on V1/V2.  */
      sched_ib.size = 6;
      break;

    case CPU_CFV3:
      /* Record-based buffer: capacity in words, with the adjustment ring
	 (see m68k_sched_variable_issue) reset for the new block.  */
      sched_ib.size = sched_ib.records.n_insns * max_insn_size;

      memset (sched_ib.records.adjust, 0,
	      sched_ib.records.n_insns * sizeof (*sched_ib.records.adjust));
      sched_ib.records.adjust_index = 0;
      break;

    case CPU_CFV4:
      /* V4's buffer is not modeled.  */
      gcc_assert (!sched_ib.enabled_p);
      sched_ib.size = 0;
      break;

    default:
      gcc_unreachable ();
    }

  if (sched_ib.enabled_p)
    /* haifa-sched.c: schedule_block () calls advance_cycle () just before
       the first cycle.  Workaround that.  */
    sched_ib.filled = -2;
}
6325
6326/* Implementation of targetm.sched.dfa_pre_advance_cycle () hook.
6327 It is invoked just before current cycle finishes and is used here
6328 to track if instruction buffer got its two words this cycle. */
6329static void
6330m68k_sched_dfa_pre_advance_cycle (void)
6331{
96fcacb7
MK
6332 if (!sched_ib.enabled_p)
6333 return;
6334
b8c96320
MK
6335 if (!cpu_unit_reservation_p (curr_state, sched_mem_unit_code))
6336 {
826fadba 6337 sched_ib.filled += 2;
b8c96320 6338
826fadba
MK
6339 if (sched_ib.filled > sched_ib.size)
6340 sched_ib.filled = sched_ib.size;
b8c96320
MK
6341 }
6342}
6343
6344/* Implementation of targetm.sched.dfa_post_advance_cycle () hook.
6345 It is invoked just after new cycle begins and is used here
6346 to setup number of filled words in the instruction buffer so that
6347 instructions which won't have all their words prefetched would be
6348 stalled for a cycle. */
6349static void
6350m68k_sched_dfa_post_advance_cycle (void)
6351{
6352 int i;
b8c96320 6353
96fcacb7
MK
6354 if (!sched_ib.enabled_p)
6355 return;
6356
b8c96320
MK
6357 /* Setup number of prefetched instruction words in the instruction
6358 buffer. */
826fadba
MK
6359 i = max_insn_size - sched_ib.filled;
6360
6361 while (--i >= 0)
b8c96320 6362 {
826fadba 6363 if (state_transition (curr_state, sched_ib.insn) >= 0)
b8c96320
MK
6364 gcc_unreachable ();
6365 }
6366}
96fcacb7
MK
6367
6368/* Return X or Y (depending on OPX_P) operand of INSN,
6369 if it is an integer register, or NULL overwise. */
6370static rtx
6371sched_get_reg_operand (rtx insn, bool opx_p)
6372{
6373 rtx op = NULL;
6374
6375 if (opx_p)
6376 {
6377 if (get_attr_opx_type (insn) == OPX_TYPE_RN)
6378 {
6379 op = sched_get_operand (insn, true);
6380 gcc_assert (op != NULL);
6381
6382 if (!reload_completed && !REG_P (op))
6383 return NULL;
6384 }
6385 }
6386 else
6387 {
6388 if (get_attr_opy_type (insn) == OPY_TYPE_RN)
6389 {
6390 op = sched_get_operand (insn, false);
6391 gcc_assert (op != NULL);
6392
6393 if (!reload_completed && !REG_P (op))
6394 return NULL;
6395 }
6396 }
6397
6398 return op;
6399}
6400
6401/* Return true, if X or Y (depending on OPX_P) operand of INSN
6402 is a MEM. */
6403static bool
6404sched_mem_operand_p (rtx insn, bool opx_p)
6405{
6406 switch (sched_get_opxy_mem_type (insn, opx_p))
6407 {
6408 case OP_TYPE_MEM1:
6409 case OP_TYPE_MEM6:
6410 return true;
6411
6412 default:
6413 return false;
6414 }
6415}
6416
6417/* Return X or Y (depending on OPX_P) operand of INSN,
6418 if it is a MEM, or NULL overwise. */
6419static rtx
6420sched_get_mem_operand (rtx insn, bool must_read_p, bool must_write_p)
6421{
6422 bool opx_p;
6423 bool opy_p;
6424
6425 opx_p = false;
6426 opy_p = false;
6427
6428 if (must_read_p)
6429 {
6430 opx_p = true;
6431 opy_p = true;
6432 }
6433
6434 if (must_write_p)
6435 {
6436 opx_p = true;
6437 opy_p = false;
6438 }
6439
6440 if (opy_p && sched_mem_operand_p (insn, false))
6441 return sched_get_operand (insn, false);
6442
6443 if (opx_p && sched_mem_operand_p (insn, true))
6444 return sched_get_operand (insn, true);
6445
6446 gcc_unreachable ();
6447 return NULL;
6448}
6449
6450/* Return non-zero if PRO modifies register used as part of
6451 address in CON. */
6452int
6453m68k_sched_address_bypass_p (rtx pro, rtx con)
6454{
6455 rtx pro_x;
6456 rtx con_mem_read;
6457
6458 pro_x = sched_get_reg_operand (pro, true);
6459 if (pro_x == NULL)
6460 return 0;
6461
6462 con_mem_read = sched_get_mem_operand (con, true, false);
6463 gcc_assert (con_mem_read != NULL);
6464
6465 if (reg_mentioned_p (pro_x, con_mem_read))
6466 return 1;
6467
6468 return 0;
6469}
6470
6471/* Helper function for m68k_sched_indexed_address_bypass_p.
6472 if PRO modifies register used as index in CON,
6473 return scale of indexed memory access in CON. Return zero overwise. */
6474static int
6475sched_get_indexed_address_scale (rtx pro, rtx con)
6476{
6477 rtx reg;
6478 rtx mem;
6479 struct m68k_address address;
6480
6481 reg = sched_get_reg_operand (pro, true);
6482 if (reg == NULL)
6483 return 0;
6484
6485 mem = sched_get_mem_operand (con, true, false);
6486 gcc_assert (mem != NULL && MEM_P (mem));
6487
6488 if (!m68k_decompose_address (GET_MODE (mem), XEXP (mem, 0), reload_completed,
6489 &address))
6490 gcc_unreachable ();
6491
6492 if (REGNO (reg) == REGNO (address.index))
6493 {
6494 gcc_assert (address.scale != 0);
6495 return address.scale;
6496 }
6497
6498 return 0;
6499}
6500
6501/* Return non-zero if PRO modifies register used
6502 as index with scale 2 or 4 in CON. */
6503int
6504m68k_sched_indexed_address_bypass_p (rtx pro, rtx con)
6505{
6506 gcc_assert (sched_cfv4_bypass_data.pro == NULL
6507 && sched_cfv4_bypass_data.con == NULL
6508 && sched_cfv4_bypass_data.scale == 0);
6509
6510 switch (sched_get_indexed_address_scale (pro, con))
6511 {
6512 case 1:
6513 /* We can't have a variable latency bypass, so
6514 remember to adjust the insn cost in adjust_cost hook. */
6515 sched_cfv4_bypass_data.pro = pro;
6516 sched_cfv4_bypass_data.con = con;
6517 sched_cfv4_bypass_data.scale = 1;
6518 return 0;
6519
6520 case 2:
6521 case 4:
6522 return 1;
6523
6524 default:
6525 return 0;
6526 }
6527}
75df395f 6528
e0601576
RH
/* We generate a two-instructions program at M_TRAMP :
	movea.l &CHAIN_VALUE,%a0
	jmp FNADDR
   where %a0 can be modified by changing STATIC_CHAIN_REGNUM.  */

static void
m68k_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
{
  rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
  rtx mem;

  gcc_assert (ADDRESS_REGNO_P (STATIC_CHAIN_REGNUM));

  /* Bytes 0-1: opcode word of "movea.l #imm32,%aN".  0x207C encodes %a0;
     the destination register field is shifted in via
     (STATIC_CHAIN_REGNUM - 8) << 9 (address registers are regnos 8+).  */
  mem = adjust_address (m_tramp, HImode, 0);
  emit_move_insn (mem, GEN_INT(0x207C + ((STATIC_CHAIN_REGNUM-8) << 9)));
  /* Bytes 2-5: the 32-bit immediate -- the static chain value.  */
  mem = adjust_address (m_tramp, SImode, 2);
  emit_move_insn (mem, chain_value);

  /* Bytes 6-7: opcode word of "jmp" with a 32-bit absolute target.  */
  mem = adjust_address (m_tramp, HImode, 6);
  emit_move_insn (mem, GEN_INT(0x4EF9));
  /* Bytes 8-11: the function's entry address.  */
  mem = adjust_address (m_tramp, SImode, 8);
  emit_move_insn (mem, fnaddr);

  /* Target-specific finalization (e.g. cache flush of the written code;
     exact semantics depend on FINALIZE_TRAMPOLINE's definition).  */
  FINALIZE_TRAMPOLINE (XEXP (m_tramp, 0));
}
6554
079e7538
NF
6555/* On the 68000, the RTS insn cannot pop anything.
6556 On the 68010, the RTD insn may be used to pop them if the number
6557 of args is fixed, but if the number is variable then the caller
6558 must pop them all. RTD can't be used for library calls now
6559 because the library is compiled with the Unix compiler.
6560 Use of RTD is a selectable option, since it is incompatible with
6561 standard Unix calling sequences. If the option is not selected,
6562 the caller must always pop the args. */
6563
6564static int
6565m68k_return_pops_args (tree fundecl, tree funtype, int size)
6566{
6567 return ((TARGET_RTD
6568 && (!fundecl
6569 || TREE_CODE (fundecl) != IDENTIFIER_NODE)
f38958e8 6570 && (!stdarg_p (funtype)))
079e7538
NF
6571 ? size : 0);
6572}
6573
75df395f 6574#include "gt-m68k.h"