]> git.ipfire.org Git - thirdparty/gcc.git/blame - gcc/config/m68k/m68k.c
tree-core.h: Include symtab.h.
[thirdparty/gcc.git] / gcc / config / m68k / m68k.c
CommitLineData
79e68feb 1/* Subroutines for insn-output.c for Motorola 68000 family.
5624e564 2 Copyright (C) 1987-2015 Free Software Foundation, Inc.
79e68feb 3
7ec022b2 4This file is part of GCC.
79e68feb 5
7ec022b2 6GCC is free software; you can redistribute it and/or modify
79e68feb 7it under the terms of the GNU General Public License as published by
2f83c7d6 8the Free Software Foundation; either version 3, or (at your option)
79e68feb
RS
9any later version.
10
7ec022b2 11GCC is distributed in the hope that it will be useful,
79e68feb
RS
12but WITHOUT ANY WARRANTY; without even the implied warranty of
13MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14GNU General Public License for more details.
15
16You should have received a copy of the GNU General Public License
2f83c7d6
NC
17along with GCC; see the file COPYING3. If not see
18<http://www.gnu.org/licenses/>. */
79e68feb 19
79e68feb 20#include "config.h"
f5220a5d 21#include "system.h"
4977bab6 22#include "coretypes.h"
c7131fb2 23#include "backend.h"
da932f04 24#include "tree.h"
c7131fb2
AM
25#include "rtl.h"
26#include "df.h"
27#include "alias.h"
40e23961 28#include "fold-const.h"
d8a2d370
DN
29#include "calls.h"
30#include "stor-layout.h"
31#include "varasm.h"
79e68feb 32#include "regs.h"
79e68feb
RS
33#include "insn-config.h"
34#include "conditions.h"
79e68feb
RS
35#include "output.h"
36#include "insn-attr.h"
1d8eaa6b 37#include "recog.h"
718f9c0f 38#include "diagnostic-core.h"
36566b39 39#include "flags.h"
36566b39
PK
40#include "expmed.h"
41#include "dojump.h"
42#include "explow.h"
43#include "emit-rtl.h"
44#include "stmt.h"
6d5f49b2
RH
45#include "expr.h"
46#include "reload.h"
5505f548 47#include "tm_p.h"
672a6f42 48#include "target.h"
2cc07db4 49#include "debug.h"
60393bbc
AM
50#include "cfgrtl.h"
51#include "cfganal.h"
52#include "lcm.h"
53#include "cfgbuild.h"
54#include "cfgcleanup.h"
b8c96320
MK
55/* ??? Need to add a dependency between m68k.o and sched-int.h. */
56#include "sched-int.h"
57#include "insn-codes.h"
96e45421 58#include "opts.h"
8b281334 59#include "optabs.h"
9b2b7279 60#include "builtins.h"
82eee4f1 61#include "rtl-iter.h"
79e68feb 62
994c5d85 63/* This file should be included last. */
d58627a0
RS
64#include "target-def.h"
65
a4e9467d
RZ
/* Map each hard register number to its register class: eight data
   registers, eight address registers, eight FP registers, plus one
   final entry (classified as an address register).  */
enum reg_class regno_reg_class[] =
{
  DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
  DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
  ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
  ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  ADDR_REGS
};
76
77
a40ed0f3
KH
78/* The minimum number of integer registers that we want to save with the
79 movem instruction. Using two movel instructions instead of a single
80 moveml is about 15% faster for the 68020 and 68030 at no expense in
81 code size. */
82#define MIN_MOVEM_REGS 3
83
84/* The minimum number of floating point registers that we want to save
85 with the fmovem instruction. */
86#define MIN_FMOVEM_REGS 1
87
/* Structure describing stack frame layout.  */
struct m68k_frame
{
  /* Stack pointer to frame pointer offset.  */
  HOST_WIDE_INT offset;

  /* Offset of FPU registers.  */
  HOST_WIDE_INT foffset;

  /* Frame size in bytes (rounded up).  */
  HOST_WIDE_INT size;

  /* Number of saved data/address registers and the mask saying
     which ones (bit I set => register D0_REG + I is saved).  */
  int reg_no;
  unsigned int reg_mask;

  /* Number of saved FPU registers and the corresponding mask
     (bit I set => register FP0_REG + I is saved).  */
  int fpu_no;
  unsigned int fpu_mask;

  /* Offsets relative to ARG_POINTER.  */
  HOST_WIDE_INT frame_pointer_offset;
  HOST_WIDE_INT stack_pointer_offset;

  /* funcdef_no of the function which the above information refers to;
     used to avoid recomputing the layout for the same function.  */
  int funcdef_no;
};

/* Current frame information calculated by m68k_compute_frame_layout().  */
static struct m68k_frame current_frame;
118
fc2241eb
RS
/* Structure describing an m68k address.

   If CODE is UNKNOWN, the address is BASE + INDEX * SCALE + OFFSET,
   with null fields evaluating to 0.  Here:

   - BASE satisfies m68k_legitimate_base_reg_p
   - INDEX satisfies m68k_legitimate_index_reg_p
   - OFFSET satisfies m68k_legitimate_constant_address_p

   INDEX is either HImode or SImode.  The other fields are SImode.

   If CODE is PRE_DEC, the address is -(BASE).  If CODE is POST_INC,
   the address is (BASE)+.  */
struct m68k_address {
  enum rtx_code code;
  rtx base;
  rtx index;
  rtx offset;
  int scale;
};
139
ac44248e 140static int m68k_sched_adjust_cost (rtx_insn *, rtx, rtx_insn *, int);
96fcacb7 141static int m68k_sched_issue_rate (void);
ac44248e 142static int m68k_sched_variable_issue (FILE *, int, rtx_insn *, int);
b8c96320
MK
143static void m68k_sched_md_init_global (FILE *, int, int);
144static void m68k_sched_md_finish_global (FILE *, int);
145static void m68k_sched_md_init (FILE *, int, int);
146static void m68k_sched_dfa_pre_advance_cycle (void);
147static void m68k_sched_dfa_post_advance_cycle (void);
96fcacb7 148static int m68k_sched_first_cycle_multipass_dfa_lookahead (void);
b8c96320 149
7b5cbb57 150static bool m68k_can_eliminate (const int, const int);
5efd84c5 151static void m68k_conditional_register_usage (void);
ef4bddc2 152static bool m68k_legitimate_address_p (machine_mode, rtx, bool);
c5387660 153static void m68k_option_override (void);
03e69b12 154static void m68k_override_options_after_change (void);
8a4a2253
BI
155static rtx find_addr_reg (rtx);
156static const char *singlemove_string (rtx *);
8a4a2253
BI
157static void m68k_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
158 HOST_WIDE_INT, tree);
8636be86 159static rtx m68k_struct_value_rtx (tree, int);
48ed72a4
PB
160static tree m68k_handle_fndecl_attribute (tree *node, tree name,
161 tree args, int flags,
162 bool *no_add_attrs);
3d74bc09 163static void m68k_compute_frame_layout (void);
48ed72a4 164static bool m68k_save_reg (unsigned int regno, bool interrupt_handler);
f7e70894 165static bool m68k_ok_for_sibcall_p (tree, tree);
75df395f 166static bool m68k_tls_symbol_p (rtx);
ef4bddc2 167static rtx m68k_legitimize_address (rtx, rtx, machine_mode);
68f932c4 168static bool m68k_rtx_costs (rtx, int, int, int, int *, bool);
1c445f03 169#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
511e41e5 170static bool m68k_return_in_memory (const_tree, const_tree);
1c445f03 171#endif
75df395f 172static void m68k_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
e0601576 173static void m68k_trampoline_init (rtx, tree, rtx);
079e7538 174static int m68k_return_pops_args (tree, tree, int);
7b0f476d 175static rtx m68k_delegitimize_address (rtx);
ef4bddc2 176static void m68k_function_arg_advance (cumulative_args_t, machine_mode,
13d3961c 177 const_tree, bool);
ef4bddc2 178static rtx m68k_function_arg (cumulative_args_t, machine_mode,
13d3961c 179 const_tree, bool);
ef4bddc2 180static bool m68k_cannot_force_const_mem (machine_mode mode, rtx x);
cb69db4f 181static bool m68k_output_addr_const_extra (FILE *, rtx);
8b281334 182static void m68k_init_sync_libfuncs (void) ATTRIBUTE_UNUSED;
79e68feb 183\f
672a6f42 184/* Initialize the GCC target structure. */
301d03af
RS
185
186#if INT_OP_GROUP == INT_OP_DOT_WORD
187#undef TARGET_ASM_ALIGNED_HI_OP
188#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
189#endif
190
191#if INT_OP_GROUP == INT_OP_NO_DOT
192#undef TARGET_ASM_BYTE_OP
193#define TARGET_ASM_BYTE_OP "\tbyte\t"
194#undef TARGET_ASM_ALIGNED_HI_OP
195#define TARGET_ASM_ALIGNED_HI_OP "\tshort\t"
196#undef TARGET_ASM_ALIGNED_SI_OP
197#define TARGET_ASM_ALIGNED_SI_OP "\tlong\t"
198#endif
199
200#if INT_OP_GROUP == INT_OP_DC
201#undef TARGET_ASM_BYTE_OP
202#define TARGET_ASM_BYTE_OP "\tdc.b\t"
203#undef TARGET_ASM_ALIGNED_HI_OP
204#define TARGET_ASM_ALIGNED_HI_OP "\tdc.w\t"
205#undef TARGET_ASM_ALIGNED_SI_OP
206#define TARGET_ASM_ALIGNED_SI_OP "\tdc.l\t"
207#endif
208
209#undef TARGET_ASM_UNALIGNED_HI_OP
210#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
211#undef TARGET_ASM_UNALIGNED_SI_OP
212#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
213
c590b625
RH
214#undef TARGET_ASM_OUTPUT_MI_THUNK
215#define TARGET_ASM_OUTPUT_MI_THUNK m68k_output_mi_thunk
bdabc150 216#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
3101faab 217#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
c590b625 218
1bc7c5b6
ZW
219#undef TARGET_ASM_FILE_START_APP_OFF
220#define TARGET_ASM_FILE_START_APP_OFF true
221
506d7b68
PB
222#undef TARGET_LEGITIMIZE_ADDRESS
223#define TARGET_LEGITIMIZE_ADDRESS m68k_legitimize_address
224
b8c96320
MK
225#undef TARGET_SCHED_ADJUST_COST
226#define TARGET_SCHED_ADJUST_COST m68k_sched_adjust_cost
227
96fcacb7
MK
228#undef TARGET_SCHED_ISSUE_RATE
229#define TARGET_SCHED_ISSUE_RATE m68k_sched_issue_rate
230
b8c96320
MK
231#undef TARGET_SCHED_VARIABLE_ISSUE
232#define TARGET_SCHED_VARIABLE_ISSUE m68k_sched_variable_issue
233
234#undef TARGET_SCHED_INIT_GLOBAL
235#define TARGET_SCHED_INIT_GLOBAL m68k_sched_md_init_global
236
237#undef TARGET_SCHED_FINISH_GLOBAL
238#define TARGET_SCHED_FINISH_GLOBAL m68k_sched_md_finish_global
239
240#undef TARGET_SCHED_INIT
241#define TARGET_SCHED_INIT m68k_sched_md_init
242
243#undef TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE
244#define TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE m68k_sched_dfa_pre_advance_cycle
245
246#undef TARGET_SCHED_DFA_POST_ADVANCE_CYCLE
247#define TARGET_SCHED_DFA_POST_ADVANCE_CYCLE m68k_sched_dfa_post_advance_cycle
248
96fcacb7
MK
249#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
250#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
251 m68k_sched_first_cycle_multipass_dfa_lookahead
252
c5387660
JM
253#undef TARGET_OPTION_OVERRIDE
254#define TARGET_OPTION_OVERRIDE m68k_option_override
255
03e69b12
MP
256#undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
257#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m68k_override_options_after_change
258
3c50106f
RH
259#undef TARGET_RTX_COSTS
260#define TARGET_RTX_COSTS m68k_rtx_costs
261
48ed72a4
PB
262#undef TARGET_ATTRIBUTE_TABLE
263#define TARGET_ATTRIBUTE_TABLE m68k_attribute_table
264
8636be86 265#undef TARGET_PROMOTE_PROTOTYPES
586de218 266#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
8636be86
KH
267
268#undef TARGET_STRUCT_VALUE_RTX
269#define TARGET_STRUCT_VALUE_RTX m68k_struct_value_rtx
270
7ffb5e78 271#undef TARGET_CANNOT_FORCE_CONST_MEM
fbbf66e7 272#define TARGET_CANNOT_FORCE_CONST_MEM m68k_cannot_force_const_mem
7ffb5e78 273
f7e70894
RS
274#undef TARGET_FUNCTION_OK_FOR_SIBCALL
275#define TARGET_FUNCTION_OK_FOR_SIBCALL m68k_ok_for_sibcall_p
276
1c445f03
NS
277#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
278#undef TARGET_RETURN_IN_MEMORY
279#define TARGET_RETURN_IN_MEMORY m68k_return_in_memory
280#endif
281
75df395f
MK
282#ifdef HAVE_AS_TLS
283#undef TARGET_HAVE_TLS
284#define TARGET_HAVE_TLS (true)
285
286#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
287#define TARGET_ASM_OUTPUT_DWARF_DTPREL m68k_output_dwarf_dtprel
288#endif
289
c6c3dba9
PB
290#undef TARGET_LEGITIMATE_ADDRESS_P
291#define TARGET_LEGITIMATE_ADDRESS_P m68k_legitimate_address_p
292
7b5cbb57
AS
293#undef TARGET_CAN_ELIMINATE
294#define TARGET_CAN_ELIMINATE m68k_can_eliminate
295
5efd84c5
NF
296#undef TARGET_CONDITIONAL_REGISTER_USAGE
297#define TARGET_CONDITIONAL_REGISTER_USAGE m68k_conditional_register_usage
298
e0601576
RH
299#undef TARGET_TRAMPOLINE_INIT
300#define TARGET_TRAMPOLINE_INIT m68k_trampoline_init
301
079e7538
NF
302#undef TARGET_RETURN_POPS_ARGS
303#define TARGET_RETURN_POPS_ARGS m68k_return_pops_args
304
7b0f476d
AS
305#undef TARGET_DELEGITIMIZE_ADDRESS
306#define TARGET_DELEGITIMIZE_ADDRESS m68k_delegitimize_address
307
13d3961c
NF
308#undef TARGET_FUNCTION_ARG
309#define TARGET_FUNCTION_ARG m68k_function_arg
310
311#undef TARGET_FUNCTION_ARG_ADVANCE
312#define TARGET_FUNCTION_ARG_ADVANCE m68k_function_arg_advance
313
1a627b35
RS
314#undef TARGET_LEGITIMATE_CONSTANT_P
315#define TARGET_LEGITIMATE_CONSTANT_P m68k_legitimate_constant_p
316
cb69db4f
AS
317#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
318#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA m68k_output_addr_const_extra
319
4c1fd084
RH
320/* The value stored by TAS. */
321#undef TARGET_ATOMIC_TEST_AND_SET_TRUEVAL
322#define TARGET_ATOMIC_TEST_AND_SET_TRUEVAL 128
323
48ed72a4
PB
/* Machine-specific attributes recognized by this backend.  All three
   interrupt-style attributes share m68k_handle_fndecl_attribute as
   their handler and require a FUNCTION_DECL.  */
static const struct attribute_spec m68k_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "interrupt", 0, 0, true, false, false, m68k_handle_fndecl_attribute,
    false },
  { "interrupt_handler", 0, 0, true, false, false,
    m68k_handle_fndecl_attribute, false },
  { "interrupt_thread", 0, 0, true, false, false,
    m68k_handle_fndecl_attribute, false },
  /* Sentinel terminating the table.  */
  { NULL, 0, 0, false, false, false, NULL, false }
};
336
f6897b10 337struct gcc_target targetm = TARGET_INITIALIZER;
672a6f42 338\f
900ec02d
JB
/* Base flags for 68k ISAs.  */
#define FL_FOR_isa_00    FL_ISA_68000
#define FL_FOR_isa_10    (FL_FOR_isa_00 | FL_ISA_68010)
/* FL_68881 controls the default setting of -m68881.  gcc has traditionally
   generated 68881 code for 68020 and 68030 targets unless explicitly told
   not to.  */
#define FL_FOR_isa_20    (FL_FOR_isa_10 | FL_ISA_68020 \
			  | FL_BITFIELD | FL_68881 | FL_CAS)
#define FL_FOR_isa_40    (FL_FOR_isa_20 | FL_ISA_68040)
#define FL_FOR_isa_cpu32 (FL_FOR_isa_10 | FL_ISA_68020)

/* Base flags for ColdFire ISAs.  */
#define FL_FOR_isa_a     (FL_COLDFIRE | FL_ISA_A)
#define FL_FOR_isa_aplus (FL_FOR_isa_a | FL_ISA_APLUS | FL_CF_USP)
/* Note ISA_B doesn't necessarily include USP (user stack pointer) support.  */
#define FL_FOR_isa_b     (FL_FOR_isa_a | FL_ISA_B | FL_CF_HWDIV)
/* ISA_C is not upwardly compatible with ISA_B.  */
#define FL_FOR_isa_c     (FL_FOR_isa_a | FL_ISA_C | FL_CF_USP)

enum m68k_isa
{
  /* Traditional 68000 instruction sets.  */
  isa_00,
  isa_10,
  isa_20,
  isa_40,
  isa_cpu32,
  /* ColdFire instruction set variants.  */
  isa_a,
  isa_aplus,
  isa_b,
  isa_c,
  isa_max
};

/* Information about one of the -march, -mcpu or -mtune arguments.  */
struct m68k_target_selection
{
  /* The argument being described.  */
  const char *name;

  /* For -mcpu, this is the device selected by the option.
     For -mtune and -march, it is a representative device
     for the microarchitecture or ISA respectively.  */
  enum target_device device;

  /* The M68K_DEVICE fields associated with DEVICE.  See the comment
     in m68k-devices.def for details.  FAMILY is only valid for -mcpu.  */
  const char *family;
  enum uarch_type microarch;
  enum m68k_isa isa;
  unsigned long flags;
};

/* A list of all devices in m68k-devices.def.  Used for -mcpu selection.  */
static const struct m68k_target_selection all_devices[] =
{
#define M68K_DEVICE(NAME,ENUM_VALUE,FAMILY,MULTILIB,MICROARCH,ISA,FLAGS) \
  { NAME, ENUM_VALUE, FAMILY, u##MICROARCH, ISA, FLAGS | FL_FOR_##ISA },
#include "m68k-devices.def"
#undef M68K_DEVICE
  { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
};

/* A list of all ISAs, mapping each one to a representative device.
   Used for -march selection.  */
static const struct m68k_target_selection all_isas[] =
{
#define M68K_ISA(NAME,DEVICE,MICROARCH,ISA,FLAGS) \
  { NAME, DEVICE, NULL, u##MICROARCH, ISA, FLAGS },
#include "m68k-isas.def"
#undef M68K_ISA
  { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
};

/* A list of all microarchitectures, mapping each one to a representative
   device.  Used for -mtune selection.  */
static const struct m68k_target_selection all_microarchs[] =
{
#define M68K_MICROARCH(NAME,DEVICE,MICROARCH,ISA,FLAGS) \
  { NAME, DEVICE, NULL, u##MICROARCH, ISA, FLAGS },
#include "m68k-microarchs.def"
#undef M68K_MICROARCH
  { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
};
424\f
425/* The entries associated with the -mcpu, -march and -mtune settings,
426 or null for options that have not been used. */
427const struct m68k_target_selection *m68k_cpu_entry;
428const struct m68k_target_selection *m68k_arch_entry;
429const struct m68k_target_selection *m68k_tune_entry;
430
431/* Which CPU we are generating code for. */
432enum target_device m68k_cpu;
433
434/* Which microarchitecture to tune for. */
435enum uarch_type m68k_tune;
436
437/* Which FPU to use. */
438enum fpu_type m68k_fpu;
4af06170 439
900ec02d
JB
440/* The set of FL_* flags that apply to the target processor. */
441unsigned int m68k_cpu_flags;
29ca003a 442
03b3e271
KH
443/* The set of FL_* flags that apply to the processor to be tuned for. */
444unsigned int m68k_tune_flags;
445
29ca003a
RS
446/* Asm templates for calling or jumping to an arbitrary symbolic address,
447 or NULL if such calls or jumps are not supported. The address is held
448 in operand 0. */
449const char *m68k_symbolic_call;
450const char *m68k_symbolic_jump;
c47b0cb4
MK
451
452/* Enum variable that corresponds to m68k_symbolic_call values. */
453enum M68K_SYMBOLIC_CALL m68k_symbolic_call_var;
454
900ec02d 455\f
/* Implement TARGET_OPTION_OVERRIDE.  Resolve the -mcpu/-march/-mtune
   settings into the m68k_cpu / m68k_tune / m68k_fpu globals, derive
   default target flags, pick the symbolic call/jump templates for the
   selected PIC model, and configure the insn scheduler.  */

static void
m68k_option_override (void)
{
  const struct m68k_target_selection *entry;
  unsigned long target_mask;

  if (global_options_set.x_m68k_arch_option)
    m68k_arch_entry = &all_isas[m68k_arch_option];

  if (global_options_set.x_m68k_cpu_option)
    m68k_cpu_entry = &all_devices[(int) m68k_cpu_option];

  if (global_options_set.x_m68k_tune_option)
    m68k_tune_entry = &all_microarchs[(int) m68k_tune_option];

  /* User can choose:

     -mcpu=
     -march=
     -mtune=

     -march=ARCH should generate code that runs any processor
     implementing architecture ARCH.  -mcpu=CPU should override -march
     and should generate code that runs on processor CPU, making free
     use of any instructions that CPU understands.  -mtune=UARCH applies
     on top of -mcpu or -march and optimizes the code for UARCH.  It does
     not change the target architecture.  */
  if (m68k_cpu_entry)
    {
      /* Complain if the -march setting is for a different microarchitecture,
	 or includes flags that the -mcpu setting doesn't.  */
      if (m68k_arch_entry
	  && (m68k_arch_entry->microarch != m68k_cpu_entry->microarch
	      || (m68k_arch_entry->flags & ~m68k_cpu_entry->flags) != 0))
	warning (0, "-mcpu=%s conflicts with -march=%s",
		 m68k_cpu_entry->name, m68k_arch_entry->name);

      entry = m68k_cpu_entry;
    }
  else
    entry = m68k_arch_entry;

  if (!entry)
    entry = all_devices + TARGET_CPU_DEFAULT;

  m68k_cpu_flags = entry->flags;

  /* Use the architecture setting to derive default values for
     certain flags.  */
  target_mask = 0;

  /* ColdFire is lenient about alignment.  */
  if (!TARGET_COLDFIRE)
    target_mask |= MASK_STRICT_ALIGNMENT;

  if ((m68k_cpu_flags & FL_BITFIELD) != 0)
    target_mask |= MASK_BITFIELD;
  if ((m68k_cpu_flags & FL_CF_HWDIV) != 0)
    target_mask |= MASK_CF_HWDIV;
  if ((m68k_cpu_flags & (FL_68881 | FL_CF_FPU)) != 0)
    target_mask |= MASK_HARD_FLOAT;
  /* Only apply the derived defaults for flags the user did not set
     explicitly on the command line.  */
  target_flags |= target_mask & ~target_flags_explicit;

  /* Set the directly-usable versions of the -mcpu and -mtune settings.  */
  m68k_cpu = entry->device;
  if (m68k_tune_entry)
    {
      m68k_tune = m68k_tune_entry->microarch;
      m68k_tune_flags = m68k_tune_entry->flags;
    }
#ifdef M68K_DEFAULT_TUNE
  else if (!m68k_cpu_entry && !m68k_arch_entry)
    {
      enum target_device dev;
      dev = all_microarchs[M68K_DEFAULT_TUNE].device;
      /* NOTE(review): this branch sets m68k_tune_flags but appears to
	 leave m68k_tune unset — confirm whether m68k_tune should also
	 be assigned here (e.g. from M68K_DEFAULT_TUNE).  */
      m68k_tune_flags = all_devices[dev].flags;
    }
#endif
  else
    {
      m68k_tune = entry->microarch;
      m68k_tune_flags = entry->flags;
    }

  /* Set the type of FPU.  */
  m68k_fpu = (!TARGET_HARD_FLOAT ? FPUTYPE_NONE
	      : (m68k_cpu_flags & FL_COLDFIRE) != 0 ? FPUTYPE_COLDFIRE
	      : FPUTYPE_68881);

  /* Sanity check to ensure that msep-data and mid-shared-library are not
     both specified together.  Doing so simply doesn't make sense.  */
  if (TARGET_SEP_DATA && TARGET_ID_SHARED_LIBRARY)
    error ("cannot specify both -msep-data and -mid-shared-library");

  /* If we're generating code for a separate A5 relative data segment,
     we've got to enable -fPIC as well.  This might be relaxable to
     -fpic but it hasn't been tested properly.  */
  if (TARGET_SEP_DATA || TARGET_ID_SHARED_LIBRARY)
    flag_pic = 2;

  /* -mpcrel -fPIC uses 32-bit pc-relative displacements.  Raise an
     error if the target does not support them.  */
  if (TARGET_PCREL && !TARGET_68020 && flag_pic == 2)
    error ("-mpcrel -fPIC is not currently supported on selected cpu");

  /* ??? A historic way of turning on pic, or is this intended to
     be an embedded thing that doesn't have the same name binding
     significance that it does on hosted ELF systems?  */
  if (TARGET_PCREL && flag_pic == 0)
    flag_pic = 1;

  if (!flag_pic)
    {
      m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_JSR;

      m68k_symbolic_jump = "jra %a0";
    }
  else if (TARGET_ID_SHARED_LIBRARY)
    /* All addresses must be loaded from the GOT.  */
    ;
  else if (TARGET_68020 || TARGET_ISAB || TARGET_ISAC)
    {
      if (TARGET_PCREL)
	m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_C;
      else
	m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_P;

      if (TARGET_ISAC)
	/* No unconditional long branch */;
      else if (TARGET_PCREL)
	m68k_symbolic_jump = "bra%.l %c0";
      else
	m68k_symbolic_jump = "bra%.l %p0";
      /* Turn off function cse if we are doing PIC.  We always want
	 function call to be done as `bsr foo@PLTPC'.  */
      /* ??? It's traditional to do this for -mpcrel too, but it isn't
	 clear how intentional that is.  */
      flag_no_function_cse = 1;
    }

  /* Translate the chosen call style into the actual asm template.  */
  switch (m68k_symbolic_call_var)
    {
    case M68K_SYMBOLIC_CALL_JSR:
      m68k_symbolic_call = "jsr %a0";
      break;

    case M68K_SYMBOLIC_CALL_BSR_C:
      m68k_symbolic_call = "bsr%.l %c0";
      break;

    case M68K_SYMBOLIC_CALL_BSR_P:
      m68k_symbolic_call = "bsr%.l %p0";
      break;

    case M68K_SYMBOLIC_CALL_NONE:
      gcc_assert (m68k_symbolic_call == NULL);
      break;

    default:
      gcc_unreachable ();
    }

#ifndef ASM_OUTPUT_ALIGN_WITH_NOP
  if (align_labels > 2)
    {
      warning (0, "-falign-labels=%d is not supported", align_labels);
      align_labels = 0;
    }
  if (align_loops > 2)
    {
      warning (0, "-falign-loops=%d is not supported", align_loops);
      align_loops = 0;
    }
#endif

  if (stack_limit_rtx != NULL_RTX && !TARGET_68020)
    {
      warning (0, "-fstack-limit- options are not supported on this cpu");
      stack_limit_rtx = NULL_RTX;
    }

  SUBTARGET_OVERRIDE_OPTIONS;

  /* Setup scheduling options.  */
  if (TUNE_CFV1)
    m68k_sched_cpu = CPU_CFV1;
  else if (TUNE_CFV2)
    m68k_sched_cpu = CPU_CFV2;
  else if (TUNE_CFV3)
    m68k_sched_cpu = CPU_CFV3;
  else if (TUNE_CFV4)
    m68k_sched_cpu = CPU_CFV4;
  else
    {
      /* No scheduling model for this CPU: disable all scheduling passes.  */
      m68k_sched_cpu = CPU_UNKNOWN;
      flag_schedule_insns = 0;
      flag_schedule_insns_after_reload = 0;
      flag_modulo_sched = 0;
      flag_live_range_shrinkage = 0;
    }

  if (m68k_sched_cpu != CPU_UNKNOWN)
    {
      if ((m68k_cpu_flags & (FL_CF_EMAC | FL_CF_EMAC_B)) != 0)
	m68k_sched_mac = MAC_CF_EMAC;
      else if ((m68k_cpu_flags & FL_CF_MAC) != 0)
	m68k_sched_mac = MAC_CF_MAC;
      else
	m68k_sched_mac = MAC_NO;
    }
}
7eb4f044 671
03e69b12
MP
672/* Implement TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE. */
673
674static void
675m68k_override_options_after_change (void)
676{
677 if (m68k_sched_cpu == CPU_UNKNOWN)
678 {
679 flag_schedule_insns = 0;
680 flag_schedule_insns_after_reload = 0;
681 flag_modulo_sched = 0;
1ee6eb01 682 flag_live_range_shrinkage = 0;
03e69b12
MP
683 }
684}
685
7eb4f044
NS
686/* Generate a macro of the form __mPREFIX_cpu_NAME, where PREFIX is the
687 given argument and NAME is the argument passed to -mcpu. Return NULL
688 if -mcpu was not passed. */
689
690const char *
691m68k_cpp_cpu_ident (const char *prefix)
692{
693 if (!m68k_cpu_entry)
694 return NULL;
695 return concat ("__m", prefix, "_cpu_", m68k_cpu_entry->name, NULL);
696}
697
698/* Generate a macro of the form __mPREFIX_family_NAME, where PREFIX is the
699 given argument and NAME is the name of the representative device for
700 the -mcpu argument's family. Return NULL if -mcpu was not passed. */
701
702const char *
703m68k_cpp_cpu_family (const char *prefix)
704{
705 if (!m68k_cpu_entry)
706 return NULL;
707 return concat ("__m", prefix, "_family_", m68k_cpu_entry->family, NULL);
708}
79e68feb 709\f
2bccb817
KH
710/* Return m68k_fk_interrupt_handler if FUNC has an "interrupt" or
711 "interrupt_handler" attribute and interrupt_thread if FUNC has an
712 "interrupt_thread" attribute. Otherwise, return
713 m68k_fk_normal_function. */
a4242737
KH
714
715enum m68k_function_kind
716m68k_get_function_kind (tree func)
48ed72a4
PB
717{
718 tree a;
719
fa157b28
NS
720 gcc_assert (TREE_CODE (func) == FUNCTION_DECL);
721
2bccb817
KH
722 a = lookup_attribute ("interrupt", DECL_ATTRIBUTES (func));
723 if (a != NULL_TREE)
724 return m68k_fk_interrupt_handler;
725
48ed72a4 726 a = lookup_attribute ("interrupt_handler", DECL_ATTRIBUTES (func));
a4242737
KH
727 if (a != NULL_TREE)
728 return m68k_fk_interrupt_handler;
729
730 a = lookup_attribute ("interrupt_thread", DECL_ATTRIBUTES (func));
731 if (a != NULL_TREE)
732 return m68k_fk_interrupt_thread;
733
734 return m68k_fk_normal_function;
48ed72a4
PB
735}
736
/* Handle an attribute requiring a FUNCTION_DECL; arguments as in
   struct attribute_spec.handler.  Used for "interrupt",
   "interrupt_handler" and "interrupt_thread"; sets *NO_ADD_ATTRS on
   any rejection so the bogus attribute is dropped.  */
static tree
m68k_handle_fndecl_attribute (tree *node, tree name,
			      tree args ATTRIBUTE_UNUSED,
			      int flags ATTRIBUTE_UNUSED,
			      bool *no_add_attrs)
{
  /* These attributes only make sense on function declarations.  */
  if (TREE_CODE (*node) != FUNCTION_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
	       name);
      *no_add_attrs = true;
    }

  /* Reject a second interrupt-style attribute on the same function.  */
  if (m68k_get_function_kind (*node) != m68k_fk_normal_function)
    {
      error ("multiple interrupt attributes not allowed");
      *no_add_attrs = true;
    }

  /* interrupt_thread is a fido-only feature.  */
  if (!TARGET_FIDOA
      && !strcmp (IDENTIFIER_POINTER (name), "interrupt_thread"))
    {
      error ("interrupt_thread is available only on fido");
      *no_add_attrs = true;
    }

  return NULL_TREE;
}
860c4900
BI
767
/* Fill in current_frame for the function being compiled: the rounded
   frame size, which integer and FP registers must be saved, their
   save masks/counts, and the resulting stack offsets.  */
static void
m68k_compute_frame_layout (void)
{
  int regno, saved;
  unsigned int mask;
  enum m68k_function_kind func_kind =
    m68k_get_function_kind (current_function_decl);
  bool interrupt_handler = func_kind == m68k_fk_interrupt_handler;
  bool interrupt_thread = func_kind == m68k_fk_interrupt_thread;

  /* Only compute the frame once per function.
     Don't cache information until reload has been completed.  */
  if (current_frame.funcdef_no == current_function_funcdef_no
      && reload_completed)
    return;

  /* Round the frame size up to a multiple of 4 bytes.  */
  current_frame.size = (get_frame_size () + 3) & -4;

  mask = saved = 0;

  /* Interrupt thread does not need to save any register.  */
  if (!interrupt_thread)
    for (regno = 0; regno < 16; regno++)
      if (m68k_save_reg (regno, interrupt_handler))
	{
	  mask |= 1 << (regno - D0_REG);
	  saved++;
	}
  /* Each saved integer register occupies 4 bytes.  */
  current_frame.offset = saved * 4;
  current_frame.reg_no = saved;
  current_frame.reg_mask = mask;

  current_frame.foffset = 0;
  mask = saved = 0;
  if (TARGET_HARD_FLOAT)
    {
      /* Interrupt thread does not need to save any register.  */
      if (!interrupt_thread)
	for (regno = 16; regno < 24; regno++)
	  if (m68k_save_reg (regno, interrupt_handler))
	    {
	      mask |= 1 << (regno - FP0_REG);
	      saved++;
	    }
      current_frame.foffset = saved * TARGET_FP_REG_SIZE;
      current_frame.offset += current_frame.foffset;
    }
  current_frame.fpu_no = saved;
  current_frame.fpu_mask = mask;

  /* Remember what function this frame refers to.  */
  current_frame.funcdef_no = current_function_funcdef_no;
}
821
7b5cbb57
AS
822/* Worker function for TARGET_CAN_ELIMINATE. */
823
824bool
825m68k_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
826{
827 return (to == STACK_POINTER_REGNUM ? ! frame_pointer_needed : true);
828}
829
860c4900
BI
/* Return the initial difference between the eliminable register FROM
   and its replacement TO, using the layout in current_frame.  */
HOST_WIDE_INT
m68k_initial_elimination_offset (int from, int to)
{
  int argptr_offset;
  /* The arg pointer points 8 bytes before the start of the arguments,
     as defined by FIRST_PARM_OFFSET.  This makes it coincident with the
     frame pointer in most frames.  */
  argptr_offset = frame_pointer_needed ? 0 : UNITS_PER_WORD;
  if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
    return argptr_offset;

  /* The remaining cases need the saved-register/frame-size data.  */
  m68k_compute_frame_layout ();

  gcc_assert (to == STACK_POINTER_REGNUM);
  switch (from)
    {
    case ARG_POINTER_REGNUM:
      return current_frame.offset + current_frame.size - argptr_offset;
    case FRAME_POINTER_REGNUM:
      return current_frame.offset + current_frame.size;
    default:
      gcc_unreachable ();
    }
}
854
97c55091
GN
/* Refer to the array `regs_ever_live' to determine which registers
   to save; `regs_ever_live[I]' is nonzero if register number I
   is ever used in the function.  This function is responsible for
   knowing which registers should not be saved even if used.
   Return true if we need to save REGNO.  */

static bool
m68k_save_reg (unsigned int regno, bool interrupt_handler)
{
  /* The PIC register needs saving whenever PIC addressing may be used.  */
  if (flag_pic && regno == PIC_REG)
    {
      if (crtl->saves_all_registers)
	return true;
      if (crtl->uses_pic_offset_table)
	return true;
      /* Reload may introduce constant pool references into a function
	 that thitherto didn't need a PIC register.  Note that the test
	 above will not catch that case because we will only set
	 crtl->uses_pic_offset_table when emitting
	 the address reloads.  */
      if (crtl->uses_const_pool)
	return true;
    }

  /* EH-return data registers must be preserved for the unwinder.  */
  if (crtl->calls_eh_return)
    {
      unsigned int i;
      for (i = 0; ; i++)
	{
	  unsigned int test = EH_RETURN_DATA_REGNO (i);
	  if (test == INVALID_REGNUM)
	    break;
	  if (test == regno)
	    return true;
	}
    }

  /* Fixed regs we never touch.  */
  if (fixed_regs[regno])
    return false;

  /* The frame pointer (if it is such) is handled specially.  */
  if (regno == FRAME_POINTER_REGNUM && frame_pointer_needed)
    return false;

  /* Interrupt handlers must also save call_used_regs
     if they are live or when calling nested functions.  */
  if (interrupt_handler)
    {
      if (df_regs_ever_live_p (regno))
	return true;

      if (!crtl->is_leaf && call_used_regs[regno])
	return true;
    }

  /* Never need to save registers that aren't touched.  */
  if (!df_regs_ever_live_p (regno))
    return false;

  /* Otherwise save everything that isn't call-clobbered.  */
  return !call_used_regs[regno];
}
918
/* Emit RTL for a MOVEM or FMOVEM instruction.  BASE + OFFSET represents
   the lowest memory address.  COUNT is the number of registers to be
   moved, with register REGNO + I being moved if bit I of MASK is set.
   STORE_P specifies the direction of the move and ADJUST_STACK_P says
   whether or not this is pre-decrement (if STORE_P) or post-increment
   (if !STORE_P) operation.

   Returns the emitted insn; the pattern is a PARALLEL of one SET per
   moved register, preceded by an optional SET that adjusts BASE.  */

static rtx_insn *
m68k_emit_movem (rtx base, HOST_WIDE_INT offset,
		 unsigned int count, unsigned int regno,
		 unsigned int mask, bool store_p, bool adjust_stack_p)
{
  int i;
  rtx body, addr, src, operands[2];
  machine_mode mode;

  body = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (adjust_stack_p + count));
  /* All registers in the moved range share the raw mode of the first
     one (presumably SImode for D/A registers and XFmode for 68881 FP
     registers -- confirm against the callers).  */
  mode = reg_raw_mode[regno];
  i = 0;

  if (adjust_stack_p)
    {
      /* First element: move BASE past the whole block -- downwards for
	 a store (push), upwards for a load (pop).  */
      src = plus_constant (Pmode, base,
			   (count
			    * GET_MODE_SIZE (mode)
			    * (HOST_WIDE_INT) (store_p ? -1 : 1)));
      XVECEXP (body, 0, i++) = gen_rtx_SET (base, src);
    }

  for (; mask != 0; mask >>= 1, regno++)
    if (mask & 1)
      {
	addr = plus_constant (Pmode, base, offset);
	/* operands[0] is always the SET destination and operands[1] the
	   source; indexing by STORE_P places the memory reference on
	   the correct side for the direction of the move.  */
	operands[!store_p] = gen_frame_mem (mode, addr);
	operands[store_p] = gen_rtx_REG (mode, regno);
	XVECEXP (body, 0, i++)
	  = gen_rtx_SET (operands[0], operands[1]);
	offset += GET_MODE_SIZE (mode);
      }
  /* Every allocated PARALLEL slot must have been filled.  */
  gcc_assert (i == XVECLEN (body, 0));

  return emit_insn (body);
}
962
963/* Make INSN a frame-related instruction. */
79e68feb 964
08c148a8 965static void
c85e862a 966m68k_set_frame_related (rtx_insn *insn)
a40ed0f3
KH
967{
968 rtx body;
969 int i;
970
971 RTX_FRAME_RELATED_P (insn) = 1;
972 body = PATTERN (insn);
973 if (GET_CODE (body) == PARALLEL)
974 for (i = 0; i < XVECLEN (body, 0); i++)
975 RTX_FRAME_RELATED_P (XVECEXP (body, 0, i)) = 1;
976}
977
/* Emit RTL for the "prologue" define_expand.

   The sequence is: optional early stack-limit check, frame allocation
   (via link/moves/add), FP register saves, optional late stack-limit
   check, integer register saves, and finally the GOT load for PIC.  */

void
m68k_expand_prologue (void)
{
  HOST_WIDE_INT fsize_with_regs;
  rtx limit, src, dest;

  m68k_compute_frame_layout ();

  if (flag_stack_usage_info)
    current_function_static_stack_size
      = current_frame.size + current_frame.offset;

  /* If the stack limit is a symbol, we can check it here,
     before actually allocating the space.  */
  if (crtl->limit_stack
      && GET_CODE (stack_limit_rtx) == SYMBOL_REF)
    {
      /* +4 accounts for the return address already on the stack.
	 NOTE(review): presumably -- confirm against the ABI layout.  */
      limit = plus_constant (Pmode, stack_limit_rtx, current_frame.size + 4);
      if (!m68k_legitimate_constant_p (Pmode, limit))
	{
	  /* The symbolic limit cannot be used directly; materialize it
	     in D0 first.  */
	  emit_move_insn (gen_rtx_REG (Pmode, D0_REG), limit);
	  limit = gen_rtx_REG (Pmode, D0_REG);
	}
      /* Trap if SP < limit.  */
      emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode,
					    stack_pointer_rtx, limit),
			       stack_pointer_rtx, limit,
			       const1_rtx));
    }

  fsize_with_regs = current_frame.size;
  if (TARGET_COLDFIRE)
    {
      /* ColdFire's move multiple instructions do not allow pre-decrement
	 addressing.  Add the size of movem saves to the initial stack
	 allocation instead.  */
      if (current_frame.reg_no >= MIN_MOVEM_REGS)
	fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
      if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
	fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
    }

  if (frame_pointer_needed)
    {
      if (fsize_with_regs == 0 && TUNE_68040)
	{
	  /* On the 68040, two separate moves are faster than link.w 0.  */
	  dest = gen_frame_mem (Pmode,
				gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
	  m68k_set_frame_related (emit_move_insn (dest, frame_pointer_rtx));
	  m68k_set_frame_related (emit_move_insn (frame_pointer_rtx,
						  stack_pointer_rtx));
	}
      else if (fsize_with_regs < 0x8000 || TARGET_68020)
	/* link.w takes a 16-bit displacement; 68020+ can link with a
	   32-bit one.  The -4 covers the saved frame pointer itself.  */
	m68k_set_frame_related
	  (emit_insn (gen_link (frame_pointer_rtx,
				GEN_INT (-4 - fsize_with_regs))));
      else
	{
	  /* Frame too big for a 16-bit link displacement on pre-68020:
	     link with no space, then adjust SP separately.  */
	  m68k_set_frame_related
	    (emit_insn (gen_link (frame_pointer_rtx, GEN_INT (-4))));
	  m68k_set_frame_related
	    (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-fsize_with_regs))));
	}

      /* If the frame pointer is needed, emit a special barrier that
	 will prevent the scheduler from moving stores to the frame
	 before the stack adjustment.  */
      emit_insn (gen_stack_tie (stack_pointer_rtx, frame_pointer_rtx));
    }
  else if (fsize_with_regs != 0)
    m68k_set_frame_related
      (emit_insn (gen_addsi3 (stack_pointer_rtx,
			      stack_pointer_rtx,
			      GEN_INT (-fsize_with_regs))));

  if (current_frame.fpu_mask)
    {
      gcc_assert (current_frame.fpu_no >= MIN_FMOVEM_REGS);
      if (TARGET_68881)
	/* 68881 fmovem can pre-decrement: save below the current SP.  */
	m68k_set_frame_related
	  (m68k_emit_movem (stack_pointer_rtx,
			    current_frame.fpu_no * -GET_MODE_SIZE (XFmode),
			    current_frame.fpu_no, FP0_REG,
			    current_frame.fpu_mask, true, true));
      else
	{
	  int offset;

	  /* If we're using moveml to save the integer registers,
	     the stack pointer will point to the bottom of the moveml
	     save area.  Find the stack offset of the first FP register.  */
	  if (current_frame.reg_no < MIN_MOVEM_REGS)
	    offset = 0;
	  else
	    offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
	  m68k_set_frame_related
	    (m68k_emit_movem (stack_pointer_rtx, offset,
			      current_frame.fpu_no, FP0_REG,
			      current_frame.fpu_mask, true, false));
	}
    }

  /* If the stack limit is not a symbol, check it here.
     This has the disadvantage that it may be too late...  */
  if (crtl->limit_stack)
    {
      if (REG_P (stack_limit_rtx))
	emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode, stack_pointer_rtx,
					      stack_limit_rtx),
				 stack_pointer_rtx, stack_limit_rtx,
				 const1_rtx));

      else if (GET_CODE (stack_limit_rtx) != SYMBOL_REF)
	warning (0, "stack limit expression is not supported");
    }

  if (current_frame.reg_no < MIN_MOVEM_REGS)
    {
      /* Store each register separately in the same order moveml does.  */
      int i;

      for (i = 16; i-- > 0; )
	if (current_frame.reg_mask & (1 << i))
	  {
	    src = gen_rtx_REG (SImode, D0_REG + i);
	    dest = gen_frame_mem (SImode,
				  gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
	    m68k_set_frame_related (emit_insn (gen_movsi (dest, src)));
	  }
    }
  else
    {
      if (TARGET_COLDFIRE)
	/* The required register save space has already been allocated.
	   The first register should be stored at (%sp).  */
	m68k_set_frame_related
	  (m68k_emit_movem (stack_pointer_rtx, 0,
			    current_frame.reg_no, D0_REG,
			    current_frame.reg_mask, true, false));
      else
	m68k_set_frame_related
	  (m68k_emit_movem (stack_pointer_rtx,
			    current_frame.reg_no * -GET_MODE_SIZE (SImode),
			    current_frame.reg_no, D0_REG,
			    current_frame.reg_mask, true, true));
    }

  /* Load the GOT pointer if this function references it and the ABI
     does not provide it some other way (-msep-data).  */
  if (!TARGET_SEP_DATA
      && crtl->uses_pic_offset_table)
    emit_insn (gen_load_got (pic_offset_table_rtx));
}
413ac1b2
RS
1134/* Return true if a simple (return) instruction is sufficient for this
1135 instruction (i.e. if no epilogue is needed). */
79e68feb 1136
3d74bc09 1137bool
a2bda628 1138m68k_use_return_insn (void)
79e68feb 1139{
79e68feb 1140 if (!reload_completed || frame_pointer_needed || get_frame_size () != 0)
3d74bc09 1141 return false;
125ed86f 1142
a0a7fbc9 1143 m68k_compute_frame_layout ();
413ac1b2 1144 return current_frame.offset == 0;
79e68feb
RS
1145}
1146
/* Emit RTL for the "epilogue" or "sibcall_epilogue" define_expand;
   SIBCALL_P says which.

   The function epilogue should not depend on the current stack pointer!
   It should use the frame pointer only, if there is a frame pointer.
   This is mandatory because of alloca; we also take advantage of it to
   omit stack adjustments before returning.  */

void
m68k_expand_epilogue (bool sibcall_p)
{
  HOST_WIDE_INT fsize, fsize_with_regs;
  /* BIG: the frame is too large for 16-bit displacements, so restores
     go through an index register (A1).  RESTORE_FROM_SP: restores can
     use post-increment off the stack pointer instead of the frame
     pointer.  */
  bool big, restore_from_sp;

  m68k_compute_frame_layout ();

  fsize = current_frame.size;
  big = false;
  restore_from_sp = false;

  /* FIXME : crtl->is_leaf below is too strong.
     What we really need to know there is if there could be pending
     stack adjustment needed at that point.  */
  restore_from_sp = (!frame_pointer_needed
		     || (!cfun->calls_alloca && crtl->is_leaf));

  /* fsize_with_regs is the size we need to adjust the sp when
     popping the frame.  */
  fsize_with_regs = fsize;
  if (TARGET_COLDFIRE && restore_from_sp)
    {
      /* ColdFire's move multiple instructions do not allow post-increment
	 addressing.  Add the size of movem loads to the final deallocation
	 instead.  */
      if (current_frame.reg_no >= MIN_MOVEM_REGS)
	fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
      if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
	fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
    }

  /* Restore offsets of 0x8000 or more don't fit in a 16-bit
     displacement; set up an alternative addressing strategy.  */
  if (current_frame.offset + fsize >= 0x8000
      && !restore_from_sp
      && (current_frame.reg_mask || current_frame.fpu_mask))
    {
      if (TARGET_COLDFIRE
	  && (current_frame.reg_no >= MIN_MOVEM_REGS
	      || current_frame.fpu_no >= MIN_FMOVEM_REGS))
	{
	  /* ColdFire's move multiple instructions do not support the
	     (d8,Ax,Xi) addressing mode, so we're as well using a normal
	     stack-based restore.  */
	  emit_move_insn (gen_rtx_REG (Pmode, A1_REG),
			  GEN_INT (-(current_frame.offset + fsize)));
	  emit_insn (gen_addsi3 (stack_pointer_rtx,
				 gen_rtx_REG (Pmode, A1_REG),
				 frame_pointer_rtx));
	  restore_from_sp = true;
	}
      else
	{
	  /* Use A1 as an index so restores become -OFFSET(%fp,%a1.l).  */
	  emit_move_insn (gen_rtx_REG (Pmode, A1_REG), GEN_INT (-fsize));
	  fsize = 0;
	  big = true;
	}
    }

  if (current_frame.reg_no < MIN_MOVEM_REGS)
    {
      /* Restore each register separately in the same order moveml does.  */
      int i;
      HOST_WIDE_INT offset;

      offset = current_frame.offset + fsize;
      for (i = 0; i < 16; i++)
	if (current_frame.reg_mask & (1 << i))
	  {
	    rtx addr;

	    if (big)
	      {
		/* Generate the address -OFFSET(%fp,%a1.l).  */
		addr = gen_rtx_REG (Pmode, A1_REG);
		addr = gen_rtx_PLUS (Pmode, addr, frame_pointer_rtx);
		addr = plus_constant (Pmode, addr, -offset);
	      }
	    else if (restore_from_sp)
	      addr = gen_rtx_POST_INC (Pmode, stack_pointer_rtx);
	    else
	      addr = plus_constant (Pmode, frame_pointer_rtx, -offset);
	    emit_move_insn (gen_rtx_REG (SImode, D0_REG + i),
			    gen_frame_mem (SImode, addr));
	    offset -= GET_MODE_SIZE (SImode);
	  }
    }
  else if (current_frame.reg_mask)
    {
      if (big)
	m68k_emit_movem (gen_rtx_PLUS (Pmode,
				       gen_rtx_REG (Pmode, A1_REG),
				       frame_pointer_rtx),
			 -(current_frame.offset + fsize),
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false, false);
      else if (restore_from_sp)
	/* Non-ColdFire can pop with a post-increment movem; ColdFire
	   deallocates the space in the final SP adjustment instead.  */
	m68k_emit_movem (stack_pointer_rtx, 0,
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false,
			 !TARGET_COLDFIRE);
      else
	m68k_emit_movem (frame_pointer_rtx,
			 -(current_frame.offset + fsize),
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false, false);
    }

  if (current_frame.fpu_no > 0)
    {
      if (big)
	m68k_emit_movem (gen_rtx_PLUS (Pmode,
				       gen_rtx_REG (Pmode, A1_REG),
				       frame_pointer_rtx),
			 -(current_frame.foffset + fsize),
			 current_frame.fpu_no, FP0_REG,
			 current_frame.fpu_mask, false, false);
      else if (restore_from_sp)
	{
	  if (TARGET_COLDFIRE)
	    {
	      int offset;

	      /* If we used moveml to restore the integer registers, the
		 stack pointer will still point to the bottom of the moveml
		 save area.  Find the stack offset of the first FP
		 register.  */
	      if (current_frame.reg_no < MIN_MOVEM_REGS)
		offset = 0;
	      else
		offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
	      m68k_emit_movem (stack_pointer_rtx, offset,
			       current_frame.fpu_no, FP0_REG,
			       current_frame.fpu_mask, false, false);
	    }
	  else
	    m68k_emit_movem (stack_pointer_rtx, 0,
			     current_frame.fpu_no, FP0_REG,
			     current_frame.fpu_mask, false, true);
	}
      else
	m68k_emit_movem (frame_pointer_rtx,
			 -(current_frame.foffset + fsize),
			 current_frame.fpu_no, FP0_REG,
			 current_frame.fpu_mask, false, false);
    }

  /* Tear down the frame: unlk restores both FP and SP; otherwise pop
     any remaining allocation explicitly.  */
  if (frame_pointer_needed)
    emit_insn (gen_unlink (frame_pointer_rtx));
  else if (fsize_with_regs)
    emit_insn (gen_addsi3 (stack_pointer_rtx,
			   stack_pointer_rtx,
			   GEN_INT (fsize_with_regs)));

  /* Apply the dynamic stack adjustment requested by __builtin_eh_return.  */
  if (crtl->calls_eh_return)
    emit_insn (gen_addsi3 (stack_pointer_rtx,
			   stack_pointer_rtx,
			   EH_RETURN_STACKADJ_RTX));

  if (!sibcall_p)
    emit_jump_insn (ret_rtx);
}
1316\f
8a4a2253 1317/* Return true if X is a valid comparison operator for the dbcc
64a184e9
RS
1318 instruction.
1319
1320 Note it rejects floating point comparison operators.
1321 (In the future we could use Fdbcc).
1322
1323 It also rejects some comparisons when CC_NO_OVERFLOW is set. */
1324
1325int
ef4bddc2 1326valid_dbcc_comparison_p_2 (rtx x, machine_mode mode ATTRIBUTE_UNUSED)
64a184e9 1327{
64a184e9
RS
1328 switch (GET_CODE (x))
1329 {
64a184e9
RS
1330 case EQ: case NE: case GTU: case LTU:
1331 case GEU: case LEU:
1332 return 1;
1333
1334 /* Reject some when CC_NO_OVERFLOW is set. This may be over
1335 conservative */
1336 case GT: case LT: case GE: case LE:
1337 return ! (cc_prev_status.flags & CC_NO_OVERFLOW);
1338 default:
1339 return 0;
1340 }
1341}
1342
/* Return nonzero if flags are currently in the 68881 flag register.
   (FP comparisons leave their result there rather than in the CPU's
   condition codes.)  */
int
flags_in_68881 (void)
{
  /* We could add support for these in the future.  */
  return cc_status.flags & CC_IN_68881;
}
1350
db5e2d51
MK
1351/* Return true if PARALLEL contains register REGNO. */
1352static bool
1353m68k_reg_present_p (const_rtx parallel, unsigned int regno)
1354{
1355 int i;
1356
1357 if (REG_P (parallel) && REGNO (parallel) == regno)
1358 return true;
1359
1360 if (GET_CODE (parallel) != PARALLEL)
1361 return false;
1362
1363 for (i = 0; i < XVECLEN (parallel, 0); ++i)
1364 {
1365 const_rtx x;
1366
1367 x = XEXP (XVECEXP (parallel, 0, i), 0);
1368 if (REG_P (x) && REGNO (x) == regno)
1369 return true;
1370 }
1371
1372 return false;
1373}
1374
fa157b28 1375/* Implement TARGET_FUNCTION_OK_FOR_SIBCALL_P. */
f7e70894
RS
1376
1377static bool
fa157b28 1378m68k_ok_for_sibcall_p (tree decl, tree exp)
f7e70894 1379{
fa157b28
NS
1380 enum m68k_function_kind kind;
1381
1382 /* We cannot use sibcalls for nested functions because we use the
1383 static chain register for indirect calls. */
1384 if (CALL_EXPR_STATIC_CHAIN (exp))
1385 return false;
1386
db5e2d51
MK
1387 if (!VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
1388 {
1389 /* Check that the return value locations are the same. For
1390 example that we aren't returning a value from the sibling in
1391 a D0 register but then need to transfer it to a A0 register. */
1392 rtx cfun_value;
1393 rtx call_value;
1394
1395 cfun_value = FUNCTION_VALUE (TREE_TYPE (DECL_RESULT (cfun->decl)),
1396 cfun->decl);
1397 call_value = FUNCTION_VALUE (TREE_TYPE (exp), decl);
1398
1399 /* Check that the values are equal or that the result the callee
1400 function returns is superset of what the current function returns. */
1401 if (!(rtx_equal_p (cfun_value, call_value)
1402 || (REG_P (cfun_value)
1403 && m68k_reg_present_p (call_value, REGNO (cfun_value)))))
1404 return false;
1405 }
1406
fa157b28
NS
1407 kind = m68k_get_function_kind (current_function_decl);
1408 if (kind == m68k_fk_normal_function)
1409 /* We can always sibcall from a normal function, because it's
1410 undefined if it is calling an interrupt function. */
1411 return true;
1412
1413 /* Otherwise we can only sibcall if the function kind is known to be
1414 the same. */
1415 if (decl && m68k_get_function_kind (decl) == kind)
1416 return true;
1417
1418 return false;
f7e70894
RS
1419}
1420
/* Implement TARGET_FUNCTION_ARG.  On the m68k all args are always
   pushed; returning NULL_RTX tells the middle-end to pass every
   argument on the stack rather than in a register.  */

static rtx
m68k_function_arg (cumulative_args_t cum ATTRIBUTE_UNUSED,
		   machine_mode mode ATTRIBUTE_UNUSED,
		   const_tree type ATTRIBUTE_UNUSED,
		   bool named ATTRIBUTE_UNUSED)
{
  return NULL_RTX;
}
1431
1432static void
ef4bddc2 1433m68k_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
13d3961c
NF
1434 const_tree type, bool named ATTRIBUTE_UNUSED)
1435{
d5cc9181
JR
1436 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
1437
13d3961c
NF
1438 *cum += (mode != BLKmode
1439 ? (GET_MODE_SIZE (mode) + 3) & ~3
1440 : (int_size_in_bytes (type) + 3) & ~3);
1441}
1442
29ca003a
RS
1443/* Convert X to a legitimate function call memory reference and return the
1444 result. */
a2ef3db7 1445
29ca003a
RS
1446rtx
1447m68k_legitimize_call_address (rtx x)
1448{
1449 gcc_assert (MEM_P (x));
1450 if (call_operand (XEXP (x, 0), VOIDmode))
1451 return x;
1452 return replace_equiv_address (x, force_reg (Pmode, XEXP (x, 0)));
a2ef3db7
BI
1453}
1454
f7e70894
RS
1455/* Likewise for sibling calls. */
1456
1457rtx
1458m68k_legitimize_sibcall_address (rtx x)
1459{
1460 gcc_assert (MEM_P (x));
1461 if (sibcall_operand (XEXP (x, 0), VOIDmode))
1462 return x;
1463
1464 emit_move_insn (gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM), XEXP (x, 0));
1465 return replace_equiv_address (x, gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM));
1466}
1467
/* Convert X to a legitimate address and return it if successful.  Otherwise
   return X.

   For the 68000, we handle X+REG by loading X into a register R and
   using R+REG.  R will go in an address reg and indexing will be used.
   However, if REG is a broken-out memory address or multiplication,
   nothing needs to be done because REG can certainly go in an address reg.  */

static rtx
m68k_legitimize_address (rtx x, rtx oldx, machine_mode mode)
{
  if (m68k_tls_symbol_p (x))
    return m68k_legitimize_tls_address (x);

  if (GET_CODE (x) == PLUS)
    {
      /* CH records whether X has been changed (it starts out true if a
	 previous pass already produced a new rtx); COPIED ensures X is
	 copied at most once before being modified in place.  */
      int ch = (x) != (oldx);
      int copied = 0;

#define COPY_ONCE(Y) if (!copied) { Y = copy_rtx (Y); copied = ch = 1; }

      /* Break out multiplications so each factor lands in a register;
	 scaled-index addressing can then be used.  */
      if (GET_CODE (XEXP (x, 0)) == MULT)
	{
	  COPY_ONCE (x);
	  XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
	}
      if (GET_CODE (XEXP (x, 1)) == MULT)
	{
	  COPY_ONCE (x);
	  XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
	}
      if (ch)
	{
	  if (GET_CODE (XEXP (x, 1)) == REG
	      && GET_CODE (XEXP (x, 0)) == REG)
	    {
	      /* REG+REG is already legitimate, except that ColdFire FPU
		 loads/stores cannot use indexed addressing.  */
	      if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
		{
		  COPY_ONCE (x);
		  x = force_operand (x, 0);
		}
	      return x;
	    }
	  if (memory_address_p (mode, x))
	    return x;
	}
      /* One operand is a plain register (or a sign-extended HImode
	 register, usable as an index): force the other operand into a
	 register so the result is REG+REG.  */
      if (GET_CODE (XEXP (x, 0)) == REG
	  || (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
	      && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
	      && GET_MODE (XEXP (XEXP (x, 0), 0)) == HImode))
	{
	  rtx temp = gen_reg_rtx (Pmode);
	  rtx val = force_operand (XEXP (x, 1), 0);
	  emit_move_insn (temp, val);
	  COPY_ONCE (x);
	  XEXP (x, 1) = temp;
	  if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
	      && GET_CODE (XEXP (x, 0)) == REG)
	    x = force_operand (x, 0);
	}
      else if (GET_CODE (XEXP (x, 1)) == REG
	       || (GET_CODE (XEXP (x, 1)) == SIGN_EXTEND
		   && GET_CODE (XEXP (XEXP (x, 1), 0)) == REG
		   && GET_MODE (XEXP (XEXP (x, 1), 0)) == HImode))
	{
	  rtx temp = gen_reg_rtx (Pmode);
	  rtx val = force_operand (XEXP (x, 0), 0);
	  emit_move_insn (temp, val);
	  COPY_ONCE (x);
	  XEXP (x, 0) = temp;
	  if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
	      && GET_CODE (XEXP (x, 1)) == REG)
	    x = force_operand (x, 0);
	}
    }

  return x;
}
1546
1547
64a184e9
RS
1548/* Output a dbCC; jCC sequence. Note we do not handle the
1549 floating point version of this sequence (Fdbcc). We also
1550 do not handle alternative conditions when CC_NO_OVERFLOW is
6a0f85e3
TG
1551 set. It is assumed that valid_dbcc_comparison_p and flags_in_68881 will
1552 kick those out before we get here. */
64a184e9 1553
1d8eaa6b 1554void
8a4a2253 1555output_dbcc_and_branch (rtx *operands)
64a184e9 1556{
64a184e9
RS
1557 switch (GET_CODE (operands[3]))
1558 {
1559 case EQ:
da398bb5 1560 output_asm_insn ("dbeq %0,%l1\n\tjeq %l2", operands);
e6d98cb0 1561 break;
64a184e9
RS
1562
1563 case NE:
da398bb5 1564 output_asm_insn ("dbne %0,%l1\n\tjne %l2", operands);
e6d98cb0 1565 break;
64a184e9
RS
1566
1567 case GT:
da398bb5 1568 output_asm_insn ("dbgt %0,%l1\n\tjgt %l2", operands);
e6d98cb0 1569 break;
64a184e9
RS
1570
1571 case GTU:
da398bb5 1572 output_asm_insn ("dbhi %0,%l1\n\tjhi %l2", operands);
e6d98cb0 1573 break;
64a184e9
RS
1574
1575 case LT:
da398bb5 1576 output_asm_insn ("dblt %0,%l1\n\tjlt %l2", operands);
e6d98cb0 1577 break;
64a184e9
RS
1578
1579 case LTU:
da398bb5 1580 output_asm_insn ("dbcs %0,%l1\n\tjcs %l2", operands);
e6d98cb0 1581 break;
64a184e9
RS
1582
1583 case GE:
da398bb5 1584 output_asm_insn ("dbge %0,%l1\n\tjge %l2", operands);
e6d98cb0 1585 break;
64a184e9
RS
1586
1587 case GEU:
da398bb5 1588 output_asm_insn ("dbcc %0,%l1\n\tjcc %l2", operands);
e6d98cb0 1589 break;
64a184e9
RS
1590
1591 case LE:
da398bb5 1592 output_asm_insn ("dble %0,%l1\n\tjle %l2", operands);
e6d98cb0 1593 break;
64a184e9
RS
1594
1595 case LEU:
da398bb5 1596 output_asm_insn ("dbls %0,%l1\n\tjls %l2", operands);
e6d98cb0 1597 break;
64a184e9
RS
1598
1599 default:
4761e388 1600 gcc_unreachable ();
64a184e9
RS
1601 }
1602
1603 /* If the decrement is to be done in SImode, then we have
7a1929e1 1604 to compensate for the fact that dbcc decrements in HImode. */
64a184e9
RS
1605 switch (GET_MODE (operands[0]))
1606 {
1607 case SImode:
da398bb5 1608 output_asm_insn ("clr%.w %0\n\tsubq%.l #1,%0\n\tjpl %l1", operands);
64a184e9
RS
1609 break;
1610
1611 case HImode:
1612 break;
1613
1614 default:
4761e388 1615 gcc_unreachable ();
64a184e9
RS
1616 }
1617}
1618
/* Output assembly for a DImode set-on-condition: compare the 64-bit
   OPERAND1 against OPERAND2 under comparison OP and store the result
   byte in DEST.  The high words are compared first; only if they are
   equal are the low words compared (at label %l4).  Returns "" since
   all output is emitted via output_asm_insn.  */

const char *
output_scc_di (rtx op, rtx operand1, rtx operand2, rtx dest)
{
  /* Slots: 0/1 = high/low word of operand1, 2/3 = high/low word of
     operand2, 4 = low-word-comparison label, 5 = destination,
     6 = skip label for three-way (signed) conditions.  */
  rtx loperands[7];
  enum rtx_code op_code = GET_CODE (op);

  /* This does not produce a useful cc.  */
  CC_STATUS_INIT;

  /* The m68k cmp.l instruction requires operand1 to be a reg as used
     below.  Swap the operands and change the op if these requirements
     are not fulfilled.  */
  if (GET_CODE (operand2) == REG && GET_CODE (operand1) != REG)
    {
      rtx tmp = operand1;

      operand1 = operand2;
      operand2 = tmp;
      op_code = swap_condition (op_code);
    }
  loperands[0] = operand1;
  if (GET_CODE (operand1) == REG)
    loperands[1] = gen_rtx_REG (SImode, REGNO (operand1) + 1);
  else
    loperands[1] = adjust_address (operand1, SImode, 4);
  if (operand2 != const0_rtx)
    {
      loperands[2] = operand2;
      if (GET_CODE (operand2) == REG)
	loperands[3] = gen_rtx_REG (SImode, REGNO (operand2) + 1);
      else
	loperands[3] = adjust_address (operand2, SImode, 4);
    }
  loperands[4] = gen_label_rtx ();
  if (operand2 != const0_rtx)
    output_asm_insn ("cmp%.l %2,%0\n\tjne %l4\n\tcmp%.l %3,%1", loperands);
  else
    {
      /* Comparing against zero: tst.l works everywhere except for
	 address registers on pre-68020, which need cmp.w #0.  */
      if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[0]))
	output_asm_insn ("tst%.l %0", loperands);
      else
	output_asm_insn ("cmp%.w #0,%0", loperands);

      output_asm_insn ("jne %l4", loperands);

      if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[1]))
	output_asm_insn ("tst%.l %1", loperands);
      else
	output_asm_insn ("cmp%.w #0,%1", loperands);
    }

  loperands[5] = dest;

  /* For EQ/NE and the unsigned conditions one sCC after label %l4
     suffices.  The signed conditions need different sCC codes on the
     high-word path (signed) and the low-word path (unsigned), so they
     emit an sCC, skip over label %l4 to %l6, and place the low-word
     sCC between the two labels.  */
  switch (op_code)
    {
    case EQ:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("seq %5", loperands);
      break;

    case NE:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sne %5", loperands);
      break;

    case GT:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("shi %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sgt %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case GTU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("shi %5", loperands);
      break;

    case LT:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("scs %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("slt %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case LTU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("scs %5", loperands);
      break;

    case GE:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("scc %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sge %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case GEU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("scc %5", loperands);
      break;

    case LE:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("sls %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sle %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case LEU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sls %5", loperands);
      break;

    default:
      gcc_unreachable ();
    }
  return "";
}
1755
/* Output assembly for a bit-test.  COUNTOP is the bit number (counting
   from the low end) and DATAOP the value being tested; SIGNPOS is the
   sign-bit position of the access unit (e.g. 7 for byte accesses).
   INSN is the btst insn itself, used to peek at the following
   comparison.  Where the tested bit is a sign bit and the consumer
   only checks (in)equality, a cheaper tst or move-to-ccr is emitted
   instead; cc_status.flags is updated to describe what the emitted
   instruction leaves in the condition codes.  */

const char *
output_btst (rtx *operands, rtx countop, rtx dataop, rtx_insn *insn, int signpos)
{
  operands[0] = countop;
  operands[1] = dataop;

  if (GET_CODE (countop) == CONST_INT)
    {
      register int count = INTVAL (countop);
      /* If COUNT is bigger than size of storage unit in use,
	 advance to the containing unit of same size.  */
      if (count > signpos)
	{
	  int offset = (count & ~signpos) / 8;
	  count = count & signpos;
	  operands[1] = dataop = adjust_address (dataop, QImode, offset);
	}
      if (count == signpos)
	cc_status.flags = CC_NOT_POSITIVE | CC_Z_IN_NOT_N;
      else
	cc_status.flags = CC_NOT_NEGATIVE | CC_Z_IN_NOT_N;

      /* These three statements used to use next_insns_test_no...
	 but it appears that this should do the same job.  */
      if (count == 31
	  && next_insn_tests_no_inequality (insn))
	return "tst%.l %1";
      if (count == 15
	  && next_insn_tests_no_inequality (insn))
	return "tst%.w %1";
      if (count == 7
	  && next_insn_tests_no_inequality (insn))
	return "tst%.b %1";
      /* Try to use `movew to ccr' followed by the appropriate branch insn.
	 On some m68k variants unfortunately that's slower than btst.
	 On 68000 and higher, that should also work for all HImode operands.  */
      if (TUNE_CPU32 || TARGET_COLDFIRE || optimize_size)
	{
	  if (count == 3 && DATA_REG_P (operands[1])
	      && next_insn_tests_no_inequality (insn))
	    {
	      cc_status.flags = CC_NOT_NEGATIVE | CC_Z_IN_NOT_N | CC_NO_OVERFLOW;
	      return "move%.w %1,%%ccr";
	    }
	  if (count == 2 && DATA_REG_P (operands[1])
	      && next_insn_tests_no_inequality (insn))
	    {
	      cc_status.flags = CC_NOT_NEGATIVE | CC_INVERTED | CC_NO_OVERFLOW;
	      return "move%.w %1,%%ccr";
	    }
	  /* count == 1 followed by bvc/bvs and
	     count == 0 followed by bcc/bcs are also possible, but need
	     m68k-specific CC_Z_IN_NOT_V and CC_Z_IN_NOT_C flags.  */
	}

      cc_status.flags = CC_NOT_NEGATIVE;
    }
  return "btst %0,%1";
}
79e68feb 1815\f
fc2241eb
RS
1816/* Return true if X is a legitimate base register. STRICT_P says
1817 whether we need strict checking. */
1818
1819bool
1820m68k_legitimate_base_reg_p (rtx x, bool strict_p)
1821{
1822 /* Allow SUBREG everywhere we allow REG. This results in better code. */
1823 if (!strict_p && GET_CODE (x) == SUBREG)
1824 x = SUBREG_REG (x);
1825
1826 return (REG_P (x)
1827 && (strict_p
1828 ? REGNO_OK_FOR_BASE_P (REGNO (x))
bf32249e 1829 : REGNO_OK_FOR_BASE_NONSTRICT_P (REGNO (x))));
fc2241eb
RS
1830}
1831
1832/* Return true if X is a legitimate index register. STRICT_P says
1833 whether we need strict checking. */
1834
1835bool
1836m68k_legitimate_index_reg_p (rtx x, bool strict_p)
1837{
1838 if (!strict_p && GET_CODE (x) == SUBREG)
1839 x = SUBREG_REG (x);
1840
1841 return (REG_P (x)
1842 && (strict_p
1843 ? REGNO_OK_FOR_INDEX_P (REGNO (x))
bf32249e 1844 : REGNO_OK_FOR_INDEX_NONSTRICT_P (REGNO (x))));
fc2241eb
RS
1845}
1846
1847/* Return true if X is a legitimate index expression for a (d8,An,Xn) or
1848 (bd,An,Xn) addressing mode. Fill in the INDEX and SCALE fields of
1849 ADDRESS if so. STRICT_P says whether we need strict checking. */
1850
1851static bool
1852m68k_decompose_index (rtx x, bool strict_p, struct m68k_address *address)
1853{
1854 int scale;
1855
1856 /* Check for a scale factor. */
1857 scale = 1;
1858 if ((TARGET_68020 || TARGET_COLDFIRE)
1859 && GET_CODE (x) == MULT
1860 && GET_CODE (XEXP (x, 1)) == CONST_INT
1861 && (INTVAL (XEXP (x, 1)) == 2
1862 || INTVAL (XEXP (x, 1)) == 4
1863 || (INTVAL (XEXP (x, 1)) == 8
1864 && (TARGET_COLDFIRE_FPU || !TARGET_COLDFIRE))))
1865 {
1866 scale = INTVAL (XEXP (x, 1));
1867 x = XEXP (x, 0);
1868 }
1869
1870 /* Check for a word extension. */
1871 if (!TARGET_COLDFIRE
1872 && GET_CODE (x) == SIGN_EXTEND
1873 && GET_MODE (XEXP (x, 0)) == HImode)
1874 x = XEXP (x, 0);
1875
1876 if (m68k_legitimate_index_reg_p (x, strict_p))
1877 {
1878 address->scale = scale;
1879 address->index = x;
1880 return true;
1881 }
1882
1883 return false;
1884}
1885
7ffb5e78
RS
1886/* Return true if X is an illegitimate symbolic constant. */
1887
1888bool
1889m68k_illegitimate_symbolic_constant_p (rtx x)
1890{
1891 rtx base, offset;
1892
1893 if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P)
1894 {
1895 split_const (x, &base, &offset);
1896 if (GET_CODE (base) == SYMBOL_REF
1897 && !offset_within_block_p (base, INTVAL (offset)))
1898 return true;
1899 }
75df395f 1900 return m68k_tls_reference_p (x, false);
7ffb5e78
RS
1901}
1902
fbbf66e7
RS
/* Implement TARGET_CANNOT_FORCE_CONST_MEM.  A constant that is not a
   legitimate symbolic constant must not be spilled to the constant
   pool.  MODE is unused; only the rtx matters here.  */

static bool
m68k_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  return m68k_illegitimate_symbolic_constant_p (x);
}
1910
fc2241eb
RS
1911/* Return true if X is a legitimate constant address that can reach
1912 bytes in the range [X, X + REACH). STRICT_P says whether we need
1913 strict checking. */
1914
1915static bool
1916m68k_legitimate_constant_address_p (rtx x, unsigned int reach, bool strict_p)
1917{
1918 rtx base, offset;
1919
1920 if (!CONSTANT_ADDRESS_P (x))
1921 return false;
1922
1923 if (flag_pic
1924 && !(strict_p && TARGET_PCREL)
1925 && symbolic_operand (x, VOIDmode))
1926 return false;
1927
1928 if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P && reach > 1)
1929 {
1930 split_const (x, &base, &offset);
1931 if (GET_CODE (base) == SYMBOL_REF
1932 && !offset_within_block_p (base, INTVAL (offset) + reach - 1))
1933 return false;
1934 }
1935
75df395f 1936 return !m68k_tls_reference_p (x, false);
fc2241eb
RS
1937}
1938
1939/* Return true if X is a LABEL_REF for a jump table. Assume that unplaced
1940 labels will become jump tables. */
1941
1942static bool
1943m68k_jump_table_ref_p (rtx x)
1944{
1945 if (GET_CODE (x) != LABEL_REF)
1946 return false;
1947
b32d5189
DM
1948 rtx_insn *insn = as_a <rtx_insn *> (XEXP (x, 0));
1949 if (!NEXT_INSN (insn) && !PREV_INSN (insn))
fc2241eb
RS
1950 return true;
1951
b32d5189
DM
1952 insn = next_nonnote_insn (insn);
1953 return insn && JUMP_TABLE_DATA_P (insn);
fc2241eb
RS
1954}
1955
/* Return true if X is a legitimate address for values of mode MODE.
   STRICT_P says whether strict checking is needed.  If the address
   is valid, describe its components in *ADDRESS.

   The checks below are ordered: earlier (simpler) m68k addressing
   modes are matched before the more general 68020 forms, so the
   cheapest decomposition wins.  */

static bool
m68k_decompose_address (machine_mode mode, rtx x,
			bool strict_p, struct m68k_address *address)
{
  unsigned int reach;

  /* Start from an empty description; only the fields of the matched
     mode get filled in.  */
  memset (address, 0, sizeof (*address));

  /* REACH is how many bytes past X the access may touch; BLKmode is
     treated as touching a single byte.  */
  if (mode == BLKmode)
    reach = 1;
  else
    reach = GET_MODE_SIZE (mode);

  /* Check for (An) (mode 2).  */
  if (m68k_legitimate_base_reg_p (x, strict_p))
    {
      address->base = x;
      return true;
    }

  /* Check for -(An) and (An)+ (modes 3 and 4).  */
  if ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_INC)
      && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
    {
      address->code = GET_CODE (x);
      address->base = XEXP (x, 0);
      return true;
    }

  /* Check for (d16,An) (mode 5).  The upper bound is adjusted by
     REACH so the last byte accessed is still in range.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && IN_RANGE (INTVAL (XEXP (x, 1)), -0x8000, 0x8000 - reach)
      && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
    {
      address->base = XEXP (x, 0);
      address->offset = XEXP (x, 1);
      return true;
    }

  /* Check for GOT loads.  These are (bd,An,Xn) addresses if
     TARGET_68020 && flag_pic == 2, otherwise they are (d16,An)
     addresses.  */
  if (GET_CODE (x) == PLUS
      && XEXP (x, 0) == pic_offset_table_rtx)
    {
      /* As we are processing a PLUS, do not unwrap RELOC32 symbols --
	 they are invalid in this context.  */
      if (m68k_unwrap_symbol (XEXP (x, 1), false) != XEXP (x, 1))
	{
	  address->base = XEXP (x, 0);
	  address->offset = XEXP (x, 1);
	  return true;
	}
    }

  /* The ColdFire FPU only accepts addressing modes 2-5.  */
  if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
    return false;

  /* Check for (xxx).w and (xxx).l.  Also, in the TARGET_PCREL case,
     check for (d16,PC) or (bd,PC,Xn) with a suppressed index register.
     All these modes are variations of mode 7.  */
  if (m68k_legitimate_constant_address_p (x, reach, strict_p))
    {
      address->offset = x;
      return true;
    }

  /* Check for (d8,PC,Xn), a mode 7 form.  This case is needed for
     tablejumps.

     ??? do_tablejump creates these addresses before placing the target
     label, so we have to assume that unplaced labels are jump table
     references.  It seems unlikely that we would ever generate indexed
     accesses to unplaced labels in other cases.  */
  if (GET_CODE (x) == PLUS
      && m68k_jump_table_ref_p (XEXP (x, 1))
      && m68k_decompose_index (XEXP (x, 0), strict_p, address))
    {
      address->offset = XEXP (x, 1);
      return true;
    }

  /* Everything hereafter deals with (d8,An,Xn.SIZE*SCALE) or
     (bd,An,Xn.SIZE*SCALE) addresses.  */

  if (TARGET_68020)
    {
      /* Check for a nonzero base displacement.  */
      if (GET_CODE (x) == PLUS
	  && m68k_legitimate_constant_address_p (XEXP (x, 1), reach, strict_p))
	{
	  address->offset = XEXP (x, 1);
	  x = XEXP (x, 0);
	}

      /* Check for a suppressed index register.  */
      if (m68k_legitimate_base_reg_p (x, strict_p))
	{
	  address->base = x;
	  return true;
	}

      /* Check for a suppressed base register.  Do not allow this case
	 for non-symbolic offsets as it effectively gives gcc freedom
	 to treat data registers as base registers, which can generate
	 worse code.  */
      if (address->offset
	  && symbolic_operand (address->offset, VOIDmode)
	  && m68k_decompose_index (x, strict_p, address))
	return true;
    }
  else
    {
      /* Check for a nonzero base displacement; only the 8-bit d8
	 range is available before the 68020.  */
      if (GET_CODE (x) == PLUS
	  && GET_CODE (XEXP (x, 1)) == CONST_INT
	  && IN_RANGE (INTVAL (XEXP (x, 1)), -0x80, 0x80 - reach))
	{
	  address->offset = XEXP (x, 1);
	  x = XEXP (x, 0);
	}
    }

  /* We now expect the sum of a base and an index; try both orders of
     the PLUS operands.  */
  if (GET_CODE (x) == PLUS)
    {
      if (m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p)
	  && m68k_decompose_index (XEXP (x, 1), strict_p, address))
	{
	  address->base = XEXP (x, 0);
	  return true;
	}

      if (m68k_legitimate_base_reg_p (XEXP (x, 1), strict_p)
	  && m68k_decompose_index (XEXP (x, 0), strict_p, address))
	{
	  address->base = XEXP (x, 1);
	  return true;
	}
    }
  return false;
}
2104
2105/* Return true if X is a legitimate address for values of mode MODE.
2106 STRICT_P says whether strict checking is needed. */
2107
2108bool
ef4bddc2 2109m68k_legitimate_address_p (machine_mode mode, rtx x, bool strict_p)
fc2241eb
RS
2110{
2111 struct m68k_address address;
2112
2113 return m68k_decompose_address (mode, x, strict_p, &address);
2114}
2115
2116/* Return true if X is a memory, describing its address in ADDRESS if so.
2117 Apply strict checking if called during or after reload. */
2118
2119static bool
2120m68k_legitimate_mem_p (rtx x, struct m68k_address *address)
2121{
2122 return (MEM_P (x)
2123 && m68k_decompose_address (GET_MODE (x), XEXP (x, 0),
2124 reload_in_progress || reload_completed,
2125 address));
2126}
2127
1a627b35
RS
2128/* Implement TARGET_LEGITIMATE_CONSTANT_P. */
2129
2130bool
ef4bddc2 2131m68k_legitimate_constant_p (machine_mode mode, rtx x)
1a627b35
RS
2132{
2133 return mode != XFmode && !m68k_illegitimate_symbolic_constant_p (x);
2134}
2135
fc2241eb
RS
2136/* Return true if X matches the 'Q' constraint. It must be a memory
2137 with a base address and no constant offset or index. */
2138
2139bool
2140m68k_matches_q_p (rtx x)
2141{
2142 struct m68k_address address;
2143
2144 return (m68k_legitimate_mem_p (x, &address)
2145 && address.code == UNKNOWN
2146 && address.base
2147 && !address.offset
2148 && !address.index);
2149}
2150
2151/* Return true if X matches the 'U' constraint. It must be a base address
2152 with a constant offset and no index. */
2153
2154bool
2155m68k_matches_u_p (rtx x)
2156{
2157 struct m68k_address address;
2158
2159 return (m68k_legitimate_mem_p (x, &address)
2160 && address.code == UNKNOWN
2161 && address.base
2162 && address.offset
2163 && !address.index);
2164}
2165
75df395f
MK
2166/* Return GOT pointer. */
2167
2168static rtx
2169m68k_get_gp (void)
2170{
2171 if (pic_offset_table_rtx == NULL_RTX)
2172 pic_offset_table_rtx = gen_rtx_REG (Pmode, PIC_REG);
2173
2174 crtl->uses_pic_offset_table = 1;
2175
2176 return pic_offset_table_rtx;
2177}
2178
/* M68K relocations, used to distinguish GOT and TLS relocations in UNSPEC
   wrappers.  RELOC_GOT is the plain GOT relocation; the remaining values
   name the TLS access models (global/local dynamic, local-dynamic
   offset, initial exec, local exec).  */
enum m68k_reloc { RELOC_GOT, RELOC_TLSGD, RELOC_TLSLDM, RELOC_TLSLDO,
		  RELOC_TLSIE, RELOC_TLSLE };

/* True for every relocation kind except the plain GOT one.  */
#define TLS_RELOC_P(RELOC) ((RELOC) != RELOC_GOT)
2185
/* Wrap symbol X into unspec representing relocation RELOC.
   BASE_REG - register that should be added to the result.
   TEMP_REG - if non-null, temporary register.

   For -mxgot/-mxtls on ColdFire this emits an explicit 32-bit load
   plus add and returns the temporary register; otherwise it returns a
   (plus BASE_REG (const (unspec ...))) expression using a 16-bit
   relocation.  */

static rtx
m68k_wrap_symbol (rtx x, enum m68k_reloc reloc, rtx base_reg, rtx temp_reg)
{
  bool use_x_p;

  /* Which "extended" switch applies depends on whether we are adding
     the GOT pointer or a TLS base.  */
  use_x_p = (base_reg == pic_offset_table_rtx) ? TARGET_XGOT : TARGET_XTLS;

  if (TARGET_COLDFIRE && use_x_p)
    /* When compiling with -mx{got, tls} switch the code will look like this:

       move.l <X>@<RELOC>,<TEMP_REG>
       add.l <BASE_REG>,<TEMP_REG>  */
    {
      /* Wrap X in UNSPEC_??? to tip m68k_output_addr_const_extra
	 to put @RELOC after reference.  */
      x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)),
			  UNSPEC_RELOC32);
      x = gen_rtx_CONST (Pmode, x);

      if (temp_reg == NULL)
	{
	  gcc_assert (can_create_pseudo_p ());
	  temp_reg = gen_reg_rtx (Pmode);
	}

      emit_move_insn (temp_reg, x);
      emit_insn (gen_addsi3 (temp_reg, temp_reg, base_reg));
      x = temp_reg;
    }
  else
    {
      /* 16-bit relocation: return the address expression itself; no
	 instructions are emitted here.  */
      x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)),
			  UNSPEC_RELOC16);
      x = gen_rtx_CONST (Pmode, x);

      x = gen_rtx_PLUS (Pmode, base_reg, x);
    }

  return x;
}
2230
/* Helper for m68k_unwrap_symbol.
   Also, if unwrapping was successful (that is if (ORIG != <return value>)),
   sets *RELOC_PTR to relocation type for the symbol.

   ORIG is expected to be a (const ...) wrapper; anything else is
   returned unchanged.  UNWRAP_RELOC32_P controls whether
   UNSPEC_RELOC32 wrappers are looked through in addition to
   UNSPEC_RELOC16 ones.  */

static rtx
m68k_unwrap_symbol_1 (rtx orig, bool unwrap_reloc32_p,
		      enum m68k_reloc *reloc_ptr)
{
  if (GET_CODE (orig) == CONST)
    {
      rtx x;
      enum m68k_reloc dummy;

      x = XEXP (orig, 0);

      /* Allow callers to pass NULL when they don't care about the
	 relocation kind.  */
      if (reloc_ptr == NULL)
	reloc_ptr = &dummy;

      /* Handle an addend.  */
      if ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS)
	  && CONST_INT_P (XEXP (x, 1)))
	x = XEXP (x, 0);

      if (GET_CODE (x) == UNSPEC)
	{
	  switch (XINT (x, 1))
	    {
	    case UNSPEC_RELOC16:
	      orig = XVECEXP (x, 0, 0);
	      *reloc_ptr = (enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1));
	      break;

	    case UNSPEC_RELOC32:
	      if (unwrap_reloc32_p)
		{
		  orig = XVECEXP (x, 0, 0);
		  *reloc_ptr = (enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1));
		}
	      break;

	    default:
	      break;
	    }
	}
    }

  return orig;
}
2279
2280/* Unwrap symbol from UNSPEC_RELOC16 and, if unwrap_reloc32_p,
2281 UNSPEC_RELOC32 wrappers. */
2282
2283rtx
2284m68k_unwrap_symbol (rtx orig, bool unwrap_reloc32_p)
2285{
2286 return m68k_unwrap_symbol_1 (orig, unwrap_reloc32_p, NULL);
2287}
2288
75df395f
MK
/* Prescan insn before outputting assembler for it.  */

void
m68k_final_prescan_insn (rtx_insn *insn ATTRIBUTE_UNUSED,
			 rtx *operands, int n_operands)
{
  int i;

  /* Combine and, possibly, other optimizations may do good job
     converting
       (const (unspec [(symbol)]))
     into
       (const (plus (unspec [(symbol)])
                    (const_int N))).
     The problem with this is emitting @TLS or @GOT decorations.
     The decoration is emitted when processing (unspec), so the
     result would be "#symbol@TLSLE+N" instead of "#symbol+N@TLSLE".

     It seems that the easiest solution to this is to convert such
     operands to
       (const (unspec [(plus (symbol)
			     (const_int N))])).
     Note, that the top level of operand remains intact, so we don't have
     to patch up anything outside of the operand.  */

  subrtx_var_iterator::array_type array;
  for (i = 0; i < n_operands; ++i)
    {
      rtx op;

      op = operands[i];

      /* Walk every sub-rtx of the operand looking for wrapped
	 symbols; the rearrangement below mutates them in place.  */
      FOR_EACH_SUBRTX_VAR (iter, array, op, ALL)
	{
	  rtx x = *iter;
	  if (m68k_unwrap_symbol (x, true) != x)
	    {
	      rtx plus;

	      gcc_assert (GET_CODE (x) == CONST);
	      plus = XEXP (x, 0);

	      if (GET_CODE (plus) == PLUS || GET_CODE (plus) == MINUS)
		{
		  rtx unspec;
		  rtx addend;

		  unspec = XEXP (plus, 0);
		  gcc_assert (GET_CODE (unspec) == UNSPEC);
		  addend = XEXP (plus, 1);
		  gcc_assert (CONST_INT_P (addend));

		  /* We now have all the pieces, rearrange them.  */

		  /* Move symbol to plus.  */
		  XEXP (plus, 0) = XVECEXP (unspec, 0, 0);

		  /* Move plus inside unspec.  */
		  XVECEXP (unspec, 0, 0) = plus;

		  /* Move unspec to top level of const.  */
		  XEXP (x, 0) = unspec;
		}
	      /* Whether or not we rewrote it, nothing below this CONST
		 needs visiting.  */
	      iter.skip_subrtxes ();
	    }
	}
    }
}
2357
2358/* Move X to a register and add REG_EQUAL note pointing to ORIG.
2359 If REG is non-null, use it; generate new pseudo otherwise. */
2360
2361static rtx
2362m68k_move_to_reg (rtx x, rtx orig, rtx reg)
2363{
c85e862a 2364 rtx_insn *insn;
75df395f
MK
2365
2366 if (reg == NULL_RTX)
2367 {
2368 gcc_assert (can_create_pseudo_p ());
2369 reg = gen_reg_rtx (Pmode);
2370 }
2371
2372 insn = emit_move_insn (reg, x);
2373 /* Put a REG_EQUAL note on this insn, so that it can be optimized
2374 by loop. */
2375 set_unique_reg_note (insn, REG_EQUAL, orig);
2376
2377 return reg;
2378}
2379
2380/* Does the same as m68k_wrap_symbol, but returns a memory reference to
2381 GOT slot. */
2382
2383static rtx
2384m68k_wrap_symbol_into_got_ref (rtx x, enum m68k_reloc reloc, rtx temp_reg)
2385{
2386 x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), temp_reg);
2387
2388 x = gen_rtx_MEM (Pmode, x);
2389 MEM_READONLY_P (x) = 1;
2390
2391 return x;
2392}
2393
79e68feb
RS
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.

   An address is legitimized by making an indirect reference
   through the Global Offset Table with the name of the symbol
   used as an offset.

   The assembler and linker are responsible for placing the
   address of the symbol in the GOT.  The function prologue
   is responsible for initializing a5 to the starting address
   of the GOT.

   The assembler is also responsible for translating a symbol name
   into a constant displacement from the start of the GOT.

   A quick example may make things a little clearer:

   When not generating PIC code to store the value 12345 into _foo
   we would generate the following code:

	movel #12345, _foo

   When generating PIC two transformations are made.  First, the compiler
   loads the address of foo into a register.  So the first transformation makes:

	lea	_foo, a0
	movel   #12345, a0@

   The code in movsi will intercept the lea instruction and call this
   routine which will transform the instructions into:

	movel   a5@(_foo:w), a0
	movel   #12345, a0@


   That (in a nutshell) is how *all* symbol and label references are
   handled.  */

rtx
legitimize_pic_address (rtx orig, machine_mode mode ATTRIBUTE_UNUSED,
			rtx reg)
{
  rtx pic_ref = orig;

  /* First handle a simple SYMBOL_REF or LABEL_REF */
  if (GET_CODE (orig) == SYMBOL_REF || GET_CODE (orig) == LABEL_REF)
    {
      gcc_assert (reg);

      /* Load the GOT slot for the symbol into REG.  */
      pic_ref = m68k_wrap_symbol_into_got_ref (orig, RELOC_GOT, reg);
      pic_ref = m68k_move_to_reg (pic_ref, orig, reg);
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      /* Make sure this has not already been legitimized.  */
      if (m68k_unwrap_symbol (orig, true) != orig)
	return orig;

      gcc_assert (reg);

      /* legitimize both operands of the PLUS */
      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      /* Recurse on each operand; pass REG down only while it is still
	 free (i.e. not already holding the base).  */
      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
				     base == reg ? 0 : reg);

      if (GET_CODE (orig) == CONST_INT)
	pic_ref = plus_constant (Pmode, base, INTVAL (orig));
      else
	pic_ref = gen_rtx_PLUS (Pmode, base, orig);
    }

  return pic_ref;
}
2473
75df395f
MK
2474/* The __tls_get_addr symbol. */
2475static GTY(()) rtx m68k_tls_get_addr;
2476
2477/* Return SYMBOL_REF for __tls_get_addr. */
2478
2479static rtx
2480m68k_get_tls_get_addr (void)
2481{
2482 if (m68k_tls_get_addr == NULL_RTX)
2483 m68k_tls_get_addr = init_one_libfunc ("__tls_get_addr");
2484
2485 return m68k_tls_get_addr;
2486}
2487
/* Return libcall result in A0 instead of usual D0.  */
static bool m68k_libcall_value_in_a0_p = false;

/* Emit instruction sequence that calls __tls_get_addr.  X is
   the TLS symbol we are referencing and RELOC is the symbol type to use
   (either TLSGD or TLSLDM).  EQV is the REG_EQUAL note for the sequence
   emitted.  A pseudo register with result of __tls_get_addr call is
   returned.  */

static rtx
m68k_call_tls_get_addr (rtx x, rtx eqv, enum m68k_reloc reloc)
{
  rtx a0;
  rtx_insn *insns;
  rtx dest;

  /* Emit the call sequence.  */
  start_sequence ();

  /* FIXME: Unfortunately, emit_library_call_value does not
     consider (plus (%a5) (const (unspec))) to be a good enough
     operand for push, so it forces it into a register.  The bad
     thing about this is that combiner, due to copy propagation and other
     optimizations, sometimes can not later fix this.  As a consequence,
     additional register may be allocated resulting in a spill.
     For reference, see args processing loops in
     calls.c:emit_library_call_value_1.
     For testcase, see gcc.target/m68k/tls-{gd, ld}.c  */
  x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), NULL_RTX);

  /* __tls_get_addr() is not a libcall, but emitting a libcall_value
     is the simplest way of generating a call.  The difference between
     __tls_get_addr() and libcall is that the result is returned in D0
     instead of A0.  To workaround this, we use m68k_libcall_value_in_a0_p
     which temporarily switches returning the result to A0.  */

  m68k_libcall_value_in_a0_p = true;
  a0 = emit_library_call_value (m68k_get_tls_get_addr (), NULL_RTX, LCT_PURE,
				Pmode, 1, x, Pmode);
  m68k_libcall_value_in_a0_p = false;

  insns = get_insns ();
  end_sequence ();

  /* Wrap the whole sequence in a libcall block so the optimizers can
     treat it as a single value-producing operation described by EQV.  */
  gcc_assert (can_create_pseudo_p ());
  dest = gen_reg_rtx (Pmode);
  emit_libcall_block (insns, dest, a0, eqv);

  return dest;
}
2538
2539/* The __tls_get_addr symbol. */
2540static GTY(()) rtx m68k_read_tp;
2541
2542/* Return SYMBOL_REF for __m68k_read_tp. */
2543
2544static rtx
2545m68k_get_m68k_read_tp (void)
2546{
2547 if (m68k_read_tp == NULL_RTX)
2548 m68k_read_tp = init_one_libfunc ("__m68k_read_tp");
2549
2550 return m68k_read_tp;
2551}
2552
/* Emit instruction sequence that calls __m68k_read_tp.
   A pseudo register with result of __m68k_read_tp call is returned.  */

static rtx
m68k_call_m68k_read_tp (void)
{
  rtx a0;
  rtx eqv;
  rtx_insn *insns;
  rtx dest;

  start_sequence ();

  /* __m68k_read_tp() is not a libcall, but emitting a libcall_value
     is the simplest way of generating a call.  The difference between
     __m68k_read_tp() and libcall is that the result is returned in D0
     instead of A0.  To workaround this, we use m68k_libcall_value_in_a0_p
     which temporarily switches returning the result to A0.  */

  /* Emit the call sequence.  */
  m68k_libcall_value_in_a0_p = true;
  a0 = emit_library_call_value (m68k_get_m68k_read_tp (), NULL_RTX, LCT_PURE,
				Pmode, 0);
  m68k_libcall_value_in_a0_p = false;
  insns = get_insns ();
  end_sequence ();

  /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
     share the m68k_read_tp result with other IE/LE model accesses.  */
  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const1_rtx), UNSPEC_RELOC32);

  gcc_assert (can_create_pseudo_p ());
  dest = gen_reg_rtx (Pmode);
  emit_libcall_block (insns, dest, a0, eqv);

  return dest;
}
2590
/* Return a legitimized address for accessing TLS SYMBOL_REF X.
   For explanations on instructions sequences see TLS/NPTL ABI for m68k and
   ColdFire.  */

rtx
m68k_legitimize_tls_address (rtx orig)
{
  switch (SYMBOL_REF_TLS_MODEL (orig))
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      /* GD: call __tls_get_addr with the symbol itself; ORIG also
	 serves as the REG_EQUAL note for the call sequence.  */
      orig = m68k_call_tls_get_addr (orig, orig, RELOC_TLSGD);
      break;

    case TLS_MODEL_LOCAL_DYNAMIC:
      {
	rtx eqv;
	rtx a0;
	rtx x;

	/* Attach a unique REG_EQUIV, to allow the RTL optimizers to
	   share the LDM result with other LD model accesses.  */
	eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
			      UNSPEC_RELOC32);

	a0 = m68k_call_tls_get_addr (orig, eqv, RELOC_TLSLDM);

	/* Add the symbol's module-relative offset to the LDM base.  */
	x = m68k_wrap_symbol (orig, RELOC_TLSLDO, a0, NULL_RTX);

	if (can_create_pseudo_p ())
	  x = m68k_move_to_reg (x, orig, NULL_RTX);

	orig = x;
	break;
      }

    case TLS_MODEL_INITIAL_EXEC:
      {
	rtx a0;
	rtx x;

	/* IE: load the symbol's TP-relative offset from the GOT and
	   add the thread pointer.  */
	a0 = m68k_call_m68k_read_tp ();

	x = m68k_wrap_symbol_into_got_ref (orig, RELOC_TLSIE, NULL_RTX);
	x = gen_rtx_PLUS (Pmode, x, a0);

	if (can_create_pseudo_p ())
	  x = m68k_move_to_reg (x, orig, NULL_RTX);

	orig = x;
	break;
      }

    case TLS_MODEL_LOCAL_EXEC:
      {
	rtx a0;
	rtx x;

	/* LE: the offset is a link-time constant added to the thread
	   pointer.  */
	a0 = m68k_call_m68k_read_tp ();

	x = m68k_wrap_symbol (orig, RELOC_TLSLE, a0, NULL_RTX);

	if (can_create_pseudo_p ())
	  x = m68k_move_to_reg (x, orig, NULL_RTX);

	orig = x;
	break;
      }

    default:
      gcc_unreachable ();
    }

  return orig;
}
2665
2666/* Return true if X is a TLS symbol. */
2667
2668static bool
2669m68k_tls_symbol_p (rtx x)
2670{
2671 if (!TARGET_HAVE_TLS)
2672 return false;
2673
2674 if (GET_CODE (x) != SYMBOL_REF)
2675 return false;
2676
2677 return SYMBOL_REF_TLS_MODEL (x) != 0;
2678}
2679
75df395f
MK
/* If !LEGITIMATE_P, return true if X is a TLS symbol reference,
   though illegitimate one.
   If LEGITIMATE_P, return true if X is a legitimate TLS symbol reference.  */

bool
m68k_tls_reference_p (rtx x, bool legitimate_p)
{
  if (!TARGET_HAVE_TLS)
    return false;

  if (!legitimate_p)
    {
      /* Search every sub-rtx for a bare TLS SYMBOL_REF.  */
      subrtx_var_iterator::array_type array;
      FOR_EACH_SUBRTX_VAR (iter, array, x, ALL)
	{
	  rtx x = *iter;

	  /* Note: this is not the same as m68k_tls_symbol_p.  */
	  if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0)
	    return true;

	  /* Don't recurse into legitimate TLS references.  */
	  if (m68k_tls_reference_p (x, true))
	    iter.skip_subrtxes ();
	}
      return false;
    }
  else
    {
      /* Legitimate means properly wrapped: unwrapping succeeds and
	 yields a TLS relocation kind.  */
      enum m68k_reloc reloc = RELOC_GOT;

      return (m68k_unwrap_symbol_1 (x, true, &reloc) != x
	      && TLS_RELOC_P (reloc));
    }
}
2715
79e68feb 2716\f
0ce6f9fb 2717
/* True if I fits the signed 8-bit immediate range of the moveq
   instruction (-128 .. 127).  */
#define USE_MOVQ(i)	((unsigned) ((i) + 128) <= 255)
0ce6f9fb 2719
bda2a571
RS
2720/* Return the type of move that should be used for integer I. */
2721
c47b0cb4
MK
2722M68K_CONST_METHOD
2723m68k_const_method (HOST_WIDE_INT i)
0ce6f9fb 2724{
0ce6f9fb
RK
2725 unsigned u;
2726
6910dd70 2727 if (USE_MOVQ (i))
0ce6f9fb 2728 return MOVQ;
24092242 2729
c16eadc7 2730 /* The ColdFire doesn't have byte or word operations. */
97c55091 2731 /* FIXME: This may not be useful for the m68060 either. */
85dbf7e2 2732 if (!TARGET_COLDFIRE)
24092242
RK
2733 {
2734 /* if -256 < N < 256 but N is not in range for a moveq
7a1929e1 2735 N^ff will be, so use moveq #N^ff, dreg; not.b dreg. */
24092242
RK
2736 if (USE_MOVQ (i ^ 0xff))
2737 return NOTB;
2738 /* Likewise, try with not.w */
2739 if (USE_MOVQ (i ^ 0xffff))
2740 return NOTW;
2741 /* This is the only value where neg.w is useful */
2742 if (i == -65408)
2743 return NEGW;
24092242 2744 }
28bad6d1 2745
5e04daf3
PB
2746 /* Try also with swap. */
2747 u = i;
2748 if (USE_MOVQ ((u >> 16) | (u << 16)))
2749 return SWAP;
2750
986e74d5 2751 if (TARGET_ISAB)
28bad6d1 2752 {
72edf146 2753 /* Try using MVZ/MVS with an immediate value to load constants. */
28bad6d1
PB
2754 if (i >= 0 && i <= 65535)
2755 return MVZ;
2756 if (i >= -32768 && i <= 32767)
2757 return MVS;
2758 }
2759
0ce6f9fb
RK
2760 /* Otherwise, use move.l */
2761 return MOVL;
2762}
2763
bda2a571
RS
2764/* Return the cost of moving constant I into a data register. */
2765
3c50106f 2766static int
bda2a571 2767const_int_cost (HOST_WIDE_INT i)
0ce6f9fb 2768{
c47b0cb4 2769 switch (m68k_const_method (i))
0ce6f9fb 2770 {
a0a7fbc9
AS
2771 case MOVQ:
2772 /* Constants between -128 and 127 are cheap due to moveq. */
2773 return 0;
2774 case MVZ:
2775 case MVS:
2776 case NOTB:
2777 case NOTW:
2778 case NEGW:
2779 case SWAP:
2780 /* Constants easily generated by moveq + not.b/not.w/neg.w/swap. */
2781 return 1;
2782 case MOVL:
2783 return 2;
2784 default:
2785 gcc_unreachable ();
0ce6f9fb
RK
2786 }
2787}
2788
/* Implement TARGET_RTX_COSTS.  Estimate the cost of rtx X appearing in
   context OUTER_CODE, storing the result in *TOTAL.  Return true when
   the cost is final (no recursion into operands needed).  */

static bool
m68k_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
		int *total, bool speed ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case CONST_INT:
      /* Constant zero is super cheap due to clr instruction.  */
      if (x == const0_rtx)
	*total = 0;
      else
	*total = const_int_cost (INTVAL (x));
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 3;
      return true;

    case CONST_DOUBLE:
      /* Make 0.0 cheaper than other floating constants to
	 encourage creating tstsf and tstdf insns.  */
      if (outer_code == COMPARE
	  && (x == CONST0_RTX (SFmode) || x == CONST0_RTX (DFmode)))
	*total = 4;
      else
	*total = 5;
      return true;

    /* These are vaguely right for a 68020.  */
    /* The costs for long multiply have been adjusted to work properly
       in synth_mult on the 68020, relative to an average of the time
       for add and the time for shift, taking away a little more because
       sometimes move insns are needed.  */
    /* div?.w is relatively cheaper on 68000 counted in COSTS_N_INSNS
       terms.  */
#define MULL_COST				\
  (TUNE_68060 ? 2				\
   : TUNE_68040 ? 5				\
   : (TUNE_CFV2 && TUNE_EMAC) ? 3		\
   : (TUNE_CFV2 && TUNE_MAC) ? 4		\
   : TUNE_CFV2 ? 8				\
   : TARGET_COLDFIRE ? 3 : 13)

#define MULW_COST				\
  (TUNE_68060 ? 2				\
   : TUNE_68040 ? 3				\
   : TUNE_68000_10 ? 5				\
   : (TUNE_CFV2 && TUNE_EMAC) ? 3		\
   : (TUNE_CFV2 && TUNE_MAC) ? 2		\
   : TUNE_CFV2 ? 8				\
   : TARGET_COLDFIRE ? 2 : 8)

#define DIVW_COST				\
  (TARGET_CF_HWDIV ? 11				\
   : TUNE_68000_10 || TARGET_COLDFIRE ? 12 : 27)

    case PLUS:
      /* An lea costs about three times as much as a simple add.  */
      if (GET_MODE (x) == SImode
	  && GET_CODE (XEXP (x, 1)) == REG
	  && GET_CODE (XEXP (x, 0)) == MULT
	  && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
	  && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
	  && (INTVAL (XEXP (XEXP (x, 0), 1)) == 2
	      || INTVAL (XEXP (XEXP (x, 0), 1)) == 4
	      || INTVAL (XEXP (XEXP (x, 0), 1)) == 8))
	{
	  /* lea an@(dx:l:i),am */
	  *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 2 : 3);
	  return true;
	}
      return false;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      if (TUNE_68060)
	{
	  *total = COSTS_N_INSNS(1);
	  return true;
	}
      if (TUNE_68000_10)
	{
	  if (GET_CODE (XEXP (x, 1)) == CONST_INT)
	    {
	      if (INTVAL (XEXP (x, 1)) < 16)
		*total = COSTS_N_INSNS (2) + INTVAL (XEXP (x, 1)) / 2;
	      else
		/* We're using clrw + swap for these cases.  */
		*total = COSTS_N_INSNS (4) + (INTVAL (XEXP (x, 1)) - 16) / 2;
	    }
	  else
	    *total = COSTS_N_INSNS (10); /* Worst case.  */
	  return true;
	}
      /* A shift by a big integer takes an extra instruction.  */
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && (INTVAL (XEXP (x, 1)) == 16))
	{
	  *total = COSTS_N_INSNS (2);	 /* clrw;swap */
	  return true;
	}
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && !(INTVAL (XEXP (x, 1)) > 0
	       && INTVAL (XEXP (x, 1)) <= 8))
	{
	  *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 1 : 3);	 /* lsr #i,dn */
	  return true;
	}
      return false;

    case MULT:
      /* Word multiply when an operand is an extension or the mode is
	 narrower than SImode; long multiply otherwise.  */
      if ((GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
	   || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
	  && GET_MODE (x) == SImode)
	*total = COSTS_N_INSNS (MULW_COST);
      else if (GET_MODE (x) == QImode || GET_MODE (x) == HImode)
	*total = COSTS_N_INSNS (MULW_COST);
      else
	*total = COSTS_N_INSNS (MULL_COST);
      return true;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      if (GET_MODE (x) == QImode || GET_MODE (x) == HImode)
	*total = COSTS_N_INSNS (DIVW_COST);	/* div.w */
      else if (TARGET_CF_HWDIV)
	*total = COSTS_N_INSNS (18);
      else
	*total = COSTS_N_INSNS (43);		/* div.l */
      return true;

    case ZERO_EXTRACT:
      /* A bit-field extract feeding a compare is folded into btst.  */
      if (outer_code == COMPARE)
	*total = 0;
      return false;

    default:
      return false;
    }
}
2934
/* Return an instruction to move CONST_INT OPERANDS[1] into data register
   OPERANDS[0].  Chooses the cheapest sequence as classified by
   m68k_const_method; may rewrite OPERANDS[1] in place when the constant
   must be transformed before the first instruction (NOTB/NOTW/SWAP).  */

static const char *
output_move_const_into_data_reg (rtx *operands)
{
  HOST_WIDE_INT i;

  i = INTVAL (operands[1]);
  switch (m68k_const_method (i))
    {
    case MVZ:
      return "mvzw %1,%0";
    case MVS:
      return "mvsw %1,%0";
    case MOVQ:
      return "moveq %1,%0";
    case NOTB:
      /* Multi-insn sequence: the trailing not clobbers the condition
	 codes, so invalidate the tracked CC state.  */
      CC_STATUS_INIT;
      operands[1] = GEN_INT (i ^ 0xff);
      return "moveq %1,%0\n\tnot%.b %0";
    case NOTW:
      CC_STATUS_INIT;
      operands[1] = GEN_INT (i ^ 0xffff);
      return "moveq %1,%0\n\tnot%.w %0";
    case NEGW:
      CC_STATUS_INIT;
      return "moveq #-128,%0\n\tneg%.w %0";
    case SWAP:
      {
	unsigned u = i;

	/* Load the halves exchanged, then swap them back into place.  */
	operands[1] = GEN_INT ((u << 16) | (u >> 16));
	return "moveq %1,%0\n\tswap %0";
      }
    case MOVL:
      return "move%.l %1,%0";
    default:
      gcc_unreachable ();
    }
}
2976
bda2a571 2977/* Return true if I can be handled by ISA B's mov3q instruction. */
5e04daf3 2978
bda2a571
RS
2979bool
2980valid_mov3q_const (HOST_WIDE_INT i)
2981{
2982 return TARGET_ISAB && (i == -1 || IN_RANGE (i, 1, 7));
5e04daf3
PB
2983}
2984
/* Return an instruction to move CONST_INT OPERANDS[1] into OPERANDS[0],
   picking the shortest encoding available on the current target
   (clr, mov3q, sub, moveq-based sequences, move.w, pea, move.l).  */

static const char *
output_move_simode_const (rtx *operands)
{
  rtx dest;
  HOST_WIDE_INT src;

  dest = operands[0];
  src = INTVAL (operands[1]);
  if (src == 0
      && (DATA_REG_P (dest) || MEM_P (dest))
      /* clr insns on 68000 read before writing.  */
      && ((TARGET_68010 || TARGET_COLDFIRE)
	  || !(MEM_P (dest) && MEM_VOLATILE_P (dest))))
    return "clr%.l %0";
  else if (GET_MODE (dest) == SImode && valid_mov3q_const (src))
    return "mov3q%.l %1,%0";
  else if (src == 0 && ADDRESS_REG_P (dest))
    return "sub%.l %0,%0";
  else if (DATA_REG_P (dest))
    return output_move_const_into_data_reg (operands);
  else if (ADDRESS_REG_P (dest) && IN_RANGE (src, -0x8000, 0x7fff))
    {
      if (valid_mov3q_const (src))
	return "mov3q%.l %1,%0";
      /* move.w into an address register sign-extends to 32 bits.  */
      return "move%.w %1,%0";
    }
  else if (MEM_P (dest)
	   && GET_CODE (XEXP (dest, 0)) == PRE_DEC
	   && REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
	   && IN_RANGE (src, -0x8000, 0x7fff))
    {
      /* Push of a 16-bit-representable constant.  */
      if (valid_mov3q_const (src))
	return "mov3q%.l %1,%-";
      return "pea %a1";
    }
  return "move%.l %1,%0";
}
3025
5505f548 3026const char *
8a4a2253 3027output_move_simode (rtx *operands)
f4e80198
RK
3028{
3029 if (GET_CODE (operands[1]) == CONST_INT)
3030 return output_move_simode_const (operands);
3031 else if ((GET_CODE (operands[1]) == SYMBOL_REF
3032 || GET_CODE (operands[1]) == CONST)
3033 && push_operand (operands[0], SImode))
3034 return "pea %a1";
3035 else if ((GET_CODE (operands[1]) == SYMBOL_REF
3036 || GET_CODE (operands[1]) == CONST)
3037 && ADDRESS_REG_P (operands[0]))
3038 return "lea %a1,%0";
3039 return "move%.l %1,%0";
3040}
3041
5505f548 3042const char *
8a4a2253 3043output_move_himode (rtx *operands)
f4e80198
RK
3044{
3045 if (GET_CODE (operands[1]) == CONST_INT)
3046 {
3047 if (operands[1] == const0_rtx
3048 && (DATA_REG_P (operands[0])
3049 || GET_CODE (operands[0]) == MEM)
3197c489
RS
3050 /* clr insns on 68000 read before writing. */
3051 && ((TARGET_68010 || TARGET_COLDFIRE)
f4e80198
RK
3052 || !(GET_CODE (operands[0]) == MEM
3053 && MEM_VOLATILE_P (operands[0]))))
3054 return "clr%.w %0";
38198304
AS
3055 else if (operands[1] == const0_rtx
3056 && ADDRESS_REG_P (operands[0]))
3057 return "sub%.l %0,%0";
f4e80198
RK
3058 else if (DATA_REG_P (operands[0])
3059 && INTVAL (operands[1]) < 128
3060 && INTVAL (operands[1]) >= -128)
a0a7fbc9 3061 return "moveq %1,%0";
f4e80198
RK
3062 else if (INTVAL (operands[1]) < 0x8000
3063 && INTVAL (operands[1]) >= -0x8000)
3064 return "move%.w %1,%0";
3065 }
3066 else if (CONSTANT_P (operands[1]))
3067 return "move%.l %1,%0";
f4e80198
RK
3068 return "move%.w %1,%0";
3069}
3070
/* Return the assembler template for a byte (QImode) move of
   operands[1] into operands[0].  */

const char *
output_move_qimode (rtx *operands)
{
  /* 68k family always modifies the stack pointer by at least 2, even for
     byte pushes.  The 5200 (ColdFire) does not do this.  */

  /* This case is generated by pushqi1 pattern now.  */
  gcc_assert (!(GET_CODE (operands[0]) == MEM
		&& GET_CODE (XEXP (operands[0], 0)) == PRE_DEC
		&& XEXP (XEXP (operands[0], 0), 0) == stack_pointer_rtx
		&& ! ADDRESS_REG_P (operands[1])
		&& ! TARGET_COLDFIRE));

  /* clr and st insns on 68000 read before writing.  */
  if (!ADDRESS_REG_P (operands[0])
      && ((TARGET_68010 || TARGET_COLDFIRE)
	  || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
    {
      if (operands[1] == const0_rtx)
	return "clr%.b %0";
      /* st sets all bits, so it handles any constant with low byte 0xff;
	 it leaves the condition codes in an unknown state.  */
      if ((!TARGET_COLDFIRE || DATA_REG_P (operands[0]))
	  && GET_CODE (operands[1]) == CONST_INT
	  && (INTVAL (operands[1]) & 255) == 255)
	{
	  CC_STATUS_INIT;
	  return "st %0";
	}
    }
  if (GET_CODE (operands[1]) == CONST_INT
      && DATA_REG_P (operands[0])
      && INTVAL (operands[1]) < 128
      && INTVAL (operands[1]) >= -128)
    return "moveq %1,%0";
  if (operands[1] == const0_rtx && ADDRESS_REG_P (operands[0]))
    return "sub%.l %0,%0";
  if (GET_CODE (operands[1]) != CONST_INT && CONSTANT_P (operands[1]))
    return "move%.l %1,%0";
  /* 68k family (including the 5200 ColdFire) does not support byte moves
     to or from address registers; use a word move instead.  */
  if (ADDRESS_REG_P (operands[0]) || ADDRESS_REG_P (operands[1]))
    return "move%.w %1,%0";
  return "move%.b %1,%0";
}
3114
5505f548 3115const char *
8a4a2253 3116output_move_stricthi (rtx *operands)
9b55bf04
RK
3117{
3118 if (operands[1] == const0_rtx
3197c489
RS
3119 /* clr insns on 68000 read before writing. */
3120 && ((TARGET_68010 || TARGET_COLDFIRE)
9b55bf04
RK
3121 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3122 return "clr%.w %0";
3123 return "move%.w %1,%0";
3124}
3125
5505f548 3126const char *
8a4a2253 3127output_move_strictqi (rtx *operands)
9b55bf04
RK
3128{
3129 if (operands[1] == const0_rtx
3197c489
RS
3130 /* clr insns on 68000 read before writing. */
3131 && ((TARGET_68010 || TARGET_COLDFIRE)
9b55bf04
RK
3132 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3133 return "clr%.b %0";
3134 return "move%.b %1,%0";
3135}
3136
79e68feb
RS
3137/* Return the best assembler insn template
3138 for moving operands[1] into operands[0] as a fullword. */
3139
5505f548 3140static const char *
8a4a2253 3141singlemove_string (rtx *operands)
79e68feb 3142{
02ed0c07
RK
3143 if (GET_CODE (operands[1]) == CONST_INT)
3144 return output_move_simode_const (operands);
3145 return "move%.l %1,%0";
79e68feb
RS
3146}
3147
2505bc97 3148
/* Output assembler or rtl code to perform a doubleword move insn
   with operands OPERANDS.
   Pointers to 3 helper functions should be specified:
   HANDLE_REG_ADJUST to adjust a register by a small value,
   HANDLE_COMPADR to compute an address and
   HANDLE_MOVSI to move 4 bytes.

   The same driver serves both final assembly output and RTL expansion;
   only the three callbacks differ.  Handles 8-byte (DImode/DFmode) and
   12-byte (XFmode) moves, splitting them into 2 or 3 SImode moves while
   coping with autoincrement/autodecrement addressing and overlap between
   the destination registers and the source address.  */

static void
handle_move_double (rtx operands[2],
		    void (*handle_reg_adjust) (rtx, int),
		    void (*handle_compadr) (rtx [2]),
		    void (*handle_movsi) (rtx [2]))
{
  enum
    {
      REGOP, OFFSOP, MEMOP, PUSHOP, POPOP, CNSTOP, RNDOP
    } optype0, optype1;
  rtx latehalf[2];
  rtx middlehalf[2];
  rtx xops[2];
  rtx addreg0 = 0, addreg1 = 0;
  int dest_overlapped_low = 0;
  int size = GET_MODE_SIZE (GET_MODE (operands[0]));

  middlehalf[0] = 0;
  middlehalf[1] = 0;

  /* First classify both operands.  */

  if (REG_P (operands[0]))
    optype0 = REGOP;
  else if (offsettable_memref_p (operands[0]))
    optype0 = OFFSOP;
  else if (GET_CODE (XEXP (operands[0], 0)) == POST_INC)
    optype0 = POPOP;
  else if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC)
    optype0 = PUSHOP;
  else if (GET_CODE (operands[0]) == MEM)
    optype0 = MEMOP;
  else
    optype0 = RNDOP;

  if (REG_P (operands[1]))
    optype1 = REGOP;
  else if (CONSTANT_P (operands[1]))
    optype1 = CNSTOP;
  else if (offsettable_memref_p (operands[1]))
    optype1 = OFFSOP;
  else if (GET_CODE (XEXP (operands[1], 0)) == POST_INC)
    optype1 = POPOP;
  else if (GET_CODE (XEXP (operands[1], 0)) == PRE_DEC)
    optype1 = PUSHOP;
  else if (GET_CODE (operands[1]) == MEM)
    optype1 = MEMOP;
  else
    optype1 = RNDOP;

  /* Check for the cases that the operand constraints are not supposed
     to allow to happen.  Generating code for these cases is
     painful.  */
  gcc_assert (optype0 != RNDOP && optype1 != RNDOP);

  /* If one operand is decrementing and one is incrementing
     decrement the former register explicitly
     and change that operand into ordinary indexing.  */

  if (optype0 == PUSHOP && optype1 == POPOP)
    {
      operands[0] = XEXP (XEXP (operands[0], 0), 0);

      handle_reg_adjust (operands[0], -size);

      if (GET_MODE (operands[1]) == XFmode)
	operands[0] = gen_rtx_MEM (XFmode, operands[0]);
      else if (GET_MODE (operands[0]) == DFmode)
	operands[0] = gen_rtx_MEM (DFmode, operands[0]);
      else
	operands[0] = gen_rtx_MEM (DImode, operands[0]);
      optype0 = OFFSOP;
    }
  if (optype0 == POPOP && optype1 == PUSHOP)
    {
      operands[1] = XEXP (XEXP (operands[1], 0), 0);

      handle_reg_adjust (operands[1], -size);

      if (GET_MODE (operands[1]) == XFmode)
	operands[1] = gen_rtx_MEM (XFmode, operands[1]);
      else if (GET_MODE (operands[1]) == DFmode)
	operands[1] = gen_rtx_MEM (DFmode, operands[1]);
      else
	operands[1] = gen_rtx_MEM (DImode, operands[1]);
      optype1 = OFFSOP;
    }

  /* If an operand is an unoffsettable memory ref, find a register
     we can increment temporarily to make it refer to the second word.  */

  if (optype0 == MEMOP)
    addreg0 = find_addr_reg (XEXP (operands[0], 0));

  if (optype1 == MEMOP)
    addreg1 = find_addr_reg (XEXP (operands[1], 0));

  /* Ok, we can do one word at a time.
     Normally we do the low-numbered word first,
     but if either operand is autodecrementing then we
     do the high-numbered word first.

     In either case, set up in LATEHALF the operands to use
     for the high-numbered word and in some cases alter the
     operands in OPERANDS to be suitable for the low-numbered word.  */

  if (size == 12)
    {
      if (optype0 == REGOP)
	{
	  latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 2);
	  middlehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
	}
      else if (optype0 == OFFSOP)
	{
	  middlehalf[0] = adjust_address (operands[0], SImode, 4);
	  latehalf[0] = adjust_address (operands[0], SImode, size - 4);
	}
      else
	{
	  /* Unoffsettable memory: the address register will be bumped
	     between the partial moves, so all pieces use offset 0.  */
	  middlehalf[0] = adjust_address (operands[0], SImode, 0);
	  latehalf[0] = adjust_address (operands[0], SImode, 0);
	}

      if (optype1 == REGOP)
	{
	  latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 2);
	  middlehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
	}
      else if (optype1 == OFFSOP)
	{
	  middlehalf[1] = adjust_address (operands[1], SImode, 4);
	  latehalf[1] = adjust_address (operands[1], SImode, size - 4);
	}
      else if (optype1 == CNSTOP)
	{
	  if (GET_CODE (operands[1]) == CONST_DOUBLE)
	    {
	      REAL_VALUE_TYPE r;
	      long l[3];

	      REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
	      REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
	      operands[1] = GEN_INT (l[0]);
	      middlehalf[1] = GEN_INT (l[1]);
	      latehalf[1] = GEN_INT (l[2]);
	    }
	  else
	    {
	      /* No non-CONST_DOUBLE constant should ever appear
		 here.  */
	      gcc_assert (!CONSTANT_P (operands[1]));
	    }
	}
      else
	{
	  middlehalf[1] = adjust_address (operands[1], SImode, 0);
	  latehalf[1] = adjust_address (operands[1], SImode, 0);
	}
    }
  else
    /* size is not 12: */
    {
      if (optype0 == REGOP)
	latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
      else if (optype0 == OFFSOP)
	latehalf[0] = adjust_address (operands[0], SImode, size - 4);
      else
	latehalf[0] = adjust_address (operands[0], SImode, 0);

      if (optype1 == REGOP)
	latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
      else if (optype1 == OFFSOP)
	latehalf[1] = adjust_address (operands[1], SImode, size - 4);
      else if (optype1 == CNSTOP)
	split_double (operands[1], &operands[1], &latehalf[1]);
      else
	latehalf[1] = adjust_address (operands[1], SImode, 0);
    }

  /* If insn is effectively movd N(REG),-(REG) then we will do the high
     word first.  We should use the adjusted operand 1 (which is N+4(REG))
     for the low word as well, to compensate for the first decrement of
     REG.  */
  if (optype0 == PUSHOP
      && reg_overlap_mentioned_p (XEXP (XEXP (operands[0], 0), 0), operands[1]))
    operands[1] = middlehalf[1] = latehalf[1];

  /* For (set (reg:DI N) (mem:DI ... (reg:SI N) ...)),
     if the upper part of reg N does not appear in the MEM, arrange to
     emit the move late-half first.  Otherwise, compute the MEM address
     into the upper part of N and use that as a pointer to the memory
     operand.  */
  if (optype0 == REGOP
      && (optype1 == OFFSOP || optype1 == MEMOP))
    {
      rtx testlow = gen_rtx_REG (SImode, REGNO (operands[0]));

      if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
	  && reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
	{
	  /* If both halves of dest are used in the src memory address,
	     compute the address into latehalf of dest.
	     Note that this can't happen if the dest is two data regs.  */
	compadr:
	  xops[0] = latehalf[0];
	  xops[1] = XEXP (operands[1], 0);

	  handle_compadr (xops);
	  if (GET_MODE (operands[1]) == XFmode)
	    {
	      operands[1] = gen_rtx_MEM (XFmode, latehalf[0]);
	      middlehalf[1] = adjust_address (operands[1], DImode, size - 8);
	      latehalf[1] = adjust_address (operands[1], DImode, size - 4);
	    }
	  else
	    {
	      operands[1] = gen_rtx_MEM (DImode, latehalf[0]);
	      latehalf[1] = adjust_address (operands[1], DImode, size - 4);
	    }
	}
      else if (size == 12
	       && reg_overlap_mentioned_p (middlehalf[0],
					   XEXP (operands[1], 0)))
	{
	  /* Check for two regs used by both source and dest.
	     Note that this can't happen if the dest is all data regs.
	     It can happen if the dest is d6, d7, a0.
	     But in that case, latehalf is an addr reg, so
	     the code at compadr does ok.  */

	  if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
	      || reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
	    goto compadr;

	  /* JRV says this can't happen: */
	  gcc_assert (!addreg0 && !addreg1);

	  /* Only the middle reg conflicts; simply put it last.  */
	  handle_movsi (operands);
	  handle_movsi (latehalf);
	  handle_movsi (middlehalf);

	  return;
	}
      else if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0)))
	/* If the low half of dest is mentioned in the source memory
	   address, arrange to emit the move late half first.  */
	dest_overlapped_low = 1;
    }

  /* If one or both operands are autodecrementing,
     do the two words, high-numbered first.  */

  /* Likewise, if the first move would clobber the source of the second one,
     do them in the other order.  This happens only for registers;
     such overlap can't happen in memory unless the user explicitly
     sets it up, and that is an undefined circumstance.  */

  if (optype0 == PUSHOP || optype1 == PUSHOP
      || (optype0 == REGOP && optype1 == REGOP
	  && ((middlehalf[1] && REGNO (operands[0]) == REGNO (middlehalf[1]))
	      || REGNO (operands[0]) == REGNO (latehalf[1])))
      || dest_overlapped_low)
    {
      /* Make any unoffsettable addresses point at high-numbered word.  */
      if (addreg0)
	handle_reg_adjust (addreg0, size - 4);
      if (addreg1)
	handle_reg_adjust (addreg1, size - 4);

      /* Do that word.  */
      handle_movsi (latehalf);

      /* Undo the adds we just did.  */
      if (addreg0)
	handle_reg_adjust (addreg0, -4);
      if (addreg1)
	handle_reg_adjust (addreg1, -4);

      if (size == 12)
	{
	  handle_movsi (middlehalf);

	  if (addreg0)
	    handle_reg_adjust (addreg0, -4);
	  if (addreg1)
	    handle_reg_adjust (addreg1, -4);
	}

      /* Do low-numbered word.  */

      handle_movsi (operands);
      return;
    }

  /* Normal case: do the two words, low-numbered first.  */

  m68k_final_prescan_insn (NULL, operands, 2);
  handle_movsi (operands);

  /* Do the middle one of the three words for long double */
  if (size == 12)
    {
      if (addreg0)
	handle_reg_adjust (addreg0, 4);
      if (addreg1)
	handle_reg_adjust (addreg1, 4);

      m68k_final_prescan_insn (NULL, middlehalf, 2);
      handle_movsi (middlehalf);
    }

  /* Make any unoffsettable addresses point at high-numbered word.  */
  if (addreg0)
    handle_reg_adjust (addreg0, 4);
  if (addreg1)
    handle_reg_adjust (addreg1, 4);

  /* Do that word.  */
  m68k_final_prescan_insn (NULL, latehalf, 2);
  handle_movsi (latehalf);

  /* Undo the adds we just did.  */
  if (addreg0)
    handle_reg_adjust (addreg0, -(size - 4));
  if (addreg1)
    handle_reg_adjust (addreg1, -(size - 4));

  return;
}
3487
3488/* Output assembler code to adjust REG by N. */
3489static void
3490output_reg_adjust (rtx reg, int n)
3491{
3492 const char *s;
3493
3494 gcc_assert (GET_MODE (reg) == SImode
3495 && -12 <= n && n != 0 && n <= 12);
3496
3497 switch (n)
2505bc97 3498 {
c47b0cb4
MK
3499 case 12:
3500 s = "add%.l #12,%0";
3501 break;
3502
3503 case 8:
3504 s = "addq%.l #8,%0";
3505 break;
3506
3507 case 4:
3508 s = "addq%.l #4,%0";
3509 break;
3510
3511 case -12:
3512 s = "sub%.l #12,%0";
3513 break;
3514
3515 case -8:
3516 s = "subq%.l #8,%0";
3517 break;
3518
3519 case -4:
3520 s = "subq%.l #4,%0";
3521 break;
3522
3523 default:
3524 gcc_unreachable ();
3525 s = NULL;
2505bc97 3526 }
c47b0cb4
MK
3527
3528 output_asm_insn (s, &reg);
3529}
3530
3531/* Emit rtl code to adjust REG by N. */
3532static void
3533emit_reg_adjust (rtx reg1, int n)
3534{
3535 rtx reg2;
3536
3537 gcc_assert (GET_MODE (reg1) == SImode
3538 && -12 <= n && n != 0 && n <= 12);
3539
3540 reg1 = copy_rtx (reg1);
3541 reg2 = copy_rtx (reg1);
3542
3543 if (n < 0)
3544 emit_insn (gen_subsi3 (reg1, reg2, GEN_INT (-n)));
3545 else if (n > 0)
3546 emit_insn (gen_addsi3 (reg1, reg2, GEN_INT (n)));
3547 else
3548 gcc_unreachable ();
3549}
3550
/* Output assembler to load the address OPERANDS[1] into register
   OPERANDS[0] (lea %a1,%0 computes the effective address of operand 1
   and places it in operand 0).  */
static void
output_compadr (rtx operands[2])
{
  output_asm_insn ("lea %a1,%0", operands);
}
3557
/* Output the best assembler insn for moving operands[1] into operands[0]
   as a fullword.  */
static void
output_movsi (rtx operands[2])
{
  /* singlemove_string picks clr/moveq/move etc. and may rewrite
     operands[1] in place for multi-insn constant sequences.  */
  output_asm_insn (singlemove_string (operands), operands);
}
3565
3566/* Copy OP and change its mode to MODE. */
3567static rtx
ef4bddc2 3568copy_operand (rtx op, machine_mode mode)
c47b0cb4
MK
3569{
3570 /* ??? This looks really ugly. There must be a better way
3571 to change a mode on the operand. */
3572 if (GET_MODE (op) != VOIDmode)
2505bc97 3573 {
c47b0cb4
MK
3574 if (REG_P (op))
3575 op = gen_rtx_REG (mode, REGNO (op));
2505bc97 3576 else
c47b0cb4
MK
3577 {
3578 op = copy_rtx (op);
3579 PUT_MODE (op, mode);
3580 }
2505bc97 3581 }
79e68feb 3582
c47b0cb4
MK
3583 return op;
3584}
3585
/* Emit rtl code for moving operands[1] into operands[0] as a fullword.  */
static void
emit_movsi (rtx operands[2])
{
  /* Force both operands to SImode so the movsi pattern matches.  */
  operands[0] = copy_operand (operands[0], SImode);
  operands[1] = copy_operand (operands[1], SImode);

  emit_insn (gen_movsi (operands[0], operands[1]));
}
3595
/* Output assembler code to perform a doubleword move insn
   with operands OPERANDS.  Delegates to handle_move_double with the
   assembler-emitting callbacks; always returns the empty template since
   everything has been output via output_asm_insn.  */
const char *
output_move_double (rtx *operands)
{
  handle_move_double (operands,
		      output_reg_adjust, output_compadr, output_movsi);

  return "";
}
3606
/* Output rtl code to perform a doubleword move insn
   with operands OPERANDS.  Uses the same driver as output_move_double
   but with RTL-emitting callbacks.  */
void
m68k_emit_move_double (rtx operands[2])
{
  handle_move_double (operands, emit_reg_adjust, emit_movsi, emit_movsi);
}
dcc21c4c
PB
3614
3615/* Ensure mode of ORIG, a REG rtx, is MODE. Returns either ORIG or a
3616 new rtx with the correct mode. */
3617
3618static rtx
ef4bddc2 3619force_mode (machine_mode mode, rtx orig)
dcc21c4c
PB
3620{
3621 if (mode == GET_MODE (orig))
3622 return orig;
3623
3624 if (REGNO (orig) >= FIRST_PSEUDO_REGISTER)
3625 abort ();
3626
3627 return gen_rtx_REG (mode, REGNO (orig));
3628}
3629
/* Return nonzero if OP is a floating-point register.  Only meaningful
   once register allocation has begun (reg_renumber non-null); before
   that the function conservatively returns 0.  MODE is unused.  */
static int
fp_reg_operand (rtx op, machine_mode mode ATTRIBUTE_UNUSED)
{
  return reg_renumber && FP_REG_P (op);
}
3635
/* Emit insns to move operands[1] into operands[0].

   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.

   Note SCRATCH_REG may not be in the proper mode depending on how it
   will be used.  This routine is responsible for creating a new copy
   of SCRATCH_REG in the proper mode.

   The function handles the secondary reloads needed for FP registers:
   symbolic memory addresses and constants are routed through
   SCRATCH_REG.  */

int
emit_move_sequence (rtx *operands, machine_mode mode, rtx scratch_reg)
{
  register rtx operand0 = operands[0];
  register rtx operand1 = operands[1];
  register rtx tem;

  /* During reload, replace pseudos (in plain REG or SUBREG position)
     with their equivalent memory locations.  */
  if (scratch_reg
      && reload_in_progress && GET_CODE (operand0) == REG
      && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
    operand0 = reg_equiv_mem (REGNO (operand0));
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand0) == SUBREG
	   && GET_CODE (SUBREG_REG (operand0)) == REG
	   && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
    {
     /* We must not alter SUBREG_BYTE (operand0) since that would confuse
	the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
				 reg_equiv_mem (REGNO (SUBREG_REG (operand0))),
				 SUBREG_BYTE (operand0));
      operand0 = alter_subreg (&temp, true);
    }

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand1) == REG
      && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
    operand1 = reg_equiv_mem (REGNO (operand1));
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand1) == SUBREG
	   && GET_CODE (SUBREG_REG (operand1)) == REG
	   && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
    {
     /* We must not alter SUBREG_BYTE (operand1) since that would confuse
	the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
				 reg_equiv_mem (REGNO (SUBREG_REG (operand1))),
				 SUBREG_BYTE (operand1));
      operand1 = alter_subreg (&temp, true);
    }

  /* Apply any pending reload replacements inside MEM addresses.  */
  if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
      && ((tem = find_replacement (&XEXP (operand0, 0)))
	  != XEXP (operand0, 0)))
    operand0 = gen_rtx_MEM (GET_MODE (operand0), tem);
  if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
      && ((tem = find_replacement (&XEXP (operand1, 0)))
	  != XEXP (operand1, 0)))
    operand1 = gen_rtx_MEM (GET_MODE (operand1), tem);

  /* Handle secondary reloads for loads/stores of FP registers where
     the address is symbolic by using the scratch register */
  if (fp_reg_operand (operand0, mode)
      && ((GET_CODE (operand1) == MEM
	   && ! memory_address_p (DFmode, XEXP (operand1, 0)))
	  || ((GET_CODE (operand1) == SUBREG
	       && GET_CODE (XEXP (operand1, 0)) == MEM
	       && !memory_address_p (DFmode, XEXP (XEXP (operand1, 0), 0)))))
      && scratch_reg)
    {
      if (GET_CODE (operand1) == SUBREG)
	operand1 = XEXP (operand1, 0);

      /* SCRATCH_REG will hold an address.  We want
	 it in SImode regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (SImode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand1, 0)))
	{
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
	  emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
						       Pmode,
						       XEXP (XEXP (operand1, 0), 0),
						       scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand1, 0));
      emit_insn (gen_rtx_SET (operand0, gen_rtx_MEM (mode, scratch_reg)));
      return 1;
    }
  else if (fp_reg_operand (operand1, mode)
	   && ((GET_CODE (operand0) == MEM
		&& ! memory_address_p (DFmode, XEXP (operand0, 0)))
	       || ((GET_CODE (operand0) == SUBREG)
		   && GET_CODE (XEXP (operand0, 0)) == MEM
		   && !memory_address_p (DFmode, XEXP (XEXP (operand0, 0), 0))))
	   && scratch_reg)
    {
      if (GET_CODE (operand0) == SUBREG)
	operand0 = XEXP (operand0, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in SIMODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (SImode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand0, 0)))
	{
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
	  emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0,
								       0)),
						       Pmode,
						       XEXP (XEXP (operand0, 0),
							     0),
						       scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand0, 0));
      emit_insn (gen_rtx_SET (gen_rtx_MEM (mode, scratch_reg), operand1));
      return 1;
    }
  /* Handle secondary reloads for loads of FP registers from constant
     expressions by forcing the constant into memory.

     use scratch_reg to hold the address of the memory location.

     The proper fix is to change PREFERRED_RELOAD_CLASS to return
     NO_REGS when presented with a const_int and an register class
     containing only FP registers.  Doing so unfortunately creates
     more problems than it solves.   Fix this for 2.5.  */
  else if (fp_reg_operand (operand0, mode)
	   && CONSTANT_P (operand1)
	   && scratch_reg)
    {
      rtx xoperands[2];

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in SIMODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (SImode, scratch_reg);

      /* Force the constant into memory and put the address of the
	 memory location into scratch_reg.  */
      xoperands[0] = scratch_reg;
      xoperands[1] = XEXP (force_const_mem (mode, operand1), 0);
      emit_insn (gen_rtx_SET (scratch_reg, xoperands[1]));

      /* Now load the destination register.  */
      emit_insn (gen_rtx_SET (operand0, gen_rtx_MEM (mode, scratch_reg)));
      return 1;
    }

  /* Now have insn-emit do whatever it normally does.  */
  return 0;
}
3796
01e304f8
RZ
3797/* Split one or more DImode RTL references into pairs of SImode
3798 references. The RTL can be REG, offsettable MEM, integer constant, or
3799 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
3800 split and "num" is its length. lo_half and hi_half are output arrays
3801 that parallel "operands". */
3802
3803void
3804split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
3805{
3806 while (num--)
3807 {
3808 rtx op = operands[num];
3809
3810 /* simplify_subreg refuses to split volatile memory addresses,
3811 but we still have to handle it. */
3812 if (GET_CODE (op) == MEM)
3813 {
3814 lo_half[num] = adjust_address (op, SImode, 4);
3815 hi_half[num] = adjust_address (op, SImode, 0);
3816 }
3817 else
3818 {
3819 lo_half[num] = simplify_gen_subreg (SImode, op,
3820 GET_MODE (op) == VOIDmode
3821 ? DImode : GET_MODE (op), 4);
3822 hi_half[num] = simplify_gen_subreg (SImode, op,
3823 GET_MODE (op) == VOIDmode
3824 ? DImode : GET_MODE (op), 0);
3825 }
3826 }
3827}
3828
a40ed0f3
KH
3829/* Split X into a base and a constant offset, storing them in *BASE
3830 and *OFFSET respectively. */
3831
3832static void
3833m68k_split_offset (rtx x, rtx *base, HOST_WIDE_INT *offset)
3834{
3835 *offset = 0;
3836 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
3837 {
3838 *offset += INTVAL (XEXP (x, 1));
3839 x = XEXP (x, 0);
3840 }
3841 *base = x;
3842}
3843
/* Return true if PATTERN is a PARALLEL suitable for a movem or fmovem
   instruction.  STORE_P says whether the move is a load or store.

   If the instruction uses post-increment or pre-decrement addressing,
   AUTOMOD_BASE is the base register and AUTOMOD_OFFSET is the total
   adjustment.  This adjustment will be made by the first element of
   PARALLEL, with the loads or stores starting at element 1.  If the
   instruction does not use post-increment or pre-decrement addressing,
   AUTOMOD_BASE is null, AUTOMOD_OFFSET is 0, and the loads or stores
   start at element 0.  */

bool
m68k_movem_pattern_p (rtx pattern, rtx automod_base,
		      HOST_WIDE_INT automod_offset, bool store_p)
{
  rtx base, mem_base, set, mem, reg, last_reg;
  HOST_WIDE_INT offset, mem_offset;
  int i, first, len;
  enum reg_class rclass;

  len = XVECLEN (pattern, 0);
  /* With automodification, element 0 adjusts the base register, so the
     moves themselves start at element 1.  */
  first = (automod_base != NULL);

  if (automod_base)
    {
      /* Stores must be pre-decrement and loads must be post-increment.  */
      if (store_p != (automod_offset < 0))
	return false;

      /* Work out the base and offset for lowest memory location.  */
      base = automod_base;
      offset = (automod_offset < 0 ? automod_offset : 0);
    }
  else
    {
      /* Allow any valid base and offset in the first access.  */
      base = NULL;
      offset = 0;
    }

  last_reg = NULL;
  rclass = NO_REGS;
  for (i = first; i < len; i++)
    {
      /* We need a plain SET.  */
      set = XVECEXP (pattern, 0, i);
      if (GET_CODE (set) != SET)
	return false;

      /* Check that we have a memory location...  */
      mem = XEXP (set, !store_p);
      if (!MEM_P (mem) || !memory_operand (mem, VOIDmode))
	return false;

      /* ...with the right address.  */
      if (base == NULL)
	{
	  m68k_split_offset (XEXP (mem, 0), &base, &offset);
	  /* The ColdFire instruction only allows (An) and (d16,An) modes.
	     There are no mode restrictions for 680x0 besides the
	     automodification rules enforced above.  */
	  if (TARGET_COLDFIRE
	      && !m68k_legitimate_base_reg_p (base, reload_completed))
	    return false;
	}
      else
	{
	  /* Subsequent accesses must use the same base at consecutive
	     offsets (checked against the running OFFSET below).  */
	  m68k_split_offset (XEXP (mem, 0), &mem_base, &mem_offset);
	  if (!rtx_equal_p (base, mem_base) || offset != mem_offset)
	    return false;
	}

      /* Check that we have a register of the required mode and class.  */
      reg = XEXP (set, store_p);
      if (!REG_P (reg)
	  || !HARD_REGISTER_P (reg)
	  || GET_MODE (reg) != reg_raw_mode[REGNO (reg)])
	return false;

      if (last_reg)
	{
	  /* The register must belong to RCLASS and have a higher number
	     than the register in the previous SET.  */
	  if (!TEST_HARD_REG_BIT (reg_class_contents[rclass], REGNO (reg))
	      || REGNO (last_reg) >= REGNO (reg))
	    return false;
	}
      else
	{
	  /* Work out which register class we need.  */
	  if (INT_REGNO_P (REGNO (reg)))
	    rclass = GENERAL_REGS;
	  else if (FP_REGNO_P (REGNO (reg)))
	    rclass = FP_REGS;
	  else
	    return false;
	}

      /* Successive registers must occupy successive memory locations.  */
      last_reg = reg;
      offset += GET_MODE_SIZE (GET_MODE (reg));
    }

  /* If we have an automodification, check whether the final offset is OK.  */
  if (automod_base && offset != (automod_offset < 0 ? 0 : automod_offset))
    return false;

  /* Reject unprofitable cases.  */
  if (len < first + (rclass == FP_REGS ? MIN_FMOVEM_REGS : MIN_MOVEM_REGS))
    return false;

  return true;
}
3956
/* Return the assembly code template for a movem or fmovem instruction
   whose pattern is given by PATTERN.  Store the template's operands
   in OPERANDS.

   If the instruction uses post-increment or pre-decrement addressing,
   AUTOMOD_OFFSET is the total adjustment, otherwise it is 0.  STORE_P
   is true if this is a store instruction.  */

const char *
m68k_output_movem (rtx *operands, rtx pattern,
		   HOST_WIDE_INT automod_offset, bool store_p)
{
  unsigned int mask;
  int i, first;

  gcc_assert (GET_CODE (pattern) == PARALLEL);
  mask = 0;
  first = (automod_offset != 0);
  for (i = first; i < XVECLEN (pattern, 0); i++)
    {
      /* When using movem with pre-decrement addressing, register X + D0_REG
	 is controlled by bit 15 - X.  For all other addressing modes,
	 register X + D0_REG is controlled by bit X.  Confusingly, the
	 register mask for fmovem is in the opposite order to that for
	 movem.  */
      unsigned int regno;

      gcc_assert (MEM_P (XEXP (XVECEXP (pattern, 0, i), !store_p)));
      gcc_assert (REG_P (XEXP (XVECEXP (pattern, 0, i), store_p)));
      regno = REGNO (XEXP (XVECEXP (pattern, 0, i), store_p));
      if (automod_offset < 0)
	{
	  /* Pre-decrement: integer mask is bit-reversed, FP mask isn't.  */
	  if (FP_REGNO_P (regno))
	    mask |= 1 << (regno - FP0_REG);
	  else
	    mask |= 1 << (15 - (regno - D0_REG));
	}
      else
	{
	  /* Other modes: FP mask is bit-reversed, integer mask isn't.  */
	  if (FP_REGNO_P (regno))
	    mask |= 1 << (7 - (regno - FP0_REG));
	  else
	    mask |= 1 << (regno - D0_REG);
	}
    }
  /* Discard any tracked condition-code state.  */
  CC_STATUS_INIT;

  /* Operand 0 is the effective address: either the first access's
     address or the automodified base register.  */
  if (automod_offset == 0)
    operands[0] = XEXP (XEXP (XVECEXP (pattern, 0, first), !store_p), 0);
  else if (automod_offset < 0)
    operands[0] = gen_rtx_PRE_DEC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
  else
    operands[0] = gen_rtx_POST_INC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
  /* Operand 1 is the register mask computed above.  */
  operands[1] = GEN_INT (mask);
  if (FP_REGNO_P (REGNO (XEXP (XVECEXP (pattern, 0, first), store_p))))
    {
      if (store_p)
	return "fmovem %1,%a0";
      else
	return "fmovem %a0,%1";
    }
  else
    {
      if (store_p)
	return "movem%.l %1,%a0";
      else
	return "movem%.l %a0,%1";
    }
}
4026
79e68feb
RS
4027/* Return a REG that occurs in ADDR with coefficient 1.
4028 ADDR can be effectively incremented by incrementing REG. */
4029
4030static rtx
8a4a2253 4031find_addr_reg (rtx addr)
79e68feb
RS
4032{
4033 while (GET_CODE (addr) == PLUS)
4034 {
4035 if (GET_CODE (XEXP (addr, 0)) == REG)
4036 addr = XEXP (addr, 0);
4037 else if (GET_CODE (XEXP (addr, 1)) == REG)
4038 addr = XEXP (addr, 1);
4039 else if (CONSTANT_P (XEXP (addr, 0)))
4040 addr = XEXP (addr, 1);
4041 else if (CONSTANT_P (XEXP (addr, 1)))
4042 addr = XEXP (addr, 0);
4043 else
4761e388 4044 gcc_unreachable ();
79e68feb 4045 }
4761e388
NS
4046 gcc_assert (GET_CODE (addr) == REG);
4047 return addr;
79e68feb 4048}
9ee3c687 4049
/* Output assembler code to perform a 32-bit 3-operand add.  */

const char *
output_addsi3 (rtx *operands)
{
  if (! operands_match_p (operands[0], operands[1]))
    {
      /* Three-operand form.  Commute so that an address register, if
	 present, ends up in operands[1], where the lea templates below
	 expect the base.  */
      if (!ADDRESS_REG_P (operands[1]))
	{
	  rtx tmp = operands[1];

	  operands[1] = operands[2];
	  operands[2] = tmp;
	}

      /* These insns can result from reloads to access
	 stack slots over 64k from the frame pointer.  */
      if (GET_CODE (operands[2]) == CONST_INT
	  && (INTVAL (operands[2]) < -32768 || INTVAL (operands[2]) > 32767))
	return "move%.l %2,%0\n\tadd%.l %1,%0";
      if (GET_CODE (operands[2]) == REG)
	return MOTOROLA ? "lea (%1,%2.l),%0" : "lea %1@(0,%2:l),%0";
      return MOTOROLA ? "lea (%c2,%1),%0" : "lea %1@(%c2),%0";
    }
  if (GET_CODE (operands[2]) == CONST_INT)
    {
      /* Quick-immediate forms for |N| <= 8.  */
      if (INTVAL (operands[2]) > 0
	  && INTVAL (operands[2]) <= 8)
	return "addq%.l %2,%0";
      if (INTVAL (operands[2]) < 0
	  && INTVAL (operands[2]) >= -8)
	{
	  operands[2] = GEN_INT (- INTVAL (operands[2]));
	  return "subq%.l %2,%0";
	}
      /* On the CPU32 it is faster to use two addql instructions to
	 add a small integer (8 < N <= 16) to a register.
	 Likewise for subql.  */
      if (TUNE_CPU32 && REG_P (operands[0]))
	{
	  if (INTVAL (operands[2]) > 8
	      && INTVAL (operands[2]) <= 16)
	    {
	      operands[2] = GEN_INT (INTVAL (operands[2]) - 8);
	      return "addq%.l #8,%0\n\taddq%.l %2,%0";
	    }
	  if (INTVAL (operands[2]) < -8
	      && INTVAL (operands[2]) >= -16)
	    {
	      operands[2] = GEN_INT (- INTVAL (operands[2]) - 8);
	      return "subq%.l #8,%0\n\tsubq%.l %2,%0";
	    }
	}
      /* 16-bit immediate into an address register: add%.w on the '040,
	 lea elsewhere.  */
      if (ADDRESS_REG_P (operands[0])
	  && INTVAL (operands[2]) >= -0x8000
	  && INTVAL (operands[2]) < 0x8000)
	{
	  if (TUNE_68040)
	    return "add%.w %2,%0";
	  else
	    return MOTOROLA ? "lea (%c2,%0),%0" : "lea %0@(%c2),%0";
	}
    }
  return "add%.l %2,%0";
}
79e68feb
RS
4115\f
/* Store in cc_status the expressions that the condition codes will
   describe after execution of an instruction whose pattern is EXP.
   Do not alter them if the instruction would not alter the cc's.  */

/* On the 68000, all the insns to store in an address register fail to
   set the cc's.  However, in some cases these instructions can make it
   possibly invalid to use the saved cc's.  In those cases we clear out
   some or all of the saved cc's so they won't be used.  */

void
notice_update_cc (rtx exp, rtx insn)
{
  if (GET_CODE (exp) == SET)
    {
      if (GET_CODE (SET_SRC (exp)) == CALL)
	CC_STATUS_INIT;
      else if (ADDRESS_REG_P (SET_DEST (exp)))
	{
	  /* Address-register sets don't touch the cc's, but they may
	     clobber registers the saved cc expressions mention.  */
	  if (cc_status.value1 && modified_in_p (cc_status.value1, insn))
	    cc_status.value1 = 0;
	  if (cc_status.value2 && modified_in_p (cc_status.value2, insn))
	    cc_status.value2 = 0;
	}
      /* fmoves to memory or data registers do not set the condition
	 codes.  Normal moves _do_ set the condition codes, but not in
	 a way that is appropriate for comparison with 0, because -0.0
	 would be treated as a negative nonzero number.  Note that it
	 isn't appropriate to conditionalize this restriction on
	 HONOR_SIGNED_ZEROS because that macro merely indicates whether
	 we care about the difference between -0.0 and +0.0.  */
      else if (!FP_REG_P (SET_DEST (exp))
	       && SET_DEST (exp) != cc0_rtx
	       && (FP_REG_P (SET_SRC (exp))
		   || GET_CODE (SET_SRC (exp)) == FIX
		   || FLOAT_MODE_P (GET_MODE (SET_DEST (exp)))))
	CC_STATUS_INIT;
      /* A pair of move insns doesn't produce a useful overall cc.  */
      else if (!FP_REG_P (SET_DEST (exp))
	       && !FP_REG_P (SET_SRC (exp))
	       && GET_MODE_SIZE (GET_MODE (SET_SRC (exp))) > 4
	       && (GET_CODE (SET_SRC (exp)) == REG
		   || GET_CODE (SET_SRC (exp)) == MEM
		   || GET_CODE (SET_SRC (exp)) == CONST_DOUBLE))
	CC_STATUS_INIT;
      else if (SET_DEST (exp) != pc_rtx)
	{
	  /* Record that the cc's describe DEST compared against 0
	     (and SRC as an equivalent value).  */
	  cc_status.flags = 0;
	  cc_status.value1 = SET_DEST (exp);
	  cc_status.value2 = SET_SRC (exp);
	}
    }
  else if (GET_CODE (exp) == PARALLEL
	   && GET_CODE (XVECEXP (exp, 0, 0)) == SET)
    {
      /* Only the first SET of a PARALLEL is considered.  */
      rtx dest = SET_DEST (XVECEXP (exp, 0, 0));
      rtx src = SET_SRC (XVECEXP (exp, 0, 0));

      if (ADDRESS_REG_P (dest))
	CC_STATUS_INIT;
      else if (dest != pc_rtx)
	{
	  cc_status.flags = 0;
	  cc_status.value1 = dest;
	  cc_status.value2 = src;
	}
    }
  else
    CC_STATUS_INIT;
  if (cc_status.value2 != 0
      && ADDRESS_REG_P (cc_status.value2)
      && GET_MODE (cc_status.value2) == QImode)
    CC_STATUS_INIT;
  if (cc_status.value2 != 0)
    switch (GET_CODE (cc_status.value2))
      {
      case ASHIFT: case ASHIFTRT: case LSHIFTRT:
      case ROTATE: case ROTATERT:
	/* These instructions always clear the overflow bit, and set
	   the carry to the bit shifted out.  */
	cc_status.flags |= CC_OVERFLOW_UNUSABLE | CC_NO_CARRY;
	break;

      case PLUS: case MINUS: case MULT:
      case DIV: case UDIV: case MOD: case UMOD: case NEG:
	if (GET_MODE (cc_status.value2) != VOIDmode)
	  cc_status.flags |= CC_NO_OVERFLOW;
	break;
      case ZERO_EXTEND:
	/* (SET r1 (ZERO_EXTEND r2)) on this machine
	   ends with a move insn moving r2 in r2's mode.
	   Thus, the cc's are set for r2.
	   This can set N bit spuriously.  */
	cc_status.flags |= CC_NOT_NEGATIVE;
	/* FALLTHRU -- harmless, as the default case only breaks.  */

      default:
	break;
      }
  /* Drop value2 if it mentions a register value1 overwrites.  */
  if (cc_status.value1 && GET_CODE (cc_status.value1) == REG
      && cc_status.value2
      && reg_overlap_mentioned_p (cc_status.value1, cc_status.value2))
    cc_status.value2 = 0;
  /* Check for PRE_DEC in dest modifying a register used in src.  */
  if (cc_status.value1 && GET_CODE (cc_status.value1) == MEM
      && GET_CODE (XEXP (cc_status.value1, 0)) == PRE_DEC
      && cc_status.value2
      && reg_overlap_mentioned_p (XEXP (XEXP (cc_status.value1, 0), 0),
				  cc_status.value2))
    cc_status.value2 = 0;
  if (((cc_status.value1 && FP_REG_P (cc_status.value1))
       || (cc_status.value2 && FP_REG_P (cc_status.value2))))
    cc_status.flags = CC_IN_68881;
  if (cc_status.value2 && GET_CODE (cc_status.value2) == COMPARE
      && GET_MODE_CLASS (GET_MODE (XEXP (cc_status.value2, 0))) == MODE_FLOAT)
    {
      cc_status.flags = CC_IN_68881;
      if (!FP_REG_P (XEXP (cc_status.value2, 0))
	  && FP_REG_P (XEXP (cc_status.value2, 1)))
	cc_status.flags |= CC_REVERSED;
    }
}
4236\f
5505f548 4237const char *
8a4a2253 4238output_move_const_double (rtx *operands)
79e68feb 4239{
1a8965c4 4240 int code = standard_68881_constant_p (operands[1]);
79e68feb 4241
1a8965c4 4242 if (code != 0)
79e68feb 4243 {
1a8965c4 4244 static char buf[40];
79e68feb 4245
3b4b85c9 4246 sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff);
1a8965c4 4247 return buf;
79e68feb 4248 }
1a8965c4 4249 return "fmove%.d %1,%0";
79e68feb
RS
4250}
4251
5505f548 4252const char *
8a4a2253 4253output_move_const_single (rtx *operands)
79e68feb 4254{
1a8965c4 4255 int code = standard_68881_constant_p (operands[1]);
79e68feb 4256
1a8965c4 4257 if (code != 0)
79e68feb 4258 {
1a8965c4 4259 static char buf[40];
79e68feb 4260
3b4b85c9 4261 sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff);
1a8965c4 4262 return buf;
79e68feb 4263 }
1a8965c4 4264 return "fmove%.s %f1,%0";
79e68feb
RS
4265}
4266
/* Return nonzero if X, a CONST_DOUBLE, has a value that we can get
   from the "fmovecr" instruction.
   The value, anded with 0xff, gives the code to use in fmovecr
   to get the desired constant.  */

/* This code has been fixed for cross-compilation.  */

/* Nonzero once values_68881 has been populated.  */
static int inited_68881_table = 0;

/* Decimal spellings of the 68881 constant-ROM values we recognize.  */
static const char *const strings_68881[7] = {
  "0.0",
  "1.0",
  "10.0",
  "100.0",
  "10000.0",
  "1e8",
  "1e16"
};

/* fmovecr ROM offsets, parallel to strings_68881.  */
static const int codes_68881[7] = {
  0x0f,
  0x32,
  0x33,
  0x34,
  0x35,
  0x36,
  0x37
};

REAL_VALUE_TYPE values_68881[7];

/* Set up values_68881 array by converting the decimal values
   strings_68881 to binary.  */

void
init_68881_table (void)
{
  int i;
  REAL_VALUE_TYPE r;
  machine_mode mode;

  /* The first six entries are parsed in SFmode; the last ("1e16")
     needs DFmode precision.  */
  mode = SFmode;
  for (i = 0; i < 7; i++)
    {
      if (i == 6)
	mode = DFmode;
      r = REAL_VALUE_ATOF (strings_68881[i], mode);
      values_68881[i] = r;
    }
  inited_68881_table = 1;
}
79e68feb
RS
4318
/* Return the fmovecr code (see codes_68881) matching the CONST_DOUBLE X,
   or 0 if X is not an available 68881 ROM constant.  */

int
standard_68881_constant_p (rtx x)
{
  REAL_VALUE_TYPE r;
  int i;

  /* fmovecr must be emulated on the 68040 and 68060, so it shouldn't be
     used at all on those chips.  */
  if (TUNE_68040_60)
    return 0;

  /* Lazily build the table of recognized values.  */
  if (! inited_68881_table)
    init_68881_table ();

  REAL_VALUE_FROM_CONST_DOUBLE (r, x);

  /* Use REAL_VALUES_IDENTICAL instead of REAL_VALUES_EQUAL so that -0.0
     is rejected.  */
  for (i = 0; i < 6; i++)
    {
      if (REAL_VALUES_IDENTICAL (r, values_68881[i]))
	return (codes_68881[i]);
    }

  /* The seventh entry (1e16) is only exact in double precision.  */
  if (GET_MODE (x) == SFmode)
    return 0;

  if (REAL_VALUES_EQUAL (r, values_68881[6]))
    return (codes_68881[6]);

  /* larger powers of ten in the constants ram are not used
     because they are not equal to a `double' C constant.  */
  return 0;
}
4353
/* If X is a floating-point constant, return the logarithm of X base 2,
   or 0 if X is not a power of 2.  */

int
floating_exact_log2 (rtx x)
{
  REAL_VALUE_TYPE r, r1;
  int exp;

  REAL_VALUE_FROM_CONST_DOUBLE (r, x);

  /* Values less than 1.0 (including all negatives) are never matched,
     which keeps the return value 0 unambiguous.  */
  if (REAL_VALUES_LESS (r, dconst1))
    return 0;

  /* X is a power of two iff it equals 2**exponent exactly.  */
  exp = real_exponent (&r);
  real_2expN (&r1, exp, DFmode);
  if (REAL_VALUES_EQUAL (r1, r))
    return exp;

  return 0;
}
4375\f
79e68feb
RS
/* A C compound statement to output to stdio stream STREAM the
   assembler syntax for an instruction operand X.  X is an RTL
   expression.

   CODE is a value that can be used to specify one of several ways
   of printing the operand.  It is used when identical operands
   must be printed differently depending on the context.  CODE
   comes from the `%' specification that was used to request
   printing of the operand.  If the specification was just `%DIGIT'
   then CODE is 0; if the specification was `%LTR DIGIT' then CODE
   is the ASCII code for LTR.

   If X is a register, this macro should print the register's name.
   The names can be found in an array `reg_names' whose type is
   `char *[]'.  `reg_names' is initialized from `REGISTER_NAMES'.

   When the machine description has a specification `%PUNCT' (a `%'
   followed by a punctuation character), this macro is called with
   a null pointer for X and the punctuation character for CODE.

   The m68k specific codes are:

   '.' for dot needed in Motorola-style opcode names.
   '-' for an operand pushing on the stack:
       sp@-, -(sp) or -(%sp) depending on the style of syntax.
   '+' for an operand pushing on the stack:
       sp@+, (sp)+ or (%sp)+ depending on the style of syntax.
   '@' for a reference to the top word on the stack:
       sp@, (sp) or (%sp) depending on the style of syntax.
   '#' for an immediate operand prefix (# in MIT and Motorola syntax
       but & in SGS syntax).
   '!' for the cc register (used in an `and to cc' insn).
   '$' for the letter `s' in an op code, but only on the 68040.
   '&' for the letter `d' in an op code, but only on the 68040.
   '/' for register prefix needed by longlong.h.
   '?' for m68k_library_id_string

   'b' for byte insn (no effect, on the Sun; this is for the ISI).
   'd' to force memory addressing to be absolute, not relative.
   'f' for float insn (print a CONST_DOUBLE as a float rather than in hex)
   'x' for float insn (print a CONST_DOUBLE as a float rather than in hex),
       or print pair of registers as rx:ry.
   'p' print an address with @PLTPC attached, but only if the operand
       is not locally-bound.  */

void
print_operand (FILE *file, rtx op, int letter)
{
  /* Punctuation codes come first; OP is null for these.  */
  if (letter == '.')
    {
      if (MOTOROLA)
	fprintf (file, ".");
    }
  else if (letter == '#')
    asm_fprintf (file, "%I");
  else if (letter == '-')
    asm_fprintf (file, MOTOROLA ? "-(%Rsp)" : "%Rsp@-");
  else if (letter == '+')
    asm_fprintf (file, MOTOROLA ? "(%Rsp)+" : "%Rsp@+");
  else if (letter == '@')
    asm_fprintf (file, MOTOROLA ? "(%Rsp)" : "%Rsp@");
  else if (letter == '!')
    asm_fprintf (file, "%Rfpcr");
  else if (letter == '$')
    {
      if (TARGET_68040)
	fprintf (file, "s");
    }
  else if (letter == '&')
    {
      if (TARGET_68040)
	fprintf (file, "d");
    }
  else if (letter == '/')
    asm_fprintf (file, "%R");
  else if (letter == '?')
    asm_fprintf (file, m68k_library_id_string);
  else if (letter == 'p')
    {
      output_addr_const (file, op);
      if (!(GET_CODE (op) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (op)))
	fprintf (file, "@PLTPC");
    }
  else if (GET_CODE (op) == REG)
    {
      if (letter == 'R')
	/* Print out the second register name of a register pair.
	   I.e., R (6) => 7.  */
	fputs (M68K_REGNAME(REGNO (op) + 1), file);
      else
	fputs (M68K_REGNAME(REGNO (op)), file);
    }
  else if (GET_CODE (op) == MEM)
    {
      output_address (XEXP (op, 0));
      /* 'd': force a .l/:l size suffix on absolute addresses that would
	 otherwise fit a 16-bit displacement.  */
      if (letter == 'd' && ! TARGET_68020
	  && CONSTANT_ADDRESS_P (XEXP (op, 0))
	  && !(GET_CODE (XEXP (op, 0)) == CONST_INT
	       && INTVAL (XEXP (op, 0)) < 0x8000
	       && INTVAL (XEXP (op, 0)) >= -0x8000))
	fprintf (file, MOTOROLA ? ".l" : ":l");
    }
  /* Floating-point immediates are printed as hex images of the target
     representation (1, 3 or 2 longs for SF, XF, DF).  */
  else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == SFmode)
    {
      REAL_VALUE_TYPE r;
      long l;
      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, l);
      asm_fprintf (file, "%I0x%lx", l & 0xFFFFFFFF);
    }
  else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == XFmode)
    {
      REAL_VALUE_TYPE r;
      long l[3];
      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
      asm_fprintf (file, "%I0x%lx%08lx%08lx", l[0] & 0xFFFFFFFF,
		   l[1] & 0xFFFFFFFF, l[2] & 0xFFFFFFFF);
    }
  else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == DFmode)
    {
      REAL_VALUE_TYPE r;
      long l[2];
      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_DOUBLE (r, l);
      asm_fprintf (file, "%I0x%lx%08lx", l[0] & 0xFFFFFFFF, l[1] & 0xFFFFFFFF);
    }
  else
    {
      /* Use `print_operand_address' instead of `output_addr_const'
	 to ensure that we print relevant PIC stuff.  */
      asm_fprintf (file, "%I");
      if (TARGET_PCREL
	  && (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == CONST))
	print_operand_address (file, op);
      else
	output_addr_const (file, op);
    }
}
4515
75df395f
MK
4516/* Return string for TLS relocation RELOC. */
4517
4518static const char *
4519m68k_get_reloc_decoration (enum m68k_reloc reloc)
4520{
4521 /* To my knowledge, !MOTOROLA assemblers don't support TLS. */
4522 gcc_assert (MOTOROLA || reloc == RELOC_GOT);
4523
4524 switch (reloc)
4525 {
4526 case RELOC_GOT:
4527 if (MOTOROLA)
4528 {
4529 if (flag_pic == 1 && TARGET_68020)
4530 return "@GOT.w";
4531 else
4532 return "@GOT";
4533 }
4534 else
4535 {
4536 if (TARGET_68020)
4537 {
4538 switch (flag_pic)
4539 {
4540 case 1:
4541 return ":w";
4542 case 2:
4543 return ":l";
4544 default:
4545 return "";
4546 }
4547 }
4548 }
4549
4550 case RELOC_TLSGD:
4551 return "@TLSGD";
4552
4553 case RELOC_TLSLDM:
4554 return "@TLSLDM";
4555
4556 case RELOC_TLSLDO:
4557 return "@TLSLDO";
4558
4559 case RELOC_TLSIE:
4560 return "@TLSIE";
4561
4562 case RELOC_TLSLE:
4563 return "@TLSLE";
4564
4565 default:
4566 gcc_unreachable ();
4567 }
4568}
4569
cb69db4f 4570/* m68k implementation of TARGET_OUTPUT_ADDR_CONST_EXTRA. */
884316ff 4571
cb69db4f 4572static bool
884316ff
JM
4573m68k_output_addr_const_extra (FILE *file, rtx x)
4574{
75df395f
MK
4575 if (GET_CODE (x) == UNSPEC)
4576 {
4577 switch (XINT (x, 1))
4578 {
4579 case UNSPEC_RELOC16:
4580 case UNSPEC_RELOC32:
4581 output_addr_const (file, XVECEXP (x, 0, 0));
f878882b
AS
4582 fputs (m68k_get_reloc_decoration
4583 ((enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1))), file);
75df395f 4584 return true;
884316ff 4585
75df395f
MK
4586 default:
4587 break;
4588 }
4589 }
4590
4591 return false;
4592}
4593
4594/* M68K implementation of TARGET_ASM_OUTPUT_DWARF_DTPREL. */
4595
4596static void
4597m68k_output_dwarf_dtprel (FILE *file, int size, rtx x)
4598{
4599 gcc_assert (size == 4);
4600 fputs ("\t.long\t", file);
4601 output_addr_const (file, x);
4602 fputs ("@TLSLDO+0x8000", file);
884316ff
JM
4603}
4604
7b0f476d
AS
/* In the name of slightly smaller debug output, and to cater to
   general assembler lossage, recognize various UNSPEC sequences
   and turn them back into a direct symbol reference.  */

static rtx
m68k_delegitimize_address (rtx orig_x)
{
  rtx x;
  struct m68k_address addr;
  rtx unspec;

  orig_x = delegitimize_mem_from_attrs (orig_x);
  x = orig_x;
  if (MEM_P (x))
    x = XEXP (x, 0);

  if (GET_CODE (x) != PLUS || GET_MODE (x) != Pmode)
    return orig_x;

  /* Only addresses whose constant part is a CONST wrapper are
     candidates for containing a relocation UNSPEC.  */
  if (!m68k_decompose_address (GET_MODE (x), x, false, &addr)
      || addr.offset == NULL_RTX
      || GET_CODE (addr.offset) != CONST)
    return orig_x;

  /* Peel an optional (unspec + const_int) addend before checking the
     UNSPEC code.  */
  unspec = XEXP (addr.offset, 0);
  if (GET_CODE (unspec) == PLUS && CONST_INT_P (XEXP (unspec, 1)))
    unspec = XEXP (unspec, 0);
  if (GET_CODE (unspec) != UNSPEC
      || (XINT (unspec, 1) != UNSPEC_RELOC16
	  && XINT (unspec, 1) != UNSPEC_RELOC32))
    return orig_x;
  x = XVECEXP (unspec, 0, 0);
  gcc_assert (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF);
  /* Re-attach the constant addend, if there was one.  */
  if (unspec != XEXP (addr.offset, 0))
    x = gen_rtx_PLUS (Pmode, x, XEXP (XEXP (addr.offset, 0), 1));
  /* Rebuild the rest of the decomposed address around the bare symbol.  */
  if (addr.index)
    {
      rtx idx = addr.index;
      if (addr.scale != 1)
	idx = gen_rtx_MULT (Pmode, idx, GEN_INT (addr.scale));
      x = gen_rtx_PLUS (Pmode, idx, x);
    }
  if (addr.base)
    x = gen_rtx_PLUS (Pmode, addr.base, x);
  if (MEM_P (orig_x))
    x = replace_equiv_address_nv (orig_x, x);
  return x;
}
4653
79e68feb
RS
4654\f
/* A C compound statement to output to stdio stream STREAM the
   assembler syntax for an instruction operand that is a memory
   reference whose address is ADDR.  ADDR is an RTL expression.

   Note that this contains a kludge that knows that the only reason
   we have an address (plus (label_ref...) (reg...)) when not generating
   PIC code is in the insn before a tablejump, and we know that m68k.md
   generates a label LInnn: on such an insn.

   It is possible for PIC to generate a (plus (label_ref...) (reg...))
   and we handle that just like we would a (plus (symbol_ref...) (reg...)).

   This routine is responsible for distinguishing between -fpic and -fPIC
   style relocations in an address.  When generating -fpic code the
   offset is output in word mode (e.g. movel a5@(_foo:w), a0).  When generating
   -fPIC code the offset is output in long mode (e.g. movel a5@(_foo:l), a0) */

void
print_operand_address (FILE *file, rtx addr)
{
  struct m68k_address address;

  if (!m68k_decompose_address (QImode, addr, true, &address))
    gcc_unreachable ();

  if (address.code == PRE_DEC)
    fprintf (file, MOTOROLA ? "-(%s)" : "%s@-",
	     M68K_REGNAME (REGNO (address.base)));
  else if (address.code == POST_INC)
    fprintf (file, MOTOROLA ? "(%s)+" : "%s@+",
	     M68K_REGNAME (REGNO (address.base)));
  else if (!address.base && !address.index)
    {
      /* A constant address.  */
      gcc_assert (address.offset == addr);
      if (GET_CODE (addr) == CONST_INT)
	{
	  /* (xxx).w or (xxx).l.  */
	  if (IN_RANGE (INTVAL (addr), -0x8000, 0x7fff))
	    fprintf (file, MOTOROLA ? "%d.w" : "%d:w", (int) INTVAL (addr));
	  else
	    fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (addr));
	}
      else if (TARGET_PCREL)
	{
	  /* (d16,PC) or (bd,PC,Xn) (with suppressed index register).  */
	  fputc ('(', file);
	  output_addr_const (file, addr);
	  asm_fprintf (file, flag_pic == 1 ? ":w,%Rpc)" : ":l,%Rpc)");
	}
      else
	{
	  /* (xxx).l.  We need a special case for SYMBOL_REF if the symbol
	     name ends in `.<letter>', as the last 2 characters can be
	     mistaken as a size suffix.  Put the name in parentheses.  */
	  if (GET_CODE (addr) == SYMBOL_REF
	      && strlen (XSTR (addr, 0)) > 2
	      && XSTR (addr, 0)[strlen (XSTR (addr, 0)) - 2] == '.')
	    {
	      putc ('(', file);
	      output_addr_const (file, addr);
	      putc (')', file);
	    }
	  else
	    output_addr_const (file, addr);
	}
    }
  else
    {
      int labelno;

      /* If ADDR is a (d8,pc,Xn) address, this is the number of the
	 label being accessed, otherwise it is -1.  */
      labelno = (address.offset
		 && !address.base
		 && GET_CODE (address.offset) == LABEL_REF
		 ? CODE_LABEL_NUMBER (XEXP (address.offset, 0))
		 : -1);
      if (MOTOROLA)
	{
	  /* Print the "offset(base" component.  */
	  if (labelno >= 0)
	    asm_fprintf (file, "%LL%d(%Rpc,", labelno);
	  else
	    {
	      if (address.offset)
		output_addr_const (file, address.offset);

	      putc ('(', file);
	      if (address.base)
		fputs (M68K_REGNAME (REGNO (address.base)), file);
	    }
	  /* Print the ",index" component, if any.  */
	  if (address.index)
	    {
	      if (address.base)
		putc (',', file);
	      fprintf (file, "%s.%c",
		       M68K_REGNAME (REGNO (address.index)),
		       GET_MODE (address.index) == HImode ? 'w' : 'l');
	      if (address.scale != 1)
		fprintf (file, "*%d", address.scale);
	    }
	  putc (')', file);
	}
      else /* !MOTOROLA */
	{
	  if (!address.offset && !address.index)
	    fprintf (file, "%s@", M68K_REGNAME (REGNO (address.base)));
	  else
	    {
	      /* Print the "base@(offset" component.  */
	      if (labelno >= 0)
		asm_fprintf (file, "%Rpc@(%LL%d", labelno);
	      else
		{
		  if (address.base)
		    fputs (M68K_REGNAME (REGNO (address.base)), file);
		  fprintf (file, "@(");
		  if (address.offset)
		    output_addr_const (file, address.offset);
		}
	      /* Print the ",index" component, if any.  */
	      if (address.index)
		{
		  fprintf (file, ",%s:%c",
			   M68K_REGNAME (REGNO (address.index)),
			   GET_MODE (address.index) == HImode ? 'w' : 'l');
		  if (address.scale != 1)
		    fprintf (file, ":%d", address.scale);
		}
	      putc (')', file);
	    }
	}
    }
}
af13f02d
JW
4791\f
/* Check for cases where a clr insns can be omitted from code using
   strict_low_part sets.  For example, the second clrl here is not needed:
   clrl d0; movw a0@+,d0; use d0; clrl d0; movw a0@+; use d0; ...

   MODE is the mode of this STRICT_LOW_PART set.  FIRST_INSN is the clear
   insn we are checking for redundancy.  TARGET is the register set by the
   clear insn.  */

bool
strict_low_part_peephole_ok (machine_mode mode, rtx_insn *first_insn,
			     rtx target)
{
  rtx_insn *p = first_insn;

  /* Scan backwards for an earlier insn that already guarantees the
     high part of TARGET is zero.  */
  while ((p = PREV_INSN (p)))
    {
      /* Do not look beyond the start of the basic block.  */
      if (NOTE_INSN_BASIC_BLOCK_P (p))
	return false;

      if (NOTE_P (p))
	continue;

      /* If it isn't an insn, then give up.  */
      if (!INSN_P (p))
	return false;

      if (reg_set_p (target, p))
	{
	  rtx set = single_set (p);
	  rtx dest;

	  /* If it isn't an easy to recognize insn, then give up.  */
	  if (! set)
	    return false;

	  dest = SET_DEST (set);

	  /* If this sets the entire target register to zero, then our
	     first_insn is redundant.  */
	  if (rtx_equal_p (dest, target)
	      && SET_SRC (set) == const0_rtx)
	    return true;
	  else if (GET_CODE (dest) == STRICT_LOW_PART
		   && GET_CODE (XEXP (dest, 0)) == REG
		   && REGNO (XEXP (dest, 0)) == REGNO (target)
		   && (GET_MODE_SIZE (GET_MODE (XEXP (dest, 0)))
		       <= GET_MODE_SIZE (mode)))
	    /* This is a strict low part set which modifies less than
	       we are using, so it is safe.  */
	    ;
	  else
	    return false;
	}
    }

  return false;
}
67cd4f83 4849
2c8ec431
DL
4850/* Operand predicates for implementing asymmetric pc-relative addressing
4851 on m68k. The m68k supports pc-relative addressing (mode 7, register 2)
dab66575 4852 when used as a source operand, but not as a destination operand.
2c8ec431
DL
4853
4854 We model this by restricting the meaning of the basic predicates
4855 (general_operand, memory_operand, etc) to forbid the use of this
4856 addressing mode, and then define the following predicates that permit
4857 this addressing mode. These predicates can then be used for the
4858 source operands of the appropriate instructions.
4859
4860 n.b. While it is theoretically possible to change all machine patterns
 4861 to use this addressing mode where permitted by the architecture,
4862 it has only been implemented for "common" cases: SImode, HImode, and
 4863 QImode operands, and only for the principal operations that would
4864 require this addressing mode: data movement and simple integer operations.
4865
4866 In parallel with these new predicates, two new constraint letters
4867 were defined: 'S' and 'T'. 'S' is the -mpcrel analog of 'm'.
4868 'T' replaces 's' in the non-pcrel case. It is a no-op in the pcrel case.
4869 In the pcrel case 's' is only valid in combination with 'a' registers.
4870 See addsi3, subsi3, cmpsi, and movsi patterns for a better understanding
4871 of how these constraints are used.
4872
4873 The use of these predicates is strictly optional, though patterns that
4874 don't will cause an extra reload register to be allocated where one
4875 was not necessary:
4876
4877 lea (abc:w,%pc),%a0 ; need to reload address
4878 moveq &1,%d1 ; since write to pc-relative space
4879 movel %d1,%a0@ ; is not allowed
4880 ...
4881 lea (abc:w,%pc),%a1 ; no need to reload address here
4882 movel %a1@,%d0 ; since "movel (abc:w,%pc),%d0" is ok
4883
4884 For more info, consult tiemann@cygnus.com.
4885
4886
4887 All of the ugliness with predicates and constraints is due to the
4888 simple fact that the m68k does not allow a pc-relative addressing
4889 mode as a destination. gcc does not distinguish between source and
4890 destination addresses. Hence, if we claim that pc-relative address
331d9186 4891 modes are valid, e.g. TARGET_LEGITIMATE_ADDRESS_P accepts them, then we
2c8ec431
DL
4892 end up with invalid code. To get around this problem, we left
4893 pc-relative modes as invalid addresses, and then added special
4894 predicates and constraints to accept them.
4895
4896 A cleaner way to handle this is to modify gcc to distinguish
4897 between source and destination addresses. We can then say that
4898 pc-relative is a valid source address but not a valid destination
4899 address, and hopefully avoid a lot of the predicate and constraint
4900 hackery. Unfortunately, this would be a pretty big change. It would
4901 be a useful change for a number of ports, but there aren't any current
4902 plans to undertake this.
4903
4904 ***************************************************************************/
4905
4906
/* Output the assembler template for a 32-bit AND whose operands are in
   OPERANDS (destination in operands[0], mask in operands[2]).  When the
   mask is a suitable constant, this rewrites OPERANDS in place so the
   returned template emits a cheaper 16-bit "and"/"clr" or a single
   "bclr" instead of a full "and.l".  */
const char *
output_andsi3 (rtx *operands)
{
  int logval;
  /* Mask has all upper 16 bits set: only the low word changes, so a
     word-sized AND suffices (not available on ColdFire).  */
  if (GET_CODE (operands[2]) == CONST_INT
      && (INTVAL (operands[2]) | 0xffff) == -1
      && (DATA_REG_P (operands[0])
	  || offsettable_memref_p (operands[0]))
      && !TARGET_COLDFIRE)
    {
      if (GET_CODE (operands[0]) != REG)
	operands[0] = adjust_address (operands[0], HImode, 2);
      /* NOTE(review): relies on GEN_INT returning the shared const0_rtx
	 for zero so the pointer comparison below works — presumed from
	 GEN_INT's interning of small constants.  */
      operands[2] = GEN_INT (INTVAL (operands[2]) & 0xffff);
      /* Do not delete a following tstl %0 insn; that would be incorrect.  */
      CC_STATUS_INIT;
      if (operands[2] == const0_rtx)
	return "clr%.w %0";
      return "and%.w %2,%0";
    }
  /* Mask clears exactly one bit: use bclr on that bit.  */
  if (GET_CODE (operands[2]) == CONST_INT
      && (logval = exact_log2 (~ INTVAL (operands[2]) & 0xffffffff)) >= 0
      && (DATA_REG_P (operands[0])
	  || offsettable_memref_p (operands[0])))
    {
      if (DATA_REG_P (operands[0]))
	operands[1] = GEN_INT (logval);
      else
	{
	  /* For memory, address the individual byte containing the bit.  */
	  operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
	  operands[1] = GEN_INT (logval % 8);
	}
      /* This does not set condition codes in a standard way.  */
      CC_STATUS_INIT;
      return "bclr %1,%0";
    }
  return "and%.l %2,%0";
}
4944
/* Output the assembler template for a 32-bit inclusive OR (destination
   in operands[0], source in operands[2]).  Rewrites OPERANDS in place
   to use a 16-bit "or"/"mov" or a single "bset" when the constant
   source permits.  */
const char *
output_iorsi3 (rtx *operands)
{
  register int logval;
  /* Constant fits in the low 16 bits: a word-sized OR is enough
     (not available on ColdFire).  */
  if (GET_CODE (operands[2]) == CONST_INT
      && INTVAL (operands[2]) >> 16 == 0
      && (DATA_REG_P (operands[0])
	  || offsettable_memref_p (operands[0]))
      && !TARGET_COLDFIRE)
    {
      if (GET_CODE (operands[0]) != REG)
	operands[0] = adjust_address (operands[0], HImode, 2);
      /* Do not delete a following tstl %0 insn; that would be incorrect.  */
      CC_STATUS_INIT;
      /* OR with 0xffff sets the whole low word; a move is shorter.  */
      if (INTVAL (operands[2]) == 0xffff)
	return "mov%.w %2,%0";
      return "or%.w %2,%0";
    }
  /* Constant sets exactly one bit: use bset on that bit.  */
  if (GET_CODE (operands[2]) == CONST_INT
      && (logval = exact_log2 (INTVAL (operands[2]) & 0xffffffff)) >= 0
      && (DATA_REG_P (operands[0])
	  || offsettable_memref_p (operands[0])))
    {
      if (DATA_REG_P (operands[0]))
	operands[1] = GEN_INT (logval);
      else
	{
	  /* For memory, address the individual byte containing the bit.  */
	  operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
	  operands[1] = GEN_INT (logval % 8);
	}
      /* bset does not set condition codes in a standard way.  */
      CC_STATUS_INIT;
      return "bset %1,%0";
    }
  return "or%.l %2,%0";
}
4980
/* Output the assembler template for a 32-bit exclusive OR (destination
   in operands[0], source in operands[2]).  Rewrites OPERANDS in place
   to use a 16-bit "eor"/"not" or a single "bchg" when the constant
   source permits.  */
const char *
output_xorsi3 (rtx *operands)
{
  register int logval;
  /* Constant fits in the low 16 bits: a word-sized EOR is enough
     (not available on ColdFire).  */
  if (GET_CODE (operands[2]) == CONST_INT
      && INTVAL (operands[2]) >> 16 == 0
      && (offsettable_memref_p (operands[0]) || DATA_REG_P (operands[0]))
      && !TARGET_COLDFIRE)
    {
      if (! DATA_REG_P (operands[0]))
	operands[0] = adjust_address (operands[0], HImode, 2);
      /* Do not delete a following tstl %0 insn; that would be incorrect.  */
      CC_STATUS_INIT;
      /* XOR with 0xffff inverts the whole low word; "not" is shorter.  */
      if (INTVAL (operands[2]) == 0xffff)
	return "not%.w %0";
      return "eor%.w %2,%0";
    }
  /* Constant flips exactly one bit: use bchg on that bit.  */
  if (GET_CODE (operands[2]) == CONST_INT
      && (logval = exact_log2 (INTVAL (operands[2]) & 0xffffffff)) >= 0
      && (DATA_REG_P (operands[0])
	  || offsettable_memref_p (operands[0])))
    {
      if (DATA_REG_P (operands[0]))
	operands[1] = GEN_INT (logval);
      else
	{
	  /* For memory, address the individual byte containing the bit.  */
	  operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
	  operands[1] = GEN_INT (logval % 8);
	}
      /* bchg does not set condition codes in a standard way.  */
      CC_STATUS_INIT;
      return "bchg %1,%0";
    }
  return "eor%.l %2,%0";
}
7c262518 5015
29ca003a
RS
5016/* Return the instruction that should be used for a call to address X,
5017 which is known to be in operand 0. */
5018
5019const char *
5020output_call (rtx x)
5021{
5022 if (symbolic_operand (x, VOIDmode))
5023 return m68k_symbolic_call;
5024 else
5025 return "jsr %a0";
5026}
5027
f7e70894
RS
5028/* Likewise sibling calls. */
5029
5030const char *
5031output_sibcall (rtx x)
5032{
5033 if (symbolic_operand (x, VOIDmode))
5034 return m68k_symbolic_jump;
5035 else
5036 return "jmp %a0";
5037}
5038
/* Output the assembler code for a thunk that adjusts the incoming
   `this' pointer (stored at 4(%sp)) by DELTA and, if VCALL_OFFSET is
   nonzero, additionally by the value found at *(*this + VCALL_OFFSET),
   then tail-calls FUNCTION.  Worker for the mi-thunk output hook.  */
static void
m68k_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
		      HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
		      tree function)
{
  rtx this_slot, offset, addr, mem, tmp;
  rtx_insn *insn;

  /* Avoid clobbering the struct value reg by using the
     static chain reg as a temporary.  */
  tmp = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);

  /* Pretend to be a post-reload pass while generating rtl.  */
  reload_completed = 1;

  /* The "this" pointer is stored at 4(%sp).  */
  this_slot = gen_rtx_MEM (Pmode, plus_constant (Pmode,
						 stack_pointer_rtx, 4));

  /* Add DELTA to THIS.  */
  if (delta != 0)
    {
      /* Make the offset a legitimate operand for memory addition.  */
      offset = GEN_INT (delta);
      /* Large offsets go through %d0 first; small ones (addq/subq range)
	 or ones movq can't help with are added directly to memory.  */
      if ((delta < -8 || delta > 8)
	  && (TARGET_COLDFIRE || USE_MOVQ (delta)))
	{
	  emit_move_insn (gen_rtx_REG (Pmode, D0_REG), offset);
	  offset = gen_rtx_REG (Pmode, D0_REG);
	}
      emit_insn (gen_add3_insn (copy_rtx (this_slot),
				copy_rtx (this_slot), offset));
    }

  /* If needed, add *(*THIS + VCALL_OFFSET) to THIS.  */
  if (vcall_offset != 0)
    {
      /* Set the static chain register to *THIS.  */
      emit_move_insn (tmp, this_slot);
      emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));

      /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET.  */
      addr = plus_constant (Pmode, tmp, vcall_offset);
      if (!m68k_legitimate_address_p (Pmode, addr, true))
	{
	  emit_insn (gen_rtx_SET (tmp, addr));
	  addr = tmp;
	}

      /* Load the offset into %d0 and add it to THIS.  */
      emit_move_insn (gen_rtx_REG (Pmode, D0_REG),
		      gen_rtx_MEM (Pmode, addr));
      emit_insn (gen_add3_insn (copy_rtx (this_slot),
				copy_rtx (this_slot),
				gen_rtx_REG (Pmode, D0_REG)));
    }

  /* Jump to the target function.  Use a sibcall if direct jumps are
     allowed, otherwise load the address into a register first.  */
  mem = DECL_RTL (function);
  if (!sibcall_operand (XEXP (mem, 0), VOIDmode))
    {
      gcc_assert (flag_pic);

      if (!TARGET_SEP_DATA)
	{
	  /* Use the static chain register as a temporary (call-clobbered)
	     GOT pointer for this function.  We can use the static chain
	     register because it isn't live on entry to the thunk.  */
	  SET_REGNO (pic_offset_table_rtx, STATIC_CHAIN_REGNUM);
	  emit_insn (gen_load_got (pic_offset_table_rtx));
	}
      legitimize_pic_address (XEXP (mem, 0), Pmode, tmp);
      mem = replace_equiv_address (mem, tmp);
    }
  insn = emit_call_insn (gen_sibcall (mem, const0_rtx));
  SIBLING_CALL_P (insn) = 1;

  /* Run just enough of rest_of_compilation: split the insns generated
     above and emit them as assembly into FILE.  */
  insn = get_insns ();
  split_all_insns_noflow ();
  final_start_function (insn, file, 1);
  final (insn, file, 1);
  final_end_function ();

  /* Clean up the vars set above.  */
  reload_completed = 0;

  /* Restore the original PIC register.  */
  if (flag_pic)
    SET_REGNO (pic_offset_table_rtx, PIC_REG);
}
8636be86
KH
5131
/* Worker function for TARGET_STRUCT_VALUE_RTX.
   Return the register used to pass the address of an aggregate
   return value.  */

static rtx
m68k_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
		       int incoming ATTRIBUTE_UNUSED)
{
  return gen_rtx_REG (Pmode, M68K_STRUCT_VALUE_REGNUM);
}
cfca21cb
PB
5140
5141/* Return nonzero if register old_reg can be renamed to register new_reg. */
5142int
5143m68k_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED,
5144 unsigned int new_reg)
5145{
5146
5147 /* Interrupt functions can only use registers that have already been
5148 saved by the prologue, even if they would normally be
5149 call-clobbered. */
5150
a4242737
KH
5151 if ((m68k_get_function_kind (current_function_decl)
5152 == m68k_fk_interrupt_handler)
6fb5fa3c 5153 && !df_regs_ever_live_p (new_reg))
cfca21cb
PB
5154 return 0;
5155
5156 return 1;
5157}
70028b61 5158
ffa2596e
RS
5159/* Value is true if hard register REGNO can hold a value of machine-mode
5160 MODE. On the 68000, we let the cpu registers can hold any mode, but
5161 restrict the 68881 registers to floating-point modes. */
5162
70028b61 5163bool
ef4bddc2 5164m68k_regno_mode_ok (int regno, machine_mode mode)
70028b61 5165{
36e04090 5166 if (DATA_REGNO_P (regno))
70028b61 5167 {
a0a7fbc9
AS
5168 /* Data Registers, can hold aggregate if fits in. */
5169 if (regno + GET_MODE_SIZE (mode) / 4 <= 8)
5170 return true;
70028b61 5171 }
36e04090 5172 else if (ADDRESS_REGNO_P (regno))
70028b61 5173 {
a0a7fbc9
AS
5174 if (regno + GET_MODE_SIZE (mode) / 4 <= 16)
5175 return true;
70028b61 5176 }
36e04090 5177 else if (FP_REGNO_P (regno))
70028b61
PB
5178 {
5179 /* FPU registers, hold float or complex float of long double or
a0a7fbc9
AS
5180 smaller. */
5181 if ((GET_MODE_CLASS (mode) == MODE_FLOAT
5182 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
dcc21c4c 5183 && GET_MODE_UNIT_SIZE (mode) <= TARGET_FP_REG_SIZE)
a0a7fbc9 5184 return true;
70028b61
PB
5185 }
5186 return false;
5187}
dcc21c4c 5188
ffa2596e
RS
5189/* Implement SECONDARY_RELOAD_CLASS. */
5190
5191enum reg_class
5192m68k_secondary_reload_class (enum reg_class rclass,
ef4bddc2 5193 machine_mode mode, rtx x)
ffa2596e
RS
5194{
5195 int regno;
5196
5197 regno = true_regnum (x);
5198
5199 /* If one operand of a movqi is an address register, the other
5200 operand must be a general register or constant. Other types
5201 of operand must be reloaded through a data register. */
5202 if (GET_MODE_SIZE (mode) == 1
5203 && reg_classes_intersect_p (rclass, ADDR_REGS)
5204 && !(INT_REGNO_P (regno) || CONSTANT_P (x)))
5205 return DATA_REGS;
5206
5207 /* PC-relative addresses must be loaded into an address register first. */
5208 if (TARGET_PCREL
5209 && !reg_class_subset_p (rclass, ADDR_REGS)
5210 && symbolic_operand (x, VOIDmode))
5211 return ADDR_REGS;
5212
5213 return NO_REGS;
5214}
5215
5216/* Implement PREFERRED_RELOAD_CLASS. */
5217
5218enum reg_class
5219m68k_preferred_reload_class (rtx x, enum reg_class rclass)
5220{
5221 enum reg_class secondary_class;
5222
5223 /* If RCLASS might need a secondary reload, try restricting it to
5224 a class that doesn't. */
5225 secondary_class = m68k_secondary_reload_class (rclass, GET_MODE (x), x);
5226 if (secondary_class != NO_REGS
5227 && reg_class_subset_p (secondary_class, rclass))
5228 return secondary_class;
5229
5230 /* Prefer to use moveq for in-range constants. */
5231 if (GET_CODE (x) == CONST_INT
5232 && reg_class_subset_p (DATA_REGS, rclass)
5233 && IN_RANGE (INTVAL (x), -0x80, 0x7f))
5234 return DATA_REGS;
5235
5236 /* ??? Do we really need this now? */
5237 if (GET_CODE (x) == CONST_DOUBLE
5238 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
5239 {
5240 if (TARGET_HARD_FLOAT && reg_class_subset_p (FP_REGS, rclass))
5241 return FP_REGS;
5242
5243 return NO_REGS;
5244 }
5245
5246 return rclass;
5247}
5248
dcc21c4c
PB
5249/* Return floating point values in a 68881 register. This makes 68881 code
5250 a little bit faster. It also makes -msoft-float code incompatible with
5251 hard-float code, so people have to be careful not to mix the two.
c0220ea4 5252 For ColdFire it was decided the ABI incompatibility is undesirable.
dcc21c4c
PB
5253 If there is need for a hard-float ABI it is probably worth doing it
5254 properly and also passing function arguments in FP registers. */
5255rtx
ef4bddc2 5256m68k_libcall_value (machine_mode mode)
dcc21c4c
PB
5257{
5258 switch (mode) {
5259 case SFmode:
5260 case DFmode:
5261 case XFmode:
5262 if (TARGET_68881)
8d989403 5263 return gen_rtx_REG (mode, FP0_REG);
dcc21c4c
PB
5264 break;
5265 default:
5266 break;
5267 }
75df395f
MK
5268
5269 return gen_rtx_REG (mode, m68k_libcall_value_in_a0_p ? A0_REG : D0_REG);
dcc21c4c
PB
5270}
5271
db5e2d51
MK
5272/* Location in which function value is returned.
5273 NOTE: Due to differences in ABIs, don't call this function directly,
5274 use FUNCTION_VALUE instead. */
dcc21c4c 5275rtx
586de218 5276m68k_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED)
dcc21c4c 5277{
ef4bddc2 5278 machine_mode mode;
dcc21c4c
PB
5279
5280 mode = TYPE_MODE (valtype);
5281 switch (mode) {
5282 case SFmode:
5283 case DFmode:
5284 case XFmode:
5285 if (TARGET_68881)
8d989403 5286 return gen_rtx_REG (mode, FP0_REG);
dcc21c4c
PB
5287 break;
5288 default:
5289 break;
5290 }
5291
576c9028
KH
5292 /* If the function returns a pointer, push that into %a0. */
5293 if (func && POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (func))))
5294 /* For compatibility with the large body of existing code which
5295 does not always properly declare external functions returning
5296 pointer types, the m68k/SVR4 convention is to copy the value
5297 returned for pointer functions from a0 to d0 in the function
5298 epilogue, so that callers that have neglected to properly
5299 declare the callee can still find the correct return value in
5300 d0. */
5301 return gen_rtx_PARALLEL
5302 (mode,
5303 gen_rtvec (2,
5304 gen_rtx_EXPR_LIST (VOIDmode,
5305 gen_rtx_REG (mode, A0_REG),
5306 const0_rtx),
5307 gen_rtx_EXPR_LIST (VOIDmode,
5308 gen_rtx_REG (mode, D0_REG),
5309 const0_rtx)));
5310 else if (POINTER_TYPE_P (valtype))
5311 return gen_rtx_REG (mode, A0_REG);
dcc21c4c 5312 else
576c9028 5313 return gen_rtx_REG (mode, D0_REG);
dcc21c4c 5314}
1c445f03
NS
5315
/* Worker function for TARGET_RETURN_IN_MEMORY.  */
#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
static bool
m68k_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  machine_mode mode = TYPE_MODE (type);

  /* BLKmode values always go in memory.  Aggregates whose known
     alignment is weaker than their mode's natural alignment must also
     be returned in memory, to maintain compatibility between code
     compiled with -mstrict-align and that compiled with
     -mno-strict-align.  */
  return (mode == BLKmode
	  || (AGGREGATE_TYPE_P (type)
	      && TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (mode)));
}
#endif
c47b0cb4
MK
5337
/* CPU to schedule the program for.  Set from the -mcpu selection and
   consulted by the scheduling hooks below.  */
enum attr_cpu m68k_sched_cpu;

/* MAC (multiply-accumulate unit) to schedule the program for.  */
enum attr_mac m68k_sched_mac;
5343
c47b0cb4
MK
/* Operand type, as classified for scheduling purposes.  The MEM
   variants are numbered after the m68k effective-address (EA) modes
   they correspond to.  */
enum attr_op_type
  {
    /* No operand.  */
    OP_TYPE_NONE,

    /* Integer register.  */
    OP_TYPE_RN,

    /* FP register.  */
    OP_TYPE_FPN,

    /* Implicit mem reference (e.g. stack).  */
    OP_TYPE_MEM1,

    /* Memory without offset or indexing.  EA modes 2, 3 and 4.  */
    OP_TYPE_MEM234,

    /* Memory with offset but without indexing.  EA mode 5.  */
    OP_TYPE_MEM5,

    /* Memory with indexing.  EA mode 6.  */
    OP_TYPE_MEM6,

    /* Memory referenced by absolute address.  EA mode 7.  */
    OP_TYPE_MEM7,

    /* Immediate operand that doesn't require extension word.  */
    OP_TYPE_IMM_Q,

    /* Immediate 16 bit operand.  */
    OP_TYPE_IMM_W,

    /* Immediate 32 bit operand.  */
    OP_TYPE_IMM_L
  };
5380
c47b0cb4
MK
5381/* Return type of memory ADDR_RTX refers to. */
5382static enum attr_op_type
ef4bddc2 5383sched_address_type (machine_mode mode, rtx addr_rtx)
c47b0cb4
MK
5384{
5385 struct m68k_address address;
5386
96fcacb7
MK
5387 if (symbolic_operand (addr_rtx, VOIDmode))
5388 return OP_TYPE_MEM7;
5389
c47b0cb4
MK
5390 if (!m68k_decompose_address (mode, addr_rtx,
5391 reload_completed, &address))
5392 {
96fcacb7 5393 gcc_assert (!reload_completed);
c47b0cb4
MK
5394 /* Reload will likely fix the address to be in the register. */
5395 return OP_TYPE_MEM234;
5396 }
5397
5398 if (address.scale != 0)
5399 return OP_TYPE_MEM6;
5400
5401 if (address.base != NULL_RTX)
5402 {
5403 if (address.offset == NULL_RTX)
5404 return OP_TYPE_MEM234;
5405
5406 return OP_TYPE_MEM5;
5407 }
5408
5409 gcc_assert (address.offset != NULL_RTX);
5410
5411 return OP_TYPE_MEM7;
5412}
5413
96fcacb7
MK
5414/* Return X or Y (depending on OPX_P) operand of INSN. */
5415static rtx
647d790d 5416sched_get_operand (rtx_insn *insn, bool opx_p)
96fcacb7
MK
5417{
5418 int i;
5419
5420 if (recog_memoized (insn) < 0)
5421 gcc_unreachable ();
5422
5423 extract_constrain_insn_cached (insn);
5424
5425 if (opx_p)
5426 i = get_attr_opx (insn);
5427 else
5428 i = get_attr_opy (insn);
5429
5430 if (i >= recog_data.n_operands)
5431 return NULL;
5432
5433 return recog_data.operand[i];
5434}
5435
/* Return type of INSN's operand X (if OPX_P) or operand Y (if !OPX_P).
   If ADDRESS_P is true, return type of memory location operand refers to.

   Classifies the operand as a register, a memory reference (by EA
   mode), or an immediate (by the width of its extension word).  */
static enum attr_op_type
sched_attr_op_type (rtx_insn *insn, bool opx_p, bool address_p)
{
  rtx op;

  op = sched_get_operand (insn, opx_p);

  if (op == NULL)
    {
      /* Can only happen pre-reload; default to plain register.  */
      gcc_assert (!reload_completed);
      return OP_TYPE_RN;
    }

  if (address_p)
    return sched_address_type (QImode, op);

  if (memory_operand (op, VOIDmode))
    return sched_address_type (GET_MODE (op), XEXP (op, 0));

  if (register_operand (op, VOIDmode))
    {
      /* Before reload, judge by mode; after reload the hard register
	 tells us whether it's an FPU register.  */
      if ((!reload_completed && FLOAT_MODE_P (GET_MODE (op)))
	  || (reload_completed && FP_REG_P (op)))
	return OP_TYPE_FPN;

      return OP_TYPE_RN;
    }

  if (GET_CODE (op) == CONST_INT)
    {
      int ival;

      ival = INTVAL (op);

      /* Check for quick constants.  */
      switch (get_attr_type (insn))
	{
	case TYPE_ALUQ_L:
	  /* addq/subq take 1..8 (and their negatives here).  */
	  if (IN_RANGE (ival, 1, 8) || IN_RANGE (ival, -8, -1))
	    return OP_TYPE_IMM_Q;

	  gcc_assert (!reload_completed);
	  break;

	case TYPE_MOVEQ_L:
	  if (USE_MOVQ (ival))
	    return OP_TYPE_IMM_Q;

	  gcc_assert (!reload_completed);
	  break;

	case TYPE_MOV3Q_L:
	  if (valid_mov3q_const (ival))
	    return OP_TYPE_IMM_Q;

	  gcc_assert (!reload_completed);
	  break;

	default:
	  break;
	}

      /* Fits in a signed 16-bit extension word?  */
      if (IN_RANGE (ival, -0x8000, 0x7fff))
	return OP_TYPE_IMM_W;

      return OP_TYPE_IMM_L;
    }

  if (GET_CODE (op) == CONST_DOUBLE)
    {
      switch (GET_MODE (op))
	{
	case SFmode:
	  return OP_TYPE_IMM_W;

	case VOIDmode:
	case DFmode:
	  return OP_TYPE_IMM_L;

	default:
	  gcc_unreachable ();
	}
    }

  if (GET_CODE (op) == CONST
      || symbolic_operand (op, VOIDmode)
      || LABEL_P (op))
    {
      switch (GET_MODE (op))
	{
	case QImode:
	  return OP_TYPE_IMM_Q;

	case HImode:
	  return OP_TYPE_IMM_W;

	case SImode:
	  return OP_TYPE_IMM_L;

	default:
	  if (symbolic_operand (m68k_unwrap_symbol (op, false), VOIDmode))
	    /* Just a guess.  */
	    return OP_TYPE_IMM_W;

	  return OP_TYPE_IMM_L;
	}
    }

  /* Anything else should only appear before reload.  */
  gcc_assert (!reload_completed);

  if (FLOAT_MODE_P (GET_MODE (op)))
    return OP_TYPE_FPN;

  return OP_TYPE_RN;
}
5553
/* Implement opx_type attribute.
   Return type of INSN's operand X.
   If ADDRESS_P is true, return type of memory location operand refers to.

   This is a one-to-one translation of the generic OP_TYPE_*
   classification into the generated attr_opx_type enum.  */
enum attr_opx_type
m68k_sched_attr_opx_type (rtx_insn *insn, int address_p)
{
  switch (sched_attr_op_type (insn, true, address_p != 0))
    {
    case OP_TYPE_RN:
      return OPX_TYPE_RN;

    case OP_TYPE_FPN:
      return OPX_TYPE_FPN;

    case OP_TYPE_MEM1:
      return OPX_TYPE_MEM1;

    case OP_TYPE_MEM234:
      return OPX_TYPE_MEM234;

    case OP_TYPE_MEM5:
      return OPX_TYPE_MEM5;

    case OP_TYPE_MEM6:
      return OPX_TYPE_MEM6;

    case OP_TYPE_MEM7:
      return OPX_TYPE_MEM7;

    case OP_TYPE_IMM_Q:
      return OPX_TYPE_IMM_Q;

    case OP_TYPE_IMM_W:
      return OPX_TYPE_IMM_W;

    case OP_TYPE_IMM_L:
      return OPX_TYPE_IMM_L;

    default:
      gcc_unreachable ();
    }
}
5596
/* Implement opy_type attribute.
   Return type of INSN's operand Y.
   If ADDRESS_P is true, return type of memory location operand refers to.

   Mirror of m68k_sched_attr_opx_type for the Y operand.  */
enum attr_opy_type
m68k_sched_attr_opy_type (rtx_insn *insn, int address_p)
{
  switch (sched_attr_op_type (insn, false, address_p != 0))
    {
    case OP_TYPE_RN:
      return OPY_TYPE_RN;

    case OP_TYPE_FPN:
      return OPY_TYPE_FPN;

    case OP_TYPE_MEM1:
      return OPY_TYPE_MEM1;

    case OP_TYPE_MEM234:
      return OPY_TYPE_MEM234;

    case OP_TYPE_MEM5:
      return OPY_TYPE_MEM5;

    case OP_TYPE_MEM6:
      return OPY_TYPE_MEM6;

    case OP_TYPE_MEM7:
      return OPY_TYPE_MEM7;

    case OP_TYPE_IMM_Q:
      return OPY_TYPE_IMM_Q;

    case OP_TYPE_IMM_W:
      return OPY_TYPE_IMM_W;

    case OP_TYPE_IMM_L:
      return OPY_TYPE_IMM_L;

    default:
      gcc_unreachable ();
    }
}
5639
96fcacb7
MK
5640/* Return size of INSN as int. */
5641static int
84034c69 5642sched_get_attr_size_int (rtx_insn *insn)
c47b0cb4
MK
5643{
5644 int size;
5645
96fcacb7 5646 switch (get_attr_type (insn))
c47b0cb4 5647 {
96fcacb7
MK
5648 case TYPE_IGNORE:
5649 /* There should be no references to m68k_sched_attr_size for 'ignore'
5650 instructions. */
5651 gcc_unreachable ();
5652 return 0;
5653
5654 case TYPE_MUL_L:
c47b0cb4
MK
5655 size = 2;
5656 break;
5657
5658 default:
5659 size = 1;
5660 break;
5661 }
5662
5663 switch (get_attr_opx_type (insn))
5664 {
5665 case OPX_TYPE_NONE:
96fcacb7
MK
5666 case OPX_TYPE_RN:
5667 case OPX_TYPE_FPN:
c47b0cb4
MK
5668 case OPX_TYPE_MEM1:
5669 case OPX_TYPE_MEM234:
5670 case OPY_TYPE_IMM_Q:
5671 break;
5672
5673 case OPX_TYPE_MEM5:
5674 case OPX_TYPE_MEM6:
5675 /* Here we assume that most absolute references are short. */
5676 case OPX_TYPE_MEM7:
5677 case OPY_TYPE_IMM_W:
5678 ++size;
5679 break;
5680
5681 case OPY_TYPE_IMM_L:
5682 size += 2;
5683 break;
5684
5685 default:
5686 gcc_unreachable ();
5687 }
5688
5689 switch (get_attr_opy_type (insn))
5690 {
5691 case OPY_TYPE_NONE:
96fcacb7
MK
5692 case OPY_TYPE_RN:
5693 case OPY_TYPE_FPN:
c47b0cb4
MK
5694 case OPY_TYPE_MEM1:
5695 case OPY_TYPE_MEM234:
5696 case OPY_TYPE_IMM_Q:
5697 break;
5698
5699 case OPY_TYPE_MEM5:
5700 case OPY_TYPE_MEM6:
5701 /* Here we assume that most absolute references are short. */
5702 case OPY_TYPE_MEM7:
5703 case OPY_TYPE_IMM_W:
5704 ++size;
5705 break;
5706
5707 case OPY_TYPE_IMM_L:
5708 size += 2;
5709 break;
5710
5711 default:
5712 gcc_unreachable ();
5713 }
5714
5715 if (size > 3)
5716 {
96fcacb7 5717 gcc_assert (!reload_completed);
c47b0cb4
MK
5718
5719 size = 3;
5720 }
5721
5722 return size;
5723}
5724
96fcacb7
MK
5725/* Return size of INSN as attribute enum value. */
5726enum attr_size
84034c69 5727m68k_sched_attr_size (rtx_insn *insn)
96fcacb7
MK
5728{
5729 switch (sched_get_attr_size_int (insn))
5730 {
5731 case 1:
5732 return SIZE_1;
5733
5734 case 2:
5735 return SIZE_2;
5736
5737 case 3:
5738 return SIZE_3;
5739
5740 default:
5741 gcc_unreachable ();
96fcacb7
MK
5742 }
5743}
5744
/* Return a coarse classification of INSN's X or Y operand (depending
   on OPX_P), based on the cached opx/opy attributes: OP_TYPE_RN for
   registers and immediates, OP_TYPE_MEM6 for indexed memory, and
   OP_TYPE_MEM1 for all other memory references.  */
static enum attr_op_type
sched_get_opxy_mem_type (rtx_insn *insn, bool opx_p)
{
  if (opx_p)
    {
      switch (get_attr_opx_type (insn))
	{
	case OPX_TYPE_NONE:
	case OPX_TYPE_RN:
	case OPX_TYPE_FPN:
	case OPX_TYPE_IMM_Q:
	case OPX_TYPE_IMM_W:
	case OPX_TYPE_IMM_L:
	  return OP_TYPE_RN;

	case OPX_TYPE_MEM1:
	case OPX_TYPE_MEM234:
	case OPX_TYPE_MEM5:
	case OPX_TYPE_MEM7:
	  return OP_TYPE_MEM1;

	case OPX_TYPE_MEM6:
	  return OP_TYPE_MEM6;

	default:
	  gcc_unreachable ();
	}
    }
  else
    {
      switch (get_attr_opy_type (insn))
	{
	case OPY_TYPE_NONE:
	case OPY_TYPE_RN:
	case OPY_TYPE_FPN:
	case OPY_TYPE_IMM_Q:
	case OPY_TYPE_IMM_W:
	case OPY_TYPE_IMM_L:
	  return OP_TYPE_RN;

	case OPY_TYPE_MEM1:
	case OPY_TYPE_MEM234:
	case OPY_TYPE_MEM5:
	case OPY_TYPE_MEM7:
	  return OP_TYPE_MEM1;

	case OPY_TYPE_MEM6:
	  return OP_TYPE_MEM6;

	default:
	  gcc_unreachable ();
	}
    }
}
5801
c47b0cb4
MK
/* Implement op_mem attribute.

   The OP_MEM_<r><w> values appear to encode the insn's memory traffic:
   <r> is the read component and <w> the write component, where 0 = no
   access, 1 = plain memory access and I = indexed memory access —
   NOTE(review): inferred from the mapping below; confirm against the
   op_mem attribute definition in m68k.md.  */
enum attr_op_mem
m68k_sched_attr_op_mem (rtx_insn *insn)
{
  enum attr_op_type opx;
  enum attr_op_type opy;

  opx = sched_get_opxy_mem_type (insn, true);
  opy = sched_get_opxy_mem_type (insn, false);

  /* Both operands in registers: no memory traffic.  */
  if (opy == OP_TYPE_RN && opx == OP_TYPE_RN)
    return OP_MEM_00;

  /* X is plain memory; direction depends on how X is accessed.  */
  if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM1)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_R:
	  return OP_MEM_10;

	case OPX_ACCESS_W:
	  return OP_MEM_01;

	case OPX_ACCESS_RW:
	  return OP_MEM_11;

	default:
	  gcc_unreachable ();
	}
    }

  /* X is indexed memory.  */
  if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM6)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_R:
	  return OP_MEM_I0;

	case OPX_ACCESS_W:
	  return OP_MEM_0I;

	case OPX_ACCESS_RW:
	  return OP_MEM_I1;

	default:
	  gcc_unreachable ();
	}
    }

  /* Y (always a read) is plain memory, X is a register.  */
  if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_RN)
    return OP_MEM_10;

  if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM1)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_W:
	  return OP_MEM_11;

	default:
	  /* Other access kinds only occur pre-reload.  */
	  gcc_assert (!reload_completed);
	  return OP_MEM_11;
	}
    }

  if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM6)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_W:
	  return OP_MEM_1I;

	default:
	  gcc_assert (!reload_completed);
	  return OP_MEM_1I;
	}
    }

  /* Y is indexed memory, X is a register.  */
  if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_RN)
    return OP_MEM_I0;

  if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM1)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_W:
	  return OP_MEM_I1;

	default:
	  gcc_assert (!reload_completed);
	  return OP_MEM_I1;
	}
    }

  /* Only remaining combination: both operands are indexed memory,
     which is valid solely before reload.  */
  gcc_assert (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM6);
  gcc_assert (!reload_completed);
  return OP_MEM_I1;
}
5900
/* Data for ColdFire V4 index bypass.
   Producer modifies register that is used as index in consumer with
   specified scale.  Filled in by m68k_sched_indexed_address_bypass_p
   and consumed (and cleared) by m68k_sched_adjust_cost.  */
static struct
{
  /* Producer instruction.  */
  rtx pro;

  /* Consumer instruction.  */
  rtx con;

  /* Scale of indexed memory access within consumer.
     Or zero if bypass should not be effective at the moment.  */
  int scale;
} sched_cfv4_bypass_data;

/* An empty (clean) DFA state used by m68k_sched_adjust_cost to query
   min_insn_conflict_delay.  Allocated in m68k_sched_md_init_global and
   freed in m68k_sched_md_finish_global.  */
static state_t sched_adjust_cost_state;
5919
/* Implement adjust_cost scheduler hook.
   Return adjusted COST of dependency LINK between DEF_INSN and INSN.  */
static int
m68k_sched_adjust_cost (rtx_insn *insn, rtx link ATTRIBUTE_UNUSED,
			rtx_insn *def_insn, int cost)
{
  int delay;

  /* Unrecognized insns have no DFA reservations; leave cost alone.  */
  if (recog_memoized (def_insn) < 0
      || recog_memoized (insn) < 0)
    return cost;

  if (sched_cfv4_bypass_data.scale == 1)
    /* Handle ColdFire V4 bypass for indexed address with 1x scale.  */
    {
      /* haifa-sched.c: insn_cost () calls bypass_p () just before
	 targetm.sched.adjust_cost ().  Hence, we can be relatively sure
	 that the data in sched_cfv4_bypass_data is up to date.  */
      gcc_assert (sched_cfv4_bypass_data.pro == def_insn
		  && sched_cfv4_bypass_data.con == insn);

      /* Enforce a minimum latency of 3 cycles for the 1x-scale case,
	 which the bypass itself could not express.  */
      if (cost < 3)
	cost = 3;

      /* Consume the one-shot bypass record.  */
      sched_cfv4_bypass_data.pro = NULL;
      sched_cfv4_bypass_data.con = NULL;
      sched_cfv4_bypass_data.scale = 0;
    }
  else
    gcc_assert (sched_cfv4_bypass_data.pro == NULL
		&& sched_cfv4_bypass_data.con == NULL
		&& sched_cfv4_bypass_data.scale == 0);

  /* Don't try to issue INSN earlier than DFA permits.
     This is especially useful for instructions that write to memory,
     as their true dependence (default) latency is better to be set to 0
     to workaround alias analysis limitations.
     This is, in fact, a machine independent tweak, so, probably,
     it should be moved to haifa-sched.c: insn_cost ().  */
  delay = min_insn_conflict_delay (sched_adjust_cost_state, def_insn, insn);
  if (delay > cost)
    cost = delay;

  return cost;
}
5965
96fcacb7
MK
5966/* Return maximal number of insns that can be scheduled on a single cycle. */
5967static int
5968m68k_sched_issue_rate (void)
5969{
5970 switch (m68k_sched_cpu)
5971 {
5972 case CPU_CFV1:
5973 case CPU_CFV2:
5974 case CPU_CFV3:
5975 return 1;
5976
5977 case CPU_CFV4:
5978 return 2;
5979
5980 default:
5981 gcc_unreachable ();
5982 return 0;
5983 }
5984}
5985
/* Maximal length of instruction for current CPU.
   E.g. it is 3 for any ColdFire core.  */
static int max_insn_size;

/* Data to model instruction buffer of CPU.  */
struct _sched_ib
{
  /* True if the instruction buffer is modeled for the current CPU.
     Cleared for ColdFire V4, which keeps its own buffer full.  */
  bool enabled_p;

  /* Size of the instruction buffer in words.  */
  int size;

  /* Number of filled words in the instruction buffer.  */
  int filled;

  /* Additional information about instruction buffer for CPUs that have
     a buffer of instruction records, rather than a plain buffer
     of instruction words.  */
  struct _sched_ib_records
  {
    /* Size of buffer in records.  */
    int n_insns;

    /* Circular array holding the adjustments made to the size of the
       buffer for the last N_INSNS instructions issued.  */
    int *adjust;

    /* Current index into the above array.  */
    int adjust_index;
  } records;

  /* An insn that reserves (marks empty) one word in the instruction
     buffer; generated once per pass in m68k_sched_md_init_global.  */
  rtx insn;
};

static struct _sched_ib sched_ib;

/* ID of memory unit.  */
static int sched_mem_unit_code;
6025
/* Implementation of the targetm.sched.variable_issue () hook.
   It is called after INSN was issued.  It returns the number of insns
   that can possibly get scheduled on the current cycle.
   It is used here to determine the effect of INSN on the instruction
   buffer.  */
static int
m68k_sched_variable_issue (FILE *sched_dump ATTRIBUTE_UNUSED,
			   int sched_verbose ATTRIBUTE_UNUSED,
			   rtx_insn *insn, int can_issue_more)
{
  int insn_size;

  if (recog_memoized (insn) >= 0 && get_attr_type (insn) != TYPE_IGNORE)
    {
      switch (m68k_sched_cpu)
	{
	case CPU_CFV1:
	case CPU_CFV2:
	  insn_size = sched_get_attr_size_int (insn);
	  break;

	case CPU_CFV3:
	  insn_size = sched_get_attr_size_int (insn);

	  /* ColdFire V3 and V4 cores have instruction buffers that can
	     accumulate up to 8 instructions regardless of instructions'
	     sizes.  So we should take care not to "prefetch" 24 one-word
	     or 12 two-words instructions.
	     To model this behavior we temporarily decrease size of the
	     buffer by (max_insn_size - insn_size) for next 7 instructions.  */
	  {
	    int adjust;

	    adjust = max_insn_size - insn_size;
	    sched_ib.size -= adjust;

	    /* Shrinking the buffer may leave fewer filled words.  */
	    if (sched_ib.filled > sched_ib.size)
	      sched_ib.filled = sched_ib.size;

	    /* Record the adjustment in the circular history array.  */
	    sched_ib.records.adjust[sched_ib.records.adjust_index] = adjust;
	  }

	  ++sched_ib.records.adjust_index;
	  if (sched_ib.records.adjust_index == sched_ib.records.n_insns)
	    sched_ib.records.adjust_index = 0;

	  /* Undo adjustment we did 7 instructions ago.  */
	  sched_ib.size
	    += sched_ib.records.adjust[sched_ib.records.adjust_index];

	  break;

	case CPU_CFV4:
	  /* V4 does not model the instruction buffer at all.  */
	  gcc_assert (!sched_ib.enabled_p);
	  insn_size = 0;
	  break;

	default:
	  gcc_unreachable ();
	}

      if (insn_size > sched_ib.filled)
	/* Scheduling for register pressure does not always take DFA into
	   account.  Workaround instruction buffer not being filled enough.  */
	{
	  gcc_assert (sched_pressure == SCHED_PRESSURE_WEIGHTED);
	  insn_size = sched_ib.filled;
	}

      --can_issue_more;
    }
  else if (GET_CODE (PATTERN (insn)) == ASM_INPUT
	   || asm_noperands (PATTERN (insn)) >= 0)
    /* Inline asm is conservatively assumed to drain the whole buffer.  */
    insn_size = sched_ib.filled;
  else
    insn_size = 0;

  /* Account for the words INSN consumed from the buffer.  */
  sched_ib.filled -= insn_size;

  return can_issue_more;
}
6107
/* Return how many instructions should scheduler lookahead to choose the
   best one.  One less than the issue rate: enough to fill the remaining
   issue slots of the current cycle.  */
static int
m68k_sched_first_cycle_multipass_dfa_lookahead (void)
{
  int issue_rate = m68k_sched_issue_rate ();

  return issue_rate - 1;
}
6115
/* Implementation of targetm.sched.init_global () hook.
   It is invoked once per scheduling pass and is used here
   to initialize scheduler constants.  */
static void
m68k_sched_md_init_global (FILE *sched_dump ATTRIBUTE_UNUSED,
			   int sched_verbose ATTRIBUTE_UNUSED,
			   int n_insns ATTRIBUTE_UNUSED)
{
#ifdef ENABLE_CHECKING
  /* Check that all instructions have DFA reservations and
     that all instructions can be issued from a clean state.  */
  {
    rtx_insn *insn;
    state_t state;

    state = alloca (state_size ());

    for (insn = get_insns (); insn != NULL; insn = NEXT_INSN (insn))
      {
	if (INSN_P (insn) && recog_memoized (insn) >= 0)
	  {
	    gcc_assert (insn_has_dfa_reservation_p (insn));

	    state_reset (state);
	    if (state_transition (state, insn) >= 0)
	      gcc_unreachable ();
	  }
      }
  }
#endif

  /* Setup target cpu.  */

  /* ColdFire V4 has a set of features to keep its instruction buffer full
     (e.g., a separate memory bus for instructions) and, hence, we do not model
     buffer for this CPU.  */
  sched_ib.enabled_p = (m68k_sched_cpu != CPU_CFV4);

  switch (m68k_sched_cpu)
    {
    case CPU_CFV4:
      sched_ib.filled = 0;

      /* FALLTHRU */

    case CPU_CFV1:
    case CPU_CFV2:
      max_insn_size = 3;
      sched_ib.records.n_insns = 0;
      sched_ib.records.adjust = NULL;
      break;

    case CPU_CFV3:
      max_insn_size = 3;
      /* V3 models a buffer of 8 instruction records; the adjustment
	 history array is sized to match (freed in md_finish_global).  */
      sched_ib.records.n_insns = 8;
      sched_ib.records.adjust = XNEWVEC (int, sched_ib.records.n_insns);
      break;

    default:
      gcc_unreachable ();
    }

  sched_mem_unit_code = get_cpu_unit_code ("cf_mem1");

  /* Scratch DFA state for m68k_sched_adjust_cost.  */
  sched_adjust_cost_state = xmalloc (state_size ());
  state_reset (sched_adjust_cost_state);

  /* Materialize the special "ib" insn used to model the reservation of
     one instruction-buffer word per DFA transition.  */
  start_sequence ();
  emit_insn (gen_ib ());
  sched_ib.insn = get_insns ();
  end_sequence ();
}
6188
6189/* Scheduling pass is now finished. Free/reset static variables. */
6190static void
6191m68k_sched_md_finish_global (FILE *dump ATTRIBUTE_UNUSED,
6192 int verbose ATTRIBUTE_UNUSED)
6193{
826fadba 6194 sched_ib.insn = NULL;
b8c96320
MK
6195
6196 free (sched_adjust_cost_state);
6197 sched_adjust_cost_state = NULL;
6198
6199 sched_mem_unit_code = 0;
826fadba
MK
6200
6201 free (sched_ib.records.adjust);
6202 sched_ib.records.adjust = NULL;
6203 sched_ib.records.n_insns = 0;
6204 max_insn_size = 0;
b8c96320
MK
6205}
6206
/* Implementation of targetm.sched.init () hook.
   It is invoked each time scheduler starts on the new block (basic block or
   extended basic block).  */
static void
m68k_sched_md_init (FILE *sched_dump ATTRIBUTE_UNUSED,
		    int sched_verbose ATTRIBUTE_UNUSED,
		    int n_insns ATTRIBUTE_UNUSED)
{
  switch (m68k_sched_cpu)
    {
    case CPU_CFV1:
    case CPU_CFV2:
      /* V1/V2 have a plain 6-word instruction buffer.  */
      sched_ib.size = 6;
      break;

    case CPU_CFV3:
      /* V3 buffers whole instruction records; start from the maximal
	 word capacity and clear the adjustment history.  */
      sched_ib.size = sched_ib.records.n_insns * max_insn_size;

      memset (sched_ib.records.adjust, 0,
	      sched_ib.records.n_insns * sizeof (*sched_ib.records.adjust));
      sched_ib.records.adjust_index = 0;
      break;

    case CPU_CFV4:
      /* No buffer model for V4.  */
      gcc_assert (!sched_ib.enabled_p);
      sched_ib.size = 0;
      break;

    default:
      gcc_unreachable ();
    }

  if (sched_ib.enabled_p)
    /* haifa-sched.c: schedule_block () calls advance_cycle () just before
       the first cycle.  Workaround that.  */
    sched_ib.filled = -2;
}
6244
6245/* Implementation of targetm.sched.dfa_pre_advance_cycle () hook.
6246 It is invoked just before current cycle finishes and is used here
6247 to track if instruction buffer got its two words this cycle. */
6248static void
6249m68k_sched_dfa_pre_advance_cycle (void)
6250{
96fcacb7
MK
6251 if (!sched_ib.enabled_p)
6252 return;
6253
b8c96320
MK
6254 if (!cpu_unit_reservation_p (curr_state, sched_mem_unit_code))
6255 {
826fadba 6256 sched_ib.filled += 2;
b8c96320 6257
826fadba
MK
6258 if (sched_ib.filled > sched_ib.size)
6259 sched_ib.filled = sched_ib.size;
b8c96320
MK
6260 }
6261}
6262
6263/* Implementation of targetm.sched.dfa_post_advance_cycle () hook.
6264 It is invoked just after new cycle begins and is used here
6265 to setup number of filled words in the instruction buffer so that
6266 instructions which won't have all their words prefetched would be
6267 stalled for a cycle. */
6268static void
6269m68k_sched_dfa_post_advance_cycle (void)
6270{
6271 int i;
b8c96320 6272
96fcacb7
MK
6273 if (!sched_ib.enabled_p)
6274 return;
6275
b8c96320
MK
6276 /* Setup number of prefetched instruction words in the instruction
6277 buffer. */
826fadba
MK
6278 i = max_insn_size - sched_ib.filled;
6279
6280 while (--i >= 0)
b8c96320 6281 {
826fadba 6282 if (state_transition (curr_state, sched_ib.insn) >= 0)
5f3b7d7c
MK
6283 /* Pick up scheduler state. */
6284 ++sched_ib.filled;
b8c96320
MK
6285 }
6286}
96fcacb7
MK
6287
6288/* Return X or Y (depending on OPX_P) operand of INSN,
6289 if it is an integer register, or NULL overwise. */
6290static rtx
647d790d 6291sched_get_reg_operand (rtx_insn *insn, bool opx_p)
96fcacb7
MK
6292{
6293 rtx op = NULL;
6294
6295 if (opx_p)
6296 {
6297 if (get_attr_opx_type (insn) == OPX_TYPE_RN)
6298 {
6299 op = sched_get_operand (insn, true);
6300 gcc_assert (op != NULL);
6301
6302 if (!reload_completed && !REG_P (op))
6303 return NULL;
6304 }
6305 }
6306 else
6307 {
6308 if (get_attr_opy_type (insn) == OPY_TYPE_RN)
6309 {
6310 op = sched_get_operand (insn, false);
6311 gcc_assert (op != NULL);
6312
6313 if (!reload_completed && !REG_P (op))
6314 return NULL;
6315 }
6316 }
6317
6318 return op;
6319}
6320
6321/* Return true, if X or Y (depending on OPX_P) operand of INSN
6322 is a MEM. */
6323static bool
84034c69 6324sched_mem_operand_p (rtx_insn *insn, bool opx_p)
96fcacb7
MK
6325{
6326 switch (sched_get_opxy_mem_type (insn, opx_p))
6327 {
6328 case OP_TYPE_MEM1:
6329 case OP_TYPE_MEM6:
6330 return true;
6331
6332 default:
6333 return false;
6334 }
6335}
6336
6337/* Return X or Y (depending on OPX_P) operand of INSN,
6338 if it is a MEM, or NULL overwise. */
6339static rtx
647d790d 6340sched_get_mem_operand (rtx_insn *insn, bool must_read_p, bool must_write_p)
96fcacb7
MK
6341{
6342 bool opx_p;
6343 bool opy_p;
6344
6345 opx_p = false;
6346 opy_p = false;
6347
6348 if (must_read_p)
6349 {
6350 opx_p = true;
6351 opy_p = true;
6352 }
6353
6354 if (must_write_p)
6355 {
6356 opx_p = true;
6357 opy_p = false;
6358 }
6359
6360 if (opy_p && sched_mem_operand_p (insn, false))
6361 return sched_get_operand (insn, false);
6362
6363 if (opx_p && sched_mem_operand_p (insn, true))
6364 return sched_get_operand (insn, true);
6365
6366 gcc_unreachable ();
6367 return NULL;
6368}
6369
6370/* Return non-zero if PRO modifies register used as part of
6371 address in CON. */
6372int
647d790d 6373m68k_sched_address_bypass_p (rtx_insn *pro, rtx_insn *con)
96fcacb7
MK
6374{
6375 rtx pro_x;
6376 rtx con_mem_read;
6377
6378 pro_x = sched_get_reg_operand (pro, true);
6379 if (pro_x == NULL)
6380 return 0;
6381
6382 con_mem_read = sched_get_mem_operand (con, true, false);
6383 gcc_assert (con_mem_read != NULL);
6384
6385 if (reg_mentioned_p (pro_x, con_mem_read))
6386 return 1;
6387
6388 return 0;
6389}
6390
6391/* Helper function for m68k_sched_indexed_address_bypass_p.
6392 if PRO modifies register used as index in CON,
6393 return scale of indexed memory access in CON. Return zero overwise. */
6394static int
647d790d 6395sched_get_indexed_address_scale (rtx_insn *pro, rtx_insn *con)
96fcacb7
MK
6396{
6397 rtx reg;
6398 rtx mem;
6399 struct m68k_address address;
6400
6401 reg = sched_get_reg_operand (pro, true);
6402 if (reg == NULL)
6403 return 0;
6404
6405 mem = sched_get_mem_operand (con, true, false);
6406 gcc_assert (mem != NULL && MEM_P (mem));
6407
6408 if (!m68k_decompose_address (GET_MODE (mem), XEXP (mem, 0), reload_completed,
6409 &address))
6410 gcc_unreachable ();
6411
6412 if (REGNO (reg) == REGNO (address.index))
6413 {
6414 gcc_assert (address.scale != 0);
6415 return address.scale;
6416 }
6417
6418 return 0;
6419}
6420
6421/* Return non-zero if PRO modifies register used
6422 as index with scale 2 or 4 in CON. */
6423int
647d790d 6424m68k_sched_indexed_address_bypass_p (rtx_insn *pro, rtx_insn *con)
96fcacb7
MK
6425{
6426 gcc_assert (sched_cfv4_bypass_data.pro == NULL
6427 && sched_cfv4_bypass_data.con == NULL
6428 && sched_cfv4_bypass_data.scale == 0);
6429
6430 switch (sched_get_indexed_address_scale (pro, con))
6431 {
6432 case 1:
6433 /* We can't have a variable latency bypass, so
6434 remember to adjust the insn cost in adjust_cost hook. */
6435 sched_cfv4_bypass_data.pro = pro;
6436 sched_cfv4_bypass_data.con = con;
6437 sched_cfv4_bypass_data.scale = 1;
6438 return 0;
6439
6440 case 2:
6441 case 4:
6442 return 1;
6443
6444 default:
6445 return 0;
6446 }
6447}
75df395f 6448
/* We generate a two-instructions program at M_TRAMP :
	movea.l &CHAIN_VALUE,%a0
	jmp FNADDR
   where %a0 can be modified by changing STATIC_CHAIN_REGNUM.  */

static void
m68k_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
{
  rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
  rtx mem;

  /* The static chain must live in an address register, since the
     opcode below only encodes %aN destinations.  */
  gcc_assert (ADDRESS_REGNO_P (STATIC_CHAIN_REGNUM));

  /* Opcode word of "movea.l #imm32,%a0" (0x207C), with the destination
     register field (bits 11-9) patched to STATIC_CHAIN_REGNUM; address
     registers start at hard regno 8, hence the -8.  */
  mem = adjust_address (m_tramp, HImode, 0);
  emit_move_insn (mem, GEN_INT(0x207C + ((STATIC_CHAIN_REGNUM-8) << 9)));
  /* The 32-bit immediate: the static chain value.  */
  mem = adjust_address (m_tramp, SImode, 2);
  emit_move_insn (mem, chain_value);

  /* Opcode word of "jmp (xxx).l" (0x4EF9) followed by the 32-bit
     absolute address of the target function.  */
  mem = adjust_address (m_tramp, HImode, 6);
  emit_move_insn (mem, GEN_INT(0x4EF9));
  mem = adjust_address (m_tramp, SImode, 8);
  emit_move_insn (mem, fnaddr);

  /* Target-specific finalization of the written code area
     (presumably cache synchronization — see FINALIZE_TRAMPOLINE).  */
  FINALIZE_TRAMPOLINE (XEXP (m_tramp, 0));
}
6474
079e7538
NF
6475/* On the 68000, the RTS insn cannot pop anything.
6476 On the 68010, the RTD insn may be used to pop them if the number
6477 of args is fixed, but if the number is variable then the caller
6478 must pop them all. RTD can't be used for library calls now
6479 because the library is compiled with the Unix compiler.
6480 Use of RTD is a selectable option, since it is incompatible with
6481 standard Unix calling sequences. If the option is not selected,
6482 the caller must always pop the args. */
6483
6484static int
6485m68k_return_pops_args (tree fundecl, tree funtype, int size)
6486{
6487 return ((TARGET_RTD
6488 && (!fundecl
6489 || TREE_CODE (fundecl) != IDENTIFIER_NODE)
f38958e8 6490 && (!stdarg_p (funtype)))
079e7538
NF
6491 ? size : 0);
6492}
6493
/* Make sure everything's fine if we *don't* have a given processor.
   This assumes that putting a register in fixed_regs will keep the
   compiler's mitts completely off it.  We don't bother to zero it out
   of register classes.  */

static void
m68k_conditional_register_usage (void)
{
  int i;
  HARD_REG_SET x;

  /* Without hardware floating point, mark every register in FP_REGS
     as fixed and call-used so the allocator never touches them.  */
  if (!TARGET_HARD_FLOAT)
    {
      COPY_HARD_REG_SET (x, reg_class_contents[(int)FP_REGS]);
      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
	if (TEST_HARD_REG_BIT (x, i))
	  fixed_regs[i] = call_used_regs[i] = 1;
    }

  /* When generating PIC, reserve the PIC register.  */
  if (flag_pic)
    fixed_regs[PIC_REG] = call_used_regs[PIC_REG] = 1;
}
6514
/* Register out-of-line __sync_* library functions for operand sizes
   up to UNITS_PER_WORD.  */
static void
m68k_init_sync_libfuncs (void)
{
  init_sync_libfuncs (UNITS_PER_WORD);
}
6520
175aed00
AS
6521/* Implements EPILOGUE_USES. All registers are live on exit from an
6522 interrupt routine. */
6523bool
6524m68k_epilogue_uses (int regno ATTRIBUTE_UNUSED)
6525{
6526 return (reload_completed
6527 && (m68k_get_function_kind (current_function_decl)
6528 == m68k_fk_interrupt_handler));
6529}
6530
75df395f 6531#include "gt-m68k.h"