/* Subroutines for insn-output.c for Motorola 68000 family.
   Copyright (C) 1987-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
79e68feb 19
8fcc61f8
RS
20#define IN_TARGET_CODE 1
21
79e68feb 22#include "config.h"
f5220a5d 23#include "system.h"
4977bab6 24#include "coretypes.h"
c7131fb2 25#include "backend.h"
9fdcd34e 26#include "cfghooks.h"
da932f04 27#include "tree.h"
314e6352
ML
28#include "stringpool.h"
29#include "attribs.h"
c7131fb2
AM
30#include "rtl.h"
31#include "df.h"
32#include "alias.h"
40e23961 33#include "fold-const.h"
d8a2d370
DN
34#include "calls.h"
35#include "stor-layout.h"
36#include "varasm.h"
79e68feb 37#include "regs.h"
79e68feb
RS
38#include "insn-config.h"
39#include "conditions.h"
79e68feb
RS
40#include "output.h"
41#include "insn-attr.h"
1d8eaa6b 42#include "recog.h"
718f9c0f 43#include "diagnostic-core.h"
36566b39 44#include "flags.h"
36566b39
PK
45#include "expmed.h"
46#include "dojump.h"
47#include "explow.h"
4d0cdd0c 48#include "memmodel.h"
36566b39
PK
49#include "emit-rtl.h"
50#include "stmt.h"
6d5f49b2
RH
51#include "expr.h"
52#include "reload.h"
5505f548 53#include "tm_p.h"
672a6f42 54#include "target.h"
2cc07db4 55#include "debug.h"
60393bbc
AM
56#include "cfgrtl.h"
57#include "cfganal.h"
58#include "lcm.h"
59#include "cfgbuild.h"
60#include "cfgcleanup.h"
b8c96320
MK
61/* ??? Need to add a dependency between m68k.o and sched-int.h. */
62#include "sched-int.h"
63#include "insn-codes.h"
96e45421 64#include "opts.h"
8b281334 65#include "optabs.h"
9b2b7279 66#include "builtins.h"
82eee4f1 67#include "rtl-iter.h"
79e68feb 68
994c5d85 69/* This file should be included last. */
d58627a0
RS
70#include "target-def.h"
71
/* Map each hard register number to the register class it belongs to:
   d0-d7 are data registers, a0-a7 address registers, fp0-fp7 FPU
   registers.  The final ADDR_REGS entry covers the one remaining
   register visible here (index 24); presumably the argument pointer —
   TODO confirm against the register definitions in m68k.h.  */
enum reg_class regno_reg_class[] =
{
  DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
  DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
  ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
  ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  ADDR_REGS
};
82
83
a40ed0f3
KH
84/* The minimum number of integer registers that we want to save with the
85 movem instruction. Using two movel instructions instead of a single
86 moveml is about 15% faster for the 68020 and 68030 at no expense in
87 code size. */
88#define MIN_MOVEM_REGS 3
89
90/* The minimum number of floating point registers that we want to save
91 with the fmovem instruction. */
92#define MIN_FMOVEM_REGS 1
93
/* Structure describing stack frame layout.  */
struct m68k_frame
{
  /* Stack pointer to frame pointer offset.  */
  HOST_WIDE_INT offset;

  /* Offset of FPU registers.  */
  HOST_WIDE_INT foffset;

  /* Frame size in bytes (rounded up).  */
  HOST_WIDE_INT size;

  /* Data and address registers: number of saved registers and the
     bitmask (bit I set if D0_REG + I is saved) — both filled in by
     m68k_compute_frame_layout.  */
  int reg_no;
  unsigned int reg_mask;

  /* FPU registers: count of saved FP registers and the corresponding
     bitmask relative to FP0_REG.  */
  int fpu_no;
  unsigned int fpu_mask;

  /* Offsets relative to ARG_POINTER.  */
  HOST_WIDE_INT frame_pointer_offset;
  HOST_WIDE_INT stack_pointer_offset;

  /* funcdef_no of the function which the above information refers to,
     used to cache the layout between calls (see
     m68k_compute_frame_layout).  */
  int funcdef_no;
};
121
3d74bc09
BI
122/* Current frame information calculated by m68k_compute_frame_layout(). */
123static struct m68k_frame current_frame;
124
/* Structure describing an m68k address.

   If CODE is UNKNOWN, the address is BASE + INDEX * SCALE + OFFSET,
   with null fields evaluating to 0.  Here:

   - BASE satisfies m68k_legitimate_base_reg_p
   - INDEX satisfies m68k_legitimate_index_reg_p
   - OFFSET satisfies m68k_legitimate_constant_address_p

   INDEX is either HImode or SImode.  The other fields are SImode.

   If CODE is PRE_DEC, the address is -(BASE).  If CODE is POST_INC,
   the address is (BASE)+.  */
struct m68k_address {
  /* UNKNOWN, PRE_DEC or POST_INC, as described above.  */
  enum rtx_code code;
  rtx base;
  rtx index;
  rtx offset;
  /* Multiplier applied to INDEX (meaningful only when INDEX is set).  */
  int scale;
};
145
b505225b
TS
146static int m68k_sched_adjust_cost (rtx_insn *, int, rtx_insn *, int,
147 unsigned int);
96fcacb7 148static int m68k_sched_issue_rate (void);
ac44248e 149static int m68k_sched_variable_issue (FILE *, int, rtx_insn *, int);
b8c96320
MK
150static void m68k_sched_md_init_global (FILE *, int, int);
151static void m68k_sched_md_finish_global (FILE *, int);
152static void m68k_sched_md_init (FILE *, int, int);
153static void m68k_sched_dfa_pre_advance_cycle (void);
154static void m68k_sched_dfa_post_advance_cycle (void);
96fcacb7 155static int m68k_sched_first_cycle_multipass_dfa_lookahead (void);
b8c96320 156
7b5cbb57 157static bool m68k_can_eliminate (const int, const int);
5efd84c5 158static void m68k_conditional_register_usage (void);
ef4bddc2 159static bool m68k_legitimate_address_p (machine_mode, rtx, bool);
c5387660 160static void m68k_option_override (void);
03e69b12 161static void m68k_override_options_after_change (void);
8a4a2253
BI
162static rtx find_addr_reg (rtx);
163static const char *singlemove_string (rtx *);
8a4a2253
BI
164static void m68k_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
165 HOST_WIDE_INT, tree);
8636be86 166static rtx m68k_struct_value_rtx (tree, int);
48ed72a4
PB
167static tree m68k_handle_fndecl_attribute (tree *node, tree name,
168 tree args, int flags,
169 bool *no_add_attrs);
3d74bc09 170static void m68k_compute_frame_layout (void);
48ed72a4 171static bool m68k_save_reg (unsigned int regno, bool interrupt_handler);
f7e70894 172static bool m68k_ok_for_sibcall_p (tree, tree);
75df395f 173static bool m68k_tls_symbol_p (rtx);
ef4bddc2 174static rtx m68k_legitimize_address (rtx, rtx, machine_mode);
e548c9df 175static bool m68k_rtx_costs (rtx, machine_mode, int, int, int *, bool);
1c445f03 176#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
511e41e5 177static bool m68k_return_in_memory (const_tree, const_tree);
1c445f03 178#endif
75df395f 179static void m68k_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
e0601576 180static void m68k_trampoline_init (rtx, tree, rtx);
a20c5714 181static poly_int64 m68k_return_pops_args (tree, tree, poly_int64);
7b0f476d 182static rtx m68k_delegitimize_address (rtx);
ef4bddc2 183static void m68k_function_arg_advance (cumulative_args_t, machine_mode,
13d3961c 184 const_tree, bool);
ef4bddc2 185static rtx m68k_function_arg (cumulative_args_t, machine_mode,
13d3961c 186 const_tree, bool);
ef4bddc2 187static bool m68k_cannot_force_const_mem (machine_mode mode, rtx x);
cb69db4f 188static bool m68k_output_addr_const_extra (FILE *, rtx);
8b281334 189static void m68k_init_sync_libfuncs (void) ATTRIBUTE_UNUSED;
b89de1b8
JG
190static enum flt_eval_method
191m68k_excess_precision (enum excess_precision_type);
c43f4279 192static unsigned int m68k_hard_regno_nregs (unsigned int, machine_mode);
f939c3e6 193static bool m68k_hard_regno_mode_ok (unsigned int, machine_mode);
99e1629f 194static bool m68k_modes_tieable_p (machine_mode, machine_mode);
79e68feb 195\f
672a6f42 196/* Initialize the GCC target structure. */
301d03af
RS
197
198#if INT_OP_GROUP == INT_OP_DOT_WORD
199#undef TARGET_ASM_ALIGNED_HI_OP
200#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
201#endif
202
203#if INT_OP_GROUP == INT_OP_NO_DOT
204#undef TARGET_ASM_BYTE_OP
205#define TARGET_ASM_BYTE_OP "\tbyte\t"
206#undef TARGET_ASM_ALIGNED_HI_OP
207#define TARGET_ASM_ALIGNED_HI_OP "\tshort\t"
208#undef TARGET_ASM_ALIGNED_SI_OP
209#define TARGET_ASM_ALIGNED_SI_OP "\tlong\t"
210#endif
211
212#if INT_OP_GROUP == INT_OP_DC
213#undef TARGET_ASM_BYTE_OP
214#define TARGET_ASM_BYTE_OP "\tdc.b\t"
215#undef TARGET_ASM_ALIGNED_HI_OP
216#define TARGET_ASM_ALIGNED_HI_OP "\tdc.w\t"
217#undef TARGET_ASM_ALIGNED_SI_OP
218#define TARGET_ASM_ALIGNED_SI_OP "\tdc.l\t"
219#endif
220
221#undef TARGET_ASM_UNALIGNED_HI_OP
222#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
223#undef TARGET_ASM_UNALIGNED_SI_OP
224#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
225
c590b625
RH
226#undef TARGET_ASM_OUTPUT_MI_THUNK
227#define TARGET_ASM_OUTPUT_MI_THUNK m68k_output_mi_thunk
bdabc150 228#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
3101faab 229#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
c590b625 230
1bc7c5b6
ZW
231#undef TARGET_ASM_FILE_START_APP_OFF
232#define TARGET_ASM_FILE_START_APP_OFF true
233
506d7b68
PB
234#undef TARGET_LEGITIMIZE_ADDRESS
235#define TARGET_LEGITIMIZE_ADDRESS m68k_legitimize_address
236
b8c96320
MK
237#undef TARGET_SCHED_ADJUST_COST
238#define TARGET_SCHED_ADJUST_COST m68k_sched_adjust_cost
239
96fcacb7
MK
240#undef TARGET_SCHED_ISSUE_RATE
241#define TARGET_SCHED_ISSUE_RATE m68k_sched_issue_rate
242
b8c96320
MK
243#undef TARGET_SCHED_VARIABLE_ISSUE
244#define TARGET_SCHED_VARIABLE_ISSUE m68k_sched_variable_issue
245
246#undef TARGET_SCHED_INIT_GLOBAL
247#define TARGET_SCHED_INIT_GLOBAL m68k_sched_md_init_global
248
249#undef TARGET_SCHED_FINISH_GLOBAL
250#define TARGET_SCHED_FINISH_GLOBAL m68k_sched_md_finish_global
251
252#undef TARGET_SCHED_INIT
253#define TARGET_SCHED_INIT m68k_sched_md_init
254
255#undef TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE
256#define TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE m68k_sched_dfa_pre_advance_cycle
257
258#undef TARGET_SCHED_DFA_POST_ADVANCE_CYCLE
259#define TARGET_SCHED_DFA_POST_ADVANCE_CYCLE m68k_sched_dfa_post_advance_cycle
260
96fcacb7
MK
261#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
262#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
263 m68k_sched_first_cycle_multipass_dfa_lookahead
264
c5387660
JM
265#undef TARGET_OPTION_OVERRIDE
266#define TARGET_OPTION_OVERRIDE m68k_option_override
267
03e69b12
MP
268#undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
269#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m68k_override_options_after_change
270
3c50106f
RH
271#undef TARGET_RTX_COSTS
272#define TARGET_RTX_COSTS m68k_rtx_costs
273
48ed72a4
PB
274#undef TARGET_ATTRIBUTE_TABLE
275#define TARGET_ATTRIBUTE_TABLE m68k_attribute_table
276
8636be86 277#undef TARGET_PROMOTE_PROTOTYPES
586de218 278#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
8636be86
KH
279
280#undef TARGET_STRUCT_VALUE_RTX
281#define TARGET_STRUCT_VALUE_RTX m68k_struct_value_rtx
282
7ffb5e78 283#undef TARGET_CANNOT_FORCE_CONST_MEM
fbbf66e7 284#define TARGET_CANNOT_FORCE_CONST_MEM m68k_cannot_force_const_mem
7ffb5e78 285
f7e70894
RS
286#undef TARGET_FUNCTION_OK_FOR_SIBCALL
287#define TARGET_FUNCTION_OK_FOR_SIBCALL m68k_ok_for_sibcall_p
288
1c445f03
NS
289#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
290#undef TARGET_RETURN_IN_MEMORY
291#define TARGET_RETURN_IN_MEMORY m68k_return_in_memory
292#endif
293
75df395f
MK
294#ifdef HAVE_AS_TLS
295#undef TARGET_HAVE_TLS
296#define TARGET_HAVE_TLS (true)
297
298#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
299#define TARGET_ASM_OUTPUT_DWARF_DTPREL m68k_output_dwarf_dtprel
300#endif
301
d81db636
SB
302#undef TARGET_LRA_P
303#define TARGET_LRA_P hook_bool_void_false
304
c6c3dba9
PB
305#undef TARGET_LEGITIMATE_ADDRESS_P
306#define TARGET_LEGITIMATE_ADDRESS_P m68k_legitimate_address_p
307
7b5cbb57
AS
308#undef TARGET_CAN_ELIMINATE
309#define TARGET_CAN_ELIMINATE m68k_can_eliminate
310
5efd84c5
NF
311#undef TARGET_CONDITIONAL_REGISTER_USAGE
312#define TARGET_CONDITIONAL_REGISTER_USAGE m68k_conditional_register_usage
313
e0601576
RH
314#undef TARGET_TRAMPOLINE_INIT
315#define TARGET_TRAMPOLINE_INIT m68k_trampoline_init
316
079e7538
NF
317#undef TARGET_RETURN_POPS_ARGS
318#define TARGET_RETURN_POPS_ARGS m68k_return_pops_args
319
7b0f476d
AS
320#undef TARGET_DELEGITIMIZE_ADDRESS
321#define TARGET_DELEGITIMIZE_ADDRESS m68k_delegitimize_address
322
13d3961c
NF
323#undef TARGET_FUNCTION_ARG
324#define TARGET_FUNCTION_ARG m68k_function_arg
325
326#undef TARGET_FUNCTION_ARG_ADVANCE
327#define TARGET_FUNCTION_ARG_ADVANCE m68k_function_arg_advance
328
1a627b35
RS
329#undef TARGET_LEGITIMATE_CONSTANT_P
330#define TARGET_LEGITIMATE_CONSTANT_P m68k_legitimate_constant_p
331
cb69db4f
AS
332#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
333#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA m68k_output_addr_const_extra
334
b89de1b8
JG
335#undef TARGET_C_EXCESS_PRECISION
336#define TARGET_C_EXCESS_PRECISION m68k_excess_precision
337
4c1fd084
RH
338/* The value stored by TAS. */
339#undef TARGET_ATOMIC_TEST_AND_SET_TRUEVAL
340#define TARGET_ATOMIC_TEST_AND_SET_TRUEVAL 128
341
c43f4279
RS
342#undef TARGET_HARD_REGNO_NREGS
343#define TARGET_HARD_REGNO_NREGS m68k_hard_regno_nregs
f939c3e6
RS
344#undef TARGET_HARD_REGNO_MODE_OK
345#define TARGET_HARD_REGNO_MODE_OK m68k_hard_regno_mode_ok
346
99e1629f
RS
347#undef TARGET_MODES_TIEABLE_P
348#define TARGET_MODES_TIEABLE_P m68k_modes_tieable_p
349
48ed72a4
PB
/* Machine-specific attributes recognized by this backend.  All three
   interrupt-style attributes take no arguments, require a FUNCTION_DECL,
   and are validated by m68k_handle_fndecl_attribute.  */
static const struct attribute_spec m68k_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
       affects_type_identity, handler, exclude } */
  { "interrupt", 0, 0, true, false, false, false,
    m68k_handle_fndecl_attribute, NULL },
  { "interrupt_handler", 0, 0, true, false, false, false,
    m68k_handle_fndecl_attribute, NULL },
  { "interrupt_thread", 0, 0, true, false, false, false,
    m68k_handle_fndecl_attribute, NULL },
  /* Sentinel terminating the table.  */
  { NULL, 0, 0, false, false, false, false, NULL, NULL }
};
362
f6897b10 363struct gcc_target targetm = TARGET_INITIALIZER;
672a6f42 364\f
900ec02d
JB
365/* Base flags for 68k ISAs. */
366#define FL_FOR_isa_00 FL_ISA_68000
367#define FL_FOR_isa_10 (FL_FOR_isa_00 | FL_ISA_68010)
368/* FL_68881 controls the default setting of -m68881. gcc has traditionally
369 generated 68881 code for 68020 and 68030 targets unless explicitly told
370 not to. */
371#define FL_FOR_isa_20 (FL_FOR_isa_10 | FL_ISA_68020 \
6a2b269b 372 | FL_BITFIELD | FL_68881 | FL_CAS)
900ec02d
JB
373#define FL_FOR_isa_40 (FL_FOR_isa_20 | FL_ISA_68040)
374#define FL_FOR_isa_cpu32 (FL_FOR_isa_10 | FL_ISA_68020)
375
376/* Base flags for ColdFire ISAs. */
377#define FL_FOR_isa_a (FL_COLDFIRE | FL_ISA_A)
378#define FL_FOR_isa_aplus (FL_FOR_isa_a | FL_ISA_APLUS | FL_CF_USP)
379/* Note ISA_B doesn't necessarily include USP (user stack pointer) support. */
380#define FL_FOR_isa_b (FL_FOR_isa_a | FL_ISA_B | FL_CF_HWDIV)
4e2b26aa 381/* ISA_C is not upwardly compatible with ISA_B. */
8c5c99dc 382#define FL_FOR_isa_c (FL_FOR_isa_a | FL_ISA_C | FL_CF_USP)
900ec02d
JB
383
384enum m68k_isa
385{
386 /* Traditional 68000 instruction sets. */
387 isa_00,
388 isa_10,
389 isa_20,
390 isa_40,
391 isa_cpu32,
392 /* ColdFire instruction set variants. */
393 isa_a,
394 isa_aplus,
395 isa_b,
396 isa_c,
397 isa_max
398};
399
400/* Information about one of the -march, -mcpu or -mtune arguments. */
401struct m68k_target_selection
402{
403 /* The argument being described. */
404 const char *name;
405
406 /* For -mcpu, this is the device selected by the option.
407 For -mtune and -march, it is a representative device
408 for the microarchitecture or ISA respectively. */
409 enum target_device device;
410
411 /* The M68K_DEVICE fields associated with DEVICE. See the comment
412 in m68k-devices.def for details. FAMILY is only valid for -mcpu. */
413 const char *family;
414 enum uarch_type microarch;
415 enum m68k_isa isa;
416 unsigned long flags;
417};
418
419/* A list of all devices in m68k-devices.def. Used for -mcpu selection. */
420static const struct m68k_target_selection all_devices[] =
421{
422#define M68K_DEVICE(NAME,ENUM_VALUE,FAMILY,MULTILIB,MICROARCH,ISA,FLAGS) \
423 { NAME, ENUM_VALUE, FAMILY, u##MICROARCH, ISA, FLAGS | FL_FOR_##ISA },
424#include "m68k-devices.def"
425#undef M68K_DEVICE
426 { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
427};
428
429/* A list of all ISAs, mapping each one to a representative device.
430 Used for -march selection. */
431static const struct m68k_target_selection all_isas[] =
432{
47c94d21
JM
433#define M68K_ISA(NAME,DEVICE,MICROARCH,ISA,FLAGS) \
434 { NAME, DEVICE, NULL, u##MICROARCH, ISA, FLAGS },
435#include "m68k-isas.def"
436#undef M68K_ISA
900ec02d
JB
437 { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
438};
439
440/* A list of all microarchitectures, mapping each one to a representative
441 device. Used for -mtune selection. */
442static const struct m68k_target_selection all_microarchs[] =
443{
47c94d21
JM
444#define M68K_MICROARCH(NAME,DEVICE,MICROARCH,ISA,FLAGS) \
445 { NAME, DEVICE, NULL, u##MICROARCH, ISA, FLAGS },
446#include "m68k-microarchs.def"
447#undef M68K_MICROARCH
900ec02d
JB
448 { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
449};
450\f
451/* The entries associated with the -mcpu, -march and -mtune settings,
452 or null for options that have not been used. */
453const struct m68k_target_selection *m68k_cpu_entry;
454const struct m68k_target_selection *m68k_arch_entry;
455const struct m68k_target_selection *m68k_tune_entry;
456
457/* Which CPU we are generating code for. */
458enum target_device m68k_cpu;
459
460/* Which microarchitecture to tune for. */
461enum uarch_type m68k_tune;
462
463/* Which FPU to use. */
464enum fpu_type m68k_fpu;
4af06170 465
900ec02d
JB
466/* The set of FL_* flags that apply to the target processor. */
467unsigned int m68k_cpu_flags;
29ca003a 468
03b3e271
KH
469/* The set of FL_* flags that apply to the processor to be tuned for. */
470unsigned int m68k_tune_flags;
471
29ca003a
RS
472/* Asm templates for calling or jumping to an arbitrary symbolic address,
473 or NULL if such calls or jumps are not supported. The address is held
474 in operand 0. */
475const char *m68k_symbolic_call;
476const char *m68k_symbolic_jump;
c47b0cb4
MK
477
478/* Enum variable that corresponds to m68k_symbolic_call values. */
479enum M68K_SYMBOLIC_CALL m68k_symbolic_call_var;
480
900ec02d 481\f
/* Implement TARGET_OPTION_OVERRIDE.  Resolve the -mcpu/-march/-mtune
   selections into the m68k_cpu / m68k_tune / m68k_fpu globals, derive
   default target flags from the chosen ISA, validate PIC-related option
   combinations, pick the asm templates for symbolic calls and jumps,
   and configure the instruction scheduler.  */

static void
m68k_option_override (void)
{
  const struct m68k_target_selection *entry;
  unsigned long target_mask;

  /* Translate the enum-valued command-line options into table entries.  */
  if (global_options_set.x_m68k_arch_option)
    m68k_arch_entry = &all_isas[m68k_arch_option];

  if (global_options_set.x_m68k_cpu_option)
    m68k_cpu_entry = &all_devices[(int) m68k_cpu_option];

  if (global_options_set.x_m68k_tune_option)
    m68k_tune_entry = &all_microarchs[(int) m68k_tune_option];

  /* User can choose:

     -mcpu=
     -march=
     -mtune=

     -march=ARCH should generate code that runs any processor
     implementing architecture ARCH.  -mcpu=CPU should override -march
     and should generate code that runs on processor CPU, making free
     use of any instructions that CPU understands.  -mtune=UARCH applies
     on top of -mcpu or -march and optimizes the code for UARCH.  It does
     not change the target architecture.  */
  if (m68k_cpu_entry)
    {
      /* Complain if the -march setting is for a different microarchitecture,
	 or includes flags that the -mcpu setting doesn't.  */
      if (m68k_arch_entry
	  && (m68k_arch_entry->microarch != m68k_cpu_entry->microarch
	      || (m68k_arch_entry->flags & ~m68k_cpu_entry->flags) != 0))
	warning (0, "-mcpu=%s conflicts with -march=%s",
		 m68k_cpu_entry->name, m68k_arch_entry->name);

      entry = m68k_cpu_entry;
    }
  else
    entry = m68k_arch_entry;

  /* Neither -mcpu nor -march given: fall back to the configured default.  */
  if (!entry)
    entry = all_devices + TARGET_CPU_DEFAULT;

  m68k_cpu_flags = entry->flags;

  /* Use the architecture setting to derive default values for
     certain flags.  */
  target_mask = 0;

  /* ColdFire is lenient about alignment.  */
  if (!TARGET_COLDFIRE)
    target_mask |= MASK_STRICT_ALIGNMENT;

  if ((m68k_cpu_flags & FL_BITFIELD) != 0)
    target_mask |= MASK_BITFIELD;
  if ((m68k_cpu_flags & FL_CF_HWDIV) != 0)
    target_mask |= MASK_CF_HWDIV;
  if ((m68k_cpu_flags & (FL_68881 | FL_CF_FPU)) != 0)
    target_mask |= MASK_HARD_FLOAT;
  /* Only apply the derived defaults where the user did not give an
     explicit setting on the command line.  */
  target_flags |= target_mask & ~target_flags_explicit;

  /* Set the directly-usable versions of the -mcpu and -mtune settings.  */
  m68k_cpu = entry->device;
  if (m68k_tune_entry)
    {
      m68k_tune = m68k_tune_entry->microarch;
      m68k_tune_flags = m68k_tune_entry->flags;
    }
#ifdef M68K_DEFAULT_TUNE
  else if (!m68k_cpu_entry && !m68k_arch_entry)
    {
      /* NOTE(review): this branch sets m68k_tune_flags but appears to
	 leave m68k_tune itself unset — verify against upstream whether
	 an assignment to m68k_tune belongs here.  */
      enum target_device dev;
      dev = all_microarchs[M68K_DEFAULT_TUNE].device;
      m68k_tune_flags = all_devices[dev].flags;
    }
#endif
  else
    {
      m68k_tune = entry->microarch;
      m68k_tune_flags = entry->flags;
    }

  /* Set the type of FPU.  */
  m68k_fpu = (!TARGET_HARD_FLOAT ? FPUTYPE_NONE
	      : (m68k_cpu_flags & FL_COLDFIRE) != 0 ? FPUTYPE_COLDFIRE
	      : FPUTYPE_68881);

  /* Sanity check to ensure that msep-data and mid-shared-library are not
     both specified together.  Doing so simply doesn't make sense.  */
  if (TARGET_SEP_DATA && TARGET_ID_SHARED_LIBRARY)
    error ("cannot specify both -msep-data and -mid-shared-library");

  /* If we're generating code for a separate A5 relative data segment,
     we've got to enable -fPIC as well.  This might be relaxable to
     -fpic but it hasn't been tested properly.  */
  if (TARGET_SEP_DATA || TARGET_ID_SHARED_LIBRARY)
    flag_pic = 2;

  /* -mpcrel -fPIC uses 32-bit pc-relative displacements.  Raise an
     error if the target does not support them.  */
  if (TARGET_PCREL && !TARGET_68020 && flag_pic == 2)
    error ("-mpcrel -fPIC is not currently supported on selected cpu");

  /* ??? A historic way of turning on pic, or is this intended to
     be an embedded thing that doesn't have the same name binding
     significance that it does on hosted ELF systems?  */
  if (TARGET_PCREL && flag_pic == 0)
    flag_pic = 1;

  if (!flag_pic)
    {
      m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_JSR;

      m68k_symbolic_jump = "jra %a0";
    }
  else if (TARGET_ID_SHARED_LIBRARY)
    /* All addresses must be loaded from the GOT.  */
    ;
  else if (TARGET_68020 || TARGET_ISAB || TARGET_ISAC)
    {
      if (TARGET_PCREL)
	m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_C;
      else
	m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_P;

      if (TARGET_ISAC)
	/* No unconditional long branch */;
      else if (TARGET_PCREL)
	m68k_symbolic_jump = "bra%.l %c0";
      else
	m68k_symbolic_jump = "bra%.l %p0";
      /* Turn off function cse if we are doing PIC.  We always want
	 function call to be done as `bsr foo@PLTPC'.  */
      /* ??? It's traditional to do this for -mpcrel too, but it isn't
	 clear how intentional that is.  */
      flag_no_function_cse = 1;
    }

  /* Translate the enum chosen above into the actual asm template.  */
  switch (m68k_symbolic_call_var)
    {
    case M68K_SYMBOLIC_CALL_JSR:
      m68k_symbolic_call = "jsr %a0";
      break;

    case M68K_SYMBOLIC_CALL_BSR_C:
      m68k_symbolic_call = "bsr%.l %c0";
      break;

    case M68K_SYMBOLIC_CALL_BSR_P:
      m68k_symbolic_call = "bsr%.l %p0";
      break;

    case M68K_SYMBOLIC_CALL_NONE:
      gcc_assert (m68k_symbolic_call == NULL);
      break;

    default:
      gcc_unreachable ();
    }

#ifndef ASM_OUTPUT_ALIGN_WITH_NOP
  /* Without nop-padded alignment we cannot honor alignments beyond 2.  */
  if (align_labels > 2)
    {
      warning (0, "-falign-labels=%d is not supported", align_labels);
      align_labels = 0;
    }
  if (align_loops > 2)
    {
      warning (0, "-falign-loops=%d is not supported", align_loops);
      align_loops = 0;
    }
#endif

  /* Stack-limit checking is only implemented for 68020+ here.  */
  if ((opt_fstack_limit_symbol_arg != NULL || opt_fstack_limit_register_no >= 0)
      && !TARGET_68020)
    {
      warning (0, "-fstack-limit- options are not supported on this cpu");
      opt_fstack_limit_symbol_arg = NULL;
      opt_fstack_limit_register_no = -1;
    }

  SUBTARGET_OVERRIDE_OPTIONS;

  /* Setup scheduling options.  */
  if (TUNE_CFV1)
    m68k_sched_cpu = CPU_CFV1;
  else if (TUNE_CFV2)
    m68k_sched_cpu = CPU_CFV2;
  else if (TUNE_CFV3)
    m68k_sched_cpu = CPU_CFV3;
  else if (TUNE_CFV4)
    m68k_sched_cpu = CPU_CFV4;
  else
    {
      /* No DFA description for this tuning: disable scheduling passes.  */
      m68k_sched_cpu = CPU_UNKNOWN;
      flag_schedule_insns = 0;
      flag_schedule_insns_after_reload = 0;
      flag_modulo_sched = 0;
      flag_live_range_shrinkage = 0;
    }

  if (m68k_sched_cpu != CPU_UNKNOWN)
    {
      if ((m68k_cpu_flags & (FL_CF_EMAC | FL_CF_EMAC_B)) != 0)
	m68k_sched_mac = MAC_CF_EMAC;
      else if ((m68k_cpu_flags & FL_CF_MAC) != 0)
	m68k_sched_mac = MAC_CF_MAC;
      else
	m68k_sched_mac = MAC_NO;
    }
}
7eb4f044 699
03e69b12
MP
700/* Implement TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE. */
701
702static void
703m68k_override_options_after_change (void)
704{
705 if (m68k_sched_cpu == CPU_UNKNOWN)
706 {
707 flag_schedule_insns = 0;
708 flag_schedule_insns_after_reload = 0;
709 flag_modulo_sched = 0;
1ee6eb01 710 flag_live_range_shrinkage = 0;
03e69b12
MP
711 }
712}
713
7eb4f044
NS
714/* Generate a macro of the form __mPREFIX_cpu_NAME, where PREFIX is the
715 given argument and NAME is the argument passed to -mcpu. Return NULL
716 if -mcpu was not passed. */
717
718const char *
719m68k_cpp_cpu_ident (const char *prefix)
720{
721 if (!m68k_cpu_entry)
722 return NULL;
723 return concat ("__m", prefix, "_cpu_", m68k_cpu_entry->name, NULL);
724}
725
726/* Generate a macro of the form __mPREFIX_family_NAME, where PREFIX is the
727 given argument and NAME is the name of the representative device for
728 the -mcpu argument's family. Return NULL if -mcpu was not passed. */
729
730const char *
731m68k_cpp_cpu_family (const char *prefix)
732{
733 if (!m68k_cpu_entry)
734 return NULL;
735 return concat ("__m", prefix, "_family_", m68k_cpu_entry->family, NULL);
736}
79e68feb 737\f
2bccb817
KH
738/* Return m68k_fk_interrupt_handler if FUNC has an "interrupt" or
739 "interrupt_handler" attribute and interrupt_thread if FUNC has an
740 "interrupt_thread" attribute. Otherwise, return
741 m68k_fk_normal_function. */
a4242737
KH
742
743enum m68k_function_kind
744m68k_get_function_kind (tree func)
48ed72a4
PB
745{
746 tree a;
747
fa157b28
NS
748 gcc_assert (TREE_CODE (func) == FUNCTION_DECL);
749
2bccb817
KH
750 a = lookup_attribute ("interrupt", DECL_ATTRIBUTES (func));
751 if (a != NULL_TREE)
752 return m68k_fk_interrupt_handler;
753
48ed72a4 754 a = lookup_attribute ("interrupt_handler", DECL_ATTRIBUTES (func));
a4242737
KH
755 if (a != NULL_TREE)
756 return m68k_fk_interrupt_handler;
757
758 a = lookup_attribute ("interrupt_thread", DECL_ATTRIBUTES (func));
759 if (a != NULL_TREE)
760 return m68k_fk_interrupt_thread;
761
762 return m68k_fk_normal_function;
48ed72a4
PB
763}
764
765/* Handle an attribute requiring a FUNCTION_DECL; arguments as in
766 struct attribute_spec.handler. */
767static tree
768m68k_handle_fndecl_attribute (tree *node, tree name,
769 tree args ATTRIBUTE_UNUSED,
770 int flags ATTRIBUTE_UNUSED,
771 bool *no_add_attrs)
772{
773 if (TREE_CODE (*node) != FUNCTION_DECL)
774 {
29d08eba
JM
775 warning (OPT_Wattributes, "%qE attribute only applies to functions",
776 name);
48ed72a4
PB
777 *no_add_attrs = true;
778 }
779
a4242737
KH
780 if (m68k_get_function_kind (*node) != m68k_fk_normal_function)
781 {
782 error ("multiple interrupt attributes not allowed");
783 *no_add_attrs = true;
784 }
785
786 if (!TARGET_FIDOA
787 && !strcmp (IDENTIFIER_POINTER (name), "interrupt_thread"))
788 {
789 error ("interrupt_thread is available only on fido");
790 *no_add_attrs = true;
791 }
792
48ed72a4
PB
793 return NULL_TREE;
794}
860c4900
BI
795
/* Fill in the global current_frame with the stack-frame layout of the
   current function: frame size, which data/address and FP registers
   need saving (counts and bitmasks), and the resulting save-area
   offsets.  Results are cached per funcdef_no once reload completes.  */
static void
m68k_compute_frame_layout (void)
{
  int regno, saved;
  unsigned int mask;
  enum m68k_function_kind func_kind =
    m68k_get_function_kind (current_function_decl);
  bool interrupt_handler = func_kind == m68k_fk_interrupt_handler;
  bool interrupt_thread = func_kind == m68k_fk_interrupt_thread;

  /* Only compute the frame once per function.
     Don't cache information until reload has been completed.  */
  if (current_frame.funcdef_no == current_function_funcdef_no
      && reload_completed)
    return;

  /* Round the frame size up to a multiple of 4 bytes.  */
  current_frame.size = (get_frame_size () + 3) & -4;

  mask = saved = 0;

  /* Interrupt thread does not need to save any register.  */
  if (!interrupt_thread)
    for (regno = 0; regno < 16; regno++)
      if (m68k_save_reg (regno, interrupt_handler))
	{
	  mask |= 1 << (regno - D0_REG);
	  saved++;
	}
  /* Each saved integer register occupies 4 bytes.  */
  current_frame.offset = saved * 4;
  current_frame.reg_no = saved;
  current_frame.reg_mask = mask;

  current_frame.foffset = 0;
  mask = saved = 0;
  if (TARGET_HARD_FLOAT)
    {
      /* Interrupt thread does not need to save any register.  */
      if (!interrupt_thread)
	for (regno = 16; regno < 24; regno++)
	  if (m68k_save_reg (regno, interrupt_handler))
	    {
	      mask |= 1 << (regno - FP0_REG);
	      saved++;
	    }
      current_frame.foffset = saved * TARGET_FP_REG_SIZE;
      current_frame.offset += current_frame.foffset;
    }
  current_frame.fpu_no = saved;
  current_frame.fpu_mask = mask;

  /* Remember what function this frame refers to.  */
  current_frame.funcdef_no = current_function_funcdef_no;
}
849
7b5cbb57
AS
850/* Worker function for TARGET_CAN_ELIMINATE. */
851
852bool
853m68k_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
854{
855 return (to == STACK_POINTER_REGNUM ? ! frame_pointer_needed : true);
856}
857
860c4900
BI
858HOST_WIDE_INT
859m68k_initial_elimination_offset (int from, int to)
860{
42b67c06
PB
861 int argptr_offset;
862 /* The arg pointer points 8 bytes before the start of the arguments,
863 as defined by FIRST_PARM_OFFSET. This makes it coincident with the
864 frame pointer in most frames. */
865 argptr_offset = frame_pointer_needed ? 0 : UNITS_PER_WORD;
860c4900 866 if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
42b67c06 867 return argptr_offset;
860c4900 868
3d74bc09 869 m68k_compute_frame_layout ();
860c4900 870
4761e388
NS
871 gcc_assert (to == STACK_POINTER_REGNUM);
872 switch (from)
873 {
a0a7fbc9 874 case ARG_POINTER_REGNUM:
42b67c06 875 return current_frame.offset + current_frame.size - argptr_offset;
4761e388
NS
876 case FRAME_POINTER_REGNUM:
877 return current_frame.offset + current_frame.size;
878 default:
879 gcc_unreachable ();
880 }
860c4900
BI
881}
882
97c55091
GN
/* Refer to the array `regs_ever_live' to determine which registers
   to save; `regs_ever_live[I]' is nonzero if register number I
   is ever used in the function.  This function is responsible for
   knowing which registers should not be saved even if used.
   Return true if we need to save REGNO.  */

static bool
m68k_save_reg (unsigned int regno, bool interrupt_handler)
{
  /* The PIC register must be preserved whenever the function might
     need it: explicitly, via the GOT pointer, or via constant-pool
     references introduced later.  */
  if (flag_pic && regno == PIC_REG)
    {
      if (crtl->saves_all_registers)
	return true;
      if (crtl->uses_pic_offset_table)
	return true;
      /* Reload may introduce constant pool references into a function
	 that thitherto didn't need a PIC register.  Note that the test
	 above will not catch that case because we will only set
	 crtl->uses_pic_offset_table when emitting
	 the address reloads.  */
      if (crtl->uses_const_pool)
	return true;
    }

  /* EH data registers must survive into the landing pad when the
     function calls __builtin_eh_return.  */
  if (crtl->calls_eh_return)
    {
      unsigned int i;
      for (i = 0; ; i++)
	{
	  unsigned int test = EH_RETURN_DATA_REGNO (i);
	  if (test == INVALID_REGNUM)
	    break;
	  if (test == regno)
	    return true;
	}
    }

  /* Fixed regs we never touch.  */
  if (fixed_regs[regno])
    return false;

  /* The frame pointer (if it is such) is handled specially.  */
  if (regno == FRAME_POINTER_REGNUM && frame_pointer_needed)
    return false;

  /* Interrupt handlers must also save call_used_regs
     if they are live or when calling nested functions.  */
  if (interrupt_handler)
    {
      if (df_regs_ever_live_p (regno))
	return true;

      if (!crtl->is_leaf && call_used_regs[regno])
	return true;
    }

  /* Never need to save registers that aren't touched.  */
  if (!df_regs_ever_live_p (regno))
    return false;

  /* Otherwise save everything that isn't call-clobbered.  */
  return !call_used_regs[regno];
}
946
a40ed0f3
KH
/* Emit RTL for a MOVEM or FMOVEM instruction.  BASE + OFFSET represents
   the lowest memory address.  COUNT is the number of registers to be
   moved, with register REGNO + I being moved if bit I of MASK is set.
   STORE_P specifies the direction of the move and ADJUST_STACK_P says
   whether or not this is pre-decrement (if STORE_P) or post-increment
   (if !STORE_P) operation.

   Returns the emitted insn, a single PARALLEL containing the optional
   stack adjustment followed by one SET per moved register.  */

static rtx_insn *
m68k_emit_movem (rtx base, HOST_WIDE_INT offset,
		 unsigned int count, unsigned int regno,
		 unsigned int mask, bool store_p, bool adjust_stack_p)
{
  int i;
  rtx body, addr, src, operands[2];
  machine_mode mode;

  /* One vector slot per register move, plus one for the stack
     adjustment when requested.  */
  body = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (adjust_stack_p + count));
  mode = reg_raw_mode[regno];
  i = 0;

  if (adjust_stack_p)
    {
      /* Stores pre-decrement BASE; loads post-increment it.  */
      src = plus_constant (Pmode, base,
			   (count
			    * GET_MODE_SIZE (mode)
			    * (HOST_WIDE_INT) (store_p ? -1 : 1)));
      XVECEXP (body, 0, i++) = gen_rtx_SET (base, src);
    }

  for (; mask != 0; mask >>= 1, regno++)
    if (mask & 1)
      {
	addr = plus_constant (Pmode, base, offset);
	/* operands[0] is the destination: memory when storing,
	   register when loading.  */
	operands[!store_p] = gen_frame_mem (mode, addr);
	operands[store_p] = gen_rtx_REG (mode, regno);
	XVECEXP (body, 0, i++)
	  = gen_rtx_SET (operands[0], operands[1]);
	offset += GET_MODE_SIZE (mode);
      }
  /* Every allocated vector slot must have been filled.  */
  gcc_assert (i == XVECLEN (body, 0));

  return emit_insn (body);
}
990
991/* Make INSN a frame-related instruction. */
79e68feb 992
08c148a8 993static void
c85e862a 994m68k_set_frame_related (rtx_insn *insn)
a40ed0f3
KH
995{
996 rtx body;
997 int i;
998
999 RTX_FRAME_RELATED_P (insn) = 1;
1000 body = PATTERN (insn);
1001 if (GET_CODE (body) == PARALLEL)
1002 for (i = 0; i < XVECLEN (body, 0); i++)
1003 RTX_FRAME_RELATED_P (XVECEXP (body, 0, i)) = 1;
1004}
1005
/* Emit RTL for the "prologue" define_expand.

   Allocates the frame, saves the call-saved integer and FP registers
   as recorded in current_frame, performs optional stack-limit checks,
   and loads the GOT pointer when PIC requires it.  */

void
m68k_expand_prologue (void)
{
  HOST_WIDE_INT fsize_with_regs;
  rtx limit, src, dest;

  m68k_compute_frame_layout ();

  if (flag_stack_usage_info)
    current_function_static_stack_size
      = current_frame.size + current_frame.offset;

  /* If the stack limit is a symbol, we can check it here,
     before actually allocating the space.  */
  if (crtl->limit_stack
      && GET_CODE (stack_limit_rtx) == SYMBOL_REF)
    {
      limit = plus_constant (Pmode, stack_limit_rtx, current_frame.size + 4);
      if (!m68k_legitimate_constant_p (Pmode, limit))
	{
	  /* Materialize the limit in %d0 when it cannot be used as an
	     immediate operand.  */
	  emit_move_insn (gen_rtx_REG (Pmode, D0_REG), limit);
	  limit = gen_rtx_REG (Pmode, D0_REG);
	}
      emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode,
					    stack_pointer_rtx, limit),
			       stack_pointer_rtx, limit,
			       const1_rtx));
    }

  fsize_with_regs = current_frame.size;
  if (TARGET_COLDFIRE)
    {
      /* ColdFire's move multiple instructions do not allow pre-decrement
	 addressing.  Add the size of movem saves to the initial stack
	 allocation instead.  */
      if (current_frame.reg_no >= MIN_MOVEM_REGS)
	fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
      if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
	fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
    }

  if (frame_pointer_needed)
    {
      if (fsize_with_regs == 0 && TUNE_68040)
	{
	  /* On the 68040, two separate moves are faster than link.w 0.  */
	  dest = gen_frame_mem (Pmode,
				gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
	  m68k_set_frame_related (emit_move_insn (dest, frame_pointer_rtx));
	  m68k_set_frame_related (emit_move_insn (frame_pointer_rtx,
						  stack_pointer_rtx));
	}
      else if (fsize_with_regs < 0x8000 || TARGET_68020)
	/* link.w takes a 16-bit displacement; the 68020+ link.l handles
	   larger frames in one insn.  */
	m68k_set_frame_related
	  (emit_insn (gen_link (frame_pointer_rtx,
				GEN_INT (-4 - fsize_with_regs))));
      else
	{
	  /* Frame too large for link.w on a pre-68020: link with zero
	     size, then adjust the stack pointer separately.  */
	  m68k_set_frame_related
	    (emit_insn (gen_link (frame_pointer_rtx, GEN_INT (-4))));
	  m68k_set_frame_related
	    (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-fsize_with_regs))));
	}

      /* If the frame pointer is needed, emit a special barrier that
	 will prevent the scheduler from moving stores to the frame
	 before the stack adjustment.  */
      emit_insn (gen_stack_tie (stack_pointer_rtx, frame_pointer_rtx));
    }
  else if (fsize_with_regs != 0)
    m68k_set_frame_related
      (emit_insn (gen_addsi3 (stack_pointer_rtx,
			      stack_pointer_rtx,
			      GEN_INT (-fsize_with_regs))));

  if (current_frame.fpu_mask)
    {
      gcc_assert (current_frame.fpu_no >= MIN_FMOVEM_REGS);
      if (TARGET_68881)
	/* 68881 fmovem can pre-decrement; save below the current SP.  */
	m68k_set_frame_related
	  (m68k_emit_movem (stack_pointer_rtx,
			    current_frame.fpu_no * -GET_MODE_SIZE (XFmode),
			    current_frame.fpu_no, FP0_REG,
			    current_frame.fpu_mask, true, true));
      else
	{
	  int offset;

	  /* If we're using moveml to save the integer registers,
	     the stack pointer will point to the bottom of the moveml
	     save area.  Find the stack offset of the first FP register.  */
	  if (current_frame.reg_no < MIN_MOVEM_REGS)
	    offset = 0;
	  else
	    offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
	  m68k_set_frame_related
	    (m68k_emit_movem (stack_pointer_rtx, offset,
			      current_frame.fpu_no, FP0_REG,
			      current_frame.fpu_mask, true, false));
	}
    }

  /* If the stack limit is not a symbol, check it here.
     This has the disadvantage that it may be too late...  */
  if (crtl->limit_stack)
    {
      if (REG_P (stack_limit_rtx))
	emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode, stack_pointer_rtx,
					      stack_limit_rtx),
				 stack_pointer_rtx, stack_limit_rtx,
				 const1_rtx));

      else if (GET_CODE (stack_limit_rtx) != SYMBOL_REF)
	warning (0, "stack limit expression is not supported");
    }

  if (current_frame.reg_no < MIN_MOVEM_REGS)
    {
      /* Store each register separately in the same order moveml does.  */
      int i;

      for (i = 16; i-- > 0; )
	if (current_frame.reg_mask & (1 << i))
	  {
	    src = gen_rtx_REG (SImode, D0_REG + i);
	    dest = gen_frame_mem (SImode,
				  gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
	    m68k_set_frame_related (emit_insn (gen_movsi (dest, src)));
	  }
    }
  else
    {
      if (TARGET_COLDFIRE)
	/* The required register save space has already been allocated.
	   The first register should be stored at (%sp).  */
	m68k_set_frame_related
	  (m68k_emit_movem (stack_pointer_rtx, 0,
			    current_frame.reg_no, D0_REG,
			    current_frame.reg_mask, true, false));
      else
	m68k_set_frame_related
	  (m68k_emit_movem (stack_pointer_rtx,
			    current_frame.reg_no * -GET_MODE_SIZE (SImode),
			    current_frame.reg_no, D0_REG,
			    current_frame.reg_mask, true, true));
    }

  if (!TARGET_SEP_DATA
      && crtl->uses_pic_offset_table)
    emit_insn (gen_load_got (pic_offset_table_rtx));
}
1161\f
413ac1b2
RS
1162/* Return true if a simple (return) instruction is sufficient for this
1163 instruction (i.e. if no epilogue is needed). */
79e68feb 1164
3d74bc09 1165bool
a2bda628 1166m68k_use_return_insn (void)
79e68feb 1167{
79e68feb 1168 if (!reload_completed || frame_pointer_needed || get_frame_size () != 0)
3d74bc09 1169 return false;
125ed86f 1170
a0a7fbc9 1171 m68k_compute_frame_layout ();
413ac1b2 1172 return current_frame.offset == 0;
79e68feb
RS
1173}
1174
f7e70894
RS
/* Emit RTL for the "epilogue" or "sibcall_epilogue" define_expand;
   SIBCALL_P says which.

   The function epilogue should not depend on the current stack pointer!
   It should use the frame pointer only, if there is a frame pointer.
   This is mandatory because of alloca; we also take advantage of it to
   omit stack adjustments before returning.  */

void
m68k_expand_epilogue (bool sibcall_p)
{
  HOST_WIDE_INT fsize, fsize_with_regs;
  bool big, restore_from_sp;

  m68k_compute_frame_layout ();

  fsize = current_frame.size;
  /* BIG: restore addresses are formed as -offset(%fp,%a1.l) because the
     displacement does not fit in 16 bits.  */
  big = false;
  restore_from_sp = false;

  /* FIXME : crtl->is_leaf below is too strong.
     What we really need to know there is if there could be pending
     stack adjustment needed at that point.  */
  restore_from_sp = (!frame_pointer_needed
		     || (!cfun->calls_alloca && crtl->is_leaf));

  /* fsize_with_regs is the size we need to adjust the sp when
     popping the frame.  */
  fsize_with_regs = fsize;
  if (TARGET_COLDFIRE && restore_from_sp)
    {
      /* ColdFire's move multiple instructions do not allow post-increment
	 addressing.  Add the size of movem loads to the final deallocation
	 instead.  */
      if (current_frame.reg_no >= MIN_MOVEM_REGS)
	fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
      if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
	fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
    }

  /* Displacements of 0x8000 or more do not fit the 16-bit addressing
     modes, so a scratch register (%a1) is pressed into service.  */
  if (current_frame.offset + fsize >= 0x8000
      && !restore_from_sp
      && (current_frame.reg_mask || current_frame.fpu_mask))
    {
      if (TARGET_COLDFIRE
	  && (current_frame.reg_no >= MIN_MOVEM_REGS
	      || current_frame.fpu_no >= MIN_FMOVEM_REGS))
	{
	  /* ColdFire's move multiple instructions do not support the
	     (d8,Ax,Xi) addressing mode, so we're as well using a normal
	     stack-based restore.  */
	  emit_move_insn (gen_rtx_REG (Pmode, A1_REG),
			  GEN_INT (-(current_frame.offset + fsize)));
	  emit_insn (gen_blockage ());
	  emit_insn (gen_addsi3 (stack_pointer_rtx,
				 gen_rtx_REG (Pmode, A1_REG),
				 frame_pointer_rtx));
	  restore_from_sp = true;
	}
      else
	{
	  emit_move_insn (gen_rtx_REG (Pmode, A1_REG), GEN_INT (-fsize));
	  fsize = 0;
	  big = true;
	}
    }

  if (current_frame.reg_no < MIN_MOVEM_REGS)
    {
      /* Restore each register separately in the same order moveml does.  */
      int i;
      HOST_WIDE_INT offset;

      offset = current_frame.offset + fsize;
      for (i = 0; i < 16; i++)
	if (current_frame.reg_mask & (1 << i))
	  {
	    rtx addr;

	    if (big)
	      {
		/* Generate the address -OFFSET(%fp,%a1.l).  */
		addr = gen_rtx_REG (Pmode, A1_REG);
		addr = gen_rtx_PLUS (Pmode, addr, frame_pointer_rtx);
		addr = plus_constant (Pmode, addr, -offset);
	      }
	    else if (restore_from_sp)
	      addr = gen_rtx_POST_INC (Pmode, stack_pointer_rtx);
	    else
	      addr = plus_constant (Pmode, frame_pointer_rtx, -offset);
	    emit_move_insn (gen_rtx_REG (SImode, D0_REG + i),
			    gen_frame_mem (SImode, addr));
	    offset -= GET_MODE_SIZE (SImode);
	  }
    }
  else if (current_frame.reg_mask)
    {
      if (big)
	m68k_emit_movem (gen_rtx_PLUS (Pmode,
				       gen_rtx_REG (Pmode, A1_REG),
				       frame_pointer_rtx),
			 -(current_frame.offset + fsize),
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false, false);
      else if (restore_from_sp)
	m68k_emit_movem (stack_pointer_rtx, 0,
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false,
			 !TARGET_COLDFIRE);
      else
	m68k_emit_movem (frame_pointer_rtx,
			 -(current_frame.offset + fsize),
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false, false);
    }

  if (current_frame.fpu_no > 0)
    {
      if (big)
	m68k_emit_movem (gen_rtx_PLUS (Pmode,
				       gen_rtx_REG (Pmode, A1_REG),
				       frame_pointer_rtx),
			 -(current_frame.foffset + fsize),
			 current_frame.fpu_no, FP0_REG,
			 current_frame.fpu_mask, false, false);
      else if (restore_from_sp)
	{
	  if (TARGET_COLDFIRE)
	    {
	      int offset;

	      /* If we used moveml to restore the integer registers, the
		 stack pointer will still point to the bottom of the moveml
		 save area.  Find the stack offset of the first FP
		 register.  */
	      if (current_frame.reg_no < MIN_MOVEM_REGS)
		offset = 0;
	      else
		offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
	      m68k_emit_movem (stack_pointer_rtx, offset,
			       current_frame.fpu_no, FP0_REG,
			       current_frame.fpu_mask, false, false);
	    }
	  else
	    m68k_emit_movem (stack_pointer_rtx, 0,
			     current_frame.fpu_no, FP0_REG,
			     current_frame.fpu_mask, false, true);
	}
      else
	m68k_emit_movem (frame_pointer_rtx,
			 -(current_frame.foffset + fsize),
			 current_frame.fpu_no, FP0_REG,
			 current_frame.fpu_mask, false, false);
    }

  /* Keep the scheduler from moving the restores past the final stack
     adjustment / unlink.  */
  emit_insn (gen_blockage ());
  if (frame_pointer_needed)
    emit_insn (gen_unlink (frame_pointer_rtx));
  else if (fsize_with_regs)
    emit_insn (gen_addsi3 (stack_pointer_rtx,
			   stack_pointer_rtx,
			   GEN_INT (fsize_with_regs)));

  if (crtl->calls_eh_return)
    emit_insn (gen_addsi3 (stack_pointer_rtx,
			   stack_pointer_rtx,
			   EH_RETURN_STACKADJ_RTX));

  if (!sibcall_p)
    emit_jump_insn (ret_rtx);
}
1346\f
8a4a2253 1347/* Return true if X is a valid comparison operator for the dbcc
64a184e9
RS
1348 instruction.
1349
1350 Note it rejects floating point comparison operators.
1351 (In the future we could use Fdbcc).
1352
1353 It also rejects some comparisons when CC_NO_OVERFLOW is set. */
1354
1355int
ef4bddc2 1356valid_dbcc_comparison_p_2 (rtx x, machine_mode mode ATTRIBUTE_UNUSED)
64a184e9 1357{
64a184e9
RS
1358 switch (GET_CODE (x))
1359 {
64a184e9
RS
1360 case EQ: case NE: case GTU: case LTU:
1361 case GEU: case LEU:
1362 return 1;
1363
1364 /* Reject some when CC_NO_OVERFLOW is set. This may be over
1365 conservative */
1366 case GT: case LT: case GE: case LE:
1367 return ! (cc_prev_status.flags & CC_NO_OVERFLOW);
1368 default:
1369 return 0;
1370 }
1371}
1372
a0ab749a 1373/* Return nonzero if flags are currently in the 68881 flag register. */
6a0f85e3 1374int
8a4a2253 1375flags_in_68881 (void)
6a0f85e3
TG
1376{
1377 /* We could add support for these in the future */
1378 return cc_status.flags & CC_IN_68881;
1379}
1380
db5e2d51
MK
1381/* Return true if PARALLEL contains register REGNO. */
1382static bool
1383m68k_reg_present_p (const_rtx parallel, unsigned int regno)
1384{
1385 int i;
1386
1387 if (REG_P (parallel) && REGNO (parallel) == regno)
1388 return true;
1389
1390 if (GET_CODE (parallel) != PARALLEL)
1391 return false;
1392
1393 for (i = 0; i < XVECLEN (parallel, 0); ++i)
1394 {
1395 const_rtx x;
1396
1397 x = XEXP (XVECEXP (parallel, 0, i), 0);
1398 if (REG_P (x) && REGNO (x) == regno)
1399 return true;
1400 }
1401
1402 return false;
1403}
1404
/* Implement TARGET_FUNCTION_OK_FOR_SIBCALL_P.

   DECL is the callee's declaration (may be NULL for indirect calls)
   and EXP is the CALL_EXPR.  Return true if the call may be turned
   into a sibling call.  */

static bool
m68k_ok_for_sibcall_p (tree decl, tree exp)
{
  enum m68k_function_kind kind;

  /* We cannot use sibcalls for nested functions because we use the
     static chain register for indirect calls.  */
  if (CALL_EXPR_STATIC_CHAIN (exp))
    return false;

  if (!VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
    {
      /* Check that the return value locations are the same.  For
	 example that we aren't returning a value from the sibling in
	 a D0 register but then need to transfer it to a A0 register.  */
      rtx cfun_value;
      rtx call_value;

      cfun_value = FUNCTION_VALUE (TREE_TYPE (DECL_RESULT (cfun->decl)),
				   cfun->decl);
      call_value = FUNCTION_VALUE (TREE_TYPE (exp), decl);

      /* Check that the values are equal or that the result the callee
	 function returns is superset of what the current function returns.  */
      if (!(rtx_equal_p (cfun_value, call_value)
	    || (REG_P (cfun_value)
		&& m68k_reg_present_p (call_value, REGNO (cfun_value)))))
	return false;
    }

  kind = m68k_get_function_kind (current_function_decl);
  if (kind == m68k_fk_normal_function)
    /* We can always sibcall from a normal function, because it's
       undefined if it is calling an interrupt function.  */
    return true;

  /* Otherwise we can only sibcall if the function kind is known to be
     the same.  */
  if (decl && m68k_get_function_kind (decl) == kind)
    return true;

  return false;
}
1450
13d3961c
NF
/* Implement TARGET_FUNCTION_ARG.  On the m68k all args are always
   pushed, so no argument is ever passed in a register: returning
   NULL_RTX directs the middle-end to place every argument on the
   stack.  */

static rtx
m68k_function_arg (cumulative_args_t cum ATTRIBUTE_UNUSED,
		   machine_mode mode ATTRIBUTE_UNUSED,
		   const_tree type ATTRIBUTE_UNUSED,
		   bool named ATTRIBUTE_UNUSED)
{
  return NULL_RTX;
}
1461
1462static void
ef4bddc2 1463m68k_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
13d3961c
NF
1464 const_tree type, bool named ATTRIBUTE_UNUSED)
1465{
d5cc9181
JR
1466 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
1467
13d3961c
NF
1468 *cum += (mode != BLKmode
1469 ? (GET_MODE_SIZE (mode) + 3) & ~3
1470 : (int_size_in_bytes (type) + 3) & ~3);
1471}
1472
29ca003a
RS
1473/* Convert X to a legitimate function call memory reference and return the
1474 result. */
a2ef3db7 1475
29ca003a
RS
1476rtx
1477m68k_legitimize_call_address (rtx x)
1478{
1479 gcc_assert (MEM_P (x));
1480 if (call_operand (XEXP (x, 0), VOIDmode))
1481 return x;
1482 return replace_equiv_address (x, force_reg (Pmode, XEXP (x, 0)));
a2ef3db7
BI
1483}
1484
f7e70894
RS
1485/* Likewise for sibling calls. */
1486
1487rtx
1488m68k_legitimize_sibcall_address (rtx x)
1489{
1490 gcc_assert (MEM_P (x));
1491 if (sibcall_operand (XEXP (x, 0), VOIDmode))
1492 return x;
1493
1494 emit_move_insn (gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM), XEXP (x, 0));
1495 return replace_equiv_address (x, gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM));
1496}
1497
506d7b68
PB
/* Convert X to a legitimate address and return it if successful.  Otherwise
   return X.

   For the 68000, we handle X+REG by loading X into a register R and
   using R+REG.  R will go in an address reg and indexing will be used.
   However, if REG is a broken-out memory address or multiplication,
   nothing needs to be done because REG can certainly go in an address reg.  */

static rtx
m68k_legitimize_address (rtx x, rtx oldx, machine_mode mode)
{
  if (m68k_tls_symbol_p (x))
    return m68k_legitimize_tls_address (x);

  if (GET_CODE (x) == PLUS)
    {
      /* CH tracks whether X has changed (or was already different from
	 OLDX); COPIED ensures X is copied at most once before being
	 mutated in place.  */
      int ch = (x) != (oldx);
      int copied = 0;

#define COPY_ONCE(Y) if (!copied) { Y = copy_rtx (Y); copied = ch = 1; }

      /* Break out multiplications so each factor lands in a register.  */
      if (GET_CODE (XEXP (x, 0)) == MULT)
	{
	  COPY_ONCE (x);
	  XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
	}
      if (GET_CODE (XEXP (x, 1)) == MULT)
	{
	  COPY_ONCE (x);
	  XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
	}
      if (ch)
	{
	  if (GET_CODE (XEXP (x, 1)) == REG
	      && GET_CODE (XEXP (x, 0)) == REG)
	    {
	      /* REG+REG: already indexable.  ColdFire FPU float accesses
		 cannot use the indexed mode, so force the sum instead.  */
	      if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
		{
		  COPY_ONCE (x);
		  x = force_operand (x, 0);
		}
	      return x;
	    }
	  if (memory_address_p (mode, x))
	    return x;
	}
      /* X+REG (or sign-extended-HImode-REG): load the other term into a
	 fresh register so indexing can be used.  */
      if (GET_CODE (XEXP (x, 0)) == REG
	  || (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
	      && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
	      && GET_MODE (XEXP (XEXP (x, 0), 0)) == HImode))
	{
	  rtx temp = gen_reg_rtx (Pmode);
	  rtx val = force_operand (XEXP (x, 1), 0);
	  emit_move_insn (temp, val);
	  COPY_ONCE (x);
	  XEXP (x, 1) = temp;
	  if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
	      && GET_CODE (XEXP (x, 0)) == REG)
	    x = force_operand (x, 0);
	}
      else if (GET_CODE (XEXP (x, 1)) == REG
	       || (GET_CODE (XEXP (x, 1)) == SIGN_EXTEND
		   && GET_CODE (XEXP (XEXP (x, 1), 0)) == REG
		   && GET_MODE (XEXP (XEXP (x, 1), 0)) == HImode))
	{
	  rtx temp = gen_reg_rtx (Pmode);
	  rtx val = force_operand (XEXP (x, 0), 0);
	  emit_move_insn (temp, val);
	  COPY_ONCE (x);
	  XEXP (x, 0) = temp;
	  if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
	      && GET_CODE (XEXP (x, 1)) == REG)
	    x = force_operand (x, 0);
	}
    }

  return x;
}
1576
1577
64a184e9
RS
/* Output a dbCC; jCC sequence.  Note we do not handle the
   floating point version of this sequence (Fdbcc).  We also
   do not handle alternative conditions when CC_NO_OVERFLOW is
   set.  It is assumed that valid_dbcc_comparison_p and flags_in_68881 will
   kick those out before we get here.

   operands[0] is the loop counter, operands[1] the loop-top label,
   operands[2] the exit label, and operands[3] the comparison.  */

void
output_dbcc_and_branch (rtx *operands)
{
  switch (GET_CODE (operands[3]))
    {
    case EQ:
      output_asm_insn ("dbeq %0,%l1\n\tjeq %l2", operands);
      break;

    case NE:
      output_asm_insn ("dbne %0,%l1\n\tjne %l2", operands);
      break;

    case GT:
      output_asm_insn ("dbgt %0,%l1\n\tjgt %l2", operands);
      break;

    case GTU:
      output_asm_insn ("dbhi %0,%l1\n\tjhi %l2", operands);
      break;

    case LT:
      output_asm_insn ("dblt %0,%l1\n\tjlt %l2", operands);
      break;

    case LTU:
      output_asm_insn ("dbcs %0,%l1\n\tjcs %l2", operands);
      break;

    case GE:
      output_asm_insn ("dbge %0,%l1\n\tjge %l2", operands);
      break;

    case GEU:
      output_asm_insn ("dbcc %0,%l1\n\tjcc %l2", operands);
      break;

    case LE:
      output_asm_insn ("dble %0,%l1\n\tjle %l2", operands);
      break;

    case LEU:
      output_asm_insn ("dbls %0,%l1\n\tjls %l2", operands);
      break;

    default:
      gcc_unreachable ();
    }

  /* If the decrement is to be done in SImode, then we have
     to compensate for the fact that dbcc decrements in HImode.  */
  switch (GET_MODE (operands[0]))
    {
    case E_SImode:
      output_asm_insn ("clr%.w %0\n\tsubq%.l #1,%0\n\tjpl %l1", operands);
      break;

    case E_HImode:
      break;

    default:
      gcc_unreachable ();
    }
}
1648
/* Output assembly to set DEST from the DImode comparison
   OPERAND1 <OP> OPERAND2 using an Scc instruction.  The high and low
   words are compared separately; LOPERANDS[4] labels the point where
   the high-word comparison already decided the result.  */

const char *
output_scc_di (rtx op, rtx operand1, rtx operand2, rtx dest)
{
  rtx loperands[7];
  enum rtx_code op_code = GET_CODE (op);

  /* This does not produce a useful cc.  */
  CC_STATUS_INIT;

  /* The m68k cmp.l instruction requires operand1 to be a reg as used
     below.  Swap the operands and change the op if these requirements
     are not fulfilled.  */
  if (GET_CODE (operand2) == REG && GET_CODE (operand1) != REG)
    {
      rtx tmp = operand1;

      operand1 = operand2;
      operand2 = tmp;
      op_code = swap_condition (op_code);
    }
  /* loperands[0]/[1] are the high/low words of operand1; for a REG the
     low word is the next hard register, for a MEM it is at offset 4.  */
  loperands[0] = operand1;
  if (GET_CODE (operand1) == REG)
    loperands[1] = gen_rtx_REG (SImode, REGNO (operand1) + 1);
  else
    loperands[1] = adjust_address (operand1, SImode, 4);
  if (operand2 != const0_rtx)
    {
      loperands[2] = operand2;
      if (GET_CODE (operand2) == REG)
	loperands[3] = gen_rtx_REG (SImode, REGNO (operand2) + 1);
      else
	loperands[3] = adjust_address (operand2, SImode, 4);
    }
  loperands[4] = gen_label_rtx ();
  if (operand2 != const0_rtx)
    output_asm_insn ("cmp%.l %2,%0\n\tjne %l4\n\tcmp%.l %3,%1", loperands);
  else
    {
      /* Comparison against zero: tst.l where available; address
	 registers on pre-68020 require cmp.w #0 instead.  */
      if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[0]))
	output_asm_insn ("tst%.l %0", loperands);
      else
	output_asm_insn ("cmp%.w #0,%0", loperands);

      output_asm_insn ("jne %l4", loperands);

      if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[1]))
	output_asm_insn ("tst%.l %1", loperands);
      else
	output_asm_insn ("cmp%.w #0,%1", loperands);
    }

  loperands[5] = dest;

  /* For the signed orderings a second label (loperands[6]) skips the
     signed Scc used when the high words differed.  */
  switch (op_code)
    {
    case EQ:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("seq %5", loperands);
      break;

    case NE:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sne %5", loperands);
      break;

    case GT:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("shi %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sgt %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case GTU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("shi %5", loperands);
      break;

    case LT:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("scs %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("slt %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case LTU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("scs %5", loperands);
      break;

    case GE:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("scc %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sge %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case GEU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("scc %5", loperands);
      break;

    case LE:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("sls %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sle %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case LEU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sls %5", loperands);
      break;

    default:
      gcc_unreachable ();
    }
  return "";
}
1785
/* Output a bit-test instruction testing bit COUNTOP of DATAOP, or a
   cheaper equivalent (tst / move-to-ccr) when the tested bit and the
   following insn permit it.  INSN is the btst insn itself; SIGNPOS is
   the sign-bit position for the access width (e.g. 7 for byte).
   Also updates cc_status to describe the resulting flags.  */

const char *
output_btst (rtx *operands, rtx countop, rtx dataop, rtx_insn *insn, int signpos)
{
  operands[0] = countop;
  operands[1] = dataop;

  if (GET_CODE (countop) == CONST_INT)
    {
      register int count = INTVAL (countop);
      /* If COUNT is bigger than size of storage unit in use,
	 advance to the containing unit of same size.  */
      if (count > signpos)
	{
	  int offset = (count & ~signpos) / 8;
	  count = count & signpos;
	  operands[1] = dataop = adjust_address (dataop, QImode, offset);
	}
      if (count == signpos)
	cc_status.flags = CC_NOT_POSITIVE | CC_Z_IN_NOT_N;
      else
	cc_status.flags = CC_NOT_NEGATIVE | CC_Z_IN_NOT_N;

      /* These three statements used to use next_insns_test_no...
	 but it appears that this should do the same job.  */
      if (count == 31
	  && next_insn_tests_no_inequality (insn))
	return "tst%.l %1";
      if (count == 15
	  && next_insn_tests_no_inequality (insn))
	return "tst%.w %1";
      if (count == 7
	  && next_insn_tests_no_inequality (insn))
	return "tst%.b %1";
      /* Try to use `movew to ccr' followed by the appropriate branch insn.
	 On some m68k variants unfortunately that's slower than btst.
	 On 68000 and higher, that should also work for all HImode operands. */
      if (TUNE_CPU32 || TARGET_COLDFIRE || optimize_size)
	{
	  if (count == 3 && DATA_REG_P (operands[1])
	      && next_insn_tests_no_inequality (insn))
	    {
	      cc_status.flags = CC_NOT_NEGATIVE | CC_Z_IN_NOT_N | CC_NO_OVERFLOW;
	      return "move%.w %1,%%ccr";
	    }
	  if (count == 2 && DATA_REG_P (operands[1])
	      && next_insn_tests_no_inequality (insn))
	    {
	      cc_status.flags = CC_NOT_NEGATIVE | CC_INVERTED | CC_NO_OVERFLOW;
	      return "move%.w %1,%%ccr";
	    }
	  /* count == 1 followed by bvc/bvs and
	     count == 0 followed by bcc/bcs are also possible, but need
	     m68k-specific CC_Z_IN_NOT_V and CC_Z_IN_NOT_C flags. */
	}

      cc_status.flags = CC_NOT_NEGATIVE;
    }
  return "btst %0,%1";
}
79e68feb 1845\f
fc2241eb
RS
1846/* Return true if X is a legitimate base register. STRICT_P says
1847 whether we need strict checking. */
1848
1849bool
1850m68k_legitimate_base_reg_p (rtx x, bool strict_p)
1851{
1852 /* Allow SUBREG everywhere we allow REG. This results in better code. */
1853 if (!strict_p && GET_CODE (x) == SUBREG)
1854 x = SUBREG_REG (x);
1855
1856 return (REG_P (x)
1857 && (strict_p
1858 ? REGNO_OK_FOR_BASE_P (REGNO (x))
bf32249e 1859 : REGNO_OK_FOR_BASE_NONSTRICT_P (REGNO (x))));
fc2241eb
RS
1860}
1861
1862/* Return true if X is a legitimate index register. STRICT_P says
1863 whether we need strict checking. */
1864
1865bool
1866m68k_legitimate_index_reg_p (rtx x, bool strict_p)
1867{
1868 if (!strict_p && GET_CODE (x) == SUBREG)
1869 x = SUBREG_REG (x);
1870
1871 return (REG_P (x)
1872 && (strict_p
1873 ? REGNO_OK_FOR_INDEX_P (REGNO (x))
bf32249e 1874 : REGNO_OK_FOR_INDEX_NONSTRICT_P (REGNO (x))));
fc2241eb
RS
1875}
1876
1877/* Return true if X is a legitimate index expression for a (d8,An,Xn) or
1878 (bd,An,Xn) addressing mode. Fill in the INDEX and SCALE fields of
1879 ADDRESS if so. STRICT_P says whether we need strict checking. */
1880
1881static bool
1882m68k_decompose_index (rtx x, bool strict_p, struct m68k_address *address)
1883{
1884 int scale;
1885
1886 /* Check for a scale factor. */
1887 scale = 1;
1888 if ((TARGET_68020 || TARGET_COLDFIRE)
1889 && GET_CODE (x) == MULT
1890 && GET_CODE (XEXP (x, 1)) == CONST_INT
1891 && (INTVAL (XEXP (x, 1)) == 2
1892 || INTVAL (XEXP (x, 1)) == 4
1893 || (INTVAL (XEXP (x, 1)) == 8
1894 && (TARGET_COLDFIRE_FPU || !TARGET_COLDFIRE))))
1895 {
1896 scale = INTVAL (XEXP (x, 1));
1897 x = XEXP (x, 0);
1898 }
1899
1900 /* Check for a word extension. */
1901 if (!TARGET_COLDFIRE
1902 && GET_CODE (x) == SIGN_EXTEND
1903 && GET_MODE (XEXP (x, 0)) == HImode)
1904 x = XEXP (x, 0);
1905
1906 if (m68k_legitimate_index_reg_p (x, strict_p))
1907 {
1908 address->scale = scale;
1909 address->index = x;
1910 return true;
1911 }
1912
1913 return false;
1914}
1915
7ffb5e78
RS
1916/* Return true if X is an illegitimate symbolic constant. */
1917
1918bool
1919m68k_illegitimate_symbolic_constant_p (rtx x)
1920{
1921 rtx base, offset;
1922
1923 if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P)
1924 {
1925 split_const (x, &base, &offset);
1926 if (GET_CODE (base) == SYMBOL_REF
1927 && !offset_within_block_p (base, INTVAL (offset)))
1928 return true;
1929 }
75df395f 1930 return m68k_tls_reference_p (x, false);
7ffb5e78
RS
1931}
1932
fbbf66e7
RS
1933/* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
1934
1935static bool
ef4bddc2 1936m68k_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
fbbf66e7
RS
1937{
1938 return m68k_illegitimate_symbolic_constant_p (x);
1939}
1940
fc2241eb
RS
1941/* Return true if X is a legitimate constant address that can reach
1942 bytes in the range [X, X + REACH). STRICT_P says whether we need
1943 strict checking. */
1944
1945static bool
1946m68k_legitimate_constant_address_p (rtx x, unsigned int reach, bool strict_p)
1947{
1948 rtx base, offset;
1949
1950 if (!CONSTANT_ADDRESS_P (x))
1951 return false;
1952
1953 if (flag_pic
1954 && !(strict_p && TARGET_PCREL)
1955 && symbolic_operand (x, VOIDmode))
1956 return false;
1957
1958 if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P && reach > 1)
1959 {
1960 split_const (x, &base, &offset);
1961 if (GET_CODE (base) == SYMBOL_REF
1962 && !offset_within_block_p (base, INTVAL (offset) + reach - 1))
1963 return false;
1964 }
1965
75df395f 1966 return !m68k_tls_reference_p (x, false);
fc2241eb
RS
1967}
1968
1969/* Return true if X is a LABEL_REF for a jump table. Assume that unplaced
1970 labels will become jump tables. */
1971
1972static bool
1973m68k_jump_table_ref_p (rtx x)
1974{
1975 if (GET_CODE (x) != LABEL_REF)
1976 return false;
1977
b32d5189
DM
1978 rtx_insn *insn = as_a <rtx_insn *> (XEXP (x, 0));
1979 if (!NEXT_INSN (insn) && !PREV_INSN (insn))
fc2241eb
RS
1980 return true;
1981
b32d5189
DM
1982 insn = next_nonnote_insn (insn);
1983 return insn && JUMP_TABLE_DATA_P (insn);
fc2241eb
RS
1984}
1985
/* Return true if X is a legitimate address for values of mode MODE.
   STRICT_P says whether strict checking is needed.  If the address
   is valid, describe its components in *ADDRESS.

   The checks below are ordered by m68k addressing mode number; the
   first mode that matches wins.  REACH is the number of bytes the
   access covers, used to keep displacements in range.  */

static bool
m68k_decompose_address (machine_mode mode, rtx x,
			bool strict_p, struct m68k_address *address)
{
  unsigned int reach;

  memset (address, 0, sizeof (*address));

  if (mode == BLKmode)
    reach = 1;
  else
    reach = GET_MODE_SIZE (mode);

  /* Check for (An) (mode 2).  */
  if (m68k_legitimate_base_reg_p (x, strict_p))
    {
      address->base = x;
      return true;
    }

  /* Check for -(An) and (An)+ (modes 3 and 4).  */
  if ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_INC)
      && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
    {
      address->code = GET_CODE (x);
      address->base = XEXP (x, 0);
      return true;
    }

  /* Check for (d16,An) (mode 5).  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && IN_RANGE (INTVAL (XEXP (x, 1)), -0x8000, 0x8000 - reach)
      && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
    {
      address->base = XEXP (x, 0);
      address->offset = XEXP (x, 1);
      return true;
    }

  /* Check for GOT loads.  These are (bd,An,Xn) addresses if
     TARGET_68020 && flag_pic == 2, otherwise they are (d16,An)
     addresses.  */
  if (GET_CODE (x) == PLUS
      && XEXP (x, 0) == pic_offset_table_rtx)
    {
      /* As we are processing a PLUS, do not unwrap RELOC32 symbols --
	 they are invalid in this context.  */
      if (m68k_unwrap_symbol (XEXP (x, 1), false) != XEXP (x, 1))
	{
	  address->base = XEXP (x, 0);
	  address->offset = XEXP (x, 1);
	  return true;
	}
    }

  /* The ColdFire FPU only accepts addressing modes 2-5.  */
  if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
    return false;

  /* Check for (xxx).w and (xxx).l.  Also, in the TARGET_PCREL case,
     check for (d16,PC) or (bd,PC,Xn) with a suppressed index register.
     All these modes are variations of mode 7.  */
  if (m68k_legitimate_constant_address_p (x, reach, strict_p))
    {
      address->offset = x;
      return true;
    }

  /* Check for (d8,PC,Xn), a mode 7 form.  This case is needed for
     tablejumps.

     ??? do_tablejump creates these addresses before placing the target
     label, so we have to assume that unplaced labels are jump table
     references.  It seems unlikely that we would ever generate indexed
     accesses to unplaced labels in other cases.  */
  if (GET_CODE (x) == PLUS
      && m68k_jump_table_ref_p (XEXP (x, 1))
      && m68k_decompose_index (XEXP (x, 0), strict_p, address))
    {
      address->offset = XEXP (x, 1);
      return true;
    }

  /* Everything hereafter deals with (d8,An,Xn.SIZE*SCALE) or
     (bd,An,Xn.SIZE*SCALE) addresses.  */

  if (TARGET_68020)
    {
      /* Check for a nonzero base displacement.  */
      if (GET_CODE (x) == PLUS
	  && m68k_legitimate_constant_address_p (XEXP (x, 1), reach, strict_p))
	{
	  address->offset = XEXP (x, 1);
	  x = XEXP (x, 0);
	}

      /* Check for a suppressed index register.  */
      if (m68k_legitimate_base_reg_p (x, strict_p))
	{
	  address->base = x;
	  return true;
	}

      /* Check for a suppressed base register.  Do not allow this case
	 for non-symbolic offsets as it effectively gives gcc freedom
	 to treat data registers as base registers, which can generate
	 worse code.  */
      if (address->offset
	  && symbolic_operand (address->offset, VOIDmode)
	  && m68k_decompose_index (x, strict_p, address))
	return true;
    }
  else
    {
      /* Check for a nonzero base displacement.  */
      if (GET_CODE (x) == PLUS
	  && GET_CODE (XEXP (x, 1)) == CONST_INT
	  && IN_RANGE (INTVAL (XEXP (x, 1)), -0x80, 0x80 - reach))
	{
	  address->offset = XEXP (x, 1);
	  x = XEXP (x, 0);
	}
    }

  /* We now expect the sum of a base and an index.  */
  if (GET_CODE (x) == PLUS)
    {
      if (m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p)
	  && m68k_decompose_index (XEXP (x, 1), strict_p, address))
	{
	  address->base = XEXP (x, 0);
	  return true;
	}

      if (m68k_legitimate_base_reg_p (XEXP (x, 1), strict_p)
	  && m68k_decompose_index (XEXP (x, 0), strict_p, address))
	{
	  address->base = XEXP (x, 1);
	  return true;
	}
    }
  return false;
}
2134
2135/* Return true if X is a legitimate address for values of mode MODE.
2136 STRICT_P says whether strict checking is needed. */
2137
2138bool
ef4bddc2 2139m68k_legitimate_address_p (machine_mode mode, rtx x, bool strict_p)
fc2241eb
RS
2140{
2141 struct m68k_address address;
2142
2143 return m68k_decompose_address (mode, x, strict_p, &address);
2144}
2145
2146/* Return true if X is a memory, describing its address in ADDRESS if so.
2147 Apply strict checking if called during or after reload. */
2148
2149static bool
2150m68k_legitimate_mem_p (rtx x, struct m68k_address *address)
2151{
2152 return (MEM_P (x)
2153 && m68k_decompose_address (GET_MODE (x), XEXP (x, 0),
2154 reload_in_progress || reload_completed,
2155 address));
2156}
2157
1a627b35
RS
2158/* Implement TARGET_LEGITIMATE_CONSTANT_P. */
2159
2160bool
ef4bddc2 2161m68k_legitimate_constant_p (machine_mode mode, rtx x)
1a627b35
RS
2162{
2163 return mode != XFmode && !m68k_illegitimate_symbolic_constant_p (x);
2164}
2165
fc2241eb
RS
2166/* Return true if X matches the 'Q' constraint. It must be a memory
2167 with a base address and no constant offset or index. */
2168
2169bool
2170m68k_matches_q_p (rtx x)
2171{
2172 struct m68k_address address;
2173
2174 return (m68k_legitimate_mem_p (x, &address)
2175 && address.code == UNKNOWN
2176 && address.base
2177 && !address.offset
2178 && !address.index);
2179}
2180
2181/* Return true if X matches the 'U' constraint. It must be a base address
2182 with a constant offset and no index. */
2183
2184bool
2185m68k_matches_u_p (rtx x)
2186{
2187 struct m68k_address address;
2188
2189 return (m68k_legitimate_mem_p (x, &address)
2190 && address.code == UNKNOWN
2191 && address.base
2192 && address.offset
2193 && !address.index);
2194}
2195
75df395f
MK
2196/* Return GOT pointer. */
2197
2198static rtx
2199m68k_get_gp (void)
2200{
2201 if (pic_offset_table_rtx == NULL_RTX)
2202 pic_offset_table_rtx = gen_rtx_REG (Pmode, PIC_REG);
2203
2204 crtl->uses_pic_offset_table = 1;
2205
2206 return pic_offset_table_rtx;
2207}
2208
/* M68K relocations, used to distinguish GOT and TLS relocations in UNSPEC
   wrappers.  */
enum m68k_reloc { RELOC_GOT, RELOC_TLSGD, RELOC_TLSLDM, RELOC_TLSLDO,
		  RELOC_TLSIE, RELOC_TLSLE };

/* True for every relocation kind except the plain GOT one.  */
#define TLS_RELOC_P(RELOC) ((RELOC) != RELOC_GOT)
2215
/* Wrap symbol X into unspec representing relocation RELOC.
   BASE_REG - register that should be added to the result.
   TEMP_REG - if non-null, temporary register.

   May emit instructions (move + add) in the -mxgot/-mxtls ColdFire
   case; otherwise returns a pure (plus base_reg (const (unspec)))
   expression.  */

static rtx
m68k_wrap_symbol (rtx x, enum m68k_reloc reloc, rtx base_reg, rtx temp_reg)
{
  bool use_x_p;

  /* -mxgot applies to GOT-based references, -mxtls to TLS ones.  */
  use_x_p = (base_reg == pic_offset_table_rtx) ? TARGET_XGOT : TARGET_XTLS;

  if (TARGET_COLDFIRE && use_x_p)
    /* When compiling with -mx{got, tls} switch the code will look like this:

       move.l <X>@<RELOC>,<TEMP_REG>
       add.l <BASE_REG>,<TEMP_REG>  */
    {
      /* Wrap X in UNSPEC_??? to tip m68k_output_addr_const_extra
	 to put @RELOC after reference.  */
      x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)),
			  UNSPEC_RELOC32);
      x = gen_rtx_CONST (Pmode, x);

      if (temp_reg == NULL)
	{
	  gcc_assert (can_create_pseudo_p ());
	  temp_reg = gen_reg_rtx (Pmode);
	}

      emit_move_insn (temp_reg, x);
      emit_insn (gen_addsi3 (temp_reg, temp_reg, base_reg));
      x = temp_reg;
    }
  else
    {
      x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)),
			  UNSPEC_RELOC16);
      x = gen_rtx_CONST (Pmode, x);

      x = gen_rtx_PLUS (Pmode, base_reg, x);
    }

  return x;
}
2260
/* Helper for m68k_unwrap_symbol.
   Also, if unwrapping was successful (that is if (ORIG != <return value>)),
   sets *RELOC_PTR to relocation type for the symbol.
   If the expression is not a recognized wrapper, ORIG is returned
   unchanged and *RELOC_PTR is left untouched.  */

static rtx
m68k_unwrap_symbol_1 (rtx orig, bool unwrap_reloc32_p,
		      enum m68k_reloc *reloc_ptr)
{
  if (GET_CODE (orig) == CONST)
    {
      rtx x;
      enum m68k_reloc dummy;

      x = XEXP (orig, 0);

      /* Allow callers that don't care about the relocation type.  */
      if (reloc_ptr == NULL)
	reloc_ptr = &dummy;

      /* Handle an addend.  */
      if ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS)
	  && CONST_INT_P (XEXP (x, 1)))
	x = XEXP (x, 0);

      if (GET_CODE (x) == UNSPEC)
	{
	  switch (XINT (x, 1))
	    {
	    case UNSPEC_RELOC16:
	      orig = XVECEXP (x, 0, 0);
	      *reloc_ptr = (enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1));
	      break;

	    case UNSPEC_RELOC32:
	      if (unwrap_reloc32_p)
		{
		  orig = XVECEXP (x, 0, 0);
		  *reloc_ptr = (enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1));
		}
	      break;

	    default:
	      break;
	    }
	}
    }

  return orig;
}
2309
2310/* Unwrap symbol from UNSPEC_RELOC16 and, if unwrap_reloc32_p,
2311 UNSPEC_RELOC32 wrappers. */
2312
2313rtx
2314m68k_unwrap_symbol (rtx orig, bool unwrap_reloc32_p)
2315{
2316 return m68k_unwrap_symbol_1 (orig, unwrap_reloc32_p, NULL);
2317}
2318
75df395f
MK
/* Prescan insn before outputting assembler for it.  Rewrites operand
   RTL in place so that relocation decorations are emitted in the
   right position; see the comment below.  */

void
m68k_final_prescan_insn (rtx_insn *insn ATTRIBUTE_UNUSED,
			 rtx *operands, int n_operands)
{
  int i;

  /* Combine and, possibly, other optimizations may do good job
     converting
       (const (unspec [(symbol)]))
     into
       (const (plus (unspec [(symbol)])
              (const_int N))).
     The problem with this is emitting @TLS or @GOT decorations.
     The decoration is emitted when processing (unspec), so the
     result would be "#symbol@TLSLE+N" instead of "#symbol+N@TLSLE".

     It seems that the easiest solution to this is to convert such
     operands to
       (const (unspec [(plus (symbol)
			     (const_int N))])).
     Note, that the top level of operand remains intact, so we don't have
     to patch up anything outside of the operand.  */

  subrtx_var_iterator::array_type array;
  for (i = 0; i < n_operands; ++i)
    {
      rtx op;

      op = operands[i];

      FOR_EACH_SUBRTX_VAR (iter, array, op, ALL)
	{
	  rtx x = *iter;
	  /* Only wrapped symbols (possibly with addend) need fixing.  */
	  if (m68k_unwrap_symbol (x, true) != x)
	    {
	      rtx plus;

	      gcc_assert (GET_CODE (x) == CONST);
	      plus = XEXP (x, 0);

	      if (GET_CODE (plus) == PLUS || GET_CODE (plus) == MINUS)
		{
		  rtx unspec;
		  rtx addend;

		  unspec = XEXP (plus, 0);
		  gcc_assert (GET_CODE (unspec) == UNSPEC);
		  addend = XEXP (plus, 1);
		  gcc_assert (CONST_INT_P (addend));

		  /* We now have all the pieces, rearrange them.  */

		  /* Move symbol to plus.  */
		  XEXP (plus, 0) = XVECEXP (unspec, 0, 0);

		  /* Move plus inside unspec.  */
		  XVECEXP (unspec, 0, 0) = plus;

		  /* Move unspec to top level of const.  */
		  XEXP (x, 0) = unspec;
		}
	      iter.skip_subrtxes ();
	    }
	}
    }
}
2387
2388/* Move X to a register and add REG_EQUAL note pointing to ORIG.
2389 If REG is non-null, use it; generate new pseudo otherwise. */
2390
2391static rtx
2392m68k_move_to_reg (rtx x, rtx orig, rtx reg)
2393{
c85e862a 2394 rtx_insn *insn;
75df395f
MK
2395
2396 if (reg == NULL_RTX)
2397 {
2398 gcc_assert (can_create_pseudo_p ());
2399 reg = gen_reg_rtx (Pmode);
2400 }
2401
2402 insn = emit_move_insn (reg, x);
2403 /* Put a REG_EQUAL note on this insn, so that it can be optimized
2404 by loop. */
2405 set_unique_reg_note (insn, REG_EQUAL, orig);
2406
2407 return reg;
2408}
2409
2410/* Does the same as m68k_wrap_symbol, but returns a memory reference to
2411 GOT slot. */
2412
2413static rtx
2414m68k_wrap_symbol_into_got_ref (rtx x, enum m68k_reloc reloc, rtx temp_reg)
2415{
2416 x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), temp_reg);
2417
2418 x = gen_rtx_MEM (Pmode, x);
2419 MEM_READONLY_P (x) = 1;
2420
2421 return x;
2422}
2423
79e68feb
RS
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.

   An address is legitimized by making an indirect reference
   through the Global Offset Table with the name of the symbol
   used as an offset.

   The assembler and linker are responsible for placing the
   address of the symbol in the GOT.  The function prologue
   is responsible for initializing a5 to the starting address
   of the GOT.

   The assembler is also responsible for translating a symbol name
   into a constant displacement from the start of the GOT.

   A quick example may make things a little clearer:

   When not generating PIC code to store the value 12345 into _foo
   we would generate the following code:

	movel #12345, _foo

   When generating PIC two transformations are made.  First, the compiler
   loads the address of foo into a register.  So the first transformation makes:

	lea	_foo, a0
	movel   #12345, a0@

   The code in movsi will intercept the lea instruction and call this
   routine which will transform the instructions into:

	movel	a5@(_foo:w), a0
	movel	#12345, a0@


   That (in a nutshell) is how *all* symbol and label references are
   handled.  */

rtx
legitimize_pic_address (rtx orig, machine_mode mode ATTRIBUTE_UNUSED,
			rtx reg)
{
  rtx pic_ref = orig;

  /* First handle a simple SYMBOL_REF or LABEL_REF */
  if (GET_CODE (orig) == SYMBOL_REF || GET_CODE (orig) == LABEL_REF)
    {
      gcc_assert (reg);

      pic_ref = m68k_wrap_symbol_into_got_ref (orig, RELOC_GOT, reg);
      pic_ref = m68k_move_to_reg (pic_ref, orig, reg);
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      /* Make sure this has not already been legitimized.  */
      if (m68k_unwrap_symbol (orig, true) != orig)
	return orig;

      gcc_assert (reg);

      /* legitimize both operands of the PLUS */
      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
				     base == reg ? 0 : reg);

      if (GET_CODE (orig) == CONST_INT)
	pic_ref = plus_constant (Pmode, base, INTVAL (orig));
      else
	pic_ref = gen_rtx_PLUS (Pmode, base, orig);
    }

  return pic_ref;
}
2503
75df395f
MK
2504/* The __tls_get_addr symbol. */
2505static GTY(()) rtx m68k_tls_get_addr;
2506
2507/* Return SYMBOL_REF for __tls_get_addr. */
2508
2509static rtx
2510m68k_get_tls_get_addr (void)
2511{
2512 if (m68k_tls_get_addr == NULL_RTX)
2513 m68k_tls_get_addr = init_one_libfunc ("__tls_get_addr");
2514
2515 return m68k_tls_get_addr;
2516}
2517
/* Return libcall result in A0 instead of usual D0.  */
static bool m68k_libcall_value_in_a0_p = false;

/* Emit instruction sequence that calls __tls_get_addr.  X is
   the TLS symbol we are referencing and RELOC is the symbol type to use
   (either TLSGD or TLSLDM).  EQV is the REG_EQUAL note for the sequence
   emitted.  A pseudo register with result of __tls_get_addr call is
   returned.  */

static rtx
m68k_call_tls_get_addr (rtx x, rtx eqv, enum m68k_reloc reloc)
{
  rtx a0;
  rtx_insn *insns;
  rtx dest;

  /* Emit the call sequence.  */
  start_sequence ();

  /* FIXME: Unfortunately, emit_library_call_value does not
     consider (plus (%a5) (const (unspec))) to be a good enough
     operand for push, so it forces it into a register.  The bad
     thing about this is that combiner, due to copy propagation and other
     optimizations, sometimes can not later fix this.  As a consequence,
     additional register may be allocated resulting in a spill.
     For reference, see args processing loops in
     calls.c:emit_library_call_value_1.
     For testcase, see gcc.target/m68k/tls-{gd, ld}.c  */
  x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), NULL_RTX);

  /* __tls_get_addr() is not a libcall, but emitting a libcall_value
     is the simplest way of generating a call.  The difference between
     __tls_get_addr() and libcall is that the result is returned in D0
     instead of A0.  To workaround this, we use m68k_libcall_value_in_a0_p
     which temporarily switches returning the result to A0.  */

  m68k_libcall_value_in_a0_p = true;
  a0 = emit_library_call_value (m68k_get_tls_get_addr (), NULL_RTX, LCT_PURE,
				Pmode, x, Pmode);
  m68k_libcall_value_in_a0_p = false;

  insns = get_insns ();
  end_sequence ();

  /* Wrap the call in a libcall block so the optimizers can treat it
     as a single value-producing operation with REG_EQUAL note EQV.  */
  gcc_assert (can_create_pseudo_p ());
  dest = gen_reg_rtx (Pmode);
  emit_libcall_block (insns, dest, a0, eqv);

  return dest;
}
2568
2569/* The __tls_get_addr symbol. */
2570static GTY(()) rtx m68k_read_tp;
2571
2572/* Return SYMBOL_REF for __m68k_read_tp. */
2573
2574static rtx
2575m68k_get_m68k_read_tp (void)
2576{
2577 if (m68k_read_tp == NULL_RTX)
2578 m68k_read_tp = init_one_libfunc ("__m68k_read_tp");
2579
2580 return m68k_read_tp;
2581}
2582
/* Emit instruction sequence that calls __m68k_read_tp.
   A pseudo register with result of __m68k_read_tp call is returned.  */

static rtx
m68k_call_m68k_read_tp (void)
{
  rtx a0;
  rtx eqv;
  rtx_insn *insns;
  rtx dest;

  start_sequence ();

  /* __m68k_read_tp() is not a libcall, but emitting a libcall_value
     is the simplest way of generating a call.  The difference between
     __m68k_read_tp() and libcall is that the result is returned in D0
     instead of A0.  To workaround this, we use m68k_libcall_value_in_a0_p
     which temporarily switches returning the result to A0.  */

  /* Emit the call sequence.  */
  m68k_libcall_value_in_a0_p = true;
  a0 = emit_library_call_value (m68k_get_m68k_read_tp (), NULL_RTX, LCT_PURE,
				Pmode);
  m68k_libcall_value_in_a0_p = false;
  insns = get_insns ();
  end_sequence ();

  /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
     share the m68k_read_tp result with other IE/LE model accesses.  */
  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const1_rtx), UNSPEC_RELOC32);

  gcc_assert (can_create_pseudo_p ());
  dest = gen_reg_rtx (Pmode);
  emit_libcall_block (insns, dest, a0, eqv);

  return dest;
}
2620
/* Return a legitimized address for accessing TLS SYMBOL_REF X.
   For explanations on instructions sequences see TLS/NPTL ABI for m68k and
   ColdFire.  Dispatches on the symbol's TLS access model.  */

rtx
m68k_legitimize_tls_address (rtx orig)
{
  switch (SYMBOL_REF_TLS_MODEL (orig))
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      /* GD: one __tls_get_addr call per symbol.  */
      orig = m68k_call_tls_get_addr (orig, orig, RELOC_TLSGD);
      break;

    case TLS_MODEL_LOCAL_DYNAMIC:
      {
	rtx eqv;
	rtx a0;
	rtx x;

	/* Attach a unique REG_EQUIV, to allow the RTL optimizers to
	   share the LDM result with other LD model accesses.  */
	eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
			      UNSPEC_RELOC32);

	a0 = m68k_call_tls_get_addr (orig, eqv, RELOC_TLSLDM);

	x = m68k_wrap_symbol (orig, RELOC_TLSLDO, a0, NULL_RTX);

	if (can_create_pseudo_p ())
	  x = m68k_move_to_reg (x, orig, NULL_RTX);

	orig = x;
	break;
      }

    case TLS_MODEL_INITIAL_EXEC:
      {
	rtx a0;
	rtx x;

	/* IE: thread pointer plus a GOT-loaded offset.  */
	a0 = m68k_call_m68k_read_tp ();

	x = m68k_wrap_symbol_into_got_ref (orig, RELOC_TLSIE, NULL_RTX);
	x = gen_rtx_PLUS (Pmode, x, a0);

	if (can_create_pseudo_p ())
	  x = m68k_move_to_reg (x, orig, NULL_RTX);

	orig = x;
	break;
      }

    case TLS_MODEL_LOCAL_EXEC:
      {
	rtx a0;
	rtx x;

	/* LE: thread pointer plus a link-time constant offset.  */
	a0 = m68k_call_m68k_read_tp ();

	x = m68k_wrap_symbol (orig, RELOC_TLSLE, a0, NULL_RTX);

	if (can_create_pseudo_p ())
	  x = m68k_move_to_reg (x, orig, NULL_RTX);

	orig = x;
	break;
      }

    default:
      gcc_unreachable ();
    }

  return orig;
}
2695
2696/* Return true if X is a TLS symbol. */
2697
2698static bool
2699m68k_tls_symbol_p (rtx x)
2700{
2701 if (!TARGET_HAVE_TLS)
2702 return false;
2703
2704 if (GET_CODE (x) != SYMBOL_REF)
2705 return false;
2706
2707 return SYMBOL_REF_TLS_MODEL (x) != 0;
2708}
2709
75df395f
MK
2710/* If !LEGITIMATE_P, return true if X is a TLS symbol reference,
2711 though illegitimate one.
2712 If LEGITIMATE_P, return true if X is a legitimate TLS symbol reference. */
2713
2714bool
2715m68k_tls_reference_p (rtx x, bool legitimate_p)
2716{
2717 if (!TARGET_HAVE_TLS)
2718 return false;
2719
2720 if (!legitimate_p)
a5784152
RS
2721 {
2722 subrtx_var_iterator::array_type array;
2723 FOR_EACH_SUBRTX_VAR (iter, array, x, ALL)
2724 {
2725 rtx x = *iter;
2726
2727 /* Note: this is not the same as m68k_tls_symbol_p. */
2728 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0)
2729 return true;
2730
2731 /* Don't recurse into legitimate TLS references. */
2732 if (m68k_tls_reference_p (x, true))
2733 iter.skip_subrtxes ();
2734 }
2735 return false;
2736 }
75df395f
MK
2737 else
2738 {
2739 enum m68k_reloc reloc = RELOC_GOT;
2740
2741 return (m68k_unwrap_symbol_1 (x, true, &reloc) != x
2742 && TLS_RELOC_P (reloc));
2743 }
2744}
2745
79e68feb 2746\f
0ce6f9fb 2747
/* Nonzero if constant I fits in a signed 8-bit moveq immediate
   (-128 .. 127).  */
#define USE_MOVQ(i)	((unsigned) ((i) + 128) <= 255)
0ce6f9fb 2749
bda2a571
RS
/* Return the type of move that should be used for integer I.
   The cases are tried from cheapest to most expensive; MOVL is the
   catch-all 32-bit immediate move.  */

M68K_CONST_METHOD
m68k_const_method (HOST_WIDE_INT i)
{
  unsigned u;

  if (USE_MOVQ (i))
    return MOVQ;

  /* The ColdFire doesn't have byte or word operations.  */
  /* FIXME: This may not be useful for the m68060 either.  */
  if (!TARGET_COLDFIRE)
    {
      /* if -256 < N < 256 but N is not in range for a moveq
	 N^ff will be, so use moveq #N^ff, dreg; not.b dreg.  */
      if (USE_MOVQ (i ^ 0xff))
	return NOTB;
      /* Likewise, try with not.w */
      if (USE_MOVQ (i ^ 0xffff))
	return NOTW;
      /* This is the only value where neg.w is useful */
      if (i == -65408)
	return NEGW;
    }

  /* Try also with swap.  */
  u = i;
  if (USE_MOVQ ((u >> 16) | (u << 16)))
    return SWAP;

  if (TARGET_ISAB)
    {
      /* Try using MVZ/MVS with an immediate value to load constants.  */
      if (i >= 0 && i <= 65535)
	return MVZ;
      if (i >= -32768 && i <= 32767)
	return MVS;
    }

  /* Otherwise, use move.l */
  return MOVL;
}
2793
bda2a571
RS
2794/* Return the cost of moving constant I into a data register. */
2795
3c50106f 2796static int
bda2a571 2797const_int_cost (HOST_WIDE_INT i)
0ce6f9fb 2798{
c47b0cb4 2799 switch (m68k_const_method (i))
0ce6f9fb 2800 {
a0a7fbc9
AS
2801 case MOVQ:
2802 /* Constants between -128 and 127 are cheap due to moveq. */
2803 return 0;
2804 case MVZ:
2805 case MVS:
2806 case NOTB:
2807 case NOTW:
2808 case NEGW:
2809 case SWAP:
2810 /* Constants easily generated by moveq + not.b/not.w/neg.w/swap. */
2811 return 1;
2812 case MOVL:
2813 return 2;
2814 default:
2815 gcc_unreachable ();
0ce6f9fb
RK
2816 }
2817}
2818
/* Cost-estimation hook for m68k (used as TARGET_RTX_COSTS).  Sets
   *TOTAL to the estimated cost of expression X in MODE and returns
   true when the value is final, false to let the generic code recurse
   into the operands.  */

static bool
m68k_rtx_costs (rtx x, machine_mode mode, int outer_code,
		int opno ATTRIBUTE_UNUSED,
		int *total, bool speed ATTRIBUTE_UNUSED)
{
  int code = GET_CODE (x);

  switch (code)
    {
    case CONST_INT:
      /* Constant zero is super cheap due to clr instruction.  */
      if (x == const0_rtx)
	*total = 0;
      else
	*total = const_int_cost (INTVAL (x));
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 3;
      return true;

    case CONST_DOUBLE:
      /* Make 0.0 cheaper than other floating constants to
	 encourage creating tstsf and tstdf insns.  */
      if (outer_code == COMPARE
	  && (x == CONST0_RTX (SFmode) || x == CONST0_RTX (DFmode)))
	*total = 4;
      else
	*total = 5;
      return true;

    /* These are vaguely right for a 68020.  */
    /* The costs for long multiply have been adjusted to work properly
       in synth_mult on the 68020, relative to an average of the time
       for add and the time for shift, taking away a little more because
       sometimes move insns are needed.  */
    /* div?.w is relatively cheaper on 68000 counted in COSTS_N_INSNS
       terms.  */
#define MULL_COST				\
  (TUNE_68060 ? 2				\
   : TUNE_68040 ? 5				\
   : (TUNE_CFV2 && TUNE_EMAC) ? 3		\
   : (TUNE_CFV2 && TUNE_MAC) ? 4		\
   : TUNE_CFV2 ? 8				\
   : TARGET_COLDFIRE ? 3 : 13)

#define MULW_COST				\
  (TUNE_68060 ? 2				\
   : TUNE_68040 ? 3				\
   : TUNE_68000_10 ? 5				\
   : (TUNE_CFV2 && TUNE_EMAC) ? 3		\
   : (TUNE_CFV2 && TUNE_MAC) ? 2		\
   : TUNE_CFV2 ? 8				\
   : TARGET_COLDFIRE ? 2 : 8)

#define DIVW_COST				\
  (TARGET_CF_HWDIV ? 11				\
   : TUNE_68000_10 || TARGET_COLDFIRE ? 12 : 27)

    case PLUS:
      /* An lea costs about three times as much as a simple add.  */
      if (mode == SImode
	  && GET_CODE (XEXP (x, 1)) == REG
	  && GET_CODE (XEXP (x, 0)) == MULT
	  && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
	  && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
	  && (INTVAL (XEXP (XEXP (x, 0), 1)) == 2
	      || INTVAL (XEXP (XEXP (x, 0), 1)) == 4
	      || INTVAL (XEXP (XEXP (x, 0), 1)) == 8))
	{
	    /* lea an@(dx:l:i),am */
	    *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 2 : 3);
	    return true;
	}
      return false;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      if (TUNE_68060)
	{
          *total = COSTS_N_INSNS(1);
	  return true;
	}
      if (TUNE_68000_10)
	{
	  if (GET_CODE (XEXP (x, 1)) == CONST_INT)
	    {
	      if (INTVAL (XEXP (x, 1)) < 16)
		*total = COSTS_N_INSNS (2) + INTVAL (XEXP (x, 1)) / 2;
	      else
		/* We're using clrw + swap for these cases.  */
		*total = COSTS_N_INSNS (4) + (INTVAL (XEXP (x, 1)) - 16) / 2;
	    }
	  else
	    *total = COSTS_N_INSNS (10); /* Worst case.  */
	  return true;
	}
      /* A shift by a big integer takes an extra instruction.  */
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && (INTVAL (XEXP (x, 1)) == 16))
	{
	  *total = COSTS_N_INSNS (2);	 /* clrw;swap */
	  return true;
	}
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && !(INTVAL (XEXP (x, 1)) > 0
	       && INTVAL (XEXP (x, 1)) <= 8))
	{
	  *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 1 : 3);	 /* lsr #i,dn */
	  return true;
	}
      return false;

    case MULT:
      if ((GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
	   || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
	  && mode == SImode)
	*total = COSTS_N_INSNS (MULW_COST);
      else if (mode == QImode || mode == HImode)
	*total = COSTS_N_INSNS (MULW_COST);
      else
	*total = COSTS_N_INSNS (MULL_COST);
      return true;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      if (mode == QImode || mode == HImode)
	*total = COSTS_N_INSNS (DIVW_COST);	/* div.w */
      else if (TARGET_CF_HWDIV)
	*total = COSTS_N_INSNS (18);
      else
	*total = COSTS_N_INSNS (43);		/* div.l */
      return true;

    case ZERO_EXTRACT:
      /* A bit-field extract compared against zero is folded into the
	 compare, so it costs nothing extra.  */
      if (outer_code == COMPARE)
	*total = 0;
      return false;

    default:
      return false;
    }
}
2967
/* Return an instruction to move CONST_INT OPERANDS[1] into data register
   OPERANDS[0].  Chooses the cheapest encoding as classified by
   m68k_const_method, possibly rewriting OPERANDS[1] so the returned
   template loads an intermediate constant first.  */

static const char *
output_move_const_into_data_reg (rtx *operands)
{
  HOST_WIDE_INT i;

  i = INTVAL (operands[1]);
  switch (m68k_const_method (i))
    {
    case MVZ:
      /* ColdFire move-with-zero-extend of the low 16 bits.  */
      return "mvzw %1,%0";
    case MVS:
      /* ColdFire move-with-sign-extend of the low 16 bits.  */
      return "mvsw %1,%0";
    case MOVQ:
      /* Value fits in moveq's signed 8-bit immediate.  */
      return "moveq %1,%0";
    case NOTB:
      /* Load the byte-complemented value, then NOT the low byte.
	 The two-insn sequence leaves the CC in a nonstandard state,
	 hence CC_STATUS_INIT.  */
      CC_STATUS_INIT;
      operands[1] = GEN_INT (i ^ 0xff);
      return "moveq %1,%0\n\tnot%.b %0";
    case NOTW:
      /* Same trick on the low word.  */
      CC_STATUS_INIT;
      operands[1] = GEN_INT (i ^ 0xffff);
      return "moveq %1,%0\n\tnot%.w %0";
    case NEGW:
      /* Value reachable as -(-128) after a word negate.  */
      CC_STATUS_INIT;
      return "moveq #-128,%0\n\tneg%.w %0";
    case SWAP:
      {
	unsigned u = i;

	/* Load the halves swapped, then swap them back.  */
	operands[1] = GEN_INT ((u << 16) | (u >> 16));
	return "moveq %1,%0\n\tswap %0";
      }
    case MOVL:
      /* No shortcut applies; full 32-bit immediate move.  */
      return "move%.l %1,%0";
    default:
      gcc_unreachable ();
    }
}
3009
bda2a571 3010/* Return true if I can be handled by ISA B's mov3q instruction. */
5e04daf3 3011
bda2a571
RS
3012bool
3013valid_mov3q_const (HOST_WIDE_INT i)
3014{
3015 return TARGET_ISAB && (i == -1 || IN_RANGE (i, 1, 7));
5e04daf3
PB
3016}
3017
/* Return an instruction to move CONST_INT OPERANDS[1] into OPERANDS[0].
   The alternatives are tried from cheapest to most general; order
   matters because several cases overlap.  */

static const char *
output_move_simode_const (rtx *operands)
{
  rtx dest;
  HOST_WIDE_INT src;

  dest = operands[0];
  src = INTVAL (operands[1]);
  if (src == 0
      && (DATA_REG_P (dest) || MEM_P (dest))
      /* clr insns on 68000 read before writing.  */
      && ((TARGET_68010 || TARGET_COLDFIRE)
	  || !(MEM_P (dest) && MEM_VOLATILE_P (dest))))
    return "clr%.l %0";
  else if (GET_MODE (dest) == SImode && valid_mov3q_const (src))
    /* ISA B 3-bit quick move.  */
    return "mov3q%.l %1,%0";
  else if (src == 0 && ADDRESS_REG_P (dest))
    /* Address registers cannot be cleared with clr; subtract from self.  */
    return "sub%.l %0,%0";
  else if (DATA_REG_P (dest))
    return output_move_const_into_data_reg (operands);
  else if (ADDRESS_REG_P (dest) && IN_RANGE (src, -0x8000, 0x7fff))
    {
      /* A word move into an address register sign-extends to 32 bits.  */
      if (valid_mov3q_const (src))
	return "mov3q%.l %1,%0";
      return "move%.w %1,%0";
    }
  else if (MEM_P (dest)
	   && GET_CODE (XEXP (dest, 0)) == PRE_DEC
	   && REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
	   && IN_RANGE (src, -0x8000, 0x7fff))
    {
      /* Push of a 16-bit constant: pea pushes the (sign-extended)
	 address constant as a long.  */
      if (valid_mov3q_const (src))
	return "mov3q%.l %1,%-";
      return "pea %a1";
    }
  return "move%.l %1,%0";
}
3058
5505f548 3059const char *
8a4a2253 3060output_move_simode (rtx *operands)
f4e80198
RK
3061{
3062 if (GET_CODE (operands[1]) == CONST_INT)
3063 return output_move_simode_const (operands);
3064 else if ((GET_CODE (operands[1]) == SYMBOL_REF
3065 || GET_CODE (operands[1]) == CONST)
3066 && push_operand (operands[0], SImode))
3067 return "pea %a1";
3068 else if ((GET_CODE (operands[1]) == SYMBOL_REF
3069 || GET_CODE (operands[1]) == CONST)
3070 && ADDRESS_REG_P (operands[0]))
3071 return "lea %a1,%0";
3072 return "move%.l %1,%0";
3073}
3074
5505f548 3075const char *
8a4a2253 3076output_move_himode (rtx *operands)
f4e80198
RK
3077{
3078 if (GET_CODE (operands[1]) == CONST_INT)
3079 {
3080 if (operands[1] == const0_rtx
3081 && (DATA_REG_P (operands[0])
3082 || GET_CODE (operands[0]) == MEM)
3197c489
RS
3083 /* clr insns on 68000 read before writing. */
3084 && ((TARGET_68010 || TARGET_COLDFIRE)
f4e80198
RK
3085 || !(GET_CODE (operands[0]) == MEM
3086 && MEM_VOLATILE_P (operands[0]))))
3087 return "clr%.w %0";
38198304
AS
3088 else if (operands[1] == const0_rtx
3089 && ADDRESS_REG_P (operands[0]))
3090 return "sub%.l %0,%0";
f4e80198
RK
3091 else if (DATA_REG_P (operands[0])
3092 && INTVAL (operands[1]) < 128
3093 && INTVAL (operands[1]) >= -128)
a0a7fbc9 3094 return "moveq %1,%0";
f4e80198
RK
3095 else if (INTVAL (operands[1]) < 0x8000
3096 && INTVAL (operands[1]) >= -0x8000)
3097 return "move%.w %1,%0";
3098 }
3099 else if (CONSTANT_P (operands[1]))
3100 return "move%.l %1,%0";
f4e80198
RK
3101 return "move%.w %1,%0";
3102}
3103
/* Select the assembler template for a byte (QImode) move of
   OPERANDS[1] into OPERANDS[0].  */

const char *
output_move_qimode (rtx *operands)
{
  /* 68k family always modifies the stack pointer by at least 2, even for
     byte pushes.  The 5200 (ColdFire) does not do this.  */

  /* This case is generated by pushqi1 pattern now.  */
  gcc_assert (!(GET_CODE (operands[0]) == MEM
		&& GET_CODE (XEXP (operands[0], 0)) == PRE_DEC
		&& XEXP (XEXP (operands[0], 0), 0) == stack_pointer_rtx
		&& ! ADDRESS_REG_P (operands[1])
		&& ! TARGET_COLDFIRE));

  /* clr and st insns on 68000 read before writing.  */
  if (!ADDRESS_REG_P (operands[0])
      && ((TARGET_68010 || TARGET_COLDFIRE)
	  || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
    {
      if (operands[1] == const0_rtx)
	return "clr%.b %0";
      /* A byte whose low 8 bits are all ones can be set with st
	 (set-to-true); st does not update the CC usefully, hence
	 CC_STATUS_INIT.  ColdFire only allows st on data registers.  */
      if ((!TARGET_COLDFIRE || DATA_REG_P (operands[0]))
	  && GET_CODE (operands[1]) == CONST_INT
	  && (INTVAL (operands[1]) & 255) == 255)
	{
	  CC_STATUS_INIT;
	  return "st %0";
	}
    }
  if (GET_CODE (operands[1]) == CONST_INT
      && DATA_REG_P (operands[0])
      && INTVAL (operands[1]) < 128
      && INTVAL (operands[1]) >= -128)
    return "moveq %1,%0";
  if (operands[1] == const0_rtx && ADDRESS_REG_P (operands[0]))
    return "sub%.l %0,%0";
  if (GET_CODE (operands[1]) != CONST_INT && CONSTANT_P (operands[1]))
    return "move%.l %1,%0";
  /* 68k family (including the 5200 ColdFire) does not support byte moves to
     from address registers.  */
  if (ADDRESS_REG_P (operands[0]) || ADDRESS_REG_P (operands[1]))
    return "move%.w %1,%0";
  return "move%.b %1,%0";
}
3147
5505f548 3148const char *
8a4a2253 3149output_move_stricthi (rtx *operands)
9b55bf04
RK
3150{
3151 if (operands[1] == const0_rtx
3197c489
RS
3152 /* clr insns on 68000 read before writing. */
3153 && ((TARGET_68010 || TARGET_COLDFIRE)
9b55bf04
RK
3154 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3155 return "clr%.w %0";
3156 return "move%.w %1,%0";
3157}
3158
5505f548 3159const char *
8a4a2253 3160output_move_strictqi (rtx *operands)
9b55bf04
RK
3161{
3162 if (operands[1] == const0_rtx
3197c489
RS
3163 /* clr insns on 68000 read before writing. */
3164 && ((TARGET_68010 || TARGET_COLDFIRE)
9b55bf04
RK
3165 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3166 return "clr%.b %0";
3167 return "move%.b %1,%0";
3168}
3169
79e68feb
RS
3170/* Return the best assembler insn template
3171 for moving operands[1] into operands[0] as a fullword. */
3172
5505f548 3173static const char *
8a4a2253 3174singlemove_string (rtx *operands)
79e68feb 3175{
02ed0c07
RK
3176 if (GET_CODE (operands[1]) == CONST_INT)
3177 return output_move_simode_const (operands);
3178 return "move%.l %1,%0";
79e68feb
RS
3179}
3180
2505bc97 3181
/* Output assembler or rtl code to perform a doubleword move insn
   with operands OPERANDS.
   Pointers to 3 helper functions should be specified:
   HANDLE_REG_ADJUST to adjust a register by a small value,
   HANDLE_COMPADR to compute an address and
   HANDLE_MOVSI to move 4 bytes.

   The move is done 4 bytes at a time (two words for DImode/DFmode,
   three for XFmode).  Most of the complexity is choosing a word order
   that does not clobber a register or address still needed by a later
   partial move.  */

static void
handle_move_double (rtx operands[2],
		    void (*handle_reg_adjust) (rtx, int),
		    void (*handle_compadr) (rtx [2]),
		    void (*handle_movsi) (rtx [2]))
{
  enum
    {
      REGOP, OFFSOP, MEMOP, PUSHOP, POPOP, CNSTOP, RNDOP
    } optype0, optype1;
  rtx latehalf[2];		/* Highest-addressed word of each operand.  */
  rtx middlehalf[2];		/* Middle word, only used when size == 12.  */
  rtx xops[2];
  rtx addreg0 = 0, addreg1 = 0;	/* Address regs to bump for MEMOP operands.  */
  int dest_overlapped_low = 0;
  int size = GET_MODE_SIZE (GET_MODE (operands[0]));

  middlehalf[0] = 0;
  middlehalf[1] = 0;

  /* First classify both operands.  */

  if (REG_P (operands[0]))
    optype0 = REGOP;
  else if (offsettable_memref_p (operands[0]))
    optype0 = OFFSOP;
  else if (GET_CODE (XEXP (operands[0], 0)) == POST_INC)
    optype0 = POPOP;
  else if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC)
    optype0 = PUSHOP;
  else if (GET_CODE (operands[0]) == MEM)
    optype0 = MEMOP;
  else
    optype0 = RNDOP;

  if (REG_P (operands[1]))
    optype1 = REGOP;
  else if (CONSTANT_P (operands[1]))
    optype1 = CNSTOP;
  else if (offsettable_memref_p (operands[1]))
    optype1 = OFFSOP;
  else if (GET_CODE (XEXP (operands[1], 0)) == POST_INC)
    optype1 = POPOP;
  else if (GET_CODE (XEXP (operands[1], 0)) == PRE_DEC)
    optype1 = PUSHOP;
  else if (GET_CODE (operands[1]) == MEM)
    optype1 = MEMOP;
  else
    optype1 = RNDOP;

  /* Check for the cases that the operand constraints are not supposed
     to allow to happen.  Generating code for these cases is
     painful.  */
  gcc_assert (optype0 != RNDOP && optype1 != RNDOP);

  /* If one operand is decrementing and one is incrementing
     decrement the former register explicitly
     and change that operand into ordinary indexing.  */

  if (optype0 == PUSHOP && optype1 == POPOP)
    {
      operands[0] = XEXP (XEXP (operands[0], 0), 0);

      handle_reg_adjust (operands[0], -size);

      /* NOTE(review): the first branch tests operands[1]'s mode while
	 the second tests operands[0]'s — looks asymmetric; preserved
	 as-is since both operands have the same mode in a move.  */
      if (GET_MODE (operands[1]) == XFmode)
	operands[0] = gen_rtx_MEM (XFmode, operands[0]);
      else if (GET_MODE (operands[0]) == DFmode)
	operands[0] = gen_rtx_MEM (DFmode, operands[0]);
      else
	operands[0] = gen_rtx_MEM (DImode, operands[0]);
      optype0 = OFFSOP;
    }
  if (optype0 == POPOP && optype1 == PUSHOP)
    {
      operands[1] = XEXP (XEXP (operands[1], 0), 0);

      handle_reg_adjust (operands[1], -size);

      if (GET_MODE (operands[1]) == XFmode)
	operands[1] = gen_rtx_MEM (XFmode, operands[1]);
      else if (GET_MODE (operands[1]) == DFmode)
	operands[1] = gen_rtx_MEM (DFmode, operands[1]);
      else
	operands[1] = gen_rtx_MEM (DImode, operands[1]);
      optype1 = OFFSOP;
    }

  /* If an operand is an unoffsettable memory ref, find a register
     we can increment temporarily to make it refer to the second word.  */

  if (optype0 == MEMOP)
    addreg0 = find_addr_reg (XEXP (operands[0], 0));

  if (optype1 == MEMOP)
    addreg1 = find_addr_reg (XEXP (operands[1], 0));

  /* Ok, we can do one word at a time.
     Normally we do the low-numbered word first,
     but if either operand is autodecrementing then we
     do the high-numbered word first.

     In either case, set up in LATEHALF the operands to use
     for the high-numbered word and in some cases alter the
     operands in OPERANDS to be suitable for the low-numbered word.  */

  if (size == 12)
    {
      if (optype0 == REGOP)
	{
	  latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 2);
	  middlehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
	}
      else if (optype0 == OFFSOP)
	{
	  middlehalf[0] = adjust_address (operands[0], SImode, 4);
	  latehalf[0] = adjust_address (operands[0], SImode, size - 4);
	}
      else
	{
	  /* Unoffsettable MEM: same address; addreg0 is bumped between
	     the partial moves instead.  */
	  middlehalf[0] = adjust_address (operands[0], SImode, 0);
	  latehalf[0] = adjust_address (operands[0], SImode, 0);
	}

      if (optype1 == REGOP)
	{
	  latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 2);
	  middlehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
	}
      else if (optype1 == OFFSOP)
	{
	  middlehalf[1] = adjust_address (operands[1], SImode, 4);
	  latehalf[1] = adjust_address (operands[1], SImode, size - 4);
	}
      else if (optype1 == CNSTOP)
	{
	  if (GET_CODE (operands[1]) == CONST_DOUBLE)
	    {
	      long l[3];

	      REAL_VALUE_TO_TARGET_LONG_DOUBLE
		(*CONST_DOUBLE_REAL_VALUE (operands[1]), l);
	      operands[1] = GEN_INT (l[0]);
	      middlehalf[1] = GEN_INT (l[1]);
	      latehalf[1] = GEN_INT (l[2]);
	    }
	  else
	    {
	      /* No non-CONST_DOUBLE constant should ever appear
		 here.  */
	      gcc_assert (!CONSTANT_P (operands[1]));
	    }
	}
      else
	{
	  middlehalf[1] = adjust_address (operands[1], SImode, 0);
	  latehalf[1] = adjust_address (operands[1], SImode, 0);
	}
    }
  else
    /* size is not 12: */
    {
      if (optype0 == REGOP)
	latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
      else if (optype0 == OFFSOP)
	latehalf[0] = adjust_address (operands[0], SImode, size - 4);
      else
	latehalf[0] = adjust_address (operands[0], SImode, 0);

      if (optype1 == REGOP)
	latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
      else if (optype1 == OFFSOP)
	latehalf[1] = adjust_address (operands[1], SImode, size - 4);
      else if (optype1 == CNSTOP)
	split_double (operands[1], &operands[1], &latehalf[1]);
      else
	latehalf[1] = adjust_address (operands[1], SImode, 0);
    }

  /* If insn is effectively movd N(REG),-(REG) then we will do the high
     word first.  We should use the adjusted operand 1 (which is N+4(REG))
     for the low word as well, to compensate for the first decrement of
     REG.  */
  if (optype0 == PUSHOP
      && reg_overlap_mentioned_p (XEXP (XEXP (operands[0], 0), 0), operands[1]))
    operands[1] = middlehalf[1] = latehalf[1];

  /* For (set (reg:DI N) (mem:DI ... (reg:SI N) ...)),
     if the upper part of reg N does not appear in the MEM, arrange to
     emit the move late-half first.  Otherwise, compute the MEM address
     into the upper part of N and use that as a pointer to the memory
     operand.  */
  if (optype0 == REGOP
      && (optype1 == OFFSOP || optype1 == MEMOP))
    {
      rtx testlow = gen_rtx_REG (SImode, REGNO (operands[0]));

      if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
	  && reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
	{
	  /* If both halves of dest are used in the src memory address,
	     compute the address into latehalf of dest.
	     Note that this can't happen if the dest is two data regs.  */
	compadr:
	  xops[0] = latehalf[0];
	  xops[1] = XEXP (operands[1], 0);

	  handle_compadr (xops);
	  if (GET_MODE (operands[1]) == XFmode)
	    {
	      operands[1] = gen_rtx_MEM (XFmode, latehalf[0]);
	      middlehalf[1] = adjust_address (operands[1], DImode, size - 8);
	      latehalf[1] = adjust_address (operands[1], DImode, size - 4);
	    }
	  else
	    {
	      operands[1] = gen_rtx_MEM (DImode, latehalf[0]);
	      latehalf[1] = adjust_address (operands[1], DImode, size - 4);
	    }
	}
      else if (size == 12
	       && reg_overlap_mentioned_p (middlehalf[0],
					   XEXP (operands[1], 0)))
	{
	  /* Check for two regs used by both source and dest.
	     Note that this can't happen if the dest is all data regs.
	     It can happen if the dest is d6, d7, a0.
	     But in that case, latehalf is an addr reg, so
	     the code at compadr does ok.  */

	  if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
	      || reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
	    goto compadr;

	  /* JRV says this can't happen: */
	  gcc_assert (!addreg0 && !addreg1);

	  /* Only the middle reg conflicts; simply put it last.  */
	  handle_movsi (operands);
	  handle_movsi (latehalf);
	  handle_movsi (middlehalf);

	  return;
	}
      else if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0)))
	/* If the low half of dest is mentioned in the source memory
	   address, the arrange to emit the move late half first.  */
	dest_overlapped_low = 1;
    }

  /* If one or both operands autodecrementing,
     do the two words, high-numbered first.  */

  /* Likewise, the first move would clobber the source of the second one,
     do them in the other order.  This happens only for registers;
     such overlap can't happen in memory unless the user explicitly
     sets it up, and that is an undefined circumstance.  */

  if (optype0 == PUSHOP || optype1 == PUSHOP
      || (optype0 == REGOP && optype1 == REGOP
	  && ((middlehalf[1] && REGNO (operands[0]) == REGNO (middlehalf[1]))
	      || REGNO (operands[0]) == REGNO (latehalf[1])))
      || dest_overlapped_low)
    {
      /* Make any unoffsettable addresses point at high-numbered word.  */
      if (addreg0)
	handle_reg_adjust (addreg0, size - 4);
      if (addreg1)
	handle_reg_adjust (addreg1, size - 4);

      /* Do that word.  */
      handle_movsi (latehalf);

      /* Undo the adds we just did.  */
      if (addreg0)
	handle_reg_adjust (addreg0, -4);
      if (addreg1)
	handle_reg_adjust (addreg1, -4);

      if (size == 12)
	{
	  handle_movsi (middlehalf);

	  if (addreg0)
	    handle_reg_adjust (addreg0, -4);
	  if (addreg1)
	    handle_reg_adjust (addreg1, -4);
	}

      /* Do low-numbered word.  */

      handle_movsi (operands);
      return;
    }

  /* Normal case: do the two words, low-numbered first.  */

  m68k_final_prescan_insn (NULL, operands, 2);
  handle_movsi (operands);

  /* Do the middle one of the three words for long double */
  if (size == 12)
    {
      if (addreg0)
	handle_reg_adjust (addreg0, 4);
      if (addreg1)
	handle_reg_adjust (addreg1, 4);

      m68k_final_prescan_insn (NULL, middlehalf, 2);
      handle_movsi (middlehalf);
    }

  /* Make any unoffsettable addresses point at high-numbered word.  */
  if (addreg0)
    handle_reg_adjust (addreg0, 4);
  if (addreg1)
    handle_reg_adjust (addreg1, 4);

  /* Do that word.  */
  m68k_final_prescan_insn (NULL, latehalf, 2);
  handle_movsi (latehalf);

  /* Undo the adds we just did.  */
  if (addreg0)
    handle_reg_adjust (addreg0, -(size - 4));
  if (addreg1)
    handle_reg_adjust (addreg1, -(size - 4));

  return;
}
3519
3520/* Output assembler code to adjust REG by N. */
3521static void
3522output_reg_adjust (rtx reg, int n)
3523{
3524 const char *s;
3525
01512446 3526 gcc_assert (GET_MODE (reg) == SImode && n >= -12 && n != 0 && n <= 12);
c47b0cb4
MK
3527
3528 switch (n)
2505bc97 3529 {
c47b0cb4
MK
3530 case 12:
3531 s = "add%.l #12,%0";
3532 break;
3533
3534 case 8:
3535 s = "addq%.l #8,%0";
3536 break;
3537
3538 case 4:
3539 s = "addq%.l #4,%0";
3540 break;
3541
3542 case -12:
3543 s = "sub%.l #12,%0";
3544 break;
3545
3546 case -8:
3547 s = "subq%.l #8,%0";
3548 break;
3549
3550 case -4:
3551 s = "subq%.l #4,%0";
3552 break;
3553
3554 default:
3555 gcc_unreachable ();
3556 s = NULL;
2505bc97 3557 }
c47b0cb4
MK
3558
3559 output_asm_insn (s, &reg);
3560}
3561
3562/* Emit rtl code to adjust REG by N. */
3563static void
3564emit_reg_adjust (rtx reg1, int n)
3565{
3566 rtx reg2;
3567
01512446 3568 gcc_assert (GET_MODE (reg1) == SImode && n >= -12 && n != 0 && n <= 12);
c47b0cb4
MK
3569
3570 reg1 = copy_rtx (reg1);
3571 reg2 = copy_rtx (reg1);
3572
3573 if (n < 0)
3574 emit_insn (gen_subsi3 (reg1, reg2, GEN_INT (-n)));
3575 else if (n > 0)
3576 emit_insn (gen_addsi3 (reg1, reg2, GEN_INT (n)));
3577 else
3578 gcc_unreachable ();
3579}
3580
/* Output assembler to load address OPERANDS[1] to register OPERANDS[0]
   using an lea instruction.  (Note: the original comment's parameter
   order appears reversed relative to the template.)  */
static void
output_compadr (rtx operands[2])
{
  output_asm_insn ("lea %a1,%0", operands);
}
3587
/* Output the best assembler insn for moving operands[1] into operands[0]
   as a fullword.  Thin assembler-emitting wrapper used as the
   HANDLE_MOVSI callback of handle_move_double.  */
static void
output_movsi (rtx operands[2])
{
  output_asm_insn (singlemove_string (operands), operands);
}
3595
3596/* Copy OP and change its mode to MODE. */
3597static rtx
ef4bddc2 3598copy_operand (rtx op, machine_mode mode)
c47b0cb4
MK
3599{
3600 /* ??? This looks really ugly. There must be a better way
3601 to change a mode on the operand. */
3602 if (GET_MODE (op) != VOIDmode)
2505bc97 3603 {
c47b0cb4
MK
3604 if (REG_P (op))
3605 op = gen_rtx_REG (mode, REGNO (op));
2505bc97 3606 else
c47b0cb4
MK
3607 {
3608 op = copy_rtx (op);
3609 PUT_MODE (op, mode);
3610 }
2505bc97 3611 }
79e68feb 3612
c47b0cb4
MK
3613 return op;
3614}
3615
3616/* Emit rtl code for moving operands[1] into operands[0] as a fullword. */
3617static void
3618emit_movsi (rtx operands[2])
3619{
3620 operands[0] = copy_operand (operands[0], SImode);
3621 operands[1] = copy_operand (operands[1], SImode);
3622
3623 emit_insn (gen_movsi (operands[0], operands[1]));
3624}
3625
/* Output assembler code to perform a doubleword move insn
   with operands OPERANDS.  Delegates to handle_move_double with the
   assembler-emitting callbacks; returns "" because the callbacks have
   already written the output.  */
const char *
output_move_double (rtx *operands)
{
  handle_move_double (operands,
		      output_reg_adjust, output_compadr, output_movsi);

  return "";
}
3636
/* Output rtl code to perform a doubleword move insn
   with operands OPERANDS.  RTL-emitting counterpart of
   output_move_double; note emit_movsi is reused as the
   HANDLE_COMPADR callback since computing an address is just an
   SImode move at the RTL level.  */
void
m68k_emit_move_double (rtx operands[2])
{
  handle_move_double (operands, emit_reg_adjust, emit_movsi, emit_movsi);
}
dcc21c4c
PB
3644
3645/* Ensure mode of ORIG, a REG rtx, is MODE. Returns either ORIG or a
3646 new rtx with the correct mode. */
3647
3648static rtx
ef4bddc2 3649force_mode (machine_mode mode, rtx orig)
dcc21c4c
PB
3650{
3651 if (mode == GET_MODE (orig))
3652 return orig;
3653
3654 if (REGNO (orig) >= FIRST_PSEUDO_REGISTER)
3655 abort ();
3656
3657 return gen_rtx_REG (mode, REGNO (orig));
3658}
3659
/* Return nonzero if OP is a floating-point register (FP_REG_P) and
   reg_renumber is non-null — i.e. register allocation has produced a
   renumbering table (NOTE(review): presumably meaning OP is a hard FP
   reg post-reload; confirm against callers).  MODE is unused but kept
   for the standard operand-predicate signature.  */
static int
fp_reg_operand (rtx op, machine_mode mode ATTRIBUTE_UNUSED)
{
  return reg_renumber && FP_REG_P (op);
}
3665
/* Emit insns to move operands[1] into operands[0].

   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.

   Note SCRATCH_REG may not be in the proper mode depending on how it
   will be used.  This routine is responsible for creating a new copy
   of SCRATCH_REG in the proper mode.  */

int
emit_move_sequence (rtx *operands, machine_mode mode, rtx scratch_reg)
{
  register rtx operand0 = operands[0];
  register rtx operand1 = operands[1];
  register rtx tem;

  /* During reload, replace pseudos that did not get hard registers by
     their equivalent memory locations.  */
  if (scratch_reg
      && reload_in_progress && GET_CODE (operand0) == REG
      && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
    operand0 = reg_equiv_mem (REGNO (operand0));
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand0) == SUBREG
	   && GET_CODE (SUBREG_REG (operand0)) == REG
	   && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
    {
      /* We must not alter SUBREG_BYTE (operand0) since that would confuse
	 the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
				 reg_equiv_mem (REGNO (SUBREG_REG (operand0))),
				 SUBREG_BYTE (operand0));
      operand0 = alter_subreg (&temp, true);
    }

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand1) == REG
      && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
    operand1 = reg_equiv_mem (REGNO (operand1));
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand1) == SUBREG
	   && GET_CODE (SUBREG_REG (operand1)) == REG
	   && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
    {
      /* We must not alter SUBREG_BYTE (operand0) since that would confuse
	 the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
				 reg_equiv_mem (REGNO (SUBREG_REG (operand1))),
				 SUBREG_BYTE (operand1));
      operand1 = alter_subreg (&temp, true);
    }

  /* Apply any pending reload replacements inside MEM addresses.  */
  if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
      && ((tem = find_replacement (&XEXP (operand0, 0)))
	  != XEXP (operand0, 0)))
    operand0 = gen_rtx_MEM (GET_MODE (operand0), tem);
  if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
      && ((tem = find_replacement (&XEXP (operand1, 0)))
	  != XEXP (operand1, 0)))
    operand1 = gen_rtx_MEM (GET_MODE (operand1), tem);

  /* Handle secondary reloads for loads/stores of FP registers where
     the address is symbolic by using the scratch register */
  if (fp_reg_operand (operand0, mode)
      && ((GET_CODE (operand1) == MEM
	   && ! memory_address_p (DFmode, XEXP (operand1, 0)))
	  || ((GET_CODE (operand1) == SUBREG
	       && GET_CODE (XEXP (operand1, 0)) == MEM
	       && !memory_address_p (DFmode, XEXP (XEXP (operand1, 0), 0)))))
      && scratch_reg)
    {
      if (GET_CODE (operand1) == SUBREG)
	operand1 = XEXP (operand1, 0);

      /* SCRATCH_REG will hold an address.  We want
	 it in SImode regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (SImode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand1, 0)))
	{
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
	  emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
						       Pmode,
						       XEXP (XEXP (operand1, 0), 0),
						       scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand1, 0));
      /* Load the FP register through the scratch address.  */
      emit_insn (gen_rtx_SET (operand0, gen_rtx_MEM (mode, scratch_reg)));
      return 1;
    }
  else if (fp_reg_operand (operand1, mode)
	   && ((GET_CODE (operand0) == MEM
		&& ! memory_address_p (DFmode, XEXP (operand0, 0)))
	       || ((GET_CODE (operand0) == SUBREG)
		   && GET_CODE (XEXP (operand0, 0)) == MEM
		   && !memory_address_p (DFmode, XEXP (XEXP (operand0, 0), 0))))
	   && scratch_reg)
    {
      if (GET_CODE (operand0) == SUBREG)
	operand0 = XEXP (operand0, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in SIMODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (SImode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand0, 0)))
	{
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
	  emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0,
								       0)),
						       Pmode,
						       XEXP (XEXP (operand0, 0),
							     0),
						       scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand0, 0));
      /* Store the FP register through the scratch address.  */
      emit_insn (gen_rtx_SET (gen_rtx_MEM (mode, scratch_reg), operand1));
      return 1;
    }
  /* Handle secondary reloads for loads of FP registers from constant
     expressions by forcing the constant into memory.

     use scratch_reg to hold the address of the memory location.

     The proper fix is to change PREFERRED_RELOAD_CLASS to return
     NO_REGS when presented with a const_int and an register class
     containing only FP registers.  Doing so unfortunately creates
     more problems than it solves.   Fix this for 2.5.  */
  else if (fp_reg_operand (operand0, mode)
	   && CONSTANT_P (operand1)
	   && scratch_reg)
    {
      rtx xoperands[2];

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in SIMODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (SImode, scratch_reg);

      /* Force the constant into memory and put the address of the
	 memory location into scratch_reg.  */
      xoperands[0] = scratch_reg;
      xoperands[1] = XEXP (force_const_mem (mode, operand1), 0);
      emit_insn (gen_rtx_SET (scratch_reg, xoperands[1]));

      /* Now load the destination register.  */
      emit_insn (gen_rtx_SET (operand0, gen_rtx_MEM (mode, scratch_reg)));
      return 1;
    }

  /* Now have insn-emit do whatever it normally does.  */
  return 0;
}
3826
01e304f8
RZ
3827/* Split one or more DImode RTL references into pairs of SImode
3828 references. The RTL can be REG, offsettable MEM, integer constant, or
3829 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
3830 split and "num" is its length. lo_half and hi_half are output arrays
3831 that parallel "operands". */
3832
3833void
3834split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
3835{
3836 while (num--)
3837 {
3838 rtx op = operands[num];
3839
3840 /* simplify_subreg refuses to split volatile memory addresses,
3841 but we still have to handle it. */
3842 if (GET_CODE (op) == MEM)
3843 {
3844 lo_half[num] = adjust_address (op, SImode, 4);
3845 hi_half[num] = adjust_address (op, SImode, 0);
3846 }
3847 else
3848 {
3849 lo_half[num] = simplify_gen_subreg (SImode, op,
3850 GET_MODE (op) == VOIDmode
3851 ? DImode : GET_MODE (op), 4);
3852 hi_half[num] = simplify_gen_subreg (SImode, op,
3853 GET_MODE (op) == VOIDmode
3854 ? DImode : GET_MODE (op), 0);
3855 }
3856 }
3857}
3858
a40ed0f3
KH
3859/* Split X into a base and a constant offset, storing them in *BASE
3860 and *OFFSET respectively. */
3861
3862static void
3863m68k_split_offset (rtx x, rtx *base, HOST_WIDE_INT *offset)
3864{
3865 *offset = 0;
3866 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
3867 {
3868 *offset += INTVAL (XEXP (x, 1));
3869 x = XEXP (x, 0);
3870 }
3871 *base = x;
3872}
3873
/* Return true if PATTERN is a PARALLEL suitable for a movem or fmovem
   instruction.  STORE_P says whether the move is a load or store.

   If the instruction uses post-increment or pre-decrement addressing,
   AUTOMOD_BASE is the base register and AUTOMOD_OFFSET is the total
   adjustment.  This adjustment will be made by the first element of
   PARALLEL, with the loads or stores starting at element 1.  If the
   instruction does not use post-increment or pre-decrement addressing,
   AUTOMOD_BASE is null, AUTOMOD_OFFSET is 0, and the loads or stores
   start at element 0.  */

bool
m68k_movem_pattern_p (rtx pattern, rtx automod_base,
		      HOST_WIDE_INT automod_offset, bool store_p)
{
  rtx base, mem_base, set, mem, reg, last_reg;
  HOST_WIDE_INT offset, mem_offset;
  int i, first, len;
  enum reg_class rclass;

  len = XVECLEN (pattern, 0);
  /* Element 0 is the automodification SET when there is one; the
     memory accesses then start at element 1.  */
  first = (automod_base != NULL);

  if (automod_base)
    {
      /* Stores must be pre-decrement and loads must be post-increment.  */
      if (store_p != (automod_offset < 0))
	return false;

      /* Work out the base and offset for lowest memory location.  */
      base = automod_base;
      offset = (automod_offset < 0 ? automod_offset : 0);
    }
  else
    {
      /* Allow any valid base and offset in the first access.  */
      base = NULL;
      offset = 0;
    }

  last_reg = NULL;
  rclass = NO_REGS;
  for (i = first; i < len; i++)
    {
      /* We need a plain SET.  */
      set = XVECEXP (pattern, 0, i);
      if (GET_CODE (set) != SET)
	return false;

      /* Check that we have a memory location...  */
      mem = XEXP (set, !store_p);
      if (!MEM_P (mem) || !memory_operand (mem, VOIDmode))
	return false;

      /* ...with the right address.  */
      if (base == NULL)
	{
	  m68k_split_offset (XEXP (mem, 0), &base, &offset);
	  /* The ColdFire instruction only allows (An) and (d16,An) modes.
	     There are no mode restrictions for 680x0 besides the
	     automodification rules enforced above.  */
	  if (TARGET_COLDFIRE
	      && !m68k_legitimate_base_reg_p (base, reload_completed))
	    return false;
	}
      else
	{
	  /* Every subsequent access must be contiguous with the
	     previous one: same base, expected running offset.  */
	  m68k_split_offset (XEXP (mem, 0), &mem_base, &mem_offset);
	  if (!rtx_equal_p (base, mem_base) || offset != mem_offset)
	    return false;
	}

      /* Check that we have a register of the required mode and class.  */
      reg = XEXP (set, store_p);
      if (!REG_P (reg)
	  || !HARD_REGISTER_P (reg)
	  || GET_MODE (reg) != reg_raw_mode[REGNO (reg)])
	return false;

      if (last_reg)
	{
	  /* The register must belong to RCLASS and have a higher number
	     than the register in the previous SET.  */
	  if (!TEST_HARD_REG_BIT (reg_class_contents[rclass], REGNO (reg))
	      || REGNO (last_reg) >= REGNO (reg))
	    return false;
	}
      else
	{
	  /* Work out which register class we need.  */
	  if (INT_REGNO_P (REGNO (reg)))
	    rclass = GENERAL_REGS;
	  else if (FP_REGNO_P (REGNO (reg)))
	    rclass = FP_REGS;
	  else
	    return false;
	}

      last_reg = reg;
      offset += GET_MODE_SIZE (GET_MODE (reg));
    }

  /* If we have an automodification, check whether the final offset is OK.  */
  if (automod_base && offset != (automod_offset < 0 ? 0 : automod_offset))
    return false;

  /* Reject unprofitable cases.  */
  if (len < first + (rclass == FP_REGS ? MIN_FMOVEM_REGS : MIN_MOVEM_REGS))
    return false;

  return true;
}
3986
/* Return the assembly code template for a movem or fmovem instruction
   whose pattern is given by PATTERN.  Store the template's operands
   in OPERANDS.

   If the instruction uses post-increment or pre-decrement addressing,
   AUTOMOD_OFFSET is the total adjustment, otherwise it is 0.  STORE_P
   is true if this is a store instruction.  */

const char *
m68k_output_movem (rtx *operands, rtx pattern,
		   HOST_WIDE_INT automod_offset, bool store_p)
{
  unsigned int mask;
  int i, first;

  gcc_assert (GET_CODE (pattern) == PARALLEL);
  mask = 0;
  /* Skip the automodification SET in element 0, if present.  */
  first = (automod_offset != 0);
  for (i = first; i < XVECLEN (pattern, 0); i++)
    {
      /* When using movem with pre-decrement addressing, register X + D0_REG
	 is controlled by bit 15 - X.  For all other addressing modes,
	 register X + D0_REG is controlled by bit X.  Confusingly, the
	 register mask for fmovem is in the opposite order to that for
	 movem.  */
      unsigned int regno;

      gcc_assert (MEM_P (XEXP (XVECEXP (pattern, 0, i), !store_p)));
      gcc_assert (REG_P (XEXP (XVECEXP (pattern, 0, i), store_p)));
      regno = REGNO (XEXP (XVECEXP (pattern, 0, i), store_p));
      if (automod_offset < 0)
	{
	  /* Pre-decrement ordering.  */
	  if (FP_REGNO_P (regno))
	    mask |= 1 << (regno - FP0_REG);
	  else
	    mask |= 1 << (15 - (regno - D0_REG));
	}
      else
	{
	  /* Post-increment or plain addressing ordering.  */
	  if (FP_REGNO_P (regno))
	    mask |= 1 << (7 - (regno - FP0_REG));
	  else
	    mask |= 1 << (regno - D0_REG);
	}
    }
  /* The moved registers' contents become unknown to the cc tracker.  */
  CC_STATUS_INIT;

  /* Operand 0 is the (possibly automodified) memory address;
     operand 1 is the register mask immediate.  */
  if (automod_offset == 0)
    operands[0] = XEXP (XEXP (XVECEXP (pattern, 0, first), !store_p), 0);
  else if (automod_offset < 0)
    operands[0] = gen_rtx_PRE_DEC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
  else
    operands[0] = gen_rtx_POST_INC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
  operands[1] = GEN_INT (mask);
  if (FP_REGNO_P (REGNO (XEXP (XVECEXP (pattern, 0, first), store_p))))
    {
      if (store_p)
	return "fmovem %1,%a0";
      else
	return "fmovem %a0,%1";
    }
  else
    {
      if (store_p)
	return "movem%.l %1,%a0";
      else
	return "movem%.l %a0,%1";
    }
}
4056
79e68feb
RS
4057/* Return a REG that occurs in ADDR with coefficient 1.
4058 ADDR can be effectively incremented by incrementing REG. */
4059
4060static rtx
8a4a2253 4061find_addr_reg (rtx addr)
79e68feb
RS
4062{
4063 while (GET_CODE (addr) == PLUS)
4064 {
4065 if (GET_CODE (XEXP (addr, 0)) == REG)
4066 addr = XEXP (addr, 0);
4067 else if (GET_CODE (XEXP (addr, 1)) == REG)
4068 addr = XEXP (addr, 1);
4069 else if (CONSTANT_P (XEXP (addr, 0)))
4070 addr = XEXP (addr, 1);
4071 else if (CONSTANT_P (XEXP (addr, 1)))
4072 addr = XEXP (addr, 0);
4073 else
4761e388 4074 gcc_unreachable ();
79e68feb 4075 }
4761e388
NS
4076 gcc_assert (GET_CODE (addr) == REG);
4077 return addr;
79e68feb 4078}
9ee3c687 4079
/* Output assembler code to perform a 32-bit 3-operand add.  */

const char *
output_addsi3 (rtx *operands)
{
  if (! operands_match_p (operands[0], operands[1]))
    {
      /* Three-address form: only LEA can do this, and LEA needs the
	 address register as the base operand, so swap if necessary.  */
      if (!ADDRESS_REG_P (operands[1]))
	{
	  rtx tmp = operands[1];

	  operands[1] = operands[2];
	  operands[2] = tmp;
	}

      /* These insns can result from reloads to access
	 stack slots over 64k from the frame pointer.  */
      if (GET_CODE (operands[2]) == CONST_INT
	  && (INTVAL (operands[2]) < -32768 || INTVAL (operands[2]) > 32767))
	return "move%.l %2,%0\n\tadd%.l %1,%0";
      if (GET_CODE (operands[2]) == REG)
	return MOTOROLA ? "lea (%1,%2.l),%0" : "lea %1@(0,%2:l),%0";
      return MOTOROLA ? "lea (%c2,%1),%0" : "lea %1@(%c2),%0";
    }
  if (GET_CODE (operands[2]) == CONST_INT)
    {
      /* addq/subq handle immediates 1..8 directly.  */
      if (INTVAL (operands[2]) > 0
	  && INTVAL (operands[2]) <= 8)
	return "addq%.l %2,%0";
      if (INTVAL (operands[2]) < 0
	  && INTVAL (operands[2]) >= -8)
	{
	  operands[2] = GEN_INT (- INTVAL (operands[2]));
	  return "subq%.l %2,%0";
	}
      /* On the CPU32 it is faster to use two addql instructions to
	 add a small integer (8 < N <= 16) to a register.
	 Likewise for subql.  */
      if (TUNE_CPU32 && REG_P (operands[0]))
	{
	  if (INTVAL (operands[2]) > 8
	      && INTVAL (operands[2]) <= 16)
	    {
	      operands[2] = GEN_INT (INTVAL (operands[2]) - 8);
	      return "addq%.l #8,%0\n\taddq%.l %2,%0";
	    }
	  if (INTVAL (operands[2]) < -8
	      && INTVAL (operands[2]) >= -16)
	    {
	      operands[2] = GEN_INT (- INTVAL (operands[2]) - 8);
	      return "subq%.l #8,%0\n\tsubq%.l %2,%0";
	    }
	}
      /* 16-bit immediates: add.w or LEA on an address register.  */
      if (ADDRESS_REG_P (operands[0])
	  && INTVAL (operands[2]) >= -0x8000
	  && INTVAL (operands[2]) < 0x8000)
	{
	  if (TUNE_68040)
	    return "add%.w %2,%0";
	  else
	    return MOTOROLA ? "lea (%c2,%0),%0" : "lea %0@(%c2),%0";
	}
    }
  return "add%.l %2,%0";
}
79e68feb
RS
4145\f
/* Store in cc_status the expressions that the condition codes will
   describe after execution of an instruction whose pattern is EXP.
   Do not alter them if the instruction would not alter the cc's.  */

/* On the 68000, all the insns to store in an address register fail to
   set the cc's.  However, in some cases these instructions can make it
   possibly invalid to use the saved cc's.  In those cases we clear out
   some or all of the saved cc's so they won't be used.  */

void
notice_update_cc (rtx exp, rtx insn)
{
  if (GET_CODE (exp) == SET)
    {
      if (GET_CODE (SET_SRC (exp)) == CALL)
	CC_STATUS_INIT;
      else if (ADDRESS_REG_P (SET_DEST (exp)))
	{
	  /* Moves into an address register leave the cc's alone, but
	     invalidate any saved value the insn clobbers.  */
	  if (cc_status.value1 && modified_in_p (cc_status.value1, insn))
	    cc_status.value1 = 0;
	  if (cc_status.value2 && modified_in_p (cc_status.value2, insn))
	    cc_status.value2 = 0;
	}
      /* fmoves to memory or data registers do not set the condition
	 codes.  Normal moves _do_ set the condition codes, but not in
	 a way that is appropriate for comparison with 0, because -0.0
	 would be treated as a negative nonzero number.  Note that it
	 isn't appropriate to conditionalize this restriction on
	 HONOR_SIGNED_ZEROS because that macro merely indicates whether
	 we care about the difference between -0.0 and +0.0.  */
      else if (!FP_REG_P (SET_DEST (exp))
	       && SET_DEST (exp) != cc0_rtx
	       && (FP_REG_P (SET_SRC (exp))
		   || GET_CODE (SET_SRC (exp)) == FIX
		   || FLOAT_MODE_P (GET_MODE (SET_DEST (exp)))))
	CC_STATUS_INIT;
      /* A pair of move insns doesn't produce a useful overall cc.  */
      else if (!FP_REG_P (SET_DEST (exp))
	       && !FP_REG_P (SET_SRC (exp))
	       && GET_MODE_SIZE (GET_MODE (SET_SRC (exp))) > 4
	       && (GET_CODE (SET_SRC (exp)) == REG
		   || GET_CODE (SET_SRC (exp)) == MEM
		   || GET_CODE (SET_SRC (exp)) == CONST_DOUBLE))
	CC_STATUS_INIT;
      else if (SET_DEST (exp) != pc_rtx)
	{
	  /* Ordinary SET: the cc's now describe DEST compared to 0,
	     with SRC as an equivalent expression.  */
	  cc_status.flags = 0;
	  cc_status.value1 = SET_DEST (exp);
	  cc_status.value2 = SET_SRC (exp);
	}
    }
  else if (GET_CODE (exp) == PARALLEL
	   && GET_CODE (XVECEXP (exp, 0, 0)) == SET)
    {
      /* For a PARALLEL, only the first SET determines the cc's.  */
      rtx dest = SET_DEST (XVECEXP (exp, 0, 0));
      rtx src = SET_SRC (XVECEXP (exp, 0, 0));

      if (ADDRESS_REG_P (dest))
	CC_STATUS_INIT;
      else if (dest != pc_rtx)
	{
	  cc_status.flags = 0;
	  cc_status.value1 = dest;
	  cc_status.value2 = src;
	}
    }
  else
    CC_STATUS_INIT;
  /* QImode operations on address registers don't set usable cc's.  */
  if (cc_status.value2 != 0
      && ADDRESS_REG_P (cc_status.value2)
      && GET_MODE (cc_status.value2) == QImode)
    CC_STATUS_INIT;
  if (cc_status.value2 != 0)
    switch (GET_CODE (cc_status.value2))
      {
      case ASHIFT: case ASHIFTRT: case LSHIFTRT:
      case ROTATE: case ROTATERT:
	/* These instructions always clear the overflow bit, and set
	   the carry to the bit shifted out.  */
	cc_status.flags |= CC_OVERFLOW_UNUSABLE | CC_NO_CARRY;
	break;

      case PLUS: case MINUS: case MULT:
      case DIV: case UDIV: case MOD: case UMOD: case NEG:
	if (GET_MODE (cc_status.value2) != VOIDmode)
	  cc_status.flags |= CC_NO_OVERFLOW;
	break;
      case ZERO_EXTEND:
	/* (SET r1 (ZERO_EXTEND r2)) on this machine
	   ends with a move insn moving r2 in r2's mode.
	   Thus, the cc's are set for r2.
	   This can set N bit spuriously.  */
	cc_status.flags |= CC_NOT_NEGATIVE;
	/* FALLTHRU */

      default:
	break;
      }
  /* If VALUE2 mentions VALUE1's register, the insn clobbered it,
     so the saved source expression is no longer valid.  */
  if (cc_status.value1 && GET_CODE (cc_status.value1) == REG
      && cc_status.value2
      && reg_overlap_mentioned_p (cc_status.value1, cc_status.value2))
    cc_status.value2 = 0;
  /* Check for PRE_DEC in dest modifying a register used in src.  */
  if (cc_status.value1 && GET_CODE (cc_status.value1) == MEM
      && GET_CODE (XEXP (cc_status.value1, 0)) == PRE_DEC
      && cc_status.value2
      && reg_overlap_mentioned_p (XEXP (XEXP (cc_status.value1, 0), 0),
				  cc_status.value2))
    cc_status.value2 = 0;
  if (((cc_status.value1 && FP_REG_P (cc_status.value1))
       || (cc_status.value2 && FP_REG_P (cc_status.value2))))
    cc_status.flags = CC_IN_68881;
  if (cc_status.value2 && GET_CODE (cc_status.value2) == COMPARE
      && GET_MODE_CLASS (GET_MODE (XEXP (cc_status.value2, 0))) == MODE_FLOAT)
    {
      cc_status.flags = CC_IN_68881;
      if (!FP_REG_P (XEXP (cc_status.value2, 0))
	  && FP_REG_P (XEXP (cc_status.value2, 1)))
	cc_status.flags |= CC_REVERSED;
    }
}
4266\f
5505f548 4267const char *
8a4a2253 4268output_move_const_double (rtx *operands)
79e68feb 4269{
1a8965c4 4270 int code = standard_68881_constant_p (operands[1]);
79e68feb 4271
1a8965c4 4272 if (code != 0)
79e68feb 4273 {
1a8965c4 4274 static char buf[40];
79e68feb 4275
3b4b85c9 4276 sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff);
1a8965c4 4277 return buf;
79e68feb 4278 }
1a8965c4 4279 return "fmove%.d %1,%0";
79e68feb
RS
4280}
4281
5505f548 4282const char *
8a4a2253 4283output_move_const_single (rtx *operands)
79e68feb 4284{
1a8965c4 4285 int code = standard_68881_constant_p (operands[1]);
79e68feb 4286
1a8965c4 4287 if (code != 0)
79e68feb 4288 {
1a8965c4 4289 static char buf[40];
79e68feb 4290
3b4b85c9 4291 sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff);
1a8965c4 4292 return buf;
79e68feb 4293 }
1a8965c4 4294 return "fmove%.s %f1,%0";
79e68feb
RS
4295}
4296
/* Return nonzero if X, a CONST_DOUBLE, has a value that we can get
   from the "fmovecr" instruction.
   The value, anded with 0xff, gives the code to use in fmovecr
   to get the desired constant.  */

/* This code has been fixed for cross-compilation.  */

static int inited_68881_table = 0;

/* Decimal spellings of the constants in the 68881 on-chip ROM.
   Entries are index-matched with codes_68881 and values_68881.  */
static const char *const strings_68881[7] = {
  "0.0",
  "1.0",
  "10.0",
  "100.0",
  "10000.0",
  "1e8",
  "1e16"
};

/* fmovecr ROM offsets corresponding to strings_68881.  */
static const int codes_68881[7] = {
  0x0f,
  0x32,
  0x33,
  0x34,
  0x35,
  0x36,
  0x37
};

/* Binary forms of strings_68881, filled in lazily by
   init_68881_table.  */
REAL_VALUE_TYPE values_68881[7];

/* Set up values_68881 array by converting the decimal values
   strings_68881 to binary.  */

void
init_68881_table (void)
{
  int i;
  REAL_VALUE_TYPE r;
  machine_mode mode;

  mode = SFmode;
  for (i = 0; i < 7; i++)
    {
      /* The last entry (1e16) is only exact in double precision.  */
      if (i == 6)
	mode = DFmode;
      r = REAL_VALUE_ATOF (strings_68881[i], mode);
      values_68881[i] = r;
    }
  inited_68881_table = 1;
}
79e68feb
RS
4348
/* Return the fmovecr code for CONST_DOUBLE X if the 68881 constant
   ROM can supply its value, or 0 if it cannot (or fmovecr should not
   be used on the target CPU).  */

int
standard_68881_constant_p (rtx x)
{
  const REAL_VALUE_TYPE *r;
  int i;

  /* fmovecr must be emulated on the 68040 and 68060, so it shouldn't be
     used at all on those chips.  */
  if (TUNE_68040_60)
    return 0;

  if (! inited_68881_table)
    init_68881_table ();

  r = CONST_DOUBLE_REAL_VALUE (x);

  /* Use real_identical instead of real_equal so that -0.0 is rejected.  */
  for (i = 0; i < 6; i++)
    {
      if (real_identical (r, &values_68881[i]))
	return (codes_68881[i]);
    }

  /* The 1e16 entry is not exactly representable in SFmode, so only
     match it for wider modes.  */
  if (GET_MODE (x) == SFmode)
    return 0;

  if (real_equal (r, &values_68881[6]))
    return (codes_68881[6]);

  /* larger powers of ten in the constants ram are not used
     because they are not equal to a `double' C constant.  */
  return 0;
}
4382
/* If X is a floating-point constant, return the logarithm of X base 2,
   or 0 if X is not a power of 2.  */

int
floating_exact_log2 (rtx x)
{
  const REAL_VALUE_TYPE *r;
  REAL_VALUE_TYPE r1;
  int exp;

  r = CONST_DOUBLE_REAL_VALUE (x);

  /* Values below 1.0 (including negatives) are never matched, so the
     returned 0 is unambiguous ("not a power of 2 we care about").  */
  if (real_less (r, &dconst1))
    return 0;

  /* X is a power of 2 exactly when 2^exponent reconstructs it.  */
  exp = real_exponent (r);
  real_2expN (&r1, exp, DFmode);
  if (real_equal (&r1, r))
    return exp;

  return 0;
}
4405\f
79e68feb
RS
/* A C compound statement to output to stdio stream STREAM the
   assembler syntax for an instruction operand X.  X is an RTL
   expression.

   CODE is a value that can be used to specify one of several ways
   of printing the operand.  It is used when identical operands
   must be printed differently depending on the context.  CODE
   comes from the `%' specification that was used to request
   printing of the operand.  If the specification was just `%DIGIT'
   then CODE is 0; if the specification was `%LTR DIGIT' then CODE
   is the ASCII code for LTR.

   If X is a register, this macro should print the register's name.
   The names can be found in an array `reg_names' whose type is
   `char *[]'.  `reg_names' is initialized from `REGISTER_NAMES'.

   When the machine description has a specification `%PUNCT' (a `%'
   followed by a punctuation character), this macro is called with
   a null pointer for X and the punctuation character for CODE.

   The m68k specific codes are:

   '.' for dot needed in Motorola-style opcode names.
   '-' for an operand pushing on the stack:
       sp@-, -(sp) or -(%sp) depending on the style of syntax.
   '+' for an operand popping off the stack:
       sp@+, (sp)+ or (%sp)+ depending on the style of syntax.
   '@' for a reference to the top word on the stack:
       sp@, (sp) or (%sp) depending on the style of syntax.
   '#' for an immediate operand prefix (# in MIT and Motorola syntax
       but & in SGS syntax).
   '!' for the cc register (used in an `and to cc' insn).
   '$' for the letter `s' in an op code, but only on the 68040.
   '&' for the letter `d' in an op code, but only on the 68040.
   '/' for register prefix needed by longlong.h.
   '?' for m68k_library_id_string

   'b' for byte insn (no effect, on the Sun; this is for the ISI).
   'd' to force memory addressing to be absolute, not relative.
   'f' for float insn (print a CONST_DOUBLE as a float rather than in hex)
   'x' for float insn (print a CONST_DOUBLE as a float rather than in hex),
       or print pair of registers as rx:ry.
   'p' print an address with @PLTPC attached, but only if the operand
       is not locally-bound.  */

void
print_operand (FILE *file, rtx op, int letter)
{
  /* Punctuation codes first: OP is null for these.  */
  if (letter == '.')
    {
      if (MOTOROLA)
	fprintf (file, ".");
    }
  else if (letter == '#')
    asm_fprintf (file, "%I");
  else if (letter == '-')
    asm_fprintf (file, MOTOROLA ? "-(%Rsp)" : "%Rsp@-");
  else if (letter == '+')
    asm_fprintf (file, MOTOROLA ? "(%Rsp)+" : "%Rsp@+");
  else if (letter == '@')
    asm_fprintf (file, MOTOROLA ? "(%Rsp)" : "%Rsp@");
  else if (letter == '!')
    asm_fprintf (file, "%Rfpcr");
  else if (letter == '$')
    {
      if (TARGET_68040)
	fprintf (file, "s");
    }
  else if (letter == '&')
    {
      if (TARGET_68040)
	fprintf (file, "d");
    }
  else if (letter == '/')
    asm_fprintf (file, "%R");
  else if (letter == '?')
    asm_fprintf (file, m68k_library_id_string);
  else if (letter == 'p')
    {
      output_addr_const (file, op);
      if (!(GET_CODE (op) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (op)))
	fprintf (file, "@PLTPC");
    }
  else if (GET_CODE (op) == REG)
    {
      if (letter == 'R')
	/* Print out the second register name of a register pair.
	   I.e., R (6) => 7.  */
	fputs (M68K_REGNAME(REGNO (op) + 1), file);
      else
	fputs (M68K_REGNAME(REGNO (op)), file);
    }
  else if (GET_CODE (op) == MEM)
    {
      output_address (GET_MODE (op), XEXP (op, 0));
      /* 'd': force an absolute (long) address where a 16-bit absolute
	 would otherwise be used.  */
      if (letter == 'd' && ! TARGET_68020
	  && CONSTANT_ADDRESS_P (XEXP (op, 0))
	  && !(GET_CODE (XEXP (op, 0)) == CONST_INT
	       && INTVAL (XEXP (op, 0)) < 0x8000
	       && INTVAL (XEXP (op, 0)) >= -0x8000))
	fprintf (file, MOTOROLA ? ".l" : ":l");
    }
  /* Floating-point constants are emitted as hex images of their
     target representation, one 32-bit group per word.  */
  else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == SFmode)
    {
      long l;
      REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), l);
      asm_fprintf (file, "%I0x%lx", l & 0xFFFFFFFF);
    }
  else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == XFmode)
    {
      long l[3];
      REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op), l);
      asm_fprintf (file, "%I0x%lx%08lx%08lx", l[0] & 0xFFFFFFFF,
		   l[1] & 0xFFFFFFFF, l[2] & 0xFFFFFFFF);
    }
  else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == DFmode)
    {
      long l[2];
      REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op), l);
      asm_fprintf (file, "%I0x%lx%08lx", l[0] & 0xFFFFFFFF, l[1] & 0xFFFFFFFF);
    }
  else
    {
      /* Use `print_operand_address' instead of `output_addr_const'
	 to ensure that we print relevant PIC stuff.  */
      asm_fprintf (file, "%I");
      if (TARGET_PCREL
	  && (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == CONST))
	print_operand_address (file, op);
      else
	output_addr_const (file, op);
    }
}
4539
75df395f
MK
/* Return string for TLS relocation RELOC.  */

static const char *
m68k_get_reloc_decoration (enum m68k_reloc reloc)
{
  /* To my knowledge, !MOTOROLA assemblers don't support TLS.  */
  gcc_assert (MOTOROLA || reloc == RELOC_GOT);

  switch (reloc)
    {
    case RELOC_GOT:
      if (MOTOROLA)
	{
	  /* With -fpic on a 68020+, a 16-bit GOT offset suffices.  */
	  if (flag_pic == 1 && TARGET_68020)
	    return "@GOT.w";
	  else
	    return "@GOT";
	}
      else
	{
	  if (TARGET_68020)
	    {
	      switch (flag_pic)
		{
		case 1:
		  return ":w";
		case 2:
		  return ":l";
		default:
		  return "";
		}
	    }
	}
      /* Non-MOTOROLA without TARGET_68020 has no GOT decoration.  */
      gcc_unreachable ();

    case RELOC_TLSGD:
      return "@TLSGD";

    case RELOC_TLSLDM:
      return "@TLSLDM";

    case RELOC_TLSLDO:
      return "@TLSLDO";

    case RELOC_TLSIE:
      return "@TLSIE";

    case RELOC_TLSLE:
      return "@TLSLE";

    default:
      gcc_unreachable ();
    }
}
4594
cb69db4f 4595/* m68k implementation of TARGET_OUTPUT_ADDR_CONST_EXTRA. */
884316ff 4596
cb69db4f 4597static bool
884316ff
JM
4598m68k_output_addr_const_extra (FILE *file, rtx x)
4599{
75df395f
MK
4600 if (GET_CODE (x) == UNSPEC)
4601 {
4602 switch (XINT (x, 1))
4603 {
4604 case UNSPEC_RELOC16:
4605 case UNSPEC_RELOC32:
4606 output_addr_const (file, XVECEXP (x, 0, 0));
f878882b
AS
4607 fputs (m68k_get_reloc_decoration
4608 ((enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1))), file);
75df395f 4609 return true;
884316ff 4610
75df395f
MK
4611 default:
4612 break;
4613 }
4614 }
4615
4616 return false;
4617}
4618
4619/* M68K implementation of TARGET_ASM_OUTPUT_DWARF_DTPREL. */
4620
4621static void
4622m68k_output_dwarf_dtprel (FILE *file, int size, rtx x)
4623{
4624 gcc_assert (size == 4);
4625 fputs ("\t.long\t", file);
4626 output_addr_const (file, x);
4627 fputs ("@TLSLDO+0x8000", file);
884316ff
JM
4628}
4629
7b0f476d
AS
/* In the name of slightly smaller debug output, and to cater to
   general assembler lossage, recognize various UNSPEC sequences
   and turn them back into a direct symbol reference.  */

static rtx
m68k_delegitimize_address (rtx orig_x)
{
  rtx x;
  struct m68k_address addr;
  rtx unspec;

  orig_x = delegitimize_mem_from_attrs (orig_x);
  x = orig_x;
  if (MEM_P (x))
    x = XEXP (x, 0);

  if (GET_CODE (x) != PLUS || GET_MODE (x) != Pmode)
    return orig_x;

  /* Only addresses whose constant part is a CONST wrapper (which is
     where the UNSPEC lives) are candidates.  */
  if (!m68k_decompose_address (GET_MODE (x), x, false, &addr)
      || addr.offset == NULL_RTX
      || GET_CODE (addr.offset) != CONST)
    return orig_x;

  /* The offset may be (unspec ...) or (plus (unspec ...) const_int).  */
  unspec = XEXP (addr.offset, 0);
  if (GET_CODE (unspec) == PLUS && CONST_INT_P (XEXP (unspec, 1)))
    unspec = XEXP (unspec, 0);
  if (GET_CODE (unspec) != UNSPEC
      || (XINT (unspec, 1) != UNSPEC_RELOC16
	  && XINT (unspec, 1) != UNSPEC_RELOC32))
    return orig_x;
  x = XVECEXP (unspec, 0, 0);
  gcc_assert (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF);
  /* Re-attach the addend that was sitting outside the UNSPEC.  */
  if (unspec != XEXP (addr.offset, 0))
    x = gen_rtx_PLUS (Pmode, x, XEXP (XEXP (addr.offset, 0), 1));
  /* Rebuild the rest of the decomposed address around the bare symbol.  */
  if (addr.index)
    {
      rtx idx = addr.index;
      if (addr.scale != 1)
	idx = gen_rtx_MULT (Pmode, idx, GEN_INT (addr.scale));
      x = gen_rtx_PLUS (Pmode, idx, x);
    }
  if (addr.base)
    x = gen_rtx_PLUS (Pmode, addr.base, x);
  if (MEM_P (orig_x))
    x = replace_equiv_address_nv (orig_x, x);
  return x;
}
4678
79e68feb
RS
4679\f
/* A C compound statement to output to stdio stream STREAM the
   assembler syntax for an instruction operand that is a memory
   reference whose address is ADDR.  ADDR is an RTL expression.

   Note that this contains a kludge that knows that the only reason
   we have an address (plus (label_ref...) (reg...)) when not generating
   PIC code is in the insn before a tablejump, and we know that m68k.md
   generates a label LInnn: on such an insn.

   It is possible for PIC to generate a (plus (label_ref...) (reg...))
   and we handle that just like we would a (plus (symbol_ref...) (reg...)).

   This routine is responsible for distinguishing between -fpic and -fPIC
   style relocations in an address.  When generating -fpic code the
   offset is output in word mode (e.g. movel a5@(_foo:w), a0).  When generating
   -fPIC code the offset is output in long mode (e.g. movel a5@(_foo:l), a0) */

void
print_operand_address (FILE *file, rtx addr)
{
  struct m68k_address address;

  if (!m68k_decompose_address (QImode, addr, true, &address))
    gcc_unreachable ();

  if (address.code == PRE_DEC)
    fprintf (file, MOTOROLA ? "-(%s)" : "%s@-",
	     M68K_REGNAME (REGNO (address.base)));
  else if (address.code == POST_INC)
    fprintf (file, MOTOROLA ? "(%s)+" : "%s@+",
	     M68K_REGNAME (REGNO (address.base)));
  else if (!address.base && !address.index)
    {
      /* A constant address.  */
      gcc_assert (address.offset == addr);
      if (GET_CODE (addr) == CONST_INT)
	{
	  /* (xxx).w or (xxx).l.  */
	  if (IN_RANGE (INTVAL (addr), -0x8000, 0x7fff))
	    fprintf (file, MOTOROLA ? "%d.w" : "%d:w", (int) INTVAL (addr));
	  else
	    fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (addr));
	}
      else if (TARGET_PCREL)
	{
	  /* (d16,PC) or (bd,PC,Xn) (with suppressed index register).  */
	  fputc ('(', file);
	  output_addr_const (file, addr);
	  asm_fprintf (file, flag_pic == 1 ? ":w,%Rpc)" : ":l,%Rpc)");
	}
      else
	{
	  /* (xxx).l.  We need a special case for SYMBOL_REF if the symbol
	     name ends in `.<letter>', as the last 2 characters can be
	     mistaken as a size suffix.  Put the name in parentheses.  */
	  if (GET_CODE (addr) == SYMBOL_REF
	      && strlen (XSTR (addr, 0)) > 2
	      && XSTR (addr, 0)[strlen (XSTR (addr, 0)) - 2] == '.')
	    {
	      putc ('(', file);
	      output_addr_const (file, addr);
	      putc (')', file);
	    }
	  else
	    output_addr_const (file, addr);
	}
    }
  else
    {
      int labelno;

      /* If ADDR is a (d8,pc,Xn) address, this is the number of the
	 label being accessed, otherwise it is -1.  */
      labelno = (address.offset
		 && !address.base
		 && GET_CODE (address.offset) == LABEL_REF
		 ? CODE_LABEL_NUMBER (XEXP (address.offset, 0))
		 : -1);
      if (MOTOROLA)
	{
	  /* Print the "offset(base" component.  */
	  if (labelno >= 0)
	    asm_fprintf (file, "%LL%d(%Rpc,", labelno);
	  else
	    {
	      if (address.offset)
		output_addr_const (file, address.offset);

	      putc ('(', file);
	      if (address.base)
		fputs (M68K_REGNAME (REGNO (address.base)), file);
	    }
	  /* Print the ",index" component, if any.  */
	  if (address.index)
	    {
	      if (address.base)
		putc (',', file);
	      fprintf (file, "%s.%c",
		       M68K_REGNAME (REGNO (address.index)),
		       GET_MODE (address.index) == HImode ? 'w' : 'l');
	      if (address.scale != 1)
		fprintf (file, "*%d", address.scale);
	    }
	  putc (')', file);
	}
      else /* !MOTOROLA */
	{
	  if (!address.offset && !address.index)
	    fprintf (file, "%s@", M68K_REGNAME (REGNO (address.base)));
	  else
	    {
	      /* Print the "base@(offset" component.  */
	      if (labelno >= 0)
		asm_fprintf (file, "%Rpc@(%LL%d", labelno);
	      else
		{
		  if (address.base)
		    fputs (M68K_REGNAME (REGNO (address.base)), file);
		  fprintf (file, "@(");
		  if (address.offset)
		    output_addr_const (file, address.offset);
		}
	      /* Print the ",index" component, if any.  */
	      if (address.index)
		{
		  fprintf (file, ",%s:%c",
			   M68K_REGNAME (REGNO (address.index)),
			   GET_MODE (address.index) == HImode ? 'w' : 'l');
		  if (address.scale != 1)
		    fprintf (file, ":%d", address.scale);
		}
	      putc (')', file);
	    }
	}
    }
}
af13f02d
JW

/* Check for cases where a clr insns can be omitted from code using
   strict_low_part sets.  For example, the second clrl here is not needed:
   clrl d0; movw a0@+,d0; use d0; clrl d0; movw a0@+; use d0; ...

   MODE is the mode of this STRICT_LOW_PART set.  FIRST_INSN is the clear
   insn we are checking for redundancy.  TARGET is the register set by the
   clear insn.

   Returns true only if we can prove, by walking backwards from FIRST_INSN,
   that TARGET's upper bits are already zero when FIRST_INSN executes.  */

bool
strict_low_part_peephole_ok (machine_mode mode, rtx_insn *first_insn,
			     rtx target)
{
  rtx_insn *p = first_insn;

  /* Walk backwards through the insn stream looking for the most recent
     write to TARGET.  */
  while ((p = PREV_INSN (p)))
    {
      /* Do not look past a basic block boundary; control may reach
	 FIRST_INSN from elsewhere.  */
      if (NOTE_INSN_BASIC_BLOCK_P (p))
	return false;

      /* Other notes carry no semantics; skip them.  */
      if (NOTE_P (p))
	continue;

      /* If it isn't an insn, then give up.  */
      if (!INSN_P (p))
	return false;

      if (reg_set_p (target, p))
	{
	  rtx set = single_set (p);
	  rtx dest;

	  /* If it isn't an easy to recognize insn, then give up.  */
	  if (! set)
	    return false;

	  dest = SET_DEST (set);

	  /* If this sets the entire target register to zero, then our
	     first_insn is redundant.  */
	  if (rtx_equal_p (dest, target)
	      && SET_SRC (set) == const0_rtx)
	    return true;
	  else if (GET_CODE (dest) == STRICT_LOW_PART
		   && GET_CODE (XEXP (dest, 0)) == REG
		   && REGNO (XEXP (dest, 0)) == REGNO (target)
		   && (GET_MODE_SIZE (GET_MODE (XEXP (dest, 0)))
		       <= GET_MODE_SIZE (mode)))
	    /* This is a strict low part set which modifies less than
	       we are using, so it is safe.  Keep searching for the
	       dominating full clear.  */
	    ;
	  else
	    return false;
	}
    }

  /* Hit the start of the insn list without finding a clear.  */
  return false;
}
67cd4f83 4874
2c8ec431
DL
4875/* Operand predicates for implementing asymmetric pc-relative addressing
4876 on m68k. The m68k supports pc-relative addressing (mode 7, register 2)
dab66575 4877 when used as a source operand, but not as a destination operand.
2c8ec431
DL
4878
4879 We model this by restricting the meaning of the basic predicates
4880 (general_operand, memory_operand, etc) to forbid the use of this
4881 addressing mode, and then define the following predicates that permit
4882 this addressing mode. These predicates can then be used for the
4883 source operands of the appropriate instructions.
4884
4885 n.b. While it is theoretically possible to change all machine patterns
4886 to use this addressing mode where permitted by the architecture,
4887 it has only been implemented for "common" cases: SImode, HImode, and
4888 QImode operands, and only for the principal operations that would
4889 require this addressing mode: data movement and simple integer operations.
4890
4891 In parallel with these new predicates, two new constraint letters
4892 were defined: 'S' and 'T'. 'S' is the -mpcrel analog of 'm'.
4893 'T' replaces 's' in the non-pcrel case. It is a no-op in the pcrel case.
4894 In the pcrel case 's' is only valid in combination with 'a' registers.
4895 See addsi3, subsi3, cmpsi, and movsi patterns for a better understanding
4896 of how these constraints are used.
4897
4898 The use of these predicates is strictly optional, though patterns that
4899 don't will cause an extra reload register to be allocated where one
4900 was not necessary:
4901
4902 lea (abc:w,%pc),%a0 ; need to reload address
4903 moveq &1,%d1 ; since write to pc-relative space
4904 movel %d1,%a0@ ; is not allowed
4905 ...
4906 lea (abc:w,%pc),%a1 ; no need to reload address here
4907 movel %a1@,%d0 ; since "movel (abc:w,%pc),%d0" is ok
4908
4909 For more info, consult tiemann@cygnus.com.
4910
4911
4912 All of the ugliness with predicates and constraints is due to the
4913 simple fact that the m68k does not allow a pc-relative addressing
4914 mode as a destination. gcc does not distinguish between source and
4915 destination addresses. Hence, if we claim that pc-relative address
331d9186 4916 modes are valid, e.g. TARGET_LEGITIMATE_ADDRESS_P accepts them, then we
2c8ec431
DL
4917 end up with invalid code. To get around this problem, we left
4918 pc-relative modes as invalid addresses, and then added special
4919 predicates and constraints to accept them.
4920
4921 A cleaner way to handle this is to modify gcc to distinguish
4922 between source and destination addresses. We can then say that
4923 pc-relative is a valid source address but not a valid destination
4924 address, and hopefully avoid a lot of the predicate and constraint
4925 hackery. Unfortunately, this would be a pretty big change. It would
4926 be a useful change for a number of ports, but there aren't any current
4927 plans to undertake this.
4928
4929 ***************************************************************************/
4930
4931
/* Output the assembler template for an SImode AND of OPERANDS[0] with
   constant-capable OPERANDS[2].  Uses a 16-bit "and" when the constant
   leaves the upper half untouched, or a single "bclr" when exactly one
   bit is being cleared; falls back to a full 32-bit "and".  May rewrite
   OPERANDS[0..2] in place to match the template returned.  */

const char *
output_andsi3 (rtx *operands)
{
  int logval;
  /* Case 1: upper 16 bits of the mask are all ones, so only the low
     word changes -- a word-sized operation suffices (not on ColdFire,
     which lacks these forms).  */
  if (GET_CODE (operands[2]) == CONST_INT
      && (INTVAL (operands[2]) | 0xffff) == -1
      && (DATA_REG_P (operands[0])
	  || offsettable_memref_p (operands[0]))
      && !TARGET_COLDFIRE)
    {
      if (GET_CODE (operands[0]) != REG)
	operands[0] = adjust_address (operands[0], HImode, 2);
      operands[2] = GEN_INT (INTVAL (operands[2]) & 0xffff);
      /* Do not delete a following tstl %0 insn; that would be incorrect.  */
      CC_STATUS_INIT;
      if (operands[2] == const0_rtx)
	return "clr%.w %0";
      return "and%.w %2,%0";
    }
  /* Case 2: the mask clears exactly one bit -- use bclr.  */
  if (GET_CODE (operands[2]) == CONST_INT
      && (logval = exact_log2 (~ INTVAL (operands[2]) & 0xffffffff)) >= 0
      && (DATA_REG_P (operands[0])
	  || offsettable_memref_p (operands[0])))
    {
      if (DATA_REG_P (operands[0]))
	operands[1] = GEN_INT (logval);
      else
	{
	  /* Memory bclr works on a byte; point OPERANDS[0] at the byte
	     containing the bit and renumber the bit within that byte.  */
	  operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
	  operands[1] = GEN_INT (logval % 8);
	}
      /* This does not set condition codes in a standard way.  */
      CC_STATUS_INIT;
      return "bclr %1,%0";
    }
  return "and%.l %2,%0";
}
4969
/* Output the assembler template for an SImode inclusive-OR of
   OPERANDS[0] with OPERANDS[2].  Mirrors output_andsi3: word-sized "or"
   when the constant's upper half is zero, "bset" when exactly one bit is
   being set, else a 32-bit "or".  May rewrite OPERANDS in place.  */

const char *
output_iorsi3 (rtx *operands)
{
  register int logval;
  /* Case 1: constant fits in the low 16 bits -- word op suffices
     (not on ColdFire).  */
  if (GET_CODE (operands[2]) == CONST_INT
      && INTVAL (operands[2]) >> 16 == 0
      && (DATA_REG_P (operands[0])
	  || offsettable_memref_p (operands[0]))
      && !TARGET_COLDFIRE)
    {
      if (GET_CODE (operands[0]) != REG)
	operands[0] = adjust_address (operands[0], HImode, 2);
      /* Do not delete a following tstl %0 insn; that would be incorrect.  */
      CC_STATUS_INIT;
      if (INTVAL (operands[2]) == 0xffff)
	/* OR with all-ones low word just stores the constant.  */
	return "mov%.w %2,%0";
      return "or%.w %2,%0";
    }
  /* Case 2: exactly one bit set -- use bset.  */
  if (GET_CODE (operands[2]) == CONST_INT
      && (logval = exact_log2 (INTVAL (operands[2]) & 0xffffffff)) >= 0
      && (DATA_REG_P (operands[0])
	  || offsettable_memref_p (operands[0])))
    {
      if (DATA_REG_P (operands[0]))
	operands[1] = GEN_INT (logval);
      else
	{
	  /* Memory bset addresses a byte: select the byte holding the
	     bit and renumber the bit within it.  */
	  operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
	  operands[1] = GEN_INT (logval % 8);
	}
      /* bset does not set the condition codes in a standard way.  */
      CC_STATUS_INIT;
      return "bset %1,%0";
    }
  return "or%.l %2,%0";
}
5005
/* Output the assembler template for an SImode XOR of OPERANDS[0] with
   OPERANDS[2].  Same structure as output_andsi3/output_iorsi3: word-sized
   "eor" when the constant's upper half is zero, "bchg" when exactly one
   bit is toggled, else a 32-bit "eor".  May rewrite OPERANDS in place.  */

const char *
output_xorsi3 (rtx *operands)
{
  register int logval;
  /* Case 1: constant fits in the low 16 bits -- word op suffices
     (not on ColdFire).  */
  if (GET_CODE (operands[2]) == CONST_INT
      && INTVAL (operands[2]) >> 16 == 0
      && (offsettable_memref_p (operands[0]) || DATA_REG_P (operands[0]))
      && !TARGET_COLDFIRE)
    {
      if (! DATA_REG_P (operands[0]))
	operands[0] = adjust_address (operands[0], HImode, 2);
      /* Do not delete a following tstl %0 insn; that would be incorrect.  */
      CC_STATUS_INIT;
      if (INTVAL (operands[2]) == 0xffff)
	/* XOR with all-ones low word is a word-sized complement.  */
	return "not%.w %0";
      return "eor%.w %2,%0";
    }
  /* Case 2: exactly one bit toggled -- use bchg.  */
  if (GET_CODE (operands[2]) == CONST_INT
      && (logval = exact_log2 (INTVAL (operands[2]) & 0xffffffff)) >= 0
      && (DATA_REG_P (operands[0])
	  || offsettable_memref_p (operands[0])))
    {
      if (DATA_REG_P (operands[0]))
	operands[1] = GEN_INT (logval);
      else
	{
	  /* Memory bchg addresses a byte: select the byte holding the
	     bit and renumber the bit within it.  */
	  operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
	  operands[1] = GEN_INT (logval % 8);
	}
      /* bchg does not set the condition codes in a standard way.  */
      CC_STATUS_INIT;
      return "bchg %1,%0";
    }
  return "eor%.l %2,%0";
}
7c262518 5040
29ca003a
RS
5041/* Return the instruction that should be used for a call to address X,
5042 which is known to be in operand 0. */
5043
5044const char *
5045output_call (rtx x)
5046{
5047 if (symbolic_operand (x, VOIDmode))
5048 return m68k_symbolic_call;
5049 else
5050 return "jsr %a0";
5051}
5052
f7e70894
RS
5053/* Likewise sibling calls. */
5054
5055const char *
5056output_sibcall (rtx x)
5057{
5058 if (symbolic_operand (x, VOIDmode))
5059 return m68k_symbolic_jump;
5060 else
5061 return "jmp %a0";
5062}
5063
/* Worker for TARGET_ASM_OUTPUT_MI_THUNK.  Generate and output to FILE the
   assembly for a thunk that adjusts the incoming `this' pointer (stored at
   4(%sp)) by DELTA and, if VCALL_OFFSET is nonzero, by *(*this +
   VCALL_OFFSET), and then tail-calls FUNCTION.  */

static void
m68k_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
		      HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
		      tree function)
{
  rtx this_slot, offset, addr, mem, tmp;
  rtx_insn *insn;

  /* Avoid clobbering the struct value reg by using the
     static chain reg as a temporary.  */
  tmp = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);

  /* Pretend to be a post-reload pass while generating rtl.  */
  reload_completed = 1;

  /* The "this" pointer is stored at 4(%sp).  */
  this_slot = gen_rtx_MEM (Pmode, plus_constant (Pmode,
						 stack_pointer_rtx, 4));

  /* Add DELTA to THIS.  */
  if (delta != 0)
    {
      /* Make the offset a legitimate operand for memory addition.  */
      offset = GEN_INT (delta);
      if ((delta < -8 || delta > 8)
	  && (TARGET_COLDFIRE || USE_MOVQ (delta)))
	{
	  /* Load the constant into %d0 first when the add instruction
	     cannot take it as an immediate.  */
	  emit_move_insn (gen_rtx_REG (Pmode, D0_REG), offset);
	  offset = gen_rtx_REG (Pmode, D0_REG);
	}
      /* Add directly into the memory slot holding `this'.  */
      emit_insn (gen_add3_insn (copy_rtx (this_slot),
				copy_rtx (this_slot), offset));
    }

  /* If needed, add *(*THIS + VCALL_OFFSET) to THIS.  */
  if (vcall_offset != 0)
    {
      /* Set the static chain register to *THIS.  */
      emit_move_insn (tmp, this_slot);
      emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));

      /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET.  */
      addr = plus_constant (Pmode, tmp, vcall_offset);
      if (!m68k_legitimate_address_p (Pmode, addr, true))
	{
	  emit_insn (gen_rtx_SET (tmp, addr));
	  addr = tmp;
	}

      /* Load the offset into %d0 and add it to THIS.  */
      emit_move_insn (gen_rtx_REG (Pmode, D0_REG),
		      gen_rtx_MEM (Pmode, addr));
      emit_insn (gen_add3_insn (copy_rtx (this_slot),
				copy_rtx (this_slot),
				gen_rtx_REG (Pmode, D0_REG)));
    }

  /* Jump to the target function.  Use a sibcall if direct jumps are
     allowed, otherwise load the address into a register first.  */
  mem = DECL_RTL (function);
  if (!sibcall_operand (XEXP (mem, 0), VOIDmode))
    {
      gcc_assert (flag_pic);

      if (!TARGET_SEP_DATA)
	{
	  /* Use the static chain register as a temporary (call-clobbered)
	     GOT pointer for this function.  We can use the static chain
	     register because it isn't live on entry to the thunk.  */
	  SET_REGNO (pic_offset_table_rtx, STATIC_CHAIN_REGNUM);
	  emit_insn (gen_load_got (pic_offset_table_rtx));
	}
      legitimize_pic_address (XEXP (mem, 0), Pmode, tmp);
      mem = replace_equiv_address (mem, tmp);
    }
  insn = emit_call_insn (gen_sibcall (mem, const0_rtx));
  SIBLING_CALL_P (insn) = 1;

  /* Run just enough of rest_of_compilation.  */
  insn = get_insns ();
  split_all_insns_noflow ();
  final_start_function (insn, file, 1);
  final (insn, file, 1);
  final_end_function ();

  /* Clean up the vars set above.  */
  reload_completed = 0;

  /* Restore the original PIC register.  */
  if (flag_pic)
    SET_REGNO (pic_offset_table_rtx, PIC_REG);
}
8636be86
KH
5156
/* Worker function for TARGET_STRUCT_VALUE_RTX.  Returns the fixed
   register (M68K_STRUCT_VALUE_REGNUM) in which the address of a
   returned-in-memory aggregate is passed; same register for both the
   caller's and the callee's view, so INCOMING is ignored.  */

static rtx
m68k_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
		       int incoming ATTRIBUTE_UNUSED)
{
  return gen_rtx_REG (Pmode, M68K_STRUCT_VALUE_REGNUM);
}
cfca21cb
PB
5165
5166/* Return nonzero if register old_reg can be renamed to register new_reg. */
5167int
5168m68k_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED,
5169 unsigned int new_reg)
5170{
5171
5172 /* Interrupt functions can only use registers that have already been
5173 saved by the prologue, even if they would normally be
5174 call-clobbered. */
5175
a4242737
KH
5176 if ((m68k_get_function_kind (current_function_decl)
5177 == m68k_fk_interrupt_handler)
6fb5fa3c 5178 && !df_regs_ever_live_p (new_reg))
cfca21cb
PB
5179 return 0;
5180
5181 return 1;
5182}
70028b61 5183
c43f4279
RS
5184/* Implement TARGET_HARD_REGNO_NREGS.
5185
5186 On the m68k, ordinary registers hold 32 bits worth;
5187 for the 68881 registers, a single register is always enough for
5188 anything that can be stored in them at all. */
5189
5190static unsigned int
5191m68k_hard_regno_nregs (unsigned int regno, machine_mode mode)
5192{
5193 if (regno >= 16)
5194 return GET_MODE_NUNITS (mode);
5195 return CEIL (GET_MODE_SIZE (mode), UNITS_PER_WORD);
5196}
5197
f939c3e6
RS
5198/* Implement TARGET_HARD_REGNO_MODE_OK. On the 68000, we let the cpu
5199 registers can hold any mode, but restrict the 68881 registers to
5200 floating-point modes. */
ffa2596e 5201
f939c3e6
RS
5202static bool
5203m68k_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
70028b61 5204{
36e04090 5205 if (DATA_REGNO_P (regno))
70028b61 5206 {
a0a7fbc9
AS
5207 /* Data Registers, can hold aggregate if fits in. */
5208 if (regno + GET_MODE_SIZE (mode) / 4 <= 8)
5209 return true;
70028b61 5210 }
36e04090 5211 else if (ADDRESS_REGNO_P (regno))
70028b61 5212 {
a0a7fbc9
AS
5213 if (regno + GET_MODE_SIZE (mode) / 4 <= 16)
5214 return true;
70028b61 5215 }
36e04090 5216 else if (FP_REGNO_P (regno))
70028b61
PB
5217 {
5218 /* FPU registers, hold float or complex float of long double or
a0a7fbc9
AS
5219 smaller. */
5220 if ((GET_MODE_CLASS (mode) == MODE_FLOAT
5221 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
dcc21c4c 5222 && GET_MODE_UNIT_SIZE (mode) <= TARGET_FP_REG_SIZE)
a0a7fbc9 5223 return true;
70028b61
PB
5224 }
5225 return false;
5226}
dcc21c4c 5227
99e1629f
RS
5228/* Implement TARGET_MODES_TIEABLE_P. */
5229
5230static bool
5231m68k_modes_tieable_p (machine_mode mode1, machine_mode mode2)
5232{
5233 return (!TARGET_HARD_FLOAT
5234 || ((GET_MODE_CLASS (mode1) == MODE_FLOAT
5235 || GET_MODE_CLASS (mode1) == MODE_COMPLEX_FLOAT)
5236 == (GET_MODE_CLASS (mode2) == MODE_FLOAT
5237 || GET_MODE_CLASS (mode2) == MODE_COMPLEX_FLOAT)));
5238}
5239
ffa2596e
RS
/* Implement SECONDARY_RELOAD_CLASS.  Return the class of an intermediate
   register needed to copy X into a register of class RCLASS in MODE, or
   NO_REGS when no intermediate is required.  */

enum reg_class
m68k_secondary_reload_class (enum reg_class rclass,
			     machine_mode mode, rtx x)
{
  int regno;

  /* NOTE(review): presumably true_regnum yields -1 for non-register X,
     which INT_REGNO_P is expected to reject -- confirm against its
     definition.  */
  regno = true_regnum (x);

  /* If one operand of a movqi is an address register, the other
     operand must be a general register or constant.  Other types
     of operand must be reloaded through a data register.  */
  if (GET_MODE_SIZE (mode) == 1
      && reg_classes_intersect_p (rclass, ADDR_REGS)
      && !(INT_REGNO_P (regno) || CONSTANT_P (x)))
    return DATA_REGS;

  /* PC-relative addresses must be loaded into an address register first.  */
  if (TARGET_PCREL
      && !reg_class_subset_p (rclass, ADDR_REGS)
      && symbolic_operand (x, VOIDmode))
    return ADDR_REGS;

  return NO_REGS;
}
5266
/* Implement PREFERRED_RELOAD_CLASS.  Given a value X to be reloaded into
   class RCLASS, return the (sub)class actually preferred.  */

enum reg_class
m68k_preferred_reload_class (rtx x, enum reg_class rclass)
{
  enum reg_class secondary_class;

  /* If RCLASS might need a secondary reload, try restricting it to
     a class that doesn't.  */
  secondary_class = m68k_secondary_reload_class (rclass, GET_MODE (x), x);
  if (secondary_class != NO_REGS
      && reg_class_subset_p (secondary_class, rclass))
    return secondary_class;

  /* Prefer to use moveq for in-range constants (signed 8-bit).  */
  if (GET_CODE (x) == CONST_INT
      && reg_class_subset_p (DATA_REGS, rclass)
      && IN_RANGE (INTVAL (x), -0x80, 0x7f))
    return DATA_REGS;

  /* ??? Do we really need this now?  */
  if (GET_CODE (x) == CONST_DOUBLE
      && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
    {
      /* FP constants can only go into FP registers (when available);
	 otherwise force them to memory.  */
      if (TARGET_HARD_FLOAT && reg_class_subset_p (FP_REGS, rclass))
	return FP_REGS;

      return NO_REGS;
    }

  return rclass;
}
5299
dcc21c4c
PB
/* Return floating point values in a 68881 register.  This makes 68881 code
   a little bit faster.  It also makes -msoft-float code incompatible with
   hard-float code, so people have to be careful not to mix the two.
   For ColdFire it was decided the ABI incompatibility is undesirable.
   If there is need for a hard-float ABI it is probably worth doing it
   properly and also passing function arguments in FP registers.  */
rtx
m68k_libcall_value (machine_mode mode)
{
  switch (mode) {
  case E_SFmode:
  case E_DFmode:
  case E_XFmode:
    /* Hard-float: FP results come back in %fp0.  */
    if (TARGET_68881)
      return gen_rtx_REG (mode, FP0_REG);
    break;
  default:
    break;
  }

  /* Integer (and soft-float) results: %d0, or %a0 for targets whose
     libcall convention returns values in an address register.  */
  return gen_rtx_REG (mode, m68k_libcall_value_in_a0_p ? A0_REG : D0_REG);
}
5322
db5e2d51
MK
/* Location in which function value is returned.
   NOTE: Due to differences in ABIs, don't call this function directly,
   use FUNCTION_VALUE instead.  */
rtx
m68k_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED)
{
  machine_mode mode;

  mode = TYPE_MODE (valtype);
  switch (mode) {
  case E_SFmode:
  case E_DFmode:
  case E_XFmode:
    /* Hard-float: FP results are returned in %fp0.  */
    if (TARGET_68881)
      return gen_rtx_REG (mode, FP0_REG);
    break;
  default:
    break;
  }

  /* If the function returns a pointer, push that into %a0.  */
  if (func && POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (func))))
    /* For compatibility with the large body of existing code which
       does not always properly declare external functions returning
       pointer types, the m68k/SVR4 convention is to copy the value
       returned for pointer functions from a0 to d0 in the function
       epilogue, so that callers that have neglected to properly
       declare the callee can still find the correct return value in
       d0.  The PARALLEL names both registers as holding the result.  */
    return gen_rtx_PARALLEL
      (mode,
       gen_rtvec (2,
		  gen_rtx_EXPR_LIST (VOIDmode,
				     gen_rtx_REG (mode, A0_REG),
				     const0_rtx),
		  gen_rtx_EXPR_LIST (VOIDmode,
				     gen_rtx_REG (mode, D0_REG),
				     const0_rtx)));
  else if (POINTER_TYPE_P (valtype))
    return gen_rtx_REG (mode, A0_REG);
  else
    return gen_rtx_REG (mode, D0_REG);
}
1c445f03
NS
5366
/* Worker function for TARGET_RETURN_IN_MEMORY.  Return true if a value
   of TYPE must be returned in memory rather than in registers.  */
#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
static bool
m68k_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  machine_mode mode = TYPE_MODE (type);

  /* Values with no single machine mode always go through memory.  */
  if (mode == BLKmode)
    return true;

  /* If TYPE's known alignment is less than the alignment of MODE that
     would contain the structure, then return in memory.  We need to
     do so to maintain the compatibility between code compiled with
     -mstrict-align and that compiled with -mno-strict-align.  */
  if (AGGREGATE_TYPE_P (type)
      && TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (mode))
    return true;

  return false;
}
#endif
c47b0cb4
MK
5388
/* CPU to schedule the program for.  */
enum attr_cpu m68k_sched_cpu;

/* MAC to schedule the program for.  */
enum attr_mac m68k_sched_mac;

/* Operand type.  Classification of an instruction operand used by the
   scheduler hooks below; the OP_TYPE_MEM* values correspond to the 68k
   effective-address mode numbers.  */
enum attr_op_type
  {
    /* No operand.  */
    OP_TYPE_NONE,

    /* Integer register.  */
    OP_TYPE_RN,

    /* FP register.  */
    OP_TYPE_FPN,

    /* Implicit mem reference (e.g. stack).  */
    OP_TYPE_MEM1,

    /* Memory without offset or indexing.  EA modes 2, 3 and 4.  */
    OP_TYPE_MEM234,

    /* Memory with offset but without indexing.  EA mode 5.  */
    OP_TYPE_MEM5,

    /* Memory with indexing.  EA mode 6.  */
    OP_TYPE_MEM6,

    /* Memory referenced by absolute address.  EA mode 7.  */
    OP_TYPE_MEM7,

    /* Immediate operand that doesn't require extension word.  */
    OP_TYPE_IMM_Q,

    /* Immediate 16 bit operand.  */
    OP_TYPE_IMM_W,

    /* Immediate 32 bit operand.  */
    OP_TYPE_IMM_L
  };
5431
c47b0cb4
MK
/* Return type of memory ADDR_RTX refers to, as one of the OP_TYPE_MEM*
   classifications, by decomposing the address in MODE.  */
static enum attr_op_type
sched_address_type (machine_mode mode, rtx addr_rtx)
{
  struct m68k_address address;

  /* A bare symbolic address is an absolute reference (EA mode 7).  */
  if (symbolic_operand (addr_rtx, VOIDmode))
    return OP_TYPE_MEM7;

  if (!m68k_decompose_address (mode, addr_rtx,
			       reload_completed, &address))
    {
      gcc_assert (!reload_completed);
      /* Reload will likely fix the address to be in the register.  */
      return OP_TYPE_MEM234;
    }

  /* A scaled index means an indexed address (EA mode 6).  */
  if (address.scale != 0)
    return OP_TYPE_MEM6;

  if (address.base != NULL_RTX)
    {
      /* Base only: EA modes 2/3/4; base plus offset: EA mode 5.  */
      if (address.offset == NULL_RTX)
	return OP_TYPE_MEM234;

      return OP_TYPE_MEM5;
    }

  /* No base and no index -- must be a constant offset, i.e. absolute.  */
  gcc_assert (address.offset != NULL_RTX);

  return OP_TYPE_MEM7;
}
5464
96fcacb7
MK
5465/* Return X or Y (depending on OPX_P) operand of INSN. */
5466static rtx
647d790d 5467sched_get_operand (rtx_insn *insn, bool opx_p)
96fcacb7
MK
5468{
5469 int i;
5470
5471 if (recog_memoized (insn) < 0)
5472 gcc_unreachable ();
5473
5474 extract_constrain_insn_cached (insn);
5475
5476 if (opx_p)
5477 i = get_attr_opx (insn);
5478 else
5479 i = get_attr_opy (insn);
5480
5481 if (i >= recog_data.n_operands)
5482 return NULL;
5483
5484 return recog_data.operand[i];
5485}
5486
/* Return type of INSN's operand X (if OPX_P) or operand Y (if !OPX_P).
   If ADDRESS_P is true, return type of memory location operand refers to.
   The classification drives the scheduler's insn-size and pipeline
   attributes; operands are examined in order: register, memory, integer
   constant, FP constant, symbolic constant.  */
static enum attr_op_type
sched_attr_op_type (rtx_insn *insn, bool opx_p, bool address_p)
{
  rtx op;

  op = sched_get_operand (insn, opx_p);

  if (op == NULL)
    {
      /* Missing operands can only occur before reload; treat them as a
	 plain register operand.  */
      gcc_assert (!reload_completed);
      return OP_TYPE_RN;
    }

  if (address_p)
    return sched_address_type (QImode, op);

  if (memory_operand (op, VOIDmode))
    return sched_address_type (GET_MODE (op), XEXP (op, 0));

  if (register_operand (op, VOIDmode))
    {
      /* Before reload, classify by mode; after reload, by the actual
	 hard register.  */
      if ((!reload_completed && FLOAT_MODE_P (GET_MODE (op)))
	  || (reload_completed && FP_REG_P (op)))
	return OP_TYPE_FPN;

      return OP_TYPE_RN;
    }

  if (GET_CODE (op) == CONST_INT)
    {
      int ival;

      ival = INTVAL (op);

      /* Check for quick constants.  */
      switch (get_attr_type (insn))
	{
	case TYPE_ALUQ_L:
	  if (IN_RANGE (ival, 1, 8) || IN_RANGE (ival, -8, -1))
	    return OP_TYPE_IMM_Q;

	  gcc_assert (!reload_completed);
	  break;

	case TYPE_MOVEQ_L:
	  if (USE_MOVQ (ival))
	    return OP_TYPE_IMM_Q;

	  gcc_assert (!reload_completed);
	  break;

	case TYPE_MOV3Q_L:
	  if (valid_mov3q_const (ival))
	    return OP_TYPE_IMM_Q;

	  gcc_assert (!reload_completed);
	  break;

	default:
	  break;
	}

      /* 16-bit immediates need one extension word; larger need two.  */
      if (IN_RANGE (ival, -0x8000, 0x7fff))
	return OP_TYPE_IMM_W;

      return OP_TYPE_IMM_L;
    }

  if (GET_CODE (op) == CONST_DOUBLE)
    {
      switch (GET_MODE (op))
	{
	case E_SFmode:
	  return OP_TYPE_IMM_W;

	case E_VOIDmode:
	case E_DFmode:
	  return OP_TYPE_IMM_L;

	default:
	  gcc_unreachable ();
	}
    }

  if (GET_CODE (op) == CONST
      || symbolic_operand (op, VOIDmode)
      || LABEL_P (op))
    {
      switch (GET_MODE (op))
	{
	case E_QImode:
	  return OP_TYPE_IMM_Q;

	case E_HImode:
	  return OP_TYPE_IMM_W;

	case E_SImode:
	  return OP_TYPE_IMM_L;

	default:
	  if (symbolic_operand (m68k_unwrap_symbol (op, false), VOIDmode))
	    /* Just a guess.  */
	    return OP_TYPE_IMM_W;

	  return OP_TYPE_IMM_L;
	}
    }

  /* Anything else should only appear before reload; assume it will end
     up in a register.  */
  gcc_assert (!reload_completed);

  if (FLOAT_MODE_P (GET_MODE (op)))
    return OP_TYPE_FPN;

  return OP_TYPE_RN;
}
5604
/* Implement opx_type attribute.
   Return type of INSN's operand X.
   If ADDRESS_P is true, return type of memory location operand refers to.
   This is a one-to-one translation of the generic OP_TYPE_* values into
   the generated OPX_TYPE_* attribute values.  */
enum attr_opx_type
m68k_sched_attr_opx_type (rtx_insn *insn, int address_p)
{
  switch (sched_attr_op_type (insn, true, address_p != 0))
    {
    case OP_TYPE_RN:
      return OPX_TYPE_RN;

    case OP_TYPE_FPN:
      return OPX_TYPE_FPN;

    case OP_TYPE_MEM1:
      return OPX_TYPE_MEM1;

    case OP_TYPE_MEM234:
      return OPX_TYPE_MEM234;

    case OP_TYPE_MEM5:
      return OPX_TYPE_MEM5;

    case OP_TYPE_MEM6:
      return OPX_TYPE_MEM6;

    case OP_TYPE_MEM7:
      return OPX_TYPE_MEM7;

    case OP_TYPE_IMM_Q:
      return OPX_TYPE_IMM_Q;

    case OP_TYPE_IMM_W:
      return OPX_TYPE_IMM_W;

    case OP_TYPE_IMM_L:
      return OPX_TYPE_IMM_L;

    default:
      gcc_unreachable ();
    }
}
5647
/* Implement opy_type attribute.
   Return type of INSN's operand Y.
   If ADDRESS_P is true, return type of memory location operand refers to.
   Mirror of m68k_sched_attr_opx_type for the Y operand.  */
enum attr_opy_type
m68k_sched_attr_opy_type (rtx_insn *insn, int address_p)
{
  switch (sched_attr_op_type (insn, false, address_p != 0))
    {
    case OP_TYPE_RN:
      return OPY_TYPE_RN;

    case OP_TYPE_FPN:
      return OPY_TYPE_FPN;

    case OP_TYPE_MEM1:
      return OPY_TYPE_MEM1;

    case OP_TYPE_MEM234:
      return OPY_TYPE_MEM234;

    case OP_TYPE_MEM5:
      return OPY_TYPE_MEM5;

    case OP_TYPE_MEM6:
      return OPY_TYPE_MEM6;

    case OP_TYPE_MEM7:
      return OPY_TYPE_MEM7;

    case OP_TYPE_IMM_Q:
      return OPY_TYPE_IMM_Q;

    case OP_TYPE_IMM_W:
      return OPY_TYPE_IMM_W;

    case OP_TYPE_IMM_L:
      return OPY_TYPE_IMM_L;

    default:
      gcc_unreachable ();
    }
}
5690
96fcacb7
MK
/* Return size of INSN as int (number of 16-bit words, 1..3, counting the
   opcode word plus extension words contributed by each operand).  */
static int
sched_get_attr_size_int (rtx_insn *insn)
{
  int size;

  /* Base size from the instruction type.  */
  switch (get_attr_type (insn))
    {
    case TYPE_IGNORE:
      /* There should be no references to m68k_sched_attr_size for 'ignore'
	 instructions.  */
      gcc_unreachable ();
      return 0;

    case TYPE_MUL_L:
      size = 2;
      break;

    default:
      size = 1;
      break;
    }

  /* Extension words for operand X.
     NOTE(review): the OPY_TYPE_IMM_* cases below appear inside a switch
     over get_attr_opx_type -- presumably the OPX_TYPE_* and OPY_TYPE_*
     enumerators for the IMM members share numeric values; confirm against
     the generated insn-attr headers.  */
  switch (get_attr_opx_type (insn))
    {
    case OPX_TYPE_NONE:
    case OPX_TYPE_RN:
    case OPX_TYPE_FPN:
    case OPX_TYPE_MEM1:
    case OPX_TYPE_MEM234:
    case OPY_TYPE_IMM_Q:
      break;

    case OPX_TYPE_MEM5:
    case OPX_TYPE_MEM6:
    /* Here we assume that most absolute references are short.  */
    case OPX_TYPE_MEM7:
    case OPY_TYPE_IMM_W:
      ++size;
      break;

    case OPY_TYPE_IMM_L:
      size += 2;
      break;

    default:
      gcc_unreachable ();
    }

  /* Extension words for operand Y.  */
  switch (get_attr_opy_type (insn))
    {
    case OPY_TYPE_NONE:
    case OPY_TYPE_RN:
    case OPY_TYPE_FPN:
    case OPY_TYPE_MEM1:
    case OPY_TYPE_MEM234:
    case OPY_TYPE_IMM_Q:
      break;

    case OPY_TYPE_MEM5:
    case OPY_TYPE_MEM6:
    /* Here we assume that most absolute references are short.  */
    case OPY_TYPE_MEM7:
    case OPY_TYPE_IMM_W:
      ++size;
      break;

    case OPY_TYPE_IMM_L:
      size += 2;
      break;

    default:
      gcc_unreachable ();
    }

  /* Clamp to 3; larger estimates can only happen before reload.  */
  if (size > 3)
    {
      gcc_assert (!reload_completed);

      size = 3;
    }

  return size;
}
5775
96fcacb7
MK
5776/* Return size of INSN as attribute enum value. */
5777enum attr_size
84034c69 5778m68k_sched_attr_size (rtx_insn *insn)
96fcacb7
MK
5779{
5780 switch (sched_get_attr_size_int (insn))
5781 {
5782 case 1:
5783 return SIZE_1;
5784
5785 case 2:
5786 return SIZE_2;
5787
5788 case 3:
5789 return SIZE_3;
5790
5791 default:
5792 gcc_unreachable ();
96fcacb7
MK
5793 }
5794}
5795
/* Return operand X or Y (depending on OPX_P) of INSN,
   if it is a MEM, or NULL overwise.
   (Despite the old comment, this returns a coarse OP_TYPE_* value:
   OP_TYPE_RN for non-memory operands, OP_TYPE_MEM6 for indexed memory,
   and OP_TYPE_MEM1 for all other memory operands.)  */
static enum attr_op_type
sched_get_opxy_mem_type (rtx_insn *insn, bool opx_p)
{
  if (opx_p)
    {
      switch (get_attr_opx_type (insn))
	{
	case OPX_TYPE_NONE:
	case OPX_TYPE_RN:
	case OPX_TYPE_FPN:
	case OPX_TYPE_IMM_Q:
	case OPX_TYPE_IMM_W:
	case OPX_TYPE_IMM_L:
	  return OP_TYPE_RN;

	case OPX_TYPE_MEM1:
	case OPX_TYPE_MEM234:
	case OPX_TYPE_MEM5:
	case OPX_TYPE_MEM7:
	  return OP_TYPE_MEM1;

	case OPX_TYPE_MEM6:
	  return OP_TYPE_MEM6;

	default:
	  gcc_unreachable ();
	}
    }
  else
    {
      switch (get_attr_opy_type (insn))
	{
	case OPY_TYPE_NONE:
	case OPY_TYPE_RN:
	case OPY_TYPE_FPN:
	case OPY_TYPE_IMM_Q:
	case OPY_TYPE_IMM_W:
	case OPY_TYPE_IMM_L:
	  return OP_TYPE_RN;

	case OPY_TYPE_MEM1:
	case OPY_TYPE_MEM234:
	case OPY_TYPE_MEM5:
	case OPY_TYPE_MEM7:
	  return OP_TYPE_MEM1;

	case OPY_TYPE_MEM6:
	  return OP_TYPE_MEM6;

	default:
	  gcc_unreachable ();
	}
    }
}
5852
c47b0cb4
MK
/* Implement op_mem attribute.
   Encode INSN's memory behavior as OP_MEM_<R><W>, where <R> describes
   the memory read and <W> the memory write: '0' = none, '1' = plain
   memory access, 'I' = indexed memory access (derived from the mapping
   below, e.g. OPX_ACCESS_R on a MEM1 operand X yields OP_MEM_10).  */
enum attr_op_mem
m68k_sched_attr_op_mem (rtx_insn *insn)
{
  enum attr_op_type opx;
  enum attr_op_type opy;

  opx = sched_get_opxy_mem_type (insn, true);
  opy = sched_get_opxy_mem_type (insn, false);

  /* Both operands in registers: no memory traffic.  */
  if (opy == OP_TYPE_RN && opx == OP_TYPE_RN)
    return OP_MEM_00;

  /* Register Y, plain-memory X: classify by how X is accessed.  */
  if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM1)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_R:
	  return OP_MEM_10;

	case OPX_ACCESS_W:
	  return OP_MEM_01;

	case OPX_ACCESS_RW:
	  return OP_MEM_11;

	default:
	  gcc_unreachable ();
	}
    }

  /* Register Y, indexed-memory X.  */
  if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM6)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_R:
	  return OP_MEM_I0;

	case OPX_ACCESS_W:
	  return OP_MEM_0I;

	case OPX_ACCESS_RW:
	  return OP_MEM_I1;

	default:
	  gcc_unreachable ();
	}
    }

  /* Plain-memory Y, register X: a single memory read.  */
  if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_RN)
    return OP_MEM_10;

  /* Plain-memory Y and plain-memory X.  */
  if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM1)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_W:
	  return OP_MEM_11;

	default:
	  /* Other access kinds with two memory operands are only
	     expected before reload.  */
	  gcc_assert (!reload_completed);
	  return OP_MEM_11;
	}
    }

  /* Plain-memory Y, indexed-memory X.  */
  if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM6)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_W:
	  return OP_MEM_1I;

	default:
	  gcc_assert (!reload_completed);
	  return OP_MEM_1I;
	}
    }

  /* Indexed-memory Y, register X.  */
  if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_RN)
    return OP_MEM_I0;

  /* Indexed-memory Y, plain-memory X.  */
  if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM1)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_W:
	  return OP_MEM_I1;

	default:
	  gcc_assert (!reload_completed);
	  return OP_MEM_I1;
	}
    }

  /* The only combination left: both operands are indexed memory.  */
  gcc_assert (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM6);
  gcc_assert (!reload_completed);
  return OP_MEM_I1;
}
5951
96fcacb7
MK
/* Data for ColdFire V4 index bypass.
   Producer modifies register that is used as index in consumer with
   specified scale.  Filled in by m68k_sched_indexed_address_bypass_p
   and consumed (reset) by m68k_sched_adjust_cost.  */
static struct
{
  /* Producer instruction.  */
  rtx pro;

  /* Consumer instruction.  */
  rtx con;

  /* Scale of indexed memory access within consumer.
     Or zero if bypass should not be effective at the moment.  */
  int scale;
} sched_cfv4_bypass_data;

/* An empty state that is used in m68k_sched_adjust_cost.
   Allocated in m68k_sched_md_init_global, freed in
   m68k_sched_md_finish_global.  */
static state_t sched_adjust_cost_state;
5970
/* Implement adjust_cost scheduler hook.
   Return adjusted COST of dependency LINK between DEF_INSN and INSN.  */
static int
m68k_sched_adjust_cost (rtx_insn *insn, int, rtx_insn *def_insn, int cost,
			unsigned int)
{
  int delay;

  /* Leave the cost alone for insns the DFA knows nothing about.  */
  if (recog_memoized (def_insn) < 0
      || recog_memoized (insn) < 0)
    return cost;

  if (sched_cfv4_bypass_data.scale == 1)
    /* Handle ColdFire V4 bypass for indexed address with 1x scale.  */
    {
      /* haifa-sched.c: insn_cost () calls bypass_p () just before
	 targetm.sched.adjust_cost ().  Hence, we can be relatively sure
	 that the data in sched_cfv4_bypass_data is up to date.  */
      gcc_assert (sched_cfv4_bypass_data.pro == def_insn
		  && sched_cfv4_bypass_data.con == insn);

      /* The 1x-scale bypass has a minimum latency of 3 cycles.  */
      if (cost < 3)
	cost = 3;

      /* Consume the recorded bypass data.  */
      sched_cfv4_bypass_data.pro = NULL;
      sched_cfv4_bypass_data.con = NULL;
      sched_cfv4_bypass_data.scale = 0;
    }
  else
    gcc_assert (sched_cfv4_bypass_data.pro == NULL
		&& sched_cfv4_bypass_data.con == NULL
		&& sched_cfv4_bypass_data.scale == 0);

  /* Don't try to issue INSN earlier than DFA permits.
     This is especially useful for instructions that write to memory,
     as their true dependence (default) latency is better to be set to 0
     to workaround alias analysis limitations.
     This is, in fact, a machine independent tweak, so, probably,
     it should be moved to haifa-sched.c: insn_cost ().  */
  delay = min_insn_conflict_delay (sched_adjust_cost_state, def_insn, insn);
  if (delay > cost)
    cost = delay;

  return cost;
}
6016
96fcacb7
MK
6017/* Return maximal number of insns that can be scheduled on a single cycle. */
6018static int
6019m68k_sched_issue_rate (void)
6020{
6021 switch (m68k_sched_cpu)
6022 {
6023 case CPU_CFV1:
6024 case CPU_CFV2:
6025 case CPU_CFV3:
6026 return 1;
6027
6028 case CPU_CFV4:
6029 return 2;
6030
6031 default:
6032 gcc_unreachable ();
6033 return 0;
6034 }
6035}
6036
826fadba
MK
/* Maximal length of instruction for current CPU.
   E.g. it is 3 for any ColdFire core.  */
static int max_insn_size;

/* Data to model instruction buffer of CPU.  */
struct _sched_ib
{
  /* True if instruction buffer model is modeled for current CPU.
     False for ColdFire V4 (see m68k_sched_md_init_global).  */
  bool enabled_p;

  /* Size of the instruction buffer in words.  */
  int size;

  /* Number of filled words in the instruction buffer.  */
  int filled;

  /* Additional information about instruction buffer for CPUs that have
     a buffer of instruction records, rather then a plain buffer
     of instruction words.  */
  struct _sched_ib_records
  {
    /* Size of buffer in records.  */
    int n_insns;

    /* Array to hold data on adjustments made to the size of the buffer.  */
    int *adjust;

    /* Index of the above array.  */
    int adjust_index;
  } records;

  /* An insn that reserves (marks empty) one word in the instruction buffer.  */
  rtx insn;
};

/* The single instruction-buffer model instance.  */
static struct _sched_ib sched_ib;

/* ID of memory unit.  */
static int sched_mem_unit_code;
6076
/* Implementation of the targetm.sched.variable_issue () hook.
   It is called after INSN was issued.  It returns the number of insns
   that can possibly get scheduled on the current cycle.
   It is used here to determine the effect of INSN on the instruction
   buffer.  */
static int
m68k_sched_variable_issue (FILE *sched_dump ATTRIBUTE_UNUSED,
			   int sched_verbose ATTRIBUTE_UNUSED,
			   rtx_insn *insn, int can_issue_more)
{
  int insn_size;

  if (recog_memoized (insn) >= 0 && get_attr_type (insn) != TYPE_IGNORE)
    {
      switch (m68k_sched_cpu)
	{
	case CPU_CFV1:
	case CPU_CFV2:
	  insn_size = sched_get_attr_size_int (insn);
	  break;

	case CPU_CFV3:
	  insn_size = sched_get_attr_size_int (insn);

	  /* ColdFire V3 and V4 cores have instruction buffers that can
	     accumulate up to 8 instructions regardless of instructions'
	     sizes.  So we should take care not to "prefetch" 24 one-word
	     or 12 two-words instructions.
	     To model this behavior we temporarily decrease size of the
	     buffer by (max_insn_size - insn_size) for next 7 instructions.  */
	  {
	    int adjust;

	    adjust = max_insn_size - insn_size;
	    sched_ib.size -= adjust;

	    if (sched_ib.filled > sched_ib.size)
	      sched_ib.filled = sched_ib.size;

	    sched_ib.records.adjust[sched_ib.records.adjust_index] = adjust;
	  }

	  /* Advance the circular adjustment buffer.  */
	  ++sched_ib.records.adjust_index;
	  if (sched_ib.records.adjust_index == sched_ib.records.n_insns)
	    sched_ib.records.adjust_index = 0;

	  /* Undo adjustment we did 7 instructions ago.  */
	  sched_ib.size
	    += sched_ib.records.adjust[sched_ib.records.adjust_index];

	  break;

	case CPU_CFV4:
	  /* The instruction buffer is not modeled for ColdFire V4.  */
	  gcc_assert (!sched_ib.enabled_p);
	  insn_size = 0;
	  break;

	default:
	  gcc_unreachable ();
	}

      if (insn_size > sched_ib.filled)
	/* Scheduling for register pressure does not always take DFA into
	   account.  Workaround instruction buffer not being filled enough.  */
	{
	  gcc_assert (sched_pressure == SCHED_PRESSURE_WEIGHTED);
	  insn_size = sched_ib.filled;
	}

      --can_issue_more;
    }
  else if (GET_CODE (PATTERN (insn)) == ASM_INPUT
	   || asm_noperands (PATTERN (insn)) >= 0)
    /* An asm statement drains the whole buffer
       (insn_size == sched_ib.filled makes filled drop to 0 below).  */
    insn_size = sched_ib.filled;
  else
    insn_size = 0;

  sched_ib.filled -= insn_size;

  return can_issue_more;
}
6158
96fcacb7
MK
/* Return how many instructions should scheduler lookahead to choose the
   best one.  One less than the issue rate of the current CPU.  */
static int
m68k_sched_first_cycle_multipass_dfa_lookahead (void)
{
  int rate = m68k_sched_issue_rate ();

  return rate - 1;
}
6166
/* Implementation of targetm.sched.init_global () hook.
   It is invoked once per scheduling pass and is used here
   to initialize scheduler constants.  */
static void
m68k_sched_md_init_global (FILE *sched_dump ATTRIBUTE_UNUSED,
			   int sched_verbose ATTRIBUTE_UNUSED,
			   int n_insns ATTRIBUTE_UNUSED)
{
  /* Check that all instructions have DFA reservations and
     that all instructions can be issued from a clean state.  */
  if (flag_checking)
    {
      rtx_insn *insn;
      state_t state;

      state = alloca (state_size ());

      for (insn = get_insns (); insn != NULL; insn = NEXT_INSN (insn))
	{
	  if (INSN_P (insn) && recog_memoized (insn) >= 0)
	    {
	      gcc_assert (insn_has_dfa_reservation_p (insn));

	      state_reset (state);
	      if (state_transition (state, insn) >= 0)
		gcc_unreachable ();
	    }
	}
    }

  /* Setup target cpu.  */

  /* ColdFire V4 has a set of features to keep its instruction buffer full
     (e.g., a separate memory bus for instructions) and, hence, we do not model
     buffer for this CPU.  */
  sched_ib.enabled_p = (m68k_sched_cpu != CPU_CFV4);

  switch (m68k_sched_cpu)
    {
    case CPU_CFV4:
      sched_ib.filled = 0;

      /* FALLTHRU */

    case CPU_CFV1:
    case CPU_CFV2:
      max_insn_size = 3;
      sched_ib.records.n_insns = 0;
      sched_ib.records.adjust = NULL;
      break;

    case CPU_CFV3:
      /* V3 models a record-based buffer of 8 insns (see _sched_ib).  */
      max_insn_size = 3;
      sched_ib.records.n_insns = 8;
      sched_ib.records.adjust = XNEWVEC (int, sched_ib.records.n_insns);
      break;

    default:
      gcc_unreachable ();
    }

  sched_mem_unit_code = get_cpu_unit_code ("cf_mem1");

  /* Scratch DFA state for m68k_sched_adjust_cost; freed in
     m68k_sched_md_finish_global.  */
  sched_adjust_cost_state = xmalloc (state_size ());
  state_reset (sched_adjust_cost_state);

  /* Emit (off-stream) the insn used to reserve one buffer word.  */
  start_sequence ();
  emit_insn (gen_ib ());
  sched_ib.insn = get_insns ();
  end_sequence ();
}
6238
6239/* Scheduling pass is now finished. Free/reset static variables. */
6240static void
6241m68k_sched_md_finish_global (FILE *dump ATTRIBUTE_UNUSED,
6242 int verbose ATTRIBUTE_UNUSED)
6243{
826fadba 6244 sched_ib.insn = NULL;
b8c96320
MK
6245
6246 free (sched_adjust_cost_state);
6247 sched_adjust_cost_state = NULL;
6248
6249 sched_mem_unit_code = 0;
826fadba
MK
6250
6251 free (sched_ib.records.adjust);
6252 sched_ib.records.adjust = NULL;
6253 sched_ib.records.n_insns = 0;
6254 max_insn_size = 0;
b8c96320
MK
6255}
6256
/* Implementation of targetm.sched.init () hook.
   It is invoked each time scheduler starts on the new block (basic block or
   extended basic block).  */
static void
m68k_sched_md_init (FILE *sched_dump ATTRIBUTE_UNUSED,
		    int sched_verbose ATTRIBUTE_UNUSED,
		    int n_insns ATTRIBUTE_UNUSED)
{
  switch (m68k_sched_cpu)
    {
    case CPU_CFV1:
    case CPU_CFV2:
      /* V1/V2: plain 6-word instruction buffer.  */
      sched_ib.size = 6;
      break;

    case CPU_CFV3:
      sched_ib.size = sched_ib.records.n_insns * max_insn_size;

      /* Reset the size-adjustment history.  */
      memset (sched_ib.records.adjust, 0,
	      sched_ib.records.n_insns * sizeof (*sched_ib.records.adjust));
      sched_ib.records.adjust_index = 0;
      break;

    case CPU_CFV4:
      gcc_assert (!sched_ib.enabled_p);
      sched_ib.size = 0;
      break;

    default:
      gcc_unreachable ();
    }

  if (sched_ib.enabled_p)
    /* haifa-sched.c: schedule_block () calls advance_cycle () just before
       the first cycle.  Workaround that.  */
    sched_ib.filled = -2;
}
6294
6295/* Implementation of targetm.sched.dfa_pre_advance_cycle () hook.
6296 It is invoked just before current cycle finishes and is used here
6297 to track if instruction buffer got its two words this cycle. */
6298static void
6299m68k_sched_dfa_pre_advance_cycle (void)
6300{
96fcacb7
MK
6301 if (!sched_ib.enabled_p)
6302 return;
6303
b8c96320
MK
6304 if (!cpu_unit_reservation_p (curr_state, sched_mem_unit_code))
6305 {
826fadba 6306 sched_ib.filled += 2;
b8c96320 6307
826fadba
MK
6308 if (sched_ib.filled > sched_ib.size)
6309 sched_ib.filled = sched_ib.size;
b8c96320
MK
6310 }
6311}
6312
/* Implementation of targetm.sched.dfa_post_advance_cycle () hook.
   It is invoked just after new cycle begins and is used here
   to setup number of filled words in the instruction buffer so that
   instructions which won't have all their words prefetched would be
   stalled for a cycle.  */
static void
m68k_sched_dfa_post_advance_cycle (void)
{
  int i;

  if (!sched_ib.enabled_p)
    return;

  /* Setup number of prefetched instruction words in the instruction
     buffer.  */
  i = max_insn_size - sched_ib.filled;

  while (--i >= 0)
    {
      /* NOTE(review): state_transition () >= 0 indicates the 'ib'
	 reservation insn could not be issued into CURR_STATE; that case
	 is counted as a filled word -- confirm against the genautomata
	 state_transition contract.  */
      if (state_transition (curr_state, sched_ib.insn) >= 0)
	/* Pick up scheduler state.  */
	++sched_ib.filled;
    }
}
96fcacb7
MK
6337
6338/* Return X or Y (depending on OPX_P) operand of INSN,
6339 if it is an integer register, or NULL overwise. */
6340static rtx
647d790d 6341sched_get_reg_operand (rtx_insn *insn, bool opx_p)
96fcacb7
MK
6342{
6343 rtx op = NULL;
6344
6345 if (opx_p)
6346 {
6347 if (get_attr_opx_type (insn) == OPX_TYPE_RN)
6348 {
6349 op = sched_get_operand (insn, true);
6350 gcc_assert (op != NULL);
6351
6352 if (!reload_completed && !REG_P (op))
6353 return NULL;
6354 }
6355 }
6356 else
6357 {
6358 if (get_attr_opy_type (insn) == OPY_TYPE_RN)
6359 {
6360 op = sched_get_operand (insn, false);
6361 gcc_assert (op != NULL);
6362
6363 if (!reload_completed && !REG_P (op))
6364 return NULL;
6365 }
6366 }
6367
6368 return op;
6369}
6370
6371/* Return true, if X or Y (depending on OPX_P) operand of INSN
6372 is a MEM. */
6373static bool
84034c69 6374sched_mem_operand_p (rtx_insn *insn, bool opx_p)
96fcacb7
MK
6375{
6376 switch (sched_get_opxy_mem_type (insn, opx_p))
6377 {
6378 case OP_TYPE_MEM1:
6379 case OP_TYPE_MEM6:
6380 return true;
6381
6382 default:
6383 return false;
6384 }
6385}
6386
6387/* Return X or Y (depending on OPX_P) operand of INSN,
6388 if it is a MEM, or NULL overwise. */
6389static rtx
647d790d 6390sched_get_mem_operand (rtx_insn *insn, bool must_read_p, bool must_write_p)
96fcacb7
MK
6391{
6392 bool opx_p;
6393 bool opy_p;
6394
6395 opx_p = false;
6396 opy_p = false;
6397
6398 if (must_read_p)
6399 {
6400 opx_p = true;
6401 opy_p = true;
6402 }
6403
6404 if (must_write_p)
6405 {
6406 opx_p = true;
6407 opy_p = false;
6408 }
6409
6410 if (opy_p && sched_mem_operand_p (insn, false))
6411 return sched_get_operand (insn, false);
6412
6413 if (opx_p && sched_mem_operand_p (insn, true))
6414 return sched_get_operand (insn, true);
6415
6416 gcc_unreachable ();
6417 return NULL;
6418}
6419
6420/* Return non-zero if PRO modifies register used as part of
6421 address in CON. */
6422int
647d790d 6423m68k_sched_address_bypass_p (rtx_insn *pro, rtx_insn *con)
96fcacb7
MK
6424{
6425 rtx pro_x;
6426 rtx con_mem_read;
6427
6428 pro_x = sched_get_reg_operand (pro, true);
6429 if (pro_x == NULL)
6430 return 0;
6431
6432 con_mem_read = sched_get_mem_operand (con, true, false);
6433 gcc_assert (con_mem_read != NULL);
6434
6435 if (reg_mentioned_p (pro_x, con_mem_read))
6436 return 1;
6437
6438 return 0;
6439}
6440
6441/* Helper function for m68k_sched_indexed_address_bypass_p.
6442 if PRO modifies register used as index in CON,
6443 return scale of indexed memory access in CON. Return zero overwise. */
6444static int
647d790d 6445sched_get_indexed_address_scale (rtx_insn *pro, rtx_insn *con)
96fcacb7
MK
6446{
6447 rtx reg;
6448 rtx mem;
6449 struct m68k_address address;
6450
6451 reg = sched_get_reg_operand (pro, true);
6452 if (reg == NULL)
6453 return 0;
6454
6455 mem = sched_get_mem_operand (con, true, false);
6456 gcc_assert (mem != NULL && MEM_P (mem));
6457
6458 if (!m68k_decompose_address (GET_MODE (mem), XEXP (mem, 0), reload_completed,
6459 &address))
6460 gcc_unreachable ();
6461
6462 if (REGNO (reg) == REGNO (address.index))
6463 {
6464 gcc_assert (address.scale != 0);
6465 return address.scale;
6466 }
6467
6468 return 0;
6469}
6470
6471/* Return non-zero if PRO modifies register used
6472 as index with scale 2 or 4 in CON. */
6473int
647d790d 6474m68k_sched_indexed_address_bypass_p (rtx_insn *pro, rtx_insn *con)
96fcacb7
MK
6475{
6476 gcc_assert (sched_cfv4_bypass_data.pro == NULL
6477 && sched_cfv4_bypass_data.con == NULL
6478 && sched_cfv4_bypass_data.scale == 0);
6479
6480 switch (sched_get_indexed_address_scale (pro, con))
6481 {
6482 case 1:
6483 /* We can't have a variable latency bypass, so
6484 remember to adjust the insn cost in adjust_cost hook. */
6485 sched_cfv4_bypass_data.pro = pro;
6486 sched_cfv4_bypass_data.con = con;
6487 sched_cfv4_bypass_data.scale = 1;
6488 return 0;
6489
6490 case 2:
6491 case 4:
6492 return 1;
6493
6494 default:
6495 return 0;
6496 }
6497}
75df395f 6498
e0601576
RH
/* We generate a two-instructions program at M_TRAMP :
	movea.l &CHAIN_VALUE,%a0
	jmp FNADDR
   where %a0 can be modified by changing STATIC_CHAIN_REGNUM.  */

static void
m68k_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
{
  rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
  rtx mem;

  gcc_assert (ADDRESS_REGNO_P (STATIC_CHAIN_REGNUM));

  /* 0x207C is the "movea.l #imm,%a0" opcode; the static chain address
     register number is folded into bits 9-11, followed by the 32-bit
     chain value as the immediate.  */
  mem = adjust_address (m_tramp, HImode, 0);
  emit_move_insn (mem, GEN_INT(0x207C + ((STATIC_CHAIN_REGNUM-8) << 9)));
  mem = adjust_address (m_tramp, SImode, 2);
  emit_move_insn (mem, chain_value);

  /* 0x4EF9 is the "jmp <abs>.l" opcode, followed by the 32-bit target
     function address.  */
  mem = adjust_address (m_tramp, HImode, 6);
  emit_move_insn (mem, GEN_INT(0x4EF9));
  mem = adjust_address (m_tramp, SImode, 8);
  emit_move_insn (mem, fnaddr);

  /* Target-specific post-processing, e.g. flushing the instruction
     cache over the trampoline.  */
  FINALIZE_TRAMPOLINE (XEXP (m_tramp, 0));
}
6524
079e7538
NF
/* On the 68000, the RTS insn cannot pop anything.
   On the 68010, the RTD insn may be used to pop them if the number
     of args is fixed, but if the number is variable then the caller
     must pop them all.  RTD can't be used for library calls now
     because the library is compiled with the Unix compiler.
   Use of RTD is a selectable option, since it is incompatible with
   standard Unix calling sequences.  If the option is not selected,
   the caller must always pop the args.  */

static poly_int64
m68k_return_pops_args (tree fundecl, tree funtype, poly_int64 size)
{
  /* Pop SIZE bytes in the callee only when -mrtd is in effect, the
     callee is not a libcall (identified by an IDENTIFIER_NODE) and
     the function type is not variadic.  */
  return ((TARGET_RTD
	   && (!fundecl
	       || TREE_CODE (fundecl) != IDENTIFIER_NODE)
	   && (!stdarg_p (funtype)))
	  ? (HOST_WIDE_INT) size : 0);
}
6543
5efd84c5
NF
6544/* Make sure everything's fine if we *don't* have a given processor.
6545 This assumes that putting a register in fixed_regs will keep the
6546 compiler's mitts completely off it. We don't bother to zero it out
6547 of register classes. */
6548
6549static void
6550m68k_conditional_register_usage (void)
6551{
6552 int i;
6553 HARD_REG_SET x;
6554 if (!TARGET_HARD_FLOAT)
6555 {
6556 COPY_HARD_REG_SET (x, reg_class_contents[(int)FP_REGS]);
6557 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
6558 if (TEST_HARD_REG_BIT (x, i))
6559 fixed_regs[i] = call_used_regs[i] = 1;
6560 }
6561 if (flag_pic)
6562 fixed_regs[PIC_REG] = call_used_regs[PIC_REG] = 1;
6563}
6564
8b281334
RH
/* Register the out-of-line __sync_* library functions for operand
   sizes up to the word size (4 bytes on m68k).  */
static void
m68k_init_sync_libfuncs (void)
{
  init_sync_libfuncs (UNITS_PER_WORD);
}
6570
175aed00
AS
6571/* Implements EPILOGUE_USES. All registers are live on exit from an
6572 interrupt routine. */
6573bool
6574m68k_epilogue_uses (int regno ATTRIBUTE_UNUSED)
6575{
6576 return (reload_completed
6577 && (m68k_get_function_kind (current_function_decl)
6578 == m68k_fk_interrupt_handler));
6579}
6580
b89de1b8
JG
6581
6582/* Implement TARGET_C_EXCESS_PRECISION.
6583
6584 Set the value of FLT_EVAL_METHOD in float.h. When using 68040 fp
6585 instructions, we get proper intermediate rounding, otherwise we
6586 get extended precision results. */
6587
6588static enum flt_eval_method
6589m68k_excess_precision (enum excess_precision_type type)
6590{
6591 switch (type)
6592 {
6593 case EXCESS_PRECISION_TYPE_FAST:
6594 /* The fastest type to promote to will always be the native type,
6595 whether that occurs with implicit excess precision or
6596 otherwise. */
6597 return FLT_EVAL_METHOD_PROMOTE_TO_FLOAT;
6598 case EXCESS_PRECISION_TYPE_STANDARD:
6599 case EXCESS_PRECISION_TYPE_IMPLICIT:
6600 /* Otherwise, the excess precision we want when we are
6601 in a standards compliant mode, and the implicit precision we
6602 provide can be identical. */
6603 if (TARGET_68040 || ! TARGET_68881)
6604 return FLT_EVAL_METHOD_PROMOTE_TO_FLOAT;
6605
6606 return FLT_EVAL_METHOD_PROMOTE_TO_LONG_DOUBLE;
6607 default:
6608 gcc_unreachable ();
6609 }
6610 return FLT_EVAL_METHOD_UNPREDICTABLE;
6611}
6612
75df395f 6613#include "gt-m68k.h"