/* Subroutines for insn-output.c for Motorola 68000 family.
   Copyright (C) 1987-2016 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "cfghooks.h"
#include "tree.h"
#include "rtl.h"
#include "df.h"
#include "alias.h"
#include "fold-const.h"
#include "calls.h"
#include "stor-layout.h"
#include "varasm.h"
#include "regs.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "flags.h"
#include "expmed.h"
#include "dojump.h"
#include "explow.h"
#include "memmodel.h"
#include "emit-rtl.h"
#include "stmt.h"
#include "expr.h"
#include "reload.h"
#include "tm_p.h"
#include "target.h"
#include "debug.h"
#include "cfgrtl.h"
#include "cfganal.h"
#include "lcm.h"
#include "cfgbuild.h"
#include "cfgcleanup.h"
/* ??? Need to add a dependency between m68k.o and sched-int.h.  */
#include "sched-int.h"
#include "insn-codes.h"
#include "opts.h"
#include "optabs.h"
#include "builtins.h"
#include "rtl-iter.h"

/* This file should be included last.  */
#include "target-def.h"

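/* Map each hard register number to its register class: d0-d7 are
   DATA_REGS, a0-a7 are ADDR_REGS, fp0-fp7 are FP_REGS, and the final
   entry covers the remaining soft register (presumably the fake
   argument pointer), treated as an address register.  This comment is
   descriptive only; it was not in the original source.  */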
enum reg_class regno_reg_class[] =
{
  DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
  DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
  ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
  ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  ADDR_REGS
};

/* The minimum number of integer registers that we want to save with the
   movem instruction.  Using two movel instructions instead of a single
   moveml is about 15% faster for the 68020 and 68030 at no expense in
   code size.  */
#define MIN_MOVEM_REGS 3

/* The minimum number of floating point registers that we want to save
   with the fmovem instruction.  */
#define MIN_FMOVEM_REGS 1

/* Structure describing stack frame layout.  */
struct m68k_frame
{
  /* Stack pointer to frame pointer offset.  */
  HOST_WIDE_INT offset;

  /* Offset of FPU registers.  */
  HOST_WIDE_INT foffset;

  /* Frame size in bytes (rounded up).  */
  HOST_WIDE_INT size;

  /* Data and address register.  */
  int reg_no;
  unsigned int reg_mask;

  /* FPU registers.  */
  int fpu_no;
  unsigned int fpu_mask;

  /* Offsets relative to ARG_POINTER.  */
  HOST_WIDE_INT frame_pointer_offset;
  HOST_WIDE_INT stack_pointer_offset;

  /* Function which the above information refers to.  */
  int funcdef_no;
};

/* Current frame information calculated by m68k_compute_frame_layout().  */
static struct m68k_frame current_frame;

/* Structure describing an m68k address.

   If CODE is UNKNOWN, the address is BASE + INDEX * SCALE + OFFSET,
   with null fields evaluating to 0.  Here:

     - BASE satisfies m68k_legitimate_base_reg_p
     - INDEX satisfies m68k_legitimate_index_reg_p
     - OFFSET satisfies m68k_legitimate_constant_address_p

   INDEX is either HImode or SImode.  The other fields are SImode.

   If CODE is PRE_DEC, the address is -(BASE).  If CODE is POST_INC,
   the address is (BASE)+.  */
struct m68k_address {
  enum rtx_code code;
  rtx base;
  rtx index;
  rtx offset;
  int scale;
};

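/* Illustrative example (not in the original source): an address such as
   8(%a0,%d1.l*4) would be described by the structure above with
   CODE == UNKNOWN, BASE == %a0, INDEX == %d1, SCALE == 4 and OFFSET == 8.  */
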
static int m68k_sched_adjust_cost (rtx_insn *, int, rtx_insn *, int,
				   unsigned int);
static int m68k_sched_issue_rate (void);
static int m68k_sched_variable_issue (FILE *, int, rtx_insn *, int);
static void m68k_sched_md_init_global (FILE *, int, int);
static void m68k_sched_md_finish_global (FILE *, int);
static void m68k_sched_md_init (FILE *, int, int);
static void m68k_sched_dfa_pre_advance_cycle (void);
static void m68k_sched_dfa_post_advance_cycle (void);
static int m68k_sched_first_cycle_multipass_dfa_lookahead (void);

static bool m68k_can_eliminate (const int, const int);
static void m68k_conditional_register_usage (void);
static bool m68k_legitimate_address_p (machine_mode, rtx, bool);
static void m68k_option_override (void);
static void m68k_override_options_after_change (void);
static rtx find_addr_reg (rtx);
static const char *singlemove_string (rtx *);
static void m68k_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				  HOST_WIDE_INT, tree);
static rtx m68k_struct_value_rtx (tree, int);
static tree m68k_handle_fndecl_attribute (tree *node, tree name,
					  tree args, int flags,
					  bool *no_add_attrs);
static void m68k_compute_frame_layout (void);
static bool m68k_save_reg (unsigned int regno, bool interrupt_handler);
static bool m68k_ok_for_sibcall_p (tree, tree);
static bool m68k_tls_symbol_p (rtx);
static rtx m68k_legitimize_address (rtx, rtx, machine_mode);
static bool m68k_rtx_costs (rtx, machine_mode, int, int, int *, bool);
#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
static bool m68k_return_in_memory (const_tree, const_tree);
#endif
static void m68k_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
static void m68k_trampoline_init (rtx, tree, rtx);
static int m68k_return_pops_args (tree, tree, int);
static rtx m68k_delegitimize_address (rtx);
static void m68k_function_arg_advance (cumulative_args_t, machine_mode,
				       const_tree, bool);
static rtx m68k_function_arg (cumulative_args_t, machine_mode,
			      const_tree, bool);
static bool m68k_cannot_force_const_mem (machine_mode mode, rtx x);
static bool m68k_output_addr_const_extra (FILE *, rtx);
static void m68k_init_sync_libfuncs (void) ATTRIBUTE_UNUSED;

/* Initialize the GCC target structure.  */

#if INT_OP_GROUP == INT_OP_DOT_WORD
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
#endif

#if INT_OP_GROUP == INT_OP_NO_DOT
#undef TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tbyte\t"
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tshort\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tlong\t"
#endif

#if INT_OP_GROUP == INT_OP_DC
#undef TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tdc.b\t"
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tdc.w\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tdc.l\t"
#endif

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK m68k_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_ASM_FILE_START_APP_OFF
#define TARGET_ASM_FILE_START_APP_OFF true

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS m68k_legitimize_address

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST m68k_sched_adjust_cost

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE m68k_sched_issue_rate

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE m68k_sched_variable_issue

#undef TARGET_SCHED_INIT_GLOBAL
#define TARGET_SCHED_INIT_GLOBAL m68k_sched_md_init_global

#undef TARGET_SCHED_FINISH_GLOBAL
#define TARGET_SCHED_FINISH_GLOBAL m68k_sched_md_finish_global

#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT m68k_sched_md_init

#undef TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE
#define TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE m68k_sched_dfa_pre_advance_cycle

#undef TARGET_SCHED_DFA_POST_ADVANCE_CYCLE
#define TARGET_SCHED_DFA_POST_ADVANCE_CYCLE m68k_sched_dfa_post_advance_cycle

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
  m68k_sched_first_cycle_multipass_dfa_lookahead

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE m68k_option_override

#undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m68k_override_options_after_change

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS m68k_rtx_costs

#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE m68k_attribute_table

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX m68k_struct_value_rtx

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM m68k_cannot_force_const_mem

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL m68k_ok_for_sibcall_p

#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY m68k_return_in_memory
#endif

#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS (true)

#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL m68k_output_dwarf_dtprel
#endif

#undef TARGET_LRA_P
#define TARGET_LRA_P hook_bool_void_false

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P m68k_legitimate_address_p

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE m68k_can_eliminate

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE m68k_conditional_register_usage

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT m68k_trampoline_init

#undef TARGET_RETURN_POPS_ARGS
#define TARGET_RETURN_POPS_ARGS m68k_return_pops_args

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS m68k_delegitimize_address

#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG m68k_function_arg

#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE m68k_function_arg_advance

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P m68k_legitimate_constant_p

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA m68k_output_addr_const_extra

/* The value stored by TAS.  */
#undef TARGET_ATOMIC_TEST_AND_SET_TRUEVAL
#define TARGET_ATOMIC_TEST_AND_SET_TRUEVAL 128

static const struct attribute_spec m68k_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "interrupt", 0, 0, true, false, false, m68k_handle_fndecl_attribute,
    false },
  { "interrupt_handler", 0, 0, true, false, false,
    m68k_handle_fndecl_attribute, false },
  { "interrupt_thread", 0, 0, true, false, false,
    m68k_handle_fndecl_attribute, false },
  { NULL, 0, 0, false, false, false, NULL, false }
};

struct gcc_target targetm = TARGET_INITIALIZER;

/* Base flags for 68k ISAs.  */
#define FL_FOR_isa_00    FL_ISA_68000
#define FL_FOR_isa_10    (FL_FOR_isa_00 | FL_ISA_68010)
/* FL_68881 controls the default setting of -m68881.  gcc has traditionally
   generated 68881 code for 68020 and 68030 targets unless explicitly told
   not to.  */
#define FL_FOR_isa_20    (FL_FOR_isa_10 | FL_ISA_68020 \
			  | FL_BITFIELD | FL_68881 | FL_CAS)
#define FL_FOR_isa_40    (FL_FOR_isa_20 | FL_ISA_68040)
#define FL_FOR_isa_cpu32 (FL_FOR_isa_10 | FL_ISA_68020)

/* Base flags for ColdFire ISAs.  */
#define FL_FOR_isa_a     (FL_COLDFIRE | FL_ISA_A)
#define FL_FOR_isa_aplus (FL_FOR_isa_a | FL_ISA_APLUS | FL_CF_USP)
/* Note ISA_B doesn't necessarily include USP (user stack pointer) support.  */
#define FL_FOR_isa_b     (FL_FOR_isa_a | FL_ISA_B | FL_CF_HWDIV)
/* ISA_C is not upwardly compatible with ISA_B.  */
#define FL_FOR_isa_c     (FL_FOR_isa_a | FL_ISA_C | FL_CF_USP)

enum m68k_isa
{
  /* Traditional 68000 instruction sets.  */
  isa_00,
  isa_10,
  isa_20,
  isa_40,
  isa_cpu32,
  /* ColdFire instruction set variants.  */
  isa_a,
  isa_aplus,
  isa_b,
  isa_c,
  isa_max
};

/* Information about one of the -march, -mcpu or -mtune arguments.  */
struct m68k_target_selection
{
  /* The argument being described.  */
  const char *name;

  /* For -mcpu, this is the device selected by the option.
     For -mtune and -march, it is a representative device
     for the microarchitecture or ISA respectively.  */
  enum target_device device;

  /* The M68K_DEVICE fields associated with DEVICE.  See the comment
     in m68k-devices.def for details.  FAMILY is only valid for -mcpu.  */
  const char *family;
  enum uarch_type microarch;
  enum m68k_isa isa;
  unsigned long flags;
};

/* A list of all devices in m68k-devices.def.  Used for -mcpu selection.  */
static const struct m68k_target_selection all_devices[] =
{
#define M68K_DEVICE(NAME,ENUM_VALUE,FAMILY,MULTILIB,MICROARCH,ISA,FLAGS) \
  { NAME, ENUM_VALUE, FAMILY, u##MICROARCH, ISA, FLAGS | FL_FOR_##ISA },
#include "m68k-devices.def"
#undef M68K_DEVICE
  { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
};

/* A list of all ISAs, mapping each one to a representative device.
   Used for -march selection.  */
static const struct m68k_target_selection all_isas[] =
{
#define M68K_ISA(NAME,DEVICE,MICROARCH,ISA,FLAGS) \
  { NAME, DEVICE, NULL, u##MICROARCH, ISA, FLAGS },
#include "m68k-isas.def"
#undef M68K_ISA
  { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
};

/* A list of all microarchitectures, mapping each one to a representative
   device.  Used for -mtune selection.  */
static const struct m68k_target_selection all_microarchs[] =
{
#define M68K_MICROARCH(NAME,DEVICE,MICROARCH,ISA,FLAGS) \
  { NAME, DEVICE, NULL, u##MICROARCH, ISA, FLAGS },
#include "m68k-microarchs.def"
#undef M68K_MICROARCH
  { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
};

/* The entries associated with the -mcpu, -march and -mtune settings,
   or null for options that have not been used.  */
const struct m68k_target_selection *m68k_cpu_entry;
const struct m68k_target_selection *m68k_arch_entry;
const struct m68k_target_selection *m68k_tune_entry;

/* Which CPU we are generating code for.  */
enum target_device m68k_cpu;

/* Which microarchitecture to tune for.  */
enum uarch_type m68k_tune;

/* Which FPU to use.  */
enum fpu_type m68k_fpu;

/* The set of FL_* flags that apply to the target processor.  */
unsigned int m68k_cpu_flags;

/* The set of FL_* flags that apply to the processor to be tuned for.  */
unsigned int m68k_tune_flags;

/* Asm templates for calling or jumping to an arbitrary symbolic address,
   or NULL if such calls or jumps are not supported.  The address is held
   in operand 0.  */
const char *m68k_symbolic_call;
const char *m68k_symbolic_jump;

/* Enum variable that corresponds to m68k_symbolic_call values.  */
enum M68K_SYMBOLIC_CALL m68k_symbolic_call_var;

/* Implement TARGET_OPTION_OVERRIDE.  */

static void
m68k_option_override (void)
{
  const struct m68k_target_selection *entry;
  unsigned long target_mask;

  if (global_options_set.x_m68k_arch_option)
    m68k_arch_entry = &all_isas[m68k_arch_option];

  if (global_options_set.x_m68k_cpu_option)
    m68k_cpu_entry = &all_devices[(int) m68k_cpu_option];

  if (global_options_set.x_m68k_tune_option)
    m68k_tune_entry = &all_microarchs[(int) m68k_tune_option];

  /* User can choose:

     -mcpu=
     -march=
     -mtune=

     -march=ARCH should generate code that runs any processor
     implementing architecture ARCH.  -mcpu=CPU should override -march
     and should generate code that runs on processor CPU, making free
     use of any instructions that CPU understands.  -mtune=UARCH applies
     on top of -mcpu or -march and optimizes the code for UARCH.  It does
     not change the target architecture.  */
  if (m68k_cpu_entry)
    {
      /* Complain if the -march setting is for a different microarchitecture,
	 or includes flags that the -mcpu setting doesn't.  */
      if (m68k_arch_entry
	  && (m68k_arch_entry->microarch != m68k_cpu_entry->microarch
	      || (m68k_arch_entry->flags & ~m68k_cpu_entry->flags) != 0))
	warning (0, "-mcpu=%s conflicts with -march=%s",
		 m68k_cpu_entry->name, m68k_arch_entry->name);

      entry = m68k_cpu_entry;
    }
  else
    entry = m68k_arch_entry;

  if (!entry)
    entry = all_devices + TARGET_CPU_DEFAULT;

  m68k_cpu_flags = entry->flags;

  /* Use the architecture setting to derive default values for
     certain flags.  */
  target_mask = 0;

  /* ColdFire is lenient about alignment.  */
  if (!TARGET_COLDFIRE)
    target_mask |= MASK_STRICT_ALIGNMENT;

  if ((m68k_cpu_flags & FL_BITFIELD) != 0)
    target_mask |= MASK_BITFIELD;
  if ((m68k_cpu_flags & FL_CF_HWDIV) != 0)
    target_mask |= MASK_CF_HWDIV;
  if ((m68k_cpu_flags & (FL_68881 | FL_CF_FPU)) != 0)
    target_mask |= MASK_HARD_FLOAT;
  target_flags |= target_mask & ~target_flags_explicit;

  /* Set the directly-usable versions of the -mcpu and -mtune settings.  */
  m68k_cpu = entry->device;
  if (m68k_tune_entry)
    {
      m68k_tune = m68k_tune_entry->microarch;
      m68k_tune_flags = m68k_tune_entry->flags;
    }
#ifdef M68K_DEFAULT_TUNE
  else if (!m68k_cpu_entry && !m68k_arch_entry)
    {
      enum target_device dev;
      dev = all_microarchs[M68K_DEFAULT_TUNE].device;
      m68k_tune_flags = all_devices[dev].flags;
    }
#endif
  else
    {
      m68k_tune = entry->microarch;
      m68k_tune_flags = entry->flags;
    }

  /* Set the type of FPU.  */
  m68k_fpu = (!TARGET_HARD_FLOAT ? FPUTYPE_NONE
	      : (m68k_cpu_flags & FL_COLDFIRE) != 0 ? FPUTYPE_COLDFIRE
	      : FPUTYPE_68881);

  /* Sanity check to ensure that msep-data and mid-shared-library are not
   * both specified together.  Doing so simply doesn't make sense.
   */
  if (TARGET_SEP_DATA && TARGET_ID_SHARED_LIBRARY)
    error ("cannot specify both -msep-data and -mid-shared-library");

  /* If we're generating code for a separate A5 relative data segment,
   * we've got to enable -fPIC as well.  This might be relaxable to
   * -fpic but it hasn't been tested properly.
   */
  if (TARGET_SEP_DATA || TARGET_ID_SHARED_LIBRARY)
    flag_pic = 2;

  /* -mpcrel -fPIC uses 32-bit pc-relative displacements.  Raise an
     error if the target does not support them.  */
  if (TARGET_PCREL && !TARGET_68020 && flag_pic == 2)
    error ("-mpcrel -fPIC is not currently supported on selected cpu");

  /* ??? A historic way of turning on pic, or is this intended to
     be an embedded thing that doesn't have the same name binding
     significance that it does on hosted ELF systems?  */
  if (TARGET_PCREL && flag_pic == 0)
    flag_pic = 1;

  if (!flag_pic)
    {
      m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_JSR;

      m68k_symbolic_jump = "jra %a0";
    }
  else if (TARGET_ID_SHARED_LIBRARY)
    /* All addresses must be loaded from the GOT.  */
    ;
  else if (TARGET_68020 || TARGET_ISAB || TARGET_ISAC)
    {
      if (TARGET_PCREL)
	m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_C;
      else
	m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_P;

      if (TARGET_ISAC)
	/* No unconditional long branch */;
      else if (TARGET_PCREL)
	m68k_symbolic_jump = "bra%.l %c0";
      else
	m68k_symbolic_jump = "bra%.l %p0";
      /* Turn off function cse if we are doing PIC.  We always want
	 function call to be done as `bsr foo@PLTPC'.  */
      /* ??? It's traditional to do this for -mpcrel too, but it isn't
	 clear how intentional that is.  */
      flag_no_function_cse = 1;
    }

  switch (m68k_symbolic_call_var)
    {
    case M68K_SYMBOLIC_CALL_JSR:
      m68k_symbolic_call = "jsr %a0";
      break;

    case M68K_SYMBOLIC_CALL_BSR_C:
      m68k_symbolic_call = "bsr%.l %c0";
      break;

    case M68K_SYMBOLIC_CALL_BSR_P:
      m68k_symbolic_call = "bsr%.l %p0";
      break;

    case M68K_SYMBOLIC_CALL_NONE:
      gcc_assert (m68k_symbolic_call == NULL);
      break;

    default:
      gcc_unreachable ();
    }

#ifndef ASM_OUTPUT_ALIGN_WITH_NOP
  if (align_labels > 2)
    {
      warning (0, "-falign-labels=%d is not supported", align_labels);
      align_labels = 0;
    }
  if (align_loops > 2)
    {
      warning (0, "-falign-loops=%d is not supported", align_loops);
      align_loops = 0;
    }
#endif

  if ((opt_fstack_limit_symbol_arg != NULL || opt_fstack_limit_register_no >= 0)
      && !TARGET_68020)
    {
      warning (0, "-fstack-limit- options are not supported on this cpu");
      opt_fstack_limit_symbol_arg = NULL;
      opt_fstack_limit_register_no = -1;
    }

  SUBTARGET_OVERRIDE_OPTIONS;

  /* Setup scheduling options.  */
  if (TUNE_CFV1)
    m68k_sched_cpu = CPU_CFV1;
  else if (TUNE_CFV2)
    m68k_sched_cpu = CPU_CFV2;
  else if (TUNE_CFV3)
    m68k_sched_cpu = CPU_CFV3;
  else if (TUNE_CFV4)
    m68k_sched_cpu = CPU_CFV4;
  else
    {
      m68k_sched_cpu = CPU_UNKNOWN;
      flag_schedule_insns = 0;
      flag_schedule_insns_after_reload = 0;
      flag_modulo_sched = 0;
      flag_live_range_shrinkage = 0;
    }

  if (m68k_sched_cpu != CPU_UNKNOWN)
    {
      if ((m68k_cpu_flags & (FL_CF_EMAC | FL_CF_EMAC_B)) != 0)
	m68k_sched_mac = MAC_CF_EMAC;
      else if ((m68k_cpu_flags & FL_CF_MAC) != 0)
	m68k_sched_mac = MAC_CF_MAC;
      else
	m68k_sched_mac = MAC_NO;
    }
}

/* Implement TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE.  */

static void
m68k_override_options_after_change (void)
{
  if (m68k_sched_cpu == CPU_UNKNOWN)
    {
      flag_schedule_insns = 0;
      flag_schedule_insns_after_reload = 0;
      flag_modulo_sched = 0;
      flag_live_range_shrinkage = 0;
    }
}

/* Generate a macro of the form __mPREFIX_cpu_NAME, where PREFIX is the
   given argument and NAME is the argument passed to -mcpu.  Return NULL
   if -mcpu was not passed.  */

const char *
m68k_cpp_cpu_ident (const char *prefix)
{
  if (!m68k_cpu_entry)
    return NULL;
  return concat ("__m", prefix, "_cpu_", m68k_cpu_entry->name, NULL);
}

/* Generate a macro of the form __mPREFIX_family_NAME, where PREFIX is the
   given argument and NAME is the name of the representative device for
   the -mcpu argument's family.  Return NULL if -mcpu was not passed.  */

const char *
m68k_cpp_cpu_family (const char *prefix)
{
  if (!m68k_cpu_entry)
    return NULL;
  return concat ("__m", prefix, "_family_", m68k_cpu_entry->family, NULL);
}

/* Return m68k_fk_interrupt_handler if FUNC has an "interrupt" or
   "interrupt_handler" attribute and interrupt_thread if FUNC has an
   "interrupt_thread" attribute.  Otherwise, return
   m68k_fk_normal_function.  */

enum m68k_function_kind
m68k_get_function_kind (tree func)
{
  tree a;

  gcc_assert (TREE_CODE (func) == FUNCTION_DECL);

  a = lookup_attribute ("interrupt", DECL_ATTRIBUTES (func));
  if (a != NULL_TREE)
    return m68k_fk_interrupt_handler;

  a = lookup_attribute ("interrupt_handler", DECL_ATTRIBUTES (func));
  if (a != NULL_TREE)
    return m68k_fk_interrupt_handler;

  a = lookup_attribute ("interrupt_thread", DECL_ATTRIBUTES (func));
  if (a != NULL_TREE)
    return m68k_fk_interrupt_thread;

  return m68k_fk_normal_function;
}

/* Handle an attribute requiring a FUNCTION_DECL; arguments as in
   struct attribute_spec.handler.  */
static tree
m68k_handle_fndecl_attribute (tree *node, tree name,
			      tree args ATTRIBUTE_UNUSED,
			      int flags ATTRIBUTE_UNUSED,
			      bool *no_add_attrs)
{
  if (TREE_CODE (*node) != FUNCTION_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
	       name);
      *no_add_attrs = true;
    }

  if (m68k_get_function_kind (*node) != m68k_fk_normal_function)
    {
      error ("multiple interrupt attributes not allowed");
      *no_add_attrs = true;
    }

  if (!TARGET_FIDOA
      && !strcmp (IDENTIFIER_POINTER (name), "interrupt_thread"))
    {
      error ("interrupt_thread is available only on fido");
      *no_add_attrs = true;
    }

  return NULL_TREE;
}

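/* Compute the stack frame layout of the current function and cache it
   in current_frame: the masks and counts of integer and FPU registers
   that need saving, plus the rounded frame size.  (Descriptive comment
   added here; it was not in the original source.)  */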
static void
m68k_compute_frame_layout (void)
{
  int regno, saved;
  unsigned int mask;
  enum m68k_function_kind func_kind =
    m68k_get_function_kind (current_function_decl);
  bool interrupt_handler = func_kind == m68k_fk_interrupt_handler;
  bool interrupt_thread = func_kind == m68k_fk_interrupt_thread;

  /* Only compute the frame once per function.
     Don't cache information until reload has been completed.  */
  if (current_frame.funcdef_no == current_function_funcdef_no
      && reload_completed)
    return;

  current_frame.size = (get_frame_size () + 3) & -4;

  mask = saved = 0;

  /* Interrupt thread does not need to save any register.  */
  if (!interrupt_thread)
    for (regno = 0; regno < 16; regno++)
      if (m68k_save_reg (regno, interrupt_handler))
	{
	  mask |= 1 << (regno - D0_REG);
	  saved++;
	}
  current_frame.offset = saved * 4;
  current_frame.reg_no = saved;
  current_frame.reg_mask = mask;

  current_frame.foffset = 0;
  mask = saved = 0;
  if (TARGET_HARD_FLOAT)
    {
      /* Interrupt thread does not need to save any register.  */
      if (!interrupt_thread)
	for (regno = 16; regno < 24; regno++)
	  if (m68k_save_reg (regno, interrupt_handler))
	    {
	      mask |= 1 << (regno - FP0_REG);
	      saved++;
	    }
      current_frame.foffset = saved * TARGET_FP_REG_SIZE;
      current_frame.offset += current_frame.foffset;
    }
  current_frame.fpu_no = saved;
  current_frame.fpu_mask = mask;

  /* Remember what function this frame refers to.  */
  current_frame.funcdef_no = current_function_funcdef_no;
}

/* Worker function for TARGET_CAN_ELIMINATE.  */

bool
m68k_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
{
  return (to == STACK_POINTER_REGNUM ? ! frame_pointer_needed : true);
}

HOST_WIDE_INT
m68k_initial_elimination_offset (int from, int to)
{
  int argptr_offset;
  /* The arg pointer points 8 bytes before the start of the arguments,
     as defined by FIRST_PARM_OFFSET.  This makes it coincident with the
     frame pointer in most frames.  */
  argptr_offset = frame_pointer_needed ? 0 : UNITS_PER_WORD;
  if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
    return argptr_offset;

  m68k_compute_frame_layout ();

  gcc_assert (to == STACK_POINTER_REGNUM);
  switch (from)
    {
    case ARG_POINTER_REGNUM:
      return current_frame.offset + current_frame.size - argptr_offset;
    case FRAME_POINTER_REGNUM:
      return current_frame.offset + current_frame.size;
    default:
      gcc_unreachable ();
    }
}

/* Refer to the array `regs_ever_live' to determine which registers
   to save; `regs_ever_live[I]' is nonzero if register number I
   is ever used in the function.  This function is responsible for
   knowing which registers should not be saved even if used.
   Return true if we need to save REGNO.  */

static bool
m68k_save_reg (unsigned int regno, bool interrupt_handler)
{
  if (flag_pic && regno == PIC_REG)
    {
      if (crtl->saves_all_registers)
	return true;
      if (crtl->uses_pic_offset_table)
	return true;
      /* Reload may introduce constant pool references into a function
	 that thitherto didn't need a PIC register.  Note that the test
	 above will not catch that case because we will only set
	 crtl->uses_pic_offset_table when emitting
	 the address reloads.  */
      if (crtl->uses_const_pool)
	return true;
    }

  if (crtl->calls_eh_return)
    {
      unsigned int i;
      for (i = 0; ; i++)
	{
	  unsigned int test = EH_RETURN_DATA_REGNO (i);
	  if (test == INVALID_REGNUM)
	    break;
	  if (test == regno)
	    return true;
	}
    }

  /* Fixed regs we never touch.  */
  if (fixed_regs[regno])
    return false;

  /* The frame pointer (if it is such) is handled specially.  */
  if (regno == FRAME_POINTER_REGNUM && frame_pointer_needed)
    return false;

  /* Interrupt handlers must also save call_used_regs
     if they are live or when calling nested functions.  */
  if (interrupt_handler)
    {
      if (df_regs_ever_live_p (regno))
	return true;

      if (!crtl->is_leaf && call_used_regs[regno])
	return true;
    }

  /* Never need to save registers that aren't touched.  */
  if (!df_regs_ever_live_p (regno))
    return false;

  /* Otherwise save everything that isn't call-clobbered.  */
  return !call_used_regs[regno];
}

/* Emit RTL for a MOVEM or FMOVEM instruction.  BASE + OFFSET represents
   the lowest memory address.  COUNT is the number of registers to be
   moved, with register REGNO + I being moved if bit I of MASK is set.
   STORE_P specifies the direction of the move and ADJUST_STACK_P says
   whether or not this is pre-decrement (if STORE_P) or post-increment
   (if !STORE_P) operation.  */

static rtx_insn *
m68k_emit_movem (rtx base, HOST_WIDE_INT offset,
		 unsigned int count, unsigned int regno,
		 unsigned int mask, bool store_p, bool adjust_stack_p)
{
  int i;
  rtx body, addr, src, operands[2];
  machine_mode mode;

  body = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (adjust_stack_p + count));
  mode = reg_raw_mode[regno];
  i = 0;

  if (adjust_stack_p)
    {
      src = plus_constant (Pmode, base,
			   (count
			    * GET_MODE_SIZE (mode)
			    * (HOST_WIDE_INT) (store_p ? -1 : 1)));
      XVECEXP (body, 0, i++) = gen_rtx_SET (base, src);
    }

  for (; mask != 0; mask >>= 1, regno++)
    if (mask & 1)
      {
	addr = plus_constant (Pmode, base, offset);
	operands[!store_p] = gen_frame_mem (mode, addr);
	operands[store_p] = gen_rtx_REG (mode, regno);
	XVECEXP (body, 0, i++)
	  = gen_rtx_SET (operands[0], operands[1]);
	offset += GET_MODE_SIZE (mode);
      }
  gcc_assert (i == XVECLEN (body, 0));

  return emit_insn (body);
}

/* Make INSN a frame-related instruction.  */

static void
m68k_set_frame_related (rtx_insn *insn)
{
  rtx body;
  int i;

  RTX_FRAME_RELATED_P (insn) = 1;
  body = PATTERN (insn);
  if (GET_CODE (body) == PARALLEL)
    for (i = 0; i < XVECLEN (body, 0); i++)
      RTX_FRAME_RELATED_P (XVECEXP (body, 0, i)) = 1;
}

/* Emit RTL for the "prologue" define_expand.  */

void
m68k_expand_prologue (void)
{
  HOST_WIDE_INT fsize_with_regs;
  rtx limit, src, dest;

  m68k_compute_frame_layout ();

  if (flag_stack_usage_info)
    current_function_static_stack_size
      = current_frame.size + current_frame.offset;

  /* If the stack limit is a symbol, we can check it here,
     before actually allocating the space.  */
  if (crtl->limit_stack
      && GET_CODE (stack_limit_rtx) == SYMBOL_REF)
    {
      limit = plus_constant (Pmode, stack_limit_rtx, current_frame.size + 4);
      if (!m68k_legitimate_constant_p (Pmode, limit))
	{
	  emit_move_insn (gen_rtx_REG (Pmode, D0_REG), limit);
	  limit = gen_rtx_REG (Pmode, D0_REG);
	}
      emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode,
					    stack_pointer_rtx, limit),
			       stack_pointer_rtx, limit,
			       const1_rtx));
    }

  fsize_with_regs = current_frame.size;
  if (TARGET_COLDFIRE)
    {
      /* ColdFire's move multiple instructions do not allow pre-decrement
	 addressing.  Add the size of movem saves to the initial stack
	 allocation instead.  */
      if (current_frame.reg_no >= MIN_MOVEM_REGS)
	fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
      if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
	fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
    }

  if (frame_pointer_needed)
    {
      if (fsize_with_regs == 0 && TUNE_68040)
	{
	  /* On the 68040, two separate moves are faster than link.w 0.  */
	  dest = gen_frame_mem (Pmode,
				gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
	  m68k_set_frame_related (emit_move_insn (dest, frame_pointer_rtx));
	  m68k_set_frame_related (emit_move_insn (frame_pointer_rtx,
						  stack_pointer_rtx));
	}
      else if (fsize_with_regs < 0x8000 || TARGET_68020)
	m68k_set_frame_related
	  (emit_insn (gen_link (frame_pointer_rtx,
				GEN_INT (-4 - fsize_with_regs))));
      else
	{
	  m68k_set_frame_related
	    (emit_insn (gen_link (frame_pointer_rtx, GEN_INT (-4))));
	  m68k_set_frame_related
	    (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-fsize_with_regs))));
	}

      /* If the frame pointer is needed, emit a special barrier that
	 will prevent the scheduler from moving stores to the frame
	 before the stack adjustment.  */
      emit_insn (gen_stack_tie (stack_pointer_rtx, frame_pointer_rtx));
    }
  else if (fsize_with_regs != 0)
    m68k_set_frame_related
      (emit_insn (gen_addsi3 (stack_pointer_rtx,
			      stack_pointer_rtx,
			      GEN_INT (-fsize_with_regs))));

  if (current_frame.fpu_mask)
    {
      gcc_assert (current_frame.fpu_no >= MIN_FMOVEM_REGS);
      if (TARGET_68881)
	m68k_set_frame_related
	  (m68k_emit_movem (stack_pointer_rtx,
			    current_frame.fpu_no * -GET_MODE_SIZE (XFmode),
			    current_frame.fpu_no, FP0_REG,
			    current_frame.fpu_mask, true, true));
      else
	{
	  int offset;

	  /* If we're using moveml to save the integer registers,
	     the stack pointer will point to the bottom of the moveml
	     save area.  Find the stack offset of the first FP register.  */
	  if (current_frame.reg_no < MIN_MOVEM_REGS)
	    offset = 0;
	  else
	    offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
	  m68k_set_frame_related
	    (m68k_emit_movem (stack_pointer_rtx, offset,
			      current_frame.fpu_no, FP0_REG,
			      current_frame.fpu_mask, true, false));
	}
    }

  /* If the stack limit is not a symbol, check it here.
     This has the disadvantage that it may be too late...  */
  if (crtl->limit_stack)
    {
      if (REG_P (stack_limit_rtx))
	emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode, stack_pointer_rtx,
					      stack_limit_rtx),
				 stack_pointer_rtx, stack_limit_rtx,
				 const1_rtx));

      else if (GET_CODE (stack_limit_rtx) != SYMBOL_REF)
	warning (0, "stack limit expression is not supported");
    }

  if (current_frame.reg_no < MIN_MOVEM_REGS)
    {
      /* Store each register separately in the same order moveml does.  */
      int i;

      for (i = 16; i-- > 0; )
	if (current_frame.reg_mask & (1 << i))
	  {
	    src = gen_rtx_REG (SImode, D0_REG + i);
	    dest = gen_frame_mem (SImode,
				  gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
	    m68k_set_frame_related (emit_insn (gen_movsi (dest, src)));
	  }
    }
  else
    {
      if (TARGET_COLDFIRE)
	/* The required register save space has already been allocated.
	   The first register should be stored at (%sp).  */
	m68k_set_frame_related
	  (m68k_emit_movem (stack_pointer_rtx, 0,
			    current_frame.reg_no, D0_REG,
			    current_frame.reg_mask, true, false));
      else
	m68k_set_frame_related
	  (m68k_emit_movem (stack_pointer_rtx,
			    current_frame.reg_no * -GET_MODE_SIZE (SImode),
			    current_frame.reg_no, D0_REG,
			    current_frame.reg_mask, true, true));
    }

  if (!TARGET_SEP_DATA
      && crtl->uses_pic_offset_table)
    emit_insn (gen_load_got (pic_offset_table_rtx));
}

/* Return true if a simple (return) instruction is sufficient for this
   instruction (i.e. if no epilogue is needed).  */

bool
m68k_use_return_insn (void)
{
  if (!reload_completed || frame_pointer_needed || get_frame_size () != 0)
    return false;

  m68k_compute_frame_layout ();
  return current_frame.offset == 0;
}

/* Emit RTL for the "epilogue" or "sibcall_epilogue" define_expand;
   SIBCALL_P says which.

   The function epilogue should not depend on the current stack pointer!
   It should use the frame pointer only, if there is a frame pointer.
   This is mandatory because of alloca; we also take advantage of it to
   omit stack adjustments before returning.  */

void
m68k_expand_epilogue (bool sibcall_p)
{
  HOST_WIDE_INT fsize, fsize_with_regs;
  bool big, restore_from_sp;

  m68k_compute_frame_layout ();

  fsize = current_frame.size;
  big = false;
  restore_from_sp = false;

  /* FIXME : crtl->is_leaf below is too strong.
     What we really need to know there is if there could be pending
     stack adjustment needed at that point.  */
  restore_from_sp = (!frame_pointer_needed
		     || (!cfun->calls_alloca && crtl->is_leaf));

  /* fsize_with_regs is the size we need to adjust the sp when
     popping the frame.  */
  fsize_with_regs = fsize;
  if (TARGET_COLDFIRE && restore_from_sp)
    {
      /* ColdFire's move multiple instructions do not allow post-increment
	 addressing.  Add the size of movem loads to the final deallocation
	 instead.  */
      if (current_frame.reg_no >= MIN_MOVEM_REGS)
	fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
      if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
	fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
    }

  if (current_frame.offset + fsize >= 0x8000
      && !restore_from_sp
      && (current_frame.reg_mask || current_frame.fpu_mask))
    {
      if (TARGET_COLDFIRE
	  && (current_frame.reg_no >= MIN_MOVEM_REGS
	      || current_frame.fpu_no >= MIN_FMOVEM_REGS))
	{
	  /* ColdFire's move multiple instructions do not support the
	     (d8,Ax,Xi) addressing mode, so we're as well using a normal
	     stack-based restore.  */
	  emit_move_insn (gen_rtx_REG (Pmode, A1_REG),
			  GEN_INT (-(current_frame.offset + fsize)));
	  emit_insn (gen_addsi3 (stack_pointer_rtx,
				 gen_rtx_REG (Pmode, A1_REG),
				 frame_pointer_rtx));
	  restore_from_sp = true;
	}
      else
	{
	  emit_move_insn (gen_rtx_REG (Pmode, A1_REG), GEN_INT (-fsize));
	  fsize = 0;
	  big = true;
	}
    }

  if (current_frame.reg_no < MIN_MOVEM_REGS)
    {
      /* Restore each register separately in the same order moveml does.  */
      int i;
      HOST_WIDE_INT offset;

      offset = current_frame.offset + fsize;
      for (i = 0; i < 16; i++)
	if (current_frame.reg_mask & (1 << i))
	  {
	    rtx addr;

	    if (big)
	      {
		/* Generate the address -OFFSET(%fp,%a1.l).  */
		addr = gen_rtx_REG (Pmode, A1_REG);
		addr = gen_rtx_PLUS (Pmode, addr, frame_pointer_rtx);
		addr = plus_constant (Pmode, addr, -offset);
	      }
	    else if (restore_from_sp)
	      addr = gen_rtx_POST_INC (Pmode, stack_pointer_rtx);
	    else
	      addr = plus_constant (Pmode, frame_pointer_rtx, -offset);
	    emit_move_insn (gen_rtx_REG (SImode, D0_REG + i),
			    gen_frame_mem (SImode, addr));
	    offset -= GET_MODE_SIZE (SImode);
	  }
    }
  else if (current_frame.reg_mask)
    {
      if (big)
	m68k_emit_movem (gen_rtx_PLUS (Pmode,
				       gen_rtx_REG (Pmode, A1_REG),
				       frame_pointer_rtx),
			 -(current_frame.offset + fsize),
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false, false);
      else if (restore_from_sp)
	m68k_emit_movem (stack_pointer_rtx, 0,
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false,
			 !TARGET_COLDFIRE);
      else
	m68k_emit_movem (frame_pointer_rtx,
			 -(current_frame.offset + fsize),
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false, false);
    }

  if (current_frame.fpu_no > 0)
    {
      if (big)
	m68k_emit_movem (gen_rtx_PLUS (Pmode,
				       gen_rtx_REG (Pmode, A1_REG),
				       frame_pointer_rtx),
			 -(current_frame.foffset + fsize),
			 current_frame.fpu_no, FP0_REG,
			 current_frame.fpu_mask, false, false);
      else if (restore_from_sp)
	{
	  if (TARGET_COLDFIRE)
	    {
	      int offset;

	      /* If we used moveml to restore the integer registers, the
		 stack pointer will still point to the bottom of the moveml
		 save area.  Find the stack offset of the first FP
		 register.  */
	      if (current_frame.reg_no < MIN_MOVEM_REGS)
		offset = 0;
	      else
		offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
	      m68k_emit_movem (stack_pointer_rtx, offset,
			       current_frame.fpu_no, FP0_REG,
			       current_frame.fpu_mask, false, false);
	    }
	  else
	    m68k_emit_movem (stack_pointer_rtx, 0,
			     current_frame.fpu_no, FP0_REG,
			     current_frame.fpu_mask, false, true);
	}
      else
	m68k_emit_movem (frame_pointer_rtx,
			 -(current_frame.foffset + fsize),
			 current_frame.fpu_no, FP0_REG,
			 current_frame.fpu_mask, false, false);
    }

  if (frame_pointer_needed)
    emit_insn (gen_unlink (frame_pointer_rtx));
  else if (fsize_with_regs)
    emit_insn (gen_addsi3 (stack_pointer_rtx,
			   stack_pointer_rtx,
			   GEN_INT (fsize_with_regs)));

  if (crtl->calls_eh_return)
    emit_insn (gen_addsi3 (stack_pointer_rtx,
			   stack_pointer_rtx,
			   EH_RETURN_STACKADJ_RTX));

  if (!sibcall_p)
    emit_jump_insn (ret_rtx);
}

/* Return true if X is a valid comparison operator for the dbcc
   instruction.

   Note it rejects floating point comparison operators.
   (In the future we could use Fdbcc).

   It also rejects some comparisons when CC_NO_OVERFLOW is set.  */

int
valid_dbcc_comparison_p_2 (rtx x, machine_mode mode ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (x))
    {
    case EQ: case NE: case GTU: case LTU:
    case GEU: case LEU:
      return 1;

      /* Reject some when CC_NO_OVERFLOW is set.  This may be over
	 conservative */
    case GT: case LT: case GE: case LE:
      return ! (cc_prev_status.flags & CC_NO_OVERFLOW);
    default:
      return 0;
    }
}

/* Return nonzero if flags are currently in the 68881 flag register.  */
int
flags_in_68881 (void)
{
  /* We could add support for these in the future */
  return cc_status.flags & CC_IN_68881;
}

/* Return true if PARALLEL contains register REGNO.  */
static bool
m68k_reg_present_p (const_rtx parallel, unsigned int regno)
{
  int i;

  if (REG_P (parallel) && REGNO (parallel) == regno)
    return true;

  if (GET_CODE (parallel) != PARALLEL)
    return false;

  for (i = 0; i < XVECLEN (parallel, 0); ++i)
    {
      const_rtx x;

      x = XEXP (XVECEXP (parallel, 0, i), 0);
      if (REG_P (x) && REGNO (x) == regno)
	return true;
    }

  return false;
}

/* Implement TARGET_FUNCTION_OK_FOR_SIBCALL_P.  */

static bool
m68k_ok_for_sibcall_p (tree decl, tree exp)
{
  enum m68k_function_kind kind;

  /* We cannot use sibcalls for nested functions because we use the
     static chain register for indirect calls.  */
  if (CALL_EXPR_STATIC_CHAIN (exp))
    return false;

  if (!VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
    {
      /* Check that the return value locations are the same.  For
	 example that we aren't returning a value from the sibling in
	 a D0 register but then need to transfer it to a A0 register.  */
      rtx cfun_value;
      rtx call_value;

      cfun_value = FUNCTION_VALUE (TREE_TYPE (DECL_RESULT (cfun->decl)),
				   cfun->decl);
      call_value = FUNCTION_VALUE (TREE_TYPE (exp), decl);

      /* Check that the values are equal or that the result the callee
	 function returns is superset of what the current function returns.  */
      if (!(rtx_equal_p (cfun_value, call_value)
	    || (REG_P (cfun_value)
		&& m68k_reg_present_p (call_value, REGNO (cfun_value)))))
	return false;
    }

  kind = m68k_get_function_kind (current_function_decl);
  if (kind == m68k_fk_normal_function)
    /* We can always sibcall from a normal function, because it's
       undefined if it is calling an interrupt function.  */
    return true;

  /* Otherwise we can only sibcall if the function kind is known to be
     the same.  */
  if (decl && m68k_get_function_kind (decl) == kind)
    return true;

  return false;
}

/* On the m68k all args are always pushed.  */

static rtx
m68k_function_arg (cumulative_args_t cum ATTRIBUTE_UNUSED,
		   machine_mode mode ATTRIBUTE_UNUSED,
		   const_tree type ATTRIBUTE_UNUSED,
		   bool named ATTRIBUTE_UNUSED)
{
  return NULL_RTX;
}

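/* Advance the argument cursor *CUM past an argument of the given MODE
   (or, for BLKmode, of the given TYPE), rounding its size up to a
   multiple of four bytes.  (Descriptive comment added here; it was not
   in the original source.)  */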
static void
m68k_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
			   const_tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);

  *cum += (mode != BLKmode
	   ? (GET_MODE_SIZE (mode) + 3) & ~3
	   : (int_size_in_bytes (type) + 3) & ~3);
}

/* Convert X to a legitimate function call memory reference and return the
   result.  */

rtx
m68k_legitimize_call_address (rtx x)
{
  gcc_assert (MEM_P (x));
  if (call_operand (XEXP (x, 0), VOIDmode))
    return x;
  return replace_equiv_address (x, force_reg (Pmode, XEXP (x, 0)));
}

/* Likewise for sibling calls.  */

rtx
m68k_legitimize_sibcall_address (rtx x)
{
  gcc_assert (MEM_P (x));
  if (sibcall_operand (XEXP (x, 0), VOIDmode))
    return x;

  emit_move_insn (gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM), XEXP (x, 0));
  return replace_equiv_address (x, gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM));
}

/* Convert X to a legitimate address and return it if successful.  Otherwise
   return X.

   For the 68000, we handle X+REG by loading X into a register R and
   using R+REG.  R will go in an address reg and indexing will be used.
   However, if REG is a broken-out memory address or multiplication,
   nothing needs to be done because REG can certainly go in an address reg.  */

static rtx
m68k_legitimize_address (rtx x, rtx oldx, machine_mode mode)
{
  if (m68k_tls_symbol_p (x))
    return m68k_legitimize_tls_address (x);

  if (GET_CODE (x) == PLUS)
    {
      int ch = (x) != (oldx);
      int copied = 0;

#define COPY_ONCE(Y) if (!copied) { Y = copy_rtx (Y); copied = ch = 1; }

      if (GET_CODE (XEXP (x, 0)) == MULT)
	{
	  COPY_ONCE (x);
	  XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
	}
      if (GET_CODE (XEXP (x, 1)) == MULT)
	{
	  COPY_ONCE (x);
	  XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
	}
      if (ch)
	{
	  if (GET_CODE (XEXP (x, 1)) == REG
	      && GET_CODE (XEXP (x, 0)) == REG)
	    {
	      if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
		{
		  COPY_ONCE (x);
		  x = force_operand (x, 0);
		}
	      return x;
	    }
	  if (memory_address_p (mode, x))
	    return x;
	}
      if (GET_CODE (XEXP (x, 0)) == REG
	  || (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
	      && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
	      && GET_MODE (XEXP (XEXP (x, 0), 0)) == HImode))
	{
	  rtx temp = gen_reg_rtx (Pmode);
	  rtx val = force_operand (XEXP (x, 1), 0);
	  emit_move_insn (temp, val);
	  COPY_ONCE (x);
	  XEXP (x, 1) = temp;
	  if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
	      && GET_CODE (XEXP (x, 0)) == REG)
	    x = force_operand (x, 0);
	}
      else if (GET_CODE (XEXP (x, 1)) == REG
	       || (GET_CODE (XEXP (x, 1)) == SIGN_EXTEND
		   && GET_CODE (XEXP (XEXP (x, 1), 0)) == REG
		   && GET_MODE (XEXP (XEXP (x, 1), 0)) == HImode))
	{
	  rtx temp = gen_reg_rtx (Pmode);
	  rtx val = force_operand (XEXP (x, 0), 0);
	  emit_move_insn (temp, val);
	  COPY_ONCE (x);
	  XEXP (x, 0) = temp;
	  if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
	      && GET_CODE (XEXP (x, 1)) == REG)
	    x = force_operand (x, 0);
	}
    }

  return x;
}

/* Output a dbCC; jCC sequence.  Note we do not handle the
   floating point version of this sequence (Fdbcc).  We also
   do not handle alternative conditions when CC_NO_OVERFLOW is
   set.  It is assumed that valid_dbcc_comparison_p and flags_in_68881 will
   kick those out before we get here.  */

void
output_dbcc_and_branch (rtx *operands)
{
  switch (GET_CODE (operands[3]))
    {
    case EQ:
      output_asm_insn ("dbeq %0,%l1\n\tjeq %l2", operands);
      break;

    case NE:
      output_asm_insn ("dbne %0,%l1\n\tjne %l2", operands);
      break;

    case GT:
      output_asm_insn ("dbgt %0,%l1\n\tjgt %l2", operands);
      break;

    case GTU:
      output_asm_insn ("dbhi %0,%l1\n\tjhi %l2", operands);
      break;

    case LT:
      output_asm_insn ("dblt %0,%l1\n\tjlt %l2", operands);
      break;

    case LTU:
      output_asm_insn ("dbcs %0,%l1\n\tjcs %l2", operands);
      break;

    case GE:
      output_asm_insn ("dbge %0,%l1\n\tjge %l2", operands);
      break;

    case GEU:
      output_asm_insn ("dbcc %0,%l1\n\tjcc %l2", operands);
      break;

    case LE:
      output_asm_insn ("dble %0,%l1\n\tjle %l2", operands);
      break;

    case LEU:
      output_asm_insn ("dbls %0,%l1\n\tjls %l2", operands);
      break;

    default:
      gcc_unreachable ();
    }

  /* If the decrement is to be done in SImode, then we have
     to compensate for the fact that dbcc decrements in HImode.  */
  switch (GET_MODE (operands[0]))
    {
    case SImode:
      output_asm_insn ("clr%.w %0\n\tsubq%.l #1,%0\n\tjpl %l1", operands);
      break;

    case HImode:
      break;

    default:
      gcc_unreachable ();
    }
}

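/* Output assembly that sets the byte DEST according to the DImode
   comparison OP between OPERAND1 and OPERAND2, comparing the two
   32-bit halves separately.  (Descriptive comment added here; it was
   not in the original source.)  */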
const char *
output_scc_di (rtx op, rtx operand1, rtx operand2, rtx dest)
{
  rtx loperands[7];
  enum rtx_code op_code = GET_CODE (op);

  /* This does not produce a useful cc.  */
  CC_STATUS_INIT;

  /* The m68k cmp.l instruction requires operand1 to be a reg as used
     below.  Swap the operands and change the op if these requirements
     are not fulfilled.  */
  if (GET_CODE (operand2) == REG && GET_CODE (operand1) != REG)
    {
      rtx tmp = operand1;

      operand1 = operand2;
      operand2 = tmp;
      op_code = swap_condition (op_code);
    }
  loperands[0] = operand1;
  if (GET_CODE (operand1) == REG)
    loperands[1] = gen_rtx_REG (SImode, REGNO (operand1) + 1);
  else
    loperands[1] = adjust_address (operand1, SImode, 4);
  if (operand2 != const0_rtx)
    {
      loperands[2] = operand2;
      if (GET_CODE (operand2) == REG)
	loperands[3] = gen_rtx_REG (SImode, REGNO (operand2) + 1);
      else
	loperands[3] = adjust_address (operand2, SImode, 4);
    }
  loperands[4] = gen_label_rtx ();
  if (operand2 != const0_rtx)
    output_asm_insn ("cmp%.l %2,%0\n\tjne %l4\n\tcmp%.l %3,%1", loperands);
  else
    {
      if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[0]))
	output_asm_insn ("tst%.l %0", loperands);
      else
	output_asm_insn ("cmp%.w #0,%0", loperands);

      output_asm_insn ("jne %l4", loperands);

      if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[1]))
	output_asm_insn ("tst%.l %1", loperands);
      else
	output_asm_insn ("cmp%.w #0,%1", loperands);
    }

  loperands[5] = dest;

  switch (op_code)
    {
    case EQ:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("seq %5", loperands);
      break;

    case NE:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sne %5", loperands);
      break;

    case GT:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("shi %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sgt %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case GTU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("shi %5", loperands);
      break;

    case LT:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("scs %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("slt %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case LTU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("scs %5", loperands);
      break;
1725
1726 case GE:
428511bb 1727 loperands[6] = gen_label_rtx ();
da398bb5 1728 output_asm_insn ("scc %5\n\tjra %l6", loperands);
4977bab6 1729 (*targetm.asm_out.internal_label) (asm_out_file, "L",
a0a7fbc9 1730 CODE_LABEL_NUMBER (loperands[4]));
c59c3b1c 1731 output_asm_insn ("sge %5", loperands);
4977bab6 1732 (*targetm.asm_out.internal_label) (asm_out_file, "L",
a0a7fbc9 1733 CODE_LABEL_NUMBER (loperands[6]));
c59c3b1c
RK
1734 break;
1735
1736 case GEU:
4977bab6 1737 (*targetm.asm_out.internal_label) (asm_out_file, "L",
a0a7fbc9 1738 CODE_LABEL_NUMBER (loperands[4]));
c59c3b1c
RK
1739 output_asm_insn ("scc %5", loperands);
1740 break;
1741
1742 case LE:
428511bb 1743 loperands[6] = gen_label_rtx ();
da398bb5 1744 output_asm_insn ("sls %5\n\tjra %l6", loperands);
4977bab6 1745 (*targetm.asm_out.internal_label) (asm_out_file, "L",
a0a7fbc9 1746 CODE_LABEL_NUMBER (loperands[4]));
c59c3b1c 1747 output_asm_insn ("sle %5", loperands);
4977bab6 1748 (*targetm.asm_out.internal_label) (asm_out_file, "L",
a0a7fbc9 1749 CODE_LABEL_NUMBER (loperands[6]));
c59c3b1c
RK
1750 break;
1751
1752 case LEU:
4977bab6 1753 (*targetm.asm_out.internal_label) (asm_out_file, "L",
a0a7fbc9 1754 CODE_LABEL_NUMBER (loperands[4]));
c59c3b1c
RK
1755 output_asm_insn ("sls %5", loperands);
1756 break;
1757
1758 default:
4761e388 1759 gcc_unreachable ();
c59c3b1c
RK
1760 }
1761 return "";
1762}
1763
5505f548 1764const char *
c85e862a 1765output_btst (rtx *operands, rtx countop, rtx dataop, rtx_insn *insn, int signpos)
79e68feb
RS
1766{
1767 operands[0] = countop;
1768 operands[1] = dataop;
1769
1770 if (GET_CODE (countop) == CONST_INT)
1771 {
1772 register int count = INTVAL (countop);
 1773 /* If COUNT is bigger than the size of the storage unit in use,
 1774 advance to the containing unit of the same size. */
1775 if (count > signpos)
1776 {
1777 int offset = (count & ~signpos) / 8;
1778 count = count & signpos;
b72f00af 1779 operands[1] = dataop = adjust_address (dataop, QImode, offset);
79e68feb
RS
1780 }
1781 if (count == signpos)
1782 cc_status.flags = CC_NOT_POSITIVE | CC_Z_IN_NOT_N;
1783 else
1784 cc_status.flags = CC_NOT_NEGATIVE | CC_Z_IN_NOT_N;
1785
1786 /* These three statements used to use next_insns_test_no...
1787 but it appears that this should do the same job. */
1788 if (count == 31
1789 && next_insn_tests_no_inequality (insn))
1790 return "tst%.l %1";
1791 if (count == 15
1792 && next_insn_tests_no_inequality (insn))
1793 return "tst%.w %1";
1794 if (count == 7
1795 && next_insn_tests_no_inequality (insn))
1796 return "tst%.b %1";
5083912d
PDM
1797 /* Try to use `movew to ccr' followed by the appropriate branch insn.
1798 On some m68k variants unfortunately that's slower than btst.
1799 On 68000 and higher, that should also work for all HImode operands. */
1800 if (TUNE_CPU32 || TARGET_COLDFIRE || optimize_size)
1801 {
1802 if (count == 3 && DATA_REG_P (operands[1])
1803 && next_insn_tests_no_inequality (insn))
1804 {
1805 cc_status.flags = CC_NOT_NEGATIVE | CC_Z_IN_NOT_N | CC_NO_OVERFLOW;
1806 return "move%.w %1,%%ccr";
1807 }
1808 if (count == 2 && DATA_REG_P (operands[1])
1809 && next_insn_tests_no_inequality (insn))
1810 {
1811 cc_status.flags = CC_NOT_NEGATIVE | CC_INVERTED | CC_NO_OVERFLOW;
1812 return "move%.w %1,%%ccr";
1813 }
1814 /* count == 1 followed by bvc/bvs and
1815 count == 0 followed by bcc/bcs are also possible, but need
1816 m68k-specific CC_Z_IN_NOT_V and CC_Z_IN_NOT_C flags. */
1817 }
79e68feb
RS
1818
1819 cc_status.flags = CC_NOT_NEGATIVE;
1820 }
1821 return "btst %0,%1";
1822}
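(Aside, not part of m68k.c: the offset/bit-position folding that output_btst performs for an out-of-range COUNT can be checked in isolation. The standalone sketch below only illustrates that arithmetic for signpos == 7, i.e. a QImode operand.)

#include <assert.h>

int
main (void)
{
  int count = 35, signpos = 7;

  /* Same folding as in output_btst: advance to the byte that actually
     contains the bit, then keep only the bit position within that byte.  */
  int offset = (count & ~signpos) / 8;   /* (35 & ~7) / 8 == 4 bytes */
  count = count & signpos;               /* 35 & 7 == bit 3 */

  assert (offset == 4 && count == 3);
  return 0;
}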
79e68feb 1823\f
fc2241eb
RS
1824/* Return true if X is a legitimate base register. STRICT_P says
1825 whether we need strict checking. */
1826
1827bool
1828m68k_legitimate_base_reg_p (rtx x, bool strict_p)
1829{
1830 /* Allow SUBREG everywhere we allow REG. This results in better code. */
1831 if (!strict_p && GET_CODE (x) == SUBREG)
1832 x = SUBREG_REG (x);
1833
1834 return (REG_P (x)
1835 && (strict_p
1836 ? REGNO_OK_FOR_BASE_P (REGNO (x))
bf32249e 1837 : REGNO_OK_FOR_BASE_NONSTRICT_P (REGNO (x))));
fc2241eb
RS
1838}
1839
1840/* Return true if X is a legitimate index register. STRICT_P says
1841 whether we need strict checking. */
1842
1843bool
1844m68k_legitimate_index_reg_p (rtx x, bool strict_p)
1845{
1846 if (!strict_p && GET_CODE (x) == SUBREG)
1847 x = SUBREG_REG (x);
1848
1849 return (REG_P (x)
1850 && (strict_p
1851 ? REGNO_OK_FOR_INDEX_P (REGNO (x))
bf32249e 1852 : REGNO_OK_FOR_INDEX_NONSTRICT_P (REGNO (x))));
fc2241eb
RS
1853}
1854
1855/* Return true if X is a legitimate index expression for a (d8,An,Xn) or
1856 (bd,An,Xn) addressing mode. Fill in the INDEX and SCALE fields of
1857 ADDRESS if so. STRICT_P says whether we need strict checking. */
1858
1859static bool
1860m68k_decompose_index (rtx x, bool strict_p, struct m68k_address *address)
1861{
1862 int scale;
1863
1864 /* Check for a scale factor. */
1865 scale = 1;
1866 if ((TARGET_68020 || TARGET_COLDFIRE)
1867 && GET_CODE (x) == MULT
1868 && GET_CODE (XEXP (x, 1)) == CONST_INT
1869 && (INTVAL (XEXP (x, 1)) == 2
1870 || INTVAL (XEXP (x, 1)) == 4
1871 || (INTVAL (XEXP (x, 1)) == 8
1872 && (TARGET_COLDFIRE_FPU || !TARGET_COLDFIRE))))
1873 {
1874 scale = INTVAL (XEXP (x, 1));
1875 x = XEXP (x, 0);
1876 }
1877
1878 /* Check for a word extension. */
1879 if (!TARGET_COLDFIRE
1880 && GET_CODE (x) == SIGN_EXTEND
1881 && GET_MODE (XEXP (x, 0)) == HImode)
1882 x = XEXP (x, 0);
1883
1884 if (m68k_legitimate_index_reg_p (x, strict_p))
1885 {
1886 address->scale = scale;
1887 address->index = x;
1888 return true;
1889 }
1890
1891 return false;
1892}
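(Illustration only; index_scale_ok is a hypothetical helper, not part of the GCC sources. It merely restates the scale-factor rule that m68k_decompose_index applies above.)

/* Scaled index registers exist only on the 68020 and up and on ColdFire;
   a scale of 8 is additionally rejected on ColdFire unless a ColdFire FPU
   is present.  A scale of 1 simply means no MULT was matched.  */
int
index_scale_ok (int scale, int target_68020, int target_coldfire,
                int target_coldfire_fpu)
{
  if (scale == 1)
    return 1;
  if (!(target_68020 || target_coldfire))
    return 0;
  if (scale == 2 || scale == 4)
    return 1;
  if (scale == 8)
    return target_coldfire_fpu || !target_coldfire;
  return 0;
}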
1893
7ffb5e78
RS
1894/* Return true if X is an illegitimate symbolic constant. */
1895
1896bool
1897m68k_illegitimate_symbolic_constant_p (rtx x)
1898{
1899 rtx base, offset;
1900
1901 if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P)
1902 {
1903 split_const (x, &base, &offset);
1904 if (GET_CODE (base) == SYMBOL_REF
1905 && !offset_within_block_p (base, INTVAL (offset)))
1906 return true;
1907 }
75df395f 1908 return m68k_tls_reference_p (x, false);
7ffb5e78
RS
1909}
1910
fbbf66e7
RS
1911/* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
1912
1913static bool
ef4bddc2 1914m68k_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
fbbf66e7
RS
1915{
1916 return m68k_illegitimate_symbolic_constant_p (x);
1917}
1918
fc2241eb
RS
1919/* Return true if X is a legitimate constant address that can reach
1920 bytes in the range [X, X + REACH). STRICT_P says whether we need
1921 strict checking. */
1922
1923static bool
1924m68k_legitimate_constant_address_p (rtx x, unsigned int reach, bool strict_p)
1925{
1926 rtx base, offset;
1927
1928 if (!CONSTANT_ADDRESS_P (x))
1929 return false;
1930
1931 if (flag_pic
1932 && !(strict_p && TARGET_PCREL)
1933 && symbolic_operand (x, VOIDmode))
1934 return false;
1935
1936 if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P && reach > 1)
1937 {
1938 split_const (x, &base, &offset);
1939 if (GET_CODE (base) == SYMBOL_REF
1940 && !offset_within_block_p (base, INTVAL (offset) + reach - 1))
1941 return false;
1942 }
1943
75df395f 1944 return !m68k_tls_reference_p (x, false);
fc2241eb
RS
1945}
1946
1947/* Return true if X is a LABEL_REF for a jump table. Assume that unplaced
1948 labels will become jump tables. */
1949
1950static bool
1951m68k_jump_table_ref_p (rtx x)
1952{
1953 if (GET_CODE (x) != LABEL_REF)
1954 return false;
1955
b32d5189
DM
1956 rtx_insn *insn = as_a <rtx_insn *> (XEXP (x, 0));
1957 if (!NEXT_INSN (insn) && !PREV_INSN (insn))
fc2241eb
RS
1958 return true;
1959
b32d5189
DM
1960 insn = next_nonnote_insn (insn);
1961 return insn && JUMP_TABLE_DATA_P (insn);
fc2241eb
RS
1962}
1963
1964/* Return true if X is a legitimate address for values of mode MODE.
1965 STRICT_P says whether strict checking is needed. If the address
1966 is valid, describe its components in *ADDRESS. */
1967
1968static bool
ef4bddc2 1969m68k_decompose_address (machine_mode mode, rtx x,
fc2241eb
RS
1970 bool strict_p, struct m68k_address *address)
1971{
1972 unsigned int reach;
1973
1974 memset (address, 0, sizeof (*address));
1975
1976 if (mode == BLKmode)
1977 reach = 1;
1978 else
1979 reach = GET_MODE_SIZE (mode);
1980
1981 /* Check for (An) (mode 2). */
1982 if (m68k_legitimate_base_reg_p (x, strict_p))
1983 {
1984 address->base = x;
1985 return true;
1986 }
1987
1988 /* Check for -(An) and (An)+ (modes 3 and 4). */
1989 if ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_INC)
1990 && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
1991 {
1992 address->code = GET_CODE (x);
1993 address->base = XEXP (x, 0);
1994 return true;
1995 }
1996
1997 /* Check for (d16,An) (mode 5). */
1998 if (GET_CODE (x) == PLUS
1999 && GET_CODE (XEXP (x, 1)) == CONST_INT
2000 && IN_RANGE (INTVAL (XEXP (x, 1)), -0x8000, 0x8000 - reach)
2001 && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
2002 {
2003 address->base = XEXP (x, 0);
2004 address->offset = XEXP (x, 1);
2005 return true;
2006 }
2007
2008 /* Check for GOT loads. These are (bd,An,Xn) addresses if
2009 TARGET_68020 && flag_pic == 2, otherwise they are (d16,An)
2010 addresses. */
75df395f
MK
2011 if (GET_CODE (x) == PLUS
2012 && XEXP (x, 0) == pic_offset_table_rtx)
fc2241eb 2013 {
75df395f
MK
2014 /* As we are processing a PLUS, do not unwrap RELOC32 symbols --
2015 they are invalid in this context. */
2016 if (m68k_unwrap_symbol (XEXP (x, 1), false) != XEXP (x, 1))
2017 {
2018 address->base = XEXP (x, 0);
2019 address->offset = XEXP (x, 1);
2020 return true;
2021 }
fc2241eb
RS
2022 }
2023
2024 /* The ColdFire FPU only accepts addressing modes 2-5. */
2025 if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
2026 return false;
2027
2028 /* Check for (xxx).w and (xxx).l. Also, in the TARGET_PCREL case,
2029 check for (d16,PC) or (bd,PC,Xn) with a suppressed index register.
2030 All these modes are variations of mode 7. */
2031 if (m68k_legitimate_constant_address_p (x, reach, strict_p))
2032 {
2033 address->offset = x;
2034 return true;
2035 }
2036
2037 /* Check for (d8,PC,Xn), a mode 7 form. This case is needed for
2038 tablejumps.
2039
2040 ??? do_tablejump creates these addresses before placing the target
2041 label, so we have to assume that unplaced labels are jump table
2042 references. It seems unlikely that we would ever generate indexed
2043 accesses to unplaced labels in other cases. */
2044 if (GET_CODE (x) == PLUS
2045 && m68k_jump_table_ref_p (XEXP (x, 1))
2046 && m68k_decompose_index (XEXP (x, 0), strict_p, address))
2047 {
2048 address->offset = XEXP (x, 1);
2049 return true;
2050 }
2051
2052 /* Everything hereafter deals with (d8,An,Xn.SIZE*SCALE) or
2053 (bd,An,Xn.SIZE*SCALE) addresses. */
2054
2055 if (TARGET_68020)
2056 {
2057 /* Check for a nonzero base displacement. */
2058 if (GET_CODE (x) == PLUS
2059 && m68k_legitimate_constant_address_p (XEXP (x, 1), reach, strict_p))
2060 {
2061 address->offset = XEXP (x, 1);
2062 x = XEXP (x, 0);
2063 }
2064
2065 /* Check for a suppressed index register. */
2066 if (m68k_legitimate_base_reg_p (x, strict_p))
2067 {
2068 address->base = x;
2069 return true;
2070 }
2071
2072 /* Check for a suppressed base register. Do not allow this case
2073 for non-symbolic offsets as it effectively gives gcc freedom
2074 to treat data registers as base registers, which can generate
2075 worse code. */
2076 if (address->offset
2077 && symbolic_operand (address->offset, VOIDmode)
2078 && m68k_decompose_index (x, strict_p, address))
2079 return true;
2080 }
2081 else
2082 {
2083 /* Check for a nonzero base displacement. */
2084 if (GET_CODE (x) == PLUS
2085 && GET_CODE (XEXP (x, 1)) == CONST_INT
2086 && IN_RANGE (INTVAL (XEXP (x, 1)), -0x80, 0x80 - reach))
2087 {
2088 address->offset = XEXP (x, 1);
2089 x = XEXP (x, 0);
2090 }
2091 }
2092
2093 /* We now expect the sum of a base and an index. */
2094 if (GET_CODE (x) == PLUS)
2095 {
2096 if (m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p)
2097 && m68k_decompose_index (XEXP (x, 1), strict_p, address))
2098 {
2099 address->base = XEXP (x, 0);
2100 return true;
2101 }
2102
2103 if (m68k_legitimate_base_reg_p (XEXP (x, 1), strict_p)
2104 && m68k_decompose_index (XEXP (x, 0), strict_p, address))
2105 {
2106 address->base = XEXP (x, 1);
2107 return true;
2108 }
2109 }
2110 return false;
2111}
2112
2113/* Return true if X is a legitimate address for values of mode MODE.
2114 STRICT_P says whether strict checking is needed. */
2115
2116bool
ef4bddc2 2117m68k_legitimate_address_p (machine_mode mode, rtx x, bool strict_p)
fc2241eb
RS
2118{
2119 struct m68k_address address;
2120
2121 return m68k_decompose_address (mode, x, strict_p, &address);
2122}
2123
2124/* Return true if X is a memory, describing its address in ADDRESS if so.
2125 Apply strict checking if called during or after reload. */
2126
2127static bool
2128m68k_legitimate_mem_p (rtx x, struct m68k_address *address)
2129{
2130 return (MEM_P (x)
2131 && m68k_decompose_address (GET_MODE (x), XEXP (x, 0),
2132 reload_in_progress || reload_completed,
2133 address));
2134}
2135
1a627b35
RS
2136/* Implement TARGET_LEGITIMATE_CONSTANT_P. */
2137
2138bool
ef4bddc2 2139m68k_legitimate_constant_p (machine_mode mode, rtx x)
1a627b35
RS
2140{
2141 return mode != XFmode && !m68k_illegitimate_symbolic_constant_p (x);
2142}
2143
fc2241eb
RS
2144/* Return true if X matches the 'Q' constraint. It must be a memory
2145 with a base address and no constant offset or index. */
2146
2147bool
2148m68k_matches_q_p (rtx x)
2149{
2150 struct m68k_address address;
2151
2152 return (m68k_legitimate_mem_p (x, &address)
2153 && address.code == UNKNOWN
2154 && address.base
2155 && !address.offset
2156 && !address.index);
2157}
2158
2159/* Return true if X matches the 'U' constraint. It must be a base address
2160 with a constant offset and no index. */
2161
2162bool
2163m68k_matches_u_p (rtx x)
2164{
2165 struct m68k_address address;
2166
2167 return (m68k_legitimate_mem_p (x, &address)
2168 && address.code == UNKNOWN
2169 && address.base
2170 && address.offset
2171 && !address.index);
2172}
2173
75df395f
MK
2174/* Return GOT pointer. */
2175
2176static rtx
2177m68k_get_gp (void)
2178{
2179 if (pic_offset_table_rtx == NULL_RTX)
2180 pic_offset_table_rtx = gen_rtx_REG (Pmode, PIC_REG);
2181
2182 crtl->uses_pic_offset_table = 1;
2183
2184 return pic_offset_table_rtx;
2185}
2186
2187/* M68K relocations, used to distinguish GOT and TLS relocations in UNSPEC
2188 wrappers. */
2189enum m68k_reloc { RELOC_GOT, RELOC_TLSGD, RELOC_TLSLDM, RELOC_TLSLDO,
2190 RELOC_TLSIE, RELOC_TLSLE };
2191
2192#define TLS_RELOC_P(RELOC) ((RELOC) != RELOC_GOT)
2193
2194/* Wrap symbol X into unspec representing relocation RELOC.
2195 BASE_REG - register that should be added to the result.
2196 TEMP_REG - if non-null, temporary register. */
2197
2198static rtx
2199m68k_wrap_symbol (rtx x, enum m68k_reloc reloc, rtx base_reg, rtx temp_reg)
2200{
2201 bool use_x_p;
2202
2203 use_x_p = (base_reg == pic_offset_table_rtx) ? TARGET_XGOT : TARGET_XTLS;
2204
2205 if (TARGET_COLDFIRE && use_x_p)
 2206 /* When compiling with the -mx{got, tls} switch, the code will look like this:
2207
2208 move.l <X>@<RELOC>,<TEMP_REG>
2209 add.l <BASE_REG>,<TEMP_REG> */
2210 {
 2211 /* Wrap X in UNSPEC_??? to tell m68k_output_addr_const_extra
 2212 to put @RELOC after the reference. */
2213 x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)),
2214 UNSPEC_RELOC32);
2215 x = gen_rtx_CONST (Pmode, x);
2216
2217 if (temp_reg == NULL)
2218 {
2219 gcc_assert (can_create_pseudo_p ());
2220 temp_reg = gen_reg_rtx (Pmode);
2221 }
2222
2223 emit_move_insn (temp_reg, x);
2224 emit_insn (gen_addsi3 (temp_reg, temp_reg, base_reg));
2225 x = temp_reg;
2226 }
2227 else
2228 {
2229 x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)),
2230 UNSPEC_RELOC16);
2231 x = gen_rtx_CONST (Pmode, x);
2232
2233 x = gen_rtx_PLUS (Pmode, base_reg, x);
2234 }
2235
2236 return x;
2237}
2238
2239/* Helper for m68k_unwrap_symbol.
2240 Also, if unwrapping was successful (that is if (ORIG != <return value>)),
 2241 sets *RELOC_PTR to the relocation type for the symbol. */
2242
2243static rtx
2244m68k_unwrap_symbol_1 (rtx orig, bool unwrap_reloc32_p,
2245 enum m68k_reloc *reloc_ptr)
2246{
2247 if (GET_CODE (orig) == CONST)
2248 {
2249 rtx x;
2250 enum m68k_reloc dummy;
2251
2252 x = XEXP (orig, 0);
2253
2254 if (reloc_ptr == NULL)
2255 reloc_ptr = &dummy;
2256
2257 /* Handle an addend. */
2258 if ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS)
2259 && CONST_INT_P (XEXP (x, 1)))
2260 x = XEXP (x, 0);
2261
2262 if (GET_CODE (x) == UNSPEC)
2263 {
2264 switch (XINT (x, 1))
2265 {
2266 case UNSPEC_RELOC16:
2267 orig = XVECEXP (x, 0, 0);
2268 *reloc_ptr = (enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1));
2269 break;
2270
2271 case UNSPEC_RELOC32:
2272 if (unwrap_reloc32_p)
2273 {
2274 orig = XVECEXP (x, 0, 0);
2275 *reloc_ptr = (enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1));
2276 }
2277 break;
2278
2279 default:
2280 break;
2281 }
2282 }
2283 }
2284
2285 return orig;
2286}
2287
2288/* Unwrap symbol from UNSPEC_RELOC16 and, if unwrap_reloc32_p,
2289 UNSPEC_RELOC32 wrappers. */
2290
2291rtx
2292m68k_unwrap_symbol (rtx orig, bool unwrap_reloc32_p)
2293{
2294 return m68k_unwrap_symbol_1 (orig, unwrap_reloc32_p, NULL);
2295}
2296
75df395f
MK
2297/* Prescan insn before outputting assembler for it. */
2298
2299void
c85e862a 2300m68k_final_prescan_insn (rtx_insn *insn ATTRIBUTE_UNUSED,
75df395f
MK
2301 rtx *operands, int n_operands)
2302{
2303 int i;
2304
 2305 /* Combine and, possibly, other optimizations may do a good job of
 2306 converting
2307 (const (unspec [(symbol)]))
2308 into
2309 (const (plus (unspec [(symbol)])
2310 (const_int N))).
2311 The problem with this is emitting @TLS or @GOT decorations.
2312 The decoration is emitted when processing (unspec), so the
2313 result would be "#symbol@TLSLE+N" instead of "#symbol+N@TLSLE".
2314
2315 It seems that the easiest solution to this is to convert such
2316 operands to
2317 (const (unspec [(plus (symbol)
2318 (const_int N))])).
 2319 Note that the top level of the operand remains intact, so we don't have
2320 to patch up anything outside of the operand. */
2321
82eee4f1 2322 subrtx_var_iterator::array_type array;
75df395f
MK
2323 for (i = 0; i < n_operands; ++i)
2324 {
2325 rtx op;
2326
2327 op = operands[i];
2328
82eee4f1
RS
2329 FOR_EACH_SUBRTX_VAR (iter, array, op, ALL)
2330 {
2331 rtx x = *iter;
2332 if (m68k_unwrap_symbol (x, true) != x)
2333 {
2334 rtx plus;
2335
2336 gcc_assert (GET_CODE (x) == CONST);
2337 plus = XEXP (x, 0);
2338
2339 if (GET_CODE (plus) == PLUS || GET_CODE (plus) == MINUS)
2340 {
2341 rtx unspec;
2342 rtx addend;
2343
2344 unspec = XEXP (plus, 0);
2345 gcc_assert (GET_CODE (unspec) == UNSPEC);
2346 addend = XEXP (plus, 1);
2347 gcc_assert (CONST_INT_P (addend));
2348
2349 /* We now have all the pieces, rearrange them. */
2350
2351 /* Move symbol to plus. */
2352 XEXP (plus, 0) = XVECEXP (unspec, 0, 0);
2353
2354 /* Move plus inside unspec. */
2355 XVECEXP (unspec, 0, 0) = plus;
2356
2357 /* Move unspec to top level of const. */
2358 XEXP (x, 0) = unspec;
2359 }
2360 iter.skip_subrtxes ();
2361 }
2362 }
75df395f
MK
2363 }
2364}
2365
2366/* Move X to a register and add REG_EQUAL note pointing to ORIG.
2367 If REG is non-null, use it; generate new pseudo otherwise. */
2368
2369static rtx
2370m68k_move_to_reg (rtx x, rtx orig, rtx reg)
2371{
c85e862a 2372 rtx_insn *insn;
75df395f
MK
2373
2374 if (reg == NULL_RTX)
2375 {
2376 gcc_assert (can_create_pseudo_p ());
2377 reg = gen_reg_rtx (Pmode);
2378 }
2379
2380 insn = emit_move_insn (reg, x);
2381 /* Put a REG_EQUAL note on this insn, so that it can be optimized
2382 by loop. */
2383 set_unique_reg_note (insn, REG_EQUAL, orig);
2384
2385 return reg;
2386}
2387
2388/* Does the same as m68k_wrap_symbol, but returns a memory reference to
 2389 the GOT slot. */
2390
2391static rtx
2392m68k_wrap_symbol_into_got_ref (rtx x, enum m68k_reloc reloc, rtx temp_reg)
2393{
2394 x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), temp_reg);
2395
2396 x = gen_rtx_MEM (Pmode, x);
2397 MEM_READONLY_P (x) = 1;
2398
2399 return x;
2400}
2401
79e68feb
RS
2402/* Legitimize PIC addresses. If the address is already
2403 position-independent, we return ORIG. Newly generated
2404 position-independent addresses go to REG. If we need more
2405 than one register, we lose.
2406
2407 An address is legitimized by making an indirect reference
2408 through the Global Offset Table with the name of the symbol
2409 used as an offset.
2410
2411 The assembler and linker are responsible for placing the
2412 address of the symbol in the GOT. The function prologue
2413 is responsible for initializing a5 to the starting address
2414 of the GOT.
2415
2416 The assembler is also responsible for translating a symbol name
2417 into a constant displacement from the start of the GOT.
2418
2419 A quick example may make things a little clearer:
2420
 2421 When not generating PIC code, to store the value 12345 into _foo
2422 we would generate the following code:
2423
2424 movel #12345, _foo
2425
 2426 When generating PIC code, two transformations are made. First, the compiler
2427 loads the address of foo into a register. So the first transformation makes:
2428
2429 lea _foo, a0
2430 movel #12345, a0@
2431
2432 The code in movsi will intercept the lea instruction and call this
2433 routine which will transform the instructions into:
2434
2435 movel a5@(_foo:w), a0
2436 movel #12345, a0@
2437
2438
2439 That (in a nutshell) is how *all* symbol and label references are
2440 handled. */
2441
2442rtx
ef4bddc2 2443legitimize_pic_address (rtx orig, machine_mode mode ATTRIBUTE_UNUSED,
8a4a2253 2444 rtx reg)
79e68feb
RS
2445{
2446 rtx pic_ref = orig;
2447
2448 /* First handle a simple SYMBOL_REF or LABEL_REF */
2449 if (GET_CODE (orig) == SYMBOL_REF || GET_CODE (orig) == LABEL_REF)
2450 {
4761e388 2451 gcc_assert (reg);
79e68feb 2452
75df395f
MK
2453 pic_ref = m68k_wrap_symbol_into_got_ref (orig, RELOC_GOT, reg);
2454 pic_ref = m68k_move_to_reg (pic_ref, orig, reg);
79e68feb
RS
2455 }
2456 else if (GET_CODE (orig) == CONST)
2457 {
1d8eaa6b 2458 rtx base;
79e68feb 2459
b2e08ed4 2460 /* Make sure this has not already been legitimized. */
75df395f 2461 if (m68k_unwrap_symbol (orig, true) != orig)
79e68feb
RS
2462 return orig;
2463
4761e388 2464 gcc_assert (reg);
79e68feb
RS
2465
2466 /* legitimize both operands of the PLUS */
4761e388
NS
2467 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
2468
2469 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
2470 orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
2471 base == reg ? 0 : reg);
79e68feb
RS
2472
2473 if (GET_CODE (orig) == CONST_INT)
0a81f074 2474 pic_ref = plus_constant (Pmode, base, INTVAL (orig));
75df395f
MK
2475 else
2476 pic_ref = gen_rtx_PLUS (Pmode, base, orig);
79e68feb 2477 }
75df395f 2478
79e68feb
RS
2479 return pic_ref;
2480}
2481
75df395f
MK
2482/* The __tls_get_addr symbol. */
2483static GTY(()) rtx m68k_tls_get_addr;
2484
2485/* Return SYMBOL_REF for __tls_get_addr. */
2486
2487static rtx
2488m68k_get_tls_get_addr (void)
2489{
2490 if (m68k_tls_get_addr == NULL_RTX)
2491 m68k_tls_get_addr = init_one_libfunc ("__tls_get_addr");
2492
2493 return m68k_tls_get_addr;
2494}
2495
2496/* Return libcall result in A0 instead of usual D0. */
2497static bool m68k_libcall_value_in_a0_p = false;
2498
2499/* Emit instruction sequence that calls __tls_get_addr. X is
2500 the TLS symbol we are referencing and RELOC is the symbol type to use
2501 (either TLSGD or TLSLDM). EQV is the REG_EQUAL note for the sequence
2502 emitted. A pseudo register with result of __tls_get_addr call is
2503 returned. */
2504
2505static rtx
2506m68k_call_tls_get_addr (rtx x, rtx eqv, enum m68k_reloc reloc)
2507{
2508 rtx a0;
c85e862a 2509 rtx_insn *insns;
75df395f
MK
2510 rtx dest;
2511
2512 /* Emit the call sequence. */
2513 start_sequence ();
2514
2515 /* FIXME: Unfortunately, emit_library_call_value does not
2516 consider (plus (%a5) (const (unspec))) to be a good enough
2517 operand for push, so it forces it into a register. The bad
 2518 thing about this is that the combiner, due to copy propagation and other
 2519 optimizations, sometimes cannot fix this up later. As a consequence,
 2520 an additional register may be allocated, resulting in a spill.
 2521 For reference, see the argument-processing loops in
 2522 calls.c:emit_library_call_value_1.
 2523 For a testcase, see gcc.target/m68k/tls-{gd, ld}.c */
2524 x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), NULL_RTX);
2525
2526 /* __tls_get_addr() is not a libcall, but emitting a libcall_value
 2527 is the simplest way of generating a call. The difference between
 2528 __tls_get_addr() and an ordinary libcall is that its result is returned
 2529 in A0 instead of the usual D0. To work around this, we use
 2530 m68k_libcall_value_in_a0_p, which temporarily switches the libcall return register to A0. */
2531
2532 m68k_libcall_value_in_a0_p = true;
2533 a0 = emit_library_call_value (m68k_get_tls_get_addr (), NULL_RTX, LCT_PURE,
2534 Pmode, 1, x, Pmode);
2535 m68k_libcall_value_in_a0_p = false;
2536
2537 insns = get_insns ();
2538 end_sequence ();
2539
2540 gcc_assert (can_create_pseudo_p ());
2541 dest = gen_reg_rtx (Pmode);
2542 emit_libcall_block (insns, dest, a0, eqv);
2543
2544 return dest;
2545}
2546
2547/* The __m68k_read_tp symbol. */
2548static GTY(()) rtx m68k_read_tp;
2549
2550/* Return SYMBOL_REF for __m68k_read_tp. */
2551
2552static rtx
2553m68k_get_m68k_read_tp (void)
2554{
2555 if (m68k_read_tp == NULL_RTX)
2556 m68k_read_tp = init_one_libfunc ("__m68k_read_tp");
2557
2558 return m68k_read_tp;
2559}
2560
2561/* Emit instruction sequence that calls __m68k_read_tp.
2562 A pseudo register with result of __m68k_read_tp call is returned. */
2563
2564static rtx
2565m68k_call_m68k_read_tp (void)
2566{
2567 rtx a0;
2568 rtx eqv;
c85e862a 2569 rtx_insn *insns;
75df395f
MK
2570 rtx dest;
2571
2572 start_sequence ();
2573
2574 /* __m68k_read_tp() is not a libcall, but emitting a libcall_value
 2575 is the simplest way of generating a call. The difference between
 2576 __m68k_read_tp() and an ordinary libcall is that its result is returned
 2577 in A0 instead of the usual D0. To work around this, we use
 2578 m68k_libcall_value_in_a0_p, which temporarily switches the libcall return register to A0. */
2579
2580 /* Emit the call sequence. */
2581 m68k_libcall_value_in_a0_p = true;
2582 a0 = emit_library_call_value (m68k_get_m68k_read_tp (), NULL_RTX, LCT_PURE,
2583 Pmode, 0);
2584 m68k_libcall_value_in_a0_p = false;
2585 insns = get_insns ();
2586 end_sequence ();
2587
2588 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2589 share the m68k_read_tp result with other IE/LE model accesses. */
2590 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const1_rtx), UNSPEC_RELOC32);
2591
2592 gcc_assert (can_create_pseudo_p ());
2593 dest = gen_reg_rtx (Pmode);
2594 emit_libcall_block (insns, dest, a0, eqv);
2595
2596 return dest;
2597}
2598
2599/* Return a legitimized address for accessing TLS SYMBOL_REF X.
 2600 For explanations of the instruction sequences, see the TLS/NPTL ABI for m68k and
2601 ColdFire. */
2602
2603rtx
2604m68k_legitimize_tls_address (rtx orig)
2605{
2606 switch (SYMBOL_REF_TLS_MODEL (orig))
2607 {
2608 case TLS_MODEL_GLOBAL_DYNAMIC:
2609 orig = m68k_call_tls_get_addr (orig, orig, RELOC_TLSGD);
2610 break;
2611
2612 case TLS_MODEL_LOCAL_DYNAMIC:
2613 {
2614 rtx eqv;
2615 rtx a0;
2616 rtx x;
2617
2618 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2619 share the LDM result with other LD model accesses. */
2620 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
2621 UNSPEC_RELOC32);
2622
2623 a0 = m68k_call_tls_get_addr (orig, eqv, RELOC_TLSLDM);
2624
2625 x = m68k_wrap_symbol (orig, RELOC_TLSLDO, a0, NULL_RTX);
2626
2627 if (can_create_pseudo_p ())
2628 x = m68k_move_to_reg (x, orig, NULL_RTX);
2629
2630 orig = x;
2631 break;
2632 }
2633
2634 case TLS_MODEL_INITIAL_EXEC:
2635 {
2636 rtx a0;
2637 rtx x;
2638
2639 a0 = m68k_call_m68k_read_tp ();
2640
2641 x = m68k_wrap_symbol_into_got_ref (orig, RELOC_TLSIE, NULL_RTX);
2642 x = gen_rtx_PLUS (Pmode, x, a0);
2643
2644 if (can_create_pseudo_p ())
2645 x = m68k_move_to_reg (x, orig, NULL_RTX);
2646
2647 orig = x;
2648 break;
2649 }
2650
2651 case TLS_MODEL_LOCAL_EXEC:
2652 {
2653 rtx a0;
2654 rtx x;
2655
2656 a0 = m68k_call_m68k_read_tp ();
2657
2658 x = m68k_wrap_symbol (orig, RELOC_TLSLE, a0, NULL_RTX);
2659
2660 if (can_create_pseudo_p ())
2661 x = m68k_move_to_reg (x, orig, NULL_RTX);
2662
2663 orig = x;
2664 break;
2665 }
2666
2667 default:
2668 gcc_unreachable ();
2669 }
2670
2671 return orig;
2672}
2673
2674/* Return true if X is a TLS symbol. */
2675
2676static bool
2677m68k_tls_symbol_p (rtx x)
2678{
2679 if (!TARGET_HAVE_TLS)
2680 return false;
2681
2682 if (GET_CODE (x) != SYMBOL_REF)
2683 return false;
2684
2685 return SYMBOL_REF_TLS_MODEL (x) != 0;
2686}
2687
75df395f
MK
2688/* If !LEGITIMATE_P, return true if X is a TLS symbol reference,
 2689 though an illegitimate one.
2690 If LEGITIMATE_P, return true if X is a legitimate TLS symbol reference. */
2691
2692bool
2693m68k_tls_reference_p (rtx x, bool legitimate_p)
2694{
2695 if (!TARGET_HAVE_TLS)
2696 return false;
2697
2698 if (!legitimate_p)
a5784152
RS
2699 {
2700 subrtx_var_iterator::array_type array;
2701 FOR_EACH_SUBRTX_VAR (iter, array, x, ALL)
2702 {
2703 rtx x = *iter;
2704
2705 /* Note: this is not the same as m68k_tls_symbol_p. */
2706 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0)
2707 return true;
2708
2709 /* Don't recurse into legitimate TLS references. */
2710 if (m68k_tls_reference_p (x, true))
2711 iter.skip_subrtxes ();
2712 }
2713 return false;
2714 }
75df395f
MK
2715 else
2716 {
2717 enum m68k_reloc reloc = RELOC_GOT;
2718
2719 return (m68k_unwrap_symbol_1 (x, true, &reloc) != x
2720 && TLS_RELOC_P (reloc));
2721 }
2722}
2723
79e68feb 2724\f
0ce6f9fb 2725
a0a7fbc9 2726#define USE_MOVQ(i) ((unsigned) ((i) + 128) <= 255)
0ce6f9fb 2727
bda2a571
RS
2728/* Return the type of move that should be used for integer I. */
2729
c47b0cb4
MK
2730M68K_CONST_METHOD
2731m68k_const_method (HOST_WIDE_INT i)
0ce6f9fb 2732{
0ce6f9fb
RK
2733 unsigned u;
2734
6910dd70 2735 if (USE_MOVQ (i))
0ce6f9fb 2736 return MOVQ;
24092242 2737
c16eadc7 2738 /* The ColdFire doesn't have byte or word operations. */
97c55091 2739 /* FIXME: This may not be useful for the m68060 either. */
85dbf7e2 2740 if (!TARGET_COLDFIRE)
24092242
RK
2741 {
 2742 /* If -256 < N < 256 but N is not in range for a moveq,
7a1929e1 2743 N^ff will be, so use moveq #N^ff, dreg; not.b dreg. */
24092242
RK
2744 if (USE_MOVQ (i ^ 0xff))
2745 return NOTB;
2746 /* Likewise, try with not.w */
2747 if (USE_MOVQ (i ^ 0xffff))
2748 return NOTW;
2749 /* This is the only value where neg.w is useful */
2750 if (i == -65408)
2751 return NEGW;
24092242 2752 }
28bad6d1 2753
5e04daf3
PB
2754 /* Try also with swap. */
2755 u = i;
2756 if (USE_MOVQ ((u >> 16) | (u << 16)))
2757 return SWAP;
2758
986e74d5 2759 if (TARGET_ISAB)
28bad6d1 2760 {
72edf146 2761 /* Try using MVZ/MVS with an immediate value to load constants. */
28bad6d1
PB
2762 if (i >= 0 && i <= 65535)
2763 return MVZ;
2764 if (i >= -32768 && i <= 32767)
2765 return MVS;
2766 }
2767
0ce6f9fb
RK
2768 /* Otherwise, use move.l */
2769 return MOVL;
2770}
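(Purely illustrative, standalone sketch; classify() below is hypothetical and simply mirrors the checks of m68k_const_method for a non-ColdFire, non-ISA-B target, so a few sample constants can be classified by hand.)

#include <stdio.h>

#define USE_MOVQ(i) ((unsigned) ((i) + 128) <= 255)

/* Same ordering of checks as m68k_const_method above, with the ColdFire
   and ISA B special cases left out.  */
static const char *
classify (long i)
{
  unsigned u = i;

  if (USE_MOVQ (i))
    return "MOVQ";                         /* moveq #i,%dn */
  if (USE_MOVQ (i ^ 0xff))
    return "NOTB";                         /* moveq + not.b */
  if (USE_MOVQ (i ^ 0xffff))
    return "NOTW";                         /* moveq + not.w */
  if (i == -65408)
    return "NEGW";                         /* moveq #-128 + neg.w */
  if (USE_MOVQ ((u >> 16) | (u << 16)))
    return "SWAP";                         /* moveq + swap */
  return "MOVL";                           /* move.l #i,%dn */
}

int
main (void)
{
  /* Prints: MOVQ NOTB NOTW SWAP MOVL */
  printf ("%s %s %s %s %s\n", classify (100), classify (255),
          classify (65535), classify (0x10000), classify (0x12345678));
  return 0;
}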
2771
bda2a571
RS
2772/* Return the cost of moving constant I into a data register. */
2773
3c50106f 2774static int
bda2a571 2775const_int_cost (HOST_WIDE_INT i)
0ce6f9fb 2776{
c47b0cb4 2777 switch (m68k_const_method (i))
0ce6f9fb 2778 {
a0a7fbc9
AS
2779 case MOVQ:
2780 /* Constants between -128 and 127 are cheap due to moveq. */
2781 return 0;
2782 case MVZ:
2783 case MVS:
2784 case NOTB:
2785 case NOTW:
2786 case NEGW:
2787 case SWAP:
2788 /* Constants easily generated by moveq + not.b/not.w/neg.w/swap. */
2789 return 1;
2790 case MOVL:
2791 return 2;
2792 default:
2793 gcc_unreachable ();
0ce6f9fb
RK
2794 }
2795}
2796
3c50106f 2797static bool
e548c9df
AM
2798m68k_rtx_costs (rtx x, machine_mode mode, int outer_code,
2799 int opno ATTRIBUTE_UNUSED,
68f932c4 2800 int *total, bool speed ATTRIBUTE_UNUSED)
3c50106f 2801{
e548c9df
AM
2802 int code = GET_CODE (x);
2803
3c50106f
RH
2804 switch (code)
2805 {
2806 case CONST_INT:
2807 /* Constant zero is super cheap due to clr instruction. */
2808 if (x == const0_rtx)
2809 *total = 0;
2810 else
bda2a571 2811 *total = const_int_cost (INTVAL (x));
3c50106f
RH
2812 return true;
2813
2814 case CONST:
2815 case LABEL_REF:
2816 case SYMBOL_REF:
2817 *total = 3;
2818 return true;
2819
2820 case CONST_DOUBLE:
2821 /* Make 0.0 cheaper than other floating constants to
2822 encourage creating tstsf and tstdf insns. */
2823 if (outer_code == COMPARE
2824 && (x == CONST0_RTX (SFmode) || x == CONST0_RTX (DFmode)))
2825 *total = 4;
2826 else
2827 *total = 5;
2828 return true;
2829
2830 /* These are vaguely right for a 68020. */
2831 /* The costs for long multiply have been adjusted to work properly
2832 in synth_mult on the 68020, relative to an average of the time
2833 for add and the time for shift, taking away a little more because
2834 sometimes move insns are needed. */
a0a7fbc9
AS
2835 /* div?.w is relatively cheaper on 68000 counted in COSTS_N_INSNS
2836 terms. */
fe95f2f7
JB
2837#define MULL_COST \
2838 (TUNE_68060 ? 2 \
2839 : TUNE_68040 ? 5 \
03b3e271
KH
2840 : (TUNE_CFV2 && TUNE_EMAC) ? 3 \
2841 : (TUNE_CFV2 && TUNE_MAC) ? 4 \
2842 : TUNE_CFV2 ? 8 \
fe95f2f7
JB
2843 : TARGET_COLDFIRE ? 3 : 13)
2844
2845#define MULW_COST \
2846 (TUNE_68060 ? 2 \
2847 : TUNE_68040 ? 3 \
03b3e271
KH
2848 : TUNE_68000_10 ? 5 \
2849 : (TUNE_CFV2 && TUNE_EMAC) ? 3 \
2850 : (TUNE_CFV2 && TUNE_MAC) ? 2 \
2851 : TUNE_CFV2 ? 8 \
fe95f2f7
JB
2852 : TARGET_COLDFIRE ? 2 : 8)
2853
2854#define DIVW_COST \
2855 (TARGET_CF_HWDIV ? 11 \
2856 : TUNE_68000_10 || TARGET_COLDFIRE ? 12 : 27)
3c50106f
RH
2857
2858 case PLUS:
2859 /* An lea costs about three times as much as a simple add. */
e548c9df 2860 if (mode == SImode
3c50106f
RH
2861 && GET_CODE (XEXP (x, 1)) == REG
2862 && GET_CODE (XEXP (x, 0)) == MULT
2863 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
2864 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2865 && (INTVAL (XEXP (XEXP (x, 0), 1)) == 2
2866 || INTVAL (XEXP (XEXP (x, 0), 1)) == 4
2867 || INTVAL (XEXP (XEXP (x, 0), 1)) == 8))
eb849993
BI
2868 {
2869 /* lea an@(dx:l:i),am */
2870 *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 2 : 3);
2871 return true;
2872 }
3c50106f
RH
2873 return false;
2874
2875 case ASHIFT:
2876 case ASHIFTRT:
2877 case LSHIFTRT:
fe95f2f7 2878 if (TUNE_68060)
3c50106f
RH
2879 {
2880 *total = COSTS_N_INSNS(1);
2881 return true;
2882 }
fe95f2f7 2883 if (TUNE_68000_10)
3c50106f
RH
2884 {
2885 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
2886 {
2887 if (INTVAL (XEXP (x, 1)) < 16)
2888 *total = COSTS_N_INSNS (2) + INTVAL (XEXP (x, 1)) / 2;
2889 else
2890 /* We're using clrw + swap for these cases. */
2891 *total = COSTS_N_INSNS (4) + (INTVAL (XEXP (x, 1)) - 16) / 2;
2892 }
2893 else
a0a7fbc9 2894 *total = COSTS_N_INSNS (10); /* Worst case. */
3c50106f
RH
2895 return true;
2896 }
2897 /* A shift by a big integer takes an extra instruction. */
2898 if (GET_CODE (XEXP (x, 1)) == CONST_INT
2899 && (INTVAL (XEXP (x, 1)) == 16))
2900 {
2901 *total = COSTS_N_INSNS (2); /* clrw;swap */
2902 return true;
2903 }
2904 if (GET_CODE (XEXP (x, 1)) == CONST_INT
2905 && !(INTVAL (XEXP (x, 1)) > 0
2906 && INTVAL (XEXP (x, 1)) <= 8))
2907 {
eb849993 2908 *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 1 : 3); /* lsr #i,dn */
3c50106f
RH
2909 return true;
2910 }
2911 return false;
2912
2913 case MULT:
2914 if ((GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
2915 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
e548c9df 2916 && mode == SImode)
3c50106f 2917 *total = COSTS_N_INSNS (MULW_COST);
e548c9df 2918 else if (mode == QImode || mode == HImode)
3c50106f
RH
2919 *total = COSTS_N_INSNS (MULW_COST);
2920 else
2921 *total = COSTS_N_INSNS (MULL_COST);
2922 return true;
2923
2924 case DIV:
2925 case UDIV:
2926 case MOD:
2927 case UMOD:
e548c9df 2928 if (mode == QImode || mode == HImode)
3c50106f 2929 *total = COSTS_N_INSNS (DIVW_COST); /* div.w */
eb849993
BI
2930 else if (TARGET_CF_HWDIV)
2931 *total = COSTS_N_INSNS (18);
3c50106f
RH
2932 else
2933 *total = COSTS_N_INSNS (43); /* div.l */
2934 return true;
2935
f90b7a5a
PB
2936 case ZERO_EXTRACT:
2937 if (outer_code == COMPARE)
2938 *total = 0;
2939 return false;
2940
3c50106f
RH
2941 default:
2942 return false;
2943 }
2944}
2945
88512ba0 2946/* Return an instruction to move CONST_INT OPERANDS[1] into data register
bda2a571
RS
2947 OPERANDS[0]. */
2948
2949static const char *
8a4a2253 2950output_move_const_into_data_reg (rtx *operands)
0ce6f9fb 2951{
bda2a571 2952 HOST_WIDE_INT i;
0ce6f9fb
RK
2953
2954 i = INTVAL (operands[1]);
c47b0cb4 2955 switch (m68k_const_method (i))
0ce6f9fb 2956 {
28bad6d1 2957 case MVZ:
28bad6d1 2958 return "mvzw %1,%0";
1cbae84f
PB
2959 case MVS:
2960 return "mvsw %1,%0";
a0a7fbc9 2961 case MOVQ:
0ce6f9fb 2962 return "moveq %1,%0";
a0a7fbc9 2963 case NOTB:
66e07510 2964 CC_STATUS_INIT;
1d8eaa6b 2965 operands[1] = GEN_INT (i ^ 0xff);
0ce6f9fb 2966 return "moveq %1,%0\n\tnot%.b %0";
a0a7fbc9 2967 case NOTW:
66e07510 2968 CC_STATUS_INIT;
1d8eaa6b 2969 operands[1] = GEN_INT (i ^ 0xffff);
0ce6f9fb 2970 return "moveq %1,%0\n\tnot%.w %0";
a0a7fbc9 2971 case NEGW:
66e07510 2972 CC_STATUS_INIT;
3b4b85c9 2973 return "moveq #-128,%0\n\tneg%.w %0";
a0a7fbc9 2974 case SWAP:
0ce6f9fb
RK
2975 {
2976 unsigned u = i;
2977
1d8eaa6b 2978 operands[1] = GEN_INT ((u << 16) | (u >> 16));
0ce6f9fb 2979 return "moveq %1,%0\n\tswap %0";
0ce6f9fb 2980 }
a0a7fbc9 2981 case MOVL:
bda2a571 2982 return "move%.l %1,%0";
a0a7fbc9 2983 default:
bda2a571 2984 gcc_unreachable ();
0ce6f9fb
RK
2985 }
2986}
2987
bda2a571 2988/* Return true if I can be handled by ISA B's mov3q instruction. */
5e04daf3 2989
bda2a571
RS
2990bool
2991valid_mov3q_const (HOST_WIDE_INT i)
2992{
2993 return TARGET_ISAB && (i == -1 || IN_RANGE (i, 1, 7));
5e04daf3
PB
2994}
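(Quick standalone check, not part of the source: on an ISA B target valid_mov3q_const accepts exactly -1 and 1..7; zero is excluded because it is handled separately, e.g. by clr in output_move_simode_const below.)

#include <assert.h>

/* Hypothetical restatement of valid_mov3q_const's range test, with
   TARGET_ISAB assumed to be true.  */
static int
mov3q_range_ok (long i)
{
  return i == -1 || (i >= 1 && i <= 7);
}

int
main (void)
{
  assert (mov3q_range_ok (-1) && mov3q_range_ok (1) && mov3q_range_ok (7));
  assert (!mov3q_range_ok (0) && !mov3q_range_ok (8) && !mov3q_range_ok (-2));
  return 0;
}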
2995
bda2a571
RS
2996/* Return an instruction to move CONST_INT OPERANDS[1] into OPERANDS[0].
2997 I is the value of OPERANDS[1]. */
5e04daf3 2998
bda2a571 2999static const char *
8a4a2253 3000output_move_simode_const (rtx *operands)
02ed0c07 3001{
bda2a571
RS
3002 rtx dest;
3003 HOST_WIDE_INT src;
3004
3005 dest = operands[0];
3006 src = INTVAL (operands[1]);
3007 if (src == 0
3008 && (DATA_REG_P (dest) || MEM_P (dest))
3197c489
RS
3009 /* clr insns on 68000 read before writing. */
3010 && ((TARGET_68010 || TARGET_COLDFIRE)
bda2a571 3011 || !(MEM_P (dest) && MEM_VOLATILE_P (dest))))
02ed0c07 3012 return "clr%.l %0";
bda2a571 3013 else if (GET_MODE (dest) == SImode && valid_mov3q_const (src))
a0a7fbc9 3014 return "mov3q%.l %1,%0";
bda2a571 3015 else if (src == 0 && ADDRESS_REG_P (dest))
38198304 3016 return "sub%.l %0,%0";
bda2a571 3017 else if (DATA_REG_P (dest))
02ed0c07 3018 return output_move_const_into_data_reg (operands);
bda2a571 3019 else if (ADDRESS_REG_P (dest) && IN_RANGE (src, -0x8000, 0x7fff))
5e04daf3 3020 {
bda2a571 3021 if (valid_mov3q_const (src))
5e04daf3
PB
3022 return "mov3q%.l %1,%0";
3023 return "move%.w %1,%0";
3024 }
bda2a571
RS
3025 else if (MEM_P (dest)
3026 && GET_CODE (XEXP (dest, 0)) == PRE_DEC
3027 && REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
3028 && IN_RANGE (src, -0x8000, 0x7fff))
5e04daf3 3029 {
bda2a571 3030 if (valid_mov3q_const (src))
5e04daf3
PB
3031 return "mov3q%.l %1,%-";
3032 return "pea %a1";
3033 }
02ed0c07
RK
3034 return "move%.l %1,%0";
3035}
3036
5505f548 3037const char *
8a4a2253 3038output_move_simode (rtx *operands)
f4e80198
RK
3039{
3040 if (GET_CODE (operands[1]) == CONST_INT)
3041 return output_move_simode_const (operands);
3042 else if ((GET_CODE (operands[1]) == SYMBOL_REF
3043 || GET_CODE (operands[1]) == CONST)
3044 && push_operand (operands[0], SImode))
3045 return "pea %a1";
3046 else if ((GET_CODE (operands[1]) == SYMBOL_REF
3047 || GET_CODE (operands[1]) == CONST)
3048 && ADDRESS_REG_P (operands[0]))
3049 return "lea %a1,%0";
3050 return "move%.l %1,%0";
3051}
3052
5505f548 3053const char *
8a4a2253 3054output_move_himode (rtx *operands)
f4e80198
RK
3055{
3056 if (GET_CODE (operands[1]) == CONST_INT)
3057 {
3058 if (operands[1] == const0_rtx
3059 && (DATA_REG_P (operands[0])
3060 || GET_CODE (operands[0]) == MEM)
3197c489
RS
3061 /* clr insns on 68000 read before writing. */
3062 && ((TARGET_68010 || TARGET_COLDFIRE)
f4e80198
RK
3063 || !(GET_CODE (operands[0]) == MEM
3064 && MEM_VOLATILE_P (operands[0]))))
3065 return "clr%.w %0";
38198304
AS
3066 else if (operands[1] == const0_rtx
3067 && ADDRESS_REG_P (operands[0]))
3068 return "sub%.l %0,%0";
f4e80198
RK
3069 else if (DATA_REG_P (operands[0])
3070 && INTVAL (operands[1]) < 128
3071 && INTVAL (operands[1]) >= -128)
a0a7fbc9 3072 return "moveq %1,%0";
f4e80198
RK
3073 else if (INTVAL (operands[1]) < 0x8000
3074 && INTVAL (operands[1]) >= -0x8000)
3075 return "move%.w %1,%0";
3076 }
3077 else if (CONSTANT_P (operands[1]))
3078 return "move%.l %1,%0";
f4e80198
RK
3079 return "move%.w %1,%0";
3080}
3081
5505f548 3082const char *
8a4a2253 3083output_move_qimode (rtx *operands)
f4e80198 3084{
102701ff 3085 /* 68k family always modifies the stack pointer by at least 2, even for
c16eadc7 3086 byte pushes. The 5200 (ColdFire) does not do this. */
4761e388 3087
a0a7fbc9 3088 /* This case is generated by pushqi1 pattern now. */
4761e388
NS
3089 gcc_assert (!(GET_CODE (operands[0]) == MEM
3090 && GET_CODE (XEXP (operands[0], 0)) == PRE_DEC
3091 && XEXP (XEXP (operands[0], 0), 0) == stack_pointer_rtx
3092 && ! ADDRESS_REG_P (operands[1])
3093 && ! TARGET_COLDFIRE));
f4e80198 3094
3197c489 3095 /* clr and st insns on 68000 read before writing. */
f4e80198 3096 if (!ADDRESS_REG_P (operands[0])
3197c489 3097 && ((TARGET_68010 || TARGET_COLDFIRE)
f4e80198
RK
3098 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3099 {
3100 if (operands[1] == const0_rtx)
3101 return "clr%.b %0";
9425fb04 3102 if ((!TARGET_COLDFIRE || DATA_REG_P (operands[0]))
f4e80198
RK
3103 && GET_CODE (operands[1]) == CONST_INT
3104 && (INTVAL (operands[1]) & 255) == 255)
3105 {
3106 CC_STATUS_INIT;
3107 return "st %0";
3108 }
3109 }
3110 if (GET_CODE (operands[1]) == CONST_INT
3111 && DATA_REG_P (operands[0])
3112 && INTVAL (operands[1]) < 128
3113 && INTVAL (operands[1]) >= -128)
a0a7fbc9 3114 return "moveq %1,%0";
38198304
AS
3115 if (operands[1] == const0_rtx && ADDRESS_REG_P (operands[0]))
3116 return "sub%.l %0,%0";
f4e80198
RK
3117 if (GET_CODE (operands[1]) != CONST_INT && CONSTANT_P (operands[1]))
3118 return "move%.l %1,%0";
c16eadc7 3119 /* 68k family (including the 5200 ColdFire) does not support byte moves to
37834fc8
JL
 3120 or from address registers. */
3121 if (ADDRESS_REG_P (operands[0]) || ADDRESS_REG_P (operands[1]))
f4e80198
RK
3122 return "move%.w %1,%0";
3123 return "move%.b %1,%0";
3124}
3125
5505f548 3126const char *
8a4a2253 3127output_move_stricthi (rtx *operands)
9b55bf04
RK
3128{
3129 if (operands[1] == const0_rtx
3197c489
RS
3130 /* clr insns on 68000 read before writing. */
3131 && ((TARGET_68010 || TARGET_COLDFIRE)
9b55bf04
RK
3132 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3133 return "clr%.w %0";
3134 return "move%.w %1,%0";
3135}
3136
5505f548 3137const char *
8a4a2253 3138output_move_strictqi (rtx *operands)
9b55bf04
RK
3139{
3140 if (operands[1] == const0_rtx
3197c489
RS
3141 /* clr insns on 68000 read before writing. */
3142 && ((TARGET_68010 || TARGET_COLDFIRE)
9b55bf04
RK
3143 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3144 return "clr%.b %0";
3145 return "move%.b %1,%0";
3146}
3147
79e68feb
RS
3148/* Return the best assembler insn template
3149 for moving operands[1] into operands[0] as a fullword. */
3150
5505f548 3151static const char *
8a4a2253 3152singlemove_string (rtx *operands)
79e68feb 3153{
02ed0c07
RK
3154 if (GET_CODE (operands[1]) == CONST_INT)
3155 return output_move_simode_const (operands);
3156 return "move%.l %1,%0";
79e68feb
RS
3157}
3158
2505bc97 3159
c47b0cb4
MK
3160/* Output assembler or rtl code to perform a doubleword move insn
3161 with operands OPERANDS.
3162 Pointers to 3 helper functions should be specified:
3163 HANDLE_REG_ADJUST to adjust a register by a small value,
3164 HANDLE_COMPADR to compute an address and
3165 HANDLE_MOVSI to move 4 bytes. */
79e68feb 3166
c47b0cb4
MK
3167static void
3168handle_move_double (rtx operands[2],
3169 void (*handle_reg_adjust) (rtx, int),
3170 void (*handle_compadr) (rtx [2]),
3171 void (*handle_movsi) (rtx [2]))
79e68feb 3172{
2505bc97
RS
3173 enum
3174 {
3175 REGOP, OFFSOP, MEMOP, PUSHOP, POPOP, CNSTOP, RNDOP
3176 } optype0, optype1;
79e68feb 3177 rtx latehalf[2];
2505bc97 3178 rtx middlehalf[2];
7f98eeb6 3179 rtx xops[2];
79e68feb 3180 rtx addreg0 = 0, addreg1 = 0;
7f98eeb6 3181 int dest_overlapped_low = 0;
184916bc 3182 int size = GET_MODE_SIZE (GET_MODE (operands[0]));
2505bc97
RS
3183
3184 middlehalf[0] = 0;
3185 middlehalf[1] = 0;
79e68feb
RS
3186
3187 /* First classify both operands. */
3188
3189 if (REG_P (operands[0]))
3190 optype0 = REGOP;
3191 else if (offsettable_memref_p (operands[0]))
3192 optype0 = OFFSOP;
3193 else if (GET_CODE (XEXP (operands[0], 0)) == POST_INC)
3194 optype0 = POPOP;
3195 else if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC)
3196 optype0 = PUSHOP;
3197 else if (GET_CODE (operands[0]) == MEM)
3198 optype0 = MEMOP;
3199 else
3200 optype0 = RNDOP;
3201
3202 if (REG_P (operands[1]))
3203 optype1 = REGOP;
3204 else if (CONSTANT_P (operands[1]))
3205 optype1 = CNSTOP;
3206 else if (offsettable_memref_p (operands[1]))
3207 optype1 = OFFSOP;
3208 else if (GET_CODE (XEXP (operands[1], 0)) == POST_INC)
3209 optype1 = POPOP;
3210 else if (GET_CODE (XEXP (operands[1], 0)) == PRE_DEC)
3211 optype1 = PUSHOP;
3212 else if (GET_CODE (operands[1]) == MEM)
3213 optype1 = MEMOP;
3214 else
3215 optype1 = RNDOP;
3216
4761e388
NS
3217 /* Check for the cases that the operand constraints are not supposed
3218 to allow to happen. Generating code for these cases is
3219 painful. */
3220 gcc_assert (optype0 != RNDOP && optype1 != RNDOP);
79e68feb
RS
3221
3222 /* If one operand is decrementing and one is incrementing
3223 decrement the former register explicitly
3224 and change that operand into ordinary indexing. */
3225
3226 if (optype0 == PUSHOP && optype1 == POPOP)
3227 {
3228 operands[0] = XEXP (XEXP (operands[0], 0), 0);
c47b0cb4
MK
3229
3230 handle_reg_adjust (operands[0], -size);
3231
2505bc97 3232 if (GET_MODE (operands[1]) == XFmode)
1d8eaa6b 3233 operands[0] = gen_rtx_MEM (XFmode, operands[0]);
2505bc97 3234 else if (GET_MODE (operands[0]) == DFmode)
1d8eaa6b 3235 operands[0] = gen_rtx_MEM (DFmode, operands[0]);
2505bc97 3236 else
1d8eaa6b 3237 operands[0] = gen_rtx_MEM (DImode, operands[0]);
79e68feb
RS
3238 optype0 = OFFSOP;
3239 }
3240 if (optype0 == POPOP && optype1 == PUSHOP)
3241 {
3242 operands[1] = XEXP (XEXP (operands[1], 0), 0);
c47b0cb4
MK
3243
3244 handle_reg_adjust (operands[1], -size);
3245
2505bc97 3246 if (GET_MODE (operands[1]) == XFmode)
1d8eaa6b 3247 operands[1] = gen_rtx_MEM (XFmode, operands[1]);
2505bc97 3248 else if (GET_MODE (operands[1]) == DFmode)
1d8eaa6b 3249 operands[1] = gen_rtx_MEM (DFmode, operands[1]);
2505bc97 3250 else
1d8eaa6b 3251 operands[1] = gen_rtx_MEM (DImode, operands[1]);
79e68feb
RS
3252 optype1 = OFFSOP;
3253 }
3254
3255 /* If an operand is an unoffsettable memory ref, find a register
3256 we can increment temporarily to make it refer to the second word. */
3257
3258 if (optype0 == MEMOP)
3259 addreg0 = find_addr_reg (XEXP (operands[0], 0));
3260
3261 if (optype1 == MEMOP)
3262 addreg1 = find_addr_reg (XEXP (operands[1], 0));
3263
3264 /* Ok, we can do one word at a time.
3265 Normally we do the low-numbered word first,
3266 but if either operand is autodecrementing then we
3267 do the high-numbered word first.
3268
3269 In either case, set up in LATEHALF the operands to use
3270 for the high-numbered word and in some cases alter the
3271 operands in OPERANDS to be suitable for the low-numbered word. */
3272
2505bc97
RS
3273 if (size == 12)
3274 {
3275 if (optype0 == REGOP)
3276 {
1d8eaa6b
AS
3277 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 2);
3278 middlehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2505bc97
RS
3279 }
3280 else if (optype0 == OFFSOP)
3281 {
b72f00af
RK
3282 middlehalf[0] = adjust_address (operands[0], SImode, 4);
3283 latehalf[0] = adjust_address (operands[0], SImode, size - 4);
2505bc97
RS
3284 }
3285 else
3286 {
c47b0cb4
MK
3287 middlehalf[0] = adjust_address (operands[0], SImode, 0);
3288 latehalf[0] = adjust_address (operands[0], SImode, 0);
2505bc97
RS
3289 }
3290
3291 if (optype1 == REGOP)
3292 {
1d8eaa6b
AS
3293 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 2);
3294 middlehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
2505bc97
RS
3295 }
3296 else if (optype1 == OFFSOP)
3297 {
b72f00af
RK
3298 middlehalf[1] = adjust_address (operands[1], SImode, 4);
3299 latehalf[1] = adjust_address (operands[1], SImode, size - 4);
2505bc97
RS
3300 }
3301 else if (optype1 == CNSTOP)
3302 {
3303 if (GET_CODE (operands[1]) == CONST_DOUBLE)
3304 {
2505bc97
RS
3305 long l[3];
3306
34a72c33
RS
3307 REAL_VALUE_TO_TARGET_LONG_DOUBLE
3308 (*CONST_DOUBLE_REAL_VALUE (operands[1]), l);
2505bc97
RS
3309 operands[1] = GEN_INT (l[0]);
3310 middlehalf[1] = GEN_INT (l[1]);
3311 latehalf[1] = GEN_INT (l[2]);
3312 }
4761e388 3313 else
2505bc97 3314 {
4761e388
NS
3315 /* No non-CONST_DOUBLE constant should ever appear
3316 here. */
3317 gcc_assert (!CONSTANT_P (operands[1]));
2505bc97
RS
3318 }
3319 }
3320 else
3321 {
c47b0cb4
MK
3322 middlehalf[1] = adjust_address (operands[1], SImode, 0);
3323 latehalf[1] = adjust_address (operands[1], SImode, 0);
2505bc97
RS
3324 }
3325 }
79e68feb 3326 else
2505bc97
RS
3327 /* size is not 12: */
3328 {
3329 if (optype0 == REGOP)
1d8eaa6b 3330 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2505bc97 3331 else if (optype0 == OFFSOP)
b72f00af 3332 latehalf[0] = adjust_address (operands[0], SImode, size - 4);
2505bc97 3333 else
c47b0cb4 3334 latehalf[0] = adjust_address (operands[0], SImode, 0);
2505bc97
RS
3335
3336 if (optype1 == REGOP)
1d8eaa6b 3337 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
2505bc97 3338 else if (optype1 == OFFSOP)
b72f00af 3339 latehalf[1] = adjust_address (operands[1], SImode, size - 4);
2505bc97
RS
3340 else if (optype1 == CNSTOP)
3341 split_double (operands[1], &operands[1], &latehalf[1]);
3342 else
c47b0cb4 3343 latehalf[1] = adjust_address (operands[1], SImode, 0);
2505bc97 3344 }
79e68feb 3345
e864837a
AS
3346 /* If insn is effectively movd N(REG),-(REG) then we will do the high
3347 word first. We should use the adjusted operand 1 (which is N+4(REG))
3348 for the low word as well, to compensate for the first decrement of
3349 REG. */
79e68feb 3350 if (optype0 == PUSHOP
e864837a 3351 && reg_overlap_mentioned_p (XEXP (XEXP (operands[0], 0), 0), operands[1]))
c88aeaf8 3352 operands[1] = middlehalf[1] = latehalf[1];
79e68feb 3353
7f98eeb6
RS
3354 /* For (set (reg:DI N) (mem:DI ... (reg:SI N) ...)),
3355 if the upper part of reg N does not appear in the MEM, arrange to
3356 emit the move late-half first. Otherwise, compute the MEM address
3357 into the upper part of N and use that as a pointer to the memory
3358 operand. */
3359 if (optype0 == REGOP
3360 && (optype1 == OFFSOP || optype1 == MEMOP))
3361 {
1d8eaa6b 3362 rtx testlow = gen_rtx_REG (SImode, REGNO (operands[0]));
3a58400f
RS
3363
3364 if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
d7e8d581 3365 && reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
7f98eeb6
RS
3366 {
3367 /* If both halves of dest are used in the src memory address,
3a58400f
RS
3368 compute the address into latehalf of dest.
3369 Note that this can't happen if the dest is two data regs. */
4761e388 3370 compadr:
7f98eeb6
RS
3371 xops[0] = latehalf[0];
3372 xops[1] = XEXP (operands[1], 0);
c47b0cb4
MK
3373
3374 handle_compadr (xops);
3375 if (GET_MODE (operands[1]) == XFmode)
7f98eeb6 3376 {
1d8eaa6b 3377 operands[1] = gen_rtx_MEM (XFmode, latehalf[0]);
b72f00af
RK
3378 middlehalf[1] = adjust_address (operands[1], DImode, size - 8);
3379 latehalf[1] = adjust_address (operands[1], DImode, size - 4);
7f98eeb6
RS
3380 }
3381 else
3382 {
1d8eaa6b 3383 operands[1] = gen_rtx_MEM (DImode, latehalf[0]);
b72f00af 3384 latehalf[1] = adjust_address (operands[1], DImode, size - 4);
7f98eeb6
RS
3385 }
3386 }
3387 else if (size == 12
d7e8d581
RS
3388 && reg_overlap_mentioned_p (middlehalf[0],
3389 XEXP (operands[1], 0)))
7f98eeb6 3390 {
3a58400f
RS
3391 /* Check for two regs used by both source and dest.
3392 Note that this can't happen if the dest is all data regs.
3393 It can happen if the dest is d6, d7, a0.
3394 But in that case, latehalf is an addr reg, so
3395 the code at compadr does ok. */
3396
3397 if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
d7e8d581
RS
3398 || reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
3399 goto compadr;
7f98eeb6
RS
3400
3401 /* JRV says this can't happen: */
4761e388 3402 gcc_assert (!addreg0 && !addreg1);
7f98eeb6 3403
7a1929e1 3404 /* Only the middle reg conflicts; simply put it last. */
c47b0cb4
MK
3405 handle_movsi (operands);
3406 handle_movsi (latehalf);
3407 handle_movsi (middlehalf);
3408
3409 return;
7f98eeb6 3410 }
2fb8a81d 3411 else if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0)))
7f98eeb6
RS
3412 /* If the low half of dest is mentioned in the source memory
3413 address, arrange to emit the move of the late half first. */
3414 dest_overlapped_low = 1;
3415 }
3416
79e68feb
RS
3417 /* If one or both operands are autodecrementing,
3418 do the two words, high-numbered first. */
3419
3420 /* Likewise, if the first move would clobber the source of the second one,
3421 do them in the other order. This happens only for registers;
3422 such overlap can't happen in memory unless the user explicitly
3423 sets it up, and that is an undefined circumstance. */
3424
3425 if (optype0 == PUSHOP || optype1 == PUSHOP
3426 || (optype0 == REGOP && optype1 == REGOP
2505bc97 3427 && ((middlehalf[1] && REGNO (operands[0]) == REGNO (middlehalf[1]))
7f98eeb6
RS
3428 || REGNO (operands[0]) == REGNO (latehalf[1])))
3429 || dest_overlapped_low)
79e68feb
RS
3430 {
3431 /* Make any unoffsettable addresses point at high-numbered word. */
3432 if (addreg0)
c47b0cb4 3433 handle_reg_adjust (addreg0, size - 4);
79e68feb 3434 if (addreg1)
c47b0cb4 3435 handle_reg_adjust (addreg1, size - 4);
79e68feb
RS
3436
3437 /* Do that word. */
c47b0cb4 3438 handle_movsi (latehalf);
79e68feb
RS
3439
3440 /* Undo the adds we just did. */
3441 if (addreg0)
c47b0cb4 3442 handle_reg_adjust (addreg0, -4);
79e68feb 3443 if (addreg1)
c47b0cb4 3444 handle_reg_adjust (addreg1, -4);
79e68feb 3445
2505bc97
RS
3446 if (size == 12)
3447 {
c47b0cb4
MK
3448 handle_movsi (middlehalf);
3449
2505bc97 3450 if (addreg0)
c47b0cb4 3451 handle_reg_adjust (addreg0, -4);
2505bc97 3452 if (addreg1)
c47b0cb4 3453 handle_reg_adjust (addreg1, -4);
2505bc97
RS
3454 }
3455
79e68feb 3456 /* Do low-numbered word. */
c47b0cb4
MK
3457
3458 handle_movsi (operands);
3459 return;
79e68feb
RS
3460 }
3461
3462 /* Normal case: do the two words, low-numbered first. */
3463
dcac2e64 3464 m68k_final_prescan_insn (NULL, operands, 2);
c47b0cb4 3465 handle_movsi (operands);
79e68feb 3466
2505bc97
RS
3467 /* Do the middle one of the three words for long double. */
3468 if (size == 12)
3469 {
3470 if (addreg0)
c47b0cb4 3471 handle_reg_adjust (addreg0, 4);
2505bc97 3472 if (addreg1)
c47b0cb4 3473 handle_reg_adjust (addreg1, 4);
2505bc97 3474
dcac2e64 3475 m68k_final_prescan_insn (NULL, middlehalf, 2);
c47b0cb4 3476 handle_movsi (middlehalf);
2505bc97
RS
3477 }
3478
79e68feb
RS
3479 /* Make any unoffsettable addresses point at high-numbered word. */
3480 if (addreg0)
c47b0cb4 3481 handle_reg_adjust (addreg0, 4);
79e68feb 3482 if (addreg1)
c47b0cb4 3483 handle_reg_adjust (addreg1, 4);
79e68feb
RS
3484
3485 /* Do that word. */
dcac2e64 3486 m68k_final_prescan_insn (NULL, latehalf, 2);
c47b0cb4 3487 handle_movsi (latehalf);
79e68feb
RS
3488
3489 /* Undo the adds we just did. */
3490 if (addreg0)
c47b0cb4
MK
3491 handle_reg_adjust (addreg0, -(size - 4));
3492 if (addreg1)
3493 handle_reg_adjust (addreg1, -(size - 4));
3494
3495 return;
3496}
3497
3498/* Output assembler code to adjust REG by N. */
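/* N must be a nonzero multiple of 4 in the range [-12, 12]; for example,
   n == -8 is output as "subq%.l #8,%0".  */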
3499static void
3500output_reg_adjust (rtx reg, int n)
3501{
3502 const char *s;
3503
3504 gcc_assert (GET_MODE (reg) == SImode
3505 && -12 <= n && n != 0 && n <= 12);
3506
3507 switch (n)
2505bc97 3508 {
c47b0cb4
MK
3509 case 12:
3510 s = "add%.l #12,%0";
3511 break;
3512
3513 case 8:
3514 s = "addq%.l #8,%0";
3515 break;
3516
3517 case 4:
3518 s = "addq%.l #4,%0";
3519 break;
3520
3521 case -12:
3522 s = "sub%.l #12,%0";
3523 break;
3524
3525 case -8:
3526 s = "subq%.l #8,%0";
3527 break;
3528
3529 case -4:
3530 s = "subq%.l #4,%0";
3531 break;
3532
3533 default:
3534 gcc_unreachable ();
3535 s = NULL;
2505bc97 3536 }
c47b0cb4
MK
3537
3538 output_asm_insn (s, &reg);
3539}
3540
3541/* Emit rtl code to adjust REG by N. */
3542static void
3543emit_reg_adjust (rtx reg1, int n)
3544{
3545 rtx reg2;
3546
3547 gcc_assert (GET_MODE (reg1) == SImode
3548 && -12 <= n && n != 0 && n <= 12);
3549
3550 reg1 = copy_rtx (reg1);
3551 reg2 = copy_rtx (reg1);
3552
3553 if (n < 0)
3554 emit_insn (gen_subsi3 (reg1, reg2, GEN_INT (-n)));
3555 else if (n > 0)
3556 emit_insn (gen_addsi3 (reg1, reg2, GEN_INT (n)));
3557 else
3558 gcc_unreachable ();
3559}
3560
3561/* Output assembler to load address OPERANDS[0] to register OPERANDS[1]. */
3562static void
3563output_compadr (rtx operands[2])
3564{
3565 output_asm_insn ("lea %a1,%0", operands);
3566}
3567
3568/* Output the best assembler insn for moving operands[1] into operands[0]
3569 as a fullword. */
3570static void
3571output_movsi (rtx operands[2])
3572{
3573 output_asm_insn (singlemove_string (operands), operands);
3574}
3575
3576/* Copy OP and change its mode to MODE. */
3577static rtx
ef4bddc2 3578copy_operand (rtx op, machine_mode mode)
c47b0cb4
MK
3579{
3580 /* ??? This looks really ugly. There must be a better way
3581 to change a mode on the operand. */
3582 if (GET_MODE (op) != VOIDmode)
2505bc97 3583 {
c47b0cb4
MK
3584 if (REG_P (op))
3585 op = gen_rtx_REG (mode, REGNO (op));
2505bc97 3586 else
c47b0cb4
MK
3587 {
3588 op = copy_rtx (op);
3589 PUT_MODE (op, mode);
3590 }
2505bc97 3591 }
79e68feb 3592
c47b0cb4
MK
3593 return op;
3594}
3595
3596/* Emit rtl code for moving operands[1] into operands[0] as a fullword. */
3597static void
3598emit_movsi (rtx operands[2])
3599{
3600 operands[0] = copy_operand (operands[0], SImode);
3601 operands[1] = copy_operand (operands[1], SImode);
3602
3603 emit_insn (gen_movsi (operands[0], operands[1]));
3604}
3605
3606/* Output assembler code to perform a doubleword move insn
3607 with operands OPERANDS. */
3608const char *
3609output_move_double (rtx *operands)
3610{
3611 handle_move_double (operands,
3612 output_reg_adjust, output_compadr, output_movsi);
3613
79e68feb
RS
3614 return "";
3615}
3616
c47b0cb4
MK
3617/* Output rtl code to perform a doubleword move insn
3618 with operands OPERANDS. */
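/* Note that output_move_double above and m68k_emit_move_double below share
   the same splitting logic in handle_move_double; only the callbacks for
   register adjustment, address computation and SImode moves differ between
   the assembly-output and RTL-emission paths.  */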
3619void
3620m68k_emit_move_double (rtx operands[2])
3621{
3622 handle_move_double (operands, emit_reg_adjust, emit_movsi, emit_movsi);
3623}
dcc21c4c
PB
3624
3625/* Ensure mode of ORIG, a REG rtx, is MODE. Returns either ORIG or a
3626 new rtx with the correct mode. */
3627
3628static rtx
ef4bddc2 3629force_mode (machine_mode mode, rtx orig)
dcc21c4c
PB
3630{
3631 if (mode == GET_MODE (orig))
3632 return orig;
3633
3634 if (REGNO (orig) >= FIRST_PSEUDO_REGISTER)
3635 abort ();
3636
3637 return gen_rtx_REG (mode, REGNO (orig));
3638}
3639
3640static int
ef4bddc2 3641fp_reg_operand (rtx op, machine_mode mode ATTRIBUTE_UNUSED)
dcc21c4c
PB
3642{
3643 return reg_renumber && FP_REG_P (op);
3644}
3645
3646/* Emit insns to move operands[1] into operands[0].
3647
3648 Return 1 if we have written out everything that needs to be done to
3649 do the move. Otherwise, return 0 and the caller will emit the move
3650 normally.
3651
3652 Note SCRATCH_REG may not be in the proper mode depending on how it
c0220ea4 3653 will be used. This routine is responsible for creating a new copy
dcc21c4c
PB
3654 of SCRATCH_REG in the proper mode. */
3655
3656int
ef4bddc2 3657emit_move_sequence (rtx *operands, machine_mode mode, rtx scratch_reg)
dcc21c4c
PB
3658{
3659 register rtx operand0 = operands[0];
3660 register rtx operand1 = operands[1];
3661 register rtx tem;
3662
3663 if (scratch_reg
3664 && reload_in_progress && GET_CODE (operand0) == REG
3665 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
f2034d06 3666 operand0 = reg_equiv_mem (REGNO (operand0));
dcc21c4c
PB
3667 else if (scratch_reg
3668 && reload_in_progress && GET_CODE (operand0) == SUBREG
3669 && GET_CODE (SUBREG_REG (operand0)) == REG
3670 && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
3671 {
3672 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
3673 the code which tracks sets/uses for delete_output_reload. */
3674 rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
f2034d06 3675 reg_equiv_mem (REGNO (SUBREG_REG (operand0))),
dcc21c4c 3676 SUBREG_BYTE (operand0));
55a2c322 3677 operand0 = alter_subreg (&temp, true);
dcc21c4c
PB
3678 }
3679
3680 if (scratch_reg
3681 && reload_in_progress && GET_CODE (operand1) == REG
3682 && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
f2034d06 3683 operand1 = reg_equiv_mem (REGNO (operand1));
dcc21c4c
PB
3684 else if (scratch_reg
3685 && reload_in_progress && GET_CODE (operand1) == SUBREG
3686 && GET_CODE (SUBREG_REG (operand1)) == REG
3687 && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
3688 {
3689 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
3690 the code which tracks sets/uses for delete_output_reload. */
3691 rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
f2034d06 3692 reg_equiv_mem (REGNO (SUBREG_REG (operand1))),
dcc21c4c 3693 SUBREG_BYTE (operand1));
55a2c322 3694 operand1 = alter_subreg (&temp, true);
dcc21c4c
PB
3695 }
3696
3697 if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
3698 && ((tem = find_replacement (&XEXP (operand0, 0)))
3699 != XEXP (operand0, 0)))
3700 operand0 = gen_rtx_MEM (GET_MODE (operand0), tem);
3701 if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
3702 && ((tem = find_replacement (&XEXP (operand1, 0)))
3703 != XEXP (operand1, 0)))
3704 operand1 = gen_rtx_MEM (GET_MODE (operand1), tem);
3705
3706 /* Handle secondary reloads for loads/stores of FP registers where
3707 the address is symbolic by using the scratch register */
3708 if (fp_reg_operand (operand0, mode)
3709 && ((GET_CODE (operand1) == MEM
3710 && ! memory_address_p (DFmode, XEXP (operand1, 0)))
3711 || ((GET_CODE (operand1) == SUBREG
3712 && GET_CODE (XEXP (operand1, 0)) == MEM
3713 && !memory_address_p (DFmode, XEXP (XEXP (operand1, 0), 0)))))
3714 && scratch_reg)
3715 {
3716 if (GET_CODE (operand1) == SUBREG)
3717 operand1 = XEXP (operand1, 0);
3718
3719 /* SCRATCH_REG will hold an address. We want
3720 it in SImode regardless of what mode it was originally given
3721 to us. */
3722 scratch_reg = force_mode (SImode, scratch_reg);
3723
3724 /* The address might not be directly addressable; for such cases load
3725 the offset into the scratch reg and form the address there. */
3726 if (!memory_address_p (Pmode, XEXP (operand1, 0)))
3727 {
3728 emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
3729 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
3730 Pmode,
3731 XEXP (XEXP (operand1, 0), 0),
3732 scratch_reg));
3733 }
3734 else
3735 emit_move_insn (scratch_reg, XEXP (operand1, 0));
f7df4a84 3736 emit_insn (gen_rtx_SET (operand0, gen_rtx_MEM (mode, scratch_reg)));
dcc21c4c
PB
3737 return 1;
3738 }
3739 else if (fp_reg_operand (operand1, mode)
3740 && ((GET_CODE (operand0) == MEM
3741 && ! memory_address_p (DFmode, XEXP (operand0, 0)))
3742 || ((GET_CODE (operand0) == SUBREG)
3743 && GET_CODE (XEXP (operand0, 0)) == MEM
3744 && !memory_address_p (DFmode, XEXP (XEXP (operand0, 0), 0))))
3745 && scratch_reg)
3746 {
3747 if (GET_CODE (operand0) == SUBREG)
3748 operand0 = XEXP (operand0, 0);
3749
3750 /* SCRATCH_REG will hold an address and maybe the actual data. We want
3751 it in SImode regardless of what mode it was originally given
3752 to us. */
3753 scratch_reg = force_mode (SImode, scratch_reg);
3754
3755 /* The address might not be directly addressable; for such cases load
3756 the offset into the scratch reg and form the address there. */
3757 if (!memory_address_p (Pmode, XEXP (operand0, 0)))
3758 {
3759 emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
3760 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0,
3761 0)),
3762 Pmode,
3763 XEXP (XEXP (operand0, 0),
3764 0),
3765 scratch_reg));
3766 }
3767 else
3768 emit_move_insn (scratch_reg, XEXP (operand0, 0));
f7df4a84 3769 emit_insn (gen_rtx_SET (gen_rtx_MEM (mode, scratch_reg), operand1));
dcc21c4c
PB
3770 return 1;
3771 }
3772 /* Handle secondary reloads for loads of FP registers from constant
3773 expressions by forcing the constant into memory.
3774
3775 Use scratch_reg to hold the address of the memory location.
3776
3777 The proper fix is to change PREFERRED_RELOAD_CLASS to return
3778 NO_REGS when presented with a const_int and a register class
3779 containing only FP registers. Doing so unfortunately creates
3780 more problems than it solves. Fix this for 2.5. */
3781 else if (fp_reg_operand (operand0, mode)
3782 && CONSTANT_P (operand1)
3783 && scratch_reg)
3784 {
3785 rtx xoperands[2];
3786
3787 /* SCRATCH_REG will hold an address and maybe the actual data. We want
3788 it in SImode regardless of what mode it was originally given
3789 to us. */
3790 scratch_reg = force_mode (SImode, scratch_reg);
3791
3792 /* Force the constant into memory and put the address of the
3793 memory location into scratch_reg. */
3794 xoperands[0] = scratch_reg;
3795 xoperands[1] = XEXP (force_const_mem (mode, operand1), 0);
f7df4a84 3796 emit_insn (gen_rtx_SET (scratch_reg, xoperands[1]));
dcc21c4c
PB
3797
3798 /* Now load the destination register. */
f7df4a84 3799 emit_insn (gen_rtx_SET (operand0, gen_rtx_MEM (mode, scratch_reg)));
dcc21c4c
PB
3800 return 1;
3801 }
3802
3803 /* Now have insn-emit do whatever it normally does. */
3804 return 0;
3805}
3806
01e304f8
RZ
3807/* Split one or more DImode RTL references into pairs of SImode
3808 references. The RTL can be REG, offsettable MEM, integer constant, or
3809 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
3810 split and "num" is its length. lo_half and hi_half are output arrays
3811 that parallel "operands". */
3812
3813void
3814split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
3815{
3816 while (num--)
3817 {
3818 rtx op = operands[num];
3819
3820 /* simplify_subreg refuses to split volatile memory addresses,
3821 but we still have to handle it. */
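 /* The m68k is big-endian, so the most significant (high) word is at
 offset 0 and the low word at offset 4. */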
3822 if (GET_CODE (op) == MEM)
3823 {
3824 lo_half[num] = adjust_address (op, SImode, 4);
3825 hi_half[num] = adjust_address (op, SImode, 0);
3826 }
3827 else
3828 {
3829 lo_half[num] = simplify_gen_subreg (SImode, op,
3830 GET_MODE (op) == VOIDmode
3831 ? DImode : GET_MODE (op), 4);
3832 hi_half[num] = simplify_gen_subreg (SImode, op,
3833 GET_MODE (op) == VOIDmode
3834 ? DImode : GET_MODE (op), 0);
3835 }
3836 }
3837}
3838
a40ed0f3
KH
3839/* Split X into a base and a constant offset, storing them in *BASE
3840 and *OFFSET respectively. */
3841
3842static void
3843m68k_split_offset (rtx x, rtx *base, HOST_WIDE_INT *offset)
3844{
3845 *offset = 0;
3846 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
3847 {
3848 *offset += INTVAL (XEXP (x, 1));
3849 x = XEXP (x, 0);
3850 }
3851 *base = x;
3852}
3853
3854/* Return true if PATTERN is a PARALLEL suitable for a movem or fmovem
3855 instruction. STORE_P says whether the move is a load or store.
3856
3857 If the instruction uses post-increment or pre-decrement addressing,
3858 AUTOMOD_BASE is the base register and AUTOMOD_OFFSET is the total
3859 adjustment. This adjustment will be made by the first element of
3860 PARALLEL, with the loads or stores starting at element 1. If the
3861 instruction does not use post-increment or pre-decrement addressing,
3862 AUTOMOD_BASE is null, AUTOMOD_OFFSET is 0, and the loads or stores
3863 start at element 0. */
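/* As an illustration, a moveml that saves d2-d4 with -(%sp) addressing is
   a four-element PARALLEL: element 0 decrements the stack pointer by 12,
   and elements 1-3 are SImode stores at offsets -12, -8 and -4 from the
   incoming %sp.  */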
3864
3865bool
3866m68k_movem_pattern_p (rtx pattern, rtx automod_base,
3867 HOST_WIDE_INT automod_offset, bool store_p)
3868{
3869 rtx base, mem_base, set, mem, reg, last_reg;
3870 HOST_WIDE_INT offset, mem_offset;
3871 int i, first, len;
3872 enum reg_class rclass;
3873
3874 len = XVECLEN (pattern, 0);
3875 first = (automod_base != NULL);
3876
3877 if (automod_base)
3878 {
3879 /* Stores must be pre-decrement and loads must be post-increment. */
3880 if (store_p != (automod_offset < 0))
3881 return false;
3882
3883 /* Work out the base and offset for the lowest memory location. */
3884 base = automod_base;
3885 offset = (automod_offset < 0 ? automod_offset : 0);
3886 }
3887 else
3888 {
3889 /* Allow any valid base and offset in the first access. */
3890 base = NULL;
3891 offset = 0;
3892 }
3893
3894 last_reg = NULL;
3895 rclass = NO_REGS;
3896 for (i = first; i < len; i++)
3897 {
3898 /* We need a plain SET. */
3899 set = XVECEXP (pattern, 0, i);
3900 if (GET_CODE (set) != SET)
3901 return false;
3902
3903 /* Check that we have a memory location... */
3904 mem = XEXP (set, !store_p);
3905 if (!MEM_P (mem) || !memory_operand (mem, VOIDmode))
3906 return false;
3907
3908 /* ...with the right address. */
3909 if (base == NULL)
3910 {
3911 m68k_split_offset (XEXP (mem, 0), &base, &offset);
3912 /* The ColdFire instruction only allows (An) and (d16,An) modes.
3913 There are no mode restrictions for 680x0 besides the
3914 automodification rules enforced above. */
3915 if (TARGET_COLDFIRE
3916 && !m68k_legitimate_base_reg_p (base, reload_completed))
3917 return false;
3918 }
3919 else
3920 {
3921 m68k_split_offset (XEXP (mem, 0), &mem_base, &mem_offset);
3922 if (!rtx_equal_p (base, mem_base) || offset != mem_offset)
3923 return false;
3924 }
3925
3926 /* Check that we have a register of the required mode and class. */
3927 reg = XEXP (set, store_p);
3928 if (!REG_P (reg)
3929 || !HARD_REGISTER_P (reg)
3930 || GET_MODE (reg) != reg_raw_mode[REGNO (reg)])
3931 return false;
3932
3933 if (last_reg)
3934 {
3935 /* The register must belong to RCLASS and have a higher number
3936 than the register in the previous SET. */
3937 if (!TEST_HARD_REG_BIT (reg_class_contents[rclass], REGNO (reg))
3938 || REGNO (last_reg) >= REGNO (reg))
3939 return false;
3940 }
3941 else
3942 {
3943 /* Work out which register class we need. */
3944 if (INT_REGNO_P (REGNO (reg)))
3945 rclass = GENERAL_REGS;
3946 else if (FP_REGNO_P (REGNO (reg)))
3947 rclass = FP_REGS;
3948 else
3949 return false;
3950 }
3951
3952 last_reg = reg;
3953 offset += GET_MODE_SIZE (GET_MODE (reg));
3954 }
3955
3956 /* If we have an automodification, check whether the final offset is OK. */
3957 if (automod_base && offset != (automod_offset < 0 ? 0 : automod_offset))
3958 return false;
3959
3960 /* Reject unprofitable cases. */
3961 if (len < first + (rclass == FP_REGS ? MIN_FMOVEM_REGS : MIN_MOVEM_REGS))
3962 return false;
3963
3964 return true;
3965}
3966
3967/* Return the assembly code template for a movem or fmovem instruction
3968 whose pattern is given by PATTERN. Store the template's operands
3969 in OPERANDS.
3970
3971 If the instruction uses post-increment or pre-decrement addressing,
3972 AUTOMOD_OFFSET is the total adjustment, otherwise it is 0. STORE_P
3973 is true if this is a store instruction. */
3974
3975const char *
3976m68k_output_movem (rtx *operands, rtx pattern,
3977 HOST_WIDE_INT automod_offset, bool store_p)
3978{
3979 unsigned int mask;
3980 int i, first;
3981
3982 gcc_assert (GET_CODE (pattern) == PARALLEL);
3983 mask = 0;
3984 first = (automod_offset != 0);
3985 for (i = first; i < XVECLEN (pattern, 0); i++)
3986 {
3987 /* When using movem with pre-decrement addressing, register X + D0_REG
3988 is controlled by bit 15 - X. For all other addressing modes,
3989 register X + D0_REG is controlled by bit X. Confusingly, the
3990 register mask for fmovem is in the opposite order to that for
3991 movem. */
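 /* For example, saving d2-d4 uses mask 0x001c with (An) or (d16,An)
 addressing, but mask 0x3800 with -(An) addressing. */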
3992 unsigned int regno;
3993
3994 gcc_assert (MEM_P (XEXP (XVECEXP (pattern, 0, i), !store_p)));
3995 gcc_assert (REG_P (XEXP (XVECEXP (pattern, 0, i), store_p)));
3996 regno = REGNO (XEXP (XVECEXP (pattern, 0, i), store_p));
3997 if (automod_offset < 0)
3998 {
3999 if (FP_REGNO_P (regno))
4000 mask |= 1 << (regno - FP0_REG);
4001 else
4002 mask |= 1 << (15 - (regno - D0_REG));
4003 }
4004 else
4005 {
4006 if (FP_REGNO_P (regno))
4007 mask |= 1 << (7 - (regno - FP0_REG));
4008 else
4009 mask |= 1 << (regno - D0_REG);
4010 }
4011 }
4012 CC_STATUS_INIT;
4013
4014 if (automod_offset == 0)
4015 operands[0] = XEXP (XEXP (XVECEXP (pattern, 0, first), !store_p), 0);
4016 else if (automod_offset < 0)
4017 operands[0] = gen_rtx_PRE_DEC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
4018 else
4019 operands[0] = gen_rtx_POST_INC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
4020 operands[1] = GEN_INT (mask);
4021 if (FP_REGNO_P (REGNO (XEXP (XVECEXP (pattern, 0, first), store_p))))
4022 {
4023 if (store_p)
1fae2d80 4024 return "fmovem %1,%a0";
a40ed0f3 4025 else
1fae2d80 4026 return "fmovem %a0,%1";
a40ed0f3
KH
4027 }
4028 else
4029 {
4030 if (store_p)
1fae2d80 4031 return "movem%.l %1,%a0";
a40ed0f3 4032 else
1fae2d80 4033 return "movem%.l %a0,%1";
a40ed0f3
KH
4034 }
4035}
4036
79e68feb
RS
4037/* Return a REG that occurs in ADDR with coefficient 1.
4038 ADDR can be effectively incremented by incrementing REG. */
4039
4040static rtx
8a4a2253 4041find_addr_reg (rtx addr)
79e68feb
RS
4042{
4043 while (GET_CODE (addr) == PLUS)
4044 {
4045 if (GET_CODE (XEXP (addr, 0)) == REG)
4046 addr = XEXP (addr, 0);
4047 else if (GET_CODE (XEXP (addr, 1)) == REG)
4048 addr = XEXP (addr, 1);
4049 else if (CONSTANT_P (XEXP (addr, 0)))
4050 addr = XEXP (addr, 1);
4051 else if (CONSTANT_P (XEXP (addr, 1)))
4052 addr = XEXP (addr, 0);
4053 else
4761e388 4054 gcc_unreachable ();
79e68feb 4055 }
4761e388
NS
4056 gcc_assert (GET_CODE (addr) == REG);
4057 return addr;
79e68feb 4058}
9ee3c687 4059
c16eadc7 4060/* Output assembler code to perform a 32-bit 3-operand add. */
9ee3c687 4061
5505f548 4062const char *
8a4a2253 4063output_addsi3 (rtx *operands)
9ee3c687
JW
4064{
4065 if (! operands_match_p (operands[0], operands[1]))
4066 {
4067 if (!ADDRESS_REG_P (operands[1]))
4068 {
4069 rtx tmp = operands[1];
4070
4071 operands[1] = operands[2];
4072 operands[2] = tmp;
4073 }
4074
4075 /* These insns can result from reloads to access
4076 stack slots over 64k from the frame pointer. */
4077 if (GET_CODE (operands[2]) == CONST_INT
218d5a87 4078 && (INTVAL (operands[2]) < -32768 || INTVAL (operands[2]) > 32767))
8c61b6c1 4079 return "move%.l %2,%0\n\tadd%.l %1,%0";
9ee3c687 4080 if (GET_CODE (operands[2]) == REG)
4b3d1177
KH
4081 return MOTOROLA ? "lea (%1,%2.l),%0" : "lea %1@(0,%2:l),%0";
4082 return MOTOROLA ? "lea (%c2,%1),%0" : "lea %1@(%c2),%0";
9ee3c687
JW
4083 }
4084 if (GET_CODE (operands[2]) == CONST_INT)
4085 {
9ee3c687
JW
4086 if (INTVAL (operands[2]) > 0
4087 && INTVAL (operands[2]) <= 8)
4088 return "addq%.l %2,%0";
4089 if (INTVAL (operands[2]) < 0
4090 && INTVAL (operands[2]) >= -8)
4091 {
c5c76735 4092 operands[2] = GEN_INT (- INTVAL (operands[2]));
9ee3c687
JW
4093 return "subq%.l %2,%0";
4094 }
4095 /* On the CPU32 it is faster to use two addql instructions to
4096 add a small integer (8 < N <= 16) to a register.
7a1929e1 4097 Likewise for subql. */
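 /* For example, adding 12 is emitted as "addq%.l #8,%0" followed by
 "addq%.l #4,%0". */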
fe95f2f7 4098 if (TUNE_CPU32 && REG_P (operands[0]))
9ee3c687
JW
4099 {
4100 if (INTVAL (operands[2]) > 8
4101 && INTVAL (operands[2]) <= 16)
4102 {
1d8eaa6b 4103 operands[2] = GEN_INT (INTVAL (operands[2]) - 8);
3b4b85c9 4104 return "addq%.l #8,%0\n\taddq%.l %2,%0";
9ee3c687
JW
4105 }
4106 if (INTVAL (operands[2]) < -8
4107 && INTVAL (operands[2]) >= -16)
4108 {
c5c76735 4109 operands[2] = GEN_INT (- INTVAL (operands[2]) - 8);
3b4b85c9 4110 return "subq%.l #8,%0\n\tsubq%.l %2,%0";
9ee3c687
JW
4111 }
4112 }
9ee3c687
JW
4113 if (ADDRESS_REG_P (operands[0])
4114 && INTVAL (operands[2]) >= -0x8000
4115 && INTVAL (operands[2]) < 0x8000)
4116 {
fe95f2f7 4117 if (TUNE_68040)
9ee3c687
JW
4118 return "add%.w %2,%0";
4119 else
4b3d1177 4120 return MOTOROLA ? "lea (%c2,%0),%0" : "lea %0@(%c2),%0";
9ee3c687
JW
4121 }
4122 }
4123 return "add%.l %2,%0";
4124}
79e68feb
RS
4125\f
4126/* Store in cc_status the expressions that the condition codes will
4127 describe after execution of an instruction whose pattern is EXP.
4128 Do not alter them if the instruction would not alter the cc's. */
4129
4130/* On the 68000, all the insns to store in an address register fail to
4131 set the cc's. However, in some cases these instructions can potentially
4132 invalidate the saved cc's. In those cases we clear out
4133 some or all of the saved cc's so they won't be used. */
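/* For example, "move%.l %d0,%a0" does not update the flags, so any saved
   cc value that refers to %a0 no longer describes its contents and is
   discarded.  */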
4134
1d8eaa6b 4135void
8a4a2253 4136notice_update_cc (rtx exp, rtx insn)
79e68feb 4137{
1a8965c4 4138 if (GET_CODE (exp) == SET)
79e68feb
RS
4139 {
4140 if (GET_CODE (SET_SRC (exp)) == CALL)
a0a7fbc9 4141 CC_STATUS_INIT;
79e68feb
RS
4142 else if (ADDRESS_REG_P (SET_DEST (exp)))
4143 {
f5963e61 4144 if (cc_status.value1 && modified_in_p (cc_status.value1, insn))
79e68feb 4145 cc_status.value1 = 0;
f5963e61 4146 if (cc_status.value2 && modified_in_p (cc_status.value2, insn))
79e68feb
RS
4147 cc_status.value2 = 0;
4148 }
f6ab62e8
RS
4149 /* fmoves to memory or data registers do not set the condition
4150 codes. Normal moves _do_ set the condition codes, but not in
4151 a way that is appropriate for comparison with 0, because -0.0
4152 would be treated as a negative nonzero number. Note that it
88512ba0 4153 isn't appropriate to conditionalize this restriction on
f6ab62e8
RS
4154 HONOR_SIGNED_ZEROS because that macro merely indicates whether
4155 we care about the difference between -0.0 and +0.0. */
79e68feb
RS
4156 else if (!FP_REG_P (SET_DEST (exp))
4157 && SET_DEST (exp) != cc0_rtx
4158 && (FP_REG_P (SET_SRC (exp))
4159 || GET_CODE (SET_SRC (exp)) == FIX
f6ab62e8 4160 || FLOAT_MODE_P (GET_MODE (SET_DEST (exp)))))
a0a7fbc9 4161 CC_STATUS_INIT;
79e68feb
RS
4162 /* A pair of move insns doesn't produce a useful overall cc. */
4163 else if (!FP_REG_P (SET_DEST (exp))
4164 && !FP_REG_P (SET_SRC (exp))
4165 && GET_MODE_SIZE (GET_MODE (SET_SRC (exp))) > 4
4166 && (GET_CODE (SET_SRC (exp)) == REG
4167 || GET_CODE (SET_SRC (exp)) == MEM
4168 || GET_CODE (SET_SRC (exp)) == CONST_DOUBLE))
a0a7fbc9 4169 CC_STATUS_INIT;
e1dff52a 4170 else if (SET_DEST (exp) != pc_rtx)
79e68feb
RS
4171 {
4172 cc_status.flags = 0;
e1dff52a
KH
4173 cc_status.value1 = SET_DEST (exp);
4174 cc_status.value2 = SET_SRC (exp);
79e68feb
RS
4175 }
4176 }
4177 else if (GET_CODE (exp) == PARALLEL
4178 && GET_CODE (XVECEXP (exp, 0, 0)) == SET)
4179 {
e1dff52a
KH
4180 rtx dest = SET_DEST (XVECEXP (exp, 0, 0));
4181 rtx src = SET_SRC (XVECEXP (exp, 0, 0));
4182
4183 if (ADDRESS_REG_P (dest))
79e68feb 4184 CC_STATUS_INIT;
e1dff52a 4185 else if (dest != pc_rtx)
79e68feb
RS
4186 {
4187 cc_status.flags = 0;
e1dff52a
KH
4188 cc_status.value1 = dest;
4189 cc_status.value2 = src;
79e68feb
RS
4190 }
4191 }
4192 else
4193 CC_STATUS_INIT;
4194 if (cc_status.value2 != 0
4195 && ADDRESS_REG_P (cc_status.value2)
4196 && GET_MODE (cc_status.value2) == QImode)
4197 CC_STATUS_INIT;
1a8965c4 4198 if (cc_status.value2 != 0)
79e68feb
RS
4199 switch (GET_CODE (cc_status.value2))
4200 {
996a5f59 4201 case ASHIFT: case ASHIFTRT: case LSHIFTRT:
79e68feb 4202 case ROTATE: case ROTATERT:
a126dc3a
RH
4203 /* These instructions always clear the overflow bit, and set
4204 the carry to the bit shifted out. */
1afac9a6 4205 cc_status.flags |= CC_OVERFLOW_UNUSABLE | CC_NO_CARRY;
a126dc3a
RH
4206 break;
4207
4208 case PLUS: case MINUS: case MULT:
4209 case DIV: case UDIV: case MOD: case UMOD: case NEG:
79e68feb
RS
4210 if (GET_MODE (cc_status.value2) != VOIDmode)
4211 cc_status.flags |= CC_NO_OVERFLOW;
4212 break;
4213 case ZERO_EXTEND:
4214 /* (SET r1 (ZERO_EXTEND r2)) on this machine
4215 ends with a move insn moving r2 in r2's mode.
4216 Thus, the cc's are set for r2.
7a1929e1 4217 This can set N bit spuriously. */
79e68feb 4218 cc_status.flags |= CC_NOT_NEGATIVE;
1d8eaa6b
AS
4219
4220 default:
4221 break;
79e68feb
RS
4222 }
4223 if (cc_status.value1 && GET_CODE (cc_status.value1) == REG
4224 && cc_status.value2
4225 && reg_overlap_mentioned_p (cc_status.value1, cc_status.value2))
4226 cc_status.value2 = 0;
1adb2fb9
AS
4227 /* Check for PRE_DEC in dest modifying a register used in src. */
4228 if (cc_status.value1 && GET_CODE (cc_status.value1) == MEM
4229 && GET_CODE (XEXP (cc_status.value1, 0)) == PRE_DEC
4230 && cc_status.value2
4231 && reg_overlap_mentioned_p (XEXP (XEXP (cc_status.value1, 0), 0),
4232 cc_status.value2))
4233 cc_status.value2 = 0;
79e68feb 4234 if (((cc_status.value1 && FP_REG_P (cc_status.value1))
1a8965c4 4235 || (cc_status.value2 && FP_REG_P (cc_status.value2))))
79e68feb 4236 cc_status.flags = CC_IN_68881;
67595cbb
RZ
4237 if (cc_status.value2 && GET_CODE (cc_status.value2) == COMPARE
4238 && GET_MODE_CLASS (GET_MODE (XEXP (cc_status.value2, 0))) == MODE_FLOAT)
4239 {
4240 cc_status.flags = CC_IN_68881;
695074be
JB
4241 if (!FP_REG_P (XEXP (cc_status.value2, 0))
4242 && FP_REG_P (XEXP (cc_status.value2, 1)))
67595cbb
RZ
4243 cc_status.flags |= CC_REVERSED;
4244 }
79e68feb
RS
4245}
4246\f
5505f548 4247const char *
8a4a2253 4248output_move_const_double (rtx *operands)
79e68feb 4249{
1a8965c4 4250 int code = standard_68881_constant_p (operands[1]);
79e68feb 4251
1a8965c4 4252 if (code != 0)
79e68feb 4253 {
1a8965c4 4254 static char buf[40];
79e68feb 4255
3b4b85c9 4256 sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff);
1a8965c4 4257 return buf;
79e68feb 4258 }
1a8965c4 4259 return "fmove%.d %1,%0";
79e68feb
RS
4260}
4261
5505f548 4262const char *
8a4a2253 4263output_move_const_single (rtx *operands)
79e68feb 4264{
1a8965c4 4265 int code = standard_68881_constant_p (operands[1]);
79e68feb 4266
1a8965c4 4267 if (code != 0)
79e68feb 4268 {
1a8965c4 4269 static char buf[40];
79e68feb 4270
3b4b85c9 4271 sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff);
1a8965c4 4272 return buf;
79e68feb 4273 }
1a8965c4 4274 return "fmove%.s %f1,%0";
79e68feb
RS
4275}
4276
4277/* Return nonzero if X, a CONST_DOUBLE, has a value that we can get
4278 from the "fmovecr" instruction.
4279 The value, anded with 0xff, gives the code to use in fmovecr
4280 to get the desired constant. */
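/* For example, 1.0 lives at offset 0x32 in the 68881 constant ROM, so it
   can be loaded with "fmovecr #0x32,<fpreg>".  */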
4281
7a1929e1 4282/* This code has been fixed for cross-compilation. */
c1cfb2ae
RS
4283
4284static int inited_68881_table = 0;
4285
5505f548 4286static const char *const strings_68881[7] = {
c1cfb2ae
RS
4287 "0.0",
4288 "1.0",
4289 "10.0",
4290 "100.0",
4291 "10000.0",
4292 "1e8",
4293 "1e16"
a0a7fbc9 4294};
c1cfb2ae 4295
8b60264b 4296static const int codes_68881[7] = {
c1cfb2ae
RS
4297 0x0f,
4298 0x32,
4299 0x33,
4300 0x34,
4301 0x35,
4302 0x36,
4303 0x37
a0a7fbc9 4304};
c1cfb2ae
RS
4305
4306REAL_VALUE_TYPE values_68881[7];
4307
4308/* Set up the values_68881 array by converting the decimal value
7a1929e1 4309 strings in strings_68881 to binary. */
c1cfb2ae
RS
4310
4311void
8a4a2253 4312init_68881_table (void)
c1cfb2ae
RS
4313{
4314 int i;
4315 REAL_VALUE_TYPE r;
ef4bddc2 4316 machine_mode mode;
c1cfb2ae 4317
16d82c3c 4318 mode = SFmode;
c1cfb2ae
RS
4319 for (i = 0; i < 7; i++)
4320 {
4321 if (i == 6)
16d82c3c 4322 mode = DFmode;
c1cfb2ae
RS
4323 r = REAL_VALUE_ATOF (strings_68881[i], mode);
4324 values_68881[i] = r;
4325 }
4326 inited_68881_table = 1;
4327}
79e68feb
RS
4328
4329int
8a4a2253 4330standard_68881_constant_p (rtx x)
79e68feb 4331{
34a72c33 4332 const REAL_VALUE_TYPE *r;
c1cfb2ae 4333 int i;
79e68feb 4334
e18db50d 4335 /* fmovecr must be emulated on the 68040 and 68060, so it shouldn't be
7a1929e1 4336 used at all on those chips. */
9cf106c8 4337 if (TUNE_68040_60)
79e68feb
RS
4338 return 0;
4339
c1cfb2ae
RS
4340 if (! inited_68881_table)
4341 init_68881_table ();
4342
34a72c33 4343 r = CONST_DOUBLE_REAL_VALUE (x);
c1cfb2ae 4344
1a25c6b1 4345 /* Use real_identical instead of real_equal so that -0.0 is rejected. */
c1cfb2ae
RS
4346 for (i = 0; i < 6; i++)
4347 {
34a72c33 4348 if (real_identical (r, &values_68881[i]))
c1cfb2ae
RS
4349 return (codes_68881[i]);
4350 }
4351
79e68feb
RS
4352 if (GET_MODE (x) == SFmode)
4353 return 0;
c1cfb2ae 4354
34a72c33 4355 if (real_equal (r, &values_68881[6]))
c1cfb2ae
RS
4356 return (codes_68881[6]);
4357
79e68feb
RS
4358 /* larger powers of ten in the constants ram are not used
4359 because they are not equal to a `double' C constant. */
4360 return 0;
4361}
4362
4363/* If X is a floating-point constant, return the logarithm of X base 2,
4364 or 0 if X is not a power of 2. */
4365
4366int
8a4a2253 4367floating_exact_log2 (rtx x)
79e68feb 4368{
34a72c33
RS
4369 const REAL_VALUE_TYPE *r;
4370 REAL_VALUE_TYPE r1;
eaff3bf8 4371 int exp;
79e68feb 4372
34a72c33 4373 r = CONST_DOUBLE_REAL_VALUE (x);
79e68feb 4374
34a72c33 4375 if (real_less (r, &dconst1))
79e68feb
RS
4376 return 0;
4377
34a72c33 4378 exp = real_exponent (r);
6ef9a246 4379 real_2expN (&r1, exp, DFmode);
34a72c33 4380 if (real_equal (&r1, r))
eaff3bf8
RH
4381 return exp;
4382
79e68feb
RS
4383 return 0;
4384}
4385\f
79e68feb
RS
4386/* A C compound statement to output to stdio stream STREAM the
4387 assembler syntax for an instruction operand X. X is an RTL
4388 expression.
4389
4390 CODE is a value that can be used to specify one of several ways
4391 of printing the operand. It is used when identical operands
4392 must be printed differently depending on the context. CODE
4393 comes from the `%' specification that was used to request
4394 printing of the operand. If the specification was just `%DIGIT'
4395 then CODE is 0; if the specification was `%LTR DIGIT' then CODE
4396 is the ASCII code for LTR.
4397
4398 If X is a register, this macro should print the register's name.
4399 The names can be found in an array `reg_names' whose type is
4400 `char *[]'. `reg_names' is initialized from `REGISTER_NAMES'.
4401
4402 When the machine description has a specification `%PUNCT' (a `%'
4403 followed by a punctuation character), this macro is called with
4404 a null pointer for X and the punctuation character for CODE.
4405
4406 The m68k specific codes are:
4407
4408 '.' for dot needed in Motorola-style opcode names.
4409 '-' for an operand pushing on the stack:
4410 sp@-, -(sp) or -(%sp) depending on the style of syntax.
4411 '+' for an operand pushing on the stack:
4412 sp@+, (sp)+ or (%sp)+ depending on the style of syntax.
4413 '@' for a reference to the top word on the stack:
4414 sp@, (sp) or (%sp) depending on the style of syntax.
4415 '#' for an immediate operand prefix (# in MIT and Motorola syntax
5ee084df 4416 but & in SGS syntax).
79e68feb
RS
4417 '!' for the cc register (used in an `and to cc' insn).
4418 '$' for the letter `s' in an op code, but only on the 68040.
4419 '&' for the letter `d' in an op code, but only on the 68040.
2ac5f14a 4420 '/' for register prefix needed by longlong.h.
a40ed0f3 4421 '?' for m68k_library_id_string
79e68feb
RS
4422
4423 'b' for byte insn (no effect, on the Sun; this is for the ISI).
4424 'd' to force memory addressing to be absolute, not relative.
4425 'f' for float insn (print a CONST_DOUBLE as a float rather than in hex)
79e68feb
RS
4426 'x' for float insn (print a CONST_DOUBLE as a float rather than in hex),
4427 or print pair of registers as rx:ry.
29ca003a
RS
4428 'p' print an address with @PLTPC attached, but only if the operand
4429 is not locally-bound. */
79e68feb
RS
4430
4431void
8a4a2253 4432print_operand (FILE *file, rtx op, int letter)
79e68feb 4433{
79e68feb
RS
4434 if (letter == '.')
4435 {
e6d98cb0
BI
4436 if (MOTOROLA)
4437 fprintf (file, ".");
79e68feb
RS
4438 }
4439 else if (letter == '#')
e6d98cb0 4440 asm_fprintf (file, "%I");
79e68feb 4441 else if (letter == '-')
4b3d1177 4442 asm_fprintf (file, MOTOROLA ? "-(%Rsp)" : "%Rsp@-");
79e68feb 4443 else if (letter == '+')
4b3d1177 4444 asm_fprintf (file, MOTOROLA ? "(%Rsp)+" : "%Rsp@+");
79e68feb 4445 else if (letter == '@')
4b3d1177 4446 asm_fprintf (file, MOTOROLA ? "(%Rsp)" : "%Rsp@");
79e68feb 4447 else if (letter == '!')
e6d98cb0 4448 asm_fprintf (file, "%Rfpcr");
79e68feb
RS
4449 else if (letter == '$')
4450 {
b101567e 4451 if (TARGET_68040)
e6d98cb0 4452 fprintf (file, "s");
79e68feb
RS
4453 }
4454 else if (letter == '&')
4455 {
b101567e 4456 if (TARGET_68040)
e6d98cb0 4457 fprintf (file, "d");
79e68feb 4458 }
2ac5f14a 4459 else if (letter == '/')
e6d98cb0 4460 asm_fprintf (file, "%R");
a40ed0f3
KH
4461 else if (letter == '?')
4462 asm_fprintf (file, m68k_library_id_string);
29ca003a 4463 else if (letter == 'p')
2c8ec431 4464 {
29ca003a
RS
4465 output_addr_const (file, op);
4466 if (!(GET_CODE (op) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (op)))
4467 fprintf (file, "@PLTPC");
2c8ec431 4468 }
79e68feb
RS
4469 else if (GET_CODE (op) == REG)
4470 {
1a8965c4
AS
4471 if (letter == 'R')
4472 /* Print out the second register name of a register pair.
4473 I.e., R (6) => 7. */
01bbf777 4474 fputs (M68K_REGNAME(REGNO (op) + 1), file);
79e68feb 4475 else
01bbf777 4476 fputs (M68K_REGNAME(REGNO (op)), file);
79e68feb
RS
4477 }
4478 else if (GET_CODE (op) == MEM)
4479 {
cc8ca59e 4480 output_address (GET_MODE (op), XEXP (op, 0));
79e68feb
RS
4481 if (letter == 'd' && ! TARGET_68020
4482 && CONSTANT_ADDRESS_P (XEXP (op, 0))
4483 && !(GET_CODE (XEXP (op, 0)) == CONST_INT
4484 && INTVAL (XEXP (op, 0)) < 0x8000
4485 && INTVAL (XEXP (op, 0)) >= -0x8000))
4b3d1177 4486 fprintf (file, MOTOROLA ? ".l" : ":l");
79e68feb 4487 }
79e68feb
RS
4488 else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == SFmode)
4489 {
6ae89ea8 4490 long l;
34a72c33 4491 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), l);
429ce992 4492 asm_fprintf (file, "%I0x%lx", l & 0xFFFFFFFF);
c1cfb2ae
RS
4493 }
4494 else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == XFmode)
4495 {
6ae89ea8 4496 long l[3];
34a72c33 4497 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op), l);
429ce992
AS
4498 asm_fprintf (file, "%I0x%lx%08lx%08lx", l[0] & 0xFFFFFFFF,
4499 l[1] & 0xFFFFFFFF, l[2] & 0xFFFFFFFF);
79e68feb 4500 }
e2c0a924 4501 else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == DFmode)
79e68feb 4502 {
6ae89ea8 4503 long l[2];
34a72c33 4504 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op), l);
429ce992 4505 asm_fprintf (file, "%I0x%lx%08lx", l[0] & 0xFFFFFFFF, l[1] & 0xFFFFFFFF);
79e68feb
RS
4506 }
4507 else
4508 {
2c8ec431
DL
4509 /* Use `print_operand_address' instead of `output_addr_const'
4510 to ensure that we print relevant PIC stuff. */
1f85a612 4511 asm_fprintf (file, "%I");
2c8ec431
DL
4512 if (TARGET_PCREL
4513 && (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == CONST))
4514 print_operand_address (file, op);
4515 else
4516 output_addr_const (file, op);
79e68feb
RS
4517 }
4518}
4519
75df395f
MK
4520/* Return string for TLS relocation RELOC. */
4521
4522static const char *
4523m68k_get_reloc_decoration (enum m68k_reloc reloc)
4524{
4525 /* To my knowledge, !MOTOROLA assemblers don't support TLS. */
4526 gcc_assert (MOTOROLA || reloc == RELOC_GOT);
4527
4528 switch (reloc)
4529 {
4530 case RELOC_GOT:
4531 if (MOTOROLA)
4532 {
4533 if (flag_pic == 1 && TARGET_68020)
4534 return "@GOT.w";
4535 else
4536 return "@GOT";
4537 }
4538 else
4539 {
4540 if (TARGET_68020)
4541 {
4542 switch (flag_pic)
4543 {
4544 case 1:
4545 return ":w";
4546 case 2:
4547 return ":l";
4548 default:
4549 return "";
4550 }
4551 }
4552 }
4553
4554 case RELOC_TLSGD:
4555 return "@TLSGD";
4556
4557 case RELOC_TLSLDM:
4558 return "@TLSLDM";
4559
4560 case RELOC_TLSLDO:
4561 return "@TLSLDO";
4562
4563 case RELOC_TLSIE:
4564 return "@TLSIE";
4565
4566 case RELOC_TLSLE:
4567 return "@TLSLE";
4568
4569 default:
4570 gcc_unreachable ();
4571 }
4572}
4573
cb69db4f 4574/* m68k implementation of TARGET_OUTPUT_ADDR_CONST_EXTRA. */
884316ff 4575
cb69db4f 4576static bool
884316ff
JM
4577m68k_output_addr_const_extra (FILE *file, rtx x)
4578{
75df395f
MK
4579 if (GET_CODE (x) == UNSPEC)
4580 {
4581 switch (XINT (x, 1))
4582 {
4583 case UNSPEC_RELOC16:
4584 case UNSPEC_RELOC32:
4585 output_addr_const (file, XVECEXP (x, 0, 0));
f878882b
AS
4586 fputs (m68k_get_reloc_decoration
4587 ((enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1))), file);
75df395f 4588 return true;
884316ff 4589
75df395f
MK
4590 default:
4591 break;
4592 }
4593 }
4594
4595 return false;
4596}
4597
4598/* M68K implementation of TARGET_ASM_OUTPUT_DWARF_DTPREL. */
4599
4600static void
4601m68k_output_dwarf_dtprel (FILE *file, int size, rtx x)
4602{
4603 gcc_assert (size == 4);
4604 fputs ("\t.long\t", file);
4605 output_addr_const (file, x);
4606 fputs ("@TLSLDO+0x8000", file);
884316ff
JM
4607}
4608
7b0f476d
AS
4609/* In the name of slightly smaller debug output, and to cater to
4610 general assembler lossage, recognize various UNSPEC sequences
4611 and turn them back into a direct symbol reference. */
4612
4613static rtx
33d67485 4614m68k_delegitimize_address (rtx orig_x)
7b0f476d 4615{
8390b335
AS
4616 rtx x;
4617 struct m68k_address addr;
4618 rtx unspec;
7b0f476d 4619
33d67485 4620 orig_x = delegitimize_mem_from_attrs (orig_x);
8390b335
AS
4621 x = orig_x;
4622 if (MEM_P (x))
4623 x = XEXP (x, 0);
4624
4625 if (GET_CODE (x) != PLUS || GET_MODE (x) != Pmode)
33d67485
AS
4626 return orig_x;
4627
8390b335
AS
4628 if (!m68k_decompose_address (GET_MODE (x), x, false, &addr)
4629 || addr.offset == NULL_RTX
4630 || GET_CODE (addr.offset) != CONST)
4631 return orig_x;
7b0f476d 4632
8390b335
AS
4633 unspec = XEXP (addr.offset, 0);
4634 if (GET_CODE (unspec) == PLUS && CONST_INT_P (XEXP (unspec, 1)))
4635 unspec = XEXP (unspec, 0);
4636 if (GET_CODE (unspec) != UNSPEC
4637 || (XINT (unspec, 1) != UNSPEC_RELOC16
4638 && XINT (unspec, 1) != UNSPEC_RELOC32))
4639 return orig_x;
4640 x = XVECEXP (unspec, 0, 0);
92cf7399 4641 gcc_assert (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF);
8390b335
AS
4642 if (unspec != XEXP (addr.offset, 0))
4643 x = gen_rtx_PLUS (Pmode, x, XEXP (XEXP (addr.offset, 0), 1));
4644 if (addr.index)
7b0f476d 4645 {
8390b335
AS
4646 rtx idx = addr.index;
4647 if (addr.scale != 1)
4648 idx = gen_rtx_MULT (Pmode, idx, GEN_INT (addr.scale));
4649 x = gen_rtx_PLUS (Pmode, idx, x);
7b0f476d 4650 }
8390b335
AS
4651 if (addr.base)
4652 x = gen_rtx_PLUS (Pmode, addr.base, x);
4653 if (MEM_P (orig_x))
4654 x = replace_equiv_address_nv (orig_x, x);
4655 return x;
7b0f476d
AS
4656}
4657
79e68feb
RS
4658\f
4659/* A C compound statement to output to stdio stream STREAM the
4660 assembler syntax for an instruction operand that is a memory
4661 reference whose address is ADDR. ADDR is an RTL expression.
4662
4663 Note that this contains a kludge that knows that the only reason
4664 we have an address (plus (label_ref...) (reg...)) when not generating
4665 PIC code is in the insn before a tablejump, and we know that m68k.md
4666 generates a label LInnn: on such an insn.
4667
4668 It is possible for PIC to generate a (plus (label_ref...) (reg...))
4669 and we handle that just like we would a (plus (symbol_ref...) (reg...)).
4670
79e68feb
RS
4671 This routine is responsible for distinguishing between -fpic and -fPIC
4672 style relocations in an address. When generating -fpic code the
112cdef5
KH
4673 offset is output in word mode (e.g. movel a5@(_foo:w), a0). When generating
4674 -fPIC code the offset is output in long mode (e.g. movel a5@(_foo:l), a0). */
79e68feb
RS
4675
4676void
8a4a2253 4677print_operand_address (FILE *file, rtx addr)
79e68feb 4678{
fc2241eb
RS
4679 struct m68k_address address;
4680
4681 if (!m68k_decompose_address (QImode, addr, true, &address))
4682 gcc_unreachable ();
4683
4684 if (address.code == PRE_DEC)
4b3d1177
KH
4685 fprintf (file, MOTOROLA ? "-(%s)" : "%s@-",
4686 M68K_REGNAME (REGNO (address.base)));
fc2241eb 4687 else if (address.code == POST_INC)
4b3d1177
KH
4688 fprintf (file, MOTOROLA ? "(%s)+" : "%s@+",
4689 M68K_REGNAME (REGNO (address.base)));
fc2241eb
RS
4690 else if (!address.base && !address.index)
4691 {
4692 /* A constant address. */
4693 gcc_assert (address.offset == addr);
4694 if (GET_CODE (addr) == CONST_INT)
4695 {
4696 /* (xxx).w or (xxx).l. */
4697 if (IN_RANGE (INTVAL (addr), -0x8000, 0x7fff))
4b3d1177 4698 fprintf (file, MOTOROLA ? "%d.w" : "%d:w", (int) INTVAL (addr));
a0a7fbc9 4699 else
fc2241eb 4700 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (addr));
a0a7fbc9 4701 }
fc2241eb 4702 else if (TARGET_PCREL)
a0a7fbc9 4703 {
fc2241eb
RS
4704 /* (d16,PC) or (bd,PC,Xn) (with suppressed index register). */
4705 fputc ('(', file);
4706 output_addr_const (file, addr);
4707 asm_fprintf (file, flag_pic == 1 ? ":w,%Rpc)" : ":l,%Rpc)");
a0a7fbc9 4708 }
fc2241eb 4709 else
a0a7fbc9 4710 {
fc2241eb
RS
4711 /* (xxx).l. We need a special case for SYMBOL_REF if the symbol
4712 name ends in `.<letter>', as the last 2 characters can be
4713 mistaken as a size suffix. Put the name in parentheses. */
4714 if (GET_CODE (addr) == SYMBOL_REF
4715 && strlen (XSTR (addr, 0)) > 2
4716 && XSTR (addr, 0)[strlen (XSTR (addr, 0)) - 2] == '.')
a0a7fbc9 4717 {
fc2241eb
RS
4718 putc ('(', file);
4719 output_addr_const (file, addr);
4720 putc (')', file);
a0a7fbc9
AS
4721 }
4722 else
fc2241eb 4723 output_addr_const (file, addr);
a0a7fbc9 4724 }
fc2241eb
RS
4725 }
4726 else
4727 {
4728 int labelno;
4729
4730 /* If ADDR is a (d8,pc,Xn) address, this is the number of the
44c7bd63 4731 label being accessed, otherwise it is -1. */
fc2241eb
RS
4732 labelno = (address.offset
4733 && !address.base
4734 && GET_CODE (address.offset) == LABEL_REF
4735 ? CODE_LABEL_NUMBER (XEXP (address.offset, 0))
4736 : -1);
4737 if (MOTOROLA)
a0a7fbc9 4738 {
fc2241eb
RS
4739 /* Print the "offset(base" component. */
4740 if (labelno >= 0)
e59d83aa 4741 asm_fprintf (file, "%LL%d(%Rpc,", labelno);
fc2241eb 4742 else
a0a7fbc9 4743 {
fc2241eb 4744 if (address.offset)
75df395f
MK
4745 output_addr_const (file, address.offset);
4746
fc2241eb
RS
4747 putc ('(', file);
4748 if (address.base)
4749 fputs (M68K_REGNAME (REGNO (address.base)), file);
a0a7fbc9 4750 }
fc2241eb
RS
4751 /* Print the ",index" component, if any. */
4752 if (address.index)
a0a7fbc9 4753 {
fc2241eb
RS
4754 if (address.base)
4755 putc (',', file);
4756 fprintf (file, "%s.%c",
4757 M68K_REGNAME (REGNO (address.index)),
4758 GET_MODE (address.index) == HImode ? 'w' : 'l');
4759 if (address.scale != 1)
4760 fprintf (file, "*%d", address.scale);
a0a7fbc9 4761 }
a0a7fbc9 4762 putc (')', file);
a0a7fbc9 4763 }
fc2241eb 4764 else /* !MOTOROLA */
a0a7fbc9 4765 {
fc2241eb
RS
4766 if (!address.offset && !address.index)
4767 fprintf (file, "%s@", M68K_REGNAME (REGNO (address.base)));
a0a7fbc9 4768 else
a0a7fbc9 4769 {
fc2241eb
RS
4770 /* Print the "base@(offset" component. */
4771 if (labelno >= 0)
e59d83aa 4772 asm_fprintf (file, "%Rpc@(%LL%d", labelno);
fc2241eb
RS
4773 else
4774 {
4775 if (address.base)
4776 fputs (M68K_REGNAME (REGNO (address.base)), file);
4777 fprintf (file, "@(");
4778 if (address.offset)
75df395f 4779 output_addr_const (file, address.offset);
fc2241eb
RS
4780 }
4781 /* Print the ",index" component, if any. */
4782 if (address.index)
4783 {
4784 fprintf (file, ",%s:%c",
4785 M68K_REGNAME (REGNO (address.index)),
4786 GET_MODE (address.index) == HImode ? 'w' : 'l');
4787 if (address.scale != 1)
4788 fprintf (file, ":%d", address.scale);
4789 }
a0a7fbc9
AS
4790 putc (')', file);
4791 }
a0a7fbc9 4792 }
79e68feb
RS
4793 }
4794}
af13f02d
JW
4795\f
4796/* Check for cases where a clr insn can be omitted from code using
4797 strict_low_part sets. For example, the second clrl here is not needed:
4798 clrl d0; movw a0@+,d0; use d0; clrl d0; movw a0@+,d0; use d0; ...
4799
4800 MODE is the mode of this STRICT_LOW_PART set. FIRST_INSN is the clear
4801 insn we are checking for redundancy. TARGET is the register set by the
4802 clear insn. */
4803
8a4a2253 4804bool
ef4bddc2 4805strict_low_part_peephole_ok (machine_mode mode, rtx_insn *first_insn,
8a4a2253 4806 rtx target)
af13f02d 4807{
c85e862a 4808 rtx_insn *p = first_insn;
af13f02d 4809
39250081 4810 while ((p = PREV_INSN (p)))
af13f02d 4811 {
39250081
RZ
4812 if (NOTE_INSN_BASIC_BLOCK_P (p))
4813 return false;
4814
4815 if (NOTE_P (p))
4816 continue;
4817
af13f02d 4818 /* If it isn't an insn, then give up. */
39250081 4819 if (!INSN_P (p))
8a4a2253 4820 return false;
af13f02d
JW
4821
4822 if (reg_set_p (target, p))
4823 {
4824 rtx set = single_set (p);
4825 rtx dest;
4826
4827 /* If it isn't an easy to recognize insn, then give up. */
4828 if (! set)
8a4a2253 4829 return false;
af13f02d
JW
4830
4831 dest = SET_DEST (set);
4832
4833 /* If this sets the entire target register to zero, then our
4834 first_insn is redundant. */
4835 if (rtx_equal_p (dest, target)
4836 && SET_SRC (set) == const0_rtx)
8a4a2253 4837 return true;
af13f02d
JW
4838 else if (GET_CODE (dest) == STRICT_LOW_PART
4839 && GET_CODE (XEXP (dest, 0)) == REG
4840 && REGNO (XEXP (dest, 0)) == REGNO (target)
4841 && (GET_MODE_SIZE (GET_MODE (XEXP (dest, 0)))
4842 <= GET_MODE_SIZE (mode)))
4843 /* This is a strict low part set which modifies less than
4844 we are using, so it is safe. */
4845 ;
4846 else
8a4a2253 4847 return false;
af13f02d 4848 }
af13f02d
JW
4849 }
4850
8a4a2253 4851 return false;
af13f02d 4852}
67cd4f83 4853
2c8ec431
DL
4854/* Operand predicates for implementing asymmetric pc-relative addressing
4855 on m68k. The m68k supports pc-relative addressing (mode 7, register 2)
dab66575 4856 when used as a source operand, but not as a destination operand.
2c8ec431
DL
4857
4858 We model this by restricting the meaning of the basic predicates
4859 (general_operand, memory_operand, etc) to forbid the use of this
4860 addressing mode, and then define the following predicates that permit
4861 this addressing mode. These predicates can then be used for the
4862 source operands of the appropriate instructions.
4863
4864 n.b. While it is theoretically possible to change all machine patterns
4865 to use this addressing mode where permitted by the architecture,
4866 it has only been implemented for "common" cases: SImode, HImode, and
4867 QImode operands, and only for the principal operations that would
4868 require this addressing mode: data movement and simple integer operations.
4869
4870 In parallel with these new predicates, two new constraint letters
4871 were defined: 'S' and 'T'. 'S' is the -mpcrel analog of 'm'.
4872 'T' replaces 's' in the non-pcrel case. It is a no-op in the pcrel case.
4873 In the pcrel case 's' is only valid in combination with 'a' registers.
4874 See addsi3, subsi3, cmpsi, and movsi patterns for a better understanding
4875 of how these constraints are used.
4876
4877 The use of these predicates is strictly optional, though patterns that
4878 don't use them will cause an extra reload register to be allocated where one
4879 was not necessary:
4880
4881 lea (abc:w,%pc),%a0 ; need to reload address
4882 moveq &1,%d1 ; since write to pc-relative space
4883 movel %d1,%a0@ ; is not allowed
4884 ...
4885 lea (abc:w,%pc),%a1 ; no need to reload address here
4886 movel %a1@,%d0 ; since "movel (abc:w,%pc),%d0" is ok
4887
4888 For more info, consult tiemann@cygnus.com.
4889
4890
4891 All of the ugliness with predicates and constraints is due to the
4892 simple fact that the m68k does not allow a pc-relative addressing
4893 mode as a destination. gcc does not distinguish between source and
4894 destination addresses. Hence, if we claim that pc-relative address
331d9186 4895 modes are valid, e.g. TARGET_LEGITIMATE_ADDRESS_P accepts them, then we
2c8ec431
DL
4896 end up with invalid code. To get around this problem, we left
4897 pc-relative modes as invalid addresses, and then added special
4898 predicates and constraints to accept them.
4899
4900 A cleaner way to handle this is to modify gcc to distinguish
4901 between source and destination addresses. We can then say that
4902 pc-relative is a valid source address but not a valid destination
4903 address, and hopefully avoid a lot of the predicate and constraint
4904 hackery. Unfortunately, this would be a pretty big change. It would
4905 be a useful change for a number of ports, but there aren't any current
4906 plans to undertake this.
4907
4908 ***************************************************************************/
4909
4910
5505f548 4911const char *
8a4a2253 4912output_andsi3 (rtx *operands)
29ae8a3c
RK
4913{
4914 int logval;
4915 if (GET_CODE (operands[2]) == CONST_INT
25c99d8f 4916 && (INTVAL (operands[2]) | 0xffff) == -1
29ae8a3c
RK
4917 && (DATA_REG_P (operands[0])
4918 || offsettable_memref_p (operands[0]))
9425fb04 4919 && !TARGET_COLDFIRE)
29ae8a3c
RK
4920 {
4921 if (GET_CODE (operands[0]) != REG)
b72f00af 4922 operands[0] = adjust_address (operands[0], HImode, 2);
1d8eaa6b 4923 operands[2] = GEN_INT (INTVAL (operands[2]) & 0xffff);
29ae8a3c
RK
4924 /* Do not delete a following tstl %0 insn; that would be incorrect. */
4925 CC_STATUS_INIT;
4926 if (operands[2] == const0_rtx)
4927 return "clr%.w %0";
4928 return "and%.w %2,%0";
4929 }
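 /* A constant with exactly one zero bit can be handled with bclr; for
 example, ANDing a data register with 0xfffffffd (clearing bit 1) is
 output as "bclr #1,%0". */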
4930 if (GET_CODE (operands[2]) == CONST_INT
c4406f74 4931 && (logval = exact_log2 (~ INTVAL (operands[2]) & 0xffffffff)) >= 0
29ae8a3c
RK
4932 && (DATA_REG_P (operands[0])
4933 || offsettable_memref_p (operands[0])))
4934 {
4935 if (DATA_REG_P (operands[0]))
a0a7fbc9 4936 operands[1] = GEN_INT (logval);
29ae8a3c
RK
4937 else
4938 {
b72f00af 4939 operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
1d8eaa6b 4940 operands[1] = GEN_INT (logval % 8);
29ae8a3c
RK
4941 }
4942 /* This does not set condition codes in a standard way. */
4943 CC_STATUS_INIT;
4944 return "bclr %1,%0";
4945 }
4946 return "and%.l %2,%0";
4947}
4948
5505f548 4949const char *
8a4a2253 4950output_iorsi3 (rtx *operands)
29ae8a3c
RK
4951{
4952 register int logval;
4953 if (GET_CODE (operands[2]) == CONST_INT
4954 && INTVAL (operands[2]) >> 16 == 0
4955 && (DATA_REG_P (operands[0])
4956 || offsettable_memref_p (operands[0]))
9425fb04 4957 && !TARGET_COLDFIRE)
29ae8a3c
RK
4958 {
4959 if (GET_CODE (operands[0]) != REG)
b72f00af 4960 operands[0] = adjust_address (operands[0], HImode, 2);
29ae8a3c
RK
4961 /* Do not delete a following tstl %0 insn; that would be incorrect. */
4962 CC_STATUS_INIT;
4963 if (INTVAL (operands[2]) == 0xffff)
4964 return "mov%.w %2,%0";
4965 return "or%.w %2,%0";
4966 }
4967 if (GET_CODE (operands[2]) == CONST_INT
c4406f74 4968 && (logval = exact_log2 (INTVAL (operands[2]) & 0xffffffff)) >= 0
29ae8a3c
RK
4969 && (DATA_REG_P (operands[0])
4970 || offsettable_memref_p (operands[0])))
4971 {
4972 if (DATA_REG_P (operands[0]))
b72f00af 4973 operands[1] = GEN_INT (logval);
29ae8a3c
RK
4974 else
4975 {
b72f00af 4976 operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
1d8eaa6b 4977 operands[1] = GEN_INT (logval % 8);
29ae8a3c
RK
4978 }
4979 CC_STATUS_INIT;
4980 return "bset %1,%0";
4981 }
4982 return "or%.l %2,%0";
4983}
4984
5505f548 4985const char *
8a4a2253 4986output_xorsi3 (rtx *operands)
29ae8a3c
RK
4987{
4988 register int logval;
4989 if (GET_CODE (operands[2]) == CONST_INT
4990 && INTVAL (operands[2]) >> 16 == 0
4991 && (offsettable_memref_p (operands[0]) || DATA_REG_P (operands[0]))
9425fb04 4992 && !TARGET_COLDFIRE)
29ae8a3c
RK
4993 {
4994 if (! DATA_REG_P (operands[0]))
b72f00af 4995 operands[0] = adjust_address (operands[0], HImode, 2);
29ae8a3c
RK
4996 /* Do not delete a following tstl %0 insn; that would be incorrect. */
4997 CC_STATUS_INIT;
4998 if (INTVAL (operands[2]) == 0xffff)
4999 return "not%.w %0";
5000 return "eor%.w %2,%0";
5001 }
5002 if (GET_CODE (operands[2]) == CONST_INT
c4406f74 5003 && (logval = exact_log2 (INTVAL (operands[2]) & 0xffffffff)) >= 0
29ae8a3c
RK
5004 && (DATA_REG_P (operands[0])
5005 || offsettable_memref_p (operands[0])))
5006 {
5007 if (DATA_REG_P (operands[0]))
b72f00af 5008 operands[1] = GEN_INT (logval);
29ae8a3c
RK
5009 else
5010 {
b72f00af 5011 operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
1d8eaa6b 5012 operands[1] = GEN_INT (logval % 8);
29ae8a3c
RK
5013 }
5014 CC_STATUS_INIT;
5015 return "bchg %1,%0";
5016 }
5017 return "eor%.l %2,%0";
5018}
7c262518 5019
29ca003a
RS
5020/* Return the instruction that should be used for a call to address X,
5021 which is known to be in operand 0. */
5022
5023const char *
5024output_call (rtx x)
5025{
5026 if (symbolic_operand (x, VOIDmode))
5027 return m68k_symbolic_call;
5028 else
5029 return "jsr %a0";
5030}
5031
f7e70894
RS
5032/* Likewise sibling calls. */
5033
5034const char *
5035output_sibcall (rtx x)
5036{
5037 if (symbolic_operand (x, VOIDmode))
5038 return m68k_symbolic_jump;
5039 else
5040 return "jmp %a0";
5041}
5042
c590b625 5043static void
8a4a2253 5044m68k_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
4ab870f5 5045 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8a4a2253 5046 tree function)
483ab821 5047{
c85e862a
DM
5048 rtx this_slot, offset, addr, mem, tmp;
5049 rtx_insn *insn;
e0601576
RH
5050
5051 /* Avoid clobbering the struct value reg by using the
5052 static chain reg as a temporary. */
5053 tmp = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
4ab870f5
RS
5054
5055 /* Pretend to be a post-reload pass while generating rtl. */
4ab870f5 5056 reload_completed = 1;
4ab870f5
RS
5057
5058 /* The "this" pointer is stored at 4(%sp). */
0a81f074
RS
5059 this_slot = gen_rtx_MEM (Pmode, plus_constant (Pmode,
5060 stack_pointer_rtx, 4));
4ab870f5
RS
5061
5062 /* Add DELTA to THIS. */
5063 if (delta != 0)
5050d266 5064 {
4ab870f5
RS
5065 /* Make the offset a legitimate operand for memory addition. */
5066 offset = GEN_INT (delta);
5067 if ((delta < -8 || delta > 8)
5068 && (TARGET_COLDFIRE || USE_MOVQ (delta)))
5069 {
5070 emit_move_insn (gen_rtx_REG (Pmode, D0_REG), offset);
5071 offset = gen_rtx_REG (Pmode, D0_REG);
5072 }
5073 emit_insn (gen_add3_insn (copy_rtx (this_slot),
5074 copy_rtx (this_slot), offset));
5050d266 5075 }
c590b625 5076
4ab870f5
RS
5077 /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
5078 if (vcall_offset != 0)
5079 {
5080 /* Set the static chain register to *THIS. */
e0601576
RH
5081 emit_move_insn (tmp, this_slot);
5082 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
4ab870f5
RS
5083
5084 /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
0a81f074 5085 addr = plus_constant (Pmode, tmp, vcall_offset);
4ab870f5
RS
5086 if (!m68k_legitimate_address_p (Pmode, addr, true))
5087 {
f7df4a84 5088 emit_insn (gen_rtx_SET (tmp, addr));
e0601576 5089 addr = tmp;
4ab870f5 5090 }
c590b625 5091
4ab870f5
RS
5092 /* Load the offset into %d0 and add it to THIS. */
5093 emit_move_insn (gen_rtx_REG (Pmode, D0_REG),
5094 gen_rtx_MEM (Pmode, addr));
5095 emit_insn (gen_add3_insn (copy_rtx (this_slot),
5096 copy_rtx (this_slot),
5097 gen_rtx_REG (Pmode, D0_REG)));
5098 }
29ca003a 5099
4ab870f5
RS
5100 /* Jump to the target function. Use a sibcall if direct jumps are
5101 allowed, otherwise load the address into a register first. */
5102 mem = DECL_RTL (function);
5103 if (!sibcall_operand (XEXP (mem, 0), VOIDmode))
5104 {
5105 gcc_assert (flag_pic);
c590b625 5106
4ab870f5
RS
5107 if (!TARGET_SEP_DATA)
5108 {
5109 /* Use the static chain register as a temporary (call-clobbered)
5110 GOT pointer for this function. We can use the static chain
5111 register because it isn't live on entry to the thunk. */
6fb5fa3c 5112 SET_REGNO (pic_offset_table_rtx, STATIC_CHAIN_REGNUM);
4ab870f5
RS
5113 emit_insn (gen_load_got (pic_offset_table_rtx));
5114 }
e0601576
RH
5115 legitimize_pic_address (XEXP (mem, 0), Pmode, tmp);
5116 mem = replace_equiv_address (mem, tmp);
4ab870f5
RS
5117 }
5118 insn = emit_call_insn (gen_sibcall (mem, const0_rtx));
5119 SIBLING_CALL_P (insn) = 1;
5120
5121 /* Run just enough of rest_of_compilation. */
5122 insn = get_insns ();
5123 split_all_insns_noflow ();
5124 final_start_function (insn, file, 1);
5125 final (insn, file, 1);
5126 final_end_function ();
5127
5128 /* Clean up the vars set above. */
5129 reload_completed = 0;
4ab870f5
RS
5130
5131 /* Restore the original PIC register. */
5132 if (flag_pic)
6fb5fa3c 5133 SET_REGNO (pic_offset_table_rtx, PIC_REG);
483ab821 5134}
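/* As a rough illustration (the exact mnemonics depend on the target
   options), a thunk with DELTA == 4 and VCALL_OFFSET == 0 adjusts the
   incoming "this" pointer in its stack slot and then tail-calls
   FUNCTION, i.e. something like:
	addq.l #4,4(%sp)
	jra	function
   For a non-zero VCALL_OFFSET the vtable adjustment is loaded through
   the static chain register and %d0, as in the code above, before the
   jump.  */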
8636be86
KH
5135
5136/* Worker function for TARGET_STRUCT_VALUE_RTX. */
5137
5138static rtx
5139m68k_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
5140 int incoming ATTRIBUTE_UNUSED)
5141{
5142 return gen_rtx_REG (Pmode, M68K_STRUCT_VALUE_REGNUM);
5143}
cfca21cb
PB
5144
5145/* Return nonzero if register old_reg can be renamed to register new_reg. */
5146int
5147m68k_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED,
5148 unsigned int new_reg)
5149{
5150
5151 /* Interrupt functions can only use registers that have already been
5152 saved by the prologue, even if they would normally be
5153 call-clobbered. */
5154
a4242737
KH
5155 if ((m68k_get_function_kind (current_function_decl)
5156 == m68k_fk_interrupt_handler)
6fb5fa3c 5157 && !df_regs_ever_live_p (new_reg))
cfca21cb
PB
5158 return 0;
5159
5160 return 1;
5161}
70028b61 5162
ffa2596e
RS
5163/* Value is true if hard register REGNO can hold a value of machine-mode
5164 MODE. On the 68000, we let the cpu registers hold any mode, but
5165 restrict the 68881 registers to floating-point modes. */
5166
70028b61 5167bool
ef4bddc2 5168m68k_regno_mode_ok (int regno, machine_mode mode)
70028b61 5169{
36e04090 5170 if (DATA_REGNO_P (regno))
70028b61 5171 {
a0a7fbc9
AS
5172 /* Data registers can hold an aggregate if it fits in the available registers. */
5173 if (regno + GET_MODE_SIZE (mode) / 4 <= 8)
5174 return true;
70028b61 5175 }
36e04090 5176 else if (ADDRESS_REGNO_P (regno))
70028b61 5177 {
a0a7fbc9
AS
5178 if (regno + GET_MODE_SIZE (mode) / 4 <= 16)
5179 return true;
70028b61 5180 }
36e04090 5181 else if (FP_REGNO_P (regno))
70028b61
PB
5182 {
5183 /* FPU registers can hold a float or complex float of long double or
a0a7fbc9
AS
5184 smaller. */
5185 if ((GET_MODE_CLASS (mode) == MODE_FLOAT
5186 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
dcc21c4c 5187 && GET_MODE_UNIT_SIZE (mode) <= TARGET_FP_REG_SIZE)
a0a7fbc9 5188 return true;
70028b61
PB
5189 }
5190 return false;
5191}
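/* For example, a DImode value occupies GET_MODE_SIZE / 4 == 2
   consecutive registers, so it is accepted starting in %d0..%d6
   (regno + 2 <= 8) but rejected starting in %d7; the same boundary
   check is applied to the address registers with the limit 16.  */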
dcc21c4c 5192
ffa2596e
RS
5193/* Implement SECONDARY_RELOAD_CLASS. */
5194
5195enum reg_class
5196m68k_secondary_reload_class (enum reg_class rclass,
ef4bddc2 5197 machine_mode mode, rtx x)
ffa2596e
RS
5198{
5199 int regno;
5200
5201 regno = true_regnum (x);
5202
5203 /* If one operand of a movqi is an address register, the other
5204 operand must be a general register or constant. Other types
5205 of operand must be reloaded through a data register. */
5206 if (GET_MODE_SIZE (mode) == 1
5207 && reg_classes_intersect_p (rclass, ADDR_REGS)
5208 && !(INT_REGNO_P (regno) || CONSTANT_P (x)))
5209 return DATA_REGS;
5210
5211 /* PC-relative addresses must be loaded into an address register first. */
5212 if (TARGET_PCREL
5213 && !reg_class_subset_p (rclass, ADDR_REGS)
5214 && symbolic_operand (x, VOIDmode))
5215 return ADDR_REGS;
5216
5217 return NO_REGS;
5218}
5219
5220/* Implement PREFERRED_RELOAD_CLASS. */
5221
5222enum reg_class
5223m68k_preferred_reload_class (rtx x, enum reg_class rclass)
5224{
5225 enum reg_class secondary_class;
5226
5227 /* If RCLASS might need a secondary reload, try restricting it to
5228 a class that doesn't. */
5229 secondary_class = m68k_secondary_reload_class (rclass, GET_MODE (x), x);
5230 if (secondary_class != NO_REGS
5231 && reg_class_subset_p (secondary_class, rclass))
5232 return secondary_class;
5233
5234 /* Prefer to use moveq for in-range constants. */
5235 if (GET_CODE (x) == CONST_INT
5236 && reg_class_subset_p (DATA_REGS, rclass)
5237 && IN_RANGE (INTVAL (x), -0x80, 0x7f))
5238 return DATA_REGS;
5239
5240 /* ??? Do we really need this now? */
5241 if (GET_CODE (x) == CONST_DOUBLE
5242 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
5243 {
5244 if (TARGET_HARD_FLOAT && reg_class_subset_p (FP_REGS, rclass))
5245 return FP_REGS;
5246
5247 return NO_REGS;
5248 }
5249
5250 return rclass;
5251}
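/* For instance, reloading the constant 100 into GENERAL_REGS is
   narrowed to DATA_REGS so that a moveq can be used, whereas a
   constant such as 1000 (outside -128..127) leaves the class
   unchanged.  */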
5252
dcc21c4c
PB
5253/* Return floating point values in a 68881 register. This makes 68881 code
5254 a little bit faster. It also makes -msoft-float code incompatible with
5255 hard-float code, so people have to be careful not to mix the two.
c0220ea4 5256 For ColdFire it was decided the ABI incompatibility is undesirable.
dcc21c4c
PB
5257 If there is need for a hard-float ABI it is probably worth doing it
5258 properly and also passing function arguments in FP registers. */
5259rtx
ef4bddc2 5260m68k_libcall_value (machine_mode mode)
dcc21c4c
PB
5261{
5262 switch (mode) {
5263 case SFmode:
5264 case DFmode:
5265 case XFmode:
5266 if (TARGET_68881)
8d989403 5267 return gen_rtx_REG (mode, FP0_REG);
dcc21c4c
PB
5268 break;
5269 default:
5270 break;
5271 }
75df395f
MK
5272
5273 return gen_rtx_REG (mode, m68k_libcall_value_in_a0_p ? A0_REG : D0_REG);
dcc21c4c
PB
5274}
5275
db5e2d51
MK
5276/* Location in which function value is returned.
5277 NOTE: Due to differences in ABIs, don't call this function directly,
5278 use FUNCTION_VALUE instead. */
dcc21c4c 5279rtx
586de218 5280m68k_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED)
dcc21c4c 5281{
ef4bddc2 5282 machine_mode mode;
dcc21c4c
PB
5283
5284 mode = TYPE_MODE (valtype);
5285 switch (mode) {
5286 case SFmode:
5287 case DFmode:
5288 case XFmode:
5289 if (TARGET_68881)
8d989403 5290 return gen_rtx_REG (mode, FP0_REG);
dcc21c4c
PB
5291 break;
5292 default:
5293 break;
5294 }
5295
576c9028
KH
5296 /* If the function returns a pointer, push that into %a0. */
5297 if (func && POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (func))))
5298 /* For compatibility with the large body of existing code which
5299 does not always properly declare external functions returning
5300 pointer types, the m68k/SVR4 convention is to copy the value
5301 returned for pointer functions from a0 to d0 in the function
5302 epilogue, so that callers that have neglected to properly
5303 declare the callee can still find the correct return value in
5304 d0. */
5305 return gen_rtx_PARALLEL
5306 (mode,
5307 gen_rtvec (2,
5308 gen_rtx_EXPR_LIST (VOIDmode,
5309 gen_rtx_REG (mode, A0_REG),
5310 const0_rtx),
5311 gen_rtx_EXPR_LIST (VOIDmode,
5312 gen_rtx_REG (mode, D0_REG),
5313 const0_rtx)));
5314 else if (POINTER_TYPE_P (valtype))
5315 return gen_rtx_REG (mode, A0_REG);
dcc21c4c 5316 else
576c9028 5317 return gen_rtx_REG (mode, D0_REG);
dcc21c4c 5318}
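/* For example, a function known to return "char *" has its value
   returned in the PARALLEL above, i.e. in both %a0 and %d0, so
   callers expecting either register find the pointer.  */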
1c445f03
NS
5319
5320/* Worker function for TARGET_RETURN_IN_MEMORY. */
5321#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
5322static bool
511e41e5 5323m68k_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
1c445f03 5324{
ef4bddc2 5325 machine_mode mode = TYPE_MODE (type);
1c445f03
NS
5326
5327 if (mode == BLKmode)
5328 return true;
5329
5330 /* If TYPE's known alignment is less than the alignment of MODE that
5331 would contain the structure, then return in memory. We need to
5332 do so to maintain the compatibility between code compiled with
5333 -mstrict-align and that compiled with -mno-strict-align. */
5334 if (AGGREGATE_TYPE_P (type)
5335 && TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (mode))
5336 return true;
5337
5338 return false;
5339}
5340#endif
c47b0cb4
MK
5341
5342/* CPU to schedule the program for. */
5343enum attr_cpu m68k_sched_cpu;
5344
826fadba
MK
5345/* MAC to schedule the program for. */
5346enum attr_mac m68k_sched_mac;
5347
c47b0cb4
MK
5348/* Operand type. */
5349enum attr_op_type
5350 {
5351 /* No operand. */
5352 OP_TYPE_NONE,
5353
96fcacb7
MK
5354 /* Integer register. */
5355 OP_TYPE_RN,
5356
5357 /* FP register. */
5358 OP_TYPE_FPN,
c47b0cb4
MK
5359
5360 /* Implicit mem reference (e.g. stack). */
5361 OP_TYPE_MEM1,
5362
5363 /* Memory without offset or indexing. EA modes 2, 3 and 4. */
5364 OP_TYPE_MEM234,
5365
5366 /* Memory with offset but without indexing. EA mode 5. */
5367 OP_TYPE_MEM5,
5368
5369 /* Memory with indexing. EA mode 6. */
5370 OP_TYPE_MEM6,
5371
5372 /* Memory referenced by absolute address. EA mode 7. */
5373 OP_TYPE_MEM7,
5374
5375 /* Immediate operand that doesn't require extension word. */
5376 OP_TYPE_IMM_Q,
5377
5378 /* Immediate 16 bit operand. */
5379 OP_TYPE_IMM_W,
5380
5381 /* Immediate 32 bit operand. */
5382 OP_TYPE_IMM_L
5383 };
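/* As an illustration, for "move.l 4(%a0),%d0" the source operand uses
   EA mode 5 (address register indirect with displacement) and is
   classified as OP_TYPE_MEM5, while the destination register is
   OP_TYPE_RN; a quick immediate (e.g. the #5 of an addq.l) is
   OP_TYPE_IMM_Q.  */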
5384
c47b0cb4
MK
5385/* Return type of memory ADDR_RTX refers to. */
5386static enum attr_op_type
ef4bddc2 5387sched_address_type (machine_mode mode, rtx addr_rtx)
c47b0cb4
MK
5388{
5389 struct m68k_address address;
5390
96fcacb7
MK
5391 if (symbolic_operand (addr_rtx, VOIDmode))
5392 return OP_TYPE_MEM7;
5393
c47b0cb4
MK
5394 if (!m68k_decompose_address (mode, addr_rtx,
5395 reload_completed, &address))
5396 {
96fcacb7 5397 gcc_assert (!reload_completed);
c47b0cb4
MK
5398 /* Reload will likely fix the address to be in a register. */
5399 return OP_TYPE_MEM234;
5400 }
5401
5402 if (address.scale != 0)
5403 return OP_TYPE_MEM6;
5404
5405 if (address.base != NULL_RTX)
5406 {
5407 if (address.offset == NULL_RTX)
5408 return OP_TYPE_MEM234;
5409
5410 return OP_TYPE_MEM5;
5411 }
5412
5413 gcc_assert (address.offset != NULL_RTX);
5414
5415 return OP_TYPE_MEM7;
5416}
5417
96fcacb7
MK
5418/* Return X or Y (depending on OPX_P) operand of INSN. */
5419static rtx
647d790d 5420sched_get_operand (rtx_insn *insn, bool opx_p)
96fcacb7
MK
5421{
5422 int i;
5423
5424 if (recog_memoized (insn) < 0)
5425 gcc_unreachable ();
5426
5427 extract_constrain_insn_cached (insn);
5428
5429 if (opx_p)
5430 i = get_attr_opx (insn);
5431 else
5432 i = get_attr_opy (insn);
5433
5434 if (i >= recog_data.n_operands)
5435 return NULL;
5436
5437 return recog_data.operand[i];
5438}
5439
5440/* Return type of INSN's operand X (if OPX_P) or operand Y (if !OPX_P).
5441 If ADDRESS_P is true, return type of memory location operand refers to. */
c47b0cb4 5442static enum attr_op_type
647d790d 5443sched_attr_op_type (rtx_insn *insn, bool opx_p, bool address_p)
c47b0cb4 5444{
96fcacb7
MK
5445 rtx op;
5446
5447 op = sched_get_operand (insn, opx_p);
5448
5449 if (op == NULL)
5450 {
5451 gcc_assert (!reload_completed);
5452 return OP_TYPE_RN;
5453 }
c47b0cb4
MK
5454
5455 if (address_p)
5456 return sched_address_type (QImode, op);
5457
5458 if (memory_operand (op, VOIDmode))
5459 return sched_address_type (GET_MODE (op), XEXP (op, 0));
5460
5461 if (register_operand (op, VOIDmode))
96fcacb7
MK
5462 {
5463 if ((!reload_completed && FLOAT_MODE_P (GET_MODE (op)))
5464 || (reload_completed && FP_REG_P (op)))
5465 return OP_TYPE_FPN;
5466
5467 return OP_TYPE_RN;
5468 }
c47b0cb4
MK
5469
5470 if (GET_CODE (op) == CONST_INT)
5471 {
96fcacb7
MK
5472 int ival;
5473
5474 ival = INTVAL (op);
5475
5476 /* Check for quick constants. */
5477 switch (get_attr_type (insn))
5478 {
5479 case TYPE_ALUQ_L:
5480 if (IN_RANGE (ival, 1, 8) || IN_RANGE (ival, -8, -1))
5481 return OP_TYPE_IMM_Q;
5482
5483 gcc_assert (!reload_completed);
5484 break;
5485
5486 case TYPE_MOVEQ_L:
5487 if (USE_MOVQ (ival))
5488 return OP_TYPE_IMM_Q;
5489
5490 gcc_assert (!reload_completed);
5491 break;
5492
5493 case TYPE_MOV3Q_L:
5494 if (valid_mov3q_const (ival))
5495 return OP_TYPE_IMM_Q;
5496
5497 gcc_assert (!reload_completed);
5498 break;
5499
5500 default:
5501 break;
5502 }
5503
5504 if (IN_RANGE (ival, -0x8000, 0x7fff))
c47b0cb4
MK
5505 return OP_TYPE_IMM_W;
5506
5507 return OP_TYPE_IMM_L;
5508 }
5509
5510 if (GET_CODE (op) == CONST_DOUBLE)
5511 {
5512 switch (GET_MODE (op))
5513 {
5514 case SFmode:
5515 return OP_TYPE_IMM_W;
5516
5517 case VOIDmode:
5518 case DFmode:
5519 return OP_TYPE_IMM_L;
5520
5521 default:
5522 gcc_unreachable ();
5523 }
5524 }
5525
00b2ef14
MK
5526 if (GET_CODE (op) == CONST
5527 || symbolic_operand (op, VOIDmode)
c47b0cb4
MK
5528 || LABEL_P (op))
5529 {
5530 switch (GET_MODE (op))
5531 {
5532 case QImode:
5533 return OP_TYPE_IMM_Q;
5534
5535 case HImode:
5536 return OP_TYPE_IMM_W;
5537
5538 case SImode:
5539 return OP_TYPE_IMM_L;
5540
5541 default:
75df395f
MK
5542 if (symbolic_operand (m68k_unwrap_symbol (op, false), VOIDmode))
5543 /* Just a guess. */
c47b0cb4
MK
5544 return OP_TYPE_IMM_W;
5545
5546 return OP_TYPE_IMM_L;
5547 }
5548 }
5549
96fcacb7 5550 gcc_assert (!reload_completed);
c47b0cb4 5551
96fcacb7
MK
5552 if (FLOAT_MODE_P (GET_MODE (op)))
5553 return OP_TYPE_FPN;
c47b0cb4 5554
96fcacb7 5555 return OP_TYPE_RN;
c47b0cb4
MK
5556}
5557
5558/* Implement opx_type attribute.
5559 Return type of INSN's operand X.
5560 If ADDRESS_P is true, return type of memory location operand refers to. */
5561enum attr_opx_type
647d790d 5562m68k_sched_attr_opx_type (rtx_insn *insn, int address_p)
c47b0cb4 5563{
c47b0cb4
MK
5564 switch (sched_attr_op_type (insn, true, address_p != 0))
5565 {
96fcacb7
MK
5566 case OP_TYPE_RN:
5567 return OPX_TYPE_RN;
5568
5569 case OP_TYPE_FPN:
5570 return OPX_TYPE_FPN;
c47b0cb4
MK
5571
5572 case OP_TYPE_MEM1:
5573 return OPX_TYPE_MEM1;
5574
5575 case OP_TYPE_MEM234:
5576 return OPX_TYPE_MEM234;
5577
5578 case OP_TYPE_MEM5:
5579 return OPX_TYPE_MEM5;
5580
5581 case OP_TYPE_MEM6:
5582 return OPX_TYPE_MEM6;
5583
5584 case OP_TYPE_MEM7:
5585 return OPX_TYPE_MEM7;
5586
5587 case OP_TYPE_IMM_Q:
5588 return OPX_TYPE_IMM_Q;
5589
5590 case OP_TYPE_IMM_W:
5591 return OPX_TYPE_IMM_W;
5592
5593 case OP_TYPE_IMM_L:
5594 return OPX_TYPE_IMM_L;
5595
5596 default:
5597 gcc_unreachable ();
c47b0cb4
MK
5598 }
5599}
5600
5601/* Implement opy_type attribute.
5602 Return type of INSN's operand Y.
5603 If ADDRESS_P is true, return type of memory location operand refers to. */
5604enum attr_opy_type
647d790d 5605m68k_sched_attr_opy_type (rtx_insn *insn, int address_p)
c47b0cb4 5606{
c47b0cb4
MK
5607 switch (sched_attr_op_type (insn, false, address_p != 0))
5608 {
96fcacb7
MK
5609 case OP_TYPE_RN:
5610 return OPY_TYPE_RN;
5611
5612 case OP_TYPE_FPN:
5613 return OPY_TYPE_FPN;
c47b0cb4
MK
5614
5615 case OP_TYPE_MEM1:
5616 return OPY_TYPE_MEM1;
5617
5618 case OP_TYPE_MEM234:
5619 return OPY_TYPE_MEM234;
5620
5621 case OP_TYPE_MEM5:
5622 return OPY_TYPE_MEM5;
5623
5624 case OP_TYPE_MEM6:
5625 return OPY_TYPE_MEM6;
5626
5627 case OP_TYPE_MEM7:
5628 return OPY_TYPE_MEM7;
5629
5630 case OP_TYPE_IMM_Q:
5631 return OPY_TYPE_IMM_Q;
5632
5633 case OP_TYPE_IMM_W:
5634 return OPY_TYPE_IMM_W;
5635
5636 case OP_TYPE_IMM_L:
5637 return OPY_TYPE_IMM_L;
5638
5639 default:
5640 gcc_unreachable ();
c47b0cb4
MK
5641 }
5642}
5643
96fcacb7
MK
5644/* Return size of INSN as int. */
5645static int
84034c69 5646sched_get_attr_size_int (rtx_insn *insn)
c47b0cb4
MK
5647{
5648 int size;
5649
96fcacb7 5650 switch (get_attr_type (insn))
c47b0cb4 5651 {
96fcacb7
MK
5652 case TYPE_IGNORE:
5653 /* There should be no references to m68k_sched_attr_size for 'ignore'
5654 instructions. */
5655 gcc_unreachable ();
5656 return 0;
5657
5658 case TYPE_MUL_L:
c47b0cb4
MK
5659 size = 2;
5660 break;
5661
5662 default:
5663 size = 1;
5664 break;
5665 }
5666
5667 switch (get_attr_opx_type (insn))
5668 {
5669 case OPX_TYPE_NONE:
96fcacb7
MK
5670 case OPX_TYPE_RN:
5671 case OPX_TYPE_FPN:
c47b0cb4
MK
5672 case OPX_TYPE_MEM1:
5673 case OPX_TYPE_MEM234:
5674 case OPX_TYPE_IMM_Q:
5675 break;
5676
5677 case OPX_TYPE_MEM5:
5678 case OPX_TYPE_MEM6:
5679 /* Here we assume that most absolute references are short. */
5680 case OPX_TYPE_MEM7:
5681 case OPX_TYPE_IMM_W:
5682 ++size;
5683 break;
5684
5685 case OPX_TYPE_IMM_L:
5686 size += 2;
5687 break;
5688
5689 default:
5690 gcc_unreachable ();
5691 }
5692
5693 switch (get_attr_opy_type (insn))
5694 {
5695 case OPY_TYPE_NONE:
96fcacb7
MK
5696 case OPY_TYPE_RN:
5697 case OPY_TYPE_FPN:
c47b0cb4
MK
5698 case OPY_TYPE_MEM1:
5699 case OPY_TYPE_MEM234:
5700 case OPY_TYPE_IMM_Q:
5701 break;
5702
5703 case OPY_TYPE_MEM5:
5704 case OPY_TYPE_MEM6:
5705 /* Here we assume that most absolute references are short. */
5706 case OPY_TYPE_MEM7:
5707 case OPY_TYPE_IMM_W:
5708 ++size;
5709 break;
5710
5711 case OPY_TYPE_IMM_L:
5712 size += 2;
5713 break;
5714
5715 default:
5716 gcc_unreachable ();
5717 }
5718
5719 if (size > 3)
5720 {
96fcacb7 5721 gcc_assert (!reload_completed);
c47b0cb4
MK
5722
5723 size = 3;
5724 }
5725
5726 return size;
5727}
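/* For example, "add.l #100000,%d1" has a base size of one word, the
   register operand adds nothing and the 32-bit immediate adds two
   more words, for a total of 3 words (6 bytes).  */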
5728
96fcacb7
MK
5729/* Return size of INSN as attribute enum value. */
5730enum attr_size
84034c69 5731m68k_sched_attr_size (rtx_insn *insn)
96fcacb7
MK
5732{
5733 switch (sched_get_attr_size_int (insn))
5734 {
5735 case 1:
5736 return SIZE_1;
5737
5738 case 2:
5739 return SIZE_2;
5740
5741 case 3:
5742 return SIZE_3;
5743
5744 default:
5745 gcc_unreachable ();
96fcacb7
MK
5746 }
5747}
5748
5749/* Classify operand X or Y (depending on OPX_P) of INSN: OP_TYPE_MEM6 for an
5750 indexed memory reference, OP_TYPE_MEM1 for any other memory reference, or OP_TYPE_RN otherwise. */
5751static enum attr_op_type
84034c69 5752sched_get_opxy_mem_type (rtx_insn *insn, bool opx_p)
96fcacb7
MK
5753{
5754 if (opx_p)
5755 {
5756 switch (get_attr_opx_type (insn))
5757 {
5758 case OPX_TYPE_NONE:
5759 case OPX_TYPE_RN:
5760 case OPX_TYPE_FPN:
5761 case OPX_TYPE_IMM_Q:
5762 case OPX_TYPE_IMM_W:
5763 case OPX_TYPE_IMM_L:
5764 return OP_TYPE_RN;
5765
5766 case OPX_TYPE_MEM1:
5767 case OPX_TYPE_MEM234:
5768 case OPX_TYPE_MEM5:
5769 case OPX_TYPE_MEM7:
5770 return OP_TYPE_MEM1;
5771
5772 case OPX_TYPE_MEM6:
5773 return OP_TYPE_MEM6;
5774
5775 default:
5776 gcc_unreachable ();
96fcacb7
MK
5777 }
5778 }
5779 else
5780 {
5781 switch (get_attr_opy_type (insn))
5782 {
5783 case OPY_TYPE_NONE:
5784 case OPY_TYPE_RN:
5785 case OPY_TYPE_FPN:
5786 case OPY_TYPE_IMM_Q:
5787 case OPY_TYPE_IMM_W:
5788 case OPY_TYPE_IMM_L:
5789 return OP_TYPE_RN;
5790
5791 case OPY_TYPE_MEM1:
5792 case OPY_TYPE_MEM234:
5793 case OPY_TYPE_MEM5:
5794 case OPY_TYPE_MEM7:
5795 return OP_TYPE_MEM1;
5796
5797 case OPY_TYPE_MEM6:
5798 return OP_TYPE_MEM6;
5799
5800 default:
5801 gcc_unreachable ();
96fcacb7
MK
5802 }
5803 }
5804}
5805
c47b0cb4
MK
5806/* Implement op_mem attribute. */
5807enum attr_op_mem
84034c69 5808m68k_sched_attr_op_mem (rtx_insn *insn)
c47b0cb4 5809{
96fcacb7
MK
5810 enum attr_op_type opx;
5811 enum attr_op_type opy;
c47b0cb4 5812
96fcacb7
MK
5813 opx = sched_get_opxy_mem_type (insn, true);
5814 opy = sched_get_opxy_mem_type (insn, false);
c47b0cb4 5815
96fcacb7 5816 if (opy == OP_TYPE_RN && opx == OP_TYPE_RN)
c47b0cb4
MK
5817 return OP_MEM_00;
5818
96fcacb7 5819 if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM1)
c47b0cb4
MK
5820 {
5821 switch (get_attr_opx_access (insn))
5822 {
5823 case OPX_ACCESS_R:
5824 return OP_MEM_10;
5825
5826 case OPX_ACCESS_W:
5827 return OP_MEM_01;
5828
5829 case OPX_ACCESS_RW:
5830 return OP_MEM_11;
5831
5832 default:
96fcacb7 5833 gcc_unreachable ();
c47b0cb4
MK
5834 }
5835 }
5836
96fcacb7 5837 if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM6)
c47b0cb4
MK
5838 {
5839 switch (get_attr_opx_access (insn))
5840 {
5841 case OPX_ACCESS_R:
5842 return OP_MEM_I0;
5843
5844 case OPX_ACCESS_W:
5845 return OP_MEM_0I;
5846
5847 case OPX_ACCESS_RW:
5848 return OP_MEM_I1;
5849
5850 default:
96fcacb7 5851 gcc_unreachable ();
c47b0cb4
MK
5852 }
5853 }
5854
96fcacb7 5855 if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_RN)
c47b0cb4
MK
5856 return OP_MEM_10;
5857
96fcacb7 5858 if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM1)
c47b0cb4
MK
5859 {
5860 switch (get_attr_opx_access (insn))
5861 {
5862 case OPX_ACCESS_W:
5863 return OP_MEM_11;
5864
5865 default:
96fcacb7
MK
5866 gcc_assert (!reload_completed);
5867 return OP_MEM_11;
c47b0cb4
MK
5868 }
5869 }
5870
96fcacb7 5871 if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM6)
c47b0cb4
MK
5872 {
5873 switch (get_attr_opx_access (insn))
5874 {
5875 case OPX_ACCESS_W:
5876 return OP_MEM_1I;
5877
5878 default:
96fcacb7
MK
5879 gcc_assert (!reload_completed);
5880 return OP_MEM_1I;
c47b0cb4
MK
5881 }
5882 }
5883
96fcacb7 5884 if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_RN)
c47b0cb4
MK
5885 return OP_MEM_I0;
5886
96fcacb7 5887 if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM1)
c47b0cb4
MK
5888 {
5889 switch (get_attr_opx_access (insn))
5890 {
5891 case OPX_ACCESS_W:
5892 return OP_MEM_I1;
5893
5894 default:
96fcacb7
MK
5895 gcc_assert (!reload_completed);
5896 return OP_MEM_I1;
c47b0cb4
MK
5897 }
5898 }
5899
96fcacb7
MK
5900 gcc_assert (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM6);
5901 gcc_assert (!reload_completed);
5902 return OP_MEM_I1;
c47b0cb4
MK
5903}
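/* The OP_MEM_* values above appear to encode the number of memory
   reads and writes an insn performs: e.g. OP_MEM_10 is one read and
   no write, OP_MEM_01 one write, OP_MEM_11 a read and a write, and an
   "I" in place of a digit marks an indexed (EA mode 6) access.  */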
5904
96fcacb7
MK
5905/* Data for ColdFire V4 index bypass.
5906 Producer modifies register that is used as index in consumer with
5907 specified scale. */
5908static struct
b8c96320 5909{
96fcacb7
MK
5910 /* Producer instruction. */
5911 rtx pro;
826fadba 5912
96fcacb7
MK
5913 /* Consumer instruction. */
5914 rtx con;
b8c96320 5915
96fcacb7
MK
5916 /* Scale of indexed memory access within consumer.
5917 Or zero if bypass should not be effective at the moment. */
5918 int scale;
5919} sched_cfv4_bypass_data;
b8c96320
MK
5920
5921/* An empty state that is used in m68k_sched_adjust_cost. */
5922static state_t sched_adjust_cost_state;
5923
5924/* Implement adjust_cost scheduler hook.
5925 Return the adjusted COST of the dependency between DEF_INSN and INSN. */
5926static int
b505225b
TS
5927m68k_sched_adjust_cost (rtx_insn *insn, int, rtx_insn *def_insn, int cost,
5928 unsigned int)
b8c96320
MK
5929{
5930 int delay;
5931
5932 if (recog_memoized (def_insn) < 0
5933 || recog_memoized (insn) < 0)
5934 return cost;
5935
96fcacb7
MK
5936 if (sched_cfv4_bypass_data.scale == 1)
5937 /* Handle ColdFire V4 bypass for indexed address with 1x scale. */
5938 {
5939 /* haifa-sched.c: insn_cost () calls bypass_p () just before
5940 targetm.sched.adjust_cost (). Hence, we can be relatively sure
5941 that the data in sched_cfv4_bypass_data is up to date. */
5942 gcc_assert (sched_cfv4_bypass_data.pro == def_insn
5943 && sched_cfv4_bypass_data.con == insn);
5944
5945 if (cost < 3)
5946 cost = 3;
5947
5948 sched_cfv4_bypass_data.pro = NULL;
5949 sched_cfv4_bypass_data.con = NULL;
5950 sched_cfv4_bypass_data.scale = 0;
5951 }
5952 else
5953 gcc_assert (sched_cfv4_bypass_data.pro == NULL
5954 && sched_cfv4_bypass_data.con == NULL
5955 && sched_cfv4_bypass_data.scale == 0);
5956
b8c96320
MK
5957 /* Don't try to issue INSN earlier than DFA permits.
5958 This is especially useful for instructions that write to memory,
5959 as their true dependence (default) latency is better set to 0
5960 to work around alias analysis limitations.
5961 This is, in fact, a machine independent tweak, so, probably,
5962 it should be moved to haifa-sched.c: insn_cost (). */
b8c96320
MK
5963 delay = min_insn_conflict_delay (sched_adjust_cost_state, def_insn, insn);
5964 if (delay > cost)
5965 cost = delay;
5966
5967 return cost;
5968}
5969
96fcacb7
MK
5970/* Return the maximum number of insns that can be scheduled on a single cycle. */
5971static int
5972m68k_sched_issue_rate (void)
5973{
5974 switch (m68k_sched_cpu)
5975 {
5976 case CPU_CFV1:
5977 case CPU_CFV2:
5978 case CPU_CFV3:
5979 return 1;
5980
5981 case CPU_CFV4:
5982 return 2;
5983
5984 default:
5985 gcc_unreachable ();
5986 return 0;
5987 }
5988}
5989
826fadba
MK
5990/* Maximum length of an instruction, in words, for the current CPU.
5991 E.g. it is 3 for any ColdFire core. */
5992static int max_insn_size;
5993
5994/* Data to model instruction buffer of CPU. */
5995struct _sched_ib
5996{
96fcacb7
MK
5997 /* True if the instruction buffer is modeled for the current CPU. */
5998 bool enabled_p;
5999
826fadba
MK
6000 /* Size of the instruction buffer in words. */
6001 int size;
6002
6003 /* Number of filled words in the instruction buffer. */
6004 int filled;
6005
6006 /* Additional information about instruction buffer for CPUs that have
6007 a buffer of instruction records, rather than a plain buffer
6008 of instruction words. */
6009 struct _sched_ib_records
6010 {
6011 /* Size of buffer in records. */
6012 int n_insns;
b8c96320 6013
826fadba
MK
6014 /* Array to hold data on adjustments made to the size of the buffer. */
6015 int *adjust;
b8c96320 6016
826fadba
MK
6017 /* Index of the above array. */
6018 int adjust_index;
6019 } records;
6020
6021 /* An insn that reserves (marks empty) one word in the instruction buffer. */
6022 rtx insn;
6023};
6024
6025static struct _sched_ib sched_ib;
b8c96320
MK
6026
6027/* ID of memory unit. */
6028static int sched_mem_unit_code;
6029
6030/* Implementation of the targetm.sched.variable_issue () hook.
6031 It is called after INSN was issued. It returns the number of insns
6032 that can possibly get scheduled on the current cycle.
6033 It is used here to determine the effect of INSN on the instruction
6034 buffer. */
6035static int
6036m68k_sched_variable_issue (FILE *sched_dump ATTRIBUTE_UNUSED,
6037 int sched_verbose ATTRIBUTE_UNUSED,
ac44248e 6038 rtx_insn *insn, int can_issue_more)
b8c96320
MK
6039{
6040 int insn_size;
6041
96fcacb7 6042 if (recog_memoized (insn) >= 0 && get_attr_type (insn) != TYPE_IGNORE)
b8c96320 6043 {
826fadba
MK
6044 switch (m68k_sched_cpu)
6045 {
6046 case CPU_CFV1:
6047 case CPU_CFV2:
96fcacb7 6048 insn_size = sched_get_attr_size_int (insn);
826fadba
MK
6049 break;
6050
6051 case CPU_CFV3:
96fcacb7 6052 insn_size = sched_get_attr_size_int (insn);
826fadba
MK
6053
6054 /* ColdFire V3 and V4 cores have instruction buffers that can
6055 accumulate up to 8 instructions regardless of instructions'
6056 sizes. So we should take care not to "prefetch" 24 one-word
6057 or 12 two-word instructions.
6058 To model this behavior we temporarily decrease size of the
6059 buffer by (max_insn_size - insn_size) for next 7 instructions. */
6060 {
6061 int adjust;
6062
6063 adjust = max_insn_size - insn_size;
6064 sched_ib.size -= adjust;
6065
6066 if (sched_ib.filled > sched_ib.size)
6067 sched_ib.filled = sched_ib.size;
6068
6069 sched_ib.records.adjust[sched_ib.records.adjust_index] = adjust;
6070 }
6071
6072 ++sched_ib.records.adjust_index;
6073 if (sched_ib.records.adjust_index == sched_ib.records.n_insns)
6074 sched_ib.records.adjust_index = 0;
6075
6076 /* Undo the adjustment we did 7 instructions ago. */
6077 sched_ib.size
6078 += sched_ib.records.adjust[sched_ib.records.adjust_index];
6079
6080 break;
b8c96320 6081
96fcacb7
MK
6082 case CPU_CFV4:
6083 gcc_assert (!sched_ib.enabled_p);
6084 insn_size = 0;
6085 break;
6086
826fadba
MK
6087 default:
6088 gcc_unreachable ();
6089 }
b8c96320 6090
3162fdf4
MK
6091 if (insn_size > sched_ib.filled)
6092 /* Scheduling for register pressure does not always take DFA into
6093 account. Work around the instruction buffer not being filled enough. */
6094 {
60867e8c 6095 gcc_assert (sched_pressure == SCHED_PRESSURE_WEIGHTED);
3162fdf4
MK
6096 insn_size = sched_ib.filled;
6097 }
6098
b8c96320
MK
6099 --can_issue_more;
6100 }
6101 else if (GET_CODE (PATTERN (insn)) == ASM_INPUT
6102 || asm_noperands (PATTERN (insn)) >= 0)
826fadba 6103 insn_size = sched_ib.filled;
b8c96320
MK
6104 else
6105 insn_size = 0;
6106
826fadba 6107 sched_ib.filled -= insn_size;
b8c96320
MK
6108
6109 return can_issue_more;
6110}
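/* Numeric example for the CFV3 case above: with max_insn_size == 3, a
   one-word insn yields adjust == 2, so the effective buffer size
   temporarily drops from 24 to 22 words until seven further insns
   have been issued, modelling the 8-record limit of the buffer.  */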
6111
96fcacb7
MK
6112/* Return how many instructions the scheduler should look ahead to choose the
6113 best one. */
6114static int
6115m68k_sched_first_cycle_multipass_dfa_lookahead (void)
b8c96320 6116{
96fcacb7 6117 return m68k_sched_issue_rate () - 1;
b8c96320
MK
6118}
6119
7ecb00a6 6120/* Implementation of targetm.sched.init_global () hook.
b8c96320
MK
6121 It is invoked once per scheduling pass and is used here
6122 to initialize scheduler constants. */
6123static void
6124m68k_sched_md_init_global (FILE *sched_dump ATTRIBUTE_UNUSED,
6125 int sched_verbose ATTRIBUTE_UNUSED,
6126 int n_insns ATTRIBUTE_UNUSED)
6127{
96fcacb7
MK
6128 /* Check that all instructions have DFA reservations and
6129 that all instructions can be issued from a clean state. */
e28c2052
MM
6130 if (flag_checking)
6131 {
6132 rtx_insn *insn;
6133 state_t state;
b8c96320 6134
e28c2052 6135 state = alloca (state_size ());
b8c96320 6136
e28c2052
MM
6137 for (insn = get_insns (); insn != NULL; insn = NEXT_INSN (insn))
6138 {
6139 if (INSN_P (insn) && recog_memoized (insn) >= 0)
6140 {
6141 gcc_assert (insn_has_dfa_reservation_p (insn));
b8c96320 6142
e28c2052
MM
6143 state_reset (state);
6144 if (state_transition (state, insn) >= 0)
6145 gcc_unreachable ();
6146 }
6147 }
6148 }
b8c96320
MK
6149
6150 /* Set up the target cpu. */
96fcacb7
MK
6151
6152 /* ColdFire V4 has a set of features to keep its instruction buffer full
6153 (e.g., a separate memory bus for instructions) and, hence, we do not model
6154 the buffer for this CPU. */
6155 sched_ib.enabled_p = (m68k_sched_cpu != CPU_CFV4);
6156
b8c96320
MK
6157 switch (m68k_sched_cpu)
6158 {
96fcacb7
MK
6159 case CPU_CFV4:
6160 sched_ib.filled = 0;
6161
6162 /* FALLTHRU */
6163
826fadba
MK
6164 case CPU_CFV1:
6165 case CPU_CFV2:
6166 max_insn_size = 3;
6167 sched_ib.records.n_insns = 0;
6168 sched_ib.records.adjust = NULL;
6169 break;
6170
6171 case CPU_CFV3:
6172 max_insn_size = 3;
6173 sched_ib.records.n_insns = 8;
5ead67f6 6174 sched_ib.records.adjust = XNEWVEC (int, sched_ib.records.n_insns);
b8c96320
MK
6175 break;
6176
6177 default:
6178 gcc_unreachable ();
6179 }
6180
826fadba
MK
6181 sched_mem_unit_code = get_cpu_unit_code ("cf_mem1");
6182
b8c96320
MK
6183 sched_adjust_cost_state = xmalloc (state_size ());
6184 state_reset (sched_adjust_cost_state);
6185
6186 start_sequence ();
6187 emit_insn (gen_ib ());
826fadba 6188 sched_ib.insn = get_insns ();
b8c96320
MK
6189 end_sequence ();
6190}
6191
6192/* Scheduling pass is now finished. Free/reset static variables. */
6193static void
6194m68k_sched_md_finish_global (FILE *dump ATTRIBUTE_UNUSED,
6195 int verbose ATTRIBUTE_UNUSED)
6196{
826fadba 6197 sched_ib.insn = NULL;
b8c96320
MK
6198
6199 free (sched_adjust_cost_state);
6200 sched_adjust_cost_state = NULL;
6201
6202 sched_mem_unit_code = 0;
826fadba
MK
6203
6204 free (sched_ib.records.adjust);
6205 sched_ib.records.adjust = NULL;
6206 sched_ib.records.n_insns = 0;
6207 max_insn_size = 0;
b8c96320
MK
6208}
6209
7ecb00a6 6210/* Implementation of targetm.sched.init () hook.
b8c96320
MK
6211 It is invoked each time the scheduler starts on a new block (basic block or
6212 extended basic block). */
6213static void
6214m68k_sched_md_init (FILE *sched_dump ATTRIBUTE_UNUSED,
6215 int sched_verbose ATTRIBUTE_UNUSED,
6216 int n_insns ATTRIBUTE_UNUSED)
6217{
826fadba
MK
6218 switch (m68k_sched_cpu)
6219 {
6220 case CPU_CFV1:
6221 case CPU_CFV2:
6222 sched_ib.size = 6;
6223 break;
6224
6225 case CPU_CFV3:
6226 sched_ib.size = sched_ib.records.n_insns * max_insn_size;
6227
6228 memset (sched_ib.records.adjust, 0,
6229 sched_ib.records.n_insns * sizeof (*sched_ib.records.adjust));
6230 sched_ib.records.adjust_index = 0;
6231 break;
6232
96fcacb7
MK
6233 case CPU_CFV4:
6234 gcc_assert (!sched_ib.enabled_p);
6235 sched_ib.size = 0;
6236 break;
6237
826fadba
MK
6238 default:
6239 gcc_unreachable ();
6240 }
6241
96fcacb7
MK
6242 if (sched_ib.enabled_p)
6243 /* haifa-sched.c: schedule_block () calls advance_cycle () just before
6244 the first cycle. Work around that. */
6245 sched_ib.filled = -2;
b8c96320
MK
6246}
6247
6248/* Implementation of targetm.sched.dfa_pre_advance_cycle () hook.
6249 It is invoked just before the current cycle finishes and is used here
6250 to track whether the instruction buffer got its two words this cycle. */
6251static void
6252m68k_sched_dfa_pre_advance_cycle (void)
6253{
96fcacb7
MK
6254 if (!sched_ib.enabled_p)
6255 return;
6256
b8c96320
MK
6257 if (!cpu_unit_reservation_p (curr_state, sched_mem_unit_code))
6258 {
826fadba 6259 sched_ib.filled += 2;
b8c96320 6260
826fadba
MK
6261 if (sched_ib.filled > sched_ib.size)
6262 sched_ib.filled = sched_ib.size;
b8c96320
MK
6263 }
6264}
6265
6266/* Implementation of targetm.sched.dfa_post_advance_cycle () hook.
6267 It is invoked just after a new cycle begins and is used here
6268 to set up the number of filled words in the instruction buffer so that
6269 instructions which won't have all their words prefetched are
6270 stalled for a cycle. */
6271static void
6272m68k_sched_dfa_post_advance_cycle (void)
6273{
6274 int i;
b8c96320 6275
96fcacb7
MK
6276 if (!sched_ib.enabled_p)
6277 return;
6278
b8c96320
MK
6279 /* Set up the number of prefetched instruction words in the instruction
6280 buffer. */
826fadba
MK
6281 i = max_insn_size - sched_ib.filled;
6282
6283 while (--i >= 0)
b8c96320 6284 {
826fadba 6285 if (state_transition (curr_state, sched_ib.insn) >= 0)
5f3b7d7c
MK
6286 /* Pick up scheduler state. */
6287 ++sched_ib.filled;
b8c96320
MK
6288 }
6289}
96fcacb7
MK
6290
6291/* Return X or Y (depending on OPX_P) operand of INSN,
6292 if it is an integer register, or NULL otherwise. */
6293static rtx
647d790d 6294sched_get_reg_operand (rtx_insn *insn, bool opx_p)
96fcacb7
MK
6295{
6296 rtx op = NULL;
6297
6298 if (opx_p)
6299 {
6300 if (get_attr_opx_type (insn) == OPX_TYPE_RN)
6301 {
6302 op = sched_get_operand (insn, true);
6303 gcc_assert (op != NULL);
6304
6305 if (!reload_completed && !REG_P (op))
6306 return NULL;
6307 }
6308 }
6309 else
6310 {
6311 if (get_attr_opy_type (insn) == OPY_TYPE_RN)
6312 {
6313 op = sched_get_operand (insn, false);
6314 gcc_assert (op != NULL);
6315
6316 if (!reload_completed && !REG_P (op))
6317 return NULL;
6318 }
6319 }
6320
6321 return op;
6322}
6323
6324/* Return true if the X or Y (depending on OPX_P) operand of INSN
6325 is a MEM. */
6326static bool
84034c69 6327sched_mem_operand_p (rtx_insn *insn, bool opx_p)
96fcacb7
MK
6328{
6329 switch (sched_get_opxy_mem_type (insn, opx_p))
6330 {
6331 case OP_TYPE_MEM1:
6332 case OP_TYPE_MEM6:
6333 return true;
6334
6335 default:
6336 return false;
6337 }
6338}
6339
6340/* Return the MEM operand of INSN that is read (MUST_READ_P) or
6341 written (MUST_WRITE_P). */
6342static rtx
647d790d 6343sched_get_mem_operand (rtx_insn *insn, bool must_read_p, bool must_write_p)
96fcacb7
MK
6344{
6345 bool opx_p;
6346 bool opy_p;
6347
6348 opx_p = false;
6349 opy_p = false;
6350
6351 if (must_read_p)
6352 {
6353 opx_p = true;
6354 opy_p = true;
6355 }
6356
6357 if (must_write_p)
6358 {
6359 opx_p = true;
6360 opy_p = false;
6361 }
6362
6363 if (opy_p && sched_mem_operand_p (insn, false))
6364 return sched_get_operand (insn, false);
6365
6366 if (opx_p && sched_mem_operand_p (insn, true))
6367 return sched_get_operand (insn, true);
6368
6369 gcc_unreachable ();
6370 return NULL;
6371}
6372
6373/* Return non-zero if PRO modifies register used as part of
6374 address in CON. */
6375int
647d790d 6376m68k_sched_address_bypass_p (rtx_insn *pro, rtx_insn *con)
96fcacb7
MK
6377{
6378 rtx pro_x;
6379 rtx con_mem_read;
6380
6381 pro_x = sched_get_reg_operand (pro, true);
6382 if (pro_x == NULL)
6383 return 0;
6384
6385 con_mem_read = sched_get_mem_operand (con, true, false);
6386 gcc_assert (con_mem_read != NULL);
6387
6388 if (reg_mentioned_p (pro_x, con_mem_read))
6389 return 1;
6390
6391 return 0;
6392}
6393
6394/* Helper function for m68k_sched_indexed_address_bypass_p.
6395 If PRO modifies a register used as an index in CON,
6396 return the scale of the indexed memory access in CON. Return zero otherwise. */
6397static int
647d790d 6398sched_get_indexed_address_scale (rtx_insn *pro, rtx_insn *con)
96fcacb7
MK
6399{
6400 rtx reg;
6401 rtx mem;
6402 struct m68k_address address;
6403
6404 reg = sched_get_reg_operand (pro, true);
6405 if (reg == NULL)
6406 return 0;
6407
6408 mem = sched_get_mem_operand (con, true, false);
6409 gcc_assert (mem != NULL && MEM_P (mem));
6410
6411 if (!m68k_decompose_address (GET_MODE (mem), XEXP (mem, 0), reload_completed,
6412 &address))
6413 gcc_unreachable ();
6414
6415 if (REGNO (reg) == REGNO (address.index))
6416 {
6417 gcc_assert (address.scale != 0);
6418 return address.scale;
6419 }
6420
6421 return 0;
6422}
6423
6424/* Return non-zero if PRO modifies register used
6425 as index with scale 2 or 4 in CON. */
6426int
647d790d 6427m68k_sched_indexed_address_bypass_p (rtx_insn *pro, rtx_insn *con)
96fcacb7
MK
6428{
6429 gcc_assert (sched_cfv4_bypass_data.pro == NULL
6430 && sched_cfv4_bypass_data.con == NULL
6431 && sched_cfv4_bypass_data.scale == 0);
6432
6433 switch (sched_get_indexed_address_scale (pro, con))
6434 {
6435 case 1:
6436 /* We can't have a variable latency bypass, so
6437 remember to adjust the insn cost in adjust_cost hook. */
6438 sched_cfv4_bypass_data.pro = pro;
6439 sched_cfv4_bypass_data.con = con;
6440 sched_cfv4_bypass_data.scale = 1;
6441 return 0;
6442
6443 case 2:
6444 case 4:
6445 return 1;
6446
6447 default:
6448 return 0;
6449 }
6450}
75df395f 6451
e0601576
RH
6452/* We generate a two-instruction program at M_TRAMP:
6453 movea.l &CHAIN_VALUE,%a0
6454 jmp FNADDR
6455 where %a0 can be modified by changing STATIC_CHAIN_REGNUM. */
6456
6457static void
6458m68k_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
6459{
6460 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
6461 rtx mem;
6462
6463 gcc_assert (ADDRESS_REGNO_P (STATIC_CHAIN_REGNUM));
6464
6465 mem = adjust_address (m_tramp, HImode, 0);
6466 emit_move_insn (mem, GEN_INT(0x207C + ((STATIC_CHAIN_REGNUM-8) << 9)));
6467 mem = adjust_address (m_tramp, SImode, 2);
6468 emit_move_insn (mem, chain_value);
6469
6470 mem = adjust_address (m_tramp, HImode, 6);
6471 emit_move_insn (mem, GEN_INT(0x4EF9));
6472 mem = adjust_address (m_tramp, SImode, 8);
6473 emit_move_insn (mem, fnaddr);
6474
6475 FINALIZE_TRAMPOLINE (XEXP (m_tramp, 0));
6476}
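/* Resulting trampoline layout, assuming the default static chain
   register %a0 (regno 8), so that the first opcode word is 0x207c:
	offset 0:  0x207c		movea.l #<chain_value>,%a0
	offset 2:  <chain_value>	(4 bytes)
	offset 6:  0x4ef9		jmp <fnaddr>.l
	offset 8:  <fnaddr>		(4 bytes)
   for a total of 12 bytes.  */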
6477
079e7538
NF
6478/* On the 68000, the RTS insn cannot pop anything.
6479 On the 68010, the RTD insn may be used to pop them if the number
6480 of args is fixed, but if the number is variable then the caller
6481 must pop them all. RTD can't be used for library calls now
6482 because the library is compiled with the Unix compiler.
6483 Use of RTD is a selectable option, since it is incompatible with
6484 standard Unix calling sequences. If the option is not selected,
6485 the caller must always pop the args. */
6486
6487static int
6488m68k_return_pops_args (tree fundecl, tree funtype, int size)
6489{
6490 return ((TARGET_RTD
6491 && (!fundecl
6492 || TREE_CODE (fundecl) != IDENTIFIER_NODE)
f38958e8 6493 && (!stdarg_p (funtype)))
079e7538
NF
6494 ? size : 0);
6495}
6496
5efd84c5
NF
6497/* Make sure everything's fine if we *don't* have a given processor.
6498 This assumes that putting a register in fixed_regs will keep the
6499 compiler's mitts completely off it. We don't bother to zero it out
6500 of register classes. */
6501
6502static void
6503m68k_conditional_register_usage (void)
6504{
6505 int i;
6506 HARD_REG_SET x;
6507 if (!TARGET_HARD_FLOAT)
6508 {
6509 COPY_HARD_REG_SET (x, reg_class_contents[(int)FP_REGS]);
6510 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
6511 if (TEST_HARD_REG_BIT (x, i))
6512 fixed_regs[i] = call_used_regs[i] = 1;
6513 }
6514 if (flag_pic)
6515 fixed_regs[PIC_REG] = call_used_regs[PIC_REG] = 1;
6516}
6517
8b281334
RH
6518static void
6519m68k_init_sync_libfuncs (void)
6520{
6521 init_sync_libfuncs (UNITS_PER_WORD);
6522}
6523
175aed00
AS
6524/* Implements EPILOGUE_USES. All registers are live on exit from an
6525 interrupt routine. */
6526bool
6527m68k_epilogue_uses (int regno ATTRIBUTE_UNUSED)
6528{
6529 return (reload_completed
6530 && (m68k_get_function_kind (current_function_decl)
6531 == m68k_fk_interrupt_handler));
6532}
6533
75df395f 6534#include "gt-m68k.h"