79e68feb 1/* Subroutines for insn-output.c for Motorola 68000 family.
8636be86 2 Copyright (C) 1987, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
b6a9c30c 3 2001, 2003, 2004, 2005, 2006, 2007
4592bdcb 4 Free Software Foundation, Inc.
79e68feb 5
7ec022b2 6This file is part of GCC.
79e68feb 7
7ec022b2 8GCC is free software; you can redistribute it and/or modify
79e68feb 9it under the terms of the GNU General Public License as published by
2f83c7d6 10the Free Software Foundation; either version 3, or (at your option)
79e68feb 11any later version.
12
7ec022b2 13GCC is distributed in the hope that it will be useful,
79e68feb 14but WITHOUT ANY WARRANTY; without even the implied warranty of
15MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16GNU General Public License for more details.
17
18You should have received a copy of the GNU General Public License
2f83c7d6 19along with GCC; see the file COPYING3. If not see
20<http://www.gnu.org/licenses/>. */
79e68feb 21
79e68feb 22#include "config.h"
f5220a5d 23#include "system.h"
4977bab6
ZW
24#include "coretypes.h"
25#include "tm.h"
da932f04 26#include "tree.h"
79e68feb 27#include "rtl.h"
49ad7cfa 28#include "function.h"
79e68feb
RS
29#include "regs.h"
30#include "hard-reg-set.h"
31#include "real.h"
32#include "insn-config.h"
33#include "conditions.h"
79e68feb
RS
34#include "output.h"
35#include "insn-attr.h"
1d8eaa6b 36#include "recog.h"
f5220a5d 37#include "toplev.h"
6d5f49b2
RH
38#include "expr.h"
39#include "reload.h"
5505f548 40#include "tm_p.h"
672a6f42
NB
41#include "target.h"
42#include "target-def.h"
2cc07db4 43#include "debug.h"
79e68feb 44#include "flags.h"
6fb5fa3c 45#include "df.h"
b8c96320
MK
46/* ??? Need to add a dependency between m68k.o and sched-int.h. */
47#include "sched-int.h"
48#include "insn-codes.h"
79e68feb 49
a4e9467d
RZ
50enum reg_class regno_reg_class[] =
51{
52 DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
53 DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
54 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
55 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
56 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
57 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
58 ADDR_REGS
59};
60
61
a40ed0f3
KH
62/* The minimum number of integer registers that we want to save with the
63 movem instruction. Using two movel instructions instead of a single
64 moveml is about 15% faster for the 68020 and 68030 at no expense in
65 code size. */
66#define MIN_MOVEM_REGS 3
67
68/* The minimum number of floating point registers that we want to save
69 with the fmovem instruction. */
70#define MIN_FMOVEM_REGS 1
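/* Editorial note (illustrative, not part of the original sources): with
   MIN_MOVEM_REGS set to 3, a frame that saves two integer registers uses
   two individual move.l instructions in m68k_expand_prologue below, while
   a frame that saves three or more switches to a single moveml.  */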
71
ff482c8d 72/* Structure describing stack frame layout. */
3d74bc09
BI
73struct m68k_frame
74{
75 /* Stack pointer to frame pointer offset. */
48ed72a4 76 HOST_WIDE_INT offset;
3d74bc09
BI
77
78 /* Offset of FPU registers. */
79 HOST_WIDE_INT foffset;
80
81 /* Frame size in bytes (rounded up). */
48ed72a4 82 HOST_WIDE_INT size;
3d74bc09
BI
83
84 /* Data and address register. */
48ed72a4
PB
85 int reg_no;
86 unsigned int reg_mask;
3d74bc09
BI
87
88 /* FPU registers. */
48ed72a4
PB
89 int fpu_no;
90 unsigned int fpu_mask;
3d74bc09
BI
91
92 /* Offsets relative to ARG_POINTER. */
48ed72a4
PB
93 HOST_WIDE_INT frame_pointer_offset;
94 HOST_WIDE_INT stack_pointer_offset;
3d74bc09
BI
95
96 /* Function which the above information refers to. */
97 int funcdef_no;
48ed72a4
PB
98};
99
3d74bc09
BI
100/* Current frame information calculated by m68k_compute_frame_layout(). */
101static struct m68k_frame current_frame;
102
fc2241eb
RS
103/* Structure describing an m68k address.
104
105 If CODE is UNKNOWN, the address is BASE + INDEX * SCALE + OFFSET,
106 with null fields evaluating to 0. Here:
107
108 - BASE satisfies m68k_legitimate_base_reg_p
109 - INDEX satisfies m68k_legitimate_index_reg_p
110 - OFFSET satisfies m68k_legitimate_constant_address_p
111
112 INDEX is either HImode or SImode. The other fields are SImode.
113
114 If CODE is PRE_DEC, the address is -(BASE). If CODE is POST_INC,
115 the address is (BASE)+. */
116struct m68k_address {
117 enum rtx_code code;
118 rtx base;
119 rtx index;
120 rtx offset;
121 int scale;
122};
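/* Editorial example (not part of the original sources): the 68020 operand
   (8,%a0,%d1.l*4) would be decomposed as CODE == UNKNOWN, BASE == %a0,
   INDEX == %d1 (SImode), SCALE == 4 and OFFSET == const_int 8.  */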
123
b8c96320
MK
124static int m68k_sched_adjust_cost (rtx, rtx, rtx, int);
125static int m68k_sched_variable_issue (FILE *, int, rtx, int);
126static void m68k_sched_md_init_global (FILE *, int, int);
127static void m68k_sched_md_finish_global (FILE *, int);
128static void m68k_sched_md_init (FILE *, int, int);
129static void m68k_sched_dfa_pre_advance_cycle (void);
130static void m68k_sched_dfa_post_advance_cycle (void);
131
4af06170 132static bool m68k_handle_option (size_t, const char *, int);
8a4a2253
BI
133static rtx find_addr_reg (rtx);
134static const char *singlemove_string (rtx *);
45849738 135#ifdef M68K_TARGET_COFF
c18a5b6c 136static void m68k_coff_asm_named_section (const char *, unsigned int, tree);
45849738 137#endif /* M68K_TARGET_COFF */
8a4a2253
BI
138static void m68k_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
139 HOST_WIDE_INT, tree);
8636be86 140static rtx m68k_struct_value_rtx (tree, int);
48ed72a4
PB
141static tree m68k_handle_fndecl_attribute (tree *node, tree name,
142 tree args, int flags,
143 bool *no_add_attrs);
3d74bc09 144static void m68k_compute_frame_layout (void);
48ed72a4 145static bool m68k_save_reg (unsigned int regno, bool interrupt_handler);
f7e70894 146static bool m68k_ok_for_sibcall_p (tree, tree);
8a4a2253 147static bool m68k_rtx_costs (rtx, int, int, int *);
1c445f03
NS
148#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
149static bool m68k_return_in_memory (tree, tree);
150#endif
79e68feb
RS
151\f
152
a2ef3db7 153/* Specify the identification number of the library being built.  */
4af06170 154const char *m68k_library_id_string = "_current_shared_library_a5_offset_";
ef1dbfb0 155
2b3600ac
JL
156/* Nonzero if the last compare/test insn had FP operands. The
157 sCC expanders peek at this to determine what to do for the
158 68060, which has no fsCC instructions. */
159int m68k_last_compare_had_fp_operands;
672a6f42
NB
160\f
161/* Initialize the GCC target structure. */
301d03af
RS
162
163#if INT_OP_GROUP == INT_OP_DOT_WORD
164#undef TARGET_ASM_ALIGNED_HI_OP
165#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
166#endif
167
168#if INT_OP_GROUP == INT_OP_NO_DOT
169#undef TARGET_ASM_BYTE_OP
170#define TARGET_ASM_BYTE_OP "\tbyte\t"
171#undef TARGET_ASM_ALIGNED_HI_OP
172#define TARGET_ASM_ALIGNED_HI_OP "\tshort\t"
173#undef TARGET_ASM_ALIGNED_SI_OP
174#define TARGET_ASM_ALIGNED_SI_OP "\tlong\t"
175#endif
176
177#if INT_OP_GROUP == INT_OP_DC
178#undef TARGET_ASM_BYTE_OP
179#define TARGET_ASM_BYTE_OP "\tdc.b\t"
180#undef TARGET_ASM_ALIGNED_HI_OP
181#define TARGET_ASM_ALIGNED_HI_OP "\tdc.w\t"
182#undef TARGET_ASM_ALIGNED_SI_OP
183#define TARGET_ASM_ALIGNED_SI_OP "\tdc.l\t"
184#endif
185
186#undef TARGET_ASM_UNALIGNED_HI_OP
187#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
188#undef TARGET_ASM_UNALIGNED_SI_OP
189#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
190
c590b625
RH
191#undef TARGET_ASM_OUTPUT_MI_THUNK
192#define TARGET_ASM_OUTPUT_MI_THUNK m68k_output_mi_thunk
bdabc150 193#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
3101faab 194#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
c590b625 195
1bc7c5b6
ZW
196#undef TARGET_ASM_FILE_START_APP_OFF
197#define TARGET_ASM_FILE_START_APP_OFF true
198
b8c96320
MK
199#undef TARGET_SCHED_ADJUST_COST
200#define TARGET_SCHED_ADJUST_COST m68k_sched_adjust_cost
201
202#undef TARGET_SCHED_VARIABLE_ISSUE
203#define TARGET_SCHED_VARIABLE_ISSUE m68k_sched_variable_issue
204
205#undef TARGET_SCHED_INIT_GLOBAL
206#define TARGET_SCHED_INIT_GLOBAL m68k_sched_md_init_global
207
208#undef TARGET_SCHED_FINISH_GLOBAL
209#define TARGET_SCHED_FINISH_GLOBAL m68k_sched_md_finish_global
210
211#undef TARGET_SCHED_INIT
212#define TARGET_SCHED_INIT m68k_sched_md_init
213
214#undef TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE
215#define TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE m68k_sched_dfa_pre_advance_cycle
216
217#undef TARGET_SCHED_DFA_POST_ADVANCE_CYCLE
218#define TARGET_SCHED_DFA_POST_ADVANCE_CYCLE m68k_sched_dfa_post_advance_cycle
219
4af06170
RS
220#undef TARGET_HANDLE_OPTION
221#define TARGET_HANDLE_OPTION m68k_handle_option
222
3c50106f
RH
223#undef TARGET_RTX_COSTS
224#define TARGET_RTX_COSTS m68k_rtx_costs
225
48ed72a4
PB
226#undef TARGET_ATTRIBUTE_TABLE
227#define TARGET_ATTRIBUTE_TABLE m68k_attribute_table
228
8636be86 229#undef TARGET_PROMOTE_PROTOTYPES
586de218 230#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
8636be86
KH
231
232#undef TARGET_STRUCT_VALUE_RTX
233#define TARGET_STRUCT_VALUE_RTX m68k_struct_value_rtx
234
7ffb5e78
RS
235#undef TARGET_CANNOT_FORCE_CONST_MEM
236#define TARGET_CANNOT_FORCE_CONST_MEM m68k_illegitimate_symbolic_constant_p
237
f7e70894
RS
238#undef TARGET_FUNCTION_OK_FOR_SIBCALL
239#define TARGET_FUNCTION_OK_FOR_SIBCALL m68k_ok_for_sibcall_p
240
1c445f03
NS
241#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
242#undef TARGET_RETURN_IN_MEMORY
243#define TARGET_RETURN_IN_MEMORY m68k_return_in_memory
244#endif
245
48ed72a4
PB
246static const struct attribute_spec m68k_attribute_table[] =
247{
248 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
2bccb817 249 { "interrupt", 0, 0, true, false, false, m68k_handle_fndecl_attribute },
48ed72a4 250 { "interrupt_handler", 0, 0, true, false, false, m68k_handle_fndecl_attribute },
a4242737 251 { "interrupt_thread", 0, 0, true, false, false, m68k_handle_fndecl_attribute },
48ed72a4
PB
252 { NULL, 0, 0, false, false, false, NULL }
253};
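/* Editorial note: a typical use of the attributes above is

     void isr (void) __attribute__ ((interrupt_handler));

   which makes m68k_save_reg (below) preserve call-clobbered registers
   that are live in the handler.  */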
254
f6897b10 255struct gcc_target targetm = TARGET_INITIALIZER;
672a6f42 256\f
900ec02d
JB
257/* Base flags for 68k ISAs. */
258#define FL_FOR_isa_00 FL_ISA_68000
259#define FL_FOR_isa_10 (FL_FOR_isa_00 | FL_ISA_68010)
260/* FL_68881 controls the default setting of -m68881. gcc has traditionally
261 generated 68881 code for 68020 and 68030 targets unless explicitly told
262 not to. */
263#define FL_FOR_isa_20 (FL_FOR_isa_10 | FL_ISA_68020 \
264 | FL_BITFIELD | FL_68881)
265#define FL_FOR_isa_40 (FL_FOR_isa_20 | FL_ISA_68040)
266#define FL_FOR_isa_cpu32 (FL_FOR_isa_10 | FL_ISA_68020)
267
268/* Base flags for ColdFire ISAs. */
269#define FL_FOR_isa_a (FL_COLDFIRE | FL_ISA_A)
270#define FL_FOR_isa_aplus (FL_FOR_isa_a | FL_ISA_APLUS | FL_CF_USP)
271/* Note ISA_B doesn't necessarily include USP (user stack pointer) support. */
272#define FL_FOR_isa_b (FL_FOR_isa_a | FL_ISA_B | FL_CF_HWDIV)
4e2b26aa 273/* ISA_C is not upwardly compatible with ISA_B. */
8c5c99dc 274#define FL_FOR_isa_c (FL_FOR_isa_a | FL_ISA_C | FL_CF_USP)
900ec02d
JB
275
276enum m68k_isa
277{
278 /* Traditional 68000 instruction sets. */
279 isa_00,
280 isa_10,
281 isa_20,
282 isa_40,
283 isa_cpu32,
284 /* ColdFire instruction set variants. */
285 isa_a,
286 isa_aplus,
287 isa_b,
288 isa_c,
289 isa_max
290};
291
292/* Information about one of the -march, -mcpu or -mtune arguments. */
293struct m68k_target_selection
294{
295 /* The argument being described. */
296 const char *name;
297
298 /* For -mcpu, this is the device selected by the option.
299 For -mtune and -march, it is a representative device
300 for the microarchitecture or ISA respectively. */
301 enum target_device device;
302
303 /* The M68K_DEVICE fields associated with DEVICE. See the comment
304 in m68k-devices.def for details. FAMILY is only valid for -mcpu. */
305 const char *family;
306 enum uarch_type microarch;
307 enum m68k_isa isa;
308 unsigned long flags;
309};
310
311/* A list of all devices in m68k-devices.def. Used for -mcpu selection. */
312static const struct m68k_target_selection all_devices[] =
313{
314#define M68K_DEVICE(NAME,ENUM_VALUE,FAMILY,MULTILIB,MICROARCH,ISA,FLAGS) \
315 { NAME, ENUM_VALUE, FAMILY, u##MICROARCH, ISA, FLAGS | FL_FOR_##ISA },
316#include "m68k-devices.def"
317#undef M68K_DEVICE
318 { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
319};
320
321/* A list of all ISAs, mapping each one to a representative device.
322 Used for -march selection. */
323static const struct m68k_target_selection all_isas[] =
324{
325 { "68000", m68000, NULL, u68000, isa_00, FL_FOR_isa_00 },
326 { "68010", m68010, NULL, u68010, isa_10, FL_FOR_isa_10 },
327 { "68020", m68020, NULL, u68020, isa_20, FL_FOR_isa_20 },
328 { "68030", m68030, NULL, u68030, isa_20, FL_FOR_isa_20 },
329 { "68040", m68040, NULL, u68040, isa_40, FL_FOR_isa_40 },
330 { "68060", m68060, NULL, u68060, isa_40, FL_FOR_isa_40 },
331 { "cpu32", cpu32, NULL, ucpu32, isa_20, FL_FOR_isa_cpu32 },
332 { "isaa", mcf5206e, NULL, ucfv2, isa_a, (FL_FOR_isa_a
333 | FL_CF_HWDIV) },
334 { "isaaplus", mcf5271, NULL, ucfv2, isa_aplus, (FL_FOR_isa_aplus
335 | FL_CF_HWDIV) },
336 { "isab", mcf5407, NULL, ucfv4, isa_b, FL_FOR_isa_b },
8c5c99dc
MK
337 { "isac", unk_device, NULL, ucfv4, isa_c, (FL_FOR_isa_c
338 | FL_CF_HWDIV) },
900ec02d
JB
339 { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
340};
341
342/* A list of all microarchitectures, mapping each one to a representative
343 device. Used for -mtune selection. */
344static const struct m68k_target_selection all_microarchs[] =
345{
346 { "68000", m68000, NULL, u68000, isa_00, FL_FOR_isa_00 },
347 { "68010", m68010, NULL, u68010, isa_10, FL_FOR_isa_10 },
348 { "68020", m68020, NULL, u68020, isa_20, FL_FOR_isa_20 },
349 { "68020-40", m68020, NULL, u68020_40, isa_20, FL_FOR_isa_20 },
350 { "68020-60", m68020, NULL, u68020_60, isa_20, FL_FOR_isa_20 },
351 { "68030", m68030, NULL, u68030, isa_20, FL_FOR_isa_20 },
352 { "68040", m68040, NULL, u68040, isa_40, FL_FOR_isa_40 },
353 { "68060", m68060, NULL, u68060, isa_40, FL_FOR_isa_40 },
354 { "cpu32", cpu32, NULL, ucpu32, isa_20, FL_FOR_isa_cpu32 },
8c5c99dc 355 { "cfv1", mcf51qe, NULL, ucfv1, isa_c, FL_FOR_isa_c },
900ec02d
JB
356 { "cfv2", mcf5206, NULL, ucfv2, isa_a, FL_FOR_isa_a },
357 { "cfv3", mcf5307, NULL, ucfv3, isa_a, (FL_FOR_isa_a
358 | FL_CF_HWDIV) },
359 { "cfv4", mcf5407, NULL, ucfv4, isa_b, FL_FOR_isa_b },
360 { "cfv4e", mcf547x, NULL, ucfv4e, isa_b, (FL_FOR_isa_b
361 | FL_CF_USP
362 | FL_CF_EMAC
363 | FL_CF_FPU) },
364 { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
365};
366\f
367/* The entries associated with the -mcpu, -march and -mtune settings,
368 or null for options that have not been used. */
369const struct m68k_target_selection *m68k_cpu_entry;
370const struct m68k_target_selection *m68k_arch_entry;
371const struct m68k_target_selection *m68k_tune_entry;
372
373/* Which CPU we are generating code for. */
374enum target_device m68k_cpu;
375
376/* Which microarchitecture to tune for. */
377enum uarch_type m68k_tune;
378
379/* Which FPU to use. */
380enum fpu_type m68k_fpu;
4af06170 381
900ec02d
JB
382/* The set of FL_* flags that apply to the target processor. */
383unsigned int m68k_cpu_flags;
29ca003a
RS
384
385/* Asm templates for calling or jumping to an arbitrary symbolic address,
386 or NULL if such calls or jumps are not supported. The address is held
387 in operand 0. */
388const char *m68k_symbolic_call;
389const char *m68k_symbolic_jump;
c47b0cb4
MK
390
391/* Enum variable that corresponds to m68k_symbolic_call values. */
392enum M68K_SYMBOLIC_CALL m68k_symbolic_call_var;
393
900ec02d
JB
394\f
395/* See whether TABLE has an entry with name NAME. Return true and
396 store the entry in *ENTRY if so, otherwise return false and
397 leave *ENTRY alone. */
398
399static bool
400m68k_find_selection (const struct m68k_target_selection **entry,
401 const struct m68k_target_selection *table,
402 const char *name)
403{
404 size_t i;
405
406 for (i = 0; table[i].name; i++)
407 if (strcmp (table[i].name, name) == 0)
408 {
409 *entry = table + i;
410 return true;
411 }
412 return false;
413}
4af06170
RS
414
415/* Implement TARGET_HANDLE_OPTION. */
416
417static bool
418m68k_handle_option (size_t code, const char *arg, int value)
419{
420 switch (code)
421 {
900ec02d
JB
422 case OPT_march_:
423 return m68k_find_selection (&m68k_arch_entry, all_isas, arg);
424
425 case OPT_mcpu_:
426 return m68k_find_selection (&m68k_cpu_entry, all_devices, arg);
427
428 case OPT_mtune_:
429 return m68k_find_selection (&m68k_tune_entry, all_microarchs, arg);
430
4af06170 431 case OPT_m5200:
900ec02d 432 return m68k_find_selection (&m68k_cpu_entry, all_devices, "5206");
4af06170
RS
433
434 case OPT_m5206e:
900ec02d 435 return m68k_find_selection (&m68k_cpu_entry, all_devices, "5206e");
4af06170
RS
436
437 case OPT_m528x:
900ec02d 438 return m68k_find_selection (&m68k_cpu_entry, all_devices, "528x");
4af06170
RS
439
440 case OPT_m5307:
900ec02d 441 return m68k_find_selection (&m68k_cpu_entry, all_devices, "5307");
4af06170
RS
442
443 case OPT_m5407:
900ec02d 444 return m68k_find_selection (&m68k_cpu_entry, all_devices, "5407");
4af06170 445
dcc21c4c 446 case OPT_mcfv4e:
900ec02d 447 return m68k_find_selection (&m68k_cpu_entry, all_devices, "547x");
dcc21c4c 448
4af06170
RS
449 case OPT_m68000:
450 case OPT_mc68000:
900ec02d 451 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68000");
4af06170 452
3197c489 453 case OPT_m68010:
900ec02d 454 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68010");
3197c489 455
4af06170
RS
456 case OPT_m68020:
457 case OPT_mc68020:
900ec02d 458 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68020");
4af06170
RS
459
460 case OPT_m68020_40:
900ec02d
JB
461 return (m68k_find_selection (&m68k_tune_entry, all_microarchs,
462 "68020-40")
463 && m68k_find_selection (&m68k_cpu_entry, all_devices, "68020"));
4af06170
RS
464
465 case OPT_m68020_60:
900ec02d
JB
466 return (m68k_find_selection (&m68k_tune_entry, all_microarchs,
467 "68020-60")
468 && m68k_find_selection (&m68k_cpu_entry, all_devices, "68020"));
4af06170
RS
469
470 case OPT_m68030:
900ec02d 471 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68030");
4af06170
RS
472
473 case OPT_m68040:
900ec02d 474 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68040");
4af06170
RS
475
476 case OPT_m68060:
900ec02d 477 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68060");
4af06170
RS
478
479 case OPT_m68302:
900ec02d 480 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68302");
4af06170
RS
481
482 case OPT_m68332:
483 case OPT_mcpu32:
900ec02d 484 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68332");
4af06170
RS
485
486 case OPT_mshared_library_id_:
487 if (value > MAX_LIBRARY_ID)
488 error ("-mshared-library-id=%s is not between 0 and %d",
489 arg, MAX_LIBRARY_ID);
490 else
491 asprintf ((char **) &m68k_library_id_string, "%d", (value * -4) - 4);
492 return true;
493
494 default:
495 return true;
496 }
497}
498
ef1dbfb0
RK
499/* Sometimes certain combinations of command options do not make
500 sense on a particular target machine. You can define a macro
501 `OVERRIDE_OPTIONS' to take account of this. This macro, if
502 defined, is executed once just after all the command options have
503 been parsed.
504
505 Don't use this macro to turn on various extra optimizations for
506 `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
507
508void
8a4a2253 509override_options (void)
ef1dbfb0 510{
900ec02d
JB
511 const struct m68k_target_selection *entry;
512 unsigned long target_mask;
513
514 /* User can choose:
515
516 -mcpu=
517 -march=
518 -mtune=
519
520 -march=ARCH should generate code that runs any processor
521 implementing architecture ARCH. -mcpu=CPU should override -march
522 and should generate code that runs on processor CPU, making free
523 use of any instructions that CPU understands. -mtune=UARCH applies
9f5ed61a 524 on top of -mcpu or -march and optimizes the code for UARCH. It does
900ec02d 525 not change the target architecture. */
526 if (m68k_cpu_entry)
527 {
528 /* Complain if the -march setting is for a different microarchitecture,
529 or includes flags that the -mcpu setting doesn't. */
530 if (m68k_arch_entry
531 && (m68k_arch_entry->microarch != m68k_cpu_entry->microarch
532 || (m68k_arch_entry->flags & ~m68k_cpu_entry->flags) != 0))
533 warning (0, "-mcpu=%s conflicts with -march=%s",
534 m68k_cpu_entry->name, m68k_arch_entry->name);
535
536 entry = m68k_cpu_entry;
537 }
538 else
539 entry = m68k_arch_entry;
540
541 if (!entry)
542 entry = all_devices + TARGET_CPU_DEFAULT;
543
544 m68k_cpu_flags = entry->flags;
545
546 /* Use the architecture setting to derive default values for
547 certain flags. */
548 target_mask = 0;
8785d88c
KH
549
550 /* ColdFire is lenient about alignment. */
551 if (!TARGET_COLDFIRE)
552 target_mask |= MASK_STRICT_ALIGNMENT;
553
900ec02d
JB
554 if ((m68k_cpu_flags & FL_BITFIELD) != 0)
555 target_mask |= MASK_BITFIELD;
556 if ((m68k_cpu_flags & FL_CF_HWDIV) != 0)
557 target_mask |= MASK_CF_HWDIV;
558 if ((m68k_cpu_flags & (FL_68881 | FL_CF_FPU)) != 0)
559 target_mask |= MASK_HARD_FLOAT;
560 target_flags |= target_mask & ~target_flags_explicit;
561
562 /* Set the directly-usable versions of the -mcpu and -mtune settings. */
563 m68k_cpu = entry->device;
564 if (m68k_tune_entry)
565 m68k_tune = m68k_tune_entry->microarch;
566#ifdef M68K_DEFAULT_TUNE
567 else if (!m68k_cpu_entry && !m68k_arch_entry)
568 m68k_tune = M68K_DEFAULT_TUNE;
569#endif
570 else
571 m68k_tune = entry->microarch;
572
573 /* Set the type of FPU. */
574 m68k_fpu = (!TARGET_HARD_FLOAT ? FPUTYPE_NONE
575 : (m68k_cpu_flags & FL_COLDFIRE) != 0 ? FPUTYPE_COLDFIRE
576 : FPUTYPE_68881);
577
a2ef3db7
BI
578 /* Sanity check to ensure that msep-data and mid-shared-library are not
579 * both specified together. Doing so simply doesn't make sense.
580 */
581 if (TARGET_SEP_DATA && TARGET_ID_SHARED_LIBRARY)
582 error ("cannot specify both -msep-data and -mid-shared-library");
583
584 /* If we're generating code for a separate A5 relative data segment,
585 * we've got to enable -fPIC as well. This might be relaxable to
586 * -fpic but it hasn't been tested properly.
587 */
588 if (TARGET_SEP_DATA || TARGET_ID_SHARED_LIBRARY)
589 flag_pic = 2;
590
abe92a04
RS
591 /* -mpcrel -fPIC uses 32-bit pc-relative displacements. Raise an
592 error if the target does not support them. */
593 if (TARGET_PCREL && !TARGET_68020 && flag_pic == 2)
594 error ("-mpcrel -fPIC is not currently supported on selected cpu");
adf2ac37
RH
595
596 /* ??? A historic way of turning on pic, or is this intended to
597 be an embedded thing that doesn't have the same name binding
598 significance that it does on hosted ELF systems? */
599 if (TARGET_PCREL && flag_pic == 0)
600 flag_pic = 1;
601
29ca003a
RS
602 if (!flag_pic)
603 {
c47b0cb4
MK
604 m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_JSR;
605
29ca003a 606 m68k_symbolic_jump = "jra %a0";
29ca003a
RS
607 }
608 else if (TARGET_ID_SHARED_LIBRARY)
609 /* All addresses must be loaded from the GOT. */
610 ;
4e2b26aa 611 else if (TARGET_68020 || TARGET_ISAB || TARGET_ISAC)
29ca003a
RS
612 {
613 if (TARGET_PCREL)
c47b0cb4 614 m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_C;
4e2b26aa 615 else
c47b0cb4
MK
616 m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_P;
617
4e2b26aa
NS
618 if (TARGET_ISAC)
619 /* No unconditional long branch */;
620 else if (TARGET_PCREL)
da398bb5 621 m68k_symbolic_jump = "bra%.l %c0";
29ca003a 622 else
da398bb5 623 m68k_symbolic_jump = "bra%.l %p0";
29ca003a
RS
624 /* Turn off function cse if we are doing PIC. We always want
625 function calls to be done as `bsr foo@PLTPC'. */
626 /* ??? It's traditional to do this for -mpcrel too, but it isn't
627 clear how intentional that is. */
628 flag_no_function_cse = 1;
629 }
adf2ac37 630
c47b0cb4
MK
631 switch (m68k_symbolic_call_var)
632 {
633 case M68K_SYMBOLIC_CALL_JSR:
c47b0cb4 634 m68k_symbolic_call = "jsr %a0";
c47b0cb4
MK
635 break;
636
637 case M68K_SYMBOLIC_CALL_BSR_C:
da398bb5 638 m68k_symbolic_call = "bsr%.l %c0";
c47b0cb4
MK
639 break;
640
641 case M68K_SYMBOLIC_CALL_BSR_P:
da398bb5 642 m68k_symbolic_call = "bsr%.l %p0";
c47b0cb4
MK
643 break;
644
645 case M68K_SYMBOLIC_CALL_NONE:
646 gcc_assert (m68k_symbolic_call == NULL);
647 break;
648
649 default:
650 gcc_unreachable ();
651 }
652
aaca7021
RZ
653#ifndef ASM_OUTPUT_ALIGN_WITH_NOP
654 if (align_labels > 2)
655 {
656 warning (0, "-falign-labels=%d is not supported", align_labels);
657 align_labels = 0;
658 }
659 if (align_loops > 2)
660 {
661 warning (0, "-falign-loops=%d is not supported", align_loops);
662 align_loops = 0;
663 }
664#endif
665
adf2ac37 666 SUBTARGET_OVERRIDE_OPTIONS;
c47b0cb4
MK
667
668 /* Setup scheduling options. */
669 if (TUNE_CFV2)
670 m68k_sched_cpu = CPU_CF_V2;
671 else
672 {
673 m68k_sched_cpu = CPU_UNKNOWN;
674 flag_schedule_insns = 0;
675 flag_schedule_insns_after_reload = 0;
676 flag_modulo_sched = 0;
677 }
ef1dbfb0 678}
7eb4f044
NS
679
680/* Generate a macro of the form __mPREFIX_cpu_NAME, where PREFIX is the
681 given argument and NAME is the argument passed to -mcpu. Return NULL
682 if -mcpu was not passed. */
683
684const char *
685m68k_cpp_cpu_ident (const char *prefix)
686{
687 if (!m68k_cpu_entry)
688 return NULL;
689 return concat ("__m", prefix, "_cpu_", m68k_cpu_entry->name, NULL);
690}
691
692/* Generate a macro of the form __mPREFIX_family_NAME, where PREFIX is the
693 given argument and NAME is the name of the representative device for
694 the -mcpu argument's family. Return NULL if -mcpu was not passed. */
695
696const char *
697m68k_cpp_cpu_family (const char *prefix)
698{
699 if (!m68k_cpu_entry)
700 return NULL;
701 return concat ("__m", prefix, "_family_", m68k_cpu_entry->family, NULL);
702}
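/* Editorial example (assuming a caller that passes PREFIX "cf"): with
   -mcpu=5206e, m68k_cpp_cpu_ident returns "__mcf_cpu_5206e", and
   m68k_cpp_cpu_family returns "__mcf_family_<FAMILY>", where FAMILY is the
   family string recorded for the device in m68k-devices.def.  */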
79e68feb 703\f
2bccb817
KH
704/* Return m68k_fk_interrupt_handler if FUNC has an "interrupt" or
705 "interrupt_handler" attribute and interrupt_thread if FUNC has an
706 "interrupt_thread" attribute. Otherwise, return
707 m68k_fk_normal_function. */
a4242737
KH
708
709enum m68k_function_kind
710m68k_get_function_kind (tree func)
48ed72a4
PB
711{
712 tree a;
713
fa157b28
NS
714 gcc_assert (TREE_CODE (func) == FUNCTION_DECL);
715
2bccb817
KH
716 a = lookup_attribute ("interrupt", DECL_ATTRIBUTES (func));
717 if (a != NULL_TREE)
718 return m68k_fk_interrupt_handler;
719
48ed72a4 720 a = lookup_attribute ("interrupt_handler", DECL_ATTRIBUTES (func));
a4242737
KH
721 if (a != NULL_TREE)
722 return m68k_fk_interrupt_handler;
723
724 a = lookup_attribute ("interrupt_thread", DECL_ATTRIBUTES (func));
725 if (a != NULL_TREE)
726 return m68k_fk_interrupt_thread;
727
728 return m68k_fk_normal_function;
48ed72a4
PB
729}
730
731/* Handle an attribute requiring a FUNCTION_DECL; arguments as in
732 struct attribute_spec.handler. */
733static tree
734m68k_handle_fndecl_attribute (tree *node, tree name,
735 tree args ATTRIBUTE_UNUSED,
736 int flags ATTRIBUTE_UNUSED,
737 bool *no_add_attrs)
738{
739 if (TREE_CODE (*node) != FUNCTION_DECL)
740 {
5c498b10 741 warning (OPT_Wattributes, "%qs attribute only applies to functions",
48ed72a4
PB
742 IDENTIFIER_POINTER (name));
743 *no_add_attrs = true;
744 }
745
a4242737
KH
746 if (m68k_get_function_kind (*node) != m68k_fk_normal_function)
747 {
748 error ("multiple interrupt attributes not allowed");
749 *no_add_attrs = true;
750 }
751
752 if (!TARGET_FIDOA
753 && !strcmp (IDENTIFIER_POINTER (name), "interrupt_thread"))
754 {
755 error ("interrupt_thread is available only on fido");
756 *no_add_attrs = true;
757 }
758
48ed72a4
PB
759 return NULL_TREE;
760}
860c4900
BI
761
762static void
3d74bc09 763m68k_compute_frame_layout (void)
860c4900
BI
764{
765 int regno, saved;
a40ed0f3 766 unsigned int mask;
a4242737
KH
767 enum m68k_function_kind func_kind =
768 m68k_get_function_kind (current_function_decl);
769 bool interrupt_handler = func_kind == m68k_fk_interrupt_handler;
770 bool interrupt_thread = func_kind == m68k_fk_interrupt_thread;
860c4900 771
3d74bc09
BI
772 /* Only compute the frame once per function.
773 Don't cache information until reload has been completed. */
774 if (current_frame.funcdef_no == current_function_funcdef_no
775 && reload_completed)
776 return;
777
778 current_frame.size = (get_frame_size () + 3) & -4;
860c4900 779
a40ed0f3 780 mask = saved = 0;
a4242737
KH
781
782 /* Interrupt thread does not need to save any register. */
783 if (!interrupt_thread)
784 for (regno = 0; regno < 16; regno++)
785 if (m68k_save_reg (regno, interrupt_handler))
786 {
787 mask |= 1 << (regno - D0_REG);
788 saved++;
789 }
3d74bc09
BI
790 current_frame.offset = saved * 4;
791 current_frame.reg_no = saved;
792 current_frame.reg_mask = mask;
860c4900 793
57047680 794 current_frame.foffset = 0;
a40ed0f3 795 mask = saved = 0;
dcc21c4c 796 if (TARGET_HARD_FLOAT)
860c4900 797 {
a4242737
KH
798 /* Interrupt thread does not need to save any register. */
799 if (!interrupt_thread)
800 for (regno = 16; regno < 24; regno++)
801 if (m68k_save_reg (regno, interrupt_handler))
802 {
803 mask |= 1 << (regno - FP0_REG);
804 saved++;
805 }
dcc21c4c 806 current_frame.foffset = saved * TARGET_FP_REG_SIZE;
3d74bc09 807 current_frame.offset += current_frame.foffset;
860c4900 808 }
57047680
GN
809 current_frame.fpu_no = saved;
810 current_frame.fpu_mask = mask;
3d74bc09
BI
811
812 /* Remember what function this frame refers to. */
813 current_frame.funcdef_no = current_function_funcdef_no;
860c4900
BI
814}
815
816HOST_WIDE_INT
817m68k_initial_elimination_offset (int from, int to)
818{
42b67c06
PB
819 int argptr_offset;
820 /* The arg pointer points 8 bytes before the start of the arguments,
821 as defined by FIRST_PARM_OFFSET. This makes it coincident with the
822 frame pointer in most frames. */
823 argptr_offset = frame_pointer_needed ? 0 : UNITS_PER_WORD;
860c4900 824 if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
42b67c06 825 return argptr_offset;
860c4900 826
3d74bc09 827 m68k_compute_frame_layout ();
860c4900 828
4761e388
NS
829 gcc_assert (to == STACK_POINTER_REGNUM);
830 switch (from)
831 {
a0a7fbc9 832 case ARG_POINTER_REGNUM:
42b67c06 833 return current_frame.offset + current_frame.size - argptr_offset;
4761e388
NS
834 case FRAME_POINTER_REGNUM:
835 return current_frame.offset + current_frame.size;
836 default:
837 gcc_unreachable ();
838 }
860c4900
BI
839}
840
97c55091
GN
841/* Refer to the array `regs_ever_live' to determine which registers
842 to save; `regs_ever_live[I]' is nonzero if register number I
843 is ever used in the function. This function is responsible for
844 knowing which registers should not be saved even if used.
845 Return true if we need to save REGNO. */
846
48ed72a4
PB
847static bool
848m68k_save_reg (unsigned int regno, bool interrupt_handler)
2cff4a6e 849{
4ab870f5 850 if (flag_pic && regno == PIC_REG)
b86ba8a3 851 {
215161e2 852 if (current_function_saves_all_registers)
afcb440c 853 return true;
b86ba8a3
AT
854 if (current_function_uses_pic_offset_table)
855 return true;
6357eb0d
RS
856 /* Reload may introduce constant pool references into a function
857 that thitherto didn't need a PIC register. Note that the test
858 above will not catch that case because we will only set
859 current_function_uses_pic_offset_table when emitting
860 the address reloads. */
861 if (current_function_uses_const_pool)
862 return true;
b86ba8a3 863 }
2cff4a6e
AS
864
865 if (current_function_calls_eh_return)
866 {
867 unsigned int i;
868 for (i = 0; ; i++)
869 {
870 unsigned int test = EH_RETURN_DATA_REGNO (i);
871 if (test == INVALID_REGNUM)
872 break;
873 if (test == regno)
48ed72a4 874 return true;
2cff4a6e
AS
875 }
876 }
877
48ed72a4
PB
878 /* Fixed regs we never touch. */
879 if (fixed_regs[regno])
880 return false;
881
882 /* The frame pointer (if it is such) is handled specially. */
883 if (regno == FRAME_POINTER_REGNUM && frame_pointer_needed)
884 return false;
885
886 /* Interrupt handlers must also save call_used_regs
887 if they are live or when calling nested functions. */
888 if (interrupt_handler)
a0a7fbc9 889 {
6fb5fa3c 890 if (df_regs_ever_live_p (regno))
a0a7fbc9 891 return true;
48ed72a4 892
a0a7fbc9
AS
893 if (!current_function_is_leaf && call_used_regs[regno])
894 return true;
895 }
48ed72a4
PB
896
897 /* Never need to save registers that aren't touched. */
6fb5fa3c 898 if (!df_regs_ever_live_p (regno))
48ed72a4
PB
899 return false;
900
b2e08ed4 901 /* Otherwise save everything that isn't call-clobbered. */
48ed72a4 902 return !call_used_regs[regno];
2cff4a6e
AS
903}
904
a40ed0f3
KH
905/* Emit RTL for a MOVEM or FMOVEM instruction. BASE + OFFSET represents
906 the lowest memory address. COUNT is the number of registers to be
907 moved, with register REGNO + I being moved if bit I of MASK is set.
908 STORE_P specifies the direction of the move and ADJUST_STACK_P says
909 whether or not this is pre-decrement (if STORE_P) or post-increment
910 (if !STORE_P) operation. */
911
912static rtx
913m68k_emit_movem (rtx base, HOST_WIDE_INT offset,
914 unsigned int count, unsigned int regno,
915 unsigned int mask, bool store_p, bool adjust_stack_p)
916{
917 int i;
918 rtx body, addr, src, operands[2];
919 enum machine_mode mode;
920
921 body = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (adjust_stack_p + count));
922 mode = reg_raw_mode[regno];
923 i = 0;
924
925 if (adjust_stack_p)
926 {
927 src = plus_constant (base, (count
928 * GET_MODE_SIZE (mode)
929 * (HOST_WIDE_INT) (store_p ? -1 : 1)));
930 XVECEXP (body, 0, i++) = gen_rtx_SET (VOIDmode, base, src);
931 }
932
933 for (; mask != 0; mask >>= 1, regno++)
934 if (mask & 1)
935 {
936 addr = plus_constant (base, offset);
937 operands[!store_p] = gen_frame_mem (mode, addr);
938 operands[store_p] = gen_rtx_REG (mode, regno);
939 XVECEXP (body, 0, i++)
940 = gen_rtx_SET (VOIDmode, operands[0], operands[1]);
941 offset += GET_MODE_SIZE (mode);
942 }
943 gcc_assert (i == XVECLEN (body, 0));
944
945 return emit_insn (body);
946}
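/* Editorial example (not part of the original sources): a call such as

     m68k_emit_movem (stack_pointer_rtx, -12, 3, D0_REG, 0x1c, true, true);

   emits one PARALLEL that subtracts 12 from %sp and stores %d2, %d3 and %d4
   (mask bits 2-4) in the three words below the old stack pointer, which is
   how m68k_expand_prologue uses this helper for the integer registers.  */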
947
948/* Make INSN a frame-related instruction. */
79e68feb 949
08c148a8 950static void
a40ed0f3
KH
951m68k_set_frame_related (rtx insn)
952{
953 rtx body;
954 int i;
955
956 RTX_FRAME_RELATED_P (insn) = 1;
957 body = PATTERN (insn);
958 if (GET_CODE (body) == PARALLEL)
959 for (i = 0; i < XVECLEN (body, 0); i++)
960 RTX_FRAME_RELATED_P (XVECEXP (body, 0, i)) = 1;
961}
962
963/* Emit RTL for the "prologue" define_expand. */
964
965void
966m68k_expand_prologue (void)
79e68feb 967{
860c4900 968 HOST_WIDE_INT fsize_with_regs;
a40ed0f3 969 rtx limit, src, dest, insn;
3d74bc09 970
a40ed0f3 971 m68k_compute_frame_layout ();
3d74bc09 972
a157febd
GK
973 /* If the stack limit is a symbol, we can check it here,
974 before actually allocating the space. */
975 if (current_function_limit_stack
976 && GET_CODE (stack_limit_rtx) == SYMBOL_REF)
a40ed0f3
KH
977 {
978 limit = plus_constant (stack_limit_rtx, current_frame.size + 4);
979 if (!LEGITIMATE_CONSTANT_P (limit))
980 {
981 emit_move_insn (gen_rtx_REG (Pmode, D0_REG), limit);
982 limit = gen_rtx_REG (Pmode, D0_REG);
983 }
984 emit_insn (gen_cmpsi (stack_pointer_rtx, limit));
985 emit_insn (gen_conditional_trap (gen_rtx_LTU (VOIDmode,
986 cc0_rtx, const0_rtx),
987 const1_rtx));
988 }
79e68feb 989
a89e3f21 990 fsize_with_regs = current_frame.size;
dcc21c4c
PB
991 if (TARGET_COLDFIRE)
992 {
a40ed0f3
KH
993 /* ColdFire's move multiple instructions do not allow pre-decrement
994 addressing. Add the size of movem saves to the initial stack
995 allocation instead. */
996 if (current_frame.reg_no >= MIN_MOVEM_REGS)
997 fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
998 if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
999 fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
dcc21c4c 1000 }
860c4900 1001
79e68feb
RS
1002 if (frame_pointer_needed)
1003 {
a40ed0f3 1004 if (fsize_with_regs == 0 && TUNE_68040)
79e68feb 1005 {
a40ed0f3
KH
1006 /* On the 68040, two separate moves are faster than link.w 0. */
1007 dest = gen_frame_mem (Pmode,
1008 gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
1009 m68k_set_frame_related (emit_move_insn (dest, frame_pointer_rtx));
1010 m68k_set_frame_related (emit_move_insn (frame_pointer_rtx,
1011 stack_pointer_rtx));
79e68feb 1012 }
a40ed0f3
KH
1013 else if (fsize_with_regs < 0x8000 || TARGET_68020)
1014 m68k_set_frame_related
1015 (emit_insn (gen_link (frame_pointer_rtx,
1016 GEN_INT (-4 - fsize_with_regs))));
d9e88af0 1017 else
a40ed0f3
KH
1018 {
1019 m68k_set_frame_related
1020 (emit_insn (gen_link (frame_pointer_rtx, GEN_INT (-4))));
1021 m68k_set_frame_related
1022 (emit_insn (gen_addsi3 (stack_pointer_rtx,
1023 stack_pointer_rtx,
1024 GEN_INT (-fsize_with_regs))));
1025 }
d9e88af0 1026 }
a40ed0f3
KH
1027 else if (fsize_with_regs != 0)
1028 m68k_set_frame_related
1029 (emit_insn (gen_addsi3 (stack_pointer_rtx,
1030 stack_pointer_rtx,
1031 GEN_INT (-fsize_with_regs))));
860c4900 1032
57047680 1033 if (current_frame.fpu_mask)
79e68feb 1034 {
a40ed0f3 1035 gcc_assert (current_frame.fpu_no >= MIN_FMOVEM_REGS);
dcc21c4c 1036 if (TARGET_68881)
a40ed0f3
KH
1037 m68k_set_frame_related
1038 (m68k_emit_movem (stack_pointer_rtx,
1039 current_frame.fpu_no * -GET_MODE_SIZE (XFmode),
1040 current_frame.fpu_no, FP0_REG,
1041 current_frame.fpu_mask, true, true));
dcc21c4c
PB
1042 else
1043 {
1044 int offset;
1045
a40ed0f3
KH
1046 /* If we're using moveml to save the integer registers,
1047 the stack pointer will point to the bottom of the moveml
1048 save area. Find the stack offset of the first FP register. */
1049 if (current_frame.reg_no < MIN_MOVEM_REGS)
dcc21c4c
PB
1050 offset = 0;
1051 else
a40ed0f3
KH
1052 offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
1053 m68k_set_frame_related
1054 (m68k_emit_movem (stack_pointer_rtx, offset,
1055 current_frame.fpu_no, FP0_REG,
1056 current_frame.fpu_mask, true, false));
f277471f 1057 }
79e68feb 1058 }
99df2465 1059
01bbf777 1060 /* If the stack limit is not a symbol, check it here.
a157febd 1061 This has the disadvantage that it may be too late... */
1062 if (current_function_limit_stack)
1063 {
1064 if (REG_P (stack_limit_rtx))
a40ed0f3
KH
1065 {
1066 emit_insn (gen_cmpsi (stack_pointer_rtx, stack_limit_rtx));
1067 emit_insn (gen_conditional_trap (gen_rtx_LTU (VOIDmode,
1068 cc0_rtx, const0_rtx),
1069 const1_rtx));
1070 }
a157febd 1071 else if (GET_CODE (stack_limit_rtx) != SYMBOL_REF)
d4ee4d25 1072 warning (0, "stack limit expression is not supported");
a157febd 1073 }
01bbf777 1074
a40ed0f3 1075 if (current_frame.reg_no < MIN_MOVEM_REGS)
79e68feb 1076 {
a40ed0f3 1077 /* Store each register separately in the same order moveml does. */
79e68feb
RS
1078 int i;
1079
a40ed0f3
KH
1080 for (i = 16; i-- > 0; )
1081 if (current_frame.reg_mask & (1 << i))
078e983e 1082 {
a40ed0f3
KH
1083 src = gen_rtx_REG (SImode, D0_REG + i);
1084 dest = gen_frame_mem (SImode,
1085 gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
1086 m68k_set_frame_related (emit_insn (gen_movsi (dest, src)));
078e983e 1087 }
79e68feb 1088 }
a40ed0f3 1089 else
79e68feb 1090 {
9425fb04 1091 if (TARGET_COLDFIRE)
a40ed0f3
KH
1092 /* The required register save space has already been allocated.
1093 The first register should be stored at (%sp). */
1094 m68k_set_frame_related
1095 (m68k_emit_movem (stack_pointer_rtx, 0,
1096 current_frame.reg_no, D0_REG,
1097 current_frame.reg_mask, true, false));
afaff477 1098 else
a40ed0f3
KH
1099 m68k_set_frame_related
1100 (m68k_emit_movem (stack_pointer_rtx,
1101 current_frame.reg_no * -GET_MODE_SIZE (SImode),
1102 current_frame.reg_no, D0_REG,
1103 current_frame.reg_mask, true, true));
79e68feb 1104 }
a40ed0f3
KH
1105
1106 if (flag_pic
1107 && !TARGET_SEP_DATA
4f44ecc0 1108 && current_function_uses_pic_offset_table)
6fb5fa3c 1109 insn = emit_insn (gen_load_got (pic_offset_table_rtx));
79e68feb
RS
1110}
1111\f
413ac1b2
RS
1112/* Return true if a simple (return) instruction is sufficient for this
1113 instruction (i.e. if no epilogue is needed). */
79e68feb 1114
3d74bc09 1115bool
a2bda628 1116m68k_use_return_insn (void)
79e68feb 1117{
79e68feb 1118 if (!reload_completed || frame_pointer_needed || get_frame_size () != 0)
3d74bc09 1119 return false;
125ed86f 1120
a0a7fbc9 1121 m68k_compute_frame_layout ();
413ac1b2 1122 return current_frame.offset == 0;
79e68feb
RS
1123}
1124
f7e70894
RS
1125/* Emit RTL for the "epilogue" or "sibcall_epilogue" define_expand;
1126 SIBCALL_P says which.
79e68feb
RS
1127
1128 The function epilogue should not depend on the current stack pointer!
1129 It should use the frame pointer only, if there is a frame pointer.
1130 This is mandatory because of alloca; we also take advantage of it to
1131 omit stack adjustments before returning. */
1132
a40ed0f3 1133void
f7e70894 1134m68k_expand_epilogue (bool sibcall_p)
08c148a8 1135{
3d74bc09 1136 HOST_WIDE_INT fsize, fsize_with_regs;
a40ed0f3 1137 bool big, restore_from_sp;
3d74bc09 1138
a0a7fbc9 1139 m68k_compute_frame_layout ();
3d74bc09 1140
3d74bc09 1141 fsize = current_frame.size;
a40ed0f3
KH
1142 big = false;
1143 restore_from_sp = false;
3d74bc09 1144
a40ed0f3 1145 /* FIXME : current_function_is_leaf below is too strong.
c67ddce5 1146 What we really need to know there is if there could be pending
7a1929e1 1147 stack adjustment needed at that point. */
a40ed0f3
KH
1148 restore_from_sp = (!frame_pointer_needed
1149 || (!current_function_calls_alloca
1150 && current_function_is_leaf));
860c4900
BI
1151
1152 /* fsize_with_regs is the size we need to adjust the sp when
97c55091 1153 popping the frame. */
860c4900 1154 fsize_with_regs = fsize;
dcc21c4c
PB
1155 if (TARGET_COLDFIRE && restore_from_sp)
1156 {
a40ed0f3
KH
1157 /* ColdFire's move multiple instructions do not allow post-increment
1158 addressing. Add the size of movem loads to the final deallocation
1159 instead. */
1160 if (current_frame.reg_no >= MIN_MOVEM_REGS)
1161 fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
1162 if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
1163 fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
dcc21c4c 1164 }
860c4900 1165
3d74bc09 1166 if (current_frame.offset + fsize >= 0x8000
a40ed0f3 1167 && !restore_from_sp
3d74bc09 1168 && (current_frame.reg_mask || current_frame.fpu_mask))
79e68feb 1169 {
a40ed0f3
KH
1170 if (TARGET_COLDFIRE
1171 && (current_frame.reg_no >= MIN_MOVEM_REGS
1172 || current_frame.fpu_no >= MIN_FMOVEM_REGS))
1173 {
1174 /* ColdFire's move multiple instructions do not support the
1175 (d8,Ax,Xi) addressing mode, so we might as well use a normal
1176 stack-based restore. */
1177 emit_move_insn (gen_rtx_REG (Pmode, A1_REG),
1178 GEN_INT (-(current_frame.offset + fsize)));
1179 emit_insn (gen_addsi3 (stack_pointer_rtx,
1180 gen_rtx_REG (Pmode, A1_REG),
1181 frame_pointer_rtx));
1182 restore_from_sp = true;
1183 }
1184 else
1185 {
1186 emit_move_insn (gen_rtx_REG (Pmode, A1_REG), GEN_INT (-fsize));
1187 fsize = 0;
1188 big = true;
1189 }
79e68feb 1190 }
79e68feb 1191
a40ed0f3
KH
1192 if (current_frame.reg_no < MIN_MOVEM_REGS)
1193 {
1194 /* Restore each register separately in the same order moveml does. */
79e68feb 1195 int i;
a40ed0f3 1196 HOST_WIDE_INT offset;
79e68feb 1197
a40ed0f3 1198 offset = current_frame.offset + fsize;
3d74bc09
BI
1199 for (i = 0; i < 16; i++)
1200 if (current_frame.reg_mask & (1 << i))
79e68feb 1201 {
a40ed0f3
KH
1202 rtx addr;
1203
1204 if (big)
79e68feb 1205 {
a40ed0f3
KH
1206 /* Generate the address -OFFSET(%fp,%a1.l). */
1207 addr = gen_rtx_REG (Pmode, A1_REG);
1208 addr = gen_rtx_PLUS (Pmode, addr, frame_pointer_rtx);
1209 addr = plus_constant (addr, -offset);
79e68feb 1210 }
a40ed0f3
KH
1211 else if (restore_from_sp)
1212 addr = gen_rtx_POST_INC (Pmode, stack_pointer_rtx);
1213 else
1214 addr = plus_constant (frame_pointer_rtx, -offset);
1215 emit_move_insn (gen_rtx_REG (SImode, D0_REG + i),
1216 gen_frame_mem (SImode, addr));
1217 offset -= GET_MODE_SIZE (SImode);
1218 }
79e68feb 1219 }
3d74bc09 1220 else if (current_frame.reg_mask)
79e68feb 1221 {
a40ed0f3
KH
1222 if (big)
1223 m68k_emit_movem (gen_rtx_PLUS (Pmode,
1224 gen_rtx_REG (Pmode, A1_REG),
1225 frame_pointer_rtx),
1226 -(current_frame.offset + fsize),
1227 current_frame.reg_no, D0_REG,
1228 current_frame.reg_mask, false, false);
1229 else if (restore_from_sp)
1230 m68k_emit_movem (stack_pointer_rtx, 0,
1231 current_frame.reg_no, D0_REG,
1232 current_frame.reg_mask, false,
1233 !TARGET_COLDFIRE);
1234 else
1235 m68k_emit_movem (frame_pointer_rtx,
1236 -(current_frame.offset + fsize),
1237 current_frame.reg_no, D0_REG,
1238 current_frame.reg_mask, false, false);
79e68feb 1239 }
a40ed0f3
KH
1240
1241 if (current_frame.fpu_no > 0)
79e68feb
RS
1242 {
1243 if (big)
a40ed0f3
KH
1244 m68k_emit_movem (gen_rtx_PLUS (Pmode,
1245 gen_rtx_REG (Pmode, A1_REG),
1246 frame_pointer_rtx),
1247 -(current_frame.foffset + fsize),
1248 current_frame.fpu_no, FP0_REG,
1249 current_frame.fpu_mask, false, false);
6910dd70 1250 else if (restore_from_sp)
79e68feb 1251 {
dcc21c4c
PB
1252 if (TARGET_COLDFIRE)
1253 {
1254 int offset;
1255
a40ed0f3
KH
1256 /* If we used moveml to restore the integer registers, the
1257 stack pointer will still point to the bottom of the moveml
1258 save area. Find the stack offset of the first FP
1259 register. */
1260 if (current_frame.reg_no < MIN_MOVEM_REGS)
dcc21c4c
PB
1261 offset = 0;
1262 else
a40ed0f3
KH
1263 offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
1264 m68k_emit_movem (stack_pointer_rtx, offset,
1265 current_frame.fpu_no, FP0_REG,
1266 current_frame.fpu_mask, false, false);
dcc21c4c 1267 }
884b74f0 1268 else
a40ed0f3
KH
1269 m68k_emit_movem (stack_pointer_rtx, 0,
1270 current_frame.fpu_no, FP0_REG,
1271 current_frame.fpu_mask, false, true);
79e68feb
RS
1272 }
1273 else
a40ed0f3
KH
1274 m68k_emit_movem (frame_pointer_rtx,
1275 -(current_frame.foffset + fsize),
1276 current_frame.fpu_no, FP0_REG,
1277 current_frame.fpu_mask, false, false);
79e68feb 1278 }
a40ed0f3 1279
79e68feb 1280 if (frame_pointer_needed)
a40ed0f3 1281 emit_insn (gen_unlink (frame_pointer_rtx));
860c4900 1282 else if (fsize_with_regs)
a40ed0f3
KH
1283 emit_insn (gen_addsi3 (stack_pointer_rtx,
1284 stack_pointer_rtx,
1285 GEN_INT (fsize_with_regs)));
1286
2cff4a6e 1287 if (current_function_calls_eh_return)
a40ed0f3
KH
1288 emit_insn (gen_addsi3 (stack_pointer_rtx,
1289 stack_pointer_rtx,
1290 EH_RETURN_STACKADJ_RTX));
1291
f7e70894 1292 if (!sibcall_p)
49570723 1293 emit_jump_insn (gen_rtx_RETURN (VOIDmode));
79e68feb
RS
1294}
1295\f
8a4a2253 1296/* Return true if X is a valid comparison operator for the dbcc
64a184e9 1297 instruction.
1298
1299 Note it rejects floating point comparison operators.
1300 (In the future we could use Fdbcc).
1301
1302 It also rejects some comparisons when CC_NO_OVERFLOW is set. */
1303
1304int
41b6a5e2 1305valid_dbcc_comparison_p_2 (rtx x, enum machine_mode mode ATTRIBUTE_UNUSED)
64a184e9 1306{
64a184e9
RS
1307 switch (GET_CODE (x))
1308 {
64a184e9
RS
1309 case EQ: case NE: case GTU: case LTU:
1310 case GEU: case LEU:
1311 return 1;
1312
1313 /* Reject some when CC_NO_OVERFLOW is set. This may be overly
1314 conservative. */
1315 case GT: case LT: case GE: case LE:
1316 return ! (cc_prev_status.flags & CC_NO_OVERFLOW);
1317 default:
1318 return 0;
1319 }
1320}
1321
a0ab749a 1322/* Return nonzero if flags are currently in the 68881 flag register. */
6a0f85e3 1323int
8a4a2253 1324flags_in_68881 (void)
6a0f85e3
TG
1325{
1326 /* We could add support for these in the future. */
1327 return cc_status.flags & CC_IN_68881;
1328}
1329
fa157b28 1330/* Implement TARGET_FUNCTION_OK_FOR_SIBCALL_P. */
f7e70894
RS
1331
1332static bool
fa157b28 1333m68k_ok_for_sibcall_p (tree decl, tree exp)
f7e70894 1334{
fa157b28
NS
1335 enum m68k_function_kind kind;
1336
1337 /* We cannot use sibcalls for nested functions because we use the
1338 static chain register for indirect calls. */
1339 if (CALL_EXPR_STATIC_CHAIN (exp))
1340 return false;
1341
1342 kind = m68k_get_function_kind (current_function_decl);
1343 if (kind == m68k_fk_normal_function)
1344 /* We can always sibcall from a normal function, because it's
1345 undefined if it is calling an interrupt function. */
1346 return true;
1347
1348 /* Otherwise we can only sibcall if the function kind is known to be
1349 the same. */
1350 if (decl && m68k_get_function_kind (decl) == kind)
1351 return true;
1352
1353 return false;
f7e70894
RS
1354}
1355
29ca003a
RS
1356/* Convert X to a legitimate function call memory reference and return the
1357 result. */
a2ef3db7 1358
29ca003a
RS
1359rtx
1360m68k_legitimize_call_address (rtx x)
1361{
1362 gcc_assert (MEM_P (x));
1363 if (call_operand (XEXP (x, 0), VOIDmode))
1364 return x;
1365 return replace_equiv_address (x, force_reg (Pmode, XEXP (x, 0)));
a2ef3db7
BI
1366}
1367
f7e70894
RS
1368/* Likewise for sibling calls. */
1369
1370rtx
1371m68k_legitimize_sibcall_address (rtx x)
1372{
1373 gcc_assert (MEM_P (x));
1374 if (sibcall_operand (XEXP (x, 0), VOIDmode))
1375 return x;
1376
1377 emit_move_insn (gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM), XEXP (x, 0));
1378 return replace_equiv_address (x, gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM));
1379}
1380
64a184e9
RS
1381/* Output a dbCC; jCC sequence. Note we do not handle the
1382 floating point version of this sequence (Fdbcc). We also
1383 do not handle alternative conditions when CC_NO_OVERFLOW is
6a0f85e3 1384 set. It is assumed that valid_dbcc_comparison_p and flags_in_68881 will
1385 kick those out before we get here. */
64a184e9 1386
1d8eaa6b 1387void
8a4a2253 1388output_dbcc_and_branch (rtx *operands)
64a184e9 1389{
64a184e9
RS
1390 switch (GET_CODE (operands[3]))
1391 {
1392 case EQ:
da398bb5 1393 output_asm_insn ("dbeq %0,%l1\n\tjeq %l2", operands);
e6d98cb0 1394 break;
64a184e9
RS
1395
1396 case NE:
da398bb5 1397 output_asm_insn ("dbne %0,%l1\n\tjne %l2", operands);
e6d98cb0 1398 break;
64a184e9
RS
1399
1400 case GT:
da398bb5 1401 output_asm_insn ("dbgt %0,%l1\n\tjgt %l2", operands);
e6d98cb0 1402 break;
64a184e9
RS
1403
1404 case GTU:
da398bb5 1405 output_asm_insn ("dbhi %0,%l1\n\tjhi %l2", operands);
e6d98cb0 1406 break;
64a184e9
RS
1407
1408 case LT:
da398bb5 1409 output_asm_insn ("dblt %0,%l1\n\tjlt %l2", operands);
e6d98cb0 1410 break;
64a184e9
RS
1411
1412 case LTU:
da398bb5 1413 output_asm_insn ("dbcs %0,%l1\n\tjcs %l2", operands);
e6d98cb0 1414 break;
64a184e9
RS
1415
1416 case GE:
da398bb5 1417 output_asm_insn ("dbge %0,%l1\n\tjge %l2", operands);
e6d98cb0 1418 break;
64a184e9
RS
1419
1420 case GEU:
da398bb5 1421 output_asm_insn ("dbcc %0,%l1\n\tjcc %l2", operands);
e6d98cb0 1422 break;
64a184e9
RS
1423
1424 case LE:
da398bb5 1425 output_asm_insn ("dble %0,%l1\n\tjle %l2", operands);
e6d98cb0 1426 break;
64a184e9
RS
1427
1428 case LEU:
da398bb5 1429 output_asm_insn ("dbls %0,%l1\n\tjls %l2", operands);
e6d98cb0 1430 break;
64a184e9
RS
1431
1432 default:
4761e388 1433 gcc_unreachable ();
64a184e9
RS
1434 }
1435
1436 /* If the decrement is to be done in SImode, then we have
7a1929e1 1437 to compensate for the fact that dbcc decrements in HImode. */
64a184e9
RS
1438 switch (GET_MODE (operands[0]))
1439 {
1440 case SImode:
da398bb5 1441 output_asm_insn ("clr%.w %0\n\tsubq%.l #1,%0\n\tjpl %l1", operands);
64a184e9
RS
1442 break;
1443
1444 case HImode:
1445 break;
1446
1447 default:
4761e388 1448 gcc_unreachable ();
64a184e9
RS
1449 }
1450}
1451
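/* Editorial note: with an EQ condition, a HImode counter in operands[0] and
   labels in operands[1] and operands[2], the code above emits
   "dbeq %0,%l1" followed by "jeq %l2"; an SImode counter additionally gets
   the clr.w/subq.l/jpl fix-up because dbcc only decrements the low word.  */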
5505f548 1452const char *
4761e388 1453output_scc_di (rtx op, rtx operand1, rtx operand2, rtx dest)
c59c3b1c
RK
1454{
1455 rtx loperands[7];
d9832fd2 1456 enum rtx_code op_code = GET_CODE (op);
c59c3b1c 1457
f710504c 1458 /* This does not produce a useful cc. */
906a2d3c
RK
1459 CC_STATUS_INIT;
1460
d9832fd2
RK
1461 /* The m68k cmp.l instruction requires operand1 to be a reg as used
1462 below. Swap the operands and change the op if these requirements
1463 are not fulfilled. */
1464 if (GET_CODE (operand2) == REG && GET_CODE (operand1) != REG)
1465 {
1466 rtx tmp = operand1;
1467
1468 operand1 = operand2;
1469 operand2 = tmp;
1470 op_code = swap_condition (op_code);
1471 }
c59c3b1c
RK
1472 loperands[0] = operand1;
1473 if (GET_CODE (operand1) == REG)
1d8eaa6b 1474 loperands[1] = gen_rtx_REG (SImode, REGNO (operand1) + 1);
c59c3b1c 1475 else
b72f00af 1476 loperands[1] = adjust_address (operand1, SImode, 4);
c59c3b1c
RK
1477 if (operand2 != const0_rtx)
1478 {
1479 loperands[2] = operand2;
1480 if (GET_CODE (operand2) == REG)
1d8eaa6b 1481 loperands[3] = gen_rtx_REG (SImode, REGNO (operand2) + 1);
c59c3b1c 1482 else
b72f00af 1483 loperands[3] = adjust_address (operand2, SImode, 4);
c59c3b1c 1484 }
428511bb 1485 loperands[4] = gen_label_rtx ();
c59c3b1c 1486 if (operand2 != const0_rtx)
da398bb5 1487 output_asm_insn ("cmp%.l %2,%0\n\tjne %l4\n\tcmp%.l %3,%1", loperands);
392582fa 1488 else
4a8c52e0 1489 {
9425fb04 1490 if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[0]))
4a8c52e0
AS
1491 output_asm_insn ("tst%.l %0", loperands);
1492 else
a0a7fbc9 1493 output_asm_insn ("cmp%.w #0,%0", loperands);
4a8c52e0 1494
da398bb5 1495 output_asm_insn ("jne %l4", loperands);
4a8c52e0 1496
9425fb04 1497 if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[1]))
4a8c52e0
AS
1498 output_asm_insn ("tst%.l %1", loperands);
1499 else
3b4b85c9 1500 output_asm_insn ("cmp%.w #0,%1", loperands);
4a8c52e0
AS
1501 }
1502
c59c3b1c 1503 loperands[5] = dest;
3b4b85c9 1504
d9832fd2 1505 switch (op_code)
c59c3b1c
RK
1506 {
1507 case EQ:
4977bab6 1508 (*targetm.asm_out.internal_label) (asm_out_file, "L",
a0a7fbc9 1509 CODE_LABEL_NUMBER (loperands[4]));
c59c3b1c
RK
1510 output_asm_insn ("seq %5", loperands);
1511 break;
1512
1513 case NE:
4977bab6 1514 (*targetm.asm_out.internal_label) (asm_out_file, "L",
a0a7fbc9 1515 CODE_LABEL_NUMBER (loperands[4]));
c59c3b1c
RK
1516 output_asm_insn ("sne %5", loperands);
1517 break;
1518
1519 case GT:
428511bb 1520 loperands[6] = gen_label_rtx ();
da398bb5 1521 output_asm_insn ("shi %5\n\tjra %l6", loperands);
4977bab6 1522 (*targetm.asm_out.internal_label) (asm_out_file, "L",
a0a7fbc9 1523 CODE_LABEL_NUMBER (loperands[4]));
c59c3b1c 1524 output_asm_insn ("sgt %5", loperands);
4977bab6 1525 (*targetm.asm_out.internal_label) (asm_out_file, "L",
a0a7fbc9 1526 CODE_LABEL_NUMBER (loperands[6]));
c59c3b1c
RK
1527 break;
1528
1529 case GTU:
4977bab6 1530 (*targetm.asm_out.internal_label) (asm_out_file, "L",
a0a7fbc9 1531 CODE_LABEL_NUMBER (loperands[4]));
c59c3b1c
RK
1532 output_asm_insn ("shi %5", loperands);
1533 break;
1534
1535 case LT:
428511bb 1536 loperands[6] = gen_label_rtx ();
da398bb5 1537 output_asm_insn ("scs %5\n\tjra %l6", loperands);
4977bab6 1538 (*targetm.asm_out.internal_label) (asm_out_file, "L",
a0a7fbc9 1539 CODE_LABEL_NUMBER (loperands[4]));
c59c3b1c 1540 output_asm_insn ("slt %5", loperands);
4977bab6 1541 (*targetm.asm_out.internal_label) (asm_out_file, "L",
a0a7fbc9 1542 CODE_LABEL_NUMBER (loperands[6]));
c59c3b1c
RK
1543 break;
1544
1545 case LTU:
4977bab6 1546 (*targetm.asm_out.internal_label) (asm_out_file, "L",
a0a7fbc9 1547 CODE_LABEL_NUMBER (loperands[4]));
c59c3b1c
RK
1548 output_asm_insn ("scs %5", loperands);
1549 break;
1550
1551 case GE:
428511bb 1552 loperands[6] = gen_label_rtx ();
da398bb5 1553 output_asm_insn ("scc %5\n\tjra %l6", loperands);
4977bab6 1554 (*targetm.asm_out.internal_label) (asm_out_file, "L",
a0a7fbc9 1555 CODE_LABEL_NUMBER (loperands[4]));
c59c3b1c 1556 output_asm_insn ("sge %5", loperands);
4977bab6 1557 (*targetm.asm_out.internal_label) (asm_out_file, "L",
a0a7fbc9 1558 CODE_LABEL_NUMBER (loperands[6]));
c59c3b1c
RK
1559 break;
1560
1561 case GEU:
4977bab6 1562 (*targetm.asm_out.internal_label) (asm_out_file, "L",
a0a7fbc9 1563 CODE_LABEL_NUMBER (loperands[4]));
c59c3b1c
RK
1564 output_asm_insn ("scc %5", loperands);
1565 break;
1566
1567 case LE:
428511bb 1568 loperands[6] = gen_label_rtx ();
da398bb5 1569 output_asm_insn ("sls %5\n\tjra %l6", loperands);
4977bab6 1570 (*targetm.asm_out.internal_label) (asm_out_file, "L",
a0a7fbc9 1571 CODE_LABEL_NUMBER (loperands[4]));
c59c3b1c 1572 output_asm_insn ("sle %5", loperands);
4977bab6 1573 (*targetm.asm_out.internal_label) (asm_out_file, "L",
a0a7fbc9 1574 CODE_LABEL_NUMBER (loperands[6]));
c59c3b1c
RK
1575 break;
1576
1577 case LEU:
4977bab6 1578 (*targetm.asm_out.internal_label) (asm_out_file, "L",
a0a7fbc9 1579 CODE_LABEL_NUMBER (loperands[4]));
c59c3b1c
RK
1580 output_asm_insn ("sls %5", loperands);
1581 break;
1582
1583 default:
4761e388 1584 gcc_unreachable ();
c59c3b1c
RK
1585 }
1586 return "";
1587}
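/* Editorial note: for a DImode EQ comparison against a nonzero operand2,
   the code above emits

     cmp.l %2,%0
     jne   L4
     cmp.l %3,%1
   L4:  seq  %5

   so the low words are compared only when the high words are equal, and
   either compare leaves condition codes suitable for the final seq.  */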
1588
5505f548 1589const char *
8a4a2253 1590output_btst (rtx *operands, rtx countop, rtx dataop, rtx insn, int signpos)
79e68feb
RS
1591{
1592 operands[0] = countop;
1593 operands[1] = dataop;
1594
1595 if (GET_CODE (countop) == CONST_INT)
1596 {
1597 register int count = INTVAL (countop);
1598 /* If COUNT is bigger than the size of the storage unit in use,
1599 advance to the containing unit of the same size. */
1600 if (count > signpos)
1601 {
1602 int offset = (count & ~signpos) / 8;
1603 count = count & signpos;
b72f00af 1604 operands[1] = dataop = adjust_address (dataop, QImode, offset);
79e68feb
RS
1605 }
1606 if (count == signpos)
1607 cc_status.flags = CC_NOT_POSITIVE | CC_Z_IN_NOT_N;
1608 else
1609 cc_status.flags = CC_NOT_NEGATIVE | CC_Z_IN_NOT_N;
1610
1611 /* These three statements used to use next_insns_test_no...
1612 but it appears that this should do the same job. */
1613 if (count == 31
1614 && next_insn_tests_no_inequality (insn))
1615 return "tst%.l %1";
1616 if (count == 15
1617 && next_insn_tests_no_inequality (insn))
1618 return "tst%.w %1";
1619 if (count == 7
1620 && next_insn_tests_no_inequality (insn))
1621 return "tst%.b %1";
5083912d
PDM
1622 /* Try to use `movew to ccr' followed by the appropriate branch insn.
1623 On some m68k variants unfortunately that's slower than btst.
1624 On 68000 and higher, that should also work for all HImode operands. */
1625 if (TUNE_CPU32 || TARGET_COLDFIRE || optimize_size)
1626 {
1627 if (count == 3 && DATA_REG_P (operands[1])
1628 && next_insn_tests_no_inequality (insn))
1629 {
1630 cc_status.flags = CC_NOT_NEGATIVE | CC_Z_IN_NOT_N | CC_NO_OVERFLOW;
1631 return "move%.w %1,%%ccr";
1632 }
1633 if (count == 2 && DATA_REG_P (operands[1])
1634 && next_insn_tests_no_inequality (insn))
1635 {
1636 cc_status.flags = CC_NOT_NEGATIVE | CC_INVERTED | CC_NO_OVERFLOW;
1637 return "move%.w %1,%%ccr";
1638 }
1639 /* count == 1 followed by bvc/bvs and
1640 count == 0 followed by bcc/bcs are also possible, but need
1641 m68k-specific CC_Z_IN_NOT_V and CC_Z_IN_NOT_C flags. */
1642 }
79e68feb
RS
1643
1644 cc_status.flags = CC_NOT_NEGATIVE;
1645 }
1646 return "btst %0,%1";
1647}
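/* Illustrative note, added for exposition and not part of the original
   sources: under the special cases above, a test of bit 7 of a QImode
   operand whose result only feeds an equality comparison is emitted as
   "tst%.b %1" instead of a btst, and bits 15 and 31 likewise become word
   and long tst instructions.  */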
79e68feb 1648\f
fc2241eb
RS
1649/* Return true if X is a legitimate base register. STRICT_P says
1650 whether we need strict checking. */
1651
1652bool
1653m68k_legitimate_base_reg_p (rtx x, bool strict_p)
1654{
1655 /* Allow SUBREG everywhere we allow REG. This results in better code. */
1656 if (!strict_p && GET_CODE (x) == SUBREG)
1657 x = SUBREG_REG (x);
1658
1659 return (REG_P (x)
1660 && (strict_p
1661 ? REGNO_OK_FOR_BASE_P (REGNO (x))
bf32249e 1662 : REGNO_OK_FOR_BASE_NONSTRICT_P (REGNO (x))));
fc2241eb
RS
1663}
1664
1665/* Return true if X is a legitimate index register. STRICT_P says
1666 whether we need strict checking. */
1667
1668bool
1669m68k_legitimate_index_reg_p (rtx x, bool strict_p)
1670{
1671 if (!strict_p && GET_CODE (x) == SUBREG)
1672 x = SUBREG_REG (x);
1673
1674 return (REG_P (x)
1675 && (strict_p
1676 ? REGNO_OK_FOR_INDEX_P (REGNO (x))
bf32249e 1677 : REGNO_OK_FOR_INDEX_NONSTRICT_P (REGNO (x))));
fc2241eb
RS
1678}
1679
1680/* Return true if X is a legitimate index expression for a (d8,An,Xn) or
1681 (bd,An,Xn) addressing mode. Fill in the INDEX and SCALE fields of
1682 ADDRESS if so. STRICT_P says whether we need strict checking. */
1683
1684static bool
1685m68k_decompose_index (rtx x, bool strict_p, struct m68k_address *address)
1686{
1687 int scale;
1688
1689 /* Check for a scale factor. */
1690 scale = 1;
1691 if ((TARGET_68020 || TARGET_COLDFIRE)
1692 && GET_CODE (x) == MULT
1693 && GET_CODE (XEXP (x, 1)) == CONST_INT
1694 && (INTVAL (XEXP (x, 1)) == 2
1695 || INTVAL (XEXP (x, 1)) == 4
1696 || (INTVAL (XEXP (x, 1)) == 8
1697 && (TARGET_COLDFIRE_FPU || !TARGET_COLDFIRE))))
1698 {
1699 scale = INTVAL (XEXP (x, 1));
1700 x = XEXP (x, 0);
1701 }
1702
1703 /* Check for a word extension. */
1704 if (!TARGET_COLDFIRE
1705 && GET_CODE (x) == SIGN_EXTEND
1706 && GET_MODE (XEXP (x, 0)) == HImode)
1707 x = XEXP (x, 0);
1708
1709 if (m68k_legitimate_index_reg_p (x, strict_p))
1710 {
1711 address->scale = scale;
1712 address->index = x;
1713 return true;
1714 }
1715
1716 return false;
1717}
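/* Example, added for exposition: on a 68020 target,
   (mult (reg:SI D1) (const_int 4)) is accepted with address->index = D1 and
   address->scale = 4, whereas on a plain 68000 the MULT form is rejected and
   only an unscaled index register is allowed.  */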
1718
7ffb5e78
RS
1719/* Return true if X is an illegitimate symbolic constant. */
1720
1721bool
1722m68k_illegitimate_symbolic_constant_p (rtx x)
1723{
1724 rtx base, offset;
1725
1726 if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P)
1727 {
1728 split_const (x, &base, &offset);
1729 if (GET_CODE (base) == SYMBOL_REF
1730 && !offset_within_block_p (base, INTVAL (offset)))
1731 return true;
1732 }
1733 return false;
1734}
1735
fc2241eb
RS
1736/* Return true if X is a legitimate constant address that can reach
1737 bytes in the range [X, X + REACH). STRICT_P says whether we need
1738 strict checking. */
1739
1740static bool
1741m68k_legitimate_constant_address_p (rtx x, unsigned int reach, bool strict_p)
1742{
1743 rtx base, offset;
1744
1745 if (!CONSTANT_ADDRESS_P (x))
1746 return false;
1747
1748 if (flag_pic
1749 && !(strict_p && TARGET_PCREL)
1750 && symbolic_operand (x, VOIDmode))
1751 return false;
1752
1753 if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P && reach > 1)
1754 {
1755 split_const (x, &base, &offset);
1756 if (GET_CODE (base) == SYMBOL_REF
1757 && !offset_within_block_p (base, INTVAL (offset) + reach - 1))
1758 return false;
1759 }
1760
1761 return true;
1762}
1763
1764/* Return true if X is a LABEL_REF for a jump table. Assume that unplaced
1765 labels will become jump tables. */
1766
1767static bool
1768m68k_jump_table_ref_p (rtx x)
1769{
1770 if (GET_CODE (x) != LABEL_REF)
1771 return false;
1772
1773 x = XEXP (x, 0);
1774 if (!NEXT_INSN (x) && !PREV_INSN (x))
1775 return true;
1776
1777 x = next_nonnote_insn (x);
1778 return x && JUMP_TABLE_DATA_P (x);
1779}
1780
1781/* Return true if X is a legitimate address for values of mode MODE.
1782 STRICT_P says whether strict checking is needed. If the address
1783 is valid, describe its components in *ADDRESS. */
1784
1785static bool
1786m68k_decompose_address (enum machine_mode mode, rtx x,
1787 bool strict_p, struct m68k_address *address)
1788{
1789 unsigned int reach;
1790
1791 memset (address, 0, sizeof (*address));
1792
1793 if (mode == BLKmode)
1794 reach = 1;
1795 else
1796 reach = GET_MODE_SIZE (mode);
1797
1798 /* Check for (An) (mode 2). */
1799 if (m68k_legitimate_base_reg_p (x, strict_p))
1800 {
1801 address->base = x;
1802 return true;
1803 }
1804
1805 /* Check for -(An) and (An)+ (modes 3 and 4). */
1806 if ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_INC)
1807 && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
1808 {
1809 address->code = GET_CODE (x);
1810 address->base = XEXP (x, 0);
1811 return true;
1812 }
1813
1814 /* Check for (d16,An) (mode 5). */
1815 if (GET_CODE (x) == PLUS
1816 && GET_CODE (XEXP (x, 1)) == CONST_INT
1817 && IN_RANGE (INTVAL (XEXP (x, 1)), -0x8000, 0x8000 - reach)
1818 && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
1819 {
1820 address->base = XEXP (x, 0);
1821 address->offset = XEXP (x, 1);
1822 return true;
1823 }
1824
1825 /* Check for GOT loads. These are (bd,An,Xn) addresses if
1826 TARGET_68020 && flag_pic == 2, otherwise they are (d16,An)
1827 addresses. */
1828 if (flag_pic
1829 && GET_CODE (x) == PLUS
1830 && XEXP (x, 0) == pic_offset_table_rtx
1831 && (GET_CODE (XEXP (x, 1)) == SYMBOL_REF
1832 || GET_CODE (XEXP (x, 1)) == LABEL_REF))
1833 {
1834 address->base = XEXP (x, 0);
1835 address->offset = XEXP (x, 1);
1836 return true;
1837 }
1838
1839 /* The ColdFire FPU only accepts addressing modes 2-5. */
1840 if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
1841 return false;
1842
1843 /* Check for (xxx).w and (xxx).l. Also, in the TARGET_PCREL case,
1844 check for (d16,PC) or (bd,PC,Xn) with a suppressed index register.
1845 All these modes are variations of mode 7. */
1846 if (m68k_legitimate_constant_address_p (x, reach, strict_p))
1847 {
1848 address->offset = x;
1849 return true;
1850 }
1851
1852 /* Check for (d8,PC,Xn), a mode 7 form. This case is needed for
1853 tablejumps.
1854
1855 ??? do_tablejump creates these addresses before placing the target
1856 label, so we have to assume that unplaced labels are jump table
1857 references. It seems unlikely that we would ever generate indexed
1858 accesses to unplaced labels in other cases. */
1859 if (GET_CODE (x) == PLUS
1860 && m68k_jump_table_ref_p (XEXP (x, 1))
1861 && m68k_decompose_index (XEXP (x, 0), strict_p, address))
1862 {
1863 address->offset = XEXP (x, 1);
1864 return true;
1865 }
1866
1867 /* Everything hereafter deals with (d8,An,Xn.SIZE*SCALE) or
1868 (bd,An,Xn.SIZE*SCALE) addresses. */
1869
1870 if (TARGET_68020)
1871 {
1872 /* Check for a nonzero base displacement. */
1873 if (GET_CODE (x) == PLUS
1874 && m68k_legitimate_constant_address_p (XEXP (x, 1), reach, strict_p))
1875 {
1876 address->offset = XEXP (x, 1);
1877 x = XEXP (x, 0);
1878 }
1879
1880 /* Check for a suppressed index register. */
1881 if (m68k_legitimate_base_reg_p (x, strict_p))
1882 {
1883 address->base = x;
1884 return true;
1885 }
1886
1887 /* Check for a suppressed base register. Do not allow this case
1888 for non-symbolic offsets as it effectively gives gcc freedom
1889 to treat data registers as base registers, which can generate
1890 worse code. */
1891 if (address->offset
1892 && symbolic_operand (address->offset, VOIDmode)
1893 && m68k_decompose_index (x, strict_p, address))
1894 return true;
1895 }
1896 else
1897 {
1898 /* Check for a nonzero base displacement. */
1899 if (GET_CODE (x) == PLUS
1900 && GET_CODE (XEXP (x, 1)) == CONST_INT
1901 && IN_RANGE (INTVAL (XEXP (x, 1)), -0x80, 0x80 - reach))
1902 {
1903 address->offset = XEXP (x, 1);
1904 x = XEXP (x, 0);
1905 }
1906 }
1907
1908 /* We now expect the sum of a base and an index. */
1909 if (GET_CODE (x) == PLUS)
1910 {
1911 if (m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p)
1912 && m68k_decompose_index (XEXP (x, 1), strict_p, address))
1913 {
1914 address->base = XEXP (x, 0);
1915 return true;
1916 }
1917
1918 if (m68k_legitimate_base_reg_p (XEXP (x, 1), strict_p)
1919 && m68k_decompose_index (XEXP (x, 0), strict_p, address))
1920 {
1921 address->base = XEXP (x, 1);
1922 return true;
1923 }
1924 }
1925 return false;
1926}
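/* Two illustrative decompositions, added for exposition: a bare address
   register such as (reg:SI A0) is accepted as mode 2 with address->base set,
   while (plus (reg:SI A0) (const_int 42)) is accepted as the (d16,An) form
   with address->base = A0 and address->offset = 42, the displacement fitting
   the signed 16-bit range checked above.  */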
1927
1928/* Return true if X is a legitimate address for values of mode MODE.
1929 STRICT_P says whether strict checking is needed. */
1930
1931bool
1932m68k_legitimate_address_p (enum machine_mode mode, rtx x, bool strict_p)
1933{
1934 struct m68k_address address;
1935
1936 return m68k_decompose_address (mode, x, strict_p, &address);
1937}
1938
1939/* Return true if X is a memory, describing its address in ADDRESS if so.
1940 Apply strict checking if called during or after reload. */
1941
1942static bool
1943m68k_legitimate_mem_p (rtx x, struct m68k_address *address)
1944{
1945 return (MEM_P (x)
1946 && m68k_decompose_address (GET_MODE (x), XEXP (x, 0),
1947 reload_in_progress || reload_completed,
1948 address));
1949}
1950
1951/* Return true if X matches the 'Q' constraint. It must be a memory
1952 with a base address and no constant offset or index. */
1953
1954bool
1955m68k_matches_q_p (rtx x)
1956{
1957 struct m68k_address address;
1958
1959 return (m68k_legitimate_mem_p (x, &address)
1960 && address.code == UNKNOWN
1961 && address.base
1962 && !address.offset
1963 && !address.index);
1964}
1965
1966 /* Return true if X matches the 'U' constraint. It must be a memory
1967 with a base address, a constant offset and no index. */
1968
1969bool
1970m68k_matches_u_p (rtx x)
1971{
1972 struct m68k_address address;
1973
1974 return (m68k_legitimate_mem_p (x, &address)
1975 && address.code == UNKNOWN
1976 && address.base
1977 && address.offset
1978 && !address.index);
1979}
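/* To illustrate the two constraints above (example added for exposition):
   a MEM whose address is simply (reg A0) satisfies 'Q', while a MEM whose
   address is (plus (reg A0) (const_int 8)) satisfies 'U'; neither constraint
   accepts an index register.  */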
1980
79e68feb
RS
1981/* Legitimize PIC addresses. If the address is already
1982 position-independent, we return ORIG. Newly generated
1983 position-independent addresses go to REG. If we need more
1984 than one register, we lose.
1985
1986 An address is legitimized by making an indirect reference
1987 through the Global Offset Table with the name of the symbol
1988 used as an offset.
1989
1990 The assembler and linker are responsible for placing the
1991 address of the symbol in the GOT. The function prologue
1992 is responsible for initializing a5 to the starting address
1993 of the GOT.
1994
1995 The assembler is also responsible for translating a symbol name
1996 into a constant displacement from the start of the GOT.
1997
1998 A quick example may make things a little clearer:
1999
 2000 When not generating PIC code, to store the value 12345 into _foo
2001 we would generate the following code:
2002
2003 movel #12345, _foo
2004
2005 When generating PIC two transformations are made. First, the compiler
2006 loads the address of foo into a register. So the first transformation makes:
2007
2008 lea _foo, a0
2009 movel #12345, a0@
2010
2011 The code in movsi will intercept the lea instruction and call this
2012 routine which will transform the instructions into:
2013
2014 movel a5@(_foo:w), a0
2015 movel #12345, a0@
2016
2017
2018 That (in a nutshell) is how *all* symbol and label references are
2019 handled. */
2020
2021rtx
8a4a2253
BI
2022legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
2023 rtx reg)
79e68feb
RS
2024{
2025 rtx pic_ref = orig;
2026
2027 /* First handle a simple SYMBOL_REF or LABEL_REF */
2028 if (GET_CODE (orig) == SYMBOL_REF || GET_CODE (orig) == LABEL_REF)
2029 {
4761e388 2030 gcc_assert (reg);
79e68feb 2031
1d8eaa6b
AS
2032 pic_ref = gen_rtx_MEM (Pmode,
2033 gen_rtx_PLUS (Pmode,
2034 pic_offset_table_rtx, orig));
79e68feb 2035 current_function_uses_pic_offset_table = 1;
389fdba0 2036 MEM_READONLY_P (pic_ref) = 1;
79e68feb
RS
2037 emit_move_insn (reg, pic_ref);
2038 return reg;
2039 }
2040 else if (GET_CODE (orig) == CONST)
2041 {
1d8eaa6b 2042 rtx base;
79e68feb 2043
b2e08ed4 2044 /* Make sure this has not already been legitimized. */
79e68feb
RS
2045 if (GET_CODE (XEXP (orig, 0)) == PLUS
2046 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
2047 return orig;
2048
4761e388 2049 gcc_assert (reg);
79e68feb
RS
2050
2051 /* legitimize both operands of the PLUS */
4761e388
NS
2052 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
2053
2054 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
2055 orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
2056 base == reg ? 0 : reg);
79e68feb
RS
2057
2058 if (GET_CODE (orig) == CONST_INT)
ed8908e7 2059 return plus_constant (base, INTVAL (orig));
1d8eaa6b 2060 pic_ref = gen_rtx_PLUS (Pmode, base, orig);
79e68feb
RS
2061 /* Likewise, should we set special REG_NOTEs here? */
2062 }
2063 return pic_ref;
2064}
2065
2066\f
0ce6f9fb 2067
a0a7fbc9 2068#define USE_MOVQ(i) ((unsigned) ((i) + 128) <= 255)
0ce6f9fb 2069
bda2a571
RS
2070/* Return the type of move that should be used for integer I. */
2071
c47b0cb4
MK
2072M68K_CONST_METHOD
2073m68k_const_method (HOST_WIDE_INT i)
0ce6f9fb 2074{
0ce6f9fb
RK
2075 unsigned u;
2076
6910dd70 2077 if (USE_MOVQ (i))
0ce6f9fb 2078 return MOVQ;
24092242 2079
c16eadc7 2080 /* The ColdFire doesn't have byte or word operations. */
97c55091 2081 /* FIXME: This may not be useful for the m68060 either. */
85dbf7e2 2082 if (!TARGET_COLDFIRE)
24092242
RK
2083 {
 2084 /* If -256 < N < 256 but N is not in range for a moveq,
7a1929e1 2085 N^ff will be, so use moveq #N^ff, dreg; not.b dreg. */
24092242
RK
2086 if (USE_MOVQ (i ^ 0xff))
2087 return NOTB;
2088 /* Likewise, try with not.w */
2089 if (USE_MOVQ (i ^ 0xffff))
2090 return NOTW;
2091 /* This is the only value where neg.w is useful */
2092 if (i == -65408)
2093 return NEGW;
24092242 2094 }
28bad6d1 2095
5e04daf3
PB
2096 /* Try also with swap. */
2097 u = i;
2098 if (USE_MOVQ ((u >> 16) | (u << 16)))
2099 return SWAP;
2100
986e74d5 2101 if (TARGET_ISAB)
28bad6d1 2102 {
72edf146 2103 /* Try using MVZ/MVS with an immediate value to load constants. */
28bad6d1
PB
2104 if (i >= 0 && i <= 65535)
2105 return MVZ;
2106 if (i >= -32768 && i <= 32767)
2107 return MVS;
2108 }
2109
0ce6f9fb
RK
2110 /* Otherwise, use move.l */
2111 return MOVL;
2112}
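/* Worked examples of the selection above, added for exposition: on a
   non-ColdFire target, 255 is not moveq-able but 255^0xff == 0 is, so NOTB
   is chosen and the load becomes "moveq #0,%dN; not.b %dN" (dN standing for
   whichever data register is used); 0x10000 fails the byte/word tricks but
   its swapped value 1 is moveq-able, so SWAP is chosen and the load becomes
   "moveq #1,%dN; swap %dN".  */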
2113
bda2a571
RS
2114/* Return the cost of moving constant I into a data register. */
2115
3c50106f 2116static int
bda2a571 2117const_int_cost (HOST_WIDE_INT i)
0ce6f9fb 2118{
c47b0cb4 2119 switch (m68k_const_method (i))
0ce6f9fb 2120 {
a0a7fbc9
AS
2121 case MOVQ:
2122 /* Constants between -128 and 127 are cheap due to moveq. */
2123 return 0;
2124 case MVZ:
2125 case MVS:
2126 case NOTB:
2127 case NOTW:
2128 case NEGW:
2129 case SWAP:
2130 /* Constants easily generated by moveq + not.b/not.w/neg.w/swap. */
2131 return 1;
2132 case MOVL:
2133 return 2;
2134 default:
2135 gcc_unreachable ();
0ce6f9fb
RK
2136 }
2137}
2138
3c50106f 2139static bool
8a4a2253 2140m68k_rtx_costs (rtx x, int code, int outer_code, int *total)
3c50106f
RH
2141{
2142 switch (code)
2143 {
2144 case CONST_INT:
2145 /* Constant zero is super cheap due to clr instruction. */
2146 if (x == const0_rtx)
2147 *total = 0;
2148 else
bda2a571 2149 *total = const_int_cost (INTVAL (x));
3c50106f
RH
2150 return true;
2151
2152 case CONST:
2153 case LABEL_REF:
2154 case SYMBOL_REF:
2155 *total = 3;
2156 return true;
2157
2158 case CONST_DOUBLE:
2159 /* Make 0.0 cheaper than other floating constants to
2160 encourage creating tstsf and tstdf insns. */
2161 if (outer_code == COMPARE
2162 && (x == CONST0_RTX (SFmode) || x == CONST0_RTX (DFmode)))
2163 *total = 4;
2164 else
2165 *total = 5;
2166 return true;
2167
2168 /* These are vaguely right for a 68020. */
2169 /* The costs for long multiply have been adjusted to work properly
2170 in synth_mult on the 68020, relative to an average of the time
2171 for add and the time for shift, taking away a little more because
2172 sometimes move insns are needed. */
a0a7fbc9
AS
2173 /* div?.w is relatively cheaper on 68000 counted in COSTS_N_INSNS
2174 terms. */
fe95f2f7
JB
2175#define MULL_COST \
2176 (TUNE_68060 ? 2 \
2177 : TUNE_68040 ? 5 \
2178 : TUNE_CFV2 ? 10 \
2179 : TARGET_COLDFIRE ? 3 : 13)
2180
2181#define MULW_COST \
2182 (TUNE_68060 ? 2 \
2183 : TUNE_68040 ? 3 \
2184 : TUNE_68000_10 || TUNE_CFV2 ? 5 \
2185 : TARGET_COLDFIRE ? 2 : 8)
2186
2187#define DIVW_COST \
2188 (TARGET_CF_HWDIV ? 11 \
2189 : TUNE_68000_10 || TARGET_COLDFIRE ? 12 : 27)
3c50106f
RH
2190
2191 case PLUS:
2192 /* An lea costs about three times as much as a simple add. */
2193 if (GET_MODE (x) == SImode
2194 && GET_CODE (XEXP (x, 1)) == REG
2195 && GET_CODE (XEXP (x, 0)) == MULT
2196 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
2197 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2198 && (INTVAL (XEXP (XEXP (x, 0), 1)) == 2
2199 || INTVAL (XEXP (XEXP (x, 0), 1)) == 4
2200 || INTVAL (XEXP (XEXP (x, 0), 1)) == 8))
eb849993
BI
2201 {
2202 /* lea an@(dx:l:i),am */
2203 *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 2 : 3);
2204 return true;
2205 }
3c50106f
RH
2206 return false;
2207
2208 case ASHIFT:
2209 case ASHIFTRT:
2210 case LSHIFTRT:
fe95f2f7 2211 if (TUNE_68060)
3c50106f
RH
2212 {
2213 *total = COSTS_N_INSNS(1);
2214 return true;
2215 }
fe95f2f7 2216 if (TUNE_68000_10)
3c50106f
RH
2217 {
2218 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
2219 {
2220 if (INTVAL (XEXP (x, 1)) < 16)
2221 *total = COSTS_N_INSNS (2) + INTVAL (XEXP (x, 1)) / 2;
2222 else
2223 /* We're using clrw + swap for these cases. */
2224 *total = COSTS_N_INSNS (4) + (INTVAL (XEXP (x, 1)) - 16) / 2;
2225 }
2226 else
a0a7fbc9 2227 *total = COSTS_N_INSNS (10); /* Worst case. */
3c50106f
RH
2228 return true;
2229 }
2230 /* A shift by a big integer takes an extra instruction. */
2231 if (GET_CODE (XEXP (x, 1)) == CONST_INT
2232 && (INTVAL (XEXP (x, 1)) == 16))
2233 {
2234 *total = COSTS_N_INSNS (2); /* clrw;swap */
2235 return true;
2236 }
2237 if (GET_CODE (XEXP (x, 1)) == CONST_INT
2238 && !(INTVAL (XEXP (x, 1)) > 0
2239 && INTVAL (XEXP (x, 1)) <= 8))
2240 {
eb849993 2241 *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 1 : 3); /* lsr #i,dn */
3c50106f
RH
2242 return true;
2243 }
2244 return false;
2245
2246 case MULT:
2247 if ((GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
2248 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
2249 && GET_MODE (x) == SImode)
2250 *total = COSTS_N_INSNS (MULW_COST);
2251 else if (GET_MODE (x) == QImode || GET_MODE (x) == HImode)
2252 *total = COSTS_N_INSNS (MULW_COST);
2253 else
2254 *total = COSTS_N_INSNS (MULL_COST);
2255 return true;
2256
2257 case DIV:
2258 case UDIV:
2259 case MOD:
2260 case UMOD:
2261 if (GET_MODE (x) == QImode || GET_MODE (x) == HImode)
2262 *total = COSTS_N_INSNS (DIVW_COST); /* div.w */
eb849993
BI
2263 else if (TARGET_CF_HWDIV)
2264 *total = COSTS_N_INSNS (18);
3c50106f
RH
2265 else
2266 *total = COSTS_N_INSNS (43); /* div.l */
2267 return true;
2268
2269 default:
2270 return false;
2271 }
2272}
2273
88512ba0 2274/* Return an instruction to move CONST_INT OPERANDS[1] into data register
bda2a571
RS
2275 OPERANDS[0]. */
2276
2277static const char *
8a4a2253 2278output_move_const_into_data_reg (rtx *operands)
0ce6f9fb 2279{
bda2a571 2280 HOST_WIDE_INT i;
0ce6f9fb
RK
2281
2282 i = INTVAL (operands[1]);
c47b0cb4 2283 switch (m68k_const_method (i))
0ce6f9fb 2284 {
28bad6d1 2285 case MVZ:
28bad6d1 2286 return "mvzw %1,%0";
1cbae84f
PB
2287 case MVS:
2288 return "mvsw %1,%0";
a0a7fbc9 2289 case MOVQ:
0ce6f9fb 2290 return "moveq %1,%0";
a0a7fbc9 2291 case NOTB:
66e07510 2292 CC_STATUS_INIT;
1d8eaa6b 2293 operands[1] = GEN_INT (i ^ 0xff);
0ce6f9fb 2294 return "moveq %1,%0\n\tnot%.b %0";
a0a7fbc9 2295 case NOTW:
66e07510 2296 CC_STATUS_INIT;
1d8eaa6b 2297 operands[1] = GEN_INT (i ^ 0xffff);
0ce6f9fb 2298 return "moveq %1,%0\n\tnot%.w %0";
a0a7fbc9 2299 case NEGW:
66e07510 2300 CC_STATUS_INIT;
3b4b85c9 2301 return "moveq #-128,%0\n\tneg%.w %0";
a0a7fbc9 2302 case SWAP:
0ce6f9fb
RK
2303 {
2304 unsigned u = i;
2305
1d8eaa6b 2306 operands[1] = GEN_INT ((u << 16) | (u >> 16));
0ce6f9fb 2307 return "moveq %1,%0\n\tswap %0";
0ce6f9fb 2308 }
a0a7fbc9 2309 case MOVL:
bda2a571 2310 return "move%.l %1,%0";
a0a7fbc9 2311 default:
bda2a571 2312 gcc_unreachable ();
0ce6f9fb
RK
2313 }
2314}
2315
bda2a571 2316/* Return true if I can be handled by ISA B's mov3q instruction. */
5e04daf3 2317
bda2a571
RS
2318bool
2319valid_mov3q_const (HOST_WIDE_INT i)
2320{
2321 return TARGET_ISAB && (i == -1 || IN_RANGE (i, 1, 7));
5e04daf3
PB
2322}
2323
bda2a571
RS
2324/* Return an instruction to move CONST_INT OPERANDS[1] into OPERANDS[0].
2325 I is the value of OPERANDS[1]. */
5e04daf3 2326
bda2a571 2327static const char *
8a4a2253 2328output_move_simode_const (rtx *operands)
02ed0c07 2329{
bda2a571
RS
2330 rtx dest;
2331 HOST_WIDE_INT src;
2332
2333 dest = operands[0];
2334 src = INTVAL (operands[1]);
2335 if (src == 0
2336 && (DATA_REG_P (dest) || MEM_P (dest))
3197c489
RS
2337 /* clr insns on 68000 read before writing. */
2338 && ((TARGET_68010 || TARGET_COLDFIRE)
bda2a571 2339 || !(MEM_P (dest) && MEM_VOLATILE_P (dest))))
02ed0c07 2340 return "clr%.l %0";
bda2a571 2341 else if (GET_MODE (dest) == SImode && valid_mov3q_const (src))
a0a7fbc9 2342 return "mov3q%.l %1,%0";
bda2a571 2343 else if (src == 0 && ADDRESS_REG_P (dest))
38198304 2344 return "sub%.l %0,%0";
bda2a571 2345 else if (DATA_REG_P (dest))
02ed0c07 2346 return output_move_const_into_data_reg (operands);
bda2a571 2347 else if (ADDRESS_REG_P (dest) && IN_RANGE (src, -0x8000, 0x7fff))
5e04daf3 2348 {
bda2a571 2349 if (valid_mov3q_const (src))
5e04daf3
PB
2350 return "mov3q%.l %1,%0";
2351 return "move%.w %1,%0";
2352 }
bda2a571
RS
2353 else if (MEM_P (dest)
2354 && GET_CODE (XEXP (dest, 0)) == PRE_DEC
2355 && REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
2356 && IN_RANGE (src, -0x8000, 0x7fff))
5e04daf3 2357 {
bda2a571 2358 if (valid_mov3q_const (src))
5e04daf3
PB
2359 return "mov3q%.l %1,%-";
2360 return "pea %a1";
2361 }
02ed0c07
RK
2362 return "move%.l %1,%0";
2363}
2364
5505f548 2365const char *
8a4a2253 2366output_move_simode (rtx *operands)
f4e80198
RK
2367{
2368 if (GET_CODE (operands[1]) == CONST_INT)
2369 return output_move_simode_const (operands);
2370 else if ((GET_CODE (operands[1]) == SYMBOL_REF
2371 || GET_CODE (operands[1]) == CONST)
2372 && push_operand (operands[0], SImode))
2373 return "pea %a1";
2374 else if ((GET_CODE (operands[1]) == SYMBOL_REF
2375 || GET_CODE (operands[1]) == CONST)
2376 && ADDRESS_REG_P (operands[0]))
2377 return "lea %a1,%0";
2378 return "move%.l %1,%0";
2379}
2380
5505f548 2381const char *
8a4a2253 2382output_move_himode (rtx *operands)
f4e80198
RK
2383{
2384 if (GET_CODE (operands[1]) == CONST_INT)
2385 {
2386 if (operands[1] == const0_rtx
2387 && (DATA_REG_P (operands[0])
2388 || GET_CODE (operands[0]) == MEM)
3197c489
RS
2389 /* clr insns on 68000 read before writing. */
2390 && ((TARGET_68010 || TARGET_COLDFIRE)
f4e80198
RK
2391 || !(GET_CODE (operands[0]) == MEM
2392 && MEM_VOLATILE_P (operands[0]))))
2393 return "clr%.w %0";
38198304
AS
2394 else if (operands[1] == const0_rtx
2395 && ADDRESS_REG_P (operands[0]))
2396 return "sub%.l %0,%0";
f4e80198
RK
2397 else if (DATA_REG_P (operands[0])
2398 && INTVAL (operands[1]) < 128
2399 && INTVAL (operands[1]) >= -128)
a0a7fbc9 2400 return "moveq %1,%0";
f4e80198
RK
2401 else if (INTVAL (operands[1]) < 0x8000
2402 && INTVAL (operands[1]) >= -0x8000)
2403 return "move%.w %1,%0";
2404 }
2405 else if (CONSTANT_P (operands[1]))
2406 return "move%.l %1,%0";
f4e80198
RK
2407 return "move%.w %1,%0";
2408}
2409
5505f548 2410const char *
8a4a2253 2411output_move_qimode (rtx *operands)
f4e80198 2412{
102701ff 2413 /* 68k family always modifies the stack pointer by at least 2, even for
c16eadc7 2414 byte pushes. The 5200 (ColdFire) does not do this. */
4761e388 2415
a0a7fbc9 2416 /* This case is generated by pushqi1 pattern now. */
4761e388
NS
2417 gcc_assert (!(GET_CODE (operands[0]) == MEM
2418 && GET_CODE (XEXP (operands[0], 0)) == PRE_DEC
2419 && XEXP (XEXP (operands[0], 0), 0) == stack_pointer_rtx
2420 && ! ADDRESS_REG_P (operands[1])
2421 && ! TARGET_COLDFIRE));
f4e80198 2422
3197c489 2423 /* clr and st insns on 68000 read before writing. */
f4e80198 2424 if (!ADDRESS_REG_P (operands[0])
3197c489 2425 && ((TARGET_68010 || TARGET_COLDFIRE)
f4e80198
RK
2426 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
2427 {
2428 if (operands[1] == const0_rtx)
2429 return "clr%.b %0";
9425fb04 2430 if ((!TARGET_COLDFIRE || DATA_REG_P (operands[0]))
f4e80198
RK
2431 && GET_CODE (operands[1]) == CONST_INT
2432 && (INTVAL (operands[1]) & 255) == 255)
2433 {
2434 CC_STATUS_INIT;
2435 return "st %0";
2436 }
2437 }
2438 if (GET_CODE (operands[1]) == CONST_INT
2439 && DATA_REG_P (operands[0])
2440 && INTVAL (operands[1]) < 128
2441 && INTVAL (operands[1]) >= -128)
a0a7fbc9 2442 return "moveq %1,%0";
38198304
AS
2443 if (operands[1] == const0_rtx && ADDRESS_REG_P (operands[0]))
2444 return "sub%.l %0,%0";
f4e80198
RK
2445 if (GET_CODE (operands[1]) != CONST_INT && CONSTANT_P (operands[1]))
2446 return "move%.l %1,%0";
c16eadc7 2447 /* 68k family (including the 5200 ColdFire) does not support byte moves to or
37834fc8
JL
2448 from address registers. */
2449 if (ADDRESS_REG_P (operands[0]) || ADDRESS_REG_P (operands[1]))
f4e80198
RK
2450 return "move%.w %1,%0";
2451 return "move%.b %1,%0";
2452}
2453
5505f548 2454const char *
8a4a2253 2455output_move_stricthi (rtx *operands)
9b55bf04
RK
2456{
2457 if (operands[1] == const0_rtx
3197c489
RS
2458 /* clr insns on 68000 read before writing. */
2459 && ((TARGET_68010 || TARGET_COLDFIRE)
9b55bf04
RK
2460 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
2461 return "clr%.w %0";
2462 return "move%.w %1,%0";
2463}
2464
5505f548 2465const char *
8a4a2253 2466output_move_strictqi (rtx *operands)
9b55bf04
RK
2467{
2468 if (operands[1] == const0_rtx
3197c489
RS
2469 /* clr insns on 68000 read before writing. */
2470 && ((TARGET_68010 || TARGET_COLDFIRE)
9b55bf04
RK
2471 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
2472 return "clr%.b %0";
2473 return "move%.b %1,%0";
2474}
2475
79e68feb
RS
2476/* Return the best assembler insn template
2477 for moving operands[1] into operands[0] as a fullword. */
2478
5505f548 2479static const char *
8a4a2253 2480singlemove_string (rtx *operands)
79e68feb 2481{
02ed0c07
RK
2482 if (GET_CODE (operands[1]) == CONST_INT)
2483 return output_move_simode_const (operands);
2484 return "move%.l %1,%0";
79e68feb
RS
2485}
2486
2505bc97 2487
c47b0cb4
MK
2488/* Output assembler or rtl code to perform a doubleword move insn
2489 with operands OPERANDS.
2490 Pointers to 3 helper functions should be specified:
2491 HANDLE_REG_ADJUST to adjust a register by a small value,
2492 HANDLE_COMPADR to compute an address and
2493 HANDLE_MOVSI to move 4 bytes. */
79e68feb 2494
c47b0cb4
MK
2495static void
2496handle_move_double (rtx operands[2],
2497 void (*handle_reg_adjust) (rtx, int),
2498 void (*handle_compadr) (rtx [2]),
2499 void (*handle_movsi) (rtx [2]))
79e68feb 2500{
2505bc97
RS
2501 enum
2502 {
2503 REGOP, OFFSOP, MEMOP, PUSHOP, POPOP, CNSTOP, RNDOP
2504 } optype0, optype1;
79e68feb 2505 rtx latehalf[2];
2505bc97 2506 rtx middlehalf[2];
7f98eeb6 2507 rtx xops[2];
79e68feb 2508 rtx addreg0 = 0, addreg1 = 0;
7f98eeb6 2509 int dest_overlapped_low = 0;
184916bc 2510 int size = GET_MODE_SIZE (GET_MODE (operands[0]));
2505bc97
RS
2511
2512 middlehalf[0] = 0;
2513 middlehalf[1] = 0;
79e68feb
RS
2514
2515 /* First classify both operands. */
2516
2517 if (REG_P (operands[0]))
2518 optype0 = REGOP;
2519 else if (offsettable_memref_p (operands[0]))
2520 optype0 = OFFSOP;
2521 else if (GET_CODE (XEXP (operands[0], 0)) == POST_INC)
2522 optype0 = POPOP;
2523 else if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC)
2524 optype0 = PUSHOP;
2525 else if (GET_CODE (operands[0]) == MEM)
2526 optype0 = MEMOP;
2527 else
2528 optype0 = RNDOP;
2529
2530 if (REG_P (operands[1]))
2531 optype1 = REGOP;
2532 else if (CONSTANT_P (operands[1]))
2533 optype1 = CNSTOP;
2534 else if (offsettable_memref_p (operands[1]))
2535 optype1 = OFFSOP;
2536 else if (GET_CODE (XEXP (operands[1], 0)) == POST_INC)
2537 optype1 = POPOP;
2538 else if (GET_CODE (XEXP (operands[1], 0)) == PRE_DEC)
2539 optype1 = PUSHOP;
2540 else if (GET_CODE (operands[1]) == MEM)
2541 optype1 = MEMOP;
2542 else
2543 optype1 = RNDOP;
2544
4761e388
NS
2545 /* Check for the cases that the operand constraints are not supposed
2546 to allow to happen. Generating code for these cases is
2547 painful. */
2548 gcc_assert (optype0 != RNDOP && optype1 != RNDOP);
79e68feb
RS
2549
2550 /* If one operand is decrementing and one is incrementing
2551 decrement the former register explicitly
2552 and change that operand into ordinary indexing. */
2553
2554 if (optype0 == PUSHOP && optype1 == POPOP)
2555 {
2556 operands[0] = XEXP (XEXP (operands[0], 0), 0);
c47b0cb4
MK
2557
2558 handle_reg_adjust (operands[0], -size);
2559
2505bc97 2560 if (GET_MODE (operands[1]) == XFmode)
1d8eaa6b 2561 operands[0] = gen_rtx_MEM (XFmode, operands[0]);
2505bc97 2562 else if (GET_MODE (operands[0]) == DFmode)
1d8eaa6b 2563 operands[0] = gen_rtx_MEM (DFmode, operands[0]);
2505bc97 2564 else
1d8eaa6b 2565 operands[0] = gen_rtx_MEM (DImode, operands[0]);
79e68feb
RS
2566 optype0 = OFFSOP;
2567 }
2568 if (optype0 == POPOP && optype1 == PUSHOP)
2569 {
2570 operands[1] = XEXP (XEXP (operands[1], 0), 0);
c47b0cb4
MK
2571
2572 handle_reg_adjust (operands[1], -size);
2573
2505bc97 2574 if (GET_MODE (operands[1]) == XFmode)
1d8eaa6b 2575 operands[1] = gen_rtx_MEM (XFmode, operands[1]);
2505bc97 2576 else if (GET_MODE (operands[1]) == DFmode)
1d8eaa6b 2577 operands[1] = gen_rtx_MEM (DFmode, operands[1]);
2505bc97 2578 else
1d8eaa6b 2579 operands[1] = gen_rtx_MEM (DImode, operands[1]);
79e68feb
RS
2580 optype1 = OFFSOP;
2581 }
2582
2583 /* If an operand is an unoffsettable memory ref, find a register
2584 we can increment temporarily to make it refer to the second word. */
2585
2586 if (optype0 == MEMOP)
2587 addreg0 = find_addr_reg (XEXP (operands[0], 0));
2588
2589 if (optype1 == MEMOP)
2590 addreg1 = find_addr_reg (XEXP (operands[1], 0));
2591
2592 /* Ok, we can do one word at a time.
2593 Normally we do the low-numbered word first,
2594 but if either operand is autodecrementing then we
2595 do the high-numbered word first.
2596
2597 In either case, set up in LATEHALF the operands to use
2598 for the high-numbered word and in some cases alter the
2599 operands in OPERANDS to be suitable for the low-numbered word. */
2600
2505bc97
RS
2601 if (size == 12)
2602 {
2603 if (optype0 == REGOP)
2604 {
1d8eaa6b
AS
2605 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 2);
2606 middlehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2505bc97
RS
2607 }
2608 else if (optype0 == OFFSOP)
2609 {
b72f00af
RK
2610 middlehalf[0] = adjust_address (operands[0], SImode, 4);
2611 latehalf[0] = adjust_address (operands[0], SImode, size - 4);
2505bc97
RS
2612 }
2613 else
2614 {
c47b0cb4
MK
2615 middlehalf[0] = adjust_address (operands[0], SImode, 0);
2616 latehalf[0] = adjust_address (operands[0], SImode, 0);
2505bc97
RS
2617 }
2618
2619 if (optype1 == REGOP)
2620 {
1d8eaa6b
AS
2621 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 2);
2622 middlehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
2505bc97
RS
2623 }
2624 else if (optype1 == OFFSOP)
2625 {
b72f00af
RK
2626 middlehalf[1] = adjust_address (operands[1], SImode, 4);
2627 latehalf[1] = adjust_address (operands[1], SImode, size - 4);
2505bc97
RS
2628 }
2629 else if (optype1 == CNSTOP)
2630 {
2631 if (GET_CODE (operands[1]) == CONST_DOUBLE)
2632 {
2633 REAL_VALUE_TYPE r;
2634 long l[3];
2635
2636 REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
2637 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
2638 operands[1] = GEN_INT (l[0]);
2639 middlehalf[1] = GEN_INT (l[1]);
2640 latehalf[1] = GEN_INT (l[2]);
2641 }
4761e388 2642 else
2505bc97 2643 {
4761e388
NS
2644 /* No non-CONST_DOUBLE constant should ever appear
2645 here. */
2646 gcc_assert (!CONSTANT_P (operands[1]));
2505bc97
RS
2647 }
2648 }
2649 else
2650 {
c47b0cb4
MK
2651 middlehalf[1] = adjust_address (operands[1], SImode, 0);
2652 latehalf[1] = adjust_address (operands[1], SImode, 0);
2505bc97
RS
2653 }
2654 }
79e68feb 2655 else
2505bc97
RS
2656 /* size is not 12: */
2657 {
2658 if (optype0 == REGOP)
1d8eaa6b 2659 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2505bc97 2660 else if (optype0 == OFFSOP)
b72f00af 2661 latehalf[0] = adjust_address (operands[0], SImode, size - 4);
2505bc97 2662 else
c47b0cb4 2663 latehalf[0] = adjust_address (operands[0], SImode, 0);
2505bc97
RS
2664
2665 if (optype1 == REGOP)
1d8eaa6b 2666 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
2505bc97 2667 else if (optype1 == OFFSOP)
b72f00af 2668 latehalf[1] = adjust_address (operands[1], SImode, size - 4);
2505bc97
RS
2669 else if (optype1 == CNSTOP)
2670 split_double (operands[1], &operands[1], &latehalf[1]);
2671 else
c47b0cb4 2672 latehalf[1] = adjust_address (operands[1], SImode, 0);
2505bc97 2673 }
79e68feb
RS
2674
2675 /* If insn is effectively movd N(sp),-(sp) then we will do the
2676 high word first. We should use the adjusted operand 1 (which is N+4(sp))
2677 for the low word as well, to compensate for the first decrement of sp. */
2678 if (optype0 == PUSHOP
2679 && REGNO (XEXP (XEXP (operands[0], 0), 0)) == STACK_POINTER_REGNUM
2680 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
c88aeaf8 2681 operands[1] = middlehalf[1] = latehalf[1];
79e68feb 2682
7f98eeb6
RS
2683 /* For (set (reg:DI N) (mem:DI ... (reg:SI N) ...)),
2684 if the upper part of reg N does not appear in the MEM, arrange to
2685 emit the move late-half first. Otherwise, compute the MEM address
2686 into the upper part of N and use that as a pointer to the memory
2687 operand. */
2688 if (optype0 == REGOP
2689 && (optype1 == OFFSOP || optype1 == MEMOP))
2690 {
1d8eaa6b 2691 rtx testlow = gen_rtx_REG (SImode, REGNO (operands[0]));
3a58400f
RS
2692
2693 if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
d7e8d581 2694 && reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
7f98eeb6
RS
2695 {
2696 /* If both halves of dest are used in the src memory address,
3a58400f
RS
2697 compute the address into latehalf of dest.
2698 Note that this can't happen if the dest is two data regs. */
4761e388 2699 compadr:
7f98eeb6
RS
2700 xops[0] = latehalf[0];
2701 xops[1] = XEXP (operands[1], 0);
c47b0cb4
MK
2702
2703 handle_compadr (xops);
2704 if (GET_MODE (operands[1]) == XFmode)
7f98eeb6 2705 {
1d8eaa6b 2706 operands[1] = gen_rtx_MEM (XFmode, latehalf[0]);
b72f00af
RK
2707 middlehalf[1] = adjust_address (operands[1], DImode, size - 8);
2708 latehalf[1] = adjust_address (operands[1], DImode, size - 4);
7f98eeb6
RS
2709 }
2710 else
2711 {
1d8eaa6b 2712 operands[1] = gen_rtx_MEM (DImode, latehalf[0]);
b72f00af 2713 latehalf[1] = adjust_address (operands[1], DImode, size - 4);
7f98eeb6
RS
2714 }
2715 }
2716 else if (size == 12
d7e8d581
RS
2717 && reg_overlap_mentioned_p (middlehalf[0],
2718 XEXP (operands[1], 0)))
7f98eeb6 2719 {
3a58400f
RS
2720 /* Check for two regs used by both source and dest.
2721 Note that this can't happen if the dest is all data regs.
2722 It can happen if the dest is d6, d7, a0.
2723 But in that case, latehalf is an addr reg, so
2724 the code at compadr does ok. */
2725
2726 if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
d7e8d581
RS
2727 || reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
2728 goto compadr;
7f98eeb6
RS
2729
2730 /* JRV says this can't happen: */
4761e388 2731 gcc_assert (!addreg0 && !addreg1);
7f98eeb6 2732
7a1929e1 2733 /* Only the middle reg conflicts; simply put it last. */
c47b0cb4
MK
2734 handle_movsi (operands);
2735 handle_movsi (latehalf);
2736 handle_movsi (middlehalf);
2737
2738 return;
7f98eeb6 2739 }
2fb8a81d 2740 else if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0)))
7f98eeb6
RS
2741 /* If the low half of dest is mentioned in the source memory
 2742 address, then arrange to emit the move late half first. */
2743 dest_overlapped_low = 1;
2744 }
2745
79e68feb
RS
2746 /* If one or both operands autodecrementing,
2747 do the two words, high-numbered first. */
2748
2749 /* Likewise, the first move would clobber the source of the second one,
2750 do them in the other order. This happens only for registers;
2751 such overlap can't happen in memory unless the user explicitly
2752 sets it up, and that is an undefined circumstance. */
2753
2754 if (optype0 == PUSHOP || optype1 == PUSHOP
2755 || (optype0 == REGOP && optype1 == REGOP
2505bc97 2756 && ((middlehalf[1] && REGNO (operands[0]) == REGNO (middlehalf[1]))
7f98eeb6
RS
2757 || REGNO (operands[0]) == REGNO (latehalf[1])))
2758 || dest_overlapped_low)
79e68feb
RS
2759 {
2760 /* Make any unoffsettable addresses point at high-numbered word. */
2761 if (addreg0)
c47b0cb4 2762 handle_reg_adjust (addreg0, size - 4);
79e68feb 2763 if (addreg1)
c47b0cb4 2764 handle_reg_adjust (addreg1, size - 4);
79e68feb
RS
2765
2766 /* Do that word. */
c47b0cb4 2767 handle_movsi (latehalf);
79e68feb
RS
2768
2769 /* Undo the adds we just did. */
2770 if (addreg0)
c47b0cb4 2771 handle_reg_adjust (addreg0, -4);
79e68feb 2772 if (addreg1)
c47b0cb4 2773 handle_reg_adjust (addreg1, -4);
79e68feb 2774
2505bc97
RS
2775 if (size == 12)
2776 {
c47b0cb4
MK
2777 handle_movsi (middlehalf);
2778
2505bc97 2779 if (addreg0)
c47b0cb4 2780 handle_reg_adjust (addreg0, -4);
2505bc97 2781 if (addreg1)
c47b0cb4 2782 handle_reg_adjust (addreg1, -4);
2505bc97
RS
2783 }
2784
79e68feb 2785 /* Do low-numbered word. */
c47b0cb4
MK
2786
2787 handle_movsi (operands);
2788 return;
79e68feb
RS
2789 }
2790
2791 /* Normal case: do the two words, low-numbered first. */
2792
c47b0cb4 2793 handle_movsi (operands);
79e68feb 2794
2505bc97
RS
2795 /* Do the middle one of the three words for long double */
2796 if (size == 12)
2797 {
2798 if (addreg0)
c47b0cb4 2799 handle_reg_adjust (addreg0, 4);
2505bc97 2800 if (addreg1)
c47b0cb4 2801 handle_reg_adjust (addreg1, 4);
2505bc97 2802
c47b0cb4 2803 handle_movsi (middlehalf);
2505bc97
RS
2804 }
2805
79e68feb
RS
2806 /* Make any unoffsettable addresses point at high-numbered word. */
2807 if (addreg0)
c47b0cb4 2808 handle_reg_adjust (addreg0, 4);
79e68feb 2809 if (addreg1)
c47b0cb4 2810 handle_reg_adjust (addreg1, 4);
79e68feb
RS
2811
2812 /* Do that word. */
c47b0cb4 2813 handle_movsi (latehalf);
79e68feb
RS
2814
2815 /* Undo the adds we just did. */
2816 if (addreg0)
c47b0cb4
MK
2817 handle_reg_adjust (addreg0, -(size - 4));
2818 if (addreg1)
2819 handle_reg_adjust (addreg1, -(size - 4));
2820
2821 return;
2822}
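/* Illustrative summary, added for exposition: for a plain DImode move
   between a register pair and an offsettable memory operand, the code above
   emits two SImode moves, low-numbered word first, with the late half taken
   from offset size - 4 (offset 4 for an 8-byte move); the high-word-first
   ordering is used only for pre-decrement operands or when the destination
   overlaps the source as described above.  */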
2823
2824/* Output assembler code to adjust REG by N. */
2825static void
2826output_reg_adjust (rtx reg, int n)
2827{
2828 const char *s;
2829
2830 gcc_assert (GET_MODE (reg) == SImode
2831 && -12 <= n && n != 0 && n <= 12);
2832
2833 switch (n)
2505bc97 2834 {
c47b0cb4
MK
2835 case 12:
2836 s = "add%.l #12,%0";
2837 break;
2838
2839 case 8:
2840 s = "addq%.l #8,%0";
2841 break;
2842
2843 case 4:
2844 s = "addq%.l #4,%0";
2845 break;
2846
2847 case -12:
2848 s = "sub%.l #12,%0";
2849 break;
2850
2851 case -8:
2852 s = "subq%.l #8,%0";
2853 break;
2854
2855 case -4:
2856 s = "subq%.l #4,%0";
2857 break;
2858
2859 default:
2860 gcc_unreachable ();
2861 s = NULL;
2505bc97 2862 }
c47b0cb4
MK
2863
2864 output_asm_insn (s, &reg);
2865}
2866
2867/* Emit rtl code to adjust REG by N. */
2868static void
2869emit_reg_adjust (rtx reg1, int n)
2870{
2871 rtx reg2;
2872
2873 gcc_assert (GET_MODE (reg1) == SImode
2874 && -12 <= n && n != 0 && n <= 12);
2875
2876 reg1 = copy_rtx (reg1);
2877 reg2 = copy_rtx (reg1);
2878
2879 if (n < 0)
2880 emit_insn (gen_subsi3 (reg1, reg2, GEN_INT (-n)));
2881 else if (n > 0)
2882 emit_insn (gen_addsi3 (reg1, reg2, GEN_INT (n)));
2883 else
2884 gcc_unreachable ();
2885}
2886
2887/* Output assembler to load address OPERANDS[0] to register OPERANDS[1]. */
2888static void
2889output_compadr (rtx operands[2])
2890{
2891 output_asm_insn ("lea %a1,%0", operands);
2892}
2893
2894/* Output the best assembler insn for moving operands[1] into operands[0]
2895 as a fullword. */
2896static void
2897output_movsi (rtx operands[2])
2898{
2899 output_asm_insn (singlemove_string (operands), operands);
2900}
2901
2902/* Copy OP and change its mode to MODE. */
2903static rtx
2904copy_operand (rtx op, enum machine_mode mode)
2905{
2906 /* ??? This looks really ugly. There must be a better way
2907 to change a mode on the operand. */
2908 if (GET_MODE (op) != VOIDmode)
2505bc97 2909 {
c47b0cb4
MK
2910 if (REG_P (op))
2911 op = gen_rtx_REG (mode, REGNO (op));
2505bc97 2912 else
c47b0cb4
MK
2913 {
2914 op = copy_rtx (op);
2915 PUT_MODE (op, mode);
2916 }
2505bc97 2917 }
79e68feb 2918
c47b0cb4
MK
2919 return op;
2920}
2921
2922/* Emit rtl code for moving operands[1] into operands[0] as a fullword. */
2923static void
2924emit_movsi (rtx operands[2])
2925{
2926 operands[0] = copy_operand (operands[0], SImode);
2927 operands[1] = copy_operand (operands[1], SImode);
2928
2929 emit_insn (gen_movsi (operands[0], operands[1]));
2930}
2931
2932/* Output assembler code to perform a doubleword move insn
2933 with operands OPERANDS. */
2934const char *
2935output_move_double (rtx *operands)
2936{
2937 handle_move_double (operands,
2938 output_reg_adjust, output_compadr, output_movsi);
2939
79e68feb
RS
2940 return "";
2941}
2942
c47b0cb4
MK
2943/* Output rtl code to perform a doubleword move insn
2944 with operands OPERANDS. */
2945void
2946m68k_emit_move_double (rtx operands[2])
2947{
2948 handle_move_double (operands, emit_reg_adjust, emit_movsi, emit_movsi);
2949}
dcc21c4c
PB
2950
2951/* Ensure mode of ORIG, a REG rtx, is MODE. Returns either ORIG or a
2952 new rtx with the correct mode. */
2953
2954static rtx
2955force_mode (enum machine_mode mode, rtx orig)
2956{
2957 if (mode == GET_MODE (orig))
2958 return orig;
2959
2960 if (REGNO (orig) >= FIRST_PSEUDO_REGISTER)
2961 abort ();
2962
2963 return gen_rtx_REG (mode, REGNO (orig));
2964}
2965
2966static int
2967fp_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2968{
2969 return reg_renumber && FP_REG_P (op);
2970}
2971
2972/* Emit insns to move operands[1] into operands[0].
2973
2974 Return 1 if we have written out everything that needs to be done to
2975 do the move. Otherwise, return 0 and the caller will emit the move
2976 normally.
2977
2978 Note SCRATCH_REG may not be in the proper mode depending on how it
c0220ea4 2979 will be used. This routine is responsible for creating a new copy
dcc21c4c
PB
2980 of SCRATCH_REG in the proper mode. */
2981
2982int
2983emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
2984{
2985 register rtx operand0 = operands[0];
2986 register rtx operand1 = operands[1];
2987 register rtx tem;
2988
2989 if (scratch_reg
2990 && reload_in_progress && GET_CODE (operand0) == REG
2991 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
2992 operand0 = reg_equiv_mem[REGNO (operand0)];
2993 else if (scratch_reg
2994 && reload_in_progress && GET_CODE (operand0) == SUBREG
2995 && GET_CODE (SUBREG_REG (operand0)) == REG
2996 && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
2997 {
2998 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
2999 the code which tracks sets/uses for delete_output_reload. */
3000 rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
3001 reg_equiv_mem [REGNO (SUBREG_REG (operand0))],
3002 SUBREG_BYTE (operand0));
3003 operand0 = alter_subreg (&temp);
3004 }
3005
3006 if (scratch_reg
3007 && reload_in_progress && GET_CODE (operand1) == REG
3008 && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
3009 operand1 = reg_equiv_mem[REGNO (operand1)];
3010 else if (scratch_reg
3011 && reload_in_progress && GET_CODE (operand1) == SUBREG
3012 && GET_CODE (SUBREG_REG (operand1)) == REG
3013 && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
3014 {
3015 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
3016 the code which tracks sets/uses for delete_output_reload. */
3017 rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
3018 reg_equiv_mem [REGNO (SUBREG_REG (operand1))],
3019 SUBREG_BYTE (operand1));
3020 operand1 = alter_subreg (&temp);
3021 }
3022
3023 if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
3024 && ((tem = find_replacement (&XEXP (operand0, 0)))
3025 != XEXP (operand0, 0)))
3026 operand0 = gen_rtx_MEM (GET_MODE (operand0), tem);
3027 if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
3028 && ((tem = find_replacement (&XEXP (operand1, 0)))
3029 != XEXP (operand1, 0)))
3030 operand1 = gen_rtx_MEM (GET_MODE (operand1), tem);
3031
3032 /* Handle secondary reloads for loads/stores of FP registers where
3033 the address is symbolic by using the scratch register */
3034 if (fp_reg_operand (operand0, mode)
3035 && ((GET_CODE (operand1) == MEM
3036 && ! memory_address_p (DFmode, XEXP (operand1, 0)))
3037 || ((GET_CODE (operand1) == SUBREG
3038 && GET_CODE (XEXP (operand1, 0)) == MEM
3039 && !memory_address_p (DFmode, XEXP (XEXP (operand1, 0), 0)))))
3040 && scratch_reg)
3041 {
3042 if (GET_CODE (operand1) == SUBREG)
3043 operand1 = XEXP (operand1, 0);
3044
3045 /* SCRATCH_REG will hold an address. We want
3046 it in SImode regardless of what mode it was originally given
3047 to us. */
3048 scratch_reg = force_mode (SImode, scratch_reg);
3049
3050 /* D might not fit in 14 bits either; for such cases load D into
3051 scratch reg. */
3052 if (!memory_address_p (Pmode, XEXP (operand1, 0)))
3053 {
3054 emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
3055 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
3056 Pmode,
3057 XEXP (XEXP (operand1, 0), 0),
3058 scratch_reg));
3059 }
3060 else
3061 emit_move_insn (scratch_reg, XEXP (operand1, 0));
3062 emit_insn (gen_rtx_SET (VOIDmode, operand0,
3063 gen_rtx_MEM (mode, scratch_reg)));
3064 return 1;
3065 }
3066 else if (fp_reg_operand (operand1, mode)
3067 && ((GET_CODE (operand0) == MEM
3068 && ! memory_address_p (DFmode, XEXP (operand0, 0)))
3069 || ((GET_CODE (operand0) == SUBREG)
3070 && GET_CODE (XEXP (operand0, 0)) == MEM
3071 && !memory_address_p (DFmode, XEXP (XEXP (operand0, 0), 0))))
3072 && scratch_reg)
3073 {
3074 if (GET_CODE (operand0) == SUBREG)
3075 operand0 = XEXP (operand0, 0);
3076
3077 /* SCRATCH_REG will hold an address and maybe the actual data. We want
 3078 it in SImode regardless of what mode it was originally given
3079 to us. */
3080 scratch_reg = force_mode (SImode, scratch_reg);
3081
3082 /* D might not fit in 14 bits either; for such cases load D into
3083 scratch reg. */
3084 if (!memory_address_p (Pmode, XEXP (operand0, 0)))
3085 {
3086 emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
3087 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0,
3088 0)),
3089 Pmode,
3090 XEXP (XEXP (operand0, 0),
3091 0),
3092 scratch_reg));
3093 }
3094 else
3095 emit_move_insn (scratch_reg, XEXP (operand0, 0));
3096 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_MEM (mode, scratch_reg),
3097 operand1));
3098 return 1;
3099 }
3100 /* Handle secondary reloads for loads of FP registers from constant
3101 expressions by forcing the constant into memory.
3102
 3103 Use scratch_reg to hold the address of the memory location.
3104
3105 The proper fix is to change PREFERRED_RELOAD_CLASS to return
 3106 NO_REGS when presented with a const_int and a register class
3107 containing only FP registers. Doing so unfortunately creates
3108 more problems than it solves. Fix this for 2.5. */
3109 else if (fp_reg_operand (operand0, mode)
3110 && CONSTANT_P (operand1)
3111 && scratch_reg)
3112 {
3113 rtx xoperands[2];
3114
3115 /* SCRATCH_REG will hold an address and maybe the actual data. We want
 3116 it in SImode regardless of what mode it was originally given
3117 to us. */
3118 scratch_reg = force_mode (SImode, scratch_reg);
3119
3120 /* Force the constant into memory and put the address of the
3121 memory location into scratch_reg. */
3122 xoperands[0] = scratch_reg;
3123 xoperands[1] = XEXP (force_const_mem (mode, operand1), 0);
3124 emit_insn (gen_rtx_SET (mode, scratch_reg, xoperands[1]));
3125
3126 /* Now load the destination register. */
3127 emit_insn (gen_rtx_SET (mode, operand0,
3128 gen_rtx_MEM (mode, scratch_reg)));
3129 return 1;
3130 }
3131
3132 /* Now have insn-emit do whatever it normally does. */
3133 return 0;
3134}
3135
01e304f8
RZ
3136/* Split one or more DImode RTL references into pairs of SImode
3137 references. The RTL can be REG, offsettable MEM, integer constant, or
3138 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
3139 split and "num" is its length. lo_half and hi_half are output arrays
3140 that parallel "operands". */
3141
3142void
3143split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
3144{
3145 while (num--)
3146 {
3147 rtx op = operands[num];
3148
3149 /* simplify_subreg refuses to split volatile memory addresses,
3150 but we still have to handle it. */
3151 if (GET_CODE (op) == MEM)
3152 {
3153 lo_half[num] = adjust_address (op, SImode, 4);
3154 hi_half[num] = adjust_address (op, SImode, 0);
3155 }
3156 else
3157 {
3158 lo_half[num] = simplify_gen_subreg (SImode, op,
3159 GET_MODE (op) == VOIDmode
3160 ? DImode : GET_MODE (op), 4);
3161 hi_half[num] = simplify_gen_subreg (SImode, op,
3162 GET_MODE (op) == VOIDmode
3163 ? DImode : GET_MODE (op), 0);
3164 }
3165 }
3166}
3167
a40ed0f3
KH
3168/* Split X into a base and a constant offset, storing them in *BASE
3169 and *OFFSET respectively. */
3170
3171static void
3172m68k_split_offset (rtx x, rtx *base, HOST_WIDE_INT *offset)
3173{
3174 *offset = 0;
3175 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
3176 {
3177 *offset += INTVAL (XEXP (x, 1));
3178 x = XEXP (x, 0);
3179 }
3180 *base = x;
3181}
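/* E.g., added for exposition: X = (plus (reg A0) (const_int 12)) yields
   *BASE = (reg A0) and *OFFSET = 12, while a bare register or any other
   form is returned unchanged with *OFFSET = 0.  */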
3182
3183/* Return true if PATTERN is a PARALLEL suitable for a movem or fmovem
3184 instruction. STORE_P says whether the move is a load or store.
3185
3186 If the instruction uses post-increment or pre-decrement addressing,
3187 AUTOMOD_BASE is the base register and AUTOMOD_OFFSET is the total
3188 adjustment. This adjustment will be made by the first element of
3189 PARALLEL, with the loads or stores starting at element 1. If the
3190 instruction does not use post-increment or pre-decrement addressing,
3191 AUTOMOD_BASE is null, AUTOMOD_OFFSET is 0, and the loads or stores
3192 start at element 0. */
3193
3194bool
3195m68k_movem_pattern_p (rtx pattern, rtx automod_base,
3196 HOST_WIDE_INT automod_offset, bool store_p)
3197{
3198 rtx base, mem_base, set, mem, reg, last_reg;
3199 HOST_WIDE_INT offset, mem_offset;
3200 int i, first, len;
3201 enum reg_class rclass;
3202
3203 len = XVECLEN (pattern, 0);
3204 first = (automod_base != NULL);
3205
3206 if (automod_base)
3207 {
3208 /* Stores must be pre-decrement and loads must be post-increment. */
3209 if (store_p != (automod_offset < 0))
3210 return false;
3211
3212 /* Work out the base and offset for lowest memory location. */
3213 base = automod_base;
3214 offset = (automod_offset < 0 ? automod_offset : 0);
3215 }
3216 else
3217 {
3218 /* Allow any valid base and offset in the first access. */
3219 base = NULL;
3220 offset = 0;
3221 }
3222
3223 last_reg = NULL;
3224 rclass = NO_REGS;
3225 for (i = first; i < len; i++)
3226 {
3227 /* We need a plain SET. */
3228 set = XVECEXP (pattern, 0, i);
3229 if (GET_CODE (set) != SET)
3230 return false;
3231
3232 /* Check that we have a memory location... */
3233 mem = XEXP (set, !store_p);
3234 if (!MEM_P (mem) || !memory_operand (mem, VOIDmode))
3235 return false;
3236
3237 /* ...with the right address. */
3238 if (base == NULL)
3239 {
3240 m68k_split_offset (XEXP (mem, 0), &base, &offset);
3241 /* The ColdFire instruction only allows (An) and (d16,An) modes.
3242 There are no mode restrictions for 680x0 besides the
3243 automodification rules enforced above. */
3244 if (TARGET_COLDFIRE
3245 && !m68k_legitimate_base_reg_p (base, reload_completed))
3246 return false;
3247 }
3248 else
3249 {
3250 m68k_split_offset (XEXP (mem, 0), &mem_base, &mem_offset);
3251 if (!rtx_equal_p (base, mem_base) || offset != mem_offset)
3252 return false;
3253 }
3254
3255 /* Check that we have a register of the required mode and class. */
3256 reg = XEXP (set, store_p);
3257 if (!REG_P (reg)
3258 || !HARD_REGISTER_P (reg)
3259 || GET_MODE (reg) != reg_raw_mode[REGNO (reg)])
3260 return false;
3261
3262 if (last_reg)
3263 {
3264 /* The register must belong to RCLASS and have a higher number
3265 than the register in the previous SET. */
3266 if (!TEST_HARD_REG_BIT (reg_class_contents[rclass], REGNO (reg))
3267 || REGNO (last_reg) >= REGNO (reg))
3268 return false;
3269 }
3270 else
3271 {
3272 /* Work out which register class we need. */
3273 if (INT_REGNO_P (REGNO (reg)))
3274 rclass = GENERAL_REGS;
3275 else if (FP_REGNO_P (REGNO (reg)))
3276 rclass = FP_REGS;
3277 else
3278 return false;
3279 }
3280
3281 last_reg = reg;
3282 offset += GET_MODE_SIZE (GET_MODE (reg));
3283 }
3284
3285 /* If we have an automodification, check whether the final offset is OK. */
3286 if (automod_base && offset != (automod_offset < 0 ? 0 : automod_offset))
3287 return false;
3288
3289 /* Reject unprofitable cases. */
3290 if (len < first + (rclass == FP_REGS ? MIN_FMOVEM_REGS : MIN_MOVEM_REGS))
3291 return false;
3292
3293 return true;
3294}
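/* Illustration of the checks above, added for exposition: a pre-decrement
   store such as movem.l d2-d4,-(sp) corresponds to a PARALLEL whose element
   0 applies the -12 stack adjustment and whose elements 1-3 store d2, d3 and
   d4 at offsets -12, -8 and -4 from the incoming stack pointer; the
   registers appear in increasing order and there are at least
   MIN_MOVEM_REGS of them, so the pattern is accepted.  */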
3295
3296/* Return the assembly code template for a movem or fmovem instruction
3297 whose pattern is given by PATTERN. Store the template's operands
3298 in OPERANDS.
3299
3300 If the instruction uses post-increment or pre-decrement addressing,
3301 AUTOMOD_OFFSET is the total adjustment, otherwise it is 0. STORE_P
3302 is true if this is a store instruction. */
3303
3304const char *
3305m68k_output_movem (rtx *operands, rtx pattern,
3306 HOST_WIDE_INT automod_offset, bool store_p)
3307{
3308 unsigned int mask;
3309 int i, first;
3310
3311 gcc_assert (GET_CODE (pattern) == PARALLEL);
3312 mask = 0;
3313 first = (automod_offset != 0);
3314 for (i = first; i < XVECLEN (pattern, 0); i++)
3315 {
3316 /* When using movem with pre-decrement addressing, register X + D0_REG
3317 is controlled by bit 15 - X. For all other addressing modes,
3318 register X + D0_REG is controlled by bit X. Confusingly, the
3319 register mask for fmovem is in the opposite order to that for
3320 movem. */
3321 unsigned int regno;
3322
3323 gcc_assert (MEM_P (XEXP (XVECEXP (pattern, 0, i), !store_p)));
3324 gcc_assert (REG_P (XEXP (XVECEXP (pattern, 0, i), store_p)));
3325 regno = REGNO (XEXP (XVECEXP (pattern, 0, i), store_p));
3326 if (automod_offset < 0)
3327 {
3328 if (FP_REGNO_P (regno))
3329 mask |= 1 << (regno - FP0_REG);
3330 else
3331 mask |= 1 << (15 - (regno - D0_REG));
3332 }
3333 else
3334 {
3335 if (FP_REGNO_P (regno))
3336 mask |= 1 << (7 - (regno - FP0_REG));
3337 else
3338 mask |= 1 << (regno - D0_REG);
3339 }
3340 }
3341 CC_STATUS_INIT;
3342
3343 if (automod_offset == 0)
3344 operands[0] = XEXP (XEXP (XVECEXP (pattern, 0, first), !store_p), 0);
3345 else if (automod_offset < 0)
3346 operands[0] = gen_rtx_PRE_DEC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
3347 else
3348 operands[0] = gen_rtx_POST_INC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
3349 operands[1] = GEN_INT (mask);
3350 if (FP_REGNO_P (REGNO (XEXP (XVECEXP (pattern, 0, first), store_p))))
3351 {
3352 if (store_p)
1fae2d80 3353 return "fmovem %1,%a0";
a40ed0f3 3354 else
1fae2d80 3355 return "fmovem %a0,%1";
a40ed0f3
KH
3356 }
3357 else
3358 {
3359 if (store_p)
1fae2d80 3360 return "movem%.l %1,%a0";
a40ed0f3 3361 else
1fae2d80 3362 return "movem%.l %a0,%1";
a40ed0f3
KH
3363 }
3364}
3365
79e68feb
RS
3366/* Return a REG that occurs in ADDR with coefficient 1.
3367 ADDR can be effectively incremented by incrementing REG. */
3368
3369static rtx
8a4a2253 3370find_addr_reg (rtx addr)
79e68feb
RS
3371{
3372 while (GET_CODE (addr) == PLUS)
3373 {
3374 if (GET_CODE (XEXP (addr, 0)) == REG)
3375 addr = XEXP (addr, 0);
3376 else if (GET_CODE (XEXP (addr, 1)) == REG)
3377 addr = XEXP (addr, 1);
3378 else if (CONSTANT_P (XEXP (addr, 0)))
3379 addr = XEXP (addr, 1);
3380 else if (CONSTANT_P (XEXP (addr, 1)))
3381 addr = XEXP (addr, 0);
3382 else
4761e388 3383 gcc_unreachable ();
79e68feb 3384 }
4761e388
NS
3385 gcc_assert (GET_CODE (addr) == REG);
3386 return addr;
79e68feb 3387}
9ee3c687 3388
c16eadc7 3389/* Output assembler code to perform a 32-bit 3-operand add. */
9ee3c687 3390
5505f548 3391const char *
8a4a2253 3392output_addsi3 (rtx *operands)
9ee3c687
JW
3393{
3394 if (! operands_match_p (operands[0], operands[1]))
3395 {
3396 if (!ADDRESS_REG_P (operands[1]))
3397 {
3398 rtx tmp = operands[1];
3399
3400 operands[1] = operands[2];
3401 operands[2] = tmp;
3402 }
3403
3404 /* These insns can result from reloads to access
3405 stack slots over 64k from the frame pointer. */
3406 if (GET_CODE (operands[2]) == CONST_INT
218d5a87 3407 && (INTVAL (operands[2]) < -32768 || INTVAL (operands[2]) > 32767))
8c61b6c1 3408 return "move%.l %2,%0\n\tadd%.l %1,%0";
9ee3c687 3409 if (GET_CODE (operands[2]) == REG)
4b3d1177
KH
3410 return MOTOROLA ? "lea (%1,%2.l),%0" : "lea %1@(0,%2:l),%0";
3411 return MOTOROLA ? "lea (%c2,%1),%0" : "lea %1@(%c2),%0";
9ee3c687
JW
3412 }
3413 if (GET_CODE (operands[2]) == CONST_INT)
3414 {
9ee3c687
JW
3415 if (INTVAL (operands[2]) > 0
3416 && INTVAL (operands[2]) <= 8)
3417 return "addq%.l %2,%0";
3418 if (INTVAL (operands[2]) < 0
3419 && INTVAL (operands[2]) >= -8)
3420 {
c5c76735 3421 operands[2] = GEN_INT (- INTVAL (operands[2]));
9ee3c687
JW
3422 return "subq%.l %2,%0";
3423 }
3424 /* On the CPU32 it is faster to use two addql instructions to
3425 add a small integer (8 < N <= 16) to a register.
7a1929e1 3426 Likewise for subql. */
fe95f2f7 3427 if (TUNE_CPU32 && REG_P (operands[0]))
9ee3c687
JW
3428 {
3429 if (INTVAL (operands[2]) > 8
3430 && INTVAL (operands[2]) <= 16)
3431 {
1d8eaa6b 3432 operands[2] = GEN_INT (INTVAL (operands[2]) - 8);
3b4b85c9 3433 return "addq%.l #8,%0\n\taddq%.l %2,%0";
9ee3c687
JW
3434 }
3435 if (INTVAL (operands[2]) < -8
3436 && INTVAL (operands[2]) >= -16)
3437 {
c5c76735 3438 operands[2] = GEN_INT (- INTVAL (operands[2]) - 8);
3b4b85c9 3439 return "subq%.l #8,%0\n\tsubq%.l %2,%0";
9ee3c687
JW
3440 }
3441 }
9ee3c687
JW
3442 if (ADDRESS_REG_P (operands[0])
3443 && INTVAL (operands[2]) >= -0x8000
3444 && INTVAL (operands[2]) < 0x8000)
3445 {
fe95f2f7 3446 if (TUNE_68040)
9ee3c687
JW
3447 return "add%.w %2,%0";
3448 else
4b3d1177 3449 return MOTOROLA ? "lea (%c2,%0),%0" : "lea %0@(%c2),%0";
9ee3c687
JW
3450 }
3451 }
3452 return "add%.l %2,%0";
3453}
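/* As a rough sketch of the cases above, assuming matching operands:
   an in-range constant such as 5 produces "addq.l #5,%d0" and -5
   produces "subq.l #5,%d0"; on the CPU32 a constant such as 12 becomes
   "addq.l #8,%d0" followed by "addq.l #4,%d0"; and a 16-bit constant
   added to an address register (when not tuning for the 68040) uses
   "lea (d16,%an),%an" in Motorola syntax.  */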
79e68feb
RS
3454\f
3455/* Store in cc_status the expressions that the condition codes will
3456 describe after execution of an instruction whose pattern is EXP.
3457 Do not alter them if the instruction would not alter the cc's. */
3458
3459/* On the 68000, all the insns to store in an address register fail to
3460 set the cc's. However, in some cases these instructions can make it
3461 possibly invalid to use the saved cc's. In those cases we clear out
3462 some or all of the saved cc's so they won't be used. */
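/* For instance, "move.l %d0,%a0" sets no flags, so the cc_status left by
   an earlier instruction normally stays usable afterwards; it is only
   discarded below when the tracked values mention something the move
   modifies.  */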
3463
1d8eaa6b 3464void
8a4a2253 3465notice_update_cc (rtx exp, rtx insn)
79e68feb 3466{
1a8965c4 3467 if (GET_CODE (exp) == SET)
79e68feb
RS
3468 {
3469 if (GET_CODE (SET_SRC (exp)) == CALL)
a0a7fbc9 3470 CC_STATUS_INIT;
79e68feb
RS
3471 else if (ADDRESS_REG_P (SET_DEST (exp)))
3472 {
f5963e61 3473 if (cc_status.value1 && modified_in_p (cc_status.value1, insn))
79e68feb 3474 cc_status.value1 = 0;
f5963e61 3475 if (cc_status.value2 && modified_in_p (cc_status.value2, insn))
79e68feb
RS
3476 cc_status.value2 = 0;
3477 }
f6ab62e8
RS
3478 /* fmoves to memory or data registers do not set the condition
3479 codes. Normal moves _do_ set the condition codes, but not in
3480 a way that is appropriate for comparison with 0, because -0.0
3481 would be treated as a negative nonzero number. Note that it
88512ba0 3482 isn't appropriate to conditionalize this restriction on
f6ab62e8
RS
3483 HONOR_SIGNED_ZEROS because that macro merely indicates whether
3484 we care about the difference between -0.0 and +0.0. */
79e68feb
RS
3485 else if (!FP_REG_P (SET_DEST (exp))
3486 && SET_DEST (exp) != cc0_rtx
3487 && (FP_REG_P (SET_SRC (exp))
3488 || GET_CODE (SET_SRC (exp)) == FIX
f6ab62e8 3489 || FLOAT_MODE_P (GET_MODE (SET_DEST (exp)))))
a0a7fbc9 3490 CC_STATUS_INIT;
79e68feb
RS
3491 /* A pair of move insns doesn't produce a useful overall cc. */
3492 else if (!FP_REG_P (SET_DEST (exp))
3493 && !FP_REG_P (SET_SRC (exp))
3494 && GET_MODE_SIZE (GET_MODE (SET_SRC (exp))) > 4
3495 && (GET_CODE (SET_SRC (exp)) == REG
3496 || GET_CODE (SET_SRC (exp)) == MEM
3497 || GET_CODE (SET_SRC (exp)) == CONST_DOUBLE))
a0a7fbc9 3498 CC_STATUS_INIT;
e1dff52a 3499 else if (SET_DEST (exp) != pc_rtx)
79e68feb
RS
3500 {
3501 cc_status.flags = 0;
e1dff52a
KH
3502 cc_status.value1 = SET_DEST (exp);
3503 cc_status.value2 = SET_SRC (exp);
79e68feb
RS
3504 }
3505 }
3506 else if (GET_CODE (exp) == PARALLEL
3507 && GET_CODE (XVECEXP (exp, 0, 0)) == SET)
3508 {
e1dff52a
KH
3509 rtx dest = SET_DEST (XVECEXP (exp, 0, 0));
3510 rtx src = SET_SRC (XVECEXP (exp, 0, 0));
3511
3512 if (ADDRESS_REG_P (dest))
79e68feb 3513 CC_STATUS_INIT;
e1dff52a 3514 else if (dest != pc_rtx)
79e68feb
RS
3515 {
3516 cc_status.flags = 0;
e1dff52a
KH
3517 cc_status.value1 = dest;
3518 cc_status.value2 = src;
79e68feb
RS
3519 }
3520 }
3521 else
3522 CC_STATUS_INIT;
3523 if (cc_status.value2 != 0
3524 && ADDRESS_REG_P (cc_status.value2)
3525 && GET_MODE (cc_status.value2) == QImode)
3526 CC_STATUS_INIT;
1a8965c4 3527 if (cc_status.value2 != 0)
79e68feb
RS
3528 switch (GET_CODE (cc_status.value2))
3529 {
996a5f59 3530 case ASHIFT: case ASHIFTRT: case LSHIFTRT:
79e68feb 3531 case ROTATE: case ROTATERT:
a126dc3a
RH
3532 /* These instructions always clear the overflow bit, and set
3533 the carry to the bit shifted out. */
3534 /* ??? We don't currently have a way to signal carry not valid,
3535 nor do we check for it in the branch insns. */
3536 CC_STATUS_INIT;
3537 break;
3538
3539 case PLUS: case MINUS: case MULT:
3540 case DIV: case UDIV: case MOD: case UMOD: case NEG:
79e68feb
RS
3541 if (GET_MODE (cc_status.value2) != VOIDmode)
3542 cc_status.flags |= CC_NO_OVERFLOW;
3543 break;
3544 case ZERO_EXTEND:
3545 /* (SET r1 (ZERO_EXTEND r2)) on this machine
3546 ends with a move insn moving r2 in r2's mode.
3547 Thus, the cc's are set for r2.
7a1929e1 3548 This can set N bit spuriously. */
79e68feb 3549 cc_status.flags |= CC_NOT_NEGATIVE;
1d8eaa6b
AS
3550
3551 default:
3552 break;
79e68feb
RS
3553 }
3554 if (cc_status.value1 && GET_CODE (cc_status.value1) == REG
3555 && cc_status.value2
3556 && reg_overlap_mentioned_p (cc_status.value1, cc_status.value2))
3557 cc_status.value2 = 0;
3558 if (((cc_status.value1 && FP_REG_P (cc_status.value1))
1a8965c4 3559 || (cc_status.value2 && FP_REG_P (cc_status.value2))))
79e68feb 3560 cc_status.flags = CC_IN_68881;
67595cbb
RZ
3561 if (cc_status.value2 && GET_CODE (cc_status.value2) == COMPARE
3562 && GET_MODE_CLASS (GET_MODE (XEXP (cc_status.value2, 0))) == MODE_FLOAT)
3563 {
3564 cc_status.flags = CC_IN_68881;
3565 if (!FP_REG_P (XEXP (cc_status.value2, 0)))
3566 cc_status.flags |= CC_REVERSED;
3567 }
79e68feb
RS
3568}
3569\f
5505f548 3570const char *
8a4a2253 3571output_move_const_double (rtx *operands)
79e68feb 3572{
1a8965c4 3573 int code = standard_68881_constant_p (operands[1]);
79e68feb 3574
1a8965c4 3575 if (code != 0)
79e68feb 3576 {
1a8965c4 3577 static char buf[40];
79e68feb 3578
3b4b85c9 3579 sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff);
1a8965c4 3580 return buf;
79e68feb 3581 }
1a8965c4 3582 return "fmove%.d %1,%0";
79e68feb
RS
3583}
3584
5505f548 3585const char *
8a4a2253 3586output_move_const_single (rtx *operands)
79e68feb 3587{
1a8965c4 3588 int code = standard_68881_constant_p (operands[1]);
79e68feb 3589
1a8965c4 3590 if (code != 0)
79e68feb 3591 {
1a8965c4 3592 static char buf[40];
79e68feb 3593
3b4b85c9 3594 sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff);
1a8965c4 3595 return buf;
79e68feb 3596 }
1a8965c4 3597 return "fmove%.s %f1,%0";
79e68feb
RS
3598}
3599
3600/* Return nonzero if X, a CONST_DOUBLE, has a value that we can get
3601 from the "fmovecr" instruction.
3602 The value, anded with 0xff, gives the code to use in fmovecr
3603 to get the desired constant. */
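/* For example, standard_68881_constant_p returns 0x32 for the constant
   1.0 (see codes_68881 below), so output_move_const_double emits
   "fmovecr #0x32,%0", reading the value from the 68881 constant ROM
   instead of loading it from memory.  */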
3604
7a1929e1 3605/* This code has been fixed for cross-compilation. */
c1cfb2ae
RS
3606
3607static int inited_68881_table = 0;
3608
5505f548 3609static const char *const strings_68881[7] = {
c1cfb2ae
RS
3610 "0.0",
3611 "1.0",
3612 "10.0",
3613 "100.0",
3614 "10000.0",
3615 "1e8",
3616 "1e16"
a0a7fbc9 3617};
c1cfb2ae 3618
8b60264b 3619static const int codes_68881[7] = {
c1cfb2ae
RS
3620 0x0f,
3621 0x32,
3622 0x33,
3623 0x34,
3624 0x35,
3625 0x36,
3626 0x37
a0a7fbc9 3627};
c1cfb2ae
RS
3628
3629REAL_VALUE_TYPE values_68881[7];
3630
3631/* Set up values_68881 array by converting the decimal values
7a1929e1 3632 strings_68881 to binary. */
c1cfb2ae
RS
3633
3634void
8a4a2253 3635init_68881_table (void)
c1cfb2ae
RS
3636{
3637 int i;
3638 REAL_VALUE_TYPE r;
3639 enum machine_mode mode;
3640
16d82c3c 3641 mode = SFmode;
c1cfb2ae
RS
3642 for (i = 0; i < 7; i++)
3643 {
3644 if (i == 6)
16d82c3c 3645 mode = DFmode;
c1cfb2ae
RS
3646 r = REAL_VALUE_ATOF (strings_68881[i], mode);
3647 values_68881[i] = r;
3648 }
3649 inited_68881_table = 1;
3650}
79e68feb
RS
3651
3652int
8a4a2253 3653standard_68881_constant_p (rtx x)
79e68feb 3654{
c1cfb2ae
RS
3655 REAL_VALUE_TYPE r;
3656 int i;
79e68feb 3657
e18db50d 3658 /* fmovecr must be emulated on the 68040 and 68060, so it shouldn't be
7a1929e1 3659 used at all on those chips. */
9cf106c8 3660 if (TUNE_68040_60)
79e68feb
RS
3661 return 0;
3662
c1cfb2ae
RS
3663 if (! inited_68881_table)
3664 init_68881_table ();
3665
3666 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
3667
64c0b414
AS
3668 /* Use REAL_VALUES_IDENTICAL instead of REAL_VALUES_EQUAL so that -0.0
3669 is rejected. */
c1cfb2ae
RS
3670 for (i = 0; i < 6; i++)
3671 {
64c0b414 3672 if (REAL_VALUES_IDENTICAL (r, values_68881[i]))
c1cfb2ae
RS
3673 return (codes_68881[i]);
3674 }
3675
79e68feb
RS
3676 if (GET_MODE (x) == SFmode)
3677 return 0;
c1cfb2ae
RS
3678
3679 if (REAL_VALUES_EQUAL (r, values_68881[6]))
3680 return (codes_68881[6]);
3681
79e68feb
RS
 3682 /* Larger powers of ten in the 68881 constant ROM are not used
 3683 because they are not equal to a `double' C constant. */
3684 return 0;
3685}
3686
3687/* If X is a floating-point constant, return the logarithm of X base 2,
3688 or 0 if X is not a power of 2. */
3689
3690int
8a4a2253 3691floating_exact_log2 (rtx x)
79e68feb 3692{
c1cfb2ae 3693 REAL_VALUE_TYPE r, r1;
eaff3bf8 3694 int exp;
79e68feb 3695
c1cfb2ae 3696 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
79e68feb 3697
eaff3bf8 3698 if (REAL_VALUES_LESS (r, dconst1))
79e68feb
RS
3699 return 0;
3700
eaff3bf8 3701 exp = real_exponent (&r);
6ef9a246 3702 real_2expN (&r1, exp, DFmode);
eaff3bf8
RH
3703 if (REAL_VALUES_EQUAL (r1, r))
3704 return exp;
3705
79e68feb
RS
3706 return 0;
3707}
3708\f
79e68feb
RS
3709/* A C compound statement to output to stdio stream STREAM the
3710 assembler syntax for an instruction operand X. X is an RTL
3711 expression.
3712
3713 CODE is a value that can be used to specify one of several ways
3714 of printing the operand. It is used when identical operands
3715 must be printed differently depending on the context. CODE
3716 comes from the `%' specification that was used to request
3717 printing of the operand. If the specification was just `%DIGIT'
3718 then CODE is 0; if the specification was `%LTR DIGIT' then CODE
3719 is the ASCII code for LTR.
3720
3721 If X is a register, this macro should print the register's name.
3722 The names can be found in an array `reg_names' whose type is
3723 `char *[]'. `reg_names' is initialized from `REGISTER_NAMES'.
3724
3725 When the machine description has a specification `%PUNCT' (a `%'
3726 followed by a punctuation character), this macro is called with
3727 a null pointer for X and the punctuation character for CODE.
3728
3729 The m68k specific codes are:
3730
3731 '.' for dot needed in Motorola-style opcode names.
3732 '-' for an operand pushing on the stack:
3733 sp@-, -(sp) or -(%sp) depending on the style of syntax.
3734 '+' for an operand pushing on the stack:
3735 sp@+, (sp)+ or (%sp)+ depending on the style of syntax.
3736 '@' for a reference to the top word on the stack:
3737 sp@, (sp) or (%sp) depending on the style of syntax.
3738 '#' for an immediate operand prefix (# in MIT and Motorola syntax
5ee084df 3739 but & in SGS syntax).
79e68feb
RS
3740 '!' for the cc register (used in an `and to cc' insn).
3741 '$' for the letter `s' in an op code, but only on the 68040.
3742 '&' for the letter `d' in an op code, but only on the 68040.
2ac5f14a 3743 '/' for register prefix needed by longlong.h.
a40ed0f3 3744 '?' for m68k_library_id_string
79e68feb
RS
3745
3746 'b' for byte insn (no effect, on the Sun; this is for the ISI).
3747 'd' to force memory addressing to be absolute, not relative.
3748 'f' for float insn (print a CONST_DOUBLE as a float rather than in hex)
79e68feb
RS
3749 'x' for float insn (print a CONST_DOUBLE as a float rather than in hex),
3750 or print pair of registers as rx:ry.
29ca003a
RS
3751 'p' print an address with @PLTPC attached, but only if the operand
3752 is not locally-bound. */
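/* As a small illustration of the syntax-dependent codes: a template such
   as "move%.l %1,%-" comes out roughly as "move.l <src>,-(%sp)" under
   Motorola syntax and as "movel <src>,sp@-" under MIT syntax, since '.'
   prints a dot only for Motorola and '-' selects the matching push
   operand.  */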
79e68feb
RS
3753
3754void
8a4a2253 3755print_operand (FILE *file, rtx op, int letter)
79e68feb 3756{
79e68feb
RS
3757 if (letter == '.')
3758 {
e6d98cb0
BI
3759 if (MOTOROLA)
3760 fprintf (file, ".");
79e68feb
RS
3761 }
3762 else if (letter == '#')
e6d98cb0 3763 asm_fprintf (file, "%I");
79e68feb 3764 else if (letter == '-')
4b3d1177 3765 asm_fprintf (file, MOTOROLA ? "-(%Rsp)" : "%Rsp@-");
79e68feb 3766 else if (letter == '+')
4b3d1177 3767 asm_fprintf (file, MOTOROLA ? "(%Rsp)+" : "%Rsp@+");
79e68feb 3768 else if (letter == '@')
4b3d1177 3769 asm_fprintf (file, MOTOROLA ? "(%Rsp)" : "%Rsp@");
79e68feb 3770 else if (letter == '!')
e6d98cb0 3771 asm_fprintf (file, "%Rfpcr");
79e68feb
RS
3772 else if (letter == '$')
3773 {
b101567e 3774 if (TARGET_68040)
e6d98cb0 3775 fprintf (file, "s");
79e68feb
RS
3776 }
3777 else if (letter == '&')
3778 {
b101567e 3779 if (TARGET_68040)
e6d98cb0 3780 fprintf (file, "d");
79e68feb 3781 }
2ac5f14a 3782 else if (letter == '/')
e6d98cb0 3783 asm_fprintf (file, "%R");
a40ed0f3
KH
3784 else if (letter == '?')
3785 asm_fprintf (file, m68k_library_id_string);
29ca003a 3786 else if (letter == 'p')
2c8ec431 3787 {
29ca003a
RS
3788 output_addr_const (file, op);
3789 if (!(GET_CODE (op) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (op)))
3790 fprintf (file, "@PLTPC");
2c8ec431 3791 }
79e68feb
RS
3792 else if (GET_CODE (op) == REG)
3793 {
1a8965c4
AS
3794 if (letter == 'R')
3795 /* Print out the second register name of a register pair.
3796 I.e., R (6) => 7. */
01bbf777 3797 fputs (M68K_REGNAME(REGNO (op) + 1), file);
79e68feb 3798 else
01bbf777 3799 fputs (M68K_REGNAME(REGNO (op)), file);
79e68feb
RS
3800 }
3801 else if (GET_CODE (op) == MEM)
3802 {
3803 output_address (XEXP (op, 0));
3804 if (letter == 'd' && ! TARGET_68020
3805 && CONSTANT_ADDRESS_P (XEXP (op, 0))
3806 && !(GET_CODE (XEXP (op, 0)) == CONST_INT
3807 && INTVAL (XEXP (op, 0)) < 0x8000
3808 && INTVAL (XEXP (op, 0)) >= -0x8000))
4b3d1177 3809 fprintf (file, MOTOROLA ? ".l" : ":l");
79e68feb 3810 }
79e68feb
RS
3811 else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == SFmode)
3812 {
c1cfb2ae
RS
3813 REAL_VALUE_TYPE r;
3814 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
3815 ASM_OUTPUT_FLOAT_OPERAND (letter, file, r);
3816 }
3817 else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == XFmode)
3818 {
3819 REAL_VALUE_TYPE r;
3820 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
3821 ASM_OUTPUT_LONG_DOUBLE_OPERAND (file, r);
79e68feb 3822 }
e2c0a924 3823 else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == DFmode)
79e68feb 3824 {
c1cfb2ae
RS
3825 REAL_VALUE_TYPE r;
3826 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
3827 ASM_OUTPUT_DOUBLE_OPERAND (file, r);
79e68feb
RS
3828 }
3829 else
3830 {
2c8ec431
DL
3831 /* Use `print_operand_address' instead of `output_addr_const'
3832 to ensure that we print relevant PIC stuff. */
1f85a612 3833 asm_fprintf (file, "%I");
2c8ec431
DL
3834 if (TARGET_PCREL
3835 && (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == CONST))
3836 print_operand_address (file, op);
3837 else
3838 output_addr_const (file, op);
79e68feb
RS
3839 }
3840}
3841
3842\f
3843/* A C compound statement to output to stdio stream STREAM the
3844 assembler syntax for an instruction operand that is a memory
3845 reference whose address is ADDR. ADDR is an RTL expression.
3846
3847 Note that this contains a kludge that knows that the only reason
3848 we have an address (plus (label_ref...) (reg...)) when not generating
3849 PIC code is in the insn before a tablejump, and we know that m68k.md
3850 generates a label LInnn: on such an insn.
3851
3852 It is possible for PIC to generate a (plus (label_ref...) (reg...))
3853 and we handle that just like we would a (plus (symbol_ref...) (reg...)).
3854
79e68feb
RS
3855 This routine is responsible for distinguishing between -fpic and -fPIC
3856 style relocations in an address. When generating -fpic code the
112cdef5
KH
3857 offset is output in word mode (e.g. movel a5@(_foo:w), a0). When generating
3858 -fPIC code the offset is output in long mode (e.g. movel a5@(_foo:l), a0) */
79e68feb
RS
3859
3860void
8a4a2253 3861print_operand_address (FILE *file, rtx addr)
79e68feb 3862{
fc2241eb
RS
3863 struct m68k_address address;
3864
3865 if (!m68k_decompose_address (QImode, addr, true, &address))
3866 gcc_unreachable ();
3867
3868 if (address.code == PRE_DEC)
4b3d1177
KH
3869 fprintf (file, MOTOROLA ? "-(%s)" : "%s@-",
3870 M68K_REGNAME (REGNO (address.base)));
fc2241eb 3871 else if (address.code == POST_INC)
4b3d1177
KH
3872 fprintf (file, MOTOROLA ? "(%s)+" : "%s@+",
3873 M68K_REGNAME (REGNO (address.base)));
fc2241eb
RS
3874 else if (!address.base && !address.index)
3875 {
3876 /* A constant address. */
3877 gcc_assert (address.offset == addr);
3878 if (GET_CODE (addr) == CONST_INT)
3879 {
3880 /* (xxx).w or (xxx).l. */
3881 if (IN_RANGE (INTVAL (addr), -0x8000, 0x7fff))
4b3d1177 3882 fprintf (file, MOTOROLA ? "%d.w" : "%d:w", (int) INTVAL (addr));
a0a7fbc9 3883 else
fc2241eb 3884 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (addr));
a0a7fbc9 3885 }
fc2241eb 3886 else if (TARGET_PCREL)
a0a7fbc9 3887 {
fc2241eb
RS
3888 /* (d16,PC) or (bd,PC,Xn) (with suppressed index register). */
3889 fputc ('(', file);
3890 output_addr_const (file, addr);
3891 asm_fprintf (file, flag_pic == 1 ? ":w,%Rpc)" : ":l,%Rpc)");
a0a7fbc9 3892 }
fc2241eb 3893 else
a0a7fbc9 3894 {
fc2241eb
RS
3895 /* (xxx).l. We need a special case for SYMBOL_REF if the symbol
3896 name ends in `.<letter>', as the last 2 characters can be
3897 mistaken as a size suffix. Put the name in parentheses. */
3898 if (GET_CODE (addr) == SYMBOL_REF
3899 && strlen (XSTR (addr, 0)) > 2
3900 && XSTR (addr, 0)[strlen (XSTR (addr, 0)) - 2] == '.')
a0a7fbc9 3901 {
fc2241eb
RS
3902 putc ('(', file);
3903 output_addr_const (file, addr);
3904 putc (')', file);
a0a7fbc9
AS
3905 }
3906 else
fc2241eb 3907 output_addr_const (file, addr);
a0a7fbc9 3908 }
fc2241eb
RS
3909 }
3910 else
3911 {
3912 int labelno;
3913
3914 /* If ADDR is a (d8,pc,Xn) address, this is the number of the
44c7bd63 3915 label being accessed, otherwise it is -1. */
fc2241eb
RS
3916 labelno = (address.offset
3917 && !address.base
3918 && GET_CODE (address.offset) == LABEL_REF
3919 ? CODE_LABEL_NUMBER (XEXP (address.offset, 0))
3920 : -1);
3921 if (MOTOROLA)
a0a7fbc9 3922 {
fc2241eb
RS
3923 /* Print the "offset(base" component. */
3924 if (labelno >= 0)
e59d83aa 3925 asm_fprintf (file, "%LL%d(%Rpc,", labelno);
fc2241eb 3926 else
a0a7fbc9 3927 {
fc2241eb 3928 if (address.offset)
a0a7fbc9 3929 {
fc2241eb
RS
3930 output_addr_const (file, address.offset);
3931 if (flag_pic && address.base == pic_offset_table_rtx)
a0a7fbc9
AS
3932 {
3933 fprintf (file, "@GOT");
fc2241eb 3934 if (flag_pic == 1 && TARGET_68020)
a0a7fbc9
AS
3935 fprintf (file, ".w");
3936 }
3937 }
fc2241eb
RS
3938 putc ('(', file);
3939 if (address.base)
3940 fputs (M68K_REGNAME (REGNO (address.base)), file);
a0a7fbc9 3941 }
fc2241eb
RS
3942 /* Print the ",index" component, if any. */
3943 if (address.index)
a0a7fbc9 3944 {
fc2241eb
RS
3945 if (address.base)
3946 putc (',', file);
3947 fprintf (file, "%s.%c",
3948 M68K_REGNAME (REGNO (address.index)),
3949 GET_MODE (address.index) == HImode ? 'w' : 'l');
3950 if (address.scale != 1)
3951 fprintf (file, "*%d", address.scale);
a0a7fbc9 3952 }
a0a7fbc9 3953 putc (')', file);
a0a7fbc9 3954 }
fc2241eb 3955 else /* !MOTOROLA */
a0a7fbc9 3956 {
fc2241eb
RS
3957 if (!address.offset && !address.index)
3958 fprintf (file, "%s@", M68K_REGNAME (REGNO (address.base)));
a0a7fbc9 3959 else
a0a7fbc9 3960 {
fc2241eb
RS
3961 /* Print the "base@(offset" component. */
3962 if (labelno >= 0)
e59d83aa 3963 asm_fprintf (file, "%Rpc@(%LL%d", labelno);
fc2241eb
RS
3964 else
3965 {
3966 if (address.base)
3967 fputs (M68K_REGNAME (REGNO (address.base)), file);
3968 fprintf (file, "@(");
3969 if (address.offset)
3970 {
3971 output_addr_const (file, address.offset);
3972 if (address.base == pic_offset_table_rtx && TARGET_68020)
3973 switch (flag_pic)
3974 {
3975 case 1:
3976 fprintf (file, ":w"); break;
3977 case 2:
3978 fprintf (file, ":l"); break;
3979 default:
3980 break;
3981 }
3982 }
3983 }
3984 /* Print the ",index" component, if any. */
3985 if (address.index)
3986 {
3987 fprintf (file, ",%s:%c",
3988 M68K_REGNAME (REGNO (address.index)),
3989 GET_MODE (address.index) == HImode ? 'w' : 'l');
3990 if (address.scale != 1)
3991 fprintf (file, ":%d", address.scale);
3992 }
a0a7fbc9
AS
3993 putc (')', file);
3994 }
a0a7fbc9 3995 }
79e68feb
RS
3996 }
3997}
af13f02d
JW
3998\f
 3999/* Check for cases where a clr insn can be omitted from code using
 4000 strict_low_part sets. For example, the second clrl here is not needed:
 4001 clrl d0; movw a0@+,d0; use d0; clrl d0; movw a0@+,d0; use d0; ...
4002
4003 MODE is the mode of this STRICT_LOW_PART set. FIRST_INSN is the clear
4004 insn we are checking for redundancy. TARGET is the register set by the
4005 clear insn. */
4006
8a4a2253
BI
4007bool
4008strict_low_part_peephole_ok (enum machine_mode mode, rtx first_insn,
4009 rtx target)
af13f02d 4010{
39250081 4011 rtx p = first_insn;
af13f02d 4012
39250081 4013 while ((p = PREV_INSN (p)))
af13f02d 4014 {
39250081
RZ
4015 if (NOTE_INSN_BASIC_BLOCK_P (p))
4016 return false;
4017
4018 if (NOTE_P (p))
4019 continue;
4020
af13f02d 4021 /* If it isn't an insn, then give up. */
39250081 4022 if (!INSN_P (p))
8a4a2253 4023 return false;
af13f02d
JW
4024
4025 if (reg_set_p (target, p))
4026 {
4027 rtx set = single_set (p);
4028 rtx dest;
4029
4030 /* If it isn't an easy to recognize insn, then give up. */
4031 if (! set)
8a4a2253 4032 return false;
af13f02d
JW
4033
4034 dest = SET_DEST (set);
4035
4036 /* If this sets the entire target register to zero, then our
4037 first_insn is redundant. */
4038 if (rtx_equal_p (dest, target)
4039 && SET_SRC (set) == const0_rtx)
8a4a2253 4040 return true;
af13f02d
JW
4041 else if (GET_CODE (dest) == STRICT_LOW_PART
4042 && GET_CODE (XEXP (dest, 0)) == REG
4043 && REGNO (XEXP (dest, 0)) == REGNO (target)
4044 && (GET_MODE_SIZE (GET_MODE (XEXP (dest, 0)))
4045 <= GET_MODE_SIZE (mode)))
4046 /* This is a strict low part set which modifies less than
4047 we are using, so it is safe. */
4048 ;
4049 else
8a4a2253 4050 return false;
af13f02d 4051 }
af13f02d
JW
4052 }
4053
8a4a2253 4054 return false;
af13f02d 4055}
67cd4f83 4056
2c8ec431
DL
4057/* Operand predicates for implementing asymmetric pc-relative addressing
4058 on m68k. The m68k supports pc-relative addressing (mode 7, register 2)
dab66575 4059 when used as a source operand, but not as a destination operand.
2c8ec431
DL
4060
4061 We model this by restricting the meaning of the basic predicates
4062 (general_operand, memory_operand, etc) to forbid the use of this
4063 addressing mode, and then define the following predicates that permit
4064 this addressing mode. These predicates can then be used for the
4065 source operands of the appropriate instructions.
4066
4067 n.b. While it is theoretically possible to change all machine patterns
 4068 to use this addressing mode where permitted by the architecture,
 4069 it has only been implemented for "common" cases: SImode, HImode, and
 4070 QImode operands, and only for the principal operations that would
4071 require this addressing mode: data movement and simple integer operations.
4072
4073 In parallel with these new predicates, two new constraint letters
4074 were defined: 'S' and 'T'. 'S' is the -mpcrel analog of 'm'.
4075 'T' replaces 's' in the non-pcrel case. It is a no-op in the pcrel case.
4076 In the pcrel case 's' is only valid in combination with 'a' registers.
4077 See addsi3, subsi3, cmpsi, and movsi patterns for a better understanding
4078 of how these constraints are used.
4079
4080 The use of these predicates is strictly optional, though patterns that
4081 don't will cause an extra reload register to be allocated where one
4082 was not necessary:
4083
4084 lea (abc:w,%pc),%a0 ; need to reload address
4085 moveq &1,%d1 ; since write to pc-relative space
4086 movel %d1,%a0@ ; is not allowed
4087 ...
4088 lea (abc:w,%pc),%a1 ; no need to reload address here
4089 movel %a1@,%d0 ; since "movel (abc:w,%pc),%d0" is ok
4090
4091 For more info, consult tiemann@cygnus.com.
4092
4093
4094 All of the ugliness with predicates and constraints is due to the
4095 simple fact that the m68k does not allow a pc-relative addressing
4096 mode as a destination. gcc does not distinguish between source and
4097 destination addresses. Hence, if we claim that pc-relative address
4098 modes are valid, e.g. GO_IF_LEGITIMATE_ADDRESS accepts them, then we
4099 end up with invalid code. To get around this problem, we left
4100 pc-relative modes as invalid addresses, and then added special
4101 predicates and constraints to accept them.
4102
4103 A cleaner way to handle this is to modify gcc to distinguish
4104 between source and destination addresses. We can then say that
4105 pc-relative is a valid source address but not a valid destination
4106 address, and hopefully avoid a lot of the predicate and constraint
4107 hackery. Unfortunately, this would be a pretty big change. It would
4108 be a useful change for a number of ports, but there aren't any current
4109 plans to undertake this.
4110
4111 ***************************************************************************/
4112
4113
5505f548 4114const char *
8a4a2253 4115output_andsi3 (rtx *operands)
29ae8a3c
RK
4116{
4117 int logval;
4118 if (GET_CODE (operands[2]) == CONST_INT
25c99d8f 4119 && (INTVAL (operands[2]) | 0xffff) == -1
29ae8a3c
RK
4120 && (DATA_REG_P (operands[0])
4121 || offsettable_memref_p (operands[0]))
9425fb04 4122 && !TARGET_COLDFIRE)
29ae8a3c
RK
4123 {
4124 if (GET_CODE (operands[0]) != REG)
b72f00af 4125 operands[0] = adjust_address (operands[0], HImode, 2);
1d8eaa6b 4126 operands[2] = GEN_INT (INTVAL (operands[2]) & 0xffff);
29ae8a3c
RK
4127 /* Do not delete a following tstl %0 insn; that would be incorrect. */
4128 CC_STATUS_INIT;
4129 if (operands[2] == const0_rtx)
4130 return "clr%.w %0";
4131 return "and%.w %2,%0";
4132 }
4133 if (GET_CODE (operands[2]) == CONST_INT
4134 && (logval = exact_log2 (~ INTVAL (operands[2]))) >= 0
4135 && (DATA_REG_P (operands[0])
4136 || offsettable_memref_p (operands[0])))
4137 {
4138 if (DATA_REG_P (operands[0]))
a0a7fbc9 4139 operands[1] = GEN_INT (logval);
29ae8a3c
RK
4140 else
4141 {
b72f00af 4142 operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
1d8eaa6b 4143 operands[1] = GEN_INT (logval % 8);
29ae8a3c
RK
4144 }
4145 /* This does not set condition codes in a standard way. */
4146 CC_STATUS_INIT;
4147 return "bclr %1,%0";
4148 }
4149 return "and%.l %2,%0";
4150}
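/* As an illustration of the single-bit case above: and.l with the
   constant 0xfffffffb (all bits set except bit 2) on a data register
   becomes "bclr #2,%d0"; for an offsettable memory operand the address
   is advanced by 3 - (2 / 8) = 3 bytes so that the bclr hits the
   low-order byte, and bit 2 % 8 = 2 of that byte is cleared.  */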
4151
5505f548 4152const char *
8a4a2253 4153output_iorsi3 (rtx *operands)
29ae8a3c
RK
4154{
4155 register int logval;
4156 if (GET_CODE (operands[2]) == CONST_INT
4157 && INTVAL (operands[2]) >> 16 == 0
4158 && (DATA_REG_P (operands[0])
4159 || offsettable_memref_p (operands[0]))
9425fb04 4160 && !TARGET_COLDFIRE)
29ae8a3c
RK
4161 {
4162 if (GET_CODE (operands[0]) != REG)
b72f00af 4163 operands[0] = adjust_address (operands[0], HImode, 2);
29ae8a3c
RK
4164 /* Do not delete a following tstl %0 insn; that would be incorrect. */
4165 CC_STATUS_INIT;
4166 if (INTVAL (operands[2]) == 0xffff)
4167 return "mov%.w %2,%0";
4168 return "or%.w %2,%0";
4169 }
4170 if (GET_CODE (operands[2]) == CONST_INT
4171 && (logval = exact_log2 (INTVAL (operands[2]))) >= 0
4172 && (DATA_REG_P (operands[0])
4173 || offsettable_memref_p (operands[0])))
4174 {
4175 if (DATA_REG_P (operands[0]))
b72f00af 4176 operands[1] = GEN_INT (logval);
29ae8a3c
RK
4177 else
4178 {
b72f00af 4179 operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
1d8eaa6b 4180 operands[1] = GEN_INT (logval % 8);
29ae8a3c
RK
4181 }
4182 CC_STATUS_INIT;
4183 return "bset %1,%0";
4184 }
4185 return "or%.l %2,%0";
4186}
4187
5505f548 4188const char *
8a4a2253 4189output_xorsi3 (rtx *operands)
29ae8a3c
RK
4190{
4191 register int logval;
4192 if (GET_CODE (operands[2]) == CONST_INT
4193 && INTVAL (operands[2]) >> 16 == 0
4194 && (offsettable_memref_p (operands[0]) || DATA_REG_P (operands[0]))
9425fb04 4195 && !TARGET_COLDFIRE)
29ae8a3c
RK
4196 {
4197 if (! DATA_REG_P (operands[0]))
b72f00af 4198 operands[0] = adjust_address (operands[0], HImode, 2);
29ae8a3c
RK
4199 /* Do not delete a following tstl %0 insn; that would be incorrect. */
4200 CC_STATUS_INIT;
4201 if (INTVAL (operands[2]) == 0xffff)
4202 return "not%.w %0";
4203 return "eor%.w %2,%0";
4204 }
4205 if (GET_CODE (operands[2]) == CONST_INT
4206 && (logval = exact_log2 (INTVAL (operands[2]))) >= 0
4207 && (DATA_REG_P (operands[0])
4208 || offsettable_memref_p (operands[0])))
4209 {
4210 if (DATA_REG_P (operands[0]))
b72f00af 4211 operands[1] = GEN_INT (logval);
29ae8a3c
RK
4212 else
4213 {
b72f00af 4214 operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
1d8eaa6b 4215 operands[1] = GEN_INT (logval % 8);
29ae8a3c
RK
4216 }
4217 CC_STATUS_INIT;
4218 return "bchg %1,%0";
4219 }
4220 return "eor%.l %2,%0";
4221}
7c262518 4222
29ca003a
RS
4223/* Return the instruction that should be used for a call to address X,
4224 which is known to be in operand 0. */
4225
4226const char *
4227output_call (rtx x)
4228{
4229 if (symbolic_operand (x, VOIDmode))
4230 return m68k_symbolic_call;
4231 else
4232 return "jsr %a0";
4233}
4234
f7e70894
RS
4235/* Likewise sibling calls. */
4236
4237const char *
4238output_sibcall (rtx x)
4239{
4240 if (symbolic_operand (x, VOIDmode))
4241 return m68k_symbolic_jump;
4242 else
4243 return "jmp %a0";
4244}
4245
45849738
BI
4246#ifdef M68K_TARGET_COFF
4247
4248/* Output assembly to switch to section NAME with attribute FLAGS. */
4249
4250static void
c18a5b6c
MM
4251m68k_coff_asm_named_section (const char *name, unsigned int flags,
4252 tree decl ATTRIBUTE_UNUSED)
45849738
BI
4253{
4254 char flagchar;
4255
4256 if (flags & SECTION_WRITE)
4257 flagchar = 'd';
4258 else
4259 flagchar = 'x';
4260
4261 fprintf (asm_out_file, "\t.section\t%s,\"%c\"\n", name, flagchar);
4262}
4263
4264#endif /* M68K_TARGET_COFF */
4265
c590b625 4266static void
8a4a2253 4267m68k_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
4ab870f5 4268 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8a4a2253 4269 tree function)
483ab821 4270{
4ab870f5
RS
4271 rtx this_slot, offset, addr, mem, insn;
4272
4273 /* Pretend to be a post-reload pass while generating rtl. */
4ab870f5 4274 reload_completed = 1;
4ab870f5
RS
4275
4276 /* The "this" pointer is stored at 4(%sp). */
4277 this_slot = gen_rtx_MEM (Pmode, plus_constant (stack_pointer_rtx, 4));
4278
4279 /* Add DELTA to THIS. */
4280 if (delta != 0)
5050d266 4281 {
4ab870f5
RS
4282 /* Make the offset a legitimate operand for memory addition. */
4283 offset = GEN_INT (delta);
4284 if ((delta < -8 || delta > 8)
4285 && (TARGET_COLDFIRE || USE_MOVQ (delta)))
4286 {
4287 emit_move_insn (gen_rtx_REG (Pmode, D0_REG), offset);
4288 offset = gen_rtx_REG (Pmode, D0_REG);
4289 }
4290 emit_insn (gen_add3_insn (copy_rtx (this_slot),
4291 copy_rtx (this_slot), offset));
5050d266 4292 }
c590b625 4293
4ab870f5
RS
4294 /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
4295 if (vcall_offset != 0)
4296 {
4297 /* Set the static chain register to *THIS. */
4298 emit_move_insn (static_chain_rtx, this_slot);
4299 emit_move_insn (static_chain_rtx, gen_rtx_MEM (Pmode, static_chain_rtx));
4300
4301 /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
4302 addr = plus_constant (static_chain_rtx, vcall_offset);
4303 if (!m68k_legitimate_address_p (Pmode, addr, true))
4304 {
4305 emit_insn (gen_rtx_SET (VOIDmode, static_chain_rtx, addr));
4306 addr = static_chain_rtx;
4307 }
c590b625 4308
4ab870f5
RS
4309 /* Load the offset into %d0 and add it to THIS. */
4310 emit_move_insn (gen_rtx_REG (Pmode, D0_REG),
4311 gen_rtx_MEM (Pmode, addr));
4312 emit_insn (gen_add3_insn (copy_rtx (this_slot),
4313 copy_rtx (this_slot),
4314 gen_rtx_REG (Pmode, D0_REG)));
4315 }
29ca003a 4316
4ab870f5
RS
4317 /* Jump to the target function. Use a sibcall if direct jumps are
4318 allowed, otherwise load the address into a register first. */
4319 mem = DECL_RTL (function);
4320 if (!sibcall_operand (XEXP (mem, 0), VOIDmode))
4321 {
4322 gcc_assert (flag_pic);
c590b625 4323
4ab870f5
RS
4324 if (!TARGET_SEP_DATA)
4325 {
4326 /* Use the static chain register as a temporary (call-clobbered)
4327 GOT pointer for this function. We can use the static chain
4328 register because it isn't live on entry to the thunk. */
6fb5fa3c 4329 SET_REGNO (pic_offset_table_rtx, STATIC_CHAIN_REGNUM);
4ab870f5
RS
4330 emit_insn (gen_load_got (pic_offset_table_rtx));
4331 }
4332 legitimize_pic_address (XEXP (mem, 0), Pmode, static_chain_rtx);
4333 mem = replace_equiv_address (mem, static_chain_rtx);
4334 }
4335 insn = emit_call_insn (gen_sibcall (mem, const0_rtx));
4336 SIBLING_CALL_P (insn) = 1;
4337
4338 /* Run just enough of rest_of_compilation. */
4339 insn = get_insns ();
4340 split_all_insns_noflow ();
4341 final_start_function (insn, file, 1);
4342 final (insn, file, 1);
4343 final_end_function ();
4344
4345 /* Clean up the vars set above. */
4346 reload_completed = 0;
4ab870f5
RS
4347
4348 /* Restore the original PIC register. */
4349 if (flag_pic)
6fb5fa3c 4350 SET_REGNO (pic_offset_table_rtx, PIC_REG);
6b0c2336 4351 free_after_compilation (cfun);
483ab821 4352}
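/* Schematically, for a thunk with DELTA == 4, VCALL_OFFSET == 0 and no
   PIC, the code above boils down to something like "addq.l #4,4(%sp)"
   followed by a direct jump to FUNCTION; larger deltas go through %d0
   first, and a nonzero VCALL_OFFSET adds an indirect load from the
   vtable through the static chain register.  */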
8636be86
KH
4353
4354/* Worker function for TARGET_STRUCT_VALUE_RTX. */
4355
4356static rtx
4357m68k_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
4358 int incoming ATTRIBUTE_UNUSED)
4359{
4360 return gen_rtx_REG (Pmode, M68K_STRUCT_VALUE_REGNUM);
4361}
cfca21cb
PB
4362
4363/* Return nonzero if register old_reg can be renamed to register new_reg. */
4364int
4365m68k_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED,
4366 unsigned int new_reg)
4367{
4368
4369 /* Interrupt functions can only use registers that have already been
4370 saved by the prologue, even if they would normally be
4371 call-clobbered. */
4372
a4242737
KH
4373 if ((m68k_get_function_kind (current_function_decl)
4374 == m68k_fk_interrupt_handler)
6fb5fa3c 4375 && !df_regs_ever_live_p (new_reg))
cfca21cb
PB
4376 return 0;
4377
4378 return 1;
4379}
70028b61 4380
ffa2596e
RS
4381/* Value is true if hard register REGNO can hold a value of machine-mode
 4382 MODE. On the 68000, we let the CPU registers hold any mode, but
4383 restrict the 68881 registers to floating-point modes. */
4384
70028b61
PB
4385bool
4386m68k_regno_mode_ok (int regno, enum machine_mode mode)
4387{
36e04090 4388 if (DATA_REGNO_P (regno))
70028b61 4389 {
a0a7fbc9
AS
 4390 /* Data registers can hold an aggregate if it fits in them. */
4391 if (regno + GET_MODE_SIZE (mode) / 4 <= 8)
4392 return true;
70028b61 4393 }
36e04090 4394 else if (ADDRESS_REGNO_P (regno))
70028b61 4395 {
a0a7fbc9
AS
4396 if (regno + GET_MODE_SIZE (mode) / 4 <= 16)
4397 return true;
70028b61 4398 }
36e04090 4399 else if (FP_REGNO_P (regno))
70028b61
PB
4400 {
 4401 /* FPU registers can hold a float or complex float of long double or
a0a7fbc9
AS
4402 smaller. */
4403 if ((GET_MODE_CLASS (mode) == MODE_FLOAT
4404 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
dcc21c4c 4405 && GET_MODE_UNIT_SIZE (mode) <= TARGET_FP_REG_SIZE)
a0a7fbc9 4406 return true;
70028b61
PB
4407 }
4408 return false;
4409}
dcc21c4c 4410
ffa2596e
RS
4411/* Implement SECONDARY_RELOAD_CLASS. */
4412
4413enum reg_class
4414m68k_secondary_reload_class (enum reg_class rclass,
4415 enum machine_mode mode, rtx x)
4416{
4417 int regno;
4418
4419 regno = true_regnum (x);
4420
4421 /* If one operand of a movqi is an address register, the other
4422 operand must be a general register or constant. Other types
4423 of operand must be reloaded through a data register. */
4424 if (GET_MODE_SIZE (mode) == 1
4425 && reg_classes_intersect_p (rclass, ADDR_REGS)
4426 && !(INT_REGNO_P (regno) || CONSTANT_P (x)))
4427 return DATA_REGS;
4428
4429 /* PC-relative addresses must be loaded into an address register first. */
4430 if (TARGET_PCREL
4431 && !reg_class_subset_p (rclass, ADDR_REGS)
4432 && symbolic_operand (x, VOIDmode))
4433 return ADDR_REGS;
4434
4435 return NO_REGS;
4436}
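/* For instance, loading a QImode value from memory into an address
   register has no direct byte move on the m68k, so the code above
   returns DATA_REGS and reload copies the byte through a data register
   before transferring it to the address register.  */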
4437
4438/* Implement PREFERRED_RELOAD_CLASS. */
4439
4440enum reg_class
4441m68k_preferred_reload_class (rtx x, enum reg_class rclass)
4442{
4443 enum reg_class secondary_class;
4444
4445 /* If RCLASS might need a secondary reload, try restricting it to
4446 a class that doesn't. */
4447 secondary_class = m68k_secondary_reload_class (rclass, GET_MODE (x), x);
4448 if (secondary_class != NO_REGS
4449 && reg_class_subset_p (secondary_class, rclass))
4450 return secondary_class;
4451
4452 /* Prefer to use moveq for in-range constants. */
4453 if (GET_CODE (x) == CONST_INT
4454 && reg_class_subset_p (DATA_REGS, rclass)
4455 && IN_RANGE (INTVAL (x), -0x80, 0x7f))
4456 return DATA_REGS;
4457
4458 /* ??? Do we really need this now? */
4459 if (GET_CODE (x) == CONST_DOUBLE
4460 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
4461 {
4462 if (TARGET_HARD_FLOAT && reg_class_subset_p (FP_REGS, rclass))
4463 return FP_REGS;
4464
4465 return NO_REGS;
4466 }
4467
4468 return rclass;
4469}
4470
dcc21c4c
PB
4471/* Return floating point values in a 68881 register. This makes 68881 code
4472 a little bit faster. It also makes -msoft-float code incompatible with
4473 hard-float code, so people have to be careful not to mix the two.
c0220ea4 4474 For ColdFire it was decided the ABI incompatibility is undesirable.
dcc21c4c
PB
4475 If there is need for a hard-float ABI it is probably worth doing it
4476 properly and also passing function arguments in FP registers. */
4477rtx
4478m68k_libcall_value (enum machine_mode mode)
4479{
4480 switch (mode) {
4481 case SFmode:
4482 case DFmode:
4483 case XFmode:
4484 if (TARGET_68881)
8d989403 4485 return gen_rtx_REG (mode, FP0_REG);
dcc21c4c
PB
4486 break;
4487 default:
4488 break;
4489 }
8d989403 4490 return gen_rtx_REG (mode, D0_REG);
dcc21c4c
PB
4491}
4492
4493rtx
586de218 4494m68k_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED)
dcc21c4c
PB
4495{
4496 enum machine_mode mode;
4497
4498 mode = TYPE_MODE (valtype);
4499 switch (mode) {
4500 case SFmode:
4501 case DFmode:
4502 case XFmode:
4503 if (TARGET_68881)
8d989403 4504 return gen_rtx_REG (mode, FP0_REG);
dcc21c4c
PB
4505 break;
4506 default:
4507 break;
4508 }
4509
576c9028
KH
 4510 /* If the function returns a pointer, return it in %a0. */
4511 if (func && POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (func))))
4512 /* For compatibility with the large body of existing code which
4513 does not always properly declare external functions returning
4514 pointer types, the m68k/SVR4 convention is to copy the value
4515 returned for pointer functions from a0 to d0 in the function
4516 epilogue, so that callers that have neglected to properly
4517 declare the callee can still find the correct return value in
4518 d0. */
4519 return gen_rtx_PARALLEL
4520 (mode,
4521 gen_rtvec (2,
4522 gen_rtx_EXPR_LIST (VOIDmode,
4523 gen_rtx_REG (mode, A0_REG),
4524 const0_rtx),
4525 gen_rtx_EXPR_LIST (VOIDmode,
4526 gen_rtx_REG (mode, D0_REG),
4527 const0_rtx)));
4528 else if (POINTER_TYPE_P (valtype))
4529 return gen_rtx_REG (mode, A0_REG);
dcc21c4c 4530 else
576c9028 4531 return gen_rtx_REG (mode, D0_REG);
dcc21c4c 4532}
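/* So a function known to return "char *" yields a PARALLEL of (%a0, %d0):
   the pointer officially lives in %a0, while the duplicate in %d0 keeps
   callers working that were built against the older d0-only convention
   without a proper prototype.  */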
1c445f03
NS
4533
4534/* Worker function for TARGET_RETURN_IN_MEMORY. */
4535#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
4536static bool
4537m68k_return_in_memory (tree type, tree fntype ATTRIBUTE_UNUSED)
4538{
4539 enum machine_mode mode = TYPE_MODE (type);
4540
4541 if (mode == BLKmode)
4542 return true;
4543
4544 /* If TYPE's known alignment is less than the alignment of MODE that
4545 would contain the structure, then return in memory. We need to
4546 do so to maintain the compatibility between code compiled with
4547 -mstrict-align and that compiled with -mno-strict-align. */
4548 if (AGGREGATE_TYPE_P (type)
4549 && TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (mode))
4550 return true;
4551
4552 return false;
4553}
4554#endif
c47b0cb4
MK
4555
4556/* CPU to schedule the program for. */
4557enum attr_cpu m68k_sched_cpu;
4558
4559/* Operand type. */
4560enum attr_op_type
4561 {
4562 /* No operand. */
4563 OP_TYPE_NONE,
4564
4565 /* Register. */
4566 OP_TYPE_REG,
4567
4568 /* Implicit mem reference (e.g. stack). */
4569 OP_TYPE_MEM1,
4570
4571 /* Memory without offset or indexing. EA modes 2, 3 and 4. */
4572 OP_TYPE_MEM234,
4573
4574 /* Memory with offset but without indexing. EA mode 5. */
4575 OP_TYPE_MEM5,
4576
4577 /* Memory with indexing. EA mode 6. */
4578 OP_TYPE_MEM6,
4579
4580 /* Memory referenced by absolute address. EA mode 7. */
4581 OP_TYPE_MEM7,
4582
4583 /* Immediate operand that doesn't require extension word. */
4584 OP_TYPE_IMM_Q,
4585
4586 /* Immediate 16 bit operand. */
4587 OP_TYPE_IMM_W,
4588
4589 /* Immediate 32 bit operand. */
4590 OP_TYPE_IMM_L
4591 };
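/* Concretely, with the usual 68k effective-address numbering, "(%a0)",
   "(%a0)+" and "-(%a0)" are EA modes 2-4 and map to OP_TYPE_MEM234,
   "8(%a0)" is mode 5 (OP_TYPE_MEM5), "8(%a0,%d1.l)" is mode 6
   (OP_TYPE_MEM6), and an absolute address is mode 7 (OP_TYPE_MEM7).  */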
4592
 4593/* True if the current insn doesn't have a complete pipeline description. */
4594static bool sched_guess_p;
4595
4596/* Return type of memory ADDR_RTX refers to. */
4597static enum attr_op_type
4598sched_address_type (enum machine_mode mode, rtx addr_rtx)
4599{
4600 struct m68k_address address;
4601
4602 if (!m68k_decompose_address (mode, addr_rtx,
4603 reload_completed, &address))
4604 {
4605 gcc_assert (sched_guess_p);
4606 /* Reload will likely fix the address to be in the register. */
4607 return OP_TYPE_MEM234;
4608 }
4609
4610 if (address.scale != 0)
4611 return OP_TYPE_MEM6;
4612
4613 if (address.base != NULL_RTX)
4614 {
4615 if (address.offset == NULL_RTX)
4616 return OP_TYPE_MEM234;
4617
4618 return OP_TYPE_MEM5;
4619 }
4620
4621 gcc_assert (address.offset != NULL_RTX);
4622
4623 return OP_TYPE_MEM7;
4624}
4625
4626/* Return type of the operand OP.
4627 If ADDRESS_P is true, return type of memory location OP refers to. */
4628static enum attr_op_type
4629sched_operand_type (rtx op, bool address_p)
4630{
4631 gcc_assert (op != NULL_RTX);
4632
4633 if (address_p)
4634 return sched_address_type (QImode, op);
4635
4636 if (memory_operand (op, VOIDmode))
4637 return sched_address_type (GET_MODE (op), XEXP (op, 0));
4638
4639 if (register_operand (op, VOIDmode))
4640 return OP_TYPE_REG;
4641
4642 if (GET_CODE (op) == CONST_INT)
4643 {
4644 /* ??? Below condition should probably check if the operation is
4645 signed or unsigned. */
4646 if (IN_RANGE (INTVAL (op), -0x8000, 0x7fff))
4647 return OP_TYPE_IMM_W;
4648
4649 return OP_TYPE_IMM_L;
4650 }
4651
4652 if (GET_CODE (op) == CONST_DOUBLE)
4653 {
4654 switch (GET_MODE (op))
4655 {
4656 case SFmode:
4657 return OP_TYPE_IMM_W;
4658
4659 case VOIDmode:
4660 case DFmode:
4661 return OP_TYPE_IMM_L;
4662
4663 default:
4664 gcc_unreachable ();
4665 }
4666 }
4667
4668 if (symbolic_operand (op, VOIDmode)
4669 || LABEL_P (op))
4670 {
4671 switch (GET_MODE (op))
4672 {
4673 case QImode:
4674 return OP_TYPE_IMM_Q;
4675
4676 case HImode:
4677 return OP_TYPE_IMM_W;
4678
4679 case SImode:
4680 return OP_TYPE_IMM_L;
4681
4682 default:
4683 if (GET_CODE (op) == SYMBOL_REF)
4684 /* ??? Just a guess. Probably we can guess better using length
4685 attribute of the instructions. */
4686 return OP_TYPE_IMM_W;
4687
4688 return OP_TYPE_IMM_L;
4689 }
4690 }
4691
4692 gcc_assert (sched_guess_p);
4693
4694 return OP_TYPE_REG;
4695}
4696
4697/* Return type of INSN's operand X (if OPX_P) or operand Y (if !OPX_P).
4698 If ADDRESS_P is true, return type of memory location operand refers to. */
4699static enum attr_op_type
4700sched_attr_op_type (rtx insn, bool opx_p, bool address_p)
4701{
4702 int i;
4703
4704 extract_constrain_insn_cached (insn);
4705
4706 if (opx_p)
4707 i = get_attr_opx (insn);
4708 else
4709 i = get_attr_opy (insn);
4710
4711 if (i >= recog_data.n_operands)
4712 {
4713 gcc_assert (sched_guess_p);
4714 return OP_TYPE_REG;
4715 }
4716
4717 return sched_operand_type (recog_data.operand[i], address_p);
4718}
4719
4720/* Implement opx_type attribute.
4721 Return type of INSN's operand X.
4722 If ADDRESS_P is true, return type of memory location operand refers to. */
4723enum attr_opx_type
4724m68k_sched_attr_opx_type (rtx insn, int address_p)
4725{
4726 sched_guess_p = (get_attr_guess (insn) == GUESS_YES);
4727
4728 switch (sched_attr_op_type (insn, true, address_p != 0))
4729 {
4730 case OP_TYPE_REG:
4731 return OPX_TYPE_REG;
4732
4733 case OP_TYPE_MEM1:
4734 return OPX_TYPE_MEM1;
4735
4736 case OP_TYPE_MEM234:
4737 return OPX_TYPE_MEM234;
4738
4739 case OP_TYPE_MEM5:
4740 return OPX_TYPE_MEM5;
4741
4742 case OP_TYPE_MEM6:
4743 return OPX_TYPE_MEM6;
4744
4745 case OP_TYPE_MEM7:
4746 return OPX_TYPE_MEM7;
4747
4748 case OP_TYPE_IMM_Q:
4749 return OPX_TYPE_IMM_Q;
4750
4751 case OP_TYPE_IMM_W:
4752 return OPX_TYPE_IMM_W;
4753
4754 case OP_TYPE_IMM_L:
4755 return OPX_TYPE_IMM_L;
4756
4757 default:
4758 gcc_unreachable ();
4759 return 0;
4760 }
4761}
4762
4763/* Implement opy_type attribute.
4764 Return type of INSN's operand Y.
4765 If ADDRESS_P is true, return type of memory location operand refers to. */
4766enum attr_opy_type
4767m68k_sched_attr_opy_type (rtx insn, int address_p)
4768{
4769 sched_guess_p = (get_attr_guess (insn) == GUESS_YES);
4770
4771 switch (sched_attr_op_type (insn, false, address_p != 0))
4772 {
4773 case OP_TYPE_REG:
4774 return OPY_TYPE_REG;
4775
4776 case OP_TYPE_MEM1:
4777 return OPY_TYPE_MEM1;
4778
4779 case OP_TYPE_MEM234:
4780 return OPY_TYPE_MEM234;
4781
4782 case OP_TYPE_MEM5:
4783 return OPY_TYPE_MEM5;
4784
4785 case OP_TYPE_MEM6:
4786 return OPY_TYPE_MEM6;
4787
4788 case OP_TYPE_MEM7:
4789 return OPY_TYPE_MEM7;
4790
4791 case OP_TYPE_IMM_Q:
4792 return OPY_TYPE_IMM_Q;
4793
4794 case OP_TYPE_IMM_W:
4795 return OPY_TYPE_IMM_W;
4796
4797 case OP_TYPE_IMM_L:
4798 return OPY_TYPE_IMM_L;
4799
4800 default:
4801 gcc_unreachable ();
4802 return 0;
4803 }
4804}
4805
4806/* Return the size of INSN. */
4807int
4808m68k_sched_attr_size (rtx insn)
4809{
4810 int size;
4811
4812 sched_guess_p = (get_attr_guess (insn) == GUESS_YES);
4813
4814 switch (get_attr_type1 (insn))
4815 {
4816 case TYPE1_MUL_L:
4817 size = 2;
4818 break;
4819
4820 default:
4821 size = 1;
4822 break;
4823 }
4824
4825 switch (get_attr_opx_type (insn))
4826 {
4827 case OPX_TYPE_NONE:
4828 case OPX_TYPE_REG:
4829 case OPX_TYPE_MEM1:
4830 case OPX_TYPE_MEM234:
4831 case OPY_TYPE_IMM_Q:
4832 break;
4833
4834 case OPX_TYPE_MEM5:
4835 case OPX_TYPE_MEM6:
4836 /* Here we assume that most absolute references are short. */
4837 case OPX_TYPE_MEM7:
4838 case OPY_TYPE_IMM_W:
4839 ++size;
4840 break;
4841
4842 case OPY_TYPE_IMM_L:
4843 size += 2;
4844 break;
4845
4846 default:
4847 gcc_unreachable ();
4848 }
4849
4850 switch (get_attr_opy_type (insn))
4851 {
4852 case OPY_TYPE_NONE:
4853 case OPY_TYPE_REG:
4854 case OPY_TYPE_MEM1:
4855 case OPY_TYPE_MEM234:
4856 case OPY_TYPE_IMM_Q:
4857 break;
4858
4859 case OPY_TYPE_MEM5:
4860 case OPY_TYPE_MEM6:
4861 /* Here we assume that most absolute references are short. */
4862 case OPY_TYPE_MEM7:
4863 case OPY_TYPE_IMM_W:
4864 ++size;
4865 break;
4866
4867 case OPY_TYPE_IMM_L:
4868 size += 2;
4869 break;
4870
4871 default:
4872 gcc_unreachable ();
4873 }
4874
4875 if (size > 3)
4876 {
4877 gcc_assert (sched_guess_p);
4878
4879 size = 3;
4880 }
4881
4882 return size;
4883}
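/* In other words, the size attribute approximates the instruction length
   in words: one word of opcode (two for a long multiply), plus one word
   for each 16-bit extension (a d16 or indexed address, an absolute
   address, or a 16-bit immediate) and two words for a 32-bit immediate.
   For example, "move.l #123456,(%a0)" counts as 1 + 0 + 2 = 3 words and
   "move.w %d0,8(%a0)" as 1 + 1 + 0 = 2 words.  */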
4884
4885/* Implement op_mem attribute. */
4886enum attr_op_mem
4887m68k_sched_attr_op_mem (rtx insn)
4888{
4889 enum attr_opy_mem opy;
4890 enum attr_opx_mem opx;
4891
4892 sched_guess_p = (get_attr_guess (insn) == GUESS_YES);
4893
4894 opy = get_attr_opy_mem (insn);
4895 opx = get_attr_opx_mem (insn);
4896
4897 if (opy == OPY_MEM_R && opx == OPX_MEM_R)
4898 return OP_MEM_00;
4899
4900 if (opy == OPY_MEM_R && opx == OPX_MEM_M)
4901 {
4902 switch (get_attr_opx_access (insn))
4903 {
4904 case OPX_ACCESS_R:
4905 return OP_MEM_10;
4906
4907 case OPX_ACCESS_W:
4908 return OP_MEM_01;
4909
4910 case OPX_ACCESS_RW:
4911 return OP_MEM_11;
4912
4913 default:
4914 gcc_assert (sched_guess_p);
4915 return OP_MEM_UNKNOWN;
4916 }
4917 }
4918
4919 if (opy == OPY_MEM_R && opx == OPX_MEM_I)
4920 {
4921 switch (get_attr_opx_access (insn))
4922 {
4923 case OPX_ACCESS_R:
4924 return OP_MEM_I0;
4925
4926 case OPX_ACCESS_W:
4927 return OP_MEM_0I;
4928
4929 case OPX_ACCESS_RW:
4930 return OP_MEM_I1;
4931
4932 default:
4933 gcc_assert (sched_guess_p);
4934 return OP_MEM_UNKNOWN;
4935 }
4936 }
4937
4938 if (opy == OPY_MEM_M && opx == OPX_MEM_R)
4939 return OP_MEM_10;
4940
4941 if (opy == OPY_MEM_M && opx == OPX_MEM_M)
4942 {
4943 switch (get_attr_opx_access (insn))
4944 {
4945 case OPX_ACCESS_W:
4946 return OP_MEM_11;
4947
4948 default:
4949 gcc_assert (sched_guess_p);
4950 return OP_MEM_UNKNOWN;
4951 }
4952 }
4953
4954 if (opy == OPY_MEM_M && opx == OPX_MEM_I)
4955 {
4956 switch (get_attr_opx_access (insn))
4957 {
4958 case OPX_ACCESS_W:
4959 return OP_MEM_1I;
4960
4961 default:
4962 gcc_assert (sched_guess_p);
4963 return OP_MEM_UNKNOWN;
4964 }
4965 }
4966
4967 if (opy == OPY_MEM_I && opx == OPX_MEM_R)
4968 return OP_MEM_I0;
4969
4970
4971 if (opy == OPY_MEM_I && opx == OPX_MEM_M)
4972 {
4973 switch (get_attr_opx_access (insn))
4974 {
4975 case OPX_ACCESS_W:
4976 return OP_MEM_I1;
4977
4978 default:
4979 gcc_assert (sched_guess_p);
4980 return OP_MEM_UNKNOWN;
4981 }
4982 }
4983
4984 gcc_assert (sched_guess_p);
4985 return OP_MEM_UNKNOWN;
4986}
4987
 4988/* Jump instruction types. Indexed by INSN_UID.
 4989 The same rtl insn can be expanded into different asm instructions
 4990 depending on the cc0_status. To properly determine the type of a jump
 4991 instruction we scan the instruction stream and map jump types to this
4992 array. */
4993static enum attr_type *sched_branch_type;
4994
4995/* Return the type of the jump insn. */
4996enum attr_type
4997m68k_sched_branch_type (rtx insn)
4998{
4999 enum attr_type type;
5000
5001 type = sched_branch_type[INSN_UID (insn)];
5002
5003 gcc_assert (type != 0);
5004
5005 return type;
5006}
b8c96320
MK
5007
5008/* Implement type2 attribute. */
5009enum attr_type2
5010m68k_sched_attr_type2 (rtx insn)
5011{
5012 switch (get_attr_type1 (insn))
5013 {
5014 case TYPE1_ALU_REG1:
5015 case TYPE1_ALU_REGX:
5016 return TYPE2_ALU;
5017
5018 case TYPE1_ALU_L:
5019 case TYPE1_ALUQ_L:
5020 case TYPE1_CMP_L:
5021 return TYPE2_ALU_L;
5022
5023 case TYPE1_BCC:
5024 return TYPE2_BCC;
5025
5026 case TYPE1_BRA:
5027 return TYPE2_BRA;
5028
5029 case TYPE1_BSR:
5030 case TYPE1_JSR:
5031 return TYPE2_CALL;
5032
5033 case TYPE1_JMP:
5034 return TYPE2_JMP;
5035
5036 case TYPE1_LEA:
5037 return TYPE2_LEA;
5038
5039 case TYPE1_CLR:
5040 case TYPE1_MOV3Q_L:
5041 case TYPE1_MOVE:
5042 case TYPE1_MOVEQ_L:
5043 case TYPE1_TST:
5044 return TYPE2_MOVE;
5045
5046 case TYPE1_MOVE_L:
5047 case TYPE1_TST_L:
5048 return TYPE2_MOVE_L;
5049
5050 case TYPE1_MUL_W:
5051 case TYPE1_MUL_L:
5052 return TYPE2_MUL;
5053
5054 case TYPE1_PEA:
5055 return TYPE2_PEA;
5056
5057 case TYPE1_RTS:
5058 return TYPE2_RTS;
5059
5060 case TYPE1_UNLK:
5061 return TYPE2_UNLK;
5062
5063 default:
5064 gcc_assert (get_attr_guess (insn) == GUESS_YES);
5065 return TYPE2_UNKNOWN;
5066 }
5067}
5068
5069/* An empty state that is used in m68k_sched_adjust_cost. */
5070static state_t sched_adjust_cost_state;
5071
5072/* Implement adjust_cost scheduler hook.
5073 Return adjusted COST of dependency LINK between DEF_INSN and INSN. */
5074static int
5075m68k_sched_adjust_cost (rtx insn, rtx link ATTRIBUTE_UNUSED, rtx def_insn,
5076 int cost)
5077{
5078 int delay;
5079
5080 if (recog_memoized (def_insn) < 0
5081 || recog_memoized (insn) < 0)
5082 return cost;
5083
5084 /* Don't try to issue INSN earlier than DFA permits.
5085 This is especially useful for instructions that write to memory,
 5086 as their true dependence (default) latency is better set to 0
 5087 to work around alias analysis limitations.
5088 This is, in fact, a machine independent tweak, so, probably,
5089 it should be moved to haifa-sched.c: insn_cost (). */
5090
5091 delay = min_insn_conflict_delay (sched_adjust_cost_state, def_insn, insn);
5092 if (delay > cost)
5093 cost = delay;
5094
5095 return cost;
5096}
5097
5098/* Size of the instruction buffer in words. */
5099static int sched_ib_size;
5100
5101/* Number of filled words in the instruction buffer. */
5102static int sched_ib_filled;
5103
5104/* An insn that reserves (marks empty) one word in the instruction buffer. */
5105static rtx sched_ib_insn;
5106
5107/* ID of memory unit. */
5108static int sched_mem_unit_code;
5109
5110/* Implementation of the targetm.sched.variable_issue () hook.
5111 It is called after INSN was issued. It returns the number of insns
5112 that can possibly get scheduled on the current cycle.
5113 It is used here to determine the effect of INSN on the instruction
5114 buffer. */
5115static int
5116m68k_sched_variable_issue (FILE *sched_dump ATTRIBUTE_UNUSED,
5117 int sched_verbose ATTRIBUTE_UNUSED,
5118 rtx insn, int can_issue_more)
5119{
5120 int insn_size;
5121
5122 if (recog_memoized (insn) >= 0)
5123 {
5124 insn_size = get_attr_size (insn);
5125
5126 gcc_assert (insn_size <= sched_ib_filled);
5127
5128 --can_issue_more;
5129 }
5130 else if (GET_CODE (PATTERN (insn)) == ASM_INPUT
5131 || asm_noperands (PATTERN (insn)) >= 0)
5132 insn_size = sched_ib_filled;
5133 else
5134 insn_size = 0;
5135
5136 sched_ib_filled -= insn_size;
5137
5138 return can_issue_more;
5139}
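/* For example, issuing a 3-word instruction drains three words from the
   modelled buffer, while an inline asm of unknown length is treated
   conservatively as emptying whatever the buffer currently holds.  */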
5140
5141/* Statistics gatherer. */
5142
5143typedef enum
5144 {
5145 /* Something needs to be done for this insn. */
5146 SCHED_DUMP_TODO,
5147
5148 /* Support for this insn is complete. */
5149 SCHED_DUMP_DONE,
5150
 5151 /* This insn didn't require much effort to support. */
5152 SCHED_DUMP_NOTHING
5153 } sched_dump_class_def;
5154
 5155/* Pointer to a function that classifies insns into the 3 classes above. */
5156typedef sched_dump_class_def (*sched_dump_class_func_t) (rtx);
5157
5158/* Return statistical type of INSN regarding splits. */
5159static sched_dump_class_def
5160sched_dump_split_class (rtx insn)
5161{
5162 int i;
5163
5164 i = recog_memoized (insn);
5165 gcc_assert (i >= 0);
5166
5167 switch (get_attr_split (insn))
5168 {
5169 case SPLIT_TODO:
5170 return SCHED_DUMP_TODO;
5171
5172 case SPLIT_DONE:
5173 return SCHED_DUMP_DONE;
5174
5175 case SPLIT_NOTHING:
5176 return SCHED_DUMP_NOTHING;
5177
5178 default:
5179 gcc_unreachable ();
5180 }
5181}
5182
5183/* ID of the guess unit. */
5184static int sched_dump_dfa_guess_unit_code;
5185
5186/* DFA state for use in sched_dump_dfa_class (). */
5187static state_t sched_dump_dfa_state;
5188
5189/* Return statistical type of INSN regarding DFA reservations. */
5190static sched_dump_class_def
5191sched_dump_dfa_class (rtx insn)
5192{
5193 int i;
5194
5195 i = recog_memoized (insn);
5196 gcc_assert (i >= 0 && insn_has_dfa_reservation_p (insn));
5197
5198 if (sched_dump_split_class (insn) == SCHED_DUMP_TODO)
5199 /* Insn is not yet ready for reservations. */
5200 return SCHED_DUMP_NOTHING;
5201
5202 state_reset (sched_dump_dfa_state);
5203
5204 if (state_transition (sched_dump_dfa_state, insn) >= 0)
5205 gcc_unreachable ();
5206
5207 if (cpu_unit_reservation_p (sched_dump_dfa_state,
5208 sched_dump_dfa_guess_unit_code))
5209 return SCHED_DUMP_TODO;
5210
5211 return SCHED_DUMP_DONE;
5212}
5213
5214 /* Dump statistics on the current function into file DUMP and prefix
5215 each entry with PREFIX.
5216 Instructions are classified with DUMP_CLASS. */
5217static void
5218m68k_sched_dump (sched_dump_class_func_t dump_class,
5219 const char *prefix, FILE *dump)
5220{
5221 sbitmap present;
5222 int *todos;
5223 int *dones;
5224 int *nothings;
5225 rtx insn;
5226
5227 gcc_assert (dump != NULL);
5228
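  /* CODE_FOR_nothing is the sentinel value of enum insn_code, so it bounds
     the per-insn-code bitmap and counters allocated below.  */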
5229 present = sbitmap_alloc (CODE_FOR_nothing);
5230 sbitmap_zero (present);
5231
5232 todos = xcalloc (CODE_FOR_nothing, sizeof (*todos));
5233 dones = xcalloc (CODE_FOR_nothing, sizeof (*dones));
5234 nothings = xcalloc (CODE_FOR_nothing, sizeof (*nothings));
5235
5236 /* Gather statistics. */
5237 for (insn = get_insns (); insn != NULL_RTX; insn = NEXT_INSN (insn))
5238 {
5239 if (INSN_P (insn) && recog_memoized (insn) >= 0)
5240 {
5241 enum insn_code code;
5242
5243 code = INSN_CODE (insn);
5244 gcc_assert (code < CODE_FOR_nothing);
5245
5246 SET_BIT (present, code);
5247
5248 switch (dump_class (insn))
5249 {
5250 case SCHED_DUMP_TODO:
5251 ++todos[code];
5252 break;
5253
5254 case SCHED_DUMP_DONE:
5255 ++dones[code];
5256 break;
5257
5258 case SCHED_DUMP_NOTHING:
5259 ++nothings[code];
5260 break;
5261 }
5262 }
5263 }
5264
5265 /* Print statistics. */
5266 {
5267 unsigned int i;
5268 sbitmap_iterator si;
5269 int total_todo;
5270 int total_done;
5271 int total_nothing;
5272
5273 total_todo = 0;
5274 total_done = 0;
5275 total_nothing = 0;
5276
5277 EXECUTE_IF_SET_IN_SBITMAP (present, 0, i, si)
5278 {
5279 int todo;
5280 int done;
5281 int nothing;
5282 enum insn_code code;
5283
5284 code = (enum insn_code) i;
5285
5286 todo = todos[code];
5287 done = dones[code];
5288 nothing = nothings[code];
5289
5290 total_todo += todo;
5291 total_done += done;
5292 total_nothing += nothing;
5293
5294 if (todo != 0)
5295 {
5296 fprintf (dump,
5297 "%s: %3d: %d / %d / %d ;",
5298 prefix, code, todo, done, nothing);
5299
5300 {
5301 const char *name;
5302
5303 name = get_insn_name (code);
5304
5305 if (name != NULL)
5306 fprintf (dump, " {%s}\n", name);
5307 else
5308 fprintf (dump, " {unknown}\n");
5309 }
5310 }
5311 }
5312
5313 gcc_assert (CODE_FOR_nothing < 999);
5314
5315 fprintf (dump,
5316 "%s: 999: %d / %d / %d ; {total}\n",
5317 prefix, total_todo, total_done, total_nothing);
5318 }
5319
5320 free (nothings);
5321 nothings = NULL;
5322 free (dones);
5323 dones = NULL;
5324 free (todos);
5325 todos = NULL;
5326
5327 sbitmap_free (present);
5328 present = NULL;
5329}
5330
5331/* Implementation of targetm.sched.md_init_global () hook.
5332 It is invoked once per scheduling pass and is used here
5333 to initialize scheduler constants. */
5334static void
5335m68k_sched_md_init_global (FILE *sched_dump ATTRIBUTE_UNUSED,
5336 int sched_verbose ATTRIBUTE_UNUSED,
5337 int n_insns ATTRIBUTE_UNUSED)
5338{
5339 /* Init branch types. */
5340 {
5341 rtx insn;
5342
5343 sched_branch_type = xcalloc (get_max_uid () + 1,
5344 sizeof (*sched_branch_type));
5345
5346 for (insn = get_insns (); insn != NULL_RTX; insn = NEXT_INSN (insn))
5347 {
5348 if (JUMP_P (insn))
5349 /* !!! FIXME: Implement real scan here. */
5350 sched_branch_type[INSN_UID (insn)] = TYPE_BCC;
5351 }
5352 }
5353
5354 if (reload_completed && sched_verbose >= 8)
5355 /* Dump statistics. */
5356 {
5357 m68k_sched_dump (sched_dump_split_class, "m68k_sched_split",
5358 sched_dump);
5359
5360 sched_dump_dfa_guess_unit_code = get_cpu_unit_code ("cf_v2_guess");
5361 sched_dump_dfa_state = alloca (state_size ());
5362
5363 m68k_sched_dump (sched_dump_dfa_class, "m68k_sched_dfa",
5364 sched_dump);
5365
5366 sched_dump_dfa_state = NULL;
5367 sched_dump_dfa_guess_unit_code = 0;
5368 }
5369
5370 /* Set up the target CPU. */
5371 switch (m68k_sched_cpu)
5372 {
5373 case CPU_CF_V2:
5374 sched_ib_size = 6;
5375 sched_mem_unit_code = get_cpu_unit_code ("cf_v2_mem");
5376 break;
5377
5378 default:
5379 gcc_unreachable ();
5380 }
5381
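  /* Scratch DFA state used by min_insn_conflict_delay () in the cost
     adjustment code above.  */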
5382 sched_adjust_cost_state = xmalloc (state_size ());
5383 state_reset (sched_adjust_cost_state);
5384
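  /* Generate the ib insn once; m68k_sched_dfa_post_advance_cycle () replays
     it through state_transition () to reserve the buffer words that have
     not been prefetched.  */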
5385 start_sequence ();
5386 emit_insn (gen_ib ());
5387 sched_ib_insn = get_insns ();
5388 end_sequence ();
5389}
5390
5391 /* The scheduling pass is now finished. Free/reset static variables. */
5392static void
5393m68k_sched_md_finish_global (FILE *dump ATTRIBUTE_UNUSED,
5394 int verbose ATTRIBUTE_UNUSED)
5395{
5396 sched_ib_insn = NULL;
5397
5398 free (sched_adjust_cost_state);
5399 sched_adjust_cost_state = NULL;
5400
5401 sched_mem_unit_code = 0;
5402 sched_ib_size = 0;
5403
5404 free (sched_branch_type);
5405 sched_branch_type = NULL;
5406}
5407
5408 /* Implementation of targetm.sched.md_init () hook.
5409 It is invoked each time the scheduler starts on a new block (basic block
5410 or extended basic block). */
5411static void
5412m68k_sched_md_init (FILE *sched_dump ATTRIBUTE_UNUSED,
5413 int sched_verbose ATTRIBUTE_UNUSED,
5414 int n_insns ATTRIBUTE_UNUSED)
5415{
5416 /* haifa-sched.c: schedule_block () calls advance_cycle () just before
5417 the first cycle. Work around that. */
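  /* The value -2 compensates for the two words that
     m68k_sched_dfa_pre_advance_cycle () will add for that spurious advance,
     so the buffer counts as empty when the first real cycle begins.  */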
5418 sched_ib_filled = -2;
5419}
5420
5421 /* Implementation of targetm.sched.dfa_pre_advance_cycle () hook.
5422 It is invoked just before the current cycle finishes and is used here
5423 to track whether the instruction buffer got its two words this cycle. */
5424static void
5425m68k_sched_dfa_pre_advance_cycle (void)
5426{
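  /* If no insn reserved the memory unit on this cycle, the prefetch
     presumably had the bus to itself, so the buffer gained two more words
     (capped at its size below).  */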
5427 if (!cpu_unit_reservation_p (curr_state, sched_mem_unit_code))
5428 {
5429 sched_ib_filled += 2;
5430
5431 if (sched_ib_filled > sched_ib_size)
5432 sched_ib_filled = sched_ib_size;
5433 }
5434}
5435
5436 /* Implementation of targetm.sched.dfa_post_advance_cycle () hook.
5437 It is invoked just after a new cycle begins and is used here
5438 to set up the number of filled words in the instruction buffer so that
5439 instructions which won't have all their words prefetched will be
5440 stalled for a cycle. */
5441static void
5442m68k_sched_dfa_post_advance_cycle (void)
5443{
5444 int i;
5445 int n;
5446
5447 /* For each instruction word that has not been prefetched yet, reserve
5448 one buffer slot in the DFA so that insns needing those words stall. */
5449 for (i = sched_ib_filled, n = sched_ib_size; i < n; ++i)
5450 {
5451 if (state_transition (curr_state, sched_ib_insn) >= 0)
5452 gcc_unreachable ();
5453 }
5454}