/* Subroutines for insn-output.c for Motorola 68000 family.
   Copyright (C) 1987-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "cfghooks.h"
#include "tree.h"
#include "rtl.h"
#include "df.h"
#include "alias.h"
#include "fold-const.h"
#include "calls.h"
#include "stor-layout.h"
#include "varasm.h"
#include "regs.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "flags.h"
#include "expmed.h"
#include "dojump.h"
#include "explow.h"
#include "emit-rtl.h"
#include "stmt.h"
#include "expr.h"
#include "reload.h"
#include "tm_p.h"
#include "target.h"
#include "debug.h"
#include "cfgrtl.h"
#include "cfganal.h"
#include "lcm.h"
#include "cfgbuild.h"
#include "cfgcleanup.h"
/* ??? Need to add a dependency between m68k.o and sched-int.h.  */
#include "sched-int.h"
#include "insn-codes.h"
#include "opts.h"
#include "optabs.h"
#include "builtins.h"
#include "rtl-iter.h"

/* This file should be included last.  */
#include "target-def.h"

enum reg_class regno_reg_class[] =
{
  DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
  DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
  ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
  ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  ADDR_REGS
};


/* The minimum number of integer registers that we want to save with the
   movem instruction.  Using two movel instructions instead of a single
   moveml is about 15% faster for the 68020 and 68030 at no expense in
   code size.  */
#define MIN_MOVEM_REGS 3

/* The minimum number of floating point registers that we want to save
   with the fmovem instruction.  */
#define MIN_FMOVEM_REGS 1
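
/* For instance, saving %d2-%d4 as a single
       moveml %d2-%d4,-(%sp)
   meets the MIN_MOVEM_REGS threshold, whereas only two registers are
   pushed faster as two separate move.l instructions (illustrative
   assembly; the exact mnemonics depend on the assembler syntax in use).  */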

/* Structure describing stack frame layout.  */
struct m68k_frame
{
  /* Stack pointer to frame pointer offset.  */
  HOST_WIDE_INT offset;

  /* Offset of FPU registers.  */
  HOST_WIDE_INT foffset;

  /* Frame size in bytes (rounded up).  */
  HOST_WIDE_INT size;

  /* Data and address register.  */
  int reg_no;
  unsigned int reg_mask;

  /* FPU registers.  */
  int fpu_no;
  unsigned int fpu_mask;

  /* Offsets relative to ARG_POINTER.  */
  HOST_WIDE_INT frame_pointer_offset;
  HOST_WIDE_INT stack_pointer_offset;

  /* Function which the above information refers to.  */
  int funcdef_no;
};

/* Current frame information calculated by m68k_compute_frame_layout().  */
static struct m68k_frame current_frame;

/* Structure describing an m68k address.

   If CODE is UNKNOWN, the address is BASE + INDEX * SCALE + OFFSET,
   with null fields evaluating to 0.  Here:

   - BASE satisfies m68k_legitimate_base_reg_p
   - INDEX satisfies m68k_legitimate_index_reg_p
   - OFFSET satisfies m68k_legitimate_constant_address_p

   INDEX is either HImode or SImode.  The other fields are SImode.

   If CODE is PRE_DEC, the address is -(BASE).  If CODE is POST_INC,
   the address is (BASE)+.  */
struct m68k_address {
  enum rtx_code code;
  rtx base;
  rtx index;
  rtx offset;
  int scale;
};
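
/* As an illustration of the decomposition above, the SImode address
   (plus (reg %a0) (plus (mult (reg %d1) (const_int 4)) (const_int 8)))
   would be described by CODE = UNKNOWN, BASE = %a0, INDEX = %d1,
   SCALE = 4 and OFFSET = 8.  */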

static int m68k_sched_adjust_cost (rtx_insn *, rtx, rtx_insn *, int);
static int m68k_sched_issue_rate (void);
static int m68k_sched_variable_issue (FILE *, int, rtx_insn *, int);
static void m68k_sched_md_init_global (FILE *, int, int);
static void m68k_sched_md_finish_global (FILE *, int);
static void m68k_sched_md_init (FILE *, int, int);
static void m68k_sched_dfa_pre_advance_cycle (void);
static void m68k_sched_dfa_post_advance_cycle (void);
static int m68k_sched_first_cycle_multipass_dfa_lookahead (void);

static bool m68k_can_eliminate (const int, const int);
static void m68k_conditional_register_usage (void);
static bool m68k_legitimate_address_p (machine_mode, rtx, bool);
static void m68k_option_override (void);
static void m68k_override_options_after_change (void);
static rtx find_addr_reg (rtx);
static const char *singlemove_string (rtx *);
static void m68k_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				  HOST_WIDE_INT, tree);
static rtx m68k_struct_value_rtx (tree, int);
static tree m68k_handle_fndecl_attribute (tree *node, tree name,
					  tree args, int flags,
					  bool *no_add_attrs);
static void m68k_compute_frame_layout (void);
static bool m68k_save_reg (unsigned int regno, bool interrupt_handler);
static bool m68k_ok_for_sibcall_p (tree, tree);
static bool m68k_tls_symbol_p (rtx);
static rtx m68k_legitimize_address (rtx, rtx, machine_mode);
static bool m68k_rtx_costs (rtx, machine_mode, int, int, int *, bool);
#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
static bool m68k_return_in_memory (const_tree, const_tree);
#endif
static void m68k_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
static void m68k_trampoline_init (rtx, tree, rtx);
static int m68k_return_pops_args (tree, tree, int);
static rtx m68k_delegitimize_address (rtx);
static void m68k_function_arg_advance (cumulative_args_t, machine_mode,
				       const_tree, bool);
static rtx m68k_function_arg (cumulative_args_t, machine_mode,
			      const_tree, bool);
static bool m68k_cannot_force_const_mem (machine_mode mode, rtx x);
static bool m68k_output_addr_const_extra (FILE *, rtx);
static void m68k_init_sync_libfuncs (void) ATTRIBUTE_UNUSED;
\f
/* Initialize the GCC target structure.  */

#if INT_OP_GROUP == INT_OP_DOT_WORD
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
#endif

#if INT_OP_GROUP == INT_OP_NO_DOT
#undef TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tbyte\t"
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tshort\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tlong\t"
#endif

#if INT_OP_GROUP == INT_OP_DC
#undef TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tdc.b\t"
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tdc.w\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tdc.l\t"
#endif

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK m68k_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_ASM_FILE_START_APP_OFF
#define TARGET_ASM_FILE_START_APP_OFF true

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS m68k_legitimize_address

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST m68k_sched_adjust_cost

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE m68k_sched_issue_rate

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE m68k_sched_variable_issue

#undef TARGET_SCHED_INIT_GLOBAL
#define TARGET_SCHED_INIT_GLOBAL m68k_sched_md_init_global

#undef TARGET_SCHED_FINISH_GLOBAL
#define TARGET_SCHED_FINISH_GLOBAL m68k_sched_md_finish_global

#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT m68k_sched_md_init

#undef TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE
#define TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE m68k_sched_dfa_pre_advance_cycle

#undef TARGET_SCHED_DFA_POST_ADVANCE_CYCLE
#define TARGET_SCHED_DFA_POST_ADVANCE_CYCLE m68k_sched_dfa_post_advance_cycle

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
  m68k_sched_first_cycle_multipass_dfa_lookahead

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE m68k_option_override

#undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m68k_override_options_after_change

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS m68k_rtx_costs

#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE m68k_attribute_table

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX m68k_struct_value_rtx

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM m68k_cannot_force_const_mem

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL m68k_ok_for_sibcall_p

#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY m68k_return_in_memory
#endif

#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS (true)

#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL m68k_output_dwarf_dtprel
#endif

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P m68k_legitimate_address_p

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE m68k_can_eliminate

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE m68k_conditional_register_usage

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT m68k_trampoline_init

#undef TARGET_RETURN_POPS_ARGS
#define TARGET_RETURN_POPS_ARGS m68k_return_pops_args

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS m68k_delegitimize_address

#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG m68k_function_arg

#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE m68k_function_arg_advance

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P m68k_legitimate_constant_p

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA m68k_output_addr_const_extra

/* The value stored by TAS.  */
#undef TARGET_ATOMIC_TEST_AND_SET_TRUEVAL
#define TARGET_ATOMIC_TEST_AND_SET_TRUEVAL 128

static const struct attribute_spec m68k_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "interrupt", 0, 0, true, false, false, m68k_handle_fndecl_attribute,
    false },
  { "interrupt_handler", 0, 0, true, false, false,
    m68k_handle_fndecl_attribute, false },
  { "interrupt_thread", 0, 0, true, false, false,
    m68k_handle_fndecl_attribute, false },
  { NULL, 0, 0, false, false, false, NULL, false }
};
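
/* These attributes are used on function declarations, for example:

     void handler (void) __attribute__ ((interrupt_handler));

   (illustrative usage; see the GCC manual for the full syntax).  */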

struct gcc_target targetm = TARGET_INITIALIZER;
\f
/* Base flags for 68k ISAs.  */
#define FL_FOR_isa_00    FL_ISA_68000
#define FL_FOR_isa_10    (FL_FOR_isa_00 | FL_ISA_68010)
/* FL_68881 controls the default setting of -m68881.  gcc has traditionally
   generated 68881 code for 68020 and 68030 targets unless explicitly told
   not to.  */
#define FL_FOR_isa_20    (FL_FOR_isa_10 | FL_ISA_68020 \
			  | FL_BITFIELD | FL_68881 | FL_CAS)
#define FL_FOR_isa_40    (FL_FOR_isa_20 | FL_ISA_68040)
#define FL_FOR_isa_cpu32 (FL_FOR_isa_10 | FL_ISA_68020)

/* Base flags for ColdFire ISAs.  */
#define FL_FOR_isa_a     (FL_COLDFIRE | FL_ISA_A)
#define FL_FOR_isa_aplus (FL_FOR_isa_a | FL_ISA_APLUS | FL_CF_USP)
/* Note ISA_B doesn't necessarily include USP (user stack pointer) support.  */
#define FL_FOR_isa_b     (FL_FOR_isa_a | FL_ISA_B | FL_CF_HWDIV)
/* ISA_C is not upwardly compatible with ISA_B.  */
#define FL_FOR_isa_c     (FL_FOR_isa_a | FL_ISA_C | FL_CF_USP)

enum m68k_isa
{
  /* Traditional 68000 instruction sets.  */
  isa_00,
  isa_10,
  isa_20,
  isa_40,
  isa_cpu32,
  /* ColdFire instruction set variants.  */
  isa_a,
  isa_aplus,
  isa_b,
  isa_c,
  isa_max
};

/* Information about one of the -march, -mcpu or -mtune arguments.  */
struct m68k_target_selection
{
  /* The argument being described.  */
  const char *name;

  /* For -mcpu, this is the device selected by the option.
     For -mtune and -march, it is a representative device
     for the microarchitecture or ISA respectively.  */
  enum target_device device;

  /* The M68K_DEVICE fields associated with DEVICE.  See the comment
     in m68k-devices.def for details.  FAMILY is only valid for -mcpu.  */
  const char *family;
  enum uarch_type microarch;
  enum m68k_isa isa;
  unsigned long flags;
};

/* A list of all devices in m68k-devices.def.  Used for -mcpu selection.  */
static const struct m68k_target_selection all_devices[] =
{
#define M68K_DEVICE(NAME,ENUM_VALUE,FAMILY,MULTILIB,MICROARCH,ISA,FLAGS) \
  { NAME, ENUM_VALUE, FAMILY, u##MICROARCH, ISA, FLAGS | FL_FOR_##ISA },
#include "m68k-devices.def"
#undef M68K_DEVICE
  { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
};

/* A list of all ISAs, mapping each one to a representative device.
   Used for -march selection.  */
static const struct m68k_target_selection all_isas[] =
{
#define M68K_ISA(NAME,DEVICE,MICROARCH,ISA,FLAGS) \
  { NAME, DEVICE, NULL, u##MICROARCH, ISA, FLAGS },
#include "m68k-isas.def"
#undef M68K_ISA
  { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
};

/* A list of all microarchitectures, mapping each one to a representative
   device.  Used for -mtune selection.  */
static const struct m68k_target_selection all_microarchs[] =
{
#define M68K_MICROARCH(NAME,DEVICE,MICROARCH,ISA,FLAGS) \
  { NAME, DEVICE, NULL, u##MICROARCH, ISA, FLAGS },
#include "m68k-microarchs.def"
#undef M68K_MICROARCH
  { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
};
\f
/* The entries associated with the -mcpu, -march and -mtune settings,
   or null for options that have not been used.  */
const struct m68k_target_selection *m68k_cpu_entry;
const struct m68k_target_selection *m68k_arch_entry;
const struct m68k_target_selection *m68k_tune_entry;

/* Which CPU we are generating code for.  */
enum target_device m68k_cpu;

/* Which microarchitecture to tune for.  */
enum uarch_type m68k_tune;

/* Which FPU to use.  */
enum fpu_type m68k_fpu;

/* The set of FL_* flags that apply to the target processor.  */
unsigned int m68k_cpu_flags;

/* The set of FL_* flags that apply to the processor to be tuned for.  */
unsigned int m68k_tune_flags;

/* Asm templates for calling or jumping to an arbitrary symbolic address,
   or NULL if such calls or jumps are not supported.  The address is held
   in operand 0.  */
const char *m68k_symbolic_call;
const char *m68k_symbolic_jump;

/* Enum variable that corresponds to m68k_symbolic_call values.  */
enum M68K_SYMBOLIC_CALL m68k_symbolic_call_var;

\f
/* Implement TARGET_OPTION_OVERRIDE.  */

static void
m68k_option_override (void)
{
  const struct m68k_target_selection *entry;
  unsigned long target_mask;

  if (global_options_set.x_m68k_arch_option)
    m68k_arch_entry = &all_isas[m68k_arch_option];

  if (global_options_set.x_m68k_cpu_option)
    m68k_cpu_entry = &all_devices[(int) m68k_cpu_option];

  if (global_options_set.x_m68k_tune_option)
    m68k_tune_entry = &all_microarchs[(int) m68k_tune_option];

  /* User can choose:

     -mcpu=
     -march=
     -mtune=

     -march=ARCH should generate code that runs any processor
     implementing architecture ARCH.  -mcpu=CPU should override -march
     and should generate code that runs on processor CPU, making free
     use of any instructions that CPU understands.  -mtune=UARCH applies
     on top of -mcpu or -march and optimizes the code for UARCH.  It does
     not change the target architecture.  */
  if (m68k_cpu_entry)
    {
      /* Complain if the -march setting is for a different microarchitecture,
	 or includes flags that the -mcpu setting doesn't.  */
      if (m68k_arch_entry
	  && (m68k_arch_entry->microarch != m68k_cpu_entry->microarch
	      || (m68k_arch_entry->flags & ~m68k_cpu_entry->flags) != 0))
	warning (0, "-mcpu=%s conflicts with -march=%s",
		 m68k_cpu_entry->name, m68k_arch_entry->name);

      entry = m68k_cpu_entry;
    }
  else
    entry = m68k_arch_entry;

  if (!entry)
    entry = all_devices + TARGET_CPU_DEFAULT;

  m68k_cpu_flags = entry->flags;

  /* Use the architecture setting to derive default values for
     certain flags.  */
  target_mask = 0;

  /* ColdFire is lenient about alignment.  */
  if (!TARGET_COLDFIRE)
    target_mask |= MASK_STRICT_ALIGNMENT;

  if ((m68k_cpu_flags & FL_BITFIELD) != 0)
    target_mask |= MASK_BITFIELD;
  if ((m68k_cpu_flags & FL_CF_HWDIV) != 0)
    target_mask |= MASK_CF_HWDIV;
  if ((m68k_cpu_flags & (FL_68881 | FL_CF_FPU)) != 0)
    target_mask |= MASK_HARD_FLOAT;
  target_flags |= target_mask & ~target_flags_explicit;

  /* Set the directly-usable versions of the -mcpu and -mtune settings.  */
  m68k_cpu = entry->device;
  if (m68k_tune_entry)
    {
      m68k_tune = m68k_tune_entry->microarch;
      m68k_tune_flags = m68k_tune_entry->flags;
    }
#ifdef M68K_DEFAULT_TUNE
  else if (!m68k_cpu_entry && !m68k_arch_entry)
    {
      enum target_device dev;
      dev = all_microarchs[M68K_DEFAULT_TUNE].device;
      m68k_tune_flags = all_devices[dev].flags;
    }
#endif
  else
    {
      m68k_tune = entry->microarch;
      m68k_tune_flags = entry->flags;
    }

  /* Set the type of FPU.  */
  m68k_fpu = (!TARGET_HARD_FLOAT ? FPUTYPE_NONE
	      : (m68k_cpu_flags & FL_COLDFIRE) != 0 ? FPUTYPE_COLDFIRE
	      : FPUTYPE_68881);

  /* Sanity check to ensure that -msep-data and -mid-shared-library are not
     both specified.  Doing so simply doesn't make sense.  */
  if (TARGET_SEP_DATA && TARGET_ID_SHARED_LIBRARY)
    error ("cannot specify both -msep-data and -mid-shared-library");

  /* If we're generating code for a separate A5 relative data segment,
     we've got to enable -fPIC as well.  This might be relaxable to
     -fpic but it hasn't been tested properly.  */
  if (TARGET_SEP_DATA || TARGET_ID_SHARED_LIBRARY)
    flag_pic = 2;

  /* -mpcrel -fPIC uses 32-bit pc-relative displacements.  Raise an
     error if the target does not support them.  */
  if (TARGET_PCREL && !TARGET_68020 && flag_pic == 2)
    error ("-mpcrel -fPIC is not currently supported on selected cpu");

  /* ??? A historic way of turning on pic, or is this intended to
     be an embedded thing that doesn't have the same name binding
     significance that it does on hosted ELF systems?  */
  if (TARGET_PCREL && flag_pic == 0)
    flag_pic = 1;

  if (!flag_pic)
    {
      m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_JSR;

      m68k_symbolic_jump = "jra %a0";
    }
  else if (TARGET_ID_SHARED_LIBRARY)
    /* All addresses must be loaded from the GOT.  */
    ;
  else if (TARGET_68020 || TARGET_ISAB || TARGET_ISAC)
    {
      if (TARGET_PCREL)
	m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_C;
      else
	m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_P;

      if (TARGET_ISAC)
	/* No unconditional long branch */;
      else if (TARGET_PCREL)
	m68k_symbolic_jump = "bra%.l %c0";
      else
	m68k_symbolic_jump = "bra%.l %p0";
      /* Turn off function cse if we are doing PIC.  We always want
	 function call to be done as `bsr foo@PLTPC'.  */
      /* ??? It's traditional to do this for -mpcrel too, but it isn't
	 clear how intentional that is.  */
      flag_no_function_cse = 1;
    }

  switch (m68k_symbolic_call_var)
    {
    case M68K_SYMBOLIC_CALL_JSR:
      m68k_symbolic_call = "jsr %a0";
      break;

    case M68K_SYMBOLIC_CALL_BSR_C:
      m68k_symbolic_call = "bsr%.l %c0";
      break;

    case M68K_SYMBOLIC_CALL_BSR_P:
      m68k_symbolic_call = "bsr%.l %p0";
      break;

    case M68K_SYMBOLIC_CALL_NONE:
      gcc_assert (m68k_symbolic_call == NULL);
      break;

    default:
      gcc_unreachable ();
    }

#ifndef ASM_OUTPUT_ALIGN_WITH_NOP
  if (align_labels > 2)
    {
      warning (0, "-falign-labels=%d is not supported", align_labels);
      align_labels = 0;
    }
  if (align_loops > 2)
    {
      warning (0, "-falign-loops=%d is not supported", align_loops);
      align_loops = 0;
    }
#endif

  if (stack_limit_rtx != NULL_RTX && !TARGET_68020)
    {
      warning (0, "-fstack-limit- options are not supported on this cpu");
      stack_limit_rtx = NULL_RTX;
    }

  SUBTARGET_OVERRIDE_OPTIONS;

  /* Setup scheduling options.  */
  if (TUNE_CFV1)
    m68k_sched_cpu = CPU_CFV1;
  else if (TUNE_CFV2)
    m68k_sched_cpu = CPU_CFV2;
  else if (TUNE_CFV3)
    m68k_sched_cpu = CPU_CFV3;
  else if (TUNE_CFV4)
    m68k_sched_cpu = CPU_CFV4;
  else
    {
      m68k_sched_cpu = CPU_UNKNOWN;
      flag_schedule_insns = 0;
      flag_schedule_insns_after_reload = 0;
      flag_modulo_sched = 0;
      flag_live_range_shrinkage = 0;
    }

  if (m68k_sched_cpu != CPU_UNKNOWN)
    {
      if ((m68k_cpu_flags & (FL_CF_EMAC | FL_CF_EMAC_B)) != 0)
	m68k_sched_mac = MAC_CF_EMAC;
      else if ((m68k_cpu_flags & FL_CF_MAC) != 0)
	m68k_sched_mac = MAC_CF_MAC;
      else
	m68k_sched_mac = MAC_NO;
    }
}

/* Implement TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE.  */

static void
m68k_override_options_after_change (void)
{
  if (m68k_sched_cpu == CPU_UNKNOWN)
    {
      flag_schedule_insns = 0;
      flag_schedule_insns_after_reload = 0;
      flag_modulo_sched = 0;
      flag_live_range_shrinkage = 0;
    }
}

/* Generate a macro of the form __mPREFIX_cpu_NAME, where PREFIX is the
   given argument and NAME is the argument passed to -mcpu.  Return NULL
   if -mcpu was not passed.  */

const char *
m68k_cpp_cpu_ident (const char *prefix)
{
  if (!m68k_cpu_entry)
    return NULL;
  return concat ("__m", prefix, "_cpu_", m68k_cpu_entry->name, NULL);
}

/* Generate a macro of the form __mPREFIX_family_NAME, where PREFIX is the
   given argument and NAME is the name of the representative device for
   the -mcpu argument's family.  Return NULL if -mcpu was not passed.  */

const char *
m68k_cpp_cpu_family (const char *prefix)
{
  if (!m68k_cpu_entry)
    return NULL;
  return concat ("__m", prefix, "_family_", m68k_cpu_entry->family, NULL);
}
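
/* As an illustration, with PREFIX "cf" and -mcpu=5208 the two functions
   above would return "__mcf_cpu_5208" and "__mcf_family_<FAMILY>", where
   the family string comes from the device's entry in m68k-devices.def.  */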
\f
/* Return m68k_fk_interrupt_handler if FUNC has an "interrupt" or
   "interrupt_handler" attribute and m68k_fk_interrupt_thread if FUNC
   has an "interrupt_thread" attribute.  Otherwise, return
   m68k_fk_normal_function.  */

enum m68k_function_kind
m68k_get_function_kind (tree func)
{
  tree a;

  gcc_assert (TREE_CODE (func) == FUNCTION_DECL);

  a = lookup_attribute ("interrupt", DECL_ATTRIBUTES (func));
  if (a != NULL_TREE)
    return m68k_fk_interrupt_handler;

  a = lookup_attribute ("interrupt_handler", DECL_ATTRIBUTES (func));
  if (a != NULL_TREE)
    return m68k_fk_interrupt_handler;

  a = lookup_attribute ("interrupt_thread", DECL_ATTRIBUTES (func));
  if (a != NULL_TREE)
    return m68k_fk_interrupt_thread;

  return m68k_fk_normal_function;
}

/* Handle an attribute requiring a FUNCTION_DECL; arguments as in
   struct attribute_spec.handler.  */
static tree
m68k_handle_fndecl_attribute (tree *node, tree name,
			      tree args ATTRIBUTE_UNUSED,
			      int flags ATTRIBUTE_UNUSED,
			      bool *no_add_attrs)
{
  if (TREE_CODE (*node) != FUNCTION_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
	       name);
      *no_add_attrs = true;
    }

  if (m68k_get_function_kind (*node) != m68k_fk_normal_function)
    {
      error ("multiple interrupt attributes not allowed");
      *no_add_attrs = true;
    }

  if (!TARGET_FIDOA
      && !strcmp (IDENTIFIER_POINTER (name), "interrupt_thread"))
    {
      error ("interrupt_thread is available only on fido");
      *no_add_attrs = true;
    }

  return NULL_TREE;
}

static void
m68k_compute_frame_layout (void)
{
  int regno, saved;
  unsigned int mask;
  enum m68k_function_kind func_kind =
    m68k_get_function_kind (current_function_decl);
  bool interrupt_handler = func_kind == m68k_fk_interrupt_handler;
  bool interrupt_thread = func_kind == m68k_fk_interrupt_thread;

  /* Only compute the frame once per function.
     Don't cache information until reload has been completed.  */
  if (current_frame.funcdef_no == current_function_funcdef_no
      && reload_completed)
    return;

  current_frame.size = (get_frame_size () + 3) & -4;

  mask = saved = 0;

  /* Interrupt thread does not need to save any register.  */
  if (!interrupt_thread)
    for (regno = 0; regno < 16; regno++)
      if (m68k_save_reg (regno, interrupt_handler))
	{
	  mask |= 1 << (regno - D0_REG);
	  saved++;
	}
  current_frame.offset = saved * 4;
  current_frame.reg_no = saved;
  current_frame.reg_mask = mask;

  current_frame.foffset = 0;
  mask = saved = 0;
  if (TARGET_HARD_FLOAT)
    {
      /* Interrupt thread does not need to save any register.  */
      if (!interrupt_thread)
	for (regno = 16; regno < 24; regno++)
	  if (m68k_save_reg (regno, interrupt_handler))
	    {
	      mask |= 1 << (regno - FP0_REG);
	      saved++;
	    }
      current_frame.foffset = saved * TARGET_FP_REG_SIZE;
      current_frame.offset += current_frame.foffset;
    }
  current_frame.fpu_no = saved;
  current_frame.fpu_mask = mask;

  /* Remember what function this frame refers to.  */
  current_frame.funcdef_no = current_function_funcdef_no;
}

/* Worker function for TARGET_CAN_ELIMINATE.  */

bool
m68k_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
{
  return (to == STACK_POINTER_REGNUM ? ! frame_pointer_needed : true);
}

HOST_WIDE_INT
m68k_initial_elimination_offset (int from, int to)
{
  int argptr_offset;
  /* The arg pointer points 8 bytes before the start of the arguments,
     as defined by FIRST_PARM_OFFSET.  This makes it coincident with the
     frame pointer in most frames.  */
  argptr_offset = frame_pointer_needed ? 0 : UNITS_PER_WORD;
  if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
    return argptr_offset;

  m68k_compute_frame_layout ();

  gcc_assert (to == STACK_POINTER_REGNUM);
  switch (from)
    {
    case ARG_POINTER_REGNUM:
      return current_frame.offset + current_frame.size - argptr_offset;
    case FRAME_POINTER_REGNUM:
      return current_frame.offset + current_frame.size;
    default:
      gcc_unreachable ();
    }
}
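
/* For instance, with a 16-byte frame and two saved data registers
   (current_frame.offset == 8), eliminating ARG_POINTER into
   STACK_POINTER above yields 8 + 16 - 4 when no frame pointer is
   needed, UNITS_PER_WORD being 4 on this target (illustrative
   numbers).  */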

/* Refer to the array `regs_ever_live' to determine which registers
   to save; `regs_ever_live[I]' is nonzero if register number I
   is ever used in the function.  This function is responsible for
   knowing which registers should not be saved even if used.
   Return true if we need to save REGNO.  */

static bool
m68k_save_reg (unsigned int regno, bool interrupt_handler)
{
  if (flag_pic && regno == PIC_REG)
    {
      if (crtl->saves_all_registers)
	return true;
      if (crtl->uses_pic_offset_table)
	return true;
      /* Reload may introduce constant pool references into a function
	 that thitherto didn't need a PIC register.  Note that the test
	 above will not catch that case because we will only set
	 crtl->uses_pic_offset_table when emitting
	 the address reloads.  */
      if (crtl->uses_const_pool)
	return true;
    }

  if (crtl->calls_eh_return)
    {
      unsigned int i;
      for (i = 0; ; i++)
	{
	  unsigned int test = EH_RETURN_DATA_REGNO (i);
	  if (test == INVALID_REGNUM)
	    break;
	  if (test == regno)
	    return true;
	}
    }

  /* Fixed regs we never touch.  */
  if (fixed_regs[regno])
    return false;

  /* The frame pointer (if it is such) is handled specially.  */
  if (regno == FRAME_POINTER_REGNUM && frame_pointer_needed)
    return false;

  /* Interrupt handlers must also save call_used_regs
     if they are live or when calling nested functions.  */
  if (interrupt_handler)
    {
      if (df_regs_ever_live_p (regno))
	return true;

      if (!crtl->is_leaf && call_used_regs[regno])
	return true;
    }

  /* Never need to save registers that aren't touched.  */
  if (!df_regs_ever_live_p (regno))
    return false;

  /* Otherwise save everything that isn't call-clobbered.  */
  return !call_used_regs[regno];
}
/* Emit RTL for a MOVEM or FMOVEM instruction.  BASE + OFFSET represents
   the lowest memory address.  COUNT is the number of registers to be
   moved, with register REGNO + I being moved if bit I of MASK is set.
   STORE_P specifies the direction of the move and ADJUST_STACK_P says
   whether or not this is a pre-decrement (if STORE_P) or post-increment
   (if !STORE_P) operation.  */

static rtx_insn *
m68k_emit_movem (rtx base, HOST_WIDE_INT offset,
		 unsigned int count, unsigned int regno,
		 unsigned int mask, bool store_p, bool adjust_stack_p)
{
  int i;
  rtx body, addr, src, operands[2];
  machine_mode mode;

  body = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (adjust_stack_p + count));
  mode = reg_raw_mode[regno];
  i = 0;

  if (adjust_stack_p)
    {
      src = plus_constant (Pmode, base,
			   (count
			    * GET_MODE_SIZE (mode)
			    * (HOST_WIDE_INT) (store_p ? -1 : 1)));
      XVECEXP (body, 0, i++) = gen_rtx_SET (base, src);
    }

  for (; mask != 0; mask >>= 1, regno++)
    if (mask & 1)
      {
	addr = plus_constant (Pmode, base, offset);
	operands[!store_p] = gen_frame_mem (mode, addr);
	operands[store_p] = gen_rtx_REG (mode, regno);
	XVECEXP (body, 0, i++)
	  = gen_rtx_SET (operands[0], operands[1]);
	offset += GET_MODE_SIZE (mode);
      }
  gcc_assert (i == XVECLEN (body, 0));

  return emit_insn (body);
}
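
/* For example, storing %d2-%d4 with ADJUST_STACK_P set builds a body of
   the form (illustrative):
     (parallel [(set (reg %sp) (plus (reg %sp) (const_int -12)))
		(set (mem (plus (reg %sp) (const_int -12))) (reg %d2))
		(set (mem (plus (reg %sp) (const_int -8))) (reg %d3))
		(set (mem (plus (reg %sp) (const_int -4))) (reg %d4))])  */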

/* Make INSN a frame-related instruction.  */

static void
m68k_set_frame_related (rtx_insn *insn)
{
  rtx body;
  int i;

  RTX_FRAME_RELATED_P (insn) = 1;
  body = PATTERN (insn);
  if (GET_CODE (body) == PARALLEL)
    for (i = 0; i < XVECLEN (body, 0); i++)
      RTX_FRAME_RELATED_P (XVECEXP (body, 0, i)) = 1;
}

/* Emit RTL for the "prologue" define_expand.  */

void
m68k_expand_prologue (void)
{
  HOST_WIDE_INT fsize_with_regs;
  rtx limit, src, dest;

  m68k_compute_frame_layout ();

  if (flag_stack_usage_info)
    current_function_static_stack_size
      = current_frame.size + current_frame.offset;

  /* If the stack limit is a symbol, we can check it here,
     before actually allocating the space.  */
  if (crtl->limit_stack
      && GET_CODE (stack_limit_rtx) == SYMBOL_REF)
    {
      limit = plus_constant (Pmode, stack_limit_rtx, current_frame.size + 4);
      if (!m68k_legitimate_constant_p (Pmode, limit))
	{
	  emit_move_insn (gen_rtx_REG (Pmode, D0_REG), limit);
	  limit = gen_rtx_REG (Pmode, D0_REG);
	}
      emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode,
					    stack_pointer_rtx, limit),
			       stack_pointer_rtx, limit,
			       const1_rtx));
    }

  fsize_with_regs = current_frame.size;
  if (TARGET_COLDFIRE)
    {
      /* ColdFire's move multiple instructions do not allow pre-decrement
	 addressing.  Add the size of movem saves to the initial stack
	 allocation instead.  */
      if (current_frame.reg_no >= MIN_MOVEM_REGS)
	fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
      if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
	fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
    }

  if (frame_pointer_needed)
    {
      if (fsize_with_regs == 0 && TUNE_68040)
	{
	  /* On the 68040, two separate moves are faster than link.w 0.  */
	  dest = gen_frame_mem (Pmode,
				gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
	  m68k_set_frame_related (emit_move_insn (dest, frame_pointer_rtx));
	  m68k_set_frame_related (emit_move_insn (frame_pointer_rtx,
						  stack_pointer_rtx));
	}
      else if (fsize_with_regs < 0x8000 || TARGET_68020)
	m68k_set_frame_related
	  (emit_insn (gen_link (frame_pointer_rtx,
				GEN_INT (-4 - fsize_with_regs))));
      else
	{
	  m68k_set_frame_related
	    (emit_insn (gen_link (frame_pointer_rtx, GEN_INT (-4))));
	  m68k_set_frame_related
	    (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-fsize_with_regs))));
	}

      /* If the frame pointer is needed, emit a special barrier that
	 will prevent the scheduler from moving stores to the frame
	 before the stack adjustment.  */
      emit_insn (gen_stack_tie (stack_pointer_rtx, frame_pointer_rtx));
    }
  else if (fsize_with_regs != 0)
    m68k_set_frame_related
      (emit_insn (gen_addsi3 (stack_pointer_rtx,
			      stack_pointer_rtx,
			      GEN_INT (-fsize_with_regs))));

  if (current_frame.fpu_mask)
    {
      gcc_assert (current_frame.fpu_no >= MIN_FMOVEM_REGS);
      if (TARGET_68881)
	m68k_set_frame_related
	  (m68k_emit_movem (stack_pointer_rtx,
			    current_frame.fpu_no * -GET_MODE_SIZE (XFmode),
			    current_frame.fpu_no, FP0_REG,
			    current_frame.fpu_mask, true, true));
      else
	{
	  int offset;

	  /* If we're using moveml to save the integer registers,
	     the stack pointer will point to the bottom of the moveml
	     save area.  Find the stack offset of the first FP register.  */
	  if (current_frame.reg_no < MIN_MOVEM_REGS)
	    offset = 0;
	  else
	    offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
	  m68k_set_frame_related
	    (m68k_emit_movem (stack_pointer_rtx, offset,
			      current_frame.fpu_no, FP0_REG,
			      current_frame.fpu_mask, true, false));
	}
    }

  /* If the stack limit is not a symbol, check it here.
     This has the disadvantage that it may be too late...  */
  if (crtl->limit_stack)
    {
      if (REG_P (stack_limit_rtx))
	emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode, stack_pointer_rtx,
					      stack_limit_rtx),
				 stack_pointer_rtx, stack_limit_rtx,
				 const1_rtx));

      else if (GET_CODE (stack_limit_rtx) != SYMBOL_REF)
	warning (0, "stack limit expression is not supported");
    }

  if (current_frame.reg_no < MIN_MOVEM_REGS)
    {
      /* Store each register separately in the same order moveml does.  */
      int i;

      for (i = 16; i-- > 0; )
	if (current_frame.reg_mask & (1 << i))
	  {
	    src = gen_rtx_REG (SImode, D0_REG + i);
	    dest = gen_frame_mem (SImode,
				  gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
	    m68k_set_frame_related (emit_insn (gen_movsi (dest, src)));
	  }
    }
  else
    {
      if (TARGET_COLDFIRE)
	/* The required register save space has already been allocated.
	   The first register should be stored at (%sp).  */
	m68k_set_frame_related
	  (m68k_emit_movem (stack_pointer_rtx, 0,
			    current_frame.reg_no, D0_REG,
			    current_frame.reg_mask, true, false));
      else
	m68k_set_frame_related
	  (m68k_emit_movem (stack_pointer_rtx,
			    current_frame.reg_no * -GET_MODE_SIZE (SImode),
			    current_frame.reg_no, D0_REG,
			    current_frame.reg_mask, true, true));
    }

  if (!TARGET_SEP_DATA
      && crtl->uses_pic_offset_table)
    emit_insn (gen_load_got (pic_offset_table_rtx));
}
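
/* A typical non-ColdFire prologue with a frame pointer therefore looks
   something like (illustrative assembly):
     link.w %fp,#-<size>
     moveml %d2-%d4,-(%sp)
   with an fmovem added when floating-point registers must be saved.  */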
\f
/* Return true if a simple (return) instruction is sufficient for this
   function (i.e. if no epilogue is needed).  */

bool
m68k_use_return_insn (void)
{
  if (!reload_completed || frame_pointer_needed || get_frame_size () != 0)
    return false;

  m68k_compute_frame_layout ();
  return current_frame.offset == 0;
}

/* Emit RTL for the "epilogue" or "sibcall_epilogue" define_expand;
   SIBCALL_P says which.

   The function epilogue should not depend on the current stack pointer!
   It should use the frame pointer only, if there is a frame pointer.
   This is mandatory because of alloca; we also take advantage of it to
   omit stack adjustments before returning.  */

void
m68k_expand_epilogue (bool sibcall_p)
{
  HOST_WIDE_INT fsize, fsize_with_regs;
  bool big, restore_from_sp;

  m68k_compute_frame_layout ();

  fsize = current_frame.size;
  big = false;
  restore_from_sp = false;

  /* FIXME: crtl->is_leaf below is too strong.
     What we really need to know is whether there could be a pending
     stack adjustment needed at that point.  */
  restore_from_sp = (!frame_pointer_needed
		     || (!cfun->calls_alloca && crtl->is_leaf));

  /* fsize_with_regs is the size we need to adjust the sp when
     popping the frame.  */
  fsize_with_regs = fsize;
  if (TARGET_COLDFIRE && restore_from_sp)
    {
      /* ColdFire's move multiple instructions do not allow post-increment
	 addressing.  Add the size of movem loads to the final deallocation
	 instead.  */
      if (current_frame.reg_no >= MIN_MOVEM_REGS)
	fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
      if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
	fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
    }

  if (current_frame.offset + fsize >= 0x8000
      && !restore_from_sp
      && (current_frame.reg_mask || current_frame.fpu_mask))
    {
      if (TARGET_COLDFIRE
	  && (current_frame.reg_no >= MIN_MOVEM_REGS
	      || current_frame.fpu_no >= MIN_FMOVEM_REGS))
	{
	  /* ColdFire's move multiple instructions do not support the
	     (d8,Ax,Xi) addressing mode, so we may as well use a normal
	     stack-based restore.  */
	  emit_move_insn (gen_rtx_REG (Pmode, A1_REG),
			  GEN_INT (-(current_frame.offset + fsize)));
	  emit_insn (gen_addsi3 (stack_pointer_rtx,
				 gen_rtx_REG (Pmode, A1_REG),
				 frame_pointer_rtx));
	  restore_from_sp = true;
	}
      else
	{
	  emit_move_insn (gen_rtx_REG (Pmode, A1_REG), GEN_INT (-fsize));
	  fsize = 0;
	  big = true;
	}
    }

  if (current_frame.reg_no < MIN_MOVEM_REGS)
    {
      /* Restore each register separately in the same order moveml does.  */
      int i;
      HOST_WIDE_INT offset;

      offset = current_frame.offset + fsize;
      for (i = 0; i < 16; i++)
	if (current_frame.reg_mask & (1 << i))
	  {
	    rtx addr;

	    if (big)
	      {
		/* Generate the address -OFFSET(%fp,%a1.l).  */
		addr = gen_rtx_REG (Pmode, A1_REG);
		addr = gen_rtx_PLUS (Pmode, addr, frame_pointer_rtx);
		addr = plus_constant (Pmode, addr, -offset);
	      }
	    else if (restore_from_sp)
	      addr = gen_rtx_POST_INC (Pmode, stack_pointer_rtx);
	    else
	      addr = plus_constant (Pmode, frame_pointer_rtx, -offset);
	    emit_move_insn (gen_rtx_REG (SImode, D0_REG + i),
			    gen_frame_mem (SImode, addr));
	    offset -= GET_MODE_SIZE (SImode);
	  }
    }
  else if (current_frame.reg_mask)
    {
      if (big)
	m68k_emit_movem (gen_rtx_PLUS (Pmode,
				       gen_rtx_REG (Pmode, A1_REG),
				       frame_pointer_rtx),
			 -(current_frame.offset + fsize),
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false, false);
      else if (restore_from_sp)
	m68k_emit_movem (stack_pointer_rtx, 0,
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false,
			 !TARGET_COLDFIRE);
      else
	m68k_emit_movem (frame_pointer_rtx,
			 -(current_frame.offset + fsize),
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false, false);
    }

  if (current_frame.fpu_no > 0)
    {
      if (big)
	m68k_emit_movem (gen_rtx_PLUS (Pmode,
				       gen_rtx_REG (Pmode, A1_REG),
				       frame_pointer_rtx),
			 -(current_frame.foffset + fsize),
			 current_frame.fpu_no, FP0_REG,
			 current_frame.fpu_mask, false, false);
      else if (restore_from_sp)
	{
	  if (TARGET_COLDFIRE)
	    {
	      int offset;

	      /* If we used moveml to restore the integer registers, the
		 stack pointer will still point to the bottom of the moveml
		 save area.  Find the stack offset of the first FP
		 register.  */
	      if (current_frame.reg_no < MIN_MOVEM_REGS)
		offset = 0;
	      else
		offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
	      m68k_emit_movem (stack_pointer_rtx, offset,
			       current_frame.fpu_no, FP0_REG,
			       current_frame.fpu_mask, false, false);
	    }
	  else
	    m68k_emit_movem (stack_pointer_rtx, 0,
			     current_frame.fpu_no, FP0_REG,
			     current_frame.fpu_mask, false, true);
	}
      else
	m68k_emit_movem (frame_pointer_rtx,
			 -(current_frame.foffset + fsize),
			 current_frame.fpu_no, FP0_REG,
			 current_frame.fpu_mask, false, false);
    }

  if (frame_pointer_needed)
    emit_insn (gen_unlink (frame_pointer_rtx));
  else if (fsize_with_regs)
    emit_insn (gen_addsi3 (stack_pointer_rtx,
			   stack_pointer_rtx,
			   GEN_INT (fsize_with_regs)));

  if (crtl->calls_eh_return)
    emit_insn (gen_addsi3 (stack_pointer_rtx,
			   stack_pointer_rtx,
			   EH_RETURN_STACKADJ_RTX));

  if (!sibcall_p)
    emit_jump_insn (ret_rtx);
}
\f
/* Return true if X is a valid comparison operator for the dbcc
   instruction.

   Note it rejects floating point comparison operators.
   (In the future we could use Fdbcc).

   It also rejects some comparisons when CC_NO_OVERFLOW is set.  */

int
valid_dbcc_comparison_p_2 (rtx x, machine_mode mode ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (x))
    {
    case EQ: case NE: case GTU: case LTU:
    case GEU: case LEU:
      return 1;

      /* Reject some when CC_NO_OVERFLOW is set.  This may be overly
	 conservative.  */
    case GT: case LT: case GE: case LE:
      return ! (cc_prev_status.flags & CC_NO_OVERFLOW);
    default:
      return 0;
    }
}

/* Return nonzero if flags are currently in the 68881 flag register.  */
int
flags_in_68881 (void)
{
  /* We could add support for these in the future.  */
  return cc_status.flags & CC_IN_68881;
}

/* Return true if PARALLEL contains register REGNO.  */
static bool
m68k_reg_present_p (const_rtx parallel, unsigned int regno)
{
  int i;

  if (REG_P (parallel) && REGNO (parallel) == regno)
    return true;

  if (GET_CODE (parallel) != PARALLEL)
    return false;

  for (i = 0; i < XVECLEN (parallel, 0); ++i)
    {
      const_rtx x;

      x = XEXP (XVECEXP (parallel, 0, i), 0);
      if (REG_P (x) && REGNO (x) == regno)
	return true;
    }

  return false;
}

/* Implement TARGET_FUNCTION_OK_FOR_SIBCALL.  */

static bool
m68k_ok_for_sibcall_p (tree decl, tree exp)
{
  enum m68k_function_kind kind;

  /* We cannot use sibcalls for nested functions because we use the
     static chain register for indirect calls.  */
  if (CALL_EXPR_STATIC_CHAIN (exp))
    return false;

  if (!VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
    {
      /* Check that the return value locations are the same.  For
	 example, that we aren't returning a value from the sibling in
	 a D0 register but then need to transfer it to an A0 register.  */
      rtx cfun_value;
      rtx call_value;

      cfun_value = FUNCTION_VALUE (TREE_TYPE (DECL_RESULT (cfun->decl)),
				   cfun->decl);
      call_value = FUNCTION_VALUE (TREE_TYPE (exp), decl);

      /* Check that the values are equal or that the result the callee
	 function returns is a superset of what the current function
	 returns.  */
      if (!(rtx_equal_p (cfun_value, call_value)
	    || (REG_P (cfun_value)
		&& m68k_reg_present_p (call_value, REGNO (cfun_value)))))
	return false;
    }

  kind = m68k_get_function_kind (current_function_decl);
  if (kind == m68k_fk_normal_function)
    /* We can always sibcall from a normal function, because it's
       undefined if it is calling an interrupt function.  */
    return true;

  /* Otherwise we can only sibcall if the function kind is known to be
     the same.  */
  if (decl && m68k_get_function_kind (decl) == kind)
    return true;

  return false;
}

/* On the m68k all args are always pushed.  */

static rtx
m68k_function_arg (cumulative_args_t cum ATTRIBUTE_UNUSED,
		   machine_mode mode ATTRIBUTE_UNUSED,
		   const_tree type ATTRIBUTE_UNUSED,
		   bool named ATTRIBUTE_UNUSED)
{
  return NULL_RTX;
}

static void
m68k_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
			   const_tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);

  *cum += (mode != BLKmode
	   ? (GET_MODE_SIZE (mode) + 3) & ~3
	   : (int_size_in_bytes (type) + 3) & ~3);
}
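
/* Since all arguments live on the stack, *CUM only tracks the rounded
   byte offset; e.g. an HImode argument still advances it by 4.  */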

/* Convert X to a legitimate function call memory reference and return the
   result.  */

rtx
m68k_legitimize_call_address (rtx x)
{
  gcc_assert (MEM_P (x));
  if (call_operand (XEXP (x, 0), VOIDmode))
    return x;
  return replace_equiv_address (x, force_reg (Pmode, XEXP (x, 0)));
}

/* Likewise for sibling calls.  */

rtx
m68k_legitimize_sibcall_address (rtx x)
{
  gcc_assert (MEM_P (x));
  if (sibcall_operand (XEXP (x, 0), VOIDmode))
    return x;

  emit_move_insn (gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM), XEXP (x, 0));
  return replace_equiv_address (x, gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM));
}

/* Convert X to a legitimate address and return it if successful.  Otherwise
   return X.

   For the 68000, we handle X+REG by loading X into a register R and
   using R+REG.  R will go in an address reg and indexing will be used.
   However, if REG is a broken-out memory address or multiplication,
   nothing needs to be done because REG can certainly go in an address reg.  */

static rtx
m68k_legitimize_address (rtx x, rtx oldx, machine_mode mode)
{
  if (m68k_tls_symbol_p (x))
    return m68k_legitimize_tls_address (x);

  if (GET_CODE (x) == PLUS)
    {
      int ch = (x) != (oldx);
      int copied = 0;

#define COPY_ONCE(Y) if (!copied) { Y = copy_rtx (Y); copied = ch = 1; }

      if (GET_CODE (XEXP (x, 0)) == MULT)
	{
	  COPY_ONCE (x);
	  XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
	}
      if (GET_CODE (XEXP (x, 1)) == MULT)
	{
	  COPY_ONCE (x);
	  XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
	}
      if (ch)
	{
	  if (GET_CODE (XEXP (x, 1)) == REG
	      && GET_CODE (XEXP (x, 0)) == REG)
	    {
	      if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
		{
		  COPY_ONCE (x);
		  x = force_operand (x, 0);
		}
	      return x;
	    }
	  if (memory_address_p (mode, x))
	    return x;
	}
      if (GET_CODE (XEXP (x, 0)) == REG
	  || (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
	      && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
	      && GET_MODE (XEXP (XEXP (x, 0), 0)) == HImode))
	{
	  rtx temp = gen_reg_rtx (Pmode);
	  rtx val = force_operand (XEXP (x, 1), 0);
	  emit_move_insn (temp, val);
	  COPY_ONCE (x);
	  XEXP (x, 1) = temp;
	  if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
	      && GET_CODE (XEXP (x, 0)) == REG)
	    x = force_operand (x, 0);
	}
      else if (GET_CODE (XEXP (x, 1)) == REG
	       || (GET_CODE (XEXP (x, 1)) == SIGN_EXTEND
		   && GET_CODE (XEXP (XEXP (x, 1), 0)) == REG
		   && GET_MODE (XEXP (XEXP (x, 1), 0)) == HImode))
	{
	  rtx temp = gen_reg_rtx (Pmode);
	  rtx val = force_operand (XEXP (x, 0), 0);
	  emit_move_insn (temp, val);
	  COPY_ONCE (x);
	  XEXP (x, 0) = temp;
	  if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
	      && GET_CODE (XEXP (x, 1)) == REG)
	    x = force_operand (x, 0);
	}
    }

  return x;
}


/* Output a dbCC; jCC sequence.  Note we do not handle the
   floating point version of this sequence (Fdbcc).  We also
   do not handle alternative conditions when CC_NO_OVERFLOW is
   set.  It is assumed that valid_dbcc_comparison_p and flags_in_68881 will
   kick those out before we get here.  */

void
output_dbcc_and_branch (rtx *operands)
{
  switch (GET_CODE (operands[3]))
    {
    case EQ:
      output_asm_insn ("dbeq %0,%l1\n\tjeq %l2", operands);
      break;

    case NE:
      output_asm_insn ("dbne %0,%l1\n\tjne %l2", operands);
      break;

    case GT:
      output_asm_insn ("dbgt %0,%l1\n\tjgt %l2", operands);
      break;

    case GTU:
      output_asm_insn ("dbhi %0,%l1\n\tjhi %l2", operands);
      break;

    case LT:
      output_asm_insn ("dblt %0,%l1\n\tjlt %l2", operands);
      break;

    case LTU:
      output_asm_insn ("dbcs %0,%l1\n\tjcs %l2", operands);
      break;

    case GE:
      output_asm_insn ("dbge %0,%l1\n\tjge %l2", operands);
      break;

    case GEU:
      output_asm_insn ("dbcc %0,%l1\n\tjcc %l2", operands);
      break;

    case LE:
      output_asm_insn ("dble %0,%l1\n\tjle %l2", operands);
      break;

    case LEU:
      output_asm_insn ("dbls %0,%l1\n\tjls %l2", operands);
      break;

    default:
      gcc_unreachable ();
    }

  /* If the decrement is to be done in SImode, then we have
     to compensate for the fact that dbcc decrements in HImode.  */
  switch (GET_MODE (operands[0]))
    {
    case SImode:
      output_asm_insn ("clr%.w %0\n\tsubq%.l #1,%0\n\tjpl %l1", operands);
      break;

    case HImode:
      break;

    default:
      gcc_unreachable ();
    }
}
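
/* For an EQ test with an SImode counter in %d0, the code above therefore
   emits a sequence of the form (illustrative):
     dbeq %d0,L1
     jeq L2
     clr.w %d0
     subq.l #1,%d0
     jpl L1  */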

const char *
output_scc_di (rtx op, rtx operand1, rtx operand2, rtx dest)
{
  rtx loperands[7];
  enum rtx_code op_code = GET_CODE (op);

  /* This does not produce a useful cc.  */
  CC_STATUS_INIT;

  /* The m68k cmp.l instruction requires operand1 to be a reg as used
     below.  Swap the operands and change the op if these requirements
     are not fulfilled.  */
  if (GET_CODE (operand2) == REG && GET_CODE (operand1) != REG)
    {
      rtx tmp = operand1;

      operand1 = operand2;
      operand2 = tmp;
      op_code = swap_condition (op_code);
    }
  loperands[0] = operand1;
  if (GET_CODE (operand1) == REG)
    loperands[1] = gen_rtx_REG (SImode, REGNO (operand1) + 1);
  else
    loperands[1] = adjust_address (operand1, SImode, 4);
  if (operand2 != const0_rtx)
    {
      loperands[2] = operand2;
      if (GET_CODE (operand2) == REG)
	loperands[3] = gen_rtx_REG (SImode, REGNO (operand2) + 1);
      else
	loperands[3] = adjust_address (operand2, SImode, 4);
    }
  loperands[4] = gen_label_rtx ();
  if (operand2 != const0_rtx)
    output_asm_insn ("cmp%.l %2,%0\n\tjne %l4\n\tcmp%.l %3,%1", loperands);
  else
    {
      if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[0]))
	output_asm_insn ("tst%.l %0", loperands);
      else
	output_asm_insn ("cmp%.w #0,%0", loperands);

      output_asm_insn ("jne %l4", loperands);

      if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[1]))
	output_asm_insn ("tst%.l %1", loperands);
      else
	output_asm_insn ("cmp%.w #0,%1", loperands);
    }

  loperands[5] = dest;

  switch (op_code)
    {
    case EQ:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("seq %5", loperands);
      break;

    case NE:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sne %5", loperands);
      break;

    case GT:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("shi %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sgt %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case GTU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("shi %5", loperands);
      break;

    case LT:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("scs %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("slt %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case LTU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("scs %5", loperands);
      break;

    case GE:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("scc %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sge %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case GEU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("scc %5", loperands);
      break;

    case LE:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("sls %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sle %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case LEU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sls %5", loperands);
      break;

    default:
      gcc_unreachable ();
    }
  return "";
}

const char *
output_btst (rtx *operands, rtx countop, rtx dataop, rtx_insn *insn, int signpos)
{
  operands[0] = countop;
  operands[1] = dataop;

  if (GET_CODE (countop) == CONST_INT)
    {
      register int count = INTVAL (countop);
      /* If COUNT is bigger than the size of the storage unit in use,
	 advance to the containing unit of the same size.  */
1768 if (count > signpos)
1769 {
1770 int offset = (count & ~signpos) / 8;
1771 count = count & signpos;
1772 operands[1] = dataop = adjust_address (dataop, QImode, offset);
1773 }
1774 if (count == signpos)
1775 cc_status.flags = CC_NOT_POSITIVE | CC_Z_IN_NOT_N;
1776 else
1777 cc_status.flags = CC_NOT_NEGATIVE | CC_Z_IN_NOT_N;
1778
1779 /* These three statements used to use next_insns_test_no...
1780 but it appears that this should do the same job. */
1781 if (count == 31
1782 && next_insn_tests_no_inequality (insn))
1783 return "tst%.l %1";
1784 if (count == 15
1785 && next_insn_tests_no_inequality (insn))
1786 return "tst%.w %1";
1787 if (count == 7
1788 && next_insn_tests_no_inequality (insn))
1789 return "tst%.b %1";
1790 /* Try to use `movew to ccr' followed by the appropriate branch insn.
1791 On some m68k variants unfortunately that's slower than btst.
1792 On 68000 and higher, that should also work for all HImode operands. */
1793 if (TUNE_CPU32 || TARGET_COLDFIRE || optimize_size)
1794 {
1795 if (count == 3 && DATA_REG_P (operands[1])
1796 && next_insn_tests_no_inequality (insn))
1797 {
1798 cc_status.flags = CC_NOT_NEGATIVE | CC_Z_IN_NOT_N | CC_NO_OVERFLOW;
1799 return "move%.w %1,%%ccr";
1800 }
1801 if (count == 2 && DATA_REG_P (operands[1])
1802 && next_insn_tests_no_inequality (insn))
1803 {
1804 cc_status.flags = CC_NOT_NEGATIVE | CC_INVERTED | CC_NO_OVERFLOW;
1805 return "move%.w %1,%%ccr";
1806 }
1807 /* count == 1 followed by bvc/bvs and
1808 count == 0 followed by bcc/bcs are also possible, but need
1809 m68k-specific CC_Z_IN_NOT_V and CC_Z_IN_NOT_C flags. */
1810 }
1811
1812 cc_status.flags = CC_NOT_NEGATIVE;
1813 }
1814 return "btst %0,%1";
1815 }
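
/* An illustrative note on the move-to-ccr trick above (a sketch, not from
   the original source): "move.w dn,%ccr" loads the low five bits of the
   word into the X/N/Z/V/C flags (bits 4..0), so bit 3 of the tested value
   lands in N and bit 2 in Z.  Roughly:

	move.w d0,%ccr
	jmi bit_3_was_set	| N now holds bit 3 of d0

   tests bit 3 without a btst.  A set bit 2 makes Z=1, the opposite of a
   normal test's sense, which is what the CC_INVERTED flag records.  */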
1816 \f
1817 /* Return true if X is a legitimate base register. STRICT_P says
1818 whether we need strict checking. */
1819
1820 bool
1821 m68k_legitimate_base_reg_p (rtx x, bool strict_p)
1822 {
1823 /* Allow SUBREG everywhere we allow REG. This results in better code. */
1824 if (!strict_p && GET_CODE (x) == SUBREG)
1825 x = SUBREG_REG (x);
1826
1827 return (REG_P (x)
1828 && (strict_p
1829 ? REGNO_OK_FOR_BASE_P (REGNO (x))
1830 : REGNO_OK_FOR_BASE_NONSTRICT_P (REGNO (x))));
1831 }
1832
1833 /* Return true if X is a legitimate index register. STRICT_P says
1834 whether we need strict checking. */
1835
1836 bool
1837 m68k_legitimate_index_reg_p (rtx x, bool strict_p)
1838 {
1839 if (!strict_p && GET_CODE (x) == SUBREG)
1840 x = SUBREG_REG (x);
1841
1842 return (REG_P (x)
1843 && (strict_p
1844 ? REGNO_OK_FOR_INDEX_P (REGNO (x))
1845 : REGNO_OK_FOR_INDEX_NONSTRICT_P (REGNO (x))));
1846 }
1847
1848 /* Return true if X is a legitimate index expression for a (d8,An,Xn) or
1849 (bd,An,Xn) addressing mode. Fill in the INDEX and SCALE fields of
1850 ADDRESS if so. STRICT_P says whether we need strict checking. */
1851
1852 static bool
1853 m68k_decompose_index (rtx x, bool strict_p, struct m68k_address *address)
1854 {
1855 int scale;
1856
1857 /* Check for a scale factor. */
1858 scale = 1;
1859 if ((TARGET_68020 || TARGET_COLDFIRE)
1860 && GET_CODE (x) == MULT
1861 && GET_CODE (XEXP (x, 1)) == CONST_INT
1862 && (INTVAL (XEXP (x, 1)) == 2
1863 || INTVAL (XEXP (x, 1)) == 4
1864 || (INTVAL (XEXP (x, 1)) == 8
1865 && (TARGET_COLDFIRE_FPU || !TARGET_COLDFIRE))))
1866 {
1867 scale = INTVAL (XEXP (x, 1));
1868 x = XEXP (x, 0);
1869 }
1870
1871 /* Check for a word extension. */
1872 if (!TARGET_COLDFIRE
1873 && GET_CODE (x) == SIGN_EXTEND
1874 && GET_MODE (XEXP (x, 0)) == HImode)
1875 x = XEXP (x, 0);
1876
1877 if (m68k_legitimate_index_reg_p (x, strict_p))
1878 {
1879 address->scale = scale;
1880 address->index = x;
1881 return true;
1882 }
1883
1884 return false;
1885 }
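
/* For illustration (an assumed example, not from the original source):
   for an address like (a0 + d1.w*4) on a 68020, the index expression
   arrives here as

	(mult:SI (sign_extend:SI (reg:HI d1)) (const_int 4))

   The MULT is stripped to record scale = 4, the SIGN_EXTEND is stripped
   as a word-sized index, and the remaining register is checked with
   m68k_legitimate_index_reg_p.  */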
1886
1887 /* Return true if X is an illegitimate symbolic constant. */
1888
1889 bool
1890 m68k_illegitimate_symbolic_constant_p (rtx x)
1891 {
1892 rtx base, offset;
1893
1894 if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P)
1895 {
1896 split_const (x, &base, &offset);
1897 if (GET_CODE (base) == SYMBOL_REF
1898 && !offset_within_block_p (base, INTVAL (offset)))
1899 return true;
1900 }
1901 return m68k_tls_reference_p (x, false);
1902 }
1903
1904 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
1905
1906 static bool
1907 m68k_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
1908 {
1909 return m68k_illegitimate_symbolic_constant_p (x);
1910 }
1911
1912 /* Return true if X is a legitimate constant address that can reach
1913 bytes in the range [X, X + REACH). STRICT_P says whether we need
1914 strict checking. */
1915
1916 static bool
1917 m68k_legitimate_constant_address_p (rtx x, unsigned int reach, bool strict_p)
1918 {
1919 rtx base, offset;
1920
1921 if (!CONSTANT_ADDRESS_P (x))
1922 return false;
1923
1924 if (flag_pic
1925 && !(strict_p && TARGET_PCREL)
1926 && symbolic_operand (x, VOIDmode))
1927 return false;
1928
1929 if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P && reach > 1)
1930 {
1931 split_const (x, &base, &offset);
1932 if (GET_CODE (base) == SYMBOL_REF
1933 && !offset_within_block_p (base, INTVAL (offset) + reach - 1))
1934 return false;
1935 }
1936
1937 return !m68k_tls_reference_p (x, false);
1938 }
1939
1940 /* Return true if X is a LABEL_REF for a jump table. Assume that unplaced
1941 labels will become jump tables. */
1942
1943 static bool
1944 m68k_jump_table_ref_p (rtx x)
1945 {
1946 if (GET_CODE (x) != LABEL_REF)
1947 return false;
1948
1949 rtx_insn *insn = as_a <rtx_insn *> (XEXP (x, 0));
1950 if (!NEXT_INSN (insn) && !PREV_INSN (insn))
1951 return true;
1952
1953 insn = next_nonnote_insn (insn);
1954 return insn && JUMP_TABLE_DATA_P (insn);
1955 }
1956
1957 /* Return true if X is a legitimate address for values of mode MODE.
1958 STRICT_P says whether strict checking is needed. If the address
1959 is valid, describe its components in *ADDRESS. */
1960
1961 static bool
1962 m68k_decompose_address (machine_mode mode, rtx x,
1963 bool strict_p, struct m68k_address *address)
1964 {
1965 unsigned int reach;
1966
1967 memset (address, 0, sizeof (*address));
1968
1969 if (mode == BLKmode)
1970 reach = 1;
1971 else
1972 reach = GET_MODE_SIZE (mode);
1973
1974 /* Check for (An) (mode 2). */
1975 if (m68k_legitimate_base_reg_p (x, strict_p))
1976 {
1977 address->base = x;
1978 return true;
1979 }
1980
1981 /* Check for -(An) and (An)+ (modes 3 and 4). */
1982 if ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_INC)
1983 && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
1984 {
1985 address->code = GET_CODE (x);
1986 address->base = XEXP (x, 0);
1987 return true;
1988 }
1989
1990 /* Check for (d16,An) (mode 5). */
1991 if (GET_CODE (x) == PLUS
1992 && GET_CODE (XEXP (x, 1)) == CONST_INT
1993 && IN_RANGE (INTVAL (XEXP (x, 1)), -0x8000, 0x8000 - reach)
1994 && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
1995 {
1996 address->base = XEXP (x, 0);
1997 address->offset = XEXP (x, 1);
1998 return true;
1999 }
2000
2001 /* Check for GOT loads. These are (bd,An,Xn) addresses if
2002 TARGET_68020 && flag_pic == 2, otherwise they are (d16,An)
2003 addresses. */
2004 if (GET_CODE (x) == PLUS
2005 && XEXP (x, 0) == pic_offset_table_rtx)
2006 {
2007 /* As we are processing a PLUS, do not unwrap RELOC32 symbols --
2008 they are invalid in this context. */
2009 if (m68k_unwrap_symbol (XEXP (x, 1), false) != XEXP (x, 1))
2010 {
2011 address->base = XEXP (x, 0);
2012 address->offset = XEXP (x, 1);
2013 return true;
2014 }
2015 }
2016
2017 /* The ColdFire FPU only accepts addressing modes 2-5. */
2018 if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
2019 return false;
2020
2021 /* Check for (xxx).w and (xxx).l. Also, in the TARGET_PCREL case,
2022 check for (d16,PC) or (bd,PC,Xn) with a suppressed index register.
2023 All these modes are variations of mode 7. */
2024 if (m68k_legitimate_constant_address_p (x, reach, strict_p))
2025 {
2026 address->offset = x;
2027 return true;
2028 }
2029
2030 /* Check for (d8,PC,Xn), a mode 7 form. This case is needed for
2031 tablejumps.
2032
2033 ??? do_tablejump creates these addresses before placing the target
2034 label, so we have to assume that unplaced labels are jump table
2035 references. It seems unlikely that we would ever generate indexed
2036 accesses to unplaced labels in other cases. */
2037 if (GET_CODE (x) == PLUS
2038 && m68k_jump_table_ref_p (XEXP (x, 1))
2039 && m68k_decompose_index (XEXP (x, 0), strict_p, address))
2040 {
2041 address->offset = XEXP (x, 1);
2042 return true;
2043 }
2044
2045 /* Everything hereafter deals with (d8,An,Xn.SIZE*SCALE) or
2046 (bd,An,Xn.SIZE*SCALE) addresses. */
2047
2048 if (TARGET_68020)
2049 {
2050 /* Check for a nonzero base displacement. */
2051 if (GET_CODE (x) == PLUS
2052 && m68k_legitimate_constant_address_p (XEXP (x, 1), reach, strict_p))
2053 {
2054 address->offset = XEXP (x, 1);
2055 x = XEXP (x, 0);
2056 }
2057
2058 /* Check for a suppressed index register. */
2059 if (m68k_legitimate_base_reg_p (x, strict_p))
2060 {
2061 address->base = x;
2062 return true;
2063 }
2064
2065 /* Check for a suppressed base register. Do not allow this case
2066 for non-symbolic offsets as it effectively gives gcc freedom
2067 to treat data registers as base registers, which can generate
2068 worse code. */
2069 if (address->offset
2070 && symbolic_operand (address->offset, VOIDmode)
2071 && m68k_decompose_index (x, strict_p, address))
2072 return true;
2073 }
2074 else
2075 {
2076 /* Check for a nonzero base displacement. */
2077 if (GET_CODE (x) == PLUS
2078 && GET_CODE (XEXP (x, 1)) == CONST_INT
2079 && IN_RANGE (INTVAL (XEXP (x, 1)), -0x80, 0x80 - reach))
2080 {
2081 address->offset = XEXP (x, 1);
2082 x = XEXP (x, 0);
2083 }
2084 }
2085
2086 /* We now expect the sum of a base and an index. */
2087 if (GET_CODE (x) == PLUS)
2088 {
2089 if (m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p)
2090 && m68k_decompose_index (XEXP (x, 1), strict_p, address))
2091 {
2092 address->base = XEXP (x, 0);
2093 return true;
2094 }
2095
2096 if (m68k_legitimate_base_reg_p (XEXP (x, 1), strict_p)
2097 && m68k_decompose_index (XEXP (x, 0), strict_p, address))
2098 {
2099 address->base = XEXP (x, 1);
2100 return true;
2101 }
2102 }
2103 return false;
2104 }
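
/* A worked example of the decomposition above (illustrative, not from
   the original source): the mode 5 address (42,%a0) arrives as

	(plus:SI (reg:SI a0) (const_int 42))

   and is recorded as address->base = a0, address->offset = 42, while an
   indexed address such as (8,%a0,%d1.l*4) additionally fills in
   address->index = d1 and address->scale = 4 via m68k_decompose_index.  */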
2105
2106 /* Return true if X is a legitimate address for values of mode MODE.
2107 STRICT_P says whether strict checking is needed. */
2108
2109 bool
2110 m68k_legitimate_address_p (machine_mode mode, rtx x, bool strict_p)
2111 {
2112 struct m68k_address address;
2113
2114 return m68k_decompose_address (mode, x, strict_p, &address);
2115 }
2116
2117 /* Return true if X is a memory, describing its address in ADDRESS if so.
2118 Apply strict checking if called during or after reload. */
2119
2120 static bool
2121 m68k_legitimate_mem_p (rtx x, struct m68k_address *address)
2122 {
2123 return (MEM_P (x)
2124 && m68k_decompose_address (GET_MODE (x), XEXP (x, 0),
2125 reload_in_progress || reload_completed,
2126 address));
2127 }
2128
2129 /* Implement TARGET_LEGITIMATE_CONSTANT_P. */
2130
2131 bool
2132 m68k_legitimate_constant_p (machine_mode mode, rtx x)
2133 {
2134 return mode != XFmode && !m68k_illegitimate_symbolic_constant_p (x);
2135 }
2136
2137 /* Return true if X matches the 'Q' constraint. It must be a memory
2138 with a base address and no constant offset or index. */
2139
2140 bool
2141 m68k_matches_q_p (rtx x)
2142 {
2143 struct m68k_address address;
2144
2145 return (m68k_legitimate_mem_p (x, &address)
2146 && address.code == UNKNOWN
2147 && address.base
2148 && !address.offset
2149 && !address.index);
2150 }
2151
2152 /* Return true if X matches the 'U' constraint. It must be a memory
2153 reference with a base register, a constant offset and no index. */
2154
2155 bool
2156 m68k_matches_u_p (rtx x)
2157 {
2158 struct m68k_address address;
2159
2160 return (m68k_legitimate_mem_p (x, &address)
2161 && address.code == UNKNOWN
2162 && address.base
2163 && address.offset
2164 && !address.index);
2165 }
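
/* Illustrative examples of the two constraints above (assumed, not from
   the original source): (mem (reg a0)) matches 'Q', while
   (mem (plus (reg a0) (const_int 8))) matches 'U'; an indexed reference
   such as (mem (plus (reg a0) (reg d0))) matches neither, since both
   constraints require the index to be absent.  */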
2166
2167 /* Return GOT pointer. */
2168
2169 static rtx
2170 m68k_get_gp (void)
2171 {
2172 if (pic_offset_table_rtx == NULL_RTX)
2173 pic_offset_table_rtx = gen_rtx_REG (Pmode, PIC_REG);
2174
2175 crtl->uses_pic_offset_table = 1;
2176
2177 return pic_offset_table_rtx;
2178 }
2179
2180 /* M68K relocations, used to distinguish GOT and TLS relocations in UNSPEC
2181 wrappers. */
2182 enum m68k_reloc { RELOC_GOT, RELOC_TLSGD, RELOC_TLSLDM, RELOC_TLSLDO,
2183 RELOC_TLSIE, RELOC_TLSLE };
2184
2185 #define TLS_RELOC_P(RELOC) ((RELOC) != RELOC_GOT)
2186
2187 /* Wrap symbol X into unspec representing relocation RELOC.
2188 BASE_REG - register that should be added to the result.
2189 TEMP_REG - if non-null, temporary register. */
2190
2191 static rtx
2192 m68k_wrap_symbol (rtx x, enum m68k_reloc reloc, rtx base_reg, rtx temp_reg)
2193 {
2194 bool use_x_p;
2195
2196 use_x_p = (base_reg == pic_offset_table_rtx) ? TARGET_XGOT : TARGET_XTLS;
2197
2198 if (TARGET_COLDFIRE && use_x_p)
2199 /* When compiling with the -mxgot or -mxtls switch, the code will look like this:
2200
2201 move.l <X>@<RELOC>,<TEMP_REG>
2202 add.l <BASE_REG>,<TEMP_REG> */
2203 {
2204 /* Wrap X in UNSPEC_RELOC32 to tell m68k_output_addr_const_extra
2205 to put @RELOC after the reference. */
2206 x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)),
2207 UNSPEC_RELOC32);
2208 x = gen_rtx_CONST (Pmode, x);
2209
2210 if (temp_reg == NULL)
2211 {
2212 gcc_assert (can_create_pseudo_p ());
2213 temp_reg = gen_reg_rtx (Pmode);
2214 }
2215
2216 emit_move_insn (temp_reg, x);
2217 emit_insn (gen_addsi3 (temp_reg, temp_reg, base_reg));
2218 x = temp_reg;
2219 }
2220 else
2221 {
2222 x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)),
2223 UNSPEC_RELOC16);
2224 x = gen_rtx_CONST (Pmode, x);
2225
2226 x = gen_rtx_PLUS (Pmode, base_reg, x);
2227 }
2228
2229 return x;
2230 }
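
/* For the non -mx{got,tls} path above, an illustrative result (a sketch,
   not from the original source) of wrapping SYMBOL_REF `foo' with
   RELOC_GOT and the PIC register a5 as base is

	(plus:SI (reg:SI a5)
		 (const:SI (unspec:SI [(symbol_ref "foo")
				       (const_int 0)] UNSPEC_RELOC16)))

   where (const_int 0) is RELOC_GOT; the @RELOC decoration is added later,
   when the UNSPEC is printed.  */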
2231
2232 /* Helper for m68k_unwrap_symbol.
2233 If unwrapping was successful (that is, if ORIG != <return value>), this
2234 also sets *RELOC_PTR to the relocation type of the symbol. */
2235
2236 static rtx
2237 m68k_unwrap_symbol_1 (rtx orig, bool unwrap_reloc32_p,
2238 enum m68k_reloc *reloc_ptr)
2239 {
2240 if (GET_CODE (orig) == CONST)
2241 {
2242 rtx x;
2243 enum m68k_reloc dummy;
2244
2245 x = XEXP (orig, 0);
2246
2247 if (reloc_ptr == NULL)
2248 reloc_ptr = &dummy;
2249
2250 /* Handle an addend. */
2251 if ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS)
2252 && CONST_INT_P (XEXP (x, 1)))
2253 x = XEXP (x, 0);
2254
2255 if (GET_CODE (x) == UNSPEC)
2256 {
2257 switch (XINT (x, 1))
2258 {
2259 case UNSPEC_RELOC16:
2260 orig = XVECEXP (x, 0, 0);
2261 *reloc_ptr = (enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1));
2262 break;
2263
2264 case UNSPEC_RELOC32:
2265 if (unwrap_reloc32_p)
2266 {
2267 orig = XVECEXP (x, 0, 0);
2268 *reloc_ptr = (enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1));
2269 }
2270 break;
2271
2272 default:
2273 break;
2274 }
2275 }
2276 }
2277
2278 return orig;
2279 }
2280
2281 /* Unwrap symbol from UNSPEC_RELOC16 and, if unwrap_reloc32_p,
2282 UNSPEC_RELOC32 wrappers. */
2283
2284 rtx
2285 m68k_unwrap_symbol (rtx orig, bool unwrap_reloc32_p)
2286 {
2287 return m68k_unwrap_symbol_1 (orig, unwrap_reloc32_p, NULL);
2288 }
2289
2290 /* Prescan insn before outputting assembler for it. */
2291
2292 void
2293 m68k_final_prescan_insn (rtx_insn *insn ATTRIBUTE_UNUSED,
2294 rtx *operands, int n_operands)
2295 {
2296 int i;
2297
2298 /* Combine and, possibly, other optimizations may do a good job
2299 of converting
2300 (const (unspec [(symbol)]))
2301 into
2302 (const (plus (unspec [(symbol)])
2303 (const_int N))).
2304 The problem with this is emitting @TLS or @GOT decorations.
2305 The decoration is emitted when processing (unspec), so the
2306 result would be "#symbol@TLSLE+N" instead of "#symbol+N@TLSLE".
2307
2308 It seems that the easiest solution to this is to convert such
2309 operands to
2310 (const (unspec [(plus (symbol)
2311 (const_int N))])).
2312 Note that the top level of the operand remains intact, so we don't have
2313 to patch up anything outside of the operand. */
2314
2315 subrtx_var_iterator::array_type array;
2316 for (i = 0; i < n_operands; ++i)
2317 {
2318 rtx op;
2319
2320 op = operands[i];
2321
2322 FOR_EACH_SUBRTX_VAR (iter, array, op, ALL)
2323 {
2324 rtx x = *iter;
2325 if (m68k_unwrap_symbol (x, true) != x)
2326 {
2327 rtx plus;
2328
2329 gcc_assert (GET_CODE (x) == CONST);
2330 plus = XEXP (x, 0);
2331
2332 if (GET_CODE (plus) == PLUS || GET_CODE (plus) == MINUS)
2333 {
2334 rtx unspec;
2335 rtx addend;
2336
2337 unspec = XEXP (plus, 0);
2338 gcc_assert (GET_CODE (unspec) == UNSPEC);
2339 addend = XEXP (plus, 1);
2340 gcc_assert (CONST_INT_P (addend));
2341
2342 /* We now have all the pieces, rearrange them. */
2343
2344 /* Move symbol to plus. */
2345 XEXP (plus, 0) = XVECEXP (unspec, 0, 0);
2346
2347 /* Move plus inside unspec. */
2348 XVECEXP (unspec, 0, 0) = plus;
2349
2350 /* Move unspec to top level of const. */
2351 XEXP (x, 0) = unspec;
2352 }
2353 iter.skip_subrtxes ();
2354 }
2355 }
2356 }
2357 }
2358
2359 /* Move X to a register and add REG_EQUAL note pointing to ORIG.
2360 If REG is non-null, use it; generate new pseudo otherwise. */
2361
2362 static rtx
2363 m68k_move_to_reg (rtx x, rtx orig, rtx reg)
2364 {
2365 rtx_insn *insn;
2366
2367 if (reg == NULL_RTX)
2368 {
2369 gcc_assert (can_create_pseudo_p ());
2370 reg = gen_reg_rtx (Pmode);
2371 }
2372
2373 insn = emit_move_insn (reg, x);
2374 /* Put a REG_EQUAL note on this insn, so that it can be optimized
2375 by loop. */
2376 set_unique_reg_note (insn, REG_EQUAL, orig);
2377
2378 return reg;
2379 }
2380
2381 /* Does the same as m68k_wrap_symbol, but returns a memory reference to
2382 the GOT slot. */
2383
2384 static rtx
2385 m68k_wrap_symbol_into_got_ref (rtx x, enum m68k_reloc reloc, rtx temp_reg)
2386 {
2387 x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), temp_reg);
2388
2389 x = gen_rtx_MEM (Pmode, x);
2390 MEM_READONLY_P (x) = 1;
2391
2392 return x;
2393 }
2394
2395 /* Legitimize PIC addresses. If the address is already
2396 position-independent, we return ORIG. Newly generated
2397 position-independent addresses go to REG. If we need more
2398 than one register, we lose.
2399
2400 An address is legitimized by making an indirect reference
2401 through the Global Offset Table with the name of the symbol
2402 used as an offset.
2403
2404 The assembler and linker are responsible for placing the
2405 address of the symbol in the GOT. The function prologue
2406 is responsible for initializing a5 to the starting address
2407 of the GOT.
2408
2409 The assembler is also responsible for translating a symbol name
2410 into a constant displacement from the start of the GOT.
2411
2412 A quick example may make things a little clearer:
2413
2414 When not generating PIC code, to store the value 12345 into _foo
2415 we would generate the following code:
2416
2417 movel #12345, _foo
2418
2419 When generating PIC code, two transformations are made. First, the compiler
2420 loads the address of _foo into a register, so the first transformation makes:
2421
2422 lea _foo, a0
2423 movel #12345, a0@
2424
2425 The code in movsi will intercept the lea instruction and call this
2426 routine which will transform the instructions into:
2427
2428 movel a5@(_foo:w), a0
2429 movel #12345, a0@
2430
2431
2432 That (in a nutshell) is how *all* symbol and label references are
2433 handled. */
2434
2435 rtx
2436 legitimize_pic_address (rtx orig, machine_mode mode ATTRIBUTE_UNUSED,
2437 rtx reg)
2438 {
2439 rtx pic_ref = orig;
2440
2441 /* First handle a simple SYMBOL_REF or LABEL_REF */
2442 if (GET_CODE (orig) == SYMBOL_REF || GET_CODE (orig) == LABEL_REF)
2443 {
2444 gcc_assert (reg);
2445
2446 pic_ref = m68k_wrap_symbol_into_got_ref (orig, RELOC_GOT, reg);
2447 pic_ref = m68k_move_to_reg (pic_ref, orig, reg);
2448 }
2449 else if (GET_CODE (orig) == CONST)
2450 {
2451 rtx base;
2452
2453 /* Make sure this has not already been legitimized. */
2454 if (m68k_unwrap_symbol (orig, true) != orig)
2455 return orig;
2456
2457 gcc_assert (reg);
2458
2459 /* Legitimize both operands of the PLUS. */
2460 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
2461
2462 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
2463 orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
2464 base == reg ? 0 : reg);
2465
2466 if (GET_CODE (orig) == CONST_INT)
2467 pic_ref = plus_constant (Pmode, base, INTVAL (orig));
2468 else
2469 pic_ref = gen_rtx_PLUS (Pmode, base, orig);
2470 }
2471
2472 return pic_ref;
2473 }
2474
2475 /* The __tls_get_addr symbol. */
2476 static GTY(()) rtx m68k_tls_get_addr;
2477
2478 /* Return SYMBOL_REF for __tls_get_addr. */
2479
2480 static rtx
2481 m68k_get_tls_get_addr (void)
2482 {
2483 if (m68k_tls_get_addr == NULL_RTX)
2484 m68k_tls_get_addr = init_one_libfunc ("__tls_get_addr");
2485
2486 return m68k_tls_get_addr;
2487 }
2488
2489 /* Return libcall result in A0 instead of usual D0. */
2490 static bool m68k_libcall_value_in_a0_p = false;
2491
2492 /* Emit instruction sequence that calls __tls_get_addr. X is
2493 the TLS symbol we are referencing and RELOC is the symbol type to use
2494 (either TLSGD or TLSLDM). EQV is the REG_EQUAL note for the sequence
2495 emitted. A pseudo register with the result of the __tls_get_addr call is
2496 returned. */
2497
2498 static rtx
2499 m68k_call_tls_get_addr (rtx x, rtx eqv, enum m68k_reloc reloc)
2500 {
2501 rtx a0;
2502 rtx_insn *insns;
2503 rtx dest;
2504
2505 /* Emit the call sequence. */
2506 start_sequence ();
2507
2508 /* FIXME: Unfortunately, emit_library_call_value does not
2509 consider (plus (%a5) (const (unspec))) to be a good enough
2510 operand for push, so it forces it into a register. The bad
2511 thing about this is that the combiner, due to copy propagation and other
2512 optimizations, sometimes cannot later fix this. As a consequence, an
2513 additional register may be allocated, resulting in a spill.
2514 For reference, see args processing loops in
2515 calls.c:emit_library_call_value_1.
2516 For testcase, see gcc.target/m68k/tls-{gd, ld}.c */
2517 x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), NULL_RTX);
2518
2519 /* __tls_get_addr() is not a libcall, but emitting a libcall_value
2520 is the simplest way of generating a call. The difference between
2521 __tls_get_addr() and an ordinary libcall is that the result is returned
2522 in A0 instead of the usual D0. To work around this, we use
2523 m68k_libcall_value_in_a0_p, which temporarily switches the return register to A0. */
2524
2525 m68k_libcall_value_in_a0_p = true;
2526 a0 = emit_library_call_value (m68k_get_tls_get_addr (), NULL_RTX, LCT_PURE,
2527 Pmode, 1, x, Pmode);
2528 m68k_libcall_value_in_a0_p = false;
2529
2530 insns = get_insns ();
2531 end_sequence ();
2532
2533 gcc_assert (can_create_pseudo_p ());
2534 dest = gen_reg_rtx (Pmode);
2535 emit_libcall_block (insns, dest, a0, eqv);
2536
2537 return dest;
2538 }
2539
2540 /* The __m68k_read_tp symbol. */
2541 static GTY(()) rtx m68k_read_tp;
2542
2543 /* Return SYMBOL_REF for __m68k_read_tp. */
2544
2545 static rtx
2546 m68k_get_m68k_read_tp (void)
2547 {
2548 if (m68k_read_tp == NULL_RTX)
2549 m68k_read_tp = init_one_libfunc ("__m68k_read_tp");
2550
2551 return m68k_read_tp;
2552 }
2553
2554 /* Emit instruction sequence that calls __m68k_read_tp.
2555 A pseudo register with result of __m68k_read_tp call is returned. */
2556
2557 static rtx
2558 m68k_call_m68k_read_tp (void)
2559 {
2560 rtx a0;
2561 rtx eqv;
2562 rtx_insn *insns;
2563 rtx dest;
2564
2565 start_sequence ();
2566
2567 /* __m68k_read_tp() is not a libcall, but emitting a libcall_value
2568 is the simplest way of generating a call. The difference between
2569 __m68k_read_tp() and an ordinary libcall is that the result is returned
2570 in A0 instead of the usual D0. To work around this, we use
2571 m68k_libcall_value_in_a0_p, which temporarily switches the return register to A0. */
2572
2573 /* Emit the call sequence. */
2574 m68k_libcall_value_in_a0_p = true;
2575 a0 = emit_library_call_value (m68k_get_m68k_read_tp (), NULL_RTX, LCT_PURE,
2576 Pmode, 0);
2577 m68k_libcall_value_in_a0_p = false;
2578 insns = get_insns ();
2579 end_sequence ();
2580
2581 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2582 share the m68k_read_tp result with other IE/LE model accesses. */
2583 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const1_rtx), UNSPEC_RELOC32);
2584
2585 gcc_assert (can_create_pseudo_p ());
2586 dest = gen_reg_rtx (Pmode);
2587 emit_libcall_block (insns, dest, a0, eqv);
2588
2589 return dest;
2590 }
2591
2592 /* Return a legitimized address for accessing TLS SYMBOL_REF X.
2593 For explanations of the instruction sequences, see the TLS/NPTL ABI for m68k and
2594 ColdFire. */
2595
2596 rtx
2597 m68k_legitimize_tls_address (rtx orig)
2598 {
2599 switch (SYMBOL_REF_TLS_MODEL (orig))
2600 {
2601 case TLS_MODEL_GLOBAL_DYNAMIC:
2602 orig = m68k_call_tls_get_addr (orig, orig, RELOC_TLSGD);
2603 break;
2604
2605 case TLS_MODEL_LOCAL_DYNAMIC:
2606 {
2607 rtx eqv;
2608 rtx a0;
2609 rtx x;
2610
2611 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2612 share the LDM result with other LD model accesses. */
2613 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
2614 UNSPEC_RELOC32);
2615
2616 a0 = m68k_call_tls_get_addr (orig, eqv, RELOC_TLSLDM);
2617
2618 x = m68k_wrap_symbol (orig, RELOC_TLSLDO, a0, NULL_RTX);
2619
2620 if (can_create_pseudo_p ())
2621 x = m68k_move_to_reg (x, orig, NULL_RTX);
2622
2623 orig = x;
2624 break;
2625 }
2626
2627 case TLS_MODEL_INITIAL_EXEC:
2628 {
2629 rtx a0;
2630 rtx x;
2631
2632 a0 = m68k_call_m68k_read_tp ();
2633
2634 x = m68k_wrap_symbol_into_got_ref (orig, RELOC_TLSIE, NULL_RTX);
2635 x = gen_rtx_PLUS (Pmode, x, a0);
2636
2637 if (can_create_pseudo_p ())
2638 x = m68k_move_to_reg (x, orig, NULL_RTX);
2639
2640 orig = x;
2641 break;
2642 }
2643
2644 case TLS_MODEL_LOCAL_EXEC:
2645 {
2646 rtx a0;
2647 rtx x;
2648
2649 a0 = m68k_call_m68k_read_tp ();
2650
2651 x = m68k_wrap_symbol (orig, RELOC_TLSLE, a0, NULL_RTX);
2652
2653 if (can_create_pseudo_p ())
2654 x = m68k_move_to_reg (x, orig, NULL_RTX);
2655
2656 orig = x;
2657 break;
2658 }
2659
2660 default:
2661 gcc_unreachable ();
2662 }
2663
2664 return orig;
2665 }
2666
2667 /* Return true if X is a TLS symbol. */
2668
2669 static bool
2670 m68k_tls_symbol_p (rtx x)
2671 {
2672 if (!TARGET_HAVE_TLS)
2673 return false;
2674
2675 if (GET_CODE (x) != SYMBOL_REF)
2676 return false;
2677
2678 return SYMBOL_REF_TLS_MODEL (x) != 0;
2679 }
2680
2681 /* If !LEGITIMATE_P, return true if X is a TLS symbol reference,
2682 even an illegitimate one.
2683 If LEGITIMATE_P, return true if X is a legitimate TLS symbol reference. */
2684
2685 bool
2686 m68k_tls_reference_p (rtx x, bool legitimate_p)
2687 {
2688 if (!TARGET_HAVE_TLS)
2689 return false;
2690
2691 if (!legitimate_p)
2692 {
2693 subrtx_var_iterator::array_type array;
2694 FOR_EACH_SUBRTX_VAR (iter, array, x, ALL)
2695 {
2696 rtx x = *iter;
2697
2698 /* Note: this is not the same as m68k_tls_symbol_p. */
2699 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0)
2700 return true;
2701
2702 /* Don't recurse into legitimate TLS references. */
2703 if (m68k_tls_reference_p (x, true))
2704 iter.skip_subrtxes ();
2705 }
2706 return false;
2707 }
2708 else
2709 {
2710 enum m68k_reloc reloc = RELOC_GOT;
2711
2712 return (m68k_unwrap_symbol_1 (x, true, &reloc) != x
2713 && TLS_RELOC_P (reloc));
2714 }
2715 }
2716
2717 \f
2718
2719 #define USE_MOVQ(i) ((unsigned) ((i) + 128) <= 255)
2720
2721 /* Return the type of move that should be used for integer I. */
2722
2723 M68K_CONST_METHOD
2724 m68k_const_method (HOST_WIDE_INT i)
2725 {
2726 unsigned u;
2727
2728 if (USE_MOVQ (i))
2729 return MOVQ;
2730
2731 /* The ColdFire doesn't have byte or word operations. */
2732 /* FIXME: This may not be useful for the m68060 either. */
2733 if (!TARGET_COLDFIRE)
2734 {
2735 /* If -256 < N < 256 but N is not in range for a moveq,
2736 N^0xff will be, so use moveq #N^0xff,dreg; not.b dreg. */
2737 if (USE_MOVQ (i ^ 0xff))
2738 return NOTB;
2739 /* Likewise, try with not.w. */
2740 if (USE_MOVQ (i ^ 0xffff))
2741 return NOTW;
2742 /* This is the only value where neg.w is useful. */
2743 if (i == -65408)
2744 return NEGW;
2745 }
2746
2747 /* Try also with swap. */
2748 u = i;
2749 if (USE_MOVQ ((u >> 16) | (u << 16)))
2750 return SWAP;
2751
2752 if (TARGET_ISAB)
2753 {
2754 /* Try using MVZ/MVS with an immediate value to load constants. */
2755 if (i >= 0 && i <= 65535)
2756 return MVZ;
2757 if (i >= -32768 && i <= 32767)
2758 return MVS;
2759 }
2760
2761 /* Otherwise, use move.l */
2762 return MOVL;
2763 }
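
/* Worked examples for the cases above (illustrative, not from the
   original source):

	i = 200      NOTB: moveq #55,d0; not.b d0    (200 ^ 0xff == 55)
	i = 0x20000  SWAP: moveq #2,d0; swap d0
	i = -65408   NEGW: moveq #-128,d0; neg.w d0  (0xffffff80 -> 0xffff0080)  */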
2764
2765 /* Return the cost of moving constant I into a data register. */
2766
2767 static int
2768 const_int_cost (HOST_WIDE_INT i)
2769 {
2770 switch (m68k_const_method (i))
2771 {
2772 case MOVQ:
2773 /* Constants between -128 and 127 are cheap due to moveq. */
2774 return 0;
2775 case MVZ:
2776 case MVS:
2777 case NOTB:
2778 case NOTW:
2779 case NEGW:
2780 case SWAP:
2781 /* Constants easily generated by moveq + not.b/not.w/neg.w/swap. */
2782 return 1;
2783 case MOVL:
2784 return 2;
2785 default:
2786 gcc_unreachable ();
2787 }
2788 }
2789
2790 static bool
2791 m68k_rtx_costs (rtx x, machine_mode mode, int outer_code,
2792 int opno ATTRIBUTE_UNUSED,
2793 int *total, bool speed ATTRIBUTE_UNUSED)
2794 {
2795 int code = GET_CODE (x);
2796
2797 switch (code)
2798 {
2799 case CONST_INT:
2800 /* Constant zero is super cheap due to clr instruction. */
2801 if (x == const0_rtx)
2802 *total = 0;
2803 else
2804 *total = const_int_cost (INTVAL (x));
2805 return true;
2806
2807 case CONST:
2808 case LABEL_REF:
2809 case SYMBOL_REF:
2810 *total = 3;
2811 return true;
2812
2813 case CONST_DOUBLE:
2814 /* Make 0.0 cheaper than other floating constants to
2815 encourage creating tstsf and tstdf insns. */
2816 if (outer_code == COMPARE
2817 && (x == CONST0_RTX (SFmode) || x == CONST0_RTX (DFmode)))
2818 *total = 4;
2819 else
2820 *total = 5;
2821 return true;
2822
2823 /* These are vaguely right for a 68020. */
2824 /* The costs for long multiply have been adjusted to work properly
2825 in synth_mult on the 68020, relative to an average of the time
2826 for add and the time for shift, taking away a little more because
2827 sometimes move insns are needed. */
2828 /* div?.w is relatively cheaper on 68000 counted in COSTS_N_INSNS
2829 terms. */
2830 #define MULL_COST \
2831 (TUNE_68060 ? 2 \
2832 : TUNE_68040 ? 5 \
2833 : (TUNE_CFV2 && TUNE_EMAC) ? 3 \
2834 : (TUNE_CFV2 && TUNE_MAC) ? 4 \
2835 : TUNE_CFV2 ? 8 \
2836 : TARGET_COLDFIRE ? 3 : 13)
2837
2838 #define MULW_COST \
2839 (TUNE_68060 ? 2 \
2840 : TUNE_68040 ? 3 \
2841 : TUNE_68000_10 ? 5 \
2842 : (TUNE_CFV2 && TUNE_EMAC) ? 3 \
2843 : (TUNE_CFV2 && TUNE_MAC) ? 2 \
2844 : TUNE_CFV2 ? 8 \
2845 : TARGET_COLDFIRE ? 2 : 8)
2846
2847 #define DIVW_COST \
2848 (TARGET_CF_HWDIV ? 11 \
2849 : TUNE_68000_10 || TARGET_COLDFIRE ? 12 : 27)
2850
2851 case PLUS:
2852 /* An lea costs about three times as much as a simple add. */
2853 if (mode == SImode
2854 && GET_CODE (XEXP (x, 1)) == REG
2855 && GET_CODE (XEXP (x, 0)) == MULT
2856 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
2857 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2858 && (INTVAL (XEXP (XEXP (x, 0), 1)) == 2
2859 || INTVAL (XEXP (XEXP (x, 0), 1)) == 4
2860 || INTVAL (XEXP (XEXP (x, 0), 1)) == 8))
2861 {
2862 /* lea an@(dx:l:i),am */
2863 *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 2 : 3);
2864 return true;
2865 }
2866 return false;
2867
2868 case ASHIFT:
2869 case ASHIFTRT:
2870 case LSHIFTRT:
2871 if (TUNE_68060)
2872 {
2873 *total = COSTS_N_INSNS (1);
2874 return true;
2875 }
2876 if (TUNE_68000_10)
2877 {
2878 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
2879 {
2880 if (INTVAL (XEXP (x, 1)) < 16)
2881 *total = COSTS_N_INSNS (2) + INTVAL (XEXP (x, 1)) / 2;
2882 else
2883 /* We're using clrw + swap for these cases. */
2884 *total = COSTS_N_INSNS (4) + (INTVAL (XEXP (x, 1)) - 16) / 2;
2885 }
2886 else
2887 *total = COSTS_N_INSNS (10); /* Worst case. */
2888 return true;
2889 }
2890 /* A shift by a big integer takes an extra instruction. */
2891 if (GET_CODE (XEXP (x, 1)) == CONST_INT
2892 && (INTVAL (XEXP (x, 1)) == 16))
2893 {
2894 *total = COSTS_N_INSNS (2); /* clrw;swap */
2895 return true;
2896 }
2897 if (GET_CODE (XEXP (x, 1)) == CONST_INT
2898 && !(INTVAL (XEXP (x, 1)) > 0
2899 && INTVAL (XEXP (x, 1)) <= 8))
2900 {
2901 *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 1 : 3); /* lsr #i,dn */
2902 return true;
2903 }
2904 return false;
2905
2906 case MULT:
2907 if ((GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
2908 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
2909 && mode == SImode)
2910 *total = COSTS_N_INSNS (MULW_COST);
2911 else if (mode == QImode || mode == HImode)
2912 *total = COSTS_N_INSNS (MULW_COST);
2913 else
2914 *total = COSTS_N_INSNS (MULL_COST);
2915 return true;
2916
2917 case DIV:
2918 case UDIV:
2919 case MOD:
2920 case UMOD:
2921 if (mode == QImode || mode == HImode)
2922 *total = COSTS_N_INSNS (DIVW_COST); /* div.w */
2923 else if (TARGET_CF_HWDIV)
2924 *total = COSTS_N_INSNS (18);
2925 else
2926 *total = COSTS_N_INSNS (43); /* div.l */
2927 return true;
2928
2929 case ZERO_EXTRACT:
2930 if (outer_code == COMPARE)
2931 *total = 0;
2932 return false;
2933
2934 default:
2935 return false;
2936 }
2937 }
2938
2939 /* Return an instruction to move CONST_INT OPERANDS[1] into data register
2940 OPERANDS[0]. */
2941
2942 static const char *
2943 output_move_const_into_data_reg (rtx *operands)
2944 {
2945 HOST_WIDE_INT i;
2946
2947 i = INTVAL (operands[1]);
2948 switch (m68k_const_method (i))
2949 {
2950 case MVZ:
2951 return "mvzw %1,%0";
2952 case MVS:
2953 return "mvsw %1,%0";
2954 case MOVQ:
2955 return "moveq %1,%0";
2956 case NOTB:
2957 CC_STATUS_INIT;
2958 operands[1] = GEN_INT (i ^ 0xff);
2959 return "moveq %1,%0\n\tnot%.b %0";
2960 case NOTW:
2961 CC_STATUS_INIT;
2962 operands[1] = GEN_INT (i ^ 0xffff);
2963 return "moveq %1,%0\n\tnot%.w %0";
2964 case NEGW:
2965 CC_STATUS_INIT;
2966 return "moveq #-128,%0\n\tneg%.w %0";
2967 case SWAP:
2968 {
2969 unsigned u = i;
2970
2971 operands[1] = GEN_INT ((u << 16) | (u >> 16));
2972 return "moveq %1,%0\n\tswap %0";
2973 }
2974 case MOVL:
2975 return "move%.l %1,%0";
2976 default:
2977 gcc_unreachable ();
2978 }
2979 }
2980
2981 /* Return true if I can be handled by ISA B's mov3q instruction. */
2982
2983 bool
2984 valid_mov3q_const (HOST_WIDE_INT i)
2985 {
2986 return TARGET_ISAB && (i == -1 || IN_RANGE (i, 1, 7));
2987 }
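
/* Note (an assumption about the ISA B encoding, not stated in the
   original source): mov3q's 3-bit immediate field encodes -1 as 0,
   which is why the accepted values above are exactly { -1, 1, ..., 7 }.  */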
2988
2989 /* Return an instruction to move CONST_INT OPERANDS[1] into OPERANDS[0]. */
2991
2992 static const char *
2993 output_move_simode_const (rtx *operands)
2994 {
2995 rtx dest;
2996 HOST_WIDE_INT src;
2997
2998 dest = operands[0];
2999 src = INTVAL (operands[1]);
3000 if (src == 0
3001 && (DATA_REG_P (dest) || MEM_P (dest))
3002 /* clr insns on 68000 read before writing. */
3003 && ((TARGET_68010 || TARGET_COLDFIRE)
3004 || !(MEM_P (dest) && MEM_VOLATILE_P (dest))))
3005 return "clr%.l %0";
3006 else if (GET_MODE (dest) == SImode && valid_mov3q_const (src))
3007 return "mov3q%.l %1,%0";
3008 else if (src == 0 && ADDRESS_REG_P (dest))
3009 return "sub%.l %0,%0";
3010 else if (DATA_REG_P (dest))
3011 return output_move_const_into_data_reg (operands);
3012 else if (ADDRESS_REG_P (dest) && IN_RANGE (src, -0x8000, 0x7fff))
3013 {
3014 if (valid_mov3q_const (src))
3015 return "mov3q%.l %1,%0";
3016 return "move%.w %1,%0";
3017 }
3018 else if (MEM_P (dest)
3019 && GET_CODE (XEXP (dest, 0)) == PRE_DEC
3020 && REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
3021 && IN_RANGE (src, -0x8000, 0x7fff))
3022 {
3023 if (valid_mov3q_const (src))
3024 return "mov3q%.l %1,%-";
3025 return "pea %a1";
3026 }
3027 return "move%.l %1,%0";
3028 }
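
/* An illustrative note on the pea case above (a sketch, not from the
   original source): pea of an absolute short address pushes its
   sign-extended 32-bit value, so pushing the constant 42 becomes roughly
   "pea 42.w", which places the long word 0x0000002a on the stack in one
   instruction instead of a move.l immediate to -(sp).  */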
3029
3030 const char *
3031 output_move_simode (rtx *operands)
3032 {
3033 if (GET_CODE (operands[1]) == CONST_INT)
3034 return output_move_simode_const (operands);
3035 else if ((GET_CODE (operands[1]) == SYMBOL_REF
3036 || GET_CODE (operands[1]) == CONST)
3037 && push_operand (operands[0], SImode))
3038 return "pea %a1";
3039 else if ((GET_CODE (operands[1]) == SYMBOL_REF
3040 || GET_CODE (operands[1]) == CONST)
3041 && ADDRESS_REG_P (operands[0]))
3042 return "lea %a1,%0";
3043 return "move%.l %1,%0";
3044 }
3045
3046 const char *
3047 output_move_himode (rtx *operands)
3048 {
3049 if (GET_CODE (operands[1]) == CONST_INT)
3050 {
3051 if (operands[1] == const0_rtx
3052 && (DATA_REG_P (operands[0])
3053 || GET_CODE (operands[0]) == MEM)
3054 /* clr insns on 68000 read before writing. */
3055 && ((TARGET_68010 || TARGET_COLDFIRE)
3056 || !(GET_CODE (operands[0]) == MEM
3057 && MEM_VOLATILE_P (operands[0]))))
3058 return "clr%.w %0";
3059 else if (operands[1] == const0_rtx
3060 && ADDRESS_REG_P (operands[0]))
3061 return "sub%.l %0,%0";
3062 else if (DATA_REG_P (operands[0])
3063 && INTVAL (operands[1]) < 128
3064 && INTVAL (operands[1]) >= -128)
3065 return "moveq %1,%0";
3066 else if (INTVAL (operands[1]) < 0x8000
3067 && INTVAL (operands[1]) >= -0x8000)
3068 return "move%.w %1,%0";
3069 }
3070 else if (CONSTANT_P (operands[1]))
3071 return "move%.l %1,%0";
3072 return "move%.w %1,%0";
3073 }
3074
3075 const char *
3076 output_move_qimode (rtx *operands)
3077 {
3078 /* 68k family always modifies the stack pointer by at least 2, even for
3079 byte pushes. The 5200 (ColdFire) does not do this. */
3080
3081 /* This case is generated by the pushqi1 pattern now. */
3082 gcc_assert (!(GET_CODE (operands[0]) == MEM
3083 && GET_CODE (XEXP (operands[0], 0)) == PRE_DEC
3084 && XEXP (XEXP (operands[0], 0), 0) == stack_pointer_rtx
3085 && ! ADDRESS_REG_P (operands[1])
3086 && ! TARGET_COLDFIRE));
3087
3088 /* clr and st insns on 68000 read before writing. */
3089 if (!ADDRESS_REG_P (operands[0])
3090 && ((TARGET_68010 || TARGET_COLDFIRE)
3091 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3092 {
3093 if (operands[1] == const0_rtx)
3094 return "clr%.b %0";
3095 if ((!TARGET_COLDFIRE || DATA_REG_P (operands[0]))
3096 && GET_CODE (operands[1]) == CONST_INT
3097 && (INTVAL (operands[1]) & 255) == 255)
3098 {
3099 CC_STATUS_INIT;
3100 return "st %0";
3101 }
3102 }
3103 if (GET_CODE (operands[1]) == CONST_INT
3104 && DATA_REG_P (operands[0])
3105 && INTVAL (operands[1]) < 128
3106 && INTVAL (operands[1]) >= -128)
3107 return "moveq %1,%0";
3108 if (operands[1] == const0_rtx && ADDRESS_REG_P (operands[0]))
3109 return "sub%.l %0,%0";
3110 if (GET_CODE (operands[1]) != CONST_INT && CONSTANT_P (operands[1]))
3111 return "move%.l %1,%0";
3112 /* 68k family (including the 5200 ColdFire) does not support byte moves to
3113 or from address registers. */
3114 if (ADDRESS_REG_P (operands[0]) || ADDRESS_REG_P (operands[1]))
3115 return "move%.w %1,%0";
3116 return "move%.b %1,%0";
3117 }
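
/* An illustrative note (not from the original source): "st d0" sets every
   bit of its byte operand, so storing a value whose low byte is all ones
   becomes a single st instead of a move.b immediate.  The CC_STATUS_INIT
   above is needed because scc instructions do not set the condition
   codes.  */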
3118
3119 const char *
3120 output_move_stricthi (rtx *operands)
3121 {
3122 if (operands[1] == const0_rtx
3123 /* clr insns on 68000 read before writing. */
3124 && ((TARGET_68010 || TARGET_COLDFIRE)
3125 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3126 return "clr%.w %0";
3127 return "move%.w %1,%0";
3128 }
3129
3130 const char *
3131 output_move_strictqi (rtx *operands)
3132 {
3133 if (operands[1] == const0_rtx
3134 /* clr insns on 68000 read before writing. */
3135 && ((TARGET_68010 || TARGET_COLDFIRE)
3136 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3137 return "clr%.b %0";
3138 return "move%.b %1,%0";
3139 }
3140
3141 /* Return the best assembler insn template
3142 for moving operands[1] into operands[0] as a fullword. */
3143
3144 static const char *
3145 singlemove_string (rtx *operands)
3146 {
3147 if (GET_CODE (operands[1]) == CONST_INT)
3148 return output_move_simode_const (operands);
3149 return "move%.l %1,%0";
3150 }
3151
3152
3153 /* Output assembler or rtl code to perform a doubleword move insn
3154 with operands OPERANDS.
3155 Pointers to 3 helper functions should be specified:
3156 HANDLE_REG_ADJUST to adjust a register by a small value,
3157 HANDLE_COMPADR to compute an address and
3158 HANDLE_MOVSI to move 4 bytes. */
3159
3160 static void
3161 handle_move_double (rtx operands[2],
3162 void (*handle_reg_adjust) (rtx, int),
3163 void (*handle_compadr) (rtx [2]),
3164 void (*handle_movsi) (rtx [2]))
3165 {
3166 enum
3167 {
3168 REGOP, OFFSOP, MEMOP, PUSHOP, POPOP, CNSTOP, RNDOP
3169 } optype0, optype1;
3170 rtx latehalf[2];
3171 rtx middlehalf[2];
3172 rtx xops[2];
3173 rtx addreg0 = 0, addreg1 = 0;
3174 int dest_overlapped_low = 0;
3175 int size = GET_MODE_SIZE (GET_MODE (operands[0]));
3176
3177 middlehalf[0] = 0;
3178 middlehalf[1] = 0;
3179
3180 /* First classify both operands. */
3181
3182 if (REG_P (operands[0]))
3183 optype0 = REGOP;
3184 else if (offsettable_memref_p (operands[0]))
3185 optype0 = OFFSOP;
3186 else if (GET_CODE (XEXP (operands[0], 0)) == POST_INC)
3187 optype0 = POPOP;
3188 else if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC)
3189 optype0 = PUSHOP;
3190 else if (GET_CODE (operands[0]) == MEM)
3191 optype0 = MEMOP;
3192 else
3193 optype0 = RNDOP;
3194
3195 if (REG_P (operands[1]))
3196 optype1 = REGOP;
3197 else if (CONSTANT_P (operands[1]))
3198 optype1 = CNSTOP;
3199 else if (offsettable_memref_p (operands[1]))
3200 optype1 = OFFSOP;
3201 else if (GET_CODE (XEXP (operands[1], 0)) == POST_INC)
3202 optype1 = POPOP;
3203 else if (GET_CODE (XEXP (operands[1], 0)) == PRE_DEC)
3204 optype1 = PUSHOP;
3205 else if (GET_CODE (operands[1]) == MEM)
3206 optype1 = MEMOP;
3207 else
3208 optype1 = RNDOP;
3209
3210 /* Check for the cases that the operand constraints are not supposed
3211 to allow. Generating code for these cases is painful. */
3213 gcc_assert (optype0 != RNDOP && optype1 != RNDOP);
3214
3215 /* If one operand is decrementing and one is incrementing
3216 decrement the former register explicitly
3217 and change that operand into ordinary indexing. */
3218
3219 if (optype0 == PUSHOP && optype1 == POPOP)
3220 {
3221 operands[0] = XEXP (XEXP (operands[0], 0), 0);
3222
3223 handle_reg_adjust (operands[0], -size);
3224
3225 if (GET_MODE (operands[1]) == XFmode)
3226 operands[0] = gen_rtx_MEM (XFmode, operands[0]);
3227 else if (GET_MODE (operands[0]) == DFmode)
3228 operands[0] = gen_rtx_MEM (DFmode, operands[0]);
3229 else
3230 operands[0] = gen_rtx_MEM (DImode, operands[0]);
3231 optype0 = OFFSOP;
3232 }
3233 if (optype0 == POPOP && optype1 == PUSHOP)
3234 {
3235 operands[1] = XEXP (XEXP (operands[1], 0), 0);
3236
3237 handle_reg_adjust (operands[1], -size);
3238
3239 if (GET_MODE (operands[1]) == XFmode)
3240 operands[1] = gen_rtx_MEM (XFmode, operands[1]);
3241 else if (GET_MODE (operands[1]) == DFmode)
3242 operands[1] = gen_rtx_MEM (DFmode, operands[1]);
3243 else
3244 operands[1] = gen_rtx_MEM (DImode, operands[1]);
3245 optype1 = OFFSOP;
3246 }
3247
3248 /* If an operand is an unoffsettable memory ref, find a register
3249 we can increment temporarily to make it refer to the second word. */
3250
3251 if (optype0 == MEMOP)
3252 addreg0 = find_addr_reg (XEXP (operands[0], 0));
3253
3254 if (optype1 == MEMOP)
3255 addreg1 = find_addr_reg (XEXP (operands[1], 0));
3256
3257 /* Ok, we can do one word at a time.
3258 Normally we do the low-numbered word first,
3259 but if either operand is autodecrementing then we
3260 do the high-numbered word first.
3261
3262 In either case, set up in LATEHALF the operands to use
3263 for the high-numbered word and in some cases alter the
3264 operands in OPERANDS to be suitable for the low-numbered word. */
3265
3266 if (size == 12)
3267 {
3268 if (optype0 == REGOP)
3269 {
3270 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 2);
3271 middlehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
3272 }
3273 else if (optype0 == OFFSOP)
3274 {
3275 middlehalf[0] = adjust_address (operands[0], SImode, 4);
3276 latehalf[0] = adjust_address (operands[0], SImode, size - 4);
3277 }
3278 else
3279 {
3280 middlehalf[0] = adjust_address (operands[0], SImode, 0);
3281 latehalf[0] = adjust_address (operands[0], SImode, 0);
3282 }
3283
3284 if (optype1 == REGOP)
3285 {
3286 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 2);
3287 middlehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
3288 }
3289 else if (optype1 == OFFSOP)
3290 {
3291 middlehalf[1] = adjust_address (operands[1], SImode, 4);
3292 latehalf[1] = adjust_address (operands[1], SImode, size - 4);
3293 }
3294 else if (optype1 == CNSTOP)
3295 {
3296 if (GET_CODE (operands[1]) == CONST_DOUBLE)
3297 {
3298 REAL_VALUE_TYPE r;
3299 long l[3];
3300
3301 REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
3302 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
3303 operands[1] = GEN_INT (l[0]);
3304 middlehalf[1] = GEN_INT (l[1]);
3305 latehalf[1] = GEN_INT (l[2]);
3306 }
3307 else
3308 {
3309 /* No non-CONST_DOUBLE constant should ever appear
3310 here. */
3311 gcc_assert (!CONSTANT_P (operands[1]));
3312 }
3313 }
3314 else
3315 {
3316 middlehalf[1] = adjust_address (operands[1], SImode, 0);
3317 latehalf[1] = adjust_address (operands[1], SImode, 0);
3318 }
3319 }
3320 else
3321 /* size is not 12: */
3322 {
3323 if (optype0 == REGOP)
3324 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
3325 else if (optype0 == OFFSOP)
3326 latehalf[0] = adjust_address (operands[0], SImode, size - 4);
3327 else
3328 latehalf[0] = adjust_address (operands[0], SImode, 0);
3329
3330 if (optype1 == REGOP)
3331 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
3332 else if (optype1 == OFFSOP)
3333 latehalf[1] = adjust_address (operands[1], SImode, size - 4);
3334 else if (optype1 == CNSTOP)
3335 split_double (operands[1], &operands[1], &latehalf[1]);
3336 else
3337 latehalf[1] = adjust_address (operands[1], SImode, 0);
3338 }
3339
3340 /* If insn is effectively movd N(REG),-(REG) then we will do the high
3341 word first. We should use the adjusted operand 1 (which is N+4(REG))
3342 for the low word as well, to compensate for the first decrement of
3343 REG. */
3344 if (optype0 == PUSHOP
3345 && reg_overlap_mentioned_p (XEXP (XEXP (operands[0], 0), 0), operands[1]))
3346 operands[1] = middlehalf[1] = latehalf[1];
3347
3348 /* For (set (reg:DI N) (mem:DI ... (reg:SI N) ...)),
3349 if the upper part of reg N does not appear in the MEM, arrange to
3350 emit the move late-half first. Otherwise, compute the MEM address
3351 into the upper part of N and use that as a pointer to the memory
3352 operand. */
3353 if (optype0 == REGOP
3354 && (optype1 == OFFSOP || optype1 == MEMOP))
3355 {
3356 rtx testlow = gen_rtx_REG (SImode, REGNO (operands[0]));
3357
3358 if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
3359 && reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
3360 {
3361 /* If both halves of dest are used in the src memory address,
3362 compute the address into latehalf of dest.
3363 Note that this can't happen if the dest is two data regs. */
3364 compadr:
3365 xops[0] = latehalf[0];
3366 xops[1] = XEXP (operands[1], 0);
3367
3368 handle_compadr (xops);
3369 if (GET_MODE (operands[1]) == XFmode)
3370 {
3371 operands[1] = gen_rtx_MEM (XFmode, latehalf[0]);
3372 middlehalf[1] = adjust_address (operands[1], DImode, size - 8);
3373 latehalf[1] = adjust_address (operands[1], DImode, size - 4);
3374 }
3375 else
3376 {
3377 operands[1] = gen_rtx_MEM (DImode, latehalf[0]);
3378 latehalf[1] = adjust_address (operands[1], DImode, size - 4);
3379 }
3380 }
3381 else if (size == 12
3382 && reg_overlap_mentioned_p (middlehalf[0],
3383 XEXP (operands[1], 0)))
3384 {
3385 /* Check for two regs used by both source and dest.
3386 Note that this can't happen if the dest is all data regs.
3387 It can happen if the dest is d6, d7, a0.
3388 But in that case, latehalf is an addr reg, so
3389 the code at compadr does ok. */
3390
3391 if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
3392 || reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
3393 goto compadr;
3394
3395 /* JRV says this can't happen: */
3396 gcc_assert (!addreg0 && !addreg1);
3397
3398 /* Only the middle reg conflicts; simply put it last. */
3399 handle_movsi (operands);
3400 handle_movsi (latehalf);
3401 handle_movsi (middlehalf);
3402
3403 return;
3404 }
3405 else if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0)))
3406 /* If the low half of dest is mentioned in the source memory
3407 address, arrange to emit the late half of the move first. */
3408 dest_overlapped_low = 1;
3409 }
3410
3411 /* If one or both operands are autodecrementing,
3412 do the two words, high-numbered first. */
3413
3414 /* Likewise, if the first move would clobber the source of the second one,
3415 do them in the other order. This happens only for registers;
3416 such overlap can't happen in memory unless the user explicitly
3417 sets it up, and that is an undefined circumstance. */
3418
3419 if (optype0 == PUSHOP || optype1 == PUSHOP
3420 || (optype0 == REGOP && optype1 == REGOP
3421 && ((middlehalf[1] && REGNO (operands[0]) == REGNO (middlehalf[1]))
3422 || REGNO (operands[0]) == REGNO (latehalf[1])))
3423 || dest_overlapped_low)
3424 {
3425 /* Make any unoffsettable addresses point at high-numbered word. */
3426 if (addreg0)
3427 handle_reg_adjust (addreg0, size - 4);
3428 if (addreg1)
3429 handle_reg_adjust (addreg1, size - 4);
3430
3431 /* Do that word. */
3432 handle_movsi (latehalf);
3433
3434 /* Undo the adds we just did. */
3435 if (addreg0)
3436 handle_reg_adjust (addreg0, -4);
3437 if (addreg1)
3438 handle_reg_adjust (addreg1, -4);
3439
3440 if (size == 12)
3441 {
3442 handle_movsi (middlehalf);
3443
3444 if (addreg0)
3445 handle_reg_adjust (addreg0, -4);
3446 if (addreg1)
3447 handle_reg_adjust (addreg1, -4);
3448 }
3449
3450 /* Do low-numbered word. */
3451
3452 handle_movsi (operands);
3453 return;
3454 }
3455
3456 /* Normal case: do the two words, low-numbered first. */
3457
3458 m68k_final_prescan_insn (NULL, operands, 2);
3459 handle_movsi (operands);
3460
3461 /* Do the middle one of the three words for a long double. */
3462 if (size == 12)
3463 {
3464 if (addreg0)
3465 handle_reg_adjust (addreg0, 4);
3466 if (addreg1)
3467 handle_reg_adjust (addreg1, 4);
3468
3469 m68k_final_prescan_insn (NULL, middlehalf, 2);
3470 handle_movsi (middlehalf);
3471 }
3472
3473 /* Make any unoffsettable addresses point at high-numbered word. */
3474 if (addreg0)
3475 handle_reg_adjust (addreg0, 4);
3476 if (addreg1)
3477 handle_reg_adjust (addreg1, 4);
3478
3479 /* Do that word. */
3480 m68k_final_prescan_insn (NULL, latehalf, 2);
3481 handle_movsi (latehalf);
3482
3483 /* Undo the adds we just did. */
3484 if (addreg0)
3485 handle_reg_adjust (addreg0, -(size - 4));
3486 if (addreg1)
3487 handle_reg_adjust (addreg1, -(size - 4));
3488
3489 return;
3490 }
3491
3492 /* Output assembler code to adjust REG by N. */
3493 static void
3494 output_reg_adjust (rtx reg, int n)
3495 {
3496 const char *s;
3497
3498 gcc_assert (GET_MODE (reg) == SImode
3499 && -12 <= n && n != 0 && n <= 12);
3500
3501 switch (n)
3502 {
3503 case 12:
3504 s = "add%.l #12,%0";
3505 break;
3506
3507 case 8:
3508 s = "addq%.l #8,%0";
3509 break;
3510
3511 case 4:
3512 s = "addq%.l #4,%0";
3513 break;
3514
3515 case -12:
3516 s = "sub%.l #12,%0";
3517 break;
3518
3519 case -8:
3520 s = "subq%.l #8,%0";
3521 break;
3522
3523 case -4:
3524 s = "subq%.l #4,%0";
3525 break;
3526
3527 default:
3528 gcc_unreachable ();
3529 s = NULL;
3530 }
3531
3532 output_asm_insn (s, &reg);
3533 }
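
/* Note (not from the original source): addq/subq only accept immediate
   values 1..8, which is why the +/-12 cases above must fall back to the
   longer add.l/sub.l forms with a full immediate operand.  */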
3534
3535 /* Emit rtl code to adjust REG by N. */
3536 static void
3537 emit_reg_adjust (rtx reg1, int n)
3538 {
3539 rtx reg2;
3540
3541 gcc_assert (GET_MODE (reg1) == SImode
3542 && -12 <= n && n != 0 && n <= 12);
3543
3544 reg1 = copy_rtx (reg1);
3545 reg2 = copy_rtx (reg1);
3546
3547 if (n < 0)
3548 emit_insn (gen_subsi3 (reg1, reg2, GEN_INT (-n)));
3549 else if (n > 0)
3550 emit_insn (gen_addsi3 (reg1, reg2, GEN_INT (n)));
3551 else
3552 gcc_unreachable ();
3553 }
3554
3555 /* Output assembler to load address OPERANDS[1] into register OPERANDS[0]. */
3556 static void
3557 output_compadr (rtx operands[2])
3558 {
3559 output_asm_insn ("lea %a1,%0", operands);
3560 }
3561
3562 /* Output the best assembler insn for moving operands[1] into operands[0]
3563 as a fullword. */
3564 static void
3565 output_movsi (rtx operands[2])
3566 {
3567 output_asm_insn (singlemove_string (operands), operands);
3568 }
3569
3570 /* Copy OP and change its mode to MODE. */
3571 static rtx
3572 copy_operand (rtx op, machine_mode mode)
3573 {
3574 /* ??? This looks really ugly. There must be a better way
3575 to change a mode on the operand. */
3576 if (GET_MODE (op) != VOIDmode)
3577 {
3578 if (REG_P (op))
3579 op = gen_rtx_REG (mode, REGNO (op));
3580 else
3581 {
3582 op = copy_rtx (op);
3583 PUT_MODE (op, mode);
3584 }
3585 }
3586
3587 return op;
3588 }
3589
3590 /* Emit rtl code for moving operands[1] into operands[0] as a fullword. */
3591 static void
3592 emit_movsi (rtx operands[2])
3593 {
3594 operands[0] = copy_operand (operands[0], SImode);
3595 operands[1] = copy_operand (operands[1], SImode);
3596
3597 emit_insn (gen_movsi (operands[0], operands[1]));
3598 }
3599
3600 /* Output assembler code to perform a doubleword move insn
3601 with operands OPERANDS. */
3602 const char *
3603 output_move_double (rtx *operands)
3604 {
3605 handle_move_double (operands,
3606 output_reg_adjust, output_compadr, output_movsi);
3607
3608 return "";
3609 }
3610
3611 /* Output rtl code to perform a doubleword move insn
3612 with operands OPERANDS. */
3613 void
3614 m68k_emit_move_double (rtx operands[2])
3615 {
3616 handle_move_double (operands, emit_reg_adjust, emit_movsi, emit_movsi);
3617 }
3618
3619 /* Ensure mode of ORIG, a REG rtx, is MODE. Returns either ORIG or a
3620 new rtx with the correct mode. */
3621
3622 static rtx
3623 force_mode (machine_mode mode, rtx orig)
3624 {
3625 if (mode == GET_MODE (orig))
3626 return orig;
3627
3628 if (REGNO (orig) >= FIRST_PSEUDO_REGISTER)
3629 abort ();
3630
3631 return gen_rtx_REG (mode, REGNO (orig));
3632 }
3633
3634 static int
3635 fp_reg_operand (rtx op, machine_mode mode ATTRIBUTE_UNUSED)
3636 {
3637 return reg_renumber && FP_REG_P (op);
3638 }
3639
3640 /* Emit insns to move operands[1] into operands[0].
3641
3642 Return 1 if we have written out everything that needs to be done to
3643 do the move. Otherwise, return 0 and the caller will emit the move
3644 normally.
3645
3646 Note SCRATCH_REG may not be in the proper mode depending on how it
3647 will be used. This routine is responsible for creating a new copy
3648 of SCRATCH_REG in the proper mode. */
3649
3650 int
3651 emit_move_sequence (rtx *operands, machine_mode mode, rtx scratch_reg)
3652 {
3653 register rtx operand0 = operands[0];
3654 register rtx operand1 = operands[1];
3655 register rtx tem;
3656
3657 if (scratch_reg
3658 && reload_in_progress && GET_CODE (operand0) == REG
3659 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
3660 operand0 = reg_equiv_mem (REGNO (operand0));
3661 else if (scratch_reg
3662 && reload_in_progress && GET_CODE (operand0) == SUBREG
3663 && GET_CODE (SUBREG_REG (operand0)) == REG
3664 && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
3665 {
3666 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
3667 the code which tracks sets/uses for delete_output_reload. */
3668 rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
3669 reg_equiv_mem (REGNO (SUBREG_REG (operand0))),
3670 SUBREG_BYTE (operand0));
3671 operand0 = alter_subreg (&temp, true);
3672 }
3673
3674 if (scratch_reg
3675 && reload_in_progress && GET_CODE (operand1) == REG
3676 && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
3677 operand1 = reg_equiv_mem (REGNO (operand1));
3678 else if (scratch_reg
3679 && reload_in_progress && GET_CODE (operand1) == SUBREG
3680 && GET_CODE (SUBREG_REG (operand1)) == REG
3681 && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
3682 {
3683 /* We must not alter SUBREG_BYTE (operand1) since that would confuse
3684 the code which tracks sets/uses for delete_output_reload. */
3685 rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
3686 reg_equiv_mem (REGNO (SUBREG_REG (operand1))),
3687 SUBREG_BYTE (operand1));
3688 operand1 = alter_subreg (&temp, true);
3689 }
3690
3691 if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
3692 && ((tem = find_replacement (&XEXP (operand0, 0)))
3693 != XEXP (operand0, 0)))
3694 operand0 = gen_rtx_MEM (GET_MODE (operand0), tem);
3695 if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
3696 && ((tem = find_replacement (&XEXP (operand1, 0)))
3697 != XEXP (operand1, 0)))
3698 operand1 = gen_rtx_MEM (GET_MODE (operand1), tem);
3699
3700 /* Handle secondary reloads for loads/stores of FP registers where
3701 the address is symbolic, by using the scratch register. */
3702 if (fp_reg_operand (operand0, mode)
3703 && ((GET_CODE (operand1) == MEM
3704 && ! memory_address_p (DFmode, XEXP (operand1, 0)))
3705 || ((GET_CODE (operand1) == SUBREG
3706 && GET_CODE (XEXP (operand1, 0)) == MEM
3707 && !memory_address_p (DFmode, XEXP (XEXP (operand1, 0), 0)))))
3708 && scratch_reg)
3709 {
3710 if (GET_CODE (operand1) == SUBREG)
3711 operand1 = XEXP (operand1, 0);
3712
3713 /* SCRATCH_REG will hold an address. We want
3714 it in SImode regardless of what mode it was originally given
3715 to us. */
3716 scratch_reg = force_mode (SImode, scratch_reg);
3717
3718 /* The address might not be legitimate as-is; in such cases, load the
3719 offset term into the scratch reg first. */
3720 if (!memory_address_p (Pmode, XEXP (operand1, 0)))
3721 {
3722 emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
3723 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
3724 Pmode,
3725 XEXP (XEXP (operand1, 0), 0),
3726 scratch_reg));
3727 }
3728 else
3729 emit_move_insn (scratch_reg, XEXP (operand1, 0));
3730 emit_insn (gen_rtx_SET (operand0, gen_rtx_MEM (mode, scratch_reg)));
3731 return 1;
3732 }
3733 else if (fp_reg_operand (operand1, mode)
3734 && ((GET_CODE (operand0) == MEM
3735 && ! memory_address_p (DFmode, XEXP (operand0, 0)))
3736 || ((GET_CODE (operand0) == SUBREG)
3737 && GET_CODE (XEXP (operand0, 0)) == MEM
3738 && !memory_address_p (DFmode, XEXP (XEXP (operand0, 0), 0))))
3739 && scratch_reg)
3740 {
3741 if (GET_CODE (operand0) == SUBREG)
3742 operand0 = XEXP (operand0, 0);
3743
/* SCRATCH_REG will hold an address and maybe the actual data. We want
it in SImode regardless of what mode it was originally given
to us. */
3747 scratch_reg = force_mode (SImode, scratch_reg);
3748
/* The address may still not be legitimate; if so, load its
displacement into the scratch register and recombine it with the
base below. */
3751 if (!memory_address_p (Pmode, XEXP (operand0, 0)))
3752 {
3753 emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
3754 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0,
3755 0)),
3756 Pmode,
3757 XEXP (XEXP (operand0, 0),
3758 0),
3759 scratch_reg));
3760 }
3761 else
3762 emit_move_insn (scratch_reg, XEXP (operand0, 0));
3763 emit_insn (gen_rtx_SET (gen_rtx_MEM (mode, scratch_reg), operand1));
3764 return 1;
3765 }
3766 /* Handle secondary reloads for loads of FP registers from constant
3767 expressions by forcing the constant into memory.
3768
Use scratch_reg to hold the address of the memory location.

The proper fix is to change PREFERRED_RELOAD_CLASS to return
NO_REGS when presented with a const_int and a register class
containing only FP registers. Doing so unfortunately creates
more problems than it solves. Fix this for 2.5. */
3775 else if (fp_reg_operand (operand0, mode)
3776 && CONSTANT_P (operand1)
3777 && scratch_reg)
3778 {
3779 rtx xoperands[2];
3780
/* SCRATCH_REG will hold an address and maybe the actual data. We want
it in SImode regardless of what mode it was originally given
to us. */
3784 scratch_reg = force_mode (SImode, scratch_reg);
3785
3786 /* Force the constant into memory and put the address of the
3787 memory location into scratch_reg. */
3788 xoperands[0] = scratch_reg;
3789 xoperands[1] = XEXP (force_const_mem (mode, operand1), 0);
3790 emit_insn (gen_rtx_SET (scratch_reg, xoperands[1]));
3791
3792 /* Now load the destination register. */
3793 emit_insn (gen_rtx_SET (operand0, gen_rtx_MEM (mode, scratch_reg)));
3794 return 1;
3795 }
3796
3797 /* Now have insn-emit do whatever it normally does. */
3798 return 0;
3799 }
3800
3801 /* Split one or more DImode RTL references into pairs of SImode
3802 references. The RTL can be REG, offsettable MEM, integer constant, or
3803 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
3804 split and "num" is its length. lo_half and hi_half are output arrays
3805 that parallel "operands". */
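/* For example (illustrative): for a DImode MEM at (a0), hi_half is the
SImode MEM at offset 0 and lo_half the SImode MEM at offset 4,
matching the big-endian word order handled below. */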
3806
3807 void
3808 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
3809 {
3810 while (num--)
3811 {
3812 rtx op = operands[num];
3813
/* simplify_subreg refuses to split volatile memory references,
but we still have to handle them. */
3816 if (GET_CODE (op) == MEM)
3817 {
3818 lo_half[num] = adjust_address (op, SImode, 4);
3819 hi_half[num] = adjust_address (op, SImode, 0);
3820 }
3821 else
3822 {
3823 lo_half[num] = simplify_gen_subreg (SImode, op,
3824 GET_MODE (op) == VOIDmode
3825 ? DImode : GET_MODE (op), 4);
3826 hi_half[num] = simplify_gen_subreg (SImode, op,
3827 GET_MODE (op) == VOIDmode
3828 ? DImode : GET_MODE (op), 0);
3829 }
3830 }
3831 }
3832
3833 /* Split X into a base and a constant offset, storing them in *BASE
3834 and *OFFSET respectively. */
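/* E.g. (illustrative): X = (plus (reg A0) (const_int 8)) yields
*BASE = (reg A0) and *OFFSET = 8; any other X yields *BASE = X
and *OFFSET = 0. */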
3835
3836 static void
3837 m68k_split_offset (rtx x, rtx *base, HOST_WIDE_INT *offset)
3838 {
3839 *offset = 0;
3840 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
3841 {
3842 *offset += INTVAL (XEXP (x, 1));
3843 x = XEXP (x, 0);
3844 }
3845 *base = x;
3846 }
3847
3848 /* Return true if PATTERN is a PARALLEL suitable for a movem or fmovem
3849 instruction. STORE_P says whether the move is a load or store.
3850
3851 If the instruction uses post-increment or pre-decrement addressing,
3852 AUTOMOD_BASE is the base register and AUTOMOD_OFFSET is the total
3853 adjustment. This adjustment will be made by the first element of
3854 PARALLEL, with the loads or stores starting at element 1. If the
3855 instruction does not use post-increment or pre-decrement addressing,
3856 AUTOMOD_BASE is null, AUTOMOD_OFFSET is 0, and the loads or stores
3857 start at element 0. */
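/* For example (illustrative): a store "movem.l d2-d4,(a0)" corresponds
to a PARALLEL of three SETs writing d2, d3 and d4 to (a0), (4,a0)
and (8,a0); the consecutive offsets and increasing register numbers
are exactly what the loop below checks. */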
3858
3859 bool
3860 m68k_movem_pattern_p (rtx pattern, rtx automod_base,
3861 HOST_WIDE_INT automod_offset, bool store_p)
3862 {
3863 rtx base, mem_base, set, mem, reg, last_reg;
3864 HOST_WIDE_INT offset, mem_offset;
3865 int i, first, len;
3866 enum reg_class rclass;
3867
3868 len = XVECLEN (pattern, 0);
3869 first = (automod_base != NULL);
3870
3871 if (automod_base)
3872 {
3873 /* Stores must be pre-decrement and loads must be post-increment. */
3874 if (store_p != (automod_offset < 0))
3875 return false;
3876
3877 /* Work out the base and offset for lowest memory location. */
3878 base = automod_base;
3879 offset = (automod_offset < 0 ? automod_offset : 0);
3880 }
3881 else
3882 {
3883 /* Allow any valid base and offset in the first access. */
3884 base = NULL;
3885 offset = 0;
3886 }
3887
3888 last_reg = NULL;
3889 rclass = NO_REGS;
3890 for (i = first; i < len; i++)
3891 {
3892 /* We need a plain SET. */
3893 set = XVECEXP (pattern, 0, i);
3894 if (GET_CODE (set) != SET)
3895 return false;
3896
3897 /* Check that we have a memory location... */
3898 mem = XEXP (set, !store_p);
3899 if (!MEM_P (mem) || !memory_operand (mem, VOIDmode))
3900 return false;
3901
3902 /* ...with the right address. */
3903 if (base == NULL)
3904 {
3905 m68k_split_offset (XEXP (mem, 0), &base, &offset);
3906 /* The ColdFire instruction only allows (An) and (d16,An) modes.
3907 There are no mode restrictions for 680x0 besides the
3908 automodification rules enforced above. */
3909 if (TARGET_COLDFIRE
3910 && !m68k_legitimate_base_reg_p (base, reload_completed))
3911 return false;
3912 }
3913 else
3914 {
3915 m68k_split_offset (XEXP (mem, 0), &mem_base, &mem_offset);
3916 if (!rtx_equal_p (base, mem_base) || offset != mem_offset)
3917 return false;
3918 }
3919
3920 /* Check that we have a register of the required mode and class. */
3921 reg = XEXP (set, store_p);
3922 if (!REG_P (reg)
3923 || !HARD_REGISTER_P (reg)
3924 || GET_MODE (reg) != reg_raw_mode[REGNO (reg)])
3925 return false;
3926
3927 if (last_reg)
3928 {
3929 /* The register must belong to RCLASS and have a higher number
3930 than the register in the previous SET. */
3931 if (!TEST_HARD_REG_BIT (reg_class_contents[rclass], REGNO (reg))
3932 || REGNO (last_reg) >= REGNO (reg))
3933 return false;
3934 }
3935 else
3936 {
3937 /* Work out which register class we need. */
3938 if (INT_REGNO_P (REGNO (reg)))
3939 rclass = GENERAL_REGS;
3940 else if (FP_REGNO_P (REGNO (reg)))
3941 rclass = FP_REGS;
3942 else
3943 return false;
3944 }
3945
3946 last_reg = reg;
3947 offset += GET_MODE_SIZE (GET_MODE (reg));
3948 }
3949
3950 /* If we have an automodification, check whether the final offset is OK. */
3951 if (automod_base && offset != (automod_offset < 0 ? 0 : automod_offset))
3952 return false;
3953
3954 /* Reject unprofitable cases. */
3955 if (len < first + (rclass == FP_REGS ? MIN_FMOVEM_REGS : MIN_MOVEM_REGS))
3956 return false;
3957
3958 return true;
3959 }
3960
3961 /* Return the assembly code template for a movem or fmovem instruction
3962 whose pattern is given by PATTERN. Store the template's operands
3963 in OPERANDS.
3964
3965 If the instruction uses post-increment or pre-decrement addressing,
3966 AUTOMOD_OFFSET is the total adjustment, otherwise it is 0. STORE_P
3967 is true if this is a store instruction. */
3968
3969 const char *
3970 m68k_output_movem (rtx *operands, rtx pattern,
3971 HOST_WIDE_INT automod_offset, bool store_p)
3972 {
3973 unsigned int mask;
3974 int i, first;
3975
3976 gcc_assert (GET_CODE (pattern) == PARALLEL);
3977 mask = 0;
3978 first = (automod_offset != 0);
3979 for (i = first; i < XVECLEN (pattern, 0); i++)
3980 {
3981 /* When using movem with pre-decrement addressing, register X + D0_REG
3982 is controlled by bit 15 - X. For all other addressing modes,
3983 register X + D0_REG is controlled by bit X. Confusingly, the
3984 register mask for fmovem is in the opposite order to that for
3985 movem. */
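/* Example (illustrative): saving d2, d3 and a2 with pre-decrement
addressing sets bits 15-2, 15-3 and 15-10, giving mask 0x3020;
in any other addressing mode the same registers give 0x040c. */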
3986 unsigned int regno;
3987
3988 gcc_assert (MEM_P (XEXP (XVECEXP (pattern, 0, i), !store_p)));
3989 gcc_assert (REG_P (XEXP (XVECEXP (pattern, 0, i), store_p)));
3990 regno = REGNO (XEXP (XVECEXP (pattern, 0, i), store_p));
3991 if (automod_offset < 0)
3992 {
3993 if (FP_REGNO_P (regno))
3994 mask |= 1 << (regno - FP0_REG);
3995 else
3996 mask |= 1 << (15 - (regno - D0_REG));
3997 }
3998 else
3999 {
4000 if (FP_REGNO_P (regno))
4001 mask |= 1 << (7 - (regno - FP0_REG));
4002 else
4003 mask |= 1 << (regno - D0_REG);
4004 }
4005 }
4006 CC_STATUS_INIT;
4007
4008 if (automod_offset == 0)
4009 operands[0] = XEXP (XEXP (XVECEXP (pattern, 0, first), !store_p), 0);
4010 else if (automod_offset < 0)
4011 operands[0] = gen_rtx_PRE_DEC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
4012 else
4013 operands[0] = gen_rtx_POST_INC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
4014 operands[1] = GEN_INT (mask);
4015 if (FP_REGNO_P (REGNO (XEXP (XVECEXP (pattern, 0, first), store_p))))
4016 {
4017 if (store_p)
4018 return "fmovem %1,%a0";
4019 else
4020 return "fmovem %a0,%1";
4021 }
4022 else
4023 {
4024 if (store_p)
4025 return "movem%.l %1,%a0";
4026 else
4027 return "movem%.l %a0,%1";
4028 }
4029 }
4030
4031 /* Return a REG that occurs in ADDR with coefficient 1.
4032 ADDR can be effectively incremented by incrementing REG. */
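/* E.g. (illustrative): for ADDR = (plus (plus (reg a0) (reg d1))
(const_int 4)) this returns (reg a0). */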
4033
4034 static rtx
4035 find_addr_reg (rtx addr)
4036 {
4037 while (GET_CODE (addr) == PLUS)
4038 {
4039 if (GET_CODE (XEXP (addr, 0)) == REG)
4040 addr = XEXP (addr, 0);
4041 else if (GET_CODE (XEXP (addr, 1)) == REG)
4042 addr = XEXP (addr, 1);
4043 else if (CONSTANT_P (XEXP (addr, 0)))
4044 addr = XEXP (addr, 1);
4045 else if (CONSTANT_P (XEXP (addr, 1)))
4046 addr = XEXP (addr, 0);
4047 else
4048 gcc_unreachable ();
4049 }
4050 gcc_assert (GET_CODE (addr) == REG);
4051 return addr;
4052 }
4053
4054 /* Output assembler code to perform a 32-bit 3-operand add. */
4055
4056 const char *
4057 output_addsi3 (rtx *operands)
4058 {
4059 if (! operands_match_p (operands[0], operands[1]))
4060 {
4061 if (!ADDRESS_REG_P (operands[1]))
4062 {
4063 rtx tmp = operands[1];
4064
4065 operands[1] = operands[2];
4066 operands[2] = tmp;
4067 }
4068
4069 /* These insns can result from reloads to access
4070 stack slots over 64k from the frame pointer. */
4071 if (GET_CODE (operands[2]) == CONST_INT
4072 && (INTVAL (operands[2]) < -32768 || INTVAL (operands[2]) > 32767))
4073 return "move%.l %2,%0\n\tadd%.l %1,%0";
4074 if (GET_CODE (operands[2]) == REG)
4075 return MOTOROLA ? "lea (%1,%2.l),%0" : "lea %1@(0,%2:l),%0";
4076 return MOTOROLA ? "lea (%c2,%1),%0" : "lea %1@(%c2),%0";
4077 }
4078 if (GET_CODE (operands[2]) == CONST_INT)
4079 {
4080 if (INTVAL (operands[2]) > 0
4081 && INTVAL (operands[2]) <= 8)
4082 return "addq%.l %2,%0";
4083 if (INTVAL (operands[2]) < 0
4084 && INTVAL (operands[2]) >= -8)
4085 {
4086 operands[2] = GEN_INT (- INTVAL (operands[2]));
4087 return "subq%.l %2,%0";
4088 }
4089 /* On the CPU32 it is faster to use two addql instructions to
4090 add a small integer (8 < N <= 16) to a register.
4091 Likewise for subql. */
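/* E.g. (illustrative): adding 12 is emitted as "addq%.l #8,%0"
followed by "addq%.l #4,%0". */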
4092 if (TUNE_CPU32 && REG_P (operands[0]))
4093 {
4094 if (INTVAL (operands[2]) > 8
4095 && INTVAL (operands[2]) <= 16)
4096 {
4097 operands[2] = GEN_INT (INTVAL (operands[2]) - 8);
4098 return "addq%.l #8,%0\n\taddq%.l %2,%0";
4099 }
4100 if (INTVAL (operands[2]) < -8
4101 && INTVAL (operands[2]) >= -16)
4102 {
4103 operands[2] = GEN_INT (- INTVAL (operands[2]) - 8);
4104 return "subq%.l #8,%0\n\tsubq%.l %2,%0";
4105 }
4106 }
4107 if (ADDRESS_REG_P (operands[0])
4108 && INTVAL (operands[2]) >= -0x8000
4109 && INTVAL (operands[2]) < 0x8000)
4110 {
4111 if (TUNE_68040)
4112 return "add%.w %2,%0";
4113 else
4114 return MOTOROLA ? "lea (%c2,%0),%0" : "lea %0@(%c2),%0";
4115 }
4116 }
4117 return "add%.l %2,%0";
4118 }
4119 \f
4120 /* Store in cc_status the expressions that the condition codes will
4121 describe after execution of an instruction whose pattern is EXP.
4122 Do not alter them if the instruction would not alter the cc's. */
4123
/* On the 68000, all the insns to store in an address register fail to
set the cc's. However, in some cases these instructions can make it
invalid to use the saved cc's. In those cases we clear out some or
all of the saved cc's so they won't be used. */
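/* For example (illustrative): after "move%.l %d1,%d0", value1 is d0
and value2 is d1, so a following test of d0 against zero can reuse
the saved cc's instead of emitting a tst insn. */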
4128
4129 void
4130 notice_update_cc (rtx exp, rtx insn)
4131 {
4132 if (GET_CODE (exp) == SET)
4133 {
4134 if (GET_CODE (SET_SRC (exp)) == CALL)
4135 CC_STATUS_INIT;
4136 else if (ADDRESS_REG_P (SET_DEST (exp)))
4137 {
4138 if (cc_status.value1 && modified_in_p (cc_status.value1, insn))
4139 cc_status.value1 = 0;
4140 if (cc_status.value2 && modified_in_p (cc_status.value2, insn))
4141 cc_status.value2 = 0;
4142 }
4143 /* fmoves to memory or data registers do not set the condition
4144 codes. Normal moves _do_ set the condition codes, but not in
4145 a way that is appropriate for comparison with 0, because -0.0
4146 would be treated as a negative nonzero number. Note that it
4147 isn't appropriate to conditionalize this restriction on
4148 HONOR_SIGNED_ZEROS because that macro merely indicates whether
4149 we care about the difference between -0.0 and +0.0. */
4150 else if (!FP_REG_P (SET_DEST (exp))
4151 && SET_DEST (exp) != cc0_rtx
4152 && (FP_REG_P (SET_SRC (exp))
4153 || GET_CODE (SET_SRC (exp)) == FIX
4154 || FLOAT_MODE_P (GET_MODE (SET_DEST (exp)))))
4155 CC_STATUS_INIT;
4156 /* A pair of move insns doesn't produce a useful overall cc. */
4157 else if (!FP_REG_P (SET_DEST (exp))
4158 && !FP_REG_P (SET_SRC (exp))
4159 && GET_MODE_SIZE (GET_MODE (SET_SRC (exp))) > 4
4160 && (GET_CODE (SET_SRC (exp)) == REG
4161 || GET_CODE (SET_SRC (exp)) == MEM
4162 || GET_CODE (SET_SRC (exp)) == CONST_DOUBLE))
4163 CC_STATUS_INIT;
4164 else if (SET_DEST (exp) != pc_rtx)
4165 {
4166 cc_status.flags = 0;
4167 cc_status.value1 = SET_DEST (exp);
4168 cc_status.value2 = SET_SRC (exp);
4169 }
4170 }
4171 else if (GET_CODE (exp) == PARALLEL
4172 && GET_CODE (XVECEXP (exp, 0, 0)) == SET)
4173 {
4174 rtx dest = SET_DEST (XVECEXP (exp, 0, 0));
4175 rtx src = SET_SRC (XVECEXP (exp, 0, 0));
4176
4177 if (ADDRESS_REG_P (dest))
4178 CC_STATUS_INIT;
4179 else if (dest != pc_rtx)
4180 {
4181 cc_status.flags = 0;
4182 cc_status.value1 = dest;
4183 cc_status.value2 = src;
4184 }
4185 }
4186 else
4187 CC_STATUS_INIT;
4188 if (cc_status.value2 != 0
4189 && ADDRESS_REG_P (cc_status.value2)
4190 && GET_MODE (cc_status.value2) == QImode)
4191 CC_STATUS_INIT;
4192 if (cc_status.value2 != 0)
4193 switch (GET_CODE (cc_status.value2))
4194 {
4195 case ASHIFT: case ASHIFTRT: case LSHIFTRT:
4196 case ROTATE: case ROTATERT:
4197 /* These instructions always clear the overflow bit, and set
4198 the carry to the bit shifted out. */
4199 cc_status.flags |= CC_OVERFLOW_UNUSABLE | CC_NO_CARRY;
4200 break;
4201
4202 case PLUS: case MINUS: case MULT:
4203 case DIV: case UDIV: case MOD: case UMOD: case NEG:
4204 if (GET_MODE (cc_status.value2) != VOIDmode)
4205 cc_status.flags |= CC_NO_OVERFLOW;
4206 break;
4207 case ZERO_EXTEND:
4208 /* (SET r1 (ZERO_EXTEND r2)) on this machine
4209 ends with a move insn moving r2 in r2's mode.
4210 Thus, the cc's are set for r2.
This can set the N bit spuriously. */
cc_status.flags |= CC_NOT_NEGATIVE;
/* FALLTHRU */

4214 default:
4215 break;
4216 }
4217 if (cc_status.value1 && GET_CODE (cc_status.value1) == REG
4218 && cc_status.value2
4219 && reg_overlap_mentioned_p (cc_status.value1, cc_status.value2))
4220 cc_status.value2 = 0;
4221 /* Check for PRE_DEC in dest modifying a register used in src. */
4222 if (cc_status.value1 && GET_CODE (cc_status.value1) == MEM
4223 && GET_CODE (XEXP (cc_status.value1, 0)) == PRE_DEC
4224 && cc_status.value2
4225 && reg_overlap_mentioned_p (XEXP (XEXP (cc_status.value1, 0), 0),
4226 cc_status.value2))
4227 cc_status.value2 = 0;
4228 if (((cc_status.value1 && FP_REG_P (cc_status.value1))
4229 || (cc_status.value2 && FP_REG_P (cc_status.value2))))
4230 cc_status.flags = CC_IN_68881;
4231 if (cc_status.value2 && GET_CODE (cc_status.value2) == COMPARE
4232 && GET_MODE_CLASS (GET_MODE (XEXP (cc_status.value2, 0))) == MODE_FLOAT)
4233 {
4234 cc_status.flags = CC_IN_68881;
4235 if (!FP_REG_P (XEXP (cc_status.value2, 0))
4236 && FP_REG_P (XEXP (cc_status.value2, 1)))
4237 cc_status.flags |= CC_REVERSED;
4238 }
4239 }
4240 \f
4241 const char *
4242 output_move_const_double (rtx *operands)
4243 {
4244 int code = standard_68881_constant_p (operands[1]);
4245
4246 if (code != 0)
4247 {
4248 static char buf[40];
4249
4250 sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff);
4251 return buf;
4252 }
4253 return "fmove%.d %1,%0";
4254 }
4255
4256 const char *
4257 output_move_const_single (rtx *operands)
4258 {
4259 int code = standard_68881_constant_p (operands[1]);
4260
4261 if (code != 0)
4262 {
4263 static char buf[40];
4264
4265 sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff);
4266 return buf;
4267 }
4268 return "fmove%.s %f1,%0";
4269 }
4270
4271 /* Return nonzero if X, a CONST_DOUBLE, has a value that we can get
4272 from the "fmovecr" instruction.
4273 The value, anded with 0xff, gives the code to use in fmovecr
4274 to get the desired constant. */
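/* For example (illustrative): 1.0 maps to code 0x32, so it can be
loaded with "fmovecr #0x32,%fp0". */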
4275
4276 /* This code has been fixed for cross-compilation. */
4277
4278 static int inited_68881_table = 0;
4279
4280 static const char *const strings_68881[7] = {
4281 "0.0",
4282 "1.0",
4283 "10.0",
4284 "100.0",
4285 "10000.0",
4286 "1e8",
4287 "1e16"
4288 };
4289
4290 static const int codes_68881[7] = {
4291 0x0f,
4292 0x32,
4293 0x33,
4294 0x34,
4295 0x35,
4296 0x36,
4297 0x37
4298 };
4299
4300 REAL_VALUE_TYPE values_68881[7];
4301
4302 /* Set up values_68881 array by converting the decimal values
4303 strings_68881 to binary. */
4304
4305 void
4306 init_68881_table (void)
4307 {
4308 int i;
4309 REAL_VALUE_TYPE r;
4310 machine_mode mode;
4311
4312 mode = SFmode;
4313 for (i = 0; i < 7; i++)
4314 {
4315 if (i == 6)
4316 mode = DFmode;
4317 r = REAL_VALUE_ATOF (strings_68881[i], mode);
4318 values_68881[i] = r;
4319 }
4320 inited_68881_table = 1;
4321 }
4322
4323 int
4324 standard_68881_constant_p (rtx x)
4325 {
4326 REAL_VALUE_TYPE r;
4327 int i;
4328
4329 /* fmovecr must be emulated on the 68040 and 68060, so it shouldn't be
4330 used at all on those chips. */
4331 if (TUNE_68040_60)
4332 return 0;
4333
4334 if (! inited_68881_table)
4335 init_68881_table ();
4336
4337 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4338
4339 /* Use real_identical instead of real_equal so that -0.0 is rejected. */
4340 for (i = 0; i < 6; i++)
4341 {
4342 if (real_identical (&r, &values_68881[i]))
4343 return (codes_68881[i]);
4344 }
4345
4346 if (GET_MODE (x) == SFmode)
4347 return 0;
4348
4349 if (real_equal (&r, &values_68881[6]))
4350 return (codes_68881[6]);
4351
/* Larger powers of ten in the constants RAM are not used
because they are not equal to a `double' C constant. */
4354 return 0;
4355 }
4356
4357 /* If X is a floating-point constant, return the logarithm of X base 2,
4358 or 0 if X is not a power of 2. */
4359
4360 int
4361 floating_exact_log2 (rtx x)
4362 {
4363 REAL_VALUE_TYPE r, r1;
4364 int exp;
4365
4366 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4367
4368 if (REAL_VALUES_LESS (r, dconst1))
4369 return 0;
4370
4371 exp = real_exponent (&r);
4372 real_2expN (&r1, exp, DFmode);
4373 if (real_equal (&r1, &r))
4374 return exp;
4375
4376 return 0;
4377 }
4378 \f
4379 /* A C compound statement to output to stdio stream STREAM the
4380 assembler syntax for an instruction operand X. X is an RTL
4381 expression.
4382
4383 CODE is a value that can be used to specify one of several ways
4384 of printing the operand. It is used when identical operands
4385 must be printed differently depending on the context. CODE
4386 comes from the `%' specification that was used to request
4387 printing of the operand. If the specification was just `%DIGIT'
4388 then CODE is 0; if the specification was `%LTR DIGIT' then CODE
4389 is the ASCII code for LTR.
4390
4391 If X is a register, this macro should print the register's name.
4392 The names can be found in an array `reg_names' whose type is
4393 `char *[]'. `reg_names' is initialized from `REGISTER_NAMES'.
4394
4395 When the machine description has a specification `%PUNCT' (a `%'
4396 followed by a punctuation character), this macro is called with
4397 a null pointer for X and the punctuation character for CODE.
4398
4399 The m68k specific codes are:
4400
4401 '.' for dot needed in Motorola-style opcode names.
4402 '-' for an operand pushing on the stack:
4403 sp@-, -(sp) or -(%sp) depending on the style of syntax.
4404 '+' for an operand pushing on the stack:
4405 sp@+, (sp)+ or (%sp)+ depending on the style of syntax.
4406 '@' for a reference to the top word on the stack:
4407 sp@, (sp) or (%sp) depending on the style of syntax.
4408 '#' for an immediate operand prefix (# in MIT and Motorola syntax
4409 but & in SGS syntax).
4410 '!' for the cc register (used in an `and to cc' insn).
4411 '$' for the letter `s' in an op code, but only on the 68040.
4412 '&' for the letter `d' in an op code, but only on the 68040.
4413 '/' for register prefix needed by longlong.h.
4414 '?' for m68k_library_id_string
4415
'b' for byte insn (no effect on the Sun; this is for the ISI).
4417 'd' to force memory addressing to be absolute, not relative.
4418 'f' for float insn (print a CONST_DOUBLE as a float rather than in hex)
4419 'x' for float insn (print a CONST_DOUBLE as a float rather than in hex),
4420 or print pair of registers as rx:ry.
4421 'p' print an address with @PLTPC attached, but only if the operand
4422 is not locally-bound. */
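/* For instance (illustrative), '%-' in an output template prints
"-(%sp)" under MOTOROLA syntax and "sp@-" under MIT syntax; see the
handlers below. */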
4423
4424 void
4425 print_operand (FILE *file, rtx op, int letter)
4426 {
4427 if (letter == '.')
4428 {
4429 if (MOTOROLA)
4430 fprintf (file, ".");
4431 }
4432 else if (letter == '#')
4433 asm_fprintf (file, "%I");
4434 else if (letter == '-')
4435 asm_fprintf (file, MOTOROLA ? "-(%Rsp)" : "%Rsp@-");
4436 else if (letter == '+')
4437 asm_fprintf (file, MOTOROLA ? "(%Rsp)+" : "%Rsp@+");
4438 else if (letter == '@')
4439 asm_fprintf (file, MOTOROLA ? "(%Rsp)" : "%Rsp@");
4440 else if (letter == '!')
4441 asm_fprintf (file, "%Rfpcr");
4442 else if (letter == '$')
4443 {
4444 if (TARGET_68040)
4445 fprintf (file, "s");
4446 }
4447 else if (letter == '&')
4448 {
4449 if (TARGET_68040)
4450 fprintf (file, "d");
4451 }
4452 else if (letter == '/')
4453 asm_fprintf (file, "%R");
4454 else if (letter == '?')
4455 asm_fprintf (file, m68k_library_id_string);
4456 else if (letter == 'p')
4457 {
4458 output_addr_const (file, op);
4459 if (!(GET_CODE (op) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (op)))
4460 fprintf (file, "@PLTPC");
4461 }
4462 else if (GET_CODE (op) == REG)
4463 {
4464 if (letter == 'R')
4465 /* Print out the second register name of a register pair.
4466 I.e., R (6) => 7. */
4467 fputs (M68K_REGNAME(REGNO (op) + 1), file);
4468 else
4469 fputs (M68K_REGNAME(REGNO (op)), file);
4470 }
4471 else if (GET_CODE (op) == MEM)
4472 {
4473 output_address (XEXP (op, 0));
4474 if (letter == 'd' && ! TARGET_68020
4475 && CONSTANT_ADDRESS_P (XEXP (op, 0))
4476 && !(GET_CODE (XEXP (op, 0)) == CONST_INT
4477 && INTVAL (XEXP (op, 0)) < 0x8000
4478 && INTVAL (XEXP (op, 0)) >= -0x8000))
4479 fprintf (file, MOTOROLA ? ".l" : ":l");
4480 }
4481 else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == SFmode)
4482 {
4483 REAL_VALUE_TYPE r;
4484 long l;
4485 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
4486 REAL_VALUE_TO_TARGET_SINGLE (r, l);
4487 asm_fprintf (file, "%I0x%lx", l & 0xFFFFFFFF);
4488 }
4489 else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == XFmode)
4490 {
4491 REAL_VALUE_TYPE r;
4492 long l[3];
4493 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
4494 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
4495 asm_fprintf (file, "%I0x%lx%08lx%08lx", l[0] & 0xFFFFFFFF,
4496 l[1] & 0xFFFFFFFF, l[2] & 0xFFFFFFFF);
4497 }
4498 else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == DFmode)
4499 {
4500 REAL_VALUE_TYPE r;
4501 long l[2];
4502 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
4503 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
4504 asm_fprintf (file, "%I0x%lx%08lx", l[0] & 0xFFFFFFFF, l[1] & 0xFFFFFFFF);
4505 }
4506 else
4507 {
4508 /* Use `print_operand_address' instead of `output_addr_const'
4509 to ensure that we print relevant PIC stuff. */
4510 asm_fprintf (file, "%I");
4511 if (TARGET_PCREL
4512 && (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == CONST))
4513 print_operand_address (file, op);
4514 else
4515 output_addr_const (file, op);
4516 }
4517 }
4518
4519 /* Return string for TLS relocation RELOC. */
4520
4521 static const char *
4522 m68k_get_reloc_decoration (enum m68k_reloc reloc)
4523 {
4524 /* To my knowledge, !MOTOROLA assemblers don't support TLS. */
4525 gcc_assert (MOTOROLA || reloc == RELOC_GOT);
4526
4527 switch (reloc)
4528 {
4529 case RELOC_GOT:
4530 if (MOTOROLA)
4531 {
4532 if (flag_pic == 1 && TARGET_68020)
4533 return "@GOT.w";
4534 else
4535 return "@GOT";
4536 }
4537 else
4538 {
4539 if (TARGET_68020)
4540 {
4541 switch (flag_pic)
4542 {
4543 case 1:
4544 return ":w";
4545 case 2:
4546 return ":l";
4547 default:
4548 return "";
4549 }
4550 }
}
/* A !MOTOROLA, !TARGET_68020 GOT reference needs no decoration;
do not fall through to the TLS cases below. */
return "";

4553 case RELOC_TLSGD:
4554 return "@TLSGD";
4555
4556 case RELOC_TLSLDM:
4557 return "@TLSLDM";
4558
4559 case RELOC_TLSLDO:
4560 return "@TLSLDO";
4561
4562 case RELOC_TLSIE:
4563 return "@TLSIE";
4564
4565 case RELOC_TLSLE:
4566 return "@TLSLE";
4567
4568 default:
4569 gcc_unreachable ();
4570 }
4571 }
4572
4573 /* m68k implementation of TARGET_OUTPUT_ADDR_CONST_EXTRA. */
4574
4575 static bool
4576 m68k_output_addr_const_extra (FILE *file, rtx x)
4577 {
4578 if (GET_CODE (x) == UNSPEC)
4579 {
4580 switch (XINT (x, 1))
4581 {
4582 case UNSPEC_RELOC16:
4583 case UNSPEC_RELOC32:
4584 output_addr_const (file, XVECEXP (x, 0, 0));
4585 fputs (m68k_get_reloc_decoration
4586 ((enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1))), file);
4587 return true;
4588
4589 default:
4590 break;
4591 }
4592 }
4593
4594 return false;
4595 }
4596
4597 /* M68K implementation of TARGET_ASM_OUTPUT_DWARF_DTPREL. */
4598
4599 static void
4600 m68k_output_dwarf_dtprel (FILE *file, int size, rtx x)
4601 {
4602 gcc_assert (size == 4);
4603 fputs ("\t.long\t", file);
4604 output_addr_const (file, x);
4605 fputs ("@TLSLDO+0x8000", file);
4606 }
4607
4608 /* In the name of slightly smaller debug output, and to cater to
4609 general assembler lossage, recognize various UNSPEC sequences
4610 and turn them back into a direct symbol reference. */
4611
4612 static rtx
4613 m68k_delegitimize_address (rtx orig_x)
4614 {
4615 rtx x;
4616 struct m68k_address addr;
4617 rtx unspec;
4618
4619 orig_x = delegitimize_mem_from_attrs (orig_x);
4620 x = orig_x;
4621 if (MEM_P (x))
4622 x = XEXP (x, 0);
4623
4624 if (GET_CODE (x) != PLUS || GET_MODE (x) != Pmode)
4625 return orig_x;
4626
4627 if (!m68k_decompose_address (GET_MODE (x), x, false, &addr)
4628 || addr.offset == NULL_RTX
4629 || GET_CODE (addr.offset) != CONST)
4630 return orig_x;
4631
4632 unspec = XEXP (addr.offset, 0);
4633 if (GET_CODE (unspec) == PLUS && CONST_INT_P (XEXP (unspec, 1)))
4634 unspec = XEXP (unspec, 0);
4635 if (GET_CODE (unspec) != UNSPEC
4636 || (XINT (unspec, 1) != UNSPEC_RELOC16
4637 && XINT (unspec, 1) != UNSPEC_RELOC32))
4638 return orig_x;
4639 x = XVECEXP (unspec, 0, 0);
4640 gcc_assert (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF);
4641 if (unspec != XEXP (addr.offset, 0))
4642 x = gen_rtx_PLUS (Pmode, x, XEXP (XEXP (addr.offset, 0), 1));
4643 if (addr.index)
4644 {
4645 rtx idx = addr.index;
4646 if (addr.scale != 1)
4647 idx = gen_rtx_MULT (Pmode, idx, GEN_INT (addr.scale));
4648 x = gen_rtx_PLUS (Pmode, idx, x);
4649 }
4650 if (addr.base)
4651 x = gen_rtx_PLUS (Pmode, addr.base, x);
4652 if (MEM_P (orig_x))
4653 x = replace_equiv_address_nv (orig_x, x);
4654 return x;
4655 }
4656
4657 \f
4658 /* A C compound statement to output to stdio stream STREAM the
4659 assembler syntax for an instruction operand that is a memory
4660 reference whose address is ADDR. ADDR is an RTL expression.
4661
4662 Note that this contains a kludge that knows that the only reason
4663 we have an address (plus (label_ref...) (reg...)) when not generating
4664 PIC code is in the insn before a tablejump, and we know that m68k.md
4665 generates a label LInnn: on such an insn.
4666
4667 It is possible for PIC to generate a (plus (label_ref...) (reg...))
4668 and we handle that just like we would a (plus (symbol_ref...) (reg...)).
4669
4670 This routine is responsible for distinguishing between -fpic and -fPIC
4671 style relocations in an address. When generating -fpic code the
4672 offset is output in word mode (e.g. movel a5@(_foo:w), a0). When generating
-fPIC code the offset is output in long mode (e.g. movel a5@(_foo:l), a0). */
4674
4675 void
4676 print_operand_address (FILE *file, rtx addr)
4677 {
4678 struct m68k_address address;
4679
4680 if (!m68k_decompose_address (QImode, addr, true, &address))
4681 gcc_unreachable ();
4682
4683 if (address.code == PRE_DEC)
4684 fprintf (file, MOTOROLA ? "-(%s)" : "%s@-",
4685 M68K_REGNAME (REGNO (address.base)));
4686 else if (address.code == POST_INC)
4687 fprintf (file, MOTOROLA ? "(%s)+" : "%s@+",
4688 M68K_REGNAME (REGNO (address.base)));
4689 else if (!address.base && !address.index)
4690 {
4691 /* A constant address. */
4692 gcc_assert (address.offset == addr);
4693 if (GET_CODE (addr) == CONST_INT)
4694 {
4695 /* (xxx).w or (xxx).l. */
4696 if (IN_RANGE (INTVAL (addr), -0x8000, 0x7fff))
4697 fprintf (file, MOTOROLA ? "%d.w" : "%d:w", (int) INTVAL (addr));
4698 else
4699 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (addr));
4700 }
4701 else if (TARGET_PCREL)
4702 {
4703 /* (d16,PC) or (bd,PC,Xn) (with suppressed index register). */
4704 fputc ('(', file);
4705 output_addr_const (file, addr);
4706 asm_fprintf (file, flag_pic == 1 ? ":w,%Rpc)" : ":l,%Rpc)");
4707 }
4708 else
4709 {
4710 /* (xxx).l. We need a special case for SYMBOL_REF if the symbol
4711 name ends in `.<letter>', as the last 2 characters can be
4712 mistaken as a size suffix. Put the name in parentheses. */
4713 if (GET_CODE (addr) == SYMBOL_REF
4714 && strlen (XSTR (addr, 0)) > 2
4715 && XSTR (addr, 0)[strlen (XSTR (addr, 0)) - 2] == '.')
4716 {
4717 putc ('(', file);
4718 output_addr_const (file, addr);
4719 putc (')', file);
4720 }
4721 else
4722 output_addr_const (file, addr);
4723 }
4724 }
4725 else
4726 {
4727 int labelno;
4728
4729 /* If ADDR is a (d8,pc,Xn) address, this is the number of the
4730 label being accessed, otherwise it is -1. */
4731 labelno = (address.offset
4732 && !address.base
4733 && GET_CODE (address.offset) == LABEL_REF
4734 ? CODE_LABEL_NUMBER (XEXP (address.offset, 0))
4735 : -1);
4736 if (MOTOROLA)
4737 {
4738 /* Print the "offset(base" component. */
4739 if (labelno >= 0)
4740 asm_fprintf (file, "%LL%d(%Rpc,", labelno);
4741 else
4742 {
4743 if (address.offset)
4744 output_addr_const (file, address.offset);
4745
4746 putc ('(', file);
4747 if (address.base)
4748 fputs (M68K_REGNAME (REGNO (address.base)), file);
4749 }
4750 /* Print the ",index" component, if any. */
4751 if (address.index)
4752 {
4753 if (address.base)
4754 putc (',', file);
4755 fprintf (file, "%s.%c",
4756 M68K_REGNAME (REGNO (address.index)),
4757 GET_MODE (address.index) == HImode ? 'w' : 'l');
4758 if (address.scale != 1)
4759 fprintf (file, "*%d", address.scale);
4760 }
4761 putc (')', file);
4762 }
4763 else /* !MOTOROLA */
4764 {
4765 if (!address.offset && !address.index)
4766 fprintf (file, "%s@", M68K_REGNAME (REGNO (address.base)));
4767 else
4768 {
4769 /* Print the "base@(offset" component. */
4770 if (labelno >= 0)
4771 asm_fprintf (file, "%Rpc@(%LL%d", labelno);
4772 else
4773 {
4774 if (address.base)
4775 fputs (M68K_REGNAME (REGNO (address.base)), file);
4776 fprintf (file, "@(");
4777 if (address.offset)
4778 output_addr_const (file, address.offset);
4779 }
4780 /* Print the ",index" component, if any. */
4781 if (address.index)
4782 {
4783 fprintf (file, ",%s:%c",
4784 M68K_REGNAME (REGNO (address.index)),
4785 GET_MODE (address.index) == HImode ? 'w' : 'l');
4786 if (address.scale != 1)
4787 fprintf (file, ":%d", address.scale);
4788 }
4789 putc (')', file);
4790 }
4791 }
4792 }
4793 }
4794 \f
/* Check for cases where a clr insn can be omitted from code using
strict_low_part sets. For example, the second clrl here is not needed:
clrl d0; movw a0@+,d0; use d0; clrl d0; movw a0@+,d0; use d0; ...
4798
4799 MODE is the mode of this STRICT_LOW_PART set. FIRST_INSN is the clear
4800 insn we are checking for redundancy. TARGET is the register set by the
4801 clear insn. */
4802
4803 bool
4804 strict_low_part_peephole_ok (machine_mode mode, rtx_insn *first_insn,
4805 rtx target)
4806 {
4807 rtx_insn *p = first_insn;
4808
4809 while ((p = PREV_INSN (p)))
4810 {
4811 if (NOTE_INSN_BASIC_BLOCK_P (p))
4812 return false;
4813
4814 if (NOTE_P (p))
4815 continue;
4816
4817 /* If it isn't an insn, then give up. */
4818 if (!INSN_P (p))
4819 return false;
4820
4821 if (reg_set_p (target, p))
4822 {
4823 rtx set = single_set (p);
4824 rtx dest;
4825
4826 /* If it isn't an easy to recognize insn, then give up. */
4827 if (! set)
4828 return false;
4829
4830 dest = SET_DEST (set);
4831
4832 /* If this sets the entire target register to zero, then our
4833 first_insn is redundant. */
4834 if (rtx_equal_p (dest, target)
4835 && SET_SRC (set) == const0_rtx)
4836 return true;
4837 else if (GET_CODE (dest) == STRICT_LOW_PART
4838 && GET_CODE (XEXP (dest, 0)) == REG
4839 && REGNO (XEXP (dest, 0)) == REGNO (target)
4840 && (GET_MODE_SIZE (GET_MODE (XEXP (dest, 0)))
4841 <= GET_MODE_SIZE (mode)))
4842 /* This is a strict low part set which modifies less than
4843 we are using, so it is safe. */
4844 ;
4845 else
4846 return false;
4847 }
4848 }
4849
4850 return false;
4851 }
4852
4853 /* Operand predicates for implementing asymmetric pc-relative addressing
4854 on m68k. The m68k supports pc-relative addressing (mode 7, register 2)
4855 when used as a source operand, but not as a destination operand.
4856
4857 We model this by restricting the meaning of the basic predicates
4858 (general_operand, memory_operand, etc) to forbid the use of this
4859 addressing mode, and then define the following predicates that permit
4860 this addressing mode. These predicates can then be used for the
4861 source operands of the appropriate instructions.
4862
n.b. While it is theoretically possible to change all machine patterns
to use this addressing mode where permitted by the architecture,
it has only been implemented for "common" cases: SImode, HImode, and
QImode operands, and only for the principal operations that would
require this addressing mode: data movement and simple integer operations.
4868
4869 In parallel with these new predicates, two new constraint letters
4870 were defined: 'S' and 'T'. 'S' is the -mpcrel analog of 'm'.
4871 'T' replaces 's' in the non-pcrel case. It is a no-op in the pcrel case.
4872 In the pcrel case 's' is only valid in combination with 'a' registers.
4873 See addsi3, subsi3, cmpsi, and movsi patterns for a better understanding
4874 of how these constraints are used.
4875
4876 The use of these predicates is strictly optional, though patterns that
4877 don't will cause an extra reload register to be allocated where one
4878 was not necessary:
4879
4880 lea (abc:w,%pc),%a0 ; need to reload address
4881 moveq &1,%d1 ; since write to pc-relative space
4882 movel %d1,%a0@ ; is not allowed
4883 ...
4884 lea (abc:w,%pc),%a1 ; no need to reload address here
4885 movel %a1@,%d0 ; since "movel (abc:w,%pc),%d0" is ok
4886
4887 For more info, consult tiemann@cygnus.com.
4888
4889
4890 All of the ugliness with predicates and constraints is due to the
4891 simple fact that the m68k does not allow a pc-relative addressing
4892 mode as a destination. gcc does not distinguish between source and
4893 destination addresses. Hence, if we claim that pc-relative address
4894 modes are valid, e.g. TARGET_LEGITIMATE_ADDRESS_P accepts them, then we
4895 end up with invalid code. To get around this problem, we left
4896 pc-relative modes as invalid addresses, and then added special
4897 predicates and constraints to accept them.
4898
4899 A cleaner way to handle this is to modify gcc to distinguish
4900 between source and destination addresses. We can then say that
4901 pc-relative is a valid source address but not a valid destination
4902 address, and hopefully avoid a lot of the predicate and constraint
4903 hackery. Unfortunately, this would be a pretty big change. It would
4904 be a useful change for a number of ports, but there aren't any current
4905 plans to undertake this.
4906
4907 ***************************************************************************/
4908
4909
4910 const char *
4911 output_andsi3 (rtx *operands)
4912 {
4913 int logval;
4914 if (GET_CODE (operands[2]) == CONST_INT
4915 && (INTVAL (operands[2]) | 0xffff) == -1
4916 && (DATA_REG_P (operands[0])
4917 || offsettable_memref_p (operands[0]))
4918 && !TARGET_COLDFIRE)
4919 {
4920 if (GET_CODE (operands[0]) != REG)
4921 operands[0] = adjust_address (operands[0], HImode, 2);
4922 operands[2] = GEN_INT (INTVAL (operands[2]) & 0xffff);
4923 /* Do not delete a following tstl %0 insn; that would be incorrect. */
4924 CC_STATUS_INIT;
4925 if (operands[2] == const0_rtx)
4926 return "clr%.w %0";
4927 return "and%.w %2,%0";
4928 }
4929 if (GET_CODE (operands[2]) == CONST_INT
4930 && (logval = exact_log2 (~ INTVAL (operands[2]) & 0xffffffff)) >= 0
4931 && (DATA_REG_P (operands[0])
4932 || offsettable_memref_p (operands[0])))
4933 {
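/* A mask with a single zero bit clears just that bit; e.g.
(illustrative) anding with 0xfffffffb becomes "bclr #2,%0". */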
4934 if (DATA_REG_P (operands[0]))
4935 operands[1] = GEN_INT (logval);
4936 else
4937 {
4938 operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
4939 operands[1] = GEN_INT (logval % 8);
4940 }
4941 /* This does not set condition codes in a standard way. */
4942 CC_STATUS_INIT;
4943 return "bclr %1,%0";
4944 }
4945 return "and%.l %2,%0";
4946 }
4947
4948 const char *
4949 output_iorsi3 (rtx *operands)
4950 {
4951 register int logval;
4952 if (GET_CODE (operands[2]) == CONST_INT
4953 && INTVAL (operands[2]) >> 16 == 0
4954 && (DATA_REG_P (operands[0])
4955 || offsettable_memref_p (operands[0]))
4956 && !TARGET_COLDFIRE)
4957 {
4958 if (GET_CODE (operands[0]) != REG)
4959 operands[0] = adjust_address (operands[0], HImode, 2);
4960 /* Do not delete a following tstl %0 insn; that would be incorrect. */
4961 CC_STATUS_INIT;
4962 if (INTVAL (operands[2]) == 0xffff)
4963 return "mov%.w %2,%0";
4964 return "or%.w %2,%0";
4965 }
4966 if (GET_CODE (operands[2]) == CONST_INT
4967 && (logval = exact_log2 (INTVAL (operands[2]) & 0xffffffff)) >= 0
4968 && (DATA_REG_P (operands[0])
4969 || offsettable_memref_p (operands[0])))
4970 {
4971 if (DATA_REG_P (operands[0]))
4972 operands[1] = GEN_INT (logval);
4973 else
4974 {
4975 operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
4976 operands[1] = GEN_INT (logval % 8);
4977 }
4978 CC_STATUS_INIT;
4979 return "bset %1,%0";
4980 }
4981 return "or%.l %2,%0";
4982 }
4983
4984 const char *
4985 output_xorsi3 (rtx *operands)
4986 {
4987 register int logval;
4988 if (GET_CODE (operands[2]) == CONST_INT
4989 && INTVAL (operands[2]) >> 16 == 0
4990 && (offsettable_memref_p (operands[0]) || DATA_REG_P (operands[0]))
4991 && !TARGET_COLDFIRE)
4992 {
4993 if (! DATA_REG_P (operands[0]))
4994 operands[0] = adjust_address (operands[0], HImode, 2);
4995 /* Do not delete a following tstl %0 insn; that would be incorrect. */
4996 CC_STATUS_INIT;
4997 if (INTVAL (operands[2]) == 0xffff)
4998 return "not%.w %0";
4999 return "eor%.w %2,%0";
5000 }
5001 if (GET_CODE (operands[2]) == CONST_INT
5002 && (logval = exact_log2 (INTVAL (operands[2]) & 0xffffffff)) >= 0
5003 && (DATA_REG_P (operands[0])
5004 || offsettable_memref_p (operands[0])))
5005 {
5006 if (DATA_REG_P (operands[0]))
5007 operands[1] = GEN_INT (logval);
5008 else
5009 {
5010 operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
5011 operands[1] = GEN_INT (logval % 8);
5012 }
5013 CC_STATUS_INIT;
5014 return "bchg %1,%0";
5015 }
5016 return "eor%.l %2,%0";
5017 }
5018
5019 /* Return the instruction that should be used for a call to address X,
5020 which is known to be in operand 0. */
5021
5022 const char *
5023 output_call (rtx x)
5024 {
5025 if (symbolic_operand (x, VOIDmode))
5026 return m68k_symbolic_call;
5027 else
5028 return "jsr %a0";
5029 }
5030
5031 /* Likewise sibling calls. */
5032
5033 const char *
5034 output_sibcall (rtx x)
5035 {
5036 if (symbolic_operand (x, VOIDmode))
5037 return m68k_symbolic_jump;
5038 else
5039 return "jmp %a0";
5040 }
5041
5042 static void
5043 m68k_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
5044 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
5045 tree function)
5046 {
5047 rtx this_slot, offset, addr, mem, tmp;
5048 rtx_insn *insn;
5049
5050 /* Avoid clobbering the struct value reg by using the
5051 static chain reg as a temporary. */
5052 tmp = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
5053
5054 /* Pretend to be a post-reload pass while generating rtl. */
5055 reload_completed = 1;
5056
5057 /* The "this" pointer is stored at 4(%sp). */
5058 this_slot = gen_rtx_MEM (Pmode, plus_constant (Pmode,
5059 stack_pointer_rtx, 4));
5060
5061 /* Add DELTA to THIS. */
5062 if (delta != 0)
5063 {
5064 /* Make the offset a legitimate operand for memory addition. */
5065 offset = GEN_INT (delta);
5066 if ((delta < -8 || delta > 8)
5067 && (TARGET_COLDFIRE || USE_MOVQ (delta)))
5068 {
5069 emit_move_insn (gen_rtx_REG (Pmode, D0_REG), offset);
5070 offset = gen_rtx_REG (Pmode, D0_REG);
5071 }
5072 emit_insn (gen_add3_insn (copy_rtx (this_slot),
5073 copy_rtx (this_slot), offset));
5074 }
5075
5076 /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
5077 if (vcall_offset != 0)
5078 {
5079 /* Set the static chain register to *THIS. */
5080 emit_move_insn (tmp, this_slot);
5081 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
5082
5083 /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
5084 addr = plus_constant (Pmode, tmp, vcall_offset);
5085 if (!m68k_legitimate_address_p (Pmode, addr, true))
5086 {
5087 emit_insn (gen_rtx_SET (tmp, addr));
5088 addr = tmp;
5089 }
5090
5091 /* Load the offset into %d0 and add it to THIS. */
5092 emit_move_insn (gen_rtx_REG (Pmode, D0_REG),
5093 gen_rtx_MEM (Pmode, addr));
5094 emit_insn (gen_add3_insn (copy_rtx (this_slot),
5095 copy_rtx (this_slot),
5096 gen_rtx_REG (Pmode, D0_REG)));
5097 }
5098
5099 /* Jump to the target function. Use a sibcall if direct jumps are
5100 allowed, otherwise load the address into a register first. */
5101 mem = DECL_RTL (function);
5102 if (!sibcall_operand (XEXP (mem, 0), VOIDmode))
5103 {
5104 gcc_assert (flag_pic);
5105
5106 if (!TARGET_SEP_DATA)
5107 {
5108 /* Use the static chain register as a temporary (call-clobbered)
5109 GOT pointer for this function. We can use the static chain
5110 register because it isn't live on entry to the thunk. */
5111 SET_REGNO (pic_offset_table_rtx, STATIC_CHAIN_REGNUM);
5112 emit_insn (gen_load_got (pic_offset_table_rtx));
5113 }
5114 legitimize_pic_address (XEXP (mem, 0), Pmode, tmp);
5115 mem = replace_equiv_address (mem, tmp);
5116 }
5117 insn = emit_call_insn (gen_sibcall (mem, const0_rtx));
5118 SIBLING_CALL_P (insn) = 1;
5119
5120 /* Run just enough of rest_of_compilation. */
5121 insn = get_insns ();
5122 split_all_insns_noflow ();
5123 final_start_function (insn, file, 1);
5124 final (insn, file, 1);
5125 final_end_function ();
5126
5127 /* Clean up the vars set above. */
5128 reload_completed = 0;
5129
5130 /* Restore the original PIC register. */
5131 if (flag_pic)
5132 SET_REGNO (pic_offset_table_rtx, PIC_REG);
5133 }
5134
5135 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
5136
5137 static rtx
5138 m68k_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
5139 int incoming ATTRIBUTE_UNUSED)
5140 {
5141 return gen_rtx_REG (Pmode, M68K_STRUCT_VALUE_REGNUM);
5142 }
5143
5144 /* Return nonzero if register old_reg can be renamed to register new_reg. */
5145 int
5146 m68k_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED,
5147 unsigned int new_reg)
5148 {
5149
5150 /* Interrupt functions can only use registers that have already been
5151 saved by the prologue, even if they would normally be
5152 call-clobbered. */
5153
5154 if ((m68k_get_function_kind (current_function_decl)
5155 == m68k_fk_interrupt_handler)
5156 && !df_regs_ever_live_p (new_reg))
5157 return 0;
5158
5159 return 1;
5160 }
5161
/* Value is true if hard register REGNO can hold a value of machine-mode
MODE. On the 68000, the CPU registers can hold any mode, but we
restrict the 68881 registers to floating-point modes. */
5165
5166 bool
5167 m68k_regno_mode_ok (int regno, machine_mode mode)
5168 {
5169 if (DATA_REGNO_P (regno))
5170 {
/* A data register can hold any mode, including an aggregate, provided
it fits in the remaining data registers. */
5172 if (regno + GET_MODE_SIZE (mode) / 4 <= 8)
5173 return true;
5174 }
5175 else if (ADDRESS_REGNO_P (regno))
5176 {
5177 if (regno + GET_MODE_SIZE (mode) / 4 <= 16)
5178 return true;
5179 }
5180 else if (FP_REGNO_P (regno))
5181 {
/* FPU registers can hold a float or complex float of long double
size or smaller. */
5184 if ((GET_MODE_CLASS (mode) == MODE_FLOAT
5185 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
5186 && GET_MODE_UNIT_SIZE (mode) <= TARGET_FP_REG_SIZE)
5187 return true;
5188 }
5189 return false;
5190 }
5191
5192 /* Implement SECONDARY_RELOAD_CLASS. */
5193
5194 enum reg_class
5195 m68k_secondary_reload_class (enum reg_class rclass,
5196 machine_mode mode, rtx x)
5197 {
5198 int regno;
5199
5200 regno = true_regnum (x);
5201
5202 /* If one operand of a movqi is an address register, the other
5203 operand must be a general register or constant. Other types
5204 of operand must be reloaded through a data register. */
5205 if (GET_MODE_SIZE (mode) == 1
5206 && reg_classes_intersect_p (rclass, ADDR_REGS)
5207 && !(INT_REGNO_P (regno) || CONSTANT_P (x)))
5208 return DATA_REGS;
5209
5210 /* PC-relative addresses must be loaded into an address register first. */
5211 if (TARGET_PCREL
5212 && !reg_class_subset_p (rclass, ADDR_REGS)
5213 && symbolic_operand (x, VOIDmode))
5214 return ADDR_REGS;
5215
5216 return NO_REGS;
5217 }
5218
5219 /* Implement PREFERRED_RELOAD_CLASS. */
5220
5221 enum reg_class
5222 m68k_preferred_reload_class (rtx x, enum reg_class rclass)
5223 {
5224 enum reg_class secondary_class;
5225
5226 /* If RCLASS might need a secondary reload, try restricting it to
5227 a class that doesn't. */
5228 secondary_class = m68k_secondary_reload_class (rclass, GET_MODE (x), x);
5229 if (secondary_class != NO_REGS
5230 && reg_class_subset_p (secondary_class, rclass))
5231 return secondary_class;
5232
5233 /* Prefer to use moveq for in-range constants. */
5234 if (GET_CODE (x) == CONST_INT
5235 && reg_class_subset_p (DATA_REGS, rclass)
5236 && IN_RANGE (INTVAL (x), -0x80, 0x7f))
5237 return DATA_REGS;
5238
5239 /* ??? Do we really need this now? */
5240 if (GET_CODE (x) == CONST_DOUBLE
5241 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
5242 {
5243 if (TARGET_HARD_FLOAT && reg_class_subset_p (FP_REGS, rclass))
5244 return FP_REGS;
5245
5246 return NO_REGS;
5247 }
5248
5249 return rclass;
5250 }
5251
5252 /* Return floating point values in a 68881 register. This makes 68881 code
5253 a little bit faster. It also makes -msoft-float code incompatible with
5254 hard-float code, so people have to be careful not to mix the two.
5255 For ColdFire it was decided the ABI incompatibility is undesirable.
5256 If there is need for a hard-float ABI it is probably worth doing it
5257 properly and also passing function arguments in FP registers. */
5258 rtx
5259 m68k_libcall_value (machine_mode mode)
5260 {
5261 switch (mode) {
5262 case SFmode:
5263 case DFmode:
5264 case XFmode:
5265 if (TARGET_68881)
5266 return gen_rtx_REG (mode, FP0_REG);
5267 break;
5268 default:
5269 break;
5270 }
5271
5272 return gen_rtx_REG (mode, m68k_libcall_value_in_a0_p ? A0_REG : D0_REG);
5273 }
5274
5275 /* Location in which function value is returned.
5276 NOTE: Due to differences in ABIs, don't call this function directly,
5277 use FUNCTION_VALUE instead. */
5278 rtx
5279 m68k_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED)
5280 {
5281 machine_mode mode;
5282
5283 mode = TYPE_MODE (valtype);
5284 switch (mode) {
5285 case SFmode:
5286 case DFmode:
5287 case XFmode:
5288 if (TARGET_68881)
5289 return gen_rtx_REG (mode, FP0_REG);
5290 break;
5291 default:
5292 break;
5293 }
5294
5295 /* If the function returns a pointer, push that into %a0. */
5296 if (func && POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (func))))
5297 /* For compatibility with the large body of existing code which
5298 does not always properly declare external functions returning
5299 pointer types, the m68k/SVR4 convention is to copy the value
5300 returned for pointer functions from a0 to d0 in the function
5301 epilogue, so that callers that have neglected to properly
5302 declare the callee can still find the correct return value in
5303 d0. */
5304 return gen_rtx_PARALLEL
5305 (mode,
5306 gen_rtvec (2,
5307 gen_rtx_EXPR_LIST (VOIDmode,
5308 gen_rtx_REG (mode, A0_REG),
5309 const0_rtx),
5310 gen_rtx_EXPR_LIST (VOIDmode,
5311 gen_rtx_REG (mode, D0_REG),
5312 const0_rtx)));
5313 else if (POINTER_TYPE_P (valtype))
5314 return gen_rtx_REG (mode, A0_REG);
5315 else
5316 return gen_rtx_REG (mode, D0_REG);
5317 }
5318
5319 /* Worker function for TARGET_RETURN_IN_MEMORY. */
5320 #if M68K_HONOR_TARGET_STRICT_ALIGNMENT
5321 static bool
5322 m68k_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
5323 {
5324 machine_mode mode = TYPE_MODE (type);
5325
5326 if (mode == BLKmode)
5327 return true;
5328
5329 /* If TYPE's known alignment is less than the alignment of MODE that
5330 would contain the structure, then return in memory. We need to
5331 do so to maintain the compatibility between code compiled with
5332 -mstrict-align and that compiled with -mno-strict-align. */
5333 if (AGGREGATE_TYPE_P (type)
5334 && TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (mode))
5335 return true;
5336
5337 return false;
5338 }
5339 #endif
5340
5341 /* CPU to schedule the program for. */
5342 enum attr_cpu m68k_sched_cpu;
5343
5344 /* MAC to schedule the program for. */
5345 enum attr_mac m68k_sched_mac;
5346
5347 /* Operand type. */
5348 enum attr_op_type
5349 {
5350 /* No operand. */
5351 OP_TYPE_NONE,
5352
5353 /* Integer register. */
5354 OP_TYPE_RN,
5355
5356 /* FP register. */
5357 OP_TYPE_FPN,
5358
5359 /* Implicit mem reference (e.g. stack). */
5360 OP_TYPE_MEM1,
5361
5362 /* Memory without offset or indexing. EA modes 2, 3 and 4. */
5363 OP_TYPE_MEM234,
5364
5365 /* Memory with offset but without indexing. EA mode 5. */
5366 OP_TYPE_MEM5,
5367
5368 /* Memory with indexing. EA mode 6. */
5369 OP_TYPE_MEM6,
5370
5371 /* Memory referenced by absolute address. EA mode 7. */
5372 OP_TYPE_MEM7,
5373
5374 /* Immediate operand that doesn't require extension word. */
5375 OP_TYPE_IMM_Q,
5376
5377 /* Immediate 16 bit operand. */
5378 OP_TYPE_IMM_W,
5379
5380 /* Immediate 32 bit operand. */
5381 OP_TYPE_IMM_L
5382 };
5383
5384 /* Return type of memory ADDR_RTX refers to. */
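/* E.g. (illustrative): (a0) yields OP_TYPE_MEM234, (d16,a0) yields
OP_TYPE_MEM5, (d8,a0,d1.l) yields OP_TYPE_MEM6, and an absolute or
symbolic address yields OP_TYPE_MEM7. */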
5385 static enum attr_op_type
5386 sched_address_type (machine_mode mode, rtx addr_rtx)
5387 {
5388 struct m68k_address address;
5389
5390 if (symbolic_operand (addr_rtx, VOIDmode))
5391 return OP_TYPE_MEM7;
5392
5393 if (!m68k_decompose_address (mode, addr_rtx,
5394 reload_completed, &address))
5395 {
5396 gcc_assert (!reload_completed);
/* Reload will likely fix the address to be in a register. */
5398 return OP_TYPE_MEM234;
5399 }
5400
5401 if (address.scale != 0)
5402 return OP_TYPE_MEM6;
5403
5404 if (address.base != NULL_RTX)
5405 {
5406 if (address.offset == NULL_RTX)
5407 return OP_TYPE_MEM234;
5408
5409 return OP_TYPE_MEM5;
5410 }
5411
5412 gcc_assert (address.offset != NULL_RTX);
5413
5414 return OP_TYPE_MEM7;
5415 }
5416
5417 /* Return X or Y (depending on OPX_P) operand of INSN. */
5418 static rtx
5419 sched_get_operand (rtx_insn *insn, bool opx_p)
5420 {
5421 int i;
5422
5423 if (recog_memoized (insn) < 0)
5424 gcc_unreachable ();
5425
5426 extract_constrain_insn_cached (insn);
5427
5428 if (opx_p)
5429 i = get_attr_opx (insn);
5430 else
5431 i = get_attr_opy (insn);
5432
5433 if (i >= recog_data.n_operands)
5434 return NULL;
5435
5436 return recog_data.operand[i];
5437 }
5438
5439 /* Return type of INSN's operand X (if OPX_P) or operand Y (if !OPX_P).
5440 If ADDRESS_P is true, return type of memory location operand refers to. */
5441 static enum attr_op_type
5442 sched_attr_op_type (rtx_insn *insn, bool opx_p, bool address_p)
5443 {
5444 rtx op;
5445
5446 op = sched_get_operand (insn, opx_p);
5447
5448 if (op == NULL)
5449 {
5450 gcc_assert (!reload_completed);
5451 return OP_TYPE_RN;
5452 }
5453
5454 if (address_p)
5455 return sched_address_type (QImode, op);
5456
5457 if (memory_operand (op, VOIDmode))
5458 return sched_address_type (GET_MODE (op), XEXP (op, 0));
5459
5460 if (register_operand (op, VOIDmode))
5461 {
5462 if ((!reload_completed && FLOAT_MODE_P (GET_MODE (op)))
5463 || (reload_completed && FP_REG_P (op)))
5464 return OP_TYPE_FPN;
5465
5466 return OP_TYPE_RN;
5467 }
5468
5469 if (GET_CODE (op) == CONST_INT)
5470 {
5471 int ival;
5472
5473 ival = INTVAL (op);
5474
5475 /* Check for quick constants. */
5476 switch (get_attr_type (insn))
5477 {
5478 case TYPE_ALUQ_L:
5479 if (IN_RANGE (ival, 1, 8) || IN_RANGE (ival, -8, -1))
5480 return OP_TYPE_IMM_Q;
5481
5482 gcc_assert (!reload_completed);
5483 break;
5484
5485 case TYPE_MOVEQ_L:
5486 if (USE_MOVQ (ival))
5487 return OP_TYPE_IMM_Q;
5488
5489 gcc_assert (!reload_completed);
5490 break;
5491
5492 case TYPE_MOV3Q_L:
5493 if (valid_mov3q_const (ival))
5494 return OP_TYPE_IMM_Q;
5495
5496 gcc_assert (!reload_completed);
5497 break;
5498
5499 default:
5500 break;
5501 }
5502
5503 if (IN_RANGE (ival, -0x8000, 0x7fff))
5504 return OP_TYPE_IMM_W;
5505
5506 return OP_TYPE_IMM_L;
5507 }
5508
5509 if (GET_CODE (op) == CONST_DOUBLE)
5510 {
5511 switch (GET_MODE (op))
5512 {
5513 case SFmode:
5514 return OP_TYPE_IMM_W;
5515
5516 case VOIDmode:
5517 case DFmode:
5518 return OP_TYPE_IMM_L;
5519
5520 default:
5521 gcc_unreachable ();
5522 }
5523 }
5524
5525 if (GET_CODE (op) == CONST
5526 || symbolic_operand (op, VOIDmode)
5527 || LABEL_P (op))
5528 {
5529 switch (GET_MODE (op))
5530 {
5531 case QImode:
5532 return OP_TYPE_IMM_Q;
5533
5534 case HImode:
5535 return OP_TYPE_IMM_W;
5536
5537 case SImode:
5538 return OP_TYPE_IMM_L;
5539
5540 default:
5541 if (symbolic_operand (m68k_unwrap_symbol (op, false), VOIDmode))
5542 /* Just a guess. */
5543 return OP_TYPE_IMM_W;
5544
5545 return OP_TYPE_IMM_L;
5546 }
5547 }
5548
5549 gcc_assert (!reload_completed);
5550
5551 if (FLOAT_MODE_P (GET_MODE (op)))
5552 return OP_TYPE_FPN;
5553
5554 return OP_TYPE_RN;
5555 }
5556
5557 /* Implement opx_type attribute.
5558 Return type of INSN's operand X.
5559 If ADDRESS_P is true, return type of memory location operand refers to. */
5560 enum attr_opx_type
5561 m68k_sched_attr_opx_type (rtx_insn *insn, int address_p)
5562 {
5563 switch (sched_attr_op_type (insn, true, address_p != 0))
5564 {
5565 case OP_TYPE_RN:
5566 return OPX_TYPE_RN;
5567
5568 case OP_TYPE_FPN:
5569 return OPX_TYPE_FPN;
5570
5571 case OP_TYPE_MEM1:
5572 return OPX_TYPE_MEM1;
5573
5574 case OP_TYPE_MEM234:
5575 return OPX_TYPE_MEM234;
5576
5577 case OP_TYPE_MEM5:
5578 return OPX_TYPE_MEM5;
5579
5580 case OP_TYPE_MEM6:
5581 return OPX_TYPE_MEM6;
5582
5583 case OP_TYPE_MEM7:
5584 return OPX_TYPE_MEM7;
5585
5586 case OP_TYPE_IMM_Q:
5587 return OPX_TYPE_IMM_Q;
5588
5589 case OP_TYPE_IMM_W:
5590 return OPX_TYPE_IMM_W;
5591
5592 case OP_TYPE_IMM_L:
5593 return OPX_TYPE_IMM_L;
5594
5595 default:
5596 gcc_unreachable ();
5597 }
5598 }
5599
5600 /* Implement opy_type attribute.
5601 Return type of INSN's operand Y.
5602 If ADDRESS_P is true, return type of memory location operand refers to. */
5603 enum attr_opy_type
5604 m68k_sched_attr_opy_type (rtx_insn *insn, int address_p)
5605 {
5606 switch (sched_attr_op_type (insn, false, address_p != 0))
5607 {
5608 case OP_TYPE_RN:
5609 return OPY_TYPE_RN;
5610
5611 case OP_TYPE_FPN:
5612 return OPY_TYPE_FPN;
5613
5614 case OP_TYPE_MEM1:
5615 return OPY_TYPE_MEM1;
5616
5617 case OP_TYPE_MEM234:
5618 return OPY_TYPE_MEM234;
5619
5620 case OP_TYPE_MEM5:
5621 return OPY_TYPE_MEM5;
5622
5623 case OP_TYPE_MEM6:
5624 return OPY_TYPE_MEM6;
5625
5626 case OP_TYPE_MEM7:
5627 return OPY_TYPE_MEM7;
5628
5629 case OP_TYPE_IMM_Q:
5630 return OPY_TYPE_IMM_Q;
5631
5632 case OP_TYPE_IMM_W:
5633 return OPY_TYPE_IMM_W;
5634
5635 case OP_TYPE_IMM_L:
5636 return OPY_TYPE_IMM_L;
5637
5638 default:
5639 gcc_unreachable ();
5640 }
5641 }
5642
5643 /* Return size of INSN as int. */
5644 static int
5645 sched_get_attr_size_int (rtx_insn *insn)
5646 {
5647 int size;
5648
5649 switch (get_attr_type (insn))
5650 {
5651 case TYPE_IGNORE:
5652 /* There should be no references to m68k_sched_attr_size for 'ignore'
5653 instructions. */
5654 gcc_unreachable ();
5655 return 0;
5656
5657 case TYPE_MUL_L:
5658 size = 2;
5659 break;
5660
5661 default:
5662 size = 1;
5663 break;
5664 }
5665
5666 switch (get_attr_opx_type (insn))
5667 {
5668 case OPX_TYPE_NONE:
5669 case OPX_TYPE_RN:
5670 case OPX_TYPE_FPN:
5671 case OPX_TYPE_MEM1:
5672 case OPX_TYPE_MEM234:
5673 case OPX_TYPE_IMM_Q:
5674 break;
5675
5676 case OPX_TYPE_MEM5:
5677 case OPX_TYPE_MEM6:
5678 /* Here we assume that most absolute references are short. */
5679 case OPX_TYPE_MEM7:
5680 case OPX_TYPE_IMM_W:
5681 ++size;
5682 break;
5683
5684 case OPX_TYPE_IMM_L:
5685 size += 2;
5686 break;
5687
5688 default:
5689 gcc_unreachable ();
5690 }
5691
5692 switch (get_attr_opy_type (insn))
5693 {
5694 case OPY_TYPE_NONE:
5695 case OPY_TYPE_RN:
5696 case OPY_TYPE_FPN:
5697 case OPY_TYPE_MEM1:
5698 case OPY_TYPE_MEM234:
5699 case OPY_TYPE_IMM_Q:
5700 break;
5701
5702 case OPY_TYPE_MEM5:
5703 case OPY_TYPE_MEM6:
5704 /* Here we assume that most absolute references are short. */
5705 case OPY_TYPE_MEM7:
5706 case OPY_TYPE_IMM_W:
5707 ++size;
5708 break;
5709
5710 case OPY_TYPE_IMM_L:
5711 size += 2;
5712 break;
5713
5714 default:
5715 gcc_unreachable ();
5716 }
5717
5718 if (size > 3)
5719 {
5720 gcc_assert (!reload_completed);
5721
5722 size = 3;
5723 }
5724
5725 return size;
5726 }
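/* A worked example of the arithmetic above (illustrative only): a mul.l
   whose operand Y is a 32-bit immediate counts 2 (TYPE_MUL_L base)
   + 2 (OPX_TYPE_IMM_L) = 4 words, which the clamp cuts back to the
   3-word maximum; the assert records that such a combination must not
   survive reload. */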
5727
5728 /* Return size of INSN as attribute enum value. */
5729 enum attr_size
5730 m68k_sched_attr_size (rtx_insn *insn)
5731 {
5732 switch (sched_get_attr_size_int (insn))
5733 {
5734 case 1:
5735 return SIZE_1;
5736
5737 case 2:
5738 return SIZE_2;
5739
5740 case 3:
5741 return SIZE_3;
5742
5743 default:
5744 gcc_unreachable ();
5745 }
5746 }
5747
5748 /* Classify operand X or Y (depending on OPX_P) of INSN as OP_TYPE_RN
5749 (not a memory access), OP_TYPE_MEM1 or OP_TYPE_MEM6. */
5750 static enum attr_op_type
5751 sched_get_opxy_mem_type (rtx_insn *insn, bool opx_p)
5752 {
5753 if (opx_p)
5754 {
5755 switch (get_attr_opx_type (insn))
5756 {
5757 case OPX_TYPE_NONE:
5758 case OPX_TYPE_RN:
5759 case OPX_TYPE_FPN:
5760 case OPX_TYPE_IMM_Q:
5761 case OPX_TYPE_IMM_W:
5762 case OPX_TYPE_IMM_L:
5763 return OP_TYPE_RN;
5764
5765 case OPX_TYPE_MEM1:
5766 case OPX_TYPE_MEM234:
5767 case OPX_TYPE_MEM5:
5768 case OPX_TYPE_MEM7:
5769 return OP_TYPE_MEM1;
5770
5771 case OPX_TYPE_MEM6:
5772 return OP_TYPE_MEM6;
5773
5774 default:
5775 gcc_unreachable ();
5776 }
5777 }
5778 else
5779 {
5780 switch (get_attr_opy_type (insn))
5781 {
5782 case OPY_TYPE_NONE:
5783 case OPY_TYPE_RN:
5784 case OPY_TYPE_FPN:
5785 case OPY_TYPE_IMM_Q:
5786 case OPY_TYPE_IMM_W:
5787 case OPY_TYPE_IMM_L:
5788 return OP_TYPE_RN;
5789
5790 case OPY_TYPE_MEM1:
5791 case OPY_TYPE_MEM234:
5792 case OPY_TYPE_MEM5:
5793 case OPY_TYPE_MEM7:
5794 return OP_TYPE_MEM1;
5795
5796 case OPY_TYPE_MEM6:
5797 return OP_TYPE_MEM6;
5798
5799 default:
5800 gcc_unreachable ();
5801 }
5802 }
5803 }
5804
5805 /* Implement op_mem attribute. */
5806 enum attr_op_mem
5807 m68k_sched_attr_op_mem (rtx_insn *insn)
5808 {
5809 enum attr_op_type opx;
5810 enum attr_op_type opy;
5811
5812 opx = sched_get_opxy_mem_type (insn, true);
5813 opy = sched_get_opxy_mem_type (insn, false);
5814
5815 if (opy == OP_TYPE_RN && opx == OP_TYPE_RN)
5816 return OP_MEM_00;
5817
5818 if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM1)
5819 {
5820 switch (get_attr_opx_access (insn))
5821 {
5822 case OPX_ACCESS_R:
5823 return OP_MEM_10;
5824
5825 case OPX_ACCESS_W:
5826 return OP_MEM_01;
5827
5828 case OPX_ACCESS_RW:
5829 return OP_MEM_11;
5830
5831 default:
5832 gcc_unreachable ();
5833 }
5834 }
5835
5836 if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM6)
5837 {
5838 switch (get_attr_opx_access (insn))
5839 {
5840 case OPX_ACCESS_R:
5841 return OP_MEM_I0;
5842
5843 case OPX_ACCESS_W:
5844 return OP_MEM_0I;
5845
5846 case OPX_ACCESS_RW:
5847 return OP_MEM_I1;
5848
5849 default:
5850 gcc_unreachable ();
5851 }
5852 }
5853
5854 if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_RN)
5855 return OP_MEM_10;
5856
5857 if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM1)
5858 {
5859 switch (get_attr_opx_access (insn))
5860 {
5861 case OPX_ACCESS_W:
5862 return OP_MEM_11;
5863
5864 default:
5865 gcc_assert (!reload_completed);
5866 return OP_MEM_11;
5867 }
5868 }
5869
5870 if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM6)
5871 {
5872 switch (get_attr_opx_access (insn))
5873 {
5874 case OPX_ACCESS_W:
5875 return OP_MEM_1I;
5876
5877 default:
5878 gcc_assert (!reload_completed);
5879 return OP_MEM_1I;
5880 }
5881 }
5882
5883 if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_RN)
5884 return OP_MEM_I0;
5885
5886 if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM1)
5887 {
5888 switch (get_attr_opx_access (insn))
5889 {
5890 case OPX_ACCESS_W:
5891 return OP_MEM_I1;
5892
5893 default:
5894 gcc_assert (!reload_completed);
5895 return OP_MEM_I1;
5896 }
5897 }
5898
5899 gcc_assert (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM6);
5900 gcc_assert (!reload_completed);
5901 return OP_MEM_I1;
5902 }
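/* Decoding the OP_MEM_* values returned above (inferred from the mapping,
   not documented elsewhere in this file): the first position describes
   memory reads and the second memory writes; '0' means none, '1' a plain
   (OP_TYPE_MEM1) access, and 'I' an indexed (OP_TYPE_MEM6) access.
   E.g., OP_MEM_1I is a plain read plus an indexed write. */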
5903
5904 /* Data for ColdFire V4 index bypass.
5905 The producer modifies a register that the consumer uses as an index
5906 with the specified scale. */
5907 static struct
5908 {
5909 /* Producer instruction. */
5910 rtx pro;
5911
5912 /* Consumer instruction. */
5913 rtx con;
5914
5915 /* Scale of the indexed memory access within the consumer,
5916 or zero if the bypass should not be effective at the moment. */
5917 int scale;
5918 } sched_cfv4_bypass_data;
5919
5920 /* An empty state that is used in m68k_sched_adjust_cost. */
5921 static state_t sched_adjust_cost_state;
5922
5923 /* Implement adjust_cost scheduler hook.
5924 Return adjusted COST of dependency LINK between DEF_INSN and INSN. */
5925 static int
5926 m68k_sched_adjust_cost (rtx_insn *insn, rtx link ATTRIBUTE_UNUSED,
5927 rtx_insn *def_insn, int cost)
5928 {
5929 int delay;
5930
5931 if (recog_memoized (def_insn) < 0
5932 || recog_memoized (insn) < 0)
5933 return cost;
5934
5935 if (sched_cfv4_bypass_data.scale == 1)
5936 /* Handle ColdFire V4 bypass for indexed address with 1x scale. */
5937 {
5938 /* haifa-sched.c: insn_cost () calls bypass_p () just before
5939 targetm.sched.adjust_cost (). Hence, we can be relatively sure
5940 that the data in sched_cfv4_bypass_data is up to date. */
5941 gcc_assert (sched_cfv4_bypass_data.pro == def_insn
5942 && sched_cfv4_bypass_data.con == insn);
5943
5944 if (cost < 3)
5945 cost = 3;
5946
5947 sched_cfv4_bypass_data.pro = NULL;
5948 sched_cfv4_bypass_data.con = NULL;
5949 sched_cfv4_bypass_data.scale = 0;
5950 }
5951 else
5952 gcc_assert (sched_cfv4_bypass_data.pro == NULL
5953 && sched_cfv4_bypass_data.con == NULL
5954 && sched_cfv4_bypass_data.scale == 0);
5955
5956 /* Don't try to issue INSN earlier than DFA permits.
5957 This is especially useful for instructions that write to memory,
5958 as their true dependence (default) latency is better set to 0
5959 to work around alias analysis limitations.
5960 This is, in fact, a machine independent tweak, so, probably,
5961 it should be moved to haifa-sched.c: insn_cost (). */
5962 delay = min_insn_conflict_delay (sched_adjust_cost_state, def_insn, insn);
5963 if (delay > cost)
5964 cost = delay;
5965
5966 return cost;
5967 }
5968
5969 /* Return maximal number of insns that can be scheduled on a single cycle. */
5970 static int
5971 m68k_sched_issue_rate (void)
5972 {
5973 switch (m68k_sched_cpu)
5974 {
5975 case CPU_CFV1:
5976 case CPU_CFV2:
5977 case CPU_CFV3:
5978 return 1;
5979
5980 case CPU_CFV4:
5981 return 2;
5982
5983 default:
5984 gcc_unreachable ();
5985 return 0;
5986 }
5987 }
5988
5989 /* Maximal length, in words, of an instruction for the current CPU.
5990 E.g., it is 3 for any ColdFire core. */
5991 static int max_insn_size;
5992
5993 /* Data to model instruction buffer of CPU. */
5994 struct _sched_ib
5995 {
5996 /* True if the instruction buffer is modeled for the current CPU. */
5997 bool enabled_p;
5998
5999 /* Size of the instruction buffer in words. */
6000 int size;
6001
6002 /* Number of filled words in the instruction buffer. */
6003 int filled;
6004
6005 /* Additional information about instruction buffer for CPUs that have
6006 a buffer of instruction records, rather than a plain buffer
6007 of instruction words. */
6008 struct _sched_ib_records
6009 {
6010 /* Size of buffer in records. */
6011 int n_insns;
6012
6013 /* Array to hold data on adjustments made to the size of the buffer. */
6014 int *adjust;
6015
6016 /* Current index into the above array. */
6017 int adjust_index;
6018 } records;
6019
6020 /* An insn that reserves (marks empty) one word in the instruction buffer. */
6021 rtx insn;
6022 };
6023
6024 static struct _sched_ib sched_ib;
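/* An illustrative walk-through of the record-buffer model used by
   m68k_sched_variable_issue below: on ColdFire V3, records.n_insns == 8
   and max_insn_size == 3 give a 24-word buffer. Issuing a one-word insn
   stores an adjustment of 2 and shrinks the effective size to 22 words
   for the next 7 insns, so at most 8 insns are ever counted as buffered,
   whatever their sizes. */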
6025
6026 /* ID of memory unit. */
6027 static int sched_mem_unit_code;
6028
6029 /* Implementation of the targetm.sched.variable_issue () hook.
6030 It is called after INSN was issued. It returns the number of insns
6031 that can possibly get scheduled on the current cycle.
6032 It is used here to determine the effect of INSN on the instruction
6033 buffer. */
6034 static int
6035 m68k_sched_variable_issue (FILE *sched_dump ATTRIBUTE_UNUSED,
6036 int sched_verbose ATTRIBUTE_UNUSED,
6037 rtx_insn *insn, int can_issue_more)
6038 {
6039 int insn_size;
6040
6041 if (recog_memoized (insn) >= 0 && get_attr_type (insn) != TYPE_IGNORE)
6042 {
6043 switch (m68k_sched_cpu)
6044 {
6045 case CPU_CFV1:
6046 case CPU_CFV2:
6047 insn_size = sched_get_attr_size_int (insn);
6048 break;
6049
6050 case CPU_CFV3:
6051 insn_size = sched_get_attr_size_int (insn);
6052
6053 /* ColdFire V3 and V4 cores have instruction buffers that can
6054 accumulate up to 8 instructions regardless of instructions'
6055 sizes. So we should take care not to "prefetch" 24 one-word
6056 or 12 two-word instructions.
6057 To model this behavior we temporarily decrease size of the
6058 buffer by (max_insn_size - insn_size) for next 7 instructions. */
6059 {
6060 int adjust;
6061
6062 adjust = max_insn_size - insn_size;
6063 sched_ib.size -= adjust;
6064
6065 if (sched_ib.filled > sched_ib.size)
6066 sched_ib.filled = sched_ib.size;
6067
6068 sched_ib.records.adjust[sched_ib.records.adjust_index] = adjust;
6069 }
6070
6071 ++sched_ib.records.adjust_index;
6072 if (sched_ib.records.adjust_index == sched_ib.records.n_insns)
6073 sched_ib.records.adjust_index = 0;
6074
6075 /* Undo the adjustment we made 7 instructions ago. */
6076 sched_ib.size
6077 += sched_ib.records.adjust[sched_ib.records.adjust_index];
6078
6079 break;
6080
6081 case CPU_CFV4:
6082 gcc_assert (!sched_ib.enabled_p);
6083 insn_size = 0;
6084 break;
6085
6086 default:
6087 gcc_unreachable ();
6088 }
6089
6090 if (insn_size > sched_ib.filled)
6091 /* Scheduling for register pressure does not always take DFA into
6092 account. Work around the instruction buffer not being filled enough. */
6093 {
6094 gcc_assert (sched_pressure == SCHED_PRESSURE_WEIGHTED);
6095 insn_size = sched_ib.filled;
6096 }
6097
6098 --can_issue_more;
6099 }
6100 else if (GET_CODE (PATTERN (insn)) == ASM_INPUT
6101 || asm_noperands (PATTERN (insn)) >= 0)
6102 insn_size = sched_ib.filled;
6103 else
6104 insn_size = 0;
6105
6106 sched_ib.filled -= insn_size;
6107
6108 return can_issue_more;
6109 }
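/* Note on the fallback above: the size of an inline asm is unknown, so it
   is conservatively assumed to drain the entire buffer. */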
6110
6111 /* Return how many instructions the scheduler should look ahead to choose
6112 the best one. */
6113 static int
6114 m68k_sched_first_cycle_multipass_dfa_lookahead (void)
6115 {
6116 return m68k_sched_issue_rate () - 1;
6117 }
6118
6119 /* Implementation of targetm.sched.init_global () hook.
6120 It is invoked once per scheduling pass and is used here
6121 to initialize scheduler constants. */
6122 static void
6123 m68k_sched_md_init_global (FILE *sched_dump ATTRIBUTE_UNUSED,
6124 int sched_verbose ATTRIBUTE_UNUSED,
6125 int n_insns ATTRIBUTE_UNUSED)
6126 {
6127 #ifdef ENABLE_CHECKING
6128 /* Check that all instructions have DFA reservations and
6129 that all instructions can be issued from a clean state. */
6130 {
6131 rtx_insn *insn;
6132 state_t state;
6133
6134 state = alloca (state_size ());
6135
6136 for (insn = get_insns (); insn != NULL; insn = NEXT_INSN (insn))
6137 {
6138 if (INSN_P (insn) && recog_memoized (insn) >= 0)
6139 {
6140 gcc_assert (insn_has_dfa_reservation_p (insn));
6141
6142 state_reset (state);
6143 if (state_transition (state, insn) >= 0)
6144 gcc_unreachable ();
6145 }
6146 }
6147 }
6148 #endif
6149
6150 /* Set up the target CPU. */
6151
6152 /* ColdFire V4 has a set of features to keep its instruction buffer full
6153 (e.g., a separate memory bus for instructions) and, hence, we do not model
6154 the buffer for this CPU. */
6155 sched_ib.enabled_p = (m68k_sched_cpu != CPU_CFV4);
6156
6157 switch (m68k_sched_cpu)
6158 {
6159 case CPU_CFV4:
6160 sched_ib.filled = 0;
6161
6162 /* FALLTHRU */
6163
6164 case CPU_CFV1:
6165 case CPU_CFV2:
6166 max_insn_size = 3;
6167 sched_ib.records.n_insns = 0;
6168 sched_ib.records.adjust = NULL;
6169 break;
6170
6171 case CPU_CFV3:
6172 max_insn_size = 3;
6173 sched_ib.records.n_insns = 8;
6174 sched_ib.records.adjust = XNEWVEC (int, sched_ib.records.n_insns);
6175 break;
6176
6177 default:
6178 gcc_unreachable ();
6179 }
6180
6181 sched_mem_unit_code = get_cpu_unit_code ("cf_mem1");
6182
6183 sched_adjust_cost_state = xmalloc (state_size ());
6184 state_reset (sched_adjust_cost_state);
6185
6186 start_sequence ();
6187 emit_insn (gen_ib ());
6188 sched_ib.insn = get_insns ();
6189 end_sequence ();
6190 }
6191
6192 /* Scheduling pass is now finished. Free/reset static variables. */
6193 static void
6194 m68k_sched_md_finish_global (FILE *dump ATTRIBUTE_UNUSED,
6195 int verbose ATTRIBUTE_UNUSED)
6196 {
6197 sched_ib.insn = NULL;
6198
6199 free (sched_adjust_cost_state);
6200 sched_adjust_cost_state = NULL;
6201
6202 sched_mem_unit_code = 0;
6203
6204 free (sched_ib.records.adjust);
6205 sched_ib.records.adjust = NULL;
6206 sched_ib.records.n_insns = 0;
6207 max_insn_size = 0;
6208 }
6209
6210 /* Implementation of targetm.sched.init () hook.
6211 It is invoked each time the scheduler starts on a new block (basic block or
6212 extended basic block). */
6213 static void
6214 m68k_sched_md_init (FILE *sched_dump ATTRIBUTE_UNUSED,
6215 int sched_verbose ATTRIBUTE_UNUSED,
6216 int n_insns ATTRIBUTE_UNUSED)
6217 {
6218 switch (m68k_sched_cpu)
6219 {
6220 case CPU_CFV1:
6221 case CPU_CFV2:
6222 sched_ib.size = 6;
6223 break;
6224
6225 case CPU_CFV3:
6226 sched_ib.size = sched_ib.records.n_insns * max_insn_size;
6227
6228 memset (sched_ib.records.adjust, 0,
6229 sched_ib.records.n_insns * sizeof (*sched_ib.records.adjust));
6230 sched_ib.records.adjust_index = 0;
6231 break;
6232
6233 case CPU_CFV4:
6234 gcc_assert (!sched_ib.enabled_p);
6235 sched_ib.size = 0;
6236 break;
6237
6238 default:
6239 gcc_unreachable ();
6240 }
6241
6242 if (sched_ib.enabled_p)
6243 /* haifa-sched.c: schedule_block () calls advance_cycle () just before the
6244 first cycle, crediting the buffer with 2 spurious words; -2 cancels that. */
6245 sched_ib.filled = -2;
6246 }
6247
6248 /* Implementation of targetm.sched.dfa_pre_advance_cycle () hook.
6249 It is invoked just before current cycle finishes and is used here
6250 to track whether the instruction buffer got its two words this cycle. */
6251 static void
6252 m68k_sched_dfa_pre_advance_cycle (void)
6253 {
6254 if (!sched_ib.enabled_p)
6255 return;
6256
6257 if (!cpu_unit_reservation_p (curr_state, sched_mem_unit_code))
6258 {
6259 sched_ib.filled += 2;
6260
6261 if (sched_ib.filled > sched_ib.size)
6262 sched_ib.filled = sched_ib.size;
6263 }
6264 }
6265
6266 /* Implementation of targetm.sched.dfa_post_advance_cycle () hook.
6267 It is invoked just after new cycle begins and is used here
6268 to set up the number of filled words in the instruction buffer so that
6269 instructions which won't have all their words prefetched would be
6270 stalled for a cycle. */
6271 static void
6272 m68k_sched_dfa_post_advance_cycle (void)
6273 {
6274 int i;
6275
6276 if (!sched_ib.enabled_p)
6277 return;
6278
6279 /* Set up the number of prefetched instruction words in the instruction
6280 buffer. */
6281 i = max_insn_size - sched_ib.filled;
6282
6283 while (--i >= 0)
6284 {
6285 if (state_transition (curr_state, sched_ib.insn) >= 0)
6286 /* Pick up scheduler state. */
6287 ++sched_ib.filled;
6288 }
6289 }
6290
6291 /* Return X or Y (depending on OPX_P) operand of INSN,
6292 if it is an integer register, or NULL otherwise. */
6293 static rtx
6294 sched_get_reg_operand (rtx_insn *insn, bool opx_p)
6295 {
6296 rtx op = NULL;
6297
6298 if (opx_p)
6299 {
6300 if (get_attr_opx_type (insn) == OPX_TYPE_RN)
6301 {
6302 op = sched_get_operand (insn, true);
6303 gcc_assert (op != NULL);
6304
6305 if (!reload_completed && !REG_P (op))
6306 return NULL;
6307 }
6308 }
6309 else
6310 {
6311 if (get_attr_opy_type (insn) == OPY_TYPE_RN)
6312 {
6313 op = sched_get_operand (insn, false);
6314 gcc_assert (op != NULL);
6315
6316 if (!reload_completed && !REG_P (op))
6317 return NULL;
6318 }
6319 }
6320
6321 return op;
6322 }
6323
6324 /* Return true if the X or Y (depending on OPX_P) operand of INSN
6325 is a MEM. */
6326 static bool
6327 sched_mem_operand_p (rtx_insn *insn, bool opx_p)
6328 {
6329 switch (sched_get_opxy_mem_type (insn, opx_p))
6330 {
6331 case OP_TYPE_MEM1:
6332 case OP_TYPE_MEM6:
6333 return true;
6334
6335 default:
6336 return false;
6337 }
6338 }
6339
6340 /* Return the MEM operand of INSN that satisfies the MUST_READ_P and
6341 MUST_WRITE_P constraints; there must be one. */
6342 static rtx
6343 sched_get_mem_operand (rtx_insn *insn, bool must_read_p, bool must_write_p)
6344 {
6345 bool opx_p;
6346 bool opy_p;
6347
6348 opx_p = false;
6349 opy_p = false;
6350
6351 if (must_read_p)
6352 {
6353 opx_p = true;
6354 opy_p = true;
6355 }
6356
6357 if (must_write_p)
6358 {
6359 opx_p = true;
6360 opy_p = false;
6361 }
6362
6363 if (opy_p && sched_mem_operand_p (insn, false))
6364 return sched_get_operand (insn, false);
6365
6366 if (opx_p && sched_mem_operand_p (insn, true))
6367 return sched_get_operand (insn, true);
6368
6369 gcc_unreachable ();
6370 return NULL;
6371 }
6372
6373 /* Return non-zero if PRO modifies a register used as part of
6374 an address in CON. */
6375 int
6376 m68k_sched_address_bypass_p (rtx_insn *pro, rtx_insn *con)
6377 {
6378 rtx pro_x;
6379 rtx con_mem_read;
6380
6381 pro_x = sched_get_reg_operand (pro, true);
6382 if (pro_x == NULL)
6383 return 0;
6384
6385 con_mem_read = sched_get_mem_operand (con, true, false);
6386 gcc_assert (con_mem_read != NULL);
6387
6388 if (reg_mentioned_p (pro_x, con_mem_read))
6389 return 1;
6390
6391 return 0;
6392 }
6393
6394 /* Helper function for m68k_sched_indexed_address_bypass_p.
6395 If PRO modifies a register used as an index in CON, return the scale
6396 of the indexed memory access in CON. Return zero otherwise. */
6397 static int
6398 sched_get_indexed_address_scale (rtx_insn *pro, rtx_insn *con)
6399 {
6400 rtx reg;
6401 rtx mem;
6402 struct m68k_address address;
6403
6404 reg = sched_get_reg_operand (pro, true);
6405 if (reg == NULL)
6406 return 0;
6407
6408 mem = sched_get_mem_operand (con, true, false);
6409 gcc_assert (mem != NULL && MEM_P (mem));
6410
6411 if (!m68k_decompose_address (GET_MODE (mem), XEXP (mem, 0), reload_completed,
6412 &address))
6413 gcc_unreachable ();
6414
6415 if (REGNO (reg) == REGNO (address.index))
6416 {
6417 gcc_assert (address.scale != 0);
6418 return address.scale;
6419 }
6420
6421 return 0;
6422 }
6423
6424 /* Return non-zero if PRO modifies a register used
6425 as an index with scale 2 or 4 in CON. */
6426 int
6427 m68k_sched_indexed_address_bypass_p (rtx_insn *pro, rtx_insn *con)
6428 {
6429 gcc_assert (sched_cfv4_bypass_data.pro == NULL
6430 && sched_cfv4_bypass_data.con == NULL
6431 && sched_cfv4_bypass_data.scale == 0);
6432
6433 switch (sched_get_indexed_address_scale (pro, con))
6434 {
6435 case 1:
6436 /* We can't have a variable latency bypass, so
6437 remember to adjust the insn cost in adjust_cost hook. */
6438 sched_cfv4_bypass_data.pro = pro;
6439 sched_cfv4_bypass_data.con = con;
6440 sched_cfv4_bypass_data.scale = 1;
6441 return 0;
6442
6443 case 2:
6444 case 4:
6445 return 1;
6446
6447 default:
6448 return 0;
6449 }
6450 }
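/* A hypothetical insn pair illustrating the above (Motorola syntax):

     move.l %d1,%d0              | producer PRO writes %d0
     move.l (4,%a0,%d0.l*4),%d2  | consumer CON indexes with %d0, scale 4

   makes this function return 1, enabling the corresponding bypass in the
   scheduler description. A 1x scale cannot be granted a fixed bypass
   latency, so it is recorded and handled in m68k_sched_adjust_cost
   instead, where the cost is raised to at least 3. */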
6451
6452 /* We generate a two-instruction program at M_TRAMP:
6453 movea.l &CHAIN_VALUE,%a0
6454 jmp FNADDR
6455 where %a0 can be modified by changing STATIC_CHAIN_REGNUM. */
6456
6457 static void
6458 m68k_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
6459 {
6460 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
6461 rtx mem;
6462
6463 gcc_assert (ADDRESS_REGNO_P (STATIC_CHAIN_REGNUM));
6464
6465 mem = adjust_address (m_tramp, HImode, 0);
6466 emit_move_insn (mem, GEN_INT (0x207C + ((STATIC_CHAIN_REGNUM - 8) << 9)));
6467 mem = adjust_address (m_tramp, SImode, 2);
6468 emit_move_insn (mem, chain_value);
6469
6470 mem = adjust_address (m_tramp, HImode, 6);
6471 emit_move_insn (mem, GEN_INT (0x4EF9));
6472 mem = adjust_address (m_tramp, SImode, 8);
6473 emit_move_insn (mem, fnaddr);
6474
6475 FINALIZE_TRAMPOLINE (XEXP (m_tramp, 0));
6476 }
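/* For illustration, the 12-byte trampoline laid out above, assuming
   STATIC_CHAIN_REGNUM is %a0 (hard register 8):

     offset 0: 0x207c       movea.l &CHAIN_VALUE,%a0
     offset 2: CHAIN_VALUE  32-bit immediate
     offset 6: 0x4ef9       jmp FNADDR
     offset 8: FNADDR       32-bit absolute address

   0x207c encodes an immediate move.l to %an with the destination register
   field in bits 11-9, hence the (STATIC_CHAIN_REGNUM - 8) << 9 correction
   above. */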
6477
6478 /* On the 68000, the RTS insn cannot pop anything.
6479 On the 68010, the RTD insn may be used to pop them if the number
6480 of args is fixed, but if the number is variable then the caller
6481 must pop them all. RTD can't be used for library calls now
6482 because the library is compiled with the Unix compiler.
6483 Use of RTD is a selectable option, since it is incompatible with
6484 standard Unix calling sequences. If the option is not selected,
6485 the caller must always pop the args. */
6486
6487 static int
6488 m68k_return_pops_args (tree fundecl, tree funtype, int size)
6489 {
6490 return ((TARGET_RTD
6491 && (!fundecl
6492 || TREE_CODE (fundecl) != IDENTIFIER_NODE)
6493 && (!stdarg_p (funtype)))
6494 ? size : 0);
6495 }
6496
6497 /* Make sure everything's fine if we *don't* have a given processor.
6498 This assumes that putting a register in fixed_regs will keep the
6499 compiler's mitts completely off it. We don't bother to zero it out
6500 of register classes. */
6501
6502 static void
6503 m68k_conditional_register_usage (void)
6504 {
6505 int i;
6506 HARD_REG_SET x;
6507 if (!TARGET_HARD_FLOAT)
6508 {
6509 COPY_HARD_REG_SET (x, reg_class_contents[(int)FP_REGS]);
6510 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
6511 if (TEST_HARD_REG_BIT (x, i))
6512 fixed_regs[i] = call_used_regs[i] = 1;
6513 }
6514 if (flag_pic)
6515 fixed_regs[PIC_REG] = call_used_regs[PIC_REG] = 1;
6516 }
6517
6518 static void
6519 m68k_init_sync_libfuncs (void)
6520 {
6521 init_sync_libfuncs (UNITS_PER_WORD);
6522 }
6523
6524 /* Implements EPILOGUE_USES. All registers are live on exit from an
6525 interrupt routine. */
6526 bool
6527 m68k_epilogue_uses (int regno ATTRIBUTE_UNUSED)
6528 {
6529 return (reload_completed
6530 && (m68k_get_function_kind (current_function_decl)
6531 == m68k_fk_interrupt_handler));
6532 }
6533
6534 #include "gt-m68k.h"