/* Subroutines for insn-output.c for Motorola 68000 family.
   Copyright (C) 1987-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "calls.h"
#include "stor-layout.h"
#include "varasm.h"
#include "rtl.h"
#include "function.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "expr.h"
#include "reload.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "debug.h"
#include "flags.h"
#include "df.h"
/* ??? Need to add a dependency between m68k.o and sched-int.h.  */
#include "sched-int.h"
#include "insn-codes.h"
#include "ggc.h"
#include "opts.h"
#include "optabs.h"
#include "builtins.h"

enum reg_class regno_reg_class[] =
{
  DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
  DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
  ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
  ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  ADDR_REGS
};


/* The minimum number of integer registers that we want to save with the
   movem instruction.  Using two movel instructions instead of a single
   moveml is about 15% faster for the 68020 and 68030 at no expense in
   code size.  */
#define MIN_MOVEM_REGS 3

/* The minimum number of floating point registers that we want to save
   with the fmovem instruction.  */
#define MIN_FMOVEM_REGS 1

/* Structure describing stack frame layout.  */
struct m68k_frame
{
  /* Stack pointer to frame pointer offset.  */
  HOST_WIDE_INT offset;

  /* Offset of FPU registers.  */
  HOST_WIDE_INT foffset;

  /* Frame size in bytes (rounded up).  */
  HOST_WIDE_INT size;

  /* Data and address register.  */
  int reg_no;
  unsigned int reg_mask;

  /* FPU registers.  */
  int fpu_no;
  unsigned int fpu_mask;

  /* Offsets relative to ARG_POINTER.  */
  HOST_WIDE_INT frame_pointer_offset;
  HOST_WIDE_INT stack_pointer_offset;

  /* Function which the above information refers to.  */
  int funcdef_no;
};

/* Current frame information calculated by m68k_compute_frame_layout().  */
static struct m68k_frame current_frame;

/* Structure describing an m68k address.

   If CODE is UNKNOWN, the address is BASE + INDEX * SCALE + OFFSET,
   with null fields evaluating to 0.  Here:

   - BASE satisfies m68k_legitimate_base_reg_p
   - INDEX satisfies m68k_legitimate_index_reg_p
   - OFFSET satisfies m68k_legitimate_constant_address_p

   INDEX is either HImode or SImode.  The other fields are SImode.

   If CODE is PRE_DEC, the address is -(BASE).  If CODE is POST_INC,
   the address is (BASE)+.  */
struct m68k_address {
  enum rtx_code code;
  rtx base;
  rtx index;
  rtx offset;
  int scale;
};
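
/* For example, the 68020 operand (16,%a0,%d1.l*4) decomposes into
   BASE = %a0, INDEX = %d1, SCALE = 4 and OFFSET = (const_int 16),
   with CODE left as UNKNOWN.  */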

static int m68k_sched_adjust_cost (rtx, rtx, rtx, int);
static int m68k_sched_issue_rate (void);
static int m68k_sched_variable_issue (FILE *, int, rtx, int);
static void m68k_sched_md_init_global (FILE *, int, int);
static void m68k_sched_md_finish_global (FILE *, int);
static void m68k_sched_md_init (FILE *, int, int);
static void m68k_sched_dfa_pre_advance_cycle (void);
static void m68k_sched_dfa_post_advance_cycle (void);
static int m68k_sched_first_cycle_multipass_dfa_lookahead (void);

static bool m68k_can_eliminate (const int, const int);
static void m68k_conditional_register_usage (void);
static bool m68k_legitimate_address_p (enum machine_mode, rtx, bool);
static void m68k_option_override (void);
static void m68k_override_options_after_change (void);
static rtx find_addr_reg (rtx);
static const char *singlemove_string (rtx *);
static void m68k_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
                                  HOST_WIDE_INT, tree);
static rtx m68k_struct_value_rtx (tree, int);
static tree m68k_handle_fndecl_attribute (tree *node, tree name,
                                          tree args, int flags,
                                          bool *no_add_attrs);
static void m68k_compute_frame_layout (void);
static bool m68k_save_reg (unsigned int regno, bool interrupt_handler);
static bool m68k_ok_for_sibcall_p (tree, tree);
static bool m68k_tls_symbol_p (rtx);
static rtx m68k_legitimize_address (rtx, rtx, enum machine_mode);
static bool m68k_rtx_costs (rtx, int, int, int, int *, bool);
#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
static bool m68k_return_in_memory (const_tree, const_tree);
#endif
static void m68k_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
static void m68k_trampoline_init (rtx, tree, rtx);
static int m68k_return_pops_args (tree, tree, int);
static rtx m68k_delegitimize_address (rtx);
static void m68k_function_arg_advance (cumulative_args_t, enum machine_mode,
                                       const_tree, bool);
static rtx m68k_function_arg (cumulative_args_t, enum machine_mode,
                              const_tree, bool);
static bool m68k_cannot_force_const_mem (enum machine_mode mode, rtx x);
static bool m68k_output_addr_const_extra (FILE *, rtx);
static void m68k_init_sync_libfuncs (void) ATTRIBUTE_UNUSED;
\f
/* Initialize the GCC target structure.  */

#if INT_OP_GROUP == INT_OP_DOT_WORD
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
#endif

#if INT_OP_GROUP == INT_OP_NO_DOT
#undef TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tbyte\t"
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tshort\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tlong\t"
#endif

#if INT_OP_GROUP == INT_OP_DC
#undef TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tdc.b\t"
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tdc.w\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tdc.l\t"
#endif

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK m68k_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_ASM_FILE_START_APP_OFF
#define TARGET_ASM_FILE_START_APP_OFF true

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS m68k_legitimize_address

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST m68k_sched_adjust_cost

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE m68k_sched_issue_rate

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE m68k_sched_variable_issue

#undef TARGET_SCHED_INIT_GLOBAL
#define TARGET_SCHED_INIT_GLOBAL m68k_sched_md_init_global

#undef TARGET_SCHED_FINISH_GLOBAL
#define TARGET_SCHED_FINISH_GLOBAL m68k_sched_md_finish_global

#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT m68k_sched_md_init

#undef TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE
#define TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE m68k_sched_dfa_pre_advance_cycle

#undef TARGET_SCHED_DFA_POST_ADVANCE_CYCLE
#define TARGET_SCHED_DFA_POST_ADVANCE_CYCLE m68k_sched_dfa_post_advance_cycle

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
  m68k_sched_first_cycle_multipass_dfa_lookahead

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE m68k_option_override

#undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m68k_override_options_after_change

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS m68k_rtx_costs

#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE m68k_attribute_table

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX m68k_struct_value_rtx

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM m68k_cannot_force_const_mem

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL m68k_ok_for_sibcall_p

#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY m68k_return_in_memory
#endif

#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS (true)

#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL m68k_output_dwarf_dtprel
#endif

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P m68k_legitimate_address_p

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE m68k_can_eliminate

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE m68k_conditional_register_usage

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT m68k_trampoline_init

#undef TARGET_RETURN_POPS_ARGS
#define TARGET_RETURN_POPS_ARGS m68k_return_pops_args

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS m68k_delegitimize_address

#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG m68k_function_arg

#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE m68k_function_arg_advance

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P m68k_legitimate_constant_p

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA m68k_output_addr_const_extra

/* The value stored by TAS.  */
#undef TARGET_ATOMIC_TEST_AND_SET_TRUEVAL
#define TARGET_ATOMIC_TEST_AND_SET_TRUEVAL 128

static const struct attribute_spec m68k_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "interrupt", 0, 0, true, false, false, m68k_handle_fndecl_attribute,
    false },
  { "interrupt_handler", 0, 0, true, false, false,
    m68k_handle_fndecl_attribute, false },
  { "interrupt_thread", 0, 0, true, false, false,
    m68k_handle_fndecl_attribute, false },
  { NULL, 0, 0, false, false, false, NULL, false }
};

struct gcc_target targetm = TARGET_INITIALIZER;
\f
/* Base flags for 68k ISAs.  */
#define FL_FOR_isa_00    FL_ISA_68000
#define FL_FOR_isa_10    (FL_FOR_isa_00 | FL_ISA_68010)
/* FL_68881 controls the default setting of -m68881.  gcc has traditionally
   generated 68881 code for 68020 and 68030 targets unless explicitly told
   not to.  */
#define FL_FOR_isa_20    (FL_FOR_isa_10 | FL_ISA_68020 \
                          | FL_BITFIELD | FL_68881 | FL_CAS)
#define FL_FOR_isa_40    (FL_FOR_isa_20 | FL_ISA_68040)
#define FL_FOR_isa_cpu32 (FL_FOR_isa_10 | FL_ISA_68020)

/* Base flags for ColdFire ISAs.  */
#define FL_FOR_isa_a     (FL_COLDFIRE | FL_ISA_A)
#define FL_FOR_isa_aplus (FL_FOR_isa_a | FL_ISA_APLUS | FL_CF_USP)
/* Note ISA_B doesn't necessarily include USP (user stack pointer) support.  */
#define FL_FOR_isa_b     (FL_FOR_isa_a | FL_ISA_B | FL_CF_HWDIV)
/* ISA_C is not upwardly compatible with ISA_B.  */
#define FL_FOR_isa_c     (FL_FOR_isa_a | FL_ISA_C | FL_CF_USP)

enum m68k_isa
{
  /* Traditional 68000 instruction sets.  */
  isa_00,
  isa_10,
  isa_20,
  isa_40,
  isa_cpu32,
  /* ColdFire instruction set variants.  */
  isa_a,
  isa_aplus,
  isa_b,
  isa_c,
  isa_max
};

/* Information about one of the -march, -mcpu or -mtune arguments.  */
struct m68k_target_selection
{
  /* The argument being described.  */
  const char *name;

  /* For -mcpu, this is the device selected by the option.
     For -mtune and -march, it is a representative device
     for the microarchitecture or ISA respectively.  */
  enum target_device device;

  /* The M68K_DEVICE fields associated with DEVICE.  See the comment
     in m68k-devices.def for details.  FAMILY is only valid for -mcpu.  */
  const char *family;
  enum uarch_type microarch;
  enum m68k_isa isa;
  unsigned long flags;
};

/* A list of all devices in m68k-devices.def.  Used for -mcpu selection.  */
static const struct m68k_target_selection all_devices[] =
{
#define M68K_DEVICE(NAME,ENUM_VALUE,FAMILY,MULTILIB,MICROARCH,ISA,FLAGS) \
  { NAME, ENUM_VALUE, FAMILY, u##MICROARCH, ISA, FLAGS | FL_FOR_##ISA },
#include "m68k-devices.def"
#undef M68K_DEVICE
  { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
};

/* A list of all ISAs, mapping each one to a representative device.
   Used for -march selection.  */
static const struct m68k_target_selection all_isas[] =
{
#define M68K_ISA(NAME,DEVICE,MICROARCH,ISA,FLAGS) \
  { NAME, DEVICE, NULL, u##MICROARCH, ISA, FLAGS },
#include "m68k-isas.def"
#undef M68K_ISA
  { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
};

/* A list of all microarchitectures, mapping each one to a representative
   device.  Used for -mtune selection.  */
static const struct m68k_target_selection all_microarchs[] =
{
#define M68K_MICROARCH(NAME,DEVICE,MICROARCH,ISA,FLAGS) \
  { NAME, DEVICE, NULL, u##MICROARCH, ISA, FLAGS },
#include "m68k-microarchs.def"
#undef M68K_MICROARCH
  { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
};
\f
/* The entries associated with the -mcpu, -march and -mtune settings,
   or null for options that have not been used.  */
const struct m68k_target_selection *m68k_cpu_entry;
const struct m68k_target_selection *m68k_arch_entry;
const struct m68k_target_selection *m68k_tune_entry;

/* Which CPU we are generating code for.  */
enum target_device m68k_cpu;

/* Which microarchitecture to tune for.  */
enum uarch_type m68k_tune;

/* Which FPU to use.  */
enum fpu_type m68k_fpu;

/* The set of FL_* flags that apply to the target processor.  */
unsigned int m68k_cpu_flags;

/* The set of FL_* flags that apply to the processor to be tuned for.  */
unsigned int m68k_tune_flags;

/* Asm templates for calling or jumping to an arbitrary symbolic address,
   or NULL if such calls or jumps are not supported.  The address is held
   in operand 0.  */
const char *m68k_symbolic_call;
const char *m68k_symbolic_jump;

/* Enum variable that corresponds to m68k_symbolic_call values.  */
enum M68K_SYMBOLIC_CALL m68k_symbolic_call_var;

\f
/* Implement TARGET_OPTION_OVERRIDE.  */

static void
m68k_option_override (void)
{
  const struct m68k_target_selection *entry;
  unsigned long target_mask;

  if (global_options_set.x_m68k_arch_option)
    m68k_arch_entry = &all_isas[m68k_arch_option];

  if (global_options_set.x_m68k_cpu_option)
    m68k_cpu_entry = &all_devices[(int) m68k_cpu_option];

  if (global_options_set.x_m68k_tune_option)
    m68k_tune_entry = &all_microarchs[(int) m68k_tune_option];

  /* User can choose:

     -mcpu=
     -march=
     -mtune=

     -march=ARCH should generate code that runs any processor
     implementing architecture ARCH.  -mcpu=CPU should override -march
     and should generate code that runs on processor CPU, making free
     use of any instructions that CPU understands.  -mtune=UARCH applies
     on top of -mcpu or -march and optimizes the code for UARCH.  It does
     not change the target architecture.  */
  if (m68k_cpu_entry)
    {
      /* Complain if the -march setting is for a different microarchitecture,
         or includes flags that the -mcpu setting doesn't.  */
      if (m68k_arch_entry
          && (m68k_arch_entry->microarch != m68k_cpu_entry->microarch
              || (m68k_arch_entry->flags & ~m68k_cpu_entry->flags) != 0))
        warning (0, "-mcpu=%s conflicts with -march=%s",
                 m68k_cpu_entry->name, m68k_arch_entry->name);

      entry = m68k_cpu_entry;
    }
  else
    entry = m68k_arch_entry;

  if (!entry)
    entry = all_devices + TARGET_CPU_DEFAULT;

  m68k_cpu_flags = entry->flags;

  /* Use the architecture setting to derive default values for
     certain flags.  */
  target_mask = 0;

  /* ColdFire is lenient about alignment.  */
  if (!TARGET_COLDFIRE)
    target_mask |= MASK_STRICT_ALIGNMENT;

  if ((m68k_cpu_flags & FL_BITFIELD) != 0)
    target_mask |= MASK_BITFIELD;
  if ((m68k_cpu_flags & FL_CF_HWDIV) != 0)
    target_mask |= MASK_CF_HWDIV;
  if ((m68k_cpu_flags & (FL_68881 | FL_CF_FPU)) != 0)
    target_mask |= MASK_HARD_FLOAT;
  target_flags |= target_mask & ~target_flags_explicit;

  /* Set the directly-usable versions of the -mcpu and -mtune settings.  */
  m68k_cpu = entry->device;
  if (m68k_tune_entry)
    {
      m68k_tune = m68k_tune_entry->microarch;
      m68k_tune_flags = m68k_tune_entry->flags;
    }
#ifdef M68K_DEFAULT_TUNE
  else if (!m68k_cpu_entry && !m68k_arch_entry)
    {
      enum target_device dev;
      dev = all_microarchs[M68K_DEFAULT_TUNE].device;
      m68k_tune_flags = all_devices[dev].flags;
    }
#endif
  else
    {
      m68k_tune = entry->microarch;
      m68k_tune_flags = entry->flags;
    }

  /* Set the type of FPU.  */
  m68k_fpu = (!TARGET_HARD_FLOAT ? FPUTYPE_NONE
              : (m68k_cpu_flags & FL_COLDFIRE) != 0 ? FPUTYPE_COLDFIRE
              : FPUTYPE_68881);

  /* Sanity check to ensure that -msep-data and -mid-shared-library are not
   * both specified together.  Doing so simply doesn't make sense.
   */
  if (TARGET_SEP_DATA && TARGET_ID_SHARED_LIBRARY)
    error ("cannot specify both -msep-data and -mid-shared-library");

  /* If we're generating code for a separate A5 relative data segment,
   * we've got to enable -fPIC as well.  This might be relaxable to
   * -fpic but it hasn't been tested properly.
   */
  if (TARGET_SEP_DATA || TARGET_ID_SHARED_LIBRARY)
    flag_pic = 2;

  /* -mpcrel -fPIC uses 32-bit pc-relative displacements.  Raise an
     error if the target does not support them.  */
  if (TARGET_PCREL && !TARGET_68020 && flag_pic == 2)
    error ("-mpcrel -fPIC is not currently supported on selected cpu");

  /* ??? A historic way of turning on pic, or is this intended to
     be an embedded thing that doesn't have the same name binding
     significance that it does on hosted ELF systems?  */
  if (TARGET_PCREL && flag_pic == 0)
    flag_pic = 1;

  if (!flag_pic)
    {
      m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_JSR;

      m68k_symbolic_jump = "jra %a0";
    }
  else if (TARGET_ID_SHARED_LIBRARY)
    /* All addresses must be loaded from the GOT.  */
    ;
  else if (TARGET_68020 || TARGET_ISAB || TARGET_ISAC)
    {
      if (TARGET_PCREL)
        m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_C;
      else
        m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_P;

      if (TARGET_ISAC)
        /* No unconditional long branch */;
      else if (TARGET_PCREL)
        m68k_symbolic_jump = "bra%.l %c0";
      else
        m68k_symbolic_jump = "bra%.l %p0";
      /* Turn off function cse if we are doing PIC.  We always want
         function call to be done as `bsr foo@PLTPC'.  */
      /* ??? It's traditional to do this for -mpcrel too, but it isn't
         clear how intentional that is.  */
      flag_no_function_cse = 1;
    }

  switch (m68k_symbolic_call_var)
    {
    case M68K_SYMBOLIC_CALL_JSR:
      m68k_symbolic_call = "jsr %a0";
      break;

    case M68K_SYMBOLIC_CALL_BSR_C:
      m68k_symbolic_call = "bsr%.l %c0";
      break;

    case M68K_SYMBOLIC_CALL_BSR_P:
      m68k_symbolic_call = "bsr%.l %p0";
      break;

    case M68K_SYMBOLIC_CALL_NONE:
      gcc_assert (m68k_symbolic_call == NULL);
      break;

    default:
      gcc_unreachable ();
    }

#ifndef ASM_OUTPUT_ALIGN_WITH_NOP
  if (align_labels > 2)
    {
      warning (0, "-falign-labels=%d is not supported", align_labels);
      align_labels = 0;
    }
  if (align_loops > 2)
    {
      warning (0, "-falign-loops=%d is not supported", align_loops);
      align_loops = 0;
    }
#endif

  if (stack_limit_rtx != NULL_RTX && !TARGET_68020)
    {
      warning (0, "-fstack-limit- options are not supported on this cpu");
      stack_limit_rtx = NULL_RTX;
    }

  SUBTARGET_OVERRIDE_OPTIONS;

  /* Setup scheduling options.  */
  if (TUNE_CFV1)
    m68k_sched_cpu = CPU_CFV1;
  else if (TUNE_CFV2)
    m68k_sched_cpu = CPU_CFV2;
  else if (TUNE_CFV3)
    m68k_sched_cpu = CPU_CFV3;
  else if (TUNE_CFV4)
    m68k_sched_cpu = CPU_CFV4;
  else
    {
      m68k_sched_cpu = CPU_UNKNOWN;
      flag_schedule_insns = 0;
      flag_schedule_insns_after_reload = 0;
      flag_modulo_sched = 0;
      flag_live_range_shrinkage = 0;
    }

  if (m68k_sched_cpu != CPU_UNKNOWN)
    {
      if ((m68k_cpu_flags & (FL_CF_EMAC | FL_CF_EMAC_B)) != 0)
        m68k_sched_mac = MAC_CF_EMAC;
      else if ((m68k_cpu_flags & FL_CF_MAC) != 0)
        m68k_sched_mac = MAC_CF_MAC;
      else
        m68k_sched_mac = MAC_NO;
    }
}
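
/* For illustration (a hedged example, not from the original source):
   pairing -mcpu with an -march whose ISA the chosen device does not
   implement trips the warning above.  A device implementing only ISA A+
   combined with -march=isab leaves FL_ISA_B outside the device's flag
   set, so "-mcpu=... conflicts with -march=isab" is reported, and the
   -mcpu entry still determines the code generated.  */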

/* Implement TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE.  */

static void
m68k_override_options_after_change (void)
{
  if (m68k_sched_cpu == CPU_UNKNOWN)
    {
      flag_schedule_insns = 0;
      flag_schedule_insns_after_reload = 0;
      flag_modulo_sched = 0;
      flag_live_range_shrinkage = 0;
    }
}

/* Generate a macro of the form __mPREFIX_cpu_NAME, where PREFIX is the
   given argument and NAME is the argument passed to -mcpu.  Return NULL
   if -mcpu was not passed.  */

const char *
m68k_cpp_cpu_ident (const char *prefix)
{
  if (!m68k_cpu_entry)
    return NULL;
  return concat ("__m", prefix, "_cpu_", m68k_cpu_entry->name, NULL);
}
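
/* For example (illustrative, assuming -mcpu=5208 selects a device
   entry named "5208"): m68k_cpp_cpu_ident ("cf") would return
   "__mcf_cpu_5208", suitable for use as a predefined macro name.  */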

/* Generate a macro of the form __mPREFIX_family_NAME, where PREFIX is the
   given argument and NAME is the name of the representative device for
   the -mcpu argument's family.  Return NULL if -mcpu was not passed.  */

const char *
m68k_cpp_cpu_family (const char *prefix)
{
  if (!m68k_cpu_entry)
    return NULL;
  return concat ("__m", prefix, "_family_", m68k_cpu_entry->family, NULL);
}
\f
/* Return m68k_fk_interrupt_handler if FUNC has an "interrupt" or
   "interrupt_handler" attribute, m68k_fk_interrupt_thread if FUNC has
   an "interrupt_thread" attribute, and m68k_fk_normal_function
   otherwise.  */

enum m68k_function_kind
m68k_get_function_kind (tree func)
{
  tree a;

  gcc_assert (TREE_CODE (func) == FUNCTION_DECL);

  a = lookup_attribute ("interrupt", DECL_ATTRIBUTES (func));
  if (a != NULL_TREE)
    return m68k_fk_interrupt_handler;

  a = lookup_attribute ("interrupt_handler", DECL_ATTRIBUTES (func));
  if (a != NULL_TREE)
    return m68k_fk_interrupt_handler;

  a = lookup_attribute ("interrupt_thread", DECL_ATTRIBUTES (func));
  if (a != NULL_TREE)
    return m68k_fk_interrupt_thread;

  return m68k_fk_normal_function;
}

/* Handle an attribute requiring a FUNCTION_DECL; arguments as in
   struct attribute_spec.handler.  */
static tree
m68k_handle_fndecl_attribute (tree *node, tree name,
                              tree args ATTRIBUTE_UNUSED,
                              int flags ATTRIBUTE_UNUSED,
                              bool *no_add_attrs)
{
  if (TREE_CODE (*node) != FUNCTION_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
               name);
      *no_add_attrs = true;
    }

  if (m68k_get_function_kind (*node) != m68k_fk_normal_function)
    {
      error ("multiple interrupt attributes not allowed");
      *no_add_attrs = true;
    }

  if (!TARGET_FIDOA
      && !strcmp (IDENTIFIER_POINTER (name), "interrupt_thread"))
    {
      error ("interrupt_thread is available only on fido");
      *no_add_attrs = true;
    }

  return NULL_TREE;
}

static void
m68k_compute_frame_layout (void)
{
  int regno, saved;
  unsigned int mask;
  enum m68k_function_kind func_kind =
    m68k_get_function_kind (current_function_decl);
  bool interrupt_handler = func_kind == m68k_fk_interrupt_handler;
  bool interrupt_thread = func_kind == m68k_fk_interrupt_thread;

  /* Only compute the frame once per function.
     Don't cache information until reload has been completed.  */
  if (current_frame.funcdef_no == current_function_funcdef_no
      && reload_completed)
    return;

  current_frame.size = (get_frame_size () + 3) & -4;

  mask = saved = 0;

  /* Interrupt thread does not need to save any register.  */
  if (!interrupt_thread)
    for (regno = 0; regno < 16; regno++)
      if (m68k_save_reg (regno, interrupt_handler))
        {
          mask |= 1 << (regno - D0_REG);
          saved++;
        }
  current_frame.offset = saved * 4;
  current_frame.reg_no = saved;
  current_frame.reg_mask = mask;

  current_frame.foffset = 0;
  mask = saved = 0;
  if (TARGET_HARD_FLOAT)
    {
      /* Interrupt thread does not need to save any register.  */
      if (!interrupt_thread)
        for (regno = 16; regno < 24; regno++)
          if (m68k_save_reg (regno, interrupt_handler))
            {
              mask |= 1 << (regno - FP0_REG);
              saved++;
            }
      current_frame.foffset = saved * TARGET_FP_REG_SIZE;
      current_frame.offset += current_frame.foffset;
    }
  current_frame.fpu_no = saved;
  current_frame.fpu_mask = mask;

  /* Remember what function this frame refers to.  */
  current_frame.funcdef_no = current_function_funcdef_no;
}

/* Worker function for TARGET_CAN_ELIMINATE.  */

bool
m68k_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
{
  return (to == STACK_POINTER_REGNUM ? ! frame_pointer_needed : true);
}

HOST_WIDE_INT
m68k_initial_elimination_offset (int from, int to)
{
  int argptr_offset;
  /* The arg pointer points 8 bytes before the start of the arguments,
     as defined by FIRST_PARM_OFFSET.  This makes it coincident with the
     frame pointer in most frames.  */
  argptr_offset = frame_pointer_needed ? 0 : UNITS_PER_WORD;
  if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
    return argptr_offset;

  m68k_compute_frame_layout ();

  gcc_assert (to == STACK_POINTER_REGNUM);
  switch (from)
    {
    case ARG_POINTER_REGNUM:
      return current_frame.offset + current_frame.size - argptr_offset;
    case FRAME_POINTER_REGNUM:
      return current_frame.offset + current_frame.size;
    default:
      gcc_unreachable ();
    }
}

/* Refer to the array `regs_ever_live' to determine which registers
   to save; `regs_ever_live[I]' is nonzero if register number I
   is ever used in the function.  This function is responsible for
   knowing which registers should not be saved even if used.
   Return true if we need to save REGNO.  */

static bool
m68k_save_reg (unsigned int regno, bool interrupt_handler)
{
  if (flag_pic && regno == PIC_REG)
    {
      if (crtl->saves_all_registers)
        return true;
      if (crtl->uses_pic_offset_table)
        return true;
      /* Reload may introduce constant pool references into a function
         that thitherto didn't need a PIC register.  Note that the test
         above will not catch that case because we will only set
         crtl->uses_pic_offset_table when emitting
         the address reloads.  */
      if (crtl->uses_const_pool)
        return true;
    }

  if (crtl->calls_eh_return)
    {
      unsigned int i;
      for (i = 0; ; i++)
        {
          unsigned int test = EH_RETURN_DATA_REGNO (i);
          if (test == INVALID_REGNUM)
            break;
          if (test == regno)
            return true;
        }
    }

  /* Fixed regs we never touch.  */
  if (fixed_regs[regno])
    return false;

  /* The frame pointer (if it is such) is handled specially.  */
  if (regno == FRAME_POINTER_REGNUM && frame_pointer_needed)
    return false;

  /* Interrupt handlers must also save call_used_regs
     if they are live or when calling nested functions.  */
  if (interrupt_handler)
    {
      if (df_regs_ever_live_p (regno))
        return true;

      if (!crtl->is_leaf && call_used_regs[regno])
        return true;
    }

  /* Never need to save registers that aren't touched.  */
  if (!df_regs_ever_live_p (regno))
    return false;

  /* Otherwise save everything that isn't call-clobbered.  */
  return !call_used_regs[regno];
}

/* Emit RTL for a MOVEM or FMOVEM instruction.  BASE + OFFSET represents
   the lowest memory address.  COUNT is the number of registers to be
   moved, with register REGNO + I being moved if bit I of MASK is set.
   STORE_P specifies the direction of the move and ADJUST_STACK_P says
   whether or not this is a pre-decrement (if STORE_P) or post-increment
   (if !STORE_P) operation.  */

static rtx_insn *
m68k_emit_movem (rtx base, HOST_WIDE_INT offset,
                 unsigned int count, unsigned int regno,
                 unsigned int mask, bool store_p, bool adjust_stack_p)
{
  int i;
  rtx body, addr, src, operands[2];
  enum machine_mode mode;

  body = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (adjust_stack_p + count));
  mode = reg_raw_mode[regno];
  i = 0;

  if (adjust_stack_p)
    {
      src = plus_constant (Pmode, base,
                           (count
                            * GET_MODE_SIZE (mode)
                            * (HOST_WIDE_INT) (store_p ? -1 : 1)));
      XVECEXP (body, 0, i++) = gen_rtx_SET (VOIDmode, base, src);
    }

  for (; mask != 0; mask >>= 1, regno++)
    if (mask & 1)
      {
        addr = plus_constant (Pmode, base, offset);
        operands[!store_p] = gen_frame_mem (mode, addr);
        operands[store_p] = gen_rtx_REG (mode, regno);
        XVECEXP (body, 0, i++)
          = gen_rtx_SET (VOIDmode, operands[0], operands[1]);
        offset += GET_MODE_SIZE (mode);
      }
  gcc_assert (i == XVECLEN (body, 0));

  return emit_insn (body);
}
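
/* For illustration (a hedged sketch of a typical call, not copied from
   elsewhere in this file): saving d2-d4 below the stack pointer with
   pre-decrement would be

     m68k_emit_movem (stack_pointer_rtx, -12, 3, D0_REG, 0x1c,
                      true, true);

   which builds a PARALLEL whose first SET adjusts %sp by -12 and whose
   remaining SETs store d2, d3 and d4 at what become 0(%sp), 4(%sp) and
   8(%sp) after the adjustment.  */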

/* Make INSN a frame-related instruction.  */

static void
m68k_set_frame_related (rtx_insn *insn)
{
  rtx body;
  int i;

  RTX_FRAME_RELATED_P (insn) = 1;
  body = PATTERN (insn);
  if (GET_CODE (body) == PARALLEL)
    for (i = 0; i < XVECLEN (body, 0); i++)
      RTX_FRAME_RELATED_P (XVECEXP (body, 0, i)) = 1;
}

/* Emit RTL for the "prologue" define_expand.  */

void
m68k_expand_prologue (void)
{
  HOST_WIDE_INT fsize_with_regs;
  rtx limit, src, dest;

  m68k_compute_frame_layout ();

  if (flag_stack_usage_info)
    current_function_static_stack_size
      = current_frame.size + current_frame.offset;

  /* If the stack limit is a symbol, we can check it here,
     before actually allocating the space.  */
  if (crtl->limit_stack
      && GET_CODE (stack_limit_rtx) == SYMBOL_REF)
    {
      limit = plus_constant (Pmode, stack_limit_rtx, current_frame.size + 4);
      if (!m68k_legitimate_constant_p (Pmode, limit))
        {
          emit_move_insn (gen_rtx_REG (Pmode, D0_REG), limit);
          limit = gen_rtx_REG (Pmode, D0_REG);
        }
      emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode,
                                            stack_pointer_rtx, limit),
                               stack_pointer_rtx, limit,
                               const1_rtx));
    }

  fsize_with_regs = current_frame.size;
  if (TARGET_COLDFIRE)
    {
      /* ColdFire's move multiple instructions do not allow pre-decrement
         addressing.  Add the size of movem saves to the initial stack
         allocation instead.  */
      if (current_frame.reg_no >= MIN_MOVEM_REGS)
        fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
      if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
        fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
    }

  if (frame_pointer_needed)
    {
      if (fsize_with_regs == 0 && TUNE_68040)
        {
          /* On the 68040, two separate moves are faster than link.w 0.  */
          dest = gen_frame_mem (Pmode,
                                gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
          m68k_set_frame_related (emit_move_insn (dest, frame_pointer_rtx));
          m68k_set_frame_related (emit_move_insn (frame_pointer_rtx,
                                                  stack_pointer_rtx));
        }
      else if (fsize_with_regs < 0x8000 || TARGET_68020)
        m68k_set_frame_related
          (emit_insn (gen_link (frame_pointer_rtx,
                                GEN_INT (-4 - fsize_with_regs))));
      else
        {
          m68k_set_frame_related
            (emit_insn (gen_link (frame_pointer_rtx, GEN_INT (-4))));
          m68k_set_frame_related
            (emit_insn (gen_addsi3 (stack_pointer_rtx,
                                    stack_pointer_rtx,
                                    GEN_INT (-fsize_with_regs))));
        }

      /* If the frame pointer is needed, emit a special barrier that
         will prevent the scheduler from moving stores to the frame
         before the stack adjustment.  */
      emit_insn (gen_stack_tie (stack_pointer_rtx, frame_pointer_rtx));
    }
  else if (fsize_with_regs != 0)
    m68k_set_frame_related
      (emit_insn (gen_addsi3 (stack_pointer_rtx,
                              stack_pointer_rtx,
                              GEN_INT (-fsize_with_regs))));

  if (current_frame.fpu_mask)
    {
      gcc_assert (current_frame.fpu_no >= MIN_FMOVEM_REGS);
      if (TARGET_68881)
        m68k_set_frame_related
          (m68k_emit_movem (stack_pointer_rtx,
                            current_frame.fpu_no * -GET_MODE_SIZE (XFmode),
                            current_frame.fpu_no, FP0_REG,
                            current_frame.fpu_mask, true, true));
      else
        {
          int offset;

          /* If we're using moveml to save the integer registers,
             the stack pointer will point to the bottom of the moveml
             save area.  Find the stack offset of the first FP register.  */
          if (current_frame.reg_no < MIN_MOVEM_REGS)
            offset = 0;
          else
            offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
          m68k_set_frame_related
            (m68k_emit_movem (stack_pointer_rtx, offset,
                              current_frame.fpu_no, FP0_REG,
                              current_frame.fpu_mask, true, false));
        }
    }

  /* If the stack limit is not a symbol, check it here.
     This has the disadvantage that it may be too late...  */
  if (crtl->limit_stack)
    {
      if (REG_P (stack_limit_rtx))
        emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode, stack_pointer_rtx,
                                              stack_limit_rtx),
                                 stack_pointer_rtx, stack_limit_rtx,
                                 const1_rtx));

      else if (GET_CODE (stack_limit_rtx) != SYMBOL_REF)
        warning (0, "stack limit expression is not supported");
    }

  if (current_frame.reg_no < MIN_MOVEM_REGS)
    {
      /* Store each register separately in the same order moveml does.  */
      int i;

      for (i = 16; i-- > 0; )
        if (current_frame.reg_mask & (1 << i))
          {
            src = gen_rtx_REG (SImode, D0_REG + i);
            dest = gen_frame_mem (SImode,
                                  gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
            m68k_set_frame_related (emit_insn (gen_movsi (dest, src)));
          }
    }
  else
    {
      if (TARGET_COLDFIRE)
        /* The required register save space has already been allocated.
           The first register should be stored at (%sp).  */
        m68k_set_frame_related
          (m68k_emit_movem (stack_pointer_rtx, 0,
                            current_frame.reg_no, D0_REG,
                            current_frame.reg_mask, true, false));
      else
        m68k_set_frame_related
          (m68k_emit_movem (stack_pointer_rtx,
                            current_frame.reg_no * -GET_MODE_SIZE (SImode),
                            current_frame.reg_no, D0_REG,
                            current_frame.reg_mask, true, true));
    }

  if (!TARGET_SEP_DATA
      && crtl->uses_pic_offset_table)
    emit_insn (gen_load_got (pic_offset_table_rtx));
}
\f
/* Return true if a simple (return) instruction is sufficient for this
   function (i.e. if no epilogue is needed).  */

bool
m68k_use_return_insn (void)
{
  if (!reload_completed || frame_pointer_needed || get_frame_size () != 0)
    return false;

  m68k_compute_frame_layout ();
  return current_frame.offset == 0;
}

/* Emit RTL for the "epilogue" or "sibcall_epilogue" define_expand;
   SIBCALL_P says which.

   The function epilogue should not depend on the current stack pointer!
   It should use the frame pointer only, if there is a frame pointer.
   This is mandatory because of alloca; we also take advantage of it to
   omit stack adjustments before returning.  */

void
m68k_expand_epilogue (bool sibcall_p)
{
  HOST_WIDE_INT fsize, fsize_with_regs;
  bool big, restore_from_sp;

  m68k_compute_frame_layout ();

  fsize = current_frame.size;
  big = false;
  restore_from_sp = false;

  /* FIXME : crtl->is_leaf below is too strong.
     What we really need to know there is if there could be pending
     stack adjustment needed at that point.  */
  restore_from_sp = (!frame_pointer_needed
                     || (!cfun->calls_alloca && crtl->is_leaf));

  /* fsize_with_regs is the size we need to adjust the sp when
     popping the frame.  */
  fsize_with_regs = fsize;
  if (TARGET_COLDFIRE && restore_from_sp)
    {
      /* ColdFire's move multiple instructions do not allow post-increment
         addressing.  Add the size of movem loads to the final deallocation
         instead.  */
      if (current_frame.reg_no >= MIN_MOVEM_REGS)
        fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
      if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
        fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
    }

  if (current_frame.offset + fsize >= 0x8000
      && !restore_from_sp
      && (current_frame.reg_mask || current_frame.fpu_mask))
    {
      if (TARGET_COLDFIRE
          && (current_frame.reg_no >= MIN_MOVEM_REGS
              || current_frame.fpu_no >= MIN_FMOVEM_REGS))
        {
          /* ColdFire's move multiple instructions do not support the
             (d8,Ax,Xi) addressing mode, so we might as well use a normal
             stack-based restore.  */
          emit_move_insn (gen_rtx_REG (Pmode, A1_REG),
                          GEN_INT (-(current_frame.offset + fsize)));
          emit_insn (gen_addsi3 (stack_pointer_rtx,
                                 gen_rtx_REG (Pmode, A1_REG),
                                 frame_pointer_rtx));
          restore_from_sp = true;
        }
      else
        {
          emit_move_insn (gen_rtx_REG (Pmode, A1_REG), GEN_INT (-fsize));
          fsize = 0;
          big = true;
        }
    }

  if (current_frame.reg_no < MIN_MOVEM_REGS)
    {
      /* Restore each register separately in the same order moveml does.  */
      int i;
      HOST_WIDE_INT offset;

      offset = current_frame.offset + fsize;
      for (i = 0; i < 16; i++)
        if (current_frame.reg_mask & (1 << i))
          {
            rtx addr;

            if (big)
              {
                /* Generate the address -OFFSET(%fp,%a1.l).  */
                addr = gen_rtx_REG (Pmode, A1_REG);
                addr = gen_rtx_PLUS (Pmode, addr, frame_pointer_rtx);
                addr = plus_constant (Pmode, addr, -offset);
              }
            else if (restore_from_sp)
              addr = gen_rtx_POST_INC (Pmode, stack_pointer_rtx);
            else
              addr = plus_constant (Pmode, frame_pointer_rtx, -offset);
            emit_move_insn (gen_rtx_REG (SImode, D0_REG + i),
                            gen_frame_mem (SImode, addr));
            offset -= GET_MODE_SIZE (SImode);
          }
    }
  else if (current_frame.reg_mask)
    {
      if (big)
        m68k_emit_movem (gen_rtx_PLUS (Pmode,
                                       gen_rtx_REG (Pmode, A1_REG),
                                       frame_pointer_rtx),
                         -(current_frame.offset + fsize),
                         current_frame.reg_no, D0_REG,
                         current_frame.reg_mask, false, false);
      else if (restore_from_sp)
        m68k_emit_movem (stack_pointer_rtx, 0,
                         current_frame.reg_no, D0_REG,
                         current_frame.reg_mask, false,
                         !TARGET_COLDFIRE);
      else
        m68k_emit_movem (frame_pointer_rtx,
                         -(current_frame.offset + fsize),
                         current_frame.reg_no, D0_REG,
                         current_frame.reg_mask, false, false);
    }

  if (current_frame.fpu_no > 0)
    {
      if (big)
        m68k_emit_movem (gen_rtx_PLUS (Pmode,
                                       gen_rtx_REG (Pmode, A1_REG),
                                       frame_pointer_rtx),
                         -(current_frame.foffset + fsize),
                         current_frame.fpu_no, FP0_REG,
                         current_frame.fpu_mask, false, false);
      else if (restore_from_sp)
        {
          if (TARGET_COLDFIRE)
            {
              int offset;

              /* If we used moveml to restore the integer registers, the
                 stack pointer will still point to the bottom of the moveml
                 save area.  Find the stack offset of the first FP
                 register.  */
              if (current_frame.reg_no < MIN_MOVEM_REGS)
                offset = 0;
              else
                offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
              m68k_emit_movem (stack_pointer_rtx, offset,
                               current_frame.fpu_no, FP0_REG,
                               current_frame.fpu_mask, false, false);
            }
          else
            m68k_emit_movem (stack_pointer_rtx, 0,
                             current_frame.fpu_no, FP0_REG,
                             current_frame.fpu_mask, false, true);
        }
      else
        m68k_emit_movem (frame_pointer_rtx,
                         -(current_frame.foffset + fsize),
                         current_frame.fpu_no, FP0_REG,
                         current_frame.fpu_mask, false, false);
    }

  if (frame_pointer_needed)
    emit_insn (gen_unlink (frame_pointer_rtx));
  else if (fsize_with_regs)
    emit_insn (gen_addsi3 (stack_pointer_rtx,
                           stack_pointer_rtx,
                           GEN_INT (fsize_with_regs)));

  if (crtl->calls_eh_return)
    emit_insn (gen_addsi3 (stack_pointer_rtx,
                           stack_pointer_rtx,
                           EH_RETURN_STACKADJ_RTX));

  if (!sibcall_p)
    emit_jump_insn (ret_rtx);
}
\f
/* Return true if X is a valid comparison operator for the dbcc
   instruction.

   Note it rejects floating point comparison operators.
   (In the future we could use Fdbcc).

   It also rejects some comparisons when CC_NO_OVERFLOW is set.  */

int
valid_dbcc_comparison_p_2 (rtx x, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (x))
    {
      case EQ: case NE: case GTU: case LTU:
      case GEU: case LEU:
        return 1;

      /* Reject some when CC_NO_OVERFLOW is set.  This may be overly
         conservative.  */
      case GT: case LT: case GE: case LE:
        return ! (cc_prev_status.flags & CC_NO_OVERFLOW);
      default:
        return 0;
    }
}

/* Return nonzero if flags are currently in the 68881 flag register.  */
int
flags_in_68881 (void)
{
  /* We could add support for these in the future */
  return cc_status.flags & CC_IN_68881;
}

/* Return true if PARALLEL contains register REGNO.  */
static bool
m68k_reg_present_p (const_rtx parallel, unsigned int regno)
{
  int i;

  if (REG_P (parallel) && REGNO (parallel) == regno)
    return true;

  if (GET_CODE (parallel) != PARALLEL)
    return false;

  for (i = 0; i < XVECLEN (parallel, 0); ++i)
    {
      const_rtx x;

      x = XEXP (XVECEXP (parallel, 0, i), 0);
      if (REG_P (x) && REGNO (x) == regno)
        return true;
    }

  return false;
}

/* Implement TARGET_FUNCTION_OK_FOR_SIBCALL_P.  */

static bool
m68k_ok_for_sibcall_p (tree decl, tree exp)
{
  enum m68k_function_kind kind;

  /* We cannot use sibcalls for nested functions because we use the
     static chain register for indirect calls.  */
  if (CALL_EXPR_STATIC_CHAIN (exp))
    return false;

  if (!VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
    {
      /* Check that the return value locations are the same.  For
         example, that we aren't returning a value from the sibling in
         a D0 register but then need to transfer it to an A0 register.  */
      rtx cfun_value;
      rtx call_value;

      cfun_value = FUNCTION_VALUE (TREE_TYPE (DECL_RESULT (cfun->decl)),
                                   cfun->decl);
      call_value = FUNCTION_VALUE (TREE_TYPE (exp), decl);

      /* Check that the values are equal or that the result the callee
         function returns is a superset of what the current function
         returns.  */
      if (!(rtx_equal_p (cfun_value, call_value)
            || (REG_P (cfun_value)
                && m68k_reg_present_p (call_value, REGNO (cfun_value)))))
        return false;
    }

  kind = m68k_get_function_kind (current_function_decl);
  if (kind == m68k_fk_normal_function)
    /* We can always sibcall from a normal function, because it's
       undefined if it is calling an interrupt function.  */
    return true;

  /* Otherwise we can only sibcall if the function kind is known to be
     the same.  */
  if (decl && m68k_get_function_kind (decl) == kind)
    return true;

  return false;
}

/* On the m68k all args are always pushed.  */

static rtx
m68k_function_arg (cumulative_args_t cum ATTRIBUTE_UNUSED,
                   enum machine_mode mode ATTRIBUTE_UNUSED,
                   const_tree type ATTRIBUTE_UNUSED,
                   bool named ATTRIBUTE_UNUSED)
{
  return NULL_RTX;
}

static void
m68k_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
                           const_tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);

  *cum += (mode != BLKmode
           ? (GET_MODE_SIZE (mode) + 3) & ~3
           : (int_size_in_bytes (type) + 3) & ~3);
}

/* Convert X to a legitimate function call memory reference and return the
   result.  */

rtx
m68k_legitimize_call_address (rtx x)
{
  gcc_assert (MEM_P (x));
  if (call_operand (XEXP (x, 0), VOIDmode))
    return x;
  return replace_equiv_address (x, force_reg (Pmode, XEXP (x, 0)));
}

/* Likewise for sibling calls.  */

rtx
m68k_legitimize_sibcall_address (rtx x)
{
  gcc_assert (MEM_P (x));
  if (sibcall_operand (XEXP (x, 0), VOIDmode))
    return x;

  emit_move_insn (gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM), XEXP (x, 0));
  return replace_equiv_address (x, gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM));
}

/* Convert X to a legitimate address and return it if successful.  Otherwise
   return X.

   For the 68000, we handle X+REG by loading X into a register R and
   using R+REG.  R will go in an address reg and indexing will be used.
   However, if REG is a broken-out memory address or multiplication,
   nothing needs to be done because REG can certainly go in an address reg.  */

static rtx
m68k_legitimize_address (rtx x, rtx oldx, enum machine_mode mode)
{
  if (m68k_tls_symbol_p (x))
    return m68k_legitimize_tls_address (x);

  if (GET_CODE (x) == PLUS)
    {
      int ch = (x) != (oldx);
      int copied = 0;

#define COPY_ONCE(Y) if (!copied) { Y = copy_rtx (Y); copied = ch = 1; }

      if (GET_CODE (XEXP (x, 0)) == MULT)
        {
          COPY_ONCE (x);
          XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
        }
      if (GET_CODE (XEXP (x, 1)) == MULT)
        {
          COPY_ONCE (x);
          XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
        }
      if (ch)
        {
          if (GET_CODE (XEXP (x, 1)) == REG
              && GET_CODE (XEXP (x, 0)) == REG)
            {
              if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
                {
                  COPY_ONCE (x);
                  x = force_operand (x, 0);
                }
              return x;
            }
          if (memory_address_p (mode, x))
            return x;
        }
      if (GET_CODE (XEXP (x, 0)) == REG
          || (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
              && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
              && GET_MODE (XEXP (XEXP (x, 0), 0)) == HImode))
        {
          rtx temp = gen_reg_rtx (Pmode);
          rtx val = force_operand (XEXP (x, 1), 0);
          emit_move_insn (temp, val);
          COPY_ONCE (x);
          XEXP (x, 1) = temp;
          if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
              && GET_CODE (XEXP (x, 0)) == REG)
            x = force_operand (x, 0);
        }
      else if (GET_CODE (XEXP (x, 1)) == REG
               || (GET_CODE (XEXP (x, 1)) == SIGN_EXTEND
                   && GET_CODE (XEXP (XEXP (x, 1), 0)) == REG
                   && GET_MODE (XEXP (XEXP (x, 1), 0)) == HImode))
        {
          rtx temp = gen_reg_rtx (Pmode);
          rtx val = force_operand (XEXP (x, 0), 0);
          emit_move_insn (temp, val);
          COPY_ONCE (x);
          XEXP (x, 0) = temp;
          if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
              && GET_CODE (XEXP (x, 1)) == REG)
            x = force_operand (x, 0);
        }
    }

  return x;
}
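
/* For example (an illustrative sketch): legitimizing
   (plus (symbol_ref "x") (reg %d0)) takes the second REG branch in the
   function above, loading the symbol into a fresh pseudo A and yielding
   (plus (reg A) (reg %d0)), which fits the indexed addressing mode.  */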
1534
1535
1536 /* Output a dbCC; jCC sequence. Note we do not handle the
1537 floating point version of this sequence (Fdbcc). We also
1538 do not handle alternative conditions when CC_NO_OVERFLOW is
1539 set. It is assumed that valid_dbcc_comparison_p and flags_in_68881 will
1540 kick those out before we get here. */
1541
1542 void
1543 output_dbcc_and_branch (rtx *operands)
1544 {
1545 switch (GET_CODE (operands[3]))
1546 {
1547 case EQ:
1548 output_asm_insn ("dbeq %0,%l1\n\tjeq %l2", operands);
1549 break;
1550
1551 case NE:
1552 output_asm_insn ("dbne %0,%l1\n\tjne %l2", operands);
1553 break;
1554
1555 case GT:
1556 output_asm_insn ("dbgt %0,%l1\n\tjgt %l2", operands);
1557 break;
1558
1559 case GTU:
1560 output_asm_insn ("dbhi %0,%l1\n\tjhi %l2", operands);
1561 break;
1562
1563 case LT:
1564 output_asm_insn ("dblt %0,%l1\n\tjlt %l2", operands);
1565 break;
1566
1567 case LTU:
1568 output_asm_insn ("dbcs %0,%l1\n\tjcs %l2", operands);
1569 break;
1570
1571 case GE:
1572 output_asm_insn ("dbge %0,%l1\n\tjge %l2", operands);
1573 break;
1574
1575 case GEU:
1576 output_asm_insn ("dbcc %0,%l1\n\tjcc %l2", operands);
1577 break;
1578
1579 case LE:
1580 output_asm_insn ("dble %0,%l1\n\tjle %l2", operands);
1581 break;
1582
1583 case LEU:
1584 output_asm_insn ("dbls %0,%l1\n\tjls %l2", operands);
1585 break;
1586
1587 default:
1588 gcc_unreachable ();
1589 }
1590
1591 /* If the decrement is to be done in SImode, then we have
1592 to compensate for the fact that dbcc decrements in HImode. */
1593 switch (GET_MODE (operands[0]))
1594 {
1595 case SImode:
1596 output_asm_insn ("clr%.w %0\n\tsubq%.l #1,%0\n\tjpl %l1", operands);
1597 break;
1598
1599 case HImode:
1600 break;
1601
1602 default:
1603 gcc_unreachable ();
1604 }
1605 }
1606
1607 const char *
1608 output_scc_di (rtx op, rtx operand1, rtx operand2, rtx dest)
1609 {
1610 rtx loperands[7];
1611 enum rtx_code op_code = GET_CODE (op);
1612
1613 /* This does not produce a useful cc. */
1614 CC_STATUS_INIT;
1615
1616 /* The m68k cmp.l instruction requires operand1 to be a reg as used
1617 below. Swap the operands and change the op if these requirements
1618 are not fulfilled. */
1619 if (GET_CODE (operand2) == REG && GET_CODE (operand1) != REG)
1620 {
1621 rtx tmp = operand1;
1622
1623 operand1 = operand2;
1624 operand2 = tmp;
1625 op_code = swap_condition (op_code);
1626 }
1627 loperands[0] = operand1;
1628 if (GET_CODE (operand1) == REG)
1629 loperands[1] = gen_rtx_REG (SImode, REGNO (operand1) + 1);
1630 else
1631 loperands[1] = adjust_address (operand1, SImode, 4);
1632 if (operand2 != const0_rtx)
1633 {
1634 loperands[2] = operand2;
1635 if (GET_CODE (operand2) == REG)
1636 loperands[3] = gen_rtx_REG (SImode, REGNO (operand2) + 1);
1637 else
1638 loperands[3] = adjust_address (operand2, SImode, 4);
1639 }
1640 loperands[4] = gen_label_rtx ();
1641 if (operand2 != const0_rtx)
1642 output_asm_insn ("cmp%.l %2,%0\n\tjne %l4\n\tcmp%.l %3,%1", loperands);
1643 else
1644 {
1645 if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[0]))
1646 output_asm_insn ("tst%.l %0", loperands);
1647 else
1648 output_asm_insn ("cmp%.w #0,%0", loperands);
1649
1650 output_asm_insn ("jne %l4", loperands);
1651
1652 if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[1]))
1653 output_asm_insn ("tst%.l %1", loperands);
1654 else
1655 output_asm_insn ("cmp%.w #0,%1", loperands);
1656 }
1657
1658 loperands[5] = dest;
1659
1660 switch (op_code)
1661 {
1662 case EQ:
1663 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1664 CODE_LABEL_NUMBER (loperands[4]));
1665 output_asm_insn ("seq %5", loperands);
1666 break;
1667
1668 case NE:
1669 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1670 CODE_LABEL_NUMBER (loperands[4]));
1671 output_asm_insn ("sne %5", loperands);
1672 break;
1673
1674 case GT:
1675 loperands[6] = gen_label_rtx ();
1676 output_asm_insn ("shi %5\n\tjra %l6", loperands);
1677 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1678 CODE_LABEL_NUMBER (loperands[4]));
1679 output_asm_insn ("sgt %5", loperands);
1680 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1681 CODE_LABEL_NUMBER (loperands[6]));
1682 break;
1683
1684 case GTU:
1685 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1686 CODE_LABEL_NUMBER (loperands[4]));
1687 output_asm_insn ("shi %5", loperands);
1688 break;
1689
1690 case LT:
1691 loperands[6] = gen_label_rtx ();
1692 output_asm_insn ("scs %5\n\tjra %l6", loperands);
1693 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1694 CODE_LABEL_NUMBER (loperands[4]));
1695 output_asm_insn ("slt %5", loperands);
1696 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1697 CODE_LABEL_NUMBER (loperands[6]));
1698 break;
1699
1700 case LTU:
1701 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1702 CODE_LABEL_NUMBER (loperands[4]));
1703 output_asm_insn ("scs %5", loperands);
1704 break;
1705
1706 case GE:
1707 loperands[6] = gen_label_rtx ();
1708 output_asm_insn ("scc %5\n\tjra %l6", loperands);
1709 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1710 CODE_LABEL_NUMBER (loperands[4]));
1711 output_asm_insn ("sge %5", loperands);
1712 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1713 CODE_LABEL_NUMBER (loperands[6]));
1714 break;
1715
1716 case GEU:
1717 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1718 CODE_LABEL_NUMBER (loperands[4]));
1719 output_asm_insn ("scc %5", loperands);
1720 break;
1721
1722 case LE:
1723 loperands[6] = gen_label_rtx ();
1724 output_asm_insn ("sls %5\n\tjra %l6", loperands);
1725 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1726 CODE_LABEL_NUMBER (loperands[4]));
1727 output_asm_insn ("sle %5", loperands);
1728 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1729 CODE_LABEL_NUMBER (loperands[6]));
1730 break;
1731
1732 case LEU:
1733 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1734 CODE_LABEL_NUMBER (loperands[4]));
1735 output_asm_insn ("sls %5", loperands);
1736 break;
1737
1738 default:
1739 gcc_unreachable ();
1740 }
1741 return "";
1742 }
1743
1744 const char *
1745 output_btst (rtx *operands, rtx countop, rtx dataop, rtx_insn *insn, int signpos)
1746 {
1747 operands[0] = countop;
1748 operands[1] = dataop;
1749
1750 if (GET_CODE (countop) == CONST_INT)
1751 {
1752 register int count = INTVAL (countop);
1753 /* If COUNT is bigger than size of storage unit in use,
1754 advance to the containing unit of same size. */
1755 if (count > signpos)
1756 {
1757 int offset = (count & ~signpos) / 8;
1758 count = count & signpos;
1759 operands[1] = dataop = adjust_address (dataop, QImode, offset);
1760 }
1761 if (count == signpos)
1762 cc_status.flags = CC_NOT_POSITIVE | CC_Z_IN_NOT_N;
1763 else
1764 cc_status.flags = CC_NOT_NEGATIVE | CC_Z_IN_NOT_N;
1765
1766 /* These three statements used to use next_insns_test_no...
1767 but it appears that this should do the same job. */
1768 if (count == 31
1769 && next_insn_tests_no_inequality (insn))
1770 return "tst%.l %1";
1771 if (count == 15
1772 && next_insn_tests_no_inequality (insn))
1773 return "tst%.w %1";
1774 if (count == 7
1775 && next_insn_tests_no_inequality (insn))
1776 return "tst%.b %1";
1777 /* Try to use `movew to ccr' followed by the appropriate branch insn.
1778 On some m68k variants unfortunately that's slower than btst.
1779 On 68000 and higher, that should also work for all HImode operands. */
1780 if (TUNE_CPU32 || TARGET_COLDFIRE || optimize_size)
1781 {
1782 if (count == 3 && DATA_REG_P (operands[1])
1783 && next_insn_tests_no_inequality (insn))
1784 {
1785 cc_status.flags = CC_NOT_NEGATIVE | CC_Z_IN_NOT_N | CC_NO_OVERFLOW;
1786 return "move%.w %1,%%ccr";
1787 }
1788 if (count == 2 && DATA_REG_P (operands[1])
1789 && next_insn_tests_no_inequality (insn))
1790 {
1791 cc_status.flags = CC_NOT_NEGATIVE | CC_INVERTED | CC_NO_OVERFLOW;
1792 return "move%.w %1,%%ccr";
1793 }
1794 /* count == 1 followed by bvc/bvs and
1795 count == 0 followed by bcc/bcs are also possible, but need
1796 m68k-specific CC_Z_IN_NOT_V and CC_Z_IN_NOT_C flags. */
1797 }
1798
1799 cc_status.flags = CC_NOT_NEGATIVE;
1800 }
1801 return "btst %0,%1";
1802 }
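
/* A worked example of the above (illustrative comment only): testing
   bit 7 of a byte operand, i.e. COUNT == SIGNPOS == 7, followed by an
   insn that only tests for equality, comes out as a plain

       tst.b <operand>

   since the tested bit then lands directly in the N flag.  */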
1803 \f
1804 /* Return true if X is a legitimate base register. STRICT_P says
1805 whether we need strict checking. */
1806
1807 bool
1808 m68k_legitimate_base_reg_p (rtx x, bool strict_p)
1809 {
1810 /* Allow SUBREG everywhere we allow REG. This results in better code. */
1811 if (!strict_p && GET_CODE (x) == SUBREG)
1812 x = SUBREG_REG (x);
1813
1814 return (REG_P (x)
1815 && (strict_p
1816 ? REGNO_OK_FOR_BASE_P (REGNO (x))
1817 : REGNO_OK_FOR_BASE_NONSTRICT_P (REGNO (x))));
1818 }
1819
1820 /* Return true if X is a legitimate index register. STRICT_P says
1821 whether we need strict checking. */
1822
1823 bool
1824 m68k_legitimate_index_reg_p (rtx x, bool strict_p)
1825 {
1826 if (!strict_p && GET_CODE (x) == SUBREG)
1827 x = SUBREG_REG (x);
1828
1829 return (REG_P (x)
1830 && (strict_p
1831 ? REGNO_OK_FOR_INDEX_P (REGNO (x))
1832 : REGNO_OK_FOR_INDEX_NONSTRICT_P (REGNO (x))));
1833 }
1834
1835 /* Return true if X is a legitimate index expression for a (d8,An,Xn) or
1836 (bd,An,Xn) addressing mode. Fill in the INDEX and SCALE fields of
1837 ADDRESS if so. STRICT_P says whether we need strict checking. */
1838
1839 static bool
1840 m68k_decompose_index (rtx x, bool strict_p, struct m68k_address *address)
1841 {
1842 int scale;
1843
1844 /* Check for a scale factor. */
1845 scale = 1;
1846 if ((TARGET_68020 || TARGET_COLDFIRE)
1847 && GET_CODE (x) == MULT
1848 && GET_CODE (XEXP (x, 1)) == CONST_INT
1849 && (INTVAL (XEXP (x, 1)) == 2
1850 || INTVAL (XEXP (x, 1)) == 4
1851 || (INTVAL (XEXP (x, 1)) == 8
1852 && (TARGET_COLDFIRE_FPU || !TARGET_COLDFIRE))))
1853 {
1854 scale = INTVAL (XEXP (x, 1));
1855 x = XEXP (x, 0);
1856 }
1857
1858 /* Check for a word extension. */
1859 if (!TARGET_COLDFIRE
1860 && GET_CODE (x) == SIGN_EXTEND
1861 && GET_MODE (XEXP (x, 0)) == HImode)
1862 x = XEXP (x, 0);
1863
1864 if (m68k_legitimate_index_reg_p (x, strict_p))
1865 {
1866 address->scale = scale;
1867 address->index = x;
1868 return true;
1869 }
1870
1871 return false;
1872 }
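
/* A hedged sketch (hypothetical helper, not used by GCC itself) showing
   how to construct the scaled-index RTL shape that m68k_decompose_index
   accepts on 68020/ColdFire targets, e.g. the d1.l*4 part of
   a0@(d1:l:4).  */

static rtx
m68k_example_scaled_index (rtx index_reg, int scale)
{
  /* (mult (reg Xn) (const_int SCALE)) with SCALE in {2, 4, 8};
     a scale of 1 is expressed by the bare index register.  */
  if (scale == 1)
    return index_reg;
  return gen_rtx_MULT (SImode, index_reg, GEN_INT (scale));
}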
1873
1874 /* Return true if X is an illegitimate symbolic constant. */
1875
1876 bool
1877 m68k_illegitimate_symbolic_constant_p (rtx x)
1878 {
1879 rtx base, offset;
1880
1881 if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P)
1882 {
1883 split_const (x, &base, &offset);
1884 if (GET_CODE (base) == SYMBOL_REF
1885 && !offset_within_block_p (base, INTVAL (offset)))
1886 return true;
1887 }
1888 return m68k_tls_reference_p (x, false);
1889 }
1890
1891 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
1892
1893 static bool
1894 m68k_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
1895 {
1896 return m68k_illegitimate_symbolic_constant_p (x);
1897 }
1898
1899 /* Return true if X is a legitimate constant address that can reach
1900 bytes in the range [X, X + REACH). STRICT_P says whether we need
1901 strict checking. */
1902
1903 static bool
1904 m68k_legitimate_constant_address_p (rtx x, unsigned int reach, bool strict_p)
1905 {
1906 rtx base, offset;
1907
1908 if (!CONSTANT_ADDRESS_P (x))
1909 return false;
1910
1911 if (flag_pic
1912 && !(strict_p && TARGET_PCREL)
1913 && symbolic_operand (x, VOIDmode))
1914 return false;
1915
1916 if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P && reach > 1)
1917 {
1918 split_const (x, &base, &offset);
1919 if (GET_CODE (base) == SYMBOL_REF
1920 && !offset_within_block_p (base, INTVAL (offset) + reach - 1))
1921 return false;
1922 }
1923
1924 return !m68k_tls_reference_p (x, false);
1925 }
1926
1927 /* Return true if X is a LABEL_REF for a jump table. Assume that unplaced
1928 labels will become jump tables. */
1929
1930 static bool
1931 m68k_jump_table_ref_p (rtx x)
1932 {
1933 if (GET_CODE (x) != LABEL_REF)
1934 return false;
1935
1936 x = XEXP (x, 0);
1937 if (!NEXT_INSN (x) && !PREV_INSN (x))
1938 return true;
1939
1940 x = next_nonnote_insn (x);
1941 return x && JUMP_TABLE_DATA_P (x);
1942 }
1943
1944 /* Return true if X is a legitimate address for values of mode MODE.
1945 STRICT_P says whether strict checking is needed. If the address
1946 is valid, describe its components in *ADDRESS. */
1947
1948 static bool
1949 m68k_decompose_address (enum machine_mode mode, rtx x,
1950 bool strict_p, struct m68k_address *address)
1951 {
1952 unsigned int reach;
1953
1954 memset (address, 0, sizeof (*address));
1955
1956 if (mode == BLKmode)
1957 reach = 1;
1958 else
1959 reach = GET_MODE_SIZE (mode);
1960
1961 /* Check for (An) (mode 2). */
1962 if (m68k_legitimate_base_reg_p (x, strict_p))
1963 {
1964 address->base = x;
1965 return true;
1966 }
1967
1968 /* Check for -(An) and (An)+ (modes 3 and 4). */
1969 if ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_INC)
1970 && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
1971 {
1972 address->code = GET_CODE (x);
1973 address->base = XEXP (x, 0);
1974 return true;
1975 }
1976
1977 /* Check for (d16,An) (mode 5). */
1978 if (GET_CODE (x) == PLUS
1979 && GET_CODE (XEXP (x, 1)) == CONST_INT
1980 && IN_RANGE (INTVAL (XEXP (x, 1)), -0x8000, 0x8000 - reach)
1981 && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
1982 {
1983 address->base = XEXP (x, 0);
1984 address->offset = XEXP (x, 1);
1985 return true;
1986 }
1987
1988 /* Check for GOT loads. These are (bd,An,Xn) addresses if
1989 TARGET_68020 && flag_pic == 2, otherwise they are (d16,An)
1990 addresses. */
1991 if (GET_CODE (x) == PLUS
1992 && XEXP (x, 0) == pic_offset_table_rtx)
1993 {
1994 /* As we are processing a PLUS, do not unwrap RELOC32 symbols --
1995 they are invalid in this context. */
1996 if (m68k_unwrap_symbol (XEXP (x, 1), false) != XEXP (x, 1))
1997 {
1998 address->base = XEXP (x, 0);
1999 address->offset = XEXP (x, 1);
2000 return true;
2001 }
2002 }
2003
2004 /* The ColdFire FPU only accepts addressing modes 2-5. */
2005 if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
2006 return false;
2007
2008 /* Check for (xxx).w and (xxx).l. Also, in the TARGET_PCREL case,
2009 check for (d16,PC) or (bd,PC,Xn) with a suppressed index register.
2010 All these modes are variations of mode 7. */
2011 if (m68k_legitimate_constant_address_p (x, reach, strict_p))
2012 {
2013 address->offset = x;
2014 return true;
2015 }
2016
2017 /* Check for (d8,PC,Xn), a mode 7 form. This case is needed for
2018 tablejumps.
2019
2020 ??? do_tablejump creates these addresses before placing the target
2021 label, so we have to assume that unplaced labels are jump table
2022 references. It seems unlikely that we would ever generate indexed
2023 accesses to unplaced labels in other cases. */
2024 if (GET_CODE (x) == PLUS
2025 && m68k_jump_table_ref_p (XEXP (x, 1))
2026 && m68k_decompose_index (XEXP (x, 0), strict_p, address))
2027 {
2028 address->offset = XEXP (x, 1);
2029 return true;
2030 }
2031
2032 /* Everything hereafter deals with (d8,An,Xn.SIZE*SCALE) or
2033 (bd,An,Xn.SIZE*SCALE) addresses. */
2034
2035 if (TARGET_68020)
2036 {
2037 /* Check for a nonzero base displacement. */
2038 if (GET_CODE (x) == PLUS
2039 && m68k_legitimate_constant_address_p (XEXP (x, 1), reach, strict_p))
2040 {
2041 address->offset = XEXP (x, 1);
2042 x = XEXP (x, 0);
2043 }
2044
2045 /* Check for a suppressed index register. */
2046 if (m68k_legitimate_base_reg_p (x, strict_p))
2047 {
2048 address->base = x;
2049 return true;
2050 }
2051
2052 /* Check for a suppressed base register. Do not allow this case
2053 for non-symbolic offsets as it effectively gives gcc freedom
2054 to treat data registers as base registers, which can generate
2055 worse code. */
2056 if (address->offset
2057 && symbolic_operand (address->offset, VOIDmode)
2058 && m68k_decompose_index (x, strict_p, address))
2059 return true;
2060 }
2061 else
2062 {
2063 /* Check for a nonzero base displacement. */
2064 if (GET_CODE (x) == PLUS
2065 && GET_CODE (XEXP (x, 1)) == CONST_INT
2066 && IN_RANGE (INTVAL (XEXP (x, 1)), -0x80, 0x80 - reach))
2067 {
2068 address->offset = XEXP (x, 1);
2069 x = XEXP (x, 0);
2070 }
2071 }
2072
2073 /* We now expect the sum of a base and an index. */
2074 if (GET_CODE (x) == PLUS)
2075 {
2076 if (m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p)
2077 && m68k_decompose_index (XEXP (x, 1), strict_p, address))
2078 {
2079 address->base = XEXP (x, 0);
2080 return true;
2081 }
2082
2083 if (m68k_legitimate_base_reg_p (XEXP (x, 1), strict_p)
2084 && m68k_decompose_index (XEXP (x, 0), strict_p, address))
2085 {
2086 address->base = XEXP (x, 1);
2087 return true;
2088 }
2089 }
2090 return false;
2091 }
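
/* Worked examples (illustrative comment only), writing a2 for a base
   register and d3 for an index register:

     (reg a2)                               -> base a2            (mode 2)
     (pre_dec (reg a2))                     -> code PRE_DEC, base a2
     (plus (reg a2) (const_int 42))         -> base a2, offset 42 (mode 5)
     (plus (reg a2)
           (mult (reg d3) (const_int 4)))   -> base a2, index d3, scale 4
     (symbol_ref "x")                       -> offset x           (mode 7)  */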
2092
2093 /* Return true if X is a legitimate address for values of mode MODE.
2094 STRICT_P says whether strict checking is needed. */
2095
2096 bool
2097 m68k_legitimate_address_p (enum machine_mode mode, rtx x, bool strict_p)
2098 {
2099 struct m68k_address address;
2100
2101 return m68k_decompose_address (mode, x, strict_p, &address);
2102 }
2103
2104 /* Return true if X is a memory, describing its address in ADDRESS if so.
2105 Apply strict checking if called during or after reload. */
2106
2107 static bool
2108 m68k_legitimate_mem_p (rtx x, struct m68k_address *address)
2109 {
2110 return (MEM_P (x)
2111 && m68k_decompose_address (GET_MODE (x), XEXP (x, 0),
2112 reload_in_progress || reload_completed,
2113 address));
2114 }
2115
2116 /* Implement TARGET_LEGITIMATE_CONSTANT_P. */
2117
2118 bool
2119 m68k_legitimate_constant_p (enum machine_mode mode, rtx x)
2120 {
2121 return mode != XFmode && !m68k_illegitimate_symbolic_constant_p (x);
2122 }
2123
2124 /* Return true if X matches the 'Q' constraint. It must be a memory
2125 with a base address and no constant offset or index. */
2126
2127 bool
2128 m68k_matches_q_p (rtx x)
2129 {
2130 struct m68k_address address;
2131
2132 return (m68k_legitimate_mem_p (x, &address)
2133 && address.code == UNKNOWN
2134 && address.base
2135 && !address.offset
2136 && !address.index);
2137 }
2138
2139 /* Return true if X matches the 'U' constraint. It must be a memory
2140 with a base address, a constant offset, and no index. */
2141
2142 bool
2143 m68k_matches_u_p (rtx x)
2144 {
2145 struct m68k_address address;
2146
2147 return (m68k_legitimate_mem_p (x, &address)
2148 && address.code == UNKNOWN
2149 && address.base
2150 && address.offset
2151 && !address.index);
2152 }
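
/* A hedged sketch (hypothetical helper, not used by GCC itself): build a
   MEM that satisfies 'U' but not 'Q'.  Conversely, a plain
   (mem (reg An)) satisfies 'Q' but not 'U', since 'U' requires the
   constant offset that 'Q' forbids.  */

static rtx
m68k_example_u_operand (rtx base_reg, HOST_WIDE_INT d16)
{
  /* (mem (plus (reg An) (const_int D16))): a base register plus a
     constant offset and no index register.  */
  return gen_rtx_MEM (SImode,
		      gen_rtx_PLUS (Pmode, base_reg, GEN_INT (d16)));
}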
2153
2154 /* Return GOT pointer. */
2155
2156 static rtx
2157 m68k_get_gp (void)
2158 {
2159 if (pic_offset_table_rtx == NULL_RTX)
2160 pic_offset_table_rtx = gen_rtx_REG (Pmode, PIC_REG);
2161
2162 crtl->uses_pic_offset_table = 1;
2163
2164 return pic_offset_table_rtx;
2165 }
2166
2167 /* M68K relocations, used to distinguish GOT and TLS relocations in UNSPEC
2168 wrappers. */
2169 enum m68k_reloc { RELOC_GOT, RELOC_TLSGD, RELOC_TLSLDM, RELOC_TLSLDO,
2170 RELOC_TLSIE, RELOC_TLSLE };
2171
2172 #define TLS_RELOC_P(RELOC) ((RELOC) != RELOC_GOT)
2173
2174 /* Wrap symbol X into an unspec representing relocation RELOC.
2175 BASE_REG is the register to be added to the result.
2176 TEMP_REG, if non-null, is a temporary register. */
2177
2178 static rtx
2179 m68k_wrap_symbol (rtx x, enum m68k_reloc reloc, rtx base_reg, rtx temp_reg)
2180 {
2181 bool use_x_p;
2182
2183 use_x_p = (base_reg == pic_offset_table_rtx) ? TARGET_XGOT : TARGET_XTLS;
2184
2185 if (TARGET_COLDFIRE && use_x_p)
2186 /* When compiling with the -mxgot or -mxtls switch, the code will look like this:
2187
2188 move.l <X>@<RELOC>,<TEMP_REG>
2189 add.l <BASE_REG>,<TEMP_REG> */
2190 {
2191 /* Wrap X in UNSPEC_RELOC32 so that m68k_output_addr_const_extra
2192 knows to put @RELOC after the reference. */
2193 x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)),
2194 UNSPEC_RELOC32);
2195 x = gen_rtx_CONST (Pmode, x);
2196
2197 if (temp_reg == NULL)
2198 {
2199 gcc_assert (can_create_pseudo_p ());
2200 temp_reg = gen_reg_rtx (Pmode);
2201 }
2202
2203 emit_move_insn (temp_reg, x);
2204 emit_insn (gen_addsi3 (temp_reg, temp_reg, base_reg));
2205 x = temp_reg;
2206 }
2207 else
2208 {
2209 x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)),
2210 UNSPEC_RELOC16);
2211 x = gen_rtx_CONST (Pmode, x);
2212
2213 x = gen_rtx_PLUS (Pmode, base_reg, x);
2214 }
2215
2216 return x;
2217 }
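
/* In the common case (no -mxgot/-mxtls), the wrapped result above is

     (plus BASE_REG
           (const (unspec [X (const_int RELOC)] UNSPEC_RELOC16)))

   which is later recognized as a (d16,An)-style access whose
   displacement the assembler resolves from the @RELOC decoration
   (illustrative comment only).  */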
2218
2219 /* Helper for m68k_unwrap_symbol.
2220 If unwrapping was successful (that is, if ORIG != <return value>),
2221 also set *RELOC_PTR to the relocation type of the symbol. */
2222
2223 static rtx
2224 m68k_unwrap_symbol_1 (rtx orig, bool unwrap_reloc32_p,
2225 enum m68k_reloc *reloc_ptr)
2226 {
2227 if (GET_CODE (orig) == CONST)
2228 {
2229 rtx x;
2230 enum m68k_reloc dummy;
2231
2232 x = XEXP (orig, 0);
2233
2234 if (reloc_ptr == NULL)
2235 reloc_ptr = &dummy;
2236
2237 /* Handle an addend. */
2238 if ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS)
2239 && CONST_INT_P (XEXP (x, 1)))
2240 x = XEXP (x, 0);
2241
2242 if (GET_CODE (x) == UNSPEC)
2243 {
2244 switch (XINT (x, 1))
2245 {
2246 case UNSPEC_RELOC16:
2247 orig = XVECEXP (x, 0, 0);
2248 *reloc_ptr = (enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1));
2249 break;
2250
2251 case UNSPEC_RELOC32:
2252 if (unwrap_reloc32_p)
2253 {
2254 orig = XVECEXP (x, 0, 0);
2255 *reloc_ptr = (enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1));
2256 }
2257 break;
2258
2259 default:
2260 break;
2261 }
2262 }
2263 }
2264
2265 return orig;
2266 }
2267
2268 /* Unwrap symbol from UNSPEC_RELOC16 and, if unwrap_reloc32_p,
2269 UNSPEC_RELOC32 wrappers. */
2270
2271 rtx
2272 m68k_unwrap_symbol (rtx orig, bool unwrap_reloc32_p)
2273 {
2274 return m68k_unwrap_symbol_1 (orig, unwrap_reloc32_p, NULL);
2275 }
2276
2277 /* Helper for m68k_final_prescan_insn. */
2278
2279 static int
2280 m68k_final_prescan_insn_1 (rtx *x_ptr, void *data ATTRIBUTE_UNUSED)
2281 {
2282 rtx x = *x_ptr;
2283
2284 if (m68k_unwrap_symbol (x, true) != x)
2285 /* For rationale of the below, see comment in m68k_final_prescan_insn. */
2286 {
2287 rtx plus;
2288
2289 gcc_assert (GET_CODE (x) == CONST);
2290 plus = XEXP (x, 0);
2291
2292 if (GET_CODE (plus) == PLUS || GET_CODE (plus) == MINUS)
2293 {
2294 rtx unspec;
2295 rtx addend;
2296
2297 unspec = XEXP (plus, 0);
2298 gcc_assert (GET_CODE (unspec) == UNSPEC);
2299 addend = XEXP (plus, 1);
2300 gcc_assert (CONST_INT_P (addend));
2301
2302 /* We now have all the pieces, rearrange them. */
2303
2304 /* Move symbol to plus. */
2305 XEXP (plus, 0) = XVECEXP (unspec, 0, 0);
2306
2307 /* Move plus inside unspec. */
2308 XVECEXP (unspec, 0, 0) = plus;
2309
2310 /* Move unspec to top level of const. */
2311 XEXP (x, 0) = unspec;
2312 }
2313
2314 return -1;
2315 }
2316
2317 return 0;
2318 }
2319
2320 /* Prescan insn before outputting assembler for it. */
2321
2322 void
2323 m68k_final_prescan_insn (rtx_insn *insn ATTRIBUTE_UNUSED,
2324 rtx *operands, int n_operands)
2325 {
2326 int i;
2327
2328 /* Combine and possibly other optimizations may do a good job
2329 of converting
2330 (const (unspec [(symbol)]))
2331 into
2332 (const (plus (unspec [(symbol)])
2333 (const_int N))).
2334 The problem with this is emitting @TLS or @GOT decorations.
2335 The decoration is emitted when processing (unspec), so the
2336 result would be "#symbol@TLSLE+N" instead of "#symbol+N@TLSLE".
2337
2338 It seems that the easiest solution to this is to convert such
2339 operands to
2340 (const (unspec [(plus (symbol)
2341 (const_int N))])).
2342 Note that the top level of the operand remains intact, so we don't
2343 have to patch up anything outside of the operand. */
2344
2345 for (i = 0; i < n_operands; ++i)
2346 {
2347 rtx op;
2348
2349 op = operands[i];
2350
2351 for_each_rtx (&op, m68k_final_prescan_insn_1, NULL);
2352 }
2353 }
2354
2355 /* Move X to a register and add a REG_EQUAL note pointing to ORIG.
2356 If REG is non-null, use it; otherwise generate a new pseudo. */
2357
2358 static rtx
2359 m68k_move_to_reg (rtx x, rtx orig, rtx reg)
2360 {
2361 rtx_insn *insn;
2362
2363 if (reg == NULL_RTX)
2364 {
2365 gcc_assert (can_create_pseudo_p ());
2366 reg = gen_reg_rtx (Pmode);
2367 }
2368
2369 insn = emit_move_insn (reg, x);
2370 /* Put a REG_EQUAL note on this insn, so that it can be optimized
2371 by the loop pass. */
2372 set_unique_reg_note (insn, REG_EQUAL, orig);
2373
2374 return reg;
2375 }
2376
2377 /* Does the same as m68k_wrap_symbol, but returns a memory reference
2378 to the GOT slot. */
2379
2380 static rtx
2381 m68k_wrap_symbol_into_got_ref (rtx x, enum m68k_reloc reloc, rtx temp_reg)
2382 {
2383 x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), temp_reg);
2384
2385 x = gen_rtx_MEM (Pmode, x);
2386 MEM_READONLY_P (x) = 1;
2387
2388 return x;
2389 }
2390
2391 /* Legitimize PIC addresses. If the address is already
2392 position-independent, we return ORIG. Newly generated
2393 position-independent addresses go to REG. If we need more
2394 than one register, we lose.
2395
2396 An address is legitimized by making an indirect reference
2397 through the Global Offset Table with the name of the symbol
2398 used as an offset.
2399
2400 The assembler and linker are responsible for placing the
2401 address of the symbol in the GOT. The function prologue
2402 is responsible for initializing a5 to the starting address
2403 of the GOT.
2404
2405 The assembler is also responsible for translating a symbol name
2406 into a constant displacement from the start of the GOT.
2407
2408 A quick example may make things a little clearer:
2409
2410 When not generating PIC code to store the value 12345 into _foo
2411 we would generate the following code:
2412
2413 movel #12345, _foo
2414
2415 When generating PIC, two transformations are made. First, the compiler
2416 loads the address of foo into a register, so the first transformation makes:
2417
2418 lea _foo, a0
2419 movel #12345, a0@
2420
2421 The code in movsi will intercept the lea instruction and call this
2422 routine which will transform the instructions into:
2423
2424 movel a5@(_foo:w), a0
2425 movel #12345, a0@
2426
2427
2428 That (in a nutshell) is how *all* symbol and label references are
2429 handled. */
2430
2431 rtx
2432 legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
2433 rtx reg)
2434 {
2435 rtx pic_ref = orig;
2436
2437 /* First handle a simple SYMBOL_REF or LABEL_REF */
2438 if (GET_CODE (orig) == SYMBOL_REF || GET_CODE (orig) == LABEL_REF)
2439 {
2440 gcc_assert (reg);
2441
2442 pic_ref = m68k_wrap_symbol_into_got_ref (orig, RELOC_GOT, reg);
2443 pic_ref = m68k_move_to_reg (pic_ref, orig, reg);
2444 }
2445 else if (GET_CODE (orig) == CONST)
2446 {
2447 rtx base;
2448
2449 /* Make sure this has not already been legitimized. */
2450 if (m68k_unwrap_symbol (orig, true) != orig)
2451 return orig;
2452
2453 gcc_assert (reg);
2454
2455 /* legitimize both operands of the PLUS */
2456 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
2457
2458 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
2459 orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
2460 base == reg ? 0 : reg);
2461
2462 if (GET_CODE (orig) == CONST_INT)
2463 pic_ref = plus_constant (Pmode, base, INTVAL (orig));
2464 else
2465 pic_ref = gen_rtx_PLUS (Pmode, base, orig);
2466 }
2467
2468 return pic_ref;
2469 }
2470
2471 /* The __tls_get_addr symbol. */
2472 static GTY(()) rtx m68k_tls_get_addr;
2473
2474 /* Return SYMBOL_REF for __tls_get_addr. */
2475
2476 static rtx
2477 m68k_get_tls_get_addr (void)
2478 {
2479 if (m68k_tls_get_addr == NULL_RTX)
2480 m68k_tls_get_addr = init_one_libfunc ("__tls_get_addr");
2481
2482 return m68k_tls_get_addr;
2483 }
2484
2485 /* Return the libcall result in A0 instead of the usual D0. */
2486 static bool m68k_libcall_value_in_a0_p = false;
2487
2488 /* Emit instruction sequence that calls __tls_get_addr. X is
2489 the TLS symbol we are referencing and RELOC is the symbol type to use
2490 (either TLSGD or TLSLDM). EQV is the REG_EQUAL note for the sequence
2491 emitted. A pseudo register with the result of the __tls_get_addr
2492 call is returned. */
2493
2494 static rtx
2495 m68k_call_tls_get_addr (rtx x, rtx eqv, enum m68k_reloc reloc)
2496 {
2497 rtx a0;
2498 rtx_insn *insns;
2499 rtx dest;
2500
2501 /* Emit the call sequence. */
2502 start_sequence ();
2503
2504 /* FIXME: Unfortunately, emit_library_call_value does not
2505 consider (plus (%a5) (const (unspec))) to be a good enough
2506 operand for push, so it forces it into a register. The bad
2507 thing about this is that the combiner, due to copy propagation and
2508 other optimizations, sometimes cannot fix this up later. As a
2509 consequence, an additional register may be allocated, resulting in a spill.
2510 For reference, see args processing loops in
2511 calls.c:emit_library_call_value_1.
2512 For testcase, see gcc.target/m68k/tls-{gd, ld}.c */
2513 x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), NULL_RTX);
2514
2515 /* __tls_get_addr() is not a libcall, but emitting a libcall_value
2516 sequence is the simplest way of generating a call. The difference is
2517 that __tls_get_addr() returns its result in A0 rather than in D0 as an
2518 ordinary libcall would. To work around this, we use
2519 m68k_libcall_value_in_a0_p, which temporarily switches the return register to A0. */
2520
2521 m68k_libcall_value_in_a0_p = true;
2522 a0 = emit_library_call_value (m68k_get_tls_get_addr (), NULL_RTX, LCT_PURE,
2523 Pmode, 1, x, Pmode);
2524 m68k_libcall_value_in_a0_p = false;
2525
2526 insns = get_insns ();
2527 end_sequence ();
2528
2529 gcc_assert (can_create_pseudo_p ());
2530 dest = gen_reg_rtx (Pmode);
2531 emit_libcall_block (insns, dest, a0, eqv);
2532
2533 return dest;
2534 }
2535
2536 /* The __m68k_read_tp symbol. */
2537 static GTY(()) rtx m68k_read_tp;
2538
2539 /* Return SYMBOL_REF for __m68k_read_tp. */
2540
2541 static rtx
2542 m68k_get_m68k_read_tp (void)
2543 {
2544 if (m68k_read_tp == NULL_RTX)
2545 m68k_read_tp = init_one_libfunc ("__m68k_read_tp");
2546
2547 return m68k_read_tp;
2548 }
2549
2550 /* Emit an instruction sequence that calls __m68k_read_tp.
2551 A pseudo register with the result of the __m68k_read_tp call is returned. */
2552
2553 static rtx
2554 m68k_call_m68k_read_tp (void)
2555 {
2556 rtx a0;
2557 rtx eqv;
2558 rtx_insn *insns;
2559 rtx dest;
2560
2561 start_sequence ();
2562
2563 /* __m68k_read_tp() is not a libcall, but emitting a libcall_value
2564 sequence is the simplest way of generating a call. The difference is
2565 that __m68k_read_tp() returns its result in A0 rather than in D0 as an
2566 ordinary libcall would. To work around this, we use
2567 m68k_libcall_value_in_a0_p, which temporarily switches the return register to A0. */
2568
2569 /* Emit the call sequence. */
2570 m68k_libcall_value_in_a0_p = true;
2571 a0 = emit_library_call_value (m68k_get_m68k_read_tp (), NULL_RTX, LCT_PURE,
2572 Pmode, 0);
2573 m68k_libcall_value_in_a0_p = false;
2574 insns = get_insns ();
2575 end_sequence ();
2576
2577 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2578 share the m68k_read_tp result with other IE/LE model accesses. */
2579 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const1_rtx), UNSPEC_RELOC32);
2580
2581 gcc_assert (can_create_pseudo_p ());
2582 dest = gen_reg_rtx (Pmode);
2583 emit_libcall_block (insns, dest, a0, eqv);
2584
2585 return dest;
2586 }
2587
2588 /* Return a legitimized address for accessing TLS SYMBOL_REF ORIG.
2589 For explanations of the instruction sequences, see the TLS/NPTL ABI
2590 for m68k and ColdFire. */
2591
2592 rtx
2593 m68k_legitimize_tls_address (rtx orig)
2594 {
2595 switch (SYMBOL_REF_TLS_MODEL (orig))
2596 {
2597 case TLS_MODEL_GLOBAL_DYNAMIC:
2598 orig = m68k_call_tls_get_addr (orig, orig, RELOC_TLSGD);
2599 break;
2600
2601 case TLS_MODEL_LOCAL_DYNAMIC:
2602 {
2603 rtx eqv;
2604 rtx a0;
2605 rtx x;
2606
2607 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2608 share the LDM result with other LD model accesses. */
2609 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
2610 UNSPEC_RELOC32);
2611
2612 a0 = m68k_call_tls_get_addr (orig, eqv, RELOC_TLSLDM);
2613
2614 x = m68k_wrap_symbol (orig, RELOC_TLSLDO, a0, NULL_RTX);
2615
2616 if (can_create_pseudo_p ())
2617 x = m68k_move_to_reg (x, orig, NULL_RTX);
2618
2619 orig = x;
2620 break;
2621 }
2622
2623 case TLS_MODEL_INITIAL_EXEC:
2624 {
2625 rtx a0;
2626 rtx x;
2627
2628 a0 = m68k_call_m68k_read_tp ();
2629
2630 x = m68k_wrap_symbol_into_got_ref (orig, RELOC_TLSIE, NULL_RTX);
2631 x = gen_rtx_PLUS (Pmode, x, a0);
2632
2633 if (can_create_pseudo_p ())
2634 x = m68k_move_to_reg (x, orig, NULL_RTX);
2635
2636 orig = x;
2637 break;
2638 }
2639
2640 case TLS_MODEL_LOCAL_EXEC:
2641 {
2642 rtx a0;
2643 rtx x;
2644
2645 a0 = m68k_call_m68k_read_tp ();
2646
2647 x = m68k_wrap_symbol (orig, RELOC_TLSLE, a0, NULL_RTX);
2648
2649 if (can_create_pseudo_p ())
2650 x = m68k_move_to_reg (x, orig, NULL_RTX);
2651
2652 orig = x;
2653 break;
2654 }
2655
2656 default:
2657 gcc_unreachable ();
2658 }
2659
2660 return orig;
2661 }
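
/* For instance, in the local-exec model the address computed above is
   effectively

     (plus <result of __m68k_read_tp>
           (const (unspec [SYM (const_int RELOC_TLSLE)] UNSPEC_RELOC16)))

   i.e. the thread pointer plus the symbol's @TLSLE offset
   (illustrative comment only; the register holding the thread pointer
   is whatever pseudo m68k_call_m68k_read_tp returned).  */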
2662
2663 /* Return true if X is a TLS symbol. */
2664
2665 static bool
2666 m68k_tls_symbol_p (rtx x)
2667 {
2668 if (!TARGET_HAVE_TLS)
2669 return false;
2670
2671 if (GET_CODE (x) != SYMBOL_REF)
2672 return false;
2673
2674 return SYMBOL_REF_TLS_MODEL (x) != 0;
2675 }
2676
2677 /* Helper for m68k_tls_reference_p. */
2678
2679 static int
2680 m68k_tls_reference_p_1 (rtx *x_ptr, void *data ATTRIBUTE_UNUSED)
2681 {
2682 /* Note: this is not the same as m68k_tls_symbol_p. */
2683 if (GET_CODE (*x_ptr) == SYMBOL_REF)
2684 return SYMBOL_REF_TLS_MODEL (*x_ptr) != 0 ? 1 : 0;
2685
2686 /* Don't recurse into legitimate TLS references. */
2687 if (m68k_tls_reference_p (*x_ptr, true))
2688 return -1;
2689
2690 return 0;
2691 }
2692
2693 /* If !LEGITIMATE_P, return true if X is any TLS symbol reference,
2694 even an illegitimate one.
2695 If LEGITIMATE_P, return true only if X is a legitimate TLS symbol reference. */
2696
2697 bool
2698 m68k_tls_reference_p (rtx x, bool legitimate_p)
2699 {
2700 if (!TARGET_HAVE_TLS)
2701 return false;
2702
2703 if (!legitimate_p)
2704 return for_each_rtx (&x, m68k_tls_reference_p_1, NULL) == 1 ? true : false;
2705 else
2706 {
2707 enum m68k_reloc reloc = RELOC_GOT;
2708
2709 return (m68k_unwrap_symbol_1 (x, true, &reloc) != x
2710 && TLS_RELOC_P (reloc));
2711 }
2712 }
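
/* For example, a bare (symbol_ref "v") whose SYMBOL_REF_TLS_MODEL is
   nonzero makes m68k_tls_reference_p (x, false) return true, while
   (const (unspec [(symbol_ref "v") (const_int <TLS reloc>)]
   UNSPEC_RELOC16)) is the wrapped form that
   m68k_tls_reference_p (x, true) accepts (illustrative comment only).  */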
2713
2714 \f
2715
2716 #define USE_MOVQ(i) ((unsigned) ((i) + 128) <= 255)
2717
2718 /* Return the type of move that should be used for integer I. */
2719
2720 M68K_CONST_METHOD
2721 m68k_const_method (HOST_WIDE_INT i)
2722 {
2723 unsigned u;
2724
2725 if (USE_MOVQ (i))
2726 return MOVQ;
2727
2728 /* The ColdFire doesn't have byte or word operations. */
2729 /* FIXME: This may not be useful for the m68060 either. */
2730 if (!TARGET_COLDFIRE)
2731 {
2732 /* If -256 < N < 256 but N is not in range for a moveq,
2733 N^0xff will be, so use moveq #N^0xff,dreg; not.b dreg. */
2734 if (USE_MOVQ (i ^ 0xff))
2735 return NOTB;
2736 /* Likewise, try with not.w. */
2737 if (USE_MOVQ (i ^ 0xffff))
2738 return NOTW;
2739 /* This is the only value where neg.w is useful. */
2740 if (i == -65408)
2741 return NEGW;
2742 }
2743
2744 /* Try also with swap. */
2745 u = i;
2746 if (USE_MOVQ ((u >> 16) | (u << 16)))
2747 return SWAP;
2748
2749 if (TARGET_ISAB)
2750 {
2751 /* Try using MVZ/MVS with an immediate value to load constants. */
2752 if (i >= 0 && i <= 65535)
2753 return MVZ;
2754 if (i >= -32768 && i <= 32767)
2755 return MVS;
2756 }
2757
2758 /* Otherwise, use move.l */
2759 return MOVL;
2760 }
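
/* Worked examples (illustrative comment only; assumes !TARGET_COLDFIRE
   and !TARGET_ISAB):

     i = 100         -> MOVQ   moveq #100,d0
     i = 255         -> NOTB   moveq #0,d0;    not.b d0
     i = 65535       -> NOTW   moveq #0,d0;    not.w d0
     i = -65408      -> NEGW   moveq #-128,d0; neg.w d0
     i = 0x10000     -> SWAP   moveq #1,d0;    swap d0
     i = 0x12345678  -> MOVL   move.l #0x12345678,d0  */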
2761
2762 /* Return the cost of moving constant I into a data register. */
2763
2764 static int
2765 const_int_cost (HOST_WIDE_INT i)
2766 {
2767 switch (m68k_const_method (i))
2768 {
2769 case MOVQ:
2770 /* Constants between -128 and 127 are cheap due to moveq. */
2771 return 0;
2772 case MVZ:
2773 case MVS:
2774 case NOTB:
2775 case NOTW:
2776 case NEGW:
2777 case SWAP:
2778 /* Constants easily generated by moveq + not.b/not.w/neg.w/swap. */
2779 return 1;
2780 case MOVL:
2781 return 2;
2782 default:
2783 gcc_unreachable ();
2784 }
2785 }
2786
2787 static bool
2788 m68k_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
2789 int *total, bool speed ATTRIBUTE_UNUSED)
2790 {
2791 switch (code)
2792 {
2793 case CONST_INT:
2794 /* Constant zero is super cheap due to clr instruction. */
2795 if (x == const0_rtx)
2796 *total = 0;
2797 else
2798 *total = const_int_cost (INTVAL (x));
2799 return true;
2800
2801 case CONST:
2802 case LABEL_REF:
2803 case SYMBOL_REF:
2804 *total = 3;
2805 return true;
2806
2807 case CONST_DOUBLE:
2808 /* Make 0.0 cheaper than other floating constants to
2809 encourage creating tstsf and tstdf insns. */
2810 if (outer_code == COMPARE
2811 && (x == CONST0_RTX (SFmode) || x == CONST0_RTX (DFmode)))
2812 *total = 4;
2813 else
2814 *total = 5;
2815 return true;
2816
2817 /* These are vaguely right for a 68020. */
2818 /* The costs for long multiply have been adjusted to work properly
2819 in synth_mult on the 68020, relative to an average of the time
2820 for add and the time for shift, taking away a little more because
2821 sometimes move insns are needed. */
2822 /* div?.w is relatively cheaper on the 68000, counted in COSTS_N_INSNS
2823 terms. */
2824 #define MULL_COST \
2825 (TUNE_68060 ? 2 \
2826 : TUNE_68040 ? 5 \
2827 : (TUNE_CFV2 && TUNE_EMAC) ? 3 \
2828 : (TUNE_CFV2 && TUNE_MAC) ? 4 \
2829 : TUNE_CFV2 ? 8 \
2830 : TARGET_COLDFIRE ? 3 : 13)
2831
2832 #define MULW_COST \
2833 (TUNE_68060 ? 2 \
2834 : TUNE_68040 ? 3 \
2835 : TUNE_68000_10 ? 5 \
2836 : (TUNE_CFV2 && TUNE_EMAC) ? 3 \
2837 : (TUNE_CFV2 && TUNE_MAC) ? 2 \
2838 : TUNE_CFV2 ? 8 \
2839 : TARGET_COLDFIRE ? 2 : 8)
2840
2841 #define DIVW_COST \
2842 (TARGET_CF_HWDIV ? 11 \
2843 : TUNE_68000_10 || TARGET_COLDFIRE ? 12 : 27)
2844
2845 case PLUS:
2846 /* An lea costs about three times as much as a simple add. */
2847 if (GET_MODE (x) == SImode
2848 && GET_CODE (XEXP (x, 1)) == REG
2849 && GET_CODE (XEXP (x, 0)) == MULT
2850 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
2851 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2852 && (INTVAL (XEXP (XEXP (x, 0), 1)) == 2
2853 || INTVAL (XEXP (XEXP (x, 0), 1)) == 4
2854 || INTVAL (XEXP (XEXP (x, 0), 1)) == 8))
2855 {
2856 /* lea an@(dx:l:i),am */
2857 *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 2 : 3);
2858 return true;
2859 }
2860 return false;
2861
2862 case ASHIFT:
2863 case ASHIFTRT:
2864 case LSHIFTRT:
2865 if (TUNE_68060)
2866 {
2867 *total = COSTS_N_INSNS(1);
2868 return true;
2869 }
2870 if (TUNE_68000_10)
2871 {
2872 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
2873 {
2874 if (INTVAL (XEXP (x, 1)) < 16)
2875 *total = COSTS_N_INSNS (2) + INTVAL (XEXP (x, 1)) / 2;
2876 else
2877 /* We're using clrw + swap for these cases. */
2878 *total = COSTS_N_INSNS (4) + (INTVAL (XEXP (x, 1)) - 16) / 2;
2879 }
2880 else
2881 *total = COSTS_N_INSNS (10); /* Worst case. */
2882 return true;
2883 }
2884 /* A shift by a big integer takes an extra instruction. */
2885 if (GET_CODE (XEXP (x, 1)) == CONST_INT
2886 && (INTVAL (XEXP (x, 1)) == 16))
2887 {
2888 *total = COSTS_N_INSNS (2); /* clrw;swap */
2889 return true;
2890 }
2891 if (GET_CODE (XEXP (x, 1)) == CONST_INT
2892 && !(INTVAL (XEXP (x, 1)) > 0
2893 && INTVAL (XEXP (x, 1)) <= 8))
2894 {
2895 *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 1 : 3); /* lsr #i,dn */
2896 return true;
2897 }
2898 return false;
2899
2900 case MULT:
2901 if ((GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
2902 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
2903 && GET_MODE (x) == SImode)
2904 *total = COSTS_N_INSNS (MULW_COST);
2905 else if (GET_MODE (x) == QImode || GET_MODE (x) == HImode)
2906 *total = COSTS_N_INSNS (MULW_COST);
2907 else
2908 *total = COSTS_N_INSNS (MULL_COST);
2909 return true;
2910
2911 case DIV:
2912 case UDIV:
2913 case MOD:
2914 case UMOD:
2915 if (GET_MODE (x) == QImode || GET_MODE (x) == HImode)
2916 *total = COSTS_N_INSNS (DIVW_COST); /* div.w */
2917 else if (TARGET_CF_HWDIV)
2918 *total = COSTS_N_INSNS (18);
2919 else
2920 *total = COSTS_N_INSNS (43); /* div.l */
2921 return true;
2922
2923 case ZERO_EXTRACT:
2924 if (outer_code == COMPARE)
2925 *total = 0;
2926 return false;
2927
2928 default:
2929 return false;
2930 }
2931 }
2932
2933 /* Return an instruction to move CONST_INT OPERANDS[1] into data register
2934 OPERANDS[0]. */
2935
2936 static const char *
2937 output_move_const_into_data_reg (rtx *operands)
2938 {
2939 HOST_WIDE_INT i;
2940
2941 i = INTVAL (operands[1]);
2942 switch (m68k_const_method (i))
2943 {
2944 case MVZ:
2945 return "mvzw %1,%0";
2946 case MVS:
2947 return "mvsw %1,%0";
2948 case MOVQ:
2949 return "moveq %1,%0";
2950 case NOTB:
2951 CC_STATUS_INIT;
2952 operands[1] = GEN_INT (i ^ 0xff);
2953 return "moveq %1,%0\n\tnot%.b %0";
2954 case NOTW:
2955 CC_STATUS_INIT;
2956 operands[1] = GEN_INT (i ^ 0xffff);
2957 return "moveq %1,%0\n\tnot%.w %0";
2958 case NEGW:
2959 CC_STATUS_INIT;
2960 return "moveq #-128,%0\n\tneg%.w %0";
2961 case SWAP:
2962 {
2963 unsigned u = i;
2964
2965 operands[1] = GEN_INT ((u << 16) | (u >> 16));
2966 return "moveq %1,%0\n\tswap %0";
2967 }
2968 case MOVL:
2969 return "move%.l %1,%0";
2970 default:
2971 gcc_unreachable ();
2972 }
2973 }
2974
2975 /* Return true if I can be handled by ISA B's mov3q instruction. */
2976
2977 bool
2978 valid_mov3q_const (HOST_WIDE_INT i)
2979 {
2980 return TARGET_ISAB && (i == -1 || IN_RANGE (i, 1, 7));
2981 }
2982
2983 /* Return an instruction to move CONST_INT OPERANDS[1] into
2984 OPERANDS[0]. */
2985
2986 static const char *
2987 output_move_simode_const (rtx *operands)
2988 {
2989 rtx dest;
2990 HOST_WIDE_INT src;
2991
2992 dest = operands[0];
2993 src = INTVAL (operands[1]);
2994 if (src == 0
2995 && (DATA_REG_P (dest) || MEM_P (dest))
2996 /* clr insns on 68000 read before writing. */
2997 && ((TARGET_68010 || TARGET_COLDFIRE)
2998 || !(MEM_P (dest) && MEM_VOLATILE_P (dest))))
2999 return "clr%.l %0";
3000 else if (GET_MODE (dest) == SImode && valid_mov3q_const (src))
3001 return "mov3q%.l %1,%0";
3002 else if (src == 0 && ADDRESS_REG_P (dest))
3003 return "sub%.l %0,%0";
3004 else if (DATA_REG_P (dest))
3005 return output_move_const_into_data_reg (operands);
3006 else if (ADDRESS_REG_P (dest) && IN_RANGE (src, -0x8000, 0x7fff))
3007 {
3008 if (valid_mov3q_const (src))
3009 return "mov3q%.l %1,%0";
3010 return "move%.w %1,%0";
3011 }
3012 else if (MEM_P (dest)
3013 && GET_CODE (XEXP (dest, 0)) == PRE_DEC
3014 && REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
3015 && IN_RANGE (src, -0x8000, 0x7fff))
3016 {
3017 if (valid_mov3q_const (src))
3018 return "mov3q%.l %1,%-";
3019 return "pea %a1";
3020 }
3021 return "move%.l %1,%0";
3022 }
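
/* For example (illustrative comment only): storing 0 to a data register
   or to memory emits clr.l, clearing an address register emits
   sub.l %an,%an, loading 5 on ISA B emits mov3q.l, and pushing a 16-bit
   constant uses pea rather than a full move.l to -(sp).  */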
3023
3024 const char *
3025 output_move_simode (rtx *operands)
3026 {
3027 if (GET_CODE (operands[1]) == CONST_INT)
3028 return output_move_simode_const (operands);
3029 else if ((GET_CODE (operands[1]) == SYMBOL_REF
3030 || GET_CODE (operands[1]) == CONST)
3031 && push_operand (operands[0], SImode))
3032 return "pea %a1";
3033 else if ((GET_CODE (operands[1]) == SYMBOL_REF
3034 || GET_CODE (operands[1]) == CONST)
3035 && ADDRESS_REG_P (operands[0]))
3036 return "lea %a1,%0";
3037 return "move%.l %1,%0";
3038 }
3039
3040 const char *
3041 output_move_himode (rtx *operands)
3042 {
3043 if (GET_CODE (operands[1]) == CONST_INT)
3044 {
3045 if (operands[1] == const0_rtx
3046 && (DATA_REG_P (operands[0])
3047 || GET_CODE (operands[0]) == MEM)
3048 /* clr insns on 68000 read before writing. */
3049 && ((TARGET_68010 || TARGET_COLDFIRE)
3050 || !(GET_CODE (operands[0]) == MEM
3051 && MEM_VOLATILE_P (operands[0]))))
3052 return "clr%.w %0";
3053 else if (operands[1] == const0_rtx
3054 && ADDRESS_REG_P (operands[0]))
3055 return "sub%.l %0,%0";
3056 else if (DATA_REG_P (operands[0])
3057 && INTVAL (operands[1]) < 128
3058 && INTVAL (operands[1]) >= -128)
3059 return "moveq %1,%0";
3060 else if (INTVAL (operands[1]) < 0x8000
3061 && INTVAL (operands[1]) >= -0x8000)
3062 return "move%.w %1,%0";
3063 }
3064 else if (CONSTANT_P (operands[1]))
3065 return "move%.l %1,%0";
3066 return "move%.w %1,%0";
3067 }
3068
3069 const char *
3070 output_move_qimode (rtx *operands)
3071 {
3072 /* The 68k family always modifies the stack pointer by at least 2, even
3073 for byte pushes. The 5200 (ColdFire) does not do this. */
3074
3075 /* This case is generated by pushqi1 pattern now. */
3076 gcc_assert (!(GET_CODE (operands[0]) == MEM
3077 && GET_CODE (XEXP (operands[0], 0)) == PRE_DEC
3078 && XEXP (XEXP (operands[0], 0), 0) == stack_pointer_rtx
3079 && ! ADDRESS_REG_P (operands[1])
3080 && ! TARGET_COLDFIRE));
3081
3082 /* clr and st insns on 68000 read before writing. */
3083 if (!ADDRESS_REG_P (operands[0])
3084 && ((TARGET_68010 || TARGET_COLDFIRE)
3085 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3086 {
3087 if (operands[1] == const0_rtx)
3088 return "clr%.b %0";
3089 if ((!TARGET_COLDFIRE || DATA_REG_P (operands[0]))
3090 && GET_CODE (operands[1]) == CONST_INT
3091 && (INTVAL (operands[1]) & 255) == 255)
3092 {
3093 CC_STATUS_INIT;
3094 return "st %0";
3095 }
3096 }
3097 if (GET_CODE (operands[1]) == CONST_INT
3098 && DATA_REG_P (operands[0])
3099 && INTVAL (operands[1]) < 128
3100 && INTVAL (operands[1]) >= -128)
3101 return "moveq %1,%0";
3102 if (operands[1] == const0_rtx && ADDRESS_REG_P (operands[0]))
3103 return "sub%.l %0,%0";
3104 if (GET_CODE (operands[1]) != CONST_INT && CONSTANT_P (operands[1]))
3105 return "move%.l %1,%0";
3106 /* The 68k family (including the 5200 ColdFire) does not support byte
3107 moves to or from address registers. */
3108 if (ADDRESS_REG_P (operands[0]) || ADDRESS_REG_P (operands[1]))
3109 return "move%.w %1,%0";
3110 return "move%.b %1,%0";
3111 }
3112
3113 const char *
3114 output_move_stricthi (rtx *operands)
3115 {
3116 if (operands[1] == const0_rtx
3117 /* clr insns on 68000 read before writing. */
3118 && ((TARGET_68010 || TARGET_COLDFIRE)
3119 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3120 return "clr%.w %0";
3121 return "move%.w %1,%0";
3122 }
3123
3124 const char *
3125 output_move_strictqi (rtx *operands)
3126 {
3127 if (operands[1] == const0_rtx
3128 /* clr insns on 68000 read before writing. */
3129 && ((TARGET_68010 || TARGET_COLDFIRE)
3130 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3131 return "clr%.b %0";
3132 return "move%.b %1,%0";
3133 }
3134
3135 /* Return the best assembler insn template
3136 for moving operands[1] into operands[0] as a fullword. */
3137
3138 static const char *
3139 singlemove_string (rtx *operands)
3140 {
3141 if (GET_CODE (operands[1]) == CONST_INT)
3142 return output_move_simode_const (operands);
3143 return "move%.l %1,%0";
3144 }
3145
3146
3147 /* Output assembler or rtl code to perform a doubleword move insn
3148 with operands OPERANDS.
3149 Pointers to 3 helper functions should be specified:
3150 HANDLE_REG_ADJUST to adjust a register by a small value,
3151 HANDLE_COMPADR to compute an address and
3152 HANDLE_MOVSI to move 4 bytes. */
3153
3154 static void
3155 handle_move_double (rtx operands[2],
3156 void (*handle_reg_adjust) (rtx, int),
3157 void (*handle_compadr) (rtx [2]),
3158 void (*handle_movsi) (rtx [2]))
3159 {
3160 enum
3161 {
3162 REGOP, OFFSOP, MEMOP, PUSHOP, POPOP, CNSTOP, RNDOP
3163 } optype0, optype1;
3164 rtx latehalf[2];
3165 rtx middlehalf[2];
3166 rtx xops[2];
3167 rtx addreg0 = 0, addreg1 = 0;
3168 int dest_overlapped_low = 0;
3169 int size = GET_MODE_SIZE (GET_MODE (operands[0]));
3170
3171 middlehalf[0] = 0;
3172 middlehalf[1] = 0;
3173
3174 /* First classify both operands. */
3175
3176 if (REG_P (operands[0]))
3177 optype0 = REGOP;
3178 else if (offsettable_memref_p (operands[0]))
3179 optype0 = OFFSOP;
3180 else if (GET_CODE (XEXP (operands[0], 0)) == POST_INC)
3181 optype0 = POPOP;
3182 else if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC)
3183 optype0 = PUSHOP;
3184 else if (GET_CODE (operands[0]) == MEM)
3185 optype0 = MEMOP;
3186 else
3187 optype0 = RNDOP;
3188
3189 if (REG_P (operands[1]))
3190 optype1 = REGOP;
3191 else if (CONSTANT_P (operands[1]))
3192 optype1 = CNSTOP;
3193 else if (offsettable_memref_p (operands[1]))
3194 optype1 = OFFSOP;
3195 else if (GET_CODE (XEXP (operands[1], 0)) == POST_INC)
3196 optype1 = POPOP;
3197 else if (GET_CODE (XEXP (operands[1], 0)) == PRE_DEC)
3198 optype1 = PUSHOP;
3199 else if (GET_CODE (operands[1]) == MEM)
3200 optype1 = MEMOP;
3201 else
3202 optype1 = RNDOP;
3203
3204 /* Check for cases that the operand constraints are not supposed
3205 to allow. Generating code for these cases is
3206 painful. */
3207 gcc_assert (optype0 != RNDOP && optype1 != RNDOP);
3208
3209 /* If one operand is decrementing and one is incrementing,
3210 decrement the former register explicitly
3211 and change that operand into ordinary indexing. */
3212
3213 if (optype0 == PUSHOP && optype1 == POPOP)
3214 {
3215 operands[0] = XEXP (XEXP (operands[0], 0), 0);
3216
3217 handle_reg_adjust (operands[0], -size);
3218
3219 if (GET_MODE (operands[1]) == XFmode)
3220 operands[0] = gen_rtx_MEM (XFmode, operands[0]);
3221 else if (GET_MODE (operands[0]) == DFmode)
3222 operands[0] = gen_rtx_MEM (DFmode, operands[0]);
3223 else
3224 operands[0] = gen_rtx_MEM (DImode, operands[0]);
3225 optype0 = OFFSOP;
3226 }
3227 if (optype0 == POPOP && optype1 == PUSHOP)
3228 {
3229 operands[1] = XEXP (XEXP (operands[1], 0), 0);
3230
3231 handle_reg_adjust (operands[1], -size);
3232
3233 if (GET_MODE (operands[1]) == XFmode)
3234 operands[1] = gen_rtx_MEM (XFmode, operands[1]);
3235 else if (GET_MODE (operands[1]) == DFmode)
3236 operands[1] = gen_rtx_MEM (DFmode, operands[1]);
3237 else
3238 operands[1] = gen_rtx_MEM (DImode, operands[1]);
3239 optype1 = OFFSOP;
3240 }
3241
3242 /* If an operand is an unoffsettable memory ref, find a register
3243 we can increment temporarily to make it refer to the second word. */
3244
3245 if (optype0 == MEMOP)
3246 addreg0 = find_addr_reg (XEXP (operands[0], 0));
3247
3248 if (optype1 == MEMOP)
3249 addreg1 = find_addr_reg (XEXP (operands[1], 0));
3250
3251 /* Ok, we can do one word at a time.
3252 Normally we do the low-numbered word first,
3253 but if either operand is autodecrementing then we
3254 do the high-numbered word first.
3255
3256 In either case, set up in LATEHALF the operands to use
3257 for the high-numbered word and in some cases alter the
3258 operands in OPERANDS to be suitable for the low-numbered word. */
3259
3260 if (size == 12)
3261 {
3262 if (optype0 == REGOP)
3263 {
3264 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 2);
3265 middlehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
3266 }
3267 else if (optype0 == OFFSOP)
3268 {
3269 middlehalf[0] = adjust_address (operands[0], SImode, 4);
3270 latehalf[0] = adjust_address (operands[0], SImode, size - 4);
3271 }
3272 else
3273 {
3274 middlehalf[0] = adjust_address (operands[0], SImode, 0);
3275 latehalf[0] = adjust_address (operands[0], SImode, 0);
3276 }
3277
3278 if (optype1 == REGOP)
3279 {
3280 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 2);
3281 middlehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
3282 }
3283 else if (optype1 == OFFSOP)
3284 {
3285 middlehalf[1] = adjust_address (operands[1], SImode, 4);
3286 latehalf[1] = adjust_address (operands[1], SImode, size - 4);
3287 }
3288 else if (optype1 == CNSTOP)
3289 {
3290 if (GET_CODE (operands[1]) == CONST_DOUBLE)
3291 {
3292 REAL_VALUE_TYPE r;
3293 long l[3];
3294
3295 REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
3296 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
3297 operands[1] = GEN_INT (l[0]);
3298 middlehalf[1] = GEN_INT (l[1]);
3299 latehalf[1] = GEN_INT (l[2]);
3300 }
3301 else
3302 {
3303 /* No non-CONST_DOUBLE constant should ever appear
3304 here. */
3305 gcc_assert (!CONSTANT_P (operands[1]));
3306 }
3307 }
3308 else
3309 {
3310 middlehalf[1] = adjust_address (operands[1], SImode, 0);
3311 latehalf[1] = adjust_address (operands[1], SImode, 0);
3312 }
3313 }
3314 else
3315 /* size is not 12: */
3316 {
3317 if (optype0 == REGOP)
3318 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
3319 else if (optype0 == OFFSOP)
3320 latehalf[0] = adjust_address (operands[0], SImode, size - 4);
3321 else
3322 latehalf[0] = adjust_address (operands[0], SImode, 0);
3323
3324 if (optype1 == REGOP)
3325 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
3326 else if (optype1 == OFFSOP)
3327 latehalf[1] = adjust_address (operands[1], SImode, size - 4);
3328 else if (optype1 == CNSTOP)
3329 split_double (operands[1], &operands[1], &latehalf[1]);
3330 else
3331 latehalf[1] = adjust_address (operands[1], SImode, 0);
3332 }
3333
3334 /* If insn is effectively movd N(REG),-(REG) then we will do the high
3335 word first. We should use the adjusted operand 1 (which is N+4(REG))
3336 for the low word as well, to compensate for the first decrement of
3337 REG. */
3338 if (optype0 == PUSHOP
3339 && reg_overlap_mentioned_p (XEXP (XEXP (operands[0], 0), 0), operands[1]))
3340 operands[1] = middlehalf[1] = latehalf[1];
3341
3342 /* For (set (reg:DI N) (mem:DI ... (reg:SI N) ...)),
3343 if the upper part of reg N does not appear in the MEM, arrange to
3344 emit the late half of the move first. Otherwise, compute the MEM
3345 address into the upper part of N and use that as a pointer to the
3346 memory operand. */
3347 if (optype0 == REGOP
3348 && (optype1 == OFFSOP || optype1 == MEMOP))
3349 {
3350 rtx testlow = gen_rtx_REG (SImode, REGNO (operands[0]));
3351
3352 if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
3353 && reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
3354 {
3355 /* If both halves of dest are used in the src memory address,
3356 compute the address into latehalf of dest.
3357 Note that this can't happen if the dest is two data regs. */
3358 compadr:
3359 xops[0] = latehalf[0];
3360 xops[1] = XEXP (operands[1], 0);
3361
3362 handle_compadr (xops);
3363 if (GET_MODE (operands[1]) == XFmode)
3364 {
3365 operands[1] = gen_rtx_MEM (XFmode, latehalf[0]);
3366 middlehalf[1] = adjust_address (operands[1], DImode, size - 8);
3367 latehalf[1] = adjust_address (operands[1], DImode, size - 4);
3368 }
3369 else
3370 {
3371 operands[1] = gen_rtx_MEM (DImode, latehalf[0]);
3372 latehalf[1] = adjust_address (operands[1], DImode, size - 4);
3373 }
3374 }
3375 else if (size == 12
3376 && reg_overlap_mentioned_p (middlehalf[0],
3377 XEXP (operands[1], 0)))
3378 {
3379 /* Check for two regs used by both source and dest.
3380 Note that this can't happen if the dest is all data regs.
3381 It can happen if the dest is d6, d7, a0.
3382 But in that case, latehalf is an addr reg, so
3383 the code at compadr does ok. */
3384
3385 if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
3386 || reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
3387 goto compadr;
3388
3389 /* JRV says this can't happen: */
3390 gcc_assert (!addreg0 && !addreg1);
3391
3392 /* Only the middle reg conflicts; simply put it last. */
3393 handle_movsi (operands);
3394 handle_movsi (latehalf);
3395 handle_movsi (middlehalf);
3396
3397 return;
3398 }
3399 else if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0)))
3400 /* If the low half of dest is mentioned in the source memory
3401 address, arrange to emit the late half of the move first. */
3402 dest_overlapped_low = 1;
3403 }
3404
3405 /* If one or both operands are autodecrementing,
3406 do the two words, high-numbered first. */
3407
3408 /* Likewise, if the first move would clobber the source of the second
3409 one, do them in the other order. This happens only for registers;
3410 such overlap can't happen in memory unless the user explicitly
3411 sets it up, and that is an undefined circumstance. */
3412
3413 if (optype0 == PUSHOP || optype1 == PUSHOP
3414 || (optype0 == REGOP && optype1 == REGOP
3415 && ((middlehalf[1] && REGNO (operands[0]) == REGNO (middlehalf[1]))
3416 || REGNO (operands[0]) == REGNO (latehalf[1])))
3417 || dest_overlapped_low)
3418 {
3419 /* Make any unoffsettable addresses point at high-numbered word. */
3420 if (addreg0)
3421 handle_reg_adjust (addreg0, size - 4);
3422 if (addreg1)
3423 handle_reg_adjust (addreg1, size - 4);
3424
3425 /* Do that word. */
3426 handle_movsi (latehalf);
3427
3428 /* Undo the adds we just did. */
3429 if (addreg0)
3430 handle_reg_adjust (addreg0, -4);
3431 if (addreg1)
3432 handle_reg_adjust (addreg1, -4);
3433
3434 if (size == 12)
3435 {
3436 handle_movsi (middlehalf);
3437
3438 if (addreg0)
3439 handle_reg_adjust (addreg0, -4);
3440 if (addreg1)
3441 handle_reg_adjust (addreg1, -4);
3442 }
3443
3444 /* Do low-numbered word. */
3445
3446 handle_movsi (operands);
3447 return;
3448 }
3449
3450 /* Normal case: do the two words, low-numbered first. */
3451
3452 m68k_final_prescan_insn (NULL, operands, 2);
3453 handle_movsi (operands);
3454
3455 /* Do the middle one of the three words for a long double. */
3456 if (size == 12)
3457 {
3458 if (addreg0)
3459 handle_reg_adjust (addreg0, 4);
3460 if (addreg1)
3461 handle_reg_adjust (addreg1, 4);
3462
3463 m68k_final_prescan_insn (NULL, middlehalf, 2);
3464 handle_movsi (middlehalf);
3465 }
3466
3467 /* Make any unoffsettable addresses point at high-numbered word. */
3468 if (addreg0)
3469 handle_reg_adjust (addreg0, 4);
3470 if (addreg1)
3471 handle_reg_adjust (addreg1, 4);
3472
3473 /* Do that word. */
3474 m68k_final_prescan_insn (NULL, latehalf, 2);
3475 handle_movsi (latehalf);
3476
3477 /* Undo the adds we just did. */
3478 if (addreg0)
3479 handle_reg_adjust (addreg0, -(size - 4));
3480 if (addreg1)
3481 handle_reg_adjust (addreg1, -(size - 4));
3482
3483 return;
3484 }
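
/* For example (illustrative comment only): a DImode move whose
   destination is -(sp) takes the path above and is emitted high-
   numbered (least significant) word first, so that after both
   predecrements the most significant word ends up at the lower
   address, as the big-endian layout requires.  */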
3485
3486 /* Output assembler code to adjust REG by N. */
3487 static void
3488 output_reg_adjust (rtx reg, int n)
3489 {
3490 const char *s;
3491
3492 gcc_assert (GET_MODE (reg) == SImode
3493 && -12 <= n && n != 0 && n <= 12);
3494
3495 switch (n)
3496 {
3497 case 12:
3498 s = "add%.l #12,%0";
3499 break;
3500
3501 case 8:
3502 s = "addq%.l #8,%0";
3503 break;
3504
3505 case 4:
3506 s = "addq%.l #4,%0";
3507 break;
3508
3509 case -12:
3510 s = "sub%.l #12,%0";
3511 break;
3512
3513 case -8:
3514 s = "subq%.l #8,%0";
3515 break;
3516
3517 case -4:
3518 s = "subq%.l #4,%0";
3519 break;
3520
3521 default:
3522 gcc_unreachable ();
3523 s = NULL;
3524 }
3525
3526 output_asm_insn (s, &reg);
3527 }
3528
3529 /* Emit rtl code to adjust REG by N. */
3530 static void
3531 emit_reg_adjust (rtx reg1, int n)
3532 {
3533 rtx reg2;
3534
3535 gcc_assert (GET_MODE (reg1) == SImode
3536 && -12 <= n && n != 0 && n <= 12);
3537
3538 reg1 = copy_rtx (reg1);
3539 reg2 = copy_rtx (reg1);
3540
3541 if (n < 0)
3542 emit_insn (gen_subsi3 (reg1, reg2, GEN_INT (-n)));
3543 else if (n > 0)
3544 emit_insn (gen_addsi3 (reg1, reg2, GEN_INT (n)));
3545 else
3546 gcc_unreachable ();
3547 }
3548
3549 /* Output assembler to load address OPERANDS[1] into register OPERANDS[0]. */
3550 static void
3551 output_compadr (rtx operands[2])
3552 {
3553 output_asm_insn ("lea %a1,%0", operands);
3554 }
3555
3556 /* Output the best assembler insn for moving operands[1] into operands[0]
3557 as a fullword. */
3558 static void
3559 output_movsi (rtx operands[2])
3560 {
3561 output_asm_insn (singlemove_string (operands), operands);
3562 }
3563
3564 /* Copy OP and change its mode to MODE. */
3565 static rtx
3566 copy_operand (rtx op, enum machine_mode mode)
3567 {
3568 /* ??? This looks really ugly. There must be a better way
3569 to change a mode on the operand. */
3570 if (GET_MODE (op) != VOIDmode)
3571 {
3572 if (REG_P (op))
3573 op = gen_rtx_REG (mode, REGNO (op));
3574 else
3575 {
3576 op = copy_rtx (op);
3577 PUT_MODE (op, mode);
3578 }
3579 }
3580
3581 return op;
3582 }
3583
3584 /* Emit rtl code for moving operands[1] into operands[0] as a fullword. */
3585 static void
3586 emit_movsi (rtx operands[2])
3587 {
3588 operands[0] = copy_operand (operands[0], SImode);
3589 operands[1] = copy_operand (operands[1], SImode);
3590
3591 emit_insn (gen_movsi (operands[0], operands[1]));
3592 }
3593
3594 /* Output assembler code to perform a doubleword move insn
3595 with operands OPERANDS. */
3596 const char *
3597 output_move_double (rtx *operands)
3598 {
3599 handle_move_double (operands,
3600 output_reg_adjust, output_compadr, output_movsi);
3601
3602 return "";
3603 }
3604
3605 /* Output rtl code to perform a doubleword move insn
3606 with operands OPERANDS. */
3607 void
3608 m68k_emit_move_double (rtx operands[2])
3609 {
3610 handle_move_double (operands, emit_reg_adjust, emit_movsi, emit_movsi);
3611 }
3612
3613 /* Ensure mode of ORIG, a REG rtx, is MODE. Returns either ORIG or a
3614 new rtx with the correct mode. */
3615
3616 static rtx
3617 force_mode (enum machine_mode mode, rtx orig)
3618 {
3619 if (mode == GET_MODE (orig))
3620 return orig;
3621
3622 if (REGNO (orig) >= FIRST_PSEUDO_REGISTER)
3623 abort ();
3624
3625 return gen_rtx_REG (mode, REGNO (orig));
3626 }
3627
3628 static int
3629 fp_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3630 {
3631 return reg_renumber && FP_REG_P (op);
3632 }
3633
3634 /* Emit insns to move operands[1] into operands[0].
3635
3636 Return 1 if we have written out everything that needs to be done to
3637 do the move. Otherwise, return 0 and the caller will emit the move
3638 normally.
3639
3640 Note SCRATCH_REG may not be in the proper mode depending on how it
3641 will be used. This routine is responsible for creating a new copy
3642 of SCRATCH_REG in the proper mode. */
3643
3644 int
3645 emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
3646 {
3647 register rtx operand0 = operands[0];
3648 register rtx operand1 = operands[1];
3649 register rtx tem;
3650
3651 if (scratch_reg
3652 && reload_in_progress && GET_CODE (operand0) == REG
3653 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
3654 operand0 = reg_equiv_mem (REGNO (operand0));
3655 else if (scratch_reg
3656 && reload_in_progress && GET_CODE (operand0) == SUBREG
3657 && GET_CODE (SUBREG_REG (operand0)) == REG
3658 && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
3659 {
3660 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
3661 the code which tracks sets/uses for delete_output_reload. */
3662 rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
3663 reg_equiv_mem (REGNO (SUBREG_REG (operand0))),
3664 SUBREG_BYTE (operand0));
3665 operand0 = alter_subreg (&temp, true);
3666 }
3667
3668 if (scratch_reg
3669 && reload_in_progress && GET_CODE (operand1) == REG
3670 && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
3671 operand1 = reg_equiv_mem (REGNO (operand1));
3672 else if (scratch_reg
3673 && reload_in_progress && GET_CODE (operand1) == SUBREG
3674 && GET_CODE (SUBREG_REG (operand1)) == REG
3675 && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
3676 {
3677 /* We must not alter SUBREG_BYTE (operand1) since that would confuse
3678 the code which tracks sets/uses for delete_output_reload. */
3679 rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
3680 reg_equiv_mem (REGNO (SUBREG_REG (operand1))),
3681 SUBREG_BYTE (operand1));
3682 operand1 = alter_subreg (&temp, true);
3683 }
3684
3685 if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
3686 && ((tem = find_replacement (&XEXP (operand0, 0)))
3687 != XEXP (operand0, 0)))
3688 operand0 = gen_rtx_MEM (GET_MODE (operand0), tem);
3689 if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
3690 && ((tem = find_replacement (&XEXP (operand1, 0)))
3691 != XEXP (operand1, 0)))
3692 operand1 = gen_rtx_MEM (GET_MODE (operand1), tem);
3693
3694 /* Handle secondary reloads for loads/stores of FP registers where
3695 the address is symbolic, by using the scratch register. */
3696 if (fp_reg_operand (operand0, mode)
3697 && ((GET_CODE (operand1) == MEM
3698 && ! memory_address_p (DFmode, XEXP (operand1, 0)))
3699 || ((GET_CODE (operand1) == SUBREG
3700 && GET_CODE (XEXP (operand1, 0)) == MEM
3701 && !memory_address_p (DFmode, XEXP (XEXP (operand1, 0), 0)))))
3702 && scratch_reg)
3703 {
3704 if (GET_CODE (operand1) == SUBREG)
3705 operand1 = XEXP (operand1, 0);
3706
3707 /* SCRATCH_REG will hold an address. We want
3708 it in SImode regardless of what mode it was originally given
3709 to us. */
3710 scratch_reg = force_mode (SImode, scratch_reg);
3711
3712 /* D might not fit in 14 bits either; for such cases load D into
3713 scratch reg. */
3714 if (!memory_address_p (Pmode, XEXP (operand1, 0)))
3715 {
3716 emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
3717 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
3718 Pmode,
3719 XEXP (XEXP (operand1, 0), 0),
3720 scratch_reg));
3721 }
3722 else
3723 emit_move_insn (scratch_reg, XEXP (operand1, 0));
3724 emit_insn (gen_rtx_SET (VOIDmode, operand0,
3725 gen_rtx_MEM (mode, scratch_reg)));
3726 return 1;
3727 }
3728 else if (fp_reg_operand (operand1, mode)
3729 && ((GET_CODE (operand0) == MEM
3730 && ! memory_address_p (DFmode, XEXP (operand0, 0)))
3731 || ((GET_CODE (operand0) == SUBREG)
3732 && GET_CODE (XEXP (operand0, 0)) == MEM
3733 && !memory_address_p (DFmode, XEXP (XEXP (operand0, 0), 0))))
3734 && scratch_reg)
3735 {
3736 if (GET_CODE (operand0) == SUBREG)
3737 operand0 = XEXP (operand0, 0);
3738
3739 /* SCRATCH_REG will hold an address and maybe the actual data. We want
3740 it in SImode regardless of what mode it was originally given
3741 to us. */
3742 scratch_reg = force_mode (SImode, scratch_reg);
3743
3744 /* The displacement might not fit the addressing mode; for such
3745 cases load it into the scratch reg. */
3746 if (!memory_address_p (Pmode, XEXP (operand0, 0)))
3747 {
3748 emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
3749 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0,
3750 0)),
3751 Pmode,
3752 XEXP (XEXP (operand0, 0),
3753 0),
3754 scratch_reg));
3755 }
3756 else
3757 emit_move_insn (scratch_reg, XEXP (operand0, 0));
3758 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_MEM (mode, scratch_reg),
3759 operand1));
3760 return 1;
3761 }
3762 /* Handle secondary reloads for loads of FP registers from constant
3763 expressions by forcing the constant into memory.
3764
3765 Use SCRATCH_REG to hold the address of the memory location.
3766
3767 The proper fix is to change PREFERRED_RELOAD_CLASS to return
3768 NO_REGS when presented with a const_int and a register class
3769 containing only FP registers. Doing so unfortunately creates
3770 more problems than it solves. Fix this for 2.5. */
3771 else if (fp_reg_operand (operand0, mode)
3772 && CONSTANT_P (operand1)
3773 && scratch_reg)
3774 {
3775 rtx xoperands[2];
3776
3777 /* SCRATCH_REG will hold an address and maybe the actual data. We want
3778 it in SImode regardless of what mode it was originally given
3779 to us. */
3780 scratch_reg = force_mode (SImode, scratch_reg);
3781
3782 /* Force the constant into memory and put the address of the
3783 memory location into scratch_reg. */
3784 xoperands[0] = scratch_reg;
3785 xoperands[1] = XEXP (force_const_mem (mode, operand1), 0);
3786 emit_insn (gen_rtx_SET (mode, scratch_reg, xoperands[1]));
3787
3788 /* Now load the destination register. */
3789 emit_insn (gen_rtx_SET (mode, operand0,
3790 gen_rtx_MEM (mode, scratch_reg)));
3791 return 1;
3792 }
3793
3794 /* Now have insn-emit do whatever it normally does. */
3795 return 0;
3796 }
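
/* As an illustrative sketch (the register choices and symbol are
   hypothetical), a DFmode load from a non-addressable location with
   %a1 as the scratch register is emitted as two moves, conceptually
   "move.l #foo,%a1" to put the address in the scratch register,
   followed by "fmove.d (%a1),%fp0" to load through it. */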
3797
3798 /* Split one or more DImode RTL references into pairs of SImode
3799 references. The RTL can be REG, offsettable MEM, integer constant, or
3800 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
3801 split and "num" is its length. lo_half and hi_half are output arrays
3802 that parallel "operands". */
3803
3804 void
3805 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
3806 {
3807 while (num--)
3808 {
3809 rtx op = operands[num];
3810
3811 /* simplify_subreg refuses to split volatile memory addresses,
3812 but we still have to handle them. */
3813 if (GET_CODE (op) == MEM)
3814 {
3815 lo_half[num] = adjust_address (op, SImode, 4);
3816 hi_half[num] = adjust_address (op, SImode, 0);
3817 }
3818 else
3819 {
3820 lo_half[num] = simplify_gen_subreg (SImode, op,
3821 GET_MODE (op) == VOIDmode
3822 ? DImode : GET_MODE (op), 4);
3823 hi_half[num] = simplify_gen_subreg (SImode, op,
3824 GET_MODE (op) == VOIDmode
3825 ? DImode : GET_MODE (op), 0);
3826 }
3827 }
3828 }
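
/* For example, splitting the DImode reference (mem:DI (reg:SI %a0))
   yields hi_half = (mem:SI (reg:SI %a0)) and
   lo_half = (mem:SI (plus:SI (reg:SI %a0) (const_int 4))); the m68k
   is big-endian, so the most significant word lives at the lower
   address. */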
3829
3830 /* Split X into a base and a constant offset, storing them in *BASE
3831 and *OFFSET respectively. */
3832
3833 static void
3834 m68k_split_offset (rtx x, rtx *base, HOST_WIDE_INT *offset)
3835 {
3836 *offset = 0;
3837 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
3838 {
3839 *offset += INTVAL (XEXP (x, 1));
3840 x = XEXP (x, 0);
3841 }
3842 *base = x;
3843 }
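
/* E.g. (plus:SI (reg:SI %a0) (const_int 8)) splits into *BASE = %a0
   and *OFFSET = 8, while a bare (reg:SI %a0) gives *OFFSET = 0. */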
3844
3845 /* Return true if PATTERN is a PARALLEL suitable for a movem or fmovem
3846 instruction. STORE_P is true for a store and false for a load.
3847
3848 If the instruction uses post-increment or pre-decrement addressing,
3849 AUTOMOD_BASE is the base register and AUTOMOD_OFFSET is the total
3850 adjustment. This adjustment will be made by the first element of
3851 PARALLEL, with the loads or stores starting at element 1. If the
3852 instruction does not use post-increment or pre-decrement addressing,
3853 AUTOMOD_BASE is null, AUTOMOD_OFFSET is 0, and the loads or stores
3854 start at element 0. */
3855
3856 bool
3857 m68k_movem_pattern_p (rtx pattern, rtx automod_base,
3858 HOST_WIDE_INT automod_offset, bool store_p)
3859 {
3860 rtx base, mem_base, set, mem, reg, last_reg;
3861 HOST_WIDE_INT offset, mem_offset;
3862 int i, first, len;
3863 enum reg_class rclass;
3864
3865 len = XVECLEN (pattern, 0);
3866 first = (automod_base != NULL);
3867
3868 if (automod_base)
3869 {
3870 /* Stores must be pre-decrement and loads must be post-increment. */
3871 if (store_p != (automod_offset < 0))
3872 return false;
3873
3874 /* Work out the base and offset for the lowest memory location. */
3875 base = automod_base;
3876 offset = (automod_offset < 0 ? automod_offset : 0);
3877 }
3878 else
3879 {
3880 /* Allow any valid base and offset in the first access. */
3881 base = NULL;
3882 offset = 0;
3883 }
3884
3885 last_reg = NULL;
3886 rclass = NO_REGS;
3887 for (i = first; i < len; i++)
3888 {
3889 /* We need a plain SET. */
3890 set = XVECEXP (pattern, 0, i);
3891 if (GET_CODE (set) != SET)
3892 return false;
3893
3894 /* Check that we have a memory location... */
3895 mem = XEXP (set, !store_p);
3896 if (!MEM_P (mem) || !memory_operand (mem, VOIDmode))
3897 return false;
3898
3899 /* ...with the right address. */
3900 if (base == NULL)
3901 {
3902 m68k_split_offset (XEXP (mem, 0), &base, &offset);
3903 /* The ColdFire instruction only allows (An) and (d16,An) modes.
3904 There are no mode restrictions for 680x0 besides the
3905 automodification rules enforced above. */
3906 if (TARGET_COLDFIRE
3907 && !m68k_legitimate_base_reg_p (base, reload_completed))
3908 return false;
3909 }
3910 else
3911 {
3912 m68k_split_offset (XEXP (mem, 0), &mem_base, &mem_offset);
3913 if (!rtx_equal_p (base, mem_base) || offset != mem_offset)
3914 return false;
3915 }
3916
3917 /* Check that we have a register of the required mode and class. */
3918 reg = XEXP (set, store_p);
3919 if (!REG_P (reg)
3920 || !HARD_REGISTER_P (reg)
3921 || GET_MODE (reg) != reg_raw_mode[REGNO (reg)])
3922 return false;
3923
3924 if (last_reg)
3925 {
3926 /* The register must belong to RCLASS and have a higher number
3927 than the register in the previous SET. */
3928 if (!TEST_HARD_REG_BIT (reg_class_contents[rclass], REGNO (reg))
3929 || REGNO (last_reg) >= REGNO (reg))
3930 return false;
3931 }
3932 else
3933 {
3934 /* Work out which register class we need. */
3935 if (INT_REGNO_P (REGNO (reg)))
3936 rclass = GENERAL_REGS;
3937 else if (FP_REGNO_P (REGNO (reg)))
3938 rclass = FP_REGS;
3939 else
3940 return false;
3941 }
3942
3943 last_reg = reg;
3944 offset += GET_MODE_SIZE (GET_MODE (reg));
3945 }
3946
3947 /* If we have an automodification, check whether the final offset is OK. */
3948 if (automod_base && offset != (automod_offset < 0 ? 0 : automod_offset))
3949 return false;
3950
3951 /* Reject unprofitable cases. */
3952 if (len < first + (rclass == FP_REGS ? MIN_FMOVEM_REGS : MIN_MOVEM_REGS))
3953 return false;
3954
3955 return true;
3956 }
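
/* As an illustrative sketch, a non-automodifying PARALLEL that stores
   %d2-%d4 in consecutive stack slots satisfies the checks above:

   (parallel [(set (mem:SI (reg:SI %sp)) (reg:SI %d2))
              (set (mem:SI (plus:SI (reg:SI %sp) (const_int 4)))
                   (reg:SI %d3))
              (set (mem:SI (plus:SI (reg:SI %sp) (const_int 8)))
                   (reg:SI %d4))])

   The offsets grow by GET_MODE_SIZE, the register numbers are strictly
   ascending within GENERAL_REGS, and three registers meet
   MIN_MOVEM_REGS. */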
3957
3958 /* Return the assembly code template for a movem or fmovem instruction
3959 whose pattern is given by PATTERN. Store the template's operands
3960 in OPERANDS.
3961
3962 If the instruction uses post-increment or pre-decrement addressing,
3963 AUTOMOD_OFFSET is the total adjustment, otherwise it is 0. STORE_P
3964 is true if this is a store instruction. */
3965
3966 const char *
3967 m68k_output_movem (rtx *operands, rtx pattern,
3968 HOST_WIDE_INT automod_offset, bool store_p)
3969 {
3970 unsigned int mask;
3971 int i, first;
3972
3973 gcc_assert (GET_CODE (pattern) == PARALLEL);
3974 mask = 0;
3975 first = (automod_offset != 0);
3976 for (i = first; i < XVECLEN (pattern, 0); i++)
3977 {
3978 /* When using movem with pre-decrement addressing, register X + D0_REG
3979 is controlled by bit 15 - X. For all other addressing modes,
3980 register X + D0_REG is controlled by bit X. Confusingly, the
3981 register mask for fmovem is in the opposite order to that for
3982 movem. */
3983 unsigned int regno;
3984
3985 gcc_assert (MEM_P (XEXP (XVECEXP (pattern, 0, i), !store_p)));
3986 gcc_assert (REG_P (XEXP (XVECEXP (pattern, 0, i), store_p)));
3987 regno = REGNO (XEXP (XVECEXP (pattern, 0, i), store_p));
3988 if (automod_offset < 0)
3989 {
3990 if (FP_REGNO_P (regno))
3991 mask |= 1 << (regno - FP0_REG);
3992 else
3993 mask |= 1 << (15 - (regno - D0_REG));
3994 }
3995 else
3996 {
3997 if (FP_REGNO_P (regno))
3998 mask |= 1 << (7 - (regno - FP0_REG));
3999 else
4000 mask |= 1 << (regno - D0_REG);
4001 }
4002 }
4003 CC_STATUS_INIT;
4004
4005 if (automod_offset == 0)
4006 operands[0] = XEXP (XEXP (XVECEXP (pattern, 0, first), !store_p), 0);
4007 else if (automod_offset < 0)
4008 operands[0] = gen_rtx_PRE_DEC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
4009 else
4010 operands[0] = gen_rtx_POST_INC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
4011 operands[1] = GEN_INT (mask);
4012 if (FP_REGNO_P (REGNO (XEXP (XVECEXP (pattern, 0, first), store_p))))
4013 {
4014 if (store_p)
4015 return "fmovem %1,%a0";
4016 else
4017 return "fmovem %a0,%1";
4018 }
4019 else
4020 {
4021 if (store_p)
4022 return "movem%.l %1,%a0";
4023 else
4024 return "movem%.l %a0,%1";
4025 }
4026 }
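
/* Mask example (illustrative): a pre-decrement store of %d2 and %d3
   sets bits 15 - 2 and 15 - 3, i.e. mask == 0x3000, so the template
   prints as "movem.l #12288,-(%sp)" in Motorola syntax. */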
4027
4028 /* Return a REG that occurs in ADDR with coefficient 1.
4029 ADDR can be effectively incremented by incrementing REG. */
4030
4031 static rtx
4032 find_addr_reg (rtx addr)
4033 {
4034 while (GET_CODE (addr) == PLUS)
4035 {
4036 if (GET_CODE (XEXP (addr, 0)) == REG)
4037 addr = XEXP (addr, 0);
4038 else if (GET_CODE (XEXP (addr, 1)) == REG)
4039 addr = XEXP (addr, 1);
4040 else if (CONSTANT_P (XEXP (addr, 0)))
4041 addr = XEXP (addr, 1);
4042 else if (CONSTANT_P (XEXP (addr, 1)))
4043 addr = XEXP (addr, 0);
4044 else
4045 gcc_unreachable ();
4046 }
4047 gcc_assert (GET_CODE (addr) == REG);
4048 return addr;
4049 }
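
/* E.g. for ADDR == (plus (plus (reg %a0) (const_int 4)) (const_int 8))
   this returns %a0: incrementing %a0 increments the whole address. */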
4050
4051 /* Output assembler code to perform a 32-bit 3-operand add. */
4052
4053 const char *
4054 output_addsi3 (rtx *operands)
4055 {
4056 if (! operands_match_p (operands[0], operands[1]))
4057 {
4058 if (!ADDRESS_REG_P (operands[1]))
4059 {
4060 rtx tmp = operands[1];
4061
4062 operands[1] = operands[2];
4063 operands[2] = tmp;
4064 }
4065
4066 /* These insns can result from reloads to access
4067 stack slots over 64k from the frame pointer. */
4068 if (GET_CODE (operands[2]) == CONST_INT
4069 && (INTVAL (operands[2]) < -32768 || INTVAL (operands[2]) > 32767))
4070 return "move%.l %2,%0\n\tadd%.l %1,%0";
4071 if (GET_CODE (operands[2]) == REG)
4072 return MOTOROLA ? "lea (%1,%2.l),%0" : "lea %1@(0,%2:l),%0";
4073 return MOTOROLA ? "lea (%c2,%1),%0" : "lea %1@(%c2),%0";
4074 }
4075 if (GET_CODE (operands[2]) == CONST_INT)
4076 {
4077 if (INTVAL (operands[2]) > 0
4078 && INTVAL (operands[2]) <= 8)
4079 return "addq%.l %2,%0";
4080 if (INTVAL (operands[2]) < 0
4081 && INTVAL (operands[2]) >= -8)
4082 {
4083 operands[2] = GEN_INT (- INTVAL (operands[2]));
4084 return "subq%.l %2,%0";
4085 }
4086 /* On the CPU32 it is faster to use two addql instructions to
4087 add a small integer (8 < N <= 16) to a register.
4088 Likewise for subql. */
4089 if (TUNE_CPU32 && REG_P (operands[0]))
4090 {
4091 if (INTVAL (operands[2]) > 8
4092 && INTVAL (operands[2]) <= 16)
4093 {
4094 operands[2] = GEN_INT (INTVAL (operands[2]) - 8);
4095 return "addq%.l #8,%0\n\taddq%.l %2,%0";
4096 }
4097 if (INTVAL (operands[2]) < -8
4098 && INTVAL (operands[2]) >= -16)
4099 {
4100 operands[2] = GEN_INT (- INTVAL (operands[2]) - 8);
4101 return "subq%.l #8,%0\n\tsubq%.l %2,%0";
4102 }
4103 }
4104 if (ADDRESS_REG_P (operands[0])
4105 && INTVAL (operands[2]) >= -0x8000
4106 && INTVAL (operands[2]) < 0x8000)
4107 {
4108 if (TUNE_68040)
4109 return "add%.w %2,%0";
4110 else
4111 return MOTOROLA ? "lea (%c2,%0),%0" : "lea %0@(%c2),%0";
4112 }
4113 }
4114 return "add%.l %2,%0";
4115 }
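
/* For example, with matching operands and operands[2] == 12 on a CPU32
   tune this emits "addq.l #8,%d0" followed by "addq.l #4,%d0", while a
   constant outside the 16-bit range falls through to the final
   "add.l" form. */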
4116 \f
4117 /* Store in cc_status the expressions that the condition codes will
4118 describe after execution of an instruction whose pattern is EXP.
4119 Do not alter them if the instruction would not alter the cc's. */
4120
4121 /* On the 68000, all the insns to store in an address register fail to
4122 set the cc's. However, in some cases these instructions can make it
4123 invalid to use the saved cc's. In those cases we clear out
4124 some or all of the saved cc's so they won't be used. */
4125
4126 void
4127 notice_update_cc (rtx exp, rtx insn)
4128 {
4129 if (GET_CODE (exp) == SET)
4130 {
4131 if (GET_CODE (SET_SRC (exp)) == CALL)
4132 CC_STATUS_INIT;
4133 else if (ADDRESS_REG_P (SET_DEST (exp)))
4134 {
4135 if (cc_status.value1 && modified_in_p (cc_status.value1, insn))
4136 cc_status.value1 = 0;
4137 if (cc_status.value2 && modified_in_p (cc_status.value2, insn))
4138 cc_status.value2 = 0;
4139 }
4140 /* fmoves to memory or data registers do not set the condition
4141 codes. Normal moves _do_ set the condition codes, but not in
4142 a way that is appropriate for comparison with 0, because -0.0
4143 would be treated as a negative nonzero number. Note that it
4144 isn't appropriate to conditionalize this restriction on
4145 HONOR_SIGNED_ZEROS because that macro merely indicates whether
4146 we care about the difference between -0.0 and +0.0. */
4147 else if (!FP_REG_P (SET_DEST (exp))
4148 && SET_DEST (exp) != cc0_rtx
4149 && (FP_REG_P (SET_SRC (exp))
4150 || GET_CODE (SET_SRC (exp)) == FIX
4151 || FLOAT_MODE_P (GET_MODE (SET_DEST (exp)))))
4152 CC_STATUS_INIT;
4153 /* A pair of move insns doesn't produce a useful overall cc. */
4154 else if (!FP_REG_P (SET_DEST (exp))
4155 && !FP_REG_P (SET_SRC (exp))
4156 && GET_MODE_SIZE (GET_MODE (SET_SRC (exp))) > 4
4157 && (GET_CODE (SET_SRC (exp)) == REG
4158 || GET_CODE (SET_SRC (exp)) == MEM
4159 || GET_CODE (SET_SRC (exp)) == CONST_DOUBLE))
4160 CC_STATUS_INIT;
4161 else if (SET_DEST (exp) != pc_rtx)
4162 {
4163 cc_status.flags = 0;
4164 cc_status.value1 = SET_DEST (exp);
4165 cc_status.value2 = SET_SRC (exp);
4166 }
4167 }
4168 else if (GET_CODE (exp) == PARALLEL
4169 && GET_CODE (XVECEXP (exp, 0, 0)) == SET)
4170 {
4171 rtx dest = SET_DEST (XVECEXP (exp, 0, 0));
4172 rtx src = SET_SRC (XVECEXP (exp, 0, 0));
4173
4174 if (ADDRESS_REG_P (dest))
4175 CC_STATUS_INIT;
4176 else if (dest != pc_rtx)
4177 {
4178 cc_status.flags = 0;
4179 cc_status.value1 = dest;
4180 cc_status.value2 = src;
4181 }
4182 }
4183 else
4184 CC_STATUS_INIT;
4185 if (cc_status.value2 != 0
4186 && ADDRESS_REG_P (cc_status.value2)
4187 && GET_MODE (cc_status.value2) == QImode)
4188 CC_STATUS_INIT;
4189 if (cc_status.value2 != 0)
4190 switch (GET_CODE (cc_status.value2))
4191 {
4192 case ASHIFT: case ASHIFTRT: case LSHIFTRT:
4193 case ROTATE: case ROTATERT:
4194 /* These instructions always clear the overflow bit, and set
4195 the carry to the bit shifted out. */
4196 cc_status.flags |= CC_OVERFLOW_UNUSABLE | CC_NO_CARRY;
4197 break;
4198
4199 case PLUS: case MINUS: case MULT:
4200 case DIV: case UDIV: case MOD: case UMOD: case NEG:
4201 if (GET_MODE (cc_status.value2) != VOIDmode)
4202 cc_status.flags |= CC_NO_OVERFLOW;
4203 break;
4204 case ZERO_EXTEND:
4205 /* (SET r1 (ZERO_EXTEND r2)) on this machine
4206 ends with a move insn moving r2 in r2's mode.
4207 Thus, the cc's are set for r2.
4208 This can set N bit spuriously. */
4209 cc_status.flags |= CC_NOT_NEGATIVE;
4210
4211 default:
4212 break;
4213 }
4214 if (cc_status.value1 && GET_CODE (cc_status.value1) == REG
4215 && cc_status.value2
4216 && reg_overlap_mentioned_p (cc_status.value1, cc_status.value2))
4217 cc_status.value2 = 0;
4218 /* Check for PRE_DEC in dest modifying a register used in src. */
4219 if (cc_status.value1 && GET_CODE (cc_status.value1) == MEM
4220 && GET_CODE (XEXP (cc_status.value1, 0)) == PRE_DEC
4221 && cc_status.value2
4222 && reg_overlap_mentioned_p (XEXP (XEXP (cc_status.value1, 0), 0),
4223 cc_status.value2))
4224 cc_status.value2 = 0;
4225 if (((cc_status.value1 && FP_REG_P (cc_status.value1))
4226 || (cc_status.value2 && FP_REG_P (cc_status.value2))))
4227 cc_status.flags = CC_IN_68881;
4228 if (cc_status.value2 && GET_CODE (cc_status.value2) == COMPARE
4229 && GET_MODE_CLASS (GET_MODE (XEXP (cc_status.value2, 0))) == MODE_FLOAT)
4230 {
4231 cc_status.flags = CC_IN_68881;
4232 if (!FP_REG_P (XEXP (cc_status.value2, 0))
4233 && FP_REG_P (XEXP (cc_status.value2, 1)))
4234 cc_status.flags |= CC_REVERSED;
4235 }
4236 }
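
/* For instance, after "move.l %d1,%d0" the tracked values are
   cc_status.value1 == %d0 and cc_status.value2 == %d1, so final can
   delete a following "tst.l %d0"; a move into an address register
   instead invalidates any saved value it modifies, as described
   above. */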
4237 \f
4238 const char *
4239 output_move_const_double (rtx *operands)
4240 {
4241 int code = standard_68881_constant_p (operands[1]);
4242
4243 if (code != 0)
4244 {
4245 static char buf[40];
4246
4247 sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff);
4248 return buf;
4249 }
4250 return "fmove%.d %1,%0";
4251 }
4252
4253 const char *
4254 output_move_const_single (rtx *operands)
4255 {
4256 int code = standard_68881_constant_p (operands[1]);
4257
4258 if (code != 0)
4259 {
4260 static char buf[40];
4261
4262 sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff);
4263 return buf;
4264 }
4265 return "fmove%.s %f1,%0";
4266 }
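
/* E.g. a load of 1.0 finds code 0x32 in the tables below and returns
   "fmovecr #0x32,%0"; constants not in the 68881 constant ROM fall
   back to an ordinary fmove. */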
4267
4268 /* Return nonzero if X, a CONST_DOUBLE, has a value that we can get
4269 from the "fmovecr" instruction.
4270 The value, anded with 0xff, gives the code to use in fmovecr
4271 to get the desired constant. */
4272
4273 /* This code has been fixed for cross-compilation. */
4274
4275 static int inited_68881_table = 0;
4276
4277 static const char *const strings_68881[7] = {
4278 "0.0",
4279 "1.0",
4280 "10.0",
4281 "100.0",
4282 "10000.0",
4283 "1e8",
4284 "1e16"
4285 };
4286
4287 static const int codes_68881[7] = {
4288 0x0f,
4289 0x32,
4290 0x33,
4291 0x34,
4292 0x35,
4293 0x36,
4294 0x37
4295 };
4296
4297 REAL_VALUE_TYPE values_68881[7];
4298
4299 /* Set up values_68881 array by converting the decimal values
4300 strings_68881 to binary. */
4301
4302 void
4303 init_68881_table (void)
4304 {
4305 int i;
4306 REAL_VALUE_TYPE r;
4307 enum machine_mode mode;
4308
4309 mode = SFmode;
4310 for (i = 0; i < 7; i++)
4311 {
4312 if (i == 6)
4313 mode = DFmode;
4314 r = REAL_VALUE_ATOF (strings_68881[i], mode);
4315 values_68881[i] = r;
4316 }
4317 inited_68881_table = 1;
4318 }
4319
4320 int
4321 standard_68881_constant_p (rtx x)
4322 {
4323 REAL_VALUE_TYPE r;
4324 int i;
4325
4326 /* fmovecr must be emulated on the 68040 and 68060, so it shouldn't be
4327 used at all on those chips. */
4328 if (TUNE_68040_60)
4329 return 0;
4330
4331 if (! inited_68881_table)
4332 init_68881_table ();
4333
4334 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4335
4336 /* Use REAL_VALUES_IDENTICAL instead of REAL_VALUES_EQUAL so that -0.0
4337 is rejected. */
4338 for (i = 0; i < 6; i++)
4339 {
4340 if (REAL_VALUES_IDENTICAL (r, values_68881[i]))
4341 return (codes_68881[i]);
4342 }
4343
4344 if (GET_MODE (x) == SFmode)
4345 return 0;
4346
4347 if (REAL_VALUES_EQUAL (r, values_68881[6]))
4348 return (codes_68881[6]);
4349
4350 /* Larger powers of ten in the constant ROM are not used
4351 because they are not equal to a `double' C constant. */
4352 return 0;
4353 }
4354
4355 /* If X is a floating-point constant, return the logarithm of X base 2,
4356 or 0 if X is not a power of 2. */
4357
4358 int
4359 floating_exact_log2 (rtx x)
4360 {
4361 REAL_VALUE_TYPE r, r1;
4362 int exp;
4363
4364 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4365
4366 if (REAL_VALUES_LESS (r, dconst1))
4367 return 0;
4368
4369 exp = real_exponent (&r);
4370 real_2expN (&r1, exp, DFmode);
4371 if (REAL_VALUES_EQUAL (r1, r))
4372 return exp;
4373
4374 return 0;
4375 }
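
/* E.g. X == 4.0 gives 2 and X == 1024.0 gives 10, while 3.0 or any
   value less than 1.0 gives 0. */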
4376 \f
4377 /* A C compound statement to output to stdio stream STREAM the
4378 assembler syntax for an instruction operand X. X is an RTL
4379 expression.
4380
4381 CODE is a value that can be used to specify one of several ways
4382 of printing the operand. It is used when identical operands
4383 must be printed differently depending on the context. CODE
4384 comes from the `%' specification that was used to request
4385 printing of the operand. If the specification was just `%DIGIT'
4386 then CODE is 0; if the specification was `%LTR DIGIT' then CODE
4387 is the ASCII code for LTR.
4388
4389 If X is a register, this macro should print the register's name.
4390 The names can be found in an array `reg_names' whose type is
4391 `char *[]'. `reg_names' is initialized from `REGISTER_NAMES'.
4392
4393 When the machine description has a specification `%PUNCT' (a `%'
4394 followed by a punctuation character), this macro is called with
4395 a null pointer for X and the punctuation character for CODE.
4396
4397 The m68k specific codes are:
4398
4399 '.' for dot needed in Motorola-style opcode names.
4400 '-' for an operand pushing on the stack:
4401 sp@-, -(sp) or -(%sp) depending on the style of syntax.
4402 '+' for an operand pushing on the stack:
4403 sp@+, (sp)+ or (%sp)+ depending on the style of syntax.
4404 '@' for a reference to the top word on the stack:
4405 sp@, (sp) or (%sp) depending on the style of syntax.
4406 '#' for an immediate operand prefix (# in MIT and Motorola syntax
4407 but & in SGS syntax).
4408 '!' for the cc register (used in an `and to cc' insn).
4409 '$' for the letter `s' in an op code, but only on the 68040.
4410 '&' for the letter `d' in an op code, but only on the 68040.
4411 '/' for register prefix needed by longlong.h.
4412 '?' for m68k_library_id_string
4413
4414 'b' for byte insn (no effect on the Sun; this is for the ISI).
4415 'd' to force memory addressing to be absolute, not relative.
4416 'f' for float insn (print a CONST_DOUBLE as a float rather than in hex)
4417 'x' for float insn (print a CONST_DOUBLE as a float rather than in hex),
4418 or print pair of registers as rx:ry.
4419 'p' print an address with @PLTPC attached, but only if the operand
4420 is not locally-bound. */
4421
4422 void
4423 print_operand (FILE *file, rtx op, int letter)
4424 {
4425 if (letter == '.')
4426 {
4427 if (MOTOROLA)
4428 fprintf (file, ".");
4429 }
4430 else if (letter == '#')
4431 asm_fprintf (file, "%I");
4432 else if (letter == '-')
4433 asm_fprintf (file, MOTOROLA ? "-(%Rsp)" : "%Rsp@-");
4434 else if (letter == '+')
4435 asm_fprintf (file, MOTOROLA ? "(%Rsp)+" : "%Rsp@+");
4436 else if (letter == '@')
4437 asm_fprintf (file, MOTOROLA ? "(%Rsp)" : "%Rsp@");
4438 else if (letter == '!')
4439 asm_fprintf (file, "%Rfpcr");
4440 else if (letter == '$')
4441 {
4442 if (TARGET_68040)
4443 fprintf (file, "s");
4444 }
4445 else if (letter == '&')
4446 {
4447 if (TARGET_68040)
4448 fprintf (file, "d");
4449 }
4450 else if (letter == '/')
4451 asm_fprintf (file, "%R");
4452 else if (letter == '?')
4453 asm_fprintf (file, m68k_library_id_string);
4454 else if (letter == 'p')
4455 {
4456 output_addr_const (file, op);
4457 if (!(GET_CODE (op) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (op)))
4458 fprintf (file, "@PLTPC");
4459 }
4460 else if (GET_CODE (op) == REG)
4461 {
4462 if (letter == 'R')
4463 /* Print out the second register name of a register pair.
4464 I.e., R (6) => 7. */
4465 fputs (M68K_REGNAME(REGNO (op) + 1), file);
4466 else
4467 fputs (M68K_REGNAME(REGNO (op)), file);
4468 }
4469 else if (GET_CODE (op) == MEM)
4470 {
4471 output_address (XEXP (op, 0));
4472 if (letter == 'd' && ! TARGET_68020
4473 && CONSTANT_ADDRESS_P (XEXP (op, 0))
4474 && !(GET_CODE (XEXP (op, 0)) == CONST_INT
4475 && INTVAL (XEXP (op, 0)) < 0x8000
4476 && INTVAL (XEXP (op, 0)) >= -0x8000))
4477 fprintf (file, MOTOROLA ? ".l" : ":l");
4478 }
4479 else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == SFmode)
4480 {
4481 REAL_VALUE_TYPE r;
4482 long l;
4483 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
4484 REAL_VALUE_TO_TARGET_SINGLE (r, l);
4485 asm_fprintf (file, "%I0x%lx", l & 0xFFFFFFFF);
4486 }
4487 else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == XFmode)
4488 {
4489 REAL_VALUE_TYPE r;
4490 long l[3];
4491 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
4492 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
4493 asm_fprintf (file, "%I0x%lx%08lx%08lx", l[0] & 0xFFFFFFFF,
4494 l[1] & 0xFFFFFFFF, l[2] & 0xFFFFFFFF);
4495 }
4496 else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == DFmode)
4497 {
4498 REAL_VALUE_TYPE r;
4499 long l[2];
4500 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
4501 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
4502 asm_fprintf (file, "%I0x%lx%08lx", l[0] & 0xFFFFFFFF, l[1] & 0xFFFFFFFF);
4503 }
4504 else
4505 {
4506 /* Use `print_operand_address' instead of `output_addr_const'
4507 to ensure that we print relevant PIC stuff. */
4508 asm_fprintf (file, "%I");
4509 if (TARGET_PCREL
4510 && (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == CONST))
4511 print_operand_address (file, op);
4512 else
4513 output_addr_const (file, op);
4514 }
4515 }
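
/* As a worked example, the template "move%.l %1,%0" applied to two
   data registers prints as "move.l %d1,%d0" under Motorola syntax:
   the '.' code emits the dot and the REG operands print via
   M68K_REGNAME. Under MIT syntax '.' prints nothing, giving "movel"
   instead. */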
4516
4517 /* Return string for TLS relocation RELOC. */
4518
4519 static const char *
4520 m68k_get_reloc_decoration (enum m68k_reloc reloc)
4521 {
4522 /* To my knowledge, !MOTOROLA assemblers don't support TLS. */
4523 gcc_assert (MOTOROLA || reloc == RELOC_GOT);
4524
4525 switch (reloc)
4526 {
4527 case RELOC_GOT:
4528 if (MOTOROLA)
4529 {
4530 if (flag_pic == 1 && TARGET_68020)
4531 return "@GOT.w";
4532 else
4533 return "@GOT";
4534 }
4535 else
4536 {
4537 if (TARGET_68020)
4538 {
4539 switch (flag_pic)
4540 {
4541 case 1:
4542 return ":w";
4543 case 2:
4544 return ":l";
4545 default:
4546 return "";
4547 }
4548 }
4549 }
/* For !MOTOROLA without TARGET_68020 there is no decoration; do not
   fall through to the TLS cases below. */
return "";

4551 case RELOC_TLSGD:
4552 return "@TLSGD";
4553
4554 case RELOC_TLSLDM:
4555 return "@TLSLDM";
4556
4557 case RELOC_TLSLDO:
4558 return "@TLSLDO";
4559
4560 case RELOC_TLSIE:
4561 return "@TLSIE";
4562
4563 case RELOC_TLSLE:
4564 return "@TLSLE";
4565
4566 default:
4567 gcc_unreachable ();
4568 }
4569 }
4570
4571 /* m68k implementation of TARGET_OUTPUT_ADDR_CONST_EXTRA. */
4572
4573 static bool
4574 m68k_output_addr_const_extra (FILE *file, rtx x)
4575 {
4576 if (GET_CODE (x) == UNSPEC)
4577 {
4578 switch (XINT (x, 1))
4579 {
4580 case UNSPEC_RELOC16:
4581 case UNSPEC_RELOC32:
4582 output_addr_const (file, XVECEXP (x, 0, 0));
4583 fputs (m68k_get_reloc_decoration
4584 ((enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1))), file);
4585 return true;
4586
4587 default:
4588 break;
4589 }
4590 }
4591
4592 return false;
4593 }
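
/* E.g. an (unspec [(symbol_ref "foo") (const_int RELOC_TLSGD)]
   UNSPEC_RELOC32) operand prints as "foo@TLSGD"; the symbol name here
   is hypothetical. */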
4594
4595 /* M68K implementation of TARGET_ASM_OUTPUT_DWARF_DTPREL. */
4596
4597 static void
4598 m68k_output_dwarf_dtprel (FILE *file, int size, rtx x)
4599 {
4600 gcc_assert (size == 4);
4601 fputs ("\t.long\t", file);
4602 output_addr_const (file, x);
4603 fputs ("@TLSLDO+0x8000", file);
4604 }
4605
4606 /* In the name of slightly smaller debug output, and to cater to
4607 general assembler lossage, recognize various UNSPEC sequences
4608 and turn them back into a direct symbol reference. */
4609
4610 static rtx
4611 m68k_delegitimize_address (rtx orig_x)
4612 {
4613 rtx x;
4614 struct m68k_address addr;
4615 rtx unspec;
4616
4617 orig_x = delegitimize_mem_from_attrs (orig_x);
4618 x = orig_x;
4619 if (MEM_P (x))
4620 x = XEXP (x, 0);
4621
4622 if (GET_CODE (x) != PLUS || GET_MODE (x) != Pmode)
4623 return orig_x;
4624
4625 if (!m68k_decompose_address (GET_MODE (x), x, false, &addr)
4626 || addr.offset == NULL_RTX
4627 || GET_CODE (addr.offset) != CONST)
4628 return orig_x;
4629
4630 unspec = XEXP (addr.offset, 0);
4631 if (GET_CODE (unspec) == PLUS && CONST_INT_P (XEXP (unspec, 1)))
4632 unspec = XEXP (unspec, 0);
4633 if (GET_CODE (unspec) != UNSPEC
4634 || (XINT (unspec, 1) != UNSPEC_RELOC16
4635 && XINT (unspec, 1) != UNSPEC_RELOC32))
4636 return orig_x;
4637 x = XVECEXP (unspec, 0, 0);
4638 gcc_assert (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF);
4639 if (unspec != XEXP (addr.offset, 0))
4640 x = gen_rtx_PLUS (Pmode, x, XEXP (XEXP (addr.offset, 0), 1));
4641 if (addr.index)
4642 {
4643 rtx idx = addr.index;
4644 if (addr.scale != 1)
4645 idx = gen_rtx_MULT (Pmode, idx, GEN_INT (addr.scale));
4646 x = gen_rtx_PLUS (Pmode, idx, x);
4647 }
4648 if (addr.base)
4649 x = gen_rtx_PLUS (Pmode, addr.base, x);
4650 if (MEM_P (orig_x))
4651 x = replace_equiv_address_nv (orig_x, x);
4652 return x;
4653 }
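
/* For instance (sketch; the symbol is hypothetical), a GOT access left
   as (plus (reg %a5) (const (unspec [(symbol_ref "foo") (const_int 0)]
   UNSPEC_RELOC16))) is rewritten for debug output as
   (plus (reg %a5) (symbol_ref "foo")), recovering the symbol name. */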
4654
4655 \f
4656 /* A C compound statement to output to stdio stream STREAM the
4657 assembler syntax for an instruction operand that is a memory
4658 reference whose address is ADDR. ADDR is an RTL expression.
4659
4660 Note that this contains a kludge that knows that the only reason
4661 we have an address (plus (label_ref...) (reg...)) when not generating
4662 PIC code is in the insn before a tablejump, and we know that m68k.md
4663 generates a label LInnn: on such an insn.
4664
4665 It is possible for PIC to generate a (plus (label_ref...) (reg...))
4666 and we handle that just like we would a (plus (symbol_ref...) (reg...)).
4667
4668 This routine is responsible for distinguishing between -fpic and -fPIC
4669 style relocations in an address. When generating -fpic code the
4670 offset is output in word mode (e.g. movel a5@(_foo:w), a0). When generating
4671 -fPIC code the offset is output in long mode (e.g. movel a5@(_foo:l), a0). */
4672
4673 void
4674 print_operand_address (FILE *file, rtx addr)
4675 {
4676 struct m68k_address address;
4677
4678 if (!m68k_decompose_address (QImode, addr, true, &address))
4679 gcc_unreachable ();
4680
4681 if (address.code == PRE_DEC)
4682 fprintf (file, MOTOROLA ? "-(%s)" : "%s@-",
4683 M68K_REGNAME (REGNO (address.base)));
4684 else if (address.code == POST_INC)
4685 fprintf (file, MOTOROLA ? "(%s)+" : "%s@+",
4686 M68K_REGNAME (REGNO (address.base)));
4687 else if (!address.base && !address.index)
4688 {
4689 /* A constant address. */
4690 gcc_assert (address.offset == addr);
4691 if (GET_CODE (addr) == CONST_INT)
4692 {
4693 /* (xxx).w or (xxx).l. */
4694 if (IN_RANGE (INTVAL (addr), -0x8000, 0x7fff))
4695 fprintf (file, MOTOROLA ? "%d.w" : "%d:w", (int) INTVAL (addr));
4696 else
4697 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (addr));
4698 }
4699 else if (TARGET_PCREL)
4700 {
4701 /* (d16,PC) or (bd,PC,Xn) (with suppressed index register). */
4702 fputc ('(', file);
4703 output_addr_const (file, addr);
4704 asm_fprintf (file, flag_pic == 1 ? ":w,%Rpc)" : ":l,%Rpc)");
4705 }
4706 else
4707 {
4708 /* (xxx).l. We need a special case for SYMBOL_REF if the symbol
4709 name ends in `.<letter>', as the last 2 characters can be
4710 mistaken as a size suffix. Put the name in parentheses. */
4711 if (GET_CODE (addr) == SYMBOL_REF
4712 && strlen (XSTR (addr, 0)) > 2
4713 && XSTR (addr, 0)[strlen (XSTR (addr, 0)) - 2] == '.')
4714 {
4715 putc ('(', file);
4716 output_addr_const (file, addr);
4717 putc (')', file);
4718 }
4719 else
4720 output_addr_const (file, addr);
4721 }
4722 }
4723 else
4724 {
4725 int labelno;
4726
4727 /* If ADDR is a (d8,pc,Xn) address, this is the number of the
4728 label being accessed, otherwise it is -1. */
4729 labelno = (address.offset
4730 && !address.base
4731 && GET_CODE (address.offset) == LABEL_REF
4732 ? CODE_LABEL_NUMBER (XEXP (address.offset, 0))
4733 : -1);
4734 if (MOTOROLA)
4735 {
4736 /* Print the "offset(base" component. */
4737 if (labelno >= 0)
4738 asm_fprintf (file, "%LL%d(%Rpc,", labelno);
4739 else
4740 {
4741 if (address.offset)
4742 output_addr_const (file, address.offset);
4743
4744 putc ('(', file);
4745 if (address.base)
4746 fputs (M68K_REGNAME (REGNO (address.base)), file);
4747 }
4748 /* Print the ",index" component, if any. */
4749 if (address.index)
4750 {
4751 if (address.base)
4752 putc (',', file);
4753 fprintf (file, "%s.%c",
4754 M68K_REGNAME (REGNO (address.index)),
4755 GET_MODE (address.index) == HImode ? 'w' : 'l');
4756 if (address.scale != 1)
4757 fprintf (file, "*%d", address.scale);
4758 }
4759 putc (')', file);
4760 }
4761 else /* !MOTOROLA */
4762 {
4763 if (!address.offset && !address.index)
4764 fprintf (file, "%s@", M68K_REGNAME (REGNO (address.base)));
4765 else
4766 {
4767 /* Print the "base@(offset" component. */
4768 if (labelno >= 0)
4769 asm_fprintf (file, "%Rpc@(%LL%d", labelno);
4770 else
4771 {
4772 if (address.base)
4773 fputs (M68K_REGNAME (REGNO (address.base)), file);
4774 fprintf (file, "@(");
4775 if (address.offset)
4776 output_addr_const (file, address.offset);
4777 }
4778 /* Print the ",index" component, if any. */
4779 if (address.index)
4780 {
4781 fprintf (file, ",%s:%c",
4782 M68K_REGNAME (REGNO (address.index)),
4783 GET_MODE (address.index) == HImode ? 'w' : 'l');
4784 if (address.scale != 1)
4785 fprintf (file, ":%d", address.scale);
4786 }
4787 putc (')', file);
4788 }
4789 }
4790 }
4791 }
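
/* For example, (plus (reg %a0) (const_int 8)) prints as "8(%a0)" in
   Motorola syntax and in the MIT base@(offset) form otherwise; adding
   an index register appends e.g. ",%d1.l*4" in Motorola syntax. */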
4792 \f
4793 /* Check for cases where a clr insns can be omitted from code using
4794 strict_low_part sets. For example, the second clrl here is not needed:
4795 clrl d0; movw a0@+,d0; use d0; clrl d0; movw a0@+,d0; use d0; ...
4796
4797 MODE is the mode of this STRICT_LOW_PART set. FIRST_INSN is the clear
4798 insn we are checking for redundancy. TARGET is the register set by the
4799 clear insn. */
4800
4801 bool
4802 strict_low_part_peephole_ok (enum machine_mode mode, rtx_insn *first_insn,
4803 rtx target)
4804 {
4805 rtx_insn *p = first_insn;
4806
4807 while ((p = PREV_INSN (p)))
4808 {
4809 if (NOTE_INSN_BASIC_BLOCK_P (p))
4810 return false;
4811
4812 if (NOTE_P (p))
4813 continue;
4814
4815 /* If it isn't an insn, then give up. */
4816 if (!INSN_P (p))
4817 return false;
4818
4819 if (reg_set_p (target, p))
4820 {
4821 rtx set = single_set (p);
4822 rtx dest;
4823
4824 /* If it isn't an easy to recognize insn, then give up. */
4825 if (! set)
4826 return false;
4827
4828 dest = SET_DEST (set);
4829
4830 /* If this sets the entire target register to zero, then our
4831 first_insn is redundant. */
4832 if (rtx_equal_p (dest, target)
4833 && SET_SRC (set) == const0_rtx)
4834 return true;
4835 else if (GET_CODE (dest) == STRICT_LOW_PART
4836 && GET_CODE (XEXP (dest, 0)) == REG
4837 && REGNO (XEXP (dest, 0)) == REGNO (target)
4838 && (GET_MODE_SIZE (GET_MODE (XEXP (dest, 0)))
4839 <= GET_MODE_SIZE (mode)))
4840 /* This is a strict low part set which modifies less than
4841 we are using, so it is safe. */
4842 ;
4843 else
4844 return false;
4845 }
4846 }
4847
4848 return false;
4849 }
4850
4851 /* Operand predicates for implementing asymmetric pc-relative addressing
4852 on m68k. The m68k supports pc-relative addressing (mode 7, register 2)
4853 when used as a source operand, but not as a destination operand.
4854
4855 We model this by restricting the meaning of the basic predicates
4856 (general_operand, memory_operand, etc) to forbid the use of this
4857 addressing mode, and then define the following predicates that permit
4858 this addressing mode. These predicates can then be used for the
4859 source operands of the appropriate instructions.
4860
4861 n.b. While it is theoretically possible to change all machine patterns
4862 to use this addressing mode where permitted by the architecture,
4863 it has only been implemented for "common" cases: SImode, HImode, and
4864 QImode operands, and only for the principal operations that would
4865 require this addressing mode: data movement and simple integer operations.
4866
4867 In parallel with these new predicates, two new constraint letters
4868 were defined: 'S' and 'T'. 'S' is the -mpcrel analog of 'm'.
4869 'T' replaces 's' in the non-pcrel case. It is a no-op in the pcrel case.
4870 In the pcrel case 's' is only valid in combination with 'a' registers.
4871 See addsi3, subsi3, cmpsi, and movsi patterns for a better understanding
4872 of how these constraints are used.
4873
4874 The use of these predicates is strictly optional, though patterns that
4875 don't will cause an extra reload register to be allocated where one
4876 was not necessary:
4877
4878 lea (abc:w,%pc),%a0 ; need to reload address
4879 moveq &1,%d1 ; since write to pc-relative space
4880 movel %d1,%a0@ ; is not allowed
4881 ...
4882 lea (abc:w,%pc),%a1 ; no need to reload address here
4883 movel %a1@,%d0 ; since "movel (abc:w,%pc),%d0" is ok
4884
4885 For more info, consult tiemann@cygnus.com.
4886
4887
4888 All of the ugliness with predicates and constraints is due to the
4889 simple fact that the m68k does not allow a pc-relative addressing
4890 mode as a destination. gcc does not distinguish between source and
4891 destination addresses. Hence, if we claim that pc-relative address
4892 modes are valid, e.g. TARGET_LEGITIMATE_ADDRESS_P accepts them, then we
4893 end up with invalid code. To get around this problem, we left
4894 pc-relative modes as invalid addresses, and then added special
4895 predicates and constraints to accept them.
4896
4897 A cleaner way to handle this is to modify gcc to distinguish
4898 between source and destination addresses. We can then say that
4899 pc-relative is a valid source address but not a valid destination
4900 address, and hopefully avoid a lot of the predicate and constraint
4901 hackery. Unfortunately, this would be a pretty big change. It would
4902 be a useful change for a number of ports, but there aren't any current
4903 plans to undertake this.
4904
4905 ***************************************************************************/
4906
4907
4908 const char *
4909 output_andsi3 (rtx *operands)
4910 {
4911 int logval;
4912 if (GET_CODE (operands[2]) == CONST_INT
4913 && (INTVAL (operands[2]) | 0xffff) == -1
4914 && (DATA_REG_P (operands[0])
4915 || offsettable_memref_p (operands[0]))
4916 && !TARGET_COLDFIRE)
4917 {
4918 if (GET_CODE (operands[0]) != REG)
4919 operands[0] = adjust_address (operands[0], HImode, 2);
4920 operands[2] = GEN_INT (INTVAL (operands[2]) & 0xffff);
4921 /* Do not delete a following tstl %0 insn; that would be incorrect. */
4922 CC_STATUS_INIT;
4923 if (operands[2] == const0_rtx)
4924 return "clr%.w %0";
4925 return "and%.w %2,%0";
4926 }
4927 if (GET_CODE (operands[2]) == CONST_INT
4928 && (logval = exact_log2 (~ INTVAL (operands[2]) & 0xffffffff)) >= 0
4929 && (DATA_REG_P (operands[0])
4930 || offsettable_memref_p (operands[0])))
4931 {
4932 if (DATA_REG_P (operands[0]))
4933 operands[1] = GEN_INT (logval);
4934 else
4935 {
4936 operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
4937 operands[1] = GEN_INT (logval % 8);
4938 }
4939 /* This does not set condition codes in a standard way. */
4940 CC_STATUS_INIT;
4941 return "bclr %1,%0";
4942 }
4943 return "and%.l %2,%0";
4944 }
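
/* Worked examples (illustrative): masking %d0 with 0xFFFF0000 takes
   the first branch and emits "clr.w %d0", since the low word becomes
   zero; masking with 0xFFFFFFFE leaves ~INTVAL == 1, so exact_log2
   gives 0 and the result is "bclr #0,%d0". */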
4945
4946 const char *
4947 output_iorsi3 (rtx *operands)
4948 {
4949 register int logval;
4950 if (GET_CODE (operands[2]) == CONST_INT
4951 && INTVAL (operands[2]) >> 16 == 0
4952 && (DATA_REG_P (operands[0])
4953 || offsettable_memref_p (operands[0]))
4954 && !TARGET_COLDFIRE)
4955 {
4956 if (GET_CODE (operands[0]) != REG)
4957 operands[0] = adjust_address (operands[0], HImode, 2);
4958 /* Do not delete a following tstl %0 insn; that would be incorrect. */
4959 CC_STATUS_INIT;
4960 if (INTVAL (operands[2]) == 0xffff)
4961 return "mov%.w %2,%0";
4962 return "or%.w %2,%0";
4963 }
4964 if (GET_CODE (operands[2]) == CONST_INT
4965 && (logval = exact_log2 (INTVAL (operands[2]) & 0xffffffff)) >= 0
4966 && (DATA_REG_P (operands[0])
4967 || offsettable_memref_p (operands[0])))
4968 {
4969 if (DATA_REG_P (operands[0]))
4970 operands[1] = GEN_INT (logval);
4971 else
4972 {
4973 operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
4974 operands[1] = GEN_INT (logval % 8);
4975 }
4976 CC_STATUS_INIT;
4977 return "bset %1,%0";
4978 }
4979 return "or%.l %2,%0";
4980 }
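
/* Similarly, setting bit 16 of %d0 (operand #0x10000) emits
   "bset #16,%d0", while a low-word-only constant such as #0x00ff
   narrows to "or.w #255,%d0" on non-ColdFire targets. */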
4981
4982 const char *
4983 output_xorsi3 (rtx *operands)
4984 {
4985 register int logval;
4986 if (GET_CODE (operands[2]) == CONST_INT
4987 && INTVAL (operands[2]) >> 16 == 0
4988 && (offsettable_memref_p (operands[0]) || DATA_REG_P (operands[0]))
4989 && !TARGET_COLDFIRE)
4990 {
4991 if (! DATA_REG_P (operands[0]))
4992 operands[0] = adjust_address (operands[0], HImode, 2);
4993 /* Do not delete a following tstl %0 insn; that would be incorrect. */
4994 CC_STATUS_INIT;
4995 if (INTVAL (operands[2]) == 0xffff)
4996 return "not%.w %0";
4997 return "eor%.w %2,%0";
4998 }
4999 if (GET_CODE (operands[2]) == CONST_INT
5000 && (logval = exact_log2 (INTVAL (operands[2]) & 0xffffffff)) >= 0
5001 && (DATA_REG_P (operands[0])
5002 || offsettable_memref_p (operands[0])))
5003 {
5004 if (DATA_REG_P (operands[0]))
5005 operands[1] = GEN_INT (logval);
5006 else
5007 {
5008 operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
5009 operands[1] = GEN_INT (logval % 8);
5010 }
5011 CC_STATUS_INIT;
5012 return "bchg %1,%0";
5013 }
5014 return "eor%.l %2,%0";
5015 }
5016
5017 /* Return the instruction that should be used for a call to address X,
5018 which is known to be in operand 0. */
5019
5020 const char *
5021 output_call (rtx x)
5022 {
5023 if (symbolic_operand (x, VOIDmode))
5024 return m68k_symbolic_call;
5025 else
5026 return "jsr %a0";
5027 }
5028
5029 /* Likewise sibling calls. */
5030
5031 const char *
5032 output_sibcall (rtx x)
5033 {
5034 if (symbolic_operand (x, VOIDmode))
5035 return m68k_symbolic_jump;
5036 else
5037 return "jmp %a0";
5038 }
5039
5040 static void
5041 m68k_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
5042 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
5043 tree function)
5044 {
5045 rtx this_slot, offset, addr, mem, tmp;
5046 rtx_insn *insn;
5047
5048 /* Avoid clobbering the struct value reg by using the
5049 static chain reg as a temporary. */
5050 tmp = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
5051
5052 /* Pretend to be a post-reload pass while generating rtl. */
5053 reload_completed = 1;
5054
5055 /* The "this" pointer is stored at 4(%sp). */
5056 this_slot = gen_rtx_MEM (Pmode, plus_constant (Pmode,
5057 stack_pointer_rtx, 4));
5058
5059 /* Add DELTA to THIS. */
5060 if (delta != 0)
5061 {
5062 /* Make the offset a legitimate operand for memory addition. */
5063 offset = GEN_INT (delta);
5064 if ((delta < -8 || delta > 8)
5065 && (TARGET_COLDFIRE || USE_MOVQ (delta)))
5066 {
5067 emit_move_insn (gen_rtx_REG (Pmode, D0_REG), offset);
5068 offset = gen_rtx_REG (Pmode, D0_REG);
5069 }
5070 emit_insn (gen_add3_insn (copy_rtx (this_slot),
5071 copy_rtx (this_slot), offset));
5072 }
5073
5074 /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
5075 if (vcall_offset != 0)
5076 {
5077 /* Set the static chain register to *THIS. */
5078 emit_move_insn (tmp, this_slot);
5079 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
5080
5081 /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
5082 addr = plus_constant (Pmode, tmp, vcall_offset);
5083 if (!m68k_legitimate_address_p (Pmode, addr, true))
5084 {
5085 emit_insn (gen_rtx_SET (VOIDmode, tmp, addr));
5086 addr = tmp;
5087 }
5088
5089 /* Load the offset into %d0 and add it to THIS. */
5090 emit_move_insn (gen_rtx_REG (Pmode, D0_REG),
5091 gen_rtx_MEM (Pmode, addr));
5092 emit_insn (gen_add3_insn (copy_rtx (this_slot),
5093 copy_rtx (this_slot),
5094 gen_rtx_REG (Pmode, D0_REG)));
5095 }
5096
5097 /* Jump to the target function. Use a sibcall if direct jumps are
5098 allowed, otherwise load the address into a register first. */
5099 mem = DECL_RTL (function);
5100 if (!sibcall_operand (XEXP (mem, 0), VOIDmode))
5101 {
5102 gcc_assert (flag_pic);
5103
5104 if (!TARGET_SEP_DATA)
5105 {
5106 /* Use the static chain register as a temporary (call-clobbered)
5107 GOT pointer for this function. We can use the static chain
5108 register because it isn't live on entry to the thunk. */
5109 SET_REGNO (pic_offset_table_rtx, STATIC_CHAIN_REGNUM);
5110 emit_insn (gen_load_got (pic_offset_table_rtx));
5111 }
5112 legitimize_pic_address (XEXP (mem, 0), Pmode, tmp);
5113 mem = replace_equiv_address (mem, tmp);
5114 }
5115 insn = emit_call_insn (gen_sibcall (mem, const0_rtx));
5116 SIBLING_CALL_P (insn) = 1;
5117
5118 /* Run just enough of rest_of_compilation. */
5119 insn = get_insns ();
5120 split_all_insns_noflow ();
5121 final_start_function (insn, file, 1);
5122 final (insn, file, 1);
5123 final_end_function ();
5124
5125 /* Clean up the vars set above. */
5126 reload_completed = 0;
5127
5128 /* Restore the original PIC register. */
5129 if (flag_pic)
5130 SET_REGNO (pic_offset_table_rtx, PIC_REG);
5131 }
5132
5133 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
5134
5135 static rtx
5136 m68k_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
5137 int incoming ATTRIBUTE_UNUSED)
5138 {
5139 return gen_rtx_REG (Pmode, M68K_STRUCT_VALUE_REGNUM);
5140 }
5141
5142 /* Return nonzero if register old_reg can be renamed to register new_reg. */
5143 int
5144 m68k_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED,
5145 unsigned int new_reg)
5146 {
5147
5148 /* Interrupt functions can only use registers that have already been
5149 saved by the prologue, even if they would normally be
5150 call-clobbered. */
5151
5152 if ((m68k_get_function_kind (current_function_decl)
5153 == m68k_fk_interrupt_handler)
5154 && !df_regs_ever_live_p (new_reg))
5155 return 0;
5156
5157 return 1;
5158 }
5159
5160 /* Value is true if hard register REGNO can hold a value of machine-mode
5161 MODE. On the 68000, we let the cpu registers hold any mode, but
5162 restrict the 68881 registers to floating-point modes. */
5163
5164 bool
5165 m68k_regno_mode_ok (int regno, enum machine_mode mode)
5166 {
5167 if (DATA_REGNO_P (regno))
5168 {
5169 /* Data registers can hold any value that fits in the remaining data registers. */
5170 if (regno + GET_MODE_SIZE (mode) / 4 <= 8)
5171 return true;
5172 }
5173 else if (ADDRESS_REGNO_P (regno))
5174 {
5175 if (regno + GET_MODE_SIZE (mode) / 4 <= 16)
5176 return true;
5177 }
5178 else if (FP_REGNO_P (regno))
5179 {
5180 /* FPU registers can hold a float or complex float of long double
5181 size or smaller. */
5182 if ((GET_MODE_CLASS (mode) == MODE_FLOAT
5183 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
5184 && GET_MODE_UNIT_SIZE (mode) <= TARGET_FP_REG_SIZE)
5185 return true;
5186 }
5187 return false;
5188 }
5189
5190 /* Implement SECONDARY_RELOAD_CLASS. */
5191
5192 enum reg_class
5193 m68k_secondary_reload_class (enum reg_class rclass,
5194 enum machine_mode mode, rtx x)
5195 {
5196 int regno;
5197
5198 regno = true_regnum (x);
5199
5200 /* If one operand of a movqi is an address register, the other
5201 operand must be a general register or constant. Other types
5202 of operand must be reloaded through a data register. */
5203 if (GET_MODE_SIZE (mode) == 1
5204 && reg_classes_intersect_p (rclass, ADDR_REGS)
5205 && !(INT_REGNO_P (regno) || CONSTANT_P (x)))
5206 return DATA_REGS;
5207
5208 /* PC-relative addresses must be loaded into an address register first. */
5209 if (TARGET_PCREL
5210 && !reg_class_subset_p (rclass, ADDR_REGS)
5211 && symbolic_operand (x, VOIDmode))
5212 return ADDR_REGS;
5213
5214 return NO_REGS;
5215 }
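
/* E.g. reloading a QImode memory operand into ADDR_REGS requires a
   DATA_REGS intermediate, because there are no byte moves into
   address registers: "move.b" with an %aN destination is invalid. */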
5216
5217 /* Implement PREFERRED_RELOAD_CLASS. */
5218
5219 enum reg_class
5220 m68k_preferred_reload_class (rtx x, enum reg_class rclass)
5221 {
5222 enum reg_class secondary_class;
5223
5224 /* If RCLASS might need a secondary reload, try restricting it to
5225 a class that doesn't. */
5226 secondary_class = m68k_secondary_reload_class (rclass, GET_MODE (x), x);
5227 if (secondary_class != NO_REGS
5228 && reg_class_subset_p (secondary_class, rclass))
5229 return secondary_class;
5230
5231 /* Prefer to use moveq for in-range constants. */
5232 if (GET_CODE (x) == CONST_INT
5233 && reg_class_subset_p (DATA_REGS, rclass)
5234 && IN_RANGE (INTVAL (x), -0x80, 0x7f))
5235 return DATA_REGS;
5236
5237 /* ??? Do we really need this now? */
5238 if (GET_CODE (x) == CONST_DOUBLE
5239 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
5240 {
5241 if (TARGET_HARD_FLOAT && reg_class_subset_p (FP_REGS, rclass))
5242 return FP_REGS;
5243
5244 return NO_REGS;
5245 }
5246
5247 return rclass;
5248 }
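
/* E.g. reloading the constant 5 prefers DATA_REGS (when DATA_REGS is
   a subset of RCLASS) so that a single-word "moveq #5,%dN" can be
   used. */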
5249
5250 /* Return floating point values in a 68881 register. This makes 68881 code
5251 a little bit faster. It also makes -msoft-float code incompatible with
5252 hard-float code, so people have to be careful not to mix the two.
5253 For ColdFire it was decided the ABI incompatibility is undesirable.
5254 If there is need for a hard-float ABI it is probably worth doing it
5255 properly and also passing function arguments in FP registers. */
5256 rtx
5257 m68k_libcall_value (enum machine_mode mode)
5258 {
5259 switch (mode) {
5260 case SFmode:
5261 case DFmode:
5262 case XFmode:
5263 if (TARGET_68881)
5264 return gen_rtx_REG (mode, FP0_REG);
5265 break;
5266 default:
5267 break;
5268 }
5269
5270 return gen_rtx_REG (mode, m68k_libcall_value_in_a0_p ? A0_REG : D0_REG);
5271 }
5272
5273 /* Location in which function value is returned.
5274 NOTE: Due to differences in ABIs, don't call this function directly,
5275 use FUNCTION_VALUE instead. */
5276 rtx
5277 m68k_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED)
5278 {
5279 enum machine_mode mode;
5280
5281 mode = TYPE_MODE (valtype);
5282 switch (mode) {
5283 case SFmode:
5284 case DFmode:
5285 case XFmode:
5286 if (TARGET_68881)
5287 return gen_rtx_REG (mode, FP0_REG);
5288 break;
5289 default:
5290 break;
5291 }
5292
5293 /* If the function returns a pointer, return it in %a0. */
5294 if (func && POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (func))))
5295 /* For compatibility with the large body of existing code which
5296 does not always properly declare external functions returning
5297 pointer types, the m68k/SVR4 convention is to copy the value
5298 returned for pointer functions from a0 to d0 in the function
5299 epilogue, so that callers that have neglected to properly
5300 declare the callee can still find the correct return value in
5301 d0. */
5302 return gen_rtx_PARALLEL
5303 (mode,
5304 gen_rtvec (2,
5305 gen_rtx_EXPR_LIST (VOIDmode,
5306 gen_rtx_REG (mode, A0_REG),
5307 const0_rtx),
5308 gen_rtx_EXPR_LIST (VOIDmode,
5309 gen_rtx_REG (mode, D0_REG),
5310 const0_rtx)));
5311 else if (POINTER_TYPE_P (valtype))
5312 return gen_rtx_REG (mode, A0_REG);
5313 else
5314 return gen_rtx_REG (mode, D0_REG);
5315 }
5316
5317 /* Worker function for TARGET_RETURN_IN_MEMORY. */
5318 #if M68K_HONOR_TARGET_STRICT_ALIGNMENT
5319 static bool
5320 m68k_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
5321 {
5322 enum machine_mode mode = TYPE_MODE (type);
5323
5324 if (mode == BLKmode)
5325 return true;
5326
5327 /* If TYPE's known alignment is less than the alignment of MODE that
5328 would contain the structure, then return in memory. We need to
5329 do so to maintain the compatibility between code compiled with
5330 -mstrict-align and that compiled with -mno-strict-align. */
5331 if (AGGREGATE_TYPE_P (type)
5332 && TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (mode))
5333 return true;
5334
5335 return false;
5336 }
5337 #endif
5338
5339 /* CPU to schedule the program for. */
5340 enum attr_cpu m68k_sched_cpu;
5341
5342 /* MAC to schedule the program for. */
5343 enum attr_mac m68k_sched_mac;
5344
5345 /* Operand type. */
5346 enum attr_op_type
5347 {
5348 /* No operand. */
5349 OP_TYPE_NONE,
5350
5351 /* Integer register. */
5352 OP_TYPE_RN,
5353
5354 /* FP register. */
5355 OP_TYPE_FPN,
5356
5357 /* Implicit mem reference (e.g. stack). */
5358 OP_TYPE_MEM1,
5359
5360 /* Memory without offset or indexing. EA modes 2, 3 and 4. */
5361 OP_TYPE_MEM234,
5362
5363 /* Memory with offset but without indexing. EA mode 5. */
5364 OP_TYPE_MEM5,
5365
5366 /* Memory with indexing. EA mode 6. */
5367 OP_TYPE_MEM6,
5368
5369 /* Memory referenced by absolute address. EA mode 7. */
5370 OP_TYPE_MEM7,
5371
5372 /* Immediate operand that doesn't require extension word. */
5373 OP_TYPE_IMM_Q,
5374
5375 /* Immediate 16 bit operand. */
5376 OP_TYPE_IMM_W,
5377
5378 /* Immediate 32 bit operand. */
5379 OP_TYPE_IMM_L
5380 };
5381
5382 /* Return type of memory ADDR_RTX refers to. */
5383 static enum attr_op_type
5384 sched_address_type (enum machine_mode mode, rtx addr_rtx)
5385 {
5386 struct m68k_address address;
5387
5388 if (symbolic_operand (addr_rtx, VOIDmode))
5389 return OP_TYPE_MEM7;
5390
5391 if (!m68k_decompose_address (mode, addr_rtx,
5392 reload_completed, &address))
5393 {
5394 gcc_assert (!reload_completed);
5395 /* Reload will likely fix the address to be in a register. */
5396 return OP_TYPE_MEM234;
5397 }
5398
5399 if (address.scale != 0)
5400 return OP_TYPE_MEM6;
5401
5402 if (address.base != NULL_RTX)
5403 {
5404 if (address.offset == NULL_RTX)
5405 return OP_TYPE_MEM234;
5406
5407 return OP_TYPE_MEM5;
5408 }
5409
5410 gcc_assert (address.offset != NULL_RTX);
5411
5412 return OP_TYPE_MEM7;
5413 }
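
/* Illustrative mapping: (reg %a0) or an automodified address
   classifies as OP_TYPE_MEM234; (plus (reg %a0) (const_int 8)) as
   OP_TYPE_MEM5; any address with an index register as OP_TYPE_MEM6;
   and a bare symbol as OP_TYPE_MEM7. */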
5414
5415 /* Return operand X or Y (depending on OPX_P) of INSN. */
5416 static rtx
5417 sched_get_operand (rtx insn, bool opx_p)
5418 {
5419 int i;
5420
5421 if (recog_memoized (insn) < 0)
5422 gcc_unreachable ();
5423
5424 extract_constrain_insn_cached (insn);
5425
5426 if (opx_p)
5427 i = get_attr_opx (insn);
5428 else
5429 i = get_attr_opy (insn);
5430
5431 if (i >= recog_data.n_operands)
5432 return NULL;
5433
5434 return recog_data.operand[i];
5435 }
5436
5437 /* Return type of INSN's operand X (if OPX_P) or operand Y (if !OPX_P).
5438 If ADDRESS_P is true, return type of memory location operand refers to. */
5439 static enum attr_op_type
5440 sched_attr_op_type (rtx insn, bool opx_p, bool address_p)
5441 {
5442 rtx op;
5443
5444 op = sched_get_operand (insn, opx_p);
5445
5446 if (op == NULL)
5447 {
5448 gcc_assert (!reload_completed);
5449 return OP_TYPE_RN;
5450 }
5451
5452 if (address_p)
5453 return sched_address_type (QImode, op);
5454
5455 if (memory_operand (op, VOIDmode))
5456 return sched_address_type (GET_MODE (op), XEXP (op, 0));
5457
5458 if (register_operand (op, VOIDmode))
5459 {
5460 if ((!reload_completed && FLOAT_MODE_P (GET_MODE (op)))
5461 || (reload_completed && FP_REG_P (op)))
5462 return OP_TYPE_FPN;
5463
5464 return OP_TYPE_RN;
5465 }
5466
5467 if (GET_CODE (op) == CONST_INT)
5468 {
5469 int ival;
5470
5471 ival = INTVAL (op);
5472
5473 /* Check for quick constants. */
5474 switch (get_attr_type (insn))
5475 {
5476 case TYPE_ALUQ_L:
5477 if (IN_RANGE (ival, 1, 8) || IN_RANGE (ival, -8, -1))
5478 return OP_TYPE_IMM_Q;
5479
5480 gcc_assert (!reload_completed);
5481 break;
5482
5483 case TYPE_MOVEQ_L:
5484 if (USE_MOVQ (ival))
5485 return OP_TYPE_IMM_Q;
5486
5487 gcc_assert (!reload_completed);
5488 break;
5489
5490 case TYPE_MOV3Q_L:
5491 if (valid_mov3q_const (ival))
5492 return OP_TYPE_IMM_Q;
5493
5494 gcc_assert (!reload_completed);
5495 break;
5496
5497 default:
5498 break;
5499 }
5500
5501 if (IN_RANGE (ival, -0x8000, 0x7fff))
5502 return OP_TYPE_IMM_W;
5503
5504 return OP_TYPE_IMM_L;
5505 }
5506
5507 if (GET_CODE (op) == CONST_DOUBLE)
5508 {
5509 switch (GET_MODE (op))
5510 {
5511 case SFmode:
5512 return OP_TYPE_IMM_W;
5513
5514 case VOIDmode:
5515 case DFmode:
5516 return OP_TYPE_IMM_L;
5517
5518 default:
5519 gcc_unreachable ();
5520 }
5521 }
5522
5523 if (GET_CODE (op) == CONST
5524 || symbolic_operand (op, VOIDmode)
5525 || LABEL_P (op))
5526 {
5527 switch (GET_MODE (op))
5528 {
5529 case QImode:
5530 return OP_TYPE_IMM_Q;
5531
5532 case HImode:
5533 return OP_TYPE_IMM_W;
5534
5535 case SImode:
5536 return OP_TYPE_IMM_L;
5537
5538 default:
5539 if (symbolic_operand (m68k_unwrap_symbol (op, false), VOIDmode))
5540 /* Just a guess. */
5541 return OP_TYPE_IMM_W;
5542
5543 return OP_TYPE_IMM_L;
5544 }
5545 }
5546
5547 gcc_assert (!reload_completed);
5548
5549 if (FLOAT_MODE_P (GET_MODE (op)))
5550 return OP_TYPE_FPN;
5551
5552 return OP_TYPE_RN;
5553 }
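
/* E.g. a CONST_INT of 5 in an aluq_l insn classifies as OP_TYPE_IMM_Q
   (an addq/subq-style quick constant); in other insns 1000 classifies
   as OP_TYPE_IMM_W and 100000 as OP_TYPE_IMM_L. */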
5554
5555 /* Implement opx_type attribute.
5556 Return type of INSN's operand X.
5557 If ADDRESS_P is true, return type of memory location operand refers to. */
5558 enum attr_opx_type
5559 m68k_sched_attr_opx_type (rtx insn, int address_p)
5560 {
5561 switch (sched_attr_op_type (insn, true, address_p != 0))
5562 {
5563 case OP_TYPE_RN:
5564 return OPX_TYPE_RN;
5565
5566 case OP_TYPE_FPN:
5567 return OPX_TYPE_FPN;
5568
5569 case OP_TYPE_MEM1:
5570 return OPX_TYPE_MEM1;
5571
5572 case OP_TYPE_MEM234:
5573 return OPX_TYPE_MEM234;
5574
5575 case OP_TYPE_MEM5:
5576 return OPX_TYPE_MEM5;
5577
5578 case OP_TYPE_MEM6:
5579 return OPX_TYPE_MEM6;
5580
5581 case OP_TYPE_MEM7:
5582 return OPX_TYPE_MEM7;
5583
5584 case OP_TYPE_IMM_Q:
5585 return OPX_TYPE_IMM_Q;
5586
5587 case OP_TYPE_IMM_W:
5588 return OPX_TYPE_IMM_W;
5589
5590 case OP_TYPE_IMM_L:
5591 return OPX_TYPE_IMM_L;
5592
5593 default:
5594 gcc_unreachable ();
5595 }
5596 }
5597
5598 /* Implement opy_type attribute.
5599 Return type of INSN's operand Y.
5600 If ADDRESS_P is true, return type of memory location operand refers to. */
5601 enum attr_opy_type
5602 m68k_sched_attr_opy_type (rtx insn, int address_p)
5603 {
5604 switch (sched_attr_op_type (insn, false, address_p != 0))
5605 {
5606 case OP_TYPE_RN:
5607 return OPY_TYPE_RN;
5608
5609 case OP_TYPE_FPN:
5610 return OPY_TYPE_FPN;
5611
5612 case OP_TYPE_MEM1:
5613 return OPY_TYPE_MEM1;
5614
5615 case OP_TYPE_MEM234:
5616 return OPY_TYPE_MEM234;
5617
5618 case OP_TYPE_MEM5:
5619 return OPY_TYPE_MEM5;
5620
5621 case OP_TYPE_MEM6:
5622 return OPY_TYPE_MEM6;
5623
5624 case OP_TYPE_MEM7:
5625 return OPY_TYPE_MEM7;
5626
5627 case OP_TYPE_IMM_Q:
5628 return OPY_TYPE_IMM_Q;
5629
5630 case OP_TYPE_IMM_W:
5631 return OPY_TYPE_IMM_W;
5632
5633 case OP_TYPE_IMM_L:
5634 return OPY_TYPE_IMM_L;
5635
5636 default:
5637 gcc_unreachable ();
5638 }
5639 }
5640
5641 /* Return the size of INSN (in words) as an int. */
5642 static int
5643 sched_get_attr_size_int (rtx insn)
5644 {
5645 int size;
5646
5647 switch (get_attr_type (insn))
5648 {
5649 case TYPE_IGNORE:
5650 /* There should be no references to m68k_sched_attr_size for 'ignore'
5651 instructions. */
5652 gcc_unreachable ();
5653 return 0;
5654
5655 case TYPE_MUL_L:
5656 size = 2;
5657 break;
5658
5659 default:
5660 size = 1;
5661 break;
5662 }
5663
5664 switch (get_attr_opx_type (insn))
5665 {
5666 case OPX_TYPE_NONE:
5667 case OPX_TYPE_RN:
5668 case OPX_TYPE_FPN:
5669 case OPX_TYPE_MEM1:
5670 case OPX_TYPE_MEM234:
5671 case OPX_TYPE_IMM_Q:
5672 break;
5673
5674 case OPX_TYPE_MEM5:
5675 case OPX_TYPE_MEM6:
5676 /* Here we assume that most absolute references are short. */
5677 case OPX_TYPE_MEM7:
5678 case OPX_TYPE_IMM_W:
5679 ++size;
5680 break;
5681
5682 case OPX_TYPE_IMM_L:
5683 size += 2;
5684 break;
5685
5686 default:
5687 gcc_unreachable ();
5688 }
5689
5690 switch (get_attr_opy_type (insn))
5691 {
5692 case OPY_TYPE_NONE:
5693 case OPY_TYPE_RN:
5694 case OPY_TYPE_FPN:
5695 case OPY_TYPE_MEM1:
5696 case OPY_TYPE_MEM234:
5697 case OPY_TYPE_IMM_Q:
5698 break;
5699
5700 case OPY_TYPE_MEM5:
5701 case OPY_TYPE_MEM6:
5702 /* Here we assume that most absolute references are short. */
5703 case OPY_TYPE_MEM7:
5704 case OPY_TYPE_IMM_W:
5705 ++size;
5706 break;
5707
5708 case OPY_TYPE_IMM_L:
5709 size += 2;
5710 break;
5711
5712 default:
5713 gcc_unreachable ();
5714 }
5715
5716 if (size > 3)
5717 {
5718 gcc_assert (!reload_completed);
5719
5720 size = 3;
5721 }
5722
5723 return size;
5724 }
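/* For example, "add.l #100000,%d0" starts from a base size of one word
   and gains two words for the 32-bit immediate (OPY_TYPE_IMM_L), for a
   total of three; a mul_l insn starts from a base size of two words.  */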
5725
5726 /* Return size of INSN as attribute enum value. */
5727 enum attr_size
5728 m68k_sched_attr_size (rtx insn)
5729 {
5730 switch (sched_get_attr_size_int (insn))
5731 {
5732 case 1:
5733 return SIZE_1;
5734
5735 case 2:
5736 return SIZE_2;
5737
5738 case 3:
5739 return SIZE_3;
5740
5741 default:
5742 gcc_unreachable ();
5743 }
5744 }
5745
5746 /* Return a simplified type of INSN's operand X or Y (depending on OPX_P):
5747 OP_TYPE_MEM1 or OP_TYPE_MEM6 for memory operands, OP_TYPE_RN otherwise. */
5748 static enum attr_op_type
5749 sched_get_opxy_mem_type (rtx insn, bool opx_p)
5750 {
5751 if (opx_p)
5752 {
5753 switch (get_attr_opx_type (insn))
5754 {
5755 case OPX_TYPE_NONE:
5756 case OPX_TYPE_RN:
5757 case OPX_TYPE_FPN:
5758 case OPX_TYPE_IMM_Q:
5759 case OPX_TYPE_IMM_W:
5760 case OPX_TYPE_IMM_L:
5761 return OP_TYPE_RN;
5762
5763 case OPX_TYPE_MEM1:
5764 case OPX_TYPE_MEM234:
5765 case OPX_TYPE_MEM5:
5766 case OPX_TYPE_MEM7:
5767 return OP_TYPE_MEM1;
5768
5769 case OPX_TYPE_MEM6:
5770 return OP_TYPE_MEM6;
5771
5772 default:
5773 gcc_unreachable ();
5774 }
5775 }
5776 else
5777 {
5778 switch (get_attr_opy_type (insn))
5779 {
5780 case OPY_TYPE_NONE:
5781 case OPY_TYPE_RN:
5782 case OPY_TYPE_FPN:
5783 case OPY_TYPE_IMM_Q:
5784 case OPY_TYPE_IMM_W:
5785 case OPY_TYPE_IMM_L:
5786 return OP_TYPE_RN;
5787
5788 case OPY_TYPE_MEM1:
5789 case OPY_TYPE_MEM234:
5790 case OPY_TYPE_MEM5:
5791 case OPY_TYPE_MEM7:
5792 return OP_TYPE_MEM1;
5793
5794 case OPY_TYPE_MEM6:
5795 return OP_TYPE_MEM6;
5796
5797 default:
5798 gcc_unreachable ();
5799 }
5800 }
5801 }
5802
5803 /* Implement op_mem attribute. */
5804 enum attr_op_mem
5805 m68k_sched_attr_op_mem (rtx insn)
5806 {
5807 enum attr_op_type opx;
5808 enum attr_op_type opy;
5809
5810 opx = sched_get_opxy_mem_type (insn, true);
5811 opy = sched_get_opxy_mem_type (insn, false);
5812
5813 if (opy == OP_TYPE_RN && opx == OP_TYPE_RN)
5814 return OP_MEM_00;
5815
5816 if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM1)
5817 {
5818 switch (get_attr_opx_access (insn))
5819 {
5820 case OPX_ACCESS_R:
5821 return OP_MEM_10;
5822
5823 case OPX_ACCESS_W:
5824 return OP_MEM_01;
5825
5826 case OPX_ACCESS_RW:
5827 return OP_MEM_11;
5828
5829 default:
5830 gcc_unreachable ();
5831 }
5832 }
5833
5834 if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM6)
5835 {
5836 switch (get_attr_opx_access (insn))
5837 {
5838 case OPX_ACCESS_R:
5839 return OP_MEM_I0;
5840
5841 case OPX_ACCESS_W:
5842 return OP_MEM_0I;
5843
5844 case OPX_ACCESS_RW:
5845 return OP_MEM_I1;
5846
5847 default:
5848 gcc_unreachable ();
5849 }
5850 }
5851
5852 if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_RN)
5853 return OP_MEM_10;
5854
5855 if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM1)
5856 {
5857 switch (get_attr_opx_access (insn))
5858 {
5859 case OPX_ACCESS_W:
5860 return OP_MEM_11;
5861
5862 default:
5863 gcc_assert (!reload_completed);
5864 return OP_MEM_11;
5865 }
5866 }
5867
5868 if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM6)
5869 {
5870 switch (get_attr_opx_access (insn))
5871 {
5872 case OPX_ACCESS_W:
5873 return OP_MEM_1I;
5874
5875 default:
5876 gcc_assert (!reload_completed);
5877 return OP_MEM_1I;
5878 }
5879 }
5880
5881 if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_RN)
5882 return OP_MEM_I0;
5883
5884 if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM1)
5885 {
5886 switch (get_attr_opx_access (insn))
5887 {
5888 case OPX_ACCESS_W:
5889 return OP_MEM_I1;
5890
5891 default:
5892 gcc_assert (!reload_completed);
5893 return OP_MEM_I1;
5894 }
5895 }
5896
5897 gcc_assert (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM6);
5898 gcc_assert (!reload_completed);
5899 return OP_MEM_I1;
5900 }
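/* In the OP_MEM_* values above, the first position counts memory reads
   and the second memory writes, with 'I' marking an indexed (MEM6)
   access.  For instance, "add.l (%a0),%d1" yields OP_MEM_10 (one read,
   no write) and "move.l %d0,(%a1)" yields OP_MEM_01 (no read, one
   write).  */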
5901
5902 /* Data for ColdFire V4 index bypass.
5903 The producer modifies a register that the consumer uses as an index
5904 with the specified scale. */
5905 static struct
5906 {
5907 /* Producer instruction. */
5908 rtx pro;
5909
5910 /* Consumer instruction. */
5911 rtx con;
5912
5913 /* Scale of the indexed memory access within the consumer,
5914 or zero if the bypass should not be effective at the moment. */
5915 int scale;
5916 } sched_cfv4_bypass_data;
5917
5918 /* An empty state that is used in m68k_sched_adjust_cost. */
5919 static state_t sched_adjust_cost_state;
5920
5921 /* Implement adjust_cost scheduler hook.
5922 Return adjusted COST of dependency LINK between DEF_INSN and INSN. */
5923 static int
5924 m68k_sched_adjust_cost (rtx insn, rtx link ATTRIBUTE_UNUSED, rtx def_insn,
5925 int cost)
5926 {
5927 int delay;
5928
5929 if (recog_memoized (def_insn) < 0
5930 || recog_memoized (insn) < 0)
5931 return cost;
5932
5933 if (sched_cfv4_bypass_data.scale == 1)
5934 /* Handle ColdFire V4 bypass for indexed address with 1x scale. */
5935 {
5936 /* haifa-sched.c: insn_cost () calls bypass_p () just before
5937 targetm.sched.adjust_cost (). Hence, we can be relatively sure
5938 that the data in sched_cfv4_bypass_data is up to date. */
5939 gcc_assert (sched_cfv4_bypass_data.pro == def_insn
5940 && sched_cfv4_bypass_data.con == insn);
5941
5942 if (cost < 3)
5943 cost = 3;
5944
5945 sched_cfv4_bypass_data.pro = NULL;
5946 sched_cfv4_bypass_data.con = NULL;
5947 sched_cfv4_bypass_data.scale = 0;
5948 }
5949 else
5950 gcc_assert (sched_cfv4_bypass_data.pro == NULL
5951 && sched_cfv4_bypass_data.con == NULL
5952 && sched_cfv4_bypass_data.scale == 0);
5953
5954 /* Don't try to issue INSN earlier than DFA permits.
5955 This is especially useful for instructions that write to memory,
5956 as their true dependence (default) latency is better set to 0
5957 to work around alias analysis limitations.
5958 This is, in fact, a machine independent tweak, so, probably,
5959 it should be moved to haifa-sched.c: insn_cost (). */
5960 delay = min_insn_conflict_delay (sched_adjust_cost_state, def_insn, insn);
5961 if (delay > cost)
5962 cost = delay;
5963
5964 return cost;
5965 }
5966
5967 /* Return the maximal number of insns that can be scheduled in a single cycle. */
5968 static int
5969 m68k_sched_issue_rate (void)
5970 {
5971 switch (m68k_sched_cpu)
5972 {
5973 case CPU_CFV1:
5974 case CPU_CFV2:
5975 case CPU_CFV3:
5976 return 1;
5977
5978 case CPU_CFV4:
5979 return 2;
5980
5981 default:
5982 gcc_unreachable ();
5983 return 0;
5984 }
5985 }
5986
5987 /* Maximal instruction length (in words) for the current CPU.
5988 E.g. it is 3 for any ColdFire core. */
5989 static int max_insn_size;
5990
5991 /* Data to model instruction buffer of CPU. */
5992 struct _sched_ib
5993 {
5994 /* True if the instruction buffer is modeled for the current CPU. */
5995 bool enabled_p;
5996
5997 /* Size of the instruction buffer in words. */
5998 int size;
5999
6000 /* Number of filled words in the instruction buffer. */
6001 int filled;
6002
6003 /* Additional information about instruction buffer for CPUs that have
6004 a buffer of instruction records, rather than a plain buffer
6005 of instruction words. */
6006 struct _sched_ib_records
6007 {
6008 /* Size of buffer in records. */
6009 int n_insns;
6010
6011 /* Array to hold data on adjustments made to the size of the buffer. */
6012 int *adjust;
6013
6014 /* Current index into the above array. */
6015 int adjust_index;
6016 } records;
6017
6018 /* An insn that reserves (marks empty) one word in the instruction buffer. */
6019 rtx insn;
6020 };
6021
6022 static struct _sched_ib sched_ib;
6023
6024 /* ID of memory unit. */
6025 static int sched_mem_unit_code;
6026
6027 /* Implementation of the targetm.sched.variable_issue () hook.
6028 It is called after INSN was issued. It returns the number of insns
6029 that can possibly get scheduled on the current cycle.
6030 It is used here to determine the effect of INSN on the instruction
6031 buffer. */
6032 static int
6033 m68k_sched_variable_issue (FILE *sched_dump ATTRIBUTE_UNUSED,
6034 int sched_verbose ATTRIBUTE_UNUSED,
6035 rtx insn, int can_issue_more)
6036 {
6037 int insn_size;
6038
6039 if (recog_memoized (insn) >= 0 && get_attr_type (insn) != TYPE_IGNORE)
6040 {
6041 switch (m68k_sched_cpu)
6042 {
6043 case CPU_CFV1:
6044 case CPU_CFV2:
6045 insn_size = sched_get_attr_size_int (insn);
6046 break;
6047
6048 case CPU_CFV3:
6049 insn_size = sched_get_attr_size_int (insn);
6050
6051 /* ColdFire V3 and V4 cores have instruction buffers that can
6052 accumulate up to 8 instructions regardless of instructions'
6053 sizes. So we should take care not to "prefetch" 24 one-word
6054 or 12 two-word instructions.
6055 To model this behavior we temporarily decrease the size of the
6056 buffer by (max_insn_size - insn_size) for the next 7 instructions. */
6057 {
6058 int adjust;
6059
6060 adjust = max_insn_size - insn_size;
6061 sched_ib.size -= adjust;
6062
6063 if (sched_ib.filled > sched_ib.size)
6064 sched_ib.filled = sched_ib.size;
6065
6066 sched_ib.records.adjust[sched_ib.records.adjust_index] = adjust;
6067 }
6068
6069 ++sched_ib.records.adjust_index;
6070 if (sched_ib.records.adjust_index == sched_ib.records.n_insns)
6071 sched_ib.records.adjust_index = 0;
6072
6073 /* Undo the adjustment we made 7 instructions ago. */
6074 sched_ib.size
6075 += sched_ib.records.adjust[sched_ib.records.adjust_index];
6076
6077 break;
6078
6079 case CPU_CFV4:
6080 gcc_assert (!sched_ib.enabled_p);
6081 insn_size = 0;
6082 break;
6083
6084 default:
6085 gcc_unreachable ();
6086 }
6087
6088 if (insn_size > sched_ib.filled)
6089 /* Scheduling for register pressure does not always take DFA into
6090 account. Work around the instruction buffer not being filled enough. */
6091 {
6092 gcc_assert (sched_pressure == SCHED_PRESSURE_WEIGHTED);
6093 insn_size = sched_ib.filled;
6094 }
6095
6096 --can_issue_more;
6097 }
6098 else if (GET_CODE (PATTERN (insn)) == ASM_INPUT
6099 || asm_noperands (PATTERN (insn)) >= 0)
6100 insn_size = sched_ib.filled;
6101 else
6102 insn_size = 0;
6103
6104 sched_ib.filled -= insn_size;
6105
6106 return can_issue_more;
6107 }
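/* As an illustration of the buffer model above: on ColdFire V3, issuing
   a one-word insn records an adjustment of 3 - 1 = 2 words, temporarily
   shrinking the modeled buffer from 24 to 22 words; the adjustment is
   undone once its slot in the eight-entry ring comes around again.  */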
6108
6109 /* Return how many instructions the scheduler should look ahead to choose the
6110 best one. */
6111 static int
6112 m68k_sched_first_cycle_multipass_dfa_lookahead (void)
6113 {
6114 return m68k_sched_issue_rate () - 1;
6115 }
6116
6117 /* Implementation of targetm.sched.init_global () hook.
6118 It is invoked once per scheduling pass and is used here
6119 to initialize scheduler constants. */
6120 static void
6121 m68k_sched_md_init_global (FILE *sched_dump ATTRIBUTE_UNUSED,
6122 int sched_verbose ATTRIBUTE_UNUSED,
6123 int n_insns ATTRIBUTE_UNUSED)
6124 {
6125 #ifdef ENABLE_CHECKING
6126 /* Check that all instructions have DFA reservations and
6127 that all instructions can be issued from a clean state. */
6128 {
6129 rtx insn;
6130 state_t state;
6131
6132 state = alloca (state_size ());
6133
6134 for (insn = get_insns (); insn != NULL_RTX; insn = NEXT_INSN (insn))
6135 {
6136 if (INSN_P (insn) && recog_memoized (insn) >= 0)
6137 {
6138 gcc_assert (insn_has_dfa_reservation_p (insn));
6139
6140 state_reset (state);
6141 if (state_transition (state, insn) >= 0)
6142 gcc_unreachable ();
6143 }
6144 }
6145 }
6146 #endif
6147
6148 /* Set up the target CPU. */
6149
6150 /* ColdFire V4 has a set of features to keep its instruction buffer full
6151 (e.g., a separate memory bus for instructions) and, hence, we do not model
6152 the buffer for this CPU. */
6153 sched_ib.enabled_p = (m68k_sched_cpu != CPU_CFV4);
6154
6155 switch (m68k_sched_cpu)
6156 {
6157 case CPU_CFV4:
6158 sched_ib.filled = 0;
6159
6160 /* FALLTHRU */
6161
6162 case CPU_CFV1:
6163 case CPU_CFV2:
6164 max_insn_size = 3;
6165 sched_ib.records.n_insns = 0;
6166 sched_ib.records.adjust = NULL;
6167 break;
6168
6169 case CPU_CFV3:
6170 max_insn_size = 3;
6171 sched_ib.records.n_insns = 8;
6172 sched_ib.records.adjust = XNEWVEC (int, sched_ib.records.n_insns);
6173 break;
6174
6175 default:
6176 gcc_unreachable ();
6177 }
6178
6179 sched_mem_unit_code = get_cpu_unit_code ("cf_mem1");
6180
6181 sched_adjust_cost_state = xmalloc (state_size ());
6182 state_reset (sched_adjust_cost_state);
6183
6184 start_sequence ();
6185 emit_insn (gen_ib ());
6186 sched_ib.insn = get_insns ();
6187 end_sequence ();
6188 }
6189
6190 /* Scheduling pass is now finished. Free/reset static variables. */
6191 static void
6192 m68k_sched_md_finish_global (FILE *dump ATTRIBUTE_UNUSED,
6193 int verbose ATTRIBUTE_UNUSED)
6194 {
6195 sched_ib.insn = NULL;
6196
6197 free (sched_adjust_cost_state);
6198 sched_adjust_cost_state = NULL;
6199
6200 sched_mem_unit_code = 0;
6201
6202 free (sched_ib.records.adjust);
6203 sched_ib.records.adjust = NULL;
6204 sched_ib.records.n_insns = 0;
6205 max_insn_size = 0;
6206 }
6207
6208 /* Implementation of targetm.sched.init () hook.
6209 It is invoked each time the scheduler starts on a new block (basic block or
6210 extended basic block). */
6211 static void
6212 m68k_sched_md_init (FILE *sched_dump ATTRIBUTE_UNUSED,
6213 int sched_verbose ATTRIBUTE_UNUSED,
6214 int n_insns ATTRIBUTE_UNUSED)
6215 {
6216 switch (m68k_sched_cpu)
6217 {
6218 case CPU_CFV1:
6219 case CPU_CFV2:
6220 sched_ib.size = 6;
6221 break;
6222
6223 case CPU_CFV3:
6224 sched_ib.size = sched_ib.records.n_insns * max_insn_size;
6225
6226 memset (sched_ib.records.adjust, 0,
6227 sched_ib.records.n_insns * sizeof (*sched_ib.records.adjust));
6228 sched_ib.records.adjust_index = 0;
6229 break;
6230
6231 case CPU_CFV4:
6232 gcc_assert (!sched_ib.enabled_p);
6233 sched_ib.size = 0;
6234 break;
6235
6236 default:
6237 gcc_unreachable ();
6238 }
6239
6240 if (sched_ib.enabled_p)
6241 /* haifa-sched.c: schedule_block () calls advance_cycle () just before
6242 the first cycle. Work around that. */
6243 sched_ib.filled = -2;
6244 }
6245
6246 /* Implementation of targetm.sched.dfa_pre_advance_cycle () hook.
6247 It is invoked just before the current cycle finishes and is used here
6248 to track whether the instruction buffer got its two words this cycle. */
6249 static void
6250 m68k_sched_dfa_pre_advance_cycle (void)
6251 {
6252 if (!sched_ib.enabled_p)
6253 return;
6254
6255 if (!cpu_unit_reservation_p (curr_state, sched_mem_unit_code))
6256 {
6257 sched_ib.filled += 2;
6258
6259 if (sched_ib.filled > sched_ib.size)
6260 sched_ib.filled = sched_ib.size;
6261 }
6262 }
6263
6264 /* Implementation of targetm.sched.dfa_post_advance_cycle () hook.
6265 It is invoked just after a new cycle begins and is used here
6266 to set up the number of filled words in the instruction buffer so that
6267 instructions which won't have all their words prefetched will be
6268 stalled for a cycle. */
6269 static void
6270 m68k_sched_dfa_post_advance_cycle (void)
6271 {
6272 int i;
6273
6274 if (!sched_ib.enabled_p)
6275 return;
6276
6277 /* Set up the number of prefetched instruction words in the instruction
6278 buffer. */
6279 i = max_insn_size - sched_ib.filled;
6280
6281 while (--i >= 0)
6282 {
6283 if (state_transition (curr_state, sched_ib.insn) >= 0)
6284 /* Pick up scheduler state. */
6285 ++sched_ib.filled;
6286 }
6287 }
6288
6289 /* Return the X or Y operand of INSN (depending on OPX_P),
6290 if it is an integer register, or NULL otherwise. */
6291 static rtx
6292 sched_get_reg_operand (rtx insn, bool opx_p)
6293 {
6294 rtx op = NULL;
6295
6296 if (opx_p)
6297 {
6298 if (get_attr_opx_type (insn) == OPX_TYPE_RN)
6299 {
6300 op = sched_get_operand (insn, true);
6301 gcc_assert (op != NULL);
6302
6303 if (!reload_completed && !REG_P (op))
6304 return NULL;
6305 }
6306 }
6307 else
6308 {
6309 if (get_attr_opy_type (insn) == OPY_TYPE_RN)
6310 {
6311 op = sched_get_operand (insn, false);
6312 gcc_assert (op != NULL);
6313
6314 if (!reload_completed && !REG_P (op))
6315 return NULL;
6316 }
6317 }
6318
6319 return op;
6320 }
6321
6322 /* Return true if the X or Y operand of INSN (depending on OPX_P)
6323 is a MEM. */
6324 static bool
6325 sched_mem_operand_p (rtx insn, bool opx_p)
6326 {
6327 switch (sched_get_opxy_mem_type (insn, opx_p))
6328 {
6329 case OP_TYPE_MEM1:
6330 case OP_TYPE_MEM6:
6331 return true;
6332
6333 default:
6334 return false;
6335 }
6336 }
6337
6338 /* Return the MEM operand of INSN that is read (MUST_READ_P) and/or
6339 written (MUST_WRITE_P); such an operand must exist. */
6340 static rtx
6341 sched_get_mem_operand (rtx insn, bool must_read_p, bool must_write_p)
6342 {
6343 bool opx_p;
6344 bool opy_p;
6345
6346 opx_p = false;
6347 opy_p = false;
6348
6349 if (must_read_p)
6350 {
6351 opx_p = true;
6352 opy_p = true;
6353 }
6354
6355 if (must_write_p)
6356 {
6357 opx_p = true;
6358 opy_p = false;
6359 }
6360
6361 if (opy_p && sched_mem_operand_p (insn, false))
6362 return sched_get_operand (insn, false);
6363
6364 if (opx_p && sched_mem_operand_p (insn, true))
6365 return sched_get_operand (insn, true);
6366
6367 gcc_unreachable ();
6368 return NULL;
6369 }
6370
6371 /* Return non-zero if PRO modifies a register used as part of
6372 an address in CON. */
6373 int
6374 m68k_sched_address_bypass_p (rtx pro, rtx con)
6375 {
6376 rtx pro_x;
6377 rtx con_mem_read;
6378
6379 pro_x = sched_get_reg_operand (pro, true);
6380 if (pro_x == NULL)
6381 return 0;
6382
6383 con_mem_read = sched_get_mem_operand (con, true, false);
6384 gcc_assert (con_mem_read != NULL);
6385
6386 if (reg_mentioned_p (pro_x, con_mem_read))
6387 return 1;
6388
6389 return 0;
6390 }
6391
6392 /* Helper function for m68k_sched_indexed_address_bypass_p.
6393 If PRO modifies a register used as an index in CON,
6394 return the scale of the indexed memory access in CON. Return zero otherwise. */
6395 static int
6396 sched_get_indexed_address_scale (rtx pro, rtx con)
6397 {
6398 rtx reg;
6399 rtx mem;
6400 struct m68k_address address;
6401
6402 reg = sched_get_reg_operand (pro, true);
6403 if (reg == NULL)
6404 return 0;
6405
6406 mem = sched_get_mem_operand (con, true, false);
6407 gcc_assert (mem != NULL && MEM_P (mem));
6408
6409 if (!m68k_decompose_address (GET_MODE (mem), XEXP (mem, 0), reload_completed,
6410 &address))
6411 gcc_unreachable ();
6412
6413 if (REGNO (reg) == REGNO (address.index))
6414 {
6415 gcc_assert (address.scale != 0);
6416 return address.scale;
6417 }
6418
6419 return 0;
6420 }
6421
6422 /* Return non-zero if PRO modifies a register used
6423 as an index with scale 2 or 4 in CON. */
6424 int
6425 m68k_sched_indexed_address_bypass_p (rtx pro, rtx con)
6426 {
6427 gcc_assert (sched_cfv4_bypass_data.pro == NULL
6428 && sched_cfv4_bypass_data.con == NULL
6429 && sched_cfv4_bypass_data.scale == 0);
6430
6431 switch (sched_get_indexed_address_scale (pro, con))
6432 {
6433 case 1:
6434 /* We can't have a variable latency bypass, so
6435 remember to adjust the insn cost in adjust_cost hook. */
6436 sched_cfv4_bypass_data.pro = pro;
6437 sched_cfv4_bypass_data.con = con;
6438 sched_cfv4_bypass_data.scale = 1;
6439 return 0;
6440
6441 case 2:
6442 case 4:
6443 return 1;
6444
6445 default:
6446 return 0;
6447 }
6448 }
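/* For instance, an insn that sets %d1 followed by
   "move.l (4,%a0,%d1.l*4),%d0" modifies a register used as an index
   with scale 4, so the bypass above applies.  (The check itself works
   on the RTL form via m68k_decompose_address; the assembly is only an
   illustration.)  */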
6449
6450 /* We generate a two-instruction program at M_TRAMP:
6451 movea.l &CHAIN_VALUE,%a0
6452 jmp FNADDR
6453 where %a0 can be modified by changing STATIC_CHAIN_REGNUM. */
6454
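/* For example, if STATIC_CHAIN_REGNUM is 8 (%a0), the emitted words are:
     +0: 0x207c       movea.l #CHAIN_VALUE,%a0
     +2: CHAIN_VALUE  (32-bit immediate)
     +6: 0x4ef9       jmp (FNADDR).l
     +8: FNADDR       (32-bit absolute address)
   0x207C encodes "move.l #imm,%a0" and 0x4EF9 encodes "jmp" with an
   absolute long operand.  */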
6455 static void
6456 m68k_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
6457 {
6458 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
6459 rtx mem;
6460
6461 gcc_assert (ADDRESS_REGNO_P (STATIC_CHAIN_REGNUM));
6462
6463 mem = adjust_address (m_tramp, HImode, 0);
6464 emit_move_insn (mem, GEN_INT (0x207C + ((STATIC_CHAIN_REGNUM - 8) << 9)));
6465 mem = adjust_address (m_tramp, SImode, 2);
6466 emit_move_insn (mem, chain_value);
6467
6468 mem = adjust_address (m_tramp, HImode, 6);
6469 emit_move_insn (mem, GEN_INT (0x4EF9));
6470 mem = adjust_address (m_tramp, SImode, 8);
6471 emit_move_insn (mem, fnaddr);
6472
6473 FINALIZE_TRAMPOLINE (XEXP (m_tramp, 0));
6474 }
6475
6476 /* On the 68000, the RTS insn cannot pop anything.
6477 On the 68010, the RTD insn may be used to pop them if the number
6478 of args is fixed, but if the number is variable then the caller
6479 must pop them all. RTD can't be used for library calls now
6480 because the library is compiled with the Unix compiler.
6481 Use of RTD is a selectable option, since it is incompatible with
6482 standard Unix calling sequences. If the option is not selected,
6483 the caller must always pop the args. */
6484
6485 static int
6486 m68k_return_pops_args (tree fundecl, tree funtype, int size)
6487 {
6488 return ((TARGET_RTD
6489 && (!fundecl
6490 || TREE_CODE (fundecl) != IDENTIFIER_NODE)
6491 && (!stdarg_p (funtype)))
6492 ? size : 0);
6493 }
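/* For example, with -mrtd a function with the fixed signature
   "int f (int a, int b)" pops its own 8 bytes of arguments via
   "rtd #8", whereas a stdarg function still returns with a plain rts
   and leaves the popping to its caller.  */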
6494
6495 /* Make sure everything's fine if we *don't* have a given processor.
6496 This assumes that putting a register in fixed_regs will keep the
6497 compiler's mitts completely off it. We don't bother to zero it out
6498 of register classes. */
6499
6500 static void
6501 m68k_conditional_register_usage (void)
6502 {
6503 int i;
6504 HARD_REG_SET x;
6505 if (!TARGET_HARD_FLOAT)
6506 {
6507 COPY_HARD_REG_SET (x, reg_class_contents[(int)FP_REGS]);
6508 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
6509 if (TEST_HARD_REG_BIT (x, i))
6510 fixed_regs[i] = call_used_regs[i] = 1;
6511 }
6512 if (flag_pic)
6513 fixed_regs[PIC_REG] = call_used_regs[PIC_REG] = 1;
6514 }
6515
6516 static void
6517 m68k_init_sync_libfuncs (void)
6518 {
6519 init_sync_libfuncs (UNITS_PER_WORD);
6520 }
6521
6522 /* Implements EPILOGUE_USES. All registers are live on exit from an
6523 interrupt routine. */
6524 bool
6525 m68k_epilogue_uses (int regno ATTRIBUTE_UNUSED)
6526 {
6527 return (reload_completed
6528 && (m68k_get_function_kind (current_function_decl)
6529 == m68k_fk_interrupt_handler));
6530 }
6531
6532 #include "gt-m68k.h"