]> git.ipfire.org Git - thirdparty/gcc.git/blob - gcc/config/m68k/m68k.c
Turn MODES_TIEABLE_P into a target hook
[thirdparty/gcc.git] / gcc / config / m68k / m68k.c
1 /* Subroutines for insn-output.c for Motorola 68000 family.
2 Copyright (C) 1987-2017 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
9 any later version.
10
11 GCC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "cfghooks.h"
25 #include "tree.h"
26 #include "stringpool.h"
27 #include "attribs.h"
28 #include "rtl.h"
29 #include "df.h"
30 #include "alias.h"
31 #include "fold-const.h"
32 #include "calls.h"
33 #include "stor-layout.h"
34 #include "varasm.h"
35 #include "regs.h"
36 #include "insn-config.h"
37 #include "conditions.h"
38 #include "output.h"
39 #include "insn-attr.h"
40 #include "recog.h"
41 #include "diagnostic-core.h"
42 #include "flags.h"
43 #include "expmed.h"
44 #include "dojump.h"
45 #include "explow.h"
46 #include "memmodel.h"
47 #include "emit-rtl.h"
48 #include "stmt.h"
49 #include "expr.h"
50 #include "reload.h"
51 #include "tm_p.h"
52 #include "target.h"
53 #include "debug.h"
54 #include "cfgrtl.h"
55 #include "cfganal.h"
56 #include "lcm.h"
57 #include "cfgbuild.h"
58 #include "cfgcleanup.h"
59 /* ??? Need to add a dependency between m68k.o and sched-int.h. */
60 #include "sched-int.h"
61 #include "insn-codes.h"
62 #include "opts.h"
63 #include "optabs.h"
64 #include "builtins.h"
65 #include "rtl-iter.h"
66
67 /* This file should be included last. */
68 #include "target-def.h"
69
/* Map each hard register number to its register class: registers 0-7
   (d0-d7) are DATA_REGS, 8-15 (a0-a7) are ADDR_REGS, 16-23 (fp0-fp7)
   are FP_REGS.  The 25th entry is also ADDR_REGS — presumably the fake
   argument-pointer register; confirm against m68k.h.  */
enum reg_class regno_reg_class[] =
{
  DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
  DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
  ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
  ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  ADDR_REGS
};
80
81
82 /* The minimum number of integer registers that we want to save with the
83 movem instruction. Using two movel instructions instead of a single
84 moveml is about 15% faster for the 68020 and 68030 at no expense in
85 code size. */
86 #define MIN_MOVEM_REGS 3
87
88 /* The minimum number of floating point registers that we want to save
89 with the fmovem instruction. */
90 #define MIN_FMOVEM_REGS 1
91
/* Structure describing stack frame layout.  */
struct m68k_frame
{
  /* Stack pointer to frame pointer offset.  */
  HOST_WIDE_INT offset;

  /* Offset of FPU registers.  */
  HOST_WIDE_INT foffset;

  /* Frame size in bytes (rounded up).  */
  HOST_WIDE_INT size;

  /* Data and address register.  REG_NO is the number of saved
     integer registers; bit I of REG_MASK is set if register
     D0_REG + I is saved.  */
  int reg_no;
  unsigned int reg_mask;

  /* FPU registers.  FPU_NO is the number of saved FP registers;
     bit I of FPU_MASK is set if register FP0_REG + I is saved.  */
  int fpu_no;
  unsigned int fpu_mask;

  /* Offsets relative to ARG_POINTER.  */
  HOST_WIDE_INT frame_pointer_offset;
  HOST_WIDE_INT stack_pointer_offset;

  /* Function which the above information refers to, used as a cache
     key by m68k_compute_frame_layout.  */
  int funcdef_no;
};

/* Current frame information calculated by m68k_compute_frame_layout().  */
static struct m68k_frame current_frame;
122
/* Structure describing an m68k address.

   If CODE is UNKNOWN, the address is BASE + INDEX * SCALE + OFFSET,
   with null fields evaluating to 0.  Here:

   - BASE satisfies m68k_legitimate_base_reg_p
   - INDEX satisfies m68k_legitimate_index_reg_p
   - OFFSET satisfies m68k_legitimate_constant_address_p

   INDEX is either HImode or SImode.  The other fields are SImode.

   If CODE is PRE_DEC, the address is -(BASE).  If CODE is POST_INC,
   the address is (BASE)+.  */
struct m68k_address {
  /* UNKNOWN, PRE_DEC or POST_INC, as described above.  */
  enum rtx_code code;
  rtx base;
  rtx index;
  rtx offset;
  /* Multiplier applied to INDEX (1, 2, 4 or 8 on this architecture).  */
  int scale;
};
143
144 static int m68k_sched_adjust_cost (rtx_insn *, int, rtx_insn *, int,
145 unsigned int);
146 static int m68k_sched_issue_rate (void);
147 static int m68k_sched_variable_issue (FILE *, int, rtx_insn *, int);
148 static void m68k_sched_md_init_global (FILE *, int, int);
149 static void m68k_sched_md_finish_global (FILE *, int);
150 static void m68k_sched_md_init (FILE *, int, int);
151 static void m68k_sched_dfa_pre_advance_cycle (void);
152 static void m68k_sched_dfa_post_advance_cycle (void);
153 static int m68k_sched_first_cycle_multipass_dfa_lookahead (void);
154
155 static bool m68k_can_eliminate (const int, const int);
156 static void m68k_conditional_register_usage (void);
157 static bool m68k_legitimate_address_p (machine_mode, rtx, bool);
158 static void m68k_option_override (void);
159 static void m68k_override_options_after_change (void);
160 static rtx find_addr_reg (rtx);
161 static const char *singlemove_string (rtx *);
162 static void m68k_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
163 HOST_WIDE_INT, tree);
164 static rtx m68k_struct_value_rtx (tree, int);
165 static tree m68k_handle_fndecl_attribute (tree *node, tree name,
166 tree args, int flags,
167 bool *no_add_attrs);
168 static void m68k_compute_frame_layout (void);
169 static bool m68k_save_reg (unsigned int regno, bool interrupt_handler);
170 static bool m68k_ok_for_sibcall_p (tree, tree);
171 static bool m68k_tls_symbol_p (rtx);
172 static rtx m68k_legitimize_address (rtx, rtx, machine_mode);
173 static bool m68k_rtx_costs (rtx, machine_mode, int, int, int *, bool);
174 #if M68K_HONOR_TARGET_STRICT_ALIGNMENT
175 static bool m68k_return_in_memory (const_tree, const_tree);
176 #endif
177 static void m68k_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
178 static void m68k_trampoline_init (rtx, tree, rtx);
179 static int m68k_return_pops_args (tree, tree, int);
180 static rtx m68k_delegitimize_address (rtx);
181 static void m68k_function_arg_advance (cumulative_args_t, machine_mode,
182 const_tree, bool);
183 static rtx m68k_function_arg (cumulative_args_t, machine_mode,
184 const_tree, bool);
185 static bool m68k_cannot_force_const_mem (machine_mode mode, rtx x);
186 static bool m68k_output_addr_const_extra (FILE *, rtx);
187 static void m68k_init_sync_libfuncs (void) ATTRIBUTE_UNUSED;
188 static enum flt_eval_method
189 m68k_excess_precision (enum excess_precision_type);
190 static bool m68k_hard_regno_mode_ok (unsigned int, machine_mode);
191 static bool m68k_modes_tieable_p (machine_mode, machine_mode);
192 \f
193 /* Initialize the GCC target structure. */
194
195 #if INT_OP_GROUP == INT_OP_DOT_WORD
196 #undef TARGET_ASM_ALIGNED_HI_OP
197 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
198 #endif
199
200 #if INT_OP_GROUP == INT_OP_NO_DOT
201 #undef TARGET_ASM_BYTE_OP
202 #define TARGET_ASM_BYTE_OP "\tbyte\t"
203 #undef TARGET_ASM_ALIGNED_HI_OP
204 #define TARGET_ASM_ALIGNED_HI_OP "\tshort\t"
205 #undef TARGET_ASM_ALIGNED_SI_OP
206 #define TARGET_ASM_ALIGNED_SI_OP "\tlong\t"
207 #endif
208
209 #if INT_OP_GROUP == INT_OP_DC
210 #undef TARGET_ASM_BYTE_OP
211 #define TARGET_ASM_BYTE_OP "\tdc.b\t"
212 #undef TARGET_ASM_ALIGNED_HI_OP
213 #define TARGET_ASM_ALIGNED_HI_OP "\tdc.w\t"
214 #undef TARGET_ASM_ALIGNED_SI_OP
215 #define TARGET_ASM_ALIGNED_SI_OP "\tdc.l\t"
216 #endif
217
218 #undef TARGET_ASM_UNALIGNED_HI_OP
219 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
220 #undef TARGET_ASM_UNALIGNED_SI_OP
221 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
222
223 #undef TARGET_ASM_OUTPUT_MI_THUNK
224 #define TARGET_ASM_OUTPUT_MI_THUNK m68k_output_mi_thunk
225 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
226 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
227
228 #undef TARGET_ASM_FILE_START_APP_OFF
229 #define TARGET_ASM_FILE_START_APP_OFF true
230
231 #undef TARGET_LEGITIMIZE_ADDRESS
232 #define TARGET_LEGITIMIZE_ADDRESS m68k_legitimize_address
233
234 #undef TARGET_SCHED_ADJUST_COST
235 #define TARGET_SCHED_ADJUST_COST m68k_sched_adjust_cost
236
237 #undef TARGET_SCHED_ISSUE_RATE
238 #define TARGET_SCHED_ISSUE_RATE m68k_sched_issue_rate
239
240 #undef TARGET_SCHED_VARIABLE_ISSUE
241 #define TARGET_SCHED_VARIABLE_ISSUE m68k_sched_variable_issue
242
243 #undef TARGET_SCHED_INIT_GLOBAL
244 #define TARGET_SCHED_INIT_GLOBAL m68k_sched_md_init_global
245
246 #undef TARGET_SCHED_FINISH_GLOBAL
247 #define TARGET_SCHED_FINISH_GLOBAL m68k_sched_md_finish_global
248
249 #undef TARGET_SCHED_INIT
250 #define TARGET_SCHED_INIT m68k_sched_md_init
251
252 #undef TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE
253 #define TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE m68k_sched_dfa_pre_advance_cycle
254
255 #undef TARGET_SCHED_DFA_POST_ADVANCE_CYCLE
256 #define TARGET_SCHED_DFA_POST_ADVANCE_CYCLE m68k_sched_dfa_post_advance_cycle
257
258 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
259 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
260 m68k_sched_first_cycle_multipass_dfa_lookahead
261
262 #undef TARGET_OPTION_OVERRIDE
263 #define TARGET_OPTION_OVERRIDE m68k_option_override
264
265 #undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
266 #define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m68k_override_options_after_change
267
268 #undef TARGET_RTX_COSTS
269 #define TARGET_RTX_COSTS m68k_rtx_costs
270
271 #undef TARGET_ATTRIBUTE_TABLE
272 #define TARGET_ATTRIBUTE_TABLE m68k_attribute_table
273
274 #undef TARGET_PROMOTE_PROTOTYPES
275 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
276
277 #undef TARGET_STRUCT_VALUE_RTX
278 #define TARGET_STRUCT_VALUE_RTX m68k_struct_value_rtx
279
280 #undef TARGET_CANNOT_FORCE_CONST_MEM
281 #define TARGET_CANNOT_FORCE_CONST_MEM m68k_cannot_force_const_mem
282
283 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
284 #define TARGET_FUNCTION_OK_FOR_SIBCALL m68k_ok_for_sibcall_p
285
286 #if M68K_HONOR_TARGET_STRICT_ALIGNMENT
287 #undef TARGET_RETURN_IN_MEMORY
288 #define TARGET_RETURN_IN_MEMORY m68k_return_in_memory
289 #endif
290
291 #ifdef HAVE_AS_TLS
292 #undef TARGET_HAVE_TLS
293 #define TARGET_HAVE_TLS (true)
294
295 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
296 #define TARGET_ASM_OUTPUT_DWARF_DTPREL m68k_output_dwarf_dtprel
297 #endif
298
299 #undef TARGET_LRA_P
300 #define TARGET_LRA_P hook_bool_void_false
301
302 #undef TARGET_LEGITIMATE_ADDRESS_P
303 #define TARGET_LEGITIMATE_ADDRESS_P m68k_legitimate_address_p
304
305 #undef TARGET_CAN_ELIMINATE
306 #define TARGET_CAN_ELIMINATE m68k_can_eliminate
307
308 #undef TARGET_CONDITIONAL_REGISTER_USAGE
309 #define TARGET_CONDITIONAL_REGISTER_USAGE m68k_conditional_register_usage
310
311 #undef TARGET_TRAMPOLINE_INIT
312 #define TARGET_TRAMPOLINE_INIT m68k_trampoline_init
313
314 #undef TARGET_RETURN_POPS_ARGS
315 #define TARGET_RETURN_POPS_ARGS m68k_return_pops_args
316
317 #undef TARGET_DELEGITIMIZE_ADDRESS
318 #define TARGET_DELEGITIMIZE_ADDRESS m68k_delegitimize_address
319
320 #undef TARGET_FUNCTION_ARG
321 #define TARGET_FUNCTION_ARG m68k_function_arg
322
323 #undef TARGET_FUNCTION_ARG_ADVANCE
324 #define TARGET_FUNCTION_ARG_ADVANCE m68k_function_arg_advance
325
326 #undef TARGET_LEGITIMATE_CONSTANT_P
327 #define TARGET_LEGITIMATE_CONSTANT_P m68k_legitimate_constant_p
328
329 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
330 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA m68k_output_addr_const_extra
331
332 #undef TARGET_C_EXCESS_PRECISION
333 #define TARGET_C_EXCESS_PRECISION m68k_excess_precision
334
335 /* The value stored by TAS. */
336 #undef TARGET_ATOMIC_TEST_AND_SET_TRUEVAL
337 #define TARGET_ATOMIC_TEST_AND_SET_TRUEVAL 128
338
339 #undef TARGET_HARD_REGNO_MODE_OK
340 #define TARGET_HARD_REGNO_MODE_OK m68k_hard_regno_mode_ok
341
342 #undef TARGET_MODES_TIEABLE_P
343 #define TARGET_MODES_TIEABLE_P m68k_modes_tieable_p
344
/* Table of machine attributes recognized by this backend.  All three
   interrupt-related attributes require a FUNCTION_DECL and share the
   same handler, which enforces that requirement and rejects multiple
   interrupt attributes on one function.  */
static const struct attribute_spec m68k_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "interrupt", 0, 0, true,  false, false, m68k_handle_fndecl_attribute,
    false },
  { "interrupt_handler", 0, 0, true,  false, false,
    m68k_handle_fndecl_attribute, false },
  { "interrupt_thread", 0, 0, true,  false, false,
    m68k_handle_fndecl_attribute, false },
  { NULL,                0, 0, false, false, false, NULL, false }
};
357
358 struct gcc_target targetm = TARGET_INITIALIZER;
359 \f
360 /* Base flags for 68k ISAs. */
361 #define FL_FOR_isa_00 FL_ISA_68000
362 #define FL_FOR_isa_10 (FL_FOR_isa_00 | FL_ISA_68010)
363 /* FL_68881 controls the default setting of -m68881. gcc has traditionally
364 generated 68881 code for 68020 and 68030 targets unless explicitly told
365 not to. */
366 #define FL_FOR_isa_20 (FL_FOR_isa_10 | FL_ISA_68020 \
367 | FL_BITFIELD | FL_68881 | FL_CAS)
368 #define FL_FOR_isa_40 (FL_FOR_isa_20 | FL_ISA_68040)
369 #define FL_FOR_isa_cpu32 (FL_FOR_isa_10 | FL_ISA_68020)
370
371 /* Base flags for ColdFire ISAs. */
372 #define FL_FOR_isa_a (FL_COLDFIRE | FL_ISA_A)
373 #define FL_FOR_isa_aplus (FL_FOR_isa_a | FL_ISA_APLUS | FL_CF_USP)
374 /* Note ISA_B doesn't necessarily include USP (user stack pointer) support. */
375 #define FL_FOR_isa_b (FL_FOR_isa_a | FL_ISA_B | FL_CF_HWDIV)
376 /* ISA_C is not upwardly compatible with ISA_B. */
377 #define FL_FOR_isa_c (FL_FOR_isa_a | FL_ISA_C | FL_CF_USP)
378
379 enum m68k_isa
380 {
381 /* Traditional 68000 instruction sets. */
382 isa_00,
383 isa_10,
384 isa_20,
385 isa_40,
386 isa_cpu32,
387 /* ColdFire instruction set variants. */
388 isa_a,
389 isa_aplus,
390 isa_b,
391 isa_c,
392 isa_max
393 };
394
395 /* Information about one of the -march, -mcpu or -mtune arguments. */
396 struct m68k_target_selection
397 {
398 /* The argument being described. */
399 const char *name;
400
401 /* For -mcpu, this is the device selected by the option.
402 For -mtune and -march, it is a representative device
403 for the microarchitecture or ISA respectively. */
404 enum target_device device;
405
406 /* The M68K_DEVICE fields associated with DEVICE. See the comment
407 in m68k-devices.def for details. FAMILY is only valid for -mcpu. */
408 const char *family;
409 enum uarch_type microarch;
410 enum m68k_isa isa;
411 unsigned long flags;
412 };
413
414 /* A list of all devices in m68k-devices.def. Used for -mcpu selection. */
415 static const struct m68k_target_selection all_devices[] =
416 {
417 #define M68K_DEVICE(NAME,ENUM_VALUE,FAMILY,MULTILIB,MICROARCH,ISA,FLAGS) \
418 { NAME, ENUM_VALUE, FAMILY, u##MICROARCH, ISA, FLAGS | FL_FOR_##ISA },
419 #include "m68k-devices.def"
420 #undef M68K_DEVICE
421 { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
422 };
423
424 /* A list of all ISAs, mapping each one to a representative device.
425 Used for -march selection. */
426 static const struct m68k_target_selection all_isas[] =
427 {
428 #define M68K_ISA(NAME,DEVICE,MICROARCH,ISA,FLAGS) \
429 { NAME, DEVICE, NULL, u##MICROARCH, ISA, FLAGS },
430 #include "m68k-isas.def"
431 #undef M68K_ISA
432 { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
433 };
434
435 /* A list of all microarchitectures, mapping each one to a representative
436 device. Used for -mtune selection. */
437 static const struct m68k_target_selection all_microarchs[] =
438 {
439 #define M68K_MICROARCH(NAME,DEVICE,MICROARCH,ISA,FLAGS) \
440 { NAME, DEVICE, NULL, u##MICROARCH, ISA, FLAGS },
441 #include "m68k-microarchs.def"
442 #undef M68K_MICROARCH
443 { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
444 };
445 \f
446 /* The entries associated with the -mcpu, -march and -mtune settings,
447 or null for options that have not been used. */
448 const struct m68k_target_selection *m68k_cpu_entry;
449 const struct m68k_target_selection *m68k_arch_entry;
450 const struct m68k_target_selection *m68k_tune_entry;
451
452 /* Which CPU we are generating code for. */
453 enum target_device m68k_cpu;
454
455 /* Which microarchitecture to tune for. */
456 enum uarch_type m68k_tune;
457
458 /* Which FPU to use. */
459 enum fpu_type m68k_fpu;
460
461 /* The set of FL_* flags that apply to the target processor. */
462 unsigned int m68k_cpu_flags;
463
464 /* The set of FL_* flags that apply to the processor to be tuned for. */
465 unsigned int m68k_tune_flags;
466
467 /* Asm templates for calling or jumping to an arbitrary symbolic address,
468 or NULL if such calls or jumps are not supported. The address is held
469 in operand 0. */
470 const char *m68k_symbolic_call;
471 const char *m68k_symbolic_jump;
472
473 /* Enum variable that corresponds to m68k_symbolic_call values. */
474 enum M68K_SYMBOLIC_CALL m68k_symbolic_call_var;
475
476 \f
/* Implement TARGET_OPTION_OVERRIDE.  Resolve the -mcpu/-march/-mtune
   settings into the m68k_cpu/m68k_tune/m68k_fpu globals, derive default
   target_flags bits, validate option combinations, and configure the
   instruction scheduler.  */

static void
m68k_option_override (void)
{
  const struct m68k_target_selection *entry;
  unsigned long target_mask;

  if (global_options_set.x_m68k_arch_option)
    m68k_arch_entry = &all_isas[m68k_arch_option];

  if (global_options_set.x_m68k_cpu_option)
    m68k_cpu_entry = &all_devices[(int) m68k_cpu_option];

  if (global_options_set.x_m68k_tune_option)
    m68k_tune_entry = &all_microarchs[(int) m68k_tune_option];

  /* User can choose:

     -mcpu=
     -march=
     -mtune=

     -march=ARCH should generate code that runs any processor
     implementing architecture ARCH.  -mcpu=CPU should override -march
     and should generate code that runs on processor CPU, making free
     use of any instructions that CPU understands.  -mtune=UARCH applies
     on top of -mcpu or -march and optimizes the code for UARCH.  It does
     not change the target architecture.  */
  if (m68k_cpu_entry)
    {
      /* Complain if the -march setting is for a different microarchitecture,
	 or includes flags that the -mcpu setting doesn't.  */
      if (m68k_arch_entry
	  && (m68k_arch_entry->microarch != m68k_cpu_entry->microarch
	      || (m68k_arch_entry->flags & ~m68k_cpu_entry->flags) != 0))
	warning (0, "-mcpu=%s conflicts with -march=%s",
		 m68k_cpu_entry->name, m68k_arch_entry->name);

      entry = m68k_cpu_entry;
    }
  else
    entry = m68k_arch_entry;

  if (!entry)
    entry = all_devices + TARGET_CPU_DEFAULT;

  m68k_cpu_flags = entry->flags;

  /* Use the architecture setting to derive default values for
     certain flags.  */
  target_mask = 0;

  /* ColdFire is lenient about alignment.  */
  if (!TARGET_COLDFIRE)
    target_mask |= MASK_STRICT_ALIGNMENT;

  if ((m68k_cpu_flags & FL_BITFIELD) != 0)
    target_mask |= MASK_BITFIELD;
  if ((m68k_cpu_flags & FL_CF_HWDIV) != 0)
    target_mask |= MASK_CF_HWDIV;
  if ((m68k_cpu_flags & (FL_68881 | FL_CF_FPU)) != 0)
    target_mask |= MASK_HARD_FLOAT;
  /* Apply the derived defaults only to bits the user did not set
     explicitly on the command line.  */
  target_flags |= target_mask & ~target_flags_explicit;

  /* Set the directly-usable versions of the -mcpu and -mtune settings.  */
  m68k_cpu = entry->device;
  if (m68k_tune_entry)
    {
      m68k_tune = m68k_tune_entry->microarch;
      m68k_tune_flags = m68k_tune_entry->flags;
    }
#ifdef M68K_DEFAULT_TUNE
  else if (!m68k_cpu_entry && !m68k_arch_entry)
    {
      enum target_device dev;
      dev = all_microarchs[M68K_DEFAULT_TUNE].device;
      /* NOTE(review): this path sets m68k_tune_flags but never m68k_tune,
	 unlike the branches above and below — confirm whether that is
	 intentional.  */
      m68k_tune_flags = all_devices[dev].flags;
    }
#endif
  else
    {
      m68k_tune = entry->microarch;
      m68k_tune_flags = entry->flags;
    }

  /* Set the type of FPU.  */
  m68k_fpu = (!TARGET_HARD_FLOAT ? FPUTYPE_NONE
	      : (m68k_cpu_flags & FL_COLDFIRE) != 0 ? FPUTYPE_COLDFIRE
	      : FPUTYPE_68881);

  /* Sanity check to ensure that msep-data and mid-shared-library are not
   * both specified together.  Doing so simply doesn't make sense.
   */
  if (TARGET_SEP_DATA && TARGET_ID_SHARED_LIBRARY)
    error ("cannot specify both -msep-data and -mid-shared-library");

  /* If we're generating code for a separate A5 relative data segment,
   * we've got to enable -fPIC as well.  This might be relaxable to
   * -fpic but it hasn't been tested properly.
   */
  if (TARGET_SEP_DATA || TARGET_ID_SHARED_LIBRARY)
    flag_pic = 2;

  /* -mpcrel -fPIC uses 32-bit pc-relative displacements.  Raise an
     error if the target does not support them.  */
  if (TARGET_PCREL && !TARGET_68020 && flag_pic == 2)
    error ("-mpcrel -fPIC is not currently supported on selected cpu");

  /* ??? A historic way of turning on pic, or is this intended to
     be an embedded thing that doesn't have the same name binding
     significance that it does on hosted ELF systems?  */
  if (TARGET_PCREL && flag_pic == 0)
    flag_pic = 1;

  if (!flag_pic)
    {
      m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_JSR;

      m68k_symbolic_jump = "jra %a0";
    }
  else if (TARGET_ID_SHARED_LIBRARY)
    /* All addresses must be loaded from the GOT.  */
    ;
  else if (TARGET_68020 || TARGET_ISAB || TARGET_ISAC)
    {
      if (TARGET_PCREL)
	m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_C;
      else
	m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_P;

      if (TARGET_ISAC)
	/* No unconditional long branch */;
      else if (TARGET_PCREL)
	m68k_symbolic_jump = "bra%.l %c0";
      else
	m68k_symbolic_jump = "bra%.l %p0";
      /* Turn off function cse if we are doing PIC.  We always want
	 function call to be done as `bsr foo@PLTPC'.  */
      /* ??? It's traditional to do this for -mpcrel too, but it isn't
	 clear how intentional that is.  */
      flag_no_function_cse = 1;
    }

  switch (m68k_symbolic_call_var)
    {
    case M68K_SYMBOLIC_CALL_JSR:
      m68k_symbolic_call = "jsr %a0";
      break;

    case M68K_SYMBOLIC_CALL_BSR_C:
      m68k_symbolic_call = "bsr%.l %c0";
      break;

    case M68K_SYMBOLIC_CALL_BSR_P:
      m68k_symbolic_call = "bsr%.l %p0";
      break;

    case M68K_SYMBOLIC_CALL_NONE:
      gcc_assert (m68k_symbolic_call == NULL);
      break;

    default:
      gcc_unreachable ();
    }

#ifndef ASM_OUTPUT_ALIGN_WITH_NOP
  /* Without a nop-padding alignment directive we cannot honor
     alignments larger than 4 bytes (2^2).  */
  if (align_labels > 2)
    {
      warning (0, "-falign-labels=%d is not supported", align_labels);
      align_labels = 0;
    }
  if (align_loops > 2)
    {
      warning (0, "-falign-loops=%d is not supported", align_loops);
      align_loops = 0;
    }
#endif

  if ((opt_fstack_limit_symbol_arg != NULL || opt_fstack_limit_register_no >= 0)
      && !TARGET_68020)
    {
      warning (0, "-fstack-limit- options are not supported on this cpu");
      opt_fstack_limit_symbol_arg = NULL;
      opt_fstack_limit_register_no = -1;
    }

  SUBTARGET_OVERRIDE_OPTIONS;

  /* Setup scheduling options.  */
  if (TUNE_CFV1)
    m68k_sched_cpu = CPU_CFV1;
  else if (TUNE_CFV2)
    m68k_sched_cpu = CPU_CFV2;
  else if (TUNE_CFV3)
    m68k_sched_cpu = CPU_CFV3;
  else if (TUNE_CFV4)
    m68k_sched_cpu = CPU_CFV4;
  else
    {
      /* No scheduling description for this CPU: disable every pass
	 that would consult it.  */
      m68k_sched_cpu = CPU_UNKNOWN;
      flag_schedule_insns = 0;
      flag_schedule_insns_after_reload = 0;
      flag_modulo_sched = 0;
      flag_live_range_shrinkage = 0;
    }

  if (m68k_sched_cpu != CPU_UNKNOWN)
    {
      if ((m68k_cpu_flags & (FL_CF_EMAC | FL_CF_EMAC_B)) != 0)
	m68k_sched_mac = MAC_CF_EMAC;
      else if ((m68k_cpu_flags & FL_CF_MAC) != 0)
	m68k_sched_mac = MAC_CF_MAC;
      else
	m68k_sched_mac = MAC_NO;
    }
}
694
695 /* Implement TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE. */
696
697 static void
698 m68k_override_options_after_change (void)
699 {
700 if (m68k_sched_cpu == CPU_UNKNOWN)
701 {
702 flag_schedule_insns = 0;
703 flag_schedule_insns_after_reload = 0;
704 flag_modulo_sched = 0;
705 flag_live_range_shrinkage = 0;
706 }
707 }
708
709 /* Generate a macro of the form __mPREFIX_cpu_NAME, where PREFIX is the
710 given argument and NAME is the argument passed to -mcpu. Return NULL
711 if -mcpu was not passed. */
712
713 const char *
714 m68k_cpp_cpu_ident (const char *prefix)
715 {
716 if (!m68k_cpu_entry)
717 return NULL;
718 return concat ("__m", prefix, "_cpu_", m68k_cpu_entry->name, NULL);
719 }
720
721 /* Generate a macro of the form __mPREFIX_family_NAME, where PREFIX is the
722 given argument and NAME is the name of the representative device for
723 the -mcpu argument's family. Return NULL if -mcpu was not passed. */
724
725 const char *
726 m68k_cpp_cpu_family (const char *prefix)
727 {
728 if (!m68k_cpu_entry)
729 return NULL;
730 return concat ("__m", prefix, "_family_", m68k_cpu_entry->family, NULL);
731 }
732 \f
733 /* Return m68k_fk_interrupt_handler if FUNC has an "interrupt" or
734 "interrupt_handler" attribute and interrupt_thread if FUNC has an
735 "interrupt_thread" attribute. Otherwise, return
736 m68k_fk_normal_function. */
737
738 enum m68k_function_kind
739 m68k_get_function_kind (tree func)
740 {
741 tree a;
742
743 gcc_assert (TREE_CODE (func) == FUNCTION_DECL);
744
745 a = lookup_attribute ("interrupt", DECL_ATTRIBUTES (func));
746 if (a != NULL_TREE)
747 return m68k_fk_interrupt_handler;
748
749 a = lookup_attribute ("interrupt_handler", DECL_ATTRIBUTES (func));
750 if (a != NULL_TREE)
751 return m68k_fk_interrupt_handler;
752
753 a = lookup_attribute ("interrupt_thread", DECL_ATTRIBUTES (func));
754 if (a != NULL_TREE)
755 return m68k_fk_interrupt_thread;
756
757 return m68k_fk_normal_function;
758 }
759
760 /* Handle an attribute requiring a FUNCTION_DECL; arguments as in
761 struct attribute_spec.handler. */
762 static tree
763 m68k_handle_fndecl_attribute (tree *node, tree name,
764 tree args ATTRIBUTE_UNUSED,
765 int flags ATTRIBUTE_UNUSED,
766 bool *no_add_attrs)
767 {
768 if (TREE_CODE (*node) != FUNCTION_DECL)
769 {
770 warning (OPT_Wattributes, "%qE attribute only applies to functions",
771 name);
772 *no_add_attrs = true;
773 }
774
775 if (m68k_get_function_kind (*node) != m68k_fk_normal_function)
776 {
777 error ("multiple interrupt attributes not allowed");
778 *no_add_attrs = true;
779 }
780
781 if (!TARGET_FIDOA
782 && !strcmp (IDENTIFIER_POINTER (name), "interrupt_thread"))
783 {
784 error ("interrupt_thread is available only on fido");
785 *no_add_attrs = true;
786 }
787
788 return NULL_TREE;
789 }
790
/* Compute the stack frame layout of the current function and cache it
   in CURRENT_FRAME: frame size, which integer and FP registers must be
   saved, and the resulting save-area offsets.  */
static void
m68k_compute_frame_layout (void)
{
  int regno, saved;
  unsigned int mask;
  enum m68k_function_kind func_kind =
    m68k_get_function_kind (current_function_decl);
  bool interrupt_handler = func_kind == m68k_fk_interrupt_handler;
  bool interrupt_thread = func_kind == m68k_fk_interrupt_thread;

  /* Only compute the frame once per function.
     Don't cache information until reload has been completed.  */
  if (current_frame.funcdef_no == current_function_funcdef_no
      && reload_completed)
    return;

  /* Round the frame size up to a multiple of 4 bytes.  */
  current_frame.size = (get_frame_size () + 3) & -4;

  mask = saved = 0;

  /* Interrupt thread does not need to save any register.  */
  if (!interrupt_thread)
    for (regno = 0; regno < 16; regno++)
      if (m68k_save_reg (regno, interrupt_handler))
	{
	  mask |= 1 << (regno - D0_REG);
	  saved++;
	}
  /* Each saved integer register occupies 4 bytes.  */
  current_frame.offset = saved * 4;
  current_frame.reg_no = saved;
  current_frame.reg_mask = mask;

  current_frame.foffset = 0;
  mask = saved = 0;
  if (TARGET_HARD_FLOAT)
    {
      /* Interrupt thread does not need to save any register.  */
      if (!interrupt_thread)
	for (regno = 16; regno < 24; regno++)
	  if (m68k_save_reg (regno, interrupt_handler))
	    {
	      mask |= 1 << (regno - FP0_REG);
	      saved++;
	    }
      current_frame.foffset = saved * TARGET_FP_REG_SIZE;
      current_frame.offset += current_frame.foffset;
    }
  current_frame.fpu_no = saved;
  current_frame.fpu_mask = mask;

  /* Remember what function this frame refers to.  */
  current_frame.funcdef_no = current_function_funcdef_no;
}
844
845 /* Worker function for TARGET_CAN_ELIMINATE. */
846
847 bool
848 m68k_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
849 {
850 return (to == STACK_POINTER_REGNUM ? ! frame_pointer_needed : true);
851 }
852
853 HOST_WIDE_INT
854 m68k_initial_elimination_offset (int from, int to)
855 {
856 int argptr_offset;
857 /* The arg pointer points 8 bytes before the start of the arguments,
858 as defined by FIRST_PARM_OFFSET. This makes it coincident with the
859 frame pointer in most frames. */
860 argptr_offset = frame_pointer_needed ? 0 : UNITS_PER_WORD;
861 if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
862 return argptr_offset;
863
864 m68k_compute_frame_layout ();
865
866 gcc_assert (to == STACK_POINTER_REGNUM);
867 switch (from)
868 {
869 case ARG_POINTER_REGNUM:
870 return current_frame.offset + current_frame.size - argptr_offset;
871 case FRAME_POINTER_REGNUM:
872 return current_frame.offset + current_frame.size;
873 default:
874 gcc_unreachable ();
875 }
876 }
877
/* Refer to the array `regs_ever_live' to determine which registers
   to save; `regs_ever_live[I]' is nonzero if register number I
   is ever used in the function.  This function is responsible for
   knowing which registers should not be saved even if used.
   Return true if we need to save REGNO.  INTERRUPT_HANDLER indicates
   whether the current function is an interrupt handler, which must
   also preserve call-clobbered registers.  */

static bool
m68k_save_reg (unsigned int regno, bool interrupt_handler)
{
  if (flag_pic && regno == PIC_REG)
    {
      if (crtl->saves_all_registers)
	return true;
      if (crtl->uses_pic_offset_table)
	return true;
      /* Reload may introduce constant pool references into a function
	 that thitherto didn't need a PIC register.  Note that the test
	 above will not catch that case because we will only set
	 crtl->uses_pic_offset_table when emitting
	 the address reloads.  */
      if (crtl->uses_const_pool)
	return true;
    }

  /* EH-return data registers must survive so the unwinder can write
     into them.  */
  if (crtl->calls_eh_return)
    {
      unsigned int i;
      for (i = 0; ; i++)
	{
	  unsigned int test = EH_RETURN_DATA_REGNO (i);
	  if (test == INVALID_REGNUM)
	    break;
	  if (test == regno)
	    return true;
	}
    }

  /* Fixed regs we never touch.  */
  if (fixed_regs[regno])
    return false;

  /* The frame pointer (if it is such) is handled specially.  */
  if (regno == FRAME_POINTER_REGNUM && frame_pointer_needed)
    return false;

  /* Interrupt handlers must also save call_used_regs
     if they are live or when calling nested functions.  */
  if (interrupt_handler)
    {
      if (df_regs_ever_live_p (regno))
	return true;

      if (!crtl->is_leaf && call_used_regs[regno])
	return true;
    }

  /* Never need to save registers that aren't touched.  */
  if (!df_regs_ever_live_p (regno))
    return false;

  /* Otherwise save everything that isn't call-clobbered.  */
  return !call_used_regs[regno];
}
941
/* Emit RTL for a MOVEM or FMOVEM instruction.  BASE + OFFSET represents
   the lowest memory address.  COUNT is the number of registers to be
   moved, with register REGNO + I being moved if bit I of MASK is set.
   STORE_P specifies the direction of the move and ADJUST_STACK_P says
   whether or not this is pre-decrement (if STORE_P) or post-increment
   (if !STORE_P) operation.  Returns the emitted insn.  */

static rtx_insn *
m68k_emit_movem (rtx base, HOST_WIDE_INT offset,
		 unsigned int count, unsigned int regno,
		 unsigned int mask, bool store_p, bool adjust_stack_p)
{
  int i;
  rtx body, addr, src, operands[2];
  machine_mode mode;

  /* One SET per moved register, plus one for the stack adjustment
     when requested.  */
  body = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (adjust_stack_p + count));
  mode = reg_raw_mode[regno];
  i = 0;

  if (adjust_stack_p)
    {
      /* Move BASE by the total transfer size: downwards for stores
	 (pre-decrement), upwards for loads (post-increment).  */
      src = plus_constant (Pmode, base,
			   (count
			    * GET_MODE_SIZE (mode)
			    * (HOST_WIDE_INT) (store_p ? -1 : 1)));
      XVECEXP (body, 0, i++) = gen_rtx_SET (base, src);
    }

  for (; mask != 0; mask >>= 1, regno++)
    if (mask & 1)
      {
	addr = plus_constant (Pmode, base, offset);
	/* Index by STORE_P so operands[0] is the destination: the
	   memory slot for stores, the register for loads.  */
	operands[!store_p] = gen_frame_mem (mode, addr);
	operands[store_p] = gen_rtx_REG (mode, regno);
	XVECEXP (body, 0, i++)
	  = gen_rtx_SET (operands[0], operands[1]);
	offset += GET_MODE_SIZE (mode);
      }
  gcc_assert (i == XVECLEN (body, 0));

  return emit_insn (body);
}
985
986 /* Make INSN a frame-related instruction. */
987
988 static void
989 m68k_set_frame_related (rtx_insn *insn)
990 {
991 rtx body;
992 int i;
993
994 RTX_FRAME_RELATED_P (insn) = 1;
995 body = PATTERN (insn);
996 if (GET_CODE (body) == PARALLEL)
997 for (i = 0; i < XVECLEN (body, 0); i++)
998 RTX_FRAME_RELATED_P (XVECEXP (body, 0, i)) = 1;
999 }
1000
/* Emit RTL for the "prologue" define_expand.  Allocates the frame,
   saves the call-saved integer and FP registers recorded in
   current_frame, performs -fstack-limit checking and loads the GOT
   pointer when needed.  */

void
m68k_expand_prologue (void)
{
  HOST_WIDE_INT fsize_with_regs;
  rtx limit, src, dest;

  m68k_compute_frame_layout ();

  /* Record static stack usage for -fstack-usage.  */
  if (flag_stack_usage_info)
    current_function_static_stack_size
      = current_frame.size + current_frame.offset;

  /* If the stack limit is a symbol, we can check it here,
     before actually allocating the space.  */
  if (crtl->limit_stack
      && GET_CODE (stack_limit_rtx) == SYMBOL_REF)
    {
      limit = plus_constant (Pmode, stack_limit_rtx, current_frame.size + 4);
      if (!m68k_legitimate_constant_p (Pmode, limit))
	{
	  /* LIMIT cannot be used directly; materialize it in D0.  */
	  emit_move_insn (gen_rtx_REG (Pmode, D0_REG), limit);
	  limit = gen_rtx_REG (Pmode, D0_REG);
	}
      /* Trap if the stack pointer is below the limit.  */
      emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode,
					    stack_pointer_rtx, limit),
			       stack_pointer_rtx, limit,
			       const1_rtx));
    }

  fsize_with_regs = current_frame.size;
  if (TARGET_COLDFIRE)
    {
      /* ColdFire's move multiple instructions do not allow pre-decrement
	 addressing.  Add the size of movem saves to the initial stack
	 allocation instead.  */
      if (current_frame.reg_no >= MIN_MOVEM_REGS)
	fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
      if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
	fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
    }

  if (frame_pointer_needed)
    {
      if (fsize_with_regs == 0 && TUNE_68040)
	{
	  /* On the 68040, two separate moves are faster than link.w 0.  */
	  dest = gen_frame_mem (Pmode,
				gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
	  m68k_set_frame_related (emit_move_insn (dest, frame_pointer_rtx));
	  m68k_set_frame_related (emit_move_insn (frame_pointer_rtx,
						  stack_pointer_rtx));
	}
      else if (fsize_with_regs < 0x8000 || TARGET_68020)
	/* A single link covers the allocation.  The extra 4 bytes
	   presumably account for the frame pointer that link itself
	   pushes -- confirm against the m68k LINK instruction.  */
	m68k_set_frame_related
	  (emit_insn (gen_link (frame_pointer_rtx,
				GEN_INT (-4 - fsize_with_regs))));
      else
	{
	  /* Allocation of 0x8000+ without 68020 support: emit a
	     minimal link, then adjust the stack separately.  */
	  m68k_set_frame_related
	    (emit_insn (gen_link (frame_pointer_rtx, GEN_INT (-4))));
	  m68k_set_frame_related
	    (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-fsize_with_regs))));
	}

      /* If the frame pointer is needed, emit a special barrier that
	 will prevent the scheduler from moving stores to the frame
	 before the stack adjustment.  */
      emit_insn (gen_stack_tie (stack_pointer_rtx, frame_pointer_rtx));
    }
  else if (fsize_with_regs != 0)
    /* No frame pointer: just drop the stack pointer.  */
    m68k_set_frame_related
      (emit_insn (gen_addsi3 (stack_pointer_rtx,
			      stack_pointer_rtx,
			      GEN_INT (-fsize_with_regs))));

  if (current_frame.fpu_mask)
    {
      gcc_assert (current_frame.fpu_no >= MIN_FMOVEM_REGS);
      if (TARGET_68881)
	/* 68881 fmovem with pre-decrement; each FP register occupies
	   GET_MODE_SIZE (XFmode) bytes.  */
	m68k_set_frame_related
	  (m68k_emit_movem (stack_pointer_rtx,
			    current_frame.fpu_no * -GET_MODE_SIZE (XFmode),
			    current_frame.fpu_no, FP0_REG,
			    current_frame.fpu_mask, true, true));
      else
	{
	  int offset;

	  /* If we're using moveml to save the integer registers,
	     the stack pointer will point to the bottom of the moveml
	     save area.  Find the stack offset of the first FP register.  */
	  if (current_frame.reg_no < MIN_MOVEM_REGS)
	    offset = 0;
	  else
	    offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
	  m68k_set_frame_related
	    (m68k_emit_movem (stack_pointer_rtx, offset,
			      current_frame.fpu_no, FP0_REG,
			      current_frame.fpu_mask, true, false));
	}
    }

  /* If the stack limit is not a symbol, check it here.
     This has the disadvantage that it may be too late...  */
  if (crtl->limit_stack)
    {
      if (REG_P (stack_limit_rtx))
	emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode, stack_pointer_rtx,
					      stack_limit_rtx),
				 stack_pointer_rtx, stack_limit_rtx,
				 const1_rtx));

      else if (GET_CODE (stack_limit_rtx) != SYMBOL_REF)
	warning (0, "stack limit expression is not supported");
    }

  if (current_frame.reg_no < MIN_MOVEM_REGS)
    {
      /* Store each register separately in the same order moveml does.  */
      int i;

      for (i = 16; i-- > 0; )
	if (current_frame.reg_mask & (1 << i))
	  {
	    src = gen_rtx_REG (SImode, D0_REG + i);
	    dest = gen_frame_mem (SImode,
				  gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
	    m68k_set_frame_related (emit_insn (gen_movsi (dest, src)));
	  }
    }
  else
    {
      if (TARGET_COLDFIRE)
	/* The required register save space has already been allocated.
	   The first register should be stored at (%sp).  */
	m68k_set_frame_related
	  (m68k_emit_movem (stack_pointer_rtx, 0,
			    current_frame.reg_no, D0_REG,
			    current_frame.reg_mask, true, false));
      else
	m68k_set_frame_related
	  (m68k_emit_movem (stack_pointer_rtx,
			    current_frame.reg_no * -GET_MODE_SIZE (SImode),
			    current_frame.reg_no, D0_REG,
			    current_frame.reg_mask, true, true));
    }

  /* Load the GOT register for PIC code unless the data segment is
     kept separate.  */
  if (!TARGET_SEP_DATA
      && crtl->uses_pic_offset_table)
    emit_insn (gen_load_got (pic_offset_table_rtx));
}
1156 \f
1157 /* Return true if a simple (return) instruction is sufficient for this
1158 instruction (i.e. if no epilogue is needed). */
1159
1160 bool
1161 m68k_use_return_insn (void)
1162 {
1163 if (!reload_completed || frame_pointer_needed || get_frame_size () != 0)
1164 return false;
1165
1166 m68k_compute_frame_layout ();
1167 return current_frame.offset == 0;
1168 }
1169
/* Emit RTL for the "epilogue" or "sibcall_epilogue" define_expand;
   SIBCALL_P says which.

   The function epilogue should not depend on the current stack pointer!
   It should use the frame pointer only, if there is a frame pointer.
   This is mandatory because of alloca; we also take advantage of it to
   omit stack adjustments before returning.  */

void
m68k_expand_epilogue (bool sibcall_p)
{
  HOST_WIDE_INT fsize, fsize_with_regs;
  bool big, restore_from_sp;

  m68k_compute_frame_layout ();

  fsize = current_frame.size;
  /* BIG: restore addresses are formed as -(offset)(%fp,%a1.l) because
     the plain displacement would not fit 16 bits.  */
  big = false;
  restore_from_sp = false;

  /* FIXME : crtl->is_leaf below is too strong.
     What we really need to know there is if there could be pending
     stack adjustment needed at that point.  */
  restore_from_sp = (!frame_pointer_needed
		     || (!cfun->calls_alloca && crtl->is_leaf));

  /* fsize_with_regs is the size we need to adjust the sp when
     popping the frame.  */
  fsize_with_regs = fsize;
  if (TARGET_COLDFIRE && restore_from_sp)
    {
      /* ColdFire's move multiple instructions do not allow post-increment
	 addressing.  Add the size of movem loads to the final deallocation
	 instead.  */
      if (current_frame.reg_no >= MIN_MOVEM_REGS)
	fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
      if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
	fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
    }

  /* Restore offsets of 0x8000 or more do not fit a 16-bit
     displacement; set up A1 as an index or switch to a stack-based
     restore.  */
  if (current_frame.offset + fsize >= 0x8000
      && !restore_from_sp
      && (current_frame.reg_mask || current_frame.fpu_mask))
    {
      if (TARGET_COLDFIRE
	  && (current_frame.reg_no >= MIN_MOVEM_REGS
	      || current_frame.fpu_no >= MIN_FMOVEM_REGS))
	{
	  /* ColdFire's move multiple instructions do not support the
	     (d8,Ax,Xi) addressing mode, so we're as well using a normal
	     stack-based restore.  */
	  emit_move_insn (gen_rtx_REG (Pmode, A1_REG),
			  GEN_INT (-(current_frame.offset + fsize)));
	  emit_insn (gen_blockage ());
	  emit_insn (gen_addsi3 (stack_pointer_rtx,
				 gen_rtx_REG (Pmode, A1_REG),
				 frame_pointer_rtx));
	  restore_from_sp = true;
	}
      else
	{
	  /* Put -FSIZE in A1 so the restores below can address the
	     save area as -(offset)(%fp,%a1.l).  */
	  emit_move_insn (gen_rtx_REG (Pmode, A1_REG), GEN_INT (-fsize));
	  fsize = 0;
	  big = true;
	}
    }

  if (current_frame.reg_no < MIN_MOVEM_REGS)
    {
      /* Restore each register separately in the same order moveml does.  */
      int i;
      HOST_WIDE_INT offset;

      offset = current_frame.offset + fsize;
      for (i = 0; i < 16; i++)
	if (current_frame.reg_mask & (1 << i))
	  {
	    rtx addr;

	    if (big)
	      {
		/* Generate the address -OFFSET(%fp,%a1.l).  */
		addr = gen_rtx_REG (Pmode, A1_REG);
		addr = gen_rtx_PLUS (Pmode, addr, frame_pointer_rtx);
		addr = plus_constant (Pmode, addr, -offset);
	      }
	    else if (restore_from_sp)
	      addr = gen_rtx_POST_INC (Pmode, stack_pointer_rtx);
	    else
	      addr = plus_constant (Pmode, frame_pointer_rtx, -offset);
	    emit_move_insn (gen_rtx_REG (SImode, D0_REG + i),
			    gen_frame_mem (SImode, addr));
	    offset -= GET_MODE_SIZE (SImode);
	  }
    }
  else if (current_frame.reg_mask)
    {
      /* Restore all the saved integer registers with one moveml.  */
      if (big)
	m68k_emit_movem (gen_rtx_PLUS (Pmode,
				       gen_rtx_REG (Pmode, A1_REG),
				       frame_pointer_rtx),
			 -(current_frame.offset + fsize),
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false, false);
      else if (restore_from_sp)
	m68k_emit_movem (stack_pointer_rtx, 0,
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false,
			 !TARGET_COLDFIRE);
      else
	m68k_emit_movem (frame_pointer_rtx,
			 -(current_frame.offset + fsize),
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false, false);
    }

  if (current_frame.fpu_no > 0)
    {
      /* Restore the saved FP registers with fmovem.  */
      if (big)
	m68k_emit_movem (gen_rtx_PLUS (Pmode,
				       gen_rtx_REG (Pmode, A1_REG),
				       frame_pointer_rtx),
			 -(current_frame.foffset + fsize),
			 current_frame.fpu_no, FP0_REG,
			 current_frame.fpu_mask, false, false);
      else if (restore_from_sp)
	{
	  if (TARGET_COLDFIRE)
	    {
	      int offset;

	      /* If we used moveml to restore the integer registers, the
		 stack pointer will still point to the bottom of the moveml
		 save area.  Find the stack offset of the first FP
		 register.  */
	      if (current_frame.reg_no < MIN_MOVEM_REGS)
		offset = 0;
	      else
		offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
	      m68k_emit_movem (stack_pointer_rtx, offset,
			       current_frame.fpu_no, FP0_REG,
			       current_frame.fpu_mask, false, false);
	    }
	  else
	    m68k_emit_movem (stack_pointer_rtx, 0,
			     current_frame.fpu_no, FP0_REG,
			     current_frame.fpu_mask, false, true);
	}
      else
	m68k_emit_movem (frame_pointer_rtx,
			 -(current_frame.foffset + fsize),
			 current_frame.fpu_no, FP0_REG,
			 current_frame.fpu_mask, false, false);
    }

  /* Keep the restores above ordered before the frame teardown.  */
  emit_insn (gen_blockage ());
  if (frame_pointer_needed)
    emit_insn (gen_unlink (frame_pointer_rtx));
  else if (fsize_with_regs)
    emit_insn (gen_addsi3 (stack_pointer_rtx,
			   stack_pointer_rtx,
			   GEN_INT (fsize_with_regs)));

  /* Apply the extra stack adjustment demanded by __builtin_eh_return.  */
  if (crtl->calls_eh_return)
    emit_insn (gen_addsi3 (stack_pointer_rtx,
			   stack_pointer_rtx,
			   EH_RETURN_STACKADJ_RTX));

  /* A sibcall epilogue falls through into the sibling call; otherwise
     emit the return.  */
  if (!sibcall_p)
    emit_jump_insn (ret_rtx);
}
1341 \f
1342 /* Return true if X is a valid comparison operator for the dbcc
1343 instruction.
1344
1345 Note it rejects floating point comparison operators.
1346 (In the future we could use Fdbcc).
1347
1348 It also rejects some comparisons when CC_NO_OVERFLOW is set. */
1349
1350 int
1351 valid_dbcc_comparison_p_2 (rtx x, machine_mode mode ATTRIBUTE_UNUSED)
1352 {
1353 switch (GET_CODE (x))
1354 {
1355 case EQ: case NE: case GTU: case LTU:
1356 case GEU: case LEU:
1357 return 1;
1358
1359 /* Reject some when CC_NO_OVERFLOW is set. This may be over
1360 conservative */
1361 case GT: case LT: case GE: case LE:
1362 return ! (cc_prev_status.flags & CC_NO_OVERFLOW);
1363 default:
1364 return 0;
1365 }
1366 }
1367
1368 /* Return nonzero if flags are currently in the 68881 flag register. */
1369 int
1370 flags_in_68881 (void)
1371 {
1372 /* We could add support for these in the future */
1373 return cc_status.flags & CC_IN_68881;
1374 }
1375
1376 /* Return true if PARALLEL contains register REGNO. */
1377 static bool
1378 m68k_reg_present_p (const_rtx parallel, unsigned int regno)
1379 {
1380 int i;
1381
1382 if (REG_P (parallel) && REGNO (parallel) == regno)
1383 return true;
1384
1385 if (GET_CODE (parallel) != PARALLEL)
1386 return false;
1387
1388 for (i = 0; i < XVECLEN (parallel, 0); ++i)
1389 {
1390 const_rtx x;
1391
1392 x = XEXP (XVECEXP (parallel, 0, i), 0);
1393 if (REG_P (x) && REGNO (x) == regno)
1394 return true;
1395 }
1396
1397 return false;
1398 }
1399
1400 /* Implement TARGET_FUNCTION_OK_FOR_SIBCALL_P. */
1401
1402 static bool
1403 m68k_ok_for_sibcall_p (tree decl, tree exp)
1404 {
1405 enum m68k_function_kind kind;
1406
1407 /* We cannot use sibcalls for nested functions because we use the
1408 static chain register for indirect calls. */
1409 if (CALL_EXPR_STATIC_CHAIN (exp))
1410 return false;
1411
1412 if (!VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
1413 {
1414 /* Check that the return value locations are the same. For
1415 example that we aren't returning a value from the sibling in
1416 a D0 register but then need to transfer it to a A0 register. */
1417 rtx cfun_value;
1418 rtx call_value;
1419
1420 cfun_value = FUNCTION_VALUE (TREE_TYPE (DECL_RESULT (cfun->decl)),
1421 cfun->decl);
1422 call_value = FUNCTION_VALUE (TREE_TYPE (exp), decl);
1423
1424 /* Check that the values are equal or that the result the callee
1425 function returns is superset of what the current function returns. */
1426 if (!(rtx_equal_p (cfun_value, call_value)
1427 || (REG_P (cfun_value)
1428 && m68k_reg_present_p (call_value, REGNO (cfun_value)))))
1429 return false;
1430 }
1431
1432 kind = m68k_get_function_kind (current_function_decl);
1433 if (kind == m68k_fk_normal_function)
1434 /* We can always sibcall from a normal function, because it's
1435 undefined if it is calling an interrupt function. */
1436 return true;
1437
1438 /* Otherwise we can only sibcall if the function kind is known to be
1439 the same. */
1440 if (decl && m68k_get_function_kind (decl) == kind)
1441 return true;
1442
1443 return false;
1444 }
1445
1446 /* On the m68k all args are always pushed. */
1447
1448 static rtx
1449 m68k_function_arg (cumulative_args_t cum ATTRIBUTE_UNUSED,
1450 machine_mode mode ATTRIBUTE_UNUSED,
1451 const_tree type ATTRIBUTE_UNUSED,
1452 bool named ATTRIBUTE_UNUSED)
1453 {
1454 return NULL_RTX;
1455 }
1456
1457 static void
1458 m68k_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
1459 const_tree type, bool named ATTRIBUTE_UNUSED)
1460 {
1461 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
1462
1463 *cum += (mode != BLKmode
1464 ? (GET_MODE_SIZE (mode) + 3) & ~3
1465 : (int_size_in_bytes (type) + 3) & ~3);
1466 }
1467
1468 /* Convert X to a legitimate function call memory reference and return the
1469 result. */
1470
1471 rtx
1472 m68k_legitimize_call_address (rtx x)
1473 {
1474 gcc_assert (MEM_P (x));
1475 if (call_operand (XEXP (x, 0), VOIDmode))
1476 return x;
1477 return replace_equiv_address (x, force_reg (Pmode, XEXP (x, 0)));
1478 }
1479
1480 /* Likewise for sibling calls. */
1481
1482 rtx
1483 m68k_legitimize_sibcall_address (rtx x)
1484 {
1485 gcc_assert (MEM_P (x));
1486 if (sibcall_operand (XEXP (x, 0), VOIDmode))
1487 return x;
1488
1489 emit_move_insn (gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM), XEXP (x, 0));
1490 return replace_equiv_address (x, gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM));
1491 }
1492
/* Convert X to a legitimate address and return it if successful.  Otherwise
   return X.

   For the 68000, we handle X+REG by loading X into a register R and
   using R+REG.  R will go in an address reg and indexing will be used.
   However, if REG is a broken-out memory address or multiplication,
   nothing needs to be done because REG can certainly go in an address reg.  */

static rtx
m68k_legitimize_address (rtx x, rtx oldx, machine_mode mode)
{
  /* TLS references need their own legitimization sequence.  */
  if (m68k_tls_symbol_p (x))
    return m68k_legitimize_tls_address (x);

  if (GET_CODE (x) == PLUS)
    {
      /* CH records whether X or an operand of it has changed; COPIED
	 records whether X has already been unshared, so copy_rtx runs
	 at most once per call.  */
      int ch = (x) != (oldx);
      int copied = 0;

#define COPY_ONCE(Y) if (!copied) { Y = copy_rtx (Y); copied = ch = 1; }

      /* Break scaled-index multiplications out into registers.  */
      if (GET_CODE (XEXP (x, 0)) == MULT)
	{
	  COPY_ONCE (x);
	  XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
	}
      if (GET_CODE (XEXP (x, 1)) == MULT)
	{
	  COPY_ONCE (x);
	  XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
	}
      if (ch)
	{
	  if (GET_CODE (XEXP (x, 1)) == REG
	      && GET_CODE (XEXP (x, 0)) == REG)
	    {
	      /* REG+REG is fine, except that the ColdFire FPU cannot
		 use an indexed address for float operands.  */
	      if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
		{
		  COPY_ONCE (x);
		  x = force_operand (x, 0);
		}
	      return x;
	    }
	  if (memory_address_p (mode, x))
	    return x;
	}
      /* One side is (or sign-extends from HImode to) a register;
	 force the other side into a register so we end up with
	 REG+REG.  */
      if (GET_CODE (XEXP (x, 0)) == REG
	  || (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
	      && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
	      && GET_MODE (XEXP (XEXP (x, 0), 0)) == HImode))
	{
	  rtx temp = gen_reg_rtx (Pmode);
	  rtx val = force_operand (XEXP (x, 1), 0);
	  emit_move_insn (temp, val);
	  COPY_ONCE (x);
	  XEXP (x, 1) = temp;
	  /* Again, ColdFire FPU float operands cannot use REG+REG.  */
	  if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
	      && GET_CODE (XEXP (x, 0)) == REG)
	    x = force_operand (x, 0);
	}
      else if (GET_CODE (XEXP (x, 1)) == REG
	       || (GET_CODE (XEXP (x, 1)) == SIGN_EXTEND
		   && GET_CODE (XEXP (XEXP (x, 1), 0)) == REG
		   && GET_MODE (XEXP (XEXP (x, 1), 0)) == HImode))
	{
	  rtx temp = gen_reg_rtx (Pmode);
	  rtx val = force_operand (XEXP (x, 0), 0);
	  emit_move_insn (temp, val);
	  COPY_ONCE (x);
	  XEXP (x, 0) = temp;
	  if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
	      && GET_CODE (XEXP (x, 1)) == REG)
	    x = force_operand (x, 0);
	}
    }

  return x;
}
1571
1572
1573 /* Output a dbCC; jCC sequence. Note we do not handle the
1574 floating point version of this sequence (Fdbcc). We also
1575 do not handle alternative conditions when CC_NO_OVERFLOW is
1576 set. It is assumed that valid_dbcc_comparison_p and flags_in_68881 will
1577 kick those out before we get here. */
1578
1579 void
1580 output_dbcc_and_branch (rtx *operands)
1581 {
1582 switch (GET_CODE (operands[3]))
1583 {
1584 case EQ:
1585 output_asm_insn ("dbeq %0,%l1\n\tjeq %l2", operands);
1586 break;
1587
1588 case NE:
1589 output_asm_insn ("dbne %0,%l1\n\tjne %l2", operands);
1590 break;
1591
1592 case GT:
1593 output_asm_insn ("dbgt %0,%l1\n\tjgt %l2", operands);
1594 break;
1595
1596 case GTU:
1597 output_asm_insn ("dbhi %0,%l1\n\tjhi %l2", operands);
1598 break;
1599
1600 case LT:
1601 output_asm_insn ("dblt %0,%l1\n\tjlt %l2", operands);
1602 break;
1603
1604 case LTU:
1605 output_asm_insn ("dbcs %0,%l1\n\tjcs %l2", operands);
1606 break;
1607
1608 case GE:
1609 output_asm_insn ("dbge %0,%l1\n\tjge %l2", operands);
1610 break;
1611
1612 case GEU:
1613 output_asm_insn ("dbcc %0,%l1\n\tjcc %l2", operands);
1614 break;
1615
1616 case LE:
1617 output_asm_insn ("dble %0,%l1\n\tjle %l2", operands);
1618 break;
1619
1620 case LEU:
1621 output_asm_insn ("dbls %0,%l1\n\tjls %l2", operands);
1622 break;
1623
1624 default:
1625 gcc_unreachable ();
1626 }
1627
1628 /* If the decrement is to be done in SImode, then we have
1629 to compensate for the fact that dbcc decrements in HImode. */
1630 switch (GET_MODE (operands[0]))
1631 {
1632 case E_SImode:
1633 output_asm_insn ("clr%.w %0\n\tsubq%.l #1,%0\n\tjpl %l1", operands);
1634 break;
1635
1636 case E_HImode:
1637 break;
1638
1639 default:
1640 gcc_unreachable ();
1641 }
1642 }
1643
/* Output assembly for a DImode set-on-condition: set byte DEST
   according to comparison OP between DImode operands OPERAND1 and
   OPERAND2.  The DImode comparison is performed as two SImode
   compares; loperands[4] labels the point reached when the most
   significant words differ.  Returns "" since everything is emitted
   via output_asm_insn.  */

const char *
output_scc_di (rtx op, rtx operand1, rtx operand2, rtx dest)
{
  rtx loperands[7];
  enum rtx_code op_code = GET_CODE (op);

  /* This does not produce a useful cc.  */
  CC_STATUS_INIT;

  /* The m68k cmp.l instruction requires operand1 to be a reg as used
     below.  Swap the operands and change the op if these requirements
     are not fulfilled.  */
  if (GET_CODE (operand2) == REG && GET_CODE (operand1) != REG)
    {
      rtx tmp = operand1;

      operand1 = operand2;
      operand2 = tmp;
      op_code = swap_condition (op_code);
    }
  /* loperands[0]/[1]: first and second SImode word of operand1
     (REGNO + 1 or byte offset 4); likewise [2]/[3] for operand2.  */
  loperands[0] = operand1;
  if (GET_CODE (operand1) == REG)
    loperands[1] = gen_rtx_REG (SImode, REGNO (operand1) + 1);
  else
    loperands[1] = adjust_address (operand1, SImode, 4);
  if (operand2 != const0_rtx)
    {
      loperands[2] = operand2;
      if (GET_CODE (operand2) == REG)
	loperands[3] = gen_rtx_REG (SImode, REGNO (operand2) + 1);
      else
	loperands[3] = adjust_address (operand2, SImode, 4);
    }
  /* Label taken when the first words already differ.  */
  loperands[4] = gen_label_rtx ();
  if (operand2 != const0_rtx)
    output_asm_insn ("cmp%.l %2,%0\n\tjne %l4\n\tcmp%.l %3,%1", loperands);
  else
    {
      /* Compare against zero: tst.l where usable (tst.l of an address
	 register needs 68020/ColdFire), else cmp.w #0.  */
      if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[0]))
	output_asm_insn ("tst%.l %0", loperands);
      else
	output_asm_insn ("cmp%.w #0,%0", loperands);

      output_asm_insn ("jne %l4", loperands);

      if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[1]))
	output_asm_insn ("tst%.l %1", loperands);
      else
	output_asm_insn ("cmp%.w #0,%1", loperands);
    }

  loperands[5] = dest;

  /* For EQ/NE and the unsigned codes, one sCC after the label
     suffices.  The signed codes need two: an unsigned sCC for the
     fall-through (first words equal, condition decided by the second
     words) and a signed sCC at label 4 (decided by the first words),
     joined by label 6.  */
  switch (op_code)
    {
    case EQ:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("seq %5", loperands);
      break;

    case NE:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sne %5", loperands);
      break;

    case GT:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("shi %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sgt %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case GTU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("shi %5", loperands);
      break;

    case LT:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("scs %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("slt %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case LTU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("scs %5", loperands);
      break;

    case GE:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("scc %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sge %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case GEU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("scc %5", loperands);
      break;

    case LE:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("sls %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sle %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case LEU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sls %5", loperands);
      break;

    default:
      gcc_unreachable ();
    }
  return "";
}
1780
/* Output the assembler template for a bit test.  COUNTOP is the bit
   number, DATAOP the value being tested; both are also stored into
   operands[0]/operands[1] for the returned template.  SIGNPOS is the
   bit index of the sign bit for the access width in use (the code
   below compares it against 7, 15 and 31 and uses it as a mask, so it
   must be of the form 2^k - 1).  INSN is the btst insn, used to
   inspect the following test insn.  Sets cc_status to describe the
   resulting condition codes and returns the template.  */

const char *
output_btst (rtx *operands, rtx countop, rtx dataop, rtx_insn *insn, int signpos)
{
  operands[0] = countop;
  operands[1] = dataop;

  if (GET_CODE (countop) == CONST_INT)
    {
      register int count = INTVAL (countop);
      /* If COUNT is bigger than size of storage unit in use,
	 advance to the containing unit of same size.  */
      if (count > signpos)
	{
	  int offset = (count & ~signpos) / 8;
	  count = count & signpos;
	  operands[1] = dataop = adjust_address (dataop, QImode, offset);
	}
      /* Testing the sign bit: the result appears in N rather than Z.  */
      if (count == signpos)
	cc_status.flags = CC_NOT_POSITIVE | CC_Z_IN_NOT_N;
      else
	cc_status.flags = CC_NOT_NEGATIVE | CC_Z_IN_NOT_N;

      /* These three statements used to use next_insns_test_no...
	 but it appears that this should do the same job.  */
      if (count == 31
	  && next_insn_tests_no_inequality (insn))
	return "tst%.l %1";
      if (count == 15
	  && next_insn_tests_no_inequality (insn))
	return "tst%.w %1";
      if (count == 7
	  && next_insn_tests_no_inequality (insn))
	return "tst%.b %1";
      /* Try to use `movew to ccr' followed by the appropriate branch insn.
	 On some m68k variants unfortunately that's slower than btst.
	 On 68000 and higher, that should also work for all HImode operands.  */
      if (TUNE_CPU32 || TARGET_COLDFIRE || optimize_size)
	{
	  if (count == 3 && DATA_REG_P (operands[1])
	      && next_insn_tests_no_inequality (insn))
	    {
	      cc_status.flags = CC_NOT_NEGATIVE | CC_Z_IN_NOT_N | CC_NO_OVERFLOW;
	      return "move%.w %1,%%ccr";
	    }
	  if (count == 2 && DATA_REG_P (operands[1])
	      && next_insn_tests_no_inequality (insn))
	    {
	      cc_status.flags = CC_NOT_NEGATIVE | CC_INVERTED | CC_NO_OVERFLOW;
	      return "move%.w %1,%%ccr";
	    }
	  /* count == 1 followed by bvc/bvs and
	     count == 0 followed by bcc/bcs are also possible, but need
	     m68k-specific CC_Z_IN_NOT_V and CC_Z_IN_NOT_C flags.  */
	}

      cc_status.flags = CC_NOT_NEGATIVE;
    }
  return "btst %0,%1";
}
1840 \f
1841 /* Return true if X is a legitimate base register. STRICT_P says
1842 whether we need strict checking. */
1843
1844 bool
1845 m68k_legitimate_base_reg_p (rtx x, bool strict_p)
1846 {
1847 /* Allow SUBREG everywhere we allow REG. This results in better code. */
1848 if (!strict_p && GET_CODE (x) == SUBREG)
1849 x = SUBREG_REG (x);
1850
1851 return (REG_P (x)
1852 && (strict_p
1853 ? REGNO_OK_FOR_BASE_P (REGNO (x))
1854 : REGNO_OK_FOR_BASE_NONSTRICT_P (REGNO (x))));
1855 }
1856
1857 /* Return true if X is a legitimate index register. STRICT_P says
1858 whether we need strict checking. */
1859
1860 bool
1861 m68k_legitimate_index_reg_p (rtx x, bool strict_p)
1862 {
1863 if (!strict_p && GET_CODE (x) == SUBREG)
1864 x = SUBREG_REG (x);
1865
1866 return (REG_P (x)
1867 && (strict_p
1868 ? REGNO_OK_FOR_INDEX_P (REGNO (x))
1869 : REGNO_OK_FOR_INDEX_NONSTRICT_P (REGNO (x))));
1870 }
1871
1872 /* Return true if X is a legitimate index expression for a (d8,An,Xn) or
1873 (bd,An,Xn) addressing mode. Fill in the INDEX and SCALE fields of
1874 ADDRESS if so. STRICT_P says whether we need strict checking. */
1875
1876 static bool
1877 m68k_decompose_index (rtx x, bool strict_p, struct m68k_address *address)
1878 {
1879 int scale;
1880
1881 /* Check for a scale factor. */
1882 scale = 1;
1883 if ((TARGET_68020 || TARGET_COLDFIRE)
1884 && GET_CODE (x) == MULT
1885 && GET_CODE (XEXP (x, 1)) == CONST_INT
1886 && (INTVAL (XEXP (x, 1)) == 2
1887 || INTVAL (XEXP (x, 1)) == 4
1888 || (INTVAL (XEXP (x, 1)) == 8
1889 && (TARGET_COLDFIRE_FPU || !TARGET_COLDFIRE))))
1890 {
1891 scale = INTVAL (XEXP (x, 1));
1892 x = XEXP (x, 0);
1893 }
1894
1895 /* Check for a word extension. */
1896 if (!TARGET_COLDFIRE
1897 && GET_CODE (x) == SIGN_EXTEND
1898 && GET_MODE (XEXP (x, 0)) == HImode)
1899 x = XEXP (x, 0);
1900
1901 if (m68k_legitimate_index_reg_p (x, strict_p))
1902 {
1903 address->scale = scale;
1904 address->index = x;
1905 return true;
1906 }
1907
1908 return false;
1909 }
1910
1911 /* Return true if X is an illegitimate symbolic constant. */
1912
1913 bool
1914 m68k_illegitimate_symbolic_constant_p (rtx x)
1915 {
1916 rtx base, offset;
1917
1918 if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P)
1919 {
1920 split_const (x, &base, &offset);
1921 if (GET_CODE (base) == SYMBOL_REF
1922 && !offset_within_block_p (base, INTVAL (offset)))
1923 return true;
1924 }
1925 return m68k_tls_reference_p (x, false);
1926 }
1927
1928 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
1929
1930 static bool
1931 m68k_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
1932 {
1933 return m68k_illegitimate_symbolic_constant_p (x);
1934 }
1935
1936 /* Return true if X is a legitimate constant address that can reach
1937 bytes in the range [X, X + REACH). STRICT_P says whether we need
1938 strict checking. */
1939
1940 static bool
1941 m68k_legitimate_constant_address_p (rtx x, unsigned int reach, bool strict_p)
1942 {
1943 rtx base, offset;
1944
1945 if (!CONSTANT_ADDRESS_P (x))
1946 return false;
1947
1948 if (flag_pic
1949 && !(strict_p && TARGET_PCREL)
1950 && symbolic_operand (x, VOIDmode))
1951 return false;
1952
1953 if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P && reach > 1)
1954 {
1955 split_const (x, &base, &offset);
1956 if (GET_CODE (base) == SYMBOL_REF
1957 && !offset_within_block_p (base, INTVAL (offset) + reach - 1))
1958 return false;
1959 }
1960
1961 return !m68k_tls_reference_p (x, false);
1962 }
1963
1964 /* Return true if X is a LABEL_REF for a jump table. Assume that unplaced
1965 labels will become jump tables. */
1966
1967 static bool
1968 m68k_jump_table_ref_p (rtx x)
1969 {
1970 if (GET_CODE (x) != LABEL_REF)
1971 return false;
1972
1973 rtx_insn *insn = as_a <rtx_insn *> (XEXP (x, 0));
1974 if (!NEXT_INSN (insn) && !PREV_INSN (insn))
1975 return true;
1976
1977 insn = next_nonnote_insn (insn);
1978 return insn && JUMP_TABLE_DATA_P (insn);
1979 }
1980
/* Return true if X is a legitimate address for values of mode MODE.
   STRICT_P says whether strict checking is needed.  If the address
   is valid, describe its components in *ADDRESS.

   The cases below are tried in order, matching the m68k addressing
   modes from the simplest (register indirect) to the most complex
   (base + index + displacement).  */

static bool
m68k_decompose_address (machine_mode mode, rtx x,
			bool strict_p, struct m68k_address *address)
{
  unsigned int reach;

  /* Start from an all-clear description; fields are filled in as the
     address is matched below.  */
  memset (address, 0, sizeof (*address));

  /* REACH is the number of bytes past the displacement the access may
     touch; a BLKmode access conservatively counts as 1 byte.  It is
     used to keep displacement + access size within the signed range.  */
  if (mode == BLKmode)
    reach = 1;
  else
    reach = GET_MODE_SIZE (mode);

  /* Check for (An) (mode 2).  */
  if (m68k_legitimate_base_reg_p (x, strict_p))
    {
      address->base = x;
      return true;
    }

  /* Check for -(An) and (An)+ (modes 3 and 4).  */
  if ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_INC)
      && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
    {
      address->code = GET_CODE (x);
      address->base = XEXP (x, 0);
      return true;
    }

  /* Check for (d16,An) (mode 5).  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && IN_RANGE (INTVAL (XEXP (x, 1)), -0x8000, 0x8000 - reach)
      && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
    {
      address->base = XEXP (x, 0);
      address->offset = XEXP (x, 1);
      return true;
    }

  /* Check for GOT loads.  These are (bd,An,Xn) addresses if
     TARGET_68020 && flag_pic == 2, otherwise they are (d16,An)
     addresses.  */
  if (GET_CODE (x) == PLUS
      && XEXP (x, 0) == pic_offset_table_rtx)
    {
      /* As we are processing a PLUS, do not unwrap RELOC32 symbols --
	 they are invalid in this context.  */
      if (m68k_unwrap_symbol (XEXP (x, 1), false) != XEXP (x, 1))
	{
	  address->base = XEXP (x, 0);
	  address->offset = XEXP (x, 1);
	  return true;
	}
    }

  /* The ColdFire FPU only accepts addressing modes 2-5.  */
  if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
    return false;

  /* Check for (xxx).w and (xxx).l.  Also, in the TARGET_PCREL case,
     check for (d16,PC) or (bd,PC,Xn) with a suppressed index register.
     All these modes are variations of mode 7.  */
  if (m68k_legitimate_constant_address_p (x, reach, strict_p))
    {
      address->offset = x;
      return true;
    }

  /* Check for (d8,PC,Xn), a mode 7 form.  This case is needed for
     tablejumps.

     ??? do_tablejump creates these addresses before placing the target
     label, so we have to assume that unplaced labels are jump table
     references.  It seems unlikely that we would ever generate indexed
     accesses to unplaced labels in other cases.  */
  if (GET_CODE (x) == PLUS
      && m68k_jump_table_ref_p (XEXP (x, 1))
      && m68k_decompose_index (XEXP (x, 0), strict_p, address))
    {
      address->offset = XEXP (x, 1);
      return true;
    }

  /* Everything hereafter deals with (d8,An,Xn.SIZE*SCALE) or
     (bd,An,Xn.SIZE*SCALE) addresses.  */

  if (TARGET_68020)
    {
      /* Check for a nonzero base displacement.  Note that X is
	 narrowed to the remaining PLUS operand when a displacement
	 is peeled off.  */
      if (GET_CODE (x) == PLUS
	  && m68k_legitimate_constant_address_p (XEXP (x, 1), reach, strict_p))
	{
	  address->offset = XEXP (x, 1);
	  x = XEXP (x, 0);
	}

      /* Check for a suppressed index register.  */
      if (m68k_legitimate_base_reg_p (x, strict_p))
	{
	  address->base = x;
	  return true;
	}

      /* Check for a suppressed base register.  Do not allow this case
	 for non-symbolic offsets as it effectively gives gcc freedom
	 to treat data registers as base registers, which can generate
	 worse code.  */
      if (address->offset
	  && symbolic_operand (address->offset, VOIDmode)
	  && m68k_decompose_index (x, strict_p, address))
	return true;
    }
  else
    {
      /* Check for a nonzero base displacement.  Pre-68020 only has an
	 8-bit displacement in the indexed forms.  */
      if (GET_CODE (x) == PLUS
	  && GET_CODE (XEXP (x, 1)) == CONST_INT
	  && IN_RANGE (INTVAL (XEXP (x, 1)), -0x80, 0x80 - reach))
	{
	  address->offset = XEXP (x, 1);
	  x = XEXP (x, 0);
	}
    }

  /* We now expect the sum of a base and an index; try both orders of
     the commutative PLUS.  */
  if (GET_CODE (x) == PLUS)
    {
      if (m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p)
	  && m68k_decompose_index (XEXP (x, 1), strict_p, address))
	{
	  address->base = XEXP (x, 0);
	  return true;
	}

      if (m68k_legitimate_base_reg_p (XEXP (x, 1), strict_p)
	  && m68k_decompose_index (XEXP (x, 0), strict_p, address))
	{
	  address->base = XEXP (x, 1);
	  return true;
	}
    }
  return false;
}
2129
2130 /* Return true if X is a legitimate address for values of mode MODE.
2131 STRICT_P says whether strict checking is needed. */
2132
2133 bool
2134 m68k_legitimate_address_p (machine_mode mode, rtx x, bool strict_p)
2135 {
2136 struct m68k_address address;
2137
2138 return m68k_decompose_address (mode, x, strict_p, &address);
2139 }
2140
2141 /* Return true if X is a memory, describing its address in ADDRESS if so.
2142 Apply strict checking if called during or after reload. */
2143
2144 static bool
2145 m68k_legitimate_mem_p (rtx x, struct m68k_address *address)
2146 {
2147 return (MEM_P (x)
2148 && m68k_decompose_address (GET_MODE (x), XEXP (x, 0),
2149 reload_in_progress || reload_completed,
2150 address));
2151 }
2152
2153 /* Implement TARGET_LEGITIMATE_CONSTANT_P. */
2154
2155 bool
2156 m68k_legitimate_constant_p (machine_mode mode, rtx x)
2157 {
2158 return mode != XFmode && !m68k_illegitimate_symbolic_constant_p (x);
2159 }
2160
2161 /* Return true if X matches the 'Q' constraint. It must be a memory
2162 with a base address and no constant offset or index. */
2163
2164 bool
2165 m68k_matches_q_p (rtx x)
2166 {
2167 struct m68k_address address;
2168
2169 return (m68k_legitimate_mem_p (x, &address)
2170 && address.code == UNKNOWN
2171 && address.base
2172 && !address.offset
2173 && !address.index);
2174 }
2175
2176 /* Return true if X matches the 'U' constraint. It must be a base address
2177 with a constant offset and no index. */
2178
2179 bool
2180 m68k_matches_u_p (rtx x)
2181 {
2182 struct m68k_address address;
2183
2184 return (m68k_legitimate_mem_p (x, &address)
2185 && address.code == UNKNOWN
2186 && address.base
2187 && address.offset
2188 && !address.index);
2189 }
2190
2191 /* Return GOT pointer. */
2192
2193 static rtx
2194 m68k_get_gp (void)
2195 {
2196 if (pic_offset_table_rtx == NULL_RTX)
2197 pic_offset_table_rtx = gen_rtx_REG (Pmode, PIC_REG);
2198
2199 crtl->uses_pic_offset_table = 1;
2200
2201 return pic_offset_table_rtx;
2202 }
2203
/* M68K relocations, used to distinguish GOT and TLS relocations in UNSPEC
   wrappers.  RELOC_GOT is the only non-TLS kind; the rest correspond to
   the TLS access models (global-dynamic, local-dynamic module/offset,
   initial-exec, local-exec).  */
enum m68k_reloc { RELOC_GOT, RELOC_TLSGD, RELOC_TLSLDM, RELOC_TLSLDO,
		  RELOC_TLSIE, RELOC_TLSLE };

/* True iff RELOC is any of the TLS relocation kinds.  */
#define TLS_RELOC_P(RELOC) ((RELOC) != RELOC_GOT)
2210
/* Wrap symbol X into unspec representing relocation RELOC.
   BASE_REG - register that should be added to the result.
   TEMP_REG - if non-null, temporary register.

   On ColdFire with -mxgot/-mxtls this emits an explicit load+add
   sequence and returns the temporary register holding the result;
   otherwise it returns a (plus BASE_REG (const (unspec ...))) rtx
   without emitting any insns.  */

static rtx
m68k_wrap_symbol (rtx x, enum m68k_reloc reloc, rtx base_reg, rtx temp_reg)
{
  bool use_x_p;

  /* -mxgot applies to GOT references (base is the PIC register);
     -mxtls applies to TLS references.  */
  use_x_p = (base_reg == pic_offset_table_rtx) ? TARGET_XGOT : TARGET_XTLS;

  if (TARGET_COLDFIRE && use_x_p)
    /* When compiling with -mx{got, tls} switch the code will look like this:

       move.l <X>@<RELOC>,<TEMP_REG>
       add.l <BASE_REG>,<TEMP_REG>  */
    {
      /* Wrap X in UNSPEC_??? to tip m68k_output_addr_const_extra
	 to put @RELOC after reference.  */
      x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)),
			  UNSPEC_RELOC32);
      x = gen_rtx_CONST (Pmode, x);

      if (temp_reg == NULL)
	{
	  gcc_assert (can_create_pseudo_p ());
	  temp_reg = gen_reg_rtx (Pmode);
	}

      emit_move_insn (temp_reg, x);
      emit_insn (gen_addsi3 (temp_reg, temp_reg, base_reg));
      x = temp_reg;
    }
  else
    {
      /* Non-xgot/xtls case: a 16-bit relocation folded directly into
	 the address; no insns are emitted here.  */
      x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)),
			  UNSPEC_RELOC16);
      x = gen_rtx_CONST (Pmode, x);

      x = gen_rtx_PLUS (Pmode, base_reg, x);
    }

  return x;
}
2255
/* Helper for m68k_unwrap_symbol.
   Also, if unwrapping was successful (that is if (ORIG != <return value>)),
   sets *RELOC_PTR to relocation type for the symbol.

   Recognizes (const (unspec ...)) and (const ({plus,minus} (unspec ...)
   (const_int))) forms; anything else is returned unchanged.  */

static rtx
m68k_unwrap_symbol_1 (rtx orig, bool unwrap_reloc32_p,
		      enum m68k_reloc *reloc_ptr)
{
  if (GET_CODE (orig) == CONST)
    {
      rtx x;
      enum m68k_reloc dummy;

      x = XEXP (orig, 0);

      /* Allow callers that don't care about the relocation kind to
	 pass NULL.  */
      if (reloc_ptr == NULL)
	reloc_ptr = &dummy;

      /* Handle an addend.  */
      if ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS)
	  && CONST_INT_P (XEXP (x, 1)))
	x = XEXP (x, 0);

      if (GET_CODE (x) == UNSPEC)
	{
	  switch (XINT (x, 1))
	    {
	    case UNSPEC_RELOC16:
	      orig = XVECEXP (x, 0, 0);
	      *reloc_ptr = (enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1));
	      break;

	    case UNSPEC_RELOC32:
	      /* RELOC32 wrappers are only unwrapped on request; see
		 m68k_decompose_address for a caller that must not.  */
	      if (unwrap_reloc32_p)
		{
		  orig = XVECEXP (x, 0, 0);
		  *reloc_ptr = (enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1));
		}
	      break;

	    default:
	      break;
	    }
	}
    }

  return orig;
}
2304
2305 /* Unwrap symbol from UNSPEC_RELOC16 and, if unwrap_reloc32_p,
2306 UNSPEC_RELOC32 wrappers. */
2307
2308 rtx
2309 m68k_unwrap_symbol (rtx orig, bool unwrap_reloc32_p)
2310 {
2311 return m68k_unwrap_symbol_1 (orig, unwrap_reloc32_p, NULL);
2312 }
2313
/* Prescan insn before outputing assembler for it.  Rewrites wrapped
   symbol operands in place so that relocation decorations end up in
   the right position in the emitted assembly.  */

void
m68k_final_prescan_insn (rtx_insn *insn ATTRIBUTE_UNUSED,
			 rtx *operands, int n_operands)
{
  int i;

  /* Combine and, possibly, other optimizations may do good job
     converting
       (const (unspec [(symbol)]))
     into
       (const (plus (unspec [(symbol)])
                    (const_int N))).
     The problem with this is emitting @TLS or @GOT decorations.
     The decoration is emitted when processing (unspec), so the
     result would be "#symbol@TLSLE+N" instead of "#symbol+N@TLSLE".

     It seems that the easiest solution to this is to convert such
     operands to
       (const (unspec [(plus (symbol)
                             (const_int N))])).
     Note, that the top level of operand remains intact, so we don't have
     to patch up anything outside of the operand.  */

  subrtx_var_iterator::array_type array;
  for (i = 0; i < n_operands; ++i)
    {
      rtx op;

      op = operands[i];

      FOR_EACH_SUBRTX_VAR (iter, array, op, ALL)
	{
	  rtx x = *iter;
	  /* Only rewrapped forms (where unwrapping succeeds) need the
	     transformation below.  */
	  if (m68k_unwrap_symbol (x, true) != x)
	    {
	      rtx plus;

	      gcc_assert (GET_CODE (x) == CONST);
	      plus = XEXP (x, 0);

	      if (GET_CODE (plus) == PLUS || GET_CODE (plus) == MINUS)
		{
		  rtx unspec;
		  rtx addend;

		  unspec = XEXP (plus, 0);
		  gcc_assert (GET_CODE (unspec) == UNSPEC);
		  addend = XEXP (plus, 1);
		  gcc_assert (CONST_INT_P (addend));

		  /* We now have all the pieces, rearrange them.  */

		  /* Move symbol to plus.  */
		  XEXP (plus, 0) = XVECEXP (unspec, 0, 0);

		  /* Move plus inside unspec.  */
		  XVECEXP (unspec, 0, 0) = plus;

		  /* Move unspec to top level of const.  */
		  XEXP (x, 0) = unspec;
		}
	      /* The subtree has been fixed up (or needs no fix);
		 don't walk into it again.  */
	      iter.skip_subrtxes ();
	    }
	}
    }
}
2382
2383 /* Move X to a register and add REG_EQUAL note pointing to ORIG.
2384 If REG is non-null, use it; generate new pseudo otherwise. */
2385
2386 static rtx
2387 m68k_move_to_reg (rtx x, rtx orig, rtx reg)
2388 {
2389 rtx_insn *insn;
2390
2391 if (reg == NULL_RTX)
2392 {
2393 gcc_assert (can_create_pseudo_p ());
2394 reg = gen_reg_rtx (Pmode);
2395 }
2396
2397 insn = emit_move_insn (reg, x);
2398 /* Put a REG_EQUAL note on this insn, so that it can be optimized
2399 by loop. */
2400 set_unique_reg_note (insn, REG_EQUAL, orig);
2401
2402 return reg;
2403 }
2404
2405 /* Does the same as m68k_wrap_symbol, but returns a memory reference to
2406 GOT slot. */
2407
2408 static rtx
2409 m68k_wrap_symbol_into_got_ref (rtx x, enum m68k_reloc reloc, rtx temp_reg)
2410 {
2411 x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), temp_reg);
2412
2413 x = gen_rtx_MEM (Pmode, x);
2414 MEM_READONLY_P (x) = 1;
2415
2416 return x;
2417 }
2418
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.

   An address is legitimized by making an indirect reference
   through the Global Offset Table with the name of the symbol
   used as an offset.

   The assembler and linker are responsible for placing the
   address of the symbol in the GOT.  The function prologue
   is responsible for initializing a5 to the starting address
   of the GOT.

   The assembler is also responsible for translating a symbol name
   into a constant displacement from the start of the GOT.

   A quick example may make things a little clearer:

   When not generating PIC code to store the value 12345 into _foo
   we would generate the following code:

	movel #12345, _foo

   When generating PIC two transformations are made.  First, the compiler
   loads the address of foo into a register.  So the first transformation makes:

	lea	_foo, a0
	movel   #12345, a0@

   The code in movsi will intercept the lea instruction and call this
   routine which will transform the instructions into:

	movel   a5@(_foo:w), a0
	movel   #12345, a0@


   That (in a nutshell) is how *all* symbol and label references are
   handled.  */

rtx
legitimize_pic_address (rtx orig, machine_mode mode ATTRIBUTE_UNUSED,
		        rtx reg)
{
  rtx pic_ref = orig;

  /* First handle a simple SYMBOL_REF or LABEL_REF */
  if (GET_CODE (orig) == SYMBOL_REF || GET_CODE (orig) == LABEL_REF)
    {
      gcc_assert (reg);

      /* Load the symbol's address out of its GOT slot into REG.  */
      pic_ref = m68k_wrap_symbol_into_got_ref (orig, RELOC_GOT, reg);
      pic_ref = m68k_move_to_reg (pic_ref, orig, reg);
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      /* Make sure this has not already been legitimized.  */
      if (m68k_unwrap_symbol (orig, true) != orig)
	return orig;

      gcc_assert (reg);

      /* legitimize both operands of the PLUS */
      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      /* Recurse on each operand; the second recursion reuses REG only
	 when the first did not consume it.  */
      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
				     base == reg ? 0 : reg);

      if (GET_CODE (orig) == CONST_INT)
	pic_ref = plus_constant (Pmode, base, INTVAL (orig));
      else
	pic_ref = gen_rtx_PLUS (Pmode, base, orig);
    }

  return pic_ref;
}
2498
2499 /* The __tls_get_addr symbol. */
2500 static GTY(()) rtx m68k_tls_get_addr;
2501
2502 /* Return SYMBOL_REF for __tls_get_addr. */
2503
2504 static rtx
2505 m68k_get_tls_get_addr (void)
2506 {
2507 if (m68k_tls_get_addr == NULL_RTX)
2508 m68k_tls_get_addr = init_one_libfunc ("__tls_get_addr");
2509
2510 return m68k_tls_get_addr;
2511 }
2512
/* Return libcall result in A0 instead of usual D0.  Checked by the
   libcall-value hook while the flag is set.  */
static bool m68k_libcall_value_in_a0_p = false;

/* Emit instruction sequence that calls __tls_get_addr.  X is
   the TLS symbol we are referencing and RELOC is the symbol type to use
   (either TLSGD or TLSLDM).  EQV is the REG_EQUAL note for the sequence
   emitted.  A pseudo register with result of __tls_get_addr call is
   returned.  */

static rtx
m68k_call_tls_get_addr (rtx x, rtx eqv, enum m68k_reloc reloc)
{
  rtx a0;
  rtx_insn *insns;
  rtx dest;

  /* Emit the call sequence.  */
  start_sequence ();

  /* FIXME: Unfortunately, emit_library_call_value does not
     consider (plus (%a5) (const (unspec))) to be a good enough
     operand for push, so it forces it into a register.  The bad
     thing about this is that combiner, due to copy propagation and other
     optimizations, sometimes can not later fix this.  As a consequence,
     additional register may be allocated resulting in a spill.
     For reference, see args processing loops in
     calls.c:emit_library_call_value_1.
     For testcase, see gcc.target/m68k/tls-{gd, ld}.c  */
  x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), NULL_RTX);

  /* __tls_get_addr() is not a libcall, but emitting a libcall_value
     is the simplest way of generating a call.  The difference between
     __tls_get_addr() and libcall is that the result is returned in D0
     instead of A0.  To workaround this, we use m68k_libcall_value_in_a0_p
     which temporarily switches returning the result to A0.  */

  m68k_libcall_value_in_a0_p = true;
  a0 = emit_library_call_value (m68k_get_tls_get_addr (), NULL_RTX, LCT_PURE,
				Pmode, x, Pmode);
  m68k_libcall_value_in_a0_p = false;

  insns = get_insns ();
  end_sequence ();

  /* Wrap the whole call sequence in a libcall block so the RTL
     optimizers can treat it as a single pure value computation.  */
  gcc_assert (can_create_pseudo_p ());
  dest = gen_reg_rtx (Pmode);
  emit_libcall_block (insns, dest, a0, eqv);

  return dest;
}
2563
/* The __m68k_read_tp symbol.  */
static GTY(()) rtx m68k_read_tp;

/* Return SYMBOL_REF for __m68k_read_tp, creating it lazily on
   first use.  */

static rtx
m68k_get_m68k_read_tp (void)
{
  if (m68k_read_tp == NULL_RTX)
    m68k_read_tp = init_one_libfunc ("__m68k_read_tp");

  return m68k_read_tp;
}
2577
/* Emit instruction sequence that calls __m68k_read_tp.
   A pseudo register with result of __m68k_read_tp call is returned.  */

static rtx
m68k_call_m68k_read_tp (void)
{
  rtx a0;
  rtx eqv;
  rtx_insn *insns;
  rtx dest;

  start_sequence ();

  /* __m68k_read_tp() is not a libcall, but emitting a libcall_value
     is the simplest way of generating a call.  The difference between
     __m68k_read_tp() and libcall is that the result is returned in D0
     instead of A0.  To workaround this, we use m68k_libcall_value_in_a0_p
     which temporarily switches returning the result to A0.  */

  /* Emit the call sequence.  */
  m68k_libcall_value_in_a0_p = true;
  a0 = emit_library_call_value (m68k_get_m68k_read_tp (), NULL_RTX, LCT_PURE,
				Pmode);
  m68k_libcall_value_in_a0_p = false;
  insns = get_insns ();
  end_sequence ();

  /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
     share the m68k_read_tp result with other IE/LE model accesses.
     const1_rtx distinguishes this equivalence from the LDM one, which
     uses const0_rtx (see m68k_legitimize_tls_address).  */
  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const1_rtx), UNSPEC_RELOC32);

  gcc_assert (can_create_pseudo_p ());
  dest = gen_reg_rtx (Pmode);
  emit_libcall_block (insns, dest, a0, eqv);

  return dest;
}
2615
/* Return a legitimized address for accessing TLS SYMBOL_REF X.
   For explanations on instructions sequences see TLS/NPTL ABI for m68k and
   ColdFire.  Dispatches on the symbol's TLS access model.  */

rtx
m68k_legitimize_tls_address (rtx orig)
{
  switch (SYMBOL_REF_TLS_MODEL (orig))
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      /* GD: one __tls_get_addr call per symbol; the symbol itself is
	 the REG_EQUAL value.  */
      orig = m68k_call_tls_get_addr (orig, orig, RELOC_TLSGD);
      break;

    case TLS_MODEL_LOCAL_DYNAMIC:
      {
	rtx eqv;
	rtx a0;
	rtx x;

	/* Attach a unique REG_EQUIV, to allow the RTL optimizers to
	   share the LDM result with other LD model accesses.  */
	eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
			      UNSPEC_RELOC32);

	a0 = m68k_call_tls_get_addr (orig, eqv, RELOC_TLSLDM);

	/* Add the symbol's module-relative offset to the module base.  */
	x = m68k_wrap_symbol (orig, RELOC_TLSLDO, a0, NULL_RTX);

	if (can_create_pseudo_p ())
	  x = m68k_move_to_reg (x, orig, NULL_RTX);

	orig = x;
	break;
      }

    case TLS_MODEL_INITIAL_EXEC:
      {
	rtx a0;
	rtx x;

	a0 = m68k_call_m68k_read_tp ();

	/* Load the symbol's TP-relative offset from its GOT slot and
	   add the thread pointer.  */
	x = m68k_wrap_symbol_into_got_ref (orig, RELOC_TLSIE, NULL_RTX);
	x = gen_rtx_PLUS (Pmode, x, a0);

	if (can_create_pseudo_p ())
	  x = m68k_move_to_reg (x, orig, NULL_RTX);

	orig = x;
	break;
      }

    case TLS_MODEL_LOCAL_EXEC:
      {
	rtx a0;
	rtx x;

	a0 = m68k_call_m68k_read_tp ();

	/* The TP-relative offset is known at link time; no GOT load.  */
	x = m68k_wrap_symbol (orig, RELOC_TLSLE, a0, NULL_RTX);

	if (can_create_pseudo_p ())
	  x = m68k_move_to_reg (x, orig, NULL_RTX);

	orig = x;
	break;
      }

    default:
      gcc_unreachable ();
    }

  return orig;
}
2690
2691 /* Return true if X is a TLS symbol. */
2692
2693 static bool
2694 m68k_tls_symbol_p (rtx x)
2695 {
2696 if (!TARGET_HAVE_TLS)
2697 return false;
2698
2699 if (GET_CODE (x) != SYMBOL_REF)
2700 return false;
2701
2702 return SYMBOL_REF_TLS_MODEL (x) != 0;
2703 }
2704
/* If !LEGITIMATE_P, return true if X is a TLS symbol reference,
   though illegitimate one.
   If LEGITIMATE_P, return true if X is a legitimate TLS symbol reference.  */

bool
m68k_tls_reference_p (rtx x, bool legitimate_p)
{
  if (!TARGET_HAVE_TLS)
    return false;

  if (!legitimate_p)
    {
      /* Walk all sub-rtxes looking for a raw TLS SYMBOL_REF.  */
      subrtx_var_iterator::array_type array;
      FOR_EACH_SUBRTX_VAR (iter, array, x, ALL)
	{
	  /* NB: this X deliberately shadows the parameter; it is the
	     current sub-rtx being examined.  */
	  rtx x = *iter;

	  /* Note: this is not the same as m68k_tls_symbol_p.  */
	  if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0)
	    return true;

	  /* Don't recurse into legitimate TLS references.  */
	  if (m68k_tls_reference_p (x, true))
	    iter.skip_subrtxes ();
	}
      return false;
    }
  else
    {
      enum m68k_reloc reloc = RELOC_GOT;

      /* Legitimate TLS references are wrapped in an UNSPEC carrying a
	 TLS relocation kind.  */
      return (m68k_unwrap_symbol_1 (x, true, &reloc) != x
	      && TLS_RELOC_P (reloc));
    }
}
2740
2741 \f
2742
2743 #define USE_MOVQ(i) ((unsigned) ((i) + 128) <= 255)
2744
2745 /* Return the type of move that should be used for integer I. */
2746
2747 M68K_CONST_METHOD
2748 m68k_const_method (HOST_WIDE_INT i)
2749 {
2750 unsigned u;
2751
2752 if (USE_MOVQ (i))
2753 return MOVQ;
2754
2755 /* The ColdFire doesn't have byte or word operations. */
2756 /* FIXME: This may not be useful for the m68060 either. */
2757 if (!TARGET_COLDFIRE)
2758 {
2759 /* if -256 < N < 256 but N is not in range for a moveq
2760 N^ff will be, so use moveq #N^ff, dreg; not.b dreg. */
2761 if (USE_MOVQ (i ^ 0xff))
2762 return NOTB;
2763 /* Likewise, try with not.w */
2764 if (USE_MOVQ (i ^ 0xffff))
2765 return NOTW;
2766 /* This is the only value where neg.w is useful */
2767 if (i == -65408)
2768 return NEGW;
2769 }
2770
2771 /* Try also with swap. */
2772 u = i;
2773 if (USE_MOVQ ((u >> 16) | (u << 16)))
2774 return SWAP;
2775
2776 if (TARGET_ISAB)
2777 {
2778 /* Try using MVZ/MVS with an immediate value to load constants. */
2779 if (i >= 0 && i <= 65535)
2780 return MVZ;
2781 if (i >= -32768 && i <= 32767)
2782 return MVS;
2783 }
2784
2785 /* Otherwise, use move.l */
2786 return MOVL;
2787 }
2788
2789 /* Return the cost of moving constant I into a data register. */
2790
2791 static int
2792 const_int_cost (HOST_WIDE_INT i)
2793 {
2794 switch (m68k_const_method (i))
2795 {
2796 case MOVQ:
2797 /* Constants between -128 and 127 are cheap due to moveq. */
2798 return 0;
2799 case MVZ:
2800 case MVS:
2801 case NOTB:
2802 case NOTW:
2803 case NEGW:
2804 case SWAP:
2805 /* Constants easily generated by moveq + not.b/not.w/neg.w/swap. */
2806 return 1;
2807 case MOVL:
2808 return 2;
2809 default:
2810 gcc_unreachable ();
2811 }
2812 }
2813
/* Implement TARGET_RTX_COSTS.  Fill in *TOTAL with the cost of X and
   return true when the cost is final (no recursion into operands
   needed), false to let the generic code cost the operands.  */

static bool
m68k_rtx_costs (rtx x, machine_mode mode, int outer_code,
		int opno ATTRIBUTE_UNUSED,
		int *total, bool speed ATTRIBUTE_UNUSED)
{
  int code = GET_CODE (x);

  switch (code)
    {
    case CONST_INT:
      /* Constant zero is super cheap due to clr instruction.  */
      if (x == const0_rtx)
	*total = 0;
      else
	*total = const_int_cost (INTVAL (x));
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 3;
      return true;

    case CONST_DOUBLE:
      /* Make 0.0 cheaper than other floating constants to
         encourage creating tstsf and tstdf insns.  */
      if (outer_code == COMPARE
          && (x == CONST0_RTX (SFmode) || x == CONST0_RTX (DFmode)))
	*total = 4;
      else
	*total = 5;
      return true;

    /* These are vaguely right for a 68020.  */
    /* The costs for long multiply have been adjusted to work properly
       in synth_mult on the 68020, relative to an average of the time
       for add and the time for shift, taking away a little more because
       sometimes move insns are needed.  */
    /* div?.w is relatively cheaper on 68000 counted in COSTS_N_INSNS
       terms.  */
#define MULL_COST				\
  (TUNE_68060 ? 2				\
   : TUNE_68040 ? 5				\
   : (TUNE_CFV2 && TUNE_EMAC) ? 3		\
   : (TUNE_CFV2 && TUNE_MAC) ? 4		\
   : TUNE_CFV2 ? 8				\
   : TARGET_COLDFIRE ? 3 : 13)

#define MULW_COST				\
  (TUNE_68060 ? 2				\
   : TUNE_68040 ? 3				\
   : TUNE_68000_10 ? 5				\
   : (TUNE_CFV2 && TUNE_EMAC) ? 3		\
   : (TUNE_CFV2 && TUNE_MAC) ? 2		\
   : TUNE_CFV2 ? 8				\
   : TARGET_COLDFIRE ? 2 : 8)

#define DIVW_COST				\
  (TARGET_CF_HWDIV ? 11				\
   : TUNE_68000_10 || TARGET_COLDFIRE ? 12 : 27)

    case PLUS:
      /* An lea costs about three times as much as a simple add.  */
      if (mode == SImode
	  && GET_CODE (XEXP (x, 1)) == REG
	  && GET_CODE (XEXP (x, 0)) == MULT
	  && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
	  && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
	  && (INTVAL (XEXP (XEXP (x, 0), 1)) == 2
	      || INTVAL (XEXP (XEXP (x, 0), 1)) == 4
	      || INTVAL (XEXP (XEXP (x, 0), 1)) == 8))
	{
	    /* lea an@(dx:l:i),am */
	    *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 2 : 3);
	    return true;
	}
      return false;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      if (TUNE_68060)
	{
          *total = COSTS_N_INSNS(1);
	  return true;
	}
      if (TUNE_68000_10)
	{
	  if (GET_CODE (XEXP (x, 1)) == CONST_INT)
	    {
	      if (INTVAL (XEXP (x, 1)) < 16)
	        *total = COSTS_N_INSNS (2) + INTVAL (XEXP (x, 1)) / 2;
	      else
	        /* We're using clrw + swap for these cases.  */
	        *total = COSTS_N_INSNS (4) + (INTVAL (XEXP (x, 1)) - 16) / 2;
	    }
	  else
	    *total = COSTS_N_INSNS (10); /* Worst case.  */
	  return true;
	}
      /* A shift by a big integer takes an extra instruction.  */
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && (INTVAL (XEXP (x, 1)) == 16))
	{
	  *total = COSTS_N_INSNS (2);	 /* clrw;swap */
	  return true;
	}
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && !(INTVAL (XEXP (x, 1)) > 0
	       && INTVAL (XEXP (x, 1)) <= 8))
	{
	  *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 1 : 3);	 /* lsr #i,dn */
	  return true;
	}
      return false;

    case MULT:
      /* Widening 16x16->32 multiplies are cheaper than full 32x32.  */
      if ((GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
	   || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
	  && mode == SImode)
        *total = COSTS_N_INSNS (MULW_COST);
      else if (mode == QImode || mode == HImode)
        *total = COSTS_N_INSNS (MULW_COST);
      else
        *total = COSTS_N_INSNS (MULL_COST);
      return true;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      if (mode == QImode || mode == HImode)
        *total = COSTS_N_INSNS (DIVW_COST);	/* div.w */
      else if (TARGET_CF_HWDIV)
        *total = COSTS_N_INSNS (18);
      else
        *total = COSTS_N_INSNS (43);		/* div.l */
      return true;

    case ZERO_EXTRACT:
      /* Bit-field extract compared against zero maps to a btst-style
	 test, which the comparison itself pays for.  */
      if (outer_code == COMPARE)
        *total = 0;
      return false;

    default:
      return false;
    }
}
2962
/* Return an instruction to move CONST_INT OPERANDS[1] into data register
   OPERANDS[0].  May rewrite OPERANDS[1] in place when the emitted
   sequence loads a transformed constant first (NOTB/NOTW/SWAP).  */

static const char *
output_move_const_into_data_reg (rtx *operands)
{
  HOST_WIDE_INT i;

  i = INTVAL (operands[1]);
  switch (m68k_const_method (i))
    {
    case MVZ:
      return "mvzw %1,%0";
    case MVS:
      return "mvsw %1,%0";
    case MOVQ:
      return "moveq %1,%0";
    case NOTB:
      /* Two-insn sequences invalidate the tracked CC state.  */
      CC_STATUS_INIT;
      operands[1] = GEN_INT (i ^ 0xff);
      return "moveq %1,%0\n\tnot%.b %0";
    case NOTW:
      CC_STATUS_INIT;
      operands[1] = GEN_INT (i ^ 0xffff);
      return "moveq %1,%0\n\tnot%.w %0";
    case NEGW:
      CC_STATUS_INIT;
      return "moveq #-128,%0\n\tneg%.w %0";
    case SWAP:
      {
	unsigned u = i;

	/* Emit the halves exchanged; swap restores the real value.  */
	operands[1] = GEN_INT ((u << 16) | (u >> 16));
	return "moveq %1,%0\n\tswap %0";
      }
    case MOVL:
      return "move%.l %1,%0";
    default:
      gcc_unreachable ();
    }
}
3004
3005 /* Return true if I can be handled by ISA B's mov3q instruction. */
3006
3007 bool
3008 valid_mov3q_const (HOST_WIDE_INT i)
3009 {
3010 return TARGET_ISAB && (i == -1 || IN_RANGE (i, 1, 7));
3011 }
3012
/* Return an instruction to move CONST_INT OPERANDS[1] into OPERANDS[0].
   I is the value of OPERANDS[1].  The cases are ordered so that the
   cheapest applicable encoding is chosen first.  */

static const char *
output_move_simode_const (rtx *operands)
{
  rtx dest;
  HOST_WIDE_INT src;

  dest = operands[0];
  src = INTVAL (operands[1]);
  if (src == 0
      && (DATA_REG_P (dest) || MEM_P (dest))
      /* clr insns on 68000 read before writing.  */
      && ((TARGET_68010 || TARGET_COLDFIRE)
	  || !(MEM_P (dest) && MEM_VOLATILE_P (dest))))
    return "clr%.l %0";
  else if (GET_MODE (dest) == SImode && valid_mov3q_const (src))
    return "mov3q%.l %1,%0";
  else if (src == 0 && ADDRESS_REG_P (dest))
    /* Subtracting a register from itself zeroes it without a load.  */
    return "sub%.l %0,%0";
  else if (DATA_REG_P (dest))
    return output_move_const_into_data_reg (operands);
  else if (ADDRESS_REG_P (dest) && IN_RANGE (src, -0x8000, 0x7fff))
    {
      if (valid_mov3q_const (src))
        return "mov3q%.l %1,%0";
      /* Word moves to an address register are sign-extended to 32 bits.  */
      return "move%.w %1,%0";
    }
  else if (MEM_P (dest)
	   && GET_CODE (XEXP (dest, 0)) == PRE_DEC
	   && REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
	   && IN_RANGE (src, -0x8000, 0x7fff))
    {
      /* Pushing a small constant: pea with an absolute-word operand
	 is shorter than a full move.l immediate.  */
      if (valid_mov3q_const (src))
        return "mov3q%.l %1,%-";
      return "pea %a1";
    }
  return "move%.l %1,%0";
}
3053
3054 const char *
3055 output_move_simode (rtx *operands)
3056 {
3057 if (GET_CODE (operands[1]) == CONST_INT)
3058 return output_move_simode_const (operands);
3059 else if ((GET_CODE (operands[1]) == SYMBOL_REF
3060 || GET_CODE (operands[1]) == CONST)
3061 && push_operand (operands[0], SImode))
3062 return "pea %a1";
3063 else if ((GET_CODE (operands[1]) == SYMBOL_REF
3064 || GET_CODE (operands[1]) == CONST)
3065 && ADDRESS_REG_P (operands[0]))
3066 return "lea %a1,%0";
3067 return "move%.l %1,%0";
3068 }
3069
3070 const char *
3071 output_move_himode (rtx *operands)
3072 {
3073 if (GET_CODE (operands[1]) == CONST_INT)
3074 {
3075 if (operands[1] == const0_rtx
3076 && (DATA_REG_P (operands[0])
3077 || GET_CODE (operands[0]) == MEM)
3078 /* clr insns on 68000 read before writing. */
3079 && ((TARGET_68010 || TARGET_COLDFIRE)
3080 || !(GET_CODE (operands[0]) == MEM
3081 && MEM_VOLATILE_P (operands[0]))))
3082 return "clr%.w %0";
3083 else if (operands[1] == const0_rtx
3084 && ADDRESS_REG_P (operands[0]))
3085 return "sub%.l %0,%0";
3086 else if (DATA_REG_P (operands[0])
3087 && INTVAL (operands[1]) < 128
3088 && INTVAL (operands[1]) >= -128)
3089 return "moveq %1,%0";
3090 else if (INTVAL (operands[1]) < 0x8000
3091 && INTVAL (operands[1]) >= -0x8000)
3092 return "move%.w %1,%0";
3093 }
3094 else if (CONSTANT_P (operands[1]))
3095 return "move%.l %1,%0";
3096 return "move%.w %1,%0";
3097 }
3098
/* Return the assembler template for a QImode move from OPERANDS[1]
   to OPERANDS[0].  */

const char *
output_move_qimode (rtx *operands)
{
  /* 68k family always modifies the stack pointer by at least 2, even for
     byte pushes.  The 5200 (ColdFire) does not do this.  */

  /* This case is generated by pushqi1 pattern now.  */
  gcc_assert (!(GET_CODE (operands[0]) == MEM
		&& GET_CODE (XEXP (operands[0], 0)) == PRE_DEC
		&& XEXP (XEXP (operands[0], 0), 0) == stack_pointer_rtx
		&& ! ADDRESS_REG_P (operands[1])
		&& ! TARGET_COLDFIRE));

  /* clr and st insns on 68000 read before writing.  */
  if (!ADDRESS_REG_P (operands[0])
      && ((TARGET_68010 || TARGET_COLDFIRE)
	  || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
    {
      if (operands[1] == const0_rtx)
	return "clr%.b %0";
      if ((!TARGET_COLDFIRE || DATA_REG_P (operands[0]))
	  && GET_CODE (operands[1]) == CONST_INT
	  && (INTVAL (operands[1]) & 255) == 255)
	{
	  /* st sets all bits; its effect on CC differs from a move,
	     so invalidate the tracked CC state.  */
	  CC_STATUS_INIT;
	  return "st %0";
	}
    }
  if (GET_CODE (operands[1]) == CONST_INT
      && DATA_REG_P (operands[0])
      && INTVAL (operands[1]) < 128
      && INTVAL (operands[1]) >= -128)
    return "moveq %1,%0";
  if (operands[1] == const0_rtx && ADDRESS_REG_P (operands[0]))
    return "sub%.l %0,%0";
  if (GET_CODE (operands[1]) != CONST_INT && CONSTANT_P (operands[1]))
    return "move%.l %1,%0";
  /* 68k family (including the 5200 ColdFire) does not support byte moves to
     from address registers.  */
  if (ADDRESS_REG_P (operands[0]) || ADDRESS_REG_P (operands[1]))
    return "move%.w %1,%0";
  return "move%.b %1,%0";
}
3142
3143 const char *
3144 output_move_stricthi (rtx *operands)
3145 {
3146 if (operands[1] == const0_rtx
3147 /* clr insns on 68000 read before writing. */
3148 && ((TARGET_68010 || TARGET_COLDFIRE)
3149 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3150 return "clr%.w %0";
3151 return "move%.w %1,%0";
3152 }
3153
3154 const char *
3155 output_move_strictqi (rtx *operands)
3156 {
3157 if (operands[1] == const0_rtx
3158 /* clr insns on 68000 read before writing. */
3159 && ((TARGET_68010 || TARGET_COLDFIRE)
3160 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3161 return "clr%.b %0";
3162 return "move%.b %1,%0";
3163 }
3164
3165 /* Return the best assembler insn template
3166 for moving operands[1] into operands[0] as a fullword. */
3167
3168 static const char *
3169 singlemove_string (rtx *operands)
3170 {
3171 if (GET_CODE (operands[1]) == CONST_INT)
3172 return output_move_simode_const (operands);
3173 return "move%.l %1,%0";
3174 }
3175
3176
/* Output assembler or rtl code to perform a doubleword move insn
   with operands OPERANDS.
   Pointers to 3 helper functions should be specified:
   HANDLE_REG_ADJUST to adjust a register by a small value,
   HANDLE_COMPADR to compute an address and
   HANDLE_MOVSI to move 4 bytes.

   The move is performed one SImode word at a time; SIZE below is 8
   for DImode/DFmode and 12 for XFmode, in which case an extra
   "middle" word is tracked as well.  */

static void
handle_move_double (rtx operands[2],
		    void (*handle_reg_adjust) (rtx, int),
		    void (*handle_compadr) (rtx [2]),
		    void (*handle_movsi) (rtx [2]))
{
  /* Addressing-form classification of each operand:
     REGOP register, OFFSOP offsettable memory, MEMOP other memory,
     PUSHOP pre-decrement, POPOP post-increment, CNSTOP constant,
     RNDOP anything else (rejected below).  */
  enum
    {
      REGOP, OFFSOP, MEMOP, PUSHOP, POPOP, CNSTOP, RNDOP
    } optype0, optype1;
  rtx latehalf[2];
  rtx middlehalf[2];
  rtx xops[2];
  /* Registers we can bump to reach the other words of an
     unoffsettable memory operand (0 if not needed).  */
  rtx addreg0 = 0, addreg1 = 0;
  int dest_overlapped_low = 0;
  int size = GET_MODE_SIZE (GET_MODE (operands[0]));

  middlehalf[0] = 0;
  middlehalf[1] = 0;

  /* First classify both operands.  */

  if (REG_P (operands[0]))
    optype0 = REGOP;
  else if (offsettable_memref_p (operands[0]))
    optype0 = OFFSOP;
  else if (GET_CODE (XEXP (operands[0], 0)) == POST_INC)
    optype0 = POPOP;
  else if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC)
    optype0 = PUSHOP;
  else if (GET_CODE (operands[0]) == MEM)
    optype0 = MEMOP;
  else
    optype0 = RNDOP;

  if (REG_P (operands[1]))
    optype1 = REGOP;
  else if (CONSTANT_P (operands[1]))
    optype1 = CNSTOP;
  else if (offsettable_memref_p (operands[1]))
    optype1 = OFFSOP;
  else if (GET_CODE (XEXP (operands[1], 0)) == POST_INC)
    optype1 = POPOP;
  else if (GET_CODE (XEXP (operands[1], 0)) == PRE_DEC)
    optype1 = PUSHOP;
  else if (GET_CODE (operands[1]) == MEM)
    optype1 = MEMOP;
  else
    optype1 = RNDOP;

  /* Check for the cases that the operand constraints are not supposed
     to allow to happen.  Generating code for these cases is
     painful.  */
  gcc_assert (optype0 != RNDOP && optype1 != RNDOP);

  /* If one operand is decrementing and one is incrementing
     decrement the former register explicitly
     and change that operand into ordinary indexing.  */

  if (optype0 == PUSHOP && optype1 == POPOP)
    {
      /* Strip the PRE_DEC, adjust the base register by hand, and
	 treat the destination as an ordinary offsettable MEM.  */
      operands[0] = XEXP (XEXP (operands[0], 0), 0);

      handle_reg_adjust (operands[0], -size);

      if (GET_MODE (operands[1]) == XFmode)
	operands[0] = gen_rtx_MEM (XFmode, operands[0]);
      else if (GET_MODE (operands[0]) == DFmode)
	operands[0] = gen_rtx_MEM (DFmode, operands[0]);
      else
	operands[0] = gen_rtx_MEM (DImode, operands[0]);
      optype0 = OFFSOP;
    }
  if (optype0 == POPOP && optype1 == PUSHOP)
    {
      /* Mirror case: strip the source's PRE_DEC the same way.  */
      operands[1] = XEXP (XEXP (operands[1], 0), 0);

      handle_reg_adjust (operands[1], -size);

      if (GET_MODE (operands[1]) == XFmode)
	operands[1] = gen_rtx_MEM (XFmode, operands[1]);
      else if (GET_MODE (operands[1]) == DFmode)
	operands[1] = gen_rtx_MEM (DFmode, operands[1]);
      else
	operands[1] = gen_rtx_MEM (DImode, operands[1]);
      optype1 = OFFSOP;
    }

  /* If an operand is an unoffsettable memory ref, find a register
     we can increment temporarily to make it refer to the second word.  */

  if (optype0 == MEMOP)
    addreg0 = find_addr_reg (XEXP (operands[0], 0));

  if (optype1 == MEMOP)
    addreg1 = find_addr_reg (XEXP (operands[1], 0));

  /* Ok, we can do one word at a time.
     Normally we do the low-numbered word first,
     but if either operand is autodecrementing then we
     do the high-numbered word first.

     In either case, set up in LATEHALF the operands to use
     for the high-numbered word and in some cases alter the
     operands in OPERANDS to be suitable for the low-numbered word.  */

  if (size == 12)
    {
      if (optype0 == REGOP)
	{
	  latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 2);
	  middlehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
	}
      else if (optype0 == OFFSOP)
	{
	  middlehalf[0] = adjust_address (operands[0], SImode, 4);
	  latehalf[0] = adjust_address (operands[0], SImode, size - 4);
	}
      else
	{
	  /* Unoffsettable MEM: the later words reuse offset 0; the
	     address register found above is bumped at emission time.  */
	  middlehalf[0] = adjust_address (operands[0], SImode, 0);
	  latehalf[0] = adjust_address (operands[0], SImode, 0);
	}

      if (optype1 == REGOP)
	{
	  latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 2);
	  middlehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
	}
      else if (optype1 == OFFSOP)
	{
	  middlehalf[1] = adjust_address (operands[1], SImode, 4);
	  latehalf[1] = adjust_address (operands[1], SImode, size - 4);
	}
      else if (optype1 == CNSTOP)
	{
	  if (GET_CODE (operands[1]) == CONST_DOUBLE)
	    {
	      /* Split an extended-precision constant into its three
		 32-bit target words.  */
	      long l[3];

	      REAL_VALUE_TO_TARGET_LONG_DOUBLE
		(*CONST_DOUBLE_REAL_VALUE (operands[1]), l);
	      operands[1] = GEN_INT (l[0]);
	      middlehalf[1] = GEN_INT (l[1]);
	      latehalf[1] = GEN_INT (l[2]);
	    }
	  else
	    {
	      /* No non-CONST_DOUBLE constant should ever appear
		 here.  */
	      gcc_assert (!CONSTANT_P (operands[1]));
	    }
	}
      else
	{
	  middlehalf[1] = adjust_address (operands[1], SImode, 0);
	  latehalf[1] = adjust_address (operands[1], SImode, 0);
	}
    }
  else
    /* size is not 12: */
    {
      if (optype0 == REGOP)
	latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
      else if (optype0 == OFFSOP)
	latehalf[0] = adjust_address (operands[0], SImode, size - 4);
      else
	latehalf[0] = adjust_address (operands[0], SImode, 0);

      if (optype1 == REGOP)
	latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
      else if (optype1 == OFFSOP)
	latehalf[1] = adjust_address (operands[1], SImode, size - 4);
      else if (optype1 == CNSTOP)
	split_double (operands[1], &operands[1], &latehalf[1]);
      else
	latehalf[1] = adjust_address (operands[1], SImode, 0);
    }

  /* If insn is effectively movd N(REG),-(REG) then we will do the high
     word first.  We should use the adjusted operand 1 (which is N+4(REG))
     for the low word as well, to compensate for the first decrement of
     REG.  */
  if (optype0 == PUSHOP
      && reg_overlap_mentioned_p (XEXP (XEXP (operands[0], 0), 0), operands[1]))
    operands[1] = middlehalf[1] = latehalf[1];

  /* For (set (reg:DI N) (mem:DI ... (reg:SI N) ...)),
     if the upper part of reg N does not appear in the MEM, arrange to
     emit the move late-half first.  Otherwise, compute the MEM address
     into the upper part of N and use that as a pointer to the memory
     operand.  */
  if (optype0 == REGOP
      && (optype1 == OFFSOP || optype1 == MEMOP))
    {
      rtx testlow = gen_rtx_REG (SImode, REGNO (operands[0]));

      if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
	  && reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
	{
	  /* If both halves of dest are used in the src memory address,
	     compute the address into latehalf of dest.
	     Note that this can't happen if the dest is two data regs.  */
	compadr:
	  xops[0] = latehalf[0];
	  xops[1] = XEXP (operands[1], 0);

	  handle_compadr (xops);
	  if (GET_MODE (operands[1]) == XFmode)
	    {
	      /* NOTE(review): these adjust_address calls use DImode
		 where SImode might be expected; kept as-is since the
		 words are still moved 4 bytes at a time below.  */
	      operands[1] = gen_rtx_MEM (XFmode, latehalf[0]);
	      middlehalf[1] = adjust_address (operands[1], DImode, size - 8);
	      latehalf[1] = adjust_address (operands[1], DImode, size - 4);
	    }
	  else
	    {
	      operands[1] = gen_rtx_MEM (DImode, latehalf[0]);
	      latehalf[1] = adjust_address (operands[1], DImode, size - 4);
	    }
	}
      else if (size == 12
	       && reg_overlap_mentioned_p (middlehalf[0],
					   XEXP (operands[1], 0)))
	{
	  /* Check for two regs used by both source and dest.
	     Note that this can't happen if the dest is all data regs.
	     It can happen if the dest is d6, d7, a0.
	     But in that case, latehalf is an addr reg, so
	     the code at compadr does ok.  */

	  if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
	      || reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
	    goto compadr;

	  /* JRV says this can't happen: */
	  gcc_assert (!addreg0 && !addreg1);

	  /* Only the middle reg conflicts; simply put it last.  */
	  handle_movsi (operands);
	  handle_movsi (latehalf);
	  handle_movsi (middlehalf);

	  return;
	}
      else if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0)))
	/* If the low half of dest is mentioned in the source memory
	   address, the arrange to emit the move late half first.  */
	dest_overlapped_low = 1;
    }

  /* If one or both operands autodecrementing,
     do the two words, high-numbered first.  */

  /* Likewise,  the first move would clobber the source of the second one,
     do them in the other order.  This happens only for registers;
     such overlap can't happen in memory unless the user explicitly
     sets it up, and that is an undefined circumstance.  */

  if (optype0 == PUSHOP || optype1 == PUSHOP
      || (optype0 == REGOP && optype1 == REGOP
	  && ((middlehalf[1] && REGNO (operands[0]) == REGNO (middlehalf[1]))
	      || REGNO (operands[0]) == REGNO (latehalf[1])))
      || dest_overlapped_low)
    {
      /* Make any unoffsettable addresses point at high-numbered word.  */
      if (addreg0)
	handle_reg_adjust (addreg0, size - 4);
      if (addreg1)
	handle_reg_adjust (addreg1, size - 4);

      /* Do that word.  */
      handle_movsi (latehalf);

      /* Undo the adds we just did.  */
      if (addreg0)
	handle_reg_adjust (addreg0, -4);
      if (addreg1)
	handle_reg_adjust (addreg1, -4);

      if (size == 12)
	{
	  handle_movsi (middlehalf);

	  if (addreg0)
	    handle_reg_adjust (addreg0, -4);
	  if (addreg1)
	    handle_reg_adjust (addreg1, -4);
	}

      /* Do low-numbered word.  */

      handle_movsi (operands);
      return;
    }

  /* Normal case: do the two words, low-numbered first.  */

  m68k_final_prescan_insn (NULL, operands, 2);
  handle_movsi (operands);

  /* Do the middle one of the three words for long double */
  if (size == 12)
    {
      if (addreg0)
	handle_reg_adjust (addreg0, 4);
      if (addreg1)
	handle_reg_adjust (addreg1, 4);

      m68k_final_prescan_insn (NULL, middlehalf, 2);
      handle_movsi (middlehalf);
    }

  /* Make any unoffsettable addresses point at high-numbered word.  */
  if (addreg0)
    handle_reg_adjust (addreg0, 4);
  if (addreg1)
    handle_reg_adjust (addreg1, 4);

  /* Do that word.  */
  m68k_final_prescan_insn (NULL, latehalf, 2);
  handle_movsi (latehalf);

  /* Undo the adds we just did.  */
  if (addreg0)
    handle_reg_adjust (addreg0, -(size - 4));
  if (addreg1)
    handle_reg_adjust (addreg1, -(size - 4));

  return;
}
3514
3515 /* Output assembler code to adjust REG by N. */
3516 static void
3517 output_reg_adjust (rtx reg, int n)
3518 {
3519 const char *s;
3520
3521 gcc_assert (GET_MODE (reg) == SImode
3522 && -12 <= n && n != 0 && n <= 12);
3523
3524 switch (n)
3525 {
3526 case 12:
3527 s = "add%.l #12,%0";
3528 break;
3529
3530 case 8:
3531 s = "addq%.l #8,%0";
3532 break;
3533
3534 case 4:
3535 s = "addq%.l #4,%0";
3536 break;
3537
3538 case -12:
3539 s = "sub%.l #12,%0";
3540 break;
3541
3542 case -8:
3543 s = "subq%.l #8,%0";
3544 break;
3545
3546 case -4:
3547 s = "subq%.l #4,%0";
3548 break;
3549
3550 default:
3551 gcc_unreachable ();
3552 s = NULL;
3553 }
3554
3555 output_asm_insn (s, &reg);
3556 }
3557
3558 /* Emit rtl code to adjust REG by N. */
3559 static void
3560 emit_reg_adjust (rtx reg1, int n)
3561 {
3562 rtx reg2;
3563
3564 gcc_assert (GET_MODE (reg1) == SImode
3565 && -12 <= n && n != 0 && n <= 12);
3566
3567 reg1 = copy_rtx (reg1);
3568 reg2 = copy_rtx (reg1);
3569
3570 if (n < 0)
3571 emit_insn (gen_subsi3 (reg1, reg2, GEN_INT (-n)));
3572 else if (n > 0)
3573 emit_insn (gen_addsi3 (reg1, reg2, GEN_INT (n)));
3574 else
3575 gcc_unreachable ();
3576 }
3577
3578 /* Output assembler to load address OPERANDS[0] to register OPERANDS[1]. */
3579 static void
3580 output_compadr (rtx operands[2])
3581 {
3582 output_asm_insn ("lea %a1,%0", operands);
3583 }
3584
3585 /* Output the best assembler insn for moving operands[1] into operands[0]
3586 as a fullword. */
3587 static void
3588 output_movsi (rtx operands[2])
3589 {
3590 output_asm_insn (singlemove_string (operands), operands);
3591 }
3592
3593 /* Copy OP and change its mode to MODE. */
3594 static rtx
3595 copy_operand (rtx op, machine_mode mode)
3596 {
3597 /* ??? This looks really ugly. There must be a better way
3598 to change a mode on the operand. */
3599 if (GET_MODE (op) != VOIDmode)
3600 {
3601 if (REG_P (op))
3602 op = gen_rtx_REG (mode, REGNO (op));
3603 else
3604 {
3605 op = copy_rtx (op);
3606 PUT_MODE (op, mode);
3607 }
3608 }
3609
3610 return op;
3611 }
3612
3613 /* Emit rtl code for moving operands[1] into operands[0] as a fullword. */
3614 static void
3615 emit_movsi (rtx operands[2])
3616 {
3617 operands[0] = copy_operand (operands[0], SImode);
3618 operands[1] = copy_operand (operands[1], SImode);
3619
3620 emit_insn (gen_movsi (operands[0], operands[1]));
3621 }
3622
3623 /* Output assembler code to perform a doubleword move insn
3624 with operands OPERANDS. */
3625 const char *
3626 output_move_double (rtx *operands)
3627 {
3628 handle_move_double (operands,
3629 output_reg_adjust, output_compadr, output_movsi);
3630
3631 return "";
3632 }
3633
3634 /* Output rtl code to perform a doubleword move insn
3635 with operands OPERANDS. */
3636 void
3637 m68k_emit_move_double (rtx operands[2])
3638 {
3639 handle_move_double (operands, emit_reg_adjust, emit_movsi, emit_movsi);
3640 }
3641
3642 /* Ensure mode of ORIG, a REG rtx, is MODE. Returns either ORIG or a
3643 new rtx with the correct mode. */
3644
3645 static rtx
3646 force_mode (machine_mode mode, rtx orig)
3647 {
3648 if (mode == GET_MODE (orig))
3649 return orig;
3650
3651 if (REGNO (orig) >= FIRST_PSEUDO_REGISTER)
3652 abort ();
3653
3654 return gen_rtx_REG (mode, REGNO (orig));
3655 }
3656
3657 static int
3658 fp_reg_operand (rtx op, machine_mode mode ATTRIBUTE_UNUSED)
3659 {
3660 return reg_renumber && FP_REG_P (op);
3661 }
3662
/* Emit insns to move operands[1] into operands[0].

   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.

   Note SCRATCH_REG may not be in the proper mode depending on how it
   will be used.  This routine is responsible for creating a new copy
   of SCRATCH_REG in the proper mode.

   NOTE(review): the structure (and some comments, e.g. the "14 bits"
   remarks below) appear to be inherited from the PA port -- confirm
   against history before relying on them.  */

int
emit_move_sequence (rtx *operands, machine_mode mode, rtx scratch_reg)
{
  register rtx operand0 = operands[0];
  register rtx operand1 = operands[1];
  register rtx tem;

  /* During reload, replace a destination pseudo that did not get a
     hard register with its equivalent stack slot.  */
  if (scratch_reg
      && reload_in_progress && GET_CODE (operand0) == REG
      && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
    operand0 = reg_equiv_mem (REGNO (operand0));
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand0) == SUBREG
	   && GET_CODE (SUBREG_REG (operand0)) == REG
	   && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
    {
      /* We must not alter SUBREG_BYTE (operand0) since that would confuse
	 the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
				 reg_equiv_mem (REGNO (SUBREG_REG (operand0))),
				 SUBREG_BYTE (operand0));
      operand0 = alter_subreg (&temp, true);
    }

  /* Likewise for the source operand.  */
  if (scratch_reg
      && reload_in_progress && GET_CODE (operand1) == REG
      && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
    operand1 = reg_equiv_mem (REGNO (operand1));
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand1) == SUBREG
	   && GET_CODE (SUBREG_REG (operand1)) == REG
	   && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
    {
      /* We must not alter SUBREG_BYTE (operand1) since that would confuse
	 the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
				 reg_equiv_mem (REGNO (SUBREG_REG (operand1))),
				 SUBREG_BYTE (operand1));
      operand1 = alter_subreg (&temp, true);
    }

  /* Pick up any address replacements recorded by reload.  */
  if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
      && ((tem = find_replacement (&XEXP (operand0, 0)))
	  != XEXP (operand0, 0)))
    operand0 = gen_rtx_MEM (GET_MODE (operand0), tem);
  if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
      && ((tem = find_replacement (&XEXP (operand1, 0)))
	  != XEXP (operand1, 0)))
    operand1 = gen_rtx_MEM (GET_MODE (operand1), tem);

  /* Handle secondary reloads for loads/stores of FP registers where
     the address is symbolic by using the scratch register */
  if (fp_reg_operand (operand0, mode)
      && ((GET_CODE (operand1) == MEM
	   && ! memory_address_p (DFmode, XEXP (operand1, 0)))
	  || ((GET_CODE (operand1) == SUBREG
	       && GET_CODE (XEXP (operand1, 0)) == MEM
	       && !memory_address_p (DFmode, XEXP (XEXP (operand1, 0), 0)))))
      && scratch_reg)
    {
      if (GET_CODE (operand1) == SUBREG)
	operand1 = XEXP (operand1, 0);

      /* SCRATCH_REG will hold an address.  We want
	 it in SImode regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (SImode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand1, 0)))
	{
	  /* Load the displacement, then recombine it with the base
	     using the original address's operator.  */
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
	  emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
						       Pmode,
						       XEXP (XEXP (operand1, 0), 0),
						       scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand1, 0));
      emit_insn (gen_rtx_SET (operand0, gen_rtx_MEM (mode, scratch_reg)));
      return 1;
    }
  else if (fp_reg_operand (operand1, mode)
	   && ((GET_CODE (operand0) == MEM
		&& ! memory_address_p (DFmode, XEXP (operand0, 0)))
	       || ((GET_CODE (operand0) == SUBREG)
		   && GET_CODE (XEXP (operand0, 0)) == MEM
		   && !memory_address_p (DFmode, XEXP (XEXP (operand0, 0), 0))))
	   && scratch_reg)
    {
      /* Mirror case: FP register stored to a symbolic address.  */
      if (GET_CODE (operand0) == SUBREG)
	operand0 = XEXP (operand0, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in SImode regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (SImode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand0, 0)))
	{
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
	  emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0,
								       0)),
						       Pmode,
						       XEXP (XEXP (operand0, 0),
							     0),
						       scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand0, 0));
      emit_insn (gen_rtx_SET (gen_rtx_MEM (mode, scratch_reg), operand1));
      return 1;
    }
  /* Handle secondary reloads for loads of FP registers from constant
     expressions by forcing the constant into memory.

     use scratch_reg to hold the address of the memory location.

     The proper fix is to change PREFERRED_RELOAD_CLASS to return
     NO_REGS when presented with a const_int and an register class
     containing only FP registers.  Doing so unfortunately creates
     more problems than it solves.   Fix this for 2.5.  */
  else if (fp_reg_operand (operand0, mode)
	   && CONSTANT_P (operand1)
	   && scratch_reg)
    {
      rtx xoperands[2];

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in SImode regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (SImode, scratch_reg);

      /* Force the constant into memory and put the address of the
	 memory location into scratch_reg.  */
      xoperands[0] = scratch_reg;
      xoperands[1] = XEXP (force_const_mem (mode, operand1), 0);
      emit_insn (gen_rtx_SET (scratch_reg, xoperands[1]));

      /* Now load the destination register.  */
      emit_insn (gen_rtx_SET (operand0, gen_rtx_MEM (mode, scratch_reg)));
      return 1;
    }

  /* Now have insn-emit do whatever it normally does.  */
  return 0;
}
3823
3824 /* Split one or more DImode RTL references into pairs of SImode
3825 references. The RTL can be REG, offsettable MEM, integer constant, or
3826 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
3827 split and "num" is its length. lo_half and hi_half are output arrays
3828 that parallel "operands". */
3829
3830 void
3831 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
3832 {
3833 while (num--)
3834 {
3835 rtx op = operands[num];
3836
3837 /* simplify_subreg refuses to split volatile memory addresses,
3838 but we still have to handle it. */
3839 if (GET_CODE (op) == MEM)
3840 {
3841 lo_half[num] = adjust_address (op, SImode, 4);
3842 hi_half[num] = adjust_address (op, SImode, 0);
3843 }
3844 else
3845 {
3846 lo_half[num] = simplify_gen_subreg (SImode, op,
3847 GET_MODE (op) == VOIDmode
3848 ? DImode : GET_MODE (op), 4);
3849 hi_half[num] = simplify_gen_subreg (SImode, op,
3850 GET_MODE (op) == VOIDmode
3851 ? DImode : GET_MODE (op), 0);
3852 }
3853 }
3854 }
3855
3856 /* Split X into a base and a constant offset, storing them in *BASE
3857 and *OFFSET respectively. */
3858
3859 static void
3860 m68k_split_offset (rtx x, rtx *base, HOST_WIDE_INT *offset)
3861 {
3862 *offset = 0;
3863 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
3864 {
3865 *offset += INTVAL (XEXP (x, 1));
3866 x = XEXP (x, 0);
3867 }
3868 *base = x;
3869 }
3870
/* Return true if PATTERN is a PARALLEL suitable for a movem or fmovem
   instruction.  STORE_P says whether the move is a load or store.

   If the instruction uses post-increment or pre-decrement addressing,
   AUTOMOD_BASE is the base register and AUTOMOD_OFFSET is the total
   adjustment.  This adjustment will be made by the first element of
   PARALLEL, with the loads or stores starting at element 1.  If the
   instruction does not use post-increment or pre-decrement addressing,
   AUTOMOD_BASE is null, AUTOMOD_OFFSET is 0, and the loads or stores
   start at element 0.  */

bool
m68k_movem_pattern_p (rtx pattern, rtx automod_base,
		      HOST_WIDE_INT automod_offset, bool store_p)
{
  rtx base, mem_base, set, mem, reg, last_reg;
  HOST_WIDE_INT offset, mem_offset;
  int i, first, len;
  enum reg_class rclass;

  len = XVECLEN (pattern, 0);
  /* Element 0 is the automodification itself when present, so the
     loads/stores start at element 1 in that case.  */
  first = (automod_base != NULL);

  if (automod_base)
    {
      /* Stores must be pre-decrement and loads must be post-increment.  */
      if (store_p != (automod_offset < 0))
	return false;

      /* Work out the base and offset for lowest memory location.  */
      base = automod_base;
      offset = (automod_offset < 0 ? automod_offset : 0);
    }
  else
    {
      /* Allow any valid base and offset in the first access.  */
      base = NULL;
      offset = 0;
    }

  last_reg = NULL;
  rclass = NO_REGS;
  for (i = first; i < len; i++)
    {
      /* We need a plain SET.  */
      set = XVECEXP (pattern, 0, i);
      if (GET_CODE (set) != SET)
	return false;

      /* Check that we have a memory location...  */
      mem = XEXP (set, !store_p);
      if (!MEM_P (mem) || !memory_operand (mem, VOIDmode))
	return false;

      /* ...with the right address.  Each access must be contiguous
	 with the previous one.  */
      if (base == NULL)
	{
	  m68k_split_offset (XEXP (mem, 0), &base, &offset);
	  /* The ColdFire instruction only allows (An) and (d16,An) modes.
	     There are no mode restrictions for 680x0 besides the
	     automodification rules enforced above.  */
	  if (TARGET_COLDFIRE
	      && !m68k_legitimate_base_reg_p (base, reload_completed))
	    return false;
	}
      else
	{
	  m68k_split_offset (XEXP (mem, 0), &mem_base, &mem_offset);
	  if (!rtx_equal_p (base, mem_base) || offset != mem_offset)
	    return false;
	}

      /* Check that we have a register of the required mode and class.  */
      reg = XEXP (set, store_p);
      if (!REG_P (reg)
	  || !HARD_REGISTER_P (reg)
	  || GET_MODE (reg) != reg_raw_mode[REGNO (reg)])
	return false;

      if (last_reg)
	{
	  /* The register must belong to RCLASS and have a higher number
	     than the register in the previous SET.  */
	  if (!TEST_HARD_REG_BIT (reg_class_contents[rclass], REGNO (reg))
	      || REGNO (last_reg) >= REGNO (reg))
	    return false;
	}
      else
	{
	  /* Work out which register class we need.  */
	  if (INT_REGNO_P (REGNO (reg)))
	    rclass = GENERAL_REGS;
	  else if (FP_REGNO_P (REGNO (reg)))
	    rclass = FP_REGS;
	  else
	    return false;
	}

      last_reg = reg;
      offset += GET_MODE_SIZE (GET_MODE (reg));
    }

  /* If we have an automodification, check whether the final offset is OK.  */
  if (automod_base && offset != (automod_offset < 0 ? 0 : automod_offset))
    return false;

  /* Reject unprofitable cases.  */
  if (len < first + (rclass == FP_REGS ? MIN_FMOVEM_REGS : MIN_MOVEM_REGS))
    return false;

  return true;
}
3983
/* Return the assembly code template for a movem or fmovem instruction
   whose pattern is given by PATTERN.  Store the template's operands
   in OPERANDS.

   If the instruction uses post-increment or pre-decrement addressing,
   AUTOMOD_OFFSET is the total adjustment, otherwise it is 0.  STORE_P
   is true if this is a store instruction.  */

const char *
m68k_output_movem (rtx *operands, rtx pattern,
		   HOST_WIDE_INT automod_offset, bool store_p)
{
  unsigned int mask;
  int i, first;

  gcc_assert (GET_CODE (pattern) == PARALLEL);
  mask = 0;
  /* Skip the automodification SET, if any.  */
  first = (automod_offset != 0);
  for (i = first; i < XVECLEN (pattern, 0); i++)
    {
      /* When using movem with pre-decrement addressing, register X + D0_REG
	 is controlled by bit 15 - X.  For all other addressing modes,
	 register X + D0_REG is controlled by bit X.  Confusingly, the
	 register mask for fmovem is in the opposite order to that for
	 movem.  */
      unsigned int regno;

      gcc_assert (MEM_P (XEXP (XVECEXP (pattern, 0, i), !store_p)));
      gcc_assert (REG_P (XEXP (XVECEXP (pattern, 0, i), store_p)));
      regno = REGNO (XEXP (XVECEXP (pattern, 0, i), store_p));
      if (automod_offset < 0)
	{
	  if (FP_REGNO_P (regno))
	    mask |= 1 << (regno - FP0_REG);
	  else
	    mask |= 1 << (15 - (regno - D0_REG));
	}
      else
	{
	  if (FP_REGNO_P (regno))
	    mask |= 1 << (7 - (regno - FP0_REG));
	  else
	    mask |= 1 << (regno - D0_REG);
	}
    }
  CC_STATUS_INIT;

  /* Build the address operand: plain address, pre-decrement, or
     post-increment on the automodified base register.  */
  if (automod_offset == 0)
    operands[0] = XEXP (XEXP (XVECEXP (pattern, 0, first), !store_p), 0);
  else if (automod_offset < 0)
    operands[0] = gen_rtx_PRE_DEC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
  else
    operands[0] = gen_rtx_POST_INC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
  operands[1] = GEN_INT (mask);
  if (FP_REGNO_P (REGNO (XEXP (XVECEXP (pattern, 0, first), store_p))))
    {
      if (store_p)
	return "fmovem %1,%a0";
      else
	return "fmovem %a0,%1";
    }
  else
    {
      if (store_p)
	return "movem%.l %1,%a0";
      else
	return "movem%.l %a0,%1";
    }
}
4053
4054 /* Return a REG that occurs in ADDR with coefficient 1.
4055 ADDR can be effectively incremented by incrementing REG. */
4056
4057 static rtx
4058 find_addr_reg (rtx addr)
4059 {
4060 while (GET_CODE (addr) == PLUS)
4061 {
4062 if (GET_CODE (XEXP (addr, 0)) == REG)
4063 addr = XEXP (addr, 0);
4064 else if (GET_CODE (XEXP (addr, 1)) == REG)
4065 addr = XEXP (addr, 1);
4066 else if (CONSTANT_P (XEXP (addr, 0)))
4067 addr = XEXP (addr, 1);
4068 else if (CONSTANT_P (XEXP (addr, 1)))
4069 addr = XEXP (addr, 0);
4070 else
4071 gcc_unreachable ();
4072 }
4073 gcc_assert (GET_CODE (addr) == REG);
4074 return addr;
4075 }
4076
/* Output assembler code to perform a 32-bit 3-operand add.  */

const char *
output_addsi3 (rtx *operands)
{
  if (! operands_match_p (operands[0], operands[1]))
    {
      /* Three distinct operands: use lea, which needs an address
	 register as the base, so commute the sources if operands[1]
	 is not one.  */
      if (!ADDRESS_REG_P (operands[1]))
	{
	  rtx tmp = operands[1];

	  operands[1] = operands[2];
	  operands[2] = tmp;
	}

      /* These insns can result from reloads to access
	 stack slots over 64k from the frame pointer.  */
      if (GET_CODE (operands[2]) == CONST_INT
	  && (INTVAL (operands[2]) < -32768 || INTVAL (operands[2]) > 32767))
	return "move%.l %2,%0\n\tadd%.l %1,%0";
      if (GET_CODE (operands[2]) == REG)
	return MOTOROLA ? "lea (%1,%2.l),%0" : "lea %1@(0,%2:l),%0";
      return MOTOROLA ? "lea (%c2,%1),%0" : "lea %1@(%c2),%0";
    }
  if (GET_CODE (operands[2]) == CONST_INT)
    {
      /* addq/subq encode immediates in the range 1..8.  */
      if (INTVAL (operands[2]) > 0
	  && INTVAL (operands[2]) <= 8)
	return "addq%.l %2,%0";
      if (INTVAL (operands[2]) < 0
	  && INTVAL (operands[2]) >= -8)
	{
	  operands[2] = GEN_INT (- INTVAL (operands[2]));
	  return "subq%.l %2,%0";
	}
      /* On the CPU32 it is faster to use two addql instructions to
	 add a small integer (8 < N <= 16) to a register.
	 Likewise for subql.  */
      if (TUNE_CPU32 && REG_P (operands[0]))
	{
	  if (INTVAL (operands[2]) > 8
	      && INTVAL (operands[2]) <= 16)
	    {
	      operands[2] = GEN_INT (INTVAL (operands[2]) - 8);
	      return "addq%.l #8,%0\n\taddq%.l %2,%0";
	    }
	  if (INTVAL (operands[2]) < -8
	      && INTVAL (operands[2]) >= -16)
	    {
	      operands[2] = GEN_INT (- INTVAL (operands[2]) - 8);
	      return "subq%.l #8,%0\n\tsubq%.l %2,%0";
	    }
	}
      /* Address-register destination with a 16-bit constant: use a
	 word add on the 68040, otherwise lea.  */
      if (ADDRESS_REG_P (operands[0])
	  && INTVAL (operands[2]) >= -0x8000
	  && INTVAL (operands[2]) < 0x8000)
	{
	  if (TUNE_68040)
	    return "add%.w %2,%0";
	  else
	    return MOTOROLA ? "lea (%c2,%0),%0" : "lea %0@(%c2),%0";
	}
    }
  return "add%.l %2,%0";
}
4142 \f
4143 /* Store in cc_status the expressions that the condition codes will
4144 describe after execution of an instruction whose pattern is EXP.
4145 Do not alter them if the instruction would not alter the cc's. */
4146
4147 /* On the 68000, all the insns to store in an address register fail to
4148 set the cc's. However, in some cases these instructions can make it
4149 possibly invalid to use the saved cc's. In those cases we clear out
4150 some or all of the saved cc's so they won't be used. */
4151
void
notice_update_cc (rtx exp, rtx insn)
{
  if (GET_CODE (exp) == SET)
    {
      /* A call clobbers the flags entirely.  */
      if (GET_CODE (SET_SRC (exp)) == CALL)
	CC_STATUS_INIT;
      else if (ADDRESS_REG_P (SET_DEST (exp)))
	{
	  /* Moves into an address register leave the flags alone, but
	     may invalidate a saved value that mentions the register.  */
	  if (cc_status.value1 && modified_in_p (cc_status.value1, insn))
	    cc_status.value1 = 0;
	  if (cc_status.value2 && modified_in_p (cc_status.value2, insn))
	    cc_status.value2 = 0;
	}
      /* fmoves to memory or data registers do not set the condition
	 codes.  Normal moves _do_ set the condition codes, but not in
	 a way that is appropriate for comparison with 0, because -0.0
	 would be treated as a negative nonzero number.  Note that it
	 isn't appropriate to conditionalize this restriction on
	 HONOR_SIGNED_ZEROS because that macro merely indicates whether
	 we care about the difference between -0.0 and +0.0.  */
      else if (!FP_REG_P (SET_DEST (exp))
	       && SET_DEST (exp) != cc0_rtx
	       && (FP_REG_P (SET_SRC (exp))
		   || GET_CODE (SET_SRC (exp)) == FIX
		   || FLOAT_MODE_P (GET_MODE (SET_DEST (exp)))))
	CC_STATUS_INIT;
      /* A pair of move insns doesn't produce a useful overall cc.  */
      else if (!FP_REG_P (SET_DEST (exp))
	       && !FP_REG_P (SET_SRC (exp))
	       && GET_MODE_SIZE (GET_MODE (SET_SRC (exp))) > 4
	       && (GET_CODE (SET_SRC (exp)) == REG
		   || GET_CODE (SET_SRC (exp)) == MEM
		   || GET_CODE (SET_SRC (exp)) == CONST_DOUBLE))
	CC_STATUS_INIT;
      /* Otherwise record that the flags describe SET_DEST compared
	 against zero (and, equivalently, the value SET_SRC).  */
      else if (SET_DEST (exp) != pc_rtx)
	{
	  cc_status.flags = 0;
	  cc_status.value1 = SET_DEST (exp);
	  cc_status.value2 = SET_SRC (exp);
	}
    }
  else if (GET_CODE (exp) == PARALLEL
	   && GET_CODE (XVECEXP (exp, 0, 0)) == SET)
    {
      /* For a PARALLEL, only the first SET is tracked; anything
	 targeting an address register invalidates the status.  */
      rtx dest = SET_DEST (XVECEXP (exp, 0, 0));
      rtx src = SET_SRC (XVECEXP (exp, 0, 0));

      if (ADDRESS_REG_P (dest))
	CC_STATUS_INIT;
      else if (dest != pc_rtx)
	{
	  cc_status.flags = 0;
	  cc_status.value1 = dest;
	  cc_status.value2 = src;
	}
    }
  else
    CC_STATUS_INIT;
  /* QImode reads of an address register don't yield usable flags.  */
  if (cc_status.value2 != 0
      && ADDRESS_REG_P (cc_status.value2)
      && GET_MODE (cc_status.value2) == QImode)
    CC_STATUS_INIT;
  if (cc_status.value2 != 0)
    switch (GET_CODE (cc_status.value2))
      {
      case ASHIFT: case ASHIFTRT: case LSHIFTRT:
      case ROTATE: case ROTATERT:
	/* These instructions always clear the overflow bit, and set
	   the carry to the bit shifted out.  */
	cc_status.flags |= CC_OVERFLOW_UNUSABLE | CC_NO_CARRY;
	break;

      case PLUS: case MINUS: case MULT:
      case DIV: case UDIV: case MOD: case UMOD: case NEG:
	if (GET_MODE (cc_status.value2) != VOIDmode)
	  cc_status.flags |= CC_NO_OVERFLOW;
	break;
      case ZERO_EXTEND:
	/* (SET r1 (ZERO_EXTEND r2)) on this machine
	   ends with a move insn moving r2 in r2's mode.
	   Thus, the cc's are set for r2.
	   This can set N bit spuriously.  */
	cc_status.flags |= CC_NOT_NEGATIVE;
	/* FALLTHRU (default does nothing).  */

      default:
	break;
      }
  /* If the tracked register also appears in value2, value2 describes a
     stale value; drop it.  */
  if (cc_status.value1 && GET_CODE (cc_status.value1) == REG
      && cc_status.value2
      && reg_overlap_mentioned_p (cc_status.value1, cc_status.value2))
    cc_status.value2 = 0;
  /* Check for PRE_DEC in dest modifying a register used in src.  */
  if (cc_status.value1 && GET_CODE (cc_status.value1) == MEM
      && GET_CODE (XEXP (cc_status.value1, 0)) == PRE_DEC
      && cc_status.value2
      && reg_overlap_mentioned_p (XEXP (XEXP (cc_status.value1, 0), 0),
				  cc_status.value2))
    cc_status.value2 = 0;
  /* Values in FP registers mean the flags live in the 68881, not the
     integer CCR.  */
  if (((cc_status.value1 && FP_REG_P (cc_status.value1))
       || (cc_status.value2 && FP_REG_P (cc_status.value2))))
    cc_status.flags = CC_IN_68881;
  if (cc_status.value2 && GET_CODE (cc_status.value2) == COMPARE
      && GET_MODE_CLASS (GET_MODE (XEXP (cc_status.value2, 0))) == MODE_FLOAT)
    {
      cc_status.flags = CC_IN_68881;
      /* A float compare whose FP register ended up as the second
	 operand was emitted with the operands swapped.  */
      if (!FP_REG_P (XEXP (cc_status.value2, 0))
	  && FP_REG_P (XEXP (cc_status.value2, 1)))
	cc_status.flags |= CC_REVERSED;
    }
}
4263 \f
4264 const char *
4265 output_move_const_double (rtx *operands)
4266 {
4267 int code = standard_68881_constant_p (operands[1]);
4268
4269 if (code != 0)
4270 {
4271 static char buf[40];
4272
4273 sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff);
4274 return buf;
4275 }
4276 return "fmove%.d %1,%0";
4277 }
4278
4279 const char *
4280 output_move_const_single (rtx *operands)
4281 {
4282 int code = standard_68881_constant_p (operands[1]);
4283
4284 if (code != 0)
4285 {
4286 static char buf[40];
4287
4288 sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff);
4289 return buf;
4290 }
4291 return "fmove%.s %f1,%0";
4292 }
4293
4294 /* Return nonzero if X, a CONST_DOUBLE, has a value that we can get
4295 from the "fmovecr" instruction.
4296 The value, anded with 0xff, gives the code to use in fmovecr
4297 to get the desired constant. */
4298
4299 /* This code has been fixed for cross-compilation. */
4300
/* Nonzero once values_68881 has been filled in by init_68881_table.  */
static int inited_68881_table = 0;

/* Decimal spellings of the constants available from the 68881's
   on-chip constant ROM, in the same order as codes_68881 below.  */
static const char *const strings_68881[7] = {
  "0.0",
  "1.0",
  "10.0",
  "100.0",
  "10000.0",
  "1e8",
  "1e16"
};

/* fmovecr ROM offsets corresponding entry-for-entry to strings_68881.  */
static const int codes_68881[7] = {
  0x0f,
  0x32,
  0x33,
  0x34,
  0x35,
  0x36,
  0x37
};

/* Binary (REAL_VALUE_TYPE) forms of strings_68881, computed lazily by
   init_68881_table.  */
REAL_VALUE_TYPE values_68881[7];
4324
4325 /* Set up values_68881 array by converting the decimal values
4326 strings_68881 to binary. */
4327
4328 void
4329 init_68881_table (void)
4330 {
4331 int i;
4332 REAL_VALUE_TYPE r;
4333 machine_mode mode;
4334
4335 mode = SFmode;
4336 for (i = 0; i < 7; i++)
4337 {
4338 if (i == 6)
4339 mode = DFmode;
4340 r = REAL_VALUE_ATOF (strings_68881[i], mode);
4341 values_68881[i] = r;
4342 }
4343 inited_68881_table = 1;
4344 }
4345
4346 int
4347 standard_68881_constant_p (rtx x)
4348 {
4349 const REAL_VALUE_TYPE *r;
4350 int i;
4351
4352 /* fmovecr must be emulated on the 68040 and 68060, so it shouldn't be
4353 used at all on those chips. */
4354 if (TUNE_68040_60)
4355 return 0;
4356
4357 if (! inited_68881_table)
4358 init_68881_table ();
4359
4360 r = CONST_DOUBLE_REAL_VALUE (x);
4361
4362 /* Use real_identical instead of real_equal so that -0.0 is rejected. */
4363 for (i = 0; i < 6; i++)
4364 {
4365 if (real_identical (r, &values_68881[i]))
4366 return (codes_68881[i]);
4367 }
4368
4369 if (GET_MODE (x) == SFmode)
4370 return 0;
4371
4372 if (real_equal (r, &values_68881[6]))
4373 return (codes_68881[6]);
4374
4375 /* larger powers of ten in the constants ram are not used
4376 because they are not equal to a `double' C constant. */
4377 return 0;
4378 }
4379
4380 /* If X is a floating-point constant, return the logarithm of X base 2,
4381 or 0 if X is not a power of 2. */
4382
4383 int
4384 floating_exact_log2 (rtx x)
4385 {
4386 const REAL_VALUE_TYPE *r;
4387 REAL_VALUE_TYPE r1;
4388 int exp;
4389
4390 r = CONST_DOUBLE_REAL_VALUE (x);
4391
4392 if (real_less (r, &dconst1))
4393 return 0;
4394
4395 exp = real_exponent (r);
4396 real_2expN (&r1, exp, DFmode);
4397 if (real_equal (&r1, r))
4398 return exp;
4399
4400 return 0;
4401 }
4402 \f
4403 /* A C compound statement to output to stdio stream STREAM the
4404 assembler syntax for an instruction operand X. X is an RTL
4405 expression.
4406
4407 CODE is a value that can be used to specify one of several ways
4408 of printing the operand. It is used when identical operands
4409 must be printed differently depending on the context. CODE
4410 comes from the `%' specification that was used to request
4411 printing of the operand. If the specification was just `%DIGIT'
4412 then CODE is 0; if the specification was `%LTR DIGIT' then CODE
4413 is the ASCII code for LTR.
4414
4415 If X is a register, this macro should print the register's name.
4416 The names can be found in an array `reg_names' whose type is
4417 `char *[]'. `reg_names' is initialized from `REGISTER_NAMES'.
4418
4419 When the machine description has a specification `%PUNCT' (a `%'
4420 followed by a punctuation character), this macro is called with
4421 a null pointer for X and the punctuation character for CODE.
4422
4423 The m68k specific codes are:
4424
4425 '.' for dot needed in Motorola-style opcode names.
4426 '-' for an operand pushing on the stack:
4427 sp@-, -(sp) or -(%sp) depending on the style of syntax.
4428 '+' for an operand pushing on the stack:
4429 sp@+, (sp)+ or (%sp)+ depending on the style of syntax.
4430 '@' for a reference to the top word on the stack:
4431 sp@, (sp) or (%sp) depending on the style of syntax.
4432 '#' for an immediate operand prefix (# in MIT and Motorola syntax
4433 but & in SGS syntax).
4434 '!' for the cc register (used in an `and to cc' insn).
4435 '$' for the letter `s' in an op code, but only on the 68040.
4436 '&' for the letter `d' in an op code, but only on the 68040.
4437 '/' for register prefix needed by longlong.h.
4438 '?' for m68k_library_id_string
4439
4440 'b' for byte insn (no effect, on the Sun; this is for the ISI).
4441 'd' to force memory addressing to be absolute, not relative.
4442 'f' for float insn (print a CONST_DOUBLE as a float rather than in hex)
4443 'x' for float insn (print a CONST_DOUBLE as a float rather than in hex),
4444 or print pair of registers as rx:ry.
4445 'p' print an address with @PLTPC attached, but only if the operand
4446 is not locally-bound. */
4447
void
print_operand (FILE *file, rtx op, int letter)
{
  /* Punctuation codes first: for these OP may be null.  */
  if (letter == '.')
    {
      /* Motorola syntax spells size suffixes with a dot (move.l);
	 MIT syntax runs them together (movel).  */
      if (MOTOROLA)
	fprintf (file, ".");
    }
  else if (letter == '#')
    asm_fprintf (file, "%I");
  else if (letter == '-')
    asm_fprintf (file, MOTOROLA ? "-(%Rsp)" : "%Rsp@-");
  else if (letter == '+')
    asm_fprintf (file, MOTOROLA ? "(%Rsp)+" : "%Rsp@+");
  else if (letter == '@')
    asm_fprintf (file, MOTOROLA ? "(%Rsp)" : "%Rsp@");
  else if (letter == '!')
    asm_fprintf (file, "%Rfpcr");
  else if (letter == '$')
    {
      /* The 's' opcode letter is only emitted on the 68040.  */
      if (TARGET_68040)
	fprintf (file, "s");
    }
  else if (letter == '&')
    {
      /* The 'd' opcode letter is only emitted on the 68040.  */
      if (TARGET_68040)
	fprintf (file, "d");
    }
  else if (letter == '/')
    asm_fprintf (file, "%R");
  else if (letter == '?')
    asm_fprintf (file, m68k_library_id_string);
  else if (letter == 'p')
    {
      output_addr_const (file, op);
      /* Locally-bound symbols are called directly, without the PLT.  */
      if (!(GET_CODE (op) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (op)))
	fprintf (file, "@PLTPC");
    }
  else if (GET_CODE (op) == REG)
    {
      if (letter == 'R')
	/* Print out the second register name of a register pair.
	   I.e., R (6) => 7.  */
	fputs (M68K_REGNAME(REGNO (op) + 1), file);
      else
	fputs (M68K_REGNAME(REGNO (op)), file);
    }
  else if (GET_CODE (op) == MEM)
    {
      output_address (GET_MODE (op), XEXP (op, 0));
      /* 'd': force an absolute-long suffix on pre-68020 parts, except
	 for small constant addresses that already fit in 16 bits.  */
      if (letter == 'd' && ! TARGET_68020
	  && CONSTANT_ADDRESS_P (XEXP (op, 0))
	  && !(GET_CODE (XEXP (op, 0)) == CONST_INT
	       && INTVAL (XEXP (op, 0)) < 0x8000
	       && INTVAL (XEXP (op, 0)) >= -0x8000))
	fprintf (file, MOTOROLA ? ".l" : ":l");
    }
  else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == SFmode)
    {
      /* Single float: emit the 32-bit target image as hex.  */
      long l;
      REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), l);
      asm_fprintf (file, "%I0x%lx", l & 0xFFFFFFFF);
    }
  else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == XFmode)
    {
      /* Extended float: three 32-bit words, most significant first.  */
      long l[3];
      REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op), l);
      asm_fprintf (file, "%I0x%lx%08lx%08lx", l[0] & 0xFFFFFFFF,
		   l[1] & 0xFFFFFFFF, l[2] & 0xFFFFFFFF);
    }
  else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == DFmode)
    {
      /* Double float: two 32-bit words, most significant first.  */
      long l[2];
      REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op), l);
      asm_fprintf (file, "%I0x%lx%08lx", l[0] & 0xFFFFFFFF, l[1] & 0xFFFFFFFF);
    }
  else
    {
      /* Use `print_operand_address' instead of `output_addr_const'
	 to ensure that we print relevant PIC stuff.  */
      asm_fprintf (file, "%I");
      if (TARGET_PCREL
	  && (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == CONST))
	print_operand_address (file, op);
      else
	output_addr_const (file, op);
    }
}
4536
4537 /* Return string for TLS relocation RELOC. */
4538
/* Return string for TLS relocation RELOC.  */

static const char *
m68k_get_reloc_decoration (enum m68k_reloc reloc)
{
  /* To my knowledge, !MOTOROLA assemblers don't support TLS.  */
  gcc_assert (MOTOROLA || reloc == RELOC_GOT);

  switch (reloc)
    {
    case RELOC_GOT:
      if (MOTOROLA)
	{
	  /* -fpic on a 68020+ can use a 16-bit GOT offset.  */
	  if (flag_pic == 1 && TARGET_68020)
	    return "@GOT.w";
	  else
	    return "@GOT";
	}
      else
	{
	  if (TARGET_68020)
	    {
	      switch (flag_pic)
		{
		case 1:
		  return ":w";
		case 2:
		  return ":l";
		default:
		  return "";
		}
	    }
	  /* NOTE(review): !MOTOROLA && !TARGET_68020 falls through to
	     the gcc_unreachable below — presumed impossible here.  */
	}
      gcc_unreachable ();

    case RELOC_TLSGD:
      return "@TLSGD";

    case RELOC_TLSLDM:
      return "@TLSLDM";

    case RELOC_TLSLDO:
      return "@TLSLDO";

    case RELOC_TLSIE:
      return "@TLSIE";

    case RELOC_TLSLE:
      return "@TLSLE";

    default:
      gcc_unreachable ();
    }
}
4591
4592 /* m68k implementation of TARGET_OUTPUT_ADDR_CONST_EXTRA. */
4593
4594 static bool
4595 m68k_output_addr_const_extra (FILE *file, rtx x)
4596 {
4597 if (GET_CODE (x) == UNSPEC)
4598 {
4599 switch (XINT (x, 1))
4600 {
4601 case UNSPEC_RELOC16:
4602 case UNSPEC_RELOC32:
4603 output_addr_const (file, XVECEXP (x, 0, 0));
4604 fputs (m68k_get_reloc_decoration
4605 ((enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1))), file);
4606 return true;
4607
4608 default:
4609 break;
4610 }
4611 }
4612
4613 return false;
4614 }
4615
4616 /* M68K implementation of TARGET_ASM_OUTPUT_DWARF_DTPREL. */
4617
/* M68K implementation of TARGET_ASM_OUTPUT_DWARF_DTPREL.  Emit a
   4-byte DTP-relative reference to X for debug info.  */

static void
m68k_output_dwarf_dtprel (FILE *file, int size, rtx x)
{
  /* Only 4-byte references are supported here.  */
  gcc_assert (size == 4);
  fputs ("\t.long\t", file);
  output_addr_const (file, x);
  fputs ("@TLSLDO+0x8000", file);
}
4626
4627 /* In the name of slightly smaller debug output, and to cater to
4628 general assembler lossage, recognize various UNSPEC sequences
4629 and turn them back into a direct symbol reference. */
4630
/* In the name of slightly smaller debug output, and to cater to
   general assembler lossage, recognize various UNSPEC sequences
   and turn them back into a direct symbol reference.  Returns
   ORIG_X unchanged when no recognizable sequence is found.  */

static rtx
m68k_delegitimize_address (rtx orig_x)
{
  rtx x;
  struct m68k_address addr;
  rtx unspec;

  orig_x = delegitimize_mem_from_attrs (orig_x);
  x = orig_x;
  if (MEM_P (x))
    x = XEXP (x, 0);

  if (GET_CODE (x) != PLUS || GET_MODE (x) != Pmode)
    return orig_x;

  /* Only addresses whose offset is a CONST wrapper are candidates.  */
  if (!m68k_decompose_address (GET_MODE (x), x, false, &addr)
      || addr.offset == NULL_RTX
      || GET_CODE (addr.offset) != CONST)
    return orig_x;

  unspec = XEXP (addr.offset, 0);
  /* Step over an outer (plus (unspec ...) (const_int ...)).  */
  if (GET_CODE (unspec) == PLUS && CONST_INT_P (XEXP (unspec, 1)))
    unspec = XEXP (unspec, 0);
  if (GET_CODE (unspec) != UNSPEC
      || (XINT (unspec, 1) != UNSPEC_RELOC16
	  && XINT (unspec, 1) != UNSPEC_RELOC32))
    return orig_x;
  x = XVECEXP (unspec, 0, 0);
  gcc_assert (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF);
  /* Re-add the constant addend that was stripped above, if any.  */
  if (unspec != XEXP (addr.offset, 0))
    x = gen_rtx_PLUS (Pmode, x, XEXP (XEXP (addr.offset, 0), 1));
  /* Rebuild the index (scaled if necessary) and base components
     around the bare symbol.  */
  if (addr.index)
    {
      rtx idx = addr.index;
      if (addr.scale != 1)
	idx = gen_rtx_MULT (Pmode, idx, GEN_INT (addr.scale));
      x = gen_rtx_PLUS (Pmode, idx, x);
    }
  if (addr.base)
    x = gen_rtx_PLUS (Pmode, addr.base, x);
  if (MEM_P (orig_x))
    x = replace_equiv_address_nv (orig_x, x);
  return x;
}
4675
4676 \f
4677 /* A C compound statement to output to stdio stream STREAM the
4678 assembler syntax for an instruction operand that is a memory
4679 reference whose address is ADDR. ADDR is an RTL expression.
4680
4681 Note that this contains a kludge that knows that the only reason
4682 we have an address (plus (label_ref...) (reg...)) when not generating
4683 PIC code is in the insn before a tablejump, and we know that m68k.md
4684 generates a label LInnn: on such an insn.
4685
4686 It is possible for PIC to generate a (plus (label_ref...) (reg...))
4687 and we handle that just like we would a (plus (symbol_ref...) (reg...)).
4688
4689 This routine is responsible for distinguishing between -fpic and -fPIC
4690 style relocations in an address. When generating -fpic code the
4691 offset is output in word mode (e.g. movel a5@(_foo:w), a0). When generating
4692 -fPIC code the offset is output in long mode (e.g. movel a5@(_foo:l), a0) */
4693
void
print_operand_address (FILE *file, rtx addr)
{
  struct m68k_address address;

  /* QImode appears to serve only as a placeholder mode for the
     decomposition — TODO confirm; a non-decomposable ADDR is a bug
     upstream.  */
  if (!m68k_decompose_address (QImode, addr, true, &address))
    gcc_unreachable ();

  if (address.code == PRE_DEC)
    fprintf (file, MOTOROLA ? "-(%s)" : "%s@-",
	     M68K_REGNAME (REGNO (address.base)));
  else if (address.code == POST_INC)
    fprintf (file, MOTOROLA ? "(%s)+" : "%s@+",
	     M68K_REGNAME (REGNO (address.base)));
  else if (!address.base && !address.index)
    {
      /* A constant address.  */
      gcc_assert (address.offset == addr);
      if (GET_CODE (addr) == CONST_INT)
	{
	  /* (xxx).w or (xxx).l.  */
	  if (IN_RANGE (INTVAL (addr), -0x8000, 0x7fff))
	    fprintf (file, MOTOROLA ? "%d.w" : "%d:w", (int) INTVAL (addr));
	  else
	    fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (addr));
	}
      else if (TARGET_PCREL)
	{
	  /* (d16,PC) or (bd,PC,Xn) (with suppressed index register).  */
	  fputc ('(', file);
	  output_addr_const (file, addr);
	  /* -fpic gets a 16-bit offset; -fPIC a 32-bit one.  */
	  asm_fprintf (file, flag_pic == 1 ? ":w,%Rpc)" : ":l,%Rpc)");
	}
      else
	{
	  /* (xxx).l.  We need a special case for SYMBOL_REF if the symbol
	     name ends in `.<letter>', as the last 2 characters can be
	     mistaken as a size suffix.  Put the name in parentheses.  */
	  if (GET_CODE (addr) == SYMBOL_REF
	      && strlen (XSTR (addr, 0)) > 2
	      && XSTR (addr, 0)[strlen (XSTR (addr, 0)) - 2] == '.')
	    {
	      putc ('(', file);
	      output_addr_const (file, addr);
	      putc (')', file);
	    }
	  else
	    output_addr_const (file, addr);
	}
    }
  else
    {
      int labelno;

      /* If ADDR is a (d8,pc,Xn) address, this is the number of the
	 label being accessed, otherwise it is -1.  */
      labelno = (address.offset
		 && !address.base
		 && GET_CODE (address.offset) == LABEL_REF
		 ? CODE_LABEL_NUMBER (XEXP (address.offset, 0))
		 : -1);
      if (MOTOROLA)
	{
	  /* Print the "offset(base" component.  */
	  if (labelno >= 0)
	    asm_fprintf (file, "%LL%d(%Rpc,", labelno);
	  else
	    {
	      if (address.offset)
		output_addr_const (file, address.offset);

	      putc ('(', file);
	      if (address.base)
		fputs (M68K_REGNAME (REGNO (address.base)), file);
	    }
	  /* Print the ",index" component, if any.  */
	  if (address.index)
	    {
	      if (address.base)
		putc (',', file);
	      fprintf (file, "%s.%c",
		       M68K_REGNAME (REGNO (address.index)),
		       GET_MODE (address.index) == HImode ? 'w' : 'l');
	      if (address.scale != 1)
		fprintf (file, "*%d", address.scale);
	    }
	  putc (')', file);
	}
      else /* !MOTOROLA */
	{
	  if (!address.offset && !address.index)
	    fprintf (file, "%s@", M68K_REGNAME (REGNO (address.base)));
	  else
	    {
	      /* Print the "base@(offset" component.  */
	      if (labelno >= 0)
		asm_fprintf (file, "%Rpc@(%LL%d", labelno);
	      else
		{
		  if (address.base)
		    fputs (M68K_REGNAME (REGNO (address.base)), file);
		  fprintf (file, "@(");
		  if (address.offset)
		    output_addr_const (file, address.offset);
		}
	      /* Print the ",index" component, if any.  */
	      if (address.index)
		{
		  fprintf (file, ",%s:%c",
			   M68K_REGNAME (REGNO (address.index)),
			   GET_MODE (address.index) == HImode ? 'w' : 'l');
		  if (address.scale != 1)
		    fprintf (file, ":%d", address.scale);
		}
	      putc (')', file);
	    }
	}
    }
}
4813 \f
4814 /* Check for cases where a clr insns can be omitted from code using
4815 strict_low_part sets. For example, the second clrl here is not needed:
4816 clrl d0; movw a0@+,d0; use d0; clrl d0; movw a0@+; use d0; ...
4817
4818 MODE is the mode of this STRICT_LOW_PART set. FIRST_INSN is the clear
4819 insn we are checking for redundancy. TARGET is the register set by the
4820 clear insn. */
4821
4822 bool
4823 strict_low_part_peephole_ok (machine_mode mode, rtx_insn *first_insn,
4824 rtx target)
4825 {
4826 rtx_insn *p = first_insn;
4827
4828 while ((p = PREV_INSN (p)))
4829 {
4830 if (NOTE_INSN_BASIC_BLOCK_P (p))
4831 return false;
4832
4833 if (NOTE_P (p))
4834 continue;
4835
4836 /* If it isn't an insn, then give up. */
4837 if (!INSN_P (p))
4838 return false;
4839
4840 if (reg_set_p (target, p))
4841 {
4842 rtx set = single_set (p);
4843 rtx dest;
4844
4845 /* If it isn't an easy to recognize insn, then give up. */
4846 if (! set)
4847 return false;
4848
4849 dest = SET_DEST (set);
4850
4851 /* If this sets the entire target register to zero, then our
4852 first_insn is redundant. */
4853 if (rtx_equal_p (dest, target)
4854 && SET_SRC (set) == const0_rtx)
4855 return true;
4856 else if (GET_CODE (dest) == STRICT_LOW_PART
4857 && GET_CODE (XEXP (dest, 0)) == REG
4858 && REGNO (XEXP (dest, 0)) == REGNO (target)
4859 && (GET_MODE_SIZE (GET_MODE (XEXP (dest, 0)))
4860 <= GET_MODE_SIZE (mode)))
4861 /* This is a strict low part set which modifies less than
4862 we are using, so it is safe. */
4863 ;
4864 else
4865 return false;
4866 }
4867 }
4868
4869 return false;
4870 }
4871
4872 /* Operand predicates for implementing asymmetric pc-relative addressing
4873 on m68k. The m68k supports pc-relative addressing (mode 7, register 2)
4874 when used as a source operand, but not as a destination operand.
4875
4876 We model this by restricting the meaning of the basic predicates
4877 (general_operand, memory_operand, etc) to forbid the use of this
4878 addressing mode, and then define the following predicates that permit
4879 this addressing mode. These predicates can then be used for the
4880 source operands of the appropriate instructions.
4881
4882 n.b. While it is theoretically possible to change all machine patterns
to use this addressing mode where permitted by the architecture,
4884 it has only been implemented for "common" cases: SImode, HImode, and
QImode operands, and only for the principal operations that would
4886 require this addressing mode: data movement and simple integer operations.
4887
4888 In parallel with these new predicates, two new constraint letters
4889 were defined: 'S' and 'T'. 'S' is the -mpcrel analog of 'm'.
4890 'T' replaces 's' in the non-pcrel case. It is a no-op in the pcrel case.
4891 In the pcrel case 's' is only valid in combination with 'a' registers.
4892 See addsi3, subsi3, cmpsi, and movsi patterns for a better understanding
4893 of how these constraints are used.
4894
4895 The use of these predicates is strictly optional, though patterns that
4896 don't will cause an extra reload register to be allocated where one
4897 was not necessary:
4898
4899 lea (abc:w,%pc),%a0 ; need to reload address
4900 moveq &1,%d1 ; since write to pc-relative space
4901 movel %d1,%a0@ ; is not allowed
4902 ...
4903 lea (abc:w,%pc),%a1 ; no need to reload address here
4904 movel %a1@,%d0 ; since "movel (abc:w,%pc),%d0" is ok
4905
4906 For more info, consult tiemann@cygnus.com.
4907
4908
4909 All of the ugliness with predicates and constraints is due to the
4910 simple fact that the m68k does not allow a pc-relative addressing
4911 mode as a destination. gcc does not distinguish between source and
4912 destination addresses. Hence, if we claim that pc-relative address
4913 modes are valid, e.g. TARGET_LEGITIMATE_ADDRESS_P accepts them, then we
4914 end up with invalid code. To get around this problem, we left
4915 pc-relative modes as invalid addresses, and then added special
4916 predicates and constraints to accept them.
4917
4918 A cleaner way to handle this is to modify gcc to distinguish
4919 between source and destination addresses. We can then say that
4920 pc-relative is a valid source address but not a valid destination
4921 address, and hopefully avoid a lot of the predicate and constraint
4922 hackery. Unfortunately, this would be a pretty big change. It would
4923 be a useful change for a number of ports, but there aren't any current
4924 plans to undertake this.
4925
4926 ***************************************************************************/
4927
4928
4929 const char *
4930 output_andsi3 (rtx *operands)
4931 {
4932 int logval;
4933 if (GET_CODE (operands[2]) == CONST_INT
4934 && (INTVAL (operands[2]) | 0xffff) == -1
4935 && (DATA_REG_P (operands[0])
4936 || offsettable_memref_p (operands[0]))
4937 && !TARGET_COLDFIRE)
4938 {
4939 if (GET_CODE (operands[0]) != REG)
4940 operands[0] = adjust_address (operands[0], HImode, 2);
4941 operands[2] = GEN_INT (INTVAL (operands[2]) & 0xffff);
4942 /* Do not delete a following tstl %0 insn; that would be incorrect. */
4943 CC_STATUS_INIT;
4944 if (operands[2] == const0_rtx)
4945 return "clr%.w %0";
4946 return "and%.w %2,%0";
4947 }
4948 if (GET_CODE (operands[2]) == CONST_INT
4949 && (logval = exact_log2 (~ INTVAL (operands[2]) & 0xffffffff)) >= 0
4950 && (DATA_REG_P (operands[0])
4951 || offsettable_memref_p (operands[0])))
4952 {
4953 if (DATA_REG_P (operands[0]))
4954 operands[1] = GEN_INT (logval);
4955 else
4956 {
4957 operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
4958 operands[1] = GEN_INT (logval % 8);
4959 }
4960 /* This does not set condition codes in a standard way. */
4961 CC_STATUS_INIT;
4962 return "bclr %1,%0";
4963 }
4964 return "and%.l %2,%0";
4965 }
4966
4967 const char *
4968 output_iorsi3 (rtx *operands)
4969 {
4970 register int logval;
4971 if (GET_CODE (operands[2]) == CONST_INT
4972 && INTVAL (operands[2]) >> 16 == 0
4973 && (DATA_REG_P (operands[0])
4974 || offsettable_memref_p (operands[0]))
4975 && !TARGET_COLDFIRE)
4976 {
4977 if (GET_CODE (operands[0]) != REG)
4978 operands[0] = adjust_address (operands[0], HImode, 2);
4979 /* Do not delete a following tstl %0 insn; that would be incorrect. */
4980 CC_STATUS_INIT;
4981 if (INTVAL (operands[2]) == 0xffff)
4982 return "mov%.w %2,%0";
4983 return "or%.w %2,%0";
4984 }
4985 if (GET_CODE (operands[2]) == CONST_INT
4986 && (logval = exact_log2 (INTVAL (operands[2]) & 0xffffffff)) >= 0
4987 && (DATA_REG_P (operands[0])
4988 || offsettable_memref_p (operands[0])))
4989 {
4990 if (DATA_REG_P (operands[0]))
4991 operands[1] = GEN_INT (logval);
4992 else
4993 {
4994 operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
4995 operands[1] = GEN_INT (logval % 8);
4996 }
4997 CC_STATUS_INIT;
4998 return "bset %1,%0";
4999 }
5000 return "or%.l %2,%0";
5001 }
5002
5003 const char *
5004 output_xorsi3 (rtx *operands)
5005 {
5006 register int logval;
5007 if (GET_CODE (operands[2]) == CONST_INT
5008 && INTVAL (operands[2]) >> 16 == 0
5009 && (offsettable_memref_p (operands[0]) || DATA_REG_P (operands[0]))
5010 && !TARGET_COLDFIRE)
5011 {
5012 if (! DATA_REG_P (operands[0]))
5013 operands[0] = adjust_address (operands[0], HImode, 2);
5014 /* Do not delete a following tstl %0 insn; that would be incorrect. */
5015 CC_STATUS_INIT;
5016 if (INTVAL (operands[2]) == 0xffff)
5017 return "not%.w %0";
5018 return "eor%.w %2,%0";
5019 }
5020 if (GET_CODE (operands[2]) == CONST_INT
5021 && (logval = exact_log2 (INTVAL (operands[2]) & 0xffffffff)) >= 0
5022 && (DATA_REG_P (operands[0])
5023 || offsettable_memref_p (operands[0])))
5024 {
5025 if (DATA_REG_P (operands[0]))
5026 operands[1] = GEN_INT (logval);
5027 else
5028 {
5029 operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
5030 operands[1] = GEN_INT (logval % 8);
5031 }
5032 CC_STATUS_INIT;
5033 return "bchg %1,%0";
5034 }
5035 return "eor%.l %2,%0";
5036 }
5037
5038 /* Return the instruction that should be used for a call to address X,
5039 which is known to be in operand 0. */
5040
5041 const char *
5042 output_call (rtx x)
5043 {
5044 if (symbolic_operand (x, VOIDmode))
5045 return m68k_symbolic_call;
5046 else
5047 return "jsr %a0";
5048 }
5049
5050 /* Likewise sibling calls. */
5051
5052 const char *
5053 output_sibcall (rtx x)
5054 {
5055 if (symbolic_operand (x, VOIDmode))
5056 return m68k_symbolic_jump;
5057 else
5058 return "jmp %a0";
5059 }
5060
/* Worker for TARGET_ASM_OUTPUT_MI_THUNK.  Emit assembly for a thunk to
   FUNCTION: adjust the incoming `this' pointer (stored at 4(%sp)) by
   DELTA and, if VCALL_OFFSET is nonzero, by *(*this + VCALL_OFFSET),
   then tail-call FUNCTION.  */

static void
m68k_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
		      HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
		      tree function)
{
  rtx this_slot, offset, addr, mem, tmp;
  rtx_insn *insn;

  /* Avoid clobbering the struct value reg by using the
     static chain reg as a temporary.  */
  tmp = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);

  /* Pretend to be a post-reload pass while generating rtl.  */
  reload_completed = 1;

  /* The "this" pointer is stored at 4(%sp).  */
  this_slot = gen_rtx_MEM (Pmode, plus_constant (Pmode,
						 stack_pointer_rtx, 4));

  /* Add DELTA to THIS.  */
  if (delta != 0)
    {
      /* Make the offset a legitimate operand for memory addition.  */
      offset = GEN_INT (delta);
      /* Large constants go through %d0 rather than a memory-add.  */
      if ((delta < -8 || delta > 8)
	  && (TARGET_COLDFIRE || USE_MOVQ (delta)))
	{
	  emit_move_insn (gen_rtx_REG (Pmode, D0_REG), offset);
	  offset = gen_rtx_REG (Pmode, D0_REG);
	}
      emit_insn (gen_add3_insn (copy_rtx (this_slot),
				copy_rtx (this_slot), offset));
    }

  /* If needed, add *(*THIS + VCALL_OFFSET) to THIS.  */
  if (vcall_offset != 0)
    {
      /* Set the static chain register to *THIS.  */
      emit_move_insn (tmp, this_slot);
      emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));

      /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET.  */
      addr = plus_constant (Pmode, tmp, vcall_offset);
      if (!m68k_legitimate_address_p (Pmode, addr, true))
	{
	  emit_insn (gen_rtx_SET (tmp, addr));
	  addr = tmp;
	}

      /* Load the offset into %d0 and add it to THIS.  */
      emit_move_insn (gen_rtx_REG (Pmode, D0_REG),
		      gen_rtx_MEM (Pmode, addr));
      emit_insn (gen_add3_insn (copy_rtx (this_slot),
				copy_rtx (this_slot),
				gen_rtx_REG (Pmode, D0_REG)));
    }

  /* Jump to the target function.  Use a sibcall if direct jumps are
     allowed, otherwise load the address into a register first.  */
  mem = DECL_RTL (function);
  if (!sibcall_operand (XEXP (mem, 0), VOIDmode))
    {
      gcc_assert (flag_pic);

      if (!TARGET_SEP_DATA)
	{
	  /* Use the static chain register as a temporary (call-clobbered)
	     GOT pointer for this function.  We can use the static chain
	     register because it isn't live on entry to the thunk.  */
	  SET_REGNO (pic_offset_table_rtx, STATIC_CHAIN_REGNUM);
	  emit_insn (gen_load_got (pic_offset_table_rtx));
	}
      legitimize_pic_address (XEXP (mem, 0), Pmode, tmp);
      mem = replace_equiv_address (mem, tmp);
    }
  insn = emit_call_insn (gen_sibcall (mem, const0_rtx));
  SIBLING_CALL_P (insn) = 1;

  /* Run just enough of rest_of_compilation.  */
  insn = get_insns ();
  split_all_insns_noflow ();
  final_start_function (insn, file, 1);
  final (insn, file, 1);
  final_end_function ();

  /* Clean up the vars set above.  */
  reload_completed = 0;

  /* Restore the original PIC register.  */
  if (flag_pic)
    SET_REGNO (pic_offset_table_rtx, PIC_REG);
}
5153
5154 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
5155
/* Worker function for TARGET_STRUCT_VALUE_RTX.  The register used to
   pass the address of an aggregate return value is fixed regardless of
   FNTYPE or direction, so both parameters are ignored.  */

static rtx
m68k_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
		       int incoming ATTRIBUTE_UNUSED)
{
  return gen_rtx_REG (Pmode, M68K_STRUCT_VALUE_REGNUM);
}
5162
5163 /* Return nonzero if register old_reg can be renamed to register new_reg. */
5164 int
5165 m68k_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED,
5166 unsigned int new_reg)
5167 {
5168
5169 /* Interrupt functions can only use registers that have already been
5170 saved by the prologue, even if they would normally be
5171 call-clobbered. */
5172
5173 if ((m68k_get_function_kind (current_function_decl)
5174 == m68k_fk_interrupt_handler)
5175 && !df_regs_ever_live_p (new_reg))
5176 return 0;
5177
5178 return 1;
5179 }
5180
5181 /* Implement TARGET_HARD_REGNO_MODE_OK. On the 68000, we let the cpu
5182 registers can hold any mode, but restrict the 68881 registers to
5183 floating-point modes. */
5184
5185 static bool
5186 m68k_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
5187 {
5188 if (DATA_REGNO_P (regno))
5189 {
5190 /* Data Registers, can hold aggregate if fits in. */
5191 if (regno + GET_MODE_SIZE (mode) / 4 <= 8)
5192 return true;
5193 }
5194 else if (ADDRESS_REGNO_P (regno))
5195 {
5196 if (regno + GET_MODE_SIZE (mode) / 4 <= 16)
5197 return true;
5198 }
5199 else if (FP_REGNO_P (regno))
5200 {
5201 /* FPU registers, hold float or complex float of long double or
5202 smaller. */
5203 if ((GET_MODE_CLASS (mode) == MODE_FLOAT
5204 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
5205 && GET_MODE_UNIT_SIZE (mode) <= TARGET_FP_REG_SIZE)
5206 return true;
5207 }
5208 return false;
5209 }
5210
5211 /* Implement TARGET_MODES_TIEABLE_P. */
5212
5213 static bool
5214 m68k_modes_tieable_p (machine_mode mode1, machine_mode mode2)
5215 {
5216 return (!TARGET_HARD_FLOAT
5217 || ((GET_MODE_CLASS (mode1) == MODE_FLOAT
5218 || GET_MODE_CLASS (mode1) == MODE_COMPLEX_FLOAT)
5219 == (GET_MODE_CLASS (mode2) == MODE_FLOAT
5220 || GET_MODE_CLASS (mode2) == MODE_COMPLEX_FLOAT)));
5221 }
5222
5223 /* Implement SECONDARY_RELOAD_CLASS. */
5224
5225 enum reg_class
5226 m68k_secondary_reload_class (enum reg_class rclass,
5227 machine_mode mode, rtx x)
5228 {
5229 int regno;
5230
5231 regno = true_regnum (x);
5232
5233 /* If one operand of a movqi is an address register, the other
5234 operand must be a general register or constant. Other types
5235 of operand must be reloaded through a data register. */
5236 if (GET_MODE_SIZE (mode) == 1
5237 && reg_classes_intersect_p (rclass, ADDR_REGS)
5238 && !(INT_REGNO_P (regno) || CONSTANT_P (x)))
5239 return DATA_REGS;
5240
5241 /* PC-relative addresses must be loaded into an address register first. */
5242 if (TARGET_PCREL
5243 && !reg_class_subset_p (rclass, ADDR_REGS)
5244 && symbolic_operand (x, VOIDmode))
5245 return ADDR_REGS;
5246
5247 return NO_REGS;
5248 }
5249
5250 /* Implement PREFERRED_RELOAD_CLASS. */
5251
5252 enum reg_class
5253 m68k_preferred_reload_class (rtx x, enum reg_class rclass)
5254 {
5255 enum reg_class secondary_class;
5256
5257 /* If RCLASS might need a secondary reload, try restricting it to
5258 a class that doesn't. */
5259 secondary_class = m68k_secondary_reload_class (rclass, GET_MODE (x), x);
5260 if (secondary_class != NO_REGS
5261 && reg_class_subset_p (secondary_class, rclass))
5262 return secondary_class;
5263
5264 /* Prefer to use moveq for in-range constants. */
5265 if (GET_CODE (x) == CONST_INT
5266 && reg_class_subset_p (DATA_REGS, rclass)
5267 && IN_RANGE (INTVAL (x), -0x80, 0x7f))
5268 return DATA_REGS;
5269
5270 /* ??? Do we really need this now? */
5271 if (GET_CODE (x) == CONST_DOUBLE
5272 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
5273 {
5274 if (TARGET_HARD_FLOAT && reg_class_subset_p (FP_REGS, rclass))
5275 return FP_REGS;
5276
5277 return NO_REGS;
5278 }
5279
5280 return rclass;
5281 }
5282
5283 /* Return floating point values in a 68881 register. This makes 68881 code
5284 a little bit faster. It also makes -msoft-float code incompatible with
5285 hard-float code, so people have to be careful not to mix the two.
5286 For ColdFire it was decided the ABI incompatibility is undesirable.
5287 If there is need for a hard-float ABI it is probably worth doing it
5288 properly and also passing function arguments in FP registers. */
5289 rtx
5290 m68k_libcall_value (machine_mode mode)
5291 {
5292 switch (mode) {
5293 case E_SFmode:
5294 case E_DFmode:
5295 case E_XFmode:
5296 if (TARGET_68881)
5297 return gen_rtx_REG (mode, FP0_REG);
5298 break;
5299 default:
5300 break;
5301 }
5302
5303 return gen_rtx_REG (mode, m68k_libcall_value_in_a0_p ? A0_REG : D0_REG);
5304 }
5305
5306 /* Location in which function value is returned.
5307 NOTE: Due to differences in ABIs, don't call this function directly,
5308 use FUNCTION_VALUE instead. */
5309 rtx
5310 m68k_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED)
5311 {
5312 machine_mode mode;
5313
5314 mode = TYPE_MODE (valtype);
5315 switch (mode) {
5316 case E_SFmode:
5317 case E_DFmode:
5318 case E_XFmode:
5319 if (TARGET_68881)
5320 return gen_rtx_REG (mode, FP0_REG);
5321 break;
5322 default:
5323 break;
5324 }
5325
5326 /* If the function returns a pointer, push that into %a0. */
5327 if (func && POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (func))))
5328 /* For compatibility with the large body of existing code which
5329 does not always properly declare external functions returning
5330 pointer types, the m68k/SVR4 convention is to copy the value
5331 returned for pointer functions from a0 to d0 in the function
5332 epilogue, so that callers that have neglected to properly
5333 declare the callee can still find the correct return value in
5334 d0. */
5335 return gen_rtx_PARALLEL
5336 (mode,
5337 gen_rtvec (2,
5338 gen_rtx_EXPR_LIST (VOIDmode,
5339 gen_rtx_REG (mode, A0_REG),
5340 const0_rtx),
5341 gen_rtx_EXPR_LIST (VOIDmode,
5342 gen_rtx_REG (mode, D0_REG),
5343 const0_rtx)));
5344 else if (POINTER_TYPE_P (valtype))
5345 return gen_rtx_REG (mode, A0_REG);
5346 else
5347 return gen_rtx_REG (mode, D0_REG);
5348 }
5349
/* Worker function for TARGET_RETURN_IN_MEMORY.

   Return true if a value of type TYPE must be returned in memory
   rather than in registers.  Only compiled in when strict-alignment
   compatibility matters for this target.  */
#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
static bool
m68k_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  machine_mode mode = TYPE_MODE (type);

  /* BLKmode values have no single machine mode and always go in memory.  */
  if (mode == BLKmode)
    return true;

  /* If TYPE's known alignment is less than the alignment of MODE that
     would contain the structure, then return in memory.  We need to
     do so to maintain the compatibility between code compiled with
     -mstrict-align and that compiled with -mno-strict-align.  */
  if (AGGREGATE_TYPE_P (type)
      && TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (mode))
    return true;

  return false;
}
#endif
5371
/* CPU to schedule the program for.  Read by the scheduling hooks below
   to select per-core pipeline behavior.  */
enum attr_cpu m68k_sched_cpu;

/* MAC (multiply-accumulate unit) to schedule the program for.  */
enum attr_mac m68k_sched_mac;
5377
/* Operand type.  Classification of an insn operand used by the
   scheduling attributes (opx_type/opy_type/size/op_mem) to estimate
   instruction length and memory traffic.  */
enum attr_op_type
  {
    /* No operand.  */
    OP_TYPE_NONE,

    /* Integer register.  */
    OP_TYPE_RN,

    /* FP register.  */
    OP_TYPE_FPN,

    /* Implicit mem reference (e.g. stack).  */
    OP_TYPE_MEM1,

    /* Memory without offset or indexing.  EA modes 2, 3 and 4.  */
    OP_TYPE_MEM234,

    /* Memory with offset but without indexing.  EA mode 5.  */
    OP_TYPE_MEM5,

    /* Memory with indexing.  EA mode 6.  */
    OP_TYPE_MEM6,

    /* Memory referenced by absolute address.  EA mode 7.  */
    OP_TYPE_MEM7,

    /* Immediate operand that doesn't require extension word.  */
    OP_TYPE_IMM_Q,

    /* Immediate 16 bit operand.  */
    OP_TYPE_IMM_W,

    /* Immediate 32 bit operand.  */
    OP_TYPE_IMM_L
  };
5414
/* Return type of memory ADDR_RTX refers to.  MODE is the mode of the
   access.  The result classifies the effective-address mode (see
   enum attr_op_type) for scheduling purposes.  */
static enum attr_op_type
sched_address_type (machine_mode mode, rtx addr_rtx)
{
  struct m68k_address address;

  /* Symbolic references are absolute addresses (EA mode 7).  */
  if (symbolic_operand (addr_rtx, VOIDmode))
    return OP_TYPE_MEM7;

  if (!m68k_decompose_address (mode, addr_rtx,
			       reload_completed, &address))
    {
      gcc_assert (!reload_completed);
      /* Reload will likely fix the address to be in the register.  */
      return OP_TYPE_MEM234;
    }

  /* A scaled index implies EA mode 6.  */
  if (address.scale != 0)
    return OP_TYPE_MEM6;

  if (address.base != NULL_RTX)
    {
      /* Base register alone: modes 2/3/4; base + offset: mode 5.  */
      if (address.offset == NULL_RTX)
	return OP_TYPE_MEM234;

      return OP_TYPE_MEM5;
    }

  /* No base and no index: must be an offset-only (absolute) address.  */
  gcc_assert (address.offset != NULL_RTX);

  return OP_TYPE_MEM7;
}
5447
5448 /* Return X or Y (depending on OPX_P) operand of INSN. */
5449 static rtx
5450 sched_get_operand (rtx_insn *insn, bool opx_p)
5451 {
5452 int i;
5453
5454 if (recog_memoized (insn) < 0)
5455 gcc_unreachable ();
5456
5457 extract_constrain_insn_cached (insn);
5458
5459 if (opx_p)
5460 i = get_attr_opx (insn);
5461 else
5462 i = get_attr_opy (insn);
5463
5464 if (i >= recog_data.n_operands)
5465 return NULL;
5466
5467 return recog_data.operand[i];
5468 }
5469
/* Return type of INSN's operand X (if OPX_P) or operand Y (if !OPX_P).
   If ADDRESS_P is true, return type of memory location operand refers to.

   Classifies the operand as register / memory EA mode / immediate
   width so the scheduler can model instruction size and timing.  */
static enum attr_op_type
sched_attr_op_type (rtx_insn *insn, bool opx_p, bool address_p)
{
  rtx op;

  op = sched_get_operand (insn, opx_p);

  if (op == NULL)
    {
      /* Can only happen before reload; treat as a plain register.  */
      gcc_assert (!reload_completed);
      return OP_TYPE_RN;
    }

  if (address_p)
    return sched_address_type (QImode, op);

  if (memory_operand (op, VOIDmode))
    return sched_address_type (GET_MODE (op), XEXP (op, 0));

  if (register_operand (op, VOIDmode))
    {
      /* Before reload a pseudo in FP mode is assumed to end up in an
	 FP register; after reload check the hard register directly.  */
      if ((!reload_completed && FLOAT_MODE_P (GET_MODE (op)))
	  || (reload_completed && FP_REG_P (op)))
	return OP_TYPE_FPN;

      return OP_TYPE_RN;
    }

  if (GET_CODE (op) == CONST_INT)
    {
      int ival;

      ival = INTVAL (op);

      /* Check for quick constants.  */
      switch (get_attr_type (insn))
	{
	case TYPE_ALUQ_L:
	  if (IN_RANGE (ival, 1, 8) || IN_RANGE (ival, -8, -1))
	    return OP_TYPE_IMM_Q;

	  gcc_assert (!reload_completed);
	  break;

	case TYPE_MOVEQ_L:
	  if (USE_MOVQ (ival))
	    return OP_TYPE_IMM_Q;

	  gcc_assert (!reload_completed);
	  break;

	case TYPE_MOV3Q_L:
	  if (valid_mov3q_const (ival))
	    return OP_TYPE_IMM_Q;

	  gcc_assert (!reload_completed);
	  break;

	default:
	  break;
	}

      /* 16-bit immediates fit in a single extension word.  */
      if (IN_RANGE (ival, -0x8000, 0x7fff))
	return OP_TYPE_IMM_W;

      return OP_TYPE_IMM_L;
    }

  if (GET_CODE (op) == CONST_DOUBLE)
    {
      switch (GET_MODE (op))
	{
	case E_SFmode:
	  return OP_TYPE_IMM_W;

	case E_VOIDmode:
	case E_DFmode:
	  return OP_TYPE_IMM_L;

	default:
	  gcc_unreachable ();
	}
    }

  if (GET_CODE (op) == CONST
      || symbolic_operand (op, VOIDmode)
      || LABEL_P (op))
    {
      switch (GET_MODE (op))
	{
	case E_QImode:
	  return OP_TYPE_IMM_Q;

	case E_HImode:
	  return OP_TYPE_IMM_W;

	case E_SImode:
	  return OP_TYPE_IMM_L;

	default:
	  if (symbolic_operand (m68k_unwrap_symbol (op, false), VOIDmode))
	    /* Just a guess.  */
	    return OP_TYPE_IMM_W;

	  return OP_TYPE_IMM_L;
	}
    }

  /* Anything else should only appear before reload.  */
  gcc_assert (!reload_completed);

  if (FLOAT_MODE_P (GET_MODE (op)))
    return OP_TYPE_FPN;

  return OP_TYPE_RN;
}
5587
/* Implement opx_type attribute.
   Return type of INSN's operand X.
   If ADDRESS_P is true, return type of memory location operand refers to.

   This is a one-to-one translation of the generic OP_TYPE_* result of
   sched_attr_op_type into the generated OPX_TYPE_* attribute values.  */
enum attr_opx_type
m68k_sched_attr_opx_type (rtx_insn *insn, int address_p)
{
  switch (sched_attr_op_type (insn, true, address_p != 0))
    {
    case OP_TYPE_RN:
      return OPX_TYPE_RN;

    case OP_TYPE_FPN:
      return OPX_TYPE_FPN;

    case OP_TYPE_MEM1:
      return OPX_TYPE_MEM1;

    case OP_TYPE_MEM234:
      return OPX_TYPE_MEM234;

    case OP_TYPE_MEM5:
      return OPX_TYPE_MEM5;

    case OP_TYPE_MEM6:
      return OPX_TYPE_MEM6;

    case OP_TYPE_MEM7:
      return OPX_TYPE_MEM7;

    case OP_TYPE_IMM_Q:
      return OPX_TYPE_IMM_Q;

    case OP_TYPE_IMM_W:
      return OPX_TYPE_IMM_W;

    case OP_TYPE_IMM_L:
      return OPX_TYPE_IMM_L;

    default:
      gcc_unreachable ();
    }
}
5630
/* Implement opy_type attribute.
   Return type of INSN's operand Y.
   If ADDRESS_P is true, return type of memory location operand refers to.

   Mirror of m68k_sched_attr_opx_type for the Y operand.  */
enum attr_opy_type
m68k_sched_attr_opy_type (rtx_insn *insn, int address_p)
{
  switch (sched_attr_op_type (insn, false, address_p != 0))
    {
    case OP_TYPE_RN:
      return OPY_TYPE_RN;

    case OP_TYPE_FPN:
      return OPY_TYPE_FPN;

    case OP_TYPE_MEM1:
      return OPY_TYPE_MEM1;

    case OP_TYPE_MEM234:
      return OPY_TYPE_MEM234;

    case OP_TYPE_MEM5:
      return OPY_TYPE_MEM5;

    case OP_TYPE_MEM6:
      return OPY_TYPE_MEM6;

    case OP_TYPE_MEM7:
      return OPY_TYPE_MEM7;

    case OP_TYPE_IMM_Q:
      return OPY_TYPE_IMM_Q;

    case OP_TYPE_IMM_W:
      return OPY_TYPE_IMM_W;

    case OP_TYPE_IMM_L:
      return OPY_TYPE_IMM_L;

    default:
      gcc_unreachable ();
    }
}
5673
5674 /* Return size of INSN as int. */
5675 static int
5676 sched_get_attr_size_int (rtx_insn *insn)
5677 {
5678 int size;
5679
5680 switch (get_attr_type (insn))
5681 {
5682 case TYPE_IGNORE:
5683 /* There should be no references to m68k_sched_attr_size for 'ignore'
5684 instructions. */
5685 gcc_unreachable ();
5686 return 0;
5687
5688 case TYPE_MUL_L:
5689 size = 2;
5690 break;
5691
5692 default:
5693 size = 1;
5694 break;
5695 }
5696
5697 switch (get_attr_opx_type (insn))
5698 {
5699 case OPX_TYPE_NONE:
5700 case OPX_TYPE_RN:
5701 case OPX_TYPE_FPN:
5702 case OPX_TYPE_MEM1:
5703 case OPX_TYPE_MEM234:
5704 case OPY_TYPE_IMM_Q:
5705 break;
5706
5707 case OPX_TYPE_MEM5:
5708 case OPX_TYPE_MEM6:
5709 /* Here we assume that most absolute references are short. */
5710 case OPX_TYPE_MEM7:
5711 case OPY_TYPE_IMM_W:
5712 ++size;
5713 break;
5714
5715 case OPY_TYPE_IMM_L:
5716 size += 2;
5717 break;
5718
5719 default:
5720 gcc_unreachable ();
5721 }
5722
5723 switch (get_attr_opy_type (insn))
5724 {
5725 case OPY_TYPE_NONE:
5726 case OPY_TYPE_RN:
5727 case OPY_TYPE_FPN:
5728 case OPY_TYPE_MEM1:
5729 case OPY_TYPE_MEM234:
5730 case OPY_TYPE_IMM_Q:
5731 break;
5732
5733 case OPY_TYPE_MEM5:
5734 case OPY_TYPE_MEM6:
5735 /* Here we assume that most absolute references are short. */
5736 case OPY_TYPE_MEM7:
5737 case OPY_TYPE_IMM_W:
5738 ++size;
5739 break;
5740
5741 case OPY_TYPE_IMM_L:
5742 size += 2;
5743 break;
5744
5745 default:
5746 gcc_unreachable ();
5747 }
5748
5749 if (size > 3)
5750 {
5751 gcc_assert (!reload_completed);
5752
5753 size = 3;
5754 }
5755
5756 return size;
5757 }
5758
5759 /* Return size of INSN as attribute enum value. */
5760 enum attr_size
5761 m68k_sched_attr_size (rtx_insn *insn)
5762 {
5763 switch (sched_get_attr_size_int (insn))
5764 {
5765 case 1:
5766 return SIZE_1;
5767
5768 case 2:
5769 return SIZE_2;
5770
5771 case 3:
5772 return SIZE_3;
5773
5774 default:
5775 gcc_unreachable ();
5776 }
5777 }
5778
/* Classify operand X or Y (depending on OPX_P) of INSN for the op_mem
   attribute: OP_TYPE_RN for any non-memory operand, OP_TYPE_MEM1 for a
   non-indexed memory reference, OP_TYPE_MEM6 for an indexed one.
   (Note: despite the previous comment, this returns a classification,
   not an rtx.)  */
static enum attr_op_type
sched_get_opxy_mem_type (rtx_insn *insn, bool opx_p)
{
  if (opx_p)
    {
      switch (get_attr_opx_type (insn))
	{
	case OPX_TYPE_NONE:
	case OPX_TYPE_RN:
	case OPX_TYPE_FPN:
	case OPX_TYPE_IMM_Q:
	case OPX_TYPE_IMM_W:
	case OPX_TYPE_IMM_L:
	  return OP_TYPE_RN;

	case OPX_TYPE_MEM1:
	case OPX_TYPE_MEM234:
	case OPX_TYPE_MEM5:
	case OPX_TYPE_MEM7:
	  return OP_TYPE_MEM1;

	case OPX_TYPE_MEM6:
	  return OP_TYPE_MEM6;

	default:
	  gcc_unreachable ();
	}
    }
  else
    {
      switch (get_attr_opy_type (insn))
	{
	case OPY_TYPE_NONE:
	case OPY_TYPE_RN:
	case OPY_TYPE_FPN:
	case OPY_TYPE_IMM_Q:
	case OPY_TYPE_IMM_W:
	case OPY_TYPE_IMM_L:
	  return OP_TYPE_RN;

	case OPY_TYPE_MEM1:
	case OPY_TYPE_MEM234:
	case OPY_TYPE_MEM5:
	case OPY_TYPE_MEM7:
	  return OP_TYPE_MEM1;

	case OPY_TYPE_MEM6:
	  return OP_TYPE_MEM6;

	default:
	  gcc_unreachable ();
	}
    }
}
5835
/* Implement op_mem attribute.

   Combine the memory classifications of operand Y (always a read) and
   operand X (read, write or read-modify-write, per the opx_access
   attribute) into an OP_MEM_* value.  Judging from the case analysis
   below, the two positions in the name count memory reads and writes
   respectively, with 'I' marking an indexed (EA mode 6) access --
   e.g. OP_MEM_10 is one read / no write.  */
enum attr_op_mem
m68k_sched_attr_op_mem (rtx_insn *insn)
{
  enum attr_op_type opx;
  enum attr_op_type opy;

  opx = sched_get_opxy_mem_type (insn, true);
  opy = sched_get_opxy_mem_type (insn, false);

  /* Register-only insn: no memory traffic.  */
  if (opy == OP_TYPE_RN && opx == OP_TYPE_RN)
    return OP_MEM_00;

  if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM1)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_R:
	  return OP_MEM_10;

	case OPX_ACCESS_W:
	  return OP_MEM_01;

	case OPX_ACCESS_RW:
	  return OP_MEM_11;

	default:
	  gcc_unreachable ();
	}
    }

  if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM6)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_R:
	  return OP_MEM_I0;

	case OPX_ACCESS_W:
	  return OP_MEM_0I;

	case OPX_ACCESS_RW:
	  return OP_MEM_I1;

	default:
	  gcc_unreachable ();
	}
    }

  if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_RN)
    return OP_MEM_10;

  if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM1)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_W:
	  return OP_MEM_11;

	default:
	  /* Two reads through X and Y only occur before reload.  */
	  gcc_assert (!reload_completed);
	  return OP_MEM_11;
	}
    }

  if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM6)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_W:
	  return OP_MEM_1I;

	default:
	  gcc_assert (!reload_completed);
	  return OP_MEM_1I;
	}
    }

  if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_RN)
    return OP_MEM_I0;

  if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM1)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_W:
	  return OP_MEM_I1;

	default:
	  gcc_assert (!reload_completed);
	  return OP_MEM_I1;
	}
    }

  /* Only the indexed/indexed combination remains; it too is valid
     only before reload.  */
  gcc_assert (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM6);
  gcc_assert (!reload_completed);
  return OP_MEM_I1;
}
5934
/* Data for ColdFire V4 index bypass.
   Producer modifies register that is used as index in consumer with
   specified scale.  Filled in by m68k_sched_indexed_address_bypass_p
   and consumed (then cleared) by m68k_sched_adjust_cost.  */
static struct
{
  /* Producer instruction.  */
  rtx pro;

  /* Consumer instruction.  */
  rtx con;

  /* Scale of indexed memory access within consumer.
     Or zero if bypass should not be effective at the moment.  */
  int scale;
} sched_cfv4_bypass_data;

/* An empty state that is used in m68k_sched_adjust_cost.  */
static state_t sched_adjust_cost_state;
5953
/* Implement adjust_cost scheduler hook.
   Return adjusted COST of dependency LINK between DEF_INSN and INSN.

   Applies the ColdFire V4 1x-scale index-bypass penalty recorded by
   m68k_sched_indexed_address_bypass_p, and never lets the cost drop
   below what the DFA says the minimum conflict delay is.  */
static int
m68k_sched_adjust_cost (rtx_insn *insn, int, rtx_insn *def_insn, int cost,
			unsigned int)
{
  int delay;

  /* Unrecognized insns carry no attribute data; leave COST alone.  */
  if (recog_memoized (def_insn) < 0
      || recog_memoized (insn) < 0)
    return cost;

  if (sched_cfv4_bypass_data.scale == 1)
    /* Handle ColdFire V4 bypass for indexed address with 1x scale.  */
    {
      /* haifa-sched.c: insn_cost () calls bypass_p () just before
	 targetm.sched.adjust_cost ().  Hence, we can be relatively sure
	 that the data in sched_cfv4_bypass_data is up to date.  */
      gcc_assert (sched_cfv4_bypass_data.pro == def_insn
		  && sched_cfv4_bypass_data.con == insn);

      if (cost < 3)
	cost = 3;

      /* One-shot: clear the recorded bypass data.  */
      sched_cfv4_bypass_data.pro = NULL;
      sched_cfv4_bypass_data.con = NULL;
      sched_cfv4_bypass_data.scale = 0;
    }
  else
    gcc_assert (sched_cfv4_bypass_data.pro == NULL
		&& sched_cfv4_bypass_data.con == NULL
		&& sched_cfv4_bypass_data.scale == 0);

  /* Don't try to issue INSN earlier than DFA permits.
     This is especially useful for instructions that write to memory,
     as their true dependence (default) latency is better to be set to 0
     to workaround alias analysis limitations.
     This is, in fact, a machine independent tweak, so, probably,
     it should be moved to haifa-sched.c: insn_cost ().  */
  delay = min_insn_conflict_delay (sched_adjust_cost_state, def_insn, insn);
  if (delay > cost)
    cost = delay;

  return cost;
}
5999
6000 /* Return maximal number of insns that can be scheduled on a single cycle. */
6001 static int
6002 m68k_sched_issue_rate (void)
6003 {
6004 switch (m68k_sched_cpu)
6005 {
6006 case CPU_CFV1:
6007 case CPU_CFV2:
6008 case CPU_CFV3:
6009 return 1;
6010
6011 case CPU_CFV4:
6012 return 2;
6013
6014 default:
6015 gcc_unreachable ();
6016 return 0;
6017 }
6018 }
6019
/* Maximal length of instruction for current CPU.
   E.g. it is 3 for any ColdFire core.  */
static int max_insn_size;

/* Data to model instruction buffer of CPU.  */
struct _sched_ib
{
  /* True if instruction buffer model is modeled for current CPU.  */
  bool enabled_p;

  /* Size of the instruction buffer in words.  */
  int size;

  /* Number of filled words in the instruction buffer.  */
  int filled;

  /* Additional information about instruction buffer for CPUs that have
     a buffer of instruction records, rather then a plain buffer
     of instruction words.  */
  struct _sched_ib_records
  {
    /* Size of buffer in records.  */
    int n_insns;

    /* Array to hold data on adjustments made to the size of the buffer.
       Used as a circular log of the last n_insns adjustments.  */
    int *adjust;

    /* Index of the above array.  */
    int adjust_index;
  } records;

  /* An insn that reserves (marks empty) one word in the instruction buffer.  */
  rtx insn;
};

/* The single instruction-buffer model instance for this pass.  */
static struct _sched_ib sched_ib;

/* ID of memory unit.  */
static int sched_mem_unit_code;
6059
/* Implementation of the targetm.sched.variable_issue () hook.
   It is called after INSN was issued.  It returns the number of insns
   that can possibly get scheduled on the current cycle.
   It is used here to determine the effect of INSN on the instruction
   buffer.  */
static int
m68k_sched_variable_issue (FILE *sched_dump ATTRIBUTE_UNUSED,
			   int sched_verbose ATTRIBUTE_UNUSED,
			   rtx_insn *insn, int can_issue_more)
{
  int insn_size;

  if (recog_memoized (insn) >= 0 && get_attr_type (insn) != TYPE_IGNORE)
    {
      switch (m68k_sched_cpu)
	{
	case CPU_CFV1:
	case CPU_CFV2:
	  insn_size = sched_get_attr_size_int (insn);
	  break;

	case CPU_CFV3:
	  insn_size = sched_get_attr_size_int (insn);

	  /* ColdFire V3 and V4 cores have instruction buffers that can
	     accumulate up to 8 instructions regardless of instructions'
	     sizes.  So we should take care not to "prefetch" 24 one-word
	     or 12 two-words instructions.
	     To model this behavior we temporarily decrease size of the
	     buffer by (max_insn_size - insn_size) for next 7 instructions.  */
	  {
	    int adjust;

	    adjust = max_insn_size - insn_size;
	    sched_ib.size -= adjust;

	    if (sched_ib.filled > sched_ib.size)
	      sched_ib.filled = sched_ib.size;

	    /* Log the adjustment so it can be undone 7 insns later.  */
	    sched_ib.records.adjust[sched_ib.records.adjust_index] = adjust;
	  }

	  ++sched_ib.records.adjust_index;
	  if (sched_ib.records.adjust_index == sched_ib.records.n_insns)
	    sched_ib.records.adjust_index = 0;

	  /* Undo adjustment we did 7 instructions ago.  */
	  sched_ib.size
	    += sched_ib.records.adjust[sched_ib.records.adjust_index];

	  break;

	case CPU_CFV4:
	  /* V4 has no buffer model; insns consume no buffer words.  */
	  gcc_assert (!sched_ib.enabled_p);
	  insn_size = 0;
	  break;

	default:
	  gcc_unreachable ();
	}

      if (insn_size > sched_ib.filled)
	/* Scheduling for register pressure does not always take DFA into
	   account.  Workaround instruction buffer not being filled enough.  */
	{
	  gcc_assert (sched_pressure == SCHED_PRESSURE_WEIGHTED);
	  insn_size = sched_ib.filled;
	}

      --can_issue_more;
    }
  else if (GET_CODE (PATTERN (insn)) == ASM_INPUT
	   || asm_noperands (PATTERN (insn)) >= 0)
    /* Asm statements have unknown size; assume they drain the buffer.  */
    insn_size = sched_ib.filled;
  else
    insn_size = 0;

  sched_ib.filled -= insn_size;

  return can_issue_more;
}
6141
/* Return how many instructions should scheduler lookahead to choose the
   best one.  */
static int
m68k_sched_first_cycle_multipass_dfa_lookahead (void)
{
  /* Look ahead over the remaining issue slots of the cycle.  */
  return m68k_sched_issue_rate () - 1;
}
6149
/* Implementation of targetm.sched.init_global () hook.
   It is invoked once per scheduling pass and is used here
   to initialize scheduler constants.  */
static void
m68k_sched_md_init_global (FILE *sched_dump ATTRIBUTE_UNUSED,
			   int sched_verbose ATTRIBUTE_UNUSED,
			   int n_insns ATTRIBUTE_UNUSED)
{
  /* Check that all instructions have DFA reservations and
     that all instructions can be issued from a clean state.  */
  if (flag_checking)
    {
      rtx_insn *insn;
      state_t state;

      state = alloca (state_size ());

      for (insn = get_insns (); insn != NULL; insn = NEXT_INSN (insn))
	{
	  if (INSN_P (insn) && recog_memoized (insn) >= 0)
	    {
	      gcc_assert (insn_has_dfa_reservation_p (insn));

	      state_reset (state);
	      if (state_transition (state, insn) >= 0)
		gcc_unreachable ();
	    }
	}
    }

  /* Setup target cpu.  */

  /* ColdFire V4 has a set of features to keep its instruction buffer full
     (e.g., a separate memory bus for instructions) and, hence, we do not model
     buffer for this CPU.  */
  sched_ib.enabled_p = (m68k_sched_cpu != CPU_CFV4);

  switch (m68k_sched_cpu)
    {
    case CPU_CFV4:
      sched_ib.filled = 0;

      /* FALLTHRU */

    case CPU_CFV1:
    case CPU_CFV2:
      max_insn_size = 3;
      sched_ib.records.n_insns = 0;
      sched_ib.records.adjust = NULL;
      break;

    case CPU_CFV3:
      max_insn_size = 3;
      sched_ib.records.n_insns = 8;
      sched_ib.records.adjust = XNEWVEC (int, sched_ib.records.n_insns);
      break;

    default:
      gcc_unreachable ();
    }

  sched_mem_unit_code = get_cpu_unit_code ("cf_mem1");

  /* Freed and reset in m68k_sched_md_finish_global.  */
  sched_adjust_cost_state = xmalloc (state_size ());
  state_reset (sched_adjust_cost_state);

  /* Build the pseudo-insn that models fetching one buffer word.  */
  start_sequence ();
  emit_insn (gen_ib ());
  sched_ib.insn = get_insns ();
  end_sequence ();
}
6221
6222 /* Scheduling pass is now finished. Free/reset static variables. */
6223 static void
6224 m68k_sched_md_finish_global (FILE *dump ATTRIBUTE_UNUSED,
6225 int verbose ATTRIBUTE_UNUSED)
6226 {
6227 sched_ib.insn = NULL;
6228
6229 free (sched_adjust_cost_state);
6230 sched_adjust_cost_state = NULL;
6231
6232 sched_mem_unit_code = 0;
6233
6234 free (sched_ib.records.adjust);
6235 sched_ib.records.adjust = NULL;
6236 sched_ib.records.n_insns = 0;
6237 max_insn_size = 0;
6238 }
6239
/* Implementation of targetm.sched.init () hook.
   It is invoked each time scheduler starts on the new block (basic block or
   extended basic block).  */
static void
m68k_sched_md_init (FILE *sched_dump ATTRIBUTE_UNUSED,
		    int sched_verbose ATTRIBUTE_UNUSED,
		    int n_insns ATTRIBUTE_UNUSED)
{
  switch (m68k_sched_cpu)
    {
    case CPU_CFV1:
    case CPU_CFV2:
      /* V1/V2: plain 6-word buffer.  */
      sched_ib.size = 6;
      break;

    case CPU_CFV3:
      /* V3: record-based buffer; start with no outstanding size
	 adjustments.  */
      sched_ib.size = sched_ib.records.n_insns * max_insn_size;

      memset (sched_ib.records.adjust, 0,
	      sched_ib.records.n_insns * sizeof (*sched_ib.records.adjust));
      sched_ib.records.adjust_index = 0;
      break;

    case CPU_CFV4:
      gcc_assert (!sched_ib.enabled_p);
      sched_ib.size = 0;
      break;

    default:
      gcc_unreachable ();
    }

  if (sched_ib.enabled_p)
    /* haifa-sched.c: schedule_block () calls advance_cycle () just before
       the first cycle.  Workaround that.  */
    sched_ib.filled = -2;
}
6277
6278 /* Implementation of targetm.sched.dfa_pre_advance_cycle () hook.
6279 It is invoked just before current cycle finishes and is used here
6280 to track if instruction buffer got its two words this cycle. */
6281 static void
6282 m68k_sched_dfa_pre_advance_cycle (void)
6283 {
6284 if (!sched_ib.enabled_p)
6285 return;
6286
6287 if (!cpu_unit_reservation_p (curr_state, sched_mem_unit_code))
6288 {
6289 sched_ib.filled += 2;
6290
6291 if (sched_ib.filled > sched_ib.size)
6292 sched_ib.filled = sched_ib.size;
6293 }
6294 }
6295
/* Implementation of targetm.sched.dfa_post_advance_cycle () hook.
   It is invoked just after new cycle begins and is used here
   to setup number of filled words in the instruction buffer so that
   instructions which won't have all their words prefetched would be
   stalled for a cycle.  */
static void
m68k_sched_dfa_post_advance_cycle (void)
{
  int i;

  if (!sched_ib.enabled_p)
    return;

  /* Setup number of prefetched instruction words in the instruction
     buffer.  Try to account for up to (max_insn_size - filled) fetch
     words via the ib pseudo-insn.  */
  i = max_insn_size - sched_ib.filled;

  while (--i >= 0)
    {
      if (state_transition (curr_state, sched_ib.insn) >= 0)
	/* Pick up scheduler state.  */
	++sched_ib.filled;
    }
}
6320
6321 /* Return X or Y (depending on OPX_P) operand of INSN,
6322 if it is an integer register, or NULL overwise. */
6323 static rtx
6324 sched_get_reg_operand (rtx_insn *insn, bool opx_p)
6325 {
6326 rtx op = NULL;
6327
6328 if (opx_p)
6329 {
6330 if (get_attr_opx_type (insn) == OPX_TYPE_RN)
6331 {
6332 op = sched_get_operand (insn, true);
6333 gcc_assert (op != NULL);
6334
6335 if (!reload_completed && !REG_P (op))
6336 return NULL;
6337 }
6338 }
6339 else
6340 {
6341 if (get_attr_opy_type (insn) == OPY_TYPE_RN)
6342 {
6343 op = sched_get_operand (insn, false);
6344 gcc_assert (op != NULL);
6345
6346 if (!reload_completed && !REG_P (op))
6347 return NULL;
6348 }
6349 }
6350
6351 return op;
6352 }
6353
6354 /* Return true, if X or Y (depending on OPX_P) operand of INSN
6355 is a MEM. */
6356 static bool
6357 sched_mem_operand_p (rtx_insn *insn, bool opx_p)
6358 {
6359 switch (sched_get_opxy_mem_type (insn, opx_p))
6360 {
6361 case OP_TYPE_MEM1:
6362 case OP_TYPE_MEM6:
6363 return true;
6364
6365 default:
6366 return false;
6367 }
6368 }
6369
6370 /* Return X or Y (depending on OPX_P) operand of INSN,
6371 if it is a MEM, or NULL overwise. */
6372 static rtx
6373 sched_get_mem_operand (rtx_insn *insn, bool must_read_p, bool must_write_p)
6374 {
6375 bool opx_p;
6376 bool opy_p;
6377
6378 opx_p = false;
6379 opy_p = false;
6380
6381 if (must_read_p)
6382 {
6383 opx_p = true;
6384 opy_p = true;
6385 }
6386
6387 if (must_write_p)
6388 {
6389 opx_p = true;
6390 opy_p = false;
6391 }
6392
6393 if (opy_p && sched_mem_operand_p (insn, false))
6394 return sched_get_operand (insn, false);
6395
6396 if (opx_p && sched_mem_operand_p (insn, true))
6397 return sched_get_operand (insn, true);
6398
6399 gcc_unreachable ();
6400 return NULL;
6401 }
6402
6403 /* Return non-zero if PRO modifies register used as part of
6404 address in CON. */
6405 int
6406 m68k_sched_address_bypass_p (rtx_insn *pro, rtx_insn *con)
6407 {
6408 rtx pro_x;
6409 rtx con_mem_read;
6410
6411 pro_x = sched_get_reg_operand (pro, true);
6412 if (pro_x == NULL)
6413 return 0;
6414
6415 con_mem_read = sched_get_mem_operand (con, true, false);
6416 gcc_assert (con_mem_read != NULL);
6417
6418 if (reg_mentioned_p (pro_x, con_mem_read))
6419 return 1;
6420
6421 return 0;
6422 }
6423
/* Helper function for m68k_sched_indexed_address_bypass_p.
   If PRO modifies a register used as index in CON,
   return scale of indexed memory access in CON.  Return zero otherwise.  */
static int
sched_get_indexed_address_scale (rtx_insn *pro, rtx_insn *con)
{
  rtx reg;
  rtx mem;
  struct m68k_address address;

  /* Register written by the producer, if any.  */
  reg = sched_get_reg_operand (pro, true);
  if (reg == NULL)
    return 0;

  /* Memory location read by the consumer.  */
  mem = sched_get_mem_operand (con, true, false);
  gcc_assert (mem != NULL && MEM_P (mem));

  if (!m68k_decompose_address (GET_MODE (mem), XEXP (mem, 0), reload_completed,
			       &address))
    gcc_unreachable ();

  /* NOTE(review): address.index is dereferenced without a null check;
     presumably the define_bypass guards ensure CON always uses an
     indexed address here -- confirm against the bypass conditions in
     the machine description.  */
  if (REGNO (reg) == REGNO (address.index))
    {
      gcc_assert (address.scale != 0);
      return address.scale;
    }

  return 0;
}
6453
/* Return non-zero if PRO modifies register used
   as index with scale 2 or 4 in CON.  */
int
m68k_sched_indexed_address_bypass_p (rtx_insn *pro, rtx_insn *con)
{
  gcc_assert (sched_cfv4_bypass_data.pro == NULL
	      && sched_cfv4_bypass_data.con == NULL
	      && sched_cfv4_bypass_data.scale == 0);

  switch (sched_get_indexed_address_scale (pro, con))
    {
    case 1:
      /* We can't have a variable latency bypass, so
	 remember to adjust the insn cost in adjust_cost hook.  */
      sched_cfv4_bypass_data.pro = pro;
      sched_cfv4_bypass_data.con = con;
      sched_cfv4_bypass_data.scale = 1;
      return 0;

    case 2:
    case 4:
      /* Scale 2 and 4 get the fast bypass directly.  */
      return 1;

    default:
      return 0;
    }
}
6481
6482 /* We generate a two-instructions program at M_TRAMP :
6483 movea.l &CHAIN_VALUE,%a0
6484 jmp FNADDR
6485 where %a0 can be modified by changing STATIC_CHAIN_REGNUM. */
6486
6487 static void
6488 m68k_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
6489 {
6490 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
6491 rtx mem;
6492
6493 gcc_assert (ADDRESS_REGNO_P (STATIC_CHAIN_REGNUM));
6494
6495 mem = adjust_address (m_tramp, HImode, 0);
6496 emit_move_insn (mem, GEN_INT(0x207C + ((STATIC_CHAIN_REGNUM-8) << 9)));
6497 mem = adjust_address (m_tramp, SImode, 2);
6498 emit_move_insn (mem, chain_value);
6499
6500 mem = adjust_address (m_tramp, HImode, 6);
6501 emit_move_insn (mem, GEN_INT(0x4EF9));
6502 mem = adjust_address (m_tramp, SImode, 8);
6503 emit_move_insn (mem, fnaddr);
6504
6505 FINALIZE_TRAMPOLINE (XEXP (m_tramp, 0));
6506 }
6507
6508 /* On the 68000, the RTS insn cannot pop anything.
6509 On the 68010, the RTD insn may be used to pop them if the number
6510 of args is fixed, but if the number is variable then the caller
6511 must pop them all. RTD can't be used for library calls now
6512 because the library is compiled with the Unix compiler.
6513 Use of RTD is a selectable option, since it is incompatible with
6514 standard Unix calling sequences. If the option is not selected,
6515 the caller must always pop the args. */
6516
6517 static int
6518 m68k_return_pops_args (tree fundecl, tree funtype, int size)
6519 {
6520 return ((TARGET_RTD
6521 && (!fundecl
6522 || TREE_CODE (fundecl) != IDENTIFIER_NODE)
6523 && (!stdarg_p (funtype)))
6524 ? size : 0);
6525 }
6526
6527 /* Make sure everything's fine if we *don't* have a given processor.
6528 This assumes that putting a register in fixed_regs will keep the
6529 compiler's mitts completely off it. We don't bother to zero it out
6530 of register classes. */
6531
6532 static void
6533 m68k_conditional_register_usage (void)
6534 {
6535 int i;
6536 HARD_REG_SET x;
6537 if (!TARGET_HARD_FLOAT)
6538 {
6539 COPY_HARD_REG_SET (x, reg_class_contents[(int)FP_REGS]);
6540 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
6541 if (TEST_HARD_REG_BIT (x, i))
6542 fixed_regs[i] = call_used_regs[i] = 1;
6543 }
6544 if (flag_pic)
6545 fixed_regs[PIC_REG] = call_used_regs[PIC_REG] = 1;
6546 }
6547
/* Register out-of-line library fallbacks for the atomic sync
   primitives, for objects up to UNITS_PER_WORD bytes wide.  */
static void
m68k_init_sync_libfuncs (void)
{
  init_sync_libfuncs (UNITS_PER_WORD);
}
6553
6554 /* Implements EPILOGUE_USES. All registers are live on exit from an
6555 interrupt routine. */
6556 bool
6557 m68k_epilogue_uses (int regno ATTRIBUTE_UNUSED)
6558 {
6559 return (reload_completed
6560 && (m68k_get_function_kind (current_function_decl)
6561 == m68k_fk_interrupt_handler));
6562 }
6563
6564
6565 /* Implement TARGET_C_EXCESS_PRECISION.
6566
6567 Set the value of FLT_EVAL_METHOD in float.h. When using 68040 fp
6568 instructions, we get proper intermediate rounding, otherwise we
6569 get extended precision results. */
6570
6571 static enum flt_eval_method
6572 m68k_excess_precision (enum excess_precision_type type)
6573 {
6574 switch (type)
6575 {
6576 case EXCESS_PRECISION_TYPE_FAST:
6577 /* The fastest type to promote to will always be the native type,
6578 whether that occurs with implicit excess precision or
6579 otherwise. */
6580 return FLT_EVAL_METHOD_PROMOTE_TO_FLOAT;
6581 case EXCESS_PRECISION_TYPE_STANDARD:
6582 case EXCESS_PRECISION_TYPE_IMPLICIT:
6583 /* Otherwise, the excess precision we want when we are
6584 in a standards compliant mode, and the implicit precision we
6585 provide can be identical. */
6586 if (TARGET_68040 || ! TARGET_68881)
6587 return FLT_EVAL_METHOD_PROMOTE_TO_FLOAT;
6588
6589 return FLT_EVAL_METHOD_PROMOTE_TO_LONG_DOUBLE;
6590 default:
6591 gcc_unreachable ();
6592 }
6593 return FLT_EVAL_METHOD_UNPREDICTABLE;
6594 }
6595
6596 #include "gt-m68k.h"