/* Subroutines for insn-output.c for Motorola 68000 family.
   Copyright (C) 1987-2019 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#define IN_TARGET_CODE 1

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "cfghooks.h"
#include "tree.h"
#include "stringpool.h"
#include "attribs.h"
#include "rtl.h"
#include "df.h"
#include "alias.h"
#include "fold-const.h"
#include "calls.h"
#include "stor-layout.h"
#include "varasm.h"
#include "regs.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "flags.h"
#include "expmed.h"
#include "dojump.h"
#include "explow.h"
#include "memmodel.h"
#include "emit-rtl.h"
#include "stmt.h"
#include "expr.h"
#include "reload.h"
#include "tm_p.h"
#include "target.h"
#include "debug.h"
#include "cfgrtl.h"
#include "cfganal.h"
#include "lcm.h"
#include "cfgbuild.h"
#include "cfgcleanup.h"
/* ??? Need to add a dependency between m68k.o and sched-int.h.  */
#include "sched-int.h"
#include "insn-codes.h"
#include "opts.h"
#include "optabs.h"
#include "builtins.h"
#include "rtl-iter.h"
#include "toplev.h"

/* This file should be included last.  */
#include "target-def.h"

enum reg_class regno_reg_class[] =
{
  DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
  DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
  ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
  ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  ADDR_REGS
};


/* The minimum number of integer registers that we want to save with the
   movem instruction.  Using two movel instructions instead of a single
   moveml is about 15% faster for the 68020 and 68030 at no expense in
   code size.  */
#define MIN_MOVEM_REGS 3
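
/* For example, with only %d2 and %d3 to save, the two instructions

       move.l %d2,-(%sp)
       move.l %d3,-(%sp)

   occupy the same four bytes as the single

       movem.l %d2-%d3,-(%sp)

   but execute faster on the 68020 and 68030, hence the threshold of
   three registers above.  (Illustrative operands only.)  */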

/* The minimum number of floating point registers that we want to save
   with the fmovem instruction.  */
#define MIN_FMOVEM_REGS 1

/* Structure describing stack frame layout.  */
struct m68k_frame
{
  /* Stack pointer to frame pointer offset.  */
  HOST_WIDE_INT offset;

  /* Offset of FPU registers.  */
  HOST_WIDE_INT foffset;

  /* Frame size in bytes (rounded up).  */
  HOST_WIDE_INT size;

  /* Data and address register.  */
  int reg_no;
  unsigned int reg_mask;

  /* FPU registers.  */
  int fpu_no;
  unsigned int fpu_mask;

  /* Offsets relative to ARG_POINTER.  */
  HOST_WIDE_INT frame_pointer_offset;
  HOST_WIDE_INT stack_pointer_offset;

  /* Function which the above information refers to.  */
  int funcdef_no;
};

/* Current frame information calculated by m68k_compute_frame_layout().  */
static struct m68k_frame current_frame;

/* Structure describing an m68k address.

   If CODE is UNKNOWN, the address is BASE + INDEX * SCALE + OFFSET,
   with null fields evaluating to 0.  Here:

   - BASE satisfies m68k_legitimate_base_reg_p
   - INDEX satisfies m68k_legitimate_index_reg_p
   - OFFSET satisfies m68k_legitimate_constant_address_p

   INDEX is either HImode or SImode.  The other fields are SImode.

   If CODE is PRE_DEC, the address is -(BASE).  If CODE is POST_INC,
   the address is (BASE)+.  */
struct m68k_address {
  enum rtx_code code;
  rtx base;
  rtx index;
  rtx offset;
  int scale;
};
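
/* For example, the 68020 address (8,%a0,%d1.l*4) would be described by
   CODE = UNKNOWN, BASE = %a0, INDEX = %d1, SCALE = 4 and OFFSET = 8.
   (An illustrative decomposition; see m68k_decompose_index below.)  */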

static int m68k_sched_adjust_cost (rtx_insn *, int, rtx_insn *, int,
				   unsigned int);
static int m68k_sched_issue_rate (void);
static int m68k_sched_variable_issue (FILE *, int, rtx_insn *, int);
static void m68k_sched_md_init_global (FILE *, int, int);
static void m68k_sched_md_finish_global (FILE *, int);
static void m68k_sched_md_init (FILE *, int, int);
static void m68k_sched_dfa_pre_advance_cycle (void);
static void m68k_sched_dfa_post_advance_cycle (void);
static int m68k_sched_first_cycle_multipass_dfa_lookahead (void);

static bool m68k_can_eliminate (const int, const int);
static void m68k_conditional_register_usage (void);
static bool m68k_legitimate_address_p (machine_mode, rtx, bool);
static void m68k_option_override (void);
static void m68k_override_options_after_change (void);
static rtx find_addr_reg (rtx);
static const char *singlemove_string (rtx *);
static void m68k_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				  HOST_WIDE_INT, tree);
static rtx m68k_struct_value_rtx (tree, int);
static tree m68k_handle_fndecl_attribute (tree *node, tree name,
					  tree args, int flags,
					  bool *no_add_attrs);
static void m68k_compute_frame_layout (void);
static bool m68k_save_reg (unsigned int regno, bool interrupt_handler);
static bool m68k_ok_for_sibcall_p (tree, tree);
static bool m68k_tls_symbol_p (rtx);
static rtx m68k_legitimize_address (rtx, rtx, machine_mode);
static bool m68k_rtx_costs (rtx, machine_mode, int, int, int *, bool);
#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
static bool m68k_return_in_memory (const_tree, const_tree);
#endif
static void m68k_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
static void m68k_trampoline_init (rtx, tree, rtx);
static poly_int64 m68k_return_pops_args (tree, tree, poly_int64);
static rtx m68k_delegitimize_address (rtx);
static void m68k_function_arg_advance (cumulative_args_t, machine_mode,
				       const_tree, bool);
static rtx m68k_function_arg (cumulative_args_t, machine_mode,
			      const_tree, bool);
static bool m68k_cannot_force_const_mem (machine_mode mode, rtx x);
static bool m68k_output_addr_const_extra (FILE *, rtx);
static void m68k_init_sync_libfuncs (void) ATTRIBUTE_UNUSED;
static enum flt_eval_method
m68k_excess_precision (enum excess_precision_type);
static unsigned int m68k_hard_regno_nregs (unsigned int, machine_mode);
static bool m68k_hard_regno_mode_ok (unsigned int, machine_mode);
static bool m68k_modes_tieable_p (machine_mode, machine_mode);
static machine_mode m68k_promote_function_mode (const_tree, machine_mode,
						int *, const_tree, int);
\f
/* Initialize the GCC target structure.  */

#if INT_OP_GROUP == INT_OP_DOT_WORD
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
#endif

#if INT_OP_GROUP == INT_OP_NO_DOT
#undef TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tbyte\t"
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tshort\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tlong\t"
#endif

#if INT_OP_GROUP == INT_OP_DC
#undef TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tdc.b\t"
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tdc.w\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tdc.l\t"
#endif

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK m68k_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_ASM_FILE_START_APP_OFF
#define TARGET_ASM_FILE_START_APP_OFF true

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS m68k_legitimize_address

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST m68k_sched_adjust_cost

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE m68k_sched_issue_rate

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE m68k_sched_variable_issue

#undef TARGET_SCHED_INIT_GLOBAL
#define TARGET_SCHED_INIT_GLOBAL m68k_sched_md_init_global

#undef TARGET_SCHED_FINISH_GLOBAL
#define TARGET_SCHED_FINISH_GLOBAL m68k_sched_md_finish_global

#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT m68k_sched_md_init

#undef TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE
#define TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE m68k_sched_dfa_pre_advance_cycle

#undef TARGET_SCHED_DFA_POST_ADVANCE_CYCLE
#define TARGET_SCHED_DFA_POST_ADVANCE_CYCLE m68k_sched_dfa_post_advance_cycle

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
  m68k_sched_first_cycle_multipass_dfa_lookahead

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE m68k_option_override

#undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m68k_override_options_after_change

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS m68k_rtx_costs

#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE m68k_attribute_table

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX m68k_struct_value_rtx

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM m68k_cannot_force_const_mem

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL m68k_ok_for_sibcall_p

#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY m68k_return_in_memory
#endif

#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS (true)

#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL m68k_output_dwarf_dtprel
#endif

#undef TARGET_LRA_P
#define TARGET_LRA_P hook_bool_void_false

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P m68k_legitimate_address_p

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE m68k_can_eliminate

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE m68k_conditional_register_usage

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT m68k_trampoline_init

#undef TARGET_RETURN_POPS_ARGS
#define TARGET_RETURN_POPS_ARGS m68k_return_pops_args

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS m68k_delegitimize_address

#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG m68k_function_arg

#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE m68k_function_arg_advance

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P m68k_legitimate_constant_p

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA m68k_output_addr_const_extra

#undef TARGET_C_EXCESS_PRECISION
#define TARGET_C_EXCESS_PRECISION m68k_excess_precision

/* The value stored by TAS.  */
#undef TARGET_ATOMIC_TEST_AND_SET_TRUEVAL
#define TARGET_ATOMIC_TEST_AND_SET_TRUEVAL 128

#undef TARGET_HARD_REGNO_NREGS
#define TARGET_HARD_REGNO_NREGS m68k_hard_regno_nregs
#undef TARGET_HARD_REGNO_MODE_OK
#define TARGET_HARD_REGNO_MODE_OK m68k_hard_regno_mode_ok

#undef TARGET_MODES_TIEABLE_P
#define TARGET_MODES_TIEABLE_P m68k_modes_tieable_p

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE m68k_promote_function_mode

#undef TARGET_HAVE_SPECULATION_SAFE_VALUE
#define TARGET_HAVE_SPECULATION_SAFE_VALUE speculation_safe_value_not_needed

static const struct attribute_spec m68k_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
       affects_type_identity, handler, exclude } */
  { "interrupt", 0, 0, true, false, false, false,
    m68k_handle_fndecl_attribute, NULL },
  { "interrupt_handler", 0, 0, true, false, false, false,
    m68k_handle_fndecl_attribute, NULL },
  { "interrupt_thread", 0, 0, true, false, false, false,
    m68k_handle_fndecl_attribute, NULL },
  { NULL, 0, 0, false, false, false, false, NULL, NULL }
};
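
/* These attributes are attached to function declarations, e.g.:

       void __attribute__ ((interrupt_handler)) timer_isr (void);

   (An illustrative declaration; "interrupt_thread" is additionally
   restricted to fido targets by the handler below.)  */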

struct gcc_target targetm = TARGET_INITIALIZER;
\f
/* Base flags for 68k ISAs.  */
#define FL_FOR_isa_00 FL_ISA_68000
#define FL_FOR_isa_10 (FL_FOR_isa_00 | FL_ISA_68010)
/* FL_68881 controls the default setting of -m68881.  GCC has traditionally
   generated 68881 code for 68020 and 68030 targets unless explicitly told
   not to.  */
#define FL_FOR_isa_20 (FL_FOR_isa_10 | FL_ISA_68020 \
		       | FL_BITFIELD | FL_68881 | FL_CAS)
#define FL_FOR_isa_40 (FL_FOR_isa_20 | FL_ISA_68040)
#define FL_FOR_isa_cpu32 (FL_FOR_isa_10 | FL_ISA_68020)

/* Base flags for ColdFire ISAs.  */
#define FL_FOR_isa_a (FL_COLDFIRE | FL_ISA_A)
#define FL_FOR_isa_aplus (FL_FOR_isa_a | FL_ISA_APLUS | FL_CF_USP)
/* Note ISA_B doesn't necessarily include USP (user stack pointer) support.  */
#define FL_FOR_isa_b (FL_FOR_isa_a | FL_ISA_B | FL_CF_HWDIV)
/* ISA_C is not upwardly compatible with ISA_B.  */
#define FL_FOR_isa_c (FL_FOR_isa_a | FL_ISA_C | FL_CF_USP)

enum m68k_isa
{
  /* Traditional 68000 instruction sets.  */
  isa_00,
  isa_10,
  isa_20,
  isa_40,
  isa_cpu32,
  /* ColdFire instruction set variants.  */
  isa_a,
  isa_aplus,
  isa_b,
  isa_c,
  isa_max
};

/* Information about one of the -march, -mcpu or -mtune arguments.  */
struct m68k_target_selection
{
  /* The argument being described.  */
  const char *name;

  /* For -mcpu, this is the device selected by the option.
     For -mtune and -march, it is a representative device
     for the microarchitecture or ISA respectively.  */
  enum target_device device;

  /* The M68K_DEVICE fields associated with DEVICE.  See the comment
     in m68k-devices.def for details.  FAMILY is only valid for -mcpu.  */
  const char *family;
  enum uarch_type microarch;
  enum m68k_isa isa;
  unsigned long flags;
};

/* A list of all devices in m68k-devices.def.  Used for -mcpu selection.  */
static const struct m68k_target_selection all_devices[] =
{
#define M68K_DEVICE(NAME,ENUM_VALUE,FAMILY,MULTILIB,MICROARCH,ISA,FLAGS) \
  { NAME, ENUM_VALUE, FAMILY, u##MICROARCH, ISA, FLAGS | FL_FOR_##ISA },
#include "m68k-devices.def"
#undef M68K_DEVICE
  { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
};

/* A list of all ISAs, mapping each one to a representative device.
   Used for -march selection.  */
static const struct m68k_target_selection all_isas[] =
{
#define M68K_ISA(NAME,DEVICE,MICROARCH,ISA,FLAGS) \
  { NAME, DEVICE, NULL, u##MICROARCH, ISA, FLAGS },
#include "m68k-isas.def"
#undef M68K_ISA
  { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
};

/* A list of all microarchitectures, mapping each one to a representative
   device.  Used for -mtune selection.  */
static const struct m68k_target_selection all_microarchs[] =
{
#define M68K_MICROARCH(NAME,DEVICE,MICROARCH,ISA,FLAGS) \
  { NAME, DEVICE, NULL, u##MICROARCH, ISA, FLAGS },
#include "m68k-microarchs.def"
#undef M68K_MICROARCH
  { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
};
\f
/* The entries associated with the -mcpu, -march and -mtune settings,
   or null for options that have not been used.  */
const struct m68k_target_selection *m68k_cpu_entry;
const struct m68k_target_selection *m68k_arch_entry;
const struct m68k_target_selection *m68k_tune_entry;

/* Which CPU we are generating code for.  */
enum target_device m68k_cpu;

/* Which microarchitecture to tune for.  */
enum uarch_type m68k_tune;

/* Which FPU to use.  */
enum fpu_type m68k_fpu;

/* The set of FL_* flags that apply to the target processor.  */
unsigned int m68k_cpu_flags;

/* The set of FL_* flags that apply to the processor to be tuned for.  */
unsigned int m68k_tune_flags;

/* Asm templates for calling or jumping to an arbitrary symbolic address,
   or NULL if such calls or jumps are not supported.  The address is held
   in operand 0.  */
const char *m68k_symbolic_call;
const char *m68k_symbolic_jump;

/* Enum variable that corresponds to m68k_symbolic_call values.  */
enum M68K_SYMBOLIC_CALL m68k_symbolic_call_var;

\f
/* Implement TARGET_OPTION_OVERRIDE.  */

static void
m68k_option_override (void)
{
  const struct m68k_target_selection *entry;
  unsigned long target_mask;

  if (global_options_set.x_m68k_arch_option)
    m68k_arch_entry = &all_isas[m68k_arch_option];

  if (global_options_set.x_m68k_cpu_option)
    m68k_cpu_entry = &all_devices[(int) m68k_cpu_option];

  if (global_options_set.x_m68k_tune_option)
    m68k_tune_entry = &all_microarchs[(int) m68k_tune_option];

  /* User can choose:

     -mcpu=
     -march=
     -mtune=

     -march=ARCH should generate code that runs any processor
     implementing architecture ARCH.  -mcpu=CPU should override -march
     and should generate code that runs on processor CPU, making free
     use of any instructions that CPU understands.  -mtune=UARCH applies
     on top of -mcpu or -march and optimizes the code for UARCH.  It does
     not change the target architecture.  */
  if (m68k_cpu_entry)
    {
      /* Complain if the -march setting is for a different microarchitecture,
	 or includes flags that the -mcpu setting doesn't.  */
      if (m68k_arch_entry
	  && (m68k_arch_entry->microarch != m68k_cpu_entry->microarch
	      || (m68k_arch_entry->flags & ~m68k_cpu_entry->flags) != 0))
	warning (0, "%<-mcpu=%s%> conflicts with %<-march=%s%>",
		 m68k_cpu_entry->name, m68k_arch_entry->name);

      entry = m68k_cpu_entry;
    }
  else
    entry = m68k_arch_entry;

  if (!entry)
    entry = all_devices + TARGET_CPU_DEFAULT;

  m68k_cpu_flags = entry->flags;

  /* Use the architecture setting to derive default values for
     certain flags.  */
  target_mask = 0;

  /* ColdFire is lenient about alignment.  */
  if (!TARGET_COLDFIRE)
    target_mask |= MASK_STRICT_ALIGNMENT;

  if ((m68k_cpu_flags & FL_BITFIELD) != 0)
    target_mask |= MASK_BITFIELD;
  if ((m68k_cpu_flags & FL_CF_HWDIV) != 0)
    target_mask |= MASK_CF_HWDIV;
  if ((m68k_cpu_flags & (FL_68881 | FL_CF_FPU)) != 0)
    target_mask |= MASK_HARD_FLOAT;
  target_flags |= target_mask & ~target_flags_explicit;

  /* Set the directly-usable versions of the -mcpu and -mtune settings.  */
  m68k_cpu = entry->device;
  if (m68k_tune_entry)
    {
      m68k_tune = m68k_tune_entry->microarch;
      m68k_tune_flags = m68k_tune_entry->flags;
    }
#ifdef M68K_DEFAULT_TUNE
  else if (!m68k_cpu_entry && !m68k_arch_entry)
    {
      enum target_device dev;
      dev = all_microarchs[M68K_DEFAULT_TUNE].device;
      m68k_tune_flags = all_devices[dev].flags;
    }
#endif
  else
    {
      m68k_tune = entry->microarch;
      m68k_tune_flags = entry->flags;
    }

  /* Set the type of FPU.  */
  m68k_fpu = (!TARGET_HARD_FLOAT ? FPUTYPE_NONE
	      : (m68k_cpu_flags & FL_COLDFIRE) != 0 ? FPUTYPE_COLDFIRE
	      : FPUTYPE_68881);
  /* Sanity check to ensure that -msep-data and -mid-shared-library are
     not both specified together.  Doing so simply doesn't make sense.  */
  if (TARGET_SEP_DATA && TARGET_ID_SHARED_LIBRARY)
    error ("cannot specify both %<-msep-data%> and %<-mid-shared-library%>");

  /* If we're generating code for a separate A5 relative data segment,
     we've got to enable -fPIC as well.  This might be relaxable to
     -fpic but it hasn't been tested properly.  */
  if (TARGET_SEP_DATA || TARGET_ID_SHARED_LIBRARY)
    flag_pic = 2;

  /* -mpcrel -fPIC uses 32-bit pc-relative displacements.  Raise an
     error if the target does not support them.  */
  if (TARGET_PCREL && !TARGET_68020 && flag_pic == 2)
    error ("%<-mpcrel%> %<-fPIC%> is not currently supported on the selected CPU");

  /* ??? A historic way of turning on pic, or is this intended to
     be an embedded thing that doesn't have the same name binding
     significance that it does on hosted ELF systems?  */
  if (TARGET_PCREL && flag_pic == 0)
    flag_pic = 1;

  if (!flag_pic)
    {
      m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_JSR;

      m68k_symbolic_jump = "jra %a0";
    }
  else if (TARGET_ID_SHARED_LIBRARY)
    /* All addresses must be loaded from the GOT.  */
    ;
  else if (TARGET_68020 || TARGET_ISAB || TARGET_ISAC)
    {
      if (TARGET_PCREL)
	m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_C;
      else
	m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_P;

      if (TARGET_ISAC)
	/* No unconditional long branch */;
      else if (TARGET_PCREL)
	m68k_symbolic_jump = "bra%.l %c0";
      else
	m68k_symbolic_jump = "bra%.l %p0";
      /* Turn off function cse if we are doing PIC.  We always want
	 function call to be done as `bsr foo@PLTPC'.  */
      /* ??? It's traditional to do this for -mpcrel too, but it isn't
	 clear how intentional that is.  */
      flag_no_function_cse = 1;
    }

  switch (m68k_symbolic_call_var)
    {
    case M68K_SYMBOLIC_CALL_JSR:
      m68k_symbolic_call = "jsr %a0";
      break;

    case M68K_SYMBOLIC_CALL_BSR_C:
      m68k_symbolic_call = "bsr%.l %c0";
      break;

    case M68K_SYMBOLIC_CALL_BSR_P:
      m68k_symbolic_call = "bsr%.l %p0";
      break;

    case M68K_SYMBOLIC_CALL_NONE:
      gcc_assert (m68k_symbolic_call == NULL);
      break;

    default:
      gcc_unreachable ();
    }

#ifndef ASM_OUTPUT_ALIGN_WITH_NOP
  parse_alignment_opts ();
  int label_alignment = align_labels.levels[0].get_value ();
  if (label_alignment > 2)
    {
      warning (0, "%<-falign-labels=%d%> is not supported", label_alignment);
      str_align_labels = "1";
    }

  int loop_alignment = align_loops.levels[0].get_value ();
  if (loop_alignment > 2)
    {
      warning (0, "%<-falign-loops=%d%> is not supported", loop_alignment);
      str_align_loops = "1";
    }
#endif

  if ((opt_fstack_limit_symbol_arg != NULL || opt_fstack_limit_register_no >= 0)
      && !TARGET_68020)
    {
      warning (0, "%<-fstack-limit-%> options are not supported on this CPU");
      opt_fstack_limit_symbol_arg = NULL;
      opt_fstack_limit_register_no = -1;
    }

  SUBTARGET_OVERRIDE_OPTIONS;

  /* Setup scheduling options.  */
  if (TUNE_CFV1)
    m68k_sched_cpu = CPU_CFV1;
  else if (TUNE_CFV2)
    m68k_sched_cpu = CPU_CFV2;
  else if (TUNE_CFV3)
    m68k_sched_cpu = CPU_CFV3;
  else if (TUNE_CFV4)
    m68k_sched_cpu = CPU_CFV4;
  else
    {
      m68k_sched_cpu = CPU_UNKNOWN;
      flag_schedule_insns = 0;
      flag_schedule_insns_after_reload = 0;
      flag_modulo_sched = 0;
      flag_live_range_shrinkage = 0;
    }

  if (m68k_sched_cpu != CPU_UNKNOWN)
    {
      if ((m68k_cpu_flags & (FL_CF_EMAC | FL_CF_EMAC_B)) != 0)
	m68k_sched_mac = MAC_CF_EMAC;
      else if ((m68k_cpu_flags & FL_CF_MAC) != 0)
	m68k_sched_mac = MAC_CF_MAC;
      else
	m68k_sched_mac = MAC_NO;
    }
}

/* Implement TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE.  */

static void
m68k_override_options_after_change (void)
{
  if (m68k_sched_cpu == CPU_UNKNOWN)
    {
      flag_schedule_insns = 0;
      flag_schedule_insns_after_reload = 0;
      flag_modulo_sched = 0;
      flag_live_range_shrinkage = 0;
    }
}

/* Generate a macro of the form __mPREFIX_cpu_NAME, where PREFIX is the
   given argument and NAME is the argument passed to -mcpu.  Return NULL
   if -mcpu was not passed.  */

const char *
m68k_cpp_cpu_ident (const char *prefix)
{
  if (!m68k_cpu_entry)
    return NULL;
  return concat ("__m", prefix, "_cpu_", m68k_cpu_entry->name, NULL);
}

/* Generate a macro of the form __mPREFIX_family_NAME, where PREFIX is the
   given argument and NAME is the name of the representative device for
   the -mcpu argument's family.  Return NULL if -mcpu was not passed.  */

const char *
m68k_cpp_cpu_family (const char *prefix)
{
  if (!m68k_cpu_entry)
    return NULL;
  return concat ("__m", prefix, "_family_", m68k_cpu_entry->family, NULL);
}
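
/* For instance, given -mcpu=5208 and PREFIX "cf", the two functions
   above return identifiers of the form "__mcf_cpu_5208" and
   "__mcf_family_NAME", where NAME is the FAMILY field recorded for the
   device in m68k-devices.def.  (Illustrative values only.)  */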
\f
/* Return m68k_fk_interrupt_handler if FUNC has an "interrupt" or
   "interrupt_handler" attribute and m68k_fk_interrupt_thread if FUNC
   has an "interrupt_thread" attribute.  Otherwise, return
   m68k_fk_normal_function.  */

enum m68k_function_kind
m68k_get_function_kind (tree func)
{
  tree a;

  gcc_assert (TREE_CODE (func) == FUNCTION_DECL);

  a = lookup_attribute ("interrupt", DECL_ATTRIBUTES (func));
  if (a != NULL_TREE)
    return m68k_fk_interrupt_handler;

  a = lookup_attribute ("interrupt_handler", DECL_ATTRIBUTES (func));
  if (a != NULL_TREE)
    return m68k_fk_interrupt_handler;

  a = lookup_attribute ("interrupt_thread", DECL_ATTRIBUTES (func));
  if (a != NULL_TREE)
    return m68k_fk_interrupt_thread;

  return m68k_fk_normal_function;
}

/* Handle an attribute requiring a FUNCTION_DECL; arguments as in
   struct attribute_spec.handler.  */
static tree
m68k_handle_fndecl_attribute (tree *node, tree name,
			      tree args ATTRIBUTE_UNUSED,
			      int flags ATTRIBUTE_UNUSED,
			      bool *no_add_attrs)
{
  if (TREE_CODE (*node) != FUNCTION_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
	       name);
      *no_add_attrs = true;
    }

  if (m68k_get_function_kind (*node) != m68k_fk_normal_function)
    {
      error ("multiple interrupt attributes not allowed");
      *no_add_attrs = true;
    }

  if (!TARGET_FIDOA
      && !strcmp (IDENTIFIER_POINTER (name), "interrupt_thread"))
    {
      error ("interrupt_thread is available only on fido");
      *no_add_attrs = true;
    }

  return NULL_TREE;
}

static void
m68k_compute_frame_layout (void)
{
  int regno, saved;
  unsigned int mask;
  enum m68k_function_kind func_kind =
    m68k_get_function_kind (current_function_decl);
  bool interrupt_handler = func_kind == m68k_fk_interrupt_handler;
  bool interrupt_thread = func_kind == m68k_fk_interrupt_thread;

  /* Only compute the frame once per function.
     Don't cache information until reload has been completed.  */
  if (current_frame.funcdef_no == current_function_funcdef_no
      && reload_completed)
    return;

  current_frame.size = (get_frame_size () + 3) & -4;

  mask = saved = 0;

  /* Interrupt thread does not need to save any register.  */
  if (!interrupt_thread)
    for (regno = 0; regno < 16; regno++)
      if (m68k_save_reg (regno, interrupt_handler))
	{
	  mask |= 1 << (regno - D0_REG);
	  saved++;
	}
  current_frame.offset = saved * 4;
  current_frame.reg_no = saved;
  current_frame.reg_mask = mask;

  current_frame.foffset = 0;
  mask = saved = 0;
  if (TARGET_HARD_FLOAT)
    {
      /* Interrupt thread does not need to save any register.  */
      if (!interrupt_thread)
	for (regno = 16; regno < 24; regno++)
	  if (m68k_save_reg (regno, interrupt_handler))
	    {
	      mask |= 1 << (regno - FP0_REG);
	      saved++;
	    }
      current_frame.foffset = saved * TARGET_FP_REG_SIZE;
      current_frame.offset += current_frame.foffset;
    }
  current_frame.fpu_no = saved;
  current_frame.fpu_mask = mask;

  /* Remember what function this frame refers to.  */
  current_frame.funcdef_no = current_function_funcdef_no;
}
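
/* A sketch of the frame this describes in the common case with a frame
   pointer, from higher to lower addresses:

       incoming arguments
       return address
       saved %fp
       local variables              (current_frame.size bytes)
       saved FP registers
       saved data/address registers (current_frame.offset bytes in all)

   (An assumed typical picture; the details vary with the options and
   registers in use.)  */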

/* Worker function for TARGET_CAN_ELIMINATE.  */

bool
m68k_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
{
  return (to == STACK_POINTER_REGNUM ? ! frame_pointer_needed : true);
}

HOST_WIDE_INT
m68k_initial_elimination_offset (int from, int to)
{
  int argptr_offset;
  /* The arg pointer points 8 bytes before the start of the arguments,
     as defined by FIRST_PARM_OFFSET.  This makes it coincident with the
     frame pointer in most frames.  */
  argptr_offset = frame_pointer_needed ? 0 : UNITS_PER_WORD;
  if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
    return argptr_offset;

  m68k_compute_frame_layout ();

  gcc_assert (to == STACK_POINTER_REGNUM);
  switch (from)
    {
    case ARG_POINTER_REGNUM:
      return current_frame.offset + current_frame.size - argptr_offset;
    case FRAME_POINTER_REGNUM:
      return current_frame.offset + current_frame.size;
    default:
      gcc_unreachable ();
    }
}

/* Refer to the array `regs_ever_live' to determine which registers
   to save; `regs_ever_live[I]' is nonzero if register number I
   is ever used in the function.  This function is responsible for
   knowing which registers should not be saved even if used.
   Return true if we need to save REGNO.  */

static bool
m68k_save_reg (unsigned int regno, bool interrupt_handler)
{
  if (flag_pic && regno == PIC_REG)
    {
      if (crtl->saves_all_registers)
	return true;
      if (crtl->uses_pic_offset_table)
	return true;
      /* Reload may introduce constant pool references into a function
	 that hitherto didn't need a PIC register.  Note that the test
	 above will not catch that case because we will only set
	 crtl->uses_pic_offset_table when emitting
	 the address reloads.  */
      if (crtl->uses_const_pool)
	return true;
    }

  if (crtl->calls_eh_return)
    {
      unsigned int i;
      for (i = 0; ; i++)
	{
	  unsigned int test = EH_RETURN_DATA_REGNO (i);
	  if (test == INVALID_REGNUM)
	    break;
	  if (test == regno)
	    return true;
	}
    }

  /* Fixed regs we never touch.  */
  if (fixed_regs[regno])
    return false;

  /* The frame pointer (if it is such) is handled specially.  */
  if (regno == FRAME_POINTER_REGNUM && frame_pointer_needed)
    return false;

  /* Interrupt handlers must also save call_used_regs
     if they are live or when calling nested functions.  */
  if (interrupt_handler)
    {
      if (df_regs_ever_live_p (regno))
	return true;

      if (!crtl->is_leaf && call_used_regs[regno])
	return true;
    }

  /* Never need to save registers that aren't touched.  */
  if (!df_regs_ever_live_p (regno))
    return false;

  /* Otherwise save everything that isn't call-clobbered.  */
  return !call_used_regs[regno];
}

/* Emit RTL for a MOVEM or FMOVEM instruction.  BASE + OFFSET represents
   the lowest memory address.  COUNT is the number of registers to be
   moved, with register REGNO + I being moved if bit I of MASK is set.
   STORE_P specifies the direction of the move and ADJUST_STACK_P says
   whether or not this is a pre-decrement (if STORE_P) or post-increment
   (if !STORE_P) operation.  */

static rtx_insn *
m68k_emit_movem (rtx base, HOST_WIDE_INT offset,
		 unsigned int count, unsigned int regno,
		 unsigned int mask, bool store_p, bool adjust_stack_p)
{
  int i;
  rtx body, addr, src, operands[2];
  machine_mode mode;

  body = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (adjust_stack_p + count));
  mode = reg_raw_mode[regno];
  i = 0;

  if (adjust_stack_p)
    {
      src = plus_constant (Pmode, base,
			   (count
			    * GET_MODE_SIZE (mode)
			    * (HOST_WIDE_INT) (store_p ? -1 : 1)));
      XVECEXP (body, 0, i++) = gen_rtx_SET (base, src);
    }

  for (; mask != 0; mask >>= 1, regno++)
    if (mask & 1)
      {
	addr = plus_constant (Pmode, base, offset);
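	/* Index with STORE_P so that operands[0] is always the SET
	   destination: the memory slot when storing to memory, the
	   register when loading from it.  */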
	operands[!store_p] = gen_frame_mem (mode, addr);
	operands[store_p] = gen_rtx_REG (mode, regno);
	XVECEXP (body, 0, i++)
	  = gen_rtx_SET (operands[0], operands[1]);
	offset += GET_MODE_SIZE (mode);
      }
  gcc_assert (i == XVECLEN (body, 0));

  return emit_insn (body);
}

/* Make INSN a frame-related instruction.  */

static void
m68k_set_frame_related (rtx_insn *insn)
{
  rtx body;
  int i;

  RTX_FRAME_RELATED_P (insn) = 1;
  body = PATTERN (insn);
  if (GET_CODE (body) == PARALLEL)
    for (i = 0; i < XVECLEN (body, 0); i++)
      RTX_FRAME_RELATED_P (XVECEXP (body, 0, i)) = 1;
}

/* Emit RTL for the "prologue" define_expand.  */

void
m68k_expand_prologue (void)
{
  HOST_WIDE_INT fsize_with_regs;
  rtx limit, src, dest;

  m68k_compute_frame_layout ();

  if (flag_stack_usage_info)
    current_function_static_stack_size
      = current_frame.size + current_frame.offset;

  /* If the stack limit is a symbol, we can check it here,
     before actually allocating the space.  */
  if (crtl->limit_stack
      && GET_CODE (stack_limit_rtx) == SYMBOL_REF)
    {
      limit = plus_constant (Pmode, stack_limit_rtx, current_frame.size + 4);
      if (!m68k_legitimate_constant_p (Pmode, limit))
	{
	  emit_move_insn (gen_rtx_REG (Pmode, D0_REG), limit);
	  limit = gen_rtx_REG (Pmode, D0_REG);
	}
      emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode,
					    stack_pointer_rtx, limit),
			       stack_pointer_rtx, limit,
			       const1_rtx));
    }

  fsize_with_regs = current_frame.size;
  if (TARGET_COLDFIRE)
    {
      /* ColdFire's move multiple instructions do not allow pre-decrement
	 addressing.  Add the size of movem saves to the initial stack
	 allocation instead.  */
      if (current_frame.reg_no >= MIN_MOVEM_REGS)
	fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
      if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
	fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
    }

  if (frame_pointer_needed)
    {
      if (fsize_with_regs == 0 && TUNE_68040)
	{
	  /* On the 68040, two separate moves are faster than link.w 0.  */
	  dest = gen_frame_mem (Pmode,
				gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
	  m68k_set_frame_related (emit_move_insn (dest, frame_pointer_rtx));
	  m68k_set_frame_related (emit_move_insn (frame_pointer_rtx,
						  stack_pointer_rtx));
	}
      else if (fsize_with_regs < 0x8000 || TARGET_68020)
	m68k_set_frame_related
	  (emit_insn (gen_link (frame_pointer_rtx,
				GEN_INT (-4 - fsize_with_regs))));
      else
	{
	  m68k_set_frame_related
	    (emit_insn (gen_link (frame_pointer_rtx, GEN_INT (-4))));
	  m68k_set_frame_related
	    (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-fsize_with_regs))));
	}

      /* If the frame pointer is needed, emit a special barrier that
	 will prevent the scheduler from moving stores to the frame
	 before the stack adjustment.  */
      emit_insn (gen_stack_tie (stack_pointer_rtx, frame_pointer_rtx));
    }
  else if (fsize_with_regs != 0)
    m68k_set_frame_related
      (emit_insn (gen_addsi3 (stack_pointer_rtx,
			      stack_pointer_rtx,
			      GEN_INT (-fsize_with_regs))));

  if (current_frame.fpu_mask)
    {
      gcc_assert (current_frame.fpu_no >= MIN_FMOVEM_REGS);
      if (TARGET_68881)
	m68k_set_frame_related
	  (m68k_emit_movem (stack_pointer_rtx,
			    current_frame.fpu_no * -GET_MODE_SIZE (XFmode),
			    current_frame.fpu_no, FP0_REG,
			    current_frame.fpu_mask, true, true));
      else
	{
	  int offset;

	  /* If we're using moveml to save the integer registers,
	     the stack pointer will point to the bottom of the moveml
	     save area.  Find the stack offset of the first FP register.  */
	  if (current_frame.reg_no < MIN_MOVEM_REGS)
	    offset = 0;
	  else
	    offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
	  m68k_set_frame_related
	    (m68k_emit_movem (stack_pointer_rtx, offset,
			      current_frame.fpu_no, FP0_REG,
			      current_frame.fpu_mask, true, false));
	}
    }

  /* If the stack limit is not a symbol, check it here.
     This has the disadvantage that it may be too late...  */
  if (crtl->limit_stack)
    {
      if (REG_P (stack_limit_rtx))
	emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode, stack_pointer_rtx,
					      stack_limit_rtx),
				 stack_pointer_rtx, stack_limit_rtx,
				 const1_rtx));

      else if (GET_CODE (stack_limit_rtx) != SYMBOL_REF)
	warning (0, "stack limit expression is not supported");
    }

  if (current_frame.reg_no < MIN_MOVEM_REGS)
    {
      /* Store each register separately in the same order moveml does.  */
      int i;

      for (i = 16; i-- > 0; )
	if (current_frame.reg_mask & (1 << i))
	  {
	    src = gen_rtx_REG (SImode, D0_REG + i);
	    dest = gen_frame_mem (SImode,
				  gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
	    m68k_set_frame_related (emit_insn (gen_movsi (dest, src)));
	  }
    }
  else
    {
      if (TARGET_COLDFIRE)
	/* The required register save space has already been allocated.
	   The first register should be stored at (%sp).  */
	m68k_set_frame_related
	  (m68k_emit_movem (stack_pointer_rtx, 0,
			    current_frame.reg_no, D0_REG,
			    current_frame.reg_mask, true, false));
      else
	m68k_set_frame_related
	  (m68k_emit_movem (stack_pointer_rtx,
			    current_frame.reg_no * -GET_MODE_SIZE (SImode),
			    current_frame.reg_no, D0_REG,
			    current_frame.reg_mask, true, true));
    }

  if (!TARGET_SEP_DATA
      && crtl->uses_pic_offset_table)
    emit_insn (gen_load_got (pic_offset_table_rtx));
}
\f
/* Return true if a simple (return) instruction is sufficient for this
   function (i.e. if no epilogue is needed).  */

bool
m68k_use_return_insn (void)
{
  if (!reload_completed || frame_pointer_needed || get_frame_size () != 0)
    return false;

  m68k_compute_frame_layout ();
  return current_frame.offset == 0;
}

/* Emit RTL for the "epilogue" or "sibcall_epilogue" define_expand;
   SIBCALL_P says which.

   The function epilogue should not depend on the current stack pointer!
   It should use the frame pointer only, if there is a frame pointer.
   This is mandatory because of alloca; we also take advantage of it to
   omit stack adjustments before returning.  */

void
m68k_expand_epilogue (bool sibcall_p)
{
  HOST_WIDE_INT fsize, fsize_with_regs;
  bool big, restore_from_sp;

  m68k_compute_frame_layout ();

  fsize = current_frame.size;
  big = false;
  restore_from_sp = false;

  /* FIXME: crtl->is_leaf below is too strong.
     What we really need to know there is if there could be pending
     stack adjustment needed at that point.  */
  restore_from_sp = (!frame_pointer_needed
		     || (!cfun->calls_alloca && crtl->is_leaf));

  /* fsize_with_regs is the size we need to adjust the sp when
     popping the frame.  */
  fsize_with_regs = fsize;
  if (TARGET_COLDFIRE && restore_from_sp)
    {
      /* ColdFire's move multiple instructions do not allow post-increment
	 addressing.  Add the size of movem loads to the final deallocation
	 instead.  */
      if (current_frame.reg_no >= MIN_MOVEM_REGS)
	fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
      if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
	fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
    }

  if (current_frame.offset + fsize >= 0x8000
      && !restore_from_sp
      && (current_frame.reg_mask || current_frame.fpu_mask))
    {
      if (TARGET_COLDFIRE
	  && (current_frame.reg_no >= MIN_MOVEM_REGS
	      || current_frame.fpu_no >= MIN_FMOVEM_REGS))
	{
	  /* ColdFire's move multiple instructions do not support the
	     (d8,Ax,Xi) addressing mode, so we might as well use a normal
	     stack-based restore.  */
	  emit_move_insn (gen_rtx_REG (Pmode, A1_REG),
			  GEN_INT (-(current_frame.offset + fsize)));
	  emit_insn (gen_blockage ());
	  emit_insn (gen_addsi3 (stack_pointer_rtx,
				 gen_rtx_REG (Pmode, A1_REG),
				 frame_pointer_rtx));
	  restore_from_sp = true;
	}
      else
	{
	  emit_move_insn (gen_rtx_REG (Pmode, A1_REG), GEN_INT (-fsize));
	  fsize = 0;
	  big = true;
	}
    }

  if (current_frame.reg_no < MIN_MOVEM_REGS)
    {
      /* Restore each register separately in the same order moveml does.  */
      int i;
      HOST_WIDE_INT offset;

      offset = current_frame.offset + fsize;
      for (i = 0; i < 16; i++)
	if (current_frame.reg_mask & (1 << i))
	  {
	    rtx addr;

	    if (big)
	      {
		/* Generate the address -OFFSET(%fp,%a1.l).  */
		addr = gen_rtx_REG (Pmode, A1_REG);
		addr = gen_rtx_PLUS (Pmode, addr, frame_pointer_rtx);
		addr = plus_constant (Pmode, addr, -offset);
	      }
	    else if (restore_from_sp)
	      addr = gen_rtx_POST_INC (Pmode, stack_pointer_rtx);
	    else
	      addr = plus_constant (Pmode, frame_pointer_rtx, -offset);
	    emit_move_insn (gen_rtx_REG (SImode, D0_REG + i),
			    gen_frame_mem (SImode, addr));
	    offset -= GET_MODE_SIZE (SImode);
	  }
    }
  else if (current_frame.reg_mask)
    {
      if (big)
	m68k_emit_movem (gen_rtx_PLUS (Pmode,
				       gen_rtx_REG (Pmode, A1_REG),
				       frame_pointer_rtx),
			 -(current_frame.offset + fsize),
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false, false);
      else if (restore_from_sp)
	m68k_emit_movem (stack_pointer_rtx, 0,
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false,
			 !TARGET_COLDFIRE);
      else
	m68k_emit_movem (frame_pointer_rtx,
			 -(current_frame.offset + fsize),
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false, false);
    }

  if (current_frame.fpu_no > 0)
    {
      if (big)
	m68k_emit_movem (gen_rtx_PLUS (Pmode,
				       gen_rtx_REG (Pmode, A1_REG),
				       frame_pointer_rtx),
			 -(current_frame.foffset + fsize),
			 current_frame.fpu_no, FP0_REG,
			 current_frame.fpu_mask, false, false);
      else if (restore_from_sp)
	{
	  if (TARGET_COLDFIRE)
	    {
	      int offset;

	      /* If we used moveml to restore the integer registers, the
		 stack pointer will still point to the bottom of the moveml
		 save area.  Find the stack offset of the first FP
		 register.  */
	      if (current_frame.reg_no < MIN_MOVEM_REGS)
		offset = 0;
	      else
		offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
	      m68k_emit_movem (stack_pointer_rtx, offset,
			       current_frame.fpu_no, FP0_REG,
			       current_frame.fpu_mask, false, false);
	    }
	  else
	    m68k_emit_movem (stack_pointer_rtx, 0,
			     current_frame.fpu_no, FP0_REG,
			     current_frame.fpu_mask, false, true);
	}
      else
	m68k_emit_movem (frame_pointer_rtx,
			 -(current_frame.foffset + fsize),
			 current_frame.fpu_no, FP0_REG,
			 current_frame.fpu_mask, false, false);
    }

  emit_insn (gen_blockage ());
  if (frame_pointer_needed)
    emit_insn (gen_unlink (frame_pointer_rtx));
  else if (fsize_with_regs)
    emit_insn (gen_addsi3 (stack_pointer_rtx,
			   stack_pointer_rtx,
			   GEN_INT (fsize_with_regs)));

  if (crtl->calls_eh_return)
    emit_insn (gen_addsi3 (stack_pointer_rtx,
			   stack_pointer_rtx,
			   EH_RETURN_STACKADJ_RTX));

  if (!sibcall_p)
    emit_jump_insn (ret_rtx);
}
\f
/* Return true if X is a valid comparison operator for the dbcc
   instruction.

   Note it rejects floating point comparison operators.
   (In the future we could use Fdbcc).

   It also rejects some comparisons when CC_NO_OVERFLOW is set.  */

int
valid_dbcc_comparison_p_2 (rtx x, machine_mode mode ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (x))
    {
    case EQ: case NE: case GTU: case LTU:
    case GEU: case LEU:
      return 1;

      /* Reject some when CC_NO_OVERFLOW is set.  This may be overly
	 conservative.  */
    case GT: case LT: case GE: case LE:
      return ! (cc_prev_status.flags & CC_NO_OVERFLOW);
    default:
      return 0;
    }
}

/* Return nonzero if flags are currently in the 68881 flag register.  */
int
flags_in_68881 (void)
{
  /* We could add support for these in the future.  */
  return cc_status.flags & CC_IN_68881;
}

/* Return true if PARALLEL contains register REGNO.  */
static bool
m68k_reg_present_p (const_rtx parallel, unsigned int regno)
{
  int i;

  if (REG_P (parallel) && REGNO (parallel) == regno)
    return true;

  if (GET_CODE (parallel) != PARALLEL)
    return false;

  for (i = 0; i < XVECLEN (parallel, 0); ++i)
    {
      const_rtx x;

      x = XEXP (XVECEXP (parallel, 0, i), 0);
      if (REG_P (x) && REGNO (x) == regno)
	return true;
    }

  return false;
}

/* Implement TARGET_FUNCTION_OK_FOR_SIBCALL_P.  */

static bool
m68k_ok_for_sibcall_p (tree decl, tree exp)
{
  enum m68k_function_kind kind;

  /* We cannot use sibcalls for nested functions because we use the
     static chain register for indirect calls.  */
  if (CALL_EXPR_STATIC_CHAIN (exp))
    return false;

  if (!VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
    {
      /* Check that the return value locations are the same.  For
	 example that we aren't returning a value from the sibling in
	 a D0 register but then need to transfer it to an A0 register.  */
      rtx cfun_value;
      rtx call_value;

      cfun_value = FUNCTION_VALUE (TREE_TYPE (DECL_RESULT (cfun->decl)),
				   cfun->decl);
      call_value = FUNCTION_VALUE (TREE_TYPE (exp), decl);

      /* Check that the values are equal or that the result the callee
	 function returns is a superset of what the current function
	 returns.  */
      if (!(rtx_equal_p (cfun_value, call_value)
	    || (REG_P (cfun_value)
		&& m68k_reg_present_p (call_value, REGNO (cfun_value)))))
	return false;
    }

  kind = m68k_get_function_kind (current_function_decl);
  if (kind == m68k_fk_normal_function)
    /* We can always sibcall from a normal function, because it's
       undefined if it is calling an interrupt function.  */
    return true;

  /* Otherwise we can only sibcall if the function kind is known to be
     the same.  */
  if (decl && m68k_get_function_kind (decl) == kind)
    return true;

  return false;
}

/* On the m68k all args are always pushed.  */

static rtx
m68k_function_arg (cumulative_args_t cum ATTRIBUTE_UNUSED,
		   machine_mode mode ATTRIBUTE_UNUSED,
		   const_tree type ATTRIBUTE_UNUSED,
		   bool named ATTRIBUTE_UNUSED)
{
  return NULL_RTX;
}

static void
m68k_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
			   const_tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);

  *cum += (mode != BLKmode
	   ? (GET_MODE_SIZE (mode) + 3) & ~3
	   : (int_size_in_bytes (type) + 3) & ~3);
}
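
/* Each argument thus advances CUM by its size rounded up to a multiple
   of 4 bytes; e.g. a single char argument still occupies a full 4-byte
   stack slot.  */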

/* Convert X to a legitimate function call memory reference and return the
   result.  */

rtx
m68k_legitimize_call_address (rtx x)
{
  gcc_assert (MEM_P (x));
  if (call_operand (XEXP (x, 0), VOIDmode))
    return x;
  return replace_equiv_address (x, force_reg (Pmode, XEXP (x, 0)));
}

/* Likewise for sibling calls.  */

rtx
m68k_legitimize_sibcall_address (rtx x)
{
  gcc_assert (MEM_P (x));
  if (sibcall_operand (XEXP (x, 0), VOIDmode))
    return x;

  emit_move_insn (gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM), XEXP (x, 0));
  return replace_equiv_address (x, gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM));
}

/* Convert X to a legitimate address and return it if successful.  Otherwise
   return X.

   For the 68000, we handle X+REG by loading X into a register R and
   using R+REG.  R will go in an address reg and indexing will be used.
   However, if REG is a broken-out memory address or multiplication,
   nothing needs to be done because REG can certainly go in an address reg.  */

static rtx
m68k_legitimize_address (rtx x, rtx oldx, machine_mode mode)
{
  if (m68k_tls_symbol_p (x))
    return m68k_legitimize_tls_address (x);

  if (GET_CODE (x) == PLUS)
    {
      int ch = (x) != (oldx);
      int copied = 0;

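/* COPY_ONCE lazily copies X the first time part of it is rewritten,
   so the rtx passed in (and OLDX) is never modified in place.  */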
#define COPY_ONCE(Y) if (!copied) { Y = copy_rtx (Y); copied = ch = 1; }

      if (GET_CODE (XEXP (x, 0)) == MULT)
	{
	  COPY_ONCE (x);
	  XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
	}
      if (GET_CODE (XEXP (x, 1)) == MULT)
	{
	  COPY_ONCE (x);
	  XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
	}
      if (ch)
	{
	  if (GET_CODE (XEXP (x, 1)) == REG
	      && GET_CODE (XEXP (x, 0)) == REG)
	    {
	      if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
		{
		  COPY_ONCE (x);
		  x = force_operand (x, 0);
		}
	      return x;
	    }
	  if (memory_address_p (mode, x))
	    return x;
	}
      if (GET_CODE (XEXP (x, 0)) == REG
	  || (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
	      && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
	      && GET_MODE (XEXP (XEXP (x, 0), 0)) == HImode))
	{
	  rtx temp = gen_reg_rtx (Pmode);
	  rtx val = force_operand (XEXP (x, 1), 0);
	  emit_move_insn (temp, val);
	  COPY_ONCE (x);
	  XEXP (x, 1) = temp;
	  if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
	      && GET_CODE (XEXP (x, 0)) == REG)
	    x = force_operand (x, 0);
	}
      else if (GET_CODE (XEXP (x, 1)) == REG
	       || (GET_CODE (XEXP (x, 1)) == SIGN_EXTEND
		   && GET_CODE (XEXP (XEXP (x, 1), 0)) == REG
		   && GET_MODE (XEXP (XEXP (x, 1), 0)) == HImode))
	{
	  rtx temp = gen_reg_rtx (Pmode);
	  rtx val = force_operand (XEXP (x, 0), 0);
	  emit_move_insn (temp, val);
	  COPY_ONCE (x);
	  XEXP (x, 0) = temp;
	  if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
	      && GET_CODE (XEXP (x, 1)) == REG)
	    x = force_operand (x, 0);
	}
    }

  return x;
}


/* Output a dbCC; jCC sequence.  Note we do not handle the
   floating point version of this sequence (Fdbcc).  We also
   do not handle alternative conditions when CC_NO_OVERFLOW is
   set.  It is assumed that valid_dbcc_comparison_p and flags_in_68881 will
   kick those out before we get here.  */

void
output_dbcc_and_branch (rtx *operands)
{
  switch (GET_CODE (operands[3]))
    {
    case EQ:
      output_asm_insn ("dbeq %0,%l1\n\tjeq %l2", operands);
      break;

    case NE:
      output_asm_insn ("dbne %0,%l1\n\tjne %l2", operands);
      break;

    case GT:
      output_asm_insn ("dbgt %0,%l1\n\tjgt %l2", operands);
      break;

    case GTU:
      output_asm_insn ("dbhi %0,%l1\n\tjhi %l2", operands);
      break;

    case LT:
      output_asm_insn ("dblt %0,%l1\n\tjlt %l2", operands);
      break;

    case LTU:
      output_asm_insn ("dbcs %0,%l1\n\tjcs %l2", operands);
      break;

    case GE:
      output_asm_insn ("dbge %0,%l1\n\tjge %l2", operands);
      break;

    case GEU:
      output_asm_insn ("dbcc %0,%l1\n\tjcc %l2", operands);
      break;

    case LE:
      output_asm_insn ("dble %0,%l1\n\tjle %l2", operands);
      break;

    case LEU:
      output_asm_insn ("dbls %0,%l1\n\tjls %l2", operands);
      break;

    default:
      gcc_unreachable ();
    }

  /* If the decrement is to be done in SImode, then we have
     to compensate for the fact that dbcc decrements in HImode.  */
  switch (GET_MODE (operands[0]))
    {
    case E_SImode:
      output_asm_insn ("clr%.w %0\n\tsubq%.l #1,%0\n\tjpl %l1", operands);
      break;

    case E_HImode:
      break;

    default:
      gcc_unreachable ();
    }
}
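
/* For an EQ test with an SImode counter in %d0, the sequence emitted
   is roughly:

       dbeq %d0,L1
       jeq L2
       clr.w %d0
       subq.l #1,%d0
       jpl L1

   (Illustrative operands and labels only.)  */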
1661
1662 const char *
1663 output_scc_di (rtx op, rtx operand1, rtx operand2, rtx dest)
1664 {
1665 rtx loperands[7];
1666 enum rtx_code op_code = GET_CODE (op);
1667
1668 /* This does not produce a useful cc. */
1669 CC_STATUS_INIT;
1670
1671 /* The m68k cmp.l instruction requires operand1 to be a reg as used
1672 below. Swap the operands and change the op if these requirements
1673 are not fulfilled. */
1674 if (GET_CODE (operand2) == REG && GET_CODE (operand1) != REG)
1675 {
1676 rtx tmp = operand1;
1677
1678 operand1 = operand2;
1679 operand2 = tmp;
1680 op_code = swap_condition (op_code);
1681 }
1682 loperands[0] = operand1;
1683 if (GET_CODE (operand1) == REG)
1684 loperands[1] = gen_rtx_REG (SImode, REGNO (operand1) + 1);
1685 else
1686 loperands[1] = adjust_address (operand1, SImode, 4);
1687 if (operand2 != const0_rtx)
1688 {
1689 loperands[2] = operand2;
1690 if (GET_CODE (operand2) == REG)
1691 loperands[3] = gen_rtx_REG (SImode, REGNO (operand2) + 1);
1692 else
1693 loperands[3] = adjust_address (operand2, SImode, 4);
1694 }
1695 loperands[4] = gen_label_rtx ();
1696 if (operand2 != const0_rtx)
1697 output_asm_insn ("cmp%.l %2,%0\n\tjne %l4\n\tcmp%.l %3,%1", loperands);
1698 else
1699 {
1700 if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[0]))
1701 output_asm_insn ("tst%.l %0", loperands);
1702 else
1703 output_asm_insn ("cmp%.w #0,%0", loperands);
1704
1705 output_asm_insn ("jne %l4", loperands);
1706
1707 if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[1]))
1708 output_asm_insn ("tst%.l %1", loperands);
1709 else
1710 output_asm_insn ("cmp%.w #0,%1", loperands);
1711 }
1712
1713 loperands[5] = dest;
1714
1715 switch (op_code)
1716 {
1717 case EQ:
1718 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1719 CODE_LABEL_NUMBER (loperands[4]));
1720 output_asm_insn ("seq %5", loperands);
1721 break;
1722
1723 case NE:
1724 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1725 CODE_LABEL_NUMBER (loperands[4]));
1726 output_asm_insn ("sne %5", loperands);
1727 break;
1728
1729 case GT:
1730 loperands[6] = gen_label_rtx ();
1731 output_asm_insn ("shi %5\n\tjra %l6", loperands);
1732 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1733 CODE_LABEL_NUMBER (loperands[4]));
1734 output_asm_insn ("sgt %5", loperands);
1735 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1736 CODE_LABEL_NUMBER (loperands[6]));
1737 break;
1738
1739 case GTU:
1740 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1741 CODE_LABEL_NUMBER (loperands[4]));
1742 output_asm_insn ("shi %5", loperands);
1743 break;
1744
1745 case LT:
1746 loperands[6] = gen_label_rtx ();
1747 output_asm_insn ("scs %5\n\tjra %l6", loperands);
1748 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1749 CODE_LABEL_NUMBER (loperands[4]));
1750 output_asm_insn ("slt %5", loperands);
1751 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1752 CODE_LABEL_NUMBER (loperands[6]));
1753 break;
1754
1755 case LTU:
1756 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1757 CODE_LABEL_NUMBER (loperands[4]));
1758 output_asm_insn ("scs %5", loperands);
1759 break;
1760
1761 case GE:
1762 loperands[6] = gen_label_rtx ();
1763 output_asm_insn ("scc %5\n\tjra %l6", loperands);
1764 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1765 CODE_LABEL_NUMBER (loperands[4]));
1766 output_asm_insn ("sge %5", loperands);
1767 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1768 CODE_LABEL_NUMBER (loperands[6]));
1769 break;
1770
1771 case GEU:
1772 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1773 CODE_LABEL_NUMBER (loperands[4]));
1774 output_asm_insn ("scc %5", loperands);
1775 break;
1776
1777 case LE:
1778 loperands[6] = gen_label_rtx ();
1779 output_asm_insn ("sls %5\n\tjra %l6", loperands);
1780 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1781 CODE_LABEL_NUMBER (loperands[4]));
1782 output_asm_insn ("sle %5", loperands);
1783 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1784 CODE_LABEL_NUMBER (loperands[6]));
1785 break;
1786
1787 case LEU:
1788 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1789 CODE_LABEL_NUMBER (loperands[4]));
1790 output_asm_insn ("sls %5", loperands);
1791 break;
1792
1793 default:
1794 gcc_unreachable ();
1795 }
1796 return "";
1797 }
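
/* Sketch of the EQ case above (illustration only, register names assumed):
   comparing a DImode data-register pair against zero emits roughly
	tst.l %d0
	jne L1
	tst.l %d1
   L1:	seq %d2
   so the low word is only tested when the high word is zero; if the high
   word is nonzero, the branch reaches the seq with Z clear. */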
1798
1799 const char *
1800 output_btst (rtx *operands, rtx countop, rtx dataop, rtx_insn *insn, int signpos)
1801 {
1802 operands[0] = countop;
1803 operands[1] = dataop;
1804
1805 if (GET_CODE (countop) == CONST_INT)
1806 {
1807 register int count = INTVAL (countop);
1808 /* If COUNT is bigger than the size of the storage unit in use,
1809 advance to the containing unit of the same size. */
1810 if (count > signpos)
1811 {
1812 int offset = (count & ~signpos) / 8;
1813 count = count & signpos;
1814 operands[1] = dataop = adjust_address (dataop, QImode, offset);
1815 }
1816 if (count == signpos)
1817 cc_status.flags = CC_NOT_POSITIVE | CC_Z_IN_NOT_N;
1818 else
1819 cc_status.flags = CC_NOT_NEGATIVE | CC_Z_IN_NOT_N;
1820
1821 /* These three statements used to use next_insns_test_no...
1822 but it appears that this should do the same job. */
1823 if (count == 31
1824 && next_insn_tests_no_inequality (insn))
1825 return "tst%.l %1";
1826 if (count == 15
1827 && next_insn_tests_no_inequality (insn))
1828 return "tst%.w %1";
1829 if (count == 7
1830 && next_insn_tests_no_inequality (insn))
1831 return "tst%.b %1";
1832 /* Try to use `movew to ccr' followed by the appropriate branch insn.
1833 On some m68k variants that is unfortunately slower than btst.
1834 On the 68000 and higher, it should also work for all HImode operands. */
1835 if (TUNE_CPU32 || TARGET_COLDFIRE || optimize_size)
1836 {
1837 if (count == 3 && DATA_REG_P (operands[1])
1838 && next_insn_tests_no_inequality (insn))
1839 {
1840 cc_status.flags = CC_NOT_NEGATIVE | CC_Z_IN_NOT_N | CC_NO_OVERFLOW;
1841 return "move%.w %1,%%ccr";
1842 }
1843 if (count == 2 && DATA_REG_P (operands[1])
1844 && next_insn_tests_no_inequality (insn))
1845 {
1846 cc_status.flags = CC_NOT_NEGATIVE | CC_INVERTED | CC_NO_OVERFLOW;
1847 return "move%.w %1,%%ccr";
1848 }
1849 /* count == 1 followed by bvc/bvs and
1850 count == 0 followed by bcc/bcs are also possible, but need
1851 m68k-specific CC_Z_IN_NOT_V and CC_Z_IN_NOT_C flags. */
1852 }
1853
1854 cc_status.flags = CC_NOT_NEGATIVE;
1855 }
1856 return "btst %0,%1";
1857 }
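
/* Illustration of the special cases above (a sketch, not exhaustive):
   when the tested bit is the sign bit of a byte, word or long
   (COUNT == 7, 15 or 31) and the following insn only tests for
   equality, the bit lands in the N flag of a plain tst, which the
   cc_status flags remap, so the routine returns e.g. "tst%.b %1"
   instead of a btst. */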
1858 \f
1859 /* Return true if X is a legitimate base register. STRICT_P says
1860 whether we need strict checking. */
1861
1862 bool
1863 m68k_legitimate_base_reg_p (rtx x, bool strict_p)
1864 {
1865 /* Allow SUBREG everywhere we allow REG. This results in better code. */
1866 if (!strict_p && GET_CODE (x) == SUBREG)
1867 x = SUBREG_REG (x);
1868
1869 return (REG_P (x)
1870 && (strict_p
1871 ? REGNO_OK_FOR_BASE_P (REGNO (x))
1872 : REGNO_OK_FOR_BASE_NONSTRICT_P (REGNO (x))));
1873 }
1874
1875 /* Return true if X is a legitimate index register. STRICT_P says
1876 whether we need strict checking. */
1877
1878 bool
1879 m68k_legitimate_index_reg_p (rtx x, bool strict_p)
1880 {
1881 if (!strict_p && GET_CODE (x) == SUBREG)
1882 x = SUBREG_REG (x);
1883
1884 return (REG_P (x)
1885 && (strict_p
1886 ? REGNO_OK_FOR_INDEX_P (REGNO (x))
1887 : REGNO_OK_FOR_INDEX_NONSTRICT_P (REGNO (x))));
1888 }
1889
1890 /* Return true if X is a legitimate index expression for a (d8,An,Xn) or
1891 (bd,An,Xn) addressing mode. Fill in the INDEX and SCALE fields of
1892 ADDRESS if so. STRICT_P says whether we need strict checking. */
1893
1894 static bool
1895 m68k_decompose_index (rtx x, bool strict_p, struct m68k_address *address)
1896 {
1897 int scale;
1898
1899 /* Check for a scale factor. */
1900 scale = 1;
1901 if ((TARGET_68020 || TARGET_COLDFIRE)
1902 && GET_CODE (x) == MULT
1903 && GET_CODE (XEXP (x, 1)) == CONST_INT
1904 && (INTVAL (XEXP (x, 1)) == 2
1905 || INTVAL (XEXP (x, 1)) == 4
1906 || (INTVAL (XEXP (x, 1)) == 8
1907 && (TARGET_COLDFIRE_FPU || !TARGET_COLDFIRE))))
1908 {
1909 scale = INTVAL (XEXP (x, 1));
1910 x = XEXP (x, 0);
1911 }
1912
1913 /* Check for a word extension. */
1914 if (!TARGET_COLDFIRE
1915 && GET_CODE (x) == SIGN_EXTEND
1916 && GET_MODE (XEXP (x, 0)) == HImode)
1917 x = XEXP (x, 0);
1918
1919 if (m68k_legitimate_index_reg_p (x, strict_p))
1920 {
1921 address->scale = scale;
1922 address->index = x;
1923 return true;
1924 }
1925
1926 return false;
1927 }
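
/* Worked example (an assumed case, not from the source): on a 68020,
   (mult:SI (sign_extend:SI (reg:HI %d0)) (const_int 4)) decomposes
   into index %d0 with scale 4, i.e. the d0.w*4 index part of a
   (d8,An,Xn.SIZE*SCALE) operand. */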
1928
1929 /* Return true if X is an illegitimate symbolic constant. */
1930
1931 bool
1932 m68k_illegitimate_symbolic_constant_p (rtx x)
1933 {
1934 rtx base, offset;
1935
1936 if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P)
1937 {
1938 split_const (x, &base, &offset);
1939 if (GET_CODE (base) == SYMBOL_REF
1940 && !offset_within_block_p (base, INTVAL (offset)))
1941 return true;
1942 }
1943 return m68k_tls_reference_p (x, false);
1944 }
1945
1946 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
1947
1948 static bool
1949 m68k_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
1950 {
1951 return m68k_illegitimate_symbolic_constant_p (x);
1952 }
1953
1954 /* Return true if X is a legitimate constant address that can reach
1955 bytes in the range [X, X + REACH). STRICT_P says whether we need
1956 strict checking. */
1957
1958 static bool
1959 m68k_legitimate_constant_address_p (rtx x, unsigned int reach, bool strict_p)
1960 {
1961 rtx base, offset;
1962
1963 if (!CONSTANT_ADDRESS_P (x))
1964 return false;
1965
1966 if (flag_pic
1967 && !(strict_p && TARGET_PCREL)
1968 && symbolic_operand (x, VOIDmode))
1969 return false;
1970
1971 if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P && reach > 1)
1972 {
1973 split_const (x, &base, &offset);
1974 if (GET_CODE (base) == SYMBOL_REF
1975 && !offset_within_block_p (base, INTVAL (offset) + reach - 1))
1976 return false;
1977 }
1978
1979 return !m68k_tls_reference_p (x, false);
1980 }
1981
1982 /* Return true if X is a LABEL_REF for a jump table. Assume that unplaced
1983 labels will become jump tables. */
1984
1985 static bool
1986 m68k_jump_table_ref_p (rtx x)
1987 {
1988 if (GET_CODE (x) != LABEL_REF)
1989 return false;
1990
1991 rtx_insn *insn = as_a <rtx_insn *> (XEXP (x, 0));
1992 if (!NEXT_INSN (insn) && !PREV_INSN (insn))
1993 return true;
1994
1995 insn = next_nonnote_insn (insn);
1996 return insn && JUMP_TABLE_DATA_P (insn);
1997 }
1998
1999 /* Return true if X is a legitimate address for values of mode MODE.
2000 STRICT_P says whether strict checking is needed. If the address
2001 is valid, describe its components in *ADDRESS. */
2002
2003 static bool
2004 m68k_decompose_address (machine_mode mode, rtx x,
2005 bool strict_p, struct m68k_address *address)
2006 {
2007 unsigned int reach;
2008
2009 memset (address, 0, sizeof (*address));
2010
2011 if (mode == BLKmode)
2012 reach = 1;
2013 else
2014 reach = GET_MODE_SIZE (mode);
2015
2016 /* Check for (An) (mode 2). */
2017 if (m68k_legitimate_base_reg_p (x, strict_p))
2018 {
2019 address->base = x;
2020 return true;
2021 }
2022
2023 /* Check for -(An) and (An)+ (modes 3 and 4). */
2024 if ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_INC)
2025 && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
2026 {
2027 address->code = GET_CODE (x);
2028 address->base = XEXP (x, 0);
2029 return true;
2030 }
2031
2032 /* Check for (d16,An) (mode 5). */
2033 if (GET_CODE (x) == PLUS
2034 && GET_CODE (XEXP (x, 1)) == CONST_INT
2035 && IN_RANGE (INTVAL (XEXP (x, 1)), -0x8000, 0x8000 - reach)
2036 && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
2037 {
2038 address->base = XEXP (x, 0);
2039 address->offset = XEXP (x, 1);
2040 return true;
2041 }
2042
2043 /* Check for GOT loads. These are (bd,An,Xn) addresses if
2044 TARGET_68020 && flag_pic == 2, otherwise they are (d16,An)
2045 addresses. */
2046 if (GET_CODE (x) == PLUS
2047 && XEXP (x, 0) == pic_offset_table_rtx)
2048 {
2049 /* As we are processing a PLUS, do not unwrap RELOC32 symbols --
2050 they are invalid in this context. */
2051 if (m68k_unwrap_symbol (XEXP (x, 1), false) != XEXP (x, 1))
2052 {
2053 address->base = XEXP (x, 0);
2054 address->offset = XEXP (x, 1);
2055 return true;
2056 }
2057 }
2058
2059 /* The ColdFire FPU only accepts addressing modes 2-5. */
2060 if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
2061 return false;
2062
2063 /* Check for (xxx).w and (xxx).l. Also, in the TARGET_PCREL case,
2064 check for (d16,PC) or (bd,PC,Xn) with a suppressed index register.
2065 All these modes are variations of mode 7. */
2066 if (m68k_legitimate_constant_address_p (x, reach, strict_p))
2067 {
2068 address->offset = x;
2069 return true;
2070 }
2071
2072 /* Check for (d8,PC,Xn), a mode 7 form. This case is needed for
2073 tablejumps.
2074
2075 ??? do_tablejump creates these addresses before placing the target
2076 label, so we have to assume that unplaced labels are jump table
2077 references. It seems unlikely that we would ever generate indexed
2078 accesses to unplaced labels in other cases. */
2079 if (GET_CODE (x) == PLUS
2080 && m68k_jump_table_ref_p (XEXP (x, 1))
2081 && m68k_decompose_index (XEXP (x, 0), strict_p, address))
2082 {
2083 address->offset = XEXP (x, 1);
2084 return true;
2085 }
2086
2087 /* Everything hereafter deals with (d8,An,Xn.SIZE*SCALE) or
2088 (bd,An,Xn.SIZE*SCALE) addresses. */
2089
2090 if (TARGET_68020)
2091 {
2092 /* Check for a nonzero base displacement. */
2093 if (GET_CODE (x) == PLUS
2094 && m68k_legitimate_constant_address_p (XEXP (x, 1), reach, strict_p))
2095 {
2096 address->offset = XEXP (x, 1);
2097 x = XEXP (x, 0);
2098 }
2099
2100 /* Check for a suppressed index register. */
2101 if (m68k_legitimate_base_reg_p (x, strict_p))
2102 {
2103 address->base = x;
2104 return true;
2105 }
2106
2107 /* Check for a suppressed base register. Do not allow this case
2108 for non-symbolic offsets as it effectively gives gcc freedom
2109 to treat data registers as base registers, which can generate
2110 worse code. */
2111 if (address->offset
2112 && symbolic_operand (address->offset, VOIDmode)
2113 && m68k_decompose_index (x, strict_p, address))
2114 return true;
2115 }
2116 else
2117 {
2118 /* Check for a nonzero base displacement. */
2119 if (GET_CODE (x) == PLUS
2120 && GET_CODE (XEXP (x, 1)) == CONST_INT
2121 && IN_RANGE (INTVAL (XEXP (x, 1)), -0x80, 0x80 - reach))
2122 {
2123 address->offset = XEXP (x, 1);
2124 x = XEXP (x, 0);
2125 }
2126 }
2127
2128 /* We now expect the sum of a base and an index. */
2129 if (GET_CODE (x) == PLUS)
2130 {
2131 if (m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p)
2132 && m68k_decompose_index (XEXP (x, 1), strict_p, address))
2133 {
2134 address->base = XEXP (x, 0);
2135 return true;
2136 }
2137
2138 if (m68k_legitimate_base_reg_p (XEXP (x, 1), strict_p)
2139 && m68k_decompose_index (XEXP (x, 0), strict_p, address))
2140 {
2141 address->base = XEXP (x, 1);
2142 return true;
2143 }
2144 }
2145 return false;
2146 }
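
/* Worked example (a sketch): for SImode,
   (plus:SI (reg:SI %a0) (const_int 8)) is accepted as mode 5 above,
   setting address->base = %a0 and address->offset = 8, because the
   displacement 8 lies within [-0x8000, 0x8000 - GET_MODE_SIZE (SImode)]. */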
2147
2148 /* Return true if X is a legitimate address for values of mode MODE.
2149 STRICT_P says whether strict checking is needed. */
2150
2151 bool
2152 m68k_legitimate_address_p (machine_mode mode, rtx x, bool strict_p)
2153 {
2154 struct m68k_address address;
2155
2156 return m68k_decompose_address (mode, x, strict_p, &address);
2157 }
2158
2159 /* Return true if X is a memory, describing its address in ADDRESS if so.
2160 Apply strict checking if called during or after reload. */
2161
2162 static bool
2163 m68k_legitimate_mem_p (rtx x, struct m68k_address *address)
2164 {
2165 return (MEM_P (x)
2166 && m68k_decompose_address (GET_MODE (x), XEXP (x, 0),
2167 reload_in_progress || reload_completed,
2168 address));
2169 }
2170
2171 /* Implement TARGET_LEGITIMATE_CONSTANT_P. */
2172
2173 bool
2174 m68k_legitimate_constant_p (machine_mode mode, rtx x)
2175 {
2176 return mode != XFmode && !m68k_illegitimate_symbolic_constant_p (x);
2177 }
2178
2179 /* Return true if X matches the 'Q' constraint. It must be a memory
2180 with a base address and no constant offset or index. */
2181
2182 bool
2183 m68k_matches_q_p (rtx x)
2184 {
2185 struct m68k_address address;
2186
2187 return (m68k_legitimate_mem_p (x, &address)
2188 && address.code == UNKNOWN
2189 && address.base
2190 && !address.offset
2191 && !address.index);
2192 }
2193
2194 /* Return true if X matches the 'U' constraint. It must be a base address
2195 with a constant offset and no index. */
2196
2197 bool
2198 m68k_matches_u_p (rtx x)
2199 {
2200 struct m68k_address address;
2201
2202 return (m68k_legitimate_mem_p (x, &address)
2203 && address.code == UNKNOWN
2204 && address.base
2205 && address.offset
2206 && !address.index);
2207 }
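
/* Illustrative examples (assumed, not from the source):
   (mem:SI (reg:SI %a0)) matches 'Q' (a base register alone), while
   (mem:SI (plus:SI (reg:SI %a0) (const_int 4))) matches 'U'
   (a base register plus a constant offset, with no index). */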
2208
2209 /* Return GOT pointer. */
2210
2211 static rtx
2212 m68k_get_gp (void)
2213 {
2214 if (pic_offset_table_rtx == NULL_RTX)
2215 pic_offset_table_rtx = gen_rtx_REG (Pmode, PIC_REG);
2216
2217 crtl->uses_pic_offset_table = 1;
2218
2219 return pic_offset_table_rtx;
2220 }
2221
2222 /* M68K relocations, used to distinguish GOT and TLS relocations in UNSPEC
2223 wrappers. */
2224 enum m68k_reloc { RELOC_GOT, RELOC_TLSGD, RELOC_TLSLDM, RELOC_TLSLDO,
2225 RELOC_TLSIE, RELOC_TLSLE };
2226
2227 #define TLS_RELOC_P(RELOC) ((RELOC) != RELOC_GOT)
2228
2229 /* Wrap symbol X in an unspec representing relocation RELOC.
2230 BASE_REG is the register that should be added to the result.
2231 TEMP_REG, if non-null, is a temporary register. */
2232
2233 static rtx
2234 m68k_wrap_symbol (rtx x, enum m68k_reloc reloc, rtx base_reg, rtx temp_reg)
2235 {
2236 bool use_x_p;
2237
2238 use_x_p = (base_reg == pic_offset_table_rtx) ? TARGET_XGOT : TARGET_XTLS;
2239
2240 if (TARGET_COLDFIRE && use_x_p)
2241 /* When compiling with the -mxgot or -mxtls switch, the code will look like this:
2242
2243 move.l <X>@<RELOC>,<TEMP_REG>
2244 add.l <BASE_REG>,<TEMP_REG> */
2245 {
2246 /* Wrap X in UNSPEC_RELOC32 to tell m68k_output_addr_const_extra
2247 to put @RELOC after the reference. */
2248 x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)),
2249 UNSPEC_RELOC32);
2250 x = gen_rtx_CONST (Pmode, x);
2251
2252 if (temp_reg == NULL)
2253 {
2254 gcc_assert (can_create_pseudo_p ());
2255 temp_reg = gen_reg_rtx (Pmode);
2256 }
2257
2258 emit_move_insn (temp_reg, x);
2259 emit_insn (gen_addsi3 (temp_reg, temp_reg, base_reg));
2260 x = temp_reg;
2261 }
2262 else
2263 {
2264 x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)),
2265 UNSPEC_RELOC16);
2266 x = gen_rtx_CONST (Pmode, x);
2267
2268 x = gen_rtx_PLUS (Pmode, base_reg, x);
2269 }
2270
2271 return x;
2272 }
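
/* Sketch of the default (non -mxgot/-mxtls) result above (illustration
   only): wrapping SYM with RELOC_GOT and BASE_REG %a5 produces
     (plus:SI (reg:SI %a5)
              (const:SI (unspec:SI [SYM (const_int RELOC_GOT)]
                                   UNSPEC_RELOC16)))
   which is later printed with a 16-bit @GOT-style decoration. */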
2273
2274 /* Helper for m68k_unwrap_symbol.
2275 Also, if unwrapping was successful (that is, if ORIG differs from the
2276 return value), sets *RELOC_PTR to the relocation type of the symbol. */
2277
2278 static rtx
2279 m68k_unwrap_symbol_1 (rtx orig, bool unwrap_reloc32_p,
2280 enum m68k_reloc *reloc_ptr)
2281 {
2282 if (GET_CODE (orig) == CONST)
2283 {
2284 rtx x;
2285 enum m68k_reloc dummy;
2286
2287 x = XEXP (orig, 0);
2288
2289 if (reloc_ptr == NULL)
2290 reloc_ptr = &dummy;
2291
2292 /* Handle an addend. */
2293 if ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS)
2294 && CONST_INT_P (XEXP (x, 1)))
2295 x = XEXP (x, 0);
2296
2297 if (GET_CODE (x) == UNSPEC)
2298 {
2299 switch (XINT (x, 1))
2300 {
2301 case UNSPEC_RELOC16:
2302 orig = XVECEXP (x, 0, 0);
2303 *reloc_ptr = (enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1));
2304 break;
2305
2306 case UNSPEC_RELOC32:
2307 if (unwrap_reloc32_p)
2308 {
2309 orig = XVECEXP (x, 0, 0);
2310 *reloc_ptr = (enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1));
2311 }
2312 break;
2313
2314 default:
2315 break;
2316 }
2317 }
2318 }
2319
2320 return orig;
2321 }
2322
2323 /* Unwrap symbol from UNSPEC_RELOC16 and, if unwrap_reloc32_p,
2324 UNSPEC_RELOC32 wrappers. */
2325
2326 rtx
2327 m68k_unwrap_symbol (rtx orig, bool unwrap_reloc32_p)
2328 {
2329 return m68k_unwrap_symbol_1 (orig, unwrap_reloc32_p, NULL);
2330 }
2331
2332 /* Adjust a decorated address operand before outputting assembler for it. */
2333
2334 static void
2335 m68k_adjust_decorated_operand (rtx op)
2336 {
2337 /* Combine and, possibly, other optimizations may do a good job of
2338 converting
2339 (const (unspec [(symbol)]))
2340 into
2341 (const (plus (unspec [(symbol)])
2342 (const_int N))).
2343 The problem with this is emitting @TLS or @GOT decorations.
2344 The decoration is emitted when processing (unspec), so the
2345 result would be "#symbol@TLSLE+N" instead of "#symbol+N@TLSLE".
2346
2347 It seems that the easiest solution to this is to convert such
2348 operands to
2349 (const (unspec [(plus (symbol)
2350 (const_int N))])).
2351 Note that the top level of the operand remains intact, so we don't
2352 have to patch up anything outside of the operand. */
2353
2354 subrtx_var_iterator::array_type array;
2355 FOR_EACH_SUBRTX_VAR (iter, array, op, ALL)
2356 {
2357 rtx x = *iter;
2358 if (m68k_unwrap_symbol (x, true) != x)
2359 {
2360 rtx plus;
2361
2362 gcc_assert (GET_CODE (x) == CONST);
2363 plus = XEXP (x, 0);
2364
2365 if (GET_CODE (plus) == PLUS || GET_CODE (plus) == MINUS)
2366 {
2367 rtx unspec;
2368 rtx addend;
2369
2370 unspec = XEXP (plus, 0);
2371 gcc_assert (GET_CODE (unspec) == UNSPEC);
2372 addend = XEXP (plus, 1);
2373 gcc_assert (CONST_INT_P (addend));
2374
2375 /* We now have all the pieces, rearrange them. */
2376
2377 /* Move symbol to plus. */
2378 XEXP (plus, 0) = XVECEXP (unspec, 0, 0);
2379
2380 /* Move plus inside unspec. */
2381 XVECEXP (unspec, 0, 0) = plus;
2382
2383 /* Move unspec to top level of const. */
2384 XEXP (x, 0) = unspec;
2385 }
2386 iter.skip_subrtxes ();
2387 }
2388 }
2389 }
2390
2391 /* Move X to a register and add REG_EQUAL note pointing to ORIG.
2392 If REG is non-null, use it; generate new pseudo otherwise. */
2393
2394 static rtx
2395 m68k_move_to_reg (rtx x, rtx orig, rtx reg)
2396 {
2397 rtx_insn *insn;
2398
2399 if (reg == NULL_RTX)
2400 {
2401 gcc_assert (can_create_pseudo_p ());
2402 reg = gen_reg_rtx (Pmode);
2403 }
2404
2405 insn = emit_move_insn (reg, x);
2406 /* Put a REG_EQUAL note on this insn, so that it can be optimized
2407 by the loop optimizer. */
2408 set_unique_reg_note (insn, REG_EQUAL, orig);
2409
2410 return reg;
2411 }
2412
2413 /* Does the same as m68k_wrap_symbol, but returns a memory reference to
2414 the GOT slot. */
2415
2416 static rtx
2417 m68k_wrap_symbol_into_got_ref (rtx x, enum m68k_reloc reloc, rtx temp_reg)
2418 {
2419 x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), temp_reg);
2420
2421 x = gen_rtx_MEM (Pmode, x);
2422 MEM_READONLY_P (x) = 1;
2423
2424 return x;
2425 }
2426
2427 /* Legitimize PIC addresses. If the address is already
2428 position-independent, we return ORIG. Newly generated
2429 position-independent addresses go to REG. If we need more
2430 than one register, we lose.
2431
2432 An address is legitimized by making an indirect reference
2433 through the Global Offset Table with the name of the symbol
2434 used as an offset.
2435
2436 The assembler and linker are responsible for placing the
2437 address of the symbol in the GOT. The function prologue
2438 is responsible for initializing a5 to the starting address
2439 of the GOT.
2440
2441 The assembler is also responsible for translating a symbol name
2442 into a constant displacement from the start of the GOT.
2443
2444 A quick example may make things a little clearer:
2445
2446 When not generating PIC code, storing the value 12345 into _foo
2447 generates the following code:
2448
2449 movel #12345, _foo
2450
2451 When generating PIC code, two transformations are made. First, the
2452 compiler loads the address of _foo into a register, so the first transformation gives:
2453
2454 lea _foo, a0
2455 movel #12345, a0@
2456
2457 The code in movsi will intercept the lea instruction and call this
2458 routine which will transform the instructions into:
2459
2460 movel a5@(_foo:w), a0
2461 movel #12345, a0@
2462
2463
2464 That (in a nutshell) is how *all* symbol and label references are
2465 handled. */
2466
2467 rtx
2468 legitimize_pic_address (rtx orig, machine_mode mode ATTRIBUTE_UNUSED,
2469 rtx reg)
2470 {
2471 rtx pic_ref = orig;
2472
2473 /* First handle a simple SYMBOL_REF or LABEL_REF. */
2474 if (GET_CODE (orig) == SYMBOL_REF || GET_CODE (orig) == LABEL_REF)
2475 {
2476 gcc_assert (reg);
2477
2478 pic_ref = m68k_wrap_symbol_into_got_ref (orig, RELOC_GOT, reg);
2479 pic_ref = m68k_move_to_reg (pic_ref, orig, reg);
2480 }
2481 else if (GET_CODE (orig) == CONST)
2482 {
2483 rtx base;
2484
2485 /* Make sure this has not already been legitimized. */
2486 if (m68k_unwrap_symbol (orig, true) != orig)
2487 return orig;
2488
2489 gcc_assert (reg);
2490
2491 /* Legitimize both operands of the PLUS. */
2492 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
2493
2494 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
2495 orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
2496 base == reg ? 0 : reg);
2497
2498 if (GET_CODE (orig) == CONST_INT)
2499 pic_ref = plus_constant (Pmode, base, INTVAL (orig));
2500 else
2501 pic_ref = gen_rtx_PLUS (Pmode, base, orig);
2502 }
2503
2504 return pic_ref;
2505 }
2506
2507 /* The __tls_get_addr symbol. */
2508 static GTY(()) rtx m68k_tls_get_addr;
2509
2510 /* Return SYMBOL_REF for __tls_get_addr. */
2511
2512 static rtx
2513 m68k_get_tls_get_addr (void)
2514 {
2515 if (m68k_tls_get_addr == NULL_RTX)
2516 m68k_tls_get_addr = init_one_libfunc ("__tls_get_addr");
2517
2518 return m68k_tls_get_addr;
2519 }
2520
2521 /* Return the libcall result in A0 instead of the usual D0. */
2522 static bool m68k_libcall_value_in_a0_p = false;
2523
2524 /* Emit an instruction sequence that calls __tls_get_addr. X is
2525 the TLS symbol we are referencing and RELOC is the symbol type to use
2526 (either TLSGD or TLSLDM). EQV is the REG_EQUAL note for the emitted
2527 sequence. Returns a pseudo register holding the result of the
2528 __tls_get_addr call. */
2529
2530 static rtx
2531 m68k_call_tls_get_addr (rtx x, rtx eqv, enum m68k_reloc reloc)
2532 {
2533 rtx a0;
2534 rtx_insn *insns;
2535 rtx dest;
2536
2537 /* Emit the call sequence. */
2538 start_sequence ();
2539
2540 /* FIXME: Unfortunately, emit_library_call_value does not
2541 consider (plus (%a5) (const (unspec))) to be a good enough
2542 operand for push, so it forces it into a register. The bad
2543 thing about this is that the combiner, due to copy propagation and
2544 other optimizations, sometimes cannot fix this up later. As a
2545 consequence, an additional register may be allocated, resulting in a spill.
2546 For reference, see args processing loops in
2547 calls.c:emit_library_call_value_1.
2548 For testcase, see gcc.target/m68k/tls-{gd, ld}.c */
2549 x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), NULL_RTX);
2550
2551 /* __tls_get_addr() is not a libcall, but emitting a libcall_value
2552 is the simplest way of generating a call. The difference between
2553 __tls_get_addr() and an ordinary libcall is that the result is returned
2554 in A0 instead of the usual D0. To work around this, we use
2555 m68k_libcall_value_in_a0_p, which temporarily switches the result to A0. */
2556
2557 m68k_libcall_value_in_a0_p = true;
2558 a0 = emit_library_call_value (m68k_get_tls_get_addr (), NULL_RTX, LCT_PURE,
2559 Pmode, x, Pmode);
2560 m68k_libcall_value_in_a0_p = false;
2561
2562 insns = get_insns ();
2563 end_sequence ();
2564
2565 gcc_assert (can_create_pseudo_p ());
2566 dest = gen_reg_rtx (Pmode);
2567 emit_libcall_block (insns, dest, a0, eqv);
2568
2569 return dest;
2570 }
2571
2572 /* The __m68k_read_tp symbol. */
2573 static GTY(()) rtx m68k_read_tp;
2574
2575 /* Return SYMBOL_REF for __m68k_read_tp. */
2576
2577 static rtx
2578 m68k_get_m68k_read_tp (void)
2579 {
2580 if (m68k_read_tp == NULL_RTX)
2581 m68k_read_tp = init_one_libfunc ("__m68k_read_tp");
2582
2583 return m68k_read_tp;
2584 }
2585
2586 /* Emit an instruction sequence that calls __m68k_read_tp.
2587 Returns a pseudo register holding the result of the call. */
2588
2589 static rtx
2590 m68k_call_m68k_read_tp (void)
2591 {
2592 rtx a0;
2593 rtx eqv;
2594 rtx_insn *insns;
2595 rtx dest;
2596
2597 start_sequence ();
2598
2599 /* __m68k_read_tp() is not a libcall, but emitting a libcall_value
2600 is the simplest way of generating a call. The difference between
2601 __m68k_read_tp() and an ordinary libcall is that the result is returned
2602 in A0 instead of the usual D0. To work around this, we use
2603 m68k_libcall_value_in_a0_p, which temporarily switches the result to A0. */
2604
2605 /* Emit the call sequence. */
2606 m68k_libcall_value_in_a0_p = true;
2607 a0 = emit_library_call_value (m68k_get_m68k_read_tp (), NULL_RTX, LCT_PURE,
2608 Pmode);
2609 m68k_libcall_value_in_a0_p = false;
2610 insns = get_insns ();
2611 end_sequence ();
2612
2613 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2614 share the m68k_read_tp result with other IE/LE model accesses. */
2615 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const1_rtx), UNSPEC_RELOC32);
2616
2617 gcc_assert (can_create_pseudo_p ());
2618 dest = gen_reg_rtx (Pmode);
2619 emit_libcall_block (insns, dest, a0, eqv);
2620
2621 return dest;
2622 }
2623
2624 /* Return a legitimized address for accessing TLS SYMBOL_REF X.
2625 For explanations of the instruction sequences, see the TLS/NPTL ABI for
2626 m68k and ColdFire. */
2627
2628 rtx
2629 m68k_legitimize_tls_address (rtx orig)
2630 {
2631 switch (SYMBOL_REF_TLS_MODEL (orig))
2632 {
2633 case TLS_MODEL_GLOBAL_DYNAMIC:
2634 orig = m68k_call_tls_get_addr (orig, orig, RELOC_TLSGD);
2635 break;
2636
2637 case TLS_MODEL_LOCAL_DYNAMIC:
2638 {
2639 rtx eqv;
2640 rtx a0;
2641 rtx x;
2642
2643 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2644 share the LDM result with other LD model accesses. */
2645 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
2646 UNSPEC_RELOC32);
2647
2648 a0 = m68k_call_tls_get_addr (orig, eqv, RELOC_TLSLDM);
2649
2650 x = m68k_wrap_symbol (orig, RELOC_TLSLDO, a0, NULL_RTX);
2651
2652 if (can_create_pseudo_p ())
2653 x = m68k_move_to_reg (x, orig, NULL_RTX);
2654
2655 orig = x;
2656 break;
2657 }
2658
2659 case TLS_MODEL_INITIAL_EXEC:
2660 {
2661 rtx a0;
2662 rtx x;
2663
2664 a0 = m68k_call_m68k_read_tp ();
2665
2666 x = m68k_wrap_symbol_into_got_ref (orig, RELOC_TLSIE, NULL_RTX);
2667 x = gen_rtx_PLUS (Pmode, x, a0);
2668
2669 if (can_create_pseudo_p ())
2670 x = m68k_move_to_reg (x, orig, NULL_RTX);
2671
2672 orig = x;
2673 break;
2674 }
2675
2676 case TLS_MODEL_LOCAL_EXEC:
2677 {
2678 rtx a0;
2679 rtx x;
2680
2681 a0 = m68k_call_m68k_read_tp ();
2682
2683 x = m68k_wrap_symbol (orig, RELOC_TLSLE, a0, NULL_RTX);
2684
2685 if (can_create_pseudo_p ())
2686 x = m68k_move_to_reg (x, orig, NULL_RTX);
2687
2688 orig = x;
2689 break;
2690 }
2691
2692 default:
2693 gcc_unreachable ();
2694 }
2695
2696 return orig;
2697 }
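
/* Sketch of the local-exec case above (illustration only, in the default
   non -mxtls configuration): for a symbol FOO with
   TLS_MODEL_LOCAL_EXEC, the result is
     (plus:SI TP (const:SI (unspec:SI [FOO (const_int RELOC_TLSLE)]
                                      UNSPEC_RELOC16)))
   where TP is the pseudo holding the __m68k_read_tp result, i.e. a
   link-time constant offset from the thread pointer. */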
2698
2699 /* Return true if X is a TLS symbol. */
2700
2701 static bool
2702 m68k_tls_symbol_p (rtx x)
2703 {
2704 if (!TARGET_HAVE_TLS)
2705 return false;
2706
2707 if (GET_CODE (x) != SYMBOL_REF)
2708 return false;
2709
2710 return SYMBOL_REF_TLS_MODEL (x) != 0;
2711 }
2712
2713 /* If !LEGITIMATE_P, return true if X is a TLS symbol reference,
2714 though an illegitimate one.
2715 If LEGITIMATE_P, return true if X is a legitimate TLS symbol reference. */
2716
2717 bool
2718 m68k_tls_reference_p (rtx x, bool legitimate_p)
2719 {
2720 if (!TARGET_HAVE_TLS)
2721 return false;
2722
2723 if (!legitimate_p)
2724 {
2725 subrtx_var_iterator::array_type array;
2726 FOR_EACH_SUBRTX_VAR (iter, array, x, ALL)
2727 {
2728 rtx x = *iter;
2729
2730 /* Note: this is not the same as m68k_tls_symbol_p. */
2731 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0)
2732 return true;
2733
2734 /* Don't recurse into legitimate TLS references. */
2735 if (m68k_tls_reference_p (x, true))
2736 iter.skip_subrtxes ();
2737 }
2738 return false;
2739 }
2740 else
2741 {
2742 enum m68k_reloc reloc = RELOC_GOT;
2743
2744 return (m68k_unwrap_symbol_1 (x, true, &reloc) != x
2745 && TLS_RELOC_P (reloc));
2746 }
2747 }
2748
2749 \f
2750
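/* USE_MOVQ (I) is true iff I fits the signed 8-bit immediate field of
   the moveq instruction, i.e. -128 <= I <= 127. */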
2751 #define USE_MOVQ(i) ((unsigned) ((i) + 128) <= 255)
2752
2753 /* Return the type of move that should be used for integer I. */
2754
2755 M68K_CONST_METHOD
2756 m68k_const_method (HOST_WIDE_INT i)
2757 {
2758 unsigned u;
2759
2760 if (USE_MOVQ (i))
2761 return MOVQ;
2762
2763 /* The ColdFire doesn't have byte or word operations. */
2764 /* FIXME: This may not be useful for the m68060 either. */
2765 if (!TARGET_COLDFIRE)
2766 {
2767 /* If -256 < N < 256 but N is not in range for a moveq,
2768 N^0xff will be, so use moveq #N^0xff,dreg; not.b dreg. */
2769 if (USE_MOVQ (i ^ 0xff))
2770 return NOTB;
2771 /* Likewise, try with not.w. */
2772 if (USE_MOVQ (i ^ 0xffff))
2773 return NOTW;
2774 /* This is the only value where neg.w is useful. */
2775 if (i == -65408)
2776 return NEGW;
2777 }
2778
2779 /* Try also with swap. */
2780 u = i;
2781 if (USE_MOVQ ((u >> 16) | (u << 16)))
2782 return SWAP;
2783
2784 if (TARGET_ISAB)
2785 {
2786 /* Try using MVZ/MVS with an immediate value to load constants. */
2787 if (i >= 0 && i <= 65535)
2788 return MVZ;
2789 if (i >= -32768 && i <= 32767)
2790 return MVS;
2791 }
2792
2793 /* Otherwise, use move.l */
2794 return MOVL;
2795 }
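
/* Illustrative mappings (a sketch, assuming a plain 68020):
     100        -> MOVQ (fits moveq directly)
     255        -> NOTB (255 ^ 0xff == 0 fits moveq; moveq then not.b)
     -65408     -> NEGW (moveq #-128 then neg.w)
     0x50000    -> SWAP (the rotated value 5 fits moveq; moveq then swap)
     0x12345678 -> MOVL (plain move.l) */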
2796
2797 /* Return the cost of moving constant I into a data register. */
2798
2799 static int
2800 const_int_cost (HOST_WIDE_INT i)
2801 {
2802 switch (m68k_const_method (i))
2803 {
2804 case MOVQ:
2805 /* Constants between -128 and 127 are cheap due to moveq. */
2806 return 0;
2807 case MVZ:
2808 case MVS:
2809 case NOTB:
2810 case NOTW:
2811 case NEGW:
2812 case SWAP:
2813 /* Constants easily generated by moveq + not.b/not.w/neg.w/swap. */
2814 return 1;
2815 case MOVL:
2816 return 2;
2817 default:
2818 gcc_unreachable ();
2819 }
2820 }
2821
2822 static bool
2823 m68k_rtx_costs (rtx x, machine_mode mode, int outer_code,
2824 int opno ATTRIBUTE_UNUSED,
2825 int *total, bool speed ATTRIBUTE_UNUSED)
2826 {
2827 int code = GET_CODE (x);
2828
2829 switch (code)
2830 {
2831 case CONST_INT:
2832 /* Constant zero is super cheap due to clr instruction. */
2833 if (x == const0_rtx)
2834 *total = 0;
2835 else
2836 *total = const_int_cost (INTVAL (x));
2837 return true;
2838
2839 case CONST:
2840 case LABEL_REF:
2841 case SYMBOL_REF:
2842 *total = 3;
2843 return true;
2844
2845 case CONST_DOUBLE:
2846 /* Make 0.0 cheaper than other floating constants to
2847 encourage creating tstsf and tstdf insns. */
2848 if (outer_code == COMPARE
2849 && (x == CONST0_RTX (SFmode) || x == CONST0_RTX (DFmode)))
2850 *total = 4;
2851 else
2852 *total = 5;
2853 return true;
2854
2855 /* These are vaguely right for a 68020. */
2856 /* The costs for long multiply have been adjusted to work properly
2857 in synth_mult on the 68020, relative to an average of the time
2858 for add and the time for shift, taking away a little more because
2859 sometimes move insns are needed. */
2860 /* div?.w is relatively cheaper on 68000 counted in COSTS_N_INSNS
2861 terms. */
2862 #define MULL_COST \
2863 (TUNE_68060 ? 2 \
2864 : TUNE_68040 ? 5 \
2865 : (TUNE_CFV2 && TUNE_EMAC) ? 3 \
2866 : (TUNE_CFV2 && TUNE_MAC) ? 4 \
2867 : TUNE_CFV2 ? 8 \
2868 : TARGET_COLDFIRE ? 3 : 13)
2869
2870 #define MULW_COST \
2871 (TUNE_68060 ? 2 \
2872 : TUNE_68040 ? 3 \
2873 : TUNE_68000_10 ? 5 \
2874 : (TUNE_CFV2 && TUNE_EMAC) ? 3 \
2875 : (TUNE_CFV2 && TUNE_MAC) ? 2 \
2876 : TUNE_CFV2 ? 8 \
2877 : TARGET_COLDFIRE ? 2 : 8)
2878
2879 #define DIVW_COST \
2880 (TARGET_CF_HWDIV ? 11 \
2881 : TUNE_68000_10 || TARGET_COLDFIRE ? 12 : 27)
2882
2883 case PLUS:
2884 /* An lea costs about three times as much as a simple add. */
2885 if (mode == SImode
2886 && GET_CODE (XEXP (x, 1)) == REG
2887 && GET_CODE (XEXP (x, 0)) == MULT
2888 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
2889 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2890 && (INTVAL (XEXP (XEXP (x, 0), 1)) == 2
2891 || INTVAL (XEXP (XEXP (x, 0), 1)) == 4
2892 || INTVAL (XEXP (XEXP (x, 0), 1)) == 8))
2893 {
2894 /* lea an@(dx:l:i),am */
2895 *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 2 : 3);
2896 return true;
2897 }
2898 return false;
2899
2900 case ASHIFT:
2901 case ASHIFTRT:
2902 case LSHIFTRT:
2903 if (TUNE_68060)
2904 {
2905 *total = COSTS_N_INSNS (1);
2906 return true;
2907 }
2908 if (TUNE_68000_10)
2909 {
2910 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
2911 {
2912 if (INTVAL (XEXP (x, 1)) < 16)
2913 *total = COSTS_N_INSNS (2) + INTVAL (XEXP (x, 1)) / 2;
2914 else
2915 /* We're using clrw + swap for these cases. */
2916 *total = COSTS_N_INSNS (4) + (INTVAL (XEXP (x, 1)) - 16) / 2;
2917 }
2918 else
2919 *total = COSTS_N_INSNS (10); /* Worst case. */
2920 return true;
2921 }
2922 /* A shift by exactly 16 bits can be done with clr.w + swap, i.e. two instructions. */
2923 if (GET_CODE (XEXP (x, 1)) == CONST_INT
2924 && (INTVAL (XEXP (x, 1)) == 16))
2925 {
2926 *total = COSTS_N_INSNS (2); /* clrw;swap */
2927 return true;
2928 }
2929 if (GET_CODE (XEXP (x, 1)) == CONST_INT
2930 && !(INTVAL (XEXP (x, 1)) > 0
2931 && INTVAL (XEXP (x, 1)) <= 8))
2932 {
2933 *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 1 : 3); /* lsr #i,dn */
2934 return true;
2935 }
2936 return false;
2937
2938 case MULT:
2939 if ((GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
2940 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
2941 && mode == SImode)
2942 *total = COSTS_N_INSNS (MULW_COST);
2943 else if (mode == QImode || mode == HImode)
2944 *total = COSTS_N_INSNS (MULW_COST);
2945 else
2946 *total = COSTS_N_INSNS (MULL_COST);
2947 return true;
2948
2949 case DIV:
2950 case UDIV:
2951 case MOD:
2952 case UMOD:
2953 if (mode == QImode || mode == HImode)
2954 *total = COSTS_N_INSNS (DIVW_COST); /* div.w */
2955 else if (TARGET_CF_HWDIV)
2956 *total = COSTS_N_INSNS (18);
2957 else
2958 *total = COSTS_N_INSNS (43); /* div.l */
2959 return true;
2960
2961 case ZERO_EXTRACT:
2962 if (outer_code == COMPARE)
2963 *total = 0;
2964 return false;
2965
2966 default:
2967 return false;
2968 }
2969 }
2970
2971 /* Return an instruction to move CONST_INT OPERANDS[1] into data register
2972 OPERANDS[0]. */
2973
2974 static const char *
2975 output_move_const_into_data_reg (rtx *operands)
2976 {
2977 HOST_WIDE_INT i;
2978
2979 i = INTVAL (operands[1]);
2980 switch (m68k_const_method (i))
2981 {
2982 case MVZ:
2983 return "mvzw %1,%0";
2984 case MVS:
2985 return "mvsw %1,%0";
2986 case MOVQ:
2987 return "moveq %1,%0";
2988 case NOTB:
2989 CC_STATUS_INIT;
2990 operands[1] = GEN_INT (i ^ 0xff);
2991 return "moveq %1,%0\n\tnot%.b %0";
2992 case NOTW:
2993 CC_STATUS_INIT;
2994 operands[1] = GEN_INT (i ^ 0xffff);
2995 return "moveq %1,%0\n\tnot%.w %0";
2996 case NEGW:
2997 CC_STATUS_INIT;
2998 return "moveq #-128,%0\n\tneg%.w %0";
2999 case SWAP:
3000 {
3001 unsigned u = i;
3002
3003 operands[1] = GEN_INT ((u << 16) | (u >> 16));
3004 return "moveq %1,%0\n\tswap %0";
3005 }
3006 case MOVL:
3007 return "move%.l %1,%0";
3008 default:
3009 gcc_unreachable ();
3010 }
3011 }
3012
3013 /* Return true if I can be handled by ISA B's mov3q instruction. */
3014
3015 bool
3016 valid_mov3q_const (HOST_WIDE_INT i)
3017 {
3018 return TARGET_ISAB && (i == -1 || IN_RANGE (i, 1, 7));
3019 }
3020
3021 /* Return an instruction to move CONST_INT OPERANDS[1] into OPERANDS[0].
3022 The local SRC below holds the value of OPERANDS[1]. */
3023
3024 static const char *
3025 output_move_simode_const (rtx *operands)
3026 {
3027 rtx dest;
3028 HOST_WIDE_INT src;
3029
3030 dest = operands[0];
3031 src = INTVAL (operands[1]);
3032 if (src == 0
3033 && (DATA_REG_P (dest) || MEM_P (dest))
3034 /* clr insns on 68000 read before writing. */
3035 && ((TARGET_68010 || TARGET_COLDFIRE)
3036 || !(MEM_P (dest) && MEM_VOLATILE_P (dest))))
3037 return "clr%.l %0";
3038 else if (GET_MODE (dest) == SImode && valid_mov3q_const (src))
3039 return "mov3q%.l %1,%0";
3040 else if (src == 0 && ADDRESS_REG_P (dest))
3041 return "sub%.l %0,%0";
3042 else if (DATA_REG_P (dest))
3043 return output_move_const_into_data_reg (operands);
3044 else if (ADDRESS_REG_P (dest) && IN_RANGE (src, -0x8000, 0x7fff))
3045 {
3046 if (valid_mov3q_const (src))
3047 return "mov3q%.l %1,%0";
3048 return "move%.w %1,%0";
3049 }
3050 else if (MEM_P (dest)
3051 && GET_CODE (XEXP (dest, 0)) == PRE_DEC
3052 && REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
3053 && IN_RANGE (src, -0x8000, 0x7fff))
3054 {
3055 if (valid_mov3q_const (src))
3056 return "mov3q%.l %1,%-";
3057 return "pea %a1";
3058 }
3059 return "move%.l %1,%0";
3060 }
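
/* A sketch of the selections above (assuming a plain 68020):
     0 into a data register     -> "clr%.l %0"
     0 into an address register -> "sub%.l %0,%0"
     5 into an address register -> "move%.w %1,%0" (hardware sign-extends)
     a small constant push      -> "pea %a1"
   On ISA B parts, mov3q%.l additionally covers -1 and 1..7. */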
3061
3062 const char *
3063 output_move_simode (rtx *operands)
3064 {
3065 if (GET_CODE (operands[1]) == CONST_INT)
3066 return output_move_simode_const (operands);
3067 else if ((GET_CODE (operands[1]) == SYMBOL_REF
3068 || GET_CODE (operands[1]) == CONST)
3069 && push_operand (operands[0], SImode))
3070 return "pea %a1";
3071 else if ((GET_CODE (operands[1]) == SYMBOL_REF
3072 || GET_CODE (operands[1]) == CONST)
3073 && ADDRESS_REG_P (operands[0]))
3074 return "lea %a1,%0";
3075 return "move%.l %1,%0";
3076 }
3077
3078 const char *
3079 output_move_himode (rtx *operands)
3080 {
3081 if (GET_CODE (operands[1]) == CONST_INT)
3082 {
3083 if (operands[1] == const0_rtx
3084 && (DATA_REG_P (operands[0])
3085 || GET_CODE (operands[0]) == MEM)
3086 /* clr insns on 68000 read before writing. */
3087 && ((TARGET_68010 || TARGET_COLDFIRE)
3088 || !(GET_CODE (operands[0]) == MEM
3089 && MEM_VOLATILE_P (operands[0]))))
3090 return "clr%.w %0";
3091 else if (operands[1] == const0_rtx
3092 && ADDRESS_REG_P (operands[0]))
3093 return "sub%.l %0,%0";
3094 else if (DATA_REG_P (operands[0])
3095 && INTVAL (operands[1]) < 128
3096 && INTVAL (operands[1]) >= -128)
3097 return "moveq %1,%0";
3098 else if (INTVAL (operands[1]) < 0x8000
3099 && INTVAL (operands[1]) >= -0x8000)
3100 return "move%.w %1,%0";
3101 }
3102 else if (CONSTANT_P (operands[1]))
3103 return "move%.l %1,%0";
3104 return "move%.w %1,%0";
3105 }
3106
3107 const char *
3108 output_move_qimode (rtx *operands)
3109 {
3110 /* The 68k family always modifies the stack pointer by at least 2, even
3111 for byte pushes. The 5200 (ColdFire) does not do this. */
3112
3113 /* This case is now generated by the pushqi1 pattern. */
3114 gcc_assert (!(GET_CODE (operands[0]) == MEM
3115 && GET_CODE (XEXP (operands[0], 0)) == PRE_DEC
3116 && XEXP (XEXP (operands[0], 0), 0) == stack_pointer_rtx
3117 && ! ADDRESS_REG_P (operands[1])
3118 && ! TARGET_COLDFIRE));
3119
3120 /* clr and st insns on 68000 read before writing. */
3121 if (!ADDRESS_REG_P (operands[0])
3122 && ((TARGET_68010 || TARGET_COLDFIRE)
3123 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3124 {
3125 if (operands[1] == const0_rtx)
3126 return "clr%.b %0";
3127 if ((!TARGET_COLDFIRE || DATA_REG_P (operands[0]))
3128 && GET_CODE (operands[1]) == CONST_INT
3129 && (INTVAL (operands[1]) & 255) == 255)
3130 {
3131 CC_STATUS_INIT;
3132 return "st %0";
3133 }
3134 }
3135 if (GET_CODE (operands[1]) == CONST_INT
3136 && DATA_REG_P (operands[0])
3137 && INTVAL (operands[1]) < 128
3138 && INTVAL (operands[1]) >= -128)
3139 return "moveq %1,%0";
3140 if (operands[1] == const0_rtx && ADDRESS_REG_P (operands[0]))
3141 return "sub%.l %0,%0";
3142 if (GET_CODE (operands[1]) != CONST_INT && CONSTANT_P (operands[1]))
3143 return "move%.l %1,%0";
3144 /* The 68k family (including the 5200 ColdFire) does not support byte
3145 moves to or from address registers. */
3146 if (ADDRESS_REG_P (operands[0]) || ADDRESS_REG_P (operands[1]))
3147 return "move%.w %1,%0";
3148 return "move%.b %1,%0";
3149 }
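
/* For example (illustration): storing a constant whose low byte is all
   ones, such as -1, returns "st %0", which sets the destination byte
   to 0xff in one instruction instead of moving an immediate. */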
3150
3151 const char *
3152 output_move_stricthi (rtx *operands)
3153 {
3154 if (operands[1] == const0_rtx
3155 /* clr insns on 68000 read before writing. */
3156 && ((TARGET_68010 || TARGET_COLDFIRE)
3157 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3158 return "clr%.w %0";
3159 return "move%.w %1,%0";
3160 }
3161
3162 const char *
3163 output_move_strictqi (rtx *operands)
3164 {
3165 if (operands[1] == const0_rtx
3166 /* clr insns on 68000 read before writing. */
3167 && ((TARGET_68010 || TARGET_COLDFIRE)
3168 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3169 return "clr%.b %0";
3170 return "move%.b %1,%0";
3171 }
3172
3173 /* Return the best assembler insn template
3174 for moving operands[1] into operands[0] as a fullword. */
3175
3176 static const char *
3177 singlemove_string (rtx *operands)
3178 {
3179 if (GET_CODE (operands[1]) == CONST_INT)
3180 return output_move_simode_const (operands);
3181 return "move%.l %1,%0";
3182 }
3183
3184
3185 /* Output assembler or rtl code to perform a doubleword move insn
3186 with operands OPERANDS.
3187 Pointers to 3 helper functions should be specified:
3188 HANDLE_REG_ADJUST to adjust a register by a small value,
3189 HANDLE_COMPADR to compute an address and
3190 HANDLE_MOVSI to move 4 bytes. */
3191
3192 static void
3193 handle_move_double (rtx operands[2],
3194 void (*handle_reg_adjust) (rtx, int),
3195 void (*handle_compadr) (rtx [2]),
3196 void (*handle_movsi) (rtx [2]))
3197 {
3198 enum
3199 {
3200 REGOP, OFFSOP, MEMOP, PUSHOP, POPOP, CNSTOP, RNDOP
3201 } optype0, optype1;
3202 rtx latehalf[2];
3203 rtx middlehalf[2];
3204 rtx xops[2];
3205 rtx addreg0 = 0, addreg1 = 0;
3206 int dest_overlapped_low = 0;
3207 int size = GET_MODE_SIZE (GET_MODE (operands[0]));
3208
3209 middlehalf[0] = 0;
3210 middlehalf[1] = 0;
3211
3212 /* First classify both operands. */
3213
3214 if (REG_P (operands[0]))
3215 optype0 = REGOP;
3216 else if (offsettable_memref_p (operands[0]))
3217 optype0 = OFFSOP;
3218 else if (GET_CODE (XEXP (operands[0], 0)) == POST_INC)
3219 optype0 = POPOP;
3220 else if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC)
3221 optype0 = PUSHOP;
3222 else if (GET_CODE (operands[0]) == MEM)
3223 optype0 = MEMOP;
3224 else
3225 optype0 = RNDOP;
3226
3227 if (REG_P (operands[1]))
3228 optype1 = REGOP;
3229 else if (CONSTANT_P (operands[1]))
3230 optype1 = CNSTOP;
3231 else if (offsettable_memref_p (operands[1]))
3232 optype1 = OFFSOP;
3233 else if (GET_CODE (XEXP (operands[1], 0)) == POST_INC)
3234 optype1 = POPOP;
3235 else if (GET_CODE (XEXP (operands[1], 0)) == PRE_DEC)
3236 optype1 = PUSHOP;
3237 else if (GET_CODE (operands[1]) == MEM)
3238 optype1 = MEMOP;
3239 else
3240 optype1 = RNDOP;
3241
3242 /* Check for the cases that the operand constraints are not supposed
3243 to allow. Generating code for these cases would be
3244 painful. */
3245 gcc_assert (optype0 != RNDOP && optype1 != RNDOP);
3246
3247 /* If one operand is decrementing and one is incrementing,
3248 decrement the former register explicitly
3249 and change that operand into ordinary indexing. */
3250
3251 if (optype0 == PUSHOP && optype1 == POPOP)
3252 {
3253 operands[0] = XEXP (XEXP (operands[0], 0), 0);
3254
3255 handle_reg_adjust (operands[0], -size);
3256
3257 if (GET_MODE (operands[1]) == XFmode)
3258 operands[0] = gen_rtx_MEM (XFmode, operands[0]);
3259 else if (GET_MODE (operands[0]) == DFmode)
3260 operands[0] = gen_rtx_MEM (DFmode, operands[0]);
3261 else
3262 operands[0] = gen_rtx_MEM (DImode, operands[0]);
3263 optype0 = OFFSOP;
3264 }
3265 if (optype0 == POPOP && optype1 == PUSHOP)
3266 {
3267 operands[1] = XEXP (XEXP (operands[1], 0), 0);
3268
3269 handle_reg_adjust (operands[1], -size);
3270
3271 if (GET_MODE (operands[1]) == XFmode)
3272 operands[1] = gen_rtx_MEM (XFmode, operands[1]);
3273 else if (GET_MODE (operands[1]) == DFmode)
3274 operands[1] = gen_rtx_MEM (DFmode, operands[1]);
3275 else
3276 operands[1] = gen_rtx_MEM (DImode, operands[1]);
3277 optype1 = OFFSOP;
3278 }
3279
3280 /* If an operand is an unoffsettable memory ref, find a register
3281 we can increment temporarily to make it refer to the second word. */
3282
3283 if (optype0 == MEMOP)
3284 addreg0 = find_addr_reg (XEXP (operands[0], 0));
3285
3286 if (optype1 == MEMOP)
3287 addreg1 = find_addr_reg (XEXP (operands[1], 0));
3288
3289 /* Ok, we can do one word at a time.
3290 Normally we do the low-numbered word first,
3291 but if either operand is autodecrementing then we
3292 do the high-numbered word first.
3293
3294 In either case, set up in LATEHALF the operands to use
3295 for the high-numbered word and in some cases alter the
3296 operands in OPERANDS to be suitable for the low-numbered word. */
3297
3298 if (size == 12)
3299 {
3300 if (optype0 == REGOP)
3301 {
3302 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 2);
3303 middlehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
3304 }
3305 else if (optype0 == OFFSOP)
3306 {
3307 middlehalf[0] = adjust_address (operands[0], SImode, 4);
3308 latehalf[0] = adjust_address (operands[0], SImode, size - 4);
3309 }
3310 else
3311 {
3312 middlehalf[0] = adjust_address (operands[0], SImode, 0);
3313 latehalf[0] = adjust_address (operands[0], SImode, 0);
3314 }
3315
3316 if (optype1 == REGOP)
3317 {
3318 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 2);
3319 middlehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
3320 }
3321 else if (optype1 == OFFSOP)
3322 {
3323 middlehalf[1] = adjust_address (operands[1], SImode, 4);
3324 latehalf[1] = adjust_address (operands[1], SImode, size - 4);
3325 }
3326 else if (optype1 == CNSTOP)
3327 {
3328 if (GET_CODE (operands[1]) == CONST_DOUBLE)
3329 {
3330 long l[3];
3331
3332 REAL_VALUE_TO_TARGET_LONG_DOUBLE
3333 (*CONST_DOUBLE_REAL_VALUE (operands[1]), l);
3334 operands[1] = GEN_INT (l[0]);
3335 middlehalf[1] = GEN_INT (l[1]);
3336 latehalf[1] = GEN_INT (l[2]);
3337 }
3338 else
3339 {
3340 /* No non-CONST_DOUBLE constant should ever appear
3341 here. */
3342 gcc_assert (!CONSTANT_P (operands[1]));
3343 }
3344 }
3345 else
3346 {
3347 middlehalf[1] = adjust_address (operands[1], SImode, 0);
3348 latehalf[1] = adjust_address (operands[1], SImode, 0);
3349 }
3350 }
3351 else
3352 /* size is not 12: */
3353 {
3354 if (optype0 == REGOP)
3355 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
3356 else if (optype0 == OFFSOP)
3357 latehalf[0] = adjust_address (operands[0], SImode, size - 4);
3358 else
3359 latehalf[0] = adjust_address (operands[0], SImode, 0);
3360
3361 if (optype1 == REGOP)
3362 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
3363 else if (optype1 == OFFSOP)
3364 latehalf[1] = adjust_address (operands[1], SImode, size - 4);
3365 else if (optype1 == CNSTOP)
3366 split_double (operands[1], &operands[1], &latehalf[1]);
3367 else
3368 latehalf[1] = adjust_address (operands[1], SImode, 0);
3369 }
3370
3371 /* If insn is effectively movd N(REG),-(REG) then we will do the high
3372 word first. We should use the adjusted operand 1 (which is N+4(REG))
3373 for the low word as well, to compensate for the first decrement of
3374 REG. */
3375 if (optype0 == PUSHOP
3376 && reg_overlap_mentioned_p (XEXP (XEXP (operands[0], 0), 0), operands[1]))
3377 operands[1] = middlehalf[1] = latehalf[1];
3378
3379 /* For (set (reg:DI N) (mem:DI ... (reg:SI N) ...)),
3380 if the upper part of reg N does not appear in the MEM, arrange to
3381 emit the move late-half first. Otherwise, compute the MEM address
3382 into the upper part of N and use that as a pointer to the memory
3383 operand. */
3384 if (optype0 == REGOP
3385 && (optype1 == OFFSOP || optype1 == MEMOP))
3386 {
3387 rtx testlow = gen_rtx_REG (SImode, REGNO (operands[0]));
3388
3389 if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
3390 && reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
3391 {
3392 /* If both halves of dest are used in the src memory address,
3393 compute the address into latehalf of dest.
3394 Note that this can't happen if the dest is two data regs. */
3395 compadr:
3396 xops[0] = latehalf[0];
3397 xops[1] = XEXP (operands[1], 0);
3398
3399 handle_compadr (xops);
3400 if (GET_MODE (operands[1]) == XFmode)
3401 {
3402 operands[1] = gen_rtx_MEM (XFmode, latehalf[0]);
3403 middlehalf[1] = adjust_address (operands[1], DImode, size - 8);
3404 latehalf[1] = adjust_address (operands[1], DImode, size - 4);
3405 }
3406 else
3407 {
3408 operands[1] = gen_rtx_MEM (DImode, latehalf[0]);
3409 latehalf[1] = adjust_address (operands[1], DImode, size - 4);
3410 }
3411 }
3412 else if (size == 12
3413 && reg_overlap_mentioned_p (middlehalf[0],
3414 XEXP (operands[1], 0)))
3415 {
3416 /* Check for two regs used by both source and dest.
3417 Note that this can't happen if the dest is all data regs.
3418 It can happen if the dest is d6, d7, a0.
3419 But in that case, latehalf is an addr reg, so
3420 the code at compadr does ok. */
3421
3422 if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
3423 || reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
3424 goto compadr;
3425
3426 /* JRV says this can't happen: */
3427 gcc_assert (!addreg0 && !addreg1);
3428
3429 /* Only the middle reg conflicts; simply put it last. */
3430 handle_movsi (operands);
3431 handle_movsi (latehalf);
3432 handle_movsi (middlehalf);
3433
3434 return;
3435 }
3436 else if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0)))
3437 /* If the low half of dest is mentioned in the source memory
3438 address, then arrange to emit the late half of the move first. */
3439 dest_overlapped_low = 1;
3440 }
3441
3442 /* If one or both operands are autodecrementing,
3443 do the two words, high-numbered first. */
3444
3445 /* Likewise, if the first move would clobber the source of the second
3446 one, do them in the other order. This happens only for registers;
3447 such overlap can't happen in memory unless the user explicitly
3448 sets it up, and that is an undefined circumstance. */
3449
3450 if (optype0 == PUSHOP || optype1 == PUSHOP
3451 || (optype0 == REGOP && optype1 == REGOP
3452 && ((middlehalf[1] && REGNO (operands[0]) == REGNO (middlehalf[1]))
3453 || REGNO (operands[0]) == REGNO (latehalf[1])))
3454 || dest_overlapped_low)
3455 {
3456 /* Make any unoffsettable addresses point at high-numbered word. */
3457 if (addreg0)
3458 handle_reg_adjust (addreg0, size - 4);
3459 if (addreg1)
3460 handle_reg_adjust (addreg1, size - 4);
3461
3462 /* Do that word. */
3463 handle_movsi (latehalf);
3464
3465 /* Undo the adds we just did. */
3466 if (addreg0)
3467 handle_reg_adjust (addreg0, -4);
3468 if (addreg1)
3469 handle_reg_adjust (addreg1, -4);
3470
3471 if (size == 12)
3472 {
3473 handle_movsi (middlehalf);
3474
3475 if (addreg0)
3476 handle_reg_adjust (addreg0, -4);
3477 if (addreg1)
3478 handle_reg_adjust (addreg1, -4);
3479 }
3480
3481 /* Do low-numbered word. */
3482
3483 handle_movsi (operands);
3484 return;
3485 }
3486
3487 /* Normal case: do the two words, low-numbered first. */
3488
3489 handle_movsi (operands);
3490
3491 /* Do the middle one of the three words for long double. */
3492 if (size == 12)
3493 {
3494 if (addreg0)
3495 handle_reg_adjust (addreg0, 4);
3496 if (addreg1)
3497 handle_reg_adjust (addreg1, 4);
3498
3499 handle_movsi (middlehalf);
3500 }
3501
3502 /* Make any unoffsettable addresses point at high-numbered word. */
3503 if (addreg0)
3504 handle_reg_adjust (addreg0, 4);
3505 if (addreg1)
3506 handle_reg_adjust (addreg1, 4);
3507
3508 /* Do that word. */
3509 handle_movsi (latehalf);
3510
3511 /* Undo the adds we just did. */
3512 if (addreg0)
3513 handle_reg_adjust (addreg0, -(size - 4));
3514 if (addreg1)
3515 handle_reg_adjust (addreg1, -(size - 4));
3516
3517 return;
3518 }
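
/* Worked example (illustration only): pushing the DImode pair d0/d1,
   i.e. moving (reg:DI %d0) into (mem:DI (pre_dec %sp)), takes the
   "high-numbered word first" path above and emits
	move.l %d1,-(%sp)
	move.l %d0,-(%sp)
   so the most significant word (%d0) ends up at the lower address,
   as the big-endian layout requires. */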
3519
3520 /* Output assembler code to adjust REG by N. */
3521 static void
3522 output_reg_adjust (rtx reg, int n)
3523 {
3524 const char *s;
3525
3526 gcc_assert (GET_MODE (reg) == SImode && n >= -12 && n != 0 && n <= 12);
3527
3528 switch (n)
3529 {
3530 case 12:
3531 s = "add%.l #12,%0";
3532 break;
3533
3534 case 8:
3535 s = "addq%.l #8,%0";
3536 break;
3537
3538 case 4:
3539 s = "addq%.l #4,%0";
3540 break;
3541
3542 case -12:
3543 s = "sub%.l #12,%0";
3544 break;
3545
3546 case -8:
3547 s = "subq%.l #8,%0";
3548 break;
3549
3550 case -4:
3551 s = "subq%.l #4,%0";
3552 break;
3553
3554 default:
3555 gcc_unreachable ();
3556 s = NULL;
3557 }
3558
3559 output_asm_insn (s, &reg);
3560 }
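
/* Note: addq/subq can only encode immediates 1 through 8, which is why
   the +/-12 cases above fall back to full add%.l/sub%.l forms. */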
3561
3562 /* Emit rtl code to adjust REG by N. */
3563 static void
3564 emit_reg_adjust (rtx reg1, int n)
3565 {
3566 rtx reg2;
3567
3568 gcc_assert (GET_MODE (reg1) == SImode && n >= -12 && n != 0 && n <= 12);
3569
3570 reg1 = copy_rtx (reg1);
3571 reg2 = copy_rtx (reg1);
3572
3573 if (n < 0)
3574 emit_insn (gen_subsi3 (reg1, reg2, GEN_INT (-n)));
3575 else if (n > 0)
3576 emit_insn (gen_addsi3 (reg1, reg2, GEN_INT (n)));
3577 else
3578 gcc_unreachable ();
3579 }
3580
3581 /* Output assembler to load address OPERANDS[1] into register OPERANDS[0]. */
3582 static void
3583 output_compadr (rtx operands[2])
3584 {
3585 output_asm_insn ("lea %a1,%0", operands);
3586 }
3587
3588 /* Output the best assembler insn for moving operands[1] into operands[0]
3589 as a fullword. */
3590 static void
3591 output_movsi (rtx operands[2])
3592 {
3593 output_asm_insn (singlemove_string (operands), operands);
3594 }
3595
3596 /* Copy OP and change its mode to MODE. */
3597 static rtx
3598 copy_operand (rtx op, machine_mode mode)
3599 {
3600 /* ??? This looks really ugly. There must be a better way
3601 to change the mode of an operand. */
3602 if (GET_MODE (op) != VOIDmode)
3603 {
3604 if (REG_P (op))
3605 op = gen_rtx_REG (mode, REGNO (op));
3606 else
3607 {
3608 op = copy_rtx (op);
3609 PUT_MODE (op, mode);
3610 }
3611 }
3612
3613 return op;
3614 }
3615
3616 /* Emit rtl code for moving operands[1] into operands[0] as a fullword. */
3617 static void
3618 emit_movsi (rtx operands[2])
3619 {
3620 operands[0] = copy_operand (operands[0], SImode);
3621 operands[1] = copy_operand (operands[1], SImode);
3622
3623 emit_insn (gen_movsi (operands[0], operands[1]));
3624 }
3625
3626 /* Output assembler code to perform a doubleword move insn
3627 with operands OPERANDS. */
3628 const char *
3629 output_move_double (rtx *operands)
3630 {
3631 handle_move_double (operands,
3632 output_reg_adjust, output_compadr, output_movsi);
3633
3634 return "";
3635 }
3636
3637 /* Output rtl code to perform a doubleword move insn
3638 with operands OPERANDS. */
3639 void
3640 m68k_emit_move_double (rtx operands[2])
3641 {
3642 handle_move_double (operands, emit_reg_adjust, emit_movsi, emit_movsi);
3643 }
3644
3645 /* Ensure mode of ORIG, a REG rtx, is MODE. Returns either ORIG or a
3646 new rtx with the correct mode. */
3647
3648 static rtx
3649 force_mode (machine_mode mode, rtx orig)
3650 {
3651 if (mode == GET_MODE (orig))
3652 return orig;
3653
3654 if (REGNO (orig) >= FIRST_PSEUDO_REGISTER)
3655 abort ();
3656
3657 return gen_rtx_REG (mode, REGNO (orig));
3658 }
3659
3660 static int
3661 fp_reg_operand (rtx op, machine_mode mode ATTRIBUTE_UNUSED)
3662 {
3663 return reg_renumber && FP_REG_P (op);
3664 }
3665
3666 /* Emit insns to move operands[1] into operands[0].
3667
3668 Return 1 if we have written out everything that needs to be done to
3669 do the move. Otherwise, return 0 and the caller will emit the move
3670 normally.
3671
3672 Note SCRATCH_REG may not be in the proper mode depending on how it
3673 will be used. This routine is responsible for creating a new copy
3674 of SCRATCH_REG in the proper mode. */
3675
3676 int
3677 emit_move_sequence (rtx *operands, machine_mode mode, rtx scratch_reg)
3678 {
3679 register rtx operand0 = operands[0];
3680 register rtx operand1 = operands[1];
3681 register rtx tem;
3682
3683 if (scratch_reg
3684 && reload_in_progress && GET_CODE (operand0) == REG
3685 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
3686 operand0 = reg_equiv_mem (REGNO (operand0));
3687 else if (scratch_reg
3688 && reload_in_progress && GET_CODE (operand0) == SUBREG
3689 && GET_CODE (SUBREG_REG (operand0)) == REG
3690 && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
3691 {
3692 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
3693 the code which tracks sets/uses for delete_output_reload. */
3694 rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
3695 reg_equiv_mem (REGNO (SUBREG_REG (operand0))),
3696 SUBREG_BYTE (operand0));
3697 operand0 = alter_subreg (&temp, true);
3698 }
3699
3700 if (scratch_reg
3701 && reload_in_progress && GET_CODE (operand1) == REG
3702 && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
3703 operand1 = reg_equiv_mem (REGNO (operand1));
3704 else if (scratch_reg
3705 && reload_in_progress && GET_CODE (operand1) == SUBREG
3706 && GET_CODE (SUBREG_REG (operand1)) == REG
3707 && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
3708 {
3709 /* We must not alter SUBREG_BYTE (operand1) since that would confuse
3710 the code which tracks sets/uses for delete_output_reload. */
3711 rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
3712 reg_equiv_mem (REGNO (SUBREG_REG (operand1))),
3713 SUBREG_BYTE (operand1));
3714 operand1 = alter_subreg (&temp, true);
3715 }
3716
3717 if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
3718 && ((tem = find_replacement (&XEXP (operand0, 0)))
3719 != XEXP (operand0, 0)))
3720 operand0 = gen_rtx_MEM (GET_MODE (operand0), tem);
3721 if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
3722 && ((tem = find_replacement (&XEXP (operand1, 0)))
3723 != XEXP (operand1, 0)))
3724 operand1 = gen_rtx_MEM (GET_MODE (operand1), tem);
3725
3726 /* Handle secondary reloads for loads/stores of FP registers where
3727 the address is symbolic, by using the scratch register. */
3728 if (fp_reg_operand (operand0, mode)
3729 && ((GET_CODE (operand1) == MEM
3730 && ! memory_address_p (DFmode, XEXP (operand1, 0)))
3731 || ((GET_CODE (operand1) == SUBREG
3732 && GET_CODE (XEXP (operand1, 0)) == MEM
3733 && !memory_address_p (DFmode, XEXP (XEXP (operand1, 0), 0)))))
3734 && scratch_reg)
3735 {
3736 if (GET_CODE (operand1) == SUBREG)
3737 operand1 = XEXP (operand1, 0);
3738
3739 /* SCRATCH_REG will hold an address. We want
3740 it in SImode regardless of what mode it was originally given
3741 to us. */
3742 scratch_reg = force_mode (SImode, scratch_reg);
3743
3744 /* The displacement D may make the whole address illegitimate; for
3745 such cases load D into the scratch reg and rebuild the address. */
3746 if (!memory_address_p (Pmode, XEXP (operand1, 0)))
3747 {
3748 emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
3749 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
3750 Pmode,
3751 XEXP (XEXP (operand1, 0), 0),
3752 scratch_reg));
3753 }
3754 else
3755 emit_move_insn (scratch_reg, XEXP (operand1, 0));
3756 emit_insn (gen_rtx_SET (operand0, gen_rtx_MEM (mode, scratch_reg)));
3757 return 1;
3758 }
3759 else if (fp_reg_operand (operand1, mode)
3760 && ((GET_CODE (operand0) == MEM
3761 && ! memory_address_p (DFmode, XEXP (operand0, 0)))
3762 || ((GET_CODE (operand0) == SUBREG)
3763 && GET_CODE (XEXP (operand0, 0)) == MEM
3764 && !memory_address_p (DFmode, XEXP (XEXP (operand0, 0), 0))))
3765 && scratch_reg)
3766 {
3767 if (GET_CODE (operand0) == SUBREG)
3768 operand0 = XEXP (operand0, 0);
3769
3770 /* SCRATCH_REG will hold an address and maybe the actual data. We want
3771 it in SImode regardless of what mode it was originally given
3772 to us. */
3773 scratch_reg = force_mode (SImode, scratch_reg);
3774
3775 /* The displacement D may make the whole address illegitimate; for
3776 such cases load D into the scratch reg and rebuild the address. */
3777 if (!memory_address_p (Pmode, XEXP (operand0, 0)))
3778 {
3779 emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
3780 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0,
3781 0)),
3782 Pmode,
3783 XEXP (XEXP (operand0, 0),
3784 0),
3785 scratch_reg));
3786 }
3787 else
3788 emit_move_insn (scratch_reg, XEXP (operand0, 0));
3789 emit_insn (gen_rtx_SET (gen_rtx_MEM (mode, scratch_reg), operand1));
3790 return 1;
3791 }
3792 /* Handle secondary reloads for loads of FP registers from constant
3793 expressions by forcing the constant into memory.
3794
3795 Use scratch_reg to hold the address of the memory location.
3796
3797 The proper fix is to change PREFERRED_RELOAD_CLASS to return
3798 NO_REGS when presented with a const_int and a register class
3799 containing only FP registers. Doing so unfortunately creates
3800 more problems than it solves. Fix this for 2.5. */
3801 else if (fp_reg_operand (operand0, mode)
3802 && CONSTANT_P (operand1)
3803 && scratch_reg)
3804 {
3805 rtx xoperands[2];
3806
3807 /* SCRATCH_REG will hold an address and maybe the actual data. We want
3808 it in SImode regardless of what mode it was originally given
3809 to us. */
3810 scratch_reg = force_mode (SImode, scratch_reg);
3811
3812 /* Force the constant into memory and put the address of the
3813 memory location into scratch_reg. */
3814 xoperands[0] = scratch_reg;
3815 xoperands[1] = XEXP (force_const_mem (mode, operand1), 0);
3816 emit_insn (gen_rtx_SET (scratch_reg, xoperands[1]));
3817
3818 /* Now load the destination register. */
3819 emit_insn (gen_rtx_SET (operand0, gen_rtx_MEM (mode, scratch_reg)));
3820 return 1;
3821 }
3822
3823 /* Now have insn-emit do whatever it normally does. */
3824 return 0;
3825 }
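
/* For illustration (a sketch, not lifted from real compiler output):
   reloading (set (reg:DF %fp0) (mem:DF (symbol_ref X))) with an
   address scratch register %a0 makes the first branch above emit

     (set (reg:SI %a0) (symbol_ref X))
     (set (reg:DF %fp0) (mem:DF (reg:SI %a0)))

   i.e. the symbolic address is materialized in the scratch register
   and the FP load then uses a plain register-indirect address.  */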
3826
3827 /* Split one or more DImode RTL references into pairs of SImode
3828 references. The RTL can be REG, offsettable MEM, integer constant, or
3829 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
3830 split and "num" is its length. lo_half and hi_half are output arrays
3831 that parallel "operands". */
3832
3833 void
3834 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
3835 {
3836 while (num--)
3837 {
3838 rtx op = operands[num];
3839
3840 /* simplify_subreg refuses to split volatile memory addresses,
3841 but we still have to handle them. */
3842 if (GET_CODE (op) == MEM)
3843 {
3844 lo_half[num] = adjust_address (op, SImode, 4);
3845 hi_half[num] = adjust_address (op, SImode, 0);
3846 }
3847 else
3848 {
3849 lo_half[num] = simplify_gen_subreg (SImode, op,
3850 GET_MODE (op) == VOIDmode
3851 ? DImode : GET_MODE (op), 4);
3852 hi_half[num] = simplify_gen_subreg (SImode, op,
3853 GET_MODE (op) == VOIDmode
3854 ? DImode : GET_MODE (op), 0);
3855 }
3856 }
3857 }
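
/* Example (illustrative): splitting (mem:DI (reg:SI %a0)) yields
   hi_half = (mem:SI (reg:SI %a0)) and
   lo_half = (mem:SI (plus:SI (reg:SI %a0) (const_int 4))),
   since the m68k is big-endian and the high-order word lives at the
   lower address.  */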
3858
3859 /* Split X into a base and a constant offset, storing them in *BASE
3860 and *OFFSET respectively. */
3861
3862 static void
3863 m68k_split_offset (rtx x, rtx *base, HOST_WIDE_INT *offset)
3864 {
3865 *offset = 0;
3866 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
3867 {
3868 *offset += INTVAL (XEXP (x, 1));
3869 x = XEXP (x, 0);
3870 }
3871 *base = x;
3872 }
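
/* Example (illustrative): (plus:SI (reg:SI %a0) (const_int 8))
   decomposes into *BASE = (reg:SI %a0) and *OFFSET = 8, while a bare
   register decomposes into itself with *OFFSET = 0.  */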
3873
3874 /* Return true if PATTERN is a PARALLEL suitable for a movem or fmovem
3875 instruction. STORE_P is true for a store, false for a load.
3876
3877 If the instruction uses post-increment or pre-decrement addressing,
3878 AUTOMOD_BASE is the base register and AUTOMOD_OFFSET is the total
3879 adjustment. This adjustment will be made by the first element of
3880 PARALLEL, with the loads or stores starting at element 1. If the
3881 instruction does not use post-increment or pre-decrement addressing,
3882 AUTOMOD_BASE is null, AUTOMOD_OFFSET is 0, and the loads or stores
3883 start at element 0. */
3884
3885 bool
3886 m68k_movem_pattern_p (rtx pattern, rtx automod_base,
3887 HOST_WIDE_INT automod_offset, bool store_p)
3888 {
3889 rtx base, mem_base, set, mem, reg, last_reg;
3890 HOST_WIDE_INT offset, mem_offset;
3891 int i, first, len;
3892 enum reg_class rclass;
3893
3894 len = XVECLEN (pattern, 0);
3895 first = (automod_base != NULL);
3896
3897 if (automod_base)
3898 {
3899 /* Stores must be pre-decrement and loads must be post-increment. */
3900 if (store_p != (automod_offset < 0))
3901 return false;
3902
3903 /* Work out the base and offset for the lowest memory location. */
3904 base = automod_base;
3905 offset = (automod_offset < 0 ? automod_offset : 0);
3906 }
3907 else
3908 {
3909 /* Allow any valid base and offset in the first access. */
3910 base = NULL;
3911 offset = 0;
3912 }
3913
3914 last_reg = NULL;
3915 rclass = NO_REGS;
3916 for (i = first; i < len; i++)
3917 {
3918 /* We need a plain SET. */
3919 set = XVECEXP (pattern, 0, i);
3920 if (GET_CODE (set) != SET)
3921 return false;
3922
3923 /* Check that we have a memory location... */
3924 mem = XEXP (set, !store_p);
3925 if (!MEM_P (mem) || !memory_operand (mem, VOIDmode))
3926 return false;
3927
3928 /* ...with the right address. */
3929 if (base == NULL)
3930 {
3931 m68k_split_offset (XEXP (mem, 0), &base, &offset);
3932 /* The ColdFire instruction only allows (An) and (d16,An) modes.
3933 There are no mode restrictions for 680x0 besides the
3934 automodification rules enforced above. */
3935 if (TARGET_COLDFIRE
3936 && !m68k_legitimate_base_reg_p (base, reload_completed))
3937 return false;
3938 }
3939 else
3940 {
3941 m68k_split_offset (XEXP (mem, 0), &mem_base, &mem_offset);
3942 if (!rtx_equal_p (base, mem_base) || offset != mem_offset)
3943 return false;
3944 }
3945
3946 /* Check that we have a register of the required mode and class. */
3947 reg = XEXP (set, store_p);
3948 if (!REG_P (reg)
3949 || !HARD_REGISTER_P (reg)
3950 || GET_MODE (reg) != reg_raw_mode[REGNO (reg)])
3951 return false;
3952
3953 if (last_reg)
3954 {
3955 /* The register must belong to RCLASS and have a higher number
3956 than the register in the previous SET. */
3957 if (!TEST_HARD_REG_BIT (reg_class_contents[rclass], REGNO (reg))
3958 || REGNO (last_reg) >= REGNO (reg))
3959 return false;
3960 }
3961 else
3962 {
3963 /* Work out which register class we need. */
3964 if (INT_REGNO_P (REGNO (reg)))
3965 rclass = GENERAL_REGS;
3966 else if (FP_REGNO_P (REGNO (reg)))
3967 rclass = FP_REGS;
3968 else
3969 return false;
3970 }
3971
3972 last_reg = reg;
3973 offset += GET_MODE_SIZE (GET_MODE (reg));
3974 }
3975
3976 /* If we have an automodification, check whether the final offset is OK. */
3977 if (automod_base && offset != (automod_offset < 0 ? 0 : automod_offset))
3978 return false;
3979
3980 /* Reject unprofitable cases. */
3981 if (len < first + (rclass == FP_REGS ? MIN_FMOVEM_REGS : MIN_MOVEM_REGS))
3982 return false;
3983
3984 return true;
3985 }
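
/* An illustrative PARALLEL (a sketch, not taken from m68k.md) that
   passes the checks above for a three-register load with no
   automodification:

     (parallel
       [(set (reg:SI %d2) (mem:SI (reg:SI %a0)))
        (set (reg:SI %d3) (mem:SI (plus:SI (reg:SI %a0) (const_int 4))))
        (set (reg:SI %d4) (mem:SI (plus:SI (reg:SI %a0) (const_int 8))))])

   The register numbers ascend, the memory locations are consecutive,
   and the element count meets MIN_MOVEM_REGS.  */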
3986
3987 /* Return the assembly code template for a movem or fmovem instruction
3988 whose pattern is given by PATTERN. Store the template's operands
3989 in OPERANDS.
3990
3991 If the instruction uses post-increment or pre-decrement addressing,
3992 AUTOMOD_OFFSET is the total adjustment, otherwise it is 0. STORE_P
3993 is true if this is a store instruction. */
3994
3995 const char *
3996 m68k_output_movem (rtx *operands, rtx pattern,
3997 HOST_WIDE_INT automod_offset, bool store_p)
3998 {
3999 unsigned int mask;
4000 int i, first;
4001
4002 gcc_assert (GET_CODE (pattern) == PARALLEL);
4003 mask = 0;
4004 first = (automod_offset != 0);
4005 for (i = first; i < XVECLEN (pattern, 0); i++)
4006 {
4007 /* When using movem with pre-decrement addressing, register X + D0_REG
4008 is controlled by bit 15 - X. For all other addressing modes,
4009 register X + D0_REG is controlled by bit X. Confusingly, the
4010 register mask for fmovem is in the opposite order to that for
4011 movem. */
4012 unsigned int regno;
4013
4014 gcc_assert (MEM_P (XEXP (XVECEXP (pattern, 0, i), !store_p)));
4015 gcc_assert (REG_P (XEXP (XVECEXP (pattern, 0, i), store_p)));
4016 regno = REGNO (XEXP (XVECEXP (pattern, 0, i), store_p));
4017 if (automod_offset < 0)
4018 {
4019 if (FP_REGNO_P (regno))
4020 mask |= 1 << (regno - FP0_REG);
4021 else
4022 mask |= 1 << (15 - (regno - D0_REG));
4023 }
4024 else
4025 {
4026 if (FP_REGNO_P (regno))
4027 mask |= 1 << (7 - (regno - FP0_REG));
4028 else
4029 mask |= 1 << (regno - D0_REG);
4030 }
4031 }
4032 CC_STATUS_INIT;
4033
4034 if (automod_offset == 0)
4035 operands[0] = XEXP (XEXP (XVECEXP (pattern, 0, first), !store_p), 0);
4036 else if (automod_offset < 0)
4037 operands[0] = gen_rtx_PRE_DEC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
4038 else
4039 operands[0] = gen_rtx_POST_INC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
4040 operands[1] = GEN_INT (mask);
4041 if (FP_REGNO_P (REGNO (XEXP (XVECEXP (pattern, 0, first), store_p))))
4042 {
4043 if (store_p)
4044 return "fmovem %1,%a0";
4045 else
4046 return "fmovem %a0,%1";
4047 }
4048 else
4049 {
4050 if (store_p)
4051 return "movem%.l %1,%a0";
4052 else
4053 return "movem%.l %a0,%1";
4054 }
4055 }
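
/* Example (illustrative): storing %d2-%d4 with pre-decrement
   addressing sets bits 15 - (REGNO - D0_REG), i.e. bits 13, 12 and
   11, so OPERANDS[1] becomes 0x3800 and the returned template prints
   something like "movem.l #14336,-(%sp)" in Motorola syntax.  */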
4056
4057 /* Return a REG that occurs in ADDR with coefficient 1.
4058 ADDR can be effectively incremented by incrementing REG. */
4059
4060 static rtx
4061 find_addr_reg (rtx addr)
4062 {
4063 while (GET_CODE (addr) == PLUS)
4064 {
4065 if (GET_CODE (XEXP (addr, 0)) == REG)
4066 addr = XEXP (addr, 0);
4067 else if (GET_CODE (XEXP (addr, 1)) == REG)
4068 addr = XEXP (addr, 1);
4069 else if (CONSTANT_P (XEXP (addr, 0)))
4070 addr = XEXP (addr, 1);
4071 else if (CONSTANT_P (XEXP (addr, 1)))
4072 addr = XEXP (addr, 0);
4073 else
4074 gcc_unreachable ();
4075 }
4076 gcc_assert (GET_CODE (addr) == REG);
4077 return addr;
4078 }
4079
4080 /* Output assembler code to perform a 32-bit 3-operand add. */
4081
4082 const char *
4083 output_addsi3 (rtx *operands)
4084 {
4085 if (! operands_match_p (operands[0], operands[1]))
4086 {
4087 if (!ADDRESS_REG_P (operands[1]))
4088 {
4089 rtx tmp = operands[1];
4090
4091 operands[1] = operands[2];
4092 operands[2] = tmp;
4093 }
4094
4095 /* These insns can result from reloads to access
4096 stack slots over 64k from the frame pointer. */
4097 if (GET_CODE (operands[2]) == CONST_INT
4098 && (INTVAL (operands[2]) < -32768 || INTVAL (operands[2]) > 32767))
4099 return "move%.l %2,%0\n\tadd%.l %1,%0";
4100 if (GET_CODE (operands[2]) == REG)
4101 return MOTOROLA ? "lea (%1,%2.l),%0" : "lea %1@(0,%2:l),%0";
4102 return MOTOROLA ? "lea (%c2,%1),%0" : "lea %1@(%c2),%0";
4103 }
4104 if (GET_CODE (operands[2]) == CONST_INT)
4105 {
4106 if (INTVAL (operands[2]) > 0
4107 && INTVAL (operands[2]) <= 8)
4108 return "addq%.l %2,%0";
4109 if (INTVAL (operands[2]) < 0
4110 && INTVAL (operands[2]) >= -8)
4111 {
4112 operands[2] = GEN_INT (- INTVAL (operands[2]));
4113 return "subq%.l %2,%0";
4114 }
4115 /* On the CPU32 it is faster to use two addql instructions to
4116 add a small integer (8 < N <= 16) to a register.
4117 Likewise for subql. */
4118 if (TUNE_CPU32 && REG_P (operands[0]))
4119 {
4120 if (INTVAL (operands[2]) > 8
4121 && INTVAL (operands[2]) <= 16)
4122 {
4123 operands[2] = GEN_INT (INTVAL (operands[2]) - 8);
4124 return "addq%.l #8,%0\n\taddq%.l %2,%0";
4125 }
4126 if (INTVAL (operands[2]) < -8
4127 && INTVAL (operands[2]) >= -16)
4128 {
4129 operands[2] = GEN_INT (- INTVAL (operands[2]) - 8);
4130 return "subq%.l #8,%0\n\tsubq%.l %2,%0";
4131 }
4132 }
4133 if (ADDRESS_REG_P (operands[0])
4134 && INTVAL (operands[2]) >= -0x8000
4135 && INTVAL (operands[2]) < 0x8000)
4136 {
4137 if (TUNE_68040)
4138 return "add%.w %2,%0";
4139 else
4140 return MOTOROLA ? "lea (%c2,%0),%0" : "lea %0@(%c2),%0";
4141 }
4142 }
4143 return "add%.l %2,%0";
4144 }
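
/* Example (illustrative): on a CPU32 target, adding the constant 12
   to %d0 takes the two-instruction path above and returns

     addq.l #8,%d0
     addq.l #4,%d0

   while a constant in the range 1..8 produces a single addq.l and a
   constant in the range -8..-1 a single subq.l.  */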
4145 \f
4146 /* Store in cc_status the expressions that the condition codes will
4147 describe after execution of an instruction whose pattern is EXP.
4148 Do not alter them if the instruction would not alter the cc's. */
4149
4150 /* On the 68000, all the insns to store in an address register fail to
4151 set the cc's. However, in some cases these instructions may invalidate
4152 the use of the saved cc's. In those cases we clear out some or all of
4153 the saved cc's so they won't be used. */
4154
4155 void
4156 notice_update_cc (rtx exp, rtx insn)
4157 {
4158 if (GET_CODE (exp) == SET)
4159 {
4160 if (GET_CODE (SET_SRC (exp)) == CALL)
4161 CC_STATUS_INIT;
4162 else if (ADDRESS_REG_P (SET_DEST (exp)))
4163 {
4164 if (cc_status.value1 && modified_in_p (cc_status.value1, insn))
4165 cc_status.value1 = 0;
4166 if (cc_status.value2 && modified_in_p (cc_status.value2, insn))
4167 cc_status.value2 = 0;
4168 }
4169 /* fmoves to memory or data registers do not set the condition
4170 codes. Normal moves _do_ set the condition codes, but not in
4171 a way that is appropriate for comparison with 0, because -0.0
4172 would be treated as a negative nonzero number. Note that it
4173 isn't appropriate to conditionalize this restriction on
4174 HONOR_SIGNED_ZEROS because that macro merely indicates whether
4175 we care about the difference between -0.0 and +0.0. */
4176 else if (!FP_REG_P (SET_DEST (exp))
4177 && SET_DEST (exp) != cc0_rtx
4178 && (FP_REG_P (SET_SRC (exp))
4179 || GET_CODE (SET_SRC (exp)) == FIX
4180 || FLOAT_MODE_P (GET_MODE (SET_DEST (exp)))))
4181 CC_STATUS_INIT;
4182 /* A pair of move insns doesn't produce a useful overall cc. */
4183 else if (!FP_REG_P (SET_DEST (exp))
4184 && !FP_REG_P (SET_SRC (exp))
4185 && GET_MODE_SIZE (GET_MODE (SET_SRC (exp))) > 4
4186 && (GET_CODE (SET_SRC (exp)) == REG
4187 || GET_CODE (SET_SRC (exp)) == MEM
4188 || GET_CODE (SET_SRC (exp)) == CONST_DOUBLE))
4189 CC_STATUS_INIT;
4190 else if (SET_DEST (exp) != pc_rtx)
4191 {
4192 cc_status.flags = 0;
4193 cc_status.value1 = SET_DEST (exp);
4194 cc_status.value2 = SET_SRC (exp);
4195 }
4196 }
4197 else if (GET_CODE (exp) == PARALLEL
4198 && GET_CODE (XVECEXP (exp, 0, 0)) == SET)
4199 {
4200 rtx dest = SET_DEST (XVECEXP (exp, 0, 0));
4201 rtx src = SET_SRC (XVECEXP (exp, 0, 0));
4202
4203 if (ADDRESS_REG_P (dest))
4204 CC_STATUS_INIT;
4205 else if (dest != pc_rtx)
4206 {
4207 cc_status.flags = 0;
4208 cc_status.value1 = dest;
4209 cc_status.value2 = src;
4210 }
4211 }
4212 else
4213 CC_STATUS_INIT;
4214 if (cc_status.value2 != 0
4215 && ADDRESS_REG_P (cc_status.value2)
4216 && GET_MODE (cc_status.value2) == QImode)
4217 CC_STATUS_INIT;
4218 if (cc_status.value2 != 0)
4219 switch (GET_CODE (cc_status.value2))
4220 {
4221 case ASHIFT: case ASHIFTRT: case LSHIFTRT:
4222 case ROTATE: case ROTATERT:
4223 /* These instructions always clear the overflow bit, and set
4224 the carry to the bit shifted out. */
4225 cc_status.flags |= CC_OVERFLOW_UNUSABLE | CC_NO_CARRY;
4226 break;
4227
4228 case PLUS: case MINUS: case MULT:
4229 case DIV: case UDIV: case MOD: case UMOD: case NEG:
4230 if (GET_MODE (cc_status.value2) != VOIDmode)
4231 cc_status.flags |= CC_NO_OVERFLOW;
4232 break;
4233 case ZERO_EXTEND:
4234 /* (SET r1 (ZERO_EXTEND r2)) on this machine
4235 ends with a move insn moving r2 in r2's mode.
4236 Thus, the cc's are set for r2.
4237 This can set N bit spuriously. */
4238 cc_status.flags |= CC_NOT_NEGATIVE;
4239 break;
4240 default:
4241 break;
4242 }
4243 if (cc_status.value1 && GET_CODE (cc_status.value1) == REG
4244 && cc_status.value2
4245 && reg_overlap_mentioned_p (cc_status.value1, cc_status.value2))
4246 cc_status.value2 = 0;
4247 /* Check for PRE_DEC in dest modifying a register used in src. */
4248 if (cc_status.value1 && GET_CODE (cc_status.value1) == MEM
4249 && GET_CODE (XEXP (cc_status.value1, 0)) == PRE_DEC
4250 && cc_status.value2
4251 && reg_overlap_mentioned_p (XEXP (XEXP (cc_status.value1, 0), 0),
4252 cc_status.value2))
4253 cc_status.value2 = 0;
4254 if (((cc_status.value1 && FP_REG_P (cc_status.value1))
4255 || (cc_status.value2 && FP_REG_P (cc_status.value2))))
4256 cc_status.flags = CC_IN_68881;
4257 if (cc_status.value2 && GET_CODE (cc_status.value2) == COMPARE
4258 && GET_MODE_CLASS (GET_MODE (XEXP (cc_status.value2, 0))) == MODE_FLOAT)
4259 {
4260 cc_status.flags = CC_IN_68881;
4261 if (!FP_REG_P (XEXP (cc_status.value2, 0))
4262 && FP_REG_P (XEXP (cc_status.value2, 1)))
4263 cc_status.flags |= CC_REVERSED;
4264 }
4265 }
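
/* Example (illustrative): after (set (reg:SI %d1) (reg:SI %d0)) the
   code above records value1 = %d1 and value2 = %d0, so final can
   delete a following redundant test of either register against
   zero.  */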
4266 \f
4267 const char *
4268 output_move_const_double (rtx *operands)
4269 {
4270 int code = standard_68881_constant_p (operands[1]);
4271
4272 if (code != 0)
4273 {
4274 static char buf[40];
4275
4276 sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff);
4277 return buf;
4278 }
4279 return "fmove%.d %1,%0";
4280 }
4281
4282 const char *
4283 output_move_const_single (rtx *operands)
4284 {
4285 int code = standard_68881_constant_p (operands[1]);
4286
4287 if (code != 0)
4288 {
4289 static char buf[40];
4290
4291 sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff);
4292 return buf;
4293 }
4294 return "fmove%.s %f1,%0";
4295 }
4296
4297 /* Return nonzero if X, a CONST_DOUBLE, has a value that we can get
4298 from the "fmovecr" instruction.
4299 The value, anded with 0xff, gives the code to use in fmovecr
4300 to get the desired constant. */
4301
4302 /* This code has been fixed for cross-compilation. */
4303
4304 static int inited_68881_table = 0;
4305
4306 static const char *const strings_68881[7] = {
4307 "0.0",
4308 "1.0",
4309 "10.0",
4310 "100.0",
4311 "10000.0",
4312 "1e8",
4313 "1e16"
4314 };
4315
4316 static const int codes_68881[7] = {
4317 0x0f,
4318 0x32,
4319 0x33,
4320 0x34,
4321 0x35,
4322 0x36,
4323 0x37
4324 };
4325
4326 REAL_VALUE_TYPE values_68881[7];
4327
4328 /* Set up values_68881 array by converting the decimal values
4329 strings_68881 to binary. */
4330
4331 void
4332 init_68881_table (void)
4333 {
4334 int i;
4335 REAL_VALUE_TYPE r;
4336 machine_mode mode;
4337
4338 mode = SFmode;
4339 for (i = 0; i < 7; i++)
4340 {
4341 if (i == 6)
4342 mode = DFmode;
4343 r = REAL_VALUE_ATOF (strings_68881[i], mode);
4344 values_68881[i] = r;
4345 }
4346 inited_68881_table = 1;
4347 }
4348
4349 int
4350 standard_68881_constant_p (rtx x)
4351 {
4352 const REAL_VALUE_TYPE *r;
4353 int i;
4354
4355 /* fmovecr must be emulated on the 68040 and 68060, so it shouldn't be
4356 used at all on those chips. */
4357 if (TUNE_68040_60)
4358 return 0;
4359
4360 if (! inited_68881_table)
4361 init_68881_table ();
4362
4363 r = CONST_DOUBLE_REAL_VALUE (x);
4364
4365 /* Use real_identical instead of real_equal so that -0.0 is rejected. */
4366 for (i = 0; i < 6; i++)
4367 {
4368 if (real_identical (r, &values_68881[i]))
4369 return (codes_68881[i]);
4370 }
4371
4372 if (GET_MODE (x) == SFmode)
4373 return 0;
4374
4375 if (real_equal (r, &values_68881[6]))
4376 return (codes_68881[6]);
4377
4378 /* Larger powers of ten in the constant ROM are not used
4379 because they are not exactly representable as a `double' C constant. */
4380 return 0;
4381 }
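
/* Example (illustrative): for a CONST_DOUBLE holding 1.0 this returns
   0x32, and output_move_const_double above then emits
   "fmovecr #0x32,%0" to load the constant from the 68881's on-chip
   constant ROM.  */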
4382
4383 /* If X is a floating-point constant, return the base-2 logarithm of X,
4384 or 0 if X is not an exact power of 2. */
4385
4386 int
4387 floating_exact_log2 (rtx x)
4388 {
4389 const REAL_VALUE_TYPE *r;
4390 REAL_VALUE_TYPE r1;
4391 int exp;
4392
4393 r = CONST_DOUBLE_REAL_VALUE (x);
4394
4395 if (real_less (r, &dconst1))
4396 return 0;
4397
4398 exp = real_exponent (r);
4399 real_2expN (&r1, exp, DFmode);
4400 if (real_equal (&r1, r))
4401 return exp;
4402
4403 return 0;
4404 }
4405 \f
4406 /* A C compound statement to output to stdio stream STREAM the
4407 assembler syntax for an instruction operand X. X is an RTL
4408 expression.
4409
4410 CODE is a value that can be used to specify one of several ways
4411 of printing the operand. It is used when identical operands
4412 must be printed differently depending on the context. CODE
4413 comes from the `%' specification that was used to request
4414 printing of the operand. If the specification was just `%DIGIT'
4415 then CODE is 0; if the specification was `%LTR DIGIT' then CODE
4416 is the ASCII code for LTR.
4417
4418 If X is a register, this macro should print the register's name.
4419 The names can be found in an array `reg_names' whose type is
4420 `char *[]'. `reg_names' is initialized from `REGISTER_NAMES'.
4421
4422 When the machine description has a specification `%PUNCT' (a `%'
4423 followed by a punctuation character), this macro is called with
4424 a null pointer for X and the punctuation character for CODE.
4425
4426 The m68k specific codes are:
4427
4428 '.' for dot needed in Motorola-style opcode names.
4429 '-' for an operand pushing on the stack:
4430 sp@-, -(sp) or -(%sp) depending on the style of syntax.
4431 '+' for an operand pushing on the stack:
4432 sp@+, (sp)+ or (%sp)+ depending on the style of syntax.
4433 '@' for a reference to the top word on the stack:
4434 sp@, (sp) or (%sp) depending on the style of syntax.
4435 '#' for an immediate operand prefix (# in MIT and Motorola syntax
4436 but & in SGS syntax).
4437 '!' for the cc register (used in an `and to cc' insn).
4438 '$' for the letter `s' in an op code, but only on the 68040.
4439 '&' for the letter `d' in an op code, but only on the 68040.
4440 '/' for register prefix needed by longlong.h.
4441 '?' for m68k_library_id_string
4442
4443 'b' for byte insn (no effect, on the Sun; this is for the ISI).
4444 'd' to force memory addressing to be absolute, not relative.
4445 'f' for float insn (print a CONST_DOUBLE as a float rather than in hex)
4446 'x' for float insn (print a CONST_DOUBLE as a float rather than in hex),
4447 or print pair of registers as rx:ry.
4448 'p' print an address with @PLTPC attached, but only if the operand
4449 is not locally-bound. */
4450
4451 void
4452 print_operand (FILE *file, rtx op, int letter)
4453 {
4454 if (op != NULL_RTX)
4455 m68k_adjust_decorated_operand (op);
4456
4457 if (letter == '.')
4458 {
4459 if (MOTOROLA)
4460 fprintf (file, ".");
4461 }
4462 else if (letter == '#')
4463 asm_fprintf (file, "%I");
4464 else if (letter == '-')
4465 asm_fprintf (file, MOTOROLA ? "-(%Rsp)" : "%Rsp@-");
4466 else if (letter == '+')
4467 asm_fprintf (file, MOTOROLA ? "(%Rsp)+" : "%Rsp@+");
4468 else if (letter == '@')
4469 asm_fprintf (file, MOTOROLA ? "(%Rsp)" : "%Rsp@");
4470 else if (letter == '!')
4471 asm_fprintf (file, "%Rfpcr");
4472 else if (letter == '$')
4473 {
4474 if (TARGET_68040)
4475 fprintf (file, "s");
4476 }
4477 else if (letter == '&')
4478 {
4479 if (TARGET_68040)
4480 fprintf (file, "d");
4481 }
4482 else if (letter == '/')
4483 asm_fprintf (file, "%R");
4484 else if (letter == '?')
4485 asm_fprintf (file, m68k_library_id_string);
4486 else if (letter == 'p')
4487 {
4488 output_addr_const (file, op);
4489 if (!(GET_CODE (op) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (op)))
4490 fprintf (file, "@PLTPC");
4491 }
4492 else if (GET_CODE (op) == REG)
4493 {
4494 if (letter == 'R')
4495 /* Print out the second register name of a register pair.
4496 I.e., R (6) => 7. */
4497 fputs (M68K_REGNAME(REGNO (op) + 1), file);
4498 else
4499 fputs (M68K_REGNAME(REGNO (op)), file);
4500 }
4501 else if (GET_CODE (op) == MEM)
4502 {
4503 output_address (GET_MODE (op), XEXP (op, 0));
4504 if (letter == 'd' && ! TARGET_68020
4505 && CONSTANT_ADDRESS_P (XEXP (op, 0))
4506 && !(GET_CODE (XEXP (op, 0)) == CONST_INT
4507 && INTVAL (XEXP (op, 0)) < 0x8000
4508 && INTVAL (XEXP (op, 0)) >= -0x8000))
4509 fprintf (file, MOTOROLA ? ".l" : ":l");
4510 }
4511 else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == SFmode)
4512 {
4513 long l;
4514 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), l);
4515 asm_fprintf (file, "%I0x%lx", l & 0xFFFFFFFF);
4516 }
4517 else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == XFmode)
4518 {
4519 long l[3];
4520 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op), l);
4521 asm_fprintf (file, "%I0x%lx%08lx%08lx", l[0] & 0xFFFFFFFF,
4522 l[1] & 0xFFFFFFFF, l[2] & 0xFFFFFFFF);
4523 }
4524 else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == DFmode)
4525 {
4526 long l[2];
4527 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op), l);
4528 asm_fprintf (file, "%I0x%lx%08lx", l[0] & 0xFFFFFFFF, l[1] & 0xFFFFFFFF);
4529 }
4530 else
4531 {
4532 /* Use `print_operand_address' instead of `output_addr_const'
4533 to ensure that we print relevant PIC stuff. */
4534 asm_fprintf (file, "%I");
4535 if (TARGET_PCREL
4536 && (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == CONST))
4537 print_operand_address (file, op);
4538 else
4539 output_addr_const (file, op);
4540 }
4541 }
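
/* Example (illustrative): given the template "move%.l %1,%0", the '.'
   code above prints "." under Motorola syntax and nothing under MIT
   syntax, so the opcode is rendered as "move.l" or "movel"
   respectively.  */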
4542
4543 /* Return string for TLS relocation RELOC. */
4544
4545 static const char *
4546 m68k_get_reloc_decoration (enum m68k_reloc reloc)
4547 {
4548 /* To my knowledge, !MOTOROLA assemblers don't support TLS. */
4549 gcc_assert (MOTOROLA || reloc == RELOC_GOT);
4550
4551 switch (reloc)
4552 {
4553 case RELOC_GOT:
4554 if (MOTOROLA)
4555 {
4556 if (flag_pic == 1 && TARGET_68020)
4557 return "@GOT.w";
4558 else
4559 return "@GOT";
4560 }
4561 else
4562 {
4563 if (TARGET_68020)
4564 {
4565 switch (flag_pic)
4566 {
4567 case 1:
4568 return ":w";
4569 case 2:
4570 return ":l";
4571 default:
4572 return "";
4573 }
4574 }
4575 }
4576 gcc_unreachable ();
4577
4578 case RELOC_TLSGD:
4579 return "@TLSGD";
4580
4581 case RELOC_TLSLDM:
4582 return "@TLSLDM";
4583
4584 case RELOC_TLSLDO:
4585 return "@TLSLDO";
4586
4587 case RELOC_TLSIE:
4588 return "@TLSIE";
4589
4590 case RELOC_TLSLE:
4591 return "@TLSLE";
4592
4593 default:
4594 gcc_unreachable ();
4595 }
4596 }
4597
4598 /* m68k implementation of TARGET_OUTPUT_ADDR_CONST_EXTRA. */
4599
4600 static bool
4601 m68k_output_addr_const_extra (FILE *file, rtx x)
4602 {
4603 if (GET_CODE (x) == UNSPEC)
4604 {
4605 switch (XINT (x, 1))
4606 {
4607 case UNSPEC_RELOC16:
4608 case UNSPEC_RELOC32:
4609 output_addr_const (file, XVECEXP (x, 0, 0));
4610 fputs (m68k_get_reloc_decoration
4611 ((enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1))), file);
4612 return true;
4613
4614 default:
4615 break;
4616 }
4617 }
4618
4619 return false;
4620 }
4621
4622 /* M68K implementation of TARGET_ASM_OUTPUT_DWARF_DTPREL. */
4623
4624 static void
4625 m68k_output_dwarf_dtprel (FILE *file, int size, rtx x)
4626 {
4627 gcc_assert (size == 4);
4628 fputs ("\t.long\t", file);
4629 output_addr_const (file, x);
4630 fputs ("@TLSLDO+0x8000", file);
4631 }
4632
4633 /* In the name of slightly smaller debug output, and to cater to
4634 general assembler lossage, recognize various UNSPEC sequences
4635 and turn them back into a direct symbol reference. */
4636
4637 static rtx
4638 m68k_delegitimize_address (rtx orig_x)
4639 {
4640 rtx x;
4641 struct m68k_address addr;
4642 rtx unspec;
4643
4644 orig_x = delegitimize_mem_from_attrs (orig_x);
4645 x = orig_x;
4646 if (MEM_P (x))
4647 x = XEXP (x, 0);
4648
4649 if (GET_CODE (x) != PLUS || GET_MODE (x) != Pmode)
4650 return orig_x;
4651
4652 if (!m68k_decompose_address (GET_MODE (x), x, false, &addr)
4653 || addr.offset == NULL_RTX
4654 || GET_CODE (addr.offset) != CONST)
4655 return orig_x;
4656
4657 unspec = XEXP (addr.offset, 0);
4658 if (GET_CODE (unspec) == PLUS && CONST_INT_P (XEXP (unspec, 1)))
4659 unspec = XEXP (unspec, 0);
4660 if (GET_CODE (unspec) != UNSPEC
4661 || (XINT (unspec, 1) != UNSPEC_RELOC16
4662 && XINT (unspec, 1) != UNSPEC_RELOC32))
4663 return orig_x;
4664 x = XVECEXP (unspec, 0, 0);
4665 gcc_assert (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF);
4666 if (unspec != XEXP (addr.offset, 0))
4667 x = gen_rtx_PLUS (Pmode, x, XEXP (XEXP (addr.offset, 0), 1));
4668 if (addr.index)
4669 {
4670 rtx idx = addr.index;
4671 if (addr.scale != 1)
4672 idx = gen_rtx_MULT (Pmode, idx, GEN_INT (addr.scale));
4673 x = gen_rtx_PLUS (Pmode, idx, x);
4674 }
4675 if (addr.base)
4676 x = gen_rtx_PLUS (Pmode, addr.base, x);
4677 if (MEM_P (orig_x))
4678 x = replace_equiv_address_nv (orig_x, x);
4679 return x;
4680 }
4681
4682 \f
4683 /* A C compound statement to output to stdio stream STREAM the
4684 assembler syntax for an instruction operand that is a memory
4685 reference whose address is ADDR. ADDR is an RTL expression.
4686
4687 Note that this contains a kludge that knows that the only reason
4688 we have an address (plus (label_ref...) (reg...)) when not generating
4689 PIC code is in the insn before a tablejump, and we know that m68k.md
4690 generates a label LInnn: on such an insn.
4691
4692 It is possible for PIC to generate a (plus (label_ref...) (reg...))
4693 and we handle that just like we would a (plus (symbol_ref...) (reg...)).
4694
4695 This routine is responsible for distinguishing between -fpic and -fPIC
4696 style relocations in an address. When generating -fpic code the
4697 offset is output in word mode (e.g. movel a5@(_foo:w), a0). When generating
4698 -fPIC code the offset is output in long mode (e.g. movel a5@(_foo:l), a0). */
4699
4700 void
4701 print_operand_address (FILE *file, rtx addr)
4702 {
4703 struct m68k_address address;
4704
4705 m68k_adjust_decorated_operand (addr);
4706
4707 if (!m68k_decompose_address (QImode, addr, true, &address))
4708 gcc_unreachable ();
4709
4710 if (address.code == PRE_DEC)
4711 fprintf (file, MOTOROLA ? "-(%s)" : "%s@-",
4712 M68K_REGNAME (REGNO (address.base)));
4713 else if (address.code == POST_INC)
4714 fprintf (file, MOTOROLA ? "(%s)+" : "%s@+",
4715 M68K_REGNAME (REGNO (address.base)));
4716 else if (!address.base && !address.index)
4717 {
4718 /* A constant address. */
4719 gcc_assert (address.offset == addr);
4720 if (GET_CODE (addr) == CONST_INT)
4721 {
4722 /* (xxx).w or (xxx).l. */
4723 if (IN_RANGE (INTVAL (addr), -0x8000, 0x7fff))
4724 fprintf (file, MOTOROLA ? "%d.w" : "%d:w", (int) INTVAL (addr));
4725 else
4726 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (addr));
4727 }
4728 else if (TARGET_PCREL)
4729 {
4730 /* (d16,PC) or (bd,PC,Xn) (with suppressed index register). */
4731 fputc ('(', file);
4732 output_addr_const (file, addr);
4733 asm_fprintf (file, flag_pic == 1 ? ":w,%Rpc)" : ":l,%Rpc)");
4734 }
4735 else
4736 {
4737 /* (xxx).l. We need a special case for SYMBOL_REF if the symbol
4738 name ends in `.<letter>', as the last 2 characters can be
4739 mistaken for a size suffix. Put the name in parentheses. */
4740 if (GET_CODE (addr) == SYMBOL_REF
4741 && strlen (XSTR (addr, 0)) > 2
4742 && XSTR (addr, 0)[strlen (XSTR (addr, 0)) - 2] == '.')
4743 {
4744 putc ('(', file);
4745 output_addr_const (file, addr);
4746 putc (')', file);
4747 }
4748 else
4749 output_addr_const (file, addr);
4750 }
4751 }
4752 else
4753 {
4754 int labelno;
4755
4756 /* If ADDR is a (d8,pc,Xn) address, this is the number of the
4757 label being accessed, otherwise it is -1. */
4758 labelno = (address.offset
4759 && !address.base
4760 && GET_CODE (address.offset) == LABEL_REF
4761 ? CODE_LABEL_NUMBER (XEXP (address.offset, 0))
4762 : -1);
4763 if (MOTOROLA)
4764 {
4765 /* Print the "offset(base" component. */
4766 if (labelno >= 0)
4767 asm_fprintf (file, "%LL%d(%Rpc,", labelno);
4768 else
4769 {
4770 if (address.offset)
4771 output_addr_const (file, address.offset);
4772
4773 putc ('(', file);
4774 if (address.base)
4775 fputs (M68K_REGNAME (REGNO (address.base)), file);
4776 }
4777 /* Print the ",index" component, if any. */
4778 if (address.index)
4779 {
4780 if (address.base)
4781 putc (',', file);
4782 fprintf (file, "%s.%c",
4783 M68K_REGNAME (REGNO (address.index)),
4784 GET_MODE (address.index) == HImode ? 'w' : 'l');
4785 if (address.scale != 1)
4786 fprintf (file, "*%d", address.scale);
4787 }
4788 putc (')', file);
4789 }
4790 else /* !MOTOROLA */
4791 {
4792 if (!address.offset && !address.index)
4793 fprintf (file, "%s@", M68K_REGNAME (REGNO (address.base)));
4794 else
4795 {
4796 /* Print the "base@(offset" component. */
4797 if (labelno >= 0)
4798 asm_fprintf (file, "%Rpc@(%LL%d", labelno);
4799 else
4800 {
4801 if (address.base)
4802 fputs (M68K_REGNAME (REGNO (address.base)), file);
4803 fprintf (file, "@(");
4804 if (address.offset)
4805 output_addr_const (file, address.offset);
4806 }
4807 /* Print the ",index" component, if any. */
4808 if (address.index)
4809 {
4810 fprintf (file, ",%s:%c",
4811 M68K_REGNAME (REGNO (address.index)),
4812 GET_MODE (address.index) == HImode ? 'w' : 'l');
4813 if (address.scale != 1)
4814 fprintf (file, ":%d", address.scale);
4815 }
4816 putc (')', file);
4817 }
4818 }
4819 }
4820 }
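
/* Example (illustrative): the address (plus:SI (reg:SI %a0)
   (const_int 8)) is printed as "8(%a0)" in Motorola syntax and as
   "%a0@(8)" in MIT syntax by the code above.  */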
4821 \f
4822 /* Check for cases where a clr insn can be omitted from code using
4823 strict_low_part sets. For example, the second clrl here is not needed:
4824 clrl d0; movw a0@+,d0; use d0; clrl d0; movw a0@+,d0; use d0; ...
4825
4826 MODE is the mode of this STRICT_LOW_PART set. FIRST_INSN is the clear
4827 insn we are checking for redundancy. TARGET is the register set by the
4828 clear insn. */
4829
4830 bool
4831 strict_low_part_peephole_ok (machine_mode mode, rtx_insn *first_insn,
4832 rtx target)
4833 {
4834 rtx_insn *p = first_insn;
4835
4836 while ((p = PREV_INSN (p)))
4837 {
4838 if (NOTE_INSN_BASIC_BLOCK_P (p))
4839 return false;
4840
4841 if (NOTE_P (p))
4842 continue;
4843
4844 /* If it isn't an insn, then give up. */
4845 if (!INSN_P (p))
4846 return false;
4847
4848 if (reg_set_p (target, p))
4849 {
4850 rtx set = single_set (p);
4851 rtx dest;
4852
4853 /* If it isn't an easy to recognize insn, then give up. */
4854 if (! set)
4855 return false;
4856
4857 dest = SET_DEST (set);
4858
4859 /* If this sets the entire target register to zero, then our
4860 first_insn is redundant. */
4861 if (rtx_equal_p (dest, target)
4862 && SET_SRC (set) == const0_rtx)
4863 return true;
4864 else if (GET_CODE (dest) == STRICT_LOW_PART
4865 && GET_CODE (XEXP (dest, 0)) == REG
4866 && REGNO (XEXP (dest, 0)) == REGNO (target)
4867 && (GET_MODE_SIZE (GET_MODE (XEXP (dest, 0)))
4868 <= GET_MODE_SIZE (mode)))
4869 /* This is a strict low part set which modifies less than
4870 we are using, so it is safe. */
4871 ;
4872 else
4873 return false;
4874 }
4875 }
4876
4877 return false;
4878 }
4879
4880 /* Operand predicates for implementing asymmetric pc-relative addressing
4881 on m68k. The m68k supports pc-relative addressing (mode 7, register 2)
4882 when used as a source operand, but not as a destination operand.
4883
4884 We model this by restricting the meaning of the basic predicates
4885 (general_operand, memory_operand, etc) to forbid the use of this
4886 addressing mode, and then define the following predicates that permit
4887 this addressing mode. These predicates can then be used for the
4888 source operands of the appropriate instructions.
4889
4890 n.b. While it is theoretically possible to change all machine patterns
4891 to use this addressing mode where permitted by the architecture,
4892 it has only been implemented for "common" cases: SImode, HImode, and
4893 QImode operands, and only for the principal operations that would
4894 require this addressing mode: data movement and simple integer operations.
4895
4896 In parallel with these new predicates, two new constraint letters
4897 were defined: 'S' and 'T'. 'S' is the -mpcrel analog of 'm'.
4898 'T' replaces 's' in the non-pcrel case. It is a no-op in the pcrel case.
4899 In the pcrel case 's' is only valid in combination with 'a' registers.
4900 See addsi3, subsi3, cmpsi, and movsi patterns for a better understanding
4901 of how these constraints are used.
4902
4903 The use of these predicates is strictly optional, though patterns
4904 that do not use them will cause an extra reload register to be
4905 allocated where one was not necessary:
4906
4907 lea (abc:w,%pc),%a0 ; need to reload address
4908 moveq &1,%d1 ; since write to pc-relative space
4909 movel %d1,%a0@ ; is not allowed
4910 ...
4911 lea (abc:w,%pc),%a1 ; no need to reload address here
4912 movel %a1@,%d0 ; since "movel (abc:w,%pc),%d0" is ok
4913
4914 For more info, consult tiemann@cygnus.com.
4915
4916
4917 All of the ugliness with predicates and constraints is due to the
4918 simple fact that the m68k does not allow a pc-relative addressing
4919 mode as a destination. gcc does not distinguish between source and
4920 destination addresses. Hence, if we claim that pc-relative address
4921 modes are valid, e.g. TARGET_LEGITIMATE_ADDRESS_P accepts them, then we
4922 end up with invalid code. To get around this problem, we left
4923 pc-relative modes as invalid addresses, and then added special
4924 predicates and constraints to accept them.
4925
4926 A cleaner way to handle this is to modify gcc to distinguish
4927 between source and destination addresses. We can then say that
4928 pc-relative is a valid source address but not a valid destination
4929 address, and hopefully avoid a lot of the predicate and constraint
4930 hackery. Unfortunately, this would be a pretty big change. It would
4931 be a useful change for a number of ports, but there aren't any current
4932 plans to undertake this.
4933
4934 ***************************************************************************/
4935
4936
4937 const char *
4938 output_andsi3 (rtx *operands)
4939 {
4940 int logval;
4941 if (GET_CODE (operands[2]) == CONST_INT
4942 && (INTVAL (operands[2]) | 0xffff) == -1
4943 && (DATA_REG_P (operands[0])
4944 || offsettable_memref_p (operands[0]))
4945 && !TARGET_COLDFIRE)
4946 {
4947 if (GET_CODE (operands[0]) != REG)
4948 operands[0] = adjust_address (operands[0], HImode, 2);
4949 operands[2] = GEN_INT (INTVAL (operands[2]) & 0xffff);
4950 /* Do not delete a following tstl %0 insn; that would be incorrect. */
4951 CC_STATUS_INIT;
4952 if (operands[2] == const0_rtx)
4953 return "clr%.w %0";
4954 return "and%.w %2,%0";
4955 }
4956 if (GET_CODE (operands[2]) == CONST_INT
4957 && (logval = exact_log2 (~ INTVAL (operands[2]) & 0xffffffff)) >= 0
4958 && (DATA_REG_P (operands[0])
4959 || offsettable_memref_p (operands[0])))
4960 {
4961 if (DATA_REG_P (operands[0]))
4962 operands[1] = GEN_INT (logval);
4963 else
4964 {
4965 operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
4966 operands[1] = GEN_INT (logval % 8);
4967 }
4968 /* This does not set condition codes in a standard way. */
4969 CC_STATUS_INIT;
4970 return "bclr %1,%0";
4971 }
4972 return "and%.l %2,%0";
4973 }
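
/* Example (illustrative): ANDing %d0 with the constant 0xfffffffd
   clears only bit 1, so the second case above rewrites the operation
   as "bclr #1,%d0"; output_iorsi3 and output_xorsi3 below make the
   analogous single-bit rewrites to "bset" and "bchg".  */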
4974
4975 const char *
4976 output_iorsi3 (rtx *operands)
4977 {
4978 register int logval;
4979 if (GET_CODE (operands[2]) == CONST_INT
4980 && INTVAL (operands[2]) >> 16 == 0
4981 && (DATA_REG_P (operands[0])
4982 || offsettable_memref_p (operands[0]))
4983 && !TARGET_COLDFIRE)
4984 {
4985 if (GET_CODE (operands[0]) != REG)
4986 operands[0] = adjust_address (operands[0], HImode, 2);
4987 /* Do not delete a following tstl %0 insn; that would be incorrect. */
4988 CC_STATUS_INIT;
4989 if (INTVAL (operands[2]) == 0xffff)
4990 return "mov%.w %2,%0";
4991 return "or%.w %2,%0";
4992 }
4993 if (GET_CODE (operands[2]) == CONST_INT
4994 && (logval = exact_log2 (INTVAL (operands[2]) & 0xffffffff)) >= 0
4995 && (DATA_REG_P (operands[0])
4996 || offsettable_memref_p (operands[0])))
4997 {
4998 if (DATA_REG_P (operands[0]))
4999 operands[1] = GEN_INT (logval);
5000 else
5001 {
5002 operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
5003 operands[1] = GEN_INT (logval % 8);
5004 }
5005 CC_STATUS_INIT;
5006 return "bset %1,%0";
5007 }
5008 return "or%.l %2,%0";
5009 }
5010
5011 const char *
5012 output_xorsi3 (rtx *operands)
5013 {
5014 register int logval;
5015 if (GET_CODE (operands[2]) == CONST_INT
5016 && INTVAL (operands[2]) >> 16 == 0
5017 && (offsettable_memref_p (operands[0]) || DATA_REG_P (operands[0]))
5018 && !TARGET_COLDFIRE)
5019 {
5020 if (! DATA_REG_P (operands[0]))
5021 operands[0] = adjust_address (operands[0], HImode, 2);
5022 /* Do not delete a following tstl %0 insn; that would be incorrect. */
5023 CC_STATUS_INIT;
5024 if (INTVAL (operands[2]) == 0xffff)
5025 return "not%.w %0";
5026 return "eor%.w %2,%0";
5027 }
5028 if (GET_CODE (operands[2]) == CONST_INT
5029 && (logval = exact_log2 (INTVAL (operands[2]) & 0xffffffff)) >= 0
5030 && (DATA_REG_P (operands[0])
5031 || offsettable_memref_p (operands[0])))
5032 {
5033 if (DATA_REG_P (operands[0]))
5034 operands[1] = GEN_INT (logval);
5035 else
5036 {
5037 operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
5038 operands[1] = GEN_INT (logval % 8);
5039 }
5040 CC_STATUS_INIT;
5041 return "bchg %1,%0";
5042 }
5043 return "eor%.l %2,%0";
5044 }
5045
5046 /* Return the instruction that should be used for a call to address X,
5047 which is known to be in operand 0. */
5048
5049 const char *
5050 output_call (rtx x)
5051 {
5052 if (symbolic_operand (x, VOIDmode))
5053 return m68k_symbolic_call;
5054 else
5055 return "jsr %a0";
5056 }
5057
5058 /* Likewise sibling calls. */
5059
5060 const char *
5061 output_sibcall (rtx x)
5062 {
5063 if (symbolic_operand (x, VOIDmode))
5064 return m68k_symbolic_jump;
5065 else
5066 return "jmp %a0";
5067 }
5068
5069 static void
5070 m68k_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
5071 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
5072 tree function)
5073 {
5074 rtx this_slot, offset, addr, mem, tmp;
5075 rtx_insn *insn;
5076
5077 /* Avoid clobbering the struct value reg by using the
5078 static chain reg as a temporary. */
5079 tmp = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
5080
5081 /* Pretend to be a post-reload pass while generating rtl. */
5082 reload_completed = 1;
5083
5084 /* The "this" pointer is stored at 4(%sp). */
5085 this_slot = gen_rtx_MEM (Pmode, plus_constant (Pmode,
5086 stack_pointer_rtx, 4));
5087
5088 /* Add DELTA to THIS. */
5089 if (delta != 0)
5090 {
5091 /* Make the offset a legitimate operand for memory addition. */
5092 offset = GEN_INT (delta);
5093 if ((delta < -8 || delta > 8)
5094 && (TARGET_COLDFIRE || USE_MOVQ (delta)))
5095 {
5096 emit_move_insn (gen_rtx_REG (Pmode, D0_REG), offset);
5097 offset = gen_rtx_REG (Pmode, D0_REG);
5098 }
5099 emit_insn (gen_add3_insn (copy_rtx (this_slot),
5100 copy_rtx (this_slot), offset));
5101 }
5102
5103 /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
5104 if (vcall_offset != 0)
5105 {
5106 /* Set the static chain register to *THIS. */
5107 emit_move_insn (tmp, this_slot);
5108 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
5109
5110 /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
5111 addr = plus_constant (Pmode, tmp, vcall_offset);
5112 if (!m68k_legitimate_address_p (Pmode, addr, true))
5113 {
5114 emit_insn (gen_rtx_SET (tmp, addr));
5115 addr = tmp;
5116 }
5117
5118 /* Load the offset into %d0 and add it to THIS. */
5119 emit_move_insn (gen_rtx_REG (Pmode, D0_REG),
5120 gen_rtx_MEM (Pmode, addr));
5121 emit_insn (gen_add3_insn (copy_rtx (this_slot),
5122 copy_rtx (this_slot),
5123 gen_rtx_REG (Pmode, D0_REG)));
5124 }
5125
5126 /* Jump to the target function. Use a sibcall if direct jumps are
5127 allowed, otherwise load the address into a register first. */
5128 mem = DECL_RTL (function);
5129 if (!sibcall_operand (XEXP (mem, 0), VOIDmode))
5130 {
5131 gcc_assert (flag_pic);
5132
5133 if (!TARGET_SEP_DATA)
5134 {
5135 /* Use the static chain register as a temporary (call-clobbered)
5136 GOT pointer for this function. We can use the static chain
5137 register because it isn't live on entry to the thunk. */
5138 SET_REGNO (pic_offset_table_rtx, STATIC_CHAIN_REGNUM);
5139 emit_insn (gen_load_got (pic_offset_table_rtx));
5140 }
5141 legitimize_pic_address (XEXP (mem, 0), Pmode, tmp);
5142 mem = replace_equiv_address (mem, tmp);
5143 }
5144 insn = emit_call_insn (gen_sibcall (mem, const0_rtx));
5145 SIBLING_CALL_P (insn) = 1;
5146
5147 /* Run just enough of rest_of_compilation. */
5148 insn = get_insns ();
5149 split_all_insns_noflow ();
5150 final_start_function (insn, file, 1);
5151 final (insn, file, 1);
5152 final_end_function ();
5153
5154 /* Clean up the vars set above. */
5155 reload_completed = 0;
5156
5157 /* Restore the original PIC register. */
5158 if (flag_pic)
5159 SET_REGNO (pic_offset_table_rtx, PIC_REG);
5160 }
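
/* For illustration (a sketch; the exact mnemonics depend on the
   assembler syntax and on m68k_symbolic_jump): with DELTA == 4,
   VCALL_OFFSET == 0 and a direct sibcall available, the emitted thunk
   body amounts to

     addq.l #4,4(%sp)    | adjust the "this" pointer in place
     jra target          | tail-call the real function  */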
5161
5162 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
5163
5164 static rtx
5165 m68k_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
5166 int incoming ATTRIBUTE_UNUSED)
5167 {
5168 return gen_rtx_REG (Pmode, M68K_STRUCT_VALUE_REGNUM);
5169 }
5170
5171 /* Return nonzero if register old_reg can be renamed to register new_reg. */
5172 int
5173 m68k_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED,
5174 unsigned int new_reg)
5175 {
5176
5177 /* Interrupt functions can only use registers that have already been
5178 saved by the prologue, even if they would normally be
5179 call-clobbered. */
5180
5181 if ((m68k_get_function_kind (current_function_decl)
5182 == m68k_fk_interrupt_handler)
5183 && !df_regs_ever_live_p (new_reg))
5184 return 0;
5185
5186 return 1;
5187 }
5188
5189 /* Implement TARGET_HARD_REGNO_NREGS.
5190
5191 On the m68k, ordinary registers hold 32 bits worth;
5192 for the 68881 registers, a single register is always enough for
5193 anything that can be stored in them at all. */
5194
5195 static unsigned int
5196 m68k_hard_regno_nregs (unsigned int regno, machine_mode mode)
5197 {
5198 if (regno >= 16)
5199 return GET_MODE_NUNITS (mode);
5200 return CEIL (GET_MODE_SIZE (mode), UNITS_PER_WORD);
5201 }
5202
5203 /* Implement TARGET_HARD_REGNO_MODE_OK. On the 68000, the CPU
5204 registers can hold any mode, but we restrict the 68881 registers
5205 to floating-point modes. */
5206
5207 static bool
5208 m68k_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
5209 {
5210 if (DATA_REGNO_P (regno))
5211 {
5212 /* Data registers can hold an aggregate if it fits within d0-d7. */
5213 if (regno + GET_MODE_SIZE (mode) / 4 <= 8)
5214 return true;
5215 }
5216 else if (ADDRESS_REGNO_P (regno))
5217 {
5218 if (regno + GET_MODE_SIZE (mode) / 4 <= 16)
5219 return true;
5220 }
5221 else if (FP_REGNO_P (regno))
5222 {
5223 /* FPU registers can hold a float or complex float of long double
5224 size or smaller. */
5225 if ((GET_MODE_CLASS (mode) == MODE_FLOAT
5226 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
5227 && GET_MODE_UNIT_SIZE (mode) <= TARGET_FP_REG_SIZE)
5228 return true;
5229 }
5230 return false;
5231 }
5232
5233 /* Implement TARGET_MODES_TIEABLE_P. */
5234
5235 static bool
5236 m68k_modes_tieable_p (machine_mode mode1, machine_mode mode2)
5237 {
5238 return (!TARGET_HARD_FLOAT
5239 || ((GET_MODE_CLASS (mode1) == MODE_FLOAT
5240 || GET_MODE_CLASS (mode1) == MODE_COMPLEX_FLOAT)
5241 == (GET_MODE_CLASS (mode2) == MODE_FLOAT
5242 || GET_MODE_CLASS (mode2) == MODE_COMPLEX_FLOAT)));
5243 }
5244
5245 /* Implement SECONDARY_RELOAD_CLASS. */
5246
5247 enum reg_class
5248 m68k_secondary_reload_class (enum reg_class rclass,
5249 machine_mode mode, rtx x)
5250 {
5251 int regno;
5252
5253 regno = true_regnum (x);
5254
5255 /* If one operand of a movqi is an address register, the other
5256 operand must be a general register or constant. Other types
5257 of operand must be reloaded through a data register. */
5258 if (GET_MODE_SIZE (mode) == 1
5259 && reg_classes_intersect_p (rclass, ADDR_REGS)
5260 && !(INT_REGNO_P (regno) || CONSTANT_P (x)))
5261 return DATA_REGS;
5262
5263 /* PC-relative addresses must be loaded into an address register first. */
5264 if (TARGET_PCREL
5265 && !reg_class_subset_p (rclass, ADDR_REGS)
5266 && symbolic_operand (x, VOIDmode))
5267 return ADDR_REGS;
5268
5269 return NO_REGS;
5270 }
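
/* Example (illustrative): for a QImode move whose register class
   intersects ADDR_REGS and whose other operand is a memory reference,
   the function above returns DATA_REGS, so the byte is staged through
   a data register (the m68k has no byte moves into address
   registers).  */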
5271
5272 /* Implement PREFERRED_RELOAD_CLASS. */
5273
5274 enum reg_class
5275 m68k_preferred_reload_class (rtx x, enum reg_class rclass)
5276 {
5277 enum reg_class secondary_class;
5278
5279 /* If RCLASS might need a secondary reload, try restricting it to
5280 a class that doesn't. */
5281 secondary_class = m68k_secondary_reload_class (rclass, GET_MODE (x), x);
5282 if (secondary_class != NO_REGS
5283 && reg_class_subset_p (secondary_class, rclass))
5284 return secondary_class;
5285
5286 /* Prefer to use moveq for in-range constants. */
5287 if (GET_CODE (x) == CONST_INT
5288 && reg_class_subset_p (DATA_REGS, rclass)
5289 && IN_RANGE (INTVAL (x), -0x80, 0x7f))
5290 return DATA_REGS;
5291
5292 /* ??? Do we really need this now? */
5293 if (GET_CODE (x) == CONST_DOUBLE
5294 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
5295 {
5296 if (TARGET_HARD_FLOAT && reg_class_subset_p (FP_REGS, rclass))
5297 return FP_REGS;
5298
5299 return NO_REGS;
5300 }
5301
5302 return rclass;
5303 }
5304
5305 /* Return floating point values in a 68881 register. This makes 68881 code
5306 a little bit faster. It also makes -msoft-float code incompatible with
5307 hard-float code, so people have to be careful not to mix the two.
5308 For ColdFire it was decided the ABI incompatibility is undesirable.
5309 If there is need for a hard-float ABI it is probably worth doing it
5310 properly and also passing function arguments in FP registers. */
5311 rtx
5312 m68k_libcall_value (machine_mode mode)
5313 {
5314 switch (mode) {
5315 case E_SFmode:
5316 case E_DFmode:
5317 case E_XFmode:
5318 if (TARGET_68881)
5319 return gen_rtx_REG (mode, FP0_REG);
5320 break;
5321 default:
5322 break;
5323 }
5324
5325 return gen_rtx_REG (mode, m68k_libcall_value_in_a0_p ? A0_REG : D0_REG);
5326 }
5327
5328 /* Location in which function value is returned.
5329 NOTE: Due to differences in ABIs, don't call this function directly,
5330 use FUNCTION_VALUE instead. */
5331 rtx
5332 m68k_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED)
5333 {
5334 machine_mode mode;
5335
5336 mode = TYPE_MODE (valtype);
5337 switch (mode) {
5338 case E_SFmode:
5339 case E_DFmode:
5340 case E_XFmode:
5341 if (TARGET_68881)
5342 return gen_rtx_REG (mode, FP0_REG);
5343 break;
5344 default:
5345 break;
5346 }
5347
5348 /* If the function returns a pointer, push that into %a0. */
5349 if (func && POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (func))))
5350 /* For compatibility with the large body of existing code which
5351 does not always properly declare external functions returning
5352 pointer types, the m68k/SVR4 convention is to copy the value
5353 returned for pointer functions from a0 to d0 in the function
5354 epilogue, so that callers that have neglected to properly
5355 declare the callee can still find the correct return value in
5356 d0. */
5357 return gen_rtx_PARALLEL
5358 (mode,
5359 gen_rtvec (2,
5360 gen_rtx_EXPR_LIST (VOIDmode,
5361 gen_rtx_REG (mode, A0_REG),
5362 const0_rtx),
5363 gen_rtx_EXPR_LIST (VOIDmode,
5364 gen_rtx_REG (mode, D0_REG),
5365 const0_rtx)));
5366 else if (POINTER_TYPE_P (valtype))
5367 return gen_rtx_REG (mode, A0_REG);
5368 else
5369 return gen_rtx_REG (mode, D0_REG);
5370 }
5371
5372 /* Worker function for TARGET_RETURN_IN_MEMORY. */
5373 #if M68K_HONOR_TARGET_STRICT_ALIGNMENT
5374 static bool
5375 m68k_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
5376 {
5377 machine_mode mode = TYPE_MODE (type);
5378
5379 if (mode == BLKmode)
5380 return true;
5381
5382 /* If TYPE's known alignment is less than the alignment of MODE that
5383 would contain the structure, then return in memory. We need to
5384 do so to maintain the compatibility between code compiled with
5385 -mstrict-align and that compiled with -mno-strict-align. */
5386 if (AGGREGATE_TYPE_P (type)
5387 && TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (mode))
5388 return true;
5389
5390 return false;
5391 }
5392 #endif
5393
5394 /* CPU to schedule the program for. */
5395 enum attr_cpu m68k_sched_cpu;
5396
5397 /* MAC to schedule the program for. */
5398 enum attr_mac m68k_sched_mac;
5399
5400 /* Operand type. */
5401 enum attr_op_type
5402 {
5403 /* No operand. */
5404 OP_TYPE_NONE,
5405
5406 /* Integer register. */
5407 OP_TYPE_RN,
5408
5409 /* FP register. */
5410 OP_TYPE_FPN,
5411
5412 /* Implicit mem reference (e.g. stack). */
5413 OP_TYPE_MEM1,
5414
5415 /* Memory without offset or indexing. EA modes 2, 3 and 4. */
5416 OP_TYPE_MEM234,
5417
5418 /* Memory with offset but without indexing. EA mode 5. */
5419 OP_TYPE_MEM5,
5420
5421 /* Memory with indexing. EA mode 6. */
5422 OP_TYPE_MEM6,
5423
5424 /* Memory referenced by absolute address. EA mode 7. */
5425 OP_TYPE_MEM7,
5426
5427 /* Immediate operand that doesn't require extension word. */
5428 OP_TYPE_IMM_Q,
5429
5430 /* Immediate 16 bit operand. */
5431 OP_TYPE_IMM_W,
5432
5433 /* Immediate 32 bit operand. */
5434 OP_TYPE_IMM_L
5435 };
5436
5437 /* Return type of memory ADDR_RTX refers to. */
5438 static enum attr_op_type
5439 sched_address_type (machine_mode mode, rtx addr_rtx)
5440 {
5441 struct m68k_address address;
5442
5443 if (symbolic_operand (addr_rtx, VOIDmode))
5444 return OP_TYPE_MEM7;
5445
5446 if (!m68k_decompose_address (mode, addr_rtx,
5447 reload_completed, &address))
5448 {
5449 gcc_assert (!reload_completed);
5450 /* Reload will likely fix the address to be in a register. */
5451 return OP_TYPE_MEM234;
5452 }
5453
5454 if (address.scale != 0)
5455 return OP_TYPE_MEM6;
5456
5457 if (address.base != NULL_RTX)
5458 {
5459 if (address.offset == NULL_RTX)
5460 return OP_TYPE_MEM234;
5461
5462 return OP_TYPE_MEM5;
5463 }
5464
5465 gcc_assert (address.offset != NULL_RTX);
5466
5467 return OP_TYPE_MEM7;
5468 }
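
/* Example (illustrative): (%a0) decomposes to a base alone and is
   classified OP_TYPE_MEM234; (8,%a0) adds an offset and becomes
   OP_TYPE_MEM5; (8,%a0,%d0.l) has a nonzero scale and becomes
   OP_TYPE_MEM6; a bare symbol or other absolute address is
   OP_TYPE_MEM7.  */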
5469
5470 /* Return X or Y (depending on OPX_P) operand of INSN. */
5471 static rtx
5472 sched_get_operand (rtx_insn *insn, bool opx_p)
5473 {
5474 int i;
5475
5476 if (recog_memoized (insn) < 0)
5477 gcc_unreachable ();
5478
5479 extract_constrain_insn_cached (insn);
5480
5481 if (opx_p)
5482 i = get_attr_opx (insn);
5483 else
5484 i = get_attr_opy (insn);
5485
5486 if (i >= recog_data.n_operands)
5487 return NULL;
5488
5489 return recog_data.operand[i];
5490 }
5491
5492 /* Return type of INSN's operand X (if OPX_P) or operand Y (if !OPX_P).
5493 If ADDRESS_P is true, return type of memory location operand refers to. */
5494 static enum attr_op_type
5495 sched_attr_op_type (rtx_insn *insn, bool opx_p, bool address_p)
5496 {
5497 rtx op;
5498
5499 op = sched_get_operand (insn, opx_p);
5500
5501 if (op == NULL)
5502 {
5503 gcc_assert (!reload_completed);
5504 return OP_TYPE_RN;
5505 }
5506
5507 if (address_p)
5508 return sched_address_type (QImode, op);
5509
5510 if (memory_operand (op, VOIDmode))
5511 return sched_address_type (GET_MODE (op), XEXP (op, 0));
5512
5513 if (register_operand (op, VOIDmode))
5514 {
5515 if ((!reload_completed && FLOAT_MODE_P (GET_MODE (op)))
5516 || (reload_completed && FP_REG_P (op)))
5517 return OP_TYPE_FPN;
5518
5519 return OP_TYPE_RN;
5520 }
5521
5522 if (GET_CODE (op) == CONST_INT)
5523 {
5524 int ival;
5525
5526 ival = INTVAL (op);
5527
5528 /* Check for quick constants. */
5529 switch (get_attr_type (insn))
5530 {
5531 case TYPE_ALUQ_L:
5532 if (IN_RANGE (ival, 1, 8) || IN_RANGE (ival, -8, -1))
5533 return OP_TYPE_IMM_Q;
5534
5535 gcc_assert (!reload_completed);
5536 break;
5537
5538 case TYPE_MOVEQ_L:
5539 if (USE_MOVQ (ival))
5540 return OP_TYPE_IMM_Q;
5541
5542 gcc_assert (!reload_completed);
5543 break;
5544
5545 case TYPE_MOV3Q_L:
5546 if (valid_mov3q_const (ival))
5547 return OP_TYPE_IMM_Q;
5548
5549 gcc_assert (!reload_completed);
5550 break;
5551
5552 default:
5553 break;
5554 }
5555
5556 if (IN_RANGE (ival, -0x8000, 0x7fff))
5557 return OP_TYPE_IMM_W;
5558
5559 return OP_TYPE_IMM_L;
5560 }
5561
5562 if (GET_CODE (op) == CONST_DOUBLE)
5563 {
5564 switch (GET_MODE (op))
5565 {
5566 case E_SFmode:
5567 return OP_TYPE_IMM_W;
5568
5569 case E_VOIDmode:
5570 case E_DFmode:
5571 return OP_TYPE_IMM_L;
5572
5573 default:
5574 gcc_unreachable ();
5575 }
5576 }
5577
5578 if (GET_CODE (op) == CONST
5579 || symbolic_operand (op, VOIDmode)
5580 || LABEL_P (op))
5581 {
5582 switch (GET_MODE (op))
5583 {
5584 case E_QImode:
5585 return OP_TYPE_IMM_Q;
5586
5587 case E_HImode:
5588 return OP_TYPE_IMM_W;
5589
5590 case E_SImode:
5591 return OP_TYPE_IMM_L;
5592
5593 default:
5594 if (symbolic_operand (m68k_unwrap_symbol (op, false), VOIDmode))
5595 /* Just a guess. */
5596 return OP_TYPE_IMM_W;
5597
5598 return OP_TYPE_IMM_L;
5599 }
5600 }
5601
5602 gcc_assert (!reload_completed);
5603
5604 if (FLOAT_MODE_P (GET_MODE (op)))
5605 return OP_TYPE_FPN;
5606
5607 return OP_TYPE_RN;
5608 }
5609
5610 /* Implement opx_type attribute.
5611 Return type of INSN's operand X.
5612 If ADDRESS_P is true, return type of memory location operand refers to. */
5613 enum attr_opx_type
5614 m68k_sched_attr_opx_type (rtx_insn *insn, int address_p)
5615 {
5616 switch (sched_attr_op_type (insn, true, address_p != 0))
5617 {
5618 case OP_TYPE_RN:
5619 return OPX_TYPE_RN;
5620
5621 case OP_TYPE_FPN:
5622 return OPX_TYPE_FPN;
5623
5624 case OP_TYPE_MEM1:
5625 return OPX_TYPE_MEM1;
5626
5627 case OP_TYPE_MEM234:
5628 return OPX_TYPE_MEM234;
5629
5630 case OP_TYPE_MEM5:
5631 return OPX_TYPE_MEM5;
5632
5633 case OP_TYPE_MEM6:
5634 return OPX_TYPE_MEM6;
5635
5636 case OP_TYPE_MEM7:
5637 return OPX_TYPE_MEM7;
5638
5639 case OP_TYPE_IMM_Q:
5640 return OPX_TYPE_IMM_Q;
5641
5642 case OP_TYPE_IMM_W:
5643 return OPX_TYPE_IMM_W;
5644
5645 case OP_TYPE_IMM_L:
5646 return OPX_TYPE_IMM_L;
5647
5648 default:
5649 gcc_unreachable ();
5650 }
5651 }
5652
5653 /* Implement opy_type attribute.
5654 Return type of INSN's operand Y.
5655 If ADDRESS_P is true, return type of memory location operand refers to. */
5656 enum attr_opy_type
5657 m68k_sched_attr_opy_type (rtx_insn *insn, int address_p)
5658 {
5659 switch (sched_attr_op_type (insn, false, address_p != 0))
5660 {
5661 case OP_TYPE_RN:
5662 return OPY_TYPE_RN;
5663
5664 case OP_TYPE_FPN:
5665 return OPY_TYPE_FPN;
5666
5667 case OP_TYPE_MEM1:
5668 return OPY_TYPE_MEM1;
5669
5670 case OP_TYPE_MEM234:
5671 return OPY_TYPE_MEM234;
5672
5673 case OP_TYPE_MEM5:
5674 return OPY_TYPE_MEM5;
5675
5676 case OP_TYPE_MEM6:
5677 return OPY_TYPE_MEM6;
5678
5679 case OP_TYPE_MEM7:
5680 return OPY_TYPE_MEM7;
5681
5682 case OP_TYPE_IMM_Q:
5683 return OPY_TYPE_IMM_Q;
5684
5685 case OP_TYPE_IMM_W:
5686 return OPY_TYPE_IMM_W;
5687
5688 case OP_TYPE_IMM_L:
5689 return OPY_TYPE_IMM_L;
5690
5691 default:
5692 gcc_unreachable ();
5693 }
5694 }
5695
5696 /* Return size of INSN in words, as an int. */
5697 static int
5698 sched_get_attr_size_int (rtx_insn *insn)
5699 {
5700 int size;
5701
5702 switch (get_attr_type (insn))
5703 {
5704 case TYPE_IGNORE:
5705 /* There should be no references to m68k_sched_attr_size for 'ignore'
5706 instructions. */
5707 gcc_unreachable ();
5708 return 0;
5709
5710 case TYPE_MUL_L:
5711 size = 2;
5712 break;
5713
5714 default:
5715 size = 1;
5716 break;
5717 }
5718
5719 switch (get_attr_opx_type (insn))
5720 {
5721 case OPX_TYPE_NONE:
5722 case OPX_TYPE_RN:
5723 case OPX_TYPE_FPN:
5724 case OPX_TYPE_MEM1:
5725 case OPX_TYPE_MEM234:
5726 case OPX_TYPE_IMM_Q:
5727 break;
5728
5729 case OPX_TYPE_MEM5:
5730 case OPX_TYPE_MEM6:
5731 /* Here we assume that most absolute references are short. */
5732 case OPX_TYPE_MEM7:
5733 case OPX_TYPE_IMM_W:
5734 ++size;
5735 break;
5736
5737 case OPX_TYPE_IMM_L:
5738 size += 2;
5739 break;
5740
5741 default:
5742 gcc_unreachable ();
5743 }
5744
5745 switch (get_attr_opy_type (insn))
5746 {
5747 case OPY_TYPE_NONE:
5748 case OPY_TYPE_RN:
5749 case OPY_TYPE_FPN:
5750 case OPY_TYPE_MEM1:
5751 case OPY_TYPE_MEM234:
5752 case OPY_TYPE_IMM_Q:
5753 break;
5754
5755 case OPY_TYPE_MEM5:
5756 case OPY_TYPE_MEM6:
5757 /* Here we assume that most absolute references are short. */
5758 case OPY_TYPE_MEM7:
5759 case OPY_TYPE_IMM_W:
5760 ++size;
5761 break;
5762
5763 case OPY_TYPE_IMM_L:
5764 size += 2;
5765 break;
5766
5767 default:
5768 gcc_unreachable ();
5769 }
5770
5771 if (size > 3)
5772 {
5773 gcc_assert (!reload_completed);
5774
5775 size = 3;
5776 }
5777
5778 return size;
5779 }
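
/* A worked example: for an insn like "add.l #100000,%d0" the base size
   is 1 word, operand X (%d0) contributes nothing, and operand Y is a
   32-bit immediate contributing 2 words, so the function returns 3,
   which is also the cap applied above.  */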
5780
5781 /* Return size of INSN as attribute enum value. */
5782 enum attr_size
5783 m68k_sched_attr_size (rtx_insn *insn)
5784 {
5785 switch (sched_get_attr_size_int (insn))
5786 {
5787 case 1:
5788 return SIZE_1;
5789
5790 case 2:
5791 return SIZE_2;
5792
5793 case 3:
5794 return SIZE_3;
5795
5796 default:
5797 gcc_unreachable ();
5798 }
5799 }
5800
5801 /* Return a simplified type of INSN's operand X (if OPX_P) or operand Y:
5802 OP_TYPE_RN if not a MEM, OP_TYPE_MEM6 if indexed memory, OP_TYPE_MEM1 otherwise. */
5803 static enum attr_op_type
5804 sched_get_opxy_mem_type (rtx_insn *insn, bool opx_p)
5805 {
5806 if (opx_p)
5807 {
5808 switch (get_attr_opx_type (insn))
5809 {
5810 case OPX_TYPE_NONE:
5811 case OPX_TYPE_RN:
5812 case OPX_TYPE_FPN:
5813 case OPX_TYPE_IMM_Q:
5814 case OPX_TYPE_IMM_W:
5815 case OPX_TYPE_IMM_L:
5816 return OP_TYPE_RN;
5817
5818 case OPX_TYPE_MEM1:
5819 case OPX_TYPE_MEM234:
5820 case OPX_TYPE_MEM5:
5821 case OPX_TYPE_MEM7:
5822 return OP_TYPE_MEM1;
5823
5824 case OPX_TYPE_MEM6:
5825 return OP_TYPE_MEM6;
5826
5827 default:
5828 gcc_unreachable ();
5829 }
5830 }
5831 else
5832 {
5833 switch (get_attr_opy_type (insn))
5834 {
5835 case OPY_TYPE_NONE:
5836 case OPY_TYPE_RN:
5837 case OPY_TYPE_FPN:
5838 case OPY_TYPE_IMM_Q:
5839 case OPY_TYPE_IMM_W:
5840 case OPY_TYPE_IMM_L:
5841 return OP_TYPE_RN;
5842
5843 case OPY_TYPE_MEM1:
5844 case OPY_TYPE_MEM234:
5845 case OPY_TYPE_MEM5:
5846 case OPY_TYPE_MEM7:
5847 return OP_TYPE_MEM1;
5848
5849 case OPY_TYPE_MEM6:
5850 return OP_TYPE_MEM6;
5851
5852 default:
5853 gcc_unreachable ();
5854 }
5855 }
5856 }
5857
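/* The OP_MEM_* values used below encode an insn's memory traffic as a
   <read><write> pair, where each position is 0 (no access), 1 (a plain
   access) or I (an indexed access); e.g. OP_MEM_I1 is an indexed read
   combined with a plain write.  This summary follows from the cases in
   m68k_sched_attr_op_mem below.  */
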
5858 /* Implement op_mem attribute. */
5859 enum attr_op_mem
5860 m68k_sched_attr_op_mem (rtx_insn *insn)
5861 {
5862 enum attr_op_type opx;
5863 enum attr_op_type opy;
5864
5865 opx = sched_get_opxy_mem_type (insn, true);
5866 opy = sched_get_opxy_mem_type (insn, false);
5867
5868 if (opy == OP_TYPE_RN && opx == OP_TYPE_RN)
5869 return OP_MEM_00;
5870
5871 if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM1)
5872 {
5873 switch (get_attr_opx_access (insn))
5874 {
5875 case OPX_ACCESS_R:
5876 return OP_MEM_10;
5877
5878 case OPX_ACCESS_W:
5879 return OP_MEM_01;
5880
5881 case OPX_ACCESS_RW:
5882 return OP_MEM_11;
5883
5884 default:
5885 gcc_unreachable ();
5886 }
5887 }
5888
5889 if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM6)
5890 {
5891 switch (get_attr_opx_access (insn))
5892 {
5893 case OPX_ACCESS_R:
5894 return OP_MEM_I0;
5895
5896 case OPX_ACCESS_W:
5897 return OP_MEM_0I;
5898
5899 case OPX_ACCESS_RW:
5900 return OP_MEM_I1;
5901
5902 default:
5903 gcc_unreachable ();
5904 }
5905 }
5906
5907 if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_RN)
5908 return OP_MEM_10;
5909
5910 if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM1)
5911 {
5912 switch (get_attr_opx_access (insn))
5913 {
5914 case OPX_ACCESS_W:
5915 return OP_MEM_11;
5916
5917 default:
5918 gcc_assert (!reload_completed);
5919 return OP_MEM_11;
5920 }
5921 }
5922
5923 if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM6)
5924 {
5925 switch (get_attr_opx_access (insn))
5926 {
5927 case OPX_ACCESS_W:
5928 return OP_MEM_1I;
5929
5930 default:
5931 gcc_assert (!reload_completed);
5932 return OP_MEM_1I;
5933 }
5934 }
5935
5936 if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_RN)
5937 return OP_MEM_I0;
5938
5939 if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM1)
5940 {
5941 switch (get_attr_opx_access (insn))
5942 {
5943 case OPX_ACCESS_W:
5944 return OP_MEM_I1;
5945
5946 default:
5947 gcc_assert (!reload_completed);
5948 return OP_MEM_I1;
5949 }
5950 }
5951
5952 gcc_assert (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM6);
5953 gcc_assert (!reload_completed);
5954 return OP_MEM_I1;
5955 }
5956
5957 /* Data for ColdFire V4 index bypass.
5958 Producer modifies a register that is used as an index in the consumer
5959 with the specified scale. */
5960 static struct
5961 {
5962 /* Producer instruction. */
5963 rtx pro;
5964
5965 /* Consumer instruction. */
5966 rtx con;
5967
5968 /* Scale of the indexed memory access within the consumer,
5969 or zero if the bypass should not be effective at the moment. */
5970 int scale;
5971 } sched_cfv4_bypass_data;
5972
5973 /* An empty state that is used in m68k_sched_adjust_cost. */
5974 static state_t sched_adjust_cost_state;
5975
5976 /* Implement adjust_cost scheduler hook.
5977 Return adjusted COST of dependency LINK between DEF_INSN and INSN. */
5978 static int
5979 m68k_sched_adjust_cost (rtx_insn *insn, int, rtx_insn *def_insn, int cost,
5980 unsigned int)
5981 {
5982 int delay;
5983
5984 if (recog_memoized (def_insn) < 0
5985 || recog_memoized (insn) < 0)
5986 return cost;
5987
5988 if (sched_cfv4_bypass_data.scale == 1)
5989 /* Handle ColdFire V4 bypass for indexed address with 1x scale. */
5990 {
5991 /* haifa-sched.c: insn_cost () calls bypass_p () just before
5992 targetm.sched.adjust_cost (). Hence, we can be relatively sure
5993 that the data in sched_cfv4_bypass_data is up to date. */
5994 gcc_assert (sched_cfv4_bypass_data.pro == def_insn
5995 && sched_cfv4_bypass_data.con == insn);
5996
5997 if (cost < 3)
5998 cost = 3;
5999
6000 sched_cfv4_bypass_data.pro = NULL;
6001 sched_cfv4_bypass_data.con = NULL;
6002 sched_cfv4_bypass_data.scale = 0;
6003 }
6004 else
6005 gcc_assert (sched_cfv4_bypass_data.pro == NULL
6006 && sched_cfv4_bypass_data.con == NULL
6007 && sched_cfv4_bypass_data.scale == 0);
6008
6009 /* Don't try to issue INSN earlier than the DFA permits.
6010 This is especially useful for instructions that write to memory,
6011 as their true dependence (default) latency is better set to 0
6012 to work around alias analysis limitations.
6013 This is, in fact, a machine-independent tweak, so, probably,
6014 it should be moved to haifa-sched.c: insn_cost (). */
6015 delay = min_insn_conflict_delay (sched_adjust_cost_state, def_insn, insn);
6016 if (delay > cost)
6017 cost = delay;
6018
6019 return cost;
6020 }
6021
6022 /* Return the maximal number of insns that can be scheduled in a single cycle. */
6023 static int
6024 m68k_sched_issue_rate (void)
6025 {
6026 switch (m68k_sched_cpu)
6027 {
6028 case CPU_CFV1:
6029 case CPU_CFV2:
6030 case CPU_CFV3:
6031 return 1;
6032
6033 case CPU_CFV4:
6034 return 2;
6035
6036 default:
6037 gcc_unreachable ();
6038 return 0;
6039 }
6040 }
6041
6042 /* Maximal length in words of an instruction for the current CPU.
6043 E.g. it is 3 for any ColdFire core. */
6044 static int max_insn_size;
6045
6046 /* Data to model instruction buffer of CPU. */
6047 struct _sched_ib
6048 {
6049 /* True if the instruction buffer is modeled for the current CPU. */
6050 bool enabled_p;
6051
6052 /* Size of the instruction buffer in words. */
6053 int size;
6054
6055 /* Number of filled words in the instruction buffer. */
6056 int filled;
6057
6058 /* Additional information about instruction buffer for CPUs that have
6059 a buffer of instruction records, rather than a plain buffer
6060 of instruction words. */
6061 struct _sched_ib_records
6062 {
6063 /* Size of buffer in records. */
6064 int n_insns;
6065
6066 /* Array to hold data on adjustments made to the size of the buffer. */
6067 int *adjust;
6068
6069 /* Index of the above array. */
6070 int adjust_index;
6071 } records;
6072
6073 /* An insn that reserves (marks empty) one word in the instruction buffer. */
6074 rtx insn;
6075 };
6076
6077 static struct _sched_ib sched_ib;
6078
6079 /* ID of memory unit. */
6080 static int sched_mem_unit_code;
6081
6082 /* Implementation of the targetm.sched.variable_issue () hook.
6083 It is called after INSN was issued. It returns the number of insns
6084 that can possibly get scheduled on the current cycle.
6085 It is used here to determine the effect of INSN on the instruction
6086 buffer. */
6087 static int
6088 m68k_sched_variable_issue (FILE *sched_dump ATTRIBUTE_UNUSED,
6089 int sched_verbose ATTRIBUTE_UNUSED,
6090 rtx_insn *insn, int can_issue_more)
6091 {
6092 int insn_size;
6093
6094 if (recog_memoized (insn) >= 0 && get_attr_type (insn) != TYPE_IGNORE)
6095 {
6096 switch (m68k_sched_cpu)
6097 {
6098 case CPU_CFV1:
6099 case CPU_CFV2:
6100 insn_size = sched_get_attr_size_int (insn);
6101 break;
6102
6103 case CPU_CFV3:
6104 insn_size = sched_get_attr_size_int (insn);
6105
6106 /* ColdFire V3 and V4 cores have instruction buffers that can
6107 accumulate up to 8 instructions regardless of the instructions'
6108 sizes. So we should take care not to "prefetch" 24 one-word
6109 or 12 two-word instructions.
6110 To model this behavior we temporarily decrease the size of the
6111 buffer by (max_insn_size - insn_size) for the next 7 instructions. */
6112 {
6113 int adjust;
6114
6115 adjust = max_insn_size - insn_size;
6116 sched_ib.size -= adjust;
6117
6118 if (sched_ib.filled > sched_ib.size)
6119 sched_ib.filled = sched_ib.size;
6120
6121 sched_ib.records.adjust[sched_ib.records.adjust_index] = adjust;
6122 }
6123
6124 ++sched_ib.records.adjust_index;
6125 if (sched_ib.records.adjust_index == sched_ib.records.n_insns)
6126 sched_ib.records.adjust_index = 0;
6127
6128 /* Undo adjustment we did 7 instructions ago. */
6129 sched_ib.size
6130 += sched_ib.records.adjust[sched_ib.records.adjust_index];
6131
6132 break;
6133
6134 case CPU_CFV4:
6135 gcc_assert (!sched_ib.enabled_p);
6136 insn_size = 0;
6137 break;
6138
6139 default:
6140 gcc_unreachable ();
6141 }
6142
6143 if (insn_size > sched_ib.filled)
6144 /* Scheduling for register pressure does not always take the DFA into
6145 account. Work around the instruction buffer not being filled enough. */
6146 {
6147 gcc_assert (sched_pressure == SCHED_PRESSURE_WEIGHTED);
6148 insn_size = sched_ib.filled;
6149 }
6150
6151 --can_issue_more;
6152 }
6153 else if (GET_CODE (PATTERN (insn)) == ASM_INPUT
6154 || asm_noperands (PATTERN (insn)) >= 0)
6155 insn_size = sched_ib.filled;
6156 else
6157 insn_size = 0;
6158
6159 sched_ib.filled -= insn_size;
6160
6161 return can_issue_more;
6162 }
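
/* A worked example of the CFV3 model above: the buffer is
   records.n_insns * max_insn_size == 8 * 3 == 24 words.  Issuing a
   one-word insn shrinks the modeled size by 2 words for the next 7
   insns, so once the ring of adjustments is full, a stream of one-word
   insns sees an effective size of 24 - 8*2 == 8 words, matching the
   hardware limit of 8 buffered instructions.  */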
6163
6164 /* Return how many instructions the scheduler should look ahead to choose
6165 the best one. */
6166 static int
6167 m68k_sched_first_cycle_multipass_dfa_lookahead (void)
6168 {
6169 return m68k_sched_issue_rate () - 1;
6170 }
6171
6172 /* Implementation of targetm.sched.init_global () hook.
6173 It is invoked once per scheduling pass and is used here
6174 to initialize scheduler constants. */
6175 static void
6176 m68k_sched_md_init_global (FILE *sched_dump ATTRIBUTE_UNUSED,
6177 int sched_verbose ATTRIBUTE_UNUSED,
6178 int n_insns ATTRIBUTE_UNUSED)
6179 {
6180 /* Check that all instructions have DFA reservations and
6181 that all instructions can be issued from a clean state. */
6182 if (flag_checking)
6183 {
6184 rtx_insn *insn;
6185 state_t state;
6186
6187 state = alloca (state_size ());
6188
6189 for (insn = get_insns (); insn != NULL; insn = NEXT_INSN (insn))
6190 {
6191 if (INSN_P (insn) && recog_memoized (insn) >= 0)
6192 {
6193 gcc_assert (insn_has_dfa_reservation_p (insn));
6194
6195 state_reset (state);
6196 if (state_transition (state, insn) >= 0)
6197 gcc_unreachable ();
6198 }
6199 }
6200 }
6201
6202 /* Set up the target CPU. */
6203
6204 /* ColdFire V4 has a set of features to keep its instruction buffer full
6205 (e.g., a separate memory bus for instructions) and, hence, we do not model
6206 the buffer for this CPU. */
6207 sched_ib.enabled_p = (m68k_sched_cpu != CPU_CFV4);
6208
6209 switch (m68k_sched_cpu)
6210 {
6211 case CPU_CFV4:
6212 sched_ib.filled = 0;
6213
6214 /* FALLTHRU */
6215
6216 case CPU_CFV1:
6217 case CPU_CFV2:
6218 max_insn_size = 3;
6219 sched_ib.records.n_insns = 0;
6220 sched_ib.records.adjust = NULL;
6221 break;
6222
6223 case CPU_CFV3:
6224 max_insn_size = 3;
6225 sched_ib.records.n_insns = 8;
6226 sched_ib.records.adjust = XNEWVEC (int, sched_ib.records.n_insns);
6227 break;
6228
6229 default:
6230 gcc_unreachable ();
6231 }
6232
6233 sched_mem_unit_code = get_cpu_unit_code ("cf_mem1");
6234
6235 sched_adjust_cost_state = xmalloc (state_size ());
6236 state_reset (sched_adjust_cost_state);
6237
6238 start_sequence ();
6239 emit_insn (gen_ib ());
6240 sched_ib.insn = get_insns ();
6241 end_sequence ();
6242 }
6243
6244 /* Scheduling pass is now finished. Free/reset static variables. */
6245 static void
6246 m68k_sched_md_finish_global (FILE *dump ATTRIBUTE_UNUSED,
6247 int verbose ATTRIBUTE_UNUSED)
6248 {
6249 sched_ib.insn = NULL;
6250
6251 free (sched_adjust_cost_state);
6252 sched_adjust_cost_state = NULL;
6253
6254 sched_mem_unit_code = 0;
6255
6256 free (sched_ib.records.adjust);
6257 sched_ib.records.adjust = NULL;
6258 sched_ib.records.n_insns = 0;
6259 max_insn_size = 0;
6260 }
6261
6262 /* Implementation of targetm.sched.init () hook.
6263 It is invoked each time the scheduler starts on a new block (a basic
6264 block or an extended basic block). */
6265 static void
6266 m68k_sched_md_init (FILE *sched_dump ATTRIBUTE_UNUSED,
6267 int sched_verbose ATTRIBUTE_UNUSED,
6268 int n_insns ATTRIBUTE_UNUSED)
6269 {
6270 switch (m68k_sched_cpu)
6271 {
6272 case CPU_CFV1:
6273 case CPU_CFV2:
6274 sched_ib.size = 6;
6275 break;
6276
6277 case CPU_CFV3:
6278 sched_ib.size = sched_ib.records.n_insns * max_insn_size;
6279
6280 memset (sched_ib.records.adjust, 0,
6281 sched_ib.records.n_insns * sizeof (*sched_ib.records.adjust));
6282 sched_ib.records.adjust_index = 0;
6283 break;
6284
6285 case CPU_CFV4:
6286 gcc_assert (!sched_ib.enabled_p);
6287 sched_ib.size = 0;
6288 break;
6289
6290 default:
6291 gcc_unreachable ();
6292 }
6293
6294 if (sched_ib.enabled_p)
6295 /* haifa-sched.c: schedule_block () calls advance_cycle () just before
6296 the first cycle. Work around that. */
6297 sched_ib.filled = -2;
6298 }
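
/* Note on the -2 above: m68k_sched_dfa_pre_advance_cycle below adds 2
   words for each cycle in which the memory unit is free, so starting
   FILLED at -2 cancels the extra advance_cycle () call and the buffer
   enters the first real cycle empty (filled == 0).  */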
6299
6300 /* Implementation of targetm.sched.dfa_pre_advance_cycle () hook.
6301 It is invoked just before the current cycle finishes and is used here
6302 to track whether the instruction buffer got its two words this cycle. */
6303 static void
6304 m68k_sched_dfa_pre_advance_cycle (void)
6305 {
6306 if (!sched_ib.enabled_p)
6307 return;
6308
6309 if (!cpu_unit_reservation_p (curr_state, sched_mem_unit_code))
6310 {
6311 sched_ib.filled += 2;
6312
6313 if (sched_ib.filled > sched_ib.size)
6314 sched_ib.filled = sched_ib.size;
6315 }
6316 }
6317
6318 /* Implementation of targetm.sched.dfa_post_advance_cycle () hook.
6319 It is invoked just after a new cycle begins and is used here
6320 to set up the number of filled words in the instruction buffer so that
6321 instructions which won't have all their words prefetched will be
6322 stalled for a cycle. */
6323 static void
6324 m68k_sched_dfa_post_advance_cycle (void)
6325 {
6326 int i;
6327
6328 if (!sched_ib.enabled_p)
6329 return;
6330
6331 /* Set up the number of prefetched instruction words in the instruction
6332 buffer. */
6333 i = max_insn_size - sched_ib.filled;
6334
6335 while (--i >= 0)
6336 {
6337 if (state_transition (curr_state, sched_ib.insn) >= 0)
6338 /* Pick up scheduler state. */
6339 ++sched_ib.filled;
6340 }
6341 }
6342
6343 /* Return X or Y (depending on OPX_P) operand of INSN,
6344 if it is an integer register, or NULL otherwise. */
6345 static rtx
6346 sched_get_reg_operand (rtx_insn *insn, bool opx_p)
6347 {
6348 rtx op = NULL;
6349
6350 if (opx_p)
6351 {
6352 if (get_attr_opx_type (insn) == OPX_TYPE_RN)
6353 {
6354 op = sched_get_operand (insn, true);
6355 gcc_assert (op != NULL);
6356
6357 if (!reload_completed && !REG_P (op))
6358 return NULL;
6359 }
6360 }
6361 else
6362 {
6363 if (get_attr_opy_type (insn) == OPY_TYPE_RN)
6364 {
6365 op = sched_get_operand (insn, false);
6366 gcc_assert (op != NULL);
6367
6368 if (!reload_completed && !REG_P (op))
6369 return NULL;
6370 }
6371 }
6372
6373 return op;
6374 }
6375
6376 /* Return true if the X or Y (depending on OPX_P) operand of INSN
6377 is a MEM. */
6378 static bool
6379 sched_mem_operand_p (rtx_insn *insn, bool opx_p)
6380 {
6381 switch (sched_get_opxy_mem_type (insn, opx_p))
6382 {
6383 case OP_TYPE_MEM1:
6384 case OP_TYPE_MEM6:
6385 return true;
6386
6387 default:
6388 return false;
6389 }
6390 }
6391
6392 /* Return the MEM operand of INSN that satisfies constraints MUST_READ_P
6393 and MUST_WRITE_P. Abort if there is no such operand. */
6394 static rtx
6395 sched_get_mem_operand (rtx_insn *insn, bool must_read_p, bool must_write_p)
6396 {
6397 bool opx_p;
6398 bool opy_p;
6399
6400 opx_p = false;
6401 opy_p = false;
6402
6403 if (must_read_p)
6404 {
6405 opx_p = true;
6406 opy_p = true;
6407 }
6408
6409 if (must_write_p)
6410 {
6411 opx_p = true;
6412 opy_p = false;
6413 }
6414
6415 if (opy_p && sched_mem_operand_p (insn, false))
6416 return sched_get_operand (insn, false);
6417
6418 if (opx_p && sched_mem_operand_p (insn, true))
6419 return sched_get_operand (insn, true);
6420
6421 gcc_unreachable ();
6422 return NULL;
6423 }
6424
6425 /* Return non-zero if PRO modifies register used as part of
6426 address in CON. */
6427 int
6428 m68k_sched_address_bypass_p (rtx_insn *pro, rtx_insn *con)
6429 {
6430 rtx pro_x;
6431 rtx con_mem_read;
6432
6433 pro_x = sched_get_reg_operand (pro, true);
6434 if (pro_x == NULL)
6435 return 0;
6436
6437 con_mem_read = sched_get_mem_operand (con, true, false);
6438 gcc_assert (con_mem_read != NULL);
6439
6440 if (reg_mentioned_p (pro_x, con_mem_read))
6441 return 1;
6442
6443 return 0;
6444 }
6445
6446 /* Helper function for m68k_sched_indexed_address_bypass_p.
6447 If PRO modifies a register used as an index in CON, return the scale
6448 of the indexed memory access in CON. Return zero otherwise. */
6449 static int
6450 sched_get_indexed_address_scale (rtx_insn *pro, rtx_insn *con)
6451 {
6452 rtx reg;
6453 rtx mem;
6454 struct m68k_address address;
6455
6456 reg = sched_get_reg_operand (pro, true);
6457 if (reg == NULL)
6458 return 0;
6459
6460 mem = sched_get_mem_operand (con, true, false);
6461 gcc_assert (mem != NULL && MEM_P (mem));
6462
6463 if (!m68k_decompose_address (GET_MODE (mem), XEXP (mem, 0), reload_completed,
6464 &address))
6465 gcc_unreachable ();
6466
6467 if (REGNO (reg) == REGNO (address.index))
6468 {
6469 gcc_assert (address.scale != 0);
6470 return address.scale;
6471 }
6472
6473 return 0;
6474 }
6475
6476 /* Return non-zero if PRO modifies register used
6477 as index with scale 2 or 4 in CON. */
6478 int
6479 m68k_sched_indexed_address_bypass_p (rtx_insn *pro, rtx_insn *con)
6480 {
6481 gcc_assert (sched_cfv4_bypass_data.pro == NULL
6482 && sched_cfv4_bypass_data.con == NULL
6483 && sched_cfv4_bypass_data.scale == 0);
6484
6485 switch (sched_get_indexed_address_scale (pro, con))
6486 {
6487 case 1:
6488 /* We can't have a variable latency bypass, so
6489 remember to adjust the insn cost in adjust_cost hook. */
6490 sched_cfv4_bypass_data.pro = pro;
6491 sched_cfv4_bypass_data.con = con;
6492 sched_cfv4_bypass_data.scale = 1;
6493 return 0;
6494
6495 case 2:
6496 case 4:
6497 return 1;
6498
6499 default:
6500 return 0;
6501 }
6502 }
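
/* Guards such as m68k_sched_address_bypass_p and
   m68k_sched_indexed_address_bypass_p above are referenced from
   define_bypass clauses in the machine description.  A hypothetical
   sketch (the reservation names are illustrative, not the actual cf.md
   text):

     (define_bypass 1 "cfv4_pro_insn" "cfv4_con_insn"
       "m68k_sched_indexed_address_bypass_p")

   i.e. when the guard returns non-zero, the dependence latency between
   the matched producer and consumer is replaced by the given value.  */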
6503
6504 /* We generate a two-instruction program at M_TRAMP:
6505 movea.l &CHAIN_VALUE,%a0
6506 jmp FNADDR
6507 where %a0 can be modified by changing STATIC_CHAIN_REGNUM. */
6508
6509 static void
6510 m68k_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
6511 {
6512 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
6513 rtx mem;
6514
6515 gcc_assert (ADDRESS_REGNO_P (STATIC_CHAIN_REGNUM));
6516
6517 mem = adjust_address (m_tramp, HImode, 0);
6518 emit_move_insn (mem, GEN_INT (0x207C + ((STATIC_CHAIN_REGNUM - 8) << 9)));
6519 mem = adjust_address (m_tramp, SImode, 2);
6520 emit_move_insn (mem, chain_value);
6521
6522 mem = adjust_address (m_tramp, HImode, 6);
6523 emit_move_insn (mem, GEN_INT (0x4EF9));
6524 mem = adjust_address (m_tramp, SImode, 8);
6525 emit_move_insn (mem, fnaddr);
6526
6527 FINALIZE_TRAMPOLINE (XEXP (m_tramp, 0));
6528 }
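
/* A sketch of the resulting 12-byte trampoline, assuming
   STATIC_CHAIN_REGNUM selects %a0 so that the register field added
   above is zero:

     offset 0:  0x207C         movea.l #<chain_value>,%a0
     offset 2:  <chain_value>  32-bit immediate
     offset 6:  0x4EF9         jmp (<fnaddr>).L
     offset 8:  <fnaddr>       32-bit absolute address  */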
6529
6530 /* On the 68000, the RTS insn cannot pop anything.
6531 On the 68010, the RTD insn may be used to pop them if the number
6532 of args is fixed, but if the number is variable then the caller
6533 must pop them all. RTD can't be used for library calls now
6534 because the library is compiled with the Unix compiler.
6535 Use of RTD is a selectable option, since it is incompatible with
6536 standard Unix calling sequences. If the option is not selected,
6537 the caller must always pop the args. */
6538
6539 static poly_int64
6540 m68k_return_pops_args (tree fundecl, tree funtype, poly_int64 size)
6541 {
6542 return ((TARGET_RTD
6543 && (!fundecl
6544 || TREE_CODE (fundecl) != IDENTIFIER_NODE)
6545 && (!stdarg_p (funtype)))
6546 ? (HOST_WIDE_INT) size : 0);
6547 }
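
/* For example, with -mrtd a call to a fixed-argument function such as
   "int f (int, int)" lets the callee pop its 8 bytes of arguments
   (e.g. via "rtd #8"), so this hook returns SIZE; for a variadic
   function, or without -mrtd, it returns 0 and the caller pops.  */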
6548
6549 /* Make sure everything's fine if we *don't* have a given processor.
6550 This assumes that putting a register in fixed_regs will keep the
6551 compiler's mitts completely off it. We don't bother to zero it out
6552 of register classes. */
6553
6554 static void
6555 m68k_conditional_register_usage (void)
6556 {
6557 int i;
6558 HARD_REG_SET x;
6559 if (!TARGET_HARD_FLOAT)
6560 {
6561 COPY_HARD_REG_SET (x, reg_class_contents[(int)FP_REGS]);
6562 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
6563 if (TEST_HARD_REG_BIT (x, i))
6564 fixed_regs[i] = call_used_regs[i] = 1;
6565 }
6566 if (flag_pic)
6567 fixed_regs[PIC_REG] = call_used_regs[PIC_REG] = 1;
6568 }
6569
6570 static void
6571 m68k_init_sync_libfuncs (void)
6572 {
6573 init_sync_libfuncs (UNITS_PER_WORD);
6574 }
6575
6576 /* Implements EPILOGUE_USES. All registers are live on exit from an
6577 interrupt routine. */
6578 bool
6579 m68k_epilogue_uses (int regno ATTRIBUTE_UNUSED)
6580 {
6581 return (reload_completed
6582 && (m68k_get_function_kind (current_function_decl)
6583 == m68k_fk_interrupt_handler));
6584 }
6585
6586
6587 /* Implement TARGET_C_EXCESS_PRECISION.
6588
6589 Set the value of FLT_EVAL_METHOD in float.h. When using 68040 fp
6590 instructions, we get proper intermediate rounding; otherwise we
6591 get extended-precision results. */
6592
6593 static enum flt_eval_method
6594 m68k_excess_precision (enum excess_precision_type type)
6595 {
6596 switch (type)
6597 {
6598 case EXCESS_PRECISION_TYPE_FAST:
6599 /* The fastest type to promote to will always be the native type,
6600 whether that occurs with implicit excess precision or
6601 otherwise. */
6602 return FLT_EVAL_METHOD_PROMOTE_TO_FLOAT;
6603 case EXCESS_PRECISION_TYPE_STANDARD:
6604 case EXCESS_PRECISION_TYPE_IMPLICIT:
6605 /* Otherwise, the excess precision we want when we are
6606 in a standards compliant mode, and the implicit precision we
6607 provide can be identical. */
6608 if (TARGET_68040 || ! TARGET_68881)
6609 return FLT_EVAL_METHOD_PROMOTE_TO_FLOAT;
6610
6611 return FLT_EVAL_METHOD_PROMOTE_TO_LONG_DOUBLE;
6612 default:
6613 gcc_unreachable ();
6614 }
6615 return FLT_EVAL_METHOD_UNPREDICTABLE;
6616 }
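
/* For example, with -m68881 but without -m68040 this returns
   FLT_EVAL_METHOD_PROMOTE_TO_LONG_DOUBLE (FLT_EVAL_METHOD == 2 in
   float.h), so "float x = a * b;" may be computed in the FPU's
   extended-precision registers before the final store rounds it.  */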
6617
6618 /* Implement PUSH_ROUNDING. On the 680x0, sp@- in a byte insn really pushes
6619 a word. On the ColdFire, sp@- in a byte insn pushes just a byte. */
6620
6621 poly_int64
6622 m68k_push_rounding (poly_int64 bytes)
6623 {
6624 if (TARGET_COLDFIRE)
6625 return bytes;
6626 return (bytes + 1) & ~1;
6627 }
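
/* For example, m68k_push_rounding (1) is 2 on 680x0 targets (a byte
   push with "move.b %d0,-(%sp)" still decrements %sp by 2), while on
   ColdFire it is 1.  */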
6628
6629 /* Implement TARGET_PROMOTE_FUNCTION_MODE. */
6630
6631 static machine_mode
6632 m68k_promote_function_mode (const_tree type, machine_mode mode,
6633 int *punsignedp ATTRIBUTE_UNUSED,
6634 const_tree fntype ATTRIBUTE_UNUSED,
6635 int for_return)
6636 {
6637 /* Promote libcall arguments narrower than int to match the normal C
6638 ABI (for which promotions are handled via
6639 TARGET_PROMOTE_PROTOTYPES). */
6640 if (type == NULL_TREE && !for_return && (mode == QImode || mode == HImode))
6641 return SImode;
6642 return mode;
6643 }
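
/* For example, a QImode libcall argument is passed as SImode here,
   mirroring the promotion that TARGET_PROMOTE_PROTOTYPES performs for
   ordinary prototyped C calls.  */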
6644
6645 #include "gt-m68k.h"