/* Subroutines for insn-output.c for Motorola 68000 family.
   Copyright (C) 1987-2018 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#define IN_TARGET_CODE 1

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "cfghooks.h"
#include "tree.h"
#include "stringpool.h"
#include "attribs.h"
#include "rtl.h"
#include "df.h"
#include "alias.h"
#include "fold-const.h"
#include "calls.h"
#include "stor-layout.h"
#include "varasm.h"
#include "regs.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "flags.h"
#include "expmed.h"
#include "dojump.h"
#include "explow.h"
#include "memmodel.h"
#include "emit-rtl.h"
#include "stmt.h"
#include "expr.h"
#include "reload.h"
#include "tm_p.h"
#include "target.h"
#include "debug.h"
#include "cfgrtl.h"
#include "cfganal.h"
#include "lcm.h"
#include "cfgbuild.h"
#include "cfgcleanup.h"
/* ??? Need to add a dependency between m68k.o and sched-int.h.  */
#include "sched-int.h"
#include "insn-codes.h"
#include "opts.h"
#include "optabs.h"
#include "builtins.h"
#include "rtl-iter.h"
#include "toplev.h"

/* This file should be included last.  */
#include "target-def.h"

enum reg_class regno_reg_class[] =
{
  DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
  DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
  ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
  ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  ADDR_REGS
};


/* The minimum number of integer registers that we want to save with the
   movem instruction.  Using two movel instructions instead of a single
   moveml is about 15% faster for the 68020 and 68030 at no expense in
   code size.  */
#define MIN_MOVEM_REGS 3

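/* Illustrative sketch of the trade-off (assembler syntax is schematic):
   below the threshold we emit separate moves,

       movel %d2,-(%sp)
       movel %d3,-(%sp)

   while MIN_MOVEM_REGS or more registers use a single

       moveml %d2-%d4,-(%sp)  */
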
/* The minimum number of floating point registers that we want to save
   with the fmovem instruction.  */
#define MIN_FMOVEM_REGS 1

/* Structure describing stack frame layout.  */
struct m68k_frame
{
  /* Stack pointer to frame pointer offset.  */
  HOST_WIDE_INT offset;

  /* Offset of FPU registers.  */
  HOST_WIDE_INT foffset;

  /* Frame size in bytes (rounded up).  */
  HOST_WIDE_INT size;

  /* Data and address register.  */
  int reg_no;
  unsigned int reg_mask;

  /* FPU registers.  */
  int fpu_no;
  unsigned int fpu_mask;

  /* Offsets relative to ARG_POINTER.  */
  HOST_WIDE_INT frame_pointer_offset;
  HOST_WIDE_INT stack_pointer_offset;

  /* Function which the above information refers to.  */
  int funcdef_no;
};

/* Current frame information calculated by m68k_compute_frame_layout().  */
static struct m68k_frame current_frame;

/* Structure describing an m68k address.

   If CODE is UNKNOWN, the address is BASE + INDEX * SCALE + OFFSET,
   with null fields evaluating to 0.  Here:

   - BASE satisfies m68k_legitimate_base_reg_p
   - INDEX satisfies m68k_legitimate_index_reg_p
   - OFFSET satisfies m68k_legitimate_constant_address_p

   INDEX is either HImode or SImode.  The other fields are SImode.

   If CODE is PRE_DEC, the address is -(BASE).  If CODE is POST_INC,
   the address is (BASE)+.  */
struct m68k_address {
  enum rtx_code code;
  rtx base;
  rtx index;
  rtx offset;
  int scale;
};

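/* For example, the 68020 address "8(%a0,%d0.l*4)" would decompose as
   BASE = %a0, INDEX = %d0 (SImode), SCALE = 4 and OFFSET = 8, with
   CODE == UNKNOWN.  */
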
static int m68k_sched_adjust_cost (rtx_insn *, int, rtx_insn *, int,
                                   unsigned int);
static int m68k_sched_issue_rate (void);
static int m68k_sched_variable_issue (FILE *, int, rtx_insn *, int);
static void m68k_sched_md_init_global (FILE *, int, int);
static void m68k_sched_md_finish_global (FILE *, int);
static void m68k_sched_md_init (FILE *, int, int);
static void m68k_sched_dfa_pre_advance_cycle (void);
static void m68k_sched_dfa_post_advance_cycle (void);
static int m68k_sched_first_cycle_multipass_dfa_lookahead (void);

static bool m68k_can_eliminate (const int, const int);
static void m68k_conditional_register_usage (void);
static bool m68k_legitimate_address_p (machine_mode, rtx, bool);
static void m68k_option_override (void);
static void m68k_override_options_after_change (void);
static rtx find_addr_reg (rtx);
static const char *singlemove_string (rtx *);
static void m68k_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
                                  HOST_WIDE_INT, tree);
static rtx m68k_struct_value_rtx (tree, int);
static tree m68k_handle_fndecl_attribute (tree *node, tree name,
                                          tree args, int flags,
                                          bool *no_add_attrs);
static void m68k_compute_frame_layout (void);
static bool m68k_save_reg (unsigned int regno, bool interrupt_handler);
static bool m68k_ok_for_sibcall_p (tree, tree);
static bool m68k_tls_symbol_p (rtx);
static rtx m68k_legitimize_address (rtx, rtx, machine_mode);
static bool m68k_rtx_costs (rtx, machine_mode, int, int, int *, bool);
#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
static bool m68k_return_in_memory (const_tree, const_tree);
#endif
static void m68k_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
static void m68k_trampoline_init (rtx, tree, rtx);
static poly_int64 m68k_return_pops_args (tree, tree, poly_int64);
static rtx m68k_delegitimize_address (rtx);
static void m68k_function_arg_advance (cumulative_args_t, machine_mode,
                                       const_tree, bool);
static rtx m68k_function_arg (cumulative_args_t, machine_mode,
                              const_tree, bool);
static bool m68k_cannot_force_const_mem (machine_mode mode, rtx x);
static bool m68k_output_addr_const_extra (FILE *, rtx);
static void m68k_init_sync_libfuncs (void) ATTRIBUTE_UNUSED;
static enum flt_eval_method
m68k_excess_precision (enum excess_precision_type);
static unsigned int m68k_hard_regno_nregs (unsigned int, machine_mode);
static bool m68k_hard_regno_mode_ok (unsigned int, machine_mode);
static bool m68k_modes_tieable_p (machine_mode, machine_mode);
static machine_mode m68k_promote_function_mode (const_tree, machine_mode,
                                                int *, const_tree, int);
\f
/* Initialize the GCC target structure.  */

#if INT_OP_GROUP == INT_OP_DOT_WORD
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
#endif

#if INT_OP_GROUP == INT_OP_NO_DOT
#undef TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tbyte\t"
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tshort\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tlong\t"
#endif

#if INT_OP_GROUP == INT_OP_DC
#undef TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tdc.b\t"
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tdc.w\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tdc.l\t"
#endif

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK m68k_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_ASM_FILE_START_APP_OFF
#define TARGET_ASM_FILE_START_APP_OFF true

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS m68k_legitimize_address

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST m68k_sched_adjust_cost

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE m68k_sched_issue_rate

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE m68k_sched_variable_issue

#undef TARGET_SCHED_INIT_GLOBAL
#define TARGET_SCHED_INIT_GLOBAL m68k_sched_md_init_global

#undef TARGET_SCHED_FINISH_GLOBAL
#define TARGET_SCHED_FINISH_GLOBAL m68k_sched_md_finish_global

#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT m68k_sched_md_init

#undef TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE
#define TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE m68k_sched_dfa_pre_advance_cycle

#undef TARGET_SCHED_DFA_POST_ADVANCE_CYCLE
#define TARGET_SCHED_DFA_POST_ADVANCE_CYCLE m68k_sched_dfa_post_advance_cycle

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
  m68k_sched_first_cycle_multipass_dfa_lookahead

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE m68k_option_override

#undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m68k_override_options_after_change

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS m68k_rtx_costs

#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE m68k_attribute_table

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX m68k_struct_value_rtx

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM m68k_cannot_force_const_mem

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL m68k_ok_for_sibcall_p

#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY m68k_return_in_memory
#endif

#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS (true)

#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL m68k_output_dwarf_dtprel
#endif

#undef TARGET_LRA_P
#define TARGET_LRA_P hook_bool_void_false

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P m68k_legitimate_address_p

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE m68k_can_eliminate

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE m68k_conditional_register_usage

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT m68k_trampoline_init

#undef TARGET_RETURN_POPS_ARGS
#define TARGET_RETURN_POPS_ARGS m68k_return_pops_args

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS m68k_delegitimize_address

#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG m68k_function_arg

#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE m68k_function_arg_advance

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P m68k_legitimate_constant_p

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA m68k_output_addr_const_extra

#undef TARGET_C_EXCESS_PRECISION
#define TARGET_C_EXCESS_PRECISION m68k_excess_precision

/* The value stored by TAS.  */
#undef TARGET_ATOMIC_TEST_AND_SET_TRUEVAL
#define TARGET_ATOMIC_TEST_AND_SET_TRUEVAL 128

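/* (The m68k "tas" instruction sets the most significant bit of its byte
   operand, so a successful test-and-set leaves 0x80 = 128 in memory
   rather than 1.)  */
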
#undef TARGET_HARD_REGNO_NREGS
#define TARGET_HARD_REGNO_NREGS m68k_hard_regno_nregs
#undef TARGET_HARD_REGNO_MODE_OK
#define TARGET_HARD_REGNO_MODE_OK m68k_hard_regno_mode_ok

#undef TARGET_MODES_TIEABLE_P
#define TARGET_MODES_TIEABLE_P m68k_modes_tieable_p

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE m68k_promote_function_mode

#undef TARGET_HAVE_SPECULATION_SAFE_VALUE
#define TARGET_HAVE_SPECULATION_SAFE_VALUE speculation_safe_value_not_needed

static const struct attribute_spec m68k_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
       affects_type_identity, handler, exclude } */
  { "interrupt", 0, 0, true, false, false, false,
    m68k_handle_fndecl_attribute, NULL },
  { "interrupt_handler", 0, 0, true, false, false, false,
    m68k_handle_fndecl_attribute, NULL },
  { "interrupt_thread", 0, 0, true, false, false, false,
    m68k_handle_fndecl_attribute, NULL },
  { NULL, 0, 0, false, false, false, false, NULL, NULL }
};

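/* Usage sketch (illustrative): declaring

       void __attribute__ ((interrupt_handler)) timer_isr (void);

   makes m68k_get_function_kind below return m68k_fk_interrupt_handler
   for timer_isr, so the prologue/epilogue code saves the extra
   call-clobbered registers an interrupt handler needs and the function
   returns with rte instead of rts.  */
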
struct gcc_target targetm = TARGET_INITIALIZER;
\f
/* Base flags for 68k ISAs.  */
#define FL_FOR_isa_00 FL_ISA_68000
#define FL_FOR_isa_10 (FL_FOR_isa_00 | FL_ISA_68010)
/* FL_68881 controls the default setting of -m68881.  gcc has traditionally
   generated 68881 code for 68020 and 68030 targets unless explicitly told
   not to.  */
#define FL_FOR_isa_20 (FL_FOR_isa_10 | FL_ISA_68020 \
                       | FL_BITFIELD | FL_68881 | FL_CAS)
#define FL_FOR_isa_40 (FL_FOR_isa_20 | FL_ISA_68040)
#define FL_FOR_isa_cpu32 (FL_FOR_isa_10 | FL_ISA_68020)

/* Base flags for ColdFire ISAs.  */
#define FL_FOR_isa_a (FL_COLDFIRE | FL_ISA_A)
#define FL_FOR_isa_aplus (FL_FOR_isa_a | FL_ISA_APLUS | FL_CF_USP)
/* Note ISA_B doesn't necessarily include USP (user stack pointer) support.  */
#define FL_FOR_isa_b (FL_FOR_isa_a | FL_ISA_B | FL_CF_HWDIV)
/* ISA_C is not upwardly compatible with ISA_B.  */
#define FL_FOR_isa_c (FL_FOR_isa_a | FL_ISA_C | FL_CF_USP)

enum m68k_isa
{
  /* Traditional 68000 instruction sets.  */
  isa_00,
  isa_10,
  isa_20,
  isa_40,
  isa_cpu32,
  /* ColdFire instruction set variants.  */
  isa_a,
  isa_aplus,
  isa_b,
  isa_c,
  isa_max
};

/* Information about one of the -march, -mcpu or -mtune arguments.  */
struct m68k_target_selection
{
  /* The argument being described.  */
  const char *name;

  /* For -mcpu, this is the device selected by the option.
     For -mtune and -march, it is a representative device
     for the microarchitecture or ISA respectively.  */
  enum target_device device;

  /* The M68K_DEVICE fields associated with DEVICE.  See the comment
     in m68k-devices.def for details.  FAMILY is only valid for -mcpu.  */
  const char *family;
  enum uarch_type microarch;
  enum m68k_isa isa;
  unsigned long flags;
};

/* A list of all devices in m68k-devices.def.  Used for -mcpu selection.  */
static const struct m68k_target_selection all_devices[] =
{
#define M68K_DEVICE(NAME,ENUM_VALUE,FAMILY,MULTILIB,MICROARCH,ISA,FLAGS) \
  { NAME, ENUM_VALUE, FAMILY, u##MICROARCH, ISA, FLAGS | FL_FOR_##ISA },
#include "m68k-devices.def"
#undef M68K_DEVICE
  { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
};

/* A list of all ISAs, mapping each one to a representative device.
   Used for -march selection.  */
static const struct m68k_target_selection all_isas[] =
{
#define M68K_ISA(NAME,DEVICE,MICROARCH,ISA,FLAGS) \
  { NAME, DEVICE, NULL, u##MICROARCH, ISA, FLAGS },
#include "m68k-isas.def"
#undef M68K_ISA
  { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
};

/* A list of all microarchitectures, mapping each one to a representative
   device.  Used for -mtune selection.  */
static const struct m68k_target_selection all_microarchs[] =
{
#define M68K_MICROARCH(NAME,DEVICE,MICROARCH,ISA,FLAGS) \
  { NAME, DEVICE, NULL, u##MICROARCH, ISA, FLAGS },
#include "m68k-microarchs.def"
#undef M68K_MICROARCH
  { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
};
\f
/* The entries associated with the -mcpu, -march and -mtune settings,
   or null for options that have not been used.  */
const struct m68k_target_selection *m68k_cpu_entry;
const struct m68k_target_selection *m68k_arch_entry;
const struct m68k_target_selection *m68k_tune_entry;

/* Which CPU we are generating code for.  */
enum target_device m68k_cpu;

/* Which microarchitecture to tune for.  */
enum uarch_type m68k_tune;

/* Which FPU to use.  */
enum fpu_type m68k_fpu;

/* The set of FL_* flags that apply to the target processor.  */
unsigned int m68k_cpu_flags;

/* The set of FL_* flags that apply to the processor to be tuned for.  */
unsigned int m68k_tune_flags;

/* Asm templates for calling or jumping to an arbitrary symbolic address,
   or NULL if such calls or jumps are not supported.  The address is held
   in operand 0.  */
const char *m68k_symbolic_call;
const char *m68k_symbolic_jump;

/* Enum variable that corresponds to m68k_symbolic_call values.  */
enum M68K_SYMBOLIC_CALL m68k_symbolic_call_var;

\f
/* Implement TARGET_OPTION_OVERRIDE.  */

static void
m68k_option_override (void)
{
  const struct m68k_target_selection *entry;
  unsigned long target_mask;

  if (global_options_set.x_m68k_arch_option)
    m68k_arch_entry = &all_isas[m68k_arch_option];

  if (global_options_set.x_m68k_cpu_option)
    m68k_cpu_entry = &all_devices[(int) m68k_cpu_option];

  if (global_options_set.x_m68k_tune_option)
    m68k_tune_entry = &all_microarchs[(int) m68k_tune_option];

  /* User can choose:

     -mcpu=
     -march=
     -mtune=

     -march=ARCH should generate code that runs on any processor
     implementing architecture ARCH.  -mcpu=CPU should override -march
     and should generate code that runs on processor CPU, making free
     use of any instructions that CPU understands.  -mtune=UARCH applies
     on top of -mcpu or -march and optimizes the code for UARCH.  It does
     not change the target architecture.  */
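  /* Example (illustrative, assuming the 5208's ISA A+ feature set):
     "-mcpu=5208 -march=isab" triggers the warning below, because ISA B
     promises instructions the 5208 does not implement.  */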
  if (m68k_cpu_entry)
    {
      /* Complain if the -march setting is for a different microarchitecture,
         or includes flags that the -mcpu setting doesn't.  */
      if (m68k_arch_entry
          && (m68k_arch_entry->microarch != m68k_cpu_entry->microarch
              || (m68k_arch_entry->flags & ~m68k_cpu_entry->flags) != 0))
        warning (0, "-mcpu=%s conflicts with -march=%s",
                 m68k_cpu_entry->name, m68k_arch_entry->name);

      entry = m68k_cpu_entry;
    }
  else
    entry = m68k_arch_entry;

  if (!entry)
    entry = all_devices + TARGET_CPU_DEFAULT;

  m68k_cpu_flags = entry->flags;

  /* Use the architecture setting to derive default values for
     certain flags.  */
  target_mask = 0;

  /* ColdFire is lenient about alignment.  */
  if (!TARGET_COLDFIRE)
    target_mask |= MASK_STRICT_ALIGNMENT;

  if ((m68k_cpu_flags & FL_BITFIELD) != 0)
    target_mask |= MASK_BITFIELD;
  if ((m68k_cpu_flags & FL_CF_HWDIV) != 0)
    target_mask |= MASK_CF_HWDIV;
  if ((m68k_cpu_flags & (FL_68881 | FL_CF_FPU)) != 0)
    target_mask |= MASK_HARD_FLOAT;
  target_flags |= target_mask & ~target_flags_explicit;

  /* Set the directly-usable versions of the -mcpu and -mtune settings.  */
  m68k_cpu = entry->device;
  if (m68k_tune_entry)
    {
      m68k_tune = m68k_tune_entry->microarch;
      m68k_tune_flags = m68k_tune_entry->flags;
    }
#ifdef M68K_DEFAULT_TUNE
  else if (!m68k_cpu_entry && !m68k_arch_entry)
    {
      enum target_device dev;
      dev = all_microarchs[M68K_DEFAULT_TUNE].device;
      m68k_tune_flags = all_devices[dev].flags;
    }
#endif
  else
    {
      m68k_tune = entry->microarch;
      m68k_tune_flags = entry->flags;
    }

  /* Set the type of FPU.  */
  m68k_fpu = (!TARGET_HARD_FLOAT ? FPUTYPE_NONE
              : (m68k_cpu_flags & FL_COLDFIRE) != 0 ? FPUTYPE_COLDFIRE
              : FPUTYPE_68881);

  /* Sanity check to ensure that msep-data and mid-shared-library are not
   * both specified together.  Doing so simply doesn't make sense.
   */
  if (TARGET_SEP_DATA && TARGET_ID_SHARED_LIBRARY)
    error ("cannot specify both -msep-data and -mid-shared-library");

  /* If we're generating code for a separate A5 relative data segment,
   * we've got to enable -fPIC as well.  This might be relaxable to
   * -fpic but it hasn't been tested properly.
   */
  if (TARGET_SEP_DATA || TARGET_ID_SHARED_LIBRARY)
    flag_pic = 2;

  /* -mpcrel -fPIC uses 32-bit pc-relative displacements.  Raise an
     error if the target does not support them.  */
  if (TARGET_PCREL && !TARGET_68020 && flag_pic == 2)
    error ("-mpcrel -fPIC is not currently supported on selected cpu");

  /* ??? A historic way of turning on pic, or is this intended to
     be an embedded thing that doesn't have the same name binding
     significance that it does on hosted ELF systems?  */
  if (TARGET_PCREL && flag_pic == 0)
    flag_pic = 1;

  if (!flag_pic)
    {
      m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_JSR;

      m68k_symbolic_jump = "jra %a0";
    }
  else if (TARGET_ID_SHARED_LIBRARY)
    /* All addresses must be loaded from the GOT.  */
    ;
  else if (TARGET_68020 || TARGET_ISAB || TARGET_ISAC)
    {
      if (TARGET_PCREL)
        m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_C;
      else
        m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_P;

      if (TARGET_ISAC)
        /* No unconditional long branch */;
      else if (TARGET_PCREL)
        m68k_symbolic_jump = "bra%.l %c0";
      else
        m68k_symbolic_jump = "bra%.l %p0";
      /* Turn off function cse if we are doing PIC.  We always want
         function call to be done as `bsr foo@PLTPC'.  */
      /* ??? It's traditional to do this for -mpcrel too, but it isn't
         clear how intentional that is.  */
      flag_no_function_cse = 1;
    }

  switch (m68k_symbolic_call_var)
    {
    case M68K_SYMBOLIC_CALL_JSR:
      m68k_symbolic_call = "jsr %a0";
      break;

    case M68K_SYMBOLIC_CALL_BSR_C:
      m68k_symbolic_call = "bsr%.l %c0";
      break;

    case M68K_SYMBOLIC_CALL_BSR_P:
      m68k_symbolic_call = "bsr%.l %p0";
      break;

    case M68K_SYMBOLIC_CALL_NONE:
      gcc_assert (m68k_symbolic_call == NULL);
      break;

    default:
      gcc_unreachable ();
    }

#ifndef ASM_OUTPUT_ALIGN_WITH_NOP
  parse_alignment_opts ();
  int label_alignment = align_labels.levels[0].get_value ();
  if (label_alignment > 2)
    {
      warning (0, "-falign-labels=%d is not supported", label_alignment);
      str_align_labels = "1";
    }

  int loop_alignment = align_loops.levels[0].get_value ();
  if (loop_alignment > 2)
    {
      warning (0, "-falign-loops=%d is not supported", loop_alignment);
      str_align_loops = "1";
    }
#endif

  if ((opt_fstack_limit_symbol_arg != NULL || opt_fstack_limit_register_no >= 0)
      && !TARGET_68020)
    {
      warning (0, "-fstack-limit- options are not supported on this cpu");
      opt_fstack_limit_symbol_arg = NULL;
      opt_fstack_limit_register_no = -1;
    }

  SUBTARGET_OVERRIDE_OPTIONS;

  /* Setup scheduling options.  */
  if (TUNE_CFV1)
    m68k_sched_cpu = CPU_CFV1;
  else if (TUNE_CFV2)
    m68k_sched_cpu = CPU_CFV2;
  else if (TUNE_CFV3)
    m68k_sched_cpu = CPU_CFV3;
  else if (TUNE_CFV4)
    m68k_sched_cpu = CPU_CFV4;
  else
    {
      m68k_sched_cpu = CPU_UNKNOWN;
      flag_schedule_insns = 0;
      flag_schedule_insns_after_reload = 0;
      flag_modulo_sched = 0;
      flag_live_range_shrinkage = 0;
    }

  if (m68k_sched_cpu != CPU_UNKNOWN)
    {
      if ((m68k_cpu_flags & (FL_CF_EMAC | FL_CF_EMAC_B)) != 0)
        m68k_sched_mac = MAC_CF_EMAC;
      else if ((m68k_cpu_flags & FL_CF_MAC) != 0)
        m68k_sched_mac = MAC_CF_MAC;
      else
        m68k_sched_mac = MAC_NO;
    }
}

/* Implement TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE.  */

static void
m68k_override_options_after_change (void)
{
  if (m68k_sched_cpu == CPU_UNKNOWN)
    {
      flag_schedule_insns = 0;
      flag_schedule_insns_after_reload = 0;
      flag_modulo_sched = 0;
      flag_live_range_shrinkage = 0;
    }
}

/* Generate a macro of the form __mPREFIX_cpu_NAME, where PREFIX is the
   given argument and NAME is the argument passed to -mcpu.  Return NULL
   if -mcpu was not passed.  */

const char *
m68k_cpp_cpu_ident (const char *prefix)
{
  if (!m68k_cpu_entry)
    return NULL;
  return concat ("__m", prefix, "_cpu_", m68k_cpu_entry->name, NULL);
}

/* Generate a macro of the form __mPREFIX_family_NAME, where PREFIX is the
   given argument and NAME is the name of the representative device for
   the -mcpu argument's family.  Return NULL if -mcpu was not passed.  */

const char *
m68k_cpp_cpu_family (const char *prefix)
{
  if (!m68k_cpu_entry)
    return NULL;
  return concat ("__m", prefix, "_family_", m68k_cpu_entry->family, NULL);
}
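
/* For example (illustrative), "-mcpu=5208" with the "cf" prefix used by
   the C preprocessor hooks yields "__mcf_cpu_5208" from the function
   above and a macro such as "__mcf_family_5208" here; the exact family
   name comes from m68k-devices.def.  */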
\f
/* Return m68k_fk_interrupt_handler if FUNC has an "interrupt" or
   "interrupt_handler" attribute and m68k_fk_interrupt_thread if FUNC
   has an "interrupt_thread" attribute.  Otherwise, return
   m68k_fk_normal_function.  */

enum m68k_function_kind
m68k_get_function_kind (tree func)
{
  tree a;

  gcc_assert (TREE_CODE (func) == FUNCTION_DECL);

  a = lookup_attribute ("interrupt", DECL_ATTRIBUTES (func));
  if (a != NULL_TREE)
    return m68k_fk_interrupt_handler;

  a = lookup_attribute ("interrupt_handler", DECL_ATTRIBUTES (func));
  if (a != NULL_TREE)
    return m68k_fk_interrupt_handler;

  a = lookup_attribute ("interrupt_thread", DECL_ATTRIBUTES (func));
  if (a != NULL_TREE)
    return m68k_fk_interrupt_thread;

  return m68k_fk_normal_function;
}

/* Handle an attribute requiring a FUNCTION_DECL; arguments as in
   struct attribute_spec.handler.  */
static tree
m68k_handle_fndecl_attribute (tree *node, tree name,
                              tree args ATTRIBUTE_UNUSED,
                              int flags ATTRIBUTE_UNUSED,
                              bool *no_add_attrs)
{
  if (TREE_CODE (*node) != FUNCTION_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
               name);
      *no_add_attrs = true;
    }

  if (m68k_get_function_kind (*node) != m68k_fk_normal_function)
    {
      error ("multiple interrupt attributes not allowed");
      *no_add_attrs = true;
    }

  if (!TARGET_FIDOA
      && !strcmp (IDENTIFIER_POINTER (name), "interrupt_thread"))
    {
      error ("interrupt_thread is available only on fido");
      *no_add_attrs = true;
    }

  return NULL_TREE;
}

static void
m68k_compute_frame_layout (void)
{
  int regno, saved;
  unsigned int mask;
  enum m68k_function_kind func_kind =
    m68k_get_function_kind (current_function_decl);
  bool interrupt_handler = func_kind == m68k_fk_interrupt_handler;
  bool interrupt_thread = func_kind == m68k_fk_interrupt_thread;

  /* Only compute the frame once per function.
     Don't cache information until reload has been completed.  */
  if (current_frame.funcdef_no == current_function_funcdef_no
      && reload_completed)
    return;

  current_frame.size = (get_frame_size () + 3) & -4;

  mask = saved = 0;

  /* Interrupt thread does not need to save any register.  */
  if (!interrupt_thread)
    for (regno = 0; regno < 16; regno++)
      if (m68k_save_reg (regno, interrupt_handler))
        {
          mask |= 1 << (regno - D0_REG);
          saved++;
        }
  current_frame.offset = saved * 4;
  current_frame.reg_no = saved;
  current_frame.reg_mask = mask;

  current_frame.foffset = 0;
  mask = saved = 0;
  if (TARGET_HARD_FLOAT)
    {
      /* Interrupt thread does not need to save any register.  */
      if (!interrupt_thread)
        for (regno = 16; regno < 24; regno++)
          if (m68k_save_reg (regno, interrupt_handler))
            {
              mask |= 1 << (regno - FP0_REG);
              saved++;
            }
      current_frame.foffset = saved * TARGET_FP_REG_SIZE;
      current_frame.offset += current_frame.foffset;
    }
  current_frame.fpu_no = saved;
  current_frame.fpu_mask = mask;

  /* Remember what function this frame refers to.  */
  current_frame.funcdef_no = current_function_funcdef_no;
}

/* Worker function for TARGET_CAN_ELIMINATE.  */

bool
m68k_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
{
  return (to == STACK_POINTER_REGNUM ? ! frame_pointer_needed : true);
}

HOST_WIDE_INT
m68k_initial_elimination_offset (int from, int to)
{
  int argptr_offset;
  /* The arg pointer points 8 bytes before the start of the arguments,
     as defined by FIRST_PARM_OFFSET.  This makes it coincident with the
     frame pointer in most frames.  */
  argptr_offset = frame_pointer_needed ? 0 : UNITS_PER_WORD;
  if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
    return argptr_offset;

  m68k_compute_frame_layout ();

  gcc_assert (to == STACK_POINTER_REGNUM);
  switch (from)
    {
    case ARG_POINTER_REGNUM:
      return current_frame.offset + current_frame.size - argptr_offset;
    case FRAME_POINTER_REGNUM:
      return current_frame.offset + current_frame.size;
    default:
      gcc_unreachable ();
    }
}

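/* Worked example (illustrative): with 12 bytes of locals
   (current_frame.size == 12), two saved data registers
   (current_frame.offset == 8) and no frame pointer (argptr_offset ==
   UNITS_PER_WORD == 4), eliminating ARG_POINTER into STACK_POINTER
   yields 8 + 12 - 4 = 16.  */
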
/* Refer to the array `regs_ever_live' to determine which registers
   to save; `regs_ever_live[I]' is nonzero if register number I
   is ever used in the function.  This function is responsible for
   knowing which registers should not be saved even if used.
   Return true if we need to save REGNO.  */

static bool
m68k_save_reg (unsigned int regno, bool interrupt_handler)
{
  if (flag_pic && regno == PIC_REG)
    {
      if (crtl->saves_all_registers)
        return true;
      if (crtl->uses_pic_offset_table)
        return true;
      /* Reload may introduce constant pool references into a function
         that thitherto didn't need a PIC register.  Note that the test
         above will not catch that case because we will only set
         crtl->uses_pic_offset_table when emitting
         the address reloads.  */
      if (crtl->uses_const_pool)
        return true;
    }

  if (crtl->calls_eh_return)
    {
      unsigned int i;
      for (i = 0; ; i++)
        {
          unsigned int test = EH_RETURN_DATA_REGNO (i);
          if (test == INVALID_REGNUM)
            break;
          if (test == regno)
            return true;
        }
    }

  /* Fixed regs we never touch.  */
  if (fixed_regs[regno])
    return false;

  /* The frame pointer (if it is such) is handled specially.  */
  if (regno == FRAME_POINTER_REGNUM && frame_pointer_needed)
    return false;

  /* Interrupt handlers must also save call_used_regs
     if they are live or when calling nested functions.  */
  if (interrupt_handler)
    {
      if (df_regs_ever_live_p (regno))
        return true;

      if (!crtl->is_leaf && call_used_regs[regno])
        return true;
    }

  /* Never need to save registers that aren't touched.  */
  if (!df_regs_ever_live_p (regno))
    return false;

  /* Otherwise save everything that isn't call-clobbered.  */
  return !call_used_regs[regno];
}

/* Emit RTL for a MOVEM or FMOVEM instruction.  BASE + OFFSET represents
   the lowest memory address.  COUNT is the number of registers to be
   moved, with register REGNO + I being moved if bit I of MASK is set.
   STORE_P specifies the direction of the move and ADJUST_STACK_P says
   whether or not this is pre-decrement (if STORE_P) or post-increment
   (if !STORE_P) operation.  */

static rtx_insn *
m68k_emit_movem (rtx base, HOST_WIDE_INT offset,
                 unsigned int count, unsigned int regno,
                 unsigned int mask, bool store_p, bool adjust_stack_p)
{
  int i;
  rtx body, addr, src, operands[2];
  machine_mode mode;

  body = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (adjust_stack_p + count));
  mode = reg_raw_mode[regno];
  i = 0;

  if (adjust_stack_p)
    {
      src = plus_constant (Pmode, base,
                           (count
                            * GET_MODE_SIZE (mode)
                            * (HOST_WIDE_INT) (store_p ? -1 : 1)));
      XVECEXP (body, 0, i++) = gen_rtx_SET (base, src);
    }

  for (; mask != 0; mask >>= 1, regno++)
    if (mask & 1)
      {
        addr = plus_constant (Pmode, base, offset);
        operands[!store_p] = gen_frame_mem (mode, addr);
        operands[store_p] = gen_rtx_REG (mode, regno);
        XVECEXP (body, 0, i++)
          = gen_rtx_SET (operands[0], operands[1]);
        offset += GET_MODE_SIZE (mode);
      }
  gcc_assert (i == XVECLEN (body, 0));

  return emit_insn (body);
}

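/* Illustrative sketch: a three-register store of %d2-%d4 with
   ADJUST_STACK_P set (BASE == %sp, OFFSET == -12) builds a PARALLEL of
   the form

     (parallel [(set %sp (plus %sp (const_int -12)))
                (set (mem (plus %sp (const_int -12))) (reg %d2))
                (set (mem (plus %sp (const_int -8))) (reg %d3))
                (set (mem (plus %sp (const_int -4))) (reg %d4))])

   which the movem patterns in m68k.md can match as a single moveml.  */
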
/* Make INSN a frame-related instruction.  */

static void
m68k_set_frame_related (rtx_insn *insn)
{
  rtx body;
  int i;

  RTX_FRAME_RELATED_P (insn) = 1;
  body = PATTERN (insn);
  if (GET_CODE (body) == PARALLEL)
    for (i = 0; i < XVECLEN (body, 0); i++)
      RTX_FRAME_RELATED_P (XVECEXP (body, 0, i)) = 1;
}

/* Emit RTL for the "prologue" define_expand.  */

void
m68k_expand_prologue (void)
{
  HOST_WIDE_INT fsize_with_regs;
  rtx limit, src, dest;

  m68k_compute_frame_layout ();

  if (flag_stack_usage_info)
    current_function_static_stack_size
      = current_frame.size + current_frame.offset;

  /* If the stack limit is a symbol, we can check it here,
     before actually allocating the space.  */
  if (crtl->limit_stack
      && GET_CODE (stack_limit_rtx) == SYMBOL_REF)
    {
      limit = plus_constant (Pmode, stack_limit_rtx, current_frame.size + 4);
      if (!m68k_legitimate_constant_p (Pmode, limit))
        {
          emit_move_insn (gen_rtx_REG (Pmode, D0_REG), limit);
          limit = gen_rtx_REG (Pmode, D0_REG);
        }
      emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode,
                                            stack_pointer_rtx, limit),
                               stack_pointer_rtx, limit,
                               const1_rtx));
    }

  fsize_with_regs = current_frame.size;
  if (TARGET_COLDFIRE)
    {
      /* ColdFire's move multiple instructions do not allow pre-decrement
         addressing.  Add the size of movem saves to the initial stack
         allocation instead.  */
      if (current_frame.reg_no >= MIN_MOVEM_REGS)
        fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
      if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
        fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
    }

  if (frame_pointer_needed)
    {
      if (fsize_with_regs == 0 && TUNE_68040)
        {
          /* On the 68040, two separate moves are faster than link.w 0.  */
          dest = gen_frame_mem (Pmode,
                                gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
          m68k_set_frame_related (emit_move_insn (dest, frame_pointer_rtx));
          m68k_set_frame_related (emit_move_insn (frame_pointer_rtx,
                                                  stack_pointer_rtx));
        }
      else if (fsize_with_regs < 0x8000 || TARGET_68020)
        m68k_set_frame_related
          (emit_insn (gen_link (frame_pointer_rtx,
                                GEN_INT (-4 - fsize_with_regs))));
      else
        {
          m68k_set_frame_related
            (emit_insn (gen_link (frame_pointer_rtx, GEN_INT (-4))));
          m68k_set_frame_related
            (emit_insn (gen_addsi3 (stack_pointer_rtx,
                                    stack_pointer_rtx,
                                    GEN_INT (-fsize_with_regs))));
        }

      /* If the frame pointer is needed, emit a special barrier that
         will prevent the scheduler from moving stores to the frame
         before the stack adjustment.  */
      emit_insn (gen_stack_tie (stack_pointer_rtx, frame_pointer_rtx));
    }
  else if (fsize_with_regs != 0)
    m68k_set_frame_related
      (emit_insn (gen_addsi3 (stack_pointer_rtx,
                              stack_pointer_rtx,
                              GEN_INT (-fsize_with_regs))));

  if (current_frame.fpu_mask)
    {
      gcc_assert (current_frame.fpu_no >= MIN_FMOVEM_REGS);
      if (TARGET_68881)
        m68k_set_frame_related
          (m68k_emit_movem (stack_pointer_rtx,
                            current_frame.fpu_no * -GET_MODE_SIZE (XFmode),
                            current_frame.fpu_no, FP0_REG,
                            current_frame.fpu_mask, true, true));
      else
        {
          int offset;

          /* If we're using moveml to save the integer registers,
             the stack pointer will point to the bottom of the moveml
             save area.  Find the stack offset of the first FP register.  */
          if (current_frame.reg_no < MIN_MOVEM_REGS)
            offset = 0;
          else
            offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
          m68k_set_frame_related
            (m68k_emit_movem (stack_pointer_rtx, offset,
                              current_frame.fpu_no, FP0_REG,
                              current_frame.fpu_mask, true, false));
        }
    }

  /* If the stack limit is not a symbol, check it here.
     This has the disadvantage that it may be too late...  */
  if (crtl->limit_stack)
    {
      if (REG_P (stack_limit_rtx))
        emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode, stack_pointer_rtx,
                                              stack_limit_rtx),
                                 stack_pointer_rtx, stack_limit_rtx,
                                 const1_rtx));

      else if (GET_CODE (stack_limit_rtx) != SYMBOL_REF)
        warning (0, "stack limit expression is not supported");
    }

  if (current_frame.reg_no < MIN_MOVEM_REGS)
    {
      /* Store each register separately in the same order moveml does.  */
      int i;

      for (i = 16; i-- > 0; )
        if (current_frame.reg_mask & (1 << i))
          {
            src = gen_rtx_REG (SImode, D0_REG + i);
            dest = gen_frame_mem (SImode,
                                  gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
            m68k_set_frame_related (emit_insn (gen_movsi (dest, src)));
          }
    }
  else
    {
      if (TARGET_COLDFIRE)
        /* The required register save space has already been allocated.
           The first register should be stored at (%sp).  */
        m68k_set_frame_related
          (m68k_emit_movem (stack_pointer_rtx, 0,
                            current_frame.reg_no, D0_REG,
                            current_frame.reg_mask, true, false));
      else
        m68k_set_frame_related
          (m68k_emit_movem (stack_pointer_rtx,
                            current_frame.reg_no * -GET_MODE_SIZE (SImode),
                            current_frame.reg_no, D0_REG,
                            current_frame.reg_mask, true, true));
    }

  if (!TARGET_SEP_DATA
      && crtl->uses_pic_offset_table)
    emit_insn (gen_load_got (pic_offset_table_rtx));
}
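
/* For a small 68020 frame with a frame pointer and three saved data
   registers, the expansion above typically assembles to something like
   (illustrative):

       link.w %fp,#-16
       moveml %d2-%d4,-(%sp)  */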
\f
/* Return true if a simple (return) instruction is sufficient for this
   function (i.e. if no epilogue is needed).  */

bool
m68k_use_return_insn (void)
{
  if (!reload_completed || frame_pointer_needed || get_frame_size () != 0)
    return false;

  m68k_compute_frame_layout ();
  return current_frame.offset == 0;
}

/* Emit RTL for the "epilogue" or "sibcall_epilogue" define_expand;
   SIBCALL_P says which.

   The function epilogue should not depend on the current stack pointer!
   It should use the frame pointer only, if there is a frame pointer.
   This is mandatory because of alloca; we also take advantage of it to
   omit stack adjustments before returning.  */

void
m68k_expand_epilogue (bool sibcall_p)
{
  HOST_WIDE_INT fsize, fsize_with_regs;
  bool big, restore_from_sp;

  m68k_compute_frame_layout ();

  fsize = current_frame.size;
  big = false;
  restore_from_sp = false;

  /* FIXME: crtl->is_leaf below is too strong.
     What we really need to know there is if there could be pending
     stack adjustment needed at that point.  */
  restore_from_sp = (!frame_pointer_needed
                     || (!cfun->calls_alloca && crtl->is_leaf));

  /* fsize_with_regs is the size we need to adjust the sp when
     popping the frame.  */
  fsize_with_regs = fsize;
  if (TARGET_COLDFIRE && restore_from_sp)
    {
      /* ColdFire's move multiple instructions do not allow post-increment
         addressing.  Add the size of movem loads to the final deallocation
         instead.  */
      if (current_frame.reg_no >= MIN_MOVEM_REGS)
        fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
      if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
        fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
    }

  if (current_frame.offset + fsize >= 0x8000
      && !restore_from_sp
      && (current_frame.reg_mask || current_frame.fpu_mask))
    {
      if (TARGET_COLDFIRE
          && (current_frame.reg_no >= MIN_MOVEM_REGS
              || current_frame.fpu_no >= MIN_FMOVEM_REGS))
        {
          /* ColdFire's move multiple instructions do not support the
             (d8,Ax,Xi) addressing mode, so we may as well use a normal
             stack-based restore.  */
          emit_move_insn (gen_rtx_REG (Pmode, A1_REG),
                          GEN_INT (-(current_frame.offset + fsize)));
          emit_insn (gen_blockage ());
          emit_insn (gen_addsi3 (stack_pointer_rtx,
                                 gen_rtx_REG (Pmode, A1_REG),
                                 frame_pointer_rtx));
          restore_from_sp = true;
        }
      else
        {
          emit_move_insn (gen_rtx_REG (Pmode, A1_REG), GEN_INT (-fsize));
          fsize = 0;
          big = true;
        }
    }

  if (current_frame.reg_no < MIN_MOVEM_REGS)
    {
      /* Restore each register separately in the same order moveml does.  */
      int i;
      HOST_WIDE_INT offset;

      offset = current_frame.offset + fsize;
      for (i = 0; i < 16; i++)
        if (current_frame.reg_mask & (1 << i))
          {
            rtx addr;

            if (big)
              {
                /* Generate the address -OFFSET(%fp,%a1.l).  */
                addr = gen_rtx_REG (Pmode, A1_REG);
                addr = gen_rtx_PLUS (Pmode, addr, frame_pointer_rtx);
                addr = plus_constant (Pmode, addr, -offset);
              }
            else if (restore_from_sp)
              addr = gen_rtx_POST_INC (Pmode, stack_pointer_rtx);
            else
              addr = plus_constant (Pmode, frame_pointer_rtx, -offset);
            emit_move_insn (gen_rtx_REG (SImode, D0_REG + i),
                            gen_frame_mem (SImode, addr));
            offset -= GET_MODE_SIZE (SImode);
          }
    }
  else if (current_frame.reg_mask)
    {
      if (big)
        m68k_emit_movem (gen_rtx_PLUS (Pmode,
                                       gen_rtx_REG (Pmode, A1_REG),
                                       frame_pointer_rtx),
                         -(current_frame.offset + fsize),
                         current_frame.reg_no, D0_REG,
                         current_frame.reg_mask, false, false);
      else if (restore_from_sp)
        m68k_emit_movem (stack_pointer_rtx, 0,
                         current_frame.reg_no, D0_REG,
                         current_frame.reg_mask, false,
                         !TARGET_COLDFIRE);
      else
        m68k_emit_movem (frame_pointer_rtx,
                         -(current_frame.offset + fsize),
                         current_frame.reg_no, D0_REG,
                         current_frame.reg_mask, false, false);
    }

  if (current_frame.fpu_no > 0)
    {
      if (big)
        m68k_emit_movem (gen_rtx_PLUS (Pmode,
                                       gen_rtx_REG (Pmode, A1_REG),
                                       frame_pointer_rtx),
                         -(current_frame.foffset + fsize),
                         current_frame.fpu_no, FP0_REG,
                         current_frame.fpu_mask, false, false);
      else if (restore_from_sp)
        {
          if (TARGET_COLDFIRE)
            {
              int offset;

              /* If we used moveml to restore the integer registers, the
                 stack pointer will still point to the bottom of the moveml
                 save area.  Find the stack offset of the first FP
                 register.  */
              if (current_frame.reg_no < MIN_MOVEM_REGS)
                offset = 0;
              else
                offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
              m68k_emit_movem (stack_pointer_rtx, offset,
                               current_frame.fpu_no, FP0_REG,
                               current_frame.fpu_mask, false, false);
            }
          else
            m68k_emit_movem (stack_pointer_rtx, 0,
                             current_frame.fpu_no, FP0_REG,
                             current_frame.fpu_mask, false, true);
        }
      else
        m68k_emit_movem (frame_pointer_rtx,
                         -(current_frame.foffset + fsize),
                         current_frame.fpu_no, FP0_REG,
                         current_frame.fpu_mask, false, false);
    }

  emit_insn (gen_blockage ());
  if (frame_pointer_needed)
    emit_insn (gen_unlink (frame_pointer_rtx));
  else if (fsize_with_regs)
    emit_insn (gen_addsi3 (stack_pointer_rtx,
                           stack_pointer_rtx,
                           GEN_INT (fsize_with_regs)));

  if (crtl->calls_eh_return)
    emit_insn (gen_addsi3 (stack_pointer_rtx,
                           stack_pointer_rtx,
                           EH_RETURN_STACKADJ_RTX));

  if (!sibcall_p)
    emit_jump_insn (ret_rtx);
}
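
/* The matching epilogue for the prologue example above, in the leaf
   case where restoring through %sp is safe (illustrative):

       moveml (%sp)+,%d2-%d4
       unlk %fp
       rts  */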
\f
/* Return true if X is a valid comparison operator for the dbcc
   instruction.

   Note it rejects floating point comparison operators.
   (In the future we could use Fdbcc).

   It also rejects some comparisons when CC_NO_OVERFLOW is set.  */

int
valid_dbcc_comparison_p_2 (rtx x, machine_mode mode ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (x))
    {
    case EQ: case NE: case GTU: case LTU:
    case GEU: case LEU:
      return 1;

      /* Reject some when CC_NO_OVERFLOW is set.  This may be over
         conservative */
    case GT: case LT: case GE: case LE:
      return ! (cc_prev_status.flags & CC_NO_OVERFLOW);
    default:
      return 0;
    }
}

/* Return nonzero if flags are currently in the 68881 flag register.  */
int
flags_in_68881 (void)
{
  /* We could add support for these in the future */
  return cc_status.flags & CC_IN_68881;
}

/* Return true if PARALLEL contains register REGNO.  */
static bool
m68k_reg_present_p (const_rtx parallel, unsigned int regno)
{
  int i;

  if (REG_P (parallel) && REGNO (parallel) == regno)
    return true;

  if (GET_CODE (parallel) != PARALLEL)
    return false;

  for (i = 0; i < XVECLEN (parallel, 0); ++i)
    {
      const_rtx x;

      x = XEXP (XVECEXP (parallel, 0, i), 0);
      if (REG_P (x) && REGNO (x) == regno)
        return true;
    }

  return false;
}

/* Implement TARGET_FUNCTION_OK_FOR_SIBCALL_P.  */

static bool
m68k_ok_for_sibcall_p (tree decl, tree exp)
{
  enum m68k_function_kind kind;

  /* We cannot use sibcalls for nested functions because we use the
     static chain register for indirect calls.  */
  if (CALL_EXPR_STATIC_CHAIN (exp))
    return false;

  if (!VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
    {
      /* Check that the return value locations are the same.  For
         example that we aren't returning a value from the sibling in
         a D0 register but then need to transfer it to a A0 register.  */
      rtx cfun_value;
      rtx call_value;

      cfun_value = FUNCTION_VALUE (TREE_TYPE (DECL_RESULT (cfun->decl)),
                                   cfun->decl);
      call_value = FUNCTION_VALUE (TREE_TYPE (exp), decl);

      /* Check that the values are equal or that the result the callee
         function returns is superset of what the current function returns.  */
      if (!(rtx_equal_p (cfun_value, call_value)
            || (REG_P (cfun_value)
                && m68k_reg_present_p (call_value, REGNO (cfun_value)))))
        return false;
    }

  kind = m68k_get_function_kind (current_function_decl);
  if (kind == m68k_fk_normal_function)
    /* We can always sibcall from a normal function, because it's
       undefined if it is calling an interrupt function.  */
    return true;

  /* Otherwise we can only sibcall if the function kind is known to be
     the same.  */
  if (decl && m68k_get_function_kind (decl) == kind)
    return true;

  return false;
}

/* On the m68k all args are always pushed.  */

static rtx
m68k_function_arg (cumulative_args_t cum ATTRIBUTE_UNUSED,
                   machine_mode mode ATTRIBUTE_UNUSED,
                   const_tree type ATTRIBUTE_UNUSED,
                   bool named ATTRIBUTE_UNUSED)
{
  return NULL_RTX;
}

static void
m68k_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
                           const_tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);

  *cum += (mode != BLKmode
           ? (GET_MODE_SIZE (mode) + 3) & ~3
           : (int_size_in_bytes (type) + 3) & ~3);
}

/* Convert X to a legitimate function call memory reference and return the
   result.  */

rtx
m68k_legitimize_call_address (rtx x)
{
  gcc_assert (MEM_P (x));
  if (call_operand (XEXP (x, 0), VOIDmode))
    return x;
  return replace_equiv_address (x, force_reg (Pmode, XEXP (x, 0)));
}

/* Likewise for sibling calls.  */

rtx
m68k_legitimize_sibcall_address (rtx x)
{
  gcc_assert (MEM_P (x));
  if (sibcall_operand (XEXP (x, 0), VOIDmode))
    return x;

  emit_move_insn (gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM), XEXP (x, 0));
  return replace_equiv_address (x, gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM));
}

/* Convert X to a legitimate address and return it if successful.  Otherwise
   return X.

   For the 68000, we handle X+REG by loading X into a register R and
   using R+REG.  R will go in an address reg and indexing will be used.
   However, if REG is a broken-out memory address or multiplication,
   nothing needs to be done because REG can certainly go in an address reg.  */

static rtx
m68k_legitimize_address (rtx x, rtx oldx, machine_mode mode)
{
  if (m68k_tls_symbol_p (x))
    return m68k_legitimize_tls_address (x);

  if (GET_CODE (x) == PLUS)
    {
      int ch = (x) != (oldx);
      int copied = 0;

#define COPY_ONCE(Y) if (!copied) { Y = copy_rtx (Y); copied = ch = 1; }

      if (GET_CODE (XEXP (x, 0)) == MULT)
        {
          COPY_ONCE (x);
          XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
        }
      if (GET_CODE (XEXP (x, 1)) == MULT)
        {
          COPY_ONCE (x);
          XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
        }
      if (ch)
        {
          if (GET_CODE (XEXP (x, 1)) == REG
              && GET_CODE (XEXP (x, 0)) == REG)
            {
              if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
                {
                  COPY_ONCE (x);
                  x = force_operand (x, 0);
                }
              return x;
            }
          if (memory_address_p (mode, x))
            return x;
        }
      if (GET_CODE (XEXP (x, 0)) == REG
          || (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
              && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
              && GET_MODE (XEXP (XEXP (x, 0), 0)) == HImode))
        {
          rtx temp = gen_reg_rtx (Pmode);
          rtx val = force_operand (XEXP (x, 1), 0);
          emit_move_insn (temp, val);
          COPY_ONCE (x);
          XEXP (x, 1) = temp;
          if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
              && GET_CODE (XEXP (x, 0)) == REG)
            x = force_operand (x, 0);
        }
      else if (GET_CODE (XEXP (x, 1)) == REG
               || (GET_CODE (XEXP (x, 1)) == SIGN_EXTEND
                   && GET_CODE (XEXP (XEXP (x, 1), 0)) == REG
                   && GET_MODE (XEXP (XEXP (x, 1), 0)) == HImode))
        {
          rtx temp = gen_reg_rtx (Pmode);
          rtx val = force_operand (XEXP (x, 0), 0);
          emit_move_insn (temp, val);
          COPY_ONCE (x);
          XEXP (x, 0) = temp;
          if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
              && GET_CODE (XEXP (x, 1)) == REG)
            x = force_operand (x, 0);
        }
    }

  return x;
}


/* Output a dbCC; jCC sequence.  Note we do not handle the
   floating point version of this sequence (Fdbcc).  We also
   do not handle alternative conditions when CC_NO_OVERFLOW is
   set.  It is assumed that valid_dbcc_comparison_p and flags_in_68881 will
   kick those out before we get here.  */

void
output_dbcc_and_branch (rtx *operands)
{
  switch (GET_CODE (operands[3]))
    {
    case EQ:
      output_asm_insn ("dbeq %0,%l1\n\tjeq %l2", operands);
      break;

    case NE:
      output_asm_insn ("dbne %0,%l1\n\tjne %l2", operands);
      break;

    case GT:
      output_asm_insn ("dbgt %0,%l1\n\tjgt %l2", operands);
      break;

    case GTU:
      output_asm_insn ("dbhi %0,%l1\n\tjhi %l2", operands);
      break;

    case LT:
      output_asm_insn ("dblt %0,%l1\n\tjlt %l2", operands);
      break;

    case LTU:
      output_asm_insn ("dbcs %0,%l1\n\tjcs %l2", operands);
      break;

    case GE:
      output_asm_insn ("dbge %0,%l1\n\tjge %l2", operands);
      break;

    case GEU:
      output_asm_insn ("dbcc %0,%l1\n\tjcc %l2", operands);
      break;

    case LE:
      output_asm_insn ("dble %0,%l1\n\tjle %l2", operands);
      break;

    case LEU:
      output_asm_insn ("dbls %0,%l1\n\tjls %l2", operands);
      break;

    default:
      gcc_unreachable ();
    }

  /* If the decrement is to be done in SImode, then we have
     to compensate for the fact that dbcc decrements in HImode.  */
  switch (GET_MODE (operands[0]))
    {
    case E_SImode:
      output_asm_insn ("clr%.w %0\n\tsubq%.l #1,%0\n\tjpl %l1", operands);
      break;

    case E_HImode:
      break;

    default:
      gcc_unreachable ();
    }
}

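/* For an EQ test with an SImode counter in %d0, the two switches above
   produce (illustrative label names):

       dbeq %d0,L1
       jeq L2
       clr.w %d0
       subq.l #1,%d0
       jpl L1  */
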
const char *
output_scc_di (rtx op, rtx operand1, rtx operand2, rtx dest)
{
  rtx loperands[7];
  enum rtx_code op_code = GET_CODE (op);

  /* This does not produce a useful cc.  */
  CC_STATUS_INIT;

  /* The m68k cmp.l instruction requires operand1 to be a reg as used
     below.  Swap the operands and change the op if these requirements
     are not fulfilled.  */
  if (GET_CODE (operand2) == REG && GET_CODE (operand1) != REG)
    {
      rtx tmp = operand1;

      operand1 = operand2;
      operand2 = tmp;
      op_code = swap_condition (op_code);
    }
  loperands[0] = operand1;
  if (GET_CODE (operand1) == REG)
    loperands[1] = gen_rtx_REG (SImode, REGNO (operand1) + 1);
  else
    loperands[1] = adjust_address (operand1, SImode, 4);
  if (operand2 != const0_rtx)
    {
      loperands[2] = operand2;
      if (GET_CODE (operand2) == REG)
        loperands[3] = gen_rtx_REG (SImode, REGNO (operand2) + 1);
      else
        loperands[3] = adjust_address (operand2, SImode, 4);
    }
  loperands[4] = gen_label_rtx ();
  if (operand2 != const0_rtx)
    output_asm_insn ("cmp%.l %2,%0\n\tjne %l4\n\tcmp%.l %3,%1", loperands);
  else
    {
      if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[0]))
        output_asm_insn ("tst%.l %0", loperands);
      else
        output_asm_insn ("cmp%.w #0,%0", loperands);

      output_asm_insn ("jne %l4", loperands);

      if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[1]))
        output_asm_insn ("tst%.l %1", loperands);
      else
        output_asm_insn ("cmp%.w #0,%1", loperands);
    }

  loperands[5] = dest;

  switch (op_code)
    {
    case EQ:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                         CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("seq %5", loperands);
      break;

    case NE:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                         CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sne %5", loperands);
      break;

    case GT:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("shi %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                         CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sgt %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                         CODE_LABEL_NUMBER (loperands[6]));
      break;

    case GTU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                         CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("shi %5", loperands);
      break;

    case LT:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("scs %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                         CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("slt %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                         CODE_LABEL_NUMBER (loperands[6]));
      break;

    case LTU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                         CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("scs %5", loperands);
      break;

    case GE:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("scc %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                         CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sge %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                         CODE_LABEL_NUMBER (loperands[6]));
      break;

    case GEU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                         CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("scc %5", loperands);
      break;

    case LE:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("sls %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                         CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sle %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                         CODE_LABEL_NUMBER (loperands[6]));
      break;

    case LEU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                         CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sls %5", loperands);
      break;

    default:
      gcc_unreachable ();
    }
  return "";
}

1799 const char *
1800 output_btst (rtx *operands, rtx countop, rtx dataop, rtx_insn *insn, int signpos)
1801 {
1802 operands[0] = countop;
1803 operands[1] = dataop;
1804
1805 if (GET_CODE (countop) == CONST_INT)
1806 {
1807 register int count = INTVAL (countop);
1808 /* If COUNT is bigger than the size of the storage unit in use,
1809 advance to the containing unit of the same size. */
1810 if (count > signpos)
1811 {
1812 int offset = (count & ~signpos) / 8;
1813 count = count & signpos;
1814 operands[1] = dataop = adjust_address (dataop, QImode, offset);
1815 }
1816 if (count == signpos)
1817 cc_status.flags = CC_NOT_POSITIVE | CC_Z_IN_NOT_N;
1818 else
1819 cc_status.flags = CC_NOT_NEGATIVE | CC_Z_IN_NOT_N;
1820
1821 /* These three statements used to use next_insns_test_no...
1822 but it appears that this should do the same job. */
1823 if (count == 31
1824 && next_insn_tests_no_inequality (insn))
1825 return "tst%.l %1";
1826 if (count == 15
1827 && next_insn_tests_no_inequality (insn))
1828 return "tst%.w %1";
1829 if (count == 7
1830 && next_insn_tests_no_inequality (insn))
1831 return "tst%.b %1";
1832 /* Try to use `movew to ccr' followed by the appropriate branch insn.
1833 Unfortunately, on some m68k variants that's slower than btst.
1834 On 68000 and higher, that should also work for all HImode operands. */
1835 if (TUNE_CPU32 || TARGET_COLDFIRE || optimize_size)
1836 {
1837 if (count == 3 && DATA_REG_P (operands[1])
1838 && next_insn_tests_no_inequality (insn))
1839 {
1840 cc_status.flags = CC_NOT_NEGATIVE | CC_Z_IN_NOT_N | CC_NO_OVERFLOW;
1841 return "move%.w %1,%%ccr";
1842 }
1843 if (count == 2 && DATA_REG_P (operands[1])
1844 && next_insn_tests_no_inequality (insn))
1845 {
1846 cc_status.flags = CC_NOT_NEGATIVE | CC_INVERTED | CC_NO_OVERFLOW;
1847 return "move%.w %1,%%ccr";
1848 }
1849 /* count == 1 followed by bvc/bvs and
1850 count == 0 followed by bcc/bcs are also possible, but need
1851 m68k-specific CC_Z_IN_NOT_V and CC_Z_IN_NOT_C flags. */
1852 }
1853
1854 cc_status.flags = CC_NOT_NEGATIVE;
1855 }
1856 return "btst %0,%1";
1857 }
1858 \f
1859 /* Return true if X is a legitimate base register. STRICT_P says
1860 whether we need strict checking. */
1861
1862 bool
1863 m68k_legitimate_base_reg_p (rtx x, bool strict_p)
1864 {
1865 /* Allow SUBREG everywhere we allow REG. This results in better code. */
1866 if (!strict_p && GET_CODE (x) == SUBREG)
1867 x = SUBREG_REG (x);
1868
1869 return (REG_P (x)
1870 && (strict_p
1871 ? REGNO_OK_FOR_BASE_P (REGNO (x))
1872 : REGNO_OK_FOR_BASE_NONSTRICT_P (REGNO (x))));
1873 }
1874
1875 /* Return true if X is a legitimate index register. STRICT_P says
1876 whether we need strict checking. */
1877
1878 bool
1879 m68k_legitimate_index_reg_p (rtx x, bool strict_p)
1880 {
1881 if (!strict_p && GET_CODE (x) == SUBREG)
1882 x = SUBREG_REG (x);
1883
1884 return (REG_P (x)
1885 && (strict_p
1886 ? REGNO_OK_FOR_INDEX_P (REGNO (x))
1887 : REGNO_OK_FOR_INDEX_NONSTRICT_P (REGNO (x))));
1888 }
1889
1890 /* Return true if X is a legitimate index expression for a (d8,An,Xn) or
1891 (bd,An,Xn) addressing mode. Fill in the INDEX and SCALE fields of
1892 ADDRESS if so. STRICT_P says whether we need strict checking. */
1893
1894 static bool
1895 m68k_decompose_index (rtx x, bool strict_p, struct m68k_address *address)
1896 {
1897 int scale;
1898
1899 /* Check for a scale factor. */
1900 scale = 1;
1901 if ((TARGET_68020 || TARGET_COLDFIRE)
1902 && GET_CODE (x) == MULT
1903 && GET_CODE (XEXP (x, 1)) == CONST_INT
1904 && (INTVAL (XEXP (x, 1)) == 2
1905 || INTVAL (XEXP (x, 1)) == 4
1906 || (INTVAL (XEXP (x, 1)) == 8
1907 && (TARGET_COLDFIRE_FPU || !TARGET_COLDFIRE))))
1908 {
1909 scale = INTVAL (XEXP (x, 1));
1910 x = XEXP (x, 0);
1911 }
1912
1913 /* Check for a word extension. */
1914 if (!TARGET_COLDFIRE
1915 && GET_CODE (x) == SIGN_EXTEND
1916 && GET_MODE (XEXP (x, 0)) == HImode)
1917 x = XEXP (x, 0);
1918
1919 if (m68k_legitimate_index_reg_p (x, strict_p))
1920 {
1921 address->scale = scale;
1922 address->index = x;
1923 return true;
1924 }
1925
1926 return false;
1927 }
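
/* Illustrative sketch only (a hypothetical helper, not part of GCC):
   on a 68020 or ColdFire target, a scaled index such as
   (mult (reg:SI %d1) (const_int 4)) should decompose into
   index == d1 with scale == 4.  */

static void ATTRIBUTE_UNUSED
m68k_decompose_index_example (void)
{
  struct m68k_address address;
  rtx d1 = gen_rtx_REG (SImode, 1);                /* data register d1 */
  rtx x = gen_rtx_MULT (SImode, d1, GEN_INT (4));  /* d1 * 4 */

  memset (&address, 0, sizeof (address));
  if (m68k_decompose_index (x, false, &address))
    gcc_assert (address.index == d1 && address.scale == 4);
}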
1928
1929 /* Return true if X is an illegitimate symbolic constant. */
1930
1931 bool
1932 m68k_illegitimate_symbolic_constant_p (rtx x)
1933 {
1934 rtx base, offset;
1935
1936 if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P)
1937 {
1938 split_const (x, &base, &offset);
1939 if (GET_CODE (base) == SYMBOL_REF
1940 && !offset_within_block_p (base, INTVAL (offset)))
1941 return true;
1942 }
1943 return m68k_tls_reference_p (x, false);
1944 }
1945
1946 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
1947
1948 static bool
1949 m68k_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
1950 {
1951 return m68k_illegitimate_symbolic_constant_p (x);
1952 }
1953
1954 /* Return true if X is a legitimate constant address that can reach
1955 bytes in the range [X, X + REACH). STRICT_P says whether we need
1956 strict checking. */
1957
1958 static bool
1959 m68k_legitimate_constant_address_p (rtx x, unsigned int reach, bool strict_p)
1960 {
1961 rtx base, offset;
1962
1963 if (!CONSTANT_ADDRESS_P (x))
1964 return false;
1965
1966 if (flag_pic
1967 && !(strict_p && TARGET_PCREL)
1968 && symbolic_operand (x, VOIDmode))
1969 return false;
1970
1971 if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P && reach > 1)
1972 {
1973 split_const (x, &base, &offset);
1974 if (GET_CODE (base) == SYMBOL_REF
1975 && !offset_within_block_p (base, INTVAL (offset) + reach - 1))
1976 return false;
1977 }
1978
1979 return !m68k_tls_reference_p (x, false);
1980 }
1981
1982 /* Return true if X is a LABEL_REF for a jump table. Assume that unplaced
1983 labels will become jump tables. */
1984
1985 static bool
1986 m68k_jump_table_ref_p (rtx x)
1987 {
1988 if (GET_CODE (x) != LABEL_REF)
1989 return false;
1990
1991 rtx_insn *insn = as_a <rtx_insn *> (XEXP (x, 0));
1992 if (!NEXT_INSN (insn) && !PREV_INSN (insn))
1993 return true;
1994
1995 insn = next_nonnote_insn (insn);
1996 return insn && JUMP_TABLE_DATA_P (insn);
1997 }
1998
1999 /* Return true if X is a legitimate address for values of mode MODE.
2000 STRICT_P says whether strict checking is needed. If the address
2001 is valid, describe its components in *ADDRESS. */
2002
2003 static bool
2004 m68k_decompose_address (machine_mode mode, rtx x,
2005 bool strict_p, struct m68k_address *address)
2006 {
2007 unsigned int reach;
2008
2009 memset (address, 0, sizeof (*address));
2010
2011 if (mode == BLKmode)
2012 reach = 1;
2013 else
2014 reach = GET_MODE_SIZE (mode);
2015
2016 /* Check for (An) (mode 2). */
2017 if (m68k_legitimate_base_reg_p (x, strict_p))
2018 {
2019 address->base = x;
2020 return true;
2021 }
2022
2023 /* Check for -(An) and (An)+ (modes 3 and 4). */
2024 if ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_INC)
2025 && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
2026 {
2027 address->code = GET_CODE (x);
2028 address->base = XEXP (x, 0);
2029 return true;
2030 }
2031
2032 /* Check for (d16,An) (mode 5). */
2033 if (GET_CODE (x) == PLUS
2034 && GET_CODE (XEXP (x, 1)) == CONST_INT
2035 && IN_RANGE (INTVAL (XEXP (x, 1)), -0x8000, 0x8000 - reach)
2036 && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
2037 {
2038 address->base = XEXP (x, 0);
2039 address->offset = XEXP (x, 1);
2040 return true;
2041 }
2042
2043 /* Check for GOT loads. These are (bd,An,Xn) addresses if
2044 TARGET_68020 && flag_pic == 2, otherwise they are (d16,An)
2045 addresses. */
2046 if (GET_CODE (x) == PLUS
2047 && XEXP (x, 0) == pic_offset_table_rtx)
2048 {
2049 /* As we are processing a PLUS, do not unwrap RELOC32 symbols --
2050 they are invalid in this context. */
2051 if (m68k_unwrap_symbol (XEXP (x, 1), false) != XEXP (x, 1))
2052 {
2053 address->base = XEXP (x, 0);
2054 address->offset = XEXP (x, 1);
2055 return true;
2056 }
2057 }
2058
2059 /* The ColdFire FPU only accepts addressing modes 2-5. */
2060 if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
2061 return false;
2062
2063 /* Check for (xxx).w and (xxx).l. Also, in the TARGET_PCREL case,
2064 check for (d16,PC) or (bd,PC,Xn) with a suppressed index register.
2065 All these modes are variations of mode 7. */
2066 if (m68k_legitimate_constant_address_p (x, reach, strict_p))
2067 {
2068 address->offset = x;
2069 return true;
2070 }
2071
2072 /* Check for (d8,PC,Xn), a mode 7 form. This case is needed for
2073 tablejumps.
2074
2075 ??? do_tablejump creates these addresses before placing the target
2076 label, so we have to assume that unplaced labels are jump table
2077 references. It seems unlikely that we would ever generate indexed
2078 accesses to unplaced labels in other cases. */
2079 if (GET_CODE (x) == PLUS
2080 && m68k_jump_table_ref_p (XEXP (x, 1))
2081 && m68k_decompose_index (XEXP (x, 0), strict_p, address))
2082 {
2083 address->offset = XEXP (x, 1);
2084 return true;
2085 }
2086
2087 /* Everything hereafter deals with (d8,An,Xn.SIZE*SCALE) or
2088 (bd,An,Xn.SIZE*SCALE) addresses. */
2089
2090 if (TARGET_68020)
2091 {
2092 /* Check for a nonzero base displacement. */
2093 if (GET_CODE (x) == PLUS
2094 && m68k_legitimate_constant_address_p (XEXP (x, 1), reach, strict_p))
2095 {
2096 address->offset = XEXP (x, 1);
2097 x = XEXP (x, 0);
2098 }
2099
2100 /* Check for a suppressed index register. */
2101 if (m68k_legitimate_base_reg_p (x, strict_p))
2102 {
2103 address->base = x;
2104 return true;
2105 }
2106
2107 /* Check for a suppressed base register. Do not allow this case
2108 for non-symbolic offsets as it effectively gives gcc freedom
2109 to treat data registers as base registers, which can generate
2110 worse code. */
2111 if (address->offset
2112 && symbolic_operand (address->offset, VOIDmode)
2113 && m68k_decompose_index (x, strict_p, address))
2114 return true;
2115 }
2116 else
2117 {
2118 /* Check for a nonzero base displacement. */
2119 if (GET_CODE (x) == PLUS
2120 && GET_CODE (XEXP (x, 1)) == CONST_INT
2121 && IN_RANGE (INTVAL (XEXP (x, 1)), -0x80, 0x80 - reach))
2122 {
2123 address->offset = XEXP (x, 1);
2124 x = XEXP (x, 0);
2125 }
2126 }
2127
2128 /* We now expect the sum of a base and an index. */
2129 if (GET_CODE (x) == PLUS)
2130 {
2131 if (m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p)
2132 && m68k_decompose_index (XEXP (x, 1), strict_p, address))
2133 {
2134 address->base = XEXP (x, 0);
2135 return true;
2136 }
2137
2138 if (m68k_legitimate_base_reg_p (XEXP (x, 1), strict_p)
2139 && m68k_decompose_index (XEXP (x, 0), strict_p, address))
2140 {
2141 address->base = XEXP (x, 1);
2142 return true;
2143 }
2144 }
2145 return false;
2146 }
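
/* Illustrative sketch only (a hypothetical helper, not part of GCC):
   a (d16,An) operand such as 42(%a0) should decompose into
   base == a0 and offset == 42, with no index.  */

static void ATTRIBUTE_UNUSED
m68k_decompose_address_example (void)
{
  struct m68k_address address;
  rtx a0 = gen_rtx_REG (Pmode, 8);          /* address register a0 */
  rtx x = plus_constant (Pmode, a0, 42);    /* (plus (reg a0) (const_int 42)) */

  if (m68k_decompose_address (SImode, x, false, &address))
    gcc_assert (address.base == a0
                && INTVAL (address.offset) == 42
                && !address.index);
}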
2147
2148 /* Return true if X is a legitimate address for values of mode MODE.
2149 STRICT_P says whether strict checking is needed. */
2150
2151 bool
2152 m68k_legitimate_address_p (machine_mode mode, rtx x, bool strict_p)
2153 {
2154 struct m68k_address address;
2155
2156 return m68k_decompose_address (mode, x, strict_p, &address);
2157 }
2158
2159 /* Return true if X is a memory reference, describing its address in
2160 ADDRESS if so. Apply strict checking if called during or after reload. */
2161
2162 static bool
2163 m68k_legitimate_mem_p (rtx x, struct m68k_address *address)
2164 {
2165 return (MEM_P (x)
2166 && m68k_decompose_address (GET_MODE (x), XEXP (x, 0),
2167 reload_in_progress || reload_completed,
2168 address));
2169 }
2170
2171 /* Implement TARGET_LEGITIMATE_CONSTANT_P. */
2172
2173 bool
2174 m68k_legitimate_constant_p (machine_mode mode, rtx x)
2175 {
2176 return mode != XFmode && !m68k_illegitimate_symbolic_constant_p (x);
2177 }
2178
2179 /* Return true if X matches the 'Q' constraint. It must be a memory reference
2180 with a base address and no constant offset or index. */
2181
2182 bool
2183 m68k_matches_q_p (rtx x)
2184 {
2185 struct m68k_address address;
2186
2187 return (m68k_legitimate_mem_p (x, &address)
2188 && address.code == UNKNOWN
2189 && address.base
2190 && !address.offset
2191 && !address.index);
2192 }
2193
2194 /* Return true if X matches the 'U' constraint. It must be a memory
2195 reference with a base address, a constant offset and no index. */
2196
2197 bool
2198 m68k_matches_u_p (rtx x)
2199 {
2200 struct m68k_address address;
2201
2202 return (m68k_legitimate_mem_p (x, &address)
2203 && address.code == UNKNOWN
2204 && address.base
2205 && address.offset
2206 && !address.index);
2207 }
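
/* Illustrative sketch only (a hypothetical helper, not part of GCC):
   (%a0) satisfies 'Q', while 4(%a0) satisfies 'U' but not 'Q'.  */

static void ATTRIBUTE_UNUSED
m68k_constraint_example (void)
{
  rtx a0 = gen_rtx_REG (Pmode, 8);
  rtx q_mem = gen_rtx_MEM (SImode, a0);                            /* (a0) */
  rtx u_mem = gen_rtx_MEM (SImode, plus_constant (Pmode, a0, 4));  /* 4(a0) */

  gcc_assert (m68k_matches_q_p (q_mem) && !m68k_matches_u_p (q_mem));
  gcc_assert (m68k_matches_u_p (u_mem) && !m68k_matches_q_p (u_mem));
}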
2208
2209 /* Return GOT pointer. */
2210
2211 static rtx
2212 m68k_get_gp (void)
2213 {
2214 if (pic_offset_table_rtx == NULL_RTX)
2215 pic_offset_table_rtx = gen_rtx_REG (Pmode, PIC_REG);
2216
2217 crtl->uses_pic_offset_table = 1;
2218
2219 return pic_offset_table_rtx;
2220 }
2221
2222 /* M68K relocations, used to distinguish GOT and TLS relocations in UNSPEC
2223 wrappers. */
2224 enum m68k_reloc { RELOC_GOT, RELOC_TLSGD, RELOC_TLSLDM, RELOC_TLSLDO,
2225 RELOC_TLSIE, RELOC_TLSLE };
2226
2227 #define TLS_RELOC_P(RELOC) ((RELOC) != RELOC_GOT)
2228
2229 /* Wrap symbol X into unspec representing relocation RELOC.
2230 BASE_REG - register that should be added to the result.
2231 TEMP_REG - if non-null, temporary register. */
2232
2233 static rtx
2234 m68k_wrap_symbol (rtx x, enum m68k_reloc reloc, rtx base_reg, rtx temp_reg)
2235 {
2236 bool use_x_p;
2237
2238 use_x_p = (base_reg == pic_offset_table_rtx) ? TARGET_XGOT : TARGET_XTLS;
2239
2240 if (TARGET_COLDFIRE && use_x_p)
2241 /* When compiling with the -mx{got,tls} switch, the code will look like this:
2242
2243 move.l <X>@<RELOC>,<TEMP_REG>
2244 add.l <BASE_REG>,<TEMP_REG> */
2245 {
2246 /* Wrap X in UNSPEC_RELOC32 to tip off m68k_output_addr_const_extra
2247 to put @RELOC after the reference. */
2248 x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)),
2249 UNSPEC_RELOC32);
2250 x = gen_rtx_CONST (Pmode, x);
2251
2252 if (temp_reg == NULL)
2253 {
2254 gcc_assert (can_create_pseudo_p ());
2255 temp_reg = gen_reg_rtx (Pmode);
2256 }
2257
2258 emit_move_insn (temp_reg, x);
2259 emit_insn (gen_addsi3 (temp_reg, temp_reg, base_reg));
2260 x = temp_reg;
2261 }
2262 else
2263 {
2264 x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)),
2265 UNSPEC_RELOC16);
2266 x = gen_rtx_CONST (Pmode, x);
2267
2268 x = gen_rtx_PLUS (Pmode, base_reg, x);
2269 }
2270
2271 return x;
2272 }
2273
2274 /* Helper for m68k_unwrap_symbol.
2275 Also, if unwrapping was successful (that is, if ORIG differs from the
2276 return value), sets *RELOC_PTR to the relocation type of the symbol. */
2277
2278 static rtx
2279 m68k_unwrap_symbol_1 (rtx orig, bool unwrap_reloc32_p,
2280 enum m68k_reloc *reloc_ptr)
2281 {
2282 if (GET_CODE (orig) == CONST)
2283 {
2284 rtx x;
2285 enum m68k_reloc dummy;
2286
2287 x = XEXP (orig, 0);
2288
2289 if (reloc_ptr == NULL)
2290 reloc_ptr = &dummy;
2291
2292 /* Handle an addend. */
2293 if ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS)
2294 && CONST_INT_P (XEXP (x, 1)))
2295 x = XEXP (x, 0);
2296
2297 if (GET_CODE (x) == UNSPEC)
2298 {
2299 switch (XINT (x, 1))
2300 {
2301 case UNSPEC_RELOC16:
2302 orig = XVECEXP (x, 0, 0);
2303 *reloc_ptr = (enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1));
2304 break;
2305
2306 case UNSPEC_RELOC32:
2307 if (unwrap_reloc32_p)
2308 {
2309 orig = XVECEXP (x, 0, 0);
2310 *reloc_ptr = (enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1));
2311 }
2312 break;
2313
2314 default:
2315 break;
2316 }
2317 }
2318 }
2319
2320 return orig;
2321 }
2322
2323 /* Unwrap symbol from UNSPEC_RELOC16 and, if unwrap_reloc32_p,
2324 UNSPEC_RELOC32 wrappers. */
2325
2326 rtx
2327 m68k_unwrap_symbol (rtx orig, bool unwrap_reloc32_p)
2328 {
2329 return m68k_unwrap_symbol_1 (orig, unwrap_reloc32_p, NULL);
2330 }
2331
2332 /* Adjust a decorated address operand before outputting assembler for it. */
2333
2334 static void
2335 m68k_adjust_decorated_operand (rtx op)
2336 {
2337 int i;
2338
2339 /* Combine and possibly other optimizations may do a good job of
2340 converting
2341 (const (unspec [(symbol)]))
2342 into
2343 (const (plus (unspec [(symbol)])
2344 (const_int N))).
2345 The problem with this is emitting @TLS or @GOT decorations.
2346 The decoration is emitted when processing (unspec), so the
2347 result would be "#symbol@TLSLE+N" instead of "#symbol+N@TLSLE".
2348
2349 It seems that the easiest solution to this is to convert such
2350 operands to
2351 (const (unspec [(plus (symbol)
2352 (const_int N))])).
2353 Note that the top level of the operand remains intact, so we don't have
2354 to patch up anything outside of the operand. */
2355
2356 subrtx_var_iterator::array_type array;
2357 FOR_EACH_SUBRTX_VAR (iter, array, op, ALL)
2358 {
2359 rtx x = *iter;
2360 if (m68k_unwrap_symbol (x, true) != x)
2361 {
2362 rtx plus;
2363
2364 gcc_assert (GET_CODE (x) == CONST);
2365 plus = XEXP (x, 0);
2366
2367 if (GET_CODE (plus) == PLUS || GET_CODE (plus) == MINUS)
2368 {
2369 rtx unspec;
2370 rtx addend;
2371
2372 unspec = XEXP (plus, 0);
2373 gcc_assert (GET_CODE (unspec) == UNSPEC);
2374 addend = XEXP (plus, 1);
2375 gcc_assert (CONST_INT_P (addend));
2376
2377 /* We now have all the pieces; rearrange them. */
2378
2379 /* Move symbol to plus. */
2380 XEXP (plus, 0) = XVECEXP (unspec, 0, 0);
2381
2382 /* Move plus inside unspec. */
2383 XVECEXP (unspec, 0, 0) = plus;
2384
2385 /* Move unspec to top level of const. */
2386 XEXP (x, 0) = unspec;
2387 }
2388 iter.skip_subrtxes ();
2389 }
2390 }
2391 }
2392
2393 /* Move X to a register and add REG_EQUAL note pointing to ORIG.
2394 If REG is non-null, use it; generate new pseudo otherwise. */
2395
2396 static rtx
2397 m68k_move_to_reg (rtx x, rtx orig, rtx reg)
2398 {
2399 rtx_insn *insn;
2400
2401 if (reg == NULL_RTX)
2402 {
2403 gcc_assert (can_create_pseudo_p ());
2404 reg = gen_reg_rtx (Pmode);
2405 }
2406
2407 insn = emit_move_insn (reg, x);
2408 /* Put a REG_EQUAL note on this insn, so that it can be optimized
2409 by the loop optimizer. */
2410 set_unique_reg_note (insn, REG_EQUAL, orig);
2411
2412 return reg;
2413 }
2414
2415 /* Does the same as m68k_wrap_symbol, but returns a memory reference to
2416 the GOT slot. */
2417
2418 static rtx
2419 m68k_wrap_symbol_into_got_ref (rtx x, enum m68k_reloc reloc, rtx temp_reg)
2420 {
2421 x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), temp_reg);
2422
2423 x = gen_rtx_MEM (Pmode, x);
2424 MEM_READONLY_P (x) = 1;
2425
2426 return x;
2427 }
2428
2429 /* Legitimize PIC addresses. If the address is already
2430 position-independent, we return ORIG. Newly generated
2431 position-independent addresses go to REG. If we need more
2432 than one register, we lose.
2433
2434 An address is legitimized by making an indirect reference
2435 through the Global Offset Table with the name of the symbol
2436 used as an offset.
2437
2438 The assembler and linker are responsible for placing the
2439 address of the symbol in the GOT. The function prologue
2440 is responsible for initializing a5 to the starting address
2441 of the GOT.
2442
2443 The assembler is also responsible for translating a symbol name
2444 into a constant displacement from the start of the GOT.
2445
2446 A quick example may make things a little clearer:
2447
2448 When not generating PIC code, to store the value 12345 into _foo
2449 we would generate the following code:
2450
2451 movel #12345, _foo
2452
2453 When generating PIC code, two transformations are made. First, the
2454 compiler loads the address of foo into a register, so the first transformation yields:
2455
2456 lea _foo, a0
2457 movel #12345, a0@
2458
2459 The code in movsi will intercept the lea instruction and call this
2460 routine which will transform the instructions into:
2461
2462 movel a5@(_foo:w), a0
2463 movel #12345, a0@
2464
2465
2466 That (in a nutshell) is how *all* symbol and label references are
2467 handled. */
2468
2469 rtx
2470 legitimize_pic_address (rtx orig, machine_mode mode ATTRIBUTE_UNUSED,
2471 rtx reg)
2472 {
2473 rtx pic_ref = orig;
2474
2475 /* First handle a simple SYMBOL_REF or LABEL_REF. */
2476 if (GET_CODE (orig) == SYMBOL_REF || GET_CODE (orig) == LABEL_REF)
2477 {
2478 gcc_assert (reg);
2479
2480 pic_ref = m68k_wrap_symbol_into_got_ref (orig, RELOC_GOT, reg);
2481 pic_ref = m68k_move_to_reg (pic_ref, orig, reg);
2482 }
2483 else if (GET_CODE (orig) == CONST)
2484 {
2485 rtx base;
2486
2487 /* Make sure this has not already been legitimized. */
2488 if (m68k_unwrap_symbol (orig, true) != orig)
2489 return orig;
2490
2491 gcc_assert (reg);
2492
2493 /* Legitimize both operands of the PLUS. */
2494 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
2495
2496 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
2497 orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
2498 base == reg ? 0 : reg);
2499
2500 if (GET_CODE (orig) == CONST_INT)
2501 pic_ref = plus_constant (Pmode, base, INTVAL (orig));
2502 else
2503 pic_ref = gen_rtx_PLUS (Pmode, base, orig);
2504 }
2505
2506 return pic_ref;
2507 }
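
/* Hypothetical usage sketch (not GCC's actual caller; the real calls
   are made when expanding moves): symbolic sources are routed through
   legitimize_pic_address when compiling PIC code.  Assumes it runs at
   a point where new pseudos may still be created.  */

static rtx ATTRIBUTE_UNUSED
m68k_pic_usage_example (rtx src)
{
  if (flag_pic && symbolic_operand (src, VOIDmode))
    src = legitimize_pic_address (src, SImode, gen_reg_rtx (Pmode));
  return src;
}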
2508
2509 /* The __tls_get_addr symbol. */
2510 static GTY(()) rtx m68k_tls_get_addr;
2511
2512 /* Return SYMBOL_REF for __tls_get_addr. */
2513
2514 static rtx
2515 m68k_get_tls_get_addr (void)
2516 {
2517 if (m68k_tls_get_addr == NULL_RTX)
2518 m68k_tls_get_addr = init_one_libfunc ("__tls_get_addr");
2519
2520 return m68k_tls_get_addr;
2521 }
2522
2523 /* If true, return the libcall result in A0 instead of the usual D0. */
2524 static bool m68k_libcall_value_in_a0_p = false;
2525
2526 /* Emit instruction sequence that calls __tls_get_addr. X is
2527 the TLS symbol we are referencing and RELOC is the symbol type to use
2528 (either TLSGD or TLSLDM). EQV is the REG_EQUAL note for the sequence
2529 emitted. A pseudo register with the result of the __tls_get_addr call is
2530 returned. */
2531
2532 static rtx
2533 m68k_call_tls_get_addr (rtx x, rtx eqv, enum m68k_reloc reloc)
2534 {
2535 rtx a0;
2536 rtx_insn *insns;
2537 rtx dest;
2538
2539 /* Emit the call sequence. */
2540 start_sequence ();
2541
2542 /* FIXME: Unfortunately, emit_library_call_value does not
2543 consider (plus (%a5) (const (unspec))) to be a good enough
2544 operand for push, so it forces it into a register. The bad
2545 thing about this is that the combiner, due to copy propagation and other
2546 optimizations, sometimes cannot fix this up later. As a consequence,
2547 an additional register may be allocated, resulting in a spill.
2548 For reference, see args processing loops in
2549 calls.c:emit_library_call_value_1.
2550 For testcase, see gcc.target/m68k/tls-{gd, ld}.c */
2551 x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), NULL_RTX);
2552
2553 /* __tls_get_addr() is not a libcall, but emitting a libcall_value
2554 is the simplest way of generating a call. The difference between
2555 __tls_get_addr() and an ordinary libcall is that the result is returned
2556 in A0 instead of the usual D0. To work around this, we use
2557 m68k_libcall_value_in_a0_p, which temporarily switches the result to A0. */
2558
2559 m68k_libcall_value_in_a0_p = true;
2560 a0 = emit_library_call_value (m68k_get_tls_get_addr (), NULL_RTX, LCT_PURE,
2561 Pmode, x, Pmode);
2562 m68k_libcall_value_in_a0_p = false;
2563
2564 insns = get_insns ();
2565 end_sequence ();
2566
2567 gcc_assert (can_create_pseudo_p ());
2568 dest = gen_reg_rtx (Pmode);
2569 emit_libcall_block (insns, dest, a0, eqv);
2570
2571 return dest;
2572 }
2573
2574 /* The __m68k_read_tp symbol. */
2575 static GTY(()) rtx m68k_read_tp;
2576
2577 /* Return SYMBOL_REF for __m68k_read_tp. */
2578
2579 static rtx
2580 m68k_get_m68k_read_tp (void)
2581 {
2582 if (m68k_read_tp == NULL_RTX)
2583 m68k_read_tp = init_one_libfunc ("__m68k_read_tp");
2584
2585 return m68k_read_tp;
2586 }
2587
2588 /* Emit instruction sequence that calls __m68k_read_tp.
2589 A pseudo register with the result of the __m68k_read_tp call is returned. */
2590
2591 static rtx
2592 m68k_call_m68k_read_tp (void)
2593 {
2594 rtx a0;
2595 rtx eqv;
2596 rtx_insn *insns;
2597 rtx dest;
2598
2599 start_sequence ();
2600
2601 /* __m68k_read_tp() is not a libcall, but emitting a libcall_value
2602 is the simplest way of generating a call. The difference between
2603 __m68k_read_tp() and an ordinary libcall is that the result is returned
2604 in A0 instead of the usual D0. To work around this, we use
2605 m68k_libcall_value_in_a0_p, which temporarily switches the result to A0. */
2606
2607 /* Emit the call sequence. */
2608 m68k_libcall_value_in_a0_p = true;
2609 a0 = emit_library_call_value (m68k_get_m68k_read_tp (), NULL_RTX, LCT_PURE,
2610 Pmode);
2611 m68k_libcall_value_in_a0_p = false;
2612 insns = get_insns ();
2613 end_sequence ();
2614
2615 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2616 share the m68k_read_tp result with other IE/LE model accesses. */
2617 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const1_rtx), UNSPEC_RELOC32);
2618
2619 gcc_assert (can_create_pseudo_p ());
2620 dest = gen_reg_rtx (Pmode);
2621 emit_libcall_block (insns, dest, a0, eqv);
2622
2623 return dest;
2624 }
2625
2626 /* Return a legitimized address for accessing TLS SYMBOL_REF X.
2627 For explanations of the instruction sequences, see the TLS/NPTL ABI for m68k and
2628 ColdFire. */
2629
2630 rtx
2631 m68k_legitimize_tls_address (rtx orig)
2632 {
2633 switch (SYMBOL_REF_TLS_MODEL (orig))
2634 {
2635 case TLS_MODEL_GLOBAL_DYNAMIC:
2636 orig = m68k_call_tls_get_addr (orig, orig, RELOC_TLSGD);
2637 break;
2638
2639 case TLS_MODEL_LOCAL_DYNAMIC:
2640 {
2641 rtx eqv;
2642 rtx a0;
2643 rtx x;
2644
2645 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2646 share the LDM result with other LD model accesses. */
2647 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
2648 UNSPEC_RELOC32);
2649
2650 a0 = m68k_call_tls_get_addr (orig, eqv, RELOC_TLSLDM);
2651
2652 x = m68k_wrap_symbol (orig, RELOC_TLSLDO, a0, NULL_RTX);
2653
2654 if (can_create_pseudo_p ())
2655 x = m68k_move_to_reg (x, orig, NULL_RTX);
2656
2657 orig = x;
2658 break;
2659 }
2660
2661 case TLS_MODEL_INITIAL_EXEC:
2662 {
2663 rtx a0;
2664 rtx x;
2665
2666 a0 = m68k_call_m68k_read_tp ();
2667
2668 x = m68k_wrap_symbol_into_got_ref (orig, RELOC_TLSIE, NULL_RTX);
2669 x = gen_rtx_PLUS (Pmode, x, a0);
2670
2671 if (can_create_pseudo_p ())
2672 x = m68k_move_to_reg (x, orig, NULL_RTX);
2673
2674 orig = x;
2675 break;
2676 }
2677
2678 case TLS_MODEL_LOCAL_EXEC:
2679 {
2680 rtx a0;
2681 rtx x;
2682
2683 a0 = m68k_call_m68k_read_tp ();
2684
2685 x = m68k_wrap_symbol (orig, RELOC_TLSLE, a0, NULL_RTX);
2686
2687 if (can_create_pseudo_p ())
2688 x = m68k_move_to_reg (x, orig, NULL_RTX);
2689
2690 orig = x;
2691 break;
2692 }
2693
2694 default:
2695 gcc_unreachable ();
2696 }
2697
2698 return orig;
2699 }
2700
2701 /* Return true if X is a TLS symbol. */
2702
2703 static bool
2704 m68k_tls_symbol_p (rtx x)
2705 {
2706 if (!TARGET_HAVE_TLS)
2707 return false;
2708
2709 if (GET_CODE (x) != SYMBOL_REF)
2710 return false;
2711
2712 return SYMBOL_REF_TLS_MODEL (x) != 0;
2713 }
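
/* Hypothetical usage sketch (not GCC's actual caller; the real calls
   are made when expanding moves): TLS symbols must be legitimized
   before they can be used as move operands.  */

static rtx ATTRIBUTE_UNUSED
m68k_tls_usage_example (rtx src)
{
  if (m68k_tls_symbol_p (src))
    src = m68k_legitimize_tls_address (src);
  return src;
}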
2714
2715 /* If !LEGITIMATE_P, return true if X is a TLS symbol reference,
2716 though an illegitimate one.
2717 If LEGITIMATE_P, return true if X is a legitimate TLS symbol reference. */
2718
2719 bool
2720 m68k_tls_reference_p (rtx x, bool legitimate_p)
2721 {
2722 if (!TARGET_HAVE_TLS)
2723 return false;
2724
2725 if (!legitimate_p)
2726 {
2727 subrtx_var_iterator::array_type array;
2728 FOR_EACH_SUBRTX_VAR (iter, array, x, ALL)
2729 {
2730 rtx x = *iter;
2731
2732 /* Note: this is not the same as m68k_tls_symbol_p. */
2733 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0)
2734 return true;
2735
2736 /* Don't recurse into legitimate TLS references. */
2737 if (m68k_tls_reference_p (x, true))
2738 iter.skip_subrtxes ();
2739 }
2740 return false;
2741 }
2742 else
2743 {
2744 enum m68k_reloc reloc = RELOC_GOT;
2745
2746 return (m68k_unwrap_symbol_1 (x, true, &reloc) != x
2747 && TLS_RELOC_P (reloc));
2748 }
2749 }
2750
2751 \f
2752
2753 #define USE_MOVQ(i) ((unsigned) ((i) + 128) <= 255)
2754
2755 /* Return the type of move that should be used for integer I. */
2756
2757 M68K_CONST_METHOD
2758 m68k_const_method (HOST_WIDE_INT i)
2759 {
2760 unsigned u;
2761
2762 if (USE_MOVQ (i))
2763 return MOVQ;
2764
2765 /* The ColdFire doesn't have byte or word operations. */
2766 /* FIXME: This may not be useful for the m68060 either. */
2767 if (!TARGET_COLDFIRE)
2768 {
2769 /* If -256 < N < 256 but N is not in range for a moveq,
2770 N^0xff will be, so use moveq #N^0xff, dreg; not.b dreg. */
2771 if (USE_MOVQ (i ^ 0xff))
2772 return NOTB;
2773 /* Likewise, try with not.w */
2774 if (USE_MOVQ (i ^ 0xffff))
2775 return NOTW;
2776 /* This is the only value where neg.w is useful */
2777 if (i == -65408)
2778 return NEGW;
2779 }
2780
2781 /* Try also with swap. */
2782 u = i;
2783 if (USE_MOVQ ((u >> 16) | (u << 16)))
2784 return SWAP;
2785
2786 if (TARGET_ISAB)
2787 {
2788 /* Try using MVZ/MVS with an immediate value to load constants. */
2789 if (i >= 0 && i <= 65535)
2790 return MVZ;
2791 if (i >= -32768 && i <= 32767)
2792 return MVS;
2793 }
2794
2795 /* Otherwise, use move.l */
2796 return MOVL;
2797 }
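
/* Illustrative examples (a sketch; assumes a plain 68020-style target,
   i.e. !TARGET_COLDFIRE and !TARGET_ISAB, so that every method is
   available):  */

static void ATTRIBUTE_UNUSED
m68k_const_method_examples (void)
{
  gcc_assert (m68k_const_method (100) == MOVQ);        /* moveq #100 */
  gcc_assert (m68k_const_method (-200) == NOTB);       /* moveq #-57; not.b */
  gcc_assert (m68k_const_method (65435) == NOTW);      /* moveq #100; not.w */
  gcc_assert (m68k_const_method (-65408) == NEGW);     /* moveq #-128; neg.w */
  gcc_assert (m68k_const_method (0x10000) == SWAP);    /* moveq #1; swap */
  gcc_assert (m68k_const_method (0x12345678) == MOVL); /* move.l */
}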
2798
2799 /* Return the cost of moving constant I into a data register. */
2800
2801 static int
2802 const_int_cost (HOST_WIDE_INT i)
2803 {
2804 switch (m68k_const_method (i))
2805 {
2806 case MOVQ:
2807 /* Constants between -128 and 127 are cheap due to moveq. */
2808 return 0;
2809 case MVZ:
2810 case MVS:
2811 case NOTB:
2812 case NOTW:
2813 case NEGW:
2814 case SWAP:
2815 /* Constants easily generated by moveq + not.b/not.w/neg.w/swap. */
2816 return 1;
2817 case MOVL:
2818 return 2;
2819 default:
2820 gcc_unreachable ();
2821 }
2822 }
2823
2824 static bool
2825 m68k_rtx_costs (rtx x, machine_mode mode, int outer_code,
2826 int opno ATTRIBUTE_UNUSED,
2827 int *total, bool speed ATTRIBUTE_UNUSED)
2828 {
2829 int code = GET_CODE (x);
2830
2831 switch (code)
2832 {
2833 case CONST_INT:
2834 /* Constant zero is super cheap due to clr instruction. */
2835 if (x == const0_rtx)
2836 *total = 0;
2837 else
2838 *total = const_int_cost (INTVAL (x));
2839 return true;
2840
2841 case CONST:
2842 case LABEL_REF:
2843 case SYMBOL_REF:
2844 *total = 3;
2845 return true;
2846
2847 case CONST_DOUBLE:
2848 /* Make 0.0 cheaper than other floating constants to
2849 encourage creating tstsf and tstdf insns. */
2850 if (outer_code == COMPARE
2851 && (x == CONST0_RTX (SFmode) || x == CONST0_RTX (DFmode)))
2852 *total = 4;
2853 else
2854 *total = 5;
2855 return true;
2856
2857 /* These are vaguely right for a 68020. */
2858 /* The costs for long multiply have been adjusted to work properly
2859 in synth_mult on the 68020, relative to an average of the time
2860 for add and the time for shift, taking away a little more because
2861 sometimes move insns are needed. */
2862 /* div?.w is relatively cheaper on 68000 counted in COSTS_N_INSNS
2863 terms. */
2864 #define MULL_COST \
2865 (TUNE_68060 ? 2 \
2866 : TUNE_68040 ? 5 \
2867 : (TUNE_CFV2 && TUNE_EMAC) ? 3 \
2868 : (TUNE_CFV2 && TUNE_MAC) ? 4 \
2869 : TUNE_CFV2 ? 8 \
2870 : TARGET_COLDFIRE ? 3 : 13)
2871
2872 #define MULW_COST \
2873 (TUNE_68060 ? 2 \
2874 : TUNE_68040 ? 3 \
2875 : TUNE_68000_10 ? 5 \
2876 : (TUNE_CFV2 && TUNE_EMAC) ? 3 \
2877 : (TUNE_CFV2 && TUNE_MAC) ? 2 \
2878 : TUNE_CFV2 ? 8 \
2879 : TARGET_COLDFIRE ? 2 : 8)
2880
2881 #define DIVW_COST \
2882 (TARGET_CF_HWDIV ? 11 \
2883 : TUNE_68000_10 || TARGET_COLDFIRE ? 12 : 27)
2884
2885 case PLUS:
2886 /* An lea costs about three times as much as a simple add. */
2887 if (mode == SImode
2888 && GET_CODE (XEXP (x, 1)) == REG
2889 && GET_CODE (XEXP (x, 0)) == MULT
2890 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
2891 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2892 && (INTVAL (XEXP (XEXP (x, 0), 1)) == 2
2893 || INTVAL (XEXP (XEXP (x, 0), 1)) == 4
2894 || INTVAL (XEXP (XEXP (x, 0), 1)) == 8))
2895 {
2896 /* lea an@(dx:l:i),am */
2897 *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 2 : 3);
2898 return true;
2899 }
2900 return false;
2901
2902 case ASHIFT:
2903 case ASHIFTRT:
2904 case LSHIFTRT:
2905 if (TUNE_68060)
2906 {
2907 *total = COSTS_N_INSNS (1);
2908 return true;
2909 }
2910 if (TUNE_68000_10)
2911 {
2912 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
2913 {
2914 if (INTVAL (XEXP (x, 1)) < 16)
2915 *total = COSTS_N_INSNS (2) + INTVAL (XEXP (x, 1)) / 2;
2916 else
2917 /* We're using clrw + swap for these cases. */
2918 *total = COSTS_N_INSNS (4) + (INTVAL (XEXP (x, 1)) - 16) / 2;
2919 }
2920 else
2921 *total = COSTS_N_INSNS (10); /* Worst case. */
2922 return true;
2923 }
2924 /* A shift by a big integer takes an extra instruction. */
2925 if (GET_CODE (XEXP (x, 1)) == CONST_INT
2926 && (INTVAL (XEXP (x, 1)) == 16))
2927 {
2928 *total = COSTS_N_INSNS (2); /* clrw;swap */
2929 return true;
2930 }
2931 if (GET_CODE (XEXP (x, 1)) == CONST_INT
2932 && !(INTVAL (XEXP (x, 1)) > 0
2933 && INTVAL (XEXP (x, 1)) <= 8))
2934 {
2935 *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 1 : 3); /* lsr #i,dn */
2936 return true;
2937 }
2938 return false;
2939
2940 case MULT:
2941 if ((GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
2942 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
2943 && mode == SImode)
2944 *total = COSTS_N_INSNS (MULW_COST);
2945 else if (mode == QImode || mode == HImode)
2946 *total = COSTS_N_INSNS (MULW_COST);
2947 else
2948 *total = COSTS_N_INSNS (MULL_COST);
2949 return true;
2950
2951 case DIV:
2952 case UDIV:
2953 case MOD:
2954 case UMOD:
2955 if (mode == QImode || mode == HImode)
2956 *total = COSTS_N_INSNS (DIVW_COST); /* div.w */
2957 else if (TARGET_CF_HWDIV)
2958 *total = COSTS_N_INSNS (18);
2959 else
2960 *total = COSTS_N_INSNS (43); /* div.l */
2961 return true;
2962
2963 case ZERO_EXTRACT:
2964 if (outer_code == COMPARE)
2965 *total = 0;
2966 return false;
2967
2968 default:
2969 return false;
2970 }
2971 }
2972
2973 /* Return an instruction to move CONST_INT OPERANDS[1] into data register
2974 OPERANDS[0]. */
2975
2976 static const char *
2977 output_move_const_into_data_reg (rtx *operands)
2978 {
2979 HOST_WIDE_INT i;
2980
2981 i = INTVAL (operands[1]);
2982 switch (m68k_const_method (i))
2983 {
2984 case MVZ:
2985 return "mvzw %1,%0";
2986 case MVS:
2987 return "mvsw %1,%0";
2988 case MOVQ:
2989 return "moveq %1,%0";
2990 case NOTB:
2991 CC_STATUS_INIT;
2992 operands[1] = GEN_INT (i ^ 0xff);
2993 return "moveq %1,%0\n\tnot%.b %0";
2994 case NOTW:
2995 CC_STATUS_INIT;
2996 operands[1] = GEN_INT (i ^ 0xffff);
2997 return "moveq %1,%0\n\tnot%.w %0";
2998 case NEGW:
2999 CC_STATUS_INIT;
3000 return "moveq #-128,%0\n\tneg%.w %0";
3001 case SWAP:
3002 {
3003 unsigned u = i;
3004
3005 operands[1] = GEN_INT ((u << 16) | (u >> 16));
3006 return "moveq %1,%0\n\tswap %0";
3007 }
3008 case MOVL:
3009 return "move%.l %1,%0";
3010 default:
3011 gcc_unreachable ();
3012 }
3013 }
3014
3015 /* Return true if I can be handled by ISA B's mov3q instruction. */
3016
3017 bool
3018 valid_mov3q_const (HOST_WIDE_INT i)
3019 {
3020 return TARGET_ISAB && (i == -1 || IN_RANGE (i, 1, 7));
3021 }
3022
3023 /* Return an instruction to move CONST_INT OPERANDS[1] into
3024 OPERANDS[0]. */
3025
3026 static const char *
3027 output_move_simode_const (rtx *operands)
3028 {
3029 rtx dest;
3030 HOST_WIDE_INT src;
3031
3032 dest = operands[0];
3033 src = INTVAL (operands[1]);
3034 if (src == 0
3035 && (DATA_REG_P (dest) || MEM_P (dest))
3036 /* clr insns on 68000 read before writing. */
3037 && ((TARGET_68010 || TARGET_COLDFIRE)
3038 || !(MEM_P (dest) && MEM_VOLATILE_P (dest))))
3039 return "clr%.l %0";
3040 else if (GET_MODE (dest) == SImode && valid_mov3q_const (src))
3041 return "mov3q%.l %1,%0";
3042 else if (src == 0 && ADDRESS_REG_P (dest))
3043 return "sub%.l %0,%0";
3044 else if (DATA_REG_P (dest))
3045 return output_move_const_into_data_reg (operands);
3046 else if (ADDRESS_REG_P (dest) && IN_RANGE (src, -0x8000, 0x7fff))
3047 {
3048 if (valid_mov3q_const (src))
3049 return "mov3q%.l %1,%0";
3050 return "move%.w %1,%0";
3051 }
3052 else if (MEM_P (dest)
3053 && GET_CODE (XEXP (dest, 0)) == PRE_DEC
3054 && REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
3055 && IN_RANGE (src, -0x8000, 0x7fff))
3056 {
3057 if (valid_mov3q_const (src))
3058 return "mov3q%.l %1,%-";
3059 return "pea %a1";
3060 }
3061 return "move%.l %1,%0";
3062 }
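
/* Illustrative sketch only (hypothetical; in GCC this routine is
   reached from the output templates for SImode moves): storing zero
   into a data register always selects the "clr%.l %0" template.  */

static void ATTRIBUTE_UNUSED
output_move_simode_const_example (void)
{
  rtx ops[2];

  ops[0] = gen_rtx_REG (SImode, 0);   /* data register d0 */
  ops[1] = const0_rtx;
  gcc_assert (strcmp (output_move_simode_const (ops), "clr%.l %0") == 0);
}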
3063
3064 const char *
3065 output_move_simode (rtx *operands)
3066 {
3067 if (GET_CODE (operands[1]) == CONST_INT)
3068 return output_move_simode_const (operands);
3069 else if ((GET_CODE (operands[1]) == SYMBOL_REF
3070 || GET_CODE (operands[1]) == CONST)
3071 && push_operand (operands[0], SImode))
3072 return "pea %a1";
3073 else if ((GET_CODE (operands[1]) == SYMBOL_REF
3074 || GET_CODE (operands[1]) == CONST)
3075 && ADDRESS_REG_P (operands[0]))
3076 return "lea %a1,%0";
3077 return "move%.l %1,%0";
3078 }
3079
3080 const char *
3081 output_move_himode (rtx *operands)
3082 {
3083 if (GET_CODE (operands[1]) == CONST_INT)
3084 {
3085 if (operands[1] == const0_rtx
3086 && (DATA_REG_P (operands[0])
3087 || GET_CODE (operands[0]) == MEM)
3088 /* clr insns on 68000 read before writing. */
3089 && ((TARGET_68010 || TARGET_COLDFIRE)
3090 || !(GET_CODE (operands[0]) == MEM
3091 && MEM_VOLATILE_P (operands[0]))))
3092 return "clr%.w %0";
3093 else if (operands[1] == const0_rtx
3094 && ADDRESS_REG_P (operands[0]))
3095 return "sub%.l %0,%0";
3096 else if (DATA_REG_P (operands[0])
3097 && INTVAL (operands[1]) < 128
3098 && INTVAL (operands[1]) >= -128)
3099 return "moveq %1,%0";
3100 else if (INTVAL (operands[1]) < 0x8000
3101 && INTVAL (operands[1]) >= -0x8000)
3102 return "move%.w %1,%0";
3103 }
3104 else if (CONSTANT_P (operands[1]))
3105 return "move%.l %1,%0";
3106 return "move%.w %1,%0";
3107 }
3108
3109 const char *
3110 output_move_qimode (rtx *operands)
3111 {
3112 /* The 68k family always modifies the stack pointer by at least 2, even for
3113 byte pushes. The 5200 (ColdFire) does not do this. */
3114
3115 /* This case is generated by the pushqi1 pattern now. */
3116 gcc_assert (!(GET_CODE (operands[0]) == MEM
3117 && GET_CODE (XEXP (operands[0], 0)) == PRE_DEC
3118 && XEXP (XEXP (operands[0], 0), 0) == stack_pointer_rtx
3119 && ! ADDRESS_REG_P (operands[1])
3120 && ! TARGET_COLDFIRE));
3121
3122 /* clr and st insns on 68000 read before writing. */
3123 if (!ADDRESS_REG_P (operands[0])
3124 && ((TARGET_68010 || TARGET_COLDFIRE)
3125 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3126 {
3127 if (operands[1] == const0_rtx)
3128 return "clr%.b %0";
3129 if ((!TARGET_COLDFIRE || DATA_REG_P (operands[0]))
3130 && GET_CODE (operands[1]) == CONST_INT
3131 && (INTVAL (operands[1]) & 255) == 255)
3132 {
3133 CC_STATUS_INIT;
3134 return "st %0";
3135 }
3136 }
3137 if (GET_CODE (operands[1]) == CONST_INT
3138 && DATA_REG_P (operands[0])
3139 && INTVAL (operands[1]) < 128
3140 && INTVAL (operands[1]) >= -128)
3141 return "moveq %1,%0";
3142 if (operands[1] == const0_rtx && ADDRESS_REG_P (operands[0]))
3143 return "sub%.l %0,%0";
3144 if (GET_CODE (operands[1]) != CONST_INT && CONSTANT_P (operands[1]))
3145 return "move%.l %1,%0";
3146 /* The 68k family (including the 5200 ColdFire) does not support byte moves to
3147 or from address registers. */
3148 if (ADDRESS_REG_P (operands[0]) || ADDRESS_REG_P (operands[1]))
3149 return "move%.w %1,%0";
3150 return "move%.b %1,%0";
3151 }
3152
3153 const char *
3154 output_move_stricthi (rtx *operands)
3155 {
3156 if (operands[1] == const0_rtx
3157 /* clr insns on 68000 read before writing. */
3158 && ((TARGET_68010 || TARGET_COLDFIRE)
3159 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3160 return "clr%.w %0";
3161 return "move%.w %1,%0";
3162 }
3163
3164 const char *
3165 output_move_strictqi (rtx *operands)
3166 {
3167 if (operands[1] == const0_rtx
3168 /* clr insns on 68000 read before writing. */
3169 && ((TARGET_68010 || TARGET_COLDFIRE)
3170 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3171 return "clr%.b %0";
3172 return "move%.b %1,%0";
3173 }
3174
3175 /* Return the best assembler insn template
3176 for moving operands[1] into operands[0] as a fullword. */
3177
3178 static const char *
3179 singlemove_string (rtx *operands)
3180 {
3181 if (GET_CODE (operands[1]) == CONST_INT)
3182 return output_move_simode_const (operands);
3183 return "move%.l %1,%0";
3184 }
3185
3186
3187 /* Output assembler or rtl code to perform a doubleword move insn
3188 with operands OPERANDS.
3189 Pointers to 3 helper functions should be specified:
3190 HANDLE_REG_ADJUST to adjust a register by a small value,
3191 HANDLE_COMPADR to compute an address and
3192 HANDLE_MOVSI to move 4 bytes. */
3193
3194 static void
3195 handle_move_double (rtx operands[2],
3196 void (*handle_reg_adjust) (rtx, int),
3197 void (*handle_compadr) (rtx [2]),
3198 void (*handle_movsi) (rtx [2]))
3199 {
3200 enum
3201 {
3202 REGOP, OFFSOP, MEMOP, PUSHOP, POPOP, CNSTOP, RNDOP
3203 } optype0, optype1;
3204 rtx latehalf[2];
3205 rtx middlehalf[2];
3206 rtx xops[2];
3207 rtx addreg0 = 0, addreg1 = 0;
3208 int dest_overlapped_low = 0;
3209 int size = GET_MODE_SIZE (GET_MODE (operands[0]));
3210
3211 middlehalf[0] = 0;
3212 middlehalf[1] = 0;
3213
3214 /* First classify both operands. */
3215
3216 if (REG_P (operands[0]))
3217 optype0 = REGOP;
3218 else if (offsettable_memref_p (operands[0]))
3219 optype0 = OFFSOP;
3220 else if (GET_CODE (XEXP (operands[0], 0)) == POST_INC)
3221 optype0 = POPOP;
3222 else if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC)
3223 optype0 = PUSHOP;
3224 else if (GET_CODE (operands[0]) == MEM)
3225 optype0 = MEMOP;
3226 else
3227 optype0 = RNDOP;
3228
3229 if (REG_P (operands[1]))
3230 optype1 = REGOP;
3231 else if (CONSTANT_P (operands[1]))
3232 optype1 = CNSTOP;
3233 else if (offsettable_memref_p (operands[1]))
3234 optype1 = OFFSOP;
3235 else if (GET_CODE (XEXP (operands[1], 0)) == POST_INC)
3236 optype1 = POPOP;
3237 else if (GET_CODE (XEXP (operands[1], 0)) == PRE_DEC)
3238 optype1 = PUSHOP;
3239 else if (GET_CODE (operands[1]) == MEM)
3240 optype1 = MEMOP;
3241 else
3242 optype1 = RNDOP;
3243
3244 /* Check for the cases that the operand constraints are not supposed
3245 to allow. Generating code for these cases is
3246 painful. */
3247 gcc_assert (optype0 != RNDOP && optype1 != RNDOP);
3248
3249 /* If one operand is decrementing and one is incrementing,
3250 decrement the former register explicitly
3251 and change that operand into ordinary indexing. */
3252
3253 if (optype0 == PUSHOP && optype1 == POPOP)
3254 {
3255 operands[0] = XEXP (XEXP (operands[0], 0), 0);
3256
3257 handle_reg_adjust (operands[0], -size);
3258
3259 if (GET_MODE (operands[1]) == XFmode)
3260 operands[0] = gen_rtx_MEM (XFmode, operands[0]);
3261 else if (GET_MODE (operands[1]) == DFmode)
3262 operands[0] = gen_rtx_MEM (DFmode, operands[0]);
3263 else
3264 operands[0] = gen_rtx_MEM (DImode, operands[0]);
3265 optype0 = OFFSOP;
3266 }
3267 if (optype0 == POPOP && optype1 == PUSHOP)
3268 {
3269 operands[1] = XEXP (XEXP (operands[1], 0), 0);
3270
3271 handle_reg_adjust (operands[1], -size);
3272
3273 if (GET_MODE (operands[1]) == XFmode)
3274 operands[1] = gen_rtx_MEM (XFmode, operands[1]);
3275 else if (GET_MODE (operands[1]) == DFmode)
3276 operands[1] = gen_rtx_MEM (DFmode, operands[1]);
3277 else
3278 operands[1] = gen_rtx_MEM (DImode, operands[1]);
3279 optype1 = OFFSOP;
3280 }
3281
3282 /* If an operand is an unoffsettable memory ref, find a register
3283 we can increment temporarily to make it refer to the second word. */
3284
3285 if (optype0 == MEMOP)
3286 addreg0 = find_addr_reg (XEXP (operands[0], 0));
3287
3288 if (optype1 == MEMOP)
3289 addreg1 = find_addr_reg (XEXP (operands[1], 0));
3290
3291 /* Ok, we can do one word at a time.
3292 Normally we do the low-numbered word first,
3293 but if either operand is autodecrementing then we
3294 do the high-numbered word first.
3295
3296 In either case, set up in LATEHALF the operands to use
3297 for the high-numbered word and in some cases alter the
3298 operands in OPERANDS to be suitable for the low-numbered word. */
3299
3300 if (size == 12)
3301 {
3302 if (optype0 == REGOP)
3303 {
3304 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 2);
3305 middlehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
3306 }
3307 else if (optype0 == OFFSOP)
3308 {
3309 middlehalf[0] = adjust_address (operands[0], SImode, 4);
3310 latehalf[0] = adjust_address (operands[0], SImode, size - 4);
3311 }
3312 else
3313 {
3314 middlehalf[0] = adjust_address (operands[0], SImode, 0);
3315 latehalf[0] = adjust_address (operands[0], SImode, 0);
3316 }
3317
3318 if (optype1 == REGOP)
3319 {
3320 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 2);
3321 middlehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
3322 }
3323 else if (optype1 == OFFSOP)
3324 {
3325 middlehalf[1] = adjust_address (operands[1], SImode, 4);
3326 latehalf[1] = adjust_address (operands[1], SImode, size - 4);
3327 }
3328 else if (optype1 == CNSTOP)
3329 {
3330 if (GET_CODE (operands[1]) == CONST_DOUBLE)
3331 {
3332 long l[3];
3333
3334 REAL_VALUE_TO_TARGET_LONG_DOUBLE
3335 (*CONST_DOUBLE_REAL_VALUE (operands[1]), l);
3336 operands[1] = GEN_INT (l[0]);
3337 middlehalf[1] = GEN_INT (l[1]);
3338 latehalf[1] = GEN_INT (l[2]);
3339 }
3340 else
3341 {
3342 /* No non-CONST_DOUBLE constant should ever appear
3343 here. */
3344 gcc_assert (!CONSTANT_P (operands[1]));
3345 }
3346 }
3347 else
3348 {
3349 middlehalf[1] = adjust_address (operands[1], SImode, 0);
3350 latehalf[1] = adjust_address (operands[1], SImode, 0);
3351 }
3352 }
3353 else
3354 /* size is not 12: */
3355 {
3356 if (optype0 == REGOP)
3357 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
3358 else if (optype0 == OFFSOP)
3359 latehalf[0] = adjust_address (operands[0], SImode, size - 4);
3360 else
3361 latehalf[0] = adjust_address (operands[0], SImode, 0);
3362
3363 if (optype1 == REGOP)
3364 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
3365 else if (optype1 == OFFSOP)
3366 latehalf[1] = adjust_address (operands[1], SImode, size - 4);
3367 else if (optype1 == CNSTOP)
3368 split_double (operands[1], &operands[1], &latehalf[1]);
3369 else
3370 latehalf[1] = adjust_address (operands[1], SImode, 0);
3371 }
3372
3373 /* If insn is effectively movd N(REG),-(REG) then we will do the high
3374 word first. We should use the adjusted operand 1 (which is N+4(REG))
3375 for the low word as well, to compensate for the first decrement of
3376 REG. */
3377 if (optype0 == PUSHOP
3378 && reg_overlap_mentioned_p (XEXP (XEXP (operands[0], 0), 0), operands[1]))
3379 operands[1] = middlehalf[1] = latehalf[1];
3380
3381 /* For (set (reg:DI N) (mem:DI ... (reg:SI N) ...)),
3382 if the upper part of reg N does not appear in the MEM, arrange to
3383 emit the move late-half first. Otherwise, compute the MEM address
3384 into the upper part of N and use that as a pointer to the memory
3385 operand. */
3386 if (optype0 == REGOP
3387 && (optype1 == OFFSOP || optype1 == MEMOP))
3388 {
3389 rtx testlow = gen_rtx_REG (SImode, REGNO (operands[0]));
3390
3391 if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
3392 && reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
3393 {
3394 /* If both halves of dest are used in the src memory address,
3395 compute the address into latehalf of dest.
3396 Note that this can't happen if the dest is two data regs. */
3397 compadr:
3398 xops[0] = latehalf[0];
3399 xops[1] = XEXP (operands[1], 0);
3400
3401 handle_compadr (xops);
3402 if (GET_MODE (operands[1]) == XFmode)
3403 {
3404 operands[1] = gen_rtx_MEM (XFmode, latehalf[0]);
3405 middlehalf[1] = adjust_address (operands[1], DImode, size - 8);
3406 latehalf[1] = adjust_address (operands[1], DImode, size - 4);
3407 }
3408 else
3409 {
3410 operands[1] = gen_rtx_MEM (DImode, latehalf[0]);
3411 latehalf[1] = adjust_address (operands[1], DImode, size - 4);
3412 }
3413 }
3414 else if (size == 12
3415 && reg_overlap_mentioned_p (middlehalf[0],
3416 XEXP (operands[1], 0)))
3417 {
3418 /* Check for two regs used by both source and dest.
3419 Note that this can't happen if the dest is all data regs.
3420 It can happen if the dest is d6, d7, a0.
3421 But in that case, latehalf is an addr reg, so
3422 the code at compadr does ok. */
3423
3424 if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
3425 || reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
3426 goto compadr;
3427
3428 /* JRV says this can't happen: */
3429 gcc_assert (!addreg0 && !addreg1);
3430
3431 /* Only the middle reg conflicts; simply put it last. */
3432 handle_movsi (operands);
3433 handle_movsi (latehalf);
3434 handle_movsi (middlehalf);
3435
3436 return;
3437 }
3438 else if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0)))
3439 /* If the low half of dest is mentioned in the source memory
3440 address, arrange to emit the move of the late half first. */
3441 dest_overlapped_low = 1;
3442 }
3443
3444 /* If one or both operands are autodecrementing,
3445 do the two words, high-numbered first. */
3446
3447 /* Likewise, if the first move would clobber the source of the second one,
3448 do them in the other order. This happens only for registers;
3449 such overlap can't happen in memory unless the user explicitly
3450 sets it up, and that is an undefined circumstance. */
3451
3452 if (optype0 == PUSHOP || optype1 == PUSHOP
3453 || (optype0 == REGOP && optype1 == REGOP
3454 && ((middlehalf[1] && REGNO (operands[0]) == REGNO (middlehalf[1]))
3455 || REGNO (operands[0]) == REGNO (latehalf[1])))
3456 || dest_overlapped_low)
3457 {
3458 /* Make any unoffsettable addresses point at high-numbered word. */
3459 if (addreg0)
3460 handle_reg_adjust (addreg0, size - 4);
3461 if (addreg1)
3462 handle_reg_adjust (addreg1, size - 4);
3463
3464 /* Do that word. */
3465 handle_movsi (latehalf);
3466
3467 /* Undo the adds we just did. */
3468 if (addreg0)
3469 handle_reg_adjust (addreg0, -4);
3470 if (addreg1)
3471 handle_reg_adjust (addreg1, -4);
3472
3473 if (size == 12)
3474 {
3475 handle_movsi (middlehalf);
3476
3477 if (addreg0)
3478 handle_reg_adjust (addreg0, -4);
3479 if (addreg1)
3480 handle_reg_adjust (addreg1, -4);
3481 }
3482
3483 /* Do low-numbered word. */
3484
3485 handle_movsi (operands);
3486 return;
3487 }
3488
3489 /* Normal case: do the two words, low-numbered first. */
3490
3491 handle_movsi (operands);
3492
3493 /* Do the middle one of the three words for long double. */
3494 if (size == 12)
3495 {
3496 if (addreg0)
3497 handle_reg_adjust (addreg0, 4);
3498 if (addreg1)
3499 handle_reg_adjust (addreg1, 4);
3500
3501 handle_movsi (middlehalf);
3502 }
3503
3504 /* Make any unoffsettable addresses point at high-numbered word. */
3505 if (addreg0)
3506 handle_reg_adjust (addreg0, 4);
3507 if (addreg1)
3508 handle_reg_adjust (addreg1, 4);
3509
3510 /* Do that word. */
3511 handle_movsi (latehalf);
3512
3513 /* Undo the adds we just did. */
3514 if (addreg0)
3515 handle_reg_adjust (addreg0, -(size - 4));
3516 if (addreg1)
3517 handle_reg_adjust (addreg1, -(size - 4));
3518
3519 return;
3520 }
3521
3522 /* Output assembler code to adjust REG by N. */
3523 static void
3524 output_reg_adjust (rtx reg, int n)
3525 {
3526 const char *s;
3527
3528 gcc_assert (GET_MODE (reg) == SImode && n >= -12 && n != 0 && n <= 12);
3529
3530 switch (n)
3531 {
3532 case 12:
3533 s = "add%.l #12,%0";
3534 break;
3535
3536 case 8:
3537 s = "addq%.l #8,%0";
3538 break;
3539
3540 case 4:
3541 s = "addq%.l #4,%0";
3542 break;
3543
3544 case -12:
3545 s = "sub%.l #12,%0";
3546 break;
3547
3548 case -8:
3549 s = "subq%.l #8,%0";
3550 break;
3551
3552 case -4:
3553 s = "subq%.l #4,%0";
3554 break;
3555
3556 default:
3557 gcc_unreachable ();
3558 s = NULL;
3559 }
3560
3561 output_asm_insn (s, &reg);
3562 }
3563
3564 /* Emit rtl code to adjust REG by N. */
3565 static void
3566 emit_reg_adjust (rtx reg1, int n)
3567 {
3568 rtx reg2;
3569
3570 gcc_assert (GET_MODE (reg1) == SImode && n >= -12 && n != 0 && n <= 12);
3571
3572 reg1 = copy_rtx (reg1);
3573 reg2 = copy_rtx (reg1);
3574
3575 if (n < 0)
3576 emit_insn (gen_subsi3 (reg1, reg2, GEN_INT (-n)));
3577 else if (n > 0)
3578 emit_insn (gen_addsi3 (reg1, reg2, GEN_INT (n)));
3579 else
3580 gcc_unreachable ();
3581 }
3582
3583 /* Output assembler to load address OPERANDS[0] to register OPERANDS[1]. */
3584 static void
3585 output_compadr (rtx operands[2])
3586 {
3587 output_asm_insn ("lea %a1,%0", operands);
3588 }
3589
3590 /* Output the best assembler insn for moving operands[1] into operands[0]
3591 as a fullword. */
3592 static void
3593 output_movsi (rtx operands[2])
3594 {
3595 output_asm_insn (singlemove_string (operands), operands);
3596 }
3597
3598 /* Copy OP and change its mode to MODE. */
3599 static rtx
3600 copy_operand (rtx op, machine_mode mode)
3601 {
3602 /* ??? This looks really ugly. There must be a better way
3603 to change a mode on the operand. */
3604 if (GET_MODE (op) != VOIDmode)
3605 {
3606 if (REG_P (op))
3607 op = gen_rtx_REG (mode, REGNO (op));
3608 else
3609 {
3610 op = copy_rtx (op);
3611 PUT_MODE (op, mode);
3612 }
3613 }
3614
3615 return op;
3616 }
3617
3618 /* Emit rtl code for moving operands[1] into operands[0] as a fullword. */
3619 static void
3620 emit_movsi (rtx operands[2])
3621 {
3622 operands[0] = copy_operand (operands[0], SImode);
3623 operands[1] = copy_operand (operands[1], SImode);
3624
3625 emit_insn (gen_movsi (operands[0], operands[1]));
3626 }
3627
3628 /* Output assembler code to perform a doubleword move insn
3629 with operands OPERANDS. */
3630 const char *
3631 output_move_double (rtx *operands)
3632 {
3633 handle_move_double (operands,
3634 output_reg_adjust, output_compadr, output_movsi);
3635
3636 return "";
3637 }
3638
3639 /* Output rtl code to perform a doubleword move insn
3640 with operands OPERANDS. */
3641 void
3642 m68k_emit_move_double (rtx operands[2])
3643 {
3644 handle_move_double (operands, emit_reg_adjust, emit_movsi, emit_movsi);
3645 }
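
/* Hypothetical usage sketch (not GCC's actual caller): a DImode
   register-to-register move d0/d1 <- d2/d3 is emitted as two SImode
   moves; since the registers do not overlap, the low word goes first.  */

static void ATTRIBUTE_UNUSED
m68k_emit_move_double_example (void)
{
  rtx ops[2];

  ops[0] = gen_rtx_REG (DImode, 0);   /* d0/d1 */
  ops[1] = gen_rtx_REG (DImode, 2);   /* d2/d3 */
  m68k_emit_move_double (ops);
}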
3646
3647 /* Ensure mode of ORIG, a REG rtx, is MODE. Returns either ORIG or a
3648 new rtx with the correct mode. */
3649
3650 static rtx
3651 force_mode (machine_mode mode, rtx orig)
3652 {
3653 if (mode == GET_MODE (orig))
3654 return orig;
3655
3656 if (REGNO (orig) >= FIRST_PSEUDO_REGISTER)
3657 abort ();
3658
3659 return gen_rtx_REG (mode, REGNO (orig));
3660 }
3661
3662 static int
3663 fp_reg_operand (rtx op, machine_mode mode ATTRIBUTE_UNUSED)
3664 {
3665 return reg_renumber && FP_REG_P (op);
3666 }
3667
3668 /* Emit insns to move operands[1] into operands[0].
3669
3670 Return 1 if we have written out everything that needs to be done to
3671 do the move. Otherwise, return 0 and the caller will emit the move
3672 normally.
3673
3674 Note SCRATCH_REG may not be in the proper mode depending on how it
3675 will be used. This routine is responsible for creating a new copy
3676 of SCRATCH_REG in the proper mode. */
3677
3678 int
3679 emit_move_sequence (rtx *operands, machine_mode mode, rtx scratch_reg)
3680 {
3681 register rtx operand0 = operands[0];
3682 register rtx operand1 = operands[1];
3683 register rtx tem;
3684
3685 if (scratch_reg
3686 && reload_in_progress && GET_CODE (operand0) == REG
3687 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
3688 operand0 = reg_equiv_mem (REGNO (operand0));
3689 else if (scratch_reg
3690 && reload_in_progress && GET_CODE (operand0) == SUBREG
3691 && GET_CODE (SUBREG_REG (operand0)) == REG
3692 && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
3693 {
3694 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
3695 the code which tracks sets/uses for delete_output_reload. */
3696 rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
3697 reg_equiv_mem (REGNO (SUBREG_REG (operand0))),
3698 SUBREG_BYTE (operand0));
3699 operand0 = alter_subreg (&temp, true);
3700 }
3701
3702 if (scratch_reg
3703 && reload_in_progress && GET_CODE (operand1) == REG
3704 && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
3705 operand1 = reg_equiv_mem (REGNO (operand1));
3706 else if (scratch_reg
3707 && reload_in_progress && GET_CODE (operand1) == SUBREG
3708 && GET_CODE (SUBREG_REG (operand1)) == REG
3709 && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
3710 {
3711 /* We must not alter SUBREG_BYTE (operand1) since that would confuse
3712 the code which tracks sets/uses for delete_output_reload. */
3713 rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
3714 reg_equiv_mem (REGNO (SUBREG_REG (operand1))),
3715 SUBREG_BYTE (operand1));
3716 operand1 = alter_subreg (&temp, true);
3717 }
3718
3719 if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
3720 && ((tem = find_replacement (&XEXP (operand0, 0)))
3721 != XEXP (operand0, 0)))
3722 operand0 = gen_rtx_MEM (GET_MODE (operand0), tem);
3723 if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
3724 && ((tem = find_replacement (&XEXP (operand1, 0)))
3725 != XEXP (operand1, 0)))
3726 operand1 = gen_rtx_MEM (GET_MODE (operand1), tem);
3727
3728 /* Handle secondary reloads for loads/stores of FP registers where
3729 the address is symbolic, by using the scratch register. */
3730 if (fp_reg_operand (operand0, mode)
3731 && ((GET_CODE (operand1) == MEM
3732 && ! memory_address_p (DFmode, XEXP (operand1, 0)))
3733 || ((GET_CODE (operand1) == SUBREG
3734 && GET_CODE (XEXP (operand1, 0)) == MEM
3735 && !memory_address_p (DFmode, XEXP (XEXP (operand1, 0), 0)))))
3736 && scratch_reg)
3737 {
3738 if (GET_CODE (operand1) == SUBREG)
3739 operand1 = XEXP (operand1, 0);
3740
3741 /* SCRATCH_REG will hold an address. We want
3742 it in SImode regardless of what mode it was originally given
3743 to us. */
3744 scratch_reg = force_mode (SImode, scratch_reg);
3745
3746 /* The displacement might be out of range for a direct reference; in
3747 that case load it into the scratch register and add the base.  */
3748 if (!memory_address_p (Pmode, XEXP (operand1, 0)))
3749 {
3750 emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
3751 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
3752 Pmode,
3753 XEXP (XEXP (operand1, 0), 0),
3754 scratch_reg));
3755 }
3756 else
3757 emit_move_insn (scratch_reg, XEXP (operand1, 0));
3758 emit_insn (gen_rtx_SET (operand0, gen_rtx_MEM (mode, scratch_reg)));
3759 return 1;
3760 }
3761 else if (fp_reg_operand (operand1, mode)
3762 && ((GET_CODE (operand0) == MEM
3763 && ! memory_address_p (DFmode, XEXP (operand0, 0)))
3764 || ((GET_CODE (operand0) == SUBREG)
3765 && GET_CODE (XEXP (operand0, 0)) == MEM
3766 && !memory_address_p (DFmode, XEXP (XEXP (operand0, 0), 0))))
3767 && scratch_reg)
3768 {
3769 if (GET_CODE (operand0) == SUBREG)
3770 operand0 = XEXP (operand0, 0);
3771
3772 /* SCRATCH_REG will hold an address and maybe the actual data. We want
3773 it in SImode regardless of what mode it was originally given
3774 to us. */
3775 scratch_reg = force_mode (SImode, scratch_reg);
3776
3777 /* The displacement might be out of range for a direct reference; in
3778 that case load it into the scratch register and add the base.  */
3779 if (!memory_address_p (Pmode, XEXP (operand0, 0)))
3780 {
3781 emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
3782 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0,
3783 0)),
3784 Pmode,
3785 XEXP (XEXP (operand0, 0),
3786 0),
3787 scratch_reg));
3788 }
3789 else
3790 emit_move_insn (scratch_reg, XEXP (operand0, 0));
3791 emit_insn (gen_rtx_SET (gen_rtx_MEM (mode, scratch_reg), operand1));
3792 return 1;
3793 }
3794 /* Handle secondary reloads for loads of FP registers from constant
3795 expressions by forcing the constant into memory.
3796
3797 Use SCRATCH_REG to hold the address of the memory location.
3798
3799 The proper fix is to change PREFERRED_RELOAD_CLASS to return
3800 NO_REGS when presented with a const_int and a register class
3801 containing only FP registers. Doing so unfortunately creates
3802 more problems than it solves. Fix this for 2.5. */
3803 else if (fp_reg_operand (operand0, mode)
3804 && CONSTANT_P (operand1)
3805 && scratch_reg)
3806 {
3807 rtx xoperands[2];
3808
3809 /* SCRATCH_REG will hold an address and maybe the actual data. We want
3810 it in SImode regardless of what mode it was originally given
3811 to us. */
3812 scratch_reg = force_mode (SImode, scratch_reg);
3813
3814 /* Force the constant into memory and put the address of the
3815 memory location into scratch_reg. */
3816 xoperands[0] = scratch_reg;
3817 xoperands[1] = XEXP (force_const_mem (mode, operand1), 0);
3818 emit_insn (gen_rtx_SET (scratch_reg, xoperands[1]));
3819
3820 /* Now load the destination register. */
3821 emit_insn (gen_rtx_SET (operand0, gen_rtx_MEM (mode, scratch_reg)));
3822 return 1;
3823 }
3824
3825 /* Now have insn-emit do whatever it normally does. */
3826 return 0;
3827 }
3828
3829 /* Split one or more DImode RTL references into pairs of SImode
3830 references. The RTL can be REG, offsettable MEM, integer constant, or
3831 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
3832 split and "num" is its length. lo_half and hi_half are output arrays
3833 that parallel "operands". */
3834
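/* Note that the m68k is big-endian, so for a MEM the most-significant
   word lives at offset 0 and the least-significant word at offset 4;
   that is why hi_half uses adjust_address (..., 0) and lo_half uses
   adjust_address (..., 4) below.  */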
3835 void
3836 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
3837 {
3838 while (num--)
3839 {
3840 rtx op = operands[num];
3841
3842 /* simplify_subreg refuses to split volatile memory addresses,
3843 but we still have to handle it. */
3844 if (GET_CODE (op) == MEM)
3845 {
3846 lo_half[num] = adjust_address (op, SImode, 4);
3847 hi_half[num] = adjust_address (op, SImode, 0);
3848 }
3849 else
3850 {
3851 lo_half[num] = simplify_gen_subreg (SImode, op,
3852 GET_MODE (op) == VOIDmode
3853 ? DImode : GET_MODE (op), 4);
3854 hi_half[num] = simplify_gen_subreg (SImode, op,
3855 GET_MODE (op) == VOIDmode
3856 ? DImode : GET_MODE (op), 0);
3857 }
3858 }
3859 }
3860
3861 /* Split X into a base and a constant offset, storing them in *BASE
3862 and *OFFSET respectively. */
3863
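/* E.g. (plus (reg %a0) (const_int 8)) gives *BASE = (reg %a0) and
   *OFFSET = 8, while a bare (reg %a0) gives *OFFSET = 0.  */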
3864 static void
3865 m68k_split_offset (rtx x, rtx *base, HOST_WIDE_INT *offset)
3866 {
3867 *offset = 0;
3868 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
3869 {
3870 *offset += INTVAL (XEXP (x, 1));
3871 x = XEXP (x, 0);
3872 }
3873 *base = x;
3874 }
3875
3876 /* Return true if PATTERN is a PARALLEL suitable for a movem or fmovem
3877 instruction. STORE_P says whether the move is a load or store.
3878
3879 If the instruction uses post-increment or pre-decrement addressing,
3880 AUTOMOD_BASE is the base register and AUTOMOD_OFFSET is the total
3881 adjustment. This adjustment will be made by the first element of
3882 PARALLEL, with the loads or stores starting at element 1. If the
3883 instruction does not use post-increment or pre-decrement addressing,
3884 AUTOMOD_BASE is null, AUTOMOD_OFFSET is 0, and the loads or stores
3885 start at element 0. */
3886
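/* As a concrete example, a pre-decrement store of %d2-%d4 looks like

     (parallel [(set (reg %sp) (plus (reg %sp) (const_int -12)))
                (set (mem (plus (reg %sp) (const_int -12))) (reg %d2))
                (set (mem (plus (reg %sp) (const_int -8))) (reg %d3))
                (set (mem (plus (reg %sp) (const_int -4))) (reg %d4))])

   and is validated with AUTOMOD_BASE = %sp, AUTOMOD_OFFSET = -12 and
   STORE_P = true.  */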
3887 bool
3888 m68k_movem_pattern_p (rtx pattern, rtx automod_base,
3889 HOST_WIDE_INT automod_offset, bool store_p)
3890 {
3891 rtx base, mem_base, set, mem, reg, last_reg;
3892 HOST_WIDE_INT offset, mem_offset;
3893 int i, first, len;
3894 enum reg_class rclass;
3895
3896 len = XVECLEN (pattern, 0);
3897 first = (automod_base != NULL);
3898
3899 if (automod_base)
3900 {
3901 /* Stores must be pre-decrement and loads must be post-increment. */
3902 if (store_p != (automod_offset < 0))
3903 return false;
3904
3905 /* Work out the base and offset for lowest memory location. */
3906 base = automod_base;
3907 offset = (automod_offset < 0 ? automod_offset : 0);
3908 }
3909 else
3910 {
3911 /* Allow any valid base and offset in the first access. */
3912 base = NULL;
3913 offset = 0;
3914 }
3915
3916 last_reg = NULL;
3917 rclass = NO_REGS;
3918 for (i = first; i < len; i++)
3919 {
3920 /* We need a plain SET. */
3921 set = XVECEXP (pattern, 0, i);
3922 if (GET_CODE (set) != SET)
3923 return false;
3924
3925 /* Check that we have a memory location... */
3926 mem = XEXP (set, !store_p);
3927 if (!MEM_P (mem) || !memory_operand (mem, VOIDmode))
3928 return false;
3929
3930 /* ...with the right address. */
3931 if (base == NULL)
3932 {
3933 m68k_split_offset (XEXP (mem, 0), &base, &offset);
3934 /* The ColdFire instruction only allows (An) and (d16,An) modes.
3935 There are no mode restrictions for 680x0 besides the
3936 automodification rules enforced above. */
3937 if (TARGET_COLDFIRE
3938 && !m68k_legitimate_base_reg_p (base, reload_completed))
3939 return false;
3940 }
3941 else
3942 {
3943 m68k_split_offset (XEXP (mem, 0), &mem_base, &mem_offset);
3944 if (!rtx_equal_p (base, mem_base) || offset != mem_offset)
3945 return false;
3946 }
3947
3948 /* Check that we have a register of the required mode and class. */
3949 reg = XEXP (set, store_p);
3950 if (!REG_P (reg)
3951 || !HARD_REGISTER_P (reg)
3952 || GET_MODE (reg) != reg_raw_mode[REGNO (reg)])
3953 return false;
3954
3955 if (last_reg)
3956 {
3957 /* The register must belong to RCLASS and have a higher number
3958 than the register in the previous SET. */
3959 if (!TEST_HARD_REG_BIT (reg_class_contents[rclass], REGNO (reg))
3960 || REGNO (last_reg) >= REGNO (reg))
3961 return false;
3962 }
3963 else
3964 {
3965 /* Work out which register class we need. */
3966 if (INT_REGNO_P (REGNO (reg)))
3967 rclass = GENERAL_REGS;
3968 else if (FP_REGNO_P (REGNO (reg)))
3969 rclass = FP_REGS;
3970 else
3971 return false;
3972 }
3973
3974 last_reg = reg;
3975 offset += GET_MODE_SIZE (GET_MODE (reg));
3976 }
3977
3978 /* If we have an automodification, check whether the final offset is OK. */
3979 if (automod_base && offset != (automod_offset < 0 ? 0 : automod_offset))
3980 return false;
3981
3982 /* Reject unprofitable cases. */
3983 if (len < first + (rclass == FP_REGS ? MIN_FMOVEM_REGS : MIN_MOVEM_REGS))
3984 return false;
3985
3986 return true;
3987 }
3988
3989 /* Return the assembly code template for a movem or fmovem instruction
3990 whose pattern is given by PATTERN. Store the template's operands
3991 in OPERANDS.
3992
3993 If the instruction uses post-increment or pre-decrement addressing,
3994 AUTOMOD_OFFSET is the total adjustment, otherwise it is 0. STORE_P
3995 is true if this is a store instruction. */
3996
3997 const char *
3998 m68k_output_movem (rtx *operands, rtx pattern,
3999 HOST_WIDE_INT automod_offset, bool store_p)
4000 {
4001 unsigned int mask;
4002 int i, first;
4003
4004 gcc_assert (GET_CODE (pattern) == PARALLEL);
4005 mask = 0;
4006 first = (automod_offset != 0);
4007 for (i = first; i < XVECLEN (pattern, 0); i++)
4008 {
4009 /* When using movem with pre-decrement addressing, register X + D0_REG
4010 is controlled by bit 15 - X. For all other addressing modes,
4011 register X + D0_REG is controlled by bit X. Confusingly, the
4012 register mask for fmovem is in the opposite order to that for
4013 movem. */
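/* For example, "movem.l %d2-%d4,-(%sp)" uses mask bits 13, 12 and 11,
   i.e. mask == 0x3800, while "movem.l (%sp)+,%d2-%d4" uses bits 2, 3
   and 4, i.e. mask == 0x001c.  */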
4014 unsigned int regno;
4015
4016 gcc_assert (MEM_P (XEXP (XVECEXP (pattern, 0, i), !store_p)));
4017 gcc_assert (REG_P (XEXP (XVECEXP (pattern, 0, i), store_p)));
4018 regno = REGNO (XEXP (XVECEXP (pattern, 0, i), store_p));
4019 if (automod_offset < 0)
4020 {
4021 if (FP_REGNO_P (regno))
4022 mask |= 1 << (regno - FP0_REG);
4023 else
4024 mask |= 1 << (15 - (regno - D0_REG));
4025 }
4026 else
4027 {
4028 if (FP_REGNO_P (regno))
4029 mask |= 1 << (7 - (regno - FP0_REG));
4030 else
4031 mask |= 1 << (regno - D0_REG);
4032 }
4033 }
4034 CC_STATUS_INIT;
4035
4036 if (automod_offset == 0)
4037 operands[0] = XEXP (XEXP (XVECEXP (pattern, 0, first), !store_p), 0);
4038 else if (automod_offset < 0)
4039 operands[0] = gen_rtx_PRE_DEC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
4040 else
4041 operands[0] = gen_rtx_POST_INC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
4042 operands[1] = GEN_INT (mask);
4043 if (FP_REGNO_P (REGNO (XEXP (XVECEXP (pattern, 0, first), store_p))))
4044 {
4045 if (store_p)
4046 return "fmovem %1,%a0";
4047 else
4048 return "fmovem %a0,%1";
4049 }
4050 else
4051 {
4052 if (store_p)
4053 return "movem%.l %1,%a0";
4054 else
4055 return "movem%.l %a0,%1";
4056 }
4057 }
4058
4059 /* Return a REG that occurs in ADDR with coefficient 1.
4060 ADDR can be effectively incremented by incrementing REG. */
4061
4062 static rtx
4063 find_addr_reg (rtx addr)
4064 {
4065 while (GET_CODE (addr) == PLUS)
4066 {
4067 if (GET_CODE (XEXP (addr, 0)) == REG)
4068 addr = XEXP (addr, 0);
4069 else if (GET_CODE (XEXP (addr, 1)) == REG)
4070 addr = XEXP (addr, 1);
4071 else if (CONSTANT_P (XEXP (addr, 0)))
4072 addr = XEXP (addr, 1);
4073 else if (CONSTANT_P (XEXP (addr, 1)))
4074 addr = XEXP (addr, 0);
4075 else
4076 gcc_unreachable ();
4077 }
4078 gcc_assert (GET_CODE (addr) == REG);
4079 return addr;
4080 }
4081
4082 /* Output assembler code to perform a 32-bit 3-operand add. */
4083
4084 const char *
4085 output_addsi3 (rtx *operands)
4086 {
4087 if (! operands_match_p (operands[0], operands[1]))
4088 {
4089 if (!ADDRESS_REG_P (operands[1]))
4090 {
4091 rtx tmp = operands[1];
4092
4093 operands[1] = operands[2];
4094 operands[2] = tmp;
4095 }
4096
4097 /* These insns can result from reloads to access
4098 stack slots over 64k from the frame pointer. */
4099 if (GET_CODE (operands[2]) == CONST_INT
4100 && (INTVAL (operands[2]) < -32768 || INTVAL (operands[2]) > 32767))
4101 return "move%.l %2,%0\n\tadd%.l %1,%0";
4102 if (GET_CODE (operands[2]) == REG)
4103 return MOTOROLA ? "lea (%1,%2.l),%0" : "lea %1@(0,%2:l),%0";
4104 return MOTOROLA ? "lea (%c2,%1),%0" : "lea %1@(%c2),%0";
4105 }
4106 if (GET_CODE (operands[2]) == CONST_INT)
4107 {
4108 if (INTVAL (operands[2]) > 0
4109 && INTVAL (operands[2]) <= 8)
4110 return "addq%.l %2,%0";
4111 if (INTVAL (operands[2]) < 0
4112 && INTVAL (operands[2]) >= -8)
4113 {
4114 operands[2] = GEN_INT (- INTVAL (operands[2]));
4115 return "subq%.l %2,%0";
4116 }
4117 /* On the CPU32 it is faster to use two addql instructions to
4118 add a small integer (8 < N <= 16) to a register.
4119 Likewise for subql. */
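/* E.g. adding 12 becomes "addq.l #8,%0" followed by "addq.l #4,%0",
   which also avoids the 32-bit immediate of "add.l #12,%0".  */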
4120 if (TUNE_CPU32 && REG_P (operands[0]))
4121 {
4122 if (INTVAL (operands[2]) > 8
4123 && INTVAL (operands[2]) <= 16)
4124 {
4125 operands[2] = GEN_INT (INTVAL (operands[2]) - 8);
4126 return "addq%.l #8,%0\n\taddq%.l %2,%0";
4127 }
4128 if (INTVAL (operands[2]) < -8
4129 && INTVAL (operands[2]) >= -16)
4130 {
4131 operands[2] = GEN_INT (- INTVAL (operands[2]) - 8);
4132 return "subq%.l #8,%0\n\tsubq%.l %2,%0";
4133 }
4134 }
4135 if (ADDRESS_REG_P (operands[0])
4136 && INTVAL (operands[2]) >= -0x8000
4137 && INTVAL (operands[2]) < 0x8000)
4138 {
4139 if (TUNE_68040)
4140 return "add%.w %2,%0";
4141 else
4142 return MOTOROLA ? "lea (%c2,%0),%0" : "lea %0@(%c2),%0";
4143 }
4144 }
4145 return "add%.l %2,%0";
4146 }
4147 \f
4148 /* Store in cc_status the expressions that the condition codes will
4149 describe after execution of an instruction whose pattern is EXP.
4150 Do not alter them if the instruction would not alter the cc's. */
4151
4152 /* On the 68000, all the insns to store in an address register fail to
4153 set the cc's.  However, in some cases these instructions can
4154 invalidate the saved cc's.  In those cases we clear out
4155 some or all of the saved cc's so they won't be used. */
4156
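/* For example, after "move.l %d0,%d1" the saved cc's describe %d1, so
   a following "tst.l %d1" can be deleted by final.  An address-register
   move or an fmove must not pretend to provide such cc's, hence the
   CC_STATUS_INIT calls below.  */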
4157 void
4158 notice_update_cc (rtx exp, rtx insn)
4159 {
4160 if (GET_CODE (exp) == SET)
4161 {
4162 if (GET_CODE (SET_SRC (exp)) == CALL)
4163 CC_STATUS_INIT;
4164 else if (ADDRESS_REG_P (SET_DEST (exp)))
4165 {
4166 if (cc_status.value1 && modified_in_p (cc_status.value1, insn))
4167 cc_status.value1 = 0;
4168 if (cc_status.value2 && modified_in_p (cc_status.value2, insn))
4169 cc_status.value2 = 0;
4170 }
4171 /* fmoves to memory or data registers do not set the condition
4172 codes. Normal moves _do_ set the condition codes, but not in
4173 a way that is appropriate for comparison with 0, because -0.0
4174 would be treated as a negative nonzero number. Note that it
4175 isn't appropriate to conditionalize this restriction on
4176 HONOR_SIGNED_ZEROS because that macro merely indicates whether
4177 we care about the difference between -0.0 and +0.0. */
4178 else if (!FP_REG_P (SET_DEST (exp))
4179 && SET_DEST (exp) != cc0_rtx
4180 && (FP_REG_P (SET_SRC (exp))
4181 || GET_CODE (SET_SRC (exp)) == FIX
4182 || FLOAT_MODE_P (GET_MODE (SET_DEST (exp)))))
4183 CC_STATUS_INIT;
4184 /* A pair of move insns doesn't produce a useful overall cc. */
4185 else if (!FP_REG_P (SET_DEST (exp))
4186 && !FP_REG_P (SET_SRC (exp))
4187 && GET_MODE_SIZE (GET_MODE (SET_SRC (exp))) > 4
4188 && (GET_CODE (SET_SRC (exp)) == REG
4189 || GET_CODE (SET_SRC (exp)) == MEM
4190 || GET_CODE (SET_SRC (exp)) == CONST_DOUBLE))
4191 CC_STATUS_INIT;
4192 else if (SET_DEST (exp) != pc_rtx)
4193 {
4194 cc_status.flags = 0;
4195 cc_status.value1 = SET_DEST (exp);
4196 cc_status.value2 = SET_SRC (exp);
4197 }
4198 }
4199 else if (GET_CODE (exp) == PARALLEL
4200 && GET_CODE (XVECEXP (exp, 0, 0)) == SET)
4201 {
4202 rtx dest = SET_DEST (XVECEXP (exp, 0, 0));
4203 rtx src = SET_SRC (XVECEXP (exp, 0, 0));
4204
4205 if (ADDRESS_REG_P (dest))
4206 CC_STATUS_INIT;
4207 else if (dest != pc_rtx)
4208 {
4209 cc_status.flags = 0;
4210 cc_status.value1 = dest;
4211 cc_status.value2 = src;
4212 }
4213 }
4214 else
4215 CC_STATUS_INIT;
4216 if (cc_status.value2 != 0
4217 && ADDRESS_REG_P (cc_status.value2)
4218 && GET_MODE (cc_status.value2) == QImode)
4219 CC_STATUS_INIT;
4220 if (cc_status.value2 != 0)
4221 switch (GET_CODE (cc_status.value2))
4222 {
4223 case ASHIFT: case ASHIFTRT: case LSHIFTRT:
4224 case ROTATE: case ROTATERT:
4225 /* These instructions always clear the overflow bit, and set
4226 the carry to the bit shifted out. */
4227 cc_status.flags |= CC_OVERFLOW_UNUSABLE | CC_NO_CARRY;
4228 break;
4229
4230 case PLUS: case MINUS: case MULT:
4231 case DIV: case UDIV: case MOD: case UMOD: case NEG:
4232 if (GET_MODE (cc_status.value2) != VOIDmode)
4233 cc_status.flags |= CC_NO_OVERFLOW;
4234 break;
4235 case ZERO_EXTEND:
4236 /* (SET r1 (ZERO_EXTEND r2)) on this machine
4237 ends with a move insn moving r2 in r2's mode.
4238 Thus, the cc's are set for r2.
4239 This can set N bit spuriously. */
4240 cc_status.flags |= CC_NOT_NEGATIVE;
4241 break;
4242 default:
4243 break;
4244 }
4245 if (cc_status.value1 && GET_CODE (cc_status.value1) == REG
4246 && cc_status.value2
4247 && reg_overlap_mentioned_p (cc_status.value1, cc_status.value2))
4248 cc_status.value2 = 0;
4249 /* Check for PRE_DEC in dest modifying a register used in src. */
4250 if (cc_status.value1 && GET_CODE (cc_status.value1) == MEM
4251 && GET_CODE (XEXP (cc_status.value1, 0)) == PRE_DEC
4252 && cc_status.value2
4253 && reg_overlap_mentioned_p (XEXP (XEXP (cc_status.value1, 0), 0),
4254 cc_status.value2))
4255 cc_status.value2 = 0;
4256 if (((cc_status.value1 && FP_REG_P (cc_status.value1))
4257 || (cc_status.value2 && FP_REG_P (cc_status.value2))))
4258 cc_status.flags = CC_IN_68881;
4259 if (cc_status.value2 && GET_CODE (cc_status.value2) == COMPARE
4260 && GET_MODE_CLASS (GET_MODE (XEXP (cc_status.value2, 0))) == MODE_FLOAT)
4261 {
4262 cc_status.flags = CC_IN_68881;
4263 if (!FP_REG_P (XEXP (cc_status.value2, 0))
4264 && FP_REG_P (XEXP (cc_status.value2, 1)))
4265 cc_status.flags |= CC_REVERSED;
4266 }
4267 }
4268 \f
4269 const char *
4270 output_move_const_double (rtx *operands)
4271 {
4272 int code = standard_68881_constant_p (operands[1]);
4273
4274 if (code != 0)
4275 {
4276 static char buf[40];
4277
4278 sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff);
4279 return buf;
4280 }
4281 return "fmove%.d %1,%0";
4282 }
4283
4284 const char *
4285 output_move_const_single (rtx *operands)
4286 {
4287 int code = standard_68881_constant_p (operands[1]);
4288
4289 if (code != 0)
4290 {
4291 static char buf[40];
4292
4293 sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff);
4294 return buf;
4295 }
4296 return "fmove%.s %f1,%0";
4297 }
4298
4299 /* Return nonzero if X, a CONST_DOUBLE, has a value that we can get
4300 from the "fmovecr" instruction.
4301 The value, anded with 0xff, gives the code to use in fmovecr
4302 to get the desired constant. */
4303
4304 /* This code has been fixed for cross-compilation. */
4305
4306 static int inited_68881_table = 0;
4307
4308 static const char *const strings_68881[7] = {
4309 "0.0",
4310 "1.0",
4311 "10.0",
4312 "100.0",
4313 "10000.0",
4314 "1e8",
4315 "1e16"
4316 };
4317
4318 static const int codes_68881[7] = {
4319 0x0f,
4320 0x32,
4321 0x33,
4322 0x34,
4323 0x35,
4324 0x36,
4325 0x37
4326 };
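/* Read together, the two tables say e.g. that 1.0 can be loaded with
   "fmovecr #0x32,%0" and 1e8 with "fmovecr #0x36,%0"; see
   output_move_const_double and output_move_const_single above.  */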
4327
4328 REAL_VALUE_TYPE values_68881[7];
4329
4330 /* Set up values_68881 array by converting the decimal values
4331 strings_68881 to binary. */
4332
4333 void
4334 init_68881_table (void)
4335 {
4336 int i;
4337 REAL_VALUE_TYPE r;
4338 machine_mode mode;
4339
4340 mode = SFmode;
4341 for (i = 0; i < 7; i++)
4342 {
4343 if (i == 6)
4344 mode = DFmode;
4345 r = REAL_VALUE_ATOF (strings_68881[i], mode);
4346 values_68881[i] = r;
4347 }
4348 inited_68881_table = 1;
4349 }
4350
4351 int
4352 standard_68881_constant_p (rtx x)
4353 {
4354 const REAL_VALUE_TYPE *r;
4355 int i;
4356
4357 /* fmovecr must be emulated on the 68040 and 68060, so it shouldn't be
4358 used at all on those chips. */
4359 if (TUNE_68040_60)
4360 return 0;
4361
4362 if (! inited_68881_table)
4363 init_68881_table ();
4364
4365 r = CONST_DOUBLE_REAL_VALUE (x);
4366
4367 /* Use real_identical instead of real_equal so that -0.0 is rejected. */
4368 for (i = 0; i < 6; i++)
4369 {
4370 if (real_identical (r, &values_68881[i]))
4371 return (codes_68881[i]);
4372 }
4373
4374 if (GET_MODE (x) == SFmode)
4375 return 0;
4376
4377 if (real_equal (r, &values_68881[6]))
4378 return (codes_68881[6]);
4379
4380 /* Larger powers of ten in the 68881 constant ROM are not used
4381 because they are not exactly equal to a `double' C constant.  */
4382 return 0;
4383 }
4384
4385 /* If X is a floating-point constant, return its base-2 logarithm if
4386 X is a power of 2 that is at least 1; return 0 otherwise.  */
4387
4388 int
4389 floating_exact_log2 (rtx x)
4390 {
4391 const REAL_VALUE_TYPE *r;
4392 REAL_VALUE_TYPE r1;
4393 int exp;
4394
4395 r = CONST_DOUBLE_REAL_VALUE (x);
4396
4397 if (real_less (r, &dconst1))
4398 return 0;
4399
4400 exp = real_exponent (r);
4401 real_2expN (&r1, exp, DFmode);
4402 if (real_equal (&r1, r))
4403 return exp;
4404
4405 return 0;
4406 }
4407 \f
4408 /* A C compound statement to output to stdio stream STREAM the
4409 assembler syntax for an instruction operand X. X is an RTL
4410 expression.
4411
4412 CODE is a value that can be used to specify one of several ways
4413 of printing the operand. It is used when identical operands
4414 must be printed differently depending on the context. CODE
4415 comes from the `%' specification that was used to request
4416 printing of the operand. If the specification was just `%DIGIT'
4417 then CODE is 0; if the specification was `%LTR DIGIT' then CODE
4418 is the ASCII code for LTR.
4419
4420 If X is a register, this macro should print the register's name.
4421 The names can be found in an array `reg_names' whose type is
4422 `char *[]'. `reg_names' is initialized from `REGISTER_NAMES'.
4423
4424 When the machine description has a specification `%PUNCT' (a `%'
4425 followed by a punctuation character), this macro is called with
4426 a null pointer for X and the punctuation character for CODE.
4427
4428 The m68k specific codes are:
4429
4430 '.' for dot needed in Motorola-style opcode names.
4431 '-' for an operand pushing on the stack:
4432 sp@-, -(sp) or -(%sp) depending on the style of syntax.
4433 '+' for an operand pushing on the stack:
4434 sp@+, (sp)+ or (%sp)+ depending on the style of syntax.
4435 '@' for a reference to the top word on the stack:
4436 sp@, (sp) or (%sp) depending on the style of syntax.
4437 '#' for an immediate operand prefix (# in MIT and Motorola syntax
4438 but & in SGS syntax).
4439 '!' for the cc register (used in an `and to cc' insn).
4440 '$' for the letter `s' in an op code, but only on the 68040.
4441 '&' for the letter `d' in an op code, but only on the 68040.
4442 '/' for register prefix needed by longlong.h.
4443 '?' for m68k_library_id_string
4444
4445 'b' for byte insn (no effect on the Sun; this is for the ISI).
4446 'd' to force memory addressing to be absolute, not relative.
4447 'f' for float insn (print a CONST_DOUBLE as a float rather than in hex)
4448 'x' for float insn (print a CONST_DOUBLE as a float rather than in hex),
4449 or print pair of registers as rx:ry.
4450 'p' print an address with @PLTPC attached, but only if the operand
4451 is not locally-bound. */
4452
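/* For example, under MOTOROLA syntax (with the usual '%' register
   prefix) the template "move%.l %1,%-" prints as "move.l %d1,-(%sp)"
   when operand 1 is d1, while MIT syntax gives "movel d1,sp@-".  */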
4453 void
4454 print_operand (FILE *file, rtx op, int letter)
4455 {
4456 if (op != NULL_RTX)
4457 m68k_adjust_decorated_operand (op);
4458
4459 if (letter == '.')
4460 {
4461 if (MOTOROLA)
4462 fprintf (file, ".");
4463 }
4464 else if (letter == '#')
4465 asm_fprintf (file, "%I");
4466 else if (letter == '-')
4467 asm_fprintf (file, MOTOROLA ? "-(%Rsp)" : "%Rsp@-");
4468 else if (letter == '+')
4469 asm_fprintf (file, MOTOROLA ? "(%Rsp)+" : "%Rsp@+");
4470 else if (letter == '@')
4471 asm_fprintf (file, MOTOROLA ? "(%Rsp)" : "%Rsp@");
4472 else if (letter == '!')
4473 asm_fprintf (file, "%Rfpcr");
4474 else if (letter == '$')
4475 {
4476 if (TARGET_68040)
4477 fprintf (file, "s");
4478 }
4479 else if (letter == '&')
4480 {
4481 if (TARGET_68040)
4482 fprintf (file, "d");
4483 }
4484 else if (letter == '/')
4485 asm_fprintf (file, "%R");
4486 else if (letter == '?')
4487 asm_fprintf (file, m68k_library_id_string);
4488 else if (letter == 'p')
4489 {
4490 output_addr_const (file, op);
4491 if (!(GET_CODE (op) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (op)))
4492 fprintf (file, "@PLTPC");
4493 }
4494 else if (GET_CODE (op) == REG)
4495 {
4496 if (letter == 'R')
4497 /* Print out the second register name of a register pair.
4498 I.e., R (6) => 7. */
4499 fputs (M68K_REGNAME(REGNO (op) + 1), file);
4500 else
4501 fputs (M68K_REGNAME(REGNO (op)), file);
4502 }
4503 else if (GET_CODE (op) == MEM)
4504 {
4505 output_address (GET_MODE (op), XEXP (op, 0));
4506 if (letter == 'd' && ! TARGET_68020
4507 && CONSTANT_ADDRESS_P (XEXP (op, 0))
4508 && !(GET_CODE (XEXP (op, 0)) == CONST_INT
4509 && INTVAL (XEXP (op, 0)) < 0x8000
4510 && INTVAL (XEXP (op, 0)) >= -0x8000))
4511 fprintf (file, MOTOROLA ? ".l" : ":l");
4512 }
4513 else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == SFmode)
4514 {
4515 long l;
4516 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), l);
4517 asm_fprintf (file, "%I0x%lx", l & 0xFFFFFFFF);
4518 }
4519 else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == XFmode)
4520 {
4521 long l[3];
4522 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op), l);
4523 asm_fprintf (file, "%I0x%lx%08lx%08lx", l[0] & 0xFFFFFFFF,
4524 l[1] & 0xFFFFFFFF, l[2] & 0xFFFFFFFF);
4525 }
4526 else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == DFmode)
4527 {
4528 long l[2];
4529 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op), l);
4530 asm_fprintf (file, "%I0x%lx%08lx", l[0] & 0xFFFFFFFF, l[1] & 0xFFFFFFFF);
4531 }
4532 else
4533 {
4534 /* Use `print_operand_address' instead of `output_addr_const'
4535 to ensure that we print relevant PIC stuff. */
4536 asm_fprintf (file, "%I");
4537 if (TARGET_PCREL
4538 && (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == CONST))
4539 print_operand_address (file, op);
4540 else
4541 output_addr_const (file, op);
4542 }
4543 }
4544
4545 /* Return string for TLS relocation RELOC. */
4546
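/* E.g. a global-dynamic TLS reference is printed as "foo@TLSGD", and a
   GOT reference under -fpic on a 68020 as "foo@GOT.w" (a 16-bit GOT
   offset).  */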
4547 static const char *
4548 m68k_get_reloc_decoration (enum m68k_reloc reloc)
4549 {
4550 /* To my knowledge, !MOTOROLA assemblers don't support TLS. */
4551 gcc_assert (MOTOROLA || reloc == RELOC_GOT);
4552
4553 switch (reloc)
4554 {
4555 case RELOC_GOT:
4556 if (MOTOROLA)
4557 {
4558 if (flag_pic == 1 && TARGET_68020)
4559 return "@GOT.w";
4560 else
4561 return "@GOT";
4562 }
4563 else
4564 {
4565 if (TARGET_68020)
4566 {
4567 switch (flag_pic)
4568 {
4569 case 1:
4570 return ":w";
4571 case 2:
4572 return ":l";
4573 default:
4574 return "";
4575 }
4576 }
4577 }
4578 gcc_unreachable ();
4579
4580 case RELOC_TLSGD:
4581 return "@TLSGD";
4582
4583 case RELOC_TLSLDM:
4584 return "@TLSLDM";
4585
4586 case RELOC_TLSLDO:
4587 return "@TLSLDO";
4588
4589 case RELOC_TLSIE:
4590 return "@TLSIE";
4591
4592 case RELOC_TLSLE:
4593 return "@TLSLE";
4594
4595 default:
4596 gcc_unreachable ();
4597 }
4598 }
4599
4600 /* m68k implementation of TARGET_OUTPUT_ADDR_CONST_EXTRA. */
4601
4602 static bool
4603 m68k_output_addr_const_extra (FILE *file, rtx x)
4604 {
4605 if (GET_CODE (x) == UNSPEC)
4606 {
4607 switch (XINT (x, 1))
4608 {
4609 case UNSPEC_RELOC16:
4610 case UNSPEC_RELOC32:
4611 output_addr_const (file, XVECEXP (x, 0, 0));
4612 fputs (m68k_get_reloc_decoration
4613 ((enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1))), file);
4614 return true;
4615
4616 default:
4617 break;
4618 }
4619 }
4620
4621 return false;
4622 }
4623
4624 /* M68K implementation of TARGET_ASM_OUTPUT_DWARF_DTPREL. */
4625
4626 static void
4627 m68k_output_dwarf_dtprel (FILE *file, int size, rtx x)
4628 {
4629 gcc_assert (size == 4);
4630 fputs ("\t.long\t", file);
4631 output_addr_const (file, x);
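  /* ??? The "+0x8000" presumably cancels a bias applied by the TLSLDO
     relocation, so the debug info records the unbiased offset.  */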
4632 fputs ("@TLSLDO+0x8000", file);
4633 }
4634
4635 /* In the name of slightly smaller debug output, and to cater to
4636 general assembler lossage, recognize various UNSPEC sequences
4637 and turn them back into a direct symbol reference. */
4638
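/* E.g. a -fpic GOT reference of the shape
     (mem (plus (reg %a5) (const (unspec [foo, reloc] UNSPEC_RELOC16))))
   has the UNSPEC wrapper stripped so that the symbol "foo" itself is
   visible to the debug-info machinery again.  */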
4639 static rtx
4640 m68k_delegitimize_address (rtx orig_x)
4641 {
4642 rtx x;
4643 struct m68k_address addr;
4644 rtx unspec;
4645
4646 orig_x = delegitimize_mem_from_attrs (orig_x);
4647 x = orig_x;
4648 if (MEM_P (x))
4649 x = XEXP (x, 0);
4650
4651 if (GET_CODE (x) != PLUS || GET_MODE (x) != Pmode)
4652 return orig_x;
4653
4654 if (!m68k_decompose_address (GET_MODE (x), x, false, &addr)
4655 || addr.offset == NULL_RTX
4656 || GET_CODE (addr.offset) != CONST)
4657 return orig_x;
4658
4659 unspec = XEXP (addr.offset, 0);
4660 if (GET_CODE (unspec) == PLUS && CONST_INT_P (XEXP (unspec, 1)))
4661 unspec = XEXP (unspec, 0);
4662 if (GET_CODE (unspec) != UNSPEC
4663 || (XINT (unspec, 1) != UNSPEC_RELOC16
4664 && XINT (unspec, 1) != UNSPEC_RELOC32))
4665 return orig_x;
4666 x = XVECEXP (unspec, 0, 0);
4667 gcc_assert (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF);
4668 if (unspec != XEXP (addr.offset, 0))
4669 x = gen_rtx_PLUS (Pmode, x, XEXP (XEXP (addr.offset, 0), 1));
4670 if (addr.index)
4671 {
4672 rtx idx = addr.index;
4673 if (addr.scale != 1)
4674 idx = gen_rtx_MULT (Pmode, idx, GEN_INT (addr.scale));
4675 x = gen_rtx_PLUS (Pmode, idx, x);
4676 }
4677 if (addr.base)
4678 x = gen_rtx_PLUS (Pmode, addr.base, x);
4679 if (MEM_P (orig_x))
4680 x = replace_equiv_address_nv (orig_x, x);
4681 return x;
4682 }
4683
4684 \f
4685 /* A C compound statement to output to stdio stream STREAM the
4686 assembler syntax for an instruction operand that is a memory
4687 reference whose address is ADDR. ADDR is an RTL expression.
4688
4689 Note that this contains a kludge that knows that the only reason
4690 we have an address (plus (label_ref...) (reg...)) when not generating
4691 PIC code is in the insn before a tablejump, and we know that m68k.md
4692 generates a label LInnn: on such an insn.
4693
4694 It is possible for PIC to generate a (plus (label_ref...) (reg...))
4695 and we handle that just like we would a (plus (symbol_ref...) (reg...)).
4696
4697 This routine is responsible for distinguishing between -fpic and -fPIC
4698 style relocations in an address. When generating -fpic code the
4699 offset is output in word mode (e.g. movel a5@(_foo:w), a0). When generating
4700 -fPIC code the offset is output in long mode (e.g. movel a5@(_foo:l), a0) */
4701
4702 void
4703 print_operand_address (FILE *file, rtx addr)
4704 {
4705 struct m68k_address address;
4706
4707 m68k_adjust_decorated_operand (addr);
4708
4709 if (!m68k_decompose_address (QImode, addr, true, &address))
4710 gcc_unreachable ();
4711
4712 if (address.code == PRE_DEC)
4713 fprintf (file, MOTOROLA ? "-(%s)" : "%s@-",
4714 M68K_REGNAME (REGNO (address.base)));
4715 else if (address.code == POST_INC)
4716 fprintf (file, MOTOROLA ? "(%s)+" : "%s@+",
4717 M68K_REGNAME (REGNO (address.base)));
4718 else if (!address.base && !address.index)
4719 {
4720 /* A constant address. */
4721 gcc_assert (address.offset == addr);
4722 if (GET_CODE (addr) == CONST_INT)
4723 {
4724 /* (xxx).w or (xxx).l. */
4725 if (IN_RANGE (INTVAL (addr), -0x8000, 0x7fff))
4726 fprintf (file, MOTOROLA ? "%d.w" : "%d:w", (int) INTVAL (addr));
4727 else
4728 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (addr));
4729 }
4730 else if (TARGET_PCREL)
4731 {
4732 /* (d16,PC) or (bd,PC,Xn) (with suppressed index register). */
4733 fputc ('(', file);
4734 output_addr_const (file, addr);
4735 asm_fprintf (file, flag_pic == 1 ? ":w,%Rpc)" : ":l,%Rpc)");
4736 }
4737 else
4738 {
4739 /* (xxx).l. We need a special case for SYMBOL_REF if the symbol
4740 name ends in `.<letter>', as the last 2 characters can be
4741 mistaken for a size suffix.  Put the name in parentheses.  */
4742 if (GET_CODE (addr) == SYMBOL_REF
4743 && strlen (XSTR (addr, 0)) > 2
4744 && XSTR (addr, 0)[strlen (XSTR (addr, 0)) - 2] == '.')
4745 {
4746 putc ('(', file);
4747 output_addr_const (file, addr);
4748 putc (')', file);
4749 }
4750 else
4751 output_addr_const (file, addr);
4752 }
4753 }
4754 else
4755 {
4756 int labelno;
4757
4758 /* If ADDR is a (d8,pc,Xn) address, this is the number of the
4759 label being accessed, otherwise it is -1. */
4760 labelno = (address.offset
4761 && !address.base
4762 && GET_CODE (address.offset) == LABEL_REF
4763 ? CODE_LABEL_NUMBER (XEXP (address.offset, 0))
4764 : -1);
4765 if (MOTOROLA)
4766 {
4767 /* Print the "offset(base" component. */
4768 if (labelno >= 0)
4769 asm_fprintf (file, "%LL%d(%Rpc,", labelno);
4770 else
4771 {
4772 if (address.offset)
4773 output_addr_const (file, address.offset);
4774
4775 putc ('(', file);
4776 if (address.base)
4777 fputs (M68K_REGNAME (REGNO (address.base)), file);
4778 }
4779 /* Print the ",index" component, if any. */
4780 if (address.index)
4781 {
4782 if (address.base)
4783 putc (',', file);
4784 fprintf (file, "%s.%c",
4785 M68K_REGNAME (REGNO (address.index)),
4786 GET_MODE (address.index) == HImode ? 'w' : 'l');
4787 if (address.scale != 1)
4788 fprintf (file, "*%d", address.scale);
4789 }
4790 putc (')', file);
4791 }
4792 else /* !MOTOROLA */
4793 {
4794 if (!address.offset && !address.index)
4795 fprintf (file, "%s@", M68K_REGNAME (REGNO (address.base)));
4796 else
4797 {
4798 /* Print the "base@(offset" component. */
4799 if (labelno >= 0)
4800 asm_fprintf (file, "%Rpc@(%LL%d", labelno);
4801 else
4802 {
4803 if (address.base)
4804 fputs (M68K_REGNAME (REGNO (address.base)), file);
4805 fprintf (file, "@(");
4806 if (address.offset)
4807 output_addr_const (file, address.offset);
4808 }
4809 /* Print the ",index" component, if any. */
4810 if (address.index)
4811 {
4812 fprintf (file, ",%s:%c",
4813 M68K_REGNAME (REGNO (address.index)),
4814 GET_MODE (address.index) == HImode ? 'w' : 'l');
4815 if (address.scale != 1)
4816 fprintf (file, ":%d", address.scale);
4817 }
4818 putc (')', file);
4819 }
4820 }
4821 }
4822 }
4823 \f
4824 /* Check for cases where a clr insn can be omitted from code using
4825 strict_low_part sets.  For example, the second clrl here is not needed:
4826 clrl d0; movw a0@+,d0; use d0; clrl d0; movw a0@+,d0; use d0; ...
4827
4828 MODE is the mode of this STRICT_LOW_PART set. FIRST_INSN is the clear
4829 insn we are checking for redundancy. TARGET is the register set by the
4830 clear insn. */
4831
4832 bool
4833 strict_low_part_peephole_ok (machine_mode mode, rtx_insn *first_insn,
4834 rtx target)
4835 {
4836 rtx_insn *p = first_insn;
4837
4838 while ((p = PREV_INSN (p)))
4839 {
4840 if (NOTE_INSN_BASIC_BLOCK_P (p))
4841 return false;
4842
4843 if (NOTE_P (p))
4844 continue;
4845
4846 /* If it isn't an insn, then give up. */
4847 if (!INSN_P (p))
4848 return false;
4849
4850 if (reg_set_p (target, p))
4851 {
4852 rtx set = single_set (p);
4853 rtx dest;
4854
4855 /* If it isn't an easy to recognize insn, then give up. */
4856 if (! set)
4857 return false;
4858
4859 dest = SET_DEST (set);
4860
4861 /* If this sets the entire target register to zero, then our
4862 first_insn is redundant. */
4863 if (rtx_equal_p (dest, target)
4864 && SET_SRC (set) == const0_rtx)
4865 return true;
4866 else if (GET_CODE (dest) == STRICT_LOW_PART
4867 && GET_CODE (XEXP (dest, 0)) == REG
4868 && REGNO (XEXP (dest, 0)) == REGNO (target)
4869 && (GET_MODE_SIZE (GET_MODE (XEXP (dest, 0)))
4870 <= GET_MODE_SIZE (mode)))
4871 /* This is a strict low part set which modifies less than
4872 we are using, so it is safe. */
4873 ;
4874 else
4875 return false;
4876 }
4877 }
4878
4879 return false;
4880 }
4881
4882 /* Operand predicates for implementing asymmetric pc-relative addressing
4883 on m68k. The m68k supports pc-relative addressing (mode 7, register 2)
4884 when used as a source operand, but not as a destination operand.
4885
4886 We model this by restricting the meaning of the basic predicates
4887 (general_operand, memory_operand, etc) to forbid the use of this
4888 addressing mode, and then define the following predicates that permit
4889 this addressing mode. These predicates can then be used for the
4890 source operands of the appropriate instructions.
4891
4892 n.b. While it is theoretically possible to change all machine patterns
4893 to use this addressing mode where permitted by the architecture,
4894 it has only been implemented for "common" cases: SImode, HImode, and
4895 QImode operands, and only for the principal operations that would
4896 require this addressing mode: data movement and simple integer operations.
4897
4898 In parallel with these new predicates, two new constraint letters
4899 were defined: 'S' and 'T'. 'S' is the -mpcrel analog of 'm'.
4900 'T' replaces 's' in the non-pcrel case. It is a no-op in the pcrel case.
4901 In the pcrel case 's' is only valid in combination with 'a' registers.
4902 See addsi3, subsi3, cmpsi, and movsi patterns for a better understanding
4903 of how these constraints are used.
4904
4905 The use of these predicates is strictly optional, though patterns that
4906 don't will cause an extra reload register to be allocated where one
4907 was not necessary:
4908
4909 lea (abc:w,%pc),%a0 ; need to reload address
4910 moveq &1,%d1 ; since write to pc-relative space
4911 movel %d1,%a0@ ; is not allowed
4912 ...
4913 lea (abc:w,%pc),%a1 ; no need to reload address here
4914 movel %a1@,%d0 ; since "movel (abc:w,%pc),%d0" is ok
4915
4916 For more info, consult tiemann@cygnus.com.
4917
4918
4919 All of the ugliness with predicates and constraints is due to the
4920 simple fact that the m68k does not allow a pc-relative addressing
4921 mode as a destination. gcc does not distinguish between source and
4922 destination addresses. Hence, if we claim that pc-relative address
4923 modes are valid, e.g. TARGET_LEGITIMATE_ADDRESS_P accepts them, then we
4924 end up with invalid code. To get around this problem, we left
4925 pc-relative modes as invalid addresses, and then added special
4926 predicates and constraints to accept them.
4927
4928 A cleaner way to handle this is to modify gcc to distinguish
4929 between source and destination addresses. We can then say that
4930 pc-relative is a valid source address but not a valid destination
4931 address, and hopefully avoid a lot of the predicate and constraint
4932 hackery. Unfortunately, this would be a pretty big change. It would
4933 be a useful change for a number of ports, but there aren't any current
4934 plans to undertake this.
4935
4936 ***************************************************************************/
4937
4938
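/* Output assembler code for a 32-bit AND.  Two shortcuts are caught
   below: a mask whose upper 16 bits are all ones only needs a 16-bit
   "and.w" (or "clr.w"), e.g. AND #0xffff0003 becomes "and.w #3,%0"
   in Motorola syntax; and a mask that clears a single bit becomes
   "bclr", e.g. AND #0xfffffffb becomes "bclr #2,%0".  */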
4939 const char *
4940 output_andsi3 (rtx *operands)
4941 {
4942 int logval;
4943 if (GET_CODE (operands[2]) == CONST_INT
4944 && (INTVAL (operands[2]) | 0xffff) == -1
4945 && (DATA_REG_P (operands[0])
4946 || offsettable_memref_p (operands[0]))
4947 && !TARGET_COLDFIRE)
4948 {
4949 if (GET_CODE (operands[0]) != REG)
4950 operands[0] = adjust_address (operands[0], HImode, 2);
4951 operands[2] = GEN_INT (INTVAL (operands[2]) & 0xffff);
4952 /* Do not delete a following tstl %0 insn; that would be incorrect. */
4953 CC_STATUS_INIT;
4954 if (operands[2] == const0_rtx)
4955 return "clr%.w %0";
4956 return "and%.w %2,%0";
4957 }
4958 if (GET_CODE (operands[2]) == CONST_INT
4959 && (logval = exact_log2 (~ INTVAL (operands[2]) & 0xffffffff)) >= 0
4960 && (DATA_REG_P (operands[0])
4961 || offsettable_memref_p (operands[0])))
4962 {
4963 if (DATA_REG_P (operands[0]))
4964 operands[1] = GEN_INT (logval);
4965 else
4966 {
4967 operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
4968 operands[1] = GEN_INT (logval % 8);
4969 }
4970 /* This does not set condition codes in a standard way. */
4971 CC_STATUS_INIT;
4972 return "bclr %1,%0";
4973 }
4974 return "and%.l %2,%0";
4975 }
4976
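/* Likewise for a 32-bit inclusive OR: a mask confined to the low 16
   bits uses "or.w", and a single-bit mask becomes "bset", e.g.
   OR #0x80 turns into "bset #7,%0".  */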
4977 const char *
4978 output_iorsi3 (rtx *operands)
4979 {
4980 register int logval;
4981 if (GET_CODE (operands[2]) == CONST_INT
4982 && INTVAL (operands[2]) >> 16 == 0
4983 && (DATA_REG_P (operands[0])
4984 || offsettable_memref_p (operands[0]))
4985 && !TARGET_COLDFIRE)
4986 {
4987 if (GET_CODE (operands[0]) != REG)
4988 operands[0] = adjust_address (operands[0], HImode, 2);
4989 /* Do not delete a following tstl %0 insn; that would be incorrect. */
4990 CC_STATUS_INIT;
4991 if (INTVAL (operands[2]) == 0xffff)
4992 return "mov%.w %2,%0";
4993 return "or%.w %2,%0";
4994 }
4995 if (GET_CODE (operands[2]) == CONST_INT
4996 && (logval = exact_log2 (INTVAL (operands[2]) & 0xffffffff)) >= 0
4997 && (DATA_REG_P (operands[0])
4998 || offsettable_memref_p (operands[0])))
4999 {
5000 if (DATA_REG_P (operands[0]))
5001 operands[1] = GEN_INT (logval);
5002 else
5003 {
5004 operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
5005 operands[1] = GEN_INT (logval % 8);
5006 }
5007 CC_STATUS_INIT;
5008 return "bset %1,%0";
5009 }
5010 return "or%.l %2,%0";
5011 }
5012
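/* Likewise for a 32-bit XOR: flipping all 16 low bits uses "not.w",
   and flipping a single bit becomes "bchg", e.g. XOR #0x8000 turns
   into "bchg #15,%0".  */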
5013 const char *
5014 output_xorsi3 (rtx *operands)
5015 {
5016 register int logval;
5017 if (GET_CODE (operands[2]) == CONST_INT
5018 && INTVAL (operands[2]) >> 16 == 0
5019 && (offsettable_memref_p (operands[0]) || DATA_REG_P (operands[0]))
5020 && !TARGET_COLDFIRE)
5021 {
5022 if (! DATA_REG_P (operands[0]))
5023 operands[0] = adjust_address (operands[0], HImode, 2);
5024 /* Do not delete a following tstl %0 insn; that would be incorrect. */
5025 CC_STATUS_INIT;
5026 if (INTVAL (operands[2]) == 0xffff)
5027 return "not%.w %0";
5028 return "eor%.w %2,%0";
5029 }
5030 if (GET_CODE (operands[2]) == CONST_INT
5031 && (logval = exact_log2 (INTVAL (operands[2]) & 0xffffffff)) >= 0
5032 && (DATA_REG_P (operands[0])
5033 || offsettable_memref_p (operands[0])))
5034 {
5035 if (DATA_REG_P (operands[0]))
5036 operands[1] = GEN_INT (logval);
5037 else
5038 {
5039 operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
5040 operands[1] = GEN_INT (logval % 8);
5041 }
5042 CC_STATUS_INIT;
5043 return "bchg %1,%0";
5044 }
5045 return "eor%.l %2,%0";
5046 }
5047
5048 /* Return the instruction that should be used for a call to address X,
5049 which is known to be in operand 0. */
5050
5051 const char *
5052 output_call (rtx x)
5053 {
5054 if (symbolic_operand (x, VOIDmode))
5055 return m68k_symbolic_call;
5056 else
5057 return "jsr %a0";
5058 }
5059
5060 /* Likewise sibling calls. */
5061
5062 const char *
5063 output_sibcall (rtx x)
5064 {
5065 if (symbolic_operand (x, VOIDmode))
5066 return m68k_symbolic_jump;
5067 else
5068 return "jmp %a0";
5069 }
5070
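/* Worker function for TARGET_ASM_OUTPUT_MI_THUNK.  Emit a thunk that
   adds DELTA (and, if VCALL_OFFSET is nonzero, the vtable slot at
   *(*this + VCALL_OFFSET)) to the incoming "this" pointer stored at
   4(%sp), then tail-calls FUNCTION.  */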
5071 static void
5072 m68k_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
5073 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
5074 tree function)
5075 {
5076 rtx this_slot, offset, addr, mem, tmp;
5077 rtx_insn *insn;
5078
5079 /* Avoid clobbering the struct value reg by using the
5080 static chain reg as a temporary. */
5081 tmp = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
5082
5083 /* Pretend to be a post-reload pass while generating rtl. */
5084 reload_completed = 1;
5085
5086 /* The "this" pointer is stored at 4(%sp). */
5087 this_slot = gen_rtx_MEM (Pmode, plus_constant (Pmode,
5088 stack_pointer_rtx, 4));
5089
5090 /* Add DELTA to THIS. */
5091 if (delta != 0)
5092 {
5093 /* Make the offset a legitimate operand for memory addition. */
5094 offset = GEN_INT (delta);
5095 if ((delta < -8 || delta > 8)
5096 && (TARGET_COLDFIRE || USE_MOVQ (delta)))
5097 {
5098 emit_move_insn (gen_rtx_REG (Pmode, D0_REG), offset);
5099 offset = gen_rtx_REG (Pmode, D0_REG);
5100 }
5101 emit_insn (gen_add3_insn (copy_rtx (this_slot),
5102 copy_rtx (this_slot), offset));
5103 }
5104
5105 /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
5106 if (vcall_offset != 0)
5107 {
5108 /* Set the static chain register to *THIS. */
5109 emit_move_insn (tmp, this_slot);
5110 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
5111
5112 /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
5113 addr = plus_constant (Pmode, tmp, vcall_offset);
5114 if (!m68k_legitimate_address_p (Pmode, addr, true))
5115 {
5116 emit_insn (gen_rtx_SET (tmp, addr));
5117 addr = tmp;
5118 }
5119
5120 /* Load the offset into %d0 and add it to THIS. */
5121 emit_move_insn (gen_rtx_REG (Pmode, D0_REG),
5122 gen_rtx_MEM (Pmode, addr));
5123 emit_insn (gen_add3_insn (copy_rtx (this_slot),
5124 copy_rtx (this_slot),
5125 gen_rtx_REG (Pmode, D0_REG)));
5126 }
5127
5128 /* Jump to the target function. Use a sibcall if direct jumps are
5129 allowed, otherwise load the address into a register first. */
5130 mem = DECL_RTL (function);
5131 if (!sibcall_operand (XEXP (mem, 0), VOIDmode))
5132 {
5133 gcc_assert (flag_pic);
5134
5135 if (!TARGET_SEP_DATA)
5136 {
5137 /* Use the static chain register as a temporary (call-clobbered)
5138 GOT pointer for this function. We can use the static chain
5139 register because it isn't live on entry to the thunk. */
5140 SET_REGNO (pic_offset_table_rtx, STATIC_CHAIN_REGNUM);
5141 emit_insn (gen_load_got (pic_offset_table_rtx));
5142 }
5143 legitimize_pic_address (XEXP (mem, 0), Pmode, tmp);
5144 mem = replace_equiv_address (mem, tmp);
5145 }
5146 insn = emit_call_insn (gen_sibcall (mem, const0_rtx));
5147 SIBLING_CALL_P (insn) = 1;
5148
5149 /* Run just enough of rest_of_compilation. */
5150 insn = get_insns ();
5151 split_all_insns_noflow ();
5152 final_start_function (insn, file, 1);
5153 final (insn, file, 1);
5154 final_end_function ();
5155
5156 /* Clean up the vars set above. */
5157 reload_completed = 0;
5158
5159 /* Restore the original PIC register. */
5160 if (flag_pic)
5161 SET_REGNO (pic_offset_table_rtx, PIC_REG);
5162 }
5163
5164 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
5165
5166 static rtx
5167 m68k_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
5168 int incoming ATTRIBUTE_UNUSED)
5169 {
5170 return gen_rtx_REG (Pmode, M68K_STRUCT_VALUE_REGNUM);
5171 }
5172
5173 /* Return nonzero if register old_reg can be renamed to register new_reg. */
5174 int
5175 m68k_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED,
5176 unsigned int new_reg)
5177 {
5178
5179 /* Interrupt functions can only use registers that have already been
5180 saved by the prologue, even if they would normally be
5181 call-clobbered. */
5182
5183 if ((m68k_get_function_kind (current_function_decl)
5184 == m68k_fk_interrupt_handler)
5185 && !df_regs_ever_live_p (new_reg))
5186 return 0;
5187
5188 return 1;
5189 }
5190
5191 /* Implement TARGET_HARD_REGNO_NREGS.
5192
5193 On the m68k, ordinary registers hold 32 bits worth;
5194 for the 68881 registers, a single register is always enough for
5195 anything that can be stored in them at all. */
5196
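/* E.g. DFmode occupies two integer registers (8 bytes / 4) but only a
   single 68881 register.  */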
5197 static unsigned int
5198 m68k_hard_regno_nregs (unsigned int regno, machine_mode mode)
5199 {
5200 if (regno >= 16)
5201 return GET_MODE_NUNITS (mode);
5202 return CEIL (GET_MODE_SIZE (mode), UNITS_PER_WORD);
5203 }
5204
5205 /* Implement TARGET_HARD_REGNO_MODE_OK.  On the 68000, the CPU
5206 registers can hold any mode, but we restrict the 68881 registers
5207 to floating-point modes.  */
5208
5209 static bool
5210 m68k_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
5211 {
5212 if (DATA_REGNO_P (regno))
5213 {
5214 /* Data registers can hold a value of any mode that fits in d0-d7 starting at REGNO.  */
5215 if (regno + GET_MODE_SIZE (mode) / 4 <= 8)
5216 return true;
5217 }
5218 else if (ADDRESS_REGNO_P (regno))
5219 {
5220 if (regno + GET_MODE_SIZE (mode) / 4 <= 16)
5221 return true;
5222 }
5223 else if (FP_REGNO_P (regno))
5224 {
5225 /* FPU registers can hold floats or complex floats whose component
5226 size is that of long double or smaller.  */
5227 if ((GET_MODE_CLASS (mode) == MODE_FLOAT
5228 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
5229 && GET_MODE_UNIT_SIZE (mode) <= TARGET_FP_REG_SIZE)
5230 return true;
5231 }
5232 return false;
5233 }
5234
5235 /* Implement TARGET_MODES_TIEABLE_P. */
5236
5237 static bool
5238 m68k_modes_tieable_p (machine_mode mode1, machine_mode mode2)
5239 {
5240 return (!TARGET_HARD_FLOAT
5241 || ((GET_MODE_CLASS (mode1) == MODE_FLOAT
5242 || GET_MODE_CLASS (mode1) == MODE_COMPLEX_FLOAT)
5243 == (GET_MODE_CLASS (mode2) == MODE_FLOAT
5244 || GET_MODE_CLASS (mode2) == MODE_COMPLEX_FLOAT)));
5245 }
5246
5247 /* Implement SECONDARY_RELOAD_CLASS. */
5248
5249 enum reg_class
5250 m68k_secondary_reload_class (enum reg_class rclass,
5251 machine_mode mode, rtx x)
5252 {
5253 int regno;
5254
5255 regno = true_regnum (x);
5256
5257 /* If one operand of a movqi is an address register, the other
5258 operand must be a general register or constant. Other types
5259 of operand must be reloaded through a data register. */
5260 if (GET_MODE_SIZE (mode) == 1
5261 && reg_classes_intersect_p (rclass, ADDR_REGS)
5262 && !(INT_REGNO_P (regno) || CONSTANT_P (x)))
5263 return DATA_REGS;
5264
5265 /* PC-relative addresses must be loaded into an address register first. */
5266 if (TARGET_PCREL
5267 && !reg_class_subset_p (rclass, ADDR_REGS)
5268 && symbolic_operand (x, VOIDmode))
5269 return ADDR_REGS;
5270
5271 return NO_REGS;
5272 }
5273
5274 /* Implement PREFERRED_RELOAD_CLASS. */
5275
5276 enum reg_class
5277 m68k_preferred_reload_class (rtx x, enum reg_class rclass)
5278 {
5279 enum reg_class secondary_class;
5280
5281 /* If RCLASS might need a secondary reload, try restricting it to
5282 a class that doesn't. */
5283 secondary_class = m68k_secondary_reload_class (rclass, GET_MODE (x), x);
5284 if (secondary_class != NO_REGS
5285 && reg_class_subset_p (secondary_class, rclass))
5286 return secondary_class;
5287
5288 /* Prefer to use moveq for in-range constants. */
5289 if (GET_CODE (x) == CONST_INT
5290 && reg_class_subset_p (DATA_REGS, rclass)
5291 && IN_RANGE (INTVAL (x), -0x80, 0x7f))
5292 return DATA_REGS;
5293
5294 /* ??? Do we really need this now? */
5295 if (GET_CODE (x) == CONST_DOUBLE
5296 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
5297 {
5298 if (TARGET_HARD_FLOAT && reg_class_subset_p (FP_REGS, rclass))
5299 return FP_REGS;
5300
5301 return NO_REGS;
5302 }
5303
5304 return rclass;
5305 }
5306
5307 /* Return floating point values in a 68881 register. This makes 68881 code
5308 a little bit faster. It also makes -msoft-float code incompatible with
5309 hard-float code, so people have to be careful not to mix the two.
5310 For ColdFire it was decided the ABI incompatibility is undesirable.
5311 If there is need for a hard-float ABI it is probably worth doing it
5312 properly and also passing function arguments in FP registers. */
5313 rtx
5314 m68k_libcall_value (machine_mode mode)
5315 {
5316 switch (mode) {
5317 case E_SFmode:
5318 case E_DFmode:
5319 case E_XFmode:
5320 if (TARGET_68881)
5321 return gen_rtx_REG (mode, FP0_REG);
5322 break;
5323 default:
5324 break;
5325 }
5326
5327 return gen_rtx_REG (mode, m68k_libcall_value_in_a0_p ? A0_REG : D0_REG);
5328 }
5329
5330 /* Location in which function value is returned.
5331 NOTE: Due to differences in ABIs, don't call this function directly,
5332 use FUNCTION_VALUE instead. */
5333 rtx
5334 m68k_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED)
5335 {
5336 machine_mode mode;
5337
5338 mode = TYPE_MODE (valtype);
5339 switch (mode) {
5340 case E_SFmode:
5341 case E_DFmode:
5342 case E_XFmode:
5343 if (TARGET_68881)
5344 return gen_rtx_REG (mode, FP0_REG);
5345 break;
5346 default:
5347 break;
5348 }
5349
5350 /* If the function returns a pointer, push that into %a0. */
5351 if (func && POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (func))))
5352 /* For compatibility with the large body of existing code which
5353 does not always properly declare external functions returning
5354 pointer types, the m68k/SVR4 convention is to copy the value
5355 returned for pointer functions from a0 to d0 in the function
5356 epilogue, so that callers that have neglected to properly
5357 declare the callee can still find the correct return value in
5358 d0. */
5359 return gen_rtx_PARALLEL
5360 (mode,
5361 gen_rtvec (2,
5362 gen_rtx_EXPR_LIST (VOIDmode,
5363 gen_rtx_REG (mode, A0_REG),
5364 const0_rtx),
5365 gen_rtx_EXPR_LIST (VOIDmode,
5366 gen_rtx_REG (mode, D0_REG),
5367 const0_rtx)));
5368 else if (POINTER_TYPE_P (valtype))
5369 return gen_rtx_REG (mode, A0_REG);
5370 else
5371 return gen_rtx_REG (mode, D0_REG);
5372 }
5373
5374 /* Worker function for TARGET_RETURN_IN_MEMORY. */
5375 #if M68K_HONOR_TARGET_STRICT_ALIGNMENT
5376 static bool
5377 m68k_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
5378 {
5379 machine_mode mode = TYPE_MODE (type);
5380
5381 if (mode == BLKmode)
5382 return true;
5383
5384 /* If TYPE's known alignment is less than the alignment of MODE that
5385 would contain the structure, then return in memory. We need to
5386 do so to maintain the compatibility between code compiled with
5387 -mstrict-align and that compiled with -mno-strict-align. */
5388 if (AGGREGATE_TYPE_P (type)
5389 && TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (mode))
5390 return true;
5391
5392 return false;
5393 }
5394 #endif
5395
5396 /* CPU to schedule the program for. */
5397 enum attr_cpu m68k_sched_cpu;
5398
5399 /* MAC to schedule the program for. */
5400 enum attr_mac m68k_sched_mac;
5401
5402 /* Operand type. */
5403 enum attr_op_type
5404 {
5405 /* No operand. */
5406 OP_TYPE_NONE,
5407
5408 /* Integer register. */
5409 OP_TYPE_RN,
5410
5411 /* FP register. */
5412 OP_TYPE_FPN,
5413
5414 /* Implicit mem reference (e.g. stack). */
5415 OP_TYPE_MEM1,
5416
5417 /* Memory without offset or indexing. EA modes 2, 3 and 4. */
5418 OP_TYPE_MEM234,
5419
5420 /* Memory with offset but without indexing. EA mode 5. */
5421 OP_TYPE_MEM5,
5422
5423 /* Memory with indexing. EA mode 6. */
5424 OP_TYPE_MEM6,
5425
5426 /* Memory referenced by absolute address. EA mode 7. */
5427 OP_TYPE_MEM7,
5428
5429 /* Immediate operand that doesn't require extension word. */
5430 OP_TYPE_IMM_Q,
5431
5432 /* Immediate 16 bit operand. */
5433 OP_TYPE_IMM_W,
5434
5435 /* Immediate 32 bit operand. */
5436 OP_TYPE_IMM_L
5437 };
5438
5439 /* Return type of memory ADDR_RTX refers to. */
5440 static enum attr_op_type
5441 sched_address_type (machine_mode mode, rtx addr_rtx)
5442 {
5443 struct m68k_address address;
5444
5445 if (symbolic_operand (addr_rtx, VOIDmode))
5446 return OP_TYPE_MEM7;
5447
5448 if (!m68k_decompose_address (mode, addr_rtx,
5449 reload_completed, &address))
5450 {
5451 gcc_assert (!reload_completed);
5452 /* Reload will likely fix the address by loading it into a register.  */
5453 return OP_TYPE_MEM234;
5454 }
5455
5456 if (address.scale != 0)
5457 return OP_TYPE_MEM6;
5458
5459 if (address.base != NULL_RTX)
5460 {
5461 if (address.offset == NULL_RTX)
5462 return OP_TYPE_MEM234;
5463
5464 return OP_TYPE_MEM5;
5465 }
5466
5467 gcc_assert (address.offset != NULL_RTX);
5468
5469 return OP_TYPE_MEM7;
5470 }
5471
5472 /* Return X or Y (depending on OPX_P) operand of INSN. */
5473 static rtx
5474 sched_get_operand (rtx_insn *insn, bool opx_p)
5475 {
5476 int i;
5477
5478 if (recog_memoized (insn) < 0)
5479 gcc_unreachable ();
5480
5481 extract_constrain_insn_cached (insn);
5482
5483 if (opx_p)
5484 i = get_attr_opx (insn);
5485 else
5486 i = get_attr_opy (insn);
5487
5488 if (i >= recog_data.n_operands)
5489 return NULL;
5490
5491 return recog_data.operand[i];
5492 }
5493
5494 /* Return type of INSN's operand X (if OPX_P) or operand Y (if !OPX_P).
5495 If ADDRESS_P is true, return type of memory location operand refers to. */
5496 static enum attr_op_type
5497 sched_attr_op_type (rtx_insn *insn, bool opx_p, bool address_p)
5498 {
5499 rtx op;
5500
5501 op = sched_get_operand (insn, opx_p);
5502
5503 if (op == NULL)
5504 {
5505 gcc_assert (!reload_completed);
5506 return OP_TYPE_RN;
5507 }
5508
5509 if (address_p)
5510 return sched_address_type (QImode, op);
5511
5512 if (memory_operand (op, VOIDmode))
5513 return sched_address_type (GET_MODE (op), XEXP (op, 0));
5514
5515 if (register_operand (op, VOIDmode))
5516 {
5517 if ((!reload_completed && FLOAT_MODE_P (GET_MODE (op)))
5518 || (reload_completed && FP_REG_P (op)))
5519 return OP_TYPE_FPN;
5520
5521 return OP_TYPE_RN;
5522 }
5523
5524 if (GET_CODE (op) == CONST_INT)
5525 {
5526 HOST_WIDE_INT ival;
5527
5528 ival = INTVAL (op);
5529
5530 /* Check for quick constants. */
5531 switch (get_attr_type (insn))
5532 {
5533 case TYPE_ALUQ_L:
5534 if (IN_RANGE (ival, 1, 8) || IN_RANGE (ival, -8, -1))
5535 return OP_TYPE_IMM_Q;
5536
5537 gcc_assert (!reload_completed);
5538 break;
5539
5540 case TYPE_MOVEQ_L:
5541 if (USE_MOVQ (ival))
5542 return OP_TYPE_IMM_Q;
5543
5544 gcc_assert (!reload_completed);
5545 break;
5546
5547 case TYPE_MOV3Q_L:
5548 if (valid_mov3q_const (ival))
5549 return OP_TYPE_IMM_Q;
5550
5551 gcc_assert (!reload_completed);
5552 break;
5553
5554 default:
5555 break;
5556 }
5557
5558 if (IN_RANGE (ival, -0x8000, 0x7fff))
5559 return OP_TYPE_IMM_W;
5560
5561 return OP_TYPE_IMM_L;
5562 }
5563
5564 if (GET_CODE (op) == CONST_DOUBLE)
5565 {
5566 switch (GET_MODE (op))
5567 {
5568 case E_SFmode:
5569 return OP_TYPE_IMM_W;
5570
5571 case E_VOIDmode:
5572 case E_DFmode:
5573 return OP_TYPE_IMM_L;
5574
5575 default:
5576 gcc_unreachable ();
5577 }
5578 }
5579
5580 if (GET_CODE (op) == CONST
5581 || symbolic_operand (op, VOIDmode)
5582 || LABEL_P (op))
5583 {
5584 switch (GET_MODE (op))
5585 {
5586 case E_QImode:
5587 return OP_TYPE_IMM_Q;
5588
5589 case E_HImode:
5590 return OP_TYPE_IMM_W;
5591
5592 case E_SImode:
5593 return OP_TYPE_IMM_L;
5594
5595 default:
5596 if (symbolic_operand (m68k_unwrap_symbol (op, false), VOIDmode))
5597 /* Just a guess. */
5598 return OP_TYPE_IMM_W;
5599
5600 return OP_TYPE_IMM_L;
5601 }
5602 }
5603
5604 gcc_assert (!reload_completed);
5605
5606 if (FLOAT_MODE_P (GET_MODE (op)))
5607 return OP_TYPE_FPN;
5608
5609 return OP_TYPE_RN;
5610 }
5611
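/* The quick-constant cases above correspond to the immediate ranges of
   the addq/subq, moveq and mov3q encodings.  A minimal stand-alone
   sketch of those range checks; example_quick_constant_p is a name
   invented for illustration.  */

static bool ATTRIBUTE_UNUSED
example_quick_constant_p (enum attr_type type, int ival)
{
  switch (type)
    {
    case TYPE_ALUQ_L:
      /* addq/subq encode 1..8; -8..-1 is handled by using the
         opposite instruction.  */
      return IN_RANGE (ival, 1, 8) || IN_RANGE (ival, -8, -1);

    case TYPE_MOVEQ_L:
      /* moveq sign-extends an 8-bit immediate.  */
      return IN_RANGE (ival, -128, 127);

    case TYPE_MOV3Q_L:
      /* mov3q (ColdFire ISA B) encodes -1 and 1..7.  */
      return ival == -1 || IN_RANGE (ival, 1, 7);

    default:
      return false;
    }
}
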
5612 /* Implement opx_type attribute.
5613 Return type of INSN's operand X.
5614 If ADDRESS_P is true, return type of memory location operand refers to. */
5615 enum attr_opx_type
5616 m68k_sched_attr_opx_type (rtx_insn *insn, int address_p)
5617 {
5618 switch (sched_attr_op_type (insn, true, address_p != 0))
5619 {
5620 case OP_TYPE_RN:
5621 return OPX_TYPE_RN;
5622
5623 case OP_TYPE_FPN:
5624 return OPX_TYPE_FPN;
5625
5626 case OP_TYPE_MEM1:
5627 return OPX_TYPE_MEM1;
5628
5629 case OP_TYPE_MEM234:
5630 return OPX_TYPE_MEM234;
5631
5632 case OP_TYPE_MEM5:
5633 return OPX_TYPE_MEM5;
5634
5635 case OP_TYPE_MEM6:
5636 return OPX_TYPE_MEM6;
5637
5638 case OP_TYPE_MEM7:
5639 return OPX_TYPE_MEM7;
5640
5641 case OP_TYPE_IMM_Q:
5642 return OPX_TYPE_IMM_Q;
5643
5644 case OP_TYPE_IMM_W:
5645 return OPX_TYPE_IMM_W;
5646
5647 case OP_TYPE_IMM_L:
5648 return OPX_TYPE_IMM_L;
5649
5650 default:
5651 gcc_unreachable ();
5652 }
5653 }
5654
5655 /* Implement opy_type attribute.
5656 Return type of INSN's operand Y.
5657 If ADDRESS_P is true, return type of memory location operand refers to. */
5658 enum attr_opy_type
5659 m68k_sched_attr_opy_type (rtx_insn *insn, int address_p)
5660 {
5661 switch (sched_attr_op_type (insn, false, address_p != 0))
5662 {
5663 case OP_TYPE_RN:
5664 return OPY_TYPE_RN;
5665
5666 case OP_TYPE_FPN:
5667 return OPY_TYPE_FPN;
5668
5669 case OP_TYPE_MEM1:
5670 return OPY_TYPE_MEM1;
5671
5672 case OP_TYPE_MEM234:
5673 return OPY_TYPE_MEM234;
5674
5675 case OP_TYPE_MEM5:
5676 return OPY_TYPE_MEM5;
5677
5678 case OP_TYPE_MEM6:
5679 return OPY_TYPE_MEM6;
5680
5681 case OP_TYPE_MEM7:
5682 return OPY_TYPE_MEM7;
5683
5684 case OP_TYPE_IMM_Q:
5685 return OPY_TYPE_IMM_Q;
5686
5687 case OP_TYPE_IMM_W:
5688 return OPY_TYPE_IMM_W;
5689
5690 case OP_TYPE_IMM_L:
5691 return OPY_TYPE_IMM_L;
5692
5693 default:
5694 gcc_unreachable ();
5695 }
5696 }
5697
5698 /* Return size of INSN in 16-bit words, as int. */
5699 static int
5700 sched_get_attr_size_int (rtx_insn *insn)
5701 {
5702 int size;
5703
5704 switch (get_attr_type (insn))
5705 {
5706 case TYPE_IGNORE:
5707 /* There should be no references to m68k_sched_attr_size for 'ignore'
5708 instructions. */
5709 gcc_unreachable ();
5710 return 0;
5711
5712 case TYPE_MUL_L:
5713 size = 2;
5714 break;
5715
5716 default:
5717 size = 1;
5718 break;
5719 }
5720
5721 switch (get_attr_opx_type (insn))
5722 {
5723 case OPX_TYPE_NONE:
5724 case OPX_TYPE_RN:
5725 case OPX_TYPE_FPN:
5726 case OPX_TYPE_MEM1:
5727 case OPX_TYPE_MEM234:
5728 case OPX_TYPE_IMM_Q:
5729 break;
5730
5731 case OPX_TYPE_MEM5:
5732 case OPX_TYPE_MEM6:
5733 /* Here we assume that most absolute references are short. */
5734 case OPX_TYPE_MEM7:
5735 case OPX_TYPE_IMM_W:
5736 ++size;
5737 break;
5738
5739 case OPX_TYPE_IMM_L:
5740 size += 2;
5741 break;
5742
5743 default:
5744 gcc_unreachable ();
5745 }
5746
5747 switch (get_attr_opy_type (insn))
5748 {
5749 case OPY_TYPE_NONE:
5750 case OPY_TYPE_RN:
5751 case OPY_TYPE_FPN:
5752 case OPY_TYPE_MEM1:
5753 case OPY_TYPE_MEM234:
5754 case OPY_TYPE_IMM_Q:
5755 break;
5756
5757 case OPY_TYPE_MEM5:
5758 case OPY_TYPE_MEM6:
5759 /* Here we assume that most absolute references are short. */
5760 case OPY_TYPE_MEM7:
5761 case OPY_TYPE_IMM_W:
5762 ++size;
5763 break;
5764
5765 case OPY_TYPE_IMM_L:
5766 size += 2;
5767 break;
5768
5769 default:
5770 gcc_unreachable ();
5771 }
5772
5773 if (size > 3)
5774 {
5775 gcc_assert (!reload_completed);
5776
5777 size = 3;
5778 }
5779
5780 return size;
5781 }
5782
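/* For example, under the accounting above a ColdFire "add.l #IMM32,%d0"
   is one opcode word plus a 32-bit immediate (IMM_L, two extension
   words), 3 words in total.  A condensed sketch of that accounting;
   example_insn_size is a name invented for illustration.  */

static int ATTRIBUTE_UNUSED
example_insn_size (int base_words, int opx_ext_words, int opy_ext_words)
{
  int size = base_words + opx_ext_words + opy_ext_words;

  /* No ColdFire instruction is longer than 3 words.  */
  return size > 3 ? 3 : size;
}
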
5783 /* Return size of INSN as attribute enum value. */
5784 enum attr_size
5785 m68k_sched_attr_size (rtx_insn *insn)
5786 {
5787 switch (sched_get_attr_size_int (insn))
5788 {
5789 case 1:
5790 return SIZE_1;
5791
5792 case 2:
5793 return SIZE_2;
5794
5795 case 3:
5796 return SIZE_3;
5797
5798 default:
5799 gcc_unreachable ();
5800 }
5801 }
5802
5803 /* Return type of INSN's operand X (if OPX_P) or operand Y, classified as
5804 OP_TYPE_RN (no memory), OP_TYPE_MEM1 (plain) or OP_TYPE_MEM6 (indexed). */
5805 static enum attr_op_type
5806 sched_get_opxy_mem_type (rtx_insn *insn, bool opx_p)
5807 {
5808 if (opx_p)
5809 {
5810 switch (get_attr_opx_type (insn))
5811 {
5812 case OPX_TYPE_NONE:
5813 case OPX_TYPE_RN:
5814 case OPX_TYPE_FPN:
5815 case OPX_TYPE_IMM_Q:
5816 case OPX_TYPE_IMM_W:
5817 case OPX_TYPE_IMM_L:
5818 return OP_TYPE_RN;
5819
5820 case OPX_TYPE_MEM1:
5821 case OPX_TYPE_MEM234:
5822 case OPX_TYPE_MEM5:
5823 case OPX_TYPE_MEM7:
5824 return OP_TYPE_MEM1;
5825
5826 case OPX_TYPE_MEM6:
5827 return OP_TYPE_MEM6;
5828
5829 default:
5830 gcc_unreachable ();
5831 }
5832 }
5833 else
5834 {
5835 switch (get_attr_opy_type (insn))
5836 {
5837 case OPY_TYPE_NONE:
5838 case OPY_TYPE_RN:
5839 case OPY_TYPE_FPN:
5840 case OPY_TYPE_IMM_Q:
5841 case OPY_TYPE_IMM_W:
5842 case OPY_TYPE_IMM_L:
5843 return OP_TYPE_RN;
5844
5845 case OPY_TYPE_MEM1:
5846 case OPY_TYPE_MEM234:
5847 case OPY_TYPE_MEM5:
5848 case OPY_TYPE_MEM7:
5849 return OP_TYPE_MEM1;
5850
5851 case OPY_TYPE_MEM6:
5852 return OP_TYPE_MEM6;
5853
5854 default:
5855 gcc_unreachable ();
5856 }
5857 }
5858 }
5859
5860 /* Implement op_mem attribute. */
5861 enum attr_op_mem
5862 m68k_sched_attr_op_mem (rtx_insn *insn)
5863 {
5864 enum attr_op_type opx;
5865 enum attr_op_type opy;
5866
5867 opx = sched_get_opxy_mem_type (insn, true);
5868 opy = sched_get_opxy_mem_type (insn, false);
5869
5870 if (opy == OP_TYPE_RN && opx == OP_TYPE_RN)
5871 return OP_MEM_00;
5872
5873 if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM1)
5874 {
5875 switch (get_attr_opx_access (insn))
5876 {
5877 case OPX_ACCESS_R:
5878 return OP_MEM_10;
5879
5880 case OPX_ACCESS_W:
5881 return OP_MEM_01;
5882
5883 case OPX_ACCESS_RW:
5884 return OP_MEM_11;
5885
5886 default:
5887 gcc_unreachable ();
5888 }
5889 }
5890
5891 if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM6)
5892 {
5893 switch (get_attr_opx_access (insn))
5894 {
5895 case OPX_ACCESS_R:
5896 return OP_MEM_I0;
5897
5898 case OPX_ACCESS_W:
5899 return OP_MEM_0I;
5900
5901 case OPX_ACCESS_RW:
5902 return OP_MEM_I1;
5903
5904 default:
5905 gcc_unreachable ();
5906 }
5907 }
5908
5909 if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_RN)
5910 return OP_MEM_10;
5911
5912 if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM1)
5913 {
5914 switch (get_attr_opx_access (insn))
5915 {
5916 case OPX_ACCESS_W:
5917 return OP_MEM_11;
5918
5919 default:
5920 gcc_assert (!reload_completed);
5921 return OP_MEM_11;
5922 }
5923 }
5924
5925 if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM6)
5926 {
5927 switch (get_attr_opx_access (insn))
5928 {
5929 case OPX_ACCESS_W:
5930 return OP_MEM_1I;
5931
5932 default:
5933 gcc_assert (!reload_completed);
5934 return OP_MEM_1I;
5935 }
5936 }
5937
5938 if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_RN)
5939 return OP_MEM_I0;
5940
5941 if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM1)
5942 {
5943 switch (get_attr_opx_access (insn))
5944 {
5945 case OPX_ACCESS_W:
5946 return OP_MEM_I1;
5947
5948 default:
5949 gcc_assert (!reload_completed);
5950 return OP_MEM_I1;
5951 }
5952 }
5953
5954 gcc_assert (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM6);
5955 gcc_assert (!reload_completed);
5956 return OP_MEM_I1;
5957 }
5958
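/* The OP_MEM_* values returned above encode an insn's memory activity
   as a <read, write> pair: '0' - no access, '1' - a plain access,
   'I' - an indexed access.  The illustrative decoder below makes the
   encoding explicit; example_op_mem_name and its argument convention
   are invented for illustration (two indexed accesses are approximated
   as OP_MEM_I1, matching the fallback above).  */

static const char * ATTRIBUTE_UNUSED
example_op_mem_name (int read_kind, int write_kind)
{
  /* Kinds: 0 - none, 1 - plain access, 2 - indexed access.  */
  static const char *const names[3][3] = {
    { "OP_MEM_00", "OP_MEM_01", "OP_MEM_0I" },
    { "OP_MEM_10", "OP_MEM_11", "OP_MEM_1I" },
    { "OP_MEM_I0", "OP_MEM_I1", "OP_MEM_I1" }
  };

  return names[read_kind][write_kind];
}
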
5959 /* Data for ColdFire V4 index bypass.
5960 Producer modifies register that is used as index in consumer with
5961 specified scale. */
5962 static struct
5963 {
5964 /* Producer instruction. */
5965 rtx pro;
5966
5967 /* Consumer instruction. */
5968 rtx con;
5969
5970 /* Scale of indexed memory access within consumer.
5971 Or zero if bypass should not be effective at the moment. */
5972 int scale;
5973 } sched_cfv4_bypass_data;
5974
5975 /* An empty state that is used in m68k_sched_adjust_cost. */
5976 static state_t sched_adjust_cost_state;
5977
5978 /* Implement adjust_cost scheduler hook.
5979 Return the adjusted COST of the dependency between DEF_INSN and INSN. */
5980 static int
5981 m68k_sched_adjust_cost (rtx_insn *insn, int, rtx_insn *def_insn, int cost,
5982 unsigned int)
5983 {
5984 int delay;
5985
5986 if (recog_memoized (def_insn) < 0
5987 || recog_memoized (insn) < 0)
5988 return cost;
5989
5990 if (sched_cfv4_bypass_data.scale == 1)
5991 /* Handle ColdFire V4 bypass for indexed address with 1x scale. */
5992 {
5993 /* haifa-sched.c: insn_cost () calls bypass_p () just before
5994 targetm.sched.adjust_cost (). Hence, we can be relatively sure
5995 that the data in sched_cfv4_bypass_data is up to date. */
5996 gcc_assert (sched_cfv4_bypass_data.pro == def_insn
5997 && sched_cfv4_bypass_data.con == insn);
5998
5999 if (cost < 3)
6000 cost = 3;
6001
6002 sched_cfv4_bypass_data.pro = NULL;
6003 sched_cfv4_bypass_data.con = NULL;
6004 sched_cfv4_bypass_data.scale = 0;
6005 }
6006 else
6007 gcc_assert (sched_cfv4_bypass_data.pro == NULL
6008 && sched_cfv4_bypass_data.con == NULL
6009 && sched_cfv4_bypass_data.scale == 0);
6010
6011 /* Don't try to issue INSN earlier than DFA permits.
6012 This is especially useful for instructions that write to memory,
6013 as their true dependence (default) latency is better to be set to 0
6014 to work around alias analysis limitations.
6015 This is, in fact, a machine independent tweak, so, probably,
6016 it should be moved to haifa-sched.c: insn_cost (). */
6017 delay = min_insn_conflict_delay (sched_adjust_cost_state, def_insn, insn);
6018 if (delay > cost)
6019 cost = delay;
6020
6021 return cost;
6022 }
6023
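/* In effect the hook above computes cost = MAX (cost, DFA delay): even a
   dependency whose latency was deliberately set to 0 cannot make an insn
   issue before the functional units are free.  A one-line sketch, with
   example_clamped_cost invented for illustration:  */

static int ATTRIBUTE_UNUSED
example_clamped_cost (int cost, int dfa_delay)
{
  /* Never report a cost below what the DFA would enforce anyway.  */
  return dfa_delay > cost ? dfa_delay : cost;
}
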
6024 /* Return maximal number of insns that can be scheduled on a single cycle. */
6025 static int
6026 m68k_sched_issue_rate (void)
6027 {
6028 switch (m68k_sched_cpu)
6029 {
6030 case CPU_CFV1:
6031 case CPU_CFV2:
6032 case CPU_CFV3:
6033 return 1;
6034
6035 case CPU_CFV4:
6036 return 2;
6037
6038 default:
6039 gcc_unreachable ();
6040 return 0;
6041 }
6042 }
6043
6044 /* Maximal length of an instruction for the current CPU, in words.
6045 E.g., it is 3 for any ColdFire core. */
6046 static int max_insn_size;
6047
6048 /* Data to model instruction buffer of CPU. */
6049 struct _sched_ib
6050 {
6051 /* True if the instruction buffer is modeled for the current CPU. */
6052 bool enabled_p;
6053
6054 /* Size of the instruction buffer in words. */
6055 int size;
6056
6057 /* Number of filled words in the instruction buffer. */
6058 int filled;
6059
6060 /* Additional information about instruction buffer for CPUs that have
6061 a buffer of instruction records, rather than a plain buffer
6062 of instruction words. */
6063 struct _sched_ib_records
6064 {
6065 /* Size of buffer in records. */
6066 int n_insns;
6067
6068 /* Array to hold data on adjustments made to the size of the buffer. */
6069 int *adjust;
6070
6071 /* Index of the above array. */
6072 int adjust_index;
6073 } records;
6074
6075 /* An insn that reserves (marks empty) one word in the instruction buffer. */
6076 rtx insn;
6077 };
6078
6079 static struct _sched_ib sched_ib;
6080
6081 /* ID of memory unit. */
6082 static int sched_mem_unit_code;
6083
6084 /* Implementation of the targetm.sched.variable_issue () hook.
6085 It is called after INSN was issued. It returns the number of insns
6086 that can possibly get scheduled on the current cycle.
6087 It is used here to determine the effect of INSN on the instruction
6088 buffer. */
6089 static int
6090 m68k_sched_variable_issue (FILE *sched_dump ATTRIBUTE_UNUSED,
6091 int sched_verbose ATTRIBUTE_UNUSED,
6092 rtx_insn *insn, int can_issue_more)
6093 {
6094 int insn_size;
6095
6096 if (recog_memoized (insn) >= 0 && get_attr_type (insn) != TYPE_IGNORE)
6097 {
6098 switch (m68k_sched_cpu)
6099 {
6100 case CPU_CFV1:
6101 case CPU_CFV2:
6102 insn_size = sched_get_attr_size_int (insn);
6103 break;
6104
6105 case CPU_CFV3:
6106 insn_size = sched_get_attr_size_int (insn);
6107
6108 /* ColdFire V3 and V4 cores have instruction buffers that can
6109 accumulate up to 8 instructions regardless of instructions'
6110 sizes. So we should take care not to "prefetch" 24 one-word
6111 or 12 two-word instructions.
6112 To model this behavior we temporarily decrease size of the
6113 buffer by (max_insn_size - insn_size) for next 7 instructions. */
6114 {
6115 int adjust;
6116
6117 adjust = max_insn_size - insn_size;
6118 sched_ib.size -= adjust;
6119
6120 if (sched_ib.filled > sched_ib.size)
6121 sched_ib.filled = sched_ib.size;
6122
6123 sched_ib.records.adjust[sched_ib.records.adjust_index] = adjust;
6124 }
6125
6126 ++sched_ib.records.adjust_index;
6127 if (sched_ib.records.adjust_index == sched_ib.records.n_insns)
6128 sched_ib.records.adjust_index = 0;
6129
6130 /* Undo adjustment we did 7 instructions ago. */
6131 sched_ib.size
6132 += sched_ib.records.adjust[sched_ib.records.adjust_index];
6133
6134 break;
6135
6136 case CPU_CFV4:
6137 gcc_assert (!sched_ib.enabled_p);
6138 insn_size = 0;
6139 break;
6140
6141 default:
6142 gcc_unreachable ();
6143 }
6144
6145 if (insn_size > sched_ib.filled)
6146 /* Scheduling for register pressure does not always take DFA into
6147 account. Work around the instruction buffer not being filled enough. */
6148 {
6149 gcc_assert (sched_pressure == SCHED_PRESSURE_WEIGHTED);
6150 insn_size = sched_ib.filled;
6151 }
6152
6153 --can_issue_more;
6154 }
6155 else if (GET_CODE (PATTERN (insn)) == ASM_INPUT
6156 || asm_noperands (PATTERN (insn)) >= 0)
6157 insn_size = sched_ib.filled;
6158 else
6159 insn_size = 0;
6160
6161 sched_ib.filled -= insn_size;
6162
6163 return can_issue_more;
6164 }
6165
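/* The ColdFire V3 case above can be read in isolation: each issued insn
   temporarily shrinks the buffer by its "slack" (max_insn_size -
   insn_size), and the shrink is undone once the insn falls out of the
   8-record window.  A stand-alone model of that bookkeeping; the
   example_ names are invented for illustration.  */

#define EXAMPLE_N_RECORDS 8
#define EXAMPLE_MAX_INSN_SIZE 3

static void ATTRIBUTE_UNUSED
example_account_cfv3_insn (int *size, int *filled, int *adjust_ring,
                           int *adjust_index, int insn_size)
{
  int adjust = EXAMPLE_MAX_INSN_SIZE - insn_size;

  /* Shrink the buffer by this insn's slack ...  */
  *size -= adjust;
  if (*filled > *size)
    *filled = *size;
  adjust_ring[*adjust_index] = adjust;

  /* ... and undo the adjustment recorded EXAMPLE_N_RECORDS - 1 insns
     ago.  */
  *adjust_index = (*adjust_index + 1) % EXAMPLE_N_RECORDS;
  *size += adjust_ring[*adjust_index];
}
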
6166 /* Return how many instructions the scheduler should look ahead to choose
6167 the best one. */
6168 static int
6169 m68k_sched_first_cycle_multipass_dfa_lookahead (void)
6170 {
6171 return m68k_sched_issue_rate () - 1;
6172 }
6173
6174 /* Implementation of targetm.sched.init_global () hook.
6175 It is invoked once per scheduling pass and is used here
6176 to initialize scheduler constants. */
6177 static void
6178 m68k_sched_md_init_global (FILE *sched_dump ATTRIBUTE_UNUSED,
6179 int sched_verbose ATTRIBUTE_UNUSED,
6180 int n_insns ATTRIBUTE_UNUSED)
6181 {
6182 /* Check that all instructions have DFA reservations and
6183 that all instructions can be issued from a clean state. */
6184 if (flag_checking)
6185 {
6186 rtx_insn *insn;
6187 state_t state;
6188
6189 state = alloca (state_size ());
6190
6191 for (insn = get_insns (); insn != NULL; insn = NEXT_INSN (insn))
6192 {
6193 if (INSN_P (insn) && recog_memoized (insn) >= 0)
6194 {
6195 gcc_assert (insn_has_dfa_reservation_p (insn));
6196
6197 state_reset (state);
6198 if (state_transition (state, insn) >= 0)
6199 gcc_unreachable ();
6200 }
6201 }
6202 }
6203
6204 /* Set up the target CPU. */
6205
6206 /* ColdFire V4 has a set of features to keep its instruction buffer full
6207 (e.g., a separate memory bus for instructions) and, hence, we do not model
6208 the buffer for this CPU. */
6209 sched_ib.enabled_p = (m68k_sched_cpu != CPU_CFV4);
6210
6211 switch (m68k_sched_cpu)
6212 {
6213 case CPU_CFV4:
6214 sched_ib.filled = 0;
6215
6216 /* FALLTHRU */
6217
6218 case CPU_CFV1:
6219 case CPU_CFV2:
6220 max_insn_size = 3;
6221 sched_ib.records.n_insns = 0;
6222 sched_ib.records.adjust = NULL;
6223 break;
6224
6225 case CPU_CFV3:
6226 max_insn_size = 3;
6227 sched_ib.records.n_insns = 8;
6228 sched_ib.records.adjust = XNEWVEC (int, sched_ib.records.n_insns);
6229 break;
6230
6231 default:
6232 gcc_unreachable ();
6233 }
6234
6235 sched_mem_unit_code = get_cpu_unit_code ("cf_mem1");
6236
6237 sched_adjust_cost_state = xmalloc (state_size ());
6238 state_reset (sched_adjust_cost_state);
6239
6240 start_sequence ();
6241 emit_insn (gen_ib ());
6242 sched_ib.insn = get_insns ();
6243 end_sequence ();
6244 }
6245
6246 /* Scheduling pass is now finished. Free/reset static variables. */
6247 static void
6248 m68k_sched_md_finish_global (FILE *dump ATTRIBUTE_UNUSED,
6249 int verbose ATTRIBUTE_UNUSED)
6250 {
6251 sched_ib.insn = NULL;
6252
6253 free (sched_adjust_cost_state);
6254 sched_adjust_cost_state = NULL;
6255
6256 sched_mem_unit_code = 0;
6257
6258 free (sched_ib.records.adjust);
6259 sched_ib.records.adjust = NULL;
6260 sched_ib.records.n_insns = 0;
6261 max_insn_size = 0;
6262 }
6263
6264 /* Implementation of targetm.sched.init () hook.
6265 It is invoked each time the scheduler starts on a new block (basic block or
6266 extended basic block). */
6267 static void
6268 m68k_sched_md_init (FILE *sched_dump ATTRIBUTE_UNUSED,
6269 int sched_verbose ATTRIBUTE_UNUSED,
6270 int n_insns ATTRIBUTE_UNUSED)
6271 {
6272 switch (m68k_sched_cpu)
6273 {
6274 case CPU_CFV1:
6275 case CPU_CFV2:
6276 sched_ib.size = 6;
6277 break;
6278
6279 case CPU_CFV3:
6280 sched_ib.size = sched_ib.records.n_insns * max_insn_size;
6281
6282 memset (sched_ib.records.adjust, 0,
6283 sched_ib.records.n_insns * sizeof (*sched_ib.records.adjust));
6284 sched_ib.records.adjust_index = 0;
6285 break;
6286
6287 case CPU_CFV4:
6288 gcc_assert (!sched_ib.enabled_p);
6289 sched_ib.size = 0;
6290 break;
6291
6292 default:
6293 gcc_unreachable ();
6294 }
6295
6296 if (sched_ib.enabled_p)
6297 /* haifa-sched.c: schedule_block () calls advance_cycle () just before
6298 the first cycle. Work around that. */
6299 sched_ib.filled = -2;
6300 }
6301
6302 /* Implementation of targetm.sched.dfa_pre_advance_cycle () hook.
6303 It is invoked just before the current cycle finishes and is used here
6304 to track whether the instruction buffer got its two words this cycle. */
6305 static void
6306 m68k_sched_dfa_pre_advance_cycle (void)
6307 {
6308 if (!sched_ib.enabled_p)
6309 return;
6310
6311 if (!cpu_unit_reservation_p (curr_state, sched_mem_unit_code))
6312 {
6313 sched_ib.filled += 2;
6314
6315 if (sched_ib.filled > sched_ib.size)
6316 sched_ib.filled = sched_ib.size;
6317 }
6318 }
6319
6320 /* Implementation of targetm.sched.dfa_post_advance_cycle () hook.
6321 It is invoked just after a new cycle begins and is used here
6322 to set up the number of filled words in the instruction buffer so that
6323 instructions which won't have all their words prefetched will be
6324 stalled for a cycle. */
6325 static void
6326 m68k_sched_dfa_post_advance_cycle (void)
6327 {
6328 int i;
6329
6330 if (!sched_ib.enabled_p)
6331 return;
6332
6333 /* Set up the number of prefetched instruction words in the instruction
6334 buffer. */
6335 i = max_insn_size - sched_ib.filled;
6336
6337 while (--i >= 0)
6338 {
6339 if (state_transition (curr_state, sched_ib.insn) >= 0)
6340 /* Pick up scheduler state. */
6341 ++sched_ib.filled;
6342 }
6343 }
6344
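/* Together with m68k_sched_dfa_pre_advance_cycle above, this models a
   fetch unit that delivers at most two instruction words per cycle when
   the memory unit is idle and never fills the buffer beyond its
   capacity.  A compact sketch of the per-cycle refill; the example_
   name is invented for illustration.  */

static int ATTRIBUTE_UNUSED
example_refill_buffer (int filled, int size, bool mem_unit_busy_p)
{
  if (!mem_unit_busy_p)
    {
      /* Two instruction words are fetched this cycle ...  */
      filled += 2;

      /* ... but the buffer cannot be filled beyond its capacity.  */
      if (filled > size)
        filled = size;
    }

  return filled;
}
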
6345 /* Return X or Y (depending on OPX_P) operand of INSN,
6346 if it is an integer register, or NULL otherwise. */
6347 static rtx
6348 sched_get_reg_operand (rtx_insn *insn, bool opx_p)
6349 {
6350 rtx op = NULL;
6351
6352 if (opx_p)
6353 {
6354 if (get_attr_opx_type (insn) == OPX_TYPE_RN)
6355 {
6356 op = sched_get_operand (insn, true);
6357 gcc_assert (op != NULL);
6358
6359 if (!reload_completed && !REG_P (op))
6360 return NULL;
6361 }
6362 }
6363 else
6364 {
6365 if (get_attr_opy_type (insn) == OPY_TYPE_RN)
6366 {
6367 op = sched_get_operand (insn, false);
6368 gcc_assert (op != NULL);
6369
6370 if (!reload_completed && !REG_P (op))
6371 return NULL;
6372 }
6373 }
6374
6375 return op;
6376 }
6377
6378 /* Return true, if X or Y (depending on OPX_P) operand of INSN
6379 is a MEM. */
6380 static bool
6381 sched_mem_operand_p (rtx_insn *insn, bool opx_p)
6382 {
6383 switch (sched_get_opxy_mem_type (insn, opx_p))
6384 {
6385 case OP_TYPE_MEM1:
6386 case OP_TYPE_MEM6:
6387 return true;
6388
6389 default:
6390 return false;
6391 }
6392 }
6393
6394 /* Return the MEM operand of INSN that is read (if MUST_READ_P)
6395 and/or written (if MUST_WRITE_P). Such an operand must exist. */
6396 static rtx
6397 sched_get_mem_operand (rtx_insn *insn, bool must_read_p, bool must_write_p)
6398 {
6399 bool opx_p;
6400 bool opy_p;
6401
6402 opx_p = false;
6403 opy_p = false;
6404
6405 if (must_read_p)
6406 {
6407 opx_p = true;
6408 opy_p = true;
6409 }
6410
6411 if (must_write_p)
6412 {
6413 opx_p = true;
6414 opy_p = false;
6415 }
6416
6417 if (opy_p && sched_mem_operand_p (insn, false))
6418 return sched_get_operand (insn, false);
6419
6420 if (opx_p && sched_mem_operand_p (insn, true))
6421 return sched_get_operand (insn, true);
6422
6423 gcc_unreachable ();
6424 return NULL;
6425 }
6426
6427 /* Return non-zero if PRO modifies register used as part of
6428 address in CON. */
6429 int
6430 m68k_sched_address_bypass_p (rtx_insn *pro, rtx_insn *con)
6431 {
6432 rtx pro_x;
6433 rtx con_mem_read;
6434
6435 pro_x = sched_get_reg_operand (pro, true);
6436 if (pro_x == NULL)
6437 return 0;
6438
6439 con_mem_read = sched_get_mem_operand (con, true, false);
6440 gcc_assert (con_mem_read != NULL);
6441
6442 if (reg_mentioned_p (pro_x, con_mem_read))
6443 return 1;
6444
6445 return 0;
6446 }
6447
6448 /* Helper function for m68k_sched_indexed_address_bypass_p.
6449 If PRO modifies a register used as an index in CON, return the scale
6450 of the indexed memory access in CON. Return zero otherwise. */
6451 static int
6452 sched_get_indexed_address_scale (rtx_insn *pro, rtx_insn *con)
6453 {
6454 rtx reg;
6455 rtx mem;
6456 struct m68k_address address;
6457
6458 reg = sched_get_reg_operand (pro, true);
6459 if (reg == NULL)
6460 return 0;
6461
6462 mem = sched_get_mem_operand (con, true, false);
6463 gcc_assert (mem != NULL && MEM_P (mem));
6464
6465 if (!m68k_decompose_address (GET_MODE (mem), XEXP (mem, 0), reload_completed,
6466 &address))
6467 gcc_unreachable ();
6468
6469 if (REGNO (reg) == REGNO (address.index))
6470 {
6471 gcc_assert (address.scale != 0);
6472 return address.scale;
6473 }
6474
6475 return 0;
6476 }
6477
6478 /* Return non-zero if PRO modifies register used
6479 as index with scale 2 or 4 in CON. */
6480 int
6481 m68k_sched_indexed_address_bypass_p (rtx_insn *pro, rtx_insn *con)
6482 {
6483 gcc_assert (sched_cfv4_bypass_data.pro == NULL
6484 && sched_cfv4_bypass_data.con == NULL
6485 && sched_cfv4_bypass_data.scale == 0);
6486
6487 switch (sched_get_indexed_address_scale (pro, con))
6488 {
6489 case 1:
6490 /* We can't have a variable latency bypass, so
6491 remember to adjust the insn cost in adjust_cost hook. */
6492 sched_cfv4_bypass_data.pro = pro;
6493 sched_cfv4_bypass_data.con = con;
6494 sched_cfv4_bypass_data.scale = 1;
6495 return 0;
6496
6497 case 2:
6498 case 4:
6499 return 1;
6500
6501 default:
6502 return 0;
6503 }
6504 }
6505
6506 /* We generate a two-instruction program at M_TRAMP:
6507 movea.l &CHAIN_VALUE,%a0
6508 jmp FNADDR
6509 where %a0 can be modified by changing STATIC_CHAIN_REGNUM. */
6510
6511 static void
6512 m68k_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
6513 {
6514 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
6515 rtx mem;
6516
6517 gcc_assert (ADDRESS_REGNO_P (STATIC_CHAIN_REGNUM));
6518
6519 mem = adjust_address (m_tramp, HImode, 0);
6520 emit_move_insn (mem, GEN_INT (0x207C + ((STATIC_CHAIN_REGNUM - 8) << 9)));
6521 mem = adjust_address (m_tramp, SImode, 2);
6522 emit_move_insn (mem, chain_value);
6523
6524 mem = adjust_address (m_tramp, HImode, 6);
6525 emit_move_insn (mem, GEN_INT (0x4EF9));
6526 mem = adjust_address (m_tramp, SImode, 8);
6527 emit_move_insn (mem, fnaddr);
6528
6529 FINALIZE_TRAMPOLINE (XEXP (m_tramp, 0));
6530 }
6531
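/* For the default STATIC_CHAIN_REGNUM (%a0) the 12-byte trampoline built
   above looks as follows (big-endian memory order):

     offset 0: 0x207c          movea.l #<chain_value>,%a0
     offset 2: <chain_value>   32-bit immediate
     offset 6: 0x4ef9          jmp (<fnaddr>).l
     offset 8: <fnaddr>        32-bit absolute address

   The sketch below shows how the first opcode word encodes the chain
   register in bits 11-9 of the movea.l instruction; example_movea_opcode
   is a name invented for illustration.  */

static unsigned short ATTRIBUTE_UNUSED
example_movea_opcode (int regno)
{
  /* REGNO is a GCC hard register number for an address register
     (8..15); the hardware encoding uses 0..7.  */
  return 0x207C + ((regno - 8) << 9);
}
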
6532 /* On the 68000, the RTS insn cannot pop anything.
6533 On the 68010, the RTD insn may be used to pop them if the number
6534 of args is fixed, but if the number is variable then the caller
6535 must pop them all. RTD can't be used for library calls now
6536 because the library is compiled with the Unix compiler.
6537 Use of RTD is a selectable option, since it is incompatible with
6538 standard Unix calling sequences. If the option is not selected,
6539 the caller must always pop the args. */
6540
6541 static poly_int64
6542 m68k_return_pops_args (tree fundecl, tree funtype, poly_int64 size)
6543 {
6544 return ((TARGET_RTD
6545 && (!fundecl
6546 || TREE_CODE (fundecl) != IDENTIFIER_NODE)
6547 && (!stdarg_p (funtype)))
6548 ? (HOST_WIDE_INT) size : 0);
6549 }
6550
6551 /* Make sure everything's fine if we *don't* have a given processor.
6552 This assumes that putting a register in fixed_regs will keep the
6553 compiler's mitts completely off it. We don't bother to zero it out
6554 of register classes. */
6555
6556 static void
6557 m68k_conditional_register_usage (void)
6558 {
6559 int i;
6560 HARD_REG_SET x;
6561 if (!TARGET_HARD_FLOAT)
6562 {
6563 COPY_HARD_REG_SET (x, reg_class_contents[(int)FP_REGS]);
6564 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
6565 if (TEST_HARD_REG_BIT (x, i))
6566 fixed_regs[i] = call_used_regs[i] = 1;
6567 }
6568 if (flag_pic)
6569 fixed_regs[PIC_REG] = call_used_regs[PIC_REG] = 1;
6570 }
6571
6572 static void
6573 m68k_init_sync_libfuncs (void)
6574 {
6575 init_sync_libfuncs (UNITS_PER_WORD);
6576 }
6577
6578 /* Implements EPILOGUE_USES. All registers are live on exit from an
6579 interrupt routine. */
6580 bool
6581 m68k_epilogue_uses (int regno ATTRIBUTE_UNUSED)
6582 {
6583 return (reload_completed
6584 && (m68k_get_function_kind (current_function_decl)
6585 == m68k_fk_interrupt_handler));
6586 }
6587
6588
6589 /* Implement TARGET_C_EXCESS_PRECISION.
6590
6591 Set the value of FLT_EVAL_METHOD in float.h. When using 68040 fp
6592 instructions, we get proper intermediate rounding, otherwise we
6593 get extended precision results. */
6594
6595 static enum flt_eval_method
6596 m68k_excess_precision (enum excess_precision_type type)
6597 {
6598 switch (type)
6599 {
6600 case EXCESS_PRECISION_TYPE_FAST:
6601 /* The fastest type to promote to will always be the native type,
6602 whether that occurs with implicit excess precision or
6603 otherwise. */
6604 return FLT_EVAL_METHOD_PROMOTE_TO_FLOAT;
6605 case EXCESS_PRECISION_TYPE_STANDARD:
6606 case EXCESS_PRECISION_TYPE_IMPLICIT:
6607 /* Otherwise, the excess precision we want when we are
6608 in a standards-compliant mode and the implicit precision we
6609 provide can be identical. */
6610 if (TARGET_68040 || ! TARGET_68881)
6611 return FLT_EVAL_METHOD_PROMOTE_TO_FLOAT;
6612
6613 return FLT_EVAL_METHOD_PROMOTE_TO_LONG_DOUBLE;
6614 default:
6615 gcc_unreachable ();
6616 }
6617 return FLT_EVAL_METHOD_UNPREDICTABLE;
6618 }
6619
6620 /* Implement PUSH_ROUNDING. On the 680x0, sp@- in a byte insn really pushes
6621 a word. On the ColdFire, sp@- in a byte insn pushes just a byte. */
6622
6623 poly_int64
6624 m68k_push_rounding (poly_int64 bytes)
6625 {
6626 if (TARGET_COLDFIRE)
6627 return bytes;
6628 return (bytes + 1) & ~1;
6629 }
6630
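/* For instance, pushing a single byte reserves (1 + 1) & ~1 == 2 bytes
   of stack space on a 680x0, but only 1 byte on ColdFire.  */
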
6631 /* Implement TARGET_PROMOTE_FUNCTION_MODE. */
6632
6633 static machine_mode
6634 m68k_promote_function_mode (const_tree type, machine_mode mode,
6635 int *punsignedp ATTRIBUTE_UNUSED,
6636 const_tree fntype ATTRIBUTE_UNUSED,
6637 int for_return)
6638 {
6639 /* Promote libcall arguments narrower than int to match the normal C
6640 ABI (for which promotions are handled via
6641 TARGET_PROMOTE_PROTOTYPES). */
6642 if (type == NULL_TREE && !for_return && (mode == QImode || mode == HImode))
6643 return SImode;
6644 return mode;
6645 }
6646
6647 #include "gt-m68k.h"