]> git.ipfire.org Git - thirdparty/gcc.git/blob - gcc/config/m68k/m68k.c
Use function_arg_info for TARGET_FUNCTION_(INCOMING_)ARG
[thirdparty/gcc.git] / gcc / config / m68k / m68k.c
1 /* Subroutines for insn-output.c for Motorola 68000 family.
2 Copyright (C) 1987-2019 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
9 any later version.
10
11 GCC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #define IN_TARGET_CODE 1
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "backend.h"
26 #include "cfghooks.h"
27 #include "tree.h"
28 #include "stringpool.h"
29 #include "attribs.h"
30 #include "rtl.h"
31 #include "df.h"
32 #include "alias.h"
33 #include "fold-const.h"
34 #include "calls.h"
35 #include "stor-layout.h"
36 #include "varasm.h"
37 #include "regs.h"
38 #include "insn-config.h"
39 #include "conditions.h"
40 #include "output.h"
41 #include "insn-attr.h"
42 #include "recog.h"
43 #include "diagnostic-core.h"
44 #include "flags.h"
45 #include "expmed.h"
46 #include "dojump.h"
47 #include "explow.h"
48 #include "memmodel.h"
49 #include "emit-rtl.h"
50 #include "stmt.h"
51 #include "expr.h"
52 #include "reload.h"
53 #include "tm_p.h"
54 #include "target.h"
55 #include "debug.h"
56 #include "cfgrtl.h"
57 #include "cfganal.h"
58 #include "lcm.h"
59 #include "cfgbuild.h"
60 #include "cfgcleanup.h"
61 /* ??? Need to add a dependency between m68k.o and sched-int.h. */
62 #include "sched-int.h"
63 #include "insn-codes.h"
64 #include "opts.h"
65 #include "optabs.h"
66 #include "builtins.h"
67 #include "rtl-iter.h"
68 #include "toplev.h"
69
70 /* This file should be included last. */
71 #include "target-def.h"
72
/* Map each hard register number to its register class: entries 0-7 are
   the data registers D0-D7, 8-15 the address registers A0-A7, and
   16-23 the floating-point registers FP0-FP7.  The final entry is
   presumably the fake argument pointer -- TODO confirm against the
   register definitions in m68k.h.  */
enum reg_class regno_reg_class[] =
{
  DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
  DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
  ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
  ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  ADDR_REGS
};
83
84
85 /* The minimum number of integer registers that we want to save with the
86 movem instruction. Using two movel instructions instead of a single
87 moveml is about 15% faster for the 68020 and 68030 at no expense in
88 code size. */
89 #define MIN_MOVEM_REGS 3
90
91 /* The minimum number of floating point registers that we want to save
92 with the fmovem instruction. */
93 #define MIN_FMOVEM_REGS 1
94
/* Structure describing stack frame layout.  Filled in by
   m68k_compute_frame_layout.  */
struct m68k_frame
{
  /* Stack pointer to frame pointer offset.  */
  HOST_WIDE_INT offset;

  /* Offset of FPU registers.  */
  HOST_WIDE_INT foffset;

  /* Frame size in bytes (rounded up).  */
  HOST_WIDE_INT size;

  /* Data and address register.  REG_NO is the number of saved
     registers; bit I of REG_MASK is set if D0_REG + I is saved.  */
  int reg_no;
  unsigned int reg_mask;

  /* FPU registers.  FPU_NO is the number of saved FP registers;
     bit I of FPU_MASK is set if FP0_REG + I is saved.  */
  int fpu_no;
  unsigned int fpu_mask;

  /* Offsets relative to ARG_POINTER.  */
  HOST_WIDE_INT frame_pointer_offset;
  HOST_WIDE_INT stack_pointer_offset;

  /* Function which the above information refers to, used to cache the
     layout once reload has completed.  */
  int funcdef_no;
};
122
123 /* Current frame information calculated by m68k_compute_frame_layout(). */
124 static struct m68k_frame current_frame;
125
126 /* Structure describing an m68k address.
127
128 If CODE is UNKNOWN, the address is BASE + INDEX * SCALE + OFFSET,
129 with null fields evaluating to 0. Here:
130
131 - BASE satisfies m68k_legitimate_base_reg_p
132 - INDEX satisfies m68k_legitimate_index_reg_p
133 - OFFSET satisfies m68k_legitimate_constant_address_p
134
135 INDEX is either HImode or SImode. The other fields are SImode.
136
137 If CODE is PRE_DEC, the address is -(BASE). If CODE is POST_INC,
138 the address is (BASE)+. */
struct m68k_address {
  /* UNKNOWN for a BASE + INDEX * SCALE + OFFSET address, or PRE_DEC /
     POST_INC (see the comment above).  */
  enum rtx_code code;
  /* Base register, or null.  */
  rtx base;
  /* Index register (HImode or SImode), or null.  */
  rtx index;
  /* Constant address component, or null.  */
  rtx offset;
  /* Scale factor applied to INDEX.  */
  int scale;
};
146
147 static int m68k_sched_adjust_cost (rtx_insn *, int, rtx_insn *, int,
148 unsigned int);
149 static int m68k_sched_issue_rate (void);
150 static int m68k_sched_variable_issue (FILE *, int, rtx_insn *, int);
151 static void m68k_sched_md_init_global (FILE *, int, int);
152 static void m68k_sched_md_finish_global (FILE *, int);
153 static void m68k_sched_md_init (FILE *, int, int);
154 static void m68k_sched_dfa_pre_advance_cycle (void);
155 static void m68k_sched_dfa_post_advance_cycle (void);
156 static int m68k_sched_first_cycle_multipass_dfa_lookahead (void);
157
158 static bool m68k_can_eliminate (const int, const int);
159 static void m68k_conditional_register_usage (void);
160 static bool m68k_legitimate_address_p (machine_mode, rtx, bool);
161 static void m68k_option_override (void);
162 static void m68k_override_options_after_change (void);
163 static rtx find_addr_reg (rtx);
164 static const char *singlemove_string (rtx *);
165 static void m68k_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
166 HOST_WIDE_INT, tree);
167 static rtx m68k_struct_value_rtx (tree, int);
168 static tree m68k_handle_fndecl_attribute (tree *node, tree name,
169 tree args, int flags,
170 bool *no_add_attrs);
171 static void m68k_compute_frame_layout (void);
172 static bool m68k_save_reg (unsigned int regno, bool interrupt_handler);
173 static bool m68k_ok_for_sibcall_p (tree, tree);
174 static bool m68k_tls_symbol_p (rtx);
175 static rtx m68k_legitimize_address (rtx, rtx, machine_mode);
176 static bool m68k_rtx_costs (rtx, machine_mode, int, int, int *, bool);
177 #if M68K_HONOR_TARGET_STRICT_ALIGNMENT
178 static bool m68k_return_in_memory (const_tree, const_tree);
179 #endif
180 static void m68k_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
181 static void m68k_trampoline_init (rtx, tree, rtx);
182 static poly_int64 m68k_return_pops_args (tree, tree, poly_int64);
183 static rtx m68k_delegitimize_address (rtx);
184 static void m68k_function_arg_advance (cumulative_args_t, machine_mode,
185 const_tree, bool);
186 static rtx m68k_function_arg (cumulative_args_t, const function_arg_info &);
187 static bool m68k_cannot_force_const_mem (machine_mode mode, rtx x);
188 static bool m68k_output_addr_const_extra (FILE *, rtx);
189 static void m68k_init_sync_libfuncs (void) ATTRIBUTE_UNUSED;
190 static enum flt_eval_method
191 m68k_excess_precision (enum excess_precision_type);
192 static unsigned int m68k_hard_regno_nregs (unsigned int, machine_mode);
193 static bool m68k_hard_regno_mode_ok (unsigned int, machine_mode);
194 static bool m68k_modes_tieable_p (machine_mode, machine_mode);
195 static machine_mode m68k_promote_function_mode (const_tree, machine_mode,
196 int *, const_tree, int);
197 \f
198 /* Initialize the GCC target structure. */
199
200 #if INT_OP_GROUP == INT_OP_DOT_WORD
201 #undef TARGET_ASM_ALIGNED_HI_OP
202 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
203 #endif
204
205 #if INT_OP_GROUP == INT_OP_NO_DOT
206 #undef TARGET_ASM_BYTE_OP
207 #define TARGET_ASM_BYTE_OP "\tbyte\t"
208 #undef TARGET_ASM_ALIGNED_HI_OP
209 #define TARGET_ASM_ALIGNED_HI_OP "\tshort\t"
210 #undef TARGET_ASM_ALIGNED_SI_OP
211 #define TARGET_ASM_ALIGNED_SI_OP "\tlong\t"
212 #endif
213
214 #if INT_OP_GROUP == INT_OP_DC
215 #undef TARGET_ASM_BYTE_OP
216 #define TARGET_ASM_BYTE_OP "\tdc.b\t"
217 #undef TARGET_ASM_ALIGNED_HI_OP
218 #define TARGET_ASM_ALIGNED_HI_OP "\tdc.w\t"
219 #undef TARGET_ASM_ALIGNED_SI_OP
220 #define TARGET_ASM_ALIGNED_SI_OP "\tdc.l\t"
221 #endif
222
223 #undef TARGET_ASM_UNALIGNED_HI_OP
224 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
225 #undef TARGET_ASM_UNALIGNED_SI_OP
226 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
227
228 #undef TARGET_ASM_OUTPUT_MI_THUNK
229 #define TARGET_ASM_OUTPUT_MI_THUNK m68k_output_mi_thunk
230 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
231 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
232
233 #undef TARGET_ASM_FILE_START_APP_OFF
234 #define TARGET_ASM_FILE_START_APP_OFF true
235
236 #undef TARGET_LEGITIMIZE_ADDRESS
237 #define TARGET_LEGITIMIZE_ADDRESS m68k_legitimize_address
238
239 #undef TARGET_SCHED_ADJUST_COST
240 #define TARGET_SCHED_ADJUST_COST m68k_sched_adjust_cost
241
242 #undef TARGET_SCHED_ISSUE_RATE
243 #define TARGET_SCHED_ISSUE_RATE m68k_sched_issue_rate
244
245 #undef TARGET_SCHED_VARIABLE_ISSUE
246 #define TARGET_SCHED_VARIABLE_ISSUE m68k_sched_variable_issue
247
248 #undef TARGET_SCHED_INIT_GLOBAL
249 #define TARGET_SCHED_INIT_GLOBAL m68k_sched_md_init_global
250
251 #undef TARGET_SCHED_FINISH_GLOBAL
252 #define TARGET_SCHED_FINISH_GLOBAL m68k_sched_md_finish_global
253
254 #undef TARGET_SCHED_INIT
255 #define TARGET_SCHED_INIT m68k_sched_md_init
256
257 #undef TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE
258 #define TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE m68k_sched_dfa_pre_advance_cycle
259
260 #undef TARGET_SCHED_DFA_POST_ADVANCE_CYCLE
261 #define TARGET_SCHED_DFA_POST_ADVANCE_CYCLE m68k_sched_dfa_post_advance_cycle
262
263 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
264 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
265 m68k_sched_first_cycle_multipass_dfa_lookahead
266
267 #undef TARGET_OPTION_OVERRIDE
268 #define TARGET_OPTION_OVERRIDE m68k_option_override
269
270 #undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
271 #define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m68k_override_options_after_change
272
273 #undef TARGET_RTX_COSTS
274 #define TARGET_RTX_COSTS m68k_rtx_costs
275
276 #undef TARGET_ATTRIBUTE_TABLE
277 #define TARGET_ATTRIBUTE_TABLE m68k_attribute_table
278
279 #undef TARGET_PROMOTE_PROTOTYPES
280 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
281
282 #undef TARGET_STRUCT_VALUE_RTX
283 #define TARGET_STRUCT_VALUE_RTX m68k_struct_value_rtx
284
285 #undef TARGET_CANNOT_FORCE_CONST_MEM
286 #define TARGET_CANNOT_FORCE_CONST_MEM m68k_cannot_force_const_mem
287
288 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
289 #define TARGET_FUNCTION_OK_FOR_SIBCALL m68k_ok_for_sibcall_p
290
291 #if M68K_HONOR_TARGET_STRICT_ALIGNMENT
292 #undef TARGET_RETURN_IN_MEMORY
293 #define TARGET_RETURN_IN_MEMORY m68k_return_in_memory
294 #endif
295
296 #ifdef HAVE_AS_TLS
297 #undef TARGET_HAVE_TLS
298 #define TARGET_HAVE_TLS (true)
299
300 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
301 #define TARGET_ASM_OUTPUT_DWARF_DTPREL m68k_output_dwarf_dtprel
302 #endif
303
304 #undef TARGET_LRA_P
305 #define TARGET_LRA_P hook_bool_void_false
306
307 #undef TARGET_LEGITIMATE_ADDRESS_P
308 #define TARGET_LEGITIMATE_ADDRESS_P m68k_legitimate_address_p
309
310 #undef TARGET_CAN_ELIMINATE
311 #define TARGET_CAN_ELIMINATE m68k_can_eliminate
312
313 #undef TARGET_CONDITIONAL_REGISTER_USAGE
314 #define TARGET_CONDITIONAL_REGISTER_USAGE m68k_conditional_register_usage
315
316 #undef TARGET_TRAMPOLINE_INIT
317 #define TARGET_TRAMPOLINE_INIT m68k_trampoline_init
318
319 #undef TARGET_RETURN_POPS_ARGS
320 #define TARGET_RETURN_POPS_ARGS m68k_return_pops_args
321
322 #undef TARGET_DELEGITIMIZE_ADDRESS
323 #define TARGET_DELEGITIMIZE_ADDRESS m68k_delegitimize_address
324
325 #undef TARGET_FUNCTION_ARG
326 #define TARGET_FUNCTION_ARG m68k_function_arg
327
328 #undef TARGET_FUNCTION_ARG_ADVANCE
329 #define TARGET_FUNCTION_ARG_ADVANCE m68k_function_arg_advance
330
331 #undef TARGET_LEGITIMATE_CONSTANT_P
332 #define TARGET_LEGITIMATE_CONSTANT_P m68k_legitimate_constant_p
333
334 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
335 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA m68k_output_addr_const_extra
336
337 #undef TARGET_C_EXCESS_PRECISION
338 #define TARGET_C_EXCESS_PRECISION m68k_excess_precision
339
340 /* The value stored by TAS. */
341 #undef TARGET_ATOMIC_TEST_AND_SET_TRUEVAL
342 #define TARGET_ATOMIC_TEST_AND_SET_TRUEVAL 128
343
344 #undef TARGET_HARD_REGNO_NREGS
345 #define TARGET_HARD_REGNO_NREGS m68k_hard_regno_nregs
346 #undef TARGET_HARD_REGNO_MODE_OK
347 #define TARGET_HARD_REGNO_MODE_OK m68k_hard_regno_mode_ok
348
349 #undef TARGET_MODES_TIEABLE_P
350 #define TARGET_MODES_TIEABLE_P m68k_modes_tieable_p
351
352 #undef TARGET_PROMOTE_FUNCTION_MODE
353 #define TARGET_PROMOTE_FUNCTION_MODE m68k_promote_function_mode
354
355 #undef TARGET_HAVE_SPECULATION_SAFE_VALUE
356 #define TARGET_HAVE_SPECULATION_SAFE_VALUE speculation_safe_value_not_needed
357
/* Machine-specific attributes supported by this back end.  All three
   require a FUNCTION_DECL and are validated by
   m68k_handle_fndecl_attribute.  */
static const struct attribute_spec m68k_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
       affects_type_identity, handler, exclude } */
  { "interrupt", 0, 0, true,  false, false, false,
    m68k_handle_fndecl_attribute, NULL },
  { "interrupt_handler", 0, 0, true,  false, false, false,
    m68k_handle_fndecl_attribute, NULL },
  { "interrupt_thread", 0, 0, true,  false, false, false,
    m68k_handle_fndecl_attribute, NULL },
  /* Table terminator.  */
  { NULL, 0, 0, false, false, false, false, NULL, NULL }
};
370
371 struct gcc_target targetm = TARGET_INITIALIZER;
372 \f
373 /* Base flags for 68k ISAs. */
374 #define FL_FOR_isa_00 FL_ISA_68000
375 #define FL_FOR_isa_10 (FL_FOR_isa_00 | FL_ISA_68010)
376 /* FL_68881 controls the default setting of -m68881. gcc has traditionally
377 generated 68881 code for 68020 and 68030 targets unless explicitly told
378 not to. */
379 #define FL_FOR_isa_20 (FL_FOR_isa_10 | FL_ISA_68020 \
380 | FL_BITFIELD | FL_68881 | FL_CAS)
381 #define FL_FOR_isa_40 (FL_FOR_isa_20 | FL_ISA_68040)
382 #define FL_FOR_isa_cpu32 (FL_FOR_isa_10 | FL_ISA_68020)
383
384 /* Base flags for ColdFire ISAs. */
385 #define FL_FOR_isa_a (FL_COLDFIRE | FL_ISA_A)
386 #define FL_FOR_isa_aplus (FL_FOR_isa_a | FL_ISA_APLUS | FL_CF_USP)
387 /* Note ISA_B doesn't necessarily include USP (user stack pointer) support. */
388 #define FL_FOR_isa_b (FL_FOR_isa_a | FL_ISA_B | FL_CF_HWDIV)
389 /* ISA_C is not upwardly compatible with ISA_B. */
390 #define FL_FOR_isa_c (FL_FOR_isa_a | FL_ISA_C | FL_CF_USP)
391
/* ISA identifiers; each corresponds to one of the FL_FOR_isa_* flag
   sets defined above.  */
enum m68k_isa
{
  /* Traditional 68000 instruction sets.  */
  isa_00,
  isa_10,
  isa_20,
  isa_40,
  isa_cpu32,
  /* ColdFire instruction set variants.  */
  isa_a,
  isa_aplus,
  isa_b,
  isa_c,
  /* Number of ISAs; also used as the sentinel value in the
     selection tables below.  */
  isa_max
};
407
/* Information about one of the -march, -mcpu or -mtune arguments.  */
struct m68k_target_selection
{
  /* The argument being described.  */
  const char *name;

  /* For -mcpu, this is the device selected by the option.
     For -mtune and -march, it is a representative device
     for the microarchitecture or ISA respectively.  */
  enum target_device device;

  /* The M68K_DEVICE fields associated with DEVICE.  See the comment
     in m68k-devices.def for details.  FAMILY is only valid for -mcpu.  */
  const char *family;
  enum uarch_type microarch;
  enum m68k_isa isa;
  /* Union of the FL_* capability flags for this selection.  */
  unsigned long flags;
};
426
427 /* A list of all devices in m68k-devices.def. Used for -mcpu selection. */
428 static const struct m68k_target_selection all_devices[] =
429 {
430 #define M68K_DEVICE(NAME,ENUM_VALUE,FAMILY,MULTILIB,MICROARCH,ISA,FLAGS) \
431 { NAME, ENUM_VALUE, FAMILY, u##MICROARCH, ISA, FLAGS | FL_FOR_##ISA },
432 #include "m68k-devices.def"
433 #undef M68K_DEVICE
434 { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
435 };
436
437 /* A list of all ISAs, mapping each one to a representative device.
438 Used for -march selection. */
439 static const struct m68k_target_selection all_isas[] =
440 {
441 #define M68K_ISA(NAME,DEVICE,MICROARCH,ISA,FLAGS) \
442 { NAME, DEVICE, NULL, u##MICROARCH, ISA, FLAGS },
443 #include "m68k-isas.def"
444 #undef M68K_ISA
445 { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
446 };
447
448 /* A list of all microarchitectures, mapping each one to a representative
449 device. Used for -mtune selection. */
450 static const struct m68k_target_selection all_microarchs[] =
451 {
452 #define M68K_MICROARCH(NAME,DEVICE,MICROARCH,ISA,FLAGS) \
453 { NAME, DEVICE, NULL, u##MICROARCH, ISA, FLAGS },
454 #include "m68k-microarchs.def"
455 #undef M68K_MICROARCH
456 { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
457 };
458 \f
459 /* The entries associated with the -mcpu, -march and -mtune settings,
460 or null for options that have not been used. */
461 const struct m68k_target_selection *m68k_cpu_entry;
462 const struct m68k_target_selection *m68k_arch_entry;
463 const struct m68k_target_selection *m68k_tune_entry;
464
465 /* Which CPU we are generating code for. */
466 enum target_device m68k_cpu;
467
468 /* Which microarchitecture to tune for. */
469 enum uarch_type m68k_tune;
470
471 /* Which FPU to use. */
472 enum fpu_type m68k_fpu;
473
474 /* The set of FL_* flags that apply to the target processor. */
475 unsigned int m68k_cpu_flags;
476
477 /* The set of FL_* flags that apply to the processor to be tuned for. */
478 unsigned int m68k_tune_flags;
479
480 /* Asm templates for calling or jumping to an arbitrary symbolic address,
481 or NULL if such calls or jumps are not supported. The address is held
482 in operand 0. */
483 const char *m68k_symbolic_call;
484 const char *m68k_symbolic_jump;
485
486 /* Enum variable that corresponds to m68k_symbolic_call values. */
487 enum M68K_SYMBOLIC_CALL m68k_symbolic_call_var;
488
489 \f
/* Implement TARGET_OPTION_OVERRIDE.  Resolve the -mcpu/-march/-mtune
   options into device, flag and FPU settings, derive default target
   flags, pick the symbolic call/jump templates for the chosen PIC
   model, and configure the instruction schedulers.  */

static void
m68k_option_override (void)
{
  const struct m68k_target_selection *entry;
  unsigned long target_mask;

  /* Translate the raw option enums into entries of the selection
     tables generated from the .def files.  */
  if (global_options_set.x_m68k_arch_option)
    m68k_arch_entry = &all_isas[m68k_arch_option];

  if (global_options_set.x_m68k_cpu_option)
    m68k_cpu_entry = &all_devices[(int) m68k_cpu_option];

  if (global_options_set.x_m68k_tune_option)
    m68k_tune_entry = &all_microarchs[(int) m68k_tune_option];

  /* User can choose:

     -mcpu=
     -march=
     -mtune=

     -march=ARCH should generate code that runs any processor
     implementing architecture ARCH.  -mcpu=CPU should override -march
     and should generate code that runs on processor CPU, making free
     use of any instructions that CPU understands.  -mtune=UARCH applies
     on top of -mcpu or -march and optimizes the code for UARCH.  It does
     not change the target architecture.  */
  if (m68k_cpu_entry)
    {
      /* Complain if the -march setting is for a different microarchitecture,
	 or includes flags that the -mcpu setting doesn't.  */
      if (m68k_arch_entry
	  && (m68k_arch_entry->microarch != m68k_cpu_entry->microarch
	      || (m68k_arch_entry->flags & ~m68k_cpu_entry->flags) != 0))
	warning (0, "%<-mcpu=%s%> conflicts with %<-march=%s%>",
		 m68k_cpu_entry->name, m68k_arch_entry->name);

      entry = m68k_cpu_entry;
    }
  else
    entry = m68k_arch_entry;

  if (!entry)
    entry = all_devices + TARGET_CPU_DEFAULT;

  m68k_cpu_flags = entry->flags;

  /* Use the architecture setting to derive default values for
     certain flags.  */
  target_mask = 0;

  /* ColdFire is lenient about alignment.  */
  if (!TARGET_COLDFIRE)
    target_mask |= MASK_STRICT_ALIGNMENT;

  if ((m68k_cpu_flags & FL_BITFIELD) != 0)
    target_mask |= MASK_BITFIELD;
  if ((m68k_cpu_flags & FL_CF_HWDIV) != 0)
    target_mask |= MASK_CF_HWDIV;
  if ((m68k_cpu_flags & (FL_68881 | FL_CF_FPU)) != 0)
    target_mask |= MASK_HARD_FLOAT;
  /* Only apply the derived defaults to bits the user did not set
     explicitly on the command line.  */
  target_flags |= target_mask & ~target_flags_explicit;

  /* Set the directly-usable versions of the -mcpu and -mtune settings.  */
  m68k_cpu = entry->device;
  if (m68k_tune_entry)
    {
      m68k_tune = m68k_tune_entry->microarch;
      m68k_tune_flags = m68k_tune_entry->flags;
    }
#ifdef M68K_DEFAULT_TUNE
  else if (!m68k_cpu_entry && !m68k_arch_entry)
    {
      enum target_device dev;
      dev = all_microarchs[M68K_DEFAULT_TUNE].device;
      m68k_tune_flags = all_devices[dev].flags;
    }
#endif
  else
    {
      m68k_tune = entry->microarch;
      m68k_tune_flags = entry->flags;
    }

  /* Set the type of FPU.  */
  m68k_fpu = (!TARGET_HARD_FLOAT ? FPUTYPE_NONE
	      : (m68k_cpu_flags & FL_COLDFIRE) != 0 ? FPUTYPE_COLDFIRE
	      : FPUTYPE_68881);

  /* Sanity check to ensure that msep-data and mid-shared-library are not
   * both specified together.  Doing so simply doesn't make sense.
   */
  if (TARGET_SEP_DATA && TARGET_ID_SHARED_LIBRARY)
    error ("cannot specify both %<-msep-data%> and %<-mid-shared-library%>");

  /* If we're generating code for a separate A5 relative data segment,
   * we've got to enable -fPIC as well.  This might be relaxable to
   * -fpic but it hasn't been tested properly.
   */
  if (TARGET_SEP_DATA || TARGET_ID_SHARED_LIBRARY)
    flag_pic = 2;

  /* -mpcrel -fPIC uses 32-bit pc-relative displacements.  Raise an
     error if the target does not support them.  */
  if (TARGET_PCREL && !TARGET_68020 && flag_pic == 2)
    error ("%<-mpcrel%> %<-fPIC%> is not currently supported on selected cpu");

  /* ??? A historic way of turning on pic, or is this intended to
     be an embedded thing that doesn't have the same name binding
     significance that it does on hosted ELF systems?  */
  if (TARGET_PCREL && flag_pic == 0)
    flag_pic = 1;

  if (!flag_pic)
    {
      m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_JSR;

      m68k_symbolic_jump = "jra %a0";
    }
  else if (TARGET_ID_SHARED_LIBRARY)
    /* All addresses must be loaded from the GOT.  */
    ;
  else if (TARGET_68020 || TARGET_ISAB || TARGET_ISAC)
    {
      if (TARGET_PCREL)
	m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_C;
      else
	m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_P;

      if (TARGET_ISAC)
	/* No unconditional long branch */;
      else if (TARGET_PCREL)
	m68k_symbolic_jump = "bra%.l %c0";
      else
	m68k_symbolic_jump = "bra%.l %p0";
      /* Turn off function cse if we are doing PIC.  We always want
	 function call to be done as `bsr foo@PLTPC'.  */
      /* ??? It's traditional to do this for -mpcrel too, but it isn't
	 clear how intentional that is.  */
      flag_no_function_cse = 1;
    }

  /* Translate the enum chosen above into the actual asm template.  */
  switch (m68k_symbolic_call_var)
    {
    case M68K_SYMBOLIC_CALL_JSR:
      m68k_symbolic_call = "jsr %a0";
      break;

    case M68K_SYMBOLIC_CALL_BSR_C:
      m68k_symbolic_call = "bsr%.l %c0";
      break;

    case M68K_SYMBOLIC_CALL_BSR_P:
      m68k_symbolic_call = "bsr%.l %p0";
      break;

    case M68K_SYMBOLIC_CALL_NONE:
      gcc_assert (m68k_symbolic_call == NULL);
      break;

    default:
      gcc_unreachable ();
    }

#ifndef ASM_OUTPUT_ALIGN_WITH_NOP
  /* Without nop-padded alignment only small alignments can be
     honored; clamp -falign-labels/-falign-loops accordingly.  */
  parse_alignment_opts ();
  int label_alignment = align_labels.levels[0].get_value ();
  if (label_alignment > 2)
    {
      warning (0, "%<-falign-labels=%d%> is not supported", label_alignment);
      str_align_labels = "1";
    }

  int loop_alignment = align_loops.levels[0].get_value ();
  if (loop_alignment > 2)
    {
      warning (0, "%<-falign-loops=%d%> is not supported", loop_alignment);
      str_align_loops = "1";
    }
#endif

  if ((opt_fstack_limit_symbol_arg != NULL || opt_fstack_limit_register_no >= 0)
      && !TARGET_68020)
    {
      warning (0, "%<-fstack-limit-%> options are not supported on this cpu");
      opt_fstack_limit_symbol_arg = NULL;
      opt_fstack_limit_register_no = -1;
    }

  SUBTARGET_OVERRIDE_OPTIONS;

  /* Setup scheduling options.  */
  if (TUNE_CFV1)
    m68k_sched_cpu = CPU_CFV1;
  else if (TUNE_CFV2)
    m68k_sched_cpu = CPU_CFV2;
  else if (TUNE_CFV3)
    m68k_sched_cpu = CPU_CFV3;
  else if (TUNE_CFV4)
    m68k_sched_cpu = CPU_CFV4;
  else
    {
      /* No scheduling description for this CPU, so disable every
	 scheduling pass.  */
      m68k_sched_cpu = CPU_UNKNOWN;
      flag_schedule_insns = 0;
      flag_schedule_insns_after_reload = 0;
      flag_modulo_sched = 0;
      flag_live_range_shrinkage = 0;
    }

  if (m68k_sched_cpu != CPU_UNKNOWN)
    {
      if ((m68k_cpu_flags & (FL_CF_EMAC | FL_CF_EMAC_B)) != 0)
	m68k_sched_mac = MAC_CF_EMAC;
      else if ((m68k_cpu_flags & FL_CF_MAC) != 0)
	m68k_sched_mac = MAC_CF_MAC;
      else
	m68k_sched_mac = MAC_NO;
    }
}
711
712 /* Implement TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE. */
713
714 static void
715 m68k_override_options_after_change (void)
716 {
717 if (m68k_sched_cpu == CPU_UNKNOWN)
718 {
719 flag_schedule_insns = 0;
720 flag_schedule_insns_after_reload = 0;
721 flag_modulo_sched = 0;
722 flag_live_range_shrinkage = 0;
723 }
724 }
725
726 /* Generate a macro of the form __mPREFIX_cpu_NAME, where PREFIX is the
727 given argument and NAME is the argument passed to -mcpu. Return NULL
728 if -mcpu was not passed. */
729
730 const char *
731 m68k_cpp_cpu_ident (const char *prefix)
732 {
733 if (!m68k_cpu_entry)
734 return NULL;
735 return concat ("__m", prefix, "_cpu_", m68k_cpu_entry->name, NULL);
736 }
737
738 /* Generate a macro of the form __mPREFIX_family_NAME, where PREFIX is the
739 given argument and NAME is the name of the representative device for
740 the -mcpu argument's family. Return NULL if -mcpu was not passed. */
741
742 const char *
743 m68k_cpp_cpu_family (const char *prefix)
744 {
745 if (!m68k_cpu_entry)
746 return NULL;
747 return concat ("__m", prefix, "_family_", m68k_cpu_entry->family, NULL);
748 }
749 \f
750 /* Return m68k_fk_interrupt_handler if FUNC has an "interrupt" or
751 "interrupt_handler" attribute and interrupt_thread if FUNC has an
752 "interrupt_thread" attribute. Otherwise, return
753 m68k_fk_normal_function. */
754
755 enum m68k_function_kind
756 m68k_get_function_kind (tree func)
757 {
758 tree a;
759
760 gcc_assert (TREE_CODE (func) == FUNCTION_DECL);
761
762 a = lookup_attribute ("interrupt", DECL_ATTRIBUTES (func));
763 if (a != NULL_TREE)
764 return m68k_fk_interrupt_handler;
765
766 a = lookup_attribute ("interrupt_handler", DECL_ATTRIBUTES (func));
767 if (a != NULL_TREE)
768 return m68k_fk_interrupt_handler;
769
770 a = lookup_attribute ("interrupt_thread", DECL_ATTRIBUTES (func));
771 if (a != NULL_TREE)
772 return m68k_fk_interrupt_thread;
773
774 return m68k_fk_normal_function;
775 }
776
777 /* Handle an attribute requiring a FUNCTION_DECL; arguments as in
778 struct attribute_spec.handler. */
779 static tree
780 m68k_handle_fndecl_attribute (tree *node, tree name,
781 tree args ATTRIBUTE_UNUSED,
782 int flags ATTRIBUTE_UNUSED,
783 bool *no_add_attrs)
784 {
785 if (TREE_CODE (*node) != FUNCTION_DECL)
786 {
787 warning (OPT_Wattributes, "%qE attribute only applies to functions",
788 name);
789 *no_add_attrs = true;
790 }
791
792 if (m68k_get_function_kind (*node) != m68k_fk_normal_function)
793 {
794 error ("multiple interrupt attributes not allowed");
795 *no_add_attrs = true;
796 }
797
798 if (!TARGET_FIDOA
799 && !strcmp (IDENTIFIER_POINTER (name), "interrupt_thread"))
800 {
801 error ("interrupt_thread is available only on fido");
802 *no_add_attrs = true;
803 }
804
805 return NULL_TREE;
806 }
807
/* Fill in CURRENT_FRAME with the save masks, register counts and
   offsets for the current function.  The result is cached per
   funcdef_no once reload has completed.  */
static void
m68k_compute_frame_layout (void)
{
  int regno, saved;
  unsigned int mask;
  enum m68k_function_kind func_kind =
    m68k_get_function_kind (current_function_decl);
  bool interrupt_handler = func_kind == m68k_fk_interrupt_handler;
  bool interrupt_thread = func_kind == m68k_fk_interrupt_thread;

  /* Only compute the frame once per function.
     Don't cache information until reload has been completed.  */
  if (current_frame.funcdef_no == current_function_funcdef_no
      && reload_completed)
    return;

  /* Round the frame size up to a multiple of 4 bytes.  */
  current_frame.size = (get_frame_size () + 3) & -4;

  mask = saved = 0;

  /* Interrupt thread does not need to save any register.  */
  if (!interrupt_thread)
    for (regno = 0; regno < 16; regno++)
      if (m68k_save_reg (regno, interrupt_handler))
	{
	  mask |= 1 << (regno - D0_REG);
	  saved++;
	}
  current_frame.offset = saved * 4;
  current_frame.reg_no = saved;
  current_frame.reg_mask = mask;

  /* Now do the same for the floating-point registers (regnos 16-23).  */
  current_frame.foffset = 0;
  mask = saved = 0;
  if (TARGET_HARD_FLOAT)
    {
      /* Interrupt thread does not need to save any register.  */
      if (!interrupt_thread)
	for (regno = 16; regno < 24; regno++)
	  if (m68k_save_reg (regno, interrupt_handler))
	    {
	      mask |= 1 << (regno - FP0_REG);
	      saved++;
	    }
      current_frame.foffset = saved * TARGET_FP_REG_SIZE;
      current_frame.offset += current_frame.foffset;
    }
  current_frame.fpu_no = saved;
  current_frame.fpu_mask = mask;

  /* Remember what function this frame refers to.  */
  current_frame.funcdef_no = current_function_funcdef_no;
}
861
862 /* Worker function for TARGET_CAN_ELIMINATE. */
863
864 bool
865 m68k_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
866 {
867 return (to == STACK_POINTER_REGNUM ? ! frame_pointer_needed : true);
868 }
869
870 HOST_WIDE_INT
871 m68k_initial_elimination_offset (int from, int to)
872 {
873 int argptr_offset;
874 /* The arg pointer points 8 bytes before the start of the arguments,
875 as defined by FIRST_PARM_OFFSET. This makes it coincident with the
876 frame pointer in most frames. */
877 argptr_offset = frame_pointer_needed ? 0 : UNITS_PER_WORD;
878 if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
879 return argptr_offset;
880
881 m68k_compute_frame_layout ();
882
883 gcc_assert (to == STACK_POINTER_REGNUM);
884 switch (from)
885 {
886 case ARG_POINTER_REGNUM:
887 return current_frame.offset + current_frame.size - argptr_offset;
888 case FRAME_POINTER_REGNUM:
889 return current_frame.offset + current_frame.size;
890 default:
891 gcc_unreachable ();
892 }
893 }
894
895 /* Refer to the array `regs_ever_live' to determine which registers
896 to save; `regs_ever_live[I]' is nonzero if register number I
897 is ever used in the function. This function is responsible for
898 knowing which registers should not be saved even if used.
899 Return true if we need to save REGNO. */
900
static bool
m68k_save_reg (unsigned int regno, bool interrupt_handler)
{
  /* The PIC register needs saving whenever the function uses the GOT
     or might come to need it (see the const-pool note below).  */
  if (flag_pic && regno == PIC_REG)
    {
      if (crtl->saves_all_registers)
	return true;
      if (crtl->uses_pic_offset_table)
	return true;
      /* Reload may introduce constant pool references into a function
	 that thitherto didn't need a PIC register.  Note that the test
	 above will not catch that case because we will only set
	 crtl->uses_pic_offset_table when emitting
	 the address reloads.  */
      if (crtl->uses_const_pool)
	return true;
    }

  /* Functions that call __builtin_eh_return must preserve the EH data
     registers so the unwinder can store into them.  */
  if (crtl->calls_eh_return)
    {
      unsigned int i;
      for (i = 0; ; i++)
	{
	  unsigned int test = EH_RETURN_DATA_REGNO (i);
	  if (test == INVALID_REGNUM)
	    break;
	  if (test == regno)
	    return true;
	}
    }

  /* Fixed regs we never touch.  */
  if (fixed_regs[regno])
    return false;

  /* The frame pointer (if it is such) is handled specially.  */
  if (regno == FRAME_POINTER_REGNUM && frame_pointer_needed)
    return false;

  /* Interrupt handlers must also save call_used_regs
     if they are live or when calling nested functions.  */
  if (interrupt_handler)
    {
      if (df_regs_ever_live_p (regno))
	return true;

      if (!crtl->is_leaf && call_used_regs[regno])
	return true;
    }

  /* Never need to save registers that aren't touched.  */
  if (!df_regs_ever_live_p (regno))
    return false;

  /* Otherwise save everything that isn't call-clobbered.  */
  return !call_used_regs[regno];
}
958
959 /* Emit RTL for a MOVEM or FMOVEM instruction. BASE + OFFSET represents
960 the lowest memory address. COUNT is the number of registers to be
961 moved, with register REGNO + I being moved if bit I of MASK is set.
962 STORE_P specifies the direction of the move and ADJUST_STACK_P says
963 whether or not this is pre-decrement (if STORE_P) or post-increment
964 (if !STORE_P) operation. */
965
static rtx_insn *
m68k_emit_movem (rtx base, HOST_WIDE_INT offset,
		 unsigned int count, unsigned int regno,
		 unsigned int mask, bool store_p, bool adjust_stack_p)
{
  int i;
  rtx body, addr, src, operands[2];
  machine_mode mode;

  /* The PARALLEL holds one SET per moved register, plus one more for
     the stack adjustment when ADJUST_STACK_P.  */
  body = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (adjust_stack_p + count));
  mode = reg_raw_mode[regno];
  i = 0;

  if (adjust_stack_p)
    {
      /* BASE moves by COUNT registers' worth of bytes: downwards for a
	 pre-decrement store, upwards for a post-increment load.  */
      src = plus_constant (Pmode, base,
			   (count
			    * GET_MODE_SIZE (mode)
			    * (HOST_WIDE_INT) (store_p ? -1 : 1)));
      XVECEXP (body, 0, i++) = gen_rtx_SET (base, src);
    }

  /* Emit one SET for each bit in MASK, walking up through memory.  */
  for (; mask != 0; mask >>= 1, regno++)
    if (mask & 1)
      {
	addr = plus_constant (Pmode, base, offset);
	/* operands[0] is always the destination; STORE_P selects
	   whether the register or the memory slot ends up there.  */
	operands[!store_p] = gen_frame_mem (mode, addr);
	operands[store_p] = gen_rtx_REG (mode, regno);
	XVECEXP (body, 0, i++)
	  = gen_rtx_SET (operands[0], operands[1]);
	offset += GET_MODE_SIZE (mode);
      }
  gcc_assert (i == XVECLEN (body, 0));

  return emit_insn (body);
}
1002
1003 /* Make INSN a frame-related instruction. */
1004
1005 static void
1006 m68k_set_frame_related (rtx_insn *insn)
1007 {
1008 rtx body;
1009 int i;
1010
1011 RTX_FRAME_RELATED_P (insn) = 1;
1012 body = PATTERN (insn);
1013 if (GET_CODE (body) == PARALLEL)
1014 for (i = 0; i < XVECLEN (body, 0); i++)
1015 RTX_FRAME_RELATED_P (XVECEXP (body, 0, i)) = 1;
1016 }
1017
/* Emit RTL for the "prologue" define_expand: allocate the stack frame,
   save the call-saved integer and FP registers used by the function,
   emit any -fstack-limit checks, and load the PIC register if the
   function needs one.  */

void
m68k_expand_prologue (void)
{
  HOST_WIDE_INT fsize_with_regs;
  rtx limit, src, dest;

  m68k_compute_frame_layout ();

  if (flag_stack_usage_info)
    current_function_static_stack_size
      = current_frame.size + current_frame.offset;

  /* If the stack limit is a symbol, we can check it here,
     before actually allocating the space.  */
  if (crtl->limit_stack
      && GET_CODE (stack_limit_rtx) == SYMBOL_REF)
    {
      limit = plus_constant (Pmode, stack_limit_rtx, current_frame.size + 4);
      if (!m68k_legitimate_constant_p (Pmode, limit))
	{
	  /* The limit constant cannot be used directly; stage it
	     through D0 first.  */
	  emit_move_insn (gen_rtx_REG (Pmode, D0_REG), limit);
	  limit = gen_rtx_REG (Pmode, D0_REG);
	}
      /* Trap if the stack pointer is already below the limit.  */
      emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode,
					    stack_pointer_rtx, limit),
			       stack_pointer_rtx, limit,
			       const1_rtx));
    }

  fsize_with_regs = current_frame.size;
  if (TARGET_COLDFIRE)
    {
      /* ColdFire's move multiple instructions do not allow pre-decrement
	 addressing.  Add the size of movem saves to the initial stack
	 allocation instead.  */
      if (current_frame.reg_no >= MIN_MOVEM_REGS)
	fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
      if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
	fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
    }

  if (frame_pointer_needed)
    {
      if (fsize_with_regs == 0 && TUNE_68040)
	{
	  /* On the 68040, two separate moves are faster than link.w 0.  */
	  dest = gen_frame_mem (Pmode,
				gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
	  m68k_set_frame_related (emit_move_insn (dest, frame_pointer_rtx));
	  m68k_set_frame_related (emit_move_insn (frame_pointer_rtx,
						  stack_pointer_rtx));
	}
      else if (fsize_with_regs < 0x8000 || TARGET_68020)
	/* The frame size fits link.w's 16-bit displacement, or the
	   target has link.l (68020 and up).  */
	m68k_set_frame_related
	  (emit_insn (gen_link (frame_pointer_rtx,
				GEN_INT (-4 - fsize_with_regs))));
      else
	{
	  /* Frame too large for link.w and no link.l: link with a
	     zero-sized frame and adjust the stack pointer separately.  */
	  m68k_set_frame_related
	    (emit_insn (gen_link (frame_pointer_rtx, GEN_INT (-4))));
	  m68k_set_frame_related
	    (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-fsize_with_regs))));
	}

      /* If the frame pointer is needed, emit a special barrier that
	 will prevent the scheduler from moving stores to the frame
	 before the stack adjustment.  */
      emit_insn (gen_stack_tie (stack_pointer_rtx, frame_pointer_rtx));
    }
  else if (fsize_with_regs != 0)
    m68k_set_frame_related
      (emit_insn (gen_addsi3 (stack_pointer_rtx,
			      stack_pointer_rtx,
			      GEN_INT (-fsize_with_regs))));

  if (current_frame.fpu_mask)
    {
      gcc_assert (current_frame.fpu_no >= MIN_FMOVEM_REGS);
      if (TARGET_68881)
	/* 68881: fmovem saves XFmode registers with pre-decrement.  */
	m68k_set_frame_related
	  (m68k_emit_movem (stack_pointer_rtx,
			    current_frame.fpu_no * -GET_MODE_SIZE (XFmode),
			    current_frame.fpu_no, FP0_REG,
			    current_frame.fpu_mask, true, true));
      else
	{
	  int offset;

	  /* If we're using moveml to save the integer registers,
	     the stack pointer will point to the bottom of the moveml
	     save area.  Find the stack offset of the first FP register.  */
	  if (current_frame.reg_no < MIN_MOVEM_REGS)
	    offset = 0;
	  else
	    offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
	  m68k_set_frame_related
	    (m68k_emit_movem (stack_pointer_rtx, offset,
			      current_frame.fpu_no, FP0_REG,
			      current_frame.fpu_mask, true, false));
	}
    }

  /* If the stack limit is not a symbol, check it here.
     This has the disadvantage that it may be too late...  */
  if (crtl->limit_stack)
    {
      if (REG_P (stack_limit_rtx))
	emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode, stack_pointer_rtx,
					      stack_limit_rtx),
				 stack_pointer_rtx, stack_limit_rtx,
				 const1_rtx));

      else if (GET_CODE (stack_limit_rtx) != SYMBOL_REF)
	warning (0, "stack limit expression is not supported");
    }

  if (current_frame.reg_no < MIN_MOVEM_REGS)
    {
      /* Store each register separately in the same order moveml does.  */
      int i;

      for (i = 16; i-- > 0; )
	if (current_frame.reg_mask & (1 << i))
	  {
	    src = gen_rtx_REG (SImode, D0_REG + i);
	    dest = gen_frame_mem (SImode,
				  gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
	    m68k_set_frame_related (emit_insn (gen_movsi (dest, src)));
	  }
    }
  else
    {
      if (TARGET_COLDFIRE)
	/* The required register save space has already been allocated.
	   The first register should be stored at (%sp).  */
	m68k_set_frame_related
	  (m68k_emit_movem (stack_pointer_rtx, 0,
			    current_frame.reg_no, D0_REG,
			    current_frame.reg_mask, true, false));
      else
	m68k_set_frame_related
	  (m68k_emit_movem (stack_pointer_rtx,
			    current_frame.reg_no * -GET_MODE_SIZE (SImode),
			    current_frame.reg_no, D0_REG,
			    current_frame.reg_mask, true, true));
    }

  /* Load the GOT pointer unless data and text are in the same
     section (TARGET_SEP_DATA keeps %a5 set up by the runtime).
     NOTE(review): the precise TARGET_SEP_DATA semantics come from the
     option machinery outside this file -- confirm there.  */
  if (!TARGET_SEP_DATA
      && crtl->uses_pic_offset_table)
    emit_insn (gen_load_got (pic_offset_table_rtx));
}
1173 \f
1174 /* Return true if a simple (return) instruction is sufficient for this
1175 instruction (i.e. if no epilogue is needed). */
1176
1177 bool
1178 m68k_use_return_insn (void)
1179 {
1180 if (!reload_completed || frame_pointer_needed || get_frame_size () != 0)
1181 return false;
1182
1183 m68k_compute_frame_layout ();
1184 return current_frame.offset == 0;
1185 }
1186
/* Emit RTL for the "epilogue" or "sibcall_epilogue" define_expand;
   SIBCALL_P says which.

   The function epilogue should not depend on the current stack pointer!
   It should use the frame pointer only, if there is a frame pointer.
   This is mandatory because of alloca; we also take advantage of it to
   omit stack adjustments before returning.  */

void
m68k_expand_epilogue (bool sibcall_p)
{
  HOST_WIDE_INT fsize, fsize_with_regs;
  bool big, restore_from_sp;

  m68k_compute_frame_layout ();

  fsize = current_frame.size;
  big = false;
  restore_from_sp = false;

  /* FIXME : crtl->is_leaf below is too strong.
     What we really need to know there is if there could be pending
     stack adjustment needed at that point.  */
  restore_from_sp = (!frame_pointer_needed
		     || (!cfun->calls_alloca && crtl->is_leaf));

  /* fsize_with_regs is the size we need to adjust the sp when
     popping the frame.  */
  fsize_with_regs = fsize;
  if (TARGET_COLDFIRE && restore_from_sp)
    {
      /* ColdFire's move multiple instructions do not allow post-increment
	 addressing.  Add the size of movem loads to the final deallocation
	 instead.  */
      if (current_frame.reg_no >= MIN_MOVEM_REGS)
	fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
      if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
	fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
    }

  /* When the save area lies beyond the reach of a 16-bit displacement
     from the frame pointer, either restore relative to the stack
     pointer or set BIG to index the saves via A1.  */
  if (current_frame.offset + fsize >= 0x8000
      && !restore_from_sp
      && (current_frame.reg_mask || current_frame.fpu_mask))
    {
      if (TARGET_COLDFIRE
	  && (current_frame.reg_no >= MIN_MOVEM_REGS
	      || current_frame.fpu_no >= MIN_FMOVEM_REGS))
	{
	  /* ColdFire's move multiple instructions do not support the
	     (d8,Ax,Xi) addressing mode, so we're as well using a normal
	     stack-based restore.  */
	  emit_move_insn (gen_rtx_REG (Pmode, A1_REG),
			  GEN_INT (-(current_frame.offset + fsize)));
	  emit_insn (gen_blockage ());
	  emit_insn (gen_addsi3 (stack_pointer_rtx,
				 gen_rtx_REG (Pmode, A1_REG),
				 frame_pointer_rtx));
	  restore_from_sp = true;
	}
      else
	{
	  /* Load the large part of the offset into A1 so the restores
	     below can use (%fp,%a1.l) addressing.  */
	  emit_move_insn (gen_rtx_REG (Pmode, A1_REG), GEN_INT (-fsize));
	  fsize = 0;
	  big = true;
	}
    }

  if (current_frame.reg_no < MIN_MOVEM_REGS)
    {
      /* Restore each register separately in the same order moveml does.  */
      int i;
      HOST_WIDE_INT offset;

      offset = current_frame.offset + fsize;
      for (i = 0; i < 16; i++)
	if (current_frame.reg_mask & (1 << i))
	  {
	    rtx addr;

	    if (big)
	      {
		/* Generate the address -OFFSET(%fp,%a1.l).  */
		addr = gen_rtx_REG (Pmode, A1_REG);
		addr = gen_rtx_PLUS (Pmode, addr, frame_pointer_rtx);
		addr = plus_constant (Pmode, addr, -offset);
	      }
	    else if (restore_from_sp)
	      addr = gen_rtx_POST_INC (Pmode, stack_pointer_rtx);
	    else
	      addr = plus_constant (Pmode, frame_pointer_rtx, -offset);
	    emit_move_insn (gen_rtx_REG (SImode, D0_REG + i),
			    gen_frame_mem (SImode, addr));
	    offset -= GET_MODE_SIZE (SImode);
	  }
    }
  else if (current_frame.reg_mask)
    {
      /* Restore all integer registers with a single moveml.  */
      if (big)
	m68k_emit_movem (gen_rtx_PLUS (Pmode,
				       gen_rtx_REG (Pmode, A1_REG),
				       frame_pointer_rtx),
			 -(current_frame.offset + fsize),
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false, false);
      else if (restore_from_sp)
	m68k_emit_movem (stack_pointer_rtx, 0,
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false,
			 !TARGET_COLDFIRE);
      else
	m68k_emit_movem (frame_pointer_rtx,
			 -(current_frame.offset + fsize),
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false, false);
    }

  if (current_frame.fpu_no > 0)
    {
      /* Restore the FP registers with fmovem.  */
      if (big)
	m68k_emit_movem (gen_rtx_PLUS (Pmode,
				       gen_rtx_REG (Pmode, A1_REG),
				       frame_pointer_rtx),
			 -(current_frame.foffset + fsize),
			 current_frame.fpu_no, FP0_REG,
			 current_frame.fpu_mask, false, false);
      else if (restore_from_sp)
	{
	  if (TARGET_COLDFIRE)
	    {
	      int offset;

	      /* If we used moveml to restore the integer registers, the
		 stack pointer will still point to the bottom of the moveml
		 save area.  Find the stack offset of the first FP
		 register.  */
	      if (current_frame.reg_no < MIN_MOVEM_REGS)
		offset = 0;
	      else
		offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
	      m68k_emit_movem (stack_pointer_rtx, offset,
			       current_frame.fpu_no, FP0_REG,
			       current_frame.fpu_mask, false, false);
	    }
	  else
	    m68k_emit_movem (stack_pointer_rtx, 0,
			     current_frame.fpu_no, FP0_REG,
			     current_frame.fpu_mask, false, true);
	}
      else
	m68k_emit_movem (frame_pointer_rtx,
			 -(current_frame.foffset + fsize),
			 current_frame.fpu_no, FP0_REG,
			 current_frame.fpu_mask, false, false);
    }

  /* Emit a scheduling barrier before tearing the frame down.  */
  emit_insn (gen_blockage ());
  if (frame_pointer_needed)
    emit_insn (gen_unlink (frame_pointer_rtx));
  else if (fsize_with_regs)
    emit_insn (gen_addsi3 (stack_pointer_rtx,
			   stack_pointer_rtx,
			   GEN_INT (fsize_with_regs)));

  /* For __builtin_eh_return, pop the caller's extra stack adjustment
     after the frame is gone.  */
  if (crtl->calls_eh_return)
    emit_insn (gen_addsi3 (stack_pointer_rtx,
			   stack_pointer_rtx,
			   EH_RETURN_STACKADJ_RTX));

  /* Sibcall epilogues fall through into the call; otherwise return.  */
  if (!sibcall_p)
    emit_jump_insn (ret_rtx);
}
1358 \f
1359 /* Return true if X is a valid comparison operator for the dbcc
1360 instruction.
1361
1362 Note it rejects floating point comparison operators.
1363 (In the future we could use Fdbcc).
1364
1365 It also rejects some comparisons when CC_NO_OVERFLOW is set. */
1366
1367 int
1368 valid_dbcc_comparison_p_2 (rtx x, machine_mode mode ATTRIBUTE_UNUSED)
1369 {
1370 switch (GET_CODE (x))
1371 {
1372 case EQ: case NE: case GTU: case LTU:
1373 case GEU: case LEU:
1374 return 1;
1375
1376 /* Reject some when CC_NO_OVERFLOW is set. This may be over
1377 conservative */
1378 case GT: case LT: case GE: case LE:
1379 return ! (cc_prev_status.flags & CC_NO_OVERFLOW);
1380 default:
1381 return 0;
1382 }
1383 }
1384
/* Return nonzero if the condition codes currently live in the 68881
   flag register rather than the integer CCR.  The nonzero value is
   the raw CC_IN_68881 bit, not normalized to 1.  */
int
flags_in_68881 (void)
{
  /* We could add support for these in the future.  */
  return cc_status.flags & CC_IN_68881;
}
1392
1393 /* Return true if PARALLEL contains register REGNO. */
1394 static bool
1395 m68k_reg_present_p (const_rtx parallel, unsigned int regno)
1396 {
1397 int i;
1398
1399 if (REG_P (parallel) && REGNO (parallel) == regno)
1400 return true;
1401
1402 if (GET_CODE (parallel) != PARALLEL)
1403 return false;
1404
1405 for (i = 0; i < XVECLEN (parallel, 0); ++i)
1406 {
1407 const_rtx x;
1408
1409 x = XEXP (XVECEXP (parallel, 0, i), 0);
1410 if (REG_P (x) && REGNO (x) == regno)
1411 return true;
1412 }
1413
1414 return false;
1415 }
1416
1417 /* Implement TARGET_FUNCTION_OK_FOR_SIBCALL_P. */
1418
1419 static bool
1420 m68k_ok_for_sibcall_p (tree decl, tree exp)
1421 {
1422 enum m68k_function_kind kind;
1423
1424 /* We cannot use sibcalls for nested functions because we use the
1425 static chain register for indirect calls. */
1426 if (CALL_EXPR_STATIC_CHAIN (exp))
1427 return false;
1428
1429 if (!VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
1430 {
1431 /* Check that the return value locations are the same. For
1432 example that we aren't returning a value from the sibling in
1433 a D0 register but then need to transfer it to a A0 register. */
1434 rtx cfun_value;
1435 rtx call_value;
1436
1437 cfun_value = FUNCTION_VALUE (TREE_TYPE (DECL_RESULT (cfun->decl)),
1438 cfun->decl);
1439 call_value = FUNCTION_VALUE (TREE_TYPE (exp), decl);
1440
1441 /* Check that the values are equal or that the result the callee
1442 function returns is superset of what the current function returns. */
1443 if (!(rtx_equal_p (cfun_value, call_value)
1444 || (REG_P (cfun_value)
1445 && m68k_reg_present_p (call_value, REGNO (cfun_value)))))
1446 return false;
1447 }
1448
1449 kind = m68k_get_function_kind (current_function_decl);
1450 if (kind == m68k_fk_normal_function)
1451 /* We can always sibcall from a normal function, because it's
1452 undefined if it is calling an interrupt function. */
1453 return true;
1454
1455 /* Otherwise we can only sibcall if the function kind is known to be
1456 the same. */
1457 if (decl && m68k_get_function_kind (decl) == kind)
1458 return true;
1459
1460 return false;
1461 }
1462
/* Implement TARGET_FUNCTION_ARG.  On the m68k all args are always
   pushed, so no argument ever lives in a register and this hook
   always returns NULL_RTX.  */

static rtx
m68k_function_arg (cumulative_args_t, const function_arg_info &)
{
  return NULL_RTX;
}
1470
1471 static void
1472 m68k_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
1473 const_tree type, bool named ATTRIBUTE_UNUSED)
1474 {
1475 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
1476
1477 *cum += (mode != BLKmode
1478 ? (GET_MODE_SIZE (mode) + 3) & ~3
1479 : (int_size_in_bytes (type) + 3) & ~3);
1480 }
1481
1482 /* Convert X to a legitimate function call memory reference and return the
1483 result. */
1484
1485 rtx
1486 m68k_legitimize_call_address (rtx x)
1487 {
1488 gcc_assert (MEM_P (x));
1489 if (call_operand (XEXP (x, 0), VOIDmode))
1490 return x;
1491 return replace_equiv_address (x, force_reg (Pmode, XEXP (x, 0)));
1492 }
1493
1494 /* Likewise for sibling calls. */
1495
1496 rtx
1497 m68k_legitimize_sibcall_address (rtx x)
1498 {
1499 gcc_assert (MEM_P (x));
1500 if (sibcall_operand (XEXP (x, 0), VOIDmode))
1501 return x;
1502
1503 emit_move_insn (gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM), XEXP (x, 0));
1504 return replace_equiv_address (x, gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM));
1505 }
1506
/* Convert X to a legitimate address and return it if successful.  Otherwise
   return X.

   For the 68000, we handle X+REG by loading X into a register R and
   using R+REG.  R will go in an address reg and indexing will be used.
   However, if REG is a broken-out memory address or multiplication,
   nothing needs to be done because REG can certainly go in an address reg.  */

static rtx
m68k_legitimize_address (rtx x, rtx oldx, machine_mode mode)
{
  /* TLS symbols need their own legitimization path.  */
  if (m68k_tls_symbol_p (x))
    return m68k_legitimize_tls_address (x);

  if (GET_CODE (x) == PLUS)
    {
      /* CH tracks whether X differs from the address we started with
	 (it starts out set if a previous pass already rewrote OLDX);
	 COPIED tracks whether X has been unshared so that it is safe
	 to modify in place.  */
      int ch = (x) != (oldx);
      int copied = 0;

#define COPY_ONCE(Y) if (!copied) { Y = copy_rtx (Y); copied = ch = 1; }

      /* Break out a multiplication (index scaling) on either side of
	 the PLUS into its own register.  */
      if (GET_CODE (XEXP (x, 0)) == MULT)
	{
	  COPY_ONCE (x);
	  XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
	}
      if (GET_CODE (XEXP (x, 1)) == MULT)
	{
	  COPY_ONCE (x);
	  XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
	}
      if (ch)
	{
	  if (GET_CODE (XEXP (x, 1)) == REG
	      && GET_CODE (XEXP (x, 0)) == REG)
	    {
	      /* REG+REG is usable as an indexed address, except that
		 the ColdFire FPU cannot use it for float accesses
		 (it only accepts addressing modes 2-5).  */
	      if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
		{
		  COPY_ONCE (x);
		  x = force_operand (x, 0);
		}
	      return x;
	    }
	  if (memory_address_p (mode, x))
	    return x;
	}
      /* One side is a register (or a sign-extended HImode register,
	 which indexing accepts directly): force the other side into a
	 register too, turning the sum into REG+REG.  */
      if (GET_CODE (XEXP (x, 0)) == REG
	  || (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
	      && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
	      && GET_MODE (XEXP (XEXP (x, 0), 0)) == HImode))
	{
	  rtx temp = gen_reg_rtx (Pmode);
	  rtx val = force_operand (XEXP (x, 1), 0);
	  emit_move_insn (temp, val);
	  COPY_ONCE (x);
	  XEXP (x, 1) = temp;
	  /* Again, ColdFire FPU float accesses cannot use REG+REG.  */
	  if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
	      && GET_CODE (XEXP (x, 0)) == REG)
	    x = force_operand (x, 0);
	}
      else if (GET_CODE (XEXP (x, 1)) == REG
	       || (GET_CODE (XEXP (x, 1)) == SIGN_EXTEND
		   && GET_CODE (XEXP (XEXP (x, 1), 0)) == REG
		   && GET_MODE (XEXP (XEXP (x, 1), 0)) == HImode))
	{
	  /* Mirror image of the case above.  */
	  rtx temp = gen_reg_rtx (Pmode);
	  rtx val = force_operand (XEXP (x, 0), 0);
	  emit_move_insn (temp, val);
	  COPY_ONCE (x);
	  XEXP (x, 0) = temp;
	  if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
	      && GET_CODE (XEXP (x, 1)) == REG)
	    x = force_operand (x, 0);
	}
    }

  return x;
}
1585
1586
1587 /* Output a dbCC; jCC sequence. Note we do not handle the
1588 floating point version of this sequence (Fdbcc). We also
1589 do not handle alternative conditions when CC_NO_OVERFLOW is
1590 set. It is assumed that valid_dbcc_comparison_p and flags_in_68881 will
1591 kick those out before we get here. */
1592
1593 void
1594 output_dbcc_and_branch (rtx *operands)
1595 {
1596 switch (GET_CODE (operands[3]))
1597 {
1598 case EQ:
1599 output_asm_insn ("dbeq %0,%l1\n\tjeq %l2", operands);
1600 break;
1601
1602 case NE:
1603 output_asm_insn ("dbne %0,%l1\n\tjne %l2", operands);
1604 break;
1605
1606 case GT:
1607 output_asm_insn ("dbgt %0,%l1\n\tjgt %l2", operands);
1608 break;
1609
1610 case GTU:
1611 output_asm_insn ("dbhi %0,%l1\n\tjhi %l2", operands);
1612 break;
1613
1614 case LT:
1615 output_asm_insn ("dblt %0,%l1\n\tjlt %l2", operands);
1616 break;
1617
1618 case LTU:
1619 output_asm_insn ("dbcs %0,%l1\n\tjcs %l2", operands);
1620 break;
1621
1622 case GE:
1623 output_asm_insn ("dbge %0,%l1\n\tjge %l2", operands);
1624 break;
1625
1626 case GEU:
1627 output_asm_insn ("dbcc %0,%l1\n\tjcc %l2", operands);
1628 break;
1629
1630 case LE:
1631 output_asm_insn ("dble %0,%l1\n\tjle %l2", operands);
1632 break;
1633
1634 case LEU:
1635 output_asm_insn ("dbls %0,%l1\n\tjls %l2", operands);
1636 break;
1637
1638 default:
1639 gcc_unreachable ();
1640 }
1641
1642 /* If the decrement is to be done in SImode, then we have
1643 to compensate for the fact that dbcc decrements in HImode. */
1644 switch (GET_MODE (operands[0]))
1645 {
1646 case E_SImode:
1647 output_asm_insn ("clr%.w %0\n\tsubq%.l #1,%0\n\tjpl %l1", operands);
1648 break;
1649
1650 case E_HImode:
1651 break;
1652
1653 default:
1654 gcc_unreachable ();
1655 }
1656 }
1657
/* Output assembly for a DImode scc: set the byte DEST according to the
   comparison OP of OPERAND1 against OPERAND2, performed as two SImode
   compares (most significant word first).  All code is emitted via
   output_asm_insn; the returned template is empty.  */
const char *
output_scc_di (rtx op, rtx operand1, rtx operand2, rtx dest)
{
  rtx loperands[7];
  enum rtx_code op_code = GET_CODE (op);

  /* This does not produce a useful cc.  */
  CC_STATUS_INIT;

  /* The m68k cmp.l instruction requires operand1 to be a reg as used
     below.  Swap the operands and change the op if these requirements
     are not fulfilled.  */
  if (GET_CODE (operand2) == REG && GET_CODE (operand1) != REG)
    {
      rtx tmp = operand1;

      operand1 = operand2;
      operand2 = tmp;
      op_code = swap_condition (op_code);
    }
  /* loperands[0] is OPERAND1, loperands[1] its second SImode word
     (next register or memory at offset 4).  */
  loperands[0] = operand1;
  if (GET_CODE (operand1) == REG)
    loperands[1] = gen_rtx_REG (SImode, REGNO (operand1) + 1);
  else
    loperands[1] = adjust_address (operand1, SImode, 4);
  if (operand2 != const0_rtx)
    {
      /* Likewise loperands[2]/[3] for OPERAND2 when it is not zero.  */
      loperands[2] = operand2;
      if (GET_CODE (operand2) == REG)
	loperands[3] = gen_rtx_REG (SImode, REGNO (operand2) + 1);
      else
	loperands[3] = adjust_address (operand2, SImode, 4);
    }
  /* loperands[4] is the label reached when the first words already
     decide the comparison; otherwise control falls through after
     comparing the second words.  */
  loperands[4] = gen_label_rtx ();
  if (operand2 != const0_rtx)
    output_asm_insn ("cmp%.l %2,%0\n\tjne %l4\n\tcmp%.l %3,%1", loperands);
  else
    {
      /* Compare against zero.  tst cannot be applied to an address
	 register before the 68020.  */
      if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[0]))
	output_asm_insn ("tst%.l %0", loperands);
      else
	output_asm_insn ("cmp%.w #0,%0", loperands);

      output_asm_insn ("jne %l4", loperands);

      if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[1]))
	output_asm_insn ("tst%.l %1", loperands);
      else
	output_asm_insn ("cmp%.w #0,%1", loperands);
    }

  loperands[5] = dest;

  /* Emit the set-on-condition.  For EQ/NE and the unsigned codes the
     same scc works on both paths, so the label simply joins them.
     For the signed codes the fall-through path (first words equal)
     must judge the second-word compare unsigned while the jump path
     uses the signed condition, hence two set insns separated by an
     extra join label in loperands[6].  */
  switch (op_code)
    {
    case EQ:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("seq %5", loperands);
      break;

    case NE:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sne %5", loperands);
      break;

    case GT:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("shi %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sgt %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case GTU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("shi %5", loperands);
      break;

    case LT:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("scs %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("slt %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case LTU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("scs %5", loperands);
      break;

    case GE:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("scc %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sge %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case GEU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("scc %5", loperands);
      break;

    case LE:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("sls %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sle %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case LEU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sls %5", loperands);
      break;

    default:
      gcc_unreachable ();
    }
  return "";
}
1794
1795 const char *
1796 output_btst (rtx *operands, rtx countop, rtx dataop, rtx_insn *insn, int signpos)
1797 {
1798 operands[0] = countop;
1799 operands[1] = dataop;
1800
1801 if (GET_CODE (countop) == CONST_INT)
1802 {
1803 register int count = INTVAL (countop);
1804 /* If COUNT is bigger than size of storage unit in use,
1805 advance to the containing unit of same size. */
1806 if (count > signpos)
1807 {
1808 int offset = (count & ~signpos) / 8;
1809 count = count & signpos;
1810 operands[1] = dataop = adjust_address (dataop, QImode, offset);
1811 }
1812 if (count == signpos)
1813 cc_status.flags = CC_NOT_POSITIVE | CC_Z_IN_NOT_N;
1814 else
1815 cc_status.flags = CC_NOT_NEGATIVE | CC_Z_IN_NOT_N;
1816
1817 /* These three statements used to use next_insns_test_no...
1818 but it appears that this should do the same job. */
1819 if (count == 31
1820 && next_insn_tests_no_inequality (insn))
1821 return "tst%.l %1";
1822 if (count == 15
1823 && next_insn_tests_no_inequality (insn))
1824 return "tst%.w %1";
1825 if (count == 7
1826 && next_insn_tests_no_inequality (insn))
1827 return "tst%.b %1";
1828 /* Try to use `movew to ccr' followed by the appropriate branch insn.
1829 On some m68k variants unfortunately that's slower than btst.
1830 On 68000 and higher, that should also work for all HImode operands. */
1831 if (TUNE_CPU32 || TARGET_COLDFIRE || optimize_size)
1832 {
1833 if (count == 3 && DATA_REG_P (operands[1])
1834 && next_insn_tests_no_inequality (insn))
1835 {
1836 cc_status.flags = CC_NOT_NEGATIVE | CC_Z_IN_NOT_N | CC_NO_OVERFLOW;
1837 return "move%.w %1,%%ccr";
1838 }
1839 if (count == 2 && DATA_REG_P (operands[1])
1840 && next_insn_tests_no_inequality (insn))
1841 {
1842 cc_status.flags = CC_NOT_NEGATIVE | CC_INVERTED | CC_NO_OVERFLOW;
1843 return "move%.w %1,%%ccr";
1844 }
1845 /* count == 1 followed by bvc/bvs and
1846 count == 0 followed by bcc/bcs are also possible, but need
1847 m68k-specific CC_Z_IN_NOT_V and CC_Z_IN_NOT_C flags. */
1848 }
1849
1850 cc_status.flags = CC_NOT_NEGATIVE;
1851 }
1852 return "btst %0,%1";
1853 }
1854 \f
1855 /* Return true if X is a legitimate base register. STRICT_P says
1856 whether we need strict checking. */
1857
1858 bool
1859 m68k_legitimate_base_reg_p (rtx x, bool strict_p)
1860 {
1861 /* Allow SUBREG everywhere we allow REG. This results in better code. */
1862 if (!strict_p && GET_CODE (x) == SUBREG)
1863 x = SUBREG_REG (x);
1864
1865 return (REG_P (x)
1866 && (strict_p
1867 ? REGNO_OK_FOR_BASE_P (REGNO (x))
1868 : REGNO_OK_FOR_BASE_NONSTRICT_P (REGNO (x))));
1869 }
1870
1871 /* Return true if X is a legitimate index register. STRICT_P says
1872 whether we need strict checking. */
1873
1874 bool
1875 m68k_legitimate_index_reg_p (rtx x, bool strict_p)
1876 {
1877 if (!strict_p && GET_CODE (x) == SUBREG)
1878 x = SUBREG_REG (x);
1879
1880 return (REG_P (x)
1881 && (strict_p
1882 ? REGNO_OK_FOR_INDEX_P (REGNO (x))
1883 : REGNO_OK_FOR_INDEX_NONSTRICT_P (REGNO (x))));
1884 }
1885
1886 /* Return true if X is a legitimate index expression for a (d8,An,Xn) or
1887 (bd,An,Xn) addressing mode. Fill in the INDEX and SCALE fields of
1888 ADDRESS if so. STRICT_P says whether we need strict checking. */
1889
1890 static bool
1891 m68k_decompose_index (rtx x, bool strict_p, struct m68k_address *address)
1892 {
1893 int scale;
1894
1895 /* Check for a scale factor. */
1896 scale = 1;
1897 if ((TARGET_68020 || TARGET_COLDFIRE)
1898 && GET_CODE (x) == MULT
1899 && GET_CODE (XEXP (x, 1)) == CONST_INT
1900 && (INTVAL (XEXP (x, 1)) == 2
1901 || INTVAL (XEXP (x, 1)) == 4
1902 || (INTVAL (XEXP (x, 1)) == 8
1903 && (TARGET_COLDFIRE_FPU || !TARGET_COLDFIRE))))
1904 {
1905 scale = INTVAL (XEXP (x, 1));
1906 x = XEXP (x, 0);
1907 }
1908
1909 /* Check for a word extension. */
1910 if (!TARGET_COLDFIRE
1911 && GET_CODE (x) == SIGN_EXTEND
1912 && GET_MODE (XEXP (x, 0)) == HImode)
1913 x = XEXP (x, 0);
1914
1915 if (m68k_legitimate_index_reg_p (x, strict_p))
1916 {
1917 address->scale = scale;
1918 address->index = x;
1919 return true;
1920 }
1921
1922 return false;
1923 }
1924
1925 /* Return true if X is an illegitimate symbolic constant. */
1926
1927 bool
1928 m68k_illegitimate_symbolic_constant_p (rtx x)
1929 {
1930 rtx base, offset;
1931
1932 if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P)
1933 {
1934 split_const (x, &base, &offset);
1935 if (GET_CODE (base) == SYMBOL_REF
1936 && !offset_within_block_p (base, INTVAL (offset)))
1937 return true;
1938 }
1939 return m68k_tls_reference_p (x, false);
1940 }
1941
/* Implement TARGET_CANNOT_FORCE_CONST_MEM.  Illegitimate symbolic
   constants (see m68k_illegitimate_symbolic_constant_p) must not be
   spilled to the constant pool.  */

static bool
m68k_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  return m68k_illegitimate_symbolic_constant_p (x);
}
1949
1950 /* Return true if X is a legitimate constant address that can reach
1951 bytes in the range [X, X + REACH). STRICT_P says whether we need
1952 strict checking. */
1953
1954 static bool
1955 m68k_legitimate_constant_address_p (rtx x, unsigned int reach, bool strict_p)
1956 {
1957 rtx base, offset;
1958
1959 if (!CONSTANT_ADDRESS_P (x))
1960 return false;
1961
1962 if (flag_pic
1963 && !(strict_p && TARGET_PCREL)
1964 && symbolic_operand (x, VOIDmode))
1965 return false;
1966
1967 if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P && reach > 1)
1968 {
1969 split_const (x, &base, &offset);
1970 if (GET_CODE (base) == SYMBOL_REF
1971 && !offset_within_block_p (base, INTVAL (offset) + reach - 1))
1972 return false;
1973 }
1974
1975 return !m68k_tls_reference_p (x, false);
1976 }
1977
1978 /* Return true if X is a LABEL_REF for a jump table. Assume that unplaced
1979 labels will become jump tables. */
1980
1981 static bool
1982 m68k_jump_table_ref_p (rtx x)
1983 {
1984 if (GET_CODE (x) != LABEL_REF)
1985 return false;
1986
1987 rtx_insn *insn = as_a <rtx_insn *> (XEXP (x, 0));
1988 if (!NEXT_INSN (insn) && !PREV_INSN (insn))
1989 return true;
1990
1991 insn = next_nonnote_insn (insn);
1992 return insn && JUMP_TABLE_DATA_P (insn);
1993 }
1994
/* Return true if X is a legitimate address for values of mode MODE.
   STRICT_P says whether strict checking is needed.  If the address
   is valid, describe its components in *ADDRESS.

   The checks below walk X against the 68k addressing modes roughly in
   increasing order of complexity; the first match wins and fills in
   *ADDRESS.  */

static bool
m68k_decompose_address (machine_mode mode, rtx x,
			bool strict_p, struct m68k_address *address)
{
  unsigned int reach;

  memset (address, 0, sizeof (*address));

  /* REACH is how many bytes past the address the access may extend;
     it keeps displacement-plus-access within the displacement range.  */
  if (mode == BLKmode)
    reach = 1;
  else
    reach = GET_MODE_SIZE (mode);

  /* Check for (An) (mode 2).  */
  if (m68k_legitimate_base_reg_p (x, strict_p))
    {
      address->base = x;
      return true;
    }

  /* Check for -(An) and (An)+ (modes 3 and 4).  */
  if ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_INC)
      && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
    {
      address->code = GET_CODE (x);
      address->base = XEXP (x, 0);
      return true;
    }

  /* Check for (d16,An) (mode 5).  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && IN_RANGE (INTVAL (XEXP (x, 1)), -0x8000, 0x8000 - reach)
      && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
    {
      address->base = XEXP (x, 0);
      address->offset = XEXP (x, 1);
      return true;
    }

  /* Check for GOT loads.  These are (bd,An,Xn) addresses if
     TARGET_68020 && flag_pic == 2, otherwise they are (d16,An)
     addresses.  */
  if (GET_CODE (x) == PLUS
      && XEXP (x, 0) == pic_offset_table_rtx)
    {
      /* As we are processing a PLUS, do not unwrap RELOC32 symbols --
	 they are invalid in this context.  */
      if (m68k_unwrap_symbol (XEXP (x, 1), false) != XEXP (x, 1))
	{
	  address->base = XEXP (x, 0);
	  address->offset = XEXP (x, 1);
	  return true;
	}
    }

  /* The ColdFire FPU only accepts addressing modes 2-5.  */
  if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
    return false;

  /* Check for (xxx).w and (xxx).l.  Also, in the TARGET_PCREL case,
     check for (d16,PC) or (bd,PC,Xn) with a suppressed index register.
     All these modes are variations of mode 7.  */
  if (m68k_legitimate_constant_address_p (x, reach, strict_p))
    {
      address->offset = x;
      return true;
    }

  /* Check for (d8,PC,Xn), a mode 7 form.  This case is needed for
     tablejumps.

     ??? do_tablejump creates these addresses before placing the target
     label, so we have to assume that unplaced labels are jump table
     references.  It seems unlikely that we would ever generate indexed
     accesses to unplaced labels in other cases.  */
  if (GET_CODE (x) == PLUS
      && m68k_jump_table_ref_p (XEXP (x, 1))
      && m68k_decompose_index (XEXP (x, 0), strict_p, address))
    {
      address->offset = XEXP (x, 1);
      return true;
    }

  /* Everything hereafter deals with (d8,An,Xn.SIZE*SCALE) or
     (bd,An,Xn.SIZE*SCALE) addresses.  */

  if (TARGET_68020)
    {
      /* Check for a nonzero base displacement.  */
      if (GET_CODE (x) == PLUS
	  && m68k_legitimate_constant_address_p (XEXP (x, 1), reach, strict_p))
	{
	  address->offset = XEXP (x, 1);
	  x = XEXP (x, 0);
	}

      /* Check for a suppressed index register.  */
      if (m68k_legitimate_base_reg_p (x, strict_p))
	{
	  address->base = x;
	  return true;
	}

      /* Check for a suppressed base register.  Do not allow this case
	 for non-symbolic offsets as it effectively gives gcc freedom
	 to treat data registers as base registers, which can generate
	 worse code.  */
      if (address->offset
	  && symbolic_operand (address->offset, VOIDmode)
	  && m68k_decompose_index (x, strict_p, address))
	return true;
    }
  else
    {
      /* Check for a nonzero base displacement.  */
      if (GET_CODE (x) == PLUS
	  && GET_CODE (XEXP (x, 1)) == CONST_INT
	  && IN_RANGE (INTVAL (XEXP (x, 1)), -0x80, 0x80 - reach))
	{
	  address->offset = XEXP (x, 1);
	  x = XEXP (x, 0);
	}
    }

  /* We now expect the sum of a base and an index.  */
  if (GET_CODE (x) == PLUS)
    {
      if (m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p)
	  && m68k_decompose_index (XEXP (x, 1), strict_p, address))
	{
	  address->base = XEXP (x, 0);
	  return true;
	}

      if (m68k_legitimate_base_reg_p (XEXP (x, 1), strict_p)
	  && m68k_decompose_index (XEXP (x, 0), strict_p, address))
	{
	  address->base = XEXP (x, 1);
	  return true;
	}
    }
  return false;
}
2143
2144 /* Return true if X is a legitimate address for values of mode MODE.
2145 STRICT_P says whether strict checking is needed. */
2146
2147 bool
2148 m68k_legitimate_address_p (machine_mode mode, rtx x, bool strict_p)
2149 {
2150 struct m68k_address address;
2151
2152 return m68k_decompose_address (mode, x, strict_p, &address);
2153 }
2154
2155 /* Return true if X is a memory, describing its address in ADDRESS if so.
2156 Apply strict checking if called during or after reload. */
2157
2158 static bool
2159 m68k_legitimate_mem_p (rtx x, struct m68k_address *address)
2160 {
2161 return (MEM_P (x)
2162 && m68k_decompose_address (GET_MODE (x), XEXP (x, 0),
2163 reload_in_progress || reload_completed,
2164 address));
2165 }
2166
2167 /* Implement TARGET_LEGITIMATE_CONSTANT_P. */
2168
2169 bool
2170 m68k_legitimate_constant_p (machine_mode mode, rtx x)
2171 {
2172 return mode != XFmode && !m68k_illegitimate_symbolic_constant_p (x);
2173 }
2174
2175 /* Return true if X matches the 'Q' constraint. It must be a memory
2176 with a base address and no constant offset or index. */
2177
2178 bool
2179 m68k_matches_q_p (rtx x)
2180 {
2181 struct m68k_address address;
2182
2183 return (m68k_legitimate_mem_p (x, &address)
2184 && address.code == UNKNOWN
2185 && address.base
2186 && !address.offset
2187 && !address.index);
2188 }
2189
2190 /* Return true if X matches the 'U' constraint. It must be a base address
2191 with a constant offset and no index. */
2192
2193 bool
2194 m68k_matches_u_p (rtx x)
2195 {
2196 struct m68k_address address;
2197
2198 return (m68k_legitimate_mem_p (x, &address)
2199 && address.code == UNKNOWN
2200 && address.base
2201 && address.offset
2202 && !address.index);
2203 }
2204
2205 /* Return GOT pointer. */
2206
2207 static rtx
2208 m68k_get_gp (void)
2209 {
2210 if (pic_offset_table_rtx == NULL_RTX)
2211 pic_offset_table_rtx = gen_rtx_REG (Pmode, PIC_REG);
2212
2213 crtl->uses_pic_offset_table = 1;
2214
2215 return pic_offset_table_rtx;
2216 }
2217
/* M68K relocations, used to distinguish GOT and TLS relocations in UNSPEC
   wrappers.  RELOC_GOT is the only non-TLS member; the rest correspond
   to the TLS access models (general dynamic, local dynamic module/offset,
   initial exec, local exec).  */
enum m68k_reloc { RELOC_GOT, RELOC_TLSGD, RELOC_TLSLDM, RELOC_TLSLDO,
		  RELOC_TLSIE, RELOC_TLSLE };

/* True iff RELOC names a TLS relocation rather than a GOT one.  */
#define TLS_RELOC_P(RELOC) ((RELOC) != RELOC_GOT)
2224
/* Wrap symbol X into unspec representing relocation RELOC.
   BASE_REG - register that should be added to the result.
   TEMP_REG - if non-null, temporary register.

   The result is either a (plus BASE_REG (const (unspec ...))) address,
   or -- for ColdFire with -mxgot/-mxtls -- a register holding the sum,
   computed by an explicitly emitted move/add pair.  */

static rtx
m68k_wrap_symbol (rtx x, enum m68k_reloc reloc, rtx base_reg, rtx temp_reg)
{
  bool use_x_p;

  /* -mxgot governs GOT accesses (BASE_REG is the PIC register),
     -mxtls governs TLS accesses.  */
  use_x_p = (base_reg == pic_offset_table_rtx) ? TARGET_XGOT : TARGET_XTLS;

  if (TARGET_COLDFIRE && use_x_p)
    /* When compiling with -mx{got, tls} switch the code will look like this:

	move.l <X>@<RELOC>,<TEMP_REG>
	add.l <BASE_REG>,<TEMP_REG> */
    {
      /* Wrap X in UNSPEC_??? to tip m68k_output_addr_const_extra
	 to put @RELOC after reference.  */
      x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)),
			  UNSPEC_RELOC32);
      x = gen_rtx_CONST (Pmode, x);

      if (temp_reg == NULL)
	{
	  gcc_assert (can_create_pseudo_p ());
	  temp_reg = gen_reg_rtx (Pmode);
	}

      emit_move_insn (temp_reg, x);
      emit_insn (gen_addsi3 (temp_reg, temp_reg, base_reg));
      x = temp_reg;
    }
  else
    {
      /* 16-bit relocation: emit an address of the form
	 (plus BASE_REG (const (unspec [X RELOC] RELOC16))).  */
      x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)),
			  UNSPEC_RELOC16);
      x = gen_rtx_CONST (Pmode, x);

      x = gen_rtx_PLUS (Pmode, base_reg, x);
    }

  return x;
}
2269
/* Helper for m68k_unwrap_symbol.
   Strip a (const (unspec ...)) RELOC16 wrapper -- and, if
   UNWRAP_RELOC32_P, a RELOC32 wrapper -- from ORIG, looking through an
   optional integer addend.
   Also, if unwrapping was successful (that is if (ORIG != <return value>)),
   sets *RELOC_PTR to relocation type for the symbol.  */

static rtx
m68k_unwrap_symbol_1 (rtx orig, bool unwrap_reloc32_p,
		      enum m68k_reloc *reloc_ptr)
{
  if (GET_CODE (orig) == CONST)
    {
      rtx x;
      enum m68k_reloc dummy;

      x = XEXP (orig, 0);

      /* Let the code below store through RELOC_PTR unconditionally.  */
      if (reloc_ptr == NULL)
	reloc_ptr = &dummy;

      /* Handle an addend.  */
      if ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS)
	  && CONST_INT_P (XEXP (x, 1)))
	x = XEXP (x, 0);

      if (GET_CODE (x) == UNSPEC)
	{
	  switch (XINT (x, 1))
	    {
	    case UNSPEC_RELOC16:
	      orig = XVECEXP (x, 0, 0);
	      *reloc_ptr = (enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1));
	      break;

	    case UNSPEC_RELOC32:
	      if (unwrap_reloc32_p)
		{
		  orig = XVECEXP (x, 0, 0);
		  *reloc_ptr = (enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1));
		}
	      break;

	    default:
	      break;
	    }
	}
    }

  return orig;
}
2318
2319 /* Unwrap symbol from UNSPEC_RELOC16 and, if unwrap_reloc32_p,
2320 UNSPEC_RELOC32 wrappers. */
2321
2322 rtx
2323 m68k_unwrap_symbol (rtx orig, bool unwrap_reloc32_p)
2324 {
2325 return m68k_unwrap_symbol_1 (orig, unwrap_reloc32_p, NULL);
2326 }
2327
/* Adjust decorated address operand before outputing assembler for it.  */

static void
m68k_adjust_decorated_operand (rtx op)
{
  /* Combine and, possibly, other optimizations may do good job
     converting
       (const (unspec [(symbol)]))
     into
       (const (plus (unspec [(symbol)])
		    (const_int N))).
     The problem with this is emitting @TLS or @GOT decorations.
     The decoration is emitted when processing (unspec), so the
     result would be "#symbol@TLSLE+N" instead of "#symbol+N@TLSLE".

     It seems that the easiest solution to this is to convert such
     operands to
       (const (unspec [(plus (symbol)
			     (const_int N))])).
     Note, that the top level of operand remains intact, so we don't have
     to patch up anything outside of the operand.  */

  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, op, ALL)
    {
      rtx x = *iter;
      /* m68k_unwrap_symbol returning something different means X is a
	 (const ...) wrapping a RELOC16/RELOC32 unspec.  */
      if (m68k_unwrap_symbol (x, true) != x)
	{
	  rtx plus;

	  gcc_assert (GET_CODE (x) == CONST);
	  plus = XEXP (x, 0);

	  if (GET_CODE (plus) == PLUS || GET_CODE (plus) == MINUS)
	    {
	      rtx unspec;
	      rtx addend;

	      unspec = XEXP (plus, 0);
	      gcc_assert (GET_CODE (unspec) == UNSPEC);
	      addend = XEXP (plus, 1);
	      gcc_assert (CONST_INT_P (addend));

	      /* We now have all the pieces, rearrange them.  */

	      /* Move symbol to plus.  */
	      XEXP (plus, 0) = XVECEXP (unspec, 0, 0);

	      /* Move plus inside unspec.  */
	      XVECEXP (unspec, 0, 0) = plus;

	      /* Move unspec to top level of const.  */
	      XEXP (x, 0) = unspec;
	    }
	  /* The wrapped symbol has been handled; don't descend into it.  */
	  iter.skip_subrtxes ();
	}
    }
}
2386
2387 /* Move X to a register and add REG_EQUAL note pointing to ORIG.
2388 If REG is non-null, use it; generate new pseudo otherwise. */
2389
2390 static rtx
2391 m68k_move_to_reg (rtx x, rtx orig, rtx reg)
2392 {
2393 rtx_insn *insn;
2394
2395 if (reg == NULL_RTX)
2396 {
2397 gcc_assert (can_create_pseudo_p ());
2398 reg = gen_reg_rtx (Pmode);
2399 }
2400
2401 insn = emit_move_insn (reg, x);
2402 /* Put a REG_EQUAL note on this insn, so that it can be optimized
2403 by loop. */
2404 set_unique_reg_note (insn, REG_EQUAL, orig);
2405
2406 return reg;
2407 }
2408
2409 /* Does the same as m68k_wrap_symbol, but returns a memory reference to
2410 GOT slot. */
2411
2412 static rtx
2413 m68k_wrap_symbol_into_got_ref (rtx x, enum m68k_reloc reloc, rtx temp_reg)
2414 {
2415 x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), temp_reg);
2416
2417 x = gen_rtx_MEM (Pmode, x);
2418 MEM_READONLY_P (x) = 1;
2419
2420 return x;
2421 }
2422
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.

   An address is legitimized by making an indirect reference
   through the Global Offset Table with the name of the symbol
   used as an offset.

   The assembler and linker are responsible for placing the
   address of the symbol in the GOT.  The function prologue
   is responsible for initializing a5 to the starting address
   of the GOT.

   The assembler is also responsible for translating a symbol name
   into a constant displacement from the start of the GOT.

   A quick example may make things a little clearer:

   When not generating PIC code to store the value 12345 into _foo
   we would generate the following code:

	movel #12345, _foo

   When generating PIC two transformations are made.  First, the compiler
   loads the address of foo into a register.  So the first transformation makes:

	lea	_foo, a0
	movel   #12345, a0@

   The code in movsi will intercept the lea instruction and call this
   routine which will transform the instructions into:

	movel   a5@(_foo:w), a0
	movel   #12345, a0@


   That (in a nutshell) is how *all* symbol and label references are
   handled.  */

rtx
legitimize_pic_address (rtx orig, machine_mode mode ATTRIBUTE_UNUSED,
			rtx reg)
{
  rtx pic_ref = orig;

  /* First handle a simple SYMBOL_REF or LABEL_REF */
  if (GET_CODE (orig) == SYMBOL_REF || GET_CODE (orig) == LABEL_REF)
    {
      /* A scratch register is required for the GOT load.  */
      gcc_assert (reg);

      pic_ref = m68k_wrap_symbol_into_got_ref (orig, RELOC_GOT, reg);
      pic_ref = m68k_move_to_reg (pic_ref, orig, reg);
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      /* Make sure this has not already been legitimized.  */
      if (m68k_unwrap_symbol (orig, true) != orig)
	return orig;

      gcc_assert (reg);

      /* legitimize both operands of the PLUS */
      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      /* Recurse; REG is reused for the second operand only if the
	 first did not consume it.  */
      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
				     base == reg ? 0 : reg);

      if (GET_CODE (orig) == CONST_INT)
	pic_ref = plus_constant (Pmode, base, INTVAL (orig));
      else
	pic_ref = gen_rtx_PLUS (Pmode, base, orig);
    }

  return pic_ref;
}
2502
/* Cached SYMBOL_REF for __tls_get_addr; lazily created by
   m68k_get_tls_get_addr and preserved across GC by GTY.  */
static GTY(()) rtx m68k_tls_get_addr;
2505
2506 /* Return SYMBOL_REF for __tls_get_addr. */
2507
2508 static rtx
2509 m68k_get_tls_get_addr (void)
2510 {
2511 if (m68k_tls_get_addr == NULL_RTX)
2512 m68k_tls_get_addr = init_one_libfunc ("__tls_get_addr");
2513
2514 return m68k_tls_get_addr;
2515 }
2516
/* Return libcall result in A0 instead of usual D0.  Toggled around the
   __tls_get_addr / __m68k_read_tp call sequences below, whose results
   arrive in A0.  (Presumably read by the libcall-value hook defined
   elsewhere in this file -- not visible in this chunk.)  */
static bool m68k_libcall_value_in_a0_p = false;
2519
/* Emit instruction sequence that calls __tls_get_addr.  X is
   the TLS symbol we are referencing and RELOC is the symbol type to use
   (either TLSGD or TLSLDM).  EQV is the REG_EQUAL note for the sequence
   emitted.  A pseudo register with result of __tls_get_addr call is
   returned.  */

static rtx
m68k_call_tls_get_addr (rtx x, rtx eqv, enum m68k_reloc reloc)
{
  rtx a0;
  rtx_insn *insns;
  rtx dest;

  /* Emit the call sequence.  */
  start_sequence ();

  /* FIXME: Unfortunately, emit_library_call_value does not
     consider (plus (%a5) (const (unspec))) to be a good enough
     operand for push, so it forces it into a register.  The bad
     thing about this is that combiner, due to copy propagation and other
     optimizations, sometimes cannot later fix this.  As a consequence,
     additional register may be allocated resulting in a spill.
     For reference, see args processing loops in
     calls.c:emit_library_call_value_1.
     For testcase, see gcc.target/m68k/tls-{gd, ld}.c  */
  x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), NULL_RTX);

  /* __tls_get_addr() is not a libcall, but emitting a libcall_value
     is the simpliest way of generating a call.  The difference between
     __tls_get_addr() and libcall is that the result is returned in D0
     instead of A0.  To workaround this, we use m68k_libcall_value_in_a0_p
     which temporarily switches returning the result to A0.  */

  m68k_libcall_value_in_a0_p = true;
  a0 = emit_library_call_value (m68k_get_tls_get_addr (), NULL_RTX, LCT_PURE,
				Pmode, x, Pmode);
  m68k_libcall_value_in_a0_p = false;

  insns = get_insns ();
  end_sequence ();

  /* Wrap the whole sequence in a libcall block so the optimizers can
     treat it as a single value-producing operation equal to EQV.  */
  gcc_assert (can_create_pseudo_p ());
  dest = gen_reg_rtx (Pmode);
  emit_libcall_block (insns, dest, a0, eqv);

  return dest;
}
2567
/* Cached SYMBOL_REF for __m68k_read_tp; lazily created by
   m68k_get_m68k_read_tp.  (The old comment wrongly said
   __tls_get_addr.)  */
static GTY(()) rtx m68k_read_tp;
2570
2571 /* Return SYMBOL_REF for __m68k_read_tp. */
2572
2573 static rtx
2574 m68k_get_m68k_read_tp (void)
2575 {
2576 if (m68k_read_tp == NULL_RTX)
2577 m68k_read_tp = init_one_libfunc ("__m68k_read_tp");
2578
2579 return m68k_read_tp;
2580 }
2581
/* Emit instruction sequence that calls __m68k_read_tp.
   A pseudo register with result of __m68k_read_tp call is returned.  */

static rtx
m68k_call_m68k_read_tp (void)
{
  rtx a0;
  rtx eqv;
  rtx_insn *insns;
  rtx dest;

  start_sequence ();

  /* __m68k_read_tp() is not a libcall, but emitting a libcall_value
     is the simpliest way of generating a call.  The difference between
     __m68k_read_tp() and libcall is that the result is returned in D0
     instead of A0.  To workaround this, we use m68k_libcall_value_in_a0_p
     which temporarily switches returning the result to A0.  */

  /* Emit the call sequence.  */
  m68k_libcall_value_in_a0_p = true;
  a0 = emit_library_call_value (m68k_get_m68k_read_tp (), NULL_RTX, LCT_PURE,
				Pmode);
  m68k_libcall_value_in_a0_p = false;
  insns = get_insns ();
  end_sequence ();

  /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
     share the m68k_read_tp result with other IE/LE model accesses.  */
  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const1_rtx), UNSPEC_RELOC32);

  gcc_assert (can_create_pseudo_p ());
  dest = gen_reg_rtx (Pmode);
  emit_libcall_block (insns, dest, a0, eqv);

  return dest;
}
2619
/* Return a legitimized address for accessing TLS SYMBOL_REF X.
   For explanations on instructions sequences see TLS/NPTL ABI for m68k and
   ColdFire.  */

rtx
m68k_legitimize_tls_address (rtx orig)
{
  switch (SYMBOL_REF_TLS_MODEL (orig))
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      /* GD: each symbol gets its own __tls_get_addr call; ORIG itself
	 serves as the REG_EQUAL note.  */
      orig = m68k_call_tls_get_addr (orig, orig, RELOC_TLSGD);
      break;

    case TLS_MODEL_LOCAL_DYNAMIC:
      {
	rtx eqv;
	rtx a0;
	rtx x;

	/* Attach a unique REG_EQUIV, to allow the RTL optimizers to
	   share the LDM result with other LD model accesses.  */
	eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
			      UNSPEC_RELOC32);

	a0 = m68k_call_tls_get_addr (orig, eqv, RELOC_TLSLDM);

	/* Add the module-relative offset of ORIG to the module base.  */
	x = m68k_wrap_symbol (orig, RELOC_TLSLDO, a0, NULL_RTX);

	if (can_create_pseudo_p ())
	  x = m68k_move_to_reg (x, orig, NULL_RTX);

	orig = x;
	break;
      }

    case TLS_MODEL_INITIAL_EXEC:
      {
	rtx a0;
	rtx x;

	a0 = m68k_call_m68k_read_tp ();

	/* IE: load the symbol's offset from its GOT slot and add the
	   thread pointer.  */
	x = m68k_wrap_symbol_into_got_ref (orig, RELOC_TLSIE, NULL_RTX);
	x = gen_rtx_PLUS (Pmode, x, a0);

	if (can_create_pseudo_p ())
	  x = m68k_move_to_reg (x, orig, NULL_RTX);

	orig = x;
	break;
      }

    case TLS_MODEL_LOCAL_EXEC:
      {
	rtx a0;
	rtx x;

	a0 = m68k_call_m68k_read_tp ();

	/* LE: the offset is a link-time constant added directly to the
	   thread pointer.  */
	x = m68k_wrap_symbol (orig, RELOC_TLSLE, a0, NULL_RTX);

	if (can_create_pseudo_p ())
	  x = m68k_move_to_reg (x, orig, NULL_RTX);

	orig = x;
	break;
      }

    default:
      gcc_unreachable ();
    }

  return orig;
}
2694
2695 /* Return true if X is a TLS symbol. */
2696
2697 static bool
2698 m68k_tls_symbol_p (rtx x)
2699 {
2700 if (!TARGET_HAVE_TLS)
2701 return false;
2702
2703 if (GET_CODE (x) != SYMBOL_REF)
2704 return false;
2705
2706 return SYMBOL_REF_TLS_MODEL (x) != 0;
2707 }
2708
2709 /* If !LEGITIMATE_P, return true if X is a TLS symbol reference,
2710 though illegitimate one.
2711 If LEGITIMATE_P, return true if X is a legitimate TLS symbol reference. */
2712
2713 bool
2714 m68k_tls_reference_p (rtx x, bool legitimate_p)
2715 {
2716 if (!TARGET_HAVE_TLS)
2717 return false;
2718
2719 if (!legitimate_p)
2720 {
2721 subrtx_var_iterator::array_type array;
2722 FOR_EACH_SUBRTX_VAR (iter, array, x, ALL)
2723 {
2724 rtx x = *iter;
2725
2726 /* Note: this is not the same as m68k_tls_symbol_p. */
2727 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0)
2728 return true;
2729
2730 /* Don't recurse into legitimate TLS references. */
2731 if (m68k_tls_reference_p (x, true))
2732 iter.skip_subrtxes ();
2733 }
2734 return false;
2735 }
2736 else
2737 {
2738 enum m68k_reloc reloc = RELOC_GOT;
2739
2740 return (m68k_unwrap_symbol_1 (x, true, &reloc) != x
2741 && TLS_RELOC_P (reloc));
2742 }
2743 }
2744
2745 \f
2746
/* True iff I fits the signed 8-bit immediate range of moveq (-128..127).  */
#define USE_MOVQ(i) ((unsigned) ((i) + 128) <= 255)
2748
2749 /* Return the type of move that should be used for integer I. */
2750
2751 M68K_CONST_METHOD
2752 m68k_const_method (HOST_WIDE_INT i)
2753 {
2754 unsigned u;
2755
2756 if (USE_MOVQ (i))
2757 return MOVQ;
2758
2759 /* The ColdFire doesn't have byte or word operations. */
2760 /* FIXME: This may not be useful for the m68060 either. */
2761 if (!TARGET_COLDFIRE)
2762 {
2763 /* if -256 < N < 256 but N is not in range for a moveq
2764 N^ff will be, so use moveq #N^ff, dreg; not.b dreg. */
2765 if (USE_MOVQ (i ^ 0xff))
2766 return NOTB;
2767 /* Likewise, try with not.w */
2768 if (USE_MOVQ (i ^ 0xffff))
2769 return NOTW;
2770 /* This is the only value where neg.w is useful */
2771 if (i == -65408)
2772 return NEGW;
2773 }
2774
2775 /* Try also with swap. */
2776 u = i;
2777 if (USE_MOVQ ((u >> 16) | (u << 16)))
2778 return SWAP;
2779
2780 if (TARGET_ISAB)
2781 {
2782 /* Try using MVZ/MVS with an immediate value to load constants. */
2783 if (i >= 0 && i <= 65535)
2784 return MVZ;
2785 if (i >= -32768 && i <= 32767)
2786 return MVS;
2787 }
2788
2789 /* Otherwise, use move.l */
2790 return MOVL;
2791 }
2792
2793 /* Return the cost of moving constant I into a data register. */
2794
2795 static int
2796 const_int_cost (HOST_WIDE_INT i)
2797 {
2798 switch (m68k_const_method (i))
2799 {
2800 case MOVQ:
2801 /* Constants between -128 and 127 are cheap due to moveq. */
2802 return 0;
2803 case MVZ:
2804 case MVS:
2805 case NOTB:
2806 case NOTW:
2807 case NEGW:
2808 case SWAP:
2809 /* Constants easily generated by moveq + not.b/not.w/neg.w/swap. */
2810 return 1;
2811 case MOVL:
2812 return 2;
2813 default:
2814 gcc_unreachable ();
2815 }
2816 }
2817
/* Implement TARGET_RTX_COSTS.  Estimate the cost of X appearing in
   context OUTER_CODE, storing it in *TOTAL.  Return true when *TOTAL
   is final (subexpressions need not be costed separately), false to
   let the generic code recurse.  */

static bool
m68k_rtx_costs (rtx x, machine_mode mode, int outer_code,
		int opno ATTRIBUTE_UNUSED,
		int *total, bool speed ATTRIBUTE_UNUSED)
{
  int code = GET_CODE (x);

  switch (code)
    {
    case CONST_INT:
      /* Constant zero is super cheap due to clr instruction.  */
      if (x == const0_rtx)
	*total = 0;
      else
	*total = const_int_cost (INTVAL (x));
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 3;
      return true;

    case CONST_DOUBLE:
      /* Make 0.0 cheaper than other floating constants to
	 encourage creating tstsf and tstdf insns.  */
      if (outer_code == COMPARE
	  && (x == CONST0_RTX (SFmode) || x == CONST0_RTX (DFmode)))
	*total = 4;
      else
	*total = 5;
      return true;

    /* These are vaguely right for a 68020.  */
    /* The costs for long multiply have been adjusted to work properly
       in synth_mult on the 68020, relative to an average of the time
       for add and the time for shift, taking away a little more because
       sometimes move insns are needed.  */
    /* div?.w is relatively cheaper on 68000 counted in COSTS_N_INSNS
       terms.  */
#define MULL_COST				\
  (TUNE_68060 ? 2				\
   : TUNE_68040 ? 5				\
   : (TUNE_CFV2 && TUNE_EMAC) ? 3		\
   : (TUNE_CFV2 && TUNE_MAC) ? 4		\
   : TUNE_CFV2 ? 8				\
   : TARGET_COLDFIRE ? 3 : 13)

#define MULW_COST				\
  (TUNE_68060 ? 2				\
   : TUNE_68040 ? 3				\
   : TUNE_68000_10 ? 5				\
   : (TUNE_CFV2 && TUNE_EMAC) ? 3		\
   : (TUNE_CFV2 && TUNE_MAC) ? 2		\
   : TUNE_CFV2 ? 8				\
   : TARGET_COLDFIRE ? 2 : 8)

#define DIVW_COST				\
  (TARGET_CF_HWDIV ? 11				\
   : TUNE_68000_10 || TARGET_COLDFIRE ? 12 : 27)

    case PLUS:
      /* An lea costs about three times as much as a simple add.  */
      if (mode == SImode
	  && GET_CODE (XEXP (x, 1)) == REG
	  && GET_CODE (XEXP (x, 0)) == MULT
	  && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
	  && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
	  && (INTVAL (XEXP (XEXP (x, 0), 1)) == 2
	      || INTVAL (XEXP (XEXP (x, 0), 1)) == 4
	      || INTVAL (XEXP (XEXP (x, 0), 1)) == 8))
	{
	    /* lea an@(dx:l:i),am */
	    *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 2 : 3);
	    return true;
	}
      return false;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      if (TUNE_68060)
	{
          *total = COSTS_N_INSNS(1);
	  return true;
	}
      if (TUNE_68000_10)
	{
	  if (GET_CODE (XEXP (x, 1)) == CONST_INT)
	    {
	      /* 68000/68010 shifts cost one cycle pair per bit shifted.  */
	      if (INTVAL (XEXP (x, 1)) < 16)
		*total = COSTS_N_INSNS (2) + INTVAL (XEXP (x, 1)) / 2;
	      else
		/* We're using clrw + swap for these cases.  */
		*total = COSTS_N_INSNS (4) + (INTVAL (XEXP (x, 1)) - 16) / 2;
	    }
	  else
	    *total = COSTS_N_INSNS (10); /* Worst case.  */
	  return true;
	}
      /* A shift by a big integer takes an extra instruction.  */
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && (INTVAL (XEXP (x, 1)) == 16))
	{
	  *total = COSTS_N_INSNS (2);	 /* clrw;swap */
	  return true;
	}
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && !(INTVAL (XEXP (x, 1)) > 0
	       && INTVAL (XEXP (x, 1)) <= 8))
	{
	  *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 1 : 3);	 /* lsr #i,dn */
	  return true;
	}
      return false;

    case MULT:
      /* A multiply whose operand is extended from a narrower mode, or
	 whose mode is narrow, can use the word multiply.  */
      if ((GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
	   || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
	  && mode == SImode)
	*total = COSTS_N_INSNS (MULW_COST);
      else if (mode == QImode || mode == HImode)
	*total = COSTS_N_INSNS (MULW_COST);
      else
	*total = COSTS_N_INSNS (MULL_COST);
      return true;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      if (mode == QImode || mode == HImode)
	*total = COSTS_N_INSNS (DIVW_COST);	/* div.w */
      else if (TARGET_CF_HWDIV)
	*total = COSTS_N_INSNS (18);
      else
	*total = COSTS_N_INSNS (43);		/* div.l */
      return true;

    case ZERO_EXTRACT:
      /* A bit-field extract used only for comparison is free (btst).  */
      if (outer_code == COMPARE)
	*total = 0;
      return false;

    default:
      return false;
    }
}
2966
/* Return an instruction to move CONST_INT OPERANDS[1] into data register
   OPERANDS[0].  For the two-instruction sequences (NOTB/NOTW/SWAP),
   OPERANDS[1] is rewritten to the immediate the first instruction
   actually needs, and the condition-code status is invalidated.  */

static const char *
output_move_const_into_data_reg (rtx *operands)
{
  HOST_WIDE_INT i;

  i = INTVAL (operands[1]);
  switch (m68k_const_method (i))
    {
    case MVZ:
      return "mvzw %1,%0";
    case MVS:
      return "mvsw %1,%0";
    case MOVQ:
      return "moveq %1,%0";
    case NOTB:
      CC_STATUS_INIT;
      operands[1] = GEN_INT (i ^ 0xff);
      return "moveq %1,%0\n\tnot%.b %0";
    case NOTW:
      CC_STATUS_INIT;
      operands[1] = GEN_INT (i ^ 0xffff);
      return "moveq %1,%0\n\tnot%.w %0";
    case NEGW:
      CC_STATUS_INIT;
      return "moveq #-128,%0\n\tneg%.w %0";
    case SWAP:
      {
	unsigned u = i;

	/* Emit the halfword-swapped value; swap restores the order.  */
	operands[1] = GEN_INT ((u << 16) | (u >> 16));
	return "moveq %1,%0\n\tswap %0";
      }
    case MOVL:
      return "move%.l %1,%0";
    default:
      gcc_unreachable ();
    }
}
3008
3009 /* Return true if I can be handled by ISA B's mov3q instruction. */
3010
3011 bool
3012 valid_mov3q_const (HOST_WIDE_INT i)
3013 {
3014 return TARGET_ISAB && (i == -1 || IN_RANGE (i, 1, 7));
3015 }
3016
/* Return an instruction to move CONST_INT OPERANDS[1] into OPERANDS[0],
   picking the cheapest encoding (clr, mov3q, sub, moveq tricks, move.w,
   pea, or a plain move.l).  */

static const char *
output_move_simode_const (rtx *operands)
{
  rtx dest;
  HOST_WIDE_INT src;

  dest = operands[0];
  src = INTVAL (operands[1]);
  if (src == 0
      && (DATA_REG_P (dest) || MEM_P (dest))
      /* clr insns on 68000 read before writing.  */
      && ((TARGET_68010 || TARGET_COLDFIRE)
	  || !(MEM_P (dest) && MEM_VOLATILE_P (dest))))
    return "clr%.l %0";
  else if (GET_MODE (dest) == SImode && valid_mov3q_const (src))
    return "mov3q%.l %1,%0";
  else if (src == 0 && ADDRESS_REG_P (dest))
    return "sub%.l %0,%0";
  else if (DATA_REG_P (dest))
    return output_move_const_into_data_reg (operands);
  else if (ADDRESS_REG_P (dest) && IN_RANGE (src, -0x8000, 0x7fff))
    {
      if (valid_mov3q_const (src))
	return "mov3q%.l %1,%0";
      /* move.w to an address register sign-extends to 32 bits.  */
      return "move%.w %1,%0";
    }
  else if (MEM_P (dest)
	   && GET_CODE (XEXP (dest, 0)) == PRE_DEC
	   && REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
	   && IN_RANGE (src, -0x8000, 0x7fff))
    {
      if (valid_mov3q_const (src))
	return "mov3q%.l %1,%-";
      /* Push of a 16-bit-representable constant: pea sign-extends.  */
      return "pea %a1";
    }
  return "move%.l %1,%0";
}
3057
3058 const char *
3059 output_move_simode (rtx *operands)
3060 {
3061 if (GET_CODE (operands[1]) == CONST_INT)
3062 return output_move_simode_const (operands);
3063 else if ((GET_CODE (operands[1]) == SYMBOL_REF
3064 || GET_CODE (operands[1]) == CONST)
3065 && push_operand (operands[0], SImode))
3066 return "pea %a1";
3067 else if ((GET_CODE (operands[1]) == SYMBOL_REF
3068 || GET_CODE (operands[1]) == CONST)
3069 && ADDRESS_REG_P (operands[0]))
3070 return "lea %a1,%0";
3071 return "move%.l %1,%0";
3072 }
3073
3074 const char *
3075 output_move_himode (rtx *operands)
3076 {
3077 if (GET_CODE (operands[1]) == CONST_INT)
3078 {
3079 if (operands[1] == const0_rtx
3080 && (DATA_REG_P (operands[0])
3081 || GET_CODE (operands[0]) == MEM)
3082 /* clr insns on 68000 read before writing. */
3083 && ((TARGET_68010 || TARGET_COLDFIRE)
3084 || !(GET_CODE (operands[0]) == MEM
3085 && MEM_VOLATILE_P (operands[0]))))
3086 return "clr%.w %0";
3087 else if (operands[1] == const0_rtx
3088 && ADDRESS_REG_P (operands[0]))
3089 return "sub%.l %0,%0";
3090 else if (DATA_REG_P (operands[0])
3091 && INTVAL (operands[1]) < 128
3092 && INTVAL (operands[1]) >= -128)
3093 return "moveq %1,%0";
3094 else if (INTVAL (operands[1]) < 0x8000
3095 && INTVAL (operands[1]) >= -0x8000)
3096 return "move%.w %1,%0";
3097 }
3098 else if (CONSTANT_P (operands[1]))
3099 return "move%.l %1,%0";
3100 return "move%.w %1,%0";
3101 }
3102
/* Return an instruction template to move QImode OPERANDS[1] into
   OPERANDS[0].  */

const char *
output_move_qimode (rtx *operands)
{
  /* 68k family always modifies the stack pointer by at least 2, even for
     byte pushes.  The 5200 (ColdFire) does not do this.  */

  /* This case is generated by pushqi1 pattern now.  */
  gcc_assert (!(GET_CODE (operands[0]) == MEM
		&& GET_CODE (XEXP (operands[0], 0)) == PRE_DEC
		&& XEXP (XEXP (operands[0], 0), 0) == stack_pointer_rtx
		&& ! ADDRESS_REG_P (operands[1])
		&& ! TARGET_COLDFIRE));

  /* clr and st insns on 68000 read before writing.  */
  if (!ADDRESS_REG_P (operands[0])
      && ((TARGET_68010 || TARGET_COLDFIRE)
	  || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
    {
      if (operands[1] == const0_rtx)
	return "clr%.b %0";
      /* st sets all 8 bits, so it covers any constant with low byte
	 0xff; on ColdFire it only works on data registers.  */
      if ((!TARGET_COLDFIRE || DATA_REG_P (operands[0]))
	  && GET_CODE (operands[1]) == CONST_INT
	  && (INTVAL (operands[1]) & 255) == 255)
	{
	  CC_STATUS_INIT;
	  return "st %0";
	}
    }
  if (GET_CODE (operands[1]) == CONST_INT
      && DATA_REG_P (operands[0])
      && INTVAL (operands[1]) < 128
      && INTVAL (operands[1]) >= -128)
    return "moveq %1,%0";
  if (operands[1] == const0_rtx && ADDRESS_REG_P (operands[0]))
    return "sub%.l %0,%0";
  if (GET_CODE (operands[1]) != CONST_INT && CONSTANT_P (operands[1]))
    return "move%.l %1,%0";
  /* 68k family (including the 5200 ColdFire) does not support byte moves to
     from address registers.  */
  if (ADDRESS_REG_P (operands[0]) || ADDRESS_REG_P (operands[1]))
    return "move%.w %1,%0";
  return "move%.b %1,%0";
}
3146
3147 const char *
3148 output_move_stricthi (rtx *operands)
3149 {
3150 if (operands[1] == const0_rtx
3151 /* clr insns on 68000 read before writing. */
3152 && ((TARGET_68010 || TARGET_COLDFIRE)
3153 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3154 return "clr%.w %0";
3155 return "move%.w %1,%0";
3156 }
3157
3158 const char *
3159 output_move_strictqi (rtx *operands)
3160 {
3161 if (operands[1] == const0_rtx
3162 /* clr insns on 68000 read before writing. */
3163 && ((TARGET_68010 || TARGET_COLDFIRE)
3164 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3165 return "clr%.b %0";
3166 return "move%.b %1,%0";
3167 }
3168
3169 /* Return the best assembler insn template
3170 for moving operands[1] into operands[0] as a fullword. */
3171
3172 static const char *
3173 singlemove_string (rtx *operands)
3174 {
3175 if (GET_CODE (operands[1]) == CONST_INT)
3176 return output_move_simode_const (operands);
3177 return "move%.l %1,%0";
3178 }
3179
3180
/* Output assembler or rtl code to perform a doubleword move insn
   with operands OPERANDS.
   Pointers to 3 helper functions should be specified:
   HANDLE_REG_ADJUST to adjust a register by a small value,
   HANDLE_COMPADR to compute an address and
   HANDLE_MOVSI to move 4 bytes.

   This driver is shared between assembler output (output_move_double)
   and RTL emission (m68k_emit_move_double); only the callbacks
   differ.  The move is either 8 bytes (two SImode words) or 12 bytes
   (three words, for XFmode), performed one word at a time in an order
   chosen so that no word is clobbered before it is read.  */

static void
handle_move_double (rtx operands[2],
		    void (*handle_reg_adjust) (rtx, int),
		    void (*handle_compadr) (rtx [2]),
		    void (*handle_movsi) (rtx [2]))
{
  /* Operand classification: register, offsettable memory, other
     memory, push (pre-decrement), pop (post-increment), constant,
     or invalid.  */
  enum
    {
      REGOP, OFFSOP, MEMOP, PUSHOP, POPOP, CNSTOP, RNDOP
    } optype0, optype1;
  rtx latehalf[2];	/* Operands for the high-numbered (last) word.  */
  rtx middlehalf[2];	/* Operands for the middle word (12-byte moves).  */
  rtx xops[2];
  rtx addreg0 = 0, addreg1 = 0;	/* Regs stepped for unoffsettable MEMs.  */
  int dest_overlapped_low = 0;
  int size = GET_MODE_SIZE (GET_MODE (operands[0]));

  middlehalf[0] = 0;
  middlehalf[1] = 0;

  /* First classify both operands. */

  if (REG_P (operands[0]))
    optype0 = REGOP;
  else if (offsettable_memref_p (operands[0]))
    optype0 = OFFSOP;
  else if (GET_CODE (XEXP (operands[0], 0)) == POST_INC)
    optype0 = POPOP;
  else if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC)
    optype0 = PUSHOP;
  else if (GET_CODE (operands[0]) == MEM)
    optype0 = MEMOP;
  else
    optype0 = RNDOP;

  if (REG_P (operands[1]))
    optype1 = REGOP;
  else if (CONSTANT_P (operands[1]))
    optype1 = CNSTOP;
  else if (offsettable_memref_p (operands[1]))
    optype1 = OFFSOP;
  else if (GET_CODE (XEXP (operands[1], 0)) == POST_INC)
    optype1 = POPOP;
  else if (GET_CODE (XEXP (operands[1], 0)) == PRE_DEC)
    optype1 = PUSHOP;
  else if (GET_CODE (operands[1]) == MEM)
    optype1 = MEMOP;
  else
    optype1 = RNDOP;

  /* Check for the cases that the operand constraints are not supposed
     to allow to happen.  Generating code for these cases is
     painful.  */
  gcc_assert (optype0 != RNDOP && optype1 != RNDOP);

  /* If one operand is decrementing and one is incrementing
     decrement the former register explicitly
     and change that operand into ordinary indexing.  */

  if (optype0 == PUSHOP && optype1 == POPOP)
    {
      operands[0] = XEXP (XEXP (operands[0], 0), 0);

      handle_reg_adjust (operands[0], -size);

      /* NOTE(review): operands[0] has just been replaced by its base
	 register, so the DFmode test below can never match after the
	 XFmode test on operands[1]; presumably the source mode test
	 covers the cases that matter and everything else falls
	 through to DImode -- confirm against history.  */
      if (GET_MODE (operands[1]) == XFmode)
	operands[0] = gen_rtx_MEM (XFmode, operands[0]);
      else if (GET_MODE (operands[0]) == DFmode)
	operands[0] = gen_rtx_MEM (DFmode, operands[0]);
      else
	operands[0] = gen_rtx_MEM (DImode, operands[0]);
      optype0 = OFFSOP;
    }
  if (optype0 == POPOP && optype1 == PUSHOP)
    {
      operands[1] = XEXP (XEXP (operands[1], 0), 0);

      handle_reg_adjust (operands[1], -size);

      if (GET_MODE (operands[1]) == XFmode)
	operands[1] = gen_rtx_MEM (XFmode, operands[1]);
      else if (GET_MODE (operands[1]) == DFmode)
	operands[1] = gen_rtx_MEM (DFmode, operands[1]);
      else
	operands[1] = gen_rtx_MEM (DImode, operands[1]);
      optype1 = OFFSOP;
    }

  /* If an operand is an unoffsettable memory ref, find a register
     we can increment temporarily to make it refer to the second word.  */

  if (optype0 == MEMOP)
    addreg0 = find_addr_reg (XEXP (operands[0], 0));

  if (optype1 == MEMOP)
    addreg1 = find_addr_reg (XEXP (operands[1], 0));

  /* Ok, we can do one word at a time.
     Normally we do the low-numbered word first,
     but if either operand is autodecrementing then we
     do the high-numbered word first.

     In either case, set up in LATEHALF the operands to use
     for the high-numbered word and in some cases alter the
     operands in OPERANDS to be suitable for the low-numbered word.  */

  if (size == 12)
    {
      if (optype0 == REGOP)
	{
	  latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 2);
	  middlehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
	}
      else if (optype0 == OFFSOP)
	{
	  middlehalf[0] = adjust_address (operands[0], SImode, 4);
	  latehalf[0] = adjust_address (operands[0], SImode, size - 4);
	}
      else
	{
	  /* Autoinc/autodec: the address register itself supplies the
	     stepping, so each word uses the same (offset-0) ref.  */
	  middlehalf[0] = adjust_address (operands[0], SImode, 0);
	  latehalf[0] = adjust_address (operands[0], SImode, 0);
	}

      if (optype1 == REGOP)
	{
	  latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 2);
	  middlehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
	}
      else if (optype1 == OFFSOP)
	{
	  middlehalf[1] = adjust_address (operands[1], SImode, 4);
	  latehalf[1] = adjust_address (operands[1], SImode, size - 4);
	}
      else if (optype1 == CNSTOP)
	{
	  if (GET_CODE (operands[1]) == CONST_DOUBLE)
	    {
	      /* Split an XFmode constant into its three 32-bit
		 target-format words.  */
	      long l[3];

	      REAL_VALUE_TO_TARGET_LONG_DOUBLE
		(*CONST_DOUBLE_REAL_VALUE (operands[1]), l);
	      operands[1] = GEN_INT (l[0]);
	      middlehalf[1] = GEN_INT (l[1]);
	      latehalf[1] = GEN_INT (l[2]);
	    }
	  else
	    {
	      /* No non-CONST_DOUBLE constant should ever appear
		 here.  */
	      gcc_assert (!CONSTANT_P (operands[1]));
	    }
	}
      else
	{
	  middlehalf[1] = adjust_address (operands[1], SImode, 0);
	  latehalf[1] = adjust_address (operands[1], SImode, 0);
	}
    }
  else
    /* size is not 12: */
    {
      if (optype0 == REGOP)
	latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
      else if (optype0 == OFFSOP)
	latehalf[0] = adjust_address (operands[0], SImode, size - 4);
      else
	latehalf[0] = adjust_address (operands[0], SImode, 0);

      if (optype1 == REGOP)
	latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
      else if (optype1 == OFFSOP)
	latehalf[1] = adjust_address (operands[1], SImode, size - 4);
      else if (optype1 == CNSTOP)
	split_double (operands[1], &operands[1], &latehalf[1]);
      else
	latehalf[1] = adjust_address (operands[1], SImode, 0);
    }

  /* If insn is effectively movd N(REG),-(REG) then we will do the high
     word first.  We should use the adjusted operand 1 (which is N+4(REG))
     for the low word as well, to compensate for the first decrement of
     REG.  */
  if (optype0 == PUSHOP
      && reg_overlap_mentioned_p (XEXP (XEXP (operands[0], 0), 0), operands[1]))
    operands[1] = middlehalf[1] = latehalf[1];

  /* For (set (reg:DI N) (mem:DI ... (reg:SI N) ...)),
     if the upper part of reg N does not appear in the MEM, arrange to
     emit the move late-half first.  Otherwise, compute the MEM address
     into the upper part of N and use that as a pointer to the memory
     operand.  */
  if (optype0 == REGOP
      && (optype1 == OFFSOP || optype1 == MEMOP))
    {
      rtx testlow = gen_rtx_REG (SImode, REGNO (operands[0]));

      if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
	  && reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
	{
	  /* If both halves of dest are used in the src memory address,
	     compute the address into latehalf of dest.
	     Note that this can't happen if the dest is two data regs.  */
	compadr:
	  xops[0] = latehalf[0];
	  xops[1] = XEXP (operands[1], 0);

	  handle_compadr (xops);
	  /* NOTE(review): the pieces below are tagged DImode even
	     though each handle_movsi call moves only 4 bytes;
	     presumably only the address of the adjusted ref matters
	     here -- confirm.  */
	  if (GET_MODE (operands[1]) == XFmode)
	    {
	      operands[1] = gen_rtx_MEM (XFmode, latehalf[0]);
	      middlehalf[1] = adjust_address (operands[1], DImode, size - 8);
	      latehalf[1] = adjust_address (operands[1], DImode, size - 4);
	    }
	  else
	    {
	      operands[1] = gen_rtx_MEM (DImode, latehalf[0]);
	      latehalf[1] = adjust_address (operands[1], DImode, size - 4);
	    }
	}
      else if (size == 12
	       && reg_overlap_mentioned_p (middlehalf[0],
					   XEXP (operands[1], 0)))
	{
	  /* Check for two regs used by both source and dest.
	     Note that this can't happen if the dest is all data regs.
	     It can happen if the dest is d6, d7, a0.
	     But in that case, latehalf is an addr reg, so
	     the code at compadr does ok.  */

	  if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
	      || reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
	    goto compadr;

	  /* JRV says this can't happen: */
	  gcc_assert (!addreg0 && !addreg1);

	  /* Only the middle reg conflicts; simply put it last.  */
	  handle_movsi (operands);
	  handle_movsi (latehalf);
	  handle_movsi (middlehalf);

	  return;
	}
      else if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0)))
	/* If the low half of dest is mentioned in the source memory
	   address, then arrange to emit the move late half first.  */
	dest_overlapped_low = 1;
    }

  /* If one or both operands autodecrementing,
     do the two words, high-numbered first.  */

  /* Likewise, the first move would clobber the source of the second one,
     do them in the other order.  This happens only for registers;
     such overlap can't happen in memory unless the user explicitly
     sets it up, and that is an undefined circumstance.  */

  if (optype0 == PUSHOP || optype1 == PUSHOP
      || (optype0 == REGOP && optype1 == REGOP
	  && ((middlehalf[1] && REGNO (operands[0]) == REGNO (middlehalf[1]))
	      || REGNO (operands[0]) == REGNO (latehalf[1])))
      || dest_overlapped_low)
    {
      /* Make any unoffsettable addresses point at high-numbered word.  */
      if (addreg0)
	handle_reg_adjust (addreg0, size - 4);
      if (addreg1)
	handle_reg_adjust (addreg1, size - 4);

      /* Do that word.  */
      handle_movsi (latehalf);

      /* Undo the adds we just did.  */
      if (addreg0)
	handle_reg_adjust (addreg0, -4);
      if (addreg1)
	handle_reg_adjust (addreg1, -4);

      if (size == 12)
	{
	  handle_movsi (middlehalf);

	  if (addreg0)
	    handle_reg_adjust (addreg0, -4);
	  if (addreg1)
	    handle_reg_adjust (addreg1, -4);
	}

      /* Do low-numbered word.  */

      handle_movsi (operands);
      return;
    }

  /* Normal case: do the two words, low-numbered first.  */

  handle_movsi (operands);

  /* Do the middle one of the three words for long double */
  if (size == 12)
    {
      if (addreg0)
	handle_reg_adjust (addreg0, 4);
      if (addreg1)
	handle_reg_adjust (addreg1, 4);

      handle_movsi (middlehalf);
    }

  /* Make any unoffsettable addresses point at high-numbered word.  */
  if (addreg0)
    handle_reg_adjust (addreg0, 4);
  if (addreg1)
    handle_reg_adjust (addreg1, 4);

  /* Do that word.  */
  handle_movsi (latehalf);

  /* Undo the adds we just did.  */
  if (addreg0)
    handle_reg_adjust (addreg0, -(size - 4));
  if (addreg1)
    handle_reg_adjust (addreg1, -(size - 4));

  return;
}
3515
3516 /* Output assembler code to adjust REG by N. */
3517 static void
3518 output_reg_adjust (rtx reg, int n)
3519 {
3520 const char *s;
3521
3522 gcc_assert (GET_MODE (reg) == SImode && n >= -12 && n != 0 && n <= 12);
3523
3524 switch (n)
3525 {
3526 case 12:
3527 s = "add%.l #12,%0";
3528 break;
3529
3530 case 8:
3531 s = "addq%.l #8,%0";
3532 break;
3533
3534 case 4:
3535 s = "addq%.l #4,%0";
3536 break;
3537
3538 case -12:
3539 s = "sub%.l #12,%0";
3540 break;
3541
3542 case -8:
3543 s = "subq%.l #8,%0";
3544 break;
3545
3546 case -4:
3547 s = "subq%.l #4,%0";
3548 break;
3549
3550 default:
3551 gcc_unreachable ();
3552 s = NULL;
3553 }
3554
3555 output_asm_insn (s, &reg);
3556 }
3557
3558 /* Emit rtl code to adjust REG by N. */
3559 static void
3560 emit_reg_adjust (rtx reg1, int n)
3561 {
3562 rtx reg2;
3563
3564 gcc_assert (GET_MODE (reg1) == SImode && n >= -12 && n != 0 && n <= 12);
3565
3566 reg1 = copy_rtx (reg1);
3567 reg2 = copy_rtx (reg1);
3568
3569 if (n < 0)
3570 emit_insn (gen_subsi3 (reg1, reg2, GEN_INT (-n)));
3571 else if (n > 0)
3572 emit_insn (gen_addsi3 (reg1, reg2, GEN_INT (n)));
3573 else
3574 gcc_unreachable ();
3575 }
3576
/* Output assembler to load the address OPERANDS[1] into the register
   OPERANDS[0].  (The template below makes the roles explicit: %a1 is
   the address operand, %0 the destination register.)  */
static void
output_compadr (rtx operands[2])
{
  output_asm_insn ("lea %a1,%0", operands);
}
3583
/* Output the best assembler insn for moving operands[1] into operands[0]
   as a fullword.  Used as the HANDLE_MOVSI callback of
   handle_move_double at assembler-output time.  */
static void
output_movsi (rtx operands[2])
{
  output_asm_insn (singlemove_string (operands), operands);
}
3591
3592 /* Copy OP and change its mode to MODE. */
3593 static rtx
3594 copy_operand (rtx op, machine_mode mode)
3595 {
3596 /* ??? This looks really ugly. There must be a better way
3597 to change a mode on the operand. */
3598 if (GET_MODE (op) != VOIDmode)
3599 {
3600 if (REG_P (op))
3601 op = gen_rtx_REG (mode, REGNO (op));
3602 else
3603 {
3604 op = copy_rtx (op);
3605 PUT_MODE (op, mode);
3606 }
3607 }
3608
3609 return op;
3610 }
3611
3612 /* Emit rtl code for moving operands[1] into operands[0] as a fullword. */
3613 static void
3614 emit_movsi (rtx operands[2])
3615 {
3616 operands[0] = copy_operand (operands[0], SImode);
3617 operands[1] = copy_operand (operands[1], SImode);
3618
3619 emit_insn (gen_movsi (operands[0], operands[1]));
3620 }
3621
/* Output assembler code to perform a doubleword move insn
   with operands OPERANDS.  Returns "" because the assembler text is
   emitted directly by the output_* callbacks.  */
const char *
output_move_double (rtx *operands)
{
  handle_move_double (operands,
		      output_reg_adjust, output_compadr, output_movsi);

  return "";
}
3632
/* Output rtl code to perform a doubleword move insn
   with operands OPERANDS.  RTL-emitting counterpart of
   output_move_double, sharing the same driver.  */
void
m68k_emit_move_double (rtx operands[2])
{
  handle_move_double (operands, emit_reg_adjust, emit_movsi, emit_movsi);
}
3640
3641 /* Ensure mode of ORIG, a REG rtx, is MODE. Returns either ORIG or a
3642 new rtx with the correct mode. */
3643
3644 static rtx
3645 force_mode (machine_mode mode, rtx orig)
3646 {
3647 if (mode == GET_MODE (orig))
3648 return orig;
3649
3650 if (REGNO (orig) >= FIRST_PSEUDO_REGISTER)
3651 abort ();
3652
3653 return gen_rtx_REG (mode, REGNO (orig));
3654 }
3655
3656 static int
3657 fp_reg_operand (rtx op, machine_mode mode ATTRIBUTE_UNUSED)
3658 {
3659 return reg_renumber && FP_REG_P (op);
3660 }
3661
3662 /* Emit insns to move operands[1] into operands[0].
3663
3664 Return 1 if we have written out everything that needs to be done to
3665 do the move. Otherwise, return 0 and the caller will emit the move
3666 normally.
3667
3668 Note SCRATCH_REG may not be in the proper mode depending on how it
3669 will be used. This routine is responsible for creating a new copy
3670 of SCRATCH_REG in the proper mode. */
3671
int
emit_move_sequence (rtx *operands, machine_mode mode, rtx scratch_reg)
{
  register rtx operand0 = operands[0];
  register rtx operand1 = operands[1];
  register rtx tem;

  /* During reload, replace pseudo-register operands by their assigned
     stack slots (reg_equiv_mem).  This only applies when we were
     handed a secondary-reload scratch register.  */
  if (scratch_reg
      && reload_in_progress && GET_CODE (operand0) == REG
      && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
    operand0 = reg_equiv_mem (REGNO (operand0));
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand0) == SUBREG
	   && GET_CODE (SUBREG_REG (operand0)) == REG
	   && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
    {
     /* We must not alter SUBREG_BYTE (operand0) since that would confuse
	the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
				 reg_equiv_mem (REGNO (SUBREG_REG (operand0))),
				 SUBREG_BYTE (operand0));
      operand0 = alter_subreg (&temp, true);
    }

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand1) == REG
      && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
    operand1 = reg_equiv_mem (REGNO (operand1));
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand1) == SUBREG
	   && GET_CODE (SUBREG_REG (operand1)) == REG
	   && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
    {
     /* We must not alter SUBREG_BYTE (operand1) since that would confuse
	the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
				 reg_equiv_mem (REGNO (SUBREG_REG (operand1))),
				 SUBREG_BYTE (operand1));
      operand1 = alter_subreg (&temp, true);
    }

  /* Apply any pending reload replacements inside MEM addresses.  */
  if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
      && ((tem = find_replacement (&XEXP (operand0, 0)))
	  != XEXP (operand0, 0)))
    operand0 = gen_rtx_MEM (GET_MODE (operand0), tem);
  if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
      && ((tem = find_replacement (&XEXP (operand1, 0)))
	  != XEXP (operand1, 0)))
    operand1 = gen_rtx_MEM (GET_MODE (operand1), tem);

  /* Handle secondary reloads for loads/stores of FP registers where
     the address is symbolic by using the scratch register */
  if (fp_reg_operand (operand0, mode)
      && ((GET_CODE (operand1) == MEM
	   && ! memory_address_p (DFmode, XEXP (operand1, 0)))
	  || ((GET_CODE (operand1) == SUBREG
	       && GET_CODE (XEXP (operand1, 0)) == MEM
	       && !memory_address_p (DFmode, XEXP (XEXP (operand1, 0), 0)))))
      && scratch_reg)
    {
      if (GET_CODE (operand1) == SUBREG)
	operand1 = XEXP (operand1, 0);

      /* SCRATCH_REG will hold an address.  We want
	 it in SImode regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (SImode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  (NOTE(review): the "14 bits" wording looks
	 inherited from the PA port this code was adapted from --
	 confirm whether the limit is meaningful on m68k.)  */
      if (!memory_address_p (Pmode, XEXP (operand1, 0)))
	{
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
	  emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
						       Pmode,
						       XEXP (XEXP (operand1, 0), 0),
						       scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand1, 0));
      emit_insn (gen_rtx_SET (operand0, gen_rtx_MEM (mode, scratch_reg)));
      return 1;
    }
  else if (fp_reg_operand (operand1, mode)
	   && ((GET_CODE (operand0) == MEM
		&& ! memory_address_p (DFmode, XEXP (operand0, 0)))
	       || ((GET_CODE (operand0) == SUBREG)
		   && GET_CODE (XEXP (operand0, 0)) == MEM
		   && !memory_address_p (DFmode, XEXP (XEXP (operand0, 0), 0))))
	   && scratch_reg)
    {
      if (GET_CODE (operand0) == SUBREG)
	operand0 = XEXP (operand0, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in SIMODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (SImode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand0, 0)))
	{
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
	  emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0,
								        0)),
						       Pmode,
						       XEXP (XEXP (operand0, 0),
								   0),
						       scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand0, 0));
      emit_insn (gen_rtx_SET (gen_rtx_MEM (mode, scratch_reg), operand1));
      return 1;
    }
  /* Handle secondary reloads for loads of FP registers from constant
     expressions by forcing the constant into memory.

     use scratch_reg to hold the address of the memory location.

     The proper fix is to change PREFERRED_RELOAD_CLASS to return
     NO_REGS when presented with a const_int and an register class
     containing only FP registers.  Doing so unfortunately creates
     more problems than it solves.   Fix this for 2.5.  */
  else if (fp_reg_operand (operand0, mode)
	   && CONSTANT_P (operand1)
	   && scratch_reg)
    {
      rtx xoperands[2];

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in SIMODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (SImode, scratch_reg);

      /* Force the constant into memory and put the address of the
	 memory location into scratch_reg.  */
      xoperands[0] = scratch_reg;
      xoperands[1] = XEXP (force_const_mem (mode, operand1), 0);
      emit_insn (gen_rtx_SET (scratch_reg, xoperands[1]));

      /* Now load the destination register.  */
      emit_insn (gen_rtx_SET (operand0, gen_rtx_MEM (mode, scratch_reg)));
      return 1;
    }

  /* Now have insn-emit do whatever it normally does.  */
  return 0;
}
3822
3823 /* Split one or more DImode RTL references into pairs of SImode
3824 references. The RTL can be REG, offsettable MEM, integer constant, or
3825 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
3826 split and "num" is its length. lo_half and hi_half are output arrays
3827 that parallel "operands". */
3828
3829 void
3830 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
3831 {
3832 while (num--)
3833 {
3834 rtx op = operands[num];
3835
3836 /* simplify_subreg refuses to split volatile memory addresses,
3837 but we still have to handle it. */
3838 if (GET_CODE (op) == MEM)
3839 {
3840 lo_half[num] = adjust_address (op, SImode, 4);
3841 hi_half[num] = adjust_address (op, SImode, 0);
3842 }
3843 else
3844 {
3845 lo_half[num] = simplify_gen_subreg (SImode, op,
3846 GET_MODE (op) == VOIDmode
3847 ? DImode : GET_MODE (op), 4);
3848 hi_half[num] = simplify_gen_subreg (SImode, op,
3849 GET_MODE (op) == VOIDmode
3850 ? DImode : GET_MODE (op), 0);
3851 }
3852 }
3853 }
3854
3855 /* Split X into a base and a constant offset, storing them in *BASE
3856 and *OFFSET respectively. */
3857
3858 static void
3859 m68k_split_offset (rtx x, rtx *base, HOST_WIDE_INT *offset)
3860 {
3861 *offset = 0;
3862 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
3863 {
3864 *offset += INTVAL (XEXP (x, 1));
3865 x = XEXP (x, 0);
3866 }
3867 *base = x;
3868 }
3869
3870 /* Return true if PATTERN is a PARALLEL suitable for a movem or fmovem
3871 instruction. STORE_P says whether the move is a load or store.
3872
3873 If the instruction uses post-increment or pre-decrement addressing,
3874 AUTOMOD_BASE is the base register and AUTOMOD_OFFSET is the total
3875 adjustment. This adjustment will be made by the first element of
3876 PARALLEL, with the loads or stores starting at element 1. If the
3877 instruction does not use post-increment or pre-decrement addressing,
3878 AUTOMOD_BASE is null, AUTOMOD_OFFSET is 0, and the loads or stores
3879 start at element 0. */
3880
bool
m68k_movem_pattern_p (rtx pattern, rtx automod_base,
		      HOST_WIDE_INT automod_offset, bool store_p)
{
  rtx base, mem_base, set, mem, reg, last_reg;
  HOST_WIDE_INT offset, mem_offset;
  int i, first;		/* FIRST is the index of the first load/store SET.  */
  int len;
  enum reg_class rclass;

  len = XVECLEN (pattern, 0);
  first = (automod_base != NULL);

  if (automod_base)
    {
      /* Stores must be pre-decrement and loads must be post-increment.  */
      if (store_p != (automod_offset < 0))
	return false;

      /* Work out the base and offset for lowest memory location.  */
      base = automod_base;
      offset = (automod_offset < 0 ? automod_offset : 0);
    }
  else
    {
      /* Allow any valid base and offset in the first access.  */
      base = NULL;
      offset = 0;
    }

  last_reg = NULL;
  rclass = NO_REGS;
  for (i = first; i < len; i++)
    {
      /* We need a plain SET.  */
      set = XVECEXP (pattern, 0, i);
      if (GET_CODE (set) != SET)
	return false;

      /* Check that we have a memory location...  */
      mem = XEXP (set, !store_p);
      if (!MEM_P (mem) || !memory_operand (mem, VOIDmode))
	return false;

      /* ...with the right address.  Each access must sit directly
	 after the previous one (OFFSET is advanced at the bottom of
	 the loop).  */
      if (base == NULL)
	{
	  m68k_split_offset (XEXP (mem, 0), &base, &offset);
	  /* The ColdFire instruction only allows (An) and (d16,An) modes.
	     There are no mode restrictions for 680x0 besides the
	     automodification rules enforced above.  */
	  if (TARGET_COLDFIRE
	      && !m68k_legitimate_base_reg_p (base, reload_completed))
	    return false;
	}
      else
	{
	  m68k_split_offset (XEXP (mem, 0), &mem_base, &mem_offset);
	  if (!rtx_equal_p (base, mem_base) || offset != mem_offset)
	    return false;
	}

      /* Check that we have a register of the required mode and class.
	 It must be a hard register in its natural (raw) mode.  */
      reg = XEXP (set, store_p);
      if (!REG_P (reg)
	  || !HARD_REGISTER_P (reg)
	  || GET_MODE (reg) != reg_raw_mode[REGNO (reg)])
	return false;

      if (last_reg)
	{
	  /* The register must belong to RCLASS and have a higher number
	     than the register in the previous SET.  */
	  if (!TEST_HARD_REG_BIT (reg_class_contents[rclass], REGNO (reg))
	      || REGNO (last_reg) >= REGNO (reg))
	    return false;
	}
      else
	{
	  /* Work out which register class we need.  */
	  if (INT_REGNO_P (REGNO (reg)))
	    rclass = GENERAL_REGS;
	  else if (FP_REGNO_P (REGNO (reg)))
	    rclass = FP_REGS;
	  else
	    return false;
	}

      last_reg = reg;
      offset += GET_MODE_SIZE (GET_MODE (reg));
    }

  /* If we have an automodification, check whether the final offset is OK.  */
  if (automod_base && offset != (automod_offset < 0 ? 0 : automod_offset))
    return false;

  /* Reject unprofitable cases.  */
  if (len < first + (rclass == FP_REGS ? MIN_FMOVEM_REGS : MIN_MOVEM_REGS))
    return false;

  return true;
}
3982
3983 /* Return the assembly code template for a movem or fmovem instruction
3984 whose pattern is given by PATTERN. Store the template's operands
3985 in OPERANDS.
3986
3987 If the instruction uses post-increment or pre-decrement addressing,
3988 AUTOMOD_OFFSET is the total adjustment, otherwise it is 0. STORE_P
3989 is true if this is a store instruction. */
3990
const char *
m68k_output_movem (rtx *operands, rtx pattern,
		   HOST_WIDE_INT automod_offset, bool store_p)
{
  unsigned int mask;	/* Register-selection bit mask for movem/fmovem.  */
  int i, first;		/* FIRST skips the automodification SET, if any.  */

  gcc_assert (GET_CODE (pattern) == PARALLEL);
  mask = 0;
  first = (automod_offset != 0);
  for (i = first; i < XVECLEN (pattern, 0); i++)
    {
      /* When using movem with pre-decrement addressing, register X + D0_REG
	 is controlled by bit 15 - X.  For all other addressing modes,
	 register X + D0_REG is controlled by bit X.  Confusingly, the
	 register mask for fmovem is in the opposite order to that for
	 movem.  */
      unsigned int regno;

      gcc_assert (MEM_P (XEXP (XVECEXP (pattern, 0, i), !store_p)));
      gcc_assert (REG_P (XEXP (XVECEXP (pattern, 0, i), store_p)));
      regno = REGNO (XEXP (XVECEXP (pattern, 0, i), store_p));
      if (automod_offset < 0)
	{
	  if (FP_REGNO_P (regno))
	    mask |= 1 << (regno - FP0_REG);
	  else
	    mask |= 1 << (15 - (regno - D0_REG));
	}
      else
	{
	  if (FP_REGNO_P (regno))
	    mask |= 1 << (7 - (regno - FP0_REG));
	  else
	    mask |= 1 << (regno - D0_REG);
	}
    }
  /* movem/fmovem leave the condition codes in an unusable state.  */
  CC_STATUS_INIT;

  /* Operand 0 is the (possibly automodified) memory address,
     operand 1 the register mask.  */
  if (automod_offset == 0)
    operands[0] = XEXP (XEXP (XVECEXP (pattern, 0, first), !store_p), 0);
  else if (automod_offset < 0)
    operands[0] = gen_rtx_PRE_DEC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
  else
    operands[0] = gen_rtx_POST_INC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
  operands[1] = GEN_INT (mask);
  if (FP_REGNO_P (REGNO (XEXP (XVECEXP (pattern, 0, first), store_p))))
    {
      if (store_p)
	return "fmovem %1,%a0";
      else
	return "fmovem %a0,%1";
    }
  else
    {
      if (store_p)
	return "movem%.l %1,%a0";
      else
	return "movem%.l %a0,%1";
    }
}
4052
4053 /* Return a REG that occurs in ADDR with coefficient 1.
4054 ADDR can be effectively incremented by incrementing REG. */
4055
4056 static rtx
4057 find_addr_reg (rtx addr)
4058 {
4059 while (GET_CODE (addr) == PLUS)
4060 {
4061 if (GET_CODE (XEXP (addr, 0)) == REG)
4062 addr = XEXP (addr, 0);
4063 else if (GET_CODE (XEXP (addr, 1)) == REG)
4064 addr = XEXP (addr, 1);
4065 else if (CONSTANT_P (XEXP (addr, 0)))
4066 addr = XEXP (addr, 1);
4067 else if (CONSTANT_P (XEXP (addr, 1)))
4068 addr = XEXP (addr, 0);
4069 else
4070 gcc_unreachable ();
4071 }
4072 gcc_assert (GET_CODE (addr) == REG);
4073 return addr;
4074 }
4075
/* Output assembler code to perform a 32-bit 3-operand add.
   Returns the insn template for operands[0] = operands[1] + operands[2],
   choosing among lea, addq/subq, add.w and add.l depending on the
   operands and the tuned CPU.  */

const char *
output_addsi3 (rtx *operands)
{
  if (! operands_match_p (operands[0], operands[1]))
    {
      /* Three-address form: use lea, which needs an address register
	 as the base.  Swap the sources if needed so the address
	 register (if any) is operand 1.  */
      if (!ADDRESS_REG_P (operands[1]))
	{
	  rtx tmp = operands[1];

	  operands[1] = operands[2];
	  operands[2] = tmp;
	}

      /* These insns can result from reloads to access
	 stack slots over 64k from the frame pointer.  */
      if (GET_CODE (operands[2]) == CONST_INT
	  && (INTVAL (operands[2]) < -32768 || INTVAL (operands[2]) > 32767))
	return "move%.l %2,%0\n\tadd%.l %1,%0";
      if (GET_CODE (operands[2]) == REG)
	return MOTOROLA ? "lea (%1,%2.l),%0" : "lea %1@(0,%2:l),%0";
      return MOTOROLA ? "lea (%c2,%1),%0" : "lea %1@(%c2),%0";
    }
  if (GET_CODE (operands[2]) == CONST_INT)
    {
      /* addq/subq take immediates 1..8 only.  */
      if (INTVAL (operands[2]) > 0
	  && INTVAL (operands[2]) <= 8)
	return "addq%.l %2,%0";
      if (INTVAL (operands[2]) < 0
	  && INTVAL (operands[2]) >= -8)
	{
	  operands[2] = GEN_INT (- INTVAL (operands[2]));
	  return "subq%.l %2,%0";
	}
      /* On the CPU32 it is faster to use two addql instructions to
	 add a small integer (8 < N <= 16) to a register.
	 Likewise for subql.  */
      if (TUNE_CPU32 && REG_P (operands[0]))
	{
	  if (INTVAL (operands[2]) > 8
	      && INTVAL (operands[2]) <= 16)
	    {
	      operands[2] = GEN_INT (INTVAL (operands[2]) - 8);
	      return "addq%.l #8,%0\n\taddq%.l %2,%0";
	    }
	  if (INTVAL (operands[2]) < -8
	      && INTVAL (operands[2]) >= -16)
	    {
	      operands[2] = GEN_INT (- INTVAL (operands[2]) - 8);
	      return "subq%.l #8,%0\n\tsubq%.l %2,%0";
	    }
	}
      /* For an address register and a 16-bit constant, a word add
	 (which sign-extends over the full register on address regs)
	 or an lea can be used; the 68040 prefers add.w.  */
      if (ADDRESS_REG_P (operands[0])
	  && INTVAL (operands[2]) >= -0x8000
	  && INTVAL (operands[2]) < 0x8000)
	{
	  if (TUNE_68040)
	    return "add%.w %2,%0";
	  else
	    return MOTOROLA ? "lea (%c2,%0),%0" : "lea %0@(%c2),%0";
	}
    }
  return "add%.l %2,%0";
}
4141 \f
4142 /* Store in cc_status the expressions that the condition codes will
4143 describe after execution of an instruction whose pattern is EXP.
4144 Do not alter them if the instruction would not alter the cc's. */
4145
4146 /* On the 68000, all the insns to store in an address register fail to
4147 set the cc's. However, in some cases these instructions can make it
4148 possibly invalid to use the saved cc's. In those cases we clear out
4149 some or all of the saved cc's so they won't be used. */
4150
/* Update cc_status to describe the condition codes after INSN, whose
   pattern is EXP.  Moves into address registers leave the flags
   untouched (but may invalidate cached values); several other cases
   yield flags that are unusable for comparison against zero.  */
void
notice_update_cc (rtx exp, rtx insn)
{
  if (GET_CODE (exp) == SET)
    {
      /* A call clobbers the flags entirely.  */
      if (GET_CODE (SET_SRC (exp)) == CALL)
	CC_STATUS_INIT;
      /* Moves into an address register don't set the flags, but may
	 invalidate cached values that mention the modified register.  */
      else if (ADDRESS_REG_P (SET_DEST (exp)))
	{
	  if (cc_status.value1 && modified_in_p (cc_status.value1, insn))
	    cc_status.value1 = 0;
	  if (cc_status.value2 && modified_in_p (cc_status.value2, insn))
	    cc_status.value2 = 0;
	}
      /* fmoves to memory or data registers do not set the condition
	 codes.  Normal moves _do_ set the condition codes, but not in
	 a way that is appropriate for comparison with 0, because -0.0
	 would be treated as a negative nonzero number.  Note that it
	 isn't appropriate to conditionalize this restriction on
	 HONOR_SIGNED_ZEROS because that macro merely indicates whether
	 we care about the difference between -0.0 and +0.0.  */
      else if (!FP_REG_P (SET_DEST (exp))
	       && SET_DEST (exp) != cc0_rtx
	       && (FP_REG_P (SET_SRC (exp))
		   || GET_CODE (SET_SRC (exp)) == FIX
		   || FLOAT_MODE_P (GET_MODE (SET_DEST (exp)))))
	CC_STATUS_INIT;
      /* A pair of move insns doesn't produce a useful overall cc.  */
      else if (!FP_REG_P (SET_DEST (exp))
	       && !FP_REG_P (SET_SRC (exp))
	       && GET_MODE_SIZE (GET_MODE (SET_SRC (exp))) > 4
	       && (GET_CODE (SET_SRC (exp)) == REG
		   || GET_CODE (SET_SRC (exp)) == MEM
		   || GET_CODE (SET_SRC (exp)) == CONST_DOUBLE))
	CC_STATUS_INIT;
      else if (SET_DEST (exp) != pc_rtx)
	{
	  /* Record the dest/src pair so a later compare against zero
	     of the same value can be elided.  */
	  cc_status.flags = 0;
	  cc_status.value1 = SET_DEST (exp);
	  cc_status.value2 = SET_SRC (exp);
	}
    }
  else if (GET_CODE (exp) == PARALLEL
	   && GET_CODE (XVECEXP (exp, 0, 0)) == SET)
    {
      /* For a PARALLEL, only the first SET determines the flags.  */
      rtx dest = SET_DEST (XVECEXP (exp, 0, 0));
      rtx src = SET_SRC (XVECEXP (exp, 0, 0));

      if (ADDRESS_REG_P (dest))
	CC_STATUS_INIT;
      else if (dest != pc_rtx)
	{
	  cc_status.flags = 0;
	  cc_status.value1 = dest;
	  cc_status.value2 = src;
	}
    }
  else
    CC_STATUS_INIT;
  /* QImode results in address registers don't give usable flags.  */
  if (cc_status.value2 != 0
      && ADDRESS_REG_P (cc_status.value2)
      && GET_MODE (cc_status.value2) == QImode)
    CC_STATUS_INIT;
  if (cc_status.value2 != 0)
    switch (GET_CODE (cc_status.value2))
      {
      case ASHIFT: case ASHIFTRT: case LSHIFTRT:
      case ROTATE: case ROTATERT:
	/* These instructions always clear the overflow bit, and set
	   the carry to the bit shifted out.  */
	cc_status.flags |= CC_OVERFLOW_UNUSABLE | CC_NO_CARRY;
	break;

      case PLUS: case MINUS: case MULT:
      case DIV: case UDIV: case MOD: case UMOD: case NEG:
	if (GET_MODE (cc_status.value2) != VOIDmode)
	  cc_status.flags |= CC_NO_OVERFLOW;
	break;
      case ZERO_EXTEND:
	/* (SET r1 (ZERO_EXTEND r2)) on this machine
	   ends with a move insn moving r2 in r2's mode.
	   Thus, the cc's are set for r2.
	   This can set N bit spuriously.  */
	cc_status.flags |= CC_NOT_NEGATIVE;

      default:
	break;
      }
  /* If the tracked destination register also occurs in the source,
     the recorded source no longer describes the flags.  */
  if (cc_status.value1 && GET_CODE (cc_status.value1) == REG
      && cc_status.value2
      && reg_overlap_mentioned_p (cc_status.value1, cc_status.value2))
    cc_status.value2 = 0;
  /* Check for PRE_DEC in dest modifying a register used in src.  */
  if (cc_status.value1 && GET_CODE (cc_status.value1) == MEM
      && GET_CODE (XEXP (cc_status.value1, 0)) == PRE_DEC
      && cc_status.value2
      && reg_overlap_mentioned_p (XEXP (XEXP (cc_status.value1, 0), 0),
				  cc_status.value2))
    cc_status.value2 = 0;
  /* Floating-point results are reflected in the 68881 status register.  */
  if (((cc_status.value1 && FP_REG_P (cc_status.value1))
       || (cc_status.value2 && FP_REG_P (cc_status.value2))))
    cc_status.flags = CC_IN_68881;
  if (cc_status.value2 && GET_CODE (cc_status.value2) == COMPARE
      && GET_MODE_CLASS (GET_MODE (XEXP (cc_status.value2, 0))) == MODE_FLOAT)
    {
      cc_status.flags = CC_IN_68881;
      if (!FP_REG_P (XEXP (cc_status.value2, 0))
	  && FP_REG_P (XEXP (cc_status.value2, 1)))
	cc_status.flags |= CC_REVERSED;
    }
}
4262 \f
4263 const char *
4264 output_move_const_double (rtx *operands)
4265 {
4266 int code = standard_68881_constant_p (operands[1]);
4267
4268 if (code != 0)
4269 {
4270 static char buf[40];
4271
4272 sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff);
4273 return buf;
4274 }
4275 return "fmove%.d %1,%0";
4276 }
4277
4278 const char *
4279 output_move_const_single (rtx *operands)
4280 {
4281 int code = standard_68881_constant_p (operands[1]);
4282
4283 if (code != 0)
4284 {
4285 static char buf[40];
4286
4287 sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff);
4288 return buf;
4289 }
4290 return "fmove%.s %f1,%0";
4291 }
4292
4293 /* Return nonzero if X, a CONST_DOUBLE, has a value that we can get
4294 from the "fmovecr" instruction.
4295 The value, anded with 0xff, gives the code to use in fmovecr
4296 to get the desired constant. */
4297
4298 /* This code has been fixed for cross-compilation. */
4299
/* Nonzero once init_68881_table has filled in values_68881.  */
static int inited_68881_table = 0;

/* Decimal spellings of the constants checked against the 68881's
   on-chip constant ROM.  The final entry (1e16) is parsed in DFmode
   by init_68881_table; the others in SFmode.  */
static const char *const strings_68881[7] = {
  "0.0",
  "1.0",
  "10.0",
  "100.0",
  "10000.0",
  "1e8",
  "1e16"
};

/* fmovecr ROM offsets, corresponding 1:1 to strings_68881.  */
static const int codes_68881[7] = {
  0x0f,
  0x32,
  0x33,
  0x34,
  0x35,
  0x36,
  0x37
};

/* Binary (REAL_VALUE_TYPE) forms of strings_68881, filled in lazily
   by init_68881_table.  */
REAL_VALUE_TYPE values_68881[7];
4323
4324 /* Set up values_68881 array by converting the decimal values
4325 strings_68881 to binary. */
4326
4327 void
4328 init_68881_table (void)
4329 {
4330 int i;
4331 REAL_VALUE_TYPE r;
4332 machine_mode mode;
4333
4334 mode = SFmode;
4335 for (i = 0; i < 7; i++)
4336 {
4337 if (i == 6)
4338 mode = DFmode;
4339 r = REAL_VALUE_ATOF (strings_68881[i], mode);
4340 values_68881[i] = r;
4341 }
4342 inited_68881_table = 1;
4343 }
4344
4345 int
4346 standard_68881_constant_p (rtx x)
4347 {
4348 const REAL_VALUE_TYPE *r;
4349 int i;
4350
4351 /* fmovecr must be emulated on the 68040 and 68060, so it shouldn't be
4352 used at all on those chips. */
4353 if (TUNE_68040_60)
4354 return 0;
4355
4356 if (! inited_68881_table)
4357 init_68881_table ();
4358
4359 r = CONST_DOUBLE_REAL_VALUE (x);
4360
4361 /* Use real_identical instead of real_equal so that -0.0 is rejected. */
4362 for (i = 0; i < 6; i++)
4363 {
4364 if (real_identical (r, &values_68881[i]))
4365 return (codes_68881[i]);
4366 }
4367
4368 if (GET_MODE (x) == SFmode)
4369 return 0;
4370
4371 if (real_equal (r, &values_68881[6]))
4372 return (codes_68881[6]);
4373
4374 /* larger powers of ten in the constants ram are not used
4375 because they are not equal to a `double' C constant. */
4376 return 0;
4377 }
4378
4379 /* If X is a floating-point constant, return the logarithm of X base 2,
4380 or 0 if X is not a power of 2. */
4381
4382 int
4383 floating_exact_log2 (rtx x)
4384 {
4385 const REAL_VALUE_TYPE *r;
4386 REAL_VALUE_TYPE r1;
4387 int exp;
4388
4389 r = CONST_DOUBLE_REAL_VALUE (x);
4390
4391 if (real_less (r, &dconst1))
4392 return 0;
4393
4394 exp = real_exponent (r);
4395 real_2expN (&r1, exp, DFmode);
4396 if (real_equal (&r1, r))
4397 return exp;
4398
4399 return 0;
4400 }
4401 \f
4402 /* A C compound statement to output to stdio stream STREAM the
4403 assembler syntax for an instruction operand X. X is an RTL
4404 expression.
4405
4406 CODE is a value that can be used to specify one of several ways
4407 of printing the operand. It is used when identical operands
4408 must be printed differently depending on the context. CODE
4409 comes from the `%' specification that was used to request
4410 printing of the operand. If the specification was just `%DIGIT'
4411 then CODE is 0; if the specification was `%LTR DIGIT' then CODE
4412 is the ASCII code for LTR.
4413
4414 If X is a register, this macro should print the register's name.
4415 The names can be found in an array `reg_names' whose type is
4416 `char *[]'. `reg_names' is initialized from `REGISTER_NAMES'.
4417
4418 When the machine description has a specification `%PUNCT' (a `%'
4419 followed by a punctuation character), this macro is called with
4420 a null pointer for X and the punctuation character for CODE.
4421
4422 The m68k specific codes are:
4423
4424 '.' for dot needed in Motorola-style opcode names.
4425 '-' for an operand pushing on the stack:
4426 sp@-, -(sp) or -(%sp) depending on the style of syntax.
4427 '+' for an operand pushing on the stack:
4428 sp@+, (sp)+ or (%sp)+ depending on the style of syntax.
4429 '@' for a reference to the top word on the stack:
4430 sp@, (sp) or (%sp) depending on the style of syntax.
4431 '#' for an immediate operand prefix (# in MIT and Motorola syntax
4432 but & in SGS syntax).
4433 '!' for the cc register (used in an `and to cc' insn).
4434 '$' for the letter `s' in an op code, but only on the 68040.
4435 '&' for the letter `d' in an op code, but only on the 68040.
4436 '/' for register prefix needed by longlong.h.
4437 '?' for m68k_library_id_string
4438
4439 'b' for byte insn (no effect, on the Sun; this is for the ISI).
4440 'd' to force memory addressing to be absolute, not relative.
4441 'f' for float insn (print a CONST_DOUBLE as a float rather than in hex)
4442 'x' for float insn (print a CONST_DOUBLE as a float rather than in hex),
4443 or print pair of registers as rx:ry.
4444 'p' print an address with @PLTPC attached, but only if the operand
4445 is not locally-bound. */
4446
/* Implement PRINT_OPERAND: write operand OP to FILE with the
   formatting selected by LETTER.  See the table of m68k-specific
   codes in the comment above.  */
void
print_operand (FILE *file, rtx op, int letter)
{
  if (op != NULL_RTX)
    m68k_adjust_decorated_operand (op);

  if (letter == '.')
    {
      /* The size-suffix dot appears only in Motorola syntax.  */
      if (MOTOROLA)
	fprintf (file, ".");
    }
  else if (letter == '#')
    asm_fprintf (file, "%I");
  else if (letter == '-')
    asm_fprintf (file, MOTOROLA ? "-(%Rsp)" : "%Rsp@-");
  else if (letter == '+')
    asm_fprintf (file, MOTOROLA ? "(%Rsp)+" : "%Rsp@+");
  else if (letter == '@')
    asm_fprintf (file, MOTOROLA ? "(%Rsp)" : "%Rsp@");
  else if (letter == '!')
    asm_fprintf (file, "%Rfpcr");
  else if (letter == '$')
    {
      /* The 's' opcode letter is emitted only on the 68040.  */
      if (TARGET_68040)
	fprintf (file, "s");
    }
  else if (letter == '&')
    {
      /* The 'd' opcode letter is emitted only on the 68040.  */
      if (TARGET_68040)
	fprintf (file, "d");
    }
  else if (letter == '/')
    asm_fprintf (file, "%R");
  else if (letter == '?')
    asm_fprintf (file, m68k_library_id_string);
  else if (letter == 'p')
    {
      output_addr_const (file, op);
      /* Locally-bound symbols don't need to go through the PLT.  */
      if (!(GET_CODE (op) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (op)))
	fprintf (file, "@PLTPC");
    }
  else if (GET_CODE (op) == REG)
    {
      if (letter == 'R')
	/* Print out the second register name of a register pair.
	   I.e., R (6) => 7.  */
	fputs (M68K_REGNAME(REGNO (op) + 1), file);
      else
	fputs (M68K_REGNAME(REGNO (op)), file);
    }
  else if (GET_CODE (op) == MEM)
    {
      output_address (GET_MODE (op), XEXP (op, 0));
      /* 'd': force absolute-long addressing on pre-68020 unless the
	 address is a 16-bit constant.  */
      if (letter == 'd' && ! TARGET_68020
	  && CONSTANT_ADDRESS_P (XEXP (op, 0))
	  && !(GET_CODE (XEXP (op, 0)) == CONST_INT
	       && INTVAL (XEXP (op, 0)) < 0x8000
	       && INTVAL (XEXP (op, 0)) >= -0x8000))
	fprintf (file, MOTOROLA ? ".l" : ":l");
    }
  else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == SFmode)
    {
      /* Single-precision constant: one 32-bit hex word.  */
      long l;
      REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), l);
      asm_fprintf (file, "%I0x%lx", l & 0xFFFFFFFF);
    }
  else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == XFmode)
    {
      /* Extended-precision constant: three 32-bit hex words.  */
      long l[3];
      REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op), l);
      asm_fprintf (file, "%I0x%lx%08lx%08lx", l[0] & 0xFFFFFFFF,
		   l[1] & 0xFFFFFFFF, l[2] & 0xFFFFFFFF);
    }
  else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == DFmode)
    {
      /* Double-precision constant: two 32-bit hex words.  */
      long l[2];
      REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op), l);
      asm_fprintf (file, "%I0x%lx%08lx", l[0] & 0xFFFFFFFF, l[1] & 0xFFFFFFFF);
    }
  else
    {
      /* Use `print_operand_address' instead of `output_addr_const'
	 to ensure that we print relevant PIC stuff.  */
      asm_fprintf (file, "%I");
      if (TARGET_PCREL
	  && (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == CONST))
	print_operand_address (file, op);
      else
	output_addr_const (file, op);
    }
}
4538
4539 /* Return string for TLS relocation RELOC. */
4540
4541 static const char *
4542 m68k_get_reloc_decoration (enum m68k_reloc reloc)
4543 {
4544 /* To my knowledge, !MOTOROLA assemblers don't support TLS. */
4545 gcc_assert (MOTOROLA || reloc == RELOC_GOT);
4546
4547 switch (reloc)
4548 {
4549 case RELOC_GOT:
4550 if (MOTOROLA)
4551 {
4552 if (flag_pic == 1 && TARGET_68020)
4553 return "@GOT.w";
4554 else
4555 return "@GOT";
4556 }
4557 else
4558 {
4559 if (TARGET_68020)
4560 {
4561 switch (flag_pic)
4562 {
4563 case 1:
4564 return ":w";
4565 case 2:
4566 return ":l";
4567 default:
4568 return "";
4569 }
4570 }
4571 }
4572 gcc_unreachable ();
4573
4574 case RELOC_TLSGD:
4575 return "@TLSGD";
4576
4577 case RELOC_TLSLDM:
4578 return "@TLSLDM";
4579
4580 case RELOC_TLSLDO:
4581 return "@TLSLDO";
4582
4583 case RELOC_TLSIE:
4584 return "@TLSIE";
4585
4586 case RELOC_TLSLE:
4587 return "@TLSLE";
4588
4589 default:
4590 gcc_unreachable ();
4591 }
4592 }
4593
4594 /* m68k implementation of TARGET_OUTPUT_ADDR_CONST_EXTRA. */
4595
4596 static bool
4597 m68k_output_addr_const_extra (FILE *file, rtx x)
4598 {
4599 if (GET_CODE (x) == UNSPEC)
4600 {
4601 switch (XINT (x, 1))
4602 {
4603 case UNSPEC_RELOC16:
4604 case UNSPEC_RELOC32:
4605 output_addr_const (file, XVECEXP (x, 0, 0));
4606 fputs (m68k_get_reloc_decoration
4607 ((enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1))), file);
4608 return true;
4609
4610 default:
4611 break;
4612 }
4613 }
4614
4615 return false;
4616 }
4617
4618 /* M68K implementation of TARGET_ASM_OUTPUT_DWARF_DTPREL. */
4619
4620 static void
4621 m68k_output_dwarf_dtprel (FILE *file, int size, rtx x)
4622 {
4623 gcc_assert (size == 4);
4624 fputs ("\t.long\t", file);
4625 output_addr_const (file, x);
4626 fputs ("@TLSLDO+0x8000", file);
4627 }
4628
/* In the name of slightly smaller debug output, and to cater to
   general assembler lossage, recognize various UNSPEC sequences
   and turn them back into a direct symbol reference.  */

static rtx
m68k_delegitimize_address (rtx orig_x)
{
  rtx x;
  struct m68k_address addr;
  rtx unspec;

  orig_x = delegitimize_mem_from_attrs (orig_x);
  x = orig_x;
  /* Work on the address inside a MEM, if any.  */
  if (MEM_P (x))
    x = XEXP (x, 0);

  if (GET_CODE (x) != PLUS || GET_MODE (x) != Pmode)
    return orig_x;

  /* Only a CONST-wrapped offset can be hiding a relocation UNSPEC.  */
  if (!m68k_decompose_address (GET_MODE (x), x, false, &addr)
      || addr.offset == NULL_RTX
      || GET_CODE (addr.offset) != CONST)
    return orig_x;

  unspec = XEXP (addr.offset, 0);
  /* Accept (const (plus (unspec ...) (const_int ...))) too.  */
  if (GET_CODE (unspec) == PLUS && CONST_INT_P (XEXP (unspec, 1)))
    unspec = XEXP (unspec, 0);
  if (GET_CODE (unspec) != UNSPEC
      || (XINT (unspec, 1) != UNSPEC_RELOC16
	  && XINT (unspec, 1) != UNSPEC_RELOC32))
    return orig_x;
  /* The symbol (or label) the UNSPEC wraps.  */
  x = XVECEXP (unspec, 0, 0);
  gcc_assert (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF);
  /* Re-apply the constant addend, if there was one.  */
  if (unspec != XEXP (addr.offset, 0))
    x = gen_rtx_PLUS (Pmode, x, XEXP (XEXP (addr.offset, 0), 1));
  /* Rebuild the remaining decomposed address around the symbol.  */
  if (addr.index)
    {
      rtx idx = addr.index;
      if (addr.scale != 1)
	idx = gen_rtx_MULT (Pmode, idx, GEN_INT (addr.scale));
      x = gen_rtx_PLUS (Pmode, idx, x);
    }
  if (addr.base)
    x = gen_rtx_PLUS (Pmode, addr.base, x);
  if (MEM_P (orig_x))
    x = replace_equiv_address_nv (orig_x, x);
  return x;
}
4677
4678 \f
4679 /* A C compound statement to output to stdio stream STREAM the
4680 assembler syntax for an instruction operand that is a memory
4681 reference whose address is ADDR. ADDR is an RTL expression.
4682
4683 Note that this contains a kludge that knows that the only reason
4684 we have an address (plus (label_ref...) (reg...)) when not generating
4685 PIC code is in the insn before a tablejump, and we know that m68k.md
4686 generates a label LInnn: on such an insn.
4687
4688 It is possible for PIC to generate a (plus (label_ref...) (reg...))
4689 and we handle that just like we would a (plus (symbol_ref...) (reg...)).
4690
4691 This routine is responsible for distinguishing between -fpic and -fPIC
4692 style relocations in an address. When generating -fpic code the
4693 offset is output in word mode (e.g. movel a5@(_foo:w), a0). When generating
4694 -fPIC code the offset is output in long mode (e.g. movel a5@(_foo:l), a0) */
4695
/* Implement PRINT_OPERAND_ADDRESS: write the assembler syntax for
   memory address ADDR to FILE.  See the commentary above for the
   label-reference kludge and the -fpic/-fPIC distinction.  */
void
print_operand_address (FILE *file, rtx addr)
{
  struct m68k_address address;

  m68k_adjust_decorated_operand (addr);

  /* QImode is used so that every legitimate address decomposes.  */
  if (!m68k_decompose_address (QImode, addr, true, &address))
    gcc_unreachable ();

  if (address.code == PRE_DEC)
    fprintf (file, MOTOROLA ? "-(%s)" : "%s@-",
	     M68K_REGNAME (REGNO (address.base)));
  else if (address.code == POST_INC)
    fprintf (file, MOTOROLA ? "(%s)+" : "%s@+",
	     M68K_REGNAME (REGNO (address.base)));
  else if (!address.base && !address.index)
    {
      /* A constant address.  */
      gcc_assert (address.offset == addr);
      if (GET_CODE (addr) == CONST_INT)
	{
	  /* (xxx).w or (xxx).l.  */
	  if (IN_RANGE (INTVAL (addr), -0x8000, 0x7fff))
	    fprintf (file, MOTOROLA ? "%d.w" : "%d:w", (int) INTVAL (addr));
	  else
	    fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (addr));
	}
      else if (TARGET_PCREL)
	{
	  /* (d16,PC) or (bd,PC,Xn) (with suppressed index register).  */
	  fputc ('(', file);
	  output_addr_const (file, addr);
	  asm_fprintf (file, flag_pic == 1 ? ":w,%Rpc)" : ":l,%Rpc)");
	}
      else
	{
	  /* (xxx).l.  We need a special case for SYMBOL_REF if the symbol
	     name ends in `.<letter>', as the last 2 characters can be
	     mistaken as a size suffix.  Put the name in parentheses.  */
	  if (GET_CODE (addr) == SYMBOL_REF
	      && strlen (XSTR (addr, 0)) > 2
	      && XSTR (addr, 0)[strlen (XSTR (addr, 0)) - 2] == '.')
	    {
	      putc ('(', file);
	      output_addr_const (file, addr);
	      putc (')', file);
	    }
	  else
	    output_addr_const (file, addr);
	}
    }
  else
    {
      int labelno;

      /* If ADDR is a (d8,pc,Xn) address, this is the number of the
	 label being accessed, otherwise it is -1.  */
      labelno = (address.offset
		 && !address.base
		 && GET_CODE (address.offset) == LABEL_REF
		 ? CODE_LABEL_NUMBER (XEXP (address.offset, 0))
		 : -1);
      if (MOTOROLA)
	{
	  /* Print the "offset(base" component.  */
	  if (labelno >= 0)
	    asm_fprintf (file, "%LL%d(%Rpc,", labelno);
	  else
	    {
	      if (address.offset)
		output_addr_const (file, address.offset);

	      putc ('(', file);
	      if (address.base)
		fputs (M68K_REGNAME (REGNO (address.base)), file);
	    }
	  /* Print the ",index" component, if any.  */
	  if (address.index)
	    {
	      if (address.base)
		putc (',', file);
	      fprintf (file, "%s.%c",
		       M68K_REGNAME (REGNO (address.index)),
		       GET_MODE (address.index) == HImode ? 'w' : 'l');
	      if (address.scale != 1)
		fprintf (file, "*%d", address.scale);
	    }
	  putc (')', file);
	}
      else /* !MOTOROLA */
	{
	  if (!address.offset && !address.index)
	    fprintf (file, "%s@", M68K_REGNAME (REGNO (address.base)));
	  else
	    {
	      /* Print the "base@(offset" component.  */
	      if (labelno >= 0)
		asm_fprintf (file, "%Rpc@(%LL%d", labelno);
	      else
		{
		  if (address.base)
		    fputs (M68K_REGNAME (REGNO (address.base)), file);
		  fprintf (file, "@(");
		  if (address.offset)
		    output_addr_const (file, address.offset);
		}
	      /* Print the ",index" component, if any.  */
	      if (address.index)
		{
		  fprintf (file, ",%s:%c",
			   M68K_REGNAME (REGNO (address.index)),
			   GET_MODE (address.index) == HImode ? 'w' : 'l');
		  if (address.scale != 1)
		    fprintf (file, ":%d", address.scale);
		}
	      putc (')', file);
	    }
	}
    }
}
4817 \f
4818 /* Check for cases where a clr insns can be omitted from code using
4819 strict_low_part sets. For example, the second clrl here is not needed:
4820 clrl d0; movw a0@+,d0; use d0; clrl d0; movw a0@+; use d0; ...
4821
4822 MODE is the mode of this STRICT_LOW_PART set. FIRST_INSN is the clear
4823 insn we are checking for redundancy. TARGET is the register set by the
4824 clear insn. */
4825
bool
strict_low_part_peephole_ok (machine_mode mode, rtx_insn *first_insn,
			     rtx target)
{
  rtx_insn *p = first_insn;

  /* Walk backwards from FIRST_INSN looking for an earlier full clear
     of TARGET that is still in effect.  */
  while ((p = PREV_INSN (p)))
    {
      /* Don't scan past the start of the basic block.  */
      if (NOTE_INSN_BASIC_BLOCK_P (p))
	return false;

      /* Other notes are irrelevant; keep scanning.  */
      if (NOTE_P (p))
	continue;

      /* If it isn't an insn, then give up.  */
      if (!INSN_P (p))
	return false;

      if (reg_set_p (target, p))
	{
	  rtx set = single_set (p);
	  rtx dest;

	  /* If it isn't an easy to recognize insn, then give up.  */
	  if (! set)
	    return false;

	  dest = SET_DEST (set);

	  /* If this sets the entire target register to zero, then our
	     first_insn is redundant.  */
	  if (rtx_equal_p (dest, target)
	      && SET_SRC (set) == const0_rtx)
	    return true;
	  else if (GET_CODE (dest) == STRICT_LOW_PART
		   && GET_CODE (XEXP (dest, 0)) == REG
		   && REGNO (XEXP (dest, 0)) == REGNO (target)
		   && (GET_MODE_SIZE (GET_MODE (XEXP (dest, 0)))
		       <= GET_MODE_SIZE (mode)))
	    /* This is a strict low part set which modifies less than
	       we are using, so it is safe.  */
	    ;
	  else
	    return false;
	}
    }

  /* Reached the start of the insn chain without finding a clear.  */
  return false;
}
4875
4876 /* Operand predicates for implementing asymmetric pc-relative addressing
4877 on m68k. The m68k supports pc-relative addressing (mode 7, register 2)
4878 when used as a source operand, but not as a destination operand.
4879
4880 We model this by restricting the meaning of the basic predicates
4881 (general_operand, memory_operand, etc) to forbid the use of this
4882 addressing mode, and then define the following predicates that permit
4883 this addressing mode. These predicates can then be used for the
4884 source operands of the appropriate instructions.
4885
   n.b. While it is theoretically possible to change all machine patterns
   to use this addressing mode where permitted by the architecture,
   it has only been implemented for "common" cases: SImode, HImode, and
   QImode operands, and only for the principal operations that would
   require this addressing mode: data movement and simple integer operations.
4891
4892 In parallel with these new predicates, two new constraint letters
4893 were defined: 'S' and 'T'. 'S' is the -mpcrel analog of 'm'.
4894 'T' replaces 's' in the non-pcrel case. It is a no-op in the pcrel case.
4895 In the pcrel case 's' is only valid in combination with 'a' registers.
4896 See addsi3, subsi3, cmpsi, and movsi patterns for a better understanding
4897 of how these constraints are used.
4898
4899 The use of these predicates is strictly optional, though patterns that
4900 don't will cause an extra reload register to be allocated where one
4901 was not necessary:
4902
4903 lea (abc:w,%pc),%a0 ; need to reload address
4904 moveq &1,%d1 ; since write to pc-relative space
4905 movel %d1,%a0@ ; is not allowed
4906 ...
4907 lea (abc:w,%pc),%a1 ; no need to reload address here
4908 movel %a1@,%d0 ; since "movel (abc:w,%pc),%d0" is ok
4909
4910 For more info, consult tiemann@cygnus.com.
4911
4912
4913 All of the ugliness with predicates and constraints is due to the
4914 simple fact that the m68k does not allow a pc-relative addressing
4915 mode as a destination. gcc does not distinguish between source and
4916 destination addresses. Hence, if we claim that pc-relative address
4917 modes are valid, e.g. TARGET_LEGITIMATE_ADDRESS_P accepts them, then we
4918 end up with invalid code. To get around this problem, we left
4919 pc-relative modes as invalid addresses, and then added special
4920 predicates and constraints to accept them.
4921
4922 A cleaner way to handle this is to modify gcc to distinguish
4923 between source and destination addresses. We can then say that
4924 pc-relative is a valid source address but not a valid destination
4925 address, and hopefully avoid a lot of the predicate and constraint
4926 hackery. Unfortunately, this would be a pretty big change. It would
4927 be a useful change for a number of ports, but there aren't any current
4928 plans to undertake this.
4929
4930 ***************************************************************************/
4931
4932
/* Output the best insn for an SImode AND with operands OPERANDS.
   May rewrite entries of OPERANDS in place to match the returned
   template.  */
const char *
output_andsi3 (rtx *operands)
{
  int logval;
  /* An AND that only clears bits within the low 16 bits can be done
     as a word operation on the low half.  */
  if (GET_CODE (operands[2]) == CONST_INT
      && (INTVAL (operands[2]) | 0xffff) == -1
      && (DATA_REG_P (operands[0])
	  || offsettable_memref_p (operands[0]))
      && !TARGET_COLDFIRE)
    {
      if (GET_CODE (operands[0]) != REG)
	operands[0] = adjust_address (operands[0], HImode, 2);
      operands[2] = GEN_INT (INTVAL (operands[2]) & 0xffff);
      /* Do not delete a following tstl %0 insn; that would be incorrect.  */
      CC_STATUS_INIT;
      if (operands[2] == const0_rtx)
	return "clr%.w %0";
      return "and%.w %2,%0";
    }
  /* An AND that clears exactly one bit can be done with bclr.  */
  if (GET_CODE (operands[2]) == CONST_INT
      && (logval = exact_log2 (~ INTVAL (operands[2]) & 0xffffffff)) >= 0
      && (DATA_REG_P (operands[0])
	  || offsettable_memref_p (operands[0])))
    {
      if (DATA_REG_P (operands[0]))
	operands[1] = GEN_INT (logval);
      else
	{
	  /* For memory, address the byte containing the bit.  */
	  operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
	  operands[1] = GEN_INT (logval % 8);
	}
      /* This does not set condition codes in a standard way.  */
      CC_STATUS_INIT;
      return "bclr %1,%0";
    }
  return "and%.l %2,%0";
}
4970
4971 const char *
4972 output_iorsi3 (rtx *operands)
4973 {
4974 register int logval;
4975 if (GET_CODE (operands[2]) == CONST_INT
4976 && INTVAL (operands[2]) >> 16 == 0
4977 && (DATA_REG_P (operands[0])
4978 || offsettable_memref_p (operands[0]))
4979 && !TARGET_COLDFIRE)
4980 {
4981 if (GET_CODE (operands[0]) != REG)
4982 operands[0] = adjust_address (operands[0], HImode, 2);
4983 /* Do not delete a following tstl %0 insn; that would be incorrect. */
4984 CC_STATUS_INIT;
4985 if (INTVAL (operands[2]) == 0xffff)
4986 return "mov%.w %2,%0";
4987 return "or%.w %2,%0";
4988 }
4989 if (GET_CODE (operands[2]) == CONST_INT
4990 && (logval = exact_log2 (INTVAL (operands[2]) & 0xffffffff)) >= 0
4991 && (DATA_REG_P (operands[0])
4992 || offsettable_memref_p (operands[0])))
4993 {
4994 if (DATA_REG_P (operands[0]))
4995 operands[1] = GEN_INT (logval);
4996 else
4997 {
4998 operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
4999 operands[1] = GEN_INT (logval % 8);
5000 }
5001 CC_STATUS_INIT;
5002 return "bset %1,%0";
5003 }
5004 return "or%.l %2,%0";
5005 }
5006
5007 const char *
5008 output_xorsi3 (rtx *operands)
5009 {
5010 register int logval;
5011 if (GET_CODE (operands[2]) == CONST_INT
5012 && INTVAL (operands[2]) >> 16 == 0
5013 && (offsettable_memref_p (operands[0]) || DATA_REG_P (operands[0]))
5014 && !TARGET_COLDFIRE)
5015 {
5016 if (! DATA_REG_P (operands[0]))
5017 operands[0] = adjust_address (operands[0], HImode, 2);
5018 /* Do not delete a following tstl %0 insn; that would be incorrect. */
5019 CC_STATUS_INIT;
5020 if (INTVAL (operands[2]) == 0xffff)
5021 return "not%.w %0";
5022 return "eor%.w %2,%0";
5023 }
5024 if (GET_CODE (operands[2]) == CONST_INT
5025 && (logval = exact_log2 (INTVAL (operands[2]) & 0xffffffff)) >= 0
5026 && (DATA_REG_P (operands[0])
5027 || offsettable_memref_p (operands[0])))
5028 {
5029 if (DATA_REG_P (operands[0]))
5030 operands[1] = GEN_INT (logval);
5031 else
5032 {
5033 operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
5034 operands[1] = GEN_INT (logval % 8);
5035 }
5036 CC_STATUS_INIT;
5037 return "bchg %1,%0";
5038 }
5039 return "eor%.l %2,%0";
5040 }
5041
5042 /* Return the instruction that should be used for a call to address X,
5043 which is known to be in operand 0. */
5044
5045 const char *
5046 output_call (rtx x)
5047 {
5048 if (symbolic_operand (x, VOIDmode))
5049 return m68k_symbolic_call;
5050 else
5051 return "jsr %a0";
5052 }
5053
5054 /* Likewise sibling calls. */
5055
5056 const char *
5057 output_sibcall (rtx x)
5058 {
5059 if (symbolic_operand (x, VOIDmode))
5060 return m68k_symbolic_jump;
5061 else
5062 return "jmp %a0";
5063 }
5064
5065 static void
5066 m68k_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
5067 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
5068 tree function)
5069 {
5070 const char *fnname = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (thunk));
5071 rtx this_slot, offset, addr, mem, tmp;
5072 rtx_insn *insn;
5073
5074 /* Avoid clobbering the struct value reg by using the
5075 static chain reg as a temporary. */
5076 tmp = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
5077
5078 /* Pretend to be a post-reload pass while generating rtl. */
5079 reload_completed = 1;
5080
5081 /* The "this" pointer is stored at 4(%sp). */
5082 this_slot = gen_rtx_MEM (Pmode, plus_constant (Pmode,
5083 stack_pointer_rtx, 4));
5084
5085 /* Add DELTA to THIS. */
5086 if (delta != 0)
5087 {
5088 /* Make the offset a legitimate operand for memory addition. */
5089 offset = GEN_INT (delta);
5090 if ((delta < -8 || delta > 8)
5091 && (TARGET_COLDFIRE || USE_MOVQ (delta)))
5092 {
5093 emit_move_insn (gen_rtx_REG (Pmode, D0_REG), offset);
5094 offset = gen_rtx_REG (Pmode, D0_REG);
5095 }
5096 emit_insn (gen_add3_insn (copy_rtx (this_slot),
5097 copy_rtx (this_slot), offset));
5098 }
5099
5100 /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
5101 if (vcall_offset != 0)
5102 {
5103 /* Set the static chain register to *THIS. */
5104 emit_move_insn (tmp, this_slot);
5105 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
5106
5107 /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
5108 addr = plus_constant (Pmode, tmp, vcall_offset);
5109 if (!m68k_legitimate_address_p (Pmode, addr, true))
5110 {
5111 emit_insn (gen_rtx_SET (tmp, addr));
5112 addr = tmp;
5113 }
5114
5115 /* Load the offset into %d0 and add it to THIS. */
5116 emit_move_insn (gen_rtx_REG (Pmode, D0_REG),
5117 gen_rtx_MEM (Pmode, addr));
5118 emit_insn (gen_add3_insn (copy_rtx (this_slot),
5119 copy_rtx (this_slot),
5120 gen_rtx_REG (Pmode, D0_REG)));
5121 }
5122
5123 /* Jump to the target function. Use a sibcall if direct jumps are
5124 allowed, otherwise load the address into a register first. */
5125 mem = DECL_RTL (function);
5126 if (!sibcall_operand (XEXP (mem, 0), VOIDmode))
5127 {
5128 gcc_assert (flag_pic);
5129
5130 if (!TARGET_SEP_DATA)
5131 {
5132 /* Use the static chain register as a temporary (call-clobbered)
5133 GOT pointer for this function. We can use the static chain
5134 register because it isn't live on entry to the thunk. */
5135 SET_REGNO (pic_offset_table_rtx, STATIC_CHAIN_REGNUM);
5136 emit_insn (gen_load_got (pic_offset_table_rtx));
5137 }
5138 legitimize_pic_address (XEXP (mem, 0), Pmode, tmp);
5139 mem = replace_equiv_address (mem, tmp);
5140 }
5141 insn = emit_call_insn (gen_sibcall (mem, const0_rtx));
5142 SIBLING_CALL_P (insn) = 1;
5143
5144 /* Run just enough of rest_of_compilation. */
5145 insn = get_insns ();
5146 split_all_insns_noflow ();
5147 assemble_start_function (thunk, fnname);
5148 final_start_function (insn, file, 1);
5149 final (insn, file, 1);
5150 final_end_function ();
5151 assemble_end_function (thunk, fnname);
5152
5153 /* Clean up the vars set above. */
5154 reload_completed = 0;
5155
5156 /* Restore the original PIC register. */
5157 if (flag_pic)
5158 SET_REGNO (pic_offset_table_rtx, PIC_REG);
5159 }
5160
/* Worker function for TARGET_STRUCT_VALUE_RTX.  The hidden pointer
   for returning large structs is always passed in
   M68K_STRUCT_VALUE_REGNUM, regardless of FNTYPE or direction.  */

static rtx
m68k_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
		       int incoming ATTRIBUTE_UNUSED)
{
  return gen_rtx_REG (Pmode, M68K_STRUCT_VALUE_REGNUM);
}
5169
5170 /* Return nonzero if register old_reg can be renamed to register new_reg. */
5171 int
5172 m68k_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED,
5173 unsigned int new_reg)
5174 {
5175
5176 /* Interrupt functions can only use registers that have already been
5177 saved by the prologue, even if they would normally be
5178 call-clobbered. */
5179
5180 if ((m68k_get_function_kind (current_function_decl)
5181 == m68k_fk_interrupt_handler)
5182 && !df_regs_ever_live_p (new_reg))
5183 return 0;
5184
5185 return 1;
5186 }
5187
5188 /* Implement TARGET_HARD_REGNO_NREGS.
5189
5190 On the m68k, ordinary registers hold 32 bits worth;
5191 for the 68881 registers, a single register is always enough for
5192 anything that can be stored in them at all. */
5193
5194 static unsigned int
5195 m68k_hard_regno_nregs (unsigned int regno, machine_mode mode)
5196 {
5197 if (regno >= 16)
5198 return GET_MODE_NUNITS (mode);
5199 return CEIL (GET_MODE_SIZE (mode), UNITS_PER_WORD);
5200 }
5201
5202 /* Implement TARGET_HARD_REGNO_MODE_OK. On the 68000, we let the cpu
5203 registers can hold any mode, but restrict the 68881 registers to
5204 floating-point modes. */
5205
5206 static bool
5207 m68k_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
5208 {
5209 if (DATA_REGNO_P (regno))
5210 {
5211 /* Data Registers, can hold aggregate if fits in. */
5212 if (regno + GET_MODE_SIZE (mode) / 4 <= 8)
5213 return true;
5214 }
5215 else if (ADDRESS_REGNO_P (regno))
5216 {
5217 if (regno + GET_MODE_SIZE (mode) / 4 <= 16)
5218 return true;
5219 }
5220 else if (FP_REGNO_P (regno))
5221 {
5222 /* FPU registers, hold float or complex float of long double or
5223 smaller. */
5224 if ((GET_MODE_CLASS (mode) == MODE_FLOAT
5225 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
5226 && GET_MODE_UNIT_SIZE (mode) <= TARGET_FP_REG_SIZE)
5227 return true;
5228 }
5229 return false;
5230 }
5231
5232 /* Implement TARGET_MODES_TIEABLE_P. */
5233
5234 static bool
5235 m68k_modes_tieable_p (machine_mode mode1, machine_mode mode2)
5236 {
5237 return (!TARGET_HARD_FLOAT
5238 || ((GET_MODE_CLASS (mode1) == MODE_FLOAT
5239 || GET_MODE_CLASS (mode1) == MODE_COMPLEX_FLOAT)
5240 == (GET_MODE_CLASS (mode2) == MODE_FLOAT
5241 || GET_MODE_CLASS (mode2) == MODE_COMPLEX_FLOAT)));
5242 }
5243
5244 /* Implement SECONDARY_RELOAD_CLASS. */
5245
5246 enum reg_class
5247 m68k_secondary_reload_class (enum reg_class rclass,
5248 machine_mode mode, rtx x)
5249 {
5250 int regno;
5251
5252 regno = true_regnum (x);
5253
5254 /* If one operand of a movqi is an address register, the other
5255 operand must be a general register or constant. Other types
5256 of operand must be reloaded through a data register. */
5257 if (GET_MODE_SIZE (mode) == 1
5258 && reg_classes_intersect_p (rclass, ADDR_REGS)
5259 && !(INT_REGNO_P (regno) || CONSTANT_P (x)))
5260 return DATA_REGS;
5261
5262 /* PC-relative addresses must be loaded into an address register first. */
5263 if (TARGET_PCREL
5264 && !reg_class_subset_p (rclass, ADDR_REGS)
5265 && symbolic_operand (x, VOIDmode))
5266 return ADDR_REGS;
5267
5268 return NO_REGS;
5269 }
5270
5271 /* Implement PREFERRED_RELOAD_CLASS. */
5272
5273 enum reg_class
5274 m68k_preferred_reload_class (rtx x, enum reg_class rclass)
5275 {
5276 enum reg_class secondary_class;
5277
5278 /* If RCLASS might need a secondary reload, try restricting it to
5279 a class that doesn't. */
5280 secondary_class = m68k_secondary_reload_class (rclass, GET_MODE (x), x);
5281 if (secondary_class != NO_REGS
5282 && reg_class_subset_p (secondary_class, rclass))
5283 return secondary_class;
5284
5285 /* Prefer to use moveq for in-range constants. */
5286 if (GET_CODE (x) == CONST_INT
5287 && reg_class_subset_p (DATA_REGS, rclass)
5288 && IN_RANGE (INTVAL (x), -0x80, 0x7f))
5289 return DATA_REGS;
5290
5291 /* ??? Do we really need this now? */
5292 if (GET_CODE (x) == CONST_DOUBLE
5293 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
5294 {
5295 if (TARGET_HARD_FLOAT && reg_class_subset_p (FP_REGS, rclass))
5296 return FP_REGS;
5297
5298 return NO_REGS;
5299 }
5300
5301 return rclass;
5302 }
5303
5304 /* Return floating point values in a 68881 register. This makes 68881 code
5305 a little bit faster. It also makes -msoft-float code incompatible with
5306 hard-float code, so people have to be careful not to mix the two.
5307 For ColdFire it was decided the ABI incompatibility is undesirable.
5308 If there is need for a hard-float ABI it is probably worth doing it
5309 properly and also passing function arguments in FP registers. */
5310 rtx
5311 m68k_libcall_value (machine_mode mode)
5312 {
5313 switch (mode) {
5314 case E_SFmode:
5315 case E_DFmode:
5316 case E_XFmode:
5317 if (TARGET_68881)
5318 return gen_rtx_REG (mode, FP0_REG);
5319 break;
5320 default:
5321 break;
5322 }
5323
5324 return gen_rtx_REG (mode, m68k_libcall_value_in_a0_p ? A0_REG : D0_REG);
5325 }
5326
5327 /* Location in which function value is returned.
5328 NOTE: Due to differences in ABIs, don't call this function directly,
5329 use FUNCTION_VALUE instead. */
5330 rtx
5331 m68k_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED)
5332 {
5333 machine_mode mode;
5334
5335 mode = TYPE_MODE (valtype);
5336 switch (mode) {
5337 case E_SFmode:
5338 case E_DFmode:
5339 case E_XFmode:
5340 if (TARGET_68881)
5341 return gen_rtx_REG (mode, FP0_REG);
5342 break;
5343 default:
5344 break;
5345 }
5346
5347 /* If the function returns a pointer, push that into %a0. */
5348 if (func && POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (func))))
5349 /* For compatibility with the large body of existing code which
5350 does not always properly declare external functions returning
5351 pointer types, the m68k/SVR4 convention is to copy the value
5352 returned for pointer functions from a0 to d0 in the function
5353 epilogue, so that callers that have neglected to properly
5354 declare the callee can still find the correct return value in
5355 d0. */
5356 return gen_rtx_PARALLEL
5357 (mode,
5358 gen_rtvec (2,
5359 gen_rtx_EXPR_LIST (VOIDmode,
5360 gen_rtx_REG (mode, A0_REG),
5361 const0_rtx),
5362 gen_rtx_EXPR_LIST (VOIDmode,
5363 gen_rtx_REG (mode, D0_REG),
5364 const0_rtx)));
5365 else if (POINTER_TYPE_P (valtype))
5366 return gen_rtx_REG (mode, A0_REG);
5367 else
5368 return gen_rtx_REG (mode, D0_REG);
5369 }
5370
/* Worker function for TARGET_RETURN_IN_MEMORY.  */
#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
static bool
m68k_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  machine_mode mode = TYPE_MODE (type);

  /* BLKmode values always go in memory.  Beyond that, an aggregate
     whose known alignment is less than the alignment of the mode that
     would contain it must also be returned in memory, to keep code
     compiled with -mstrict-align compatible with code compiled with
     -mno-strict-align.  */
  return (mode == BLKmode
	  || (AGGREGATE_TYPE_P (type)
	      && TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (mode)));
}
#endif
5392
/* CPU to schedule the program for.  Read by all of the scheduler
   hooks below to select per-core behavior.  */
enum attr_cpu m68k_sched_cpu;

/* MAC to schedule the program for.  */
enum attr_mac m68k_sched_mac;
5398
/* Operand type classification used by the scheduler below.  The MEM*
   names refer to the m68k effective-address (EA) modes; see
   m68k_sched_attr_opx_type / m68k_sched_attr_opy_type, which map these
   enumerators one-to-one onto the generated attribute enums.  */
enum attr_op_type
{
  /* No operand.  */
  OP_TYPE_NONE,

  /* Integer register.  */
  OP_TYPE_RN,

  /* FP register.  */
  OP_TYPE_FPN,

  /* Implicit mem reference (e.g. stack).  */
  OP_TYPE_MEM1,

  /* Memory without offset or indexing.  EA modes 2, 3 and 4.  */
  OP_TYPE_MEM234,

  /* Memory with offset but without indexing.  EA mode 5.  */
  OP_TYPE_MEM5,

  /* Memory with indexing.  EA mode 6.  */
  OP_TYPE_MEM6,

  /* Memory referenced by absolute address.  EA mode 7.  */
  OP_TYPE_MEM7,

  /* Immediate operand that doesn't require extension word.  */
  OP_TYPE_IMM_Q,

  /* Immediate 16 bit operand.  */
  OP_TYPE_IMM_W,

  /* Immediate 32 bit operand.  */
  OP_TYPE_IMM_L
};
5435
5436 /* Return type of memory ADDR_RTX refers to. */
5437 static enum attr_op_type
5438 sched_address_type (machine_mode mode, rtx addr_rtx)
5439 {
5440 struct m68k_address address;
5441
5442 if (symbolic_operand (addr_rtx, VOIDmode))
5443 return OP_TYPE_MEM7;
5444
5445 if (!m68k_decompose_address (mode, addr_rtx,
5446 reload_completed, &address))
5447 {
5448 gcc_assert (!reload_completed);
5449 /* Reload will likely fix the address to be in the register. */
5450 return OP_TYPE_MEM234;
5451 }
5452
5453 if (address.scale != 0)
5454 return OP_TYPE_MEM6;
5455
5456 if (address.base != NULL_RTX)
5457 {
5458 if (address.offset == NULL_RTX)
5459 return OP_TYPE_MEM234;
5460
5461 return OP_TYPE_MEM5;
5462 }
5463
5464 gcc_assert (address.offset != NULL_RTX);
5465
5466 return OP_TYPE_MEM7;
5467 }
5468
5469 /* Return X or Y (depending on OPX_P) operand of INSN. */
5470 static rtx
5471 sched_get_operand (rtx_insn *insn, bool opx_p)
5472 {
5473 int i;
5474
5475 if (recog_memoized (insn) < 0)
5476 gcc_unreachable ();
5477
5478 extract_constrain_insn_cached (insn);
5479
5480 if (opx_p)
5481 i = get_attr_opx (insn);
5482 else
5483 i = get_attr_opy (insn);
5484
5485 if (i >= recog_data.n_operands)
5486 return NULL;
5487
5488 return recog_data.operand[i];
5489 }
5490
/* Return type of INSN's operand X (if OPX_P) or operand Y (if !OPX_P).
   If ADDRESS_P is true, return type of memory location operand refers to.  */
static enum attr_op_type
sched_attr_op_type (rtx_insn *insn, bool opx_p, bool address_p)
{
  rtx op;

  op = sched_get_operand (insn, opx_p);

  if (op == NULL)
    {
      /* Before reload an insn may not yet have the operand; treat it
	 as a plain register.  */
      gcc_assert (!reload_completed);
      return OP_TYPE_RN;
    }

  if (address_p)
    /* Only the address shape matters, not the access width, so an
       arbitrary (QI) mode is used for the decomposition.  */
    return sched_address_type (QImode, op);

  if (memory_operand (op, VOIDmode))
    return sched_address_type (GET_MODE (op), XEXP (op, 0));

  if (register_operand (op, VOIDmode))
    {
      /* Before reload, classify by mode (a float-mode pseudo will
	 presumably end up in an FP register); after reload the hard
	 register itself decides.  */
      if ((!reload_completed && FLOAT_MODE_P (GET_MODE (op)))
	  || (reload_completed && FP_REG_P (op)))
	return OP_TYPE_FPN;

      return OP_TYPE_RN;
    }

  if (GET_CODE (op) == CONST_INT)
    {
      int ival;

      ival = INTVAL (op);

      /* Check for quick constants.  */
      switch (get_attr_type (insn))
	{
	case TYPE_ALUQ_L:
	  /* addq/subq take a 3-bit immediate in 1..8 (or its negation).  */
	  if (IN_RANGE (ival, 1, 8) || IN_RANGE (ival, -8, -1))
	    return OP_TYPE_IMM_Q;

	  gcc_assert (!reload_completed);
	  break;

	case TYPE_MOVEQ_L:
	  if (USE_MOVQ (ival))
	    return OP_TYPE_IMM_Q;

	  gcc_assert (!reload_completed);
	  break;

	case TYPE_MOV3Q_L:
	  if (valid_mov3q_const (ival))
	    return OP_TYPE_IMM_Q;

	  gcc_assert (!reload_completed);
	  break;

	default:
	  break;
	}

      /* Otherwise classify by the number of extension words needed.  */
      if (IN_RANGE (ival, -0x8000, 0x7fff))
	return OP_TYPE_IMM_W;

      return OP_TYPE_IMM_L;
    }

  if (GET_CODE (op) == CONST_DOUBLE)
    {
      switch (GET_MODE (op))
	{
	case E_SFmode:
	  return OP_TYPE_IMM_W;

	case E_VOIDmode:
	case E_DFmode:
	  return OP_TYPE_IMM_L;

	default:
	  gcc_unreachable ();
	}
    }

  if (GET_CODE (op) == CONST
      || symbolic_operand (op, VOIDmode)
      || LABEL_P (op))
    {
      switch (GET_MODE (op))
	{
	case E_QImode:
	  return OP_TYPE_IMM_Q;

	case E_HImode:
	  return OP_TYPE_IMM_W;

	case E_SImode:
	  return OP_TYPE_IMM_L;

	default:
	  if (symbolic_operand (m68k_unwrap_symbol (op, false), VOIDmode))
	    /* Just a guess.  */
	    return OP_TYPE_IMM_W;

	  return OP_TYPE_IMM_L;
	}
    }

  /* Anything else should only appear before reload; fall back to
     treating it by mode.  */
  gcc_assert (!reload_completed);

  if (FLOAT_MODE_P (GET_MODE (op)))
    return OP_TYPE_FPN;

  return OP_TYPE_RN;
}
5608
/* Implement opx_type attribute.
   Return type of INSN's operand X.
   If ADDRESS_P is true, return type of memory location operand refers to.
   This is a mechanical one-to-one translation from the generic
   OP_TYPE_* classification to the generated OPX_TYPE_* attribute enum.  */
enum attr_opx_type
m68k_sched_attr_opx_type (rtx_insn *insn, int address_p)
{
  switch (sched_attr_op_type (insn, true, address_p != 0))
    {
    case OP_TYPE_RN:
      return OPX_TYPE_RN;

    case OP_TYPE_FPN:
      return OPX_TYPE_FPN;

    case OP_TYPE_MEM1:
      return OPX_TYPE_MEM1;

    case OP_TYPE_MEM234:
      return OPX_TYPE_MEM234;

    case OP_TYPE_MEM5:
      return OPX_TYPE_MEM5;

    case OP_TYPE_MEM6:
      return OPX_TYPE_MEM6;

    case OP_TYPE_MEM7:
      return OPX_TYPE_MEM7;

    case OP_TYPE_IMM_Q:
      return OPX_TYPE_IMM_Q;

    case OP_TYPE_IMM_W:
      return OPX_TYPE_IMM_W;

    case OP_TYPE_IMM_L:
      return OPX_TYPE_IMM_L;

    default:
      /* OP_TYPE_NONE is never produced by sched_attr_op_type.  */
      gcc_unreachable ();
    }
}
5651
/* Implement opy_type attribute.
   Return type of INSN's operand Y.
   If ADDRESS_P is true, return type of memory location operand refers to.
   This is a mechanical one-to-one translation from the generic
   OP_TYPE_* classification to the generated OPY_TYPE_* attribute enum.  */
enum attr_opy_type
m68k_sched_attr_opy_type (rtx_insn *insn, int address_p)
{
  switch (sched_attr_op_type (insn, false, address_p != 0))
    {
    case OP_TYPE_RN:
      return OPY_TYPE_RN;

    case OP_TYPE_FPN:
      return OPY_TYPE_FPN;

    case OP_TYPE_MEM1:
      return OPY_TYPE_MEM1;

    case OP_TYPE_MEM234:
      return OPY_TYPE_MEM234;

    case OP_TYPE_MEM5:
      return OPY_TYPE_MEM5;

    case OP_TYPE_MEM6:
      return OPY_TYPE_MEM6;

    case OP_TYPE_MEM7:
      return OPY_TYPE_MEM7;

    case OP_TYPE_IMM_Q:
      return OPY_TYPE_IMM_Q;

    case OP_TYPE_IMM_W:
      return OPY_TYPE_IMM_W;

    case OP_TYPE_IMM_L:
      return OPY_TYPE_IMM_L;

    default:
      /* OP_TYPE_NONE is never produced by sched_attr_op_type.  */
      gcc_unreachable ();
    }
}
5694
5695 /* Return size of INSN as int. */
5696 static int
5697 sched_get_attr_size_int (rtx_insn *insn)
5698 {
5699 int size;
5700
5701 switch (get_attr_type (insn))
5702 {
5703 case TYPE_IGNORE:
5704 /* There should be no references to m68k_sched_attr_size for 'ignore'
5705 instructions. */
5706 gcc_unreachable ();
5707 return 0;
5708
5709 case TYPE_MUL_L:
5710 size = 2;
5711 break;
5712
5713 default:
5714 size = 1;
5715 break;
5716 }
5717
5718 switch (get_attr_opx_type (insn))
5719 {
5720 case OPX_TYPE_NONE:
5721 case OPX_TYPE_RN:
5722 case OPX_TYPE_FPN:
5723 case OPX_TYPE_MEM1:
5724 case OPX_TYPE_MEM234:
5725 case OPY_TYPE_IMM_Q:
5726 break;
5727
5728 case OPX_TYPE_MEM5:
5729 case OPX_TYPE_MEM6:
5730 /* Here we assume that most absolute references are short. */
5731 case OPX_TYPE_MEM7:
5732 case OPY_TYPE_IMM_W:
5733 ++size;
5734 break;
5735
5736 case OPY_TYPE_IMM_L:
5737 size += 2;
5738 break;
5739
5740 default:
5741 gcc_unreachable ();
5742 }
5743
5744 switch (get_attr_opy_type (insn))
5745 {
5746 case OPY_TYPE_NONE:
5747 case OPY_TYPE_RN:
5748 case OPY_TYPE_FPN:
5749 case OPY_TYPE_MEM1:
5750 case OPY_TYPE_MEM234:
5751 case OPY_TYPE_IMM_Q:
5752 break;
5753
5754 case OPY_TYPE_MEM5:
5755 case OPY_TYPE_MEM6:
5756 /* Here we assume that most absolute references are short. */
5757 case OPY_TYPE_MEM7:
5758 case OPY_TYPE_IMM_W:
5759 ++size;
5760 break;
5761
5762 case OPY_TYPE_IMM_L:
5763 size += 2;
5764 break;
5765
5766 default:
5767 gcc_unreachable ();
5768 }
5769
5770 if (size > 3)
5771 {
5772 gcc_assert (!reload_completed);
5773
5774 size = 3;
5775 }
5776
5777 return size;
5778 }
5779
5780 /* Return size of INSN as attribute enum value. */
5781 enum attr_size
5782 m68k_sched_attr_size (rtx_insn *insn)
5783 {
5784 switch (sched_get_attr_size_int (insn))
5785 {
5786 case 1:
5787 return SIZE_1;
5788
5789 case 2:
5790 return SIZE_2;
5791
5792 case 3:
5793 return SIZE_3;
5794
5795 default:
5796 gcc_unreachable ();
5797 }
5798 }
5799
/* Coarsely classify operand X or Y (depending on OPX_P) of INSN:
   OP_TYPE_RN for anything that is not a memory reference,
   OP_TYPE_MEM6 for an indexed memory reference, and
   OP_TYPE_MEM1 for any other memory reference (despite the name, this
   bucket covers EA modes 1-5 and 7 here).  */
static enum attr_op_type
sched_get_opxy_mem_type (rtx_insn *insn, bool opx_p)
{
  if (opx_p)
    {
      switch (get_attr_opx_type (insn))
	{
	case OPX_TYPE_NONE:
	case OPX_TYPE_RN:
	case OPX_TYPE_FPN:
	case OPX_TYPE_IMM_Q:
	case OPX_TYPE_IMM_W:
	case OPX_TYPE_IMM_L:
	  return OP_TYPE_RN;

	case OPX_TYPE_MEM1:
	case OPX_TYPE_MEM234:
	case OPX_TYPE_MEM5:
	case OPX_TYPE_MEM7:
	  return OP_TYPE_MEM1;

	case OPX_TYPE_MEM6:
	  return OP_TYPE_MEM6;

	default:
	  gcc_unreachable ();
	}
    }
  else
    {
      switch (get_attr_opy_type (insn))
	{
	case OPY_TYPE_NONE:
	case OPY_TYPE_RN:
	case OPY_TYPE_FPN:
	case OPY_TYPE_IMM_Q:
	case OPY_TYPE_IMM_W:
	case OPY_TYPE_IMM_L:
	  return OP_TYPE_RN;

	case OPY_TYPE_MEM1:
	case OPY_TYPE_MEM234:
	case OPY_TYPE_MEM5:
	case OPY_TYPE_MEM7:
	  return OP_TYPE_MEM1;

	case OPY_TYPE_MEM6:
	  return OP_TYPE_MEM6;

	default:
	  gcc_unreachable ();
	}
    }
}
5856
/* Implement op_mem attribute: classify INSN's memory traffic by
   combining the coarse types of its two operands.  Operand Y only ever
   contributes a read; operand X's direction comes from the opx_access
   attribute.  The enumerator names apparently encode <read><write>,
   each position being 0 (no access), 1 (plain access) or I (indexed
   access) -- NOTE(review): confirm against the op_mem attribute
   definition in the machine description.  */
enum attr_op_mem
m68k_sched_attr_op_mem (rtx_insn *insn)
{
  enum attr_op_type opx;
  enum attr_op_type opy;

  opx = sched_get_opxy_mem_type (insn, true);
  opy = sched_get_opxy_mem_type (insn, false);

  /* No memory references at all.  */
  if (opy == OP_TYPE_RN && opx == OP_TYPE_RN)
    return OP_MEM_00;

  if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM1)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_R:
	  return OP_MEM_10;

	case OPX_ACCESS_W:
	  return OP_MEM_01;

	case OPX_ACCESS_RW:
	  return OP_MEM_11;

	default:
	  gcc_unreachable ();
	}
    }

  if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM6)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_R:
	  return OP_MEM_I0;

	case OPX_ACCESS_W:
	  return OP_MEM_0I;

	case OPX_ACCESS_RW:
	  return OP_MEM_I1;

	default:
	  gcc_unreachable ();
	}
    }

  if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_RN)
    return OP_MEM_10;

  if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM1)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_W:
	  return OP_MEM_11;

	default:
	  /* Before reload the access kind may not be final yet;
	     assume the worst case.  */
	  gcc_assert (!reload_completed);
	  return OP_MEM_11;
	}
    }

  if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM6)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_W:
	  return OP_MEM_1I;

	default:
	  gcc_assert (!reload_completed);
	  return OP_MEM_1I;
	}
    }

  if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_RN)
    return OP_MEM_I0;

  if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM1)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_W:
	  return OP_MEM_I1;

	default:
	  gcc_assert (!reload_completed);
	  return OP_MEM_I1;
	}
    }

  /* The only remaining combination: both operands are indexed memory.  */
  gcc_assert (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM6);
  gcc_assert (!reload_completed);
  return OP_MEM_I1;
}
5955
/* Data for ColdFire V4 index bypass.
   Producer modifies register that is used as index in consumer with
   specified scale.  Set by m68k_sched_indexed_address_bypass_p and
   consumed (and cleared) by m68k_sched_adjust_cost.  */
static struct
{
  /* Producer instruction.  */
  rtx pro;

  /* Consumer instruction.  */
  rtx con;

  /* Scale of indexed memory access within consumer.
     Or zero if bypass should not be effective at the moment.  */
  int scale;
} sched_cfv4_bypass_data;

/* An empty state that is used in m68k_sched_adjust_cost.
   Allocated in m68k_sched_md_init_global, freed in
   m68k_sched_md_finish_global.  */
static state_t sched_adjust_cost_state;
5974
/* Implement adjust_cost scheduler hook.
   Return adjusted COST of dependency LINK between DEF_INSN and INSN.  */
static int
m68k_sched_adjust_cost (rtx_insn *insn, int, rtx_insn *def_insn, int cost,
			unsigned int)
{
  int delay;

  /* Unrecognizable insns (e.g. asm statements) get no adjustment.  */
  if (recog_memoized (def_insn) < 0
      || recog_memoized (insn) < 0)
    return cost;

  if (sched_cfv4_bypass_data.scale == 1)
    /* Handle ColdFire V4 bypass for indexed address with 1x scale.  */
    {
      /* haifa-sched.c: insn_cost () calls bypass_p () just before
	 targetm.sched.adjust_cost ().  Hence, we can be relatively sure
	 that the data in sched_cfv4_bypass_data is up to date.  */
      gcc_assert (sched_cfv4_bypass_data.pro == def_insn
		  && sched_cfv4_bypass_data.con == insn);

      /* The 1x-scale bypass costs at least 3 cycles.  */
      if (cost < 3)
	cost = 3;

      /* Consume the one-shot bypass record.  */
      sched_cfv4_bypass_data.pro = NULL;
      sched_cfv4_bypass_data.con = NULL;
      sched_cfv4_bypass_data.scale = 0;
    }
  else
    gcc_assert (sched_cfv4_bypass_data.pro == NULL
		&& sched_cfv4_bypass_data.con == NULL
		&& sched_cfv4_bypass_data.scale == 0);

  /* Don't try to issue INSN earlier than DFA permits.
     This is especially useful for instructions that write to memory,
     as their true dependence (default) latency is better to be set to 0
     to workaround alias analysis limitations.
     This is, in fact, a machine independent tweak, so, probably,
     it should be moved to haifa-sched.c: insn_cost ().  */
  delay = min_insn_conflict_delay (sched_adjust_cost_state, def_insn, insn);
  if (delay > cost)
    cost = delay;

  return cost;
}
6020
6021 /* Return maximal number of insns that can be scheduled on a single cycle. */
6022 static int
6023 m68k_sched_issue_rate (void)
6024 {
6025 switch (m68k_sched_cpu)
6026 {
6027 case CPU_CFV1:
6028 case CPU_CFV2:
6029 case CPU_CFV3:
6030 return 1;
6031
6032 case CPU_CFV4:
6033 return 2;
6034
6035 default:
6036 gcc_unreachable ();
6037 return 0;
6038 }
6039 }
6040
/* Maximal length of instruction for current CPU.
   E.g. it is 3 for any ColdFire core.  */
static int max_insn_size;

/* Data to model instruction buffer of CPU.  */
struct _sched_ib
{
  /* True if instruction buffer model is modeled for current CPU.  */
  bool enabled_p;

  /* Size of the instruction buffer in words.  */
  int size;

  /* Number of filled words in the instruction buffer.  */
  int filled;

  /* Additional information about instruction buffer for CPUs that have
     a buffer of instruction records, rather than a plain buffer
     of instruction words.  */
  struct _sched_ib_records
  {
    /* Size of buffer in records.  */
    int n_insns;

    /* Array to hold data on adjustments made to the size of the buffer.
       Used as a circular log indexed by adjust_index.  */
    int *adjust;

    /* Index of the above array.  */
    int adjust_index;
  } records;

  /* An insn that reserves (marks empty) one word in the instruction buffer.  */
  rtx insn;
};

/* The single buffer-model instance, (re)initialized by the scheduler
   init hooks below.  */
static struct _sched_ib sched_ib;

/* ID of memory unit.  */
static int sched_mem_unit_code;
6080
/* Implementation of the targetm.sched.variable_issue () hook.
   It is called after INSN was issued.  It returns the number of insns
   that can possibly get scheduled on the current cycle.
   It is used here to determine the effect of INSN on the instruction
   buffer: issuing INSN drains its size in words from the buffer.  */
static int
m68k_sched_variable_issue (FILE *sched_dump ATTRIBUTE_UNUSED,
			   int sched_verbose ATTRIBUTE_UNUSED,
			   rtx_insn *insn, int can_issue_more)
{
  int insn_size;

  if (recog_memoized (insn) >= 0 && get_attr_type (insn) != TYPE_IGNORE)
    {
      switch (m68k_sched_cpu)
	{
	case CPU_CFV1:
	case CPU_CFV2:
	  insn_size = sched_get_attr_size_int (insn);
	  break;

	case CPU_CFV3:
	  insn_size = sched_get_attr_size_int (insn);

	  /* ColdFire V3 and V4 cores have instruction buffers that can
	     accumulate up to 8 instructions regardless of instructions'
	     sizes.  So we should take care not to "prefetch" 24 one-word
	     or 12 two-words instructions.
	     To model this behavior we temporarily decrease size of the
	     buffer by (max_insn_size - insn_size) for next 7 instructions.  */
	  {
	    int adjust;

	    adjust = max_insn_size - insn_size;
	    sched_ib.size -= adjust;

	    if (sched_ib.filled > sched_ib.size)
	      sched_ib.filled = sched_ib.size;

	    /* Log the adjustment so it can be undone later.  */
	    sched_ib.records.adjust[sched_ib.records.adjust_index] = adjust;
	  }

	  /* Advance the circular log.  */
	  ++sched_ib.records.adjust_index;
	  if (sched_ib.records.adjust_index == sched_ib.records.n_insns)
	    sched_ib.records.adjust_index = 0;

	  /* Undo adjustment we did 7 instructions ago.  */
	  sched_ib.size
	    += sched_ib.records.adjust[sched_ib.records.adjust_index];

	  break;

	case CPU_CFV4:
	  /* The V4 buffer is not modeled; see m68k_sched_md_init_global.  */
	  gcc_assert (!sched_ib.enabled_p);
	  insn_size = 0;
	  break;

	default:
	  gcc_unreachable ();
	}

      if (insn_size > sched_ib.filled)
	/* Scheduling for register pressure does not always take DFA into
	   account.  Workaround instruction buffer not being filled enough.  */
	{
	  gcc_assert (sched_pressure == SCHED_PRESSURE_WEIGHTED);
	  insn_size = sched_ib.filled;
	}

      --can_issue_more;
    }
  else if (GET_CODE (PATTERN (insn)) == ASM_INPUT
	   || asm_noperands (PATTERN (insn)) >= 0)
    /* An asm makes no size claim; conservatively treat it as draining
       the whole buffer.  */
    insn_size = sched_ib.filled;
  else
    insn_size = 0;

  sched_ib.filled -= insn_size;

  return can_issue_more;
}
6162
/* Return how many instructions should scheduler lookahead to choose the
   best one.  One less than the issue rate: on a single-issue pipeline
   this is 0, i.e. no multipass lookahead is done.  */
static int
m68k_sched_first_cycle_multipass_dfa_lookahead (void)
{
  return m68k_sched_issue_rate () - 1;
}
6170
/* Implementation of targetm.sched.init_global () hook.
   It is invoked once per scheduling pass and is used here
   to initialize scheduler constants.  */
static void
m68k_sched_md_init_global (FILE *sched_dump ATTRIBUTE_UNUSED,
			   int sched_verbose ATTRIBUTE_UNUSED,
			   int n_insns ATTRIBUTE_UNUSED)
{
  /* Check that all instructions have DFA reservations and
     that all instructions can be issued from a clean state.  */
  if (flag_checking)
    {
      rtx_insn *insn;
      state_t state;

      state = alloca (state_size ());

      for (insn = get_insns (); insn != NULL; insn = NEXT_INSN (insn))
	{
	  if (INSN_P (insn) && recog_memoized (insn) >= 0)
	    {
	      gcc_assert (insn_has_dfa_reservation_p (insn));

	      state_reset (state);
	      if (state_transition (state, insn) >= 0)
		gcc_unreachable ();
	    }
	}
    }

  /* Setup target cpu.  */

  /* ColdFire V4 has a set of features to keep its instruction buffer full
     (e.g., a separate memory bus for instructions) and, hence, we do not model
     buffer for this CPU.  */
  sched_ib.enabled_p = (m68k_sched_cpu != CPU_CFV4);

  switch (m68k_sched_cpu)
    {
    case CPU_CFV4:
      sched_ib.filled = 0;

      /* FALLTHRU */

    case CPU_CFV1:
    case CPU_CFV2:
      max_insn_size = 3;
      sched_ib.records.n_insns = 0;
      sched_ib.records.adjust = NULL;
      break;

    case CPU_CFV3:
      max_insn_size = 3;
      sched_ib.records.n_insns = 8;
      sched_ib.records.adjust = XNEWVEC (int, sched_ib.records.n_insns);
      break;

    default:
      gcc_unreachable ();
    }

  sched_mem_unit_code = get_cpu_unit_code ("cf_mem1");

  /* Allocate the clean DFA state used by m68k_sched_adjust_cost;
     released in m68k_sched_md_finish_global.  */
  sched_adjust_cost_state = xmalloc (state_size ());
  state_reset (sched_adjust_cost_state);

  /* Build the pseudo-insn that models one buffer word; consumed by
     m68k_sched_dfa_post_advance_cycle.  */
  start_sequence ();
  emit_insn (gen_ib ());
  sched_ib.insn = get_insns ();
  end_sequence ();
}
6242
6243 /* Scheduling pass is now finished. Free/reset static variables. */
6244 static void
6245 m68k_sched_md_finish_global (FILE *dump ATTRIBUTE_UNUSED,
6246 int verbose ATTRIBUTE_UNUSED)
6247 {
6248 sched_ib.insn = NULL;
6249
6250 free (sched_adjust_cost_state);
6251 sched_adjust_cost_state = NULL;
6252
6253 sched_mem_unit_code = 0;
6254
6255 free (sched_ib.records.adjust);
6256 sched_ib.records.adjust = NULL;
6257 sched_ib.records.n_insns = 0;
6258 max_insn_size = 0;
6259 }
6260
6261 /* Implementation of targetm.sched.init () hook.
6262 It is invoked each time scheduler starts on the new block (basic block or
6263 extended basic block). */
6264 static void
6265 m68k_sched_md_init (FILE *sched_dump ATTRIBUTE_UNUSED,
6266 int sched_verbose ATTRIBUTE_UNUSED,
6267 int n_insns ATTRIBUTE_UNUSED)
6268 {
6269 switch (m68k_sched_cpu)
6270 {
6271 case CPU_CFV1:
6272 case CPU_CFV2:
6273 sched_ib.size = 6;
6274 break;
6275
6276 case CPU_CFV3:
6277 sched_ib.size = sched_ib.records.n_insns * max_insn_size;
6278
6279 memset (sched_ib.records.adjust, 0,
6280 sched_ib.records.n_insns * sizeof (*sched_ib.records.adjust));
6281 sched_ib.records.adjust_index = 0;
6282 break;
6283
6284 case CPU_CFV4:
6285 gcc_assert (!sched_ib.enabled_p);
6286 sched_ib.size = 0;
6287 break;
6288
6289 default:
6290 gcc_unreachable ();
6291 }
6292
6293 if (sched_ib.enabled_p)
6294 /* haifa-sched.c: schedule_block () calls advance_cycle () just before
6295 the first cycle. Workaround that. */
6296 sched_ib.filled = -2;
6297 }
6298
6299 /* Implementation of targetm.sched.dfa_pre_advance_cycle () hook.
6300 It is invoked just before current cycle finishes and is used here
6301 to track if instruction buffer got its two words this cycle. */
6302 static void
6303 m68k_sched_dfa_pre_advance_cycle (void)
6304 {
6305 if (!sched_ib.enabled_p)
6306 return;
6307
6308 if (!cpu_unit_reservation_p (curr_state, sched_mem_unit_code))
6309 {
6310 sched_ib.filled += 2;
6311
6312 if (sched_ib.filled > sched_ib.size)
6313 sched_ib.filled = sched_ib.size;
6314 }
6315 }
6316
6317 /* Implementation of targetm.sched.dfa_post_advance_cycle () hook.
6318 It is invoked just after new cycle begins and is used here
6319 to setup number of filled words in the instruction buffer so that
6320 instructions which won't have all their words prefetched would be
6321 stalled for a cycle. */
6322 static void
6323 m68k_sched_dfa_post_advance_cycle (void)
6324 {
6325 int i;
6326
6327 if (!sched_ib.enabled_p)
6328 return;
6329
6330 /* Setup number of prefetched instruction words in the instruction
6331 buffer. */
6332 i = max_insn_size - sched_ib.filled;
6333
6334 while (--i >= 0)
6335 {
6336 if (state_transition (curr_state, sched_ib.insn) >= 0)
6337 /* Pick up scheduler state. */
6338 ++sched_ib.filled;
6339 }
6340 }
6341
6342 /* Return X or Y (depending on OPX_P) operand of INSN,
6343 if it is an integer register, or NULL overwise. */
6344 static rtx
6345 sched_get_reg_operand (rtx_insn *insn, bool opx_p)
6346 {
6347 rtx op = NULL;
6348
6349 if (opx_p)
6350 {
6351 if (get_attr_opx_type (insn) == OPX_TYPE_RN)
6352 {
6353 op = sched_get_operand (insn, true);
6354 gcc_assert (op != NULL);
6355
6356 if (!reload_completed && !REG_P (op))
6357 return NULL;
6358 }
6359 }
6360 else
6361 {
6362 if (get_attr_opy_type (insn) == OPY_TYPE_RN)
6363 {
6364 op = sched_get_operand (insn, false);
6365 gcc_assert (op != NULL);
6366
6367 if (!reload_completed && !REG_P (op))
6368 return NULL;
6369 }
6370 }
6371
6372 return op;
6373 }
6374
6375 /* Return true, if X or Y (depending on OPX_P) operand of INSN
6376 is a MEM. */
6377 static bool
6378 sched_mem_operand_p (rtx_insn *insn, bool opx_p)
6379 {
6380 switch (sched_get_opxy_mem_type (insn, opx_p))
6381 {
6382 case OP_TYPE_MEM1:
6383 case OP_TYPE_MEM6:
6384 return true;
6385
6386 default:
6387 return false;
6388 }
6389 }
6390
6391 /* Return X or Y (depending on OPX_P) operand of INSN,
6392 if it is a MEM, or NULL overwise. */
6393 static rtx
6394 sched_get_mem_operand (rtx_insn *insn, bool must_read_p, bool must_write_p)
6395 {
6396 bool opx_p;
6397 bool opy_p;
6398
6399 opx_p = false;
6400 opy_p = false;
6401
6402 if (must_read_p)
6403 {
6404 opx_p = true;
6405 opy_p = true;
6406 }
6407
6408 if (must_write_p)
6409 {
6410 opx_p = true;
6411 opy_p = false;
6412 }
6413
6414 if (opy_p && sched_mem_operand_p (insn, false))
6415 return sched_get_operand (insn, false);
6416
6417 if (opx_p && sched_mem_operand_p (insn, true))
6418 return sched_get_operand (insn, true);
6419
6420 gcc_unreachable ();
6421 return NULL;
6422 }
6423
6424 /* Return non-zero if PRO modifies register used as part of
6425 address in CON. */
6426 int
6427 m68k_sched_address_bypass_p (rtx_insn *pro, rtx_insn *con)
6428 {
6429 rtx pro_x;
6430 rtx con_mem_read;
6431
6432 pro_x = sched_get_reg_operand (pro, true);
6433 if (pro_x == NULL)
6434 return 0;
6435
6436 con_mem_read = sched_get_mem_operand (con, true, false);
6437 gcc_assert (con_mem_read != NULL);
6438
6439 if (reg_mentioned_p (pro_x, con_mem_read))
6440 return 1;
6441
6442 return 0;
6443 }
6444
6445 /* Helper function for m68k_sched_indexed_address_bypass_p.
6446 if PRO modifies register used as index in CON,
6447 return scale of indexed memory access in CON. Return zero overwise. */
6448 static int
6449 sched_get_indexed_address_scale (rtx_insn *pro, rtx_insn *con)
6450 {
6451 rtx reg;
6452 rtx mem;
6453 struct m68k_address address;
6454
6455 reg = sched_get_reg_operand (pro, true);
6456 if (reg == NULL)
6457 return 0;
6458
6459 mem = sched_get_mem_operand (con, true, false);
6460 gcc_assert (mem != NULL && MEM_P (mem));
6461
6462 if (!m68k_decompose_address (GET_MODE (mem), XEXP (mem, 0), reload_completed,
6463 &address))
6464 gcc_unreachable ();
6465
6466 if (REGNO (reg) == REGNO (address.index))
6467 {
6468 gcc_assert (address.scale != 0);
6469 return address.scale;
6470 }
6471
6472 return 0;
6473 }
6474
6475 /* Return non-zero if PRO modifies register used
6476 as index with scale 2 or 4 in CON. */
6477 int
6478 m68k_sched_indexed_address_bypass_p (rtx_insn *pro, rtx_insn *con)
6479 {
6480 gcc_assert (sched_cfv4_bypass_data.pro == NULL
6481 && sched_cfv4_bypass_data.con == NULL
6482 && sched_cfv4_bypass_data.scale == 0);
6483
6484 switch (sched_get_indexed_address_scale (pro, con))
6485 {
6486 case 1:
6487 /* We can't have a variable latency bypass, so
6488 remember to adjust the insn cost in adjust_cost hook. */
6489 sched_cfv4_bypass_data.pro = pro;
6490 sched_cfv4_bypass_data.con = con;
6491 sched_cfv4_bypass_data.scale = 1;
6492 return 0;
6493
6494 case 2:
6495 case 4:
6496 return 1;
6497
6498 default:
6499 return 0;
6500 }
6501 }
6502
6503 /* We generate a two-instructions program at M_TRAMP :
6504 movea.l &CHAIN_VALUE,%a0
6505 jmp FNADDR
6506 where %a0 can be modified by changing STATIC_CHAIN_REGNUM. */
6507
6508 static void
6509 m68k_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
6510 {
6511 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
6512 rtx mem;
6513
6514 gcc_assert (ADDRESS_REGNO_P (STATIC_CHAIN_REGNUM));
6515
6516 mem = adjust_address (m_tramp, HImode, 0);
6517 emit_move_insn (mem, GEN_INT(0x207C + ((STATIC_CHAIN_REGNUM-8) << 9)));
6518 mem = adjust_address (m_tramp, SImode, 2);
6519 emit_move_insn (mem, chain_value);
6520
6521 mem = adjust_address (m_tramp, HImode, 6);
6522 emit_move_insn (mem, GEN_INT(0x4EF9));
6523 mem = adjust_address (m_tramp, SImode, 8);
6524 emit_move_insn (mem, fnaddr);
6525
6526 FINALIZE_TRAMPOLINE (XEXP (m_tramp, 0));
6527 }
6528
6529 /* On the 68000, the RTS insn cannot pop anything.
6530 On the 68010, the RTD insn may be used to pop them if the number
6531 of args is fixed, but if the number is variable then the caller
6532 must pop them all. RTD can't be used for library calls now
6533 because the library is compiled with the Unix compiler.
6534 Use of RTD is a selectable option, since it is incompatible with
6535 standard Unix calling sequences. If the option is not selected,
6536 the caller must always pop the args. */
6537
6538 static poly_int64
6539 m68k_return_pops_args (tree fundecl, tree funtype, poly_int64 size)
6540 {
6541 return ((TARGET_RTD
6542 && (!fundecl
6543 || TREE_CODE (fundecl) != IDENTIFIER_NODE)
6544 && (!stdarg_p (funtype)))
6545 ? (HOST_WIDE_INT) size : 0);
6546 }
6547
6548 /* Make sure everything's fine if we *don't* have a given processor.
6549 This assumes that putting a register in fixed_regs will keep the
6550 compiler's mitts completely off it. We don't bother to zero it out
6551 of register classes. */
6552
6553 static void
6554 m68k_conditional_register_usage (void)
6555 {
6556 int i;
6557 HARD_REG_SET x;
6558 if (!TARGET_HARD_FLOAT)
6559 {
6560 COPY_HARD_REG_SET (x, reg_class_contents[(int)FP_REGS]);
6561 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
6562 if (TEST_HARD_REG_BIT (x, i))
6563 fixed_regs[i] = call_used_regs[i] = 1;
6564 }
6565 if (flag_pic)
6566 fixed_regs[PIC_REG] = call_used_regs[PIC_REG] = 1;
6567 }
6568
/* Register out-of-line synchronization libfuncs for accesses up to
   UNITS_PER_WORD bytes wide.  NOTE(review): presumably installed as the
   target's init-libfuncs hook -- confirm against the hook table.  */
static void
m68k_init_sync_libfuncs (void)
{
  init_sync_libfuncs (UNITS_PER_WORD);
}
6574
6575 /* Implements EPILOGUE_USES. All registers are live on exit from an
6576 interrupt routine. */
6577 bool
6578 m68k_epilogue_uses (int regno ATTRIBUTE_UNUSED)
6579 {
6580 return (reload_completed
6581 && (m68k_get_function_kind (current_function_decl)
6582 == m68k_fk_interrupt_handler));
6583 }
6584
6585
6586 /* Implement TARGET_C_EXCESS_PRECISION.
6587
6588 Set the value of FLT_EVAL_METHOD in float.h. When using 68040 fp
6589 instructions, we get proper intermediate rounding, otherwise we
6590 get extended precision results. */
6591
6592 static enum flt_eval_method
6593 m68k_excess_precision (enum excess_precision_type type)
6594 {
6595 switch (type)
6596 {
6597 case EXCESS_PRECISION_TYPE_FAST:
6598 /* The fastest type to promote to will always be the native type,
6599 whether that occurs with implicit excess precision or
6600 otherwise. */
6601 return FLT_EVAL_METHOD_PROMOTE_TO_FLOAT;
6602 case EXCESS_PRECISION_TYPE_STANDARD:
6603 case EXCESS_PRECISION_TYPE_IMPLICIT:
6604 /* Otherwise, the excess precision we want when we are
6605 in a standards compliant mode, and the implicit precision we
6606 provide can be identical. */
6607 if (TARGET_68040 || ! TARGET_68881)
6608 return FLT_EVAL_METHOD_PROMOTE_TO_FLOAT;
6609
6610 return FLT_EVAL_METHOD_PROMOTE_TO_LONG_DOUBLE;
6611 default:
6612 gcc_unreachable ();
6613 }
6614 return FLT_EVAL_METHOD_UNPREDICTABLE;
6615 }
6616
6617 /* Implement PUSH_ROUNDING. On the 680x0, sp@- in a byte insn really pushes
6618 a word. On the ColdFire, sp@- in a byte insn pushes just a byte. */
6619
6620 poly_int64
6621 m68k_push_rounding (poly_int64 bytes)
6622 {
6623 if (TARGET_COLDFIRE)
6624 return bytes;
6625 return (bytes + 1) & ~1;
6626 }
6627
6628 /* Implement TARGET_PROMOTE_FUNCTION_MODE. */
6629
6630 static machine_mode
6631 m68k_promote_function_mode (const_tree type, machine_mode mode,
6632 int *punsignedp ATTRIBUTE_UNUSED,
6633 const_tree fntype ATTRIBUTE_UNUSED,
6634 int for_return)
6635 {
6636 /* Promote libcall arguments narrower than int to match the normal C
6637 ABI (for which promotions are handled via
6638 TARGET_PROMOTE_PROTOTYPES). */
6639 if (type == NULL_TREE && !for_return && (mode == QImode || mode == HImode))
6640 return SImode;
6641 return mode;
6642 }
6643
6644 #include "gt-m68k.h"