/* Subroutines for insn-output.c for Motorola 68000 family.
   Copyright (C) 1987, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
   2001, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "function.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "expr.h"
#include "reload.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "debug.h"
#include "flags.h"
#include "df.h"
/* ??? Need to add a dependency between m68k.o and sched-int.h.  */
#include "sched-int.h"
#include "insn-codes.h"
#include "ggc.h"
#include "opts.h"

enum reg_class regno_reg_class[] =
{
  DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
  DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
  ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
  ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  ADDR_REGS
};


/* The minimum number of integer registers that we want to save with the
   movem instruction.  Using two movel instructions instead of a single
   moveml is about 15% faster for the 68020 and 68030 at no expense in
   code size.  */
#define MIN_MOVEM_REGS 3

/* The minimum number of floating point registers that we want to save
   with the fmovem instruction.  */
#define MIN_FMOVEM_REGS 1

/* Structure describing stack frame layout.  */
struct m68k_frame
{
  /* Stack pointer to frame pointer offset.  */
  HOST_WIDE_INT offset;

  /* Offset of FPU registers.  */
  HOST_WIDE_INT foffset;

  /* Frame size in bytes (rounded up).  */
  HOST_WIDE_INT size;

  /* Data and address register.  */
  int reg_no;
  unsigned int reg_mask;

  /* FPU registers.  */
  int fpu_no;
  unsigned int fpu_mask;

  /* Offsets relative to ARG_POINTER.  */
  HOST_WIDE_INT frame_pointer_offset;
  HOST_WIDE_INT stack_pointer_offset;

  /* Function which the above information refers to.  */
  int funcdef_no;
};

/* Current frame information calculated by m68k_compute_frame_layout().  */
static struct m68k_frame current_frame;

/* Structure describing an m68k address.

   If CODE is UNKNOWN, the address is BASE + INDEX * SCALE + OFFSET,
   with null fields evaluating to 0.  Here:

   - BASE satisfies m68k_legitimate_base_reg_p
   - INDEX satisfies m68k_legitimate_index_reg_p
   - OFFSET satisfies m68k_legitimate_constant_address_p

   INDEX is either HImode or SImode.  The other fields are SImode.

   If CODE is PRE_DEC, the address is -(BASE).  If CODE is POST_INC,
   the address is (BASE)+.  */
struct m68k_address {
  enum rtx_code code;
  rtx base;
  rtx index;
  rtx offset;
  int scale;
};
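
/* For example (illustrative): the 68020 address (8,%a0,%d1.l*4) would be
   described with CODE = UNKNOWN, BASE = %a0, INDEX = %d1, SCALE = 4 and
   OFFSET = 8.  */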

static int m68k_sched_adjust_cost (rtx, rtx, rtx, int);
static int m68k_sched_issue_rate (void);
static int m68k_sched_variable_issue (FILE *, int, rtx, int);
static void m68k_sched_md_init_global (FILE *, int, int);
static void m68k_sched_md_finish_global (FILE *, int);
static void m68k_sched_md_init (FILE *, int, int);
static void m68k_sched_dfa_pre_advance_cycle (void);
static void m68k_sched_dfa_post_advance_cycle (void);
static int m68k_sched_first_cycle_multipass_dfa_lookahead (void);

static bool m68k_can_eliminate (const int, const int);
static void m68k_conditional_register_usage (void);
static bool m68k_legitimate_address_p (enum machine_mode, rtx, bool);
static bool m68k_handle_option (struct gcc_options *, struct gcc_options *,
				const struct cl_decoded_option *, location_t);
static void m68k_option_override (void);
static rtx find_addr_reg (rtx);
static const char *singlemove_string (rtx *);
static void m68k_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				  HOST_WIDE_INT, tree);
static rtx m68k_struct_value_rtx (tree, int);
static tree m68k_handle_fndecl_attribute (tree *node, tree name,
					  tree args, int flags,
					  bool *no_add_attrs);
static void m68k_compute_frame_layout (void);
static bool m68k_save_reg (unsigned int regno, bool interrupt_handler);
static bool m68k_ok_for_sibcall_p (tree, tree);
static bool m68k_tls_symbol_p (rtx);
static rtx m68k_legitimize_address (rtx, rtx, enum machine_mode);
static bool m68k_rtx_costs (rtx, int, int, int *, bool);
#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
static bool m68k_return_in_memory (const_tree, const_tree);
#endif
static void m68k_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
static void m68k_trampoline_init (rtx, tree, rtx);
static int m68k_return_pops_args (tree, tree, int);
static rtx m68k_delegitimize_address (rtx);
static void m68k_function_arg_advance (CUMULATIVE_ARGS *, enum machine_mode,
				       const_tree, bool);
static rtx m68k_function_arg (CUMULATIVE_ARGS *, enum machine_mode,
			      const_tree, bool);
\f

/* Specify the identification number of the library being built.  */
const char *m68k_library_id_string = "_current_shared_library_a5_offset_";
\f
/* Initialize the GCC target structure.  */

#if INT_OP_GROUP == INT_OP_DOT_WORD
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
#endif

#if INT_OP_GROUP == INT_OP_NO_DOT
#undef TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tbyte\t"
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tshort\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tlong\t"
#endif

#if INT_OP_GROUP == INT_OP_DC
#undef TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tdc.b\t"
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tdc.w\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tdc.l\t"
#endif

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK m68k_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_ASM_FILE_START_APP_OFF
#define TARGET_ASM_FILE_START_APP_OFF true

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS m68k_legitimize_address

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST m68k_sched_adjust_cost

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE m68k_sched_issue_rate

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE m68k_sched_variable_issue

#undef TARGET_SCHED_INIT_GLOBAL
#define TARGET_SCHED_INIT_GLOBAL m68k_sched_md_init_global

#undef TARGET_SCHED_FINISH_GLOBAL
#define TARGET_SCHED_FINISH_GLOBAL m68k_sched_md_finish_global

#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT m68k_sched_md_init

#undef TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE
#define TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE m68k_sched_dfa_pre_advance_cycle

#undef TARGET_SCHED_DFA_POST_ADVANCE_CYCLE
#define TARGET_SCHED_DFA_POST_ADVANCE_CYCLE m68k_sched_dfa_post_advance_cycle

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
  m68k_sched_first_cycle_multipass_dfa_lookahead

#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION m68k_handle_option

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE m68k_option_override

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS m68k_rtx_costs

#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE m68k_attribute_table

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX m68k_struct_value_rtx

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM m68k_illegitimate_symbolic_constant_p

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL m68k_ok_for_sibcall_p

#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY m68k_return_in_memory
#endif

#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS (true)

#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL m68k_output_dwarf_dtprel
#endif

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P m68k_legitimate_address_p

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE m68k_can_eliminate

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE m68k_conditional_register_usage

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT m68k_trampoline_init

#undef TARGET_RETURN_POPS_ARGS
#define TARGET_RETURN_POPS_ARGS m68k_return_pops_args

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS m68k_delegitimize_address

#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG m68k_function_arg

#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE m68k_function_arg_advance

static const struct attribute_spec m68k_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "interrupt", 0, 0, true, false, false, m68k_handle_fndecl_attribute,
    false },
  { "interrupt_handler", 0, 0, true, false, false,
    m68k_handle_fndecl_attribute, false },
  { "interrupt_thread", 0, 0, true, false, false,
    m68k_handle_fndecl_attribute, false },
  { NULL, 0, 0, false, false, false, NULL, false }
};

struct gcc_target targetm = TARGET_INITIALIZER;
\f
/* Base flags for 68k ISAs.  */
#define FL_FOR_isa_00    FL_ISA_68000
#define FL_FOR_isa_10    (FL_FOR_isa_00 | FL_ISA_68010)
/* FL_68881 controls the default setting of -m68881.  gcc has traditionally
   generated 68881 code for 68020 and 68030 targets unless explicitly told
   not to.  */
#define FL_FOR_isa_20    (FL_FOR_isa_10 | FL_ISA_68020 \
			  | FL_BITFIELD | FL_68881)
#define FL_FOR_isa_40    (FL_FOR_isa_20 | FL_ISA_68040)
#define FL_FOR_isa_cpu32 (FL_FOR_isa_10 | FL_ISA_68020)
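/* Expanding the definitions above (for reference), FL_FOR_isa_40 is
   FL_ISA_68000 | FL_ISA_68010 | FL_ISA_68020 | FL_BITFIELD | FL_68881
   | FL_ISA_68040; each ISA builds cumulatively on its predecessor.  */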

/* Base flags for ColdFire ISAs.  */
#define FL_FOR_isa_a     (FL_COLDFIRE | FL_ISA_A)
#define FL_FOR_isa_aplus (FL_FOR_isa_a | FL_ISA_APLUS | FL_CF_USP)
/* Note ISA_B doesn't necessarily include USP (user stack pointer) support.  */
#define FL_FOR_isa_b     (FL_FOR_isa_a | FL_ISA_B | FL_CF_HWDIV)
/* ISA_C is not upwardly compatible with ISA_B.  */
#define FL_FOR_isa_c     (FL_FOR_isa_a | FL_ISA_C | FL_CF_USP)

enum m68k_isa
{
  /* Traditional 68000 instruction sets.  */
  isa_00,
  isa_10,
  isa_20,
  isa_40,
  isa_cpu32,
  /* ColdFire instruction set variants.  */
  isa_a,
  isa_aplus,
  isa_b,
  isa_c,
  isa_max
};

/* Information about one of the -march, -mcpu or -mtune arguments.  */
struct m68k_target_selection
{
  /* The argument being described.  */
  const char *name;

  /* For -mcpu, this is the device selected by the option.
     For -mtune and -march, it is a representative device
     for the microarchitecture or ISA respectively.  */
  enum target_device device;

  /* The M68K_DEVICE fields associated with DEVICE.  See the comment
     in m68k-devices.def for details.  FAMILY is only valid for -mcpu.  */
  const char *family;
  enum uarch_type microarch;
  enum m68k_isa isa;
  unsigned long flags;
};

/* A list of all devices in m68k-devices.def.  Used for -mcpu selection.  */
static const struct m68k_target_selection all_devices[] =
{
#define M68K_DEVICE(NAME,ENUM_VALUE,FAMILY,MULTILIB,MICROARCH,ISA,FLAGS) \
  { NAME, ENUM_VALUE, FAMILY, u##MICROARCH, ISA, FLAGS | FL_FOR_##ISA },
#include "m68k-devices.def"
#undef M68K_DEVICE
  { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
};

/* A list of all ISAs, mapping each one to a representative device.
   Used for -march selection.  */
static const struct m68k_target_selection all_isas[] =
{
  { "68000",    m68000,     NULL, u68000,   isa_00,    FL_FOR_isa_00 },
  { "68010",    m68010,     NULL, u68010,   isa_10,    FL_FOR_isa_10 },
  { "68020",    m68020,     NULL, u68020,   isa_20,    FL_FOR_isa_20 },
  { "68030",    m68030,     NULL, u68030,   isa_20,    FL_FOR_isa_20 },
  { "68040",    m68040,     NULL, u68040,   isa_40,    FL_FOR_isa_40 },
  { "68060",    m68060,     NULL, u68060,   isa_40,    FL_FOR_isa_40 },
  { "cpu32",    cpu32,      NULL, ucpu32,   isa_20,    FL_FOR_isa_cpu32 },
  { "isaa",     mcf5206e,   NULL, ucfv2,    isa_a,     (FL_FOR_isa_a
							| FL_CF_HWDIV) },
  { "isaaplus", mcf5271,    NULL, ucfv2,    isa_aplus, (FL_FOR_isa_aplus
							| FL_CF_HWDIV) },
  { "isab",     mcf5407,    NULL, ucfv4,    isa_b,     FL_FOR_isa_b },
  { "isac",     unk_device, NULL, ucfv4,    isa_c,     (FL_FOR_isa_c
							| FL_CF_HWDIV) },
  { NULL,       unk_device, NULL, unk_arch, isa_max,   0 }
};

/* A list of all microarchitectures, mapping each one to a representative
   device.  Used for -mtune selection.  */
static const struct m68k_target_selection all_microarchs[] =
{
  { "68000",    m68000,     NULL, u68000,    isa_00,  FL_FOR_isa_00 },
  { "68010",    m68010,     NULL, u68010,    isa_10,  FL_FOR_isa_10 },
  { "68020",    m68020,     NULL, u68020,    isa_20,  FL_FOR_isa_20 },
  { "68020-40", m68020,     NULL, u68020_40, isa_20,  FL_FOR_isa_20 },
  { "68020-60", m68020,     NULL, u68020_60, isa_20,  FL_FOR_isa_20 },
  { "68030",    m68030,     NULL, u68030,    isa_20,  FL_FOR_isa_20 },
  { "68040",    m68040,     NULL, u68040,    isa_40,  FL_FOR_isa_40 },
  { "68060",    m68060,     NULL, u68060,    isa_40,  FL_FOR_isa_40 },
  { "cpu32",    cpu32,      NULL, ucpu32,    isa_20,  FL_FOR_isa_cpu32 },
  { "cfv1",     mcf51qe,    NULL, ucfv1,     isa_c,   FL_FOR_isa_c },
  { "cfv2",     mcf5206,    NULL, ucfv2,     isa_a,   FL_FOR_isa_a },
  { "cfv3",     mcf5307,    NULL, ucfv3,     isa_a,   (FL_FOR_isa_a
						       | FL_CF_HWDIV) },
  { "cfv4",     mcf5407,    NULL, ucfv4,     isa_b,   FL_FOR_isa_b },
  { "cfv4e",    mcf547x,    NULL, ucfv4e,    isa_b,   (FL_FOR_isa_b
						       | FL_CF_USP
						       | FL_CF_EMAC
						       | FL_CF_FPU) },
  { NULL,       unk_device, NULL, unk_arch,  isa_max, 0 }
};
\f
/* The entries associated with the -mcpu, -march and -mtune settings,
   or null for options that have not been used.  */
const struct m68k_target_selection *m68k_cpu_entry;
const struct m68k_target_selection *m68k_arch_entry;
const struct m68k_target_selection *m68k_tune_entry;

/* Which CPU we are generating code for.  */
enum target_device m68k_cpu;

/* Which microarchitecture to tune for.  */
enum uarch_type m68k_tune;

/* Which FPU to use.  */
enum fpu_type m68k_fpu;

/* The set of FL_* flags that apply to the target processor.  */
unsigned int m68k_cpu_flags;

/* The set of FL_* flags that apply to the processor to be tuned for.  */
unsigned int m68k_tune_flags;

/* Asm templates for calling or jumping to an arbitrary symbolic address,
   or NULL if such calls or jumps are not supported.  The address is held
   in operand 0.  */
const char *m68k_symbolic_call;
const char *m68k_symbolic_jump;

/* Enum variable that corresponds to m68k_symbolic_call values.  */
enum M68K_SYMBOLIC_CALL m68k_symbolic_call_var;

\f
/* See whether TABLE has an entry with name NAME.  Return true and
   store the entry in *ENTRY if so, otherwise return false and
   leave *ENTRY alone.  */

static bool
m68k_find_selection (const struct m68k_target_selection **entry,
		     const struct m68k_target_selection *table,
		     const char *name)
{
  size_t i;

  for (i = 0; table[i].name; i++)
    if (strcmp (table[i].name, name) == 0)
      {
	*entry = table + i;
	return true;
      }
  return false;
}

/* Implement TARGET_HANDLE_OPTION.  */

static bool
m68k_handle_option (struct gcc_options *opts, struct gcc_options *opts_set,
		    const struct cl_decoded_option *decoded,
		    location_t loc ATTRIBUTE_UNUSED)
{
  size_t code = decoded->opt_index;
  const char *arg = decoded->arg;
  int value = decoded->value;

  gcc_assert (opts == &global_options);
  gcc_assert (opts_set == &global_options_set);

  switch (code)
    {
    case OPT_march_:
      return m68k_find_selection (&m68k_arch_entry, all_isas, arg);

    case OPT_mcpu_:
      return m68k_find_selection (&m68k_cpu_entry, all_devices, arg);

    case OPT_mtune_:
      return m68k_find_selection (&m68k_tune_entry, all_microarchs, arg);

    case OPT_m5200:
      return m68k_find_selection (&m68k_cpu_entry, all_devices, "5206");

    case OPT_m5206e:
      return m68k_find_selection (&m68k_cpu_entry, all_devices, "5206e");

    case OPT_m528x:
      return m68k_find_selection (&m68k_cpu_entry, all_devices, "528x");

    case OPT_m5307:
      return m68k_find_selection (&m68k_cpu_entry, all_devices, "5307");

    case OPT_m5407:
      return m68k_find_selection (&m68k_cpu_entry, all_devices, "5407");

    case OPT_mcfv4e:
      return m68k_find_selection (&m68k_cpu_entry, all_devices, "547x");

    case OPT_m68000:
    case OPT_mc68000:
      return m68k_find_selection (&m68k_cpu_entry, all_devices, "68000");

    case OPT_m68010:
      return m68k_find_selection (&m68k_cpu_entry, all_devices, "68010");

    case OPT_m68020:
    case OPT_mc68020:
      return m68k_find_selection (&m68k_cpu_entry, all_devices, "68020");

    case OPT_m68020_40:
      return (m68k_find_selection (&m68k_tune_entry, all_microarchs,
				   "68020-40")
	      && m68k_find_selection (&m68k_cpu_entry, all_devices, "68020"));

    case OPT_m68020_60:
      return (m68k_find_selection (&m68k_tune_entry, all_microarchs,
				   "68020-60")
	      && m68k_find_selection (&m68k_cpu_entry, all_devices, "68020"));

    case OPT_m68030:
      return m68k_find_selection (&m68k_cpu_entry, all_devices, "68030");

    case OPT_m68040:
      return m68k_find_selection (&m68k_cpu_entry, all_devices, "68040");

    case OPT_m68060:
      return m68k_find_selection (&m68k_cpu_entry, all_devices, "68060");

    case OPT_m68302:
      return m68k_find_selection (&m68k_cpu_entry, all_devices, "68302");

    case OPT_m68332:
    case OPT_mcpu32:
      return m68k_find_selection (&m68k_cpu_entry, all_devices, "68332");

    case OPT_mshared_library_id_:
      if (value > MAX_LIBRARY_ID)
	error ("-mshared-library-id=%s is not between 0 and %d",
	       arg, MAX_LIBRARY_ID);
      else
	{
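	  /* Map library ID N to the A5 offset -4 * (N + 1): ID 0 yields
	     "-4", ID 1 yields "-8", and so on (each slot is 4 bytes).  */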
	  char *tmp;
	  asprintf (&tmp, "%d", (value * -4) - 4);
	  m68k_library_id_string = tmp;
	}
      return true;

    default:
      return true;
    }
}

/* Implement TARGET_OPTION_OVERRIDE.  */

static void
m68k_option_override (void)
{
  const struct m68k_target_selection *entry;
  unsigned long target_mask;

  /* User can choose:

     -mcpu=
     -march=
     -mtune=

     -march=ARCH should generate code that runs any processor
     implementing architecture ARCH.  -mcpu=CPU should override -march
     and should generate code that runs on processor CPU, making free
     use of any instructions that CPU understands.  -mtune=UARCH applies
     on top of -mcpu or -march and optimizes the code for UARCH.  It does
     not change the target architecture.  */
  if (m68k_cpu_entry)
    {
      /* Complain if the -march setting is for a different microarchitecture,
	 or includes flags that the -mcpu setting doesn't.  */
      if (m68k_arch_entry
	  && (m68k_arch_entry->microarch != m68k_cpu_entry->microarch
	      || (m68k_arch_entry->flags & ~m68k_cpu_entry->flags) != 0))
	warning (0, "-mcpu=%s conflicts with -march=%s",
		 m68k_cpu_entry->name, m68k_arch_entry->name);

      entry = m68k_cpu_entry;
    }
  else
    entry = m68k_arch_entry;

  if (!entry)
    entry = all_devices + TARGET_CPU_DEFAULT;

  m68k_cpu_flags = entry->flags;

  /* Use the architecture setting to derive default values for
     certain flags.  */
  target_mask = 0;

  /* ColdFire is lenient about alignment.  */
  if (!TARGET_COLDFIRE)
    target_mask |= MASK_STRICT_ALIGNMENT;

  if ((m68k_cpu_flags & FL_BITFIELD) != 0)
    target_mask |= MASK_BITFIELD;
  if ((m68k_cpu_flags & FL_CF_HWDIV) != 0)
    target_mask |= MASK_CF_HWDIV;
  if ((m68k_cpu_flags & (FL_68881 | FL_CF_FPU)) != 0)
    target_mask |= MASK_HARD_FLOAT;
  target_flags |= target_mask & ~target_flags_explicit;

  /* Set the directly-usable versions of the -mcpu and -mtune settings.  */
  m68k_cpu = entry->device;
  if (m68k_tune_entry)
    {
      m68k_tune = m68k_tune_entry->microarch;
      m68k_tune_flags = m68k_tune_entry->flags;
    }
#ifdef M68K_DEFAULT_TUNE
  else if (!m68k_cpu_entry && !m68k_arch_entry)
    {
      enum target_device dev;
      dev = all_microarchs[M68K_DEFAULT_TUNE].device;
      m68k_tune_flags = all_devices[dev].flags;
    }
#endif
  else
    {
      m68k_tune = entry->microarch;
      m68k_tune_flags = entry->flags;
    }

  /* Set the type of FPU.  */
  m68k_fpu = (!TARGET_HARD_FLOAT ? FPUTYPE_NONE
	      : (m68k_cpu_flags & FL_COLDFIRE) != 0 ? FPUTYPE_COLDFIRE
	      : FPUTYPE_68881);

  /* Sanity check: -msep-data and -mid-shared-library may not be
     specified together, since the combination makes no sense.  */
  if (TARGET_SEP_DATA && TARGET_ID_SHARED_LIBRARY)
    error ("cannot specify both -msep-data and -mid-shared-library");

  /* If we're generating code for a separate A5-relative data segment,
     we must enable -fPIC as well.  This could perhaps be relaxed to
     -fpic, but that has not been tested properly.  */
  if (TARGET_SEP_DATA || TARGET_ID_SHARED_LIBRARY)
    flag_pic = 2;

  /* -mpcrel -fPIC uses 32-bit pc-relative displacements.  Raise an
     error if the target does not support them.  */
  if (TARGET_PCREL && !TARGET_68020 && flag_pic == 2)
    error ("-mpcrel -fPIC is not currently supported on selected cpu");

  /* ??? A historic way of turning on pic, or is this intended to
     be an embedded thing that doesn't have the same name binding
     significance that it does on hosted ELF systems?  */
  if (TARGET_PCREL && flag_pic == 0)
    flag_pic = 1;

  if (!flag_pic)
    {
      m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_JSR;

      m68k_symbolic_jump = "jra %a0";
    }
  else if (TARGET_ID_SHARED_LIBRARY)
    /* All addresses must be loaded from the GOT.  */
    ;
  else if (TARGET_68020 || TARGET_ISAB || TARGET_ISAC)
    {
      if (TARGET_PCREL)
	m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_C;
      else
	m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_P;

      if (TARGET_ISAC)
	/* No unconditional long branch */;
      else if (TARGET_PCREL)
	m68k_symbolic_jump = "bra%.l %c0";
      else
	m68k_symbolic_jump = "bra%.l %p0";
      /* Turn off function cse if we are doing PIC.  We always want
	 function call to be done as `bsr foo@PLTPC'.  */
      /* ??? It's traditional to do this for -mpcrel too, but it isn't
	 clear how intentional that is.  */
      flag_no_function_cse = 1;
    }

  switch (m68k_symbolic_call_var)
    {
    case M68K_SYMBOLIC_CALL_JSR:
      m68k_symbolic_call = "jsr %a0";
      break;

    case M68K_SYMBOLIC_CALL_BSR_C:
      m68k_symbolic_call = "bsr%.l %c0";
      break;

    case M68K_SYMBOLIC_CALL_BSR_P:
      m68k_symbolic_call = "bsr%.l %p0";
      break;

    case M68K_SYMBOLIC_CALL_NONE:
      gcc_assert (m68k_symbolic_call == NULL);
      break;

    default:
      gcc_unreachable ();
    }

#ifndef ASM_OUTPUT_ALIGN_WITH_NOP
  if (align_labels > 2)
    {
      warning (0, "-falign-labels=%d is not supported", align_labels);
      align_labels = 0;
    }
  if (align_loops > 2)
    {
      warning (0, "-falign-loops=%d is not supported", align_loops);
      align_loops = 0;
    }
#endif

  SUBTARGET_OVERRIDE_OPTIONS;

  /* Setup scheduling options.  */
  if (TUNE_CFV1)
    m68k_sched_cpu = CPU_CFV1;
  else if (TUNE_CFV2)
    m68k_sched_cpu = CPU_CFV2;
  else if (TUNE_CFV3)
    m68k_sched_cpu = CPU_CFV3;
  else if (TUNE_CFV4)
    m68k_sched_cpu = CPU_CFV4;
  else
    {
      m68k_sched_cpu = CPU_UNKNOWN;
      flag_schedule_insns = 0;
      flag_schedule_insns_after_reload = 0;
      flag_modulo_sched = 0;
    }

  if (m68k_sched_cpu != CPU_UNKNOWN)
    {
      if ((m68k_cpu_flags & (FL_CF_EMAC | FL_CF_EMAC_B)) != 0)
	m68k_sched_mac = MAC_CF_EMAC;
      else if ((m68k_cpu_flags & FL_CF_MAC) != 0)
	m68k_sched_mac = MAC_CF_MAC;
      else
	m68k_sched_mac = MAC_NO;
    }
}

/* Generate a macro of the form __mPREFIX_cpu_NAME, where PREFIX is the
   given argument and NAME is the argument passed to -mcpu.  Return NULL
   if -mcpu was not passed.  */

const char *
m68k_cpp_cpu_ident (const char *prefix)
{
  if (!m68k_cpu_entry)
    return NULL;
  return concat ("__m", prefix, "_cpu_", m68k_cpu_entry->name, NULL);
}

/* Generate a macro of the form __mPREFIX_family_NAME, where PREFIX is the
   given argument and NAME is the name of the representative device for
   the -mcpu argument's family.  Return NULL if -mcpu was not passed.  */

const char *
m68k_cpp_cpu_family (const char *prefix)
{
  if (!m68k_cpu_entry)
    return NULL;
  return concat ("__m", prefix, "_family_", m68k_cpu_entry->family, NULL);
}
\f
/* Return m68k_fk_interrupt_handler if FUNC has an "interrupt" or
   "interrupt_handler" attribute and interrupt_thread if FUNC has an
   "interrupt_thread" attribute.  Otherwise, return
   m68k_fk_normal_function.  */

enum m68k_function_kind
m68k_get_function_kind (tree func)
{
  tree a;

  gcc_assert (TREE_CODE (func) == FUNCTION_DECL);

  a = lookup_attribute ("interrupt", DECL_ATTRIBUTES (func));
  if (a != NULL_TREE)
    return m68k_fk_interrupt_handler;

  a = lookup_attribute ("interrupt_handler", DECL_ATTRIBUTES (func));
  if (a != NULL_TREE)
    return m68k_fk_interrupt_handler;

  a = lookup_attribute ("interrupt_thread", DECL_ATTRIBUTES (func));
  if (a != NULL_TREE)
    return m68k_fk_interrupt_thread;

  return m68k_fk_normal_function;
}

/* Handle an attribute requiring a FUNCTION_DECL; arguments as in
   struct attribute_spec.handler.  */
static tree
m68k_handle_fndecl_attribute (tree *node, tree name,
			      tree args ATTRIBUTE_UNUSED,
			      int flags ATTRIBUTE_UNUSED,
			      bool *no_add_attrs)
{
  if (TREE_CODE (*node) != FUNCTION_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
	       name);
      *no_add_attrs = true;
    }

  if (m68k_get_function_kind (*node) != m68k_fk_normal_function)
    {
      error ("multiple interrupt attributes not allowed");
      *no_add_attrs = true;
    }

  if (!TARGET_FIDOA
      && !strcmp (IDENTIFIER_POINTER (name), "interrupt_thread"))
    {
      error ("interrupt_thread is available only on fido");
      *no_add_attrs = true;
    }

  return NULL_TREE;
}

static void
m68k_compute_frame_layout (void)
{
  int regno, saved;
  unsigned int mask;
  enum m68k_function_kind func_kind =
    m68k_get_function_kind (current_function_decl);
  bool interrupt_handler = func_kind == m68k_fk_interrupt_handler;
  bool interrupt_thread = func_kind == m68k_fk_interrupt_thread;

  /* Only compute the frame once per function.
     Don't cache information until reload has been completed.  */
  if (current_frame.funcdef_no == current_function_funcdef_no
      && reload_completed)
    return;

  current_frame.size = (get_frame_size () + 3) & -4;

  mask = saved = 0;

  /* Interrupt thread does not need to save any register.  */
  if (!interrupt_thread)
    for (regno = 0; regno < 16; regno++)
      if (m68k_save_reg (regno, interrupt_handler))
	{
	  mask |= 1 << (regno - D0_REG);
	  saved++;
	}
  current_frame.offset = saved * 4;
  current_frame.reg_no = saved;
  current_frame.reg_mask = mask;

  current_frame.foffset = 0;
  mask = saved = 0;
  if (TARGET_HARD_FLOAT)
    {
      /* Interrupt thread does not need to save any register.  */
      if (!interrupt_thread)
	for (regno = 16; regno < 24; regno++)
	  if (m68k_save_reg (regno, interrupt_handler))
	    {
	      mask |= 1 << (regno - FP0_REG);
	      saved++;
	    }
      current_frame.foffset = saved * TARGET_FP_REG_SIZE;
      current_frame.offset += current_frame.foffset;
    }
  current_frame.fpu_no = saved;
  current_frame.fpu_mask = mask;

  /* Remember what function this frame refers to.  */
  current_frame.funcdef_no = current_function_funcdef_no;
}

/* Worker function for TARGET_CAN_ELIMINATE.  */

bool
m68k_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
{
  return (to == STACK_POINTER_REGNUM ? ! frame_pointer_needed : true);
}

HOST_WIDE_INT
m68k_initial_elimination_offset (int from, int to)
{
  int argptr_offset;
  /* The arg pointer points 8 bytes before the start of the arguments,
     as defined by FIRST_PARM_OFFSET.  This makes it coincident with the
     frame pointer in most frames.  */
  argptr_offset = frame_pointer_needed ? 0 : UNITS_PER_WORD;
  if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
    return argptr_offset;

  m68k_compute_frame_layout ();

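  /* For example (illustrative): with no frame pointer, 8 bytes of saved
     registers and a 16-byte local frame, eliminating ARG_POINTER into
     STACK_POINTER below yields 8 + 16 - 4 = 20.  */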
  gcc_assert (to == STACK_POINTER_REGNUM);
  switch (from)
    {
    case ARG_POINTER_REGNUM:
      return current_frame.offset + current_frame.size - argptr_offset;
    case FRAME_POINTER_REGNUM:
      return current_frame.offset + current_frame.size;
    default:
      gcc_unreachable ();
    }
}

/* Refer to the array `regs_ever_live' to determine which registers
   to save; `regs_ever_live[I]' is nonzero if register number I
   is ever used in the function.  This function is responsible for
   knowing which registers should not be saved even if used.
   Return true if we need to save REGNO.  */

static bool
m68k_save_reg (unsigned int regno, bool interrupt_handler)
{
  if (flag_pic && regno == PIC_REG)
    {
      if (crtl->saves_all_registers)
	return true;
      if (crtl->uses_pic_offset_table)
	return true;
      /* Reload may introduce constant pool references into a function
	 that previously didn't need a PIC register.  Note that the test
	 above will not catch that case because we will only set
	 crtl->uses_pic_offset_table when emitting
	 the address reloads.  */
      if (crtl->uses_const_pool)
	return true;
    }

  if (crtl->calls_eh_return)
    {
      unsigned int i;
      for (i = 0; ; i++)
	{
	  unsigned int test = EH_RETURN_DATA_REGNO (i);
	  if (test == INVALID_REGNUM)
	    break;
	  if (test == regno)
	    return true;
	}
    }

  /* Fixed regs we never touch.  */
  if (fixed_regs[regno])
    return false;

  /* The frame pointer (if it is such) is handled specially.  */
  if (regno == FRAME_POINTER_REGNUM && frame_pointer_needed)
    return false;

  /* Interrupt handlers must also save call_used_regs
     if they are live or when calling nested functions.  */
  if (interrupt_handler)
    {
      if (df_regs_ever_live_p (regno))
	return true;

      if (!current_function_is_leaf && call_used_regs[regno])
	return true;
    }

  /* Never need to save registers that aren't touched.  */
  if (!df_regs_ever_live_p (regno))
    return false;

  /* Otherwise save everything that isn't call-clobbered.  */
  return !call_used_regs[regno];
}

/* Emit RTL for a MOVEM or FMOVEM instruction.  BASE + OFFSET represents
   the lowest memory address.  COUNT is the number of registers to be
   moved, with register REGNO + I being moved if bit I of MASK is set.
   STORE_P specifies the direction of the move and ADJUST_STACK_P says
   whether or not this is pre-decrement (if STORE_P) or post-increment
   (if !STORE_P) operation.  */
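/* For example (illustrative): m68k_emit_movem (stack_pointer_rtx, -12, 3,
   D0_REG, 0x1c, true, true) describes saving %d2-%d4 below the stack
   pointer while predecrementing it by 12 bytes, i.e. a
   moveml %d2-%d4,-(%sp) style store.  */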

static rtx
m68k_emit_movem (rtx base, HOST_WIDE_INT offset,
		 unsigned int count, unsigned int regno,
		 unsigned int mask, bool store_p, bool adjust_stack_p)
{
  int i;
  rtx body, addr, src, operands[2];
  enum machine_mode mode;

  body = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (adjust_stack_p + count));
  mode = reg_raw_mode[regno];
  i = 0;

  if (adjust_stack_p)
    {
      src = plus_constant (base, (count
				  * GET_MODE_SIZE (mode)
				  * (HOST_WIDE_INT) (store_p ? -1 : 1)));
      XVECEXP (body, 0, i++) = gen_rtx_SET (VOIDmode, base, src);
    }

  for (; mask != 0; mask >>= 1, regno++)
    if (mask & 1)
      {
	addr = plus_constant (base, offset);
	operands[!store_p] = gen_frame_mem (mode, addr);
	operands[store_p] = gen_rtx_REG (mode, regno);
	XVECEXP (body, 0, i++)
	  = gen_rtx_SET (VOIDmode, operands[0], operands[1]);
	offset += GET_MODE_SIZE (mode);
      }
  gcc_assert (i == XVECLEN (body, 0));

  return emit_insn (body);
}

/* Make INSN a frame-related instruction.  */

static void
m68k_set_frame_related (rtx insn)
{
  rtx body;
  int i;

  RTX_FRAME_RELATED_P (insn) = 1;
  body = PATTERN (insn);
  if (GET_CODE (body) == PARALLEL)
    for (i = 0; i < XVECLEN (body, 0); i++)
      RTX_FRAME_RELATED_P (XVECEXP (body, 0, i)) = 1;
}

/* Emit RTL for the "prologue" define_expand.  */

void
m68k_expand_prologue (void)
{
  HOST_WIDE_INT fsize_with_regs;
  rtx limit, src, dest;

  m68k_compute_frame_layout ();

  /* If the stack limit is a symbol, we can check it here,
     before actually allocating the space.  */
  if (crtl->limit_stack
      && GET_CODE (stack_limit_rtx) == SYMBOL_REF)
    {
      limit = plus_constant (stack_limit_rtx, current_frame.size + 4);
      if (!LEGITIMATE_CONSTANT_P (limit))
	{
	  emit_move_insn (gen_rtx_REG (Pmode, D0_REG), limit);
	  limit = gen_rtx_REG (Pmode, D0_REG);
	}
      emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode,
					    stack_pointer_rtx, limit),
			       stack_pointer_rtx, limit,
			       const1_rtx));
    }

  fsize_with_regs = current_frame.size;
  if (TARGET_COLDFIRE)
    {
      /* ColdFire's move multiple instructions do not allow pre-decrement
	 addressing.  Add the size of movem saves to the initial stack
	 allocation instead.  */
      if (current_frame.reg_no >= MIN_MOVEM_REGS)
	fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
      if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
	fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
    }

  if (frame_pointer_needed)
    {
      if (fsize_with_regs == 0 && TUNE_68040)
	{
	  /* On the 68040, two separate moves are faster than link.w 0.  */
	  dest = gen_frame_mem (Pmode,
				gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
	  m68k_set_frame_related (emit_move_insn (dest, frame_pointer_rtx));
	  m68k_set_frame_related (emit_move_insn (frame_pointer_rtx,
						  stack_pointer_rtx));
	}
      else if (fsize_with_regs < 0x8000 || TARGET_68020)
	m68k_set_frame_related
	  (emit_insn (gen_link (frame_pointer_rtx,
				GEN_INT (-4 - fsize_with_regs))));
      else
	{
	  m68k_set_frame_related
	    (emit_insn (gen_link (frame_pointer_rtx, GEN_INT (-4))));
	  m68k_set_frame_related
	    (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-fsize_with_regs))));
	}

      /* If the frame pointer is needed, emit a special barrier that
	 will prevent the scheduler from moving stores to the frame
	 before the stack adjustment.  */
      emit_insn (gen_stack_tie (stack_pointer_rtx, frame_pointer_rtx));
    }
  else if (fsize_with_regs != 0)
    m68k_set_frame_related
      (emit_insn (gen_addsi3 (stack_pointer_rtx,
			      stack_pointer_rtx,
			      GEN_INT (-fsize_with_regs))));

  if (current_frame.fpu_mask)
    {
      gcc_assert (current_frame.fpu_no >= MIN_FMOVEM_REGS);
      if (TARGET_68881)
	m68k_set_frame_related
	  (m68k_emit_movem (stack_pointer_rtx,
			    current_frame.fpu_no * -GET_MODE_SIZE (XFmode),
			    current_frame.fpu_no, FP0_REG,
			    current_frame.fpu_mask, true, true));
      else
	{
	  int offset;

	  /* If we're using moveml to save the integer registers,
	     the stack pointer will point to the bottom of the moveml
	     save area.  Find the stack offset of the first FP register.  */
	  if (current_frame.reg_no < MIN_MOVEM_REGS)
	    offset = 0;
	  else
	    offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
	  m68k_set_frame_related
	    (m68k_emit_movem (stack_pointer_rtx, offset,
			      current_frame.fpu_no, FP0_REG,
			      current_frame.fpu_mask, true, false));
	}
    }

  /* If the stack limit is not a symbol, check it here.
     This has the disadvantage that it may be too late...  */
  if (crtl->limit_stack)
    {
      if (REG_P (stack_limit_rtx))
	emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode, stack_pointer_rtx,
					      stack_limit_rtx),
				 stack_pointer_rtx, stack_limit_rtx,
				 const1_rtx));

      else if (GET_CODE (stack_limit_rtx) != SYMBOL_REF)
	warning (0, "stack limit expression is not supported");
    }

  if (current_frame.reg_no < MIN_MOVEM_REGS)
    {
      /* Store each register separately in the same order moveml does.  */
      int i;

      for (i = 16; i-- > 0; )
	if (current_frame.reg_mask & (1 << i))
	  {
	    src = gen_rtx_REG (SImode, D0_REG + i);
	    dest = gen_frame_mem (SImode,
				  gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
	    m68k_set_frame_related (emit_insn (gen_movsi (dest, src)));
	  }
    }
  else
    {
      if (TARGET_COLDFIRE)
	/* The required register save space has already been allocated.
	   The first register should be stored at (%sp).  */
	m68k_set_frame_related
	  (m68k_emit_movem (stack_pointer_rtx, 0,
			    current_frame.reg_no, D0_REG,
			    current_frame.reg_mask, true, false));
      else
	m68k_set_frame_related
	  (m68k_emit_movem (stack_pointer_rtx,
			    current_frame.reg_no * -GET_MODE_SIZE (SImode),
			    current_frame.reg_no, D0_REG,
			    current_frame.reg_mask, true, true));
    }

  if (!TARGET_SEP_DATA
      && crtl->uses_pic_offset_table)
    emit_insn (gen_load_got (pic_offset_table_rtx));
}
\f
/* Return true if a simple (return) instruction is sufficient for this
   function (i.e. if no epilogue is needed).  */

bool
m68k_use_return_insn (void)
{
  if (!reload_completed || frame_pointer_needed || get_frame_size () != 0)
    return false;

  m68k_compute_frame_layout ();
  return current_frame.offset == 0;
}

/* Emit RTL for the "epilogue" or "sibcall_epilogue" define_expand;
   SIBCALL_P says which.

   The function epilogue should not depend on the current stack pointer!
   It should use the frame pointer only, if there is a frame pointer.
   This is mandatory because of alloca; we also take advantage of it to
   omit stack adjustments before returning.  */

void
m68k_expand_epilogue (bool sibcall_p)
{
  HOST_WIDE_INT fsize, fsize_with_regs;
  bool big, restore_from_sp;

  m68k_compute_frame_layout ();

  fsize = current_frame.size;
  big = false;
  restore_from_sp = false;

  /* FIXME: current_function_is_leaf below is too strong.
     What we really need to know there is if there could be pending
     stack adjustment needed at that point.  */
  restore_from_sp = (!frame_pointer_needed
		     || (!cfun->calls_alloca
			 && current_function_is_leaf));

  /* fsize_with_regs is the size we need to adjust the sp when
     popping the frame.  */
  fsize_with_regs = fsize;
  if (TARGET_COLDFIRE && restore_from_sp)
    {
      /* ColdFire's move multiple instructions do not allow post-increment
	 addressing.  Add the size of movem loads to the final deallocation
	 instead.  */
      if (current_frame.reg_no >= MIN_MOVEM_REGS)
	fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
      if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
	fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
    }

  if (current_frame.offset + fsize >= 0x8000
      && !restore_from_sp
      && (current_frame.reg_mask || current_frame.fpu_mask))
    {
      if (TARGET_COLDFIRE
	  && (current_frame.reg_no >= MIN_MOVEM_REGS
	      || current_frame.fpu_no >= MIN_FMOVEM_REGS))
	{
	  /* ColdFire's move multiple instructions do not support the
	     (d8,Ax,Xi) addressing mode, so we might as well use a normal
	     stack-based restore.  */
	  emit_move_insn (gen_rtx_REG (Pmode, A1_REG),
			  GEN_INT (-(current_frame.offset + fsize)));
	  emit_insn (gen_addsi3 (stack_pointer_rtx,
				 gen_rtx_REG (Pmode, A1_REG),
				 frame_pointer_rtx));
	  restore_from_sp = true;
	}
      else
	{
	  emit_move_insn (gen_rtx_REG (Pmode, A1_REG), GEN_INT (-fsize));
	  fsize = 0;
	  big = true;
	}
    }

  if (current_frame.reg_no < MIN_MOVEM_REGS)
    {
      /* Restore each register separately in the same order moveml does.  */
      int i;
      HOST_WIDE_INT offset;

      offset = current_frame.offset + fsize;
      for (i = 0; i < 16; i++)
	if (current_frame.reg_mask & (1 << i))
	  {
	    rtx addr;

	    if (big)
	      {
		/* Generate the address -OFFSET(%fp,%a1.l).  */
		addr = gen_rtx_REG (Pmode, A1_REG);
		addr = gen_rtx_PLUS (Pmode, addr, frame_pointer_rtx);
		addr = plus_constant (addr, -offset);
	      }
	    else if (restore_from_sp)
	      addr = gen_rtx_POST_INC (Pmode, stack_pointer_rtx);
	    else
	      addr = plus_constant (frame_pointer_rtx, -offset);
	    emit_move_insn (gen_rtx_REG (SImode, D0_REG + i),
			    gen_frame_mem (SImode, addr));
	    offset -= GET_MODE_SIZE (SImode);
	  }
    }
  else if (current_frame.reg_mask)
    {
      if (big)
	m68k_emit_movem (gen_rtx_PLUS (Pmode,
				       gen_rtx_REG (Pmode, A1_REG),
				       frame_pointer_rtx),
			 -(current_frame.offset + fsize),
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false, false);
      else if (restore_from_sp)
	m68k_emit_movem (stack_pointer_rtx, 0,
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false,
			 !TARGET_COLDFIRE);
      else
	m68k_emit_movem (frame_pointer_rtx,
			 -(current_frame.offset + fsize),
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false, false);
    }

  if (current_frame.fpu_no > 0)
    {
      if (big)
	m68k_emit_movem (gen_rtx_PLUS (Pmode,
				       gen_rtx_REG (Pmode, A1_REG),
				       frame_pointer_rtx),
			 -(current_frame.foffset + fsize),
			 current_frame.fpu_no, FP0_REG,
			 current_frame.fpu_mask, false, false);
      else if (restore_from_sp)
	{
	  if (TARGET_COLDFIRE)
	    {
	      int offset;

	      /* If we used moveml to restore the integer registers, the
		 stack pointer will still point to the bottom of the moveml
		 save area.  Find the stack offset of the first FP
		 register.  */
	      if (current_frame.reg_no < MIN_MOVEM_REGS)
		offset = 0;
	      else
		offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
	      m68k_emit_movem (stack_pointer_rtx, offset,
			       current_frame.fpu_no, FP0_REG,
			       current_frame.fpu_mask, false, false);
	    }
	  else
	    m68k_emit_movem (stack_pointer_rtx, 0,
			     current_frame.fpu_no, FP0_REG,
			     current_frame.fpu_mask, false, true);
	}
      else
	m68k_emit_movem (frame_pointer_rtx,
			 -(current_frame.foffset + fsize),
			 current_frame.fpu_no, FP0_REG,
			 current_frame.fpu_mask, false, false);
    }

  if (frame_pointer_needed)
    emit_insn (gen_unlink (frame_pointer_rtx));
  else if (fsize_with_regs)
    emit_insn (gen_addsi3 (stack_pointer_rtx,
			   stack_pointer_rtx,
			   GEN_INT (fsize_with_regs)));

  if (crtl->calls_eh_return)
    emit_insn (gen_addsi3 (stack_pointer_rtx,
			   stack_pointer_rtx,
			   EH_RETURN_STACKADJ_RTX));

  if (!sibcall_p)
    emit_jump_insn (gen_rtx_RETURN (VOIDmode));
}
\f
/* Return true if X is a valid comparison operator for the dbcc
   instruction.

   Note it rejects floating point comparison operators.
   (In the future we could use Fdbcc).

   It also rejects some comparisons when CC_NO_OVERFLOW is set.  */

int
valid_dbcc_comparison_p_2 (rtx x, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (x))
    {
    case EQ: case NE: case GTU: case LTU:
    case GEU: case LEU:
      return 1;

      /* Reject some when CC_NO_OVERFLOW is set.  This may be overly
	 conservative.  */
    case GT: case LT: case GE: case LE:
      return ! (cc_prev_status.flags & CC_NO_OVERFLOW);
    default:
      return 0;
    }
}

/* Return nonzero if flags are currently in the 68881 flag register.  */
int
flags_in_68881 (void)
{
  /* We could add support for these in the future.  */
  return cc_status.flags & CC_IN_68881;
}

/* Return true if PARALLEL contains register REGNO.  */
static bool
m68k_reg_present_p (const_rtx parallel, unsigned int regno)
{
  int i;

  if (REG_P (parallel) && REGNO (parallel) == regno)
    return true;

  if (GET_CODE (parallel) != PARALLEL)
    return false;

  for (i = 0; i < XVECLEN (parallel, 0); ++i)
    {
      const_rtx x;

      x = XEXP (XVECEXP (parallel, 0, i), 0);
      if (REG_P (x) && REGNO (x) == regno)
	return true;
    }

  return false;
}

/* Implement TARGET_FUNCTION_OK_FOR_SIBCALL_P.  */

static bool
m68k_ok_for_sibcall_p (tree decl, tree exp)
{
  enum m68k_function_kind kind;

  /* We cannot use sibcalls for nested functions because we use the
     static chain register for indirect calls.  */
  if (CALL_EXPR_STATIC_CHAIN (exp))
    return false;

  if (!VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
    {
      /* Check that the return value locations are the same.  For
	 example that we aren't returning a value from the sibling in
	 a D0 register but then need to transfer it to an A0 register.  */
      rtx cfun_value;
      rtx call_value;

      cfun_value = FUNCTION_VALUE (TREE_TYPE (DECL_RESULT (cfun->decl)),
				   cfun->decl);
      call_value = FUNCTION_VALUE (TREE_TYPE (exp), decl);

      /* Check that the values are equal or that the result the callee
	 function returns is a superset of what the current function
	 returns.  */
      if (!(rtx_equal_p (cfun_value, call_value)
	    || (REG_P (cfun_value)
		&& m68k_reg_present_p (call_value, REGNO (cfun_value)))))
	return false;
    }

  kind = m68k_get_function_kind (current_function_decl);
  if (kind == m68k_fk_normal_function)
    /* We can always sibcall from a normal function, because it's
       undefined if it is calling an interrupt function.  */
    return true;

  /* Otherwise we can only sibcall if the function kind is known to be
     the same.  */
  if (decl && m68k_get_function_kind (decl) == kind)
    return true;

  return false;
}

/* On the m68k all args are always pushed.  */

static rtx
m68k_function_arg (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
		   enum machine_mode mode ATTRIBUTE_UNUSED,
		   const_tree type ATTRIBUTE_UNUSED,
		   bool named ATTRIBUTE_UNUSED)
{
  return NULL_RTX;
}

static void
m68k_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
			   const_tree type, bool named ATTRIBUTE_UNUSED)
{
  *cum += (mode != BLKmode
	   ? (GET_MODE_SIZE (mode) + 3) & ~3
	   : (int_size_in_bytes (type) + 3) & ~3);
}
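
/* Each argument thus occupies a multiple of 4 bytes of stack space;
   e.g. a 6-byte structure advances CUM by 8 (illustrative).  */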

/* Convert X to a legitimate function call memory reference and return the
   result.  */

rtx
m68k_legitimize_call_address (rtx x)
{
  gcc_assert (MEM_P (x));
  if (call_operand (XEXP (x, 0), VOIDmode))
    return x;
  return replace_equiv_address (x, force_reg (Pmode, XEXP (x, 0)));
}

/* Likewise for sibling calls.  */

rtx
m68k_legitimize_sibcall_address (rtx x)
{
  gcc_assert (MEM_P (x));
  if (sibcall_operand (XEXP (x, 0), VOIDmode))
    return x;

  emit_move_insn (gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM), XEXP (x, 0));
  return replace_equiv_address (x, gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM));
}

/* Convert X to a legitimate address and return it if successful.  Otherwise
   return X.

   For the 68000, we handle X+REG by loading X into a register R and
   using R+REG.  R will go in an address reg and indexing will be used.
   However, if REG is a broken-out memory address or multiplication,
   nothing needs to be done because REG can certainly go in an address reg.  */

static rtx
m68k_legitimize_address (rtx x, rtx oldx, enum machine_mode mode)
{
  if (m68k_tls_symbol_p (x))
    return m68k_legitimize_tls_address (x);

  if (GET_CODE (x) == PLUS)
    {
      int ch = (x) != (oldx);
      int copied = 0;

#define COPY_ONCE(Y) if (!copied) { Y = copy_rtx (Y); copied = ch = 1; }

      if (GET_CODE (XEXP (x, 0)) == MULT)
	{
	  COPY_ONCE (x);
	  XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
	}
      if (GET_CODE (XEXP (x, 1)) == MULT)
	{
	  COPY_ONCE (x);
	  XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
	}
      if (ch)
	{
	  if (GET_CODE (XEXP (x, 1)) == REG
	      && GET_CODE (XEXP (x, 0)) == REG)
	    {
	      if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
		{
		  COPY_ONCE (x);
		  x = force_operand (x, 0);
		}
	      return x;
	    }
	  if (memory_address_p (mode, x))
	    return x;
	}
      if (GET_CODE (XEXP (x, 0)) == REG
	  || (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
	      && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
	      && GET_MODE (XEXP (XEXP (x, 0), 0)) == HImode))
	{
	  rtx temp = gen_reg_rtx (Pmode);
	  rtx val = force_operand (XEXP (x, 1), 0);
	  emit_move_insn (temp, val);
	  COPY_ONCE (x);
	  XEXP (x, 1) = temp;
	  if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
	      && GET_CODE (XEXP (x, 0)) == REG)
	    x = force_operand (x, 0);
	}
      else if (GET_CODE (XEXP (x, 1)) == REG
	       || (GET_CODE (XEXP (x, 1)) == SIGN_EXTEND
		   && GET_CODE (XEXP (XEXP (x, 1), 0)) == REG
		   && GET_MODE (XEXP (XEXP (x, 1), 0)) == HImode))
	{
	  rtx temp = gen_reg_rtx (Pmode);
	  rtx val = force_operand (XEXP (x, 0), 0);
	  emit_move_insn (temp, val);
	  COPY_ONCE (x);
	  XEXP (x, 0) = temp;
	  if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
	      && GET_CODE (XEXP (x, 1)) == REG)
	    x = force_operand (x, 0);
	}
    }

  return x;
}


/* Output a dbCC; jCC sequence.  Note we do not handle the
   floating point version of this sequence (Fdbcc).  We also
   do not handle alternative conditions when CC_NO_OVERFLOW is
   set.  It is assumed that valid_dbcc_comparison_p and flags_in_68881 will
   kick those out before we get here.  */

void
output_dbcc_and_branch (rtx *operands)
{
  switch (GET_CODE (operands[3]))
    {
    case EQ:
      output_asm_insn ("dbeq %0,%l1\n\tjeq %l2", operands);
      break;

    case NE:
      output_asm_insn ("dbne %0,%l1\n\tjne %l2", operands);
      break;

    case GT:
      output_asm_insn ("dbgt %0,%l1\n\tjgt %l2", operands);
      break;

    case GTU:
      output_asm_insn ("dbhi %0,%l1\n\tjhi %l2", operands);
      break;

    case LT:
      output_asm_insn ("dblt %0,%l1\n\tjlt %l2", operands);
      break;

    case LTU:
      output_asm_insn ("dbcs %0,%l1\n\tjcs %l2", operands);
      break;

    case GE:
      output_asm_insn ("dbge %0,%l1\n\tjge %l2", operands);
      break;

    case GEU:
      output_asm_insn ("dbcc %0,%l1\n\tjcc %l2", operands);
      break;

    case LE:
      output_asm_insn ("dble %0,%l1\n\tjle %l2", operands);
      break;

    case LEU:
      output_asm_insn ("dbls %0,%l1\n\tjls %l2", operands);
      break;

    default:
      gcc_unreachable ();
    }

  /* If the decrement is to be done in SImode, then we have
     to compensate for the fact that dbcc decrements in HImode.  */
  switch (GET_MODE (operands[0]))
    {
    case SImode:
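      /* On fall-through the low word has wrapped to 0xffff, so clear it,
	 borrow one from the high word with a full 32-bit decrement, and
	 re-enter the dbcc loop while the counter is still non-negative.  */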
      output_asm_insn ("clr%.w %0\n\tsubq%.l #1,%0\n\tjpl %l1", operands);
      break;

    case HImode:
      break;

    default:
      gcc_unreachable ();
    }
}

const char *
output_scc_di (rtx op, rtx operand1, rtx operand2, rtx dest)
{
  rtx loperands[7];
  enum rtx_code op_code = GET_CODE (op);

  /* This does not produce a useful cc.  */
  CC_STATUS_INIT;

  /* The m68k cmp.l instruction requires operand1 to be a reg as used
     below.  Swap the operands and change the op if these requirements
     are not fulfilled.  */
  if (GET_CODE (operand2) == REG && GET_CODE (operand1) != REG)
    {
      rtx tmp = operand1;

      operand1 = operand2;
      operand2 = tmp;
      op_code = swap_condition (op_code);
    }
  loperands[0] = operand1;
  if (GET_CODE (operand1) == REG)
    loperands[1] = gen_rtx_REG (SImode, REGNO (operand1) + 1);
  else
    loperands[1] = adjust_address (operand1, SImode, 4);
  if (operand2 != const0_rtx)
    {
      loperands[2] = operand2;
      if (GET_CODE (operand2) == REG)
	loperands[3] = gen_rtx_REG (SImode, REGNO (operand2) + 1);
      else
	loperands[3] = adjust_address (operand2, SImode, 4);
    }
  loperands[4] = gen_label_rtx ();
  if (operand2 != const0_rtx)
    output_asm_insn ("cmp%.l %2,%0\n\tjne %l4\n\tcmp%.l %3,%1", loperands);
  else
    {
      if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[0]))
	output_asm_insn ("tst%.l %0", loperands);
      else
	output_asm_insn ("cmp%.w #0,%0", loperands);

      output_asm_insn ("jne %l4", loperands);

      if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[1]))
	output_asm_insn ("tst%.l %1", loperands);
      else
	output_asm_insn ("cmp%.w #0,%1", loperands);
    }

  loperands[5] = dest;

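  /* The high words decide any signed ordering; when they are equal,
     control falls through to an unsigned comparison of the low words.
     Hence the unsigned scc forms (shi, scs, scc, sls) appear before the
     label below and the signed forms after it.  */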
1754 switch (op_code)
1755 {
1756 case EQ:
1757 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1758 CODE_LABEL_NUMBER (loperands[4]));
1759 output_asm_insn ("seq %5", loperands);
1760 break;
1761
1762 case NE:
1763 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1764 CODE_LABEL_NUMBER (loperands[4]));
1765 output_asm_insn ("sne %5", loperands);
1766 break;
1767
1768 case GT:
1769 loperands[6] = gen_label_rtx ();
1770 output_asm_insn ("shi %5\n\tjra %l6", loperands);
1771 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1772 CODE_LABEL_NUMBER (loperands[4]));
1773 output_asm_insn ("sgt %5", loperands);
1774 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1775 CODE_LABEL_NUMBER (loperands[6]));
1776 break;
1777
1778 case GTU:
1779 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1780 CODE_LABEL_NUMBER (loperands[4]));
1781 output_asm_insn ("shi %5", loperands);
1782 break;
1783
1784 case LT:
1785 loperands[6] = gen_label_rtx ();
1786 output_asm_insn ("scs %5\n\tjra %l6", loperands);
1787 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1788 CODE_LABEL_NUMBER (loperands[4]));
1789 output_asm_insn ("slt %5", loperands);
1790 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1791 CODE_LABEL_NUMBER (loperands[6]));
1792 break;
1793
1794 case LTU:
1795 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1796 CODE_LABEL_NUMBER (loperands[4]));
1797 output_asm_insn ("scs %5", loperands);
1798 break;
1799
1800 case GE:
1801 loperands[6] = gen_label_rtx ();
1802 output_asm_insn ("scc %5\n\tjra %l6", loperands);
1803 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1804 CODE_LABEL_NUMBER (loperands[4]));
1805 output_asm_insn ("sge %5", loperands);
1806 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1807 CODE_LABEL_NUMBER (loperands[6]));
1808 break;
1809
1810 case GEU:
1811 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1812 CODE_LABEL_NUMBER (loperands[4]));
1813 output_asm_insn ("scc %5", loperands);
1814 break;
1815
1816 case LE:
1817 loperands[6] = gen_label_rtx ();
1818 output_asm_insn ("sls %5\n\tjra %l6", loperands);
1819 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1820 CODE_LABEL_NUMBER (loperands[4]));
1821 output_asm_insn ("sle %5", loperands);
1822 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1823 CODE_LABEL_NUMBER (loperands[6]));
1824 break;
1825
1826 case LEU:
1827 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1828 CODE_LABEL_NUMBER (loperands[4]));
1829 output_asm_insn ("sls %5", loperands);
1830 break;
1831
1832 default:
1833 gcc_unreachable ();
1834 }
1835 return "";
1836 }
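
/* A sketch of the output of the function above for a 64-bit EQ test of
   %d0/%d1 against zero on a 68020 (register and label names are
   illustrative):

	tst.l %d0
	jne L4
	tst.l %d1
   L4:	seq %d2

   %d2 receives 0xff when the full 64-bit value is zero and 0x00
   otherwise.  */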
1837
1838 const char *
1839 output_btst (rtx *operands, rtx countop, rtx dataop, rtx insn, int signpos)
1840 {
1841 operands[0] = countop;
1842 operands[1] = dataop;
1843
1844 if (GET_CODE (countop) == CONST_INT)
1845 {
1846 register int count = INTVAL (countop);
1847 /* If COUNT is bigger than the size of the storage unit in use,
1848 advance to the containing unit of the same size. */
1849 if (count > signpos)
1850 {
1851 int offset = (count & ~signpos) / 8;
1852 count = count & signpos;
1853 operands[1] = dataop = adjust_address (dataop, QImode, offset);
1854 }
1855 if (count == signpos)
1856 cc_status.flags = CC_NOT_POSITIVE | CC_Z_IN_NOT_N;
1857 else
1858 cc_status.flags = CC_NOT_NEGATIVE | CC_Z_IN_NOT_N;
1859
1860 /* These three statements used to use next_insns_test_no...
1861 but it appears that this should do the same job. */
1862 if (count == 31
1863 && next_insn_tests_no_inequality (insn))
1864 return "tst%.l %1";
1865 if (count == 15
1866 && next_insn_tests_no_inequality (insn))
1867 return "tst%.w %1";
1868 if (count == 7
1869 && next_insn_tests_no_inequality (insn))
1870 return "tst%.b %1";
1871 /* Try to use `movew to ccr' followed by the appropriate branch insn.
1872 On some m68k variants unfortunately that's slower than btst.
1873 On 68000 and higher, that should also work for all HImode operands. */
1874 if (TUNE_CPU32 || TARGET_COLDFIRE || optimize_size)
1875 {
1876 if (count == 3 && DATA_REG_P (operands[1])
1877 && next_insn_tests_no_inequality (insn))
1878 {
1879 cc_status.flags = CC_NOT_NEGATIVE | CC_Z_IN_NOT_N | CC_NO_OVERFLOW;
1880 return "move%.w %1,%%ccr";
1881 }
1882 if (count == 2 && DATA_REG_P (operands[1])
1883 && next_insn_tests_no_inequality (insn))
1884 {
1885 cc_status.flags = CC_NOT_NEGATIVE | CC_INVERTED | CC_NO_OVERFLOW;
1886 return "move%.w %1,%%ccr";
1887 }
1888 /* count == 1 followed by bvc/bvs and
1889 count == 0 followed by bcc/bcs are also possible, but need
1890 m68k-specific CC_Z_IN_NOT_V and CC_Z_IN_NOT_C flags. */
1891 }
1892
1893 cc_status.flags = CC_NOT_NEGATIVE;
1894 }
1895 return "btst %0,%1";
1896 }
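
/* For reference (a sketch, not from the original sources): in the CCR,
   bit 0 is C, bit 1 is V, bit 2 is Z and bit 3 is N. A "move.w %d0,%ccr"
   therefore copies bit 2 of %d0 into Z and bit 3 into N, so a following
   jeq/jne or jpl/jmi tests that bit directly; bits 1 and 0 would land in
   V and C, which needs the extra CC flags mentioned in the comment
   above.  */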
1897 \f
1898 /* Return true if X is a legitimate base register. STRICT_P says
1899 whether we need strict checking. */
1900
1901 bool
1902 m68k_legitimate_base_reg_p (rtx x, bool strict_p)
1903 {
1904 /* Allow SUBREG everywhere we allow REG. This results in better code. */
1905 if (!strict_p && GET_CODE (x) == SUBREG)
1906 x = SUBREG_REG (x);
1907
1908 return (REG_P (x)
1909 && (strict_p
1910 ? REGNO_OK_FOR_BASE_P (REGNO (x))
1911 : REGNO_OK_FOR_BASE_NONSTRICT_P (REGNO (x))));
1912 }
1913
1914 /* Return true if X is a legitimate index register. STRICT_P says
1915 whether we need strict checking. */
1916
1917 bool
1918 m68k_legitimate_index_reg_p (rtx x, bool strict_p)
1919 {
1920 if (!strict_p && GET_CODE (x) == SUBREG)
1921 x = SUBREG_REG (x);
1922
1923 return (REG_P (x)
1924 && (strict_p
1925 ? REGNO_OK_FOR_INDEX_P (REGNO (x))
1926 : REGNO_OK_FOR_INDEX_NONSTRICT_P (REGNO (x))));
1927 }
1928
1929 /* Return true if X is a legitimate index expression for a (d8,An,Xn) or
1930 (bd,An,Xn) addressing mode. Fill in the INDEX and SCALE fields of
1931 ADDRESS if so. STRICT_P says whether we need strict checking. */
1932
1933 static bool
1934 m68k_decompose_index (rtx x, bool strict_p, struct m68k_address *address)
1935 {
1936 int scale;
1937
1938 /* Check for a scale factor. */
1939 scale = 1;
1940 if ((TARGET_68020 || TARGET_COLDFIRE)
1941 && GET_CODE (x) == MULT
1942 && GET_CODE (XEXP (x, 1)) == CONST_INT
1943 && (INTVAL (XEXP (x, 1)) == 2
1944 || INTVAL (XEXP (x, 1)) == 4
1945 || (INTVAL (XEXP (x, 1)) == 8
1946 && (TARGET_COLDFIRE_FPU || !TARGET_COLDFIRE))))
1947 {
1948 scale = INTVAL (XEXP (x, 1));
1949 x = XEXP (x, 0);
1950 }
1951
1952 /* Check for a word extension. */
1953 if (!TARGET_COLDFIRE
1954 && GET_CODE (x) == SIGN_EXTEND
1955 && GET_MODE (XEXP (x, 0)) == HImode)
1956 x = XEXP (x, 0);
1957
1958 if (m68k_legitimate_index_reg_p (x, strict_p))
1959 {
1960 address->scale = scale;
1961 address->index = x;
1962 return true;
1963 }
1964
1965 return false;
1966 }
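
/* For example (an illustrative sketch): on a 68020 the rtx

     (mult (sign_extend:SI (reg:HI %d1)) (const_int 4))

   is accepted with SCALE = 4 and INDEX = (reg:HI %d1), i.e. a word-sized
   index register scaled by 4 in the (d8,An,Xn.SIZE*SCALE) form.  */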
1967
1968 /* Return true if X is an illegitimate symbolic constant. */
1969
1970 bool
1971 m68k_illegitimate_symbolic_constant_p (rtx x)
1972 {
1973 rtx base, offset;
1974
1975 if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P)
1976 {
1977 split_const (x, &base, &offset);
1978 if (GET_CODE (base) == SYMBOL_REF
1979 && !offset_within_block_p (base, INTVAL (offset)))
1980 return true;
1981 }
1982 return m68k_tls_reference_p (x, false);
1983 }
1984
1985 /* Return true if X is a legitimate constant address that can reach
1986 bytes in the range [X, X + REACH). STRICT_P says whether we need
1987 strict checking. */
1988
1989 static bool
1990 m68k_legitimate_constant_address_p (rtx x, unsigned int reach, bool strict_p)
1991 {
1992 rtx base, offset;
1993
1994 if (!CONSTANT_ADDRESS_P (x))
1995 return false;
1996
1997 if (flag_pic
1998 && !(strict_p && TARGET_PCREL)
1999 && symbolic_operand (x, VOIDmode))
2000 return false;
2001
2002 if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P && reach > 1)
2003 {
2004 split_const (x, &base, &offset);
2005 if (GET_CODE (base) == SYMBOL_REF
2006 && !offset_within_block_p (base, INTVAL (offset) + reach - 1))
2007 return false;
2008 }
2009
2010 return !m68k_tls_reference_p (x, false);
2011 }
2012
2013 /* Return true if X is a LABEL_REF for a jump table. Assume that unplaced
2014 labels will become jump tables. */
2015
2016 static bool
2017 m68k_jump_table_ref_p (rtx x)
2018 {
2019 if (GET_CODE (x) != LABEL_REF)
2020 return false;
2021
2022 x = XEXP (x, 0);
2023 if (!NEXT_INSN (x) && !PREV_INSN (x))
2024 return true;
2025
2026 x = next_nonnote_insn (x);
2027 return x && JUMP_TABLE_DATA_P (x);
2028 }
2029
2030 /* Return true if X is a legitimate address for values of mode MODE.
2031 STRICT_P says whether strict checking is needed. If the address
2032 is valid, describe its components in *ADDRESS. */
2033
2034 static bool
2035 m68k_decompose_address (enum machine_mode mode, rtx x,
2036 bool strict_p, struct m68k_address *address)
2037 {
2038 unsigned int reach;
2039
2040 memset (address, 0, sizeof (*address));
2041
2042 if (mode == BLKmode)
2043 reach = 1;
2044 else
2045 reach = GET_MODE_SIZE (mode);
2046
2047 /* Check for (An) (mode 2). */
2048 if (m68k_legitimate_base_reg_p (x, strict_p))
2049 {
2050 address->base = x;
2051 return true;
2052 }
2053
2054 /* Check for -(An) and (An)+ (modes 3 and 4). */
2055 if ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_INC)
2056 && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
2057 {
2058 address->code = GET_CODE (x);
2059 address->base = XEXP (x, 0);
2060 return true;
2061 }
2062
2063 /* Check for (d16,An) (mode 5). */
2064 if (GET_CODE (x) == PLUS
2065 && GET_CODE (XEXP (x, 1)) == CONST_INT
2066 && IN_RANGE (INTVAL (XEXP (x, 1)), -0x8000, 0x8000 - reach)
2067 && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
2068 {
2069 address->base = XEXP (x, 0);
2070 address->offset = XEXP (x, 1);
2071 return true;
2072 }
2073
2074 /* Check for GOT loads. These are (bd,An,Xn) addresses if
2075 TARGET_68020 && flag_pic == 2, otherwise they are (d16,An)
2076 addresses. */
2077 if (GET_CODE (x) == PLUS
2078 && XEXP (x, 0) == pic_offset_table_rtx)
2079 {
2080 /* As we are processing a PLUS, do not unwrap RELOC32 symbols --
2081 they are invalid in this context. */
2082 if (m68k_unwrap_symbol (XEXP (x, 1), false) != XEXP (x, 1))
2083 {
2084 address->base = XEXP (x, 0);
2085 address->offset = XEXP (x, 1);
2086 return true;
2087 }
2088 }
2089
2090 /* The ColdFire FPU only accepts addressing modes 2-5. */
2091 if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
2092 return false;
2093
2094 /* Check for (xxx).w and (xxx).l. Also, in the TARGET_PCREL case,
2095 check for (d16,PC) or (bd,PC,Xn) with a suppressed index register.
2096 All these modes are variations of mode 7. */
2097 if (m68k_legitimate_constant_address_p (x, reach, strict_p))
2098 {
2099 address->offset = x;
2100 return true;
2101 }
2102
2103 /* Check for (d8,PC,Xn), a mode 7 form. This case is needed for
2104 tablejumps.
2105
2106 ??? do_tablejump creates these addresses before placing the target
2107 label, so we have to assume that unplaced labels are jump table
2108 references. It seems unlikely that we would ever generate indexed
2109 accesses to unplaced labels in other cases. */
2110 if (GET_CODE (x) == PLUS
2111 && m68k_jump_table_ref_p (XEXP (x, 1))
2112 && m68k_decompose_index (XEXP (x, 0), strict_p, address))
2113 {
2114 address->offset = XEXP (x, 1);
2115 return true;
2116 }
2117
2118 /* Everything hereafter deals with (d8,An,Xn.SIZE*SCALE) or
2119 (bd,An,Xn.SIZE*SCALE) addresses. */
2120
2121 if (TARGET_68020)
2122 {
2123 /* Check for a nonzero base displacement. */
2124 if (GET_CODE (x) == PLUS
2125 && m68k_legitimate_constant_address_p (XEXP (x, 1), reach, strict_p))
2126 {
2127 address->offset = XEXP (x, 1);
2128 x = XEXP (x, 0);
2129 }
2130
2131 /* Check for a suppressed index register. */
2132 if (m68k_legitimate_base_reg_p (x, strict_p))
2133 {
2134 address->base = x;
2135 return true;
2136 }
2137
2138 /* Check for a suppressed base register. Do not allow this case
2139 for non-symbolic offsets as it effectively gives gcc freedom
2140 to treat data registers as base registers, which can generate
2141 worse code. */
2142 if (address->offset
2143 && symbolic_operand (address->offset, VOIDmode)
2144 && m68k_decompose_index (x, strict_p, address))
2145 return true;
2146 }
2147 else
2148 {
2149 /* Check for a nonzero base displacement. */
2150 if (GET_CODE (x) == PLUS
2151 && GET_CODE (XEXP (x, 1)) == CONST_INT
2152 && IN_RANGE (INTVAL (XEXP (x, 1)), -0x80, 0x80 - reach))
2153 {
2154 address->offset = XEXP (x, 1);
2155 x = XEXP (x, 0);
2156 }
2157 }
2158
2159 /* We now expect the sum of a base and an index. */
2160 if (GET_CODE (x) == PLUS)
2161 {
2162 if (m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p)
2163 && m68k_decompose_index (XEXP (x, 1), strict_p, address))
2164 {
2165 address->base = XEXP (x, 0);
2166 return true;
2167 }
2168
2169 if (m68k_legitimate_base_reg_p (XEXP (x, 1), strict_p)
2170 && m68k_decompose_index (XEXP (x, 0), strict_p, address))
2171 {
2172 address->base = XEXP (x, 1);
2173 return true;
2174 }
2175 }
2176 return false;
2177 }
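
/* A worked example of the decomposition above (illustrative rtx):

     (plus (plus (mult (reg:SI %d0) (const_int 4)) (reg:SI %a0))
	   (const_int 8))

   yields OFFSET = 8, BASE = %a0, INDEX = %d0 and SCALE = 4, i.e. the
   (d8,An,Xn.SIZE*SCALE) mode (8,%a0,%d0.l*4).  */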
2178
2179 /* Return true if X is a legitimate address for values of mode MODE.
2180 STRICT_P says whether strict checking is needed. */
2181
2182 bool
2183 m68k_legitimate_address_p (enum machine_mode mode, rtx x, bool strict_p)
2184 {
2185 struct m68k_address address;
2186
2187 return m68k_decompose_address (mode, x, strict_p, &address);
2188 }
2189
2190 /* Return true if X is a memory, describing its address in ADDRESS if so.
2191 Apply strict checking if called during or after reload. */
2192
2193 static bool
2194 m68k_legitimate_mem_p (rtx x, struct m68k_address *address)
2195 {
2196 return (MEM_P (x)
2197 && m68k_decompose_address (GET_MODE (x), XEXP (x, 0),
2198 reload_in_progress || reload_completed,
2199 address));
2200 }
2201
2202 /* Return true if X matches the 'Q' constraint. It must be a memory
2203 with a base address and no constant offset or index. */
2204
2205 bool
2206 m68k_matches_q_p (rtx x)
2207 {
2208 struct m68k_address address;
2209
2210 return (m68k_legitimate_mem_p (x, &address)
2211 && address.code == UNKNOWN
2212 && address.base
2213 && !address.offset
2214 && !address.index);
2215 }
2216
2217 /* Return true if X matches the 'U' constraint. It must be a base address
2218 with a constant offset and no index. */
2219
2220 bool
2221 m68k_matches_u_p (rtx x)
2222 {
2223 struct m68k_address address;
2224
2225 return (m68k_legitimate_mem_p (x, &address)
2226 && address.code == UNKNOWN
2227 && address.base
2228 && address.offset
2229 && !address.index);
2230 }
2231
2232 /* Return GOT pointer. */
2233
2234 static rtx
2235 m68k_get_gp (void)
2236 {
2237 if (pic_offset_table_rtx == NULL_RTX)
2238 pic_offset_table_rtx = gen_rtx_REG (Pmode, PIC_REG);
2239
2240 crtl->uses_pic_offset_table = 1;
2241
2242 return pic_offset_table_rtx;
2243 }
2244
2245 /* M68K relocations, used to distinguish GOT and TLS relocations in UNSPEC
2246 wrappers. */
2247 enum m68k_reloc { RELOC_GOT, RELOC_TLSGD, RELOC_TLSLDM, RELOC_TLSLDO,
2248 RELOC_TLSIE, RELOC_TLSLE };
2249
2250 #define TLS_RELOC_P(RELOC) ((RELOC) != RELOC_GOT)
2251
2252 /* Wrap symbol X into unspec representing relocation RELOC.
2253 BASE_REG - register that should be added to the result.
2254 TEMP_REG - if non-null, temporary register. */
2255
2256 static rtx
2257 m68k_wrap_symbol (rtx x, enum m68k_reloc reloc, rtx base_reg, rtx temp_reg)
2258 {
2259 bool use_x_p;
2260
2261 use_x_p = (base_reg == pic_offset_table_rtx) ? TARGET_XGOT : TARGET_XTLS;
2262
2263 if (TARGET_COLDFIRE && use_x_p)
2264 /* When compiling with the -mx{got,tls} switch, the code will look like this:
2265
2266 move.l <X>@<RELOC>,<TEMP_REG>
2267 add.l <BASE_REG>,<TEMP_REG> */
2268 {
2269 /* Wrap X in UNSPEC_RELOC32 to hint m68k_output_addr_const_extra
2270 to put @RELOC after the reference. */
2271 x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)),
2272 UNSPEC_RELOC32);
2273 x = gen_rtx_CONST (Pmode, x);
2274
2275 if (temp_reg == NULL)
2276 {
2277 gcc_assert (can_create_pseudo_p ());
2278 temp_reg = gen_reg_rtx (Pmode);
2279 }
2280
2281 emit_move_insn (temp_reg, x);
2282 emit_insn (gen_addsi3 (temp_reg, temp_reg, base_reg));
2283 x = temp_reg;
2284 }
2285 else
2286 {
2287 x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)),
2288 UNSPEC_RELOC16);
2289 x = gen_rtx_CONST (Pmode, x);
2290
2291 x = gen_rtx_PLUS (Pmode, base_reg, x);
2292 }
2293
2294 return x;
2295 }
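
/* For the non-XGOT/XTLS path above, the returned rtx has the shape
   (a sketch):

     (plus BASE_REG (const (unspec [X (const_int RELOC)] UNSPEC_RELOC16)))

   which the output machinery later prints with the @RELOC decoration
   attached to X.  */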
2296
2297 /* Helper for m68k_unwrap_symbol.
2298 Also, if unwrapping was successful (that is if (ORIG != <return value>)),
2299 sets *RELOC_PTR to the relocation type of the symbol. */
2300
2301 static rtx
2302 m68k_unwrap_symbol_1 (rtx orig, bool unwrap_reloc32_p,
2303 enum m68k_reloc *reloc_ptr)
2304 {
2305 if (GET_CODE (orig) == CONST)
2306 {
2307 rtx x;
2308 enum m68k_reloc dummy;
2309
2310 x = XEXP (orig, 0);
2311
2312 if (reloc_ptr == NULL)
2313 reloc_ptr = &dummy;
2314
2315 /* Handle an addend. */
2316 if ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS)
2317 && CONST_INT_P (XEXP (x, 1)))
2318 x = XEXP (x, 0);
2319
2320 if (GET_CODE (x) == UNSPEC)
2321 {
2322 switch (XINT (x, 1))
2323 {
2324 case UNSPEC_RELOC16:
2325 orig = XVECEXP (x, 0, 0);
2326 *reloc_ptr = (enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1));
2327 break;
2328
2329 case UNSPEC_RELOC32:
2330 if (unwrap_reloc32_p)
2331 {
2332 orig = XVECEXP (x, 0, 0);
2333 *reloc_ptr = (enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1));
2334 }
2335 break;
2336
2337 default:
2338 break;
2339 }
2340 }
2341 }
2342
2343 return orig;
2344 }
2345
2346 /* Unwrap symbol from UNSPEC_RELOC16 and, if unwrap_reloc32_p,
2347 UNSPEC_RELOC32 wrappers. */
2348
2349 rtx
2350 m68k_unwrap_symbol (rtx orig, bool unwrap_reloc32_p)
2351 {
2352 return m68k_unwrap_symbol_1 (orig, unwrap_reloc32_p, NULL);
2353 }
2354
2355 /* Helper for m68k_final_prescan_insn. */
2356
2357 static int
2358 m68k_final_prescan_insn_1 (rtx *x_ptr, void *data ATTRIBUTE_UNUSED)
2359 {
2360 rtx x = *x_ptr;
2361
2362 if (m68k_unwrap_symbol (x, true) != x)
2363 /* For rationale of the below, see comment in m68k_final_prescan_insn. */
2364 {
2365 rtx plus;
2366
2367 gcc_assert (GET_CODE (x) == CONST);
2368 plus = XEXP (x, 0);
2369
2370 if (GET_CODE (plus) == PLUS || GET_CODE (plus) == MINUS)
2371 {
2372 rtx unspec;
2373 rtx addend;
2374
2375 unspec = XEXP (plus, 0);
2376 gcc_assert (GET_CODE (unspec) == UNSPEC);
2377 addend = XEXP (plus, 1);
2378 gcc_assert (CONST_INT_P (addend));
2379
2380 /* We now have all the pieces, rearrange them. */
2381
2382 /* Move symbol to plus. */
2383 XEXP (plus, 0) = XVECEXP (unspec, 0, 0);
2384
2385 /* Move plus inside unspec. */
2386 XVECEXP (unspec, 0, 0) = plus;
2387
2388 /* Move unspec to top level of const. */
2389 XEXP (x, 0) = unspec;
2390 }
2391
2392 return -1;
2393 }
2394
2395 return 0;
2396 }
2397
2398 /* Prescan insn before outputting assembler for it. */
2399
2400 void
2401 m68k_final_prescan_insn (rtx insn ATTRIBUTE_UNUSED,
2402 rtx *operands, int n_operands)
2403 {
2404 int i;
2405
2406 /* Combine and, possibly, other optimizations may do a good job of
2407 converting
2408 (const (unspec [(symbol)]))
2409 into
2410 (const (plus (unspec [(symbol)])
2411 (const_int N))).
2412 The problem with this is emitting @TLS or @GOT decorations.
2413 The decoration is emitted when processing (unspec), so the
2414 result would be "#symbol@TLSLE+N" instead of "#symbol+N@TLSLE".
2415
2416 It seems that the easiest solution to this is to convert such
2417 operands to
2418 (const (unspec [(plus (symbol)
2419 (const_int N))])).
2420 Note that the top level of the operand remains intact, so we don't have
2421 to patch up anything outside of the operand. */
2422
2423 for (i = 0; i < n_operands; ++i)
2424 {
2425 rtx op;
2426
2427 op = operands[i];
2428
2429 for_each_rtx (&op, m68k_final_prescan_insn_1, NULL);
2430 }
2431 }
2432
2433 /* Move X to a register and add a REG_EQUAL note pointing to ORIG.
2434 If REG is non-null, use it; generate a new pseudo otherwise. */
2435
2436 static rtx
2437 m68k_move_to_reg (rtx x, rtx orig, rtx reg)
2438 {
2439 rtx insn;
2440
2441 if (reg == NULL_RTX)
2442 {
2443 gcc_assert (can_create_pseudo_p ());
2444 reg = gen_reg_rtx (Pmode);
2445 }
2446
2447 insn = emit_move_insn (reg, x);
2448 /* Put a REG_EQUAL note on this insn, so that it can be optimized
2449 by the loop optimizer. */
2450 set_unique_reg_note (insn, REG_EQUAL, orig);
2451
2452 return reg;
2453 }
2454
2455 /* Does the same as m68k_wrap_symbol, but returns a memory reference to
2456 the GOT slot. */
2457
2458 static rtx
2459 m68k_wrap_symbol_into_got_ref (rtx x, enum m68k_reloc reloc, rtx temp_reg)
2460 {
2461 x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), temp_reg);
2462
2463 x = gen_rtx_MEM (Pmode, x);
2464 MEM_READONLY_P (x) = 1;
2465
2466 return x;
2467 }
2468
2469 /* Legitimize PIC addresses. If the address is already
2470 position-independent, we return ORIG. Newly generated
2471 position-independent addresses go to REG. If we need more
2472 than one register, we lose.
2473
2474 An address is legitimized by making an indirect reference
2475 through the Global Offset Table with the name of the symbol
2476 used as an offset.
2477
2478 The assembler and linker are responsible for placing the
2479 address of the symbol in the GOT. The function prologue
2480 is responsible for initializing a5 to the starting address
2481 of the GOT.
2482
2483 The assembler is also responsible for translating a symbol name
2484 into a constant displacement from the start of the GOT.
2485
2486 A quick example may make things a little clearer:
2487
2488 When not generating PIC code, to store the value 12345 into _foo
2489 we would generate the following code:
2490
2491 movel #12345, _foo
2492
2493 When generating PIC two transformations are made. First, the compiler
2494 loads the address of foo into a register. So the first transformation makes:
2495
2496 lea _foo, a0
2497 movel #12345, a0@
2498
2499 The code in movsi will intercept the lea instruction and call this
2500 routine which will transform the instructions into:
2501
2502 movel a5@(_foo:w), a0
2503 movel #12345, a0@
2504
2505
2506 That (in a nutshell) is how *all* symbol and label references are
2507 handled. */
2508
2509 rtx
2510 legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
2511 rtx reg)
2512 {
2513 rtx pic_ref = orig;
2514
2515 /* First handle a simple SYMBOL_REF or LABEL_REF */
2516 if (GET_CODE (orig) == SYMBOL_REF || GET_CODE (orig) == LABEL_REF)
2517 {
2518 gcc_assert (reg);
2519
2520 pic_ref = m68k_wrap_symbol_into_got_ref (orig, RELOC_GOT, reg);
2521 pic_ref = m68k_move_to_reg (pic_ref, orig, reg);
2522 }
2523 else if (GET_CODE (orig) == CONST)
2524 {
2525 rtx base;
2526
2527 /* Make sure this has not already been legitimized. */
2528 if (m68k_unwrap_symbol (orig, true) != orig)
2529 return orig;
2530
2531 gcc_assert (reg);
2532
2533 /* legitimize both operands of the PLUS */
2534 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
2535
2536 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
2537 orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
2538 base == reg ? 0 : reg);
2539
2540 if (GET_CODE (orig) == CONST_INT)
2541 pic_ref = plus_constant (base, INTVAL (orig));
2542 else
2543 pic_ref = gen_rtx_PLUS (Pmode, base, orig);
2544 }
2545
2546 return pic_ref;
2547 }
2548
2549 /* The __tls_get_addr symbol. */
2550 static GTY(()) rtx m68k_tls_get_addr;
2551
2552 /* Return SYMBOL_REF for __tls_get_addr. */
2553
2554 static rtx
2555 m68k_get_tls_get_addr (void)
2556 {
2557 if (m68k_tls_get_addr == NULL_RTX)
2558 m68k_tls_get_addr = init_one_libfunc ("__tls_get_addr");
2559
2560 return m68k_tls_get_addr;
2561 }
2562
2563 /* Return libcall result in A0 instead of usual D0. */
2564 static bool m68k_libcall_value_in_a0_p = false;
2565
2566 /* Emit instruction sequence that calls __tls_get_addr. X is
2567 the TLS symbol we are referencing and RELOC is the symbol type to use
2568 (either TLSGD or TLSLDM). EQV is the REG_EQUAL note for the sequence
2569 emitted. A pseudo register with the result of the __tls_get_addr call is
2570 returned. */
2571
2572 static rtx
2573 m68k_call_tls_get_addr (rtx x, rtx eqv, enum m68k_reloc reloc)
2574 {
2575 rtx a0;
2576 rtx insns;
2577 rtx dest;
2578
2579 /* Emit the call sequence. */
2580 start_sequence ();
2581
2582 /* FIXME: Unfortunately, emit_library_call_value does not
2583 consider (plus (%a5) (const (unspec))) to be a good enough
2584 operand for push, so it forces it into a register. The bad
2585 thing about this is that the combiner, due to copy propagation and other
2586 optimizations, sometimes cannot fix this later. As a consequence,
2587 an additional register may be allocated, resulting in a spill.
2588 For reference, see args processing loops in
2589 calls.c:emit_library_call_value_1.
2590 For testcase, see gcc.target/m68k/tls-{gd, ld}.c */
2591 x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), NULL_RTX);
2592
2593 /* __tls_get_addr() is not a libcall, but emitting a libcall_value
2594 is the simplest way of generating a call. The difference between
2595 __tls_get_addr() and a normal libcall is that the result is returned
2596 in A0 instead of D0. To work around this, we use m68k_libcall_value_in_a0_p
2597 which temporarily switches returning the result to A0. */
2598
2599 m68k_libcall_value_in_a0_p = true;
2600 a0 = emit_library_call_value (m68k_get_tls_get_addr (), NULL_RTX, LCT_PURE,
2601 Pmode, 1, x, Pmode);
2602 m68k_libcall_value_in_a0_p = false;
2603
2604 insns = get_insns ();
2605 end_sequence ();
2606
2607 gcc_assert (can_create_pseudo_p ());
2608 dest = gen_reg_rtx (Pmode);
2609 emit_libcall_block (insns, dest, a0, eqv);
2610
2611 return dest;
2612 }
2613
2614 /* The __m68k_read_tp symbol. */
2615 static GTY(()) rtx m68k_read_tp;
2616
2617 /* Return SYMBOL_REF for __m68k_read_tp. */
2618
2619 static rtx
2620 m68k_get_m68k_read_tp (void)
2621 {
2622 if (m68k_read_tp == NULL_RTX)
2623 m68k_read_tp = init_one_libfunc ("__m68k_read_tp");
2624
2625 return m68k_read_tp;
2626 }
2627
2628 /* Emit instruction sequence that calls __m68k_read_tp.
2629 A pseudo register with the result of the __m68k_read_tp call is returned. */
2630
2631 static rtx
2632 m68k_call_m68k_read_tp (void)
2633 {
2634 rtx a0;
2635 rtx eqv;
2636 rtx insns;
2637 rtx dest;
2638
2639 start_sequence ();
2640
2641 /* __m68k_read_tp() is not a libcall, but emitting a libcall_value
2642 is the simplest way of generating a call. The difference between
2643 __m68k_read_tp() and a normal libcall is that the result is returned
2644 in A0 instead of D0. To work around this, we use m68k_libcall_value_in_a0_p
2645 which temporarily switches returning the result to A0. */
2646
2647 /* Emit the call sequence. */
2648 m68k_libcall_value_in_a0_p = true;
2649 a0 = emit_library_call_value (m68k_get_m68k_read_tp (), NULL_RTX, LCT_PURE,
2650 Pmode, 0);
2651 m68k_libcall_value_in_a0_p = false;
2652 insns = get_insns ();
2653 end_sequence ();
2654
2655 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2656 share the m68k_read_tp result with other IE/LE model accesses. */
2657 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const1_rtx), UNSPEC_RELOC32);
2658
2659 gcc_assert (can_create_pseudo_p ());
2660 dest = gen_reg_rtx (Pmode);
2661 emit_libcall_block (insns, dest, a0, eqv);
2662
2663 return dest;
2664 }
2665
2666 /* Return a legitimized address for accessing TLS SYMBOL_REF X.
2667 For explanations of the instruction sequences, see the TLS/NPTL ABI for m68k and
2668 ColdFire. */
2669
2670 rtx
2671 m68k_legitimize_tls_address (rtx orig)
2672 {
2673 switch (SYMBOL_REF_TLS_MODEL (orig))
2674 {
2675 case TLS_MODEL_GLOBAL_DYNAMIC:
2676 orig = m68k_call_tls_get_addr (orig, orig, RELOC_TLSGD);
2677 break;
2678
2679 case TLS_MODEL_LOCAL_DYNAMIC:
2680 {
2681 rtx eqv;
2682 rtx a0;
2683 rtx x;
2684
2685 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2686 share the LDM result with other LD model accesses. */
2687 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
2688 UNSPEC_RELOC32);
2689
2690 a0 = m68k_call_tls_get_addr (orig, eqv, RELOC_TLSLDM);
2691
2692 x = m68k_wrap_symbol (orig, RELOC_TLSLDO, a0, NULL_RTX);
2693
2694 if (can_create_pseudo_p ())
2695 x = m68k_move_to_reg (x, orig, NULL_RTX);
2696
2697 orig = x;
2698 break;
2699 }
2700
2701 case TLS_MODEL_INITIAL_EXEC:
2702 {
2703 rtx a0;
2704 rtx x;
2705
2706 a0 = m68k_call_m68k_read_tp ();
2707
2708 x = m68k_wrap_symbol_into_got_ref (orig, RELOC_TLSIE, NULL_RTX);
2709 x = gen_rtx_PLUS (Pmode, x, a0);
2710
2711 if (can_create_pseudo_p ())
2712 x = m68k_move_to_reg (x, orig, NULL_RTX);
2713
2714 orig = x;
2715 break;
2716 }
2717
2718 case TLS_MODEL_LOCAL_EXEC:
2719 {
2720 rtx a0;
2721 rtx x;
2722
2723 a0 = m68k_call_m68k_read_tp ();
2724
2725 x = m68k_wrap_symbol (orig, RELOC_TLSLE, a0, NULL_RTX);
2726
2727 if (can_create_pseudo_p ())
2728 x = m68k_move_to_reg (x, orig, NULL_RTX);
2729
2730 orig = x;
2731 break;
2732 }
2733
2734 default:
2735 gcc_unreachable ();
2736 }
2737
2738 return orig;
2739 }
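
/* As a sketch of the local-exec case above (register names illustrative,
   and the exact relocation spelling is chosen by the output routines):

	jsr __m68k_read_tp
	lea foo@TLSLE(%a0),%a1

   i.e. a thread-pointer read followed by adding the RELOC_TLSLE-wrapped
   offset of the symbol.  */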
2740
2741 /* Return true if X is a TLS symbol. */
2742
2743 static bool
2744 m68k_tls_symbol_p (rtx x)
2745 {
2746 if (!TARGET_HAVE_TLS)
2747 return false;
2748
2749 if (GET_CODE (x) != SYMBOL_REF)
2750 return false;
2751
2752 return SYMBOL_REF_TLS_MODEL (x) != 0;
2753 }
2754
2755 /* Helper for m68k_tls_reference_p. */
2756
2757 static int
2758 m68k_tls_reference_p_1 (rtx *x_ptr, void *data ATTRIBUTE_UNUSED)
2759 {
2760 /* Note: this is not the same as m68k_tls_symbol_p. */
2761 if (GET_CODE (*x_ptr) == SYMBOL_REF)
2762 return SYMBOL_REF_TLS_MODEL (*x_ptr) != 0 ? 1 : 0;
2763
2764 /* Don't recurse into legitimate TLS references. */
2765 if (m68k_tls_reference_p (*x_ptr, true))
2766 return -1;
2767
2768 return 0;
2769 }
2770
2771 /* If !LEGITIMATE_P, return true if X is a TLS symbol reference,
2772 even an illegitimate one.
2773 If LEGITIMATE_P, return true if X is a legitimate TLS symbol reference. */
2774
2775 bool
2776 m68k_tls_reference_p (rtx x, bool legitimate_p)
2777 {
2778 if (!TARGET_HAVE_TLS)
2779 return false;
2780
2781 if (!legitimate_p)
2782 return for_each_rtx (&x, m68k_tls_reference_p_1, NULL) == 1 ? true : false;
2783 else
2784 {
2785 enum m68k_reloc reloc = RELOC_GOT;
2786
2787 return (m68k_unwrap_symbol_1 (x, true, &reloc) != x
2788 && TLS_RELOC_P (reloc));
2789 }
2790 }
2791
2792 \f
2793
2794 #define USE_MOVQ(i) ((unsigned) ((i) + 128) <= 255)
2795
2796 /* Return the type of move that should be used for integer I. */
2797
2798 M68K_CONST_METHOD
2799 m68k_const_method (HOST_WIDE_INT i)
2800 {
2801 unsigned u;
2802
2803 if (USE_MOVQ (i))
2804 return MOVQ;
2805
2806 /* The ColdFire doesn't have byte or word operations. */
2807 /* FIXME: This may not be useful for the m68060 either. */
2808 if (!TARGET_COLDFIRE)
2809 {
2810 /* If -256 < N < 256 but N is not in range for a moveq,
2811 N ^ 0xff will be, so use moveq #N^0xff,dreg; not.b dreg. */
2812 if (USE_MOVQ (i ^ 0xff))
2813 return NOTB;
2814 /* Likewise, try with not.w */
2815 if (USE_MOVQ (i ^ 0xffff))
2816 return NOTW;
2817 /* This is the only value where neg.w is useful */
2818 if (i == -65408)
2819 return NEGW;
2820 }
2821
2822 /* Try also with swap. */
2823 u = i;
2824 if (USE_MOVQ ((u >> 16) | (u << 16)))
2825 return SWAP;
2826
2827 if (TARGET_ISAB)
2828 {
2829 /* Try using MVZ/MVS with an immediate value to load constants. */
2830 if (i >= 0 && i <= 65535)
2831 return MVZ;
2832 if (i >= -32768 && i <= 32767)
2833 return MVS;
2834 }
2835
2836 /* Otherwise, use move.l */
2837 return MOVL;
2838 }
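
/* Illustrative examples of the classification above on a plain 68020
   (not from the original sources):

     77     -> MOVQ  moveq #77,%dN
     200    -> NOTB  moveq #55,%dN; not.b %dN   (200 == 55 ^ 0xff)
     65535  -> NOTW  moveq #0,%dN;  not.w %dN
     65536  -> SWAP  moveq #1,%dN;  swap %dN
     100000 -> MOVL  move.l #100000,%dN  */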
2839
2840 /* Return the cost of moving constant I into a data register. */
2841
2842 static int
2843 const_int_cost (HOST_WIDE_INT i)
2844 {
2845 switch (m68k_const_method (i))
2846 {
2847 case MOVQ:
2848 /* Constants between -128 and 127 are cheap due to moveq. */
2849 return 0;
2850 case MVZ:
2851 case MVS:
2852 case NOTB:
2853 case NOTW:
2854 case NEGW:
2855 case SWAP:
2856 /* Constants easily generated by moveq + not.b/not.w/neg.w/swap. */
2857 return 1;
2858 case MOVL:
2859 return 2;
2860 default:
2861 gcc_unreachable ();
2862 }
2863 }
2864
2865 static bool
2866 m68k_rtx_costs (rtx x, int code, int outer_code, int *total,
2867 bool speed ATTRIBUTE_UNUSED)
2868 {
2869 switch (code)
2870 {
2871 case CONST_INT:
2872 /* Constant zero is super cheap due to clr instruction. */
2873 if (x == const0_rtx)
2874 *total = 0;
2875 else
2876 *total = const_int_cost (INTVAL (x));
2877 return true;
2878
2879 case CONST:
2880 case LABEL_REF:
2881 case SYMBOL_REF:
2882 *total = 3;
2883 return true;
2884
2885 case CONST_DOUBLE:
2886 /* Make 0.0 cheaper than other floating constants to
2887 encourage creating tstsf and tstdf insns. */
2888 if (outer_code == COMPARE
2889 && (x == CONST0_RTX (SFmode) || x == CONST0_RTX (DFmode)))
2890 *total = 4;
2891 else
2892 *total = 5;
2893 return true;
2894
2895 /* These are vaguely right for a 68020. */
2896 /* The costs for long multiply have been adjusted to work properly
2897 in synth_mult on the 68020, relative to an average of the time
2898 for add and the time for shift, taking away a little more because
2899 sometimes move insns are needed. */
2900 /* div?.w is relatively cheaper on 68000 counted in COSTS_N_INSNS
2901 terms. */
2902 #define MULL_COST \
2903 (TUNE_68060 ? 2 \
2904 : TUNE_68040 ? 5 \
2905 : (TUNE_CFV2 && TUNE_EMAC) ? 3 \
2906 : (TUNE_CFV2 && TUNE_MAC) ? 4 \
2907 : TUNE_CFV2 ? 8 \
2908 : TARGET_COLDFIRE ? 3 : 13)
2909
2910 #define MULW_COST \
2911 (TUNE_68060 ? 2 \
2912 : TUNE_68040 ? 3 \
2913 : TUNE_68000_10 ? 5 \
2914 : (TUNE_CFV2 && TUNE_EMAC) ? 3 \
2915 : (TUNE_CFV2 && TUNE_MAC) ? 2 \
2916 : TUNE_CFV2 ? 8 \
2917 : TARGET_COLDFIRE ? 2 : 8)
2918
2919 #define DIVW_COST \
2920 (TARGET_CF_HWDIV ? 11 \
2921 : TUNE_68000_10 || TARGET_COLDFIRE ? 12 : 27)
2922
2923 case PLUS:
2924 /* An lea costs about three times as much as a simple add. */
2925 if (GET_MODE (x) == SImode
2926 && GET_CODE (XEXP (x, 1)) == REG
2927 && GET_CODE (XEXP (x, 0)) == MULT
2928 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
2929 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2930 && (INTVAL (XEXP (XEXP (x, 0), 1)) == 2
2931 || INTVAL (XEXP (XEXP (x, 0), 1)) == 4
2932 || INTVAL (XEXP (XEXP (x, 0), 1)) == 8))
2933 {
2934 /* lea an@(dx:l:i),am */
2935 *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 2 : 3);
2936 return true;
2937 }
2938 return false;
2939
2940 case ASHIFT:
2941 case ASHIFTRT:
2942 case LSHIFTRT:
2943 if (TUNE_68060)
2944 {
2945 *total = COSTS_N_INSNS (1);
2946 return true;
2947 }
2948 if (TUNE_68000_10)
2949 {
2950 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
2951 {
2952 if (INTVAL (XEXP (x, 1)) < 16)
2953 *total = COSTS_N_INSNS (2) + INTVAL (XEXP (x, 1)) / 2;
2954 else
2955 /* We're using clrw + swap for these cases. */
2956 *total = COSTS_N_INSNS (4) + (INTVAL (XEXP (x, 1)) - 16) / 2;
2957 }
2958 else
2959 *total = COSTS_N_INSNS (10); /* Worst case. */
2960 return true;
2961 }
2962 /* A shift by a big integer takes an extra instruction. */
2963 if (GET_CODE (XEXP (x, 1)) == CONST_INT
2964 && (INTVAL (XEXP (x, 1)) == 16))
2965 {
2966 *total = COSTS_N_INSNS (2); /* clrw;swap */
2967 return true;
2968 }
2969 if (GET_CODE (XEXP (x, 1)) == CONST_INT
2970 && !(INTVAL (XEXP (x, 1)) > 0
2971 && INTVAL (XEXP (x, 1)) <= 8))
2972 {
2973 *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 1 : 3); /* lsr #i,dn */
2974 return true;
2975 }
2976 return false;
2977
2978 case MULT:
2979 if ((GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
2980 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
2981 && GET_MODE (x) == SImode)
2982 *total = COSTS_N_INSNS (MULW_COST);
2983 else if (GET_MODE (x) == QImode || GET_MODE (x) == HImode)
2984 *total = COSTS_N_INSNS (MULW_COST);
2985 else
2986 *total = COSTS_N_INSNS (MULL_COST);
2987 return true;
2988
2989 case DIV:
2990 case UDIV:
2991 case MOD:
2992 case UMOD:
2993 if (GET_MODE (x) == QImode || GET_MODE (x) == HImode)
2994 *total = COSTS_N_INSNS (DIVW_COST); /* div.w */
2995 else if (TARGET_CF_HWDIV)
2996 *total = COSTS_N_INSNS (18);
2997 else
2998 *total = COSTS_N_INSNS (43); /* div.l */
2999 return true;
3000
3001 case ZERO_EXTRACT:
3002 if (outer_code == COMPARE)
3003 *total = 0;
3004 return false;
3005
3006 default:
3007 return false;
3008 }
3009 }
3010
3011 /* Return an instruction to move CONST_INT OPERANDS[1] into data register
3012 OPERANDS[0]. */
3013
3014 static const char *
3015 output_move_const_into_data_reg (rtx *operands)
3016 {
3017 HOST_WIDE_INT i;
3018
3019 i = INTVAL (operands[1]);
3020 switch (m68k_const_method (i))
3021 {
3022 case MVZ:
3023 return "mvzw %1,%0";
3024 case MVS:
3025 return "mvsw %1,%0";
3026 case MOVQ:
3027 return "moveq %1,%0";
3028 case NOTB:
3029 CC_STATUS_INIT;
3030 operands[1] = GEN_INT (i ^ 0xff);
3031 return "moveq %1,%0\n\tnot%.b %0";
3032 case NOTW:
3033 CC_STATUS_INIT;
3034 operands[1] = GEN_INT (i ^ 0xffff);
3035 return "moveq %1,%0\n\tnot%.w %0";
3036 case NEGW:
3037 CC_STATUS_INIT;
3038 return "moveq #-128,%0\n\tneg%.w %0";
3039 case SWAP:
3040 {
3041 unsigned u = i;
3042
3043 operands[1] = GEN_INT ((u << 16) | (u >> 16));
3044 return "moveq %1,%0\n\tswap %0";
3045 }
3046 case MOVL:
3047 return "move%.l %1,%0";
3048 default:
3049 gcc_unreachable ();
3050 }
3051 }
3052
3053 /* Return true if I can be handled by ISA B's mov3q instruction. */
3054
3055 bool
3056 valid_mov3q_const (HOST_WIDE_INT i)
3057 {
3058 return TARGET_ISAB && (i == -1 || IN_RANGE (i, 1, 7));
3059 }
3060
3061 /* Return an instruction to move CONST_INT OPERANDS[1] into OPERANDS[0].
3062 SRC below is the value of OPERANDS[1]. */
3063
3064 static const char *
3065 output_move_simode_const (rtx *operands)
3066 {
3067 rtx dest;
3068 HOST_WIDE_INT src;
3069
3070 dest = operands[0];
3071 src = INTVAL (operands[1]);
3072 if (src == 0
3073 && (DATA_REG_P (dest) || MEM_P (dest))
3074 /* clr insns on 68000 read before writing. */
3075 && ((TARGET_68010 || TARGET_COLDFIRE)
3076 || !(MEM_P (dest) && MEM_VOLATILE_P (dest))))
3077 return "clr%.l %0";
3078 else if (GET_MODE (dest) == SImode && valid_mov3q_const (src))
3079 return "mov3q%.l %1,%0";
3080 else if (src == 0 && ADDRESS_REG_P (dest))
3081 return "sub%.l %0,%0";
3082 else if (DATA_REG_P (dest))
3083 return output_move_const_into_data_reg (operands);
3084 else if (ADDRESS_REG_P (dest) && IN_RANGE (src, -0x8000, 0x7fff))
3085 {
3086 if (valid_mov3q_const (src))
3087 return "mov3q%.l %1,%0";
3088 return "move%.w %1,%0";
3089 }
3090 else if (MEM_P (dest)
3091 && GET_CODE (XEXP (dest, 0)) == PRE_DEC
3092 && REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
3093 && IN_RANGE (src, -0x8000, 0x7fff))
3094 {
3095 if (valid_mov3q_const (src))
3096 return "mov3q%.l %1,%-";
3097 return "pea %a1";
3098 }
3099 return "move%.l %1,%0";
3100 }
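
/* A few illustrative instances of the rules above: moving 0 into a data
   register yields "clr.l %dN"; moving 5 on an ISA B target yields
   "mov3q.l #5,..."; and moving 0 into an address register yields
   "sub.l %aN,%aN", since clr does not accept an address register
   destination.  */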
3101
3102 const char *
3103 output_move_simode (rtx *operands)
3104 {
3105 if (GET_CODE (operands[1]) == CONST_INT)
3106 return output_move_simode_const (operands);
3107 else if ((GET_CODE (operands[1]) == SYMBOL_REF
3108 || GET_CODE (operands[1]) == CONST)
3109 && push_operand (operands[0], SImode))
3110 return "pea %a1";
3111 else if ((GET_CODE (operands[1]) == SYMBOL_REF
3112 || GET_CODE (operands[1]) == CONST)
3113 && ADDRESS_REG_P (operands[0]))
3114 return "lea %a1,%0";
3115 return "move%.l %1,%0";
3116 }
3117
3118 const char *
3119 output_move_himode (rtx *operands)
3120 {
3121 if (GET_CODE (operands[1]) == CONST_INT)
3122 {
3123 if (operands[1] == const0_rtx
3124 && (DATA_REG_P (operands[0])
3125 || GET_CODE (operands[0]) == MEM)
3126 /* clr insns on 68000 read before writing. */
3127 && ((TARGET_68010 || TARGET_COLDFIRE)
3128 || !(GET_CODE (operands[0]) == MEM
3129 && MEM_VOLATILE_P (operands[0]))))
3130 return "clr%.w %0";
3131 else if (operands[1] == const0_rtx
3132 && ADDRESS_REG_P (operands[0]))
3133 return "sub%.l %0,%0";
3134 else if (DATA_REG_P (operands[0])
3135 && INTVAL (operands[1]) < 128
3136 && INTVAL (operands[1]) >= -128)
3137 return "moveq %1,%0";
3138 else if (INTVAL (operands[1]) < 0x8000
3139 && INTVAL (operands[1]) >= -0x8000)
3140 return "move%.w %1,%0";
3141 }
3142 else if (CONSTANT_P (operands[1]))
3143 return "move%.l %1,%0";
3144 return "move%.w %1,%0";
3145 }
3146
3147 const char *
3148 output_move_qimode (rtx *operands)
3149 {
3150 /* The 68k family always modifies the stack pointer by at least 2, even for
3151 byte pushes. The 5200 (ColdFire) does not do this. */
3152
3153 /* This case is generated by the pushqi1 pattern now. */
3154 gcc_assert (!(GET_CODE (operands[0]) == MEM
3155 && GET_CODE (XEXP (operands[0], 0)) == PRE_DEC
3156 && XEXP (XEXP (operands[0], 0), 0) == stack_pointer_rtx
3157 && ! ADDRESS_REG_P (operands[1])
3158 && ! TARGET_COLDFIRE));
3159
3160 /* clr and st insns on 68000 read before writing. */
3161 if (!ADDRESS_REG_P (operands[0])
3162 && ((TARGET_68010 || TARGET_COLDFIRE)
3163 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3164 {
3165 if (operands[1] == const0_rtx)
3166 return "clr%.b %0";
3167 if ((!TARGET_COLDFIRE || DATA_REG_P (operands[0]))
3168 && GET_CODE (operands[1]) == CONST_INT
3169 && (INTVAL (operands[1]) & 255) == 255)
3170 {
3171 CC_STATUS_INIT;
3172 return "st %0";
3173 }
3174 }
3175 if (GET_CODE (operands[1]) == CONST_INT
3176 && DATA_REG_P (operands[0])
3177 && INTVAL (operands[1]) < 128
3178 && INTVAL (operands[1]) >= -128)
3179 return "moveq %1,%0";
3180 if (operands[1] == const0_rtx && ADDRESS_REG_P (operands[0]))
3181 return "sub%.l %0,%0";
3182 if (GET_CODE (operands[1]) != CONST_INT && CONSTANT_P (operands[1]))
3183 return "move%.l %1,%0";
3184 /* 68k family (including the 5200 ColdFire) does not support byte moves to
3185 or from address registers. */
3186 if (ADDRESS_REG_P (operands[0]) || ADDRESS_REG_P (operands[1]))
3187 return "move%.w %1,%0";
3188 return "move%.b %1,%0";
3189 }
3190
3191 const char *
3192 output_move_stricthi (rtx *operands)
3193 {
3194 if (operands[1] == const0_rtx
3195 /* clr insns on 68000 read before writing. */
3196 && ((TARGET_68010 || TARGET_COLDFIRE)
3197 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3198 return "clr%.w %0";
3199 return "move%.w %1,%0";
3200 }
3201
3202 const char *
3203 output_move_strictqi (rtx *operands)
3204 {
3205 if (operands[1] == const0_rtx
3206 /* clr insns on 68000 read before writing. */
3207 && ((TARGET_68010 || TARGET_COLDFIRE)
3208 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3209 return "clr%.b %0";
3210 return "move%.b %1,%0";
3211 }
3212
3213 /* Return the best assembler insn template
3214 for moving operands[1] into operands[0] as a fullword. */
3215
3216 static const char *
3217 singlemove_string (rtx *operands)
3218 {
3219 if (GET_CODE (operands[1]) == CONST_INT)
3220 return output_move_simode_const (operands);
3221 return "move%.l %1,%0";
3222 }
3223
3224
3225 /* Output assembler or rtl code to perform a doubleword move insn
3226 with operands OPERANDS.
3227 Pointers to 3 helper functions should be specified:
3228 HANDLE_REG_ADJUST to adjust a register by a small value,
3229 HANDLE_COMPADR to compute an address and
3230 HANDLE_MOVSI to move 4 bytes. */
3231
3232 static void
3233 handle_move_double (rtx operands[2],
3234 void (*handle_reg_adjust) (rtx, int),
3235 void (*handle_compadr) (rtx [2]),
3236 void (*handle_movsi) (rtx [2]))
3237 {
3238 enum
3239 {
3240 REGOP, OFFSOP, MEMOP, PUSHOP, POPOP, CNSTOP, RNDOP
3241 } optype0, optype1;
3242 rtx latehalf[2];
3243 rtx middlehalf[2];
3244 rtx xops[2];
3245 rtx addreg0 = 0, addreg1 = 0;
3246 int dest_overlapped_low = 0;
3247 int size = GET_MODE_SIZE (GET_MODE (operands[0]));
3248
3249 middlehalf[0] = 0;
3250 middlehalf[1] = 0;
3251
3252 /* First classify both operands. */
3253
3254 if (REG_P (operands[0]))
3255 optype0 = REGOP;
3256 else if (offsettable_memref_p (operands[0]))
3257 optype0 = OFFSOP;
3258 else if (GET_CODE (XEXP (operands[0], 0)) == POST_INC)
3259 optype0 = POPOP;
3260 else if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC)
3261 optype0 = PUSHOP;
3262 else if (GET_CODE (operands[0]) == MEM)
3263 optype0 = MEMOP;
3264 else
3265 optype0 = RNDOP;
3266
3267 if (REG_P (operands[1]))
3268 optype1 = REGOP;
3269 else if (CONSTANT_P (operands[1]))
3270 optype1 = CNSTOP;
3271 else if (offsettable_memref_p (operands[1]))
3272 optype1 = OFFSOP;
3273 else if (GET_CODE (XEXP (operands[1], 0)) == POST_INC)
3274 optype1 = POPOP;
3275 else if (GET_CODE (XEXP (operands[1], 0)) == PRE_DEC)
3276 optype1 = PUSHOP;
3277 else if (GET_CODE (operands[1]) == MEM)
3278 optype1 = MEMOP;
3279 else
3280 optype1 = RNDOP;
3281
3282 /* Check for the cases that the operand constraints are not supposed
3283 to allow. Generating code for these cases is
3284 painful. */
3285 gcc_assert (optype0 != RNDOP && optype1 != RNDOP);
3286
3287 /* If one operand is decrementing and one is incrementing,
3288 decrement the former register explicitly
3289 and change that operand into ordinary indexing. */
3290
3291 if (optype0 == PUSHOP && optype1 == POPOP)
3292 {
3293 operands[0] = XEXP (XEXP (operands[0], 0), 0);
3294
3295 handle_reg_adjust (operands[0], -size);
3296
3297 if (GET_MODE (operands[1]) == XFmode)
3298 operands[0] = gen_rtx_MEM (XFmode, operands[0]);
3299 else if (GET_MODE (operands[0]) == DFmode)
3300 operands[0] = gen_rtx_MEM (DFmode, operands[0]);
3301 else
3302 operands[0] = gen_rtx_MEM (DImode, operands[0]);
3303 optype0 = OFFSOP;
3304 }
3305 if (optype0 == POPOP && optype1 == PUSHOP)
3306 {
3307 operands[1] = XEXP (XEXP (operands[1], 0), 0);
3308
3309 handle_reg_adjust (operands[1], -size);
3310
3311 if (GET_MODE (operands[1]) == XFmode)
3312 operands[1] = gen_rtx_MEM (XFmode, operands[1]);
3313 else if (GET_MODE (operands[1]) == DFmode)
3314 operands[1] = gen_rtx_MEM (DFmode, operands[1]);
3315 else
3316 operands[1] = gen_rtx_MEM (DImode, operands[1]);
3317 optype1 = OFFSOP;
3318 }
3319
3320 /* If an operand is an unoffsettable memory ref, find a register
3321 we can increment temporarily to make it refer to the second word. */
3322
3323 if (optype0 == MEMOP)
3324 addreg0 = find_addr_reg (XEXP (operands[0], 0));
3325
3326 if (optype1 == MEMOP)
3327 addreg1 = find_addr_reg (XEXP (operands[1], 0));
3328
3329 /* Ok, we can do one word at a time.
3330 Normally we do the low-numbered word first,
3331 but if either operand is autodecrementing then we
3332 do the high-numbered word first.
3333
3334 In either case, set up in LATEHALF the operands to use
3335 for the high-numbered word and in some cases alter the
3336 operands in OPERANDS to be suitable for the low-numbered word. */
3337
3338 if (size == 12)
3339 {
3340 if (optype0 == REGOP)
3341 {
3342 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 2);
3343 middlehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
3344 }
3345 else if (optype0 == OFFSOP)
3346 {
3347 middlehalf[0] = adjust_address (operands[0], SImode, 4);
3348 latehalf[0] = adjust_address (operands[0], SImode, size - 4);
3349 }
3350 else
3351 {
3352 middlehalf[0] = adjust_address (operands[0], SImode, 0);
3353 latehalf[0] = adjust_address (operands[0], SImode, 0);
3354 }
3355
3356 if (optype1 == REGOP)
3357 {
3358 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 2);
3359 middlehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
3360 }
3361 else if (optype1 == OFFSOP)
3362 {
3363 middlehalf[1] = adjust_address (operands[1], SImode, 4);
3364 latehalf[1] = adjust_address (operands[1], SImode, size - 4);
3365 }
3366 else if (optype1 == CNSTOP)
3367 {
3368 if (GET_CODE (operands[1]) == CONST_DOUBLE)
3369 {
3370 REAL_VALUE_TYPE r;
3371 long l[3];
3372
3373 REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
3374 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
3375 operands[1] = GEN_INT (l[0]);
3376 middlehalf[1] = GEN_INT (l[1]);
3377 latehalf[1] = GEN_INT (l[2]);
3378 }
3379 else
3380 {
3381 /* No non-CONST_DOUBLE constant should ever appear
3382 here. */
3383 gcc_assert (!CONSTANT_P (operands[1]));
3384 }
3385 }
3386 else
3387 {
3388 middlehalf[1] = adjust_address (operands[1], SImode, 0);
3389 latehalf[1] = adjust_address (operands[1], SImode, 0);
3390 }
3391 }
3392 else
3393 /* size is not 12: */
3394 {
3395 if (optype0 == REGOP)
3396 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
3397 else if (optype0 == OFFSOP)
3398 latehalf[0] = adjust_address (operands[0], SImode, size - 4);
3399 else
3400 latehalf[0] = adjust_address (operands[0], SImode, 0);
3401
3402 if (optype1 == REGOP)
3403 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
3404 else if (optype1 == OFFSOP)
3405 latehalf[1] = adjust_address (operands[1], SImode, size - 4);
3406 else if (optype1 == CNSTOP)
3407 split_double (operands[1], &operands[1], &latehalf[1]);
3408 else
3409 latehalf[1] = adjust_address (operands[1], SImode, 0);
3410 }
3411
3412 /* If insn is effectively movd N(sp),-(sp) then we will do the
3413 high word first. We should use the adjusted operand 1 (which is N+4(sp))
3414 for the low word as well, to compensate for the first decrement of sp. */
3415 if (optype0 == PUSHOP
3416 && REGNO (XEXP (XEXP (operands[0], 0), 0)) == STACK_POINTER_REGNUM
3417 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
3418 operands[1] = middlehalf[1] = latehalf[1];
3419
3420 /* For (set (reg:DI N) (mem:DI ... (reg:SI N) ...)),
3421 if the upper part of reg N does not appear in the MEM, arrange to
3422 emit the move late-half first. Otherwise, compute the MEM address
3423 into the upper part of N and use that as a pointer to the memory
3424 operand. */
3425 if (optype0 == REGOP
3426 && (optype1 == OFFSOP || optype1 == MEMOP))
3427 {
3428 rtx testlow = gen_rtx_REG (SImode, REGNO (operands[0]));
3429
3430 if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
3431 && reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
3432 {
3433 /* If both halves of dest are used in the src memory address,
3434 compute the address into latehalf of dest.
3435 Note that this can't happen if the dest is two data regs. */
3436 compadr:
3437 xops[0] = latehalf[0];
3438 xops[1] = XEXP (operands[1], 0);
3439
3440 handle_compadr (xops);
3441 if (GET_MODE (operands[1]) == XFmode)
3442 {
3443 operands[1] = gen_rtx_MEM (XFmode, latehalf[0]);
3444 middlehalf[1] = adjust_address (operands[1], DImode, size - 8);
3445 latehalf[1] = adjust_address (operands[1], DImode, size - 4);
3446 }
3447 else
3448 {
3449 operands[1] = gen_rtx_MEM (DImode, latehalf[0]);
3450 latehalf[1] = adjust_address (operands[1], DImode, size - 4);
3451 }
3452 }
3453 else if (size == 12
3454 && reg_overlap_mentioned_p (middlehalf[0],
3455 XEXP (operands[1], 0)))
3456 {
3457 /* Check for two regs used by both source and dest.
3458 Note that this can't happen if the dest is all data regs.
3459 It can happen if the dest is d6, d7, a0.
3460 But in that case, latehalf is an addr reg, so
3461 the code at compadr does ok. */
3462
3463 if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
3464 || reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
3465 goto compadr;
3466
3467 /* JRV says this can't happen: */
3468 gcc_assert (!addreg0 && !addreg1);
3469
3470 /* Only the middle reg conflicts; simply put it last. */
3471 handle_movsi (operands);
3472 handle_movsi (latehalf);
3473 handle_movsi (middlehalf);
3474
3475 return;
3476 }
3477 else if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0)))
3478 /* If the low half of dest is mentioned in the source memory
3479 address, arrange to emit the move of the late half first. */
3480 dest_overlapped_low = 1;
3481 }
3482
3483 /* If one or both operands are autodecrementing,
3484 do the two words, high-numbered first. */
3485
3486 /* Likewise, if the first move would clobber the source of the second one,
3487 do them in the other order. This happens only for registers;
3488 such overlap can't happen in memory unless the user explicitly
3489 sets it up, and that is an undefined circumstance. */
3490
3491 if (optype0 == PUSHOP || optype1 == PUSHOP
3492 || (optype0 == REGOP && optype1 == REGOP
3493 && ((middlehalf[1] && REGNO (operands[0]) == REGNO (middlehalf[1]))
3494 || REGNO (operands[0]) == REGNO (latehalf[1])))
3495 || dest_overlapped_low)
3496 {
3497 /* Make any unoffsettable addresses point at high-numbered word. */
3498 if (addreg0)
3499 handle_reg_adjust (addreg0, size - 4);
3500 if (addreg1)
3501 handle_reg_adjust (addreg1, size - 4);
3502
3503 /* Do that word. */
3504 handle_movsi (latehalf);
3505
3506 /* Undo the adds we just did. */
3507 if (addreg0)
3508 handle_reg_adjust (addreg0, -4);
3509 if (addreg1)
3510 handle_reg_adjust (addreg1, -4);
3511
3512 if (size == 12)
3513 {
3514 handle_movsi (middlehalf);
3515
3516 if (addreg0)
3517 handle_reg_adjust (addreg0, -4);
3518 if (addreg1)
3519 handle_reg_adjust (addreg1, -4);
3520 }
3521
3522 /* Do low-numbered word. */
3523
3524 handle_movsi (operands);
3525 return;
3526 }
3527
3528 /* Normal case: do the two words, low-numbered first. */
3529
3530 m68k_final_prescan_insn (NULL, operands, 2);
3531 handle_movsi (operands);
3532
3533 /* Do the middle one of the three words for long double. */
3534 if (size == 12)
3535 {
3536 if (addreg0)
3537 handle_reg_adjust (addreg0, 4);
3538 if (addreg1)
3539 handle_reg_adjust (addreg1, 4);
3540
3541 m68k_final_prescan_insn (NULL, middlehalf, 2);
3542 handle_movsi (middlehalf);
3543 }
3544
3545 /* Make any unoffsettable addresses point at high-numbered word. */
3546 if (addreg0)
3547 handle_reg_adjust (addreg0, 4);
3548 if (addreg1)
3549 handle_reg_adjust (addreg1, 4);
3550
3551 /* Do that word. */
3552 m68k_final_prescan_insn (NULL, latehalf, 2);
3553 handle_movsi (latehalf);
3554
3555 /* Undo the adds we just did. */
3556 if (addreg0)
3557 handle_reg_adjust (addreg0, -(size - 4));
3558 if (addreg1)
3559 handle_reg_adjust (addreg1, -(size - 4));
3560
3561 return;
3562 }
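
/* As an example of the ordering logic above (a sketch): a DImode move
   from %d0/%d1 to -(%sp) takes the PUSHOP branch and emits the
   high-numbered word first,

	move.l %d1,-(%sp)
	move.l %d0,-(%sp)

   so the two words land in memory in the correct big-endian order.  */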
3563
3564 /* Output assembler code to adjust REG by N. */
3565 static void
3566 output_reg_adjust (rtx reg, int n)
3567 {
3568 const char *s;
3569
3570 gcc_assert (GET_MODE (reg) == SImode
3571 && -12 <= n && n != 0 && n <= 12);
3572
3573 switch (n)
3574 {
3575 case 12:
3576 s = "add%.l #12,%0";
3577 break;
3578
3579 case 8:
3580 s = "addq%.l #8,%0";
3581 break;
3582
3583 case 4:
3584 s = "addq%.l #4,%0";
3585 break;
3586
3587 case -12:
3588 s = "sub%.l #12,%0";
3589 break;
3590
3591 case -8:
3592 s = "subq%.l #8,%0";
3593 break;
3594
3595 case -4:
3596 s = "subq%.l #4,%0";
3597 break;
3598
3599 default:
3600 gcc_unreachable ();
3601 s = NULL;
3602 }
3603
3604 output_asm_insn (s, &reg);
3605 }
3606
3607 /* Emit rtl code to adjust REG by N. */
3608 static void
3609 emit_reg_adjust (rtx reg1, int n)
3610 {
3611 rtx reg2;
3612
3613 gcc_assert (GET_MODE (reg1) == SImode
3614 && -12 <= n && n != 0 && n <= 12);
3615
3616 reg1 = copy_rtx (reg1);
3617 reg2 = copy_rtx (reg1);
3618
3619 if (n < 0)
3620 emit_insn (gen_subsi3 (reg1, reg2, GEN_INT (-n)));
3621 else if (n > 0)
3622 emit_insn (gen_addsi3 (reg1, reg2, GEN_INT (n)));
3623 else
3624 gcc_unreachable ();
3625 }
3626
3627 /* Output assembler to load address OPERANDS[1] into register OPERANDS[0]. */
3628 static void
3629 output_compadr (rtx operands[2])
3630 {
3631 output_asm_insn ("lea %a1,%0", operands);
3632 }
3633
3634 /* Output the best assembler insn for moving operands[1] into operands[0]
3635 as a fullword. */
3636 static void
3637 output_movsi (rtx operands[2])
3638 {
3639 output_asm_insn (singlemove_string (operands), operands);
3640 }
3641
3642 /* Copy OP and change its mode to MODE. */
3643 static rtx
3644 copy_operand (rtx op, enum machine_mode mode)
3645 {
3646 /* ??? This looks really ugly. There must be a better way
3647 to change a mode on the operand. */
3648 if (GET_MODE (op) != VOIDmode)
3649 {
3650 if (REG_P (op))
3651 op = gen_rtx_REG (mode, REGNO (op));
3652 else
3653 {
3654 op = copy_rtx (op);
3655 PUT_MODE (op, mode);
3656 }
3657 }
3658
3659 return op;
3660 }
3661
3662 /* Emit rtl code for moving operands[1] into operands[0] as a fullword. */
3663 static void
3664 emit_movsi (rtx operands[2])
3665 {
3666 operands[0] = copy_operand (operands[0], SImode);
3667 operands[1] = copy_operand (operands[1], SImode);
3668
3669 emit_insn (gen_movsi (operands[0], operands[1]));
3670 }
3671
3672 /* Output assembler code to perform a doubleword move insn
3673 with operands OPERANDS. */
3674 const char *
3675 output_move_double (rtx *operands)
3676 {
3677 handle_move_double (operands,
3678 output_reg_adjust, output_compadr, output_movsi);
3679
3680 return "";
3681 }
3682
3683 /* Output rtl code to perform a doubleword move insn
3684 with operands OPERANDS. */
3685 void
3686 m68k_emit_move_double (rtx operands[2])
3687 {
3688 handle_move_double (operands, emit_reg_adjust, emit_movsi, emit_movsi);
3689 }
3690
3691 /* Ensure mode of ORIG, a REG rtx, is MODE. Returns either ORIG or a
3692 new rtx with the correct mode. */
3693
3694 static rtx
3695 force_mode (enum machine_mode mode, rtx orig)
3696 {
3697 if (mode == GET_MODE (orig))
3698 return orig;
3699
3700 if (REGNO (orig) >= FIRST_PSEUDO_REGISTER)
3701 abort ();
3702
3703 return gen_rtx_REG (mode, REGNO (orig));
3704 }
3705
3706 static int
3707 fp_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3708 {
3709 return reg_renumber && FP_REG_P (op);
3710 }
3711
3712 /* Emit insns to move operands[1] into operands[0].
3713
3714 Return 1 if we have written out everything that needs to be done to
3715 do the move. Otherwise, return 0 and the caller will emit the move
3716 normally.
3717
3718 Note SCRATCH_REG may not be in the proper mode depending on how it
3719 will be used. This routine is responsible for creating a new copy
3720 of SCRATCH_REG in the proper mode. */
3721
3722 int
3723 emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
3724 {
3725 register rtx operand0 = operands[0];
3726 register rtx operand1 = operands[1];
3727 register rtx tem;
3728
3729 if (scratch_reg
3730 && reload_in_progress && GET_CODE (operand0) == REG
3731 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
3732 operand0 = reg_equiv_mem[REGNO (operand0)];
3733 else if (scratch_reg
3734 && reload_in_progress && GET_CODE (operand0) == SUBREG
3735 && GET_CODE (SUBREG_REG (operand0)) == REG
3736 && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
3737 {
3738 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
3739 the code which tracks sets/uses for delete_output_reload. */
3740 rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
3741 reg_equiv_mem [REGNO (SUBREG_REG (operand0))],
3742 SUBREG_BYTE (operand0));
3743 operand0 = alter_subreg (&temp);
3744 }
3745
3746 if (scratch_reg
3747 && reload_in_progress && GET_CODE (operand1) == REG
3748 && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
3749 operand1 = reg_equiv_mem[REGNO (operand1)];
3750 else if (scratch_reg
3751 && reload_in_progress && GET_CODE (operand1) == SUBREG
3752 && GET_CODE (SUBREG_REG (operand1)) == REG
3753 && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
3754 {
3755 /* We must not alter SUBREG_BYTE (operand1) since that would confuse
3756 the code which tracks sets/uses for delete_output_reload. */
3757 rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
3758 reg_equiv_mem [REGNO (SUBREG_REG (operand1))],
3759 SUBREG_BYTE (operand1));
3760 operand1 = alter_subreg (&temp);
3761 }
3762
3763 if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
3764 && ((tem = find_replacement (&XEXP (operand0, 0)))
3765 != XEXP (operand0, 0)))
3766 operand0 = gen_rtx_MEM (GET_MODE (operand0), tem);
3767 if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
3768 && ((tem = find_replacement (&XEXP (operand1, 0)))
3769 != XEXP (operand1, 0)))
3770 operand1 = gen_rtx_MEM (GET_MODE (operand1), tem);
3771
3772 /* Handle secondary reloads for loads/stores of FP registers where
3773 the address is symbolic, by using the scratch register. */
3774 if (fp_reg_operand (operand0, mode)
3775 && ((GET_CODE (operand1) == MEM
3776 && ! memory_address_p (DFmode, XEXP (operand1, 0)))
3777 || ((GET_CODE (operand1) == SUBREG
3778 && GET_CODE (XEXP (operand1, 0)) == MEM
3779 && !memory_address_p (DFmode, XEXP (XEXP (operand1, 0), 0)))))
3780 && scratch_reg)
3781 {
3782 if (GET_CODE (operand1) == SUBREG)
3783 operand1 = XEXP (operand1, 0);
3784
3785 /* SCRATCH_REG will hold an address. We want
3786 it in SImode regardless of what mode it was originally given
3787 to us. */
3788 scratch_reg = force_mode (SImode, scratch_reg);
3789
3790 /* The displacement might make the address illegitimate; for such
3791 cases load the displacement into the scratch reg. */
3792 if (!memory_address_p (Pmode, XEXP (operand1, 0)))
3793 {
3794 emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
3795 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
3796 Pmode,
3797 XEXP (XEXP (operand1, 0), 0),
3798 scratch_reg));
3799 }
3800 else
3801 emit_move_insn (scratch_reg, XEXP (operand1, 0));
3802 emit_insn (gen_rtx_SET (VOIDmode, operand0,
3803 gen_rtx_MEM (mode, scratch_reg)));
3804 return 1;
3805 }
3806 else if (fp_reg_operand (operand1, mode)
3807 && ((GET_CODE (operand0) == MEM
3808 && ! memory_address_p (DFmode, XEXP (operand0, 0)))
3809 || ((GET_CODE (operand0) == SUBREG)
3810 && GET_CODE (XEXP (operand0, 0)) == MEM
3811 && !memory_address_p (DFmode, XEXP (XEXP (operand0, 0), 0))))
3812 && scratch_reg)
3813 {
3814 if (GET_CODE (operand0) == SUBREG)
3815 operand0 = XEXP (operand0, 0);
3816
3817 /* SCRATCH_REG will hold an address and maybe the actual data. We want
3818 it in SImode regardless of what mode it was originally given
3819 to us. */
3820 scratch_reg = force_mode (SImode, scratch_reg);
3821
3822 /* The displacement might make the address illegitimate; for such
3823 cases load the displacement into the scratch reg. */
3824 if (!memory_address_p (Pmode, XEXP (operand0, 0)))
3825 {
3826 emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
3827 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0,
3828 0)),
3829 Pmode,
3830 XEXP (XEXP (operand0, 0),
3831 0),
3832 scratch_reg));
3833 }
3834 else
3835 emit_move_insn (scratch_reg, XEXP (operand0, 0));
3836 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_MEM (mode, scratch_reg),
3837 operand1));
3838 return 1;
3839 }
3840 /* Handle secondary reloads for loads of FP registers from constant
3841 expressions by forcing the constant into memory.
3842
3843 Use scratch_reg to hold the address of the memory location.
3844
3845 The proper fix is to change PREFERRED_RELOAD_CLASS to return
3846 NO_REGS when presented with a const_int and a register class
3847 containing only FP registers. Doing so unfortunately creates
3848 more problems than it solves. Fix this for 2.5. */
3849 else if (fp_reg_operand (operand0, mode)
3850 && CONSTANT_P (operand1)
3851 && scratch_reg)
3852 {
3853 rtx xoperands[2];
3854
3855 /* SCRATCH_REG will hold an address and maybe the actual data. We want
3856 it in SImode regardless of what mode it was originally given
3857 to us. */
3858 scratch_reg = force_mode (SImode, scratch_reg);
3859
3860 /* Force the constant into memory and put the address of the
3861 memory location into scratch_reg. */
3862 xoperands[0] = scratch_reg;
3863 xoperands[1] = XEXP (force_const_mem (mode, operand1), 0);
3864 emit_insn (gen_rtx_SET (mode, scratch_reg, xoperands[1]));
3865
3866 /* Now load the destination register. */
3867 emit_insn (gen_rtx_SET (mode, operand0,
3868 gen_rtx_MEM (mode, scratch_reg)));
3869 return 1;
3870 }
3871
3872 /* Now have insn-emit do whatever it normally does. */
3873 return 0;
3874 }
3875
3876 /* Split one or more DImode RTL references into pairs of SImode
3877 references. The RTL can be REG, offsettable MEM, integer constant, or
3878 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
3879 split and "num" is its length. lo_half and hi_half are output arrays
3880 that parallel "operands". */
3881
3882 void
3883 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
3884 {
3885 while (num--)
3886 {
3887 rtx op = operands[num];
3888
3889 /* simplify_subreg refuses to split volatile memory references,
3890 but we still have to handle them. */
3891 if (GET_CODE (op) == MEM)
3892 {
3893 lo_half[num] = adjust_address (op, SImode, 4);
3894 hi_half[num] = adjust_address (op, SImode, 0);
3895 }
3896 else
3897 {
3898 lo_half[num] = simplify_gen_subreg (SImode, op,
3899 GET_MODE (op) == VOIDmode
3900 ? DImode : GET_MODE (op), 4);
3901 hi_half[num] = simplify_gen_subreg (SImode, op,
3902 GET_MODE (op) == VOIDmode
3903 ? DImode : GET_MODE (op), 0);
3904 }
3905 }
3906 }
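
/* For example (illustrative): splitting the DImode constant
   0x0123456789abcdef yields hi_half 0x01234567 and lo_half 0x89abcdef;
   for (mem:DI (reg A0)), hi_half is the SImode word at byte offset 0
   and lo_half the word at byte offset 4, matching the 68000's
   big-endian layout.  */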
3907
3908 /* Split X into a base and a constant offset, storing them in *BASE
3909 and *OFFSET respectively. */
3910
3911 static void
3912 m68k_split_offset (rtx x, rtx *base, HOST_WIDE_INT *offset)
3913 {
3914 *offset = 0;
3915 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
3916 {
3917 *offset += INTVAL (XEXP (x, 1));
3918 x = XEXP (x, 0);
3919 }
3920 *base = x;
3921 }
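
/* For example (illustrative): given X = (plus (reg %a0) (const_int 8)),
   *BASE is set to (reg %a0) and *OFFSET to 8; for a plain (reg %a0),
   *BASE is the register itself and *OFFSET is 0.  */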
3922
3923 /* Return true if PATTERN is a PARALLEL suitable for a movem or fmovem
3924 instruction. STORE_P is true if the move is a store and false if it is a load.
3925
3926 If the instruction uses post-increment or pre-decrement addressing,
3927 AUTOMOD_BASE is the base register and AUTOMOD_OFFSET is the total
3928 adjustment. This adjustment will be made by the first element of
3929 PARALLEL, with the loads or stores starting at element 1. If the
3930 instruction does not use post-increment or pre-decrement addressing,
3931 AUTOMOD_BASE is null, AUTOMOD_OFFSET is 0, and the loads or stores
3932 start at element 0. */
3933
3934 bool
3935 m68k_movem_pattern_p (rtx pattern, rtx automod_base,
3936 HOST_WIDE_INT automod_offset, bool store_p)
3937 {
3938 rtx base, mem_base, set, mem, reg, last_reg;
3939 HOST_WIDE_INT offset, mem_offset;
3940 int i, first, len;
3941 enum reg_class rclass;
3942
3943 len = XVECLEN (pattern, 0);
3944 first = (automod_base != NULL);
3945
3946 if (automod_base)
3947 {
3948 /* Stores must be pre-decrement and loads must be post-increment. */
3949 if (store_p != (automod_offset < 0))
3950 return false;
3951
3952 /* Work out the base and offset for lowest memory location. */
3953 base = automod_base;
3954 offset = (automod_offset < 0 ? automod_offset : 0);
3955 }
3956 else
3957 {
3958 /* Allow any valid base and offset in the first access. */
3959 base = NULL;
3960 offset = 0;
3961 }
3962
3963 last_reg = NULL;
3964 rclass = NO_REGS;
3965 for (i = first; i < len; i++)
3966 {
3967 /* We need a plain SET. */
3968 set = XVECEXP (pattern, 0, i);
3969 if (GET_CODE (set) != SET)
3970 return false;
3971
3972 /* Check that we have a memory location... */
3973 mem = XEXP (set, !store_p);
3974 if (!MEM_P (mem) || !memory_operand (mem, VOIDmode))
3975 return false;
3976
3977 /* ...with the right address. */
3978 if (base == NULL)
3979 {
3980 m68k_split_offset (XEXP (mem, 0), &base, &offset);
3981 /* The ColdFire instruction only allows (An) and (d16,An) modes.
3982 There are no mode restrictions for 680x0 besides the
3983 automodification rules enforced above. */
3984 if (TARGET_COLDFIRE
3985 && !m68k_legitimate_base_reg_p (base, reload_completed))
3986 return false;
3987 }
3988 else
3989 {
3990 m68k_split_offset (XEXP (mem, 0), &mem_base, &mem_offset);
3991 if (!rtx_equal_p (base, mem_base) || offset != mem_offset)
3992 return false;
3993 }
3994
3995 /* Check that we have a register of the required mode and class. */
3996 reg = XEXP (set, store_p);
3997 if (!REG_P (reg)
3998 || !HARD_REGISTER_P (reg)
3999 || GET_MODE (reg) != reg_raw_mode[REGNO (reg)])
4000 return false;
4001
4002 if (last_reg)
4003 {
4004 /* The register must belong to RCLASS and have a higher number
4005 than the register in the previous SET. */
4006 if (!TEST_HARD_REG_BIT (reg_class_contents[rclass], REGNO (reg))
4007 || REGNO (last_reg) >= REGNO (reg))
4008 return false;
4009 }
4010 else
4011 {
4012 /* Work out which register class we need. */
4013 if (INT_REGNO_P (REGNO (reg)))
4014 rclass = GENERAL_REGS;
4015 else if (FP_REGNO_P (REGNO (reg)))
4016 rclass = FP_REGS;
4017 else
4018 return false;
4019 }
4020
4021 last_reg = reg;
4022 offset += GET_MODE_SIZE (GET_MODE (reg));
4023 }
4024
4025 /* If we have an automodification, check whether the final offset is OK. */
4026 if (automod_base && offset != (automod_offset < 0 ? 0 : automod_offset))
4027 return false;
4028
4029 /* Reject unprofitable cases. */
4030 if (len < first + (rclass == FP_REGS ? MIN_FMOVEM_REGS : MIN_MOVEM_REGS))
4031 return false;
4032
4033 return true;
4034 }
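
/* For example (an illustrative sketch, not taken from the md file),
   a pre-decrement store of d2-d4 would be represented as

     (parallel [(set (reg sp) (plus (reg sp) (const_int -12)))
                (set (mem (plus (reg sp) (const_int -12))) (reg d2))
                (set (mem (plus (reg sp) (const_int -8))) (reg d3))
                (set (mem (plus (reg sp) (const_int -4))) (reg d4))])

   and satisfies the predicate above with AUTOMOD_BASE = (reg sp),
   AUTOMOD_OFFSET = -12 and STORE_P = true.  */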
4035
4036 /* Return the assembly code template for a movem or fmovem instruction
4037 whose pattern is given by PATTERN. Store the template's operands
4038 in OPERANDS.
4039
4040 If the instruction uses post-increment or pre-decrement addressing,
4041 AUTOMOD_OFFSET is the total adjustment, otherwise it is 0. STORE_P
4042 is true if this is a store instruction. */
4043
4044 const char *
4045 m68k_output_movem (rtx *operands, rtx pattern,
4046 HOST_WIDE_INT automod_offset, bool store_p)
4047 {
4048 unsigned int mask;
4049 int i, first;
4050
4051 gcc_assert (GET_CODE (pattern) == PARALLEL);
4052 mask = 0;
4053 first = (automod_offset != 0);
4054 for (i = first; i < XVECLEN (pattern, 0); i++)
4055 {
4056 /* When using movem with pre-decrement addressing, register X + D0_REG
4057 is controlled by bit 15 - X. For all other addressing modes,
4058 register X + D0_REG is controlled by bit X. Confusingly, the
4059 register mask for fmovem is in the opposite order to that for
4060 movem. */
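/* For example (illustrative): storing d2-d4 with -(%sp) sets bits
   15-2, 15-3 and 15-4, i.e. mask 0x3800, whereas the same registers
   with a (d16,An) destination set bits 2-4, i.e. mask 0x001c.  */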
4061 unsigned int regno;
4062
4063 gcc_assert (MEM_P (XEXP (XVECEXP (pattern, 0, i), !store_p)));
4064 gcc_assert (REG_P (XEXP (XVECEXP (pattern, 0, i), store_p)));
4065 regno = REGNO (XEXP (XVECEXP (pattern, 0, i), store_p));
4066 if (automod_offset < 0)
4067 {
4068 if (FP_REGNO_P (regno))
4069 mask |= 1 << (regno - FP0_REG);
4070 else
4071 mask |= 1 << (15 - (regno - D0_REG));
4072 }
4073 else
4074 {
4075 if (FP_REGNO_P (regno))
4076 mask |= 1 << (7 - (regno - FP0_REG));
4077 else
4078 mask |= 1 << (regno - D0_REG);
4079 }
4080 }
4081 CC_STATUS_INIT;
4082
4083 if (automod_offset == 0)
4084 operands[0] = XEXP (XEXP (XVECEXP (pattern, 0, first), !store_p), 0);
4085 else if (automod_offset < 0)
4086 operands[0] = gen_rtx_PRE_DEC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
4087 else
4088 operands[0] = gen_rtx_POST_INC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
4089 operands[1] = GEN_INT (mask);
4090 if (FP_REGNO_P (REGNO (XEXP (XVECEXP (pattern, 0, first), store_p))))
4091 {
4092 if (store_p)
4093 return "fmovem %1,%a0";
4094 else
4095 return "fmovem %a0,%1";
4096 }
4097 else
4098 {
4099 if (store_p)
4100 return "movem%.l %1,%a0";
4101 else
4102 return "movem%.l %a0,%1";
4103 }
4104 }
4105
4106 /* Return a REG that occurs in ADDR with coefficient 1.
4107 ADDR can be effectively incremented by incrementing REG. */
4108
4109 static rtx
4110 find_addr_reg (rtx addr)
4111 {
4112 while (GET_CODE (addr) == PLUS)
4113 {
4114 if (GET_CODE (XEXP (addr, 0)) == REG)
4115 addr = XEXP (addr, 0);
4116 else if (GET_CODE (XEXP (addr, 1)) == REG)
4117 addr = XEXP (addr, 1);
4118 else if (CONSTANT_P (XEXP (addr, 0)))
4119 addr = XEXP (addr, 1);
4120 else if (CONSTANT_P (XEXP (addr, 1)))
4121 addr = XEXP (addr, 0);
4122 else
4123 gcc_unreachable ();
4124 }
4125 gcc_assert (GET_CODE (addr) == REG);
4126 return addr;
4127 }
4128
4129 /* Output assembler code to perform a 32-bit 3-operand add. */
4130
4131 const char *
4132 output_addsi3 (rtx *operands)
4133 {
4134 if (! operands_match_p (operands[0], operands[1]))
4135 {
4136 if (!ADDRESS_REG_P (operands[1]))
4137 {
4138 rtx tmp = operands[1];
4139
4140 operands[1] = operands[2];
4141 operands[2] = tmp;
4142 }
4143
4144 /* These insns can result from reloads to access
4145 stack slots over 64k from the frame pointer. */
4146 if (GET_CODE (operands[2]) == CONST_INT
4147 && (INTVAL (operands[2]) < -32768 || INTVAL (operands[2]) > 32767))
4148 return "move%.l %2,%0\n\tadd%.l %1,%0";
4149 if (GET_CODE (operands[2]) == REG)
4150 return MOTOROLA ? "lea (%1,%2.l),%0" : "lea %1@(0,%2:l),%0";
4151 return MOTOROLA ? "lea (%c2,%1),%0" : "lea %1@(%c2),%0";
4152 }
4153 if (GET_CODE (operands[2]) == CONST_INT)
4154 {
4155 if (INTVAL (operands[2]) > 0
4156 && INTVAL (operands[2]) <= 8)
4157 return "addq%.l %2,%0";
4158 if (INTVAL (operands[2]) < 0
4159 && INTVAL (operands[2]) >= -8)
4160 {
4161 operands[2] = GEN_INT (- INTVAL (operands[2]));
4162 return "subq%.l %2,%0";
4163 }
4164 /* On the CPU32 it is faster to use two addql instructions to
4165 add a small integer (8 < N <= 16) to a register.
4166 Likewise for subql. */
4167 if (TUNE_CPU32 && REG_P (operands[0]))
4168 {
4169 if (INTVAL (operands[2]) > 8
4170 && INTVAL (operands[2]) <= 16)
4171 {
4172 operands[2] = GEN_INT (INTVAL (operands[2]) - 8);
4173 return "addq%.l #8,%0\n\taddq%.l %2,%0";
4174 }
4175 if (INTVAL (operands[2]) < -8
4176 && INTVAL (operands[2]) >= -16)
4177 {
4178 operands[2] = GEN_INT (- INTVAL (operands[2]) - 8);
4179 return "subq%.l #8,%0\n\tsubq%.l %2,%0";
4180 }
4181 }
4182 if (ADDRESS_REG_P (operands[0])
4183 && INTVAL (operands[2]) >= -0x8000
4184 && INTVAL (operands[2]) < 0x8000)
4185 {
4186 if (TUNE_68040)
4187 return "add%.w %2,%0";
4188 else
4189 return MOTOROLA ? "lea (%c2,%0),%0" : "lea %0@(%c2),%0";
4190 }
4191 }
4192 return "add%.l %2,%0";
4193 }
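
/* For example (Motorola syntax, illustrative only):

     d0 += #4           ->  addq.l #4,%d0
     d0 += #-3          ->  subq.l #3,%d0
     d0 += #12 (CPU32)  ->  addq.l #8,%d0 ; addq.l #4,%d0
     a0 += #1000        ->  lea (1000,%a0),%a0  (add.w on the 68040)
     a0 = a1 + d0       ->  lea (%a1,%d0.l),%a0  */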
4194 \f
4195 /* Store in cc_status the expressions that the condition codes will
4196 describe after execution of an instruction whose pattern is EXP.
4197 Do not alter them if the instruction would not alter the cc's. */
4198
4199 /* On the 68000, all the insns to store in an address register fail to
4200 set the cc's. However, in some cases these instructions can make it
4201 invalid to use the saved cc's. In those cases we clear out
4202 some or all of the saved cc's so they won't be used. */
4203
4204 void
4205 notice_update_cc (rtx exp, rtx insn)
4206 {
4207 if (GET_CODE (exp) == SET)
4208 {
4209 if (GET_CODE (SET_SRC (exp)) == CALL)
4210 CC_STATUS_INIT;
4211 else if (ADDRESS_REG_P (SET_DEST (exp)))
4212 {
4213 if (cc_status.value1 && modified_in_p (cc_status.value1, insn))
4214 cc_status.value1 = 0;
4215 if (cc_status.value2 && modified_in_p (cc_status.value2, insn))
4216 cc_status.value2 = 0;
4217 }
4218 /* fmoves to memory or data registers do not set the condition
4219 codes. Normal moves _do_ set the condition codes, but not in
4220 a way that is appropriate for comparison with 0, because -0.0
4221 would be treated as a negative nonzero number. Note that it
4222 isn't appropriate to conditionalize this restriction on
4223 HONOR_SIGNED_ZEROS because that macro merely indicates whether
4224 we care about the difference between -0.0 and +0.0. */
4225 else if (!FP_REG_P (SET_DEST (exp))
4226 && SET_DEST (exp) != cc0_rtx
4227 && (FP_REG_P (SET_SRC (exp))
4228 || GET_CODE (SET_SRC (exp)) == FIX
4229 || FLOAT_MODE_P (GET_MODE (SET_DEST (exp)))))
4230 CC_STATUS_INIT;
4231 /* A pair of move insns doesn't produce a useful overall cc. */
4232 else if (!FP_REG_P (SET_DEST (exp))
4233 && !FP_REG_P (SET_SRC (exp))
4234 && GET_MODE_SIZE (GET_MODE (SET_SRC (exp))) > 4
4235 && (GET_CODE (SET_SRC (exp)) == REG
4236 || GET_CODE (SET_SRC (exp)) == MEM
4237 || GET_CODE (SET_SRC (exp)) == CONST_DOUBLE))
4238 CC_STATUS_INIT;
4239 else if (SET_DEST (exp) != pc_rtx)
4240 {
4241 cc_status.flags = 0;
4242 cc_status.value1 = SET_DEST (exp);
4243 cc_status.value2 = SET_SRC (exp);
4244 }
4245 }
4246 else if (GET_CODE (exp) == PARALLEL
4247 && GET_CODE (XVECEXP (exp, 0, 0)) == SET)
4248 {
4249 rtx dest = SET_DEST (XVECEXP (exp, 0, 0));
4250 rtx src = SET_SRC (XVECEXP (exp, 0, 0));
4251
4252 if (ADDRESS_REG_P (dest))
4253 CC_STATUS_INIT;
4254 else if (dest != pc_rtx)
4255 {
4256 cc_status.flags = 0;
4257 cc_status.value1 = dest;
4258 cc_status.value2 = src;
4259 }
4260 }
4261 else
4262 CC_STATUS_INIT;
4263 if (cc_status.value2 != 0
4264 && ADDRESS_REG_P (cc_status.value2)
4265 && GET_MODE (cc_status.value2) == QImode)
4266 CC_STATUS_INIT;
4267 if (cc_status.value2 != 0)
4268 switch (GET_CODE (cc_status.value2))
4269 {
4270 case ASHIFT: case ASHIFTRT: case LSHIFTRT:
4271 case ROTATE: case ROTATERT:
4272 /* These instructions always clear the overflow bit, and set
4273 the carry to the bit shifted out. */
4274 cc_status.flags |= CC_OVERFLOW_UNUSABLE | CC_NO_CARRY;
4275 break;
4276
4277 case PLUS: case MINUS: case MULT:
4278 case DIV: case UDIV: case MOD: case UMOD: case NEG:
4279 if (GET_MODE (cc_status.value2) != VOIDmode)
4280 cc_status.flags |= CC_NO_OVERFLOW;
4281 break;
4282 case ZERO_EXTEND:
4283 /* (SET r1 (ZERO_EXTEND r2)) on this machine
4284 ends with a move insn moving r2 in r2's mode.
4285 Thus, the cc's are set for r2.
4286 This can set N bit spuriously. */
4287 cc_status.flags |= CC_NOT_NEGATIVE;
break;

4289 default:
4290 break;
4291 }
4292 if (cc_status.value1 && GET_CODE (cc_status.value1) == REG
4293 && cc_status.value2
4294 && reg_overlap_mentioned_p (cc_status.value1, cc_status.value2))
4295 cc_status.value2 = 0;
4296 if (((cc_status.value1 && FP_REG_P (cc_status.value1))
4297 || (cc_status.value2 && FP_REG_P (cc_status.value2))))
4298 cc_status.flags = CC_IN_68881;
4299 if (cc_status.value2 && GET_CODE (cc_status.value2) == COMPARE
4300 && GET_MODE_CLASS (GET_MODE (XEXP (cc_status.value2, 0))) == MODE_FLOAT)
4301 {
4302 cc_status.flags = CC_IN_68881;
4303 if (!FP_REG_P (XEXP (cc_status.value2, 0)))
4304 cc_status.flags |= CC_REVERSED;
4305 }
4306 }
4307 \f
4308 const char *
4309 output_move_const_double (rtx *operands)
4310 {
4311 int code = standard_68881_constant_p (operands[1]);
4312
4313 if (code != 0)
4314 {
4315 static char buf[40];
4316
4317 sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff);
4318 return buf;
4319 }
4320 return "fmove%.d %1,%0";
4321 }
4322
4323 const char *
4324 output_move_const_single (rtx *operands)
4325 {
4326 int code = standard_68881_constant_p (operands[1]);
4327
4328 if (code != 0)
4329 {
4330 static char buf[40];
4331
4332 sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff);
4333 return buf;
4334 }
4335 return "fmove%.s %f1,%0";
4336 }
4337
4338 /* Return nonzero if X, a CONST_DOUBLE, has a value that we can get
4339 from the "fmovecr" instruction.
4340 The value, anded with 0xff, gives the code to use in fmovecr
4341 to get the desired constant. */
4342
4343 /* This code has been fixed for cross-compilation. */
4344
4345 static int inited_68881_table = 0;
4346
4347 static const char *const strings_68881[7] = {
4348 "0.0",
4349 "1.0",
4350 "10.0",
4351 "100.0",
4352 "10000.0",
4353 "1e8",
4354 "1e16"
4355 };
4356
4357 static const int codes_68881[7] = {
4358 0x0f,
4359 0x32,
4360 0x33,
4361 0x34,
4362 0x35,
4363 0x36,
4364 0x37
4365 };
4366
4367 REAL_VALUE_TYPE values_68881[7];
4368
4369 /* Set up values_68881 array by converting the decimal values
4370 strings_68881 to binary. */
4371
4372 void
4373 init_68881_table (void)
4374 {
4375 int i;
4376 REAL_VALUE_TYPE r;
4377 enum machine_mode mode;
4378
4379 mode = SFmode;
4380 for (i = 0; i < 7; i++)
4381 {
4382 if (i == 6)
4383 mode = DFmode;
4384 r = REAL_VALUE_ATOF (strings_68881[i], mode);
4385 values_68881[i] = r;
4386 }
4387 inited_68881_table = 1;
4388 }
4389
4390 int
4391 standard_68881_constant_p (rtx x)
4392 {
4393 REAL_VALUE_TYPE r;
4394 int i;
4395
4396 /* fmovecr must be emulated on the 68040 and 68060, so it shouldn't be
4397 used at all on those chips. */
4398 if (TUNE_68040_60)
4399 return 0;
4400
4401 if (! inited_68881_table)
4402 init_68881_table ();
4403
4404 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4405
4406 /* Use REAL_VALUES_IDENTICAL instead of REAL_VALUES_EQUAL so that -0.0
4407 is rejected. */
4408 for (i = 0; i < 6; i++)
4409 {
4410 if (REAL_VALUES_IDENTICAL (r, values_68881[i]))
4411 return (codes_68881[i]);
4412 }
4413
4414 if (GET_MODE (x) == SFmode)
4415 return 0;
4416
4417 if (REAL_VALUES_EQUAL (r, values_68881[6]))
4418 return (codes_68881[6]);
4419
4420 /* Larger powers of ten in the 68881 constant ROM are not used
4421 because they are not equal to a `double' C constant. */
4422 return 0;
4423 }
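
/* For example (illustrative): when not tuning for the 68040/68060,
   a CONST_DOUBLE of 1.0 makes this function return 0x32, so
   output_move_const_double emits "fmovecr #0x32,%fp0" instead of
   loading the constant from memory.  */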
4424
4425 /* If X is a floating-point constant, return the logarithm of X base 2,
4426 or 0 if X is not a power of 2. */
4427
4428 int
4429 floating_exact_log2 (rtx x)
4430 {
4431 REAL_VALUE_TYPE r, r1;
4432 int exp;
4433
4434 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4435
4436 if (REAL_VALUES_LESS (r, dconst1))
4437 return 0;
4438
4439 exp = real_exponent (&r);
4440 real_2expN (&r1, exp, DFmode);
4441 if (REAL_VALUES_EQUAL (r1, r))
4442 return exp;
4443
4444 return 0;
4445 }
4446 \f
4447 /* A C compound statement to output to stdio stream STREAM the
4448 assembler syntax for an instruction operand X. X is an RTL
4449 expression.
4450
4451 CODE is a value that can be used to specify one of several ways
4452 of printing the operand. It is used when identical operands
4453 must be printed differently depending on the context. CODE
4454 comes from the `%' specification that was used to request
4455 printing of the operand. If the specification was just `%DIGIT'
4456 then CODE is 0; if the specification was `%LTR DIGIT' then CODE
4457 is the ASCII code for LTR.
4458
4459 If X is a register, this macro should print the register's name.
4460 The names can be found in an array `reg_names' whose type is
4461 `char *[]'. `reg_names' is initialized from `REGISTER_NAMES'.
4462
4463 When the machine description has a specification `%PUNCT' (a `%'
4464 followed by a punctuation character), this macro is called with
4465 a null pointer for X and the punctuation character for CODE.
4466
4467 The m68k specific codes are:
4468
4469 '.' for dot needed in Motorola-style opcode names.
4470 '-' for an operand pushing on the stack:
4471 sp@-, -(sp) or -(%sp) depending on the style of syntax.
4472 '+' for an operand pushing on the stack:
4473 sp@+, (sp)+ or (%sp)+ depending on the style of syntax.
4474 '@' for a reference to the top word on the stack:
4475 sp@, (sp) or (%sp) depending on the style of syntax.
4476 '#' for an immediate operand prefix (# in MIT and Motorola syntax
4477 but & in SGS syntax).
4478 '!' for the cc register (used in an `and to cc' insn).
4479 '$' for the letter `s' in an op code, but only on the 68040.
4480 '&' for the letter `d' in an op code, but only on the 68040.
4481 '/' for register prefix needed by longlong.h.
4482 '?' for m68k_library_id_string
4483
4484 'b' for byte insn (no effect on the Sun; this is for the ISI).
4485 'd' to force memory addressing to be absolute, not relative.
4486 'f' for float insn (print a CONST_DOUBLE as a float rather than in hex)
4487 'x' for float insn (print a CONST_DOUBLE as a float rather than in hex),
4488 or print pair of registers as rx:ry.
4489 'p' print an address with @PLTPC attached, but only if the operand
4490 is not locally-bound. */
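
/* For example (illustrative): with Motorola syntax the template
   "move%.l %1,%-" prints as "move.l %d0,-(%sp)", while with MIT
   syntax it prints as "movel %d0,%sp@-".  */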
4491
4492 void
4493 print_operand (FILE *file, rtx op, int letter)
4494 {
4495 if (letter == '.')
4496 {
4497 if (MOTOROLA)
4498 fprintf (file, ".");
4499 }
4500 else if (letter == '#')
4501 asm_fprintf (file, "%I");
4502 else if (letter == '-')
4503 asm_fprintf (file, MOTOROLA ? "-(%Rsp)" : "%Rsp@-");
4504 else if (letter == '+')
4505 asm_fprintf (file, MOTOROLA ? "(%Rsp)+" : "%Rsp@+");
4506 else if (letter == '@')
4507 asm_fprintf (file, MOTOROLA ? "(%Rsp)" : "%Rsp@");
4508 else if (letter == '!')
4509 asm_fprintf (file, "%Rfpcr");
4510 else if (letter == '$')
4511 {
4512 if (TARGET_68040)
4513 fprintf (file, "s");
4514 }
4515 else if (letter == '&')
4516 {
4517 if (TARGET_68040)
4518 fprintf (file, "d");
4519 }
4520 else if (letter == '/')
4521 asm_fprintf (file, "%R");
4522 else if (letter == '?')
4523 asm_fprintf (file, m68k_library_id_string);
4524 else if (letter == 'p')
4525 {
4526 output_addr_const (file, op);
4527 if (!(GET_CODE (op) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (op)))
4528 fprintf (file, "@PLTPC");
4529 }
4530 else if (GET_CODE (op) == REG)
4531 {
4532 if (letter == 'R')
4533 /* Print out the second register name of a register pair.
4534 I.e., R (6) => 7. */
4535 fputs (M68K_REGNAME(REGNO (op) + 1), file);
4536 else
4537 fputs (M68K_REGNAME(REGNO (op)), file);
4538 }
4539 else if (GET_CODE (op) == MEM)
4540 {
4541 output_address (XEXP (op, 0));
4542 if (letter == 'd' && ! TARGET_68020
4543 && CONSTANT_ADDRESS_P (XEXP (op, 0))
4544 && !(GET_CODE (XEXP (op, 0)) == CONST_INT
4545 && INTVAL (XEXP (op, 0)) < 0x8000
4546 && INTVAL (XEXP (op, 0)) >= -0x8000))
4547 fprintf (file, MOTOROLA ? ".l" : ":l");
4548 }
4549 else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == SFmode)
4550 {
4551 REAL_VALUE_TYPE r;
4552 long l;
4553 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
4554 REAL_VALUE_TO_TARGET_SINGLE (r, l);
4555 asm_fprintf (file, "%I0x%lx", l & 0xFFFFFFFF);
4556 }
4557 else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == XFmode)
4558 {
4559 REAL_VALUE_TYPE r;
4560 long l[3];
4561 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
4562 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
4563 asm_fprintf (file, "%I0x%lx%08lx%08lx", l[0] & 0xFFFFFFFF,
4564 l[1] & 0xFFFFFFFF, l[2] & 0xFFFFFFFF);
4565 }
4566 else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == DFmode)
4567 {
4568 REAL_VALUE_TYPE r;
4569 long l[2];
4570 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
4571 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
4572 asm_fprintf (file, "%I0x%lx%08lx", l[0] & 0xFFFFFFFF, l[1] & 0xFFFFFFFF);
4573 }
4574 else
4575 {
4576 /* Use `print_operand_address' instead of `output_addr_const'
4577 to ensure that we print relevant PIC stuff. */
4578 asm_fprintf (file, "%I");
4579 if (TARGET_PCREL
4580 && (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == CONST))
4581 print_operand_address (file, op);
4582 else
4583 output_addr_const (file, op);
4584 }
4585 }
4586
4587 /* Return string for TLS relocation RELOC. */
4588
4589 static const char *
4590 m68k_get_reloc_decoration (enum m68k_reloc reloc)
4591 {
4592 /* To my knowledge, !MOTOROLA assemblers don't support TLS. */
4593 gcc_assert (MOTOROLA || reloc == RELOC_GOT);
4594
4595 switch (reloc)
4596 {
4597 case RELOC_GOT:
4598 if (MOTOROLA)
4599 {
4600 if (flag_pic == 1 && TARGET_68020)
4601 return "@GOT.w";
4602 else
4603 return "@GOT";
4604 }
4605 else
4606 {
4607 if (TARGET_68020)
4608 {
4609 switch (flag_pic)
4610 {
4611 case 1:
4612 return ":w";
4613 case 2:
4614 return ":l";
4615 default:
4616 return "";
4617 }
4618 }
4619 }
/* !MOTOROLA without TARGET_68020 needs no decoration; return ""
   here rather than falling through to the TLS cases below. */
return "";

4621 case RELOC_TLSGD:
4622 return "@TLSGD";
4623
4624 case RELOC_TLSLDM:
4625 return "@TLSLDM";
4626
4627 case RELOC_TLSLDO:
4628 return "@TLSLDO";
4629
4630 case RELOC_TLSIE:
4631 return "@TLSIE";
4632
4633 case RELOC_TLSLE:
4634 return "@TLSLE";
4635
4636 default:
4637 gcc_unreachable ();
4638 }
4639 }
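
/* For example (illustrative): with Motorola syntax and -fpic on a
   68020, a GOT reference is decorated as "foo@GOT.w", whereas -fPIC
   uses the plain "foo@GOT" form.  */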
4640
4641 /* m68k implementation of OUTPUT_ADDR_CONST_EXTRA. */
4642
4643 bool
4644 m68k_output_addr_const_extra (FILE *file, rtx x)
4645 {
4646 if (GET_CODE (x) == UNSPEC)
4647 {
4648 switch (XINT (x, 1))
4649 {
4650 case UNSPEC_RELOC16:
4651 case UNSPEC_RELOC32:
4652 output_addr_const (file, XVECEXP (x, 0, 0));
4653 fputs (m68k_get_reloc_decoration
4654 ((enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1))), file);
4655 return true;
4656
4657 default:
4658 break;
4659 }
4660 }
4661
4662 return false;
4663 }
4664
4665 /* M68K implementation of TARGET_ASM_OUTPUT_DWARF_DTPREL. */
4666
4667 static void
4668 m68k_output_dwarf_dtprel (FILE *file, int size, rtx x)
4669 {
4670 gcc_assert (size == 4);
4671 fputs ("\t.long\t", file);
4672 output_addr_const (file, x);
4673 fputs ("@TLSLDO+0x8000", file);
4674 }
4675
4676 /* In the name of slightly smaller debug output, and to cater to
4677 general assembler lossage, recognize various UNSPEC sequences
4678 and turn them back into a direct symbol reference. */
4679
4680 static rtx
4681 m68k_delegitimize_address (rtx orig_x)
4682 {
4683 rtx x;
4684 struct m68k_address addr;
4685 rtx unspec;
4686
4687 orig_x = delegitimize_mem_from_attrs (orig_x);
4688 x = orig_x;
4689 if (MEM_P (x))
4690 x = XEXP (x, 0);
4691
4692 if (GET_CODE (x) != PLUS || GET_MODE (x) != Pmode)
4693 return orig_x;
4694
4695 if (!m68k_decompose_address (GET_MODE (x), x, false, &addr)
4696 || addr.offset == NULL_RTX
4697 || GET_CODE (addr.offset) != CONST)
4698 return orig_x;
4699
4700 unspec = XEXP (addr.offset, 0);
4701 if (GET_CODE (unspec) == PLUS && CONST_INT_P (XEXP (unspec, 1)))
4702 unspec = XEXP (unspec, 0);
4703 if (GET_CODE (unspec) != UNSPEC
4704 || (XINT (unspec, 1) != UNSPEC_RELOC16
4705 && XINT (unspec, 1) != UNSPEC_RELOC32))
4706 return orig_x;
4707 x = XVECEXP (unspec, 0, 0);
4708 gcc_assert (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF);
4709 if (unspec != XEXP (addr.offset, 0))
4710 x = gen_rtx_PLUS (Pmode, x, XEXP (XEXP (addr.offset, 0), 1));
4711 if (addr.index)
4712 {
4713 rtx idx = addr.index;
4714 if (addr.scale != 1)
4715 idx = gen_rtx_MULT (Pmode, idx, GEN_INT (addr.scale));
4716 x = gen_rtx_PLUS (Pmode, idx, x);
4717 }
4718 if (addr.base)
4719 x = gen_rtx_PLUS (Pmode, addr.base, x);
4720 if (MEM_P (orig_x))
4721 x = replace_equiv_address_nv (orig_x, x);
4722 return x;
4723 }
4724
4725 \f
4726 /* A C compound statement to output to stdio stream STREAM the
4727 assembler syntax for an instruction operand that is a memory
4728 reference whose address is ADDR. ADDR is an RTL expression.
4729
4730 Note that this contains a kludge that knows that the only reason
4731 we have an address (plus (label_ref...) (reg...)) when not generating
4732 PIC code is in the insn before a tablejump, and we know that m68k.md
4733 generates a label LInnn: on such an insn.
4734
4735 It is possible for PIC to generate a (plus (label_ref...) (reg...))
4736 and we handle that just like we would a (plus (symbol_ref...) (reg...)).
4737
4738 This routine is responsible for distinguishing between -fpic and -fPIC
4739 style relocations in an address. When generating -fpic code the
4740 offset is output in word mode (e.g. movel a5@(_foo:w), a0). When generating
4741 -fPIC code the offset is output in long mode (e.g. movel a5@(_foo:l), a0). */
4742
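/* For example (illustrative): a base-plus-index address with offset 8,
   base %a0 and index %d0 scaled by 4 prints as "8(%a0,%d0.l*4)" in
   Motorola syntax and as "%a0@(8,%d0:l:4)" in MIT syntax.  */
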
4743 void
4744 print_operand_address (FILE *file, rtx addr)
4745 {
4746 struct m68k_address address;
4747
4748 if (!m68k_decompose_address (QImode, addr, true, &address))
4749 gcc_unreachable ();
4750
4751 if (address.code == PRE_DEC)
4752 fprintf (file, MOTOROLA ? "-(%s)" : "%s@-",
4753 M68K_REGNAME (REGNO (address.base)));
4754 else if (address.code == POST_INC)
4755 fprintf (file, MOTOROLA ? "(%s)+" : "%s@+",
4756 M68K_REGNAME (REGNO (address.base)));
4757 else if (!address.base && !address.index)
4758 {
4759 /* A constant address. */
4760 gcc_assert (address.offset == addr);
4761 if (GET_CODE (addr) == CONST_INT)
4762 {
4763 /* (xxx).w or (xxx).l. */
4764 if (IN_RANGE (INTVAL (addr), -0x8000, 0x7fff))
4765 fprintf (file, MOTOROLA ? "%d.w" : "%d:w", (int) INTVAL (addr));
4766 else
4767 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (addr));
4768 }
4769 else if (TARGET_PCREL)
4770 {
4771 /* (d16,PC) or (bd,PC,Xn) (with suppressed index register). */
4772 fputc ('(', file);
4773 output_addr_const (file, addr);
4774 asm_fprintf (file, flag_pic == 1 ? ":w,%Rpc)" : ":l,%Rpc)");
4775 }
4776 else
4777 {
4778 /* (xxx).l. We need a special case for SYMBOL_REF if the symbol
4779 name ends in `.<letter>', as the last 2 characters can be
4780 mistaken as a size suffix. Put the name in parentheses. */
4781 if (GET_CODE (addr) == SYMBOL_REF
4782 && strlen (XSTR (addr, 0)) > 2
4783 && XSTR (addr, 0)[strlen (XSTR (addr, 0)) - 2] == '.')
4784 {
4785 putc ('(', file);
4786 output_addr_const (file, addr);
4787 putc (')', file);
4788 }
4789 else
4790 output_addr_const (file, addr);
4791 }
4792 }
4793 else
4794 {
4795 int labelno;
4796
4797 /* If ADDR is a (d8,pc,Xn) address, this is the number of the
4798 label being accessed, otherwise it is -1. */
4799 labelno = (address.offset
4800 && !address.base
4801 && GET_CODE (address.offset) == LABEL_REF
4802 ? CODE_LABEL_NUMBER (XEXP (address.offset, 0))
4803 : -1);
4804 if (MOTOROLA)
4805 {
4806 /* Print the "offset(base" component. */
4807 if (labelno >= 0)
4808 asm_fprintf (file, "%LL%d(%Rpc,", labelno);
4809 else
4810 {
4811 if (address.offset)
4812 output_addr_const (file, address.offset);
4813
4814 putc ('(', file);
4815 if (address.base)
4816 fputs (M68K_REGNAME (REGNO (address.base)), file);
4817 }
4818 /* Print the ",index" component, if any. */
4819 if (address.index)
4820 {
4821 if (address.base)
4822 putc (',', file);
4823 fprintf (file, "%s.%c",
4824 M68K_REGNAME (REGNO (address.index)),
4825 GET_MODE (address.index) == HImode ? 'w' : 'l');
4826 if (address.scale != 1)
4827 fprintf (file, "*%d", address.scale);
4828 }
4829 putc (')', file);
4830 }
4831 else /* !MOTOROLA */
4832 {
4833 if (!address.offset && !address.index)
4834 fprintf (file, "%s@", M68K_REGNAME (REGNO (address.base)));
4835 else
4836 {
4837 /* Print the "base@(offset" component. */
4838 if (labelno >= 0)
4839 asm_fprintf (file, "%Rpc@(%LL%d", labelno);
4840 else
4841 {
4842 if (address.base)
4843 fputs (M68K_REGNAME (REGNO (address.base)), file);
4844 fprintf (file, "@(");
4845 if (address.offset)
4846 output_addr_const (file, address.offset);
4847 }
4848 /* Print the ",index" component, if any. */
4849 if (address.index)
4850 {
4851 fprintf (file, ",%s:%c",
4852 M68K_REGNAME (REGNO (address.index)),
4853 GET_MODE (address.index) == HImode ? 'w' : 'l');
4854 if (address.scale != 1)
4855 fprintf (file, ":%d", address.scale);
4856 }
4857 putc (')', file);
4858 }
4859 }
4860 }
4861 }
4862 \f
4863 /* Check for cases where a clr insn can be omitted from code using
4864 strict_low_part sets. For example, the second clrl here is not needed:
4865 clrl d0; movw a0@+,d0; use d0; clrl d0; movw a0@+,d0; use d0; ...
4866
4867 MODE is the mode of this STRICT_LOW_PART set. FIRST_INSN is the clear
4868 insn we are checking for redundancy. TARGET is the register set by the
4869 clear insn. */
4870
4871 bool
4872 strict_low_part_peephole_ok (enum machine_mode mode, rtx first_insn,
4873 rtx target)
4874 {
4875 rtx p = first_insn;
4876
4877 while ((p = PREV_INSN (p)))
4878 {
4879 if (NOTE_INSN_BASIC_BLOCK_P (p))
4880 return false;
4881
4882 if (NOTE_P (p))
4883 continue;
4884
4885 /* If it isn't an insn, then give up. */
4886 if (!INSN_P (p))
4887 return false;
4888
4889 if (reg_set_p (target, p))
4890 {
4891 rtx set = single_set (p);
4892 rtx dest;
4893
4894 /* If it isn't an easy to recognize insn, then give up. */
4895 if (! set)
4896 return false;
4897
4898 dest = SET_DEST (set);
4899
4900 /* If this sets the entire target register to zero, then our
4901 first_insn is redundant. */
4902 if (rtx_equal_p (dest, target)
4903 && SET_SRC (set) == const0_rtx)
4904 return true;
4905 else if (GET_CODE (dest) == STRICT_LOW_PART
4906 && GET_CODE (XEXP (dest, 0)) == REG
4907 && REGNO (XEXP (dest, 0)) == REGNO (target)
4908 && (GET_MODE_SIZE (GET_MODE (XEXP (dest, 0)))
4909 <= GET_MODE_SIZE (mode)))
4910 /* This is a strict low part set which modifies less than
4911 we are using, so it is safe. */
4912 ;
4913 else
4914 return false;
4915 }
4916 }
4917
4918 return false;
4919 }
4920
4921 /* Operand predicates for implementing asymmetric pc-relative addressing
4922 on m68k. The m68k supports pc-relative addressing (mode 7, register 2)
4923 when used as a source operand, but not as a destination operand.
4924
4925 We model this by restricting the meaning of the basic predicates
4926 (general_operand, memory_operand, etc) to forbid the use of this
4927 addressing mode, and then define the following predicates that permit
4928 this addressing mode. These predicates can then be used for the
4929 source operands of the appropriate instructions.
4930
4931 n.b. While it is theoretically possible to change all machine patterns
4932 to use this addressing mode where permitted by the architecture,
4933 it has only been implemented for "common" cases: SImode, HImode, and
4934 QImode operands, and only for the principal operations that would
4935 require this addressing mode: data movement and simple integer operations.
4936
4937 In parallel with these new predicates, two new constraint letters
4938 were defined: 'S' and 'T'. 'S' is the -mpcrel analog of 'm'.
4939 'T' replaces 's' in the non-pcrel case. It is a no-op in the pcrel case.
4940 In the pcrel case 's' is only valid in combination with 'a' registers.
4941 See addsi3, subsi3, cmpsi, and movsi patterns for a better understanding
4942 of how these constraints are used.
4943
4944 The use of these predicates is strictly optional, though patterns that
4945 don't will cause an extra reload register to be allocated where one
4946 was not necessary:
4947
4948 lea (abc:w,%pc),%a0 ; need to reload address
4949 moveq &1,%d1 ; since write to pc-relative space
4950 movel %d1,%a0@ ; is not allowed
4951 ...
4952 lea (abc:w,%pc),%a1 ; no need to reload address here
4953 movel %a1@,%d0 ; since "movel (abc:w,%pc),%d0" is ok
4954
4955 For more info, consult tiemann@cygnus.com.
4956
4957
4958 All of the ugliness with predicates and constraints is due to the
4959 simple fact that the m68k does not allow a pc-relative addressing
4960 mode as a destination. gcc does not distinguish between source and
4961 destination addresses. Hence, if we claim that pc-relative address
4962 modes are valid, e.g. TARGET_LEGITIMATE_ADDRESS_P accepts them, then we
4963 end up with invalid code. To get around this problem, we left
4964 pc-relative modes as invalid addresses, and then added special
4965 predicates and constraints to accept them.
4966
4967 A cleaner way to handle this is to modify gcc to distinguish
4968 between source and destination addresses. We can then say that
4969 pc-relative is a valid source address but not a valid destination
4970 address, and hopefully avoid a lot of the predicate and constraint
4971 hackery. Unfortunately, this would be a pretty big change. It would
4972 be a useful change for a number of ports, but there aren't any current
4973 plans to undertake this.
4974
4975 ***************************************************************************/
4976
4977
4978 const char *
4979 output_andsi3 (rtx *operands)
4980 {
4981 int logval;
4982 if (GET_CODE (operands[2]) == CONST_INT
4983 && (INTVAL (operands[2]) | 0xffff) == -1
4984 && (DATA_REG_P (operands[0])
4985 || offsettable_memref_p (operands[0]))
4986 && !TARGET_COLDFIRE)
4987 {
4988 if (GET_CODE (operands[0]) != REG)
4989 operands[0] = adjust_address (operands[0], HImode, 2);
4990 operands[2] = GEN_INT (INTVAL (operands[2]) & 0xffff);
4991 /* Do not delete a following tstl %0 insn; that would be incorrect. */
4992 CC_STATUS_INIT;
4993 if (operands[2] == const0_rtx)
4994 return "clr%.w %0";
4995 return "and%.w %2,%0";
4996 }
4997 if (GET_CODE (operands[2]) == CONST_INT
4998 && (logval = exact_log2 (~ INTVAL (operands[2]) & 0xffffffff)) >= 0
4999 && (DATA_REG_P (operands[0])
5000 || offsettable_memref_p (operands[0])))
5001 {
5002 if (DATA_REG_P (operands[0]))
5003 operands[1] = GEN_INT (logval);
5004 else
5005 {
5006 operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
5007 operands[1] = GEN_INT (logval % 8);
5008 }
5009 /* This does not set condition codes in a standard way. */
5010 CC_STATUS_INIT;
5011 return "bclr %1,%0";
5012 }
5013 return "and%.l %2,%0";
5014 }
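
/* For example (illustrative): on a data register, "and.l #0xFFFF0003"
   becomes "and.w #3" (the upper word is all ones), while
   "and.l #0xFFFEFFFF" becomes "bclr #16" (it clears a single bit).  */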
5015
5016 const char *
5017 output_iorsi3 (rtx *operands)
5018 {
5019 register int logval;
5020 if (GET_CODE (operands[2]) == CONST_INT
5021 && INTVAL (operands[2]) >> 16 == 0
5022 && (DATA_REG_P (operands[0])
5023 || offsettable_memref_p (operands[0]))
5024 && !TARGET_COLDFIRE)
5025 {
5026 if (GET_CODE (operands[0]) != REG)
5027 operands[0] = adjust_address (operands[0], HImode, 2);
5028 /* Do not delete a following tstl %0 insn; that would be incorrect. */
5029 CC_STATUS_INIT;
5030 if (INTVAL (operands[2]) == 0xffff)
5031 return "mov%.w %2,%0";
5032 return "or%.w %2,%0";
5033 }
5034 if (GET_CODE (operands[2]) == CONST_INT
5035 && (logval = exact_log2 (INTVAL (operands[2]) & 0xffffffff)) >= 0
5036 && (DATA_REG_P (operands[0])
5037 || offsettable_memref_p (operands[0])))
5038 {
5039 if (DATA_REG_P (operands[0]))
5040 operands[1] = GEN_INT (logval);
5041 else
5042 {
5043 operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
5044 operands[1] = GEN_INT (logval % 8);
5045 }
5046 CC_STATUS_INIT;
5047 return "bset %1,%0";
5048 }
5049 return "or%.l %2,%0";
5050 }
5051
5052 const char *
5053 output_xorsi3 (rtx *operands)
5054 {
5055 register int logval;
5056 if (GET_CODE (operands[2]) == CONST_INT
5057 && INTVAL (operands[2]) >> 16 == 0
5058 && (offsettable_memref_p (operands[0]) || DATA_REG_P (operands[0]))
5059 && !TARGET_COLDFIRE)
5060 {
5061 if (! DATA_REG_P (operands[0]))
5062 operands[0] = adjust_address (operands[0], HImode, 2);
5063 /* Do not delete a following tstl %0 insn; that would be incorrect. */
5064 CC_STATUS_INIT;
5065 if (INTVAL (operands[2]) == 0xffff)
5066 return "not%.w %0";
5067 return "eor%.w %2,%0";
5068 }
5069 if (GET_CODE (operands[2]) == CONST_INT
5070 && (logval = exact_log2 (INTVAL (operands[2]) & 0xffffffff)) >= 0
5071 && (DATA_REG_P (operands[0])
5072 || offsettable_memref_p (operands[0])))
5073 {
5074 if (DATA_REG_P (operands[0]))
5075 operands[1] = GEN_INT (logval);
5076 else
5077 {
5078 operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
5079 operands[1] = GEN_INT (logval % 8);
5080 }
5081 CC_STATUS_INIT;
5082 return "bchg %1,%0";
5083 }
5084 return "eor%.l %2,%0";
5085 }
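
/* Likewise (illustrative): "or.l #0x10000" on a data register becomes
   "bset #16", and "eor.l #0x10000" becomes "bchg #16".  */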
5086
5087 /* Return the instruction that should be used for a call to address X,
5088 which is known to be in operand 0. */
5089
5090 const char *
5091 output_call (rtx x)
5092 {
5093 if (symbolic_operand (x, VOIDmode))
5094 return m68k_symbolic_call;
5095 else
5096 return "jsr %a0";
5097 }
5098
5099 /* Likewise sibling calls. */
5100
5101 const char *
5102 output_sibcall (rtx x)
5103 {
5104 if (symbolic_operand (x, VOIDmode))
5105 return m68k_symbolic_jump;
5106 else
5107 return "jmp %a0";
5108 }
5109
5110 static void
5111 m68k_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
5112 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
5113 tree function)
5114 {
5115 rtx this_slot, offset, addr, mem, insn, tmp;
5116
5117 /* Avoid clobbering the struct value reg by using the
5118 static chain reg as a temporary. */
5119 tmp = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
5120
5121 /* Pretend to be a post-reload pass while generating rtl. */
5122 reload_completed = 1;
5123
5124 /* The "this" pointer is stored at 4(%sp). */
5125 this_slot = gen_rtx_MEM (Pmode, plus_constant (stack_pointer_rtx, 4));
5126
5127 /* Add DELTA to THIS. */
5128 if (delta != 0)
5129 {
5130 /* Make the offset a legitimate operand for memory addition. */
5131 offset = GEN_INT (delta);
5132 if ((delta < -8 || delta > 8)
5133 && (TARGET_COLDFIRE || USE_MOVQ (delta)))
5134 {
5135 emit_move_insn (gen_rtx_REG (Pmode, D0_REG), offset);
5136 offset = gen_rtx_REG (Pmode, D0_REG);
5137 }
5138 emit_insn (gen_add3_insn (copy_rtx (this_slot),
5139 copy_rtx (this_slot), offset));
5140 }
5141
5142 /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
5143 if (vcall_offset != 0)
5144 {
5145 /* Set the static chain register to *THIS. */
5146 emit_move_insn (tmp, this_slot);
5147 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
5148
5149 /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
5150 addr = plus_constant (tmp, vcall_offset);
5151 if (!m68k_legitimate_address_p (Pmode, addr, true))
5152 {
5153 emit_insn (gen_rtx_SET (VOIDmode, tmp, addr));
5154 addr = tmp;
5155 }
5156
5157 /* Load the offset into %d0 and add it to THIS. */
5158 emit_move_insn (gen_rtx_REG (Pmode, D0_REG),
5159 gen_rtx_MEM (Pmode, addr));
5160 emit_insn (gen_add3_insn (copy_rtx (this_slot),
5161 copy_rtx (this_slot),
5162 gen_rtx_REG (Pmode, D0_REG)));
5163 }
5164
5165 /* Jump to the target function. Use a sibcall if direct jumps are
5166 allowed, otherwise load the address into a register first. */
5167 mem = DECL_RTL (function);
5168 if (!sibcall_operand (XEXP (mem, 0), VOIDmode))
5169 {
5170 gcc_assert (flag_pic);
5171
5172 if (!TARGET_SEP_DATA)
5173 {
5174 /* Use the static chain register as a temporary (call-clobbered)
5175 GOT pointer for this function. We can use the static chain
5176 register because it isn't live on entry to the thunk. */
5177 SET_REGNO (pic_offset_table_rtx, STATIC_CHAIN_REGNUM);
5178 emit_insn (gen_load_got (pic_offset_table_rtx));
5179 }
5180 legitimize_pic_address (XEXP (mem, 0), Pmode, tmp);
5181 mem = replace_equiv_address (mem, tmp);
5182 }
5183 insn = emit_call_insn (gen_sibcall (mem, const0_rtx));
5184 SIBLING_CALL_P (insn) = 1;
5185
5186 /* Run just enough of rest_of_compilation. */
5187 insn = get_insns ();
5188 split_all_insns_noflow ();
5189 final_start_function (insn, file, 1);
5190 final (insn, file, 1);
5191 final_end_function ();
5192
5193 /* Clean up the vars set above. */
5194 reload_completed = 0;
5195
5196 /* Restore the original PIC register. */
5197 if (flag_pic)
5198 SET_REGNO (pic_offset_table_rtx, PIC_REG);
5199 }
5200
5201 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
5202
5203 static rtx
5204 m68k_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
5205 int incoming ATTRIBUTE_UNUSED)
5206 {
5207 return gen_rtx_REG (Pmode, M68K_STRUCT_VALUE_REGNUM);
5208 }
5209
5210 /* Return nonzero if register old_reg can be renamed to register new_reg. */
5211 int
5212 m68k_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED,
5213 unsigned int new_reg)
5214 {
5215
5216 /* Interrupt functions can only use registers that have already been
5217 saved by the prologue, even if they would normally be
5218 call-clobbered. */
5219
5220 if ((m68k_get_function_kind (current_function_decl)
5221 == m68k_fk_interrupt_handler)
5222 && !df_regs_ever_live_p (new_reg))
5223 return 0;
5224
5225 return 1;
5226 }
5227
5228 /* Value is true if hard register REGNO can hold a value of machine-mode
5229 MODE. On the 68000, we let the cpu registers hold any mode, but
5230 restrict the 68881 registers to floating-point modes. */
5231
5232 bool
5233 m68k_regno_mode_ok (int regno, enum machine_mode mode)
5234 {
5235 if (DATA_REGNO_P (regno))
5236 {
5237 /* Data registers can hold any mode, provided the value fits in the remaining data registers. */
5238 if (regno + GET_MODE_SIZE (mode) / 4 <= 8)
5239 return true;
5240 }
5241 else if (ADDRESS_REGNO_P (regno))
5242 {
5243 if (regno + GET_MODE_SIZE (mode) / 4 <= 16)
5244 return true;
5245 }
5246 else if (FP_REGNO_P (regno))
5247 {
5248 /* FPU registers can hold a float or complex float whose component
5249 size is long double or smaller. */
5250 if ((GET_MODE_CLASS (mode) == MODE_FLOAT
5251 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
5252 && GET_MODE_UNIT_SIZE (mode) <= TARGET_FP_REG_SIZE)
5253 return true;
5254 }
5255 return false;
5256 }
5257
5258 /* Implement SECONDARY_RELOAD_CLASS. */
5259
5260 enum reg_class
5261 m68k_secondary_reload_class (enum reg_class rclass,
5262 enum machine_mode mode, rtx x)
5263 {
5264 int regno;
5265
5266 regno = true_regnum (x);
5267
5268 /* If one operand of a movqi is an address register, the other
5269 operand must be a general register or constant. Other types
5270 of operand must be reloaded through a data register. */
5271 if (GET_MODE_SIZE (mode) == 1
5272 && reg_classes_intersect_p (rclass, ADDR_REGS)
5273 && !(INT_REGNO_P (regno) || CONSTANT_P (x)))
5274 return DATA_REGS;
5275
5276 /* PC-relative addresses must be loaded into an address register first. */
5277 if (TARGET_PCREL
5278 && !reg_class_subset_p (rclass, ADDR_REGS)
5279 && symbolic_operand (x, VOIDmode))
5280 return ADDR_REGS;
5281
5282 return NO_REGS;
5283 }
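
/* For example (illustrative): a QImode move between an address
   register and memory returns DATA_REGS here, so reload copies the
   byte through a data register, since byte operations on address
   registers are not available.  */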
5284
5285 /* Implement PREFERRED_RELOAD_CLASS. */
5286
5287 enum reg_class
5288 m68k_preferred_reload_class (rtx x, enum reg_class rclass)
5289 {
5290 enum reg_class secondary_class;
5291
5292 /* If RCLASS might need a secondary reload, try restricting it to
5293 a class that doesn't. */
5294 secondary_class = m68k_secondary_reload_class (rclass, GET_MODE (x), x);
5295 if (secondary_class != NO_REGS
5296 && reg_class_subset_p (secondary_class, rclass))
5297 return secondary_class;
5298
5299 /* Prefer to use moveq for in-range constants. */
5300 if (GET_CODE (x) == CONST_INT
5301 && reg_class_subset_p (DATA_REGS, rclass)
5302 && IN_RANGE (INTVAL (x), -0x80, 0x7f))
5303 return DATA_REGS;
5304
5305 /* ??? Do we really need this now? */
5306 if (GET_CODE (x) == CONST_DOUBLE
5307 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
5308 {
5309 if (TARGET_HARD_FLOAT && reg_class_subset_p (FP_REGS, rclass))
5310 return FP_REGS;
5311
5312 return NO_REGS;
5313 }
5314
5315 return rclass;
5316 }
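
/* For example (illustrative): reloading the constant 5 into a
   GENERAL_REGS class returns DATA_REGS, since 5 is within moveq's
   range of -128..127.  */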
5317
5318 /* Return floating point values in a 68881 register. This makes 68881 code
5319 a little bit faster. It also makes -msoft-float code incompatible with
5320 hard-float code, so people have to be careful not to mix the two.
5321 For ColdFire it was decided the ABI incompatibility is undesirable.
5322 If there is need for a hard-float ABI it is probably worth doing it
5323 properly and also passing function arguments in FP registers. */
5324 rtx
5325 m68k_libcall_value (enum machine_mode mode)
5326 {
5327 switch (mode) {
5328 case SFmode:
5329 case DFmode:
5330 case XFmode:
5331 if (TARGET_68881)
5332 return gen_rtx_REG (mode, FP0_REG);
5333 break;
5334 default:
5335 break;
5336 }
5337
5338 return gen_rtx_REG (mode, m68k_libcall_value_in_a0_p ? A0_REG : D0_REG);
5339 }
5340
5341 /* Location in which function value is returned.
5342 NOTE: Due to differences in ABIs, don't call this function directly,
5343 use FUNCTION_VALUE instead. */
5344 rtx
5345 m68k_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED)
5346 {
5347 enum machine_mode mode;
5348
5349 mode = TYPE_MODE (valtype);
5350 switch (mode) {
5351 case SFmode:
5352 case DFmode:
5353 case XFmode:
5354 if (TARGET_68881)
5355 return gen_rtx_REG (mode, FP0_REG);
5356 break;
5357 default:
5358 break;
5359 }
5360
5361 /* If the function returns a pointer, push that into %a0. */
5362 if (func && POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (func))))
5363 /* For compatibility with the large body of existing code which
5364 does not always properly declare external functions returning
5365 pointer types, the m68k/SVR4 convention is to copy the value
5366 returned for pointer functions from a0 to d0 in the function
5367 epilogue, so that callers that have neglected to properly
5368 declare the callee can still find the correct return value in
5369 d0. */
5370 return gen_rtx_PARALLEL
5371 (mode,
5372 gen_rtvec (2,
5373 gen_rtx_EXPR_LIST (VOIDmode,
5374 gen_rtx_REG (mode, A0_REG),
5375 const0_rtx),
5376 gen_rtx_EXPR_LIST (VOIDmode,
5377 gen_rtx_REG (mode, D0_REG),
5378 const0_rtx)));
5379 else if (POINTER_TYPE_P (valtype))
5380 return gen_rtx_REG (mode, A0_REG);
5381 else
5382 return gen_rtx_REG (mode, D0_REG);
5383 }
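
/* For example (illustrative): a function declared to return "char *"
   returns its value in both %a0 and %d0 via the PARALLEL above, a
   function returning "int" uses %d0 alone, and, with TARGET_68881,
   one returning "double" uses %fp0.  */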
5384
5385 /* Worker function for TARGET_RETURN_IN_MEMORY. */
5386 #if M68K_HONOR_TARGET_STRICT_ALIGNMENT
5387 static bool
5388 m68k_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
5389 {
5390 enum machine_mode mode = TYPE_MODE (type);
5391
5392 if (mode == BLKmode)
5393 return true;
5394
5395 /* If TYPE's known alignment is less than the alignment of MODE that
5396 would contain the structure, then return in memory. We need to
5397 do so to maintain the compatibility between code compiled with
5398 -mstrict-align and that compiled with -mno-strict-align. */
5399 if (AGGREGATE_TYPE_P (type)
5400 && TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (mode))
5401 return true;
5402
5403 return false;
5404 }
5405 #endif
5406
5407 /* CPU to schedule the program for. */
5408 enum attr_cpu m68k_sched_cpu;
5409
5410 /* MAC to schedule the program for. */
5411 enum attr_mac m68k_sched_mac;
5412
5413 /* Operand type. */
5414 enum attr_op_type
5415 {
5416 /* No operand. */
5417 OP_TYPE_NONE,
5418
5419 /* Integer register. */
5420 OP_TYPE_RN,
5421
5422 /* FP register. */
5423 OP_TYPE_FPN,
5424
5425 /* Implicit mem reference (e.g. stack). */
5426 OP_TYPE_MEM1,
5427
5428 /* Memory without offset or indexing. EA modes 2, 3 and 4. */
5429 OP_TYPE_MEM234,
5430
5431 /* Memory with offset but without indexing. EA mode 5. */
5432 OP_TYPE_MEM5,
5433
5434 /* Memory with indexing. EA mode 6. */
5435 OP_TYPE_MEM6,
5436
5437 /* Memory referenced by absolute address. EA mode 7. */
5438 OP_TYPE_MEM7,
5439
5440 /* Immediate operand that doesn't require extension word. */
5441 OP_TYPE_IMM_Q,
5442
5443 /* Immediate 16 bit operand. */
5444 OP_TYPE_IMM_W,
5445
5446 /* Immediate 32 bit operand. */
5447 OP_TYPE_IMM_L
5448 };
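
/* For example (illustrative): an operand (%a0) is classified as
   OP_TYPE_MEM234, 8(%a0) as OP_TYPE_MEM5, 8(%a0,%d0.l) as
   OP_TYPE_MEM6, (foo).l as OP_TYPE_MEM7, and the immediate #5 in a
   quick instruction as OP_TYPE_IMM_Q.  */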
5449
5450 /* Return the type of the memory access that ADDR_RTX refers to. */
5451 static enum attr_op_type
5452 sched_address_type (enum machine_mode mode, rtx addr_rtx)
5453 {
5454 struct m68k_address address;
5455
5456 if (symbolic_operand (addr_rtx, VOIDmode))
5457 return OP_TYPE_MEM7;
5458
5459 if (!m68k_decompose_address (mode, addr_rtx,
5460 reload_completed, &address))
5461 {
5462 gcc_assert (!reload_completed);
5463 /* Reload will likely fix the address up so that it is in a register. */
5464 return OP_TYPE_MEM234;
5465 }
5466
5467 if (address.scale != 0)
5468 return OP_TYPE_MEM6;
5469
5470 if (address.base != NULL_RTX)
5471 {
5472 if (address.offset == NULL_RTX)
5473 return OP_TYPE_MEM234;
5474
5475 return OP_TYPE_MEM5;
5476 }
5477
5478 gcc_assert (address.offset != NULL_RTX);
5479
5480 return OP_TYPE_MEM7;
5481 }
5482
5483 /* Return X or Y (depending on OPX_P) operand of INSN. */
5484 static rtx
5485 sched_get_operand (rtx insn, bool opx_p)
5486 {
5487 int i;
5488
5489 if (recog_memoized (insn) < 0)
5490 gcc_unreachable ();
5491
5492 extract_constrain_insn_cached (insn);
5493
5494 if (opx_p)
5495 i = get_attr_opx (insn);
5496 else
5497 i = get_attr_opy (insn);
5498
5499 if (i >= recog_data.n_operands)
5500 return NULL;
5501
5502 return recog_data.operand[i];
5503 }
5504
5505 /* Return type of INSN's operand X (if OPX_P) or operand Y (if !OPX_P).
5506 If ADDRESS_P is true, return type of memory location operand refers to. */
5507 static enum attr_op_type
5508 sched_attr_op_type (rtx insn, bool opx_p, bool address_p)
5509 {
5510 rtx op;
5511
5512 op = sched_get_operand (insn, opx_p);
5513
5514 if (op == NULL)
5515 {
5516 gcc_assert (!reload_completed);
5517 return OP_TYPE_RN;
5518 }
5519
5520 if (address_p)
5521 return sched_address_type (QImode, op);
5522
5523 if (memory_operand (op, VOIDmode))
5524 return sched_address_type (GET_MODE (op), XEXP (op, 0));
5525
5526 if (register_operand (op, VOIDmode))
5527 {
5528 if ((!reload_completed && FLOAT_MODE_P (GET_MODE (op)))
5529 || (reload_completed && FP_REG_P (op)))
5530 return OP_TYPE_FPN;
5531
5532 return OP_TYPE_RN;
5533 }
5534
5535 if (GET_CODE (op) == CONST_INT)
5536 {
5537 int ival;
5538
5539 ival = INTVAL (op);
5540
5541 /* Check for quick constants. */
5542 switch (get_attr_type (insn))
5543 {
5544 case TYPE_ALUQ_L:
5545 if (IN_RANGE (ival, 1, 8) || IN_RANGE (ival, -8, -1))
5546 return OP_TYPE_IMM_Q;
5547
5548 gcc_assert (!reload_completed);
5549 break;
5550
5551 case TYPE_MOVEQ_L:
5552 if (USE_MOVQ (ival))
5553 return OP_TYPE_IMM_Q;
5554
5555 gcc_assert (!reload_completed);
5556 break;
5557
5558 case TYPE_MOV3Q_L:
5559 if (valid_mov3q_const (ival))
5560 return OP_TYPE_IMM_Q;
5561
5562 gcc_assert (!reload_completed);
5563 break;
5564
5565 default:
5566 break;
5567 }
5568
5569 if (IN_RANGE (ival, -0x8000, 0x7fff))
5570 return OP_TYPE_IMM_W;
5571
5572 return OP_TYPE_IMM_L;
5573 }
5574
5575 if (GET_CODE (op) == CONST_DOUBLE)
5576 {
5577 switch (GET_MODE (op))
5578 {
5579 case SFmode:
5580 return OP_TYPE_IMM_W;
5581
5582 case VOIDmode:
5583 case DFmode:
5584 return OP_TYPE_IMM_L;
5585
5586 default:
5587 gcc_unreachable ();
5588 }
5589 }
5590
5591 if (GET_CODE (op) == CONST
5592 || symbolic_operand (op, VOIDmode)
5593 || LABEL_P (op))
5594 {
5595 switch (GET_MODE (op))
5596 {
5597 case QImode:
5598 return OP_TYPE_IMM_Q;
5599
5600 case HImode:
5601 return OP_TYPE_IMM_W;
5602
5603 case SImode:
5604 return OP_TYPE_IMM_L;
5605
5606 default:
5607 if (symbolic_operand (m68k_unwrap_symbol (op, false), VOIDmode))
5608 /* Just a guess. */
5609 return OP_TYPE_IMM_W;
5610
5611 return OP_TYPE_IMM_L;
5612 }
5613 }
5614
5615 gcc_assert (!reload_completed);
5616
5617 if (FLOAT_MODE_P (GET_MODE (op)))
5618 return OP_TYPE_FPN;
5619
5620 return OP_TYPE_RN;
5621 }
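
/* Illustrative immediate classifications made by the code above
   (the instruction operands here are hypothetical examples):

     addq.l #4,%d0       -> OP_TYPE_IMM_Q  (aluq range 1..8 / -8..-1)
     moveq #-100,%d0     -> OP_TYPE_IMM_Q  (moveq range)
     add.l #1000,%d0     -> OP_TYPE_IMM_W  (fits in a signed 16-bit word)
     add.l #100000,%d0   -> OP_TYPE_IMM_L  (needs a full 32-bit word)  */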
5622
5623 /* Implement opx_type attribute.
5624 Return type of INSN's operand X.
5625 If ADDRESS_P is true, return type of memory location operand refers to. */
5626 enum attr_opx_type
5627 m68k_sched_attr_opx_type (rtx insn, int address_p)
5628 {
5629 switch (sched_attr_op_type (insn, true, address_p != 0))
5630 {
5631 case OP_TYPE_RN:
5632 return OPX_TYPE_RN;
5633
5634 case OP_TYPE_FPN:
5635 return OPX_TYPE_FPN;
5636
5637 case OP_TYPE_MEM1:
5638 return OPX_TYPE_MEM1;
5639
5640 case OP_TYPE_MEM234:
5641 return OPX_TYPE_MEM234;
5642
5643 case OP_TYPE_MEM5:
5644 return OPX_TYPE_MEM5;
5645
5646 case OP_TYPE_MEM6:
5647 return OPX_TYPE_MEM6;
5648
5649 case OP_TYPE_MEM7:
5650 return OPX_TYPE_MEM7;
5651
5652 case OP_TYPE_IMM_Q:
5653 return OPX_TYPE_IMM_Q;
5654
5655 case OP_TYPE_IMM_W:
5656 return OPX_TYPE_IMM_W;
5657
5658 case OP_TYPE_IMM_L:
5659 return OPX_TYPE_IMM_L;
5660
5661 default:
5662 gcc_unreachable ();
5663 }
5664 }
5665
5666 /* Implement opy_type attribute.
5667 Return type of INSN's operand Y.
5668 If ADDRESS_P is true, return type of memory location operand refers to. */
5669 enum attr_opy_type
5670 m68k_sched_attr_opy_type (rtx insn, int address_p)
5671 {
5672 switch (sched_attr_op_type (insn, false, address_p != 0))
5673 {
5674 case OP_TYPE_RN:
5675 return OPY_TYPE_RN;
5676
5677 case OP_TYPE_FPN:
5678 return OPY_TYPE_FPN;
5679
5680 case OP_TYPE_MEM1:
5681 return OPY_TYPE_MEM1;
5682
5683 case OP_TYPE_MEM234:
5684 return OPY_TYPE_MEM234;
5685
5686 case OP_TYPE_MEM5:
5687 return OPY_TYPE_MEM5;
5688
5689 case OP_TYPE_MEM6:
5690 return OPY_TYPE_MEM6;
5691
5692 case OP_TYPE_MEM7:
5693 return OPY_TYPE_MEM7;
5694
5695 case OP_TYPE_IMM_Q:
5696 return OPY_TYPE_IMM_Q;
5697
5698 case OP_TYPE_IMM_W:
5699 return OPY_TYPE_IMM_W;
5700
5701 case OP_TYPE_IMM_L:
5702 return OPY_TYPE_IMM_L;
5703
5704 default:
5705 gcc_unreachable ();
5706 }
5707 }
5708
5709 /* Return the size of INSN, in words, as an int. */
5710 static int
5711 sched_get_attr_size_int (rtx insn)
5712 {
5713 int size;
5714
5715 switch (get_attr_type (insn))
5716 {
5717 case TYPE_IGNORE:
5718 /* There should be no references to m68k_sched_attr_size for 'ignore'
5719 instructions. */
5720 gcc_unreachable ();
5721 return 0;
5722
5723 case TYPE_MUL_L:
5724 size = 2;
5725 break;
5726
5727 default:
5728 size = 1;
5729 break;
5730 }
5731
5732 switch (get_attr_opx_type (insn))
5733 {
5734 case OPX_TYPE_NONE:
5735 case OPX_TYPE_RN:
5736 case OPX_TYPE_FPN:
5737 case OPX_TYPE_MEM1:
5738 case OPX_TYPE_MEM234:
5739 case OPX_TYPE_IMM_Q:
5740 break;
5741
5742 case OPX_TYPE_MEM5:
5743 case OPX_TYPE_MEM6:
5744 /* Here we assume that most absolute references are short. */
5745 case OPX_TYPE_MEM7:
5746 case OPX_TYPE_IMM_W:
5747 ++size;
5748 break;
5749
5750 case OPX_TYPE_IMM_L:
5751 size += 2;
5752 break;
5753
5754 default:
5755 gcc_unreachable ();
5756 }
5757
5758 switch (get_attr_opy_type (insn))
5759 {
5760 case OPY_TYPE_NONE:
5761 case OPY_TYPE_RN:
5762 case OPY_TYPE_FPN:
5763 case OPY_TYPE_MEM1:
5764 case OPY_TYPE_MEM234:
5765 case OPY_TYPE_IMM_Q:
5766 break;
5767
5768 case OPY_TYPE_MEM5:
5769 case OPY_TYPE_MEM6:
5770 /* Here we assume that most absolute references are short. */
5771 case OPY_TYPE_MEM7:
5772 case OPY_TYPE_IMM_W:
5773 ++size;
5774 break;
5775
5776 case OPY_TYPE_IMM_L:
5777 size += 2;
5778 break;
5779
5780 default:
5781 gcc_unreachable ();
5782 }
5783
5784 if (size > 3)
5785 {
5786 gcc_assert (!reload_completed);
5787
5788 size = 3;
5789 }
5790
5791 return size;
5792 }
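
/* A worked example of the size computation above, in words:

     move.l (%a0),%d0    base 1, opy MEM234 +0, opx RN +0  ->  1
     move.l 4(%a0),%d0   base 1, opy MEM5   +1, opx RN +0  ->  2
     move.l #70000,%d0   base 1, opy IMM_L  +2, opx RN +0  ->  3

   Sizes greater than 3 can only occur before reload, as ColdFire
   instructions are at most three words long.  */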
5793
5794 /* Return size of INSN as attribute enum value. */
5795 enum attr_size
5796 m68k_sched_attr_size (rtx insn)
5797 {
5798 switch (sched_get_attr_size_int (insn))
5799 {
5800 case 1:
5801 return SIZE_1;
5802
5803 case 2:
5804 return SIZE_2;
5805
5806 case 3:
5807 return SIZE_3;
5808
5809 default:
5810 gcc_unreachable ();
5811 }
5812 }
5813
5814 /* Classify operand X or Y (depending on OPX_P) of INSN as a register
5815 (OP_TYPE_RN), plain memory (OP_TYPE_MEM1) or indexed memory (OP_TYPE_MEM6). */
5816 static enum attr_op_type
5817 sched_get_opxy_mem_type (rtx insn, bool opx_p)
5818 {
5819 if (opx_p)
5820 {
5821 switch (get_attr_opx_type (insn))
5822 {
5823 case OPX_TYPE_NONE:
5824 case OPX_TYPE_RN:
5825 case OPX_TYPE_FPN:
5826 case OPX_TYPE_IMM_Q:
5827 case OPX_TYPE_IMM_W:
5828 case OPX_TYPE_IMM_L:
5829 return OP_TYPE_RN;
5830
5831 case OPX_TYPE_MEM1:
5832 case OPX_TYPE_MEM234:
5833 case OPX_TYPE_MEM5:
5834 case OPX_TYPE_MEM7:
5835 return OP_TYPE_MEM1;
5836
5837 case OPX_TYPE_MEM6:
5838 return OP_TYPE_MEM6;
5839
5840 default:
5841 gcc_unreachable ();
5842 }
5843 }
5844 else
5845 {
5846 switch (get_attr_opy_type (insn))
5847 {
5848 case OPY_TYPE_NONE:
5849 case OPY_TYPE_RN:
5850 case OPY_TYPE_FPN:
5851 case OPY_TYPE_IMM_Q:
5852 case OPY_TYPE_IMM_W:
5853 case OPY_TYPE_IMM_L:
5854 return OP_TYPE_RN;
5855
5856 case OPY_TYPE_MEM1:
5857 case OPY_TYPE_MEM234:
5858 case OPY_TYPE_MEM5:
5859 case OPY_TYPE_MEM7:
5860 return OP_TYPE_MEM1;
5861
5862 case OPY_TYPE_MEM6:
5863 return OP_TYPE_MEM6;
5864
5865 default:
5866 gcc_unreachable ();
5867 }
5868 }
5869 }
5870
5871 /* Implement op_mem attribute. */
5872 enum attr_op_mem
5873 m68k_sched_attr_op_mem (rtx insn)
5874 {
5875 enum attr_op_type opx;
5876 enum attr_op_type opy;
5877
5878 opx = sched_get_opxy_mem_type (insn, true);
5879 opy = sched_get_opxy_mem_type (insn, false);
5880
5881 if (opy == OP_TYPE_RN && opx == OP_TYPE_RN)
5882 return OP_MEM_00;
5883
5884 if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM1)
5885 {
5886 switch (get_attr_opx_access (insn))
5887 {
5888 case OPX_ACCESS_R:
5889 return OP_MEM_10;
5890
5891 case OPX_ACCESS_W:
5892 return OP_MEM_01;
5893
5894 case OPX_ACCESS_RW:
5895 return OP_MEM_11;
5896
5897 default:
5898 gcc_unreachable ();
5899 }
5900 }
5901
5902 if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM6)
5903 {
5904 switch (get_attr_opx_access (insn))
5905 {
5906 case OPX_ACCESS_R:
5907 return OP_MEM_I0;
5908
5909 case OPX_ACCESS_W:
5910 return OP_MEM_0I;
5911
5912 case OPX_ACCESS_RW:
5913 return OP_MEM_I1;
5914
5915 default:
5916 gcc_unreachable ();
5917 }
5918 }
5919
5920 if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_RN)
5921 return OP_MEM_10;
5922
5923 if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM1)
5924 {
5925 switch (get_attr_opx_access (insn))
5926 {
5927 case OPX_ACCESS_W:
5928 return OP_MEM_11;
5929
5930 default:
5931 gcc_assert (!reload_completed);
5932 return OP_MEM_11;
5933 }
5934 }
5935
5936 if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM6)
5937 {
5938 switch (get_attr_opx_access (insn))
5939 {
5940 case OPX_ACCESS_W:
5941 return OP_MEM_1I;
5942
5943 default:
5944 gcc_assert (!reload_completed);
5945 return OP_MEM_1I;
5946 }
5947 }
5948
5949 if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_RN)
5950 return OP_MEM_I0;
5951
5952 if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM1)
5953 {
5954 switch (get_attr_opx_access (insn))
5955 {
5956 case OPX_ACCESS_W:
5957 return OP_MEM_I1;
5958
5959 default:
5960 gcc_assert (!reload_completed);
5961 return OP_MEM_I1;
5962 }
5963 }
5964
5965 gcc_assert (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM6);
5966 gcc_assert (!reload_completed);
5967 return OP_MEM_I1;
5968 }
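
/* Illustrative op_mem classifications produced by the code above:

     add.l %d1,%d0          -> OP_MEM_00  (no memory operands)
     add.l (%a0),%d0        -> OP_MEM_10  (memory read)
     move.l %d0,(%a0)       -> OP_MEM_01  (memory write)
     add.l %d0,(%a0)        -> OP_MEM_11  (memory read and write)
     add.l (%a0,%d0.l),%d1  -> OP_MEM_I0  (indexed memory read)  */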
5969
5970 /* Jump instruction types, indexed by INSN_UID.
5971 The same rtl insn can be expanded into different asm instructions
5972 depending on the cc0 status. To properly determine the type of a
5973 jump insn we scan the instruction stream and record jump types in
5974 this array. */
5975 static enum attr_type *sched_branch_type;
5976
5977 /* Return the type of the jump insn. */
5978 enum attr_type
5979 m68k_sched_branch_type (rtx insn)
5980 {
5981 enum attr_type type;
5982
5983 type = sched_branch_type[INSN_UID (insn)];
5984
5985 gcc_assert (type != 0);
5986
5987 return type;
5988 }
5989
5990 /* Data for ColdFire V4 index bypass.
5991 Producer modifies register that is used as index in consumer with
5992 specified scale. */
5993 static struct
5994 {
5995 /* Producer instruction. */
5996 rtx pro;
5997
5998 /* Consumer instruction. */
5999 rtx con;
6000
6001 /* Scale of the indexed memory access within the consumer,
6002 or zero if the bypass should not be effective at the moment. */
6003 int scale;
6004 } sched_cfv4_bypass_data;
6005
6006 /* An empty state that is used in m68k_sched_adjust_cost. */
6007 static state_t sched_adjust_cost_state;
6008
6009 /* Implement adjust_cost scheduler hook.
6010 Return adjusted COST of dependency LINK between DEF_INSN and INSN. */
6011 static int
6012 m68k_sched_adjust_cost (rtx insn, rtx link ATTRIBUTE_UNUSED, rtx def_insn,
6013 int cost)
6014 {
6015 int delay;
6016
6017 if (recog_memoized (def_insn) < 0
6018 || recog_memoized (insn) < 0)
6019 return cost;
6020
6021 if (sched_cfv4_bypass_data.scale == 1)
6022 /* Handle ColdFire V4 bypass for indexed address with 1x scale. */
6023 {
6024 /* haifa-sched.c: insn_cost () calls bypass_p () just before
6025 targetm.sched.adjust_cost (). Hence, we can be relatively sure
6026 that the data in sched_cfv4_bypass_data is up to date. */
6027 gcc_assert (sched_cfv4_bypass_data.pro == def_insn
6028 && sched_cfv4_bypass_data.con == insn);
6029
6030 if (cost < 3)
6031 cost = 3;
6032
6033 sched_cfv4_bypass_data.pro = NULL;
6034 sched_cfv4_bypass_data.con = NULL;
6035 sched_cfv4_bypass_data.scale = 0;
6036 }
6037 else
6038 gcc_assert (sched_cfv4_bypass_data.pro == NULL
6039 && sched_cfv4_bypass_data.con == NULL
6040 && sched_cfv4_bypass_data.scale == 0);
6041
6042 /* Don't try to issue INSN earlier than DFA permits.
6043 This is especially useful for instructions that write to memory,
6044 as their true dependence (default) latency is better set to 0
6045 to work around alias analysis limitations.
6046 This is, in fact, a machine-independent tweak, so it should
6047 probably be moved to haifa-sched.c: insn_cost (). */
6048 delay = min_insn_conflict_delay (sched_adjust_cost_state, def_insn, insn);
6049 if (delay > cost)
6050 cost = delay;
6051
6052 return cost;
6053 }
6054
6055 /* Return the maximum number of insns that can be scheduled in a single cycle. */
6056 static int
6057 m68k_sched_issue_rate (void)
6058 {
6059 switch (m68k_sched_cpu)
6060 {
6061 case CPU_CFV1:
6062 case CPU_CFV2:
6063 case CPU_CFV3:
6064 return 1;
6065
6066 case CPU_CFV4:
6067 return 2;
6068
6069 default:
6070 gcc_unreachable ();
6071 return 0;
6072 }
6073 }
6074
6075 /* Maximum length in words of an instruction for the current CPU;
6076 e.g. it is 3 for any ColdFire core. */
6077 static int max_insn_size;
6078
6079 /* Data to model the instruction buffer of the CPU. */
6080 struct _sched_ib
6081 {
6082 /* True if the instruction buffer is modeled for the current CPU. */
6083 bool enabled_p;
6084
6085 /* Size of the instruction buffer in words. */
6086 int size;
6087
6088 /* Number of filled words in the instruction buffer. */
6089 int filled;
6090
6091 /* Additional information about instruction buffer for CPUs that have
6092 a buffer of instruction records, rather than a plain buffer
6093 of instruction words. */
6094 struct _sched_ib_records
6095 {
6096 /* Size of buffer in records. */
6097 int n_insns;
6098
6099 /* Array holding the adjustments made to the size of the buffer. */
6100 int *adjust;
6101
6102 /* Current index into the above array. */
6103 int adjust_index;
6104 } records;
6105
6106 /* An insn that reserves (marks empty) one word in the instruction buffer. */
6107 rtx insn;
6108 };
6109
6110 static struct _sched_ib sched_ib;
6111
6112 /* ID of memory unit. */
6113 static int sched_mem_unit_code;
6114
6115 /* Implementation of the targetm.sched.variable_issue () hook.
6116 It is called after INSN was issued. It returns the number of insns
6117 that can possibly get scheduled on the current cycle.
6118 It is used here to determine the effect of INSN on the instruction
6119 buffer. */
6120 static int
6121 m68k_sched_variable_issue (FILE *sched_dump ATTRIBUTE_UNUSED,
6122 int sched_verbose ATTRIBUTE_UNUSED,
6123 rtx insn, int can_issue_more)
6124 {
6125 int insn_size;
6126
6127 if (recog_memoized (insn) >= 0 && get_attr_type (insn) != TYPE_IGNORE)
6128 {
6129 switch (m68k_sched_cpu)
6130 {
6131 case CPU_CFV1:
6132 case CPU_CFV2:
6133 insn_size = sched_get_attr_size_int (insn);
6134 break;
6135
6136 case CPU_CFV3:
6137 insn_size = sched_get_attr_size_int (insn);
6138
6139 /* ColdFire V3 and V4 cores have instruction buffers that can
6140 accumulate up to 8 instructions regardless of instructions'
6141 sizes. So we should take care not to "prefetch" 24 one-word
6142 or 12 two-word instructions.
6143 To model this behavior we temporarily decrease size of the
6144 buffer by (max_insn_size - insn_size) for next 7 instructions. */
6145 {
6146 int adjust;
6147
6148 adjust = max_insn_size - insn_size;
6149 sched_ib.size -= adjust;
6150
6151 if (sched_ib.filled > sched_ib.size)
6152 sched_ib.filled = sched_ib.size;
6153
6154 sched_ib.records.adjust[sched_ib.records.adjust_index] = adjust;
6155 }
6156
6157 ++sched_ib.records.adjust_index;
6158 if (sched_ib.records.adjust_index == sched_ib.records.n_insns)
6159 sched_ib.records.adjust_index = 0;
6160
6161 /* Undo the adjustment we made 7 instructions ago. */
6162 sched_ib.size
6163 += sched_ib.records.adjust[sched_ib.records.adjust_index];
6164
6165 break;
6166
6167 case CPU_CFV4:
6168 gcc_assert (!sched_ib.enabled_p);
6169 insn_size = 0;
6170 break;
6171
6172 default:
6173 gcc_unreachable ();
6174 }
6175
6176 gcc_assert (insn_size <= sched_ib.filled);
6177 --can_issue_more;
6178 }
6179 else if (GET_CODE (PATTERN (insn)) == ASM_INPUT
6180 || asm_noperands (PATTERN (insn)) >= 0)
6181 insn_size = sched_ib.filled;
6182 else
6183 insn_size = 0;
6184
6185 sched_ib.filled -= insn_size;
6186
6187 return can_issue_more;
6188 }
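
/* For example, on ColdFire V3 (max_insn_size == 3 and an 8-record
   buffer, so sched_ib.size starts at 24 words), issuing a one-word
   insn temporarily shrinks the modeled buffer by 2 words for the
   next 7 insns.  This is only an illustration of the adjustment
   scheme implemented above, which keeps the model from "prefetching"
   more records than the hardware buffer can hold.  */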
6189
6190 /* Return the number of instructions the scheduler should look ahead at
6191 to choose the best insn. */
6192 static int
6193 m68k_sched_first_cycle_multipass_dfa_lookahead (void)
6194 {
6195 return m68k_sched_issue_rate () - 1;
6196 }
6197
6198 /* Implementation of targetm.sched.init_global () hook.
6199 It is invoked once per scheduling pass and is used here
6200 to initialize scheduler constants. */
6201 static void
6202 m68k_sched_md_init_global (FILE *sched_dump ATTRIBUTE_UNUSED,
6203 int sched_verbose ATTRIBUTE_UNUSED,
6204 int n_insns ATTRIBUTE_UNUSED)
6205 {
6206 /* Init branch types. */
6207 {
6208 rtx insn;
6209
6210 sched_branch_type = XCNEWVEC (enum attr_type, get_max_uid () + 1);
6211
6212 for (insn = get_insns (); insn != NULL_RTX; insn = NEXT_INSN (insn))
6213 {
6214 if (JUMP_P (insn))
6215 /* !!! FIXME: Implement real scan here. */
6216 sched_branch_type[INSN_UID (insn)] = TYPE_BCC;
6217 }
6218 }
6219
6220 #ifdef ENABLE_CHECKING
6221 /* Check that all instructions have DFA reservations and
6222 that all instructions can be issued from a clean state. */
6223 {
6224 rtx insn;
6225 state_t state;
6226
6227 state = alloca (state_size ());
6228
6229 for (insn = get_insns (); insn != NULL_RTX; insn = NEXT_INSN (insn))
6230 {
6231 if (INSN_P (insn) && recog_memoized (insn) >= 0)
6232 {
6233 gcc_assert (insn_has_dfa_reservation_p (insn));
6234
6235 state_reset (state);
6236 if (state_transition (state, insn) >= 0)
6237 gcc_unreachable ();
6238 }
6239 }
6240 }
6241 #endif
6242
6243 /* Set up the target CPU. */
6244
6245 /* ColdFire V4 has a set of features to keep its instruction buffer full
6246 (e.g., a separate memory bus for instructions) and, hence, we do not model
6247 buffer for this CPU. */
6248 sched_ib.enabled_p = (m68k_sched_cpu != CPU_CFV4);
6249
6250 switch (m68k_sched_cpu)
6251 {
6252 case CPU_CFV4:
6253 sched_ib.filled = 0;
6254
6255 /* FALLTHRU */
6256
6257 case CPU_CFV1:
6258 case CPU_CFV2:
6259 max_insn_size = 3;
6260 sched_ib.records.n_insns = 0;
6261 sched_ib.records.adjust = NULL;
6262 break;
6263
6264 case CPU_CFV3:
6265 max_insn_size = 3;
6266 sched_ib.records.n_insns = 8;
6267 sched_ib.records.adjust = XNEWVEC (int, sched_ib.records.n_insns);
6268 break;
6269
6270 default:
6271 gcc_unreachable ();
6272 }
6273
6274 sched_mem_unit_code = get_cpu_unit_code ("cf_mem1");
6275
6276 sched_adjust_cost_state = xmalloc (state_size ());
6277 state_reset (sched_adjust_cost_state);
6278
6279 start_sequence ();
6280 emit_insn (gen_ib ());
6281 sched_ib.insn = get_insns ();
6282 end_sequence ();
6283 }
6284
6285 /* Scheduling pass is now finished. Free/reset static variables. */
6286 static void
6287 m68k_sched_md_finish_global (FILE *dump ATTRIBUTE_UNUSED,
6288 int verbose ATTRIBUTE_UNUSED)
6289 {
6290 sched_ib.insn = NULL;
6291
6292 free (sched_adjust_cost_state);
6293 sched_adjust_cost_state = NULL;
6294
6295 sched_mem_unit_code = 0;
6296
6297 free (sched_ib.records.adjust);
6298 sched_ib.records.adjust = NULL;
6299 sched_ib.records.n_insns = 0;
6300 max_insn_size = 0;
6301
6302 free (sched_branch_type);
6303 sched_branch_type = NULL;
6304 }
6305
6306 /* Implementation of targetm.sched.init () hook.
6307 It is invoked each time the scheduler starts on a new block (basic block or
6308 extended basic block). */
6309 static void
6310 m68k_sched_md_init (FILE *sched_dump ATTRIBUTE_UNUSED,
6311 int sched_verbose ATTRIBUTE_UNUSED,
6312 int n_insns ATTRIBUTE_UNUSED)
6313 {
6314 switch (m68k_sched_cpu)
6315 {
6316 case CPU_CFV1:
6317 case CPU_CFV2:
6318 sched_ib.size = 6;
6319 break;
6320
6321 case CPU_CFV3:
6322 sched_ib.size = sched_ib.records.n_insns * max_insn_size;
6323
6324 memset (sched_ib.records.adjust, 0,
6325 sched_ib.records.n_insns * sizeof (*sched_ib.records.adjust));
6326 sched_ib.records.adjust_index = 0;
6327 break;
6328
6329 case CPU_CFV4:
6330 gcc_assert (!sched_ib.enabled_p);
6331 sched_ib.size = 0;
6332 break;
6333
6334 default:
6335 gcc_unreachable ();
6336 }
6337
6338 if (sched_ib.enabled_p)
6339 /* haifa-sched.c: schedule_block () calls advance_cycle () just before
6340 the first cycle. Work around that. */
6341 sched_ib.filled = -2;
6342 }
6343
6344 /* Implementation of targetm.sched.dfa_pre_advance_cycle () hook.
6345 It is invoked just before the current cycle finishes and is used here
6346 to track whether the instruction buffer got its two words this cycle. */
6347 static void
6348 m68k_sched_dfa_pre_advance_cycle (void)
6349 {
6350 if (!sched_ib.enabled_p)
6351 return;
6352
6353 if (!cpu_unit_reservation_p (curr_state, sched_mem_unit_code))
6354 {
6355 sched_ib.filled += 2;
6356
6357 if (sched_ib.filled > sched_ib.size)
6358 sched_ib.filled = sched_ib.size;
6359 }
6360 }
6361
6362 /* Implementation of targetm.sched.dfa_post_advance_cycle () hook.
6363 It is invoked just after a new cycle begins and is used here
6364 to set up the number of filled words in the instruction buffer so that
6365 instructions which won't have all their words prefetched will be
6366 stalled for a cycle. */
6367 static void
6368 m68k_sched_dfa_post_advance_cycle (void)
6369 {
6370 int i;
6371
6372 if (!sched_ib.enabled_p)
6373 return;
6374
6375 /* Setup number of prefetched instruction words in the instruction
6376 buffer. */
6377 i = max_insn_size - sched_ib.filled;
6378
6379 while (--i >= 0)
6380 {
6381 if (state_transition (curr_state, sched_ib.insn) >= 0)
6382 gcc_unreachable ();
6383 }
6384 }
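
/* Taken together, the two hooks above model instruction fetch:
   every cycle in which the memory unit is idle adds two words to the
   buffer (dfa_pre_advance_cycle), while dfa_post_advance_cycle
   issues one sched_ib.insn reservation for each word the buffer is
   short of max_insn_size, stalling insns whose words have not been
   prefetched yet.  Starting from an empty block the fill count thus
   runs -2, 0, 2, ..., capped at sched_ib.size.  */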
6385
6386 /* Return X or Y (depending on OPX_P) operand of INSN,
6387 if it is an integer register, or NULL otherwise. */
6388 static rtx
6389 sched_get_reg_operand (rtx insn, bool opx_p)
6390 {
6391 rtx op = NULL;
6392
6393 if (opx_p)
6394 {
6395 if (get_attr_opx_type (insn) == OPX_TYPE_RN)
6396 {
6397 op = sched_get_operand (insn, true);
6398 gcc_assert (op != NULL);
6399
6400 if (!reload_completed && !REG_P (op))
6401 return NULL;
6402 }
6403 }
6404 else
6405 {
6406 if (get_attr_opy_type (insn) == OPY_TYPE_RN)
6407 {
6408 op = sched_get_operand (insn, false);
6409 gcc_assert (op != NULL);
6410
6411 if (!reload_completed && !REG_P (op))
6412 return NULL;
6413 }
6414 }
6415
6416 return op;
6417 }
6418
6419 /* Return true if the X or Y (depending on OPX_P) operand of INSN
6420 is a MEM. */
6421 static bool
6422 sched_mem_operand_p (rtx insn, bool opx_p)
6423 {
6424 switch (sched_get_opxy_mem_type (insn, opx_p))
6425 {
6426 case OP_TYPE_MEM1:
6427 case OP_TYPE_MEM6:
6428 return true;
6429
6430 default:
6431 return false;
6432 }
6433 }
6434
6435 /* Return the MEM operand of INSN that is read (MUST_READ_P) and/or
6436 written (MUST_WRITE_P). Such an operand must exist. */
6437 static rtx
6438 sched_get_mem_operand (rtx insn, bool must_read_p, bool must_write_p)
6439 {
6440 bool opx_p;
6441 bool opy_p;
6442
6443 opx_p = false;
6444 opy_p = false;
6445
6446 if (must_read_p)
6447 {
6448 opx_p = true;
6449 opy_p = true;
6450 }
6451
6452 if (must_write_p)
6453 {
6454 opx_p = true;
6455 opy_p = false;
6456 }
6457
6458 if (opy_p && sched_mem_operand_p (insn, false))
6459 return sched_get_operand (insn, false);
6460
6461 if (opx_p && sched_mem_operand_p (insn, true))
6462 return sched_get_operand (insn, true);
6463
6464 gcc_unreachable ();
6465 return NULL;
6466 }
6467
6468 /* Return non-zero if PRO modifies a register that is used as part of
6469 an address in CON. */
6470 int
6471 m68k_sched_address_bypass_p (rtx pro, rtx con)
6472 {
6473 rtx pro_x;
6474 rtx con_mem_read;
6475
6476 pro_x = sched_get_reg_operand (pro, true);
6477 if (pro_x == NULL)
6478 return 0;
6479
6480 con_mem_read = sched_get_mem_operand (con, true, false);
6481 gcc_assert (con_mem_read != NULL);
6482
6483 if (reg_mentioned_p (pro_x, con_mem_read))
6484 return 1;
6485
6486 return 0;
6487 }
6488
6489 /* Helper function for m68k_sched_indexed_address_bypass_p.
6490 If PRO modifies a register used as an index in CON, return the
6491 scale of the indexed memory access in CON. Return zero otherwise. */
6492 static int
6493 sched_get_indexed_address_scale (rtx pro, rtx con)
6494 {
6495 rtx reg;
6496 rtx mem;
6497 struct m68k_address address;
6498
6499 reg = sched_get_reg_operand (pro, true);
6500 if (reg == NULL)
6501 return 0;
6502
6503 mem = sched_get_mem_operand (con, true, false);
6504 gcc_assert (mem != NULL && MEM_P (mem));
6505
6506 if (!m68k_decompose_address (GET_MODE (mem), XEXP (mem, 0), reload_completed,
6507 &address))
6508 gcc_unreachable ();
6509
6510 if (REGNO (reg) == REGNO (address.index))
6511 {
6512 gcc_assert (address.scale != 0);
6513 return address.scale;
6514 }
6515
6516 return 0;
6517 }
6518
6519 /* Return non-zero if PRO modifies a register used as an index
6520 with scale 2 or 4 in CON. */
6521 int
6522 m68k_sched_indexed_address_bypass_p (rtx pro, rtx con)
6523 {
6524 gcc_assert (sched_cfv4_bypass_data.pro == NULL
6525 && sched_cfv4_bypass_data.con == NULL
6526 && sched_cfv4_bypass_data.scale == 0);
6527
6528 switch (sched_get_indexed_address_scale (pro, con))
6529 {
6530 case 1:
6531 /* We can't have a variable latency bypass, so
6532 remember to adjust the insn cost in the adjust_cost hook. */
6533 sched_cfv4_bypass_data.pro = pro;
6534 sched_cfv4_bypass_data.con = con;
6535 sched_cfv4_bypass_data.scale = 1;
6536 return 0;
6537
6538 case 2:
6539 case 4:
6540 return 1;
6541
6542 default:
6543 return 0;
6544 }
6545 }
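
/* An illustrative producer/consumer pair for the bypass above:

     add.l #4,%d0               | PRO modifies %d0
     move.l (%a0,%d0.l*4),%d1   | CON uses %d0 as index with scale 4

   With scale 2 or 4 the bypass applies and we return non-zero; with
   scale 1 we instead record the pair and enforce a minimum cost of 3
   in m68k_sched_adjust_cost.  */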
6546
6547 /* We generate a two-instruction program at M_TRAMP:
6548 movea.l &CHAIN_VALUE,%a0
6549 jmp FNADDR
6550 where %a0 can be modified by changing STATIC_CHAIN_REGNUM. */
6551
6552 static void
6553 m68k_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
6554 {
6555 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
6556 rtx mem;
6557
6558 gcc_assert (ADDRESS_REGNO_P (STATIC_CHAIN_REGNUM));
6559
6560 mem = adjust_address (m_tramp, HImode, 0);
6561 emit_move_insn (mem, GEN_INT (0x207C + ((STATIC_CHAIN_REGNUM - 8) << 9)));
6562 mem = adjust_address (m_tramp, SImode, 2);
6563 emit_move_insn (mem, chain_value);
6564
6565 mem = adjust_address (m_tramp, HImode, 6);
6566 emit_move_insn (mem, GEN_INT (0x4EF9));
6567 mem = adjust_address (m_tramp, SImode, 8);
6568 emit_move_insn (mem, fnaddr);
6569
6570 FINALIZE_TRAMPOLINE (XEXP (m_tramp, 0));
6571 }
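
/* The resulting 12-byte trampoline, assuming the usual choice of
   %a0 (register field 0) for STATIC_CHAIN_REGNUM, looks like:

     offset 0:  0x207c  movea.l #<chain_value>,%a0
     offset 2:  <chain_value, 4 bytes>
     offset 6:  0x4ef9  jmp <fnaddr>.l
     offset 8:  <fnaddr, 4 bytes>  */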
6572
6573 /* On the 68000, the RTS insn cannot pop anything.
6574 On the 68010, the RTD insn may be used to pop them if the number
6575 of args is fixed, but if the number is variable then the caller
6576 must pop them all. RTD can't be used for library calls now
6577 because the library is compiled with the Unix compiler.
6578 Use of RTD is a selectable option, since it is incompatible with
6579 standard Unix calling sequences. If the option is not selected,
6580 the caller must always pop the args. */
6581
6582 static int
6583 m68k_return_pops_args (tree fundecl, tree funtype, int size)
6584 {
6585 return ((TARGET_RTD
6586 && (!fundecl
6587 || TREE_CODE (fundecl) != IDENTIFIER_NODE)
6588 && (!stdarg_p (funtype)))
6589 ? size : 0);
6590 }
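
/* For example, with -mrtd a fixed-argument function that received 8
   bytes of stack arguments returns with "rtd #8", popping its own
   arguments; without -mrtd, or for variadic functions, it returns
   with a plain "rts" and the caller pops the arguments.  */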
6591
6592 /* Make sure everything's fine if we *don't* have a given processor.
6593 This assumes that putting a register in fixed_regs will keep the
6594 compiler's mitts completely off it. We don't bother to zero it out
6595 of register classes. */
6596
6597 static void
6598 m68k_conditional_register_usage (void)
6599 {
6600 int i;
6601 HARD_REG_SET x;
6602 if (!TARGET_HARD_FLOAT)
6603 {
6604 COPY_HARD_REG_SET (x, reg_class_contents[(int)FP_REGS]);
6605 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
6606 if (TEST_HARD_REG_BIT (x, i))
6607 fixed_regs[i] = call_used_regs[i] = 1;
6608 }
6609 if (flag_pic)
6610 fixed_regs[PIC_REG] = call_used_regs[PIC_REG] = 1;
6611 }
6612
6613 #include "gt-m68k.h"