/* Subroutines for insn-output.c for Motorola 68000 family.
   Copyright (C) 1987, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
   2001, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "function.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "expr.h"
#include "reload.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "debug.h"
#include "flags.h"
#include "df.h"
/* ??? Need to add a dependency between m68k.o and sched-int.h.  */
#include "sched-int.h"
#include "insn-codes.h"
#include "ggc.h"
#include "opts.h"

enum reg_class regno_reg_class[] =
{
  DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
  DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
  ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
  ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  ADDR_REGS
};


/* The minimum number of integer registers that we want to save with the
   movem instruction.  Using two movel instructions instead of a single
   moveml is about 15% faster for the 68020 and 68030 at no expense in
   code size.  */
#define MIN_MOVEM_REGS 3

/* The minimum number of floating point registers that we want to save
   with the fmovem instruction.  */
#define MIN_FMOVEM_REGS 1

/* Structure describing stack frame layout.  */
struct m68k_frame
{
  /* Stack pointer to frame pointer offset.  */
  HOST_WIDE_INT offset;

  /* Offset of FPU registers.  */
  HOST_WIDE_INT foffset;

  /* Frame size in bytes (rounded up).  */
  HOST_WIDE_INT size;

  /* Data and address registers.  */
  int reg_no;
  unsigned int reg_mask;

  /* FPU registers.  */
  int fpu_no;
  unsigned int fpu_mask;

  /* Offsets relative to ARG_POINTER.  */
  HOST_WIDE_INT frame_pointer_offset;
  HOST_WIDE_INT stack_pointer_offset;

  /* Function which the above information refers to.  */
  int funcdef_no;
};

/* Current frame information calculated by m68k_compute_frame_layout().  */
static struct m68k_frame current_frame;

/* Structure describing an m68k address.

   If CODE is UNKNOWN, the address is BASE + INDEX * SCALE + OFFSET,
   with null fields evaluating to 0.  Here:

   - BASE satisfies m68k_legitimate_base_reg_p
   - INDEX satisfies m68k_legitimate_index_reg_p
   - OFFSET satisfies m68k_legitimate_constant_address_p

   INDEX is either HImode or SImode.  The other fields are SImode.

   If CODE is PRE_DEC, the address is -(BASE).  If CODE is POST_INC,
   the address is (BASE)+.  */
struct m68k_address {
  enum rtx_code code;
  rtx base;
  rtx index;
  rtx offset;
  int scale;
};
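
/* As an illustration (not from the original sources): the 68020-style
   address (16,%a0,%d1.l*4) would be described here with CODE == UNKNOWN,
   BASE == %a0, INDEX == %d1, SCALE == 4 and OFFSET == (const_int 16).  */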

static int m68k_sched_adjust_cost (rtx, rtx, rtx, int);
static int m68k_sched_issue_rate (void);
static int m68k_sched_variable_issue (FILE *, int, rtx, int);
static void m68k_sched_md_init_global (FILE *, int, int);
static void m68k_sched_md_finish_global (FILE *, int);
static void m68k_sched_md_init (FILE *, int, int);
static void m68k_sched_dfa_pre_advance_cycle (void);
static void m68k_sched_dfa_post_advance_cycle (void);
static int m68k_sched_first_cycle_multipass_dfa_lookahead (void);

static bool m68k_can_eliminate (const int, const int);
static void m68k_conditional_register_usage (void);
static bool m68k_legitimate_address_p (enum machine_mode, rtx, bool);
static bool m68k_handle_option (struct gcc_options *, struct gcc_options *,
                                const struct cl_decoded_option *, location_t);
static void m68k_option_override (void);
static rtx find_addr_reg (rtx);
static const char *singlemove_string (rtx *);
static void m68k_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
                                  HOST_WIDE_INT, tree);
static rtx m68k_struct_value_rtx (tree, int);
static tree m68k_handle_fndecl_attribute (tree *node, tree name,
                                          tree args, int flags,
                                          bool *no_add_attrs);
static void m68k_compute_frame_layout (void);
static bool m68k_save_reg (unsigned int regno, bool interrupt_handler);
static bool m68k_ok_for_sibcall_p (tree, tree);
static bool m68k_tls_symbol_p (rtx);
static rtx m68k_legitimize_address (rtx, rtx, enum machine_mode);
static bool m68k_rtx_costs (rtx, int, int, int *, bool);
#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
static bool m68k_return_in_memory (const_tree, const_tree);
#endif
static void m68k_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
static void m68k_trampoline_init (rtx, tree, rtx);
static int m68k_return_pops_args (tree, tree, int);
static rtx m68k_delegitimize_address (rtx);
static void m68k_function_arg_advance (CUMULATIVE_ARGS *, enum machine_mode,
                                       const_tree, bool);
static rtx m68k_function_arg (CUMULATIVE_ARGS *, enum machine_mode,
                              const_tree, bool);
static bool m68k_cannot_force_const_mem (enum machine_mode mode, rtx x);
\f
/* Initialize the GCC target structure.  */

#if INT_OP_GROUP == INT_OP_DOT_WORD
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
#endif

#if INT_OP_GROUP == INT_OP_NO_DOT
#undef TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tbyte\t"
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tshort\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tlong\t"
#endif

#if INT_OP_GROUP == INT_OP_DC
#undef TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tdc.b\t"
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tdc.w\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tdc.l\t"
#endif

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK m68k_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_ASM_FILE_START_APP_OFF
#define TARGET_ASM_FILE_START_APP_OFF true

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS m68k_legitimize_address

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST m68k_sched_adjust_cost

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE m68k_sched_issue_rate

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE m68k_sched_variable_issue

#undef TARGET_SCHED_INIT_GLOBAL
#define TARGET_SCHED_INIT_GLOBAL m68k_sched_md_init_global

#undef TARGET_SCHED_FINISH_GLOBAL
#define TARGET_SCHED_FINISH_GLOBAL m68k_sched_md_finish_global

#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT m68k_sched_md_init

#undef TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE
#define TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE m68k_sched_dfa_pre_advance_cycle

#undef TARGET_SCHED_DFA_POST_ADVANCE_CYCLE
#define TARGET_SCHED_DFA_POST_ADVANCE_CYCLE m68k_sched_dfa_post_advance_cycle

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
  m68k_sched_first_cycle_multipass_dfa_lookahead

#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION m68k_handle_option

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE m68k_option_override

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS m68k_rtx_costs

#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE m68k_attribute_table

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX m68k_struct_value_rtx

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM m68k_cannot_force_const_mem

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL m68k_ok_for_sibcall_p

#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY m68k_return_in_memory
#endif

#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS (true)

#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL m68k_output_dwarf_dtprel
#endif

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P m68k_legitimate_address_p

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE m68k_can_eliminate

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE m68k_conditional_register_usage

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT m68k_trampoline_init

#undef TARGET_RETURN_POPS_ARGS
#define TARGET_RETURN_POPS_ARGS m68k_return_pops_args

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS m68k_delegitimize_address

#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG m68k_function_arg

#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE m68k_function_arg_advance

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P m68k_legitimate_constant_p

static const struct attribute_spec m68k_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "interrupt", 0, 0, true, false, false, m68k_handle_fndecl_attribute,
    false },
  { "interrupt_handler", 0, 0, true, false, false,
    m68k_handle_fndecl_attribute, false },
  { "interrupt_thread", 0, 0, true, false, false,
    m68k_handle_fndecl_attribute, false },
  { NULL, 0, 0, false, false, false, NULL, false }
};
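
/* A user requests these with function attributes along the lines of
   (illustrative only):

     void __attribute__ ((interrupt_handler)) timer_isr (void);  */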

struct gcc_target targetm = TARGET_INITIALIZER;
\f
/* Base flags for 68k ISAs.  */
#define FL_FOR_isa_00    FL_ISA_68000
#define FL_FOR_isa_10    (FL_FOR_isa_00 | FL_ISA_68010)
/* FL_68881 controls the default setting of -m68881.  gcc has traditionally
   generated 68881 code for 68020 and 68030 targets unless explicitly told
   not to.  */
#define FL_FOR_isa_20    (FL_FOR_isa_10 | FL_ISA_68020 \
                          | FL_BITFIELD | FL_68881)
#define FL_FOR_isa_40    (FL_FOR_isa_20 | FL_ISA_68040)
#define FL_FOR_isa_cpu32 (FL_FOR_isa_10 | FL_ISA_68020)

/* Base flags for ColdFire ISAs.  */
#define FL_FOR_isa_a     (FL_COLDFIRE | FL_ISA_A)
#define FL_FOR_isa_aplus (FL_FOR_isa_a | FL_ISA_APLUS | FL_CF_USP)
/* Note ISA_B doesn't necessarily include USP (user stack pointer) support.  */
#define FL_FOR_isa_b     (FL_FOR_isa_a | FL_ISA_B | FL_CF_HWDIV)
/* ISA_C is not upwardly compatible with ISA_B.  */
#define FL_FOR_isa_c     (FL_FOR_isa_a | FL_ISA_C | FL_CF_USP)

enum m68k_isa
{
  /* Traditional 68000 instruction sets.  */
  isa_00,
  isa_10,
  isa_20,
  isa_40,
  isa_cpu32,
  /* ColdFire instruction set variants.  */
  isa_a,
  isa_aplus,
  isa_b,
  isa_c,
  isa_max
};

/* Information about one of the -march, -mcpu or -mtune arguments.  */
struct m68k_target_selection
{
  /* The argument being described.  */
  const char *name;

  /* For -mcpu, this is the device selected by the option.
     For -mtune and -march, it is a representative device
     for the microarchitecture or ISA respectively.  */
  enum target_device device;

  /* The M68K_DEVICE fields associated with DEVICE.  See the comment
     in m68k-devices.def for details.  FAMILY is only valid for -mcpu.  */
  const char *family;
  enum uarch_type microarch;
  enum m68k_isa isa;
  unsigned long flags;
};

/* A list of all devices in m68k-devices.def.  Used for -mcpu selection.  */
static const struct m68k_target_selection all_devices[] =
{
#define M68K_DEVICE(NAME,ENUM_VALUE,FAMILY,MULTILIB,MICROARCH,ISA,FLAGS) \
  { NAME, ENUM_VALUE, FAMILY, u##MICROARCH, ISA, FLAGS | FL_FOR_##ISA },
#include "m68k-devices.def"
#undef M68K_DEVICE
  { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
};

/* A list of all ISAs, mapping each one to a representative device.
   Used for -march selection.  */
static const struct m68k_target_selection all_isas[] =
{
#define M68K_ISA(NAME,DEVICE,MICROARCH,ISA,FLAGS) \
  { NAME, DEVICE, NULL, u##MICROARCH, ISA, FLAGS },
#include "m68k-isas.def"
#undef M68K_ISA
  { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
};

/* A list of all microarchitectures, mapping each one to a representative
   device.  Used for -mtune selection.  */
static const struct m68k_target_selection all_microarchs[] =
{
#define M68K_MICROARCH(NAME,DEVICE,MICROARCH,ISA,FLAGS) \
  { NAME, DEVICE, NULL, u##MICROARCH, ISA, FLAGS },
#include "m68k-microarchs.def"
#undef M68K_MICROARCH
  { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
};
\f
/* The entries associated with the -mcpu, -march and -mtune settings,
   or null for options that have not been used.  */
const struct m68k_target_selection *m68k_cpu_entry;
const struct m68k_target_selection *m68k_arch_entry;
const struct m68k_target_selection *m68k_tune_entry;

/* Which CPU we are generating code for.  */
enum target_device m68k_cpu;

/* Which microarchitecture to tune for.  */
enum uarch_type m68k_tune;

/* Which FPU to use.  */
enum fpu_type m68k_fpu;

/* The set of FL_* flags that apply to the target processor.  */
unsigned int m68k_cpu_flags;

/* The set of FL_* flags that apply to the processor to be tuned for.  */
unsigned int m68k_tune_flags;

/* Asm templates for calling or jumping to an arbitrary symbolic address,
   or NULL if such calls or jumps are not supported.  The address is held
   in operand 0.  */
const char *m68k_symbolic_call;
const char *m68k_symbolic_jump;

/* Enum variable that corresponds to m68k_symbolic_call values.  */
enum M68K_SYMBOLIC_CALL m68k_symbolic_call_var;

\f
/* Implement TARGET_HANDLE_OPTION.  */

static bool
m68k_handle_option (struct gcc_options *opts,
                    struct gcc_options *opts_set ATTRIBUTE_UNUSED,
                    const struct cl_decoded_option *decoded,
                    location_t loc)
{
  size_t code = decoded->opt_index;
  const char *arg = decoded->arg;
  int value = decoded->value;

  switch (code)
    {
    case OPT_m68020_40:
      opts->x_m68k_tune_option = u68020_40;
      opts->x_m68k_cpu_option = m68020;
      return true;

    case OPT_m68020_60:
      opts->x_m68k_tune_option = u68020_60;
      opts->x_m68k_cpu_option = m68020;
      return true;

    case OPT_mshared_library_id_:
      if (value > MAX_LIBRARY_ID)
        error_at (loc, "-mshared-library-id=%s is not between 0 and %d",
                  arg, MAX_LIBRARY_ID);
      else
        {
          char *tmp;
          asprintf (&tmp, "%d", (value * -4) - 4);
          opts->x_m68k_library_id_string = tmp;
        }
      return true;

    default:
      return true;
    }
}
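
/* As an illustration of the -mshared-library-id calculation above:
   -mshared-library-id=3 stores "-16" in m68k_library_id_string,
   since (3 * -4) - 4 == -16.  */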

/* Implement TARGET_OPTION_OVERRIDE.  */

static void
m68k_option_override (void)
{
  const struct m68k_target_selection *entry;
  unsigned long target_mask;

  if (global_options_set.x_m68k_arch_option)
    m68k_arch_entry = &all_isas[m68k_arch_option];

  if (global_options_set.x_m68k_cpu_option)
    m68k_cpu_entry = &all_devices[(int) m68k_cpu_option];

  if (global_options_set.x_m68k_tune_option)
    m68k_tune_entry = &all_microarchs[(int) m68k_tune_option];

  /* User can choose:

     -mcpu=
     -march=
     -mtune=

     -march=ARCH should generate code that runs on any processor
     implementing architecture ARCH.  -mcpu=CPU should override -march
     and should generate code that runs on processor CPU, making free
     use of any instructions that CPU understands.  -mtune=UARCH applies
     on top of -mcpu or -march and optimizes the code for UARCH.  It does
     not change the target architecture.  */
  if (m68k_cpu_entry)
    {
      /* Complain if the -march setting is for a different microarchitecture,
         or includes flags that the -mcpu setting doesn't.  */
      if (m68k_arch_entry
          && (m68k_arch_entry->microarch != m68k_cpu_entry->microarch
              || (m68k_arch_entry->flags & ~m68k_cpu_entry->flags) != 0))
        warning (0, "-mcpu=%s conflicts with -march=%s",
                 m68k_cpu_entry->name, m68k_arch_entry->name);

      entry = m68k_cpu_entry;
    }
  else
    entry = m68k_arch_entry;

  if (!entry)
    entry = all_devices + TARGET_CPU_DEFAULT;

  m68k_cpu_flags = entry->flags;

  /* Use the architecture setting to derive default values for
     certain flags.  */
  target_mask = 0;

  /* ColdFire is lenient about alignment.  */
  if (!TARGET_COLDFIRE)
    target_mask |= MASK_STRICT_ALIGNMENT;

  if ((m68k_cpu_flags & FL_BITFIELD) != 0)
    target_mask |= MASK_BITFIELD;
  if ((m68k_cpu_flags & FL_CF_HWDIV) != 0)
    target_mask |= MASK_CF_HWDIV;
  if ((m68k_cpu_flags & (FL_68881 | FL_CF_FPU)) != 0)
    target_mask |= MASK_HARD_FLOAT;
  target_flags |= target_mask & ~target_flags_explicit;

  /* Set the directly-usable versions of the -mcpu and -mtune settings.  */
  m68k_cpu = entry->device;
  if (m68k_tune_entry)
    {
      m68k_tune = m68k_tune_entry->microarch;
      m68k_tune_flags = m68k_tune_entry->flags;
    }
#ifdef M68K_DEFAULT_TUNE
  else if (!m68k_cpu_entry && !m68k_arch_entry)
    {
      enum target_device dev;
      dev = all_microarchs[M68K_DEFAULT_TUNE].device;
      m68k_tune_flags = all_devices[dev].flags;
    }
#endif
  else
    {
      m68k_tune = entry->microarch;
      m68k_tune_flags = entry->flags;
    }

  /* Set the type of FPU.  */
  m68k_fpu = (!TARGET_HARD_FLOAT ? FPUTYPE_NONE
              : (m68k_cpu_flags & FL_COLDFIRE) != 0 ? FPUTYPE_COLDFIRE
              : FPUTYPE_68881);

  /* Sanity check to ensure that msep-data and mid-shared-library are not
     both specified together.  Doing so simply doesn't make sense.  */
  if (TARGET_SEP_DATA && TARGET_ID_SHARED_LIBRARY)
    error ("cannot specify both -msep-data and -mid-shared-library");

  /* If we're generating code for a separate A5 relative data segment,
     we've got to enable -fPIC as well.  This might be relaxable to
     -fpic but it hasn't been tested properly.  */
  if (TARGET_SEP_DATA || TARGET_ID_SHARED_LIBRARY)
    flag_pic = 2;

  /* -mpcrel -fPIC uses 32-bit pc-relative displacements.  Raise an
     error if the target does not support them.  */
  if (TARGET_PCREL && !TARGET_68020 && flag_pic == 2)
    error ("-mpcrel -fPIC is not currently supported on selected cpu");

  /* ??? A historic way of turning on pic, or is this intended to
     be an embedded thing that doesn't have the same name binding
     significance that it does on hosted ELF systems?  */
  if (TARGET_PCREL && flag_pic == 0)
    flag_pic = 1;

  if (!flag_pic)
    {
      m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_JSR;

      m68k_symbolic_jump = "jra %a0";
    }
  else if (TARGET_ID_SHARED_LIBRARY)
    /* All addresses must be loaded from the GOT.  */
    ;
  else if (TARGET_68020 || TARGET_ISAB || TARGET_ISAC)
    {
      if (TARGET_PCREL)
        m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_C;
      else
        m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_P;

      if (TARGET_ISAC)
        /* No unconditional long branch */;
      else if (TARGET_PCREL)
        m68k_symbolic_jump = "bra%.l %c0";
      else
        m68k_symbolic_jump = "bra%.l %p0";
      /* Turn off function cse if we are doing PIC.  We always want
         function call to be done as `bsr foo@PLTPC'.  */
      /* ??? It's traditional to do this for -mpcrel too, but it isn't
         clear how intentional that is.  */
      flag_no_function_cse = 1;
    }

  switch (m68k_symbolic_call_var)
    {
    case M68K_SYMBOLIC_CALL_JSR:
      m68k_symbolic_call = "jsr %a0";
      break;

    case M68K_SYMBOLIC_CALL_BSR_C:
      m68k_symbolic_call = "bsr%.l %c0";
      break;

    case M68K_SYMBOLIC_CALL_BSR_P:
      m68k_symbolic_call = "bsr%.l %p0";
      break;

    case M68K_SYMBOLIC_CALL_NONE:
      gcc_assert (m68k_symbolic_call == NULL);
      break;

    default:
      gcc_unreachable ();
    }

#ifndef ASM_OUTPUT_ALIGN_WITH_NOP
  if (align_labels > 2)
    {
      warning (0, "-falign-labels=%d is not supported", align_labels);
      align_labels = 0;
    }
  if (align_loops > 2)
    {
      warning (0, "-falign-loops=%d is not supported", align_loops);
      align_loops = 0;
    }
#endif

  SUBTARGET_OVERRIDE_OPTIONS;

  /* Set up scheduling options.  */
  if (TUNE_CFV1)
    m68k_sched_cpu = CPU_CFV1;
  else if (TUNE_CFV2)
    m68k_sched_cpu = CPU_CFV2;
  else if (TUNE_CFV3)
    m68k_sched_cpu = CPU_CFV3;
  else if (TUNE_CFV4)
    m68k_sched_cpu = CPU_CFV4;
  else
    {
      m68k_sched_cpu = CPU_UNKNOWN;
      flag_schedule_insns = 0;
      flag_schedule_insns_after_reload = 0;
      flag_modulo_sched = 0;
    }

  if (m68k_sched_cpu != CPU_UNKNOWN)
    {
      if ((m68k_cpu_flags & (FL_CF_EMAC | FL_CF_EMAC_B)) != 0)
        m68k_sched_mac = MAC_CF_EMAC;
      else if ((m68k_cpu_flags & FL_CF_MAC) != 0)
        m68k_sched_mac = MAC_CF_MAC;
      else
        m68k_sched_mac = MAC_NO;
    }
}

/* Generate a macro of the form __mPREFIX_cpu_NAME, where PREFIX is the
   given argument and NAME is the argument passed to -mcpu.  Return NULL
   if -mcpu was not passed.  */

const char *
m68k_cpp_cpu_ident (const char *prefix)
{
  if (!m68k_cpu_entry)
    return NULL;
  return concat ("__m", prefix, "_cpu_", m68k_cpu_entry->name, NULL);
}
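
/* For instance, m68k_cpp_cpu_ident ("cf") with -mcpu=5208 would return
   "__mcf_cpu_5208" (illustrative only; the exact macro depends on the
   prefix and -mcpu argument actually used).  */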

/* Generate a macro of the form __mPREFIX_family_NAME, where PREFIX is the
   given argument and NAME is the name of the representative device for
   the -mcpu argument's family.  Return NULL if -mcpu was not passed.  */

const char *
m68k_cpp_cpu_family (const char *prefix)
{
  if (!m68k_cpu_entry)
    return NULL;
  return concat ("__m", prefix, "_family_", m68k_cpu_entry->family, NULL);
}
\f
/* Return m68k_fk_interrupt_handler if FUNC has an "interrupt" or
   "interrupt_handler" attribute and interrupt_thread if FUNC has an
   "interrupt_thread" attribute.  Otherwise, return
   m68k_fk_normal_function.  */

enum m68k_function_kind
m68k_get_function_kind (tree func)
{
  tree a;

  gcc_assert (TREE_CODE (func) == FUNCTION_DECL);

  a = lookup_attribute ("interrupt", DECL_ATTRIBUTES (func));
  if (a != NULL_TREE)
    return m68k_fk_interrupt_handler;

  a = lookup_attribute ("interrupt_handler", DECL_ATTRIBUTES (func));
  if (a != NULL_TREE)
    return m68k_fk_interrupt_handler;

  a = lookup_attribute ("interrupt_thread", DECL_ATTRIBUTES (func));
  if (a != NULL_TREE)
    return m68k_fk_interrupt_thread;

  return m68k_fk_normal_function;
}

/* Handle an attribute requiring a FUNCTION_DECL; arguments as in
   struct attribute_spec.handler.  */
static tree
m68k_handle_fndecl_attribute (tree *node, tree name,
                              tree args ATTRIBUTE_UNUSED,
                              int flags ATTRIBUTE_UNUSED,
                              bool *no_add_attrs)
{
  if (TREE_CODE (*node) != FUNCTION_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
               name);
      *no_add_attrs = true;
    }

  if (m68k_get_function_kind (*node) != m68k_fk_normal_function)
    {
      error ("multiple interrupt attributes not allowed");
      *no_add_attrs = true;
    }

  if (!TARGET_FIDOA
      && !strcmp (IDENTIFIER_POINTER (name), "interrupt_thread"))
    {
      error ("interrupt_thread is available only on fido");
      *no_add_attrs = true;
    }

  return NULL_TREE;
}

static void
m68k_compute_frame_layout (void)
{
  int regno, saved;
  unsigned int mask;
  enum m68k_function_kind func_kind =
    m68k_get_function_kind (current_function_decl);
  bool interrupt_handler = func_kind == m68k_fk_interrupt_handler;
  bool interrupt_thread = func_kind == m68k_fk_interrupt_thread;

  /* Only compute the frame once per function.
     Don't cache information until reload has been completed.  */
  if (current_frame.funcdef_no == current_function_funcdef_no
      && reload_completed)
    return;

  current_frame.size = (get_frame_size () + 3) & -4;

  mask = saved = 0;

  /* Interrupt thread does not need to save any register.  */
  if (!interrupt_thread)
    for (regno = 0; regno < 16; regno++)
      if (m68k_save_reg (regno, interrupt_handler))
        {
          mask |= 1 << (regno - D0_REG);
          saved++;
        }
  current_frame.offset = saved * 4;
  current_frame.reg_no = saved;
  current_frame.reg_mask = mask;

  current_frame.foffset = 0;
  mask = saved = 0;
  if (TARGET_HARD_FLOAT)
    {
      /* Interrupt thread does not need to save any register.  */
      if (!interrupt_thread)
        for (regno = 16; regno < 24; regno++)
          if (m68k_save_reg (regno, interrupt_handler))
            {
              mask |= 1 << (regno - FP0_REG);
              saved++;
            }
      current_frame.foffset = saved * TARGET_FP_REG_SIZE;
      current_frame.offset += current_frame.foffset;
    }
  current_frame.fpu_no = saved;
  current_frame.fpu_mask = mask;

  /* Remember what function this frame refers to.  */
  current_frame.funcdef_no = current_function_funcdef_no;
}
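
/* A worked example (illustrative only): a function with 18 bytes of
   locals that saves %d2, %d3 and %a2 ends up with
   current_frame.size == 20 (18 rounded up to a multiple of 4),
   reg_no == 3, reg_mask == 0x40c (bits 2, 3 and 10) and
   offset == 12 (three 4-byte saves), assuming no FP registers are
   saved.  */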

/* Worker function for TARGET_CAN_ELIMINATE.  */

bool
m68k_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
{
  return (to == STACK_POINTER_REGNUM ? ! frame_pointer_needed : true);
}

HOST_WIDE_INT
m68k_initial_elimination_offset (int from, int to)
{
  int argptr_offset;
  /* The arg pointer points 8 bytes before the start of the arguments,
     as defined by FIRST_PARM_OFFSET.  This makes it coincident with the
     frame pointer in most frames.  */
  argptr_offset = frame_pointer_needed ? 0 : UNITS_PER_WORD;
  if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
    return argptr_offset;

  m68k_compute_frame_layout ();

  gcc_assert (to == STACK_POINTER_REGNUM);
  switch (from)
    {
    case ARG_POINTER_REGNUM:
      return current_frame.offset + current_frame.size - argptr_offset;
    case FRAME_POINTER_REGNUM:
      return current_frame.offset + current_frame.size;
    default:
      gcc_unreachable ();
    }
}
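
/* Continuing the frame-layout example above (illustrative only):
   without a frame pointer, argptr_offset == UNITS_PER_WORD == 4, so
   eliminating the arg pointer to the stack pointer yields
   offset + size - 4 == 12 + 20 - 4 == 28.  */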

/* Refer to the array `regs_ever_live' to determine which registers
   to save; `regs_ever_live[I]' is nonzero if register number I
   is ever used in the function.  This function is responsible for
   knowing which registers should not be saved even if used.
   Return true if we need to save REGNO.  */

static bool
m68k_save_reg (unsigned int regno, bool interrupt_handler)
{
  if (flag_pic && regno == PIC_REG)
    {
      if (crtl->saves_all_registers)
        return true;
      if (crtl->uses_pic_offset_table)
        return true;
      /* Reload may introduce constant pool references into a function
         that thitherto didn't need a PIC register.  Note that the test
         above will not catch that case because we will only set
         crtl->uses_pic_offset_table when emitting
         the address reloads.  */
      if (crtl->uses_const_pool)
        return true;
    }

  if (crtl->calls_eh_return)
    {
      unsigned int i;
      for (i = 0; ; i++)
        {
          unsigned int test = EH_RETURN_DATA_REGNO (i);
          if (test == INVALID_REGNUM)
            break;
          if (test == regno)
            return true;
        }
    }

  /* Fixed regs we never touch.  */
  if (fixed_regs[regno])
    return false;

  /* The frame pointer (if it is such) is handled specially.  */
  if (regno == FRAME_POINTER_REGNUM && frame_pointer_needed)
    return false;

  /* Interrupt handlers must also save call_used_regs
     if they are live or when calling nested functions.  */
  if (interrupt_handler)
    {
      if (df_regs_ever_live_p (regno))
        return true;

      if (!current_function_is_leaf && call_used_regs[regno])
        return true;
    }

  /* Never need to save registers that aren't touched.  */
  if (!df_regs_ever_live_p (regno))
    return false;

  /* Otherwise save everything that isn't call-clobbered.  */
  return !call_used_regs[regno];
}
/* Emit RTL for a MOVEM or FMOVEM instruction.  BASE + OFFSET represents
   the lowest memory address.  COUNT is the number of registers to be
   moved, with register REGNO + I being moved if bit I of MASK is set.
   STORE_P specifies the direction of the move and ADJUST_STACK_P says
   whether this is a pre-decrement (if STORE_P) or post-increment
   (if !STORE_P) operation.  */

static rtx
m68k_emit_movem (rtx base, HOST_WIDE_INT offset,
                 unsigned int count, unsigned int regno,
                 unsigned int mask, bool store_p, bool adjust_stack_p)
{
  int i;
  rtx body, addr, src, operands[2];
  enum machine_mode mode;

  body = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (adjust_stack_p + count));
  mode = reg_raw_mode[regno];
  i = 0;

  if (adjust_stack_p)
    {
      src = plus_constant (base, (count
                                  * GET_MODE_SIZE (mode)
                                  * (HOST_WIDE_INT) (store_p ? -1 : 1)));
      XVECEXP (body, 0, i++) = gen_rtx_SET (VOIDmode, base, src);
    }

  for (; mask != 0; mask >>= 1, regno++)
    if (mask & 1)
      {
        addr = plus_constant (base, offset);
        operands[!store_p] = gen_frame_mem (mode, addr);
        operands[store_p] = gen_rtx_REG (mode, regno);
        XVECEXP (body, 0, i++)
          = gen_rtx_SET (VOIDmode, operands[0], operands[1]);
        offset += GET_MODE_SIZE (mode);
      }
  gcc_assert (i == XVECLEN (body, 0));

  return emit_insn (body);
}
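
/* As a sketch, a three-register pre-decrement store built by the
   function above (e.g. for %d2-%d4) has the following PARALLEL shape
   (illustrative only):

     (parallel [(set sp (plus sp (const_int -12)))
                (set (mem (plus sp (const_int -12))) (reg d2))
                (set (mem (plus sp (const_int -8))) (reg d3))
                (set (mem (plus sp (const_int -4))) (reg d4))])  */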

/* Make INSN a frame-related instruction.  */

static void
m68k_set_frame_related (rtx insn)
{
  rtx body;
  int i;

  RTX_FRAME_RELATED_P (insn) = 1;
  body = PATTERN (insn);
  if (GET_CODE (body) == PARALLEL)
    for (i = 0; i < XVECLEN (body, 0); i++)
      RTX_FRAME_RELATED_P (XVECEXP (body, 0, i)) = 1;
}

/* Emit RTL for the "prologue" define_expand.  */

void
m68k_expand_prologue (void)
{
  HOST_WIDE_INT fsize_with_regs;
  rtx limit, src, dest;

  m68k_compute_frame_layout ();

  if (flag_stack_usage)
    current_function_static_stack_size
      = current_frame.size + current_frame.offset;

  /* If the stack limit is a symbol, we can check it here,
     before actually allocating the space.  */
  if (crtl->limit_stack
      && GET_CODE (stack_limit_rtx) == SYMBOL_REF)
    {
      limit = plus_constant (stack_limit_rtx, current_frame.size + 4);
      if (!m68k_legitimate_constant_p (Pmode, limit))
        {
          emit_move_insn (gen_rtx_REG (Pmode, D0_REG), limit);
          limit = gen_rtx_REG (Pmode, D0_REG);
        }
      emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode,
                                            stack_pointer_rtx, limit),
                               stack_pointer_rtx, limit,
                               const1_rtx));
    }

  fsize_with_regs = current_frame.size;
  if (TARGET_COLDFIRE)
    {
      /* ColdFire's move multiple instructions do not allow pre-decrement
         addressing.  Add the size of movem saves to the initial stack
         allocation instead.  */
      if (current_frame.reg_no >= MIN_MOVEM_REGS)
        fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
      if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
        fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
    }

  if (frame_pointer_needed)
    {
      if (fsize_with_regs == 0 && TUNE_68040)
        {
          /* On the 68040, two separate moves are faster than link.w 0.  */
          dest = gen_frame_mem (Pmode,
                                gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
          m68k_set_frame_related (emit_move_insn (dest, frame_pointer_rtx));
          m68k_set_frame_related (emit_move_insn (frame_pointer_rtx,
                                                  stack_pointer_rtx));
        }
      else if (fsize_with_regs < 0x8000 || TARGET_68020)
        m68k_set_frame_related
          (emit_insn (gen_link (frame_pointer_rtx,
                                GEN_INT (-4 - fsize_with_regs))));
      else
        {
          m68k_set_frame_related
            (emit_insn (gen_link (frame_pointer_rtx, GEN_INT (-4))));
          m68k_set_frame_related
            (emit_insn (gen_addsi3 (stack_pointer_rtx,
                                    stack_pointer_rtx,
                                    GEN_INT (-fsize_with_regs))));
        }

      /* If the frame pointer is needed, emit a special barrier that
         will prevent the scheduler from moving stores to the frame
         before the stack adjustment.  */
      emit_insn (gen_stack_tie (stack_pointer_rtx, frame_pointer_rtx));
    }
  else if (fsize_with_regs != 0)
    m68k_set_frame_related
      (emit_insn (gen_addsi3 (stack_pointer_rtx,
                              stack_pointer_rtx,
                              GEN_INT (-fsize_with_regs))));

  if (current_frame.fpu_mask)
    {
      gcc_assert (current_frame.fpu_no >= MIN_FMOVEM_REGS);
      if (TARGET_68881)
        m68k_set_frame_related
          (m68k_emit_movem (stack_pointer_rtx,
                            current_frame.fpu_no * -GET_MODE_SIZE (XFmode),
                            current_frame.fpu_no, FP0_REG,
                            current_frame.fpu_mask, true, true));
      else
        {
          int offset;

          /* If we're using moveml to save the integer registers,
             the stack pointer will point to the bottom of the moveml
             save area.  Find the stack offset of the first FP register.  */
          if (current_frame.reg_no < MIN_MOVEM_REGS)
            offset = 0;
          else
            offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
          m68k_set_frame_related
            (m68k_emit_movem (stack_pointer_rtx, offset,
                              current_frame.fpu_no, FP0_REG,
                              current_frame.fpu_mask, true, false));
        }
    }

  /* If the stack limit is not a symbol, check it here.
     This has the disadvantage that it may be too late...  */
  if (crtl->limit_stack)
    {
      if (REG_P (stack_limit_rtx))
        emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode, stack_pointer_rtx,
                                              stack_limit_rtx),
                                 stack_pointer_rtx, stack_limit_rtx,
                                 const1_rtx));

      else if (GET_CODE (stack_limit_rtx) != SYMBOL_REF)
        warning (0, "stack limit expression is not supported");
    }

  if (current_frame.reg_no < MIN_MOVEM_REGS)
    {
      /* Store each register separately in the same order moveml does.  */
      int i;

      for (i = 16; i-- > 0; )
        if (current_frame.reg_mask & (1 << i))
          {
            src = gen_rtx_REG (SImode, D0_REG + i);
            dest = gen_frame_mem (SImode,
                                  gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
            m68k_set_frame_related (emit_insn (gen_movsi (dest, src)));
          }
    }
  else
    {
      if (TARGET_COLDFIRE)
        /* The required register save space has already been allocated.
           The first register should be stored at (%sp).  */
        m68k_set_frame_related
          (m68k_emit_movem (stack_pointer_rtx, 0,
                            current_frame.reg_no, D0_REG,
                            current_frame.reg_mask, true, false));
      else
        m68k_set_frame_related
          (m68k_emit_movem (stack_pointer_rtx,
                            current_frame.reg_no * -GET_MODE_SIZE (SImode),
                            current_frame.reg_no, D0_REG,
                            current_frame.reg_mask, true, true));
    }

  if (!TARGET_SEP_DATA
      && crtl->uses_pic_offset_table)
    emit_insn (gen_load_got (pic_offset_table_rtx));
}
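
/* For a 68020 function that needs a frame pointer, has 8 bytes of
   locals and saves three call-saved data registers, the expansion
   above typically amounts to an illustrative sketch like:

     link.w %fp,#-8          | allocate frame, save old %fp
     movem.l %d2-%d4,-(%sp)  | save call-saved registers

   (assembler mnemonics and syntax vary between MIT and Motorola
   output; this is not verbatim compiler output).  */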
\f
/* Return true if a simple (return) instruction is sufficient for this
   function (i.e. if no epilogue is needed).  */

bool
m68k_use_return_insn (void)
{
  if (!reload_completed || frame_pointer_needed || get_frame_size () != 0)
    return false;

  m68k_compute_frame_layout ();
  return current_frame.offset == 0;
}

/* Emit RTL for the "epilogue" or "sibcall_epilogue" define_expand;
   SIBCALL_P says which.

   The function epilogue should not depend on the current stack pointer!
   It should use the frame pointer only, if there is a frame pointer.
   This is mandatory because of alloca; we also take advantage of it to
   omit stack adjustments before returning.  */

void
m68k_expand_epilogue (bool sibcall_p)
{
  HOST_WIDE_INT fsize, fsize_with_regs;
  bool big, restore_from_sp;

  m68k_compute_frame_layout ();

  fsize = current_frame.size;
  big = false;
  restore_from_sp = false;

  /* FIXME : current_function_is_leaf below is too strong.
     What we really need to know there is if there could be pending
     stack adjustment needed at that point.  */
  restore_from_sp = (!frame_pointer_needed
                     || (!cfun->calls_alloca
                         && current_function_is_leaf));

  /* fsize_with_regs is the size we need to adjust the sp when
     popping the frame.  */
  fsize_with_regs = fsize;
  if (TARGET_COLDFIRE && restore_from_sp)
    {
      /* ColdFire's move multiple instructions do not allow post-increment
         addressing.  Add the size of movem loads to the final deallocation
         instead.  */
      if (current_frame.reg_no >= MIN_MOVEM_REGS)
        fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
      if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
        fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
    }

  if (current_frame.offset + fsize >= 0x8000
      && !restore_from_sp
      && (current_frame.reg_mask || current_frame.fpu_mask))
    {
      if (TARGET_COLDFIRE
          && (current_frame.reg_no >= MIN_MOVEM_REGS
              || current_frame.fpu_no >= MIN_FMOVEM_REGS))
        {
          /* ColdFire's move multiple instructions do not support the
             (d8,Ax,Xi) addressing mode, so we might as well use a normal
             stack-based restore.  */
          emit_move_insn (gen_rtx_REG (Pmode, A1_REG),
                          GEN_INT (-(current_frame.offset + fsize)));
          emit_insn (gen_addsi3 (stack_pointer_rtx,
                                 gen_rtx_REG (Pmode, A1_REG),
                                 frame_pointer_rtx));
          restore_from_sp = true;
        }
      else
        {
          emit_move_insn (gen_rtx_REG (Pmode, A1_REG), GEN_INT (-fsize));
          fsize = 0;
          big = true;
        }
    }

  if (current_frame.reg_no < MIN_MOVEM_REGS)
    {
      /* Restore each register separately in the same order moveml does.  */
      int i;
      HOST_WIDE_INT offset;

      offset = current_frame.offset + fsize;
      for (i = 0; i < 16; i++)
        if (current_frame.reg_mask & (1 << i))
          {
            rtx addr;

            if (big)
              {
                /* Generate the address -OFFSET(%fp,%a1.l).  */
                addr = gen_rtx_REG (Pmode, A1_REG);
                addr = gen_rtx_PLUS (Pmode, addr, frame_pointer_rtx);
                addr = plus_constant (addr, -offset);
              }
            else if (restore_from_sp)
              addr = gen_rtx_POST_INC (Pmode, stack_pointer_rtx);
            else
              addr = plus_constant (frame_pointer_rtx, -offset);
            emit_move_insn (gen_rtx_REG (SImode, D0_REG + i),
                            gen_frame_mem (SImode, addr));
            offset -= GET_MODE_SIZE (SImode);
          }
    }
  else if (current_frame.reg_mask)
    {
      if (big)
        m68k_emit_movem (gen_rtx_PLUS (Pmode,
                                       gen_rtx_REG (Pmode, A1_REG),
                                       frame_pointer_rtx),
                         -(current_frame.offset + fsize),
                         current_frame.reg_no, D0_REG,
                         current_frame.reg_mask, false, false);
      else if (restore_from_sp)
        m68k_emit_movem (stack_pointer_rtx, 0,
                         current_frame.reg_no, D0_REG,
                         current_frame.reg_mask, false,
                         !TARGET_COLDFIRE);
      else
        m68k_emit_movem (frame_pointer_rtx,
                         -(current_frame.offset + fsize),
                         current_frame.reg_no, D0_REG,
                         current_frame.reg_mask, false, false);
    }

  if (current_frame.fpu_no > 0)
    {
      if (big)
        m68k_emit_movem (gen_rtx_PLUS (Pmode,
                                       gen_rtx_REG (Pmode, A1_REG),
                                       frame_pointer_rtx),
                         -(current_frame.foffset + fsize),
                         current_frame.fpu_no, FP0_REG,
                         current_frame.fpu_mask, false, false);
      else if (restore_from_sp)
        {
          if (TARGET_COLDFIRE)
            {
              int offset;

              /* If we used moveml to restore the integer registers, the
                 stack pointer will still point to the bottom of the moveml
                 save area.  Find the stack offset of the first FP
                 register.  */
              if (current_frame.reg_no < MIN_MOVEM_REGS)
                offset = 0;
              else
                offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
              m68k_emit_movem (stack_pointer_rtx, offset,
                               current_frame.fpu_no, FP0_REG,
                               current_frame.fpu_mask, false, false);
            }
          else
            m68k_emit_movem (stack_pointer_rtx, 0,
                             current_frame.fpu_no, FP0_REG,
                             current_frame.fpu_mask, false, true);
        }
      else
        m68k_emit_movem (frame_pointer_rtx,
                         -(current_frame.foffset + fsize),
                         current_frame.fpu_no, FP0_REG,
                         current_frame.fpu_mask, false, false);
    }

  if (frame_pointer_needed)
    emit_insn (gen_unlink (frame_pointer_rtx));
  else if (fsize_with_regs)
    emit_insn (gen_addsi3 (stack_pointer_rtx,
                           stack_pointer_rtx,
                           GEN_INT (fsize_with_regs)));

  if (crtl->calls_eh_return)
    emit_insn (gen_addsi3 (stack_pointer_rtx,
                           stack_pointer_rtx,
                           EH_RETURN_STACKADJ_RTX));

  if (!sibcall_p)
    emit_jump_insn (ret_rtx);
}
\f
/* Return true if X is a valid comparison operator for the dbcc
   instruction.

   Note it rejects floating point comparison operators.
   (In the future we could use Fdbcc).

   It also rejects some comparisons when CC_NO_OVERFLOW is set.  */

int
valid_dbcc_comparison_p_2 (rtx x, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (x))
    {
    case EQ: case NE: case GTU: case LTU:
    case GEU: case LEU:
      return 1;

      /* Reject some when CC_NO_OVERFLOW is set.  This may be overly
         conservative.  */
    case GT: case LT: case GE: case LE:
      return ! (cc_prev_status.flags & CC_NO_OVERFLOW);
    default:
      return 0;
    }
}

/* Return nonzero if flags are currently in the 68881 flag register.  */
int
flags_in_68881 (void)
{
  /* We could add support for these in the future.  */
  return cc_status.flags & CC_IN_68881;
}

/* Return true if PARALLEL contains register REGNO.  */
static bool
m68k_reg_present_p (const_rtx parallel, unsigned int regno)
{
  int i;

  if (REG_P (parallel) && REGNO (parallel) == regno)
    return true;

  if (GET_CODE (parallel) != PARALLEL)
    return false;

  for (i = 0; i < XVECLEN (parallel, 0); ++i)
    {
      const_rtx x;

      x = XEXP (XVECEXP (parallel, 0, i), 0);
      if (REG_P (x) && REGNO (x) == regno)
        return true;
    }

  return false;
}

/* Implement TARGET_FUNCTION_OK_FOR_SIBCALL_P.  */

static bool
m68k_ok_for_sibcall_p (tree decl, tree exp)
{
  enum m68k_function_kind kind;

  /* We cannot use sibcalls for nested functions because we use the
     static chain register for indirect calls.  */
  if (CALL_EXPR_STATIC_CHAIN (exp))
    return false;

  if (!VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
    {
      /* Check that the return value locations are the same.  For
         example, that we aren't returning a value from the sibling in
         a D0 register but then need to transfer it to an A0 register.  */
      rtx cfun_value;
      rtx call_value;

      cfun_value = FUNCTION_VALUE (TREE_TYPE (DECL_RESULT (cfun->decl)),
                                   cfun->decl);
      call_value = FUNCTION_VALUE (TREE_TYPE (exp), decl);

      /* Check that the values are equal or that the result the callee
         function returns is a superset of what the current function
         returns.  */
      if (!(rtx_equal_p (cfun_value, call_value)
            || (REG_P (cfun_value)
                && m68k_reg_present_p (call_value, REGNO (cfun_value)))))
        return false;
    }

  kind = m68k_get_function_kind (current_function_decl);
  if (kind == m68k_fk_normal_function)
    /* We can always sibcall from a normal function, because it's
       undefined if it is calling an interrupt function.  */
    return true;

  /* Otherwise we can only sibcall if the function kind is known to be
     the same.  */
  if (decl && m68k_get_function_kind (decl) == kind)
    return true;

  return false;
}

/* On the m68k all args are always pushed.  */

static rtx
m68k_function_arg (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
                   enum machine_mode mode ATTRIBUTE_UNUSED,
                   const_tree type ATTRIBUTE_UNUSED,
                   bool named ATTRIBUTE_UNUSED)
{
  return NULL_RTX;
}

static void
m68k_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
                           const_tree type, bool named ATTRIBUTE_UNUSED)
{
  *cum += (mode != BLKmode
           ? (GET_MODE_SIZE (mode) + 3) & ~3
           : (int_size_in_bytes (type) + 3) & ~3);
}

/* Convert X to a legitimate function call memory reference and return the
   result.  */

rtx
m68k_legitimize_call_address (rtx x)
{
  gcc_assert (MEM_P (x));
  if (call_operand (XEXP (x, 0), VOIDmode))
    return x;
  return replace_equiv_address (x, force_reg (Pmode, XEXP (x, 0)));
}

/* Likewise for sibling calls.  */

rtx
m68k_legitimize_sibcall_address (rtx x)
{
  gcc_assert (MEM_P (x));
  if (sibcall_operand (XEXP (x, 0), VOIDmode))
    return x;

  emit_move_insn (gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM), XEXP (x, 0));
  return replace_equiv_address (x, gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM));
}

/* Convert X to a legitimate address and return it if successful.  Otherwise
   return X.

   For the 68000, we handle X+REG by loading X into a register R and
   using R+REG.  R will go in an address reg and indexing will be used.
   However, if REG is a broken-out memory address or multiplication,
   nothing needs to be done because REG can certainly go in an address reg.  */

static rtx
m68k_legitimize_address (rtx x, rtx oldx, enum machine_mode mode)
{
  if (m68k_tls_symbol_p (x))
    return m68k_legitimize_tls_address (x);

  if (GET_CODE (x) == PLUS)
    {
      int ch = (x) != (oldx);
      int copied = 0;

#define COPY_ONCE(Y) if (!copied) { Y = copy_rtx (Y); copied = ch = 1; }

      if (GET_CODE (XEXP (x, 0)) == MULT)
        {
          COPY_ONCE (x);
          XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
        }
      if (GET_CODE (XEXP (x, 1)) == MULT)
        {
          COPY_ONCE (x);
          XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
        }
      if (ch)
        {
          if (GET_CODE (XEXP (x, 1)) == REG
              && GET_CODE (XEXP (x, 0)) == REG)
            {
              if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
                {
                  COPY_ONCE (x);
                  x = force_operand (x, 0);
                }
              return x;
            }
          if (memory_address_p (mode, x))
            return x;
        }
      if (GET_CODE (XEXP (x, 0)) == REG
          || (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
              && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
              && GET_MODE (XEXP (XEXP (x, 0), 0)) == HImode))
        {
          rtx temp = gen_reg_rtx (Pmode);
          rtx val = force_operand (XEXP (x, 1), 0);
          emit_move_insn (temp, val);
          COPY_ONCE (x);
          XEXP (x, 1) = temp;
          if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
              && GET_CODE (XEXP (x, 0)) == REG)
            x = force_operand (x, 0);
        }
      else if (GET_CODE (XEXP (x, 1)) == REG
               || (GET_CODE (XEXP (x, 1)) == SIGN_EXTEND
                   && GET_CODE (XEXP (XEXP (x, 1), 0)) == REG
                   && GET_MODE (XEXP (XEXP (x, 1), 0)) == HImode))
        {
          rtx temp = gen_reg_rtx (Pmode);
          rtx val = force_operand (XEXP (x, 0), 0);
          emit_move_insn (temp, val);
          COPY_ONCE (x);
          XEXP (x, 0) = temp;
          if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
              && GET_CODE (XEXP (x, 1)) == REG)
            x = force_operand (x, 0);
        }
    }

  return x;
}


/* Output a dbCC; jCC sequence.  Note we do not handle the
   floating point version of this sequence (Fdbcc).  We also
   do not handle alternative conditions when CC_NO_OVERFLOW is
   set.  It is assumed that valid_dbcc_comparison_p and flags_in_68881 will
   kick those out before we get here.  */

void
output_dbcc_and_branch (rtx *operands)
{
  switch (GET_CODE (operands[3]))
    {
    case EQ:
      output_asm_insn ("dbeq %0,%l1\n\tjeq %l2", operands);
      break;

    case NE:
      output_asm_insn ("dbne %0,%l1\n\tjne %l2", operands);
      break;

    case GT:
      output_asm_insn ("dbgt %0,%l1\n\tjgt %l2", operands);
      break;

    case GTU:
      output_asm_insn ("dbhi %0,%l1\n\tjhi %l2", operands);
      break;

    case LT:
      output_asm_insn ("dblt %0,%l1\n\tjlt %l2", operands);
      break;

    case LTU:
      output_asm_insn ("dbcs %0,%l1\n\tjcs %l2", operands);
      break;

    case GE:
      output_asm_insn ("dbge %0,%l1\n\tjge %l2", operands);
      break;

    case GEU:
      output_asm_insn ("dbcc %0,%l1\n\tjcc %l2", operands);
      break;

    case LE:
      output_asm_insn ("dble %0,%l1\n\tjle %l2", operands);
      break;

    case LEU:
      output_asm_insn ("dbls %0,%l1\n\tjls %l2", operands);
      break;

    default:
      gcc_unreachable ();
    }

  /* If the decrement is to be done in SImode, then we have
     to compensate for the fact that dbcc decrements in HImode.  */
  switch (GET_MODE (operands[0]))
    {
    case SImode:
      output_asm_insn ("clr%.w %0\n\tsubq%.l #1,%0\n\tjpl %l1", operands);
      break;

    case HImode:
      break;

    default:
      gcc_unreachable ();
    }
}
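
/* For an SImode EQ loop counter in %d0, the templates above therefore
   produce a sequence along the lines of (illustrative only):

     dbeq %d0,L1
     jeq L2
     clr.w %d0
     subq.l #1,%d0
     jpl L1  */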

const char *
output_scc_di (rtx op, rtx operand1, rtx operand2, rtx dest)
{
  rtx loperands[7];
  enum rtx_code op_code = GET_CODE (op);

  /* This does not produce a useful cc.  */
  CC_STATUS_INIT;

  /* The m68k cmp.l instruction requires operand1 to be a reg as used
     below.  Swap the operands and change the op if these requirements
     are not fulfilled.  */
  if (GET_CODE (operand2) == REG && GET_CODE (operand1) != REG)
    {
      rtx tmp = operand1;

      operand1 = operand2;
      operand2 = tmp;
      op_code = swap_condition (op_code);
    }
  loperands[0] = operand1;
  if (GET_CODE (operand1) == REG)
    loperands[1] = gen_rtx_REG (SImode, REGNO (operand1) + 1);
  else
    loperands[1] = adjust_address (operand1, SImode, 4);
  if (operand2 != const0_rtx)
    {
      loperands[2] = operand2;
      if (GET_CODE (operand2) == REG)
        loperands[3] = gen_rtx_REG (SImode, REGNO (operand2) + 1);
      else
        loperands[3] = adjust_address (operand2, SImode, 4);
    }
  loperands[4] = gen_label_rtx ();
  if (operand2 != const0_rtx)
    output_asm_insn ("cmp%.l %2,%0\n\tjne %l4\n\tcmp%.l %3,%1", loperands);
  else
    {
      if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[0]))
        output_asm_insn ("tst%.l %0", loperands);
      else
        output_asm_insn ("cmp%.w #0,%0", loperands);

      output_asm_insn ("jne %l4", loperands);

      if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[1]))
        output_asm_insn ("tst%.l %1", loperands);
      else
        output_asm_insn ("cmp%.w #0,%1", loperands);
    }

  loperands[5] = dest;

  switch (op_code)
    {
    case EQ:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                         CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("seq %5", loperands);
      break;

    case NE:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                         CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sne %5", loperands);
      break;

    case GT:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("shi %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                         CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sgt %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                         CODE_LABEL_NUMBER (loperands[6]));
      break;

    case GTU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                         CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("shi %5", loperands);
      break;

    case LT:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("scs %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                         CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("slt %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                         CODE_LABEL_NUMBER (loperands[6]));
      break;

    case LTU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                         CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("scs %5", loperands);
      break;

    case GE:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("scc %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                         CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sge %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                         CODE_LABEL_NUMBER (loperands[6]));
      break;

    case GEU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                         CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("scc %5", loperands);
      break;

    case LE:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("sls %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                         CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sle %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                         CODE_LABEL_NUMBER (loperands[6]));
      break;

    case LEU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                         CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sls %5", loperands);
      break;

    default:
      gcc_unreachable ();
    }
  return "";
}

const char *
output_btst (rtx *operands, rtx countop, rtx dataop, rtx insn, int signpos)
{
  operands[0] = countop;
  operands[1] = dataop;

  if (GET_CODE (countop) == CONST_INT)
    {
      register int count = INTVAL (countop);
      /* If COUNT is bigger than the size of the storage unit in use,
         advance to the containing unit of the same size.  */
1762 if (count > signpos)
1763 {
1764 int offset = (count & ~signpos) / 8;
1765 count = count & signpos;
1766 operands[1] = dataop = adjust_address (dataop, QImode, offset);
1767 }
1768 if (count == signpos)
1769 cc_status.flags = CC_NOT_POSITIVE | CC_Z_IN_NOT_N;
1770 else
1771 cc_status.flags = CC_NOT_NEGATIVE | CC_Z_IN_NOT_N;
1772
1773 /* These three statements used to use next_insns_test_no...
1774 but it appears that this should do the same job. */
1775 if (count == 31
1776 && next_insn_tests_no_inequality (insn))
1777 return "tst%.l %1";
1778 if (count == 15
1779 && next_insn_tests_no_inequality (insn))
1780 return "tst%.w %1";
1781 if (count == 7
1782 && next_insn_tests_no_inequality (insn))
1783 return "tst%.b %1";
1784 /* Try to use `movew to ccr' followed by the appropriate branch insn.
1785 On some m68k variants unfortunately that's slower than btst.
1786 On 68000 and higher, that should also work for all HImode operands. */
1787 if (TUNE_CPU32 || TARGET_COLDFIRE || optimize_size)
1788 {
1789 if (count == 3 && DATA_REG_P (operands[1])
1790 && next_insn_tests_no_inequality (insn))
1791 {
1792 cc_status.flags = CC_NOT_NEGATIVE | CC_Z_IN_NOT_N | CC_NO_OVERFLOW;
1793 return "move%.w %1,%%ccr";
1794 }
1795 if (count == 2 && DATA_REG_P (operands[1])
1796 && next_insn_tests_no_inequality (insn))
1797 {
1798 cc_status.flags = CC_NOT_NEGATIVE | CC_INVERTED | CC_NO_OVERFLOW;
1799 return "move%.w %1,%%ccr";
1800 }
1801 /* count == 1 followed by bvc/bvs and
1802 count == 0 followed by bcc/bcs are also possible, but need
1803 m68k-specific CC_Z_IN_NOT_V and CC_Z_IN_NOT_C flags. */
1804 }
1805
1806 cc_status.flags = CC_NOT_NEGATIVE;
1807 }
1808 return "btst %0,%1";
1809 }
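/* For illustration: a btst of bit 7 in a byte operand whose result only
   feeds an equality test is emitted as "tst%.b %1", with CC_Z_IN_NOT_N
   recorded so that the zero test is taken from the N flag; bits 15 and 31
   similarly map to tst.w and tst.l. */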
1810 \f
1811 /* Return true if X is a legitimate base register. STRICT_P says
1812 whether we need strict checking. */
1813
1814 bool
1815 m68k_legitimate_base_reg_p (rtx x, bool strict_p)
1816 {
1817 /* Allow SUBREG everywhere we allow REG. This results in better code. */
1818 if (!strict_p && GET_CODE (x) == SUBREG)
1819 x = SUBREG_REG (x);
1820
1821 return (REG_P (x)
1822 && (strict_p
1823 ? REGNO_OK_FOR_BASE_P (REGNO (x))
1824 : REGNO_OK_FOR_BASE_NONSTRICT_P (REGNO (x))));
1825 }
1826
1827 /* Return true if X is a legitimate index register. STRICT_P says
1828 whether we need strict checking. */
1829
1830 bool
1831 m68k_legitimate_index_reg_p (rtx x, bool strict_p)
1832 {
1833 if (!strict_p && GET_CODE (x) == SUBREG)
1834 x = SUBREG_REG (x);
1835
1836 return (REG_P (x)
1837 && (strict_p
1838 ? REGNO_OK_FOR_INDEX_P (REGNO (x))
1839 : REGNO_OK_FOR_INDEX_NONSTRICT_P (REGNO (x))));
1840 }
1841
1842 /* Return true if X is a legitimate index expression for a (d8,An,Xn) or
1843 (bd,An,Xn) addressing mode. Fill in the INDEX and SCALE fields of
1844 ADDRESS if so. STRICT_P says whether we need strict checking. */
1845
1846 static bool
1847 m68k_decompose_index (rtx x, bool strict_p, struct m68k_address *address)
1848 {
1849 int scale;
1850
1851 /* Check for a scale factor. */
1852 scale = 1;
1853 if ((TARGET_68020 || TARGET_COLDFIRE)
1854 && GET_CODE (x) == MULT
1855 && GET_CODE (XEXP (x, 1)) == CONST_INT
1856 && (INTVAL (XEXP (x, 1)) == 2
1857 || INTVAL (XEXP (x, 1)) == 4
1858 || (INTVAL (XEXP (x, 1)) == 8
1859 && (TARGET_COLDFIRE_FPU || !TARGET_COLDFIRE))))
1860 {
1861 scale = INTVAL (XEXP (x, 1));
1862 x = XEXP (x, 0);
1863 }
1864
1865 /* Check for a word extension. */
1866 if (!TARGET_COLDFIRE
1867 && GET_CODE (x) == SIGN_EXTEND
1868 && GET_MODE (XEXP (x, 0)) == HImode)
1869 x = XEXP (x, 0);
1870
1871 if (m68k_legitimate_index_reg_p (x, strict_p))
1872 {
1873 address->scale = scale;
1874 address->index = x;
1875 return true;
1876 }
1877
1878 return false;
1879 }
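/* For instance, on a 68020 (mult (reg:SI %d0) (const_int 4)) decomposes
   into index d0 with scale 4, and (sign_extend:SI (reg:HI %d0)) is
   accepted as a word-sized index on non-ColdFire targets. */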
1880
1881 /* Return true if X is an illegitimate symbolic constant. */
1882
1883 bool
1884 m68k_illegitimate_symbolic_constant_p (rtx x)
1885 {
1886 rtx base, offset;
1887
1888 if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P)
1889 {
1890 split_const (x, &base, &offset);
1891 if (GET_CODE (base) == SYMBOL_REF
1892 && !offset_within_block_p (base, INTVAL (offset)))
1893 return true;
1894 }
1895 return m68k_tls_reference_p (x, false);
1896 }
1897
1898 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
1899
1900 static bool
1901 m68k_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
1902 {
1903 return m68k_illegitimate_symbolic_constant_p (x);
1904 }
1905
1906 /* Return true if X is a legitimate constant address that can reach
1907 bytes in the range [X, X + REACH). STRICT_P says whether we need
1908 strict checking. */
1909
1910 static bool
1911 m68k_legitimate_constant_address_p (rtx x, unsigned int reach, bool strict_p)
1912 {
1913 rtx base, offset;
1914
1915 if (!CONSTANT_ADDRESS_P (x))
1916 return false;
1917
1918 if (flag_pic
1919 && !(strict_p && TARGET_PCREL)
1920 && symbolic_operand (x, VOIDmode))
1921 return false;
1922
1923 if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P && reach > 1)
1924 {
1925 split_const (x, &base, &offset);
1926 if (GET_CODE (base) == SYMBOL_REF
1927 && !offset_within_block_p (base, INTVAL (offset) + reach - 1))
1928 return false;
1929 }
1930
1931 return !m68k_tls_reference_p (x, false);
1932 }
1933
1934 /* Return true if X is a LABEL_REF for a jump table. Assume that unplaced
1935 labels will become jump tables. */
1936
1937 static bool
1938 m68k_jump_table_ref_p (rtx x)
1939 {
1940 if (GET_CODE (x) != LABEL_REF)
1941 return false;
1942
1943 x = XEXP (x, 0);
1944 if (!NEXT_INSN (x) && !PREV_INSN (x))
1945 return true;
1946
1947 x = next_nonnote_insn (x);
1948 return x && JUMP_TABLE_DATA_P (x);
1949 }
1950
1951 /* Return true if X is a legitimate address for values of mode MODE.
1952 STRICT_P says whether strict checking is needed. If the address
1953 is valid, describe its components in *ADDRESS. */
1954
1955 static bool
1956 m68k_decompose_address (enum machine_mode mode, rtx x,
1957 bool strict_p, struct m68k_address *address)
1958 {
1959 unsigned int reach;
1960
1961 memset (address, 0, sizeof (*address));
1962
1963 if (mode == BLKmode)
1964 reach = 1;
1965 else
1966 reach = GET_MODE_SIZE (mode);
1967
1968 /* Check for (An) (mode 2). */
1969 if (m68k_legitimate_base_reg_p (x, strict_p))
1970 {
1971 address->base = x;
1972 return true;
1973 }
1974
1975 /* Check for -(An) and (An)+ (modes 3 and 4). */
1976 if ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_INC)
1977 && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
1978 {
1979 address->code = GET_CODE (x);
1980 address->base = XEXP (x, 0);
1981 return true;
1982 }
1983
1984 /* Check for (d16,An) (mode 5). */
1985 if (GET_CODE (x) == PLUS
1986 && GET_CODE (XEXP (x, 1)) == CONST_INT
1987 && IN_RANGE (INTVAL (XEXP (x, 1)), -0x8000, 0x8000 - reach)
1988 && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
1989 {
1990 address->base = XEXP (x, 0);
1991 address->offset = XEXP (x, 1);
1992 return true;
1993 }
1994
1995 /* Check for GOT loads. These are (bd,An,Xn) addresses if
1996 TARGET_68020 && flag_pic == 2, otherwise they are (d16,An)
1997 addresses. */
1998 if (GET_CODE (x) == PLUS
1999 && XEXP (x, 0) == pic_offset_table_rtx)
2000 {
2001 /* As we are processing a PLUS, do not unwrap RELOC32 symbols --
2002 they are invalid in this context. */
2003 if (m68k_unwrap_symbol (XEXP (x, 1), false) != XEXP (x, 1))
2004 {
2005 address->base = XEXP (x, 0);
2006 address->offset = XEXP (x, 1);
2007 return true;
2008 }
2009 }
2010
2011 /* The ColdFire FPU only accepts addressing modes 2-5. */
2012 if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
2013 return false;
2014
2015 /* Check for (xxx).w and (xxx).l. Also, in the TARGET_PCREL case,
2016 check for (d16,PC) or (bd,PC,Xn) with a suppressed index register.
2017 All these modes are variations of mode 7. */
2018 if (m68k_legitimate_constant_address_p (x, reach, strict_p))
2019 {
2020 address->offset = x;
2021 return true;
2022 }
2023
2024 /* Check for (d8,PC,Xn), a mode 7 form. This case is needed for
2025 tablejumps.
2026
2027 ??? do_tablejump creates these addresses before placing the target
2028 label, so we have to assume that unplaced labels are jump table
2029 references. It seems unlikely that we would ever generate indexed
2030 accesses to unplaced labels in other cases. */
2031 if (GET_CODE (x) == PLUS
2032 && m68k_jump_table_ref_p (XEXP (x, 1))
2033 && m68k_decompose_index (XEXP (x, 0), strict_p, address))
2034 {
2035 address->offset = XEXP (x, 1);
2036 return true;
2037 }
2038
2039 /* Everything hereafter deals with (d8,An,Xn.SIZE*SCALE) or
2040 (bd,An,Xn.SIZE*SCALE) addresses. */
2041
2042 if (TARGET_68020)
2043 {
2044 /* Check for a nonzero base displacement. */
2045 if (GET_CODE (x) == PLUS
2046 && m68k_legitimate_constant_address_p (XEXP (x, 1), reach, strict_p))
2047 {
2048 address->offset = XEXP (x, 1);
2049 x = XEXP (x, 0);
2050 }
2051
2052 /* Check for a suppressed index register. */
2053 if (m68k_legitimate_base_reg_p (x, strict_p))
2054 {
2055 address->base = x;
2056 return true;
2057 }
2058
2059 /* Check for a suppressed base register. Do not allow this case
2060 for non-symbolic offsets as it effectively gives gcc freedom
2061 to treat data registers as base registers, which can generate
2062 worse code. */
2063 if (address->offset
2064 && symbolic_operand (address->offset, VOIDmode)
2065 && m68k_decompose_index (x, strict_p, address))
2066 return true;
2067 }
2068 else
2069 {
2070 /* Check for a nonzero base displacement. */
2071 if (GET_CODE (x) == PLUS
2072 && GET_CODE (XEXP (x, 1)) == CONST_INT
2073 && IN_RANGE (INTVAL (XEXP (x, 1)), -0x80, 0x80 - reach))
2074 {
2075 address->offset = XEXP (x, 1);
2076 x = XEXP (x, 0);
2077 }
2078 }
2079
2080 /* We now expect the sum of a base and an index. */
2081 if (GET_CODE (x) == PLUS)
2082 {
2083 if (m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p)
2084 && m68k_decompose_index (XEXP (x, 1), strict_p, address))
2085 {
2086 address->base = XEXP (x, 0);
2087 return true;
2088 }
2089
2090 if (m68k_legitimate_base_reg_p (XEXP (x, 1), strict_p)
2091 && m68k_decompose_index (XEXP (x, 0), strict_p, address))
2092 {
2093 address->base = XEXP (x, 1);
2094 return true;
2095 }
2096 }
2097 return false;
2098 }
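/* To illustrate the cases above: (reg:SI %a0) is mode 2;
   (plus (reg:SI %a0) (const_int 8)) is mode 5 with base a0 and offset 8;
   and, on a 68020, (plus (plus (reg:SI %a0)
                                (mult (reg:SI %d0) (const_int 4)))
                          (const_int 8))
   decomposes into base a0, index d0, scale 4 and offset 8. */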
2099
2100 /* Return true if X is a legitimate address for values of mode MODE.
2101 STRICT_P says whether strict checking is needed. */
2102
2103 bool
2104 m68k_legitimate_address_p (enum machine_mode mode, rtx x, bool strict_p)
2105 {
2106 struct m68k_address address;
2107
2108 return m68k_decompose_address (mode, x, strict_p, &address);
2109 }
2110
2111 /* Return true if X is a memory reference, describing its address in ADDRESS if so.
2112 Apply strict checking if called during or after reload. */
2113
2114 static bool
2115 m68k_legitimate_mem_p (rtx x, struct m68k_address *address)
2116 {
2117 return (MEM_P (x)
2118 && m68k_decompose_address (GET_MODE (x), XEXP (x, 0),
2119 reload_in_progress || reload_completed,
2120 address));
2121 }
2122
2123 /* Implement TARGET_LEGITIMATE_CONSTANT_P. */
2124
2125 bool
2126 m68k_legitimate_constant_p (enum machine_mode mode, rtx x)
2127 {
2128 return mode != XFmode && !m68k_illegitimate_symbolic_constant_p (x);
2129 }
2130
2131 /* Return true if X matches the 'Q' constraint. It must be a memory
2132 with a base address and no constant offset or index. */
2133
2134 bool
2135 m68k_matches_q_p (rtx x)
2136 {
2137 struct m68k_address address;
2138
2139 return (m68k_legitimate_mem_p (x, &address)
2140 && address.code == UNKNOWN
2141 && address.base
2142 && !address.offset
2143 && !address.index);
2144 }
2145
2146 /* Return true if X matches the 'U' constraint. It must be a memory
2147 with a base address, a constant offset and no index. */
2148
2149 bool
2150 m68k_matches_u_p (rtx x)
2151 {
2152 struct m68k_address address;
2153
2154 return (m68k_legitimate_mem_p (x, &address)
2155 && address.code == UNKNOWN
2156 && address.base
2157 && address.offset
2158 && !address.index);
2159 }
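/* Thus 'Q' accepts a plain (mem (reg:SI %a0)), while 'U' accepts
   something like (mem (plus (reg:SI %a0) (const_int 4))). */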
2160
2161 /* Return the GOT pointer. */
2162
2163 static rtx
2164 m68k_get_gp (void)
2165 {
2166 if (pic_offset_table_rtx == NULL_RTX)
2167 pic_offset_table_rtx = gen_rtx_REG (Pmode, PIC_REG);
2168
2169 crtl->uses_pic_offset_table = 1;
2170
2171 return pic_offset_table_rtx;
2172 }
2173
2174 /* M68K relocations, used to distinguish GOT and TLS relocations in UNSPEC
2175 wrappers. */
2176 enum m68k_reloc { RELOC_GOT, RELOC_TLSGD, RELOC_TLSLDM, RELOC_TLSLDO,
2177 RELOC_TLSIE, RELOC_TLSLE };
2178
2179 #define TLS_RELOC_P(RELOC) ((RELOC) != RELOC_GOT)
2180
2181 /* Wrap symbol X into an unspec representing relocation RELOC.
2182 BASE_REG is the register to be added to the result.
2183 TEMP_REG, if non-null, is a temporary register to use. */
2184
2185 static rtx
2186 m68k_wrap_symbol (rtx x, enum m68k_reloc reloc, rtx base_reg, rtx temp_reg)
2187 {
2188 bool use_x_p;
2189
2190 use_x_p = (base_reg == pic_offset_table_rtx) ? TARGET_XGOT : TARGET_XTLS;
2191
2192 if (TARGET_COLDFIRE && use_x_p)
2193 /* When compiling with an -mx{got,tls} switch, the code will look like this:
2194
2195 move.l <X>@<RELOC>,<TEMP_REG>
2196 add.l <BASE_REG>,<TEMP_REG> */
2197 {
2198 /* Wrap X in UNSPEC_RELOC32 so that m68k_output_addr_const_extra
2199 knows to put @RELOC after the reference. */
2200 x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)),
2201 UNSPEC_RELOC32);
2202 x = gen_rtx_CONST (Pmode, x);
2203
2204 if (temp_reg == NULL)
2205 {
2206 gcc_assert (can_create_pseudo_p ());
2207 temp_reg = gen_reg_rtx (Pmode);
2208 }
2209
2210 emit_move_insn (temp_reg, x);
2211 emit_insn (gen_addsi3 (temp_reg, temp_reg, base_reg));
2212 x = temp_reg;
2213 }
2214 else
2215 {
2216 x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)),
2217 UNSPEC_RELOC16);
2218 x = gen_rtx_CONST (Pmode, x);
2219
2220 x = gen_rtx_PLUS (Pmode, base_reg, x);
2221 }
2222
2223 return x;
2224 }
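/* In the common (non -mxgot/-mxtls) case the result is therefore
   (plus BASE_REG (const (unspec [X RELOC] UNSPEC_RELOC16))), which is
   later printed as a 16-bit <X>@<RELOC> displacement off BASE_REG. */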
2225
2226 /* Helper for m68k_unwrap_symbol.
2227 If unwrapping was successful (that is, if the return value differs
2228 from ORIG), also set *RELOC_PTR to the relocation type of the symbol. */
2229
2230 static rtx
2231 m68k_unwrap_symbol_1 (rtx orig, bool unwrap_reloc32_p,
2232 enum m68k_reloc *reloc_ptr)
2233 {
2234 if (GET_CODE (orig) == CONST)
2235 {
2236 rtx x;
2237 enum m68k_reloc dummy;
2238
2239 x = XEXP (orig, 0);
2240
2241 if (reloc_ptr == NULL)
2242 reloc_ptr = &dummy;
2243
2244 /* Handle an addend. */
2245 if ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS)
2246 && CONST_INT_P (XEXP (x, 1)))
2247 x = XEXP (x, 0);
2248
2249 if (GET_CODE (x) == UNSPEC)
2250 {
2251 switch (XINT (x, 1))
2252 {
2253 case UNSPEC_RELOC16:
2254 orig = XVECEXP (x, 0, 0);
2255 *reloc_ptr = (enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1));
2256 break;
2257
2258 case UNSPEC_RELOC32:
2259 if (unwrap_reloc32_p)
2260 {
2261 orig = XVECEXP (x, 0, 0);
2262 *reloc_ptr = (enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1));
2263 }
2264 break;
2265
2266 default:
2267 break;
2268 }
2269 }
2270 }
2271
2272 return orig;
2273 }
2274
2275 /* Unwrap symbol from UNSPEC_RELOC16 and, if unwrap_reloc32_p,
2276 UNSPEC_RELOC32 wrappers. */
2277
2278 rtx
2279 m68k_unwrap_symbol (rtx orig, bool unwrap_reloc32_p)
2280 {
2281 return m68k_unwrap_symbol_1 (orig, unwrap_reloc32_p, NULL);
2282 }
2283
2284 /* Helper for m68k_final_prescan_insn. */
2285
2286 static int
2287 m68k_final_prescan_insn_1 (rtx *x_ptr, void *data ATTRIBUTE_UNUSED)
2288 {
2289 rtx x = *x_ptr;
2290
2291 if (m68k_unwrap_symbol (x, true) != x)
2292 /* For rationale of the below, see comment in m68k_final_prescan_insn. */
2293 {
2294 rtx plus;
2295
2296 gcc_assert (GET_CODE (x) == CONST);
2297 plus = XEXP (x, 0);
2298
2299 if (GET_CODE (plus) == PLUS || GET_CODE (plus) == MINUS)
2300 {
2301 rtx unspec;
2302 rtx addend;
2303
2304 unspec = XEXP (plus, 0);
2305 gcc_assert (GET_CODE (unspec) == UNSPEC);
2306 addend = XEXP (plus, 1);
2307 gcc_assert (CONST_INT_P (addend));
2308
2309 /* We now have all the pieces, rearrange them. */
2310
2311 /* Move symbol to plus. */
2312 XEXP (plus, 0) = XVECEXP (unspec, 0, 0);
2313
2314 /* Move plus inside unspec. */
2315 XVECEXP (unspec, 0, 0) = plus;
2316
2317 /* Move unspec to top level of const. */
2318 XEXP (x, 0) = unspec;
2319 }
2320
2321 return -1;
2322 }
2323
2324 return 0;
2325 }
2326
2327 /* Prescan insn before outputting assembler for it. */
2328
2329 void
2330 m68k_final_prescan_insn (rtx insn ATTRIBUTE_UNUSED,
2331 rtx *operands, int n_operands)
2332 {
2333 int i;
2334
2335 /* Combine and, possibly, other optimizations may do a good job of
2336 converting
2337 (const (unspec [(symbol)]))
2338 into
2339 (const (plus (unspec [(symbol)])
2340 (const_int N))).
2341 The problem with this is emitting @TLS or @GOT decorations.
2342 The decoration is emitted when processing (unspec), so the
2343 result would be "#symbol@TLSLE+N" instead of "#symbol+N@TLSLE".
2344
2345 It seems that the easiest solution to this is to convert such
2346 operands to
2347 (const (unspec [(plus (symbol)
2348 (const_int N))])).
2349 Note that the top level of the operand remains intact, so we don't have
2350 to patch up anything outside of the operand. */
2351
2352 for (i = 0; i < n_operands; ++i)
2353 {
2354 rtx op;
2355
2356 op = operands[i];
2357
2358 for_each_rtx (&op, m68k_final_prescan_insn_1, NULL);
2359 }
2360 }
2361
2362 /* Move X to a register and add a REG_EQUAL note pointing to ORIG.
2363 If REG is non-null, use it; otherwise generate a new pseudo. */
2364
2365 static rtx
2366 m68k_move_to_reg (rtx x, rtx orig, rtx reg)
2367 {
2368 rtx insn;
2369
2370 if (reg == NULL_RTX)
2371 {
2372 gcc_assert (can_create_pseudo_p ());
2373 reg = gen_reg_rtx (Pmode);
2374 }
2375
2376 insn = emit_move_insn (reg, x);
2377 /* Put a REG_EQUAL note on this insn, so that it can be optimized
2378 by the loop optimizer. */
2379 set_unique_reg_note (insn, REG_EQUAL, orig);
2380
2381 return reg;
2382 }
2383
2384 /* Does the same as m68k_wrap_symbol, but returns a memory reference to
2385 the GOT slot. */
2386
2387 static rtx
2388 m68k_wrap_symbol_into_got_ref (rtx x, enum m68k_reloc reloc, rtx temp_reg)
2389 {
2390 x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), temp_reg);
2391
2392 x = gen_rtx_MEM (Pmode, x);
2393 MEM_READONLY_P (x) = 1;
2394
2395 return x;
2396 }
2397
2398 /* Legitimize PIC addresses. If the address is already
2399 position-independent, we return ORIG. Newly generated
2400 position-independent addresses go to REG. If we need more
2401 than one register, we lose.
2402
2403 An address is legitimized by making an indirect reference
2404 through the Global Offset Table with the name of the symbol
2405 used as an offset.
2406
2407 The assembler and linker are responsible for placing the
2408 address of the symbol in the GOT. The function prologue
2409 is responsible for initializing a5 to the starting address
2410 of the GOT.
2411
2412 The assembler is also responsible for translating a symbol name
2413 into a constant displacement from the start of the GOT.
2414
2415 A quick example may make things a little clearer:
2416
2417 When not generating PIC code, to store the value 12345 into _foo
2418 we would generate the following code:
2419
2420 movel #12345, _foo
2421
2422 When generating PIC code, two transformations are made. First, the compiler
2423 loads the address of foo into a register. So the first transformation makes:
2424
2425 lea _foo, a0
2426 movel #12345, a0@
2427
2428 The code in movsi will intercept the lea instruction and call this
2429 routine which will transform the instructions into:
2430
2431 movel a5@(_foo:w), a0
2432 movel #12345, a0@
2433
2434
2435 That (in a nutshell) is how *all* symbol and label references are
2436 handled. */
2437
2438 rtx
2439 legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
2440 rtx reg)
2441 {
2442 rtx pic_ref = orig;
2443
2444 /* First handle a simple SYMBOL_REF or LABEL_REF */
2445 if (GET_CODE (orig) == SYMBOL_REF || GET_CODE (orig) == LABEL_REF)
2446 {
2447 gcc_assert (reg);
2448
2449 pic_ref = m68k_wrap_symbol_into_got_ref (orig, RELOC_GOT, reg);
2450 pic_ref = m68k_move_to_reg (pic_ref, orig, reg);
2451 }
2452 else if (GET_CODE (orig) == CONST)
2453 {
2454 rtx base;
2455
2456 /* Make sure this has not already been legitimized. */
2457 if (m68k_unwrap_symbol (orig, true) != orig)
2458 return orig;
2459
2460 gcc_assert (reg);
2461
2462 /* legitimize both operands of the PLUS */
2463 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
2464
2465 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
2466 orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
2467 base == reg ? 0 : reg);
2468
2469 if (GET_CODE (orig) == CONST_INT)
2470 pic_ref = plus_constant (base, INTVAL (orig));
2471 else
2472 pic_ref = gen_rtx_PLUS (Pmode, base, orig);
2473 }
2474
2475 return pic_ref;
2476 }
2477
2478 /* The __tls_get_addr symbol. */
2479 static GTY(()) rtx m68k_tls_get_addr;
2480
2481 /* Return SYMBOL_REF for __tls_get_addr. */
2482
2483 static rtx
2484 m68k_get_tls_get_addr (void)
2485 {
2486 if (m68k_tls_get_addr == NULL_RTX)
2487 m68k_tls_get_addr = init_one_libfunc ("__tls_get_addr");
2488
2489 return m68k_tls_get_addr;
2490 }
2491
2492 /* Return libcall result in A0 instead of the usual D0. */
2493 static bool m68k_libcall_value_in_a0_p = false;
2494
2495 /* Emit instruction sequence that calls __tls_get_addr. X is
2496 the TLS symbol we are referencing and RELOC is the symbol type to use
2497 (either TLSGD or TLSLDM). EQV is the REG_EQUAL note for the sequence
2498 emitted. A pseudo register with the result of the __tls_get_addr
2499 call is returned. */
2500
2501 static rtx
2502 m68k_call_tls_get_addr (rtx x, rtx eqv, enum m68k_reloc reloc)
2503 {
2504 rtx a0;
2505 rtx insns;
2506 rtx dest;
2507
2508 /* Emit the call sequence. */
2509 start_sequence ();
2510
2511 /* FIXME: Unfortunately, emit_library_call_value does not
2512 consider (plus (%a5) (const (unspec))) to be a good enough
2513 operand for push, so it forces it into a register. The bad
2514 thing about this is that the combiner, due to copy propagation and other
2515 optimizations, sometimes cannot fix this up later. As a consequence, an
2516 additional register may be allocated, resulting in a spill.
2517 For reference, see the argument-processing loops in
2518 calls.c:emit_library_call_value_1.
2519 For a testcase, see gcc.target/m68k/tls-{gd, ld}.c */
2520 x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), NULL_RTX);
2521
2522 /* __tls_get_addr() is not a libcall, but emitting a libcall_value
2523 is the simplest way of generating a call. The difference between
2524 __tls_get_addr() and a libcall is that the result is returned in A0
2525 instead of the usual D0. To work around this, we use
2526 m68k_libcall_value_in_a0_p, which temporarily switches the result to A0. */
2527
2528 m68k_libcall_value_in_a0_p = true;
2529 a0 = emit_library_call_value (m68k_get_tls_get_addr (), NULL_RTX, LCT_PURE,
2530 Pmode, 1, x, Pmode);
2531 m68k_libcall_value_in_a0_p = false;
2532
2533 insns = get_insns ();
2534 end_sequence ();
2535
2536 gcc_assert (can_create_pseudo_p ());
2537 dest = gen_reg_rtx (Pmode);
2538 emit_libcall_block (insns, dest, a0, eqv);
2539
2540 return dest;
2541 }
2542
2543 /* The __m68k_read_tp symbol. */
2544 static GTY(()) rtx m68k_read_tp;
2545
2546 /* Return SYMBOL_REF for __m68k_read_tp. */
2547
2548 static rtx
2549 m68k_get_m68k_read_tp (void)
2550 {
2551 if (m68k_read_tp == NULL_RTX)
2552 m68k_read_tp = init_one_libfunc ("__m68k_read_tp");
2553
2554 return m68k_read_tp;
2555 }
2556
2557 /* Emit instruction sequence that calls __m68k_read_tp.
2558 A pseudo register with the result of the __m68k_read_tp call is returned. */
2559
2560 static rtx
2561 m68k_call_m68k_read_tp (void)
2562 {
2563 rtx a0;
2564 rtx eqv;
2565 rtx insns;
2566 rtx dest;
2567
2568 start_sequence ();
2569
2570 /* __m68k_read_tp() is not a libcall, but emitting a libcall_value
2571 is the simplest way of generating a call. The difference between
2572 __m68k_read_tp() and a libcall is that the result is returned in A0
2573 instead of the usual D0. To work around this, we use
2574 m68k_libcall_value_in_a0_p, which temporarily switches the result to A0. */
2575
2576 /* Emit the call sequence. */
2577 m68k_libcall_value_in_a0_p = true;
2578 a0 = emit_library_call_value (m68k_get_m68k_read_tp (), NULL_RTX, LCT_PURE,
2579 Pmode, 0);
2580 m68k_libcall_value_in_a0_p = false;
2581 insns = get_insns ();
2582 end_sequence ();
2583
2584 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2585 share the m68k_read_tp result with other IE/LE model accesses. */
2586 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const1_rtx), UNSPEC_RELOC32);
2587
2588 gcc_assert (can_create_pseudo_p ());
2589 dest = gen_reg_rtx (Pmode);
2590 emit_libcall_block (insns, dest, a0, eqv);
2591
2592 return dest;
2593 }
2594
2595 /* Return a legitimized address for accessing TLS SYMBOL_REF X.
2596 For explanations of the instruction sequences, see the TLS/NPTL ABI for
2597 m68k and ColdFire. */
2598
2599 rtx
2600 m68k_legitimize_tls_address (rtx orig)
2601 {
2602 switch (SYMBOL_REF_TLS_MODEL (orig))
2603 {
2604 case TLS_MODEL_GLOBAL_DYNAMIC:
2605 orig = m68k_call_tls_get_addr (orig, orig, RELOC_TLSGD);
2606 break;
2607
2608 case TLS_MODEL_LOCAL_DYNAMIC:
2609 {
2610 rtx eqv;
2611 rtx a0;
2612 rtx x;
2613
2614 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2615 share the LDM result with other LD model accesses. */
2616 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
2617 UNSPEC_RELOC32);
2618
2619 a0 = m68k_call_tls_get_addr (orig, eqv, RELOC_TLSLDM);
2620
2621 x = m68k_wrap_symbol (orig, RELOC_TLSLDO, a0, NULL_RTX);
2622
2623 if (can_create_pseudo_p ())
2624 x = m68k_move_to_reg (x, orig, NULL_RTX);
2625
2626 orig = x;
2627 break;
2628 }
2629
2630 case TLS_MODEL_INITIAL_EXEC:
2631 {
2632 rtx a0;
2633 rtx x;
2634
2635 a0 = m68k_call_m68k_read_tp ();
2636
2637 x = m68k_wrap_symbol_into_got_ref (orig, RELOC_TLSIE, NULL_RTX);
2638 x = gen_rtx_PLUS (Pmode, x, a0);
2639
2640 if (can_create_pseudo_p ())
2641 x = m68k_move_to_reg (x, orig, NULL_RTX);
2642
2643 orig = x;
2644 break;
2645 }
2646
2647 case TLS_MODEL_LOCAL_EXEC:
2648 {
2649 rtx a0;
2650 rtx x;
2651
2652 a0 = m68k_call_m68k_read_tp ();
2653
2654 x = m68k_wrap_symbol (orig, RELOC_TLSLE, a0, NULL_RTX);
2655
2656 if (can_create_pseudo_p ())
2657 x = m68k_move_to_reg (x, orig, NULL_RTX);
2658
2659 orig = x;
2660 break;
2661 }
2662
2663 default:
2664 gcc_unreachable ();
2665 }
2666
2667 return orig;
2668 }
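/* In summary: GD calls __tls_get_addr on x@TLSGD; LD calls it once per
   module via @TLSLDM and then adds x@TLSLDO; IE loads the offset from the
   GOT slot x@TLSIE and adds the __m68k_read_tp result; LE adds x@TLSLE
   directly to the __m68k_read_tp result. */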
2669
2670 /* Return true if X is a TLS symbol. */
2671
2672 static bool
2673 m68k_tls_symbol_p (rtx x)
2674 {
2675 if (!TARGET_HAVE_TLS)
2676 return false;
2677
2678 if (GET_CODE (x) != SYMBOL_REF)
2679 return false;
2680
2681 return SYMBOL_REF_TLS_MODEL (x) != 0;
2682 }
2683
2684 /* Helper for m68k_tls_reference_p. */
2685
2686 static int
2687 m68k_tls_reference_p_1 (rtx *x_ptr, void *data ATTRIBUTE_UNUSED)
2688 {
2689 /* Note: this is not the same as m68k_tls_symbol_p. */
2690 if (GET_CODE (*x_ptr) == SYMBOL_REF)
2691 return SYMBOL_REF_TLS_MODEL (*x_ptr) != 0 ? 1 : 0;
2692
2693 /* Don't recurse into legitimate TLS references. */
2694 if (m68k_tls_reference_p (*x_ptr, true))
2695 return -1;
2696
2697 return 0;
2698 }
2699
2700 /* If !LEGITIMATE_P, return true if X is a TLS symbol reference,
2701 even an illegitimate one.
2702 If LEGITIMATE_P, return true if X is a legitimate TLS symbol reference. */
2703
2704 bool
2705 m68k_tls_reference_p (rtx x, bool legitimate_p)
2706 {
2707 if (!TARGET_HAVE_TLS)
2708 return false;
2709
2710 if (!legitimate_p)
2711 return for_each_rtx (&x, m68k_tls_reference_p_1, NULL) == 1 ? true : false;
2712 else
2713 {
2714 enum m68k_reloc reloc = RELOC_GOT;
2715
2716 return (m68k_unwrap_symbol_1 (x, true, &reloc) != x
2717 && TLS_RELOC_P (reloc));
2718 }
2719 }
2720
2721 \f
2722
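/* True if I fits in the signed 8-bit immediate range of moveq,
   i.e. -128 <= I <= 127. */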
2723 #define USE_MOVQ(i) ((unsigned) ((i) + 128) <= 255)
2724
2725 /* Return the type of move that should be used for integer I. */
2726
2727 M68K_CONST_METHOD
2728 m68k_const_method (HOST_WIDE_INT i)
2729 {
2730 unsigned u;
2731
2732 if (USE_MOVQ (i))
2733 return MOVQ;
2734
2735 /* The ColdFire doesn't have byte or word operations. */
2736 /* FIXME: This may not be useful for the m68060 either. */
2737 if (!TARGET_COLDFIRE)
2738 {
2739 /* If -256 < N < 256 but N is not in range for a moveq,
2740 N^0xff will be, so use moveq #N^0xff,dreg; not.b dreg. */
2741 if (USE_MOVQ (i ^ 0xff))
2742 return NOTB;
2743 /* Likewise, try with not.w. */
2744 if (USE_MOVQ (i ^ 0xffff))
2745 return NOTW;
2746 /* This is the only value where neg.w is useful. */
2747 if (i == -65408)
2748 return NEGW;
2749 }
2750
2751 /* Try also with swap. */
2752 u = i;
2753 if (USE_MOVQ ((u >> 16) | (u << 16)))
2754 return SWAP;
2755
2756 if (TARGET_ISAB)
2757 {
2758 /* Try using MVZ/MVS with an immediate value to load constants. */
2759 if (i >= 0 && i <= 65535)
2760 return MVZ;
2761 if (i >= -32768 && i <= 32767)
2762 return MVS;
2763 }
2764
2765 /* Otherwise, use move.l. */
2766 return MOVL;
2767 }
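/* Some worked examples on a classic 68k (not ColdFire, not ISA B):
   100 -> MOVQ (moveq #100,dn); 255 -> NOTB (moveq #0,dn; not.b dn);
   65535 -> NOTW (moveq #0,dn; not.w dn); -65408 -> NEGW
   (moveq #-128,dn; neg.w dn); 0x20000 -> SWAP (moveq #2,dn; swap dn). */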
2768
2769 /* Return the cost of moving constant I into a data register. */
2770
2771 static int
2772 const_int_cost (HOST_WIDE_INT i)
2773 {
2774 switch (m68k_const_method (i))
2775 {
2776 case MOVQ:
2777 /* Constants between -128 and 127 are cheap due to moveq. */
2778 return 0;
2779 case MVZ:
2780 case MVS:
2781 case NOTB:
2782 case NOTW:
2783 case NEGW:
2784 case SWAP:
2785 /* Constants easily generated by moveq + not.b/not.w/neg.w/swap. */
2786 return 1;
2787 case MOVL:
2788 return 2;
2789 default:
2790 gcc_unreachable ();
2791 }
2792 }
2793
2794 static bool
2795 m68k_rtx_costs (rtx x, int code, int outer_code, int *total,
2796 bool speed ATTRIBUTE_UNUSED)
2797 {
2798 switch (code)
2799 {
2800 case CONST_INT:
2801 /* Constant zero is super cheap due to the clr instruction. */
2802 if (x == const0_rtx)
2803 *total = 0;
2804 else
2805 *total = const_int_cost (INTVAL (x));
2806 return true;
2807
2808 case CONST:
2809 case LABEL_REF:
2810 case SYMBOL_REF:
2811 *total = 3;
2812 return true;
2813
2814 case CONST_DOUBLE:
2815 /* Make 0.0 cheaper than other floating constants to
2816 encourage creating tstsf and tstdf insns. */
2817 if (outer_code == COMPARE
2818 && (x == CONST0_RTX (SFmode) || x == CONST0_RTX (DFmode)))
2819 *total = 4;
2820 else
2821 *total = 5;
2822 return true;
2823
2824 /* These are vaguely right for a 68020. */
2825 /* The costs for long multiply have been adjusted to work properly
2826 in synth_mult on the 68020, relative to an average of the time
2827 for add and the time for shift, taking away a little more because
2828 sometimes move insns are needed. */
2829 /* div?.w is relatively cheap on the 68000, counted in COSTS_N_INSNS
2830 terms. */
2831 #define MULL_COST \
2832 (TUNE_68060 ? 2 \
2833 : TUNE_68040 ? 5 \
2834 : (TUNE_CFV2 && TUNE_EMAC) ? 3 \
2835 : (TUNE_CFV2 && TUNE_MAC) ? 4 \
2836 : TUNE_CFV2 ? 8 \
2837 : TARGET_COLDFIRE ? 3 : 13)
2838
2839 #define MULW_COST \
2840 (TUNE_68060 ? 2 \
2841 : TUNE_68040 ? 3 \
2842 : TUNE_68000_10 ? 5 \
2843 : (TUNE_CFV2 && TUNE_EMAC) ? 3 \
2844 : (TUNE_CFV2 && TUNE_MAC) ? 2 \
2845 : TUNE_CFV2 ? 8 \
2846 : TARGET_COLDFIRE ? 2 : 8)
2847
2848 #define DIVW_COST \
2849 (TARGET_CF_HWDIV ? 11 \
2850 : TUNE_68000_10 || TARGET_COLDFIRE ? 12 : 27)
2851
2852 case PLUS:
2853 /* An lea costs about three times as much as a simple add. */
2854 if (GET_MODE (x) == SImode
2855 && GET_CODE (XEXP (x, 1)) == REG
2856 && GET_CODE (XEXP (x, 0)) == MULT
2857 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
2858 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2859 && (INTVAL (XEXP (XEXP (x, 0), 1)) == 2
2860 || INTVAL (XEXP (XEXP (x, 0), 1)) == 4
2861 || INTVAL (XEXP (XEXP (x, 0), 1)) == 8))
2862 {
2863 /* lea an@(dx:l:i),am */
2864 *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 2 : 3);
2865 return true;
2866 }
2867 return false;
2868
2869 case ASHIFT:
2870 case ASHIFTRT:
2871 case LSHIFTRT:
2872 if (TUNE_68060)
2873 {
2874 *total = COSTS_N_INSNS (1);
2875 return true;
2876 }
2877 if (TUNE_68000_10)
2878 {
2879 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
2880 {
2881 if (INTVAL (XEXP (x, 1)) < 16)
2882 *total = COSTS_N_INSNS (2) + INTVAL (XEXP (x, 1)) / 2;
2883 else
2884 /* We're using clrw + swap for these cases. */
2885 *total = COSTS_N_INSNS (4) + (INTVAL (XEXP (x, 1)) - 16) / 2;
2886 }
2887 else
2888 *total = COSTS_N_INSNS (10); /* Worst case. */
2889 return true;
2890 }
2891 /* A shift by 16 is done with clr.w + swap, so it takes two instructions. */
2892 if (GET_CODE (XEXP (x, 1)) == CONST_INT
2893 && (INTVAL (XEXP (x, 1)) == 16))
2894 {
2895 *total = COSTS_N_INSNS (2); /* clrw;swap */
2896 return true;
2897 }
2898 if (GET_CODE (XEXP (x, 1)) == CONST_INT
2899 && !(INTVAL (XEXP (x, 1)) > 0
2900 && INTVAL (XEXP (x, 1)) <= 8))
2901 {
2902 *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 1 : 3); /* lsr #i,dn */
2903 return true;
2904 }
2905 return false;
2906
2907 case MULT:
2908 if ((GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
2909 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
2910 && GET_MODE (x) == SImode)
2911 *total = COSTS_N_INSNS (MULW_COST);
2912 else if (GET_MODE (x) == QImode || GET_MODE (x) == HImode)
2913 *total = COSTS_N_INSNS (MULW_COST);
2914 else
2915 *total = COSTS_N_INSNS (MULL_COST);
2916 return true;
2917
2918 case DIV:
2919 case UDIV:
2920 case MOD:
2921 case UMOD:
2922 if (GET_MODE (x) == QImode || GET_MODE (x) == HImode)
2923 *total = COSTS_N_INSNS (DIVW_COST); /* div.w */
2924 else if (TARGET_CF_HWDIV)
2925 *total = COSTS_N_INSNS (18);
2926 else
2927 *total = COSTS_N_INSNS (43); /* div.l */
2928 return true;
2929
2930 case ZERO_EXTRACT:
2931 if (outer_code == COMPARE)
2932 *total = 0;
2933 return false;
2934
2935 default:
2936 return false;
2937 }
2938 }
2939
2940 /* Return an instruction to move CONST_INT OPERANDS[1] into data register
2941 OPERANDS[0]. */
2942
2943 static const char *
2944 output_move_const_into_data_reg (rtx *operands)
2945 {
2946 HOST_WIDE_INT i;
2947
2948 i = INTVAL (operands[1]);
2949 switch (m68k_const_method (i))
2950 {
2951 case MVZ:
2952 return "mvzw %1,%0";
2953 case MVS:
2954 return "mvsw %1,%0";
2955 case MOVQ:
2956 return "moveq %1,%0";
2957 case NOTB:
2958 CC_STATUS_INIT;
2959 operands[1] = GEN_INT (i ^ 0xff);
2960 return "moveq %1,%0\n\tnot%.b %0";
2961 case NOTW:
2962 CC_STATUS_INIT;
2963 operands[1] = GEN_INT (i ^ 0xffff);
2964 return "moveq %1,%0\n\tnot%.w %0";
2965 case NEGW:
2966 CC_STATUS_INIT;
2967 return "moveq #-128,%0\n\tneg%.w %0";
2968 case SWAP:
2969 {
2970 unsigned u = i;
2971
2972 operands[1] = GEN_INT ((u << 16) | (u >> 16));
2973 return "moveq %1,%0\n\tswap %0";
2974 }
2975 case MOVL:
2976 return "move%.l %1,%0";
2977 default:
2978 gcc_unreachable ();
2979 }
2980 }
2981
2982 /* Return true if I can be handled by ISA B's mov3q instruction. */
2983
2984 bool
2985 valid_mov3q_const (HOST_WIDE_INT i)
2986 {
2987 return TARGET_ISAB && (i == -1 || IN_RANGE (i, 1, 7));
2988 }
2989
2990 /* Return an instruction to move CONST_INT OPERANDS[1] into
2991 OPERANDS[0]. */
2992
2993 static const char *
2994 output_move_simode_const (rtx *operands)
2995 {
2996 rtx dest;
2997 HOST_WIDE_INT src;
2998
2999 dest = operands[0];
3000 src = INTVAL (operands[1]);
3001 if (src == 0
3002 && (DATA_REG_P (dest) || MEM_P (dest))
3003 /* clr insns on 68000 read before writing. */
3004 && ((TARGET_68010 || TARGET_COLDFIRE)
3005 || !(MEM_P (dest) && MEM_VOLATILE_P (dest))))
3006 return "clr%.l %0";
3007 else if (GET_MODE (dest) == SImode && valid_mov3q_const (src))
3008 return "mov3q%.l %1,%0";
3009 else if (src == 0 && ADDRESS_REG_P (dest))
3010 return "sub%.l %0,%0";
3011 else if (DATA_REG_P (dest))
3012 return output_move_const_into_data_reg (operands);
3013 else if (ADDRESS_REG_P (dest) && IN_RANGE (src, -0x8000, 0x7fff))
3014 {
3015 if (valid_mov3q_const (src))
3016 return "mov3q%.l %1,%0";
3017 return "move%.w %1,%0";
3018 }
3019 else if (MEM_P (dest)
3020 && GET_CODE (XEXP (dest, 0)) == PRE_DEC
3021 && REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
3022 && IN_RANGE (src, -0x8000, 0x7fff))
3023 {
3024 if (valid_mov3q_const (src))
3025 return "mov3q%.l %1,%-";
3026 return "pea %a1";
3027 }
3028 return "move%.l %1,%0";
3029 }
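/* For example (where the 68000 clr caveat above does not apply): moving
   #0 into a data register yields "clr%.l %0", moving #0 into an address
   register yields "sub%.l %0,%0", and a 16-bit constant moved into an
   address register uses "move%.w %1,%0", relying on the hardware's sign
   extension of word moves to address registers. */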
3030
3031 const char *
3032 output_move_simode (rtx *operands)
3033 {
3034 if (GET_CODE (operands[1]) == CONST_INT)
3035 return output_move_simode_const (operands);
3036 else if ((GET_CODE (operands[1]) == SYMBOL_REF
3037 || GET_CODE (operands[1]) == CONST)
3038 && push_operand (operands[0], SImode))
3039 return "pea %a1";
3040 else if ((GET_CODE (operands[1]) == SYMBOL_REF
3041 || GET_CODE (operands[1]) == CONST)
3042 && ADDRESS_REG_P (operands[0]))
3043 return "lea %a1,%0";
3044 return "move%.l %1,%0";
3045 }
3046
3047 const char *
3048 output_move_himode (rtx *operands)
3049 {
3050 if (GET_CODE (operands[1]) == CONST_INT)
3051 {
3052 if (operands[1] == const0_rtx
3053 && (DATA_REG_P (operands[0])
3054 || GET_CODE (operands[0]) == MEM)
3055 /* clr insns on 68000 read before writing. */
3056 && ((TARGET_68010 || TARGET_COLDFIRE)
3057 || !(GET_CODE (operands[0]) == MEM
3058 && MEM_VOLATILE_P (operands[0]))))
3059 return "clr%.w %0";
3060 else if (operands[1] == const0_rtx
3061 && ADDRESS_REG_P (operands[0]))
3062 return "sub%.l %0,%0";
3063 else if (DATA_REG_P (operands[0])
3064 && INTVAL (operands[1]) < 128
3065 && INTVAL (operands[1]) >= -128)
3066 return "moveq %1,%0";
3067 else if (INTVAL (operands[1]) < 0x8000
3068 && INTVAL (operands[1]) >= -0x8000)
3069 return "move%.w %1,%0";
3070 }
3071 else if (CONSTANT_P (operands[1]))
3072 return "move%.l %1,%0";
3073 return "move%.w %1,%0";
3074 }
3075
3076 const char *
3077 output_move_qimode (rtx *operands)
3078 {
3079 /* The 68k family always modifies the stack pointer by at least 2, even
3080 for byte pushes. The 5200 (ColdFire) does not do this. */
3081
3082 /* This case is generated by the pushqi1 pattern now. */
3083 gcc_assert (!(GET_CODE (operands[0]) == MEM
3084 && GET_CODE (XEXP (operands[0], 0)) == PRE_DEC
3085 && XEXP (XEXP (operands[0], 0), 0) == stack_pointer_rtx
3086 && ! ADDRESS_REG_P (operands[1])
3087 && ! TARGET_COLDFIRE));
3088
3089 /* clr and st insns on 68000 read before writing. */
3090 if (!ADDRESS_REG_P (operands[0])
3091 && ((TARGET_68010 || TARGET_COLDFIRE)
3092 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3093 {
3094 if (operands[1] == const0_rtx)
3095 return "clr%.b %0";
3096 if ((!TARGET_COLDFIRE || DATA_REG_P (operands[0]))
3097 && GET_CODE (operands[1]) == CONST_INT
3098 && (INTVAL (operands[1]) & 255) == 255)
3099 {
3100 CC_STATUS_INIT;
3101 return "st %0";
3102 }
3103 }
3104 if (GET_CODE (operands[1]) == CONST_INT
3105 && DATA_REG_P (operands[0])
3106 && INTVAL (operands[1]) < 128
3107 && INTVAL (operands[1]) >= -128)
3108 return "moveq %1,%0";
3109 if (operands[1] == const0_rtx && ADDRESS_REG_P (operands[0]))
3110 return "sub%.l %0,%0";
3111 if (GET_CODE (operands[1]) != CONST_INT && CONSTANT_P (operands[1]))
3112 return "move%.l %1,%0";
3113 /* The 68k family (including the 5200 ColdFire) does not support byte
3114 moves to or from address registers. */
3115 if (ADDRESS_REG_P (operands[0]) || ADDRESS_REG_P (operands[1]))
3116 return "move%.w %1,%0";
3117 return "move%.b %1,%0";
3118 }
3119
3120 const char *
3121 output_move_stricthi (rtx *operands)
3122 {
3123 if (operands[1] == const0_rtx
3124 /* clr insns on 68000 read before writing. */
3125 && ((TARGET_68010 || TARGET_COLDFIRE)
3126 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3127 return "clr%.w %0";
3128 return "move%.w %1,%0";
3129 }
3130
3131 const char *
3132 output_move_strictqi (rtx *operands)
3133 {
3134 if (operands[1] == const0_rtx
3135 /* clr insns on 68000 read before writing. */
3136 && ((TARGET_68010 || TARGET_COLDFIRE)
3137 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3138 return "clr%.b %0";
3139 return "move%.b %1,%0";
3140 }
3141
3142 /* Return the best assembler insn template
3143 for moving operands[1] into operands[0] as a fullword. */
3144
3145 static const char *
3146 singlemove_string (rtx *operands)
3147 {
3148 if (GET_CODE (operands[1]) == CONST_INT)
3149 return output_move_simode_const (operands);
3150 return "move%.l %1,%0";
3151 }
3152
3153
3154 /* Output assembler or rtl code to perform a doubleword move insn
3155 with operands OPERANDS.
3156 Pointers to 3 helper functions should be specified:
3157 HANDLE_REG_ADJUST to adjust a register by a small value,
3158 HANDLE_COMPADR to compute an address and
3159 HANDLE_MOVSI to move 4 bytes. */
3160
3161 static void
3162 handle_move_double (rtx operands[2],
3163 void (*handle_reg_adjust) (rtx, int),
3164 void (*handle_compadr) (rtx [2]),
3165 void (*handle_movsi) (rtx [2]))
3166 {
3167 enum
3168 {
3169 REGOP, OFFSOP, MEMOP, PUSHOP, POPOP, CNSTOP, RNDOP
3170 } optype0, optype1;
3171 rtx latehalf[2];
3172 rtx middlehalf[2];
3173 rtx xops[2];
3174 rtx addreg0 = 0, addreg1 = 0;
3175 int dest_overlapped_low = 0;
3176 int size = GET_MODE_SIZE (GET_MODE (operands[0]));
3177
3178 middlehalf[0] = 0;
3179 middlehalf[1] = 0;
3180
3181 /* First classify both operands. */
3182
3183 if (REG_P (operands[0]))
3184 optype0 = REGOP;
3185 else if (offsettable_memref_p (operands[0]))
3186 optype0 = OFFSOP;
3187 else if (GET_CODE (XEXP (operands[0], 0)) == POST_INC)
3188 optype0 = POPOP;
3189 else if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC)
3190 optype0 = PUSHOP;
3191 else if (GET_CODE (operands[0]) == MEM)
3192 optype0 = MEMOP;
3193 else
3194 optype0 = RNDOP;
3195
3196 if (REG_P (operands[1]))
3197 optype1 = REGOP;
3198 else if (CONSTANT_P (operands[1]))
3199 optype1 = CNSTOP;
3200 else if (offsettable_memref_p (operands[1]))
3201 optype1 = OFFSOP;
3202 else if (GET_CODE (XEXP (operands[1], 0)) == POST_INC)
3203 optype1 = POPOP;
3204 else if (GET_CODE (XEXP (operands[1], 0)) == PRE_DEC)
3205 optype1 = PUSHOP;
3206 else if (GET_CODE (operands[1]) == MEM)
3207 optype1 = MEMOP;
3208 else
3209 optype1 = RNDOP;
3210
3211 /* Check for the cases that the operand constraints are not supposed
3212 to allow. Generating code for these cases would be
3213 painful. */
3214 gcc_assert (optype0 != RNDOP && optype1 != RNDOP);
3215
3216 /* If one operand is decrementing and one is incrementing
3217 decrement the former register explicitly
3218 and change that operand into ordinary indexing. */
3219
3220 if (optype0 == PUSHOP && optype1 == POPOP)
3221 {
3222 operands[0] = XEXP (XEXP (operands[0], 0), 0);
3223
3224 handle_reg_adjust (operands[0], -size);
3225
3226 if (GET_MODE (operands[1]) == XFmode)
3227 operands[0] = gen_rtx_MEM (XFmode, operands[0]);
3228 else if (GET_MODE (operands[0]) == DFmode)
3229 operands[0] = gen_rtx_MEM (DFmode, operands[0]);
3230 else
3231 operands[0] = gen_rtx_MEM (DImode, operands[0]);
3232 optype0 = OFFSOP;
3233 }
3234 if (optype0 == POPOP && optype1 == PUSHOP)
3235 {
3236 operands[1] = XEXP (XEXP (operands[1], 0), 0);
3237
3238 handle_reg_adjust (operands[1], -size);
3239
3240 if (GET_MODE (operands[1]) == XFmode)
3241 operands[1] = gen_rtx_MEM (XFmode, operands[1]);
3242 else if (GET_MODE (operands[1]) == DFmode)
3243 operands[1] = gen_rtx_MEM (DFmode, operands[1]);
3244 else
3245 operands[1] = gen_rtx_MEM (DImode, operands[1]);
3246 optype1 = OFFSOP;
3247 }
3248
3249 /* If an operand is an unoffsettable memory ref, find a register
3250 we can increment temporarily to make it refer to the second word. */
3251
3252 if (optype0 == MEMOP)
3253 addreg0 = find_addr_reg (XEXP (operands[0], 0));
3254
3255 if (optype1 == MEMOP)
3256 addreg1 = find_addr_reg (XEXP (operands[1], 0));
3257
3258 /* Ok, we can do one word at a time.
3259 Normally we do the low-numbered word first,
3260 but if either operand is autodecrementing then we
3261 do the high-numbered word first.
3262
3263 In either case, set up in LATEHALF the operands to use
3264 for the high-numbered word and in some cases alter the
3265 operands in OPERANDS to be suitable for the low-numbered word. */
3266
3267 if (size == 12)
3268 {
3269 if (optype0 == REGOP)
3270 {
3271 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 2);
3272 middlehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
3273 }
3274 else if (optype0 == OFFSOP)
3275 {
3276 middlehalf[0] = adjust_address (operands[0], SImode, 4);
3277 latehalf[0] = adjust_address (operands[0], SImode, size - 4);
3278 }
3279 else
3280 {
3281 middlehalf[0] = adjust_address (operands[0], SImode, 0);
3282 latehalf[0] = adjust_address (operands[0], SImode, 0);
3283 }
3284
3285 if (optype1 == REGOP)
3286 {
3287 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 2);
3288 middlehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
3289 }
3290 else if (optype1 == OFFSOP)
3291 {
3292 middlehalf[1] = adjust_address (operands[1], SImode, 4);
3293 latehalf[1] = adjust_address (operands[1], SImode, size - 4);
3294 }
3295 else if (optype1 == CNSTOP)
3296 {
3297 if (GET_CODE (operands[1]) == CONST_DOUBLE)
3298 {
3299 REAL_VALUE_TYPE r;
3300 long l[3];
3301
3302 REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
3303 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
3304 operands[1] = GEN_INT (l[0]);
3305 middlehalf[1] = GEN_INT (l[1]);
3306 latehalf[1] = GEN_INT (l[2]);
3307 }
3308 else
3309 {
3310 /* No non-CONST_DOUBLE constant should ever appear
3311 here. */
3312 gcc_assert (!CONSTANT_P (operands[1]));
3313 }
3314 }
3315 else
3316 {
3317 middlehalf[1] = adjust_address (operands[1], SImode, 0);
3318 latehalf[1] = adjust_address (operands[1], SImode, 0);
3319 }
3320 }
3321 else
3322 /* size is not 12: */
3323 {
3324 if (optype0 == REGOP)
3325 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
3326 else if (optype0 == OFFSOP)
3327 latehalf[0] = adjust_address (operands[0], SImode, size - 4);
3328 else
3329 latehalf[0] = adjust_address (operands[0], SImode, 0);
3330
3331 if (optype1 == REGOP)
3332 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
3333 else if (optype1 == OFFSOP)
3334 latehalf[1] = adjust_address (operands[1], SImode, size - 4);
3335 else if (optype1 == CNSTOP)
3336 split_double (operands[1], &operands[1], &latehalf[1]);
3337 else
3338 latehalf[1] = adjust_address (operands[1], SImode, 0);
3339 }
3340
3341 /* If insn is effectively movd N(sp),-(sp) then we will do the
3342 high word first. We should use the adjusted operand 1 (which is N+4(sp))
3343 for the low word as well, to compensate for the first decrement of sp. */
3344 if (optype0 == PUSHOP
3345 && REGNO (XEXP (XEXP (operands[0], 0), 0)) == STACK_POINTER_REGNUM
3346 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
3347 operands[1] = middlehalf[1] = latehalf[1];
3348
3349 /* For (set (reg:DI N) (mem:DI ... (reg:SI N) ...)),
3350 if the upper part of reg N does not appear in the MEM, arrange to
3351 emit the move late-half first. Otherwise, compute the MEM address
3352 into the upper part of N and use that as a pointer to the memory
3353 operand. */
3354 if (optype0 == REGOP
3355 && (optype1 == OFFSOP || optype1 == MEMOP))
3356 {
3357 rtx testlow = gen_rtx_REG (SImode, REGNO (operands[0]));
3358
3359 if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
3360 && reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
3361 {
3362 /* If both halves of dest are used in the src memory address,
3363 compute the address into latehalf of dest.
3364 Note that this can't happen if the dest is two data regs. */
3365 compadr:
3366 xops[0] = latehalf[0];
3367 xops[1] = XEXP (operands[1], 0);
3368
3369 handle_compadr (xops);
3370 if (GET_MODE (operands[1]) == XFmode)
3371 {
3372 operands[1] = gen_rtx_MEM (XFmode, latehalf[0]);
3373 middlehalf[1] = adjust_address (operands[1], DImode, size - 8);
3374 latehalf[1] = adjust_address (operands[1], DImode, size - 4);
3375 }
3376 else
3377 {
3378 operands[1] = gen_rtx_MEM (DImode, latehalf[0]);
3379 latehalf[1] = adjust_address (operands[1], DImode, size - 4);
3380 }
3381 }
3382 else if (size == 12
3383 && reg_overlap_mentioned_p (middlehalf[0],
3384 XEXP (operands[1], 0)))
3385 {
3386 /* Check for two regs used by both source and dest.
3387 Note that this can't happen if the dest is all data regs.
3388 It can happen if the dest is d6, d7, a0.
3389 But in that case, latehalf is an addr reg, so
3390 the code at compadr does ok. */
3391
3392 if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
3393 || reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
3394 goto compadr;
3395
3396 /* JRV says this can't happen: */
3397 gcc_assert (!addreg0 && !addreg1);
3398
3399 /* Only the middle reg conflicts; simply put it last. */
3400 handle_movsi (operands);
3401 handle_movsi (latehalf);
3402 handle_movsi (middlehalf);
3403
3404 return;
3405 }
3406 else if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0)))
3407 /* If the low half of dest is mentioned in the source memory
3408 address, arrange to emit the late half first. */
3409 dest_overlapped_low = 1;
3410 }
3411
3412 /* If one or both operands are autodecrementing,
3413 do the two words, high-numbered first. */
3414
3415 /* Likewise, if the first move would clobber the source of the second one,
3416 do them in the other order. This happens only for registers;
3417 such overlap can't happen in memory unless the user explicitly
3418 sets it up, and that is an undefined circumstance. */
3419
3420 if (optype0 == PUSHOP || optype1 == PUSHOP
3421 || (optype0 == REGOP && optype1 == REGOP
3422 && ((middlehalf[1] && REGNO (operands[0]) == REGNO (middlehalf[1]))
3423 || REGNO (operands[0]) == REGNO (latehalf[1])))
3424 || dest_overlapped_low)
3425 {
3426 /* Make any unoffsettable addresses point at high-numbered word. */
3427 if (addreg0)
3428 handle_reg_adjust (addreg0, size - 4);
3429 if (addreg1)
3430 handle_reg_adjust (addreg1, size - 4);
3431
3432 /* Do that word. */
3433 handle_movsi (latehalf);
3434
3435 /* Undo the adds we just did. */
3436 if (addreg0)
3437 handle_reg_adjust (addreg0, -4);
3438 if (addreg1)
3439 handle_reg_adjust (addreg1, -4);
3440
3441 if (size == 12)
3442 {
3443 handle_movsi (middlehalf);
3444
3445 if (addreg0)
3446 handle_reg_adjust (addreg0, -4);
3447 if (addreg1)
3448 handle_reg_adjust (addreg1, -4);
3449 }
3450
3451 /* Do low-numbered word. */
3452
3453 handle_movsi (operands);
3454 return;
3455 }
3456
3457 /* Normal case: do the two words, low-numbered first. */
3458
3459 m68k_final_prescan_insn (NULL, operands, 2);
3460 handle_movsi (operands);
3461
3462 /* Do the middle one of the three words for a long double. */
3463 if (size == 12)
3464 {
3465 if (addreg0)
3466 handle_reg_adjust (addreg0, 4);
3467 if (addreg1)
3468 handle_reg_adjust (addreg1, 4);
3469
3470 m68k_final_prescan_insn (NULL, middlehalf, 2);
3471 handle_movsi (middlehalf);
3472 }
3473
3474 /* Make any unoffsettable addresses point at high-numbered word. */
3475 if (addreg0)
3476 handle_reg_adjust (addreg0, 4);
3477 if (addreg1)
3478 handle_reg_adjust (addreg1, 4);
3479
3480 /* Do that word. */
3481 m68k_final_prescan_insn (NULL, latehalf, 2);
3482 handle_movsi (latehalf);
3483
3484 /* Undo the adds we just did. */
3485 if (addreg0)
3486 handle_reg_adjust (addreg0, -(size - 4));
3487 if (addreg1)
3488 handle_reg_adjust (addreg1, -(size - 4));
3489
3490 return;
3491 }
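/* As an example of the normal case above: a DImode move from d0/d1 to an
   offsettable MEM becomes two SImode moves, low-numbered word first, with
   latehalf[] addressing the second word of each operand. */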
3492
3493 /* Output assembler code to adjust REG by N. */
3494 static void
3495 output_reg_adjust (rtx reg, int n)
3496 {
3497 const char *s;
3498
3499 gcc_assert (GET_MODE (reg) == SImode
3500 && -12 <= n && n != 0 && n <= 12);
3501
3502 switch (n)
3503 {
3504 case 12:
3505 s = "add%.l #12,%0";
3506 break;
3507
3508 case 8:
3509 s = "addq%.l #8,%0";
3510 break;
3511
3512 case 4:
3513 s = "addq%.l #4,%0";
3514 break;
3515
3516 case -12:
3517 s = "sub%.l #12,%0";
3518 break;
3519
3520 case -8:
3521 s = "subq%.l #8,%0";
3522 break;
3523
3524 case -4:
3525 s = "subq%.l #4,%0";
3526 break;
3527
3528 default:
3529 gcc_unreachable ();
3530 s = NULL;
3531 }
3532
3533 output_asm_insn (s, &reg);
3534 }
3535
3536 /* Emit rtl code to adjust REG by N. */
3537 static void
3538 emit_reg_adjust (rtx reg1, int n)
3539 {
3540 rtx reg2;
3541
3542 gcc_assert (GET_MODE (reg1) == SImode
3543 && -12 <= n && n != 0 && n <= 12);
3544
3545 reg1 = copy_rtx (reg1);
3546 reg2 = copy_rtx (reg1);
3547
3548 if (n < 0)
3549 emit_insn (gen_subsi3 (reg1, reg2, GEN_INT (-n)));
3550 else if (n > 0)
3551 emit_insn (gen_addsi3 (reg1, reg2, GEN_INT (n)));
3552 else
3553 gcc_unreachable ();
3554 }
3555
3556 /* Output assembler to load address OPERANDS[1] into register OPERANDS[0]. */
3557 static void
3558 output_compadr (rtx operands[2])
3559 {
3560 output_asm_insn ("lea %a1,%0", operands);
3561 }
3562
3563 /* Output the best assembler insn for moving operands[1] into operands[0]
3564 as a fullword. */
3565 static void
3566 output_movsi (rtx operands[2])
3567 {
3568 output_asm_insn (singlemove_string (operands), operands);
3569 }
3570
3571 /* Copy OP and change its mode to MODE. */
3572 static rtx
3573 copy_operand (rtx op, enum machine_mode mode)
3574 {
3575 /* ??? This looks really ugly. There must be a better way
3576 to change a mode on the operand. */
3577 if (GET_MODE (op) != VOIDmode)
3578 {
3579 if (REG_P (op))
3580 op = gen_rtx_REG (mode, REGNO (op));
3581 else
3582 {
3583 op = copy_rtx (op);
3584 PUT_MODE (op, mode);
3585 }
3586 }
3587
3588 return op;
3589 }
3590
3591 /* Emit rtl code for moving operands[1] into operands[0] as a fullword. */
3592 static void
3593 emit_movsi (rtx operands[2])
3594 {
3595 operands[0] = copy_operand (operands[0], SImode);
3596 operands[1] = copy_operand (operands[1], SImode);
3597
3598 emit_insn (gen_movsi (operands[0], operands[1]));
3599 }
3600
3601 /* Output assembler code to perform a doubleword move insn
3602 with operands OPERANDS. */
3603 const char *
3604 output_move_double (rtx *operands)
3605 {
3606 handle_move_double (operands,
3607 output_reg_adjust, output_compadr, output_movsi);
3608
3609 return "";
3610 }
3611
3612 /* Output rtl code to perform a doubleword move insn
3613 with operands OPERANDS. */
3614 void
3615 m68k_emit_move_double (rtx operands[2])
3616 {
3617 handle_move_double (operands, emit_reg_adjust, emit_movsi, emit_movsi);
3618 }
3619
3620 /* Ensure mode of ORIG, a REG rtx, is MODE. Returns either ORIG or a
3621 new rtx with the correct mode. */
3622
3623 static rtx
3624 force_mode (enum machine_mode mode, rtx orig)
3625 {
3626 if (mode == GET_MODE (orig))
3627 return orig;
3628
3629 if (REGNO (orig) >= FIRST_PSEUDO_REGISTER)
3630 abort ();
3631
3632 return gen_rtx_REG (mode, REGNO (orig));
3633 }
3634
3635 static int
3636 fp_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3637 {
3638 return reg_renumber && FP_REG_P (op);
3639 }
3640
3641 /* Emit insns to move operands[1] into operands[0].
3642
3643 Return 1 if we have written out everything that needs to be done to
3644 do the move. Otherwise, return 0 and the caller will emit the move
3645 normally.
3646
3647 Note SCRATCH_REG may not be in the proper mode depending on how it
3648 will be used. This routine is responsible for creating a new copy
3649 of SCRATCH_REG in the proper mode. */
3650
3651 int
3652 emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
3653 {
3654 register rtx operand0 = operands[0];
3655 register rtx operand1 = operands[1];
3656 register rtx tem;
3657
3658 if (scratch_reg
3659 && reload_in_progress && GET_CODE (operand0) == REG
3660 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
3661 operand0 = reg_equiv_mem (REGNO (operand0));
3662 else if (scratch_reg
3663 && reload_in_progress && GET_CODE (operand0) == SUBREG
3664 && GET_CODE (SUBREG_REG (operand0)) == REG
3665 && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
3666 {
3667 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
3668 the code which tracks sets/uses for delete_output_reload. */
3669 rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
3670 reg_equiv_mem (REGNO (SUBREG_REG (operand0))),
3671 SUBREG_BYTE (operand0));
3672 operand0 = alter_subreg (&temp);
3673 }
3674
3675 if (scratch_reg
3676 && reload_in_progress && GET_CODE (operand1) == REG
3677 && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
3678 operand1 = reg_equiv_mem (REGNO (operand1));
3679 else if (scratch_reg
3680 && reload_in_progress && GET_CODE (operand1) == SUBREG
3681 && GET_CODE (SUBREG_REG (operand1)) == REG
3682 && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
3683 {
3684 /* We must not alter SUBREG_BYTE (operand1) since that would confuse
3685 the code which tracks sets/uses for delete_output_reload. */
3686 rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
3687 reg_equiv_mem (REGNO (SUBREG_REG (operand1))),
3688 SUBREG_BYTE (operand1));
3689 operand1 = alter_subreg (&temp);
3690 }
3691
3692 if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
3693 && ((tem = find_replacement (&XEXP (operand0, 0)))
3694 != XEXP (operand0, 0)))
3695 operand0 = gen_rtx_MEM (GET_MODE (operand0), tem);
3696 if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
3697 && ((tem = find_replacement (&XEXP (operand1, 0)))
3698 != XEXP (operand1, 0)))
3699 operand1 = gen_rtx_MEM (GET_MODE (operand1), tem);
3700
3701 /* Handle secondary reloads for loads/stores of FP registers where
3702 the address is symbolic, by using the scratch register. */
3703 if (fp_reg_operand (operand0, mode)
3704 && ((GET_CODE (operand1) == MEM
3705 && ! memory_address_p (DFmode, XEXP (operand1, 0)))
3706 || ((GET_CODE (operand1) == SUBREG
3707 && GET_CODE (XEXP (operand1, 0)) == MEM
3708 && !memory_address_p (DFmode, XEXP (XEXP (operand1, 0), 0)))))
3709 && scratch_reg)
3710 {
3711 if (GET_CODE (operand1) == SUBREG)
3712 operand1 = XEXP (operand1, 0);
3713
3714 /* SCRATCH_REG will hold an address. We want
3715 it in SImode regardless of what mode it was originally given
3716 to us. */
3717 scratch_reg = force_mode (SImode, scratch_reg);
3718
3719 /* The displacement might not form a legitimate address either; in that
3720 case, load the displacement into the scratch reg and rebuild the address. */
3721 if (!memory_address_p (Pmode, XEXP (operand1, 0)))
3722 {
3723 emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
3724 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
3725 Pmode,
3726 XEXP (XEXP (operand1, 0), 0),
3727 scratch_reg));
3728 }
3729 else
3730 emit_move_insn (scratch_reg, XEXP (operand1, 0));
3731 emit_insn (gen_rtx_SET (VOIDmode, operand0,
3732 gen_rtx_MEM (mode, scratch_reg)));
3733 return 1;
3734 }
3735 else if (fp_reg_operand (operand1, mode)
3736 && ((GET_CODE (operand0) == MEM
3737 && ! memory_address_p (DFmode, XEXP (operand0, 0)))
3738 || ((GET_CODE (operand0) == SUBREG)
3739 && GET_CODE (XEXP (operand0, 0)) == MEM
3740 && !memory_address_p (DFmode, XEXP (XEXP (operand0, 0), 0))))
3741 && scratch_reg)
3742 {
3743 if (GET_CODE (operand0) == SUBREG)
3744 operand0 = XEXP (operand0, 0);
3745
3746 /* SCRATCH_REG will hold an address and maybe the actual data. We want
3747 it in SImode regardless of what mode it was originally given
3748 to us. */
3749 scratch_reg = force_mode (SImode, scratch_reg);
3750
3751 /* The address might not be legitimate as given; in such cases
3752 build it piecewise in the scratch register. */
3753 if (!memory_address_p (Pmode, XEXP (operand0, 0)))
3754 {
3755 emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
3756 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0,
3757 0)),
3758 Pmode,
3759 XEXP (XEXP (operand0, 0),
3760 0),
3761 scratch_reg));
3762 }
3763 else
3764 emit_move_insn (scratch_reg, XEXP (operand0, 0));
3765 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_MEM (mode, scratch_reg),
3766 operand1));
3767 return 1;
3768 }
3769 /* Handle secondary reloads for loads of FP registers from constant
3770 expressions by forcing the constant into memory.
3771
3772 Use scratch_reg to hold the address of the memory location.
3773
3774 The proper fix is to change PREFERRED_RELOAD_CLASS to return
3775 NO_REGS when presented with a const_int and a register class
3776 containing only FP registers. Doing so unfortunately creates
3777 more problems than it solves. Fix this for 2.5. */
3778 else if (fp_reg_operand (operand0, mode)
3779 && CONSTANT_P (operand1)
3780 && scratch_reg)
3781 {
3782 rtx xoperands[2];
3783
3784 /* SCRATCH_REG will hold an address and maybe the actual data. We want
3785 it in SImode regardless of what mode it was originally given
3786 to us. */
3787 scratch_reg = force_mode (SImode, scratch_reg);
3788
3789 /* Force the constant into memory and put the address of the
3790 memory location into scratch_reg. */
3791 xoperands[0] = scratch_reg;
3792 xoperands[1] = XEXP (force_const_mem (mode, operand1), 0);
3793 emit_insn (gen_rtx_SET (mode, scratch_reg, xoperands[1]));
3794
3795 /* Now load the destination register. */
3796 emit_insn (gen_rtx_SET (mode, operand0,
3797 gen_rtx_MEM (mode, scratch_reg)));
3798 return 1;
3799 }
3800
3801 /* Now have insn-emit do whatever it normally does. */
3802 return 0;
3803 }
3804
3805 /* Split one or more DImode RTL references into pairs of SImode
3806 references. The RTL can be REG, offsettable MEM, integer constant, or
3807 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
3808 split and "num" is its length. lo_half and hi_half are output arrays
3809 that parallel "operands". */
3810
3811 void
3812 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
3813 {
3814 while (num--)
3815 {
3816 rtx op = operands[num];
3817
3818 /* simplify_subreg refuses to split volatile memory addresses,
3819 but we still have to handle them. */
3820 if (GET_CODE (op) == MEM)
3821 {
3822 lo_half[num] = adjust_address (op, SImode, 4);
3823 hi_half[num] = adjust_address (op, SImode, 0);
3824 }
3825 else
3826 {
3827 lo_half[num] = simplify_gen_subreg (SImode, op,
3828 GET_MODE (op) == VOIDmode
3829 ? DImode : GET_MODE (op), 4);
3830 hi_half[num] = simplify_gen_subreg (SImode, op,
3831 GET_MODE (op) == VOIDmode
3832 ? DImode : GET_MODE (op), 0);
3833 }
3834 }
3835 }
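/* Illustrative sketch (not part of the original source): since the
   m68k is big-endian, the most significant word lives at the lower
   address.  Splitting (mem:DI (reg:SI %a0)) therefore yields

     hi_half = (mem:SI (reg:SI %a0))
     lo_half = (mem:SI (plus:SI (reg:SI %a0) (const_int 4)))

   while splitting the register pair (reg:DI %d0) yields %d0 for the
   high word and %d1 for the low word.  */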
3836
3837 /* Split X into a base and a constant offset, storing them in *BASE
3838 and *OFFSET respectively. */
3839
3840 static void
3841 m68k_split_offset (rtx x, rtx *base, HOST_WIDE_INT *offset)
3842 {
3843 *offset = 0;
3844 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
3845 {
3846 *offset += INTVAL (XEXP (x, 1));
3847 x = XEXP (x, 0);
3848 }
3849 *base = x;
3850 }
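/* For example, (plus:SI (reg:SI %a0) (const_int 8)) decomposes into
   *BASE = (reg:SI %a0) and *OFFSET = 8; a bare register decomposes
   into itself with *OFFSET = 0.  */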
3851
3852 /* Return true if PATTERN is a PARALLEL suitable for a movem or fmovem
3853 instruction. STORE_P is true if the moves are stores, false if loads.
3854
3855 If the instruction uses post-increment or pre-decrement addressing,
3856 AUTOMOD_BASE is the base register and AUTOMOD_OFFSET is the total
3857 adjustment. This adjustment will be made by the first element of
3858 PARALLEL, with the loads or stores starting at element 1. If the
3859 instruction does not use post-increment or pre-decrement addressing,
3860 AUTOMOD_BASE is null, AUTOMOD_OFFSET is 0, and the loads or stores
3861 start at element 0. */
3862
3863 bool
3864 m68k_movem_pattern_p (rtx pattern, rtx automod_base,
3865 HOST_WIDE_INT automod_offset, bool store_p)
3866 {
3867 rtx base, mem_base, set, mem, reg, last_reg;
3868 HOST_WIDE_INT offset, mem_offset;
3869 int i, first, len;
3870 enum reg_class rclass;
3871
3872 len = XVECLEN (pattern, 0);
3873 first = (automod_base != NULL);
3874
3875 if (automod_base)
3876 {
3877 /* Stores must be pre-decrement and loads must be post-increment. */
3878 if (store_p != (automod_offset < 0))
3879 return false;
3880
3881 /* Work out the base and offset for the lowest memory location. */
3882 base = automod_base;
3883 offset = (automod_offset < 0 ? automod_offset : 0);
3884 }
3885 else
3886 {
3887 /* Allow any valid base and offset in the first access. */
3888 base = NULL;
3889 offset = 0;
3890 }
3891
3892 last_reg = NULL;
3893 rclass = NO_REGS;
3894 for (i = first; i < len; i++)
3895 {
3896 /* We need a plain SET. */
3897 set = XVECEXP (pattern, 0, i);
3898 if (GET_CODE (set) != SET)
3899 return false;
3900
3901 /* Check that we have a memory location... */
3902 mem = XEXP (set, !store_p);
3903 if (!MEM_P (mem) || !memory_operand (mem, VOIDmode))
3904 return false;
3905
3906 /* ...with the right address. */
3907 if (base == NULL)
3908 {
3909 m68k_split_offset (XEXP (mem, 0), &base, &offset);
3910 /* The ColdFire instruction only allows (An) and (d16,An) modes.
3911 There are no mode restrictions for 680x0 besides the
3912 automodification rules enforced above. */
3913 if (TARGET_COLDFIRE
3914 && !m68k_legitimate_base_reg_p (base, reload_completed))
3915 return false;
3916 }
3917 else
3918 {
3919 m68k_split_offset (XEXP (mem, 0), &mem_base, &mem_offset);
3920 if (!rtx_equal_p (base, mem_base) || offset != mem_offset)
3921 return false;
3922 }
3923
3924 /* Check that we have a register of the required mode and class. */
3925 reg = XEXP (set, store_p);
3926 if (!REG_P (reg)
3927 || !HARD_REGISTER_P (reg)
3928 || GET_MODE (reg) != reg_raw_mode[REGNO (reg)])
3929 return false;
3930
3931 if (last_reg)
3932 {
3933 /* The register must belong to RCLASS and have a higher number
3934 than the register in the previous SET. */
3935 if (!TEST_HARD_REG_BIT (reg_class_contents[rclass], REGNO (reg))
3936 || REGNO (last_reg) >= REGNO (reg))
3937 return false;
3938 }
3939 else
3940 {
3941 /* Work out which register class we need. */
3942 if (INT_REGNO_P (REGNO (reg)))
3943 rclass = GENERAL_REGS;
3944 else if (FP_REGNO_P (REGNO (reg)))
3945 rclass = FP_REGS;
3946 else
3947 return false;
3948 }
3949
3950 last_reg = reg;
3951 offset += GET_MODE_SIZE (GET_MODE (reg));
3952 }
3953
3954 /* If we have an automodification, check whether the final offset is OK. */
3955 if (automod_base && offset != (automod_offset < 0 ? 0 : automod_offset))
3956 return false;
3957
3958 /* Reject unprofitable cases. */
3959 if (len < first + (rclass == FP_REGS ? MIN_FMOVEM_REGS : MIN_MOVEM_REGS))
3960 return false;
3961
3962 return true;
3963 }
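/* As an illustration (not taken from the original source), a
   pre-decrement store of %d2, %d3 and %a2 would be the PARALLEL

     (parallel
       [(set (reg:SI %sp) (plus:SI (reg:SI %sp) (const_int -12)))
        (set (mem:SI (plus:SI (reg:SI %sp) (const_int -12)))
             (reg:SI %d2))
        (set (mem:SI (plus:SI (reg:SI %sp) (const_int -8)))
             (reg:SI %d3))
        (set (mem:SI (plus:SI (reg:SI %sp) (const_int -4)))
             (reg:SI %a2))])

   which satisfies the checks above: the moves start at element 1, the
   registers appear with increasing numbers, consecutive accesses are
   4 bytes apart, and the final offset works out to 0.  */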
3964
3965 /* Return the assembly code template for a movem or fmovem instruction
3966 whose pattern is given by PATTERN. Store the template's operands
3967 in OPERANDS.
3968
3969 If the instruction uses post-increment or pre-decrement addressing,
3970 AUTOMOD_OFFSET is the total adjustment, otherwise it is 0. STORE_P
3971 is true if this is a store instruction. */
3972
3973 const char *
3974 m68k_output_movem (rtx *operands, rtx pattern,
3975 HOST_WIDE_INT automod_offset, bool store_p)
3976 {
3977 unsigned int mask;
3978 int i, first;
3979
3980 gcc_assert (GET_CODE (pattern) == PARALLEL);
3981 mask = 0;
3982 first = (automod_offset != 0);
3983 for (i = first; i < XVECLEN (pattern, 0); i++)
3984 {
3985 /* When using movem with pre-decrement addressing, register X + D0_REG
3986 is controlled by bit 15 - X. For all other addressing modes,
3987 register X + D0_REG is controlled by bit X. Confusingly, the
3988 register mask for fmovem is in the opposite order to that for
3989 movem. */
3990 unsigned int regno;
3991
3992 gcc_assert (MEM_P (XEXP (XVECEXP (pattern, 0, i), !store_p)));
3993 gcc_assert (REG_P (XEXP (XVECEXP (pattern, 0, i), store_p)));
3994 regno = REGNO (XEXP (XVECEXP (pattern, 0, i), store_p));
3995 if (automod_offset < 0)
3996 {
3997 if (FP_REGNO_P (regno))
3998 mask |= 1 << (regno - FP0_REG);
3999 else
4000 mask |= 1 << (15 - (regno - D0_REG));
4001 }
4002 else
4003 {
4004 if (FP_REGNO_P (regno))
4005 mask |= 1 << (7 - (regno - FP0_REG));
4006 else
4007 mask |= 1 << (regno - D0_REG);
4008 }
4009 }
4010 CC_STATUS_INIT;
4011
4012 if (automod_offset == 0)
4013 operands[0] = XEXP (XEXP (XVECEXP (pattern, 0, first), !store_p), 0);
4014 else if (automod_offset < 0)
4015 operands[0] = gen_rtx_PRE_DEC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
4016 else
4017 operands[0] = gen_rtx_POST_INC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
4018 operands[1] = GEN_INT (mask);
4019 if (FP_REGNO_P (REGNO (XEXP (XVECEXP (pattern, 0, first), store_p))))
4020 {
4021 if (store_p)
4022 return "fmovem %1,%a0";
4023 else
4024 return "fmovem %a0,%1";
4025 }
4026 else
4027 {
4028 if (store_p)
4029 return "movem%.l %1,%a0";
4030 else
4031 return "movem%.l %a0,%1";
4032 }
4033 }
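/* Worked example (illustrative): for the pre-decrement store of
   %d2/%d3/%a2 sketched above, the loop sets bits 15-2, 15-3 and
   15-10, giving a mask of 0x3020, which is emitted as the immediate
   operand of "movem.l".  With post-increment or plain addressing the
   same registers would use bits 2, 3 and 10, i.e. mask 0x040c.  */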
4034
4035 /* Return a REG that occurs in ADDR with coefficient 1.
4036 ADDR can be effectively incremented by incrementing REG. */
4037
4038 static rtx
4039 find_addr_reg (rtx addr)
4040 {
4041 while (GET_CODE (addr) == PLUS)
4042 {
4043 if (GET_CODE (XEXP (addr, 0)) == REG)
4044 addr = XEXP (addr, 0);
4045 else if (GET_CODE (XEXP (addr, 1)) == REG)
4046 addr = XEXP (addr, 1);
4047 else if (CONSTANT_P (XEXP (addr, 0)))
4048 addr = XEXP (addr, 1);
4049 else if (CONSTANT_P (XEXP (addr, 1)))
4050 addr = XEXP (addr, 0);
4051 else
4052 gcc_unreachable ();
4053 }
4054 gcc_assert (GET_CODE (addr) == REG);
4055 return addr;
4056 }
4057
4058 /* Output assembler code to perform a 32-bit 3-operand add. */
4059
4060 const char *
4061 output_addsi3 (rtx *operands)
4062 {
4063 if (! operands_match_p (operands[0], operands[1]))
4064 {
4065 if (!ADDRESS_REG_P (operands[1]))
4066 {
4067 rtx tmp = operands[1];
4068
4069 operands[1] = operands[2];
4070 operands[2] = tmp;
4071 }
4072
4073 /* These insns can result from reloads to access
4074 stack slots over 64k from the frame pointer. */
4075 if (GET_CODE (operands[2]) == CONST_INT
4076 && (INTVAL (operands[2]) < -32768 || INTVAL (operands[2]) > 32767))
4077 return "move%.l %2,%0\n\tadd%.l %1,%0";
4078 if (GET_CODE (operands[2]) == REG)
4079 return MOTOROLA ? "lea (%1,%2.l),%0" : "lea %1@(0,%2:l),%0";
4080 return MOTOROLA ? "lea (%c2,%1),%0" : "lea %1@(%c2),%0";
4081 }
4082 if (GET_CODE (operands[2]) == CONST_INT)
4083 {
4084 if (INTVAL (operands[2]) > 0
4085 && INTVAL (operands[2]) <= 8)
4086 return "addq%.l %2,%0";
4087 if (INTVAL (operands[2]) < 0
4088 && INTVAL (operands[2]) >= -8)
4089 {
4090 operands[2] = GEN_INT (- INTVAL (operands[2]));
4091 return "subq%.l %2,%0";
4092 }
4093 /* On the CPU32 it is faster to use two addql instructions to
4094 add a small integer (8 < N <= 16) to a register.
4095 Likewise for subql. */
4096 if (TUNE_CPU32 && REG_P (operands[0]))
4097 {
4098 if (INTVAL (operands[2]) > 8
4099 && INTVAL (operands[2]) <= 16)
4100 {
4101 operands[2] = GEN_INT (INTVAL (operands[2]) - 8);
4102 return "addq%.l #8,%0\n\taddq%.l %2,%0";
4103 }
4104 if (INTVAL (operands[2]) < -8
4105 && INTVAL (operands[2]) >= -16)
4106 {
4107 operands[2] = GEN_INT (- INTVAL (operands[2]) - 8);
4108 return "subq%.l #8,%0\n\tsubq%.l %2,%0";
4109 }
4110 }
4111 if (ADDRESS_REG_P (operands[0])
4112 && INTVAL (operands[2]) >= -0x8000
4113 && INTVAL (operands[2]) < 0x8000)
4114 {
4115 if (TUNE_68040)
4116 return "add%.w %2,%0";
4117 else
4118 return MOTOROLA ? "lea (%c2,%0),%0" : "lea %0@(%c2),%0";
4119 }
4120 }
4121 return "add%.l %2,%0";
4122 }
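/* A few illustrative cases in Motorola syntax, assuming a
   data-register destination %d0 or address-register destination %a0:

     d0 += 5    ->  "addq.l #5,%d0"
     d0 += -3   ->  "subq.l #3,%d0"
     d0 += 12   ->  "addq.l #8,%d0; addq.l #4,%d0"   (CPU32 tuning)
     a0 += 1000 ->  "lea (1000,%a0),%a0"             ("add.w" on the 68040)

   Everything else falls through to the plain "add.l".  */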
4123 \f
4124 /* Store in cc_status the expressions that the condition codes will
4125 describe after execution of an instruction whose pattern is EXP.
4126 Do not alter them if the instruction would not alter the cc's. */
4127
4128 /* On the 68000, all the insns to store in an address register fail to
4129 set the cc's. However, in some cases these instructions can make it
4130 invalid to use the saved cc's. In those cases we clear out
4131 some or all of the saved cc's so they won't be used. */
4132
4133 void
4134 notice_update_cc (rtx exp, rtx insn)
4135 {
4136 if (GET_CODE (exp) == SET)
4137 {
4138 if (GET_CODE (SET_SRC (exp)) == CALL)
4139 CC_STATUS_INIT;
4140 else if (ADDRESS_REG_P (SET_DEST (exp)))
4141 {
4142 if (cc_status.value1 && modified_in_p (cc_status.value1, insn))
4143 cc_status.value1 = 0;
4144 if (cc_status.value2 && modified_in_p (cc_status.value2, insn))
4145 cc_status.value2 = 0;
4146 }
4147 /* fmoves to memory or data registers do not set the condition
4148 codes. Normal moves _do_ set the condition codes, but not in
4149 a way that is appropriate for comparison with 0, because -0.0
4150 would be treated as a negative nonzero number. Note that it
4151 isn't appropriate to conditionalize this restriction on
4152 HONOR_SIGNED_ZEROS because that macro merely indicates whether
4153 we care about the difference between -0.0 and +0.0. */
4154 else if (!FP_REG_P (SET_DEST (exp))
4155 && SET_DEST (exp) != cc0_rtx
4156 && (FP_REG_P (SET_SRC (exp))
4157 || GET_CODE (SET_SRC (exp)) == FIX
4158 || FLOAT_MODE_P (GET_MODE (SET_DEST (exp)))))
4159 CC_STATUS_INIT;
4160 /* A pair of move insns doesn't produce a useful overall cc. */
4161 else if (!FP_REG_P (SET_DEST (exp))
4162 && !FP_REG_P (SET_SRC (exp))
4163 && GET_MODE_SIZE (GET_MODE (SET_SRC (exp))) > 4
4164 && (GET_CODE (SET_SRC (exp)) == REG
4165 || GET_CODE (SET_SRC (exp)) == MEM
4166 || GET_CODE (SET_SRC (exp)) == CONST_DOUBLE))
4167 CC_STATUS_INIT;
4168 else if (SET_DEST (exp) != pc_rtx)
4169 {
4170 cc_status.flags = 0;
4171 cc_status.value1 = SET_DEST (exp);
4172 cc_status.value2 = SET_SRC (exp);
4173 }
4174 }
4175 else if (GET_CODE (exp) == PARALLEL
4176 && GET_CODE (XVECEXP (exp, 0, 0)) == SET)
4177 {
4178 rtx dest = SET_DEST (XVECEXP (exp, 0, 0));
4179 rtx src = SET_SRC (XVECEXP (exp, 0, 0));
4180
4181 if (ADDRESS_REG_P (dest))
4182 CC_STATUS_INIT;
4183 else if (dest != pc_rtx)
4184 {
4185 cc_status.flags = 0;
4186 cc_status.value1 = dest;
4187 cc_status.value2 = src;
4188 }
4189 }
4190 else
4191 CC_STATUS_INIT;
4192 if (cc_status.value2 != 0
4193 && ADDRESS_REG_P (cc_status.value2)
4194 && GET_MODE (cc_status.value2) == QImode)
4195 CC_STATUS_INIT;
4196 if (cc_status.value2 != 0)
4197 switch (GET_CODE (cc_status.value2))
4198 {
4199 case ASHIFT: case ASHIFTRT: case LSHIFTRT:
4200 case ROTATE: case ROTATERT:
4201 /* These instructions always clear the overflow bit, and set
4202 the carry to the bit shifted out. */
4203 cc_status.flags |= CC_OVERFLOW_UNUSABLE | CC_NO_CARRY;
4204 break;
4205
4206 case PLUS: case MINUS: case MULT:
4207 case DIV: case UDIV: case MOD: case UMOD: case NEG:
4208 if (GET_MODE (cc_status.value2) != VOIDmode)
4209 cc_status.flags |= CC_NO_OVERFLOW;
4210 break;
4211 case ZERO_EXTEND:
4212 /* (SET r1 (ZERO_EXTEND r2)) on this machine
4213 ends with a move insn moving r2 in r2's mode.
4214 Thus, the cc's are set for r2.
4215 This can set N bit spuriously. */
4216 cc_status.flags |= CC_NOT_NEGATIVE;
4217
4218 default:
4219 break;
4220 }
4221 if (cc_status.value1 && GET_CODE (cc_status.value1) == REG
4222 && cc_status.value2
4223 && reg_overlap_mentioned_p (cc_status.value1, cc_status.value2))
4224 cc_status.value2 = 0;
4225 if (((cc_status.value1 && FP_REG_P (cc_status.value1))
4226 || (cc_status.value2 && FP_REG_P (cc_status.value2))))
4227 cc_status.flags = CC_IN_68881;
4228 if (cc_status.value2 && GET_CODE (cc_status.value2) == COMPARE
4229 && GET_MODE_CLASS (GET_MODE (XEXP (cc_status.value2, 0))) == MODE_FLOAT)
4230 {
4231 cc_status.flags = CC_IN_68881;
4232 if (!FP_REG_P (XEXP (cc_status.value2, 0)))
4233 cc_status.flags |= CC_REVERSED;
4234 }
4235 }
4236 \f
4237 const char *
4238 output_move_const_double (rtx *operands)
4239 {
4240 int code = standard_68881_constant_p (operands[1]);
4241
4242 if (code != 0)
4243 {
4244 static char buf[40];
4245
4246 sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff);
4247 return buf;
4248 }
4249 return "fmove%.d %1,%0";
4250 }
4251
4252 const char *
4253 output_move_const_single (rtx *operands)
4254 {
4255 int code = standard_68881_constant_p (operands[1]);
4256
4257 if (code != 0)
4258 {
4259 static char buf[40];
4260
4261 sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff);
4262 return buf;
4263 }
4264 return "fmove%.s %f1,%0";
4265 }
4266
4267 /* Return nonzero if X, a CONST_DOUBLE, has a value that we can get
4268 from the "fmovecr" instruction.
4269 The return value, ANDed with 0xff, gives the code to use in fmovecr
4270 to get the desired constant. */
4271
4272 /* This code has been fixed for cross-compilation. */
4273
4274 static int inited_68881_table = 0;
4275
4276 static const char *const strings_68881[7] = {
4277 "0.0",
4278 "1.0",
4279 "10.0",
4280 "100.0",
4281 "10000.0",
4282 "1e8",
4283 "1e16"
4284 };
4285
4286 static const int codes_68881[7] = {
4287 0x0f,
4288 0x32,
4289 0x33,
4290 0x34,
4291 0x35,
4292 0x36,
4293 0x37
4294 };
4295
4296 REAL_VALUE_TYPE values_68881[7];
4297
4298 /* Set up values_68881 array by converting the decimal values
4299 strings_68881 to binary. */
4300
4301 void
4302 init_68881_table (void)
4303 {
4304 int i;
4305 REAL_VALUE_TYPE r;
4306 enum machine_mode mode;
4307
4308 mode = SFmode;
4309 for (i = 0; i < 7; i++)
4310 {
4311 if (i == 6)
4312 mode = DFmode;
4313 r = REAL_VALUE_ATOF (strings_68881[i], mode);
4314 values_68881[i] = r;
4315 }
4316 inited_68881_table = 1;
4317 }
4318
4319 int
4320 standard_68881_constant_p (rtx x)
4321 {
4322 REAL_VALUE_TYPE r;
4323 int i;
4324
4325 /* fmovecr must be emulated on the 68040 and 68060, so it shouldn't be
4326 used at all on those chips. */
4327 if (TUNE_68040_60)
4328 return 0;
4329
4330 if (! inited_68881_table)
4331 init_68881_table ();
4332
4333 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4334
4335 /* Use REAL_VALUES_IDENTICAL instead of REAL_VALUES_EQUAL so that -0.0
4336 is rejected. */
4337 for (i = 0; i < 6; i++)
4338 {
4339 if (REAL_VALUES_IDENTICAL (r, values_68881[i]))
4340 return (codes_68881[i]);
4341 }
4342
4343 if (GET_MODE (x) == SFmode)
4344 return 0;
4345
4346 if (REAL_VALUES_EQUAL (r, values_68881[6]))
4347 return (codes_68881[6]);
4348
4349 /* Larger powers of ten in the 68881 constant ROM are not used
4350 because they are not exactly equal to a `double' C constant. */
4351 return 0;
4352 }
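/* For example, per the tables above, loading 1.0 into an FP register
   can use ROM offset 0x32 ("fmovecr #0x32,%fp0") and 0.0 can use
   offset 0x0f.  On the 68040/68060 this function returns 0 so that
   the (trapped and emulated) fmovecr is never generated.  */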
4353
4354 /* If X is a floating-point constant, return the logarithm of X base 2,
4355 or 0 if X is not a power of 2. */
4356
4357 int
4358 floating_exact_log2 (rtx x)
4359 {
4360 REAL_VALUE_TYPE r, r1;
4361 int exp;
4362
4363 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4364
4365 if (REAL_VALUES_LESS (r, dconst1))
4366 return 0;
4367
4368 exp = real_exponent (&r);
4369 real_2expN (&r1, exp, DFmode);
4370 if (REAL_VALUES_EQUAL (r1, r))
4371 return exp;
4372
4373 return 0;
4374 }
4375 \f
4376 /* A C compound statement to output to stdio stream STREAM the
4377 assembler syntax for an instruction operand X. X is an RTL
4378 expression.
4379
4380 CODE is a value that can be used to specify one of several ways
4381 of printing the operand. It is used when identical operands
4382 must be printed differently depending on the context. CODE
4383 comes from the `%' specification that was used to request
4384 printing of the operand. If the specification was just `%DIGIT'
4385 then CODE is 0; if the specification was `%LTR DIGIT' then CODE
4386 is the ASCII code for LTR.
4387
4388 If X is a register, this macro should print the register's name.
4389 The names can be found in an array `reg_names' whose type is
4390 `char *[]'. `reg_names' is initialized from `REGISTER_NAMES'.
4391
4392 When the machine description has a specification `%PUNCT' (a `%'
4393 followed by a punctuation character), this macro is called with
4394 a null pointer for X and the punctuation character for CODE.
4395
4396 The m68k specific codes are:
4397
4398 '.' for dot needed in Motorola-style opcode names.
4399 '-' for an operand pushing on the stack:
4400 sp@-, -(sp) or -(%sp) depending on the style of syntax.
4401 '+' for an operand pushing on the stack:
4402 sp@+, (sp)+ or (%sp)+ depending on the style of syntax.
4403 '@' for a reference to the top word on the stack:
4404 sp@, (sp) or (%sp) depending on the style of syntax.
4405 '#' for an immediate operand prefix (# in MIT and Motorola syntax
4406 but & in SGS syntax).
4407 '!' for the cc register (used in an `and to cc' insn).
4408 '$' for the letter `s' in an op code, but only on the 68040.
4409 '&' for the letter `d' in an op code, but only on the 68040.
4410 '/' for register prefix needed by longlong.h.
4411 '?' for m68k_library_id_string
4412
4413 'b' for byte insn (no effect, on the Sun; this is for the ISI).
4414 'd' to force memory addressing to be absolute, not relative.
4415 'f' for float insn (print a CONST_DOUBLE as a float rather than in hex)
4416 'x' for float insn (print a CONST_DOUBLE as a float rather than in hex),
4417 or print pair of registers as rx:ry.
4418 'p' print an address with @PLTPC attached, but only if the operand
4419 is not locally-bound. */
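/* For instance, the template "move%.l %1,%0" expands to "move.l ..."
   under Motorola syntax and to "movel ..." under MIT syntax, and "%-"
   expands to "-(%sp)" or "%sp@-" respectively, as implemented
   below.  */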
4420
4421 void
4422 print_operand (FILE *file, rtx op, int letter)
4423 {
4424 if (letter == '.')
4425 {
4426 if (MOTOROLA)
4427 fprintf (file, ".");
4428 }
4429 else if (letter == '#')
4430 asm_fprintf (file, "%I");
4431 else if (letter == '-')
4432 asm_fprintf (file, MOTOROLA ? "-(%Rsp)" : "%Rsp@-");
4433 else if (letter == '+')
4434 asm_fprintf (file, MOTOROLA ? "(%Rsp)+" : "%Rsp@+");
4435 else if (letter == '@')
4436 asm_fprintf (file, MOTOROLA ? "(%Rsp)" : "%Rsp@");
4437 else if (letter == '!')
4438 asm_fprintf (file, "%Rfpcr");
4439 else if (letter == '$')
4440 {
4441 if (TARGET_68040)
4442 fprintf (file, "s");
4443 }
4444 else if (letter == '&')
4445 {
4446 if (TARGET_68040)
4447 fprintf (file, "d");
4448 }
4449 else if (letter == '/')
4450 asm_fprintf (file, "%R");
4451 else if (letter == '?')
4452 asm_fprintf (file, m68k_library_id_string);
4453 else if (letter == 'p')
4454 {
4455 output_addr_const (file, op);
4456 if (!(GET_CODE (op) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (op)))
4457 fprintf (file, "@PLTPC");
4458 }
4459 else if (GET_CODE (op) == REG)
4460 {
4461 if (letter == 'R')
4462 /* Print out the second register name of a register pair.
4463 I.e., R (6) => 7. */
4464 fputs (M68K_REGNAME(REGNO (op) + 1), file);
4465 else
4466 fputs (M68K_REGNAME(REGNO (op)), file);
4467 }
4468 else if (GET_CODE (op) == MEM)
4469 {
4470 output_address (XEXP (op, 0));
4471 if (letter == 'd' && ! TARGET_68020
4472 && CONSTANT_ADDRESS_P (XEXP (op, 0))
4473 && !(GET_CODE (XEXP (op, 0)) == CONST_INT
4474 && INTVAL (XEXP (op, 0)) < 0x8000
4475 && INTVAL (XEXP (op, 0)) >= -0x8000))
4476 fprintf (file, MOTOROLA ? ".l" : ":l");
4477 }
4478 else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == SFmode)
4479 {
4480 REAL_VALUE_TYPE r;
4481 long l;
4482 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
4483 REAL_VALUE_TO_TARGET_SINGLE (r, l);
4484 asm_fprintf (file, "%I0x%lx", l & 0xFFFFFFFF);
4485 }
4486 else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == XFmode)
4487 {
4488 REAL_VALUE_TYPE r;
4489 long l[3];
4490 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
4491 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
4492 asm_fprintf (file, "%I0x%lx%08lx%08lx", l[0] & 0xFFFFFFFF,
4493 l[1] & 0xFFFFFFFF, l[2] & 0xFFFFFFFF);
4494 }
4495 else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == DFmode)
4496 {
4497 REAL_VALUE_TYPE r;
4498 long l[2];
4499 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
4500 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
4501 asm_fprintf (file, "%I0x%lx%08lx", l[0] & 0xFFFFFFFF, l[1] & 0xFFFFFFFF);
4502 }
4503 else
4504 {
4505 /* Use `print_operand_address' instead of `output_addr_const'
4506 to ensure that we print relevant PIC stuff. */
4507 asm_fprintf (file, "%I");
4508 if (TARGET_PCREL
4509 && (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == CONST))
4510 print_operand_address (file, op);
4511 else
4512 output_addr_const (file, op);
4513 }
4514 }
4515
4516 /* Return string for TLS relocation RELOC. */
4517
4518 static const char *
4519 m68k_get_reloc_decoration (enum m68k_reloc reloc)
4520 {
4521 /* To my knowledge, !MOTOROLA assemblers don't support TLS. */
4522 gcc_assert (MOTOROLA || reloc == RELOC_GOT);
4523
4524 switch (reloc)
4525 {
4526 case RELOC_GOT:
4527 if (MOTOROLA)
4528 {
4529 if (flag_pic == 1 && TARGET_68020)
4530 return "@GOT.w";
4531 else
4532 return "@GOT";
4533 }
4534 else
4535 {
4536 if (TARGET_68020)
4537 {
4538 switch (flag_pic)
4539 {
4540 case 1:
4541 return ":w";
4542 case 2:
4543 return ":l";
4544 default:
4545 return "";
4546 }
4547 }
4548 }
4549 return ""; /* !TARGET_68020: no decoration; don't fall into the TLS cases. */
4550 case RELOC_TLSGD:
4551 return "@TLSGD";
4552
4553 case RELOC_TLSLDM:
4554 return "@TLSLDM";
4555
4556 case RELOC_TLSLDO:
4557 return "@TLSLDO";
4558
4559 case RELOC_TLSIE:
4560 return "@TLSIE";
4561
4562 case RELOC_TLSLE:
4563 return "@TLSLE";
4564
4565 default:
4566 gcc_unreachable ();
4567 }
4568 }
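/* For example, with Motorola syntax a GOT reference prints as
   "foo@GOT.w" under -fpic on the 68020 (16-bit GOT offsets) and as
   "foo@GOT" under -fPIC, while a TLS local-exec reference prints as
   "foo@TLSLE".  */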
4569
4570 /* m68k implementation of OUTPUT_ADDR_CONST_EXTRA. */
4571
4572 bool
4573 m68k_output_addr_const_extra (FILE *file, rtx x)
4574 {
4575 if (GET_CODE (x) == UNSPEC)
4576 {
4577 switch (XINT (x, 1))
4578 {
4579 case UNSPEC_RELOC16:
4580 case UNSPEC_RELOC32:
4581 output_addr_const (file, XVECEXP (x, 0, 0));
4582 fputs (m68k_get_reloc_decoration
4583 ((enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1))), file);
4584 return true;
4585
4586 default:
4587 break;
4588 }
4589 }
4590
4591 return false;
4592 }
4593
4594 /* M68K implementation of TARGET_ASM_OUTPUT_DWARF_DTPREL. */
4595
4596 static void
4597 m68k_output_dwarf_dtprel (FILE *file, int size, rtx x)
4598 {
4599 gcc_assert (size == 4);
4600 fputs ("\t.long\t", file);
4601 output_addr_const (file, x);
4602 fputs ("@TLSLDO+0x8000", file);
4603 }
4604
4605 /* In the name of slightly smaller debug output, and to cater to
4606 general assembler lossage, recognize various UNSPEC sequences
4607 and turn them back into a direct symbol reference. */
4608
4609 static rtx
4610 m68k_delegitimize_address (rtx orig_x)
4611 {
4612 rtx x;
4613 struct m68k_address addr;
4614 rtx unspec;
4615
4616 orig_x = delegitimize_mem_from_attrs (orig_x);
4617 x = orig_x;
4618 if (MEM_P (x))
4619 x = XEXP (x, 0);
4620
4621 if (GET_CODE (x) != PLUS || GET_MODE (x) != Pmode)
4622 return orig_x;
4623
4624 if (!m68k_decompose_address (GET_MODE (x), x, false, &addr)
4625 || addr.offset == NULL_RTX
4626 || GET_CODE (addr.offset) != CONST)
4627 return orig_x;
4628
4629 unspec = XEXP (addr.offset, 0);
4630 if (GET_CODE (unspec) == PLUS && CONST_INT_P (XEXP (unspec, 1)))
4631 unspec = XEXP (unspec, 0);
4632 if (GET_CODE (unspec) != UNSPEC
4633 || (XINT (unspec, 1) != UNSPEC_RELOC16
4634 && XINT (unspec, 1) != UNSPEC_RELOC32))
4635 return orig_x;
4636 x = XVECEXP (unspec, 0, 0);
4637 gcc_assert (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF);
4638 if (unspec != XEXP (addr.offset, 0))
4639 x = gen_rtx_PLUS (Pmode, x, XEXP (XEXP (addr.offset, 0), 1));
4640 if (addr.index)
4641 {
4642 rtx idx = addr.index;
4643 if (addr.scale != 1)
4644 idx = gen_rtx_MULT (Pmode, idx, GEN_INT (addr.scale));
4645 x = gen_rtx_PLUS (Pmode, idx, x);
4646 }
4647 if (addr.base)
4648 x = gen_rtx_PLUS (Pmode, addr.base, x);
4649 if (MEM_P (orig_x))
4650 x = replace_equiv_address_nv (orig_x, x);
4651 return x;
4652 }
4653
4654 \f
4655 /* A C compound statement to output to stdio stream STREAM the
4656 assembler syntax for an instruction operand that is a memory
4657 reference whose address is ADDR. ADDR is an RTL expression.
4658
4659 Note that this contains a kludge that knows that the only reason
4660 we have an address (plus (label_ref...) (reg...)) when not generating
4661 PIC code is in the insn before a tablejump, and we know that m68k.md
4662 generates a label LInnn: on such an insn.
4663
4664 It is possible for PIC to generate a (plus (label_ref...) (reg...))
4665 and we handle that just like we would a (plus (symbol_ref...) (reg...)).
4666
4667 This routine is responsible for distinguishing between -fpic and -fPIC
4668 style relocations in an address. When generating -fpic code the
4669 offset is output in word mode (e.g. movel a5@(_foo:w), a0). When generating
4670 -fPIC code the offset is output in long mode (e.g. movel a5@(_foo:l), a0). */
4671
4672 void
4673 print_operand_address (FILE *file, rtx addr)
4674 {
4675 struct m68k_address address;
4676
4677 if (!m68k_decompose_address (QImode, addr, true, &address))
4678 gcc_unreachable ();
4679
4680 if (address.code == PRE_DEC)
4681 fprintf (file, MOTOROLA ? "-(%s)" : "%s@-",
4682 M68K_REGNAME (REGNO (address.base)));
4683 else if (address.code == POST_INC)
4684 fprintf (file, MOTOROLA ? "(%s)+" : "%s@+",
4685 M68K_REGNAME (REGNO (address.base)));
4686 else if (!address.base && !address.index)
4687 {
4688 /* A constant address. */
4689 gcc_assert (address.offset == addr);
4690 if (GET_CODE (addr) == CONST_INT)
4691 {
4692 /* (xxx).w or (xxx).l. */
4693 if (IN_RANGE (INTVAL (addr), -0x8000, 0x7fff))
4694 fprintf (file, MOTOROLA ? "%d.w" : "%d:w", (int) INTVAL (addr));
4695 else
4696 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (addr));
4697 }
4698 else if (TARGET_PCREL)
4699 {
4700 /* (d16,PC) or (bd,PC,Xn) (with suppressed index register). */
4701 fputc ('(', file);
4702 output_addr_const (file, addr);
4703 asm_fprintf (file, flag_pic == 1 ? ":w,%Rpc)" : ":l,%Rpc)");
4704 }
4705 else
4706 {
4707 /* (xxx).l. We need a special case for SYMBOL_REF if the symbol
4708 name ends in `.<letter>', as the last 2 characters can be
4709 mistaken as a size suffix. Put the name in parentheses. */
4710 if (GET_CODE (addr) == SYMBOL_REF
4711 && strlen (XSTR (addr, 0)) > 2
4712 && XSTR (addr, 0)[strlen (XSTR (addr, 0)) - 2] == '.')
4713 {
4714 putc ('(', file);
4715 output_addr_const (file, addr);
4716 putc (')', file);
4717 }
4718 else
4719 output_addr_const (file, addr);
4720 }
4721 }
4722 else
4723 {
4724 int labelno;
4725
4726 /* If ADDR is a (d8,pc,Xn) address, this is the number of the
4727 label being accessed, otherwise it is -1. */
4728 labelno = (address.offset
4729 && !address.base
4730 && GET_CODE (address.offset) == LABEL_REF
4731 ? CODE_LABEL_NUMBER (XEXP (address.offset, 0))
4732 : -1);
4733 if (MOTOROLA)
4734 {
4735 /* Print the "offset(base" component. */
4736 if (labelno >= 0)
4737 asm_fprintf (file, "%LL%d(%Rpc,", labelno);
4738 else
4739 {
4740 if (address.offset)
4741 output_addr_const (file, address.offset);
4742
4743 putc ('(', file);
4744 if (address.base)
4745 fputs (M68K_REGNAME (REGNO (address.base)), file);
4746 }
4747 /* Print the ",index" component, if any. */
4748 if (address.index)
4749 {
4750 if (address.base)
4751 putc (',', file);
4752 fprintf (file, "%s.%c",
4753 M68K_REGNAME (REGNO (address.index)),
4754 GET_MODE (address.index) == HImode ? 'w' : 'l');
4755 if (address.scale != 1)
4756 fprintf (file, "*%d", address.scale);
4757 }
4758 putc (')', file);
4759 }
4760 else /* !MOTOROLA */
4761 {
4762 if (!address.offset && !address.index)
4763 fprintf (file, "%s@", M68K_REGNAME (REGNO (address.base)));
4764 else
4765 {
4766 /* Print the "base@(offset" component. */
4767 if (labelno >= 0)
4768 asm_fprintf (file, "%Rpc@(%LL%d", labelno);
4769 else
4770 {
4771 if (address.base)
4772 fputs (M68K_REGNAME (REGNO (address.base)), file);
4773 fprintf (file, "@(");
4774 if (address.offset)
4775 output_addr_const (file, address.offset);
4776 }
4777 /* Print the ",index" component, if any. */
4778 if (address.index)
4779 {
4780 fprintf (file, ",%s:%c",
4781 M68K_REGNAME (REGNO (address.index)),
4782 GET_MODE (address.index) == HImode ? 'w' : 'l');
4783 if (address.scale != 1)
4784 fprintf (file, ":%d", address.scale);
4785 }
4786 putc (')', file);
4787 }
4788 }
4789 }
4790 }
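/* Example outputs (illustrative): a based-indexed address with base
   %a0, index %d0 scaled by 4 and offset 8 prints as "8(%a0,%d0.l*4)"
   in Motorola syntax and as "%a0@(8,%d0:l:4)" in MIT syntax.  */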
4791 \f
4792 /* Check for cases where a clr insn can be omitted from code using
4793 strict_low_part sets. For example, the second clrl here is not needed:
4794 clrl d0; movw a0@+,d0; use d0; clrl d0; movw a0@+,d0; use d0; ...
4795
4796 MODE is the mode of this STRICT_LOW_PART set. FIRST_INSN is the clear
4797 insn we are checking for redundancy. TARGET is the register set by the
4798 clear insn. */
4799
4800 bool
4801 strict_low_part_peephole_ok (enum machine_mode mode, rtx first_insn,
4802 rtx target)
4803 {
4804 rtx p = first_insn;
4805
4806 while ((p = PREV_INSN (p)))
4807 {
4808 if (NOTE_INSN_BASIC_BLOCK_P (p))
4809 return false;
4810
4811 if (NOTE_P (p))
4812 continue;
4813
4814 /* If it isn't an insn, then give up. */
4815 if (!INSN_P (p))
4816 return false;
4817
4818 if (reg_set_p (target, p))
4819 {
4820 rtx set = single_set (p);
4821 rtx dest;
4822
4823 /* If it isn't an easy to recognize insn, then give up. */
4824 if (! set)
4825 return false;
4826
4827 dest = SET_DEST (set);
4828
4829 /* If this sets the entire target register to zero, then our
4830 first_insn is redundant. */
4831 if (rtx_equal_p (dest, target)
4832 && SET_SRC (set) == const0_rtx)
4833 return true;
4834 else if (GET_CODE (dest) == STRICT_LOW_PART
4835 && GET_CODE (XEXP (dest, 0)) == REG
4836 && REGNO (XEXP (dest, 0)) == REGNO (target)
4837 && (GET_MODE_SIZE (GET_MODE (XEXP (dest, 0)))
4838 <= GET_MODE_SIZE (mode)))
4839 /* This is a strict low part set which modifies less than
4840 we are using, so it is safe. */
4841 ;
4842 else
4843 return false;
4844 }
4845 }
4846
4847 return false;
4848 }
4849
4850 /* Operand predicates for implementing asymmetric pc-relative addressing
4851 on m68k. The m68k supports pc-relative addressing (mode 7, register 2)
4852 when used as a source operand, but not as a destination operand.
4853
4854 We model this by restricting the meaning of the basic predicates
4855 (general_operand, memory_operand, etc) to forbid the use of this
4856 addressing mode, and then define the following predicates that permit
4857 this addressing mode. These predicates can then be used for the
4858 source operands of the appropriate instructions.
4859
4860 n.b. While it is theoretically possible to change all machine patterns
4861 to use this addressing mode where permitted by the architecture,
4862 it has only been implemented for "common" cases: SImode, HImode, and
4863 QImode operands, and only for the principal operations that would
4864 require this addressing mode: data movement and simple integer operations.
4865
4866 In parallel with these new predicates, two new constraint letters
4867 were defined: 'S' and 'T'. 'S' is the -mpcrel analog of 'm'.
4868 'T' replaces 's' in the non-pcrel case. It is a no-op in the pcrel case.
4869 In the pcrel case 's' is only valid in combination with 'a' registers.
4870 See addsi3, subsi3, cmpsi, and movsi patterns for a better understanding
4871 of how these constraints are used.
4872
4873 The use of these predicates is strictly optional, though patterns that
4874 don't will cause an extra reload register to be allocated where one
4875 was not necessary:
4876
4877 lea (abc:w,%pc),%a0 ; need to reload address
4878 moveq &1,%d1 ; since write to pc-relative space
4879 movel %d1,%a0@ ; is not allowed
4880 ...
4881 lea (abc:w,%pc),%a1 ; no need to reload address here
4882 movel %a1@,%d0 ; since "movel (abc:w,%pc),%d0" is ok
4883
4884 For more info, consult tiemann@cygnus.com.
4885
4886
4887 All of the ugliness with predicates and constraints is due to the
4888 simple fact that the m68k does not allow a pc-relative addressing
4889 mode as a destination. gcc does not distinguish between source and
4890 destination addresses. Hence, if we claim that pc-relative address
4891 modes are valid, e.g. TARGET_LEGITIMATE_ADDRESS_P accepts them, then we
4892 end up with invalid code. To get around this problem, we left
4893 pc-relative modes as invalid addresses, and then added special
4894 predicates and constraints to accept them.
4895
4896 A cleaner way to handle this is to modify gcc to distinguish
4897 between source and destination addresses. We can then say that
4898 pc-relative is a valid source address but not a valid destination
4899 address, and hopefully avoid a lot of the predicate and constraint
4900 hackery. Unfortunately, this would be a pretty big change. It would
4901 be a useful change for a number of ports, but there aren't any current
4902 plans to undertake this.
4903
4904 ***************************************************************************/
4905
4906
4907 const char *
4908 output_andsi3 (rtx *operands)
4909 {
4910 int logval;
4911 if (GET_CODE (operands[2]) == CONST_INT
4912 && (INTVAL (operands[2]) | 0xffff) == -1
4913 && (DATA_REG_P (operands[0])
4914 || offsettable_memref_p (operands[0]))
4915 && !TARGET_COLDFIRE)
4916 {
4917 if (GET_CODE (operands[0]) != REG)
4918 operands[0] = adjust_address (operands[0], HImode, 2);
4919 operands[2] = GEN_INT (INTVAL (operands[2]) & 0xffff);
4920 /* Do not delete a following tstl %0 insn; that would be incorrect. */
4921 CC_STATUS_INIT;
4922 if (operands[2] == const0_rtx)
4923 return "clr%.w %0";
4924 return "and%.w %2,%0";
4925 }
4926 if (GET_CODE (operands[2]) == CONST_INT
4927 && (logval = exact_log2 (~ INTVAL (operands[2]) & 0xffffffff)) >= 0
4928 && (DATA_REG_P (operands[0])
4929 || offsettable_memref_p (operands[0])))
4930 {
4931 if (DATA_REG_P (operands[0]))
4932 operands[1] = GEN_INT (logval);
4933 else
4934 {
4935 operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
4936 operands[1] = GEN_INT (logval % 8);
4937 }
4938 /* This does not set condition codes in a standard way. */
4939 CC_STATUS_INIT;
4940 return "bclr %1,%0";
4941 }
4942 return "and%.l %2,%0";
4943 }
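/* For instance, masking a data register with 0xfffffffe clears a
   single bit, so "and.l #-2,%d0" becomes "bclr #0,%d0"; masking with
   0xffff00ff leaves the upper word untouched and becomes the word
   operation "and.w #255,%d0".  output_iorsi3 and output_xorsi3 below
   apply the same single-bit trick using bset and bchg.  */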
4944
4945 const char *
4946 output_iorsi3 (rtx *operands)
4947 {
4948 register int logval;
4949 if (GET_CODE (operands[2]) == CONST_INT
4950 && INTVAL (operands[2]) >> 16 == 0
4951 && (DATA_REG_P (operands[0])
4952 || offsettable_memref_p (operands[0]))
4953 && !TARGET_COLDFIRE)
4954 {
4955 if (GET_CODE (operands[0]) != REG)
4956 operands[0] = adjust_address (operands[0], HImode, 2);
4957 /* Do not delete a following tstl %0 insn; that would be incorrect. */
4958 CC_STATUS_INIT;
4959 if (INTVAL (operands[2]) == 0xffff)
4960 return "mov%.w %2,%0";
4961 return "or%.w %2,%0";
4962 }
4963 if (GET_CODE (operands[2]) == CONST_INT
4964 && (logval = exact_log2 (INTVAL (operands[2]) & 0xffffffff)) >= 0
4965 && (DATA_REG_P (operands[0])
4966 || offsettable_memref_p (operands[0])))
4967 {
4968 if (DATA_REG_P (operands[0]))
4969 operands[1] = GEN_INT (logval);
4970 else
4971 {
4972 operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
4973 operands[1] = GEN_INT (logval % 8);
4974 }
4975 CC_STATUS_INIT;
4976 return "bset %1,%0";
4977 }
4978 return "or%.l %2,%0";
4979 }
4980
4981 const char *
4982 output_xorsi3 (rtx *operands)
4983 {
4984 register int logval;
4985 if (GET_CODE (operands[2]) == CONST_INT
4986 && INTVAL (operands[2]) >> 16 == 0
4987 && (offsettable_memref_p (operands[0]) || DATA_REG_P (operands[0]))
4988 && !TARGET_COLDFIRE)
4989 {
4990 if (! DATA_REG_P (operands[0]))
4991 operands[0] = adjust_address (operands[0], HImode, 2);
4992 /* Do not delete a following tstl %0 insn; that would be incorrect. */
4993 CC_STATUS_INIT;
4994 if (INTVAL (operands[2]) == 0xffff)
4995 return "not%.w %0";
4996 return "eor%.w %2,%0";
4997 }
4998 if (GET_CODE (operands[2]) == CONST_INT
4999 && (logval = exact_log2 (INTVAL (operands[2]) & 0xffffffff)) >= 0
5000 && (DATA_REG_P (operands[0])
5001 || offsettable_memref_p (operands[0])))
5002 {
5003 if (DATA_REG_P (operands[0]))
5004 operands[1] = GEN_INT (logval);
5005 else
5006 {
5007 operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
5008 operands[1] = GEN_INT (logval % 8);
5009 }
5010 CC_STATUS_INIT;
5011 return "bchg %1,%0";
5012 }
5013 return "eor%.l %2,%0";
5014 }
5015
5016 /* Return the instruction that should be used for a call to address X,
5017 which is known to be in operand 0. */
5018
5019 const char *
5020 output_call (rtx x)
5021 {
5022 if (symbolic_operand (x, VOIDmode))
5023 return m68k_symbolic_call;
5024 else
5025 return "jsr %a0";
5026 }
5027
5028 /* Likewise sibling calls. */
5029
5030 const char *
5031 output_sibcall (rtx x)
5032 {
5033 if (symbolic_operand (x, VOIDmode))
5034 return m68k_symbolic_jump;
5035 else
5036 return "jmp %a0";
5037 }
5038
5039 static void
5040 m68k_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
5041 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
5042 tree function)
5043 {
5044 rtx this_slot, offset, addr, mem, insn, tmp;
5045
5046 /* Avoid clobbering the struct value reg by using the
5047 static chain reg as a temporary. */
5048 tmp = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
5049
5050 /* Pretend to be a post-reload pass while generating rtl. */
5051 reload_completed = 1;
5052
5053 /* The "this" pointer is stored at 4(%sp). */
5054 this_slot = gen_rtx_MEM (Pmode, plus_constant (stack_pointer_rtx, 4));
5055
5056 /* Add DELTA to THIS. */
5057 if (delta != 0)
5058 {
5059 /* Make the offset a legitimate operand for memory addition. */
5060 offset = GEN_INT (delta);
5061 if ((delta < -8 || delta > 8)
5062 && (TARGET_COLDFIRE || USE_MOVQ (delta)))
5063 {
5064 emit_move_insn (gen_rtx_REG (Pmode, D0_REG), offset);
5065 offset = gen_rtx_REG (Pmode, D0_REG);
5066 }
5067 emit_insn (gen_add3_insn (copy_rtx (this_slot),
5068 copy_rtx (this_slot), offset));
5069 }
5070
5071 /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
5072 if (vcall_offset != 0)
5073 {
5074 /* Set the static chain register to *THIS. */
5075 emit_move_insn (tmp, this_slot);
5076 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
5077
5078 /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
5079 addr = plus_constant (tmp, vcall_offset);
5080 if (!m68k_legitimate_address_p (Pmode, addr, true))
5081 {
5082 emit_insn (gen_rtx_SET (VOIDmode, tmp, addr));
5083 addr = tmp;
5084 }
5085
5086 /* Load the offset into %d0 and add it to THIS. */
5087 emit_move_insn (gen_rtx_REG (Pmode, D0_REG),
5088 gen_rtx_MEM (Pmode, addr));
5089 emit_insn (gen_add3_insn (copy_rtx (this_slot),
5090 copy_rtx (this_slot),
5091 gen_rtx_REG (Pmode, D0_REG)));
5092 }
5093
5094 /* Jump to the target function. Use a sibcall if direct jumps are
5095 allowed, otherwise load the address into a register first. */
5096 mem = DECL_RTL (function);
5097 if (!sibcall_operand (XEXP (mem, 0), VOIDmode))
5098 {
5099 gcc_assert (flag_pic);
5100
5101 if (!TARGET_SEP_DATA)
5102 {
5103 /* Use the static chain register as a temporary (call-clobbered)
5104 GOT pointer for this function. We can use the static chain
5105 register because it isn't live on entry to the thunk. */
5106 SET_REGNO (pic_offset_table_rtx, STATIC_CHAIN_REGNUM);
5107 emit_insn (gen_load_got (pic_offset_table_rtx));
5108 }
5109 legitimize_pic_address (XEXP (mem, 0), Pmode, tmp);
5110 mem = replace_equiv_address (mem, tmp);
5111 }
5112 insn = emit_call_insn (gen_sibcall (mem, const0_rtx));
5113 SIBLING_CALL_P (insn) = 1;
5114
5115 /* Run just enough of rest_of_compilation. */
5116 insn = get_insns ();
5117 split_all_insns_noflow ();
5118 final_start_function (insn, file, 1);
5119 final (insn, file, 1);
5120 final_end_function ();
5121
5122 /* Clean up the vars set above. */
5123 reload_completed = 0;
5124
5125 /* Restore the original PIC register. */
5126 if (flag_pic)
5127 SET_REGNO (pic_offset_table_rtx, PIC_REG);
5128 }
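/* Roughly speaking (an illustrative sketch, not the literal output),
   a thunk with a small DELTA and no VCALL_OFFSET adjusts the "this"
   pointer in its stack slot and tail-calls the target:

       addq.l #DELTA,4(%sp)
       jmp    FUNCTION

   Larger deltas go through %d0 first, and a nonzero vcall offset adds
   an indirection through the static chain register.  */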
5129
5130 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
5131
5132 static rtx
5133 m68k_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
5134 int incoming ATTRIBUTE_UNUSED)
5135 {
5136 return gen_rtx_REG (Pmode, M68K_STRUCT_VALUE_REGNUM);
5137 }
5138
5139 /* Return nonzero if register old_reg can be renamed to register new_reg. */
5140 int
5141 m68k_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED,
5142 unsigned int new_reg)
5143 {
5144
5145 /* Interrupt functions can only use registers that have already been
5146 saved by the prologue, even if they would normally be
5147 call-clobbered. */
5148
5149 if ((m68k_get_function_kind (current_function_decl)
5150 == m68k_fk_interrupt_handler)
5151 && !df_regs_ever_live_p (new_reg))
5152 return 0;
5153
5154 return 1;
5155 }
5156
5157 /* Value is true if hard register REGNO can hold a value of machine-mode
5158 MODE. On the 68000, we let the cpu registers hold any mode, but
5159 restrict the 68881 registers to floating-point modes. */
5160
5161 bool
5162 m68k_regno_mode_ok (int regno, enum machine_mode mode)
5163 {
5164 if (DATA_REGNO_P (regno))
5165 {
5166 /* Data registers can hold any mode that fits in the remaining data registers. */
5167 if (regno + GET_MODE_SIZE (mode) / 4 <= 8)
5168 return true;
5169 }
5170 else if (ADDRESS_REGNO_P (regno))
5171 {
5172 if (regno + GET_MODE_SIZE (mode) / 4 <= 16)
5173 return true;
5174 }
5175 else if (FP_REGNO_P (regno))
5176 {
5177 /* FPU registers can hold a float or complex float of long double
5178 size or smaller. */
5179 if ((GET_MODE_CLASS (mode) == MODE_FLOAT
5180 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
5181 && GET_MODE_UNIT_SIZE (mode) <= TARGET_FP_REG_SIZE)
5182 return true;
5183 }
5184 return false;
5185 }
5186
5187 /* Implement SECONDARY_RELOAD_CLASS. */
5188
5189 enum reg_class
5190 m68k_secondary_reload_class (enum reg_class rclass,
5191 enum machine_mode mode, rtx x)
5192 {
5193 int regno;
5194
5195 regno = true_regnum (x);
5196
5197 /* If one operand of a movqi is an address register, the other
5198 operand must be a general register or constant. Other types
5199 of operand must be reloaded through a data register. */
5200 if (GET_MODE_SIZE (mode) == 1
5201 && reg_classes_intersect_p (rclass, ADDR_REGS)
5202 && !(INT_REGNO_P (regno) || CONSTANT_P (x)))
5203 return DATA_REGS;
5204
5205 /* PC-relative addresses must be loaded into an address register first. */
5206 if (TARGET_PCREL
5207 && !reg_class_subset_p (rclass, ADDR_REGS)
5208 && symbolic_operand (x, VOIDmode))
5209 return ADDR_REGS;
5210
5211 return NO_REGS;
5212 }
5213
5214 /* Implement PREFERRED_RELOAD_CLASS. */
5215
5216 enum reg_class
5217 m68k_preferred_reload_class (rtx x, enum reg_class rclass)
5218 {
5219 enum reg_class secondary_class;
5220
5221 /* If RCLASS might need a secondary reload, try restricting it to
5222 a class that doesn't. */
5223 secondary_class = m68k_secondary_reload_class (rclass, GET_MODE (x), x);
5224 if (secondary_class != NO_REGS
5225 && reg_class_subset_p (secondary_class, rclass))
5226 return secondary_class;
5227
5228 /* Prefer to use moveq for in-range constants. */
5229 if (GET_CODE (x) == CONST_INT
5230 && reg_class_subset_p (DATA_REGS, rclass)
5231 && IN_RANGE (INTVAL (x), -0x80, 0x7f))
5232 return DATA_REGS;
5233
5234 /* ??? Do we really need this now? */
5235 if (GET_CODE (x) == CONST_DOUBLE
5236 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
5237 {
5238 if (TARGET_HARD_FLOAT && reg_class_subset_p (FP_REGS, rclass))
5239 return FP_REGS;
5240
5241 return NO_REGS;
5242 }
5243
5244 return rclass;
5245 }
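/* E.g. reloading the constant 100 into DATA_REGS allows the 2-byte
   "moveq #100,%d0" instead of the 6-byte "move.l #100,%d0", which is
   why in-range CONST_INTs prefer DATA_REGS above.  */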
5246
5247 /* Return floating point values in a 68881 register. This makes 68881 code
5248 a little bit faster. It also makes -msoft-float code incompatible with
5249 hard-float code, so people have to be careful not to mix the two.
5250 For ColdFire it was decided the ABI incompatibility is undesirable.
5251 If there is need for a hard-float ABI it is probably worth doing it
5252 properly and also passing function arguments in FP registers. */
5253 rtx
5254 m68k_libcall_value (enum machine_mode mode)
5255 {
5256 switch (mode) {
5257 case SFmode:
5258 case DFmode:
5259 case XFmode:
5260 if (TARGET_68881)
5261 return gen_rtx_REG (mode, FP0_REG);
5262 break;
5263 default:
5264 break;
5265 }
5266
5267 return gen_rtx_REG (mode, m68k_libcall_value_in_a0_p ? A0_REG : D0_REG);
5268 }
5269
5270 /* Location in which function value is returned.
5271 NOTE: Due to differences in ABIs, don't call this function directly,
5272 use FUNCTION_VALUE instead. */
5273 rtx
5274 m68k_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED)
5275 {
5276 enum machine_mode mode;
5277
5278 mode = TYPE_MODE (valtype);
5279 switch (mode) {
5280 case SFmode:
5281 case DFmode:
5282 case XFmode:
5283 if (TARGET_68881)
5284 return gen_rtx_REG (mode, FP0_REG);
5285 break;
5286 default:
5287 break;
5288 }
5289
5290 /* If the function returns a pointer, push that into %a0. */
5291 if (func && POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (func))))
5292 /* For compatibility with the large body of existing code which
5293 does not always properly declare external functions returning
5294 pointer types, the m68k/SVR4 convention is to copy the value
5295 returned for pointer functions from a0 to d0 in the function
5296 epilogue, so that callers that have neglected to properly
5297 declare the callee can still find the correct return value in
5298 d0. */
5299 return gen_rtx_PARALLEL
5300 (mode,
5301 gen_rtvec (2,
5302 gen_rtx_EXPR_LIST (VOIDmode,
5303 gen_rtx_REG (mode, A0_REG),
5304 const0_rtx),
5305 gen_rtx_EXPR_LIST (VOIDmode,
5306 gen_rtx_REG (mode, D0_REG),
5307 const0_rtx)));
5308 else if (POINTER_TYPE_P (valtype))
5309 return gen_rtx_REG (mode, A0_REG);
5310 else
5311 return gen_rtx_REG (mode, D0_REG);
5312 }
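/* So for a properly declared "char *f (void)" the return value is
   described as living in both %a0 and %d0 (the PARALLEL above),
   matching the m68k/SVR4 epilogue convention of copying %a0 to %d0
   for the benefit of callers with a missing or wrong prototype.  */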
5313
5314 /* Worker function for TARGET_RETURN_IN_MEMORY. */
5315 #if M68K_HONOR_TARGET_STRICT_ALIGNMENT
5316 static bool
5317 m68k_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
5318 {
5319 enum machine_mode mode = TYPE_MODE (type);
5320
5321 if (mode == BLKmode)
5322 return true;
5323
5324 /* If TYPE's known alignment is less than the alignment of MODE that
5325 would contain the structure, then return in memory. We need to
5326 do so to maintain the compatibility between code compiled with
5327 -mstrict-align and that compiled with -mno-strict-align. */
5328 if (AGGREGATE_TYPE_P (type)
5329 && TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (mode))
5330 return true;
5331
5332 return false;
5333 }
5334 #endif
5335
5336 /* CPU to schedule the program for. */
5337 enum attr_cpu m68k_sched_cpu;
5338
5339 /* MAC to schedule the program for. */
5340 enum attr_mac m68k_sched_mac;
5341
5342 /* Operand type. */
5343 enum attr_op_type
5344 {
5345 /* No operand. */
5346 OP_TYPE_NONE,
5347
5348 /* Integer register. */
5349 OP_TYPE_RN,
5350
5351 /* FP register. */
5352 OP_TYPE_FPN,
5353
5354 /* Implicit mem reference (e.g. stack). */
5355 OP_TYPE_MEM1,
5356
5357 /* Memory without offset or indexing. EA modes 2, 3 and 4. */
5358 OP_TYPE_MEM234,
5359
5360 /* Memory with offset but without indexing. EA mode 5. */
5361 OP_TYPE_MEM5,
5362
5363 /* Memory with indexing. EA mode 6. */
5364 OP_TYPE_MEM6,
5365
5366 /* Memory referenced by absolute address. EA mode 7. */
5367 OP_TYPE_MEM7,
5368
5369 /* Immediate operand that doesn't require extension word. */
5370 OP_TYPE_IMM_Q,
5371
5372 /* Immediate 16 bit operand. */
5373 OP_TYPE_IMM_W,
5374
5375 /* Immediate 32 bit operand. */
5376 OP_TYPE_IMM_L
5377 };
5378
5379 /* Return type of memory ADDR_RTX refers to. */
5380 static enum attr_op_type
5381 sched_address_type (enum machine_mode mode, rtx addr_rtx)
5382 {
5383 struct m68k_address address;
5384
5385 if (symbolic_operand (addr_rtx, VOIDmode))
5386 return OP_TYPE_MEM7;
5387
5388 if (!m68k_decompose_address (mode, addr_rtx,
5389 reload_completed, &address))
5390 {
5391 gcc_assert (!reload_completed);
5392 /* Reload will likely fix the address to be in a register. */
5393 return OP_TYPE_MEM234;
5394 }
5395
5396 if (address.scale != 0)
5397 return OP_TYPE_MEM6;
5398
5399 if (address.base != NULL_RTX)
5400 {
5401 if (address.offset == NULL_RTX)
5402 return OP_TYPE_MEM234;
5403
5404 return OP_TYPE_MEM5;
5405 }
5406
5407 gcc_assert (address.offset != NULL_RTX);
5408
5409 return OP_TYPE_MEM7;
5410 }
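/* Illustrative classifications: (mem:SI (reg:SI %a0)) is
   OP_TYPE_MEM234 (EA mode 2), (mem:SI (plus:SI (reg:SI %a0)
   (const_int 8))) is OP_TYPE_MEM5, an address with an index register
   is OP_TYPE_MEM6, and a bare symbol reference is OP_TYPE_MEM7.  */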
5411
5412 /* Return operand X or Y (depending on OPX_P) of INSN. */
5413 static rtx
5414 sched_get_operand (rtx insn, bool opx_p)
5415 {
5416 int i;
5417
5418 if (recog_memoized (insn) < 0)
5419 gcc_unreachable ();
5420
5421 extract_constrain_insn_cached (insn);
5422
5423 if (opx_p)
5424 i = get_attr_opx (insn);
5425 else
5426 i = get_attr_opy (insn);
5427
5428 if (i >= recog_data.n_operands)
5429 return NULL;
5430
5431 return recog_data.operand[i];
5432 }
5433
5434 /* Return type of INSN's operand X (if OPX_P) or operand Y (if !OPX_P).
5435 If ADDRESS_P is true, return type of memory location operand refers to. */
5436 static enum attr_op_type
5437 sched_attr_op_type (rtx insn, bool opx_p, bool address_p)
5438 {
5439 rtx op;
5440
5441 op = sched_get_operand (insn, opx_p);
5442
5443 if (op == NULL)
5444 {
5445 gcc_assert (!reload_completed);
5446 return OP_TYPE_RN;
5447 }
5448
5449 if (address_p)
5450 return sched_address_type (QImode, op);
5451
5452 if (memory_operand (op, VOIDmode))
5453 return sched_address_type (GET_MODE (op), XEXP (op, 0));
5454
5455 if (register_operand (op, VOIDmode))
5456 {
5457 if ((!reload_completed && FLOAT_MODE_P (GET_MODE (op)))
5458 || (reload_completed && FP_REG_P (op)))
5459 return OP_TYPE_FPN;
5460
5461 return OP_TYPE_RN;
5462 }
5463
5464 if (GET_CODE (op) == CONST_INT)
5465 {
5466 int ival;
5467
5468 ival = INTVAL (op);
5469
5470 /* Check for quick constants. */
5471 switch (get_attr_type (insn))
5472 {
5473 case TYPE_ALUQ_L:
5474 if (IN_RANGE (ival, 1, 8) || IN_RANGE (ival, -8, -1))
5475 return OP_TYPE_IMM_Q;
5476
5477 gcc_assert (!reload_completed);
5478 break;
5479
5480 case TYPE_MOVEQ_L:
5481 if (USE_MOVQ (ival))
5482 return OP_TYPE_IMM_Q;
5483
5484 gcc_assert (!reload_completed);
5485 break;
5486
5487 case TYPE_MOV3Q_L:
5488 if (valid_mov3q_const (ival))
5489 return OP_TYPE_IMM_Q;
5490
5491 gcc_assert (!reload_completed);
5492 break;
5493
5494 default:
5495 break;
5496 }
5497
5498 if (IN_RANGE (ival, -0x8000, 0x7fff))
5499 return OP_TYPE_IMM_W;
5500
5501 return OP_TYPE_IMM_L;
5502 }
5503
5504 if (GET_CODE (op) == CONST_DOUBLE)
5505 {
5506 switch (GET_MODE (op))
5507 {
5508 case SFmode:
5509 return OP_TYPE_IMM_W;
5510
5511 case VOIDmode:
5512 case DFmode:
5513 return OP_TYPE_IMM_L;
5514
5515 default:
5516 gcc_unreachable ();
5517 }
5518 }
5519
5520 if (GET_CODE (op) == CONST
5521 || symbolic_operand (op, VOIDmode)
5522 || LABEL_P (op))
5523 {
5524 switch (GET_MODE (op))
5525 {
5526 case QImode:
5527 return OP_TYPE_IMM_Q;
5528
5529 case HImode:
5530 return OP_TYPE_IMM_W;
5531
5532 case SImode:
5533 return OP_TYPE_IMM_L;
5534
5535 default:
5536 if (symbolic_operand (m68k_unwrap_symbol (op, false), VOIDmode))
5537 /* Just a guess. */
5538 return OP_TYPE_IMM_W;
5539
5540 return OP_TYPE_IMM_L;
5541 }
5542 }
5543
5544 gcc_assert (!reload_completed);
5545
5546 if (FLOAT_MODE_P (GET_MODE (op)))
5547 return OP_TYPE_FPN;
5548
5549 return OP_TYPE_RN;
5550 }
5551
5552 /* Implement opx_type attribute.
5553 Return type of INSN's operand X.
5554 If ADDRESS_P is true, return type of memory location operand refers to. */
5555 enum attr_opx_type
5556 m68k_sched_attr_opx_type (rtx insn, int address_p)
5557 {
5558 switch (sched_attr_op_type (insn, true, address_p != 0))
5559 {
5560 case OP_TYPE_RN:
5561 return OPX_TYPE_RN;
5562
5563 case OP_TYPE_FPN:
5564 return OPX_TYPE_FPN;
5565
5566 case OP_TYPE_MEM1:
5567 return OPX_TYPE_MEM1;
5568
5569 case OP_TYPE_MEM234:
5570 return OPX_TYPE_MEM234;
5571
5572 case OP_TYPE_MEM5:
5573 return OPX_TYPE_MEM5;
5574
5575 case OP_TYPE_MEM6:
5576 return OPX_TYPE_MEM6;
5577
5578 case OP_TYPE_MEM7:
5579 return OPX_TYPE_MEM7;
5580
5581 case OP_TYPE_IMM_Q:
5582 return OPX_TYPE_IMM_Q;
5583
5584 case OP_TYPE_IMM_W:
5585 return OPX_TYPE_IMM_W;
5586
5587 case OP_TYPE_IMM_L:
5588 return OPX_TYPE_IMM_L;
5589
5590 default:
5591 gcc_unreachable ();
5592 }
5593 }
5594
5595 /* Implement opy_type attribute.
5596 Return type of INSN's operand Y.
5597 If ADDRESS_P is true, return type of memory location operand refers to. */
5598 enum attr_opy_type
5599 m68k_sched_attr_opy_type (rtx insn, int address_p)
5600 {
5601 switch (sched_attr_op_type (insn, false, address_p != 0))
5602 {
5603 case OP_TYPE_RN:
5604 return OPY_TYPE_RN;
5605
5606 case OP_TYPE_FPN:
5607 return OPY_TYPE_FPN;
5608
5609 case OP_TYPE_MEM1:
5610 return OPY_TYPE_MEM1;
5611
5612 case OP_TYPE_MEM234:
5613 return OPY_TYPE_MEM234;
5614
5615 case OP_TYPE_MEM5:
5616 return OPY_TYPE_MEM5;
5617
5618 case OP_TYPE_MEM6:
5619 return OPY_TYPE_MEM6;
5620
5621 case OP_TYPE_MEM7:
5622 return OPY_TYPE_MEM7;
5623
5624 case OP_TYPE_IMM_Q:
5625 return OPY_TYPE_IMM_Q;
5626
5627 case OP_TYPE_IMM_W:
5628 return OPY_TYPE_IMM_W;
5629
5630 case OP_TYPE_IMM_L:
5631 return OPY_TYPE_IMM_L;
5632
5633 default:
5634 gcc_unreachable ();
5635 }
5636 }
5637
5638 /* Return size of INSN as int. */
5639 static int
5640 sched_get_attr_size_int (rtx insn)
5641 {
5642 int size;
5643
5644 switch (get_attr_type (insn))
5645 {
5646 case TYPE_IGNORE:
5647 /* There should be no references to m68k_sched_attr_size for 'ignore'
5648 instructions. */
5649 gcc_unreachable ();
5650 return 0;
5651
5652 case TYPE_MUL_L:
5653 size = 2;
5654 break;
5655
5656 default:
5657 size = 1;
5658 break;
5659 }
5660
5661 switch (get_attr_opx_type (insn))
5662 {
5663 case OPX_TYPE_NONE:
5664 case OPX_TYPE_RN:
5665 case OPX_TYPE_FPN:
5666 case OPX_TYPE_MEM1:
5667 case OPX_TYPE_MEM234:
5668 case OPX_TYPE_IMM_Q:
5669 break;
5670
5671 case OPX_TYPE_MEM5:
5672 case OPX_TYPE_MEM6:
5673 /* Here we assume that most absolute references are short. */
5674 case OPX_TYPE_MEM7:
5675 case OPX_TYPE_IMM_W:
5676 ++size;
5677 break;
5678
5679 case OPX_TYPE_IMM_L:
5680 size += 2;
5681 break;
5682
5683 default:
5684 gcc_unreachable ();
5685 }
5686
5687 switch (get_attr_opy_type (insn))
5688 {
5689 case OPY_TYPE_NONE:
5690 case OPY_TYPE_RN:
5691 case OPY_TYPE_FPN:
5692 case OPY_TYPE_MEM1:
5693 case OPY_TYPE_MEM234:
5694 case OPY_TYPE_IMM_Q:
5695 break;
5696
5697 case OPY_TYPE_MEM5:
5698 case OPY_TYPE_MEM6:
5699 /* Here we assume that most absolute references are short. */
5700 case OPY_TYPE_MEM7:
5701 case OPY_TYPE_IMM_W:
5702 ++size;
5703 break;
5704
5705 case OPY_TYPE_IMM_L:
5706 size += 2;
5707 break;
5708
5709 default:
5710 gcc_unreachable ();
5711 }
5712
5713 if (size > 3)
5714 {
5715 gcc_assert (!reload_completed);
5716
5717 size = 3;
5718 }
5719
5720 return size;
5721 }
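/* Illustrative example of the computation above: an add.l #imm32,%d0
   insn has base size 1; its Y operand is an OPY_TYPE_IMM_L immediate,
   which adds 2 more words, for a total of 3 words (the opcode word plus
   two immediate extension words).  */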
5722
5723 /* Return the size of INSN as an attribute enum value. */
5724 enum attr_size
5725 m68k_sched_attr_size (rtx insn)
5726 {
5727 switch (sched_get_attr_size_int (insn))
5728 {
5729 case 1:
5730 return SIZE_1;
5731
5732 case 2:
5733 return SIZE_2;
5734
5735 case 3:
5736 return SIZE_3;
5737
5738 default:
5739 gcc_unreachable ();
5740 }
5741 }
5742
5743 /* Return the memory access class (OP_TYPE_RN, OP_TYPE_MEM1 or
5744 OP_TYPE_MEM6) of INSN's operand X or Y (depending on OPX_P). */
5745 static enum attr_op_type
5746 sched_get_opxy_mem_type (rtx insn, bool opx_p)
5747 {
5748 if (opx_p)
5749 {
5750 switch (get_attr_opx_type (insn))
5751 {
5752 case OPX_TYPE_NONE:
5753 case OPX_TYPE_RN:
5754 case OPX_TYPE_FPN:
5755 case OPX_TYPE_IMM_Q:
5756 case OPX_TYPE_IMM_W:
5757 case OPX_TYPE_IMM_L:
5758 return OP_TYPE_RN;
5759
5760 case OPX_TYPE_MEM1:
5761 case OPX_TYPE_MEM234:
5762 case OPX_TYPE_MEM5:
5763 case OPX_TYPE_MEM7:
5764 return OP_TYPE_MEM1;
5765
5766 case OPX_TYPE_MEM6:
5767 return OP_TYPE_MEM6;
5768
5769 default:
5770 gcc_unreachable ();
5771 }
5772 }
5773 else
5774 {
5775 switch (get_attr_opy_type (insn))
5776 {
5777 case OPY_TYPE_NONE:
5778 case OPY_TYPE_RN:
5779 case OPY_TYPE_FPN:
5780 case OPY_TYPE_IMM_Q:
5781 case OPY_TYPE_IMM_W:
5782 case OPY_TYPE_IMM_L:
5783 return OP_TYPE_RN;
5784
5785 case OPY_TYPE_MEM1:
5786 case OPY_TYPE_MEM234:
5787 case OPY_TYPE_MEM5:
5788 case OPY_TYPE_MEM7:
5789 return OP_TYPE_MEM1;
5790
5791 case OPY_TYPE_MEM6:
5792 return OP_TYPE_MEM6;
5793
5794 default:
5795 gcc_unreachable ();
5796 }
5797 }
5798 }
5799
5800 /* Implement op_mem attribute. */
5801 enum attr_op_mem
5802 m68k_sched_attr_op_mem (rtx insn)
5803 {
5804 enum attr_op_type opx;
5805 enum attr_op_type opy;
5806
5807 opx = sched_get_opxy_mem_type (insn, true);
5808 opy = sched_get_opxy_mem_type (insn, false);
5809
5810 if (opy == OP_TYPE_RN && opx == OP_TYPE_RN)
5811 return OP_MEM_00;
5812
5813 if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM1)
5814 {
5815 switch (get_attr_opx_access (insn))
5816 {
5817 case OPX_ACCESS_R:
5818 return OP_MEM_10;
5819
5820 case OPX_ACCESS_W:
5821 return OP_MEM_01;
5822
5823 case OPX_ACCESS_RW:
5824 return OP_MEM_11;
5825
5826 default:
5827 gcc_unreachable ();
5828 }
5829 }
5830
5831 if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM6)
5832 {
5833 switch (get_attr_opx_access (insn))
5834 {
5835 case OPX_ACCESS_R:
5836 return OP_MEM_I0;
5837
5838 case OPX_ACCESS_W:
5839 return OP_MEM_0I;
5840
5841 case OPX_ACCESS_RW:
5842 return OP_MEM_I1;
5843
5844 default:
5845 gcc_unreachable ();
5846 }
5847 }
5848
5849 if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_RN)
5850 return OP_MEM_10;
5851
5852 if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM1)
5853 {
5854 switch (get_attr_opx_access (insn))
5855 {
5856 case OPX_ACCESS_W:
5857 return OP_MEM_11;
5858
5859 default:
5860 gcc_assert (!reload_completed);
5861 return OP_MEM_11;
5862 }
5863 }
5864
5865 if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM6)
5866 {
5867 switch (get_attr_opx_access (insn))
5868 {
5869 case OPX_ACCESS_W:
5870 return OP_MEM_1I;
5871
5872 default:
5873 gcc_assert (!reload_completed);
5874 return OP_MEM_1I;
5875 }
5876 }
5877
5878 if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_RN)
5879 return OP_MEM_I0;
5880
5881 if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM1)
5882 {
5883 switch (get_attr_opx_access (insn))
5884 {
5885 case OPX_ACCESS_W:
5886 return OP_MEM_I1;
5887
5888 default:
5889 gcc_assert (!reload_completed);
5890 return OP_MEM_I1;
5891 }
5892 }
5893
5894 gcc_assert (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM6);
5895 gcc_assert (!reload_completed);
5896 return OP_MEM_I1;
5897 }
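/* Illustrative example: for move.l (%a0),(%a1) both operands are plain
   memory references (OP_TYPE_MEM1) and operand X is written
   (OPX_ACCESS_W), so the function above returns OP_MEM_11: one memory
   read and one memory write.  */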
5898
5899 /* Jump instruction types, indexed by INSN_UID.
5900 The same rtl insn can be expanded into different asm instructions
5901 depending on the cc0_status. To properly determine the type of a jump
5902 instruction we scan the instruction stream and map jump types into this
5903 array. */
5904 static enum attr_type *sched_branch_type;
5905
5906 /* Return the type of the jump insn. */
5907 enum attr_type
5908 m68k_sched_branch_type (rtx insn)
5909 {
5910 enum attr_type type;
5911
5912 type = sched_branch_type[INSN_UID (insn)];
5913
5914 gcc_assert (type != 0);
5915
5916 return type;
5917 }
5918
5919 /* Data for the ColdFire V4 index bypass.
5920 The producer modifies a register that the consumer uses as an index
5921 with the specified scale. */
5922 static struct
5923 {
5924 /* Producer instruction. */
5925 rtx pro;
5926
5927 /* Consumer instruction. */
5928 rtx con;
5929
5930 /* Scale of the indexed memory access within the consumer,
5931 or zero if the bypass is currently not in effect. */
5932 int scale;
5933 } sched_cfv4_bypass_data;
5934
5935 /* An empty state that is used in m68k_sched_adjust_cost. */
5936 static state_t sched_adjust_cost_state;
5937
5938 /* Implement adjust_cost scheduler hook.
5939 Return adjusted COST of dependency LINK between DEF_INSN and INSN. */
5940 static int
5941 m68k_sched_adjust_cost (rtx insn, rtx link ATTRIBUTE_UNUSED, rtx def_insn,
5942 int cost)
5943 {
5944 int delay;
5945
5946 if (recog_memoized (def_insn) < 0
5947 || recog_memoized (insn) < 0)
5948 return cost;
5949
5950 if (sched_cfv4_bypass_data.scale == 1)
5951 /* Handle ColdFire V4 bypass for indexed address with 1x scale. */
5952 {
5953 /* haifa-sched.c: insn_cost () calls bypass_p () just before
5954 targetm.sched.adjust_cost (). Hence, we can be relatively sure
5955 that the data in sched_cfv4_bypass_data is up to date. */
5956 gcc_assert (sched_cfv4_bypass_data.pro == def_insn
5957 && sched_cfv4_bypass_data.con == insn);
5958
5959 if (cost < 3)
5960 cost = 3;
5961
5962 sched_cfv4_bypass_data.pro = NULL;
5963 sched_cfv4_bypass_data.con = NULL;
5964 sched_cfv4_bypass_data.scale = 0;
5965 }
5966 else
5967 gcc_assert (sched_cfv4_bypass_data.pro == NULL
5968 && sched_cfv4_bypass_data.con == NULL
5969 && sched_cfv4_bypass_data.scale == 0);
5970
5971 /* Don't try to issue INSN earlier than the DFA permits.
5972 This is especially useful for instructions that write to memory,
5973 as their true dependence (default) latency is better set to 0
5974 to work around alias analysis limitations.
5975 This is, in fact, a machine-independent tweak, so it should
5976 probably be moved to haifa-sched.c: insn_cost (). */
5977 delay = min_insn_conflict_delay (sched_adjust_cost_state, def_insn, insn);
5978 if (delay > cost)
5979 cost = delay;
5980
5981 return cost;
5982 }
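/* Illustrative example (assuming both insns reserve the same DFA units):
   if the DFA requires DEF_INSN and INSN to issue at least 2 cycles
   apart, a dependency cost of 0 is raised to 2 by the
   min_insn_conflict_delay check above.  */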
5983
5984 /* Return the maximum number of insns that can be scheduled in a single cycle. */
5985 static int
5986 m68k_sched_issue_rate (void)
5987 {
5988 switch (m68k_sched_cpu)
5989 {
5990 case CPU_CFV1:
5991 case CPU_CFV2:
5992 case CPU_CFV3:
5993 return 1;
5994
5995 case CPU_CFV4:
5996 return 2;
5997
5998 default:
5999 gcc_unreachable ();
6000 return 0;
6001 }
6002 }
6003
6004 /* Maximum instruction length, in words, for the current CPU.
6005 E.g. it is 3 for any ColdFire core. */
6006 static int max_insn_size;
6007
6008 /* Data to model instruction buffer of CPU. */
6009 struct _sched_ib
6010 {
6011 /* True if the instruction buffer is modeled for the current CPU. */
6012 bool enabled_p;
6013
6014 /* Size of the instruction buffer in words. */
6015 int size;
6016
6017 /* Number of filled words in the instruction buffer. */
6018 int filled;
6019
6020 /* Additional information about the instruction buffer for CPUs that have
6021 a buffer of instruction records, rather than a plain buffer
6022 of instruction words. */
6023 struct _sched_ib_records
6024 {
6025 /* Size of buffer in records. */
6026 int n_insns;
6027
6028 /* Array to hold data on adjustments made to the size of the buffer. */
6029 int *adjust;
6030
6031 /* Index of the above array. */
6032 int adjust_index;
6033 } records;
6034
6035 /* An insn that reserves (marks empty) one word in the instruction buffer. */
6036 rtx insn;
6037 };
6038
6039 static struct _sched_ib sched_ib;
6040
6041 /* ID of memory unit. */
6042 static int sched_mem_unit_code;
6043
6044 /* Implementation of the targetm.sched.variable_issue () hook.
6045 It is called after INSN was issued. It returns the number of insns
6046 that can possibly get scheduled on the current cycle.
6047 It is used here to determine the effect of INSN on the instruction
6048 buffer. */
6049 static int
6050 m68k_sched_variable_issue (FILE *sched_dump ATTRIBUTE_UNUSED,
6051 int sched_verbose ATTRIBUTE_UNUSED,
6052 rtx insn, int can_issue_more)
6053 {
6054 int insn_size;
6055
6056 if (recog_memoized (insn) >= 0 && get_attr_type (insn) != TYPE_IGNORE)
6057 {
6058 switch (m68k_sched_cpu)
6059 {
6060 case CPU_CFV1:
6061 case CPU_CFV2:
6062 insn_size = sched_get_attr_size_int (insn);
6063 break;
6064
6065 case CPU_CFV3:
6066 insn_size = sched_get_attr_size_int (insn);
6067
6068 /* ColdFire V3 and V4 cores have instruction buffers that can
6069 accumulate up to 8 instructions regardless of the instructions'
6070 sizes. So we should take care not to "prefetch" 24 one-word
6071 or 12 two-word instructions.
6072 To model this behavior we temporarily decrease the size of the
6073 buffer by (max_insn_size - insn_size) for the next 7 instructions. */
6074 {
6075 int adjust;
6076
6077 adjust = max_insn_size - insn_size;
6078 sched_ib.size -= adjust;
6079
6080 if (sched_ib.filled > sched_ib.size)
6081 sched_ib.filled = sched_ib.size;
6082
6083 sched_ib.records.adjust[sched_ib.records.adjust_index] = adjust;
6084 }
6085
6086 ++sched_ib.records.adjust_index;
6087 if (sched_ib.records.adjust_index == sched_ib.records.n_insns)
6088 sched_ib.records.adjust_index = 0;
6089
6090 /* Undo the adjustment we made 7 instructions ago. */
6091 sched_ib.size
6092 += sched_ib.records.adjust[sched_ib.records.adjust_index];
6093
6094 break;
6095
6096 case CPU_CFV4:
6097 gcc_assert (!sched_ib.enabled_p);
6098 insn_size = 0;
6099 break;
6100
6101 default:
6102 gcc_unreachable ();
6103 }
6104
6105 if (insn_size > sched_ib.filled)
6106 /* Scheduling for register pressure does not always take the DFA into
6107 account. Work around the instruction buffer not being filled enough. */
6108 {
6109 gcc_assert (sched_pressure_p);
6110 insn_size = sched_ib.filled;
6111 }
6112
6113 --can_issue_more;
6114 }
6115 else if (GET_CODE (PATTERN (insn)) == ASM_INPUT
6116 || asm_noperands (PATTERN (insn)) >= 0)
6117 insn_size = sched_ib.filled;
6118 else
6119 insn_size = 0;
6120
6121 sched_ib.filled -= insn_size;
6122
6123 return can_issue_more;
6124 }
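/* Illustrative example for the ColdFire V3 model above: the buffer
   starts at 8 records * 3 words = 24 words. After a one-word insn is
   issued, adjust = 3 - 1 = 2 and the modeled buffer shrinks to 22 words
   for the next 7 instructions, matching the 8-record hardware limit.  */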
6125
6126 /* Return how many instructions the scheduler should look ahead to choose
6127 the best one. */
6128 static int
6129 m68k_sched_first_cycle_multipass_dfa_lookahead (void)
6130 {
6131 return m68k_sched_issue_rate () - 1;
6132 }
6133
6134 /* Implementation of targetm.sched.init_global () hook.
6135 It is invoked once per scheduling pass and is used here
6136 to initialize scheduler constants. */
6137 static void
6138 m68k_sched_md_init_global (FILE *sched_dump ATTRIBUTE_UNUSED,
6139 int sched_verbose ATTRIBUTE_UNUSED,
6140 int n_insns ATTRIBUTE_UNUSED)
6141 {
6142 /* Init branch types. */
6143 {
6144 rtx insn;
6145
6146 sched_branch_type = XCNEWVEC (enum attr_type, get_max_uid () + 1);
6147
6148 for (insn = get_insns (); insn != NULL_RTX; insn = NEXT_INSN (insn))
6149 {
6150 if (JUMP_P (insn))
6151 /* !!! FIXME: Implement real scan here. */
6152 sched_branch_type[INSN_UID (insn)] = TYPE_BCC;
6153 }
6154 }
6155
6156 #ifdef ENABLE_CHECKING
6157 /* Check that all instructions have DFA reservations and
6158 that all instructions can be issued from a clean state. */
6159 {
6160 rtx insn;
6161 state_t state;
6162
6163 state = alloca (state_size ());
6164
6165 for (insn = get_insns (); insn != NULL_RTX; insn = NEXT_INSN (insn))
6166 {
6167 if (INSN_P (insn) && recog_memoized (insn) >= 0)
6168 {
6169 gcc_assert (insn_has_dfa_reservation_p (insn));
6170
6171 state_reset (state);
6172 if (state_transition (state, insn) >= 0)
6173 gcc_unreachable ();
6174 }
6175 }
6176 }
6177 #endif
6178
6179 /* Set up the target CPU. */
6180
6181 /* ColdFire V4 has a set of features to keep its instruction buffer full
6182 (e.g., a separate memory bus for instructions) and, hence, we do not model
6183 the buffer for this CPU. */
6184 sched_ib.enabled_p = (m68k_sched_cpu != CPU_CFV4);
6185
6186 switch (m68k_sched_cpu)
6187 {
6188 case CPU_CFV4:
6189 sched_ib.filled = 0;
6190
6191 /* FALLTHRU */
6192
6193 case CPU_CFV1:
6194 case CPU_CFV2:
6195 max_insn_size = 3;
6196 sched_ib.records.n_insns = 0;
6197 sched_ib.records.adjust = NULL;
6198 break;
6199
6200 case CPU_CFV3:
6201 max_insn_size = 3;
6202 sched_ib.records.n_insns = 8;
6203 sched_ib.records.adjust = XNEWVEC (int, sched_ib.records.n_insns);
6204 break;
6205
6206 default:
6207 gcc_unreachable ();
6208 }
6209
6210 sched_mem_unit_code = get_cpu_unit_code ("cf_mem1");
6211
6212 sched_adjust_cost_state = xmalloc (state_size ());
6213 state_reset (sched_adjust_cost_state);
6214
6215 start_sequence ();
6216 emit_insn (gen_ib ());
6217 sched_ib.insn = get_insns ();
6218 end_sequence ();
6219 }
6220
6221 /* Scheduling pass is now finished. Free/reset static variables. */
6222 static void
6223 m68k_sched_md_finish_global (FILE *dump ATTRIBUTE_UNUSED,
6224 int verbose ATTRIBUTE_UNUSED)
6225 {
6226 sched_ib.insn = NULL;
6227
6228 free (sched_adjust_cost_state);
6229 sched_adjust_cost_state = NULL;
6230
6231 sched_mem_unit_code = 0;
6232
6233 free (sched_ib.records.adjust);
6234 sched_ib.records.adjust = NULL;
6235 sched_ib.records.n_insns = 0;
6236 max_insn_size = 0;
6237
6238 free (sched_branch_type);
6239 sched_branch_type = NULL;
6240 }
6241
6242 /* Implementation of targetm.sched.init () hook.
6243 It is invoked each time the scheduler starts on a new block (basic block or
6244 extended basic block). */
6245 static void
6246 m68k_sched_md_init (FILE *sched_dump ATTRIBUTE_UNUSED,
6247 int sched_verbose ATTRIBUTE_UNUSED,
6248 int n_insns ATTRIBUTE_UNUSED)
6249 {
6250 switch (m68k_sched_cpu)
6251 {
6252 case CPU_CFV1:
6253 case CPU_CFV2:
6254 sched_ib.size = 6;
6255 break;
6256
6257 case CPU_CFV3:
6258 sched_ib.size = sched_ib.records.n_insns * max_insn_size;
6259
6260 memset (sched_ib.records.adjust, 0,
6261 sched_ib.records.n_insns * sizeof (*sched_ib.records.adjust));
6262 sched_ib.records.adjust_index = 0;
6263 break;
6264
6265 case CPU_CFV4:
6266 gcc_assert (!sched_ib.enabled_p);
6267 sched_ib.size = 0;
6268 break;
6269
6270 default:
6271 gcc_unreachable ();
6272 }
6273
6274 if (sched_ib.enabled_p)
6275 /* haifa-sched.c: schedule_block () calls advance_cycle () just before
6276 the first cycle. Work around that. */
6277 sched_ib.filled = -2;
6278 }
6279
6280 /* Implementation of targetm.sched.dfa_pre_advance_cycle () hook.
6281 It is invoked just before the current cycle finishes and is used here
6282 to track whether the instruction buffer got its two words this cycle. */
6283 static void
6284 m68k_sched_dfa_pre_advance_cycle (void)
6285 {
6286 if (!sched_ib.enabled_p)
6287 return;
6288
6289 if (!cpu_unit_reservation_p (curr_state, sched_mem_unit_code))
6290 {
6291 sched_ib.filled += 2;
6292
6293 if (sched_ib.filled > sched_ib.size)
6294 sched_ib.filled = sched_ib.size;
6295 }
6296 }
6297
6298 /* Implementation of targetm.sched.dfa_post_advance_cycle () hook.
6299 It is invoked just after a new cycle begins and is used here
6300 to set up the number of filled words in the instruction buffer so that
6301 instructions which won't have all their words prefetched will be
6302 stalled for a cycle. */
6303 static void
6304 m68k_sched_dfa_post_advance_cycle (void)
6305 {
6306 int i;
6307
6308 if (!sched_ib.enabled_p)
6309 return;
6310
6311 /* Set up the number of prefetched instruction words in the instruction
6312 buffer. */
6313 i = max_insn_size - sched_ib.filled;
6314
6315 while (--i >= 0)
6316 {
6317 if (state_transition (curr_state, sched_ib.insn) >= 0)
6318 gcc_unreachable ();
6319 }
6320 }
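/* Illustrative example: with max_insn_size == 3 and only one word
   prefetched, the loop above issues the ib reservation insn twice,
   reserving the two missing words so that a full three-word instruction
   conflicts in the DFA and stalls for a cycle.  */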
6321
6322 /* Return X or Y (depending on OPX_P) operand of INSN,
6323 if it is an integer register, or NULL otherwise. */
6324 static rtx
6325 sched_get_reg_operand (rtx insn, bool opx_p)
6326 {
6327 rtx op = NULL;
6328
6329 if (opx_p)
6330 {
6331 if (get_attr_opx_type (insn) == OPX_TYPE_RN)
6332 {
6333 op = sched_get_operand (insn, true);
6334 gcc_assert (op != NULL);
6335
6336 if (!reload_completed && !REG_P (op))
6337 return NULL;
6338 }
6339 }
6340 else
6341 {
6342 if (get_attr_opy_type (insn) == OPY_TYPE_RN)
6343 {
6344 op = sched_get_operand (insn, false);
6345 gcc_assert (op != NULL);
6346
6347 if (!reload_completed && !REG_P (op))
6348 return NULL;
6349 }
6350 }
6351
6352 return op;
6353 }
6354
6355 /* Return true if the X or Y (depending on OPX_P) operand of INSN
6356 is a MEM. */
6357 static bool
6358 sched_mem_operand_p (rtx insn, bool opx_p)
6359 {
6360 switch (sched_get_opxy_mem_type (insn, opx_p))
6361 {
6362 case OP_TYPE_MEM1:
6363 case OP_TYPE_MEM6:
6364 return true;
6365
6366 default:
6367 return false;
6368 }
6369 }
6370
6371 /* Return the MEM operand of INSN that is read (MUST_READ_P) and/or
6372 written (MUST_WRITE_P). */
6373 static rtx
6374 sched_get_mem_operand (rtx insn, bool must_read_p, bool must_write_p)
6375 {
6376 bool opx_p;
6377 bool opy_p;
6378
6379 opx_p = false;
6380 opy_p = false;
6381
6382 if (must_read_p)
6383 {
6384 opx_p = true;
6385 opy_p = true;
6386 }
6387
6388 if (must_write_p)
6389 {
6390 opx_p = true;
6391 opy_p = false;
6392 }
6393
6394 if (opy_p && sched_mem_operand_p (insn, false))
6395 return sched_get_operand (insn, false);
6396
6397 if (opx_p && sched_mem_operand_p (insn, true))
6398 return sched_get_operand (insn, true);
6399
6400 gcc_unreachable ();
6401 return NULL;
6402 }
6403
6404 /* Return non-zero if PRO modifies a register used as part of
6405 an address in CON. */
6406 int
6407 m68k_sched_address_bypass_p (rtx pro, rtx con)
6408 {
6409 rtx pro_x;
6410 rtx con_mem_read;
6411
6412 pro_x = sched_get_reg_operand (pro, true);
6413 if (pro_x == NULL)
6414 return 0;
6415
6416 con_mem_read = sched_get_mem_operand (con, true, false);
6417 gcc_assert (con_mem_read != NULL);
6418
6419 if (reg_mentioned_p (pro_x, con_mem_read))
6420 return 1;
6421
6422 return 0;
6423 }
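/* Illustrative example: with PRO "add.l %d1,%a0" and CON
   "move.l (%a0),%d2", the %a0 written by PRO is mentioned in CON's
   address, so the function above returns 1 and the address bypass
   applies.  */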
6424
6425 /* Helper function for m68k_sched_indexed_address_bypass_p.
6426 If PRO modifies a register used as an index in CON, return the scale
6427 of the indexed memory access in CON. Return zero otherwise. */
6428 static int
6429 sched_get_indexed_address_scale (rtx pro, rtx con)
6430 {
6431 rtx reg;
6432 rtx mem;
6433 struct m68k_address address;
6434
6435 reg = sched_get_reg_operand (pro, true);
6436 if (reg == NULL)
6437 return 0;
6438
6439 mem = sched_get_mem_operand (con, true, false);
6440 gcc_assert (mem != NULL && MEM_P (mem));
6441
6442 if (!m68k_decompose_address (GET_MODE (mem), XEXP (mem, 0), reload_completed,
6443 &address))
6444 gcc_unreachable ();
6445
6446 if (REGNO (reg) == REGNO (address.index))
6447 {
6448 gcc_assert (address.scale != 0);
6449 return address.scale;
6450 }
6451
6452 return 0;
6453 }
6454
6455 /* Return non-zero if PRO modifies a register used
6456 as an index with scale 2 or 4 in CON. */
6457 int
6458 m68k_sched_indexed_address_bypass_p (rtx pro, rtx con)
6459 {
6460 gcc_assert (sched_cfv4_bypass_data.pro == NULL
6461 && sched_cfv4_bypass_data.con == NULL
6462 && sched_cfv4_bypass_data.scale == 0);
6463
6464 switch (sched_get_indexed_address_scale (pro, con))
6465 {
6466 case 1:
6467 /* We can't have a variable latency bypass, so
6468 remember to adjust the insn cost in adjust_cost hook. */
6469 sched_cfv4_bypass_data.pro = pro;
6470 sched_cfv4_bypass_data.con = con;
6471 sched_cfv4_bypass_data.scale = 1;
6472 return 0;
6473
6474 case 2:
6475 case 4:
6476 return 1;
6477
6478 default:
6479 return 0;
6480 }
6481 }
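/* Illustrative example: if PRO writes %d0 and CON reads
   (%a0,%d0.l*4), the scale is 4 and the function above returns 1,
   enabling the bypass; with (%a0,%d0.l*1) the pair is only recorded in
   sched_cfv4_bypass_data so that m68k_sched_adjust_cost can raise the
   latency instead.  */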
6482
6483 /* We generate a two-instruction program at M_TRAMP:
6484 movea.l &CHAIN_VALUE,%a0
6485 jmp FNADDR
6486 where %a0 can be modified by changing STATIC_CHAIN_REGNUM. */
6487
6488 static void
6489 m68k_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
6490 {
6491 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
6492 rtx mem;
6493
6494 gcc_assert (ADDRESS_REGNO_P (STATIC_CHAIN_REGNUM));
6495
6496 mem = adjust_address (m_tramp, HImode, 0);
6497 emit_move_insn (mem, GEN_INT (0x207C + ((STATIC_CHAIN_REGNUM - 8) << 9)));
6498 mem = adjust_address (m_tramp, SImode, 2);
6499 emit_move_insn (mem, chain_value);
6500
6501 mem = adjust_address (m_tramp, HImode, 6);
6502 emit_move_insn (mem, GEN_INT (0x4EF9));
6503 mem = adjust_address (m_tramp, SImode, 8);
6504 emit_move_insn (mem, fnaddr);
6505
6506 FINALIZE_TRAMPOLINE (XEXP (m_tramp, 0));
6507 }
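/* Resulting 12-byte trampoline, sketched for the common case where
   STATIC_CHAIN_REGNUM is %a0 (so the first opcode word is plain 0x207C):

   offset 0: 0x207C       movea.l #CHAIN_VALUE,%a0
   offset 2: CHAIN_VALUE  (32-bit immediate)
   offset 6: 0x4EF9       jmp (FNADDR).l
   offset 8: FNADDR       (32-bit absolute address)  */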
6508
6509 /* On the 68000, the RTS insn cannot pop anything.
6510 On the 68010, the RTD insn may be used to pop them if the number
6511 of args is fixed, but if the number is variable then the caller
6512 must pop them all. RTD can't be used for library calls now
6513 because the library is compiled with the Unix compiler.
6514 Use of RTD is a selectable option, since it is incompatible with
6515 standard Unix calling sequences. If the option is not selected,
6516 the caller must always pop the args. */
6517
6518 static int
6519 m68k_return_pops_args (tree fundecl, tree funtype, int size)
6520 {
6521 return ((TARGET_RTD
6522 && (!fundecl
6523 || TREE_CODE (fundecl) != IDENTIFIER_NODE)
6524 && (!stdarg_p (funtype)))
6525 ? size : 0);
6526 }
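/* Illustrative example: with -mrtd, a fixed-argument function whose
   arguments occupy 8 bytes returns with "rtd #8", popping them in the
   callee; otherwise it returns with plain "rts" and the caller pops the
   arguments.  */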
6527
6528 /* Make sure everything's fine if we *don't* have a given processor.
6529 This assumes that putting a register in fixed_regs will keep the
6530 compiler's mitts completely off it. We don't bother to zero it out
6531 of register classes. */
6532
6533 static void
6534 m68k_conditional_register_usage (void)
6535 {
6536 int i;
6537 HARD_REG_SET x;
6538 if (!TARGET_HARD_FLOAT)
6539 {
6540 COPY_HARD_REG_SET (x, reg_class_contents[(int)FP_REGS]);
6541 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
6542 if (TEST_HARD_REG_BIT (x, i))
6543 fixed_regs[i] = call_used_regs[i] = 1;
6544 }
6545 if (flag_pic)
6546 fixed_regs[PIC_REG] = call_used_regs[PIC_REG] = 1;
6547 }
6548
6549 #include "gt-m68k.h"