/* Subroutines for insn-output.c for Motorola 68000 family.
   Copyright (C) 1987, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
   2001, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "function.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "expr.h"
#include "reload.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "debug.h"
#include "flags.h"
#include "df.h"
/* ??? Need to add a dependency between m68k.o and sched-int.h.  */
#include "sched-int.h"
#include "insn-codes.h"
#include "ggc.h"
#include "opts.h"

enum reg_class regno_reg_class[] =
{
  DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
  DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
  ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
  ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  ADDR_REGS
};


/* The minimum number of integer registers that we want to save with the
   movem instruction.  Using two movel instructions instead of a single
   moveml is about 15% faster for the 68020 and 68030 at no expense in
   code size.  */
#define MIN_MOVEM_REGS 3

/* The minimum number of floating point registers that we want to save
   with the fmovem instruction.  */
#define MIN_FMOVEM_REGS 1

/* Structure describing stack frame layout.  */
struct m68k_frame
{
  /* Stack pointer to frame pointer offset.  */
  HOST_WIDE_INT offset;

  /* Offset of FPU registers.  */
  HOST_WIDE_INT foffset;

  /* Frame size in bytes (rounded up).  */
  HOST_WIDE_INT size;

  /* Data and address register.  */
  int reg_no;
  unsigned int reg_mask;

  /* FPU registers.  */
  int fpu_no;
  unsigned int fpu_mask;

  /* Offsets relative to ARG_POINTER.  */
  HOST_WIDE_INT frame_pointer_offset;
  HOST_WIDE_INT stack_pointer_offset;

  /* Function which the above information refers to.  */
  int funcdef_no;
};

/* Current frame information calculated by m68k_compute_frame_layout().  */
static struct m68k_frame current_frame;

/* Structure describing an m68k address.

   If CODE is UNKNOWN, the address is BASE + INDEX * SCALE + OFFSET,
   with null fields evaluating to 0.  Here:

   - BASE satisfies m68k_legitimate_base_reg_p
   - INDEX satisfies m68k_legitimate_index_reg_p
   - OFFSET satisfies m68k_legitimate_constant_address_p

   INDEX is either HImode or SImode.  The other fields are SImode.

   If CODE is PRE_DEC, the address is -(BASE).  If CODE is POST_INC,
   the address is (BASE)+.  */
struct m68k_address {
  enum rtx_code code;
  rtx base;
  rtx index;
  rtx offset;
  int scale;
};
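
/* For example, the 68020 address (8,%a0,%d1.l*4) decomposes into
   CODE == UNKNOWN, BASE == %a0, INDEX == %d1, SCALE == 4 and
   OFFSET == (const_int 8), while a push destination -(%sp)
   decomposes into CODE == PRE_DEC with BASE == %sp.  */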

static int m68k_sched_adjust_cost (rtx, rtx, rtx, int);
static int m68k_sched_issue_rate (void);
static int m68k_sched_variable_issue (FILE *, int, rtx, int);
static void m68k_sched_md_init_global (FILE *, int, int);
static void m68k_sched_md_finish_global (FILE *, int);
static void m68k_sched_md_init (FILE *, int, int);
static void m68k_sched_dfa_pre_advance_cycle (void);
static void m68k_sched_dfa_post_advance_cycle (void);
static int m68k_sched_first_cycle_multipass_dfa_lookahead (void);

static bool m68k_can_eliminate (const int, const int);
static void m68k_conditional_register_usage (void);
static bool m68k_legitimate_address_p (enum machine_mode, rtx, bool);
static void m68k_option_override (void);
static rtx find_addr_reg (rtx);
static const char *singlemove_string (rtx *);
static void m68k_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				  HOST_WIDE_INT, tree);
static rtx m68k_struct_value_rtx (tree, int);
static tree m68k_handle_fndecl_attribute (tree *node, tree name,
					  tree args, int flags,
					  bool *no_add_attrs);
static void m68k_compute_frame_layout (void);
static bool m68k_save_reg (unsigned int regno, bool interrupt_handler);
static bool m68k_ok_for_sibcall_p (tree, tree);
static bool m68k_tls_symbol_p (rtx);
static rtx m68k_legitimize_address (rtx, rtx, enum machine_mode);
static bool m68k_rtx_costs (rtx, int, int, int *, bool);
#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
static bool m68k_return_in_memory (const_tree, const_tree);
#endif
static void m68k_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
static void m68k_trampoline_init (rtx, tree, rtx);
static int m68k_return_pops_args (tree, tree, int);
static rtx m68k_delegitimize_address (rtx);
static void m68k_function_arg_advance (cumulative_args_t, enum machine_mode,
				       const_tree, bool);
static rtx m68k_function_arg (cumulative_args_t, enum machine_mode,
			      const_tree, bool);
static bool m68k_cannot_force_const_mem (enum machine_mode mode, rtx x);
\f
/* Initialize the GCC target structure.  */

#if INT_OP_GROUP == INT_OP_DOT_WORD
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
#endif

#if INT_OP_GROUP == INT_OP_NO_DOT
#undef TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tbyte\t"
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tshort\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tlong\t"
#endif

#if INT_OP_GROUP == INT_OP_DC
#undef TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tdc.b\t"
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tdc.w\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tdc.l\t"
#endif

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK m68k_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_ASM_FILE_START_APP_OFF
#define TARGET_ASM_FILE_START_APP_OFF true

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS m68k_legitimize_address

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST m68k_sched_adjust_cost

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE m68k_sched_issue_rate

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE m68k_sched_variable_issue

#undef TARGET_SCHED_INIT_GLOBAL
#define TARGET_SCHED_INIT_GLOBAL m68k_sched_md_init_global

#undef TARGET_SCHED_FINISH_GLOBAL
#define TARGET_SCHED_FINISH_GLOBAL m68k_sched_md_finish_global

#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT m68k_sched_md_init

#undef TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE
#define TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE m68k_sched_dfa_pre_advance_cycle

#undef TARGET_SCHED_DFA_POST_ADVANCE_CYCLE
#define TARGET_SCHED_DFA_POST_ADVANCE_CYCLE m68k_sched_dfa_post_advance_cycle

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
  m68k_sched_first_cycle_multipass_dfa_lookahead

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE m68k_option_override

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS m68k_rtx_costs

#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE m68k_attribute_table

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX m68k_struct_value_rtx

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM m68k_cannot_force_const_mem

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL m68k_ok_for_sibcall_p

#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY m68k_return_in_memory
#endif

#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS (true)

#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL m68k_output_dwarf_dtprel
#endif

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P m68k_legitimate_address_p

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE m68k_can_eliminate

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE m68k_conditional_register_usage

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT m68k_trampoline_init

#undef TARGET_RETURN_POPS_ARGS
#define TARGET_RETURN_POPS_ARGS m68k_return_pops_args

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS m68k_delegitimize_address

#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG m68k_function_arg

#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE m68k_function_arg_advance

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P m68k_legitimate_constant_p

static const struct attribute_spec m68k_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "interrupt", 0, 0, true, false, false, m68k_handle_fndecl_attribute,
    false },
  { "interrupt_handler", 0, 0, true, false, false,
    m68k_handle_fndecl_attribute, false },
  { "interrupt_thread", 0, 0, true, false, false,
    m68k_handle_fndecl_attribute, false },
  { NULL, 0, 0, false, false, false, NULL, false }
};

struct gcc_target targetm = TARGET_INITIALIZER;
\f
/* Base flags for 68k ISAs.  */
#define FL_FOR_isa_00    FL_ISA_68000
#define FL_FOR_isa_10    (FL_FOR_isa_00 | FL_ISA_68010)
/* FL_68881 controls the default setting of -m68881.  gcc has traditionally
   generated 68881 code for 68020 and 68030 targets unless explicitly told
   not to.  */
#define FL_FOR_isa_20    (FL_FOR_isa_10 | FL_ISA_68020 \
			  | FL_BITFIELD | FL_68881)
#define FL_FOR_isa_40    (FL_FOR_isa_20 | FL_ISA_68040)
#define FL_FOR_isa_cpu32 (FL_FOR_isa_10 | FL_ISA_68020)

/* Base flags for ColdFire ISAs.  */
#define FL_FOR_isa_a     (FL_COLDFIRE | FL_ISA_A)
#define FL_FOR_isa_aplus (FL_FOR_isa_a | FL_ISA_APLUS | FL_CF_USP)
/* Note ISA_B doesn't necessarily include USP (user stack pointer) support.  */
#define FL_FOR_isa_b     (FL_FOR_isa_a | FL_ISA_B | FL_CF_HWDIV)
/* ISA_C is not upwardly compatible with ISA_B.  */
#define FL_FOR_isa_c     (FL_FOR_isa_a | FL_ISA_C | FL_CF_USP)

enum m68k_isa
{
  /* Traditional 68000 instruction sets.  */
  isa_00,
  isa_10,
  isa_20,
  isa_40,
  isa_cpu32,
  /* ColdFire instruction set variants.  */
  isa_a,
  isa_aplus,
  isa_b,
  isa_c,
  isa_max
};

/* Information about one of the -march, -mcpu or -mtune arguments.  */
struct m68k_target_selection
{
  /* The argument being described.  */
  const char *name;

  /* For -mcpu, this is the device selected by the option.
     For -mtune and -march, it is a representative device
     for the microarchitecture or ISA respectively.  */
  enum target_device device;

  /* The M68K_DEVICE fields associated with DEVICE.  See the comment
     in m68k-devices.def for details.  FAMILY is only valid for -mcpu.  */
  const char *family;
  enum uarch_type microarch;
  enum m68k_isa isa;
  unsigned long flags;
};

/* A list of all devices in m68k-devices.def.  Used for -mcpu selection.  */
static const struct m68k_target_selection all_devices[] =
{
#define M68K_DEVICE(NAME,ENUM_VALUE,FAMILY,MULTILIB,MICROARCH,ISA,FLAGS) \
  { NAME, ENUM_VALUE, FAMILY, u##MICROARCH, ISA, FLAGS | FL_FOR_##ISA },
#include "m68k-devices.def"
#undef M68K_DEVICE
  { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
};

/* A list of all ISAs, mapping each one to a representative device.
   Used for -march selection.  */
static const struct m68k_target_selection all_isas[] =
{
#define M68K_ISA(NAME,DEVICE,MICROARCH,ISA,FLAGS) \
  { NAME, DEVICE, NULL, u##MICROARCH, ISA, FLAGS },
#include "m68k-isas.def"
#undef M68K_ISA
  { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
};

/* A list of all microarchitectures, mapping each one to a representative
   device.  Used for -mtune selection.  */
static const struct m68k_target_selection all_microarchs[] =
{
#define M68K_MICROARCH(NAME,DEVICE,MICROARCH,ISA,FLAGS) \
  { NAME, DEVICE, NULL, u##MICROARCH, ISA, FLAGS },
#include "m68k-microarchs.def"
#undef M68K_MICROARCH
  { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
};
\f
/* The entries associated with the -mcpu, -march and -mtune settings,
   or null for options that have not been used.  */
const struct m68k_target_selection *m68k_cpu_entry;
const struct m68k_target_selection *m68k_arch_entry;
const struct m68k_target_selection *m68k_tune_entry;

/* Which CPU we are generating code for.  */
enum target_device m68k_cpu;

/* Which microarchitecture to tune for.  */
enum uarch_type m68k_tune;

/* Which FPU to use.  */
enum fpu_type m68k_fpu;

/* The set of FL_* flags that apply to the target processor.  */
unsigned int m68k_cpu_flags;

/* The set of FL_* flags that apply to the processor to be tuned for.  */
unsigned int m68k_tune_flags;

/* Asm templates for calling or jumping to an arbitrary symbolic address,
   or NULL if such calls or jumps are not supported.  The address is held
   in operand 0.  */
const char *m68k_symbolic_call;
const char *m68k_symbolic_jump;

/* Enum variable that corresponds to m68k_symbolic_call values.  */
enum M68K_SYMBOLIC_CALL m68k_symbolic_call_var;

\f
/* Implement TARGET_OPTION_OVERRIDE.  */

static void
m68k_option_override (void)
{
  const struct m68k_target_selection *entry;
  unsigned long target_mask;

  if (global_options_set.x_m68k_arch_option)
    m68k_arch_entry = &all_isas[m68k_arch_option];

  if (global_options_set.x_m68k_cpu_option)
    m68k_cpu_entry = &all_devices[(int) m68k_cpu_option];

  if (global_options_set.x_m68k_tune_option)
    m68k_tune_entry = &all_microarchs[(int) m68k_tune_option];

  /* The user can choose:

     -mcpu=
     -march=
     -mtune=

     -march=ARCH should generate code that runs on any processor
     implementing architecture ARCH.  -mcpu=CPU should override -march
     and should generate code that runs on processor CPU, making free
     use of any instructions that CPU understands.  -mtune=UARCH applies
     on top of -mcpu or -march and optimizes the code for UARCH.  It does
     not change the target architecture.  */
  if (m68k_cpu_entry)
    {
      /* Complain if the -march setting is for a different microarchitecture,
	 or includes flags that the -mcpu setting doesn't.  */
      if (m68k_arch_entry
	  && (m68k_arch_entry->microarch != m68k_cpu_entry->microarch
	      || (m68k_arch_entry->flags & ~m68k_cpu_entry->flags) != 0))
	warning (0, "-mcpu=%s conflicts with -march=%s",
		 m68k_cpu_entry->name, m68k_arch_entry->name);

      entry = m68k_cpu_entry;
    }
  else
    entry = m68k_arch_entry;

  if (!entry)
    entry = all_devices + TARGET_CPU_DEFAULT;

  m68k_cpu_flags = entry->flags;

  /* Use the architecture setting to derive default values for
     certain flags.  */
  target_mask = 0;

  /* ColdFire is lenient about alignment.  */
  if (!TARGET_COLDFIRE)
    target_mask |= MASK_STRICT_ALIGNMENT;

  if ((m68k_cpu_flags & FL_BITFIELD) != 0)
    target_mask |= MASK_BITFIELD;
  if ((m68k_cpu_flags & FL_CF_HWDIV) != 0)
    target_mask |= MASK_CF_HWDIV;
  if ((m68k_cpu_flags & (FL_68881 | FL_CF_FPU)) != 0)
    target_mask |= MASK_HARD_FLOAT;
  target_flags |= target_mask & ~target_flags_explicit;

  /* Set the directly-usable versions of the -mcpu and -mtune settings.  */
  m68k_cpu = entry->device;
  if (m68k_tune_entry)
    {
      m68k_tune = m68k_tune_entry->microarch;
      m68k_tune_flags = m68k_tune_entry->flags;
    }
#ifdef M68K_DEFAULT_TUNE
  else if (!m68k_cpu_entry && !m68k_arch_entry)
    {
      enum target_device dev;
      dev = all_microarchs[M68K_DEFAULT_TUNE].device;
      m68k_tune_flags = all_devices[dev].flags;
    }
#endif
  else
    {
      m68k_tune = entry->microarch;
      m68k_tune_flags = entry->flags;
    }

  /* Set the type of FPU.  */
  m68k_fpu = (!TARGET_HARD_FLOAT ? FPUTYPE_NONE
	      : (m68k_cpu_flags & FL_COLDFIRE) != 0 ? FPUTYPE_COLDFIRE
	      : FPUTYPE_68881);

  /* Sanity check to ensure that -msep-data and -mid-shared-library are not
     specified together.  Doing so simply doesn't make sense.  */
  if (TARGET_SEP_DATA && TARGET_ID_SHARED_LIBRARY)
    error ("cannot specify both -msep-data and -mid-shared-library");

  /* If we're generating code for a separate A5-relative data segment,
     we've got to enable -fPIC as well.  This might be relaxable to
     -fpic, but it hasn't been tested properly.  */
  if (TARGET_SEP_DATA || TARGET_ID_SHARED_LIBRARY)
    flag_pic = 2;

  /* -mpcrel -fPIC uses 32-bit pc-relative displacements.  Raise an
     error if the target does not support them.  */
  if (TARGET_PCREL && !TARGET_68020 && flag_pic == 2)
    error ("-mpcrel -fPIC is not currently supported on selected cpu");

  /* ??? A historic way of turning on pic, or is this intended to
     be an embedded thing that doesn't have the same name binding
     significance that it does on hosted ELF systems?  */
  if (TARGET_PCREL && flag_pic == 0)
    flag_pic = 1;

  if (!flag_pic)
    {
      m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_JSR;

      m68k_symbolic_jump = "jra %a0";
    }
  else if (TARGET_ID_SHARED_LIBRARY)
    /* All addresses must be loaded from the GOT.  */
    ;
  else if (TARGET_68020 || TARGET_ISAB || TARGET_ISAC)
    {
      if (TARGET_PCREL)
	m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_C;
      else
	m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_P;

      if (TARGET_ISAC)
	/* No unconditional long branch */;
      else if (TARGET_PCREL)
	m68k_symbolic_jump = "bra%.l %c0";
      else
	m68k_symbolic_jump = "bra%.l %p0";
      /* Turn off function cse if we are doing PIC.  We always want
	 function calls to be done as `bsr foo@PLTPC'.  */
      /* ??? It's traditional to do this for -mpcrel too, but it isn't
	 clear how intentional that is.  */
      flag_no_function_cse = 1;
    }

  switch (m68k_symbolic_call_var)
    {
    case M68K_SYMBOLIC_CALL_JSR:
      m68k_symbolic_call = "jsr %a0";
      break;

    case M68K_SYMBOLIC_CALL_BSR_C:
      m68k_symbolic_call = "bsr%.l %c0";
      break;

    case M68K_SYMBOLIC_CALL_BSR_P:
      m68k_symbolic_call = "bsr%.l %p0";
      break;

    case M68K_SYMBOLIC_CALL_NONE:
      gcc_assert (m68k_symbolic_call == NULL);
      break;

    default:
      gcc_unreachable ();
    }

#ifndef ASM_OUTPUT_ALIGN_WITH_NOP
  if (align_labels > 2)
    {
      warning (0, "-falign-labels=%d is not supported", align_labels);
      align_labels = 0;
    }
  if (align_loops > 2)
    {
      warning (0, "-falign-loops=%d is not supported", align_loops);
      align_loops = 0;
    }
#endif

  SUBTARGET_OVERRIDE_OPTIONS;

  /* Set up scheduling options.  */
  if (TUNE_CFV1)
    m68k_sched_cpu = CPU_CFV1;
  else if (TUNE_CFV2)
    m68k_sched_cpu = CPU_CFV2;
  else if (TUNE_CFV3)
    m68k_sched_cpu = CPU_CFV3;
  else if (TUNE_CFV4)
    m68k_sched_cpu = CPU_CFV4;
  else
    {
      m68k_sched_cpu = CPU_UNKNOWN;
      flag_schedule_insns = 0;
      flag_schedule_insns_after_reload = 0;
      flag_modulo_sched = 0;
    }

  if (m68k_sched_cpu != CPU_UNKNOWN)
    {
      if ((m68k_cpu_flags & (FL_CF_EMAC | FL_CF_EMAC_B)) != 0)
	m68k_sched_mac = MAC_CF_EMAC;
      else if ((m68k_cpu_flags & FL_CF_MAC) != 0)
	m68k_sched_mac = MAC_CF_MAC;
      else
	m68k_sched_mac = MAC_NO;
    }
}

/* Generate a macro of the form __mPREFIX_cpu_NAME, where PREFIX is the
   given argument and NAME is the argument passed to -mcpu.  Return NULL
   if -mcpu was not passed.  */

const char *
m68k_cpp_cpu_ident (const char *prefix)
{
  if (!m68k_cpu_entry)
    return NULL;
  return concat ("__m", prefix, "_cpu_", m68k_cpu_entry->name, NULL);
}

/* Generate a macro of the form __mPREFIX_family_NAME, where PREFIX is the
   given argument and NAME is the name of the representative device for
   the -mcpu argument's family.  Return NULL if -mcpu was not passed.  */

const char *
m68k_cpp_cpu_family (const char *prefix)
{
  if (!m68k_cpu_entry)
    return NULL;
  return concat ("__m", prefix, "_family_", m68k_cpu_entry->family, NULL);
}
\f
/* Return m68k_fk_interrupt_handler if FUNC has an "interrupt" or
   "interrupt_handler" attribute and m68k_fk_interrupt_thread if FUNC
   has an "interrupt_thread" attribute.  Otherwise, return
   m68k_fk_normal_function.  */

enum m68k_function_kind
m68k_get_function_kind (tree func)
{
  tree a;

  gcc_assert (TREE_CODE (func) == FUNCTION_DECL);

  a = lookup_attribute ("interrupt", DECL_ATTRIBUTES (func));
  if (a != NULL_TREE)
    return m68k_fk_interrupt_handler;

  a = lookup_attribute ("interrupt_handler", DECL_ATTRIBUTES (func));
  if (a != NULL_TREE)
    return m68k_fk_interrupt_handler;

  a = lookup_attribute ("interrupt_thread", DECL_ATTRIBUTES (func));
  if (a != NULL_TREE)
    return m68k_fk_interrupt_thread;

  return m68k_fk_normal_function;
}

/* Handle an attribute requiring a FUNCTION_DECL; arguments as in
   struct attribute_spec.handler.  */
static tree
m68k_handle_fndecl_attribute (tree *node, tree name,
			      tree args ATTRIBUTE_UNUSED,
			      int flags ATTRIBUTE_UNUSED,
			      bool *no_add_attrs)
{
  if (TREE_CODE (*node) != FUNCTION_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
	       name);
      *no_add_attrs = true;
    }

  if (m68k_get_function_kind (*node) != m68k_fk_normal_function)
    {
      error ("multiple interrupt attributes not allowed");
      *no_add_attrs = true;
    }

  if (!TARGET_FIDOA
      && !strcmp (IDENTIFIER_POINTER (name), "interrupt_thread"))
    {
      error ("interrupt_thread is available only on fido");
      *no_add_attrs = true;
    }

  return NULL_TREE;
}

static void
m68k_compute_frame_layout (void)
{
  int regno, saved;
  unsigned int mask;
  enum m68k_function_kind func_kind =
    m68k_get_function_kind (current_function_decl);
  bool interrupt_handler = func_kind == m68k_fk_interrupt_handler;
  bool interrupt_thread = func_kind == m68k_fk_interrupt_thread;

  /* Only compute the frame once per function.
     Don't cache information until reload has been completed.  */
  if (current_frame.funcdef_no == current_function_funcdef_no
      && reload_completed)
    return;

  current_frame.size = (get_frame_size () + 3) & -4;

  mask = saved = 0;

  /* An interrupt thread does not need to save any registers.  */
  if (!interrupt_thread)
    for (regno = 0; regno < 16; regno++)
      if (m68k_save_reg (regno, interrupt_handler))
	{
	  mask |= 1 << (regno - D0_REG);
	  saved++;
	}
  current_frame.offset = saved * 4;
  current_frame.reg_no = saved;
  current_frame.reg_mask = mask;

  current_frame.foffset = 0;
  mask = saved = 0;
  if (TARGET_HARD_FLOAT)
    {
      /* An interrupt thread does not need to save any registers.  */
      if (!interrupt_thread)
	for (regno = 16; regno < 24; regno++)
	  if (m68k_save_reg (regno, interrupt_handler))
	    {
	      mask |= 1 << (regno - FP0_REG);
	      saved++;
	    }
      current_frame.foffset = saved * TARGET_FP_REG_SIZE;
      current_frame.offset += current_frame.foffset;
    }
  current_frame.fpu_no = saved;
  current_frame.fpu_mask = mask;

  /* Remember what function this frame refers to.  */
  current_frame.funcdef_no = current_function_funcdef_no;
}
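
/* For example, on a 68881 target a function with a 16-byte frame that
   saves %d2, %d3 and %fp2 ends up with size == 16, reg_no == 2,
   foffset == 12 (TARGET_FP_REG_SIZE is 12 for the 68881) and
   offset == 2 * 4 + 12 == 20.  */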

/* Worker function for TARGET_CAN_ELIMINATE.  */

bool
m68k_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
{
  return (to == STACK_POINTER_REGNUM ? ! frame_pointer_needed : true);
}

HOST_WIDE_INT
m68k_initial_elimination_offset (int from, int to)
{
  int argptr_offset;
  /* The arg pointer points 8 bytes before the start of the arguments,
     as defined by FIRST_PARM_OFFSET.  This makes it coincident with the
     frame pointer in most frames.  */
  argptr_offset = frame_pointer_needed ? 0 : UNITS_PER_WORD;
  if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
    return argptr_offset;

  m68k_compute_frame_layout ();

  gcc_assert (to == STACK_POINTER_REGNUM);
  switch (from)
    {
    case ARG_POINTER_REGNUM:
      return current_frame.offset + current_frame.size - argptr_offset;
    case FRAME_POINTER_REGNUM:
      return current_frame.offset + current_frame.size;
    default:
      gcc_unreachable ();
    }
}

/* Refer to the array `regs_ever_live' to determine which registers
   to save; `regs_ever_live[I]' is nonzero if register number I
   is ever used in the function.  This function is responsible for
   knowing which registers should not be saved even if used.
   Return true if we need to save REGNO.  */

static bool
m68k_save_reg (unsigned int regno, bool interrupt_handler)
{
  if (flag_pic && regno == PIC_REG)
    {
      if (crtl->saves_all_registers)
	return true;
      if (crtl->uses_pic_offset_table)
	return true;
      /* Reload may introduce constant pool references into a function
	 that previously didn't need a PIC register.  Note that the test
	 above will not catch that case because we will only set
	 crtl->uses_pic_offset_table when emitting
	 the address reloads.  */
      if (crtl->uses_const_pool)
	return true;
    }

  if (crtl->calls_eh_return)
    {
      unsigned int i;
      for (i = 0; ; i++)
	{
	  unsigned int test = EH_RETURN_DATA_REGNO (i);
	  if (test == INVALID_REGNUM)
	    break;
	  if (test == regno)
	    return true;
	}
    }

  /* Fixed regs we never touch.  */
  if (fixed_regs[regno])
    return false;

  /* The frame pointer (if it is such) is handled specially.  */
  if (regno == FRAME_POINTER_REGNUM && frame_pointer_needed)
    return false;

  /* Interrupt handlers must also save call_used_regs
     if they are live or when calling nested functions.  */
  if (interrupt_handler)
    {
      if (df_regs_ever_live_p (regno))
	return true;

      if (!current_function_is_leaf && call_used_regs[regno])
	return true;
    }

  /* Never need to save registers that aren't touched.  */
  if (!df_regs_ever_live_p (regno))
    return false;

  /* Otherwise save everything that isn't call-clobbered.  */
  return !call_used_regs[regno];
}

/* Emit RTL for a MOVEM or FMOVEM instruction.  BASE + OFFSET represents
   the lowest memory address.  COUNT is the number of registers to be
   moved, with register REGNO + I being moved if bit I of MASK is set.
   STORE_P specifies the direction of the move and ADJUST_STACK_P says
   whether or not this is a pre-decrement (if STORE_P) or post-increment
   (if !STORE_P) operation.  */

static rtx
m68k_emit_movem (rtx base, HOST_WIDE_INT offset,
		 unsigned int count, unsigned int regno,
		 unsigned int mask, bool store_p, bool adjust_stack_p)
{
  int i;
  rtx body, addr, src, operands[2];
  enum machine_mode mode;

  body = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (adjust_stack_p + count));
  mode = reg_raw_mode[regno];
  i = 0;

  if (adjust_stack_p)
    {
      src = plus_constant (base, (count
				  * GET_MODE_SIZE (mode)
				  * (HOST_WIDE_INT) (store_p ? -1 : 1)));
      XVECEXP (body, 0, i++) = gen_rtx_SET (VOIDmode, base, src);
    }

  for (; mask != 0; mask >>= 1, regno++)
    if (mask & 1)
      {
	addr = plus_constant (base, offset);
	operands[!store_p] = gen_frame_mem (mode, addr);
	operands[store_p] = gen_rtx_REG (mode, regno);
	XVECEXP (body, 0, i++)
	  = gen_rtx_SET (VOIDmode, operands[0], operands[1]);
	offset += GET_MODE_SIZE (mode);
      }
  gcc_assert (i == XVECLEN (body, 0));

  return emit_insn (body);
}
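
/* For example, storing %d2-%d4 with STORE_P and ADJUST_STACK_P set
   (BASE == %sp, OFFSET == -12) emits a single PARALLEL of the form:

     (parallel [(set %sp (plus %sp (const_int -12)))
		(set (mem (plus %sp (const_int -12))) (reg %d2))
		(set (mem (plus %sp (const_int -8))) (reg %d3))
		(set (mem (plus %sp (const_int -4))) (reg %d4))])

   which the move-multiple insn patterns then match.  */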

/* Make INSN a frame-related instruction.  */

static void
m68k_set_frame_related (rtx insn)
{
  rtx body;
  int i;

  RTX_FRAME_RELATED_P (insn) = 1;
  body = PATTERN (insn);
  if (GET_CODE (body) == PARALLEL)
    for (i = 0; i < XVECLEN (body, 0); i++)
      RTX_FRAME_RELATED_P (XVECEXP (body, 0, i)) = 1;
}

/* Emit RTL for the "prologue" define_expand.  */

void
m68k_expand_prologue (void)
{
  HOST_WIDE_INT fsize_with_regs;
  rtx limit, src, dest;

  m68k_compute_frame_layout ();

  if (flag_stack_usage_info)
    current_function_static_stack_size
      = current_frame.size + current_frame.offset;

  /* If the stack limit is a symbol, we can check it here,
     before actually allocating the space.  */
  if (crtl->limit_stack
      && GET_CODE (stack_limit_rtx) == SYMBOL_REF)
    {
      limit = plus_constant (stack_limit_rtx, current_frame.size + 4);
      if (!m68k_legitimate_constant_p (Pmode, limit))
	{
	  emit_move_insn (gen_rtx_REG (Pmode, D0_REG), limit);
	  limit = gen_rtx_REG (Pmode, D0_REG);
	}
      emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode,
					    stack_pointer_rtx, limit),
			       stack_pointer_rtx, limit,
			       const1_rtx));
    }

  fsize_with_regs = current_frame.size;
  if (TARGET_COLDFIRE)
    {
      /* ColdFire's move multiple instructions do not allow pre-decrement
	 addressing.  Add the size of movem saves to the initial stack
	 allocation instead.  */
      if (current_frame.reg_no >= MIN_MOVEM_REGS)
	fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
      if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
	fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
    }

  if (frame_pointer_needed)
    {
      if (fsize_with_regs == 0 && TUNE_68040)
	{
	  /* On the 68040, two separate moves are faster than link.w 0.  */
	  dest = gen_frame_mem (Pmode,
				gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
	  m68k_set_frame_related (emit_move_insn (dest, frame_pointer_rtx));
	  m68k_set_frame_related (emit_move_insn (frame_pointer_rtx,
						  stack_pointer_rtx));
	}
      else if (fsize_with_regs < 0x8000 || TARGET_68020)
	m68k_set_frame_related
	  (emit_insn (gen_link (frame_pointer_rtx,
				GEN_INT (-4 - fsize_with_regs))));
      else
	{
	  m68k_set_frame_related
	    (emit_insn (gen_link (frame_pointer_rtx, GEN_INT (-4))));
	  m68k_set_frame_related
	    (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-fsize_with_regs))));
	}

      /* If the frame pointer is needed, emit a special barrier that
	 will prevent the scheduler from moving stores to the frame
	 before the stack adjustment.  */
      emit_insn (gen_stack_tie (stack_pointer_rtx, frame_pointer_rtx));
    }
  else if (fsize_with_regs != 0)
    m68k_set_frame_related
      (emit_insn (gen_addsi3 (stack_pointer_rtx,
			      stack_pointer_rtx,
			      GEN_INT (-fsize_with_regs))));

  if (current_frame.fpu_mask)
    {
      gcc_assert (current_frame.fpu_no >= MIN_FMOVEM_REGS);
      if (TARGET_68881)
	m68k_set_frame_related
	  (m68k_emit_movem (stack_pointer_rtx,
			    current_frame.fpu_no * -GET_MODE_SIZE (XFmode),
			    current_frame.fpu_no, FP0_REG,
			    current_frame.fpu_mask, true, true));
      else
	{
	  int offset;

	  /* If we're using moveml to save the integer registers,
	     the stack pointer will point to the bottom of the moveml
	     save area.  Find the stack offset of the first FP register.  */
	  if (current_frame.reg_no < MIN_MOVEM_REGS)
	    offset = 0;
	  else
	    offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
	  m68k_set_frame_related
	    (m68k_emit_movem (stack_pointer_rtx, offset,
			      current_frame.fpu_no, FP0_REG,
			      current_frame.fpu_mask, true, false));
	}
    }

  /* If the stack limit is not a symbol, check it here.
     This has the disadvantage that it may be too late...  */
  if (crtl->limit_stack)
    {
      if (REG_P (stack_limit_rtx))
	emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode, stack_pointer_rtx,
					      stack_limit_rtx),
				 stack_pointer_rtx, stack_limit_rtx,
				 const1_rtx));

      else if (GET_CODE (stack_limit_rtx) != SYMBOL_REF)
	warning (0, "stack limit expression is not supported");
    }

  if (current_frame.reg_no < MIN_MOVEM_REGS)
    {
      /* Store each register separately in the same order moveml does.  */
      int i;

      for (i = 16; i-- > 0; )
	if (current_frame.reg_mask & (1 << i))
	  {
	    src = gen_rtx_REG (SImode, D0_REG + i);
	    dest = gen_frame_mem (SImode,
				  gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
	    m68k_set_frame_related (emit_insn (gen_movsi (dest, src)));
	  }
    }
  else
    {
      if (TARGET_COLDFIRE)
	/* The required register save space has already been allocated.
	   The first register should be stored at (%sp).  */
	m68k_set_frame_related
	  (m68k_emit_movem (stack_pointer_rtx, 0,
			    current_frame.reg_no, D0_REG,
			    current_frame.reg_mask, true, false));
      else
	m68k_set_frame_related
	  (m68k_emit_movem (stack_pointer_rtx,
			    current_frame.reg_no * -GET_MODE_SIZE (SImode),
			    current_frame.reg_no, D0_REG,
			    current_frame.reg_mask, true, true));
    }

  if (!TARGET_SEP_DATA
      && crtl->uses_pic_offset_table)
    emit_insn (gen_load_got (pic_offset_table_rtx));
}
\f
/* Return true if a simple (return) instruction is sufficient for this
   function (i.e. if no epilogue is needed).  */

bool
m68k_use_return_insn (void)
{
  if (!reload_completed || frame_pointer_needed || get_frame_size () != 0)
    return false;

  m68k_compute_frame_layout ();
  return current_frame.offset == 0;
}

/* Emit RTL for the "epilogue" or "sibcall_epilogue" define_expand;
   SIBCALL_P says which.

   The function epilogue should not depend on the current stack pointer!
   It should use the frame pointer only, if there is a frame pointer.
   This is mandatory because of alloca; we also take advantage of it to
   omit stack adjustments before returning.  */

void
m68k_expand_epilogue (bool sibcall_p)
{
  HOST_WIDE_INT fsize, fsize_with_regs;
  bool big, restore_from_sp;

  m68k_compute_frame_layout ();

  fsize = current_frame.size;
  big = false;
  restore_from_sp = false;

  /* FIXME: current_function_is_leaf below is too strong.
     What we really need to know there is whether there could be a
     pending stack adjustment needed at that point.  */
  restore_from_sp = (!frame_pointer_needed
		     || (!cfun->calls_alloca
			 && current_function_is_leaf));

  /* fsize_with_regs is the size we need to adjust the sp when
     popping the frame.  */
  fsize_with_regs = fsize;
  if (TARGET_COLDFIRE && restore_from_sp)
    {
      /* ColdFire's move multiple instructions do not allow post-increment
	 addressing.  Add the size of movem loads to the final deallocation
	 instead.  */
      if (current_frame.reg_no >= MIN_MOVEM_REGS)
	fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
      if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
	fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
    }

  if (current_frame.offset + fsize >= 0x8000
      && !restore_from_sp
      && (current_frame.reg_mask || current_frame.fpu_mask))
    {
      if (TARGET_COLDFIRE
	  && (current_frame.reg_no >= MIN_MOVEM_REGS
	      || current_frame.fpu_no >= MIN_FMOVEM_REGS))
	{
	  /* ColdFire's move multiple instructions do not support the
	     (d8,Ax,Xi) addressing mode, so we might as well use a normal
	     stack-based restore.  */
	  emit_move_insn (gen_rtx_REG (Pmode, A1_REG),
			  GEN_INT (-(current_frame.offset + fsize)));
	  emit_insn (gen_addsi3 (stack_pointer_rtx,
				 gen_rtx_REG (Pmode, A1_REG),
				 frame_pointer_rtx));
	  restore_from_sp = true;
	}
      else
	{
	  emit_move_insn (gen_rtx_REG (Pmode, A1_REG), GEN_INT (-fsize));
	  fsize = 0;
	  big = true;
	}
    }

  if (current_frame.reg_no < MIN_MOVEM_REGS)
    {
      /* Restore each register separately in the same order moveml does.  */
      int i;
      HOST_WIDE_INT offset;

      offset = current_frame.offset + fsize;
      for (i = 0; i < 16; i++)
	if (current_frame.reg_mask & (1 << i))
	  {
	    rtx addr;

	    if (big)
	      {
		/* Generate the address -OFFSET(%fp,%a1.l).  */
		addr = gen_rtx_REG (Pmode, A1_REG);
		addr = gen_rtx_PLUS (Pmode, addr, frame_pointer_rtx);
		addr = plus_constant (addr, -offset);
	      }
	    else if (restore_from_sp)
	      addr = gen_rtx_POST_INC (Pmode, stack_pointer_rtx);
	    else
	      addr = plus_constant (frame_pointer_rtx, -offset);
	    emit_move_insn (gen_rtx_REG (SImode, D0_REG + i),
			    gen_frame_mem (SImode, addr));
	    offset -= GET_MODE_SIZE (SImode);
	  }
    }
  else if (current_frame.reg_mask)
    {
      if (big)
	m68k_emit_movem (gen_rtx_PLUS (Pmode,
				       gen_rtx_REG (Pmode, A1_REG),
				       frame_pointer_rtx),
			 -(current_frame.offset + fsize),
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false, false);
      else if (restore_from_sp)
	m68k_emit_movem (stack_pointer_rtx, 0,
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false,
			 !TARGET_COLDFIRE);
      else
	m68k_emit_movem (frame_pointer_rtx,
			 -(current_frame.offset + fsize),
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false, false);
    }

  if (current_frame.fpu_no > 0)
    {
      if (big)
	m68k_emit_movem (gen_rtx_PLUS (Pmode,
				       gen_rtx_REG (Pmode, A1_REG),
				       frame_pointer_rtx),
			 -(current_frame.foffset + fsize),
			 current_frame.fpu_no, FP0_REG,
			 current_frame.fpu_mask, false, false);
      else if (restore_from_sp)
	{
	  if (TARGET_COLDFIRE)
	    {
	      int offset;

	      /* If we used moveml to restore the integer registers, the
		 stack pointer will still point to the bottom of the moveml
		 save area.  Find the stack offset of the first FP
		 register.  */
	      if (current_frame.reg_no < MIN_MOVEM_REGS)
		offset = 0;
	      else
		offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
	      m68k_emit_movem (stack_pointer_rtx, offset,
			       current_frame.fpu_no, FP0_REG,
			       current_frame.fpu_mask, false, false);
	    }
	  else
	    m68k_emit_movem (stack_pointer_rtx, 0,
			     current_frame.fpu_no, FP0_REG,
			     current_frame.fpu_mask, false, true);
	}
      else
	m68k_emit_movem (frame_pointer_rtx,
			 -(current_frame.foffset + fsize),
			 current_frame.fpu_no, FP0_REG,
			 current_frame.fpu_mask, false, false);
    }

  if (frame_pointer_needed)
    emit_insn (gen_unlink (frame_pointer_rtx));
  else if (fsize_with_regs)
    emit_insn (gen_addsi3 (stack_pointer_rtx,
			   stack_pointer_rtx,
			   GEN_INT (fsize_with_regs)));

  if (crtl->calls_eh_return)
    emit_insn (gen_addsi3 (stack_pointer_rtx,
			   stack_pointer_rtx,
			   EH_RETURN_STACKADJ_RTX));

  if (!sibcall_p)
    emit_jump_insn (ret_rtx);
}
\f
/* Return true if X is a valid comparison operator for the dbcc
   instruction.

   Note it rejects floating point comparison operators.
   (In the future we could use Fdbcc.)

   It also rejects some comparisons when CC_NO_OVERFLOW is set.  */

int
valid_dbcc_comparison_p_2 (rtx x, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (x))
    {
      case EQ: case NE: case GTU: case LTU:
      case GEU: case LEU:
	return 1;

      /* Reject some when CC_NO_OVERFLOW is set.  This may be
	 overly conservative.  */
      case GT: case LT: case GE: case LE:
	return ! (cc_prev_status.flags & CC_NO_OVERFLOW);
      default:
	return 0;
    }
}

/* Return nonzero if flags are currently in the 68881 flag register.  */
int
flags_in_68881 (void)
{
  /* We could add support for these in the future.  */
  return cc_status.flags & CC_IN_68881;
}

/* Return true if PARALLEL contains register REGNO.  */
static bool
m68k_reg_present_p (const_rtx parallel, unsigned int regno)
{
  int i;

  if (REG_P (parallel) && REGNO (parallel) == regno)
    return true;

  if (GET_CODE (parallel) != PARALLEL)
    return false;

  for (i = 0; i < XVECLEN (parallel, 0); ++i)
    {
      const_rtx x;

      x = XEXP (XVECEXP (parallel, 0, i), 0);
      if (REG_P (x) && REGNO (x) == regno)
	return true;
    }

  return false;
}

/* Implement TARGET_FUNCTION_OK_FOR_SIBCALL_P.  */

static bool
m68k_ok_for_sibcall_p (tree decl, tree exp)
{
  enum m68k_function_kind kind;

  /* We cannot use sibcalls for nested functions because we use the
     static chain register for indirect calls.  */
  if (CALL_EXPR_STATIC_CHAIN (exp))
    return false;

  if (!VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
    {
      /* Check that the return value locations are the same.  For
	 example that we aren't returning a value from the sibling in
	 a D0 register but then need to transfer it to an A0 register.  */
      rtx cfun_value;
      rtx call_value;

      cfun_value = FUNCTION_VALUE (TREE_TYPE (DECL_RESULT (cfun->decl)),
				   cfun->decl);
      call_value = FUNCTION_VALUE (TREE_TYPE (exp), decl);

      /* Check that the values are equal or that the result the callee
	 function returns is a superset of what the current function
	 returns.  */
      if (!(rtx_equal_p (cfun_value, call_value)
	    || (REG_P (cfun_value)
		&& m68k_reg_present_p (call_value, REGNO (cfun_value)))))
	return false;
    }

  kind = m68k_get_function_kind (current_function_decl);
  if (kind == m68k_fk_normal_function)
    /* We can always sibcall from a normal function, because it's
       undefined if it is calling an interrupt function.  */
    return true;

  /* Otherwise we can only sibcall if the function kind is known to be
     the same.  */
  if (decl && m68k_get_function_kind (decl) == kind)
    return true;

  return false;
}

/* On the m68k all args are always pushed.  */

static rtx
m68k_function_arg (cumulative_args_t cum ATTRIBUTE_UNUSED,
		   enum machine_mode mode ATTRIBUTE_UNUSED,
		   const_tree type ATTRIBUTE_UNUSED,
		   bool named ATTRIBUTE_UNUSED)
{
  return NULL_RTX;
}

static void
m68k_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
			   const_tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);

  *cum += (mode != BLKmode
	   ? (GET_MODE_SIZE (mode) + 3) & ~3
	   : (int_size_in_bytes (type) + 3) & ~3);
}
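
/* For example, a QImode argument advances CUM by 4, not 1: every
   argument is rounded up to a whole number of 4-byte stack words,
   since (1 + 3) & ~3 == 4.  */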

/* Convert X to a legitimate function call memory reference and return the
   result.  */

rtx
m68k_legitimize_call_address (rtx x)
{
  gcc_assert (MEM_P (x));
  if (call_operand (XEXP (x, 0), VOIDmode))
    return x;
  return replace_equiv_address (x, force_reg (Pmode, XEXP (x, 0)));
}

/* Likewise for sibling calls.  */

rtx
m68k_legitimize_sibcall_address (rtx x)
{
  gcc_assert (MEM_P (x));
  if (sibcall_operand (XEXP (x, 0), VOIDmode))
    return x;

  emit_move_insn (gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM), XEXP (x, 0));
  return replace_equiv_address (x, gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM));
}

/* Convert X to a legitimate address and return it if successful.  Otherwise
   return X.

   For the 68000, we handle X+REG by loading X into a register R and
   using R+REG.  R will go in an address reg and indexing will be used.
   However, if REG is a broken-out memory address or multiplication,
   nothing needs to be done because REG can certainly go in an address reg.  */

static rtx
m68k_legitimize_address (rtx x, rtx oldx, enum machine_mode mode)
{
  if (m68k_tls_symbol_p (x))
    return m68k_legitimize_tls_address (x);

  if (GET_CODE (x) == PLUS)
    {
      int ch = (x) != (oldx);
      int copied = 0;

#define COPY_ONCE(Y) if (!copied) { Y = copy_rtx (Y); copied = ch = 1; }

      if (GET_CODE (XEXP (x, 0)) == MULT)
	{
	  COPY_ONCE (x);
	  XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
	}
      if (GET_CODE (XEXP (x, 1)) == MULT)
	{
	  COPY_ONCE (x);
	  XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
	}
      if (ch)
	{
	  if (GET_CODE (XEXP (x, 1)) == REG
	      && GET_CODE (XEXP (x, 0)) == REG)
	    {
	      if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
		{
		  COPY_ONCE (x);
		  x = force_operand (x, 0);
		}
	      return x;
	    }
	  if (memory_address_p (mode, x))
	    return x;
	}
      if (GET_CODE (XEXP (x, 0)) == REG
	  || (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
	      && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
	      && GET_MODE (XEXP (XEXP (x, 0), 0)) == HImode))
	{
	  rtx temp = gen_reg_rtx (Pmode);
	  rtx val = force_operand (XEXP (x, 1), 0);
	  emit_move_insn (temp, val);
	  COPY_ONCE (x);
	  XEXP (x, 1) = temp;
	  if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
	      && GET_CODE (XEXP (x, 0)) == REG)
	    x = force_operand (x, 0);
	}
      else if (GET_CODE (XEXP (x, 1)) == REG
	       || (GET_CODE (XEXP (x, 1)) == SIGN_EXTEND
		   && GET_CODE (XEXP (XEXP (x, 1), 0)) == REG
		   && GET_MODE (XEXP (XEXP (x, 1), 0)) == HImode))
	{
	  rtx temp = gen_reg_rtx (Pmode);
	  rtx val = force_operand (XEXP (x, 0), 0);
	  emit_move_insn (temp, val);
	  COPY_ONCE (x);
	  XEXP (x, 0) = temp;
	  if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
	      && GET_CODE (XEXP (x, 1)) == REG)
	    x = force_operand (x, 0);
	}
    }

  return x;
}


/* Output a dbCC; jCC sequence.  Note we do not handle the
   floating point version of this sequence (Fdbcc).  We also
   do not handle alternative conditions when CC_NO_OVERFLOW is
   set.  It is assumed that valid_dbcc_comparison_p and flags_in_68881 will
   kick those out before we get here.  */

void
output_dbcc_and_branch (rtx *operands)
{
  switch (GET_CODE (operands[3]))
    {
    case EQ:
      output_asm_insn ("dbeq %0,%l1\n\tjeq %l2", operands);
      break;

    case NE:
      output_asm_insn ("dbne %0,%l1\n\tjne %l2", operands);
      break;

    case GT:
      output_asm_insn ("dbgt %0,%l1\n\tjgt %l2", operands);
      break;

    case GTU:
      output_asm_insn ("dbhi %0,%l1\n\tjhi %l2", operands);
      break;

    case LT:
      output_asm_insn ("dblt %0,%l1\n\tjlt %l2", operands);
      break;

    case LTU:
      output_asm_insn ("dbcs %0,%l1\n\tjcs %l2", operands);
      break;

    case GE:
      output_asm_insn ("dbge %0,%l1\n\tjge %l2", operands);
      break;

    case GEU:
      output_asm_insn ("dbcc %0,%l1\n\tjcc %l2", operands);
      break;

    case LE:
      output_asm_insn ("dble %0,%l1\n\tjle %l2", operands);
      break;

    case LEU:
      output_asm_insn ("dbls %0,%l1\n\tjls %l2", operands);
      break;

    default:
      gcc_unreachable ();
    }

  /* If the decrement is to be done in SImode, then we have
     to compensate for the fact that dbcc decrements in HImode.  */
  switch (GET_MODE (operands[0]))
    {
    case SImode:
      output_asm_insn ("clr%.w %0\n\tsubq%.l #1,%0\n\tjpl %l1", operands);
      break;

    case HImode:
      break;

    default:
      gcc_unreachable ();
    }
}
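
/* For example, an SImode EQ counter in %d0 with loop label L1 and
   exit label L2 produces:

	dbeq %d0,L1	| loop on the low word until EQ or -1
	jeq L2		| exit if the condition held
	clr.w %d0	| low word expired: borrow one from the
	subq.l #1,%d0	| high word,
	jpl L1		| and keep looping while the counter >= 0  */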

const char *
output_scc_di (rtx op, rtx operand1, rtx operand2, rtx dest)
{
  rtx loperands[7];
  enum rtx_code op_code = GET_CODE (op);

  /* This does not produce a useful cc.  */
  CC_STATUS_INIT;

  /* The m68k cmp.l instruction requires operand1 to be a reg as used
     below.  Swap the operands and change the op if these requirements
     are not fulfilled.  */
  if (GET_CODE (operand2) == REG && GET_CODE (operand1) != REG)
    {
      rtx tmp = operand1;

      operand1 = operand2;
      operand2 = tmp;
      op_code = swap_condition (op_code);
    }
  loperands[0] = operand1;
  if (GET_CODE (operand1) == REG)
    loperands[1] = gen_rtx_REG (SImode, REGNO (operand1) + 1);
  else
    loperands[1] = adjust_address (operand1, SImode, 4);
  if (operand2 != const0_rtx)
    {
      loperands[2] = operand2;
      if (GET_CODE (operand2) == REG)
	loperands[3] = gen_rtx_REG (SImode, REGNO (operand2) + 1);
      else
	loperands[3] = adjust_address (operand2, SImode, 4);
    }
  loperands[4] = gen_label_rtx ();
  if (operand2 != const0_rtx)
    output_asm_insn ("cmp%.l %2,%0\n\tjne %l4\n\tcmp%.l %3,%1", loperands);
  else
    {
      if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[0]))
	output_asm_insn ("tst%.l %0", loperands);
      else
	output_asm_insn ("cmp%.w #0,%0", loperands);

      output_asm_insn ("jne %l4", loperands);

      if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[1]))
	output_asm_insn ("tst%.l %1", loperands);
      else
	output_asm_insn ("cmp%.w #0,%1", loperands);
    }

  loperands[5] = dest;

  switch (op_code)
    {
    case EQ:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("seq %5", loperands);
      break;

    case NE:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sne %5", loperands);
      break;

    case GT:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("shi %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sgt %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case GTU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("shi %5", loperands);
      break;

    case LT:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("scs %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("slt %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case LTU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("scs %5", loperands);
      break;

    case GE:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("scc %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sge %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case GEU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("scc %5", loperands);
      break;

    case LE:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("sls %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sle %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case LEU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sls %5", loperands);
      break;

    default:
      gcc_unreachable ();
    }
  return "";
}

const char *
output_btst (rtx *operands, rtx countop, rtx dataop, rtx insn, int signpos)
{
  operands[0] = countop;
  operands[1] = dataop;

  if (GET_CODE (countop) == CONST_INT)
    {
      register int count = INTVAL (countop);
      /* If COUNT is bigger than the size of the storage unit in use,
	 advance to the containing unit of the same size.  */
      if (count > signpos)
	{
	  int offset = (count & ~signpos) / 8;
	  count = count & signpos;
	  operands[1] = dataop = adjust_address (dataop, QImode, offset);
	}
      if (count == signpos)
	cc_status.flags = CC_NOT_POSITIVE | CC_Z_IN_NOT_N;
      else
	cc_status.flags = CC_NOT_NEGATIVE | CC_Z_IN_NOT_N;

      /* These three statements used to use next_insns_test_no...
	 but it appears that this should do the same job.  */
      if (count == 31
	  && next_insn_tests_no_inequality (insn))
	return "tst%.l %1";
      if (count == 15
	  && next_insn_tests_no_inequality (insn))
	return "tst%.w %1";
      if (count == 7
	  && next_insn_tests_no_inequality (insn))
	return "tst%.b %1";
      /* Try to use `movew to ccr' followed by the appropriate branch insn.
	 On some m68k variants unfortunately that's slower than btst.
	 On 68000 and higher, that should also work for all HImode operands.  */
      if (TUNE_CPU32 || TARGET_COLDFIRE || optimize_size)
	{
	  if (count == 3 && DATA_REG_P (operands[1])
	      && next_insn_tests_no_inequality (insn))
	    {
	      cc_status.flags = CC_NOT_NEGATIVE | CC_Z_IN_NOT_N | CC_NO_OVERFLOW;
	      return "move%.w %1,%%ccr";
	    }
	  if (count == 2 && DATA_REG_P (operands[1])
	      && next_insn_tests_no_inequality (insn))
	    {
	      cc_status.flags = CC_NOT_NEGATIVE | CC_INVERTED | CC_NO_OVERFLOW;
	      return "move%.w %1,%%ccr";
	    }
	  /* count == 1 followed by bvc/bvs and
	     count == 0 followed by bcc/bcs are also possible, but need
	     m68k-specific CC_Z_IN_NOT_V and CC_Z_IN_NOT_C flags.  */
	}

      cc_status.flags = CC_NOT_NEGATIVE;
    }
  return "btst %0,%1";
}
1766 \f
1767 /* Return true if X is a legitimate base register. STRICT_P says
1768 whether we need strict checking. */
1769
1770 bool
1771 m68k_legitimate_base_reg_p (rtx x, bool strict_p)
1772 {
1773 /* Allow SUBREG everywhere we allow REG. This results in better code. */
1774 if (!strict_p && GET_CODE (x) == SUBREG)
1775 x = SUBREG_REG (x);
1776
1777 return (REG_P (x)
1778 && (strict_p
1779 ? REGNO_OK_FOR_BASE_P (REGNO (x))
1780 : REGNO_OK_FOR_BASE_NONSTRICT_P (REGNO (x))));
1781 }
1782
1783 /* Return true if X is a legitimate index register. STRICT_P says
1784 whether we need strict checking. */
1785
1786 bool
1787 m68k_legitimate_index_reg_p (rtx x, bool strict_p)
1788 {
1789 if (!strict_p && GET_CODE (x) == SUBREG)
1790 x = SUBREG_REG (x);
1791
1792 return (REG_P (x)
1793 && (strict_p
1794 ? REGNO_OK_FOR_INDEX_P (REGNO (x))
1795 : REGNO_OK_FOR_INDEX_NONSTRICT_P (REGNO (x))));
1796 }
1797
1798 /* Return true if X is a legitimate index expression for a (d8,An,Xn) or
1799 (bd,An,Xn) addressing mode. Fill in the INDEX and SCALE fields of
1800 ADDRESS if so. STRICT_P says whether we need strict checking. */
1801
1802 static bool
1803 m68k_decompose_index (rtx x, bool strict_p, struct m68k_address *address)
1804 {
1805 int scale;
1806
1807 /* Check for a scale factor. */
1808 scale = 1;
1809 if ((TARGET_68020 || TARGET_COLDFIRE)
1810 && GET_CODE (x) == MULT
1811 && GET_CODE (XEXP (x, 1)) == CONST_INT
1812 && (INTVAL (XEXP (x, 1)) == 2
1813 || INTVAL (XEXP (x, 1)) == 4
1814 || (INTVAL (XEXP (x, 1)) == 8
1815 && (TARGET_COLDFIRE_FPU || !TARGET_COLDFIRE))))
1816 {
1817 scale = INTVAL (XEXP (x, 1));
1818 x = XEXP (x, 0);
1819 }
1820
1821 /* Check for a word extension. */
1822 if (!TARGET_COLDFIRE
1823 && GET_CODE (x) == SIGN_EXTEND
1824 && GET_MODE (XEXP (x, 0)) == HImode)
1825 x = XEXP (x, 0);
1826
1827 if (m68k_legitimate_index_reg_p (x, strict_p))
1828 {
1829 address->scale = scale;
1830 address->index = x;
1831 return true;
1832 }
1833
1834 return false;
1835 }
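
/* For example, given the index expression of a d8(An,Dn.w*4) address on
   a 68020, (mult (sign_extend:SI (reg:HI Dn)) (const_int 4)), the code
   above first records SCALE = 4, then strips the word extension, and
   finally checks that Dn is a legitimate index register.  */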
1836
1837 /* Return true if X is an illegitimate symbolic constant. */
1838
1839 bool
1840 m68k_illegitimate_symbolic_constant_p (rtx x)
1841 {
1842 rtx base, offset;
1843
1844 if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P)
1845 {
1846 split_const (x, &base, &offset);
1847 if (GET_CODE (base) == SYMBOL_REF
1848 && !offset_within_block_p (base, INTVAL (offset)))
1849 return true;
1850 }
1851 return m68k_tls_reference_p (x, false);
1852 }
1853
1854 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
1855
1856 static bool
1857 m68k_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
1858 {
1859 return m68k_illegitimate_symbolic_constant_p (x);
1860 }
1861
1862 /* Return true if X is a legitimate constant address that can reach
1863 bytes in the range [X, X + REACH). STRICT_P says whether we need
1864 strict checking. */
1865
1866 static bool
1867 m68k_legitimate_constant_address_p (rtx x, unsigned int reach, bool strict_p)
1868 {
1869 rtx base, offset;
1870
1871 if (!CONSTANT_ADDRESS_P (x))
1872 return false;
1873
1874 if (flag_pic
1875 && !(strict_p && TARGET_PCREL)
1876 && symbolic_operand (x, VOIDmode))
1877 return false;
1878
1879 if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P && reach > 1)
1880 {
1881 split_const (x, &base, &offset);
1882 if (GET_CODE (base) == SYMBOL_REF
1883 && !offset_within_block_p (base, INTVAL (offset) + reach - 1))
1884 return false;
1885 }
1886
1887 return !m68k_tls_reference_p (x, false);
1888 }
1889
1890 /* Return true if X is a LABEL_REF for a jump table. Assume that unplaced
1891 labels will become jump tables. */
1892
1893 static bool
1894 m68k_jump_table_ref_p (rtx x)
1895 {
1896 if (GET_CODE (x) != LABEL_REF)
1897 return false;
1898
1899 x = XEXP (x, 0);
1900 if (!NEXT_INSN (x) && !PREV_INSN (x))
1901 return true;
1902
1903 x = next_nonnote_insn (x);
1904 return x && JUMP_TABLE_DATA_P (x);
1905 }
1906
1907 /* Return true if X is a legitimate address for values of mode MODE.
1908 STRICT_P says whether strict checking is needed. If the address
1909 is valid, describe its components in *ADDRESS. */
1910
1911 static bool
1912 m68k_decompose_address (enum machine_mode mode, rtx x,
1913 bool strict_p, struct m68k_address *address)
1914 {
1915 unsigned int reach;
1916
1917 memset (address, 0, sizeof (*address));
1918
1919 if (mode == BLKmode)
1920 reach = 1;
1921 else
1922 reach = GET_MODE_SIZE (mode);
1923
1924 /* Check for (An) (mode 2). */
1925 if (m68k_legitimate_base_reg_p (x, strict_p))
1926 {
1927 address->base = x;
1928 return true;
1929 }
1930
1931 /* Check for -(An) and (An)+ (modes 3 and 4). */
1932 if ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_INC)
1933 && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
1934 {
1935 address->code = GET_CODE (x);
1936 address->base = XEXP (x, 0);
1937 return true;
1938 }
1939
1940 /* Check for (d16,An) (mode 5). */
1941 if (GET_CODE (x) == PLUS
1942 && GET_CODE (XEXP (x, 1)) == CONST_INT
1943 && IN_RANGE (INTVAL (XEXP (x, 1)), -0x8000, 0x8000 - reach)
1944 && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
1945 {
1946 address->base = XEXP (x, 0);
1947 address->offset = XEXP (x, 1);
1948 return true;
1949 }
1950
1951 /* Check for GOT loads. These are (bd,An,Xn) addresses if
1952 TARGET_68020 && flag_pic == 2, otherwise they are (d16,An)
1953 addresses. */
1954 if (GET_CODE (x) == PLUS
1955 && XEXP (x, 0) == pic_offset_table_rtx)
1956 {
1957 /* As we are processing a PLUS, do not unwrap RELOC32 symbols --
1958 they are invalid in this context. */
1959 if (m68k_unwrap_symbol (XEXP (x, 1), false) != XEXP (x, 1))
1960 {
1961 address->base = XEXP (x, 0);
1962 address->offset = XEXP (x, 1);
1963 return true;
1964 }
1965 }
1966
1967 /* The ColdFire FPU only accepts addressing modes 2-5. */
1968 if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
1969 return false;
1970
1971 /* Check for (xxx).w and (xxx).l. Also, in the TARGET_PCREL case,
1972 check for (d16,PC) or (bd,PC,Xn) with a suppressed index register.
1973 All these modes are variations of mode 7. */
1974 if (m68k_legitimate_constant_address_p (x, reach, strict_p))
1975 {
1976 address->offset = x;
1977 return true;
1978 }
1979
1980 /* Check for (d8,PC,Xn), a mode 7 form. This case is needed for
1981 tablejumps.
1982
1983 ??? do_tablejump creates these addresses before placing the target
1984 label, so we have to assume that unplaced labels are jump table
1985 references. It seems unlikely that we would ever generate indexed
1986 accesses to unplaced labels in other cases. */
1987 if (GET_CODE (x) == PLUS
1988 && m68k_jump_table_ref_p (XEXP (x, 1))
1989 && m68k_decompose_index (XEXP (x, 0), strict_p, address))
1990 {
1991 address->offset = XEXP (x, 1);
1992 return true;
1993 }
1994
1995 /* Everything hereafter deals with (d8,An,Xn.SIZE*SCALE) or
1996 (bd,An,Xn.SIZE*SCALE) addresses. */
1997
1998 if (TARGET_68020)
1999 {
2000 /* Check for a nonzero base displacement. */
2001 if (GET_CODE (x) == PLUS
2002 && m68k_legitimate_constant_address_p (XEXP (x, 1), reach, strict_p))
2003 {
2004 address->offset = XEXP (x, 1);
2005 x = XEXP (x, 0);
2006 }
2007
2008 /* Check for a suppressed index register. */
2009 if (m68k_legitimate_base_reg_p (x, strict_p))
2010 {
2011 address->base = x;
2012 return true;
2013 }
2014
2015 /* Check for a suppressed base register. Do not allow this case
2016 for non-symbolic offsets as it effectively gives gcc freedom
2017 to treat data registers as base registers, which can generate
2018 worse code. */
2019 if (address->offset
2020 && symbolic_operand (address->offset, VOIDmode)
2021 && m68k_decompose_index (x, strict_p, address))
2022 return true;
2023 }
2024 else
2025 {
2026 /* Check for a nonzero base displacement. */
2027 if (GET_CODE (x) == PLUS
2028 && GET_CODE (XEXP (x, 1)) == CONST_INT
2029 && IN_RANGE (INTVAL (XEXP (x, 1)), -0x80, 0x80 - reach))
2030 {
2031 address->offset = XEXP (x, 1);
2032 x = XEXP (x, 0);
2033 }
2034 }
2035
2036 /* We now expect the sum of a base and an index. */
2037 if (GET_CODE (x) == PLUS)
2038 {
2039 if (m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p)
2040 && m68k_decompose_index (XEXP (x, 1), strict_p, address))
2041 {
2042 address->base = XEXP (x, 0);
2043 return true;
2044 }
2045
2046 if (m68k_legitimate_base_reg_p (XEXP (x, 1), strict_p)
2047 && m68k_decompose_index (XEXP (x, 0), strict_p, address))
2048 {
2049 address->base = XEXP (x, 1);
2050 return true;
2051 }
2052 }
2053 return false;
2054 }
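
/* Some examples of the decompositions performed above (the scaled form
   assumes a 68020 or ColdFire target):

   (reg A0)                        -> base A0              ((An), mode 2)
   (pre_dec (reg A0))              -> code PRE_DEC, base A0  (-(An), mode 3)
   (plus (reg A0) (const_int 42))  -> base A0, offset 42   ((d16,An), mode 5)
   (plus (reg A0)
         (mult (reg D0) (const_int 2))) -> base A0, index D0, scale 2.  */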
2055
2056 /* Return true if X is a legitimate address for values of mode MODE.
2057 STRICT_P says whether strict checking is needed. */
2058
2059 bool
2060 m68k_legitimate_address_p (enum machine_mode mode, rtx x, bool strict_p)
2061 {
2062 struct m68k_address address;
2063
2064 return m68k_decompose_address (mode, x, strict_p, &address);
2065 }
2066
2067 /* Return true if X is a memory, describing its address in ADDRESS if so.
2068 Apply strict checking if called during or after reload. */
2069
2070 static bool
2071 m68k_legitimate_mem_p (rtx x, struct m68k_address *address)
2072 {
2073 return (MEM_P (x)
2074 && m68k_decompose_address (GET_MODE (x), XEXP (x, 0),
2075 reload_in_progress || reload_completed,
2076 address));
2077 }
2078
2079 /* Implement TARGET_LEGITIMATE_CONSTANT_P. */
2080
2081 bool
2082 m68k_legitimate_constant_p (enum machine_mode mode, rtx x)
2083 {
2084 return mode != XFmode && !m68k_illegitimate_symbolic_constant_p (x);
2085 }
2086
2087 /* Return true if X matches the 'Q' constraint. It must be a memory
2088 reference with a base address and no constant offset or index. */
2089
2090 bool
2091 m68k_matches_q_p (rtx x)
2092 {
2093 struct m68k_address address;
2094
2095 return (m68k_legitimate_mem_p (x, &address)
2096 && address.code == UNKNOWN
2097 && address.base
2098 && !address.offset
2099 && !address.index);
2100 }
2101
2102 /* Return true if X matches the 'U' constraint. It must be a memory
2103 reference with a base address, a constant offset, and no index. */
2104
2105 bool
2106 m68k_matches_u_p (rtx x)
2107 {
2108 struct m68k_address address;
2109
2110 return (m68k_legitimate_mem_p (x, &address)
2111 && address.code == UNKNOWN
2112 && address.base
2113 && address.offset
2114 && !address.index);
2115 }
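
/* So, for instance, (mem (reg A0)) satisfies 'Q' while
   (mem (plus (reg A0) (const_int 4))) satisfies 'U'; an indexed
   address such as (d8,An,Xn) satisfies neither.  */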
2116
2117 /* Return the GOT pointer. */
2118
2119 static rtx
2120 m68k_get_gp (void)
2121 {
2122 if (pic_offset_table_rtx == NULL_RTX)
2123 pic_offset_table_rtx = gen_rtx_REG (Pmode, PIC_REG);
2124
2125 crtl->uses_pic_offset_table = 1;
2126
2127 return pic_offset_table_rtx;
2128 }
2129
2130 /* M68K relocations, used to distinguish GOT and TLS relocations in UNSPEC
2131 wrappers. */
2132 enum m68k_reloc { RELOC_GOT, RELOC_TLSGD, RELOC_TLSLDM, RELOC_TLSLDO,
2133 RELOC_TLSIE, RELOC_TLSLE };
2134
2135 #define TLS_RELOC_P(RELOC) ((RELOC) != RELOC_GOT)
2136
2137 /* Wrap symbol X into unspec representing relocation RELOC.
2138 BASE_REG - register that should be added to the result.
2139 TEMP_REG - if non-null, temporary register. */
2140
2141 static rtx
2142 m68k_wrap_symbol (rtx x, enum m68k_reloc reloc, rtx base_reg, rtx temp_reg)
2143 {
2144 bool use_x_p;
2145
2146 use_x_p = (base_reg == pic_offset_table_rtx) ? TARGET_XGOT : TARGET_XTLS;
2147
2148 if (TARGET_COLDFIRE && use_x_p)
2149 /* When compiling with the -mx{got,tls} switch, the code will look like this:
2150
2151 move.l <X>@<RELOC>,<TEMP_REG>
2152 add.l <BASE_REG>,<TEMP_REG> */
2153 {
2154 /* Wrap X in UNSPEC_??? to hint to m68k_output_addr_const_extra
2155 that it should put @RELOC after the reference. */
2156 x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)),
2157 UNSPEC_RELOC32);
2158 x = gen_rtx_CONST (Pmode, x);
2159
2160 if (temp_reg == NULL)
2161 {
2162 gcc_assert (can_create_pseudo_p ());
2163 temp_reg = gen_reg_rtx (Pmode);
2164 }
2165
2166 emit_move_insn (temp_reg, x);
2167 emit_insn (gen_addsi3 (temp_reg, temp_reg, base_reg));
2168 x = temp_reg;
2169 }
2170 else
2171 {
2172 x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)),
2173 UNSPEC_RELOC16);
2174 x = gen_rtx_CONST (Pmode, x);
2175
2176 x = gen_rtx_PLUS (Pmode, base_reg, x);
2177 }
2178
2179 return x;
2180 }
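
/* For instance, in the non-XGOT/non-XTLS case, wrapping symbol FOO with
   RELOC_GOT against the PIC register yields

     (plus PIC_REG
           (const (unspec [FOO (const_int RELOC_GOT)] UNSPEC_RELOC16)))

   which is later printed with an @GOT-style decoration after FOO.  */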
2181
2182 /* Helper for m68k_unwrap_symbol.
2183 Also, if unwrapping was successful (that is, if (ORIG != <return value>)),
2184 set *RELOC_PTR to the relocation type of the symbol. */
2185
2186 static rtx
2187 m68k_unwrap_symbol_1 (rtx orig, bool unwrap_reloc32_p,
2188 enum m68k_reloc *reloc_ptr)
2189 {
2190 if (GET_CODE (orig) == CONST)
2191 {
2192 rtx x;
2193 enum m68k_reloc dummy;
2194
2195 x = XEXP (orig, 0);
2196
2197 if (reloc_ptr == NULL)
2198 reloc_ptr = &dummy;
2199
2200 /* Handle an addend. */
2201 if ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS)
2202 && CONST_INT_P (XEXP (x, 1)))
2203 x = XEXP (x, 0);
2204
2205 if (GET_CODE (x) == UNSPEC)
2206 {
2207 switch (XINT (x, 1))
2208 {
2209 case UNSPEC_RELOC16:
2210 orig = XVECEXP (x, 0, 0);
2211 *reloc_ptr = (enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1));
2212 break;
2213
2214 case UNSPEC_RELOC32:
2215 if (unwrap_reloc32_p)
2216 {
2217 orig = XVECEXP (x, 0, 0);
2218 *reloc_ptr = (enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1));
2219 }
2220 break;
2221
2222 default:
2223 break;
2224 }
2225 }
2226 }
2227
2228 return orig;
2229 }
2230
2231 /* Unwrap symbol from UNSPEC_RELOC16 and, if unwrap_reloc32_p,
2232 UNSPEC_RELOC32 wrappers. */
2233
2234 rtx
2235 m68k_unwrap_symbol (rtx orig, bool unwrap_reloc32_p)
2236 {
2237 return m68k_unwrap_symbol_1 (orig, unwrap_reloc32_p, NULL);
2238 }
2239
2240 /* Helper for m68k_final_prescan_insn. */
2241
2242 static int
2243 m68k_final_prescan_insn_1 (rtx *x_ptr, void *data ATTRIBUTE_UNUSED)
2244 {
2245 rtx x = *x_ptr;
2246
2247 if (m68k_unwrap_symbol (x, true) != x)
2248 /* For rationale of the below, see comment in m68k_final_prescan_insn. */
2249 {
2250 rtx plus;
2251
2252 gcc_assert (GET_CODE (x) == CONST);
2253 plus = XEXP (x, 0);
2254
2255 if (GET_CODE (plus) == PLUS || GET_CODE (plus) == MINUS)
2256 {
2257 rtx unspec;
2258 rtx addend;
2259
2260 unspec = XEXP (plus, 0);
2261 gcc_assert (GET_CODE (unspec) == UNSPEC);
2262 addend = XEXP (plus, 1);
2263 gcc_assert (CONST_INT_P (addend));
2264
2265 /* We now have all the pieces, rearrange them. */
2266
2267 /* Move symbol to plus. */
2268 XEXP (plus, 0) = XVECEXP (unspec, 0, 0);
2269
2270 /* Move plus inside unspec. */
2271 XVECEXP (unspec, 0, 0) = plus;
2272
2273 /* Move unspec to top level of const. */
2274 XEXP (x, 0) = unspec;
2275 }
2276
2277 return -1;
2278 }
2279
2280 return 0;
2281 }
2282
2283 /* Prescan insn before outputting assembler for it. */
2284
2285 void
2286 m68k_final_prescan_insn (rtx insn ATTRIBUTE_UNUSED,
2287 rtx *operands, int n_operands)
2288 {
2289 int i;
2290
2291 /* Combine and, possibly, other optimizations may do a good job of
2292 converting
2293 (const (unspec [(symbol)]))
2294 into
2295 (const (plus (unspec [(symbol)])
2296 (const_int N))).
2297 The problem with this is emitting @TLS or @GOT decorations.
2298 The decoration is emitted when processing (unspec), so the
2299 result would be "#symbol@TLSLE+N" instead of "#symbol+N@TLSLE".
2300
2301 It seems that the easiest solution to this is to convert such
2302 operands to
2303 (const (unspec [(plus (symbol)
2304 (const_int N))])).
2305 Note that the top level of the operand remains intact, so we don't
2306 have to patch up anything outside of the operand. */
2307
2308 for (i = 0; i < n_operands; ++i)
2309 {
2310 rtx op;
2311
2312 op = operands[i];
2313
2314 for_each_rtx (&op, m68k_final_prescan_insn_1, NULL);
2315 }
2316 }
2317
2318 /* Move X to a register and add REG_EQUAL note pointing to ORIG.
2319 If REG is non-null, use it; generate new pseudo otherwise. */
2320
2321 static rtx
2322 m68k_move_to_reg (rtx x, rtx orig, rtx reg)
2323 {
2324 rtx insn;
2325
2326 if (reg == NULL_RTX)
2327 {
2328 gcc_assert (can_create_pseudo_p ());
2329 reg = gen_reg_rtx (Pmode);
2330 }
2331
2332 insn = emit_move_insn (reg, x);
2333 /* Put a REG_EQUAL note on this insn, so that it can be optimized
2334 by the loop optimizer. */
2335 set_unique_reg_note (insn, REG_EQUAL, orig);
2336
2337 return reg;
2338 }
2339
2340 /* Does the same as m68k_wrap_symbol, but returns a memory reference to
2341 the GOT slot. */
2342
2343 static rtx
2344 m68k_wrap_symbol_into_got_ref (rtx x, enum m68k_reloc reloc, rtx temp_reg)
2345 {
2346 x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), temp_reg);
2347
2348 x = gen_rtx_MEM (Pmode, x);
2349 MEM_READONLY_P (x) = 1;
2350
2351 return x;
2352 }
2353
2354 /* Legitimize PIC addresses. If the address is already
2355 position-independent, we return ORIG. Newly generated
2356 position-independent addresses go to REG. If we need more
2357 than one register, we lose.
2358
2359 An address is legitimized by making an indirect reference
2360 through the Global Offset Table with the name of the symbol
2361 used as an offset.
2362
2363 The assembler and linker are responsible for placing the
2364 address of the symbol in the GOT. The function prologue
2365 is responsible for initializing a5 to the starting address
2366 of the GOT.
2367
2368 The assembler is also responsible for translating a symbol name
2369 into a constant displacement from the start of the GOT.
2370
2371 A quick example may make things a little clearer:
2372
2373 When not generating PIC code to store the value 12345 into _foo
2374 we would generate the following code:
2375
2376 movel #12345, _foo
2377
2378 When generating PIC two transformations are made. First, the compiler
2379 loads the address of foo into a register. So the first transformation makes:
2380
2381 lea _foo, a0
2382 movel #12345, a0@
2383
2384 The code in movsi will intercept the lea instruction and call this
2385 routine which will transform the instructions into:
2386
2387 movel a5@(_foo:w), a0
2388 movel #12345, a0@
2389
2390
2391 That (in a nutshell) is how *all* symbol and label references are
2392 handled. */
2393
2394 rtx
2395 legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
2396 rtx reg)
2397 {
2398 rtx pic_ref = orig;
2399
2400 /* First handle a simple SYMBOL_REF or LABEL_REF. */
2401 if (GET_CODE (orig) == SYMBOL_REF || GET_CODE (orig) == LABEL_REF)
2402 {
2403 gcc_assert (reg);
2404
2405 pic_ref = m68k_wrap_symbol_into_got_ref (orig, RELOC_GOT, reg);
2406 pic_ref = m68k_move_to_reg (pic_ref, orig, reg);
2407 }
2408 else if (GET_CODE (orig) == CONST)
2409 {
2410 rtx base;
2411
2412 /* Make sure this has not already been legitimized. */
2413 if (m68k_unwrap_symbol (orig, true) != orig)
2414 return orig;
2415
2416 gcc_assert (reg);
2417
2418 /* Legitimize both operands of the PLUS. */
2419 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
2420
2421 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
2422 orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
2423 base == reg ? 0 : reg);
2424
2425 if (GET_CODE (orig) == CONST_INT)
2426 pic_ref = plus_constant (base, INTVAL (orig));
2427 else
2428 pic_ref = gen_rtx_PLUS (Pmode, base, orig);
2429 }
2430
2431 return pic_ref;
2432 }
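
/* For the CONST case above, legitimizing (const (plus (symbol_ref FOO)
   (const_int 4))), for example, loads FOO's GOT slot into REG and then
   folds the addend back in, giving (plus REG (const_int 4)).  */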
2433
2434 /* The __tls_get_addr symbol. */
2435 static GTY(()) rtx m68k_tls_get_addr;
2436
2437 /* Return SYMBOL_REF for __tls_get_addr. */
2438
2439 static rtx
2440 m68k_get_tls_get_addr (void)
2441 {
2442 if (m68k_tls_get_addr == NULL_RTX)
2443 m68k_tls_get_addr = init_one_libfunc ("__tls_get_addr");
2444
2445 return m68k_tls_get_addr;
2446 }
2447
2448 /* Return the libcall result in A0 instead of the usual D0. */
2449 static bool m68k_libcall_value_in_a0_p = false;
2450
2451 /* Emit instruction sequence that calls __tls_get_addr. X is
2452 the TLS symbol we are referencing and RELOC is the symbol type to use
2453 (either TLSGD or TLSLDM). EQV is the REG_EQUAL note for the sequence
2454 emitted. A pseudo register holding the result of the __tls_get_addr
2455 call is returned. */
2456
2457 static rtx
2458 m68k_call_tls_get_addr (rtx x, rtx eqv, enum m68k_reloc reloc)
2459 {
2460 rtx a0;
2461 rtx insns;
2462 rtx dest;
2463
2464 /* Emit the call sequence. */
2465 start_sequence ();
2466
2467 /* FIXME: Unfortunately, emit_library_call_value does not
2468 consider (plus (%a5) (const (unspec))) to be a good enough
2469 operand for push, so it forces it into a register. The bad
2470 thing about this is that the combiner, due to copy propagation and other
2471 optimizations, sometimes cannot later fix this. As a consequence, an
2472 additional register may be allocated, resulting in a spill.
2473 For reference, see args processing loops in
2474 calls.c:emit_library_call_value_1.
2475 For testcase, see gcc.target/m68k/tls-{gd, ld}.c */
2476 x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), NULL_RTX);
2477
2478 /* __tls_get_addr() is not a libcall, but emitting a libcall_value
2479 is the simplest way of generating a call. The difference between
2480 __tls_get_addr() and an ordinary libcall is that the result is
2481 returned in A0 instead of D0. To work around this, we use m68k_libcall_value_in_a0_p
2482 which temporarily switches returning the result to A0. */
2483
2484 m68k_libcall_value_in_a0_p = true;
2485 a0 = emit_library_call_value (m68k_get_tls_get_addr (), NULL_RTX, LCT_PURE,
2486 Pmode, 1, x, Pmode);
2487 m68k_libcall_value_in_a0_p = false;
2488
2489 insns = get_insns ();
2490 end_sequence ();
2491
2492 gcc_assert (can_create_pseudo_p ());
2493 dest = gen_reg_rtx (Pmode);
2494 emit_libcall_block (insns, dest, a0, eqv);
2495
2496 return dest;
2497 }
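
/* For a global-dynamic reference to FOO, for example, the sequence built
   above amounts to a call to __tls_get_addr whose single argument (in the
   non-XTLS case) is

     (plus PIC_REG
           (const (unspec [FOO (const_int RELOC_TLSGD)] UNSPEC_RELOC16)))

   with the result taken from A0 rather than D0.  */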
2498
2499 /* The __m68k_read_tp symbol. */
2500 static GTY(()) rtx m68k_read_tp;
2501
2502 /* Return SYMBOL_REF for __m68k_read_tp. */
2503
2504 static rtx
2505 m68k_get_m68k_read_tp (void)
2506 {
2507 if (m68k_read_tp == NULL_RTX)
2508 m68k_read_tp = init_one_libfunc ("__m68k_read_tp");
2509
2510 return m68k_read_tp;
2511 }
2512
2513 /* Emit instruction sequence that calls __m68k_read_tp.
2514 A pseudo register holding the result of the __m68k_read_tp call is returned. */
2515
2516 static rtx
2517 m68k_call_m68k_read_tp (void)
2518 {
2519 rtx a0;
2520 rtx eqv;
2521 rtx insns;
2522 rtx dest;
2523
2524 start_sequence ();
2525
2526 /* __m68k_read_tp() is not a libcall, but emitting a libcall_value
2527 is the simplest way of generating a call. The difference between
2528 __m68k_read_tp() and an ordinary libcall is that the result is
2529 returned in A0 instead of D0. To work around this, we use m68k_libcall_value_in_a0_p
2530 which temporarily switches returning the result to A0. */
2531
2532 /* Emit the call sequence. */
2533 m68k_libcall_value_in_a0_p = true;
2534 a0 = emit_library_call_value (m68k_get_m68k_read_tp (), NULL_RTX, LCT_PURE,
2535 Pmode, 0);
2536 m68k_libcall_value_in_a0_p = false;
2537 insns = get_insns ();
2538 end_sequence ();
2539
2540 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2541 share the m68k_read_tp result with other IE/LE model accesses. */
2542 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const1_rtx), UNSPEC_RELOC32);
2543
2544 gcc_assert (can_create_pseudo_p ());
2545 dest = gen_reg_rtx (Pmode);
2546 emit_libcall_block (insns, dest, a0, eqv);
2547
2548 return dest;
2549 }
2550
2551 /* Return a legitimized address for accessing TLS SYMBOL_REF X.
2552 For explanations of the instruction sequences, see the TLS/NPTL ABI for
2553 m68k and ColdFire. */
2554
2555 rtx
2556 m68k_legitimize_tls_address (rtx orig)
2557 {
2558 switch (SYMBOL_REF_TLS_MODEL (orig))
2559 {
2560 case TLS_MODEL_GLOBAL_DYNAMIC:
2561 orig = m68k_call_tls_get_addr (orig, orig, RELOC_TLSGD);
2562 break;
2563
2564 case TLS_MODEL_LOCAL_DYNAMIC:
2565 {
2566 rtx eqv;
2567 rtx a0;
2568 rtx x;
2569
2570 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2571 share the LDM result with other LD model accesses. */
2572 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
2573 UNSPEC_RELOC32);
2574
2575 a0 = m68k_call_tls_get_addr (orig, eqv, RELOC_TLSLDM);
2576
2577 x = m68k_wrap_symbol (orig, RELOC_TLSLDO, a0, NULL_RTX);
2578
2579 if (can_create_pseudo_p ())
2580 x = m68k_move_to_reg (x, orig, NULL_RTX);
2581
2582 orig = x;
2583 break;
2584 }
2585
2586 case TLS_MODEL_INITIAL_EXEC:
2587 {
2588 rtx a0;
2589 rtx x;
2590
2591 a0 = m68k_call_m68k_read_tp ();
2592
2593 x = m68k_wrap_symbol_into_got_ref (orig, RELOC_TLSIE, NULL_RTX);
2594 x = gen_rtx_PLUS (Pmode, x, a0);
2595
2596 if (can_create_pseudo_p ())
2597 x = m68k_move_to_reg (x, orig, NULL_RTX);
2598
2599 orig = x;
2600 break;
2601 }
2602
2603 case TLS_MODEL_LOCAL_EXEC:
2604 {
2605 rtx a0;
2606 rtx x;
2607
2608 a0 = m68k_call_m68k_read_tp ();
2609
2610 x = m68k_wrap_symbol (orig, RELOC_TLSLE, a0, NULL_RTX);
2611
2612 if (can_create_pseudo_p ())
2613 x = m68k_move_to_reg (x, orig, NULL_RTX);
2614
2615 orig = x;
2616 break;
2617 }
2618
2619 default:
2620 gcc_unreachable ();
2621 }
2622
2623 return orig;
2624 }
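
/* To summarize the cases above: global dynamic uses a TLSGD argument to
   __tls_get_addr; local dynamic adds a TLSLDO offset to the TLSLDM base
   returned by __tls_get_addr; initial exec adds the thread pointer from
   __m68k_read_tp to a TLSIE GOT load; and local exec adds the thread
   pointer to a TLSLE offset directly.  */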
2625
2626 /* Return true if X is a TLS symbol. */
2627
2628 static bool
2629 m68k_tls_symbol_p (rtx x)
2630 {
2631 if (!TARGET_HAVE_TLS)
2632 return false;
2633
2634 if (GET_CODE (x) != SYMBOL_REF)
2635 return false;
2636
2637 return SYMBOL_REF_TLS_MODEL (x) != 0;
2638 }
2639
2640 /* Helper for m68k_tls_reference_p. */
2641
2642 static int
2643 m68k_tls_reference_p_1 (rtx *x_ptr, void *data ATTRIBUTE_UNUSED)
2644 {
2645 /* Note: this is not the same as m68k_tls_symbol_p. */
2646 if (GET_CODE (*x_ptr) == SYMBOL_REF)
2647 return SYMBOL_REF_TLS_MODEL (*x_ptr) != 0 ? 1 : 0;
2648
2649 /* Don't recurse into legitimate TLS references. */
2650 if (m68k_tls_reference_p (*x_ptr, true))
2651 return -1;
2652
2653 return 0;
2654 }
2655
2656 /* If !LEGITIMATE_P, return true if X is a TLS symbol reference,
2657 though an illegitimate one.
2658 If LEGITIMATE_P, return true if X is a legitimate TLS symbol reference. */
2659
2660 bool
2661 m68k_tls_reference_p (rtx x, bool legitimate_p)
2662 {
2663 if (!TARGET_HAVE_TLS)
2664 return false;
2665
2666 if (!legitimate_p)
2667 return for_each_rtx (&x, m68k_tls_reference_p_1, NULL) == 1 ? true : false;
2668 else
2669 {
2670 enum m68k_reloc reloc = RELOC_GOT;
2671
2672 return (m68k_unwrap_symbol_1 (x, true, &reloc) != x
2673 && TLS_RELOC_P (reloc));
2674 }
2675 }
2676
2677 \f
2678
2679 #define USE_MOVQ(i) ((unsigned) ((i) + 128) <= 255)
2680
2681 /* Return the type of move that should be used for integer I. */
2682
2683 M68K_CONST_METHOD
2684 m68k_const_method (HOST_WIDE_INT i)
2685 {
2686 unsigned u;
2687
2688 if (USE_MOVQ (i))
2689 return MOVQ;
2690
2691 /* The ColdFire doesn't have byte or word operations. */
2692 /* FIXME: This may not be useful for the m68060 either. */
2693 if (!TARGET_COLDFIRE)
2694 {
2695 /* If -256 < N < 256 but N is not in range for a moveq,
2696 N^0xff will be, so use moveq #N^0xff,dreg; not.b dreg. */
2697 if (USE_MOVQ (i ^ 0xff))
2698 return NOTB;
2699 /* Likewise, try with not.w */
2700 if (USE_MOVQ (i ^ 0xffff))
2701 return NOTW;
2702 /* This is the only value where neg.w is useful. */
2703 if (i == -65408)
2704 return NEGW;
2705 }
2706
2707 /* Try also with swap. */
2708 u = i;
2709 if (USE_MOVQ ((u >> 16) | (u << 16)))
2710 return SWAP;
2711
2712 if (TARGET_ISAB)
2713 {
2714 /* Try using MVZ/MVS with an immediate value to load constants. */
2715 if (i >= 0 && i <= 65535)
2716 return MVZ;
2717 if (i >= -32768 && i <= 32767)
2718 return MVS;
2719 }
2720
2721 /* Otherwise, use move.l */
2722 return MOVL;
2723 }
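
/* A few worked examples of the classification above (the NOTB/NOTW/NEGW
   cases apply only to non-ColdFire CPUs):

   100        -> MOVQ  (fits in a signed byte)
   255        -> NOTB  (255 ^ 0xff == 0 fits; moveq #0,dn; not.b dn)
   65535      -> NOTW  (65535 ^ 0xffff == 0 fits)
   -65408     -> NEGW  (moveq #-128,dn gives 0xffffff80; neg.w leaves
                        0xffff0080 == -65408)
   0x560000   -> SWAP  (the halves swapped give 0x56, a moveq value)
   0x12345678 -> MOVL.  */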
2724
2725 /* Return the cost of moving constant I into a data register. */
2726
2727 static int
2728 const_int_cost (HOST_WIDE_INT i)
2729 {
2730 switch (m68k_const_method (i))
2731 {
2732 case MOVQ:
2733 /* Constants between -128 and 127 are cheap due to moveq. */
2734 return 0;
2735 case MVZ:
2736 case MVS:
2737 case NOTB:
2738 case NOTW:
2739 case NEGW:
2740 case SWAP:
2741 /* Constants easily generated by moveq + not.b/not.w/neg.w/swap. */
2742 return 1;
2743 case MOVL:
2744 return 2;
2745 default:
2746 gcc_unreachable ();
2747 }
2748 }
2749
2750 static bool
2751 m68k_rtx_costs (rtx x, int code, int outer_code, int *total,
2752 bool speed ATTRIBUTE_UNUSED)
2753 {
2754 switch (code)
2755 {
2756 case CONST_INT:
2757 /* Constant zero is super cheap due to clr instruction. */
2758 if (x == const0_rtx)
2759 *total = 0;
2760 else
2761 *total = const_int_cost (INTVAL (x));
2762 return true;
2763
2764 case CONST:
2765 case LABEL_REF:
2766 case SYMBOL_REF:
2767 *total = 3;
2768 return true;
2769
2770 case CONST_DOUBLE:
2771 /* Make 0.0 cheaper than other floating constants to
2772 encourage creating tstsf and tstdf insns. */
2773 if (outer_code == COMPARE
2774 && (x == CONST0_RTX (SFmode) || x == CONST0_RTX (DFmode)))
2775 *total = 4;
2776 else
2777 *total = 5;
2778 return true;
2779
2780 /* These are vaguely right for a 68020. */
2781 /* The costs for long multiply have been adjusted to work properly
2782 in synth_mult on the 68020, relative to an average of the time
2783 for add and the time for shift, taking away a little more because
2784 sometimes move insns are needed. */
2785 /* div?.w is relatively cheaper on the 68000, counted in COSTS_N_INSNS
2786 terms. */
2787 #define MULL_COST \
2788 (TUNE_68060 ? 2 \
2789 : TUNE_68040 ? 5 \
2790 : (TUNE_CFV2 && TUNE_EMAC) ? 3 \
2791 : (TUNE_CFV2 && TUNE_MAC) ? 4 \
2792 : TUNE_CFV2 ? 8 \
2793 : TARGET_COLDFIRE ? 3 : 13)
2794
2795 #define MULW_COST \
2796 (TUNE_68060 ? 2 \
2797 : TUNE_68040 ? 3 \
2798 : TUNE_68000_10 ? 5 \
2799 : (TUNE_CFV2 && TUNE_EMAC) ? 3 \
2800 : (TUNE_CFV2 && TUNE_MAC) ? 2 \
2801 : TUNE_CFV2 ? 8 \
2802 : TARGET_COLDFIRE ? 2 : 8)
2803
2804 #define DIVW_COST \
2805 (TARGET_CF_HWDIV ? 11 \
2806 : TUNE_68000_10 || TARGET_COLDFIRE ? 12 : 27)
2807
2808 case PLUS:
2809 /* An lea costs about three times as much as a simple add. */
2810 if (GET_MODE (x) == SImode
2811 && GET_CODE (XEXP (x, 1)) == REG
2812 && GET_CODE (XEXP (x, 0)) == MULT
2813 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
2814 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2815 && (INTVAL (XEXP (XEXP (x, 0), 1)) == 2
2816 || INTVAL (XEXP (XEXP (x, 0), 1)) == 4
2817 || INTVAL (XEXP (XEXP (x, 0), 1)) == 8))
2818 {
2819 /* lea an@(dx:l:i),am */
2820 *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 2 : 3);
2821 return true;
2822 }
2823 return false;
2824
2825 case ASHIFT:
2826 case ASHIFTRT:
2827 case LSHIFTRT:
2828 if (TUNE_68060)
2829 {
2830 *total = COSTS_N_INSNS(1);
2831 return true;
2832 }
2833 if (TUNE_68000_10)
2834 {
2835 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
2836 {
2837 if (INTVAL (XEXP (x, 1)) < 16)
2838 *total = COSTS_N_INSNS (2) + INTVAL (XEXP (x, 1)) / 2;
2839 else
2840 /* We're using clrw + swap for these cases. */
2841 *total = COSTS_N_INSNS (4) + (INTVAL (XEXP (x, 1)) - 16) / 2;
2842 }
2843 else
2844 *total = COSTS_N_INSNS (10); /* Worst case. */
2845 return true;
2846 }
2847 /* A shift by a big integer takes an extra instruction. */
2848 if (GET_CODE (XEXP (x, 1)) == CONST_INT
2849 && (INTVAL (XEXP (x, 1)) == 16))
2850 {
2851 *total = COSTS_N_INSNS (2); /* clrw;swap */
2852 return true;
2853 }
2854 if (GET_CODE (XEXP (x, 1)) == CONST_INT
2855 && !(INTVAL (XEXP (x, 1)) > 0
2856 && INTVAL (XEXP (x, 1)) <= 8))
2857 {
2858 *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 1 : 3); /* lsr #i,dn */
2859 return true;
2860 }
2861 return false;
2862
2863 case MULT:
2864 if ((GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
2865 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
2866 && GET_MODE (x) == SImode)
2867 *total = COSTS_N_INSNS (MULW_COST);
2868 else if (GET_MODE (x) == QImode || GET_MODE (x) == HImode)
2869 *total = COSTS_N_INSNS (MULW_COST);
2870 else
2871 *total = COSTS_N_INSNS (MULL_COST);
2872 return true;
2873
2874 case DIV:
2875 case UDIV:
2876 case MOD:
2877 case UMOD:
2878 if (GET_MODE (x) == QImode || GET_MODE (x) == HImode)
2879 *total = COSTS_N_INSNS (DIVW_COST); /* div.w */
2880 else if (TARGET_CF_HWDIV)
2881 *total = COSTS_N_INSNS (18);
2882 else
2883 *total = COSTS_N_INSNS (43); /* div.l */
2884 return true;
2885
2886 case ZERO_EXTRACT:
2887 if (outer_code == COMPARE)
2888 *total = 0;
2889 return false;
2890
2891 default:
2892 return false;
2893 }
2894 }
2895
2896 /* Return an instruction to move CONST_INT OPERANDS[1] into data register
2897 OPERANDS[0]. */
2898
2899 static const char *
2900 output_move_const_into_data_reg (rtx *operands)
2901 {
2902 HOST_WIDE_INT i;
2903
2904 i = INTVAL (operands[1]);
2905 switch (m68k_const_method (i))
2906 {
2907 case MVZ:
2908 return "mvzw %1,%0";
2909 case MVS:
2910 return "mvsw %1,%0";
2911 case MOVQ:
2912 return "moveq %1,%0";
2913 case NOTB:
2914 CC_STATUS_INIT;
2915 operands[1] = GEN_INT (i ^ 0xff);
2916 return "moveq %1,%0\n\tnot%.b %0";
2917 case NOTW:
2918 CC_STATUS_INIT;
2919 operands[1] = GEN_INT (i ^ 0xffff);
2920 return "moveq %1,%0\n\tnot%.w %0";
2921 case NEGW:
2922 CC_STATUS_INIT;
2923 return "moveq #-128,%0\n\tneg%.w %0";
2924 case SWAP:
2925 {
2926 unsigned u = i;
2927
2928 operands[1] = GEN_INT ((u << 16) | (u >> 16));
2929 return "moveq %1,%0\n\tswap %0";
2930 }
2931 case MOVL:
2932 return "move%.l %1,%0";
2933 default:
2934 gcc_unreachable ();
2935 }
2936 }
2937
2938 /* Return true if I can be handled by ISA B's mov3q instruction. */
2939
2940 bool
2941 valid_mov3q_const (HOST_WIDE_INT i)
2942 {
2943 return TARGET_ISAB && (i == -1 || IN_RANGE (i, 1, 7));
2944 }
2945
2946 /* Return an instruction to move CONST_INT OPERANDS[1] into OPERANDS[0].
2947 I is the value of OPERANDS[1]. */
2948
2949 static const char *
2950 output_move_simode_const (rtx *operands)
2951 {
2952 rtx dest;
2953 HOST_WIDE_INT src;
2954
2955 dest = operands[0];
2956 src = INTVAL (operands[1]);
2957 if (src == 0
2958 && (DATA_REG_P (dest) || MEM_P (dest))
2959 /* clr insns on 68000 read before writing. */
2960 && ((TARGET_68010 || TARGET_COLDFIRE)
2961 || !(MEM_P (dest) && MEM_VOLATILE_P (dest))))
2962 return "clr%.l %0";
2963 else if (GET_MODE (dest) == SImode && valid_mov3q_const (src))
2964 return "mov3q%.l %1,%0";
2965 else if (src == 0 && ADDRESS_REG_P (dest))
2966 return "sub%.l %0,%0";
2967 else if (DATA_REG_P (dest))
2968 return output_move_const_into_data_reg (operands);
2969 else if (ADDRESS_REG_P (dest) && IN_RANGE (src, -0x8000, 0x7fff))
2970 {
2971 if (valid_mov3q_const (src))
2972 return "mov3q%.l %1,%0";
2973 return "move%.w %1,%0";
2974 }
2975 else if (MEM_P (dest)
2976 && GET_CODE (XEXP (dest, 0)) == PRE_DEC
2977 && REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
2978 && IN_RANGE (src, -0x8000, 0x7fff))
2979 {
2980 if (valid_mov3q_const (src))
2981 return "mov3q%.l %1,%-";
2982 return "pea %a1";
2983 }
2984 return "move%.l %1,%0";
2985 }
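
/* For example, with the rules above and in the absence of mov3q, storing
   0 into a data register yields "clr.l %d0", storing 0 into an address
   register yields "sub.l %a0,%a0" (clr cannot target address registers),
   and pushing a 16-bit constant yields a "pea" of that value.  */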
2986
2987 const char *
2988 output_move_simode (rtx *operands)
2989 {
2990 if (GET_CODE (operands[1]) == CONST_INT)
2991 return output_move_simode_const (operands);
2992 else if ((GET_CODE (operands[1]) == SYMBOL_REF
2993 || GET_CODE (operands[1]) == CONST)
2994 && push_operand (operands[0], SImode))
2995 return "pea %a1";
2996 else if ((GET_CODE (operands[1]) == SYMBOL_REF
2997 || GET_CODE (operands[1]) == CONST)
2998 && ADDRESS_REG_P (operands[0]))
2999 return "lea %a1,%0";
3000 return "move%.l %1,%0";
3001 }
3002
3003 const char *
3004 output_move_himode (rtx *operands)
3005 {
3006 if (GET_CODE (operands[1]) == CONST_INT)
3007 {
3008 if (operands[1] == const0_rtx
3009 && (DATA_REG_P (operands[0])
3010 || GET_CODE (operands[0]) == MEM)
3011 /* clr insns on 68000 read before writing. */
3012 && ((TARGET_68010 || TARGET_COLDFIRE)
3013 || !(GET_CODE (operands[0]) == MEM
3014 && MEM_VOLATILE_P (operands[0]))))
3015 return "clr%.w %0";
3016 else if (operands[1] == const0_rtx
3017 && ADDRESS_REG_P (operands[0]))
3018 return "sub%.l %0,%0";
3019 else if (DATA_REG_P (operands[0])
3020 && INTVAL (operands[1]) < 128
3021 && INTVAL (operands[1]) >= -128)
3022 return "moveq %1,%0";
3023 else if (INTVAL (operands[1]) < 0x8000
3024 && INTVAL (operands[1]) >= -0x8000)
3025 return "move%.w %1,%0";
3026 }
3027 else if (CONSTANT_P (operands[1]))
3028 return "move%.l %1,%0";
3029 return "move%.w %1,%0";
3030 }
3031
3032 const char *
3033 output_move_qimode (rtx *operands)
3034 {
3035 /* The 68k family always modifies the stack pointer by at least 2,
3036 even for byte pushes. The 5200 (ColdFire) does not do this. */
3037
3038 /* This case is generated by the pushqi1 pattern now. */
3039 gcc_assert (!(GET_CODE (operands[0]) == MEM
3040 && GET_CODE (XEXP (operands[0], 0)) == PRE_DEC
3041 && XEXP (XEXP (operands[0], 0), 0) == stack_pointer_rtx
3042 && ! ADDRESS_REG_P (operands[1])
3043 && ! TARGET_COLDFIRE));
3044
3045 /* clr and st insns on 68000 read before writing. */
3046 if (!ADDRESS_REG_P (operands[0])
3047 && ((TARGET_68010 || TARGET_COLDFIRE)
3048 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3049 {
3050 if (operands[1] == const0_rtx)
3051 return "clr%.b %0";
3052 if ((!TARGET_COLDFIRE || DATA_REG_P (operands[0]))
3053 && GET_CODE (operands[1]) == CONST_INT
3054 && (INTVAL (operands[1]) & 255) == 255)
3055 {
3056 CC_STATUS_INIT;
3057 return "st %0";
3058 }
3059 }
3060 if (GET_CODE (operands[1]) == CONST_INT
3061 && DATA_REG_P (operands[0])
3062 && INTVAL (operands[1]) < 128
3063 && INTVAL (operands[1]) >= -128)
3064 return "moveq %1,%0";
3065 if (operands[1] == const0_rtx && ADDRESS_REG_P (operands[0]))
3066 return "sub%.l %0,%0";
3067 if (GET_CODE (operands[1]) != CONST_INT && CONSTANT_P (operands[1]))
3068 return "move%.l %1,%0";
3069 /* The 68k family (including the 5200 ColdFire) does not support byte
3070 moves to or from address registers. */
3071 if (ADDRESS_REG_P (operands[0]) || ADDRESS_REG_P (operands[1]))
3072 return "move%.w %1,%0";
3073 return "move%.b %1,%0";
3074 }
3075
3076 const char *
3077 output_move_stricthi (rtx *operands)
3078 {
3079 if (operands[1] == const0_rtx
3080 /* clr insns on 68000 read before writing. */
3081 && ((TARGET_68010 || TARGET_COLDFIRE)
3082 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3083 return "clr%.w %0";
3084 return "move%.w %1,%0";
3085 }
3086
3087 const char *
3088 output_move_strictqi (rtx *operands)
3089 {
3090 if (operands[1] == const0_rtx
3091 /* clr insns on 68000 read before writing. */
3092 && ((TARGET_68010 || TARGET_COLDFIRE)
3093 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3094 return "clr%.b %0";
3095 return "move%.b %1,%0";
3096 }
3097
3098 /* Return the best assembler insn template
3099 for moving operands[1] into operands[0] as a fullword. */
3100
3101 static const char *
3102 singlemove_string (rtx *operands)
3103 {
3104 if (GET_CODE (operands[1]) == CONST_INT)
3105 return output_move_simode_const (operands);
3106 return "move%.l %1,%0";
3107 }
3108
3109
3110 /* Output assembler or rtl code to perform a doubleword move insn
3111 with operands OPERANDS.
3112 Pointers to 3 helper functions should be specified:
3113 HANDLE_REG_ADJUST to adjust a register by a small value,
3114 HANDLE_COMPADR to compute an address and
3115 HANDLE_MOVSI to move 4 bytes. */
3116
3117 static void
3118 handle_move_double (rtx operands[2],
3119 void (*handle_reg_adjust) (rtx, int),
3120 void (*handle_compadr) (rtx [2]),
3121 void (*handle_movsi) (rtx [2]))
3122 {
3123 enum
3124 {
3125 REGOP, OFFSOP, MEMOP, PUSHOP, POPOP, CNSTOP, RNDOP
3126 } optype0, optype1;
3127 rtx latehalf[2];
3128 rtx middlehalf[2];
3129 rtx xops[2];
3130 rtx addreg0 = 0, addreg1 = 0;
3131 int dest_overlapped_low = 0;
3132 int size = GET_MODE_SIZE (GET_MODE (operands[0]));
3133
3134 middlehalf[0] = 0;
3135 middlehalf[1] = 0;
3136
3137 /* First classify both operands. */
3138
3139 if (REG_P (operands[0]))
3140 optype0 = REGOP;
3141 else if (offsettable_memref_p (operands[0]))
3142 optype0 = OFFSOP;
3143 else if (GET_CODE (XEXP (operands[0], 0)) == POST_INC)
3144 optype0 = POPOP;
3145 else if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC)
3146 optype0 = PUSHOP;
3147 else if (GET_CODE (operands[0]) == MEM)
3148 optype0 = MEMOP;
3149 else
3150 optype0 = RNDOP;
3151
3152 if (REG_P (operands[1]))
3153 optype1 = REGOP;
3154 else if (CONSTANT_P (operands[1]))
3155 optype1 = CNSTOP;
3156 else if (offsettable_memref_p (operands[1]))
3157 optype1 = OFFSOP;
3158 else if (GET_CODE (XEXP (operands[1], 0)) == POST_INC)
3159 optype1 = POPOP;
3160 else if (GET_CODE (XEXP (operands[1], 0)) == PRE_DEC)
3161 optype1 = PUSHOP;
3162 else if (GET_CODE (operands[1]) == MEM)
3163 optype1 = MEMOP;
3164 else
3165 optype1 = RNDOP;
3166
3167 /* Check for the cases that the operand constraints are not
3168 supposed to allow. Generating code for these cases is
3169 painful. */
3170 gcc_assert (optype0 != RNDOP && optype1 != RNDOP);
3171
3172 /* If one operand is decrementing and one is incrementing
3173 decrement the former register explicitly
3174 and change that operand into ordinary indexing. */
3175
3176 if (optype0 == PUSHOP && optype1 == POPOP)
3177 {
3178 operands[0] = XEXP (XEXP (operands[0], 0), 0);
3179
3180 handle_reg_adjust (operands[0], -size);
3181
3182 if (GET_MODE (operands[1]) == XFmode)
3183 operands[0] = gen_rtx_MEM (XFmode, operands[0]);
3184 else if (GET_MODE (operands[1]) == DFmode)
3185 operands[0] = gen_rtx_MEM (DFmode, operands[0]);
3186 else
3187 operands[0] = gen_rtx_MEM (DImode, operands[0]);
3188 optype0 = OFFSOP;
3189 }
3190 if (optype0 == POPOP && optype1 == PUSHOP)
3191 {
3192 operands[1] = XEXP (XEXP (operands[1], 0), 0);
3193
3194 handle_reg_adjust (operands[1], -size);
3195
3196 if (GET_MODE (operands[1]) == XFmode)
3197 operands[1] = gen_rtx_MEM (XFmode, operands[1]);
3198 else if (GET_MODE (operands[1]) == DFmode)
3199 operands[1] = gen_rtx_MEM (DFmode, operands[1]);
3200 else
3201 operands[1] = gen_rtx_MEM (DImode, operands[1]);
3202 optype1 = OFFSOP;
3203 }
3204
3205 /* If an operand is an unoffsettable memory ref, find a register
3206 we can increment temporarily to make it refer to the second word. */
3207
3208 if (optype0 == MEMOP)
3209 addreg0 = find_addr_reg (XEXP (operands[0], 0));
3210
3211 if (optype1 == MEMOP)
3212 addreg1 = find_addr_reg (XEXP (operands[1], 0));
3213
3214 /* Ok, we can do one word at a time.
3215 Normally we do the low-numbered word first,
3216 but if either operand is autodecrementing then we
3217 do the high-numbered word first.
3218
3219 In either case, set up in LATEHALF the operands to use
3220 for the high-numbered word and in some cases alter the
3221 operands in OPERANDS to be suitable for the low-numbered word. */
3222
3223 if (size == 12)
3224 {
3225 if (optype0 == REGOP)
3226 {
3227 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 2);
3228 middlehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
3229 }
3230 else if (optype0 == OFFSOP)
3231 {
3232 middlehalf[0] = adjust_address (operands[0], SImode, 4);
3233 latehalf[0] = adjust_address (operands[0], SImode, size - 4);
3234 }
3235 else
3236 {
3237 middlehalf[0] = adjust_address (operands[0], SImode, 0);
3238 latehalf[0] = adjust_address (operands[0], SImode, 0);
3239 }
3240
3241 if (optype1 == REGOP)
3242 {
3243 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 2);
3244 middlehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
3245 }
3246 else if (optype1 == OFFSOP)
3247 {
3248 middlehalf[1] = adjust_address (operands[1], SImode, 4);
3249 latehalf[1] = adjust_address (operands[1], SImode, size - 4);
3250 }
3251 else if (optype1 == CNSTOP)
3252 {
3253 if (GET_CODE (operands[1]) == CONST_DOUBLE)
3254 {
3255 REAL_VALUE_TYPE r;
3256 long l[3];
3257
3258 REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
3259 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
3260 operands[1] = GEN_INT (l[0]);
3261 middlehalf[1] = GEN_INT (l[1]);
3262 latehalf[1] = GEN_INT (l[2]);
3263 }
3264 else
3265 {
3266 /* No non-CONST_DOUBLE constant should ever appear
3267 here. */
3268 gcc_assert (!CONSTANT_P (operands[1]));
3269 }
3270 }
3271 else
3272 {
3273 middlehalf[1] = adjust_address (operands[1], SImode, 0);
3274 latehalf[1] = adjust_address (operands[1], SImode, 0);
3275 }
3276 }
3277 else
3278 /* size is not 12: */
3279 {
3280 if (optype0 == REGOP)
3281 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
3282 else if (optype0 == OFFSOP)
3283 latehalf[0] = adjust_address (operands[0], SImode, size - 4);
3284 else
3285 latehalf[0] = adjust_address (operands[0], SImode, 0);
3286
3287 if (optype1 == REGOP)
3288 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
3289 else if (optype1 == OFFSOP)
3290 latehalf[1] = adjust_address (operands[1], SImode, size - 4);
3291 else if (optype1 == CNSTOP)
3292 split_double (operands[1], &operands[1], &latehalf[1]);
3293 else
3294 latehalf[1] = adjust_address (operands[1], SImode, 0);
3295 }
3296
3297 /* If insn is effectively movd N(sp),-(sp) then we will do the
3298 high word first. We should use the adjusted operand 1 (which is N+4(sp))
3299 for the low word as well, to compensate for the first decrement of sp. */
3300 if (optype0 == PUSHOP
3301 && REGNO (XEXP (XEXP (operands[0], 0), 0)) == STACK_POINTER_REGNUM
3302 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
3303 operands[1] = middlehalf[1] = latehalf[1];
3304
3305 /* For (set (reg:DI N) (mem:DI ... (reg:SI N) ...)),
3306 if the upper part of reg N does not appear in the MEM, arrange to
3307 emit the move late-half first. Otherwise, compute the MEM address
3308 into the upper part of N and use that as a pointer to the memory
3309 operand. */
3310 if (optype0 == REGOP
3311 && (optype1 == OFFSOP || optype1 == MEMOP))
3312 {
3313 rtx testlow = gen_rtx_REG (SImode, REGNO (operands[0]));
3314
3315 if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
3316 && reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
3317 {
3318 /* If both halves of dest are used in the src memory address,
3319 compute the address into latehalf of dest.
3320 Note that this can't happen if the dest is two data regs. */
3321 compadr:
3322 xops[0] = latehalf[0];
3323 xops[1] = XEXP (operands[1], 0);
3324
3325 handle_compadr (xops);
3326 if (GET_MODE (operands[1]) == XFmode)
3327 {
3328 operands[1] = gen_rtx_MEM (XFmode, latehalf[0]);
3329 middlehalf[1] = adjust_address (operands[1], DImode, size - 8);
3330 latehalf[1] = adjust_address (operands[1], DImode, size - 4);
3331 }
3332 else
3333 {
3334 operands[1] = gen_rtx_MEM (DImode, latehalf[0]);
3335 latehalf[1] = adjust_address (operands[1], DImode, size - 4);
3336 }
3337 }
3338 else if (size == 12
3339 && reg_overlap_mentioned_p (middlehalf[0],
3340 XEXP (operands[1], 0)))
3341 {
3342 /* Check for two regs used by both source and dest.
3343 Note that this can't happen if the dest is all data regs.
3344 It can happen if the dest is d6, d7, a0.
3345 But in that case, latehalf is an addr reg, so
3346 the code at compadr does ok. */
3347
3348 if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
3349 || reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
3350 goto compadr;
3351
3352 /* JRV says this can't happen: */
3353 gcc_assert (!addreg0 && !addreg1);
3354
3355 /* Only the middle reg conflicts; simply put it last. */
3356 handle_movsi (operands);
3357 handle_movsi (latehalf);
3358 handle_movsi (middlehalf);
3359
3360 return;
3361 }
3362 else if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0)))
3363 /* If the low half of dest is mentioned in the source memory
3364 address, arrange to emit the late half of the move first. */
3365 dest_overlapped_low = 1;
3366 }
3367
3368 /* If one or both operands are autodecrementing,
3369 do the two words, high-numbered first. */
3370
3371 /* Likewise, if the first move would clobber the source of the second
3372 one, do them in the other order. This happens only for registers;
3373 such overlap can't happen in memory unless the user explicitly
3374 sets it up, and that is an undefined circumstance. */
3375
3376 if (optype0 == PUSHOP || optype1 == PUSHOP
3377 || (optype0 == REGOP && optype1 == REGOP
3378 && ((middlehalf[1] && REGNO (operands[0]) == REGNO (middlehalf[1]))
3379 || REGNO (operands[0]) == REGNO (latehalf[1])))
3380 || dest_overlapped_low)
3381 {
3382 /* Make any unoffsettable addresses point at high-numbered word. */
3383 if (addreg0)
3384 handle_reg_adjust (addreg0, size - 4);
3385 if (addreg1)
3386 handle_reg_adjust (addreg1, size - 4);
3387
3388 /* Do that word. */
3389 handle_movsi (latehalf);
3390
3391 /* Undo the adds we just did. */
3392 if (addreg0)
3393 handle_reg_adjust (addreg0, -4);
3394 if (addreg1)
3395 handle_reg_adjust (addreg1, -4);
3396
3397 if (size == 12)
3398 {
3399 handle_movsi (middlehalf);
3400
3401 if (addreg0)
3402 handle_reg_adjust (addreg0, -4);
3403 if (addreg1)
3404 handle_reg_adjust (addreg1, -4);
3405 }
3406
3407 /* Do low-numbered word. */
3408
3409 handle_movsi (operands);
3410 return;
3411 }
3412
3413 /* Normal case: do the two words, low-numbered first. */
3414
3415 m68k_final_prescan_insn (NULL, operands, 2);
3416 handle_movsi (operands);
3417
3418 /* Do the middle one of the three words for long double. */
3419 if (size == 12)
3420 {
3421 if (addreg0)
3422 handle_reg_adjust (addreg0, 4);
3423 if (addreg1)
3424 handle_reg_adjust (addreg1, 4);
3425
3426 m68k_final_prescan_insn (NULL, middlehalf, 2);
3427 handle_movsi (middlehalf);
3428 }
3429
3430 /* Make any unoffsettable addresses point at high-numbered word. */
3431 if (addreg0)
3432 handle_reg_adjust (addreg0, 4);
3433 if (addreg1)
3434 handle_reg_adjust (addreg1, 4);
3435
3436 /* Do that word. */
3437 m68k_final_prescan_insn (NULL, latehalf, 2);
3438 handle_movsi (latehalf);
3439
3440 /* Undo the adds we just did. */
3441 if (addreg0)
3442 handle_reg_adjust (addreg0, -(size - 4));
3443 if (addreg1)
3444 handle_reg_adjust (addreg1, -(size - 4));
3445
3446 return;
3447 }
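
/* As an example of the ordering logic above: for a DImode push such as
   (set (mem:DI (pre_dec SP)) (reg:DI D0)), optype0 is PUSHOP, so the
   high-numbered word (D1) is pushed first and ends up at the higher
   address, keeping the big-endian word order in memory.  */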
3448
3449 /* Output assembler code to adjust REG by N. */
3450 static void
3451 output_reg_adjust (rtx reg, int n)
3452 {
3453 const char *s;
3454
3455 gcc_assert (GET_MODE (reg) == SImode
3456 && -12 <= n && n != 0 && n <= 12);
3457
3458 switch (n)
3459 {
3460 case 12:
3461 s = "add%.l #12,%0";
3462 break;
3463
3464 case 8:
3465 s = "addq%.l #8,%0";
3466 break;
3467
3468 case 4:
3469 s = "addq%.l #4,%0";
3470 break;
3471
3472 case -12:
3473 s = "sub%.l #12,%0";
3474 break;
3475
3476 case -8:
3477 s = "subq%.l #8,%0";
3478 break;
3479
3480 case -4:
3481 s = "subq%.l #4,%0";
3482 break;
3483
3484 default:
3485 gcc_unreachable ();
3486 s = NULL;
3487 }
3488
3489 output_asm_insn (s, &reg);
3490 }
3491
3492 /* Emit rtl code to adjust REG by N. */
3493 static void
3494 emit_reg_adjust (rtx reg1, int n)
3495 {
3496 rtx reg2;
3497
3498 gcc_assert (GET_MODE (reg1) == SImode
3499 && -12 <= n && n != 0 && n <= 12);
3500
3501 reg1 = copy_rtx (reg1);
3502 reg2 = copy_rtx (reg1);
3503
3504 if (n < 0)
3505 emit_insn (gen_subsi3 (reg1, reg2, GEN_INT (-n)));
3506 else if (n > 0)
3507 emit_insn (gen_addsi3 (reg1, reg2, GEN_INT (n)));
3508 else
3509 gcc_unreachable ();
3510 }
3511
3512 /* Output assembler to load address OPERANDS[1] into register OPERANDS[0]. */
3513 static void
3514 output_compadr (rtx operands[2])
3515 {
3516 output_asm_insn ("lea %a1,%0", operands);
3517 }
3518
3519 /* Output the best assembler insn for moving operands[1] into operands[0]
3520 as a fullword. */
3521 static void
3522 output_movsi (rtx operands[2])
3523 {
3524 output_asm_insn (singlemove_string (operands), operands);
3525 }
3526
3527 /* Copy OP and change its mode to MODE. */
3528 static rtx
3529 copy_operand (rtx op, enum machine_mode mode)
3530 {
3531 /* ??? This looks really ugly. There must be a better way
3532 to change a mode on the operand. */
3533 if (GET_MODE (op) != VOIDmode)
3534 {
3535 if (REG_P (op))
3536 op = gen_rtx_REG (mode, REGNO (op));
3537 else
3538 {
3539 op = copy_rtx (op);
3540 PUT_MODE (op, mode);
3541 }
3542 }
3543
3544 return op;
3545 }
3546
3547 /* Emit rtl code for moving operands[1] into operands[0] as a fullword. */
3548 static void
3549 emit_movsi (rtx operands[2])
3550 {
3551 operands[0] = copy_operand (operands[0], SImode);
3552 operands[1] = copy_operand (operands[1], SImode);
3553
3554 emit_insn (gen_movsi (operands[0], operands[1]));
3555 }
3556
3557 /* Output assembler code to perform a doubleword move insn
3558 with operands OPERANDS. */
3559 const char *
3560 output_move_double (rtx *operands)
3561 {
3562 handle_move_double (operands,
3563 output_reg_adjust, output_compadr, output_movsi);
3564
3565 return "";
3566 }
3567
3568 /* Emit RTL code to perform a doubleword move insn
3569 with operands OPERANDS. */
3570 void
3571 m68k_emit_move_double (rtx operands[2])
3572 {
3573 handle_move_double (operands, emit_reg_adjust, emit_movsi, emit_movsi);
3574 }
3575
3576 /* Ensure mode of ORIG, a REG rtx, is MODE. Returns either ORIG or a
3577 new rtx with the correct mode. */
3578
3579 static rtx
3580 force_mode (enum machine_mode mode, rtx orig)
3581 {
3582 if (mode == GET_MODE (orig))
3583 return orig;
3584
3585 if (REGNO (orig) >= FIRST_PSEUDO_REGISTER)
3586 abort ();
3587
3588 return gen_rtx_REG (mode, REGNO (orig));
3589 }
3590
3591 static int
3592 fp_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3593 {
3594 return reg_renumber && FP_REG_P (op);
3595 }
3596
3597 /* Emit insns to move operands[1] into operands[0].
3598
3599 Return 1 if we have written out everything that needs to be done to
3600 do the move. Otherwise, return 0 and the caller will emit the move
3601 normally.
3602
3603 Note SCRATCH_REG may not be in the proper mode depending on how it
3604 will be used. This routine is responsible for creating a new copy
3605 of SCRATCH_REG in the proper mode. */
3606
3607 int
3608 emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
3609 {
3610 register rtx operand0 = operands[0];
3611 register rtx operand1 = operands[1];
3612 register rtx tem;
3613
3614 if (scratch_reg
3615 && reload_in_progress && GET_CODE (operand0) == REG
3616 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
3617 operand0 = reg_equiv_mem (REGNO (operand0));
3618 else if (scratch_reg
3619 && reload_in_progress && GET_CODE (operand0) == SUBREG
3620 && GET_CODE (SUBREG_REG (operand0)) == REG
3621 && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
3622 {
3623 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
3624 the code which tracks sets/uses for delete_output_reload. */
3625 rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
3626 reg_equiv_mem (REGNO (SUBREG_REG (operand0))),
3627 SUBREG_BYTE (operand0));
3628 operand0 = alter_subreg (&temp);
3629 }
3630
3631 if (scratch_reg
3632 && reload_in_progress && GET_CODE (operand1) == REG
3633 && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
3634 operand1 = reg_equiv_mem (REGNO (operand1));
3635 else if (scratch_reg
3636 && reload_in_progress && GET_CODE (operand1) == SUBREG
3637 && GET_CODE (SUBREG_REG (operand1)) == REG
3638 && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
3639 {
3640 /* We must not alter SUBREG_BYTE (operand1) since that would confuse
3641 the code which tracks sets/uses for delete_output_reload. */
3642 rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
3643 reg_equiv_mem (REGNO (SUBREG_REG (operand1))),
3644 SUBREG_BYTE (operand1));
3645 operand1 = alter_subreg (&temp);
3646 }
3647
3648 if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
3649 && ((tem = find_replacement (&XEXP (operand0, 0)))
3650 != XEXP (operand0, 0)))
3651 operand0 = gen_rtx_MEM (GET_MODE (operand0), tem);
3652 if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
3653 && ((tem = find_replacement (&XEXP (operand1, 0)))
3654 != XEXP (operand1, 0)))
3655 operand1 = gen_rtx_MEM (GET_MODE (operand1), tem);
3656
3657 /* Handle secondary reloads for loads/stores of FP registers where
3658 the address is symbolic by using the scratch register. */
3659 if (fp_reg_operand (operand0, mode)
3660 && ((GET_CODE (operand1) == MEM
3661 && ! memory_address_p (DFmode, XEXP (operand1, 0)))
3662 || ((GET_CODE (operand1) == SUBREG
3663 && GET_CODE (XEXP (operand1, 0)) == MEM
3664 && !memory_address_p (DFmode, XEXP (XEXP (operand1, 0), 0)))))
3665 && scratch_reg)
3666 {
3667 if (GET_CODE (operand1) == SUBREG)
3668 operand1 = XEXP (operand1, 0);
3669
3670 /* SCRATCH_REG will hold an address. We want
3671 it in SImode regardless of what mode it was originally given
3672 to us. */
3673 scratch_reg = force_mode (SImode, scratch_reg);
3674
3675 /* D might not fit in 14 bits either; for such cases load D into
3676 scratch reg. */
3677 if (!memory_address_p (Pmode, XEXP (operand1, 0)))
3678 {
3679 emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
3680 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
3681 Pmode,
3682 XEXP (XEXP (operand1, 0), 0),
3683 scratch_reg));
3684 }
3685 else
3686 emit_move_insn (scratch_reg, XEXP (operand1, 0));
3687 emit_insn (gen_rtx_SET (VOIDmode, operand0,
3688 gen_rtx_MEM (mode, scratch_reg)));
3689 return 1;
3690 }
3691 else if (fp_reg_operand (operand1, mode)
3692 && ((GET_CODE (operand0) == MEM
3693 && ! memory_address_p (DFmode, XEXP (operand0, 0)))
3694 || ((GET_CODE (operand0) == SUBREG)
3695 && GET_CODE (XEXP (operand0, 0)) == MEM
3696 && !memory_address_p (DFmode, XEXP (XEXP (operand0, 0), 0))))
3697 && scratch_reg)
3698 {
3699 if (GET_CODE (operand0) == SUBREG)
3700 operand0 = XEXP (operand0, 0);
3701
3702 /* SCRATCH_REG will hold an address and maybe the actual data. We want
3703 it in SImode regardless of what mode it was originally given
3704 to us. */
3705 scratch_reg = force_mode (SImode, scratch_reg);
3706
3707 /* D might not fit in 14 bits either; for such cases load D into
3708 scratch reg. */
3709 if (!memory_address_p (Pmode, XEXP (operand0, 0)))
3710 {
3711 emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
3712 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0,
3713 0)),
3714 Pmode,
3715 XEXP (XEXP (operand0, 0),
3716 0),
3717 scratch_reg));
3718 }
3719 else
3720 emit_move_insn (scratch_reg, XEXP (operand0, 0));
3721 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_MEM (mode, scratch_reg),
3722 operand1));
3723 return 1;
3724 }
3725 /* Handle secondary reloads for loads of FP registers from constant
3726 expressions by forcing the constant into memory.
3727
3728 Use scratch_reg to hold the address of the memory location.
3729
3730 The proper fix is to change PREFERRED_RELOAD_CLASS to return
3731 NO_REGS when presented with a const_int and a register class
3732 containing only FP registers. Doing so unfortunately creates
3733 more problems than it solves. Fix this for 2.5. */
3734 else if (fp_reg_operand (operand0, mode)
3735 && CONSTANT_P (operand1)
3736 && scratch_reg)
3737 {
3738 rtx xoperands[2];
3739
3740 /* SCRATCH_REG will hold an address and maybe the actual data. We want
3741 it in SImode regardless of what mode it was originally given
3742 to us. */
3743 scratch_reg = force_mode (SImode, scratch_reg);
3744
3745 /* Force the constant into memory and put the address of the
3746 memory location into scratch_reg. */
3747 xoperands[0] = scratch_reg;
3748 xoperands[1] = XEXP (force_const_mem (mode, operand1), 0);
3749 emit_insn (gen_rtx_SET (mode, scratch_reg, xoperands[1]));
3750
3751 /* Now load the destination register. */
3752 emit_insn (gen_rtx_SET (mode, operand0,
3753 gen_rtx_MEM (mode, scratch_reg)));
3754 return 1;
3755 }
3756
3757 /* Now have insn-emit do whatever it normally does. */
3758 return 0;
3759 }
3760
3761 /* Split one or more DImode RTL references into pairs of SImode
3762 references. The RTL can be REG, offsettable MEM, integer constant, or
3763 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
3764 split and "num" is its length. lo_half and hi_half are output arrays
3765 that parallel "operands". */
3766
3767 void
3768 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
3769 {
3770 while (num--)
3771 {
3772 rtx op = operands[num];
3773
3774 /* simplify_subreg refuses to split volatile memory references,
3775 but we still have to handle them. */
3776 if (GET_CODE (op) == MEM)
3777 {
3778 lo_half[num] = adjust_address (op, SImode, 4);
3779 hi_half[num] = adjust_address (op, SImode, 0);
3780 }
3781 else
3782 {
3783 lo_half[num] = simplify_gen_subreg (SImode, op,
3784 GET_MODE (op) == VOIDmode
3785 ? DImode : GET_MODE (op), 4);
3786 hi_half[num] = simplify_gen_subreg (SImode, op,
3787 GET_MODE (op) == VOIDmode
3788 ? DImode : GET_MODE (op), 0);
3789 }
3790 }
3791 }
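/* For example, splitting the register pair (reg:DI %d0) makes hi_half
   refer to %d0 and lo_half to %d1, mirroring the big-endian word order
   used for the memory case above.  */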
3792
3793 /* Split X into a base and a constant offset, storing them in *BASE
3794 and *OFFSET respectively. */
3795
3796 static void
3797 m68k_split_offset (rtx x, rtx *base, HOST_WIDE_INT *offset)
3798 {
3799 *offset = 0;
3800 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
3801 {
3802 *offset += INTVAL (XEXP (x, 1));
3803 x = XEXP (x, 0);
3804 }
3805 *base = x;
3806 }
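/* For example, given X = (plus (reg %a0) (const_int 8)) this sets *BASE
   to (reg %a0) and *OFFSET to 8; for a bare (reg %a0) it sets *OFFSET
   to 0 and *BASE to the register itself.  */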
3807
3808 /* Return true if PATTERN is a PARALLEL suitable for a movem or fmovem
3809 instruction. STORE_P is true if the move is a store, false for a load.
3810
3811 If the instruction uses post-increment or pre-decrement addressing,
3812 AUTOMOD_BASE is the base register and AUTOMOD_OFFSET is the total
3813 adjustment. This adjustment will be made by the first element of
3814 PARALLEL, with the loads or stores starting at element 1. If the
3815 instruction does not use post-increment or pre-decrement addressing,
3816 AUTOMOD_BASE is null, AUTOMOD_OFFSET is 0, and the loads or stores
3817 start at element 0. */
3818
3819 bool
3820 m68k_movem_pattern_p (rtx pattern, rtx automod_base,
3821 HOST_WIDE_INT automod_offset, bool store_p)
3822 {
3823 rtx base, mem_base, set, mem, reg, last_reg;
3824 HOST_WIDE_INT offset, mem_offset;
3825 int i, first, len;
3826 enum reg_class rclass;
3827
3828 len = XVECLEN (pattern, 0);
3829 first = (automod_base != NULL);
3830
3831 if (automod_base)
3832 {
3833 /* Stores must be pre-decrement and loads must be post-increment. */
3834 if (store_p != (automod_offset < 0))
3835 return false;
3836
3837 /* Work out the base and offset for the lowest memory location. */
3838 base = automod_base;
3839 offset = (automod_offset < 0 ? automod_offset : 0);
3840 }
3841 else
3842 {
3843 /* Allow any valid base and offset in the first access. */
3844 base = NULL;
3845 offset = 0;
3846 }
3847
3848 last_reg = NULL;
3849 rclass = NO_REGS;
3850 for (i = first; i < len; i++)
3851 {
3852 /* We need a plain SET. */
3853 set = XVECEXP (pattern, 0, i);
3854 if (GET_CODE (set) != SET)
3855 return false;
3856
3857 /* Check that we have a memory location... */
3858 mem = XEXP (set, !store_p);
3859 if (!MEM_P (mem) || !memory_operand (mem, VOIDmode))
3860 return false;
3861
3862 /* ...with the right address. */
3863 if (base == NULL)
3864 {
3865 m68k_split_offset (XEXP (mem, 0), &base, &offset);
3866 /* The ColdFire instruction only allows (An) and (d16,An) modes.
3867 There are no mode restrictions for 680x0 besides the
3868 automodification rules enforced above. */
3869 if (TARGET_COLDFIRE
3870 && !m68k_legitimate_base_reg_p (base, reload_completed))
3871 return false;
3872 }
3873 else
3874 {
3875 m68k_split_offset (XEXP (mem, 0), &mem_base, &mem_offset);
3876 if (!rtx_equal_p (base, mem_base) || offset != mem_offset)
3877 return false;
3878 }
3879
3880 /* Check that we have a register of the required mode and class. */
3881 reg = XEXP (set, store_p);
3882 if (!REG_P (reg)
3883 || !HARD_REGISTER_P (reg)
3884 || GET_MODE (reg) != reg_raw_mode[REGNO (reg)])
3885 return false;
3886
3887 if (last_reg)
3888 {
3889 /* The register must belong to RCLASS and have a higher number
3890 than the register in the previous SET. */
3891 if (!TEST_HARD_REG_BIT (reg_class_contents[rclass], REGNO (reg))
3892 || REGNO (last_reg) >= REGNO (reg))
3893 return false;
3894 }
3895 else
3896 {
3897 /* Work out which register class we need. */
3898 if (INT_REGNO_P (REGNO (reg)))
3899 rclass = GENERAL_REGS;
3900 else if (FP_REGNO_P (REGNO (reg)))
3901 rclass = FP_REGS;
3902 else
3903 return false;
3904 }
3905
3906 last_reg = reg;
3907 offset += GET_MODE_SIZE (GET_MODE (reg));
3908 }
3909
3910 /* If we have an automodification, check whether the final offset is OK. */
3911 if (automod_base && offset != (automod_offset < 0 ? 0 : automod_offset))
3912 return false;
3913
3914 /* Reject unprofitable cases. */
3915 if (len < first + (rclass == FP_REGS ? MIN_FMOVEM_REGS : MIN_MOVEM_REGS))
3916 return false;
3917
3918 return true;
3919 }
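/* As an illustration, a pre-decrement store of %d2-%d4 is a PARALLEL
   whose element 0 adds -12 to the base register (AUTOMOD_OFFSET = -12)
   and whose elements 1-3 store the registers at offsets -12, -8 and -4
   from the old base value, in increasing register-number order.  */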
3920
3921 /* Return the assembly code template for a movem or fmovem instruction
3922 whose pattern is given by PATTERN. Store the template's operands
3923 in OPERANDS.
3924
3925 If the instruction uses post-increment or pre-decrement addressing,
3926 AUTOMOD_OFFSET is the total adjustment, otherwise it is 0. STORE_P
3927 is true if this is a store instruction. */
3928
3929 const char *
3930 m68k_output_movem (rtx *operands, rtx pattern,
3931 HOST_WIDE_INT automod_offset, bool store_p)
3932 {
3933 unsigned int mask;
3934 int i, first;
3935
3936 gcc_assert (GET_CODE (pattern) == PARALLEL);
3937 mask = 0;
3938 first = (automod_offset != 0);
3939 for (i = first; i < XVECLEN (pattern, 0); i++)
3940 {
3941 /* When using movem with pre-decrement addressing, register X + D0_REG
3942 is controlled by bit 15 - X. For all other addressing modes,
3943 register X + D0_REG is controlled by bit X. Confusingly, the
3944 register mask for fmovem is in the opposite order to that for
3945 movem. */
3946 unsigned int regno;
3947
3948 gcc_assert (MEM_P (XEXP (XVECEXP (pattern, 0, i), !store_p)));
3949 gcc_assert (REG_P (XEXP (XVECEXP (pattern, 0, i), store_p)));
3950 regno = REGNO (XEXP (XVECEXP (pattern, 0, i), store_p));
3951 if (automod_offset < 0)
3952 {
3953 if (FP_REGNO_P (regno))
3954 mask |= 1 << (regno - FP0_REG);
3955 else
3956 mask |= 1 << (15 - (regno - D0_REG));
3957 }
3958 else
3959 {
3960 if (FP_REGNO_P (regno))
3961 mask |= 1 << (7 - (regno - FP0_REG));
3962 else
3963 mask |= 1 << (regno - D0_REG);
3964 }
3965 }
3966 CC_STATUS_INIT;
3967
3968 if (automod_offset == 0)
3969 operands[0] = XEXP (XEXP (XVECEXP (pattern, 0, first), !store_p), 0);
3970 else if (automod_offset < 0)
3971 operands[0] = gen_rtx_PRE_DEC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
3972 else
3973 operands[0] = gen_rtx_POST_INC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
3974 operands[1] = GEN_INT (mask);
3975 if (FP_REGNO_P (REGNO (XEXP (XVECEXP (pattern, 0, first), store_p))))
3976 {
3977 if (store_p)
3978 return "fmovem %1,%a0";
3979 else
3980 return "fmovem %a0,%1";
3981 }
3982 else
3983 {
3984 if (store_p)
3985 return "movem%.l %1,%a0";
3986 else
3987 return "movem%.l %a0,%1";
3988 }
3989 }
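/* For example, storing %d2-%d4 with pre-decrement addressing sets mask
   bits 15 - 2, 15 - 3 and 15 - 4, i.e. MASK = 0x3800, and the returned
   template expands to something like "movem.l #0x3800,-(%sp)" under
   Motorola syntax, assuming the stack pointer is the base register.  */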
3990
3991 /* Return a REG that occurs in ADDR with coefficient 1.
3992 ADDR can be effectively incremented by incrementing REG. */
3993
3994 static rtx
3995 find_addr_reg (rtx addr)
3996 {
3997 while (GET_CODE (addr) == PLUS)
3998 {
3999 if (GET_CODE (XEXP (addr, 0)) == REG)
4000 addr = XEXP (addr, 0);
4001 else if (GET_CODE (XEXP (addr, 1)) == REG)
4002 addr = XEXP (addr, 1);
4003 else if (CONSTANT_P (XEXP (addr, 0)))
4004 addr = XEXP (addr, 1);
4005 else if (CONSTANT_P (XEXP (addr, 1)))
4006 addr = XEXP (addr, 0);
4007 else
4008 gcc_unreachable ();
4009 }
4010 gcc_assert (GET_CODE (addr) == REG);
4011 return addr;
4012 }
4013
4014 /* Output assembler code to perform a 32-bit 3-operand add. */
4015
4016 const char *
4017 output_addsi3 (rtx *operands)
4018 {
4019 if (! operands_match_p (operands[0], operands[1]))
4020 {
4021 if (!ADDRESS_REG_P (operands[1]))
4022 {
4023 rtx tmp = operands[1];
4024
4025 operands[1] = operands[2];
4026 operands[2] = tmp;
4027 }
4028
4029 /* These insns can result from reloads to access
4030 stack slots over 64k from the frame pointer. */
4031 if (GET_CODE (operands[2]) == CONST_INT
4032 && (INTVAL (operands[2]) < -32768 || INTVAL (operands[2]) > 32767))
4033 return "move%.l %2,%0\n\tadd%.l %1,%0";
4034 if (GET_CODE (operands[2]) == REG)
4035 return MOTOROLA ? "lea (%1,%2.l),%0" : "lea %1@(0,%2:l),%0";
4036 return MOTOROLA ? "lea (%c2,%1),%0" : "lea %1@(%c2),%0";
4037 }
4038 if (GET_CODE (operands[2]) == CONST_INT)
4039 {
4040 if (INTVAL (operands[2]) > 0
4041 && INTVAL (operands[2]) <= 8)
4042 return "addq%.l %2,%0";
4043 if (INTVAL (operands[2]) < 0
4044 && INTVAL (operands[2]) >= -8)
4045 {
4046 operands[2] = GEN_INT (- INTVAL (operands[2]));
4047 return "subq%.l %2,%0";
4048 }
4049 /* On the CPU32 it is faster to use two addql instructions to
4050 add a small integer (8 < N <= 16) to a register.
4051 Likewise for subql. */
4052 if (TUNE_CPU32 && REG_P (operands[0]))
4053 {
4054 if (INTVAL (operands[2]) > 8
4055 && INTVAL (operands[2]) <= 16)
4056 {
4057 operands[2] = GEN_INT (INTVAL (operands[2]) - 8);
4058 return "addq%.l #8,%0\n\taddq%.l %2,%0";
4059 }
4060 if (INTVAL (operands[2]) < -8
4061 && INTVAL (operands[2]) >= -16)
4062 {
4063 operands[2] = GEN_INT (- INTVAL (operands[2]) - 8);
4064 return "subq%.l #8,%0\n\tsubq%.l %2,%0";
4065 }
4066 }
4067 if (ADDRESS_REG_P (operands[0])
4068 && INTVAL (operands[2]) >= -0x8000
4069 && INTVAL (operands[2]) < 0x8000)
4070 {
4071 if (TUNE_68040)
4072 return "add%.w %2,%0";
4073 else
4074 return MOTOROLA ? "lea (%c2,%0),%0" : "lea %0@(%c2),%0";
4075 }
4076 }
4077 return "add%.l %2,%0";
4078 }
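/* For example, when tuning for CPU32 an addition of 12 to a data
   register is emitted (in Motorola syntax) as "addq.l #8,%d0" followed
   by "addq.l #4,%d0", whereas an addition of 5 is a single
   "addq.l #5,%d0".  */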
4079 \f
4080 /* Store in cc_status the expressions that the condition codes will
4081 describe after execution of an instruction whose pattern is EXP.
4082 Do not alter them if the instruction would not alter the cc's. */
4083
4084 /* On the 68000, all the insns to store in an address register fail to
4085 set the cc's. However, in some cases these instructions may make it
4086 invalid to use the saved cc's. In those cases we clear out
4087 some or all of the saved cc's so they won't be used. */
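/* For example, after "move.l %d1,%d0" we record %d0 in cc_status.value1
   and %d1 in cc_status.value2, which allows a following "tst.l %d0" or
   "tst.l %d1" to be omitted, since the move already set the cc's.  */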
4088
4089 void
4090 notice_update_cc (rtx exp, rtx insn)
4091 {
4092 if (GET_CODE (exp) == SET)
4093 {
4094 if (GET_CODE (SET_SRC (exp)) == CALL)
4095 CC_STATUS_INIT;
4096 else if (ADDRESS_REG_P (SET_DEST (exp)))
4097 {
4098 if (cc_status.value1 && modified_in_p (cc_status.value1, insn))
4099 cc_status.value1 = 0;
4100 if (cc_status.value2 && modified_in_p (cc_status.value2, insn))
4101 cc_status.value2 = 0;
4102 }
4103 /* fmoves to memory or data registers do not set the condition
4104 codes. Normal moves _do_ set the condition codes, but not in
4105 a way that is appropriate for comparison with 0, because -0.0
4106 would be treated as a negative nonzero number. Note that it
4107 isn't appropriate to conditionalize this restriction on
4108 HONOR_SIGNED_ZEROS because that macro merely indicates whether
4109 we care about the difference between -0.0 and +0.0. */
4110 else if (!FP_REG_P (SET_DEST (exp))
4111 && SET_DEST (exp) != cc0_rtx
4112 && (FP_REG_P (SET_SRC (exp))
4113 || GET_CODE (SET_SRC (exp)) == FIX
4114 || FLOAT_MODE_P (GET_MODE (SET_DEST (exp)))))
4115 CC_STATUS_INIT;
4116 /* A pair of move insns doesn't produce a useful overall cc. */
4117 else if (!FP_REG_P (SET_DEST (exp))
4118 && !FP_REG_P (SET_SRC (exp))
4119 && GET_MODE_SIZE (GET_MODE (SET_SRC (exp))) > 4
4120 && (GET_CODE (SET_SRC (exp)) == REG
4121 || GET_CODE (SET_SRC (exp)) == MEM
4122 || GET_CODE (SET_SRC (exp)) == CONST_DOUBLE))
4123 CC_STATUS_INIT;
4124 else if (SET_DEST (exp) != pc_rtx)
4125 {
4126 cc_status.flags = 0;
4127 cc_status.value1 = SET_DEST (exp);
4128 cc_status.value2 = SET_SRC (exp);
4129 }
4130 }
4131 else if (GET_CODE (exp) == PARALLEL
4132 && GET_CODE (XVECEXP (exp, 0, 0)) == SET)
4133 {
4134 rtx dest = SET_DEST (XVECEXP (exp, 0, 0));
4135 rtx src = SET_SRC (XVECEXP (exp, 0, 0));
4136
4137 if (ADDRESS_REG_P (dest))
4138 CC_STATUS_INIT;
4139 else if (dest != pc_rtx)
4140 {
4141 cc_status.flags = 0;
4142 cc_status.value1 = dest;
4143 cc_status.value2 = src;
4144 }
4145 }
4146 else
4147 CC_STATUS_INIT;
4148 if (cc_status.value2 != 0
4149 && ADDRESS_REG_P (cc_status.value2)
4150 && GET_MODE (cc_status.value2) == QImode)
4151 CC_STATUS_INIT;
4152 if (cc_status.value2 != 0)
4153 switch (GET_CODE (cc_status.value2))
4154 {
4155 case ASHIFT: case ASHIFTRT: case LSHIFTRT:
4156 case ROTATE: case ROTATERT:
4157 /* These instructions always clear the overflow bit, and set
4158 the carry to the bit shifted out. */
4159 cc_status.flags |= CC_OVERFLOW_UNUSABLE | CC_NO_CARRY;
4160 break;
4161
4162 case PLUS: case MINUS: case MULT:
4163 case DIV: case UDIV: case MOD: case UMOD: case NEG:
4164 if (GET_MODE (cc_status.value2) != VOIDmode)
4165 cc_status.flags |= CC_NO_OVERFLOW;
4166 break;
4167 case ZERO_EXTEND:
4168 /* (SET r1 (ZERO_EXTEND r2)) on this machine
4169 ends with a move insn moving r2 in r2's mode.
4170 Thus, the cc's are set for r2.
4171 This can set N bit spuriously. */
4172 cc_status.flags |= CC_NOT_NEGATIVE;
4173
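/* Fall through.  */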
4174 default:
4175 break;
4176 }
4177 if (cc_status.value1 && GET_CODE (cc_status.value1) == REG
4178 && cc_status.value2
4179 && reg_overlap_mentioned_p (cc_status.value1, cc_status.value2))
4180 cc_status.value2 = 0;
4181 if (((cc_status.value1 && FP_REG_P (cc_status.value1))
4182 || (cc_status.value2 && FP_REG_P (cc_status.value2))))
4183 cc_status.flags = CC_IN_68881;
4184 if (cc_status.value2 && GET_CODE (cc_status.value2) == COMPARE
4185 && GET_MODE_CLASS (GET_MODE (XEXP (cc_status.value2, 0))) == MODE_FLOAT)
4186 {
4187 cc_status.flags = CC_IN_68881;
4188 if (!FP_REG_P (XEXP (cc_status.value2, 0)))
4189 cc_status.flags |= CC_REVERSED;
4190 }
4191 }
4192 \f
4193 const char *
4194 output_move_const_double (rtx *operands)
4195 {
4196 int code = standard_68881_constant_p (operands[1]);
4197
4198 if (code != 0)
4199 {
4200 static char buf[40];
4201
4202 sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff);
4203 return buf;
4204 }
4205 return "fmove%.d %1,%0";
4206 }
4207
4208 const char *
4209 output_move_const_single (rtx *operands)
4210 {
4211 int code = standard_68881_constant_p (operands[1]);
4212
4213 if (code != 0)
4214 {
4215 static char buf[40];
4216
4217 sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff);
4218 return buf;
4219 }
4220 return "fmove%.s %f1,%0";
4221 }
4222
4223 /* Return nonzero if X, a CONST_DOUBLE, has a value that we can get
4224 from the "fmovecr" instruction.
4225 The value, anded with 0xff, gives the code to use in fmovecr
4226 to get the desired constant. */
4227
4228 /* This code has been fixed for cross-compilation. */
4229
4230 static int inited_68881_table = 0;
4231
4232 static const char *const strings_68881[7] = {
4233 "0.0",
4234 "1.0",
4235 "10.0",
4236 "100.0",
4237 "10000.0",
4238 "1e8",
4239 "1e16"
4240 };
4241
4242 static const int codes_68881[7] = {
4243 0x0f,
4244 0x32,
4245 0x33,
4246 0x34,
4247 0x35,
4248 0x36,
4249 0x37
4250 };
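/* For example, 1.0 maps to code 0x32, so it can be loaded with
   "fmovecr #0x32,%fp0" instead of being fetched from memory.  */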
4251
4252 REAL_VALUE_TYPE values_68881[7];
4253
4254 /* Set up values_68881 array by converting the decimal values
4255 strings_68881 to binary. */
4256
4257 void
4258 init_68881_table (void)
4259 {
4260 int i;
4261 REAL_VALUE_TYPE r;
4262 enum machine_mode mode;
4263
4264 mode = SFmode;
4265 for (i = 0; i < 7; i++)
4266 {
4267 if (i == 6)
4268 mode = DFmode;
4269 r = REAL_VALUE_ATOF (strings_68881[i], mode);
4270 values_68881[i] = r;
4271 }
4272 inited_68881_table = 1;
4273 }
4274
4275 int
4276 standard_68881_constant_p (rtx x)
4277 {
4278 REAL_VALUE_TYPE r;
4279 int i;
4280
4281 /* fmovecr must be emulated on the 68040 and 68060, so it shouldn't be
4282 used at all on those chips. */
4283 if (TUNE_68040_60)
4284 return 0;
4285
4286 if (! inited_68881_table)
4287 init_68881_table ();
4288
4289 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4290
4291 /* Use REAL_VALUES_IDENTICAL instead of REAL_VALUES_EQUAL so that -0.0
4292 is rejected. */
4293 for (i = 0; i < 6; i++)
4294 {
4295 if (REAL_VALUES_IDENTICAL (r, values_68881[i]))
4296 return (codes_68881[i]);
4297 }
4298
4299 if (GET_MODE (x) == SFmode)
4300 return 0;
4301
4302 if (REAL_VALUES_EQUAL (r, values_68881[6]))
4303 return (codes_68881[6]);
4304
4305 /* Larger powers of ten in the 68881 constant ROM are not used
4306 because they are not equal to a `double' C constant. */
4307 return 0;
4308 }
4309
4310 /* If X is a floating-point constant, return the base-2 logarithm of X,
4311 or 0 if X is not an exact power of 2. */
4312
4313 int
4314 floating_exact_log2 (rtx x)
4315 {
4316 REAL_VALUE_TYPE r, r1;
4317 int exp;
4318
4319 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4320
4321 if (REAL_VALUES_LESS (r, dconst1))
4322 return 0;
4323
4324 exp = real_exponent (&r);
4325 real_2expN (&r1, exp, DFmode);
4326 if (REAL_VALUES_EQUAL (r1, r))
4327 return exp;
4328
4329 return 0;
4330 }
4331 \f
4332 /* A C compound statement to output to stdio stream STREAM the
4333 assembler syntax for an instruction operand X. X is an RTL
4334 expression.
4335
4336 CODE is a value that can be used to specify one of several ways
4337 of printing the operand. It is used when identical operands
4338 must be printed differently depending on the context. CODE
4339 comes from the `%' specification that was used to request
4340 printing of the operand. If the specification was just `%DIGIT'
4341 then CODE is 0; if the specification was `%LTR DIGIT' then CODE
4342 is the ASCII code for LTR.
4343
4344 If X is a register, this macro should print the register's name.
4345 The names can be found in an array `reg_names' whose type is
4346 `char *[]'. `reg_names' is initialized from `REGISTER_NAMES'.
4347
4348 When the machine description has a specification `%PUNCT' (a `%'
4349 followed by a punctuation character), this macro is called with
4350 a null pointer for X and the punctuation character for CODE.
4351
4352 The m68k specific codes are:
4353
4354 '.' for dot needed in Motorola-style opcode names.
4355 '-' for an operand pushing on the stack:
4356 sp@-, -(sp) or -(%sp) depending on the style of syntax.
4357 '+' for an operand pushing on the stack:
4358 sp@+, (sp)+ or (%sp)+ depending on the style of syntax.
4359 '@' for a reference to the top word on the stack:
4360 sp@, (sp) or (%sp) depending on the style of syntax.
4361 '#' for an immediate operand prefix (# in MIT and Motorola syntax
4362 but & in SGS syntax).
4363 '!' for the cc register (used in an `and to cc' insn).
4364 '$' for the letter `s' in an op code, but only on the 68040.
4365 '&' for the letter `d' in an op code, but only on the 68040.
4366 '/' for register prefix needed by longlong.h.
4367 '?' for m68k_library_id_string.
4368
4369 'b' for byte insn (no effect on the Sun; this is for the ISI).
4370 'd' to force memory addressing to be absolute, not relative.
4371 'f' for float insn (print a CONST_DOUBLE as a float rather than in hex)
4372 'x' for float insn (print a CONST_DOUBLE as a float rather than in hex),
4373 or print pair of registers as rx:ry.
4374 'p' print an address with @PLTPC attached, but only if the operand
4375 is not locally-bound. */
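/* For example, "%-" in an output template prints "-(%sp)" under
   Motorola syntax and "sp@-" under MIT syntax, while "%." prints the
   "." of "move.l" only when Motorola syntax is in use.  */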
4376
4377 void
4378 print_operand (FILE *file, rtx op, int letter)
4379 {
4380 if (letter == '.')
4381 {
4382 if (MOTOROLA)
4383 fprintf (file, ".");
4384 }
4385 else if (letter == '#')
4386 asm_fprintf (file, "%I");
4387 else if (letter == '-')
4388 asm_fprintf (file, MOTOROLA ? "-(%Rsp)" : "%Rsp@-");
4389 else if (letter == '+')
4390 asm_fprintf (file, MOTOROLA ? "(%Rsp)+" : "%Rsp@+");
4391 else if (letter == '@')
4392 asm_fprintf (file, MOTOROLA ? "(%Rsp)" : "%Rsp@");
4393 else if (letter == '!')
4394 asm_fprintf (file, "%Rfpcr");
4395 else if (letter == '$')
4396 {
4397 if (TARGET_68040)
4398 fprintf (file, "s");
4399 }
4400 else if (letter == '&')
4401 {
4402 if (TARGET_68040)
4403 fprintf (file, "d");
4404 }
4405 else if (letter == '/')
4406 asm_fprintf (file, "%R");
4407 else if (letter == '?')
4408 asm_fprintf (file, m68k_library_id_string);
4409 else if (letter == 'p')
4410 {
4411 output_addr_const (file, op);
4412 if (!(GET_CODE (op) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (op)))
4413 fprintf (file, "@PLTPC");
4414 }
4415 else if (GET_CODE (op) == REG)
4416 {
4417 if (letter == 'R')
4418 /* Print out the second register name of a register pair.
4419 I.e., R (6) => 7. */
4420 fputs (M68K_REGNAME(REGNO (op) + 1), file);
4421 else
4422 fputs (M68K_REGNAME(REGNO (op)), file);
4423 }
4424 else if (GET_CODE (op) == MEM)
4425 {
4426 output_address (XEXP (op, 0));
4427 if (letter == 'd' && ! TARGET_68020
4428 && CONSTANT_ADDRESS_P (XEXP (op, 0))
4429 && !(GET_CODE (XEXP (op, 0)) == CONST_INT
4430 && INTVAL (XEXP (op, 0)) < 0x8000
4431 && INTVAL (XEXP (op, 0)) >= -0x8000))
4432 fprintf (file, MOTOROLA ? ".l" : ":l");
4433 }
4434 else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == SFmode)
4435 {
4436 REAL_VALUE_TYPE r;
4437 long l;
4438 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
4439 REAL_VALUE_TO_TARGET_SINGLE (r, l);
4440 asm_fprintf (file, "%I0x%lx", l & 0xFFFFFFFF);
4441 }
4442 else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == XFmode)
4443 {
4444 REAL_VALUE_TYPE r;
4445 long l[3];
4446 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
4447 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
4448 asm_fprintf (file, "%I0x%lx%08lx%08lx", l[0] & 0xFFFFFFFF,
4449 l[1] & 0xFFFFFFFF, l[2] & 0xFFFFFFFF);
4450 }
4451 else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == DFmode)
4452 {
4453 REAL_VALUE_TYPE r;
4454 long l[2];
4455 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
4456 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
4457 asm_fprintf (file, "%I0x%lx%08lx", l[0] & 0xFFFFFFFF, l[1] & 0xFFFFFFFF);
4458 }
4459 else
4460 {
4461 /* Use `print_operand_address' instead of `output_addr_const'
4462 to ensure that we print relevant PIC stuff. */
4463 asm_fprintf (file, "%I");
4464 if (TARGET_PCREL
4465 && (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == CONST))
4466 print_operand_address (file, op);
4467 else
4468 output_addr_const (file, op);
4469 }
4470 }
4471
4472 /* Return string for TLS relocation RELOC. */
4473
4474 static const char *
4475 m68k_get_reloc_decoration (enum m68k_reloc reloc)
4476 {
4477 /* To my knowledge, !MOTOROLA assemblers don't support TLS. */
4478 gcc_assert (MOTOROLA || reloc == RELOC_GOT);
4479
4480 switch (reloc)
4481 {
4482 case RELOC_GOT:
4483 if (MOTOROLA)
4484 {
4485 if (flag_pic == 1 && TARGET_68020)
4486 return "@GOT.w";
4487 else
4488 return "@GOT";
4489 }
4490 else
4491 {
4492 if (TARGET_68020)
4493 {
4494 switch (flag_pic)
4495 {
4496 case 1:
4497 return ":w";
4498 case 2:
4499 return ":l";
4500 default:
4501 return "";
4502 }
4503 }
/* There is no decoration to emit for pre-68020 targets; return now
rather than falling through to the TLS cases below.  */
return "";
4504 }
4505
4506 case RELOC_TLSGD:
4507 return "@TLSGD";
4508
4509 case RELOC_TLSLDM:
4510 return "@TLSLDM";
4511
4512 case RELOC_TLSLDO:
4513 return "@TLSLDO";
4514
4515 case RELOC_TLSIE:
4516 return "@TLSIE";
4517
4518 case RELOC_TLSLE:
4519 return "@TLSLE";
4520
4521 default:
4522 gcc_unreachable ();
4523 }
4524 }
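/* Under Motorola syntax, a GOT reference is therefore printed as
   "foo@GOT.w" with -fpic on a 68020 and as "foo@GOT" otherwise.  */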
4525
4526 /* m68k implementation of OUTPUT_ADDR_CONST_EXTRA. */
4527
4528 bool
4529 m68k_output_addr_const_extra (FILE *file, rtx x)
4530 {
4531 if (GET_CODE (x) == UNSPEC)
4532 {
4533 switch (XINT (x, 1))
4534 {
4535 case UNSPEC_RELOC16:
4536 case UNSPEC_RELOC32:
4537 output_addr_const (file, XVECEXP (x, 0, 0));
4538 fputs (m68k_get_reloc_decoration
4539 ((enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1))), file);
4540 return true;
4541
4542 default:
4543 break;
4544 }
4545 }
4546
4547 return false;
4548 }
4549
4550 /* M68K implementation of TARGET_ASM_OUTPUT_DWARF_DTPREL. */
4551
4552 static void
4553 m68k_output_dwarf_dtprel (FILE *file, int size, rtx x)
4554 {
4555 gcc_assert (size == 4);
4556 fputs ("\t.long\t", file);
4557 output_addr_const (file, x);
4558 fputs ("@TLSLDO+0x8000", file);
4559 }
4560
4561 /* In the name of slightly smaller debug output, and to cater to
4562 general assembler lossage, recognize various UNSPEC sequences
4563 and turn them back into a direct symbol reference. */
4564
4565 static rtx
4566 m68k_delegitimize_address (rtx orig_x)
4567 {
4568 rtx x;
4569 struct m68k_address addr;
4570 rtx unspec;
4571
4572 orig_x = delegitimize_mem_from_attrs (orig_x);
4573 x = orig_x;
4574 if (MEM_P (x))
4575 x = XEXP (x, 0);
4576
4577 if (GET_CODE (x) != PLUS || GET_MODE (x) != Pmode)
4578 return orig_x;
4579
4580 if (!m68k_decompose_address (GET_MODE (x), x, false, &addr)
4581 || addr.offset == NULL_RTX
4582 || GET_CODE (addr.offset) != CONST)
4583 return orig_x;
4584
4585 unspec = XEXP (addr.offset, 0);
4586 if (GET_CODE (unspec) == PLUS && CONST_INT_P (XEXP (unspec, 1)))
4587 unspec = XEXP (unspec, 0);
4588 if (GET_CODE (unspec) != UNSPEC
4589 || (XINT (unspec, 1) != UNSPEC_RELOC16
4590 && XINT (unspec, 1) != UNSPEC_RELOC32))
4591 return orig_x;
4592 x = XVECEXP (unspec, 0, 0);
4593 gcc_assert (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF);
4594 if (unspec != XEXP (addr.offset, 0))
4595 x = gen_rtx_PLUS (Pmode, x, XEXP (XEXP (addr.offset, 0), 1));
4596 if (addr.index)
4597 {
4598 rtx idx = addr.index;
4599 if (addr.scale != 1)
4600 idx = gen_rtx_MULT (Pmode, idx, GEN_INT (addr.scale));
4601 x = gen_rtx_PLUS (Pmode, idx, x);
4602 }
4603 if (addr.base)
4604 x = gen_rtx_PLUS (Pmode, addr.base, x);
4605 if (MEM_P (orig_x))
4606 x = replace_equiv_address_nv (orig_x, x);
4607 return x;
4608 }
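/* For example, a PIC reference of the form
   (mem (plus (reg %a5) (const (unspec [(symbol_ref "foo") ...]
   UNSPEC_RELOC16)))) is rewritten so that the debug output simply
   refers to "foo" plus the base register.  */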
4609
4610 \f
4611 /* A C compound statement to output to stdio stream STREAM the
4612 assembler syntax for an instruction operand that is a memory
4613 reference whose address is ADDR. ADDR is an RTL expression.
4614
4615 Note that this contains a kludge that knows that the only reason
4616 we have an address (plus (label_ref...) (reg...)) when not generating
4617 PIC code is in the insn before a tablejump, and we know that m68k.md
4618 generates a label LInnn: on such an insn.
4619
4620 It is possible for PIC to generate a (plus (label_ref...) (reg...))
4621 and we handle that just like we would a (plus (symbol_ref...) (reg...)).
4622
4623 This routine is responsible for distinguishing between -fpic and -fPIC
4624 style relocations in an address. When generating -fpic code the
4625 offset is output in word mode (e.g. movel a5@(_foo:w), a0). When generating
4626 -fPIC code the offset is output in long mode (e.g. movel a5@(_foo:l), a0). */
4627
4628 void
4629 print_operand_address (FILE *file, rtx addr)
4630 {
4631 struct m68k_address address;
4632
4633 if (!m68k_decompose_address (QImode, addr, true, &address))
4634 gcc_unreachable ();
4635
4636 if (address.code == PRE_DEC)
4637 fprintf (file, MOTOROLA ? "-(%s)" : "%s@-",
4638 M68K_REGNAME (REGNO (address.base)));
4639 else if (address.code == POST_INC)
4640 fprintf (file, MOTOROLA ? "(%s)+" : "%s@+",
4641 M68K_REGNAME (REGNO (address.base)));
4642 else if (!address.base && !address.index)
4643 {
4644 /* A constant address. */
4645 gcc_assert (address.offset == addr);
4646 if (GET_CODE (addr) == CONST_INT)
4647 {
4648 /* (xxx).w or (xxx).l. */
4649 if (IN_RANGE (INTVAL (addr), -0x8000, 0x7fff))
4650 fprintf (file, MOTOROLA ? "%d.w" : "%d:w", (int) INTVAL (addr));
4651 else
4652 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (addr));
4653 }
4654 else if (TARGET_PCREL)
4655 {
4656 /* (d16,PC) or (bd,PC,Xn) (with suppressed index register). */
4657 fputc ('(', file);
4658 output_addr_const (file, addr);
4659 asm_fprintf (file, flag_pic == 1 ? ":w,%Rpc)" : ":l,%Rpc)");
4660 }
4661 else
4662 {
4663 /* (xxx).l. We need a special case for SYMBOL_REF if the symbol
4664 name ends in `.<letter>', as the last 2 characters can be
4665 mistaken for a size suffix. Put the name in parentheses. */
4666 if (GET_CODE (addr) == SYMBOL_REF
4667 && strlen (XSTR (addr, 0)) > 2
4668 && XSTR (addr, 0)[strlen (XSTR (addr, 0)) - 2] == '.')
4669 {
4670 putc ('(', file);
4671 output_addr_const (file, addr);
4672 putc (')', file);
4673 }
4674 else
4675 output_addr_const (file, addr);
4676 }
4677 }
4678 else
4679 {
4680 int labelno;
4681
4682 /* If ADDR is a (d8,pc,Xn) address, this is the number of the
4683 label being accessed, otherwise it is -1. */
4684 labelno = (address.offset
4685 && !address.base
4686 && GET_CODE (address.offset) == LABEL_REF
4687 ? CODE_LABEL_NUMBER (XEXP (address.offset, 0))
4688 : -1);
4689 if (MOTOROLA)
4690 {
4691 /* Print the "offset(base" component. */
4692 if (labelno >= 0)
4693 asm_fprintf (file, "%LL%d(%Rpc,", labelno);
4694 else
4695 {
4696 if (address.offset)
4697 output_addr_const (file, address.offset);
4698
4699 putc ('(', file);
4700 if (address.base)
4701 fputs (M68K_REGNAME (REGNO (address.base)), file);
4702 }
4703 /* Print the ",index" component, if any. */
4704 if (address.index)
4705 {
4706 if (address.base)
4707 putc (',', file);
4708 fprintf (file, "%s.%c",
4709 M68K_REGNAME (REGNO (address.index)),
4710 GET_MODE (address.index) == HImode ? 'w' : 'l');
4711 if (address.scale != 1)
4712 fprintf (file, "*%d", address.scale);
4713 }
4714 putc (')', file);
4715 }
4716 else /* !MOTOROLA */
4717 {
4718 if (!address.offset && !address.index)
4719 fprintf (file, "%s@", M68K_REGNAME (REGNO (address.base)));
4720 else
4721 {
4722 /* Print the "base@(offset" component. */
4723 if (labelno >= 0)
4724 asm_fprintf (file, "%Rpc@(%LL%d", labelno);
4725 else
4726 {
4727 if (address.base)
4728 fputs (M68K_REGNAME (REGNO (address.base)), file);
4729 fprintf (file, "@(");
4730 if (address.offset)
4731 output_addr_const (file, address.offset);
4732 }
4733 /* Print the ",index" component, if any. */
4734 if (address.index)
4735 {
4736 fprintf (file, ",%s:%c",
4737 M68K_REGNAME (REGNO (address.index)),
4738 GET_MODE (address.index) == HImode ? 'w' : 'l');
4739 if (address.scale != 1)
4740 fprintf (file, ":%d", address.scale);
4741 }
4742 putc (')', file);
4743 }
4744 }
4745 }
4746 }
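/* For example, the address (plus (reg %a0) (const_int 8)) is printed
   as "8(%a0)" under Motorola syntax and as "%a0@(8)" under MIT syntax
   (modulo the configured register prefix).  */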
4747 \f
4748 /* Check for cases where a clr insn can be omitted from code using
4749 strict_low_part sets. For example, the second clrl here is not needed:
4750 clrl d0; movw a0@+,d0; use d0; clrl d0; movw a0@+,d0; use d0; ...
4751
4752 MODE is the mode of this STRICT_LOW_PART set. FIRST_INSN is the clear
4753 insn we are checking for redundancy. TARGET is the register set by the
4754 clear insn. */
4755
4756 bool
4757 strict_low_part_peephole_ok (enum machine_mode mode, rtx first_insn,
4758 rtx target)
4759 {
4760 rtx p = first_insn;
4761
4762 while ((p = PREV_INSN (p)))
4763 {
4764 if (NOTE_INSN_BASIC_BLOCK_P (p))
4765 return false;
4766
4767 if (NOTE_P (p))
4768 continue;
4769
4770 /* If it isn't an insn, then give up. */
4771 if (!INSN_P (p))
4772 return false;
4773
4774 if (reg_set_p (target, p))
4775 {
4776 rtx set = single_set (p);
4777 rtx dest;
4778
4779 /* If it isn't an easy to recognize insn, then give up. */
4780 if (! set)
4781 return false;
4782
4783 dest = SET_DEST (set);
4784
4785 /* If this sets the entire target register to zero, then our
4786 first_insn is redundant. */
4787 if (rtx_equal_p (dest, target)
4788 && SET_SRC (set) == const0_rtx)
4789 return true;
4790 else if (GET_CODE (dest) == STRICT_LOW_PART
4791 && GET_CODE (XEXP (dest, 0)) == REG
4792 && REGNO (XEXP (dest, 0)) == REGNO (target)
4793 && (GET_MODE_SIZE (GET_MODE (XEXP (dest, 0)))
4794 <= GET_MODE_SIZE (mode)))
4795 /* This is a strict low part set which modifies less than
4796 we are using, so it is safe. */
4797 ;
4798 else
4799 return false;
4800 }
4801 }
4802
4803 return false;
4804 }
4805
4806 /* Operand predicates for implementing asymmetric pc-relative addressing
4807 on m68k. The m68k supports pc-relative addressing (mode 7, register 2)
4808 when used as a source operand, but not as a destination operand.
4809
4810 We model this by restricting the meaning of the basic predicates
4811 (general_operand, memory_operand, etc) to forbid the use of this
4812 addressing mode, and then define the following predicates that permit
4813 this addressing mode. These predicates can then be used for the
4814 source operands of the appropriate instructions.
4815
4816 n.b. While it is theoretically possible to change all machine patterns
4817 to use this addressing mode where permitted by the architecture,
4818 it has only been implemented for "common" cases: SImode, HImode, and
4819 QImode operands, and only for the principal operations that would
4820 require this addressing mode: data movement and simple integer operations.
4821
4822 In parallel with these new predicates, two new constraint letters
4823 were defined: 'S' and 'T'. 'S' is the -mpcrel analog of 'm'.
4824 'T' replaces 's' in the non-pcrel case. It is a no-op in the pcrel case.
4825 In the pcrel case 's' is only valid in combination with 'a' registers.
4826 See addsi3, subsi3, cmpsi, and movsi patterns for a better understanding
4827 of how these constraints are used.
4828
4829 The use of these predicates is strictly optional, though patterns that
4830 don't will cause an extra reload register to be allocated where one
4831 was not necessary:
4832
4833 lea (abc:w,%pc),%a0 ; need to reload address
4834 moveq &1,%d1 ; since write to pc-relative space
4835 movel %d1,%a0@ ; is not allowed
4836 ...
4837 lea (abc:w,%pc),%a1 ; no need to reload address here
4838 movel %a1@,%d0 ; since "movel (abc:w,%pc),%d0" is ok
4839
4840 For more info, consult tiemann@cygnus.com.
4841
4842
4843 All of the ugliness with predicates and constraints is due to the
4844 simple fact that the m68k does not allow a pc-relative addressing
4845 mode as a destination. gcc does not distinguish between source and
4846 destination addresses. Hence, if we claim that pc-relative address
4847 modes are valid, e.g. TARGET_LEGITIMATE_ADDRESS_P accepts them, then we
4848 end up with invalid code. To get around this problem, we left
4849 pc-relative modes as invalid addresses, and then added special
4850 predicates and constraints to accept them.
4851
4852 A cleaner way to handle this is to modify gcc to distinguish
4853 between source and destination addresses. We can then say that
4854 pc-relative is a valid source address but not a valid destination
4855 address, and hopefully avoid a lot of the predicate and constraint
4856 hackery. Unfortunately, this would be a pretty big change. It would
4857 be a useful change for a number of ports, but there aren't any current
4858 plans to undertake this.
4859
4860 ***************************************************************************/
4861
4862
4863 const char *
4864 output_andsi3 (rtx *operands)
4865 {
4866 int logval;
4867 if (GET_CODE (operands[2]) == CONST_INT
4868 && (INTVAL (operands[2]) | 0xffff) == -1
4869 && (DATA_REG_P (operands[0])
4870 || offsettable_memref_p (operands[0]))
4871 && !TARGET_COLDFIRE)
4872 {
4873 if (GET_CODE (operands[0]) != REG)
4874 operands[0] = adjust_address (operands[0], HImode, 2);
4875 operands[2] = GEN_INT (INTVAL (operands[2]) & 0xffff);
4876 /* Do not delete a following tstl %0 insn; that would be incorrect. */
4877 CC_STATUS_INIT;
4878 if (operands[2] == const0_rtx)
4879 return "clr%.w %0";
4880 return "and%.w %2,%0";
4881 }
4882 if (GET_CODE (operands[2]) == CONST_INT
4883 && (logval = exact_log2 (~ INTVAL (operands[2]) & 0xffffffff)) >= 0
4884 && (DATA_REG_P (operands[0])
4885 || offsettable_memref_p (operands[0])))
4886 {
4887 if (DATA_REG_P (operands[0]))
4888 operands[1] = GEN_INT (logval);
4889 else
4890 {
4891 operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
4892 operands[1] = GEN_INT (logval % 8);
4893 }
4894 /* This does not set condition codes in a standard way. */
4895 CC_STATUS_INIT;
4896 return "bclr %1,%0";
4897 }
4898 return "and%.l %2,%0";
4899 }
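/* For example, clearing a single bit with "and.l #0xffffffef,%d0" is
   emitted as "bclr #4,%d0" (Motorola syntax), and "and.l #0xffff00ff,%d0",
   whose upper sixteen bits are all set, is shortened to a word
   operation on the low half of the register.  */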
4900
4901 const char *
4902 output_iorsi3 (rtx *operands)
4903 {
4904 int logval;
4905 if (GET_CODE (operands[2]) == CONST_INT
4906 && INTVAL (operands[2]) >> 16 == 0
4907 && (DATA_REG_P (operands[0])
4908 || offsettable_memref_p (operands[0]))
4909 && !TARGET_COLDFIRE)
4910 {
4911 if (GET_CODE (operands[0]) != REG)
4912 operands[0] = adjust_address (operands[0], HImode, 2);
4913 /* Do not delete a following tstl %0 insn; that would be incorrect. */
4914 CC_STATUS_INIT;
4915 if (INTVAL (operands[2]) == 0xffff)
4916 return "mov%.w %2,%0";
4917 return "or%.w %2,%0";
4918 }
4919 if (GET_CODE (operands[2]) == CONST_INT
4920 && (logval = exact_log2 (INTVAL (operands[2]) & 0xffffffff)) >= 0
4921 && (DATA_REG_P (operands[0])
4922 || offsettable_memref_p (operands[0])))
4923 {
4924 if (DATA_REG_P (operands[0]))
4925 operands[1] = GEN_INT (logval);
4926 else
4927 {
4928 operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
4929 operands[1] = GEN_INT (logval % 8);
4930 }
4931 CC_STATUS_INIT;
4932 return "bset %1,%0";
4933 }
4934 return "or%.l %2,%0";
4935 }
4936
4937 const char *
4938 output_xorsi3 (rtx *operands)
4939 {
4940 int logval;
4941 if (GET_CODE (operands[2]) == CONST_INT
4942 && INTVAL (operands[2]) >> 16 == 0
4943 && (offsettable_memref_p (operands[0]) || DATA_REG_P (operands[0]))
4944 && !TARGET_COLDFIRE)
4945 {
4946 if (! DATA_REG_P (operands[0]))
4947 operands[0] = adjust_address (operands[0], HImode, 2);
4948 /* Do not delete a following tstl %0 insn; that would be incorrect. */
4949 CC_STATUS_INIT;
4950 if (INTVAL (operands[2]) == 0xffff)
4951 return "not%.w %0";
4952 return "eor%.w %2,%0";
4953 }
4954 if (GET_CODE (operands[2]) == CONST_INT
4955 && (logval = exact_log2 (INTVAL (operands[2]) & 0xffffffff)) >= 0
4956 && (DATA_REG_P (operands[0])
4957 || offsettable_memref_p (operands[0])))
4958 {
4959 if (DATA_REG_P (operands[0]))
4960 operands[1] = GEN_INT (logval);
4961 else
4962 {
4963 operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
4964 operands[1] = GEN_INT (logval % 8);
4965 }
4966 CC_STATUS_INIT;
4967 return "bchg %1,%0";
4968 }
4969 return "eor%.l %2,%0";
4970 }
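/* Likewise, setting or toggling a single bit is shortened to a bit
   instruction: "or.l #0x20,%d0" becomes "bset #5,%d0" and
   "eor.l #0x20,%d0" becomes "bchg #5,%d0".  */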
4971
4972 /* Return the instruction that should be used for a call to address X,
4973 which is known to be in operand 0. */
4974
4975 const char *
4976 output_call (rtx x)
4977 {
4978 if (symbolic_operand (x, VOIDmode))
4979 return m68k_symbolic_call;
4980 else
4981 return "jsr %a0";
4982 }
4983
4984 /* Likewise sibling calls. */
4985
4986 const char *
4987 output_sibcall (rtx x)
4988 {
4989 if (symbolic_operand (x, VOIDmode))
4990 return m68k_symbolic_jump;
4991 else
4992 return "jmp %a0";
4993 }
4994
4995 static void
4996 m68k_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
4997 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
4998 tree function)
4999 {
5000 rtx this_slot, offset, addr, mem, insn, tmp;
5001
5002 /* Avoid clobbering the struct value reg by using the
5003 static chain reg as a temporary. */
5004 tmp = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
5005
5006 /* Pretend to be a post-reload pass while generating rtl. */
5007 reload_completed = 1;
5008
5009 /* The "this" pointer is stored at 4(%sp). */
5010 this_slot = gen_rtx_MEM (Pmode, plus_constant (stack_pointer_rtx, 4));
5011
5012 /* Add DELTA to THIS. */
5013 if (delta != 0)
5014 {
5015 /* Make the offset a legitimate operand for memory addition. */
5016 offset = GEN_INT (delta);
5017 if ((delta < -8 || delta > 8)
5018 && (TARGET_COLDFIRE || USE_MOVQ (delta)))
5019 {
5020 emit_move_insn (gen_rtx_REG (Pmode, D0_REG), offset);
5021 offset = gen_rtx_REG (Pmode, D0_REG);
5022 }
5023 emit_insn (gen_add3_insn (copy_rtx (this_slot),
5024 copy_rtx (this_slot), offset));
5025 }
5026
5027 /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
5028 if (vcall_offset != 0)
5029 {
5030 /* Set the static chain register to *THIS. */
5031 emit_move_insn (tmp, this_slot);
5032 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
5033
5034 /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
5035 addr = plus_constant (tmp, vcall_offset);
5036 if (!m68k_legitimate_address_p (Pmode, addr, true))
5037 {
5038 emit_insn (gen_rtx_SET (VOIDmode, tmp, addr));
5039 addr = tmp;
5040 }
5041
5042 /* Load the offset into %d0 and add it to THIS. */
5043 emit_move_insn (gen_rtx_REG (Pmode, D0_REG),
5044 gen_rtx_MEM (Pmode, addr));
5045 emit_insn (gen_add3_insn (copy_rtx (this_slot),
5046 copy_rtx (this_slot),
5047 gen_rtx_REG (Pmode, D0_REG)));
5048 }
5049
5050 /* Jump to the target function. Use a sibcall if direct jumps are
5051 allowed, otherwise load the address into a register first. */
5052 mem = DECL_RTL (function);
5053 if (!sibcall_operand (XEXP (mem, 0), VOIDmode))
5054 {
5055 gcc_assert (flag_pic);
5056
5057 if (!TARGET_SEP_DATA)
5058 {
5059 /* Use the static chain register as a temporary (call-clobbered)
5060 GOT pointer for this function. We can use the static chain
5061 register because it isn't live on entry to the thunk. */
5062 SET_REGNO (pic_offset_table_rtx, STATIC_CHAIN_REGNUM);
5063 emit_insn (gen_load_got (pic_offset_table_rtx));
5064 }
5065 legitimize_pic_address (XEXP (mem, 0), Pmode, tmp);
5066 mem = replace_equiv_address (mem, tmp);
5067 }
5068 insn = emit_call_insn (gen_sibcall (mem, const0_rtx));
5069 SIBLING_CALL_P (insn) = 1;
5070
5071 /* Run just enough of rest_of_compilation. */
5072 insn = get_insns ();
5073 split_all_insns_noflow ();
5074 final_start_function (insn, file, 1);
5075 final (insn, file, 1);
5076 final_end_function ();
5077
5078 /* Clean up the vars set above. */
5079 reload_completed = 0;
5080
5081 /* Restore the original PIC register. */
5082 if (flag_pic)
5083 SET_REGNO (pic_offset_table_rtx, PIC_REG);
5084 }
5085
5086 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
5087
5088 static rtx
5089 m68k_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
5090 int incoming ATTRIBUTE_UNUSED)
5091 {
5092 return gen_rtx_REG (Pmode, M68K_STRUCT_VALUE_REGNUM);
5093 }
5094
5095 /* Return nonzero if register old_reg can be renamed to register new_reg. */
5096 int
5097 m68k_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED,
5098 unsigned int new_reg)
5099 {
5100
5101 /* Interrupt functions can only use registers that have already been
5102 saved by the prologue, even if they would normally be
5103 call-clobbered. */
5104
5105 if ((m68k_get_function_kind (current_function_decl)
5106 == m68k_fk_interrupt_handler)
5107 && !df_regs_ever_live_p (new_reg))
5108 return 0;
5109
5110 return 1;
5111 }
5112
5113 /* Value is true if hard register REGNO can hold a value of machine-mode
5114 MODE. On the 68000, we let the cpu registers hold any mode, but
5115 restrict the 68881 registers to floating-point modes. */
5116
5117 bool
5118 m68k_regno_mode_ok (int regno, enum machine_mode mode)
5119 {
5120 if (DATA_REGNO_P (regno))
5121 {
5122 /* Data registers can hold any value, including an aggregate, that fits in the remaining data registers. */
5123 if (regno + GET_MODE_SIZE (mode) / 4 <= 8)
5124 return true;
5125 }
5126 else if (ADDRESS_REGNO_P (regno))
5127 {
5128 if (regno + GET_MODE_SIZE (mode) / 4 <= 16)
5129 return true;
5130 }
5131 else if (FP_REGNO_P (regno))
5132 {
5133 /* FPU registers can hold a float or complex float of long double
5134 size or smaller. */
5135 if ((GET_MODE_CLASS (mode) == MODE_FLOAT
5136 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
5137 && GET_MODE_UNIT_SIZE (mode) <= TARGET_FP_REG_SIZE)
5138 return true;
5139 }
5140 return false;
5141 }
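/* For example, DImode needs two adjacent registers, so it is allowed
   in %d0-%d6 (paired with the following register) but not in %d7,
   where the pair would run off the end of the data register bank.  */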
5142
5143 /* Implement SECONDARY_RELOAD_CLASS. */
5144
5145 enum reg_class
5146 m68k_secondary_reload_class (enum reg_class rclass,
5147 enum machine_mode mode, rtx x)
5148 {
5149 int regno;
5150
5151 regno = true_regnum (x);
5152
5153 /* If one operand of a movqi is an address register, the other
5154 operand must be a general register or constant. Other types
5155 of operand must be reloaded through a data register. */
5156 if (GET_MODE_SIZE (mode) == 1
5157 && reg_classes_intersect_p (rclass, ADDR_REGS)
5158 && !(INT_REGNO_P (regno) || CONSTANT_P (x)))
5159 return DATA_REGS;
5160
5161 /* PC-relative addresses must be loaded into an address register first. */
5162 if (TARGET_PCREL
5163 && !reg_class_subset_p (rclass, ADDR_REGS)
5164 && symbolic_operand (x, VOIDmode))
5165 return ADDR_REGS;
5166
5167 return NO_REGS;
5168 }
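/* The movqi restriction reflects the hardware: there is no byte-sized
   move into an address register, so a QImode value headed for an
   address register must be reloaded through a data register first.  */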
5169
5170 /* Implement PREFERRED_RELOAD_CLASS. */
5171
5172 enum reg_class
5173 m68k_preferred_reload_class (rtx x, enum reg_class rclass)
5174 {
5175 enum reg_class secondary_class;
5176
5177 /* If RCLASS might need a secondary reload, try restricting it to
5178 a class that doesn't. */
5179 secondary_class = m68k_secondary_reload_class (rclass, GET_MODE (x), x);
5180 if (secondary_class != NO_REGS
5181 && reg_class_subset_p (secondary_class, rclass))
5182 return secondary_class;
5183
5184 /* Prefer to use moveq for in-range constants. */
5185 if (GET_CODE (x) == CONST_INT
5186 && reg_class_subset_p (DATA_REGS, rclass)
5187 && IN_RANGE (INTVAL (x), -0x80, 0x7f))
5188 return DATA_REGS;
5189
5190 /* ??? Do we really need this now? */
5191 if (GET_CODE (x) == CONST_DOUBLE
5192 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
5193 {
5194 if (TARGET_HARD_FLOAT && reg_class_subset_p (FP_REGS, rclass))
5195 return FP_REGS;
5196
5197 return NO_REGS;
5198 }
5199
5200 return rclass;
5201 }
5202
5203 /* Return floating point values in a 68881 register. This makes 68881 code
5204 a little bit faster. It also makes -msoft-float code incompatible with
5205 hard-float code, so people have to be careful not to mix the two.
5206 For ColdFire it was decided the ABI incompatibility is undesirable.
5207 If there is need for a hard-float ABI it is probably worth doing it
5208 properly and also passing function arguments in FP registers. */
5209 rtx
5210 m68k_libcall_value (enum machine_mode mode)
5211 {
5212 switch (mode) {
5213 case SFmode:
5214 case DFmode:
5215 case XFmode:
5216 if (TARGET_68881)
5217 return gen_rtx_REG (mode, FP0_REG);
5218 break;
5219 default:
5220 break;
5221 }
5222
5223 return gen_rtx_REG (mode, m68k_libcall_value_in_a0_p ? A0_REG : D0_REG);
5224 }
5225
5226 /* Location in which function value is returned.
5227 NOTE: Due to differences in ABIs, don't call this function directly,
5228 use FUNCTION_VALUE instead. */
5229 rtx
5230 m68k_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED)
5231 {
5232 enum machine_mode mode;
5233
5234 mode = TYPE_MODE (valtype);
5235 switch (mode) {
5236 case SFmode:
5237 case DFmode:
5238 case XFmode:
5239 if (TARGET_68881)
5240 return gen_rtx_REG (mode, FP0_REG);
5241 break;
5242 default:
5243 break;
5244 }
5245
5246 /* If the function returns a pointer, return it in %a0. */
5247 if (func && POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (func))))
5248 /* For compatibility with the large body of existing code which
5249 does not always properly declare external functions returning
5250 pointer types, the m68k/SVR4 convention is to copy the value
5251 returned for pointer functions from a0 to d0 in the function
5252 epilogue, so that callers that have neglected to properly
5253 declare the callee can still find the correct return value in
5254 d0. */
5255 return gen_rtx_PARALLEL
5256 (mode,
5257 gen_rtvec (2,
5258 gen_rtx_EXPR_LIST (VOIDmode,
5259 gen_rtx_REG (mode, A0_REG),
5260 const0_rtx),
5261 gen_rtx_EXPR_LIST (VOIDmode,
5262 gen_rtx_REG (mode, D0_REG),
5263 const0_rtx)));
5264 else if (POINTER_TYPE_P (valtype))
5265 return gen_rtx_REG (mode, A0_REG);
5266 else
5267 return gen_rtx_REG (mode, D0_REG);
5268 }
5269
5270 /* Worker function for TARGET_RETURN_IN_MEMORY. */
5271 #if M68K_HONOR_TARGET_STRICT_ALIGNMENT
5272 static bool
5273 m68k_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
5274 {
5275 enum machine_mode mode = TYPE_MODE (type);
5276
5277 if (mode == BLKmode)
5278 return true;
5279
5280 /* If TYPE's known alignment is less than the alignment of MODE that
5281 would contain the structure, then return in memory. We need to
5282 do so to maintain the compatibility between code compiled with
5283 -mstrict-align and that compiled with -mno-strict-align. */
5284 if (AGGREGATE_TYPE_P (type)
5285 && TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (mode))
5286 return true;
5287
5288 return false;
5289 }
5290 #endif
5291
5292 /* CPU to schedule the program for. */
5293 enum attr_cpu m68k_sched_cpu;
5294
5295 /* MAC to schedule the program for. */
5296 enum attr_mac m68k_sched_mac;
5297
5298 /* Operand type. */
5299 enum attr_op_type
5300 {
5301 /* No operand. */
5302 OP_TYPE_NONE,
5303
5304 /* Integer register. */
5305 OP_TYPE_RN,
5306
5307 /* FP register. */
5308 OP_TYPE_FPN,
5309
5310 /* Implicit mem reference (e.g. stack). */
5311 OP_TYPE_MEM1,
5312
5313 /* Memory without offset or indexing. EA modes 2, 3 and 4. */
5314 OP_TYPE_MEM234,
5315
5316 /* Memory with offset but without indexing. EA mode 5. */
5317 OP_TYPE_MEM5,
5318
5319 /* Memory with indexing. EA mode 6. */
5320 OP_TYPE_MEM6,
5321
5322 /* Memory referenced by absolute address. EA mode 7. */
5323 OP_TYPE_MEM7,
5324
5325 /* Immediate operand that doesn't require extension word. */
5326 OP_TYPE_IMM_Q,
5327
5328 /* Immediate 16 bit operand. */
5329 OP_TYPE_IMM_W,
5330
5331 /* Immediate 32 bit operand. */
5332 OP_TYPE_IMM_L
5333 };
5334
5335 /* Return the OP_TYPE_MEM* classification of a memory reference whose address is ADDR_RTX. */
5336 static enum attr_op_type
5337 sched_address_type (enum machine_mode mode, rtx addr_rtx)
5338 {
5339 struct m68k_address address;
5340
5341 if (symbolic_operand (addr_rtx, VOIDmode))
5342 return OP_TYPE_MEM7;
5343
5344 if (!m68k_decompose_address (mode, addr_rtx,
5345 reload_completed, &address))
5346 {
5347 gcc_assert (!reload_completed);
5348 /* Reload will likely fix the address to be in a register. */
5349 return OP_TYPE_MEM234;
5350 }
5351
5352 if (address.scale != 0)
5353 return OP_TYPE_MEM6;
5354
5355 if (address.base != NULL_RTX)
5356 {
5357 if (address.offset == NULL_RTX)
5358 return OP_TYPE_MEM234;
5359
5360 return OP_TYPE_MEM5;
5361 }
5362
5363 gcc_assert (address.offset != NULL_RTX);
5364
5365 return OP_TYPE_MEM7;
5366 }
5367
5368 /* Return operand X (if OPX_P) or operand Y of INSN. */
5369 static rtx
5370 sched_get_operand (rtx insn, bool opx_p)
5371 {
5372 int i;
5373
5374 if (recog_memoized (insn) < 0)
5375 gcc_unreachable ();
5376
5377 extract_constrain_insn_cached (insn);
5378
5379 if (opx_p)
5380 i = get_attr_opx (insn);
5381 else
5382 i = get_attr_opy (insn);
5383
5384 if (i >= recog_data.n_operands)
5385 return NULL;
5386
5387 return recog_data.operand[i];
5388 }
5389
5390 /* Return type of INSN's operand X (if OPX_P) or operand Y (if !OPX_P).
5391 If ADDRESS_P is true, return type of memory location operand refers to. */
5392 static enum attr_op_type
5393 sched_attr_op_type (rtx insn, bool opx_p, bool address_p)
5394 {
5395 rtx op;
5396
5397 op = sched_get_operand (insn, opx_p);
5398
5399 if (op == NULL)
5400 {
5401 gcc_assert (!reload_completed);
5402 return OP_TYPE_RN;
5403 }
5404
5405 if (address_p)
5406 return sched_address_type (QImode, op);
5407
5408 if (memory_operand (op, VOIDmode))
5409 return sched_address_type (GET_MODE (op), XEXP (op, 0));
5410
5411 if (register_operand (op, VOIDmode))
5412 {
5413 if ((!reload_completed && FLOAT_MODE_P (GET_MODE (op)))
5414 || (reload_completed && FP_REG_P (op)))
5415 return OP_TYPE_FPN;
5416
5417 return OP_TYPE_RN;
5418 }
5419
5420 if (GET_CODE (op) == CONST_INT)
5421 {
5422 int ival;
5423
5424 ival = INTVAL (op);
5425
5426 /* Check for quick constants. */
5427 switch (get_attr_type (insn))
5428 {
5429 case TYPE_ALUQ_L:
5430 if (IN_RANGE (ival, 1, 8) || IN_RANGE (ival, -8, -1))
5431 return OP_TYPE_IMM_Q;
5432
5433 gcc_assert (!reload_completed);
5434 break;
5435
5436 case TYPE_MOVEQ_L:
5437 if (USE_MOVQ (ival))
5438 return OP_TYPE_IMM_Q;
5439
5440 gcc_assert (!reload_completed);
5441 break;
5442
5443 case TYPE_MOV3Q_L:
5444 if (valid_mov3q_const (ival))
5445 return OP_TYPE_IMM_Q;
5446
5447 gcc_assert (!reload_completed);
5448 break;
5449
5450 default:
5451 break;
5452 }
5453
5454 if (IN_RANGE (ival, -0x8000, 0x7fff))
5455 return OP_TYPE_IMM_W;
5456
5457 return OP_TYPE_IMM_L;
5458 }
5459
5460 if (GET_CODE (op) == CONST_DOUBLE)
5461 {
5462 switch (GET_MODE (op))
5463 {
5464 case SFmode:
5465 return OP_TYPE_IMM_W;
5466
5467 case VOIDmode:
5468 case DFmode:
5469 return OP_TYPE_IMM_L;
5470
5471 default:
5472 gcc_unreachable ();
5473 }
5474 }
5475
5476 if (GET_CODE (op) == CONST
5477 || symbolic_operand (op, VOIDmode)
5478 || LABEL_P (op))
5479 {
5480 switch (GET_MODE (op))
5481 {
5482 case QImode:
5483 return OP_TYPE_IMM_Q;
5484
5485 case HImode:
5486 return OP_TYPE_IMM_W;
5487
5488 case SImode:
5489 return OP_TYPE_IMM_L;
5490
5491 default:
5492 if (symbolic_operand (m68k_unwrap_symbol (op, false), VOIDmode))
5493 /* Just a guess. */
5494 return OP_TYPE_IMM_W;
5495
5496 return OP_TYPE_IMM_L;
5497 }
5498 }
5499
5500 gcc_assert (!reload_completed);
5501
5502 if (FLOAT_MODE_P (GET_MODE (op)))
5503 return OP_TYPE_FPN;
5504
5505 return OP_TYPE_RN;
5506 }
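/* For example, in "addq.l #5,%d0" the constant 5 classifies as
   OP_TYPE_IMM_Q; a constant needing one extension word classifies as
   OP_TYPE_IMM_W and a full 32-bit constant as OP_TYPE_IMM_L.  */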
5507
5508 /* Implement opx_type attribute.
5509 Return type of INSN's operand X.
5510 If ADDRESS_P is true, return type of memory location operand refers to. */
5511 enum attr_opx_type
5512 m68k_sched_attr_opx_type (rtx insn, int address_p)
5513 {
5514 switch (sched_attr_op_type (insn, true, address_p != 0))
5515 {
5516 case OP_TYPE_RN:
5517 return OPX_TYPE_RN;
5518
5519 case OP_TYPE_FPN:
5520 return OPX_TYPE_FPN;
5521
5522 case OP_TYPE_MEM1:
5523 return OPX_TYPE_MEM1;
5524
5525 case OP_TYPE_MEM234:
5526 return OPX_TYPE_MEM234;
5527
5528 case OP_TYPE_MEM5:
5529 return OPX_TYPE_MEM5;
5530
5531 case OP_TYPE_MEM6:
5532 return OPX_TYPE_MEM6;
5533
5534 case OP_TYPE_MEM7:
5535 return OPX_TYPE_MEM7;
5536
5537 case OP_TYPE_IMM_Q:
5538 return OPX_TYPE_IMM_Q;
5539
5540 case OP_TYPE_IMM_W:
5541 return OPX_TYPE_IMM_W;
5542
5543 case OP_TYPE_IMM_L:
5544 return OPX_TYPE_IMM_L;
5545
5546 default:
5547 gcc_unreachable ();
5548 }
5549 }
5550
5551 /* Implement opy_type attribute.
5552 Return type of INSN's operand Y.
5553 If ADDRESS_P is true, return type of memory location operand refers to. */
5554 enum attr_opy_type
5555 m68k_sched_attr_opy_type (rtx insn, int address_p)
5556 {
5557 switch (sched_attr_op_type (insn, false, address_p != 0))
5558 {
5559 case OP_TYPE_RN:
5560 return OPY_TYPE_RN;
5561
5562 case OP_TYPE_FPN:
5563 return OPY_TYPE_FPN;
5564
5565 case OP_TYPE_MEM1:
5566 return OPY_TYPE_MEM1;
5567
5568 case OP_TYPE_MEM234:
5569 return OPY_TYPE_MEM234;
5570
5571 case OP_TYPE_MEM5:
5572 return OPY_TYPE_MEM5;
5573
5574 case OP_TYPE_MEM6:
5575 return OPY_TYPE_MEM6;
5576
5577 case OP_TYPE_MEM7:
5578 return OPY_TYPE_MEM7;
5579
5580 case OP_TYPE_IMM_Q:
5581 return OPY_TYPE_IMM_Q;
5582
5583 case OP_TYPE_IMM_W:
5584 return OPY_TYPE_IMM_W;
5585
5586 case OP_TYPE_IMM_L:
5587 return OPY_TYPE_IMM_L;
5588
5589 default:
5590 gcc_unreachable ();
5591 }
5592 }
5593
5594 /* Return the size of INSN, in 16-bit words, as an int. */
5595 static int
5596 sched_get_attr_size_int (rtx insn)
5597 {
5598 int size;
5599
5600 switch (get_attr_type (insn))
5601 {
5602 case TYPE_IGNORE:
5603 /* There should be no references to m68k_sched_attr_size for 'ignore'
5604 instructions. */
5605 gcc_unreachable ();
5606 return 0;
5607
5608 case TYPE_MUL_L:
5609 size = 2;
5610 break;
5611
5612 default:
5613 size = 1;
5614 break;
5615 }
5616
5617 switch (get_attr_opx_type (insn))
5618 {
5619 case OPX_TYPE_NONE:
5620 case OPX_TYPE_RN:
5621 case OPX_TYPE_FPN:
5622 case OPX_TYPE_MEM1:
5623 case OPX_TYPE_MEM234:
5624 case OPX_TYPE_IMM_Q:
5625 break;
5626
5627 case OPX_TYPE_MEM5:
5628 case OPX_TYPE_MEM6:
5629 /* Here we assume that most absolute references are short. */
5630 case OPX_TYPE_MEM7:
5631 case OPX_TYPE_IMM_W:
5632 ++size;
5633 break;
5634
5635 case OPX_TYPE_IMM_L:
5636 size += 2;
5637 break;
5638
5639 default:
5640 gcc_unreachable ();
5641 }
5642
5643 switch (get_attr_opy_type (insn))
5644 {
5645 case OPY_TYPE_NONE:
5646 case OPY_TYPE_RN:
5647 case OPY_TYPE_FPN:
5648 case OPY_TYPE_MEM1:
5649 case OPY_TYPE_MEM234:
5650 case OPY_TYPE_IMM_Q:
5651 break;
5652
5653 case OPY_TYPE_MEM5:
5654 case OPY_TYPE_MEM6:
5655 /* Here we assume that most absolute references are short. */
5656 case OPY_TYPE_MEM7:
5657 case OPY_TYPE_IMM_W:
5658 ++size;
5659 break;
5660
5661 case OPY_TYPE_IMM_L:
5662 size += 2;
5663 break;
5664
5665 default:
5666 gcc_unreachable ();
5667 }
5668
5669 if (size > 3)
5670 {
5671 gcc_assert (!reload_completed);
5672
5673 size = 3;
5674 }
5675
5676 return size;
5677 }
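/* Worked example of the size computation above (hypothetical insn):
   a long multiply with a 32-bit immediate, e.g. "muls.l #N,%d0",
   starts at size 2 (TYPE_MUL_L) and gains 2 words for its
   OPY_TYPE_IMM_L operand; the result is then capped at 3, which can
   only legitimately happen before reload, hence the gcc_assert.  */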
5678
5679 /* Return size of INSN as attribute enum value. */
5680 enum attr_size
5681 m68k_sched_attr_size (rtx insn)
5682 {
5683 switch (sched_get_attr_size_int (insn))
5684 {
5685 case 1:
5686 return SIZE_1;
5687
5688 case 2:
5689 return SIZE_2;
5690
5691 case 3:
5692 return SIZE_3;
5693
5694 default:
5695 gcc_unreachable ();
5696 }
5697 }
5698
5699 /* Return the memory access class of INSN's operand X or Y (depending on OPX_P):
5700 OP_TYPE_RN if it is not a MEM, OP_TYPE_MEM1 for a simple memory access, or OP_TYPE_MEM6 for a complex (e.g. indexed) one. */
5701 static enum attr_op_type
5702 sched_get_opxy_mem_type (rtx insn, bool opx_p)
5703 {
5704 if (opx_p)
5705 {
5706 switch (get_attr_opx_type (insn))
5707 {
5708 case OPX_TYPE_NONE:
5709 case OPX_TYPE_RN:
5710 case OPX_TYPE_FPN:
5711 case OPX_TYPE_IMM_Q:
5712 case OPX_TYPE_IMM_W:
5713 case OPX_TYPE_IMM_L:
5714 return OP_TYPE_RN;
5715
5716 case OPX_TYPE_MEM1:
5717 case OPX_TYPE_MEM234:
5718 case OPX_TYPE_MEM5:
5719 case OPX_TYPE_MEM7:
5720 return OP_TYPE_MEM1;
5721
5722 case OPX_TYPE_MEM6:
5723 return OP_TYPE_MEM6;
5724
5725 default:
5726 gcc_unreachable ();
5727 }
5728 }
5729 else
5730 {
5731 switch (get_attr_opy_type (insn))
5732 {
5733 case OPY_TYPE_NONE:
5734 case OPY_TYPE_RN:
5735 case OPY_TYPE_FPN:
5736 case OPY_TYPE_IMM_Q:
5737 case OPY_TYPE_IMM_W:
5738 case OPY_TYPE_IMM_L:
5739 return OP_TYPE_RN;
5740
5741 case OPY_TYPE_MEM1:
5742 case OPY_TYPE_MEM234:
5743 case OPY_TYPE_MEM5:
5744 case OPY_TYPE_MEM7:
5745 return OP_TYPE_MEM1;
5746
5747 case OPY_TYPE_MEM6:
5748 return OP_TYPE_MEM6;
5749
5750 default:
5751 gcc_unreachable ();
5752 }
5753 }
5754 }
5755
5756 /* Implement op_mem attribute. */
5757 enum attr_op_mem
5758 m68k_sched_attr_op_mem (rtx insn)
5759 {
5760 enum attr_op_type opx;
5761 enum attr_op_type opy;
5762
5763 opx = sched_get_opxy_mem_type (insn, true);
5764 opy = sched_get_opxy_mem_type (insn, false);
5765
5766 if (opy == OP_TYPE_RN && opx == OP_TYPE_RN)
5767 return OP_MEM_00;
5768
5769 if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM1)
5770 {
5771 switch (get_attr_opx_access (insn))
5772 {
5773 case OPX_ACCESS_R:
5774 return OP_MEM_10;
5775
5776 case OPX_ACCESS_W:
5777 return OP_MEM_01;
5778
5779 case OPX_ACCESS_RW:
5780 return OP_MEM_11;
5781
5782 default:
5783 gcc_unreachable ();
5784 }
5785 }
5786
5787 if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM6)
5788 {
5789 switch (get_attr_opx_access (insn))
5790 {
5791 case OPX_ACCESS_R:
5792 return OP_MEM_I0;
5793
5794 case OPX_ACCESS_W:
5795 return OP_MEM_0I;
5796
5797 case OPX_ACCESS_RW:
5798 return OP_MEM_I1;
5799
5800 default:
5801 gcc_unreachable ();
5802 }
5803 }
5804
5805 if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_RN)
5806 return OP_MEM_10;
5807
5808 if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM1)
5809 {
5810 switch (get_attr_opx_access (insn))
5811 {
5812 case OPX_ACCESS_W:
5813 return OP_MEM_11;
5814
5815 default:
5816 gcc_assert (!reload_completed);
5817 return OP_MEM_11;
5818 }
5819 }
5820
5821 if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM6)
5822 {
5823 switch (get_attr_opx_access (insn))
5824 {
5825 case OPX_ACCESS_W:
5826 return OP_MEM_1I;
5827
5828 default:
5829 gcc_assert (!reload_completed);
5830 return OP_MEM_1I;
5831 }
5832 }
5833
5834 if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_RN)
5835 return OP_MEM_I0;
5836
5837 if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM1)
5838 {
5839 switch (get_attr_opx_access (insn))
5840 {
5841 case OPX_ACCESS_W:
5842 return OP_MEM_I1;
5843
5844 default:
5845 gcc_assert (!reload_completed);
5846 return OP_MEM_I1;
5847 }
5848 }
5849
5850 gcc_assert (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM6);
5851 gcc_assert (!reload_completed);
5852 return OP_MEM_I1;
5853 }
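/* An informal decoding of the OP_MEM_* values used above, inferred
   from the mapping in this function: the first position describes
   the memory read and the second the memory write -- '0' none,
   '1' a simple access (OP_TYPE_MEM1), 'I' an indexed / complex one
   (OP_TYPE_MEM6).  E.g. OP_MEM_10 is "simple read, no write" and
   OP_MEM_I1 is "indexed read, simple write".  */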
5854
5855 /* Jump instruction types. Indexed by INSN_UID.
5856 The same rtl insn can be expanded into different asm instructions
5857 depending on the cc0_status. To properly determine the type of a jump
5858 instruction we scan the instruction stream and map jump types to this
5859 array. */
5860 static enum attr_type *sched_branch_type;
5861
5862 /* Return the type of the jump insn. */
5863 enum attr_type
5864 m68k_sched_branch_type (rtx insn)
5865 {
5866 enum attr_type type;
5867
5868 type = sched_branch_type[INSN_UID (insn)];
5869
5870 gcc_assert (type != 0);
5871
5872 return type;
5873 }
5874
5875 /* Data for the ColdFire V4 index bypass.
5876 The producer modifies a register that the consumer uses as an index
5877 with the specified scale. */
5878 static struct
5879 {
5880 /* Producer instruction. */
5881 rtx pro;
5882
5883 /* Consumer instruction. */
5884 rtx con;
5885
5886 /* Scale of the indexed memory access within the consumer,
5887 or zero if the bypass should not be effective at the moment. */
5888 int scale;
5889 } sched_cfv4_bypass_data;
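/* Sketch of the handshake (informal): for a 1x-scale index, the V4
   bypass cannot be expressed as a fixed-latency bypass in the DFA,
   so m68k_sched_indexed_address_bypass_p () below records the
   producer/consumer pair here and returns 0; m68k_sched_adjust_cost ()
   then picks the record up and enforces a minimum cost of 3.  */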
5890
5891 /* An empty state that is used in m68k_sched_adjust_cost. */
5892 static state_t sched_adjust_cost_state;
5893
5894 /* Implement adjust_cost scheduler hook.
5895 Return adjusted COST of dependency LINK between DEF_INSN and INSN. */
5896 static int
5897 m68k_sched_adjust_cost (rtx insn, rtx link ATTRIBUTE_UNUSED, rtx def_insn,
5898 int cost)
5899 {
5900 int delay;
5901
5902 if (recog_memoized (def_insn) < 0
5903 || recog_memoized (insn) < 0)
5904 return cost;
5905
5906 if (sched_cfv4_bypass_data.scale == 1)
5907 /* Handle ColdFire V4 bypass for indexed address with 1x scale. */
5908 {
5909 /* haifa-sched.c: insn_cost () calls bypass_p () just before
5910 targetm.sched.adjust_cost (). Hence, we can be relatively sure
5911 that the data in sched_cfv4_bypass_data is up to date. */
5912 gcc_assert (sched_cfv4_bypass_data.pro == def_insn
5913 && sched_cfv4_bypass_data.con == insn);
5914
5915 if (cost < 3)
5916 cost = 3;
5917
5918 sched_cfv4_bypass_data.pro = NULL;
5919 sched_cfv4_bypass_data.con = NULL;
5920 sched_cfv4_bypass_data.scale = 0;
5921 }
5922 else
5923 gcc_assert (sched_cfv4_bypass_data.pro == NULL
5924 && sched_cfv4_bypass_data.con == NULL
5925 && sched_cfv4_bypass_data.scale == 0);
5926
5927 /* Don't try to issue INSN earlier than the DFA permits.
5928 This is especially useful for instructions that write to memory,
5929 as their true-dependence (default) latency is better set to 0
5930 to work around alias-analysis limitations.
5931 This is, in fact, a machine-independent tweak, so it should
5932 probably be moved to haifa-sched.c: insn_cost (). */
5933 delay = min_insn_conflict_delay (sched_adjust_cost_state, def_insn, insn);
5934 if (delay > cost)
5935 cost = delay;
5936
5937 return cost;
5938 }
5939
5940 /* Return the maximum number of insns that can be issued on a single cycle. */
5941 static int
5942 m68k_sched_issue_rate (void)
5943 {
5944 switch (m68k_sched_cpu)
5945 {
5946 case CPU_CFV1:
5947 case CPU_CFV2:
5948 case CPU_CFV3:
5949 return 1;
5950
5951 case CPU_CFV4:
5952 return 2;
5953
5954 default:
5955 gcc_unreachable ();
5956 return 0;
5957 }
5958 }
5959
5960 /* Maximum instruction length (in words) for the current CPU.
5961 E.g., it is 3 for any ColdFire core. */
5962 static int max_insn_size;
5963
5964 /* Data to model instruction buffer of CPU. */
5965 struct _sched_ib
5966 {
5967 /* True if the instruction buffer is modeled for the current CPU. */
5968 bool enabled_p;
5969
5970 /* Size of the instruction buffer in words. */
5971 int size;
5972
5973 /* Number of filled words in the instruction buffer. */
5974 int filled;
5975
5976 /* Additional information about instruction buffer for CPUs that have
5977 a buffer of instruction records, rather than a plain buffer
5978 of instruction words. */
5979 struct _sched_ib_records
5980 {
5981 /* Size of buffer in records. */
5982 int n_insns;
5983
5984 /* Array to hold data on adjustments made to the size of the buffer. */
5985 int *adjust;
5986
5987 /* Index into the above array. */
5988 int adjust_index;
5989 } records;
5990
5991 /* An insn that reserves (marks empty) one word in the instruction buffer. */
5992 rtx insn;
5993 };
5994
5995 static struct _sched_ib sched_ib;
5996
5997 /* ID of memory unit. */
5998 static int sched_mem_unit_code;
5999
6000 /* Implementation of the targetm.sched.variable_issue () hook.
6001 It is called after INSN has been issued. It returns the number of insns
6002 that can possibly get scheduled on the current cycle.
6003 It is used here to determine the effect of INSN on the instruction
6004 buffer. */
6005 static int
6006 m68k_sched_variable_issue (FILE *sched_dump ATTRIBUTE_UNUSED,
6007 int sched_verbose ATTRIBUTE_UNUSED,
6008 rtx insn, int can_issue_more)
6009 {
6010 int insn_size;
6011
6012 if (recog_memoized (insn) >= 0 && get_attr_type (insn) != TYPE_IGNORE)
6013 {
6014 switch (m68k_sched_cpu)
6015 {
6016 case CPU_CFV1:
6017 case CPU_CFV2:
6018 insn_size = sched_get_attr_size_int (insn);
6019 break;
6020
6021 case CPU_CFV3:
6022 insn_size = sched_get_attr_size_int (insn);
6023
6024 /* ColdFire V3 and V4 cores have instruction buffers that can
6025 accumulate up to 8 instructions regardless of the instructions'
6026 sizes. So we should take care not to "prefetch" 24 one-word
6027 or 12 two-word instructions.
6028 To model this behavior we temporarily decrease the size of the
6029 buffer by (max_insn_size - insn_size) for the next 7 instructions. */
6030 {
6031 int adjust;
6032
6033 adjust = max_insn_size - insn_size;
6034 sched_ib.size -= adjust;
6035
6036 if (sched_ib.filled > sched_ib.size)
6037 sched_ib.filled = sched_ib.size;
6038
6039 sched_ib.records.adjust[sched_ib.records.adjust_index] = adjust;
6040 }
6041
6042 ++sched_ib.records.adjust_index;
6043 if (sched_ib.records.adjust_index == sched_ib.records.n_insns)
6044 sched_ib.records.adjust_index = 0;
6045
6046 /* Undo the adjustment we made 7 instructions ago. */
6047 sched_ib.size
6048 += sched_ib.records.adjust[sched_ib.records.adjust_index];
6049
6050 break;
6051
6052 case CPU_CFV4:
6053 gcc_assert (!sched_ib.enabled_p);
6054 insn_size = 0;
6055 break;
6056
6057 default:
6058 gcc_unreachable ();
6059 }
6060
6061 if (insn_size > sched_ib.filled)
6062 /* Scheduling for register pressure does not always take the DFA into
6063 account. Work around the instruction buffer not being filled enough. */
6064 {
6065 gcc_assert (sched_pressure_p);
6066 insn_size = sched_ib.filled;
6067 }
6068
6069 --can_issue_more;
6070 }
6071 else if (GET_CODE (PATTERN (insn)) == ASM_INPUT
6072 || asm_noperands (PATTERN (insn)) >= 0)
6073 insn_size = sched_ib.filled;
6074 else
6075 insn_size = 0;
6076
6077 sched_ib.filled -= insn_size;
6078
6079 return can_issue_more;
6080 }
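/* Worked example of the CFV3 adjustment ring above (informal): with
   max_insn_size == 3 and records.n_insns == 8, issuing a one-word insn
   stores adjust == 2 and shrinks sched_ib.size by 2; once the ring
   wraps around, that 2 is added back.  With a stream of one-word insns
   the effective buffer therefore settles at 24 - 7*2 = 10 words,
   approximating the 8-record hardware limit instead of naively
   allowing 24 one-word insns to be "prefetched".  */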
6081
6082 /* Return how many instructions the scheduler should look ahead to choose
6083 the best one. */
6084 static int
6085 m68k_sched_first_cycle_multipass_dfa_lookahead (void)
6086 {
6087 return m68k_sched_issue_rate () - 1;
6088 }
6089
6090 /* Implementation of targetm.sched.init_global () hook.
6091 It is invoked once per scheduling pass and is used here
6092 to initialize scheduler constants. */
6093 static void
6094 m68k_sched_md_init_global (FILE *sched_dump ATTRIBUTE_UNUSED,
6095 int sched_verbose ATTRIBUTE_UNUSED,
6096 int n_insns ATTRIBUTE_UNUSED)
6097 {
6098 /* Init branch types. */
6099 {
6100 rtx insn;
6101
6102 sched_branch_type = XCNEWVEC (enum attr_type, get_max_uid () + 1);
6103
6104 for (insn = get_insns (); insn != NULL_RTX; insn = NEXT_INSN (insn))
6105 {
6106 if (JUMP_P (insn))
6107 /* !!! FIXME: Implement real scan here. */
6108 sched_branch_type[INSN_UID (insn)] = TYPE_BCC;
6109 }
6110 }
6111
6112 #ifdef ENABLE_CHECKING
6113 /* Check that all instructions have DFA reservations and
6114 that all instructions can be issued from a clean state. */
6115 {
6116 rtx insn;
6117 state_t state;
6118
6119 state = alloca (state_size ());
6120
6121 for (insn = get_insns (); insn != NULL_RTX; insn = NEXT_INSN (insn))
6122 {
6123 if (INSN_P (insn) && recog_memoized (insn) >= 0)
6124 {
6125 gcc_assert (insn_has_dfa_reservation_p (insn));
6126
6127 state_reset (state);
6128 if (state_transition (state, insn) >= 0)
6129 gcc_unreachable ();
6130 }
6131 }
6132 }
6133 #endif
6134
6135 /* Set up the target CPU. */
6136
6137 /* ColdFire V4 has a set of features to keep its instruction buffer full
6138 (e.g., a separate memory bus for instructions) and, hence, we do not model
6139 the buffer for this CPU. */
6140 sched_ib.enabled_p = (m68k_sched_cpu != CPU_CFV4);
6141
6142 switch (m68k_sched_cpu)
6143 {
6144 case CPU_CFV4:
6145 sched_ib.filled = 0;
6146
6147 /* FALLTHRU */
6148
6149 case CPU_CFV1:
6150 case CPU_CFV2:
6151 max_insn_size = 3;
6152 sched_ib.records.n_insns = 0;
6153 sched_ib.records.adjust = NULL;
6154 break;
6155
6156 case CPU_CFV3:
6157 max_insn_size = 3;
6158 sched_ib.records.n_insns = 8;
6159 sched_ib.records.adjust = XNEWVEC (int, sched_ib.records.n_insns);
6160 break;
6161
6162 default:
6163 gcc_unreachable ();
6164 }
6165
6166 sched_mem_unit_code = get_cpu_unit_code ("cf_mem1");
6167
6168 sched_adjust_cost_state = xmalloc (state_size ());
6169 state_reset (sched_adjust_cost_state);
6170
6171 start_sequence ();
6172 emit_insn (gen_ib ());
6173 sched_ib.insn = get_insns ();
6174 end_sequence ();
6175 }
6176
6177 /* The scheduling pass is now finished. Free/reset static variables. */
6178 static void
6179 m68k_sched_md_finish_global (FILE *dump ATTRIBUTE_UNUSED,
6180 int verbose ATTRIBUTE_UNUSED)
6181 {
6182 sched_ib.insn = NULL;
6183
6184 free (sched_adjust_cost_state);
6185 sched_adjust_cost_state = NULL;
6186
6187 sched_mem_unit_code = 0;
6188
6189 free (sched_ib.records.adjust);
6190 sched_ib.records.adjust = NULL;
6191 sched_ib.records.n_insns = 0;
6192 max_insn_size = 0;
6193
6194 free (sched_branch_type);
6195 sched_branch_type = NULL;
6196 }
6197
6198 /* Implementation of targetm.sched.init () hook.
6199 It is invoked each time the scheduler starts on a new block (basic block or
6200 extended basic block). */
6201 static void
6202 m68k_sched_md_init (FILE *sched_dump ATTRIBUTE_UNUSED,
6203 int sched_verbose ATTRIBUTE_UNUSED,
6204 int n_insns ATTRIBUTE_UNUSED)
6205 {
6206 switch (m68k_sched_cpu)
6207 {
6208 case CPU_CFV1:
6209 case CPU_CFV2:
6210 sched_ib.size = 6;
6211 break;
6212
6213 case CPU_CFV3:
6214 sched_ib.size = sched_ib.records.n_insns * max_insn_size;
6215
6216 memset (sched_ib.records.adjust, 0,
6217 sched_ib.records.n_insns * sizeof (*sched_ib.records.adjust));
6218 sched_ib.records.adjust_index = 0;
6219 break;
6220
6221 case CPU_CFV4:
6222 gcc_assert (!sched_ib.enabled_p);
6223 sched_ib.size = 0;
6224 break;
6225
6226 default:
6227 gcc_unreachable ();
6228 }
6229
6230 if (sched_ib.enabled_p)
6231 /* haifa-sched.c: schedule_block () calls advance_cycle () just before
6232 the first cycle. Workaround that. */
6233 sched_ib.filled = -2;
6234 }
6235
6236 /* Implementation of targetm.sched.dfa_pre_advance_cycle () hook.
6237 It is invoked just before the current cycle finishes and is used here
6238 to track whether the instruction buffer got its two words this cycle. */
6239 static void
6240 m68k_sched_dfa_pre_advance_cycle (void)
6241 {
6242 if (!sched_ib.enabled_p)
6243 return;
6244
6245 if (!cpu_unit_reservation_p (curr_state, sched_mem_unit_code))
6246 {
6247 sched_ib.filled += 2;
6248
6249 if (sched_ib.filled > sched_ib.size)
6250 sched_ib.filled = sched_ib.size;
6251 }
6252 }
6253
6254 /* Implementation of targetm.sched.dfa_post_advance_cycle () hook.
6255 It is invoked just after a new cycle begins and is used here
6256 to set up the number of filled words in the instruction buffer so that
6257 instructions which won't have all their words prefetched will be
6258 stalled for a cycle. */
6259 static void
6260 m68k_sched_dfa_post_advance_cycle (void)
6261 {
6262 int i;
6263
6264 if (!sched_ib.enabled_p)
6265 return;
6266
6267 /* Set up the number of prefetched instruction words in the instruction
6268 buffer. */
6269 i = max_insn_size - sched_ib.filled;
6270
6271 while (--i >= 0)
6272 {
6273 if (state_transition (curr_state, sched_ib.insn) >= 0)
6274 gcc_unreachable ();
6275 }
6276 }
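/* Summary of the buffer dynamics per cycle (an informal reading of the
   hooks above): the pre-advance hook credits 2 words whenever the
   memory unit ("cf_mem1") was idle, variable_issue debits each issued
   insn's size, and this post-advance hook issues the dummy gen_ib ()
   insn once per still-missing word of a maximum-size insn, so that an
   insn whose words are not yet prefetched cannot be issued this cycle.
   The initial filled == -2 set in m68k_sched_md_init () compensates for
   the extra advance_cycle () haifa-sched.c performs before the first
   cycle.  */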
6277
6278 /* Return the X or Y (depending on OPX_P) operand of INSN,
6279 if it is an integer register, or NULL otherwise. */
6280 static rtx
6281 sched_get_reg_operand (rtx insn, bool opx_p)
6282 {
6283 rtx op = NULL;
6284
6285 if (opx_p)
6286 {
6287 if (get_attr_opx_type (insn) == OPX_TYPE_RN)
6288 {
6289 op = sched_get_operand (insn, true);
6290 gcc_assert (op != NULL);
6291
6292 if (!reload_completed && !REG_P (op))
6293 return NULL;
6294 }
6295 }
6296 else
6297 {
6298 if (get_attr_opy_type (insn) == OPY_TYPE_RN)
6299 {
6300 op = sched_get_operand (insn, false);
6301 gcc_assert (op != NULL);
6302
6303 if (!reload_completed && !REG_P (op))
6304 return NULL;
6305 }
6306 }
6307
6308 return op;
6309 }
6310
6311 /* Return true if the X or Y (depending on OPX_P) operand of INSN
6312 is a MEM. */
6313 static bool
6314 sched_mem_operand_p (rtx insn, bool opx_p)
6315 {
6316 switch (sched_get_opxy_mem_type (insn, opx_p))
6317 {
6318 case OP_TYPE_MEM1:
6319 case OP_TYPE_MEM6:
6320 return true;
6321
6322 default:
6323 return false;
6324 }
6325 }
6326
6327 /* Return the MEM operand of INSN selected by MUST_READ_P / MUST_WRITE_P:
6328 a written MEM can only be operand X; for reads, operand Y is checked first. */
6329 static rtx
6330 sched_get_mem_operand (rtx insn, bool must_read_p, bool must_write_p)
6331 {
6332 bool opx_p;
6333 bool opy_p;
6334
6335 opx_p = false;
6336 opy_p = false;
6337
6338 if (must_read_p)
6339 {
6340 opx_p = true;
6341 opy_p = true;
6342 }
6343
6344 if (must_write_p)
6345 {
6346 opx_p = true;
6347 opy_p = false;
6348 }
6349
6350 if (opy_p && sched_mem_operand_p (insn, false))
6351 return sched_get_operand (insn, false);
6352
6353 if (opx_p && sched_mem_operand_p (insn, true))
6354 return sched_get_operand (insn, true);
6355
6356 gcc_unreachable ();
6357 return NULL;
6358 }
6359
6360 /* Return non-zero if PRO modifies a register used as part of
6361 an address in CON. */
6362 int
6363 m68k_sched_address_bypass_p (rtx pro, rtx con)
6364 {
6365 rtx pro_x;
6366 rtx con_mem_read;
6367
6368 pro_x = sched_get_reg_operand (pro, true);
6369 if (pro_x == NULL)
6370 return 0;
6371
6372 con_mem_read = sched_get_mem_operand (con, true, false);
6373 gcc_assert (con_mem_read != NULL);
6374
6375 if (reg_mentioned_p (pro_x, con_mem_read))
6376 return 1;
6377
6378 return 0;
6379 }
6380
6381 /* Helper function for m68k_sched_indexed_address_bypass_p.
6382 If PRO modifies a register used as an index in CON,
6383 return the scale of the indexed memory access in CON. Return zero otherwise. */
6384 static int
6385 sched_get_indexed_address_scale (rtx pro, rtx con)
6386 {
6387 rtx reg;
6388 rtx mem;
6389 struct m68k_address address;
6390
6391 reg = sched_get_reg_operand (pro, true);
6392 if (reg == NULL)
6393 return 0;
6394
6395 mem = sched_get_mem_operand (con, true, false);
6396 gcc_assert (mem != NULL && MEM_P (mem));
6397
6398 if (!m68k_decompose_address (GET_MODE (mem), XEXP (mem, 0), reload_completed,
6399 &address))
6400 gcc_unreachable ();
6401
6402 if (REGNO (reg) == REGNO (address.index))
6403 {
6404 gcc_assert (address.scale != 0);
6405 return address.scale;
6406 }
6407
6408 return 0;
6409 }
6410
6411 /* Return non-zero if PRO modifies a register used
6412 as an index with scale 2 or 4 in CON. */
6413 int
6414 m68k_sched_indexed_address_bypass_p (rtx pro, rtx con)
6415 {
6416 gcc_assert (sched_cfv4_bypass_data.pro == NULL
6417 && sched_cfv4_bypass_data.con == NULL
6418 && sched_cfv4_bypass_data.scale == 0);
6419
6420 switch (sched_get_indexed_address_scale (pro, con))
6421 {
6422 case 1:
6423 /* We can't have a variable latency bypass, so
6424 remember to adjust the insn cost in adjust_cost hook. */
6425 sched_cfv4_bypass_data.pro = pro;
6426 sched_cfv4_bypass_data.con = con;
6427 sched_cfv4_bypass_data.scale = 1;
6428 return 0;
6429
6430 case 2:
6431 case 4:
6432 return 1;
6433
6434 default:
6435 return 0;
6436 }
6437 }
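/* Hypothetical example (informal, assuming Motorola syntax): for
     pro:  addq.l #1,%d1
     con:  move.l (%a0,%d1.l*4),%d2
   the producer writes %d1, which the consumer uses as an index with
   scale 4, so this function returns 1 and the bypass guarded by this
   predicate in the machine description takes effect; with scale 1 it
   returns 0 and the cost is instead bumped to at least 3 in
   m68k_sched_adjust_cost ().  */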
6438
6439 /* We generate a two-instruction program at M_TRAMP:
6440 movea.l &CHAIN_VALUE,%a0
6441 jmp FNADDR
6442 where %a0 can be modified by changing STATIC_CHAIN_REGNUM. */
6443
6444 static void
6445 m68k_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
6446 {
6447 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
6448 rtx mem;
6449
6450 gcc_assert (ADDRESS_REGNO_P (STATIC_CHAIN_REGNUM));
6451
6452 mem = adjust_address (m_tramp, HImode, 0);
6453 emit_move_insn (mem, GEN_INT (0x207C + ((STATIC_CHAIN_REGNUM - 8) << 9)));
6454 mem = adjust_address (m_tramp, SImode, 2);
6455 emit_move_insn (mem, chain_value);
6456
6457 mem = adjust_address (m_tramp, HImode, 6);
6458 emit_move_insn (mem, GEN_INT (0x4EF9));
6459 mem = adjust_address (m_tramp, SImode, 8);
6460 emit_move_insn (mem, fnaddr);
6461
6462 FINALIZE_TRAMPOLINE (XEXP (m_tramp, 0));
6463 }
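/* Resulting 12-byte layout (a sketch; opcodes per the M68000 family
   manual): 0x207C is "movea.l #<imm32>,%a0" with the destination
   address register encoded in bits 9-11, hence the
   (STATIC_CHAIN_REGNUM - 8) << 9 adjustment, and 0x4EF9 is
   "jmp (<abs32>).l":

     offset 0:  0x207C        movea.l #CHAIN_VALUE,%aN
     offset 2:  CHAIN_VALUE   32-bit static chain
     offset 6:  0x4EF9        jmp absolute long
     offset 8:  FNADDR        32-bit target address  */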
6464
6465 /* On the 68000, the RTS insn cannot pop anything.
6466 On the 68010, the RTD insn may be used to pop them if the number
6467 of args is fixed, but if the number is variable then the caller
6468 must pop them all. RTD can't be used for library calls now
6469 because the library is compiled with the Unix compiler.
6470 Use of RTD is a selectable option, since it is incompatible with
6471 standard Unix calling sequences. If the option is not selected,
6472 the caller must always pop the args. */
6473
6474 static int
6475 m68k_return_pops_args (tree fundecl, tree funtype, int size)
6476 {
6477 return ((TARGET_RTD
6478 && (!fundecl
6479 || TREE_CODE (fundecl) != IDENTIFIER_NODE)
6480 && (!stdarg_p (funtype)))
6481 ? size : 0);
6482 }
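/* Illustrative effect (informal): with -mrtd, a fixed-argument
   function taking two ints returns with "rtd #8", popping its 8 bytes
   of arguments; a variadic callee, or a library call identified by an
   IDENTIFIER_NODE fundecl, returns with plain "rts" and the caller
   pops.  RTD only exists on the 68010 and later, hence the option.  */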
6483
6484 /* Make sure everything's fine if we *don't* have a given processor.
6485 This assumes that putting a register in fixed_regs will keep the
6486 compiler's mitts completely off it. We don't bother to zero it out
6487 of register classes. */
6488
6489 static void
6490 m68k_conditional_register_usage (void)
6491 {
6492 int i;
6493 HARD_REG_SET x;
6494 if (!TARGET_HARD_FLOAT)
6495 {
6496 COPY_HARD_REG_SET (x, reg_class_contents[(int)FP_REGS]);
6497 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
6498 if (TEST_HARD_REG_BIT (x, i))
6499 fixed_regs[i] = call_used_regs[i] = 1;
6500 }
6501 if (flag_pic)
6502 fixed_regs[PIC_REG] = call_used_regs[PIC_REG] = 1;
6503 }
6504
6505 #include "gt-m68k.h"