]>
Commit | Line | Data |
---|---|---|
5243fbff | 1 | /* Subroutines for insn-output.c for Motorola 68000 family. |
aad93da1 | 2 | Copyright (C) 1987-2017 Free Software Foundation, Inc. |
5243fbff | 3 | |
187b36cf | 4 | This file is part of GCC. |
5243fbff | 5 | |
187b36cf | 6 | GCC is free software; you can redistribute it and/or modify |
5243fbff | 7 | it under the terms of the GNU General Public License as published by |
038d1e19 | 8 | the Free Software Foundation; either version 3, or (at your option) |
5243fbff | 9 | any later version. |
10 | ||
187b36cf | 11 | GCC is distributed in the hope that it will be useful, |
5243fbff | 12 | but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
14 | GNU General Public License for more details. | |
15 | ||
16 | You should have received a copy of the GNU General Public License | |
038d1e19 | 17 | along with GCC; see the file COPYING3. If not see |
18 | <http://www.gnu.org/licenses/>. */ | |
5243fbff | 19 | |
5243fbff | 20 | #include "config.h" |
6c82faf1 | 21 | #include "system.h" |
805e22b2 | 22 | #include "coretypes.h" |
9ef16211 | 23 | #include "backend.h" |
d040a5b0 | 24 | #include "cfghooks.h" |
7720c877 | 25 | #include "tree.h" |
30a86690 | 26 | #include "stringpool.h" |
27 | #include "attribs.h" | |
9ef16211 | 28 | #include "rtl.h" |
29 | #include "df.h" | |
30 | #include "alias.h" | |
b20a8bb4 | 31 | #include "fold-const.h" |
9ed99284 | 32 | #include "calls.h" |
33 | #include "stor-layout.h" | |
34 | #include "varasm.h" | |
5243fbff | 35 | #include "regs.h" |
5243fbff | 36 | #include "insn-config.h" |
37 | #include "conditions.h" | |
5243fbff | 38 | #include "output.h" |
39 | #include "insn-attr.h" | |
e6b2f841 | 40 | #include "recog.h" |
0b205f4c | 41 | #include "diagnostic-core.h" |
d53441c8 | 42 | #include "flags.h" |
d53441c8 | 43 | #include "expmed.h" |
44 | #include "dojump.h" | |
45 | #include "explow.h" | |
ad7b10a2 | 46 | #include "memmodel.h" |
d53441c8 | 47 | #include "emit-rtl.h" |
48 | #include "stmt.h" | |
9ce40aae | 49 | #include "expr.h" |
50 | #include "reload.h" | |
f2fa409d | 51 | #include "tm_p.h" |
a767736d | 52 | #include "target.h" |
01d15dc5 | 53 | #include "debug.h" |
94ea8568 | 54 | #include "cfgrtl.h" |
55 | #include "cfganal.h" | |
56 | #include "lcm.h" | |
57 | #include "cfgbuild.h" | |
58 | #include "cfgcleanup.h" | |
15b9ea10 | 59 | /* ??? Need to add a dependency between m68k.o and sched-int.h. */ |
60 | #include "sched-int.h" | |
61 | #include "insn-codes.h" | |
fba5dd52 | 62 | #include "opts.h" |
5bb27b7c | 63 | #include "optabs.h" |
f7715905 | 64 | #include "builtins.h" |
d8ca5b53 | 65 | #include "rtl-iter.h" |
5243fbff | 66 | |
0c71fb4f | 67 | /* This file should be included last. */ |
4b498588 | 68 | #include "target-def.h" |
69 | ||
/* Map each hard register number to its register class.  Registers 0-7
   are the data registers d0-d7, 8-15 the address registers a0-a7, and
   16-23 the floating-point registers fp0-fp7.  The final ADDR_REGS
   entry covers register 24 -- presumably the fake argument pointer;
   TODO(review): confirm against the FIRST_PSEUDO_REGISTER layout in
   m68k.h.  */
enum reg_class regno_reg_class[] =
{
  DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
  DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
  ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
  ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  ADDR_REGS
};
80 | ||
81 | ||
9c47decb | 82 | /* The minimum number of integer registers that we want to save with the |
83 | movem instruction. Using two movel instructions instead of a single | |
84 | moveml is about 15% faster for the 68020 and 68030 at no expense in | |
85 | code size. */ | |
86 | #define MIN_MOVEM_REGS 3 | |
87 | ||
88 | /* The minimum number of floating point registers that we want to save | |
89 | with the fmovem instruction. */ | |
90 | #define MIN_FMOVEM_REGS 1 | |
91 | ||
/* Structure describing stack frame layout, filled in by
   m68k_compute_frame_layout () and cached in CURRENT_FRAME.  */
struct m68k_frame
{
  /* Stack pointer to frame pointer offset.  Set to 4 * the number of
     saved data/address registers by m68k_compute_frame_layout.  */
  HOST_WIDE_INT offset;

  /* Offset of FPU registers.  */
  HOST_WIDE_INT foffset;

  /* Frame size in bytes, rounded up to a multiple of 4.  */
  HOST_WIDE_INT size;

  /* Number of data and address registers to save, and the bitmask
     (bit REGNO - D0_REG) of those registers.  */
  int reg_no;
  unsigned int reg_mask;

  /* Likewise for the FPU registers.  */
  int fpu_no;
  unsigned int fpu_mask;

  /* Offsets relative to ARG_POINTER.  */
  HOST_WIDE_INT frame_pointer_offset;
  HOST_WIDE_INT stack_pointer_offset;

  /* funcdef_no of the function this information refers to; used to
     avoid recomputing the layout for the same function.  */
  int funcdef_no;
};
119 | ||
d02588f4 | 120 | /* Current frame information calculated by m68k_compute_frame_layout(). */ |
121 | static struct m68k_frame current_frame; | |
122 | ||
b97b0687 | 123 | /* Structure describing an m68k address. |
124 | ||
125 | If CODE is UNKNOWN, the address is BASE + INDEX * SCALE + OFFSET, | |
126 | with null fields evaluating to 0. Here: | |
127 | ||
128 | - BASE satisfies m68k_legitimate_base_reg_p | |
129 | - INDEX satisfies m68k_legitimate_index_reg_p | |
130 | - OFFSET satisfies m68k_legitimate_constant_address_p | |
131 | ||
132 | INDEX is either HImode or SImode. The other fields are SImode. | |
133 | ||
134 | If CODE is PRE_DEC, the address is -(BASE). If CODE is POST_INC, | |
135 | the address is (BASE)+. */ | |
struct m68k_address {
  /* UNKNOWN for a full BASE + INDEX * SCALE + OFFSET address, or
     PRE_DEC / POST_INC for the autodecrement/autoincrement forms
     described in the comment above.  */
  enum rtx_code code;
  /* Base register, or null if absent (treated as 0).  */
  rtx base;
  /* Index register (HImode or SImode), or null.  */
  rtx index;
  /* Constant displacement, or null.  */
  rtx offset;
  /* Multiplier applied to INDEX.  */
  int scale;
};
143 | ||
99f52c2b | 144 | static int m68k_sched_adjust_cost (rtx_insn *, int, rtx_insn *, int, |
145 | unsigned int); | |
3c904dda | 146 | static int m68k_sched_issue_rate (void); |
18282db0 | 147 | static int m68k_sched_variable_issue (FILE *, int, rtx_insn *, int); |
15b9ea10 | 148 | static void m68k_sched_md_init_global (FILE *, int, int); |
149 | static void m68k_sched_md_finish_global (FILE *, int); | |
150 | static void m68k_sched_md_init (FILE *, int, int); | |
151 | static void m68k_sched_dfa_pre_advance_cycle (void); | |
152 | static void m68k_sched_dfa_post_advance_cycle (void); | |
3c904dda | 153 | static int m68k_sched_first_cycle_multipass_dfa_lookahead (void); |
15b9ea10 | 154 | |
cd90919d | 155 | static bool m68k_can_eliminate (const int, const int); |
b2d7ede1 | 156 | static void m68k_conditional_register_usage (void); |
3754d046 | 157 | static bool m68k_legitimate_address_p (machine_mode, rtx, bool); |
4c834714 | 158 | static void m68k_option_override (void); |
c2e7013d | 159 | static void m68k_override_options_after_change (void); |
821960c7 | 160 | static rtx find_addr_reg (rtx); |
161 | static const char *singlemove_string (rtx *); | |
821960c7 | 162 | static void m68k_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, |
163 | HOST_WIDE_INT, tree); | |
c8c47ddf | 164 | static rtx m68k_struct_value_rtx (tree, int); |
7366b851 | 165 | static tree m68k_handle_fndecl_attribute (tree *node, tree name, |
166 | tree args, int flags, | |
167 | bool *no_add_attrs); | |
d02588f4 | 168 | static void m68k_compute_frame_layout (void); |
7366b851 | 169 | static bool m68k_save_reg (unsigned int regno, bool interrupt_handler); |
33fb08b8 | 170 | static bool m68k_ok_for_sibcall_p (tree, tree); |
869bde6b | 171 | static bool m68k_tls_symbol_p (rtx); |
3754d046 | 172 | static rtx m68k_legitimize_address (rtx, rtx, machine_mode); |
5ae4887d | 173 | static bool m68k_rtx_costs (rtx, machine_mode, int, int, int *, bool); |
159cec75 | 174 | #if M68K_HONOR_TARGET_STRICT_ALIGNMENT |
33be53f9 | 175 | static bool m68k_return_in_memory (const_tree, const_tree); |
159cec75 | 176 | #endif |
869bde6b | 177 | static void m68k_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED; |
61fafd77 | 178 | static void m68k_trampoline_init (rtx, tree, rtx); |
f5bc28da | 179 | static int m68k_return_pops_args (tree, tree, int); |
1a1a6f69 | 180 | static rtx m68k_delegitimize_address (rtx); |
3754d046 | 181 | static void m68k_function_arg_advance (cumulative_args_t, machine_mode, |
385c19d1 | 182 | const_tree, bool); |
3754d046 | 183 | static rtx m68k_function_arg (cumulative_args_t, machine_mode, |
385c19d1 | 184 | const_tree, bool); |
3754d046 | 185 | static bool m68k_cannot_force_const_mem (machine_mode mode, rtx x); |
27f5e69f | 186 | static bool m68k_output_addr_const_extra (FILE *, rtx); |
5bb27b7c | 187 | static void m68k_init_sync_libfuncs (void) ATTRIBUTE_UNUSED; |
41af789c | 188 | static enum flt_eval_method |
189 | m68k_excess_precision (enum excess_precision_type); | |
b395382f | 190 | static bool m68k_hard_regno_mode_ok (unsigned int, machine_mode); |
5243fbff | 191 | \f |
a767736d | 192 | /* Initialize the GCC target structure. */ |
58356836 | 193 | |
194 | #if INT_OP_GROUP == INT_OP_DOT_WORD | |
195 | #undef TARGET_ASM_ALIGNED_HI_OP | |
196 | #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t" | |
197 | #endif | |
198 | ||
199 | #if INT_OP_GROUP == INT_OP_NO_DOT | |
200 | #undef TARGET_ASM_BYTE_OP | |
201 | #define TARGET_ASM_BYTE_OP "\tbyte\t" | |
202 | #undef TARGET_ASM_ALIGNED_HI_OP | |
203 | #define TARGET_ASM_ALIGNED_HI_OP "\tshort\t" | |
204 | #undef TARGET_ASM_ALIGNED_SI_OP | |
205 | #define TARGET_ASM_ALIGNED_SI_OP "\tlong\t" | |
206 | #endif | |
207 | ||
208 | #if INT_OP_GROUP == INT_OP_DC | |
209 | #undef TARGET_ASM_BYTE_OP | |
210 | #define TARGET_ASM_BYTE_OP "\tdc.b\t" | |
211 | #undef TARGET_ASM_ALIGNED_HI_OP | |
212 | #define TARGET_ASM_ALIGNED_HI_OP "\tdc.w\t" | |
213 | #undef TARGET_ASM_ALIGNED_SI_OP | |
214 | #define TARGET_ASM_ALIGNED_SI_OP "\tdc.l\t" | |
215 | #endif | |
216 | ||
217 | #undef TARGET_ASM_UNALIGNED_HI_OP | |
218 | #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP | |
219 | #undef TARGET_ASM_UNALIGNED_SI_OP | |
220 | #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP | |
221 | ||
6988553d | 222 | #undef TARGET_ASM_OUTPUT_MI_THUNK |
223 | #define TARGET_ASM_OUTPUT_MI_THUNK m68k_output_mi_thunk | |
b712bf34 | 224 | #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK |
a9f1838b | 225 | #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true |
6988553d | 226 | |
92c473b8 | 227 | #undef TARGET_ASM_FILE_START_APP_OFF |
228 | #define TARGET_ASM_FILE_START_APP_OFF true | |
229 | ||
41e3a0c7 | 230 | #undef TARGET_LEGITIMIZE_ADDRESS |
231 | #define TARGET_LEGITIMIZE_ADDRESS m68k_legitimize_address | |
232 | ||
15b9ea10 | 233 | #undef TARGET_SCHED_ADJUST_COST |
234 | #define TARGET_SCHED_ADJUST_COST m68k_sched_adjust_cost | |
235 | ||
3c904dda | 236 | #undef TARGET_SCHED_ISSUE_RATE |
237 | #define TARGET_SCHED_ISSUE_RATE m68k_sched_issue_rate | |
238 | ||
15b9ea10 | 239 | #undef TARGET_SCHED_VARIABLE_ISSUE |
240 | #define TARGET_SCHED_VARIABLE_ISSUE m68k_sched_variable_issue | |
241 | ||
242 | #undef TARGET_SCHED_INIT_GLOBAL | |
243 | #define TARGET_SCHED_INIT_GLOBAL m68k_sched_md_init_global | |
244 | ||
245 | #undef TARGET_SCHED_FINISH_GLOBAL | |
246 | #define TARGET_SCHED_FINISH_GLOBAL m68k_sched_md_finish_global | |
247 | ||
248 | #undef TARGET_SCHED_INIT | |
249 | #define TARGET_SCHED_INIT m68k_sched_md_init | |
250 | ||
251 | #undef TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE | |
252 | #define TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE m68k_sched_dfa_pre_advance_cycle | |
253 | ||
254 | #undef TARGET_SCHED_DFA_POST_ADVANCE_CYCLE | |
255 | #define TARGET_SCHED_DFA_POST_ADVANCE_CYCLE m68k_sched_dfa_post_advance_cycle | |
256 | ||
3c904dda | 257 | #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD |
258 | #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \ | |
259 | m68k_sched_first_cycle_multipass_dfa_lookahead | |
260 | ||
4c834714 | 261 | #undef TARGET_OPTION_OVERRIDE |
262 | #define TARGET_OPTION_OVERRIDE m68k_option_override | |
263 | ||
c2e7013d | 264 | #undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE |
265 | #define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m68k_override_options_after_change | |
266 | ||
fab7adbf | 267 | #undef TARGET_RTX_COSTS |
268 | #define TARGET_RTX_COSTS m68k_rtx_costs | |
269 | ||
7366b851 | 270 | #undef TARGET_ATTRIBUTE_TABLE |
271 | #define TARGET_ATTRIBUTE_TABLE m68k_attribute_table | |
272 | ||
c8c47ddf | 273 | #undef TARGET_PROMOTE_PROTOTYPES |
fb80456a | 274 | #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true |
c8c47ddf | 275 | |
276 | #undef TARGET_STRUCT_VALUE_RTX | |
277 | #define TARGET_STRUCT_VALUE_RTX m68k_struct_value_rtx | |
278 | ||
e0ab7256 | 279 | #undef TARGET_CANNOT_FORCE_CONST_MEM |
7d7d7bd2 | 280 | #define TARGET_CANNOT_FORCE_CONST_MEM m68k_cannot_force_const_mem |
e0ab7256 | 281 | |
33fb08b8 | 282 | #undef TARGET_FUNCTION_OK_FOR_SIBCALL |
283 | #define TARGET_FUNCTION_OK_FOR_SIBCALL m68k_ok_for_sibcall_p | |
284 | ||
159cec75 | 285 | #if M68K_HONOR_TARGET_STRICT_ALIGNMENT |
286 | #undef TARGET_RETURN_IN_MEMORY | |
287 | #define TARGET_RETURN_IN_MEMORY m68k_return_in_memory | |
288 | #endif | |
289 | ||
869bde6b | 290 | #ifdef HAVE_AS_TLS |
291 | #undef TARGET_HAVE_TLS | |
292 | #define TARGET_HAVE_TLS (true) | |
293 | ||
294 | #undef TARGET_ASM_OUTPUT_DWARF_DTPREL | |
295 | #define TARGET_ASM_OUTPUT_DWARF_DTPREL m68k_output_dwarf_dtprel | |
296 | #endif | |
297 | ||
e46fbef5 | 298 | #undef TARGET_LRA_P |
299 | #define TARGET_LRA_P hook_bool_void_false | |
300 | ||
fd50b071 | 301 | #undef TARGET_LEGITIMATE_ADDRESS_P |
302 | #define TARGET_LEGITIMATE_ADDRESS_P m68k_legitimate_address_p | |
303 | ||
cd90919d | 304 | #undef TARGET_CAN_ELIMINATE |
305 | #define TARGET_CAN_ELIMINATE m68k_can_eliminate | |
306 | ||
b2d7ede1 | 307 | #undef TARGET_CONDITIONAL_REGISTER_USAGE |
308 | #define TARGET_CONDITIONAL_REGISTER_USAGE m68k_conditional_register_usage | |
309 | ||
61fafd77 | 310 | #undef TARGET_TRAMPOLINE_INIT |
311 | #define TARGET_TRAMPOLINE_INIT m68k_trampoline_init | |
312 | ||
f5bc28da | 313 | #undef TARGET_RETURN_POPS_ARGS |
314 | #define TARGET_RETURN_POPS_ARGS m68k_return_pops_args | |
315 | ||
1a1a6f69 | 316 | #undef TARGET_DELEGITIMIZE_ADDRESS |
317 | #define TARGET_DELEGITIMIZE_ADDRESS m68k_delegitimize_address | |
318 | ||
385c19d1 | 319 | #undef TARGET_FUNCTION_ARG |
320 | #define TARGET_FUNCTION_ARG m68k_function_arg | |
321 | ||
322 | #undef TARGET_FUNCTION_ARG_ADVANCE | |
323 | #define TARGET_FUNCTION_ARG_ADVANCE m68k_function_arg_advance | |
324 | ||
ca316360 | 325 | #undef TARGET_LEGITIMATE_CONSTANT_P |
326 | #define TARGET_LEGITIMATE_CONSTANT_P m68k_legitimate_constant_p | |
327 | ||
27f5e69f | 328 | #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA |
329 | #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA m68k_output_addr_const_extra | |
330 | ||
41af789c | 331 | #undef TARGET_C_EXCESS_PRECISION |
332 | #define TARGET_C_EXCESS_PRECISION m68k_excess_precision | |
333 | ||
2a967206 | 334 | /* The value stored by TAS. */ |
335 | #undef TARGET_ATOMIC_TEST_AND_SET_TRUEVAL | |
336 | #define TARGET_ATOMIC_TEST_AND_SET_TRUEVAL 128 | |
337 | ||
b395382f | 338 | #undef TARGET_HARD_REGNO_MODE_OK |
339 | #define TARGET_HARD_REGNO_MODE_OK m68k_hard_regno_mode_ok | |
340 | ||
/* Table of machine attributes recognized by this backend.  All three
   interrupt variants share m68k_handle_fndecl_attribute, which checks
   that the attribute is applied to a function and that at most one of
   them is used.  */
static const struct attribute_spec m68k_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "interrupt", 0, 0, true, false, false, m68k_handle_fndecl_attribute,
    false },
  { "interrupt_handler", 0, 0, true, false, false,
    m68k_handle_fndecl_attribute, false },
  { "interrupt_thread", 0, 0, true, false, false,
    m68k_handle_fndecl_attribute, false },
  /* Sentinel entry terminating the table.  */
  { NULL, 0, 0, false, false, false, NULL, false }
};
353 | ||
57e4bbfb | 354 | struct gcc_target targetm = TARGET_INITIALIZER; |
a767736d | 355 | \f |
8aed3cb3 | 356 | /* Base flags for 68k ISAs. */ |
357 | #define FL_FOR_isa_00 FL_ISA_68000 | |
358 | #define FL_FOR_isa_10 (FL_FOR_isa_00 | FL_ISA_68010) | |
359 | /* FL_68881 controls the default setting of -m68881. gcc has traditionally | |
360 | generated 68881 code for 68020 and 68030 targets unless explicitly told | |
361 | not to. */ | |
362 | #define FL_FOR_isa_20 (FL_FOR_isa_10 | FL_ISA_68020 \ | |
5980279a | 363 | | FL_BITFIELD | FL_68881 | FL_CAS) |
8aed3cb3 | 364 | #define FL_FOR_isa_40 (FL_FOR_isa_20 | FL_ISA_68040) |
365 | #define FL_FOR_isa_cpu32 (FL_FOR_isa_10 | FL_ISA_68020) | |
366 | ||
367 | /* Base flags for ColdFire ISAs. */ | |
368 | #define FL_FOR_isa_a (FL_COLDFIRE | FL_ISA_A) | |
369 | #define FL_FOR_isa_aplus (FL_FOR_isa_a | FL_ISA_APLUS | FL_CF_USP) | |
370 | /* Note ISA_B doesn't necessarily include USP (user stack pointer) support. */ | |
371 | #define FL_FOR_isa_b (FL_FOR_isa_a | FL_ISA_B | FL_CF_HWDIV) | |
7f2330d5 | 372 | /* ISA_C is not upwardly compatible with ISA_B. */ |
12877e83 | 373 | #define FL_FOR_isa_c (FL_FOR_isa_a | FL_ISA_C | FL_CF_USP) |
8aed3cb3 | 374 | |
/* Instruction-set architectures understood by this backend; each maps
   to a FL_FOR_* flag set defined above.  */
enum m68k_isa
{
  /* Traditional 68000 instruction sets.  */
  isa_00,
  isa_10,
  isa_20,
  isa_40,
  isa_cpu32,
  /* ColdFire instruction set variants.  */
  isa_a,
  isa_aplus,
  isa_b,
  isa_c,
  /* Number of ISAs; also used as the sentinel value in the
     m68k_target_selection tables below.  */
  isa_max
};
390 | ||
/* Information about one of the -march, -mcpu or -mtune arguments.  */
struct m68k_target_selection
{
  /* The argument being described.  */
  const char *name;

  /* For -mcpu, this is the device selected by the option.
     For -mtune and -march, it is a representative device
     for the microarchitecture or ISA respectively.  */
  enum target_device device;

  /* The M68K_DEVICE fields associated with DEVICE.  See the comment
     in m68k-devices.def for details.  FAMILY is only valid for -mcpu;
     it is null in the -march and -mtune tables.  */
  const char *family;
  enum uarch_type microarch;
  enum m68k_isa isa;
  /* FL_* capability flags for the device/ISA/microarchitecture.  */
  unsigned long flags;
};
409 | ||
/* A list of all devices in m68k-devices.def.  Used for -mcpu selection.
   Each entry's flags are the device's own FLAGS augmented with the base
   flags of its ISA (FL_FOR_##ISA).  */
static const struct m68k_target_selection all_devices[] =
{
#define M68K_DEVICE(NAME,ENUM_VALUE,FAMILY,MULTILIB,MICROARCH,ISA,FLAGS) \
  { NAME, ENUM_VALUE, FAMILY, u##MICROARCH, ISA, FLAGS | FL_FOR_##ISA },
#include "m68k-devices.def"
#undef M68K_DEVICE
  /* Null-name sentinel terminating the table.  */
  { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
};
419 | ||
/* A list of all ISAs, mapping each one to a representative device.
   Used for -march selection.  Unlike all_devices, the family field is
   always null here.  */
static const struct m68k_target_selection all_isas[] =
{
#define M68K_ISA(NAME,DEVICE,MICROARCH,ISA,FLAGS) \
  { NAME, DEVICE, NULL, u##MICROARCH, ISA, FLAGS },
#include "m68k-isas.def"
#undef M68K_ISA
  /* Null-name sentinel terminating the table.  */
  { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
};
430 | ||
/* A list of all microarchitectures, mapping each one to a representative
   device.  Used for -mtune selection; the family field is always null.  */
static const struct m68k_target_selection all_microarchs[] =
{
#define M68K_MICROARCH(NAME,DEVICE,MICROARCH,ISA,FLAGS) \
  { NAME, DEVICE, NULL, u##MICROARCH, ISA, FLAGS },
#include "m68k-microarchs.def"
#undef M68K_MICROARCH
  /* Null-name sentinel terminating the table.  */
  { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
};
441 | \f | |
442 | /* The entries associated with the -mcpu, -march and -mtune settings, | |
443 | or null for options that have not been used. */ | |
444 | const struct m68k_target_selection *m68k_cpu_entry; | |
445 | const struct m68k_target_selection *m68k_arch_entry; | |
446 | const struct m68k_target_selection *m68k_tune_entry; | |
447 | ||
448 | /* Which CPU we are generating code for. */ | |
449 | enum target_device m68k_cpu; | |
450 | ||
451 | /* Which microarchitecture to tune for. */ | |
452 | enum uarch_type m68k_tune; | |
453 | ||
454 | /* Which FPU to use. */ | |
455 | enum fpu_type m68k_fpu; | |
fbdd0521 | 456 | |
8aed3cb3 | 457 | /* The set of FL_* flags that apply to the target processor. */ |
458 | unsigned int m68k_cpu_flags; | |
dd7218bb | 459 | |
53457e36 | 460 | /* The set of FL_* flags that apply to the processor to be tuned for. */ |
461 | unsigned int m68k_tune_flags; | |
462 | ||
dd7218bb | 463 | /* Asm templates for calling or jumping to an arbitrary symbolic address, |
464 | or NULL if such calls or jumps are not supported. The address is held | |
465 | in operand 0. */ | |
466 | const char *m68k_symbolic_call; | |
467 | const char *m68k_symbolic_jump; | |
1d86aeab | 468 | |
469 | /* Enum variable that corresponds to m68k_symbolic_call values. */ | |
470 | enum M68K_SYMBOLIC_CALL m68k_symbolic_call_var; | |
471 | ||
8aed3cb3 | 472 | \f |
/* Implement TARGET_OPTION_OVERRIDE.  Resolve the -mcpu/-march/-mtune
   selections into the m68k_cpu/m68k_tune/m68k_fpu globals, derive the
   default target_flags, choose the symbolic call/jump templates for the
   PIC model in effect, and configure the instruction scheduler.  */

static void
m68k_option_override (void)
{
  const struct m68k_target_selection *entry;
  unsigned long target_mask;

  /* Translate the raw option enums recorded by the .opt machinery into
     pointers at the selection tables above.  */
  if (global_options_set.x_m68k_arch_option)
    m68k_arch_entry = &all_isas[m68k_arch_option];

  if (global_options_set.x_m68k_cpu_option)
    m68k_cpu_entry = &all_devices[(int) m68k_cpu_option];

  if (global_options_set.x_m68k_tune_option)
    m68k_tune_entry = &all_microarchs[(int) m68k_tune_option];

  /* User can choose:

     -mcpu=
     -march=
     -mtune=

     -march=ARCH should generate code that runs any processor
     implementing architecture ARCH.  -mcpu=CPU should override -march
     and should generate code that runs on processor CPU, making free
     use of any instructions that CPU understands.  -mtune=UARCH applies
     on top of -mcpu or -march and optimizes the code for UARCH.  It does
     not change the target architecture.  */
  if (m68k_cpu_entry)
    {
      /* Complain if the -march setting is for a different microarchitecture,
	 or includes flags that the -mcpu setting doesn't.  */
      if (m68k_arch_entry
	  && (m68k_arch_entry->microarch != m68k_cpu_entry->microarch
	      || (m68k_arch_entry->flags & ~m68k_cpu_entry->flags) != 0))
	warning (0, "-mcpu=%s conflicts with -march=%s",
		 m68k_cpu_entry->name, m68k_arch_entry->name);

      entry = m68k_cpu_entry;
    }
  else
    entry = m68k_arch_entry;

  /* Neither -mcpu nor -march given: fall back to the configured
     default device.  */
  if (!entry)
    entry = all_devices + TARGET_CPU_DEFAULT;

  m68k_cpu_flags = entry->flags;

  /* Use the architecture setting to derive default values for
     certain flags.  */
  target_mask = 0;

  /* ColdFire is lenient about alignment.  */
  if (!TARGET_COLDFIRE)
    target_mask |= MASK_STRICT_ALIGNMENT;

  if ((m68k_cpu_flags & FL_BITFIELD) != 0)
    target_mask |= MASK_BITFIELD;
  if ((m68k_cpu_flags & FL_CF_HWDIV) != 0)
    target_mask |= MASK_CF_HWDIV;
  if ((m68k_cpu_flags & (FL_68881 | FL_CF_FPU)) != 0)
    target_mask |= MASK_HARD_FLOAT;
  /* Only apply the derived defaults for flags the user did not set
     explicitly on the command line.  */
  target_flags |= target_mask & ~target_flags_explicit;

  /* Set the directly-usable versions of the -mcpu and -mtune settings.  */
  m68k_cpu = entry->device;
  if (m68k_tune_entry)
    {
      m68k_tune = m68k_tune_entry->microarch;
      m68k_tune_flags = m68k_tune_entry->flags;
    }
#ifdef M68K_DEFAULT_TUNE
  else if (!m68k_cpu_entry && !m68k_arch_entry)
    {
      enum target_device dev;
      dev = all_microarchs[M68K_DEFAULT_TUNE].device;
      m68k_tune_flags = all_devices[dev].flags;
      /* NOTE(review): this branch sets m68k_tune_flags but never assigns
	 m68k_tune itself (DEV is only used to look up flags) -- looks
	 like it should also do
	 m68k_tune = all_microarchs[M68K_DEFAULT_TUNE].microarch;
	 confirm intent before changing.  */
    }
#endif
  else
    {
      m68k_tune = entry->microarch;
      m68k_tune_flags = entry->flags;
    }

  /* Set the type of FPU.  */
  m68k_fpu = (!TARGET_HARD_FLOAT ? FPUTYPE_NONE
	      : (m68k_cpu_flags & FL_COLDFIRE) != 0 ? FPUTYPE_COLDFIRE
	      : FPUTYPE_68881);

  /* Sanity check to ensure that msep-data and mid-shared-library are not
   * both specified together.  Doing so simply doesn't make sense.
   */
  if (TARGET_SEP_DATA && TARGET_ID_SHARED_LIBRARY)
    error ("cannot specify both -msep-data and -mid-shared-library");

  /* If we're generating code for a separate A5 relative data segment,
   * we've got to enable -fPIC as well.  This might be relaxable to
   * -fpic but it hasn't been tested properly.
   */
  if (TARGET_SEP_DATA || TARGET_ID_SHARED_LIBRARY)
    flag_pic = 2;

  /* -mpcrel -fPIC uses 32-bit pc-relative displacements.  Raise an
     error if the target does not support them.  */
  if (TARGET_PCREL && !TARGET_68020 && flag_pic == 2)
    error ("-mpcrel -fPIC is not currently supported on selected cpu");

  /* ??? A historic way of turning on pic, or is this intended to
     be an embedded thing that doesn't have the same name binding
     significance that it does on hosted ELF systems?  */
  if (TARGET_PCREL && flag_pic == 0)
    flag_pic = 1;

  /* Choose the asm templates for symbolic calls and jumps.  */
  if (!flag_pic)
    {
      m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_JSR;

      m68k_symbolic_jump = "jra %a0";
    }
  else if (TARGET_ID_SHARED_LIBRARY)
    /* All addresses must be loaded from the GOT.  */
    ;
  else if (TARGET_68020 || TARGET_ISAB || TARGET_ISAC)
    {
      if (TARGET_PCREL)
	m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_C;
      else
	m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_P;

      if (TARGET_ISAC)
	/* No unconditional long branch */;
      else if (TARGET_PCREL)
	m68k_symbolic_jump = "bra%.l %c0";
      else
	m68k_symbolic_jump = "bra%.l %p0";
      /* Turn off function cse if we are doing PIC.  We always want
	 function call to be done as `bsr foo@PLTPC'.  */
      /* ??? It's traditional to do this for -mpcrel too, but it isn't
	 clear how intentional that is.  */
      flag_no_function_cse = 1;
    }

  /* Materialize the call template matching the enum chosen above.  */
  switch (m68k_symbolic_call_var)
    {
    case M68K_SYMBOLIC_CALL_JSR:
      m68k_symbolic_call = "jsr %a0";
      break;

    case M68K_SYMBOLIC_CALL_BSR_C:
      m68k_symbolic_call = "bsr%.l %c0";
      break;

    case M68K_SYMBOLIC_CALL_BSR_P:
      m68k_symbolic_call = "bsr%.l %p0";
      break;

    case M68K_SYMBOLIC_CALL_NONE:
      gcc_assert (m68k_symbolic_call == NULL);
      break;

    default:
      gcc_unreachable ();
    }

#ifndef ASM_OUTPUT_ALIGN_WITH_NOP
  /* Without nop-padded alignment, alignments beyond 4 bytes cannot be
     emitted; downgrade them with a warning.  */
  if (align_labels > 2)
    {
      warning (0, "-falign-labels=%d is not supported", align_labels);
      align_labels = 0;
    }
  if (align_loops > 2)
    {
      warning (0, "-falign-loops=%d is not supported", align_loops);
      align_loops = 0;
    }
#endif

  /* Stack-limit checking needs 68020-only addressing; reject it
     elsewhere.  */
  if ((opt_fstack_limit_symbol_arg != NULL || opt_fstack_limit_register_no >= 0)
      && !TARGET_68020)
    {
      warning (0, "-fstack-limit- options are not supported on this cpu");
      opt_fstack_limit_symbol_arg = NULL;
      opt_fstack_limit_register_no = -1;
    }

  SUBTARGET_OVERRIDE_OPTIONS;

  /* Setup scheduling options.  */
  if (TUNE_CFV1)
    m68k_sched_cpu = CPU_CFV1;
  else if (TUNE_CFV2)
    m68k_sched_cpu = CPU_CFV2;
  else if (TUNE_CFV3)
    m68k_sched_cpu = CPU_CFV3;
  else if (TUNE_CFV4)
    m68k_sched_cpu = CPU_CFV4;
  else
    {
      /* No DFA description exists for this tuning target: disable all
	 insn-scheduling passes.  */
      m68k_sched_cpu = CPU_UNKNOWN;
      flag_schedule_insns = 0;
      flag_schedule_insns_after_reload = 0;
      flag_modulo_sched = 0;
      flag_live_range_shrinkage = 0;
    }

  if (m68k_sched_cpu != CPU_UNKNOWN)
    {
      if ((m68k_cpu_flags & (FL_CF_EMAC | FL_CF_EMAC_B)) != 0)
	m68k_sched_mac = MAC_CF_EMAC;
      else if ((m68k_cpu_flags & FL_CF_MAC) != 0)
	m68k_sched_mac = MAC_CF_MAC;
      else
	m68k_sched_mac = MAC_NO;
    }
}
46e5dde5 | 690 | |
c2e7013d | 691 | /* Implement TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE. */ |
692 | ||
693 | static void | |
694 | m68k_override_options_after_change (void) | |
695 | { | |
696 | if (m68k_sched_cpu == CPU_UNKNOWN) | |
697 | { | |
698 | flag_schedule_insns = 0; | |
699 | flag_schedule_insns_after_reload = 0; | |
700 | flag_modulo_sched = 0; | |
2bf2188d | 701 | flag_live_range_shrinkage = 0; |
c2e7013d | 702 | } |
703 | } | |
704 | ||
46e5dde5 | 705 | /* Generate a macro of the form __mPREFIX_cpu_NAME, where PREFIX is the |
706 | given argument and NAME is the argument passed to -mcpu. Return NULL | |
707 | if -mcpu was not passed. */ | |
708 | ||
709 | const char * | |
710 | m68k_cpp_cpu_ident (const char *prefix) | |
711 | { | |
712 | if (!m68k_cpu_entry) | |
713 | return NULL; | |
714 | return concat ("__m", prefix, "_cpu_", m68k_cpu_entry->name, NULL); | |
715 | } | |
716 | ||
717 | /* Generate a macro of the form __mPREFIX_family_NAME, where PREFIX is the | |
718 | given argument and NAME is the name of the representative device for | |
719 | the -mcpu argument's family. Return NULL if -mcpu was not passed. */ | |
720 | ||
721 | const char * | |
722 | m68k_cpp_cpu_family (const char *prefix) | |
723 | { | |
724 | if (!m68k_cpu_entry) | |
725 | return NULL; | |
726 | return concat ("__m", prefix, "_family_", m68k_cpu_entry->family, NULL); | |
727 | } | |
5243fbff | 728 | \f |
29968436 | 729 | /* Return m68k_fk_interrupt_handler if FUNC has an "interrupt" or |
730 | "interrupt_handler" attribute and interrupt_thread if FUNC has an | |
731 | "interrupt_thread" attribute. Otherwise, return | |
732 | m68k_fk_normal_function. */ | |
f9a44575 | 733 | |
734 | enum m68k_function_kind | |
735 | m68k_get_function_kind (tree func) | |
7366b851 | 736 | { |
737 | tree a; | |
738 | ||
e2809c90 | 739 | gcc_assert (TREE_CODE (func) == FUNCTION_DECL); |
740 | ||
29968436 | 741 | a = lookup_attribute ("interrupt", DECL_ATTRIBUTES (func)); |
742 | if (a != NULL_TREE) | |
743 | return m68k_fk_interrupt_handler; | |
744 | ||
7366b851 | 745 | a = lookup_attribute ("interrupt_handler", DECL_ATTRIBUTES (func)); |
f9a44575 | 746 | if (a != NULL_TREE) |
747 | return m68k_fk_interrupt_handler; | |
748 | ||
749 | a = lookup_attribute ("interrupt_thread", DECL_ATTRIBUTES (func)); | |
750 | if (a != NULL_TREE) | |
751 | return m68k_fk_interrupt_thread; | |
752 | ||
753 | return m68k_fk_normal_function; | |
7366b851 | 754 | } |
755 | ||
756 | /* Handle an attribute requiring a FUNCTION_DECL; arguments as in | |
757 | struct attribute_spec.handler. */ | |
758 | static tree | |
759 | m68k_handle_fndecl_attribute (tree *node, tree name, | |
760 | tree args ATTRIBUTE_UNUSED, | |
761 | int flags ATTRIBUTE_UNUSED, | |
762 | bool *no_add_attrs) | |
763 | { | |
764 | if (TREE_CODE (*node) != FUNCTION_DECL) | |
765 | { | |
67a779df | 766 | warning (OPT_Wattributes, "%qE attribute only applies to functions", |
767 | name); | |
7366b851 | 768 | *no_add_attrs = true; |
769 | } | |
770 | ||
f9a44575 | 771 | if (m68k_get_function_kind (*node) != m68k_fk_normal_function) |
772 | { | |
773 | error ("multiple interrupt attributes not allowed"); | |
774 | *no_add_attrs = true; | |
775 | } | |
776 | ||
777 | if (!TARGET_FIDOA | |
778 | && !strcmp (IDENTIFIER_POINTER (name), "interrupt_thread")) | |
779 | { | |
780 | error ("interrupt_thread is available only on fido"); | |
781 | *no_add_attrs = true; | |
782 | } | |
783 | ||
7366b851 | 784 | return NULL_TREE; |
785 | } | |
2398df48 | 786 | |
/* Fill in the global current_frame description for the function being
   compiled: the rounded local-variable size, the mask and count of the
   data/address registers to save, and the mask and count of the FP
   registers to save.  The result is cached per function once reload
   has completed.  */
static void
m68k_compute_frame_layout (void)
{
  int regno, saved;
  unsigned int mask;
  enum m68k_function_kind func_kind =
    m68k_get_function_kind (current_function_decl);
  bool interrupt_handler = func_kind == m68k_fk_interrupt_handler;
  bool interrupt_thread = func_kind == m68k_fk_interrupt_thread;

  /* Only compute the frame once per function.
     Don't cache information until reload has been completed.  */
  if (current_frame.funcdef_no == current_function_funcdef_no
      && reload_completed)
    return;

  /* Round the local-variable area up to a multiple of 4 bytes.  */
  current_frame.size = (get_frame_size () + 3) & -4;

  mask = saved = 0;

  /* Interrupt thread does not need to save any register.  */
  if (!interrupt_thread)
    /* Registers 0..15 are the data and address registers.  */
    for (regno = 0; regno < 16; regno++)
      if (m68k_save_reg (regno, interrupt_handler))
	{
	  mask |= 1 << (regno - D0_REG);
	  saved++;
	}
  /* Each saved integer register occupies 4 bytes.  */
  current_frame.offset = saved * 4;
  current_frame.reg_no = saved;
  current_frame.reg_mask = mask;

  current_frame.foffset = 0;
  mask = saved = 0;
  if (TARGET_HARD_FLOAT)
    {
      /* Interrupt thread does not need to save any register.  */
      if (!interrupt_thread)
	/* Registers 16..23 are the FP registers.  */
	for (regno = 16; regno < 24; regno++)
	  if (m68k_save_reg (regno, interrupt_handler))
	    {
	      mask |= 1 << (regno - FP0_REG);
	      saved++;
	    }
      current_frame.foffset = saved * TARGET_FP_REG_SIZE;
      /* current_frame.offset covers both integer and FP save areas.  */
      current_frame.offset += current_frame.foffset;
    }
  current_frame.fpu_no = saved;
  current_frame.fpu_mask = mask;

  /* Remember what function this frame refers to.  */
  current_frame.funcdef_no = current_function_funcdef_no;
}
840 | ||
cd90919d | 841 | /* Worker function for TARGET_CAN_ELIMINATE. */ |
842 | ||
843 | bool | |
844 | m68k_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to) | |
845 | { | |
846 | return (to == STACK_POINTER_REGNUM ? ! frame_pointer_needed : true); | |
847 | } | |
848 | ||
2398df48 | 849 | HOST_WIDE_INT |
850 | m68k_initial_elimination_offset (int from, int to) | |
851 | { | |
e905c658 | 852 | int argptr_offset; |
853 | /* The arg pointer points 8 bytes before the start of the arguments, | |
854 | as defined by FIRST_PARM_OFFSET. This makes it coincident with the | |
855 | frame pointer in most frames. */ | |
856 | argptr_offset = frame_pointer_needed ? 0 : UNITS_PER_WORD; | |
2398df48 | 857 | if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM) |
e905c658 | 858 | return argptr_offset; |
2398df48 | 859 | |
d02588f4 | 860 | m68k_compute_frame_layout (); |
2398df48 | 861 | |
84653e0f | 862 | gcc_assert (to == STACK_POINTER_REGNUM); |
863 | switch (from) | |
864 | { | |
065b42aa | 865 | case ARG_POINTER_REGNUM: |
e905c658 | 866 | return current_frame.offset + current_frame.size - argptr_offset; |
84653e0f | 867 | case FRAME_POINTER_REGNUM: |
868 | return current_frame.offset + current_frame.size; | |
869 | default: | |
870 | gcc_unreachable (); | |
871 | } | |
2398df48 | 872 | } |
873 | ||
274d52cf | 874 | /* Refer to the array `regs_ever_live' to determine which registers |
875 | to save; `regs_ever_live[I]' is nonzero if register number I | |
876 | is ever used in the function. This function is responsible for | |
877 | knowing which registers should not be saved even if used. | |
878 | Return true if we need to save REGNO. */ | |
879 | ||
7366b851 | 880 | static bool |
881 | m68k_save_reg (unsigned int regno, bool interrupt_handler) | |
0056d78e | 882 | { |
e2182937 | 883 | if (flag_pic && regno == PIC_REG) |
7284415e | 884 | { |
18d50ae6 | 885 | if (crtl->saves_all_registers) |
5e19f315 | 886 | return true; |
18d50ae6 | 887 | if (crtl->uses_pic_offset_table) |
7284415e | 888 | return true; |
d68ee78f | 889 | /* Reload may introduce constant pool references into a function |
890 | that thitherto didn't need a PIC register. Note that the test | |
891 | above will not catch that case because we will only set | |
18d50ae6 | 892 | crtl->uses_pic_offset_table when emitting |
d68ee78f | 893 | the address reloads. */ |
18d50ae6 | 894 | if (crtl->uses_const_pool) |
d68ee78f | 895 | return true; |
7284415e | 896 | } |
0056d78e | 897 | |
18d50ae6 | 898 | if (crtl->calls_eh_return) |
0056d78e | 899 | { |
900 | unsigned int i; | |
901 | for (i = 0; ; i++) | |
902 | { | |
903 | unsigned int test = EH_RETURN_DATA_REGNO (i); | |
904 | if (test == INVALID_REGNUM) | |
905 | break; | |
906 | if (test == regno) | |
7366b851 | 907 | return true; |
0056d78e | 908 | } |
909 | } | |
910 | ||
7366b851 | 911 | /* Fixed regs we never touch. */ |
912 | if (fixed_regs[regno]) | |
913 | return false; | |
914 | ||
915 | /* The frame pointer (if it is such) is handled specially. */ | |
916 | if (regno == FRAME_POINTER_REGNUM && frame_pointer_needed) | |
917 | return false; | |
918 | ||
919 | /* Interrupt handlers must also save call_used_regs | |
920 | if they are live or when calling nested functions. */ | |
921 | if (interrupt_handler) | |
065b42aa | 922 | { |
3072d30e | 923 | if (df_regs_ever_live_p (regno)) |
065b42aa | 924 | return true; |
7366b851 | 925 | |
d5bf7b64 | 926 | if (!crtl->is_leaf && call_used_regs[regno]) |
065b42aa | 927 | return true; |
928 | } | |
7366b851 | 929 | |
930 | /* Never need to save registers that aren't touched. */ | |
3072d30e | 931 | if (!df_regs_ever_live_p (regno)) |
7366b851 | 932 | return false; |
933 | ||
a58c6df6 | 934 | /* Otherwise save everything that isn't call-clobbered. */ |
7366b851 | 935 | return !call_used_regs[regno]; |
0056d78e | 936 | } |
937 | ||
/* Emit RTL for a MOVEM or FMOVEM instruction.  BASE + OFFSET represents
   the lowest memory address.  COUNT is the number of registers to be
   moved, with register REGNO + I being moved if bit I of MASK is set.
   STORE_P specifies the direction of the move and ADJUST_STACK_P says
   whether or not this is pre-decrement (if STORE_P) or post-increment
   (if !STORE_P) operation.  Returns the emitted insn.  */

static rtx_insn *
m68k_emit_movem (rtx base, HOST_WIDE_INT offset,
		 unsigned int count, unsigned int regno,
		 unsigned int mask, bool store_p, bool adjust_stack_p)
{
  int i;
  rtx body, addr, src, operands[2];
  machine_mode mode;

  /* One SET per moved register, plus an extra SET for the base-register
     adjustment when ADJUST_STACK_P.  */
  body = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (adjust_stack_p + count));
  /* All registers in the moved range share the mode of the first one.  */
  mode = reg_raw_mode[regno];
  i = 0;

  if (adjust_stack_p)
    {
      /* BASE += COUNT * size: negative (pre-decrement) when storing,
	 positive (post-increment) when loading.  */
      src = plus_constant (Pmode, base,
			   (count
			    * GET_MODE_SIZE (mode)
			    * (HOST_WIDE_INT) (store_p ? -1 : 1)));
      XVECEXP (body, 0, i++) = gen_rtx_SET (base, src);
    }

  for (; mask != 0; mask >>= 1, regno++)
    if (mask & 1)
      {
	addr = plus_constant (Pmode, base, offset);
	/* operands[0] is the destination of the SET, operands[1] the
	   source; STORE_P selects which index gets the register and
	   which the memory slot.  */
	operands[!store_p] = gen_frame_mem (mode, addr);
	operands[store_p] = gen_rtx_REG (mode, regno);
	XVECEXP (body, 0, i++)
	  = gen_rtx_SET (operands[0], operands[1]);
	offset += GET_MODE_SIZE (mode);
      }
  /* Every slot of the PARALLEL must have been filled.  */
  gcc_assert (i == XVECLEN (body, 0));

  return emit_insn (body);
}
981 | ||
982 | /* Make INSN a frame-related instruction. */ | |
5243fbff | 983 | |
17d9b0c3 | 984 | static void |
9e9d6337 | 985 | m68k_set_frame_related (rtx_insn *insn) |
9c47decb | 986 | { |
987 | rtx body; | |
988 | int i; | |
989 | ||
990 | RTX_FRAME_RELATED_P (insn) = 1; | |
991 | body = PATTERN (insn); | |
992 | if (GET_CODE (body) == PARALLEL) | |
993 | for (i = 0; i < XVECLEN (body, 0); i++) | |
994 | RTX_FRAME_RELATED_P (XVECEXP (body, 0, i)) = 1; | |
995 | } | |
996 | ||
/* Emit RTL for the "prologue" define_expand: allocate the frame,
   save the live integer and FP registers, and load the PIC register
   when needed.  */

void
m68k_expand_prologue (void)
{
  HOST_WIDE_INT fsize_with_regs;
  rtx limit, src, dest;

  m68k_compute_frame_layout ();

  /* Record static stack usage for -fstack-usage.  */
  if (flag_stack_usage_info)
    current_function_static_stack_size
      = current_frame.size + current_frame.offset;

  /* If the stack limit is a symbol, we can check it here,
     before actually allocating the space.  */
  if (crtl->limit_stack
      && GET_CODE (stack_limit_rtx) == SYMBOL_REF)
    {
      limit = plus_constant (Pmode, stack_limit_rtx, current_frame.size + 4);
      if (!m68k_legitimate_constant_p (Pmode, limit))
	{
	  /* Materialize the limit in %d0 if it cannot be used directly.  */
	  emit_move_insn (gen_rtx_REG (Pmode, D0_REG), limit);
	  limit = gen_rtx_REG (Pmode, D0_REG);
	}
      emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode,
					    stack_pointer_rtx, limit),
			       stack_pointer_rtx, limit,
			       const1_rtx));
    }

  /* fsize_with_regs is the size of the initial stack adjustment.  */
  fsize_with_regs = current_frame.size;
  if (TARGET_COLDFIRE)
    {
      /* ColdFire's move multiple instructions do not allow pre-decrement
	 addressing.  Add the size of movem saves to the initial stack
	 allocation instead.  */
      if (current_frame.reg_no >= MIN_MOVEM_REGS)
	fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
      if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
	fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
    }

  if (frame_pointer_needed)
    {
      if (fsize_with_regs == 0 && TUNE_68040)
	{
	  /* On the 68040, two separate moves are faster than link.w 0.  */
	  dest = gen_frame_mem (Pmode,
				gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
	  m68k_set_frame_related (emit_move_insn (dest, frame_pointer_rtx));
	  m68k_set_frame_related (emit_move_insn (frame_pointer_rtx,
						  stack_pointer_rtx));
	}
      else if (fsize_with_regs < 0x8000 || TARGET_68020)
	/* The displacement fits in link's 16-bit field (or we have
	   the 68020 32-bit link form).  */
	m68k_set_frame_related
	  (emit_insn (gen_link (frame_pointer_rtx,
				GEN_INT (-4 - fsize_with_regs))));
      else
	{
	  /* Too big for a 16-bit link: link with 0 and adjust %sp
	     separately.  */
	  m68k_set_frame_related
	    (emit_insn (gen_link (frame_pointer_rtx, GEN_INT (-4))));
	  m68k_set_frame_related
	    (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-fsize_with_regs))));
	}

      /* If the frame pointer is needed, emit a special barrier that
	 will prevent the scheduler from moving stores to the frame
	 before the stack adjustment.  */
      emit_insn (gen_stack_tie (stack_pointer_rtx, frame_pointer_rtx));
    }
  else if (fsize_with_regs != 0)
    m68k_set_frame_related
      (emit_insn (gen_addsi3 (stack_pointer_rtx,
			      stack_pointer_rtx,
			      GEN_INT (-fsize_with_regs))));

  /* Save the FP registers, if any.  */
  if (current_frame.fpu_mask)
    {
      gcc_assert (current_frame.fpu_no >= MIN_FMOVEM_REGS);
      if (TARGET_68881)
	m68k_set_frame_related
	  (m68k_emit_movem (stack_pointer_rtx,
			    current_frame.fpu_no * -GET_MODE_SIZE (XFmode),
			    current_frame.fpu_no, FP0_REG,
			    current_frame.fpu_mask, true, true));
      else
	{
	  int offset;

	  /* If we're using moveml to save the integer registers,
	     the stack pointer will point to the bottom of the moveml
	     save area.  Find the stack offset of the first FP register.  */
	  if (current_frame.reg_no < MIN_MOVEM_REGS)
	    offset = 0;
	  else
	    offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
	  m68k_set_frame_related
	    (m68k_emit_movem (stack_pointer_rtx, offset,
			      current_frame.fpu_no, FP0_REG,
			      current_frame.fpu_mask, true, false));
	}
    }

  /* If the stack limit is not a symbol, check it here.
     This has the disadvantage that it may be too late...  */
  if (crtl->limit_stack)
    {
      if (REG_P (stack_limit_rtx))
	emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode, stack_pointer_rtx,
					      stack_limit_rtx),
				 stack_pointer_rtx, stack_limit_rtx,
				 const1_rtx));

      else if (GET_CODE (stack_limit_rtx) != SYMBOL_REF)
	warning (0, "stack limit expression is not supported");
    }

  /* Save the integer registers.  */
  if (current_frame.reg_no < MIN_MOVEM_REGS)
    {
      /* Store each register separately in the same order moveml does.  */
      int i;

      for (i = 16; i-- > 0; )
	if (current_frame.reg_mask & (1 << i))
	  {
	    src = gen_rtx_REG (SImode, D0_REG + i);
	    dest = gen_frame_mem (SImode,
				  gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
	    m68k_set_frame_related (emit_insn (gen_movsi (dest, src)));
	  }
    }
  else
    {
      if (TARGET_COLDFIRE)
	/* The required register save space has already been allocated.
	   The first register should be stored at (%sp).  */
	m68k_set_frame_related
	  (m68k_emit_movem (stack_pointer_rtx, 0,
			    current_frame.reg_no, D0_REG,
			    current_frame.reg_mask, true, false));
      else
	m68k_set_frame_related
	  (m68k_emit_movem (stack_pointer_rtx,
			    current_frame.reg_no * -GET_MODE_SIZE (SImode),
			    current_frame.reg_no, D0_REG,
			    current_frame.reg_mask, true, true));
    }

  /* Load the GOT pointer when the PIC register is used and the data
     sections are not separately addressed.  */
  if (!TARGET_SEP_DATA
      && crtl->uses_pic_offset_table)
    emit_insn (gen_load_got (pic_offset_table_rtx));
}
1152 | \f | |
6571ac6e | 1153 | /* Return true if a simple (return) instruction is sufficient for this |
1154 | instruction (i.e. if no epilogue is needed). */ | |
5243fbff | 1155 | |
d02588f4 | 1156 | bool |
b38bcc54 | 1157 | m68k_use_return_insn (void) |
5243fbff | 1158 | { |
5243fbff | 1159 | if (!reload_completed || frame_pointer_needed || get_frame_size () != 0) |
d02588f4 | 1160 | return false; |
1a2c33ec | 1161 | |
065b42aa | 1162 | m68k_compute_frame_layout (); |
6571ac6e | 1163 | return current_frame.offset == 0; |
5243fbff | 1164 | } |
1165 | ||
/* Emit RTL for the "epilogue" or "sibcall_epilogue" define_expand;
   SIBCALL_P says which.

   The function epilogue should not depend on the current stack pointer!
   It should use the frame pointer only, if there is a frame pointer.
   This is mandatory because of alloca; we also take advantage of it to
   omit stack adjustments before returning.  */

void
m68k_expand_epilogue (bool sibcall_p)
{
  HOST_WIDE_INT fsize, fsize_with_regs;
  /* BIG: frame displacements don't fit in 16 bits, so %a1 holds a
     negated offset and addresses are formed as (%fp,%a1.l).
     RESTORE_FROM_SP: registers are popped through %sp instead of being
     loaded at negative offsets from %fp.  */
  bool big, restore_from_sp;

  m68k_compute_frame_layout ();

  fsize = current_frame.size;
  big = false;
  restore_from_sp = false;

  /* FIXME : crtl->is_leaf below is too strong.
     What we really need to know there is if there could be pending
     stack adjustment needed at that point.  */
  restore_from_sp = (!frame_pointer_needed
		     || (!cfun->calls_alloca && crtl->is_leaf));

  /* fsize_with_regs is the size we need to adjust the sp when
     popping the frame.  */
  fsize_with_regs = fsize;
  if (TARGET_COLDFIRE && restore_from_sp)
    {
      /* ColdFire's move multiple instructions do not allow post-increment
	 addressing.  Add the size of movem loads to the final deallocation
	 instead.  */
      if (current_frame.reg_no >= MIN_MOVEM_REGS)
	fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
      if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
	fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
    }

  /* Frame offsets of 0x8000 or more need the BIG (indexed) addressing
     scheme, or a switch to stack-based restores on ColdFire.  */
  if (current_frame.offset + fsize >= 0x8000
      && !restore_from_sp
      && (current_frame.reg_mask || current_frame.fpu_mask))
    {
      if (TARGET_COLDFIRE
	  && (current_frame.reg_no >= MIN_MOVEM_REGS
	      || current_frame.fpu_no >= MIN_FMOVEM_REGS))
	{
	  /* ColdFire's move multiple instructions do not support the
	     (d8,Ax,Xi) addressing mode, so we're as well using a normal
	     stack-based restore.  */
	  emit_move_insn (gen_rtx_REG (Pmode, A1_REG),
			  GEN_INT (-(current_frame.offset + fsize)));
	  emit_insn (gen_blockage ());
	  emit_insn (gen_addsi3 (stack_pointer_rtx,
				 gen_rtx_REG (Pmode, A1_REG),
				 frame_pointer_rtx));
	  restore_from_sp = true;
	}
      else
	{
	  emit_move_insn (gen_rtx_REG (Pmode, A1_REG), GEN_INT (-fsize));
	  fsize = 0;
	  big = true;
	}
    }

  /* Restore the integer registers.  */
  if (current_frame.reg_no < MIN_MOVEM_REGS)
    {
      /* Restore each register separately in the same order moveml does.  */
      int i;
      HOST_WIDE_INT offset;

      offset = current_frame.offset + fsize;
      for (i = 0; i < 16; i++)
	if (current_frame.reg_mask & (1 << i))
	  {
	    rtx addr;

	    if (big)
	      {
		/* Generate the address -OFFSET(%fp,%a1.l).  */
		addr = gen_rtx_REG (Pmode, A1_REG);
		addr = gen_rtx_PLUS (Pmode, addr, frame_pointer_rtx);
		addr = plus_constant (Pmode, addr, -offset);
	      }
	    else if (restore_from_sp)
	      addr = gen_rtx_POST_INC (Pmode, stack_pointer_rtx);
	    else
	      addr = plus_constant (Pmode, frame_pointer_rtx, -offset);
	    emit_move_insn (gen_rtx_REG (SImode, D0_REG + i),
			    gen_frame_mem (SImode, addr));
	    offset -= GET_MODE_SIZE (SImode);
	  }
    }
  else if (current_frame.reg_mask)
    {
      if (big)
	m68k_emit_movem (gen_rtx_PLUS (Pmode,
				       gen_rtx_REG (Pmode, A1_REG),
				       frame_pointer_rtx),
			 -(current_frame.offset + fsize),
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false, false);
      else if (restore_from_sp)
	m68k_emit_movem (stack_pointer_rtx, 0,
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false,
			 !TARGET_COLDFIRE);
      else
	m68k_emit_movem (frame_pointer_rtx,
			 -(current_frame.offset + fsize),
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false, false);
    }

  /* Restore the FP registers.  */
  if (current_frame.fpu_no > 0)
    {
      if (big)
	m68k_emit_movem (gen_rtx_PLUS (Pmode,
				       gen_rtx_REG (Pmode, A1_REG),
				       frame_pointer_rtx),
			 -(current_frame.foffset + fsize),
			 current_frame.fpu_no, FP0_REG,
			 current_frame.fpu_mask, false, false);
      else if (restore_from_sp)
	{
	  if (TARGET_COLDFIRE)
	    {
	      int offset;

	      /* If we used moveml to restore the integer registers, the
		 stack pointer will still point to the bottom of the moveml
		 save area.  Find the stack offset of the first FP
		 register.  */
	      if (current_frame.reg_no < MIN_MOVEM_REGS)
		offset = 0;
	      else
		offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
	      m68k_emit_movem (stack_pointer_rtx, offset,
			       current_frame.fpu_no, FP0_REG,
			       current_frame.fpu_mask, false, false);
	    }
	  else
	    m68k_emit_movem (stack_pointer_rtx, 0,
			     current_frame.fpu_no, FP0_REG,
			     current_frame.fpu_mask, false, true);
	}
      else
	m68k_emit_movem (frame_pointer_rtx,
			 -(current_frame.foffset + fsize),
			 current_frame.fpu_no, FP0_REG,
			 current_frame.fpu_mask, false, false);
    }

  /* Keep the scheduler from moving the restores past the deallocation.  */
  emit_insn (gen_blockage ());
  if (frame_pointer_needed)
    emit_insn (gen_unlink (frame_pointer_rtx));
  else if (fsize_with_regs)
    emit_insn (gen_addsi3 (stack_pointer_rtx,
			   stack_pointer_rtx,
			   GEN_INT (fsize_with_regs)));

  if (crtl->calls_eh_return)
    emit_insn (gen_addsi3 (stack_pointer_rtx,
			   stack_pointer_rtx,
			   EH_RETURN_STACKADJ_RTX));

  if (!sibcall_p)
    emit_jump_insn (ret_rtx);
}
1337 | \f | |
821960c7 | 1338 | /* Return true if X is a valid comparison operator for the dbcc |
61405c07 | 1339 | instruction. |
1340 | ||
1341 | Note it rejects floating point comparison operators. | |
1342 | (In the future we could use Fdbcc). | |
1343 | ||
1344 | It also rejects some comparisons when CC_NO_OVERFLOW is set. */ | |
1345 | ||
1346 | int | |
3754d046 | 1347 | valid_dbcc_comparison_p_2 (rtx x, machine_mode mode ATTRIBUTE_UNUSED) |
61405c07 | 1348 | { |
61405c07 | 1349 | switch (GET_CODE (x)) |
1350 | { | |
61405c07 | 1351 | case EQ: case NE: case GTU: case LTU: |
1352 | case GEU: case LEU: | |
1353 | return 1; | |
1354 | ||
1355 | /* Reject some when CC_NO_OVERFLOW is set. This may be over | |
1356 | conservative */ | |
1357 | case GT: case LT: case GE: case LE: | |
1358 | return ! (cc_prev_status.flags & CC_NO_OVERFLOW); | |
1359 | default: | |
1360 | return 0; | |
1361 | } | |
1362 | } | |
1363 | ||
e911aedf | 1364 | /* Return nonzero if flags are currently in the 68881 flag register. */ |
07bd8c0d | 1365 | int |
821960c7 | 1366 | flags_in_68881 (void) |
07bd8c0d | 1367 | { |
1368 | /* We could add support for these in the future */ | |
1369 | return cc_status.flags & CC_IN_68881; | |
1370 | } | |
1371 | ||
d09fd72c | 1372 | /* Return true if PARALLEL contains register REGNO. */ |
1373 | static bool | |
1374 | m68k_reg_present_p (const_rtx parallel, unsigned int regno) | |
1375 | { | |
1376 | int i; | |
1377 | ||
1378 | if (REG_P (parallel) && REGNO (parallel) == regno) | |
1379 | return true; | |
1380 | ||
1381 | if (GET_CODE (parallel) != PARALLEL) | |
1382 | return false; | |
1383 | ||
1384 | for (i = 0; i < XVECLEN (parallel, 0); ++i) | |
1385 | { | |
1386 | const_rtx x; | |
1387 | ||
1388 | x = XEXP (XVECEXP (parallel, 0, i), 0); | |
1389 | if (REG_P (x) && REGNO (x) == regno) | |
1390 | return true; | |
1391 | } | |
1392 | ||
1393 | return false; | |
1394 | } | |
1395 | ||
e2809c90 | 1396 | /* Implement TARGET_FUNCTION_OK_FOR_SIBCALL_P. */ |
33fb08b8 | 1397 | |
1398 | static bool | |
e2809c90 | 1399 | m68k_ok_for_sibcall_p (tree decl, tree exp) |
33fb08b8 | 1400 | { |
e2809c90 | 1401 | enum m68k_function_kind kind; |
1402 | ||
1403 | /* We cannot use sibcalls for nested functions because we use the | |
1404 | static chain register for indirect calls. */ | |
1405 | if (CALL_EXPR_STATIC_CHAIN (exp)) | |
1406 | return false; | |
1407 | ||
d09fd72c | 1408 | if (!VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl)))) |
1409 | { | |
1410 | /* Check that the return value locations are the same. For | |
1411 | example that we aren't returning a value from the sibling in | |
1412 | a D0 register but then need to transfer it to a A0 register. */ | |
1413 | rtx cfun_value; | |
1414 | rtx call_value; | |
1415 | ||
1416 | cfun_value = FUNCTION_VALUE (TREE_TYPE (DECL_RESULT (cfun->decl)), | |
1417 | cfun->decl); | |
1418 | call_value = FUNCTION_VALUE (TREE_TYPE (exp), decl); | |
1419 | ||
1420 | /* Check that the values are equal or that the result the callee | |
1421 | function returns is superset of what the current function returns. */ | |
1422 | if (!(rtx_equal_p (cfun_value, call_value) | |
1423 | || (REG_P (cfun_value) | |
1424 | && m68k_reg_present_p (call_value, REGNO (cfun_value))))) | |
1425 | return false; | |
1426 | } | |
1427 | ||
e2809c90 | 1428 | kind = m68k_get_function_kind (current_function_decl); |
1429 | if (kind == m68k_fk_normal_function) | |
1430 | /* We can always sibcall from a normal function, because it's | |
1431 | undefined if it is calling an interrupt function. */ | |
1432 | return true; | |
1433 | ||
1434 | /* Otherwise we can only sibcall if the function kind is known to be | |
1435 | the same. */ | |
1436 | if (decl && m68k_get_function_kind (decl) == kind) | |
1437 | return true; | |
1438 | ||
1439 | return false; | |
33fb08b8 | 1440 | } |
1441 | ||
385c19d1 | 1442 | /* On the m68k all args are always pushed. */ |
1443 | ||
1444 | static rtx | |
39cba157 | 1445 | m68k_function_arg (cumulative_args_t cum ATTRIBUTE_UNUSED, |
3754d046 | 1446 | machine_mode mode ATTRIBUTE_UNUSED, |
385c19d1 | 1447 | const_tree type ATTRIBUTE_UNUSED, |
1448 | bool named ATTRIBUTE_UNUSED) | |
1449 | { | |
1450 | return NULL_RTX; | |
1451 | } | |
1452 | ||
1453 | static void | |
3754d046 | 1454 | m68k_function_arg_advance (cumulative_args_t cum_v, machine_mode mode, |
385c19d1 | 1455 | const_tree type, bool named ATTRIBUTE_UNUSED) |
1456 | { | |
39cba157 | 1457 | CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v); |
1458 | ||
385c19d1 | 1459 | *cum += (mode != BLKmode |
1460 | ? (GET_MODE_SIZE (mode) + 3) & ~3 | |
1461 | : (int_size_in_bytes (type) + 3) & ~3); | |
1462 | } | |
1463 | ||
dd7218bb | 1464 | /* Convert X to a legitimate function call memory reference and return the |
1465 | result. */ | |
b09faf5c | 1466 | |
dd7218bb | 1467 | rtx |
1468 | m68k_legitimize_call_address (rtx x) | |
1469 | { | |
1470 | gcc_assert (MEM_P (x)); | |
1471 | if (call_operand (XEXP (x, 0), VOIDmode)) | |
1472 | return x; | |
1473 | return replace_equiv_address (x, force_reg (Pmode, XEXP (x, 0))); | |
b09faf5c | 1474 | } |
1475 | ||
33fb08b8 | 1476 | /* Likewise for sibling calls. */ |
1477 | ||
1478 | rtx | |
1479 | m68k_legitimize_sibcall_address (rtx x) | |
1480 | { | |
1481 | gcc_assert (MEM_P (x)); | |
1482 | if (sibcall_operand (XEXP (x, 0), VOIDmode)) | |
1483 | return x; | |
1484 | ||
1485 | emit_move_insn (gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM), XEXP (x, 0)); | |
1486 | return replace_equiv_address (x, gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM)); | |
1487 | } | |
1488 | ||
/* Convert X to a legitimate address and return it if successful.  Otherwise
   return X.

   For the 68000, we handle X+REG by loading X into a register R and
   using R+REG.  R will go in an address reg and indexing will be used.
   However, if REG is a broken-out memory address or multiplication,
   nothing needs to be done because REG can certainly go in an address reg.  */

static rtx
m68k_legitimize_address (rtx x, rtx oldx, machine_mode mode)
{
  if (m68k_tls_symbol_p (x))
    return m68k_legitimize_tls_address (x);

  if (GET_CODE (x) == PLUS)
    {
      /* CH tracks whether X has been changed (either it already differs
	 from OLDX, or we mutate it below).  COPIED ensures we copy X at
	 most once before modifying it in place, so OLDX is never
	 clobbered.  */
      int ch = (x) != (oldx);
      int copied = 0;

#define COPY_ONCE(Y) if (!copied) { Y = copy_rtx (Y); copied = ch = 1; }

      /* Break out multiplications (from scaled indexing) into their
	 own pseudos first.  */
      if (GET_CODE (XEXP (x, 0)) == MULT)
	{
	  COPY_ONCE (x);
	  XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
	}
      if (GET_CODE (XEXP (x, 1)) == MULT)
	{
	  COPY_ONCE (x);
	  XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
	}
      if (ch)
	{
	  if (GET_CODE (XEXP (x, 1)) == REG
	      && GET_CODE (XEXP (x, 0)) == REG)
	    {
	      /* REG + REG is a valid indexed address, except that the
		 ColdFire FPU cannot use indexed addressing for floats.  */
	      if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
		{
		  COPY_ONCE (x);
		  x = force_operand (x, 0);
		}
	      return x;
	    }
	  if (memory_address_p (mode, x))
	    return x;
	}
      /* One operand is a register (possibly a sign-extended HImode
	 register); force the other into a pseudo to form REG + REG.  */
      if (GET_CODE (XEXP (x, 0)) == REG
	  || (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
	      && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
	      && GET_MODE (XEXP (XEXP (x, 0), 0)) == HImode))
	{
	  rtx temp = gen_reg_rtx (Pmode);
	  rtx val = force_operand (XEXP (x, 1), 0);
	  emit_move_insn (temp, val);
	  COPY_ONCE (x);
	  XEXP (x, 1) = temp;
	  /* Again, ColdFire FPU floats cannot use indexed addressing.  */
	  if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
	      && GET_CODE (XEXP (x, 0)) == REG)
	    x = force_operand (x, 0);
	}
      else if (GET_CODE (XEXP (x, 1)) == REG
	       || (GET_CODE (XEXP (x, 1)) == SIGN_EXTEND
		   && GET_CODE (XEXP (XEXP (x, 1), 0)) == REG
		   && GET_MODE (XEXP (XEXP (x, 1), 0)) == HImode))
	{
	  rtx temp = gen_reg_rtx (Pmode);
	  rtx val = force_operand (XEXP (x, 0), 0);
	  emit_move_insn (temp, val);
	  COPY_ONCE (x);
	  XEXP (x, 0) = temp;
	  if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
	      && GET_CODE (XEXP (x, 1)) == REG)
	    x = force_operand (x, 0);
	}
    }

  return x;
}
1567 | ||
1568 | ||
/* Output a dbCC; jCC sequence.  Note we do not handle the
   floating point version of this sequence (Fdbcc).  We also
   do not handle alternative conditions when CC_NO_OVERFLOW is
   set.  It is assumed that valid_dbcc_comparison_p and flags_in_68881 will
   kick those out before we get here.  */

void
output_dbcc_and_branch (rtx *operands)
{
  /* Emit "dbCC %0,%l1" (decrement-and-branch on the counter in
     operands[0]) followed by "jCC %l2", with CC selected from the
     comparison code in operands[3].  */
  switch (GET_CODE (operands[3]))
    {
    case EQ:
      output_asm_insn ("dbeq %0,%l1\n\tjeq %l2", operands);
      break;

    case NE:
      output_asm_insn ("dbne %0,%l1\n\tjne %l2", operands);
      break;

    case GT:
      output_asm_insn ("dbgt %0,%l1\n\tjgt %l2", operands);
      break;

    case GTU:
      /* Unsigned comparisons map onto the hi/cs/cc/ls condition codes.  */
      output_asm_insn ("dbhi %0,%l1\n\tjhi %l2", operands);
      break;

    case LT:
      output_asm_insn ("dblt %0,%l1\n\tjlt %l2", operands);
      break;

    case LTU:
      output_asm_insn ("dbcs %0,%l1\n\tjcs %l2", operands);
      break;

    case GE:
      output_asm_insn ("dbge %0,%l1\n\tjge %l2", operands);
      break;

    case GEU:
      output_asm_insn ("dbcc %0,%l1\n\tjcc %l2", operands);
      break;

    case LE:
      output_asm_insn ("dble %0,%l1\n\tjle %l2", operands);
      break;

    case LEU:
      output_asm_insn ("dbls %0,%l1\n\tjls %l2", operands);
      break;

    default:
      gcc_unreachable ();
    }

  /* If the decrement is to be done in SImode, then we have
     to compensate for the fact that dbcc decrements in HImode.  */
  switch (GET_MODE (operands[0]))
    {
    case E_SImode:
      /* If the low word wrapped to -1, clear it and decrement the high
	 word; continue looping (jpl %l1) while the result is >= 0.  */
      output_asm_insn ("clr%.w %0\n\tsubq%.l #1,%0\n\tjpl %l1", operands);
      break;

    case E_HImode:
      /* dbcc already decrements in HImode; nothing extra to do.  */
      break;

    default:
      gcc_unreachable ();
    }
}
1639 | ||
/* Output assembler for a 64-bit set-on-condition: store the boolean
   result of comparing OPERAND1 with OPERAND2 (as 64-bit values split
   into two SImode halves) into DEST, using the condition in OP.  */

const char *
output_scc_di (rtx op, rtx operand1, rtx operand2, rtx dest)
{
  rtx loperands[7];
  enum rtx_code op_code = GET_CODE (op);

  /* This does not produce a useful cc.  */
  CC_STATUS_INIT;

  /* The m68k cmp.l instruction requires operand1 to be a reg as used
     below.  Swap the operands and change the op if these requirements
     are not fulfilled.  */
  if (GET_CODE (operand2) == REG && GET_CODE (operand1) != REG)
    {
      rtx tmp = operand1;

      operand1 = operand2;
      operand2 = tmp;
      op_code = swap_condition (op_code);
    }
  /* loperands[0]/[1] are the high/low SImode halves of operand1.  */
  loperands[0] = operand1;
  if (GET_CODE (operand1) == REG)
    loperands[1] = gen_rtx_REG (SImode, REGNO (operand1) + 1);
  else
    loperands[1] = adjust_address (operand1, SImode, 4);
  if (operand2 != const0_rtx)
    {
      /* loperands[2]/[3] are the high/low halves of operand2.  */
      loperands[2] = operand2;
      if (GET_CODE (operand2) == REG)
	loperands[3] = gen_rtx_REG (SImode, REGNO (operand2) + 1);
      else
	loperands[3] = adjust_address (operand2, SImode, 4);
    }
  /* loperands[4] is the label reached when the high halves differ
     (or, for comparison against zero, when either half is nonzero).  */
  loperands[4] = gen_label_rtx ();
  if (operand2 != const0_rtx)
    output_asm_insn ("cmp%.l %2,%0\n\tjne %l4\n\tcmp%.l %3,%1", loperands);
  else
    {
      /* Comparing against zero: tst.l works everywhere except for
	 address registers on pre-68020, non-ColdFire parts.  */
      if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[0]))
	output_asm_insn ("tst%.l %0", loperands);
      else
	output_asm_insn ("cmp%.w #0,%0", loperands);

      output_asm_insn ("jne %l4", loperands);

      if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[1]))
	output_asm_insn ("tst%.l %1", loperands);
      else
	output_asm_insn ("cmp%.w #0,%1", loperands);
    }

  loperands[5] = dest;

  /* Emit the sCC for DEST.  Signed conditions need two different sCC
     forms: an unsigned one after the low-word compare and a signed one
     at label %l4 after the high-word compare, joined via label %l6.  */
  switch (op_code)
    {
    case EQ:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("seq %5", loperands);
      break;

    case NE:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sne %5", loperands);
      break;

    case GT:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("shi %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sgt %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case GTU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("shi %5", loperands);
      break;

    case LT:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("scs %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("slt %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case LTU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("scs %5", loperands);
      break;

    case GE:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("scc %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sge %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case GEU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("scc %5", loperands);
      break;

    case LE:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("sls %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sle %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case LEU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sls %5", loperands);
      break;

    default:
      gcc_unreachable ();
    }
  return "";
}
1776 | ||
/* Output assembler for a bit test of bit COUNTOP of DATAOP.  SIGNPOS is
   the sign-bit position of the storage unit in use (7, 15 or 31); INSN
   is the insn being output, used to peek at the following condition.
   Sets cc_status to describe the resulting flags.  */

const char *
output_btst (rtx *operands, rtx countop, rtx dataop, rtx_insn *insn, int signpos)
{
  operands[0] = countop;
  operands[1] = dataop;

  if (GET_CODE (countop) == CONST_INT)
    {
      register int count = INTVAL (countop);
      /* If COUNT is bigger than size of storage unit in use,
	 advance to the containing unit of same size.  */
      if (count > signpos)
	{
	  int offset = (count & ~signpos) / 8;
	  count = count & signpos;
	  operands[1] = dataop = adjust_address (dataop, QImode, offset);
	}
      /* Testing the sign bit tells us about the sign; any other bit
	 only constrains the N flag.  Z reflects the tested bit (Z set
	 when the bit is clear), hence CC_Z_IN_NOT_N.  */
      if (count == signpos)
	cc_status.flags = CC_NOT_POSITIVE | CC_Z_IN_NOT_N;
      else
	cc_status.flags = CC_NOT_NEGATIVE | CC_Z_IN_NOT_N;

      /* These three statements used to use next_insns_test_no...
	 but it appears that this should do the same job.  */
      if (count == 31
	  && next_insn_tests_no_inequality (insn))
	return "tst%.l %1";
      if (count == 15
	  && next_insn_tests_no_inequality (insn))
	return "tst%.w %1";
      if (count == 7
	  && next_insn_tests_no_inequality (insn))
	return "tst%.b %1";
      /* Try to use `movew to ccr' followed by the appropriate branch insn.
         On some m68k variants unfortunately that's slower than btst.
         On 68000 and higher, that should also work for all HImode operands. */
      if (TUNE_CPU32 || TARGET_COLDFIRE || optimize_size)
	{
	  /* move.w to CCR loads bit 3 into N and bit 2 into Z, so bits
	     3 and 2 can be tested directly from the flags.  */
	  if (count == 3 && DATA_REG_P (operands[1])
	      && next_insn_tests_no_inequality (insn))
	    {
	      cc_status.flags = CC_NOT_NEGATIVE | CC_Z_IN_NOT_N | CC_NO_OVERFLOW;
	      return "move%.w %1,%%ccr";
	    }
	  if (count == 2 && DATA_REG_P (operands[1])
	      && next_insn_tests_no_inequality (insn))
	    {
	      cc_status.flags = CC_NOT_NEGATIVE | CC_INVERTED | CC_NO_OVERFLOW;
	      return "move%.w %1,%%ccr";
	    }
	  /* count == 1 followed by bvc/bvs and
	     count == 0 followed by bcc/bcs are also possible, but need
	     m68k-specific CC_Z_IN_NOT_V and CC_Z_IN_NOT_C flags. */
	}

      cc_status.flags = CC_NOT_NEGATIVE;
    }
  /* General case: an explicit btst instruction.  */
  return "btst %0,%1";
}
5243fbff | 1836 | \f |
b97b0687 | 1837 | /* Return true if X is a legitimate base register. STRICT_P says |
1838 | whether we need strict checking. */ | |
1839 | ||
1840 | bool | |
1841 | m68k_legitimate_base_reg_p (rtx x, bool strict_p) | |
1842 | { | |
1843 | /* Allow SUBREG everywhere we allow REG. This results in better code. */ | |
1844 | if (!strict_p && GET_CODE (x) == SUBREG) | |
1845 | x = SUBREG_REG (x); | |
1846 | ||
1847 | return (REG_P (x) | |
1848 | && (strict_p | |
1849 | ? REGNO_OK_FOR_BASE_P (REGNO (x)) | |
979e5a71 | 1850 | : REGNO_OK_FOR_BASE_NONSTRICT_P (REGNO (x)))); |
b97b0687 | 1851 | } |
1852 | ||
1853 | /* Return true if X is a legitimate index register. STRICT_P says | |
1854 | whether we need strict checking. */ | |
1855 | ||
1856 | bool | |
1857 | m68k_legitimate_index_reg_p (rtx x, bool strict_p) | |
1858 | { | |
1859 | if (!strict_p && GET_CODE (x) == SUBREG) | |
1860 | x = SUBREG_REG (x); | |
1861 | ||
1862 | return (REG_P (x) | |
1863 | && (strict_p | |
1864 | ? REGNO_OK_FOR_INDEX_P (REGNO (x)) | |
979e5a71 | 1865 | : REGNO_OK_FOR_INDEX_NONSTRICT_P (REGNO (x)))); |
b97b0687 | 1866 | } |
1867 | ||
/* Return true if X is a legitimate index expression for a (d8,An,Xn) or
   (bd,An,Xn) addressing mode.  Fill in the INDEX and SCALE fields of
   ADDRESS if so.  STRICT_P says whether we need strict checking.  */

static bool
m68k_decompose_index (rtx x, bool strict_p, struct m68k_address *address)
{
  int scale;

  /* Check for a scale factor.  Only 68020+ and ColdFire support scaled
     indexing; scale 8 on ColdFire additionally requires the FPU.  */
  scale = 1;
  if ((TARGET_68020 || TARGET_COLDFIRE)
      && GET_CODE (x) == MULT
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && (INTVAL (XEXP (x, 1)) == 2
	  || INTVAL (XEXP (x, 1)) == 4
	  || (INTVAL (XEXP (x, 1)) == 8
	      && (TARGET_COLDFIRE_FPU || !TARGET_COLDFIRE))))
    {
      scale = INTVAL (XEXP (x, 1));
      x = XEXP (x, 0);
    }

  /* Check for a word extension: an Xn.w index register.  Not available
     on ColdFire.  */
  if (!TARGET_COLDFIRE
      && GET_CODE (x) == SIGN_EXTEND
      && GET_MODE (XEXP (x, 0)) == HImode)
    x = XEXP (x, 0);

  if (m68k_legitimate_index_reg_p (x, strict_p))
    {
      address->scale = scale;
      address->index = x;
      return true;
    }

  return false;
}
1906 | ||
e0ab7256 | 1907 | /* Return true if X is an illegitimate symbolic constant. */ |
1908 | ||
1909 | bool | |
1910 | m68k_illegitimate_symbolic_constant_p (rtx x) | |
1911 | { | |
1912 | rtx base, offset; | |
1913 | ||
1914 | if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P) | |
1915 | { | |
1916 | split_const (x, &base, &offset); | |
1917 | if (GET_CODE (base) == SYMBOL_REF | |
1918 | && !offset_within_block_p (base, INTVAL (offset))) | |
1919 | return true; | |
1920 | } | |
869bde6b | 1921 | return m68k_tls_reference_p (x, false); |
e0ab7256 | 1922 | } |
1923 | ||
7d7d7bd2 | 1924 | /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */ |
1925 | ||
1926 | static bool | |
3754d046 | 1927 | m68k_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x) |
7d7d7bd2 | 1928 | { |
1929 | return m68k_illegitimate_symbolic_constant_p (x); | |
1930 | } | |
1931 | ||
/* Return true if X is a legitimate constant address that can reach
   bytes in the range [X, X + REACH).  STRICT_P says whether we need
   strict checking.  */

static bool
m68k_legitimate_constant_address_p (rtx x, unsigned int reach, bool strict_p)
{
  rtx base, offset;

  if (!CONSTANT_ADDRESS_P (x))
    return false;

  /* Under PIC, symbolic addresses must go through the GOT, except for
     strict PC-relative addressing (-mpcrel).  */
  if (flag_pic
      && !(strict_p && TARGET_PCREL)
      && symbolic_operand (x, VOIDmode))
    return false;

  /* Every byte of the access, not just the first, must stay inside the
     symbol's block when offsets must be within sections.  */
  if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P && reach > 1)
    {
      split_const (x, &base, &offset);
      if (GET_CODE (base) == SYMBOL_REF
	  && !offset_within_block_p (base, INTVAL (offset) + reach - 1))
	return false;
    }

  /* TLS references need special sequences and are never plain
     constant addresses.  */
  return !m68k_tls_reference_p (x, false);
}
1959 | ||
/* Return true if X is a LABEL_REF for a jump table.  Assume that unplaced
   labels will become jump tables.  */

static bool
m68k_jump_table_ref_p (rtx x)
{
  if (GET_CODE (x) != LABEL_REF)
    return false;

  /* An unplaced label (no neighbors in the insn chain) is assumed to
     become a jump table; see the caller's comment about do_tablejump.  */
  rtx_insn *insn = as_a <rtx_insn *> (XEXP (x, 0));
  if (!NEXT_INSN (insn) && !PREV_INSN (insn))
    return true;

  /* Otherwise, check whether the label is directly followed by
     jump-table data.  */
  insn = next_nonnote_insn (insn);
  return insn && JUMP_TABLE_DATA_P (insn);
}
1976 | ||
/* Return true if X is a legitimate address for values of mode MODE.
   STRICT_P says whether strict checking is needed.  If the address
   is valid, describe its components in *ADDRESS.  */

static bool
m68k_decompose_address (machine_mode mode, rtx x,
			bool strict_p, struct m68k_address *address)
{
  unsigned int reach;

  memset (address, 0, sizeof (*address));

  /* REACH is the number of bytes the access covers; constant addresses
     must be able to reach all of them.  */
  if (mode == BLKmode)
    reach = 1;
  else
    reach = GET_MODE_SIZE (mode);

  /* Check for (An) (mode 2).  */
  if (m68k_legitimate_base_reg_p (x, strict_p))
    {
      address->base = x;
      return true;
    }

  /* Check for -(An) and (An)+ (modes 3 and 4).  */
  if ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_INC)
      && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
    {
      address->code = GET_CODE (x);
      address->base = XEXP (x, 0);
      return true;
    }

  /* Check for (d16,An) (mode 5).  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && IN_RANGE (INTVAL (XEXP (x, 1)), -0x8000, 0x8000 - reach)
      && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
    {
      address->base = XEXP (x, 0);
      address->offset = XEXP (x, 1);
      return true;
    }

  /* Check for GOT loads.  These are (bd,An,Xn) addresses if
     TARGET_68020 && flag_pic == 2, otherwise they are (d16,An)
     addresses.  */
  if (GET_CODE (x) == PLUS
      && XEXP (x, 0) == pic_offset_table_rtx)
    {
      /* As we are processing a PLUS, do not unwrap RELOC32 symbols --
	 they are invalid in this context.  */
      if (m68k_unwrap_symbol (XEXP (x, 1), false) != XEXP (x, 1))
	{
	  address->base = XEXP (x, 0);
	  address->offset = XEXP (x, 1);
	  return true;
	}
    }

  /* The ColdFire FPU only accepts addressing modes 2-5.  */
  if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
    return false;

  /* Check for (xxx).w and (xxx).l.  Also, in the TARGET_PCREL case,
     check for (d16,PC) or (bd,PC,Xn) with a suppressed index register.
     All these modes are variations of mode 7.  */
  if (m68k_legitimate_constant_address_p (x, reach, strict_p))
    {
      address->offset = x;
      return true;
    }

  /* Check for (d8,PC,Xn), a mode 7 form.  This case is needed for
     tablejumps.

     ??? do_tablejump creates these addresses before placing the target
     label, so we have to assume that unplaced labels are jump table
     references.  It seems unlikely that we would ever generate indexed
     accesses to unplaced labels in other cases.  */
  if (GET_CODE (x) == PLUS
      && m68k_jump_table_ref_p (XEXP (x, 1))
      && m68k_decompose_index (XEXP (x, 0), strict_p, address))
    {
      address->offset = XEXP (x, 1);
      return true;
    }

  /* Everything hereafter deals with (d8,An,Xn.SIZE*SCALE) or
     (bd,An,Xn.SIZE*SCALE) addresses.  */

  if (TARGET_68020)
    {
      /* Check for a nonzero base displacement.  */
      if (GET_CODE (x) == PLUS
	  && m68k_legitimate_constant_address_p (XEXP (x, 1), reach, strict_p))
	{
	  address->offset = XEXP (x, 1);
	  x = XEXP (x, 0);
	}

      /* Check for a suppressed index register.  */
      if (m68k_legitimate_base_reg_p (x, strict_p))
	{
	  address->base = x;
	  return true;
	}

      /* Check for a suppressed base register.  Do not allow this case
	 for non-symbolic offsets as it effectively gives gcc freedom
	 to treat data registers as base registers, which can generate
	 worse code.  */
      if (address->offset
	  && symbolic_operand (address->offset, VOIDmode)
	  && m68k_decompose_index (x, strict_p, address))
	return true;
    }
  else
    {
      /* Check for a nonzero base displacement.  Pre-68020 only has an
	 8-bit displacement in the indexed modes.  */
      if (GET_CODE (x) == PLUS
	  && GET_CODE (XEXP (x, 1)) == CONST_INT
	  && IN_RANGE (INTVAL (XEXP (x, 1)), -0x80, 0x80 - reach))
	{
	  address->offset = XEXP (x, 1);
	  x = XEXP (x, 0);
	}
    }

  /* We now expect the sum of a base and an index.  Try both orders,
     since PLUS operands are not canonically ordered here.  */
  if (GET_CODE (x) == PLUS)
    {
      if (m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p)
	  && m68k_decompose_index (XEXP (x, 1), strict_p, address))
	{
	  address->base = XEXP (x, 0);
	  return true;
	}

      if (m68k_legitimate_base_reg_p (XEXP (x, 1), strict_p)
	  && m68k_decompose_index (XEXP (x, 0), strict_p, address))
	{
	  address->base = XEXP (x, 1);
	  return true;
	}
    }
  return false;
}
2125 | ||
2126 | /* Return true if X is a legitimate address for values of mode MODE. | |
2127 | STRICT_P says whether strict checking is needed. */ | |
2128 | ||
2129 | bool | |
3754d046 | 2130 | m68k_legitimate_address_p (machine_mode mode, rtx x, bool strict_p) |
b97b0687 | 2131 | { |
2132 | struct m68k_address address; | |
2133 | ||
2134 | return m68k_decompose_address (mode, x, strict_p, &address); | |
2135 | } | |
2136 | ||
2137 | /* Return true if X is a memory, describing its address in ADDRESS if so. | |
2138 | Apply strict checking if called during or after reload. */ | |
2139 | ||
2140 | static bool | |
2141 | m68k_legitimate_mem_p (rtx x, struct m68k_address *address) | |
2142 | { | |
2143 | return (MEM_P (x) | |
2144 | && m68k_decompose_address (GET_MODE (x), XEXP (x, 0), | |
2145 | reload_in_progress || reload_completed, | |
2146 | address)); | |
2147 | } | |
2148 | ||
ca316360 | 2149 | /* Implement TARGET_LEGITIMATE_CONSTANT_P. */ |
2150 | ||
2151 | bool | |
3754d046 | 2152 | m68k_legitimate_constant_p (machine_mode mode, rtx x) |
ca316360 | 2153 | { |
2154 | return mode != XFmode && !m68k_illegitimate_symbolic_constant_p (x); | |
2155 | } | |
2156 | ||
b97b0687 | 2157 | /* Return true if X matches the 'Q' constraint. It must be a memory |
2158 | with a base address and no constant offset or index. */ | |
2159 | ||
2160 | bool | |
2161 | m68k_matches_q_p (rtx x) | |
2162 | { | |
2163 | struct m68k_address address; | |
2164 | ||
2165 | return (m68k_legitimate_mem_p (x, &address) | |
2166 | && address.code == UNKNOWN | |
2167 | && address.base | |
2168 | && !address.offset | |
2169 | && !address.index); | |
2170 | } | |
2171 | ||
2172 | /* Return true if X matches the 'U' constraint. It must be a base address | |
2173 | with a constant offset and no index. */ | |
2174 | ||
2175 | bool | |
2176 | m68k_matches_u_p (rtx x) | |
2177 | { | |
2178 | struct m68k_address address; | |
2179 | ||
2180 | return (m68k_legitimate_mem_p (x, &address) | |
2181 | && address.code == UNKNOWN | |
2182 | && address.base | |
2183 | && address.offset | |
2184 | && !address.index); | |
2185 | } | |
2186 | ||
869bde6b | 2187 | /* Return GOT pointer. */ |
2188 | ||
2189 | static rtx | |
2190 | m68k_get_gp (void) | |
2191 | { | |
2192 | if (pic_offset_table_rtx == NULL_RTX) | |
2193 | pic_offset_table_rtx = gen_rtx_REG (Pmode, PIC_REG); | |
2194 | ||
2195 | crtl->uses_pic_offset_table = 1; | |
2196 | ||
2197 | return pic_offset_table_rtx; | |
2198 | } | |
2199 | ||
/* M68K relocations, used to distinguish GOT and TLS relocations in UNSPEC
   wrappers.  RELOC_GOT is a plain GOT reference; the rest are the TLS
   access models (general dynamic, local dynamic module/offset, initial
   exec, local exec).  */
enum m68k_reloc { RELOC_GOT, RELOC_TLSGD, RELOC_TLSLDM, RELOC_TLSLDO,
		  RELOC_TLSIE, RELOC_TLSLE };

/* True for any relocation kind other than a plain GOT reference.  */
#define TLS_RELOC_P(RELOC) ((RELOC) != RELOC_GOT)
2206 | ||
/* Wrap symbol X into unspec representing relocation RELOC.
   BASE_REG - register that should be added to the result.
   TEMP_REG - if non-null, temporary register.  */

static rtx
m68k_wrap_symbol (rtx x, enum m68k_reloc reloc, rtx base_reg, rtx temp_reg)
{
  bool use_x_p;

  /* -mxgot governs GOT references (BASE_REG is the PIC register),
     -mxtls governs TLS references.  */
  use_x_p = (base_reg == pic_offset_table_rtx) ? TARGET_XGOT : TARGET_XTLS;

  if (TARGET_COLDFIRE && use_x_p)
    /* When compiling with -mx{got, tls} switch the code will look like this:

       move.l <X>@<RELOC>,<TEMP_REG>
       add.l <BASE_REG>,<TEMP_REG> */
    {
      /* Wrap X in UNSPEC_??? to tip m68k_output_addr_const_extra
	 to put @RELOC after reference.  */
      x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)),
			  UNSPEC_RELOC32);
      x = gen_rtx_CONST (Pmode, x);

      if (temp_reg == NULL)
	{
	  gcc_assert (can_create_pseudo_p ());
	  temp_reg = gen_reg_rtx (Pmode);
	}

      /* Emit the 32-bit load and the base-register addition; the
	 result lives in TEMP_REG.  */
      emit_move_insn (temp_reg, x);
      emit_insn (gen_addsi3 (temp_reg, temp_reg, base_reg));
      x = temp_reg;
    }
  else
    {
      /* Standard case: a 16-bit relocation folded into an address of
	 the form (BASE_REG + <X>@<RELOC>).  No insns are emitted.  */
      x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)),
			  UNSPEC_RELOC16);
      x = gen_rtx_CONST (Pmode, x);

      x = gen_rtx_PLUS (Pmode, base_reg, x);
    }

  return x;
}
2251 | ||
/* Helper for m68k_unwrap_symbol.
   Also, if unwrapping was successful (that is if (ORIG != <return value>)),
   sets *RELOC_PTR to relocation type for the symbol.  */

static rtx
m68k_unwrap_symbol_1 (rtx orig, bool unwrap_reloc32_p,
		      enum m68k_reloc *reloc_ptr)
{
  if (GET_CODE (orig) == CONST)
    {
      rtx x;
      enum m68k_reloc dummy;

      x = XEXP (orig, 0);

      if (reloc_ptr == NULL)
	reloc_ptr = &dummy;

      /* Handle an addend: look through (UNSPEC ... +/- const_int).  */
      if ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS)
	  && CONST_INT_P (XEXP (x, 1)))
	x = XEXP (x, 0);

      if (GET_CODE (x) == UNSPEC)
	{
	  switch (XINT (x, 1))
	    {
	    case UNSPEC_RELOC16:
	      /* Always unwrap 16-bit relocations.  */
	      orig = XVECEXP (x, 0, 0);
	      *reloc_ptr = (enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1));
	      break;

	    case UNSPEC_RELOC32:
	      /* 32-bit relocations are only unwrapped on request.  */
	      if (unwrap_reloc32_p)
		{
		  orig = XVECEXP (x, 0, 0);
		  *reloc_ptr = (enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1));
		}
	      break;

	    default:
	      break;
	    }
	}
    }

  /* ORIG is returned unchanged when no wrapper was recognized.  */
  return orig;
}
2300 | ||
2301 | /* Unwrap symbol from UNSPEC_RELOC16 and, if unwrap_reloc32_p, | |
2302 | UNSPEC_RELOC32 wrappers. */ | |
2303 | ||
2304 | rtx | |
2305 | m68k_unwrap_symbol (rtx orig, bool unwrap_reloc32_p) | |
2306 | { | |
2307 | return m68k_unwrap_symbol_1 (orig, unwrap_reloc32_p, NULL); | |
2308 | } | |
2309 | ||
/* Prescan insn before outputing assembler for it.  */

void
m68k_final_prescan_insn (rtx_insn *insn ATTRIBUTE_UNUSED,
			 rtx *operands, int n_operands)
{
  int i;

  /* Combine and, possibly, other optimizations may do good job
     converting
       (const (unspec [(symbol)]))
     into
       (const (plus (unspec [(symbol)])
		    (const_int N))).
     The problem with this is emitting @TLS or @GOT decorations.
     The decoration is emitted when processing (unspec), so the
     result would be "#symbol@TLSLE+N" instead of "#symbol+N@TLSLE".

     It seems that the easiest solution to this is to convert such
     operands to
       (const (unspec [(plus (symbol)
			     (const_int N))])).
     Note, that the top level of operand remains intact, so we don't have
     to patch up anything outside of the operand.  */

  subrtx_var_iterator::array_type array;
  for (i = 0; i < n_operands; ++i)
    {
      rtx op;

      op = operands[i];

      FOR_EACH_SUBRTX_VAR (iter, array, op, ALL)
	{
	  rtx x = *iter;
	  /* m68k_unwrap_symbol changing X means X is a wrapped
	     (const ...) that may carry a misplaced addend.  */
	  if (m68k_unwrap_symbol (x, true) != x)
	    {
	      rtx plus;

	      gcc_assert (GET_CODE (x) == CONST);
	      plus = XEXP (x, 0);

	      if (GET_CODE (plus) == PLUS || GET_CODE (plus) == MINUS)
		{
		  rtx unspec;
		  rtx addend;

		  unspec = XEXP (plus, 0);
		  gcc_assert (GET_CODE (unspec) == UNSPEC);
		  addend = XEXP (plus, 1);
		  gcc_assert (CONST_INT_P (addend));

		  /* We now have all the pieces, rearrange them.
		     Note: this rewrites the operand's RTL in place.  */

		  /* Move symbol to plus.  */
		  XEXP (plus, 0) = XVECEXP (unspec, 0, 0);

		  /* Move plus inside unspec.  */
		  XVECEXP (unspec, 0, 0) = plus;

		  /* Move unspec to top level of const.  */
		  XEXP (x, 0) = unspec;
		}
	      /* The rewritten subtree needs no further scanning.  */
	      iter.skip_subrtxes ();
	    }
	}
    }
}
2378 | ||
2379 | /* Move X to a register and add REG_EQUAL note pointing to ORIG. | |
2380 | If REG is non-null, use it; generate new pseudo otherwise. */ | |
2381 | ||
2382 | static rtx | |
2383 | m68k_move_to_reg (rtx x, rtx orig, rtx reg) | |
2384 | { | |
9e9d6337 | 2385 | rtx_insn *insn; |
869bde6b | 2386 | |
2387 | if (reg == NULL_RTX) | |
2388 | { | |
2389 | gcc_assert (can_create_pseudo_p ()); | |
2390 | reg = gen_reg_rtx (Pmode); | |
2391 | } | |
2392 | ||
2393 | insn = emit_move_insn (reg, x); | |
2394 | /* Put a REG_EQUAL note on this insn, so that it can be optimized | |
2395 | by loop. */ | |
2396 | set_unique_reg_note (insn, REG_EQUAL, orig); | |
2397 | ||
2398 | return reg; | |
2399 | } | |
2400 | ||
2401 | /* Does the same as m68k_wrap_symbol, but returns a memory reference to | |
2402 | GOT slot. */ | |
2403 | ||
2404 | static rtx | |
2405 | m68k_wrap_symbol_into_got_ref (rtx x, enum m68k_reloc reloc, rtx temp_reg) | |
2406 | { | |
2407 | x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), temp_reg); | |
2408 | ||
2409 | x = gen_rtx_MEM (Pmode, x); | |
2410 | MEM_READONLY_P (x) = 1; | |
2411 | ||
2412 | return x; | |
2413 | } | |
2414 | ||
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.

   An address is legitimized by making an indirect reference
   through the Global Offset Table with the name of the symbol
   used as an offset.

   The assembler and linker are responsible for placing the
   address of the symbol in the GOT.  The function prologue
   is responsible for initializing a5 to the starting address
   of the GOT.

   The assembler is also responsible for translating a symbol name
   into a constant displacement from the start of the GOT.

   A quick example may make things a little clearer:

   When not generating PIC code to store the value 12345 into _foo
   we would generate the following code:

	movel #12345, _foo

   When generating PIC two transformations are made.  First, the compiler
   loads the address of foo into a register.  So the first transformation makes:

	lea _foo, a0
	movel #12345, a0@

   The code in movsi will intercept the lea instruction and call this
   routine which will transform the instructions into:

	movel a5@(_foo:w), a0
	movel #12345, a0@


   That (in a nutshell) is how *all* symbol and label references are
   handled.  */

rtx
legitimize_pic_address (rtx orig, machine_mode mode ATTRIBUTE_UNUSED,
			rtx reg)
{
  rtx pic_ref = orig;

  /* First handle a simple SYMBOL_REF or LABEL_REF */
  if (GET_CODE (orig) == SYMBOL_REF || GET_CODE (orig) == LABEL_REF)
    {
      gcc_assert (reg);

      /* Load the symbol's address from its GOT slot into REG.  */
      pic_ref = m68k_wrap_symbol_into_got_ref (orig, RELOC_GOT, reg);
      pic_ref = m68k_move_to_reg (pic_ref, orig, reg);
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      /* Make sure this has not already been legitimized.  */
      if (m68k_unwrap_symbol (orig, true) != orig)
	return orig;

      gcc_assert (reg);

      /* legitimize both operands of the PLUS */
      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
				     base == reg ? 0 : reg);

      /* Recombine the legitimized halves; fold a constant addend
	 directly into the address.  */
      if (GET_CODE (orig) == CONST_INT)
	pic_ref = plus_constant (Pmode, base, INTVAL (orig));
      else
	pic_ref = gen_rtx_PLUS (Pmode, base, orig);
    }

  return pic_ref;
}
2494 | ||
869bde6b | 2495 | /* The __tls_get_addr symbol. */ |
2496 | static GTY(()) rtx m68k_tls_get_addr; | |
2497 | ||
2498 | /* Return SYMBOL_REF for __tls_get_addr. */ | |
2499 | ||
2500 | static rtx | |
2501 | m68k_get_tls_get_addr (void) | |
2502 | { | |
2503 | if (m68k_tls_get_addr == NULL_RTX) | |
2504 | m68k_tls_get_addr = init_one_libfunc ("__tls_get_addr"); | |
2505 | ||
2506 | return m68k_tls_get_addr; | |
2507 | } | |
2508 | ||
/* Return libcall result in A0 instead of usual D0.  Consulted by the
   libcall-value hook while the flag is set.  */
static bool m68k_libcall_value_in_a0_p = false;

/* Emit instruction sequence that calls __tls_get_addr.  X is
   the TLS symbol we are referencing and RELOC is the symbol type to use
   (either TLSGD or TLSLDM).  EQV is the REG_EQUAL note for the sequence
   emitted.  A pseudo register with result of __tls_get_addr call is
   returned.  */

static rtx
m68k_call_tls_get_addr (rtx x, rtx eqv, enum m68k_reloc reloc)
{
  rtx a0;
  rtx_insn *insns;
  rtx dest;

  /* Emit the call sequence.  */
  start_sequence ();

  /* FIXME: Unfortunately, emit_library_call_value does not
     consider (plus (%a5) (const (unspec))) to be a good enough
     operand for push, so it forces it into a register.  The bad
     thing about this is that combiner, due to copy propagation and other
     optimizations, sometimes can not later fix this.  As a consequence,
     additional register may be allocated resulting in a spill.
     For reference, see args processing loops in
     calls.c:emit_library_call_value_1.
     For testcase, see gcc.target/m68k/tls-{gd, ld}.c  */
  x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), NULL_RTX);

  /* __tls_get_addr() is not a libcall, but emitting a libcall_value
     is the simplest way of generating a call.  The difference between
     __tls_get_addr() and a libcall is that the result is returned in A0
     instead of the usual D0 (see m68k_libcall_value_in_a0_p above).
     To work around this, we use m68k_libcall_value_in_a0_p
     which temporarily switches returning the result to A0.  */

  m68k_libcall_value_in_a0_p = true;
  a0 = emit_library_call_value (m68k_get_tls_get_addr (), NULL_RTX, LCT_PURE,
				Pmode, x, Pmode);
  m68k_libcall_value_in_a0_p = false;

  insns = get_insns ();
  end_sequence ();

  /* Wrap the call in a libcall block carrying the REG_EQUAL note EQV,
     so the RTL optimizers can CSE equivalent TLS address computations.  */
  gcc_assert (can_create_pseudo_p ());
  dest = gen_reg_rtx (Pmode);
  emit_libcall_block (insns, dest, a0, eqv);

  return dest;
}
2559 | ||
/* Cached SYMBOL_REF for the __m68k_read_tp function.  (The original
   comment said "__tls_get_addr" — copy-paste error.)  */
static GTY(()) rtx m68k_read_tp;

/* Return SYMBOL_REF for __m68k_read_tp.  */

static rtx
m68k_get_m68k_read_tp (void)
{
  /* Created lazily on first use.  */
  if (m68k_read_tp == NULL_RTX)
    m68k_read_tp = init_one_libfunc ("__m68k_read_tp");

  return m68k_read_tp;
}
2573 | ||
/* Emit instruction sequence that calls __m68k_read_tp.
   A pseudo register with result of __m68k_read_tp call is returned.  */

static rtx
m68k_call_m68k_read_tp (void)
{
  rtx a0;
  rtx eqv;
  rtx_insn *insns;
  rtx dest;

  start_sequence ();

  /* __m68k_read_tp() is not a libcall, but emitting a libcall_value
     is the simplest way of generating a call.  The difference between
     __m68k_read_tp() and a libcall is that the result is returned in A0
     instead of the usual D0 (see m68k_libcall_value_in_a0_p above).
     To work around this, we use m68k_libcall_value_in_a0_p
     which temporarily switches returning the result to A0.  */

  /* Emit the call sequence.  */
  m68k_libcall_value_in_a0_p = true;
  a0 = emit_library_call_value (m68k_get_m68k_read_tp (), NULL_RTX, LCT_PURE,
				Pmode);
  m68k_libcall_value_in_a0_p = false;
  insns = get_insns ();
  end_sequence ();

  /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
     share the m68k_read_tp result with other IE/LE model accesses.
     const1_rtx distinguishes this from the LDM unspec (const0_rtx).  */
  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const1_rtx), UNSPEC_RELOC32);

  gcc_assert (can_create_pseudo_p ());
  dest = gen_reg_rtx (Pmode);
  emit_libcall_block (insns, dest, a0, eqv);

  return dest;
}
2611 | ||
/* Return a legitimized address for accessing TLS SYMBOL_REF X.
   For explanations on instructions sequences see TLS/NPTL ABI for m68k and
   ColdFire.  */

rtx
m68k_legitimize_tls_address (rtx orig)
{
  switch (SYMBOL_REF_TLS_MODEL (orig))
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      /* GD: the address is simply the result of calling
	 __tls_get_addr on the symbol's TLSGD slot.  */
      orig = m68k_call_tls_get_addr (orig, orig, RELOC_TLSGD);
      break;

    case TLS_MODEL_LOCAL_DYNAMIC:
      {
	rtx eqv;
	rtx a0;
	rtx x;

	/* Attach a unique REG_EQUIV, to allow the RTL optimizers to
	   share the LDM result with other LD model accesses.  */
	eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
			      UNSPEC_RELOC32);

	a0 = m68k_call_tls_get_addr (orig, eqv, RELOC_TLSLDM);

	/* Add the symbol's module-relative (TLSLDO) offset to the
	   module base returned by __tls_get_addr.  */
	x = m68k_wrap_symbol (orig, RELOC_TLSLDO, a0, NULL_RTX);

	if (can_create_pseudo_p ())
	  x = m68k_move_to_reg (x, orig, NULL_RTX);

	orig = x;
	break;
      }

    case TLS_MODEL_INITIAL_EXEC:
      {
	rtx a0;
	rtx x;

	/* IE: load the tp-relative offset from the symbol's GOT slot
	   and add the thread pointer.  */
	a0 = m68k_call_m68k_read_tp ();

	x = m68k_wrap_symbol_into_got_ref (orig, RELOC_TLSIE, NULL_RTX);
	x = gen_rtx_PLUS (Pmode, x, a0);

	if (can_create_pseudo_p ())
	  x = m68k_move_to_reg (x, orig, NULL_RTX);

	orig = x;
	break;
      }

    case TLS_MODEL_LOCAL_EXEC:
      {
	rtx a0;
	rtx x;

	/* LE: the tp-relative offset is a link-time constant; just
	   add it to the thread pointer.  */
	a0 = m68k_call_m68k_read_tp ();

	x = m68k_wrap_symbol (orig, RELOC_TLSLE, a0, NULL_RTX);

	if (can_create_pseudo_p ())
	  x = m68k_move_to_reg (x, orig, NULL_RTX);

	orig = x;
	break;
      }

    default:
      gcc_unreachable ();
    }

  return orig;
}
2686 | ||
2687 | /* Return true if X is a TLS symbol. */ | |
2688 | ||
2689 | static bool | |
2690 | m68k_tls_symbol_p (rtx x) | |
2691 | { | |
2692 | if (!TARGET_HAVE_TLS) | |
2693 | return false; | |
2694 | ||
2695 | if (GET_CODE (x) != SYMBOL_REF) | |
2696 | return false; | |
2697 | ||
2698 | return SYMBOL_REF_TLS_MODEL (x) != 0; | |
2699 | } | |
2700 | ||
/* If !LEGITIMATE_P, return true if X is a TLS symbol reference,
   though illegitimate one.
   If LEGITIMATE_P, return true if X is a legitimate TLS symbol reference.  */

bool
m68k_tls_reference_p (rtx x, bool legitimate_p)
{
  if (!TARGET_HAVE_TLS)
    return false;

  if (!legitimate_p)
    {
      /* Walk all sub-rtxes of X looking for a raw TLS SYMBOL_REF.  */
      subrtx_var_iterator::array_type array;
      FOR_EACH_SUBRTX_VAR (iter, array, x, ALL)
	{
	  rtx x = *iter;

	  /* Note: this is not the same as m68k_tls_symbol_p.  */
	  if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0)
	    return true;

	  /* Don't recurse into legitimate TLS references.  */
	  if (m68k_tls_reference_p (x, true))
	    iter.skip_subrtxes ();
	}
      return false;
    }
  else
    {
      enum m68k_reloc reloc = RELOC_GOT;

      /* X is a legitimate TLS reference iff it is a wrapped symbol
	 whose unspec carries one of the TLS relocations.  */
      return (m68k_unwrap_symbol_1 (x, true, &reloc) != x
	      && TLS_RELOC_P (reloc));
    }
}
2736 | ||
5243fbff | 2737 | \f |
1286b2d4 | 2738 | |
/* True if I fits the signed 8-bit immediate range of a moveq insn.  */
#define USE_MOVQ(i)	((unsigned) ((i) + 128) <= 255)

/* Return the type of move that should be used for integer I.  */

M68K_CONST_METHOD
m68k_const_method (HOST_WIDE_INT i)
{
  unsigned u;

  if (USE_MOVQ (i))
    return MOVQ;

  /* The ColdFire doesn't have byte or word operations.  */
  /* FIXME: This may not be useful for the m68060 either.  */
  if (!TARGET_COLDFIRE)
    {
      /* if -256 < N < 256 but N is not in range for a moveq
	 N^ff will be, so use moveq #N^ff, dreg; not.b dreg.  */
      if (USE_MOVQ (i ^ 0xff))
	return NOTB;
      /* Likewise, try with not.w.  */
      if (USE_MOVQ (i ^ 0xffff))
	return NOTW;
      /* This is the only value where neg.w is useful.  */
      if (i == -65408)
	return NEGW;
    }

  /* Try also with swap: moveq the halfword-swapped value, then swap.  */
  u = i;
  if (USE_MOVQ ((u >> 16) | (u << 16)))
    return SWAP;

  if (TARGET_ISAB)
    {
      /* Try using MVZ/MVS with an immediate value to load constants.  */
      if (i >= 0 && i <= 65535)
	return MVZ;
      if (i >= -32768 && i <= 32767)
	return MVS;
    }

  /* Otherwise, use move.l.  */
  return MOVL;
}
2784 | ||
92b2fb42 | 2785 | /* Return the cost of moving constant I into a data register. */ |
2786 | ||
fab7adbf | 2787 | static int |
92b2fb42 | 2788 | const_int_cost (HOST_WIDE_INT i) |
1286b2d4 | 2789 | { |
1d86aeab | 2790 | switch (m68k_const_method (i)) |
1286b2d4 | 2791 | { |
065b42aa | 2792 | case MOVQ: |
2793 | /* Constants between -128 and 127 are cheap due to moveq. */ | |
2794 | return 0; | |
2795 | case MVZ: | |
2796 | case MVS: | |
2797 | case NOTB: | |
2798 | case NOTW: | |
2799 | case NEGW: | |
2800 | case SWAP: | |
2801 | /* Constants easily generated by moveq + not.b/not.w/neg.w/swap. */ | |
2802 | return 1; | |
2803 | case MOVL: | |
2804 | return 2; | |
2805 | default: | |
2806 | gcc_unreachable (); | |
1286b2d4 | 2807 | } |
2808 | } | |
2809 | ||
/* Compute a cost estimate for rtx X (whose code appears inside
   OUTER_CODE) and store it in *TOTAL.  Return true if the cost is
   final, false to let the caller recurse into operands.  Serves as the
   backend's rtx_costs hook (presumably TARGET_RTX_COSTS — confirm at
   the target hook table, which is outside this chunk).  */

static bool
m68k_rtx_costs (rtx x, machine_mode mode, int outer_code,
		int opno ATTRIBUTE_UNUSED,
		int *total, bool speed ATTRIBUTE_UNUSED)
{
  int code = GET_CODE (x);

  switch (code)
    {
    case CONST_INT:
      /* Constant zero is super cheap due to clr instruction.  */
      if (x == const0_rtx)
	*total = 0;
      else
	*total = const_int_cost (INTVAL (x));
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 3;
      return true;

    case CONST_DOUBLE:
      /* Make 0.0 cheaper than other floating constants to
	 encourage creating tstsf and tstdf insns.  */
      if (outer_code == COMPARE
	  && (x == CONST0_RTX (SFmode) || x == CONST0_RTX (DFmode)))
	*total = 4;
      else
	*total = 5;
      return true;

    /* These are vaguely right for a 68020.  */
    /* The costs for long multiply have been adjusted to work properly
       in synth_mult on the 68020, relative to an average of the time
       for add and the time for shift, taking away a little more because
       sometimes move insns are needed.  */
    /* div?.w is relatively cheaper on 68000 counted in COSTS_N_INSNS
       terms.  */
#define MULL_COST				\
  (TUNE_68060 ? 2				\
   : TUNE_68040 ? 5				\
   : (TUNE_CFV2 && TUNE_EMAC) ? 3		\
   : (TUNE_CFV2 && TUNE_MAC) ? 4		\
   : TUNE_CFV2 ? 8				\
   : TARGET_COLDFIRE ? 3 : 13)

#define MULW_COST				\
  (TUNE_68060 ? 2				\
   : TUNE_68040 ? 3				\
   : TUNE_68000_10 ? 5				\
   : (TUNE_CFV2 && TUNE_EMAC) ? 3		\
   : (TUNE_CFV2 && TUNE_MAC) ? 2		\
   : TUNE_CFV2 ? 8				\
   : TARGET_COLDFIRE ? 2 : 8)

#define DIVW_COST				\
  (TARGET_CF_HWDIV ? 11				\
   : TUNE_68000_10 || TARGET_COLDFIRE ? 12 : 27)

    case PLUS:
      /* An lea costs about three times as much as a simple add.  */
      if (mode == SImode
	  && GET_CODE (XEXP (x, 1)) == REG
	  && GET_CODE (XEXP (x, 0)) == MULT
	  && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
	  && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
	  && (INTVAL (XEXP (XEXP (x, 0), 1)) == 2
	      || INTVAL (XEXP (XEXP (x, 0), 1)) == 4
	      || INTVAL (XEXP (XEXP (x, 0), 1)) == 8))
	{
	  /* lea an@(dx:l:i),am */
	  *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 2 : 3);
	  return true;
	}
      return false;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      if (TUNE_68060)
	{
	  *total = COSTS_N_INSNS(1);
	  return true;
	}
      if (TUNE_68000_10)
	{
	  if (GET_CODE (XEXP (x, 1)) == CONST_INT)
	    {
	      /* On 68000/68010 shift cost grows with the shift count.  */
	      if (INTVAL (XEXP (x, 1)) < 16)
		*total = COSTS_N_INSNS (2) + INTVAL (XEXP (x, 1)) / 2;
	      else
		/* We're using clrw + swap for these cases.  */
		*total = COSTS_N_INSNS (4) + (INTVAL (XEXP (x, 1)) - 16) / 2;
	    }
	  else
	    *total = COSTS_N_INSNS (10); /* Worst case.  */
	  return true;
	}
      /* A shift by a big integer takes an extra instruction.  */
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && (INTVAL (XEXP (x, 1)) == 16))
	{
	  *total = COSTS_N_INSNS (2);	 /* clrw;swap */
	  return true;
	}
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && !(INTVAL (XEXP (x, 1)) > 0
	       && INTVAL (XEXP (x, 1)) <= 8))
	{
	  *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 1 : 3);	 /* lsr #i,dn */
	  return true;
	}
      return false;

    case MULT:
      /* Widening 16x16->32 multiplies use the cheaper mul.w cost.  */
      if ((GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
	   || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
	  && mode == SImode)
	*total = COSTS_N_INSNS (MULW_COST);
      else if (mode == QImode || mode == HImode)
	*total = COSTS_N_INSNS (MULW_COST);
      else
	*total = COSTS_N_INSNS (MULL_COST);
      return true;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      if (mode == QImode || mode == HImode)
	*total = COSTS_N_INSNS (DIVW_COST);	/* div.w */
      else if (TARGET_CF_HWDIV)
	*total = COSTS_N_INSNS (18);
      else
	*total = COSTS_N_INSNS (43);		/* div.l */
      return true;

    case ZERO_EXTRACT:
      /* Bit-field extraction inside a comparison is folded into the
	 compare itself.  */
      if (outer_code == COMPARE)
	*total = 0;
      return false;

    default:
      return false;
    }
}
2958 | ||
/* Return an instruction to move CONST_INT OPERANDS[1] into data register
   OPERANDS[0].  May rewrite OPERANDS[1] in place to the immediate the
   chosen sequence actually needs.  */

static const char *
output_move_const_into_data_reg (rtx *operands)
{
  HOST_WIDE_INT i;

  i = INTVAL (operands[1]);
  switch (m68k_const_method (i))
    {
    case MVZ:
      return "mvzw %1,%0";
    case MVS:
      return "mvsw %1,%0";
    case MOVQ:
      return "moveq %1,%0";
    case NOTB:
      /* moveq the byte-complemented value, then not.b.  The extra insn
	 invalidates the tracked condition codes, hence CC_STATUS_INIT.  */
      CC_STATUS_INIT;
      operands[1] = GEN_INT (i ^ 0xff);
      return "moveq %1,%0\n\tnot%.b %0";
    case NOTW:
      /* Same trick with the low 16 bits complemented.  */
      CC_STATUS_INIT;
      operands[1] = GEN_INT (i ^ 0xffff);
      return "moveq %1,%0\n\tnot%.w %0";
    case NEGW:
      /* Only used for i == -65408 (see m68k_const_method).  */
      CC_STATUS_INIT;
      return "moveq #-128,%0\n\tneg%.w %0";
    case SWAP:
      {
	/* moveq the halfword-swapped value, then swap halves back.  */
	unsigned u = i;

	operands[1] = GEN_INT ((u << 16) | (u >> 16));
	return "moveq %1,%0\n\tswap %0";
      }
    case MOVL:
      return "move%.l %1,%0";
    default:
      gcc_unreachable ();
    }
}
3000 | ||
92b2fb42 | 3001 | /* Return true if I can be handled by ISA B's mov3q instruction. */ |
0c4bc85a | 3002 | |
92b2fb42 | 3003 | bool |
3004 | valid_mov3q_const (HOST_WIDE_INT i) | |
3005 | { | |
3006 | return TARGET_ISAB && (i == -1 || IN_RANGE (i, 1, 7)); | |
0c4bc85a | 3007 | } |
3008 | ||
/* Return an instruction to move CONST_INT OPERANDS[1] into OPERANDS[0].
   Picks the cheapest encoding available on the selected CPU.  */

static const char *
output_move_simode_const (rtx *operands)
{
  rtx dest;
  HOST_WIDE_INT src;

  dest = operands[0];
  src = INTVAL (operands[1]);
  if (src == 0
      && (DATA_REG_P (dest) || MEM_P (dest))
      /* clr insns on 68000 read before writing.  */
      && ((TARGET_68010 || TARGET_COLDFIRE)
	  || !(MEM_P (dest) && MEM_VOLATILE_P (dest))))
    return "clr%.l %0";
  else if (GET_MODE (dest) == SImode && valid_mov3q_const (src))
    return "mov3q%.l %1,%0";
  else if (src == 0 && ADDRESS_REG_P (dest))
    /* Address registers can't use clr; subtract from self instead.  */
    return "sub%.l %0,%0";
  else if (DATA_REG_P (dest))
    return output_move_const_into_data_reg (operands);
  else if (ADDRESS_REG_P (dest) && IN_RANGE (src, -0x8000, 0x7fff))
    {
      /* move.w into an address register sign-extends to 32 bits.  */
      if (valid_mov3q_const (src))
        return "mov3q%.l %1,%0";
      return "move%.w %1,%0";
    }
  else if (MEM_P (dest)
	   && GET_CODE (XEXP (dest, 0)) == PRE_DEC
	   && REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
	   && IN_RANGE (src, -0x8000, 0x7fff))
    {
      /* Pushing a small constant: pea with a :w displacement.  */
      if (valid_mov3q_const (src))
        return "mov3q%.l %1,%-";
      return "pea %a1";
    }
  return "move%.l %1,%0";
}
3049 | ||
f2fa409d | 3050 | const char * |
821960c7 | 3051 | output_move_simode (rtx *operands) |
a923bd2f | 3052 | { |
3053 | if (GET_CODE (operands[1]) == CONST_INT) | |
3054 | return output_move_simode_const (operands); | |
3055 | else if ((GET_CODE (operands[1]) == SYMBOL_REF | |
3056 | || GET_CODE (operands[1]) == CONST) | |
3057 | && push_operand (operands[0], SImode)) | |
3058 | return "pea %a1"; | |
3059 | else if ((GET_CODE (operands[1]) == SYMBOL_REF | |
3060 | || GET_CODE (operands[1]) == CONST) | |
3061 | && ADDRESS_REG_P (operands[0])) | |
3062 | return "lea %a1,%0"; | |
3063 | return "move%.l %1,%0"; | |
3064 | } | |
3065 | ||
/* Return the assembler template for a HImode move of OPERANDS[1]
   into OPERANDS[0].  */

const char *
output_move_himode (rtx *operands)
{
  if (GET_CODE (operands[1]) == CONST_INT)
    {
      if (operands[1] == const0_rtx
	  && (DATA_REG_P (operands[0])
	      || GET_CODE (operands[0]) == MEM)
	  /* clr insns on 68000 read before writing.  */
	  && ((TARGET_68010 || TARGET_COLDFIRE)
	      || !(GET_CODE (operands[0]) == MEM
		   && MEM_VOLATILE_P (operands[0]))))
	return "clr%.w %0";
      else if (operands[1] == const0_rtx
	       && ADDRESS_REG_P (operands[0]))
	/* Address registers can't use clr; subtract from self.  */
	return "sub%.l %0,%0";
      else if (DATA_REG_P (operands[0])
	       && INTVAL (operands[1]) < 128
	       && INTVAL (operands[1]) >= -128)
	/* Small constants fit a moveq.  */
	return "moveq %1,%0";
      else if (INTVAL (operands[1]) < 0x8000
	       && INTVAL (operands[1]) >= -0x8000)
	return "move%.w %1,%0";
    }
  else if (CONSTANT_P (operands[1]))
    /* Symbolic constants need a full 32-bit move.  */
    return "move%.l %1,%0";
  return "move%.w %1,%0";
}
3094 | ||
/* Return the assembler template for a QImode move of OPERANDS[1]
   into OPERANDS[0].  */

const char *
output_move_qimode (rtx *operands)
{
  /* 68k family always modifies the stack pointer by at least 2, even for
     byte pushes.  The 5200 (ColdFire) does not do this.  */

  /* This case is generated by pushqi1 pattern now.  */
  gcc_assert (!(GET_CODE (operands[0]) == MEM
		&& GET_CODE (XEXP (operands[0], 0)) == PRE_DEC
		&& XEXP (XEXP (operands[0], 0), 0) == stack_pointer_rtx
		&& ! ADDRESS_REG_P (operands[1])
		&& ! TARGET_COLDFIRE));

  /* clr and st insns on 68000 read before writing.  */
  if (!ADDRESS_REG_P (operands[0])
      && ((TARGET_68010 || TARGET_COLDFIRE)
	  || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
    {
      if (operands[1] == const0_rtx)
	return "clr%.b %0";
      /* 0xff can be set with a single st insn; st clobbers the tracked
	 condition codes, hence CC_STATUS_INIT.  */
      if ((!TARGET_COLDFIRE || DATA_REG_P (operands[0]))
	  && GET_CODE (operands[1]) == CONST_INT
	  && (INTVAL (operands[1]) & 255) == 255)
	{
	  CC_STATUS_INIT;
	  return "st %0";
	}
    }
  if (GET_CODE (operands[1]) == CONST_INT
      && DATA_REG_P (operands[0])
      && INTVAL (operands[1]) < 128
      && INTVAL (operands[1]) >= -128)
    return "moveq %1,%0";
  if (operands[1] == const0_rtx && ADDRESS_REG_P (operands[0]))
    return "sub%.l %0,%0";
  if (GET_CODE (operands[1]) != CONST_INT && CONSTANT_P (operands[1]))
    return "move%.l %1,%0";
  /* 68k family (including the 5200 ColdFire) does not support byte moves to
     from address registers.  */
  if (ADDRESS_REG_P (operands[0]) || ADDRESS_REG_P (operands[1]))
    return "move%.w %1,%0";
  return "move%.b %1,%0";
}
3138 | ||
f2fa409d | 3139 | const char * |
821960c7 | 3140 | output_move_stricthi (rtx *operands) |
8797980e | 3141 | { |
3142 | if (operands[1] == const0_rtx | |
374e1815 | 3143 | /* clr insns on 68000 read before writing. */ |
3144 | && ((TARGET_68010 || TARGET_COLDFIRE) | |
8797980e | 3145 | || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0])))) |
3146 | return "clr%.w %0"; | |
3147 | return "move%.w %1,%0"; | |
3148 | } | |
3149 | ||
f2fa409d | 3150 | const char * |
821960c7 | 3151 | output_move_strictqi (rtx *operands) |
8797980e | 3152 | { |
3153 | if (operands[1] == const0_rtx | |
374e1815 | 3154 | /* clr insns on 68000 read before writing. */ |
3155 | && ((TARGET_68010 || TARGET_COLDFIRE) | |
8797980e | 3156 | || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0])))) |
3157 | return "clr%.b %0"; | |
3158 | return "move%.b %1,%0"; | |
3159 | } | |
3160 | ||
5243fbff | 3161 | /* Return the best assembler insn template |
3162 | for moving operands[1] into operands[0] as a fullword. */ | |
3163 | ||
f2fa409d | 3164 | static const char * |
821960c7 | 3165 | singlemove_string (rtx *operands) |
5243fbff | 3166 | { |
24821a10 | 3167 | if (GET_CODE (operands[1]) == CONST_INT) |
3168 | return output_move_simode_const (operands); | |
3169 | return "move%.l %1,%0"; | |
5243fbff | 3170 | } |
3171 | ||
df86ba0c | 3172 | |
1d86aeab | 3173 | /* Output assembler or rtl code to perform a doubleword move insn |
3174 | with operands OPERANDS. | |
3175 | Pointers to 3 helper functions should be specified: | |
3176 | HANDLE_REG_ADJUST to adjust a register by a small value, | |
3177 | HANDLE_COMPADR to compute an address and | |
3178 | HANDLE_MOVSI to move 4 bytes. */ | |
5243fbff | 3179 | |
1d86aeab | 3180 | static void |
3181 | handle_move_double (rtx operands[2], | |
3182 | void (*handle_reg_adjust) (rtx, int), | |
3183 | void (*handle_compadr) (rtx [2]), | |
3184 | void (*handle_movsi) (rtx [2])) | |
5243fbff | 3185 | { |
df86ba0c | 3186 | enum |
3187 | { | |
3188 | REGOP, OFFSOP, MEMOP, PUSHOP, POPOP, CNSTOP, RNDOP | |
3189 | } optype0, optype1; | |
5243fbff | 3190 | rtx latehalf[2]; |
df86ba0c | 3191 | rtx middlehalf[2]; |
94cfb7c6 | 3192 | rtx xops[2]; |
5243fbff | 3193 | rtx addreg0 = 0, addreg1 = 0; |
94cfb7c6 | 3194 | int dest_overlapped_low = 0; |
7b6f576a | 3195 | int size = GET_MODE_SIZE (GET_MODE (operands[0])); |
df86ba0c | 3196 | |
3197 | middlehalf[0] = 0; | |
3198 | middlehalf[1] = 0; | |
5243fbff | 3199 | |
3200 | /* First classify both operands. */ | |
3201 | ||
3202 | if (REG_P (operands[0])) | |
3203 | optype0 = REGOP; | |
3204 | else if (offsettable_memref_p (operands[0])) | |
3205 | optype0 = OFFSOP; | |
3206 | else if (GET_CODE (XEXP (operands[0], 0)) == POST_INC) | |
3207 | optype0 = POPOP; | |
3208 | else if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC) | |
3209 | optype0 = PUSHOP; | |
3210 | else if (GET_CODE (operands[0]) == MEM) | |
3211 | optype0 = MEMOP; | |
3212 | else | |
3213 | optype0 = RNDOP; | |
3214 | ||
3215 | if (REG_P (operands[1])) | |
3216 | optype1 = REGOP; | |
3217 | else if (CONSTANT_P (operands[1])) | |
3218 | optype1 = CNSTOP; | |
3219 | else if (offsettable_memref_p (operands[1])) | |
3220 | optype1 = OFFSOP; | |
3221 | else if (GET_CODE (XEXP (operands[1], 0)) == POST_INC) | |
3222 | optype1 = POPOP; | |
3223 | else if (GET_CODE (XEXP (operands[1], 0)) == PRE_DEC) | |
3224 | optype1 = PUSHOP; | |
3225 | else if (GET_CODE (operands[1]) == MEM) | |
3226 | optype1 = MEMOP; | |
3227 | else | |
3228 | optype1 = RNDOP; | |
3229 | ||
84653e0f | 3230 | /* Check for the cases that the operand constraints are not supposed |
3231 | to allow to happen. Generating code for these cases is | |
3232 | painful. */ | |
3233 | gcc_assert (optype0 != RNDOP && optype1 != RNDOP); | |
5243fbff | 3234 | |
3235 | /* If one operand is decrementing and one is incrementing | |
3236 | decrement the former register explicitly | |
3237 | and change that operand into ordinary indexing. */ | |
3238 | ||
3239 | if (optype0 == PUSHOP && optype1 == POPOP) | |
3240 | { | |
3241 | operands[0] = XEXP (XEXP (operands[0], 0), 0); | |
1d86aeab | 3242 | |
3243 | handle_reg_adjust (operands[0], -size); | |
3244 | ||
df86ba0c | 3245 | if (GET_MODE (operands[1]) == XFmode) |
e6b2f841 | 3246 | operands[0] = gen_rtx_MEM (XFmode, operands[0]); |
df86ba0c | 3247 | else if (GET_MODE (operands[0]) == DFmode) |
e6b2f841 | 3248 | operands[0] = gen_rtx_MEM (DFmode, operands[0]); |
df86ba0c | 3249 | else |
e6b2f841 | 3250 | operands[0] = gen_rtx_MEM (DImode, operands[0]); |
5243fbff | 3251 | optype0 = OFFSOP; |
3252 | } | |
3253 | if (optype0 == POPOP && optype1 == PUSHOP) | |
3254 | { | |
3255 | operands[1] = XEXP (XEXP (operands[1], 0), 0); | |
1d86aeab | 3256 | |
3257 | handle_reg_adjust (operands[1], -size); | |
3258 | ||
df86ba0c | 3259 | if (GET_MODE (operands[1]) == XFmode) |
e6b2f841 | 3260 | operands[1] = gen_rtx_MEM (XFmode, operands[1]); |
df86ba0c | 3261 | else if (GET_MODE (operands[1]) == DFmode) |
e6b2f841 | 3262 | operands[1] = gen_rtx_MEM (DFmode, operands[1]); |
df86ba0c | 3263 | else |
e6b2f841 | 3264 | operands[1] = gen_rtx_MEM (DImode, operands[1]); |
5243fbff | 3265 | optype1 = OFFSOP; |
3266 | } | |
3267 | ||
3268 | /* If an operand is an unoffsettable memory ref, find a register | |
3269 | we can increment temporarily to make it refer to the second word. */ | |
3270 | ||
3271 | if (optype0 == MEMOP) | |
3272 | addreg0 = find_addr_reg (XEXP (operands[0], 0)); | |
3273 | ||
3274 | if (optype1 == MEMOP) | |
3275 | addreg1 = find_addr_reg (XEXP (operands[1], 0)); | |
3276 | ||
3277 | /* Ok, we can do one word at a time. | |
3278 | Normally we do the low-numbered word first, | |
3279 | but if either operand is autodecrementing then we | |
3280 | do the high-numbered word first. | |
3281 | ||
3282 | In either case, set up in LATEHALF the operands to use | |
3283 | for the high-numbered word and in some cases alter the | |
3284 | operands in OPERANDS to be suitable for the low-numbered word. */ | |
3285 | ||
df86ba0c | 3286 | if (size == 12) |
3287 | { | |
3288 | if (optype0 == REGOP) | |
3289 | { | |
e6b2f841 | 3290 | latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 2); |
3291 | middlehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1); | |
df86ba0c | 3292 | } |
3293 | else if (optype0 == OFFSOP) | |
3294 | { | |
eafc6604 | 3295 | middlehalf[0] = adjust_address (operands[0], SImode, 4); |
3296 | latehalf[0] = adjust_address (operands[0], SImode, size - 4); | |
df86ba0c | 3297 | } |
3298 | else | |
3299 | { | |
1d86aeab | 3300 | middlehalf[0] = adjust_address (operands[0], SImode, 0); |
3301 | latehalf[0] = adjust_address (operands[0], SImode, 0); | |
df86ba0c | 3302 | } |
3303 | ||
3304 | if (optype1 == REGOP) | |
3305 | { | |
e6b2f841 | 3306 | latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 2); |
3307 | middlehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1); | |
df86ba0c | 3308 | } |
3309 | else if (optype1 == OFFSOP) | |
3310 | { | |
eafc6604 | 3311 | middlehalf[1] = adjust_address (operands[1], SImode, 4); |
3312 | latehalf[1] = adjust_address (operands[1], SImode, size - 4); | |
df86ba0c | 3313 | } |
3314 | else if (optype1 == CNSTOP) | |
3315 | { | |
3316 | if (GET_CODE (operands[1]) == CONST_DOUBLE) | |
3317 | { | |
df86ba0c | 3318 | long l[3]; |
3319 | ||
945f7b03 | 3320 | REAL_VALUE_TO_TARGET_LONG_DOUBLE |
3321 | (*CONST_DOUBLE_REAL_VALUE (operands[1]), l); | |
df86ba0c | 3322 | operands[1] = GEN_INT (l[0]); |
3323 | middlehalf[1] = GEN_INT (l[1]); | |
3324 | latehalf[1] = GEN_INT (l[2]); | |
3325 | } | |
84653e0f | 3326 | else |
df86ba0c | 3327 | { |
84653e0f | 3328 | /* No non-CONST_DOUBLE constant should ever appear |
3329 | here. */ | |
3330 | gcc_assert (!CONSTANT_P (operands[1])); | |
df86ba0c | 3331 | } |
3332 | } | |
3333 | else | |
3334 | { | |
1d86aeab | 3335 | middlehalf[1] = adjust_address (operands[1], SImode, 0); |
3336 | latehalf[1] = adjust_address (operands[1], SImode, 0); | |
df86ba0c | 3337 | } |
3338 | } | |
5243fbff | 3339 | else |
df86ba0c | 3340 | /* size is not 12: */ |
3341 | { | |
3342 | if (optype0 == REGOP) | |
e6b2f841 | 3343 | latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1); |
df86ba0c | 3344 | else if (optype0 == OFFSOP) |
eafc6604 | 3345 | latehalf[0] = adjust_address (operands[0], SImode, size - 4); |
df86ba0c | 3346 | else |
1d86aeab | 3347 | latehalf[0] = adjust_address (operands[0], SImode, 0); |
df86ba0c | 3348 | |
3349 | if (optype1 == REGOP) | |
e6b2f841 | 3350 | latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1); |
df86ba0c | 3351 | else if (optype1 == OFFSOP) |
eafc6604 | 3352 | latehalf[1] = adjust_address (operands[1], SImode, size - 4); |
df86ba0c | 3353 | else if (optype1 == CNSTOP) |
3354 | split_double (operands[1], &operands[1], &latehalf[1]); | |
3355 | else | |
1d86aeab | 3356 | latehalf[1] = adjust_address (operands[1], SImode, 0); |
df86ba0c | 3357 | } |
5243fbff | 3358 | |
7e862b4e | 3359 | /* If insn is effectively movd N(REG),-(REG) then we will do the high |
3360 | word first. We should use the adjusted operand 1 (which is N+4(REG)) | |
3361 | for the low word as well, to compensate for the first decrement of | |
3362 | REG. */ | |
5243fbff | 3363 | if (optype0 == PUSHOP |
7e862b4e | 3364 | && reg_overlap_mentioned_p (XEXP (XEXP (operands[0], 0), 0), operands[1])) |
2d2c2623 | 3365 | operands[1] = middlehalf[1] = latehalf[1]; |
5243fbff | 3366 | |
94cfb7c6 | 3367 | /* For (set (reg:DI N) (mem:DI ... (reg:SI N) ...)), |
3368 | if the upper part of reg N does not appear in the MEM, arrange to | |
3369 | emit the move late-half first. Otherwise, compute the MEM address | |
3370 | into the upper part of N and use that as a pointer to the memory | |
3371 | operand. */ | |
3372 | if (optype0 == REGOP | |
3373 | && (optype1 == OFFSOP || optype1 == MEMOP)) | |
3374 | { | |
e6b2f841 | 3375 | rtx testlow = gen_rtx_REG (SImode, REGNO (operands[0])); |
25de1190 | 3376 | |
3377 | if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0)) | |
8867c023 | 3378 | && reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0))) |
94cfb7c6 | 3379 | { |
3380 | /* If both halves of dest are used in the src memory address, | |
25de1190 | 3381 | compute the address into latehalf of dest. |
3382 | Note that this can't happen if the dest is two data regs. */ | |
84653e0f | 3383 | compadr: |
94cfb7c6 | 3384 | xops[0] = latehalf[0]; |
3385 | xops[1] = XEXP (operands[1], 0); | |
1d86aeab | 3386 | |
3387 | handle_compadr (xops); | |
3388 | if (GET_MODE (operands[1]) == XFmode) | |
94cfb7c6 | 3389 | { |
e6b2f841 | 3390 | operands[1] = gen_rtx_MEM (XFmode, latehalf[0]); |
eafc6604 | 3391 | middlehalf[1] = adjust_address (operands[1], DImode, size - 8); |
3392 | latehalf[1] = adjust_address (operands[1], DImode, size - 4); | |
94cfb7c6 | 3393 | } |
3394 | else | |
3395 | { | |
e6b2f841 | 3396 | operands[1] = gen_rtx_MEM (DImode, latehalf[0]); |
eafc6604 | 3397 | latehalf[1] = adjust_address (operands[1], DImode, size - 4); |
94cfb7c6 | 3398 | } |
3399 | } | |
3400 | else if (size == 12 | |
8867c023 | 3401 | && reg_overlap_mentioned_p (middlehalf[0], |
3402 | XEXP (operands[1], 0))) | |
94cfb7c6 | 3403 | { |
25de1190 | 3404 | /* Check for two regs used by both source and dest. |
3405 | Note that this can't happen if the dest is all data regs. | |
3406 | It can happen if the dest is d6, d7, a0. | |
3407 | But in that case, latehalf is an addr reg, so | |
3408 | the code at compadr does ok. */ | |
3409 | ||
3410 | if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0)) | |
8867c023 | 3411 | || reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0))) |
3412 | goto compadr; | |
94cfb7c6 | 3413 | |
3414 | /* JRV says this can't happen: */ | |
84653e0f | 3415 | gcc_assert (!addreg0 && !addreg1); |
94cfb7c6 | 3416 | |
0c0492c9 | 3417 | /* Only the middle reg conflicts; simply put it last. */ |
1d86aeab | 3418 | handle_movsi (operands); |
3419 | handle_movsi (latehalf); | |
3420 | handle_movsi (middlehalf); | |
3421 | ||
3422 | return; | |
94cfb7c6 | 3423 | } |
f9ec812b | 3424 | else if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))) |
94cfb7c6 | 3425 | /* If the low half of dest is mentioned in the source memory |
3426 | address, the arrange to emit the move late half first. */ | |
3427 | dest_overlapped_low = 1; | |
3428 | } | |
3429 | ||
5243fbff | 3430 | /* If one or both operands autodecrementing, |
3431 | do the two words, high-numbered first. */ | |
3432 | ||
3433 | /* Likewise, the first move would clobber the source of the second one, | |
3434 | do them in the other order. This happens only for registers; | |
3435 | such overlap can't happen in memory unless the user explicitly | |
3436 | sets it up, and that is an undefined circumstance. */ | |
3437 | ||
3438 | if (optype0 == PUSHOP || optype1 == PUSHOP | |
3439 | || (optype0 == REGOP && optype1 == REGOP | |
df86ba0c | 3440 | && ((middlehalf[1] && REGNO (operands[0]) == REGNO (middlehalf[1])) |
94cfb7c6 | 3441 | || REGNO (operands[0]) == REGNO (latehalf[1]))) |
3442 | || dest_overlapped_low) | |
5243fbff | 3443 | { |
3444 | /* Make any unoffsettable addresses point at high-numbered word. */ | |
3445 | if (addreg0) | |
1d86aeab | 3446 | handle_reg_adjust (addreg0, size - 4); |
5243fbff | 3447 | if (addreg1) |
1d86aeab | 3448 | handle_reg_adjust (addreg1, size - 4); |
5243fbff | 3449 | |
3450 | /* Do that word. */ | |
1d86aeab | 3451 | handle_movsi (latehalf); |
5243fbff | 3452 | |
3453 | /* Undo the adds we just did. */ | |
3454 | if (addreg0) | |
1d86aeab | 3455 | handle_reg_adjust (addreg0, -4); |
5243fbff | 3456 | if (addreg1) |
1d86aeab | 3457 | handle_reg_adjust (addreg1, -4); |
5243fbff | 3458 | |
df86ba0c | 3459 | if (size == 12) |
3460 | { | |
1d86aeab | 3461 | handle_movsi (middlehalf); |
3462 | ||
df86ba0c | 3463 | if (addreg0) |
1d86aeab | 3464 | handle_reg_adjust (addreg0, -4); |
df86ba0c | 3465 | if (addreg1) |
1d86aeab | 3466 | handle_reg_adjust (addreg1, -4); |
df86ba0c | 3467 | } |
3468 | ||
5243fbff | 3469 | /* Do low-numbered word. */ |
1d86aeab | 3470 | |
3471 | handle_movsi (operands); | |
3472 | return; | |
5243fbff | 3473 | } |
3474 | ||
3475 | /* Normal case: do the two words, low-numbered first. */ | |
3476 | ||
5734a3c5 | 3477 | m68k_final_prescan_insn (NULL, operands, 2); |
1d86aeab | 3478 | handle_movsi (operands); |
5243fbff | 3479 | |
df86ba0c | 3480 | /* Do the middle one of the three words for long double */ |
3481 | if (size == 12) | |
3482 | { | |
3483 | if (addreg0) | |
1d86aeab | 3484 | handle_reg_adjust (addreg0, 4); |
df86ba0c | 3485 | if (addreg1) |
1d86aeab | 3486 | handle_reg_adjust (addreg1, 4); |
df86ba0c | 3487 | |
5734a3c5 | 3488 | m68k_final_prescan_insn (NULL, middlehalf, 2); |
1d86aeab | 3489 | handle_movsi (middlehalf); |
df86ba0c | 3490 | } |
3491 | ||
5243fbff | 3492 | /* Make any unoffsettable addresses point at high-numbered word. */ |
3493 | if (addreg0) | |
1d86aeab | 3494 | handle_reg_adjust (addreg0, 4); |
5243fbff | 3495 | if (addreg1) |
1d86aeab | 3496 | handle_reg_adjust (addreg1, 4); |
5243fbff | 3497 | |
3498 | /* Do that word. */ | |
5734a3c5 | 3499 | m68k_final_prescan_insn (NULL, latehalf, 2); |
1d86aeab | 3500 | handle_movsi (latehalf); |
5243fbff | 3501 | |
3502 | /* Undo the adds we just did. */ | |
3503 | if (addreg0) | |
1d86aeab | 3504 | handle_reg_adjust (addreg0, -(size - 4)); |
3505 | if (addreg1) | |
3506 | handle_reg_adjust (addreg1, -(size - 4)); | |
3507 | ||
3508 | return; | |
3509 | } | |
3510 | ||
3511 | /* Output assembler code to adjust REG by N. */ | |
3512 | static void | |
3513 | output_reg_adjust (rtx reg, int n) | |
3514 | { | |
3515 | const char *s; | |
3516 | ||
3517 | gcc_assert (GET_MODE (reg) == SImode | |
3518 | && -12 <= n && n != 0 && n <= 12); | |
3519 | ||
3520 | switch (n) | |
df86ba0c | 3521 | { |
1d86aeab | 3522 | case 12: |
3523 | s = "add%.l #12,%0"; | |
3524 | break; | |
3525 | ||
3526 | case 8: | |
3527 | s = "addq%.l #8,%0"; | |
3528 | break; | |
3529 | ||
3530 | case 4: | |
3531 | s = "addq%.l #4,%0"; | |
3532 | break; | |
3533 | ||
3534 | case -12: | |
3535 | s = "sub%.l #12,%0"; | |
3536 | break; | |
3537 | ||
3538 | case -8: | |
3539 | s = "subq%.l #8,%0"; | |
3540 | break; | |
3541 | ||
3542 | case -4: | |
3543 | s = "subq%.l #4,%0"; | |
3544 | break; | |
3545 | ||
3546 | default: | |
3547 | gcc_unreachable (); | |
3548 | s = NULL; | |
df86ba0c | 3549 | } |
1d86aeab | 3550 | |
3551 | output_asm_insn (s, ®); | |
3552 | } | |
3553 | ||
3554 | /* Emit rtl code to adjust REG by N. */ | |
3555 | static void | |
3556 | emit_reg_adjust (rtx reg1, int n) | |
3557 | { | |
3558 | rtx reg2; | |
3559 | ||
3560 | gcc_assert (GET_MODE (reg1) == SImode | |
3561 | && -12 <= n && n != 0 && n <= 12); | |
3562 | ||
3563 | reg1 = copy_rtx (reg1); | |
3564 | reg2 = copy_rtx (reg1); | |
3565 | ||
3566 | if (n < 0) | |
3567 | emit_insn (gen_subsi3 (reg1, reg2, GEN_INT (-n))); | |
3568 | else if (n > 0) | |
3569 | emit_insn (gen_addsi3 (reg1, reg2, GEN_INT (n))); | |
3570 | else | |
3571 | gcc_unreachable (); | |
3572 | } | |
3573 | ||
3574 | /* Output assembler to load address OPERANDS[0] to register OPERANDS[1]. */ | |
3575 | static void | |
3576 | output_compadr (rtx operands[2]) | |
3577 | { | |
3578 | output_asm_insn ("lea %a1,%0", operands); | |
3579 | } | |
3580 | ||
3581 | /* Output the best assembler insn for moving operands[1] into operands[0] | |
3582 | as a fullword. */ | |
3583 | static void | |
3584 | output_movsi (rtx operands[2]) | |
3585 | { | |
3586 | output_asm_insn (singlemove_string (operands), operands); | |
3587 | } | |
3588 | ||
3589 | /* Copy OP and change its mode to MODE. */ | |
3590 | static rtx | |
3754d046 | 3591 | copy_operand (rtx op, machine_mode mode) |
1d86aeab | 3592 | { |
3593 | /* ??? This looks really ugly. There must be a better way | |
3594 | to change a mode on the operand. */ | |
3595 | if (GET_MODE (op) != VOIDmode) | |
df86ba0c | 3596 | { |
1d86aeab | 3597 | if (REG_P (op)) |
3598 | op = gen_rtx_REG (mode, REGNO (op)); | |
df86ba0c | 3599 | else |
1d86aeab | 3600 | { |
3601 | op = copy_rtx (op); | |
3602 | PUT_MODE (op, mode); | |
3603 | } | |
df86ba0c | 3604 | } |
5243fbff | 3605 | |
1d86aeab | 3606 | return op; |
3607 | } | |
3608 | ||
3609 | /* Emit rtl code for moving operands[1] into operands[0] as a fullword. */ | |
3610 | static void | |
3611 | emit_movsi (rtx operands[2]) | |
3612 | { | |
3613 | operands[0] = copy_operand (operands[0], SImode); | |
3614 | operands[1] = copy_operand (operands[1], SImode); | |
3615 | ||
3616 | emit_insn (gen_movsi (operands[0], operands[1])); | |
3617 | } | |
3618 | ||
3619 | /* Output assembler code to perform a doubleword move insn | |
3620 | with operands OPERANDS. */ | |
3621 | const char * | |
3622 | output_move_double (rtx *operands) | |
3623 | { | |
3624 | handle_move_double (operands, | |
3625 | output_reg_adjust, output_compadr, output_movsi); | |
3626 | ||
5243fbff | 3627 | return ""; |
3628 | } | |
3629 | ||
1d86aeab | 3630 | /* Output rtl code to perform a doubleword move insn |
3631 | with operands OPERANDS. */ | |
3632 | void | |
3633 | m68k_emit_move_double (rtx operands[2]) | |
3634 | { | |
3635 | handle_move_double (operands, emit_reg_adjust, emit_movsi, emit_movsi); | |
3636 | } | |
58c23110 | 3637 | |
3638 | /* Ensure mode of ORIG, a REG rtx, is MODE. Returns either ORIG or a | |
3639 | new rtx with the correct mode. */ | |
3640 | ||
3641 | static rtx | |
3754d046 | 3642 | force_mode (machine_mode mode, rtx orig) |
58c23110 | 3643 | { |
3644 | if (mode == GET_MODE (orig)) | |
3645 | return orig; | |
3646 | ||
3647 | if (REGNO (orig) >= FIRST_PSEUDO_REGISTER) | |
3648 | abort (); | |
3649 | ||
3650 | return gen_rtx_REG (mode, REGNO (orig)); | |
3651 | } | |
3652 | ||
3653 | static int | |
3754d046 | 3654 | fp_reg_operand (rtx op, machine_mode mode ATTRIBUTE_UNUSED) |
58c23110 | 3655 | { |
3656 | return reg_renumber && FP_REG_P (op); | |
3657 | } | |
3658 | ||
/* Emit insns to move operands[1] into operands[0].

   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.

   Note SCRATCH_REG may not be in the proper mode depending on how it
   will be used.  This routine is responsible for creating a new copy
   of SCRATCH_REG in the proper mode.  */

int
emit_move_sequence (rtx *operands, machine_mode mode, rtx scratch_reg)
{
  register rtx operand0 = operands[0];
  register rtx operand1 = operands[1];
  register rtx tem;

  /* During reload, replace a pseudo destination that did not get a
     hard register by its equivalent memory location.  Only done when
     a scratch register is available to carry the address.  */
  if (scratch_reg
      && reload_in_progress && GET_CODE (operand0) == REG
      && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
    operand0 = reg_equiv_mem (REGNO (operand0));
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand0) == SUBREG
	   && GET_CODE (SUBREG_REG (operand0)) == REG
	   && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
    {
     /* We must not alter SUBREG_BYTE (operand0) since that would confuse
	the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
				 reg_equiv_mem (REGNO (SUBREG_REG (operand0))),
				 SUBREG_BYTE (operand0));
      operand0 = alter_subreg (&temp, true);
    }

  /* Same substitution for the source operand.  */
  if (scratch_reg
      && reload_in_progress && GET_CODE (operand1) == REG
      && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
    operand1 = reg_equiv_mem (REGNO (operand1));
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand1) == SUBREG
	   && GET_CODE (SUBREG_REG (operand1)) == REG
	   && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
    {
      /* We must not alter SUBREG_BYTE (operand0) since that would confuse
	 the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
				 reg_equiv_mem (REGNO (SUBREG_REG (operand1))),
				 SUBREG_BYTE (operand1));
      operand1 = alter_subreg (&temp, true);
    }

  /* Fold in any pending reload replacements of the memory addresses,
     so the address checks below see the final form.  */
  if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
      && ((tem = find_replacement (&XEXP (operand0, 0)))
	  != XEXP (operand0, 0)))
    operand0 = gen_rtx_MEM (GET_MODE (operand0), tem);
  if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
      && ((tem = find_replacement (&XEXP (operand1, 0)))
	  != XEXP (operand1, 0)))
    operand1 = gen_rtx_MEM (GET_MODE (operand1), tem);

  /* Handle secondary reloads for loads/stores of FP registers where
     the address is symbolic by using the scratch register */
  if (fp_reg_operand (operand0, mode)
      && ((GET_CODE (operand1) == MEM
	   && ! memory_address_p (DFmode, XEXP (operand1, 0)))
	  || ((GET_CODE (operand1) == SUBREG
	       && GET_CODE (XEXP (operand1, 0)) == MEM
	       && !memory_address_p (DFmode, XEXP (XEXP (operand1, 0), 0)))))
      && scratch_reg)
    {
      if (GET_CODE (operand1) == SUBREG)
	operand1 = XEXP (operand1, 0);

      /* SCRATCH_REG will hold an address.  We want
	 it in SImode regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (SImode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  (NOTE(review): the "14 bits" wording appears to be
	 inherited from the PA port this code was modeled on -- confirm.)  */
      if (!memory_address_p (Pmode, XEXP (operand1, 0)))
	{
	  /* Load the displacement first, then rebuild the address as
	     base-op-scratch so it becomes a valid register-based form.  */
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
	  emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
						       Pmode,
						       XEXP (XEXP (operand1, 0), 0),
						       scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand1, 0));
      /* Load the FP register through the scratch address.  */
      emit_insn (gen_rtx_SET (operand0, gen_rtx_MEM (mode, scratch_reg)));
      return 1;
    }
  else if (fp_reg_operand (operand1, mode)
	   && ((GET_CODE (operand0) == MEM
		&& ! memory_address_p (DFmode, XEXP (operand0, 0)))
	       || ((GET_CODE (operand0) == SUBREG)
		   && GET_CODE (XEXP (operand0, 0)) == MEM
		   && !memory_address_p (DFmode, XEXP (XEXP (operand0, 0), 0))))
	   && scratch_reg)
    {
      /* Mirror case: FP register stored to a symbolic address.  */
      if (GET_CODE (operand0) == SUBREG)
	operand0 = XEXP (operand0, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in SIMODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (SImode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand0, 0)))
	{
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
	  emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0,
								       0)),
						       Pmode,
						       XEXP (XEXP (operand0, 0),
							     0),
						       scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand0, 0));
      /* Store the FP register through the scratch address.  */
      emit_insn (gen_rtx_SET (gen_rtx_MEM (mode, scratch_reg), operand1));
      return 1;
    }
  /* Handle secondary reloads for loads of FP registers from constant
     expressions by forcing the constant into memory.

     use scratch_reg to hold the address of the memory location.

     The proper fix is to change PREFERRED_RELOAD_CLASS to return
     NO_REGS when presented with a const_int and an register class
     containing only FP registers.  Doing so unfortunately creates
     more problems than it solves.   Fix this for 2.5.  */
  else if (fp_reg_operand (operand0, mode)
	   && CONSTANT_P (operand1)
	   && scratch_reg)
    {
      rtx xoperands[2];

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in SIMODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (SImode, scratch_reg);

      /* Force the constant into memory and put the address of the
	 memory location into scratch_reg.  */
      xoperands[0] = scratch_reg;
      xoperands[1] = XEXP (force_const_mem (mode, operand1), 0);
      emit_insn (gen_rtx_SET (scratch_reg, xoperands[1]));

      /* Now load the destination register.  */
      emit_insn (gen_rtx_SET (operand0, gen_rtx_MEM (mode, scratch_reg)));
      return 1;
    }

  /* Now have insn-emit do whatever it normally does.  */
  return 0;
}
3819 | ||
8ec9650e | 3820 | /* Split one or more DImode RTL references into pairs of SImode |
3821 | references. The RTL can be REG, offsettable MEM, integer constant, or | |
3822 | CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to | |
3823 | split and "num" is its length. lo_half and hi_half are output arrays | |
3824 | that parallel "operands". */ | |
3825 | ||
3826 | void | |
3827 | split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[]) | |
3828 | { | |
3829 | while (num--) | |
3830 | { | |
3831 | rtx op = operands[num]; | |
3832 | ||
3833 | /* simplify_subreg refuses to split volatile memory addresses, | |
3834 | but we still have to handle it. */ | |
3835 | if (GET_CODE (op) == MEM) | |
3836 | { | |
3837 | lo_half[num] = adjust_address (op, SImode, 4); | |
3838 | hi_half[num] = adjust_address (op, SImode, 0); | |
3839 | } | |
3840 | else | |
3841 | { | |
3842 | lo_half[num] = simplify_gen_subreg (SImode, op, | |
3843 | GET_MODE (op) == VOIDmode | |
3844 | ? DImode : GET_MODE (op), 4); | |
3845 | hi_half[num] = simplify_gen_subreg (SImode, op, | |
3846 | GET_MODE (op) == VOIDmode | |
3847 | ? DImode : GET_MODE (op), 0); | |
3848 | } | |
3849 | } | |
3850 | } | |
3851 | ||
9c47decb | 3852 | /* Split X into a base and a constant offset, storing them in *BASE |
3853 | and *OFFSET respectively. */ | |
3854 | ||
3855 | static void | |
3856 | m68k_split_offset (rtx x, rtx *base, HOST_WIDE_INT *offset) | |
3857 | { | |
3858 | *offset = 0; | |
3859 | if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT) | |
3860 | { | |
3861 | *offset += INTVAL (XEXP (x, 1)); | |
3862 | x = XEXP (x, 0); | |
3863 | } | |
3864 | *base = x; | |
3865 | } | |
3866 | ||
/* Return true if PATTERN is a PARALLEL suitable for a movem or fmovem
   instruction.  STORE_P says whether the move is a load or store.

   If the instruction uses post-increment or pre-decrement addressing,
   AUTOMOD_BASE is the base register and AUTOMOD_OFFSET is the total
   adjustment.  This adjustment will be made by the first element of
   PARALLEL, with the loads or stores starting at element 1.  If the
   instruction does not use post-increment or pre-decrement addressing,
   AUTOMOD_BASE is null, AUTOMOD_OFFSET is 0, and the loads or stores
   start at element 0.  */

bool
m68k_movem_pattern_p (rtx pattern, rtx automod_base,
		      HOST_WIDE_INT automod_offset, bool store_p)
{
  rtx base, mem_base, set, mem, reg, last_reg;
  HOST_WIDE_INT offset, mem_offset;
  int i, first, len;
  enum reg_class rclass;

  len = XVECLEN (pattern, 0);
  /* Element 0 holds the base-register adjustment when automodification
     is in use, so the moves start at element 1 in that case.  */
  first = (automod_base != NULL);

  if (automod_base)
    {
      /* Stores must be pre-decrement and loads must be post-increment.  */
      if (store_p != (automod_offset < 0))
	return false;

      /* Work out the base and offset for lowest memory location.  */
      base = automod_base;
      offset = (automod_offset < 0 ? automod_offset : 0);
    }
  else
    {
      /* Allow any valid base and offset in the first access.  */
      base = NULL;
      offset = 0;
    }

  last_reg = NULL;
  rclass = NO_REGS;
  for (i = first; i < len; i++)
    {
      /* We need a plain SET.  */
      set = XVECEXP (pattern, 0, i);
      if (GET_CODE (set) != SET)
	return false;

      /* Check that we have a memory location...  */
      mem = XEXP (set, !store_p);
      if (!MEM_P (mem) || !memory_operand (mem, VOIDmode))
	return false;

      /* ...with the right address.  */
      if (base == NULL)
	{
	  m68k_split_offset (XEXP (mem, 0), &base, &offset);
	  /* The ColdFire instruction only allows (An) and (d16,An) modes.
	     There are no mode restrictions for 680x0 besides the
	     automodification rules enforced above.  */
	  if (TARGET_COLDFIRE
	      && !m68k_legitimate_base_reg_p (base, reload_completed))
	    return false;
	}
      else
	{
	  /* Subsequent accesses must be consecutive: same base,
	     offset advancing by the size of each moved register.  */
	  m68k_split_offset (XEXP (mem, 0), &mem_base, &mem_offset);
	  if (!rtx_equal_p (base, mem_base) || offset != mem_offset)
	    return false;
	}

      /* Check that we have a register of the required mode and class.  */
      reg = XEXP (set, store_p);
      if (!REG_P (reg)
	  || !HARD_REGISTER_P (reg)
	  || GET_MODE (reg) != reg_raw_mode[REGNO (reg)])
	return false;

      if (last_reg)
	{
	  /* The register must belong to RCLASS and have a higher number
	     than the register in the previous SET.  */
	  if (!TEST_HARD_REG_BIT (reg_class_contents[rclass], REGNO (reg))
	      || REGNO (last_reg) >= REGNO (reg))
	    return false;
	}
      else
	{
	  /* Work out which register class we need.  movem handles the
	     integer registers, fmovem the FP registers; they cannot mix.  */
	  if (INT_REGNO_P (REGNO (reg)))
	    rclass = GENERAL_REGS;
	  else if (FP_REGNO_P (REGNO (reg)))
	    rclass = FP_REGS;
	  else
	    return false;
	}

      last_reg = reg;
      offset += GET_MODE_SIZE (GET_MODE (reg));
    }

  /* If we have an automodification, check whether the final offset is OK.  */
  if (automod_base && offset != (automod_offset < 0 ? 0 : automod_offset))
    return false;

  /* Reject unprofitable cases.  */
  if (len < first + (rclass == FP_REGS ? MIN_FMOVEM_REGS : MIN_MOVEM_REGS))
    return false;

  return true;
}
3979 | ||
/* Return the assembly code template for a movem or fmovem instruction
   whose pattern is given by PATTERN.  Store the template's operands
   in OPERANDS.

   If the instruction uses post-increment or pre-decrement addressing,
   AUTOMOD_OFFSET is the total adjustment, otherwise it is 0.  STORE_P
   is true if this is a store instruction.  */

const char *
m68k_output_movem (rtx *operands, rtx pattern,
		   HOST_WIDE_INT automod_offset, bool store_p)
{
  unsigned int mask;
  int i, first;

  gcc_assert (GET_CODE (pattern) == PARALLEL);
  mask = 0;
  /* With automodification, element 0 of the PARALLEL is the base
     adjustment, so the actual moves start at element 1.  */
  first = (automod_offset != 0);
  for (i = first; i < XVECLEN (pattern, 0); i++)
    {
      /* When using movem with pre-decrement addressing, register X + D0_REG
	 is controlled by bit 15 - X.  For all other addressing modes,
	 register X + D0_REG is controlled by bit X.  Confusingly, the
	 register mask for fmovem is in the opposite order to that for
	 movem.  */
      unsigned int regno;

      gcc_assert (MEM_P (XEXP (XVECEXP (pattern, 0, i), !store_p)));
      gcc_assert (REG_P (XEXP (XVECEXP (pattern, 0, i), store_p)));
      regno = REGNO (XEXP (XVECEXP (pattern, 0, i), store_p));
      if (automod_offset < 0)
	{
	  /* Pre-decrement addressing: bit order is reversed for the
	     integer mask, normal for the FP mask.  */
	  if (FP_REGNO_P (regno))
	    mask |= 1 << (regno - FP0_REG);
	  else
	    mask |= 1 << (15 - (regno - D0_REG));
	}
      else
	{
	  /* All other addressing modes: normal order for the integer
	     mask, reversed for the FP mask.  */
	  if (FP_REGNO_P (regno))
	    mask |= 1 << (7 - (regno - FP0_REG));
	  else
	    mask |= 1 << (regno - D0_REG);
	}
    }
  /* The condition codes cannot be relied upon after a movem/fmovem.  */
  CC_STATUS_INIT;

  /* Operand 0 is the (possibly auto-modified) memory address;
     operand 1 is the register mask computed above.  */
  if (automod_offset == 0)
    operands[0] = XEXP (XEXP (XVECEXP (pattern, 0, first), !store_p), 0);
  else if (automod_offset < 0)
    operands[0] = gen_rtx_PRE_DEC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
  else
    operands[0] = gen_rtx_POST_INC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
  operands[1] = GEN_INT (mask);
  if (FP_REGNO_P (REGNO (XEXP (XVECEXP (pattern, 0, first), store_p))))
    {
      if (store_p)
	return "fmovem %1,%a0";
      else
	return "fmovem %a0,%1";
    }
  else
    {
      if (store_p)
	return "movem%.l %1,%a0";
      else
	return "movem%.l %a0,%1";
    }
}
4049 | ||
5243fbff | 4050 | /* Return a REG that occurs in ADDR with coefficient 1. |
4051 | ADDR can be effectively incremented by incrementing REG. */ | |
4052 | ||
4053 | static rtx | |
821960c7 | 4054 | find_addr_reg (rtx addr) |
5243fbff | 4055 | { |
4056 | while (GET_CODE (addr) == PLUS) | |
4057 | { | |
4058 | if (GET_CODE (XEXP (addr, 0)) == REG) | |
4059 | addr = XEXP (addr, 0); | |
4060 | else if (GET_CODE (XEXP (addr, 1)) == REG) | |
4061 | addr = XEXP (addr, 1); | |
4062 | else if (CONSTANT_P (XEXP (addr, 0))) | |
4063 | addr = XEXP (addr, 1); | |
4064 | else if (CONSTANT_P (XEXP (addr, 1))) | |
4065 | addr = XEXP (addr, 0); | |
4066 | else | |
84653e0f | 4067 | gcc_unreachable (); |
5243fbff | 4068 | } |
84653e0f | 4069 | gcc_assert (GET_CODE (addr) == REG); |
4070 | return addr; | |
5243fbff | 4071 | } |
ee808824 | 4072 | |
/* Output assembler code to perform a 32-bit 3-operand add.
   Picks the cheapest encoding (lea, addq/subq, addq pairs on CPU32,
   add.w on address registers) for the given operands.  NOTE: may swap
   operands[1]/operands[2] or rewrite operands[2] in place, which the
   caller's template operands will observe.  */

const char *
output_addsi3 (rtx *operands)
{
  if (! operands_match_p (operands[0], operands[1]))
    {
      /* Three-address form: lea needs the address register as the base,
	 so put it in operands[1] (addition is commutative).  */
      if (!ADDRESS_REG_P (operands[1]))
	{
	  rtx tmp = operands[1];

	  operands[1] = operands[2];
	  operands[2] = tmp;
	}

      /* These insns can result from reloads to access
	 stack slots over 64k from the frame pointer.  */
      if (GET_CODE (operands[2]) == CONST_INT
	  && (INTVAL (operands[2]) < -32768 || INTVAL (operands[2]) > 32767))
	return "move%.l %2,%0\n\tadd%.l %1,%0";
      if (GET_CODE (operands[2]) == REG)
	return MOTOROLA ? "lea (%1,%2.l),%0" : "lea %1@(0,%2:l),%0";
      return MOTOROLA ? "lea (%c2,%1),%0" : "lea %1@(%c2),%0";
    }
  if (GET_CODE (operands[2]) == CONST_INT)
    {
      /* addq/subq take an immediate in 1..8.  */
      if (INTVAL (operands[2]) > 0
	  && INTVAL (operands[2]) <= 8)
	return "addq%.l %2,%0";
      if (INTVAL (operands[2]) < 0
	  && INTVAL (operands[2]) >= -8)
	{
	  /* Negate the constant so it fits the subq immediate form.  */
	  operands[2] = GEN_INT (- INTVAL (operands[2]));
	  return "subq%.l %2,%0";
	}
      /* On the CPU32 it is faster to use two addql instructions to
	 add a small integer (8 < N <= 16) to a register.
	 Likewise for subql.  */
      if (TUNE_CPU32 && REG_P (operands[0]))
	{
	  if (INTVAL (operands[2]) > 8
	      && INTVAL (operands[2]) <= 16)
	    {
	      operands[2] = GEN_INT (INTVAL (operands[2]) - 8);
	      return "addq%.l #8,%0\n\taddq%.l %2,%0";
	    }
	  if (INTVAL (operands[2]) < -8
	      && INTVAL (operands[2]) >= -16)
	    {
	      operands[2] = GEN_INT (- INTVAL (operands[2]) - 8);
	      return "subq%.l #8,%0\n\tsubq%.l %2,%0";
	    }
	}
      /* For address registers a 16-bit add.w (or lea) suffices, since
	 address-register arithmetic sign-extends and does not need to
	 set the condition codes.  */
      if (ADDRESS_REG_P (operands[0])
	  && INTVAL (operands[2]) >= -0x8000
	  && INTVAL (operands[2]) < 0x8000)
	{
	  if (TUNE_68040)
	    return "add%.w %2,%0";
	  else
	    return MOTOROLA ? "lea (%c2,%0),%0" : "lea %0@(%c2),%0";
	}
    }
  /* Fall back to the general 32-bit add.  */
  return "add%.l %2,%0";
}
5243fbff | 4138 | \f |
/* Store in cc_status the expressions that the condition codes will
   describe after execution of an instruction whose pattern is EXP.
   Do not alter them if the instruction would not alter the cc's.  */

/* On the 68000, all the insns to store in an address register fail to
   set the cc's.  However, in some cases these instructions can make it
   possibly invalid to use the saved cc's.  In those cases we clear out
   some or all of the saved cc's so they won't be used.  */

void
notice_update_cc (rtx exp, rtx insn)
{
  if (GET_CODE (exp) == SET)
    {
      /* A call clobbers the flags entirely.  */
      if (GET_CODE (SET_SRC (exp)) == CALL)
	CC_STATUS_INIT;
      else if (ADDRESS_REG_P (SET_DEST (exp)))
	{
	  /* Address-register stores don't set the cc's, but they may
	     invalidate the registers the saved cc expressions mention.  */
	  if (cc_status.value1 && modified_in_p (cc_status.value1, insn))
	    cc_status.value1 = 0;
	  if (cc_status.value2 && modified_in_p (cc_status.value2, insn))
	    cc_status.value2 = 0;
	}
      /* fmoves to memory or data registers do not set the condition
	 codes.  Normal moves _do_ set the condition codes, but not in
	 a way that is appropriate for comparison with 0, because -0.0
	 would be treated as a negative nonzero number.  Note that it
	 isn't appropriate to conditionalize this restriction on
	 HONOR_SIGNED_ZEROS because that macro merely indicates whether
	 we care about the difference between -0.0 and +0.0.  */
      else if (!FP_REG_P (SET_DEST (exp))
	       && SET_DEST (exp) != cc0_rtx
	       && (FP_REG_P (SET_SRC (exp))
		   || GET_CODE (SET_SRC (exp)) == FIX
		   || FLOAT_MODE_P (GET_MODE (SET_DEST (exp)))))
	CC_STATUS_INIT;
      /* A pair of move insns doesn't produce a useful overall cc.  */
      else if (!FP_REG_P (SET_DEST (exp))
	       && !FP_REG_P (SET_SRC (exp))
	       && GET_MODE_SIZE (GET_MODE (SET_SRC (exp))) > 4
	       && (GET_CODE (SET_SRC (exp)) == REG
		   || GET_CODE (SET_SRC (exp)) == MEM
		   || GET_CODE (SET_SRC (exp)) == CONST_DOUBLE))
	CC_STATUS_INIT;
      else if (SET_DEST (exp) != pc_rtx)
	{
	  /* Record the destination/source pair the flags now reflect.  */
	  cc_status.flags = 0;
	  cc_status.value1 = SET_DEST (exp);
	  cc_status.value2 = SET_SRC (exp);
	}
    }
  else if (GET_CODE (exp) == PARALLEL
	   && GET_CODE (XVECEXP (exp, 0, 0)) == SET)
    {
      /* For a PARALLEL, only the first SET determines the flags.  */
      rtx dest = SET_DEST (XVECEXP (exp, 0, 0));
      rtx src = SET_SRC (XVECEXP (exp, 0, 0));

      if (ADDRESS_REG_P (dest))
	CC_STATUS_INIT;
      else if (dest != pc_rtx)
	{
	  cc_status.flags = 0;
	  cc_status.value1 = dest;
	  cc_status.value2 = src;
	}
    }
  else
    CC_STATUS_INIT;
  /* A QImode value in an address register is not directly testable.  */
  if (cc_status.value2 != 0
      && ADDRESS_REG_P (cc_status.value2)
      && GET_MODE (cc_status.value2) == QImode)
    CC_STATUS_INIT;
  if (cc_status.value2 != 0)
    switch (GET_CODE (cc_status.value2))
      {
      case ASHIFT: case ASHIFTRT: case LSHIFTRT:
      case ROTATE: case ROTATERT:
	/* These instructions always clear the overflow bit, and set
	   the carry to the bit shifted out.  */
	cc_status.flags |= CC_OVERFLOW_UNUSABLE | CC_NO_CARRY;
	break;

      case PLUS: case MINUS: case MULT:
      case DIV: case UDIV: case MOD: case UMOD: case NEG:
	if (GET_MODE (cc_status.value2) != VOIDmode)
	  cc_status.flags |= CC_NO_OVERFLOW;
	break;
      case ZERO_EXTEND:
	/* (SET r1 (ZERO_EXTEND r2)) on this machine
	   ends with a move insn moving r2 in r2's mode.
	   Thus, the cc's are set for r2.
	   This can set N bit spuriously.  */
	cc_status.flags |= CC_NOT_NEGATIVE;
	/* FALLTHRU — the default case only breaks, so this is benign.  */

      default:
	break;
      }
  /* If the result register also appears in the source expression, the
     source expression no longer describes the flags.  */
  if (cc_status.value1 && GET_CODE (cc_status.value1) == REG
      && cc_status.value2
      && reg_overlap_mentioned_p (cc_status.value1, cc_status.value2))
    cc_status.value2 = 0;
  /* Check for PRE_DEC in dest modifying a register used in src.  */
  if (cc_status.value1 && GET_CODE (cc_status.value1) == MEM
      && GET_CODE (XEXP (cc_status.value1, 0)) == PRE_DEC
      && cc_status.value2
      && reg_overlap_mentioned_p (XEXP (XEXP (cc_status.value1, 0), 0),
				  cc_status.value2))
    cc_status.value2 = 0;
  /* Any FP-register involvement means the flags live in the 68881.  */
  if (((cc_status.value1 && FP_REG_P (cc_status.value1))
       || (cc_status.value2 && FP_REG_P (cc_status.value2))))
    cc_status.flags = CC_IN_68881;
  if (cc_status.value2 && GET_CODE (cc_status.value2) == COMPARE
      && GET_MODE_CLASS (GET_MODE (XEXP (cc_status.value2, 0))) == MODE_FLOAT)
    {
      cc_status.flags = CC_IN_68881;
      /* Operands were swapped so the non-FP value came first; record
	 that the comparison sense is reversed.  */
      if (!FP_REG_P (XEXP (cc_status.value2, 0))
	  && FP_REG_P (XEXP (cc_status.value2, 1)))
	cc_status.flags |= CC_REVERSED;
    }
}
4259 | \f | |
f2fa409d | 4260 | const char * |
821960c7 | 4261 | output_move_const_double (rtx *operands) |
5243fbff | 4262 | { |
d2e633d7 | 4263 | int code = standard_68881_constant_p (operands[1]); |
5243fbff | 4264 | |
d2e633d7 | 4265 | if (code != 0) |
5243fbff | 4266 | { |
d2e633d7 | 4267 | static char buf[40]; |
5243fbff | 4268 | |
3acc77d9 | 4269 | sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff); |
d2e633d7 | 4270 | return buf; |
5243fbff | 4271 | } |
d2e633d7 | 4272 | return "fmove%.d %1,%0"; |
5243fbff | 4273 | } |
4274 | ||
f2fa409d | 4275 | const char * |
821960c7 | 4276 | output_move_const_single (rtx *operands) |
5243fbff | 4277 | { |
d2e633d7 | 4278 | int code = standard_68881_constant_p (operands[1]); |
5243fbff | 4279 | |
d2e633d7 | 4280 | if (code != 0) |
5243fbff | 4281 | { |
d2e633d7 | 4282 | static char buf[40]; |
5243fbff | 4283 | |
3acc77d9 | 4284 | sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff); |
d2e633d7 | 4285 | return buf; |
5243fbff | 4286 | } |
d2e633d7 | 4287 | return "fmove%.s %f1,%0"; |
5243fbff | 4288 | } |
4289 | ||
/* Return nonzero if X, a CONST_DOUBLE, has a value that we can get
   from the "fmovecr" instruction.
   The value, anded with 0xff, gives the code to use in fmovecr
   to get the desired constant.  */

/* This code has been fixed for cross-compilation.  */

/* Nonzero once values_68881[] has been filled in by init_68881_table.  */
static int inited_68881_table = 0;

/* Decimal spellings of the constants available from the 68881 constant
   ROM, in the same order as codes_68881[] and values_68881[].  */
static const char *const strings_68881[7] = {
  "0.0",
  "1.0",
  "10.0",
  "100.0",
  "10000.0",
  "1e8",
  "1e16"
};

/* fmovecr ROM offsets corresponding entry-for-entry to strings_68881[].  */
static const int codes_68881[7] = {
  0x0f,
  0x32,
  0x33,
  0x34,
  0x35,
  0x36,
  0x37
};

/* Binary forms of strings_68881[], filled in lazily by init_68881_table.  */
REAL_VALUE_TYPE values_68881[7];
4320 | ||
4321 | /* Set up values_68881 array by converting the decimal values | |
0c0492c9 | 4322 | strings_68881 to binary. */ |
6205ab4b | 4323 | |
4324 | void | |
821960c7 | 4325 | init_68881_table (void) |
6205ab4b | 4326 | { |
4327 | int i; | |
4328 | REAL_VALUE_TYPE r; | |
3754d046 | 4329 | machine_mode mode; |
6205ab4b | 4330 | |
6a4dac78 | 4331 | mode = SFmode; |
6205ab4b | 4332 | for (i = 0; i < 7; i++) |
4333 | { | |
4334 | if (i == 6) | |
6a4dac78 | 4335 | mode = DFmode; |
6205ab4b | 4336 | r = REAL_VALUE_ATOF (strings_68881[i], mode); |
4337 | values_68881[i] = r; | |
4338 | } | |
4339 | inited_68881_table = 1; | |
4340 | } | |
5243fbff | 4341 | |
4342 | int | |
821960c7 | 4343 | standard_68881_constant_p (rtx x) |
5243fbff | 4344 | { |
945f7b03 | 4345 | const REAL_VALUE_TYPE *r; |
6205ab4b | 4346 | int i; |
5243fbff | 4347 | |
50d9d0ec | 4348 | /* fmovecr must be emulated on the 68040 and 68060, so it shouldn't be |
0c0492c9 | 4349 | used at all on those chips. */ |
f716b74b | 4350 | if (TUNE_68040_60) |
5243fbff | 4351 | return 0; |
4352 | ||
6205ab4b | 4353 | if (! inited_68881_table) |
4354 | init_68881_table (); | |
4355 | ||
945f7b03 | 4356 | r = CONST_DOUBLE_REAL_VALUE (x); |
6205ab4b | 4357 | |
a417188d | 4358 | /* Use real_identical instead of real_equal so that -0.0 is rejected. */ |
6205ab4b | 4359 | for (i = 0; i < 6; i++) |
4360 | { | |
945f7b03 | 4361 | if (real_identical (r, &values_68881[i])) |
6205ab4b | 4362 | return (codes_68881[i]); |
4363 | } | |
4364 | ||
5243fbff | 4365 | if (GET_MODE (x) == SFmode) |
4366 | return 0; | |
6205ab4b | 4367 | |
945f7b03 | 4368 | if (real_equal (r, &values_68881[6])) |
6205ab4b | 4369 | return (codes_68881[6]); |
4370 | ||
5243fbff | 4371 | /* larger powers of ten in the constants ram are not used |
4372 | because they are not equal to a `double' C constant. */ | |
4373 | return 0; | |
4374 | } | |
4375 | ||
4376 | /* If X is a floating-point constant, return the logarithm of X base 2, | |
4377 | or 0 if X is not a power of 2. */ | |
4378 | ||
4379 | int | |
821960c7 | 4380 | floating_exact_log2 (rtx x) |
5243fbff | 4381 | { |
945f7b03 | 4382 | const REAL_VALUE_TYPE *r; |
4383 | REAL_VALUE_TYPE r1; | |
bd58682f | 4384 | int exp; |
5243fbff | 4385 | |
945f7b03 | 4386 | r = CONST_DOUBLE_REAL_VALUE (x); |
5243fbff | 4387 | |
945f7b03 | 4388 | if (real_less (r, &dconst1)) |
5243fbff | 4389 | return 0; |
4390 | ||
945f7b03 | 4391 | exp = real_exponent (r); |
4e6322c5 | 4392 | real_2expN (&r1, exp, DFmode); |
945f7b03 | 4393 | if (real_equal (&r1, r)) |
bd58682f | 4394 | return exp; |
4395 | ||
5243fbff | 4396 | return 0; |
4397 | } | |
4398 | \f | |
5243fbff | 4399 | /* A C compound statement to output to stdio stream STREAM the |
4400 | assembler syntax for an instruction operand X. X is an RTL | |
4401 | expression. | |
4402 | ||
4403 | CODE is a value that can be used to specify one of several ways | |
4404 | of printing the operand. It is used when identical operands | |
4405 | must be printed differently depending on the context. CODE | |
4406 | comes from the `%' specification that was used to request | |
4407 | printing of the operand. If the specification was just `%DIGIT' | |
4408 | then CODE is 0; if the specification was `%LTR DIGIT' then CODE | |
4409 | is the ASCII code for LTR. | |
4410 | ||
4411 | If X is a register, this macro should print the register's name. | |
4412 | The names can be found in an array `reg_names' whose type is | |
4413 | `char *[]'. `reg_names' is initialized from `REGISTER_NAMES'. | |
4414 | ||
4415 | When the machine description has a specification `%PUNCT' (a `%' | |
4416 | followed by a punctuation character), this macro is called with | |
4417 | a null pointer for X and the punctuation character for CODE. | |
4418 | ||
4419 | The m68k specific codes are: | |
4420 | ||
4421 | '.' for dot needed in Motorola-style opcode names. | |
4422 | '-' for an operand pushing on the stack: | |
4423 | sp@-, -(sp) or -(%sp) depending on the style of syntax. | |
4424 | '+' for an operand pushing on the stack: | |
4425 | sp@+, (sp)+ or (%sp)+ depending on the style of syntax. | |
4426 | '@' for a reference to the top word on the stack: | |
4427 | sp@, (sp) or (%sp) depending on the style of syntax. | |
4428 | '#' for an immediate operand prefix (# in MIT and Motorola syntax | |
2a458a3f | 4429 | but & in SGS syntax). |
5243fbff | 4430 | '!' for the cc register (used in an `and to cc' insn). |
4431 | '$' for the letter `s' in an op code, but only on the 68040. | |
4432 | '&' for the letter `d' in an op code, but only on the 68040. | |
f14f0b88 | 4433 | '/' for register prefix needed by longlong.h. |
9c47decb | 4434 | '?' for m68k_library_id_string |
5243fbff | 4435 | |
4436 | 'b' for byte insn (no effect, on the Sun; this is for the ISI). | |
4437 | 'd' to force memory addressing to be absolute, not relative. | |
4438 | 'f' for float insn (print a CONST_DOUBLE as a float rather than in hex) | |
5243fbff | 4439 | 'x' for float insn (print a CONST_DOUBLE as a float rather than in hex), |
4440 | or print pair of registers as rx:ry. | |
dd7218bb | 4441 | 'p' print an address with @PLTPC attached, but only if the operand |
4442 | is not locally-bound. */ | |
5243fbff | 4443 | |
/* Print operand OP to FILE under print code LETTER; the full list of
   m68k %-codes is documented in the block comment above.  */

void
print_operand (FILE *file, rtx op, int letter)
{
  /* Punctuation codes first: these ignore OP entirely.  */
  if (letter == '.')
    {
      if (MOTOROLA)
	fprintf (file, ".");
    }
  else if (letter == '#')
    asm_fprintf (file, "%I");
  else if (letter == '-')
    asm_fprintf (file, MOTOROLA ? "-(%Rsp)" : "%Rsp@-");
  else if (letter == '+')
    asm_fprintf (file, MOTOROLA ? "(%Rsp)+" : "%Rsp@+");
  else if (letter == '@')
    asm_fprintf (file, MOTOROLA ? "(%Rsp)" : "%Rsp@");
  else if (letter == '!')
    asm_fprintf (file, "%Rfpcr");
  else if (letter == '$')
    {
      /* `s' suffix, emitted only when generating 68040 code.  */
      if (TARGET_68040)
	fprintf (file, "s");
    }
  else if (letter == '&')
    {
      /* `d' suffix, emitted only when generating 68040 code.  */
      if (TARGET_68040)
	fprintf (file, "d");
    }
  else if (letter == '/')
    asm_fprintf (file, "%R");
  else if (letter == '?')
    asm_fprintf (file, m68k_library_id_string);
  else if (letter == 'p')
    {
      /* PLT reference: append @PLTPC unless the symbol binds locally.  */
      output_addr_const (file, op);
      if (!(GET_CODE (op) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (op)))
	fprintf (file, "@PLTPC");
    }
  else if (GET_CODE (op) == REG)
    {
      if (letter == 'R')
	/* Print out the second register name of a register pair.
	   I.e., R (6) => 7.  */
	fputs (M68K_REGNAME(REGNO (op) + 1), file);
      else
	fputs (M68K_REGNAME(REGNO (op)), file);
    }
  else if (GET_CODE (op) == MEM)
    {
      output_address (GET_MODE (op), XEXP (op, 0));
      /* %d forces an absolute-long suffix on constant addresses that
	 don't fit the 16-bit absolute-short range (pre-68020 only).  */
      if (letter == 'd' && ! TARGET_68020
	  && CONSTANT_ADDRESS_P (XEXP (op, 0))
	  && !(GET_CODE (XEXP (op, 0)) == CONST_INT
	       && INTVAL (XEXP (op, 0)) < 0x8000
	       && INTVAL (XEXP (op, 0)) >= -0x8000))
	fprintf (file, MOTOROLA ? ".l" : ":l");
    }
  else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == SFmode)
    {
      /* Single-precision constant as a 32-bit hex immediate.  */
      long l;
      REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), l);
      asm_fprintf (file, "%I0x%lx", l & 0xFFFFFFFF);
    }
  else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == XFmode)
    {
      /* Extended-precision constant as three 32-bit hex words.  */
      long l[3];
      REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op), l);
      asm_fprintf (file, "%I0x%lx%08lx%08lx", l[0] & 0xFFFFFFFF,
		   l[1] & 0xFFFFFFFF, l[2] & 0xFFFFFFFF);
    }
  else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == DFmode)
    {
      /* Double-precision constant as two 32-bit hex words.  */
      long l[2];
      REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op), l);
      asm_fprintf (file, "%I0x%lx%08lx", l[0] & 0xFFFFFFFF, l[1] & 0xFFFFFFFF);
    }
  else
    {
      /* Use `print_operand_address' instead of `output_addr_const'
	 to ensure that we print relevant PIC stuff.  */
      asm_fprintf (file, "%I");
      if (TARGET_PCREL
	  && (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == CONST))
	print_operand_address (file, op);
      else
	output_addr_const (file, op);
    }
}
4532 | ||
/* Return the assembler suffix string for TLS relocation RELOC
   (e.g. "@TLSGD"), or the appropriately sized GOT suffix for
   RELOC_GOT.  */

static const char *
m68k_get_reloc_decoration (enum m68k_reloc reloc)
{
  /* To my knowledge, !MOTOROLA assemblers don't support TLS.  */
  gcc_assert (MOTOROLA || reloc == RELOC_GOT);

  switch (reloc)
    {
    case RELOC_GOT:
      if (MOTOROLA)
	{
	  /* -fpic on the 68020 can use a 16-bit GOT offset.  */
	  if (flag_pic == 1 && TARGET_68020)
	    return "@GOT.w";
	  else
	    return "@GOT";
	}
      else
	{
	  if (TARGET_68020)
	    {
	      /* MIT syntax: size suffix depends on -fpic vs -fPIC.  */
	      switch (flag_pic)
		{
		case 1:
		  return ":w";
		case 2:
		  return ":l";
		default:
		  return "";
		}
	    }
	}
      /* !MOTOROLA without TARGET_68020 has no valid decoration.  */
      gcc_unreachable ();

    case RELOC_TLSGD:
      return "@TLSGD";

    case RELOC_TLSLDM:
      return "@TLSLDM";

    case RELOC_TLSLDO:
      return "@TLSLDO";

    case RELOC_TLSIE:
      return "@TLSIE";

    case RELOC_TLSLE:
      return "@TLSLE";

    default:
      gcc_unreachable ();
    }
}
4587 | ||
27f5e69f | 4588 | /* m68k implementation of TARGET_OUTPUT_ADDR_CONST_EXTRA. */ |
6c19bc3c | 4589 | |
27f5e69f | 4590 | static bool |
6c19bc3c | 4591 | m68k_output_addr_const_extra (FILE *file, rtx x) |
4592 | { | |
869bde6b | 4593 | if (GET_CODE (x) == UNSPEC) |
4594 | { | |
4595 | switch (XINT (x, 1)) | |
4596 | { | |
4597 | case UNSPEC_RELOC16: | |
4598 | case UNSPEC_RELOC32: | |
4599 | output_addr_const (file, XVECEXP (x, 0, 0)); | |
2a9840b1 | 4600 | fputs (m68k_get_reloc_decoration |
4601 | ((enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1))), file); | |
869bde6b | 4602 | return true; |
6c19bc3c | 4603 | |
869bde6b | 4604 | default: |
4605 | break; | |
4606 | } | |
4607 | } | |
4608 | ||
4609 | return false; | |
4610 | } | |
4611 | ||
4612 | /* M68K implementation of TARGET_ASM_OUTPUT_DWARF_DTPREL. */ | |
4613 | ||
4614 | static void | |
4615 | m68k_output_dwarf_dtprel (FILE *file, int size, rtx x) | |
4616 | { | |
4617 | gcc_assert (size == 4); | |
4618 | fputs ("\t.long\t", file); | |
4619 | output_addr_const (file, x); | |
4620 | fputs ("@TLSLDO+0x8000", file); | |
6c19bc3c | 4621 | } |
4622 | ||
/* In the name of slightly smaller debug output, and to cater to
   general assembler lossage, recognize various UNSPEC sequences
   and turn them back into a direct symbol reference.  */

static rtx
m68k_delegitimize_address (rtx orig_x)
{
  rtx x;
  struct m68k_address addr;
  rtx unspec;

  /* Let the generic attribute-based delegitimization run first.  */
  orig_x = delegitimize_mem_from_attrs (orig_x);
  x = orig_x;
  if (MEM_P (x))
    x = XEXP (x, 0);

  if (GET_CODE (x) != PLUS || GET_MODE (x) != Pmode)
    return orig_x;

  /* Only addresses whose constant term is a CONST (potentially wrapping
     one of our relocation UNSPECs) are handled below.  */
  if (!m68k_decompose_address (GET_MODE (x), x, false, &addr)
      || addr.offset == NULL_RTX
      || GET_CODE (addr.offset) != CONST)
    return orig_x;

  unspec = XEXP (addr.offset, 0);
  /* Allow an addend around the UNSPEC: (const (plus (unspec ...) N)).  */
  if (GET_CODE (unspec) == PLUS && CONST_INT_P (XEXP (unspec, 1)))
    unspec = XEXP (unspec, 0);
  if (GET_CODE (unspec) != UNSPEC
      || (XINT (unspec, 1) != UNSPEC_RELOC16
	  && XINT (unspec, 1) != UNSPEC_RELOC32))
    return orig_x;
  /* The symbol or label the relocation referred to.  */
  x = XVECEXP (unspec, 0, 0);
  gcc_assert (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF);
  /* Re-apply the addend stripped above, if there was one.  */
  if (unspec != XEXP (addr.offset, 0))
    x = gen_rtx_PLUS (Pmode, x, XEXP (XEXP (addr.offset, 0), 1));
  /* Rebuild the (scaled) index and base terms around the symbol.  */
  if (addr.index)
    {
      rtx idx = addr.index;
      if (addr.scale != 1)
	idx = gen_rtx_MULT (Pmode, idx, GEN_INT (addr.scale));
      x = gen_rtx_PLUS (Pmode, idx, x);
    }
  if (addr.base)
    x = gen_rtx_PLUS (Pmode, addr.base, x);
  /* If the original was a MEM, keep its attributes on the new address.  */
  if (MEM_P (orig_x))
    x = replace_equiv_address_nv (orig_x, x);
  return x;
}
4671 | ||
5243fbff | 4672 | \f |
4673 | /* A C compound statement to output to stdio stream STREAM the | |
4674 | assembler syntax for an instruction operand that is a memory | |
4675 | reference whose address is ADDR. ADDR is an RTL expression. | |
4676 | ||
4677 | Note that this contains a kludge that knows that the only reason | |
4678 | we have an address (plus (label_ref...) (reg...)) when not generating | |
4679 | PIC code is in the insn before a tablejump, and we know that m68k.md | |
4680 | generates a label LInnn: on such an insn. | |
4681 | ||
4682 | It is possible for PIC to generate a (plus (label_ref...) (reg...)) | |
4683 | and we handle that just like we would a (plus (symbol_ref...) (reg...)). | |
4684 | ||
5243fbff | 4685 | This routine is responsible for distinguishing between -fpic and -fPIC |
4686 | style relocations in an address. When generating -fpic code the | |
a361b456 | 4687 | offset is output in word mode (e.g. movel a5@(_foo:w), a0). When generating |
4688 | -fPIC code the offset is output in long mode (e.g. movel a5@(_foo:l), a0) */ | |
5243fbff | 4689 | |
/* Output the assembler syntax for address ADDR to FILE; see the block
   comment above for the MIT/Motorola syntax and PIC considerations.  */

void
print_operand_address (FILE *file, rtx addr)
{
  struct m68k_address address;

  if (!m68k_decompose_address (QImode, addr, true, &address))
    gcc_unreachable ();

  if (address.code == PRE_DEC)
    fprintf (file, MOTOROLA ? "-(%s)" : "%s@-",
	     M68K_REGNAME (REGNO (address.base)));
  else if (address.code == POST_INC)
    fprintf (file, MOTOROLA ? "(%s)+" : "%s@+",
	     M68K_REGNAME (REGNO (address.base)));
  else if (!address.base && !address.index)
    {
      /* A constant address.  */
      gcc_assert (address.offset == addr);
      if (GET_CODE (addr) == CONST_INT)
	{
	  /* (xxx).w or (xxx).l.  */
	  if (IN_RANGE (INTVAL (addr), -0x8000, 0x7fff))
	    fprintf (file, MOTOROLA ? "%d.w" : "%d:w", (int) INTVAL (addr));
	  else
	    fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (addr));
	}
      else if (TARGET_PCREL)
	{
	  /* (d16,PC) or (bd,PC,Xn) (with suppressed index register).  */
	  fputc ('(', file);
	  output_addr_const (file, addr);
	  asm_fprintf (file, flag_pic == 1 ? ":w,%Rpc)" : ":l,%Rpc)");
	}
      else
	{
	  /* (xxx).l.  We need a special case for SYMBOL_REF if the symbol
	     name ends in `.<letter>', as the last 2 characters can be
	     mistaken as a size suffix.  Put the name in parentheses.  */
	  if (GET_CODE (addr) == SYMBOL_REF
	      && strlen (XSTR (addr, 0)) > 2
	      && XSTR (addr, 0)[strlen (XSTR (addr, 0)) - 2] == '.')
	    {
	      putc ('(', file);
	      output_addr_const (file, addr);
	      putc (')', file);
	    }
	  else
	    output_addr_const (file, addr);
	}
    }
  else
    {
      int labelno;

      /* If ADDR is a (d8,pc,Xn) address, this is the number of the
	 label being accessed, otherwise it is -1.  */
      labelno = (address.offset
		 && !address.base
		 && GET_CODE (address.offset) == LABEL_REF
		 ? CODE_LABEL_NUMBER (XEXP (address.offset, 0))
		 : -1);
      if (MOTOROLA)
	{
	  /* Print the "offset(base" component.  */
	  if (labelno >= 0)
	    asm_fprintf (file, "%LL%d(%Rpc,", labelno);
	  else
	    {
	      if (address.offset)
		output_addr_const (file, address.offset);

	      putc ('(', file);
	      if (address.base)
		fputs (M68K_REGNAME (REGNO (address.base)), file);
	    }
	  /* Print the ",index" component, if any.  */
	  if (address.index)
	    {
	      if (address.base)
		putc (',', file);
	      fprintf (file, "%s.%c",
		       M68K_REGNAME (REGNO (address.index)),
		       GET_MODE (address.index) == HImode ? 'w' : 'l');
	      if (address.scale != 1)
		fprintf (file, "*%d", address.scale);
	    }
	  putc (')', file);
	}
      else /* !MOTOROLA */
	{
	  if (!address.offset && !address.index)
	    fprintf (file, "%s@", M68K_REGNAME (REGNO (address.base)));
	  else
	    {
	      /* Print the "base@(offset" component.  */
	      if (labelno >= 0)
		asm_fprintf (file, "%Rpc@(%LL%d", labelno);
	      else
		{
		  if (address.base)
		    fputs (M68K_REGNAME (REGNO (address.base)), file);
		  fprintf (file, "@(");
		  if (address.offset)
		    output_addr_const (file, address.offset);
		}
	      /* Print the ",index" component, if any.  */
	      if (address.index)
		{
		  fprintf (file, ",%s:%c",
			   M68K_REGNAME (REGNO (address.index)),
			   GET_MODE (address.index) == HImode ? 'w' : 'l');
		  if (address.scale != 1)
		    fprintf (file, ":%d", address.scale);
		}
	      putc (')', file);
	    }
	}
    }
}
5b1c4249 | 4809 | \f |
4810 | /* Check for cases where a clr insns can be omitted from code using | |
4811 | strict_low_part sets. For example, the second clrl here is not needed: | |
4812 | clrl d0; movw a0@+,d0; use d0; clrl d0; movw a0@+; use d0; ... | |
4813 | ||
4814 | MODE is the mode of this STRICT_LOW_PART set. FIRST_INSN is the clear | |
4815 | insn we are checking for redundancy. TARGET is the register set by the | |
4816 | clear insn. */ | |
4817 | ||
821960c7 | 4818 | bool |
3754d046 | 4819 | strict_low_part_peephole_ok (machine_mode mode, rtx_insn *first_insn, |
821960c7 | 4820 | rtx target) |
5b1c4249 | 4821 | { |
9e9d6337 | 4822 | rtx_insn *p = first_insn; |
5b1c4249 | 4823 | |
e64adf14 | 4824 | while ((p = PREV_INSN (p))) |
5b1c4249 | 4825 | { |
e64adf14 | 4826 | if (NOTE_INSN_BASIC_BLOCK_P (p)) |
4827 | return false; | |
4828 | ||
4829 | if (NOTE_P (p)) | |
4830 | continue; | |
4831 | ||
5b1c4249 | 4832 | /* If it isn't an insn, then give up. */ |
e64adf14 | 4833 | if (!INSN_P (p)) |
821960c7 | 4834 | return false; |
5b1c4249 | 4835 | |
4836 | if (reg_set_p (target, p)) | |
4837 | { | |
4838 | rtx set = single_set (p); | |
4839 | rtx dest; | |
4840 | ||
4841 | /* If it isn't an easy to recognize insn, then give up. */ | |
4842 | if (! set) | |
821960c7 | 4843 | return false; |
5b1c4249 | 4844 | |
4845 | dest = SET_DEST (set); | |
4846 | ||
4847 | /* If this sets the entire target register to zero, then our | |
4848 | first_insn is redundant. */ | |
4849 | if (rtx_equal_p (dest, target) | |
4850 | && SET_SRC (set) == const0_rtx) | |
821960c7 | 4851 | return true; |
5b1c4249 | 4852 | else if (GET_CODE (dest) == STRICT_LOW_PART |
4853 | && GET_CODE (XEXP (dest, 0)) == REG | |
4854 | && REGNO (XEXP (dest, 0)) == REGNO (target) | |
4855 | && (GET_MODE_SIZE (GET_MODE (XEXP (dest, 0))) | |
4856 | <= GET_MODE_SIZE (mode))) | |
4857 | /* This is a strict low part set which modifies less than | |
4858 | we are using, so it is safe. */ | |
4859 | ; | |
4860 | else | |
821960c7 | 4861 | return false; |
5b1c4249 | 4862 | } |
5b1c4249 | 4863 | } |
4864 | ||
821960c7 | 4865 | return false; |
5b1c4249 | 4866 | } |
0c23ddd4 | 4867 | |
f0ecff58 | 4868 | /* Operand predicates for implementing asymmetric pc-relative addressing |
4869 | on m68k. The m68k supports pc-relative addressing (mode 7, register 2) | |
dfd1079d | 4870 | when used as a source operand, but not as a destination operand. |
f0ecff58 | 4871 | |
4872 | We model this by restricting the meaning of the basic predicates | |
4873 | (general_operand, memory_operand, etc) to forbid the use of this | |
4874 | addressing mode, and then define the following predicates that permit | |
4875 | this addressing mode. These predicates can then be used for the | |
4876 | source operands of the appropriate instructions. | |
4877 | ||
4878 | n.b. While it is theoretically possible to change all machine patterns | |
4879 | to use this addressing more where permitted by the architecture, | |
4880 | it has only been implemented for "common" cases: SImode, HImode, and | |
4881 | QImode operands, and only for the principle operations that would | |
4882 | require this addressing mode: data movement and simple integer operations. | |
4883 | ||
4884 | In parallel with these new predicates, two new constraint letters | |
4885 | were defined: 'S' and 'T'. 'S' is the -mpcrel analog of 'm'. | |
4886 | 'T' replaces 's' in the non-pcrel case. It is a no-op in the pcrel case. | |
4887 | In the pcrel case 's' is only valid in combination with 'a' registers. | |
4888 | See addsi3, subsi3, cmpsi, and movsi patterns for a better understanding | |
4889 | of how these constraints are used. | |
4890 | ||
4891 | The use of these predicates is strictly optional, though patterns that | |
4892 | don't will cause an extra reload register to be allocated where one | |
4893 | was not necessary: | |
4894 | ||
4895 | lea (abc:w,%pc),%a0 ; need to reload address | |
4896 | moveq &1,%d1 ; since write to pc-relative space | |
4897 | movel %d1,%a0@ ; is not allowed | |
4898 | ... | |
4899 | lea (abc:w,%pc),%a1 ; no need to reload address here | |
4900 | movel %a1@,%d0 ; since "movel (abc:w,%pc),%d0" is ok | |
4901 | ||
4902 | For more info, consult tiemann@cygnus.com. | |
4903 | ||
4904 | ||
4905 | All of the ugliness with predicates and constraints is due to the | |
4906 | simple fact that the m68k does not allow a pc-relative addressing | |
4907 | mode as a destination. gcc does not distinguish between source and | |
4908 | destination addresses. Hence, if we claim that pc-relative address | |
bc409cb4 | 4909 | modes are valid, e.g. TARGET_LEGITIMATE_ADDRESS_P accepts them, then we |
f0ecff58 | 4910 | end up with invalid code. To get around this problem, we left |
4911 | pc-relative modes as invalid addresses, and then added special | |
4912 | predicates and constraints to accept them. | |
4913 | ||
4914 | A cleaner way to handle this is to modify gcc to distinguish | |
4915 | between source and destination addresses. We can then say that | |
4916 | pc-relative is a valid source address but not a valid destination | |
4917 | address, and hopefully avoid a lot of the predicate and constraint | |
4918 | hackery. Unfortunately, this would be a pretty big change. It would | |
4919 | be a useful change for a number of ports, but there aren't any current | |
4920 | plans to undertake this. | |
4921 | ||
4922 | ***************************************************************************/ | |
4923 | ||
4924 | ||
/* Output the assembly for a 32-bit AND of operands[0] (the destination)
   with operands[2], choosing a cheaper encoding where the constant mask
   allows it:

   - If the upper 16 bits of a constant mask are all ones, only the low
     word can change, so emit a word-sized "and" (or "clr" if the low
     word of the mask is zero) on the low half of the destination.
   - If the mask clears exactly one bit, emit a single "bclr".
   - Otherwise fall back to a full 32-bit "and".

   May rewrite operands[0], operands[1] and operands[2] in place so that
   they match the returned template.  */
const char *
output_andsi3 (rtx *operands)
{
  int logval;
  if (GET_CODE (operands[2]) == CONST_INT
      /* Mask has all of bits 16..31 set: only the low word is affected.  */
      && (INTVAL (operands[2]) | 0xffff) == -1
      && (DATA_REG_P (operands[0])
	  || offsettable_memref_p (operands[0]))
      && !TARGET_COLDFIRE)
    {
      if (GET_CODE (operands[0]) != REG)
	/* Narrow a memory destination to its low (big-endian: last) word.  */
	operands[0] = adjust_address (operands[0], HImode, 2);
      operands[2] = GEN_INT (INTVAL (operands[2]) & 0xffff);
      /* Do not delete a following tstl %0 insn; that would be incorrect.  */
      CC_STATUS_INIT;
      if (operands[2] == const0_rtx)
	return "clr%.w %0";
      return "and%.w %2,%0";
    }
  if (GET_CODE (operands[2]) == CONST_INT
      /* Mask clears exactly one bit; LOGVAL is that bit's number.  */
      && (logval = exact_log2 (~ INTVAL (operands[2]) & 0xffffffff)) >= 0
      && (DATA_REG_P (operands[0])
	  || offsettable_memref_p (operands[0])))
    {
      if (DATA_REG_P (operands[0]))
	operands[1] = GEN_INT (logval);
      else
        {
	  /* For memory, bclr acts on a byte: point the reference at the
	     byte containing the bit (big-endian byte order) and use the
	     bit number within that byte.  */
	  operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
	  operands[1] = GEN_INT (logval % 8);
        }
      /* This does not set condition codes in a standard way.  */
      CC_STATUS_INIT;
      return "bclr %1,%0";
    }
  return "and%.l %2,%0";
}
4962 | ||
/* Output the assembly for a 32-bit inclusive OR of operands[0] (the
   destination) with operands[2], choosing a cheaper encoding where the
   constant allows it:

   - If the constant fits in 16 bits, only the low word can change, so
     emit a word-sized "or" (or a "move" when the constant is 0xffff,
     which forces all low-word bits to one anyway).
   - If the constant sets exactly one bit, emit a single "bset".
   - Otherwise fall back to a full 32-bit "or".

   May rewrite operands[0] and operands[1] in place so that they match
   the returned template.  */
const char *
output_iorsi3 (rtx *operands)
{
  register int logval;
  if (GET_CODE (operands[2]) == CONST_INT
      /* Constant fits in the low 16 bits.  */
      && INTVAL (operands[2]) >> 16 == 0
      && (DATA_REG_P (operands[0])
	  || offsettable_memref_p (operands[0]))
      && !TARGET_COLDFIRE)
    {
      if (GET_CODE (operands[0]) != REG)
	/* Narrow a memory destination to its low (big-endian: last) word.  */
	operands[0] = adjust_address (operands[0], HImode, 2);
      /* Do not delete a following tstl %0 insn; that would be incorrect.  */
      CC_STATUS_INIT;
      if (INTVAL (operands[2]) == 0xffff)
	return "mov%.w %2,%0";
      return "or%.w %2,%0";
    }
  if (GET_CODE (operands[2]) == CONST_INT
      /* Constant sets exactly one bit; LOGVAL is that bit's number.  */
      && (logval = exact_log2 (INTVAL (operands[2]) & 0xffffffff)) >= 0
      && (DATA_REG_P (operands[0])
	  || offsettable_memref_p (operands[0])))
    {
      if (DATA_REG_P (operands[0]))
	operands[1] = GEN_INT (logval);
      else
        {
	  /* For memory, bset acts on a byte: point the reference at the
	     byte containing the bit (big-endian byte order) and use the
	     bit number within that byte.  */
	  operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
	  operands[1] = GEN_INT (logval % 8);
        }
      /* bset does not set condition codes in a standard way.  */
      CC_STATUS_INIT;
      return "bset %1,%0";
    }
  return "or%.l %2,%0";
}
4998 | ||
/* Output the assembly for a 32-bit exclusive OR of operands[0] (the
   destination) with operands[2], choosing a cheaper encoding where the
   constant allows it:

   - If the constant fits in 16 bits, only the low word can change, so
     emit a word-sized "eor" (or "not" when the constant is 0xffff,
     which flips every low-word bit).
   - If the constant toggles exactly one bit, emit a single "bchg".
   - Otherwise fall back to a full 32-bit "eor".

   May rewrite operands[0] and operands[1] in place so that they match
   the returned template.  */
const char *
output_xorsi3 (rtx *operands)
{
  register int logval;
  if (GET_CODE (operands[2]) == CONST_INT
      /* Constant fits in the low 16 bits.  */
      && INTVAL (operands[2]) >> 16 == 0
      && (offsettable_memref_p (operands[0]) || DATA_REG_P (operands[0]))
      && !TARGET_COLDFIRE)
    {
      if (! DATA_REG_P (operands[0]))
	/* Narrow a memory destination to its low (big-endian: last) word.  */
	operands[0] = adjust_address (operands[0], HImode, 2);
      /* Do not delete a following tstl %0 insn; that would be incorrect.  */
      CC_STATUS_INIT;
      if (INTVAL (operands[2]) == 0xffff)
	return "not%.w %0";
      return "eor%.w %2,%0";
    }
  if (GET_CODE (operands[2]) == CONST_INT
      /* Constant toggles exactly one bit; LOGVAL is that bit's number.  */
      && (logval = exact_log2 (INTVAL (operands[2]) & 0xffffffff)) >= 0
      && (DATA_REG_P (operands[0])
	  || offsettable_memref_p (operands[0])))
    {
      if (DATA_REG_P (operands[0]))
	operands[1] = GEN_INT (logval);
      else
        {
	  /* For memory, bchg acts on a byte: point the reference at the
	     byte containing the bit (big-endian byte order) and use the
	     bit number within that byte.  */
	  operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
	  operands[1] = GEN_INT (logval % 8);
        }
      /* bchg does not set condition codes in a standard way.  */
      CC_STATUS_INIT;
      return "bchg %1,%0";
    }
  return "eor%.l %2,%0";
}
2cb4ac60 | 5033 | |
dd7218bb | 5034 | /* Return the instruction that should be used for a call to address X, |
5035 | which is known to be in operand 0. */ | |
5036 | ||
5037 | const char * | |
5038 | output_call (rtx x) | |
5039 | { | |
5040 | if (symbolic_operand (x, VOIDmode)) | |
5041 | return m68k_symbolic_call; | |
5042 | else | |
5043 | return "jsr %a0"; | |
5044 | } | |
5045 | ||
33fb08b8 | 5046 | /* Likewise sibling calls. */ |
5047 | ||
5048 | const char * | |
5049 | output_sibcall (rtx x) | |
5050 | { | |
5051 | if (symbolic_operand (x, VOIDmode)) | |
5052 | return m68k_symbolic_jump; | |
5053 | else | |
5054 | return "jmp %a0"; | |
5055 | } | |
5056 | ||
/* Worker for TARGET_ASM_OUTPUT_MI_THUNK.  Emit to FILE the body of a
   thunk for FUNCTION: adjust the incoming "this" pointer (stored at
   4(%sp)) by DELTA and, if VCALL_OFFSET is nonzero, additionally by
   *(*this + VCALL_OFFSET), then tail-call FUNCTION.  The adjustment is
   performed directly on the stack slot so that all argument registers
   stay untouched.  */
static void
m68k_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
		      HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
		      tree function)
{
  rtx this_slot, offset, addr, mem, tmp;
  rtx_insn *insn;

  /* Avoid clobbering the struct value reg by using the
     static chain reg as a temporary.  */
  tmp = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);

  /* Pretend to be a post-reload pass while generating rtl.  */
  reload_completed = 1;

  /* The "this" pointer is stored at 4(%sp).  */
  this_slot = gen_rtx_MEM (Pmode, plus_constant (Pmode,
						 stack_pointer_rtx, 4));

  /* Add DELTA to THIS.  */
  if (delta != 0)
    {
      /* Make the offset a legitimate operand for memory addition:
	 addq/subq handle -8..8 inline, and a constant that would be
	 emitted as a moveq must go through %d0 first.  */
      offset = GEN_INT (delta);
      if ((delta < -8 || delta > 8)
	  && (TARGET_COLDFIRE || USE_MOVQ (delta)))
	{
	  emit_move_insn (gen_rtx_REG (Pmode, D0_REG), offset);
	  offset = gen_rtx_REG (Pmode, D0_REG);
	}
      /* Add straight into the stack slot, leaving registers untouched.  */
      emit_insn (gen_add3_insn (copy_rtx (this_slot),
				copy_rtx (this_slot), offset));
    }

  /* If needed, add *(*THIS + VCALL_OFFSET) to THIS.  */
  if (vcall_offset != 0)
    {
      /* Set the static chain register to *THIS.  */
      emit_move_insn (tmp, this_slot);
      emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));

      /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET.  */
      addr = plus_constant (Pmode, tmp, vcall_offset);
      if (!m68k_legitimate_address_p (Pmode, addr, true))
	{
	  emit_insn (gen_rtx_SET (tmp, addr));
	  addr = tmp;
	}

      /* Load the offset into %d0 and add it to THIS.  */
      emit_move_insn (gen_rtx_REG (Pmode, D0_REG),
		      gen_rtx_MEM (Pmode, addr));
      emit_insn (gen_add3_insn (copy_rtx (this_slot),
				copy_rtx (this_slot),
				gen_rtx_REG (Pmode, D0_REG)));
    }

  /* Jump to the target function.  Use a sibcall if direct jumps are
     allowed, otherwise load the address into a register first.  */
  mem = DECL_RTL (function);
  if (!sibcall_operand (XEXP (mem, 0), VOIDmode))
    {
      gcc_assert (flag_pic);

      if (!TARGET_SEP_DATA)
	{
	  /* Use the static chain register as a temporary (call-clobbered)
	     GOT pointer for this function.  We can use the static chain
	     register because it isn't live on entry to the thunk.  */
	  SET_REGNO (pic_offset_table_rtx, STATIC_CHAIN_REGNUM);
	  emit_insn (gen_load_got (pic_offset_table_rtx));
	}
      legitimize_pic_address (XEXP (mem, 0), Pmode, tmp);
      mem = replace_equiv_address (mem, tmp);
    }
  insn = emit_call_insn (gen_sibcall (mem, const0_rtx));
  SIBLING_CALL_P (insn) = 1;

  /* Run just enough of rest_of_compilation: split the emitted insns
     and pass them straight to the final-assembly pass.  */
  insn = get_insns ();
  split_all_insns_noflow ();
  final_start_function (insn, file, 1);
  final (insn, file, 1);
  final_end_function ();

  /* Clean up the vars set above.  */
  reload_completed = 0;

  /* Restore the original PIC register.  */
  if (flag_pic)
    SET_REGNO (pic_offset_table_rtx, PIC_REG);
}
c8c47ddf | 5149 | |
5150 | /* Worker function for TARGET_STRUCT_VALUE_RTX. */ | |
5151 | ||
5152 | static rtx | |
5153 | m68k_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED, | |
5154 | int incoming ATTRIBUTE_UNUSED) | |
5155 | { | |
5156 | return gen_rtx_REG (Pmode, M68K_STRUCT_VALUE_REGNUM); | |
5157 | } | |
b09edf9a | 5158 | |
5159 | /* Return nonzero if register old_reg can be renamed to register new_reg. */ | |
5160 | int | |
5161 | m68k_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED, | |
5162 | unsigned int new_reg) | |
5163 | { | |
5164 | ||
5165 | /* Interrupt functions can only use registers that have already been | |
5166 | saved by the prologue, even if they would normally be | |
5167 | call-clobbered. */ | |
5168 | ||
f9a44575 | 5169 | if ((m68k_get_function_kind (current_function_decl) |
5170 | == m68k_fk_interrupt_handler) | |
3072d30e | 5171 | && !df_regs_ever_live_p (new_reg)) |
b09edf9a | 5172 | return 0; |
5173 | ||
5174 | return 1; | |
5175 | } | |
77236994 | 5176 | |
b395382f | 5177 | /* Implement TARGET_HARD_REGNO_MODE_OK. On the 68000, we let the cpu |
5178 | registers can hold any mode, but restrict the 68881 registers to | |
5179 | floating-point modes. */ | |
350e24f5 | 5180 | |
b395382f | 5181 | static bool |
5182 | m68k_hard_regno_mode_ok (unsigned int regno, machine_mode mode) | |
77236994 | 5183 | { |
0b93d200 | 5184 | if (DATA_REGNO_P (regno)) |
77236994 | 5185 | { |
065b42aa | 5186 | /* Data Registers, can hold aggregate if fits in. */ |
5187 | if (regno + GET_MODE_SIZE (mode) / 4 <= 8) | |
5188 | return true; | |
77236994 | 5189 | } |
0b93d200 | 5190 | else if (ADDRESS_REGNO_P (regno)) |
77236994 | 5191 | { |
065b42aa | 5192 | if (regno + GET_MODE_SIZE (mode) / 4 <= 16) |
5193 | return true; | |
77236994 | 5194 | } |
0b93d200 | 5195 | else if (FP_REGNO_P (regno)) |
77236994 | 5196 | { |
5197 | /* FPU registers, hold float or complex float of long double or | |
065b42aa | 5198 | smaller. */ |
5199 | if ((GET_MODE_CLASS (mode) == MODE_FLOAT | |
5200 | || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT) | |
58c23110 | 5201 | && GET_MODE_UNIT_SIZE (mode) <= TARGET_FP_REG_SIZE) |
065b42aa | 5202 | return true; |
77236994 | 5203 | } |
5204 | return false; | |
5205 | } | |
58c23110 | 5206 | |
350e24f5 | 5207 | /* Implement SECONDARY_RELOAD_CLASS. */ |
5208 | ||
5209 | enum reg_class | |
5210 | m68k_secondary_reload_class (enum reg_class rclass, | |
3754d046 | 5211 | machine_mode mode, rtx x) |
350e24f5 | 5212 | { |
5213 | int regno; | |
5214 | ||
5215 | regno = true_regnum (x); | |
5216 | ||
5217 | /* If one operand of a movqi is an address register, the other | |
5218 | operand must be a general register or constant. Other types | |
5219 | of operand must be reloaded through a data register. */ | |
5220 | if (GET_MODE_SIZE (mode) == 1 | |
5221 | && reg_classes_intersect_p (rclass, ADDR_REGS) | |
5222 | && !(INT_REGNO_P (regno) || CONSTANT_P (x))) | |
5223 | return DATA_REGS; | |
5224 | ||
5225 | /* PC-relative addresses must be loaded into an address register first. */ | |
5226 | if (TARGET_PCREL | |
5227 | && !reg_class_subset_p (rclass, ADDR_REGS) | |
5228 | && symbolic_operand (x, VOIDmode)) | |
5229 | return ADDR_REGS; | |
5230 | ||
5231 | return NO_REGS; | |
5232 | } | |
5233 | ||
5234 | /* Implement PREFERRED_RELOAD_CLASS. */ | |
5235 | ||
5236 | enum reg_class | |
5237 | m68k_preferred_reload_class (rtx x, enum reg_class rclass) | |
5238 | { | |
5239 | enum reg_class secondary_class; | |
5240 | ||
5241 | /* If RCLASS might need a secondary reload, try restricting it to | |
5242 | a class that doesn't. */ | |
5243 | secondary_class = m68k_secondary_reload_class (rclass, GET_MODE (x), x); | |
5244 | if (secondary_class != NO_REGS | |
5245 | && reg_class_subset_p (secondary_class, rclass)) | |
5246 | return secondary_class; | |
5247 | ||
5248 | /* Prefer to use moveq for in-range constants. */ | |
5249 | if (GET_CODE (x) == CONST_INT | |
5250 | && reg_class_subset_p (DATA_REGS, rclass) | |
5251 | && IN_RANGE (INTVAL (x), -0x80, 0x7f)) | |
5252 | return DATA_REGS; | |
5253 | ||
5254 | /* ??? Do we really need this now? */ | |
5255 | if (GET_CODE (x) == CONST_DOUBLE | |
5256 | && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT) | |
5257 | { | |
5258 | if (TARGET_HARD_FLOAT && reg_class_subset_p (FP_REGS, rclass)) | |
5259 | return FP_REGS; | |
5260 | ||
5261 | return NO_REGS; | |
5262 | } | |
5263 | ||
5264 | return rclass; | |
5265 | } | |
5266 | ||
58c23110 | 5267 | /* Return floating point values in a 68881 register. This makes 68881 code |
5268 | a little bit faster. It also makes -msoft-float code incompatible with | |
5269 | hard-float code, so people have to be careful not to mix the two. | |
334ec2d8 | 5270 | For ColdFire it was decided the ABI incompatibility is undesirable. |
58c23110 | 5271 | If there is need for a hard-float ABI it is probably worth doing it |
5272 | properly and also passing function arguments in FP registers. */ | |
5273 | rtx | |
3754d046 | 5274 | m68k_libcall_value (machine_mode mode) |
58c23110 | 5275 | { |
5276 | switch (mode) { | |
916ace94 | 5277 | case E_SFmode: |
5278 | case E_DFmode: | |
5279 | case E_XFmode: | |
58c23110 | 5280 | if (TARGET_68881) |
dbaae14a | 5281 | return gen_rtx_REG (mode, FP0_REG); |
58c23110 | 5282 | break; |
5283 | default: | |
5284 | break; | |
5285 | } | |
869bde6b | 5286 | |
5287 | return gen_rtx_REG (mode, m68k_libcall_value_in_a0_p ? A0_REG : D0_REG); | |
58c23110 | 5288 | } |
5289 | ||
d09fd72c | 5290 | /* Location in which function value is returned. |
5291 | NOTE: Due to differences in ABIs, don't call this function directly, | |
5292 | use FUNCTION_VALUE instead. */ | |
58c23110 | 5293 | rtx |
fb80456a | 5294 | m68k_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED) |
58c23110 | 5295 | { |
3754d046 | 5296 | machine_mode mode; |
58c23110 | 5297 | |
5298 | mode = TYPE_MODE (valtype); | |
5299 | switch (mode) { | |
916ace94 | 5300 | case E_SFmode: |
5301 | case E_DFmode: | |
5302 | case E_XFmode: | |
58c23110 | 5303 | if (TARGET_68881) |
dbaae14a | 5304 | return gen_rtx_REG (mode, FP0_REG); |
58c23110 | 5305 | break; |
5306 | default: | |
5307 | break; | |
5308 | } | |
5309 | ||
2d329930 | 5310 | /* If the function returns a pointer, push that into %a0. */ |
5311 | if (func && POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (func)))) | |
5312 | /* For compatibility with the large body of existing code which | |
5313 | does not always properly declare external functions returning | |
5314 | pointer types, the m68k/SVR4 convention is to copy the value | |
5315 | returned for pointer functions from a0 to d0 in the function | |
5316 | epilogue, so that callers that have neglected to properly | |
5317 | declare the callee can still find the correct return value in | |
5318 | d0. */ | |
5319 | return gen_rtx_PARALLEL | |
5320 | (mode, | |
5321 | gen_rtvec (2, | |
5322 | gen_rtx_EXPR_LIST (VOIDmode, | |
5323 | gen_rtx_REG (mode, A0_REG), | |
5324 | const0_rtx), | |
5325 | gen_rtx_EXPR_LIST (VOIDmode, | |
5326 | gen_rtx_REG (mode, D0_REG), | |
5327 | const0_rtx))); | |
5328 | else if (POINTER_TYPE_P (valtype)) | |
5329 | return gen_rtx_REG (mode, A0_REG); | |
58c23110 | 5330 | else |
2d329930 | 5331 | return gen_rtx_REG (mode, D0_REG); |
58c23110 | 5332 | } |
159cec75 | 5333 | |
/* Worker function for TARGET_RETURN_IN_MEMORY.  */
#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
static bool
m68k_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  machine_mode mode = TYPE_MODE (type);

  if (mode == BLKmode)
    return true;

  /* If TYPE's known alignment is less than the alignment of MODE that
     would contain the structure, then return in memory.  We need to
     do so to maintain the compatibility between code compiled with
     -mstrict-align and that compiled with -mno-strict-align.  */
  return (AGGREGATE_TYPE_P (type)
	  && TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (mode));
}
#endif
1d86aeab | 5355 | |
/* CPU to schedule the program for.  (Set during target option handling;
   the initialization is not visible in this chunk.)  */
enum attr_cpu m68k_sched_cpu;

/* MAC unit to schedule the program for.  */
enum attr_mac m68k_sched_mac;
5361 | ||
/* Classification of a single instruction operand, used by the scheduler
   attribute helpers below (sched_attr_op_type and friends).  The MEM
   variants mirror the m68k effective-address modes.  */
enum attr_op_type
{
  /* No operand.  */
  OP_TYPE_NONE,

  /* Integer register.  */
  OP_TYPE_RN,

  /* FP register.  */
  OP_TYPE_FPN,

  /* Implicit mem reference (e.g. stack).  */
  OP_TYPE_MEM1,

  /* Memory without offset or indexing.  EA modes 2, 3 and 4.  */
  OP_TYPE_MEM234,

  /* Memory with offset but without indexing.  EA mode 5.  */
  OP_TYPE_MEM5,

  /* Memory with indexing.  EA mode 6.  */
  OP_TYPE_MEM6,

  /* Memory referenced by absolute address.  EA mode 7.  */
  OP_TYPE_MEM7,

  /* Immediate operand that doesn't require extension word.  */
  OP_TYPE_IMM_Q,

  /* Immediate 16 bit operand.  */
  OP_TYPE_IMM_W,

  /* Immediate 32 bit operand.  */
  OP_TYPE_IMM_L
};
5398 | ||
1d86aeab | 5399 | /* Return type of memory ADDR_RTX refers to. */ |
5400 | static enum attr_op_type | |
3754d046 | 5401 | sched_address_type (machine_mode mode, rtx addr_rtx) |
1d86aeab | 5402 | { |
5403 | struct m68k_address address; | |
5404 | ||
3c904dda | 5405 | if (symbolic_operand (addr_rtx, VOIDmode)) |
5406 | return OP_TYPE_MEM7; | |
5407 | ||
1d86aeab | 5408 | if (!m68k_decompose_address (mode, addr_rtx, |
5409 | reload_completed, &address)) | |
5410 | { | |
3c904dda | 5411 | gcc_assert (!reload_completed); |
1d86aeab | 5412 | /* Reload will likely fix the address to be in the register. */ |
5413 | return OP_TYPE_MEM234; | |
5414 | } | |
5415 | ||
5416 | if (address.scale != 0) | |
5417 | return OP_TYPE_MEM6; | |
5418 | ||
5419 | if (address.base != NULL_RTX) | |
5420 | { | |
5421 | if (address.offset == NULL_RTX) | |
5422 | return OP_TYPE_MEM234; | |
5423 | ||
5424 | return OP_TYPE_MEM5; | |
5425 | } | |
5426 | ||
5427 | gcc_assert (address.offset != NULL_RTX); | |
5428 | ||
5429 | return OP_TYPE_MEM7; | |
5430 | } | |
5431 | ||
3c904dda | 5432 | /* Return X or Y (depending on OPX_P) operand of INSN. */ |
5433 | static rtx | |
ed3e6e5d | 5434 | sched_get_operand (rtx_insn *insn, bool opx_p) |
3c904dda | 5435 | { |
5436 | int i; | |
5437 | ||
5438 | if (recog_memoized (insn) < 0) | |
5439 | gcc_unreachable (); | |
5440 | ||
5441 | extract_constrain_insn_cached (insn); | |
5442 | ||
5443 | if (opx_p) | |
5444 | i = get_attr_opx (insn); | |
5445 | else | |
5446 | i = get_attr_opy (insn); | |
5447 | ||
5448 | if (i >= recog_data.n_operands) | |
5449 | return NULL; | |
5450 | ||
5451 | return recog_data.operand[i]; | |
5452 | } | |
5453 | ||
/* Return type of INSN's operand X (if OPX_P) or operand Y (if !OPX_P).
   If ADDRESS_P is true, return type of the memory location the operand
   refers to rather than of the operand itself.  Classifies registers,
   memory references, and immediates by the encoding cost (quick / word /
   long) visible to the scheduler.  */
static enum attr_op_type
sched_attr_op_type (rtx_insn *insn, bool opx_p, bool address_p)
{
  rtx op;

  op = sched_get_operand (insn, opx_p);

  if (op == NULL)
    {
      /* Only pre-reload insns may lack the operand; treat as a plain
	 register operand.  */
      gcc_assert (!reload_completed);
      return OP_TYPE_RN;
    }

  if (address_p)
    /* OP itself is the address; the mode is irrelevant here.  */
    return sched_address_type (QImode, op);

  if (memory_operand (op, VOIDmode))
    return sched_address_type (GET_MODE (op), XEXP (op, 0));

  if (register_operand (op, VOIDmode))
    {
      /* Before reload we can only go by the mode; after reload the
	 actual hard register tells us whether this is an FP operand.  */
      if ((!reload_completed && FLOAT_MODE_P (GET_MODE (op)))
	  || (reload_completed && FP_REG_P (op)))
	return OP_TYPE_FPN;

      return OP_TYPE_RN;
    }

  if (GET_CODE (op) == CONST_INT)
    {
      int ival;

      ival = INTVAL (op);

      /* Check for quick constants: immediates encoded in the opcode
	 itself for addq/subq, moveq and mov3q.  */
      switch (get_attr_type (insn))
	{
	case TYPE_ALUQ_L:
	  if (IN_RANGE (ival, 1, 8) || IN_RANGE (ival, -8, -1))
	    return OP_TYPE_IMM_Q;

	  gcc_assert (!reload_completed);
	  break;

	case TYPE_MOVEQ_L:
	  if (USE_MOVQ (ival))
	    return OP_TYPE_IMM_Q;

	  gcc_assert (!reload_completed);
	  break;

	case TYPE_MOV3Q_L:
	  if (valid_mov3q_const (ival))
	    return OP_TYPE_IMM_Q;

	  gcc_assert (!reload_completed);
	  break;

	default:
	  break;
	}

      /* Otherwise the immediate needs a 16-bit or 32-bit extension word.  */
      if (IN_RANGE (ival, -0x8000, 0x7fff))
	return OP_TYPE_IMM_W;

      return OP_TYPE_IMM_L;
    }

  if (GET_CODE (op) == CONST_DOUBLE)
    {
      switch (GET_MODE (op))
	{
	case E_SFmode:
	  return OP_TYPE_IMM_W;

	case E_VOIDmode:
	case E_DFmode:
	  return OP_TYPE_IMM_L;

	default:
	  gcc_unreachable ();
	}
    }

  if (GET_CODE (op) == CONST
      || symbolic_operand (op, VOIDmode)
      || LABEL_P (op))
    {
      switch (GET_MODE (op))
	{
	case E_QImode:
	  return OP_TYPE_IMM_Q;

	case E_HImode:
	  return OP_TYPE_IMM_W;

	case E_SImode:
	  return OP_TYPE_IMM_L;

	default:
	  if (symbolic_operand (m68k_unwrap_symbol (op, false), VOIDmode))
	    /* Just a guess.  */
	    return OP_TYPE_IMM_W;

	  return OP_TYPE_IMM_L;
	}
    }

  /* Anything else should only appear before reload.  */
  gcc_assert (!reload_completed);

  if (FLOAT_MODE_P (GET_MODE (op)))
    return OP_TYPE_FPN;

  return OP_TYPE_RN;
}
5571 | ||
/* Implement opx_type attribute.
   Return type of INSN's operand X.
   If ADDRESS_P is true, return type of memory location operand refers to.
   This is a one-to-one translation from the internal OP_TYPE_* enum to
   the generated OPX_TYPE_* attribute enum; the two must stay in sync.  */
enum attr_opx_type
m68k_sched_attr_opx_type (rtx_insn *insn, int address_p)
{
  switch (sched_attr_op_type (insn, true, address_p != 0))
    {
    case OP_TYPE_RN:
      return OPX_TYPE_RN;

    case OP_TYPE_FPN:
      return OPX_TYPE_FPN;

    case OP_TYPE_MEM1:
      return OPX_TYPE_MEM1;

    case OP_TYPE_MEM234:
      return OPX_TYPE_MEM234;

    case OP_TYPE_MEM5:
      return OPX_TYPE_MEM5;

    case OP_TYPE_MEM6:
      return OPX_TYPE_MEM6;

    case OP_TYPE_MEM7:
      return OPX_TYPE_MEM7;

    case OP_TYPE_IMM_Q:
      return OPX_TYPE_IMM_Q;

    case OP_TYPE_IMM_W:
      return OPX_TYPE_IMM_W;

    case OP_TYPE_IMM_L:
      return OPX_TYPE_IMM_L;

    default:
      gcc_unreachable ();
    }
}
5614 | ||
/* Implement opy_type attribute.
   Return type of INSN's operand Y.
   If ADDRESS_P is true, return type of memory location operand refers to.
   This is a one-to-one translation from the internal OP_TYPE_* enum to
   the generated OPY_TYPE_* attribute enum; the two must stay in sync.  */
enum attr_opy_type
m68k_sched_attr_opy_type (rtx_insn *insn, int address_p)
{
  switch (sched_attr_op_type (insn, false, address_p != 0))
    {
    case OP_TYPE_RN:
      return OPY_TYPE_RN;

    case OP_TYPE_FPN:
      return OPY_TYPE_FPN;

    case OP_TYPE_MEM1:
      return OPY_TYPE_MEM1;

    case OP_TYPE_MEM234:
      return OPY_TYPE_MEM234;

    case OP_TYPE_MEM5:
      return OPY_TYPE_MEM5;

    case OP_TYPE_MEM6:
      return OPY_TYPE_MEM6;

    case OP_TYPE_MEM7:
      return OPY_TYPE_MEM7;

    case OP_TYPE_IMM_Q:
      return OPY_TYPE_IMM_Q;

    case OP_TYPE_IMM_W:
      return OPY_TYPE_IMM_W;

    case OP_TYPE_IMM_L:
      return OPY_TYPE_IMM_L;

    default:
      gcc_unreachable ();
    }
}
5657 | ||
3c904dda | 5658 | /* Return size of INSN as int. */ |
5659 | static int | |
d3ffa7b4 | 5660 | sched_get_attr_size_int (rtx_insn *insn) |
1d86aeab | 5661 | { |
5662 | int size; | |
5663 | ||
3c904dda | 5664 | switch (get_attr_type (insn)) |
1d86aeab | 5665 | { |
3c904dda | 5666 | case TYPE_IGNORE: |
5667 | /* There should be no references to m68k_sched_attr_size for 'ignore' | |
5668 | instructions. */ | |
5669 | gcc_unreachable (); | |
5670 | return 0; | |
5671 | ||
5672 | case TYPE_MUL_L: | |
1d86aeab | 5673 | size = 2; |
5674 | break; | |
5675 | ||
5676 | default: | |
5677 | size = 1; | |
5678 | break; | |
5679 | } | |
5680 | ||
5681 | switch (get_attr_opx_type (insn)) | |
5682 | { | |
5683 | case OPX_TYPE_NONE: | |
3c904dda | 5684 | case OPX_TYPE_RN: |
5685 | case OPX_TYPE_FPN: | |
1d86aeab | 5686 | case OPX_TYPE_MEM1: |
5687 | case OPX_TYPE_MEM234: | |
5688 | case OPY_TYPE_IMM_Q: | |
5689 | break; | |
5690 | ||
5691 | case OPX_TYPE_MEM5: | |
5692 | case OPX_TYPE_MEM6: | |
5693 | /* Here we assume that most absolute references are short. */ | |
5694 | case OPX_TYPE_MEM7: | |
5695 | case OPY_TYPE_IMM_W: | |
5696 | ++size; | |
5697 | break; | |
5698 | ||
5699 | case OPY_TYPE_IMM_L: | |
5700 | size += 2; | |
5701 | break; | |
5702 | ||
5703 | default: | |
5704 | gcc_unreachable (); | |
5705 | } | |
5706 | ||
5707 | switch (get_attr_opy_type (insn)) | |
5708 | { | |
5709 | case OPY_TYPE_NONE: | |
3c904dda | 5710 | case OPY_TYPE_RN: |
5711 | case OPY_TYPE_FPN: | |
1d86aeab | 5712 | case OPY_TYPE_MEM1: |
5713 | case OPY_TYPE_MEM234: | |
5714 | case OPY_TYPE_IMM_Q: | |
5715 | break; | |
5716 | ||
5717 | case OPY_TYPE_MEM5: | |
5718 | case OPY_TYPE_MEM6: | |
5719 | /* Here we assume that most absolute references are short. */ | |
5720 | case OPY_TYPE_MEM7: | |
5721 | case OPY_TYPE_IMM_W: | |
5722 | ++size; | |
5723 | break; | |
5724 | ||
5725 | case OPY_TYPE_IMM_L: | |
5726 | size += 2; | |
5727 | break; | |
5728 | ||
5729 | default: | |
5730 | gcc_unreachable (); | |
5731 | } | |
5732 | ||
5733 | if (size > 3) | |
5734 | { | |
3c904dda | 5735 | gcc_assert (!reload_completed); |
1d86aeab | 5736 | |
5737 | size = 3; | |
5738 | } | |
5739 | ||
5740 | return size; | |
5741 | } | |
5742 | ||
3c904dda | 5743 | /* Return size of INSN as attribute enum value. */ |
5744 | enum attr_size | |
d3ffa7b4 | 5745 | m68k_sched_attr_size (rtx_insn *insn) |
3c904dda | 5746 | { |
5747 | switch (sched_get_attr_size_int (insn)) | |
5748 | { | |
5749 | case 1: | |
5750 | return SIZE_1; | |
5751 | ||
5752 | case 2: | |
5753 | return SIZE_2; | |
5754 | ||
5755 | case 3: | |
5756 | return SIZE_3; | |
5757 | ||
5758 | default: | |
5759 | gcc_unreachable (); | |
3c904dda | 5760 | } |
5761 | } | |
5762 | ||
/* Classify operand X (if OPX_P) or operand Y (if !OPX_P) of INSN for
   the op_mem attribute: OP_TYPE_RN for register and immediate operands,
   OP_TYPE_MEM1 for any non-indexed memory reference, and OP_TYPE_MEM6
   for indexed memory.  */
static enum attr_op_type
sched_get_opxy_mem_type (rtx_insn *insn, bool opx_p)
{
  if (opx_p)
    {
      switch (get_attr_opx_type (insn))
	{
	/* Anything that is not a memory access collapses to "register".  */
	case OPX_TYPE_NONE:
	case OPX_TYPE_RN:
	case OPX_TYPE_FPN:
	case OPX_TYPE_IMM_Q:
	case OPX_TYPE_IMM_W:
	case OPX_TYPE_IMM_L:
	  return OP_TYPE_RN;

	/* All non-indexed memory accesses collapse to "plain memory".  */
	case OPX_TYPE_MEM1:
	case OPX_TYPE_MEM234:
	case OPX_TYPE_MEM5:
	case OPX_TYPE_MEM7:
	  return OP_TYPE_MEM1;

	case OPX_TYPE_MEM6:
	  return OP_TYPE_MEM6;

	default:
	  gcc_unreachable ();
	}
    }
  else
    {
      switch (get_attr_opy_type (insn))
	{
	/* Anything that is not a memory access collapses to "register".  */
	case OPY_TYPE_NONE:
	case OPY_TYPE_RN:
	case OPY_TYPE_FPN:
	case OPY_TYPE_IMM_Q:
	case OPY_TYPE_IMM_W:
	case OPY_TYPE_IMM_L:
	  return OP_TYPE_RN;

	/* All non-indexed memory accesses collapse to "plain memory".  */
	case OPY_TYPE_MEM1:
	case OPY_TYPE_MEM234:
	case OPY_TYPE_MEM5:
	case OPY_TYPE_MEM7:
	  return OP_TYPE_MEM1;

	case OPY_TYPE_MEM6:
	  return OP_TYPE_MEM6;

	default:
	  gcc_unreachable ();
	}
    }
}
5819 | ||
/* Implement op_mem attribute.  The result encodes INSN's memory accesses
   as a pair <read><write>, where each position is 0 (no access),
   1 (plain memory access) or I (indexed memory access, OP_TYPE_MEM6);
   e.g. OP_MEM_1I denotes a plain-memory read plus an indexed write.  */
enum attr_op_mem
m68k_sched_attr_op_mem (rtx_insn *insn)
{
  enum attr_op_type opx;
  enum attr_op_type opy;

  opx = sched_get_opxy_mem_type (insn, true);
  opy = sched_get_opxy_mem_type (insn, false);

  /* Neither operand touches memory.  */
  if (opy == OP_TYPE_RN && opx == OP_TYPE_RN)
    return OP_MEM_00;

  /* Only OPX touches (plain) memory; its access kind decides whether
     the memory is read, written, or both.  */
  if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM1)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_R:
	  return OP_MEM_10;

	case OPX_ACCESS_W:
	  return OP_MEM_01;

	case OPX_ACCESS_RW:
	  return OP_MEM_11;

	default:
	  gcc_unreachable ();
	}
    }

  /* Only OPX touches memory, via an indexed access.  */
  if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM6)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_R:
	  return OP_MEM_I0;

	case OPX_ACCESS_W:
	  return OP_MEM_0I;

	case OPX_ACCESS_RW:
	  return OP_MEM_I1;

	default:
	  gcc_unreachable ();
	}
    }

  /* OPY's memory access contributes the read side; OPX is a register.  */
  if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_RN)
    return OP_MEM_10;

  if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM1)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_W:
	  return OP_MEM_11;

	default:
	  /* Other access kinds for a two-MEM insn can only occur before
	     reload; conservatively report read-plus-write.  */
	  gcc_assert (!reload_completed);
	  return OP_MEM_11;
	}
    }

  if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM6)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_W:
	  return OP_MEM_1I;

	default:
	  /* Only expected before reload -- see above.  */
	  gcc_assert (!reload_completed);
	  return OP_MEM_1I;
	}
    }

  if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_RN)
    return OP_MEM_I0;

  if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM1)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_W:
	  return OP_MEM_I1;

	default:
	  /* Only expected before reload -- see above.  */
	  gcc_assert (!reload_completed);
	  return OP_MEM_I1;
	}
    }

  /* Both operands use indexed memory accesses; this combination is only
     expected before reload.  */
  gcc_assert (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM6);
  gcc_assert (!reload_completed);
  return OP_MEM_I1;
}
5918 | ||
/* Data for ColdFire V4 index bypass.
   Producer modifies register that is used as index in consumer with
   specified scale.  Recorded by m68k_sched_indexed_address_bypass_p
   and consumed (then cleared) by m68k_sched_adjust_cost.  */
static struct
{
  /* Producer instruction.  */
  rtx pro;

  /* Consumer instruction.  */
  rtx con;

  /* Scale of indexed memory access within consumer.
     Or zero if bypass should not be effective at the moment.  */
  int scale;
} sched_cfv4_bypass_data;

/* An empty state that is used in m68k_sched_adjust_cost to query the DFA
   for the minimum issue distance between two insns.  */
static state_t sched_adjust_cost_state;
5937 | ||
5938 | /* Implement adjust_cost scheduler hook. | |
5939 | Return adjusted COST of dependency LINK between DEF_INSN and INSN. */ | |
5940 | static int | |
99f52c2b | 5941 | m68k_sched_adjust_cost (rtx_insn *insn, int, rtx_insn *def_insn, int cost, |
5942 | unsigned int) | |
15b9ea10 | 5943 | { |
5944 | int delay; | |
5945 | ||
5946 | if (recog_memoized (def_insn) < 0 | |
5947 | || recog_memoized (insn) < 0) | |
5948 | return cost; | |
5949 | ||
3c904dda | 5950 | if (sched_cfv4_bypass_data.scale == 1) |
5951 | /* Handle ColdFire V4 bypass for indexed address with 1x scale. */ | |
5952 | { | |
5953 | /* haifa-sched.c: insn_cost () calls bypass_p () just before | |
5954 | targetm.sched.adjust_cost (). Hence, we can be relatively sure | |
5955 | that the data in sched_cfv4_bypass_data is up to date. */ | |
5956 | gcc_assert (sched_cfv4_bypass_data.pro == def_insn | |
5957 | && sched_cfv4_bypass_data.con == insn); | |
5958 | ||
5959 | if (cost < 3) | |
5960 | cost = 3; | |
5961 | ||
5962 | sched_cfv4_bypass_data.pro = NULL; | |
5963 | sched_cfv4_bypass_data.con = NULL; | |
5964 | sched_cfv4_bypass_data.scale = 0; | |
5965 | } | |
5966 | else | |
5967 | gcc_assert (sched_cfv4_bypass_data.pro == NULL | |
5968 | && sched_cfv4_bypass_data.con == NULL | |
5969 | && sched_cfv4_bypass_data.scale == 0); | |
5970 | ||
15b9ea10 | 5971 | /* Don't try to issue INSN earlier than DFA permits. |
5972 | This is especially useful for instructions that write to memory, | |
5973 | as their true dependence (default) latency is better to be set to 0 | |
5974 | to workaround alias analysis limitations. | |
5975 | This is, in fact, a machine independent tweak, so, probably, | |
5976 | it should be moved to haifa-sched.c: insn_cost (). */ | |
15b9ea10 | 5977 | delay = min_insn_conflict_delay (sched_adjust_cost_state, def_insn, insn); |
5978 | if (delay > cost) | |
5979 | cost = delay; | |
5980 | ||
5981 | return cost; | |
5982 | } | |
5983 | ||
3c904dda | 5984 | /* Return maximal number of insns that can be scheduled on a single cycle. */ |
5985 | static int | |
5986 | m68k_sched_issue_rate (void) | |
5987 | { | |
5988 | switch (m68k_sched_cpu) | |
5989 | { | |
5990 | case CPU_CFV1: | |
5991 | case CPU_CFV2: | |
5992 | case CPU_CFV3: | |
5993 | return 1; | |
5994 | ||
5995 | case CPU_CFV4: | |
5996 | return 2; | |
5997 | ||
5998 | default: | |
5999 | gcc_unreachable (); | |
6000 | return 0; | |
6001 | } | |
6002 | } | |
6003 | ||
/* Maximal length of instruction for current CPU.
   E.g. it is 3 for any ColdFire core.  */
static int max_insn_size;

/* Data to model instruction buffer of CPU.  */
struct _sched_ib
{
  /* True if the instruction buffer is modeled for current CPU.
     (False on ColdFire V4 -- see m68k_sched_md_init_global.)  */
  bool enabled_p;

  /* Size of the instruction buffer in words.  */
  int size;

  /* Number of filled words in the instruction buffer.  */
  int filled;

  /* Additional information about instruction buffer for CPUs that have
     a buffer of instruction records, rather than a plain buffer
     of instruction words.  */
  struct _sched_ib_records
  {
    /* Size of buffer in records.  */
    int n_insns;

    /* Array to hold data on adjustments made to the size of the buffer.
       Used as a circular log of the last N_INSNS adjustments.  */
    int *adjust;

    /* Index of the above array.  */
    int adjust_index;
  } records;

  /* An insn that reserves (marks empty) one word in the instruction buffer.  */
  rtx insn;
};

/* The single instruction-buffer model instance for the current CPU.  */
static struct _sched_ib sched_ib;

/* ID of memory unit.  */
static int sched_mem_unit_code;
6043 | ||
/* Implementation of the targetm.sched.variable_issue () hook.
   It is called after INSN was issued.  It returns the number of insns
   that can possibly get scheduled on the current cycle.
   It is used here to determine the effect of INSN on the instruction
   buffer.  */
static int
m68k_sched_variable_issue (FILE *sched_dump ATTRIBUTE_UNUSED,
			   int sched_verbose ATTRIBUTE_UNUSED,
			   rtx_insn *insn, int can_issue_more)
{
  int insn_size;

  if (recog_memoized (insn) >= 0 && get_attr_type (insn) != TYPE_IGNORE)
    {
      switch (m68k_sched_cpu)
	{
	case CPU_CFV1:
	case CPU_CFV2:
	  /* Simple word buffer: issuing an insn consumes its size in
	     words.  */
	  insn_size = sched_get_attr_size_int (insn);
	  break;

	case CPU_CFV3:
	  insn_size = sched_get_attr_size_int (insn);

	  /* ColdFire V3 and V4 cores have instruction buffers that can
	     accumulate up to 8 instructions regardless of instructions'
	     sizes.  So we should take care not to "prefetch" 24 one-word
	     or 12 two-words instructions.
	     To model this behavior we temporarily decrease size of the
	     buffer by (max_insn_size - insn_size) for next 7 instructions.  */
	  {
	    int adjust;

	    adjust = max_insn_size - insn_size;
	    sched_ib.size -= adjust;

	    if (sched_ib.filled > sched_ib.size)
	      sched_ib.filled = sched_ib.size;

	    /* Log this adjustment in the circular buffer so it can be
	       undone records.n_insns instructions later.  */
	    sched_ib.records.adjust[sched_ib.records.adjust_index] = adjust;
	  }

	  ++sched_ib.records.adjust_index;
	  if (sched_ib.records.adjust_index == sched_ib.records.n_insns)
	    sched_ib.records.adjust_index = 0;

	  /* Undo adjustment we did 7 instructions ago.  */
	  sched_ib.size
	    += sched_ib.records.adjust[sched_ib.records.adjust_index];

	  break;

	case CPU_CFV4:
	  /* V4's buffer is not modeled at all; issuing costs nothing.  */
	  gcc_assert (!sched_ib.enabled_p);
	  insn_size = 0;
	  break;

	default:
	  gcc_unreachable ();
	}

      if (insn_size > sched_ib.filled)
	/* Scheduling for register pressure does not always take DFA into
	   account.  Workaround instruction buffer not being filled enough.  */
	{
	  gcc_assert (sched_pressure == SCHED_PRESSURE_WEIGHTED);
	  insn_size = sched_ib.filled;
	}

      --can_issue_more;
    }
  else if (GET_CODE (PATTERN (insn)) == ASM_INPUT
	   || asm_noperands (PATTERN (insn)) >= 0)
    /* An asm of unknown size conservatively drains the whole buffer.  */
    insn_size = sched_ib.filled;
  else
    insn_size = 0;

  sched_ib.filled -= insn_size;

  return can_issue_more;
}
6125 | ||
/* Return how many instructions the scheduler should look ahead to pick
   the best one: one fewer than can be issued per cycle.  */
static int
m68k_sched_first_cycle_multipass_dfa_lookahead (void)
{
  int issue_rate = m68k_sched_issue_rate ();

  return issue_rate - 1;
}
6133 | ||
/* Implementation of targetm.sched.init_global () hook.
   It is invoked once per scheduling pass and is used here
   to initialize scheduler constants.  */
static void
m68k_sched_md_init_global (FILE *sched_dump ATTRIBUTE_UNUSED,
			   int sched_verbose ATTRIBUTE_UNUSED,
			   int n_insns ATTRIBUTE_UNUSED)
{
  /* Check that all instructions have DFA reservations and
     that all instructions can be issued from a clean state.  */
  if (flag_checking)
    {
      rtx_insn *insn;
      state_t state;

      state = alloca (state_size ());

      for (insn = get_insns (); insn != NULL; insn = NEXT_INSN (insn))
	{
	  if (INSN_P (insn) && recog_memoized (insn) >= 0)
	    {
	      gcc_assert (insn_has_dfa_reservation_p (insn));

	      /* A transition failure from a reset state means the insn
		 could never be issued -- a broken DFA description.  */
	      state_reset (state);
	      if (state_transition (state, insn) >= 0)
		gcc_unreachable ();
	    }
	}
    }

  /* Setup target cpu.  */

  /* ColdFire V4 has a set of features to keep its instruction buffer full
     (e.g., a separate memory bus for instructions) and, hence, we do not model
     buffer for this CPU.  */
  sched_ib.enabled_p = (m68k_sched_cpu != CPU_CFV4);

  switch (m68k_sched_cpu)
    {
    case CPU_CFV4:
      sched_ib.filled = 0;

      /* FALLTHRU */

    case CPU_CFV1:
    case CPU_CFV2:
      max_insn_size = 3;
      sched_ib.records.n_insns = 0;
      sched_ib.records.adjust = NULL;
      break;

    case CPU_CFV3:
      max_insn_size = 3;
      /* V3 models a record buffer of 8 entries -- see
	 m68k_sched_variable_issue.  */
      sched_ib.records.n_insns = 8;
      sched_ib.records.adjust = XNEWVEC (int, sched_ib.records.n_insns);
      break;

    default:
      gcc_unreachable ();
    }

  sched_mem_unit_code = get_cpu_unit_code ("cf_mem1");

  /* Scratch DFA state used by m68k_sched_adjust_cost.  */
  sched_adjust_cost_state = xmalloc (state_size ());
  state_reset (sched_adjust_cost_state);

  /* Emit (without attaching to the insn stream) the pseudo-insn that
     marks one buffer word as consumed in the DFA.  */
  start_sequence ();
  emit_insn (gen_ib ());
  sched_ib.insn = get_insns ();
  end_sequence ();
}
6205 | ||
6206 | /* Scheduling pass is now finished. Free/reset static variables. */ | |
6207 | static void | |
6208 | m68k_sched_md_finish_global (FILE *dump ATTRIBUTE_UNUSED, | |
6209 | int verbose ATTRIBUTE_UNUSED) | |
6210 | { | |
2ccdedfd | 6211 | sched_ib.insn = NULL; |
15b9ea10 | 6212 | |
6213 | free (sched_adjust_cost_state); | |
6214 | sched_adjust_cost_state = NULL; | |
6215 | ||
6216 | sched_mem_unit_code = 0; | |
2ccdedfd | 6217 | |
6218 | free (sched_ib.records.adjust); | |
6219 | sched_ib.records.adjust = NULL; | |
6220 | sched_ib.records.n_insns = 0; | |
6221 | max_insn_size = 0; | |
15b9ea10 | 6222 | } |
6223 | ||
c8e5dcf5 | 6224 | /* Implementation of targetm.sched.init () hook. |
15b9ea10 | 6225 | It is invoked each time scheduler starts on the new block (basic block or |
6226 | extended basic block). */ | |
6227 | static void | |
6228 | m68k_sched_md_init (FILE *sched_dump ATTRIBUTE_UNUSED, | |
6229 | int sched_verbose ATTRIBUTE_UNUSED, | |
6230 | int n_insns ATTRIBUTE_UNUSED) | |
6231 | { | |
2ccdedfd | 6232 | switch (m68k_sched_cpu) |
6233 | { | |
6234 | case CPU_CFV1: | |
6235 | case CPU_CFV2: | |
6236 | sched_ib.size = 6; | |
6237 | break; | |
6238 | ||
6239 | case CPU_CFV3: | |
6240 | sched_ib.size = sched_ib.records.n_insns * max_insn_size; | |
6241 | ||
6242 | memset (sched_ib.records.adjust, 0, | |
6243 | sched_ib.records.n_insns * sizeof (*sched_ib.records.adjust)); | |
6244 | sched_ib.records.adjust_index = 0; | |
6245 | break; | |
6246 | ||
3c904dda | 6247 | case CPU_CFV4: |
6248 | gcc_assert (!sched_ib.enabled_p); | |
6249 | sched_ib.size = 0; | |
6250 | break; | |
6251 | ||
2ccdedfd | 6252 | default: |
6253 | gcc_unreachable (); | |
6254 | } | |
6255 | ||
3c904dda | 6256 | if (sched_ib.enabled_p) |
6257 | /* haifa-sched.c: schedule_block () calls advance_cycle () just before | |
6258 | the first cycle. Workaround that. */ | |
6259 | sched_ib.filled = -2; | |
15b9ea10 | 6260 | } |
6261 | ||
6262 | /* Implementation of targetm.sched.dfa_pre_advance_cycle () hook. | |
6263 | It is invoked just before current cycle finishes and is used here | |
6264 | to track if instruction buffer got its two words this cycle. */ | |
6265 | static void | |
6266 | m68k_sched_dfa_pre_advance_cycle (void) | |
6267 | { | |
3c904dda | 6268 | if (!sched_ib.enabled_p) |
6269 | return; | |
6270 | ||
15b9ea10 | 6271 | if (!cpu_unit_reservation_p (curr_state, sched_mem_unit_code)) |
6272 | { | |
2ccdedfd | 6273 | sched_ib.filled += 2; |
15b9ea10 | 6274 | |
2ccdedfd | 6275 | if (sched_ib.filled > sched_ib.size) |
6276 | sched_ib.filled = sched_ib.size; | |
15b9ea10 | 6277 | } |
6278 | } | |
6279 | ||
6280 | /* Implementation of targetm.sched.dfa_post_advance_cycle () hook. | |
6281 | It is invoked just after new cycle begins and is used here | |
6282 | to setup number of filled words in the instruction buffer so that | |
6283 | instructions which won't have all their words prefetched would be | |
6284 | stalled for a cycle. */ | |
6285 | static void | |
6286 | m68k_sched_dfa_post_advance_cycle (void) | |
6287 | { | |
6288 | int i; | |
15b9ea10 | 6289 | |
3c904dda | 6290 | if (!sched_ib.enabled_p) |
6291 | return; | |
6292 | ||
15b9ea10 | 6293 | /* Setup number of prefetched instruction words in the instruction |
6294 | buffer. */ | |
2ccdedfd | 6295 | i = max_insn_size - sched_ib.filled; |
6296 | ||
6297 | while (--i >= 0) | |
15b9ea10 | 6298 | { |
2ccdedfd | 6299 | if (state_transition (curr_state, sched_ib.insn) >= 0) |
57248c05 | 6300 | /* Pick up scheduler state. */ |
6301 | ++sched_ib.filled; | |
15b9ea10 | 6302 | } |
6303 | } | |
3c904dda | 6304 | |
6305 | /* Return X or Y (depending on OPX_P) operand of INSN, | |
6306 | if it is an integer register, or NULL overwise. */ | |
6307 | static rtx | |
ed3e6e5d | 6308 | sched_get_reg_operand (rtx_insn *insn, bool opx_p) |
3c904dda | 6309 | { |
6310 | rtx op = NULL; | |
6311 | ||
6312 | if (opx_p) | |
6313 | { | |
6314 | if (get_attr_opx_type (insn) == OPX_TYPE_RN) | |
6315 | { | |
6316 | op = sched_get_operand (insn, true); | |
6317 | gcc_assert (op != NULL); | |
6318 | ||
6319 | if (!reload_completed && !REG_P (op)) | |
6320 | return NULL; | |
6321 | } | |
6322 | } | |
6323 | else | |
6324 | { | |
6325 | if (get_attr_opy_type (insn) == OPY_TYPE_RN) | |
6326 | { | |
6327 | op = sched_get_operand (insn, false); | |
6328 | gcc_assert (op != NULL); | |
6329 | ||
6330 | if (!reload_completed && !REG_P (op)) | |
6331 | return NULL; | |
6332 | } | |
6333 | } | |
6334 | ||
6335 | return op; | |
6336 | } | |
6337 | ||
6338 | /* Return true, if X or Y (depending on OPX_P) operand of INSN | |
6339 | is a MEM. */ | |
6340 | static bool | |
d3ffa7b4 | 6341 | sched_mem_operand_p (rtx_insn *insn, bool opx_p) |
3c904dda | 6342 | { |
6343 | switch (sched_get_opxy_mem_type (insn, opx_p)) | |
6344 | { | |
6345 | case OP_TYPE_MEM1: | |
6346 | case OP_TYPE_MEM6: | |
6347 | return true; | |
6348 | ||
6349 | default: | |
6350 | return false; | |
6351 | } | |
6352 | } | |
6353 | ||
6354 | /* Return X or Y (depending on OPX_P) operand of INSN, | |
6355 | if it is a MEM, or NULL overwise. */ | |
6356 | static rtx | |
ed3e6e5d | 6357 | sched_get_mem_operand (rtx_insn *insn, bool must_read_p, bool must_write_p) |
3c904dda | 6358 | { |
6359 | bool opx_p; | |
6360 | bool opy_p; | |
6361 | ||
6362 | opx_p = false; | |
6363 | opy_p = false; | |
6364 | ||
6365 | if (must_read_p) | |
6366 | { | |
6367 | opx_p = true; | |
6368 | opy_p = true; | |
6369 | } | |
6370 | ||
6371 | if (must_write_p) | |
6372 | { | |
6373 | opx_p = true; | |
6374 | opy_p = false; | |
6375 | } | |
6376 | ||
6377 | if (opy_p && sched_mem_operand_p (insn, false)) | |
6378 | return sched_get_operand (insn, false); | |
6379 | ||
6380 | if (opx_p && sched_mem_operand_p (insn, true)) | |
6381 | return sched_get_operand (insn, true); | |
6382 | ||
6383 | gcc_unreachable (); | |
6384 | return NULL; | |
6385 | } | |
6386 | ||
6387 | /* Return non-zero if PRO modifies register used as part of | |
6388 | address in CON. */ | |
6389 | int | |
ed3e6e5d | 6390 | m68k_sched_address_bypass_p (rtx_insn *pro, rtx_insn *con) |
3c904dda | 6391 | { |
6392 | rtx pro_x; | |
6393 | rtx con_mem_read; | |
6394 | ||
6395 | pro_x = sched_get_reg_operand (pro, true); | |
6396 | if (pro_x == NULL) | |
6397 | return 0; | |
6398 | ||
6399 | con_mem_read = sched_get_mem_operand (con, true, false); | |
6400 | gcc_assert (con_mem_read != NULL); | |
6401 | ||
6402 | if (reg_mentioned_p (pro_x, con_mem_read)) | |
6403 | return 1; | |
6404 | ||
6405 | return 0; | |
6406 | } | |
6407 | ||
6408 | /* Helper function for m68k_sched_indexed_address_bypass_p. | |
6409 | if PRO modifies register used as index in CON, | |
6410 | return scale of indexed memory access in CON. Return zero overwise. */ | |
6411 | static int | |
ed3e6e5d | 6412 | sched_get_indexed_address_scale (rtx_insn *pro, rtx_insn *con) |
3c904dda | 6413 | { |
6414 | rtx reg; | |
6415 | rtx mem; | |
6416 | struct m68k_address address; | |
6417 | ||
6418 | reg = sched_get_reg_operand (pro, true); | |
6419 | if (reg == NULL) | |
6420 | return 0; | |
6421 | ||
6422 | mem = sched_get_mem_operand (con, true, false); | |
6423 | gcc_assert (mem != NULL && MEM_P (mem)); | |
6424 | ||
6425 | if (!m68k_decompose_address (GET_MODE (mem), XEXP (mem, 0), reload_completed, | |
6426 | &address)) | |
6427 | gcc_unreachable (); | |
6428 | ||
6429 | if (REGNO (reg) == REGNO (address.index)) | |
6430 | { | |
6431 | gcc_assert (address.scale != 0); | |
6432 | return address.scale; | |
6433 | } | |
6434 | ||
6435 | return 0; | |
6436 | } | |
6437 | ||
6438 | /* Return non-zero if PRO modifies register used | |
6439 | as index with scale 2 or 4 in CON. */ | |
6440 | int | |
ed3e6e5d | 6441 | m68k_sched_indexed_address_bypass_p (rtx_insn *pro, rtx_insn *con) |
3c904dda | 6442 | { |
6443 | gcc_assert (sched_cfv4_bypass_data.pro == NULL | |
6444 | && sched_cfv4_bypass_data.con == NULL | |
6445 | && sched_cfv4_bypass_data.scale == 0); | |
6446 | ||
6447 | switch (sched_get_indexed_address_scale (pro, con)) | |
6448 | { | |
6449 | case 1: | |
6450 | /* We can't have a variable latency bypass, so | |
6451 | remember to adjust the insn cost in adjust_cost hook. */ | |
6452 | sched_cfv4_bypass_data.pro = pro; | |
6453 | sched_cfv4_bypass_data.con = con; | |
6454 | sched_cfv4_bypass_data.scale = 1; | |
6455 | return 0; | |
6456 | ||
6457 | case 2: | |
6458 | case 4: | |
6459 | return 1; | |
6460 | ||
6461 | default: | |
6462 | return 0; | |
6463 | } | |
6464 | } | |
869bde6b | 6465 | |
61fafd77 | 6466 | /* We generate a two-instructions program at M_TRAMP : |
6467 | movea.l &CHAIN_VALUE,%a0 | |
6468 | jmp FNADDR | |
6469 | where %a0 can be modified by changing STATIC_CHAIN_REGNUM. */ | |
6470 | ||
6471 | static void | |
6472 | m68k_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value) | |
6473 | { | |
6474 | rtx fnaddr = XEXP (DECL_RTL (fndecl), 0); | |
6475 | rtx mem; | |
6476 | ||
6477 | gcc_assert (ADDRESS_REGNO_P (STATIC_CHAIN_REGNUM)); | |
6478 | ||
6479 | mem = adjust_address (m_tramp, HImode, 0); | |
6480 | emit_move_insn (mem, GEN_INT(0x207C + ((STATIC_CHAIN_REGNUM-8) << 9))); | |
6481 | mem = adjust_address (m_tramp, SImode, 2); | |
6482 | emit_move_insn (mem, chain_value); | |
6483 | ||
6484 | mem = adjust_address (m_tramp, HImode, 6); | |
6485 | emit_move_insn (mem, GEN_INT(0x4EF9)); | |
6486 | mem = adjust_address (m_tramp, SImode, 8); | |
6487 | emit_move_insn (mem, fnaddr); | |
6488 | ||
6489 | FINALIZE_TRAMPOLINE (XEXP (m_tramp, 0)); | |
6490 | } | |
6491 | ||
f5bc28da | 6492 | /* On the 68000, the RTS insn cannot pop anything. |
6493 | On the 68010, the RTD insn may be used to pop them if the number | |
6494 | of args is fixed, but if the number is variable then the caller | |
6495 | must pop them all. RTD can't be used for library calls now | |
6496 | because the library is compiled with the Unix compiler. | |
6497 | Use of RTD is a selectable option, since it is incompatible with | |
6498 | standard Unix calling sequences. If the option is not selected, | |
6499 | the caller must always pop the args. */ | |
6500 | ||
6501 | static int | |
6502 | m68k_return_pops_args (tree fundecl, tree funtype, int size) | |
6503 | { | |
6504 | return ((TARGET_RTD | |
6505 | && (!fundecl | |
6506 | || TREE_CODE (fundecl) != IDENTIFIER_NODE) | |
257d99c3 | 6507 | && (!stdarg_p (funtype))) |
f5bc28da | 6508 | ? size : 0); |
6509 | } | |
6510 | ||
b2d7ede1 | 6511 | /* Make sure everything's fine if we *don't* have a given processor. |
6512 | This assumes that putting a register in fixed_regs will keep the | |
6513 | compiler's mitts completely off it. We don't bother to zero it out | |
6514 | of register classes. */ | |
6515 | ||
6516 | static void | |
6517 | m68k_conditional_register_usage (void) | |
6518 | { | |
6519 | int i; | |
6520 | HARD_REG_SET x; | |
6521 | if (!TARGET_HARD_FLOAT) | |
6522 | { | |
6523 | COPY_HARD_REG_SET (x, reg_class_contents[(int)FP_REGS]); | |
6524 | for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) | |
6525 | if (TEST_HARD_REG_BIT (x, i)) | |
6526 | fixed_regs[i] = call_used_regs[i] = 1; | |
6527 | } | |
6528 | if (flag_pic) | |
6529 | fixed_regs[PIC_REG] = call_used_regs[PIC_REG] = 1; | |
6530 | } | |
6531 | ||
/* Set up out-of-line library functions for the __sync_* atomic
   operations on objects up to UNITS_PER_WORD bytes wide.
   NOTE(review): presumably registered as the init_libfuncs target
   hook -- confirm at the hook-registration site.  */
static void
m68k_init_sync_libfuncs (void)
{
  init_sync_libfuncs (UNITS_PER_WORD);
}
6537 | ||
df47af5a | 6538 | /* Implements EPILOGUE_USES. All registers are live on exit from an |
6539 | interrupt routine. */ | |
6540 | bool | |
6541 | m68k_epilogue_uses (int regno ATTRIBUTE_UNUSED) | |
6542 | { | |
6543 | return (reload_completed | |
6544 | && (m68k_get_function_kind (current_function_decl) | |
6545 | == m68k_fk_interrupt_handler)); | |
6546 | } | |
6547 | ||
41af789c | 6548 | |
6549 | /* Implement TARGET_C_EXCESS_PRECISION. | |
6550 | ||
6551 | Set the value of FLT_EVAL_METHOD in float.h. When using 68040 fp | |
6552 | instructions, we get proper intermediate rounding, otherwise we | |
6553 | get extended precision results. */ | |
6554 | ||
6555 | static enum flt_eval_method | |
6556 | m68k_excess_precision (enum excess_precision_type type) | |
6557 | { | |
6558 | switch (type) | |
6559 | { | |
6560 | case EXCESS_PRECISION_TYPE_FAST: | |
6561 | /* The fastest type to promote to will always be the native type, | |
6562 | whether that occurs with implicit excess precision or | |
6563 | otherwise. */ | |
6564 | return FLT_EVAL_METHOD_PROMOTE_TO_FLOAT; | |
6565 | case EXCESS_PRECISION_TYPE_STANDARD: | |
6566 | case EXCESS_PRECISION_TYPE_IMPLICIT: | |
6567 | /* Otherwise, the excess precision we want when we are | |
6568 | in a standards compliant mode, and the implicit precision we | |
6569 | provide can be identical. */ | |
6570 | if (TARGET_68040 || ! TARGET_68881) | |
6571 | return FLT_EVAL_METHOD_PROMOTE_TO_FLOAT; | |
6572 | ||
6573 | return FLT_EVAL_METHOD_PROMOTE_TO_LONG_DOUBLE; | |
6574 | default: | |
6575 | gcc_unreachable (); | |
6576 | } | |
6577 | return FLT_EVAL_METHOD_UNPREDICTABLE; | |
6578 | } | |
6579 | ||
869bde6b | 6580 | #include "gt-m68k.h" |