/* Target Code for TI C6X
   Copyright (C) 2010-2019 Free Software Foundation, Inc.
   Contributed by Andrew Jenner <andrew@codesourcery.com>
   Contributed by Bernd Schmidt <bernds@codesourcery.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 3, or (at your
option) any later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#define IN_TARGET_CODE 1

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple-expr.h"
#include "cfghooks.h"
#include "df.h"
#include "memmodel.h"
#include "tm_p.h"
#include "stringpool.h"
#include "attribs.h"
#include "optabs.h"
#include "regs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "cgraph.h"
#include "diagnostic-core.h"
#include "stor-layout.h"
#include "varasm.h"
#include "calls.h"
#include "output.h"
#include "insn-attr.h"
#include "explow.h"
#include "expr.h"
#include "cfgrtl.h"
#include "sched-int.h"
#include "tm-constrs.h"
#include "langhooks.h"
#include "sel-sched.h"
#include "debug.h"
#include "hw-doloop.h"
#include "regrename.h"
#include "dumpfile.h"
#include "builtins.h"

/* This file should be included last.  */
#include "target-def.h"
/* Table of supported architecture variants.  */
typedef struct
{
  const char *arch;
  enum c6x_cpu_type type;
  unsigned short features;
} c6x_arch_table;

/* A list of all ISAs, mapping each one to a representative device.
   Used for -march selection.  */
static const c6x_arch_table all_isas[] =
{
#define C6X_ISA(NAME,DEVICE,FLAGS) \
  { NAME, DEVICE, FLAGS },
#include "c6x-isas.def"
#undef C6X_ISA
  { NULL, C6X_CPU_C62X, 0 }
};
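/* For illustration only: each C6X_ISA line in c6x-isas.def expands to one
   initializer above, conceptually of the form
     { "c64x+", C6X_CPU_C64XP, <feature bits> },
   pairing a -march= name with its CPU type and its permitted insn mask
   bits.  The exact names and feature flags are defined by c6x-isas.def,
   not here.  */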
/* This is the parsed result of the "-march=" option, if given.  */
enum c6x_cpu_type c6x_arch = C6X_DEFAULT_ARCH;

/* A mask of insn types that are allowed by the architecture selected by
   the -march option.  */
unsigned long c6x_insn_mask = C6X_DEFAULT_INSN_MASK;

/* The instruction that is being output (as obtained from
   FINAL_PRESCAN_INSN).  */
static rtx_insn *c6x_current_insn = NULL;

/* A decl we build to access __c6xabi_DSBT_base.  */
static GTY(()) tree dsbt_decl;

/* Determines whether we run our final scheduling pass or not.  We always
   avoid the normal second scheduling pass.  */
static int c6x_flag_schedule_insns2;

/* Determines whether we run variable tracking in machine dependent
   reorganization.  */
static int c6x_flag_var_tracking;

/* Determines whether we use modulo scheduling.  */
static int c6x_flag_modulo_sched;

/* Record the state of flag_pic before we set it to 1 for DSBT.  */
int c6x_initial_flag_pic;

typedef struct
{
  /* We record the clock cycle for every insn during scheduling.  */
  int clock;
  /* After scheduling, we run assign_reservations to choose unit
     reservations for all insns.  These are recorded here.  */
  int reservation;
  /* Records the new condition for insns which must be made
     conditional after scheduling.  An entry of NULL_RTX means no such
     change is necessary.  */
  rtx new_cond;
  /* True for the first insn that was scheduled in an ebb.  */
  bool ebb_start;
  /* The scheduler state after the insn, transformed into a mask of UNIT_QID
     bits rather than storing the state.  Meaningful only for the last
     insn in a cycle.  */
  unsigned int unit_mask;
} c6x_sched_insn_info;

/* Record a c6x_sched_insn_info structure for every insn in the function.  */
static vec<c6x_sched_insn_info> insn_info;
#define INSN_INFO_LENGTH (insn_info).length ()
#define INSN_INFO_ENTRY(N) (insn_info[(N)])

static bool done_cfi_sections;

#define RESERVATION_FLAG_D 1
#define RESERVATION_FLAG_L 2
#define RESERVATION_FLAG_S 4
#define RESERVATION_FLAG_M 8
#define RESERVATION_FLAG_DL (RESERVATION_FLAG_D | RESERVATION_FLAG_L)
#define RESERVATION_FLAG_DS (RESERVATION_FLAG_D | RESERVATION_FLAG_S)
#define RESERVATION_FLAG_LS (RESERVATION_FLAG_L | RESERVATION_FLAG_S)
#define RESERVATION_FLAG_DLS (RESERVATION_FLAG_D | RESERVATION_FLAG_LS)
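/* These flags form a bitmask, so a reservation that may use several units
   is the inclusive OR of the single-unit flags; e.g. RESERVATION_FLAG_DLS
   is 1 | 2 | 4 == 7, covering the D, L and S units.  */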
/* The DFA names of the units.  */
static const char *const c6x_unit_names[] =
{
  "d1", "l1", "s1", "m1", "fps1", "fpl1", "adddps1", "adddpl1",
  "d2", "l2", "s2", "m2", "fps2", "fpl2", "adddps2", "adddpl2"
};

/* The DFA unit number for each unit in c6x_unit_names[].  */
static int c6x_unit_codes[ARRAY_SIZE (c6x_unit_names)];

/* Unit query IDs.  */
#define UNIT_QID_D1 0
#define UNIT_QID_L1 1
#define UNIT_QID_S1 2
#define UNIT_QID_M1 3
#define UNIT_QID_FPS1 4
#define UNIT_QID_FPL1 5
#define UNIT_QID_ADDDPS1 6
#define UNIT_QID_ADDDPL1 7
#define UNIT_QID_SIDE_OFFSET 8

#define RESERVATION_S1 2
#define RESERVATION_S2 10
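/* The query IDs above number the side-1 units 0 through 7; adding
   UNIT_QID_SIDE_OFFSET yields the corresponding side-2 unit.  For
   example, the S2 reservation is UNIT_QID_S1 + UNIT_QID_SIDE_OFFSET
   == 2 + 8 == 10, matching RESERVATION_S2 above.  */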
/* An enum for the unit requirements we count in the UNIT_REQS table.  */
enum unitreqs
{
  UNIT_REQ_D,
  UNIT_REQ_L,
  UNIT_REQ_S,
  UNIT_REQ_M,
  UNIT_REQ_DL,
  UNIT_REQ_DS,
  UNIT_REQ_LS,
  UNIT_REQ_DLS,
  UNIT_REQ_T,
  UNIT_REQ_X,
  UNIT_REQ_MAX
};

/* A table used to count unit requirements.  Used when computing minimum
   iteration intervals.  */
typedef int unit_req_table[2][UNIT_REQ_MAX];
static unit_req_table unit_reqs;
/* Register map for debugging.  */
unsigned const dbx_register_map[FIRST_PSEUDO_REGISTER] =
{
  0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,  /* A0 - A15.  */
  37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,    /* A16 - A32.  */
  50, 51, 52,
  16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,    /* B0 - B15.  */
  29, 30, 31,
  53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65,    /* B16 - B32.  */
  66, 67, 68,
  -1, -1, -1                                              /* FP, ARGP, ILC.  */
};
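/* Reading the table: hard register number N maps to the debug register
   number at index N, so e.g. A16 (hard regno 16) is emitted as debug
   register 37, while the FP, ARGP and ILC pseudos have no debug
   encoding.  */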
/* Allocate a new, cleared machine_function structure.  */

static struct machine_function *
c6x_init_machine_status (void)
{
  return ggc_cleared_alloc<machine_function> ();
}

/* Implement TARGET_OPTION_OVERRIDE.  */

static void
c6x_option_override (void)
{
  unsigned i;

  if (global_options_set.x_c6x_arch_option)
    {
      c6x_arch = all_isas[c6x_arch_option].type;
      c6x_insn_mask &= ~C6X_INSNS_ALL_CPU_BITS;
      c6x_insn_mask |= all_isas[c6x_arch_option].features;
    }

  c6x_flag_schedule_insns2 = flag_schedule_insns_after_reload;
  flag_schedule_insns_after_reload = 0;

  c6x_flag_modulo_sched = flag_modulo_sched;
  flag_modulo_sched = 0;

  init_machine_status = c6x_init_machine_status;

  for (i = 0; i < ARRAY_SIZE (c6x_unit_names); i++)
    c6x_unit_codes[i] = get_cpu_unit_code (c6x_unit_names[i]);

  if (flag_pic && !TARGET_DSBT)
    {
      error ("%<-fpic%> and %<-fPIC%> not supported without %<-mdsbt%> "
	     "on this target");
      flag_pic = 0;
    }
  c6x_initial_flag_pic = flag_pic;
  if (TARGET_DSBT && !flag_pic)
    flag_pic = 1;
}
/* Implement the TARGET_CONDITIONAL_REGISTER_USAGE hook.  */

static void
c6x_conditional_register_usage (void)
{
  int i;
  if (c6x_arch == C6X_CPU_C62X || c6x_arch == C6X_CPU_C67X)
    for (i = 16; i < 32; i++)
      {
	fixed_regs[i] = 1;
	fixed_regs[32 + i] = 1;
      }
  if (TARGET_INSNS_64)
    {
      SET_HARD_REG_BIT (reg_class_contents[(int)PREDICATE_A_REGS],
			REG_A0);
      SET_HARD_REG_BIT (reg_class_contents[(int)PREDICATE_REGS],
			REG_A0);
      CLEAR_HARD_REG_BIT (reg_class_contents[(int)NONPREDICATE_A_REGS],
			  REG_A0);
      CLEAR_HARD_REG_BIT (reg_class_contents[(int)NONPREDICATE_REGS],
			  REG_A0);
    }
}
static GTY(()) rtx eqdf_libfunc;
static GTY(()) rtx nedf_libfunc;
static GTY(()) rtx ledf_libfunc;
static GTY(()) rtx ltdf_libfunc;
static GTY(()) rtx gedf_libfunc;
static GTY(()) rtx gtdf_libfunc;
static GTY(()) rtx eqsf_libfunc;
static GTY(()) rtx nesf_libfunc;
static GTY(()) rtx lesf_libfunc;
static GTY(()) rtx ltsf_libfunc;
static GTY(()) rtx gesf_libfunc;
static GTY(()) rtx gtsf_libfunc;
static GTY(()) rtx strasgi_libfunc;
static GTY(()) rtx strasgi64p_libfunc;
/* Implement the TARGET_INIT_LIBFUNCS macro.  We use this to rename library
   functions to match the C6x ABI.  */

static void
c6x_init_libfuncs (void)
{
  /* Double-precision floating-point arithmetic.  */
  set_optab_libfunc (add_optab, DFmode, "__c6xabi_addd");
  set_optab_libfunc (sdiv_optab, DFmode, "__c6xabi_divd");
  set_optab_libfunc (smul_optab, DFmode, "__c6xabi_mpyd");
  set_optab_libfunc (neg_optab, DFmode, "__c6xabi_negd");
  set_optab_libfunc (sub_optab, DFmode, "__c6xabi_subd");

  /* Single-precision floating-point arithmetic.  */
  set_optab_libfunc (add_optab, SFmode, "__c6xabi_addf");
  set_optab_libfunc (sdiv_optab, SFmode, "__c6xabi_divf");
  set_optab_libfunc (smul_optab, SFmode, "__c6xabi_mpyf");
  set_optab_libfunc (neg_optab, SFmode, "__c6xabi_negf");
  set_optab_libfunc (sub_optab, SFmode, "__c6xabi_subf");

  /* Floating-point comparisons.  */
  eqsf_libfunc = init_one_libfunc ("__c6xabi_eqf");
  nesf_libfunc = init_one_libfunc ("__c6xabi_neqf");
  lesf_libfunc = init_one_libfunc ("__c6xabi_lef");
  ltsf_libfunc = init_one_libfunc ("__c6xabi_ltf");
  gesf_libfunc = init_one_libfunc ("__c6xabi_gef");
  gtsf_libfunc = init_one_libfunc ("__c6xabi_gtf");
  eqdf_libfunc = init_one_libfunc ("__c6xabi_eqd");
  nedf_libfunc = init_one_libfunc ("__c6xabi_neqd");
  ledf_libfunc = init_one_libfunc ("__c6xabi_led");
  ltdf_libfunc = init_one_libfunc ("__c6xabi_ltd");
  gedf_libfunc = init_one_libfunc ("__c6xabi_ged");
  gtdf_libfunc = init_one_libfunc ("__c6xabi_gtd");

  set_optab_libfunc (eq_optab, SFmode, NULL);
  set_optab_libfunc (ne_optab, SFmode, "__c6xabi_neqf");
  set_optab_libfunc (gt_optab, SFmode, NULL);
  set_optab_libfunc (ge_optab, SFmode, NULL);
  set_optab_libfunc (lt_optab, SFmode, NULL);
  set_optab_libfunc (le_optab, SFmode, NULL);
  set_optab_libfunc (unord_optab, SFmode, "__c6xabi_unordf");
  set_optab_libfunc (eq_optab, DFmode, NULL);
  set_optab_libfunc (ne_optab, DFmode, "__c6xabi_neqd");
  set_optab_libfunc (gt_optab, DFmode, NULL);
  set_optab_libfunc (ge_optab, DFmode, NULL);
  set_optab_libfunc (lt_optab, DFmode, NULL);
  set_optab_libfunc (le_optab, DFmode, NULL);
  set_optab_libfunc (unord_optab, DFmode, "__c6xabi_unordd");

  /* Floating-point to integer conversions.  */
  set_conv_libfunc (sfix_optab, SImode, DFmode, "__c6xabi_fixdi");
  set_conv_libfunc (ufix_optab, SImode, DFmode, "__c6xabi_fixdu");
  set_conv_libfunc (sfix_optab, DImode, DFmode, "__c6xabi_fixdlli");
  set_conv_libfunc (ufix_optab, DImode, DFmode, "__c6xabi_fixdull");
  set_conv_libfunc (sfix_optab, SImode, SFmode, "__c6xabi_fixfi");
  set_conv_libfunc (ufix_optab, SImode, SFmode, "__c6xabi_fixfu");
  set_conv_libfunc (sfix_optab, DImode, SFmode, "__c6xabi_fixflli");
  set_conv_libfunc (ufix_optab, DImode, SFmode, "__c6xabi_fixfull");

  /* Conversions between floating types.  */
  set_conv_libfunc (trunc_optab, SFmode, DFmode, "__c6xabi_cvtdf");
  set_conv_libfunc (sext_optab, DFmode, SFmode, "__c6xabi_cvtfd");

  /* Integer to floating-point conversions.  */
  set_conv_libfunc (sfloat_optab, DFmode, SImode, "__c6xabi_fltid");
  set_conv_libfunc (ufloat_optab, DFmode, SImode, "__c6xabi_fltud");
  set_conv_libfunc (sfloat_optab, DFmode, DImode, "__c6xabi_fltllid");
  set_conv_libfunc (ufloat_optab, DFmode, DImode, "__c6xabi_fltulld");
  set_conv_libfunc (sfloat_optab, SFmode, SImode, "__c6xabi_fltif");
  set_conv_libfunc (ufloat_optab, SFmode, SImode, "__c6xabi_fltuf");
  set_conv_libfunc (sfloat_optab, SFmode, DImode, "__c6xabi_fltllif");
  set_conv_libfunc (ufloat_optab, SFmode, DImode, "__c6xabi_fltullf");

  set_optab_libfunc (smul_optab, DImode, "__c6xabi_mpyll");
  set_optab_libfunc (ashl_optab, DImode, "__c6xabi_llshl");
  set_optab_libfunc (lshr_optab, DImode, "__c6xabi_llshru");
  set_optab_libfunc (ashr_optab, DImode, "__c6xabi_llshr");

  set_optab_libfunc (sdiv_optab, SImode, "__c6xabi_divi");
  set_optab_libfunc (udiv_optab, SImode, "__c6xabi_divu");
  set_optab_libfunc (smod_optab, SImode, "__c6xabi_remi");
  set_optab_libfunc (umod_optab, SImode, "__c6xabi_remu");
  set_optab_libfunc (sdivmod_optab, SImode, "__c6xabi_divremi");
  set_optab_libfunc (udivmod_optab, SImode, "__c6xabi_divremu");
  set_optab_libfunc (sdiv_optab, DImode, "__c6xabi_divlli");
  set_optab_libfunc (udiv_optab, DImode, "__c6xabi_divull");
  set_optab_libfunc (smod_optab, DImode, "__c6xabi_remlli");
  set_optab_libfunc (umod_optab, DImode, "__c6xabi_remull");
  set_optab_libfunc (udivmod_optab, DImode, "__c6xabi_divremull");

  strasgi_libfunc = init_one_libfunc ("__c6xabi_strasgi");
  strasgi64p_libfunc = init_one_libfunc ("__c6xabi_strasgi_64plus");
}
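/* The effect of the renaming above is that operations lacking machine
   instructions become calls to the C6x EABI helpers; e.g. a double
   divide such as `a / b' ends up as a call to __c6xabi_divd rather
   than the default __divdf3 (illustrative; the exact call emitted
   depends on the expander and target options).  */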
/* Begin the assembly file.  */

static void
c6x_file_start (void)
{
  /* Variable tracking should be run after all optimizations which change order
     of insns.  It also needs a valid CFG.  This can't be done in
     c6x_override_options, because flag_var_tracking is finalized after
     that.  */
  c6x_flag_var_tracking = flag_var_tracking;
  flag_var_tracking = 0;

  done_cfi_sections = false;
  default_file_start ();

  /* Arrays are aligned to 8-byte boundaries.  */
  asm_fprintf (asm_out_file,
	       "\t.c6xabi_attribute Tag_ABI_array_object_alignment, 0\n");
  asm_fprintf (asm_out_file,
	       "\t.c6xabi_attribute Tag_ABI_array_object_align_expected, 0\n");

  /* Stack alignment is 8 bytes.  */
  asm_fprintf (asm_out_file,
	       "\t.c6xabi_attribute Tag_ABI_stack_align_needed, 0\n");
  asm_fprintf (asm_out_file,
	       "\t.c6xabi_attribute Tag_ABI_stack_align_preserved, 0\n");

#if 0 /* FIXME: Reenable when TI's tools are fixed.  */
  /* ??? Ideally we'd check flag_short_wchar somehow.  */
  asm_fprintf (asm_out_file, "\t.c6xabi_attribute Tag_ABI_wchar_t, %d\n", 2);
#endif

  /* We conform to version 1.0 of the ABI.  */
  asm_fprintf (asm_out_file,
	       "\t.c6xabi_attribute Tag_ABI_conformance, \"1.0\"\n");
}
/* The LTO frontend only enables exceptions when it sees a function that
   uses it.  This changes the return value of dwarf2out_do_frame, so we
   have to check before every function.  */

void
c6x_output_file_unwind (FILE * f)
{
  if (done_cfi_sections)
    return;

  /* Output a .cfi_sections directive.  */
  if (dwarf2out_do_frame ())
    {
      if (flag_unwind_tables || flag_exceptions)
	{
	  if (write_symbols == DWARF2_DEBUG
	      || write_symbols == VMS_AND_DWARF2_DEBUG)
	    asm_fprintf (f, "\t.cfi_sections .debug_frame, .c6xabi.exidx\n");
	  else
	    asm_fprintf (f, "\t.cfi_sections .c6xabi.exidx\n");
	}
      else
	asm_fprintf (f, "\t.cfi_sections .debug_frame\n");
      done_cfi_sections = true;
    }
}

/* Output unwind directives at the end of a function.  */

static void
c6x_output_fn_unwind (FILE * f)
{
  /* Return immediately if we are not generating unwinding tables.  */
  if (! (flag_unwind_tables || flag_exceptions))
    return;

  /* If this function will never be unwound, then mark it as such.  */
  if (!(flag_unwind_tables || crtl->uses_eh_lsda)
      && (TREE_NOTHROW (current_function_decl)
	  || crtl->all_throwers_are_sibcalls))
    fputs("\t.cantunwind\n", f);

  fputs ("\t.endp\n", f);
}
/* Stack and Calling.  */

int argument_registers[10] =
{
  REG_A4, REG_B4,
  REG_A6, REG_B6,
  REG_A8, REG_B8,
  REG_A10, REG_B10,
  REG_A12, REG_B12
};

/* Implements the macro INIT_CUMULATIVE_ARGS defined in c6x.h.  */

void
c6x_init_cumulative_args (CUMULATIVE_ARGS *cum, const_tree fntype, rtx libname,
			  int n_named_args ATTRIBUTE_UNUSED)
{
  cum->count = 0;
  cum->nregs = 10;
  if (!libname && fntype)
    {
      /* We need to find out the number of named arguments.  Unfortunately,
	 for incoming arguments, N_NAMED_ARGS is set to -1.  */
      if (stdarg_p (fntype))
	cum->nregs = type_num_arguments (fntype) - 1;
      if (cum->nregs > 10)
	cum->nregs = 10;
    }
}
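/* Sketch of the convention implemented here: at most ten arguments go in
   the registers listed in argument_registers[], and for a stdarg callee
   only the named arguments are register candidates, so that the
   anonymous arguments can be located on the stack.  */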
/* Implements the macro FUNCTION_ARG defined in c6x.h.  */

static rtx
c6x_function_arg (cumulative_args_t cum_v, machine_mode mode,
		  const_tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  if (cum->count >= cum->nregs)
    return NULL_RTX;
  if (type)
    {
      HOST_WIDE_INT size = int_size_in_bytes (type);
      if (TARGET_BIG_ENDIAN && AGGREGATE_TYPE_P (type))
	{
	  if (size > 4)
	    {
	      rtx reg1 = gen_rtx_REG (SImode, argument_registers[cum->count] + 1);
	      rtx reg2 = gen_rtx_REG (SImode, argument_registers[cum->count]);
	      rtvec vec = gen_rtvec (2, gen_rtx_EXPR_LIST (VOIDmode, reg1, const0_rtx),
				     gen_rtx_EXPR_LIST (VOIDmode, reg2, GEN_INT (4)));
	      return gen_rtx_PARALLEL (mode, vec);
	    }
	}
    }
  return gen_rtx_REG (mode, argument_registers[cum->count]);
}

static void
c6x_function_arg_advance (cumulative_args_t cum_v,
			  machine_mode mode ATTRIBUTE_UNUSED,
			  const_tree type ATTRIBUTE_UNUSED,
			  bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  cum->count++;
}
/* Return true if BLOCK_REG_PADDING (MODE, TYPE, FIRST) should return
   upward rather than downward.  */

bool
c6x_block_reg_pad_upward (machine_mode mode ATTRIBUTE_UNUSED,
			  const_tree type, bool first)
{
  HOST_WIDE_INT size;

  if (!TARGET_BIG_ENDIAN)
    return true;
  if (!first)
    return true;
  if (!type)
    return true;
  size = int_size_in_bytes (type);
  return size == 3;
}

/* Implement TARGET_FUNCTION_ARG_BOUNDARY.  */

static unsigned int
c6x_function_arg_boundary (machine_mode mode, const_tree type)
{
  unsigned int boundary = type ? TYPE_ALIGN (type) : GET_MODE_BITSIZE (mode);

  if (boundary > BITS_PER_WORD)
    return 2 * BITS_PER_WORD;

  if (mode == BLKmode)
    {
      HOST_WIDE_INT size = int_size_in_bytes (type);
      if (size > 4)
	return 2 * BITS_PER_WORD;
      if (boundary < BITS_PER_WORD)
	{
	  if (size >= 3)
	    return BITS_PER_WORD;
	  if (size >= 2)
	    return 2 * BITS_PER_UNIT;
	}
    }
  return boundary;
}

/* Implement TARGET_FUNCTION_ARG_ROUND_BOUNDARY.  */

static unsigned int
c6x_function_arg_round_boundary (machine_mode mode, const_tree type)
{
  return c6x_function_arg_boundary (mode, type);
}
/* TARGET_FUNCTION_VALUE implementation.  Returns an RTX representing the place
   where function FUNC returns or receives a value of data type TYPE.  */

static rtx
c6x_function_value (const_tree type, const_tree func ATTRIBUTE_UNUSED,
		    bool outgoing ATTRIBUTE_UNUSED)
{
  /* Functions return values in register A4.  When returning aggregates, we may
     have to adjust for endianness.  */
  if (TARGET_BIG_ENDIAN && type && AGGREGATE_TYPE_P (type))
    {
      HOST_WIDE_INT size = int_size_in_bytes (type);
      if (size > 4)
	{
	  rtx reg1 = gen_rtx_REG (SImode, REG_A4 + 1);
	  rtx reg2 = gen_rtx_REG (SImode, REG_A4);
	  rtvec vec = gen_rtvec (2, gen_rtx_EXPR_LIST (VOIDmode, reg1, const0_rtx),
				 gen_rtx_EXPR_LIST (VOIDmode, reg2, GEN_INT (4)));
	  return gen_rtx_PARALLEL (TYPE_MODE (type), vec);
	}
    }
  return gen_rtx_REG (TYPE_MODE (type), REG_A4);
}

/* Implement TARGET_LIBCALL_VALUE.  */

static rtx
c6x_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
{
  return gen_rtx_REG (mode, REG_A4);
}

/* TARGET_STRUCT_VALUE_RTX implementation.  */

static rtx
c6x_struct_value_rtx (tree type ATTRIBUTE_UNUSED, int incoming ATTRIBUTE_UNUSED)
{
  return gen_rtx_REG (Pmode, REG_A3);
}

/* Implement TARGET_FUNCTION_VALUE_REGNO_P.  */

static bool
c6x_function_value_regno_p (const unsigned int regno)
{
  return regno == REG_A4;
}
/* Types larger than 64 bit, and variable sized types, are passed by
   reference.  The callee must copy them; see c6x_callee_copies.  */

static bool
c6x_pass_by_reference (cumulative_args_t cum_v ATTRIBUTE_UNUSED,
		       machine_mode mode, const_tree type,
		       bool named ATTRIBUTE_UNUSED)
{
  int size = -1;
  if (type)
    size = int_size_in_bytes (type);
  else if (mode != VOIDmode)
    size = GET_MODE_SIZE (mode);
  return size > 2 * UNITS_PER_WORD || size == -1;
}

/* Decide whether a type should be returned in memory (true)
   or in a register (false).  This is called by the macro
   TARGET_RETURN_IN_MEMORY.  */

static bool
c6x_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  int size = int_size_in_bytes (type);
  return size > 2 * UNITS_PER_WORD || size == -1;
}

/* Values which must be returned in the most-significant end of the return
   register.  */

static bool
c6x_return_in_msb (const_tree valtype)
{
  HOST_WIDE_INT size = int_size_in_bytes (valtype);
  return TARGET_BIG_ENDIAN && AGGREGATE_TYPE_P (valtype) && size == 3;
}

/* Implement TARGET_CALLEE_COPIES.  */

static bool
c6x_callee_copies (cumulative_args_t cum_v ATTRIBUTE_UNUSED,
		   machine_mode mode ATTRIBUTE_UNUSED,
		   const_tree type ATTRIBUTE_UNUSED,
		   bool named ATTRIBUTE_UNUSED)
{
  return true;
}

/* Return the type to use as __builtin_va_list.  */
static tree
c6x_build_builtin_va_list (void)
{
  return build_pointer_type (char_type_node);
}
static void
c6x_asm_trampoline_template (FILE *f)
{
  fprintf (f, "\t.long\t0x0000002b\n"); /* mvkl .s2 fnlow,B0 */
  fprintf (f, "\t.long\t0x01000028\n"); /* || mvkl .s1 sclow,A2 */
  fprintf (f, "\t.long\t0x0000006b\n"); /* mvkh .s2 fnhigh,B0 */
  fprintf (f, "\t.long\t0x01000068\n"); /* || mvkh .s1 schigh,A2 */
  fprintf (f, "\t.long\t0x00000362\n"); /* b .s2 B0 */
  fprintf (f, "\t.long\t0x00008000\n"); /* nop 5 */
  fprintf (f, "\t.long\t0x00000000\n"); /* nop */
  fprintf (f, "\t.long\t0x00000000\n"); /* nop */
}
/* Emit RTL insns to initialize the variable parts of a trampoline at
   TRAMP.  FNADDR is an RTX for the address of the function's pure
   code.  CXT is an RTX for the static chain value for the function.  */

static void
c6x_initialize_trampoline (rtx tramp, tree fndecl, rtx cxt)
{
  rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
  rtx t1 = copy_to_reg (fnaddr);
  rtx t2 = copy_to_reg (cxt);
  rtx mask = gen_reg_rtx (SImode);
  int i;

  emit_block_move (tramp, assemble_trampoline_template (),
		   GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);

  emit_move_insn (mask, GEN_INT (0xffff << 7));

  for (i = 0; i < 4; i++)
    {
      rtx mem = adjust_address (tramp, SImode, i * 4);
      rtx t = (i & 1) ? t2 : t1;
      rtx v1 = gen_reg_rtx (SImode);
      rtx v2 = gen_reg_rtx (SImode);
      emit_move_insn (v1, mem);
      if (i < 2)
	emit_insn (gen_ashlsi3 (v2, t, GEN_INT (7)));
      else
	emit_insn (gen_lshrsi3 (v2, t, GEN_INT (9)));
      emit_insn (gen_andsi3 (v2, v2, mask));
      emit_insn (gen_iorsi3 (v2, v2, v1));
      emit_move_insn (mem, v2);
    }
#ifdef CLEAR_INSN_CACHE
  tramp = XEXP (tramp, 0);
  emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__gnu_clear_cache"),
		     LCT_NORMAL, VOIDmode, tramp, Pmode,
		     plus_constant (Pmode, tramp, TRAMPOLINE_SIZE), Pmode);
#endif
}
/* Determine whether c6x_output_mi_thunk can succeed.  */

static bool
c6x_can_output_mi_thunk (const_tree thunk ATTRIBUTE_UNUSED,
			 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
			 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
			 const_tree function ATTRIBUTE_UNUSED)
{
  return !TARGET_LONG_CALLS;
}

/* Output the assembler code for a thunk function.  THUNK is the
   declaration for the thunk function itself, FUNCTION is the decl for
   the target function.  DELTA is an immediate constant offset to be
   added to THIS.  If VCALL_OFFSET is nonzero, the word at
   *(*this + vcall_offset) should be added to THIS.  */

static void
c6x_output_mi_thunk (FILE *file ATTRIBUTE_UNUSED,
		     tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
		     HOST_WIDE_INT vcall_offset, tree function)
{
  const char *fnname = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (thunk));
  rtx xops[5];
  /* The this parameter is passed as the first argument.  */
  rtx this_rtx = gen_rtx_REG (Pmode, REG_A4);

  assemble_start_function (thunk, fnname);
  c6x_current_insn = NULL;

  xops[4] = XEXP (DECL_RTL (function), 0);
  if (!vcall_offset)
    {
      output_asm_insn ("b .s2 \t%4", xops);
      if (!delta)
	output_asm_insn ("nop 5", xops);
    }

  /* Adjust the this parameter by a fixed constant.  */
  if (delta)
    {
      xops[0] = GEN_INT (delta);
      xops[1] = this_rtx;
      if (delta >= -16 && delta <= 15)
	{
	  output_asm_insn ("add .s1 %0, %1, %1", xops);
	  if (!vcall_offset)
	    output_asm_insn ("nop 4", xops);
	}
      else if (delta >= 16 && delta < 32)
	{
	  output_asm_insn ("add .d1 %0, %1, %1", xops);
	  if (!vcall_offset)
	    output_asm_insn ("nop 4", xops);
	}
      else if (delta >= -32768 && delta < 32768)
	{
	  output_asm_insn ("mvk .s1 %0, A0", xops);
	  output_asm_insn ("add .d1 %1, A0, %1", xops);
	  if (!vcall_offset)
	    output_asm_insn ("nop 3", xops);
	}
      else
	{
	  output_asm_insn ("mvkl .s1 %0, A0", xops);
	  output_asm_insn ("mvkh .s1 %0, A0", xops);
	  output_asm_insn ("add .d1 %1, A0, %1", xops);
	  if (!vcall_offset)
	    output_asm_insn ("nop 3", xops);
	}
    }

  /* Adjust the this parameter by a value stored in the vtable.  */
  if (vcall_offset)
    {
      rtx a0tmp = gen_rtx_REG (Pmode, REG_A0);
      rtx a3tmp = gen_rtx_REG (Pmode, REG_A3);

      xops[1] = a3tmp;
      xops[2] = a0tmp;
      xops[3] = gen_rtx_MEM (Pmode, a0tmp);
      output_asm_insn ("mv .s1 a4, %2", xops);
      output_asm_insn ("ldw .d1t1 %3, %2", xops);

      /* Adjust the this parameter.  */
      xops[0] = gen_rtx_MEM (Pmode, plus_constant (Pmode, a0tmp,
						   vcall_offset));
      if (!memory_operand (xops[0], Pmode))
	{
	  rtx tmp2 = gen_rtx_REG (Pmode, REG_A1);
	  xops[0] = GEN_INT (vcall_offset);
	  xops[1] = tmp2;
	  output_asm_insn ("mvkl .s1 %0, %1", xops);
	  output_asm_insn ("mvkh .s1 %0, %1", xops);
	  output_asm_insn ("nop 2", xops);
	  output_asm_insn ("add .d1 %2, %1, %2", xops);
	  xops[0] = gen_rtx_MEM (Pmode, a0tmp);
	}
      else
	output_asm_insn ("nop 4", xops);
      xops[2] = this_rtx;
      output_asm_insn ("ldw .d1t1 %0, %1", xops);
      output_asm_insn ("|| b .s2 \t%4", xops);
      output_asm_insn ("nop 4", xops);
      output_asm_insn ("add .d1 %2, %1, %2", xops);
    }
  assemble_end_function (thunk, fnname);
}
/* Return true if EXP goes in small data/bss.  */

static bool
c6x_in_small_data_p (const_tree exp)
{
  /* We want to merge strings, so we never consider them small data.  */
  if (TREE_CODE (exp) == STRING_CST)
    return false;

  /* Functions are never small data.  */
  if (TREE_CODE (exp) == FUNCTION_DECL)
    return false;

  if (TREE_CODE (exp) == VAR_DECL && DECL_WEAK (exp))
    return false;

  if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
    {
      const char *section = DECL_SECTION_NAME (exp);

      if (strcmp (section, ".neardata") == 0
	  || strncmp (section, ".neardata.", 10) == 0
	  || strncmp (section, ".gnu.linkonce.s.", 16) == 0
	  || strcmp (section, ".bss") == 0
	  || strncmp (section, ".bss.", 5) == 0
	  || strncmp (section, ".gnu.linkonce.sb.", 17) == 0
	  || strcmp (section, ".rodata") == 0
	  || strncmp (section, ".rodata.", 8) == 0
	  || strncmp (section, ".gnu.linkonce.s2.", 17) == 0)
	return true;
    }
  else
    return PLACE_IN_SDATA_P (exp);

  return false;
}
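/* Objects placed in small data this way are addressed with short,
   B14-relative (data page pointer) offsets; see the "+B14(...)" syntax
   emitted by c6x_print_address_operand below.  */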
/* Return a section for X.  The only special thing we do here is to
   honor small data.  We don't have a tree type, so we can't use the
   PLACE_IN_SDATA_P macro we use everywhere else; we choose to place
   everything sized 8 bytes or smaller into small data.  */

static section *
c6x_select_rtx_section (machine_mode mode, rtx x,
			unsigned HOST_WIDE_INT align)
{
  if (c6x_sdata_mode == C6X_SDATA_ALL
      || (c6x_sdata_mode != C6X_SDATA_NONE && GET_MODE_SIZE (mode) <= 8))
    /* ??? Consider using mergeable sdata sections.  */
    return sdata_section;
  else
    return default_elf_select_rtx_section (mode, x, align);
}
static section *
c6x_elf_select_section (tree decl, int reloc,
			unsigned HOST_WIDE_INT align)
{
  const char *sname = NULL;
  unsigned int flags = SECTION_WRITE;
  if (c6x_in_small_data_p (decl))
    {
      switch (categorize_decl_for_section (decl, reloc))
	{
	case SECCAT_SRODATA:
	  sname = ".rodata";
	  flags = 0;
	  break;
	case SECCAT_SDATA:
	  sname = ".neardata";
	  break;
	case SECCAT_SBSS:
	  sname = ".bss";
	  flags |= SECTION_BSS;
	  break;
	default:
	  break;
	}
    }
  else
    {
      switch (categorize_decl_for_section (decl, reloc))
	{
	case SECCAT_DATA:
	  sname = ".fardata";
	  break;
	case SECCAT_DATA_REL:
	  sname = ".fardata.rel";
	  break;
	case SECCAT_DATA_REL_LOCAL:
	  sname = ".fardata.rel.local";
	  break;
	case SECCAT_DATA_REL_RO:
	  sname = ".fardata.rel.ro";
	  break;
	case SECCAT_DATA_REL_RO_LOCAL:
	  sname = ".fardata.rel.ro.local";
	  break;
	case SECCAT_BSS:
	  sname = ".far";
	  flags |= SECTION_BSS;
	  break;
	case SECCAT_RODATA:
	  sname = ".const";
	  flags = 0;
	  break;
	case SECCAT_SRODATA:
	case SECCAT_SDATA:
	case SECCAT_SBSS:
	  gcc_unreachable ();
	default:
	  break;
	}
    }
  if (sname)
    {
      /* We might get called with string constants, but get_named_section
	 doesn't like them as they are not DECLs.  Also, we need to set
	 flags in that case.  */
      if (!DECL_P (decl))
	return get_section (sname, flags, NULL);
      return get_named_section (decl, sname, reloc);
    }

  return default_elf_select_section (decl, reloc, align);
}

/* Build up a unique section name, expressed as a
   STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
   RELOC indicates whether the initial value of EXP requires
   link-time relocations.  */

static void ATTRIBUTE_UNUSED
c6x_elf_unique_section (tree decl, int reloc)
{
  const char *prefix = NULL;
  /* We only need to use .gnu.linkonce if we don't have COMDAT groups.  */
  bool one_only = DECL_COMDAT_GROUP (decl) && !HAVE_COMDAT_GROUP;

  if (c6x_in_small_data_p (decl))
    {
      switch (categorize_decl_for_section (decl, reloc))
	{
	case SECCAT_SDATA:
	  prefix = one_only ? ".s" : ".neardata";
	  break;
	case SECCAT_SBSS:
	  prefix = one_only ? ".sb" : ".bss";
	  break;
	case SECCAT_SRODATA:
	  prefix = one_only ? ".s2" : ".rodata";
	  break;
	case SECCAT_RODATA_MERGE_STR:
	case SECCAT_RODATA_MERGE_STR_INIT:
	case SECCAT_RODATA_MERGE_CONST:
	case SECCAT_RODATA:
	case SECCAT_DATA:
	case SECCAT_DATA_REL:
	case SECCAT_DATA_REL_LOCAL:
	case SECCAT_DATA_REL_RO:
	case SECCAT_DATA_REL_RO_LOCAL:
	  gcc_unreachable ();
	default:
	  /* Everything else we place into default sections and hope for the
	     best.  */
	  break;
	}
    }
  else
    {
      switch (categorize_decl_for_section (decl, reloc))
	{
	case SECCAT_DATA:
	case SECCAT_DATA_REL:
	case SECCAT_DATA_REL_LOCAL:
	case SECCAT_DATA_REL_RO:
	case SECCAT_DATA_REL_RO_LOCAL:
	  prefix = one_only ? ".fd" : ".fardata";
	  break;
	case SECCAT_BSS:
	  prefix = one_only ? ".fb" : ".far";
	  break;
	case SECCAT_RODATA:
	case SECCAT_RODATA_MERGE_STR:
	case SECCAT_RODATA_MERGE_STR_INIT:
	case SECCAT_RODATA_MERGE_CONST:
	  prefix = one_only ? ".fr" : ".const";
	  break;
	case SECCAT_SRODATA:
	case SECCAT_SDATA:
	case SECCAT_SBSS:
	  gcc_unreachable ();
	default:
	  break;
	}
    }

  if (prefix)
    {
      const char *name, *linkonce;
      char *string;

      name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
      name = targetm.strip_name_encoding (name);

      /* If we're using one_only, then there needs to be a .gnu.linkonce
	 prefix to the section name.  */
      linkonce = one_only ? ".gnu.linkonce" : "";

      string = ACONCAT ((linkonce, prefix, ".", name, NULL));

      set_decl_section_name (decl, string);
    }
  else
    default_unique_section (decl, reloc);
}
static unsigned int
c6x_section_type_flags (tree decl, const char *name, int reloc)
{
  unsigned int flags = 0;

  if (strcmp (name, ".far") == 0
      || strncmp (name, ".far.", 5) == 0)
    flags |= SECTION_BSS;

  flags |= default_section_type_flags (decl, name, reloc);

  return flags;
}
/* Checks whether the given CALL_EXPR would use a caller saved
   register.  This is used to decide whether sibling call optimization
   could be performed on the respective function call.  */

static bool
c6x_call_saved_register_used (tree call_expr)
{
  CUMULATIVE_ARGS cum_v;
  cumulative_args_t cum;
  HARD_REG_SET call_saved_regset;
  tree parameter;
  machine_mode mode;
  tree type;
  rtx parm_rtx;
  int i;

  INIT_CUMULATIVE_ARGS (cum_v, NULL, NULL, 0, 0);
  cum = pack_cumulative_args (&cum_v);

  COMPL_HARD_REG_SET (call_saved_regset, call_used_reg_set);
  for (i = 0; i < call_expr_nargs (call_expr); i++)
    {
      parameter = CALL_EXPR_ARG (call_expr, i);
      gcc_assert (parameter);

      /* For an undeclared variable passed as parameter we will get
	 an ERROR_MARK node here.  */
      if (TREE_CODE (parameter) == ERROR_MARK)
	return true;

      type = TREE_TYPE (parameter);
      gcc_assert (type);

      mode = TYPE_MODE (type);
      gcc_assert (mode);

      if (pass_by_reference (&cum_v, mode, type, true))
	{
	  mode = Pmode;
	  type = build_pointer_type (type);
	}

      parm_rtx = c6x_function_arg (cum, mode, type, 0);

      c6x_function_arg_advance (cum, mode, type, 0);

      if (!parm_rtx)
	continue;

      if (REG_P (parm_rtx)
	  && overlaps_hard_reg_set_p (call_saved_regset, GET_MODE (parm_rtx),
				      REGNO (parm_rtx)))
	return true;
      if (GET_CODE (parm_rtx) == PARALLEL)
	{
	  int n = XVECLEN (parm_rtx, 0);
	  while (n-- > 0)
	    {
	      rtx x = XEXP (XVECEXP (parm_rtx, 0, n), 0);
	      if (REG_P (x)
		  && overlaps_hard_reg_set_p (call_saved_regset,
					      GET_MODE (x), REGNO (x)))
		return true;
	    }
	}
    }
  return false;
}

/* Decide whether we can make a sibling call to a function.  DECL is the
   declaration of the function being targeted by the call and EXP is the
   CALL_EXPR representing the call.  */

static bool
c6x_function_ok_for_sibcall (tree decl, tree exp)
{
  /* Registers A10, A12, B10 and B12 are available as argument
     registers but unfortunately caller saved.  This makes functions
     needing these registers for arguments not suitable for
     sibcalls.  */
  if (c6x_call_saved_register_used (exp))
    return false;

  if (!flag_pic)
    return true;

  if (TARGET_DSBT)
    {
      /* When compiling for DSBT, the calling function must be local,
	 so that when we reload B14 in the sibcall epilogue, it will
	 not change its value.  */
      struct cgraph_local_info *this_func;

      if (!decl)
	/* Not enough information.  */
	return false;

      this_func = cgraph_node::local_info (current_function_decl);
      return this_func->local;
    }

  return true;
}
/* Return true if DECL is known to be linked into section SECTION.  */

static bool
c6x_function_in_section_p (tree decl, section *section)
{
  /* We can only be certain about functions defined in the same
     compilation unit.  */
  if (!TREE_STATIC (decl))
    return false;

  /* Make sure that SYMBOL always binds to the definition in this
     compilation unit.  */
  if (!targetm.binds_local_p (decl))
    return false;

  /* If DECL_SECTION_NAME is set, assume it is trustworthy.  */
  if (!DECL_SECTION_NAME (decl))
    {
      /* Make sure that we will not create a unique section for DECL.  */
      if (flag_function_sections || DECL_COMDAT_GROUP (decl))
	return false;
    }

  return function_section (decl) == section;
}

/* Return true if a call to OP, which is a SYMBOL_REF, must be expanded
   as a long call.  */
bool
c6x_long_call_p (rtx op)
{
  tree decl;

  if (!TARGET_LONG_CALLS)
    return false;

  decl = SYMBOL_REF_DECL (op);

  /* Try to determine whether the symbol is in the same section as the current
     function.  Be conservative, and only cater for cases in which the
     whole of the current function is placed in the same section.  */
  if (decl != NULL_TREE
      && !flag_reorder_blocks_and_partition
      && TREE_CODE (decl) == FUNCTION_DECL
      && c6x_function_in_section_p (decl, current_function_section ()))
    return false;

  return true;
}
/* Emit the sequence for a call.  */
void
c6x_expand_call (rtx retval, rtx address, bool sibcall)
{
  rtx callee = XEXP (address, 0);
  rtx call_insn;

  if (!c6x_call_operand (callee, Pmode))
    {
      callee = force_reg (Pmode, callee);
      address = change_address (address, Pmode, callee);
    }
  call_insn = gen_rtx_CALL (VOIDmode, address, const0_rtx);
  if (sibcall)
    {
      call_insn = emit_call_insn (call_insn);
      use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
	       gen_rtx_REG (Pmode, REG_B3));
    }
  else
    {
      if (retval == NULL_RTX)
	call_insn = emit_call_insn (call_insn);
      else
	call_insn = emit_call_insn (gen_rtx_SET (retval, call_insn));
    }
  if (flag_pic)
    use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), pic_offset_table_rtx);
}
/* Legitimize PIC addresses.  If the address is already position-independent,
   we return ORIG.  Newly generated position-independent addresses go into a
   reg.  This is REG if nonzero, otherwise we allocate register(s) as
   necessary.  PICREG is the register holding the pointer to the PIC offset
   table.  */

static rtx
legitimize_pic_address (rtx orig, rtx reg, rtx picreg)
{
  rtx addr = orig;
  rtx new_rtx = orig;

  if (GET_CODE (addr) == SYMBOL_REF || GET_CODE (addr) == LABEL_REF)
    {
      int unspec = UNSPEC_LOAD_GOT;
      rtx tmp;

      if (reg == 0)
	{
	  gcc_assert (can_create_pseudo_p ());
	  reg = gen_reg_rtx (Pmode);
	}
      if (flag_pic == 2)
	{
	  if (can_create_pseudo_p ())
	    tmp = gen_reg_rtx (Pmode);
	  else
	    tmp = reg;
	  emit_insn (gen_movsi_gotoff_high (tmp, addr));
	  emit_insn (gen_movsi_gotoff_lo_sum (tmp, tmp, addr));
	  emit_insn (gen_load_got_gotoff (reg, picreg, tmp));
	}
      else
	{
	  tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), unspec);
	  new_rtx = gen_const_mem (Pmode, gen_rtx_PLUS (Pmode, picreg, tmp));

	  emit_move_insn (reg, new_rtx);
	}
      if (picreg == pic_offset_table_rtx)
	crtl->uses_pic_offset_table = 1;
      return reg;
    }
  else if (GET_CODE (addr) == CONST || GET_CODE (addr) == PLUS)
    {
      rtx base;

      if (GET_CODE (addr) == CONST)
	{
	  addr = XEXP (addr, 0);
	  gcc_assert (GET_CODE (addr) == PLUS);
	}

      if (XEXP (addr, 0) == picreg)
	return orig;

      if (reg == 0)
	{
	  gcc_assert (can_create_pseudo_p ());
	  reg = gen_reg_rtx (Pmode);
	}

      base = legitimize_pic_address (XEXP (addr, 0), reg, picreg);
      addr = legitimize_pic_address (XEXP (addr, 1),
				     base == reg ? NULL_RTX : reg,
				     picreg);

      if (GET_CODE (addr) == CONST_INT)
	{
	  gcc_assert (! reload_in_progress && ! reload_completed);
	  addr = force_reg (Pmode, addr);
	}

      if (GET_CODE (addr) == PLUS && CONSTANT_P (XEXP (addr, 1)))
	{
	  base = gen_rtx_PLUS (Pmode, base, XEXP (addr, 0));
	  addr = XEXP (addr, 1);
	}

      return gen_rtx_PLUS (Pmode, base, addr);
    }

  return new_rtx;
}
/* Expand a move operation in mode MODE.  The operands are in OPERANDS.
   Returns true if no further code must be generated, false if the caller
   should generate an insn to move OPERANDS[1] to OPERANDS[0].  */
bool
expand_move (rtx *operands, machine_mode mode)
{
  rtx dest = operands[0];
  rtx op = operands[1];

  if ((reload_in_progress | reload_completed) == 0
      && GET_CODE (dest) == MEM && GET_CODE (op) != REG)
    operands[1] = force_reg (mode, op);
  else if (mode == SImode && symbolic_operand (op, SImode))
    {
      if (flag_pic)
	{
	  if (sdata_symbolic_operand (op, SImode))
	    {
	      emit_insn (gen_load_sdata_pic (dest, pic_offset_table_rtx, op));
	      crtl->uses_pic_offset_table = 1;
	      return true;
	    }
	  else
	    {
	      rtx temp = (reload_completed || reload_in_progress
			  ? dest : gen_reg_rtx (Pmode));

	      operands[1] = legitimize_pic_address (op, temp,
						    pic_offset_table_rtx);
	    }
	}
      else if (reload_completed
	       && !sdata_symbolic_operand (op, SImode))
	{
	  emit_insn (gen_movsi_high (dest, op));
	  emit_insn (gen_movsi_lo_sum (dest, dest, op));
	  return true;
	}
    }
  return false;
}
/* This function is called when we're about to expand an integer compare
   operation which performs COMPARISON.  It examines the second operand,
   and if it is an integer constant that cannot be used directly on the
   current machine in a comparison insn, it returns true.  */
bool
c6x_force_op_for_comparison_p (enum rtx_code code, rtx op)
{
  if (!CONST_INT_P (op) || satisfies_constraint_Iu4 (op))
    return false;

  if ((code == EQ || code == LT || code == GT)
      && !satisfies_constraint_Is5 (op))
    return true;
  if ((code == GTU || code == LTU)
      && (!TARGET_INSNS_64 || !satisfies_constraint_Iu5 (op)))
    return true;

  return false;
}
/* Emit comparison instruction if necessary, returning the expression
   that holds the compare result in the proper mode.  Return the comparison
   that should be used in the jump insn.  */

rtx
c6x_expand_compare (rtx comparison, machine_mode mode)
{
  enum rtx_code code = GET_CODE (comparison);
  rtx op0 = XEXP (comparison, 0);
  rtx op1 = XEXP (comparison, 1);
  rtx cmp;
  enum rtx_code jump_code = code;
  machine_mode op_mode = GET_MODE (op0);

  if (op_mode == DImode && (code == NE || code == EQ) && op1 == const0_rtx)
    {
      rtx t = gen_reg_rtx (SImode);
      emit_insn (gen_iorsi3 (t, gen_lowpart (SImode, op0),
			     gen_highpart (SImode, op0)));
      op_mode = SImode;
      cmp = t;
    }
  else if (op_mode == DImode)
    {
      rtx lo[2], high[2];
      rtx cmp1, cmp2;

      if (code == NE || code == GEU || code == LEU || code == GE || code == LE)
	{
	  code = reverse_condition (code);
	  jump_code = EQ;
	}
      else
	jump_code = NE;

      split_di (&op0, 1, lo, high);
      split_di (&op1, 1, lo + 1, high + 1);

      if (c6x_force_op_for_comparison_p (code, high[1])
	  || c6x_force_op_for_comparison_p (EQ, high[1]))
	high[1] = force_reg (SImode, high[1]);

      cmp1 = gen_reg_rtx (SImode);
      cmp2 = gen_reg_rtx (SImode);
      emit_insn (gen_rtx_SET (cmp1, gen_rtx_fmt_ee (code, SImode,
						    high[0], high[1])));
      if (code == EQ)
	{
	  if (c6x_force_op_for_comparison_p (code, lo[1]))
	    lo[1] = force_reg (SImode, lo[1]);
	  emit_insn (gen_rtx_SET (cmp2, gen_rtx_fmt_ee (code, SImode,
							lo[0], lo[1])));
	  emit_insn (gen_andsi3 (cmp1, cmp1, cmp2));
	}
      else
	{
	  emit_insn (gen_rtx_SET (cmp2, gen_rtx_EQ (SImode, high[0],
						    high[1])));
	  if (code == GT)
	    code = GTU;
	  else if (code == LT)
	    code = LTU;
	  if (c6x_force_op_for_comparison_p (code, lo[1]))
	    lo[1] = force_reg (SImode, lo[1]);
	  emit_insn (gen_cmpsi_and (cmp2, gen_rtx_fmt_ee (code, SImode,
							  lo[0], lo[1]),
				    lo[0], lo[1], cmp2));
	  emit_insn (gen_iorsi3 (cmp1, cmp1, cmp2));
	}
      cmp = cmp1;
    }
  else if (TARGET_FP && !flag_finite_math_only
	   && (op_mode == DFmode || op_mode == SFmode)
	   && code != EQ && code != NE && code != LT && code != GT
	   && code != UNLE && code != UNGE)
    {
      enum rtx_code code1, code2, code3;
      rtx (*fn) (rtx, rtx, rtx, rtx, rtx);

      jump_code = NE;
      code3 = UNKNOWN;
      switch (code)
	{
	case ORDERED:
	case UNORDERED:
	  code1 = LT;
	  code2 = GT;
	  code3 = EQ;
	  if (code == UNORDERED)
	    jump_code = EQ;
	  break;
	case LTGT:
	case UNEQ:
	  code1 = LT;
	  code2 = GT;
	  if (code == UNEQ)
	    jump_code = EQ;
	  break;
	case LE:
	case GE:
	case UNGT:
	case UNLT:
	  code1 = code == LE || code == UNGT ? LT : GT;
	  code2 = EQ;
	  if (code == UNGT || code == UNLT)
	    jump_code = EQ;
	  break;
	default:
	  gcc_unreachable ();
	}

      cmp = gen_reg_rtx (SImode);
      emit_insn (gen_rtx_SET (cmp, gen_rtx_fmt_ee (code1, SImode, op0, op1)));
      fn = op_mode == DFmode ? gen_cmpdf_ior : gen_cmpsf_ior;
      emit_insn (fn (cmp, gen_rtx_fmt_ee (code2, SImode, op0, op1),
		     op0, op1, cmp));
      if (code3 != UNKNOWN)
	emit_insn (fn (cmp, gen_rtx_fmt_ee (code3, SImode, op0, op1),
		       op0, op1, cmp));
    }
  else if (op_mode == SImode && (code == NE || code == EQ) && op1 == const0_rtx)
    cmp = op0;
  else
    {
      bool is_fp_libfunc;
      is_fp_libfunc = !TARGET_FP && (op_mode == DFmode || op_mode == SFmode);

      if ((code == NE || code == GEU || code == LEU || code == GE || code == LE)
	  && !is_fp_libfunc)
	{
	  code = reverse_condition (code);
	  jump_code = EQ;
	}
      else if (code == UNGE)
	{
	  code = LT;
	  jump_code = EQ;
	}
      else if (code == UNLE)
	{
	  code = GT;
	  jump_code = EQ;
	}
      else
	jump_code = NE;

      if (is_fp_libfunc)
	{
	  rtx_insn *insns;
	  rtx libfunc;

	  switch (code)
	    {
	    case EQ:
	      libfunc = op_mode == DFmode ? eqdf_libfunc : eqsf_libfunc;
	      break;
	    case NE:
	      libfunc = op_mode == DFmode ? nedf_libfunc : nesf_libfunc;
	      break;
	    case GT:
	      libfunc = op_mode == DFmode ? gtdf_libfunc : gtsf_libfunc;
	      break;
	    case GE:
	      libfunc = op_mode == DFmode ? gedf_libfunc : gesf_libfunc;
	      break;
	    case LT:
	      libfunc = op_mode == DFmode ? ltdf_libfunc : ltsf_libfunc;
	      break;
	    case LE:
	      libfunc = op_mode == DFmode ? ledf_libfunc : lesf_libfunc;
	      break;
	    default:
	      gcc_unreachable ();
	    }
	  start_sequence ();

	  cmp = emit_library_call_value (libfunc, 0, LCT_CONST, SImode,
					 op0, op_mode, op1, op_mode);
	  insns = get_insns ();
	  end_sequence ();

	  emit_libcall_block (insns, cmp, cmp,
			      gen_rtx_fmt_ee (code, SImode, op0, op1));
	}
      else
	{
	  cmp = gen_reg_rtx (SImode);
	  if (c6x_force_op_for_comparison_p (code, op1))
	    op1 = force_reg (SImode, op1);
	  emit_insn (gen_rtx_SET (cmp, gen_rtx_fmt_ee (code, SImode,
						       op0, op1)));
	}
    }

  return gen_rtx_fmt_ee (jump_code, mode, cmp, const0_rtx);
}
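/* A small worked example of the DImode == 0 shortcut above: for
   `(long long) x == 0' we emit `t = (SImode) x | (SImode) (x >> 32)'
   and then branch on `t == 0', avoiding a full double-word
   comparison.  */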
/* Return one word of double-word value OP.  HIGH_P is true to select the
   high part, false to select the low part.  When encountering auto-increment
   addressing, we make the assumption that the low part is going to be accessed
   first.  */

rtx
c6x_subword (rtx op, bool high_p)
{
  unsigned int byte;
  machine_mode mode;

  mode = GET_MODE (op);
  if (mode == VOIDmode)
    mode = DImode;

  if (TARGET_BIG_ENDIAN ? !high_p : high_p)
    byte = UNITS_PER_WORD;
  else
    byte = 0;

  if (MEM_P (op))
    {
      rtx addr = XEXP (op, 0);
      if (GET_CODE (addr) == PLUS || REG_P (addr))
	return adjust_address (op, word_mode, byte);
      /* FIXME: should really support autoincrement addressing for
	 multi-word modes.  */
      gcc_unreachable ();
    }

  return simplify_gen_subreg (word_mode, op, mode, byte);
}

/* Split one or more DImode RTL references into pairs of SImode
   references.  The RTL can be REG, offsettable MEM, integer constant, or
   CONST_DOUBLE.  "operands" is a pointer to an array of DImode RTL to
   split and "num" is its length.  lo_half and hi_half are output arrays
   that parallel "operands".  */

void
split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
{
  while (num--)
    {
      rtx op = operands[num];

      lo_half[num] = c6x_subword (op, false);
      hi_half[num] = c6x_subword (op, true);
    }
}
/* Return true if VAL is a mask valid for a clr instruction.  */
bool
c6x_valid_mask_p (HOST_WIDE_INT val)
{
  int i;
  for (i = 0; i < 32; i++)
    if (!(val & ((unsigned HOST_WIDE_INT)1 << i)))
      break;
  for (; i < 32; i++)
    if (val & ((unsigned HOST_WIDE_INT)1 << i))
      break;
  for (; i < 32; i++)
    if (!(val & ((unsigned HOST_WIDE_INT)1 << i)))
      return false;
  return true;
}
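/* In other words, the clear bits must form one contiguous field; e.g.
   0xffff00ff (a single run of zeros in bits 8-15) is a valid clr mask,
   while 0xff00ff00 is not.  */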
/* Expand a block move for a cpymemM pattern.  */

bool
c6x_expand_cpymem (rtx dst, rtx src, rtx count_exp, rtx align_exp,
		   rtx expected_align_exp ATTRIBUTE_UNUSED,
		   rtx expected_size_exp ATTRIBUTE_UNUSED)
{
  unsigned HOST_WIDE_INT align = 1;
  unsigned HOST_WIDE_INT src_mem_align, dst_mem_align, min_mem_align;
  unsigned HOST_WIDE_INT count = 0, offset = 0;
  unsigned int biggest_move = TARGET_STDW ? 8 : 4;

  if (CONST_INT_P (align_exp))
    align = INTVAL (align_exp);

  src_mem_align = MEM_ALIGN (src) / BITS_PER_UNIT;
  dst_mem_align = MEM_ALIGN (dst) / BITS_PER_UNIT;
  min_mem_align = MIN (src_mem_align, dst_mem_align);

  if (min_mem_align > align)
    align = min_mem_align / BITS_PER_UNIT;
  if (src_mem_align < align)
    src_mem_align = align;
  if (dst_mem_align < align)
    dst_mem_align = align;

  if (CONST_INT_P (count_exp))
    count = INTVAL (count_exp);
  else
    return false;

  /* Make sure we don't need to care about overflow later on.  */
  if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
    return false;

  if (count >= 28 && (count & 3) == 0 && align >= 4)
    {
      tree dst_expr = MEM_EXPR (dst);
      tree src_expr = MEM_EXPR (src);
      rtx fn = TARGET_INSNS_64PLUS ? strasgi64p_libfunc : strasgi_libfunc;
      rtx srcreg = force_reg (Pmode, XEXP (src, 0));
      rtx dstreg = force_reg (Pmode, XEXP (dst, 0));

      if (src_expr)
	mark_addressable (src_expr);
      if (dst_expr)
	mark_addressable (dst_expr);
      emit_library_call (fn, LCT_NORMAL, VOIDmode,
			 dstreg, Pmode, srcreg, Pmode, count_exp, SImode);
      return true;
    }

  if (biggest_move > align && !TARGET_INSNS_64)
    biggest_move = align;

  if (count / biggest_move > 7)
    return false;

  while (count > 0)
    {
      rtx reg, reg_lowpart;
      machine_mode srcmode, dstmode;
      unsigned HOST_WIDE_INT src_size, dst_size, src_left;
      int shift;
      rtx srcmem, dstmem;

      while (biggest_move > count)
	biggest_move /= 2;

      src_size = dst_size = biggest_move;
      if (src_size > src_mem_align && src_size == 2)
	src_size = 1;
      if (dst_size > dst_mem_align && dst_size == 2)
	dst_size = 1;

      if (dst_size > src_size)
	dst_size = src_size;

      srcmode = int_mode_for_size (src_size * BITS_PER_UNIT, 0).require ();
      dstmode = int_mode_for_size (dst_size * BITS_PER_UNIT, 0).require ();
      if (src_size >= 4)
	reg_lowpart = reg = gen_reg_rtx (srcmode);
      else
	{
	  reg = gen_reg_rtx (SImode);
	  reg_lowpart = gen_lowpart (srcmode, reg);
	}

      srcmem = adjust_address (copy_rtx (src), srcmode, offset);

      if (src_size > src_mem_align)
	{
	  enum insn_code icode = (srcmode == SImode ? CODE_FOR_movmisalignsi
				  : CODE_FOR_movmisaligndi);
	  emit_insn (GEN_FCN (icode) (reg_lowpart, srcmem));
	}
      else
	emit_move_insn (reg_lowpart, srcmem);

      src_left = src_size;
      shift = TARGET_BIG_ENDIAN ? (src_size - dst_size) * BITS_PER_UNIT : 0;
      while (src_left > 0)
	{
	  rtx dstreg = reg_lowpart;

	  if (src_size > dst_size)
	    {
	      rtx srcword = reg;
	      int shift_amount = shift & (BITS_PER_WORD - 1);
	      if (src_size > 4)
		srcword = operand_subword_force (srcword, src_left >= 4 ? 0 : 4,
						 SImode);
	      if (shift_amount > 0)
		{
		  dstreg = gen_reg_rtx (SImode);
		  emit_insn (gen_lshrsi3 (dstreg, srcword,
					  GEN_INT (shift_amount)));
		}
	      else
		dstreg = srcword;
	      dstreg = gen_lowpart (dstmode, dstreg);
	    }

	  dstmem = adjust_address (copy_rtx (dst), dstmode, offset);
	  if (dst_size > dst_mem_align)
	    {
	      enum insn_code icode = (dstmode == SImode ? CODE_FOR_movmisalignsi
				      : CODE_FOR_movmisaligndi);
	      emit_insn (GEN_FCN (icode) (dstmem, dstreg));
	    }
	  else
	    emit_move_insn (dstmem, dstreg);

	  if (TARGET_BIG_ENDIAN)
	    shift -= dst_size * BITS_PER_UNIT;
	  else
	    shift += dst_size * BITS_PER_UNIT;
	  offset += dst_size;
	  src_left -= dst_size;
	}
      count -= src_size;
    }
  return true;
}
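/* Usage note: for a constant-size, word-aligned copy of 28 bytes or
   more the code above prefers a single __c6xabi_strasgi call; smaller
   or less aligned copies are expanded inline, limited to roughly seven
   moves of at most biggest_move (8 with STDW, otherwise 4) bytes
   each.  */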
/* Subroutine of print_address_operand, print a single address offset OFF for
   a memory access of mode MEM_MODE, choosing between normal form and scaled
   form depending on the type of the insn.  Misaligned memory references must
   use the scaled form.  */

static void
print_address_offset (FILE *file, rtx off, machine_mode mem_mode)
{
  rtx pat;

  if (c6x_current_insn != NULL_RTX)
    {
      pat = PATTERN (c6x_current_insn);
      if (GET_CODE (pat) == COND_EXEC)
	pat = COND_EXEC_CODE (pat);
      if (GET_CODE (pat) == PARALLEL)
	pat = XVECEXP (pat, 0, 0);

      if (GET_CODE (pat) == SET
	  && GET_CODE (SET_SRC (pat)) == UNSPEC
	  && XINT (SET_SRC (pat), 1) == UNSPEC_MISALIGNED_ACCESS)
	{
	  gcc_assert (CONST_INT_P (off)
		      && (INTVAL (off) & (GET_MODE_SIZE (mem_mode) - 1)) == 0);
	  fprintf (file, "[" HOST_WIDE_INT_PRINT_DEC "]",
		   INTVAL (off) / GET_MODE_SIZE (mem_mode));
	  return;
	}
    }
  fputs ("(", file);
  output_address (mem_mode, off);
  fputs (")", file);
}

static bool
c6x_print_operand_punct_valid_p (unsigned char c)
{
  return c == '$' || c == '.' || c == '|';
}

static void c6x_print_operand (FILE *, rtx, int);
/* Subroutine of c6x_print_operand; used to print a memory reference X to FILE.  */

static void
c6x_print_address_operand (FILE *file, rtx x, machine_mode mem_mode)
{
  rtx off;
  switch (GET_CODE (x))
    {
    case PRE_MODIFY:
    case POST_MODIFY:
      if (GET_CODE (x) == POST_MODIFY)
	output_address (mem_mode, XEXP (x, 0));
      off = XEXP (XEXP (x, 1), 1);
      if (XEXP (x, 0) == stack_pointer_rtx)
	{
	  if (GET_CODE (x) == PRE_MODIFY)
	    gcc_assert (INTVAL (off) > 0);
	  else
	    gcc_assert (INTVAL (off) < 0);
	}
      if (CONST_INT_P (off) && INTVAL (off) < 0)
	{
	  fprintf (file, "--");
	  off = GEN_INT (-INTVAL (off));
	}
      else
	fprintf (file, "++");
      if (GET_CODE (x) == PRE_MODIFY)
	output_address (mem_mode, XEXP (x, 0));
      print_address_offset (file, off, mem_mode);
      break;

    case PLUS:
      off = XEXP (x, 1);
      if (CONST_INT_P (off) && INTVAL (off) < 0)
	{
	  fprintf (file, "-");
	  off = GEN_INT (-INTVAL (off));
	}
      else
	fprintf (file, "+");
      output_address (mem_mode, XEXP (x, 0));
      print_address_offset (file, off, mem_mode);
      break;

    case PRE_DEC:
      gcc_assert (XEXP (x, 0) != stack_pointer_rtx);
      fprintf (file, "--");
      output_address (mem_mode, XEXP (x, 0));
      fprintf (file, "[1]");
      break;
    case PRE_INC:
      fprintf (file, "++");
      output_address (mem_mode, XEXP (x, 0));
      fprintf (file, "[1]");
      break;
    case POST_INC:
      gcc_assert (XEXP (x, 0) != stack_pointer_rtx);
      output_address (mem_mode, XEXP (x, 0));
      fprintf (file, "++[1]");
      break;
    case POST_DEC:
      output_address (mem_mode, XEXP (x, 0));
      fprintf (file, "--[1]");
      break;

    case SYMBOL_REF:
    case CONST:
    case LABEL_REF:
      gcc_assert (sdata_symbolic_operand (x, Pmode));
      fprintf (file, "+B14(");
      output_addr_const (file, x);
      fprintf (file, ")");
      break;

    case UNSPEC:
      switch (XINT (x, 1))
	{
	case UNSPEC_LOAD_GOT:
	  fputs ("$GOT(", file);
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs (")", file);
	  break;
	case UNSPEC_LOAD_SDATA:
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  break;
	default:
	  gcc_unreachable ();
	}
      break;

    default:
      gcc_assert (GET_CODE (x) != MEM);
      c6x_print_operand (file, x, 0);
      break;
    }
}
/* Return a single character, which is either 'l', 's', 'd' or 'm', which
   specifies the functional unit used by INSN.  */

static char
c6x_get_unit_specifier (rtx_insn *insn)
{
  enum attr_units units;

  if (insn_info.exists ())
    {
      int unit = INSN_INFO_ENTRY (INSN_UID (insn)).reservation;
      return c6x_unit_names[unit][0];
    }

  units = get_attr_units (insn);
  switch (units)
    {
    case UNITS_D:
    case UNITS_DL:
    case UNITS_DS:
    case UNITS_DLS:
    case UNITS_D_ADDR:
      return 'd';
    case UNITS_L:
    case UNITS_LS:
      return 'l';
    case UNITS_S:
      return 's';
    case UNITS_M:
      return 'm';
    default:
      gcc_unreachable ();
    }
}

/* Prints the unit specifier field.  */
static void
c6x_print_unit_specifier_field (FILE *file, rtx_insn *insn)
{
  enum attr_units units = get_attr_units (insn);
  enum attr_cross cross = get_attr_cross (insn);
  enum attr_dest_regfile rf = get_attr_dest_regfile (insn);
  int half;
  char unitspec;

  if (units == UNITS_D_ADDR)
    {
      enum attr_addr_regfile arf = get_attr_addr_regfile (insn);
      int t_half;
      gcc_assert (arf != ADDR_REGFILE_UNKNOWN);
      half = arf == ADDR_REGFILE_A ? 1 : 2;
      t_half = rf == DEST_REGFILE_A ? 1 : 2;
      fprintf (file, ".d%dt%d", half, t_half);
      return;
    }

  if (insn_info.exists ())
    {
      int unit = INSN_INFO_ENTRY (INSN_UID (insn)).reservation;
      fputs (".", file);
      fputs (c6x_unit_names[unit], file);
      if (cross == CROSS_Y)
	fputs ("x", file);
      return;
    }

  gcc_assert (rf != DEST_REGFILE_UNKNOWN);
  unitspec = c6x_get_unit_specifier (insn);
  half = rf == DEST_REGFILE_A ? 1 : 2;
  fprintf (file, ".%c%d%s", unitspec, half, cross == CROSS_Y ? "x" : "");
}
/* Output assembly language output for the address ADDR to FILE.  */
static void
c6x_print_operand_address (FILE *file, machine_mode mode, rtx addr)
{
  c6x_print_address_operand (file, addr, mode);
}
/* Print an operand, X, to FILE, with an optional modifier in CODE.

   Meaning of CODE:
   $ -- print the unit specifier field for the instruction.
   . -- print the predicate for the instruction or an empty string for an
        unconditional one.
   | -- print "||" if the insn should be issued in parallel with the previous
        one.

   C -- print an opcode suffix for a reversed condition
   d -- H, W or D as a suffix for ADDA, based on the factor given by the
        operand
   D -- print either B, H, W or D as a suffix for ADDA, based on the size of
        the operand
   J -- print a predicate
   j -- like J, but use reverse predicate
   k -- treat a CONST_INT as a register number and print it as a register
   K -- like k, but print out a doubleword register
   n -- print an integer operand, negated
   p -- print the low part of a DImode register
   P -- print the high part of a DImode register
   r -- print the absolute value of an integer operand, shifted right by 1
   R -- print the absolute value of an integer operand, shifted right by 2
   f -- the first clear bit in an integer operand assumed to be a mask for
        a clr instruction
   F -- the last clear bit in such a mask
   s -- the first set bit in an integer operand assumed to be a mask for
        a set instruction
   S -- the last set bit in such a mask
   U -- print either 1 or 2, depending on the side of the machine used by
        the operand  */

static void
c6x_print_operand (FILE *file, rtx x, int code)
{
  int i;
  HOST_WIDE_INT v;
  tree t;
  machine_mode mode;

  if (code == '|')
    {
      if (GET_MODE (c6x_current_insn) != TImode)
	fputs ("||", file);
      return;
    }
  if (code == '$')
    {
      c6x_print_unit_specifier_field (file, c6x_current_insn);
      return;
    }

  if (code == '.')
    {
      x = current_insn_predicate;
      if (x)
	{
	  unsigned int regno = REGNO (XEXP (x, 0));
	  fputs ("[", file);
	  if (GET_CODE (x) == EQ)
	    fputs ("!", file);
	  fputs (reg_names[regno], file);
	  fputs ("]", file);
	}
      return;
    }

  mode = GET_MODE (x);

  switch (code)
    {
    case 'C':
      {
	enum rtx_code c = GET_CODE (x);

	c = swap_condition (c);
	fputs (GET_RTX_NAME (c), file);
      }
      return;

    case 'J':
    case 'j':
      {
	unsigned int regno = REGNO (XEXP (x, 0));
	if ((GET_CODE (x) == EQ) == (code == 'J'))
	  fputs ("!", file);
	fputs (reg_names[regno], file);
      }
      return;

    case 'k':
      gcc_assert (GET_CODE (x) == CONST_INT);
      v = INTVAL (x);
      fprintf (file, "%s", reg_names[v]);
      return;
    case 'K':
      gcc_assert (GET_CODE (x) == CONST_INT);
      v = INTVAL (x);
      gcc_assert ((v & 1) == 0);
      fprintf (file, "%s:%s", reg_names[v + 1], reg_names[v]);
      return;

    case 's':
    case 'S':
    case 'f':
    case 'F':
      gcc_assert (GET_CODE (x) == CONST_INT);
      v = INTVAL (x);
      for (i = 0; i < 32; i++)
	{
	  HOST_WIDE_INT tst = v & 1;
	  if (((code == 'f' || code == 'F') && !tst)
	      || ((code == 's' || code == 'S') && tst))
	    break;
	  v >>= 1;
	}
      if (code == 'f' || code == 's')
	{
	  fprintf (file, "%d", i);
	  return;
	}
      for (; i < 32; i++)
	{
	  HOST_WIDE_INT tst = v & 1;
	  if ((code == 'F' && tst) || (code == 'S' && !tst))
	    break;
	  v >>= 1;
	}
      fprintf (file, "%d", i - 1);
      return;

    case 'n':
      gcc_assert (GET_CODE (x) == CONST_INT);
      output_addr_const (file, GEN_INT (-INTVAL (x)));
      return;

    case 'r':
      gcc_assert (GET_CODE (x) == CONST_INT);
      v = INTVAL (x);
      if (v < 0)
	v = -v;
      output_addr_const (file, GEN_INT (v >> 1));
      return;

    case 'R':
      gcc_assert (GET_CODE (x) == CONST_INT);
      v = INTVAL (x);
      if (v < 0)
	v = -v;
      output_addr_const (file, GEN_INT (v >> 2));
      return;

    case 'd':
      gcc_assert (GET_CODE (x) == CONST_INT);
      v = INTVAL (x);
      fputs (v == 2 ? "h" : v == 4 ? "w" : "d", file);
      return;

    case 'p':
    case 'P':
      gcc_assert (GET_CODE (x) == REG);
      v = REGNO (x);
      if (code == 'P')
	v++;
      fputs (reg_names[v], file);
      return;

    case 'D':
      v = 0;
      if (GET_CODE (x) == CONST)
	{
	  x = XEXP (x, 0);
	  gcc_assert (GET_CODE (x) == PLUS);
	  gcc_assert (GET_CODE (XEXP (x, 1)) == CONST_INT);
	  v = INTVAL (XEXP (x, 1));
	  x = XEXP (x, 0);
	}
      gcc_assert (GET_CODE (x) == SYMBOL_REF);

      t = SYMBOL_REF_DECL (x);
      if (DECL_P (t))
	v |= DECL_ALIGN_UNIT (t);
      else
	v |= TYPE_ALIGN_UNIT (TREE_TYPE (t));
      if (v & 1)
	fputs ("b", file);
      else if (v & 2)
	fputs ("h", file);
      else
	fputs ("w", file);
      return;

    case 'U':
      if (MEM_P (x))
	{
	  x = XEXP (x, 0);
	  if (GET_CODE (x) == PLUS
	      || GET_RTX_CLASS (GET_CODE (x)) == RTX_AUTOINC)
	    x = XEXP (x, 0);
	  if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF)
	    {
	      gcc_assert (sdata_symbolic_operand (x, Pmode));
	      fputs ("2", file);
	      return;
	    }
	}
      gcc_assert (REG_P (x));
      if (A_REGNO_P (REGNO (x)))
	fputs ("1", file);
      if (B_REGNO_P (REGNO (x)))
	fputs ("2", file);
      return;

    default:
      switch (GET_CODE (x))
	{
	case REG:
	  if (GET_MODE_SIZE (mode) == 8)
	    fprintf (file, "%s:%s", reg_names[REGNO (x) + 1],
		     reg_names[REGNO (x)]);
	  else
	    fprintf (file, "%s", reg_names[REGNO (x)]);
	  break;

	case MEM:
	  fputc ('*', file);
	  gcc_assert (XEXP (x, 0) != stack_pointer_rtx);
	  c6x_print_address_operand (file, XEXP (x, 0), GET_MODE (x));
	  break;

	case SYMBOL_REF:
	  fputc ('(', file);
	  output_addr_const (file, x);
	  fputc (')', file);
	  break;

	case CONST_INT:
	  output_addr_const (file, x);
	  break;

	case CONST_DOUBLE:
	  output_operand_lossage ("invalid const_double operand");
	  break;

	default:
	  output_addr_const (file, x);
	}
    }
}
/* Return TRUE if OP is a valid memory address with a base register of
   class C.  If SMALL_OFFSET is true, we disallow memory references which would
   require a long offset with B14/B15.  */

bool
c6x_mem_operand (rtx op, enum reg_class c, bool small_offset)
{
  machine_mode mode = GET_MODE (op);
  rtx base = XEXP (op, 0);
  switch (GET_CODE (base))
    {
    case REG:
      break;
    case PLUS:
      if (small_offset
	  && (XEXP (base, 0) == stack_pointer_rtx
	      || XEXP (base, 0) == pic_offset_table_rtx))
	{
	  if (!c6x_legitimate_address_p_1 (mode, base, true, true))
	    return false;
	}

      /* fall through */
    case PRE_INC:
    case PRE_DEC:
    case PRE_MODIFY:
    case POST_INC:
    case POST_DEC:
    case POST_MODIFY:
      base = XEXP (base, 0);
      break;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      gcc_assert (sdata_symbolic_operand (base, Pmode));
      return !small_offset && c == B_REGS;

    default:
      return false;
    }
  return TEST_HARD_REG_BIT (reg_class_contents[ (int) (c)], REGNO (base));
}
/* Returns true if X is a valid address for use in a memory reference
   of mode MODE.  If STRICT is true, we do not allow pseudo registers
   in the address.  NO_LARGE_OFFSET is true if we are examining an
   address for use in a load or store misaligned instruction, or
   recursively examining an operand inside a PRE/POST_MODIFY.  */

bool
c6x_legitimate_address_p_1 (machine_mode mode, rtx x, bool strict,
			    bool no_large_offset)
{
  int size, size1;
  HOST_WIDE_INT off;
  enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case PRE_MODIFY:
    case POST_MODIFY:
      /* We can't split these into word-sized pieces yet.  */
      if (!TARGET_STDW && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
	return false;
      if (GET_CODE (XEXP (x, 1)) != PLUS)
	return false;
      if (!c6x_legitimate_address_p_1 (mode, XEXP (x, 1), strict, true))
	return false;
      if (!rtx_equal_p (XEXP (x, 0), XEXP (XEXP (x, 1), 0)))
	return false;

      /* fall through */
    case PRE_INC:
    case PRE_DEC:
    case POST_INC:
    case POST_DEC:
      /* We can't split these into word-sized pieces yet.  */
      if (!TARGET_STDW && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
	return false;
      x = XEXP (x, 0);
      if (!REG_P (x))
	return false;

      /* fall through */
    case REG:
      if (strict)
	return REGNO_OK_FOR_BASE_STRICT_P (REGNO (x));
      else
	return REGNO_OK_FOR_BASE_NONSTRICT_P (REGNO (x));

    case PLUS:
      if (!REG_P (XEXP (x, 0))
	  || !c6x_legitimate_address_p_1 (mode, XEXP (x, 0), strict, false))
	return false;
      /* We cannot ensure currently that both registers end up in the
	 same register file.  */
      if (REG_P (XEXP (x, 1)))
	return false;

      if (mode == BLKmode)
	size = 4;
      else if (mode == VOIDmode)
	/* ??? This can happen during ivopts.  */
	size = 1;
      else
	size = GET_MODE_SIZE (mode);

      if (flag_pic
	  && GET_CODE (XEXP (x, 1)) == UNSPEC
	  && XINT (XEXP (x, 1), 1) == UNSPEC_LOAD_SDATA
	  && XEXP (x, 0) == pic_offset_table_rtx
	  && sdata_symbolic_operand (XVECEXP (XEXP (x, 1), 0, 0), SImode))
	return !no_large_offset && size <= 4;
      if (flag_pic == 1
	  && mode == Pmode
	  && GET_CODE (XEXP (x, 1)) == UNSPEC
	  && XINT (XEXP (x, 1), 1) == UNSPEC_LOAD_GOT
	  && XEXP (x, 0) == pic_offset_table_rtx
	  && (GET_CODE (XVECEXP (XEXP (x, 1), 0, 0)) == SYMBOL_REF
	      || GET_CODE (XVECEXP (XEXP (x, 1), 0, 0)) == LABEL_REF))
	return !no_large_offset;
      if (GET_CODE (XEXP (x, 1)) != CONST_INT)
	return false;

      off = INTVAL (XEXP (x, 1));

      /* If the machine does not have doubleword load/stores, we'll use
	 word size accesses.  */
      size1 = size;
      if (size == 2 * UNITS_PER_WORD && !TARGET_STDW)
	size = UNITS_PER_WORD;

      if (((HOST_WIDE_INT)size1 - 1) & off)
	return false;
      off /= size;
      if (off > -32 && off < (size1 == size ? 32 : 28))
	return true;
      if (no_large_offset || code != PLUS || XEXP (x, 0) != stack_pointer_rtx
	  || size1 > UNITS_PER_WORD)
	return false;
      return off >= 0 && off < 32768;

    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return (!no_large_offset
	      /* With -fpic, we must wrap it in an unspec to show the B14
		 dependency.  */
	      && !flag_pic
	      && GET_MODE_SIZE (mode) <= UNITS_PER_WORD
	      && sdata_symbolic_operand (x, Pmode));

    default:
      return false;
    }
}

static bool
c6x_legitimate_address_p (machine_mode mode, rtx x, bool strict)
{
  return c6x_legitimate_address_p_1 (mode, x, strict, false);
}

static bool
c6x_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED,
			   rtx x ATTRIBUTE_UNUSED)
{
  return true;
}
/* Implements TARGET_PREFERRED_RENAME_CLASS.  */
static reg_class_t
c6x_preferred_rename_class (reg_class_t cl)
{
  if (cl == A_REGS)
    return NONPREDICATE_A_REGS;
  if (cl == B_REGS)
    return NONPREDICATE_B_REGS;
  if (cl == ALL_REGS || cl == GENERAL_REGS)
    return NONPREDICATE_REGS;
  return NO_REGS;
}

/* Implements FINAL_PRESCAN_INSN.  */
void
c6x_final_prescan_insn (rtx_insn *insn, rtx *opvec ATTRIBUTE_UNUSED,
			int noperands ATTRIBUTE_UNUSED)
{
  c6x_current_insn = insn;
}
/* A structure to describe the stack layout of a function.  The layout is
   as follows:

   [saved frame pointer (or possibly padding0)]
   --> incoming stack pointer, new hard frame pointer
   [saved call-used regs]
   [optional padding1]
   --> soft frame pointer
   [frame]
   [outgoing arguments]
   [optional padding2]

   The structure members are laid out in this order.  */

struct c6x_frame
{
  int padding0;
  /* Number of registers to save.  */
  int nregs;
  int padding1;
  HOST_WIDE_INT frame;
  int outgoing_arguments_size;
  int padding2;

  HOST_WIDE_INT to_allocate;
  /* The offsets relative to the incoming stack pointer (which
     becomes HARD_FRAME_POINTER).  */
  HOST_WIDE_INT frame_pointer_offset;
  HOST_WIDE_INT b3_offset;

  /* True if we should call push_rts/pop_rts to save and restore
     registers.  */
  bool push_rts;
};

/* Return true if we need to save and modify the PIC register in the
   prologue.  */

static bool
must_reload_pic_reg_p (void)
{
  struct cgraph_local_info *i = NULL;

  if (!TARGET_DSBT)
    return false;

  i = cgraph_node::local_info (current_function_decl);

  if ((crtl->uses_pic_offset_table || !crtl->is_leaf) && !i->local)
    return true;
  return false;
}

/* Return 1 if we need to save REGNO.  */
static int
c6x_save_reg (unsigned int regno)
{
  return ((df_regs_ever_live_p (regno)
	   && !call_used_regs[regno]
	   && !fixed_regs[regno])
	  || (regno == RETURN_ADDR_REGNO
	      && (df_regs_ever_live_p (regno)
		  || !crtl->is_leaf))
	  || (regno == PIC_OFFSET_TABLE_REGNUM && must_reload_pic_reg_p ()));
}

/* Examine the number of regs NREGS we've determined we must save.
   Return true if we should use __c6xabi_push_rts/__c6xabi_pop_rts for
   prologue and epilogue.  */

static bool
use_push_rts_p (int nregs)
{
  if (TARGET_INSNS_64PLUS && optimize_function_for_size_p (cfun)
      && !cfun->machine->contains_sibcall
      && !cfun->returns_struct
      && !TARGET_LONG_CALLS
      && nregs >= 6 && !frame_pointer_needed)
    return true;
  return false;
}

/* Return number of saved general purpose registers.  */

int
c6x_nsaved_regs (void)
{
  int nregs = 0;
  int regno;

  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (c6x_save_reg (regno))
      nregs++;
  return nregs;
}

/* The safe debug order mandated by the ABI.  */
static unsigned reg_save_order[] =
{
  REG_A10, REG_A11, REG_A12, REG_A13,
  REG_A14, REG_B3,
  REG_B10, REG_B11, REG_B12, REG_B13,
  REG_B14, REG_A15
};

#define N_SAVE_ORDER (sizeof reg_save_order / sizeof *reg_save_order)
/* Compute the layout of the stack frame and store it in FRAME.  */

static void
c6x_compute_frame_layout (struct c6x_frame *frame)
{
  HOST_WIDE_INT size = get_frame_size ();
  HOST_WIDE_INT offset;
  int nregs;

  /* We use the four bytes which are technically inside the caller's frame,
     usually to save the frame pointer.  */
  offset = -4;
  frame->padding0 = 0;
  nregs = c6x_nsaved_regs ();
  frame->push_rts = false;
  frame->b3_offset = 0;
  if (use_push_rts_p (nregs))
    {
      frame->push_rts = true;
      frame->b3_offset = (TARGET_BIG_ENDIAN ? -12 : -13) * 4;
      nregs = 14;
    }
  else if (c6x_save_reg (REG_B3))
    {
      int idx;
      for (idx = N_SAVE_ORDER - 1; reg_save_order[idx] != REG_B3; idx--)
	{
	  if (c6x_save_reg (reg_save_order[idx]))
	    frame->b3_offset -= 4;
	}
    }
  frame->nregs = nregs;

  if (size == 0 && nregs == 0)
    {
      frame->padding0 = 4;
      frame->padding1 = frame->padding2 = 0;
      frame->frame_pointer_offset = frame->to_allocate = 0;
      frame->outgoing_arguments_size = 0;
      return;
    }

  if (!frame->push_rts)
    offset += frame->nregs * 4;

  if (offset == 0 && size == 0 && crtl->outgoing_args_size == 0
      && !crtl->is_leaf)
    /* Don't use the bottom of the caller's frame if we have no
       allocation of our own and call other functions.  */
    frame->padding0 = frame->padding1 = 4;
  else if (offset & 4)
    frame->padding1 = 4;
  else
    frame->padding1 = 0;

  offset += frame->padding0 + frame->padding1;
  frame->frame_pointer_offset = offset;
  offset += size;

  frame->outgoing_arguments_size = crtl->outgoing_args_size;
  offset += frame->outgoing_arguments_size;

  if ((offset & 4) == 0)
    frame->padding2 = 8;
  else
    frame->padding2 = 4;
  frame->to_allocate = offset + frame->padding2;
}
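
/* Worked example of the padding2 rule above: whatever the running OFFSET
   is, adding 8 when (offset & 4) == 0 and 4 otherwise makes TO_ALLOCATE a
   multiple of 8 that is strictly larger than OFFSET, so the outgoing stack
   pointer stays 8-byte aligned and the word it points to is never part of
   our own allocation.  */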
/* Return the offset between two registers, one to be eliminated, and the other
   its replacement, at the start of a routine.  */

HOST_WIDE_INT
c6x_initial_elimination_offset (int from, int to)
{
  struct c6x_frame frame;
  c6x_compute_frame_layout (&frame);

  if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    return 0;
  else if (from == FRAME_POINTER_REGNUM
	   && to == HARD_FRAME_POINTER_REGNUM)
    return -frame.frame_pointer_offset;
  else
    {
      gcc_assert (to == STACK_POINTER_REGNUM);

      if (from == ARG_POINTER_REGNUM)
	return frame.to_allocate + (frame.push_rts ? 56 : 0);

      gcc_assert (from == FRAME_POINTER_REGNUM);
      return frame.to_allocate - frame.frame_pointer_offset;
    }
}

/* Given FROM and TO register numbers, say whether this elimination is
   allowed.  Frame pointer elimination is automatically handled.  */

static bool
c6x_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
{
  if (to == STACK_POINTER_REGNUM)
    return !frame_pointer_needed;
  return true;
}
/* Emit insns to increment the stack pointer by OFFSET.  If
   FRAME_RELATED_P, set the RTX_FRAME_RELATED_P flag on the insns.
   Does nothing if the offset is zero.  */

static void
emit_add_sp_const (HOST_WIDE_INT offset, bool frame_related_p)
{
  rtx to_add = GEN_INT (offset);
  rtx orig_to_add = to_add;
  rtx_insn *insn;

  if (offset == 0)
    return;

  if (offset < -32768 || offset > 32767)
    {
      rtx reg = gen_rtx_REG (SImode, REG_A0);
      rtx low = GEN_INT (trunc_int_for_mode (offset, HImode));

      insn = emit_insn (gen_movsi_high (reg, low));
      if (frame_related_p)
	RTX_FRAME_RELATED_P (insn) = 1;
      insn = emit_insn (gen_movsi_lo_sum (reg, reg, to_add));
      if (frame_related_p)
	RTX_FRAME_RELATED_P (insn) = 1;
      to_add = reg;
    }
  insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
				to_add));
  if (frame_related_p)
    {
      if (REG_P (to_add))
	add_reg_note (insn, REG_FRAME_RELATED_EXPR,
		      gen_rtx_SET (stack_pointer_rtx,
				   gen_rtx_PLUS (Pmode, stack_pointer_rtx,
						 orig_to_add)));

      RTX_FRAME_RELATED_P (insn) = 1;
    }
}
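
/* Illustrative walk-through: an offset such as -40000 does not fit the
   signed 16-bit immediate of a single add, so the code above first
   materializes the constant in A0 (high part, then the low part via the
   lo_sum pattern) and only then adjusts SP with one add.  The
   REG_FRAME_RELATED_EXPR note describes the combined effect,
   SP = SP + OFFSET, so the unwinder sees a single logical adjustment.  */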
/* Prologue and epilogue.  */
void
c6x_expand_prologue (void)
{
  struct c6x_frame frame;
  rtx_insn *insn;
  rtx mem;
  int nsaved = 0;
  HOST_WIDE_INT initial_offset, off, added_already;

  c6x_compute_frame_layout (&frame);

  if (flag_stack_usage_info)
    current_function_static_stack_size = frame.to_allocate;

  initial_offset = -frame.to_allocate;
  if (frame.push_rts)
    {
      emit_insn (gen_push_rts ());
      nsaved = frame.nregs;
    }

  /* If the offsets would be too large for the memory references we will
     create to save registers, do the stack allocation in two parts.
     Ensure by subtracting 8 that we don't store to the word pointed to
     by the stack pointer.  */
  if (initial_offset < -32768)
    initial_offset = -frame.frame_pointer_offset - 8;

  if (frame.to_allocate > 0)
    gcc_assert (initial_offset != 0);

  off = -initial_offset + 4 - frame.padding0;

  mem = gen_frame_mem (Pmode, stack_pointer_rtx);

  added_already = 0;
  if (frame_pointer_needed)
    {
      rtx fp_reg = gen_rtx_REG (SImode, REG_A15);
      /* We go through some contortions here to both follow the ABI's
	 recommendation that FP == incoming SP, and to avoid writing or
	 reading the word pointed to by the stack pointer.  */
      rtx addr = gen_rtx_POST_MODIFY (Pmode, stack_pointer_rtx,
				      gen_rtx_PLUS (Pmode, stack_pointer_rtx,
						    GEN_INT (-8)));
      insn = emit_move_insn (gen_frame_mem (Pmode, addr), fp_reg);
      RTX_FRAME_RELATED_P (insn) = 1;
      nsaved++;
      insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, stack_pointer_rtx,
				    GEN_INT (8)));
      RTX_FRAME_RELATED_P (insn) = 1;
      off -= 4;
      added_already = -8;
    }

  emit_add_sp_const (initial_offset - added_already, true);

  if (nsaved < frame.nregs)
    {
      unsigned i;

      for (i = 0; i < N_SAVE_ORDER; i++)
	{
	  int idx = N_SAVE_ORDER - i - 1;
	  unsigned regno = reg_save_order[idx];
	  rtx reg;
	  machine_mode save_mode = SImode;

	  if (regno == REG_A15 && frame_pointer_needed)
	    /* Already saved.  */
	    continue;
	  if (!c6x_save_reg (regno))
	    continue;

	  if (TARGET_STDW && (off & 4) == 0 && off <= 256
	      && (regno & 1) == 1
	      && i + 1 < N_SAVE_ORDER
	      && reg_save_order[idx - 1] == regno - 1
	      && c6x_save_reg (regno - 1))
	    {
	      save_mode = DImode;
	      regno--;
	      i++;
	    }
	  reg = gen_rtx_REG (save_mode, regno);
	  off -= GET_MODE_SIZE (save_mode);

	  insn = emit_move_insn (adjust_address (mem, save_mode, off),
				 reg);
	  RTX_FRAME_RELATED_P (insn) = 1;

	  nsaved += hard_regno_nregs (regno, save_mode);
	}
    }
  gcc_assert (nsaved == frame.nregs);
  emit_add_sp_const (-frame.to_allocate - initial_offset, true);
  if (must_reload_pic_reg_p ())
    {
      if (dsbt_decl == NULL)
	{
	  tree t;

	  t = build_index_type (integer_one_node);
	  t = build_array_type (integer_type_node, t);
	  t = build_decl (BUILTINS_LOCATION, VAR_DECL,
			  get_identifier ("__c6xabi_DSBT_BASE"), t);
	  DECL_ARTIFICIAL (t) = 1;
	  DECL_IGNORED_P (t) = 1;
	  DECL_EXTERNAL (t) = 1;
	  TREE_STATIC (t) = 1;
	  TREE_PUBLIC (t) = 1;
	  TREE_USED (t) = 1;

	  dsbt_decl = t;
	}
      emit_insn (gen_setup_dsbt (pic_offset_table_rtx,
				 XEXP (DECL_RTL (dsbt_decl), 0)));
    }
}
/* Expand the epilogue into RTL.  SIBCALL is true for a sibling-call
   epilogue, which must not emit the final return jump.  */
void
c6x_expand_epilogue (bool sibcall)
{
  unsigned i;
  struct c6x_frame frame;
  rtx mem;
  HOST_WIDE_INT off;
  int nsaved = 0;

  c6x_compute_frame_layout (&frame);

  mem = gen_frame_mem (Pmode, stack_pointer_rtx);

  /* Insert a dummy set/use of the stack pointer.  This creates a
     scheduler barrier between the prologue saves and epilogue restores.  */
  emit_insn (gen_epilogue_barrier (stack_pointer_rtx, stack_pointer_rtx));

  /* If the offsets would be too large for the memory references we will
     create to restore registers, do a preliminary stack adjustment here.  */
  off = frame.to_allocate - frame.frame_pointer_offset + frame.padding1;
  if (frame.push_rts)
    nsaved = frame.nregs;
  else
    {
      if (frame.to_allocate > 32768)
	{
	  /* Don't add the entire offset so that we leave an unused word
	     above the stack pointer.  */
	  emit_add_sp_const ((off - 16) & ~7, false);
	  off -= (off - 16) & ~7;
	}
      for (i = 0; i < N_SAVE_ORDER; i++)
	{
	  unsigned regno = reg_save_order[i];
	  rtx reg;
	  machine_mode save_mode = SImode;

	  if (!c6x_save_reg (regno))
	    continue;
	  if (regno == REG_A15 && frame_pointer_needed)
	    continue;

	  if (TARGET_STDW && (off & 4) == 0 && off < 256
	      && (regno & 1) == 0
	      && i + 1 < N_SAVE_ORDER
	      && reg_save_order[i + 1] == regno + 1
	      && c6x_save_reg (regno + 1))
	    {
	      save_mode = DImode;
	      i++;
	    }
	  reg = gen_rtx_REG (save_mode, regno);

	  emit_move_insn (reg, adjust_address (mem, save_mode, off));

	  off += GET_MODE_SIZE (save_mode);
	  nsaved += hard_regno_nregs (regno, save_mode);
	}
    }
  if (!frame_pointer_needed)
    emit_add_sp_const (off + frame.padding0 - 4, false);
  else
    {
      rtx fp_reg = gen_rtx_REG (SImode, REG_A15);
      rtx addr = gen_rtx_PRE_MODIFY (Pmode, stack_pointer_rtx,
				     gen_rtx_PLUS (Pmode, stack_pointer_rtx,
						   GEN_INT (8)));
      emit_insn (gen_addsi3 (stack_pointer_rtx, hard_frame_pointer_rtx,
			     GEN_INT (-8)));
      emit_move_insn (fp_reg, gen_frame_mem (Pmode, addr));
    }

  gcc_assert (nsaved == frame.nregs);

  if (!sibcall)
    {
      if (frame.push_rts)
	emit_jump_insn (gen_pop_rts ());
      else
	emit_jump_insn (gen_return_internal (gen_rtx_REG (SImode,
							  RETURN_ADDR_REGNO)));
    }
}

/* Return the value of the return address for the frame COUNT steps up
   from the current frame, after the prologue.
   We punt for everything but the current frame by returning const0_rtx.  */

rtx
c6x_return_addr_rtx (int count)
{
  if (count != 0)
    return const0_rtx;

  return get_hard_reg_initial_val (Pmode, RETURN_ADDR_REGNO);
}
/* Return true iff TYPE is one of the shadow types.  */
static bool
shadow_type_p (enum attr_type type)
{
  return (type == TYPE_SHADOW || type == TYPE_LOAD_SHADOW
	  || type == TYPE_MULT_SHADOW);
}

/* Return true iff INSN is a shadow pattern.  */
static bool
shadow_p (rtx_insn *insn)
{
  if (!NONDEBUG_INSN_P (insn) || recog_memoized (insn) < 0)
    return false;
  return shadow_type_p (get_attr_type (insn));
}

/* Return true iff INSN is a shadow or blockage pattern.  */
static bool
shadow_or_blockage_p (rtx_insn *insn)
{
  enum attr_type type;
  if (!NONDEBUG_INSN_P (insn) || recog_memoized (insn) < 0)
    return false;
  type = get_attr_type (insn);
  return shadow_type_p (type) || type == TYPE_BLOCKAGE;
}
/* Translate UNITS into a bitmask of units we can reserve for this
   insn.  */
static int
get_reservation_flags (enum attr_units units)
{
  switch (units)
    {
    case UNITS_D:
    case UNITS_D_ADDR:
      return RESERVATION_FLAG_D;
    case UNITS_L:
      return RESERVATION_FLAG_L;
    case UNITS_S:
      return RESERVATION_FLAG_S;
    case UNITS_M:
      return RESERVATION_FLAG_M;
    case UNITS_LS:
      return RESERVATION_FLAG_LS;
    case UNITS_DL:
      return RESERVATION_FLAG_DL;
    case UNITS_DS:
      return RESERVATION_FLAG_DS;
    case UNITS_DLS:
      return RESERVATION_FLAG_DLS;
    default:
      return 0;
    }
}
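
/* The flags compose as bitmasks; for example an insn with UNITS_LS gets
   RESERVATION_FLAG_LS == RESERVATION_FLAG_L | RESERVATION_FLAG_S, i.e. two
   candidate units.  assign_reservations below tests for single-bit masks
   with (this_rsrv & (this_rsrv - 1)) == 0 and chooses one unit for the
   multi-bit cases.  */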
/* Compute the side of the machine used by INSN, which reserves UNITS.
   This must match the reservations in the scheduling description.  */
static int
get_insn_side (rtx_insn *insn, enum attr_units units)
{
  if (units == UNITS_D_ADDR)
    return (get_attr_addr_regfile (insn) == ADDR_REGFILE_A ? 0 : 1);
  else
    {
      enum attr_dest_regfile rf = get_attr_dest_regfile (insn);
      if (rf == DEST_REGFILE_ANY)
	return get_attr_type (insn) == TYPE_BRANCH ? 0 : 1;
      else
	return rf == DEST_REGFILE_A ? 0 : 1;
    }
}
/* After scheduling, walk the insns between HEAD and END and assign unit
   reservations.  */
static void
assign_reservations (rtx_insn *head, rtx_insn *end)
{
  rtx_insn *insn;
  for (insn = head; insn != NEXT_INSN (end); insn = NEXT_INSN (insn))
    {
      unsigned int sched_mask, reserved;
      rtx_insn *within, *last;
      int pass;
      int rsrv[2];
      int rsrv_count[2][4];
      int i;

      if (GET_MODE (insn) != TImode)
	continue;

      reserved = 0;
      last = NULL;

      /* Find the last insn in the packet.  It has a state recorded for it,
	 which we can use to determine the units we should be using.  */
      for (within = insn;
	   (within != NEXT_INSN (end)
	    && (within == insn || GET_MODE (within) != TImode));
	   within = NEXT_INSN (within))
	{
	  int icode;
	  if (!NONDEBUG_INSN_P (within))
	    continue;
	  icode = recog_memoized (within);
	  if (icode < 0)
	    continue;
	  if (shadow_p (within))
	    continue;
	  if (INSN_INFO_ENTRY (INSN_UID (within)).reservation != 0)
	    reserved |= 1 << INSN_INFO_ENTRY (INSN_UID (within)).reservation;
	  last = within;
	}
      if (last == NULL_RTX)
	continue;

      sched_mask = INSN_INFO_ENTRY (INSN_UID (last)).unit_mask;
      sched_mask &= ~reserved;

      memset (rsrv_count, 0, sizeof rsrv_count);
      rsrv[0] = rsrv[1] = ~0;
      for (i = 0; i < 8; i++)
	{
	  int side = i / 4;
	  int unit = i & 3;
	  unsigned unit_bit = 1 << (unit + side * UNIT_QID_SIDE_OFFSET);
	  /* Clear the bits which we expect to reserve in the following loop,
	     leaving the ones set which aren't present in the scheduler's
	     state and shouldn't be reserved.  */
	  if (sched_mask & unit_bit)
	    rsrv[i / 4] &= ~(1 << unit);
	}

      /* Walk through the insns that occur in the same cycle.  We use multiple
	 passes to assign units, assigning for insns with the most specific
	 requirements first.  */
      for (pass = 0; pass < 4; pass++)
	for (within = insn;
	     (within != NEXT_INSN (end)
	      && (within == insn || GET_MODE (within) != TImode));
	     within = NEXT_INSN (within))
	  {
	    int uid = INSN_UID (within);
	    int this_rsrv, side;
	    int icode;
	    enum attr_units units;
	    enum attr_type type;
	    int j;

	    if (!NONDEBUG_INSN_P (within))
	      continue;
	    icode = recog_memoized (within);
	    if (icode < 0)
	      continue;
	    if (INSN_INFO_ENTRY (uid).reservation != 0)
	      continue;
	    units = get_attr_units (within);
	    type = get_attr_type (within);
	    this_rsrv = get_reservation_flags (units);
	    if (this_rsrv == 0)
	      continue;
	    side = get_insn_side (within, units);

	    /* Certain floating point instructions are treated specially.  If
	       an insn can choose between units it can reserve, and its
	       reservation spans more than one cycle, the reservation contains
	       special markers in the first cycle to help us reconstruct what
	       the automaton chose.  */
	    if ((type == TYPE_ADDDP || type == TYPE_FP4)
		&& units == UNITS_LS)
	      {
		int test1_code = ((type == TYPE_FP4 ? UNIT_QID_FPL1
				   : UNIT_QID_ADDDPL1)
				  + side * UNIT_QID_SIDE_OFFSET);
		int test2_code = ((type == TYPE_FP4 ? UNIT_QID_FPS1
				   : UNIT_QID_ADDDPS1)
				  + side * UNIT_QID_SIDE_OFFSET);
		if ((sched_mask & (1 << test1_code)) != 0)
		  {
		    this_rsrv = RESERVATION_FLAG_L;
		    sched_mask &= ~(1 << test1_code);
		  }
		else if ((sched_mask & (1 << test2_code)) != 0)
		  {
		    this_rsrv = RESERVATION_FLAG_S;
		    sched_mask &= ~(1 << test2_code);
		  }
	      }

	    if ((this_rsrv & (this_rsrv - 1)) == 0)
	      {
		int t = exact_log2 (this_rsrv) + side * UNIT_QID_SIDE_OFFSET;
		rsrv[side] |= this_rsrv;
		INSN_INFO_ENTRY (uid).reservation = t;
		continue;
	      }

	    if (pass == 1)
	      {
		for (j = 0; j < 4; j++)
		  if (this_rsrv & (1 << j))
		    rsrv_count[side][j]++;
		continue;
	      }
	    if ((pass == 2 && this_rsrv != RESERVATION_FLAG_DLS)
		|| (pass == 3 && this_rsrv == RESERVATION_FLAG_DLS))
	      {
		int best = -1, best_cost = INT_MAX;
		for (j = 0; j < 4; j++)
		  if ((this_rsrv & (1 << j))
		      && !(rsrv[side] & (1 << j))
		      && rsrv_count[side][j] < best_cost)
		    {
		      best_cost = rsrv_count[side][j];
		      best = j;
		    }
		gcc_assert (best != -1);
		rsrv[side] |= 1 << best;
		for (j = 0; j < 4; j++)
		  if ((this_rsrv & (1 << j)) && j != best)
		    rsrv_count[side][j]--;

		INSN_INFO_ENTRY (uid).reservation
		  = best + side * UNIT_QID_SIDE_OFFSET;
	      }
	  }
    }
}
/* Return a factor by which to weight unit imbalances for a reservation
   R.  */
static int
unit_req_factor (enum unitreqs r)
{
  switch (r)
    {
    case UNIT_REQ_D:
    case UNIT_REQ_L:
    case UNIT_REQ_S:
    case UNIT_REQ_M:
    case UNIT_REQ_X:
    case UNIT_REQ_T:
      return 1;
    case UNIT_REQ_DL:
    case UNIT_REQ_LS:
    case UNIT_REQ_DS:
      return 2;
    case UNIT_REQ_DLS:
      return 3;
    default:
      gcc_unreachable ();
    }
}

/* Examine INSN, and store in REQ1/SIDE1 and REQ2/SIDE2 the unit
   requirements.  Returns zero if INSN can't be handled, otherwise
   either one or two to show how many of the two pairs are in use.
   REQ1 is always used, it holds what is normally thought of as the
   instructions reservation, e.g. UNIT_REQ_DL.  REQ2 is used to either
   describe a cross path, or for loads/stores, the T unit.  */
static int
get_unit_reqs (rtx_insn *insn, int *req1, int *side1, int *req2, int *side2)
{
  enum attr_units units;
  enum attr_cross cross;
  int side, req;

  if (!NONDEBUG_INSN_P (insn) || recog_memoized (insn) < 0)
    return 0;
  units = get_attr_units (insn);
  if (units == UNITS_UNKNOWN)
    return 0;
  side = get_insn_side (insn, units);
  cross = get_attr_cross (insn);

  req = (units == UNITS_D ? UNIT_REQ_D
	 : units == UNITS_D_ADDR ? UNIT_REQ_D
	 : units == UNITS_DL ? UNIT_REQ_DL
	 : units == UNITS_DS ? UNIT_REQ_DS
	 : units == UNITS_L ? UNIT_REQ_L
	 : units == UNITS_LS ? UNIT_REQ_LS
	 : units == UNITS_S ? UNIT_REQ_S
	 : units == UNITS_M ? UNIT_REQ_M
	 : units == UNITS_DLS ? UNIT_REQ_DLS
	 : -1);
  gcc_assert (req != -1);
  *req1 = req;
  *side1 = side;
  if (units == UNITS_D_ADDR)
    {
      *req2 = UNIT_REQ_T;
      *side2 = side ^ (cross == CROSS_Y ? 1 : 0);
      return 2;
    }
  else if (cross == CROSS_Y)
    {
      *req2 = UNIT_REQ_X;
      *side2 = side;
      return 2;
    }
  return 1;
}
/* Walk the insns between and including HEAD and TAIL, and mark the
   resource requirements in the unit_reqs table.  */
static void
count_unit_reqs (unit_req_table reqs, rtx_insn *head, rtx_insn *tail)
{
  rtx_insn *insn;

  memset (reqs, 0, sizeof (unit_req_table));

  for (insn = head; insn != NEXT_INSN (tail); insn = NEXT_INSN (insn))
    {
      int side1, side2, req1, req2;

      switch (get_unit_reqs (insn, &req1, &side1, &req2, &side2))
	{
	case 2:
	  reqs[side2][req2]++;
	  /* fall through */
	case 1:
	  reqs[side1][req1]++;
	  break;
	}
    }
}

/* Update the table REQS by merging more specific unit reservations into
   more general ones, i.e. counting (for example) UNIT_REQ_D also in
   UNIT_REQ_DL, DS, and DLS.  */
static void
merge_unit_reqs (unit_req_table reqs)
{
  int side;
  for (side = 0; side < 2; side++)
    {
      int d = reqs[side][UNIT_REQ_D];
      int l = reqs[side][UNIT_REQ_L];
      int s = reqs[side][UNIT_REQ_S];
      int dl = reqs[side][UNIT_REQ_DL];
      int ls = reqs[side][UNIT_REQ_LS];
      int ds = reqs[side][UNIT_REQ_DS];

      reqs[side][UNIT_REQ_DL] += d;
      reqs[side][UNIT_REQ_DL] += l;
      reqs[side][UNIT_REQ_DS] += d;
      reqs[side][UNIT_REQ_DS] += s;
      reqs[side][UNIT_REQ_LS] += l;
      reqs[side][UNIT_REQ_LS] += s;
      reqs[side][UNIT_REQ_DLS] += ds + dl + ls + d + l + s;
    }
}
/* Examine the table REQS and return a measure of unit imbalance by comparing
   the two sides of the machine.  If, for example, D1 is used twice and D2
   used not at all, the return value should be 1 in the absence of other
   imbalances.  */
static int
unit_req_imbalance (unit_req_table reqs)
{
  int val = 0;
  int i;

  for (i = 0; i < UNIT_REQ_MAX; i++)
    {
      int factor = unit_req_factor ((enum unitreqs) i);
      int diff = abs (reqs[0][i] - reqs[1][i]);
      val += (diff + factor - 1) / factor / 2;
    }
  return val;
}
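
/* Worked example of the formula above: a diff of 2 with factor 1
   contributes (2 + 1 - 1) / 1 / 2 == 1 to VAL, matching the
   D1-used-twice example in the comment, while the same diff in a class
   whose factor is 3 contributes (2 + 3 - 1) / 3 / 2 == 0, so imbalance
   in the broader combined classes is weighted less heavily.  */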
/* Return the resource-constrained minimum iteration interval given the
   data in the REQS table.  This must have been processed with
   merge_unit_reqs already.  */
static int
res_mii (unit_req_table reqs)
{
  int side, req;
  int worst = 1;
  for (side = 0; side < 2; side++)
    for (req = 0; req < UNIT_REQ_MAX; req++)
      {
	int factor = unit_req_factor ((enum unitreqs) req);
	worst = MAX ((reqs[side][UNIT_REQ_D] + factor - 1) / factor, worst);
      }

  return worst;
}
/* Examine INSN, and store in PMASK1 and PMASK2 bitmasks that represent
   the operands that are involved in the (up to) two reservations, as
   found by get_unit_reqs.  Return true if we did this successfully, false
   if we couldn't identify what to do with INSN.  */
static bool
get_unit_operand_masks (rtx_insn *insn, unsigned int *pmask1,
			unsigned int *pmask2)
{
  enum attr_op_pattern op_pat;

  if (recog_memoized (insn) < 0)
    return false;
  if (GET_CODE (PATTERN (insn)) == COND_EXEC)
    return false;
  extract_insn (insn);
  op_pat = get_attr_op_pattern (insn);
  if (op_pat == OP_PATTERN_DT)
    {
      gcc_assert (recog_data.n_operands == 2);
      *pmask1 = 1 << 0;
      *pmask2 = 1 << 1;
      return true;
    }
  else if (op_pat == OP_PATTERN_TD)
    {
      gcc_assert (recog_data.n_operands == 2);
      *pmask1 = 1 << 1;
      *pmask2 = 1 << 0;
      return true;
    }
  else if (op_pat == OP_PATTERN_SXS)
    {
      gcc_assert (recog_data.n_operands == 3);
      *pmask1 = (1 << 0) | (1 << 2);
      *pmask2 = 1 << 1;
      return true;
    }
  else if (op_pat == OP_PATTERN_SX)
    {
      gcc_assert (recog_data.n_operands == 2);
      *pmask1 = 1 << 0;
      *pmask2 = 1 << 1;
      return true;
    }
  else if (op_pat == OP_PATTERN_SSX)
    {
      gcc_assert (recog_data.n_operands == 3);
      *pmask1 = (1 << 0) | (1 << 1);
      *pmask2 = 1 << 2;
      return true;
    }
  return false;
}
/* Try to replace a register in INSN, which has corresponding rename info
   from regrename_analyze in INFO.  OP_MASK and ORIG_SIDE provide information
   about the operands that must be renamed and the side they are on.
   REQS is the table of unit reservations in the loop between HEAD and TAIL.
   We recompute this information locally after our transformation, and keep
   it only if we managed to improve the balance.  */
static void
try_rename_operands (rtx_insn *head, rtx_insn *tail, unit_req_table reqs,
		     rtx_insn *insn,
		     insn_rr_info *info, unsigned int op_mask, int orig_side)
{
  enum reg_class super_class = orig_side == 0 ? B_REGS : A_REGS;
  HARD_REG_SET unavailable;
  du_head_p this_head;
  struct du_chain *chain;
  int i;
  unsigned tmp_mask;
  int best_reg, old_reg;
  vec<du_head_p> involved_chains = vNULL;
  unit_req_table new_reqs;
  bool ok;

  for (i = 0, tmp_mask = op_mask; tmp_mask; i++)
    {
      du_head_p op_chain;
      if ((tmp_mask & (1 << i)) == 0)
	continue;
      if (info->op_info[i].n_chains != 1)
	goto out;
      op_chain = regrename_chain_from_id (info->op_info[i].heads[0]->id);
      involved_chains.safe_push (op_chain);
      tmp_mask &= ~(1 << i);
    }

  if (involved_chains.length () > 1)
    goto out;

  this_head = involved_chains[0];
  if (this_head->cannot_rename)
    goto out;

  for (chain = this_head->first; chain; chain = chain->next_use)
    {
      unsigned int mask1, mask2, mask_changed;
      int count, side1, side2, req1, req2;
      insn_rr_info *this_rr = &insn_rr[INSN_UID (chain->insn)];

      count = get_unit_reqs (chain->insn, &req1, &side1, &req2, &side2);

      if (count == 0)
	goto out;

      if (!get_unit_operand_masks (chain->insn, &mask1, &mask2))
	goto out;

      extract_insn (chain->insn);

      mask_changed = 0;
      for (i = 0; i < recog_data.n_operands; i++)
	{
	  int j;
	  int n_this_op = this_rr->op_info[i].n_chains;
	  for (j = 0; j < n_this_op; j++)
	    {
	      du_head_p other = this_rr->op_info[i].heads[j];
	      if (regrename_chain_from_id (other->id) == this_head)
		break;
	    }
	  if (j == n_this_op)
	    continue;

	  if (n_this_op != 1)
	    goto out;
	  mask_changed |= 1 << i;
	}
      gcc_assert (mask_changed != 0);
      if (mask_changed != mask1 && mask_changed != mask2)
	goto out;
    }

  /* If we get here, we can do the renaming.  */
  COMPL_HARD_REG_SET (unavailable, reg_class_contents[(int) super_class]);

  old_reg = this_head->regno;
  best_reg =
    find_rename_reg (this_head, super_class, &unavailable, old_reg, true);

  ok = regrename_do_replace (this_head, best_reg);
  gcc_assert (ok);

  count_unit_reqs (new_reqs, head, PREV_INSN (tail));
  merge_unit_reqs (new_reqs);
  if (dump_file)
    {
      fprintf (dump_file, "reshuffle for insn %d, op_mask %x, "
	       "original side %d, new reg %d\n",
	       INSN_UID (insn), op_mask, orig_side, best_reg);
      fprintf (dump_file, "  imbalance %d -> %d\n",
	       unit_req_imbalance (reqs), unit_req_imbalance (new_reqs));
    }
  if (unit_req_imbalance (new_reqs) > unit_req_imbalance (reqs))
    {
      ok = regrename_do_replace (this_head, old_reg);
      gcc_assert (ok);
    }
  else
    memcpy (reqs, new_reqs, sizeof (unit_req_table));

 out:
  involved_chains.release ();
}
/* Find insns in LOOP which would, if shifted to the other side
   of the machine, reduce an imbalance in the unit reservations.  */
static void
reshuffle_units (basic_block loop)
{
  rtx_insn *head = BB_HEAD (loop);
  rtx_insn *tail = BB_END (loop);
  rtx_insn *insn;
  unit_req_table reqs;
  edge e;
  edge_iterator ei;
  bitmap_head bbs;

  count_unit_reqs (reqs, head, PREV_INSN (tail));
  merge_unit_reqs (reqs);

  regrename_init (true);

  bitmap_initialize (&bbs, &bitmap_default_obstack);

  FOR_EACH_EDGE (e, ei, loop->preds)
    bitmap_set_bit (&bbs, e->src->index);

  bitmap_set_bit (&bbs, loop->index);
  regrename_analyze (&bbs);

  for (insn = head; insn != NEXT_INSN (tail); insn = NEXT_INSN (insn))
    {
      enum attr_units units;
      int count, side1, side2, req1, req2;
      unsigned int mask1, mask2;
      insn_rr_info *info;

      if (!NONDEBUG_INSN_P (insn))
	continue;

      count = get_unit_reqs (insn, &req1, &side1, &req2, &side2);

      if (count == 0)
	continue;

      if (!get_unit_operand_masks (insn, &mask1, &mask2))
	continue;

      info = &insn_rr[INSN_UID (insn)];
      if (info->op_info == NULL)
	continue;

      if (reqs[side1][req1] > 1
	  && reqs[side1][req1] > 2 * reqs[side1 ^ 1][req1])
	try_rename_operands (head, tail, reqs, insn, info, mask1, side1);

      units = get_attr_units (insn);
      if (units == UNITS_D_ADDR)
	{
	  gcc_assert (count == 2);
	  if (reqs[side2][req2] > 1
	      && reqs[side2][req2] > 2 * reqs[side2 ^ 1][req2])
	    try_rename_operands (head, tail, reqs, insn, info, mask2, side2);
	}
    }
  regrename_finish ();
}
/* Backend scheduling state.  */
typedef struct c6x_sched_context
{
  /* The current scheduler clock, saved in the sched_reorder hook.  */
  int curr_sched_clock;

  /* Number of insns issued so far in this cycle.  */
  int issued_this_cycle;

  /* We record the time at which each jump occurs in JUMP_CYCLES.  The
     theoretical maximum for number of jumps in flight is 12: 2 every
     cycle, with a latency of 6 cycles each.  This is a circular
     buffer; JUMP_CYCLE_INDEX is the pointer to the start.  Earlier
     jumps have a higher index.  This array should be accessed through
     the jump_cycle function.  */
  int jump_cycles[12];
  int jump_cycle_index;

  /* In parallel with jump_cycles, this array records the opposite of
     the condition used in each pending jump.  This is used to
     predicate insns that are scheduled in the jump's delay slots.  If
     this is NULL_RTX no such predication happens.  */
  rtx jump_cond[12];

  /* Similar to the jump_cycles mechanism, but here we take into
     account all insns with delay slots, to avoid scheduling asms into
     the delay slots.  */
  int delays_finished_at;

  /* The following variable value is the last issued insn.  */
  rtx_insn *last_scheduled_insn;
  /* The last issued insn that isn't a shadow of another.  */
  rtx_insn *last_scheduled_iter0;

  /* The following variable value is DFA state before issuing the
     first insn in the current clock cycle.  We do not use this member
     of the structure directly; we copy the data in and out of
     prev_cycle_state.  */
  state_t prev_cycle_state_ctx;

  int reg_n_accesses[FIRST_PSEUDO_REGISTER];
  int reg_n_xaccesses[FIRST_PSEUDO_REGISTER];
  int reg_set_in_cycle[FIRST_PSEUDO_REGISTER];

  int tmp_reg_n_accesses[FIRST_PSEUDO_REGISTER];
  int tmp_reg_n_xaccesses[FIRST_PSEUDO_REGISTER];
} *c6x_sched_context_t;

/* The current scheduling state.  */
static struct c6x_sched_context ss;

/* The following variable value is DFA state before issuing the first insn
   in the current clock cycle.  This is used in c6x_variable_issue for
   comparison with the state after issuing the last insn in a cycle.  */
static state_t prev_cycle_state;

/* Set when we discover while processing an insn that it would lead to too
   many accesses of the same register.  */
static bool reg_access_stall;

/* The highest insn uid after delayed insns were split, but before loop bodies
   were copied by the modulo scheduling code.  */
static int sploop_max_uid_iter0;
/* Look up the jump cycle with index N.  For an out-of-bounds N, we return 0,
   so the caller does not specifically have to test for it.  */
static int
get_jump_cycle (int n)
{
  if (n >= 12)
    return 0;
  n += ss.jump_cycle_index;
  if (n >= 12)
    n -= 12;
  return ss.jump_cycles[n];
}

/* Look up the jump condition with index N.  */
static rtx
get_jump_cond (int n)
{
  if (n >= 12)
    return NULL_RTX;
  n += ss.jump_cycle_index;
  if (n >= 12)
    n -= 12;
  return ss.jump_cond[n];
}

/* Return the index of the first jump that occurs after CLOCK_VAR.  If no jump
   has delay slots beyond CLOCK_VAR, return -1.  */
static int
first_jump_index (int clock_var)
{
  int retval = -1;
  int n = 0;
  for (;;)
    {
      int t = get_jump_cycle (n);
      if (t <= clock_var)
	break;
      retval = n;
      n++;
    }
  return retval;
}
/* Add a new entry in our scheduling state for a jump that occurs in CYCLE
   and has the opposite condition of COND.  */
static void
record_jump (int cycle, rtx cond)
{
  if (ss.jump_cycle_index == 0)
    ss.jump_cycle_index = 11;
  else
    ss.jump_cycle_index--;
  ss.jump_cycles[ss.jump_cycle_index] = cycle;
  ss.jump_cond[ss.jump_cycle_index] = cond;
}
/* Set the clock cycle of INSN to CYCLE.  Also clears the insn's entry in
   our scheduling information.  */
static void
insn_set_clock (rtx insn, int cycle)
{
  unsigned uid = INSN_UID (insn);

  if (uid >= INSN_INFO_LENGTH)
    insn_info.safe_grow (uid * 5 / 4 + 10);

  INSN_INFO_ENTRY (uid).clock = cycle;
  INSN_INFO_ENTRY (uid).new_cond = NULL;
  INSN_INFO_ENTRY (uid).reservation = 0;
  INSN_INFO_ENTRY (uid).ebb_start = false;
}

/* Return the clock cycle we set for the insn with uid UID.  */
static int
insn_uid_get_clock (int uid)
{
  return INSN_INFO_ENTRY (uid).clock;
}

/* Return the clock cycle we set for INSN.  */
static int
insn_get_clock (rtx insn)
{
  return insn_uid_get_clock (INSN_UID (insn));
}
/* Examine INSN, and if it is a conditional jump of any kind, return
   the opposite of the condition in which it branches.  Otherwise,
   return NULL_RTX.  */
static rtx
condjump_opposite_condition (rtx insn)
{
  rtx pat = PATTERN (insn);
  int icode = INSN_CODE (insn);
  rtx x = NULL;

  if (icode == CODE_FOR_br_true || icode == CODE_FOR_br_false)
    {
      x = XEXP (SET_SRC (pat), 0);
      if (icode == CODE_FOR_br_false)
	return x;
    }
  if (GET_CODE (pat) == COND_EXEC)
    {
      rtx t = COND_EXEC_CODE (pat);
      if ((GET_CODE (t) == PARALLEL
	   && GET_CODE (XVECEXP (t, 0, 0)) == RETURN)
	  || (GET_CODE (t) == UNSPEC && XINT (t, 1) == UNSPEC_REAL_JUMP)
	  || (GET_CODE (t) == SET && SET_DEST (t) == pc_rtx))
	x = COND_EXEC_TEST (pat);
    }

  if (x != NULL_RTX)
    {
      enum rtx_code code = GET_CODE (x);
      x = gen_rtx_fmt_ee (code == EQ ? NE : EQ,
			  GET_MODE (x), XEXP (x, 0),
			  XEXP (x, 1));
    }
  return x;
}

/* Return true iff COND1 and COND2 are exactly opposite conditions
   one of them NE and the other EQ.  */
static bool
conditions_opposite_p (rtx cond1, rtx cond2)
{
  return (rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
	  && rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1))
	  && GET_CODE (cond1) == reverse_condition (GET_CODE (cond2)));
}

/* Return true if we can add a predicate COND to INSN, or if INSN
   already has that predicate.  If DOIT is true, also perform the
   modification.  */
static bool
predicate_insn (rtx_insn *insn, rtx cond, bool doit)
{
  int icode;
  if (cond == NULL_RTX)
    {
      gcc_assert (!doit);
      return false;
    }

  if (get_attr_predicable (insn) == PREDICABLE_YES
      && GET_CODE (PATTERN (insn)) != COND_EXEC)
    {
      if (doit)
	{
	  cond = copy_rtx (cond);
	  rtx newpat = gen_rtx_COND_EXEC (VOIDmode, cond, PATTERN (insn));
	  PATTERN (insn) = newpat;
	  INSN_CODE (insn) = -1;
	}
      return true;
    }
  if (GET_CODE (PATTERN (insn)) == COND_EXEC
      && rtx_equal_p (COND_EXEC_TEST (PATTERN (insn)), cond))
    return true;
  icode = INSN_CODE (insn);
  if (icode == CODE_FOR_real_jump
      || icode == CODE_FOR_jump
      || icode == CODE_FOR_indirect_jump)
    {
      rtx pat = PATTERN (insn);
      rtx dest = (icode == CODE_FOR_real_jump ? XVECEXP (pat, 0, 0)
		  : icode == CODE_FOR_jump ? XEXP (SET_SRC (pat), 0)
		  : SET_SRC (pat));
      if (doit)
	{
	  rtx newpat;
	  if (REG_P (dest))
	    newpat = gen_rtx_COND_EXEC (VOIDmode, cond, PATTERN (insn));
	  else
	    newpat = gen_br_true (cond, XEXP (cond, 0), dest);
	  PATTERN (insn) = newpat;
	  INSN_CODE (insn) = -1;
	}
      return true;
    }
  if (INSN_CODE (insn) == CODE_FOR_br_true)
    {
      rtx br_cond = XEXP (SET_SRC (PATTERN (insn)), 0);
      return rtx_equal_p (br_cond, cond);
    }
  if (INSN_CODE (insn) == CODE_FOR_br_false)
    {
      rtx br_cond = XEXP (SET_SRC (PATTERN (insn)), 0);
      return conditions_opposite_p (br_cond, cond);
    }
  return false;
}
/* Initialize SC.  Used by c6x_init_sched_context and c6x_sched_init.  */
static void
init_sched_state (c6x_sched_context_t sc)
{
  sc->last_scheduled_insn = NULL;
  sc->last_scheduled_iter0 = NULL;
  sc->issued_this_cycle = 0;
  memset (sc->jump_cycles, 0, sizeof sc->jump_cycles);
  memset (sc->jump_cond, 0, sizeof sc->jump_cond);
  sc->jump_cycle_index = 0;
  sc->delays_finished_at = 0;
  sc->curr_sched_clock = 0;

  sc->prev_cycle_state_ctx = xmalloc (dfa_state_size);

  memset (sc->reg_n_accesses, 0, sizeof sc->reg_n_accesses);
  memset (sc->reg_n_xaccesses, 0, sizeof sc->reg_n_xaccesses);
  memset (sc->reg_set_in_cycle, 0, sizeof sc->reg_set_in_cycle);

  state_reset (sc->prev_cycle_state_ctx);
}

/* Allocate store for new scheduling context.  */
static void *
c6x_alloc_sched_context (void)
{
  return xmalloc (sizeof (struct c6x_sched_context));
}

/* If CLEAN_P is true then initializes _SC with clean data,
   and from the global context otherwise.  */
static void
c6x_init_sched_context (void *_sc, bool clean_p)
{
  c6x_sched_context_t sc = (c6x_sched_context_t) _sc;

  if (clean_p)
    init_sched_state (sc);
  else
    {
      *sc = ss;
      sc->prev_cycle_state_ctx = xmalloc (dfa_state_size);
      memcpy (sc->prev_cycle_state_ctx, prev_cycle_state, dfa_state_size);
    }
}

/* Sets the global scheduling context to the one pointed to by _SC.  */
static void
c6x_set_sched_context (void *_sc)
{
  c6x_sched_context_t sc = (c6x_sched_context_t) _sc;

  gcc_assert (sc != NULL);
  ss = *sc;
  memcpy (prev_cycle_state, sc->prev_cycle_state_ctx, dfa_state_size);
}

/* Clear data in _SC.  */
static void
c6x_clear_sched_context (void *_sc)
{
  c6x_sched_context_t sc = (c6x_sched_context_t) _sc;
  gcc_assert (_sc != NULL);

  free (sc->prev_cycle_state_ctx);
}

/* Free _SC.  */
static void
c6x_free_sched_context (void *_sc)
{
  free (_sc);
}

/* True if we are currently performing a preliminary scheduling
   pass before modulo scheduling; we can't allow the scheduler to
   modify instruction patterns using packetization assumptions,
   since there will be another scheduling pass later if modulo
   scheduling fails.  */
static bool in_hwloop;

/* Provide information about speculation capabilities, and set the
   DO_BACKTRACKING flag.  */
static void
c6x_set_sched_flags (spec_info_t spec_info)
{
  unsigned int *flags = &(current_sched_info->flags);

  if (*flags & SCHED_EBB)
    *flags |= DO_BACKTRACKING | DO_PREDICATION;
  if (in_hwloop)
    *flags |= DONT_BREAK_DEPENDENCIES;

  spec_info->mask = 0;
}
/* Implement the TARGET_SCHED_ISSUE_RATE hook.  */

static int
c6x_issue_rate (void)
{
  return 8;
}

/* Used together with the collapse_ndfa option, this ensures that we reach a
   deterministic automaton state before trying to advance a cycle.
   With collapse_ndfa, genautomata creates advance cycle arcs only for
   such deterministic states.  */

static rtx
c6x_sched_dfa_pre_cycle_insn (void)
{
  return const0_rtx;
}

/* We're beginning a new block.  Initialize data structures as necessary.  */

static void
c6x_sched_init (FILE *dump ATTRIBUTE_UNUSED,
		int sched_verbose ATTRIBUTE_UNUSED,
		int max_ready ATTRIBUTE_UNUSED)
{
  if (prev_cycle_state == NULL)
    prev_cycle_state = xmalloc (dfa_state_size);

  init_sched_state (&ss);
  state_reset (prev_cycle_state);
}

/* We are about to begin issuing INSN.  Return nonzero if we cannot
   issue it on given cycle CLOCK and return zero if we should not sort
   the ready queue on the next clock start.
   For C6X, we use this function just to copy the previous DFA state
   for comparison purposes.  */

static int
c6x_dfa_new_cycle (FILE *dump ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
		   rtx_insn *insn ATTRIBUTE_UNUSED,
		   int last_clock ATTRIBUTE_UNUSED,
		   int clock ATTRIBUTE_UNUSED, int *sort_p ATTRIBUTE_UNUSED)
{
  if (clock != last_clock)
    memcpy (prev_cycle_state, curr_state, dfa_state_size);
  return 0;
}
/* Record an access to register REGNO in the current cycle, setting
   REG_ACCESS_STALL if too many accesses occur.  If CROSS, the access
   uses a cross path.  */

static void
c6x_mark_regno_read (int regno, bool cross)
{
  int t = ++ss.tmp_reg_n_accesses[regno];

  if (t > 4)
    reg_access_stall = true;

  if (cross)
    {
      int set_cycle = ss.reg_set_in_cycle[regno];
      /* This must be done in this way rather than by tweaking things in
	 adjust_cost, since the stall occurs even for insns with opposite
	 predicates, and the scheduler may not even see a dependency.  */
      if (set_cycle > 0 && set_cycle == ss.curr_sched_clock)
	reg_access_stall = true;
      /* This doesn't quite do anything yet as we're only modeling one
	 side of the machine.  */
      ++ss.tmp_reg_n_xaccesses[regno];
    }
}

/* Note that REG is read in the insn being examined.  If CROSS, it
   means the access is through a cross path.  Update the temporary reg
   access arrays, and set REG_ACCESS_STALL if the insn can't be issued
   in the current cycle.  */

static void
c6x_mark_reg_read (rtx reg, bool cross)
{
  unsigned regno = REGNO (reg);
  unsigned nregs = REG_NREGS (reg);

  while (nregs-- > 0)
    c6x_mark_regno_read (regno + nregs, cross);
}

/* Note that register REG is written in cycle CYCLES.  */

static void
c6x_mark_reg_written (rtx reg, int cycles)
{
  unsigned regno = REGNO (reg);
  unsigned nregs = REG_NREGS (reg);

  while (nregs-- > 0)
    ss.reg_set_in_cycle[regno + nregs] = cycles;
}

/* Update the register state information for an instruction whose
   body is X.  Return true if the instruction has to be delayed until the
   next cycle.  */

static bool
c6x_registers_update (rtx_insn *insn)
{
  enum attr_cross cross;
  enum attr_dest_regfile destrf;
  int i, nops;
  rtx x;

  if (!reload_completed || recog_memoized (insn) < 0)
    return false;

  reg_access_stall = false;
  memcpy (ss.tmp_reg_n_accesses, ss.reg_n_accesses,
	  sizeof ss.tmp_reg_n_accesses);
  memcpy (ss.tmp_reg_n_xaccesses, ss.reg_n_xaccesses,
	  sizeof ss.tmp_reg_n_xaccesses);

  extract_insn (insn);

  cross = get_attr_cross (insn);
  destrf = get_attr_dest_regfile (insn);

  nops = recog_data.n_operands;
  x = PATTERN (insn);
  if (GET_CODE (x) == COND_EXEC)
    c6x_mark_reg_read (XEXP (XEXP (x, 0), 0), false);

  for (i = 0; i < nops; i++)
    {
      rtx op = recog_data.operand[i];
      if (recog_data.operand_type[i] == OP_OUT)
	continue;
      if (REG_P (op))
	{
	  bool this_cross = cross;
	  if (destrf == DEST_REGFILE_A && A_REGNO_P (REGNO (op)))
	    this_cross = false;
	  if (destrf == DEST_REGFILE_B && B_REGNO_P (REGNO (op)))
	    this_cross = false;
	  c6x_mark_reg_read (op, this_cross);
	}
      else if (MEM_P (op))
	{
	  op = XEXP (op, 0);
	  switch (GET_CODE (op))
	    {
	    case POST_INC:
	    case PRE_INC:
	    case POST_DEC:
	    case PRE_DEC:
	      op = XEXP (op, 0);
	      /* fall through */
	    case REG:
	      c6x_mark_reg_read (op, false);
	      break;
	    case POST_MODIFY:
	    case PRE_MODIFY:
	      op = XEXP (op, 1);
	      gcc_assert (GET_CODE (op) == PLUS);
	      /* fall through */
	    case PLUS:
	      c6x_mark_reg_read (XEXP (op, 0), false);
	      if (REG_P (XEXP (op, 1)))
		c6x_mark_reg_read (XEXP (op, 1), false);
	      break;
	    case SYMBOL_REF:
	    case LABEL_REF:
	    case CONST:
	      c6x_mark_regno_read (REG_B14, false);
	      break;
	    default:
	      gcc_unreachable ();
	    }
	}
      else if (!CONSTANT_P (op) && strlen (recog_data.constraints[i]) > 0)
	gcc_unreachable ();
    }
  return reg_access_stall;
}
/* Helper function for the TARGET_SCHED_REORDER and
   TARGET_SCHED_REORDER2 hooks.  If scheduling an insn would be unsafe
   in the current cycle, move it down in the ready list and return the
   number of non-unsafe insns.  */

static int
c6x_sched_reorder_1 (rtx_insn **ready, int *pn_ready, int clock_var)
{
  int n_ready = *pn_ready;
  rtx_insn **e_ready = ready + n_ready;
  rtx_insn **insnp;
  int first_jump;

  /* Keep track of conflicts due to a limit number of register accesses,
     and due to stalls incurred by too early accesses of registers using
     cross paths.  */

  for (insnp = ready; insnp < e_ready; insnp++)
    {
      rtx_insn *insn = *insnp;
      int icode = recog_memoized (insn);
      bool is_asm = (icode < 0
		     && (GET_CODE (PATTERN (insn)) == ASM_INPUT
			 || asm_noperands (PATTERN (insn)) >= 0));
      bool no_parallel = (is_asm || icode == CODE_FOR_sploop
			  || (icode >= 0
			      && get_attr_type (insn) == TYPE_ATOMIC));

      /* We delay asm insns until all delay slots are exhausted.  We can't
	 accurately tell how many cycles an asm takes, and the main scheduling
	 code always assumes at least 1 cycle, which may be wrong.  */
      if ((no_parallel
	   && (ss.issued_this_cycle > 0 || clock_var < ss.delays_finished_at))
	  || c6x_registers_update (insn)
	  || (ss.issued_this_cycle > 0 && icode == CODE_FOR_sploop))
	{
	  memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
	  *ready = insn;
	  ready++;
	  n_ready--;
	}
      else if (shadow_p (insn))
	{
	  memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
	  *ready = insn;
	}
    }

  /* Ensure that no other jump is scheduled in jump delay slots, since
     it would put the machine into the wrong state.  Also, we must
     avoid scheduling insns that have a latency longer than the
     remaining jump delay slots, as the code at the jump destination
     won't be prepared for it.

     However, we can relax this condition somewhat.  The rest of the
     scheduler will automatically avoid scheduling an insn on which
     the jump shadow depends so late that its side effect happens
     after the jump.  This means that if we see an insn with a longer
     latency here, it can safely be scheduled if we can ensure that it
     has a predicate opposite of the previous jump: the side effect
     will happen in what we think of as the same basic block.  In
     c6x_variable_issue, we will record the necessary predicate in
     new_conditions, and after scheduling is finished, we will modify
     the insn.

     Special care must be taken whenever there is more than one jump
     in flight.  */

  first_jump = first_jump_index (clock_var);
  if (first_jump != -1)
    {
      int first_cycle = get_jump_cycle (first_jump);
      rtx first_cond = get_jump_cond (first_jump);
      int second_cycle = 0;

      if (first_jump > 0)
	second_cycle = get_jump_cycle (first_jump - 1);

      for (insnp = ready; insnp < e_ready; insnp++)
	{
	  rtx_insn *insn = *insnp;
	  int icode = recog_memoized (insn);
	  bool is_asm = (icode < 0
			 && (GET_CODE (PATTERN (insn)) == ASM_INPUT
			     || asm_noperands (PATTERN (insn)) >= 0));
	  int this_cycles, rsrv_cycles;
	  enum attr_type type;

	  gcc_assert (!is_asm);
	  if (icode < 0)
	    continue;
	  this_cycles = get_attr_cycles (insn);
	  rsrv_cycles = get_attr_reserve_cycles (insn);
	  type = get_attr_type (insn);
	  /* Treat branches specially; there is also a hazard if two jumps
	     end at the same cycle.  */
	  if (type == TYPE_BRANCH || type == TYPE_CALL)
	    this_cycles++;
	  if (clock_var + this_cycles <= first_cycle)
	    continue;
	  if ((first_jump > 0 && clock_var + this_cycles > second_cycle)
	      || clock_var + rsrv_cycles > first_cycle
	      || !predicate_insn (insn, first_cond, false))
	    {
	      memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
	      *ready = insn;
	      ready++;
	      n_ready--;
	    }
	}
    }

  return n_ready;
}

/* Implement the TARGET_SCHED_REORDER hook.  We save the current clock
   for later and clear the register access information for the new
   cycle.  We also move asm statements out of the way if they would be
   scheduled in a delay slot.  */

static int
c6x_sched_reorder (FILE *dump ATTRIBUTE_UNUSED,
		   int sched_verbose ATTRIBUTE_UNUSED,
		   rtx_insn **ready ATTRIBUTE_UNUSED,
		   int *pn_ready ATTRIBUTE_UNUSED, int clock_var)
{
  ss.curr_sched_clock = clock_var;
  ss.issued_this_cycle = 0;
  memset (ss.reg_n_accesses, 0, sizeof ss.reg_n_accesses);
  memset (ss.reg_n_xaccesses, 0, sizeof ss.reg_n_xaccesses);

  return c6x_sched_reorder_1 (ready, pn_ready, clock_var);
}

/* Implement the TARGET_SCHED_REORDER2 hook.  We use this to record the clock
   cycle for every insn.  */

static int
c6x_sched_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
		    int sched_verbose ATTRIBUTE_UNUSED,
		    rtx_insn **ready ATTRIBUTE_UNUSED,
		    int *pn_ready ATTRIBUTE_UNUSED, int clock_var)
{
  /* FIXME: the assembler rejects labels inside an execute packet.
     This can occur if prologue insns are scheduled in parallel with
     others, so we avoid this here.  Also make sure that nothing is
     scheduled in parallel with a TYPE_ATOMIC insn or after a jump.  */
  if (RTX_FRAME_RELATED_P (ss.last_scheduled_insn)
      || JUMP_P (ss.last_scheduled_insn)
      || (recog_memoized (ss.last_scheduled_insn) >= 0
	  && get_attr_type (ss.last_scheduled_insn) == TYPE_ATOMIC))
    {
      int n_ready = *pn_ready;
      rtx_insn **e_ready = ready + n_ready;
      rtx_insn **insnp;

      for (insnp = ready; insnp < e_ready; insnp++)
	{
	  rtx_insn *insn = *insnp;
	  if (!shadow_p (insn))
	    {
	      memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
	      *ready = insn;
	      ready++;
	      n_ready--;
	    }
	}
      return n_ready;
    }

  return c6x_sched_reorder_1 (ready, pn_ready, clock_var);
}
/* Subroutine of maybe_clobber_cond, called through note_stores.  */

static void
clobber_cond_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data1)
{
  rtx *cond = (rtx *)data1;
  if (*cond != NULL_RTX && reg_overlap_mentioned_p (x, *cond))
    *cond = NULL_RTX;
}

/* Examine INSN, and if it destroys the conditions we have recorded for
   any of the jumps in flight, clear that condition so that we don't
   predicate any more insns.  CLOCK_VAR helps us limit the search to
   only those jumps which are still in flight.  */

static void
maybe_clobber_cond (rtx insn, int clock_var)
{
  int n, idx;
  idx = ss.jump_cycle_index;
  for (n = 0; n < 12; n++, idx++)
    {
      rtx cond, link;
      int cycle;

      if (idx >= 12)
	idx -= 12;
      cycle = ss.jump_cycles[idx];
      if (cycle <= clock_var)
	break;

      cond = ss.jump_cond[idx];
      if (cond == NULL_RTX)
	continue;

      if (CALL_P (insn))
	{
	  ss.jump_cond[idx] = NULL_RTX;
	  continue;
	}

      note_stores (PATTERN (insn), clobber_cond_1, ss.jump_cond + idx);
      for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
	if (REG_NOTE_KIND (link) == REG_INC)
	  clobber_cond_1 (XEXP (link, 0), NULL_RTX, ss.jump_cond + idx);
    }
}
/* Implement the TARGET_SCHED_VARIABLE_ISSUE hook.  We are about to
   issue INSN.  Return the number of insns left on the ready queue
   that can be issued this cycle.
   We use this hook to record clock cycles and reservations for every insn.  */

static int
c6x_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
		    int sched_verbose ATTRIBUTE_UNUSED,
		    rtx_insn *insn, int can_issue_more ATTRIBUTE_UNUSED)
{
  ss.last_scheduled_insn = insn;
  if (INSN_UID (insn) < sploop_max_uid_iter0 && !JUMP_P (insn))
    ss.last_scheduled_iter0 = insn;
  if (GET_CODE (PATTERN (insn)) != USE && GET_CODE (PATTERN (insn)) != CLOBBER)
    ss.issued_this_cycle++;
  if (insn_info.exists ())
    {
      state_t st_after = alloca (dfa_state_size);
      int curr_clock = ss.curr_sched_clock;
      int uid = INSN_UID (insn);
      int icode = recog_memoized (insn);
      rtx first_cond;
      int first, first_cycle;
      unsigned int mask;
      int i;

      insn_set_clock (insn, curr_clock);
      INSN_INFO_ENTRY (uid).ebb_start
	= curr_clock == 0 && ss.issued_this_cycle == 1;

      first = first_jump_index (ss.curr_sched_clock);
      if (first == -1)
	{
	  first_cycle = 0;
	  first_cond = NULL_RTX;
	}
      else
	{
	  first_cycle = get_jump_cycle (first);
	  first_cond = get_jump_cond (first);
	}
      if (icode >= 0
	  && first_cycle > curr_clock
	  && first_cond != NULL_RTX
	  && (curr_clock + get_attr_cycles (insn) > first_cycle
	      || get_attr_type (insn) == TYPE_BRANCH
	      || get_attr_type (insn) == TYPE_CALL))
	INSN_INFO_ENTRY (uid).new_cond = first_cond;

      memcpy (st_after, curr_state, dfa_state_size);
      state_transition (st_after, const0_rtx);

      mask = 0;
      for (i = 0; i < 2 * UNIT_QID_SIDE_OFFSET; i++)
	if (cpu_unit_reservation_p (st_after, c6x_unit_codes[i])
	    && !cpu_unit_reservation_p (prev_cycle_state, c6x_unit_codes[i]))
	  mask |= 1 << i;
      INSN_INFO_ENTRY (uid).unit_mask = mask;

      maybe_clobber_cond (insn, curr_clock);

      if (icode >= 0)
	{
	  int i, cycles;

	  c6x_registers_update (insn);
	  memcpy (ss.reg_n_accesses, ss.tmp_reg_n_accesses,
		  sizeof ss.reg_n_accesses);
	  memcpy (ss.reg_n_xaccesses, ss.tmp_reg_n_accesses,
		  sizeof ss.reg_n_xaccesses);

	  cycles = get_attr_cycles (insn);
	  if (ss.delays_finished_at < ss.curr_sched_clock + cycles)
	    ss.delays_finished_at = ss.curr_sched_clock + cycles;
	  if (get_attr_type (insn) == TYPE_BRANCH
	      || get_attr_type (insn) == TYPE_CALL)
	    {
	      rtx opposite = condjump_opposite_condition (insn);
	      record_jump (ss.curr_sched_clock + cycles, opposite);
	    }

	  /* Mark the cycles in which the destination registers are written.
	     This is used for calculating stalls when using cross units.  */
	  extract_insn (insn);
	  /* Cross-path stalls don't apply to results of load insns.  */
	  if (get_attr_type (insn) == TYPE_LOAD
	      || get_attr_type (insn) == TYPE_LOADN
	      || get_attr_type (insn) == TYPE_LOAD_SHADOW)
	    cycles--;
	  for (i = 0; i < recog_data.n_operands; i++)
	    {
	      rtx op = recog_data.operand[i];
	      if (MEM_P (op))
		{
		  rtx addr = XEXP (op, 0);
		  if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC)
		    c6x_mark_reg_written (XEXP (addr, 0),
					  insn_uid_get_clock (uid) + 1);
		}
	      if (recog_data.operand_type[i] != OP_IN
		  && REG_P (op))
		c6x_mark_reg_written (op,
				      insn_uid_get_clock (uid) + cycles);
	    }
	}
    }
  return can_issue_more;
}
/* Implement the TARGET_SCHED_ADJUST_COST hook.  We need special handling for
   anti- and output dependencies.  */

static int
c6x_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
		 unsigned int)
{
  enum attr_type insn_type = TYPE_UNKNOWN, dep_insn_type = TYPE_UNKNOWN;
  int dep_insn_code_number, insn_code_number;
  int shadow_bonus = 0;
  enum reg_note kind;

  dep_insn_code_number = recog_memoized (dep_insn);
  insn_code_number = recog_memoized (insn);

  if (dep_insn_code_number >= 0)
    dep_insn_type = get_attr_type (dep_insn);

  if (insn_code_number >= 0)
    insn_type = get_attr_type (insn);

  kind = (reg_note) dep_type;
  if (kind == 0)
    {
      /* If we have a dependency on a load, and it's not for the result of
	 the load, it must be for an autoincrement.  Reduce the cost in that
	 case.  */
      if (dep_insn_type == TYPE_LOAD)
	{
	  rtx set = PATTERN (dep_insn);
	  if (GET_CODE (set) == COND_EXEC)
	    set = COND_EXEC_CODE (set);
	  if (GET_CODE (set) == UNSPEC)
	    cost = 1;
	  else
	    {
	      gcc_assert (GET_CODE (set) == SET);
	      if (!reg_overlap_mentioned_p (SET_DEST (set), PATTERN (insn)))
		cost = 1;
	    }
	}
    }

  /* A jump shadow needs to have its latency decreased by one.  Conceptually,
     it occurs in between two cycles, but we schedule it at the end of the
     first cycle.  */
  if (shadow_type_p (insn_type))
    shadow_bonus = 1;

  /* Anti and output dependencies usually have zero cost, but we want
     to insert a stall after a jump, and after certain floating point
     insns that take more than one cycle to read their inputs.  In the
     future, we should try to find a better algorithm for scheduling
     jumps.  */
  if (kind != 0)
    {
      /* We can get anti-dependencies against shadow insns.  Treat these
	 like output dependencies, so that the insn is entirely finished
	 before the branch takes place.  */
      if (kind == REG_DEP_ANTI && insn_type == TYPE_SHADOW)
	kind = REG_DEP_OUTPUT;
      switch (dep_insn_type)
	{
	case TYPE_CALLP:
	  return 1;
	case TYPE_BRANCH:
	case TYPE_CALL:
	  if (get_attr_has_shadow (dep_insn) == HAS_SHADOW_Y)
	    /* This is a real_jump/real_call insn.  These don't have
	       outputs, and ensuring the validity of scheduling things
	       in the delay slot is the job of
	       c6x_sched_reorder_1.  */
	    return 0;
	  /* Unsplit calls can happen - e.g. for divide insns.  */
	  return 6;
	case TYPE_LOAD:
	case TYPE_LOADN:
	case TYPE_LOAD_SHADOW:
	  if (kind == REG_DEP_OUTPUT)
	    return 5 - shadow_bonus;
	  return 0;
	case TYPE_MPY4:
	case TYPE_FP4:
	  if (kind == REG_DEP_OUTPUT)
	    return 4 - shadow_bonus;
	  return 0;
	case TYPE_MPY2:
	  if (kind == REG_DEP_OUTPUT)
	    return 2 - shadow_bonus;
	  return 0;
	case TYPE_CMPDP:
	  if (kind == REG_DEP_OUTPUT)
	    return 2 - shadow_bonus;
	  return 0;
	case TYPE_ADDDP:
	case TYPE_MPYSPDP:
	  if (kind == REG_DEP_OUTPUT)
	    return 7 - shadow_bonus;
	  return 0;
	case TYPE_MPYSP2DP:
	  if (kind == REG_DEP_OUTPUT)
	    return 5 - shadow_bonus;
	  return 0;
	case TYPE_MPYI:
	  if (kind == REG_DEP_OUTPUT)
	    return 9 - shadow_bonus;
	  return 0;
	case TYPE_MPYID:
	case TYPE_MPYDP:
	  if (kind == REG_DEP_OUTPUT)
	    return 10 - shadow_bonus;
	  return 0;
	default:
	  if (insn_type == TYPE_SPKERNEL)
	    return 0;
	  if (kind == REG_DEP_OUTPUT)
	    return 1 - shadow_bonus;

	  return 0;
	}
    }

  return cost - shadow_bonus;
}
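
/* For example, an output dependency whose producer's result appears after
   five cycles yields a cost of 5 - shadow_bonus: 5 for an ordinary
   consumer, 4 when the consumer is a shadow insn, since the shadow
   conceptually executes between two cycles.  */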
/* Create a SEQUENCE rtx to replace the instructions in SLOT, of which there
   are N_FILLED.  REAL_FIRST identifies the slot of the insn that appears
   first in the original stream.  */

static void
gen_one_bundle (rtx_insn **slot, int n_filled, int real_first)
{
  rtx seq;
  rtx_insn *bundle;
  rtx_insn *t;
  int i;

  seq = gen_rtx_SEQUENCE (VOIDmode, gen_rtvec_v (n_filled, slot));
  bundle = make_insn_raw (seq);
  BLOCK_FOR_INSN (bundle) = BLOCK_FOR_INSN (slot[0]);
  INSN_LOCATION (bundle) = INSN_LOCATION (slot[0]);
  SET_PREV_INSN (bundle) = SET_PREV_INSN (slot[real_first]);

  t = NULL;

  for (i = 0; i < n_filled; i++)
    {
      rtx_insn *insn = slot[i];
      SET_PREV_INSN (insn) = t ? t : PREV_INSN (bundle);
      if (t != NULL_RTX)
	SET_NEXT_INSN (t) = insn;
      t = insn;
      if (i > 0)
	INSN_LOCATION (slot[i]) = INSN_LOCATION (bundle);
    }

  SET_NEXT_INSN (bundle) = NEXT_INSN (PREV_INSN (bundle));
  SET_NEXT_INSN (t) = NEXT_INSN (bundle);
  SET_NEXT_INSN (PREV_INSN (bundle)) = bundle;
  SET_PREV_INSN (NEXT_INSN (bundle)) = bundle;
}

/* Move all parallel instructions into SEQUENCEs, so that no subsequent passes
   try to insert labels in the middle.  */

static void
c6x_gen_bundles (void)
{
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    {
      rtx_insn *insn, *next;
      /* The machine is eight insns wide.  We can have up to six shadow
	 insns, plus an extra slot for merging the jump shadow.  */
      rtx_insn *slot[15];
      int n_filled = 0;
      int first_slot = 0;

      for (insn = BB_HEAD (bb);; insn = next)
	{
	  int at_end;
	  rtx delete_this = NULL_RTX;

	  if (NONDEBUG_INSN_P (insn))
	    {
	      /* Put calls at the start of the sequence.  */
	      if (CALL_P (insn))
		{
		  if (n_filled)
		    {
		      first_slot++;
		      memmove (&slot[1], &slot[0],
			       n_filled * sizeof (slot[0]));
		    }
		  slot[0] = insn;
		  n_filled++;
		  if (!shadow_p (insn))
		    {
		      PUT_MODE (insn, TImode);
		      if (n_filled > 1)
			PUT_MODE (slot[1], VOIDmode);
		    }
		}
	      else
		slot[n_filled++] = insn;
	    }

	  next = NEXT_INSN (insn);
	  while (next && insn != BB_END (bb)
		 && !(NONDEBUG_INSN_P (next)
		      && GET_CODE (PATTERN (next)) != USE
		      && GET_CODE (PATTERN (next)) != CLOBBER))
	    {
	      insn = next;
	      next = NEXT_INSN (insn);
	    }

	  at_end = insn == BB_END (bb);
	  if (delete_this == NULL_RTX
	      && (at_end || (GET_MODE (next) == TImode
			     && !(shadow_p (next) && CALL_P (next)))))
	    {
	      if (n_filled >= 2)
		gen_one_bundle (slot, n_filled, first_slot);
	      n_filled = 0;
	      first_slot = 0;
	    }
	  if (at_end)
	    break;
	}
    }
}
4712 /* Emit a NOP instruction for CYCLES cycles after insn AFTER. Return it. */
4715 emit_nop_after (int cycles
, rtx_insn
*after
)
4719 /* mpydp has 9 delay slots, and we may schedule a stall for a cross-path
4720 operation. We don't need the extra NOP since in this case, the hardware
4721 will automatically insert the required stall. */
4725 gcc_assert (cycles
< 10);
4727 insn
= emit_insn_after (gen_nop_count (GEN_INT (cycles
)), after
);
4728 PUT_MODE (insn
, TImode
);
4733 /* Determine whether INSN is a call that needs to have a return label
4737 returning_call_p (rtx_insn
*insn
)
4740 return (!SIBLING_CALL_P (insn
)
4741 && get_attr_type (insn
) != TYPE_CALLP
4742 && get_attr_type (insn
) != TYPE_SHADOW
);
4743 if (recog_memoized (insn
) < 0)
4745 if (get_attr_type (insn
) == TYPE_CALL
)
/* Determine whether INSN's pattern can be converted to use callp.  */

static bool
can_use_callp (rtx_insn *insn)
{
  int icode = recog_memoized (insn);
  if (!TARGET_INSNS_64PLUS
      || icode < 0
      || GET_CODE (PATTERN (insn)) == COND_EXEC)
    return false;

  return ((icode == CODE_FOR_real_call
	   || icode == CODE_FOR_call_internal
	   || icode == CODE_FOR_call_value_internal)
	  && get_attr_dest_regfile (insn) == DEST_REGFILE_ANY);
}
/* Convert the pattern of INSN, which must be a CALL_INSN, into a callp.  */

static void
convert_to_callp (rtx_insn *insn)
{
  rtx lab;
  extract_insn (insn);
  if (GET_CODE (PATTERN (insn)) == SET)
    {
      rtx dest = recog_data.operand[0];
      lab = recog_data.operand[1];
      PATTERN (insn) = gen_callp_value (dest, lab);
      INSN_CODE (insn) = CODE_FOR_callp_value;
    }
  else
    {
      lab = recog_data.operand[0];
      PATTERN (insn) = gen_callp (lab);
      INSN_CODE (insn) = CODE_FOR_callp;
    }
}
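
/* CALLP is only available on C64x+ and newer parts, hence the
   TARGET_INSNS_64PLUS test in can_use_callp above.  Because
   reorg_emit_nops below credits TYPE_CALLP insns with five implicit
   NOP cycles (prev_implicit_nops = 5), the converted call needs no
   explicit delay-slot filling.  */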
/* Scan forwards from INSN until we find the next insn that has mode TImode
   (indicating it starts a new cycle), and occurs in cycle CLOCK.
   Return it if we find such an insn, NULL_RTX otherwise.  */

static rtx_insn *
find_next_cycle_insn (rtx_insn *insn, int clock)
{
  rtx_insn *t = insn;
  if (GET_MODE (t) == TImode)
    t = next_real_insn (t);
  while (t && GET_MODE (t) != TImode)
    t = next_real_insn (t);

  if (t && insn_get_clock (t) == clock)
    return t;
  return NULL;
}
/* If COND_INSN has a COND_EXEC condition, wrap the same condition
   around PAT.  Return PAT either unchanged or modified in this
   way.  */

static rtx
duplicate_cond (rtx pat, rtx cond_insn)
{
  rtx cond_pat = PATTERN (cond_insn);
  if (GET_CODE (cond_pat) == COND_EXEC)
    pat = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (COND_EXEC_TEST (cond_pat)),
			     pat);
  return pat;
}
/* Walk forward from INSN to find the last insn that issues in the same clock
   cycle.  */

static rtx_insn *
find_last_same_clock (rtx_insn *insn)
{
  rtx_insn *retval = insn;
  rtx_insn *t = next_real_insn (insn);

  while (t && GET_MODE (t) != TImode)
    {
      if (!DEBUG_INSN_P (t) && recog_memoized (t) >= 0)
	retval = t;
      t = next_real_insn (t);
    }
  return retval;
}
/* For every call insn in the function, emit code to load the return
   address.  For each call we create a return label and store it in
   CALL_LABELS.  If we are not scheduling, we emit the labels here,
   otherwise the caller will do it later.
   This function is called after final insn scheduling, but before creating
   the SEQUENCEs that represent execute packets.  */
static void
reorg_split_calls (rtx_code_label **call_labels)
{
  unsigned int reservation_mask = 0;
  rtx_insn *insn = get_insns ();
  gcc_assert (NOTE_P (insn));
  insn = next_real_insn (insn);
  while (insn)
    {
      int uid;
      rtx_insn *next = next_real_insn (insn);

      if (DEBUG_INSN_P (insn))
	goto done;

      if (GET_MODE (insn) == TImode)
	reservation_mask = 0;
      uid = INSN_UID (insn);
      if (c6x_flag_schedule_insns2 && recog_memoized (insn) >= 0)
	reservation_mask |= 1 << INSN_INFO_ENTRY (uid).reservation;

      if (returning_call_p (insn))
	{
	  rtx_code_label *label = gen_label_rtx ();
	  rtx labelref = gen_rtx_LABEL_REF (Pmode, label);
	  rtx reg = gen_rtx_REG (SImode, RETURN_ADDR_REGNO);

	  LABEL_NUSES (label) = 2;
	  if (!c6x_flag_schedule_insns2)
	    {
	      if (can_use_callp (insn))
		convert_to_callp (insn);
	      else
		{
		  rtx t;
		  rtx_insn *slot[4];
		  emit_label_after (label, insn);

		  /* Bundle the call and its delay slots into a single
		     SEQUENCE.  While these do not issue in parallel
		     we need to group them into a single EH region.  */
		  slot[0] = insn;
		  PUT_MODE (insn, TImode);
		  if (TARGET_INSNS_64)
		    {
		      t = gen_addkpc (reg, labelref, GEN_INT (4));
		      slot[1] = emit_insn_after (duplicate_cond (t, insn),
						 insn);
		      PUT_MODE (slot[1], TImode);
		      gen_one_bundle (slot, 2, 0);
		    }
		  else
		    {
		      slot[3] = emit_insn_after (gen_nop_count (GEN_INT (3)),
						 insn);
		      PUT_MODE (slot[3], TImode);
		      t = gen_movsi_lo_sum (reg, reg, labelref);
		      slot[2] = emit_insn_after (duplicate_cond (t, insn),
						 insn);
		      PUT_MODE (slot[2], TImode);
		      t = gen_movsi_high (reg, labelref);
		      slot[1] = emit_insn_after (duplicate_cond (t, insn),
						 insn);
		      PUT_MODE (slot[1], TImode);
		      gen_one_bundle (slot, 4, 0);
		    }
		}
	    }
	  else
	    {
	      /* If we scheduled, we reserved the .S2 unit for one or two
		 cycles after the call.  Emit the insns in these slots,
		 unless it's possible to create a CALLP insn.
		 Note that this works because the dependencies ensure that
		 no insn setting/using B3 is scheduled in the delay slots of
		 a call.  */
	      int this_clock = insn_get_clock (insn);
	      rtx_insn *after1;

	      call_labels[INSN_UID (insn)] = label;

	      rtx_insn *last_same_clock = find_last_same_clock (insn);

	      if (can_use_callp (insn))
		{
		  /* Find the first insn of the next execute packet.  If it
		     is the shadow insn corresponding to this call, we may
		     use a CALLP insn.  */
		  rtx_insn *shadow =
		    next_nonnote_nondebug_insn (last_same_clock);

		  if (CALL_P (shadow)
		      && insn_get_clock (shadow) == this_clock + 5)
		    {
		      convert_to_callp (shadow);
		      insn_set_clock (shadow, this_clock);
		      INSN_INFO_ENTRY (INSN_UID (shadow)).reservation
			= RESERVATION_S2;
		      INSN_INFO_ENTRY (INSN_UID (shadow)).unit_mask
			= INSN_INFO_ENTRY (INSN_UID
					   (last_same_clock)).unit_mask;
		      if (GET_MODE (insn) == TImode)
			{
			  rtx_insn *new_cycle_first = NEXT_INSN (insn);
			  while (!NONDEBUG_INSN_P (new_cycle_first)
				 || GET_CODE (PATTERN (new_cycle_first)) == USE
				 || GET_CODE (PATTERN (new_cycle_first)) == CLOBBER)
			    new_cycle_first = NEXT_INSN (new_cycle_first);
			  PUT_MODE (new_cycle_first, TImode);
			  if (new_cycle_first != shadow)
			    PUT_MODE (shadow, VOIDmode);
			  INSN_INFO_ENTRY (INSN_UID (new_cycle_first)).ebb_start
			    = INSN_INFO_ENTRY (INSN_UID (insn)).ebb_start;
			}
		      else
			PUT_MODE (shadow, VOIDmode);
		      delete_insn (insn);
		      goto done;
		    }
		}
	      after1 = find_next_cycle_insn (last_same_clock, this_clock + 1);
	      if (after1 == NULL_RTX)
		after1 = last_same_clock;
	      else
		after1 = find_last_same_clock (after1);
	      if (TARGET_INSNS_64)
		{
		  rtx x1 = gen_addkpc (reg, labelref, const0_rtx);
		  x1 = emit_insn_after (duplicate_cond (x1, insn), after1);
		  insn_set_clock (x1, this_clock + 1);
		  INSN_INFO_ENTRY (INSN_UID (x1)).reservation = RESERVATION_S2;
		  if (after1 == last_same_clock)
		    PUT_MODE (x1, TImode);
		  else
		    INSN_INFO_ENTRY (INSN_UID (x1)).unit_mask
		      = INSN_INFO_ENTRY (INSN_UID (after1)).unit_mask;
		}
	      else
		{
		  rtx x1, x2;
		  rtx_insn *after2 = find_next_cycle_insn (after1,
							   this_clock + 2);
		  if (after2 == NULL_RTX)
		    after2 = after1;
		  x2 = gen_movsi_lo_sum (reg, reg, labelref);
		  x2 = emit_insn_after (duplicate_cond (x2, insn), after2);
		  x1 = gen_movsi_high (reg, labelref);
		  x1 = emit_insn_after (duplicate_cond (x1, insn), after1);
		  insn_set_clock (x1, this_clock + 1);
		  insn_set_clock (x2, this_clock + 2);
		  INSN_INFO_ENTRY (INSN_UID (x1)).reservation = RESERVATION_S2;
		  INSN_INFO_ENTRY (INSN_UID (x2)).reservation = RESERVATION_S2;
		  if (after1 == last_same_clock)
		    PUT_MODE (x1, TImode);
		  else
		    INSN_INFO_ENTRY (INSN_UID (x1)).unit_mask
		      = INSN_INFO_ENTRY (INSN_UID (after1)).unit_mask;
		  if (after1 == after2)
		    PUT_MODE (x2, TImode);
		  else
		    INSN_INFO_ENTRY (INSN_UID (x2)).unit_mask
		      = INSN_INFO_ENTRY (INSN_UID (after2)).unit_mask;
		}
	    }
	}
    done:
      insn = next;
    }
}
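
/* Worked example for the scheduled, non-TARGET_INSNS_64 path above
   (cycle numbers illustrative): a call issuing in cycle C gets the two
   halves of its return label moved into B3 on the .S2 unit in cycles
   C+1 (movsi_high) and C+2 (movsi_lo_sum); reorg_emit_nops later places
   the return label itself once cycle C+6 is reached, after the call's
   five delay slots have elapsed.  */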
/* Called as part of c6x_reorg.  This function emits multi-cycle NOP
   insns as required for correctness.  CALL_LABELS is the array that
   holds the return labels for call insns; we emit these here if
   scheduling was run earlier.  */
static void
reorg_emit_nops (rtx_code_label **call_labels)
{
  rtx last_call;
  rtx_insn *prev;
  int prev_clock, earliest_bb_end;
  int prev_implicit_nops;
  rtx_insn *insn = get_insns ();

  /* We look at one insn (or bundle inside a sequence) in each iteration, storing
     its issue time in PREV_CLOCK for the next iteration.  If there is a gap in
     clocks, we must insert a NOP.
     EARLIEST_BB_END tracks in which cycle all insns that have been issued in the
     current basic block will finish.  We must not allow the next basic block to
     begin before this cycle.
     PREV_IMPLICIT_NOPS tells us whether we've seen an insn that implicitly contains
     a multi-cycle nop.  The code is scheduled such that subsequent insns will
     show the cycle gap, but we needn't insert a real NOP instruction.  */
  insn = next_real_insn (insn);
  last_call = prev = NULL;
  prev_clock = -1;
  earliest_bb_end = 0;
  prev_implicit_nops = 0;
  while (insn)
    {
      int this_clock = -1;
      rtx_insn *next;
      int max_cycles = 0;

      next = next_real_insn (insn);

      if (DEBUG_INSN_P (insn)
	  || GET_CODE (PATTERN (insn)) == USE
	  || GET_CODE (PATTERN (insn)) == CLOBBER
	  || shadow_or_blockage_p (insn)
	  || JUMP_TABLE_DATA_P (insn))
	goto next_insn;

      if (!c6x_flag_schedule_insns2)
	/* No scheduling; ensure that no parallel issue happens.  */
	PUT_MODE (insn, TImode);
      else
	{
	  int cycles;

	  this_clock = insn_get_clock (insn);
	  if (this_clock != prev_clock)
	    {
	      PUT_MODE (insn, TImode);

	      if (prev_clock != -1)
		{
		  cycles = this_clock - prev_clock;

		  cycles -= prev_implicit_nops;
		  if (cycles > 1)
		    {
		      rtx nop = emit_nop_after (cycles - 1, prev);
		      insn_set_clock (nop, prev_clock + prev_implicit_nops + 1);
		    }
		}
	      prev_clock = this_clock;

	      if (last_call
		  && insn_get_clock (last_call) + 6 <= this_clock)
		{
		  emit_label_before (call_labels[INSN_UID (last_call)], insn);
		  last_call = NULL_RTX;
		}
	      prev_implicit_nops = 0;
	    }
	}

      /* Examine how many cycles the current insn takes, and adjust
	 LAST_CALL, EARLIEST_BB_END and PREV_IMPLICIT_NOPS.  */
      if (recog_memoized (insn) >= 0
	  /* If not scheduling, we've emitted NOPs after calls already.  */
	  && (c6x_flag_schedule_insns2 || !returning_call_p (insn)))
	{
	  max_cycles = get_attr_cycles (insn);
	  if (get_attr_type (insn) == TYPE_CALLP)
	    prev_implicit_nops = 5;
	}
      else
	max_cycles = 1;

      if (returning_call_p (insn))
	last_call = insn;

      if (c6x_flag_schedule_insns2)
	{
	  gcc_assert (this_clock >= 0);
	  if (earliest_bb_end < this_clock + max_cycles)
	    earliest_bb_end = this_clock + max_cycles;
	}
      else if (max_cycles > 1)
	emit_nop_after (max_cycles - 1, insn);

      prev = insn;

    next_insn:
      if (c6x_flag_schedule_insns2
	  && (next == NULL_RTX
	      || (GET_MODE (next) == TImode
		  && INSN_INFO_ENTRY (INSN_UID (next)).ebb_start))
	  && earliest_bb_end > 0)
	{
	  int cycles = earliest_bb_end - prev_clock;
	  if (cycles > 1)
	    {
	      prev = emit_nop_after (cycles - 1, prev);
	      insn_set_clock (prev, prev_clock + prev_implicit_nops + 1);
	    }

	  prev_clock = -1;
	  earliest_bb_end = 0;

	  if (last_call)
	    emit_label_after (call_labels[INSN_UID (last_call)], prev);
	  last_call = NULL_RTX;
	}
      insn = next;
    }
}
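
/* Worked example of the gap computation above: if the previous packet
   issued in cycle 2 and the current insn is scheduled in cycle 6 with
   no implicit nops pending, CYCLES is 4 and a single multi-cycle NOP
   (emit_nop_after (3, prev)) covers the three empty cycles 3..5.  */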
/* If possible, split INSN, which we know is either a jump or a call, into a real
   insn and its shadow.  */
static void
split_delayed_branch (rtx_insn *insn)
{
  int code = recog_memoized (insn);
  rtx_insn *i1;
  rtx newpat;
  rtx pat = PATTERN (insn);

  if (GET_CODE (pat) == COND_EXEC)
    pat = COND_EXEC_CODE (pat);

  if (CALL_P (insn))
    {
      rtx src = pat, dest = NULL_RTX;
      rtx callee;
      if (GET_CODE (pat) == SET)
	{
	  dest = SET_DEST (pat);
	  src = SET_SRC (pat);
	}
      callee = XEXP (XEXP (src, 0), 0);
      if (SIBLING_CALL_P (insn))
	{
	  if (REG_P (callee))
	    newpat = gen_indirect_sibcall_shadow ();
	  else
	    newpat = gen_sibcall_shadow (callee);
	  pat = gen_real_jump (callee);
	}
      else if (dest != NULL_RTX)
	{
	  if (REG_P (callee))
	    newpat = gen_indirect_call_value_shadow (dest);
	  else
	    newpat = gen_call_value_shadow (dest, callee);
	  pat = gen_real_call (callee);
	}
      else
	{
	  if (REG_P (callee))
	    newpat = gen_indirect_call_shadow ();
	  else
	    newpat = gen_call_shadow (callee);
	  pat = gen_real_call (callee);
	}
      pat = duplicate_cond (pat, insn);
      newpat = duplicate_cond (newpat, insn);
    }
  else
    {
      rtx src, op;
      if (GET_CODE (pat) == PARALLEL
	  && GET_CODE (XVECEXP (pat, 0, 0)) == RETURN)
	{
	  newpat = gen_return_shadow ();
	  pat = gen_real_ret (XEXP (XVECEXP (pat, 0, 1), 0));
	  newpat = duplicate_cond (newpat, insn);
	}
      else
	switch (code)
	  {
	  case CODE_FOR_br_true:
	  case CODE_FOR_br_false:
	    src = SET_SRC (pat);
	    op = XEXP (src, code == CODE_FOR_br_true ? 1 : 2);
	    newpat = gen_condjump_shadow (op);
	    pat = gen_real_jump (op);
	    if (code == CODE_FOR_br_true)
	      pat = gen_rtx_COND_EXEC (VOIDmode, XEXP (src, 0), pat);
	    else
	      pat = gen_rtx_COND_EXEC (VOIDmode,
				       reversed_comparison (XEXP (src, 0),
							    VOIDmode),
				       pat);
	    break;

	  case CODE_FOR_jump:
	    op = SET_SRC (pat);
	    pat = gen_real_jump (op);
	    newpat = gen_jump_shadow (op);
	    break;

	  case CODE_FOR_indirect_jump:
	    newpat = gen_indirect_jump_shadow ();
	    break;

	  case CODE_FOR_return_internal:
	    newpat = gen_return_shadow ();
	    pat = gen_real_ret (XEXP (XVECEXP (pat, 0, 1), 0));
	    break;

	  default:
	    return;
	  }
    }
  i1 = emit_insn_before (pat, insn);
  PATTERN (insn) = newpat;
  INSN_CODE (insn) = -1;
  record_delay_slot_pair (i1, insn, 5, 0);
}
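
/* The pair recorded above models the five delay slots of a C6X branch:
   the real_jump issues at some cycle T, while the shadow insn, kept in
   the stream but never emitted as assembly, represents the control
   transfer taking effect at T+5.  record_delay_slot_pair (i1, insn, 5, 0)
   is what lets the scheduler fill those five cycles with useful work.  */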
/* If INSN is a multi-cycle insn that should be handled properly in
   modulo-scheduling, split it into a real insn and a shadow.
   Return true if we made a change.

   It is valid for us to fail to split an insn; the caller has to deal
   with the possibility.  Currently we handle loads and most mpy2 and
   mpy4 insns.  */
static bool
split_delayed_nonbranch (rtx_insn *insn)
{
  int code = recog_memoized (insn);
  enum attr_type type;
  rtx_insn *i1;
  rtx newpat, src, dest;
  rtx pat = PATTERN (insn);
  rtvec rtv;
  int delay;

  if (GET_CODE (pat) == COND_EXEC)
    pat = COND_EXEC_CODE (pat);

  if (code < 0 || GET_CODE (pat) != SET)
    return false;
  src = SET_SRC (pat);
  dest = SET_DEST (pat);
  if (!REG_P (dest))
    return false;

  type = get_attr_type (insn);
  if (code >= 0
      && (type == TYPE_LOAD
	  || type == TYPE_LOADN))
    {
      if (!MEM_P (src)
	  && (GET_CODE (src) != ZERO_EXTEND
	      || !MEM_P (XEXP (src, 0))))
	return false;

      if (GET_MODE_SIZE (GET_MODE (dest)) > 4
	  && (GET_MODE_SIZE (GET_MODE (dest)) != 8 || !TARGET_LDDW))
	return false;

      rtv = gen_rtvec (2, GEN_INT (REGNO (SET_DEST (pat))),
		       SET_SRC (pat));
      newpat = gen_load_shadow (SET_DEST (pat));
      pat = gen_rtx_UNSPEC (VOIDmode, rtv, UNSPEC_REAL_LOAD);
      delay = 4;
    }
  else if (code >= 0
	   && (type == TYPE_MPY2
	       || type == TYPE_MPY4))
    {
      /* We don't handle floating point multiplies yet.  */
      if (GET_MODE (dest) == SFmode)
	return false;

      rtv = gen_rtvec (2, GEN_INT (REGNO (SET_DEST (pat))),
		       SET_SRC (pat));
      newpat = gen_mult_shadow (SET_DEST (pat));
      pat = gen_rtx_UNSPEC (VOIDmode, rtv, UNSPEC_REAL_MULT);
      delay = type == TYPE_MPY2 ? 1 : 3;
    }
  else
    return false;

  pat = duplicate_cond (pat, insn);
  newpat = duplicate_cond (newpat, insn);
  i1 = emit_insn_before (pat, insn);
  PATTERN (insn) = newpat;
  INSN_CODE (insn) = -1;
  recog_memoized (insn);
  recog_memoized (i1);
  record_delay_slot_pair (i1, insn, delay, 0);
  return true;
}
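
/* The DELAY values chosen above mirror the hardware latencies: loads
   have four delay slots, 16-bit multiplies (TYPE_MPY2) one, and the
   TYPE_MPY4 forms three.  E.g. a load issued in cycle C has its
   load_shadow scheduled for cycle C+4, the cycle its result actually
   appears in the destination register.  */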
/* Examine if INSN is the result of splitting a load into a real load and a
   shadow, and if so, undo the transformation.  */
static void
undo_split_delayed_nonbranch (rtx_insn *insn)
{
  int icode = recog_memoized (insn);
  enum attr_type type;
  rtx prev_pat, insn_pat;
  rtx_insn *prev;

  if (icode < 0)
    return;
  type = get_attr_type (insn);
  if (type != TYPE_LOAD_SHADOW && type != TYPE_MULT_SHADOW)
    return;
  prev = PREV_INSN (insn);
  prev_pat = PATTERN (prev);
  insn_pat = PATTERN (insn);
  if (GET_CODE (prev_pat) == COND_EXEC)
    {
      prev_pat = COND_EXEC_CODE (prev_pat);
      insn_pat = COND_EXEC_CODE (insn_pat);
    }

  gcc_assert (GET_CODE (prev_pat) == UNSPEC
	      && ((XINT (prev_pat, 1) == UNSPEC_REAL_LOAD
		   && type == TYPE_LOAD_SHADOW)
		  || (XINT (prev_pat, 1) == UNSPEC_REAL_MULT
		      && type == TYPE_MULT_SHADOW)));
  insn_pat = gen_rtx_SET (SET_DEST (insn_pat),
			  XVECEXP (prev_pat, 0, 1));
  insn_pat = duplicate_cond (insn_pat, prev);
  PATTERN (insn) = insn_pat;
  INSN_CODE (insn) = -1;
  delete_insn (prev);
}
/* Split every insn (i.e. jumps and calls) which can have delay slots into
   two parts: the first one is scheduled normally and emits the instruction,
   while the second one is a shadow insn which shows the side effect taking
   place.  The second one is placed in the right cycle by the scheduler, but
   not emitted as an assembly instruction.  */
static void
split_delayed_insns (void)
{
  rtx_insn *insn;
  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    {
      if (JUMP_P (insn) || CALL_P (insn))
	split_delayed_branch (insn);
    }
}
/* For every insn that has an entry in the new_conditions vector, give it
   the appropriate predicate.  */

static void
conditionalize_after_sched (void)
{
  basic_block bb;
  rtx_insn *insn;
  FOR_EACH_BB_FN (bb, cfun)
    FOR_BB_INSNS (bb, insn)
      {
	unsigned uid = INSN_UID (insn);
	rtx cond;
	if (!NONDEBUG_INSN_P (insn) || uid >= INSN_INFO_LENGTH)
	  continue;
	cond = INSN_INFO_ENTRY (uid).new_cond;
	if (cond == NULL_RTX)
	  continue;
	if (dump_file)
	  fprintf (dump_file, "Conditionalizing insn %d\n", uid);
	predicate_insn (insn, cond, true);
      }
}
/* A callback for the hw-doloop pass.  This function examines INSN; if
   it is a loop_end pattern we recognize, return the reg rtx for the
   loop counter.  Otherwise, return NULL_RTX.  */

static rtx
hwloop_pattern_reg (rtx_insn *insn)
{
  rtx pat, reg;

  if (!JUMP_P (insn) || recog_memoized (insn) != CODE_FOR_loop_end)
    return NULL_RTX;

  pat = PATTERN (insn);
  reg = SET_DEST (XVECEXP (pat, 0, 1));
  if (!REG_P (reg))
    return NULL_RTX;
  return reg;
}
/* Return the number of cycles taken by BB, as computed by scheduling,
   including the latencies of all insns with delay slots.  IGNORE is
   an insn we should ignore in the calculation, usually the final
   branch.  */

static int
bb_earliest_end_cycle (basic_block bb, rtx ignore)
{
  int earliest = 0;
  rtx_insn *insn;

  FOR_BB_INSNS (bb, insn)
    {
      int cycles, this_clock;

      if (LABEL_P (insn) || NOTE_P (insn) || DEBUG_INSN_P (insn)
	  || GET_CODE (PATTERN (insn)) == USE
	  || GET_CODE (PATTERN (insn)) == CLOBBER
	  || insn == ignore)
	continue;

      this_clock = insn_get_clock (insn);
      cycles = get_attr_cycles (insn);

      if (earliest < this_clock + cycles)
	earliest = this_clock + cycles;
    }
  return earliest;
}
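
/* Example: an insn issued in cycle 3 whose "cycles" attribute is 5
   (an insn plus its delay slots) pushes the earliest end of the block
   out to cycle 8, even if the block's final branch issues earlier.  */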
/* Examine the insns in BB and remove all which have a uid greater or
   equal to MAX_UID.  */

static void
filter_insns_above (basic_block bb, int max_uid)
{
  rtx_insn *insn, *next;
  bool prev_ti = false;
  int prev_cycle = -1;

  FOR_BB_INSNS_SAFE (bb, insn, next)
    {
      int this_cycle;
      if (!NONDEBUG_INSN_P (insn))
	continue;
      if (insn == BB_END (bb))
	return;
      this_cycle = insn_get_clock (insn);
      if (prev_ti && this_cycle == prev_cycle)
	{
	  gcc_assert (GET_MODE (insn) != TImode);
	  PUT_MODE (insn, TImode);
	}
      prev_ti = false;
      if (INSN_UID (insn) >= max_uid)
	{
	  if (GET_MODE (insn) == TImode)
	    {
	      prev_ti = true;
	      prev_cycle = this_cycle;
	    }
	  delete_insn (insn);
	}
    }
}
/* Implement TARGET_ASM_EMIT_EXCEPT_PERSONALITY.  */

static void
c6x_asm_emit_except_personality (rtx personality)
{
  fputs ("\t.personality\t", asm_out_file);
  output_addr_const (asm_out_file, personality);
  fputc ('\n', asm_out_file);
}
/* Use a special assembly directive rather than a regular section for
   unwind table data.  */

static void
c6x_asm_init_sections (void)
{
  exception_section = get_unnamed_section (0, output_section_asm_op,
					   "\t.handlerdata");
}
/* A callback for the hw-doloop pass.  Called to optimize LOOP in a
   machine-specific fashion; returns true if successful and false if
   the hwloop_fail function should be called.  */
static bool
hwloop_optimize (hwloop_info loop)
{
  basic_block entry_bb, bb;
  rtx_insn *seq, *insn, *prev, *entry_after, *end_packet;
  rtx_insn *head_insn, *tail_insn, *new_insns, *last_insn;
  int loop_earliest;
  int n_execute_packets;
  edge entry_edge;
  unsigned ix;
  int max_uid_before, delayed_splits;
  int i, sp_ii, min_ii, max_ii, max_parallel, n_insns, n_real_insns, stages;
  rtx_insn **orig_vec;
  rtx_insn **copies;
  rtx_insn ***insn_copies;
  unit_req_table unit_reqs;

  if (!c6x_flag_modulo_sched || !c6x_flag_schedule_insns2
      || !TARGET_INSNS_64PLUS)
    return false;

  if (loop->iter_reg_used || loop->depth > 1)
    return false;
  if (loop->has_call || loop->has_asm)
    return false;

  if (loop->head != loop->tail)
    return false;

  gcc_assert (loop->incoming_dest == loop->head);

  entry_edge = NULL;
  FOR_EACH_VEC_SAFE_ELT (loop->incoming, i, entry_edge)
    if (entry_edge->flags & EDGE_FALLTHRU)
      break;
  if (entry_edge == NULL)
    return false;

  reshuffle_units (loop->head);

  schedule_ebbs_init ();
  schedule_ebb (BB_HEAD (loop->tail), loop->loop_end, true);
  schedule_ebbs_finish ();

  bb = loop->head;
  loop_earliest = bb_earliest_end_cycle (bb, loop->loop_end) + 1;

  max_uid_before = get_max_uid ();

  /* Split all multi-cycle operations, such as loads.  For normal
     scheduling, we only do this for branches, as the generated code
     would otherwise not be interrupt-safe.  When using sploop, it is
     safe and beneficial to split them.  If any multi-cycle operations
     remain after splitting (because we don't handle them yet), we
     cannot pipeline the loop.  */
  delayed_splits = 0;
  FOR_BB_INSNS (bb, insn)
    {
      if (NONDEBUG_INSN_P (insn))
	{
	  recog_memoized (insn);
	  if (split_delayed_nonbranch (insn))
	    delayed_splits++;
	  else if (INSN_CODE (insn) >= 0
		   && get_attr_cycles (insn) > 1)
	    goto undo_splits;
	}
    }

  /* Count the number of insns as well as the number of real insns, and save
     the original sequence of insns in case we must restore it later.  */
  n_insns = n_real_insns = 0;
  FOR_BB_INSNS (bb, insn)
    {
      n_insns++;
      if (NONDEBUG_INSN_P (insn) && insn != loop->loop_end)
	n_real_insns++;
    }
  orig_vec = XNEWVEC (rtx_insn *, n_insns);
  n_insns = 0;
  FOR_BB_INSNS (bb, insn)
    orig_vec[n_insns++] = insn;

  /* Count the unit reservations, and compute a minimum II from that
     table.  */
  count_unit_reqs (unit_reqs, loop->start_label,
		   PREV_INSN (loop->loop_end));
  merge_unit_reqs (unit_reqs);

  min_ii = res_mii (unit_reqs);
  max_ii = loop_earliest < 15 ? loop_earliest : 14;

  /* Make copies of the loop body, up to a maximum number of stages we want
     to handle.  */
  max_parallel = loop_earliest / min_ii + 1;

  copies = XCNEWVEC (rtx_insn *, (max_parallel + 1) * n_real_insns);
  insn_copies = XNEWVEC (rtx_insn **, max_parallel + 1);
  for (i = 0; i < max_parallel + 1; i++)
    insn_copies[i] = copies + i * n_real_insns;

  head_insn = next_nonnote_nondebug_insn (loop->start_label);
  tail_insn = prev_real_insn (BB_END (bb));

  i = 0;
  FOR_BB_INSNS (bb, insn)
    if (NONDEBUG_INSN_P (insn) && insn != loop->loop_end)
      insn_copies[0][i++] = insn;

  sploop_max_uid_iter0 = get_max_uid ();

  /* Generate the copies of the loop body, and save them in the
     INSN_COPIES array.  */
  start_sequence ();
  for (i = 0; i < max_parallel; i++)
    {
      int j = 0;
      rtx_insn *this_iter;

      this_iter = duplicate_insn_chain (head_insn, tail_insn);
      while (this_iter)
	{
	  rtx_insn *prev_stage_insn = insn_copies[i][j];
	  gcc_assert (INSN_CODE (this_iter) == INSN_CODE (prev_stage_insn));

	  if (INSN_CODE (this_iter) >= 0
	      && (get_attr_type (this_iter) == TYPE_LOAD_SHADOW
		  || get_attr_type (this_iter) == TYPE_MULT_SHADOW))
	    {
	      rtx_insn *prev = PREV_INSN (this_iter);
	      record_delay_slot_pair (prev, this_iter,
				      get_attr_cycles (prev) - 1, 0);
	    }
	  else
	    record_delay_slot_pair (prev_stage_insn, this_iter, i, 1);

	  insn_copies[i + 1][j] = this_iter;
	  j++;
	  this_iter = next_nonnote_nondebug_insn (this_iter);
	}
    }
  new_insns = get_insns ();
  last_insn = insn_copies[max_parallel][n_real_insns - 1];
  end_sequence ();

  emit_insn_before (new_insns, BB_END (bb));

  /* Try to schedule the loop using varying initiation intervals,
     starting with the smallest possible and incrementing it
     on failure.  */
  for (sp_ii = min_ii; sp_ii <= max_ii; sp_ii++)
    {
      basic_block tmp_bb;
      if (dump_file)
	fprintf (dump_file, "Trying to schedule for II %d\n", sp_ii);

      df_clear_flags (DF_LR_RUN_DCE);

      schedule_ebbs_init ();
      set_modulo_params (sp_ii, max_parallel, n_real_insns,
			 sploop_max_uid_iter0);
      tmp_bb = schedule_ebb (BB_HEAD (bb), last_insn, true);
      schedule_ebbs_finish ();

      if (tmp_bb)
	{
	  if (dump_file)
	    fprintf (dump_file, "Found schedule with II %d\n", sp_ii);
	  break;
	}
    }

  discard_delay_pairs_above (max_uid_before);

  if (sp_ii > max_ii)
    goto restore_loop;

  stages = insn_get_clock (ss.last_scheduled_iter0) / sp_ii + 1;

  if (stages == 1 && sp_ii > 5)
    goto restore_loop;

  /* At this point, we know we've been successful, unless we find later that
     there are too many execute packets for the loop buffer to hold.  */

  /* Assign reservations to the instructions in the loop.  We must find
     the stage that contains the full loop kernel, and transfer the
     reservations of the instructions contained in it to the corresponding
     instructions from iteration 0, which are the only ones we'll keep.  */
  assign_reservations (BB_HEAD (bb), ss.last_scheduled_insn);
  SET_PREV_INSN (BB_END (bb)) = ss.last_scheduled_iter0;
  SET_NEXT_INSN (ss.last_scheduled_iter0) = BB_END (bb);
  filter_insns_above (bb, sploop_max_uid_iter0);

  for (i = 0; i < n_real_insns; i++)
    {
      rtx insn = insn_copies[0][i];
      int uid = INSN_UID (insn);
      int stage = insn_uid_get_clock (uid) / sp_ii;

      if (stage + 1 < stages)
	{
	  int copy_uid;
	  stage = stages - stage - 1;
	  copy_uid = INSN_UID (insn_copies[stage][i]);
	  INSN_INFO_ENTRY (uid).reservation
	    = INSN_INFO_ENTRY (copy_uid).reservation;
	}
    }

  /* Compute the number of execute packets the pipelined form of the loop will
     require.  */
  prev = NULL;
  n_execute_packets = 0;
  for (insn = loop->start_label;
       insn != loop->loop_end;
       insn = NEXT_INSN (insn))
    {
      if (NONDEBUG_INSN_P (insn) && GET_MODE (insn) == TImode
	  && !shadow_p (insn))
	{
	  n_execute_packets++;
	  if (prev && insn_get_clock (prev) + 1 != insn_get_clock (insn))
	    /* We need an extra NOP instruction.  */
	    n_execute_packets++;

	  prev = insn;
	}
    }

  end_packet = ss.last_scheduled_iter0;
  while (!NONDEBUG_INSN_P (end_packet) || GET_MODE (end_packet) != TImode)
    end_packet = PREV_INSN (end_packet);

  /* The earliest cycle in which we can emit the SPKERNEL instruction.  */
  loop_earliest = (stages - 1) * sp_ii;
  if (loop_earliest > insn_get_clock (end_packet))
    {
      n_execute_packets++;
      end_packet = loop->loop_end;
    }
  else
    loop_earliest = insn_get_clock (end_packet);

  if (n_execute_packets > 14)
    goto restore_loop;

  /* Generate the spkernel instruction, and place it at the appropriate
     spot.  */
  PUT_MODE (end_packet, VOIDmode);

  insn = emit_jump_insn_before (
	   gen_spkernel (GEN_INT (stages - 1),
			 const0_rtx, JUMP_LABEL (loop->loop_end)),
	   end_packet);
  JUMP_LABEL (insn) = JUMP_LABEL (loop->loop_end);
  insn_set_clock (insn, loop_earliest);
  PUT_MODE (insn, TImode);
  INSN_INFO_ENTRY (INSN_UID (insn)).ebb_start = false;
  delete_insn (loop->loop_end);

  /* Place the mvc and sploop instructions before the loop.  */
  entry_bb = entry_edge->src;

  start_sequence ();

  insn = emit_insn (gen_mvilc (loop->iter_reg));
  if (loop->iter_reg_used_outside)
    insn = emit_move_insn (loop->iter_reg, const0_rtx);
  insn = emit_insn (gen_sploop (GEN_INT (sp_ii)));
  seq = get_insns ();

  if (!single_succ_p (entry_bb) || vec_safe_length (loop->incoming) > 1)
    {
      basic_block new_bb;
      edge e;
      edge_iterator ei;

      emit_insn_before (seq, BB_HEAD (loop->head));
      seq = emit_label_before (gen_label_rtx (), seq);

      new_bb = create_basic_block (seq, insn, entry_bb);
      FOR_EACH_EDGE (e, ei, loop->incoming)
	{
	  if (!(e->flags & EDGE_FALLTHRU))
	    redirect_edge_and_branch_force (e, new_bb);
	  else
	    redirect_edge_succ (e, new_bb);
	}
      make_edge (new_bb, loop->head, 0);
    }
  else
    {
      entry_after = BB_END (entry_bb);
      while (DEBUG_INSN_P (entry_after)
	     || (NOTE_P (entry_after)
		 && NOTE_KIND (entry_after) != NOTE_INSN_BASIC_BLOCK))
	entry_after = PREV_INSN (entry_after);
      emit_insn_after (seq, entry_after);
    }

  end_sequence ();

  /* Make sure we don't try to schedule this loop again.  */
  for (ix = 0; loop->blocks.iterate (ix, &bb); ix++)
    bb->flags |= BB_DISABLE_SCHEDULE;

  return true;

 restore_loop:
  if (dump_file)
    fprintf (dump_file, "Unable to pipeline loop.\n");

  for (i = 1; i < n_insns; i++)
    {
      SET_NEXT_INSN (orig_vec[i - 1]) = orig_vec[i];
      SET_PREV_INSN (orig_vec[i]) = orig_vec[i - 1];
    }
  SET_PREV_INSN (orig_vec[0]) = PREV_INSN (BB_HEAD (bb));
  SET_NEXT_INSN (PREV_INSN (BB_HEAD (bb))) = orig_vec[0];
  SET_NEXT_INSN (orig_vec[n_insns - 1]) = NEXT_INSN (BB_END (bb));
  SET_PREV_INSN (NEXT_INSN (BB_END (bb))) = orig_vec[n_insns - 1];
  BB_HEAD (bb) = orig_vec[0];
  BB_END (bb) = orig_vec[n_insns - 1];
 undo_splits:
  free_delay_pairs ();
  FOR_BB_INSNS (bb, insn)
    if (NONDEBUG_INSN_P (insn))
      undo_split_delayed_nonbranch (insn);
  return false;
}
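
/* Worked example (illustrative numbers): if one iteration schedules in
   LOOP_EARLIEST = 12 cycles and the unit reservations give MIN_II = 3,
   up to 12/3 + 1 = 5 overlapped copies of the body are materialized.
   A schedule found at II = 3 whose last iteration-0 insn lands in cycle
   8 runs in 8/3 + 1 = 3 stages.  The result is still discarded if the
   kernel needs more than 14 execute packets, the buffer capacity
   checked above.  */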
/* A callback for the hw-doloop pass.  Called when a loop we have discovered
   turns out not to be optimizable; we have to split the doloop_end pattern
   into a subtract and a test.  */
static void
hwloop_fail (hwloop_info loop)
{
  rtx insn, test, testreg;

  if (dump_file)
    fprintf (dump_file, "splitting doloop insn %d\n",
	     INSN_UID (loop->loop_end));
  insn = gen_addsi3 (loop->iter_reg, loop->iter_reg, constm1_rtx);
  /* See if we can emit the add at the head of the loop rather than at the
     end.  */
  if (loop->head == NULL
      || loop->iter_reg_used_outside
      || loop->iter_reg_used
      || TEST_HARD_REG_BIT (loop->regs_set_in_loop, REGNO (loop->iter_reg))
      || loop->incoming_dest != loop->head
      || EDGE_COUNT (loop->head->preds) != 2)
    emit_insn_before (insn, loop->loop_end);
  else
    {
      rtx_insn *t = loop->start_label;
      while (!NOTE_P (t) || NOTE_KIND (t) != NOTE_INSN_BASIC_BLOCK)
	t = NEXT_INSN (t);
      emit_insn_after (insn, t);
    }

  testreg = SET_DEST (XVECEXP (PATTERN (loop->loop_end), 0, 2));
  if (GET_CODE (testreg) == SCRATCH)
    testreg = loop->iter_reg;
  else
    emit_insn_before (gen_movsi (testreg, loop->iter_reg), loop->loop_end);

  test = gen_rtx_NE (VOIDmode, testreg, const0_rtx);
  insn = emit_jump_insn_before (gen_cbranchsi4 (test, testreg, const0_rtx,
						loop->start_label),
				loop->loop_end);

  JUMP_LABEL (insn) = loop->start_label;
  LABEL_NUSES (loop->start_label)++;
  delete_insn (loop->loop_end);
}
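
/* Sketch of the fallback at the RTL level (using the names above): the
   unsplittable

	doloop_end (decrement counter, branch to start_label if nonzero)

   is rewritten as

	iter_reg = iter_reg + (-1)		   -- gen_addsi3
	if (testreg != 0) goto start_label	   -- gen_cbranchsi4

   with the decrement hoisted to the loop head when the iteration
   register is provably not used or set elsewhere in the loop.  */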
static struct hw_doloop_hooks c6x_doloop_hooks =
{
  hwloop_pattern_reg,
  hwloop_optimize,
  hwloop_fail
};

/* Run the hw-doloop pass to modulo-schedule hardware loops, or split the
   doloop_end patterns where such optimizations are impossible.  */

static void
c6x_hwloops (void)
{
  if (optimize)
    reorg_loops (true, &c6x_doloop_hooks);
}
/* Implement the TARGET_MACHINE_DEPENDENT_REORG pass.  We split call insns here
   into a sequence that loads the return register and performs the call,
   and emit the return label.
   If scheduling after reload is requested, it happens here.  */
static void
c6x_reorg (void)
{
  basic_block bb;
  bool do_selsched = (c6x_flag_schedule_insns2 && flag_selective_scheduling2
		      && !maybe_skip_selective_scheduling ());

  /* We are freeing block_for_insn in the toplev to keep compatibility
     with old MDEP_REORGS that are not CFG based.  Recompute it now.  */
  compute_bb_for_insn ();

  df_clear_flags (DF_LR_RUN_DCE);
  df_note_add_problem ();

  /* If optimizing, we'll have split before scheduling.  */
  if (optimize == 0)
    split_all_insns ();

  df_analyze ();

  if (c6x_flag_schedule_insns2)
    {
      int sz = get_max_uid () * 3 / 2 + 1;

      insn_info.create (sz);
    }

  /* Make sure the real-jump insns we create are not deleted.  When modulo-
     scheduling, situations where a reg is only stored in a loop can also
     cause dead code when doing the initial unrolling.  */
  sched_no_dce = true;

  c6x_hwloops ();

  if (c6x_flag_schedule_insns2)
    {
      split_delayed_insns ();
      timevar_push (TV_SCHED2);
      if (do_selsched)
	run_selective_scheduling ();
      else
	schedule_ebbs ();
      conditionalize_after_sched ();
      timevar_pop (TV_SCHED2);

      free_delay_pairs ();
    }
  sched_no_dce = false;

  rtx_code_label **call_labels = XCNEWVEC (rtx_code_label *,
					   get_max_uid () + 1);

  reorg_split_calls (call_labels);

  if (c6x_flag_schedule_insns2)
    {
      FOR_EACH_BB_FN (bb, cfun)
	if ((bb->flags & BB_DISABLE_SCHEDULE) == 0)
	  assign_reservations (BB_HEAD (bb), BB_END (bb));
    }

  if (c6x_flag_var_tracking)
    {
      timevar_push (TV_VAR_TRACKING);
      variable_tracking_main ();
      timevar_pop (TV_VAR_TRACKING);
    }

  reorg_emit_nops (call_labels);

  /* Post-process the schedule to move parallel insns into SEQUENCEs.  */
  if (c6x_flag_schedule_insns2)
    {
      free_delay_pairs ();
      c6x_gen_bundles ();
    }

  df_finish_pass (false);

  free (call_labels);
}
/* Called when a function has been assembled.  It should perform all the
   tasks of ASM_DECLARE_FUNCTION_SIZE in elfos.h, plus target-specific
   tasks.
   We free the reservation (and other scheduling) information here now that
   all insns have been output.  */

void
c6x_function_end (FILE *file, const char *fname)
{
  c6x_output_fn_unwind (file);

  insn_info.release ();

  if (!flag_inhibit_size_directive)
    ASM_OUTPUT_MEASURED_SIZE (file, fname);
}
/* Determine whether X is a shift with code CODE and an integer amount
   AMOUNT.  */

static bool
shift_p (rtx x, enum rtx_code code, int amount)
{
  return (GET_CODE (x) == code && GET_CODE (XEXP (x, 1)) == CONST_INT
	  && INTVAL (XEXP (x, 1)) == amount);
}
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
c6x_rtx_costs (rtx x, machine_mode mode, int outer_code, int opno, int *total,
	       bool speed)
{
  int cost2 = COSTS_N_INSNS (1);
  rtx op0, op1;
  int code = GET_CODE (x);

  switch (code)
    {
    case CONST_INT:
      if (outer_code == SET || outer_code == PLUS)
	*total = satisfies_constraint_IsB (x) ? 0 : cost2;
      else if (outer_code == AND || outer_code == IOR || outer_code == XOR
	       || outer_code == MINUS)
	*total = satisfies_constraint_Is5 (x) ? 0 : cost2;
      else if (GET_RTX_CLASS (outer_code) == RTX_COMPARE
	       || GET_RTX_CLASS (outer_code) == RTX_COMM_COMPARE)
	*total = satisfies_constraint_Iu4 (x) ? 0 : cost2;
      else if (outer_code == ASHIFT || outer_code == ASHIFTRT
	       || outer_code == LSHIFTRT)
	*total = satisfies_constraint_Iu5 (x) ? 0 : cost2;
      else
	*total = cost2;
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST_DOUBLE:
      *total = COSTS_N_INSNS (2);
      return true;

    case TRUNCATE:
      /* Recognize a mult_highpart operation.  */
      if ((mode == HImode || mode == SImode)
	  && GET_CODE (XEXP (x, 0)) == LSHIFTRT
	  && GET_MODE (XEXP (x, 0)) == GET_MODE_2XWIDER_MODE (mode).require ()
	  && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
	  && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
	  && INTVAL (XEXP (XEXP (x, 0), 1)) == GET_MODE_BITSIZE (mode))
	{
	  rtx mul = XEXP (XEXP (x, 0), 0);
	  rtx op0 = XEXP (mul, 0);
	  rtx op1 = XEXP (mul, 1);
	  enum rtx_code code0 = GET_CODE (op0);
	  enum rtx_code code1 = GET_CODE (op1);

	  if ((code0 == code1
	       && (code0 == SIGN_EXTEND || code0 == ZERO_EXTEND))
	      || (mode == HImode
		  && code0 == ZERO_EXTEND && code1 == SIGN_EXTEND))
	    {
	      if (mode == HImode)
		*total = COSTS_N_INSNS (2);
	      else
		*total = COSTS_N_INSNS (12);
	      mode = GET_MODE (XEXP (op0, 0));
	      *total += rtx_cost (XEXP (op0, 0), mode, code0, 0, speed);
	      *total += rtx_cost (XEXP (op1, 0), mode, code1, 0, speed);
	      return true;
	    }
	}
      return false;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      if (mode == DImode)
	*total = COSTS_N_INSNS (CONSTANT_P (XEXP (x, 1)) ? 4 : 15);
      else
	*total = COSTS_N_INSNS (1);
      return false;

    case PLUS:
    case MINUS:
      *total = COSTS_N_INSNS (1);
      op0 = code == PLUS ? XEXP (x, 0) : XEXP (x, 1);
      op1 = code == PLUS ? XEXP (x, 1) : XEXP (x, 0);
      if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
	  && INTEGRAL_MODE_P (mode)
	  && GET_CODE (op0) == MULT
	  && GET_CODE (XEXP (op0, 1)) == CONST_INT
	  && (INTVAL (XEXP (op0, 1)) == 2
	      || INTVAL (XEXP (op0, 1)) == 4
	      || (code == PLUS && INTVAL (XEXP (op0, 1)) == 8)))
	{
	  *total += rtx_cost (XEXP (op0, 0), mode, ASHIFT, 0, speed);
	  *total += rtx_cost (op1, mode, (enum rtx_code) code, 1, speed);
	  return true;
	}
      return false;

    case MULT:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      if (mode == DFmode)
	{
	  if (TARGET_FP)
	    *total = COSTS_N_INSNS (speed ? 10 : 1);
	  else
	    *total = COSTS_N_INSNS (speed ? 200 : 4);
	}
      else if (mode == SFmode)
	{
	  if (TARGET_FP)
	    *total = COSTS_N_INSNS (speed ? 4 : 1);
	  else
	    *total = COSTS_N_INSNS (speed ? 100 : 4);
	}
      else if (mode == DImode)
	{
	  if (TARGET_MPY32
	      && GET_CODE (op0) == GET_CODE (op1)
	      && (GET_CODE (op0) == ZERO_EXTEND
		  || GET_CODE (op0) == SIGN_EXTEND))
	    {
	      *total = COSTS_N_INSNS (speed ? 2 : 1);
	      op0 = XEXP (op0, 0);
	      op1 = XEXP (op1, 0);
	    }
	  else
	    /* Maybe improve this later.  */
	    *total = COSTS_N_INSNS (20);
	}
      else if (mode == SImode)
	{
	  if (((GET_CODE (op0) == ZERO_EXTEND
		|| GET_CODE (op0) == SIGN_EXTEND
		|| shift_p (op0, LSHIFTRT, 16))
	       && (GET_CODE (op1) == SIGN_EXTEND
		   || GET_CODE (op1) == ZERO_EXTEND
		   || scst5_operand (op1, SImode)
		   || shift_p (op1, ASHIFTRT, 16)
		   || shift_p (op1, LSHIFTRT, 16)))
	      || (shift_p (op0, ASHIFTRT, 16)
		  && (GET_CODE (op1) == SIGN_EXTEND
		      || shift_p (op1, ASHIFTRT, 16))))
	    {
	      *total = COSTS_N_INSNS (speed ? 2 : 1);
	      op0 = XEXP (op0, 0);
	      if (scst5_operand (op1, SImode))
		op1 = NULL_RTX;
	      else
		op1 = XEXP (op1, 0);
	    }
	  else if (!speed)
	    *total = COSTS_N_INSNS (1);
	  else if (TARGET_MPY32)
	    *total = COSTS_N_INSNS (4);
	  else
	    *total = COSTS_N_INSNS (6);
	}
      else if (mode == HImode)
	*total = COSTS_N_INSNS (speed ? 2 : 1);

      if (GET_CODE (op0) != REG
	  && (GET_CODE (op0) != SUBREG || GET_CODE (SUBREG_REG (op0)) != REG))
	*total += rtx_cost (op0, mode, MULT, 0, speed);
      if (op1 && GET_CODE (op1) != REG
	  && (GET_CODE (op1) != SUBREG || GET_CODE (SUBREG_REG (op1)) != REG))
	*total += rtx_cost (op1, mode, MULT, 1, speed);
      return true;

    case UDIV:
    case DIV:
      /* This is a bit random; assuming on average there'll be 16 leading
	 zeros.  FIXME: estimate better for constant dividends.  */
      *total = COSTS_N_INSNS (6 + 3 * 16);
      return true;

    case AND:
    case IOR:
      /* Recognize the cmp_and/ior patterns.  */
      op0 = XEXP (x, 0);
      if ((GET_CODE (op0) == EQ || GET_CODE (op0) == NE)
	  && REG_P (XEXP (op0, 0))
	  && XEXP (op0, 1) == const0_rtx
	  && rtx_equal_p (XEXP (x, 1), XEXP (op0, 0)))
	{
	  *total = rtx_cost (XEXP (x, 1), VOIDmode, (enum rtx_code) outer_code,
			     opno, speed);
	  return false;
	}
      return false;

    default:
      return false;
    }
}
/* Implements target hook vector_mode_supported_p.  */

static bool
c6x_vector_mode_supported_p (machine_mode mode)
{
  switch (mode)
    {
    case E_V2HImode:
    case E_V4QImode:
    case E_V2SImode:
    case E_V4HImode:
    case E_V8QImode:
      return true;
    default:
      return false;
    }
}

/* Implements TARGET_VECTORIZE_PREFERRED_SIMD_MODE.  */

static machine_mode
c6x_preferred_simd_mode (scalar_mode mode)
{
  switch (mode)
    {
    case E_HImode:
      return V2HImode;
    case E_QImode:
      return V4QImode;

    default:
      return word_mode;
    }
}
/* Implement TARGET_SCALAR_MODE_SUPPORTED_P.  */

static bool
c6x_scalar_mode_supported_p (scalar_mode mode)
{
  if (ALL_FIXED_POINT_MODE_P (mode)
      && GET_MODE_PRECISION (mode) <= 2 * BITS_PER_WORD)
    return true;

  return default_scalar_mode_supported_p (mode);
}
/* Output a reference from a function exception table to the type_info
   object X.  Output these via a special assembly directive.  */

static bool
c6x_output_ttype (rtx x)
{
  /* Use special relocations for symbol references.  */
  if (GET_CODE (x) != CONST_INT)
    fputs ("\t.ehtype\t", asm_out_file);
  else
    fputs ("\t.word\t", asm_out_file);
  output_addr_const (asm_out_file, x);
  fputc ('\n', asm_out_file);

  return true;
}
/* Modify the return address of the current function.  */

void
c6x_set_return_address (rtx source, rtx scratch)
{
  struct c6x_frame frame;
  rtx addr;
  HOST_WIDE_INT offset;

  c6x_compute_frame_layout (&frame);
  if (! c6x_save_reg (RETURN_ADDR_REGNO))
    emit_move_insn (gen_rtx_REG (Pmode, RETURN_ADDR_REGNO), source);
  else
    {
      if (frame_pointer_needed)
	{
	  addr = hard_frame_pointer_rtx;
	  offset = frame.b3_offset;
	}
      else
	{
	  addr = stack_pointer_rtx;
	  offset = frame.to_allocate - frame.b3_offset;
	}

      /* TODO: Use base+offset loads where possible.  */
      if (offset)
	{
	  HOST_WIDE_INT low = trunc_int_for_mode (offset, HImode);

	  emit_insn (gen_movsi_high (scratch, GEN_INT (low)));
	  if (low != offset)
	    emit_insn (gen_movsi_lo_sum (scratch, scratch, GEN_INT (offset)));
	  emit_insn (gen_addsi3 (scratch, addr, scratch));
	  addr = scratch;
	}

      emit_move_insn (gen_frame_mem (Pmode, addr), source);
    }
}
/* We save pairs of registers using a DImode store.  Describe the component
   registers for DWARF generation code.  */

static rtx
c6x_dwarf_register_span (rtx rtl)
{
  unsigned regno;
  unsigned real_regno;
  int nregs;
  int i;
  rtx p;

  regno = REGNO (rtl);
  nregs = REG_NREGS (rtl);
  if (nregs == 1)
    return NULL_RTX;

  p = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nregs));
  for (i = 0; i < nregs; i++)
    {
      if (TARGET_BIG_ENDIAN)
	real_regno = regno + nregs - (i + 1);
      else
	real_regno = regno + i;

      XVECEXP (p, 0, i) = gen_rtx_REG (SImode, real_regno);
    }

  return p;
}
/* Codes for all the C6X builtins.  */
enum c6x_builtins
{
  C6X_BUILTIN_SADD,
  C6X_BUILTIN_SSUB,
  C6X_BUILTIN_ADD2,
  C6X_BUILTIN_SUB2,
  C6X_BUILTIN_ADD4,
  C6X_BUILTIN_SUB4,
  C6X_BUILTIN_SADD2,
  C6X_BUILTIN_SSUB2,
  C6X_BUILTIN_SADDU4,
  C6X_BUILTIN_MPY2,
  C6X_BUILTIN_SMPY,
  C6X_BUILTIN_SMPYH,
  C6X_BUILTIN_SMPYHL,
  C6X_BUILTIN_SMPYLH,
  C6X_BUILTIN_SMPY2,
  C6X_BUILTIN_CLRR,
  C6X_BUILTIN_EXTR,
  C6X_BUILTIN_EXTRU,
  C6X_BUILTIN_SSHL,
  C6X_BUILTIN_SUBC,
  C6X_BUILTIN_AVG2,
  C6X_BUILTIN_AVGU4,
  C6X_BUILTIN_ABS,
  C6X_BUILTIN_ABS2,
  C6X_BUILTIN_MAX
};

static GTY(()) tree c6x_builtin_decls[C6X_BUILTIN_MAX];

/* Return the C6X builtin for CODE.  */

static tree
c6x_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
{
  if (code >= C6X_BUILTIN_MAX)
    return error_mark_node;

  return c6x_builtin_decls[code];
}
#define def_builtin(NAME, TYPE, CODE)					\
do {									\
  tree bdecl;								\
  bdecl = add_builtin_function ((NAME), (TYPE), (CODE), BUILT_IN_MD,	\
				NULL, NULL_TREE);			\
  c6x_builtin_decls[CODE] = bdecl;					\
} while (0)

/* Set up all builtin functions for this target.  */

static void
c6x_init_builtins (void)
{
  tree V4QI_type_node = build_vector_type (unsigned_intQI_type_node, 4);
  tree V2HI_type_node = build_vector_type (intHI_type_node, 2);
  tree V2SI_type_node = build_vector_type (intSI_type_node, 2);
  tree int_ftype_int
    = build_function_type_list (integer_type_node, integer_type_node,
				NULL_TREE);
  tree int_ftype_int_int
    = build_function_type_list (integer_type_node, integer_type_node,
				integer_type_node, NULL_TREE);
  tree v2hi_ftype_v2hi
    = build_function_type_list (V2HI_type_node, V2HI_type_node, NULL_TREE);
  tree v4qi_ftype_v4qi_v4qi
    = build_function_type_list (V4QI_type_node, V4QI_type_node,
				V4QI_type_node, NULL_TREE);
  tree v2hi_ftype_v2hi_v2hi
    = build_function_type_list (V2HI_type_node, V2HI_type_node,
				V2HI_type_node, NULL_TREE);
  tree v2si_ftype_v2hi_v2hi
    = build_function_type_list (V2SI_type_node, V2HI_type_node,
				V2HI_type_node, NULL_TREE);

  def_builtin ("__builtin_c6x_sadd", int_ftype_int_int,
	       C6X_BUILTIN_SADD);
  def_builtin ("__builtin_c6x_ssub", int_ftype_int_int,
	       C6X_BUILTIN_SSUB);
  def_builtin ("__builtin_c6x_add2", v2hi_ftype_v2hi_v2hi,
	       C6X_BUILTIN_ADD2);
  def_builtin ("__builtin_c6x_sub2", v2hi_ftype_v2hi_v2hi,
	       C6X_BUILTIN_SUB2);
  def_builtin ("__builtin_c6x_add4", v4qi_ftype_v4qi_v4qi,
	       C6X_BUILTIN_ADD4);
  def_builtin ("__builtin_c6x_sub4", v4qi_ftype_v4qi_v4qi,
	       C6X_BUILTIN_SUB4);
  def_builtin ("__builtin_c6x_mpy2", v2si_ftype_v2hi_v2hi,
	       C6X_BUILTIN_MPY2);
  def_builtin ("__builtin_c6x_sadd2", v2hi_ftype_v2hi_v2hi,
	       C6X_BUILTIN_SADD2);
  def_builtin ("__builtin_c6x_ssub2", v2hi_ftype_v2hi_v2hi,
	       C6X_BUILTIN_SSUB2);
  def_builtin ("__builtin_c6x_saddu4", v4qi_ftype_v4qi_v4qi,
	       C6X_BUILTIN_SADDU4);
  def_builtin ("__builtin_c6x_smpy2", v2si_ftype_v2hi_v2hi,
	       C6X_BUILTIN_SMPY2);

  def_builtin ("__builtin_c6x_smpy", int_ftype_int_int,
	       C6X_BUILTIN_SMPY);
  def_builtin ("__builtin_c6x_smpyh", int_ftype_int_int,
	       C6X_BUILTIN_SMPYH);
  def_builtin ("__builtin_c6x_smpyhl", int_ftype_int_int,
	       C6X_BUILTIN_SMPYHL);
  def_builtin ("__builtin_c6x_smpylh", int_ftype_int_int,
	       C6X_BUILTIN_SMPYLH);

  def_builtin ("__builtin_c6x_sshl", int_ftype_int_int,
	       C6X_BUILTIN_SSHL);
  def_builtin ("__builtin_c6x_subc", int_ftype_int_int,
	       C6X_BUILTIN_SUBC);

  def_builtin ("__builtin_c6x_avg2", v2hi_ftype_v2hi_v2hi,
	       C6X_BUILTIN_AVG2);
  def_builtin ("__builtin_c6x_avgu4", v4qi_ftype_v4qi_v4qi,
	       C6X_BUILTIN_AVGU4);

  def_builtin ("__builtin_c6x_clrr", int_ftype_int_int,
	       C6X_BUILTIN_CLRR);
  def_builtin ("__builtin_c6x_extr", int_ftype_int_int,
	       C6X_BUILTIN_EXTR);
  def_builtin ("__builtin_c6x_extru", int_ftype_int_int,
	       C6X_BUILTIN_EXTRU);

  def_builtin ("__builtin_c6x_abs", int_ftype_int, C6X_BUILTIN_ABS);
  def_builtin ("__builtin_c6x_abs2", v2hi_ftype_v2hi, C6X_BUILTIN_ABS2);
}
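
/* Illustrative use from user C code (variable names invented):

     int r = __builtin_c6x_sadd (a, b);	    saturating 32-bit add
     int h = __builtin_c6x_smpyh (a, b);    signed multiply, high halves

   Each builtin expands through the bdesc tables below directly to the
   corresponding machine insn.  */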
struct builtin_description
{
  const enum insn_code icode;
  const char *const name;
  const enum c6x_builtins code;
};

static const struct builtin_description bdesc_2arg[] =
{
  { CODE_FOR_saddsi3, "__builtin_c6x_sadd", C6X_BUILTIN_SADD },
  { CODE_FOR_ssubsi3, "__builtin_c6x_ssub", C6X_BUILTIN_SSUB },
  { CODE_FOR_addv2hi3, "__builtin_c6x_add2", C6X_BUILTIN_ADD2 },
  { CODE_FOR_subv2hi3, "__builtin_c6x_sub2", C6X_BUILTIN_SUB2 },
  { CODE_FOR_addv4qi3, "__builtin_c6x_add4", C6X_BUILTIN_ADD4 },
  { CODE_FOR_subv4qi3, "__builtin_c6x_sub4", C6X_BUILTIN_SUB4 },
  { CODE_FOR_ss_addv2hi3, "__builtin_c6x_sadd2", C6X_BUILTIN_SADD2 },
  { CODE_FOR_ss_subv2hi3, "__builtin_c6x_ssub2", C6X_BUILTIN_SSUB2 },
  { CODE_FOR_us_addv4qi3, "__builtin_c6x_saddu4", C6X_BUILTIN_SADDU4 },

  { CODE_FOR_subcsi3, "__builtin_c6x_subc", C6X_BUILTIN_SUBC },
  { CODE_FOR_ss_ashlsi3, "__builtin_c6x_sshl", C6X_BUILTIN_SSHL },

  { CODE_FOR_avgv2hi3, "__builtin_c6x_avg2", C6X_BUILTIN_AVG2 },
  { CODE_FOR_uavgv4qi3, "__builtin_c6x_avgu4", C6X_BUILTIN_AVGU4 },

  { CODE_FOR_mulhqsq3, "__builtin_c6x_smpy", C6X_BUILTIN_SMPY },
  { CODE_FOR_mulhqsq3_hh, "__builtin_c6x_smpyh", C6X_BUILTIN_SMPYH },
  { CODE_FOR_mulhqsq3_lh, "__builtin_c6x_smpylh", C6X_BUILTIN_SMPYLH },
  { CODE_FOR_mulhqsq3_hl, "__builtin_c6x_smpyhl", C6X_BUILTIN_SMPYHL },

  { CODE_FOR_mulv2hqv2sq3, "__builtin_c6x_smpy2", C6X_BUILTIN_SMPY2 },

  { CODE_FOR_clrr, "__builtin_c6x_clrr", C6X_BUILTIN_CLRR },
  { CODE_FOR_extr, "__builtin_c6x_extr", C6X_BUILTIN_EXTR },
  { CODE_FOR_extru, "__builtin_c6x_extru", C6X_BUILTIN_EXTRU }
};

static const struct builtin_description bdesc_1arg[] =
{
  { CODE_FOR_ssabssi2, "__builtin_c6x_abs", C6X_BUILTIN_ABS },
  { CODE_FOR_ssabsv2hi2, "__builtin_c6x_abs2", C6X_BUILTIN_ABS2 }
};
/* Errors in the source file can cause expand_expr to return const0_rtx
   where we expect a vector.  To avoid crashing, use one of the vector
   clear instructions.  */

static rtx
safe_vector_operand (rtx x, machine_mode mode)
{
  if (x != const0_rtx)
    return x;
  x = gen_reg_rtx (SImode);

  emit_insn (gen_movsi (x, CONST0_RTX (SImode)));
  return gen_lowpart (mode, x);
}
/* Subroutine of c6x_expand_builtin to take care of binop insns.  MACFLAG is -1
   if this is a normal binary op, or one of the MACFLAG_xxx constants.  */

static rtx
c6x_expand_binop_builtin (enum insn_code icode, tree exp, rtx target,
			  bool match_op)
{
  int offs = match_op ? 1 : 0;
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, EXPAND_NORMAL);
  rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, EXPAND_NORMAL);
  machine_mode op0mode = GET_MODE (op0);
  machine_mode op1mode = GET_MODE (op1);
  machine_mode tmode = insn_data[icode].operand[0].mode;
  machine_mode mode0 = insn_data[icode].operand[1 + offs].mode;
  machine_mode mode1 = insn_data[icode].operand[2 + offs].mode;
  rtx ret = target;

  if (VECTOR_MODE_P (mode0))
    op0 = safe_vector_operand (op0, mode0);
  if (VECTOR_MODE_P (mode1))
    op1 = safe_vector_operand (op1, mode1);

  if (! target
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    {
      if (tmode == SQmode || tmode == V2SQmode)
	{
	  ret = gen_reg_rtx (tmode == SQmode ? SImode : V2SImode);
	  target = gen_lowpart (tmode, ret);
	}
      else
	ret = target = gen_reg_rtx (tmode);
    }

  if ((op0mode == V2HImode || op0mode == SImode || op0mode == VOIDmode)
      && (mode0 == V2HQmode || mode0 == HQmode || mode0 == SQmode))
    {
      op0mode = mode0;
      op0 = gen_lowpart (mode0, op0);
    }
  if ((op1mode == V2HImode || op1mode == SImode || op1mode == VOIDmode)
      && (mode1 == V2HQmode || mode1 == HQmode || mode1 == SQmode))
    {
      op1mode = mode1;
      op1 = gen_lowpart (mode1, op1);
    }
  /* In case the insn wants input operands in modes different from
     the result, abort.  */
  gcc_assert ((op0mode == mode0 || op0mode == VOIDmode)
	      && (op1mode == mode1 || op1mode == VOIDmode));

  if (! (*insn_data[icode].operand[1 + offs].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2 + offs].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  if (match_op)
    pat = GEN_FCN (icode) (target, target, op0, op1);
  else
    pat = GEN_FCN (icode) (target, op0, op1);

  if (! pat)
    return 0;

  emit_insn (pat);

  return ret;
}
/* Subroutine of c6x_expand_builtin to take care of unop insns.  */

static rtx
c6x_expand_unop_builtin (enum insn_code icode, tree exp,
			 rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, EXPAND_NORMAL);
  machine_mode op0mode = GET_MODE (op0);
  machine_mode tmode = insn_data[icode].operand[0].mode;
  machine_mode mode0 = insn_data[icode].operand[1].mode;

  if (! target
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (VECTOR_MODE_P (mode0))
    op0 = safe_vector_operand (op0, mode0);

  if (op0mode == SImode && mode0 == HImode)
    {
      op0mode = HImode;
      op0 = gen_lowpart (HImode, op0);
    }
  gcc_assert (op0mode == mode0 || op0mode == VOIDmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  pat = GEN_FCN (icode) (target, op0);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
/* Expand an expression EXP that calls a built-in function,
   with result going to TARGET if that's convenient
   (and in mode MODE if that's convenient).
   SUBTARGET may be used as the target for computing one of EXP's operands.
   IGNORE is nonzero if the value is to be ignored.  */

static rtx
c6x_expand_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
		    rtx subtarget ATTRIBUTE_UNUSED,
		    machine_mode mode ATTRIBUTE_UNUSED,
		    int ignore ATTRIBUTE_UNUSED)
{
  size_t i;
  const struct builtin_description *d;
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);

  for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
    if (d->code == fcode)
      return c6x_expand_binop_builtin (d->icode, exp, target,
				       fcode == C6X_BUILTIN_CLRR);

  for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
    if (d->code == fcode)
      return c6x_expand_unop_builtin (d->icode, exp, target);

  gcc_unreachable ();
}
/* Target unwind frame info is generated from dwarf CFI directives, so
   always output dwarf2 unwind info.  */

static enum unwind_info_type
c6x_debug_unwind_info (void)
{
  if (flag_unwind_tables || flag_exceptions)
    return UI_DWARF2;

  return default_debug_unwind_info ();
}
/* Implement TARGET_HARD_REGNO_MODE_OK.  */

static bool
c6x_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
{
  return GET_MODE_SIZE (mode) <= UNITS_PER_WORD || (regno & 1) == 0;
}

/* Implement TARGET_MODES_TIEABLE_P.  */

static bool
c6x_modes_tieable_p (machine_mode mode1, machine_mode mode2)
{
  return (mode1 == mode2
	  || (GET_MODE_SIZE (mode1) <= UNITS_PER_WORD
	      && GET_MODE_SIZE (mode2) <= UNITS_PER_WORD));
}
/* Target Structure.  */

/* Initialize the GCC target structure.  */
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG c6x_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE c6x_function_arg_advance
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY c6x_function_arg_boundary
#undef TARGET_FUNCTION_ARG_ROUND_BOUNDARY
#define TARGET_FUNCTION_ARG_ROUND_BOUNDARY \
  c6x_function_arg_round_boundary
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P c6x_function_value_regno_p
#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE c6x_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE c6x_libcall_value
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY c6x_return_in_memory
#undef TARGET_RETURN_IN_MSB
#define TARGET_RETURN_IN_MSB c6x_return_in_msb
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE c6x_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES c6x_callee_copies
#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX c6x_struct_value_rtx
#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL c6x_function_ok_for_sibcall

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK c6x_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK c6x_can_output_mi_thunk

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST c6x_build_builtin_va_list

#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
#define TARGET_ASM_TRAMPOLINE_TEMPLATE c6x_asm_trampoline_template
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT c6x_initialize_trampoline

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P c6x_legitimate_constant_p
#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P c6x_legitimate_address_p

#undef TARGET_LRA_P
#define TARGET_LRA_P hook_bool_void_false

#undef TARGET_IN_SMALL_DATA_P
#define TARGET_IN_SMALL_DATA_P c6x_in_small_data_p
#undef TARGET_ASM_SELECT_RTX_SECTION
#define TARGET_ASM_SELECT_RTX_SECTION c6x_select_rtx_section
#undef TARGET_ASM_SELECT_SECTION
#define TARGET_ASM_SELECT_SECTION c6x_elf_select_section
#undef TARGET_ASM_UNIQUE_SECTION
#define TARGET_ASM_UNIQUE_SECTION c6x_elf_unique_section
#undef TARGET_SECTION_TYPE_FLAGS
#define TARGET_SECTION_TYPE_FLAGS c6x_section_type_flags
#undef TARGET_HAVE_SRODATA_SECTION
#define TARGET_HAVE_SRODATA_SECTION true
#undef TARGET_ASM_MERGEABLE_RODATA_PREFIX
#define TARGET_ASM_MERGEABLE_RODATA_PREFIX ".const"

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE c6x_option_override
#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE c6x_conditional_register_usage

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS c6x_init_libfuncs
#undef TARGET_LIBFUNC_GNU_PREFIX
#define TARGET_LIBFUNC_GNU_PREFIX true

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P c6x_scalar_mode_supported_p
#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P c6x_vector_mode_supported_p
#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE c6x_preferred_simd_mode

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS c6x_rtx_costs

#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT c6x_sched_init
#undef TARGET_SCHED_SET_SCHED_FLAGS
#define TARGET_SCHED_SET_SCHED_FLAGS c6x_set_sched_flags
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST c6x_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE c6x_issue_rate
#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE c6x_variable_issue
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER c6x_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 c6x_sched_reorder2
#undef TARGET_SCHED_DFA_NEW_CYCLE
#define TARGET_SCHED_DFA_NEW_CYCLE c6x_dfa_new_cycle
#undef TARGET_SCHED_DFA_PRE_CYCLE_INSN
#define TARGET_SCHED_DFA_PRE_CYCLE_INSN c6x_sched_dfa_pre_cycle_insn
#undef TARGET_SCHED_EXPOSED_PIPELINE
#define TARGET_SCHED_EXPOSED_PIPELINE true

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT c6x_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT c6x_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT c6x_set_sched_context
#undef TARGET_SCHED_CLEAR_SCHED_CONTEXT
#define TARGET_SCHED_CLEAR_SCHED_CONTEXT c6x_clear_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT c6x_free_sched_context

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE c6x_can_eliminate

#undef TARGET_PREFERRED_RENAME_CLASS
#define TARGET_PREFERRED_RENAME_CLASS c6x_preferred_rename_class

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG c6x_reorg

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START c6x_file_start

#undef TARGET_PRINT_OPERAND
#define TARGET_PRINT_OPERAND c6x_print_operand
#undef TARGET_PRINT_OPERAND_ADDRESS
#define TARGET_PRINT_OPERAND_ADDRESS c6x_print_operand_address
#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P c6x_print_operand_punct_valid_p

/* C6x unwinding tables use a different format for the typeinfo tables.  */
#undef TARGET_ASM_TTYPE
#define TARGET_ASM_TTYPE c6x_output_ttype

/* The C6x ABI follows the ARM EABI exception handling rules.  */
#undef TARGET_ARM_EABI_UNWINDER
#define TARGET_ARM_EABI_UNWINDER true

#undef TARGET_ASM_EMIT_EXCEPT_PERSONALITY
#define TARGET_ASM_EMIT_EXCEPT_PERSONALITY c6x_asm_emit_except_personality

#undef TARGET_ASM_INIT_SECTIONS
#define TARGET_ASM_INIT_SECTIONS c6x_asm_init_sections

#undef TARGET_DEBUG_UNWIND_INFO
#define TARGET_DEBUG_UNWIND_INFO c6x_debug_unwind_info

#undef TARGET_DWARF_REGISTER_SPAN
#define TARGET_DWARF_REGISTER_SPAN c6x_dwarf_register_span

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS c6x_init_builtins
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN c6x_expand_builtin
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL c6x_builtin_decl

#undef TARGET_HARD_REGNO_MODE_OK
#define TARGET_HARD_REGNO_MODE_OK c6x_hard_regno_mode_ok
#undef TARGET_MODES_TIEABLE_P
#define TARGET_MODES_TIEABLE_P c6x_modes_tieable_p

struct gcc_target targetm = TARGET_INITIALIZER;