1 /* Target Code for R8C/M16C/M32C
2 Copyright (C) 2005-2015 Free Software Foundation, Inc.
3 Contributed by Red Hat.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published
9 by the Free Software Foundation; either version 3, or (at your
10 option) any later version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
23 #include "coretypes.h"
27 #include "hard-reg-set.h"
28 #include "insn-config.h"
29 #include "conditions.h"
30 #include "insn-flags.h"
32 #include "insn-attr.h"
36 #include "diagnostic-core.h"
41 #include "double-int.h"
48 #include "fold-const.h"
49 #include "stor-layout.h"
53 #include "insn-codes.h"
60 #include "target-def.h"
62 #include "langhooks.h"
63 #include "hash-table.h"
65 #include "dominance.h"
71 #include "cfgcleanup.h"
72 #include "basic-block.h"
73 #include "tree-ssa-alias.h"
74 #include "internal-fn.h"
75 #include "gimple-fold.h"
77 #include "gimple-expr.h"
81 #include "tm-constrs.h"
86 /* Used by m32c_pushm_popm. */
94 static bool m32c_function_needs_enter (void);
95 static tree
interrupt_handler (tree
*, tree
, tree
, int, bool *);
96 static tree
function_vector_handler (tree
*, tree
, tree
, int, bool *);
97 static int interrupt_p (tree node
);
98 static int bank_switch_p (tree node
);
99 static int fast_interrupt_p (tree node
);
100 static int interrupt_p (tree node
);
101 static bool m32c_asm_integer (rtx
, unsigned int, int);
102 static int m32c_comp_type_attributes (const_tree
, const_tree
);
103 static bool m32c_fixed_condition_code_regs (unsigned int *, unsigned int *);
104 static struct machine_function
*m32c_init_machine_status (void);
105 static void m32c_insert_attributes (tree
, tree
*);
106 static bool m32c_legitimate_address_p (machine_mode
, rtx
, bool);
107 static bool m32c_addr_space_legitimate_address_p (machine_mode
, rtx
, bool, addr_space_t
);
108 static rtx
m32c_function_arg (cumulative_args_t
, machine_mode
,
110 static bool m32c_pass_by_reference (cumulative_args_t
, machine_mode
,
112 static void m32c_function_arg_advance (cumulative_args_t
, machine_mode
,
114 static unsigned int m32c_function_arg_boundary (machine_mode
, const_tree
);
115 static int m32c_pushm_popm (Push_Pop_Type
);
116 static bool m32c_strict_argument_naming (cumulative_args_t
);
117 static rtx
m32c_struct_value_rtx (tree
, int);
118 static rtx
m32c_subreg (machine_mode
, rtx
, machine_mode
, int);
119 static int need_to_save (int);
120 static rtx
m32c_function_value (const_tree
, const_tree
, bool);
121 static rtx
m32c_libcall_value (machine_mode
, const_rtx
);
123 /* Returns true if an address is specified, else false. */
124 static bool m32c_get_pragma_address (const char *varname
, unsigned *addr
);
126 #define SYMBOL_FLAG_FUNCVEC_FUNCTION (SYMBOL_FLAG_MACH_DEP << 0)
128 #define streq(a,b) (strcmp ((a), (b)) == 0)
130 /* Internal support routines */
132 /* Debugging statements are tagged with DEBUG0 only so that they can
133 be easily enabled individually, by replacing the '0' with '1' as
139 /* This is needed by some of the commented-out debug statements
141 static char const *class_names
[LIM_REG_CLASSES
] = REG_CLASS_NAMES
;
143 static int class_contents
[LIM_REG_CLASSES
][1] = REG_CLASS_CONTENTS
;
145 /* These are all to support encode_pattern(). */
146 static char pattern
[30], *patternp
;
147 static GTY(()) rtx patternr
[30];
148 #define RTX_IS(x) (streq (pattern, x))
150 /* Some macros to simplify the logic throughout this file. */
151 #define IS_MEM_REGNO(regno) ((regno) >= MEM0_REGNO && (regno) <= MEM7_REGNO)
152 #define IS_MEM_REG(rtx) (GET_CODE (rtx) == REG && IS_MEM_REGNO (REGNO (rtx)))
154 #define IS_CR_REGNO(regno) ((regno) >= SB_REGNO && (regno) <= PC_REGNO)
155 #define IS_CR_REG(rtx) (GET_CODE (rtx) == REG && IS_CR_REGNO (REGNO (rtx)))
158 far_addr_space_p (rtx x
)
160 if (GET_CODE (x
) != MEM
)
163 fprintf(stderr
, "\033[35mfar_addr_space: "); debug_rtx(x
);
164 fprintf(stderr
, " = %d\033[0m\n", MEM_ADDR_SPACE (x
) == ADDR_SPACE_FAR
);
166 return MEM_ADDR_SPACE (x
) == ADDR_SPACE_FAR
;
169 /* We do most RTX matching by converting the RTX into a string, and
170 using string compares. This vastly simplifies the logic in many of
171 the functions in this file.
173 On exit, pattern[] has the encoded string (use RTX_IS("...") to
174 compare it) and patternr[] has pointers to the nodes in the RTX
175 corresponding to each character in the encoded string. The latter
176 is mostly used by print_operand().
178 Unrecognized patterns have '?' in them; this shows up when the
179 assembler complains about syntax errors.
183 encode_pattern_1 (rtx x
)
187 if (patternp
== pattern
+ sizeof (pattern
) - 2)
193 patternr
[patternp
- pattern
] = x
;
195 switch (GET_CODE (x
))
201 if (GET_MODE_SIZE (GET_MODE (x
)) !=
202 GET_MODE_SIZE (GET_MODE (XEXP (x
, 0))))
204 if (GET_MODE (x
) == PSImode
205 && GET_CODE (XEXP (x
, 0)) == REG
)
207 encode_pattern_1 (XEXP (x
, 0));
212 encode_pattern_1 (XEXP (x
, 0));
217 encode_pattern_1 (XEXP (x
, 0));
222 encode_pattern_1 (XEXP (x
, 0));
226 encode_pattern_1 (XEXP (x
, 0));
227 encode_pattern_1 (XEXP (x
, 1));
231 encode_pattern_1 (XEXP (x
, 0));
235 encode_pattern_1 (XEXP (x
, 0));
239 encode_pattern_1 (XEXP (x
, 0));
240 encode_pattern_1 (XEXP (x
, 1));
244 encode_pattern_1 (XEXP (x
, 0));
261 *patternp
++ = '0' + XCINT (x
, 1, UNSPEC
);
262 for (i
= 0; i
< XVECLEN (x
, 0); i
++)
263 encode_pattern_1 (XVECEXP (x
, 0, i
));
270 for (i
= 0; i
< XVECLEN (x
, 0); i
++)
271 encode_pattern_1 (XVECEXP (x
, 0, i
));
275 encode_pattern_1 (XEXP (x
, 0));
277 encode_pattern_1 (XEXP (x
, 1));
282 fprintf (stderr
, "can't encode pattern %s\n",
283 GET_RTX_NAME (GET_CODE (x
)));
292 encode_pattern (rtx x
)
295 encode_pattern_1 (x
);
299 /* Since register names indicate the mode they're used in, we need a
300 way to determine which name to refer to the register with. Called
301 by print_operand(). */
304 reg_name_with_mode (int regno
, machine_mode mode
)
306 int mlen
= GET_MODE_SIZE (mode
);
307 if (regno
== R0_REGNO
&& mlen
== 1)
309 if (regno
== R0_REGNO
&& (mlen
== 3 || mlen
== 4))
311 if (regno
== R0_REGNO
&& mlen
== 6)
313 if (regno
== R0_REGNO
&& mlen
== 8)
315 if (regno
== R1_REGNO
&& mlen
== 1)
317 if (regno
== R1_REGNO
&& (mlen
== 3 || mlen
== 4))
319 if (regno
== A0_REGNO
&& TARGET_A16
&& (mlen
== 3 || mlen
== 4))
321 return reg_names
[regno
];
324 /* How many bytes a register uses on stack when it's pushed. We need
325 to know this because the push opcode needs to explicitly indicate
326 the size of the register, even though the name of the register
327 already tells it that. Used by m32c_output_reg_{push,pop}, which
328 is only used through calls to ASM_OUTPUT_REG_{PUSH,POP}. */
331 reg_push_size (int regno
)
356 /* Given two register classes, find the largest intersection between
357 them. If there is no intersection, return RETURNED_IF_EMPTY
360 reduce_class (reg_class_t original_class
, reg_class_t limiting_class
,
361 reg_class_t returned_if_empty
)
365 reg_class_t best
= NO_REGS
;
366 unsigned int best_size
= 0;
368 if (original_class
== limiting_class
)
369 return original_class
;
371 cc
= reg_class_contents
[original_class
];
372 AND_HARD_REG_SET (cc
, reg_class_contents
[limiting_class
]);
374 for (i
= 0; i
< LIM_REG_CLASSES
; i
++)
376 if (hard_reg_set_subset_p (reg_class_contents
[i
], cc
))
377 if (best_size
< reg_class_size
[i
])
379 best
= (reg_class_t
) i
;
380 best_size
= reg_class_size
[i
];
385 return returned_if_empty
;
389 /* Used by m32c_register_move_cost to determine if a move is
390 impossibly expensive. */
392 class_can_hold_mode (reg_class_t rclass
, machine_mode mode
)
394 /* Cache the results: 0=untested 1=no 2=yes */
395 static char results
[LIM_REG_CLASSES
][MAX_MACHINE_MODE
];
397 if (results
[(int) rclass
][mode
] == 0)
400 results
[rclass
][mode
] = 1;
401 for (r
= 0; r
< FIRST_PSEUDO_REGISTER
; r
++)
402 if (in_hard_reg_set_p (reg_class_contents
[(int) rclass
], mode
, r
)
403 && HARD_REGNO_MODE_OK (r
, mode
))
405 results
[rclass
][mode
] = 2;
411 fprintf (stderr
, "class %s can hold %s? %s\n",
412 class_names
[(int) rclass
], mode_name
[mode
],
413 (results
[rclass
][mode
] == 2) ? "yes" : "no");
415 return results
[(int) rclass
][mode
] == 2;
418 /* Run-time Target Specification. */
420 /* Memregs are memory locations that gcc treats like general
421 registers, as there are a limited number of true registers and the
422 m32c families can use memory in most places that registers can be
425 However, since memory accesses are more expensive than registers,
426 we allow the user to limit the number of memregs available, in
427 order to try to persuade gcc to try harder to use real registers.
429 Memregs are provided by lib1funcs.S.
432 int ok_to_change_target_memregs
= TRUE
;
434 /* Implements TARGET_OPTION_OVERRIDE. */
436 #undef TARGET_OPTION_OVERRIDE
437 #define TARGET_OPTION_OVERRIDE m32c_option_override
440 m32c_option_override (void)
442 /* We limit memregs to 0..16, and provide a default. */
443 if (global_options_set
.x_target_memregs
)
445 if (target_memregs
< 0 || target_memregs
> 16)
446 error ("invalid target memregs value '%d'", target_memregs
);
454 /* This target defaults to strict volatile bitfields. */
455 if (flag_strict_volatile_bitfields
< 0 && abi_version_at_least(2))
456 flag_strict_volatile_bitfields
= 1;
458 /* r8c/m16c have no 16-bit indirect call, so thunks are involved.
459 This is always worse than an absolute call. */
461 flag_no_function_cse
= 1;
463 /* This wants to put insns between compares and their jumps. */
464 /* FIXME: The right solution is to properly trace the flags register
465 values, but that is too much work for stage 4. */
466 flag_combine_stack_adjustments
= 0;
469 #undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
470 #define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m32c_override_options_after_change
473 m32c_override_options_after_change (void)
476 flag_no_function_cse
= 1;
479 /* Defining data structures for per-function information */
481 /* The usual; we set up our machine_function data. */
482 static struct machine_function
*
483 m32c_init_machine_status (void)
485 return ggc_cleared_alloc
<machine_function
> ();
488 /* Implements INIT_EXPANDERS. We just set up to call the above
491 m32c_init_expanders (void)
493 init_machine_status
= m32c_init_machine_status
;
498 /* Register Basics */
500 /* Basic Characteristics of Registers */
502 /* Whether a mode fits in a register is complex enough to warrant a
511 } nregs_table
[FIRST_PSEUDO_REGISTER
] =
513 { 1, 1, 2, 2, 4 }, /* r0 */
514 { 0, 1, 0, 0, 0 }, /* r2 */
515 { 1, 1, 2, 2, 0 }, /* r1 */
516 { 0, 1, 0, 0, 0 }, /* r3 */
517 { 0, 1, 1, 0, 0 }, /* a0 */
518 { 0, 1, 1, 0, 0 }, /* a1 */
519 { 0, 1, 1, 0, 0 }, /* sb */
520 { 0, 1, 1, 0, 0 }, /* fb */
521 { 0, 1, 1, 0, 0 }, /* sp */
522 { 1, 1, 1, 0, 0 }, /* pc */
523 { 0, 0, 0, 0, 0 }, /* fl */
524 { 1, 1, 1, 0, 0 }, /* ap */
525 { 1, 1, 2, 2, 4 }, /* mem0 */
526 { 1, 1, 2, 2, 4 }, /* mem1 */
527 { 1, 1, 2, 2, 4 }, /* mem2 */
528 { 1, 1, 2, 2, 4 }, /* mem3 */
529 { 1, 1, 2, 2, 4 }, /* mem4 */
530 { 1, 1, 2, 2, 0 }, /* mem5 */
531 { 1, 1, 2, 2, 0 }, /* mem6 */
532 { 1, 1, 0, 0, 0 }, /* mem7 */
535 /* Implements TARGET_CONDITIONAL_REGISTER_USAGE. We adjust the number
536 of available memregs, and select which registers need to be preserved
537 across calls based on the chip family. */
539 #undef TARGET_CONDITIONAL_REGISTER_USAGE
540 #define TARGET_CONDITIONAL_REGISTER_USAGE m32c_conditional_register_usage
542 m32c_conditional_register_usage (void)
546 if (0 <= target_memregs
&& target_memregs
<= 16)
548 /* The command line option is bytes, but our "registers" are
550 for (i
= (target_memregs
+1)/2; i
< 8; i
++)
552 fixed_regs
[MEM0_REGNO
+ i
] = 1;
553 CLEAR_HARD_REG_BIT (reg_class_contents
[MEM_REGS
], MEM0_REGNO
+ i
);
557 /* M32CM and M32C preserve more registers across function calls. */
560 call_used_regs
[R1_REGNO
] = 0;
561 call_used_regs
[R2_REGNO
] = 0;
562 call_used_regs
[R3_REGNO
] = 0;
563 call_used_regs
[A0_REGNO
] = 0;
564 call_used_regs
[A1_REGNO
] = 0;
568 /* How Values Fit in Registers */
570 /* Implements HARD_REGNO_NREGS. This is complicated by the fact that
571 different registers are different sizes from each other, *and* may
572 be different sizes in different chip families. */
574 m32c_hard_regno_nregs_1 (int regno
, machine_mode mode
)
576 if (regno
== FLG_REGNO
&& mode
== CCmode
)
578 if (regno
>= FIRST_PSEUDO_REGISTER
)
579 return ((GET_MODE_SIZE (mode
) + UNITS_PER_WORD
- 1) / UNITS_PER_WORD
);
581 if (regno
>= MEM0_REGNO
&& regno
<= MEM7_REGNO
)
582 return (GET_MODE_SIZE (mode
) + 1) / 2;
584 if (GET_MODE_SIZE (mode
) <= 1)
585 return nregs_table
[regno
].qi_regs
;
586 if (GET_MODE_SIZE (mode
) <= 2)
587 return nregs_table
[regno
].hi_regs
;
588 if (regno
== A0_REGNO
&& mode
== SImode
&& TARGET_A16
)
590 if ((GET_MODE_SIZE (mode
) <= 3 || mode
== PSImode
) && TARGET_A24
)
591 return nregs_table
[regno
].pi_regs
;
592 if (GET_MODE_SIZE (mode
) <= 4)
593 return nregs_table
[regno
].si_regs
;
594 if (GET_MODE_SIZE (mode
) <= 8)
595 return nregs_table
[regno
].di_regs
;
600 m32c_hard_regno_nregs (int regno
, machine_mode mode
)
602 int rv
= m32c_hard_regno_nregs_1 (regno
, mode
);
606 /* Implements HARD_REGNO_MODE_OK. The above function does the work
607 already; just test its return value. */
609 m32c_hard_regno_ok (int regno
, machine_mode mode
)
611 return m32c_hard_regno_nregs_1 (regno
, mode
) != 0;
614 /* Implements MODES_TIEABLE_P. In general, modes aren't tieable since
615 registers are all different sizes. However, since most modes are
616 bigger than our registers anyway, it's easier to implement this
617 function that way, leaving QImode as the only unique case. */
619 m32c_modes_tieable_p (machine_mode m1
, machine_mode m2
)
621 if (GET_MODE_SIZE (m1
) == GET_MODE_SIZE (m2
))
625 if (m1
== QImode
|| m2
== QImode
)
632 /* Register Classes */
634 /* Implements REGNO_REG_CLASS. */
636 m32c_regno_reg_class (int regno
)
661 if (IS_MEM_REGNO (regno
))
667 /* Implements REGNO_OK_FOR_BASE_P. */
669 m32c_regno_ok_for_base_p (int regno
)
671 if (regno
== A0_REGNO
672 || regno
== A1_REGNO
|| regno
>= FIRST_PSEUDO_REGISTER
)
677 #define DEBUG_RELOAD 0
679 /* Implements TARGET_PREFERRED_RELOAD_CLASS. In general, prefer general
680 registers of the appropriate size. */
682 #undef TARGET_PREFERRED_RELOAD_CLASS
683 #define TARGET_PREFERRED_RELOAD_CLASS m32c_preferred_reload_class
686 m32c_preferred_reload_class (rtx x
, reg_class_t rclass
)
688 reg_class_t newclass
= rclass
;
691 fprintf (stderr
, "\npreferred_reload_class for %s is ",
692 class_names
[rclass
]);
694 if (rclass
== NO_REGS
)
695 rclass
= GET_MODE (x
) == QImode
? HL_REGS
: R03_REGS
;
697 if (reg_classes_intersect_p (rclass
, CR_REGS
))
699 switch (GET_MODE (x
))
705 /* newclass = HI_REGS; */
710 else if (newclass
== QI_REGS
&& GET_MODE_SIZE (GET_MODE (x
)) > 2)
712 else if (GET_MODE_SIZE (GET_MODE (x
)) > 4
713 && ! reg_class_subset_p (R03_REGS
, rclass
))
716 rclass
= reduce_class (rclass
, newclass
, rclass
);
718 if (GET_MODE (x
) == QImode
)
719 rclass
= reduce_class (rclass
, HL_REGS
, rclass
);
722 fprintf (stderr
, "%s\n", class_names
[rclass
]);
725 if (GET_CODE (x
) == MEM
726 && GET_CODE (XEXP (x
, 0)) == PLUS
727 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == PLUS
)
728 fprintf (stderr
, "Glorm!\n");
733 /* Implements TARGET_PREFERRED_OUTPUT_RELOAD_CLASS. */
735 #undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
736 #define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS m32c_preferred_output_reload_class
739 m32c_preferred_output_reload_class (rtx x
, reg_class_t rclass
)
741 return m32c_preferred_reload_class (x
, rclass
);
744 /* Implements LIMIT_RELOAD_CLASS. We basically want to avoid using
745 address registers for reloads since they're needed for address
748 m32c_limit_reload_class (machine_mode mode
, int rclass
)
751 fprintf (stderr
, "limit_reload_class for %s: %s ->",
752 mode_name
[mode
], class_names
[rclass
]);
756 rclass
= reduce_class (rclass
, HL_REGS
, rclass
);
757 else if (mode
== HImode
)
758 rclass
= reduce_class (rclass
, HI_REGS
, rclass
);
759 else if (mode
== SImode
)
760 rclass
= reduce_class (rclass
, SI_REGS
, rclass
);
762 if (rclass
!= A_REGS
)
763 rclass
= reduce_class (rclass
, DI_REGS
, rclass
);
766 fprintf (stderr
, " %s\n", class_names
[rclass
]);
771 /* Implements SECONDARY_RELOAD_CLASS. QImode have to be reloaded in
772 r0 or r1, as those are the only real QImode registers. CR regs get
773 reloaded through appropriately sized general or address
776 m32c_secondary_reload_class (int rclass
, machine_mode mode
, rtx x
)
778 int cc
= class_contents
[rclass
][0];
780 fprintf (stderr
, "\nsecondary reload class %s %s\n",
781 class_names
[rclass
], mode_name
[mode
]);
785 && GET_CODE (x
) == MEM
&& (cc
& ~class_contents
[R23_REGS
][0]) == 0)
787 if (reg_classes_intersect_p (rclass
, CR_REGS
)
788 && GET_CODE (x
) == REG
789 && REGNO (x
) >= SB_REGNO
&& REGNO (x
) <= SP_REGNO
)
790 return (TARGET_A16
|| mode
== HImode
) ? HI_REGS
: A_REGS
;
794 /* Implements TARGET_CLASS_LIKELY_SPILLED_P. A_REGS is needed for address
797 #undef TARGET_CLASS_LIKELY_SPILLED_P
798 #define TARGET_CLASS_LIKELY_SPILLED_P m32c_class_likely_spilled_p
801 m32c_class_likely_spilled_p (reg_class_t regclass
)
803 if (regclass
== A_REGS
)
806 return (reg_class_size
[(int) regclass
] == 1);
809 /* Implements TARGET_CLASS_MAX_NREGS. We calculate this according to its
810 documented meaning, to avoid potential inconsistencies with actual
811 class definitions. */
813 #undef TARGET_CLASS_MAX_NREGS
814 #define TARGET_CLASS_MAX_NREGS m32c_class_max_nregs
817 m32c_class_max_nregs (reg_class_t regclass
, machine_mode mode
)
820 unsigned char max
= 0;
822 for (rn
= 0; rn
< FIRST_PSEUDO_REGISTER
; rn
++)
823 if (TEST_HARD_REG_BIT (reg_class_contents
[(int) regclass
], rn
))
825 unsigned char n
= m32c_hard_regno_nregs (rn
, mode
);
832 /* Implements CANNOT_CHANGE_MODE_CLASS. Only r0 and r1 can change to
833 QI (r0l, r1l) because the chip doesn't support QI ops on other
834 registers (well, it does on a0/a1 but if we let gcc do that, reload
835 suffers). Otherwise, we allow changes to larger modes. */
837 m32c_cannot_change_mode_class (machine_mode from
,
838 machine_mode to
, int rclass
)
842 fprintf (stderr
, "cannot change from %s to %s in %s\n",
843 mode_name
[from
], mode_name
[to
], class_names
[rclass
]);
846 /* If the larger mode isn't allowed in any of these registers, we
847 can't allow the change. */
848 for (rn
= 0; rn
< FIRST_PSEUDO_REGISTER
; rn
++)
849 if (class_contents
[rclass
][0] & (1 << rn
))
850 if (! m32c_hard_regno_ok (rn
, to
))
854 return (class_contents
[rclass
][0] & 0x1ffa);
856 if (class_contents
[rclass
][0] & 0x0005 /* r0, r1 */
857 && GET_MODE_SIZE (from
) > 1)
859 if (GET_MODE_SIZE (from
) > 2) /* all other regs */
865 /* Helpers for the rest of the file. */
866 /* TRUE if the rtx is a REG rtx for the given register. */
867 #define IS_REG(rtx,regno) (GET_CODE (rtx) == REG \
868 && REGNO (rtx) == regno)
869 /* TRUE if the rtx is a pseudo - specifically, one we can use as a
870 base register in address calculations (hence the "strict"
872 #define IS_PSEUDO(rtx,strict) (!strict && GET_CODE (rtx) == REG \
873 && (REGNO (rtx) == AP_REGNO \
874 || REGNO (rtx) >= FIRST_PSEUDO_REGISTER))
876 #define A0_OR_PSEUDO(x) (IS_REG(x, A0_REGNO) || REGNO (x) >= FIRST_PSEUDO_REGISTER)
878 /* Implements matching for constraints (see next function too). 'S' is
879 for memory constraints, plus "Rpa" for PARALLEL rtx's we use for
880 call return values. */
882 m32c_matches_constraint_p (rtx value
, int constraint
)
884 encode_pattern (value
);
886 switch (constraint
) {
888 return (far_addr_space_p (value
)
890 && A0_OR_PSEUDO (patternr
[1])
891 && GET_MODE (patternr
[1]) == SImode
)
892 || (RTX_IS ("m+^Sri")
893 && A0_OR_PSEUDO (patternr
[4])
894 && GET_MODE (patternr
[4]) == HImode
)
895 || (RTX_IS ("m+^Srs")
896 && A0_OR_PSEUDO (patternr
[4])
897 && GET_MODE (patternr
[4]) == HImode
)
898 || (RTX_IS ("m+^S+ris")
899 && A0_OR_PSEUDO (patternr
[5])
900 && GET_MODE (patternr
[5]) == HImode
)
904 /* This is the common "src/dest" address */
906 if (GET_CODE (value
) == MEM
&& CONSTANT_P (XEXP (value
, 0)))
908 if (RTX_IS ("ms") || RTX_IS ("m+si"))
910 if (RTX_IS ("m++rii"))
912 if (REGNO (patternr
[3]) == FB_REGNO
913 && INTVAL (patternr
[4]) == 0)
918 else if (RTX_IS ("m+ri") || RTX_IS ("m+rs") || RTX_IS ("m+r+si"))
922 if (REGNO (r
) == SP_REGNO
)
924 return m32c_legitimate_address_p (GET_MODE (value
), XEXP (value
, 0), 1);
931 else if (RTX_IS ("m+ri"))
935 return (IS_REG (r
, A0_REGNO
) || IS_REG (r
, A1_REGNO
));
938 return (RTX_IS ("mi") || RTX_IS ("ms") || RTX_IS ("m+si"));
940 return ((RTX_IS ("mr")
941 && (IS_REG (patternr
[1], SP_REGNO
)))
942 || (RTX_IS ("m+ri") && (IS_REG (patternr
[2], SP_REGNO
))));
944 return ((RTX_IS ("mr")
945 && (IS_REG (patternr
[1], FB_REGNO
)))
946 || (RTX_IS ("m+ri") && (IS_REG (patternr
[2], FB_REGNO
))));
948 return ((RTX_IS ("mr")
949 && (IS_REG (patternr
[1], SB_REGNO
)))
950 || (RTX_IS ("m+ri") && (IS_REG (patternr
[2], SB_REGNO
))));
952 /* Absolute addresses 0..0x1fff used for bit addressing (I/O ports) */
953 return (RTX_IS ("mi")
954 && !(INTVAL (patternr
[1]) & ~0x1fff));
956 return r1h_operand (value
, QImode
);
958 return GET_CODE (value
) == PARALLEL
;
964 /* STACK AND CALLING */
968 /* Implements RETURN_ADDR_RTX. Note that R8C and M16C push 24 bits
969 (yes, THREE bytes) onto the stack for the return address, but we
970 don't support pointers bigger than 16 bits on those chips. This
971 will likely wreak havoc with exception unwinding. FIXME. */
973 m32c_return_addr_rtx (int count
)
985 /* It's four bytes */
991 /* FIXME: it's really 3 bytes */
997 gen_rtx_MEM (mode
, plus_constant (Pmode
, gen_rtx_REG (Pmode
, FP_REGNO
),
999 return copy_to_mode_reg (mode
, ra_mem
);
1002 /* Implements INCOMING_RETURN_ADDR_RTX. See comment above. */
1004 m32c_incoming_return_addr_rtx (void)
1007 return gen_rtx_MEM (PSImode
, gen_rtx_REG (PSImode
, SP_REGNO
));
1010 /* Exception Handling Support */
1012 /* Implements EH_RETURN_DATA_REGNO. Choose registers able to hold
1015 m32c_eh_return_data_regno (int n
)
1022 return MEM0_REGNO
+4;
1024 return INVALID_REGNUM
;
1028 /* Implements EH_RETURN_STACKADJ_RTX. Saved and used later in
1029 m32c_emit_eh_epilogue. */
1031 m32c_eh_return_stackadj_rtx (void)
1033 if (!cfun
->machine
->eh_stack_adjust
)
1037 sa
= gen_rtx_REG (Pmode
, R0_REGNO
);
1038 cfun
->machine
->eh_stack_adjust
= sa
;
1040 return cfun
->machine
->eh_stack_adjust
;
1043 /* Registers That Address the Stack Frame */
1045 /* Implements DWARF_FRAME_REGNUM and DBX_REGISTER_NUMBER. Note that
1046 the original spec called for dwarf numbers to vary with register
1047 width as well, for example, r0l, r0, and r2r0 would each have
1048 different dwarf numbers. GCC doesn't support this, and we don't do
1049 it, and gdb seems to like it this way anyway. */
1051 m32c_dwarf_frame_regnum (int n
)
1077 return DWARF_FRAME_REGISTERS
+ 1;
1081 /* The frame looks like this:
1083 ap -> +------------------------------
1084 | Return address (3 or 4 bytes)
1085 | Saved FB (2 or 4 bytes)
1086 fb -> +------------------------------
1089 | through r0 as needed
1090 sp -> +------------------------------
1093 /* We use this to wrap all emitted insns in the prologue. */
1097 RTX_FRAME_RELATED_P (x
) = 1;
1101 /* This maps register numbers to the PUSHM/POPM bitfield, and tells us
1102 how much the stack pointer moves for each, for each cpu family. */
1111 /* These are in reverse push (nearest-to-sp) order. */
1112 { R0_REGNO
, 0x80, 2, 2 },
1113 { R1_REGNO
, 0x40, 2, 2 },
1114 { R2_REGNO
, 0x20, 2, 2 },
1115 { R3_REGNO
, 0x10, 2, 2 },
1116 { A0_REGNO
, 0x08, 2, 4 },
1117 { A1_REGNO
, 0x04, 2, 4 },
1118 { SB_REGNO
, 0x02, 2, 4 },
1119 { FB_REGNO
, 0x01, 2, 4 }
1122 #define PUSHM_N (sizeof(pushm_info)/sizeof(pushm_info[0]))
1124 /* Returns TRUE if we need to save/restore the given register. We
1125 save everything for exception handlers, so that any register can be
1126 unwound. For interrupt handlers, we save everything if the handler
1127 calls something else (because we don't know what *that* function
1128 might do), but try to be a bit smarter if the handler is a leaf
1129 function. We always save $a0, though, because we use that in the
1130 epilogue to copy $fb to $sp. */
1132 need_to_save (int regno
)
1134 if (fixed_regs
[regno
])
1136 if (crtl
->calls_eh_return
)
1138 if (regno
== FP_REGNO
)
1140 if (cfun
->machine
->is_interrupt
1141 && (!cfun
->machine
->is_leaf
1142 || (regno
== A0_REGNO
1143 && m32c_function_needs_enter ())
1146 if (df_regs_ever_live_p (regno
)
1147 && (!call_used_regs
[regno
] || cfun
->machine
->is_interrupt
))
1152 /* This function contains all the intelligence about saving and
1153 restoring registers. It always figures out the register save set.
1154 When called with PP_justcount, it merely returns the size of the
1155 save set (for eliminating the frame pointer, for example). When
1156 called with PP_pushm or PP_popm, it emits the appropriate
1157 instructions for saving (pushm) or restoring (popm) the
1160 m32c_pushm_popm (Push_Pop_Type ppt
)
1163 int byte_count
= 0, bytes
;
1165 rtx dwarf_set
[PUSHM_N
];
1167 int nosave_mask
= 0;
1169 if (crtl
->return_rtx
1170 && GET_CODE (crtl
->return_rtx
) == PARALLEL
1171 && !(crtl
->calls_eh_return
|| cfun
->machine
->is_interrupt
))
1173 rtx exp
= XVECEXP (crtl
->return_rtx
, 0, 0);
1174 rtx rv
= XEXP (exp
, 0);
1175 int rv_bytes
= GET_MODE_SIZE (GET_MODE (rv
));
1178 nosave_mask
|= 0x20; /* PSI, SI */
1180 nosave_mask
|= 0xf0; /* DF */
1182 nosave_mask
|= 0x50; /* DI */
1185 for (i
= 0; i
< (int) PUSHM_N
; i
++)
1187 /* Skip if neither register needs saving. */
1188 if (!need_to_save (pushm_info
[i
].reg1
))
1191 if (pushm_info
[i
].bit
& nosave_mask
)
1194 reg_mask
|= pushm_info
[i
].bit
;
1195 bytes
= TARGET_A16
? pushm_info
[i
].a16_bytes
: pushm_info
[i
].a24_bytes
;
1197 if (ppt
== PP_pushm
)
1199 machine_mode mode
= (bytes
== 2) ? HImode
: SImode
;
1202 /* Always use stack_pointer_rtx instead of calling
1203 rtx_gen_REG ourselves. Code elsewhere in GCC assumes
1204 that there is a single rtx representing the stack pointer,
1205 namely stack_pointer_rtx, and uses == to recognize it. */
1206 addr
= stack_pointer_rtx
;
1208 if (byte_count
!= 0)
1209 addr
= gen_rtx_PLUS (GET_MODE (addr
), addr
, GEN_INT (byte_count
));
1211 dwarf_set
[n_dwarfs
++] =
1212 gen_rtx_SET (VOIDmode
,
1213 gen_rtx_MEM (mode
, addr
),
1214 gen_rtx_REG (mode
, pushm_info
[i
].reg1
));
1215 F (dwarf_set
[n_dwarfs
- 1]);
1218 byte_count
+= bytes
;
1221 if (cfun
->machine
->is_interrupt
)
1223 cfun
->machine
->intr_pushm
= reg_mask
& 0xfe;
1228 if (cfun
->machine
->is_interrupt
)
1229 for (i
= MEM0_REGNO
; i
<= MEM7_REGNO
; i
++)
1230 if (need_to_save (i
))
1233 cfun
->machine
->intr_pushmem
[i
- MEM0_REGNO
] = 1;
1236 if (ppt
== PP_pushm
&& byte_count
)
1238 rtx note
= gen_rtx_SEQUENCE (VOIDmode
, rtvec_alloc (n_dwarfs
+ 1));
1243 XVECEXP (note
, 0, 0)
1244 = gen_rtx_SET (VOIDmode
,
1246 gen_rtx_PLUS (GET_MODE (stack_pointer_rtx
),
1248 GEN_INT (-byte_count
)));
1249 F (XVECEXP (note
, 0, 0));
1251 for (i
= 0; i
< n_dwarfs
; i
++)
1252 XVECEXP (note
, 0, i
+ 1) = dwarf_set
[i
];
1254 pushm
= F (emit_insn (gen_pushm (GEN_INT (reg_mask
))));
1256 add_reg_note (pushm
, REG_FRAME_RELATED_EXPR
, note
);
1259 if (cfun
->machine
->is_interrupt
)
1260 for (i
= MEM0_REGNO
; i
<= MEM7_REGNO
; i
++)
1261 if (cfun
->machine
->intr_pushmem
[i
- MEM0_REGNO
])
1264 pushm
= emit_insn (gen_pushhi_16 (gen_rtx_REG (HImode
, i
)));
1266 pushm
= emit_insn (gen_pushhi_24 (gen_rtx_REG (HImode
, i
)));
1270 if (ppt
== PP_popm
&& byte_count
)
1272 if (cfun
->machine
->is_interrupt
)
1273 for (i
= MEM7_REGNO
; i
>= MEM0_REGNO
; i
--)
1274 if (cfun
->machine
->intr_pushmem
[i
- MEM0_REGNO
])
1277 emit_insn (gen_pophi_16 (gen_rtx_REG (HImode
, i
)));
1279 emit_insn (gen_pophi_24 (gen_rtx_REG (HImode
, i
)));
1282 emit_insn (gen_popm (GEN_INT (reg_mask
)));
1288 /* Implements INITIAL_ELIMINATION_OFFSET. See the comment above that
1289 diagrams our call frame. */
1291 m32c_initial_elimination_offset (int from
, int to
)
1295 if (from
== AP_REGNO
)
1305 ofs
+= m32c_pushm_popm (PP_justcount
);
1306 ofs
+= get_frame_size ();
1309 /* Account for push rounding. */
1311 ofs
= (ofs
+ 1) & ~1;
1313 fprintf (stderr
, "initial_elimination_offset from=%d to=%d, ofs=%d\n", from
,
1319 /* Passing Function Arguments on the Stack */
1321 /* Implements PUSH_ROUNDING. The R8C and M16C have byte stacks, the
1322 M32C has word stacks. */
1324 m32c_push_rounding (int n
)
1326 if (TARGET_R8C
|| TARGET_M16C
)
1328 return (n
+ 1) & ~1;
1331 /* Passing Arguments in Registers */
1333 /* Implements TARGET_FUNCTION_ARG. Arguments are passed partly in
1334 registers, partly on stack. If our function returns a struct, a
1335 pointer to a buffer for it is at the top of the stack (last thing
1336 pushed). The first few real arguments may be in registers as
1339 R8C/M16C: arg1 in r1 if it's QI or HI (else it's pushed on stack)
1340 arg2 in r2 if it's HI (else pushed on stack)
1342 M32C: arg1 in r0 if it's QI or HI (else it's pushed on stack)
1345 Structs are not passed in registers, even if they fit. Only
1346 integer and pointer types are passed in registers.
1348 Note that when arg1 doesn't fit in r1, arg2 may still be passed in
1350 #undef TARGET_FUNCTION_ARG
1351 #define TARGET_FUNCTION_ARG m32c_function_arg
1353 m32c_function_arg (cumulative_args_t ca_v
,
1354 machine_mode mode
, const_tree type
, bool named
)
1356 CUMULATIVE_ARGS
*ca
= get_cumulative_args (ca_v
);
1358 /* Can return a reg, parallel, or 0 for stack */
1361 fprintf (stderr
, "func_arg %d (%s, %d)\n",
1362 ca
->parm_num
, mode_name
[mode
], named
);
1366 if (mode
== VOIDmode
)
1369 if (ca
->force_mem
|| !named
)
1372 fprintf (stderr
, "func arg: force %d named %d, mem\n", ca
->force_mem
,
1378 if (type
&& INTEGRAL_TYPE_P (type
) && POINTER_TYPE_P (type
))
1381 if (type
&& AGGREGATE_TYPE_P (type
))
1384 switch (ca
->parm_num
)
1387 if (GET_MODE_SIZE (mode
) == 1 || GET_MODE_SIZE (mode
) == 2)
1388 rv
= gen_rtx_REG (mode
, TARGET_A16
? R1_REGNO
: R0_REGNO
);
1392 if (TARGET_A16
&& GET_MODE_SIZE (mode
) == 2)
1393 rv
= gen_rtx_REG (mode
, R2_REGNO
);
1403 #undef TARGET_PASS_BY_REFERENCE
1404 #define TARGET_PASS_BY_REFERENCE m32c_pass_by_reference
1406 m32c_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED
,
1407 machine_mode mode ATTRIBUTE_UNUSED
,
1408 const_tree type ATTRIBUTE_UNUSED
,
1409 bool named ATTRIBUTE_UNUSED
)
1414 /* Implements INIT_CUMULATIVE_ARGS. */
1416 m32c_init_cumulative_args (CUMULATIVE_ARGS
* ca
,
1418 rtx libname ATTRIBUTE_UNUSED
,
1420 int n_named_args ATTRIBUTE_UNUSED
)
1422 if (fntype
&& aggregate_value_p (TREE_TYPE (fntype
), fndecl
))
1429 /* Implements TARGET_FUNCTION_ARG_ADVANCE. force_mem is set for
1430 functions returning structures, so we always reset that. Otherwise,
1431 we only need to know the sequence number of the argument to know what
1433 #undef TARGET_FUNCTION_ARG_ADVANCE
1434 #define TARGET_FUNCTION_ARG_ADVANCE m32c_function_arg_advance
1436 m32c_function_arg_advance (cumulative_args_t ca_v
,
1437 machine_mode mode ATTRIBUTE_UNUSED
,
1438 const_tree type ATTRIBUTE_UNUSED
,
1439 bool named ATTRIBUTE_UNUSED
)
1441 CUMULATIVE_ARGS
*ca
= get_cumulative_args (ca_v
);
1449 /* Implements TARGET_FUNCTION_ARG_BOUNDARY. */
1450 #undef TARGET_FUNCTION_ARG_BOUNDARY
1451 #define TARGET_FUNCTION_ARG_BOUNDARY m32c_function_arg_boundary
1453 m32c_function_arg_boundary (machine_mode mode ATTRIBUTE_UNUSED
,
1454 const_tree type ATTRIBUTE_UNUSED
)
1456 return (TARGET_A16
? 8 : 16);
1459 /* Implements FUNCTION_ARG_REGNO_P. */
1461 m32c_function_arg_regno_p (int r
)
1464 return (r
== R0_REGNO
);
1465 return (r
== R1_REGNO
|| r
== R2_REGNO
);
1468 /* HImode and PSImode are the two "native" modes as far as GCC is
1469 concerned, but the chips also support a 32-bit mode which is used
1470 for some opcodes in R8C/M16C and for reset vectors and such. */
1471 #undef TARGET_VALID_POINTER_MODE
1472 #define TARGET_VALID_POINTER_MODE m32c_valid_pointer_mode
1474 m32c_valid_pointer_mode (machine_mode mode
)
1484 /* How Scalar Function Values Are Returned */
1486 /* Implements TARGET_LIBCALL_VALUE. Most values are returned in $r0, or some
1487 combination of registers starting there (r2r0 for longs, r3r1r2r0
1488 for long long, r3r2r1r0 for doubles), except that that ABI
1489 currently doesn't work because it ends up using all available
1490 general registers and gcc often can't compile it. So, instead, we
1491 return anything bigger than 16 bits in "mem0" (effectively, a
1492 memory location). */
1494 #undef TARGET_LIBCALL_VALUE
1495 #define TARGET_LIBCALL_VALUE m32c_libcall_value
1498 m32c_libcall_value (machine_mode mode
, const_rtx fun ATTRIBUTE_UNUSED
)
1500 /* return reg or parallel */
1502 /* FIXME: GCC has difficulty returning large values in registers,
1503 because that ties up most of the general registers and gives the
1504 register allocator little to work with. Until we can resolve
1505 this, large values are returned in memory. */
1510 rv
= gen_rtx_PARALLEL (mode
, rtvec_alloc (4));
1511 XVECEXP (rv
, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode
,
1512 gen_rtx_REG (HImode
,
1515 XVECEXP (rv
, 0, 1) = gen_rtx_EXPR_LIST (VOIDmode
,
1516 gen_rtx_REG (HImode
,
1519 XVECEXP (rv
, 0, 2) = gen_rtx_EXPR_LIST (VOIDmode
,
1520 gen_rtx_REG (HImode
,
1523 XVECEXP (rv
, 0, 3) = gen_rtx_EXPR_LIST (VOIDmode
,
1524 gen_rtx_REG (HImode
,
1530 if (TARGET_A24
&& GET_MODE_SIZE (mode
) > 2)
1534 rv
= gen_rtx_PARALLEL (mode
, rtvec_alloc (1));
1535 XVECEXP (rv
, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode
,
1543 if (GET_MODE_SIZE (mode
) > 2)
1544 return gen_rtx_REG (mode
, MEM0_REGNO
);
1545 return gen_rtx_REG (mode
, R0_REGNO
);
1548 /* Implements TARGET_FUNCTION_VALUE. Functions and libcalls have the same
1551 #undef TARGET_FUNCTION_VALUE
1552 #define TARGET_FUNCTION_VALUE m32c_function_value
1555 m32c_function_value (const_tree valtype
,
1556 const_tree fn_decl_or_type ATTRIBUTE_UNUSED
,
1557 bool outgoing ATTRIBUTE_UNUSED
)
1559 /* return reg or parallel */
1560 const machine_mode mode
= TYPE_MODE (valtype
);
1561 return m32c_libcall_value (mode
, NULL_RTX
);
1564 /* Implements TARGET_FUNCTION_VALUE_REGNO_P. */
1566 #undef TARGET_FUNCTION_VALUE_REGNO_P
1567 #define TARGET_FUNCTION_VALUE_REGNO_P m32c_function_value_regno_p
1570 m32c_function_value_regno_p (const unsigned int regno
)
1572 return (regno
== R0_REGNO
|| regno
== MEM0_REGNO
);
1575 /* How Large Values Are Returned */
1577 /* We return structures by pushing the address on the stack, even if
1578 we use registers for the first few "real" arguments. */
1579 #undef TARGET_STRUCT_VALUE_RTX
1580 #define TARGET_STRUCT_VALUE_RTX m32c_struct_value_rtx
1582 m32c_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED
,
1583 int incoming ATTRIBUTE_UNUSED
)
1588 /* Function Entry and Exit */
1590 /* Implements EPILOGUE_USES. Interrupts restore all registers. */
1592 m32c_epilogue_uses (int regno ATTRIBUTE_UNUSED
)
1594 if (cfun
->machine
->is_interrupt
)
1599 /* Implementing the Varargs Macros */
1601 #undef TARGET_STRICT_ARGUMENT_NAMING
1602 #define TARGET_STRICT_ARGUMENT_NAMING m32c_strict_argument_naming
1604 m32c_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED
)
1609 /* Trampolines for Nested Functions */
1613 1 0000 75C43412 mov.w #0x1234,a0
1614 2 0004 FC000000 jmp.a label
1617 1 0000 BC563412 mov.l:s #0x123456,a0
1618 2 0004 CC000000 jmp.a label
1621 /* Implements TRAMPOLINE_SIZE. */
1623 m32c_trampoline_size (void)
1625 /* Allocate extra space so we can avoid the messy shifts when we
1626 initialize the trampoline; we just write past the end of the
1628 return TARGET_A16
? 8 : 10;
1631 /* Implements TRAMPOLINE_ALIGNMENT. */
1633 m32c_trampoline_alignment (void)
1638 /* Implements TARGET_TRAMPOLINE_INIT. */
1640 #undef TARGET_TRAMPOLINE_INIT
1641 #define TARGET_TRAMPOLINE_INIT m32c_trampoline_init
1643 m32c_trampoline_init (rtx m_tramp
, tree fndecl
, rtx chainval
)
1645 rtx function
= XEXP (DECL_RTL (fndecl
), 0);
1647 #define A0(m,i) adjust_address (m_tramp, m, i)
1650 /* Note: we subtract a "word" because the moves want signed
1651 constants, not unsigned constants. */
1652 emit_move_insn (A0 (HImode
, 0), GEN_INT (0xc475 - 0x10000));
1653 emit_move_insn (A0 (HImode
, 2), chainval
);
1654 emit_move_insn (A0 (QImode
, 4), GEN_INT (0xfc - 0x100));
1655 /* We use 16-bit addresses here, but store the zero to turn it
1656 into a 24-bit offset. */
1657 emit_move_insn (A0 (HImode
, 5), function
);
1658 emit_move_insn (A0 (QImode
, 7), GEN_INT (0x00));
1662 /* Note that the PSI moves actually write 4 bytes. Make sure we
1663 write stuff out in the right order, and leave room for the
1664 extra byte at the end. */
1665 emit_move_insn (A0 (QImode
, 0), GEN_INT (0xbc - 0x100));
1666 emit_move_insn (A0 (PSImode
, 1), chainval
);
1667 emit_move_insn (A0 (QImode
, 4), GEN_INT (0xcc - 0x100));
1668 emit_move_insn (A0 (PSImode
, 5), function
);
1673 /* Addressing Modes */
1675 /* The r8c/m32c family supports a wide range of non-orthogonal
1676 addressing modes, including the ability to double-indirect on *some*
1677 of them. Not all insns support all modes, either, but we rely on
1678 predicates and constraints to deal with that. */
1679 #undef TARGET_LEGITIMATE_ADDRESS_P
1680 #define TARGET_LEGITIMATE_ADDRESS_P m32c_legitimate_address_p
1682 m32c_legitimate_address_p (machine_mode mode
, rtx x
, bool strict
)
1688 if (TARGET_A16
&& GET_MODE (x
) != HImode
&& GET_MODE (x
) != SImode
)
1690 if (TARGET_A24
&& GET_MODE (x
) != PSImode
)
1693 /* Wide references to memory will be split after reload, so we must
1694 ensure that all parts of such splits remain legitimate
1696 mode_adjust
= GET_MODE_SIZE (mode
) - 1;
1698 /* allowing PLUS yields mem:HI(plus:SI(mem:SI(plus:SI in m32c_split_move */
1699 if (GET_CODE (x
) == PRE_DEC
1700 || GET_CODE (x
) == POST_INC
|| GET_CODE (x
) == PRE_MODIFY
)
1702 return (GET_CODE (XEXP (x
, 0)) == REG
1703 && REGNO (XEXP (x
, 0)) == SP_REGNO
);
1707 /* This is the double indirection detection, but it currently
1708 doesn't work as cleanly as this code implies, so until we've had
1709 a chance to debug it, leave it disabled. */
1710 if (TARGET_A24
&& GET_CODE (x
) == MEM
&& GET_CODE (XEXP (x
, 0)) != PLUS
)
1713 fprintf (stderr
, "double indirect\n");
1722 /* Most indexable registers can be used without displacements,
1723 although some of them will be emitted with an explicit zero
1724 to please the assembler. */
1725 switch (REGNO (patternr
[0]))
1731 if (TARGET_A16
&& GET_MODE (x
) == SImode
)
1737 if (IS_PSEUDO (patternr
[0], strict
))
1743 if (TARGET_A16
&& GET_MODE (x
) == SImode
)
1748 /* This is more interesting, because different base registers
1749 allow for different displacements - both range and signedness
1750 - and it differs from chip series to chip series too. */
1751 int rn
= REGNO (patternr
[1]);
1752 HOST_WIDE_INT offs
= INTVAL (patternr
[2]);
1758 /* The syntax only allows positive offsets, but when the
1759 offsets span the entire memory range, we can simulate
1760 negative offsets by wrapping. */
1762 return (offs
>= -65536 && offs
<= 65535 - mode_adjust
);
1764 return (offs
>= 0 && offs
<= 65535 - mode_adjust
);
1766 return (offs
>= -16777216 && offs
<= 16777215);
1770 return (offs
>= -128 && offs
<= 127 - mode_adjust
);
1771 return (offs
>= -65536 && offs
<= 65535 - mode_adjust
);
1774 return (offs
>= -128 && offs
<= 127 - mode_adjust
);
1777 if (IS_PSEUDO (patternr
[1], strict
))
1782 if (RTX_IS ("+rs") || RTX_IS ("+r+si"))
1784 rtx reg
= patternr
[1];
1786 /* We don't know where the symbol is, so only allow base
1787 registers which support displacements spanning the whole
1789 switch (REGNO (reg
))
1793 /* $sb needs a secondary reload, but since it's involved in
1794 memory address reloads too, we don't deal with it very
1796 /* case SB_REGNO: */
1799 if (GET_CODE (reg
) == SUBREG
)
1801 if (IS_PSEUDO (reg
, strict
))
1809 /* Implements REG_OK_FOR_BASE_P. */
1811 m32c_reg_ok_for_base_p (rtx x
, int strict
)
1813 if (GET_CODE (x
) != REG
)
1824 if (IS_PSEUDO (x
, strict
))
1830 /* We have three choices for choosing fb->aN offsets. If we choose -128,
1831 we need one MOVA -128[fb],aN opcode and 16-bit aN displacements,
1833 EB 4B FF mova -128[$fb],$a0
1834 D8 0C FF FF mov.w:Q #0,-1[$a0]
1836 Alternately, we subtract the frame size, and hopefully use 8-bit aN
1839 77 54 00 01 sub #256,$a0
1840 D8 08 01 mov.w:Q #0,1[$a0]
1842 If we don't offset (i.e. offset by zero), we end up with:
1844 D8 0C 00 FF mov.w:Q #0,-256[$a0]
1846 We have to subtract *something* so that we have a PLUS rtx to mark
1847 that we've done this reload. The -128 offset will never result in
1848 an 8-bit aN offset, and the payoff for the second case is five
1849 loads *if* those loads are within 256 bytes of the other end of the
1850 frame, so the third case seems best. Note that we subtract the
1851 zero, but detect that in the addhi3 pattern. */
1853 #define BIG_FB_ADJ 0
1855 /* Implements LEGITIMIZE_ADDRESS. The only address we really have to
1856 worry about is frame base offsets, as $fb has a limited
1857 displacement range. We deal with this by attempting to reload $fb
1858 itself into an address register; that seems to result in the best
1860 #undef TARGET_LEGITIMIZE_ADDRESS
1861 #define TARGET_LEGITIMIZE_ADDRESS m32c_legitimize_address
1863 m32c_legitimize_address (rtx x
, rtx oldx ATTRIBUTE_UNUSED
,
1867 fprintf (stderr
, "m32c_legitimize_address for mode %s\n", mode_name
[mode
]);
1869 fprintf (stderr
, "\n");
1872 if (GET_CODE (x
) == PLUS
1873 && GET_CODE (XEXP (x
, 0)) == REG
1874 && REGNO (XEXP (x
, 0)) == FB_REGNO
1875 && GET_CODE (XEXP (x
, 1)) == CONST_INT
1876 && (INTVAL (XEXP (x
, 1)) < -128
1877 || INTVAL (XEXP (x
, 1)) > (128 - GET_MODE_SIZE (mode
))))
1879 /* reload FB to A_REGS */
1880 rtx temp
= gen_reg_rtx (Pmode
);
1882 emit_insn (gen_rtx_SET (VOIDmode
, temp
, XEXP (x
, 0)));
1889 /* Implements LEGITIMIZE_RELOAD_ADDRESS. See comment above. */
1891 m32c_legitimize_reload_address (rtx
* x
,
1894 int type
, int ind_levels ATTRIBUTE_UNUSED
)
1897 fprintf (stderr
, "\nm32c_legitimize_reload_address for mode %s\n",
1902 /* At one point, this function tried to get $fb copied to an address
1903 register, which in theory would maximize sharing, but gcc was
1904 *also* still trying to reload the whole address, and we'd run out
1905 of address registers. So we let gcc do the naive (but safe)
1906 reload instead, when the above function doesn't handle it for
1909 The code below is a second attempt at the above. */
1911 if (GET_CODE (*x
) == PLUS
1912 && GET_CODE (XEXP (*x
, 0)) == REG
1913 && REGNO (XEXP (*x
, 0)) == FB_REGNO
1914 && GET_CODE (XEXP (*x
, 1)) == CONST_INT
1915 && (INTVAL (XEXP (*x
, 1)) < -128
1916 || INTVAL (XEXP (*x
, 1)) > (128 - GET_MODE_SIZE (mode
))))
1919 int offset
= INTVAL (XEXP (*x
, 1));
1920 int adjustment
= -BIG_FB_ADJ
;
1922 sum
= gen_rtx_PLUS (Pmode
, XEXP (*x
, 0),
1923 GEN_INT (adjustment
));
1924 *x
= gen_rtx_PLUS (Pmode
, sum
, GEN_INT (offset
- adjustment
));
1925 if (type
== RELOAD_OTHER
)
1926 type
= RELOAD_FOR_OTHER_ADDRESS
;
1927 push_reload (sum
, NULL_RTX
, &XEXP (*x
, 0), NULL
,
1928 A_REGS
, Pmode
, VOIDmode
, 0, 0, opnum
,
1929 (enum reload_type
) type
);
1933 if (GET_CODE (*x
) == PLUS
1934 && GET_CODE (XEXP (*x
, 0)) == PLUS
1935 && GET_CODE (XEXP (XEXP (*x
, 0), 0)) == REG
1936 && REGNO (XEXP (XEXP (*x
, 0), 0)) == FB_REGNO
1937 && GET_CODE (XEXP (XEXP (*x
, 0), 1)) == CONST_INT
1938 && GET_CODE (XEXP (*x
, 1)) == CONST_INT
1941 if (type
== RELOAD_OTHER
)
1942 type
= RELOAD_FOR_OTHER_ADDRESS
;
1943 push_reload (XEXP (*x
, 0), NULL_RTX
, &XEXP (*x
, 0), NULL
,
1944 A_REGS
, Pmode
, VOIDmode
, 0, 0, opnum
,
1945 (enum reload_type
) type
);
1952 /* Return the appropriate mode for a named address pointer. */
1953 #undef TARGET_ADDR_SPACE_POINTER_MODE
1954 #define TARGET_ADDR_SPACE_POINTER_MODE m32c_addr_space_pointer_mode
1956 m32c_addr_space_pointer_mode (addr_space_t addrspace
)
1960 case ADDR_SPACE_GENERIC
:
1961 return TARGET_A24
? PSImode
: HImode
;
1962 case ADDR_SPACE_FAR
:
1969 /* Return the appropriate mode for a named address address. */
1970 #undef TARGET_ADDR_SPACE_ADDRESS_MODE
1971 #define TARGET_ADDR_SPACE_ADDRESS_MODE m32c_addr_space_address_mode
1973 m32c_addr_space_address_mode (addr_space_t addrspace
)
1977 case ADDR_SPACE_GENERIC
:
1978 return TARGET_A24
? PSImode
: HImode
;
1979 case ADDR_SPACE_FAR
:
1986 /* Like m32c_legitimate_address_p, except with named addresses. */
1987 #undef TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P
1988 #define TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P \
1989 m32c_addr_space_legitimate_address_p
1991 m32c_addr_space_legitimate_address_p (machine_mode mode
, rtx x
,
1992 bool strict
, addr_space_t as
)
1994 if (as
== ADDR_SPACE_FAR
)
2001 if (GET_MODE (x
) != SImode
)
2003 switch (REGNO (patternr
[0]))
2009 if (IS_PSEUDO (patternr
[0], strict
))
2014 if (RTX_IS ("+^Sri"))
2016 int rn
= REGNO (patternr
[3]);
2017 HOST_WIDE_INT offs
= INTVAL (patternr
[4]);
2018 if (GET_MODE (patternr
[3]) != HImode
)
2023 return (offs
>= 0 && offs
<= 0xfffff);
2026 if (IS_PSEUDO (patternr
[3], strict
))
2031 if (RTX_IS ("+^Srs"))
2033 int rn
= REGNO (patternr
[3]);
2034 if (GET_MODE (patternr
[3]) != HImode
)
2042 if (IS_PSEUDO (patternr
[3], strict
))
2047 if (RTX_IS ("+^S+ris"))
2049 int rn
= REGNO (patternr
[4]);
2050 if (GET_MODE (patternr
[4]) != HImode
)
2058 if (IS_PSEUDO (patternr
[4], strict
))
2070 else if (as
!= ADDR_SPACE_GENERIC
)
2073 return m32c_legitimate_address_p (mode
, x
, strict
);
2076 /* Like m32c_legitimate_address, except with named address support. */
2077 #undef TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS
2078 #define TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS m32c_addr_space_legitimize_address
2080 m32c_addr_space_legitimize_address (rtx x
, rtx oldx
, machine_mode mode
,
2083 if (as
!= ADDR_SPACE_GENERIC
)
2086 fprintf (stderr
, "\033[36mm32c_addr_space_legitimize_address for mode %s\033[0m\n", mode_name
[mode
]);
2088 fprintf (stderr
, "\n");
2091 if (GET_CODE (x
) != REG
)
2093 x
= force_reg (SImode
, x
);
2098 return m32c_legitimize_address (x
, oldx
, mode
);
2101 /* Determine if one named address space is a subset of another. */
2102 #undef TARGET_ADDR_SPACE_SUBSET_P
2103 #define TARGET_ADDR_SPACE_SUBSET_P m32c_addr_space_subset_p
2105 m32c_addr_space_subset_p (addr_space_t subset
, addr_space_t superset
)
2107 gcc_assert (subset
== ADDR_SPACE_GENERIC
|| subset
== ADDR_SPACE_FAR
);
2108 gcc_assert (superset
== ADDR_SPACE_GENERIC
|| superset
== ADDR_SPACE_FAR
);
2110 if (subset
== superset
)
2114 return (subset
== ADDR_SPACE_GENERIC
&& superset
== ADDR_SPACE_FAR
);
2117 #undef TARGET_ADDR_SPACE_CONVERT
2118 #define TARGET_ADDR_SPACE_CONVERT m32c_addr_space_convert
2119 /* Convert from one address space to another. */
2121 m32c_addr_space_convert (rtx op
, tree from_type
, tree to_type
)
2123 addr_space_t from_as
= TYPE_ADDR_SPACE (TREE_TYPE (from_type
));
2124 addr_space_t to_as
= TYPE_ADDR_SPACE (TREE_TYPE (to_type
));
2127 gcc_assert (from_as
== ADDR_SPACE_GENERIC
|| from_as
== ADDR_SPACE_FAR
);
2128 gcc_assert (to_as
== ADDR_SPACE_GENERIC
|| to_as
== ADDR_SPACE_FAR
);
2130 if (to_as
== ADDR_SPACE_GENERIC
&& from_as
== ADDR_SPACE_FAR
)
2132 /* This is unpredictable, as we're truncating off usable address
2135 result
= gen_reg_rtx (HImode
);
2136 emit_move_insn (result
, simplify_subreg (HImode
, op
, SImode
, 0));
2139 else if (to_as
== ADDR_SPACE_FAR
&& from_as
== ADDR_SPACE_GENERIC
)
2141 /* This always works. */
2142 result
= gen_reg_rtx (SImode
);
2143 emit_insn (gen_zero_extendhisi2 (result
, op
));
2150 /* Condition Code Status */
2152 #undef TARGET_FIXED_CONDITION_CODE_REGS
2153 #define TARGET_FIXED_CONDITION_CODE_REGS m32c_fixed_condition_code_regs
2155 m32c_fixed_condition_code_regs (unsigned int *p1
, unsigned int *p2
)
2158 *p2
= INVALID_REGNUM
;
2162 /* Describing Relative Costs of Operations */
2164 /* Implements TARGET_REGISTER_MOVE_COST. We make impossible moves
2165 prohibitively expensive, like trying to put QIs in r2/r3 (there are
2166 no opcodes to do that). We also discourage use of mem* registers
2167 since they're really memory. */
2169 #undef TARGET_REGISTER_MOVE_COST
2170 #define TARGET_REGISTER_MOVE_COST m32c_register_move_cost
2173 m32c_register_move_cost (machine_mode mode
, reg_class_t from
,
2176 int cost
= COSTS_N_INSNS (3);
2179 /* FIXME: pick real values, but not 2 for now. */
2180 COPY_HARD_REG_SET (cc
, reg_class_contents
[(int) from
]);
2181 IOR_HARD_REG_SET (cc
, reg_class_contents
[(int) to
]);
2184 && hard_reg_set_intersect_p (cc
, reg_class_contents
[R23_REGS
]))
2186 if (hard_reg_set_subset_p (cc
, reg_class_contents
[R23_REGS
]))
2187 cost
= COSTS_N_INSNS (1000);
2189 cost
= COSTS_N_INSNS (80);
2192 if (!class_can_hold_mode (from
, mode
) || !class_can_hold_mode (to
, mode
))
2193 cost
= COSTS_N_INSNS (1000);
2195 if (reg_classes_intersect_p (from
, CR_REGS
))
2196 cost
+= COSTS_N_INSNS (5);
2198 if (reg_classes_intersect_p (to
, CR_REGS
))
2199 cost
+= COSTS_N_INSNS (5);
2201 if (from
== MEM_REGS
|| to
== MEM_REGS
)
2202 cost
+= COSTS_N_INSNS (50);
2203 else if (reg_classes_intersect_p (from
, MEM_REGS
)
2204 || reg_classes_intersect_p (to
, MEM_REGS
))
2205 cost
+= COSTS_N_INSNS (10);
2208 fprintf (stderr
, "register_move_cost %s from %s to %s = %d\n",
2209 mode_name
[mode
], class_names
[(int) from
], class_names
[(int) to
],
2215 /* Implements TARGET_MEMORY_MOVE_COST. */
2217 #undef TARGET_MEMORY_MOVE_COST
2218 #define TARGET_MEMORY_MOVE_COST m32c_memory_move_cost
2221 m32c_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED
,
2222 reg_class_t rclass ATTRIBUTE_UNUSED
,
2223 bool in ATTRIBUTE_UNUSED
)
2225 /* FIXME: pick real values. */
2226 return COSTS_N_INSNS (10);
2229 /* Here we try to describe when we use multiple opcodes for one RTX so
2230 that gcc knows when to use them. */
2231 #undef TARGET_RTX_COSTS
2232 #define TARGET_RTX_COSTS m32c_rtx_costs
2234 m32c_rtx_costs (rtx x
, int code
, int outer_code
, int opno ATTRIBUTE_UNUSED
,
2235 int *total
, bool speed ATTRIBUTE_UNUSED
)
2240 if (REGNO (x
) >= MEM0_REGNO
&& REGNO (x
) <= MEM7_REGNO
)
2241 *total
+= COSTS_N_INSNS (500);
2243 *total
+= COSTS_N_INSNS (1);
2249 if (GET_CODE (XEXP (x
, 1)) != CONST_INT
)
2251 /* mov.b r1l, r1h */
2252 *total
+= COSTS_N_INSNS (1);
2255 if (INTVAL (XEXP (x
, 1)) > 8
2256 || INTVAL (XEXP (x
, 1)) < -8)
2259 /* mov.b r1l, r1h */
2260 *total
+= COSTS_N_INSNS (2);
2275 if (outer_code
== SET
)
2277 *total
+= COSTS_N_INSNS (2);
2284 rtx dest
= XEXP (x
, 0);
2285 rtx addr
= XEXP (dest
, 0);
2286 switch (GET_CODE (addr
))
2289 *total
+= COSTS_N_INSNS (1);
2292 *total
+= COSTS_N_INSNS (3);
2295 *total
+= COSTS_N_INSNS (2);
2303 /* Reasonable default. */
2304 if (TARGET_A16
&& GET_MODE(x
) == SImode
)
2305 *total
+= COSTS_N_INSNS (2);
2311 #undef TARGET_ADDRESS_COST
2312 #define TARGET_ADDRESS_COST m32c_address_cost
2314 m32c_address_cost (rtx addr
, machine_mode mode ATTRIBUTE_UNUSED
,
2315 addr_space_t as ATTRIBUTE_UNUSED
,
2316 bool speed ATTRIBUTE_UNUSED
)
2319 /* fprintf(stderr, "\naddress_cost\n");
2321 switch (GET_CODE (addr
))
2326 return COSTS_N_INSNS(1);
2327 if (0 < i
&& i
<= 255)
2328 return COSTS_N_INSNS(2);
2329 if (0 < i
&& i
<= 65535)
2330 return COSTS_N_INSNS(3);
2331 return COSTS_N_INSNS(4);
2333 return COSTS_N_INSNS(4);
2335 return COSTS_N_INSNS(1);
2337 if (GET_CODE (XEXP (addr
, 1)) == CONST_INT
)
2339 i
= INTVAL (XEXP (addr
, 1));
2341 return COSTS_N_INSNS(1);
2342 if (0 < i
&& i
<= 255)
2343 return COSTS_N_INSNS(2);
2344 if (0 < i
&& i
<= 65535)
2345 return COSTS_N_INSNS(3);
2347 return COSTS_N_INSNS(4);
2353 /* Defining the Output Assembler Language */
2355 /* Output of Data */
2357 /* We may have 24 bit sizes, which is the native address size.
2358 Currently unused, but provided for completeness. */
2359 #undef TARGET_ASM_INTEGER
2360 #define TARGET_ASM_INTEGER m32c_asm_integer
2362 m32c_asm_integer (rtx x
, unsigned int size
, int aligned_p
)
2367 fprintf (asm_out_file
, "\t.3byte\t");
2368 output_addr_const (asm_out_file
, x
);
2369 fputc ('\n', asm_out_file
);
2372 if (GET_CODE (x
) == SYMBOL_REF
)
2374 fprintf (asm_out_file
, "\t.long\t");
2375 output_addr_const (asm_out_file
, x
);
2376 fputc ('\n', asm_out_file
);
2381 return default_assemble_integer (x
, size
, aligned_p
);
2384 /* Output of Assembler Instructions */
2386 /* We use a lookup table because the addressing modes are non-orthogonal. */
2391 char const *pattern
;
2394 const conversions
[] = {
2397 { 0, "mr", "z[1]" },
2398 { 0, "m+ri", "3[2]" },
2399 { 0, "m+rs", "3[2]" },
2400 { 0, "m+^Zrs", "5[4]" },
2401 { 0, "m+^Zri", "5[4]" },
2402 { 0, "m+^Z+ris", "7+6[5]" },
2403 { 0, "m+^Srs", "5[4]" },
2404 { 0, "m+^Sri", "5[4]" },
2405 { 0, "m+^S+ris", "7+6[5]" },
2406 { 0, "m+r+si", "4+5[2]" },
2409 { 0, "m+si", "2+3" },
2411 { 0, "mmr", "[z[2]]" },
2412 { 0, "mm+ri", "[4[3]]" },
2413 { 0, "mm+rs", "[4[3]]" },
2414 { 0, "mm+r+si", "[5+6[3]]" },
2415 { 0, "mms", "[[2]]" },
2416 { 0, "mmi", "[[2]]" },
2417 { 0, "mm+si", "[4[3]]" },
2421 { 0, "+si", "#1+2" },
2427 { 'd', "+si", "1+2" },
2430 { 'D', "+si", "1+2" },
2441 /* This is in order according to the bitfield that pushm/popm use. */
2442 static char const *pushm_regs
[] = {
2443 "fb", "sb", "a1", "a0", "r3", "r2", "r1", "r0"
2446 /* Implements TARGET_PRINT_OPERAND. */
2448 #undef TARGET_PRINT_OPERAND
2449 #define TARGET_PRINT_OPERAND m32c_print_operand
2452 m32c_print_operand (FILE * file
, rtx x
, int code
)
2457 int unsigned_const
= 0;
2460 /* Multiplies; constants are converted to sign-extended format but
2461 we need unsigned, so 'u' and 'U' tell us what size unsigned we
2473 /* This one is only for debugging; you can put it in a pattern to
2474 force this error. */
2477 fprintf (stderr
, "dj: unreviewed pattern:");
2478 if (current_output_insn
)
2479 debug_rtx (current_output_insn
);
2482 /* PSImode operations are either .w or .l depending on the target. */
2486 fprintf (file
, "w");
2488 fprintf (file
, "l");
2491 /* Inverted conditionals. */
2494 switch (GET_CODE (x
))
2500 fputs ("gtu", file
);
2506 fputs ("geu", file
);
2512 fputs ("leu", file
);
2518 fputs ("ltu", file
);
2531 /* Regular conditionals. */
2534 switch (GET_CODE (x
))
2540 fputs ("leu", file
);
2546 fputs ("ltu", file
);
2552 fputs ("gtu", file
);
2558 fputs ("geu", file
);
2571 /* Used in negsi2 to do HImode ops on the two parts of an SImode
2573 if (code
== 'h' && GET_MODE (x
) == SImode
)
2575 x
= m32c_subreg (HImode
, x
, SImode
, 0);
2578 if (code
== 'H' && GET_MODE (x
) == SImode
)
2580 x
= m32c_subreg (HImode
, x
, SImode
, 2);
2583 if (code
== 'h' && GET_MODE (x
) == HImode
)
2585 x
= m32c_subreg (QImode
, x
, HImode
, 0);
2588 if (code
== 'H' && GET_MODE (x
) == HImode
)
2590 /* We can't actually represent this as an rtx. Do it here. */
2591 if (GET_CODE (x
) == REG
)
2596 fputs ("r0h", file
);
2599 fputs ("r1h", file
);
2605 /* This should be a MEM. */
2606 x
= m32c_subreg (QImode
, x
, HImode
, 1);
2609 /* This is for BMcond, which always wants word register names. */
2610 if (code
== 'h' && GET_MODE (x
) == QImode
)
2612 if (GET_CODE (x
) == REG
)
2613 x
= gen_rtx_REG (HImode
, REGNO (x
));
2616 /* 'x' and 'X' need to be ignored for non-immediates. */
2617 if ((code
== 'x' || code
== 'X') && GET_CODE (x
) != CONST_INT
)
2622 for (i
= 0; conversions
[i
].pattern
; i
++)
2623 if (conversions
[i
].code
== code
2624 && streq (conversions
[i
].pattern
, pattern
))
2626 for (j
= 0; conversions
[i
].format
[j
]; j
++)
2627 /* backslash quotes the next character in the output pattern. */
2628 if (conversions
[i
].format
[j
] == '\\')
2630 fputc (conversions
[i
].format
[j
+ 1], file
);
2633 /* Digits in the output pattern indicate that the
2634 corresponding RTX is to be output at that point. */
2635 else if (ISDIGIT (conversions
[i
].format
[j
]))
2637 rtx r
= patternr
[conversions
[i
].format
[j
] - '0'];
2638 switch (GET_CODE (r
))
2641 fprintf (file
, "%s",
2642 reg_name_with_mode (REGNO (r
), GET_MODE (r
)));
2651 int i
= (int) exact_log2 (v
);
2653 i
= (int) exact_log2 ((v
^ 0xffff) & 0xffff);
2655 i
= (int) exact_log2 ((v
^ 0xff) & 0xff);
2657 fprintf (file
, "%d", i
);
2661 /* Unsigned byte. */
2662 fprintf (file
, HOST_WIDE_INT_PRINT_HEX
,
2666 /* Unsigned word. */
2667 fprintf (file
, HOST_WIDE_INT_PRINT_HEX
,
2668 INTVAL (r
) & 0xffff);
2671 /* pushm and popm encode a register set into a single byte. */
2673 for (b
= 7; b
>= 0; b
--)
2674 if (INTVAL (r
) & (1 << b
))
2676 fprintf (file
, "%s%s", comma
, pushm_regs
[b
]);
2681 /* "Minus". Output -X */
2682 ival
= (-INTVAL (r
) & 0xffff);
2684 ival
= ival
- 0x10000;
2685 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, ival
);
2689 if (conversions
[i
].format
[j
+ 1] == '[' && ival
< 0)
2691 /* We can simulate negative displacements by
2692 taking advantage of address space
2693 wrapping when the offset can span the
2694 entire address range. */
2696 patternr
[conversions
[i
].format
[j
+ 2] - '0'];
2697 if (GET_CODE (base
) == REG
)
2698 switch (REGNO (base
))
2703 ival
= 0x1000000 + ival
;
2705 ival
= 0x10000 + ival
;
2709 ival
= 0x10000 + ival
;
2713 else if (code
== 'd' && ival
< 0 && j
== 0)
2714 /* The "mova" opcode is used to do addition by
2715 computing displacements, but again, we need
2716 displacements to be unsigned *if* they're
2717 the only component of the displacement
2718 (i.e. no "symbol-4" type displacement). */
2719 ival
= (TARGET_A24
? 0x1000000 : 0x10000) + ival
;
2721 if (conversions
[i
].format
[j
] == '0')
2723 /* More conversions to unsigned. */
2724 if (unsigned_const
== 2)
2726 if (unsigned_const
== 1)
2729 if (streq (conversions
[i
].pattern
, "mi")
2730 || streq (conversions
[i
].pattern
, "mmi"))
2732 /* Integers used as addresses are unsigned. */
2733 ival
&= (TARGET_A24
? 0xffffff : 0xffff);
2735 if (force_sign
&& ival
>= 0)
2737 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, ival
);
2742 /* We don't have const_double constants. If it
2743 happens, make it obvious. */
2744 fprintf (file
, "[const_double 0x%lx]",
2745 (unsigned long) CONST_DOUBLE_HIGH (r
));
2748 assemble_name (file
, XSTR (r
, 0));
2751 output_asm_label (r
);
2754 fprintf (stderr
, "don't know how to print this operand:");
2761 if (conversions
[i
].format
[j
] == 'z')
2763 /* Some addressing modes *must* have a displacement,
2764 so insert a zero here if needed. */
2766 for (k
= j
+ 1; conversions
[i
].format
[k
]; k
++)
2767 if (ISDIGIT (conversions
[i
].format
[k
]))
2769 rtx reg
= patternr
[conversions
[i
].format
[k
] - '0'];
2770 if (GET_CODE (reg
) == REG
2771 && (REGNO (reg
) == SB_REGNO
2772 || REGNO (reg
) == FB_REGNO
2773 || REGNO (reg
) == SP_REGNO
))
2778 /* Signed displacements off symbols need to have signs
2780 if (conversions
[i
].format
[j
] == '+'
2781 && (!code
|| code
== 'D' || code
== 'd')
2782 && ISDIGIT (conversions
[i
].format
[j
+ 1])
2783 && (GET_CODE (patternr
[conversions
[i
].format
[j
+ 1] - '0'])
2789 fputc (conversions
[i
].format
[j
], file
);
2793 if (!conversions
[i
].pattern
)
2795 fprintf (stderr
, "unconvertible operand %c `%s'", code
? code
: '-',
2798 fprintf (file
, "[%c.%s]", code
? code
: '-', pattern
);
2804 /* Implements TARGET_PRINT_OPERAND_PUNCT_VALID_P.
2806 See m32c_print_operand above for descriptions of what these do. */
2808 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
2809 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P m32c_print_operand_punct_valid_p
2812 m32c_print_operand_punct_valid_p (unsigned char c
)
2814 if (c
== '&' || c
== '!')
2820 /* Implements TARGET_PRINT_OPERAND_ADDRESS. Nothing unusual here. */
2822 #undef TARGET_PRINT_OPERAND_ADDRESS
2823 #define TARGET_PRINT_OPERAND_ADDRESS m32c_print_operand_address
2826 m32c_print_operand_address (FILE * stream
, rtx address
)
2828 if (GET_CODE (address
) == MEM
)
2829 address
= XEXP (address
, 0);
2831 /* cf: gcc.dg/asm-4.c. */
2832 gcc_assert (GET_CODE (address
) == REG
);
2834 m32c_print_operand (stream
, address
, 0);
2837 /* Implements ASM_OUTPUT_REG_PUSH. Control registers are pushed
2838 differently than general registers. */
2840 m32c_output_reg_push (FILE * s
, int regno
)
2842 if (regno
== FLG_REGNO
)
2843 fprintf (s
, "\tpushc\tflg\n");
2845 fprintf (s
, "\tpush.%c\t%s\n",
2846 " bwll"[reg_push_size (regno
)], reg_names
[regno
]);
2849 /* Likewise for ASM_OUTPUT_REG_POP. */
2851 m32c_output_reg_pop (FILE * s
, int regno
)
2853 if (regno
== FLG_REGNO
)
2854 fprintf (s
, "\tpopc\tflg\n");
2856 fprintf (s
, "\tpop.%c\t%s\n",
2857 " bwll"[reg_push_size (regno
)], reg_names
[regno
]);
2860 /* Defining target-specific uses of `__attribute__' */
2862 /* Used to simplify the logic below. Find the attributes wherever
2864 #define M32C_ATTRIBUTES(decl) \
2865 (TYPE_P (decl)) ? TYPE_ATTRIBUTES (decl) \
2866 : DECL_ATTRIBUTES (decl) \
2867 ? (DECL_ATTRIBUTES (decl)) \
2868 : TYPE_ATTRIBUTES (TREE_TYPE (decl))
2870 /* Returns TRUE if the given tree has the "interrupt" attribute. */
2872 interrupt_p (tree node ATTRIBUTE_UNUSED
)
2874 tree list
= M32C_ATTRIBUTES (node
);
2877 if (is_attribute_p ("interrupt", TREE_PURPOSE (list
)))
2879 list
= TREE_CHAIN (list
);
2881 return fast_interrupt_p (node
);
2884 /* Returns TRUE if the given tree has the "bank_switch" attribute. */
2886 bank_switch_p (tree node ATTRIBUTE_UNUSED
)
2888 tree list
= M32C_ATTRIBUTES (node
);
2891 if (is_attribute_p ("bank_switch", TREE_PURPOSE (list
)))
2893 list
= TREE_CHAIN (list
);
2898 /* Returns TRUE if the given tree has the "fast_interrupt" attribute. */
2900 fast_interrupt_p (tree node ATTRIBUTE_UNUSED
)
2902 tree list
= M32C_ATTRIBUTES (node
);
2905 if (is_attribute_p ("fast_interrupt", TREE_PURPOSE (list
)))
2907 list
= TREE_CHAIN (list
);
2913 interrupt_handler (tree
* node ATTRIBUTE_UNUSED
,
2914 tree name ATTRIBUTE_UNUSED
,
2915 tree args ATTRIBUTE_UNUSED
,
2916 int flags ATTRIBUTE_UNUSED
,
2917 bool * no_add_attrs ATTRIBUTE_UNUSED
)
2922 /* Returns TRUE if given tree has the "function_vector" attribute. */
2924 m32c_special_page_vector_p (tree func
)
2928 if (TREE_CODE (func
) != FUNCTION_DECL
)
2931 list
= M32C_ATTRIBUTES (func
);
2934 if (is_attribute_p ("function_vector", TREE_PURPOSE (list
)))
2936 list
= TREE_CHAIN (list
);
2942 function_vector_handler (tree
* node ATTRIBUTE_UNUSED
,
2943 tree name ATTRIBUTE_UNUSED
,
2944 tree args ATTRIBUTE_UNUSED
,
2945 int flags ATTRIBUTE_UNUSED
,
2946 bool * no_add_attrs ATTRIBUTE_UNUSED
)
2950 /* The attribute is not supported for R8C target. */
2951 warning (OPT_Wattributes
,
2952 "%qE attribute is not supported for R8C target",
2954 *no_add_attrs
= true;
2956 else if (TREE_CODE (*node
) != FUNCTION_DECL
)
2958 /* The attribute must be applied to functions only. */
2959 warning (OPT_Wattributes
,
2960 "%qE attribute applies only to functions",
2962 *no_add_attrs
= true;
2964 else if (TREE_CODE (TREE_VALUE (args
)) != INTEGER_CST
)
2966 /* The argument must be a constant integer. */
2967 warning (OPT_Wattributes
,
2968 "%qE attribute argument not an integer constant",
2970 *no_add_attrs
= true;
2972 else if (TREE_INT_CST_LOW (TREE_VALUE (args
)) < 18
2973 || TREE_INT_CST_LOW (TREE_VALUE (args
)) > 255)
2975 /* The argument value must be between 18 to 255. */
2976 warning (OPT_Wattributes
,
2977 "%qE attribute argument should be between 18 to 255",
2979 *no_add_attrs
= true;
2984 /* If the function is assigned the attribute 'function_vector', it
2985 returns the function vector number, otherwise returns zero. */
2987 current_function_special_page_vector (rtx x
)
2991 if ((GET_CODE(x
) == SYMBOL_REF
)
2992 && (SYMBOL_REF_FLAGS (x
) & SYMBOL_FLAG_FUNCVEC_FUNCTION
))
2995 tree t
= SYMBOL_REF_DECL (x
);
2997 if (TREE_CODE (t
) != FUNCTION_DECL
)
3000 list
= M32C_ATTRIBUTES (t
);
3003 if (is_attribute_p ("function_vector", TREE_PURPOSE (list
)))
3005 num
= TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list
)));
3009 list
= TREE_CHAIN (list
);
3018 #undef TARGET_ATTRIBUTE_TABLE
3019 #define TARGET_ATTRIBUTE_TABLE m32c_attribute_table
3020 static const struct attribute_spec m32c_attribute_table
[] = {
3021 {"interrupt", 0, 0, false, false, false, interrupt_handler
, false},
3022 {"bank_switch", 0, 0, false, false, false, interrupt_handler
, false},
3023 {"fast_interrupt", 0, 0, false, false, false, interrupt_handler
, false},
3024 {"function_vector", 1, 1, true, false, false, function_vector_handler
,
3026 {0, 0, 0, 0, 0, 0, 0, false}
3029 #undef TARGET_COMP_TYPE_ATTRIBUTES
3030 #define TARGET_COMP_TYPE_ATTRIBUTES m32c_comp_type_attributes
3032 m32c_comp_type_attributes (const_tree type1 ATTRIBUTE_UNUSED
,
3033 const_tree type2 ATTRIBUTE_UNUSED
)
3035 /* 0=incompatible 1=compatible 2=warning */
3039 #undef TARGET_INSERT_ATTRIBUTES
3040 #define TARGET_INSERT_ATTRIBUTES m32c_insert_attributes
3042 m32c_insert_attributes (tree node ATTRIBUTE_UNUSED
,
3043 tree
* attr_ptr ATTRIBUTE_UNUSED
)
3046 /* See if we need to make #pragma address variables volatile. */
3048 if (TREE_CODE (node
) == VAR_DECL
)
3050 const char *name
= IDENTIFIER_POINTER (DECL_NAME (node
));
3051 if (m32c_get_pragma_address (name
, &addr
))
3053 TREE_THIS_VOLATILE (node
) = true;
3059 struct pragma_traits
: default_hashmap_traits
3061 static hashval_t
hash (const char *str
) { return htab_hash_string (str
); }
3063 equal_keys (const char *a
, const char *b
)
3065 return !strcmp (a
, b
);
3069 /* Hash table of pragma info. */
3070 static GTY(()) hash_map
<const char *, unsigned, pragma_traits
> *pragma_htab
;
3073 m32c_note_pragma_address (const char *varname
, unsigned address
)
3077 = hash_map
<const char *, unsigned, pragma_traits
>::create_ggc (31);
3079 const char *name
= ggc_strdup (varname
);
3080 unsigned int *slot
= &pragma_htab
->get_or_insert (name
);
3085 m32c_get_pragma_address (const char *varname
, unsigned *address
)
3090 unsigned int *slot
= pragma_htab
->get (varname
);
3100 m32c_output_aligned_common (FILE *stream
, tree decl ATTRIBUTE_UNUSED
,
3102 int size
, int align
, int global
)
3106 if (m32c_get_pragma_address (name
, &address
))
3108 /* We never output these as global. */
3109 assemble_name (stream
, name
);
3110 fprintf (stream
, " = 0x%04x\n", address
);
3115 fprintf (stream
, "\t.local\t");
3116 assemble_name (stream
, name
);
3117 fprintf (stream
, "\n");
3119 fprintf (stream
, "\t.comm\t");
3120 assemble_name (stream
, name
);
3121 fprintf (stream
, ",%u,%u\n", size
, align
/ BITS_PER_UNIT
);
3126 /* This is a list of legal subregs of hard regs. */
3127 static const struct {
3128 unsigned char outer_mode_size
;
3129 unsigned char inner_mode_size
;
3130 unsigned char byte_mask
;
3131 unsigned char legal_when
;
3133 } legal_subregs
[] = {
3134 {1, 2, 0x03, 1, R0_REGNO
}, /* r0h r0l */
3135 {1, 2, 0x03, 1, R1_REGNO
}, /* r1h r1l */
3136 {1, 2, 0x01, 1, A0_REGNO
},
3137 {1, 2, 0x01, 1, A1_REGNO
},
3139 {1, 4, 0x01, 1, A0_REGNO
},
3140 {1, 4, 0x01, 1, A1_REGNO
},
3142 {2, 4, 0x05, 1, R0_REGNO
}, /* r2 r0 */
3143 {2, 4, 0x05, 1, R1_REGNO
}, /* r3 r1 */
3144 {2, 4, 0x05, 16, A0_REGNO
}, /* a1 a0 */
3145 {2, 4, 0x01, 24, A0_REGNO
}, /* a1 a0 */
3146 {2, 4, 0x01, 24, A1_REGNO
}, /* a1 a0 */
3148 {4, 8, 0x55, 1, R0_REGNO
}, /* r3 r1 r2 r0 */
3151 /* Returns TRUE if OP is a subreg of a hard reg which we don't
3152 support. We also bail on MEMs with illegal addresses. */
3154 m32c_illegal_subreg_p (rtx op
)
3158 machine_mode src_mode
, dest_mode
;
3160 if (GET_CODE (op
) == MEM
3161 && ! m32c_legitimate_address_p (Pmode
, XEXP (op
, 0), false))
3166 if (GET_CODE (op
) != SUBREG
)
3169 dest_mode
= GET_MODE (op
);
3170 offset
= SUBREG_BYTE (op
);
3171 op
= SUBREG_REG (op
);
3172 src_mode
= GET_MODE (op
);
3174 if (GET_MODE_SIZE (dest_mode
) == GET_MODE_SIZE (src_mode
))
3176 if (GET_CODE (op
) != REG
)
3178 if (REGNO (op
) >= MEM0_REGNO
)
3181 offset
= (1 << offset
);
3183 for (i
= 0; i
< ARRAY_SIZE (legal_subregs
); i
++)
3184 if (legal_subregs
[i
].outer_mode_size
== GET_MODE_SIZE (dest_mode
)
3185 && legal_subregs
[i
].regno
== REGNO (op
)
3186 && legal_subregs
[i
].inner_mode_size
== GET_MODE_SIZE (src_mode
)
3187 && legal_subregs
[i
].byte_mask
& offset
)
3189 switch (legal_subregs
[i
].legal_when
)
3206 /* Returns TRUE if we support a move between the first two operands.
3207 At the moment, we just want to discourage mem to mem moves until
3208 after reload, because reload has a hard time with our limited
3209 number of address registers, and we can get into a situation where
3210 we need three of them when we only have two. */
3212 m32c_mov_ok (rtx
* operands
, machine_mode mode ATTRIBUTE_UNUSED
)
3214 rtx op0
= operands
[0];
3215 rtx op1
= operands
[1];
3220 #define DEBUG_MOV_OK 0
3222 fprintf (stderr
, "m32c_mov_ok %s\n", mode_name
[mode
]);
3227 if (GET_CODE (op0
) == SUBREG
)
3228 op0
= XEXP (op0
, 0);
3229 if (GET_CODE (op1
) == SUBREG
)
3230 op1
= XEXP (op1
, 0);
3232 if (GET_CODE (op0
) == MEM
3233 && GET_CODE (op1
) == MEM
3234 && ! reload_completed
)
3237 fprintf (stderr
, " - no, mem to mem\n");
3243 fprintf (stderr
, " - ok\n");
3248 /* Returns TRUE if two consecutive HImode mov instructions, generated
3249 for moving an immediate double data to a double data type variable
3250 location, can be combined into single SImode mov instruction. */
3252 m32c_immd_dbl_mov (rtx
* operands ATTRIBUTE_UNUSED
,
3253 machine_mode mode ATTRIBUTE_UNUSED
)
3255 /* ??? This relied on the now-defunct MEM_SCALAR and MEM_IN_STRUCT_P
3262 /* Subregs are non-orthogonal for us, because our registers are all
3265 m32c_subreg (machine_mode outer
,
3266 rtx x
, machine_mode inner
, int byte
)
3270 /* Converting MEMs to different types that are the same size, we
3271 just rewrite them. */
3272 if (GET_CODE (x
) == SUBREG
3273 && SUBREG_BYTE (x
) == 0
3274 && GET_CODE (SUBREG_REG (x
)) == MEM
3275 && (GET_MODE_SIZE (GET_MODE (x
))
3276 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (x
)))))
3279 x
= gen_rtx_MEM (GET_MODE (x
), XEXP (SUBREG_REG (x
), 0));
3280 MEM_COPY_ATTRIBUTES (x
, SUBREG_REG (oldx
));
3283 /* Push/pop get done as smaller push/pops. */
3284 if (GET_CODE (x
) == MEM
3285 && (GET_CODE (XEXP (x
, 0)) == PRE_DEC
3286 || GET_CODE (XEXP (x
, 0)) == POST_INC
))
3287 return gen_rtx_MEM (outer
, XEXP (x
, 0));
3288 if (GET_CODE (x
) == SUBREG
3289 && GET_CODE (XEXP (x
, 0)) == MEM
3290 && (GET_CODE (XEXP (XEXP (x
, 0), 0)) == PRE_DEC
3291 || GET_CODE (XEXP (XEXP (x
, 0), 0)) == POST_INC
))
3292 return gen_rtx_MEM (outer
, XEXP (XEXP (x
, 0), 0));
3294 if (GET_CODE (x
) != REG
)
3296 rtx r
= simplify_gen_subreg (outer
, x
, inner
, byte
);
3297 if (GET_CODE (r
) == SUBREG
3298 && GET_CODE (x
) == MEM
3299 && MEM_VOLATILE_P (x
))
3301 /* Volatile MEMs don't get simplified, but we need them to
3302 be. We are little endian, so the subreg byte is the
3304 r
= adjust_address_nv (x
, outer
, byte
);
3310 if (r
>= FIRST_PSEUDO_REGISTER
|| r
== AP_REGNO
)
3311 return simplify_gen_subreg (outer
, x
, inner
, byte
);
3313 if (IS_MEM_REGNO (r
))
3314 return simplify_gen_subreg (outer
, x
, inner
, byte
);
3316 /* This is where the complexities of our register layout are
3320 else if (outer
== HImode
)
3322 if (r
== R0_REGNO
&& byte
== 2)
3324 else if (r
== R0_REGNO
&& byte
== 4)
3326 else if (r
== R0_REGNO
&& byte
== 6)
3328 else if (r
== R1_REGNO
&& byte
== 2)
3330 else if (r
== A0_REGNO
&& byte
== 2)
3333 else if (outer
== SImode
)
3335 if (r
== R0_REGNO
&& byte
== 0)
3337 else if (r
== R0_REGNO
&& byte
== 4)
3342 fprintf (stderr
, "m32c_subreg %s %s %d\n",
3343 mode_name
[outer
], mode_name
[inner
], byte
);
3347 return gen_rtx_REG (outer
, nr
);
3350 /* Used to emit move instructions. We split some moves,
3351 and avoid mem-mem moves. */
3353 m32c_prepare_move (rtx
* operands
, machine_mode mode
)
3355 if (far_addr_space_p (operands
[0])
3356 && CONSTANT_P (operands
[1]))
3358 operands
[1] = force_reg (GET_MODE (operands
[0]), operands
[1]);
3360 if (TARGET_A16
&& mode
== PSImode
)
3361 return m32c_split_move (operands
, mode
, 1);
3362 if ((GET_CODE (operands
[0]) == MEM
)
3363 && (GET_CODE (XEXP (operands
[0], 0)) == PRE_MODIFY
))
3365 rtx pmv
= XEXP (operands
[0], 0);
3366 rtx dest_reg
= XEXP (pmv
, 0);
3367 rtx dest_mod
= XEXP (pmv
, 1);
3369 emit_insn (gen_rtx_SET (Pmode
, dest_reg
, dest_mod
));
3370 operands
[0] = gen_rtx_MEM (mode
, dest_reg
);
3372 if (can_create_pseudo_p () && MEM_P (operands
[0]) && MEM_P (operands
[1]))
3373 operands
[1] = copy_to_mode_reg (mode
, operands
[1]);
3377 #define DEBUG_SPLIT 0
3379 /* Returns TRUE if the given PSImode move should be split. We split
3380 for all r8c/m16c moves, since it doesn't support them, and for
3381 POP.L as we can only *push* SImode. */
3383 m32c_split_psi_p (rtx
* operands
)
3386 fprintf (stderr
, "\nm32c_split_psi_p\n");
3387 debug_rtx (operands
[0]);
3388 debug_rtx (operands
[1]);
3393 fprintf (stderr
, "yes, A16\n");
3397 if (GET_CODE (operands
[1]) == MEM
3398 && GET_CODE (XEXP (operands
[1], 0)) == POST_INC
)
3401 fprintf (stderr
, "yes, pop.l\n");
3406 fprintf (stderr
, "no, default\n");
3411 /* Split the given move. SPLIT_ALL is 0 if splitting is optional
3412 (define_expand), 1 if it is not optional (define_insn_and_split),
3413 and 3 for define_split (alternate api). */
3415 m32c_split_move (rtx
* operands
, machine_mode mode
, int split_all
)
3418 int parts
, si
, di
, rev
= 0;
3419 int rv
= 0, opi
= 2;
3420 machine_mode submode
= HImode
;
3421 rtx
*ops
, local_ops
[10];
3423 /* define_split modifies the existing operands, but the other two
3424 emit new insns. OPS is where we store the operand pairs, which
3435 /* Before splitting mem-mem moves, force one operand into a
3437 if (can_create_pseudo_p () && MEM_P (operands
[0]) && MEM_P (operands
[1]))
3440 fprintf (stderr
, "force_reg...\n");
3441 debug_rtx (operands
[1]);
3443 operands
[1] = force_reg (mode
, operands
[1]);
3445 debug_rtx (operands
[1]);
3452 fprintf (stderr
, "\nsplit_move %d all=%d\n", !can_create_pseudo_p (),
3454 debug_rtx (operands
[0]);
3455 debug_rtx (operands
[1]);
3458 /* Note that split_all is not used to select the api after this
3459 point, so it's safe to set it to 3 even with define_insn. */
3460 /* None of the chips can move SI operands to sp-relative addresses,
3461 so we always split those. */
3462 if (satisfies_constraint_Ss (operands
[0]))
3466 && (far_addr_space_p (operands
[0])
3467 || far_addr_space_p (operands
[1])))
3470 /* We don't need to split these. */
3473 && (mode
== SImode
|| mode
== PSImode
)
3474 && !(GET_CODE (operands
[1]) == MEM
3475 && GET_CODE (XEXP (operands
[1], 0)) == POST_INC
))
3478 /* First, enumerate the subregs we'll be dealing with. */
3479 for (si
= 0; si
< parts
; si
++)
3482 m32c_subreg (submode
, operands
[0], mode
,
3483 si
* GET_MODE_SIZE (submode
));
3485 m32c_subreg (submode
, operands
[1], mode
,
3486 si
* GET_MODE_SIZE (submode
));
3489 /* Split pushes by emitting a sequence of smaller pushes. */
3490 if (GET_CODE (d
[0]) == MEM
&& GET_CODE (XEXP (d
[0], 0)) == PRE_DEC
)
3492 for (si
= parts
- 1; si
>= 0; si
--)
3494 ops
[opi
++] = gen_rtx_MEM (submode
,
3495 gen_rtx_PRE_DEC (Pmode
,
3503 /* Likewise for pops. */
3504 else if (GET_CODE (s
[0]) == MEM
&& GET_CODE (XEXP (s
[0], 0)) == POST_INC
)
3506 for (di
= 0; di
< parts
; di
++)
3509 ops
[opi
++] = gen_rtx_MEM (submode
,
3510 gen_rtx_POST_INC (Pmode
,
3518 /* if d[di] == s[si] for any di < si, we'll early clobber. */
3519 for (di
= 0; di
< parts
- 1; di
++)
3520 for (si
= di
+ 1; si
< parts
; si
++)
3521 if (reg_mentioned_p (d
[di
], s
[si
]))
3525 for (si
= 0; si
< parts
; si
++)
3531 for (si
= parts
- 1; si
>= 0; si
--)
3538 /* Now emit any moves we may have accumulated. */
3539 if (rv
&& split_all
!= 3)
3542 for (i
= 2; i
< opi
; i
+= 2)
3543 emit_move_insn (ops
[i
], ops
[i
+ 1]);
3548 /* The m32c has a number of opcodes that act like memcpy, strcmp, and
3549 the like. For the R8C they expect one of the addresses to be in
3550 R1L:An so we need to arrange for that. Otherwise, it's just a
3551 matter of picking out the operands we want and emitting the right
3552 pattern for them. All these expanders, which correspond to
3553 patterns in blkmov.md, must return nonzero if they expand the insn,
3554 or zero if they should FAIL. */
3556 /* This is a memset() opcode. All operands are implied, so we need to
3557 arrange for them to be in the right registers. The opcode wants
3558 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3559 the count (HI), and $2 the value (QI). */
3561 m32c_expand_setmemhi(rtx
*operands
)
3563 rtx desta
, count
, val
;
3566 desta
= XEXP (operands
[0], 0);
3567 count
= operands
[1];
3570 desto
= gen_reg_rtx (Pmode
);
3571 counto
= gen_reg_rtx (HImode
);
3573 if (GET_CODE (desta
) != REG
3574 || REGNO (desta
) < FIRST_PSEUDO_REGISTER
)
3575 desta
= copy_to_mode_reg (Pmode
, desta
);
3577 /* This looks like an arbitrary restriction, but this is by far the
3578 most common case. For counts 8..14 this actually results in
3579 smaller code with no speed penalty because the half-sized
3580 constant can be loaded with a shorter opcode. */
3581 if (GET_CODE (count
) == CONST_INT
3582 && GET_CODE (val
) == CONST_INT
3583 && ! (INTVAL (count
) & 1)
3584 && (INTVAL (count
) > 1)
3585 && (INTVAL (val
) <= 7 && INTVAL (val
) >= -8))
3587 unsigned v
= INTVAL (val
) & 0xff;
3589 count
= copy_to_mode_reg (HImode
, GEN_INT (INTVAL (count
) / 2));
3590 val
= copy_to_mode_reg (HImode
, GEN_INT (v
));
3592 emit_insn (gen_setmemhi_whi_op (desto
, counto
, val
, desta
, count
));
3594 emit_insn (gen_setmemhi_wpsi_op (desto
, counto
, val
, desta
, count
));
3598 /* This is the generalized memset() case. */
3599 if (GET_CODE (val
) != REG
3600 || REGNO (val
) < FIRST_PSEUDO_REGISTER
)
3601 val
= copy_to_mode_reg (QImode
, val
);
3603 if (GET_CODE (count
) != REG
3604 || REGNO (count
) < FIRST_PSEUDO_REGISTER
)
3605 count
= copy_to_mode_reg (HImode
, count
);
3608 emit_insn (gen_setmemhi_bhi_op (desto
, counto
, val
, desta
, count
));
3610 emit_insn (gen_setmemhi_bpsi_op (desto
, counto
, val
, desta
, count
));
3615 /* This is a memcpy() opcode. All operands are implied, so we need to
3616 arrange for them to be in the right registers. The opcode wants
3617 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3618 is the source (MEM:BLK), and $2 the count (HI). */
3620 m32c_expand_movmemhi(rtx
*operands
)
3622 rtx desta
, srca
, count
;
3623 rtx desto
, srco
, counto
;
3625 desta
= XEXP (operands
[0], 0);
3626 srca
= XEXP (operands
[1], 0);
3627 count
= operands
[2];
3629 desto
= gen_reg_rtx (Pmode
);
3630 srco
= gen_reg_rtx (Pmode
);
3631 counto
= gen_reg_rtx (HImode
);
3633 if (GET_CODE (desta
) != REG
3634 || REGNO (desta
) < FIRST_PSEUDO_REGISTER
)
3635 desta
= copy_to_mode_reg (Pmode
, desta
);
3637 if (GET_CODE (srca
) != REG
3638 || REGNO (srca
) < FIRST_PSEUDO_REGISTER
)
3639 srca
= copy_to_mode_reg (Pmode
, srca
);
3641 /* Similar to setmem, but we don't need to check the value. */
3642 if (GET_CODE (count
) == CONST_INT
3643 && ! (INTVAL (count
) & 1)
3644 && (INTVAL (count
) > 1))
3646 count
= copy_to_mode_reg (HImode
, GEN_INT (INTVAL (count
) / 2));
3648 emit_insn (gen_movmemhi_whi_op (desto
, srco
, counto
, desta
, srca
, count
));
3650 emit_insn (gen_movmemhi_wpsi_op (desto
, srco
, counto
, desta
, srca
, count
));
3654 /* This is the generalized memset() case. */
3655 if (GET_CODE (count
) != REG
3656 || REGNO (count
) < FIRST_PSEUDO_REGISTER
)
3657 count
= copy_to_mode_reg (HImode
, count
);
3660 emit_insn (gen_movmemhi_bhi_op (desto
, srco
, counto
, desta
, srca
, count
));
3662 emit_insn (gen_movmemhi_bpsi_op (desto
, srco
, counto
, desta
, srca
, count
));
3667 /* This is a stpcpy() opcode. $0 is the destination (MEM:BLK) after
3668 the copy, which should point to the NUL at the end of the string,
3669 $1 is the destination (MEM:BLK), and $2 is the source (MEM:BLK).
3670 Since our opcode leaves the destination pointing *after* the NUL,
3671 we must emit an adjustment. */
3673 m32c_expand_movstr(rtx
*operands
)
3678 desta
= XEXP (operands
[1], 0);
3679 srca
= XEXP (operands
[2], 0);
3681 desto
= gen_reg_rtx (Pmode
);
3682 srco
= gen_reg_rtx (Pmode
);
3684 if (GET_CODE (desta
) != REG
3685 || REGNO (desta
) < FIRST_PSEUDO_REGISTER
)
3686 desta
= copy_to_mode_reg (Pmode
, desta
);
3688 if (GET_CODE (srca
) != REG
3689 || REGNO (srca
) < FIRST_PSEUDO_REGISTER
)
3690 srca
= copy_to_mode_reg (Pmode
, srca
);
3692 emit_insn (gen_movstr_op (desto
, srco
, desta
, srca
));
3693 /* desto ends up being a1, which allows this type of add through MOVA. */
3694 emit_insn (gen_addpsi3 (operands
[0], desto
, GEN_INT (-1)));
3699 /* This is a strcmp() opcode. $0 is the destination (HI) which holds
3700 <=>0 depending on the comparison, $1 is one string (MEM:BLK), and
3701 $2 is the other (MEM:BLK). We must do the comparison, and then
3702 convert the flags to a signed integer result. */
3704 m32c_expand_cmpstr(rtx
*operands
)
3708 src1a
= XEXP (operands
[1], 0);
3709 src2a
= XEXP (operands
[2], 0);
3711 if (GET_CODE (src1a
) != REG
3712 || REGNO (src1a
) < FIRST_PSEUDO_REGISTER
)
3713 src1a
= copy_to_mode_reg (Pmode
, src1a
);
3715 if (GET_CODE (src2a
) != REG
3716 || REGNO (src2a
) < FIRST_PSEUDO_REGISTER
)
3717 src2a
= copy_to_mode_reg (Pmode
, src2a
);
3719 emit_insn (gen_cmpstrhi_op (src1a
, src2a
, src1a
, src2a
));
3720 emit_insn (gen_cond_to_int (operands
[0]));
3726 typedef rtx (*shift_gen_func
)(rtx
, rtx
, rtx
);
3728 static shift_gen_func
3729 shift_gen_func_for (int mode
, int code
)
3731 #define GFF(m,c,f) if (mode == m && code == c) return f
3732 GFF(QImode
, ASHIFT
, gen_ashlqi3_i
);
3733 GFF(QImode
, ASHIFTRT
, gen_ashrqi3_i
);
3734 GFF(QImode
, LSHIFTRT
, gen_lshrqi3_i
);
3735 GFF(HImode
, ASHIFT
, gen_ashlhi3_i
);
3736 GFF(HImode
, ASHIFTRT
, gen_ashrhi3_i
);
3737 GFF(HImode
, LSHIFTRT
, gen_lshrhi3_i
);
3738 GFF(PSImode
, ASHIFT
, gen_ashlpsi3_i
);
3739 GFF(PSImode
, ASHIFTRT
, gen_ashrpsi3_i
);
3740 GFF(PSImode
, LSHIFTRT
, gen_lshrpsi3_i
);
3741 GFF(SImode
, ASHIFT
, TARGET_A16
? gen_ashlsi3_16
: gen_ashlsi3_24
);
3742 GFF(SImode
, ASHIFTRT
, TARGET_A16
? gen_ashrsi3_16
: gen_ashrsi3_24
);
3743 GFF(SImode
, LSHIFTRT
, TARGET_A16
? gen_lshrsi3_16
: gen_lshrsi3_24
);
3748 /* The m32c only has one shift, but it takes a signed count. GCC
3749 doesn't want this, so we fake it by negating any shift count when
3750 we're pretending to shift the other way. Also, the shift count is
3751 limited to -8..8. It's slightly better to use two shifts for 9..15
3752 than to load the count into r1h, so we do that too. */
3754 m32c_prepare_shift (rtx
* operands
, int scale
, int shift_code
)
3756 machine_mode mode
= GET_MODE (operands
[0]);
3757 shift_gen_func func
= shift_gen_func_for (mode
, shift_code
);
3760 if (GET_CODE (operands
[2]) == CONST_INT
)
3762 int maxc
= TARGET_A24
&& (mode
== PSImode
|| mode
== SImode
) ? 32 : 8;
3763 int count
= INTVAL (operands
[2]) * scale
;
3765 while (count
> maxc
)
3767 temp
= gen_reg_rtx (mode
);
3768 emit_insn (func (temp
, operands
[1], GEN_INT (maxc
)));
3772 while (count
< -maxc
)
3774 temp
= gen_reg_rtx (mode
);
3775 emit_insn (func (temp
, operands
[1], GEN_INT (-maxc
)));
3779 emit_insn (func (operands
[0], operands
[1], GEN_INT (count
)));
3783 temp
= gen_reg_rtx (QImode
);
3785 /* The pattern has a NEG that corresponds to this. */
3786 emit_move_insn (temp
, gen_rtx_NEG (QImode
, operands
[2]));
3787 else if (TARGET_A16
&& mode
== SImode
)
3788 /* We do this because the code below may modify this, we don't
3789 want to modify the origin of this value. */
3790 emit_move_insn (temp
, operands
[2]);
3792 /* We'll only use it for the shift, no point emitting a move. */
3795 if (TARGET_A16
&& GET_MODE_SIZE (mode
) == 4)
3797 /* The m16c has a limit of -16..16 for SI shifts, even when the
3798 shift count is in a register. Since there are so many targets
3799 of these shifts, it's better to expand the RTL here than to
3800 call a helper function.
3802 The resulting code looks something like this:
3814 We take advantage of the fact that "negative" shifts are
3815 undefined to skip one of the comparisons. */
3821 emit_move_insn (operands
[0], operands
[1]);
3824 label
= gen_label_rtx ();
3825 LABEL_NUSES (label
) ++;
3827 tempvar
= gen_reg_rtx (mode
);
3829 if (shift_code
== ASHIFT
)
3831 /* This is a left shift. We only need check positive counts. */
3832 emit_jump_insn (gen_cbranchqi4 (gen_rtx_LE (VOIDmode
, 0, 0),
3833 count
, GEN_INT (16), label
));
3834 emit_insn (func (tempvar
, operands
[0], GEN_INT (8)));
3835 emit_insn (func (operands
[0], tempvar
, GEN_INT (8)));
3836 insn
= emit_insn (gen_addqi3 (count
, count
, GEN_INT (-16)));
3837 emit_label_after (label
, insn
);
3841 /* This is a right shift. We only need check negative counts. */
3842 emit_jump_insn (gen_cbranchqi4 (gen_rtx_GE (VOIDmode
, 0, 0),
3843 count
, GEN_INT (-16), label
));
3844 emit_insn (func (tempvar
, operands
[0], GEN_INT (-8)));
3845 emit_insn (func (operands
[0], tempvar
, GEN_INT (-8)));
3846 insn
= emit_insn (gen_addqi3 (count
, count
, GEN_INT (16)));
3847 emit_label_after (label
, insn
);
3849 operands
[1] = operands
[0];
3850 emit_insn (func (operands
[0], operands
[0], count
));
3858 /* The m32c has a limited range of operations that work on PSImode
3859 values; we have to expand to SI, do the math, and truncate back to
3860 PSI. Yes, this is expensive, but hopefully gcc will learn to avoid
3863 m32c_expand_neg_mulpsi3 (rtx
* operands
)
3865 /* operands: a = b * i */
3866 rtx temp1
; /* b as SI */
3867 rtx scale
/* i as SI */;
3868 rtx temp2
; /* a*b as SI */
3870 temp1
= gen_reg_rtx (SImode
);
3871 temp2
= gen_reg_rtx (SImode
);
3872 if (GET_CODE (operands
[2]) != CONST_INT
)
3874 scale
= gen_reg_rtx (SImode
);
3875 emit_insn (gen_zero_extendpsisi2 (scale
, operands
[2]));
3878 scale
= copy_to_mode_reg (SImode
, operands
[2]);
3880 emit_insn (gen_zero_extendpsisi2 (temp1
, operands
[1]));
3881 temp2
= expand_simple_binop (SImode
, MULT
, temp1
, scale
, temp2
, 1, OPTAB_LIB
);
3882 emit_insn (gen_truncsipsi2 (operands
[0], temp2
));
3885 /* Pattern Output Functions */
3888 m32c_expand_movcc (rtx
*operands
)
3890 rtx rel
= operands
[1];
3892 if (GET_CODE (rel
) != EQ
&& GET_CODE (rel
) != NE
)
3894 if (GET_CODE (operands
[2]) != CONST_INT
3895 || GET_CODE (operands
[3]) != CONST_INT
)
3897 if (GET_CODE (rel
) == NE
)
3899 rtx tmp
= operands
[2];
3900 operands
[2] = operands
[3];
3902 rel
= gen_rtx_EQ (GET_MODE (rel
), XEXP (rel
, 0), XEXP (rel
, 1));
3905 emit_move_insn (operands
[0],
3906 gen_rtx_IF_THEN_ELSE (GET_MODE (operands
[0]),
3913 /* Used for the "insv" pattern. Return nonzero to fail, else done. */
3915 m32c_expand_insv (rtx
*operands
)
3920 if (INTVAL (operands
[1]) != 1)
3923 /* Our insv opcode (bset, bclr) can only insert a one-bit constant. */
3924 if (GET_CODE (operands
[3]) != CONST_INT
)
3926 if (INTVAL (operands
[3]) != 0
3927 && INTVAL (operands
[3]) != 1
3928 && INTVAL (operands
[3]) != -1)
3931 mask
= 1 << INTVAL (operands
[2]);
3934 if (GET_CODE (op0
) == SUBREG
3935 && SUBREG_BYTE (op0
) == 0)
3937 rtx sub
= SUBREG_REG (op0
);
3938 if (GET_MODE (sub
) == HImode
|| GET_MODE (sub
) == QImode
)
3942 if (!can_create_pseudo_p ()
3943 || (GET_CODE (op0
) == MEM
&& MEM_VOLATILE_P (op0
)))
3947 src0
= gen_reg_rtx (GET_MODE (op0
));
3948 emit_move_insn (src0
, op0
);
3951 if (GET_MODE (op0
) == HImode
3952 && INTVAL (operands
[2]) >= 8
3953 && GET_CODE (op0
) == MEM
)
3955 /* We are little endian. */
3956 rtx new_mem
= gen_rtx_MEM (QImode
, plus_constant (Pmode
,
3958 MEM_COPY_ATTRIBUTES (new_mem
, op0
);
3962 /* First, we generate a mask with the correct polarity. If we are
3963 storing a zero, we want an AND mask, so invert it. */
3964 if (INTVAL (operands
[3]) == 0)
3966 /* Storing a zero, use an AND mask */
3967 if (GET_MODE (op0
) == HImode
)
3972 /* Now we need to properly sign-extend the mask in case we need to
3973 fall back to an AND or OR opcode. */
3974 if (GET_MODE (op0
) == HImode
)
3985 switch ( (INTVAL (operands
[3]) ? 4 : 0)
3986 + ((GET_MODE (op0
) == HImode
) ? 2 : 0)
3987 + (TARGET_A24
? 1 : 0))
3989 case 0: p
= gen_andqi3_16 (op0
, src0
, GEN_INT (mask
)); break;
3990 case 1: p
= gen_andqi3_24 (op0
, src0
, GEN_INT (mask
)); break;
3991 case 2: p
= gen_andhi3_16 (op0
, src0
, GEN_INT (mask
)); break;
3992 case 3: p
= gen_andhi3_24 (op0
, src0
, GEN_INT (mask
)); break;
3993 case 4: p
= gen_iorqi3_16 (op0
, src0
, GEN_INT (mask
)); break;
3994 case 5: p
= gen_iorqi3_24 (op0
, src0
, GEN_INT (mask
)); break;
3995 case 6: p
= gen_iorhi3_16 (op0
, src0
, GEN_INT (mask
)); break;
3996 case 7: p
= gen_iorhi3_24 (op0
, src0
, GEN_INT (mask
)); break;
3997 default: p
= NULL_RTX
; break; /* Not reached, but silences a warning. */
4005 m32c_scc_pattern(rtx
*operands
, RTX_CODE code
)
4007 static char buf
[30];
4008 if (GET_CODE (operands
[0]) == REG
4009 && REGNO (operands
[0]) == R0_REGNO
)
4012 return "stzx\t#1,#0,r0l";
4014 return "stzx\t#0,#1,r0l";
4016 sprintf(buf
, "bm%s\t0,%%h0\n\tand.b\t#1,%%0", GET_RTX_NAME (code
));
4020 /* Encode symbol attributes of a SYMBOL_REF into its
4021 SYMBOL_REF_FLAGS. */
4023 m32c_encode_section_info (tree decl
, rtx rtl
, int first
)
4025 int extra_flags
= 0;
4027 default_encode_section_info (decl
, rtl
, first
);
4028 if (TREE_CODE (decl
) == FUNCTION_DECL
4029 && m32c_special_page_vector_p (decl
))
4031 extra_flags
= SYMBOL_FLAG_FUNCVEC_FUNCTION
;
4034 SYMBOL_REF_FLAGS (XEXP (rtl
, 0)) |= extra_flags
;
4037 /* Returns TRUE if the current function is a leaf, and thus we can
4038 determine which registers an interrupt function really needs to
4039 save. The logic below is mostly about finding the insn sequence
4040 that's the function, versus any sequence that might be open for the
4043 m32c_leaf_function_p (void)
4045 rtx_insn
*saved_first
, *saved_last
;
4046 struct sequence_stack
*seq
;
4049 saved_first
= crtl
->emit
.x_first_insn
;
4050 saved_last
= crtl
->emit
.x_last_insn
;
4051 for (seq
= crtl
->emit
.sequence_stack
; seq
&& seq
->next
; seq
= seq
->next
)
4055 crtl
->emit
.x_first_insn
= seq
->first
;
4056 crtl
->emit
.x_last_insn
= seq
->last
;
4059 rv
= leaf_function_p ();
4061 crtl
->emit
.x_first_insn
= saved_first
;
4062 crtl
->emit
.x_last_insn
= saved_last
;
4066 /* Returns TRUE if the current function needs to use the ENTER/EXIT
4067 opcodes. If the function doesn't need the frame base or stack
4068 pointer, it can use the simpler RTS opcode. */
4070 m32c_function_needs_enter (void)
4073 struct sequence_stack
*seq
;
4074 rtx sp
= gen_rtx_REG (Pmode
, SP_REGNO
);
4075 rtx fb
= gen_rtx_REG (Pmode
, FB_REGNO
);
4077 insn
= get_insns ();
4078 for (seq
= crtl
->emit
.sequence_stack
;
4080 insn
= seq
->first
, seq
= seq
->next
);
4084 if (reg_mentioned_p (sp
, insn
))
4086 if (reg_mentioned_p (fb
, insn
))
4088 insn
= NEXT_INSN (insn
);
4093 /* Mark all the subexpressions of the PARALLEL rtx PAR as
4094 frame-related. Return PAR.
4096 dwarf2out.c:dwarf2out_frame_debug_expr ignores sub-expressions of a
4097 PARALLEL rtx other than the first if they do not have the
4098 FRAME_RELATED flag set on them. So this function is handy for
4099 marking up 'enter' instructions. */
4101 m32c_all_frame_related (rtx par
)
4103 int len
= XVECLEN (par
, 0);
4106 for (i
= 0; i
< len
; i
++)
4107 F (XVECEXP (par
, 0, i
));
4112 /* Emits the prologue. See the frame layout comment earlier in this
4113 file. We can reserve up to 256 bytes with the ENTER opcode, beyond
4114 that we manually update sp. */
4116 m32c_emit_prologue (void)
4118 int frame_size
, extra_frame_size
= 0, reg_save_size
;
4119 int complex_prologue
= 0;
4121 cfun
->machine
->is_leaf
= m32c_leaf_function_p ();
4122 if (interrupt_p (cfun
->decl
))
4124 cfun
->machine
->is_interrupt
= 1;
4125 complex_prologue
= 1;
4127 else if (bank_switch_p (cfun
->decl
))
4128 warning (OPT_Wattributes
,
4129 "%<bank_switch%> has no effect on non-interrupt functions");
4131 reg_save_size
= m32c_pushm_popm (PP_justcount
);
4133 if (interrupt_p (cfun
->decl
))
4135 if (bank_switch_p (cfun
->decl
))
4136 emit_insn (gen_fset_b ());
4137 else if (cfun
->machine
->intr_pushm
)
4138 emit_insn (gen_pushm (GEN_INT (cfun
->machine
->intr_pushm
)));
4142 m32c_initial_elimination_offset (FB_REGNO
, SP_REGNO
) - reg_save_size
;
4144 && !m32c_function_needs_enter ())
4145 cfun
->machine
->use_rts
= 1;
4147 if (frame_size
> 254)
4149 extra_frame_size
= frame_size
- 254;
4152 if (cfun
->machine
->use_rts
== 0)
4153 F (emit_insn (m32c_all_frame_related
4155 ? gen_prologue_enter_16 (GEN_INT (frame_size
+ 2))
4156 : gen_prologue_enter_24 (GEN_INT (frame_size
+ 4)))));
4158 if (extra_frame_size
)
4160 complex_prologue
= 1;
4162 F (emit_insn (gen_addhi3 (gen_rtx_REG (HImode
, SP_REGNO
),
4163 gen_rtx_REG (HImode
, SP_REGNO
),
4164 GEN_INT (-extra_frame_size
))));
4166 F (emit_insn (gen_addpsi3 (gen_rtx_REG (PSImode
, SP_REGNO
),
4167 gen_rtx_REG (PSImode
, SP_REGNO
),
4168 GEN_INT (-extra_frame_size
))));
4171 complex_prologue
+= m32c_pushm_popm (PP_pushm
);
4173 /* This just emits a comment into the .s file for debugging. */
4174 if (complex_prologue
)
4175 emit_insn (gen_prologue_end ());
4178 /* Likewise, for the epilogue. The only exception is that, for
4179 interrupts, we must manually unwind the frame as the REIT opcode
4182 m32c_emit_epilogue (void)
4184 int popm_count
= m32c_pushm_popm (PP_justcount
);
4186 /* This just emits a comment into the .s file for debugging. */
4187 if (popm_count
> 0 || cfun
->machine
->is_interrupt
)
4188 emit_insn (gen_epilogue_start ());
4191 m32c_pushm_popm (PP_popm
);
4193 if (cfun
->machine
->is_interrupt
)
4195 machine_mode spmode
= TARGET_A16
? HImode
: PSImode
;
4197 /* REIT clears B flag and restores $fp for us, but we still
4198 have to fix up the stack. USE_RTS just means we didn't
4200 if (!cfun
->machine
->use_rts
)
4202 emit_move_insn (gen_rtx_REG (spmode
, A0_REGNO
),
4203 gen_rtx_REG (spmode
, FP_REGNO
));
4204 emit_move_insn (gen_rtx_REG (spmode
, SP_REGNO
),
4205 gen_rtx_REG (spmode
, A0_REGNO
));
4206 /* We can't just add this to the POPM because it would be in
4207 the wrong order, and wouldn't fix the stack if we're bank
4210 emit_insn (gen_pophi_16 (gen_rtx_REG (HImode
, FP_REGNO
)));
4212 emit_insn (gen_poppsi (gen_rtx_REG (PSImode
, FP_REGNO
)));
4214 if (!bank_switch_p (cfun
->decl
) && cfun
->machine
->intr_pushm
)
4215 emit_insn (gen_popm (GEN_INT (cfun
->machine
->intr_pushm
)));
4217 /* The FREIT (Fast REturn from InTerrupt) instruction should be
4218 generated only for M32C/M32CM targets (generate the REIT
4219 instruction otherwise). */
4220 if (fast_interrupt_p (cfun
->decl
))
4222 /* Check if fast_attribute is set for M32C or M32CM. */
4225 emit_jump_insn (gen_epilogue_freit ());
4227 /* If fast_interrupt attribute is set for an R8C or M16C
4228 target ignore this attribute and generated REIT
4232 warning (OPT_Wattributes
,
4233 "%<fast_interrupt%> attribute directive ignored");
4234 emit_jump_insn (gen_epilogue_reit_16 ());
4237 else if (TARGET_A16
)
4238 emit_jump_insn (gen_epilogue_reit_16 ());
4240 emit_jump_insn (gen_epilogue_reit_24 ());
4242 else if (cfun
->machine
->use_rts
)
4243 emit_jump_insn (gen_epilogue_rts ());
4244 else if (TARGET_A16
)
4245 emit_jump_insn (gen_epilogue_exitd_16 ());
4247 emit_jump_insn (gen_epilogue_exitd_24 ());
/* Emit the exception-handling (eh_return) epilogue.  RET_ADDR is the
   address to return to; cfun->machine->eh_stack_adjust carries the
   stack adjustment recorded earlier.  All the actual work is punted
   to a libgcc assembler helper through the "eh_epilogue" pattern.  */
4251 m32c_emit_eh_epilogue (rtx ret_addr
)
4253 /* R0[R2] has the stack adjustment. R1[R3] has the address to
4254 return to. We have to fudge the stack, pop everything, pop SP
4255 (fudged), and return (fudged). This is actually easier to do in
4256 assembler, so punt to libgcc. */
4257 emit_jump_insn (gen_eh_epilogue (ret_addr
, cfun
->machine
->eh_stack_adjust
));
4258 /* emit_clobber (gen_rtx_REG (HImode, R0L_REGNO)); */
4261 /* Indicate which flags must be properly set for a given conditional. */
/* NOTE(review): dispatches on the RTL comparison code of COND and
   returns a FLAGS_* bit mask (m32c_compare_redundant tests the result
   against FLAGS_N and FLAGS_OC).  The switch arms are not visible in
   this chunk — confirm the per-code mapping against the full file.  */
4263 flags_needed_for_conditional (rtx cond
)
4265 switch (GET_CODE (cond
))
4289 /* Returns true if a compare insn is redundant because it would only
4290 set flags that are already set correctly. */
/* CMP is the compare insn and OPERANDS its two compared values.
   Plan: locate the conditional that consumes the flags, compute the
   flag bits it needs, then walk backwards to the most recent
   flag-setting insn and check whether it already sets those bits the
   way the compare would.  (Several lines of this function are not
   visible in this chunk; the only intended behavior change is the
   operands[1] fix noted below.)  */
4292 m32c_compare_redundant (rtx_insn
*cmp
, rtx
*operands
)
/* Debug trace of the incoming compare and its operands.  */
4307 fprintf(stderr
, "\n\033[32mm32c_compare_redundant\033[0m\n");
4311 fprintf(stderr
, "operands[%d] = ", i
);
4312 debug_rtx(operands
[i
]);
/* Find the insn that consumes the flags this compare would set.  */
4316 next
= next_nonnote_insn (cmp
);
4317 if (!next
|| !INSN_P (next
))
4320 fprintf(stderr
, "compare not followed by insn\n");
/* A conditional branch is (set pc (if_then_else COND ...)); dig the
   condition COND out of the pattern.  */
4325 if (GET_CODE (PATTERN (next
)) == SET
4326 && GET_CODE (XEXP ( PATTERN (next
), 1)) == IF_THEN_ELSE
)
4328 next
= XEXP (XEXP (PATTERN (next
), 1), 0);
4330 else if (GET_CODE (PATTERN (next
)) == SET
)
4332 /* If this is a conditional, flags_needed will be something
4333 other than FLAGS_N, which we test below. */
4334 next
= XEXP (PATTERN (next
), 1);
4339 fprintf(stderr
, "compare not followed by conditional\n");
4345 fprintf(stderr
, "conditional is: ");
/* Which flag bits does the consumer actually need?  */
4349 flags_needed
= flags_needed_for_conditional (next
);
4350 if (flags_needed
== FLAGS_N
)
4353 fprintf(stderr
, "compare not followed by conditional\n");
4359 /* Compare doesn't set overflow and carry the same way that
4360 arithmetic instructions do, so we can't replace those. */
4361 if (flags_needed
& FLAGS_OC
)
/* Walk backwards to the most recent flag-setting insn.  */
4366 prev
= prev_nonnote_insn (prev
);
4370 fprintf(stderr
, "No previous insn.\n");
4377 fprintf(stderr
, "Previous insn is a non-insn.\n");
4381 pp
= PATTERN (prev
);
4382 if (GET_CODE (pp
) != SET
)
4385 fprintf(stderr
, "Previous insn is not a SET.\n");
4389 pflags
= get_attr_flags (prev
);
/* Re-recognize CMP: attribute lookups above clobbered the cached
   recog data for the current insn.  */
4391 /* Looking up attributes of previous insns corrupted the recog
4393 INSN_UID (cmp
) = -1;
4394 recog (PATTERN (cmp
), cmp
, 0);
/* A flag-neutral insn that mentions the compared register still
   invalidates the walk: it may change the value being compared.  */
4396 if (pflags
== FLAGS_N
4397 && reg_mentioned_p (op0
, pp
))
4400 fprintf(stderr
, "intermediate non-flags insn uses op:\n");
4406 /* Check for comparisons against memory - between volatiles and
4407 aliases, we just can't risk this one. */
/* Fix: the second test previously re-checked operands[0], so a MEM
   in operands[1] slipped through; per the comment above, a memory
   reference in EITHER operand must disqualify the compare.  */
4408 if (GET_CODE (operands
[0]) == MEM
4409 || GET_CODE (operands
[1]) == MEM
)
4412 fprintf(stderr
, "comparisons with memory:\n");
4418 /* Check for PREV changing a register that's used to compute a
4419 value in CMP, even if it doesn't otherwise change flags. */
4420 if (GET_CODE (operands
[0]) == REG
4421 && rtx_referenced_p (SET_DEST (PATTERN (prev
)), operands
[0]))
4424 fprintf(stderr
, "sub-value affected, op0:\n");
4429 if (GET_CODE (operands
[1]) == REG
4430 && rtx_referenced_p (SET_DEST (PATTERN (prev
)), operands
[1]))
4433 fprintf(stderr
, "sub-value affected, op1:\n");
4439 } while (pflags
== FLAGS_N
);
4441 fprintf(stderr
, "previous flag-setting insn:\n");
/* Case 1: the previous flag-setter is itself a compare into the
   flags register; it is only equivalent if it compared exactly the
   same two operands.  */
4446 if (GET_CODE (pp
) == SET
4447 && GET_CODE (XEXP (pp
, 0)) == REG
4448 && REGNO (XEXP (pp
, 0)) == FLG_REGNO
4449 && GET_CODE (XEXP (pp
, 1)) == COMPARE
)
4451 /* Adjacent cbranches must have the same operands to be
4453 rtx pop0
= XEXP (XEXP (pp
, 1), 0);
4454 rtx pop1
= XEXP (XEXP (pp
, 1), 1);
4456 fprintf(stderr
, "adjacent cbranches\n");
4460 if (rtx_equal_p (op0
, pop0
)
4461 && rtx_equal_p (op1
, pop1
))
4464 fprintf(stderr
, "prev cmp not same\n");
4469 /* Else the previous insn must be a SET, with either the source or
4470 dest equal to operands[0], and operands[1] must be zero. */
4472 if (!rtx_equal_p (op1
, const0_rtx
))
4475 fprintf(stderr
, "operands[1] not const0_rtx\n");
4479 if (GET_CODE (pp
) != SET
)
4482 fprintf (stderr
, "pp not set\n");
4486 if (!rtx_equal_p (op0
, SET_SRC (pp
))
4487 && !rtx_equal_p (op0
, SET_DEST (pp
)))
4490 fprintf(stderr
, "operands[0] not found in set\n");
4496 fprintf(stderr
, "cmp flags %x prev flags %x\n", flags_needed
, pflags
);
/* Redundant iff the previous insn sets every flag bit the
   conditional needs.  */
4498 if ((pflags
& flags_needed
) == flags_needed
)
4504 /* Return the pattern for a compare. This will be commented out if
4505 the compare is redundant, else a normal pattern is returned. Thus,
4506 the assembler output says where the compare would have been. */
/* INSN is the compare insn and OPERANDS its operands.  TEMPL holds
   ";cmp.X\t%1,%0"; the leading ';' makes the line an assembler
   comment, and index 5 is the size-suffix slot ('b' initially).
   NOTE(review): the return statements are not visible in this chunk,
   but the header comment implies templ is returned as-is (commented
   out) when redundant, and templ + 1 (skipping the ';') otherwise.  */
4508 m32c_output_compare (rtx_insn
*insn
, rtx
*operands
)
4510 static char templ
[] = ";cmp.b\t%1,%0";
/* Patch in the size suffix indexed by the operand's byte size:
   1 -> 'b', 2 -> 'w', 3/4 -> 'l'.  */
4513 templ
[5] = " bwll"[GET_MODE_SIZE(GET_MODE(operands
[0]))];
4514 if (m32c_compare_redundant (insn
, operands
))
4517 fprintf(stderr
, "cbranch: cmp not needed\n");
4523 fprintf(stderr
, "cbranch: cmp needed: `%s'\n", templ
+ 1);
4528 #undef TARGET_ENCODE_SECTION_INFO
4529 #define TARGET_ENCODE_SECTION_INFO m32c_encode_section_info
4531 /* If the frame pointer isn't used, we detect it manually. But the
4532 stack pointer doesn't have as flexible addressing as the frame
4533 pointer, so we always assume we have it. */
4535 #undef TARGET_FRAME_POINTER_REQUIRED
4536 #define TARGET_FRAME_POINTER_REQUIRED hook_bool_void_true
4538 /* The Global `targetm' Variable. */
4540 struct gcc_target targetm
= TARGET_INITIALIZER
;
4542 #include "gt-m32c.h"