1 /* Target Code for R8C/M16C/M32C
2 Copyright (C) 2005-2013 Free Software Foundation, Inc.
3 Contributed by Red Hat.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published
9 by the Free Software Foundation; either version 3, or (at your
10 option) any later version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
23 #include "coretypes.h"
27 #include "hard-reg-set.h"
28 #include "insn-config.h"
29 #include "conditions.h"
30 #include "insn-flags.h"
32 #include "insn-attr.h"
36 #include "diagnostic-core.h"
39 #include "stor-layout.h"
48 #include "target-def.h"
50 #include "langhooks.h"
53 #include "tm-constrs.h"
57 /* Used by m32c_pushm_popm. */
65 static bool m32c_function_needs_enter (void);
66 static tree
interrupt_handler (tree
*, tree
, tree
, int, bool *);
67 static tree
function_vector_handler (tree
*, tree
, tree
, int, bool *);
68 static int interrupt_p (tree node
);
69 static int bank_switch_p (tree node
);
70 static int fast_interrupt_p (tree node
);
71 static int interrupt_p (tree node
);
72 static bool m32c_asm_integer (rtx
, unsigned int, int);
73 static int m32c_comp_type_attributes (const_tree
, const_tree
);
74 static bool m32c_fixed_condition_code_regs (unsigned int *, unsigned int *);
75 static struct machine_function
*m32c_init_machine_status (void);
76 static void m32c_insert_attributes (tree
, tree
*);
77 static bool m32c_legitimate_address_p (enum machine_mode
, rtx
, bool);
78 static bool m32c_addr_space_legitimate_address_p (enum machine_mode
, rtx
, bool, addr_space_t
);
79 static rtx
m32c_function_arg (cumulative_args_t
, enum machine_mode
,
81 static bool m32c_pass_by_reference (cumulative_args_t
, enum machine_mode
,
83 static void m32c_function_arg_advance (cumulative_args_t
, enum machine_mode
,
85 static unsigned int m32c_function_arg_boundary (enum machine_mode
, const_tree
);
86 static int m32c_pushm_popm (Push_Pop_Type
);
87 static bool m32c_strict_argument_naming (cumulative_args_t
);
88 static rtx
m32c_struct_value_rtx (tree
, int);
89 static rtx
m32c_subreg (enum machine_mode
, rtx
, enum machine_mode
, int);
90 static int need_to_save (int);
91 static rtx
m32c_function_value (const_tree
, const_tree
, bool);
92 static rtx
m32c_libcall_value (enum machine_mode
, const_rtx
);
94 /* Returns true if an address is specified, else false. */
95 static bool m32c_get_pragma_address (const char *varname
, unsigned *addr
);
97 #define SYMBOL_FLAG_FUNCVEC_FUNCTION (SYMBOL_FLAG_MACH_DEP << 0)
99 #define streq(a,b) (strcmp ((a), (b)) == 0)
101 /* Internal support routines */
103 /* Debugging statements are tagged with DEBUG0 only so that they can
104 be easily enabled individually, by replacing the '0' with '1' as
110 /* This is needed by some of the commented-out debug statements
112 static char const *class_names
[LIM_REG_CLASSES
] = REG_CLASS_NAMES
;
114 static int class_contents
[LIM_REG_CLASSES
][1] = REG_CLASS_CONTENTS
;
116 /* These are all to support encode_pattern(). */
117 static char pattern
[30], *patternp
;
118 static GTY(()) rtx patternr
[30];
119 #define RTX_IS(x) (streq (pattern, x))
121 /* Some macros to simplify the logic throughout this file. */
122 #define IS_MEM_REGNO(regno) ((regno) >= MEM0_REGNO && (regno) <= MEM7_REGNO)
123 #define IS_MEM_REG(rtx) (GET_CODE (rtx) == REG && IS_MEM_REGNO (REGNO (rtx)))
125 #define IS_CR_REGNO(regno) ((regno) >= SB_REGNO && (regno) <= PC_REGNO)
126 #define IS_CR_REG(rtx) (GET_CODE (rtx) == REG && IS_CR_REGNO (REGNO (rtx)))
129 far_addr_space_p (rtx x
)
131 if (GET_CODE (x
) != MEM
)
134 fprintf(stderr
, "\033[35mfar_addr_space: "); debug_rtx(x
);
135 fprintf(stderr
, " = %d\033[0m\n", MEM_ADDR_SPACE (x
) == ADDR_SPACE_FAR
);
137 return MEM_ADDR_SPACE (x
) == ADDR_SPACE_FAR
;
140 /* We do most RTX matching by converting the RTX into a string, and
141 using string compares. This vastly simplifies the logic in many of
142 the functions in this file.
144 On exit, pattern[] has the encoded string (use RTX_IS("...") to
145 compare it) and patternr[] has pointers to the nodes in the RTX
146 corresponding to each character in the encoded string. The latter
147 is mostly used by print_operand().
149 Unrecognized patterns have '?' in them; this shows up when the
150 assembler complains about syntax errors.
154 encode_pattern_1 (rtx x
)
158 if (patternp
== pattern
+ sizeof (pattern
) - 2)
164 patternr
[patternp
- pattern
] = x
;
166 switch (GET_CODE (x
))
172 if (GET_MODE_SIZE (GET_MODE (x
)) !=
173 GET_MODE_SIZE (GET_MODE (XEXP (x
, 0))))
175 encode_pattern_1 (XEXP (x
, 0));
180 encode_pattern_1 (XEXP (x
, 0));
185 encode_pattern_1 (XEXP (x
, 0));
190 encode_pattern_1 (XEXP (x
, 0));
194 encode_pattern_1 (XEXP (x
, 0));
195 encode_pattern_1 (XEXP (x
, 1));
199 encode_pattern_1 (XEXP (x
, 0));
203 encode_pattern_1 (XEXP (x
, 0));
207 encode_pattern_1 (XEXP (x
, 0));
208 encode_pattern_1 (XEXP (x
, 1));
212 encode_pattern_1 (XEXP (x
, 0));
229 *patternp
++ = '0' + XCINT (x
, 1, UNSPEC
);
230 for (i
= 0; i
< XVECLEN (x
, 0); i
++)
231 encode_pattern_1 (XVECEXP (x
, 0, i
));
238 for (i
= 0; i
< XVECLEN (x
, 0); i
++)
239 encode_pattern_1 (XVECEXP (x
, 0, i
));
243 encode_pattern_1 (XEXP (x
, 0));
245 encode_pattern_1 (XEXP (x
, 1));
250 fprintf (stderr
, "can't encode pattern %s\n",
251 GET_RTX_NAME (GET_CODE (x
)));
260 encode_pattern (rtx x
)
263 encode_pattern_1 (x
);
267 /* Since register names indicate the mode they're used in, we need a
268 way to determine which name to refer to the register with. Called
269 by print_operand(). */
272 reg_name_with_mode (int regno
, enum machine_mode mode
)
274 int mlen
= GET_MODE_SIZE (mode
);
275 if (regno
== R0_REGNO
&& mlen
== 1)
277 if (regno
== R0_REGNO
&& (mlen
== 3 || mlen
== 4))
279 if (regno
== R0_REGNO
&& mlen
== 6)
281 if (regno
== R0_REGNO
&& mlen
== 8)
283 if (regno
== R1_REGNO
&& mlen
== 1)
285 if (regno
== R1_REGNO
&& (mlen
== 3 || mlen
== 4))
287 if (regno
== A0_REGNO
&& TARGET_A16
&& (mlen
== 3 || mlen
== 4))
289 return reg_names
[regno
];
292 /* How many bytes a register uses on stack when it's pushed. We need
293 to know this because the push opcode needs to explicitly indicate
294 the size of the register, even though the name of the register
295 already tells it that. Used by m32c_output_reg_{push,pop}, which
296 is only used through calls to ASM_OUTPUT_REG_{PUSH,POP}. */
299 reg_push_size (int regno
)
324 /* Given two register classes, find the largest intersection between
325 them. If there is no intersection, return RETURNED_IF_EMPTY
328 reduce_class (reg_class_t original_class
, reg_class_t limiting_class
,
329 reg_class_t returned_if_empty
)
333 reg_class_t best
= NO_REGS
;
334 unsigned int best_size
= 0;
336 if (original_class
== limiting_class
)
337 return original_class
;
339 cc
= reg_class_contents
[original_class
];
340 AND_HARD_REG_SET (cc
, reg_class_contents
[limiting_class
]);
342 for (i
= 0; i
< LIM_REG_CLASSES
; i
++)
344 if (hard_reg_set_subset_p (reg_class_contents
[i
], cc
))
345 if (best_size
< reg_class_size
[i
])
347 best
= (reg_class_t
) i
;
348 best_size
= reg_class_size
[i
];
353 return returned_if_empty
;
357 /* Used by m32c_register_move_cost to determine if a move is
358 impossibly expensive. */
360 class_can_hold_mode (reg_class_t rclass
, enum machine_mode mode
)
362 /* Cache the results: 0=untested 1=no 2=yes */
363 static char results
[LIM_REG_CLASSES
][MAX_MACHINE_MODE
];
365 if (results
[(int) rclass
][mode
] == 0)
368 results
[rclass
][mode
] = 1;
369 for (r
= 0; r
< FIRST_PSEUDO_REGISTER
; r
++)
370 if (in_hard_reg_set_p (reg_class_contents
[(int) rclass
], mode
, r
)
371 && HARD_REGNO_MODE_OK (r
, mode
))
373 results
[rclass
][mode
] = 2;
379 fprintf (stderr
, "class %s can hold %s? %s\n",
380 class_names
[(int) rclass
], mode_name
[mode
],
381 (results
[rclass
][mode
] == 2) ? "yes" : "no");
383 return results
[(int) rclass
][mode
] == 2;
386 /* Run-time Target Specification. */
388 /* Memregs are memory locations that gcc treats like general
389 registers, as there are a limited number of true registers and the
390 m32c families can use memory in most places that registers can be
393 However, since memory accesses are more expensive than registers,
394 we allow the user to limit the number of memregs available, in
395 order to try to persuade gcc to try harder to use real registers.
397 Memregs are provided by lib1funcs.S.
400 int ok_to_change_target_memregs
= TRUE
;
402 /* Implements TARGET_OPTION_OVERRIDE. */
404 #undef TARGET_OPTION_OVERRIDE
405 #define TARGET_OPTION_OVERRIDE m32c_option_override
408 m32c_option_override (void)
410 /* We limit memregs to 0..16, and provide a default. */
411 if (global_options_set
.x_target_memregs
)
413 if (target_memregs
< 0 || target_memregs
> 16)
414 error ("invalid target memregs value '%d'", target_memregs
);
422 /* This target defaults to strict volatile bitfields. */
423 if (flag_strict_volatile_bitfields
< 0 && abi_version_at_least(2))
424 flag_strict_volatile_bitfields
= 1;
426 /* r8c/m16c have no 16-bit indirect call, so thunks are involved.
427 This is always worse than an absolute call. */
429 flag_no_function_cse
= 1;
431 /* This wants to put insns between compares and their jumps. */
432 /* FIXME: The right solution is to properly trace the flags register
433 values, but that is too much work for stage 4. */
434 flag_combine_stack_adjustments
= 0;
437 #undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
438 #define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m32c_override_options_after_change
441 m32c_override_options_after_change (void)
444 flag_no_function_cse
= 1;
447 /* Defining data structures for per-function information */
449 /* The usual; we set up our machine_function data. */
/* The usual; we set up our machine_function data.  Allocates a
   zero-initialized per-function machine_function in GC memory.  */
static struct machine_function *
m32c_init_machine_status (void)
{
  return ggc_alloc_cleared_machine_function ();
}
456 /* Implements INIT_EXPANDERS. We just set up to call the above
459 m32c_init_expanders (void)
461 init_machine_status
= m32c_init_machine_status
;
466 /* Register Basics */
468 /* Basic Characteristics of Registers */
470 /* Whether a mode fits in a register is complex enough to warrant a
479 } nregs_table
[FIRST_PSEUDO_REGISTER
] =
481 { 1, 1, 2, 2, 4 }, /* r0 */
482 { 0, 1, 0, 0, 0 }, /* r2 */
483 { 1, 1, 2, 2, 0 }, /* r1 */
484 { 0, 1, 0, 0, 0 }, /* r3 */
485 { 0, 1, 1, 0, 0 }, /* a0 */
486 { 0, 1, 1, 0, 0 }, /* a1 */
487 { 0, 1, 1, 0, 0 }, /* sb */
488 { 0, 1, 1, 0, 0 }, /* fb */
489 { 0, 1, 1, 0, 0 }, /* sp */
490 { 1, 1, 1, 0, 0 }, /* pc */
491 { 0, 0, 0, 0, 0 }, /* fl */
492 { 1, 1, 1, 0, 0 }, /* ap */
493 { 1, 1, 2, 2, 4 }, /* mem0 */
494 { 1, 1, 2, 2, 4 }, /* mem1 */
495 { 1, 1, 2, 2, 4 }, /* mem2 */
496 { 1, 1, 2, 2, 4 }, /* mem3 */
497 { 1, 1, 2, 2, 4 }, /* mem4 */
498 { 1, 1, 2, 2, 0 }, /* mem5 */
499 { 1, 1, 2, 2, 0 }, /* mem6 */
500 { 1, 1, 0, 0, 0 }, /* mem7 */
503 /* Implements TARGET_CONDITIONAL_REGISTER_USAGE. We adjust the number
504 of available memregs, and select which registers need to be preserved
505 across calls based on the chip family. */
507 #undef TARGET_CONDITIONAL_REGISTER_USAGE
508 #define TARGET_CONDITIONAL_REGISTER_USAGE m32c_conditional_register_usage
510 m32c_conditional_register_usage (void)
514 if (0 <= target_memregs
&& target_memregs
<= 16)
516 /* The command line option is bytes, but our "registers" are
518 for (i
= (target_memregs
+1)/2; i
< 8; i
++)
520 fixed_regs
[MEM0_REGNO
+ i
] = 1;
521 CLEAR_HARD_REG_BIT (reg_class_contents
[MEM_REGS
], MEM0_REGNO
+ i
);
525 /* M32CM and M32C preserve more registers across function calls. */
528 call_used_regs
[R1_REGNO
] = 0;
529 call_used_regs
[R2_REGNO
] = 0;
530 call_used_regs
[R3_REGNO
] = 0;
531 call_used_regs
[A0_REGNO
] = 0;
532 call_used_regs
[A1_REGNO
] = 0;
536 /* How Values Fit in Registers */
538 /* Implements HARD_REGNO_NREGS. This is complicated by the fact that
539 different registers are different sizes from each other, *and* may
540 be different sizes in different chip families. */
542 m32c_hard_regno_nregs_1 (int regno
, enum machine_mode mode
)
544 if (regno
== FLG_REGNO
&& mode
== CCmode
)
546 if (regno
>= FIRST_PSEUDO_REGISTER
)
547 return ((GET_MODE_SIZE (mode
) + UNITS_PER_WORD
- 1) / UNITS_PER_WORD
);
549 if (regno
>= MEM0_REGNO
&& regno
<= MEM7_REGNO
)
550 return (GET_MODE_SIZE (mode
) + 1) / 2;
552 if (GET_MODE_SIZE (mode
) <= 1)
553 return nregs_table
[regno
].qi_regs
;
554 if (GET_MODE_SIZE (mode
) <= 2)
555 return nregs_table
[regno
].hi_regs
;
556 if (regno
== A0_REGNO
&& mode
== SImode
&& TARGET_A16
)
558 if ((GET_MODE_SIZE (mode
) <= 3 || mode
== PSImode
) && TARGET_A24
)
559 return nregs_table
[regno
].pi_regs
;
560 if (GET_MODE_SIZE (mode
) <= 4)
561 return nregs_table
[regno
].si_regs
;
562 if (GET_MODE_SIZE (mode
) <= 8)
563 return nregs_table
[regno
].di_regs
;
568 m32c_hard_regno_nregs (int regno
, enum machine_mode mode
)
570 int rv
= m32c_hard_regno_nregs_1 (regno
, mode
);
574 /* Implements HARD_REGNO_MODE_OK. The above function does the work
575 already; just test its return value. */
577 m32c_hard_regno_ok (int regno
, enum machine_mode mode
)
579 return m32c_hard_regno_nregs_1 (regno
, mode
) != 0;
582 /* Implements MODES_TIEABLE_P. In general, modes aren't tieable since
583 registers are all different sizes. However, since most modes are
584 bigger than our registers anyway, it's easier to implement this
585 function that way, leaving QImode as the only unique case. */
587 m32c_modes_tieable_p (enum machine_mode m1
, enum machine_mode m2
)
589 if (GET_MODE_SIZE (m1
) == GET_MODE_SIZE (m2
))
593 if (m1
== QImode
|| m2
== QImode
)
600 /* Register Classes */
602 /* Implements REGNO_REG_CLASS. */
604 m32c_regno_reg_class (int regno
)
629 if (IS_MEM_REGNO (regno
))
635 /* Implements REGNO_OK_FOR_BASE_P. */
637 m32c_regno_ok_for_base_p (int regno
)
639 if (regno
== A0_REGNO
640 || regno
== A1_REGNO
|| regno
>= FIRST_PSEUDO_REGISTER
)
645 #define DEBUG_RELOAD 0
647 /* Implements TARGET_PREFERRED_RELOAD_CLASS. In general, prefer general
648 registers of the appropriate size. */
650 #undef TARGET_PREFERRED_RELOAD_CLASS
651 #define TARGET_PREFERRED_RELOAD_CLASS m32c_preferred_reload_class
654 m32c_preferred_reload_class (rtx x
, reg_class_t rclass
)
656 reg_class_t newclass
= rclass
;
659 fprintf (stderr
, "\npreferred_reload_class for %s is ",
660 class_names
[rclass
]);
662 if (rclass
== NO_REGS
)
663 rclass
= GET_MODE (x
) == QImode
? HL_REGS
: R03_REGS
;
665 if (reg_classes_intersect_p (rclass
, CR_REGS
))
667 switch (GET_MODE (x
))
673 /* newclass = HI_REGS; */
678 else if (newclass
== QI_REGS
&& GET_MODE_SIZE (GET_MODE (x
)) > 2)
680 else if (GET_MODE_SIZE (GET_MODE (x
)) > 4
681 && ! reg_class_subset_p (R03_REGS
, rclass
))
684 rclass
= reduce_class (rclass
, newclass
, rclass
);
686 if (GET_MODE (x
) == QImode
)
687 rclass
= reduce_class (rclass
, HL_REGS
, rclass
);
690 fprintf (stderr
, "%s\n", class_names
[rclass
]);
693 if (GET_CODE (x
) == MEM
694 && GET_CODE (XEXP (x
, 0)) == PLUS
695 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == PLUS
)
696 fprintf (stderr
, "Glorm!\n");
701 /* Implements TARGET_PREFERRED_OUTPUT_RELOAD_CLASS. */
703 #undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
704 #define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS m32c_preferred_output_reload_class
707 m32c_preferred_output_reload_class (rtx x
, reg_class_t rclass
)
709 return m32c_preferred_reload_class (x
, rclass
);
712 /* Implements LIMIT_RELOAD_CLASS. We basically want to avoid using
713 address registers for reloads since they're needed for address
716 m32c_limit_reload_class (enum machine_mode mode
, int rclass
)
719 fprintf (stderr
, "limit_reload_class for %s: %s ->",
720 mode_name
[mode
], class_names
[rclass
]);
724 rclass
= reduce_class (rclass
, HL_REGS
, rclass
);
725 else if (mode
== HImode
)
726 rclass
= reduce_class (rclass
, HI_REGS
, rclass
);
727 else if (mode
== SImode
)
728 rclass
= reduce_class (rclass
, SI_REGS
, rclass
);
730 if (rclass
!= A_REGS
)
731 rclass
= reduce_class (rclass
, DI_REGS
, rclass
);
734 fprintf (stderr
, " %s\n", class_names
[rclass
]);
739 /* Implements SECONDARY_RELOAD_CLASS. QImode have to be reloaded in
740 r0 or r1, as those are the only real QImode registers. CR regs get
741 reloaded through appropriately sized general or address
744 m32c_secondary_reload_class (int rclass
, enum machine_mode mode
, rtx x
)
746 int cc
= class_contents
[rclass
][0];
748 fprintf (stderr
, "\nsecondary reload class %s %s\n",
749 class_names
[rclass
], mode_name
[mode
]);
753 && GET_CODE (x
) == MEM
&& (cc
& ~class_contents
[R23_REGS
][0]) == 0)
755 if (reg_classes_intersect_p (rclass
, CR_REGS
)
756 && GET_CODE (x
) == REG
757 && REGNO (x
) >= SB_REGNO
&& REGNO (x
) <= SP_REGNO
)
758 return (TARGET_A16
|| mode
== HImode
) ? HI_REGS
: A_REGS
;
762 /* Implements TARGET_CLASS_LIKELY_SPILLED_P. A_REGS is needed for address
765 #undef TARGET_CLASS_LIKELY_SPILLED_P
766 #define TARGET_CLASS_LIKELY_SPILLED_P m32c_class_likely_spilled_p
769 m32c_class_likely_spilled_p (reg_class_t regclass
)
771 if (regclass
== A_REGS
)
774 return (reg_class_size
[(int) regclass
] == 1);
777 /* Implements TARGET_CLASS_MAX_NREGS. We calculate this according to its
778 documented meaning, to avoid potential inconsistencies with actual
779 class definitions. */
781 #undef TARGET_CLASS_MAX_NREGS
782 #define TARGET_CLASS_MAX_NREGS m32c_class_max_nregs
785 m32c_class_max_nregs (reg_class_t regclass
, enum machine_mode mode
)
788 unsigned char max
= 0;
790 for (rn
= 0; rn
< FIRST_PSEUDO_REGISTER
; rn
++)
791 if (TEST_HARD_REG_BIT (reg_class_contents
[(int) regclass
], rn
))
793 unsigned char n
= m32c_hard_regno_nregs (rn
, mode
);
800 /* Implements CANNOT_CHANGE_MODE_CLASS. Only r0 and r1 can change to
801 QI (r0l, r1l) because the chip doesn't support QI ops on other
802 registers (well, it does on a0/a1 but if we let gcc do that, reload
803 suffers). Otherwise, we allow changes to larger modes. */
805 m32c_cannot_change_mode_class (enum machine_mode from
,
806 enum machine_mode to
, int rclass
)
810 fprintf (stderr
, "cannot change from %s to %s in %s\n",
811 mode_name
[from
], mode_name
[to
], class_names
[rclass
]);
814 /* If the larger mode isn't allowed in any of these registers, we
815 can't allow the change. */
816 for (rn
= 0; rn
< FIRST_PSEUDO_REGISTER
; rn
++)
817 if (class_contents
[rclass
][0] & (1 << rn
))
818 if (! m32c_hard_regno_ok (rn
, to
))
822 return (class_contents
[rclass
][0] & 0x1ffa);
824 if (class_contents
[rclass
][0] & 0x0005 /* r0, r1 */
825 && GET_MODE_SIZE (from
) > 1)
827 if (GET_MODE_SIZE (from
) > 2) /* all other regs */
833 /* Helpers for the rest of the file. */
834 /* TRUE if the rtx is a REG rtx for the given register. */
835 #define IS_REG(rtx,regno) (GET_CODE (rtx) == REG \
836 && REGNO (rtx) == regno)
837 /* TRUE if the rtx is a pseudo - specifically, one we can use as a
838 base register in address calculations (hence the "strict"
840 #define IS_PSEUDO(rtx,strict) (!strict && GET_CODE (rtx) == REG \
841 && (REGNO (rtx) == AP_REGNO \
842 || REGNO (rtx) >= FIRST_PSEUDO_REGISTER))
844 #define A0_OR_PSEUDO(x) (IS_REG(x, A0_REGNO) || REGNO (x) >= FIRST_PSEUDO_REGISTER)
846 /* Implements EXTRA_CONSTRAINT_STR (see next function too). 'S' is
847 for memory constraints, plus "Rpa" for PARALLEL rtx's we use for
848 call return values. */
850 m32c_matches_constraint_p (rtx value
, int constraint
)
852 encode_pattern (value
);
854 switch (constraint
) {
856 return (far_addr_space_p (value
)
858 && A0_OR_PSEUDO (patternr
[1])
859 && GET_MODE (patternr
[1]) == SImode
)
860 || (RTX_IS ("m+^Sri")
861 && A0_OR_PSEUDO (patternr
[4])
862 && GET_MODE (patternr
[4]) == HImode
)
863 || (RTX_IS ("m+^Srs")
864 && A0_OR_PSEUDO (patternr
[4])
865 && GET_MODE (patternr
[4]) == HImode
)
866 || (RTX_IS ("m+^S+ris")
867 && A0_OR_PSEUDO (patternr
[5])
868 && GET_MODE (patternr
[5]) == HImode
)
872 /* This is the common "src/dest" address */
874 if (GET_CODE (value
) == MEM
&& CONSTANT_P (XEXP (value
, 0)))
876 if (RTX_IS ("ms") || RTX_IS ("m+si"))
878 if (RTX_IS ("m++rii"))
880 if (REGNO (patternr
[3]) == FB_REGNO
881 && INTVAL (patternr
[4]) == 0)
886 else if (RTX_IS ("m+ri") || RTX_IS ("m+rs") || RTX_IS ("m+r+si"))
890 if (REGNO (r
) == SP_REGNO
)
892 return m32c_legitimate_address_p (GET_MODE (value
), XEXP (value
, 0), 1);
899 else if (RTX_IS ("m+ri"))
903 return (IS_REG (r
, A0_REGNO
) || IS_REG (r
, A1_REGNO
));
906 return (RTX_IS ("mi") || RTX_IS ("ms") || RTX_IS ("m+si"));
908 return ((RTX_IS ("mr")
909 && (IS_REG (patternr
[1], SP_REGNO
)))
910 || (RTX_IS ("m+ri") && (IS_REG (patternr
[2], SP_REGNO
))));
912 return ((RTX_IS ("mr")
913 && (IS_REG (patternr
[1], FB_REGNO
)))
914 || (RTX_IS ("m+ri") && (IS_REG (patternr
[2], FB_REGNO
))));
916 return ((RTX_IS ("mr")
917 && (IS_REG (patternr
[1], SB_REGNO
)))
918 || (RTX_IS ("m+ri") && (IS_REG (patternr
[2], SB_REGNO
))));
920 /* Absolute addresses 0..0x1fff used for bit addressing (I/O ports) */
921 return (RTX_IS ("mi")
922 && !(INTVAL (patternr
[1]) & ~0x1fff));
924 return r1h_operand (value
, QImode
);
926 return GET_CODE (value
) == PARALLEL
;
932 /* STACK AND CALLING */
936 /* Implements RETURN_ADDR_RTX. Note that R8C and M16C push 24 bits
937 (yes, THREE bytes) onto the stack for the return address, but we
938 don't support pointers bigger than 16 bits on those chips. This
939 will likely wreak havoc with exception unwinding. FIXME. */
941 m32c_return_addr_rtx (int count
)
943 enum machine_mode mode
;
953 /* It's four bytes */
959 /* FIXME: it's really 3 bytes */
965 gen_rtx_MEM (mode
, plus_constant (Pmode
, gen_rtx_REG (Pmode
, FP_REGNO
),
967 return copy_to_mode_reg (mode
, ra_mem
);
970 /* Implements INCOMING_RETURN_ADDR_RTX. See comment above. */
972 m32c_incoming_return_addr_rtx (void)
975 return gen_rtx_MEM (PSImode
, gen_rtx_REG (PSImode
, SP_REGNO
));
978 /* Exception Handling Support */
980 /* Implements EH_RETURN_DATA_REGNO. Choose registers able to hold
983 m32c_eh_return_data_regno (int n
)
995 return INVALID_REGNUM
;
999 /* Implements EH_RETURN_STACKADJ_RTX. Saved and used later in
1000 m32c_emit_eh_epilogue. */
1002 m32c_eh_return_stackadj_rtx (void)
1004 if (!cfun
->machine
->eh_stack_adjust
)
1008 sa
= gen_rtx_REG (Pmode
, R0_REGNO
);
1009 cfun
->machine
->eh_stack_adjust
= sa
;
1011 return cfun
->machine
->eh_stack_adjust
;
1014 /* Registers That Address the Stack Frame */
1016 /* Implements DWARF_FRAME_REGNUM and DBX_REGISTER_NUMBER. Note that
1017 the original spec called for dwarf numbers to vary with register
1018 width as well, for example, r0l, r0, and r2r0 would each have
1019 different dwarf numbers. GCC doesn't support this, and we don't do
1020 it, and gdb seems to like it this way anyway. */
1022 m32c_dwarf_frame_regnum (int n
)
1048 return DWARF_FRAME_REGISTERS
+ 1;
1052 /* The frame looks like this:
1054 ap -> +------------------------------
1055 | Return address (3 or 4 bytes)
1056 | Saved FB (2 or 4 bytes)
1057 fb -> +------------------------------
1060 | through r0 as needed
1061 sp -> +------------------------------
1064 /* We use this to wrap all emitted insns in the prologue. */
1068 RTX_FRAME_RELATED_P (x
) = 1;
1072 /* This maps register numbers to the PUSHM/POPM bitfield, and tells us
1073 how much the stack pointer moves for each, for each cpu family. */
1082 /* These are in reverse push (nearest-to-sp) order. */
1083 { R0_REGNO
, 0x80, 2, 2 },
1084 { R1_REGNO
, 0x40, 2, 2 },
1085 { R2_REGNO
, 0x20, 2, 2 },
1086 { R3_REGNO
, 0x10, 2, 2 },
1087 { A0_REGNO
, 0x08, 2, 4 },
1088 { A1_REGNO
, 0x04, 2, 4 },
1089 { SB_REGNO
, 0x02, 2, 4 },
1090 { FB_REGNO
, 0x01, 2, 4 }
1093 #define PUSHM_N (sizeof(pushm_info)/sizeof(pushm_info[0]))
1095 /* Returns TRUE if we need to save/restore the given register. We
1096 save everything for exception handlers, so that any register can be
1097 unwound. For interrupt handlers, we save everything if the handler
1098 calls something else (because we don't know what *that* function
1099 might do), but try to be a bit smarter if the handler is a leaf
1100 function. We always save $a0, though, because we use that in the
1101 epilogue to copy $fb to $sp. */
1103 need_to_save (int regno
)
1105 if (fixed_regs
[regno
])
1107 if (crtl
->calls_eh_return
)
1109 if (regno
== FP_REGNO
)
1111 if (cfun
->machine
->is_interrupt
1112 && (!cfun
->machine
->is_leaf
1113 || (regno
== A0_REGNO
1114 && m32c_function_needs_enter ())
1117 if (df_regs_ever_live_p (regno
)
1118 && (!call_used_regs
[regno
] || cfun
->machine
->is_interrupt
))
1123 /* This function contains all the intelligence about saving and
1124 restoring registers. It always figures out the register save set.
1125 When called with PP_justcount, it merely returns the size of the
1126 save set (for eliminating the frame pointer, for example). When
1127 called with PP_pushm or PP_popm, it emits the appropriate
1128 instructions for saving (pushm) or restoring (popm) the
1131 m32c_pushm_popm (Push_Pop_Type ppt
)
1134 int byte_count
= 0, bytes
;
1136 rtx dwarf_set
[PUSHM_N
];
1138 int nosave_mask
= 0;
1140 if (crtl
->return_rtx
1141 && GET_CODE (crtl
->return_rtx
) == PARALLEL
1142 && !(crtl
->calls_eh_return
|| cfun
->machine
->is_interrupt
))
1144 rtx exp
= XVECEXP (crtl
->return_rtx
, 0, 0);
1145 rtx rv
= XEXP (exp
, 0);
1146 int rv_bytes
= GET_MODE_SIZE (GET_MODE (rv
));
1149 nosave_mask
|= 0x20; /* PSI, SI */
1151 nosave_mask
|= 0xf0; /* DF */
1153 nosave_mask
|= 0x50; /* DI */
1156 for (i
= 0; i
< (int) PUSHM_N
; i
++)
1158 /* Skip if neither register needs saving. */
1159 if (!need_to_save (pushm_info
[i
].reg1
))
1162 if (pushm_info
[i
].bit
& nosave_mask
)
1165 reg_mask
|= pushm_info
[i
].bit
;
1166 bytes
= TARGET_A16
? pushm_info
[i
].a16_bytes
: pushm_info
[i
].a24_bytes
;
1168 if (ppt
== PP_pushm
)
1170 enum machine_mode mode
= (bytes
== 2) ? HImode
: SImode
;
1173 /* Always use stack_pointer_rtx instead of calling
1174 rtx_gen_REG ourselves. Code elsewhere in GCC assumes
1175 that there is a single rtx representing the stack pointer,
1176 namely stack_pointer_rtx, and uses == to recognize it. */
1177 addr
= stack_pointer_rtx
;
1179 if (byte_count
!= 0)
1180 addr
= gen_rtx_PLUS (GET_MODE (addr
), addr
, GEN_INT (byte_count
));
1182 dwarf_set
[n_dwarfs
++] =
1183 gen_rtx_SET (VOIDmode
,
1184 gen_rtx_MEM (mode
, addr
),
1185 gen_rtx_REG (mode
, pushm_info
[i
].reg1
));
1186 F (dwarf_set
[n_dwarfs
- 1]);
1189 byte_count
+= bytes
;
1192 if (cfun
->machine
->is_interrupt
)
1194 cfun
->machine
->intr_pushm
= reg_mask
& 0xfe;
1199 if (cfun
->machine
->is_interrupt
)
1200 for (i
= MEM0_REGNO
; i
<= MEM7_REGNO
; i
++)
1201 if (need_to_save (i
))
1204 cfun
->machine
->intr_pushmem
[i
- MEM0_REGNO
] = 1;
1207 if (ppt
== PP_pushm
&& byte_count
)
1209 rtx note
= gen_rtx_SEQUENCE (VOIDmode
, rtvec_alloc (n_dwarfs
+ 1));
1214 XVECEXP (note
, 0, 0)
1215 = gen_rtx_SET (VOIDmode
,
1217 gen_rtx_PLUS (GET_MODE (stack_pointer_rtx
),
1219 GEN_INT (-byte_count
)));
1220 F (XVECEXP (note
, 0, 0));
1222 for (i
= 0; i
< n_dwarfs
; i
++)
1223 XVECEXP (note
, 0, i
+ 1) = dwarf_set
[i
];
1225 pushm
= F (emit_insn (gen_pushm (GEN_INT (reg_mask
))));
1227 add_reg_note (pushm
, REG_FRAME_RELATED_EXPR
, note
);
1230 if (cfun
->machine
->is_interrupt
)
1231 for (i
= MEM0_REGNO
; i
<= MEM7_REGNO
; i
++)
1232 if (cfun
->machine
->intr_pushmem
[i
- MEM0_REGNO
])
1235 pushm
= emit_insn (gen_pushhi_16 (gen_rtx_REG (HImode
, i
)));
1237 pushm
= emit_insn (gen_pushhi_24 (gen_rtx_REG (HImode
, i
)));
1241 if (ppt
== PP_popm
&& byte_count
)
1243 if (cfun
->machine
->is_interrupt
)
1244 for (i
= MEM7_REGNO
; i
>= MEM0_REGNO
; i
--)
1245 if (cfun
->machine
->intr_pushmem
[i
- MEM0_REGNO
])
1248 emit_insn (gen_pophi_16 (gen_rtx_REG (HImode
, i
)));
1250 emit_insn (gen_pophi_24 (gen_rtx_REG (HImode
, i
)));
1253 emit_insn (gen_popm (GEN_INT (reg_mask
)));
1259 /* Implements INITIAL_ELIMINATION_OFFSET. See the comment above that
1260 diagrams our call frame. */
1262 m32c_initial_elimination_offset (int from
, int to
)
1266 if (from
== AP_REGNO
)
1276 ofs
+= m32c_pushm_popm (PP_justcount
);
1277 ofs
+= get_frame_size ();
1280 /* Account for push rounding. */
1282 ofs
= (ofs
+ 1) & ~1;
1284 fprintf (stderr
, "initial_elimination_offset from=%d to=%d, ofs=%d\n", from
,
1290 /* Passing Function Arguments on the Stack */
1292 /* Implements PUSH_ROUNDING. The R8C and M16C have byte stacks, the
1293 M32C has word stacks. */
1295 m32c_push_rounding (int n
)
1297 if (TARGET_R8C
|| TARGET_M16C
)
1299 return (n
+ 1) & ~1;
1302 /* Passing Arguments in Registers */
1304 /* Implements TARGET_FUNCTION_ARG. Arguments are passed partly in
1305 registers, partly on stack. If our function returns a struct, a
1306 pointer to a buffer for it is at the top of the stack (last thing
1307 pushed). The first few real arguments may be in registers as
1310 R8C/M16C: arg1 in r1 if it's QI or HI (else it's pushed on stack)
1311 arg2 in r2 if it's HI (else pushed on stack)
1313 M32C: arg1 in r0 if it's QI or HI (else it's pushed on stack)
1316 Structs are not passed in registers, even if they fit. Only
1317 integer and pointer types are passed in registers.
1319 Note that when arg1 doesn't fit in r1, arg2 may still be passed in
1321 #undef TARGET_FUNCTION_ARG
1322 #define TARGET_FUNCTION_ARG m32c_function_arg
1324 m32c_function_arg (cumulative_args_t ca_v
,
1325 enum machine_mode mode
, const_tree type
, bool named
)
1327 CUMULATIVE_ARGS
*ca
= get_cumulative_args (ca_v
);
1329 /* Can return a reg, parallel, or 0 for stack */
1332 fprintf (stderr
, "func_arg %d (%s, %d)\n",
1333 ca
->parm_num
, mode_name
[mode
], named
);
1337 if (mode
== VOIDmode
)
1340 if (ca
->force_mem
|| !named
)
1343 fprintf (stderr
, "func arg: force %d named %d, mem\n", ca
->force_mem
,
1349 if (type
&& INTEGRAL_TYPE_P (type
) && POINTER_TYPE_P (type
))
1352 if (type
&& AGGREGATE_TYPE_P (type
))
1355 switch (ca
->parm_num
)
1358 if (GET_MODE_SIZE (mode
) == 1 || GET_MODE_SIZE (mode
) == 2)
1359 rv
= gen_rtx_REG (mode
, TARGET_A16
? R1_REGNO
: R0_REGNO
);
1363 if (TARGET_A16
&& GET_MODE_SIZE (mode
) == 2)
1364 rv
= gen_rtx_REG (mode
, R2_REGNO
);
1374 #undef TARGET_PASS_BY_REFERENCE
1375 #define TARGET_PASS_BY_REFERENCE m32c_pass_by_reference
1377 m32c_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED
,
1378 enum machine_mode mode ATTRIBUTE_UNUSED
,
1379 const_tree type ATTRIBUTE_UNUSED
,
1380 bool named ATTRIBUTE_UNUSED
)
1385 /* Implements INIT_CUMULATIVE_ARGS. */
1387 m32c_init_cumulative_args (CUMULATIVE_ARGS
* ca
,
1389 rtx libname ATTRIBUTE_UNUSED
,
1391 int n_named_args ATTRIBUTE_UNUSED
)
1393 if (fntype
&& aggregate_value_p (TREE_TYPE (fntype
), fndecl
))
1400 /* Implements TARGET_FUNCTION_ARG_ADVANCE. force_mem is set for
1401 functions returning structures, so we always reset that. Otherwise,
1402 we only need to know the sequence number of the argument to know what
1404 #undef TARGET_FUNCTION_ARG_ADVANCE
1405 #define TARGET_FUNCTION_ARG_ADVANCE m32c_function_arg_advance
1407 m32c_function_arg_advance (cumulative_args_t ca_v
,
1408 enum machine_mode mode ATTRIBUTE_UNUSED
,
1409 const_tree type ATTRIBUTE_UNUSED
,
1410 bool named ATTRIBUTE_UNUSED
)
1412 CUMULATIVE_ARGS
*ca
= get_cumulative_args (ca_v
);
1420 /* Implements TARGET_FUNCTION_ARG_BOUNDARY. */
1421 #undef TARGET_FUNCTION_ARG_BOUNDARY
1422 #define TARGET_FUNCTION_ARG_BOUNDARY m32c_function_arg_boundary
1424 m32c_function_arg_boundary (enum machine_mode mode ATTRIBUTE_UNUSED
,
1425 const_tree type ATTRIBUTE_UNUSED
)
1427 return (TARGET_A16
? 8 : 16);
1430 /* Implements FUNCTION_ARG_REGNO_P. */
1432 m32c_function_arg_regno_p (int r
)
1435 return (r
== R0_REGNO
);
1436 return (r
== R1_REGNO
|| r
== R2_REGNO
);
1439 /* HImode and PSImode are the two "native" modes as far as GCC is
1440 concerned, but the chips also support a 32-bit mode which is used
1441 for some opcodes in R8C/M16C and for reset vectors and such. */
1442 #undef TARGET_VALID_POINTER_MODE
1443 #define TARGET_VALID_POINTER_MODE m32c_valid_pointer_mode
1445 m32c_valid_pointer_mode (enum machine_mode mode
)
1455 /* How Scalar Function Values Are Returned */
1457 /* Implements TARGET_LIBCALL_VALUE. Most values are returned in $r0, or some
1458 combination of registers starting there (r2r0 for longs, r3r1r2r0
1459 for long long, r3r2r1r0 for doubles), except that that ABI
1460 currently doesn't work because it ends up using all available
1461 general registers and gcc often can't compile it. So, instead, we
1462 return anything bigger than 16 bits in "mem0" (effectively, a
1463 memory location). */
1465 #undef TARGET_LIBCALL_VALUE
1466 #define TARGET_LIBCALL_VALUE m32c_libcall_value
1469 m32c_libcall_value (enum machine_mode mode
, const_rtx fun ATTRIBUTE_UNUSED
)
1471 /* return reg or parallel */
1473 /* FIXME: GCC has difficulty returning large values in registers,
1474 because that ties up most of the general registers and gives the
1475 register allocator little to work with. Until we can resolve
1476 this, large values are returned in memory. */
1481 rv
= gen_rtx_PARALLEL (mode
, rtvec_alloc (4));
1482 XVECEXP (rv
, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode
,
1483 gen_rtx_REG (HImode
,
1486 XVECEXP (rv
, 0, 1) = gen_rtx_EXPR_LIST (VOIDmode
,
1487 gen_rtx_REG (HImode
,
1490 XVECEXP (rv
, 0, 2) = gen_rtx_EXPR_LIST (VOIDmode
,
1491 gen_rtx_REG (HImode
,
1494 XVECEXP (rv
, 0, 3) = gen_rtx_EXPR_LIST (VOIDmode
,
1495 gen_rtx_REG (HImode
,
1501 if (TARGET_A24
&& GET_MODE_SIZE (mode
) > 2)
1505 rv
= gen_rtx_PARALLEL (mode
, rtvec_alloc (1));
1506 XVECEXP (rv
, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode
,
1514 if (GET_MODE_SIZE (mode
) > 2)
1515 return gen_rtx_REG (mode
, MEM0_REGNO
);
1516 return gen_rtx_REG (mode
, R0_REGNO
);
1519 /* Implements TARGET_FUNCTION_VALUE. Functions and libcalls have the same
1522 #undef TARGET_FUNCTION_VALUE
1523 #define TARGET_FUNCTION_VALUE m32c_function_value
1526 m32c_function_value (const_tree valtype
,
1527 const_tree fn_decl_or_type ATTRIBUTE_UNUSED
,
1528 bool outgoing ATTRIBUTE_UNUSED
)
1530 /* return reg or parallel */
1531 const enum machine_mode mode
= TYPE_MODE (valtype
);
1532 return m32c_libcall_value (mode
, NULL_RTX
);
1535 /* Implements TARGET_FUNCTION_VALUE_REGNO_P. */
1537 #undef TARGET_FUNCTION_VALUE_REGNO_P
1538 #define TARGET_FUNCTION_VALUE_REGNO_P m32c_function_value_regno_p
1541 m32c_function_value_regno_p (const unsigned int regno
)
1543 return (regno
== R0_REGNO
|| regno
== MEM0_REGNO
);
1546 /* How Large Values Are Returned */
1548 /* We return structures by pushing the address on the stack, even if
1549 we use registers for the first few "real" arguments. */
1550 #undef TARGET_STRUCT_VALUE_RTX
1551 #define TARGET_STRUCT_VALUE_RTX m32c_struct_value_rtx
1553 m32c_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED
,
1554 int incoming ATTRIBUTE_UNUSED
)
1559 /* Function Entry and Exit */
1561 /* Implements EPILOGUE_USES. Interrupts restore all registers. */
1563 m32c_epilogue_uses (int regno ATTRIBUTE_UNUSED
)
1565 if (cfun
->machine
->is_interrupt
)
1570 /* Implementing the Varargs Macros */
1572 #undef TARGET_STRICT_ARGUMENT_NAMING
1573 #define TARGET_STRICT_ARGUMENT_NAMING m32c_strict_argument_naming
1575 m32c_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED
)
1580 /* Trampolines for Nested Functions */
1584 1 0000 75C43412 mov.w #0x1234,a0
1585 2 0004 FC000000 jmp.a label
1588 1 0000 BC563412 mov.l:s #0x123456,a0
1589 2 0004 CC000000 jmp.a label
1592 /* Implements TRAMPOLINE_SIZE. */
1594 m32c_trampoline_size (void)
1596 /* Allocate extra space so we can avoid the messy shifts when we
1597 initialize the trampoline; we just write past the end of the
1599 return TARGET_A16
? 8 : 10;
1602 /* Implements TRAMPOLINE_ALIGNMENT. */
1604 m32c_trampoline_alignment (void)
1609 /* Implements TARGET_TRAMPOLINE_INIT. */
1611 #undef TARGET_TRAMPOLINE_INIT
1612 #define TARGET_TRAMPOLINE_INIT m32c_trampoline_init
1614 m32c_trampoline_init (rtx m_tramp
, tree fndecl
, rtx chainval
)
1616 rtx function
= XEXP (DECL_RTL (fndecl
), 0);
1618 #define A0(m,i) adjust_address (m_tramp, m, i)
1621 /* Note: we subtract a "word" because the moves want signed
1622 constants, not unsigned constants. */
1623 emit_move_insn (A0 (HImode
, 0), GEN_INT (0xc475 - 0x10000));
1624 emit_move_insn (A0 (HImode
, 2), chainval
);
1625 emit_move_insn (A0 (QImode
, 4), GEN_INT (0xfc - 0x100));
1626 /* We use 16-bit addresses here, but store the zero to turn it
1627 into a 24-bit offset. */
1628 emit_move_insn (A0 (HImode
, 5), function
);
1629 emit_move_insn (A0 (QImode
, 7), GEN_INT (0x00));
1633 /* Note that the PSI moves actually write 4 bytes. Make sure we
1634 write stuff out in the right order, and leave room for the
1635 extra byte at the end. */
1636 emit_move_insn (A0 (QImode
, 0), GEN_INT (0xbc - 0x100));
1637 emit_move_insn (A0 (PSImode
, 1), chainval
);
1638 emit_move_insn (A0 (QImode
, 4), GEN_INT (0xcc - 0x100));
1639 emit_move_insn (A0 (PSImode
, 5), function
);
1644 /* Addressing Modes */
1646 /* The r8c/m32c family supports a wide range of non-orthogonal
1647 addressing modes, including the ability to double-indirect on *some*
1648 of them. Not all insns support all modes, either, but we rely on
1649 predicates and constraints to deal with that. */
1650 #undef TARGET_LEGITIMATE_ADDRESS_P
1651 #define TARGET_LEGITIMATE_ADDRESS_P m32c_legitimate_address_p
1653 m32c_legitimate_address_p (enum machine_mode mode
, rtx x
, bool strict
)
1659 if (TARGET_A16
&& GET_MODE (x
) != HImode
&& GET_MODE (x
) != SImode
)
1661 if (TARGET_A24
&& GET_MODE (x
) != PSImode
)
1664 /* Wide references to memory will be split after reload, so we must
1665 ensure that all parts of such splits remain legitimate
1667 mode_adjust
= GET_MODE_SIZE (mode
) - 1;
1669 /* allowing PLUS yields mem:HI(plus:SI(mem:SI(plus:SI in m32c_split_move */
1670 if (GET_CODE (x
) == PRE_DEC
1671 || GET_CODE (x
) == POST_INC
|| GET_CODE (x
) == PRE_MODIFY
)
1673 return (GET_CODE (XEXP (x
, 0)) == REG
1674 && REGNO (XEXP (x
, 0)) == SP_REGNO
);
1678 /* This is the double indirection detection, but it currently
1679 doesn't work as cleanly as this code implies, so until we've had
1680 a chance to debug it, leave it disabled. */
1681 if (TARGET_A24
&& GET_CODE (x
) == MEM
&& GET_CODE (XEXP (x
, 0)) != PLUS
)
1684 fprintf (stderr
, "double indirect\n");
1693 /* Most indexable registers can be used without displacements,
1694 although some of them will be emitted with an explicit zero
1695 to please the assembler. */
1696 switch (REGNO (patternr
[0]))
1702 if (TARGET_A16
&& GET_MODE (x
) == SImode
)
1708 if (IS_PSEUDO (patternr
[0], strict
))
1714 if (TARGET_A16
&& GET_MODE (x
) == SImode
)
1719 /* This is more interesting, because different base registers
1720 allow for different displacements - both range and signedness
1721 - and it differs from chip series to chip series too. */
1722 int rn
= REGNO (patternr
[1]);
1723 HOST_WIDE_INT offs
= INTVAL (patternr
[2]);
1729 /* The syntax only allows positive offsets, but when the
1730 offsets span the entire memory range, we can simulate
1731 negative offsets by wrapping. */
1733 return (offs
>= -65536 && offs
<= 65535 - mode_adjust
);
1735 return (offs
>= 0 && offs
<= 65535 - mode_adjust
);
1737 return (offs
>= -16777216 && offs
<= 16777215);
1741 return (offs
>= -128 && offs
<= 127 - mode_adjust
);
1742 return (offs
>= -65536 && offs
<= 65535 - mode_adjust
);
1745 return (offs
>= -128 && offs
<= 127 - mode_adjust
);
1748 if (IS_PSEUDO (patternr
[1], strict
))
1753 if (RTX_IS ("+rs") || RTX_IS ("+r+si"))
1755 rtx reg
= patternr
[1];
1757 /* We don't know where the symbol is, so only allow base
1758 registers which support displacements spanning the whole
1760 switch (REGNO (reg
))
1764 /* $sb needs a secondary reload, but since it's involved in
1765 memory address reloads too, we don't deal with it very
1767 /* case SB_REGNO: */
1770 if (IS_PSEUDO (reg
, strict
))
1778 /* Implements REG_OK_FOR_BASE_P. */
1780 m32c_reg_ok_for_base_p (rtx x
, int strict
)
1782 if (GET_CODE (x
) != REG
)
1793 if (IS_PSEUDO (x
, strict
))
1799 /* We have three choices for choosing fb->aN offsets. If we choose -128,
1800 we need one MOVA -128[fb],aN opcode and 16-bit aN displacements,
1802 EB 4B FF mova -128[$fb],$a0
1803 D8 0C FF FF mov.w:Q #0,-1[$a0]
1805 Alternately, we subtract the frame size, and hopefully use 8-bit aN
1808 77 54 00 01 sub #256,$a0
1809 D8 08 01 mov.w:Q #0,1[$a0]
1811 If we don't offset (i.e. offset by zero), we end up with:
1813 D8 0C 00 FF mov.w:Q #0,-256[$a0]
1815 We have to subtract *something* so that we have a PLUS rtx to mark
1816 that we've done this reload. The -128 offset will never result in
1817 an 8-bit aN offset, and the payoff for the second case is five
1818 loads *if* those loads are within 256 bytes of the other end of the
1819 frame, so the third case seems best. Note that we subtract the
1820 zero, but detect that in the addhi3 pattern. */
1822 #define BIG_FB_ADJ 0
1824 /* Implements LEGITIMIZE_ADDRESS. The only address we really have to
1825 worry about is frame base offsets, as $fb has a limited
1826 displacement range. We deal with this by attempting to reload $fb
1827 itself into an address register; that seems to result in the best
1829 #undef TARGET_LEGITIMIZE_ADDRESS
1830 #define TARGET_LEGITIMIZE_ADDRESS m32c_legitimize_address
1832 m32c_legitimize_address (rtx x
, rtx oldx ATTRIBUTE_UNUSED
,
1833 enum machine_mode mode
)
1836 fprintf (stderr
, "m32c_legitimize_address for mode %s\n", mode_name
[mode
]);
1838 fprintf (stderr
, "\n");
1841 if (GET_CODE (x
) == PLUS
1842 && GET_CODE (XEXP (x
, 0)) == REG
1843 && REGNO (XEXP (x
, 0)) == FB_REGNO
1844 && GET_CODE (XEXP (x
, 1)) == CONST_INT
1845 && (INTVAL (XEXP (x
, 1)) < -128
1846 || INTVAL (XEXP (x
, 1)) > (128 - GET_MODE_SIZE (mode
))))
1848 /* reload FB to A_REGS */
1849 rtx temp
= gen_reg_rtx (Pmode
);
1851 emit_insn (gen_rtx_SET (VOIDmode
, temp
, XEXP (x
, 0)));
1858 /* Implements LEGITIMIZE_RELOAD_ADDRESS. See comment above. */
1860 m32c_legitimize_reload_address (rtx
* x
,
1861 enum machine_mode mode
,
1863 int type
, int ind_levels ATTRIBUTE_UNUSED
)
1866 fprintf (stderr
, "\nm32c_legitimize_reload_address for mode %s\n",
1871 /* At one point, this function tried to get $fb copied to an address
1872 register, which in theory would maximize sharing, but gcc was
1873 *also* still trying to reload the whole address, and we'd run out
1874 of address registers. So we let gcc do the naive (but safe)
1875 reload instead, when the above function doesn't handle it for
1878 The code below is a second attempt at the above. */
1880 if (GET_CODE (*x
) == PLUS
1881 && GET_CODE (XEXP (*x
, 0)) == REG
1882 && REGNO (XEXP (*x
, 0)) == FB_REGNO
1883 && GET_CODE (XEXP (*x
, 1)) == CONST_INT
1884 && (INTVAL (XEXP (*x
, 1)) < -128
1885 || INTVAL (XEXP (*x
, 1)) > (128 - GET_MODE_SIZE (mode
))))
1888 int offset
= INTVAL (XEXP (*x
, 1));
1889 int adjustment
= -BIG_FB_ADJ
;
1891 sum
= gen_rtx_PLUS (Pmode
, XEXP (*x
, 0),
1892 GEN_INT (adjustment
));
1893 *x
= gen_rtx_PLUS (Pmode
, sum
, GEN_INT (offset
- adjustment
));
1894 if (type
== RELOAD_OTHER
)
1895 type
= RELOAD_FOR_OTHER_ADDRESS
;
1896 push_reload (sum
, NULL_RTX
, &XEXP (*x
, 0), NULL
,
1897 A_REGS
, Pmode
, VOIDmode
, 0, 0, opnum
,
1898 (enum reload_type
) type
);
1902 if (GET_CODE (*x
) == PLUS
1903 && GET_CODE (XEXP (*x
, 0)) == PLUS
1904 && GET_CODE (XEXP (XEXP (*x
, 0), 0)) == REG
1905 && REGNO (XEXP (XEXP (*x
, 0), 0)) == FB_REGNO
1906 && GET_CODE (XEXP (XEXP (*x
, 0), 1)) == CONST_INT
1907 && GET_CODE (XEXP (*x
, 1)) == CONST_INT
1910 if (type
== RELOAD_OTHER
)
1911 type
= RELOAD_FOR_OTHER_ADDRESS
;
1912 push_reload (XEXP (*x
, 0), NULL_RTX
, &XEXP (*x
, 0), NULL
,
1913 A_REGS
, Pmode
, VOIDmode
, 0, 0, opnum
,
1914 (enum reload_type
) type
);
1921 /* Return the appropriate mode for a named address pointer. */
1922 #undef TARGET_ADDR_SPACE_POINTER_MODE
1923 #define TARGET_ADDR_SPACE_POINTER_MODE m32c_addr_space_pointer_mode
1924 static enum machine_mode
1925 m32c_addr_space_pointer_mode (addr_space_t addrspace
)
1929 case ADDR_SPACE_GENERIC
:
1930 return TARGET_A24
? PSImode
: HImode
;
1931 case ADDR_SPACE_FAR
:
1938 /* Return the appropriate mode for a named address address. */
1939 #undef TARGET_ADDR_SPACE_ADDRESS_MODE
1940 #define TARGET_ADDR_SPACE_ADDRESS_MODE m32c_addr_space_address_mode
1941 static enum machine_mode
1942 m32c_addr_space_address_mode (addr_space_t addrspace
)
1946 case ADDR_SPACE_GENERIC
:
1947 return TARGET_A24
? PSImode
: HImode
;
1948 case ADDR_SPACE_FAR
:
1955 /* Like m32c_legitimate_address_p, except with named addresses. */
1956 #undef TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P
1957 #define TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P \
1958 m32c_addr_space_legitimate_address_p
1960 m32c_addr_space_legitimate_address_p (enum machine_mode mode
, rtx x
,
1961 bool strict
, addr_space_t as
)
1963 if (as
== ADDR_SPACE_FAR
)
1970 if (GET_MODE (x
) != SImode
)
1972 switch (REGNO (patternr
[0]))
1978 if (IS_PSEUDO (patternr
[0], strict
))
1983 if (RTX_IS ("+^Sri"))
1985 int rn
= REGNO (patternr
[3]);
1986 HOST_WIDE_INT offs
= INTVAL (patternr
[4]);
1987 if (GET_MODE (patternr
[3]) != HImode
)
1992 return (offs
>= 0 && offs
<= 0xfffff);
1995 if (IS_PSEUDO (patternr
[3], strict
))
2000 if (RTX_IS ("+^Srs"))
2002 int rn
= REGNO (patternr
[3]);
2003 if (GET_MODE (patternr
[3]) != HImode
)
2011 if (IS_PSEUDO (patternr
[3], strict
))
2016 if (RTX_IS ("+^S+ris"))
2018 int rn
= REGNO (patternr
[4]);
2019 if (GET_MODE (patternr
[4]) != HImode
)
2027 if (IS_PSEUDO (patternr
[4], strict
))
2039 else if (as
!= ADDR_SPACE_GENERIC
)
2042 return m32c_legitimate_address_p (mode
, x
, strict
);
2045 /* Like m32c_legitimate_address, except with named address support. */
2046 #undef TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS
2047 #define TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS m32c_addr_space_legitimize_address
2049 m32c_addr_space_legitimize_address (rtx x
, rtx oldx
, enum machine_mode mode
,
2052 if (as
!= ADDR_SPACE_GENERIC
)
2055 fprintf (stderr
, "\033[36mm32c_addr_space_legitimize_address for mode %s\033[0m\n", mode_name
[mode
]);
2057 fprintf (stderr
, "\n");
2060 if (GET_CODE (x
) != REG
)
2062 x
= force_reg (SImode
, x
);
2067 return m32c_legitimize_address (x
, oldx
, mode
);
2070 /* Determine if one named address space is a subset of another. */
2071 #undef TARGET_ADDR_SPACE_SUBSET_P
2072 #define TARGET_ADDR_SPACE_SUBSET_P m32c_addr_space_subset_p
2074 m32c_addr_space_subset_p (addr_space_t subset
, addr_space_t superset
)
2076 gcc_assert (subset
== ADDR_SPACE_GENERIC
|| subset
== ADDR_SPACE_FAR
);
2077 gcc_assert (superset
== ADDR_SPACE_GENERIC
|| superset
== ADDR_SPACE_FAR
);
2079 if (subset
== superset
)
2083 return (subset
== ADDR_SPACE_GENERIC
&& superset
== ADDR_SPACE_FAR
);
2086 #undef TARGET_ADDR_SPACE_CONVERT
2087 #define TARGET_ADDR_SPACE_CONVERT m32c_addr_space_convert
2088 /* Convert from one address space to another. */
2090 m32c_addr_space_convert (rtx op
, tree from_type
, tree to_type
)
2092 addr_space_t from_as
= TYPE_ADDR_SPACE (TREE_TYPE (from_type
));
2093 addr_space_t to_as
= TYPE_ADDR_SPACE (TREE_TYPE (to_type
));
2096 gcc_assert (from_as
== ADDR_SPACE_GENERIC
|| from_as
== ADDR_SPACE_FAR
);
2097 gcc_assert (to_as
== ADDR_SPACE_GENERIC
|| to_as
== ADDR_SPACE_FAR
);
2099 if (to_as
== ADDR_SPACE_GENERIC
&& from_as
== ADDR_SPACE_FAR
)
2101 /* This is unpredictable, as we're truncating off usable address
2104 result
= gen_reg_rtx (HImode
);
2105 emit_move_insn (result
, simplify_subreg (HImode
, op
, SImode
, 0));
2108 else if (to_as
== ADDR_SPACE_FAR
&& from_as
== ADDR_SPACE_GENERIC
)
2110 /* This always works. */
2111 result
= gen_reg_rtx (SImode
);
2112 emit_insn (gen_zero_extendhisi2 (result
, op
));
2119 /* Condition Code Status */
2121 #undef TARGET_FIXED_CONDITION_CODE_REGS
2122 #define TARGET_FIXED_CONDITION_CODE_REGS m32c_fixed_condition_code_regs
2124 m32c_fixed_condition_code_regs (unsigned int *p1
, unsigned int *p2
)
2127 *p2
= INVALID_REGNUM
;
2131 /* Describing Relative Costs of Operations */
2133 /* Implements TARGET_REGISTER_MOVE_COST. We make impossible moves
2134 prohibitively expensive, like trying to put QIs in r2/r3 (there are
2135 no opcodes to do that). We also discourage use of mem* registers
2136 since they're really memory. */
2138 #undef TARGET_REGISTER_MOVE_COST
2139 #define TARGET_REGISTER_MOVE_COST m32c_register_move_cost
2142 m32c_register_move_cost (enum machine_mode mode
, reg_class_t from
,
2145 int cost
= COSTS_N_INSNS (3);
2148 /* FIXME: pick real values, but not 2 for now. */
2149 COPY_HARD_REG_SET (cc
, reg_class_contents
[(int) from
]);
2150 IOR_HARD_REG_SET (cc
, reg_class_contents
[(int) to
]);
2153 && hard_reg_set_intersect_p (cc
, reg_class_contents
[R23_REGS
]))
2155 if (hard_reg_set_subset_p (cc
, reg_class_contents
[R23_REGS
]))
2156 cost
= COSTS_N_INSNS (1000);
2158 cost
= COSTS_N_INSNS (80);
2161 if (!class_can_hold_mode (from
, mode
) || !class_can_hold_mode (to
, mode
))
2162 cost
= COSTS_N_INSNS (1000);
2164 if (reg_classes_intersect_p (from
, CR_REGS
))
2165 cost
+= COSTS_N_INSNS (5);
2167 if (reg_classes_intersect_p (to
, CR_REGS
))
2168 cost
+= COSTS_N_INSNS (5);
2170 if (from
== MEM_REGS
|| to
== MEM_REGS
)
2171 cost
+= COSTS_N_INSNS (50);
2172 else if (reg_classes_intersect_p (from
, MEM_REGS
)
2173 || reg_classes_intersect_p (to
, MEM_REGS
))
2174 cost
+= COSTS_N_INSNS (10);
2177 fprintf (stderr
, "register_move_cost %s from %s to %s = %d\n",
2178 mode_name
[mode
], class_names
[(int) from
], class_names
[(int) to
],
2184 /* Implements TARGET_MEMORY_MOVE_COST. */
2186 #undef TARGET_MEMORY_MOVE_COST
2187 #define TARGET_MEMORY_MOVE_COST m32c_memory_move_cost
2190 m32c_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED
,
2191 reg_class_t rclass ATTRIBUTE_UNUSED
,
2192 bool in ATTRIBUTE_UNUSED
)
2194 /* FIXME: pick real values. */
2195 return COSTS_N_INSNS (10);
2198 /* Here we try to describe when we use multiple opcodes for one RTX so
2199 that gcc knows when to use them. */
2200 #undef TARGET_RTX_COSTS
2201 #define TARGET_RTX_COSTS m32c_rtx_costs
2203 m32c_rtx_costs (rtx x
, int code
, int outer_code
, int opno ATTRIBUTE_UNUSED
,
2204 int *total
, bool speed ATTRIBUTE_UNUSED
)
2209 if (REGNO (x
) >= MEM0_REGNO
&& REGNO (x
) <= MEM7_REGNO
)
2210 *total
+= COSTS_N_INSNS (500);
2212 *total
+= COSTS_N_INSNS (1);
2218 if (GET_CODE (XEXP (x
, 1)) != CONST_INT
)
2220 /* mov.b r1l, r1h */
2221 *total
+= COSTS_N_INSNS (1);
2224 if (INTVAL (XEXP (x
, 1)) > 8
2225 || INTVAL (XEXP (x
, 1)) < -8)
2228 /* mov.b r1l, r1h */
2229 *total
+= COSTS_N_INSNS (2);
2244 if (outer_code
== SET
)
2246 *total
+= COSTS_N_INSNS (2);
2253 rtx dest
= XEXP (x
, 0);
2254 rtx addr
= XEXP (dest
, 0);
2255 switch (GET_CODE (addr
))
2258 *total
+= COSTS_N_INSNS (1);
2261 *total
+= COSTS_N_INSNS (3);
2264 *total
+= COSTS_N_INSNS (2);
2272 /* Reasonable default. */
2273 if (TARGET_A16
&& GET_MODE(x
) == SImode
)
2274 *total
+= COSTS_N_INSNS (2);
2280 #undef TARGET_ADDRESS_COST
2281 #define TARGET_ADDRESS_COST m32c_address_cost
2283 m32c_address_cost (rtx addr
, enum machine_mode mode ATTRIBUTE_UNUSED
,
2284 addr_space_t as ATTRIBUTE_UNUSED
,
2285 bool speed ATTRIBUTE_UNUSED
)
2288 /* fprintf(stderr, "\naddress_cost\n");
2290 switch (GET_CODE (addr
))
2295 return COSTS_N_INSNS(1);
2296 if (0 < i
&& i
<= 255)
2297 return COSTS_N_INSNS(2);
2298 if (0 < i
&& i
<= 65535)
2299 return COSTS_N_INSNS(3);
2300 return COSTS_N_INSNS(4);
2302 return COSTS_N_INSNS(4);
2304 return COSTS_N_INSNS(1);
2306 if (GET_CODE (XEXP (addr
, 1)) == CONST_INT
)
2308 i
= INTVAL (XEXP (addr
, 1));
2310 return COSTS_N_INSNS(1);
2311 if (0 < i
&& i
<= 255)
2312 return COSTS_N_INSNS(2);
2313 if (0 < i
&& i
<= 65535)
2314 return COSTS_N_INSNS(3);
2316 return COSTS_N_INSNS(4);
2322 /* Defining the Output Assembler Language */
2324 /* Output of Data */
2326 /* We may have 24 bit sizes, which is the native address size.
2327 Currently unused, but provided for completeness. */
2328 #undef TARGET_ASM_INTEGER
2329 #define TARGET_ASM_INTEGER m32c_asm_integer
2331 m32c_asm_integer (rtx x
, unsigned int size
, int aligned_p
)
2336 fprintf (asm_out_file
, "\t.3byte\t");
2337 output_addr_const (asm_out_file
, x
);
2338 fputc ('\n', asm_out_file
);
2341 if (GET_CODE (x
) == SYMBOL_REF
)
2343 fprintf (asm_out_file
, "\t.long\t");
2344 output_addr_const (asm_out_file
, x
);
2345 fputc ('\n', asm_out_file
);
2350 return default_assemble_integer (x
, size
, aligned_p
);
2353 /* Output of Assembler Instructions */
2355 /* We use a lookup table because the addressing modes are non-orthogonal. */
2360 char const *pattern
;
2363 const conversions
[] = {
2366 { 0, "mr", "z[1]" },
2367 { 0, "m+ri", "3[2]" },
2368 { 0, "m+rs", "3[2]" },
2369 { 0, "m+^Zrs", "5[4]" },
2370 { 0, "m+^Zri", "5[4]" },
2371 { 0, "m+^Z+ris", "7+6[5]" },
2372 { 0, "m+^Srs", "5[4]" },
2373 { 0, "m+^Sri", "5[4]" },
2374 { 0, "m+^S+ris", "7+6[5]" },
2375 { 0, "m+r+si", "4+5[2]" },
2378 { 0, "m+si", "2+3" },
2380 { 0, "mmr", "[z[2]]" },
2381 { 0, "mm+ri", "[4[3]]" },
2382 { 0, "mm+rs", "[4[3]]" },
2383 { 0, "mm+r+si", "[5+6[3]]" },
2384 { 0, "mms", "[[2]]" },
2385 { 0, "mmi", "[[2]]" },
2386 { 0, "mm+si", "[4[3]]" },
2390 { 0, "+si", "#1+2" },
2396 { 'd', "+si", "1+2" },
2399 { 'D', "+si", "1+2" },
2410 /* This is in order according to the bitfield that pushm/popm use. */
2411 static char const *pushm_regs
[] = {
2412 "fb", "sb", "a1", "a0", "r3", "r2", "r1", "r0"
2415 /* Implements TARGET_PRINT_OPERAND. */
2417 #undef TARGET_PRINT_OPERAND
2418 #define TARGET_PRINT_OPERAND m32c_print_operand
2421 m32c_print_operand (FILE * file
, rtx x
, int code
)
2426 int unsigned_const
= 0;
2429 /* Multiplies; constants are converted to sign-extended format but
2430 we need unsigned, so 'u' and 'U' tell us what size unsigned we
2442 /* This one is only for debugging; you can put it in a pattern to
2443 force this error. */
2446 fprintf (stderr
, "dj: unreviewed pattern:");
2447 if (current_output_insn
)
2448 debug_rtx (current_output_insn
);
2451 /* PSImode operations are either .w or .l depending on the target. */
2455 fprintf (file
, "w");
2457 fprintf (file
, "l");
2460 /* Inverted conditionals. */
2463 switch (GET_CODE (x
))
2469 fputs ("gtu", file
);
2475 fputs ("geu", file
);
2481 fputs ("leu", file
);
2487 fputs ("ltu", file
);
2500 /* Regular conditionals. */
2503 switch (GET_CODE (x
))
2509 fputs ("leu", file
);
2515 fputs ("ltu", file
);
2521 fputs ("gtu", file
);
2527 fputs ("geu", file
);
2540 /* Used in negsi2 to do HImode ops on the two parts of an SImode
2542 if (code
== 'h' && GET_MODE (x
) == SImode
)
2544 x
= m32c_subreg (HImode
, x
, SImode
, 0);
2547 if (code
== 'H' && GET_MODE (x
) == SImode
)
2549 x
= m32c_subreg (HImode
, x
, SImode
, 2);
2552 if (code
== 'h' && GET_MODE (x
) == HImode
)
2554 x
= m32c_subreg (QImode
, x
, HImode
, 0);
2557 if (code
== 'H' && GET_MODE (x
) == HImode
)
2559 /* We can't actually represent this as an rtx. Do it here. */
2560 if (GET_CODE (x
) == REG
)
2565 fputs ("r0h", file
);
2568 fputs ("r1h", file
);
2574 /* This should be a MEM. */
2575 x
= m32c_subreg (QImode
, x
, HImode
, 1);
2578 /* This is for BMcond, which always wants word register names. */
2579 if (code
== 'h' && GET_MODE (x
) == QImode
)
2581 if (GET_CODE (x
) == REG
)
2582 x
= gen_rtx_REG (HImode
, REGNO (x
));
2585 /* 'x' and 'X' need to be ignored for non-immediates. */
2586 if ((code
== 'x' || code
== 'X') && GET_CODE (x
) != CONST_INT
)
2591 for (i
= 0; conversions
[i
].pattern
; i
++)
2592 if (conversions
[i
].code
== code
2593 && streq (conversions
[i
].pattern
, pattern
))
2595 for (j
= 0; conversions
[i
].format
[j
]; j
++)
2596 /* backslash quotes the next character in the output pattern. */
2597 if (conversions
[i
].format
[j
] == '\\')
2599 fputc (conversions
[i
].format
[j
+ 1], file
);
2602 /* Digits in the output pattern indicate that the
2603 corresponding RTX is to be output at that point. */
2604 else if (ISDIGIT (conversions
[i
].format
[j
]))
2606 rtx r
= patternr
[conversions
[i
].format
[j
] - '0'];
2607 switch (GET_CODE (r
))
2610 fprintf (file
, "%s",
2611 reg_name_with_mode (REGNO (r
), GET_MODE (r
)));
2620 int i
= (int) exact_log2 (v
);
2622 i
= (int) exact_log2 ((v
^ 0xffff) & 0xffff);
2624 i
= (int) exact_log2 ((v
^ 0xff) & 0xff);
2626 fprintf (file
, "%d", i
);
2630 /* Unsigned byte. */
2631 fprintf (file
, HOST_WIDE_INT_PRINT_HEX
,
2635 /* Unsigned word. */
2636 fprintf (file
, HOST_WIDE_INT_PRINT_HEX
,
2637 INTVAL (r
) & 0xffff);
2640 /* pushm and popm encode a register set into a single byte. */
2642 for (b
= 7; b
>= 0; b
--)
2643 if (INTVAL (r
) & (1 << b
))
2645 fprintf (file
, "%s%s", comma
, pushm_regs
[b
]);
2650 /* "Minus". Output -X */
2651 ival
= (-INTVAL (r
) & 0xffff);
2653 ival
= ival
- 0x10000;
2654 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, ival
);
2658 if (conversions
[i
].format
[j
+ 1] == '[' && ival
< 0)
2660 /* We can simulate negative displacements by
2661 taking advantage of address space
2662 wrapping when the offset can span the
2663 entire address range. */
2665 patternr
[conversions
[i
].format
[j
+ 2] - '0'];
2666 if (GET_CODE (base
) == REG
)
2667 switch (REGNO (base
))
2672 ival
= 0x1000000 + ival
;
2674 ival
= 0x10000 + ival
;
2678 ival
= 0x10000 + ival
;
2682 else if (code
== 'd' && ival
< 0 && j
== 0)
2683 /* The "mova" opcode is used to do addition by
2684 computing displacements, but again, we need
2685 displacements to be unsigned *if* they're
2686 the only component of the displacement
2687 (i.e. no "symbol-4" type displacement). */
2688 ival
= (TARGET_A24
? 0x1000000 : 0x10000) + ival
;
2690 if (conversions
[i
].format
[j
] == '0')
2692 /* More conversions to unsigned. */
2693 if (unsigned_const
== 2)
2695 if (unsigned_const
== 1)
2698 if (streq (conversions
[i
].pattern
, "mi")
2699 || streq (conversions
[i
].pattern
, "mmi"))
2701 /* Integers used as addresses are unsigned. */
2702 ival
&= (TARGET_A24
? 0xffffff : 0xffff);
2704 if (force_sign
&& ival
>= 0)
2706 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, ival
);
2711 /* We don't have const_double constants. If it
2712 happens, make it obvious. */
2713 fprintf (file
, "[const_double 0x%lx]",
2714 (unsigned long) CONST_DOUBLE_HIGH (r
));
2717 assemble_name (file
, XSTR (r
, 0));
2720 output_asm_label (r
);
2723 fprintf (stderr
, "don't know how to print this operand:");
2730 if (conversions
[i
].format
[j
] == 'z')
2732 /* Some addressing modes *must* have a displacement,
2733 so insert a zero here if needed. */
2735 for (k
= j
+ 1; conversions
[i
].format
[k
]; k
++)
2736 if (ISDIGIT (conversions
[i
].format
[k
]))
2738 rtx reg
= patternr
[conversions
[i
].format
[k
] - '0'];
2739 if (GET_CODE (reg
) == REG
2740 && (REGNO (reg
) == SB_REGNO
2741 || REGNO (reg
) == FB_REGNO
2742 || REGNO (reg
) == SP_REGNO
))
2747 /* Signed displacements off symbols need to have signs
2749 if (conversions
[i
].format
[j
] == '+'
2750 && (!code
|| code
== 'D' || code
== 'd')
2751 && ISDIGIT (conversions
[i
].format
[j
+ 1])
2752 && (GET_CODE (patternr
[conversions
[i
].format
[j
+ 1] - '0'])
2758 fputc (conversions
[i
].format
[j
], file
);
2762 if (!conversions
[i
].pattern
)
2764 fprintf (stderr
, "unconvertible operand %c `%s'", code
? code
: '-',
2767 fprintf (file
, "[%c.%s]", code
? code
: '-', pattern
);
2773 /* Implements TARGET_PRINT_OPERAND_PUNCT_VALID_P.
2775 See m32c_print_operand above for descriptions of what these do. */
2777 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
2778 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P m32c_print_operand_punct_valid_p
2781 m32c_print_operand_punct_valid_p (unsigned char c
)
2783 if (c
== '&' || c
== '!')
2789 /* Implements TARGET_PRINT_OPERAND_ADDRESS. Nothing unusual here. */
2791 #undef TARGET_PRINT_OPERAND_ADDRESS
2792 #define TARGET_PRINT_OPERAND_ADDRESS m32c_print_operand_address
2795 m32c_print_operand_address (FILE * stream
, rtx address
)
2797 if (GET_CODE (address
) == MEM
)
2798 address
= XEXP (address
, 0);
2800 /* cf: gcc.dg/asm-4.c. */
2801 gcc_assert (GET_CODE (address
) == REG
);
2803 m32c_print_operand (stream
, address
, 0);
2806 /* Implements ASM_OUTPUT_REG_PUSH. Control registers are pushed
2807 differently than general registers. */
2809 m32c_output_reg_push (FILE * s
, int regno
)
2811 if (regno
== FLG_REGNO
)
2812 fprintf (s
, "\tpushc\tflg\n");
2814 fprintf (s
, "\tpush.%c\t%s\n",
2815 " bwll"[reg_push_size (regno
)], reg_names
[regno
]);
2818 /* Likewise for ASM_OUTPUT_REG_POP. */
2820 m32c_output_reg_pop (FILE * s
, int regno
)
2822 if (regno
== FLG_REGNO
)
2823 fprintf (s
, "\tpopc\tflg\n");
2825 fprintf (s
, "\tpop.%c\t%s\n",
2826 " bwll"[reg_push_size (regno
)], reg_names
[regno
]);
2829 /* Defining target-specific uses of `__attribute__' */
2831 /* Used to simplify the logic below. Find the attributes wherever
2833 #define M32C_ATTRIBUTES(decl) \
2834 (TYPE_P (decl)) ? TYPE_ATTRIBUTES (decl) \
2835 : DECL_ATTRIBUTES (decl) \
2836 ? (DECL_ATTRIBUTES (decl)) \
2837 : TYPE_ATTRIBUTES (TREE_TYPE (decl))
2839 /* Returns TRUE if the given tree has the "interrupt" attribute. */
2841 interrupt_p (tree node ATTRIBUTE_UNUSED
)
2843 tree list
= M32C_ATTRIBUTES (node
);
2846 if (is_attribute_p ("interrupt", TREE_PURPOSE (list
)))
2848 list
= TREE_CHAIN (list
);
2850 return fast_interrupt_p (node
);
2853 /* Returns TRUE if the given tree has the "bank_switch" attribute. */
2855 bank_switch_p (tree node ATTRIBUTE_UNUSED
)
2857 tree list
= M32C_ATTRIBUTES (node
);
2860 if (is_attribute_p ("bank_switch", TREE_PURPOSE (list
)))
2862 list
= TREE_CHAIN (list
);
2867 /* Returns TRUE if the given tree has the "fast_interrupt" attribute. */
2869 fast_interrupt_p (tree node ATTRIBUTE_UNUSED
)
2871 tree list
= M32C_ATTRIBUTES (node
);
2874 if (is_attribute_p ("fast_interrupt", TREE_PURPOSE (list
)))
2876 list
= TREE_CHAIN (list
);
2882 interrupt_handler (tree
* node ATTRIBUTE_UNUSED
,
2883 tree name ATTRIBUTE_UNUSED
,
2884 tree args ATTRIBUTE_UNUSED
,
2885 int flags ATTRIBUTE_UNUSED
,
2886 bool * no_add_attrs ATTRIBUTE_UNUSED
)
2891 /* Returns TRUE if given tree has the "function_vector" attribute. */
2893 m32c_special_page_vector_p (tree func
)
2897 if (TREE_CODE (func
) != FUNCTION_DECL
)
2900 list
= M32C_ATTRIBUTES (func
);
2903 if (is_attribute_p ("function_vector", TREE_PURPOSE (list
)))
2905 list
= TREE_CHAIN (list
);
2911 function_vector_handler (tree
* node ATTRIBUTE_UNUSED
,
2912 tree name ATTRIBUTE_UNUSED
,
2913 tree args ATTRIBUTE_UNUSED
,
2914 int flags ATTRIBUTE_UNUSED
,
2915 bool * no_add_attrs ATTRIBUTE_UNUSED
)
2919 /* The attribute is not supported for R8C target. */
2920 warning (OPT_Wattributes
,
2921 "%qE attribute is not supported for R8C target",
2923 *no_add_attrs
= true;
2925 else if (TREE_CODE (*node
) != FUNCTION_DECL
)
2927 /* The attribute must be applied to functions only. */
2928 warning (OPT_Wattributes
,
2929 "%qE attribute applies only to functions",
2931 *no_add_attrs
= true;
2933 else if (TREE_CODE (TREE_VALUE (args
)) != INTEGER_CST
)
2935 /* The argument must be a constant integer. */
2936 warning (OPT_Wattributes
,
2937 "%qE attribute argument not an integer constant",
2939 *no_add_attrs
= true;
2941 else if (TREE_INT_CST_LOW (TREE_VALUE (args
)) < 18
2942 || TREE_INT_CST_LOW (TREE_VALUE (args
)) > 255)
2944 /* The argument value must be between 18 to 255. */
2945 warning (OPT_Wattributes
,
2946 "%qE attribute argument should be between 18 to 255",
2948 *no_add_attrs
= true;
2953 /* If the function is assigned the attribute 'function_vector', it
2954 returns the function vector number, otherwise returns zero. */
2956 current_function_special_page_vector (rtx x
)
2960 if ((GET_CODE(x
) == SYMBOL_REF
)
2961 && (SYMBOL_REF_FLAGS (x
) & SYMBOL_FLAG_FUNCVEC_FUNCTION
))
2964 tree t
= SYMBOL_REF_DECL (x
);
2966 if (TREE_CODE (t
) != FUNCTION_DECL
)
2969 list
= M32C_ATTRIBUTES (t
);
2972 if (is_attribute_p ("function_vector", TREE_PURPOSE (list
)))
2974 num
= TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list
)));
2978 list
= TREE_CHAIN (list
);
2987 #undef TARGET_ATTRIBUTE_TABLE
2988 #define TARGET_ATTRIBUTE_TABLE m32c_attribute_table
2989 static const struct attribute_spec m32c_attribute_table
[] = {
2990 {"interrupt", 0, 0, false, false, false, interrupt_handler
, false},
2991 {"bank_switch", 0, 0, false, false, false, interrupt_handler
, false},
2992 {"fast_interrupt", 0, 0, false, false, false, interrupt_handler
, false},
2993 {"function_vector", 1, 1, true, false, false, function_vector_handler
,
2995 {0, 0, 0, 0, 0, 0, 0, false}
2998 #undef TARGET_COMP_TYPE_ATTRIBUTES
2999 #define TARGET_COMP_TYPE_ATTRIBUTES m32c_comp_type_attributes
3001 m32c_comp_type_attributes (const_tree type1 ATTRIBUTE_UNUSED
,
3002 const_tree type2 ATTRIBUTE_UNUSED
)
3004 /* 0=incompatible 1=compatible 2=warning */
3008 #undef TARGET_INSERT_ATTRIBUTES
3009 #define TARGET_INSERT_ATTRIBUTES m32c_insert_attributes
3011 m32c_insert_attributes (tree node ATTRIBUTE_UNUSED
,
3012 tree
* attr_ptr ATTRIBUTE_UNUSED
)
3015 /* See if we need to make #pragma address variables volatile. */
3017 if (TREE_CODE (node
) == VAR_DECL
)
3019 const char *name
= IDENTIFIER_POINTER (DECL_NAME (node
));
3020 if (m32c_get_pragma_address (name
, &addr
))
3022 TREE_THIS_VOLATILE (node
) = true;
3028 struct GTY(()) pragma_entry
{
3029 const char *varname
;
3032 typedef struct pragma_entry pragma_entry
;
3034 /* Hash table of pragma info. */
3035 static GTY((param_is (pragma_entry
))) htab_t pragma_htab
;
3038 pragma_entry_eq (const void *p1
, const void *p2
)
3040 const pragma_entry
*old
= (const pragma_entry
*) p1
;
3041 const char *new_name
= (const char *) p2
;
3043 return strcmp (old
->varname
, new_name
) == 0;
3047 pragma_entry_hash (const void *p
)
3049 const pragma_entry
*old
= (const pragma_entry
*) p
;
3050 return htab_hash_string (old
->varname
);
3054 m32c_note_pragma_address (const char *varname
, unsigned address
)
3056 pragma_entry
**slot
;
3059 pragma_htab
= htab_create_ggc (31, pragma_entry_hash
,
3060 pragma_entry_eq
, NULL
);
3062 slot
= (pragma_entry
**)
3063 htab_find_slot_with_hash (pragma_htab
, varname
,
3064 htab_hash_string (varname
), INSERT
);
3068 *slot
= ggc_alloc_pragma_entry ();
3069 (*slot
)->varname
= ggc_strdup (varname
);
3071 (*slot
)->address
= address
;
3075 m32c_get_pragma_address (const char *varname
, unsigned *address
)
3077 pragma_entry
**slot
;
3082 slot
= (pragma_entry
**)
3083 htab_find_slot_with_hash (pragma_htab
, varname
,
3084 htab_hash_string (varname
), NO_INSERT
);
3087 *address
= (*slot
)->address
;
3094 m32c_output_aligned_common (FILE *stream
, tree decl ATTRIBUTE_UNUSED
,
3096 int size
, int align
, int global
)
3100 if (m32c_get_pragma_address (name
, &address
))
3102 /* We never output these as global. */
3103 assemble_name (stream
, name
);
3104 fprintf (stream
, " = 0x%04x\n", address
);
3109 fprintf (stream
, "\t.local\t");
3110 assemble_name (stream
, name
);
3111 fprintf (stream
, "\n");
3113 fprintf (stream
, "\t.comm\t");
3114 assemble_name (stream
, name
);
3115 fprintf (stream
, ",%u,%u\n", size
, align
/ BITS_PER_UNIT
);
3120 /* This is a list of legal subregs of hard regs. */
3121 static const struct {
3122 unsigned char outer_mode_size
;
3123 unsigned char inner_mode_size
;
3124 unsigned char byte_mask
;
3125 unsigned char legal_when
;
3127 } legal_subregs
[] = {
3128 {1, 2, 0x03, 1, R0_REGNO
}, /* r0h r0l */
3129 {1, 2, 0x03, 1, R1_REGNO
}, /* r1h r1l */
3130 {1, 2, 0x01, 1, A0_REGNO
},
3131 {1, 2, 0x01, 1, A1_REGNO
},
3133 {1, 4, 0x01, 1, A0_REGNO
},
3134 {1, 4, 0x01, 1, A1_REGNO
},
3136 {2, 4, 0x05, 1, R0_REGNO
}, /* r2 r0 */
3137 {2, 4, 0x05, 1, R1_REGNO
}, /* r3 r1 */
3138 {2, 4, 0x05, 16, A0_REGNO
}, /* a1 a0 */
3139 {2, 4, 0x01, 24, A0_REGNO
}, /* a1 a0 */
3140 {2, 4, 0x01, 24, A1_REGNO
}, /* a1 a0 */
3142 {4, 8, 0x55, 1, R0_REGNO
}, /* r3 r1 r2 r0 */
3145 /* Returns TRUE if OP is a subreg of a hard reg which we don't
3146 support. We also bail on MEMs with illegal addresses. */
3148 m32c_illegal_subreg_p (rtx op
)
3152 int src_mode
, dest_mode
;
3154 if (GET_CODE (op
) == MEM
3155 && ! m32c_legitimate_address_p (Pmode
, XEXP (op
, 0), false))
3160 if (GET_CODE (op
) != SUBREG
)
3163 dest_mode
= GET_MODE (op
);
3164 offset
= SUBREG_BYTE (op
);
3165 op
= SUBREG_REG (op
);
3166 src_mode
= GET_MODE (op
);
3168 if (GET_MODE_SIZE (dest_mode
) == GET_MODE_SIZE (src_mode
))
3170 if (GET_CODE (op
) != REG
)
3172 if (REGNO (op
) >= MEM0_REGNO
)
3175 offset
= (1 << offset
);
3177 for (i
= 0; i
< ARRAY_SIZE (legal_subregs
); i
++)
3178 if (legal_subregs
[i
].outer_mode_size
== GET_MODE_SIZE (dest_mode
)
3179 && legal_subregs
[i
].regno
== REGNO (op
)
3180 && legal_subregs
[i
].inner_mode_size
== GET_MODE_SIZE (src_mode
)
3181 && legal_subregs
[i
].byte_mask
& offset
)
3183 switch (legal_subregs
[i
].legal_when
)
3200 /* Returns TRUE if we support a move between the first two operands.
3201 At the moment, we just want to discourage mem to mem moves until
3202 after reload, because reload has a hard time with our limited
3203 number of address registers, and we can get into a situation where
3204 we need three of them when we only have two. */
3206 m32c_mov_ok (rtx
* operands
, enum machine_mode mode ATTRIBUTE_UNUSED
)
3208 rtx op0
= operands
[0];
3209 rtx op1
= operands
[1];
3214 #define DEBUG_MOV_OK 0
3216 fprintf (stderr
, "m32c_mov_ok %s\n", mode_name
[mode
]);
3221 if (GET_CODE (op0
) == SUBREG
)
3222 op0
= XEXP (op0
, 0);
3223 if (GET_CODE (op1
) == SUBREG
)
3224 op1
= XEXP (op1
, 0);
3226 if (GET_CODE (op0
) == MEM
3227 && GET_CODE (op1
) == MEM
3228 && ! reload_completed
)
3231 fprintf (stderr
, " - no, mem to mem\n");
3237 fprintf (stderr
, " - ok\n");
3242 /* Returns TRUE if two consecutive HImode mov instructions, generated
3243 for moving an immediate double data to a double data type variable
3244 location, can be combined into single SImode mov instruction. */
3246 m32c_immd_dbl_mov (rtx
* operands ATTRIBUTE_UNUSED
,
3247 enum machine_mode mode ATTRIBUTE_UNUSED
)
3249 /* ??? This relied on the now-defunct MEM_SCALAR and MEM_IN_STRUCT_P
3256 /* Subregs are non-orthogonal for us, because our registers are all
3259 m32c_subreg (enum machine_mode outer
,
3260 rtx x
, enum machine_mode inner
, int byte
)
3264 /* Converting MEMs to different types that are the same size, we
3265 just rewrite them. */
3266 if (GET_CODE (x
) == SUBREG
3267 && SUBREG_BYTE (x
) == 0
3268 && GET_CODE (SUBREG_REG (x
)) == MEM
3269 && (GET_MODE_SIZE (GET_MODE (x
))
3270 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (x
)))))
3273 x
= gen_rtx_MEM (GET_MODE (x
), XEXP (SUBREG_REG (x
), 0));
3274 MEM_COPY_ATTRIBUTES (x
, SUBREG_REG (oldx
));
3277 /* Push/pop get done as smaller push/pops. */
3278 if (GET_CODE (x
) == MEM
3279 && (GET_CODE (XEXP (x
, 0)) == PRE_DEC
3280 || GET_CODE (XEXP (x
, 0)) == POST_INC
))
3281 return gen_rtx_MEM (outer
, XEXP (x
, 0));
3282 if (GET_CODE (x
) == SUBREG
3283 && GET_CODE (XEXP (x
, 0)) == MEM
3284 && (GET_CODE (XEXP (XEXP (x
, 0), 0)) == PRE_DEC
3285 || GET_CODE (XEXP (XEXP (x
, 0), 0)) == POST_INC
))
3286 return gen_rtx_MEM (outer
, XEXP (XEXP (x
, 0), 0));
3288 if (GET_CODE (x
) != REG
)
3290 rtx r
= simplify_gen_subreg (outer
, x
, inner
, byte
);
3291 if (GET_CODE (r
) == SUBREG
3292 && GET_CODE (x
) == MEM
3293 && MEM_VOLATILE_P (x
))
3295 /* Volatile MEMs don't get simplified, but we need them to
3296 be. We are little endian, so the subreg byte is the
3298 r
= adjust_address_nv (x
, outer
, byte
);
3304 if (r
>= FIRST_PSEUDO_REGISTER
|| r
== AP_REGNO
)
3305 return simplify_gen_subreg (outer
, x
, inner
, byte
);
3307 if (IS_MEM_REGNO (r
))
3308 return simplify_gen_subreg (outer
, x
, inner
, byte
);
3310 /* This is where the complexities of our register layout are
3314 else if (outer
== HImode
)
3316 if (r
== R0_REGNO
&& byte
== 2)
3318 else if (r
== R0_REGNO
&& byte
== 4)
3320 else if (r
== R0_REGNO
&& byte
== 6)
3322 else if (r
== R1_REGNO
&& byte
== 2)
3324 else if (r
== A0_REGNO
&& byte
== 2)
3327 else if (outer
== SImode
)
3329 if (r
== R0_REGNO
&& byte
== 0)
3331 else if (r
== R0_REGNO
&& byte
== 4)
3336 fprintf (stderr
, "m32c_subreg %s %s %d\n",
3337 mode_name
[outer
], mode_name
[inner
], byte
);
3341 return gen_rtx_REG (outer
, nr
);
3344 /* Used to emit move instructions. We split some moves,
3345 and avoid mem-mem moves. */
3347 m32c_prepare_move (rtx
* operands
, enum machine_mode mode
)
3349 if (far_addr_space_p (operands
[0])
3350 && CONSTANT_P (operands
[1]))
3352 operands
[1] = force_reg (GET_MODE (operands
[0]), operands
[1]);
3354 if (TARGET_A16
&& mode
== PSImode
)
3355 return m32c_split_move (operands
, mode
, 1);
3356 if ((GET_CODE (operands
[0]) == MEM
)
3357 && (GET_CODE (XEXP (operands
[0], 0)) == PRE_MODIFY
))
3359 rtx pmv
= XEXP (operands
[0], 0);
3360 rtx dest_reg
= XEXP (pmv
, 0);
3361 rtx dest_mod
= XEXP (pmv
, 1);
3363 emit_insn (gen_rtx_SET (Pmode
, dest_reg
, dest_mod
));
3364 operands
[0] = gen_rtx_MEM (mode
, dest_reg
);
3366 if (can_create_pseudo_p () && MEM_P (operands
[0]) && MEM_P (operands
[1]))
3367 operands
[1] = copy_to_mode_reg (mode
, operands
[1]);
3371 #define DEBUG_SPLIT 0
3373 /* Returns TRUE if the given PSImode move should be split. We split
3374 for all r8c/m16c moves, since it doesn't support them, and for
3375 POP.L as we can only *push* SImode. */
3377 m32c_split_psi_p (rtx
* operands
)
3380 fprintf (stderr
, "\nm32c_split_psi_p\n");
3381 debug_rtx (operands
[0]);
3382 debug_rtx (operands
[1]);
3387 fprintf (stderr
, "yes, A16\n");
3391 if (GET_CODE (operands
[1]) == MEM
3392 && GET_CODE (XEXP (operands
[1], 0)) == POST_INC
)
3395 fprintf (stderr
, "yes, pop.l\n");
3400 fprintf (stderr
, "no, default\n");
3405 /* Split the given move. SPLIT_ALL is 0 if splitting is optional
3406 (define_expand), 1 if it is not optional (define_insn_and_split),
3407 and 3 for define_split (alternate api). */
3409 m32c_split_move (rtx
* operands
, enum machine_mode mode
, int split_all
)
3412 int parts
, si
, di
, rev
= 0;
3413 int rv
= 0, opi
= 2;
3414 enum machine_mode submode
= HImode
;
3415 rtx
*ops
, local_ops
[10];
3417 /* define_split modifies the existing operands, but the other two
3418 emit new insns. OPS is where we store the operand pairs, which
3429 /* Before splitting mem-mem moves, force one operand into a
3431 if (can_create_pseudo_p () && MEM_P (operands
[0]) && MEM_P (operands
[1]))
3434 fprintf (stderr
, "force_reg...\n");
3435 debug_rtx (operands
[1]);
3437 operands
[1] = force_reg (mode
, operands
[1]);
3439 debug_rtx (operands
[1]);
3446 fprintf (stderr
, "\nsplit_move %d all=%d\n", !can_create_pseudo_p (),
3448 debug_rtx (operands
[0]);
3449 debug_rtx (operands
[1]);
3452 /* Note that split_all is not used to select the api after this
3453 point, so it's safe to set it to 3 even with define_insn. */
3454 /* None of the chips can move SI operands to sp-relative addresses,
3455 so we always split those. */
3456 if (satisfies_constraint_Ss (operands
[0]))
3460 && (far_addr_space_p (operands
[0])
3461 || far_addr_space_p (operands
[1])))
3464 /* We don't need to split these. */
3467 && (mode
== SImode
|| mode
== PSImode
)
3468 && !(GET_CODE (operands
[1]) == MEM
3469 && GET_CODE (XEXP (operands
[1], 0)) == POST_INC
))
3472 /* First, enumerate the subregs we'll be dealing with. */
3473 for (si
= 0; si
< parts
; si
++)
3476 m32c_subreg (submode
, operands
[0], mode
,
3477 si
* GET_MODE_SIZE (submode
));
3479 m32c_subreg (submode
, operands
[1], mode
,
3480 si
* GET_MODE_SIZE (submode
));
3483 /* Split pushes by emitting a sequence of smaller pushes. */
3484 if (GET_CODE (d
[0]) == MEM
&& GET_CODE (XEXP (d
[0], 0)) == PRE_DEC
)
3486 for (si
= parts
- 1; si
>= 0; si
--)
3488 ops
[opi
++] = gen_rtx_MEM (submode
,
3489 gen_rtx_PRE_DEC (Pmode
,
3497 /* Likewise for pops. */
3498 else if (GET_CODE (s
[0]) == MEM
&& GET_CODE (XEXP (s
[0], 0)) == POST_INC
)
3500 for (di
= 0; di
< parts
; di
++)
3503 ops
[opi
++] = gen_rtx_MEM (submode
,
3504 gen_rtx_POST_INC (Pmode
,
3512 /* if d[di] == s[si] for any di < si, we'll early clobber. */
3513 for (di
= 0; di
< parts
- 1; di
++)
3514 for (si
= di
+ 1; si
< parts
; si
++)
3515 if (reg_mentioned_p (d
[di
], s
[si
]))
3519 for (si
= 0; si
< parts
; si
++)
3525 for (si
= parts
- 1; si
>= 0; si
--)
3532 /* Now emit any moves we may have accumulated. */
3533 if (rv
&& split_all
!= 3)
3536 for (i
= 2; i
< opi
; i
+= 2)
3537 emit_move_insn (ops
[i
], ops
[i
+ 1]);
3542 /* The m32c has a number of opcodes that act like memcpy, strcmp, and
3543 the like. For the R8C they expect one of the addresses to be in
3544 R1L:An so we need to arrange for that. Otherwise, it's just a
3545 matter of picking out the operands we want and emitting the right
3546 pattern for them. All these expanders, which correspond to
3547 patterns in blkmov.md, must return nonzero if they expand the insn,
3548 or zero if they should FAIL. */
3550 /* This is a memset() opcode. All operands are implied, so we need to
3551 arrange for them to be in the right registers. The opcode wants
3552 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3553 the count (HI), and $2 the value (QI). */
3555 m32c_expand_setmemhi(rtx
*operands
)
3557 rtx desta
, count
, val
;
3560 desta
= XEXP (operands
[0], 0);
3561 count
= operands
[1];
3564 desto
= gen_reg_rtx (Pmode
);
3565 counto
= gen_reg_rtx (HImode
);
3567 if (GET_CODE (desta
) != REG
3568 || REGNO (desta
) < FIRST_PSEUDO_REGISTER
)
3569 desta
= copy_to_mode_reg (Pmode
, desta
);
3571 /* This looks like an arbitrary restriction, but this is by far the
3572 most common case. For counts 8..14 this actually results in
3573 smaller code with no speed penalty because the half-sized
3574 constant can be loaded with a shorter opcode. */
3575 if (GET_CODE (count
) == CONST_INT
3576 && GET_CODE (val
) == CONST_INT
3577 && ! (INTVAL (count
) & 1)
3578 && (INTVAL (count
) > 1)
3579 && (INTVAL (val
) <= 7 && INTVAL (val
) >= -8))
3581 unsigned v
= INTVAL (val
) & 0xff;
3583 count
= copy_to_mode_reg (HImode
, GEN_INT (INTVAL (count
) / 2));
3584 val
= copy_to_mode_reg (HImode
, GEN_INT (v
));
3586 emit_insn (gen_setmemhi_whi_op (desto
, counto
, val
, desta
, count
));
3588 emit_insn (gen_setmemhi_wpsi_op (desto
, counto
, val
, desta
, count
));
3592 /* This is the generalized memset() case. */
3593 if (GET_CODE (val
) != REG
3594 || REGNO (val
) < FIRST_PSEUDO_REGISTER
)
3595 val
= copy_to_mode_reg (QImode
, val
);
3597 if (GET_CODE (count
) != REG
3598 || REGNO (count
) < FIRST_PSEUDO_REGISTER
)
3599 count
= copy_to_mode_reg (HImode
, count
);
3602 emit_insn (gen_setmemhi_bhi_op (desto
, counto
, val
, desta
, count
));
3604 emit_insn (gen_setmemhi_bpsi_op (desto
, counto
, val
, desta
, count
));
3609 /* This is a memcpy() opcode. All operands are implied, so we need to
3610 arrange for them to be in the right registers. The opcode wants
3611 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3612 is the source (MEM:BLK), and $2 the count (HI). */
3614 m32c_expand_movmemhi(rtx
*operands
)
3616 rtx desta
, srca
, count
;
3617 rtx desto
, srco
, counto
;
3619 desta
= XEXP (operands
[0], 0);
3620 srca
= XEXP (operands
[1], 0);
3621 count
= operands
[2];
3623 desto
= gen_reg_rtx (Pmode
);
3624 srco
= gen_reg_rtx (Pmode
);
3625 counto
= gen_reg_rtx (HImode
);
3627 if (GET_CODE (desta
) != REG
3628 || REGNO (desta
) < FIRST_PSEUDO_REGISTER
)
3629 desta
= copy_to_mode_reg (Pmode
, desta
);
3631 if (GET_CODE (srca
) != REG
3632 || REGNO (srca
) < FIRST_PSEUDO_REGISTER
)
3633 srca
= copy_to_mode_reg (Pmode
, srca
);
3635 /* Similar to setmem, but we don't need to check the value. */
3636 if (GET_CODE (count
) == CONST_INT
3637 && ! (INTVAL (count
) & 1)
3638 && (INTVAL (count
) > 1))
3640 count
= copy_to_mode_reg (HImode
, GEN_INT (INTVAL (count
) / 2));
3642 emit_insn (gen_movmemhi_whi_op (desto
, srco
, counto
, desta
, srca
, count
));
3644 emit_insn (gen_movmemhi_wpsi_op (desto
, srco
, counto
, desta
, srca
, count
));
3648 /* This is the generalized memset() case. */
3649 if (GET_CODE (count
) != REG
3650 || REGNO (count
) < FIRST_PSEUDO_REGISTER
)
3651 count
= copy_to_mode_reg (HImode
, count
);
3654 emit_insn (gen_movmemhi_bhi_op (desto
, srco
, counto
, desta
, srca
, count
));
3656 emit_insn (gen_movmemhi_bpsi_op (desto
, srco
, counto
, desta
, srca
, count
));
3661 /* This is a stpcpy() opcode. $0 is the destination (MEM:BLK) after
3662 the copy, which should point to the NUL at the end of the string,
3663 $1 is the destination (MEM:BLK), and $2 is the source (MEM:BLK).
3664 Since our opcode leaves the destination pointing *after* the NUL,
3665 we must emit an adjustment. */
3667 m32c_expand_movstr(rtx
*operands
)
3672 desta
= XEXP (operands
[1], 0);
3673 srca
= XEXP (operands
[2], 0);
3675 desto
= gen_reg_rtx (Pmode
);
3676 srco
= gen_reg_rtx (Pmode
);
3678 if (GET_CODE (desta
) != REG
3679 || REGNO (desta
) < FIRST_PSEUDO_REGISTER
)
3680 desta
= copy_to_mode_reg (Pmode
, desta
);
3682 if (GET_CODE (srca
) != REG
3683 || REGNO (srca
) < FIRST_PSEUDO_REGISTER
)
3684 srca
= copy_to_mode_reg (Pmode
, srca
);
3686 emit_insn (gen_movstr_op (desto
, srco
, desta
, srca
));
3687 /* desto ends up being a1, which allows this type of add through MOVA. */
3688 emit_insn (gen_addpsi3 (operands
[0], desto
, GEN_INT (-1)));
3693 /* This is a strcmp() opcode. $0 is the destination (HI) which holds
3694 <=>0 depending on the comparison, $1 is one string (MEM:BLK), and
3695 $2 is the other (MEM:BLK). We must do the comparison, and then
3696 convert the flags to a signed integer result. */
3698 m32c_expand_cmpstr(rtx
*operands
)
3702 src1a
= XEXP (operands
[1], 0);
3703 src2a
= XEXP (operands
[2], 0);
3705 if (GET_CODE (src1a
) != REG
3706 || REGNO (src1a
) < FIRST_PSEUDO_REGISTER
)
3707 src1a
= copy_to_mode_reg (Pmode
, src1a
);
3709 if (GET_CODE (src2a
) != REG
3710 || REGNO (src2a
) < FIRST_PSEUDO_REGISTER
)
3711 src2a
= copy_to_mode_reg (Pmode
, src2a
);
3713 emit_insn (gen_cmpstrhi_op (src1a
, src2a
, src1a
, src2a
));
3714 emit_insn (gen_cond_to_int (operands
[0]));
3720 typedef rtx (*shift_gen_func
)(rtx
, rtx
, rtx
);
3722 static shift_gen_func
3723 shift_gen_func_for (int mode
, int code
)
3725 #define GFF(m,c,f) if (mode == m && code == c) return f
3726 GFF(QImode
, ASHIFT
, gen_ashlqi3_i
);
3727 GFF(QImode
, ASHIFTRT
, gen_ashrqi3_i
);
3728 GFF(QImode
, LSHIFTRT
, gen_lshrqi3_i
);
3729 GFF(HImode
, ASHIFT
, gen_ashlhi3_i
);
3730 GFF(HImode
, ASHIFTRT
, gen_ashrhi3_i
);
3731 GFF(HImode
, LSHIFTRT
, gen_lshrhi3_i
);
3732 GFF(PSImode
, ASHIFT
, gen_ashlpsi3_i
);
3733 GFF(PSImode
, ASHIFTRT
, gen_ashrpsi3_i
);
3734 GFF(PSImode
, LSHIFTRT
, gen_lshrpsi3_i
);
3735 GFF(SImode
, ASHIFT
, TARGET_A16
? gen_ashlsi3_16
: gen_ashlsi3_24
);
3736 GFF(SImode
, ASHIFTRT
, TARGET_A16
? gen_ashrsi3_16
: gen_ashrsi3_24
);
3737 GFF(SImode
, LSHIFTRT
, TARGET_A16
? gen_lshrsi3_16
: gen_lshrsi3_24
);
3742 /* The m32c only has one shift, but it takes a signed count. GCC
3743 doesn't want this, so we fake it by negating any shift count when
3744 we're pretending to shift the other way. Also, the shift count is
3745 limited to -8..8. It's slightly better to use two shifts for 9..15
3746 than to load the count into r1h, so we do that too. */
3748 m32c_prepare_shift (rtx
* operands
, int scale
, int shift_code
)
3750 enum machine_mode mode
= GET_MODE (operands
[0]);
3751 shift_gen_func func
= shift_gen_func_for (mode
, shift_code
);
3754 if (GET_CODE (operands
[2]) == CONST_INT
)
3756 int maxc
= TARGET_A24
&& (mode
== PSImode
|| mode
== SImode
) ? 32 : 8;
3757 int count
= INTVAL (operands
[2]) * scale
;
3759 while (count
> maxc
)
3761 temp
= gen_reg_rtx (mode
);
3762 emit_insn (func (temp
, operands
[1], GEN_INT (maxc
)));
3766 while (count
< -maxc
)
3768 temp
= gen_reg_rtx (mode
);
3769 emit_insn (func (temp
, operands
[1], GEN_INT (-maxc
)));
3773 emit_insn (func (operands
[0], operands
[1], GEN_INT (count
)));
3777 temp
= gen_reg_rtx (QImode
);
3779 /* The pattern has a NEG that corresponds to this. */
3780 emit_move_insn (temp
, gen_rtx_NEG (QImode
, operands
[2]));
3781 else if (TARGET_A16
&& mode
== SImode
)
3782 /* We do this because the code below may modify this, we don't
3783 want to modify the origin of this value. */
3784 emit_move_insn (temp
, operands
[2]);
3786 /* We'll only use it for the shift, no point emitting a move. */
3789 if (TARGET_A16
&& GET_MODE_SIZE (mode
) == 4)
3791 /* The m16c has a limit of -16..16 for SI shifts, even when the
3792 shift count is in a register. Since there are so many targets
3793 of these shifts, it's better to expand the RTL here than to
3794 call a helper function.
3796 The resulting code looks something like this:
3808 We take advantage of the fact that "negative" shifts are
3809 undefined to skip one of the comparisons. */
3812 rtx label
, insn
, tempvar
;
3814 emit_move_insn (operands
[0], operands
[1]);
3817 label
= gen_label_rtx ();
3818 LABEL_NUSES (label
) ++;
3820 tempvar
= gen_reg_rtx (mode
);
3822 if (shift_code
== ASHIFT
)
3824 /* This is a left shift. We only need check positive counts. */
3825 emit_jump_insn (gen_cbranchqi4 (gen_rtx_LE (VOIDmode
, 0, 0),
3826 count
, GEN_INT (16), label
));
3827 emit_insn (func (tempvar
, operands
[0], GEN_INT (8)));
3828 emit_insn (func (operands
[0], tempvar
, GEN_INT (8)));
3829 insn
= emit_insn (gen_addqi3 (count
, count
, GEN_INT (-16)));
3830 emit_label_after (label
, insn
);
3834 /* This is a right shift. We only need check negative counts. */
3835 emit_jump_insn (gen_cbranchqi4 (gen_rtx_GE (VOIDmode
, 0, 0),
3836 count
, GEN_INT (-16), label
));
3837 emit_insn (func (tempvar
, operands
[0], GEN_INT (-8)));
3838 emit_insn (func (operands
[0], tempvar
, GEN_INT (-8)));
3839 insn
= emit_insn (gen_addqi3 (count
, count
, GEN_INT (16)));
3840 emit_label_after (label
, insn
);
3842 operands
[1] = operands
[0];
3843 emit_insn (func (operands
[0], operands
[0], count
));
3851 /* The m32c has a limited range of operations that work on PSImode
3852 values; we have to expand to SI, do the math, and truncate back to
3853 PSI. Yes, this is expensive, but hopefully gcc will learn to avoid
3856 m32c_expand_neg_mulpsi3 (rtx
* operands
)
3858 /* operands: a = b * i */
3859 rtx temp1
; /* b as SI */
3860 rtx scale
/* i as SI */;
3861 rtx temp2
; /* a*b as SI */
3863 temp1
= gen_reg_rtx (SImode
);
3864 temp2
= gen_reg_rtx (SImode
);
3865 if (GET_CODE (operands
[2]) != CONST_INT
)
3867 scale
= gen_reg_rtx (SImode
);
3868 emit_insn (gen_zero_extendpsisi2 (scale
, operands
[2]));
3871 scale
= copy_to_mode_reg (SImode
, operands
[2]);
3873 emit_insn (gen_zero_extendpsisi2 (temp1
, operands
[1]));
3874 temp2
= expand_simple_binop (SImode
, MULT
, temp1
, scale
, temp2
, 1, OPTAB_LIB
);
3875 emit_insn (gen_truncsipsi2 (operands
[0], temp2
));
3878 /* Pattern Output Functions */
3881 m32c_expand_movcc (rtx
*operands
)
3883 rtx rel
= operands
[1];
3885 if (GET_CODE (rel
) != EQ
&& GET_CODE (rel
) != NE
)
3887 if (GET_CODE (operands
[2]) != CONST_INT
3888 || GET_CODE (operands
[3]) != CONST_INT
)
3890 if (GET_CODE (rel
) == NE
)
3892 rtx tmp
= operands
[2];
3893 operands
[2] = operands
[3];
3895 rel
= gen_rtx_EQ (GET_MODE (rel
), XEXP (rel
, 0), XEXP (rel
, 1));
3898 emit_move_insn (operands
[0],
3899 gen_rtx_IF_THEN_ELSE (GET_MODE (operands
[0]),
3906 /* Used for the "insv" pattern. Return nonzero to fail, else done. */
3908 m32c_expand_insv (rtx
*operands
)
3913 if (INTVAL (operands
[1]) != 1)
3916 /* Our insv opcode (bset, bclr) can only insert a one-bit constant. */
3917 if (GET_CODE (operands
[3]) != CONST_INT
)
3919 if (INTVAL (operands
[3]) != 0
3920 && INTVAL (operands
[3]) != 1
3921 && INTVAL (operands
[3]) != -1)
3924 mask
= 1 << INTVAL (operands
[2]);
3927 if (GET_CODE (op0
) == SUBREG
3928 && SUBREG_BYTE (op0
) == 0)
3930 rtx sub
= SUBREG_REG (op0
);
3931 if (GET_MODE (sub
) == HImode
|| GET_MODE (sub
) == QImode
)
3935 if (!can_create_pseudo_p ()
3936 || (GET_CODE (op0
) == MEM
&& MEM_VOLATILE_P (op0
)))
3940 src0
= gen_reg_rtx (GET_MODE (op0
));
3941 emit_move_insn (src0
, op0
);
3944 if (GET_MODE (op0
) == HImode
3945 && INTVAL (operands
[2]) >= 8
3946 && GET_CODE (op0
) == MEM
)
3948 /* We are little endian. */
3949 rtx new_mem
= gen_rtx_MEM (QImode
, plus_constant (Pmode
,
3951 MEM_COPY_ATTRIBUTES (new_mem
, op0
);
3955 /* First, we generate a mask with the correct polarity. If we are
3956 storing a zero, we want an AND mask, so invert it. */
3957 if (INTVAL (operands
[3]) == 0)
3959 /* Storing a zero, use an AND mask */
3960 if (GET_MODE (op0
) == HImode
)
3965 /* Now we need to properly sign-extend the mask in case we need to
3966 fall back to an AND or OR opcode. */
3967 if (GET_MODE (op0
) == HImode
)
3978 switch ( (INTVAL (operands
[3]) ? 4 : 0)
3979 + ((GET_MODE (op0
) == HImode
) ? 2 : 0)
3980 + (TARGET_A24
? 1 : 0))
3982 case 0: p
= gen_andqi3_16 (op0
, src0
, GEN_INT (mask
)); break;
3983 case 1: p
= gen_andqi3_24 (op0
, src0
, GEN_INT (mask
)); break;
3984 case 2: p
= gen_andhi3_16 (op0
, src0
, GEN_INT (mask
)); break;
3985 case 3: p
= gen_andhi3_24 (op0
, src0
, GEN_INT (mask
)); break;
3986 case 4: p
= gen_iorqi3_16 (op0
, src0
, GEN_INT (mask
)); break;
3987 case 5: p
= gen_iorqi3_24 (op0
, src0
, GEN_INT (mask
)); break;
3988 case 6: p
= gen_iorhi3_16 (op0
, src0
, GEN_INT (mask
)); break;
3989 case 7: p
= gen_iorhi3_24 (op0
, src0
, GEN_INT (mask
)); break;
3990 default: p
= NULL_RTX
; break; /* Not reached, but silences a warning. */
3998 m32c_scc_pattern(rtx
*operands
, RTX_CODE code
)
4000 static char buf
[30];
4001 if (GET_CODE (operands
[0]) == REG
4002 && REGNO (operands
[0]) == R0_REGNO
)
4005 return "stzx\t#1,#0,r0l";
4007 return "stzx\t#0,#1,r0l";
4009 sprintf(buf
, "bm%s\t0,%%h0\n\tand.b\t#1,%%0", GET_RTX_NAME (code
));
4013 /* Encode symbol attributes of a SYMBOL_REF into its
4014 SYMBOL_REF_FLAGS. */
4016 m32c_encode_section_info (tree decl
, rtx rtl
, int first
)
4018 int extra_flags
= 0;
4020 default_encode_section_info (decl
, rtl
, first
);
4021 if (TREE_CODE (decl
) == FUNCTION_DECL
4022 && m32c_special_page_vector_p (decl
))
4024 extra_flags
= SYMBOL_FLAG_FUNCVEC_FUNCTION
;
4027 SYMBOL_REF_FLAGS (XEXP (rtl
, 0)) |= extra_flags
;
4030 /* Returns TRUE if the current function is a leaf, and thus we can
4031 determine which registers an interrupt function really needs to
4032 save. The logic below is mostly about finding the insn sequence
4033 that's the function, versus any sequence that might be open for the
4036 m32c_leaf_function_p (void)
4038 rtx saved_first
, saved_last
;
4039 struct sequence_stack
*seq
;
4042 saved_first
= crtl
->emit
.x_first_insn
;
4043 saved_last
= crtl
->emit
.x_last_insn
;
4044 for (seq
= crtl
->emit
.sequence_stack
; seq
&& seq
->next
; seq
= seq
->next
)
4048 crtl
->emit
.x_first_insn
= seq
->first
;
4049 crtl
->emit
.x_last_insn
= seq
->last
;
4052 rv
= leaf_function_p ();
4054 crtl
->emit
.x_first_insn
= saved_first
;
4055 crtl
->emit
.x_last_insn
= saved_last
;
4059 /* Returns TRUE if the current function needs to use the ENTER/EXIT
4060 opcodes. If the function doesn't need the frame base or stack
4061 pointer, it can use the simpler RTS opcode. */
4063 m32c_function_needs_enter (void)
4066 struct sequence_stack
*seq
;
4067 rtx sp
= gen_rtx_REG (Pmode
, SP_REGNO
);
4068 rtx fb
= gen_rtx_REG (Pmode
, FB_REGNO
);
4070 insn
= get_insns ();
4071 for (seq
= crtl
->emit
.sequence_stack
;
4073 insn
= seq
->first
, seq
= seq
->next
);
4077 if (reg_mentioned_p (sp
, insn
))
4079 if (reg_mentioned_p (fb
, insn
))
4081 insn
= NEXT_INSN (insn
);
4086 /* Mark all the subexpressions of the PARALLEL rtx PAR as
4087 frame-related. Return PAR.
4089 dwarf2out.c:dwarf2out_frame_debug_expr ignores sub-expressions of a
4090 PARALLEL rtx other than the first if they do not have the
4091 FRAME_RELATED flag set on them. So this function is handy for
4092 marking up 'enter' instructions. */
4094 m32c_all_frame_related (rtx par
)
4096 int len
= XVECLEN (par
, 0);
4099 for (i
= 0; i
< len
; i
++)
4100 F (XVECEXP (par
, 0, i
));
4105 /* Emits the prologue. See the frame layout comment earlier in this
4106 file. We can reserve up to 256 bytes with the ENTER opcode, beyond
4107 that we manually update sp. */
4109 m32c_emit_prologue (void)
4111 int frame_size
, extra_frame_size
= 0, reg_save_size
;
4112 int complex_prologue
= 0;
4114 cfun
->machine
->is_leaf
= m32c_leaf_function_p ();
4115 if (interrupt_p (cfun
->decl
))
4117 cfun
->machine
->is_interrupt
= 1;
4118 complex_prologue
= 1;
4120 else if (bank_switch_p (cfun
->decl
))
4121 warning (OPT_Wattributes
,
4122 "%<bank_switch%> has no effect on non-interrupt functions");
4124 reg_save_size
= m32c_pushm_popm (PP_justcount
);
4126 if (interrupt_p (cfun
->decl
))
4128 if (bank_switch_p (cfun
->decl
))
4129 emit_insn (gen_fset_b ());
4130 else if (cfun
->machine
->intr_pushm
)
4131 emit_insn (gen_pushm (GEN_INT (cfun
->machine
->intr_pushm
)));
4135 m32c_initial_elimination_offset (FB_REGNO
, SP_REGNO
) - reg_save_size
;
4137 && !m32c_function_needs_enter ())
4138 cfun
->machine
->use_rts
= 1;
4140 if (frame_size
> 254)
4142 extra_frame_size
= frame_size
- 254;
4145 if (cfun
->machine
->use_rts
== 0)
4146 F (emit_insn (m32c_all_frame_related
4148 ? gen_prologue_enter_16 (GEN_INT (frame_size
+ 2))
4149 : gen_prologue_enter_24 (GEN_INT (frame_size
+ 4)))));
4151 if (extra_frame_size
)
4153 complex_prologue
= 1;
4155 F (emit_insn (gen_addhi3 (gen_rtx_REG (HImode
, SP_REGNO
),
4156 gen_rtx_REG (HImode
, SP_REGNO
),
4157 GEN_INT (-extra_frame_size
))));
4159 F (emit_insn (gen_addpsi3 (gen_rtx_REG (PSImode
, SP_REGNO
),
4160 gen_rtx_REG (PSImode
, SP_REGNO
),
4161 GEN_INT (-extra_frame_size
))));
4164 complex_prologue
+= m32c_pushm_popm (PP_pushm
);
4166 /* This just emits a comment into the .s file for debugging. */
4167 if (complex_prologue
)
4168 emit_insn (gen_prologue_end ());
4171 /* Likewise, for the epilogue. The only exception is that, for
4172 interrupts, we must manually unwind the frame as the REIT opcode
4175 m32c_emit_epilogue (void)
4177 int popm_count
= m32c_pushm_popm (PP_justcount
);
4179 /* This just emits a comment into the .s file for debugging. */
4180 if (popm_count
> 0 || cfun
->machine
->is_interrupt
)
4181 emit_insn (gen_epilogue_start ());
4184 m32c_pushm_popm (PP_popm
);
4186 if (cfun
->machine
->is_interrupt
)
4188 enum machine_mode spmode
= TARGET_A16
? HImode
: PSImode
;
4190 /* REIT clears B flag and restores $fp for us, but we still
4191 have to fix up the stack. USE_RTS just means we didn't
4193 if (!cfun
->machine
->use_rts
)
4195 emit_move_insn (gen_rtx_REG (spmode
, A0_REGNO
),
4196 gen_rtx_REG (spmode
, FP_REGNO
));
4197 emit_move_insn (gen_rtx_REG (spmode
, SP_REGNO
),
4198 gen_rtx_REG (spmode
, A0_REGNO
));
4199 /* We can't just add this to the POPM because it would be in
4200 the wrong order, and wouldn't fix the stack if we're bank
4203 emit_insn (gen_pophi_16 (gen_rtx_REG (HImode
, FP_REGNO
)));
4205 emit_insn (gen_poppsi (gen_rtx_REG (PSImode
, FP_REGNO
)));
4207 if (!bank_switch_p (cfun
->decl
) && cfun
->machine
->intr_pushm
)
4208 emit_insn (gen_popm (GEN_INT (cfun
->machine
->intr_pushm
)));
4210 /* The FREIT (Fast REturn from InTerrupt) instruction should be
4211 generated only for M32C/M32CM targets (generate the REIT
4212 instruction otherwise). */
4213 if (fast_interrupt_p (cfun
->decl
))
4215 /* Check if fast_attribute is set for M32C or M32CM. */
4218 emit_jump_insn (gen_epilogue_freit ());
4220 /* If fast_interrupt attribute is set for an R8C or M16C
4221 target ignore this attribute and generated REIT
4225 warning (OPT_Wattributes
,
4226 "%<fast_interrupt%> attribute directive ignored");
4227 emit_jump_insn (gen_epilogue_reit_16 ());
4230 else if (TARGET_A16
)
4231 emit_jump_insn (gen_epilogue_reit_16 ());
4233 emit_jump_insn (gen_epilogue_reit_24 ());
4235 else if (cfun
->machine
->use_rts
)
4236 emit_jump_insn (gen_epilogue_rts ());
4237 else if (TARGET_A16
)
4238 emit_jump_insn (gen_epilogue_exitd_16 ());
4240 emit_jump_insn (gen_epilogue_exitd_24 ());
4244 m32c_emit_eh_epilogue (rtx ret_addr
)
4246 /* R0[R2] has the stack adjustment. R1[R3] has the address to
4247 return to. We have to fudge the stack, pop everything, pop SP
4248 (fudged), and return (fudged). This is actually easier to do in
4249 assembler, so punt to libgcc. */
4250 emit_jump_insn (gen_eh_epilogue (ret_addr
, cfun
->machine
->eh_stack_adjust
));
4251 /* emit_clobber (gen_rtx_REG (HImode, R0L_REGNO)); */
4254 /* Indicate which flags must be properly set for a given conditional. */
4256 flags_needed_for_conditional (rtx cond
)
4258 switch (GET_CODE (cond
))
4282 /* Returns true if a compare insn is redundant because it would only
4283 set flags that are already set correctly. */
4285 m32c_compare_redundant (rtx cmp
, rtx
*operands
)
4299 fprintf(stderr
, "\n\033[32mm32c_compare_redundant\033[0m\n");
4303 fprintf(stderr
, "operands[%d] = ", i
);
4304 debug_rtx(operands
[i
]);
4308 next
= next_nonnote_insn (cmp
);
4309 if (!next
|| !INSN_P (next
))
4312 fprintf(stderr
, "compare not followed by insn\n");
4317 if (GET_CODE (PATTERN (next
)) == SET
4318 && GET_CODE (XEXP ( PATTERN (next
), 1)) == IF_THEN_ELSE
)
4320 next
= XEXP (XEXP (PATTERN (next
), 1), 0);
4322 else if (GET_CODE (PATTERN (next
)) == SET
)
4324 /* If this is a conditional, flags_needed will be something
4325 other than FLAGS_N, which we test below. */
4326 next
= XEXP (PATTERN (next
), 1);
4331 fprintf(stderr
, "compare not followed by conditional\n");
4337 fprintf(stderr
, "conditional is: ");
4341 flags_needed
= flags_needed_for_conditional (next
);
4342 if (flags_needed
== FLAGS_N
)
4345 fprintf(stderr
, "compare not followed by conditional\n");
4351 /* Compare doesn't set overflow and carry the same way that
4352 arithmetic instructions do, so we can't replace those. */
4353 if (flags_needed
& FLAGS_OC
)
4358 prev
= prev_nonnote_insn (prev
);
4362 fprintf(stderr
, "No previous insn.\n");
4369 fprintf(stderr
, "Previous insn is a non-insn.\n");
4373 pp
= PATTERN (prev
);
4374 if (GET_CODE (pp
) != SET
)
4377 fprintf(stderr
, "Previous insn is not a SET.\n");
4381 pflags
= get_attr_flags (prev
);
4383 /* Looking up attributes of previous insns corrupted the recog
4385 INSN_UID (cmp
) = -1;
4386 recog (PATTERN (cmp
), cmp
, 0);
4388 if (pflags
== FLAGS_N
4389 && reg_mentioned_p (op0
, pp
))
4392 fprintf(stderr
, "intermediate non-flags insn uses op:\n");
4398 /* Check for comparisons against memory - between volatiles and
4399 aliases, we just can't risk this one. */
4400 if (GET_CODE (operands
[0]) == MEM
4401 || GET_CODE (operands
[0]) == MEM
)
4404 fprintf(stderr
, "comparisons with memory:\n");
4410 /* Check for PREV changing a register that's used to compute a
4411 value in CMP, even if it doesn't otherwise change flags. */
4412 if (GET_CODE (operands
[0]) == REG
4413 && rtx_referenced_p (SET_DEST (PATTERN (prev
)), operands
[0]))
4416 fprintf(stderr
, "sub-value affected, op0:\n");
4421 if (GET_CODE (operands
[1]) == REG
4422 && rtx_referenced_p (SET_DEST (PATTERN (prev
)), operands
[1]))
4425 fprintf(stderr
, "sub-value affected, op1:\n");
4431 } while (pflags
== FLAGS_N
);
4433 fprintf(stderr
, "previous flag-setting insn:\n");
4438 if (GET_CODE (pp
) == SET
4439 && GET_CODE (XEXP (pp
, 0)) == REG
4440 && REGNO (XEXP (pp
, 0)) == FLG_REGNO
4441 && GET_CODE (XEXP (pp
, 1)) == COMPARE
)
4443 /* Adjacent cbranches must have the same operands to be
4445 rtx pop0
= XEXP (XEXP (pp
, 1), 0);
4446 rtx pop1
= XEXP (XEXP (pp
, 1), 1);
4448 fprintf(stderr
, "adjacent cbranches\n");
4452 if (rtx_equal_p (op0
, pop0
)
4453 && rtx_equal_p (op1
, pop1
))
4456 fprintf(stderr
, "prev cmp not same\n");
4461 /* Else the previous insn must be a SET, with either the source or
4462 dest equal to operands[0], and operands[1] must be zero. */
4464 if (!rtx_equal_p (op1
, const0_rtx
))
4467 fprintf(stderr
, "operands[1] not const0_rtx\n");
4471 if (GET_CODE (pp
) != SET
)
4474 fprintf (stderr
, "pp not set\n");
4478 if (!rtx_equal_p (op0
, SET_SRC (pp
))
4479 && !rtx_equal_p (op0
, SET_DEST (pp
)))
4482 fprintf(stderr
, "operands[0] not found in set\n");
4488 fprintf(stderr
, "cmp flags %x prev flags %x\n", flags_needed
, pflags
);
4490 if ((pflags
& flags_needed
) == flags_needed
)
4496 /* Return the pattern for a compare. This will be commented out if
4497 the compare is redundant, else a normal pattern is returned. Thus,
4498 the assembler output says where the compare would have been. */
4500 m32c_output_compare (rtx insn
, rtx
*operands
)
4502 static char templ
[] = ";cmp.b\t%1,%0";
4505 templ
[5] = " bwll"[GET_MODE_SIZE(GET_MODE(operands
[0]))];
4506 if (m32c_compare_redundant (insn
, operands
))
4509 fprintf(stderr
, "cbranch: cmp not needed\n");
4515 fprintf(stderr
, "cbranch: cmp needed: `%s'\n", templ
+ 1);
#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO m32c_encode_section_info

/* If the frame pointer isn't used, we detect it manually.  But the
   stack pointer doesn't have as flexible addressing as the frame
   pointer, so we always assume we have it.  */

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED hook_bool_void_true

/* The Global `targetm' Variable.  */

struct gcc_target targetm = TARGET_INITIALIZER;

/* Garbage-collection roots for this file (generated header).  */
#include "gt-m32c.h"