gcc/config/m32c/m32c.c
1/* Target Code for R8C/M16C/M32C
2 Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011
3 Free Software Foundation, Inc.
4 Contributed by Red Hat.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it
9 under the terms of the GNU General Public License as published
10 by the Free Software Foundation; either version 3, or (at your
11 option) any later version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT
14 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
16 License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22#include "config.h"
23#include "system.h"
24#include "coretypes.h"
25#include "tm.h"
26#include "rtl.h"
27#include "regs.h"
28#include "hard-reg-set.h"
29#include "insn-config.h"
30#include "conditions.h"
31#include "insn-flags.h"
32#include "output.h"
33#include "insn-attr.h"
34#include "flags.h"
35#include "recog.h"
36#include "reload.h"
37#include "diagnostic-core.h"
38#include "obstack.h"
39#include "tree.h"
40#include "expr.h"
41#include "optabs.h"
42#include "except.h"
43#include "function.h"
44#include "ggc.h"
45#include "target.h"
46#include "target-def.h"
47#include "tm_p.h"
48#include "langhooks.h"
49#include "gimple.h"
50#include "df.h"
51
52/* Prototypes */
53
54/* Used by m32c_pushm_popm. */
55typedef enum
56{
57 PP_pushm,
58 PP_popm,
59 PP_justcount
60} Push_Pop_Type;
61
62static bool m32c_function_needs_enter (void);
63static tree interrupt_handler (tree *, tree, tree, int, bool *);
64static tree function_vector_handler (tree *, tree, tree, int, bool *);
65static int interrupt_p (tree node);
66static int bank_switch_p (tree node);
67static int fast_interrupt_p (tree node);
68static int interrupt_p (tree node);
69static bool m32c_asm_integer (rtx, unsigned int, int);
70static int m32c_comp_type_attributes (const_tree, const_tree);
71static bool m32c_fixed_condition_code_regs (unsigned int *, unsigned int *);
72static struct machine_function *m32c_init_machine_status (void);
73static void m32c_insert_attributes (tree, tree *);
74static bool m32c_legitimate_address_p (enum machine_mode, rtx, bool);
75static bool m32c_addr_space_legitimate_address_p (enum machine_mode, rtx, bool, addr_space_t);
76static rtx m32c_function_arg (CUMULATIVE_ARGS *, enum machine_mode,
77 const_tree, bool);
78static bool m32c_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
79 const_tree, bool);
80static void m32c_function_arg_advance (CUMULATIVE_ARGS *, enum machine_mode,
81 const_tree, bool);
82static unsigned int m32c_function_arg_boundary (enum machine_mode, const_tree);
83static int m32c_pushm_popm (Push_Pop_Type);
84static bool m32c_strict_argument_naming (CUMULATIVE_ARGS *);
85static rtx m32c_struct_value_rtx (tree, int);
86static rtx m32c_subreg (enum machine_mode, rtx, enum machine_mode, int);
87static int need_to_save (int);
88static rtx m32c_function_value (const_tree, const_tree, bool);
89static rtx m32c_libcall_value (enum machine_mode, const_rtx);
90
91/* Returns true if an address is specified, else false. */
92static bool m32c_get_pragma_address (const char *varname, unsigned *addr);
93
94#define SYMBOL_FLAG_FUNCVEC_FUNCTION (SYMBOL_FLAG_MACH_DEP << 0)
95
96#define streq(a,b) (strcmp ((a), (b)) == 0)
97
98/* Internal support routines */
99
100/* Debugging statements are tagged with DEBUG0 only so that they can
101 be easily enabled individually, by replacing the '0' with '1' as
102 needed. */
103#define DEBUG0 0
104#define DEBUG1 1
105
106#if DEBUG0
107/* This is needed by some of the commented-out debug statements
108 below. */
109static char const *class_names[LIM_REG_CLASSES] = REG_CLASS_NAMES;
110#endif
111static int class_contents[LIM_REG_CLASSES][1] = REG_CLASS_CONTENTS;
112
113/* These are all to support encode_pattern(). */
114static char pattern[30], *patternp;
115static GTY(()) rtx patternr[30];
116#define RTX_IS(x) (streq (pattern, x))
117
118/* Some macros to simplify the logic throughout this file. */
119#define IS_MEM_REGNO(regno) ((regno) >= MEM0_REGNO && (regno) <= MEM7_REGNO)
120#define IS_MEM_REG(rtx) (GET_CODE (rtx) == REG && IS_MEM_REGNO (REGNO (rtx)))
121
122#define IS_CR_REGNO(regno) ((regno) >= SB_REGNO && (regno) <= PC_REGNO)
123#define IS_CR_REG(rtx) (GET_CODE (rtx) == REG && IS_CR_REGNO (REGNO (rtx)))
124
125static int
126far_addr_space_p (rtx x)
127{
128 if (GET_CODE (x) != MEM)
129 return 0;
130#if DEBUG0
131 fprintf(stderr, "\033[35mfar_addr_space: "); debug_rtx(x);
132 fprintf(stderr, " = %d\033[0m\n", MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR);
133#endif
134 return MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR;
135}
136
137/* We do most RTX matching by converting the RTX into a string, and
138 using string compares. This vastly simplifies the logic in many of
139 the functions in this file.
140
141 On exit, pattern[] has the encoded string (use RTX_IS("...") to
142 compare it) and patternr[] has pointers to the nodes in the RTX
143 corresponding to each character in the encoded string. The latter
144 is mostly used by print_operand().
145
146 Unrecognized patterns have '?' in them; this shows up when the
147 assembler complains about syntax errors.
148*/
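/* Added illustration (not in the original source), derived from the
   switch in encode_pattern_1 () below:
     (reg:HI r0)                                  encodes as "r"
     (mem:QI (plus:HI (reg:HI a0) (const_int 4))) encodes as "m+ri"
     (mem:HI (symbol_ref "foo"))                  encodes as "ms"  */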
149
150static void
151encode_pattern_1 (rtx x)
152{
153 int i;
154
155 if (patternp == pattern + sizeof (pattern) - 2)
156 {
157 patternp[-1] = '?';
158 return;
159 }
160
161 patternr[patternp - pattern] = x;
162
163 switch (GET_CODE (x))
164 {
165 case REG:
166 *patternp++ = 'r';
167 break;
168 case SUBREG:
169 if (GET_MODE_SIZE (GET_MODE (x)) !=
170 GET_MODE_SIZE (GET_MODE (XEXP (x, 0))))
171 *patternp++ = 'S';
172 encode_pattern_1 (XEXP (x, 0));
173 break;
174 case MEM:
175 *patternp++ = 'm';
176 case CONST:
177 encode_pattern_1 (XEXP (x, 0));
178 break;
179 case SIGN_EXTEND:
180 *patternp++ = '^';
181 *patternp++ = 'S';
182 encode_pattern_1 (XEXP (x, 0));
183 break;
184 case ZERO_EXTEND:
185 *patternp++ = '^';
186 *patternp++ = 'Z';
187 encode_pattern_1 (XEXP (x, 0));
188 break;
189 case PLUS:
190 *patternp++ = '+';
191 encode_pattern_1 (XEXP (x, 0));
192 encode_pattern_1 (XEXP (x, 1));
193 break;
194 case PRE_DEC:
195 *patternp++ = '>';
196 encode_pattern_1 (XEXP (x, 0));
197 break;
198 case POST_INC:
199 *patternp++ = '<';
200 encode_pattern_1 (XEXP (x, 0));
201 break;
202 case LO_SUM:
203 *patternp++ = 'L';
204 encode_pattern_1 (XEXP (x, 0));
205 encode_pattern_1 (XEXP (x, 1));
206 break;
207 case HIGH:
208 *patternp++ = 'H';
209 encode_pattern_1 (XEXP (x, 0));
210 break;
211 case SYMBOL_REF:
212 *patternp++ = 's';
213 break;
214 case LABEL_REF:
215 *patternp++ = 'l';
216 break;
217 case CODE_LABEL:
218 *patternp++ = 'c';
219 break;
220 case CONST_INT:
221 case CONST_DOUBLE:
222 *patternp++ = 'i';
223 break;
224 case UNSPEC:
225 *patternp++ = 'u';
226 *patternp++ = '0' + XCINT (x, 1, UNSPEC);
227 for (i = 0; i < XVECLEN (x, 0); i++)
228 encode_pattern_1 (XVECEXP (x, 0, i));
229 break;
230 case USE:
231 *patternp++ = 'U';
232 break;
233 case PARALLEL:
234 *patternp++ = '|';
235 for (i = 0; i < XVECLEN (x, 0); i++)
236 encode_pattern_1 (XVECEXP (x, 0, i));
237 break;
238 case EXPR_LIST:
239 *patternp++ = 'E';
240 encode_pattern_1 (XEXP (x, 0));
241 if (XEXP (x, 1))
242 encode_pattern_1 (XEXP (x, 1));
243 break;
244 default:
245 *patternp++ = '?';
246#if DEBUG0
247 fprintf (stderr, "can't encode pattern %s\n",
248 GET_RTX_NAME (GET_CODE (x)));
249 debug_rtx (x);
250 gcc_unreachable ();
251#endif
252 break;
253 }
254}
255
256static void
257encode_pattern (rtx x)
258{
259 patternp = pattern;
260 encode_pattern_1 (x);
261 *patternp = 0;
262}
263
264/* Since register names indicate the mode they're used in, we need a
265 way to determine which name to refer to the register with. Called
266 by print_operand(). */
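/* Added example: R0_REGNO prints as "r0l" in QImode, "r2r0" in SImode
   and "r3r1r2r0" in DImode per the checks below; anything not listed
   falls back to reg_names[], e.g. plain "r0" for HImode.  */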
267
268static const char *
269reg_name_with_mode (int regno, enum machine_mode mode)
270{
271 int mlen = GET_MODE_SIZE (mode);
272 if (regno == R0_REGNO && mlen == 1)
273 return "r0l";
274 if (regno == R0_REGNO && (mlen == 3 || mlen == 4))
275 return "r2r0";
276 if (regno == R0_REGNO && mlen == 6)
277 return "r2r1r0";
278 if (regno == R0_REGNO && mlen == 8)
279 return "r3r1r2r0";
280 if (regno == R1_REGNO && mlen == 1)
281 return "r1l";
282 if (regno == R1_REGNO && (mlen == 3 || mlen == 4))
283 return "r3r1";
284 if (regno == A0_REGNO && TARGET_A16 && (mlen == 3 || mlen == 4))
285 return "a1a0";
286 return reg_names[regno];
287}
288
289/* How many bytes a register uses on stack when it's pushed. We need
290 to know this because the push opcode needs to explicitly indicate
291 the size of the register, even though the register's name already
292 implies that size. Used by m32c_output_reg_{push,pop}, which
293 is only used through calls to ASM_OUTPUT_REG_{PUSH,POP}. */
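/* Added example: reg_push_size (A0_REGNO) is 2 on the 16-bit-address
   (TARGET_A16) parts and 3 on the 24-bit-address parts, matching the
   address register width.  */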
294
295static int
296reg_push_size (int regno)
297{
298 switch (regno)
299 {
300 case R0_REGNO:
301 case R1_REGNO:
302 return 2;
303 case R2_REGNO:
304 case R3_REGNO:
305 case FLG_REGNO:
306 return 2;
307 case A0_REGNO:
308 case A1_REGNO:
309 case SB_REGNO:
310 case FB_REGNO:
311 case SP_REGNO:
312 if (TARGET_A16)
313 return 2;
314 else
315 return 3;
316 default:
317 gcc_unreachable ();
318 }
319}
320
321static int *class_sizes = 0;
322
323/* Given two register classes, find the largest intersection between
324 them. If there is no intersection, return RETURNED_IF_EMPTY
325 instead. */
326static int
327reduce_class (int original_class, int limiting_class, int returned_if_empty)
328{
329 int cc = class_contents[original_class][0];
330 int i, best = NO_REGS;
331 int best_size = 0;
332
333 if (original_class == limiting_class)
334 return original_class;
335
336 if (!class_sizes)
337 {
338 int r;
339 class_sizes = (int *) xmalloc (LIM_REG_CLASSES * sizeof (int));
340 for (i = 0; i < LIM_REG_CLASSES; i++)
341 {
342 class_sizes[i] = 0;
343 for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
344 if (class_contents[i][0] & (1 << r))
345 class_sizes[i]++;
346 }
347 }
348
349 cc &= class_contents[limiting_class][0];
350 for (i = 0; i < LIM_REG_CLASSES; i++)
351 {
352 int ic = class_contents[i][0];
353
354 if ((~cc & ic) == 0)
355 if (best_size < class_sizes[i])
356 {
357 best = i;
358 best_size = class_sizes[i];
359 }
360
361 }
362 if (best == NO_REGS)
363 return returned_if_empty;
364 return best;
365}
366
367/* Used by m32c_register_move_cost to determine if a move is
368 impossibly expensive. */
369static bool
370class_can_hold_mode (reg_class_t rclass, enum machine_mode mode)
371{
372 /* Cache the results: 0=untested 1=no 2=yes */
373 static char results[LIM_REG_CLASSES][MAX_MACHINE_MODE];
374
375 if (results[(int) rclass][mode] == 0)
376 {
377 int r;
378 results[rclass][mode] = 1;
379 for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
380 if (in_hard_reg_set_p (reg_class_contents[(int) rclass], mode, r)
381 && HARD_REGNO_MODE_OK (r, mode))
382 {
383 results[rclass][mode] = 2;
384 break;
385 }
386 }
387
388#if DEBUG0
389 fprintf (stderr, "class %s can hold %s? %s\n",
390 class_names[(int) rclass], mode_name[mode],
391 (results[rclass][mode] == 2) ? "yes" : "no");
392#endif
393 return results[(int) rclass][mode] == 2;
394}
395
396/* Run-time Target Specification. */
397
398/* Memregs are memory locations that gcc treats like general
399 registers, as there are a limited number of true registers and the
400 m32c families can use memory in most places that registers can be
401 used.
402
403 However, since memory accesses are more expensive than registers,
404 we allow the user to limit the number of memregs available, in
405 order to try to persuade gcc to try harder to use real registers.
406
407 Memregs are provided by m32c-lib1.S.
408*/
409
410int ok_to_change_target_memregs = TRUE;
411
412/* Implements TARGET_OPTION_OVERRIDE. */
413
414#undef TARGET_OPTION_OVERRIDE
415#define TARGET_OPTION_OVERRIDE m32c_option_override
416
417static void
418m32c_option_override (void)
419{
420 /* We limit memregs to 0..16, and provide a default. */
421 if (global_options_set.x_target_memregs)
422 {
423 if (target_memregs < 0 || target_memregs > 16)
424 error ("invalid target memregs value '%d'", target_memregs);
425 }
426 else
427 target_memregs = 16;
428
429 if (TARGET_A24)
430 flag_ivopts = 0;
431
432 /* This target defaults to strict volatile bitfields. */
433 if (flag_strict_volatile_bitfields < 0)
434 flag_strict_volatile_bitfields = 1;
435
436 /* r8c/m16c have no 16-bit indirect call, so thunks are involved.
437 This is always worse than an absolute call. */
438 if (TARGET_A16)
439 flag_no_function_cse = 1;
440
441 /* This wants to put insns between compares and their jumps. */
442 /* FIXME: The right solution is to properly trace the flags register
443 values, but that is too much work for stage 4. */
444 flag_combine_stack_adjustments = 0;
445}
446
447#undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
448#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m32c_override_options_after_change
449
450static void
451m32c_override_options_after_change (void)
452{
453 if (TARGET_A16)
454 flag_no_function_cse = 1;
455}
456
457/* Defining data structures for per-function information */
458
459/* The usual; we set up our machine_function data. */
460static struct machine_function *
461m32c_init_machine_status (void)
462{
463 return ggc_alloc_cleared_machine_function ();
464}
465
466/* Implements INIT_EXPANDERS. We just set up to call the above
467 function. */
468void
469m32c_init_expanders (void)
470{
471 init_machine_status = m32c_init_machine_status;
472}
473
474/* Storage Layout */
475
476/* Register Basics */
477
478/* Basic Characteristics of Registers */
479
480/* Whether a mode fits in a register is complex enough to warrant a
481 table. */
482static struct
483{
484 char qi_regs;
485 char hi_regs;
486 char pi_regs;
487 char si_regs;
488 char di_regs;
489} nregs_table[FIRST_PSEUDO_REGISTER] =
490{
491 { 1, 1, 2, 2, 4 }, /* r0 */
492 { 0, 1, 0, 0, 0 }, /* r2 */
493 { 1, 1, 2, 2, 0 }, /* r1 */
494 { 0, 1, 0, 0, 0 }, /* r3 */
495 { 0, 1, 1, 0, 0 }, /* a0 */
496 { 0, 1, 1, 0, 0 }, /* a1 */
497 { 0, 1, 1, 0, 0 }, /* sb */
498 { 0, 1, 1, 0, 0 }, /* fb */
499 { 0, 1, 1, 0, 0 }, /* sp */
500 { 1, 1, 1, 0, 0 }, /* pc */
501 { 0, 0, 0, 0, 0 }, /* fl */
502 { 1, 1, 1, 0, 0 }, /* ap */
503 { 1, 1, 2, 2, 4 }, /* mem0 */
504 { 1, 1, 2, 2, 4 }, /* mem1 */
505 { 1, 1, 2, 2, 4 }, /* mem2 */
506 { 1, 1, 2, 2, 4 }, /* mem3 */
507 { 1, 1, 2, 2, 4 }, /* mem4 */
508 { 1, 1, 2, 2, 0 }, /* mem5 */
509 { 1, 1, 2, 2, 0 }, /* mem6 */
510 { 1, 1, 0, 0, 0 }, /* mem7 */
511};
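/* Added note: each column above is the hard-register count that
   m32c_hard_regno_nregs_1 () below returns for that mode size; a zero
   entry means the mode does not fit in that register at all.  */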
512
513/* Implements TARGET_CONDITIONAL_REGISTER_USAGE. We adjust the number
514 of available memregs, and select which registers need to be preserved
515 across calls based on the chip family. */
516
517#undef TARGET_CONDITIONAL_REGISTER_USAGE
518#define TARGET_CONDITIONAL_REGISTER_USAGE m32c_conditional_register_usage
519void
520m32c_conditional_register_usage (void)
521{
522 int i;
523
524 if (0 <= target_memregs && target_memregs <= 16)
525 {
526 /* The command line option is bytes, but our "registers" are
527 16-bit words. */
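      /* Added example: with -memregs=6, (6+1)/2 == 3, so mem0..mem2
	 stay available and mem3..mem7 are marked fixed by the loop
	 below.  */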
528 for (i = (target_memregs+1)/2; i < 8; i++)
529 {
530 fixed_regs[MEM0_REGNO + i] = 1;
531 CLEAR_HARD_REG_BIT (reg_class_contents[MEM_REGS], MEM0_REGNO + i);
532 }
533 }
534
535 /* M32CM and M32C preserve more registers across function calls. */
536 if (TARGET_A24)
537 {
538 call_used_regs[R1_REGNO] = 0;
539 call_used_regs[R2_REGNO] = 0;
540 call_used_regs[R3_REGNO] = 0;
541 call_used_regs[A0_REGNO] = 0;
542 call_used_regs[A1_REGNO] = 0;
543 }
544}
545
546/* How Values Fit in Registers */
547
548/* Implements HARD_REGNO_NREGS. This is complicated by the fact that
549 different registers are different sizes from each other, *and* may
550 be different sizes in different chip families. */
551static int
552m32c_hard_regno_nregs_1 (int regno, enum machine_mode mode)
553{
554 if (regno == FLG_REGNO && mode == CCmode)
555 return 1;
556 if (regno >= FIRST_PSEUDO_REGISTER)
557 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
558
559 if (regno >= MEM0_REGNO && regno <= MEM7_REGNO)
560 return (GET_MODE_SIZE (mode) + 1) / 2;
561
562 if (GET_MODE_SIZE (mode) <= 1)
563 return nregs_table[regno].qi_regs;
564 if (GET_MODE_SIZE (mode) <= 2)
565 return nregs_table[regno].hi_regs;
566 if (regno == A0_REGNO && mode == SImode && TARGET_A16)
567 return 2;
568 if ((GET_MODE_SIZE (mode) <= 3 || mode == PSImode) && TARGET_A24)
569 return nregs_table[regno].pi_regs;
570 if (GET_MODE_SIZE (mode) <= 4)
571 return nregs_table[regno].si_regs;
572 if (GET_MODE_SIZE (mode) <= 8)
573 return nregs_table[regno].di_regs;
574 return 0;
575}
576
577int
578m32c_hard_regno_nregs (int regno, enum machine_mode mode)
579{
580 int rv = m32c_hard_regno_nregs_1 (regno, mode);
581 return rv ? rv : 1;
582}
583
584/* Implements HARD_REGNO_MODE_OK. The above function does the work
585 already; just test its return value. */
586int
587m32c_hard_regno_ok (int regno, enum machine_mode mode)
588{
589 return m32c_hard_regno_nregs_1 (regno, mode) != 0;
590}
591
592/* Implements MODES_TIEABLE_P. In general, modes aren't tieable since
593 registers are all different sizes. However, since most modes are
594 bigger than our registers anyway, it's easier to implement this
595 function that way, leaving QImode as the only unique case. */
596int
597m32c_modes_tieable_p (enum machine_mode m1, enum machine_mode m2)
598{
599 if (GET_MODE_SIZE (m1) == GET_MODE_SIZE (m2))
600 return 1;
601
602#if 0
603 if (m1 == QImode || m2 == QImode)
604 return 0;
605#endif
606
607 return 1;
608}
609
610/* Register Classes */
611
612/* Implements REGNO_REG_CLASS. */
613enum reg_class
614m32c_regno_reg_class (int regno)
615{
616 switch (regno)
617 {
618 case R0_REGNO:
619 return R0_REGS;
620 case R1_REGNO:
621 return R1_REGS;
622 case R2_REGNO:
623 return R2_REGS;
624 case R3_REGNO:
625 return R3_REGS;
626 case A0_REGNO:
627 return A0_REGS;
628 case A1_REGNO:
629 return A1_REGS;
630 case SB_REGNO:
631 return SB_REGS;
632 case FB_REGNO:
633 return FB_REGS;
634 case SP_REGNO:
635 return SP_REGS;
636 case FLG_REGNO:
637 return FLG_REGS;
638 default:
639 if (IS_MEM_REGNO (regno))
640 return MEM_REGS;
641 return ALL_REGS;
642 }
643}
644
645/* Implements REG_CLASS_FROM_CONSTRAINT. Note that some constraints only match
646 for certain chip families. */
647int
648m32c_reg_class_from_constraint (char c ATTRIBUTE_UNUSED, const char *s)
649{
650 if (memcmp (s, "Rsp", 3) == 0)
651 return SP_REGS;
652 if (memcmp (s, "Rfb", 3) == 0)
653 return FB_REGS;
654 if (memcmp (s, "Rsb", 3) == 0)
655 return SB_REGS;
656 if (memcmp (s, "Rcr", 3) == 0)
657 return TARGET_A16 ? CR_REGS : NO_REGS;
658 if (memcmp (s, "Rcl", 3) == 0)
659 return TARGET_A24 ? CR_REGS : NO_REGS;
660 if (memcmp (s, "R0w", 3) == 0)
661 return R0_REGS;
662 if (memcmp (s, "R1w", 3) == 0)
663 return R1_REGS;
664 if (memcmp (s, "R2w", 3) == 0)
665 return R2_REGS;
666 if (memcmp (s, "R3w", 3) == 0)
667 return R3_REGS;
668 if (memcmp (s, "R02", 3) == 0)
669 return R02_REGS;
670 if (memcmp (s, "R13", 3) == 0)
671 return R13_REGS;
672 if (memcmp (s, "R03", 3) == 0)
673 return R03_REGS;
674 if (memcmp (s, "Rdi", 3) == 0)
675 return DI_REGS;
676 if (memcmp (s, "Rhl", 3) == 0)
677 return HL_REGS;
678 if (memcmp (s, "R23", 3) == 0)
679 return R23_REGS;
680 if (memcmp (s, "Ra0", 3) == 0)
681 return A0_REGS;
682 if (memcmp (s, "Ra1", 3) == 0)
683 return A1_REGS;
684 if (memcmp (s, "Raa", 3) == 0)
685 return A_REGS;
686 if (memcmp (s, "Raw", 3) == 0)
687 return TARGET_A16 ? A_REGS : NO_REGS;
688 if (memcmp (s, "Ral", 3) == 0)
689 return TARGET_A24 ? A_REGS : NO_REGS;
690 if (memcmp (s, "Rqi", 3) == 0)
691 return QI_REGS;
692 if (memcmp (s, "Rad", 3) == 0)
693 return AD_REGS;
694 if (memcmp (s, "Rsi", 3) == 0)
695 return SI_REGS;
696 if (memcmp (s, "Rhi", 3) == 0)
697 return HI_REGS;
698 if (memcmp (s, "Rhc", 3) == 0)
699 return HC_REGS;
700 if (memcmp (s, "Rra", 3) == 0)
701 return RA_REGS;
702 if (memcmp (s, "Rfl", 3) == 0)
703 return FLG_REGS;
704 if (memcmp (s, "Rmm", 3) == 0)
705 {
706 if (fixed_regs[MEM0_REGNO])
707 return NO_REGS;
708 return MEM_REGS;
709 }
710
711 /* PSImode registers - i.e. whatever can hold a pointer. */
712 if (memcmp (s, "Rpi", 3) == 0)
713 {
714 if (TARGET_A16)
715 return HI_REGS;
716 else
717 return RA_REGS; /* r2r0 and r3r1 can hold pointers. */
718 }
719
720 /* We handle this one as an EXTRA_CONSTRAINT. */
721 if (memcmp (s, "Rpa", 3) == 0)
722 return NO_REGS;
723
724 if (*s == 'R')
725 {
726 fprintf(stderr, "unrecognized R constraint: %.3s\n", s);
727 gcc_unreachable();
728 }
729
730 return NO_REGS;
731}
732
733/* Implements REGNO_OK_FOR_BASE_P. */
734int
735m32c_regno_ok_for_base_p (int regno)
736{
737 if (regno == A0_REGNO
738 || regno == A1_REGNO || regno >= FIRST_PSEUDO_REGISTER)
739 return 1;
740 return 0;
741}
742
743#define DEBUG_RELOAD 0
744
745/* Implements PREFERRED_RELOAD_CLASS. In general, prefer general
746 registers of the appropriate size. */
747int
748m32c_preferred_reload_class (rtx x, int rclass)
749{
750 int newclass = rclass;
751
752#if DEBUG_RELOAD
753 fprintf (stderr, "\npreferred_reload_class for %s is ",
754 class_names[rclass]);
755#endif
756 if (rclass == NO_REGS)
757 rclass = GET_MODE (x) == QImode ? HL_REGS : R03_REGS;
758
759 if (reg_classes_intersect_p (rclass, CR_REGS))
760 {
761 switch (GET_MODE (x))
762 {
763 case QImode:
764 newclass = HL_REGS;
765 break;
766 default:
767 /* newclass = HI_REGS; */
768 break;
769 }
770 }
771
772 else if (newclass == QI_REGS && GET_MODE_SIZE (GET_MODE (x)) > 2)
773 newclass = SI_REGS;
774 else if (GET_MODE_SIZE (GET_MODE (x)) > 4
775 && ~class_contents[rclass][0] & 0x000f)
776 newclass = DI_REGS;
777
778 rclass = reduce_class (rclass, newclass, rclass);
779
780 if (GET_MODE (x) == QImode)
781 rclass = reduce_class (rclass, HL_REGS, rclass);
782
783#if DEBUG_RELOAD
784 fprintf (stderr, "%s\n", class_names[rclass]);
785 debug_rtx (x);
786
787 if (GET_CODE (x) == MEM
788 && GET_CODE (XEXP (x, 0)) == PLUS
789 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
790 fprintf (stderr, "Glorm!\n");
791#endif
792 return rclass;
793}
794
795/* Implements PREFERRED_OUTPUT_RELOAD_CLASS. */
796int
797m32c_preferred_output_reload_class (rtx x, int rclass)
798{
799 return m32c_preferred_reload_class (x, rclass);
800}
801
802/* Implements LIMIT_RELOAD_CLASS. We basically want to avoid using
803 address registers for reloads since they're needed for address
804 reloads. */
805int
806m32c_limit_reload_class (enum machine_mode mode, int rclass)
807{
808#if DEBUG_RELOAD
809 fprintf (stderr, "limit_reload_class for %s: %s ->",
810 mode_name[mode], class_names[rclass]);
811#endif
812
813 if (mode == QImode)
814 rclass = reduce_class (rclass, HL_REGS, rclass);
815 else if (mode == HImode)
816 rclass = reduce_class (rclass, HI_REGS, rclass);
817 else if (mode == SImode)
818 rclass = reduce_class (rclass, SI_REGS, rclass);
819
820 if (rclass != A_REGS)
821 rclass = reduce_class (rclass, DI_REGS, rclass);
822
823#if DEBUG_RELOAD
824 fprintf (stderr, " %s\n", class_names[rclass]);
825#endif
826 return rclass;
827}
828
829/* Implements SECONDARY_RELOAD_CLASS. QImode values have to be reloaded in
830 r0 or r1, as those are the only real QImode registers. CR regs get
831 reloaded through appropriately sized general or address
832 registers. */
833int
834m32c_secondary_reload_class (int rclass, enum machine_mode mode, rtx x)
835{
836 int cc = class_contents[rclass][0];
837#if DEBUG0
838 fprintf (stderr, "\nsecondary reload class %s %s\n",
839 class_names[rclass], mode_name[mode]);
840 debug_rtx (x);
841#endif
842 if (mode == QImode
843 && GET_CODE (x) == MEM && (cc & ~class_contents[R23_REGS][0]) == 0)
844 return QI_REGS;
845 if (reg_classes_intersect_p (rclass, CR_REGS)
846 && GET_CODE (x) == REG
847 && REGNO (x) >= SB_REGNO && REGNO (x) <= SP_REGNO)
848 return TARGET_A16 ? HI_REGS : A_REGS;
849 return NO_REGS;
850}
851
852/* Implements TARGET_CLASS_LIKELY_SPILLED_P. A_REGS is needed for address
853 reloads. */
854
855#undef TARGET_CLASS_LIKELY_SPILLED_P
856#define TARGET_CLASS_LIKELY_SPILLED_P m32c_class_likely_spilled_p
857
858static bool
859m32c_class_likely_spilled_p (reg_class_t regclass)
860{
861 if (regclass == A_REGS)
862 return true;
863
864 return (reg_class_size[(int) regclass] == 1);
865}
866
867/* Implements CLASS_MAX_NREGS. We calculate this according to its
868 documented meaning, to avoid potential inconsistencies with actual
869 class definitions. */
870int
871m32c_class_max_nregs (int regclass, enum machine_mode mode)
872{
873 int rn, max = 0;
874
875 for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
876 if (class_contents[regclass][0] & (1 << rn))
877 {
878 int n = m32c_hard_regno_nregs (rn, mode);
879 if (max < n)
880 max = n;
881 }
882 return max;
883}
884
885/* Implements CANNOT_CHANGE_MODE_CLASS. Only r0 and r1 can change to
886 QI (r0l, r1l) because the chip doesn't support QI ops on other
887 registers (well, it does on a0/a1 but if we let gcc do that, reload
888 suffers). Otherwise, we allow changes to larger modes. */
889int
890m32c_cannot_change_mode_class (enum machine_mode from,
891 enum machine_mode to, int rclass)
892{
893 int rn;
894#if DEBUG0
895 fprintf (stderr, "cannot change from %s to %s in %s\n",
896 mode_name[from], mode_name[to], class_names[rclass]);
897#endif
898
899 /* If the larger mode isn't allowed in any of these registers, we
900 can't allow the change. */
901 for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
902 if (class_contents[rclass][0] & (1 << rn))
903 if (! m32c_hard_regno_ok (rn, to))
904 return 1;
905
906 if (to == QImode)
907 return (class_contents[rclass][0] & 0x1ffa);
908
909 if (class_contents[rclass][0] & 0x0005 /* r0, r1 */
910 && GET_MODE_SIZE (from) > 1)
911 return 0;
912 if (GET_MODE_SIZE (from) > 2) /* all other regs */
913 return 0;
914
915 return 1;
916}
917
918/* Helpers for the rest of the file. */
919/* TRUE if the rtx is a REG rtx for the given register. */
920#define IS_REG(rtx,regno) (GET_CODE (rtx) == REG \
921 && REGNO (rtx) == regno)
922/* TRUE if the rtx is a pseudo - specifically, one we can use as a
923 base register in address calculations (hence the "strict"
924 argument). */
925#define IS_PSEUDO(rtx,strict) (!strict && GET_CODE (rtx) == REG \
926 && (REGNO (rtx) == AP_REGNO \
927 || REGNO (rtx) >= FIRST_PSEUDO_REGISTER))
928
929/* Implements CONST_OK_FOR_CONSTRAINT_P. Currently, all constant
930 constraints start with 'I', with the next two characters indicating
931 the type and size of the range allowed. */
932int
933m32c_const_ok_for_constraint_p (HOST_WIDE_INT value,
934 char c ATTRIBUTE_UNUSED, const char *str)
935{
936 /* s=signed u=unsigned n=nonzero m=minus l=log2able,
937 [sun] bits [SUN] bytes, p=pointer size
938 I[-0-9][0-9] matches that number */
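  /* Added examples, read off the checks below: "IS1" accepts any signed
     one-byte value (-128..127), "IU2" any unsigned two-byte value
     (0..65535), and "Ilb" any power of two whose set bit is in
     positions 0..7.  */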
939 if (memcmp (str, "Is3", 3) == 0)
940 {
941 return (-8 <= value && value <= 7);
942 }
943 if (memcmp (str, "IS1", 3) == 0)
944 {
945 return (-128 <= value && value <= 127);
946 }
947 if (memcmp (str, "IS2", 3) == 0)
948 {
949 return (-32768 <= value && value <= 32767);
950 }
951 if (memcmp (str, "IU2", 3) == 0)
952 {
953 return (0 <= value && value <= 65535);
954 }
955 if (memcmp (str, "IU3", 3) == 0)
956 {
957 return (0 <= value && value <= 0x00ffffff);
958 }
959 if (memcmp (str, "In4", 3) == 0)
960 {
961 return (-8 <= value && value && value <= 8);
962 }
963 if (memcmp (str, "In5", 3) == 0)
964 {
965 return (-16 <= value && value && value <= 16);
966 }
967 if (memcmp (str, "In6", 3) == 0)
968 {
969 return (-32 <= value && value && value <= 32);
970 }
971 if (memcmp (str, "IM2", 3) == 0)
972 {
973 return (-65536 <= value && value && value <= -1);
974 }
975 if (memcmp (str, "Ilb", 3) == 0)
976 {
977 int b = exact_log2 (value);
978 return (b >= 0 && b <= 7);
979 }
980 if (memcmp (str, "Imb", 3) == 0)
981 {
982 int b = exact_log2 ((value ^ 0xff) & 0xff);
983 return (b >= 0 && b <= 7);
984 }
985 if (memcmp (str, "ImB", 3) == 0)
986 {
987 int b = exact_log2 ((value ^ 0xffff) & 0xffff);
988 return (b >= 0 && b <= 7);
989 }
990 if (memcmp (str, "Ilw", 3) == 0)
991 {
992 int b = exact_log2 (value);
993 return (b >= 0 && b <= 15);
994 }
995 if (memcmp (str, "Imw", 3) == 0)
996 {
997 int b = exact_log2 ((value ^ 0xffff) & 0xffff);
998 return (b >= 0 && b <= 15);
999 }
1000 if (memcmp (str, "I00", 3) == 0)
1001 {
1002 return (value == 0);
1003 }
1004 return 0;
1005}
1006
1007#define A0_OR_PSEUDO(x) (IS_REG(x, A0_REGNO) || REGNO (x) >= FIRST_PSEUDO_REGISTER)
1008
38b2d076
DD
1009/* Implements EXTRA_CONSTRAINT_STR (see next function too). 'S' is
1010 for memory constraints, plus "Rpa" for PARALLEL rtx's we use for
1011 call return values. */
1012int
1013m32c_extra_constraint_p2 (rtx value, char c ATTRIBUTE_UNUSED, const char *str)
1014{
1015 encode_pattern (value);
1016
1017 if (far_addr_space_p (value))
1018 {
1019 if (memcmp (str, "SF", 2) == 0)
1020 {
1021 return ( (RTX_IS ("mr")
1022 && A0_OR_PSEUDO (patternr[1])
1023 && GET_MODE (patternr[1]) == SImode)
1024 || (RTX_IS ("m+^Sri")
1025 && A0_OR_PSEUDO (patternr[4])
1026 && GET_MODE (patternr[4]) == HImode)
1027 || (RTX_IS ("m+^Srs")
1028 && A0_OR_PSEUDO (patternr[4])
1029 && GET_MODE (patternr[4]) == HImode)
1030 || (RTX_IS ("m+^S+ris")
1031 && A0_OR_PSEUDO (patternr[5])
1032 && GET_MODE (patternr[5]) == HImode)
1033 || RTX_IS ("ms")
1034 );
1035 }
1036 return 0;
1037 }
1038
1039 if (memcmp (str, "Sd", 2) == 0)
1040 {
1041 /* This is the common "src/dest" address */
1042 rtx r;
1043 if (GET_CODE (value) == MEM && CONSTANT_P (XEXP (value, 0)))
1044 return 1;
1045 if (RTX_IS ("ms") || RTX_IS ("m+si"))
1046 return 1;
1047 if (RTX_IS ("m++rii"))
1048 {
1049 if (REGNO (patternr[3]) == FB_REGNO
1050 && INTVAL (patternr[4]) == 0)
1051 return 1;
1052 }
1053 if (RTX_IS ("mr"))
1054 r = patternr[1];
1055 else if (RTX_IS ("m+ri") || RTX_IS ("m+rs") || RTX_IS ("m+r+si"))
1056 r = patternr[2];
1057 else
1058 return 0;
1059 if (REGNO (r) == SP_REGNO)
1060 return 0;
1061 return m32c_legitimate_address_p (GET_MODE (value), XEXP (value, 0), 1);
1062 }
1063 else if (memcmp (str, "Sa", 2) == 0)
1064 {
1065 rtx r;
1066 if (RTX_IS ("mr"))
1067 r = patternr[1];
1068 else if (RTX_IS ("m+ri"))
1069 r = patternr[2];
1070 else
1071 return 0;
1072 return (IS_REG (r, A0_REGNO) || IS_REG (r, A1_REGNO));
1073 }
1074 else if (memcmp (str, "Si", 2) == 0)
1075 {
1076 return (RTX_IS ("mi") || RTX_IS ("ms") || RTX_IS ("m+si"));
1077 }
1078 else if (memcmp (str, "Ss", 2) == 0)
1079 {
1080 return ((RTX_IS ("mr")
1081 && (IS_REG (patternr[1], SP_REGNO)))
1082 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SP_REGNO))));
1083 }
1084 else if (memcmp (str, "Sf", 2) == 0)
1085 {
1086 return ((RTX_IS ("mr")
1087 && (IS_REG (patternr[1], FB_REGNO)))
1088 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], FB_REGNO))));
1089 }
1090 else if (memcmp (str, "Sb", 2) == 0)
1091 {
1092 return ((RTX_IS ("mr")
1093 && (IS_REG (patternr[1], SB_REGNO)))
1094 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SB_REGNO))));
1095 }
1096 else if (memcmp (str, "Sp", 2) == 0)
1097 {
1098 /* Absolute addresses 0..0x1fff used for bit addressing (I/O ports) */
1099 return (RTX_IS ("mi")
1100 && !(INTVAL (patternr[1]) & ~0x1fff));
1101 }
1102 else if (memcmp (str, "S1", 2) == 0)
1103 {
1104 return r1h_operand (value, QImode);
1105 }
1106 else if (memcmp (str, "SF", 2) == 0)
1107 {
1108 return 0;
1109 }
1110
1111 gcc_assert (str[0] != 'S');
1112
1113 if (memcmp (str, "Rpa", 2) == 0)
1114 return GET_CODE (value) == PARALLEL;
1115
1116 return 0;
1117}
1118
1119/* This is for when we're debugging the above. */
1120int
1121m32c_extra_constraint_p (rtx value, char c, const char *str)
1122{
1123 int rv = m32c_extra_constraint_p2 (value, c, str);
1124#if DEBUG0
1125 fprintf (stderr, "\nconstraint %.*s: %d\n", CONSTRAINT_LEN (c, str), str,
1126 rv);
1127 debug_rtx (value);
1128#endif
1129 return rv;
1130}
1131
1132/* Implements EXTRA_MEMORY_CONSTRAINT. Currently, we only use strings
1133 starting with 'S'. */
1134int
1135m32c_extra_memory_constraint (char c, const char *str ATTRIBUTE_UNUSED)
1136{
1137 return c == 'S';
1138}
1139
1140/* Implements EXTRA_ADDRESS_CONSTRAINT. We reserve 'A' strings for these,
1141 but don't currently define any. */
1142int
1143m32c_extra_address_constraint (char c, const char *str ATTRIBUTE_UNUSED)
1144{
1145 return c == 'A';
1146}
1147
1148/* STACK AND CALLING */
1149
1150/* Frame Layout */
1151
1152/* Implements RETURN_ADDR_RTX. Note that R8C and M16C push 24 bits
1153 (yes, THREE bytes) onto the stack for the return address, but we
1154 don't support pointers bigger than 16 bits on those chips. This
1155 will likely wreak havoc with exception unwinding. FIXME. */
1156rtx
1157m32c_return_addr_rtx (int count)
1158{
1159 enum machine_mode mode;
1160 int offset;
1161 rtx ra_mem;
1162
1163 if (count)
1164 return NULL_RTX;
1165 /* we want 2[$fb] */
1166
1167 if (TARGET_A24)
1168 {
1169 /* It's four bytes */
1170 mode = PSImode;
1171 offset = 4;
1172 }
1173 else
1174 {
1175 /* FIXME: it's really 3 bytes */
1176 mode = HImode;
1177 offset = 2;
1178 }
1179
1180 ra_mem =
1181 gen_rtx_MEM (mode, plus_constant (gen_rtx_REG (Pmode, FP_REGNO), offset));
1182 return copy_to_mode_reg (mode, ra_mem);
1183}
1184
1185/* Implements INCOMING_RETURN_ADDR_RTX. See comment above. */
1186rtx
1187m32c_incoming_return_addr_rtx (void)
1188{
1189 /* we want [sp] */
1190 return gen_rtx_MEM (PSImode, gen_rtx_REG (PSImode, SP_REGNO));
1191}
1192
1193/* Exception Handling Support */
1194
1195/* Implements EH_RETURN_DATA_REGNO. Choose registers able to hold
1196 pointers. */
1197int
1198m32c_eh_return_data_regno (int n)
1199{
1200 switch (n)
1201 {
1202 case 0:
1203 return A0_REGNO;
1204 case 1:
1205 if (TARGET_A16)
1206 return R3_REGNO;
1207 else
1208 return R1_REGNO;
1209 default:
1210 return INVALID_REGNUM;
1211 }
1212}
1213
1214/* Implements EH_RETURN_STACKADJ_RTX. Saved and used later in
1215 m32c_emit_eh_epilogue. */
1216rtx
1217m32c_eh_return_stackadj_rtx (void)
1218{
1219 if (!cfun->machine->eh_stack_adjust)
1220 {
1221 rtx sa;
1222
1223 sa = gen_rtx_REG (Pmode, R0_REGNO);
1224 cfun->machine->eh_stack_adjust = sa;
1225 }
1226 return cfun->machine->eh_stack_adjust;
1227}
1228
1229/* Registers That Address the Stack Frame */
1230
1231/* Implements DWARF_FRAME_REGNUM and DBX_REGISTER_NUMBER. Note that
1232 the original spec called for dwarf numbers to vary with register
1233 width as well, for example, r0l, r0, and r2r0 would each have
1234 different dwarf numbers. GCC doesn't support this, and we don't do
1235 it, and gdb seems to like it this way anyway. */
1236unsigned int
1237m32c_dwarf_frame_regnum (int n)
1238{
1239 switch (n)
1240 {
1241 case R0_REGNO:
1242 return 5;
1243 case R1_REGNO:
1244 return 6;
1245 case R2_REGNO:
1246 return 7;
1247 case R3_REGNO:
1248 return 8;
1249 case A0_REGNO:
1250 return 9;
1251 case A1_REGNO:
1252 return 10;
1253 case FB_REGNO:
1254 return 11;
1255 case SB_REGNO:
1256 return 19;
1257
1258 case SP_REGNO:
1259 return 12;
1260 case PC_REGNO:
1261 return 13;
1262 default:
1263 return DWARF_FRAME_REGISTERS + 1;
1264 }
1265}
1266
1267/* The frame looks like this:
1268
1269 ap -> +------------------------------
1270 | Return address (3 or 4 bytes)
1271 | Saved FB (2 or 4 bytes)
1272 fb -> +------------------------------
1273 | local vars
1274 | register saves fb
1275 | through r0 as needed
1276 sp -> +------------------------------
1277*/
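/* Added note: on TARGET_A16 the return address is 3 bytes and the saved
   FB is 2, giving the 5-byte AP adjustment in
   m32c_initial_elimination_offset () below; on TARGET_A24 both are 4
   bytes, giving 8.  */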
1278
1279/* We use this to wrap all emitted insns in the prologue. */
1280static rtx
1281F (rtx x)
1282{
1283 RTX_FRAME_RELATED_P (x) = 1;
1284 return x;
1285}
1286
1287/* This maps register numbers to the PUSHM/POPM bitfield, and tells us
1288 how much the stack pointer moves for each, for each cpu family. */
1289static struct
1290{
1291 int reg1;
1292 int bit;
1293 int a16_bytes;
1294 int a24_bytes;
1295} pushm_info[] =
1296{
1297 /* These are in reverse push (nearest-to-sp) order. */
1298 { R0_REGNO, 0x80, 2, 2 },
1299 { R1_REGNO, 0x40, 2, 2 },
1300 { R2_REGNO, 0x20, 2, 2 },
1301 { R3_REGNO, 0x10, 2, 2 },
1302 { A0_REGNO, 0x08, 2, 4 },
1303 { A1_REGNO, 0x04, 2, 4 },
1304 { SB_REGNO, 0x02, 2, 4 },
1305 { FB_REGNO, 0x01, 2, 4 }
1306};
1307
1308#define PUSHM_N (sizeof(pushm_info)/sizeof(pushm_info[0]))
1309
1310/* Returns TRUE if we need to save/restore the given register. We
1311 save everything for exception handlers, so that any register can be
1312 unwound. For interrupt handlers, we save everything if the handler
1313 calls something else (because we don't know what *that* function
1314 might do), but try to be a bit smarter if the handler is a leaf
1315 function. We always save $a0, though, because we use that in the
1316 epilogue to copy $fb to $sp. */
1317static int
1318need_to_save (int regno)
1319{
1320 if (fixed_regs[regno])
1321 return 0;
1322 if (crtl->calls_eh_return)
1323 return 1;
1324 if (regno == FP_REGNO)
1325 return 0;
1326 if (cfun->machine->is_interrupt
1327 && (!cfun->machine->is_leaf
1328 || (regno == A0_REGNO
1329 && m32c_function_needs_enter ())
1330 ))
1331 return 1;
1332 if (df_regs_ever_live_p (regno)
1333 && (!call_used_regs[regno] || cfun->machine->is_interrupt))
1334 return 1;
1335 return 0;
1336}
1337
1338/* This function contains all the intelligence about saving and
1339 restoring registers. It always figures out the register save set.
1340 When called with PP_justcount, it merely returns the size of the
1341 save set (for eliminating the frame pointer, for example). When
1342 called with PP_pushm or PP_popm, it emits the appropriate
1343 instructions for saving (pushm) or restoring (popm) the
1344 registers. */
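/* Added usage note: m32c_initial_elimination_offset () below calls this
   with PP_justcount to size the register save area; the PP_pushm and
   PP_popm cases are presumably used by the prologue/epilogue emitters,
   which are outside this excerpt.  */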
1345static int
1346m32c_pushm_popm (Push_Pop_Type ppt)
1347{
1348 int reg_mask = 0;
1349 int byte_count = 0, bytes;
1350 int i;
1351 rtx dwarf_set[PUSHM_N];
1352 int n_dwarfs = 0;
1353 int nosave_mask = 0;
1354
1355 if (crtl->return_rtx
1356 && GET_CODE (crtl->return_rtx) == PARALLEL
1357 && !(crtl->calls_eh_return || cfun->machine->is_interrupt))
1358 {
1359 rtx exp = XVECEXP (crtl->return_rtx, 0, 0);
1360 rtx rv = XEXP (exp, 0);
1361 int rv_bytes = GET_MODE_SIZE (GET_MODE (rv));
1362
1363 if (rv_bytes > 2)
1364 nosave_mask |= 0x20; /* PSI, SI */
1365 else
1366 nosave_mask |= 0xf0; /* DF */
1367 if (rv_bytes > 4)
1368 nosave_mask |= 0x50; /* DI */
1369 }
1370
1371 for (i = 0; i < (int) PUSHM_N; i++)
1372 {
1373 /* Skip if neither register needs saving. */
1374 if (!need_to_save (pushm_info[i].reg1))
1375 continue;
1376
1377 if (pushm_info[i].bit & nosave_mask)
1378 continue;
1379
1380 reg_mask |= pushm_info[i].bit;
1381 bytes = TARGET_A16 ? pushm_info[i].a16_bytes : pushm_info[i].a24_bytes;
1382
1383 if (ppt == PP_pushm)
1384 {
1385 enum machine_mode mode = (bytes == 2) ? HImode : SImode;
1386 rtx addr;
1387
1388 /* Always use stack_pointer_rtx instead of calling
1389 rtx_gen_REG ourselves. Code elsewhere in GCC assumes
1390 that there is a single rtx representing the stack pointer,
1391 namely stack_pointer_rtx, and uses == to recognize it. */
1392 addr = stack_pointer_rtx;
1393
1394 if (byte_count != 0)
1395 addr = gen_rtx_PLUS (GET_MODE (addr), addr, GEN_INT (byte_count));
1396
1397 dwarf_set[n_dwarfs++] =
1398 gen_rtx_SET (VOIDmode,
1399 gen_rtx_MEM (mode, addr),
1400 gen_rtx_REG (mode, pushm_info[i].reg1));
1401 F (dwarf_set[n_dwarfs - 1]);
1402
1403 }
1404 byte_count += bytes;
1405 }
1406
1407 if (cfun->machine->is_interrupt)
1408 {
1409 cfun->machine->intr_pushm = reg_mask & 0xfe;
1410 reg_mask = 0;
1411 byte_count = 0;
1412 }
1413
1414 if (cfun->machine->is_interrupt)
1415 for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
1416 if (need_to_save (i))
1417 {
1418 byte_count += 2;
1419 cfun->machine->intr_pushmem[i - MEM0_REGNO] = 1;
1420 }
1421
1422 if (ppt == PP_pushm && byte_count)
1423 {
1424 rtx note = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (n_dwarfs + 1));
1425 rtx pushm;
1426
1427 if (reg_mask)
1428 {
1429 XVECEXP (note, 0, 0)
1430 = gen_rtx_SET (VOIDmode,
1431 stack_pointer_rtx,
1432 gen_rtx_PLUS (GET_MODE (stack_pointer_rtx),
1433 stack_pointer_rtx,
1434 GEN_INT (-byte_count)));
1435 F (XVECEXP (note, 0, 0));
1436
1437 for (i = 0; i < n_dwarfs; i++)
1438 XVECEXP (note, 0, i + 1) = dwarf_set[i];
1439
1440 pushm = F (emit_insn (gen_pushm (GEN_INT (reg_mask))));
1441
1442 add_reg_note (pushm, REG_FRAME_RELATED_EXPR, note);
1443 }
1444
1445 if (cfun->machine->is_interrupt)
1446 for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
1447 if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
1448 {
1449 if (TARGET_A16)
1450 pushm = emit_insn (gen_pushhi_16 (gen_rtx_REG (HImode, i)));
1451 else
1452 pushm = emit_insn (gen_pushhi_24 (gen_rtx_REG (HImode, i)));
1453 F (pushm);
1454 }
1455 }
1456 if (ppt == PP_popm && byte_count)
1457 {
1458 if (cfun->machine->is_interrupt)
1459 for (i = MEM7_REGNO; i >= MEM0_REGNO; i--)
1460 if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
1461 {
1462 if (TARGET_A16)
1463 emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, i)));
1464 else
1465 emit_insn (gen_pophi_24 (gen_rtx_REG (HImode, i)));
1466 }
1467 if (reg_mask)
1468 emit_insn (gen_popm (GEN_INT (reg_mask)));
1469 }
1470
1471 return byte_count;
1472}
1473
1474/* Implements INITIAL_ELIMINATION_OFFSET. See the comment above that
1475 diagrams our call frame. */
1476int
1477m32c_initial_elimination_offset (int from, int to)
1478{
1479 int ofs = 0;
1480
1481 if (from == AP_REGNO)
1482 {
1483 if (TARGET_A16)
1484 ofs += 5;
1485 else
1486 ofs += 8;
1487 }
1488
1489 if (to == SP_REGNO)
1490 {
1491 ofs += m32c_pushm_popm (PP_justcount);
1492 ofs += get_frame_size ();
1493 }
1494
1495 /* Account for push rounding. */
1496 if (TARGET_A24)
1497 ofs = (ofs + 1) & ~1;
1498#if DEBUG0
1499 fprintf (stderr, "initial_elimination_offset from=%d to=%d, ofs=%d\n", from,
1500 to, ofs);
1501#endif
1502 return ofs;
1503}
1504
1505/* Passing Function Arguments on the Stack */
1506
1507/* Implements PUSH_ROUNDING. The R8C and M16C have byte stacks, the
1508 M32C has word stacks. */
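/* Added example: m32c_push_rounding (3) stays 3 on R8C/M16C but rounds
   up to 4 on M32C, since that chip's stack moves in 2-byte units.  */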
1509unsigned int
1510m32c_push_rounding (int n)
1511{
1512 if (TARGET_R8C || TARGET_M16C)
1513 return n;
1514 return (n + 1) & ~1;
1515}
1516
1517/* Passing Arguments in Registers */
1518
1519/* Implements TARGET_FUNCTION_ARG. Arguments are passed partly in
1520 registers, partly on stack. If our function returns a struct, a
1521 pointer to a buffer for it is at the top of the stack (last thing
1522 pushed). The first few real arguments may be in registers as
1523 follows:
1524
1525 R8C/M16C: arg1 in r1 if it's QI or HI (else it's pushed on stack)
1526 arg2 in r2 if it's HI (else pushed on stack)
1527 rest on stack
1528 M32C: arg1 in r0 if it's QI or HI (else it's pushed on stack)
1529 rest on stack
1530
1531 Structs are not passed in registers, even if they fit. Only
1532 integer and pointer types are passed in registers.
1533
1534 Note that when arg1 doesn't fit in r1, arg2 may still be passed in
1535 r2 if it fits. */
1536#undef TARGET_FUNCTION_ARG
1537#define TARGET_FUNCTION_ARG m32c_function_arg
1538static rtx
1539m32c_function_arg (CUMULATIVE_ARGS * ca,
1540 enum machine_mode mode, const_tree type, bool named)
1541{
1542 /* Can return a reg, parallel, or 0 for stack */
1543 rtx rv = NULL_RTX;
1544#if DEBUG0
1545 fprintf (stderr, "func_arg %d (%s, %d)\n",
1546 ca->parm_num, mode_name[mode], named);
1547 debug_tree (type);
1548#endif
1549
1550 if (mode == VOIDmode)
1551 return GEN_INT (0);
1552
1553 if (ca->force_mem || !named)
1554 {
1555#if DEBUG0
1556 fprintf (stderr, "func arg: force %d named %d, mem\n", ca->force_mem,
1557 named);
1558#endif
1559 return NULL_RTX;
1560 }
1561
1562 if (type && INTEGRAL_TYPE_P (type) && POINTER_TYPE_P (type))
1563 return NULL_RTX;
1564
1565 if (type && AGGREGATE_TYPE_P (type))
1566 return NULL_RTX;
1567
1568 switch (ca->parm_num)
1569 {
1570 case 1:
1571 if (GET_MODE_SIZE (mode) == 1 || GET_MODE_SIZE (mode) == 2)
1572 rv = gen_rtx_REG (mode, TARGET_A16 ? R1_REGNO : R0_REGNO);
1573 break;
1574
1575 case 2:
1576 if (TARGET_A16 && GET_MODE_SIZE (mode) == 2)
1577 rv = gen_rtx_REG (mode, R2_REGNO);
1578 break;
1579 }
1580
1581#if DEBUG0
1582 debug_rtx (rv);
1583#endif
1584 return rv;
1585}
1586
1587#undef TARGET_PASS_BY_REFERENCE
1588#define TARGET_PASS_BY_REFERENCE m32c_pass_by_reference
1589static bool
1590m32c_pass_by_reference (CUMULATIVE_ARGS * ca ATTRIBUTE_UNUSED,
1591 enum machine_mode mode ATTRIBUTE_UNUSED,
1592 const_tree type ATTRIBUTE_UNUSED,
1593 bool named ATTRIBUTE_UNUSED)
1594{
1595 return 0;
1596}
1597
1598/* Implements INIT_CUMULATIVE_ARGS. */
1599void
1600m32c_init_cumulative_args (CUMULATIVE_ARGS * ca,
1601 tree fntype,
1602 rtx libname ATTRIBUTE_UNUSED,
1603 tree fndecl,
1604 int n_named_args ATTRIBUTE_UNUSED)
1605{
1606 if (fntype && aggregate_value_p (TREE_TYPE (fntype), fndecl))
1607 ca->force_mem = 1;
1608 else
1609 ca->force_mem = 0;
1610 ca->parm_num = 1;
1611}
1612
1613/* Implements TARGET_FUNCTION_ARG_ADVANCE. force_mem is set for
1614 functions returning structures, so we always reset that. Otherwise,
1615 we only need to know the sequence number of the argument to know what
1616 to do with it. */
1617#undef TARGET_FUNCTION_ARG_ADVANCE
1618#define TARGET_FUNCTION_ARG_ADVANCE m32c_function_arg_advance
1619static void
1620m32c_function_arg_advance (CUMULATIVE_ARGS * ca,
1621 enum machine_mode mode ATTRIBUTE_UNUSED,
1622 const_tree type ATTRIBUTE_UNUSED,
1623 bool named ATTRIBUTE_UNUSED)
1624{
1625 if (ca->force_mem)
1626 ca->force_mem = 0;
1627 else
1628 ca->parm_num++;
1629}
1630
1631/* Implements TARGET_FUNCTION_ARG_BOUNDARY. */
1632#undef TARGET_FUNCTION_ARG_BOUNDARY
1633#define TARGET_FUNCTION_ARG_BOUNDARY m32c_function_arg_boundary
1634static unsigned int
1635m32c_function_arg_boundary (enum machine_mode mode ATTRIBUTE_UNUSED,
1636 const_tree type ATTRIBUTE_UNUSED)
1637{
1638 return (TARGET_A16 ? 8 : 16);
1639}
1640
1641/* Implements FUNCTION_ARG_REGNO_P. */
1642int
1643m32c_function_arg_regno_p (int r)
1644{
1645 if (TARGET_A24)
1646 return (r == R0_REGNO);
1647 return (r == R1_REGNO || r == R2_REGNO);
1648}
1649
1650/* HImode and PSImode are the two "native" modes as far as GCC is
1651 concerned, but the chips also support a 32-bit mode which is used
1652 for some opcodes in R8C/M16C and for reset vectors and such. */
1653#undef TARGET_VALID_POINTER_MODE
1654#define TARGET_VALID_POINTER_MODE m32c_valid_pointer_mode
1655static bool
1656m32c_valid_pointer_mode (enum machine_mode mode)
1657{
1658 if (mode == HImode
1659 || mode == PSImode
1660 || mode == SImode
1661 )
1662 return 1;
1663 return 0;
1664}
1665
1666/* How Scalar Function Values Are Returned */
1667
1668/* Implements TARGET_LIBCALL_VALUE. Most values are returned in $r0, or some
1669 combination of registers starting there (r2r0 for longs, r3r1r2r0
1670 for long long, r3r2r1r0 for doubles), except that that ABI
1671 currently doesn't work because it ends up using all available
1672 general registers and gcc often can't compile it. So, instead, we
1673 return anything bigger than 16 bits in "mem0" (effectively, a
1674 memory location). */
1675
1676#undef TARGET_LIBCALL_VALUE
1677#define TARGET_LIBCALL_VALUE m32c_libcall_value
1678
1679static rtx
1680m32c_libcall_value (enum machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
1681{
1682 /* return reg or parallel */
1683#if 0
1684 /* FIXME: GCC has difficulty returning large values in registers,
1685 because that ties up most of the general registers and gives the
1686 register allocator little to work with. Until we can resolve
1687 this, large values are returned in memory. */
1688 if (mode == DFmode)
1689 {
1690 rtx rv;
1691
1692 rv = gen_rtx_PARALLEL (mode, rtvec_alloc (4));
1693 XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
1694 gen_rtx_REG (HImode,
1695 R0_REGNO),
1696 GEN_INT (0));
1697 XVECEXP (rv, 0, 1) = gen_rtx_EXPR_LIST (VOIDmode,
1698 gen_rtx_REG (HImode,
1699 R1_REGNO),
1700 GEN_INT (2));
1701 XVECEXP (rv, 0, 2) = gen_rtx_EXPR_LIST (VOIDmode,
1702 gen_rtx_REG (HImode,
1703 R2_REGNO),
1704 GEN_INT (4));
1705 XVECEXP (rv, 0, 3) = gen_rtx_EXPR_LIST (VOIDmode,
1706 gen_rtx_REG (HImode,
1707 R3_REGNO),
1708 GEN_INT (6));
1709 return rv;
1710 }
1711
1712 if (TARGET_A24 && GET_MODE_SIZE (mode) > 2)
1713 {
1714 rtx rv;
1715
1716 rv = gen_rtx_PARALLEL (mode, rtvec_alloc (1));
1717 XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
1718 gen_rtx_REG (mode,
1719 R0_REGNO),
1720 GEN_INT (0));
1721 return rv;
1722 }
1723#endif
1724
1725 if (GET_MODE_SIZE (mode) > 2)
1726 return gen_rtx_REG (mode, MEM0_REGNO);
1727 return gen_rtx_REG (mode, R0_REGNO);
1728}
1729
1730/* Implements TARGET_FUNCTION_VALUE. Functions and libcalls have the same
1731 conventions. */
1732
1733#undef TARGET_FUNCTION_VALUE
1734#define TARGET_FUNCTION_VALUE m32c_function_value
1735
1736static rtx
1737m32c_function_value (const_tree valtype,
1738 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
1739 bool outgoing ATTRIBUTE_UNUSED)
1740{
1741 /* return reg or parallel */
1742 const enum machine_mode mode = TYPE_MODE (valtype);
1743 return m32c_libcall_value (mode, NULL_RTX);
1744}
1745
1746/* Implements TARGET_FUNCTION_VALUE_REGNO_P. */
1747
1748#undef TARGET_FUNCTION_VALUE_REGNO_P
1749#define TARGET_FUNCTION_VALUE_REGNO_P m32c_function_value_regno_p
1750
1751static bool
1752m32c_function_value_regno_p (const unsigned int regno)
1753{
1754 return (regno == R0_REGNO || regno == MEM0_REGNO);
1755}
1756
1757/* How Large Values Are Returned */
1758
1759/* We return structures by pushing the address on the stack, even if
1760 we use registers for the first few "real" arguments. */
1761#undef TARGET_STRUCT_VALUE_RTX
1762#define TARGET_STRUCT_VALUE_RTX m32c_struct_value_rtx
1763static rtx
1764m32c_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
1765 int incoming ATTRIBUTE_UNUSED)
1766{
1767 return 0;
1768}
1769
1770/* Function Entry and Exit */
1771
1772/* Implements EPILOGUE_USES. Interrupts restore all registers. */
1773int
1774m32c_epilogue_uses (int regno ATTRIBUTE_UNUSED)
1775{
1776 if (cfun->machine->is_interrupt)
1777 return 1;
1778 return 0;
1779}
1780
1781/* Implementing the Varargs Macros */
1782
1783#undef TARGET_STRICT_ARGUMENT_NAMING
1784#define TARGET_STRICT_ARGUMENT_NAMING m32c_strict_argument_naming
1785static bool
1786m32c_strict_argument_naming (CUMULATIVE_ARGS * ca ATTRIBUTE_UNUSED)
1787{
1788 return 1;
1789}
1790
1791/* Trampolines for Nested Functions */
1792
1793/*
1794 m16c:
1795 1 0000 75C43412 mov.w #0x1234,a0
1796 2 0004 FC000000 jmp.a label
1797
1798 m32c:
1799 1 0000 BC563412 mov.l:s #0x123456,a0
1800 2 0004 CC000000 jmp.a label
1801*/
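/* Added note: the constants m32c_trampoline_init () below writes
   (0xc475 and 0xfc for m16c; 0xbc and 0xcc for m32c) encode the opcode
   bytes of the two instructions shown above, with the static chain and
   target address patched in afterwards.  */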
1802
1803/* Implements TRAMPOLINE_SIZE. */
1804int
1805m32c_trampoline_size (void)
1806{
1807 /* Allocate extra space so we can avoid the messy shifts when we
1808 initialize the trampoline; we just write past the end of the
1809 opcode. */
1810 return TARGET_A16 ? 8 : 10;
1811}
1812
1813/* Implements TRAMPOLINE_ALIGNMENT. */
1814int
1815m32c_trampoline_alignment (void)
1816{
1817 return 2;
1818}
1819
1820/* Implements TARGET_TRAMPOLINE_INIT. */
1821
1822#undef TARGET_TRAMPOLINE_INIT
1823#define TARGET_TRAMPOLINE_INIT m32c_trampoline_init
1824static void
1825m32c_trampoline_init (rtx m_tramp, tree fndecl, rtx chainval)
1826{
1827 rtx function = XEXP (DECL_RTL (fndecl), 0);
1828
1829#define A0(m,i) adjust_address (m_tramp, m, i)
1830 if (TARGET_A16)
1831 {
1832 /* Note: we subtract a "word" because the moves want signed
1833 constants, not unsigned constants. */
1834 emit_move_insn (A0 (HImode, 0), GEN_INT (0xc475 - 0x10000));
1835 emit_move_insn (A0 (HImode, 2), chainval);
1836 emit_move_insn (A0 (QImode, 4), GEN_INT (0xfc - 0x100));
1837 /* We use 16-bit addresses here, but store the zero to turn it
1838 into a 24-bit offset. */
1839 emit_move_insn (A0 (HImode, 5), function);
1840 emit_move_insn (A0 (QImode, 7), GEN_INT (0x00));
1841 }
1842 else
1843 {
1844 /* Note that the PSI moves actually write 4 bytes. Make sure we
1845 write stuff out in the right order, and leave room for the
1846 extra byte at the end. */
1847 emit_move_insn (A0 (QImode, 0), GEN_INT (0xbc - 0x100));
1848 emit_move_insn (A0 (PSImode, 1), chainval);
1849 emit_move_insn (A0 (QImode, 4), GEN_INT (0xcc - 0x100));
1850 emit_move_insn (A0 (PSImode, 5), function);
1851 }
1852#undef A0
1853}
1854
1855/* Implicit Calls to Library Routines */
1856
1857#undef TARGET_INIT_LIBFUNCS
1858#define TARGET_INIT_LIBFUNCS m32c_init_libfuncs
1859static void
1860m32c_init_libfuncs (void)
1861{
1862 /* We do this because the M32C has an HImode operand, but the
1863 M16C has an 8-bit operand. Since gcc looks at the match data
1864 and not the expanded rtl, we have to reset the optab so that
1865 the right modes are found. */
1866 if (TARGET_A24)
1867 {
1868 set_optab_handler (cstore_optab, QImode, CODE_FOR_cstoreqi4_24);
1869 set_optab_handler (cstore_optab, HImode, CODE_FOR_cstorehi4_24);
1870 set_optab_handler (cstore_optab, PSImode, CODE_FOR_cstorepsi4_24);
1871 }
1872}
1873
1874/* Addressing Modes */
1875
1876/* The r8c/m32c family supports a wide range of non-orthogonal
1877 addressing modes, including the ability to double-indirect on *some*
1878 of them. Not all insns support all modes, either, but we rely on
1879 predicates and constraints to deal with that. */
1880#undef TARGET_LEGITIMATE_ADDRESS_P
1881#define TARGET_LEGITIMATE_ADDRESS_P m32c_legitimate_address_p
1882bool
1883m32c_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
1884{
1885 int mode_adjust;
1886 if (CONSTANT_P (x))
1887 return 1;
1888
1889 if (TARGET_A16 && GET_MODE (x) != HImode && GET_MODE (x) != SImode)
1890 return 0;
1891 if (TARGET_A24 && GET_MODE (x) != PSImode)
1892 return 0;
1893
1894 /* Wide references to memory will be split after reload, so we must
1895 ensure that all parts of such splits remain legitimate
1896 addresses. */
1897 mode_adjust = GET_MODE_SIZE (mode) - 1;
1898
1899 /* allowing PLUS yields mem:HI(plus:SI(mem:SI(plus:SI in m32c_split_move */
1900 if (GET_CODE (x) == PRE_DEC
1901 || GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_MODIFY)
1902 {
1903 return (GET_CODE (XEXP (x, 0)) == REG
1904 && REGNO (XEXP (x, 0)) == SP_REGNO);
1905 }
1906
1907#if 0
1908 /* This is the double indirection detection, but it currently
1909 doesn't work as cleanly as this code implies, so until we've had
1910 a chance to debug it, leave it disabled. */
1911 if (TARGET_A24 && GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) != PLUS)
1912 {
1913#if DEBUG_DOUBLE
1914 fprintf (stderr, "double indirect\n");
1915#endif
1916 x = XEXP (x, 0);
1917 }
1918#endif
1919
1920 encode_pattern (x);
1921 if (RTX_IS ("r"))
1922 {
1923 /* Most indexable registers can be used without displacements,
1924 although some of them will be emitted with an explicit zero
1925 to please the assembler. */
1926 switch (REGNO (patternr[0]))
1927 {
38b2d076
DD
1928 case A1_REGNO:
1929 case SB_REGNO:
1930 case FB_REGNO:
1931 case SP_REGNO:
5fd5d713
DD
1932 if (TARGET_A16 && GET_MODE (x) == SImode)
1933 return 0;
1934 case A0_REGNO:
38b2d076
DD
1935 return 1;
1936
1937 default:
1938 if (IS_PSEUDO (patternr[0], strict))
1939 return 1;
1940 return 0;
1941 }
1942 }
5fd5d713
DD
1943
1944 if (TARGET_A16 && GET_MODE (x) == SImode)
1945 return 0;
1946
38b2d076
DD
1947 if (RTX_IS ("+ri"))
1948 {
1949 /* This is more interesting, because different base registers
1950 allow for different displacements - both range and signedness
1951 - and it differs from chip series to chip series too. */
1952 int rn = REGNO (patternr[1]);
1953 HOST_WIDE_INT offs = INTVAL (patternr[2]);
1954 switch (rn)
1955 {
1956 case A0_REGNO:
1957 case A1_REGNO:
1958 case SB_REGNO:
1959 /* The syntax only allows positive offsets, but when the
1960 offsets span the entire memory range, we can simulate
1961 negative offsets by wrapping. */
1962 if (TARGET_A16)
1963 return (offs >= -65536 && offs <= 65535 - mode_adjust);
1964 if (rn == SB_REGNO)
1965 return (offs >= 0 && offs <= 65535 - mode_adjust);
1966 /* A0 or A1 */
1967 return (offs >= -16777216 && offs <= 16777215);
1968
1969 case FB_REGNO:
1970 if (TARGET_A16)
1971 return (offs >= -128 && offs <= 127 - mode_adjust);
1972 return (offs >= -65536 && offs <= 65535 - mode_adjust);
1973
1974 case SP_REGNO:
1975 return (offs >= -128 && offs <= 127 - mode_adjust);
1976
1977 default:
1978 if (IS_PSEUDO (patternr[1], strict))
1979 return 1;
1980 return 0;
1981 }
1982 }
1983 if (RTX_IS ("+rs") || RTX_IS ("+r+si"))
1984 {
1985 rtx reg = patternr[1];
1986
1987 /* We don't know where the symbol is, so only allow base
1988 registers which support displacements spanning the whole
1989 address range. */
1990 switch (REGNO (reg))
1991 {
1992 case A0_REGNO:
1993 case A1_REGNO:
1994 /* $sb needs a secondary reload, but since it's involved in
1995 memory address reloads too, we don't deal with it very
1996 well. */
1997 /* case SB_REGNO: */
1998 return 1;
1999 default:
2000 if (IS_PSEUDO (reg, strict))
2001 return 1;
2002 return 0;
2003 }
2004 }
2005 return 0;
2006}
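/* A few concrete cases (illustrative only, assuming the A16 setup in
   which Pmode is HImode):

     (reg:HI a0)                             accepted (the "r" case)
     (plus:HI (reg:HI fb) (const_int -4))    accepted, inside $fb's
                                             -128 .. 127-size window
     (plus:HI (reg:HI fb) (const_int 200))   rejected; the legitimizer
                                             below copies $fb into a
                                             temporary register first
     (pre_dec:HI (reg:HI sp))                accepted; the same code on
                                             any other register is not  */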
2007
2008/* Implements REG_OK_FOR_BASE_P. */
2009int
2010m32c_reg_ok_for_base_p (rtx x, int strict)
2011{
2012 if (GET_CODE (x) != REG)
2013 return 0;
2014 switch (REGNO (x))
2015 {
2016 case A0_REGNO:
2017 case A1_REGNO:
2018 case SB_REGNO:
2019 case FB_REGNO:
2020 case SP_REGNO:
2021 return 1;
2022 default:
2023 if (IS_PSEUDO (x, strict))
2024 return 1;
2025 return 0;
2026 }
2027}
2028
04aff2c0 2029/* We have three choices for the fb->aN offset. If we choose -128,
85f65093 2030 we need one MOVA -128[fb],aN opcode and 16-bit aN displacements,
04aff2c0
DD
2031 like this:
2032 EB 4B FF mova -128[$fb],$a0
2033 D8 0C FF FF mov.w:Q #0,-1[$a0]
2034
85f65093 2035 Alternatively, we subtract the frame size, and hopefully use 8-bit aN
04aff2c0
DD
2036 displacements:
2037 7B F4 stc $fb,$a0
2038 77 54 00 01 sub #256,$a0
2039 D8 08 01 mov.w:Q #0,1[$a0]
2040
2041 If we don't offset (i.e. offset by zero), we end up with:
2042 7B F4 stc $fb,$a0
2043 D8 0C 00 FF mov.w:Q #0,-256[$a0]
2044
2045 We have to subtract *something* so that we have a PLUS rtx to mark
2046 that we've done this reload. The -128 offset will never result in
85f65093 2047 an 8-bit aN offset, and the payoff for the second case is five
04aff2c0
DD
2048 loads *if* those loads are within 256 bytes of the other end of the
2049 frame, so the third case seems best. Note that we subtract the
2050 zero, but detect that in the addhi3 pattern. */
2051
ea471af0
JM
2052#define BIG_FB_ADJ 0
2053
38b2d076
DD
2054/* Implements LEGITIMIZE_ADDRESS. The only address we really have to
2055 worry about is frame base offsets, as $fb has a limited
2056 displacement range. We deal with this by attempting to reload $fb
2057 itself into an address register; that seems to result in the best
2058 code. */
506d7b68
PB
2059#undef TARGET_LEGITIMIZE_ADDRESS
2060#define TARGET_LEGITIMIZE_ADDRESS m32c_legitimize_address
2061static rtx
2062m32c_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
2063 enum machine_mode mode)
38b2d076
DD
2064{
2065#if DEBUG0
2066 fprintf (stderr, "m32c_legitimize_address for mode %s\n", mode_name[mode]);
506d7b68 2067 debug_rtx (x);
38b2d076
DD
2068 fprintf (stderr, "\n");
2069#endif
2070
506d7b68
PB
2071 if (GET_CODE (x) == PLUS
2072 && GET_CODE (XEXP (x, 0)) == REG
2073 && REGNO (XEXP (x, 0)) == FB_REGNO
2074 && GET_CODE (XEXP (x, 1)) == CONST_INT
2075 && (INTVAL (XEXP (x, 1)) < -128
2076 || INTVAL (XEXP (x, 1)) > (128 - GET_MODE_SIZE (mode))))
38b2d076
DD
2077 {
2078 /* reload FB to A_REGS */
38b2d076 2079 rtx temp = gen_reg_rtx (Pmode);
506d7b68
PB
2080 x = copy_rtx (x);
2081 emit_insn (gen_rtx_SET (VOIDmode, temp, XEXP (x, 0)));
2082 XEXP (x, 0) = temp;
38b2d076
DD
2083 }
2084
506d7b68 2085 return x;
38b2d076
DD
2086}
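/* Worked example (illustrative): for a QImode access to
   (plus:HI (reg:HI fb) (const_int 200)) the offset is outside the
   -128..127 window tested above, so we emit
   (set (reg:HI tmp) (reg:HI fb)) and hand back
   (plus:HI (reg:HI tmp) (const_int 200)); tmp can then be reloaded into
   an address register, which supports a full-range displacement.  */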
2087
2088/* Implements LEGITIMIZE_RELOAD_ADDRESS. See comment above. */
2089int
2090m32c_legitimize_reload_address (rtx * x,
2091 enum machine_mode mode,
2092 int opnum,
2093 int type, int ind_levels ATTRIBUTE_UNUSED)
2094{
2095#if DEBUG0
2096 fprintf (stderr, "\nm32c_legitimize_reload_address for mode %s\n",
2097 mode_name[mode]);
2098 debug_rtx (*x);
2099#endif
2100
2101 /* At one point, this function tried to get $fb copied to an address
2102 register, which in theory would maximize sharing, but gcc was
2103 *also* still trying to reload the whole address, and we'd run out
2104 of address registers. So we let gcc do the naive (but safe)
2105 reload instead, when the above function doesn't handle it for
04aff2c0
DD
2106 us.
2107
2108 The code below is a second attempt at the above. */
2109
2110 if (GET_CODE (*x) == PLUS
2111 && GET_CODE (XEXP (*x, 0)) == REG
2112 && REGNO (XEXP (*x, 0)) == FB_REGNO
2113 && GET_CODE (XEXP (*x, 1)) == CONST_INT
2114 && (INTVAL (XEXP (*x, 1)) < -128
2115 || INTVAL (XEXP (*x, 1)) > (128 - GET_MODE_SIZE (mode))))
2116 {
2117 rtx sum;
2118 int offset = INTVAL (XEXP (*x, 1));
2119 int adjustment = -BIG_FB_ADJ;
2120
2121 sum = gen_rtx_PLUS (Pmode, XEXP (*x, 0),
2122 GEN_INT (adjustment));
2123 *x = gen_rtx_PLUS (Pmode, sum, GEN_INT (offset - adjustment));
2124 if (type == RELOAD_OTHER)
2125 type = RELOAD_FOR_OTHER_ADDRESS;
2126 push_reload (sum, NULL_RTX, &XEXP (*x, 0), NULL,
2127 A_REGS, Pmode, VOIDmode, 0, 0, opnum,
444d6efe 2128 (enum reload_type) type);
04aff2c0
DD
2129 return 1;
2130 }
2131
2132 if (GET_CODE (*x) == PLUS
2133 && GET_CODE (XEXP (*x, 0)) == PLUS
2134 && GET_CODE (XEXP (XEXP (*x, 0), 0)) == REG
2135 && REGNO (XEXP (XEXP (*x, 0), 0)) == FB_REGNO
2136 && GET_CODE (XEXP (XEXP (*x, 0), 1)) == CONST_INT
2137 && GET_CODE (XEXP (*x, 1)) == CONST_INT
2138 )
2139 {
2140 if (type == RELOAD_OTHER)
2141 type = RELOAD_FOR_OTHER_ADDRESS;
2142 push_reload (XEXP (*x, 0), NULL_RTX, &XEXP (*x, 0), NULL,
2143 A_REGS, Pmode, VOIDmode, 0, 0, opnum,
444d6efe 2144 (enum reload_type) type);
04aff2c0
DD
2145 return 1;
2146 }
38b2d076
DD
2147
2148 return 0;
2149}
2150
38b2d076
DD
2151/* Implements LEGITIMATE_CONSTANT_P. We split large constants anyway,
2152 so we can allow anything. */
2153int
2154m32c_legitimate_constant_p (rtx x ATTRIBUTE_UNUSED)
2155{
2156 return 1;
2157}
2158
2159
5fd5d713
DD
2160/* Return the appropriate mode for a named address space pointer. */
2161#undef TARGET_ADDR_SPACE_POINTER_MODE
2162#define TARGET_ADDR_SPACE_POINTER_MODE m32c_addr_space_pointer_mode
2163static enum machine_mode
2164m32c_addr_space_pointer_mode (addr_space_t addrspace)
2165{
2166 switch (addrspace)
2167 {
2168 case ADDR_SPACE_GENERIC:
2169 return TARGET_A24 ? PSImode : HImode;
2170 case ADDR_SPACE_FAR:
2171 return SImode;
2172 default:
2173 gcc_unreachable ();
2174 }
2175}
2176
2177/* Return the appropriate mode for a named address space address. */
2178#undef TARGET_ADDR_SPACE_ADDRESS_MODE
2179#define TARGET_ADDR_SPACE_ADDRESS_MODE m32c_addr_space_address_mode
2180static enum machine_mode
2181m32c_addr_space_address_mode (addr_space_t addrspace)
2182{
2183 switch (addrspace)
2184 {
2185 case ADDR_SPACE_GENERIC:
2186 return TARGET_A24 ? PSImode : HImode;
2187 case ADDR_SPACE_FAR:
2188 return SImode;
2189 default:
2190 gcc_unreachable ();
2191 }
2192}
2193
2194/* Like m32c_legitimate_address_p, except with named addresses. */
2195#undef TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P
2196#define TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P \
2197 m32c_addr_space_legitimate_address_p
2198static bool
2199m32c_addr_space_legitimate_address_p (enum machine_mode mode, rtx x,
2200 bool strict, addr_space_t as)
2201{
2202 if (as == ADDR_SPACE_FAR)
2203 {
2204 if (TARGET_A24)
2205 return 0;
2206 encode_pattern (x);
2207 if (RTX_IS ("r"))
2208 {
2209 if (GET_MODE (x) != SImode)
2210 return 0;
2211 switch (REGNO (patternr[0]))
2212 {
2213 case A0_REGNO:
2214 return 1;
2215
2216 default:
2217 if (IS_PSEUDO (patternr[0], strict))
2218 return 1;
2219 return 0;
2220 }
2221 }
2222 if (RTX_IS ("+^Sri"))
2223 {
2224 int rn = REGNO (patternr[3]);
2225 HOST_WIDE_INT offs = INTVAL (patternr[4]);
2226 if (GET_MODE (patternr[3]) != HImode)
2227 return 0;
2228 switch (rn)
2229 {
2230 case A0_REGNO:
2231 return (offs >= 0 && offs <= 0xfffff);
2232
2233 default:
2234 if (IS_PSEUDO (patternr[3], strict))
2235 return 1;
2236 return 0;
2237 }
2238 }
2239 if (RTX_IS ("+^Srs"))
2240 {
2241 int rn = REGNO (patternr[3]);
2242 if (GET_MODE (patternr[3]) != HImode)
2243 return 0;
2244 switch (rn)
2245 {
2246 case A0_REGNO:
2247 return 1;
2248
2249 default:
2250 if (IS_PSEUDO (patternr[3], strict))
2251 return 1;
2252 return 0;
2253 }
2254 }
2255 if (RTX_IS ("+^S+ris"))
2256 {
2257 int rn = REGNO (patternr[4]);
2258 if (GET_MODE (patternr[4]) != HImode)
2259 return 0;
2260 switch (rn)
2261 {
2262 case A0_REGNO:
2263 return 1;
2264
2265 default:
2266 if (IS_PSEUDO (patternr[4], strict))
2267 return 1;
2268 return 0;
2269 }
2270 }
2271 if (RTX_IS ("s"))
2272 {
2273 return 1;
2274 }
2275 return 0;
2276 }
2277
2278 else if (as != ADDR_SPACE_GENERIC)
2279 gcc_unreachable ();
2280
2281 return m32c_legitimate_address_p (mode, x, strict);
2282}
2283
2284/* Like m32c_legitimize_address, except with named address support. */
2285#undef TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS
2286#define TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS m32c_addr_space_legitimize_address
2287static rtx
2288m32c_addr_space_legitimize_address (rtx x, rtx oldx, enum machine_mode mode,
2289 addr_space_t as)
2290{
2291 if (as != ADDR_SPACE_GENERIC)
2292 {
2293#if DEBUG0
2294 fprintf (stderr, "\033[36mm32c_addr_space_legitimize_address for mode %s\033[0m\n", mode_name[mode]);
2295 debug_rtx (x);
2296 fprintf (stderr, "\n");
2297#endif
2298
2299 if (GET_CODE (x) != REG)
2300 {
2301 x = force_reg (SImode, x);
2302 }
2303 return x;
2304 }
2305
2306 return m32c_legitimize_address (x, oldx, mode);
2307}
2308
2309/* Determine if one named address space is a subset of another. */
2310#undef TARGET_ADDR_SPACE_SUBSET_P
2311#define TARGET_ADDR_SPACE_SUBSET_P m32c_addr_space_subset_p
2312static bool
2313m32c_addr_space_subset_p (addr_space_t subset, addr_space_t superset)
2314{
2315 gcc_assert (subset == ADDR_SPACE_GENERIC || subset == ADDR_SPACE_FAR);
2316 gcc_assert (superset == ADDR_SPACE_GENERIC || superset == ADDR_SPACE_FAR);
2317
2318 if (subset == superset)
2319 return true;
2320
2321 else
2322 return (subset == ADDR_SPACE_GENERIC && superset == ADDR_SPACE_FAR);
2323}
2324
2325#undef TARGET_ADDR_SPACE_CONVERT
2326#define TARGET_ADDR_SPACE_CONVERT m32c_addr_space_convert
2327/* Convert from one address space to another. */
2328static rtx
2329m32c_addr_space_convert (rtx op, tree from_type, tree to_type)
2330{
2331 addr_space_t from_as = TYPE_ADDR_SPACE (TREE_TYPE (from_type));
2332 addr_space_t to_as = TYPE_ADDR_SPACE (TREE_TYPE (to_type));
2333 rtx result;
2334
2335 gcc_assert (from_as == ADDR_SPACE_GENERIC || from_as == ADDR_SPACE_FAR);
2336 gcc_assert (to_as == ADDR_SPACE_GENERIC || to_as == ADDR_SPACE_FAR);
2337
2338 if (to_as == ADDR_SPACE_GENERIC && from_as == ADDR_SPACE_FAR)
2339 {
2340 /* This is unpredictable, as we're truncating off usable address
2341 bits. */
2342
2343 result = gen_reg_rtx (HImode);
2344 emit_move_insn (result, simplify_subreg (HImode, op, SImode, 0));
2345 return result;
2346 }
2347 else if (to_as == ADDR_SPACE_FAR && from_as == ADDR_SPACE_GENERIC)
2348 {
2349 /* This always works. */
2350 result = gen_reg_rtx (SImode);
2351 emit_insn (gen_zero_extendhisi2 (result, op));
2352 return result;
2353 }
2354 else
2355 gcc_unreachable ();
2356}
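/* User-level sketch of what the conversions above implement
   (illustrative only; it assumes the far space is exposed to C as the
   "__far" qualifier, which is registered outside this file):

     char __far *fp;    -- SImode pointer, see the pointer_mode hook above
     char *np;          -- HImode on A16, PSImode on A24

     fp = (char __far *) np;   -- generic -> far: zero-extend, always safe
     np = (char *) fp;         -- far -> generic: truncates address bits  */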
2357
38b2d076
DD
2358/* Condition Code Status */
2359
2360#undef TARGET_FIXED_CONDITION_CODE_REGS
2361#define TARGET_FIXED_CONDITION_CODE_REGS m32c_fixed_condition_code_regs
2362static bool
2363m32c_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
2364{
2365 *p1 = FLG_REGNO;
2366 *p2 = INVALID_REGNUM;
2367 return true;
2368}
2369
2370/* Describing Relative Costs of Operations */
2371
0e607518 2372/* Implements TARGET_REGISTER_MOVE_COST. We make impossible moves
38b2d076
DD
2373 prohibitively expensive, like trying to put QIs in r2/r3 (there are
2374 no opcodes to do that). We also discourage use of mem* registers
2375 since they're really memory. */
0e607518
AS
2376
2377#undef TARGET_REGISTER_MOVE_COST
2378#define TARGET_REGISTER_MOVE_COST m32c_register_move_cost
2379
2380static int
2381m32c_register_move_cost (enum machine_mode mode, reg_class_t from,
2382 reg_class_t to)
38b2d076
DD
2383{
2384 int cost = COSTS_N_INSNS (3);
0e607518
AS
2385 HARD_REG_SET cc;
2386
2387/* FIXME: pick real values, but not 2 for now. */
2388 COPY_HARD_REG_SET (cc, reg_class_contents[(int) from]);
2389 IOR_HARD_REG_SET (cc, reg_class_contents[(int) to]);
2390
2391 if (mode == QImode
2392 && hard_reg_set_intersect_p (cc, reg_class_contents[R23_REGS]))
38b2d076 2393 {
0e607518 2394 if (hard_reg_set_subset_p (cc, reg_class_contents[R23_REGS]))
38b2d076
DD
2395 cost = COSTS_N_INSNS (1000);
2396 else
2397 cost = COSTS_N_INSNS (80);
2398 }
2399
2400 if (!class_can_hold_mode (from, mode) || !class_can_hold_mode (to, mode))
2401 cost = COSTS_N_INSNS (1000);
2402
0e607518 2403 if (reg_classes_intersect_p (from, CR_REGS))
38b2d076
DD
2404 cost += COSTS_N_INSNS (5);
2405
0e607518 2406 if (reg_classes_intersect_p (to, CR_REGS))
38b2d076
DD
2407 cost += COSTS_N_INSNS (5);
2408
2409 if (from == MEM_REGS || to == MEM_REGS)
2410 cost += COSTS_N_INSNS (50);
0e607518
AS
2411 else if (reg_classes_intersect_p (from, MEM_REGS)
2412 || reg_classes_intersect_p (to, MEM_REGS))
38b2d076
DD
2413 cost += COSTS_N_INSNS (10);
2414
2415#if DEBUG0
2416 fprintf (stderr, "register_move_cost %s from %s to %s = %d\n",
0e607518
AS
2417 mode_name[mode], class_names[(int) from], class_names[(int) to],
2418 cost);
38b2d076
DD
2419#endif
2420 return cost;
2421}
2422
0e607518
AS
2423/* Implements TARGET_MEMORY_MOVE_COST. */
2424
2425#undef TARGET_MEMORY_MOVE_COST
2426#define TARGET_MEMORY_MOVE_COST m32c_memory_move_cost
2427
2428static int
38b2d076 2429m32c_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
0e607518
AS
2430 reg_class_t rclass ATTRIBUTE_UNUSED,
2431 bool in ATTRIBUTE_UNUSED)
38b2d076
DD
2432{
2433 /* FIXME: pick real values. */
2434 return COSTS_N_INSNS (10);
2435}
2436
07127a0a
DD
2437/* Here we try to describe when we use multiple opcodes for one RTX so
2438 that gcc knows when to use them. */
2439#undef TARGET_RTX_COSTS
2440#define TARGET_RTX_COSTS m32c_rtx_costs
2441static bool
f40751dd
JH
2442m32c_rtx_costs (rtx x, int code, int outer_code, int *total,
2443 bool speed ATTRIBUTE_UNUSED)
07127a0a
DD
2444{
2445 switch (code)
2446 {
2447 case REG:
2448 if (REGNO (x) >= MEM0_REGNO && REGNO (x) <= MEM7_REGNO)
2449 *total += COSTS_N_INSNS (500);
2450 else
2451 *total += COSTS_N_INSNS (1);
2452 return true;
2453
2454 case ASHIFT:
2455 case LSHIFTRT:
2456 case ASHIFTRT:
2457 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
2458 {
2459 /* mov.b r1l, r1h */
2460 *total += COSTS_N_INSNS (1);
2461 return true;
2462 }
2463 if (INTVAL (XEXP (x, 1)) > 8
2464 || INTVAL (XEXP (x, 1)) < -8)
2465 {
2466 /* mov.b #N, r1l */
2467 /* mov.b r1l, r1h */
2468 *total += COSTS_N_INSNS (2);
2469 return true;
2470 }
2471 return true;
2472
2473 case LE:
2474 case LEU:
2475 case LT:
2476 case LTU:
2477 case GT:
2478 case GTU:
2479 case GE:
2480 case GEU:
2481 case NE:
2482 case EQ:
2483 if (outer_code == SET)
2484 {
2485 *total += COSTS_N_INSNS (2);
2486 return true;
2487 }
2488 break;
2489
2490 case ZERO_EXTRACT:
2491 {
2492 rtx dest = XEXP (x, 0);
2493 rtx addr = XEXP (dest, 0);
2494 switch (GET_CODE (addr))
2495 {
2496 case CONST_INT:
2497 *total += COSTS_N_INSNS (1);
2498 break;
2499 case SYMBOL_REF:
2500 *total += COSTS_N_INSNS (3);
2501 break;
2502 default:
2503 *total += COSTS_N_INSNS (2);
2504 break;
2505 }
2506 return true;
2507 }
2508 break;
2509
2510 default:
2511 /* Reasonable default. */
2512 if (TARGET_A16 && GET_MODE(x) == SImode)
2513 *total += COSTS_N_INSNS (2);
2514 break;
2515 }
2516 return false;
2517}
2518
2519#undef TARGET_ADDRESS_COST
2520#define TARGET_ADDRESS_COST m32c_address_cost
2521static int
f40751dd 2522m32c_address_cost (rtx addr, bool speed ATTRIBUTE_UNUSED)
07127a0a 2523{
80b093df 2524 int i;
07127a0a
DD
2525 /* fprintf(stderr, "\naddress_cost\n");
2526 debug_rtx(addr);*/
2527 switch (GET_CODE (addr))
2528 {
2529 case CONST_INT:
80b093df
DD
2530 i = INTVAL (addr);
2531 if (i == 0)
2532 return COSTS_N_INSNS(1);
2533 if (0 < i && i <= 255)
2534 return COSTS_N_INSNS(2);
2535 if (0 < i && i <= 65535)
2536 return COSTS_N_INSNS(3);
2537 return COSTS_N_INSNS(4);
07127a0a 2538 case SYMBOL_REF:
80b093df 2539 return COSTS_N_INSNS(4);
07127a0a 2540 case REG:
80b093df
DD
2541 return COSTS_N_INSNS(1);
2542 case PLUS:
2543 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
2544 {
2545 i = INTVAL (XEXP (addr, 1));
2546 if (i == 0)
2547 return COSTS_N_INSNS(1);
2548 if (0 < i && i <= 255)
2549 return COSTS_N_INSNS(2);
2550 if (0 < i && i <= 65535)
2551 return COSTS_N_INSNS(3);
2552 }
2553 return COSTS_N_INSNS(4);
07127a0a
DD
2554 default:
2555 return 0;
2556 }
2557}
2558
38b2d076
DD
2559/* Defining the Output Assembler Language */
2560
2561/* The Overall Framework of an Assembler File */
2562
2563#undef TARGET_HAVE_NAMED_SECTIONS
2564#define TARGET_HAVE_NAMED_SECTIONS true
2565
2566/* Output of Data */
2567
2568/* We may have 24-bit sizes, as 24 bits is the native address size.
2569 Currently unused, but provided for completeness. */
2570#undef TARGET_ASM_INTEGER
2571#define TARGET_ASM_INTEGER m32c_asm_integer
2572static bool
2573m32c_asm_integer (rtx x, unsigned int size, int aligned_p)
2574{
2575 switch (size)
2576 {
2577 case 3:
2578 fprintf (asm_out_file, "\t.3byte\t");
2579 output_addr_const (asm_out_file, x);
2580 fputc ('\n', asm_out_file);
2581 return true;
e9555b13
DD
2582 case 4:
2583 if (GET_CODE (x) == SYMBOL_REF)
2584 {
2585 fprintf (asm_out_file, "\t.long\t");
2586 output_addr_const (asm_out_file, x);
2587 fputc ('\n', asm_out_file);
2588 return true;
2589 }
2590 break;
38b2d076
DD
2591 }
2592 return default_assemble_integer (x, size, aligned_p);
2593}
2594
2595/* Output of Assembler Instructions */
2596
a4174ebf 2597/* We use a lookup table because the addressing modes are non-orthogonal. */
38b2d076
DD
2598
2599static struct
2600{
2601 char code;
2602 char const *pattern;
2603 char const *format;
2604}
2605const conversions[] = {
2606 { 0, "r", "0" },
2607
2608 { 0, "mr", "z[1]" },
2609 { 0, "m+ri", "3[2]" },
2610 { 0, "m+rs", "3[2]" },
5fd5d713
DD
2611 { 0, "m+^Zrs", "5[4]" },
2612 { 0, "m+^Zri", "5[4]" },
2613 { 0, "m+^Z+ris", "7+6[5]" },
2614 { 0, "m+^Srs", "5[4]" },
2615 { 0, "m+^Sri", "5[4]" },
2616 { 0, "m+^S+ris", "7+6[5]" },
38b2d076
DD
2617 { 0, "m+r+si", "4+5[2]" },
2618 { 0, "ms", "1" },
2619 { 0, "mi", "1" },
2620 { 0, "m+si", "2+3" },
2621
2622 { 0, "mmr", "[z[2]]" },
2623 { 0, "mm+ri", "[4[3]]" },
2624 { 0, "mm+rs", "[4[3]]" },
2625 { 0, "mm+r+si", "[5+6[3]]" },
2626 { 0, "mms", "[[2]]" },
2627 { 0, "mmi", "[[2]]" },
2628 { 0, "mm+si", "[4[3]]" },
2629
2630 { 0, "i", "#0" },
2631 { 0, "s", "#0" },
2632 { 0, "+si", "#1+2" },
2633 { 0, "l", "#0" },
2634
2635 { 'l', "l", "0" },
2636 { 'd', "i", "0" },
2637 { 'd', "s", "0" },
2638 { 'd', "+si", "1+2" },
2639 { 'D', "i", "0" },
2640 { 'D', "s", "0" },
2641 { 'D', "+si", "1+2" },
2642 { 'x', "i", "#0" },
2643 { 'X', "i", "#0" },
2644 { 'm', "i", "#0" },
2645 { 'b', "i", "#0" },
07127a0a 2646 { 'B', "i", "0" },
38b2d076
DD
2647 { 'p', "i", "0" },
2648
2649 { 0, 0, 0 }
2650};
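/* Worked example (illustrative): for the operand
   (mem:HI (plus:HI (reg:HI a0) (const_int 4))), encode_pattern (defined
   earlier in this file) yields the string "m+ri" with
   patternr[] = { mem, plus, reg, int }.  The entry { 0, "m+ri", "3[2]" }
   above matches, so the format prints patternr[3] (the constant), a
   literal '[', patternr[2] (the register) and ']', giving "4[a0]".  */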
2651
2652/* This is in order according to the bitfield that pushm/popm use. */
2653static char const *pushm_regs[] = {
2654 "fb", "sb", "a1", "a0", "r3", "r2", "r1", "r0"
2655};
2656
2657/* Implements PRINT_OPERAND. */
2658void
2659m32c_print_operand (FILE * file, rtx x, int code)
2660{
2661 int i, j, b;
2662 const char *comma;
2663 HOST_WIDE_INT ival;
2664 int unsigned_const = 0;
ff485e71 2665 int force_sign;
38b2d076
DD
2666
2667 /* Multiplies; constants are converted to sign-extended format but
2668 we need unsigned, so 'u' and 'U' tell us what size unsigned we
2669 need. */
2670 if (code == 'u')
2671 {
2672 unsigned_const = 2;
2673 code = 0;
2674 }
2675 if (code == 'U')
2676 {
2677 unsigned_const = 1;
2678 code = 0;
2679 }
2680 /* This one is only for debugging; you can put it in a pattern to
2681 force this error. */
2682 if (code == '!')
2683 {
2684 fprintf (stderr, "dj: unreviewed pattern:");
2685 if (current_output_insn)
2686 debug_rtx (current_output_insn);
2687 gcc_unreachable ();
2688 }
2689 /* PSImode operations are either .w or .l depending on the target. */
2690 if (code == '&')
2691 {
2692 if (TARGET_A16)
2693 fprintf (file, "w");
2694 else
2695 fprintf (file, "l");
2696 return;
2697 }
2698 /* Inverted conditionals. */
2699 if (code == 'C')
2700 {
2701 switch (GET_CODE (x))
2702 {
2703 case LE:
2704 fputs ("gt", file);
2705 break;
2706 case LEU:
2707 fputs ("gtu", file);
2708 break;
2709 case LT:
2710 fputs ("ge", file);
2711 break;
2712 case LTU:
2713 fputs ("geu", file);
2714 break;
2715 case GT:
2716 fputs ("le", file);
2717 break;
2718 case GTU:
2719 fputs ("leu", file);
2720 break;
2721 case GE:
2722 fputs ("lt", file);
2723 break;
2724 case GEU:
2725 fputs ("ltu", file);
2726 break;
2727 case NE:
2728 fputs ("eq", file);
2729 break;
2730 case EQ:
2731 fputs ("ne", file);
2732 break;
2733 default:
2734 gcc_unreachable ();
2735 }
2736 return;
2737 }
2738 /* Regular conditionals. */
2739 if (code == 'c')
2740 {
2741 switch (GET_CODE (x))
2742 {
2743 case LE:
2744 fputs ("le", file);
2745 break;
2746 case LEU:
2747 fputs ("leu", file);
2748 break;
2749 case LT:
2750 fputs ("lt", file);
2751 break;
2752 case LTU:
2753 fputs ("ltu", file);
2754 break;
2755 case GT:
2756 fputs ("gt", file);
2757 break;
2758 case GTU:
2759 fputs ("gtu", file);
2760 break;
2761 case GE:
2762 fputs ("ge", file);
2763 break;
2764 case GEU:
2765 fputs ("geu", file);
2766 break;
2767 case NE:
2768 fputs ("ne", file);
2769 break;
2770 case EQ:
2771 fputs ("eq", file);
2772 break;
2773 default:
2774 gcc_unreachable ();
2775 }
2776 return;
2777 }
2778 /* Used in negsi2 to do HImode ops on the two parts of an SImode
2779 operand. */
2780 if (code == 'h' && GET_MODE (x) == SImode)
2781 {
2782 x = m32c_subreg (HImode, x, SImode, 0);
2783 code = 0;
2784 }
2785 if (code == 'H' && GET_MODE (x) == SImode)
2786 {
2787 x = m32c_subreg (HImode, x, SImode, 2);
2788 code = 0;
2789 }
07127a0a
DD
2790 if (code == 'h' && GET_MODE (x) == HImode)
2791 {
2792 x = m32c_subreg (QImode, x, HImode, 0);
2793 code = 0;
2794 }
2795 if (code == 'H' && GET_MODE (x) == HImode)
2796 {
2797 /* We can't actually represent this as an rtx. Do it here. */
2798 if (GET_CODE (x) == REG)
2799 {
2800 switch (REGNO (x))
2801 {
2802 case R0_REGNO:
2803 fputs ("r0h", file);
2804 return;
2805 case R1_REGNO:
2806 fputs ("r1h", file);
2807 return;
2808 default:
2809 gcc_unreachable();
2810 }
2811 }
2812 /* This should be a MEM. */
2813 x = m32c_subreg (QImode, x, HImode, 1);
2814 code = 0;
2815 }
2816 /* This is for BMcond, which always wants word register names. */
2817 if (code == 'h' && GET_MODE (x) == QImode)
2818 {
2819 if (GET_CODE (x) == REG)
2820 x = gen_rtx_REG (HImode, REGNO (x));
2821 code = 0;
2822 }
38b2d076
DD
2823 /* 'x' and 'X' need to be ignored for non-immediates. */
2824 if ((code == 'x' || code == 'X') && GET_CODE (x) != CONST_INT)
2825 code = 0;
2826
2827 encode_pattern (x);
ff485e71 2828 force_sign = 0;
38b2d076
DD
2829 for (i = 0; conversions[i].pattern; i++)
2830 if (conversions[i].code == code
2831 && streq (conversions[i].pattern, pattern))
2832 {
2833 for (j = 0; conversions[i].format[j]; j++)
2834 /* backslash quotes the next character in the output pattern. */
2835 if (conversions[i].format[j] == '\\')
2836 {
2837 fputc (conversions[i].format[j + 1], file);
2838 j++;
2839 }
2840 /* Digits in the output pattern indicate that the
2841 corresponding RTX is to be output at that point. */
2842 else if (ISDIGIT (conversions[i].format[j]))
2843 {
2844 rtx r = patternr[conversions[i].format[j] - '0'];
2845 switch (GET_CODE (r))
2846 {
2847 case REG:
2848 fprintf (file, "%s",
2849 reg_name_with_mode (REGNO (r), GET_MODE (r)));
2850 break;
2851 case CONST_INT:
2852 switch (code)
2853 {
2854 case 'b':
07127a0a
DD
2855 case 'B':
2856 {
2857 int v = INTVAL (r);
2858 int i = (int) exact_log2 (v);
2859 if (i == -1)
2860 i = (int) exact_log2 ((v ^ 0xffff) & 0xffff);
2861 if (i == -1)
2862 i = (int) exact_log2 ((v ^ 0xff) & 0xff);
2863 /* Bit position. */
2864 fprintf (file, "%d", i);
2865 }
38b2d076
DD
2866 break;
2867 case 'x':
2868 /* Unsigned byte. */
2869 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2870 INTVAL (r) & 0xff);
2871 break;
2872 case 'X':
2873 /* Unsigned word. */
2874 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2875 INTVAL (r) & 0xffff);
2876 break;
2877 case 'p':
2878 /* pushm and popm encode a register set into a single byte. */
2879 comma = "";
2880 for (b = 7; b >= 0; b--)
2881 if (INTVAL (r) & (1 << b))
2882 {
2883 fprintf (file, "%s%s", comma, pushm_regs[b]);
2884 comma = ",";
2885 }
2886 break;
2887 case 'm':
2888 /* "Minus". Output -X */
2889 ival = (-INTVAL (r) & 0xffff);
2890 if (ival & 0x8000)
2891 ival = ival - 0x10000;
2892 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2893 break;
2894 default:
2895 ival = INTVAL (r);
2896 if (conversions[i].format[j + 1] == '[' && ival < 0)
2897 {
2898 /* We can simulate negative displacements by
2899 taking advantage of address space
2900 wrapping when the offset can span the
2901 entire address range. */
2902 rtx base =
2903 patternr[conversions[i].format[j + 2] - '0'];
2904 if (GET_CODE (base) == REG)
2905 switch (REGNO (base))
2906 {
2907 case A0_REGNO:
2908 case A1_REGNO:
2909 if (TARGET_A24)
2910 ival = 0x1000000 + ival;
2911 else
2912 ival = 0x10000 + ival;
2913 break;
2914 case SB_REGNO:
2915 if (TARGET_A16)
2916 ival = 0x10000 + ival;
2917 break;
2918 }
2919 }
2920 else if (code == 'd' && ival < 0 && j == 0)
2921 /* The "mova" opcode is used to do addition by
2922 computing displacements, but again, we need
2923 displacements to be unsigned *if* they're
2924 the only component of the displacement
2925 (i.e. no "symbol-4" type displacement). */
2926 ival = (TARGET_A24 ? 0x1000000 : 0x10000) + ival;
2927
2928 if (conversions[i].format[j] == '0')
2929 {
2930 /* More conversions to unsigned. */
2931 if (unsigned_const == 2)
2932 ival &= 0xffff;
2933 if (unsigned_const == 1)
2934 ival &= 0xff;
2935 }
2936 if (streq (conversions[i].pattern, "mi")
2937 || streq (conversions[i].pattern, "mmi"))
2938 {
2939 /* Integers used as addresses are unsigned. */
2940 ival &= (TARGET_A24 ? 0xffffff : 0xffff);
2941 }
ff485e71
DD
2942 if (force_sign && ival >= 0)
2943 fputc ('+', file);
38b2d076
DD
2944 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2945 break;
2946 }
2947 break;
2948 case CONST_DOUBLE:
2949 /* We don't have const_double constants. If it
2950 happens, make it obvious. */
2951 fprintf (file, "[const_double 0x%lx]",
2952 (unsigned long) CONST_DOUBLE_HIGH (r));
2953 break;
2954 case SYMBOL_REF:
2955 assemble_name (file, XSTR (r, 0));
2956 break;
2957 case LABEL_REF:
2958 output_asm_label (r);
2959 break;
2960 default:
2961 fprintf (stderr, "don't know how to print this operand:");
2962 debug_rtx (r);
2963 gcc_unreachable ();
2964 }
2965 }
2966 else
2967 {
2968 if (conversions[i].format[j] == 'z')
2969 {
2970 /* Some addressing modes *must* have a displacement,
2971 so insert a zero here if needed. */
2972 int k;
2973 for (k = j + 1; conversions[i].format[k]; k++)
2974 if (ISDIGIT (conversions[i].format[k]))
2975 {
2976 rtx reg = patternr[conversions[i].format[k] - '0'];
2977 if (GET_CODE (reg) == REG
2978 && (REGNO (reg) == SB_REGNO
2979 || REGNO (reg) == FB_REGNO
2980 || REGNO (reg) == SP_REGNO))
2981 fputc ('0', file);
2982 }
2983 continue;
2984 }
2985 /* Signed displacements off symbols need to have signs
2986 blended cleanly. */
2987 if (conversions[i].format[j] == '+'
ff485e71 2988 && (!code || code == 'D' || code == 'd')
38b2d076 2989 && ISDIGIT (conversions[i].format[j + 1])
ff485e71
DD
2990 && (GET_CODE (patternr[conversions[i].format[j + 1] - '0'])
2991 == CONST_INT))
2992 {
2993 force_sign = 1;
2994 continue;
2995 }
38b2d076
DD
2996 fputc (conversions[i].format[j], file);
2997 }
2998 break;
2999 }
3000 if (!conversions[i].pattern)
3001 {
3002 fprintf (stderr, "unconvertible operand %c `%s'", code ? code : '-',
3003 pattern);
3004 debug_rtx (x);
3005 fprintf (file, "[%c.%s]", code ? code : '-', pattern);
3006 }
3007
3008 return;
3009}
3010
3011/* Implements PRINT_OPERAND_PUNCT_VALID_P. See m32c_print_operand
3012 above for descriptions of what these do. */
3013int
3014m32c_print_operand_punct_valid_p (int c)
3015{
3016 if (c == '&' || c == '!')
3017 return 1;
3018 return 0;
3019}
3020
3021/* Implements PRINT_OPERAND_ADDRESS. Nothing unusual here. */
3022void
3023m32c_print_operand_address (FILE * stream, rtx address)
3024{
235e1fe8
NC
3025 if (GET_CODE (address) == MEM)
3026 address = XEXP (address, 0);
3027 else
3028 /* cf: gcc.dg/asm-4.c. */
3029 gcc_assert (GET_CODE (address) == REG);
3030
3031 m32c_print_operand (stream, address, 0);
38b2d076
DD
3032}
3033
3034/* Implements ASM_OUTPUT_REG_PUSH. Control registers are pushed
3035 differently than general registers. */
3036void
3037m32c_output_reg_push (FILE * s, int regno)
3038{
3039 if (regno == FLG_REGNO)
3040 fprintf (s, "\tpushc\tflg\n");
3041 else
04aff2c0 3042 fprintf (s, "\tpush.%c\t%s\n",
38b2d076
DD
3043 " bwll"[reg_push_size (regno)], reg_names[regno]);
3044}
3045
3046/* Likewise for ASM_OUTPUT_REG_POP. */
3047void
3048m32c_output_reg_pop (FILE * s, int regno)
3049{
3050 if (regno == FLG_REGNO)
3051 fprintf (s, "\tpopc\tflg\n");
3052 else
04aff2c0 3053 fprintf (s, "\tpop.%c\t%s\n",
38b2d076
DD
3054 " bwll"[reg_push_size (regno)], reg_names[regno]);
3055}
3056
3057/* Defining target-specific uses of `__attribute__' */
3058
3059/* Used to simplify the logic below. Find the attributes wherever
3060 they may be. */
3061#define M32C_ATTRIBUTES(decl) \
3062 (TYPE_P (decl)) ? TYPE_ATTRIBUTES (decl) \
3063 : DECL_ATTRIBUTES (decl) \
3064 ? (DECL_ATTRIBUTES (decl)) \
3065 : TYPE_ATTRIBUTES (TREE_TYPE (decl))
3066
3067/* Returns TRUE if the given tree has the "interrupt" attribute. */
3068static int
3069interrupt_p (tree node ATTRIBUTE_UNUSED)
3070{
3071 tree list = M32C_ATTRIBUTES (node);
3072 while (list)
3073 {
3074 if (is_attribute_p ("interrupt", TREE_PURPOSE (list)))
3075 return 1;
3076 list = TREE_CHAIN (list);
3077 }
65655f79
DD
3078 return fast_interrupt_p (node);
3079}
3080
3081/* Returns TRUE if the given tree has the "bank_switch" attribute. */
3082static int
3083bank_switch_p (tree node ATTRIBUTE_UNUSED)
3084{
3085 tree list = M32C_ATTRIBUTES (node);
3086 while (list)
3087 {
3088 if (is_attribute_p ("bank_switch", TREE_PURPOSE (list)))
3089 return 1;
3090 list = TREE_CHAIN (list);
3091 }
3092 return 0;
3093}
3094
3095/* Returns TRUE if the given tree has the "fast_interrupt" attribute. */
3096static int
3097fast_interrupt_p (tree node ATTRIBUTE_UNUSED)
3098{
3099 tree list = M32C_ATTRIBUTES (node);
3100 while (list)
3101 {
3102 if (is_attribute_p ("fast_interrupt", TREE_PURPOSE (list)))
3103 return 1;
3104 list = TREE_CHAIN (list);
3105 }
38b2d076
DD
3106 return 0;
3107}
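/* User-level syntax these predicates look for (illustrative only; the
   attribute names are the ones registered in m32c_attribute_table
   below):

     void timer_isr (void) __attribute__ ((interrupt));
     void dma_isr   (void) __attribute__ ((fast_interrupt));
     void bank_isr  (void) __attribute__ ((bank_switch));

   interrupt_p() is nonzero for the first two (it falls back to
   fast_interrupt_p), bank_switch_p() only for the third.  */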
3108
3109static tree
3110interrupt_handler (tree * node ATTRIBUTE_UNUSED,
3111 tree name ATTRIBUTE_UNUSED,
3112 tree args ATTRIBUTE_UNUSED,
3113 int flags ATTRIBUTE_UNUSED,
3114 bool * no_add_attrs ATTRIBUTE_UNUSED)
3115{
3116 return NULL_TREE;
3117}
3118
5abd2125
JS
3119/* Returns TRUE if the given tree has the "function_vector" attribute. */
3120int
3121m32c_special_page_vector_p (tree func)
3122{
653e2568
DD
3123 tree list;
3124
5abd2125
JS
3125 if (TREE_CODE (func) != FUNCTION_DECL)
3126 return 0;
3127
653e2568 3128 list = M32C_ATTRIBUTES (func);
5abd2125
JS
3129 while (list)
3130 {
3131 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
3132 return 1;
3133 list = TREE_CHAIN (list);
3134 }
3135 return 0;
3136}
3137
3138static tree
3139function_vector_handler (tree * node ATTRIBUTE_UNUSED,
3140 tree name ATTRIBUTE_UNUSED,
3141 tree args ATTRIBUTE_UNUSED,
3142 int flags ATTRIBUTE_UNUSED,
3143 bool * no_add_attrs ATTRIBUTE_UNUSED)
3144{
3145 if (TARGET_R8C)
3146 {
3147 /* The attribute is not supported for the R8C target. */
3148 warning (OPT_Wattributes,
29d08eba
JM
3149 "%qE attribute is not supported for R8C target",
3150 name);
5abd2125
JS
3151 *no_add_attrs = true;
3152 }
3153 else if (TREE_CODE (*node) != FUNCTION_DECL)
3154 {
3155 /* The attribute must be applied to functions only. */
3156 warning (OPT_Wattributes,
29d08eba
JM
3157 "%qE attribute applies only to functions",
3158 name);
5abd2125
JS
3159 *no_add_attrs = true;
3160 }
3161 else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
3162 {
3163 /* The argument must be a constant integer. */
3164 warning (OPT_Wattributes,
29d08eba
JM
3165 "%qE attribute argument not an integer constant",
3166 name);
5abd2125
JS
3167 *no_add_attrs = true;
3168 }
3169 else if (TREE_INT_CST_LOW (TREE_VALUE (args)) < 18
3170 || TREE_INT_CST_LOW (TREE_VALUE (args)) > 255)
3171 {
3172 /* The argument value must be between 18 and 255. */
3173 warning (OPT_Wattributes,
29d08eba
JM
3174 "%qE attribute argument should be between 18 to 255",
3175 name);
5abd2125
JS
3176 *no_add_attrs = true;
3177 }
3178 return NULL_TREE;
3179}
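/* Illustrative user-level example (not part of the original source):

     void jte (void) __attribute__ ((function_vector (18)));

   is accepted for M16C/M32C (vector numbers 18..255).  On an R8C
   target, on a non-function, or with an argument outside that range,
   the handler above only warns and drops the attribute.  */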
3180
3181/* If the function is assigned the attribute 'function_vector', it
3182 returns the function vector number, otherwise returns zero. */
3183int
3184current_function_special_page_vector (rtx x)
3185{
3186 int num;
3187
3188 if ((GET_CODE(x) == SYMBOL_REF)
3189 && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
3190 {
653e2568 3191 tree list;
5abd2125
JS
3192 tree t = SYMBOL_REF_DECL (x);
3193
3194 if (TREE_CODE (t) != FUNCTION_DECL)
3195 return 0;
3196
653e2568 3197 list = M32C_ATTRIBUTES (t);
5abd2125
JS
3198 while (list)
3199 {
3200 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
3201 {
3202 num = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list)));
3203 return num;
3204 }
3205
3206 list = TREE_CHAIN (list);
3207 }
3208
3209 return 0;
3210 }
3211 else
3212 return 0;
3213}
3214
38b2d076
DD
3215#undef TARGET_ATTRIBUTE_TABLE
3216#define TARGET_ATTRIBUTE_TABLE m32c_attribute_table
3217static const struct attribute_spec m32c_attribute_table[] = {
62d784f7
KT
3218 {"interrupt", 0, 0, false, false, false, interrupt_handler, false},
3219 {"bank_switch", 0, 0, false, false, false, interrupt_handler, false},
3220 {"fast_interrupt", 0, 0, false, false, false, interrupt_handler, false},
3221 {"function_vector", 1, 1, true, false, false, function_vector_handler,
3222 false},
3223 {0, 0, 0, 0, 0, 0, 0, false}
38b2d076
DD
3224};
3225
3226#undef TARGET_COMP_TYPE_ATTRIBUTES
3227#define TARGET_COMP_TYPE_ATTRIBUTES m32c_comp_type_attributes
3228static int
3101faab
KG
3229m32c_comp_type_attributes (const_tree type1 ATTRIBUTE_UNUSED,
3230 const_tree type2 ATTRIBUTE_UNUSED)
38b2d076
DD
3231{
3232 /* 0=incompatible 1=compatible 2=warning */
3233 return 1;
3234}
3235
3236#undef TARGET_INSERT_ATTRIBUTES
3237#define TARGET_INSERT_ATTRIBUTES m32c_insert_attributes
3238static void
3239m32c_insert_attributes (tree node ATTRIBUTE_UNUSED,
3240 tree * attr_ptr ATTRIBUTE_UNUSED)
3241{
f6052f86
DD
3242 unsigned addr;
3243 /* See if we need to make #pragma address variables volatile. */
3244
3245 if (TREE_CODE (node) == VAR_DECL)
3246 {
444d6efe 3247 const char *name = IDENTIFIER_POINTER (DECL_NAME (node));
f6052f86
DD
3248 if (m32c_get_pragma_address (name, &addr))
3249 {
3250 TREE_THIS_VOLATILE (node) = true;
3251 }
3252 }
3253}
3254
3255
3256struct GTY(()) pragma_entry {
3257 const char *varname;
3258 unsigned address;
3259};
3260typedef struct pragma_entry pragma_entry;
3261
3262/* Hash table of pragma info. */
3263static GTY((param_is (pragma_entry))) htab_t pragma_htab;
3264
3265static int
3266pragma_entry_eq (const void *p1, const void *p2)
3267{
3268 const pragma_entry *old = (const pragma_entry *) p1;
3269 const char *new_name = (const char *) p2;
3270
3271 return strcmp (old->varname, new_name) == 0;
3272}
3273
3274static hashval_t
3275pragma_entry_hash (const void *p)
3276{
3277 const pragma_entry *old = (const pragma_entry *) p;
3278 return htab_hash_string (old->varname);
3279}
3280
3281void
3282m32c_note_pragma_address (const char *varname, unsigned address)
3283{
3284 pragma_entry **slot;
3285
3286 if (!pragma_htab)
3287 pragma_htab = htab_create_ggc (31, pragma_entry_hash,
3288 pragma_entry_eq, NULL);
3289
3290 slot = (pragma_entry **)
3291 htab_find_slot_with_hash (pragma_htab, varname,
3292 htab_hash_string (varname), INSERT);
3293
3294 if (!*slot)
3295 {
3296 *slot = ggc_alloc_pragma_entry ();
3297 (*slot)->varname = ggc_strdup (varname);
3298 }
3299 (*slot)->address = address;
3300}
3301
3302static bool
3303m32c_get_pragma_address (const char *varname, unsigned *address)
3304{
3305 pragma_entry **slot;
3306
3307 if (!pragma_htab)
3308 return false;
3309
3310 slot = (pragma_entry **)
3311 htab_find_slot_with_hash (pragma_htab, varname,
3312 htab_hash_string (varname), NO_INSERT);
3313 if (slot && *slot)
3314 {
3315 *address = (*slot)->address;
3316 return true;
3317 }
3318 return false;
3319}
3320
3321void
444d6efe
JR
3322m32c_output_aligned_common (FILE *stream, tree decl ATTRIBUTE_UNUSED,
3323 const char *name,
f6052f86
DD
3324 int size, int align, int global)
3325{
3326 unsigned address;
3327
3328 if (m32c_get_pragma_address (name, &address))
3329 {
3330 /* We never output these as global. */
3331 assemble_name (stream, name);
3332 fprintf (stream, " = 0x%04x\n", address);
3333 return;
3334 }
3335 if (!global)
3336 {
3337 fprintf (stream, "\t.local\t");
3338 assemble_name (stream, name);
3339 fprintf (stream, "\n");
3340 }
3341 fprintf (stream, "\t.comm\t");
3342 assemble_name (stream, name);
3343 fprintf (stream, ",%u,%u\n", size, align / BITS_PER_UNIT);
38b2d076
DD
3344}
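/* Sketch of the whole #pragma address path (illustrative; the exact
   pragma spelling is parsed in m32c-pragma.c, not here, so treat the
   first line below as an assumption):

     #pragma ADDRESS port0 0x3e0
     unsigned char port0;

   m32c_note_pragma_address() records ("port0", 0x3e0); the variable is
   then forced volatile by m32c_insert_attributes() above, and instead
   of a .comm directive m32c_output_aligned_common() emits

     port0 = 0x03e0                                                      */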
3345
3346/* Predicates */
3347
f9b89438 3348/* This is a list of legal subregs of hard regs. */
67fc44cb
DD
3349static const struct {
3350 unsigned char outer_mode_size;
3351 unsigned char inner_mode_size;
3352 unsigned char byte_mask;
3353 unsigned char legal_when;
f9b89438 3354 unsigned int regno;
f9b89438 3355} legal_subregs[] = {
67fc44cb
DD
3356 {1, 2, 0x03, 1, R0_REGNO}, /* r0h r0l */
3357 {1, 2, 0x03, 1, R1_REGNO}, /* r1h r1l */
3358 {1, 2, 0x01, 1, A0_REGNO},
3359 {1, 2, 0x01, 1, A1_REGNO},
f9b89438 3360
67fc44cb
DD
3361 {1, 4, 0x01, 1, A0_REGNO},
3362 {1, 4, 0x01, 1, A1_REGNO},
f9b89438 3363
67fc44cb
DD
3364 {2, 4, 0x05, 1, R0_REGNO}, /* r2 r0 */
3365 {2, 4, 0x05, 1, R1_REGNO}, /* r3 r1 */
3366 {2, 4, 0x05, 16, A0_REGNO}, /* a1 a0 */
3367 {2, 4, 0x01, 24, A0_REGNO}, /* a1 a0 */
3368 {2, 4, 0x01, 24, A1_REGNO}, /* a1 a0 */
f9b89438 3369
67fc44cb 3370 {4, 8, 0x55, 1, R0_REGNO}, /* r3 r1 r2 r0 */
f9b89438
DD
3371};
3372
3373/* Returns TRUE if OP is a subreg of a hard reg which we don't
f6052f86 3374 support. We also bail on MEMs with illegal addresses. */
f9b89438
DD
3375bool
3376m32c_illegal_subreg_p (rtx op)
3377{
f9b89438
DD
3378 int offset;
3379 unsigned int i;
3380 int src_mode, dest_mode;
3381
f6052f86
DD
3382 if (GET_CODE (op) == MEM
3383 && ! m32c_legitimate_address_p (Pmode, XEXP (op, 0), false))
3384 {
3385 return true;
3386 }
3387
f9b89438
DD
3388 if (GET_CODE (op) != SUBREG)
3389 return false;
3390
3391 dest_mode = GET_MODE (op);
3392 offset = SUBREG_BYTE (op);
3393 op = SUBREG_REG (op);
3394 src_mode = GET_MODE (op);
3395
3396 if (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (src_mode))
3397 return false;
3398 if (GET_CODE (op) != REG)
3399 return false;
3400 if (REGNO (op) >= MEM0_REGNO)
3401 return false;
3402
3403 offset = (1 << offset);
3404
67fc44cb 3405 for (i = 0; i < ARRAY_SIZE (legal_subregs); i ++)
f9b89438
DD
3406 if (legal_subregs[i].outer_mode_size == GET_MODE_SIZE (dest_mode)
3407 && legal_subregs[i].regno == REGNO (op)
3408 && legal_subregs[i].inner_mode_size == GET_MODE_SIZE (src_mode)
3409 && legal_subregs[i].byte_mask & offset)
3410 {
3411 switch (legal_subregs[i].legal_when)
3412 {
3413 case 1:
3414 return false;
3415 case 16:
3416 if (TARGET_A16)
3417 return false;
3418 break;
3419 case 24:
3420 if (TARGET_A24)
3421 return false;
3422 break;
3423 }
3424 }
3425 return true;
3426}
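/* Concrete cases, read off the table above (illustrative):

     (subreg:QI (reg:HI r0) 0)   legal   -- byte_mask 0x03 covers r0l/r0h
     (subreg:QI (reg:HI a0) 1)   illegal -- only byte 0 of a0 is listed
     (subreg:HI (reg:SI a0) 2)   legal only on A16 (it matches the
                                 legal_when == 16 "a1 a0" entry)

   Anything that matches no table entry is reported as illegal.  */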
3427
38b2d076
DD
3428/* Returns TRUE if we support a move between the first two operands.
3429 At the moment, we just want to discourage mem to mem moves until
3430 after reload, because reload has a hard time with our limited
3431 number of address registers, and we can get into a situation where
3432 we need three of them when we only have two. */
3433bool
3434m32c_mov_ok (rtx * operands, enum machine_mode mode ATTRIBUTE_UNUSED)
3435{
3436 rtx op0 = operands[0];
3437 rtx op1 = operands[1];
3438
3439 if (TARGET_A24)
3440 return true;
3441
3442#define DEBUG_MOV_OK 0
3443#if DEBUG_MOV_OK
3444 fprintf (stderr, "m32c_mov_ok %s\n", mode_name[mode]);
3445 debug_rtx (op0);
3446 debug_rtx (op1);
3447#endif
3448
3449 if (GET_CODE (op0) == SUBREG)
3450 op0 = XEXP (op0, 0);
3451 if (GET_CODE (op1) == SUBREG)
3452 op1 = XEXP (op1, 0);
3453
3454 if (GET_CODE (op0) == MEM
3455 && GET_CODE (op1) == MEM
3456 && ! reload_completed)
3457 {
3458#if DEBUG_MOV_OK
3459 fprintf (stderr, " - no, mem to mem\n");
3460#endif
3461 return false;
3462 }
3463
3464#if DEBUG_MOV_OK
3465 fprintf (stderr, " - ok\n");
3466#endif
3467 return true;
3468}
3469
ff485e71
DD
3470/* Returns TRUE if two consecutive HImode mov instructions, generated
3471 to move a double-word immediate value into a double-word variable in
3472 memory, can be combined into a single SImode mov instruction. */
3473bool
3474m32c_immd_dbl_mov (rtx * operands,
3475 enum machine_mode mode ATTRIBUTE_UNUSED)
3476{
3477 int flag = 0, okflag = 0, offset1 = 0, offset2 = 0, offsetsign = 0;
3478 const char *str1;
3479 const char *str2;
3480
3481 if (GET_CODE (XEXP (operands[0], 0)) == SYMBOL_REF
3482 && MEM_SCALAR_P (operands[0])
3483 && !MEM_IN_STRUCT_P (operands[0])
3484 && GET_CODE (XEXP (operands[2], 0)) == CONST
3485 && GET_CODE (XEXP (XEXP (operands[2], 0), 0)) == PLUS
3486 && GET_CODE (XEXP (XEXP (XEXP (operands[2], 0), 0), 0)) == SYMBOL_REF
3487 && GET_CODE (XEXP (XEXP (XEXP (operands[2], 0), 0), 1)) == CONST_INT
3488 && MEM_SCALAR_P (operands[2])
3489 && !MEM_IN_STRUCT_P (operands[2]))
3490 flag = 1;
3491
3492 else if (GET_CODE (XEXP (operands[0], 0)) == CONST
3493 && GET_CODE (XEXP (XEXP (operands[0], 0), 0)) == PLUS
3494 && GET_CODE (XEXP (XEXP (XEXP (operands[0], 0), 0), 0)) == SYMBOL_REF
3495 && MEM_SCALAR_P (operands[0])
3496 && !MEM_IN_STRUCT_P (operands[0])
f9f3567e 3497 && !(INTVAL (XEXP (XEXP (XEXP (operands[0], 0), 0), 1)) %4)
ff485e71
DD
3498 && GET_CODE (XEXP (operands[2], 0)) == CONST
3499 && GET_CODE (XEXP (XEXP (operands[2], 0), 0)) == PLUS
3500 && GET_CODE (XEXP (XEXP (XEXP (operands[2], 0), 0), 0)) == SYMBOL_REF
3501 && MEM_SCALAR_P (operands[2])
3502 && !MEM_IN_STRUCT_P (operands[2]))
3503 flag = 2;
3504
3505 else if (GET_CODE (XEXP (operands[0], 0)) == PLUS
3506 && GET_CODE (XEXP (XEXP (operands[0], 0), 0)) == REG
3507 && REGNO (XEXP (XEXP (operands[0], 0), 0)) == FB_REGNO
3508 && GET_CODE (XEXP (XEXP (operands[0], 0), 1)) == CONST_INT
3509 && MEM_SCALAR_P (operands[0])
3510 && !MEM_IN_STRUCT_P (operands[0])
f9f3567e 3511 && !(INTVAL (XEXP (XEXP (operands[0], 0), 1)) %4)
ff485e71
DD
3512 && REGNO (XEXP (XEXP (operands[2], 0), 0)) == FB_REGNO
3513 && GET_CODE (XEXP (XEXP (operands[2], 0), 1)) == CONST_INT
3514 && MEM_SCALAR_P (operands[2])
3515 && !MEM_IN_STRUCT_P (operands[2]))
3516 flag = 3;
3517
3518 else
3519 return false;
3520
3521 switch (flag)
3522 {
3523 case 1:
3524 str1 = XSTR (XEXP (operands[0], 0), 0);
3525 str2 = XSTR (XEXP (XEXP (XEXP (operands[2], 0), 0), 0), 0);
3526 if (strcmp (str1, str2) == 0)
3527 okflag = 1;
3528 else
3529 okflag = 0;
3530 break;
3531 case 2:
3532 str1 = XSTR (XEXP (XEXP (XEXP (operands[0], 0), 0), 0), 0);
3533 str2 = XSTR (XEXP (XEXP (XEXP (operands[2], 0), 0), 0), 0);
3534 if (strcmp(str1,str2) == 0)
3535 okflag = 1;
3536 else
3537 okflag = 0;
3538 break;
3539 case 3:
f9f3567e
DD
3540 offset1 = INTVAL (XEXP (XEXP (operands[0], 0), 1));
3541 offset2 = INTVAL (XEXP (XEXP (operands[2], 0), 1));
ff485e71
DD
3542 offsetsign = offset1 >> ((sizeof (offset1) * 8) -1);
3543 if (((offset2-offset1) == 2) && offsetsign != 0)
3544 okflag = 1;
3545 else
3546 okflag = 0;
3547 break;
3548 default:
3549 okflag = 0;
3550 }
3551
3552 if (okflag == 1)
3553 {
3554 HOST_WIDE_INT val;
3555 operands[4] = gen_rtx_MEM (SImode, XEXP (operands[0], 0));
3556
f9f3567e 3557 val = (INTVAL (operands[3]) << 16) + (INTVAL (operands[1]) & 0xFFFF);
ff485e71
DD
3558 operands[5] = gen_rtx_CONST_INT (VOIDmode, val);
3559
3560 return true;
3561 }
3562
3563 return false;
3564}
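/* Worked example (illustrative): given the consecutive stores

     (set (mem:HI (symbol_ref "_x")) (const_int 0x1234))
     (set (mem:HI (const (plus (symbol_ref "_x") (const_int 2))))
          (const_int 0x5678))

   case 1 matches, and operands[4]/operands[5] are rebuilt so the pair
   can be replaced by one SImode store of 0x56781234 to _x -- the low
   word at the lower address, as the little-endian layout requires.  */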
3565
38b2d076
DD
3566/* Expanders */
3567
3568/* Subregs are non-orthogonal for us, because our registers are all
3569 different sizes. */
3570static rtx
3571m32c_subreg (enum machine_mode outer,
3572 rtx x, enum machine_mode inner, int byte)
3573{
3574 int r, nr = -1;
3575
3576 /* Converting MEMs to different types that are the same size, we
3577 just rewrite them. */
3578 if (GET_CODE (x) == SUBREG
3579 && SUBREG_BYTE (x) == 0
3580 && GET_CODE (SUBREG_REG (x)) == MEM
3581 && (GET_MODE_SIZE (GET_MODE (x))
3582 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
3583 {
3584 rtx oldx = x;
3585 x = gen_rtx_MEM (GET_MODE (x), XEXP (SUBREG_REG (x), 0));
3586 MEM_COPY_ATTRIBUTES (x, SUBREG_REG (oldx));
3587 }
3588
3589 /* Push/pop get done as smaller push/pops. */
3590 if (GET_CODE (x) == MEM
3591 && (GET_CODE (XEXP (x, 0)) == PRE_DEC
3592 || GET_CODE (XEXP (x, 0)) == POST_INC))
3593 return gen_rtx_MEM (outer, XEXP (x, 0));
3594 if (GET_CODE (x) == SUBREG
3595 && GET_CODE (XEXP (x, 0)) == MEM
3596 && (GET_CODE (XEXP (XEXP (x, 0), 0)) == PRE_DEC
3597 || GET_CODE (XEXP (XEXP (x, 0), 0)) == POST_INC))
3598 return gen_rtx_MEM (outer, XEXP (XEXP (x, 0), 0));
3599
3600 if (GET_CODE (x) != REG)
146456c1
DD
3601 {
3602 rtx r = simplify_gen_subreg (outer, x, inner, byte);
3603 if (GET_CODE (r) == SUBREG
3604 && GET_CODE (x) == MEM
3605 && MEM_VOLATILE_P (x))
3606 {
3607 /* Volatile MEMs don't get simplified, but we need them to
3608 be. We are little endian, so the subreg byte is the
3609 offset. */
91140cd3 3610 r = adjust_address_nv (x, outer, byte);
146456c1
DD
3611 }
3612 return r;
3613 }
38b2d076
DD
3614
3615 r = REGNO (x);
3616 if (r >= FIRST_PSEUDO_REGISTER || r == AP_REGNO)
3617 return simplify_gen_subreg (outer, x, inner, byte);
3618
3619 if (IS_MEM_REGNO (r))
3620 return simplify_gen_subreg (outer, x, inner, byte);
3621
3622 /* This is where the complexities of our register layout are
3623 described. */
3624 if (byte == 0)
3625 nr = r;
3626 else if (outer == HImode)
3627 {
3628 if (r == R0_REGNO && byte == 2)
3629 nr = R2_REGNO;
3630 else if (r == R0_REGNO && byte == 4)
3631 nr = R1_REGNO;
3632 else if (r == R0_REGNO && byte == 6)
3633 nr = R3_REGNO;
3634 else if (r == R1_REGNO && byte == 2)
3635 nr = R3_REGNO;
3636 else if (r == A0_REGNO && byte == 2)
3637 nr = A1_REGNO;
3638 }
3639 else if (outer == SImode)
3640 {
3641 if (r == R0_REGNO && byte == 0)
3642 nr = R0_REGNO;
3643 else if (r == R0_REGNO && byte == 4)
3644 nr = R1_REGNO;
3645 }
3646 if (nr == -1)
3647 {
3648 fprintf (stderr, "m32c_subreg %s %s %d\n",
3649 mode_name[outer], mode_name[inner], byte);
3650 debug_rtx (x);
3651 gcc_unreachable ();
3652 }
3653 return gen_rtx_REG (outer, nr);
3654}
3655
3656/* Used to emit move instructions. We split some moves,
3657 and avoid mem-mem moves. */
3658int
3659m32c_prepare_move (rtx * operands, enum machine_mode mode)
3660{
5fd5d713
DD
3661 if (far_addr_space_p (operands[0])
3662 && CONSTANT_P (operands[1]))
3663 {
3664 operands[1] = force_reg (GET_MODE (operands[0]), operands[1]);
3665 }
38b2d076
DD
3666 if (TARGET_A16 && mode == PSImode)
3667 return m32c_split_move (operands, mode, 1);
3668 if ((GET_CODE (operands[0]) == MEM)
3669 && (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY))
3670 {
3671 rtx pmv = XEXP (operands[0], 0);
3672 rtx dest_reg = XEXP (pmv, 0);
3673 rtx dest_mod = XEXP (pmv, 1);
3674
3675 emit_insn (gen_rtx_SET (Pmode, dest_reg, dest_mod));
3676 operands[0] = gen_rtx_MEM (mode, dest_reg);
3677 }
b3a13419 3678 if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
38b2d076
DD
3679 operands[1] = copy_to_mode_reg (mode, operands[1]);
3680 return 0;
3681}
3682
3683#define DEBUG_SPLIT 0
3684
3685/* Returns TRUE if the given PSImode move should be split. We split
3686 for all r8c/m16c moves, since it doesn't support them, and for
3687 POP.L as we can only *push* SImode. */
3688int
3689m32c_split_psi_p (rtx * operands)
3690{
3691#if DEBUG_SPLIT
3692 fprintf (stderr, "\nm32c_split_psi_p\n");
3693 debug_rtx (operands[0]);
3694 debug_rtx (operands[1]);
3695#endif
3696 if (TARGET_A16)
3697 {
3698#if DEBUG_SPLIT
3699 fprintf (stderr, "yes, A16\n");
3700#endif
3701 return 1;
3702 }
3703 if (GET_CODE (operands[1]) == MEM
3704 && GET_CODE (XEXP (operands[1], 0)) == POST_INC)
3705 {
3706#if DEBUG_SPLIT
3707 fprintf (stderr, "yes, pop.l\n");
3708#endif
3709 return 1;
3710 }
3711#if DEBUG_SPLIT
3712 fprintf (stderr, "no, default\n");
3713#endif
3714 return 0;
3715}
3716
3717/* Split the given move. SPLIT_ALL is 0 if splitting is optional
3718 (define_expand), 1 if it is not optional (define_insn_and_split),
3719 and 3 for define_split (alternate api). */
3720int
3721m32c_split_move (rtx * operands, enum machine_mode mode, int split_all)
3722{
3723 rtx s[4], d[4];
3724 int parts, si, di, rev = 0;
3725 int rv = 0, opi = 2;
3726 enum machine_mode submode = HImode;
3727 rtx *ops, local_ops[10];
3728
3729 /* define_split modifies the existing operands, but the other two
3730 emit new insns. OPS is where we store the operand pairs, which
3731 we emit later. */
3732 if (split_all == 3)
3733 ops = operands;
3734 else
3735 ops = local_ops;
3736
3737 /* Else HImode. */
3738 if (mode == DImode)
3739 submode = SImode;
3740
3741 /* Before splitting mem-mem moves, force one operand into a
3742 register. */
b3a13419 3743 if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
38b2d076
DD
3744 {
3745#if DEBUG0
3746 fprintf (stderr, "force_reg...\n");
3747 debug_rtx (operands[1]);
3748#endif
3749 operands[1] = force_reg (mode, operands[1]);
3750#if DEBUG0
3751 debug_rtx (operands[1]);
3752#endif
3753 }
3754
3755 parts = 2;
3756
3757#if DEBUG_SPLIT
b3a13419
ILT
3758 fprintf (stderr, "\nsplit_move %d all=%d\n", !can_create_pseudo_p (),
3759 split_all);
38b2d076
DD
3760 debug_rtx (operands[0]);
3761 debug_rtx (operands[1]);
3762#endif
3763
eb5f0c07
DD
3764 /* Note that split_all is not used to select the api after this
3765 point, so it's safe to set it to 3 even with define_insn. */
3766 /* None of the chips can move SI operands to sp-relative addresses,
3767 so we always split those. */
3768 if (m32c_extra_constraint_p (operands[0], 'S', "Ss"))
3769 split_all = 3;
3770
5fd5d713
DD
3771 if (TARGET_A16
3772 && (far_addr_space_p (operands[0])
3773 || far_addr_space_p (operands[1])))
3774 split_all |= 1;
3775
38b2d076
DD
3776 /* We don't need to split these. */
3777 if (TARGET_A24
3778 && split_all != 3
3779 && (mode == SImode || mode == PSImode)
3780 && !(GET_CODE (operands[1]) == MEM
3781 && GET_CODE (XEXP (operands[1], 0)) == POST_INC))
3782 return 0;
3783
3784 /* First, enumerate the subregs we'll be dealing with. */
3785 for (si = 0; si < parts; si++)
3786 {
3787 d[si] =
3788 m32c_subreg (submode, operands[0], mode,
3789 si * GET_MODE_SIZE (submode));
3790 s[si] =
3791 m32c_subreg (submode, operands[1], mode,
3792 si * GET_MODE_SIZE (submode));
3793 }
3794
3795 /* Split pushes by emitting a sequence of smaller pushes. */
3796 if (GET_CODE (d[0]) == MEM && GET_CODE (XEXP (d[0], 0)) == PRE_DEC)
3797 {
3798 for (si = parts - 1; si >= 0; si--)
3799 {
3800 ops[opi++] = gen_rtx_MEM (submode,
3801 gen_rtx_PRE_DEC (Pmode,
3802 gen_rtx_REG (Pmode,
3803 SP_REGNO)));
3804 ops[opi++] = s[si];
3805 }
3806
3807 rv = 1;
3808 }
3809 /* Likewise for pops. */
3810 else if (GET_CODE (s[0]) == MEM && GET_CODE (XEXP (s[0], 0)) == POST_INC)
3811 {
3812 for (di = 0; di < parts; di++)
3813 {
3814 ops[opi++] = d[di];
3815 ops[opi++] = gen_rtx_MEM (submode,
3816 gen_rtx_POST_INC (Pmode,
3817 gen_rtx_REG (Pmode,
3818 SP_REGNO)));
3819 }
3820 rv = 1;
3821 }
3822 else if (split_all)
3823 {
3824 /* if d[di] == s[si] for any di < si, we'll early clobber. */
3825 for (di = 0; di < parts - 1; di++)
3826 for (si = di + 1; si < parts; si++)
3827 if (reg_mentioned_p (d[di], s[si]))
3828 rev = 1;
3829
3830 if (rev)
3831 for (si = 0; si < parts; si++)
3832 {
3833 ops[opi++] = d[si];
3834 ops[opi++] = s[si];
3835 }
3836 else
3837 for (si = parts - 1; si >= 0; si--)
3838 {
3839 ops[opi++] = d[si];
3840 ops[opi++] = s[si];
3841 }
3842 rv = 1;
3843 }
3844 /* Now emit any moves we may have accumulated. */
3845 if (rv && split_all != 3)
3846 {
3847 int i;
3848 for (i = 2; i < opi; i += 2)
3849 emit_move_insn (ops[i], ops[i + 1]);
3850 }
3851 return rv;
3852}
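/* Example (illustrative): when this splitter runs on an A16 SImode push

     (set (mem:SI (pre_dec:HI (reg:HI sp))) (reg:SI r0))

   the PRE_DEC arm above emits two HImode pushes, high half first --
   effectively "push.w r2" then "push.w r0", since m32c_subreg maps
   bytes 2..3 of the SImode r0 pair onto r2.  */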
3853
07127a0a
DD
3854/* The m32c has a number of opcodes that act like memcpy, strcmp, and
3855 the like. For the R8C they expect one of the addresses to be in
3856 R1L:An so we need to arrange for that. Otherwise, it's just a
3857 matter of picking out the operands we want and emitting the right
3858 pattern for them. All these expanders, which correspond to
3859 patterns in blkmov.md, must return nonzero if they expand the insn,
3860 or zero if they should FAIL. */
3861
3862/* This is a memset() opcode. All operands are implied, so we need to
3863 arrange for them to be in the right registers. The opcode wants
3864 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3865 the count (HI), and $2 the value (QI). */
3866int
3867m32c_expand_setmemhi(rtx *operands)
3868{
3869 rtx desta, count, val;
3870 rtx desto, counto;
3871
3872 desta = XEXP (operands[0], 0);
3873 count = operands[1];
3874 val = operands[2];
3875
3876 desto = gen_reg_rtx (Pmode);
3877 counto = gen_reg_rtx (HImode);
3878
3879 if (GET_CODE (desta) != REG
3880 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3881 desta = copy_to_mode_reg (Pmode, desta);
3882
3883 /* This looks like an arbitrary restriction, but this is by far the
3884 most common case. For counts 8..14 this actually results in
3885 smaller code with no speed penalty because the half-sized
3886 constant can be loaded with a shorter opcode. */
3887 if (GET_CODE (count) == CONST_INT
3888 && GET_CODE (val) == CONST_INT
3889 && ! (INTVAL (count) & 1)
3890 && (INTVAL (count) > 1)
3891 && (INTVAL (val) <= 7 && INTVAL (val) >= -8))
3892 {
3893 unsigned v = INTVAL (val) & 0xff;
3894 v = v | (v << 8);
3895 count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3896 val = copy_to_mode_reg (HImode, GEN_INT (v));
3897 if (TARGET_A16)
3898 emit_insn (gen_setmemhi_whi_op (desto, counto, val, desta, count));
3899 else
3900 emit_insn (gen_setmemhi_wpsi_op (desto, counto, val, desta, count));
3901 return 1;
3902 }
3903
3904 /* This is the generalized memset() case. */
3905 if (GET_CODE (val) != REG
3906 || REGNO (val) < FIRST_PSEUDO_REGISTER)
3907 val = copy_to_mode_reg (QImode, val);
3908
3909 if (GET_CODE (count) != REG
3910 || REGNO (count) < FIRST_PSEUDO_REGISTER)
3911 count = copy_to_mode_reg (HImode, count);
3912
3913 if (TARGET_A16)
3914 emit_insn (gen_setmemhi_bhi_op (desto, counto, val, desta, count));
3915 else
3916 emit_insn (gen_setmemhi_bpsi_op (desto, counto, val, desta, count));
3917
3918 return 1;
3919}
3920
3921/* This is a memcpy() opcode. All operands are implied, so we need to
3922 arrange for them to be in the right registers. The opcode wants
3923 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3924 is the source (MEM:BLK), and $2 the count (HI). */
3925int
3926m32c_expand_movmemhi(rtx *operands)
3927{
3928 rtx desta, srca, count;
3929 rtx desto, srco, counto;
3930
3931 desta = XEXP (operands[0], 0);
3932 srca = XEXP (operands[1], 0);
3933 count = operands[2];
3934
3935 desto = gen_reg_rtx (Pmode);
3936 srco = gen_reg_rtx (Pmode);
3937 counto = gen_reg_rtx (HImode);
3938
3939 if (GET_CODE (desta) != REG
3940 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3941 desta = copy_to_mode_reg (Pmode, desta);
3942
3943 if (GET_CODE (srca) != REG
3944 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3945 srca = copy_to_mode_reg (Pmode, srca);
3946
3947 /* Similar to setmem, but we don't need to check the value. */
3948 if (GET_CODE (count) == CONST_INT
3949 && ! (INTVAL (count) & 1)
3950 && (INTVAL (count) > 1))
3951 {
3952 count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3953 if (TARGET_A16)
3954 emit_insn (gen_movmemhi_whi_op (desto, srco, counto, desta, srca, count));
3955 else
3956 emit_insn (gen_movmemhi_wpsi_op (desto, srco, counto, desta, srca, count));
3957 return 1;
3958 }
3959
3960 /* This is the generalized memcpy() case. */
3961 if (GET_CODE (count) != REG
3962 || REGNO (count) < FIRST_PSEUDO_REGISTER)
3963 count = copy_to_mode_reg (HImode, count);
3964
3965 if (TARGET_A16)
3966 emit_insn (gen_movmemhi_bhi_op (desto, srco, counto, desta, srca, count));
3967 else
3968 emit_insn (gen_movmemhi_bpsi_op (desto, srco, counto, desta, srca, count));
3969
3970 return 1;
3971}
3972
3973/* This is a stpcpy() opcode. $0 is the destination (MEM:BLK) after
3974 the copy, which should point to the NUL at the end of the string,
3975 $1 is the destination (MEM:BLK), and $2 is the source (MEM:BLK).
3976 Since our opcode leaves the destination pointing *after* the NUL,
3977 we must emit an adjustment. */
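/* A worked illustration with a hypothetical buffer: copying the
   3-byte string "ab\0" leaves the destination address register
   pointing at dest + 3, one past the NUL, while stpcpy() must return
   dest + 2; the GEN_INT (-1) adjustment below supplies that
   correction. */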
3978int
3979m32c_expand_movstr(rtx *operands)
3980{
3981 rtx desta, srca;
3982 rtx desto, srco;
3983
3984 desta = XEXP (operands[1], 0);
3985 srca = XEXP (operands[2], 0);
3986
3987 desto = gen_reg_rtx (Pmode);
3988 srco = gen_reg_rtx (Pmode);
3989
3990 if (GET_CODE (desta) != REG
3991 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3992 desta = copy_to_mode_reg (Pmode, desta);
3993
3994 if (GET_CODE (srca) != REG
3995 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3996 srca = copy_to_mode_reg (Pmode, srca);
3997
3998 emit_insn (gen_movstr_op (desto, srco, desta, srca));
3999 /* desto ends up being a1, which allows this type of add through MOVA. */
4000 emit_insn (gen_addpsi3 (operands[0], desto, GEN_INT (-1)));
4001
4002 return 1;
4003}
4004
4005/* This is a strcmp() opcode. $0 is the destination (HI) which holds
4006 <=>0 depending on the comparison, $1 is one string (MEM:BLK), and
4007 $2 is the other (MEM:BLK). We must do the comparison, and then
4008 convert the flags to a signed integer result. */
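/* A hypothetical illustration: comparing "abc" against "abd" leaves
   the flags indicating less-than, and the cond_to_int step below then
   materializes a negative HImode value in $0, matching the usual
   strcmp() contract. */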
4009int
4010m32c_expand_cmpstr(rtx *operands)
4011{
4012 rtx src1a, src2a;
4013
4014 src1a = XEXP (operands[1], 0);
4015 src2a = XEXP (operands[2], 0);
4016
4017 if (GET_CODE (src1a) != REG
4018 || REGNO (src1a) < FIRST_PSEUDO_REGISTER)
4019 src1a = copy_to_mode_reg (Pmode, src1a);
4020
4021 if (GET_CODE (src2a) != REG
4022 || REGNO (src2a) < FIRST_PSEUDO_REGISTER)
4023 src2a = copy_to_mode_reg (Pmode, src2a);
4024
4025 emit_insn (gen_cmpstrhi_op (src1a, src2a, src1a, src2a));
4026 emit_insn (gen_cond_to_int (operands[0]));
4027
4028 return 1;
4029}
4030
4031
23fed240
DD
4032typedef rtx (*shift_gen_func)(rtx, rtx, rtx);
4033
4034static shift_gen_func
4035shift_gen_func_for (int mode, int code)
4036{
4037#define GFF(m,c,f) if (mode == m && code == c) return f
4038 GFF(QImode, ASHIFT, gen_ashlqi3_i);
4039 GFF(QImode, ASHIFTRT, gen_ashrqi3_i);
4040 GFF(QImode, LSHIFTRT, gen_lshrqi3_i);
4041 GFF(HImode, ASHIFT, gen_ashlhi3_i);
4042 GFF(HImode, ASHIFTRT, gen_ashrhi3_i);
4043 GFF(HImode, LSHIFTRT, gen_lshrhi3_i);
4044 GFF(PSImode, ASHIFT, gen_ashlpsi3_i);
4045 GFF(PSImode, ASHIFTRT, gen_ashrpsi3_i);
4046 GFF(PSImode, LSHIFTRT, gen_lshrpsi3_i);
4047 GFF(SImode, ASHIFT, TARGET_A16 ? gen_ashlsi3_16 : gen_ashlsi3_24);
4048 GFF(SImode, ASHIFTRT, TARGET_A16 ? gen_ashrsi3_16 : gen_ashrsi3_24);
4049 GFF(SImode, LSHIFTRT, TARGET_A16 ? gen_lshrsi3_16 : gen_lshrsi3_24);
4050#undef GFF
07127a0a 4051 gcc_unreachable ();
23fed240
DD
4052}
4053
38b2d076
DD
4054/* The m32c only has one shift, but it takes a signed count. GCC
4055 doesn't want this, so we fake it by negating any shift count when
07127a0a
DD
4056 we're pretending to shift the other way. Also, the shift count is
4057 limited to -8..8. It's slightly better to use two shifts for 9..15
4058 than to load the count into r1h, so we do that too. */
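/* A worked illustration with a hypothetical operand: a constant
   HImode shift left by 12 is outside the -8..8 range, so the code
   below emits a shift by 8 into a temporary followed by a shift by
   the remaining 4, instead of loading the count into r1h. */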
38b2d076 4059int
23fed240 4060m32c_prepare_shift (rtx * operands, int scale, int shift_code)
38b2d076 4061{
23fed240
DD
4062 enum machine_mode mode = GET_MODE (operands[0]);
4063 shift_gen_func func = shift_gen_func_for (mode, shift_code);
38b2d076 4064 rtx temp;
23fed240
DD
4065
4066 if (GET_CODE (operands[2]) == CONST_INT)
38b2d076 4067 {
23fed240
DD
4068 int maxc = TARGET_A24 && (mode == PSImode || mode == SImode) ? 32 : 8;
4069 int count = INTVAL (operands[2]) * scale;
4070
4071 while (count > maxc)
4072 {
4073 temp = gen_reg_rtx (mode);
4074 emit_insn (func (temp, operands[1], GEN_INT (maxc)));
4075 operands[1] = temp;
4076 count -= maxc;
4077 }
4078 while (count < -maxc)
4079 {
4080 temp = gen_reg_rtx (mode);
4081 emit_insn (func (temp, operands[1], GEN_INT (-maxc)));
4082 operands[1] = temp;
4083 count += maxc;
4084 }
4085 emit_insn (func (operands[0], operands[1], GEN_INT (count)));
4086 return 1;
38b2d076 4087 }
2e160056
DD
4088
4089 temp = gen_reg_rtx (QImode);
38b2d076 4090 if (scale < 0)
2e160056
DD
4091 /* The pattern has a NEG that corresponds to this. */
4092 emit_move_insn (temp, gen_rtx_NEG (QImode, operands[2]));
4093 else if (TARGET_A16 && mode == SImode)
4094 /* We do this because the code below may modify the count, and we
4095    don't want to modify the original value. */
4096 emit_move_insn (temp, operands[2]);
38b2d076 4097 else
2e160056 4098 /* We'll only use it for the shift, no point emitting a move. */
38b2d076 4099 temp = operands[2];
2e160056 4100
16659fcf 4101 if (TARGET_A16 && GET_MODE_SIZE (mode) == 4)
2e160056
DD
4102 {
4103 /* The m16c has a limit of -16..16 for SI shifts, even when the
4104 shift count is in a register. Since there are so many targets
4105 of these shifts, it's better to expand the RTL here than to
4106 call a helper function.
4107
4108 The resulting code looks something like this:
4109
4110 cmp.b r1h,-16
4111 jge.b 1f
4112 shl.l -16,dest
4113 add.b r1h,16
4114 1f: cmp.b r1h,16
4115 jle.b 1f
4116 shl.l 16,dest
4117 sub.b r1h,16
4118 1f: shl.l r1h,dest
4119
4120 We take advantage of the fact that "negative" shifts are
4121 undefined to skip one of the comparisons. */
4122
4123 rtx count;
444d6efe 4124 rtx label, insn, tempvar;
2e160056 4125
16659fcf
DD
4126 emit_move_insn (operands[0], operands[1]);
4127
2e160056
DD
4128 count = temp;
4129 label = gen_label_rtx ();
2e160056
DD
4130 LABEL_NUSES (label) ++;
4131
833bf445
DD
4132 tempvar = gen_reg_rtx (mode);
4133
2e160056
DD
4134 if (shift_code == ASHIFT)
4135 {
4136 /* This is a left shift. We only need to check positive counts. */
4137 emit_jump_insn (gen_cbranchqi4 (gen_rtx_LE (VOIDmode, 0, 0),
4138 count, GEN_INT (16), label));
833bf445
DD
4139 emit_insn (func (tempvar, operands[0], GEN_INT (8)));
4140 emit_insn (func (operands[0], tempvar, GEN_INT (8)));
2e160056
DD
4141 insn = emit_insn (gen_addqi3 (count, count, GEN_INT (-16)));
4142 emit_label_after (label, insn);
4143 }
4144 else
4145 {
4146 /* This is a right shift. We only need to check negative counts. */
4147 emit_jump_insn (gen_cbranchqi4 (gen_rtx_GE (VOIDmode, 0, 0),
4148 count, GEN_INT (-16), label));
833bf445
DD
4149 emit_insn (func (tempvar, operands[0], GEN_INT (-8)));
4150 emit_insn (func (operands[0], tempvar, GEN_INT (-8)));
2e160056
DD
4151 insn = emit_insn (gen_addqi3 (count, count, GEN_INT (16)));
4152 emit_label_after (label, insn);
4153 }
16659fcf
DD
4154 operands[1] = operands[0];
4155 emit_insn (func (operands[0], operands[0], count));
4156 return 1;
2e160056
DD
4157 }
4158
38b2d076
DD
4159 operands[2] = temp;
4160 return 0;
4161}
4162
12ea2512
DD
4163/* The m32c has a limited range of operations that work on PSImode
4164 values; we have to expand to SI, do the math, and truncate back to
4165 PSI. Yes, this is expensive, but hopefully gcc will learn to avoid
4166 those cases. */
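/* A minimal sketch of the expansion done below for a hypothetical
   "a = b * 10" with PSImode a and b:

       temp1 = zero_extend (b)      -- SImode copy of b
       temp2 = temp1 * 10           -- full SImode multiply
       a     = truncate (temp2)     -- back down to PSImode
*/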
4167void
4168m32c_expand_neg_mulpsi3 (rtx * operands)
4169{
4170 /* operands: a = b * i */
4171 rtx temp1; /* b as SI */
07127a0a
DD
4172 rtx scale /* i as SI */;
4173 rtx temp2; /* a*b as SI */
12ea2512
DD
4174
4175 temp1 = gen_reg_rtx (SImode);
4176 temp2 = gen_reg_rtx (SImode);
07127a0a
DD
4177 if (GET_CODE (operands[2]) != CONST_INT)
4178 {
4179 scale = gen_reg_rtx (SImode);
4180 emit_insn (gen_zero_extendpsisi2 (scale, operands[2]));
4181 }
4182 else
4183 scale = copy_to_mode_reg (SImode, operands[2]);
12ea2512
DD
4184
4185 emit_insn (gen_zero_extendpsisi2 (temp1, operands[1]));
07127a0a
DD
4186 temp2 = expand_simple_binop (SImode, MULT, temp1, scale, temp2, 1, OPTAB_LIB);
4187 emit_insn (gen_truncsipsi2 (operands[0], temp2));
12ea2512
DD
4188}
4189
38b2d076
DD
4190/* Pattern Output Functions */
4191
07127a0a
DD
4192int
4193m32c_expand_movcc (rtx *operands)
4194{
4195 rtx rel = operands[1];
0166ff05 4196
07127a0a
DD
4197 if (GET_CODE (rel) != EQ && GET_CODE (rel) != NE)
4198 return 1;
4199 if (GET_CODE (operands[2]) != CONST_INT
4200 || GET_CODE (operands[3]) != CONST_INT)
4201 return 1;
07127a0a
DD
4202 if (GET_CODE (rel) == NE)
4203 {
4204 rtx tmp = operands[2];
4205 operands[2] = operands[3];
4206 operands[3] = tmp;
f90b7a5a 4207 rel = gen_rtx_EQ (GET_MODE (rel), XEXP (rel, 0), XEXP (rel, 1));
07127a0a 4208 }
0166ff05 4209
0166ff05
DD
4210 emit_move_insn (operands[0],
4211 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
f90b7a5a 4212 rel,
0166ff05
DD
4213 operands[2],
4214 operands[3]));
07127a0a
DD
4215 return 0;
4216}
4217
4218/* Used for the "insv" pattern. Return nonzero to FAIL, zero when done. */
4219int
4220m32c_expand_insv (rtx *operands)
4221{
4222 rtx op0, src0, p;
4223 int mask;
4224
4225 if (INTVAL (operands[1]) != 1)
4226 return 1;
4227
9cb96754
N
4228 /* Our insv opcode (bset, bclr) can only insert a one-bit constant. */
4229 if (GET_CODE (operands[3]) != CONST_INT)
4230 return 1;
4231 if (INTVAL (operands[3]) != 0
4232 && INTVAL (operands[3]) != 1
4233 && INTVAL (operands[3]) != -1)
4234 return 1;
4235
07127a0a
DD
4236 mask = 1 << INTVAL (operands[2]);
4237
4238 op0 = operands[0];
4239 if (GET_CODE (op0) == SUBREG
4240 && SUBREG_BYTE (op0) == 0)
4241 {
4242 rtx sub = SUBREG_REG (op0);
4243 if (GET_MODE (sub) == HImode || GET_MODE (sub) == QImode)
4244 op0 = sub;
4245 }
4246
b3a13419 4247 if (!can_create_pseudo_p ()
07127a0a
DD
4248 || (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0)))
4249 src0 = op0;
4250 else
4251 {
4252 src0 = gen_reg_rtx (GET_MODE (op0));
4253 emit_move_insn (src0, op0);
4254 }
4255
4256 if (GET_MODE (op0) == HImode
4257 && INTVAL (operands[2]) >= 8
444d6efe 4258 && GET_CODE (op0) == MEM)
07127a0a
DD
4259 {
4260 /* We are little endian. */
4261 rtx new_mem = gen_rtx_MEM (QImode, plus_constant (XEXP (op0, 0), 1));
4262 MEM_COPY_ATTRIBUTES (new_mem, op0);
4263 mask >>= 8;
4264 }
4265
8e4edce7
DD
4266 /* First, we generate a mask with the correct polarity. If we are
4267 storing a zero, we want an AND mask, so invert it. */
4268 if (INTVAL (operands[3]) == 0)
07127a0a 4269 {
16659fcf 4270 /* Storing a zero, use an AND mask */
07127a0a
DD
4271 if (GET_MODE (op0) == HImode)
4272 mask ^= 0xffff;
4273 else
4274 mask ^= 0xff;
4275 }
8e4edce7
DD
4276 /* Now we need to properly sign-extend the mask in case we need to
4277 fall back to an AND or OR opcode. */
07127a0a
DD
4278 if (GET_MODE (op0) == HImode)
4279 {
4280 if (mask & 0x8000)
4281 mask -= 0x10000;
4282 }
4283 else
4284 {
4285 if (mask & 0x80)
4286 mask -= 0x100;
4287 }
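  /* A worked illustration with hypothetical operands: inserting into
     bit 3 of a QImode destination gives mask = 1 << 3 = 0x08.  Storing
     a 1 keeps it as an IOR mask; storing a 0 flips it to 0xf7, and the
     sign extension above turns that into -9 so the and/ior fall-back
     patterns below see a properly signed QImode constant. */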
4288
4289 switch ( (INTVAL (operands[3]) ? 4 : 0)
4290 + ((GET_MODE (op0) == HImode) ? 2 : 0)
4291 + (TARGET_A24 ? 1 : 0))
4292 {
4293 case 0: p = gen_andqi3_16 (op0, src0, GEN_INT (mask)); break;
4294 case 1: p = gen_andqi3_24 (op0, src0, GEN_INT (mask)); break;
4295 case 2: p = gen_andhi3_16 (op0, src0, GEN_INT (mask)); break;
4296 case 3: p = gen_andhi3_24 (op0, src0, GEN_INT (mask)); break;
4297 case 4: p = gen_iorqi3_16 (op0, src0, GEN_INT (mask)); break;
4298 case 5: p = gen_iorqi3_24 (op0, src0, GEN_INT (mask)); break;
4299 case 6: p = gen_iorhi3_16 (op0, src0, GEN_INT (mask)); break;
4300 case 7: p = gen_iorhi3_24 (op0, src0, GEN_INT (mask)); break;
653e2568 4301 default: p = NULL_RTX; break; /* Not reached, but silences a warning. */
07127a0a
DD
4302 }
4303
4304 emit_insn (p);
4305 return 0;
4306}
4307
4308const char *
4309m32c_scc_pattern(rtx *operands, RTX_CODE code)
4310{
4311 static char buf[30];
4312 if (GET_CODE (operands[0]) == REG
4313 && REGNO (operands[0]) == R0_REGNO)
4314 {
4315 if (code == EQ)
4316 return "stzx\t#1,#0,r0l";
4317 if (code == NE)
4318 return "stzx\t#0,#1,r0l";
4319 }
4320 sprintf(buf, "bm%s\t0,%%h0\n\tand.b\t#1,%%0", GET_RTX_NAME (code));
4321 return buf;
4322}
4323
5abd2125
JS
4324/* Encode symbol attributes of a SYMBOL_REF into its
4325 SYMBOL_REF_FLAGS. */
4326static void
4327m32c_encode_section_info (tree decl, rtx rtl, int first)
4328{
4329 int extra_flags = 0;
4330
4331 default_encode_section_info (decl, rtl, first);
4332 if (TREE_CODE (decl) == FUNCTION_DECL
4333 && m32c_special_page_vector_p (decl))
4334
4335 extra_flags = SYMBOL_FLAG_FUNCVEC_FUNCTION;
4336
4337 if (extra_flags)
4338 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= extra_flags;
4339}
4340
38b2d076
DD
4341/* Returns TRUE if the current function is a leaf, and thus we can
4342 determine which registers an interrupt function really needs to
4343 save. The logic below is mostly about finding the insn sequence
4344 that's the function, versus any sequence that might be open for the
4345 current insn. */
4346static int
4347m32c_leaf_function_p (void)
4348{
4349 rtx saved_first, saved_last;
4350 struct sequence_stack *seq;
4351 int rv;
4352
3e029763
JH
4353 saved_first = crtl->emit.x_first_insn;
4354 saved_last = crtl->emit.x_last_insn;
4355 for (seq = crtl->emit.sequence_stack; seq && seq->next; seq = seq->next)
38b2d076
DD
4356 ;
4357 if (seq)
4358 {
3e029763
JH
4359 crtl->emit.x_first_insn = seq->first;
4360 crtl->emit.x_last_insn = seq->last;
38b2d076
DD
4361 }
4362
4363 rv = leaf_function_p ();
4364
3e029763
JH
4365 crtl->emit.x_first_insn = saved_first;
4366 crtl->emit.x_last_insn = saved_last;
38b2d076
DD
4367 return rv;
4368}
4369
4370/* Returns TRUE if the current function needs to use the ENTER/EXIT
4371 opcodes. If the function doesn't need the frame base or stack
4372 pointer, it can use the simpler RTS opcode. */
4373static bool
4374m32c_function_needs_enter (void)
4375{
4376 rtx insn;
4377 struct sequence_stack *seq;
4378 rtx sp = gen_rtx_REG (Pmode, SP_REGNO);
4379 rtx fb = gen_rtx_REG (Pmode, FB_REGNO);
4380
4381 insn = get_insns ();
3e029763 4382 for (seq = crtl->emit.sequence_stack;
38b2d076
DD
4383 seq;
4384 insn = seq->first, seq = seq->next);
4385
4386 while (insn)
4387 {
4388 if (reg_mentioned_p (sp, insn))
4389 return true;
4390 if (reg_mentioned_p (fb, insn))
4391 return true;
4392 insn = NEXT_INSN (insn);
4393 }
4394 return false;
4395}
4396
4397/* Mark all the subexpressions of the PARALLEL rtx PAR as
4398 frame-related. Return PAR.
4399
4400 dwarf2out.c:dwarf2out_frame_debug_expr ignores sub-expressions of a
4401 PARALLEL rtx other than the first if they do not have the
4402 FRAME_RELATED flag set on them. So this function is handy for
4403 marking up 'enter' instructions. */
4404static rtx
4405m32c_all_frame_related (rtx par)
4406{
4407 int len = XVECLEN (par, 0);
4408 int i;
4409
4410 for (i = 0; i < len; i++)
4411 F (XVECEXP (par, 0, i));
4412
4413 return par;
4414}
4415
4416/* Emits the prologue. See the frame layout comment earlier in this
4417 file. We can reserve up to 256 bytes with the ENTER opcode; beyond
4418 that, we manually update sp. */
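/* A worked illustration with a hypothetical 300-byte frame and no
   register saves: the code below caps the ENTER allocation at 254
   bytes and then emits a separate add of -46 to the stack pointer
   for the remainder. */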
4419void
4420m32c_emit_prologue (void)
4421{
4422 int frame_size, extra_frame_size = 0, reg_save_size;
4423 int complex_prologue = 0;
4424
4425 cfun->machine->is_leaf = m32c_leaf_function_p ();
4426 if (interrupt_p (cfun->decl))
4427 {
4428 cfun->machine->is_interrupt = 1;
4429 complex_prologue = 1;
4430 }
65655f79
DD
4431 else if (bank_switch_p (cfun->decl))
4432 warning (OPT_Wattributes,
4433 "%<bank_switch%> has no effect on non-interrupt functions");
38b2d076
DD
4434
4435 reg_save_size = m32c_pushm_popm (PP_justcount);
4436
4437 if (interrupt_p (cfun->decl))
65655f79
DD
4438 {
4439 if (bank_switch_p (cfun->decl))
4440 emit_insn (gen_fset_b ());
4441 else if (cfun->machine->intr_pushm)
4442 emit_insn (gen_pushm (GEN_INT (cfun->machine->intr_pushm)));
4443 }
38b2d076
DD
4444
4445 frame_size =
4446 m32c_initial_elimination_offset (FB_REGNO, SP_REGNO) - reg_save_size;
4447 if (frame_size == 0
38b2d076
DD
4448 && !m32c_function_needs_enter ())
4449 cfun->machine->use_rts = 1;
4450
4451 if (frame_size > 254)
4452 {
4453 extra_frame_size = frame_size - 254;
4454 frame_size = 254;
4455 }
4456 if (cfun->machine->use_rts == 0)
4457 F (emit_insn (m32c_all_frame_related
4458 (TARGET_A16
fa9fd28a
RIL
4459 ? gen_prologue_enter_16 (GEN_INT (frame_size + 2))
4460 : gen_prologue_enter_24 (GEN_INT (frame_size + 4)))));
38b2d076
DD
4461
4462 if (extra_frame_size)
4463 {
4464 complex_prologue = 1;
4465 if (TARGET_A16)
4466 F (emit_insn (gen_addhi3 (gen_rtx_REG (HImode, SP_REGNO),
4467 gen_rtx_REG (HImode, SP_REGNO),
4468 GEN_INT (-extra_frame_size))));
4469 else
4470 F (emit_insn (gen_addpsi3 (gen_rtx_REG (PSImode, SP_REGNO),
4471 gen_rtx_REG (PSImode, SP_REGNO),
4472 GEN_INT (-extra_frame_size))));
4473 }
4474
4475 complex_prologue += m32c_pushm_popm (PP_pushm);
4476
4477 /* This just emits a comment into the .s file for debugging. */
4478 if (complex_prologue)
4479 emit_insn (gen_prologue_end ());
4480}
4481
4482/* Likewise, for the epilogue. The only exception is that, for
4483 interrupts, we must manually unwind the frame as the REIT opcode
4484 doesn't do that. */
4485void
4486m32c_emit_epilogue (void)
4487{
4488 /* This just emits a comment into the .s file for debugging. */
4489 if (m32c_pushm_popm (PP_justcount) > 0 || cfun->machine->is_interrupt)
4490 emit_insn (gen_epilogue_start ());
4491
4492 m32c_pushm_popm (PP_popm);
4493
4494 if (cfun->machine->is_interrupt)
4495 {
4496 enum machine_mode spmode = TARGET_A16 ? HImode : PSImode;
4497
65655f79
DD
4498 /* REIT clears B flag and restores $fp for us, but we still
4499 have to fix up the stack. USE_RTS just means we didn't
4500 emit ENTER. */
4501 if (!cfun->machine->use_rts)
4502 {
4503 emit_move_insn (gen_rtx_REG (spmode, A0_REGNO),
4504 gen_rtx_REG (spmode, FP_REGNO));
4505 emit_move_insn (gen_rtx_REG (spmode, SP_REGNO),
4506 gen_rtx_REG (spmode, A0_REGNO));
4507 /* We can't just add this to the POPM because it would be in
4508 the wrong order, and wouldn't fix the stack if we're bank
4509 switching. */
4510 if (TARGET_A16)
4511 emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, FP_REGNO)));
4512 else
4513 emit_insn (gen_poppsi (gen_rtx_REG (PSImode, FP_REGNO)));
4514 }
4515 if (!bank_switch_p (cfun->decl) && cfun->machine->intr_pushm)
4516 emit_insn (gen_popm (GEN_INT (cfun->machine->intr_pushm)));
4517
402f2db8
DD
4518 /* The FREIT (Fast REturn from InTerrupt) instruction should be
4519 generated only for M32C/M32CM targets (generate the REIT
4520 instruction otherwise). */
65655f79 4521 if (fast_interrupt_p (cfun->decl))
402f2db8
DD
4522 {
4523 /* Check if the fast_interrupt attribute is set for M32C or M32CM. */
4524 if (TARGET_A24)
4525 {
4526 emit_jump_insn (gen_epilogue_freit ());
4527 }
4528 /* If the fast_interrupt attribute is set for an R8C or M16C
4529    target, ignore it and generate a REIT instruction
4530    instead. */
4531 else
4532 {
4533 warning (OPT_Wattributes,
4534 "%<fast_interrupt%> attribute directive ignored");
4535 emit_jump_insn (gen_epilogue_reit_16 ());
4536 }
4537 }
65655f79 4538 else if (TARGET_A16)
0e0642aa
RIL
4539 emit_jump_insn (gen_epilogue_reit_16 ());
4540 else
4541 emit_jump_insn (gen_epilogue_reit_24 ());
38b2d076
DD
4542 }
4543 else if (cfun->machine->use_rts)
4544 emit_jump_insn (gen_epilogue_rts ());
0e0642aa
RIL
4545 else if (TARGET_A16)
4546 emit_jump_insn (gen_epilogue_exitd_16 ());
38b2d076 4547 else
0e0642aa 4548 emit_jump_insn (gen_epilogue_exitd_24 ());
38b2d076
DD
4549 emit_barrier ();
4550}
4551
4552void
4553m32c_emit_eh_epilogue (rtx ret_addr)
4554{
4555 /* R0[R2] has the stack adjustment. R1[R3] has the address to
4556 return to. We have to fudge the stack, pop everything, pop SP
4557 (fudged), and return (fudged). This is actually easier to do in
4558 assembler, so punt to libgcc. */
4559 emit_jump_insn (gen_eh_epilogue (ret_addr, cfun->machine->eh_stack_adjust));
c41c1387 4560 /* emit_clobber (gen_rtx_REG (HImode, R0L_REGNO)); */
38b2d076
DD
4561 emit_barrier ();
4562}
4563
16659fcf
DD
4564/* Indicate which flags must be properly set for a given conditional. */
4565static int
4566flags_needed_for_conditional (rtx cond)
4567{
4568 switch (GET_CODE (cond))
4569 {
4570 case LE:
4571 case GT:
4572 return FLAGS_OSZ;
4573 case LEU:
4574 case GTU:
4575 return FLAGS_ZC;
4576 case LT:
4577 case GE:
4578 return FLAGS_OS;
4579 case LTU:
4580 case GEU:
4581 return FLAGS_C;
4582 case EQ:
4583 case NE:
4584 return FLAGS_Z;
4585 default:
4586 return FLAGS_N;
4587 }
4588}
4589
4590#define DEBUG_CMP 0
4591
4592/* Returns true if a compare insn is redundant because it would only
4593 set flags that are already set correctly. */
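/* A hedged example of the idea: an addition that already set the Z
   flag for a register may make a following compare of that register
   against zero redundant when the only consumer is an eq/ne branch,
   since FLAGS_Z is all that branch needs; the checks below guard the
   cases where this is not safe. */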
4594static bool
4595m32c_compare_redundant (rtx cmp, rtx *operands)
4596{
4597 int flags_needed;
4598 int pflags;
4599 rtx prev, pp, next;
444d6efe 4600 rtx op0, op1;
16659fcf
DD
4601#if DEBUG_CMP
4602 int prev_icode, i;
4603#endif
4604
4605 op0 = operands[0];
4606 op1 = operands[1];
16659fcf
DD
4607
4608#if DEBUG_CMP
4609 fprintf(stderr, "\n\033[32mm32c_compare_redundant\033[0m\n");
4610 debug_rtx(cmp);
4611 for (i=0; i<2; i++)
4612 {
4613 fprintf(stderr, "operands[%d] = ", i);
4614 debug_rtx(operands[i]);
4615 }
4616#endif
4617
4618 next = next_nonnote_insn (cmp);
4619 if (!next || !INSN_P (next))
4620 {
4621#if DEBUG_CMP
4622 fprintf(stderr, "compare not followed by insn\n");
4623 debug_rtx(next);
4624#endif
4625 return false;
4626 }
4627 if (GET_CODE (PATTERN (next)) == SET
4628 && GET_CODE (XEXP ( PATTERN (next), 1)) == IF_THEN_ELSE)
4629 {
4630 next = XEXP (XEXP (PATTERN (next), 1), 0);
4631 }
4632 else if (GET_CODE (PATTERN (next)) == SET)
4633 {
4634 /* If this is a conditional, flags_needed will be something
4635 other than FLAGS_N, which we test below. */
4636 next = XEXP (PATTERN (next), 1);
4637 }
4638 else
4639 {
4640#if DEBUG_CMP
4641 fprintf(stderr, "compare not followed by conditional\n");
4642 debug_rtx(next);
4643#endif
4644 return false;
4645 }
4646#if DEBUG_CMP
4647 fprintf(stderr, "conditional is: ");
4648 debug_rtx(next);
4649#endif
4650
4651 flags_needed = flags_needed_for_conditional (next);
4652 if (flags_needed == FLAGS_N)
4653 {
4654#if DEBUG_CMP
4655 fprintf(stderr, "compare not followed by conditional\n");
4656 debug_rtx(next);
4657#endif
4658 return false;
4659 }
4660
4661 /* Compare doesn't set overflow and carry the same way that
4662 arithmetic instructions do, so we can't replace those. */
4663 if (flags_needed & FLAGS_OC)
4664 return false;
4665
4666 prev = cmp;
4667 do {
4668 prev = prev_nonnote_insn (prev);
4669 if (!prev)
4670 {
4671#if DEBUG_CMP
4672 fprintf(stderr, "No previous insn.\n");
4673#endif
4674 return false;
4675 }
4676 if (!INSN_P (prev))
4677 {
4678#if DEBUG_CMP
4679 fprintf(stderr, "Previous insn is a non-insn.\n");
4680#endif
4681 return false;
4682 }
4683 pp = PATTERN (prev);
4684 if (GET_CODE (pp) != SET)
4685 {
4686#if DEBUG_CMP
4687 fprintf(stderr, "Previous insn is not a SET.\n");
4688#endif
4689 return false;
4690 }
4691 pflags = get_attr_flags (prev);
4692
4693 /* Looking up attributes of previous insns corrupted the recog
4694 tables. */
4695 INSN_UID (cmp) = -1;
4696 recog (PATTERN (cmp), cmp, 0);
4697
4698 if (pflags == FLAGS_N
4699 && reg_mentioned_p (op0, pp))
4700 {
4701#if DEBUG_CMP
4702 fprintf(stderr, "intermediate non-flags insn uses op:\n");
4703 debug_rtx(prev);
4704#endif
4705 return false;
4706 }
b3c5a409
DD
4707
4708 /* Check for comparisons against memory - between volatiles and
4709 aliases, we just can't risk this one. */
4710 if (GET_CODE (operands[0]) == MEM
4711 || GET_CODE (operands[1]) == MEM)
4712 {
4713#if DEBUG_CMP
4714 fprintf(stderr, "comparisons with memory:\n");
4715 debug_rtx(prev);
4716#endif
4717 return false;
4718 }
4719
4720 /* Check for PREV changing a register that's used to compute a
4721 value in CMP, even if it doesn't otherwise change flags. */
4722 if (GET_CODE (operands[0]) == REG
4723 && rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[0]))
4724 {
4725#if DEBUG_CMP
4726 fprintf(stderr, "sub-value affected, op0:\n");
4727 debug_rtx(prev);
4728#endif
4729 return false;
4730 }
4731 if (GET_CODE (operands[1]) == REG
4732 && rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[1]))
4733 {
4734#if DEBUG_CMP
4735 fprintf(stderr, "sub-value affected, op1:\n");
4736 debug_rtx(prev);
4737#endif
4738 return false;
4739 }
4740
16659fcf
DD
4741 } while (pflags == FLAGS_N);
4742#if DEBUG_CMP
4743 fprintf(stderr, "previous flag-setting insn:\n");
4744 debug_rtx(prev);
4745 debug_rtx(pp);
4746#endif
4747
4748 if (GET_CODE (pp) == SET
4749 && GET_CODE (XEXP (pp, 0)) == REG
4750 && REGNO (XEXP (pp, 0)) == FLG_REGNO
4751 && GET_CODE (XEXP (pp, 1)) == COMPARE)
4752 {
4753 /* Adjacent cbranches must have the same operands to be
4754 redundant. */
4755 rtx pop0 = XEXP (XEXP (pp, 1), 0);
4756 rtx pop1 = XEXP (XEXP (pp, 1), 1);
4757#if DEBUG_CMP
4758 fprintf(stderr, "adjacent cbranches\n");
4759 debug_rtx(pop0);
4760 debug_rtx(pop1);
4761#endif
4762 if (rtx_equal_p (op0, pop0)
4763 && rtx_equal_p (op1, pop1))
4764 return true;
4765#if DEBUG_CMP
4766 fprintf(stderr, "prev cmp not same\n");
4767#endif
4768 return false;
4769 }
4770
4771 /* Else the previous insn must be a SET, with either the source or
4772 dest equal to operands[0], and operands[1] must be zero. */
4773
4774 if (!rtx_equal_p (op1, const0_rtx))
4775 {
4776#if DEBUG_CMP
4777 fprintf(stderr, "operands[1] not const0_rtx\n");
4778#endif
4779 return false;
4780 }
4781 if (GET_CODE (pp) != SET)
4782 {
4783#if DEBUG_CMP
4784 fprintf (stderr, "pp not set\n");
4785#endif
4786 return false;
4787 }
4788 if (!rtx_equal_p (op0, SET_SRC (pp))
4789 && !rtx_equal_p (op0, SET_DEST (pp)))
4790 {
4791#if DEBUG_CMP
4792 fprintf(stderr, "operands[0] not found in set\n");
4793#endif
4794 return false;
4795 }
4796
4797#if DEBUG_CMP
4798 fprintf(stderr, "cmp flags %x prev flags %x\n", flags_needed, pflags);
4799#endif
4800 if ((pflags & flags_needed) == flags_needed)
4801 return true;
4802
4803 return false;
4804}
4805
4806/* Return the pattern for a compare. This will be commented out if
4807 the compare is redundant, else a normal pattern is returned. Thus,
4808 the assembler output says where the compare would have been. */
4809char *
4810m32c_output_compare (rtx insn, rtx *operands)
4811{
0a2aaacc 4812 static char templ[] = ";cmp.b\t%1,%0";
16659fcf
DD
4813 /* ^ 5 */
4814
0a2aaacc 4815 templ[5] = " bwll"[GET_MODE_SIZE(GET_MODE(operands[0]))];
16659fcf
DD
4816 if (m32c_compare_redundant (insn, operands))
4817 {
4818#if DEBUG_CMP
4819 fprintf(stderr, "cbranch: cmp not needed\n");
4820#endif
0a2aaacc 4821 return templ;
16659fcf
DD
4822 }
4823
4824#if DEBUG_CMP
b3c5a409 4825 fprintf(stderr, "cbranch: cmp needed: `%s'\n", templ + 1);
16659fcf 4826#endif
0a2aaacc 4827 return templ + 1;
16659fcf
DD
4828}
4829
5abd2125
JS
4830#undef TARGET_ENCODE_SECTION_INFO
4831#define TARGET_ENCODE_SECTION_INFO m32c_encode_section_info
4832
b52b1749
AS
4833/* If the frame pointer isn't used, we detect it manually. But the
4834 stack pointer doesn't have as flexible addressing as the frame
4835 pointer, so we always assume we have it. */
4836
4837#undef TARGET_FRAME_POINTER_REQUIRED
4838#define TARGET_FRAME_POINTER_REQUIRED hook_bool_void_true
4839
38b2d076
DD
4840/* The Global `targetm' Variable. */
4841
4842struct gcc_target targetm = TARGET_INITIALIZER;
4843
4844#include "gt-m32c.h"