]> git.ipfire.org Git - thirdparty/gcc.git/blame - gcc/config/m32c/m32c.c
host-hpux.c: Change copyright header to refer to version 3 of the GNU General Public...
[thirdparty/gcc.git] / gcc / config / m32c / m32c.c
CommitLineData
38b2d076 1/* Target Code for R8C/M16C/M32C
6fb5fa3c 2 Copyright (C) 2005, 2006, 2007
38b2d076
DD
3 Free Software Foundation, Inc.
4 Contributed by Red Hat.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it
9 under the terms of the GNU General Public License as published
2f83c7d6 10 by the Free Software Foundation; either version 3, or (at your
38b2d076
DD
11 option) any later version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT
14 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
16 License for more details.
17
18 You should have received a copy of the GNU General Public License
2f83c7d6
NC
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
38b2d076
DD
21
22#include "config.h"
23#include "system.h"
24#include "coretypes.h"
25#include "tm.h"
26#include "rtl.h"
27#include "regs.h"
28#include "hard-reg-set.h"
29#include "real.h"
30#include "insn-config.h"
31#include "conditions.h"
32#include "insn-flags.h"
33#include "output.h"
34#include "insn-attr.h"
35#include "flags.h"
36#include "recog.h"
37#include "reload.h"
38#include "toplev.h"
39#include "obstack.h"
40#include "tree.h"
41#include "expr.h"
42#include "optabs.h"
43#include "except.h"
44#include "function.h"
45#include "ggc.h"
46#include "target.h"
47#include "target-def.h"
48#include "tm_p.h"
49#include "langhooks.h"
50#include "tree-gimple.h"
fa9fd28a 51#include "df.h"
38b2d076
DD
52
53/* Prototypes */
54
55/* Used by m32c_pushm_popm. */
56typedef enum
57{
58 PP_pushm,
59 PP_popm,
60 PP_justcount
61} Push_Pop_Type;
62
63static tree interrupt_handler (tree *, tree, tree, int, bool *);
5abd2125 64static tree function_vector_handler (tree *, tree, tree, int, bool *);
38b2d076
DD
65static int interrupt_p (tree node);
66static bool m32c_asm_integer (rtx, unsigned int, int);
67static int m32c_comp_type_attributes (tree, tree);
68static bool m32c_fixed_condition_code_regs (unsigned int *, unsigned int *);
69static struct machine_function *m32c_init_machine_status (void);
70static void m32c_insert_attributes (tree, tree *);
71static bool m32c_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
72 tree, bool);
73static bool m32c_promote_prototypes (tree);
74static int m32c_pushm_popm (Push_Pop_Type);
75static bool m32c_strict_argument_naming (CUMULATIVE_ARGS *);
76static rtx m32c_struct_value_rtx (tree, int);
77static rtx m32c_subreg (enum machine_mode, rtx, enum machine_mode, int);
78static int need_to_save (int);
5abd2125
JS
79int current_function_special_page_vector (rtx);
80
81#define SYMBOL_FLAG_FUNCVEC_FUNCTION (SYMBOL_FLAG_MACH_DEP << 0)
38b2d076
DD
82
83#define streq(a,b) (strcmp ((a), (b)) == 0)
84
85/* Internal support routines */
86
87/* Debugging statements are tagged with DEBUG0 only so that they can
88 be easily enabled individually, by replacing the '0' with '1' as
89 needed. */
90#define DEBUG0 0
91#define DEBUG1 1
92
93#if DEBUG0
94/* This is needed by some of the commented-out debug statements
95 below. */
96static char const *class_names[LIM_REG_CLASSES] = REG_CLASS_NAMES;
97#endif
98static int class_contents[LIM_REG_CLASSES][1] = REG_CLASS_CONTENTS;
99
100/* These are all to support encode_pattern(). */
101static char pattern[30], *patternp;
102static GTY(()) rtx patternr[30];
103#define RTX_IS(x) (streq (pattern, x))
104
105/* Some macros to simplify the logic throughout this file. */
106#define IS_MEM_REGNO(regno) ((regno) >= MEM0_REGNO && (regno) <= MEM7_REGNO)
107#define IS_MEM_REG(rtx) (GET_CODE (rtx) == REG && IS_MEM_REGNO (REGNO (rtx)))
108
109#define IS_CR_REGNO(regno) ((regno) >= SB_REGNO && (regno) <= PC_REGNO)
110#define IS_CR_REG(rtx) (GET_CODE (rtx) == REG && IS_CR_REGNO (REGNO (rtx)))
111
112/* We do most RTX matching by converting the RTX into a string, and
113 using string compares. This vastly simplifies the logic in many of
114 the functions in this file.
115
116 On exit, pattern[] has the encoded string (use RTX_IS("...") to
117 compare it) and patternr[] has pointers to the nodes in the RTX
118 corresponding to each character in the encoded string. The latter
119 is mostly used by print_operand().
120
121 Unrecognized patterns have '?' in them; this shows up when the
122 assembler complains about syntax errors.
123*/
124
/* Recursively encode the RTX X into the global pattern[] buffer, one
   character per interesting node ('r' REG, 'm' MEM, '+' PLUS, 's'
   SYMBOL_REF, 'i' constant, etc.), recording each visited node in
   patternr[] so print_operand() can retrieve it by position.  Unknown
   codes encode as '?', which never matches an RTX_IS() test.  */
static void
encode_pattern_1 (rtx x)
{
  int i;

  /* Buffer nearly full: leave room for the NUL and poison the tail
     with '?' so a truncated pattern cannot accidentally match.  */
  if (patternp == pattern + sizeof (pattern) - 2)
    {
      patternp[-1] = '?';
      return;
    }

  patternr[patternp - pattern] = x;

  switch (GET_CODE (x))
    {
    case REG:
      *patternp++ = 'r';
      break;
    case SUBREG:
      /* Only size-changing subregs get their own marker; a same-size
	 subreg encodes as its inner expression alone.  */
      if (GET_MODE_SIZE (GET_MODE (x)) !=
	  GET_MODE_SIZE (GET_MODE (XEXP (x, 0))))
	*patternp++ = 'S';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case MEM:
      *patternp++ = 'm';
      /* FALLTHRU - after emitting 'm', a MEM's address is encoded the
	 same way a CONST's operand is.  */
    case CONST:
      encode_pattern_1 (XEXP (x, 0));
      break;
    case PLUS:
      *patternp++ = '+';
      encode_pattern_1 (XEXP (x, 0));
      encode_pattern_1 (XEXP (x, 1));
      break;
    case PRE_DEC:
      *patternp++ = '>';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case POST_INC:
      *patternp++ = '<';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case LO_SUM:
      *patternp++ = 'L';
      encode_pattern_1 (XEXP (x, 0));
      encode_pattern_1 (XEXP (x, 1));
      break;
    case HIGH:
      *patternp++ = 'H';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case SYMBOL_REF:
      *patternp++ = 's';
      break;
    case LABEL_REF:
      *patternp++ = 'l';
      break;
    case CODE_LABEL:
      *patternp++ = 'c';
      break;
    case CONST_INT:
    case CONST_DOUBLE:
      *patternp++ = 'i';
      break;
    case UNSPEC:
      /* 'u' followed by the unspec number as an ASCII digit, then the
	 encoded operands.  */
      *patternp++ = 'u';
      *patternp++ = '0' + XCINT (x, 1, UNSPEC);
      for (i = 0; i < XVECLEN (x, 0); i++)
	encode_pattern_1 (XVECEXP (x, 0, i));
      break;
    case USE:
      *patternp++ = 'U';
      break;
    case PARALLEL:
      *patternp++ = '|';
      for (i = 0; i < XVECLEN (x, 0); i++)
	encode_pattern_1 (XVECEXP (x, 0, i));
      break;
    case EXPR_LIST:
      *patternp++ = 'E';
      encode_pattern_1 (XEXP (x, 0));
      if (XEXP (x, 1))
	encode_pattern_1 (XEXP (x, 1));
      break;
    default:
      *patternp++ = '?';
#if DEBUG0
      fprintf (stderr, "can't encode pattern %s\n",
	       GET_RTX_NAME (GET_CODE (x)));
      debug_rtx (x);
      gcc_unreachable ();
#endif
      break;
    }
}
220
221static void
222encode_pattern (rtx x)
223{
224 patternp = pattern;
225 encode_pattern_1 (x);
226 *patternp = 0;
227}
228
229/* Since register names indicate the mode they're used in, we need a
230 way to determine which name to refer to the register with. Called
231 by print_operand(). */
232
233static const char *
234reg_name_with_mode (int regno, enum machine_mode mode)
235{
236 int mlen = GET_MODE_SIZE (mode);
237 if (regno == R0_REGNO && mlen == 1)
238 return "r0l";
239 if (regno == R0_REGNO && (mlen == 3 || mlen == 4))
240 return "r2r0";
241 if (regno == R0_REGNO && mlen == 6)
242 return "r2r1r0";
243 if (regno == R0_REGNO && mlen == 8)
244 return "r3r1r2r0";
245 if (regno == R1_REGNO && mlen == 1)
246 return "r1l";
247 if (regno == R1_REGNO && (mlen == 3 || mlen == 4))
248 return "r3r1";
249 if (regno == A0_REGNO && TARGET_A16 && (mlen == 3 || mlen == 4))
250 return "a1a0";
251 return reg_names[regno];
252}
253
254/* How many bytes a register uses on stack when it's pushed. We need
255 to know this because the push opcode needs to explicitly indicate
256 the size of the register, even though the name of the register
257 already tells it that. Used by m32c_output_reg_{push,pop}, which
258 is only used through calls to ASM_OUTPUT_REG_{PUSH,POP}. */
259
260static int
261reg_push_size (int regno)
262{
263 switch (regno)
264 {
265 case R0_REGNO:
266 case R1_REGNO:
267 return 2;
268 case R2_REGNO:
269 case R3_REGNO:
270 case FLG_REGNO:
271 return 2;
272 case A0_REGNO:
273 case A1_REGNO:
274 case SB_REGNO:
275 case FB_REGNO:
276 case SP_REGNO:
277 if (TARGET_A16)
278 return 2;
279 else
280 return 3;
281 default:
282 gcc_unreachable ();
283 }
284}
285
/* Lazily-built table of how many hard registers each class contains;
   allocated and filled on first call to reduce_class.  */
static int *class_sizes = 0;

/* Given two register classes, find the largest intersection between
   them.  If there is no intersection, return RETURNED_IF_EMPTY
   instead.  */
static int
reduce_class (int original_class, int limiting_class, int returned_if_empty)
{
  int cc = class_contents[original_class][0];
  int i, best = NO_REGS;
  int best_size = 0;

  if (original_class == limiting_class)
    return original_class;

  /* First call: count the members of every register class, once.  */
  if (!class_sizes)
    {
      int r;
      class_sizes = (int *) xmalloc (LIM_REG_CLASSES * sizeof (int));
      for (i = 0; i < LIM_REG_CLASSES; i++)
	{
	  class_sizes[i] = 0;
	  for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
	    if (class_contents[i][0] & (1 << r))
	      class_sizes[i]++;
	}
    }

  /* CC is now the raw register intersection; find the biggest class
     wholly contained in it.  */
  cc &= class_contents[limiting_class][0];
  for (i = 0; i < LIM_REG_CLASSES; i++)
    {
      int ic = class_contents[i][0];

      /* (~cc & ic) == 0 means every member of class I is in CC.  */
      if ((~cc & ic) == 0)
	if (best_size < class_sizes[i])
	  {
	    best = i;
	    best_size = class_sizes[i];
	  }

    }
  if (best == NO_REGS)
    return returned_if_empty;
  return best;
}
331
332/* Returns TRUE If there are any registers that exist in both register
333 classes. */
334static int
335classes_intersect (int class1, int class2)
336{
337 return class_contents[class1][0] & class_contents[class2][0];
338}
339
340/* Used by m32c_register_move_cost to determine if a move is
341 impossibly expensive. */
342static int
343class_can_hold_mode (int class, enum machine_mode mode)
344{
345 /* Cache the results: 0=untested 1=no 2=yes */
346 static char results[LIM_REG_CLASSES][MAX_MACHINE_MODE];
347 if (results[class][mode] == 0)
348 {
349 int r, n, i;
350 results[class][mode] = 1;
351 for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
352 if (class_contents[class][0] & (1 << r)
353 && HARD_REGNO_MODE_OK (r, mode))
354 {
355 int ok = 1;
356 n = HARD_REGNO_NREGS (r, mode);
357 for (i = 1; i < n; i++)
358 if (!(class_contents[class][0] & (1 << (r + i))))
359 ok = 0;
360 if (ok)
361 {
362 results[class][mode] = 2;
363 break;
364 }
365 }
366 }
367#if DEBUG0
368 fprintf (stderr, "class %s can hold %s? %s\n",
369 class_names[class], mode_name[mode],
370 (results[class][mode] == 2) ? "yes" : "no");
371#endif
372 return results[class][mode] == 2;
373}
374
375/* Run-time Target Specification. */
376
377/* Memregs are memory locations that gcc treats like general
378 registers, as there are a limited number of true registers and the
379 m32c families can use memory in most places that registers can be
380 used.
381
382 However, since memory accesses are more expensive than registers,
383 we allow the user to limit the number of memregs available, in
384 order to try to persuade gcc to try harder to use real registers.
385
386 Memregs are provided by m32c-lib1.S.
387*/
388
389int target_memregs = 16;
390static bool target_memregs_set = FALSE;
391int ok_to_change_target_memregs = TRUE;
392
393#undef TARGET_HANDLE_OPTION
394#define TARGET_HANDLE_OPTION m32c_handle_option
395static bool
396m32c_handle_option (size_t code,
397 const char *arg ATTRIBUTE_UNUSED,
398 int value ATTRIBUTE_UNUSED)
399{
400 if (code == OPT_memregs_)
401 {
402 target_memregs_set = TRUE;
403 target_memregs = atoi (arg);
404 }
405 return TRUE;
406}
407
408/* Implements OVERRIDE_OPTIONS. We limit memregs to 0..16, and
409 provide a default. */
410void
411m32c_override_options (void)
412{
413 if (target_memregs_set)
414 {
415 if (target_memregs < 0 || target_memregs > 16)
416 error ("invalid target memregs value '%d'", target_memregs);
417 }
418 else
07127a0a 419 target_memregs = 16;
38b2d076
DD
420}
421
422/* Defining data structures for per-function information */
423
424/* The usual; we set up our machine_function data. */
425static struct machine_function *
426m32c_init_machine_status (void)
427{
428 struct machine_function *machine;
429 machine =
430 (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
431
432 return machine;
433}
434
/* Implements INIT_EXPANDERS.  We just set up to call the above
   function, which lazily creates the per-function data when the
   compiler first needs it.  */
void
m32c_init_expanders (void)
{
  init_machine_status = m32c_init_machine_status;
}
442
443/* Storage Layout */
444
#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN m32c_promote_function_return

/* Implements TARGET_PROMOTE_FUNCTION_RETURN.  Return values are
   never promoted to a wider mode on this target, regardless of
   function type.  */
bool
m32c_promote_function_return (tree fntype ATTRIBUTE_UNUSED)
{
  return false;
}
452
453/* Register Basics */
454
455/* Basic Characteristics of Registers */
456
/* Whether a mode fits in a register is complex enough to warrant a
   table.  Each entry gives the number of hard registers a value of
   the given width occupies when it starts at that register; 0 means
   the mode does not fit there at all.  Indexed by hard regno.  */
static struct
{
  char qi_regs;   /* 1-byte (QImode) values */
  char hi_regs;   /* 2-byte (HImode) values */
  char pi_regs;   /* pointer-sized (PSImode) values */
  char si_regs;   /* 4-byte (SImode) values */
  char di_regs;   /* 8-byte (DImode) values */
} nregs_table[FIRST_PSEUDO_REGISTER] =
{
  { 1, 1, 2, 2, 4 },		/* r0 */
  { 0, 1, 0, 0, 0 },		/* r2 */
  { 1, 1, 2, 2, 0 },		/* r1 */
  { 0, 1, 0, 0, 0 },		/* r3 */
  { 0, 1, 1, 0, 0 },		/* a0 */
  { 0, 1, 1, 0, 0 },		/* a1 */
  { 0, 1, 1, 0, 0 },		/* sb */
  { 0, 1, 1, 0, 0 },		/* fb */
  { 0, 1, 1, 0, 0 },		/* sp */
  { 1, 1, 1, 0, 0 },		/* pc */
  { 0, 0, 0, 0, 0 },		/* fl */
  { 1, 1, 1, 0, 0 },		/* ap */
  { 1, 1, 2, 2, 4 },		/* mem0 */
  { 1, 1, 2, 2, 4 },		/* mem1 */
  { 1, 1, 2, 2, 4 },		/* mem2 */
  { 1, 1, 2, 2, 4 },		/* mem3 */
  { 1, 1, 2, 2, 4 },		/* mem4 */
  { 1, 1, 2, 2, 0 },		/* mem5 */
  { 1, 1, 2, 2, 0 },		/* mem6 */
  { 1, 1, 0, 0, 0 },		/* mem7 */
};
489
490/* Implements CONDITIONAL_REGISTER_USAGE. We adjust the number of
491 available memregs, and select which registers need to be preserved
492 across calls based on the chip family. */
493
494void
495m32c_conditional_register_usage (void)
496{
38b2d076
DD
497 int i;
498
499 if (0 <= target_memregs && target_memregs <= 16)
500 {
501 /* The command line option is bytes, but our "registers" are
502 16-bit words. */
503 for (i = target_memregs/2; i < 8; i++)
504 {
505 fixed_regs[MEM0_REGNO + i] = 1;
506 CLEAR_HARD_REG_BIT (reg_class_contents[MEM_REGS], MEM0_REGNO + i);
507 }
508 }
509
510 /* M32CM and M32C preserve more registers across function calls. */
511 if (TARGET_A24)
512 {
513 call_used_regs[R1_REGNO] = 0;
514 call_used_regs[R2_REGNO] = 0;
515 call_used_regs[R3_REGNO] = 0;
516 call_used_regs[A0_REGNO] = 0;
517 call_used_regs[A1_REGNO] = 0;
518 }
519}
520
521/* How Values Fit in Registers */
522
/* Implements HARD_REGNO_NREGS.  This is complicated by the fact that
   different registers are different sizes from each other, *and* may
   be different sizes in different chip families.  Returns 0 when the
   mode does not fit in REGNO at all.  */
int
m32c_hard_regno_nregs (int regno, enum machine_mode mode)
{
  /* The flags register holds exactly one CCmode value.  */
  if (regno == FLG_REGNO && mode == CCmode)
    return 1;
  /* Pseudos: generic word-count computation.  */
  if (regno >= FIRST_PSEUDO_REGISTER)
    return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);

  /* Memregs are 16-bit words; round the byte size up.  */
  if (regno >= MEM0_REGNO && regno <= MEM7_REGNO)
    return (GET_MODE_SIZE (mode) + 1) / 2;

  /* True hard registers: consult the per-register table.  The checks
     are ordered smallest-first, so the first width that fits wins.  */
  if (GET_MODE_SIZE (mode) <= 1)
    return nregs_table[regno].qi_regs;
  if (GET_MODE_SIZE (mode) <= 2)
    return nregs_table[regno].hi_regs;
  /* Special case: PSImode in a0 on A16 parts occupies 2 registers.  */
  if (regno == A0_REGNO && mode == PSImode && TARGET_A16)
    return 2;
  if ((GET_MODE_SIZE (mode) <= 3 || mode == PSImode) && TARGET_A24)
    return nregs_table[regno].pi_regs;
  if (GET_MODE_SIZE (mode) <= 4)
    return nregs_table[regno].si_regs;
  if (GET_MODE_SIZE (mode) <= 8)
    return nregs_table[regno].di_regs;
  return 0;
}
551
552/* Implements HARD_REGNO_MODE_OK. The above function does the work
553 already; just test its return value. */
554int
555m32c_hard_regno_ok (int regno, enum machine_mode mode)
556{
557 return m32c_hard_regno_nregs (regno, mode) != 0;
558}
559
/* Implements MODES_TIEABLE_P.  In general, modes aren't tieable since
   registers are all different sizes.  However, since most modes are
   bigger than our registers anyway, it's easier to implement this
   function that way, leaving QImode as the only unique case.
   NOTE: with the #if 0 block disabled, this currently always
   returns 1; the QImode exclusion is kept for reference.  */
int
m32c_modes_tieable_p (enum machine_mode m1, enum machine_mode m2)
{
  if (GET_MODE_SIZE (m1) == GET_MODE_SIZE (m2))
    return 1;

#if 0
  if (m1 == QImode || m2 == QImode)
    return 0;
#endif

  return 1;
}
577
578/* Register Classes */
579
/* Implements REGNO_REG_CLASS.  Maps each hard register to its most
   specific register class; memregs share MEM_REGS and anything else
   falls back to ALL_REGS.
   NOTE(review): the declared return type is "enum machine_mode", but
   every value returned is a register class (R0_REGS etc.).  This
   looks like a historical typo that happens to work because C enums
   convert freely -- confirm against m32c-protos.h before changing.  */
enum machine_mode
m32c_regno_reg_class (int regno)
{
  switch (regno)
    {
    case R0_REGNO:
      return R0_REGS;
    case R1_REGNO:
      return R1_REGS;
    case R2_REGNO:
      return R2_REGS;
    case R3_REGNO:
      return R3_REGS;
    case A0_REGNO:
    case A1_REGNO:
      return A_REGS;
    case SB_REGNO:
      return SB_REGS;
    case FB_REGNO:
      return FB_REGS;
    case SP_REGNO:
      return SP_REGS;
    case FLG_REGNO:
      return FLG_REGS;
    default:
      if (IS_MEM_REGNO (regno))
	return MEM_REGS;
      return ALL_REGS;
    }
}
611
/* Implements REG_CLASS_FROM_CONSTRAINT.  Note that some constraints only match
   for certain chip families.  Constraints are three characters long;
   unknown "R" constraints abort, anything else yields NO_REGS.  */
int
m32c_reg_class_from_constraint (char c ATTRIBUTE_UNUSED, const char *s)
{
  /* Control/system registers.  */
  if (memcmp (s, "Rsp", 3) == 0)
    return SP_REGS;
  if (memcmp (s, "Rfb", 3) == 0)
    return FB_REGS;
  if (memcmp (s, "Rsb", 3) == 0)
    return SB_REGS;
  /* CR register constraints are family-specific.  */
  if (memcmp (s, "Rcr", 3) == 0)
    return TARGET_A16 ? CR_REGS : NO_REGS;
  if (memcmp (s, "Rcl", 3) == 0)
    return TARGET_A24 ? CR_REGS : NO_REGS;
  /* Data register (sub)sets.  */
  if (memcmp (s, "R0w", 3) == 0)
    return R0_REGS;
  if (memcmp (s, "R1w", 3) == 0)
    return R1_REGS;
  if (memcmp (s, "R2w", 3) == 0)
    return R2_REGS;
  if (memcmp (s, "R3w", 3) == 0)
    return R3_REGS;
  if (memcmp (s, "R02", 3) == 0)
    return R02_REGS;
  if (memcmp (s, "R03", 3) == 0)
    return R03_REGS;
  if (memcmp (s, "Rdi", 3) == 0)
    return DI_REGS;
  if (memcmp (s, "Rhl", 3) == 0)
    return HL_REGS;
  if (memcmp (s, "R23", 3) == 0)
    return R23_REGS;
  /* Address registers; "Raw"/"Ral" are family-specific variants.  */
  if (memcmp (s, "Ra0", 3) == 0)
    return A0_REGS;
  if (memcmp (s, "Ra1", 3) == 0)
    return A1_REGS;
  if (memcmp (s, "Raa", 3) == 0)
    return A_REGS;
  if (memcmp (s, "Raw", 3) == 0)
    return TARGET_A16 ? A_REGS : NO_REGS;
  if (memcmp (s, "Ral", 3) == 0)
    return TARGET_A24 ? A_REGS : NO_REGS;
  /* Mode-oriented groupings.  */
  if (memcmp (s, "Rqi", 3) == 0)
    return QI_REGS;
  if (memcmp (s, "Rad", 3) == 0)
    return AD_REGS;
  if (memcmp (s, "Rsi", 3) == 0)
    return SI_REGS;
  if (memcmp (s, "Rhi", 3) == 0)
    return HI_REGS;
  if (memcmp (s, "Rhc", 3) == 0)
    return HC_REGS;
  if (memcmp (s, "Rra", 3) == 0)
    return RA_REGS;
  if (memcmp (s, "Rfl", 3) == 0)
    return FLG_REGS;
  /* Memregs, unless they have all been disabled by -memregs=0.  */
  if (memcmp (s, "Rmm", 3) == 0)
    {
      if (fixed_regs[MEM0_REGNO])
	return NO_REGS;
      return MEM_REGS;
    }

  /* PSImode registers - i.e. whatever can hold a pointer. */
  if (memcmp (s, "Rpi", 3) == 0)
    {
      if (TARGET_A16)
	return HI_REGS;
      else
	return RA_REGS;		/* r2r0 and r3r1 can hold pointers.  */
    }

  /* We handle this one as an EXTRA_CONSTRAINT. */
  if (memcmp (s, "Rpa", 3) == 0)
    return NO_REGS;

  /* Any other "R" constraint is a bug in the MD files.  */
  if (*s == 'R')
    {
      fprintf(stderr, "unrecognized R constraint: %.3s\n", s);
      gcc_unreachable();
    }

  return NO_REGS;
}
697
698/* Implements REGNO_OK_FOR_BASE_P. */
699int
700m32c_regno_ok_for_base_p (int regno)
701{
702 if (regno == A0_REGNO
703 || regno == A1_REGNO || regno >= FIRST_PSEUDO_REGISTER)
704 return 1;
705 return 0;
706}
707
#define DEBUG_RELOAD 0

/* Implements PREFERRED_RELOAD_CLASS.  In general, prefer general
   registers of the appropriate size.  Narrows RCLASS with
   reduce_class() based on the mode/size of X.  */
int
m32c_preferred_reload_class (rtx x, int rclass)
{
  int newclass = rclass;

#if DEBUG_RELOAD
  fprintf (stderr, "\npreferred_reload_class for %s is ",
	   class_names[rclass]);
#endif
  /* No class given: pick a general class suited to the mode.  */
  if (rclass == NO_REGS)
    rclass = GET_MODE (x) == QImode ? HL_REGS : R03_REGS;

  if (classes_intersect (rclass, CR_REGS))
    {
      switch (GET_MODE (x))
	{
	case QImode:
	  newclass = HL_REGS;
	  break;
	default:
	  /* newclass = HI_REGS; */
	  break;
	}
    }

  else if (newclass == QI_REGS && GET_MODE_SIZE (GET_MODE (x)) > 2)
    newclass = SI_REGS;
  /* Values wider than 4 bytes need the DI-capable registers, unless
     the class already excludes r0-r3 (mask 0x000f).  */
  else if (GET_MODE_SIZE (GET_MODE (x)) > 4
	   && ~class_contents[rclass][0] & 0x000f)
    newclass = DI_REGS;

  rclass = reduce_class (rclass, newclass, rclass);

  /* QImode values can only live in r0l/r1l.  */
  if (GET_MODE (x) == QImode)
    rclass = reduce_class (rclass, HL_REGS, rclass);

#if DEBUG_RELOAD
  fprintf (stderr, "%s\n", class_names[rclass]);
  debug_rtx (x);

  if (GET_CODE (x) == MEM
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
    fprintf (stderr, "Glorm!\n");
#endif
  return rclass;
}
759
/* Implements PREFERRED_OUTPUT_RELOAD_CLASS.  Output reloads use the
   same class-narrowing rules as input reloads.  */
int
m32c_preferred_output_reload_class (rtx x, int rclass)
{
  return m32c_preferred_reload_class (x, rclass);
}
766
767/* Implements LIMIT_RELOAD_CLASS. We basically want to avoid using
768 address registers for reloads since they're needed for address
769 reloads. */
770int
771m32c_limit_reload_class (enum machine_mode mode, int rclass)
772{
773#if DEBUG_RELOAD
774 fprintf (stderr, "limit_reload_class for %s: %s ->",
775 mode_name[mode], class_names[rclass]);
776#endif
777
778 if (mode == QImode)
779 rclass = reduce_class (rclass, HL_REGS, rclass);
780 else if (mode == HImode)
781 rclass = reduce_class (rclass, HI_REGS, rclass);
782 else if (mode == SImode)
783 rclass = reduce_class (rclass, SI_REGS, rclass);
784
785 if (rclass != A_REGS)
786 rclass = reduce_class (rclass, DI_REGS, rclass);
787
788#if DEBUG_RELOAD
789 fprintf (stderr, " %s\n", class_names[rclass]);
790#endif
791 return rclass;
792}
793
/* Implements SECONDARY_RELOAD_CLASS.  QImode have to be reloaded in
   r0 or r1, as those are the only real QImode registers.  CR regs get
   reloaded through appropriately sized general or address
   registers.  */
int
m32c_secondary_reload_class (int rclass, enum machine_mode mode, rtx x)
{
  int cc = class_contents[rclass][0];
#if DEBUG0
  fprintf (stderr, "\nsecondary reload class %s %s\n",
	   class_names[rclass], mode_name[mode]);
  debug_rtx (x);
#endif
  /* QImode memory into an r2/r3-only class needs a QI-capable
     intermediary (r0l/r1l).  */
  if (mode == QImode
      && GET_CODE (x) == MEM && (cc & ~class_contents[R23_REGS][0]) == 0)
    return QI_REGS;
  /* Control registers sb..sp reload through HI or A registers
     depending on the pointer width of the family.  */
  if (classes_intersect (rclass, CR_REGS)
      && GET_CODE (x) == REG
      && REGNO (x) >= SB_REGNO && REGNO (x) <= SP_REGNO)
    return TARGET_A16 ? HI_REGS : A_REGS;
  return NO_REGS;
}
816
817/* Implements CLASS_LIKELY_SPILLED_P. A_REGS is needed for address
818 reloads. */
819int
820m32c_class_likely_spilled_p (int regclass)
821{
822 if (regclass == A_REGS)
823 return 1;
824 return reg_class_size[regclass] == 1;
825}
826
827/* Implements CLASS_MAX_NREGS. We calculate this according to its
828 documented meaning, to avoid potential inconsistencies with actual
829 class definitions. */
830int
831m32c_class_max_nregs (int regclass, enum machine_mode mode)
832{
833 int rn, max = 0;
834
835 for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
836 if (class_contents[regclass][0] & (1 << rn))
837 {
838 int n = m32c_hard_regno_nregs (rn, mode);
839 if (max < n)
840 max = n;
841 }
842 return max;
843}
844
/* Implements CANNOT_CHANGE_MODE_CLASS.  Only r0 and r1 can change to
   QI (r0l, r1l) because the chip doesn't support QI ops on other
   registers (well, it does on a0/a1 but if we let gcc do that, reload
   suffers).  Otherwise, we allow changes to larger modes.  Returns
   nonzero when the change is NOT allowed.  */
int
m32c_cannot_change_mode_class (enum machine_mode from,
			       enum machine_mode to, int rclass)
{
  int rn;
#if DEBUG0
  fprintf (stderr, "cannot change from %s to %s in %s\n",
	   mode_name[from], mode_name[to], class_names[rclass]);
#endif

  /* If the larger mode isn't allowed in any of these registers, we
     can't allow the change. */
  for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
    if (class_contents[rclass][0] & (1 << rn))
      if (! m32c_hard_regno_ok (rn, to))
	return 1;

  /* Changing to QImode is disallowed if the class contains anything
     besides r0/r1 (mask 0x1ffa covers every other register).  */
  if (to == QImode)
    return (class_contents[rclass][0] & 0x1ffa);

  if (class_contents[rclass][0] & 0x0005 /* r0, r1 */
      && GET_MODE_SIZE (from) > 1)
    return 0;
  if (GET_MODE_SIZE (from) > 2)	/* all other regs */
    return 0;

  return 1;
}
877
878/* Helpers for the rest of the file. */
879/* TRUE if the rtx is a REG rtx for the given register. */
880#define IS_REG(rtx,regno) (GET_CODE (rtx) == REG \
881 && REGNO (rtx) == regno)
882/* TRUE if the rtx is a pseudo - specifically, one we can use as a
883 base register in address calculations (hence the "strict"
884 argument). */
885#define IS_PSEUDO(rtx,strict) (!strict && GET_CODE (rtx) == REG \
886 && (REGNO (rtx) == AP_REGNO \
887 || REGNO (rtx) >= FIRST_PSEUDO_REGISTER))
888
/* Implements CONST_OK_FOR_CONSTRAINT_P.  Currently, all constant
   constraints start with 'I', with the next two characters indicating
   the type and size of the range allowed.  */
int
m32c_const_ok_for_constraint_p (HOST_WIDE_INT value,
				char c ATTRIBUTE_UNUSED, const char *str)
{
  /* s=signed u=unsigned n=nonzero m=minus l=log2able,
     [sun] bits [SUN] bytes, p=pointer size
     I[-0-9][0-9] matches that number */
  /* Signed ranges by bit/byte count.  */
  if (memcmp (str, "Is3", 3) == 0)
    {
      return (-8 <= value && value <= 7);
    }
  if (memcmp (str, "IS1", 3) == 0)
    {
      return (-128 <= value && value <= 127);
    }
  if (memcmp (str, "IS2", 3) == 0)
    {
      return (-32768 <= value && value <= 32767);
    }
  /* Unsigned ranges.  */
  if (memcmp (str, "IU2", 3) == 0)
    {
      return (0 <= value && value <= 65535);
    }
  if (memcmp (str, "IU3", 3) == 0)
    {
      return (0 <= value && value <= 0x00ffffff);
    }
  /* "In" ranges additionally require the value to be nonzero (the
     middle "&& value" term).  */
  if (memcmp (str, "In4", 3) == 0)
    {
      return (-8 <= value && value && value <= 8);
    }
  if (memcmp (str, "In5", 3) == 0)
    {
      return (-16 <= value && value && value <= 16);
    }
  if (memcmp (str, "In6", 3) == 0)
    {
      return (-32 <= value && value && value <= 32);
    }
  /* Strictly negative 16-bit values.  */
  if (memcmp (str, "IM2", 3) == 0)
    {
      return (-65536 <= value && value && value <= -1);
    }
  /* Single-bit masks: "Il?" matches a one-bit value, "Im?" matches a
     value with exactly one bit clear, for byte (b) or word (w).  */
  if (memcmp (str, "Ilb", 3) == 0)
    {
      int b = exact_log2 (value);
      return (b >= 0 && b <= 7);
    }
  if (memcmp (str, "Imb", 3) == 0)
    {
      int b = exact_log2 ((value ^ 0xff) & 0xff);
      return (b >= 0 && b <= 7);
    }
  if (memcmp (str, "Ilw", 3) == 0)
    {
      int b = exact_log2 (value);
      return (b >= 0 && b <= 15);
    }
  if (memcmp (str, "Imw", 3) == 0)
    {
      int b = exact_log2 ((value ^ 0xffff) & 0xffff);
      return (b >= 0 && b <= 15);
    }
  if (memcmp (str, "I00", 3) == 0)
    {
      return (value == 0);
    }
  return 0;
}
961
962/* Implements EXTRA_CONSTRAINT_STR (see next function too). 'S' is
963 for memory constraints, plus "Rpa" for PARALLEL rtx's we use for
964 call return values. */
965int
966m32c_extra_constraint_p2 (rtx value, char c ATTRIBUTE_UNUSED, const char *str)
967{
968 encode_pattern (value);
969 if (memcmp (str, "Sd", 2) == 0)
970 {
971 /* This is the common "src/dest" address */
972 rtx r;
973 if (GET_CODE (value) == MEM && CONSTANT_P (XEXP (value, 0)))
974 return 1;
975 if (RTX_IS ("ms") || RTX_IS ("m+si"))
976 return 1;
07127a0a
DD
977 if (RTX_IS ("m++rii"))
978 {
979 if (REGNO (patternr[3]) == FB_REGNO
980 && INTVAL (patternr[4]) == 0)
981 return 1;
982 }
38b2d076
DD
983 if (RTX_IS ("mr"))
984 r = patternr[1];
985 else if (RTX_IS ("m+ri") || RTX_IS ("m+rs") || RTX_IS ("m+r+si"))
986 r = patternr[2];
987 else
988 return 0;
989 if (REGNO (r) == SP_REGNO)
990 return 0;
991 return m32c_legitimate_address_p (GET_MODE (value), XEXP (value, 0), 1);
992 }
993 else if (memcmp (str, "Sa", 2) == 0)
994 {
995 rtx r;
996 if (RTX_IS ("mr"))
997 r = patternr[1];
998 else if (RTX_IS ("m+ri"))
999 r = patternr[2];
1000 else
1001 return 0;
1002 return (IS_REG (r, A0_REGNO) || IS_REG (r, A1_REGNO));
1003 }
1004 else if (memcmp (str, "Si", 2) == 0)
1005 {
1006 return (RTX_IS ("mi") || RTX_IS ("ms") || RTX_IS ("m+si"));
1007 }
1008 else if (memcmp (str, "Ss", 2) == 0)
1009 {
1010 return ((RTX_IS ("mr")
1011 && (IS_REG (patternr[1], SP_REGNO)))
1012 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SP_REGNO))));
1013 }
1014 else if (memcmp (str, "Sf", 2) == 0)
1015 {
1016 return ((RTX_IS ("mr")
1017 && (IS_REG (patternr[1], FB_REGNO)))
1018 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], FB_REGNO))));
1019 }
1020 else if (memcmp (str, "Sb", 2) == 0)
1021 {
1022 return ((RTX_IS ("mr")
1023 && (IS_REG (patternr[1], SB_REGNO)))
1024 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SB_REGNO))));
1025 }
07127a0a
DD
1026 else if (memcmp (str, "Sp", 2) == 0)
1027 {
1028 /* Absolute addresses 0..0x1fff used for bit addressing (I/O ports) */
1029 return (RTX_IS ("mi")
1030 && !(INTVAL (patternr[1]) & ~0x1fff));
1031 }
38b2d076
DD
1032 else if (memcmp (str, "S1", 2) == 0)
1033 {
1034 return r1h_operand (value, QImode);
1035 }
1036
1037 gcc_assert (str[0] != 'S');
1038
1039 if (memcmp (str, "Rpa", 2) == 0)
1040 return GET_CODE (value) == PARALLEL;
1041
1042 return 0;
1043}
1044
/* This is for when we're debugging the above.  Thin wrapper around
   m32c_extra_constraint_p2 that logs the result when DEBUG0 is on.  */
int
m32c_extra_constraint_p (rtx value, char c, const char *str)
{
  int rv = m32c_extra_constraint_p2 (value, c, str);
#if DEBUG0
  fprintf (stderr, "\nconstraint %.*s: %d\n", CONSTRAINT_LEN (c, str), str,
	   rv);
  debug_rtx (value);
#endif
  return rv;
}
1057
1058/* Implements EXTRA_MEMORY_CONSTRAINT. Currently, we only use strings
1059 starting with 'S'. */
1060int
1061m32c_extra_memory_constraint (char c, const char *str ATTRIBUTE_UNUSED)
1062{
1063 return c == 'S';
1064}
1065
1066/* Implements EXTRA_ADDRESS_CONSTRAINT. We reserve 'A' strings for these,
1067 but don't currently define any. */
1068int
1069m32c_extra_address_constraint (char c, const char *str ATTRIBUTE_UNUSED)
1070{
1071 return c == 'A';
1072}
1073
1074/* STACK AND CALLING */
1075
1076/* Frame Layout */
1077
1078/* Implements RETURN_ADDR_RTX. Note that R8C and M16C push 24 bits
1079 (yes, THREE bytes) onto the stack for the return address, but we
1080 don't support pointers bigger than 16 bits on those chips. This
1081 will likely wreak havoc with exception unwinding. FIXME. */
1082rtx
1083m32c_return_addr_rtx (int count)
1084{
1085 enum machine_mode mode;
1086 int offset;
1087 rtx ra_mem;
1088
1089 if (count)
1090 return NULL_RTX;
1091 /* we want 2[$fb] */
1092
1093 if (TARGET_A24)
1094 {
1095 mode = SImode;
1096 offset = 4;
1097 }
1098 else
1099 {
1100 /* FIXME: it's really 3 bytes */
1101 mode = HImode;
1102 offset = 2;
1103 }
1104
1105 ra_mem =
1106 gen_rtx_MEM (mode, plus_constant (gen_rtx_REG (Pmode, FP_REGNO), offset));
1107 return copy_to_mode_reg (mode, ra_mem);
1108}
1109
/* Implements INCOMING_RETURN_ADDR_RTX.  See comment above.  On entry
   the return address is at the top of stack.  */
rtx
m32c_incoming_return_addr_rtx (void)
{
  /* we want [sp] */
  return gen_rtx_MEM (PSImode, gen_rtx_REG (PSImode, SP_REGNO));
}
1117
1118/* Exception Handling Support */
1119
1120/* Implements EH_RETURN_DATA_REGNO. Choose registers able to hold
1121 pointers. */
1122int
1123m32c_eh_return_data_regno (int n)
1124{
1125 switch (n)
1126 {
1127 case 0:
1128 return A0_REGNO;
1129 case 1:
1130 return A1_REGNO;
1131 default:
1132 return INVALID_REGNUM;
1133 }
1134}
1135
1136/* Implements EH_RETURN_STACKADJ_RTX. Saved and used later in
1137 m32c_emit_eh_epilogue. */
1138rtx
1139m32c_eh_return_stackadj_rtx (void)
1140{
1141 if (!cfun->machine->eh_stack_adjust)
1142 {
1143 rtx sa;
1144
99920b6f 1145 sa = gen_rtx_REG (Pmode, R0_REGNO);
38b2d076
DD
1146 cfun->machine->eh_stack_adjust = sa;
1147 }
1148 return cfun->machine->eh_stack_adjust;
1149}
1150
1151/* Registers That Address the Stack Frame */
1152
1153/* Implements DWARF_FRAME_REGNUM and DBX_REGISTER_NUMBER. Note that
1154 the original spec called for dwarf numbers to vary with register
1155 width as well, for example, r0l, r0, and r2r0 would each have
1156 different dwarf numbers. GCC doesn't support this, and we don't do
1157 it, and gdb seems to like it this way anyway. */
1158unsigned int
1159m32c_dwarf_frame_regnum (int n)
1160{
1161 switch (n)
1162 {
1163 case R0_REGNO:
1164 return 5;
1165 case R1_REGNO:
1166 return 6;
1167 case R2_REGNO:
1168 return 7;
1169 case R3_REGNO:
1170 return 8;
1171 case A0_REGNO:
1172 return 9;
1173 case A1_REGNO:
1174 return 10;
1175 case FB_REGNO:
1176 return 11;
1177 case SB_REGNO:
1178 return 19;
1179
1180 case SP_REGNO:
1181 return 12;
1182 case PC_REGNO:
1183 return 13;
1184 default:
1185 return DWARF_FRAME_REGISTERS + 1;
1186 }
1187}
1188
1189/* The frame looks like this:
1190
1191 ap -> +------------------------------
1192 | Return address (3 or 4 bytes)
1193 | Saved FB (2 or 4 bytes)
1194 fb -> +------------------------------
1195 | local vars
1196 | register saves fb
1197 | through r0 as needed
1198 sp -> +------------------------------
1199*/
1200
1201/* We use this to wrap all emitted insns in the prologue. */
1202static rtx
1203F (rtx x)
1204{
1205 RTX_FRAME_RELATED_P (x) = 1;
1206 return x;
1207}
1208
1209/* This maps register numbers to the PUSHM/POPM bitfield, and tells us
1210 how much the stack pointer moves for each, for each cpu family. */
1211static struct
1212{
1213 int reg1;
1214 int bit;
1215 int a16_bytes;
1216 int a24_bytes;
1217} pushm_info[] =
1218{
9d746d5e
DD
1219 /* These are in reverse push (nearest-to-sp) order. */
1220 { R0_REGNO, 0x80, 2, 2 },
38b2d076 1221 { R1_REGNO, 0x40, 2, 2 },
9d746d5e
DD
1222 { R2_REGNO, 0x20, 2, 2 },
1223 { R3_REGNO, 0x10, 2, 2 },
1224 { A0_REGNO, 0x08, 2, 4 },
1225 { A1_REGNO, 0x04, 2, 4 },
1226 { SB_REGNO, 0x02, 2, 4 },
1227 { FB_REGNO, 0x01, 2, 4 }
38b2d076
DD
1228};
1229
1230#define PUSHM_N (sizeof(pushm_info)/sizeof(pushm_info[0]))
1231
1232/* Returns TRUE if we need to save/restore the given register. We
1233 save everything for exception handlers, so that any register can be
1234 unwound. For interrupt handlers, we save everything if the handler
1235 calls something else (because we don't know what *that* function
1236 might do), but try to be a bit smarter if the handler is a leaf
1237 function. We always save $a0, though, because we use that in the
85f65093 1238 epilogue to copy $fb to $sp. */
38b2d076
DD
1239static int
1240need_to_save (int regno)
1241{
1242 if (fixed_regs[regno])
1243 return 0;
1244 if (cfun->calls_eh_return)
1245 return 1;
1246 if (regno == FP_REGNO)
1247 return 0;
1248 if (cfun->machine->is_interrupt
1249 && (!cfun->machine->is_leaf || regno == A0_REGNO))
1250 return 1;
6fb5fa3c 1251 if (df_regs_ever_live_p (regno)
38b2d076
DD
1252 && (!call_used_regs[regno] || cfun->machine->is_interrupt))
1253 return 1;
1254 return 0;
1255}
1256
1257/* This function contains all the intelligence about saving and
1258 restoring registers. It always figures out the register save set.
1259 When called with PP_justcount, it merely returns the size of the
1260 save set (for eliminating the frame pointer, for example). When
1261 called with PP_pushm or PP_popm, it emits the appropriate
1262 instructions for saving (pushm) or restoring (popm) the
1263 registers. */
1264static int
1265m32c_pushm_popm (Push_Pop_Type ppt)
1266{
1267 int reg_mask = 0;
1268 int byte_count = 0, bytes;
1269 int i;
1270 rtx dwarf_set[PUSHM_N];
1271 int n_dwarfs = 0;
1272 int nosave_mask = 0;
1273
1274 if (cfun->return_rtx
1275 && GET_CODE (cfun->return_rtx) == PARALLEL
1276 && !(cfun->calls_eh_return || cfun->machine->is_interrupt))
1277 {
1278 rtx exp = XVECEXP (cfun->return_rtx, 0, 0);
1279 rtx rv = XEXP (exp, 0);
1280 int rv_bytes = GET_MODE_SIZE (GET_MODE (rv));
1281
1282 if (rv_bytes > 2)
1283 nosave_mask |= 0x20; /* PSI, SI */
1284 else
1285 nosave_mask |= 0xf0; /* DF */
1286 if (rv_bytes > 4)
1287 nosave_mask |= 0x50; /* DI */
1288 }
1289
1290 for (i = 0; i < (int) PUSHM_N; i++)
1291 {
1292 /* Skip if neither register needs saving. */
1293 if (!need_to_save (pushm_info[i].reg1))
1294 continue;
1295
1296 if (pushm_info[i].bit & nosave_mask)
1297 continue;
1298
1299 reg_mask |= pushm_info[i].bit;
1300 bytes = TARGET_A16 ? pushm_info[i].a16_bytes : pushm_info[i].a24_bytes;
1301
1302 if (ppt == PP_pushm)
1303 {
1304 enum machine_mode mode = (bytes == 2) ? HImode : SImode;
1305 rtx addr;
1306
1307 /* Always use stack_pointer_rtx instead of calling
1308 rtx_gen_REG ourselves. Code elsewhere in GCC assumes
1309 that there is a single rtx representing the stack pointer,
1310 namely stack_pointer_rtx, and uses == to recognize it. */
1311 addr = stack_pointer_rtx;
1312
1313 if (byte_count != 0)
1314 addr = gen_rtx_PLUS (GET_MODE (addr), addr, GEN_INT (byte_count));
1315
1316 dwarf_set[n_dwarfs++] =
1317 gen_rtx_SET (VOIDmode,
1318 gen_rtx_MEM (mode, addr),
1319 gen_rtx_REG (mode, pushm_info[i].reg1));
1320 F (dwarf_set[n_dwarfs - 1]);
1321
1322 }
1323 byte_count += bytes;
1324 }
1325
1326 if (cfun->machine->is_interrupt)
1327 {
1328 cfun->machine->intr_pushm = reg_mask & 0xfe;
1329 reg_mask = 0;
1330 byte_count = 0;
1331 }
1332
1333 if (cfun->machine->is_interrupt)
1334 for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
1335 if (need_to_save (i))
1336 {
1337 byte_count += 2;
1338 cfun->machine->intr_pushmem[i - MEM0_REGNO] = 1;
1339 }
1340
1341 if (ppt == PP_pushm && byte_count)
1342 {
1343 rtx note = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (n_dwarfs + 1));
1344 rtx pushm;
1345
1346 if (reg_mask)
1347 {
1348 XVECEXP (note, 0, 0)
1349 = gen_rtx_SET (VOIDmode,
1350 stack_pointer_rtx,
1351 gen_rtx_PLUS (GET_MODE (stack_pointer_rtx),
1352 stack_pointer_rtx,
1353 GEN_INT (-byte_count)));
1354 F (XVECEXP (note, 0, 0));
1355
1356 for (i = 0; i < n_dwarfs; i++)
1357 XVECEXP (note, 0, i + 1) = dwarf_set[i];
1358
1359 pushm = F (emit_insn (gen_pushm (GEN_INT (reg_mask))));
1360
1361 REG_NOTES (pushm) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, note,
1362 REG_NOTES (pushm));
1363 }
1364
1365 if (cfun->machine->is_interrupt)
1366 for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
1367 if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
1368 {
1369 if (TARGET_A16)
1370 pushm = emit_insn (gen_pushhi_16 (gen_rtx_REG (HImode, i)));
1371 else
1372 pushm = emit_insn (gen_pushhi_24 (gen_rtx_REG (HImode, i)));
1373 F (pushm);
1374 }
1375 }
1376 if (ppt == PP_popm && byte_count)
1377 {
38b2d076
DD
1378 if (cfun->machine->is_interrupt)
1379 for (i = MEM7_REGNO; i >= MEM0_REGNO; i--)
1380 if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
1381 {
1382 if (TARGET_A16)
b3fdec9e 1383 emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, i)));
38b2d076 1384 else
b3fdec9e 1385 emit_insn (gen_pophi_24 (gen_rtx_REG (HImode, i)));
38b2d076
DD
1386 }
1387 if (reg_mask)
1388 emit_insn (gen_popm (GEN_INT (reg_mask)));
1389 }
1390
1391 return byte_count;
1392}
1393
1394/* Implements INITIAL_ELIMINATION_OFFSET. See the comment above that
1395 diagrams our call frame. */
1396int
1397m32c_initial_elimination_offset (int from, int to)
1398{
1399 int ofs = 0;
1400
1401 if (from == AP_REGNO)
1402 {
1403 if (TARGET_A16)
1404 ofs += 5;
1405 else
1406 ofs += 8;
1407 }
1408
1409 if (to == SP_REGNO)
1410 {
1411 ofs += m32c_pushm_popm (PP_justcount);
1412 ofs += get_frame_size ();
1413 }
1414
1415 /* Account for push rounding. */
1416 if (TARGET_A24)
1417 ofs = (ofs + 1) & ~1;
1418#if DEBUG0
1419 fprintf (stderr, "initial_elimination_offset from=%d to=%d, ofs=%d\n", from,
1420 to, ofs);
1421#endif
1422 return ofs;
1423}
1424
1425/* Passing Function Arguments on the Stack */
1426
1427#undef TARGET_PROMOTE_PROTOTYPES
1428#define TARGET_PROMOTE_PROTOTYPES m32c_promote_prototypes
1429static bool
1430m32c_promote_prototypes (tree fntype ATTRIBUTE_UNUSED)
1431{
1432 return 0;
1433}
1434
1435/* Implements PUSH_ROUNDING. The R8C and M16C have byte stacks, the
1436 M32C has word stacks. */
1437int
1438m32c_push_rounding (int n)
1439{
1440 if (TARGET_R8C || TARGET_M16C)
1441 return n;
1442 return (n + 1) & ~1;
1443}
1444
1445/* Passing Arguments in Registers */
1446
1447/* Implements FUNCTION_ARG. Arguments are passed partly in registers,
1448 partly on stack. If our function returns a struct, a pointer to a
1449 buffer for it is at the top of the stack (last thing pushed). The
1450 first few real arguments may be in registers as follows:
1451
1452 R8C/M16C: arg1 in r1 if it's QI or HI (else it's pushed on stack)
1453 arg2 in r2 if it's HI (else pushed on stack)
1454 rest on stack
1455 M32C: arg1 in r0 if it's QI or HI (else it's pushed on stack)
1456 rest on stack
1457
1458 Structs are not passed in registers, even if they fit. Only
1459 integer and pointer types are passed in registers.
1460
1461 Note that when arg1 doesn't fit in r1, arg2 may still be passed in
1462 r2 if it fits. */
1463rtx
1464m32c_function_arg (CUMULATIVE_ARGS * ca,
1465 enum machine_mode mode, tree type, int named)
1466{
1467 /* Can return a reg, parallel, or 0 for stack */
1468 rtx rv = NULL_RTX;
1469#if DEBUG0
1470 fprintf (stderr, "func_arg %d (%s, %d)\n",
1471 ca->parm_num, mode_name[mode], named);
1472 debug_tree (type);
1473#endif
1474
1475 if (mode == VOIDmode)
1476 return GEN_INT (0);
1477
1478 if (ca->force_mem || !named)
1479 {
1480#if DEBUG0
1481 fprintf (stderr, "func arg: force %d named %d, mem\n", ca->force_mem,
1482 named);
1483#endif
1484 return NULL_RTX;
1485 }
1486
1487 if (type && INTEGRAL_TYPE_P (type) && POINTER_TYPE_P (type))
1488 return NULL_RTX;
1489
9d746d5e
DD
1490 if (type && AGGREGATE_TYPE_P (type))
1491 return NULL_RTX;
1492
38b2d076
DD
1493 switch (ca->parm_num)
1494 {
1495 case 1:
1496 if (GET_MODE_SIZE (mode) == 1 || GET_MODE_SIZE (mode) == 2)
1497 rv = gen_rtx_REG (mode, TARGET_A16 ? R1_REGNO : R0_REGNO);
1498 break;
1499
1500 case 2:
1501 if (TARGET_A16 && GET_MODE_SIZE (mode) == 2)
1502 rv = gen_rtx_REG (mode, R2_REGNO);
1503 break;
1504 }
1505
1506#if DEBUG0
1507 debug_rtx (rv);
1508#endif
1509 return rv;
1510}
1511
1512#undef TARGET_PASS_BY_REFERENCE
1513#define TARGET_PASS_BY_REFERENCE m32c_pass_by_reference
1514static bool
1515m32c_pass_by_reference (CUMULATIVE_ARGS * ca ATTRIBUTE_UNUSED,
1516 enum machine_mode mode ATTRIBUTE_UNUSED,
1517 tree type ATTRIBUTE_UNUSED,
1518 bool named ATTRIBUTE_UNUSED)
1519{
1520 return 0;
1521}
1522
1523/* Implements INIT_CUMULATIVE_ARGS. */
1524void
1525m32c_init_cumulative_args (CUMULATIVE_ARGS * ca,
9d746d5e 1526 tree fntype,
38b2d076 1527 rtx libname ATTRIBUTE_UNUSED,
9d746d5e 1528 tree fndecl,
38b2d076
DD
1529 int n_named_args ATTRIBUTE_UNUSED)
1530{
9d746d5e
DD
1531 if (fntype && aggregate_value_p (TREE_TYPE (fntype), fndecl))
1532 ca->force_mem = 1;
1533 else
1534 ca->force_mem = 0;
38b2d076
DD
1535 ca->parm_num = 1;
1536}
1537
1538/* Implements FUNCTION_ARG_ADVANCE. force_mem is set for functions
1539 returning structures, so we always reset that. Otherwise, we only
1540 need to know the sequence number of the argument to know what to do
1541 with it. */
1542void
1543m32c_function_arg_advance (CUMULATIVE_ARGS * ca,
1544 enum machine_mode mode ATTRIBUTE_UNUSED,
1545 tree type ATTRIBUTE_UNUSED,
1546 int named ATTRIBUTE_UNUSED)
1547{
1548 if (ca->force_mem)
1549 ca->force_mem = 0;
9d746d5e
DD
1550 else
1551 ca->parm_num++;
38b2d076
DD
1552}
1553
1554/* Implements FUNCTION_ARG_REGNO_P. */
1555int
1556m32c_function_arg_regno_p (int r)
1557{
1558 if (TARGET_A24)
1559 return (r == R0_REGNO);
1560 return (r == R1_REGNO || r == R2_REGNO);
1561}
1562
e9555b13 1563/* HImode and PSImode are the two "native" modes as far as GCC is
85f65093 1564 concerned, but the chips also support a 32-bit mode which is used
e9555b13
DD
1565 for some opcodes in R8C/M16C and for reset vectors and such. */
1566#undef TARGET_VALID_POINTER_MODE
1567#define TARGET_VALID_POINTER_MODE m32c_valid_pointer_mode
23fed240 1568static bool
e9555b13
DD
1569m32c_valid_pointer_mode (enum machine_mode mode)
1570{
e9555b13
DD
1571 if (mode == HImode
1572 || mode == PSImode
1573 || mode == SImode
1574 )
1575 return 1;
1576 return 0;
1577}
1578
38b2d076
DD
1579/* How Scalar Function Values Are Returned */
1580
1581/* Implements LIBCALL_VALUE. Most values are returned in $r0, or some
1582 combination of registers starting there (r2r0 for longs, r3r1r2r0
1583 for long long, r3r2r1r0 for doubles), except that that ABI
1584 currently doesn't work because it ends up using all available
1585 general registers and gcc often can't compile it. So, instead, we
1586 return anything bigger than 16 bits in "mem0" (effectively, a
1587 memory location). */
1588rtx
1589m32c_libcall_value (enum machine_mode mode)
1590{
1591 /* return reg or parallel */
1592#if 0
1593 /* FIXME: GCC has difficulty returning large values in registers,
1594 because that ties up most of the general registers and gives the
1595 register allocator little to work with. Until we can resolve
1596 this, large values are returned in memory. */
1597 if (mode == DFmode)
1598 {
1599 rtx rv;
1600
1601 rv = gen_rtx_PARALLEL (mode, rtvec_alloc (4));
1602 XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
1603 gen_rtx_REG (HImode,
1604 R0_REGNO),
1605 GEN_INT (0));
1606 XVECEXP (rv, 0, 1) = gen_rtx_EXPR_LIST (VOIDmode,
1607 gen_rtx_REG (HImode,
1608 R1_REGNO),
1609 GEN_INT (2));
1610 XVECEXP (rv, 0, 2) = gen_rtx_EXPR_LIST (VOIDmode,
1611 gen_rtx_REG (HImode,
1612 R2_REGNO),
1613 GEN_INT (4));
1614 XVECEXP (rv, 0, 3) = gen_rtx_EXPR_LIST (VOIDmode,
1615 gen_rtx_REG (HImode,
1616 R3_REGNO),
1617 GEN_INT (6));
1618 return rv;
1619 }
1620
1621 if (TARGET_A24 && GET_MODE_SIZE (mode) > 2)
1622 {
1623 rtx rv;
1624
1625 rv = gen_rtx_PARALLEL (mode, rtvec_alloc (1));
1626 XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
1627 gen_rtx_REG (mode,
1628 R0_REGNO),
1629 GEN_INT (0));
1630 return rv;
1631 }
1632#endif
1633
1634 if (GET_MODE_SIZE (mode) > 2)
1635 return gen_rtx_REG (mode, MEM0_REGNO);
1636 return gen_rtx_REG (mode, R0_REGNO);
1637}
1638
1639/* Implements FUNCTION_VALUE. Functions and libcalls have the same
1640 conventions. */
1641rtx
1642m32c_function_value (tree valtype, tree func ATTRIBUTE_UNUSED)
1643{
1644 /* return reg or parallel */
1645 enum machine_mode mode = TYPE_MODE (valtype);
1646 return m32c_libcall_value (mode);
1647}
1648
1649/* How Large Values Are Returned */
1650
1651/* We return structures by pushing the address on the stack, even if
1652 we use registers for the first few "real" arguments. */
1653#undef TARGET_STRUCT_VALUE_RTX
1654#define TARGET_STRUCT_VALUE_RTX m32c_struct_value_rtx
1655static rtx
1656m32c_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
1657 int incoming ATTRIBUTE_UNUSED)
1658{
1659 return 0;
1660}
1661
1662/* Function Entry and Exit */
1663
1664/* Implements EPILOGUE_USES. Interrupts restore all registers. */
1665int
1666m32c_epilogue_uses (int regno ATTRIBUTE_UNUSED)
1667{
1668 if (cfun->machine->is_interrupt)
1669 return 1;
1670 return 0;
1671}
1672
1673/* Implementing the Varargs Macros */
1674
1675#undef TARGET_STRICT_ARGUMENT_NAMING
1676#define TARGET_STRICT_ARGUMENT_NAMING m32c_strict_argument_naming
1677static bool
1678m32c_strict_argument_naming (CUMULATIVE_ARGS * ca ATTRIBUTE_UNUSED)
1679{
1680 return 1;
1681}
1682
1683/* Trampolines for Nested Functions */
1684
1685/*
1686 m16c:
1687 1 0000 75C43412 mov.w #0x1234,a0
1688 2 0004 FC000000 jmp.a label
1689
1690 m32c:
1691 1 0000 BC563412 mov.l:s #0x123456,a0
1692 2 0004 CC000000 jmp.a label
1693*/
1694
1695/* Implements TRAMPOLINE_SIZE. */
1696int
1697m32c_trampoline_size (void)
1698{
1699 /* Allocate extra space so we can avoid the messy shifts when we
1700 initialize the trampoline; we just write past the end of the
1701 opcode. */
1702 return TARGET_A16 ? 8 : 10;
1703}
1704
/* Implements TRAMPOLINE_ALIGNMENT.  Trampolines only need 2-byte
   (word) alignment.  */
int
m32c_trampoline_alignment (void)
{
  return 2;
}
1711
1712/* Implements INITIALIZE_TRAMPOLINE. */
1713void
1714m32c_initialize_trampoline (rtx tramp, rtx function, rtx chainval)
1715{
1716#define A0(m,i) gen_rtx_MEM (m, plus_constant (tramp, i))
1717 if (TARGET_A16)
1718 {
1719 /* Note: we subtract a "word" because the moves want signed
1720 constants, not unsigned constants. */
1721 emit_move_insn (A0 (HImode, 0), GEN_INT (0xc475 - 0x10000));
1722 emit_move_insn (A0 (HImode, 2), chainval);
1723 emit_move_insn (A0 (QImode, 4), GEN_INT (0xfc - 0x100));
85f65093
KH
1724 /* We use 16-bit addresses here, but store the zero to turn it
1725 into a 24-bit offset. */
38b2d076
DD
1726 emit_move_insn (A0 (HImode, 5), function);
1727 emit_move_insn (A0 (QImode, 7), GEN_INT (0x00));
1728 }
1729 else
1730 {
1731 /* Note that the PSI moves actually write 4 bytes. Make sure we
1732 write stuff out in the right order, and leave room for the
1733 extra byte at the end. */
1734 emit_move_insn (A0 (QImode, 0), GEN_INT (0xbc - 0x100));
1735 emit_move_insn (A0 (PSImode, 1), chainval);
1736 emit_move_insn (A0 (QImode, 4), GEN_INT (0xcc - 0x100));
1737 emit_move_insn (A0 (PSImode, 5), function);
1738 }
1739#undef A0
1740}
1741
07127a0a
DD
1742/* Implicit Calls to Library Routines */
1743
1744#undef TARGET_INIT_LIBFUNCS
1745#define TARGET_INIT_LIBFUNCS m32c_init_libfuncs
1746static void
1747m32c_init_libfuncs (void)
1748{
1749 if (TARGET_A24)
1750 {
1751 /* We do this because the M32C has an HImode operand, but the
85f65093 1752 M16C has an 8-bit operand. Since gcc looks at the match data
07127a0a
DD
1753 and not the expanded rtl, we have to reset the array so that
1754 the right modes are found. */
1755 setcc_gen_code[EQ] = CODE_FOR_seq_24;
1756 setcc_gen_code[NE] = CODE_FOR_sne_24;
1757 setcc_gen_code[GT] = CODE_FOR_sgt_24;
1758 setcc_gen_code[GE] = CODE_FOR_sge_24;
1759 setcc_gen_code[LT] = CODE_FOR_slt_24;
1760 setcc_gen_code[LE] = CODE_FOR_sle_24;
1761 setcc_gen_code[GTU] = CODE_FOR_sgtu_24;
1762 setcc_gen_code[GEU] = CODE_FOR_sgeu_24;
1763 setcc_gen_code[LTU] = CODE_FOR_sltu_24;
1764 setcc_gen_code[LEU] = CODE_FOR_sleu_24;
1765 }
1766}
1767
38b2d076
DD
1768/* Addressing Modes */
1769
1770/* Used by GO_IF_LEGITIMATE_ADDRESS. The r8c/m32c family supports a
1771 wide range of non-orthogonal addressing modes, including the
1772 ability to double-indirect on *some* of them. Not all insns
1773 support all modes, either, but we rely on predicates and
1774 constraints to deal with that. */
1775int
1776m32c_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
1777{
1778 int mode_adjust;
1779 if (CONSTANT_P (x))
1780 return 1;
1781
1782 /* Wide references to memory will be split after reload, so we must
1783 ensure that all parts of such splits remain legitimate
1784 addresses. */
1785 mode_adjust = GET_MODE_SIZE (mode) - 1;
1786
1787 /* allowing PLUS yields mem:HI(plus:SI(mem:SI(plus:SI in m32c_split_move */
1788 if (GET_CODE (x) == PRE_DEC
1789 || GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_MODIFY)
1790 {
1791 return (GET_CODE (XEXP (x, 0)) == REG
1792 && REGNO (XEXP (x, 0)) == SP_REGNO);
1793 }
1794
1795#if 0
1796 /* This is the double indirection detection, but it currently
1797 doesn't work as cleanly as this code implies, so until we've had
1798 a chance to debug it, leave it disabled. */
1799 if (TARGET_A24 && GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) != PLUS)
1800 {
1801#if DEBUG_DOUBLE
1802 fprintf (stderr, "double indirect\n");
1803#endif
1804 x = XEXP (x, 0);
1805 }
1806#endif
1807
1808 encode_pattern (x);
1809 if (RTX_IS ("r"))
1810 {
1811 /* Most indexable registers can be used without displacements,
1812 although some of them will be emitted with an explicit zero
1813 to please the assembler. */
1814 switch (REGNO (patternr[0]))
1815 {
1816 case A0_REGNO:
1817 case A1_REGNO:
1818 case SB_REGNO:
1819 case FB_REGNO:
1820 case SP_REGNO:
1821 return 1;
1822
1823 default:
1824 if (IS_PSEUDO (patternr[0], strict))
1825 return 1;
1826 return 0;
1827 }
1828 }
1829 if (RTX_IS ("+ri"))
1830 {
1831 /* This is more interesting, because different base registers
1832 allow for different displacements - both range and signedness
1833 - and it differs from chip series to chip series too. */
1834 int rn = REGNO (patternr[1]);
1835 HOST_WIDE_INT offs = INTVAL (patternr[2]);
1836 switch (rn)
1837 {
1838 case A0_REGNO:
1839 case A1_REGNO:
1840 case SB_REGNO:
1841 /* The syntax only allows positive offsets, but when the
1842 offsets span the entire memory range, we can simulate
1843 negative offsets by wrapping. */
1844 if (TARGET_A16)
1845 return (offs >= -65536 && offs <= 65535 - mode_adjust);
1846 if (rn == SB_REGNO)
1847 return (offs >= 0 && offs <= 65535 - mode_adjust);
1848 /* A0 or A1 */
1849 return (offs >= -16777216 && offs <= 16777215);
1850
1851 case FB_REGNO:
1852 if (TARGET_A16)
1853 return (offs >= -128 && offs <= 127 - mode_adjust);
1854 return (offs >= -65536 && offs <= 65535 - mode_adjust);
1855
1856 case SP_REGNO:
1857 return (offs >= -128 && offs <= 127 - mode_adjust);
1858
1859 default:
1860 if (IS_PSEUDO (patternr[1], strict))
1861 return 1;
1862 return 0;
1863 }
1864 }
1865 if (RTX_IS ("+rs") || RTX_IS ("+r+si"))
1866 {
1867 rtx reg = patternr[1];
1868
1869 /* We don't know where the symbol is, so only allow base
1870 registers which support displacements spanning the whole
1871 address range. */
1872 switch (REGNO (reg))
1873 {
1874 case A0_REGNO:
1875 case A1_REGNO:
1876 /* $sb needs a secondary reload, but since it's involved in
1877 memory address reloads too, we don't deal with it very
1878 well. */
1879 /* case SB_REGNO: */
1880 return 1;
1881 default:
1882 if (IS_PSEUDO (reg, strict))
1883 return 1;
1884 return 0;
1885 }
1886 }
1887 return 0;
1888}
1889
1890/* Implements REG_OK_FOR_BASE_P. */
1891int
1892m32c_reg_ok_for_base_p (rtx x, int strict)
1893{
1894 if (GET_CODE (x) != REG)
1895 return 0;
1896 switch (REGNO (x))
1897 {
1898 case A0_REGNO:
1899 case A1_REGNO:
1900 case SB_REGNO:
1901 case FB_REGNO:
1902 case SP_REGNO:
1903 return 1;
1904 default:
1905 if (IS_PSEUDO (x, strict))
1906 return 1;
1907 return 0;
1908 }
1909}
1910
04aff2c0 1911/* We have three choices for choosing fb->aN offsets. If we choose -128,
85f65093 1912 we need one MOVA -128[fb],aN opcode and 16-bit aN displacements,
04aff2c0
DD
1913 like this:
1914 EB 4B FF mova -128[$fb],$a0
1915 D8 0C FF FF mov.w:Q #0,-1[$a0]
1916
85f65093 1917 Alternately, we subtract the frame size, and hopefully use 8-bit aN
04aff2c0
DD
1918 displacements:
1919 7B F4 stc $fb,$a0
1920 77 54 00 01 sub #256,$a0
1921 D8 08 01 mov.w:Q #0,1[$a0]
1922
1923 If we don't offset (i.e. offset by zero), we end up with:
1924 7B F4 stc $fb,$a0
1925 D8 0C 00 FF mov.w:Q #0,-256[$a0]
1926
1927 We have to subtract *something* so that we have a PLUS rtx to mark
1928 that we've done this reload. The -128 offset will never result in
85f65093 1929 an 8-bit aN offset, and the payoff for the second case is five
04aff2c0
DD
1930 loads *if* those loads are within 256 bytes of the other end of the
1931 frame, so the third case seems best. Note that we subtract the
1932 zero, but detect that in the addhi3 pattern. */
1933
1934#define BIG_FB_ADJ 0
1935
38b2d076
DD
1936/* Implements LEGITIMIZE_ADDRESS. The only address we really have to
1937 worry about is frame base offsets, as $fb has a limited
1938 displacement range. We deal with this by attempting to reload $fb
1939 itself into an address register; that seems to result in the best
1940 code. */
1941int
1942m32c_legitimize_address (rtx * x ATTRIBUTE_UNUSED,
1943 rtx oldx ATTRIBUTE_UNUSED,
1944 enum machine_mode mode ATTRIBUTE_UNUSED)
1945{
1946#if DEBUG0
1947 fprintf (stderr, "m32c_legitimize_address for mode %s\n", mode_name[mode]);
1948 debug_rtx (*x);
1949 fprintf (stderr, "\n");
1950#endif
1951
1952 if (GET_CODE (*x) == PLUS
1953 && GET_CODE (XEXP (*x, 0)) == REG
1954 && REGNO (XEXP (*x, 0)) == FB_REGNO
1955 && GET_CODE (XEXP (*x, 1)) == CONST_INT
1956 && (INTVAL (XEXP (*x, 1)) < -128
1957 || INTVAL (XEXP (*x, 1)) > (128 - GET_MODE_SIZE (mode))))
1958 {
1959 /* reload FB to A_REGS */
38b2d076
DD
1960 rtx temp = gen_reg_rtx (Pmode);
1961 *x = copy_rtx (*x);
04aff2c0 1962 emit_insn (gen_rtx_SET (VOIDmode, temp, XEXP (*x, 0)));
38b2d076
DD
1963 XEXP (*x, 0) = temp;
1964 return 1;
1965 }
1966
1967 return 0;
1968}
1969
1970/* Implements LEGITIMIZE_RELOAD_ADDRESS. See comment above. */
1971int
1972m32c_legitimize_reload_address (rtx * x,
1973 enum machine_mode mode,
1974 int opnum,
1975 int type, int ind_levels ATTRIBUTE_UNUSED)
1976{
1977#if DEBUG0
1978 fprintf (stderr, "\nm32c_legitimize_reload_address for mode %s\n",
1979 mode_name[mode]);
1980 debug_rtx (*x);
1981#endif
1982
1983 /* At one point, this function tried to get $fb copied to an address
1984 register, which in theory would maximize sharing, but gcc was
1985 *also* still trying to reload the whole address, and we'd run out
1986 of address registers. So we let gcc do the naive (but safe)
1987 reload instead, when the above function doesn't handle it for
04aff2c0
DD
1988 us.
1989
1990 The code below is a second attempt at the above. */
1991
1992 if (GET_CODE (*x) == PLUS
1993 && GET_CODE (XEXP (*x, 0)) == REG
1994 && REGNO (XEXP (*x, 0)) == FB_REGNO
1995 && GET_CODE (XEXP (*x, 1)) == CONST_INT
1996 && (INTVAL (XEXP (*x, 1)) < -128
1997 || INTVAL (XEXP (*x, 1)) > (128 - GET_MODE_SIZE (mode))))
1998 {
1999 rtx sum;
2000 int offset = INTVAL (XEXP (*x, 1));
2001 int adjustment = -BIG_FB_ADJ;
2002
2003 sum = gen_rtx_PLUS (Pmode, XEXP (*x, 0),
2004 GEN_INT (adjustment));
2005 *x = gen_rtx_PLUS (Pmode, sum, GEN_INT (offset - adjustment));
2006 if (type == RELOAD_OTHER)
2007 type = RELOAD_FOR_OTHER_ADDRESS;
2008 push_reload (sum, NULL_RTX, &XEXP (*x, 0), NULL,
2009 A_REGS, Pmode, VOIDmode, 0, 0, opnum,
2010 type);
2011 return 1;
2012 }
2013
2014 if (GET_CODE (*x) == PLUS
2015 && GET_CODE (XEXP (*x, 0)) == PLUS
2016 && GET_CODE (XEXP (XEXP (*x, 0), 0)) == REG
2017 && REGNO (XEXP (XEXP (*x, 0), 0)) == FB_REGNO
2018 && GET_CODE (XEXP (XEXP (*x, 0), 1)) == CONST_INT
2019 && GET_CODE (XEXP (*x, 1)) == CONST_INT
2020 )
2021 {
2022 if (type == RELOAD_OTHER)
2023 type = RELOAD_FOR_OTHER_ADDRESS;
2024 push_reload (XEXP (*x, 0), NULL_RTX, &XEXP (*x, 0), NULL,
2025 A_REGS, Pmode, VOIDmode, 0, 0, opnum,
2026 type);
2027 return 1;
2028 }
38b2d076
DD
2029
2030 return 0;
2031}
2032
38b2d076
DD
2033/* Implements LEGITIMATE_CONSTANT_P. We split large constants anyway,
2034 so we can allow anything. */
2035int
2036m32c_legitimate_constant_p (rtx x ATTRIBUTE_UNUSED)
2037{
2038 return 1;
2039}
2040
2041
2042/* Condition Code Status */
2043
2044#undef TARGET_FIXED_CONDITION_CODE_REGS
2045#define TARGET_FIXED_CONDITION_CODE_REGS m32c_fixed_condition_code_regs
2046static bool
2047m32c_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
2048{
2049 *p1 = FLG_REGNO;
2050 *p2 = INVALID_REGNUM;
2051 return true;
2052}
2053
2054/* Describing Relative Costs of Operations */
2055
2056/* Implements REGISTER_MOVE_COST. We make impossible moves
2057 prohibitively expensive, like trying to put QIs in r2/r3 (there are
2058 no opcodes to do that). We also discourage use of mem* registers
2059 since they're really memory. */
2060int
2061m32c_register_move_cost (enum machine_mode mode, int from, int to)
2062{
2063 int cost = COSTS_N_INSNS (3);
2064 int cc = class_contents[from][0] | class_contents[to][0];
2065 /* FIXME: pick real values, but not 2 for now. */
2066 if (mode == QImode && (cc & class_contents[R23_REGS][0]))
2067 {
2068 if (!(cc & ~class_contents[R23_REGS][0]))
2069 cost = COSTS_N_INSNS (1000);
2070 else
2071 cost = COSTS_N_INSNS (80);
2072 }
2073
2074 if (!class_can_hold_mode (from, mode) || !class_can_hold_mode (to, mode))
2075 cost = COSTS_N_INSNS (1000);
2076
2077 if (classes_intersect (from, CR_REGS))
2078 cost += COSTS_N_INSNS (5);
2079
2080 if (classes_intersect (to, CR_REGS))
2081 cost += COSTS_N_INSNS (5);
2082
2083 if (from == MEM_REGS || to == MEM_REGS)
2084 cost += COSTS_N_INSNS (50);
2085 else if (classes_intersect (from, MEM_REGS)
2086 || classes_intersect (to, MEM_REGS))
2087 cost += COSTS_N_INSNS (10);
2088
2089#if DEBUG0
2090 fprintf (stderr, "register_move_cost %s from %s to %s = %d\n",
2091 mode_name[mode], class_names[from], class_names[to], cost);
2092#endif
2093 return cost;
2094}
2095
2096/* Implements MEMORY_MOVE_COST. */
2097int
2098m32c_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2099 int reg_class ATTRIBUTE_UNUSED,
2100 int in ATTRIBUTE_UNUSED)
2101{
2102 /* FIXME: pick real values. */
2103 return COSTS_N_INSNS (10);
2104}
2105
07127a0a
DD
2106/* Here we try to describe when we use multiple opcodes for one RTX so
2107 that gcc knows when to use them. */
2108#undef TARGET_RTX_COSTS
2109#define TARGET_RTX_COSTS m32c_rtx_costs
2110static bool
2111m32c_rtx_costs (rtx x, int code, int outer_code, int *total)
2112{
2113 switch (code)
2114 {
2115 case REG:
2116 if (REGNO (x) >= MEM0_REGNO && REGNO (x) <= MEM7_REGNO)
2117 *total += COSTS_N_INSNS (500);
2118 else
2119 *total += COSTS_N_INSNS (1);
2120 return true;
2121
2122 case ASHIFT:
2123 case LSHIFTRT:
2124 case ASHIFTRT:
2125 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
2126 {
2127 /* mov.b r1l, r1h */
2128 *total += COSTS_N_INSNS (1);
2129 return true;
2130 }
2131 if (INTVAL (XEXP (x, 1)) > 8
2132 || INTVAL (XEXP (x, 1)) < -8)
2133 {
2134 /* mov.b #N, r1l */
2135 /* mov.b r1l, r1h */
2136 *total += COSTS_N_INSNS (2);
2137 return true;
2138 }
2139 return true;
2140
2141 case LE:
2142 case LEU:
2143 case LT:
2144 case LTU:
2145 case GT:
2146 case GTU:
2147 case GE:
2148 case GEU:
2149 case NE:
2150 case EQ:
2151 if (outer_code == SET)
2152 {
2153 *total += COSTS_N_INSNS (2);
2154 return true;
2155 }
2156 break;
2157
2158 case ZERO_EXTRACT:
2159 {
2160 rtx dest = XEXP (x, 0);
2161 rtx addr = XEXP (dest, 0);
2162 switch (GET_CODE (addr))
2163 {
2164 case CONST_INT:
2165 *total += COSTS_N_INSNS (1);
2166 break;
2167 case SYMBOL_REF:
2168 *total += COSTS_N_INSNS (3);
2169 break;
2170 default:
2171 *total += COSTS_N_INSNS (2);
2172 break;
2173 }
2174 return true;
2175 }
2176 break;
2177
2178 default:
2179 /* Reasonable default. */
2180 if (TARGET_A16 && GET_MODE(x) == SImode)
2181 *total += COSTS_N_INSNS (2);
2182 break;
2183 }
2184 return false;
2185}
2186
2187#undef TARGET_ADDRESS_COST
2188#define TARGET_ADDRESS_COST m32c_address_cost
2189static int
2190m32c_address_cost (rtx addr)
2191{
2192 /* fprintf(stderr, "\naddress_cost\n");
2193 debug_rtx(addr);*/
2194 switch (GET_CODE (addr))
2195 {
2196 case CONST_INT:
2197 return COSTS_N_INSNS(1);
2198 case SYMBOL_REF:
2199 return COSTS_N_INSNS(3);
2200 case REG:
2201 return COSTS_N_INSNS(2);
2202 default:
2203 return 0;
2204 }
2205}
2206
38b2d076
DD
2207/* Defining the Output Assembler Language */
2208
2209/* The Overall Framework of an Assembler File */
2210
2211#undef TARGET_HAVE_NAMED_SECTIONS
2212#define TARGET_HAVE_NAMED_SECTIONS true
2213
2214/* Output of Data */
2215
2216/* We may have 24 bit sizes, which is the native address size.
2217 Currently unused, but provided for completeness. */
2218#undef TARGET_ASM_INTEGER
2219#define TARGET_ASM_INTEGER m32c_asm_integer
2220static bool
2221m32c_asm_integer (rtx x, unsigned int size, int aligned_p)
2222{
2223 switch (size)
2224 {
2225 case 3:
2226 fprintf (asm_out_file, "\t.3byte\t");
2227 output_addr_const (asm_out_file, x);
2228 fputc ('\n', asm_out_file);
2229 return true;
e9555b13
DD
2230 case 4:
2231 if (GET_CODE (x) == SYMBOL_REF)
2232 {
2233 fprintf (asm_out_file, "\t.long\t");
2234 output_addr_const (asm_out_file, x);
2235 fputc ('\n', asm_out_file);
2236 return true;
2237 }
2238 break;
38b2d076
DD
2239 }
2240 return default_assemble_integer (x, size, aligned_p);
2241}
2242
2243/* Output of Assembler Instructions */
2244
a4174ebf 2245/* We use a lookup table because the addressing modes are non-orthogonal. */
38b2d076
DD
2246
/* Each entry maps one (modifier CODE, operand shape PATTERN) pair to
   an output FORMAT string.  CODE is the PRINT_OPERAND modifier letter
   (0 for none).  PATTERN is the string that encode_pattern() produces
   for the operand.  In FORMAT, a digit N means "print patternr[N]",
   backslash quotes the next character, 'z' may insert a mandatory
   zero displacement, and any other character is emitted literally
   (see m32c_print_operand below, which interprets these).  */
static struct
{
  char code;
  char const *pattern;
  char const *format;
}
const conversions[] = {
  { 0, "r", "0" },

  /* Simple memory operands: reg-indirect, reg+offset, absolute.  */
  { 0, "mr", "z[1]" },
  { 0, "m+ri", "3[2]" },
  { 0, "m+rs", "3[2]" },
  { 0, "m+r+si", "4+5[2]" },
  { 0, "ms", "1" },
  { 0, "mi", "1" },
  { 0, "m+si", "2+3" },

  /* Doubly-indirect memory operands.  */
  { 0, "mmr", "[z[2]]" },
  { 0, "mm+ri", "[4[3]]" },
  { 0, "mm+rs", "[4[3]]" },
  { 0, "mm+r+si", "[5+6[3]]" },
  { 0, "mms", "[[2]]" },
  { 0, "mmi", "[[2]]" },
  { 0, "mm+si", "[4[3]]" },

  /* Immediates get a '#' prefix.  */
  { 0, "i", "#0" },
  { 0, "s", "#0" },
  { 0, "+si", "#1+2" },
  { 0, "l", "#0" },

  /* Modifier-specific renderings; e.g. 'd'/'D' print immediates with
     no '#' prefix, 'b'/'B' print bit numbers.  */
  { 'l', "l", "0" },
  { 'd', "i", "0" },
  { 'd', "s", "0" },
  { 'd', "+si", "1+2" },
  { 'D', "i", "0" },
  { 'D', "s", "0" },
  { 'D', "+si", "1+2" },
  { 'x', "i", "#0" },
  { 'X', "i", "#0" },
  { 'm', "i", "#0" },
  { 'b', "i", "#0" },
  { 'B', "i", "0" },
  { 'p', "i", "0" },

  { 0, 0, 0 }
};
2293
/* This is in order according to the bitfield that pushm/popm use.
   Bit N of the pushm/popm immediate selects pushm_regs[N]; see the
   'p' case in m32c_print_operand.  */
static char const *pushm_regs[] = {
  "fb", "sb", "a1", "a0", "r3", "r2", "r1", "r0"
};
2298
/* Implements PRINT_OPERAND.  CODE is the template's modifier letter
   (0 when absent).  Some modifiers are handled directly here; the
   rest of the work is table-driven: the operand X is flattened into
   the global `pattern'/`patternr' by encode_pattern(), then rendered
   according to the matching conversions[] entry above.  */
void
m32c_print_operand (FILE * file, rtx x, int code)
{
  int i, j, b;
  const char *comma;
  HOST_WIDE_INT ival;
  int unsigned_const = 0;   /* 2 = mask to 16 bits, 1 = mask to 8 bits.  */
  int force_sign;           /* Set when a '+' must precede a displacement.  */

  /* Multiplies; constants are converted to sign-extended format but
     we need unsigned, so 'u' and 'U' tell us what size unsigned we
     need. */
  if (code == 'u')
    {
      unsigned_const = 2;
      code = 0;
    }
  if (code == 'U')
    {
      unsigned_const = 1;
      code = 0;
    }
  /* This one is only for debugging; you can put it in a pattern to
     force this error. */
  if (code == '!')
    {
      fprintf (stderr, "dj: unreviewed pattern:");
      if (current_output_insn)
	debug_rtx (current_output_insn);
      gcc_unreachable ();
    }
  /* PSImode operations are either .w or .l depending on the target. */
  if (code == '&')
    {
      if (TARGET_A16)
	fprintf (file, "w");
      else
	fprintf (file, "l");
      return;
    }
  /* Inverted conditionals.  X is a comparison code; print the
     opposite condition's mnemonic suffix.  */
  if (code == 'C')
    {
      switch (GET_CODE (x))
	{
	case LE:
	  fputs ("gt", file);
	  break;
	case LEU:
	  fputs ("gtu", file);
	  break;
	case LT:
	  fputs ("ge", file);
	  break;
	case LTU:
	  fputs ("geu", file);
	  break;
	case GT:
	  fputs ("le", file);
	  break;
	case GTU:
	  fputs ("leu", file);
	  break;
	case GE:
	  fputs ("lt", file);
	  break;
	case GEU:
	  fputs ("ltu", file);
	  break;
	case NE:
	  fputs ("eq", file);
	  break;
	case EQ:
	  fputs ("ne", file);
	  break;
	default:
	  gcc_unreachable ();
	}
      return;
    }
  /* Regular conditionals.  */
  if (code == 'c')
    {
      switch (GET_CODE (x))
	{
	case LE:
	  fputs ("le", file);
	  break;
	case LEU:
	  fputs ("leu", file);
	  break;
	case LT:
	  fputs ("lt", file);
	  break;
	case LTU:
	  fputs ("ltu", file);
	  break;
	case GT:
	  fputs ("gt", file);
	  break;
	case GTU:
	  fputs ("gtu", file);
	  break;
	case GE:
	  fputs ("ge", file);
	  break;
	case GEU:
	  fputs ("geu", file);
	  break;
	case NE:
	  fputs ("ne", file);
	  break;
	case EQ:
	  fputs ("eq", file);
	  break;
	default:
	  gcc_unreachable ();
	}
      return;
    }
  /* Used in negsi2 to do HImode ops on the two parts of an SImode
     operand. */
  if (code == 'h' && GET_MODE (x) == SImode)
    {
      x = m32c_subreg (HImode, x, SImode, 0);
      code = 0;
    }
  if (code == 'H' && GET_MODE (x) == SImode)
    {
      x = m32c_subreg (HImode, x, SImode, 2);
      code = 0;
    }
  /* Likewise, 'h'/'H' on an HImode operand select the low/high
     QImode half.  */
  if (code == 'h' && GET_MODE (x) == HImode)
    {
      x = m32c_subreg (QImode, x, HImode, 0);
      code = 0;
    }
  if (code == 'H' && GET_MODE (x) == HImode)
    {
      /* We can't actually represent this as an rtx.  Do it here.  */
      if (GET_CODE (x) == REG)
	{
	  switch (REGNO (x))
	    {
	    case R0_REGNO:
	      fputs ("r0h", file);
	      return;
	    case R1_REGNO:
	      fputs ("r1h", file);
	      return;
	    default:
	      gcc_unreachable();
	    }
	}
      /* This should be a MEM.  */
      x = m32c_subreg (QImode, x, HImode, 1);
      code = 0;
    }
  /* This is for BMcond, which always wants word register names.  */
  if (code == 'h' && GET_MODE (x) == QImode)
    {
      if (GET_CODE (x) == REG)
	x = gen_rtx_REG (HImode, REGNO (x));
      code = 0;
    }
  /* 'x' and 'X' need to be ignored for non-immediates.  */
  if ((code == 'x' || code == 'X') && GET_CODE (x) != CONST_INT)
    code = 0;

  /* Table-driven path: find the conversions[] entry whose code and
     encoded pattern match, then interpret its format string.  */
  encode_pattern (x);
  force_sign = 0;
  for (i = 0; conversions[i].pattern; i++)
    if (conversions[i].code == code
	&& streq (conversions[i].pattern, pattern))
      {
	for (j = 0; conversions[i].format[j]; j++)
	  /* backslash quotes the next character in the output pattern. */
	  if (conversions[i].format[j] == '\\')
	    {
	      fputc (conversions[i].format[j + 1], file);
	      j++;
	    }
	  /* Digits in the output pattern indicate that the
	     corresponding RTX is to be output at that point. */
	  else if (ISDIGIT (conversions[i].format[j]))
	    {
	      rtx r = patternr[conversions[i].format[j] - '0'];
	      switch (GET_CODE (r))
		{
		case REG:
		  fprintf (file, "%s",
			   reg_name_with_mode (REGNO (r), GET_MODE (r)));
		  break;
		case CONST_INT:
		  switch (code)
		    {
		    case 'b':
		    case 'B':
		      {
			/* Single-bit mask: print the bit's position,
			   trying the mask as-is, then its 16-bit and
			   8-bit complements.  */
			int v = INTVAL (r);
			int i = (int) exact_log2 (v);
			if (i == -1)
			  i = (int) exact_log2 ((v ^ 0xffff) & 0xffff);
			if (i == -1)
			  i = (int) exact_log2 ((v ^ 0xff) & 0xff);
			/* Bit position. */
			fprintf (file, "%d", i);
		      }
		      break;
		    case 'x':
		      /* Unsigned byte. */
		      fprintf (file, HOST_WIDE_INT_PRINT_HEX,
			       INTVAL (r) & 0xff);
		      break;
		    case 'X':
		      /* Unsigned word. */
		      fprintf (file, HOST_WIDE_INT_PRINT_HEX,
			       INTVAL (r) & 0xffff);
		      break;
		    case 'p':
		      /* pushm and popm encode a register set into a single byte. */
		      comma = "";
		      for (b = 7; b >= 0; b--)
			if (INTVAL (r) & (1 << b))
			  {
			    fprintf (file, "%s%s", comma, pushm_regs[b]);
			    comma = ",";
			  }
		      break;
		    case 'm':
		      /* "Minus".  Output -X, sign-extended from 16 bits.  */
		      ival = (-INTVAL (r) & 0xffff);
		      if (ival & 0x8000)
			ival = ival - 0x10000;
		      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
		      break;
		    default:
		      ival = INTVAL (r);
		      if (conversions[i].format[j + 1] == '[' && ival < 0)
			{
			  /* We can simulate negative displacements by
			     taking advantage of address space
			     wrapping when the offset can span the
			     entire address range.  */
			  rtx base =
			    patternr[conversions[i].format[j + 2] - '0'];
			  if (GET_CODE (base) == REG)
			    switch (REGNO (base))
			      {
			      case A0_REGNO:
			      case A1_REGNO:
				if (TARGET_A24)
				  ival = 0x1000000 + ival;
				else
				  ival = 0x10000 + ival;
				break;
			      case SB_REGNO:
				if (TARGET_A16)
				  ival = 0x10000 + ival;
				break;
			      }
			}
		      else if (code == 'd' && ival < 0 && j == 0)
			/* The "mova" opcode is used to do addition by
			   computing displacements, but again, we need
			   displacements to be unsigned *if* they're
			   the only component of the displacement
			   (i.e. no "symbol-4" type displacement).  */
			ival = (TARGET_A24 ? 0x1000000 : 0x10000) + ival;

		      if (conversions[i].format[j] == '0')
			{
			  /* More conversions to unsigned. */
			  if (unsigned_const == 2)
			    ival &= 0xffff;
			  if (unsigned_const == 1)
			    ival &= 0xff;
			}
		      if (streq (conversions[i].pattern, "mi")
			  || streq (conversions[i].pattern, "mmi"))
			{
			  /* Integers used as addresses are unsigned. */
			  ival &= (TARGET_A24 ? 0xffffff : 0xffff);
			}
		      /* force_sign was set earlier when a '+' in the
			 format preceded this constant.  */
		      if (force_sign && ival >= 0)
			fputc ('+', file);
		      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
		      break;
		    }
		  break;
		case CONST_DOUBLE:
		  /* We don't have const_double constants.  If it
		     happens, make it obvious.  */
		  fprintf (file, "[const_double 0x%lx]",
			   (unsigned long) CONST_DOUBLE_HIGH (r));
		  break;
		case SYMBOL_REF:
		  assemble_name (file, XSTR (r, 0));
		  break;
		case LABEL_REF:
		  output_asm_label (r);
		  break;
		default:
		  fprintf (stderr, "don't know how to print this operand:");
		  debug_rtx (r);
		  gcc_unreachable ();
		}
	    }
	  else
	    {
	      if (conversions[i].format[j] == 'z')
		{
		  /* Some addressing modes *must* have a displacement,
		     so insert a zero here if needed.  */
		  int k;
		  for (k = j + 1; conversions[i].format[k]; k++)
		    if (ISDIGIT (conversions[i].format[k]))
		      {
			rtx reg = patternr[conversions[i].format[k] - '0'];
			if (GET_CODE (reg) == REG
			    && (REGNO (reg) == SB_REGNO
				|| REGNO (reg) == FB_REGNO
				|| REGNO (reg) == SP_REGNO))
			  fputc ('0', file);
		      }
		  continue;
		}
	      /* Signed displacements off symbols need to have signs
		 blended cleanly.  */
	      if (conversions[i].format[j] == '+'
		  && (!code || code == 'D' || code == 'd')
		  && ISDIGIT (conversions[i].format[j + 1])
		  && (GET_CODE (patternr[conversions[i].format[j + 1] - '0'])
		      == CONST_INT))
		{
		  force_sign = 1;
		  continue;
		}
	      fputc (conversions[i].format[j], file);
	    }
	break;
      }
  /* No table entry matched: complain on stderr and emit a visibly
     bogus token into the assembly.  */
  if (!conversions[i].pattern)
    {
      fprintf (stderr, "unconvertible operand %c `%s'", code ? code : '-',
	       pattern);
      debug_rtx (x);
      fprintf (file, "[%c.%s]", code ? code : '-', pattern);
    }

  return;
}
2652
/* Implements PRINT_OPERAND_PUNCT_VALID_P.  The only punctuation
   modifiers we accept are '&' (PSImode size suffix) and '!' (debug
   trap); see m32c_print_operand above for their meaning.  */
int
m32c_print_operand_punct_valid_p (int c)
{
  return (c == '&' || c == '!') ? 1 : 0;
}
2662
2663/* Implements PRINT_OPERAND_ADDRESS. Nothing unusual here. */
2664void
2665m32c_print_operand_address (FILE * stream, rtx address)
2666{
2667 gcc_assert (GET_CODE (address) == MEM);
2668 m32c_print_operand (stream, XEXP (address, 0), 0);
2669}
2670
2671/* Implements ASM_OUTPUT_REG_PUSH. Control registers are pushed
2672 differently than general registers. */
2673void
2674m32c_output_reg_push (FILE * s, int regno)
2675{
2676 if (regno == FLG_REGNO)
2677 fprintf (s, "\tpushc\tflg\n");
2678 else
04aff2c0 2679 fprintf (s, "\tpush.%c\t%s\n",
38b2d076
DD
2680 " bwll"[reg_push_size (regno)], reg_names[regno]);
2681}
2682
2683/* Likewise for ASM_OUTPUT_REG_POP. */
2684void
2685m32c_output_reg_pop (FILE * s, int regno)
2686{
2687 if (regno == FLG_REGNO)
2688 fprintf (s, "\tpopc\tflg\n");
2689 else
04aff2c0 2690 fprintf (s, "\tpop.%c\t%s\n",
38b2d076
DD
2691 " bwll"[reg_push_size (regno)], reg_names[regno]);
2692}
2693
2694/* Defining target-specific uses of `__attribute__' */
2695
/* Used to simplify the logic below.  Find the attributes wherever
   they may be: a type's own attributes, a decl's own attributes, or,
   failing that, the attributes of the decl's type.  The expansion is
   fully parenthesized so the macro is safe inside larger expressions
   (the previous version leaked an unparenthesized conditional
   operator).  */
#define M32C_ATTRIBUTES(decl) \
  ((TYPE_P (decl)) ? TYPE_ATTRIBUTES (decl) \
   : DECL_ATTRIBUTES (decl) \
     ? (DECL_ATTRIBUTES (decl)) \
     : TYPE_ATTRIBUTES (TREE_TYPE (decl)))
2703
2704/* Returns TRUE if the given tree has the "interrupt" attribute. */
2705static int
2706interrupt_p (tree node ATTRIBUTE_UNUSED)
2707{
2708 tree list = M32C_ATTRIBUTES (node);
2709 while (list)
2710 {
2711 if (is_attribute_p ("interrupt", TREE_PURPOSE (list)))
2712 return 1;
2713 list = TREE_CHAIN (list);
2714 }
2715 return 0;
2716}
2717
2718static tree
2719interrupt_handler (tree * node ATTRIBUTE_UNUSED,
2720 tree name ATTRIBUTE_UNUSED,
2721 tree args ATTRIBUTE_UNUSED,
2722 int flags ATTRIBUTE_UNUSED,
2723 bool * no_add_attrs ATTRIBUTE_UNUSED)
2724{
2725 return NULL_TREE;
2726}
2727
5abd2125
JS
2728/* Returns TRUE if given tree has the "function_vector" attribute. */
2729int
2730m32c_special_page_vector_p (tree func)
2731{
2732 if (TREE_CODE (func) != FUNCTION_DECL)
2733 return 0;
2734
2735 tree list = M32C_ATTRIBUTES (func);
2736 while (list)
2737 {
2738 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2739 return 1;
2740 list = TREE_CHAIN (list);
2741 }
2742 return 0;
2743}
2744
2745static tree
2746function_vector_handler (tree * node ATTRIBUTE_UNUSED,
2747 tree name ATTRIBUTE_UNUSED,
2748 tree args ATTRIBUTE_UNUSED,
2749 int flags ATTRIBUTE_UNUSED,
2750 bool * no_add_attrs ATTRIBUTE_UNUSED)
2751{
2752 if (TARGET_R8C)
2753 {
2754 /* The attribute is not supported for R8C target. */
2755 warning (OPT_Wattributes,
2756 "`%s' attribute is not supported for R8C target",
2757 IDENTIFIER_POINTER (name));
2758 *no_add_attrs = true;
2759 }
2760 else if (TREE_CODE (*node) != FUNCTION_DECL)
2761 {
2762 /* The attribute must be applied to functions only. */
2763 warning (OPT_Wattributes,
2764 "`%s' attribute applies only to functions",
2765 IDENTIFIER_POINTER (name));
2766 *no_add_attrs = true;
2767 }
2768 else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
2769 {
2770 /* The argument must be a constant integer. */
2771 warning (OPT_Wattributes,
2772 "`%s' attribute argument not an integer constant",
2773 IDENTIFIER_POINTER (name));
2774 *no_add_attrs = true;
2775 }
2776 else if (TREE_INT_CST_LOW (TREE_VALUE (args)) < 18
2777 || TREE_INT_CST_LOW (TREE_VALUE (args)) > 255)
2778 {
2779 /* The argument value must be between 18 to 255. */
2780 warning (OPT_Wattributes,
2781 "`%s' attribute argument should be between 18 to 255",
2782 IDENTIFIER_POINTER (name));
2783 *no_add_attrs = true;
2784 }
2785 return NULL_TREE;
2786}
2787
2788/* If the function is assigned the attribute 'function_vector', it
2789 returns the function vector number, otherwise returns zero. */
2790int
2791current_function_special_page_vector (rtx x)
2792{
2793 int num;
2794
2795 if ((GET_CODE(x) == SYMBOL_REF)
2796 && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
2797 {
2798 tree t = SYMBOL_REF_DECL (x);
2799
2800 if (TREE_CODE (t) != FUNCTION_DECL)
2801 return 0;
2802
2803 tree list = M32C_ATTRIBUTES (t);
2804 while (list)
2805 {
2806 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2807 {
2808 num = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list)));
2809 return num;
2810 }
2811
2812 list = TREE_CHAIN (list);
2813 }
2814
2815 return 0;
2816 }
2817 else
2818 return 0;
2819}
2820
38b2d076
DD
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE m32c_attribute_table
/* Target attribute table.  "interrupt" takes no arguments;
   "function_vector" requires exactly one (the vector number, checked
   by function_vector_handler above) and must be on a decl.
   NOTE(review): field order presumably follows struct attribute_spec
   {name, min_len, max_len, decl_req, type_req, fn_type_req, handler}
   for this GCC version — confirm against tree.h.  */
static const struct attribute_spec m32c_attribute_table[] = {
  {"interrupt", 0, 0, false, false, false, interrupt_handler},
  {"function_vector", 1, 1, true, false, false, function_vector_handler},
  {0, 0, 0, 0, 0, 0, 0}
};
2828
2829#undef TARGET_COMP_TYPE_ATTRIBUTES
2830#define TARGET_COMP_TYPE_ATTRIBUTES m32c_comp_type_attributes
2831static int
2832m32c_comp_type_attributes (tree type1 ATTRIBUTE_UNUSED,
2833 tree type2 ATTRIBUTE_UNUSED)
2834{
2835 /* 0=incompatible 1=compatible 2=warning */
2836 return 1;
2837}
2838
2839#undef TARGET_INSERT_ATTRIBUTES
2840#define TARGET_INSERT_ATTRIBUTES m32c_insert_attributes
2841static void
2842m32c_insert_attributes (tree node ATTRIBUTE_UNUSED,
2843 tree * attr_ptr ATTRIBUTE_UNUSED)
2844{
2845 /* Nothing to do here. */
2846}
2847
2848/* Predicates */
2849
f9b89438 2850/* This is a list of legal subregs of hard regs. */
67fc44cb
DD
static const struct {
  unsigned char outer_mode_size;   /* byte size of the subreg's mode */
  unsigned char inner_mode_size;   /* byte size of the hard reg's mode */
  unsigned char byte_mask;         /* bit N set => SUBREG_BYTE N is legal */
  unsigned char legal_when;        /* 1 = always; 16/24 = only on
				      TARGET_A16/TARGET_A24 (see
				      m32c_illegal_subreg_p below) */
  unsigned int regno;              /* hard register the entry applies to */
} legal_subregs[] = {
  {1, 2, 0x03, 1, R0_REGNO},	/* r0h r0l */
  {1, 2, 0x03, 1, R1_REGNO},	/* r1h r1l */
  {1, 2, 0x01, 1, A0_REGNO},
  {1, 2, 0x01, 1, A1_REGNO},

  {1, 4, 0x01, 1, A0_REGNO},
  {1, 4, 0x01, 1, A1_REGNO},

  {2, 4, 0x05, 1, R0_REGNO},	/* r2 r0 */
  {2, 4, 0x05, 1, R1_REGNO},	/* r3 r1 */
  {2, 4, 0x05, 16, A0_REGNO},	/* a1 a0 */
  {2, 4, 0x01, 24, A0_REGNO},	/* a1 a0 */
  {2, 4, 0x01, 24, A1_REGNO},	/* a1 a0 */

  {4, 8, 0x55, 1, R0_REGNO},	/* r3 r1 r2 r0 */
};
2874
/* Returns TRUE if OP is a subreg of a hard reg which we don't
   support.  Same-size subregs, subregs of non-registers, of pseudos,
   and of memory pseudo-registers are all fine; anything else must
   appear in the legal_subregs[] table above with a satisfied
   legal_when condition.  */
bool
m32c_illegal_subreg_p (rtx op)
{
  int offset;
  unsigned int i;
  int src_mode, dest_mode;

  if (GET_CODE (op) != SUBREG)
    return false;

  dest_mode = GET_MODE (op);
  offset = SUBREG_BYTE (op);
  op = SUBREG_REG (op);
  src_mode = GET_MODE (op);

  /* Same-size mode changes are always OK.  */
  if (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (src_mode))
    return false;
  if (GET_CODE (op) != REG)
    return false;
  /* Pseudos and the fake memory registers are unrestricted.  */
  if (REGNO (op) >= MEM0_REGNO)
    return false;

  /* Turn the byte offset into a single-bit mask so it can be tested
     against the table's byte_mask field.  */
  offset = (1 << offset);

  for (i = 0; i < ARRAY_SIZE (legal_subregs); i ++)
    if (legal_subregs[i].outer_mode_size == GET_MODE_SIZE (dest_mode)
	&& legal_subregs[i].regno == REGNO (op)
	&& legal_subregs[i].inner_mode_size == GET_MODE_SIZE (src_mode)
	&& legal_subregs[i].byte_mask & offset)
      {
	/* A matching entry makes the subreg legal only when its
	   legal_when condition holds for this target; otherwise keep
	   scanning for another entry.  */
	switch (legal_subregs[i].legal_when)
	  {
	  case 1:
	    return false;
	  case 16:
	    if (TARGET_A16)
	      return false;
	    break;
	  case 24:
	    if (TARGET_A24)
	      return false;
	    break;
	  }
      }
  /* No entry allowed it: the subreg is illegal.  */
  return true;
}
2923
38b2d076
DD
2924/* Returns TRUE if we support a move between the first two operands.
2925 At the moment, we just want to discourage mem to mem moves until
2926 after reload, because reload has a hard time with our limited
2927 number of address registers, and we can get into a situation where
2928 we need three of them when we only have two. */
2929bool
2930m32c_mov_ok (rtx * operands, enum machine_mode mode ATTRIBUTE_UNUSED)
2931{
2932 rtx op0 = operands[0];
2933 rtx op1 = operands[1];
2934
2935 if (TARGET_A24)
2936 return true;
2937
2938#define DEBUG_MOV_OK 0
2939#if DEBUG_MOV_OK
2940 fprintf (stderr, "m32c_mov_ok %s\n", mode_name[mode]);
2941 debug_rtx (op0);
2942 debug_rtx (op1);
2943#endif
2944
2945 if (GET_CODE (op0) == SUBREG)
2946 op0 = XEXP (op0, 0);
2947 if (GET_CODE (op1) == SUBREG)
2948 op1 = XEXP (op1, 0);
2949
2950 if (GET_CODE (op0) == MEM
2951 && GET_CODE (op1) == MEM
2952 && ! reload_completed)
2953 {
2954#if DEBUG_MOV_OK
2955 fprintf (stderr, " - no, mem to mem\n");
2956#endif
2957 return false;
2958 }
2959
2960#if DEBUG_MOV_OK
2961 fprintf (stderr, " - ok\n");
2962#endif
2963 return true;
2964}
2965
ff485e71
DD
/* Returns TRUE if two consecutive HImode mov instructions, generated
   for moving an immediate double data to a double data type variable
   location, can be combined into single SImode mov instruction.

   operands[0]/operands[2] are the two destination MEMs and
   operands[1]/operands[3] the two immediate halves.  On success the
   combined destination is stored in operands[4] and the combined
   32-bit constant in operands[5].

   FLAG records which destination shape matched:
     1 = plain symbol + (symbol+const) pair,
     2 = both (symbol+const), second-half offset 4-byte aligned,
     3 = both FB-relative with constant offsets.  */
bool
m32c_immd_dbl_mov (rtx * operands,
		   enum machine_mode mode ATTRIBUTE_UNUSED)
{
  int flag = 0, okflag = 0, offset1 = 0, offset2 = 0, offsetsign = 0;
  const char *str1;
  const char *str2;

  if (GET_CODE (XEXP (operands[0], 0)) == SYMBOL_REF
      && MEM_SCALAR_P (operands[0])
      && !MEM_IN_STRUCT_P (operands[0])
      && GET_CODE (XEXP (operands[2], 0)) == CONST
      && GET_CODE (XEXP (XEXP (operands[2], 0), 0)) == PLUS
      && GET_CODE (XEXP (XEXP (XEXP (operands[2], 0), 0), 0)) == SYMBOL_REF
      && GET_CODE (XEXP (XEXP (XEXP (operands[2], 0), 0), 1)) == CONST_INT
      && MEM_SCALAR_P (operands[2])
      && !MEM_IN_STRUCT_P (operands[2]))
    flag = 1;

  else if (GET_CODE (XEXP (operands[0], 0)) == CONST
	   && GET_CODE (XEXP (XEXP (operands[0], 0), 0)) == PLUS
	   && GET_CODE (XEXP (XEXP (XEXP (operands[0], 0), 0), 0)) == SYMBOL_REF
	   && MEM_SCALAR_P (operands[0])
	   && !MEM_IN_STRUCT_P (operands[0])
	   && !(XINT (XEXP (XEXP (XEXP (operands[0], 0), 0), 1), 0) %4)
	   && GET_CODE (XEXP (operands[2], 0)) == CONST
	   && GET_CODE (XEXP (XEXP (operands[2], 0), 0)) == PLUS
	   && GET_CODE (XEXP (XEXP (XEXP (operands[2], 0), 0), 0)) == SYMBOL_REF
	   && MEM_SCALAR_P (operands[2])
	   && !MEM_IN_STRUCT_P (operands[2]))
    flag = 2;

  else if (GET_CODE (XEXP (operands[0], 0)) == PLUS
	   && GET_CODE (XEXP (XEXP (operands[0], 0), 0)) == REG
	   && REGNO (XEXP (XEXP (operands[0], 0), 0)) == FB_REGNO
	   && GET_CODE (XEXP (XEXP (operands[0], 0), 1)) == CONST_INT
	   && MEM_SCALAR_P (operands[0])
	   && !MEM_IN_STRUCT_P (operands[0])
	   && !(XINT (XEXP (XEXP (operands[0], 0), 1), 0) %4)
	   && REGNO (XEXP (XEXP (operands[2], 0), 0)) == FB_REGNO
	   && GET_CODE (XEXP (XEXP (operands[2], 0), 1)) == CONST_INT
	   && MEM_SCALAR_P (operands[2])
	   && !MEM_IN_STRUCT_P (operands[2]))
    flag = 3;

  else
    return false;

  /* Second pass: verify the two destinations really are consecutive
     halves of the same object.  */
  switch (flag)
    {
    case 1:
      /* Both must reference the same symbol.  */
      str1 = XSTR (XEXP (operands[0], 0), 0);
      str2 = XSTR (XEXP (XEXP (XEXP (operands[2], 0), 0), 0), 0);
      if (strcmp (str1, str2) == 0)
	okflag = 1;
      else
	okflag = 0;
      break;
    case 2:
      /* Both must reference the same symbol.  */
      str1 = XSTR (XEXP (XEXP (XEXP (operands[0], 0), 0), 0), 0);
      str2 = XSTR (XEXP (XEXP (XEXP (operands[2], 0), 0), 0), 0);
      if (strcmp(str1,str2) == 0)
	okflag = 1;
      else
	okflag = 0;
      break;
    case 3:
      /* FB offsets must be exactly 2 apart, first offset negative.  */
      offset1 = XINT (XEXP (XEXP (operands[0], 0), 1), 0);
      offset2 = XINT (XEXP (XEXP (operands[2], 0), 1), 0);
      offsetsign = offset1 >> ((sizeof (offset1) * 8) -1);
      if (((offset2-offset1) == 2) && offsetsign != 0)
	okflag = 1;
      else
	okflag = 0;
      break;
    default:
      okflag = 0;
    }

  if (okflag == 1)
    {
      HOST_WIDE_INT val;
      operands[4] = gen_rtx_MEM (SImode, XEXP (operands[0], 0));

      /* Fuse the two 16-bit halves into one 32-bit constant
	 (operands[3] is the high half).  */
      val = (XINT (operands[3], 0) << 16) + (XINT (operands[1], 0) & 0xFFFF);
      operands[5] = gen_rtx_CONST_INT (VOIDmode, val);

      return true;
    }

  return false;
}
3061
38b2d076
DD
3062/* Expanders */
3063
/* Subregs are non-orthogonal for us, because our registers are all
   different sizes.  Returns an rtx for byte offset BYTE of X viewed
   in mode OUTER (X currently has mode INNER); aborts on register
   combinations the hardware layout cannot express.  */
static rtx
m32c_subreg (enum machine_mode outer,
	     rtx x, enum machine_mode inner, int byte)
{
  int r, nr = -1;

  /* Converting MEMs to different types that are the same size, we
     just rewrite them.  */
  if (GET_CODE (x) == SUBREG
      && SUBREG_BYTE (x) == 0
      && GET_CODE (SUBREG_REG (x)) == MEM
      && (GET_MODE_SIZE (GET_MODE (x))
	  == GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
    {
      rtx oldx = x;
      x = gen_rtx_MEM (GET_MODE (x), XEXP (SUBREG_REG (x), 0));
      MEM_COPY_ATTRIBUTES (x, SUBREG_REG (oldx));
    }

  /* Push/pop get done as smaller push/pops.  */
  if (GET_CODE (x) == MEM
      && (GET_CODE (XEXP (x, 0)) == PRE_DEC
	  || GET_CODE (XEXP (x, 0)) == POST_INC))
    return gen_rtx_MEM (outer, XEXP (x, 0));
  if (GET_CODE (x) == SUBREG
      && GET_CODE (XEXP (x, 0)) == MEM
      && (GET_CODE (XEXP (XEXP (x, 0), 0)) == PRE_DEC
	  || GET_CODE (XEXP (XEXP (x, 0), 0)) == POST_INC))
    return gen_rtx_MEM (outer, XEXP (XEXP (x, 0), 0));

  /* Non-registers, pseudos, the frame pointer, and the fake memory
     registers can use the generic subreg machinery.  */
  if (GET_CODE (x) != REG)
    return simplify_gen_subreg (outer, x, inner, byte);

  r = REGNO (x);
  if (r >= FIRST_PSEUDO_REGISTER || r == AP_REGNO)
    return simplify_gen_subreg (outer, x, inner, byte);

  if (IS_MEM_REGNO (r))
    return simplify_gen_subreg (outer, x, inner, byte);

  /* This is where the complexities of our register layout are
     described: map (hard reg, byte offset) to the hard reg that
     actually holds those bytes.  */
  if (byte == 0)
    nr = r;
  else if (outer == HImode)
    {
      if (r == R0_REGNO && byte == 2)
	nr = R2_REGNO;
      else if (r == R0_REGNO && byte == 4)
	nr = R1_REGNO;
      else if (r == R0_REGNO && byte == 6)
	nr = R3_REGNO;
      else if (r == R1_REGNO && byte == 2)
	nr = R3_REGNO;
      else if (r == A0_REGNO && byte == 2)
	nr = A1_REGNO;
    }
  else if (outer == SImode)
    {
      if (r == R0_REGNO && byte == 0)
	nr = R0_REGNO;
      else if (r == R0_REGNO && byte == 4)
	nr = R1_REGNO;
    }
  /* Any combination not listed above is unrepresentable.  */
  if (nr == -1)
    {
      fprintf (stderr, "m32c_subreg %s %s %d\n",
	       mode_name[outer], mode_name[inner], byte);
      debug_rtx (x);
      gcc_unreachable ();
    }
  return gen_rtx_REG (outer, nr);
}
3139
3140/* Used to emit move instructions. We split some moves,
3141 and avoid mem-mem moves. */
3142int
3143m32c_prepare_move (rtx * operands, enum machine_mode mode)
3144{
3145 if (TARGET_A16 && mode == PSImode)
3146 return m32c_split_move (operands, mode, 1);
3147 if ((GET_CODE (operands[0]) == MEM)
3148 && (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY))
3149 {
3150 rtx pmv = XEXP (operands[0], 0);
3151 rtx dest_reg = XEXP (pmv, 0);
3152 rtx dest_mod = XEXP (pmv, 1);
3153
3154 emit_insn (gen_rtx_SET (Pmode, dest_reg, dest_mod));
3155 operands[0] = gen_rtx_MEM (mode, dest_reg);
3156 }
b3a13419 3157 if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
38b2d076
DD
3158 operands[1] = copy_to_mode_reg (mode, operands[1]);
3159 return 0;
3160}
3161
3162#define DEBUG_SPLIT 0
3163
3164/* Returns TRUE if the given PSImode move should be split. We split
3165 for all r8c/m16c moves, since it doesn't support them, and for
3166 POP.L as we can only *push* SImode. */
3167int
3168m32c_split_psi_p (rtx * operands)
3169{
3170#if DEBUG_SPLIT
3171 fprintf (stderr, "\nm32c_split_psi_p\n");
3172 debug_rtx (operands[0]);
3173 debug_rtx (operands[1]);
3174#endif
3175 if (TARGET_A16)
3176 {
3177#if DEBUG_SPLIT
3178 fprintf (stderr, "yes, A16\n");
3179#endif
3180 return 1;
3181 }
3182 if (GET_CODE (operands[1]) == MEM
3183 && GET_CODE (XEXP (operands[1], 0)) == POST_INC)
3184 {
3185#if DEBUG_SPLIT
3186 fprintf (stderr, "yes, pop.l\n");
3187#endif
3188 return 1;
3189 }
3190#if DEBUG_SPLIT
3191 fprintf (stderr, "no, default\n");
3192#endif
3193 return 0;
3194}
3195
/* Split the given move.  SPLIT_ALL is 0 if splitting is optional
   (define_expand), 1 if it is not optional (define_insn_and_split),
   and 3 for define_split (alternate api).  Returns nonzero when the
   move was split; for SPLIT_ALL != 3 the partial moves are emitted
   here, for SPLIT_ALL == 3 they are stored back into OPERANDS
   starting at index 2 for the caller to emit.  */
int
m32c_split_move (rtx * operands, enum machine_mode mode, int split_all)
{
  rtx s[4], d[4];
  int parts, si, di, rev = 0;
  int rv = 0, opi = 2;
  enum machine_mode submode = HImode;
  rtx *ops, local_ops[10];

  /* define_split modifies the existing operands, but the other two
     emit new insns.  OPS is where we store the operand pairs, which
     we emit later.  */
  if (split_all == 3)
    ops = operands;
  else
    ops = local_ops;

  /* Else HImode.  */
  if (mode == DImode)
    submode = SImode;

  /* Before splitting mem-mem moves, force one operand into a
     register.  */
  if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
    {
#if DEBUG0
      fprintf (stderr, "force_reg...\n");
      debug_rtx (operands[1]);
#endif
      operands[1] = force_reg (mode, operands[1]);
#if DEBUG0
      debug_rtx (operands[1]);
#endif
    }

  parts = 2;

#if DEBUG_SPLIT
  fprintf (stderr, "\nsplit_move %d all=%d\n", !can_create_pseudo_p (),
	   split_all);
  debug_rtx (operands[0]);
  debug_rtx (operands[1]);
#endif

  /* Note that split_all is not used to select the api after this
     point, so it's safe to set it to 3 even with define_insn.  */
  /* None of the chips can move SI operands to sp-relative addresses,
     so we always split those.  */
  if (m32c_extra_constraint_p (operands[0], 'S', "Ss"))
    split_all = 3;

  /* We don't need to split these.  */
  if (TARGET_A24
      && split_all != 3
      && (mode == SImode || mode == PSImode)
      && !(GET_CODE (operands[1]) == MEM
	   && GET_CODE (XEXP (operands[1], 0)) == POST_INC))
    return 0;

  /* First, enumerate the subregs we'll be dealing with.  */
  for (si = 0; si < parts; si++)
    {
      d[si] =
	m32c_subreg (submode, operands[0], mode,
		     si * GET_MODE_SIZE (submode));
      s[si] =
	m32c_subreg (submode, operands[1], mode,
		     si * GET_MODE_SIZE (submode));
    }

  /* Split pushes by emitting a sequence of smaller pushes.  Parts go
     highest-first so they land on the stack in the right order.  */
  if (GET_CODE (d[0]) == MEM && GET_CODE (XEXP (d[0], 0)) == PRE_DEC)
    {
      for (si = parts - 1; si >= 0; si--)
	{
	  ops[opi++] = gen_rtx_MEM (submode,
				    gen_rtx_PRE_DEC (Pmode,
						     gen_rtx_REG (Pmode,
								  SP_REGNO)));
	  ops[opi++] = s[si];
	}

      rv = 1;
    }
  /* Likewise for pops, lowest part first.  */
  else if (GET_CODE (s[0]) == MEM && GET_CODE (XEXP (s[0], 0)) == POST_INC)
    {
      for (di = 0; di < parts; di++)
	{
	  ops[opi++] = d[di];
	  ops[opi++] = gen_rtx_MEM (submode,
				    gen_rtx_POST_INC (Pmode,
						      gen_rtx_REG (Pmode,
								   SP_REGNO)));
	}
      rv = 1;
    }
  else if (split_all)
    {
      /* if d[di] == s[si] for any di < si, we'll early clobber. */
      for (di = 0; di < parts - 1; di++)
	for (si = di + 1; si < parts; si++)
	  if (reg_mentioned_p (d[di], s[si]))
	    rev = 1;

      /* Order the partial moves so no destination clobbers a source
	 that is still needed.  */
      if (rev)
	for (si = 0; si < parts; si++)
	  {
	    ops[opi++] = d[si];
	    ops[opi++] = s[si];
	  }
      else
	for (si = parts - 1; si >= 0; si--)
	  {
	    ops[opi++] = d[si];
	    ops[opi++] = s[si];
	  }
      rv = 1;
    }
  /* Now emit any moves we may have accumulated.  */
  if (rv && split_all != 3)
    {
      int i;
      for (i = 2; i < opi; i += 2)
	emit_move_insn (ops[i], ops[i + 1]);
    }
  return rv;
}
3327
07127a0a
DD
3328/* The m32c has a number of opcodes that act like memcpy, strcmp, and
3329 the like. For the R8C they expect one of the addresses to be in
3330 R1L:An so we need to arrange for that. Otherwise, it's just a
3331 matter of picking out the operands we want and emitting the right
3332 pattern for them. All these expanders, which correspond to
3333 patterns in blkmov.md, must return nonzero if they expand the insn,
3334 or zero if they should FAIL. */
3335
/* This is a memset() opcode.  All operands are implied, so we need to
   arrange for them to be in the right registers.  The opcode wants
   addresses, not [mem] syntax.  $0 is the destination (MEM:BLK), $1
   the count (HI), and $2 the value (QI).  Returns 1 (the expansion
   always succeeds).  */
int
m32c_expand_setmemhi(rtx *operands)
{
  rtx desta, count, val;
  rtx desto, counto;   /* Output-only clobbers for the insn pattern.  */

  desta = XEXP (operands[0], 0);
  count = operands[1];
  val = operands[2];

  desto = gen_reg_rtx (Pmode);
  counto = gen_reg_rtx (HImode);

  /* The opcode needs the destination address in a register.  */
  if (GET_CODE (desta) != REG
      || REGNO (desta) < FIRST_PSEUDO_REGISTER)
    desta = copy_to_mode_reg (Pmode, desta);

  /* This looks like an arbitrary restriction, but this is by far the
     most common case.  For counts 8..14 this actually results in
     smaller code with no speed penalty because the half-sized
     constant can be loaded with a shorter opcode.  */
  if (GET_CODE (count) == CONST_INT
      && GET_CODE (val) == CONST_INT
      && ! (INTVAL (count) & 1)
      && (INTVAL (count) > 1)
      && (INTVAL (val) <= 7 && INTVAL (val) >= -8))
    {
      /* Word-wide variant: replicate the byte into both halves and
	 halve the count.  */
      unsigned v = INTVAL (val) & 0xff;
      v = v | (v << 8);
      count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
      val = copy_to_mode_reg (HImode, GEN_INT (v));
      if (TARGET_A16)
	emit_insn (gen_setmemhi_whi_op (desto, counto, val, desta, count));
      else
	emit_insn (gen_setmemhi_wpsi_op (desto, counto, val, desta, count));
      return 1;
    }

  /* This is the generalized memset() case.  */
  if (GET_CODE (val) != REG
      || REGNO (val) < FIRST_PSEUDO_REGISTER)
    val = copy_to_mode_reg (QImode, val);

  if (GET_CODE (count) != REG
      || REGNO (count) < FIRST_PSEUDO_REGISTER)
    count = copy_to_mode_reg (HImode, count);

  if (TARGET_A16)
    emit_insn (gen_setmemhi_bhi_op (desto, counto, val, desta, count));
  else
    emit_insn (gen_setmemhi_bpsi_op (desto, counto, val, desta, count));

  return 1;
}
3394
3395/* This is a memcpy() opcode. All operands are implied, so we need to
3396 arrange for them to be in the right registers. The opcode wants
3397 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3398 is the source (MEM:BLK), and $2 the count (HI). */
3399int
3400m32c_expand_movmemhi(rtx *operands)
3401{
3402 rtx desta, srca, count;
3403 rtx desto, srco, counto;
3404
3405 desta = XEXP (operands[0], 0);
3406 srca = XEXP (operands[1], 0);
3407 count = operands[2];
3408
3409 desto = gen_reg_rtx (Pmode);
3410 srco = gen_reg_rtx (Pmode);
3411 counto = gen_reg_rtx (HImode);
3412
3413 if (GET_CODE (desta) != REG
3414 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3415 desta = copy_to_mode_reg (Pmode, desta);
3416
3417 if (GET_CODE (srca) != REG
3418 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3419 srca = copy_to_mode_reg (Pmode, srca);
3420
3421 /* Similar to setmem, but we don't need to check the value. */
3422 if (GET_CODE (count) == CONST_INT
3423 && ! (INTVAL (count) & 1)
3424 && (INTVAL (count) > 1))
3425 {
3426 count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3427 if (TARGET_A16)
3428 emit_insn (gen_movmemhi_whi_op (desto, srco, counto, desta, srca, count));
3429 else
3430 emit_insn (gen_movmemhi_wpsi_op (desto, srco, counto, desta, srca, count));
3431 return 1;
3432 }
3433
3434 /* This is the generalized memset() case. */
3435 if (GET_CODE (count) != REG
3436 || REGNO (count) < FIRST_PSEUDO_REGISTER)
3437 count = copy_to_mode_reg (HImode, count);
3438
3439 if (TARGET_A16)
3440 emit_insn (gen_movmemhi_bhi_op (desto, srco, counto, desta, srca, count));
3441 else
3442 emit_insn (gen_movmemhi_bpsi_op (desto, srco, counto, desta, srca, count));
3443
3444 return 1;
3445}
3446
3447/* This is a stpcpy() opcode. $0 is the destination (MEM:BLK) after
3448 the copy, which should point to the NUL at the end of the string,
3449 $1 is the destination (MEM:BLK), and $2 is the source (MEM:BLK).
3450 Since our opcode leaves the destination pointing *after* the NUL,
3451 we must emit an adjustment. */
3452int
3453m32c_expand_movstr(rtx *operands)
3454{
3455 rtx desta, srca;
3456 rtx desto, srco;
3457
3458 desta = XEXP (operands[1], 0);
3459 srca = XEXP (operands[2], 0);
3460
3461 desto = gen_reg_rtx (Pmode);
3462 srco = gen_reg_rtx (Pmode);
3463
3464 if (GET_CODE (desta) != REG
3465 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3466 desta = copy_to_mode_reg (Pmode, desta);
3467
3468 if (GET_CODE (srca) != REG
3469 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3470 srca = copy_to_mode_reg (Pmode, srca);
3471
3472 emit_insn (gen_movstr_op (desto, srco, desta, srca));
3473 /* desto ends up being a1, which allows this type of add through MOVA. */
3474 emit_insn (gen_addpsi3 (operands[0], desto, GEN_INT (-1)));
3475
3476 return 1;
3477}
3478
3479/* This is a strcmp() opcode. $0 is the destination (HI) which holds
3480 <=>0 depending on the comparison, $1 is one string (MEM:BLK), and
3481 $2 is the other (MEM:BLK). We must do the comparison, and then
3482 convert the flags to a signed integer result. */
3483int
3484m32c_expand_cmpstr(rtx *operands)
3485{
3486 rtx src1a, src2a;
3487
3488 src1a = XEXP (operands[1], 0);
3489 src2a = XEXP (operands[2], 0);
3490
3491 if (GET_CODE (src1a) != REG
3492 || REGNO (src1a) < FIRST_PSEUDO_REGISTER)
3493 src1a = copy_to_mode_reg (Pmode, src1a);
3494
3495 if (GET_CODE (src2a) != REG
3496 || REGNO (src2a) < FIRST_PSEUDO_REGISTER)
3497 src2a = copy_to_mode_reg (Pmode, src2a);
3498
3499 emit_insn (gen_cmpstrhi_op (src1a, src2a, src1a, src2a));
3500 emit_insn (gen_cond_to_int (operands[0]));
3501
3502 return 1;
3503}
3504
3505
23fed240
DD
3506typedef rtx (*shift_gen_func)(rtx, rtx, rtx);
3507
3508static shift_gen_func
3509shift_gen_func_for (int mode, int code)
3510{
3511#define GFF(m,c,f) if (mode == m && code == c) return f
3512 GFF(QImode, ASHIFT, gen_ashlqi3_i);
3513 GFF(QImode, ASHIFTRT, gen_ashrqi3_i);
3514 GFF(QImode, LSHIFTRT, gen_lshrqi3_i);
3515 GFF(HImode, ASHIFT, gen_ashlhi3_i);
3516 GFF(HImode, ASHIFTRT, gen_ashrhi3_i);
3517 GFF(HImode, LSHIFTRT, gen_lshrhi3_i);
3518 GFF(PSImode, ASHIFT, gen_ashlpsi3_i);
3519 GFF(PSImode, ASHIFTRT, gen_ashrpsi3_i);
3520 GFF(PSImode, LSHIFTRT, gen_lshrpsi3_i);
3521 GFF(SImode, ASHIFT, TARGET_A16 ? gen_ashlsi3_16 : gen_ashlsi3_24);
3522 GFF(SImode, ASHIFTRT, TARGET_A16 ? gen_ashrsi3_16 : gen_ashrsi3_24);
3523 GFF(SImode, LSHIFTRT, TARGET_A16 ? gen_lshrsi3_16 : gen_lshrsi3_24);
3524#undef GFF
07127a0a 3525 gcc_unreachable ();
23fed240
DD
3526}
3527
38b2d076
DD
3528/* The m32c only has one shift, but it takes a signed count. GCC
3529 doesn't want this, so we fake it by negating any shift count when
07127a0a
DD
3530 we're pretending to shift the other way. Also, the shift count is
3531 limited to -8..8. It's slightly better to use two shifts for 9..15
3532 than to load the count into r1h, so we do that too. */
38b2d076 3533int
23fed240 3534m32c_prepare_shift (rtx * operands, int scale, int shift_code)
38b2d076 3535{
23fed240
DD
3536 enum machine_mode mode = GET_MODE (operands[0]);
3537 shift_gen_func func = shift_gen_func_for (mode, shift_code);
38b2d076 3538 rtx temp;
23fed240
DD
3539
3540 if (GET_CODE (operands[2]) == CONST_INT)
38b2d076 3541 {
23fed240
DD
3542 int maxc = TARGET_A24 && (mode == PSImode || mode == SImode) ? 32 : 8;
3543 int count = INTVAL (operands[2]) * scale;
3544
3545 while (count > maxc)
3546 {
3547 temp = gen_reg_rtx (mode);
3548 emit_insn (func (temp, operands[1], GEN_INT (maxc)));
3549 operands[1] = temp;
3550 count -= maxc;
3551 }
3552 while (count < -maxc)
3553 {
3554 temp = gen_reg_rtx (mode);
3555 emit_insn (func (temp, operands[1], GEN_INT (-maxc)));
3556 operands[1] = temp;
3557 count += maxc;
3558 }
3559 emit_insn (func (operands[0], operands[1], GEN_INT (count)));
3560 return 1;
38b2d076 3561 }
2e160056
DD
3562
3563 temp = gen_reg_rtx (QImode);
38b2d076 3564 if (scale < 0)
2e160056
DD
3565 /* The pattern has a NEG that corresponds to this. */
3566 emit_move_insn (temp, gen_rtx_NEG (QImode, operands[2]));
3567 else if (TARGET_A16 && mode == SImode)
3568 /* We do this because the code below may modify this, we don't
3569 want to modify the origin of this value. */
3570 emit_move_insn (temp, operands[2]);
38b2d076 3571 else
2e160056 3572 /* We'll only use it for the shift, no point emitting a move. */
38b2d076 3573 temp = operands[2];
2e160056 3574
16659fcf 3575 if (TARGET_A16 && GET_MODE_SIZE (mode) == 4)
2e160056
DD
3576 {
3577 /* The m16c has a limit of -16..16 for SI shifts, even when the
3578 shift count is in a register. Since there are so many targets
3579 of these shifts, it's better to expand the RTL here than to
3580 call a helper function.
3581
3582 The resulting code looks something like this:
3583
3584 cmp.b r1h,-16
3585 jge.b 1f
3586 shl.l -16,dest
3587 add.b r1h,16
3588 1f: cmp.b r1h,16
3589 jle.b 1f
3590 shl.l 16,dest
3591 sub.b r1h,16
3592 1f: shl.l r1h,dest
3593
3594 We take advantage of the fact that "negative" shifts are
3595 undefined to skip one of the comparisons. */
3596
3597 rtx count;
833bf445 3598 rtx label, lref, insn, tempvar;
2e160056 3599
16659fcf
DD
3600 emit_move_insn (operands[0], operands[1]);
3601
2e160056
DD
3602 count = temp;
3603 label = gen_label_rtx ();
3604 lref = gen_rtx_LABEL_REF (VOIDmode, label);
3605 LABEL_NUSES (label) ++;
3606
833bf445
DD
3607 tempvar = gen_reg_rtx (mode);
3608
2e160056
DD
3609 if (shift_code == ASHIFT)
3610 {
3611 /* This is a left shift. We only need check positive counts. */
3612 emit_jump_insn (gen_cbranchqi4 (gen_rtx_LE (VOIDmode, 0, 0),
3613 count, GEN_INT (16), label));
833bf445
DD
3614 emit_insn (func (tempvar, operands[0], GEN_INT (8)));
3615 emit_insn (func (operands[0], tempvar, GEN_INT (8)));
2e160056
DD
3616 insn = emit_insn (gen_addqi3 (count, count, GEN_INT (-16)));
3617 emit_label_after (label, insn);
3618 }
3619 else
3620 {
3621 /* This is a right shift. We only need check negative counts. */
3622 emit_jump_insn (gen_cbranchqi4 (gen_rtx_GE (VOIDmode, 0, 0),
3623 count, GEN_INT (-16), label));
833bf445
DD
3624 emit_insn (func (tempvar, operands[0], GEN_INT (-8)));
3625 emit_insn (func (operands[0], tempvar, GEN_INT (-8)));
2e160056
DD
3626 insn = emit_insn (gen_addqi3 (count, count, GEN_INT (16)));
3627 emit_label_after (label, insn);
3628 }
16659fcf
DD
3629 operands[1] = operands[0];
3630 emit_insn (func (operands[0], operands[0], count));
3631 return 1;
2e160056
DD
3632 }
3633
38b2d076
DD
3634 operands[2] = temp;
3635 return 0;
3636}
3637
12ea2512
DD
3638/* The m32c has a limited range of operations that work on PSImode
3639 values; we have to expand to SI, do the math, and truncate back to
3640 PSI. Yes, this is expensive, but hopefully gcc will learn to avoid
3641 those cases. */
3642void
3643m32c_expand_neg_mulpsi3 (rtx * operands)
3644{
3645 /* operands: a = b * i */
3646 rtx temp1; /* b as SI */
07127a0a
DD
3647 rtx scale /* i as SI */;
3648 rtx temp2; /* a*b as SI */
12ea2512
DD
3649
3650 temp1 = gen_reg_rtx (SImode);
3651 temp2 = gen_reg_rtx (SImode);
07127a0a
DD
3652 if (GET_CODE (operands[2]) != CONST_INT)
3653 {
3654 scale = gen_reg_rtx (SImode);
3655 emit_insn (gen_zero_extendpsisi2 (scale, operands[2]));
3656 }
3657 else
3658 scale = copy_to_mode_reg (SImode, operands[2]);
12ea2512
DD
3659
3660 emit_insn (gen_zero_extendpsisi2 (temp1, operands[1]));
07127a0a
DD
3661 temp2 = expand_simple_binop (SImode, MULT, temp1, scale, temp2, 1, OPTAB_LIB);
3662 emit_insn (gen_truncsipsi2 (operands[0], temp2));
12ea2512
DD
3663}
3664
0166ff05
DD
3665static rtx compare_op0, compare_op1;
3666
3667void
3668m32c_pend_compare (rtx *operands)
3669{
3670 compare_op0 = operands[0];
3671 compare_op1 = operands[1];
3672}
3673
3674void
3675m32c_unpend_compare (void)
3676{
3677 switch (GET_MODE (compare_op0))
3678 {
3679 case QImode:
3680 emit_insn (gen_cmpqi_op (compare_op0, compare_op1));
3681 case HImode:
3682 emit_insn (gen_cmphi_op (compare_op0, compare_op1));
3683 case PSImode:
3684 emit_insn (gen_cmppsi_op (compare_op0, compare_op1));
67fc44cb
DD
3685 default:
3686 /* Just to silence the "missing case" warnings. */ ;
0166ff05
DD
3687 }
3688}
3689
3690void
3691m32c_expand_scc (int code, rtx *operands)
3692{
3693 enum machine_mode mode = TARGET_A16 ? QImode : HImode;
3694
3695 emit_insn (gen_rtx_SET (mode,
3696 operands[0],
3697 gen_rtx_fmt_ee (code,
3698 mode,
3699 compare_op0,
3700 compare_op1)));
3701}
3702
38b2d076
DD
3703/* Pattern Output Functions */
3704
07127a0a
DD
3705/* Returns a (OP (reg:CC FLG_REGNO) (const_int 0)) from some other
3706 match_operand rtx's OP. */
3707rtx
3708m32c_cmp_flg_0 (rtx cmp)
3709{
3710 return gen_rtx_fmt_ee (GET_CODE (cmp),
3711 GET_MODE (cmp),
3712 gen_rtx_REG (CCmode, FLG_REGNO),
3713 GEN_INT (0));
3714}
3715
3716int
3717m32c_expand_movcc (rtx *operands)
3718{
3719 rtx rel = operands[1];
0166ff05
DD
3720 rtx cmp;
3721
07127a0a
DD
3722 if (GET_CODE (rel) != EQ && GET_CODE (rel) != NE)
3723 return 1;
3724 if (GET_CODE (operands[2]) != CONST_INT
3725 || GET_CODE (operands[3]) != CONST_INT)
3726 return 1;
3727 emit_insn (gen_cmpqi(XEXP (rel, 0), XEXP (rel, 1)));
3728 if (GET_CODE (rel) == NE)
3729 {
3730 rtx tmp = operands[2];
3731 operands[2] = operands[3];
3732 operands[3] = tmp;
3733 }
0166ff05
DD
3734
3735 cmp = gen_rtx_fmt_ee (GET_CODE (rel),
3736 GET_MODE (rel),
3737 compare_op0,
3738 compare_op1);
3739
3740 emit_move_insn (operands[0],
3741 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
3742 cmp,
3743 operands[2],
3744 operands[3]));
07127a0a
DD
3745 return 0;
3746}
3747
3748/* Used for the "insv" pattern. Return nonzero to fail, else done. */
3749int
3750m32c_expand_insv (rtx *operands)
3751{
3752 rtx op0, src0, p;
3753 int mask;
3754
3755 if (INTVAL (operands[1]) != 1)
3756 return 1;
3757
9cb96754
N
3758 /* Our insv opcode (bset, bclr) can only insert a one-bit constant. */
3759 if (GET_CODE (operands[3]) != CONST_INT)
3760 return 1;
3761 if (INTVAL (operands[3]) != 0
3762 && INTVAL (operands[3]) != 1
3763 && INTVAL (operands[3]) != -1)
3764 return 1;
3765
07127a0a
DD
3766 mask = 1 << INTVAL (operands[2]);
3767
3768 op0 = operands[0];
3769 if (GET_CODE (op0) == SUBREG
3770 && SUBREG_BYTE (op0) == 0)
3771 {
3772 rtx sub = SUBREG_REG (op0);
3773 if (GET_MODE (sub) == HImode || GET_MODE (sub) == QImode)
3774 op0 = sub;
3775 }
3776
b3a13419 3777 if (!can_create_pseudo_p ()
07127a0a
DD
3778 || (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0)))
3779 src0 = op0;
3780 else
3781 {
3782 src0 = gen_reg_rtx (GET_MODE (op0));
3783 emit_move_insn (src0, op0);
3784 }
3785
3786 if (GET_MODE (op0) == HImode
3787 && INTVAL (operands[2]) >= 8
3788 && GET_MODE (op0) == MEM)
3789 {
3790 /* We are little endian. */
3791 rtx new_mem = gen_rtx_MEM (QImode, plus_constant (XEXP (op0, 0), 1));
3792 MEM_COPY_ATTRIBUTES (new_mem, op0);
3793 mask >>= 8;
3794 }
3795
8e4edce7
DD
3796 /* First, we generate a mask with the correct polarity. If we are
3797 storing a zero, we want an AND mask, so invert it. */
3798 if (INTVAL (operands[3]) == 0)
07127a0a 3799 {
16659fcf 3800 /* Storing a zero, use an AND mask */
07127a0a
DD
3801 if (GET_MODE (op0) == HImode)
3802 mask ^= 0xffff;
3803 else
3804 mask ^= 0xff;
3805 }
8e4edce7
DD
3806 /* Now we need to properly sign-extend the mask in case we need to
3807 fall back to an AND or OR opcode. */
07127a0a
DD
3808 if (GET_MODE (op0) == HImode)
3809 {
3810 if (mask & 0x8000)
3811 mask -= 0x10000;
3812 }
3813 else
3814 {
3815 if (mask & 0x80)
3816 mask -= 0x100;
3817 }
3818
3819 switch ( (INTVAL (operands[3]) ? 4 : 0)
3820 + ((GET_MODE (op0) == HImode) ? 2 : 0)
3821 + (TARGET_A24 ? 1 : 0))
3822 {
3823 case 0: p = gen_andqi3_16 (op0, src0, GEN_INT (mask)); break;
3824 case 1: p = gen_andqi3_24 (op0, src0, GEN_INT (mask)); break;
3825 case 2: p = gen_andhi3_16 (op0, src0, GEN_INT (mask)); break;
3826 case 3: p = gen_andhi3_24 (op0, src0, GEN_INT (mask)); break;
3827 case 4: p = gen_iorqi3_16 (op0, src0, GEN_INT (mask)); break;
3828 case 5: p = gen_iorqi3_24 (op0, src0, GEN_INT (mask)); break;
3829 case 6: p = gen_iorhi3_16 (op0, src0, GEN_INT (mask)); break;
3830 case 7: p = gen_iorhi3_24 (op0, src0, GEN_INT (mask)); break;
3831 }
3832
3833 emit_insn (p);
3834 return 0;
3835}
3836
3837const char *
3838m32c_scc_pattern(rtx *operands, RTX_CODE code)
3839{
3840 static char buf[30];
3841 if (GET_CODE (operands[0]) == REG
3842 && REGNO (operands[0]) == R0_REGNO)
3843 {
3844 if (code == EQ)
3845 return "stzx\t#1,#0,r0l";
3846 if (code == NE)
3847 return "stzx\t#0,#1,r0l";
3848 }
3849 sprintf(buf, "bm%s\t0,%%h0\n\tand.b\t#1,%%0", GET_RTX_NAME (code));
3850 return buf;
3851}
3852
5abd2125
JS
3853/* Encode symbol attributes of a SYMBOL_REF into its
3854 SYMBOL_REF_FLAGS. */
3855static void
3856m32c_encode_section_info (tree decl, rtx rtl, int first)
3857{
3858 int extra_flags = 0;
3859
3860 default_encode_section_info (decl, rtl, first);
3861 if (TREE_CODE (decl) == FUNCTION_DECL
3862 && m32c_special_page_vector_p (decl))
3863
3864 extra_flags = SYMBOL_FLAG_FUNCVEC_FUNCTION;
3865
3866 if (extra_flags)
3867 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= extra_flags;
3868}
3869
38b2d076
DD
3870/* Returns TRUE if the current function is a leaf, and thus we can
3871 determine which registers an interrupt function really needs to
3872 save. The logic below is mostly about finding the insn sequence
3873 that's the function, versus any sequence that might be open for the
3874 current insn. */
3875static int
3876m32c_leaf_function_p (void)
3877{
3878 rtx saved_first, saved_last;
3879 struct sequence_stack *seq;
3880 int rv;
3881
3882 saved_first = cfun->emit->x_first_insn;
3883 saved_last = cfun->emit->x_last_insn;
3884 for (seq = cfun->emit->sequence_stack; seq && seq->next; seq = seq->next)
3885 ;
3886 if (seq)
3887 {
3888 cfun->emit->x_first_insn = seq->first;
3889 cfun->emit->x_last_insn = seq->last;
3890 }
3891
3892 rv = leaf_function_p ();
3893
3894 cfun->emit->x_first_insn = saved_first;
3895 cfun->emit->x_last_insn = saved_last;
3896 return rv;
3897}
3898
3899/* Returns TRUE if the current function needs to use the ENTER/EXIT
3900 opcodes. If the function doesn't need the frame base or stack
3901 pointer, it can use the simpler RTS opcode. */
3902static bool
3903m32c_function_needs_enter (void)
3904{
3905 rtx insn;
3906 struct sequence_stack *seq;
3907 rtx sp = gen_rtx_REG (Pmode, SP_REGNO);
3908 rtx fb = gen_rtx_REG (Pmode, FB_REGNO);
3909
3910 insn = get_insns ();
3911 for (seq = cfun->emit->sequence_stack;
3912 seq;
3913 insn = seq->first, seq = seq->next);
3914
3915 while (insn)
3916 {
3917 if (reg_mentioned_p (sp, insn))
3918 return true;
3919 if (reg_mentioned_p (fb, insn))
3920 return true;
3921 insn = NEXT_INSN (insn);
3922 }
3923 return false;
3924}
3925
3926/* Mark all the subexpressions of the PARALLEL rtx PAR as
3927 frame-related. Return PAR.
3928
3929 dwarf2out.c:dwarf2out_frame_debug_expr ignores sub-expressions of a
3930 PARALLEL rtx other than the first if they do not have the
3931 FRAME_RELATED flag set on them. So this function is handy for
3932 marking up 'enter' instructions. */
3933static rtx
3934m32c_all_frame_related (rtx par)
3935{
3936 int len = XVECLEN (par, 0);
3937 int i;
3938
3939 for (i = 0; i < len; i++)
3940 F (XVECEXP (par, 0, i));
3941
3942 return par;
3943}
3944
3945/* Emits the prologue. See the frame layout comment earlier in this
3946 file. We can reserve up to 256 bytes with the ENTER opcode, beyond
3947 that we manually update sp. */
3948void
3949m32c_emit_prologue (void)
3950{
3951 int frame_size, extra_frame_size = 0, reg_save_size;
3952 int complex_prologue = 0;
3953
3954 cfun->machine->is_leaf = m32c_leaf_function_p ();
3955 if (interrupt_p (cfun->decl))
3956 {
3957 cfun->machine->is_interrupt = 1;
3958 complex_prologue = 1;
3959 }
3960
3961 reg_save_size = m32c_pushm_popm (PP_justcount);
3962
3963 if (interrupt_p (cfun->decl))
3964 emit_insn (gen_pushm (GEN_INT (cfun->machine->intr_pushm)));
3965
3966 frame_size =
3967 m32c_initial_elimination_offset (FB_REGNO, SP_REGNO) - reg_save_size;
3968 if (frame_size == 0
3969 && !cfun->machine->is_interrupt
3970 && !m32c_function_needs_enter ())
3971 cfun->machine->use_rts = 1;
3972
3973 if (frame_size > 254)
3974 {
3975 extra_frame_size = frame_size - 254;
3976 frame_size = 254;
3977 }
3978 if (cfun->machine->use_rts == 0)
3979 F (emit_insn (m32c_all_frame_related
3980 (TARGET_A16
fa9fd28a
RIL
3981 ? gen_prologue_enter_16 (GEN_INT (frame_size + 2))
3982 : gen_prologue_enter_24 (GEN_INT (frame_size + 4)))));
38b2d076
DD
3983
3984 if (extra_frame_size)
3985 {
3986 complex_prologue = 1;
3987 if (TARGET_A16)
3988 F (emit_insn (gen_addhi3 (gen_rtx_REG (HImode, SP_REGNO),
3989 gen_rtx_REG (HImode, SP_REGNO),
3990 GEN_INT (-extra_frame_size))));
3991 else
3992 F (emit_insn (gen_addpsi3 (gen_rtx_REG (PSImode, SP_REGNO),
3993 gen_rtx_REG (PSImode, SP_REGNO),
3994 GEN_INT (-extra_frame_size))));
3995 }
3996
3997 complex_prologue += m32c_pushm_popm (PP_pushm);
3998
3999 /* This just emits a comment into the .s file for debugging. */
4000 if (complex_prologue)
4001 emit_insn (gen_prologue_end ());
4002}
4003
4004/* Likewise, for the epilogue. The only exception is that, for
4005 interrupts, we must manually unwind the frame as the REIT opcode
4006 doesn't do that. */
4007void
4008m32c_emit_epilogue (void)
4009{
4010 /* This just emits a comment into the .s file for debugging. */
4011 if (m32c_pushm_popm (PP_justcount) > 0 || cfun->machine->is_interrupt)
4012 emit_insn (gen_epilogue_start ());
4013
4014 m32c_pushm_popm (PP_popm);
4015
4016 if (cfun->machine->is_interrupt)
4017 {
4018 enum machine_mode spmode = TARGET_A16 ? HImode : PSImode;
4019
4020 emit_move_insn (gen_rtx_REG (spmode, A0_REGNO),
4021 gen_rtx_REG (spmode, FP_REGNO));
4022 emit_move_insn (gen_rtx_REG (spmode, SP_REGNO),
4023 gen_rtx_REG (spmode, A0_REGNO));
4024 if (TARGET_A16)
4025 emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, FP_REGNO)));
4026 else
4027 emit_insn (gen_poppsi (gen_rtx_REG (PSImode, FP_REGNO)));
4028 emit_insn (gen_popm (GEN_INT (cfun->machine->intr_pushm)));
0e0642aa
RIL
4029 if (TARGET_A16)
4030 emit_jump_insn (gen_epilogue_reit_16 ());
4031 else
4032 emit_jump_insn (gen_epilogue_reit_24 ());
38b2d076
DD
4033 }
4034 else if (cfun->machine->use_rts)
4035 emit_jump_insn (gen_epilogue_rts ());
0e0642aa
RIL
4036 else if (TARGET_A16)
4037 emit_jump_insn (gen_epilogue_exitd_16 ());
38b2d076 4038 else
0e0642aa 4039 emit_jump_insn (gen_epilogue_exitd_24 ());
38b2d076
DD
4040 emit_barrier ();
4041}
4042
4043void
4044m32c_emit_eh_epilogue (rtx ret_addr)
4045{
4046 /* R0[R2] has the stack adjustment. R1[R3] has the address to
4047 return to. We have to fudge the stack, pop everything, pop SP
4048 (fudged), and return (fudged). This is actually easier to do in
4049 assembler, so punt to libgcc. */
4050 emit_jump_insn (gen_eh_epilogue (ret_addr, cfun->machine->eh_stack_adjust));
4051 /* emit_insn (gen_rtx_CLOBBER (HImode, gen_rtx_REG (HImode, R0L_REGNO))); */
4052 emit_barrier ();
4053}
4054
16659fcf
DD
4055/* Indicate which flags must be properly set for a given conditional. */
4056static int
4057flags_needed_for_conditional (rtx cond)
4058{
4059 switch (GET_CODE (cond))
4060 {
4061 case LE:
4062 case GT:
4063 return FLAGS_OSZ;
4064 case LEU:
4065 case GTU:
4066 return FLAGS_ZC;
4067 case LT:
4068 case GE:
4069 return FLAGS_OS;
4070 case LTU:
4071 case GEU:
4072 return FLAGS_C;
4073 case EQ:
4074 case NE:
4075 return FLAGS_Z;
4076 default:
4077 return FLAGS_N;
4078 }
4079}
4080
#define DEBUG_CMP 0

/* Returns true if a compare insn is redundant because it would only
   set flags that are already set correctly.  */
static bool
m32c_compare_redundant (rtx cmp, rtx *operands)
{
  int flags_needed;
  int pflags;
  rtx prev, pp, next;
  rtx op0, op1, op2;
#if DEBUG_CMP
  int prev_icode, i;
#endif

  op0 = operands[0];
  op1 = operands[1];
  op2 = operands[2];

#if DEBUG_CMP
  fprintf(stderr, "\n\033[32mm32c_compare_redundant\033[0m\n");
  debug_rtx(cmp);
  for (i=0; i<2; i++)
    {
      fprintf(stderr, "operands[%d] = ", i);
      debug_rtx(operands[i]);
    }
#endif

  /* Find the conditional that consumes this compare's flags; give up
     unless the next real insn is a (possibly conditional) SET.  */
  next = next_nonnote_insn (cmp);
  if (!next || !INSN_P (next))
    {
#if DEBUG_CMP
      fprintf(stderr, "compare not followed by insn\n");
      debug_rtx(next);
#endif
      return false;
    }
  if (GET_CODE (PATTERN (next)) == SET
      && GET_CODE (XEXP ( PATTERN (next), 1)) == IF_THEN_ELSE)
    {
      next = XEXP (XEXP (PATTERN (next), 1), 0);
    }
  else if (GET_CODE (PATTERN (next)) == SET)
    {
      /* If this is a conditional, flags_needed will be something
	 other than FLAGS_N, which we test below.  */
      next = XEXP (PATTERN (next), 1);
    }
  else
    {
#if DEBUG_CMP
      fprintf(stderr, "compare not followed by conditional\n");
      debug_rtx(next);
#endif
      return false;
    }
#if DEBUG_CMP
  fprintf(stderr, "conditional is: ");
  debug_rtx(next);
#endif

  flags_needed = flags_needed_for_conditional (next);
  if (flags_needed == FLAGS_N)
    {
#if DEBUG_CMP
      fprintf(stderr, "compare not followed by conditional\n");
      debug_rtx(next);
#endif
      return false;
    }

  /* Compare doesn't set overflow and carry the same way that
     arithmetic instructions do, so we can't replace those.  */
  if (flags_needed & FLAGS_OC)
    return false;

  /* Scan backwards for the nearest flag-setting SET; bail out on
     anything that isn't a SET, or on a non-flag insn that touches
     op0 (its flags would not describe op0's final value).  */
  prev = cmp;
  do {
    prev = prev_nonnote_insn (prev);
    if (!prev)
      {
#if DEBUG_CMP
	fprintf(stderr, "No previous insn.\n");
#endif
	return false;
      }
    if (!INSN_P (prev))
      {
#if DEBUG_CMP
	fprintf(stderr, "Previous insn is a non-insn.\n");
#endif
	return false;
      }
    pp = PATTERN (prev);
    if (GET_CODE (pp) != SET)
      {
#if DEBUG_CMP
	fprintf(stderr, "Previous insn is not a SET.\n");
#endif
	return false;
      }
    pflags = get_attr_flags (prev);

    /* Looking up attributes of previous insns corrupted the recog
       tables.  */
    INSN_UID (cmp) = -1;
    recog (PATTERN (cmp), cmp, 0);

    if (pflags == FLAGS_N
	&& reg_mentioned_p (op0, pp))
      {
#if DEBUG_CMP
	fprintf(stderr, "intermediate non-flags insn uses op:\n");
	debug_rtx(prev);
#endif
	return false;
      }
  } while (pflags == FLAGS_N);
#if DEBUG_CMP
  fprintf(stderr, "previous flag-setting insn:\n");
  debug_rtx(prev);
  debug_rtx(pp);
#endif

  if (GET_CODE (pp) == SET
      && GET_CODE (XEXP (pp, 0)) == REG
      && REGNO (XEXP (pp, 0)) == FLG_REGNO
      && GET_CODE (XEXP (pp, 1)) == COMPARE)
    {
      /* Adjacent cbranches must have the same operands to be
	 redundant.  */
      rtx pop0 = XEXP (XEXP (pp, 1), 0);
      rtx pop1 = XEXP (XEXP (pp, 1), 1);
#if DEBUG_CMP
      fprintf(stderr, "adjacent cbranches\n");
      debug_rtx(pop0);
      debug_rtx(pop1);
#endif
      if (rtx_equal_p (op0, pop0)
	  && rtx_equal_p (op1, pop1))
	return true;
#if DEBUG_CMP
      fprintf(stderr, "prev cmp not same\n");
#endif
      return false;
    }

  /* Else the previous insn must be a SET, with either the source or
     dest equal to operands[0], and operands[1] must be zero.  */

  if (!rtx_equal_p (op1, const0_rtx))
    {
#if DEBUG_CMP
      fprintf(stderr, "operands[1] not const0_rtx\n");
#endif
      return false;
    }
  if (GET_CODE (pp) != SET)
    {
#if DEBUG_CMP
      fprintf (stderr, "pp not set\n");
#endif
      return false;
    }
  if (!rtx_equal_p (op0, SET_SRC (pp))
      && !rtx_equal_p (op0, SET_DEST (pp)))
    {
#if DEBUG_CMP
      fprintf(stderr, "operands[0] not found in set\n");
#endif
      return false;
    }

  /* The compare is redundant only if the earlier insn sets every
     flag the conditional reads.  */
#if DEBUG_CMP
  fprintf(stderr, "cmp flags %x prev flags %x\n", flags_needed, pflags);
#endif
  if ((pflags & flags_needed) == flags_needed)
    return true;

  return false;
}
4263
4264/* Return the pattern for a compare. This will be commented out if
4265 the compare is redundant, else a normal pattern is returned. Thus,
4266 the assembler output says where the compare would have been. */
4267char *
4268m32c_output_compare (rtx insn, rtx *operands)
4269{
4270 static char template[] = ";cmp.b\t%1,%0";
4271 /* ^ 5 */
4272
4273 template[5] = " bwll"[GET_MODE_SIZE(GET_MODE(operands[0]))];
4274 if (m32c_compare_redundant (insn, operands))
4275 {
4276#if DEBUG_CMP
4277 fprintf(stderr, "cbranch: cmp not needed\n");
4278#endif
4279 return template;
4280 }
4281
4282#if DEBUG_CMP
4283 fprintf(stderr, "cbranch: cmp needed: `%s'\n", template);
4284#endif
4285 return template + 1;
4286}
4287
5abd2125
JS
4288#undef TARGET_ENCODE_SECTION_INFO
4289#define TARGET_ENCODE_SECTION_INFO m32c_encode_section_info
4290
38b2d076
DD
4291/* The Global `targetm' Variable. */
4292
4293struct gcc_target targetm = TARGET_INITIALIZER;
4294
4295#include "gt-m32c.h"