]> git.ipfire.org Git - thirdparty/gcc.git/blame - gcc/config/m32c/m32c.c
sh.md (load_gbr): Use correct operand constraint.
[thirdparty/gcc.git] / gcc / config / m32c / m32c.c
CommitLineData
38b2d076 1/* Target Code for R8C/M16C/M32C
6fb5fa3c 2 Copyright (C) 2005, 2006, 2007
38b2d076
DD
3 Free Software Foundation, Inc.
4 Contributed by Red Hat.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it
9 under the terms of the GNU General Public License as published
10 by the Free Software Foundation; either version 2, or (at your
11 option) any later version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT
14 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
16 License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to the Free
20 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
21 02110-1301, USA. */
22
23#include "config.h"
24#include "system.h"
25#include "coretypes.h"
26#include "tm.h"
27#include "rtl.h"
28#include "regs.h"
29#include "hard-reg-set.h"
30#include "real.h"
31#include "insn-config.h"
32#include "conditions.h"
33#include "insn-flags.h"
34#include "output.h"
35#include "insn-attr.h"
36#include "flags.h"
37#include "recog.h"
38#include "reload.h"
39#include "toplev.h"
40#include "obstack.h"
41#include "tree.h"
42#include "expr.h"
43#include "optabs.h"
44#include "except.h"
45#include "function.h"
46#include "ggc.h"
47#include "target.h"
48#include "target-def.h"
49#include "tm_p.h"
50#include "langhooks.h"
51#include "tree-gimple.h"
fa9fd28a 52#include "df.h"
38b2d076
DD
53
54/* Prototypes */
55
56/* Used by m32c_pushm_popm. */
57typedef enum
58{
59 PP_pushm,
60 PP_popm,
61 PP_justcount
62} Push_Pop_Type;
63
64static tree interrupt_handler (tree *, tree, tree, int, bool *);
5abd2125 65static tree function_vector_handler (tree *, tree, tree, int, bool *);
38b2d076
DD
66static int interrupt_p (tree node);
67static bool m32c_asm_integer (rtx, unsigned int, int);
68static int m32c_comp_type_attributes (tree, tree);
69static bool m32c_fixed_condition_code_regs (unsigned int *, unsigned int *);
70static struct machine_function *m32c_init_machine_status (void);
71static void m32c_insert_attributes (tree, tree *);
72static bool m32c_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
73 tree, bool);
74static bool m32c_promote_prototypes (tree);
75static int m32c_pushm_popm (Push_Pop_Type);
76static bool m32c_strict_argument_naming (CUMULATIVE_ARGS *);
77static rtx m32c_struct_value_rtx (tree, int);
78static rtx m32c_subreg (enum machine_mode, rtx, enum machine_mode, int);
79static int need_to_save (int);
5abd2125
JS
80int current_function_special_page_vector (rtx);
81
#define SYMBOL_FLAG_FUNCVEC_FUNCTION	(SYMBOL_FLAG_MACH_DEP << 0)

/* TRUE iff the two NUL-terminated strings are equal.  */
#define streq(a,b) (strcmp ((a), (b)) == 0)

/* Internal support routines */

/* Debugging statements are tagged with DEBUG0 only so that they can
   be easily enabled individually, by replacing the '0' with '1' as
   needed.  */
#define DEBUG0 0
#define DEBUG1 1

#if DEBUG0
/* This is needed by some of the commented-out debug statements
   below.  */
static char const *class_names[LIM_REG_CLASSES] = REG_CLASS_NAMES;
#endif
/* Plain-int copy of REG_CLASS_CONTENTS; only word [0] is ever
   indexed in this file, so all hard registers must fit in one int's
   worth of bits.  */
static int class_contents[LIM_REG_CLASSES][1] = REG_CLASS_CONTENTS;

/* These are all to support encode_pattern().  */
static char pattern[30], *patternp;
static GTY(()) rtx patternr[30];
/* TRUE iff the most recently encoded pattern equals the string X.  */
#define RTX_IS(x) (streq (pattern, x))

/* Some macros to simplify the logic throughout this file.  */
#define IS_MEM_REGNO(regno) ((regno) >= MEM0_REGNO && (regno) <= MEM7_REGNO)
#define IS_MEM_REG(rtx) (GET_CODE (rtx) == REG && IS_MEM_REGNO (REGNO (rtx)))

#define IS_CR_REGNO(regno) ((regno) >= SB_REGNO && (regno) <= PC_REGNO)
#define IS_CR_REG(rtx) (GET_CODE (rtx) == REG && IS_CR_REGNO (REGNO (rtx)))
112
113/* We do most RTX matching by converting the RTX into a string, and
114 using string compares. This vastly simplifies the logic in many of
115 the functions in this file.
116
117 On exit, pattern[] has the encoded string (use RTX_IS("...") to
118 compare it) and patternr[] has pointers to the nodes in the RTX
119 corresponding to each character in the encoded string. The latter
120 is mostly used by print_operand().
121
122 Unrecognized patterns have '?' in them; this shows up when the
123 assembler complains about syntax errors.
124*/
125
/* Recursive worker for encode_pattern(): appends one character per
   RTX node to pattern[] and records each visited node in patternr[]
   at the matching index.  Truncates with '?' rather than overrunning
   the buffer, so an overlong RTX can never match a valid pattern.  */
static void
encode_pattern_1 (rtx x)
{
  int i;

  /* Leave room for the terminating NUL written by encode_pattern().  */
  if (patternp == pattern + sizeof (pattern) - 2)
    {
      patternp[-1] = '?';
      return;
    }

  patternr[patternp - pattern] = x;

  switch (GET_CODE (x))
    {
    case REG:
      *patternp++ = 'r';
      break;
    case SUBREG:
      /* Only note the subreg ('S') when it changes size; a same-size
	 subreg is encoded as its inner expression alone.  */
      if (GET_MODE_SIZE (GET_MODE (x)) !=
	  GET_MODE_SIZE (GET_MODE (XEXP (x, 0))))
	*patternp++ = 'S';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case MEM:
      *patternp++ = 'm';
      /* FALLTHROUGH - a MEM is 'm' followed by its address, encoded
	 the same way a CONST encodes its operand.  */
    case CONST:
      encode_pattern_1 (XEXP (x, 0));
      break;
    case PLUS:
      *patternp++ = '+';
      encode_pattern_1 (XEXP (x, 0));
      encode_pattern_1 (XEXP (x, 1));
      break;
    case PRE_DEC:
      *patternp++ = '>';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case POST_INC:
      *patternp++ = '<';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case LO_SUM:
      *patternp++ = 'L';
      encode_pattern_1 (XEXP (x, 0));
      encode_pattern_1 (XEXP (x, 1));
      break;
    case HIGH:
      *patternp++ = 'H';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case SYMBOL_REF:
      *patternp++ = 's';
      break;
    case LABEL_REF:
      *patternp++ = 'l';
      break;
    case CODE_LABEL:
      *patternp++ = 'c';
      break;
    case CONST_INT:
    case CONST_DOUBLE:
      *patternp++ = 'i';
      break;
    case UNSPEC:
      /* 'u' plus the unspec number as a single decimal digit.  */
      *patternp++ = 'u';
      *patternp++ = '0' + XCINT (x, 1, UNSPEC);
      for (i = 0; i < XVECLEN (x, 0); i++)
	encode_pattern_1 (XVECEXP (x, 0, i));
      break;
    case USE:
      *patternp++ = 'U';
      break;
    case PARALLEL:
      *patternp++ = '|';
      for (i = 0; i < XVECLEN (x, 0); i++)
	encode_pattern_1 (XVECEXP (x, 0, i));
      break;
    case EXPR_LIST:
      *patternp++ = 'E';
      encode_pattern_1 (XEXP (x, 0));
      if (XEXP (x, 1))
	encode_pattern_1 (XEXP (x, 1));
      break;
    default:
      /* Unknown codes encode as '?', which matches nothing.  */
      *patternp++ = '?';
#if DEBUG0
      fprintf (stderr, "can't encode pattern %s\n",
	       GET_RTX_NAME (GET_CODE (x)));
      debug_rtx (x);
      gcc_unreachable ();
#endif
      break;
    }
}
221
222static void
223encode_pattern (rtx x)
224{
225 patternp = pattern;
226 encode_pattern_1 (x);
227 *patternp = 0;
228}
229
230/* Since register names indicate the mode they're used in, we need a
231 way to determine which name to refer to the register with. Called
232 by print_operand(). */
233
234static const char *
235reg_name_with_mode (int regno, enum machine_mode mode)
236{
237 int mlen = GET_MODE_SIZE (mode);
238 if (regno == R0_REGNO && mlen == 1)
239 return "r0l";
240 if (regno == R0_REGNO && (mlen == 3 || mlen == 4))
241 return "r2r0";
242 if (regno == R0_REGNO && mlen == 6)
243 return "r2r1r0";
244 if (regno == R0_REGNO && mlen == 8)
245 return "r3r1r2r0";
246 if (regno == R1_REGNO && mlen == 1)
247 return "r1l";
248 if (regno == R1_REGNO && (mlen == 3 || mlen == 4))
249 return "r3r1";
250 if (regno == A0_REGNO && TARGET_A16 && (mlen == 3 || mlen == 4))
251 return "a1a0";
252 return reg_names[regno];
253}
254
255/* How many bytes a register uses on stack when it's pushed. We need
256 to know this because the push opcode needs to explicitly indicate
257 the size of the register, even though the name of the register
258 already tells it that. Used by m32c_output_reg_{push,pop}, which
259 is only used through calls to ASM_OUTPUT_REG_{PUSH,POP}. */
260
261static int
262reg_push_size (int regno)
263{
264 switch (regno)
265 {
266 case R0_REGNO:
267 case R1_REGNO:
268 return 2;
269 case R2_REGNO:
270 case R3_REGNO:
271 case FLG_REGNO:
272 return 2;
273 case A0_REGNO:
274 case A1_REGNO:
275 case SB_REGNO:
276 case FB_REGNO:
277 case SP_REGNO:
278 if (TARGET_A16)
279 return 2;
280 else
281 return 3;
282 default:
283 gcc_unreachable ();
284 }
285}
286
/* Lazily-built cache of the number of hard registers in each class;
   allocated and filled on the first call to reduce_class().  */
static int *class_sizes = 0;

/* Given two register classes, find the largest intersection between
   them.  If there is no intersection, return RETURNED_IF_EMPTY
   instead.  "Largest" means the known class with the most registers
   that is wholly contained in the intersection.  */
static int
reduce_class (int original_class, int limiting_class, int returned_if_empty)
{
  int cc = class_contents[original_class][0];
  int i, best = NO_REGS;
  int best_size = 0;

  if (original_class == limiting_class)
    return original_class;

  /* First call: count the registers in every class, once.  */
  if (!class_sizes)
    {
      int r;
      class_sizes = (int *) xmalloc (LIM_REG_CLASSES * sizeof (int));
      for (i = 0; i < LIM_REG_CLASSES; i++)
	{
	  class_sizes[i] = 0;
	  for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
	    if (class_contents[i][0] & (1 << r))
	      class_sizes[i]++;
	}
    }

  /* CC is now the raw intersection; find the biggest named class
     that fits entirely inside it.  */
  cc &= class_contents[limiting_class][0];
  for (i = 0; i < LIM_REG_CLASSES; i++)
    {
      int ic = class_contents[i][0];

      /* Class I qualifies if every one of its registers is in CC.  */
      if ((~cc & ic) == 0)
	if (best_size < class_sizes[i])
	  {
	    best = i;
	    best_size = class_sizes[i];
	  }

    }
  if (best == NO_REGS)
    return returned_if_empty;
  return best;
}
332
333/* Returns TRUE If there are any registers that exist in both register
334 classes. */
335static int
336classes_intersect (int class1, int class2)
337{
338 return class_contents[class1][0] & class_contents[class2][0];
339}
340
/* Used by m32c_register_move_cost to determine if a move is
   impossibly expensive.  Returns nonzero iff some register in CLASS
   (together with enough consecutive class members) can hold MODE.  */
static int
class_can_hold_mode (int class, enum machine_mode mode)
{
  /* Cache the results: 0=untested 1=no 2=yes */
  static char results[LIM_REG_CLASSES][MAX_MACHINE_MODE];
  if (results[class][mode] == 0)
    {
      int r, n, i;
      /* Assume "no" until a suitable register run is found.  */
      results[class][mode] = 1;
      for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
	if (class_contents[class][0] & (1 << r)
	    && HARD_REGNO_MODE_OK (r, mode))
	  {
	    int ok = 1;
	    /* All HARD_REGNO_NREGS consecutive registers starting at
	       R must also be members of CLASS.  */
	    n = HARD_REGNO_NREGS (r, mode);
	    for (i = 1; i < n; i++)
	      if (!(class_contents[class][0] & (1 << (r + i))))
		ok = 0;
	    if (ok)
	      {
		results[class][mode] = 2;
		break;
	      }
	  }
    }
#if DEBUG0
  fprintf (stderr, "class %s can hold %s? %s\n",
	   class_names[class], mode_name[mode],
	   (results[class][mode] == 2) ? "yes" : "no");
#endif
  return results[class][mode] == 2;
}
375
/* Run-time Target Specification.  */

/* Memregs are memory locations that gcc treats like general
   registers, as there are a limited number of true registers and the
   m32c families can use memory in most places that registers can be
   used.

   However, since memory accesses are more expensive than registers,
   we allow the user to limit the number of memregs available, in
   order to try to persuade gcc to try harder to use real registers.

   Memregs are provided by m32c-lib1.S.
*/

int target_memregs = 16;		/* Bytes of memregs; set by -memregs=.  */
static bool target_memregs_set = FALSE;	/* TRUE once -memregs= has been seen.  */
int ok_to_change_target_memregs = TRUE;
393
394#undef TARGET_HANDLE_OPTION
395#define TARGET_HANDLE_OPTION m32c_handle_option
396static bool
397m32c_handle_option (size_t code,
398 const char *arg ATTRIBUTE_UNUSED,
399 int value ATTRIBUTE_UNUSED)
400{
401 if (code == OPT_memregs_)
402 {
403 target_memregs_set = TRUE;
404 target_memregs = atoi (arg);
405 }
406 return TRUE;
407}
408
409/* Implements OVERRIDE_OPTIONS. We limit memregs to 0..16, and
410 provide a default. */
411void
412m32c_override_options (void)
413{
414 if (target_memregs_set)
415 {
416 if (target_memregs < 0 || target_memregs > 16)
417 error ("invalid target memregs value '%d'", target_memregs);
418 }
419 else
07127a0a 420 target_memregs = 16;
38b2d076
DD
421}
422
423/* Defining data structures for per-function information */
424
425/* The usual; we set up our machine_function data. */
426static struct machine_function *
427m32c_init_machine_status (void)
428{
429 struct machine_function *machine;
430 machine =
431 (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
432
433 return machine;
434}
435
/* Implements INIT_EXPANDERS.  We just set up to call the above
   function so each new function gets a fresh machine_function.  */
void
m32c_init_expanders (void)
{
  init_machine_status = m32c_init_machine_status;
}
443
/* Storage Layout */

#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN m32c_promote_function_return
/* Never promote return values to a wider mode; callers read back
   exactly the declared type.  */
bool
m32c_promote_function_return (tree fntype ATTRIBUTE_UNUSED)
{
  return false;
}
453
454/* Register Basics */
455
456/* Basic Characteristics of Registers */
457
/* Whether a mode fits in a register is complex enough to warrant a
   table.  Each entry gives, per hard register, the number of
   registers a value of the given size occupies starting there, or 0
   if the mode is not supported in that register at all (see
   m32c_hard_regno_nregs, which consults this table).  */
static struct
{
  char qi_regs;		/* 1-byte values */
  char hi_regs;		/* 2-byte values */
  char pi_regs;		/* pointer-sized (PSImode) values */
  char si_regs;		/* 4-byte values */
  char di_regs;		/* 8-byte values */
} nregs_table[FIRST_PSEUDO_REGISTER] =
{
  { 1, 1, 2, 2, 4 },		/* r0 */
  { 0, 1, 0, 0, 0 },		/* r2 */
  { 1, 1, 2, 2, 0 },		/* r1 */
  { 0, 1, 0, 0, 0 },		/* r3 */
  { 0, 1, 1, 0, 0 },		/* a0 */
  { 0, 1, 1, 0, 0 },		/* a1 */
  { 0, 1, 1, 0, 0 },		/* sb */
  { 0, 1, 1, 0, 0 },		/* fb */
  { 0, 1, 1, 0, 0 },		/* sp */
  { 1, 1, 1, 0, 0 },		/* pc */
  { 0, 0, 0, 0, 0 },		/* fl */
  { 1, 1, 1, 0, 0 },		/* ap */
  { 1, 1, 2, 2, 4 },		/* mem0 */
  { 1, 1, 2, 2, 4 },		/* mem1 */
  { 1, 1, 2, 2, 4 },		/* mem2 */
  { 1, 1, 2, 2, 4 },		/* mem3 */
  { 1, 1, 2, 2, 4 },		/* mem4 */
  { 1, 1, 2, 2, 0 },		/* mem5 */
  { 1, 1, 2, 2, 0 },		/* mem6 */
  { 1, 1, 0, 0, 0 },		/* mem7 */
};
490
491/* Implements CONDITIONAL_REGISTER_USAGE. We adjust the number of
492 available memregs, and select which registers need to be preserved
493 across calls based on the chip family. */
494
495void
496m32c_conditional_register_usage (void)
497{
38b2d076
DD
498 int i;
499
500 if (0 <= target_memregs && target_memregs <= 16)
501 {
502 /* The command line option is bytes, but our "registers" are
503 16-bit words. */
504 for (i = target_memregs/2; i < 8; i++)
505 {
506 fixed_regs[MEM0_REGNO + i] = 1;
507 CLEAR_HARD_REG_BIT (reg_class_contents[MEM_REGS], MEM0_REGNO + i);
508 }
509 }
510
511 /* M32CM and M32C preserve more registers across function calls. */
512 if (TARGET_A24)
513 {
514 call_used_regs[R1_REGNO] = 0;
515 call_used_regs[R2_REGNO] = 0;
516 call_used_regs[R3_REGNO] = 0;
517 call_used_regs[A0_REGNO] = 0;
518 call_used_regs[A1_REGNO] = 0;
519 }
520}
521
/* How Values Fit in Registers */

/* Implements HARD_REGNO_NREGS.  This is complicated by the fact that
   different registers are different sizes from each other, *and* may
   be different sizes in different chip families.  Returns 0 when MODE
   cannot live at REGNO at all (m32c_hard_regno_ok relies on that).  */
int
m32c_hard_regno_nregs (int regno, enum machine_mode mode)
{
  /* The flags register holds exactly one CCmode value.  */
  if (regno == FLG_REGNO && mode == CCmode)
    return 1;
  /* Pseudo-registers: plain word count.  */
  if (regno >= FIRST_PSEUDO_REGISTER)
    return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);

  /* Memregs are 16-bit words; round the byte size up.  */
  if (regno >= MEM0_REGNO && regno <= MEM7_REGNO)
    return (GET_MODE_SIZE (mode) + 1) / 2;

  /* Checks below are ordered from smallest size up; order matters
     because PSImode is 3 or 4 bytes depending on the family.  */
  if (GET_MODE_SIZE (mode) <= 1)
    return nregs_table[regno].qi_regs;
  if (GET_MODE_SIZE (mode) <= 2)
    return nregs_table[regno].hi_regs;
  /* On A16 parts a PSImode value in a0 needs the a1a0 pair (see
     reg_name_with_mode).  */
  if (regno == A0_REGNO && mode == PSImode && TARGET_A16)
    return 2;
  if ((GET_MODE_SIZE (mode) <= 3 || mode == PSImode) && TARGET_A24)
    return nregs_table[regno].pi_regs;
  if (GET_MODE_SIZE (mode) <= 4)
    return nregs_table[regno].si_regs;
  if (GET_MODE_SIZE (mode) <= 8)
    return nregs_table[regno].di_regs;
  return 0;
}
552
553/* Implements HARD_REGNO_MODE_OK. The above function does the work
554 already; just test its return value. */
555int
556m32c_hard_regno_ok (int regno, enum machine_mode mode)
557{
558 return m32c_hard_regno_nregs (regno, mode) != 0;
559}
560
/* Implements MODES_TIEABLE_P.  In general, modes aren't tieable since
   registers are all different sizes.  However, since most modes are
   bigger than our registers anyway, it's easier to implement this
   function that way, leaving QImode as the only unique case.  */
int
m32c_modes_tieable_p (enum machine_mode m1, enum machine_mode m2)
{
  if (GET_MODE_SIZE (m1) == GET_MODE_SIZE (m2))
    return 1;

#if 0
  /* Disabled: QImode was once treated as never tieable with a
     larger mode.  */
  if (m1 == QImode || m2 == QImode)
    return 0;
#endif

  /* With the block above disabled, differing-size modes are always
     considered tieable.  */
  return 1;
}
578
/* Register Classes */

/* Implements REGNO_REG_CLASS.  Maps each hard register to its most
   specific (single-register where possible) class.
   NOTE(review): the declared return type is enum machine_mode, yet
   every value returned (R0_REGS, A_REGS, ...) is a register class.
   This only works because both enums are integral -- confirm and
   consider changing the type to enum reg_class or int.  */
enum machine_mode
m32c_regno_reg_class (int regno)
{
  switch (regno)
    {
    case R0_REGNO:
      return R0_REGS;
    case R1_REGNO:
      return R1_REGS;
    case R2_REGNO:
      return R2_REGS;
    case R3_REGNO:
      return R3_REGS;
    case A0_REGNO:
    case A1_REGNO:
      return A_REGS;
    case SB_REGNO:
      return SB_REGS;
    case FB_REGNO:
      return FB_REGS;
    case SP_REGNO:
      return SP_REGS;
    case FLG_REGNO:
      return FLG_REGS;
    default:
      if (IS_MEM_REGNO (regno))
	return MEM_REGS;
      return ALL_REGS;
    }
}
612
/* Implements REG_CLASS_FROM_CONSTRAINT.  Note that some constraints only match
   for certain chip families.  Each constraint name is three characters;
   unknown "R..." constraints abort, anything else maps to NO_REGS.  */
int
m32c_reg_class_from_constraint (char c ATTRIBUTE_UNUSED, const char *s)
{
  /* Control/system registers.  */
  if (memcmp (s, "Rsp", 3) == 0)
    return SP_REGS;
  if (memcmp (s, "Rfb", 3) == 0)
    return FB_REGS;
  if (memcmp (s, "Rsb", 3) == 0)
    return SB_REGS;
  /* CR constraints are family-specific: Rcr on R8C/M16C (A16), Rcl
     on M32C (A24).  */
  if (memcmp (s, "Rcr", 3) == 0)
    return TARGET_A16 ? CR_REGS : NO_REGS;
  if (memcmp (s, "Rcl", 3) == 0)
    return TARGET_A24 ? CR_REGS : NO_REGS;
  /* Individual data registers and pairs.  */
  if (memcmp (s, "R0w", 3) == 0)
    return R0_REGS;
  if (memcmp (s, "R1w", 3) == 0)
    return R1_REGS;
  if (memcmp (s, "R2w", 3) == 0)
    return R2_REGS;
  if (memcmp (s, "R3w", 3) == 0)
    return R3_REGS;
  if (memcmp (s, "R02", 3) == 0)
    return R02_REGS;
  if (memcmp (s, "R03", 3) == 0)
    return R03_REGS;
  if (memcmp (s, "Rdi", 3) == 0)
    return DI_REGS;
  if (memcmp (s, "Rhl", 3) == 0)
    return HL_REGS;
  if (memcmp (s, "R23", 3) == 0)
    return R23_REGS;
  /* Address registers, singly and as a class.  */
  if (memcmp (s, "Ra0", 3) == 0)
    return A0_REGS;
  if (memcmp (s, "Ra1", 3) == 0)
    return A1_REGS;
  if (memcmp (s, "Raa", 3) == 0)
    return A_REGS;
  /* Family-restricted address-register constraints.  */
  if (memcmp (s, "Raw", 3) == 0)
    return TARGET_A16 ? A_REGS : NO_REGS;
  if (memcmp (s, "Ral", 3) == 0)
    return TARGET_A24 ? A_REGS : NO_REGS;
  /* Broader composite classes.  */
  if (memcmp (s, "Rqi", 3) == 0)
    return QI_REGS;
  if (memcmp (s, "Rad", 3) == 0)
    return AD_REGS;
  if (memcmp (s, "Rsi", 3) == 0)
    return SI_REGS;
  if (memcmp (s, "Rhi", 3) == 0)
    return HI_REGS;
  if (memcmp (s, "Rhc", 3) == 0)
    return HC_REGS;
  if (memcmp (s, "Rra", 3) == 0)
    return RA_REGS;
  if (memcmp (s, "Rfl", 3) == 0)
    return FLG_REGS;
  if (memcmp (s, "Rmm", 3) == 0)
    {
      /* All memregs disabled (see m32c_conditional_register_usage):
	 nothing can match.  */
      if (fixed_regs[MEM0_REGNO])
	return NO_REGS;
      return MEM_REGS;
    }

  /* PSImode registers - i.e. whatever can hold a pointer.  */
  if (memcmp (s, "Rpi", 3) == 0)
    {
      if (TARGET_A16)
	return HI_REGS;
      else
	return RA_REGS;		/* r2r0 and r3r1 can hold pointers.  */
    }

  /* We handle this one as an EXTRA_CONSTRAINT.  */
  if (memcmp (s, "Rpa", 3) == 0)
    return NO_REGS;

  /* Any other "R..." constraint is a bug in the machine description.  */
  if (*s == 'R')
    {
      fprintf(stderr, "unrecognized R constraint: %.3s\n", s);
      gcc_unreachable();
    }

  return NO_REGS;
}
698
699/* Implements REGNO_OK_FOR_BASE_P. */
700int
701m32c_regno_ok_for_base_p (int regno)
702{
703 if (regno == A0_REGNO
704 || regno == A1_REGNO || regno >= FIRST_PSEUDO_REGISTER)
705 return 1;
706 return 0;
707}
708
#define DEBUG_RELOAD 0

/* Implements PREFERRED_RELOAD_CLASS.  In general, prefer general
   registers of the appropriate size.  */
int
m32c_preferred_reload_class (rtx x, int rclass)
{
  int newclass = rclass;

#if DEBUG_RELOAD
  fprintf (stderr, "\npreferred_reload_class for %s is ",
	   class_names[rclass]);
#endif
  /* No constraint at all: pick a general class sized by X's mode.  */
  if (rclass == NO_REGS)
    rclass = GET_MODE (x) == QImode ? HL_REGS : R03_REGS;

  if (classes_intersect (rclass, CR_REGS))
    {
      switch (GET_MODE (x))
	{
	case QImode:
	  newclass = HL_REGS;
	  break;
	default:
	  /* newclass = HI_REGS; */
	  break;
	}
    }

  /* QI_REGS can't hold anything wider than 2 bytes.  */
  else if (newclass == QI_REGS && GET_MODE_SIZE (GET_MODE (x)) > 2)
    newclass = SI_REGS;
  /* Values wider than 4 bytes need r0..r3 (low 4 register bits);
     force DI_REGS unless the class already has all of them.  */
  else if (GET_MODE_SIZE (GET_MODE (x)) > 4
	   && ~class_contents[rclass][0] & 0x000f)
    newclass = DI_REGS;

  rclass = reduce_class (rclass, newclass, rclass);

  /* QImode values live only in r0l/r1l.  */
  if (GET_MODE (x) == QImode)
    rclass = reduce_class (rclass, HL_REGS, rclass);

#if DEBUG_RELOAD
  fprintf (stderr, "%s\n", class_names[rclass]);
  debug_rtx (x);

  if (GET_CODE (x) == MEM
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
    fprintf (stderr, "Glorm!\n");
#endif
  return rclass;
}
760
/* Implements PREFERRED_OUTPUT_RELOAD_CLASS.  Output reloads use the
   same class preferences as input reloads.  */
int
m32c_preferred_output_reload_class (rtx x, int rclass)
{
  return m32c_preferred_reload_class (x, rclass);
}
767
768/* Implements LIMIT_RELOAD_CLASS. We basically want to avoid using
769 address registers for reloads since they're needed for address
770 reloads. */
771int
772m32c_limit_reload_class (enum machine_mode mode, int rclass)
773{
774#if DEBUG_RELOAD
775 fprintf (stderr, "limit_reload_class for %s: %s ->",
776 mode_name[mode], class_names[rclass]);
777#endif
778
779 if (mode == QImode)
780 rclass = reduce_class (rclass, HL_REGS, rclass);
781 else if (mode == HImode)
782 rclass = reduce_class (rclass, HI_REGS, rclass);
783 else if (mode == SImode)
784 rclass = reduce_class (rclass, SI_REGS, rclass);
785
786 if (rclass != A_REGS)
787 rclass = reduce_class (rclass, DI_REGS, rclass);
788
789#if DEBUG_RELOAD
790 fprintf (stderr, " %s\n", class_names[rclass]);
791#endif
792 return rclass;
793}
794
/* Implements SECONDARY_RELOAD_CLASS.  QImode have to be reloaded in
   r0 or r1, as those are the only real QImode registers.  CR regs get
   reloaded through appropriately sized general or address
   registers.  */
int
m32c_secondary_reload_class (int rclass, enum machine_mode mode, rtx x)
{
  int cc = class_contents[rclass][0];
#if DEBUG0
  fprintf (stderr, "\nsecondary reload class %s %s\n",
	   class_names[rclass], mode_name[mode]);
  debug_rtx (x);
#endif
  /* QImode memory into a class with no registers outside r2/r3 needs
     an intermediate QI-capable register.  */
  if (mode == QImode
      && GET_CODE (x) == MEM && (cc & ~class_contents[R23_REGS][0]) == 0)
    return QI_REGS;
  /* Control registers (sb..sp) reload through HI regs on A16 parts,
     address regs on A24 parts.  */
  if (classes_intersect (rclass, CR_REGS)
      && GET_CODE (x) == REG
      && REGNO (x) >= SB_REGNO && REGNO (x) <= SP_REGNO)
    return TARGET_A16 ? HI_REGS : A_REGS;
  return NO_REGS;
}
817
818/* Implements CLASS_LIKELY_SPILLED_P. A_REGS is needed for address
819 reloads. */
820int
821m32c_class_likely_spilled_p (int regclass)
822{
823 if (regclass == A_REGS)
824 return 1;
825 return reg_class_size[regclass] == 1;
826}
827
828/* Implements CLASS_MAX_NREGS. We calculate this according to its
829 documented meaning, to avoid potential inconsistencies with actual
830 class definitions. */
831int
832m32c_class_max_nregs (int regclass, enum machine_mode mode)
833{
834 int rn, max = 0;
835
836 for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
837 if (class_contents[regclass][0] & (1 << rn))
838 {
839 int n = m32c_hard_regno_nregs (rn, mode);
840 if (max < n)
841 max = n;
842 }
843 return max;
844}
845
/* Implements CANNOT_CHANGE_MODE_CLASS.  Only r0 and r1 can change to
   QI (r0l, r1l) because the chip doesn't support QI ops on other
   registers (well, it does on a0/a1 but if we let gcc do that, reload
   suffers).  Otherwise, we allow changes to larger modes.  Returns
   nonzero when the change is NOT allowed.  */
int
m32c_cannot_change_mode_class (enum machine_mode from,
			       enum machine_mode to, int rclass)
{
  int rn;
#if DEBUG0
  fprintf (stderr, "cannot change from %s to %s in %s\n",
	   mode_name[from], mode_name[to], class_names[rclass]);
#endif

  /* If the larger mode isn't allowed in any of these registers, we
     can't allow the change.  */
  for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
    if (class_contents[rclass][0] & (1 << rn))
      if (! m32c_hard_regno_ok (rn, to))
	return 1;

  /* Changing to QImode: forbidden iff the class contains any register
     other than r0/r1/mem* (mask 0x1ffa covers those others).  The
     mask itself is the truthy return value.  */
  if (to == QImode)
    return (class_contents[rclass][0] & 0x1ffa);

  /* r0/r1 (mask 0x0005) may shrink from any multi-byte mode.  */
  if (class_contents[rclass][0] & 0x0005 /* r0, r1 */
      && GET_MODE_SIZE (from) > 1)
    return 0;
  /* All other regs may shrink from modes wider than 2 bytes.  */
  if (GET_MODE_SIZE (from) > 2)	/* all other regs */
    return 0;

  return 1;
}
878
/* Helpers for the rest of the file.  */
/* TRUE if the rtx is a REG rtx for the given register.  */
#define IS_REG(rtx,regno) (GET_CODE (rtx) == REG \
			   && REGNO (rtx) == regno)
/* TRUE if the rtx is a pseudo - specifically, one we can use as a
   base register in address calculations (hence the "strict"
   argument).  The arg pointer counts as a pseudo here because it is
   eliminated before final code.  */
#define IS_PSEUDO(rtx,strict) (!strict && GET_CODE (rtx) == REG \
			       && (REGNO (rtx) == AP_REGNO \
				   || REGNO (rtx) >= FIRST_PSEUDO_REGISTER))
889
/* Implements CONST_OK_FOR_CONSTRAINT_P.  Currently, all constant
   constraints start with 'I', with the next two characters indicating
   the type and size of the range allowed.  */
int
m32c_const_ok_for_constraint_p (HOST_WIDE_INT value,
				char c ATTRIBUTE_UNUSED, const char *str)
{
  /* s=signed u=unsigned n=nonzero m=minus l=log2able,
     [sun] bits [SUN] bytes, p=pointer size
     I[-0-9][0-9] matches that number */
  if (memcmp (str, "Is3", 3) == 0)
    {
      /* Signed 4-bit (3 bits + sign).  */
      return (-8 <= value && value <= 7);
    }
  if (memcmp (str, "IS1", 3) == 0)
    {
      /* Signed 1 byte.  */
      return (-128 <= value && value <= 127);
    }
  if (memcmp (str, "IS2", 3) == 0)
    {
      /* Signed 2 bytes.  */
      return (-32768 <= value && value <= 32767);
    }
  if (memcmp (str, "IU2", 3) == 0)
    {
      /* Unsigned 2 bytes.  */
      return (0 <= value && value <= 65535);
    }
  if (memcmp (str, "IU3", 3) == 0)
    {
      /* Unsigned 3 bytes (24-bit pointers).  */
      return (0 <= value && value <= 0x00ffffff);
    }
  if (memcmp (str, "In4", 3) == 0)
    {
      /* Nonzero in [-8, 8].  */
      return (-8 <= value && value && value <= 8);
    }
  if (memcmp (str, "In5", 3) == 0)
    {
      /* Nonzero in [-16, 16].  */
      return (-16 <= value && value && value <= 16);
    }
  if (memcmp (str, "In6", 3) == 0)
    {
      /* Nonzero in [-32, 32].  */
      return (-32 <= value && value && value <= 32);
    }
  if (memcmp (str, "IM2", 3) == 0)
    {
      /* Negative 2-byte value.  */
      return (-65536 <= value && value && value <= -1);
    }
  if (memcmp (str, "Ilb", 3) == 0)
    {
      /* Exactly one bit set within a byte.  */
      int b = exact_log2 (value);
      return (b >= 0 && b <= 7);
    }
  if (memcmp (str, "Imb", 3) == 0)
    {
      /* Exactly one bit clear within a byte.  */
      int b = exact_log2 ((value ^ 0xff) & 0xff);
      return (b >= 0 && b <= 7);
    }
  if (memcmp (str, "Ilw", 3) == 0)
    {
      /* Exactly one bit set within a word.  */
      int b = exact_log2 (value);
      return (b >= 0 && b <= 15);
    }
  if (memcmp (str, "Imw", 3) == 0)
    {
      /* Exactly one bit clear within a word.  */
      int b = exact_log2 ((value ^ 0xffff) & 0xffff);
      return (b >= 0 && b <= 15);
    }
  if (memcmp (str, "I00", 3) == 0)
    {
      return (value == 0);
    }
  return 0;
}
962
963/* Implements EXTRA_CONSTRAINT_STR (see next function too). 'S' is
964 for memory constraints, plus "Rpa" for PARALLEL rtx's we use for
965 call return values. */
966int
967m32c_extra_constraint_p2 (rtx value, char c ATTRIBUTE_UNUSED, const char *str)
968{
969 encode_pattern (value);
970 if (memcmp (str, "Sd", 2) == 0)
971 {
972 /* This is the common "src/dest" address */
973 rtx r;
974 if (GET_CODE (value) == MEM && CONSTANT_P (XEXP (value, 0)))
975 return 1;
976 if (RTX_IS ("ms") || RTX_IS ("m+si"))
977 return 1;
07127a0a
DD
978 if (RTX_IS ("m++rii"))
979 {
980 if (REGNO (patternr[3]) == FB_REGNO
981 && INTVAL (patternr[4]) == 0)
982 return 1;
983 }
38b2d076
DD
984 if (RTX_IS ("mr"))
985 r = patternr[1];
986 else if (RTX_IS ("m+ri") || RTX_IS ("m+rs") || RTX_IS ("m+r+si"))
987 r = patternr[2];
988 else
989 return 0;
990 if (REGNO (r) == SP_REGNO)
991 return 0;
992 return m32c_legitimate_address_p (GET_MODE (value), XEXP (value, 0), 1);
993 }
994 else if (memcmp (str, "Sa", 2) == 0)
995 {
996 rtx r;
997 if (RTX_IS ("mr"))
998 r = patternr[1];
999 else if (RTX_IS ("m+ri"))
1000 r = patternr[2];
1001 else
1002 return 0;
1003 return (IS_REG (r, A0_REGNO) || IS_REG (r, A1_REGNO));
1004 }
1005 else if (memcmp (str, "Si", 2) == 0)
1006 {
1007 return (RTX_IS ("mi") || RTX_IS ("ms") || RTX_IS ("m+si"));
1008 }
1009 else if (memcmp (str, "Ss", 2) == 0)
1010 {
1011 return ((RTX_IS ("mr")
1012 && (IS_REG (patternr[1], SP_REGNO)))
1013 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SP_REGNO))));
1014 }
1015 else if (memcmp (str, "Sf", 2) == 0)
1016 {
1017 return ((RTX_IS ("mr")
1018 && (IS_REG (patternr[1], FB_REGNO)))
1019 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], FB_REGNO))));
1020 }
1021 else if (memcmp (str, "Sb", 2) == 0)
1022 {
1023 return ((RTX_IS ("mr")
1024 && (IS_REG (patternr[1], SB_REGNO)))
1025 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SB_REGNO))));
1026 }
07127a0a
DD
1027 else if (memcmp (str, "Sp", 2) == 0)
1028 {
1029 /* Absolute addresses 0..0x1fff used for bit addressing (I/O ports) */
1030 return (RTX_IS ("mi")
1031 && !(INTVAL (patternr[1]) & ~0x1fff));
1032 }
38b2d076
DD
1033 else if (memcmp (str, "S1", 2) == 0)
1034 {
1035 return r1h_operand (value, QImode);
1036 }
1037
1038 gcc_assert (str[0] != 'S');
1039
1040 if (memcmp (str, "Rpa", 2) == 0)
1041 return GET_CODE (value) == PARALLEL;
1042
1043 return 0;
1044}
1045
1046/* This is for when we're debugging the above. */
1047int
1048m32c_extra_constraint_p (rtx value, char c, const char *str)
1049{
1050 int rv = m32c_extra_constraint_p2 (value, c, str);
1051#if DEBUG0
1052 fprintf (stderr, "\nconstraint %.*s: %d\n", CONSTRAINT_LEN (c, str), str,
1053 rv);
1054 debug_rtx (value);
1055#endif
1056 return rv;
1057}
1058
/* Implements EXTRA_MEMORY_CONSTRAINT.  Currently, we only use strings
   starting with 'S' (see m32c_extra_constraint_p2 above).  */
int
m32c_extra_memory_constraint (char c, const char *str ATTRIBUTE_UNUSED)
{
  return c == 'S';
}
1066
/* Implements EXTRA_ADDRESS_CONSTRAINT.  We reserve 'A' strings for these,
   but don't currently define any.  */
int
m32c_extra_address_constraint (char c, const char *str ATTRIBUTE_UNUSED)
{
  return c == 'A';
}
1074
1075/* STACK AND CALLING */
1076
/* Frame Layout */

/* Implements RETURN_ADDR_RTX.  Note that R8C and M16C push 24 bits
   (yes, THREE bytes) onto the stack for the return address, but we
   don't support pointers bigger than 16 bits on those chips.  This
   will likely wreak havoc with exception unwinding.  FIXME.
   Only COUNT == 0 (the current frame) is supported.  */
rtx
m32c_return_addr_rtx (int count)
{
  enum machine_mode mode;
  int offset;
  rtx ra_mem;

  if (count)
    return NULL_RTX;
  /* we want 2[$fb] */

  if (TARGET_A24)
    {
      mode = SImode;
      offset = 4;
    }
  else
    {
      /* FIXME: it's really 3 bytes */
      mode = HImode;
      offset = 2;
    }

  /* Return address lives just above the saved frame pointer.  */
  ra_mem =
    gen_rtx_MEM (mode, plus_constant (gen_rtx_REG (Pmode, FP_REGNO), offset));
  return copy_to_mode_reg (mode, ra_mem);
}
1110
/* Implements INCOMING_RETURN_ADDR_RTX.  See comment above.  At
   function entry the return address sits at the top of the stack.  */
rtx
m32c_incoming_return_addr_rtx (void)
{
  /* we want [sp] */
  return gen_rtx_MEM (PSImode, gen_rtx_REG (PSImode, SP_REGNO));
}
1118
1119/* Exception Handling Support */
1120
1121/* Implements EH_RETURN_DATA_REGNO. Choose registers able to hold
1122 pointers. */
1123int
1124m32c_eh_return_data_regno (int n)
1125{
1126 switch (n)
1127 {
1128 case 0:
1129 return A0_REGNO;
1130 case 1:
1131 return A1_REGNO;
1132 default:
1133 return INVALID_REGNUM;
1134 }
1135}
1136
1137/* Implements EH_RETURN_STACKADJ_RTX. Saved and used later in
1138 m32c_emit_eh_epilogue. */
1139rtx
1140m32c_eh_return_stackadj_rtx (void)
1141{
1142 if (!cfun->machine->eh_stack_adjust)
1143 {
1144 rtx sa;
1145
1146 sa = gen_reg_rtx (Pmode);
1147 cfun->machine->eh_stack_adjust = sa;
1148 }
1149 return cfun->machine->eh_stack_adjust;
1150}
1151
1152/* Registers That Address the Stack Frame */
1153
1154/* Implements DWARF_FRAME_REGNUM and DBX_REGISTER_NUMBER. Note that
1155 the original spec called for dwarf numbers to vary with register
1156 width as well, for example, r0l, r0, and r2r0 would each have
1157 different dwarf numbers. GCC doesn't support this, and we don't do
1158 it, and gdb seems to like it this way anyway. */
1159unsigned int
1160m32c_dwarf_frame_regnum (int n)
1161{
1162 switch (n)
1163 {
1164 case R0_REGNO:
1165 return 5;
1166 case R1_REGNO:
1167 return 6;
1168 case R2_REGNO:
1169 return 7;
1170 case R3_REGNO:
1171 return 8;
1172 case A0_REGNO:
1173 return 9;
1174 case A1_REGNO:
1175 return 10;
1176 case FB_REGNO:
1177 return 11;
1178 case SB_REGNO:
1179 return 19;
1180
1181 case SP_REGNO:
1182 return 12;
1183 case PC_REGNO:
1184 return 13;
1185 default:
1186 return DWARF_FRAME_REGISTERS + 1;
1187 }
1188}
1189
1190/* The frame looks like this:
1191
1192 ap -> +------------------------------
1193 | Return address (3 or 4 bytes)
1194 | Saved FB (2 or 4 bytes)
1195 fb -> +------------------------------
1196 | local vars
1197 | register saves fb
1198 | through r0 as needed
1199 sp -> +------------------------------
1200*/
1201
/* We use this to wrap all emitted insns in the prologue.  Sets
   RTX_FRAME_RELATED_P on X and returns X unchanged.  */
static rtx
F (rtx x)
{
  RTX_FRAME_RELATED_P (x) = 1;
  return x;
}
1209
/* This maps register numbers to the PUSHM/POPM bitfield, and tells us
   how much the stack pointer moves for each, for each cpu family.  */
static struct
{
  int reg1;		/* GCC register number.  */
  int bit;		/* This register's bit in the PUSHM/POPM mask.  */
  int a16_bytes;	/* Bytes pushed on 16-bit-address chips.  */
  int a24_bytes;	/* Bytes pushed on 24-bit-address chips.  */
} pushm_info[] =
{
  /* These are in reverse push (nearest-to-sp) order.  */
  { R0_REGNO, 0x80, 2, 2 },
  { R1_REGNO, 0x40, 2, 2 },
  { R2_REGNO, 0x20, 2, 2 },
  { R3_REGNO, 0x10, 2, 2 },
  { A0_REGNO, 0x08, 2, 4 },
  { A1_REGNO, 0x04, 2, 4 },
  { SB_REGNO, 0x02, 2, 4 },
  { FB_REGNO, 0x01, 2, 4 }
};

/* Number of entries in pushm_info.  */
#define PUSHM_N (sizeof(pushm_info)/sizeof(pushm_info[0]))
1232
/* Returns TRUE if we need to save/restore the given register.  We
   save everything for exception handlers, so that any register can be
   unwound.  For interrupt handlers, we save everything if the handler
   calls something else (because we don't know what *that* function
   might do), but try to be a bit smarter if the handler is a leaf
   function.  We always save $a0, though, because we use that in the
   epilogue to copy $fb to $sp.  */
static int
need_to_save (int regno)
{
  /* Fixed registers are never saved.  */
  if (fixed_regs[regno])
    return 0;
  /* Exception handlers save everything so any register can be unwound.  */
  if (cfun->calls_eh_return)
    return 1;
  /* $fb is saved as part of frame setup (see frame diagram above),
     not through the pushm set.  */
  if (regno == FP_REGNO)
    return 0;
  /* Interrupt handlers: save everything unless this is a leaf, but
     always save $a0 (used in the epilogue to copy $fb to $sp).  */
  if (cfun->machine->is_interrupt
      && (!cfun->machine->is_leaf || regno == A0_REGNO))
    return 1;
  /* Otherwise, save any live register that is not call-clobbered
     (interrupt handlers also save live call-clobbered registers).  */
  if (df_regs_ever_live_p (regno)
      && (!call_used_regs[regno] || cfun->machine->is_interrupt))
    return 1;
  return 0;
}
1257
/* This function contains all the intelligence about saving and
   restoring registers.  It always figures out the register save set.
   When called with PP_justcount, it merely returns the size of the
   save set (for eliminating the frame pointer, for example).  When
   called with PP_pushm or PP_popm, it emits the appropriate
   instructions for saving (pushm) or restoring (popm) the
   registers.  */
static int
m32c_pushm_popm (Push_Pop_Type ppt)
{
  int reg_mask = 0;		/* PUSHM/POPM bitfield being accumulated.  */
  int byte_count = 0, bytes;	/* Total bytes the stack pointer moves.  */
  int i;
  rtx dwarf_set[PUSHM_N];	/* Per-register SETs for the unwind note.  */
  int n_dwarfs = 0;
  int nosave_mask = 0;		/* Bits for registers we must NOT touch.  */

  /* When the return value lives in a PARALLEL of registers, popm must
     not clobber those registers on the way out, so exclude them from
     the save set (eh and interrupt returns still restore everything).  */
  if (cfun->return_rtx
      && GET_CODE (cfun->return_rtx) == PARALLEL
      && !(cfun->calls_eh_return || cfun->machine->is_interrupt))
    {
      rtx exp = XVECEXP (cfun->return_rtx, 0, 0);
      rtx rv = XEXP (exp, 0);
      int rv_bytes = GET_MODE_SIZE (GET_MODE (rv));

      if (rv_bytes > 2)
	nosave_mask |= 0x20;	/* PSI, SI */
      else
	nosave_mask |= 0xf0;	/* DF */
      if (rv_bytes > 4)
	nosave_mask |= 0x50;	/* DI */
    }

  for (i = 0; i < (int) PUSHM_N; i++)
    {
      /* Skip if neither register needs saving.  */
      if (!need_to_save (pushm_info[i].reg1))
	continue;

      if (pushm_info[i].bit & nosave_mask)
	continue;

      reg_mask |= pushm_info[i].bit;
      bytes = TARGET_A16 ? pushm_info[i].a16_bytes : pushm_info[i].a24_bytes;

      if (ppt == PP_pushm)
	{
	  enum machine_mode mode = (bytes == 2) ? HImode : SImode;
	  rtx addr;

	  /* Always use stack_pointer_rtx instead of calling
	     rtx_gen_REG ourselves.  Code elsewhere in GCC assumes
	     that there is a single rtx representing the stack pointer,
	     namely stack_pointer_rtx, and uses == to recognize it.  */
	  addr = stack_pointer_rtx;

	  if (byte_count != 0)
	    addr = gen_rtx_PLUS (GET_MODE (addr), addr, GEN_INT (byte_count));

	  /* Record a frame-related SET for each saved register so the
	     dwarf unwind note built below knows where each landed.  */
	  dwarf_set[n_dwarfs++] =
	    gen_rtx_SET (VOIDmode,
			 gen_rtx_MEM (mode, addr),
			 gen_rtx_REG (mode, pushm_info[i].reg1));
	  F (dwarf_set[n_dwarfs - 1]);

	}
      byte_count += bytes;
    }

  /* For interrupt handlers, stash the pushm mask in intr_pushm and
     reset the counters: only the memreg saves below are emitted and
     counted here.  */
  if (cfun->machine->is_interrupt)
    {
      cfun->machine->intr_pushm = reg_mask & 0xfe;
      reg_mask = 0;
      byte_count = 0;
    }

  /* Interrupt handlers also save any live mem0..mem7 pseudo-registers
     (two bytes each), recorded in intr_pushmem.  */
  if (cfun->machine->is_interrupt)
    for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
      if (need_to_save (i))
	{
	  byte_count += 2;
	  cfun->machine->intr_pushmem[i - MEM0_REGNO] = 1;
	}

  if (ppt == PP_pushm && byte_count)
    {
      rtx note = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (n_dwarfs + 1));
      rtx pushm;

      if (reg_mask)
	{
	  /* Element 0 of the note is the combined SP adjustment; the
	     rest are the per-register stores collected above.  */
	  XVECEXP (note, 0, 0)
	    = gen_rtx_SET (VOIDmode,
			   stack_pointer_rtx,
			   gen_rtx_PLUS (GET_MODE (stack_pointer_rtx),
					 stack_pointer_rtx,
					 GEN_INT (-byte_count)));
	  F (XVECEXP (note, 0, 0));

	  for (i = 0; i < n_dwarfs; i++)
	    XVECEXP (note, 0, i + 1) = dwarf_set[i];

	  pushm = F (emit_insn (gen_pushm (GEN_INT (reg_mask))));

	  REG_NOTES (pushm) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, note,
						 REG_NOTES (pushm));
	}

      /* Memregs are pushed one at a time, after the pushm.  */
      if (cfun->machine->is_interrupt)
	for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
	  if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
	    {
	      if (TARGET_A16)
		pushm = emit_insn (gen_pushhi_16 (gen_rtx_REG (HImode, i)));
	      else
		pushm = emit_insn (gen_pushhi_24 (gen_rtx_REG (HImode, i)));
	      F (pushm);
	    }
    }
  if (ppt == PP_popm && byte_count)
    {
      /* Restore in reverse order of the saves above: memregs first
	 (highest to lowest), then the popm set.  */
      if (cfun->machine->is_interrupt)
	for (i = MEM7_REGNO; i >= MEM0_REGNO; i--)
	  if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
	    {
	      if (TARGET_A16)
		emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, i)));
	      else
		emit_insn (gen_pophi_24 (gen_rtx_REG (HImode, i)));
	    }
      if (reg_mask)
	emit_insn (gen_popm (GEN_INT (reg_mask)));
    }

  return byte_count;
}
1394
1395/* Implements INITIAL_ELIMINATION_OFFSET. See the comment above that
1396 diagrams our call frame. */
1397int
1398m32c_initial_elimination_offset (int from, int to)
1399{
1400 int ofs = 0;
1401
1402 if (from == AP_REGNO)
1403 {
1404 if (TARGET_A16)
1405 ofs += 5;
1406 else
1407 ofs += 8;
1408 }
1409
1410 if (to == SP_REGNO)
1411 {
1412 ofs += m32c_pushm_popm (PP_justcount);
1413 ofs += get_frame_size ();
1414 }
1415
1416 /* Account for push rounding. */
1417 if (TARGET_A24)
1418 ofs = (ofs + 1) & ~1;
1419#if DEBUG0
1420 fprintf (stderr, "initial_elimination_offset from=%d to=%d, ofs=%d\n", from,
1421 to, ofs);
1422#endif
1423 return ofs;
1424}
1425
1426/* Passing Function Arguments on the Stack */
1427
1428#undef TARGET_PROMOTE_PROTOTYPES
1429#define TARGET_PROMOTE_PROTOTYPES m32c_promote_prototypes
1430static bool
1431m32c_promote_prototypes (tree fntype ATTRIBUTE_UNUSED)
1432{
1433 return 0;
1434}
1435
1436/* Implements PUSH_ROUNDING. The R8C and M16C have byte stacks, the
1437 M32C has word stacks. */
1438int
1439m32c_push_rounding (int n)
1440{
1441 if (TARGET_R8C || TARGET_M16C)
1442 return n;
1443 return (n + 1) & ~1;
1444}
1445
1446/* Passing Arguments in Registers */
1447
1448/* Implements FUNCTION_ARG. Arguments are passed partly in registers,
1449 partly on stack. If our function returns a struct, a pointer to a
1450 buffer for it is at the top of the stack (last thing pushed). The
1451 first few real arguments may be in registers as follows:
1452
1453 R8C/M16C: arg1 in r1 if it's QI or HI (else it's pushed on stack)
1454 arg2 in r2 if it's HI (else pushed on stack)
1455 rest on stack
1456 M32C: arg1 in r0 if it's QI or HI (else it's pushed on stack)
1457 rest on stack
1458
1459 Structs are not passed in registers, even if they fit. Only
1460 integer and pointer types are passed in registers.
1461
1462 Note that when arg1 doesn't fit in r1, arg2 may still be passed in
1463 r2 if it fits. */
1464rtx
1465m32c_function_arg (CUMULATIVE_ARGS * ca,
1466 enum machine_mode mode, tree type, int named)
1467{
1468 /* Can return a reg, parallel, or 0 for stack */
1469 rtx rv = NULL_RTX;
1470#if DEBUG0
1471 fprintf (stderr, "func_arg %d (%s, %d)\n",
1472 ca->parm_num, mode_name[mode], named);
1473 debug_tree (type);
1474#endif
1475
1476 if (mode == VOIDmode)
1477 return GEN_INT (0);
1478
1479 if (ca->force_mem || !named)
1480 {
1481#if DEBUG0
1482 fprintf (stderr, "func arg: force %d named %d, mem\n", ca->force_mem,
1483 named);
1484#endif
1485 return NULL_RTX;
1486 }
1487
1488 if (type && INTEGRAL_TYPE_P (type) && POINTER_TYPE_P (type))
1489 return NULL_RTX;
1490
9d746d5e
DD
1491 if (type && AGGREGATE_TYPE_P (type))
1492 return NULL_RTX;
1493
38b2d076
DD
1494 switch (ca->parm_num)
1495 {
1496 case 1:
1497 if (GET_MODE_SIZE (mode) == 1 || GET_MODE_SIZE (mode) == 2)
1498 rv = gen_rtx_REG (mode, TARGET_A16 ? R1_REGNO : R0_REGNO);
1499 break;
1500
1501 case 2:
1502 if (TARGET_A16 && GET_MODE_SIZE (mode) == 2)
1503 rv = gen_rtx_REG (mode, R2_REGNO);
1504 break;
1505 }
1506
1507#if DEBUG0
1508 debug_rtx (rv);
1509#endif
1510 return rv;
1511}
1512
1513#undef TARGET_PASS_BY_REFERENCE
1514#define TARGET_PASS_BY_REFERENCE m32c_pass_by_reference
1515static bool
1516m32c_pass_by_reference (CUMULATIVE_ARGS * ca ATTRIBUTE_UNUSED,
1517 enum machine_mode mode ATTRIBUTE_UNUSED,
1518 tree type ATTRIBUTE_UNUSED,
1519 bool named ATTRIBUTE_UNUSED)
1520{
1521 return 0;
1522}
1523
1524/* Implements INIT_CUMULATIVE_ARGS. */
1525void
1526m32c_init_cumulative_args (CUMULATIVE_ARGS * ca,
9d746d5e 1527 tree fntype,
38b2d076 1528 rtx libname ATTRIBUTE_UNUSED,
9d746d5e 1529 tree fndecl,
38b2d076
DD
1530 int n_named_args ATTRIBUTE_UNUSED)
1531{
9d746d5e
DD
1532 if (fntype && aggregate_value_p (TREE_TYPE (fntype), fndecl))
1533 ca->force_mem = 1;
1534 else
1535 ca->force_mem = 0;
38b2d076
DD
1536 ca->parm_num = 1;
1537}
1538
1539/* Implements FUNCTION_ARG_ADVANCE. force_mem is set for functions
1540 returning structures, so we always reset that. Otherwise, we only
1541 need to know the sequence number of the argument to know what to do
1542 with it. */
1543void
1544m32c_function_arg_advance (CUMULATIVE_ARGS * ca,
1545 enum machine_mode mode ATTRIBUTE_UNUSED,
1546 tree type ATTRIBUTE_UNUSED,
1547 int named ATTRIBUTE_UNUSED)
1548{
1549 if (ca->force_mem)
1550 ca->force_mem = 0;
9d746d5e
DD
1551 else
1552 ca->parm_num++;
38b2d076
DD
1553}
1554
1555/* Implements FUNCTION_ARG_REGNO_P. */
1556int
1557m32c_function_arg_regno_p (int r)
1558{
1559 if (TARGET_A24)
1560 return (r == R0_REGNO);
1561 return (r == R1_REGNO || r == R2_REGNO);
1562}
1563
e9555b13 1564/* HImode and PSImode are the two "native" modes as far as GCC is
85f65093 1565 concerned, but the chips also support a 32-bit mode which is used
e9555b13
DD
1566 for some opcodes in R8C/M16C and for reset vectors and such. */
1567#undef TARGET_VALID_POINTER_MODE
1568#define TARGET_VALID_POINTER_MODE m32c_valid_pointer_mode
23fed240 1569static bool
e9555b13
DD
1570m32c_valid_pointer_mode (enum machine_mode mode)
1571{
e9555b13
DD
1572 if (mode == HImode
1573 || mode == PSImode
1574 || mode == SImode
1575 )
1576 return 1;
1577 return 0;
1578}
1579
38b2d076
DD
1580/* How Scalar Function Values Are Returned */
1581
/* Implements LIBCALL_VALUE.  Most values are returned in $r0, or some
   combination of registers starting there (r2r0 for longs, r3r1r2r0
   for long long, r3r2r1r0 for doubles), except that that ABI
   currently doesn't work because it ends up using all available
   general registers and gcc often can't compile it.  So, instead, we
   return anything bigger than 16 bits in "mem0" (effectively, a
   memory location).  */
rtx
m32c_libcall_value (enum machine_mode mode)
{
  /* return reg or parallel */
#if 0
  /* FIXME: GCC has difficulty returning large values in registers,
     because that ties up most of the general registers and gives the
     register allocator little to work with.  Until we can resolve
     this, large values are returned in memory.  */
  if (mode == DFmode)
    {
      rtx rv;

      rv = gen_rtx_PARALLEL (mode, rtvec_alloc (4));
      XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R0_REGNO),
					      GEN_INT (0));
      XVECEXP (rv, 0, 1) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R1_REGNO),
					      GEN_INT (2));
      XVECEXP (rv, 0, 2) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R2_REGNO),
					      GEN_INT (4));
      XVECEXP (rv, 0, 3) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R3_REGNO),
					      GEN_INT (6));
      return rv;
    }

  if (TARGET_A24 && GET_MODE_SIZE (mode) > 2)
    {
      rtx rv;

      rv = gen_rtx_PARALLEL (mode, rtvec_alloc (1));
      XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (mode,
							   R0_REGNO),
					      GEN_INT (0));
      return rv;
    }
#endif

  /* Values wider than 16 bits come back in the mem0 pseudo-register;
     anything else comes back in $r0.  */
  if (GET_MODE_SIZE (mode) > 2)
    return gen_rtx_REG (mode, MEM0_REGNO);
  return gen_rtx_REG (mode, R0_REGNO);
}
1639
1640/* Implements FUNCTION_VALUE. Functions and libcalls have the same
1641 conventions. */
1642rtx
1643m32c_function_value (tree valtype, tree func ATTRIBUTE_UNUSED)
1644{
1645 /* return reg or parallel */
1646 enum machine_mode mode = TYPE_MODE (valtype);
1647 return m32c_libcall_value (mode);
1648}
1649
1650/* How Large Values Are Returned */
1651
1652/* We return structures by pushing the address on the stack, even if
1653 we use registers for the first few "real" arguments. */
1654#undef TARGET_STRUCT_VALUE_RTX
1655#define TARGET_STRUCT_VALUE_RTX m32c_struct_value_rtx
1656static rtx
1657m32c_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
1658 int incoming ATTRIBUTE_UNUSED)
1659{
1660 return 0;
1661}
1662
1663/* Function Entry and Exit */
1664
1665/* Implements EPILOGUE_USES. Interrupts restore all registers. */
1666int
1667m32c_epilogue_uses (int regno ATTRIBUTE_UNUSED)
1668{
1669 if (cfun->machine->is_interrupt)
1670 return 1;
1671 return 0;
1672}
1673
1674/* Implementing the Varargs Macros */
1675
1676#undef TARGET_STRICT_ARGUMENT_NAMING
1677#define TARGET_STRICT_ARGUMENT_NAMING m32c_strict_argument_naming
1678static bool
1679m32c_strict_argument_naming (CUMULATIVE_ARGS * ca ATTRIBUTE_UNUSED)
1680{
1681 return 1;
1682}
1683
1684/* Trampolines for Nested Functions */
1685
1686/*
1687 m16c:
1688 1 0000 75C43412 mov.w #0x1234,a0
1689 2 0004 FC000000 jmp.a label
1690
1691 m32c:
1692 1 0000 BC563412 mov.l:s #0x123456,a0
1693 2 0004 CC000000 jmp.a label
1694*/
1695
/* Implements TRAMPOLINE_SIZE.  See the instruction templates in the
   comment above for the two layouts.  */
int
m32c_trampoline_size (void)
{
  /* Allocate extra space so we can avoid the messy shifts when we
     initialize the trampoline; we just write past the end of the
     opcode.  */
  return TARGET_A16 ? 8 : 10;
}
1705
/* Implements TRAMPOLINE_ALIGNMENT.  */
int
m32c_trampoline_alignment (void)
{
  return 2;
}
1712
/* Implements INITIALIZE_TRAMPOLINE.  Writes the instruction sequence
   shown in the comment above into the trampoline buffer: load the
   static chain into $a0, then jmp.a to the target function.  */
void
m32c_initialize_trampoline (rtx tramp, rtx function, rtx chainval)
{
/* A0(m,i): a MEM of mode M at byte offset I inside the trampoline.  */
#define A0(m,i) gen_rtx_MEM (m, plus_constant (tramp, i))
  if (TARGET_A16)
    {
      /* Note: we subtract a "word" because the moves want signed
	 constants, not unsigned constants.  */
      emit_move_insn (A0 (HImode, 0), GEN_INT (0xc475 - 0x10000));
      emit_move_insn (A0 (HImode, 2), chainval);
      emit_move_insn (A0 (QImode, 4), GEN_INT (0xfc - 0x100));
      /* We use 16-bit addresses here, but store the zero to turn it
	 into a 24-bit offset.  */
      emit_move_insn (A0 (HImode, 5), function);
      emit_move_insn (A0 (QImode, 7), GEN_INT (0x00));
    }
  else
    {
      /* Note that the PSI moves actually write 4 bytes.  Make sure we
	 write stuff out in the right order, and leave room for the
	 extra byte at the end.  */
      emit_move_insn (A0 (QImode, 0), GEN_INT (0xbc - 0x100));
      emit_move_insn (A0 (PSImode, 1), chainval);
      emit_move_insn (A0 (QImode, 4), GEN_INT (0xcc - 0x100));
      emit_move_insn (A0 (PSImode, 5), function);
    }
#undef A0
}
1742
07127a0a
DD
/* Implicit Calls to Library Routines */

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS m32c_init_libfuncs
static void
m32c_init_libfuncs (void)
{
  if (TARGET_A24)
    {
      /* We do this because the M32C has an HImode operand, but the
	 M16C has an 8-bit operand.  Since gcc looks at the match data
	 and not the expanded rtl, we have to reset the array so that
	 the right modes are found.  */
      setcc_gen_code[EQ] = CODE_FOR_seq_24;
      setcc_gen_code[NE] = CODE_FOR_sne_24;
      setcc_gen_code[GT] = CODE_FOR_sgt_24;
      setcc_gen_code[GE] = CODE_FOR_sge_24;
      setcc_gen_code[LT] = CODE_FOR_slt_24;
      setcc_gen_code[LE] = CODE_FOR_sle_24;
      setcc_gen_code[GTU] = CODE_FOR_sgtu_24;
      setcc_gen_code[GEU] = CODE_FOR_sgeu_24;
      setcc_gen_code[LTU] = CODE_FOR_sltu_24;
      setcc_gen_code[LEU] = CODE_FOR_sleu_24;
    }
}
1768
38b2d076
DD
1769/* Addressing Modes */
1770
/* Used by GO_IF_LEGITIMATE_ADDRESS.  The r8c/m32c family supports a
   wide range of non-orthogonal addressing modes, including the
   ability to double-indirect on *some* of them.  Not all insns
   support all modes, either, but we rely on predicates and
   constraints to deal with that.  */
int
m32c_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
{
  int mode_adjust;
  if (CONSTANT_P (x))
    return 1;

  /* Wide references to memory will be split after reload, so we must
     ensure that all parts of such splits remain legitimate
     addresses.  */
  mode_adjust = GET_MODE_SIZE (mode) - 1;

  /* allowing PLUS yields mem:HI(plus:SI(mem:SI(plus:SI in m32c_split_move */
  if (GET_CODE (x) == PRE_DEC
      || GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_MODIFY)
    {
      /* Auto-inc/dec addressing is only accepted on the stack pointer.  */
      return (GET_CODE (XEXP (x, 0)) == REG
	      && REGNO (XEXP (x, 0)) == SP_REGNO);
    }

#if 0
  /* This is the double indirection detection, but it currently
     doesn't work as cleanly as this code implies, so until we've had
     a chance to debug it, leave it disabled.  */
  if (TARGET_A24 && GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) != PLUS)
    {
#if DEBUG_DOUBLE
      fprintf (stderr, "double indirect\n");
#endif
      x = XEXP (x, 0);
    }
#endif

  encode_pattern (x);
  /* Plain register: "r".  */
  if (RTX_IS ("r"))
    {
      /* Most indexable registers can be used without displacements,
	 although some of them will be emitted with an explicit zero
	 to please the assembler.  */
      switch (REGNO (patternr[0]))
	{
	case A0_REGNO:
	case A1_REGNO:
	case SB_REGNO:
	case FB_REGNO:
	case SP_REGNO:
	  return 1;

	default:
	  if (IS_PSEUDO (patternr[0], strict))
	    return 1;
	  return 0;
	}
    }
  /* Register plus constant displacement: "+ri".  */
  if (RTX_IS ("+ri"))
    {
      /* This is more interesting, because different base registers
	 allow for different displacements - both range and signedness
	 - and it differs from chip series to chip series too.  */
      int rn = REGNO (patternr[1]);
      HOST_WIDE_INT offs = INTVAL (patternr[2]);
      switch (rn)
	{
	case A0_REGNO:
	case A1_REGNO:
	case SB_REGNO:
	  /* The syntax only allows positive offsets, but when the
	     offsets span the entire memory range, we can simulate
	     negative offsets by wrapping.  */
	  if (TARGET_A16)
	    return (offs >= -65536 && offs <= 65535 - mode_adjust);
	  if (rn == SB_REGNO)
	    return (offs >= 0 && offs <= 65535 - mode_adjust);
	  /* A0 or A1 */
	  return (offs >= -16777216 && offs <= 16777215);

	case FB_REGNO:
	  if (TARGET_A16)
	    return (offs >= -128 && offs <= 127 - mode_adjust);
	  return (offs >= -65536 && offs <= 65535 - mode_adjust);

	case SP_REGNO:
	  return (offs >= -128 && offs <= 127 - mode_adjust);

	default:
	  if (IS_PSEUDO (patternr[1], strict))
	    return 1;
	  return 0;
	}
    }
  /* Register plus symbol (with optional constant): "+rs", "+r+si".  */
  if (RTX_IS ("+rs") || RTX_IS ("+r+si"))
    {
      rtx reg = patternr[1];

      /* We don't know where the symbol is, so only allow base
	 registers which support displacements spanning the whole
	 address range.  */
      switch (REGNO (reg))
	{
	case A0_REGNO:
	case A1_REGNO:
	  /* $sb needs a secondary reload, but since it's involved in
	     memory address reloads too, we don't deal with it very
	     well.  */
	  /* case SB_REGNO: */
	  return 1;
	default:
	  if (IS_PSEUDO (reg, strict))
	    return 1;
	  return 0;
	}
    }
  return 0;
}
1890
1891/* Implements REG_OK_FOR_BASE_P. */
1892int
1893m32c_reg_ok_for_base_p (rtx x, int strict)
1894{
1895 if (GET_CODE (x) != REG)
1896 return 0;
1897 switch (REGNO (x))
1898 {
1899 case A0_REGNO:
1900 case A1_REGNO:
1901 case SB_REGNO:
1902 case FB_REGNO:
1903 case SP_REGNO:
1904 return 1;
1905 default:
1906 if (IS_PSEUDO (x, strict))
1907 return 1;
1908 return 0;
1909 }
1910}
1911
04aff2c0 1912/* We have three choices for choosing fb->aN offsets. If we choose -128,
85f65093 1913 we need one MOVA -128[fb],aN opcode and 16-bit aN displacements,
04aff2c0
DD
1914 like this:
1915 EB 4B FF mova -128[$fb],$a0
1916 D8 0C FF FF mov.w:Q #0,-1[$a0]
1917
85f65093 1918 Alternately, we subtract the frame size, and hopefully use 8-bit aN
04aff2c0
DD
1919 displacements:
1920 7B F4 stc $fb,$a0
1921 77 54 00 01 sub #256,$a0
1922 D8 08 01 mov.w:Q #0,1[$a0]
1923
1924 If we don't offset (i.e. offset by zero), we end up with:
1925 7B F4 stc $fb,$a0
1926 D8 0C 00 FF mov.w:Q #0,-256[$a0]
1927
1928 We have to subtract *something* so that we have a PLUS rtx to mark
1929 that we've done this reload. The -128 offset will never result in
85f65093 1930 an 8-bit aN offset, and the payoff for the second case is five
04aff2c0
DD
1931 loads *if* those loads are within 256 bytes of the other end of the
1932 frame, so the third case seems best. Note that we subtract the
1933 zero, but detect that in the addhi3 pattern. */
1934
1935#define BIG_FB_ADJ 0
1936
38b2d076
DD
1937/* Implements LEGITIMIZE_ADDRESS. The only address we really have to
1938 worry about is frame base offsets, as $fb has a limited
1939 displacement range. We deal with this by attempting to reload $fb
1940 itself into an address register; that seems to result in the best
1941 code. */
1942int
1943m32c_legitimize_address (rtx * x ATTRIBUTE_UNUSED,
1944 rtx oldx ATTRIBUTE_UNUSED,
1945 enum machine_mode mode ATTRIBUTE_UNUSED)
1946{
1947#if DEBUG0
1948 fprintf (stderr, "m32c_legitimize_address for mode %s\n", mode_name[mode]);
1949 debug_rtx (*x);
1950 fprintf (stderr, "\n");
1951#endif
1952
1953 if (GET_CODE (*x) == PLUS
1954 && GET_CODE (XEXP (*x, 0)) == REG
1955 && REGNO (XEXP (*x, 0)) == FB_REGNO
1956 && GET_CODE (XEXP (*x, 1)) == CONST_INT
1957 && (INTVAL (XEXP (*x, 1)) < -128
1958 || INTVAL (XEXP (*x, 1)) > (128 - GET_MODE_SIZE (mode))))
1959 {
1960 /* reload FB to A_REGS */
38b2d076
DD
1961 rtx temp = gen_reg_rtx (Pmode);
1962 *x = copy_rtx (*x);
04aff2c0 1963 emit_insn (gen_rtx_SET (VOIDmode, temp, XEXP (*x, 0)));
38b2d076
DD
1964 XEXP (*x, 0) = temp;
1965 return 1;
1966 }
1967
1968 return 0;
1969}
1970
/* Implements LEGITIMIZE_RELOAD_ADDRESS.  See comment above.  */
int
m32c_legitimize_reload_address (rtx * x,
				enum machine_mode mode,
				int opnum,
				int type, int ind_levels ATTRIBUTE_UNUSED)
{
#if DEBUG0
  fprintf (stderr, "\nm32c_legitimize_reload_address for mode %s\n",
	   mode_name[mode]);
  debug_rtx (*x);
#endif

  /* At one point, this function tried to get $fb copied to an address
     register, which in theory would maximize sharing, but gcc was
     *also* still trying to reload the whole address, and we'd run out
     of address registers.  So we let gcc do the naive (but safe)
     reload instead, when the above function doesn't handle it for
     us.

     The code below is a second attempt at the above.  */

  /* Case 1: ($fb + out-of-range constant).  Push a reload of
     ($fb + adjustment) into an address register, leaving the
     remaining displacement on the outer PLUS.  */
  if (GET_CODE (*x) == PLUS
      && GET_CODE (XEXP (*x, 0)) == REG
      && REGNO (XEXP (*x, 0)) == FB_REGNO
      && GET_CODE (XEXP (*x, 1)) == CONST_INT
      && (INTVAL (XEXP (*x, 1)) < -128
	  || INTVAL (XEXP (*x, 1)) > (128 - GET_MODE_SIZE (mode))))
    {
      rtx sum;
      int offset = INTVAL (XEXP (*x, 1));
      int adjustment = -BIG_FB_ADJ;

      sum = gen_rtx_PLUS (Pmode, XEXP (*x, 0),
			  GEN_INT (adjustment));
      *x = gen_rtx_PLUS (Pmode, sum, GEN_INT (offset - adjustment));
      if (type == RELOAD_OTHER)
	type = RELOAD_FOR_OTHER_ADDRESS;
      push_reload (sum, NULL_RTX, &XEXP (*x, 0), NULL,
		   A_REGS, Pmode, VOIDmode, 0, 0, opnum,
		   type);
      return 1;
    }

  /* Case 2: (($fb + const) + const), as produced above: reload the
     inner sum into an address register.  */
  if (GET_CODE (*x) == PLUS
      && GET_CODE (XEXP (*x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (*x, 0), 0)) == REG
      && REGNO (XEXP (XEXP (*x, 0), 0)) == FB_REGNO
      && GET_CODE (XEXP (XEXP (*x, 0), 1)) == CONST_INT
      && GET_CODE (XEXP (*x, 1)) == CONST_INT
      )
    {
      if (type == RELOAD_OTHER)
	type = RELOAD_FOR_OTHER_ADDRESS;
      push_reload (XEXP (*x, 0), NULL_RTX, &XEXP (*x, 0), NULL,
		   A_REGS, Pmode, VOIDmode, 0, 0, opnum,
		   type);
      return 1;
    }

  return 0;
}
2033
38b2d076
DD
/* Implements LEGITIMATE_CONSTANT_P.  We split large constants anyway,
   so we can allow anything.  */
int
m32c_legitimate_constant_p (rtx x ATTRIBUTE_UNUSED)
{
  return 1;
}
2041
2042
/* Condition Code Status */

/* Implements TARGET_FIXED_CONDITION_CODE_REGS.  The flags register is
   the only condition-code register; there is no second one.  */
#undef TARGET_FIXED_CONDITION_CODE_REGS
#define TARGET_FIXED_CONDITION_CODE_REGS m32c_fixed_condition_code_regs
static bool
m32c_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
{
  *p1 = FLG_REGNO;
  *p2 = INVALID_REGNUM;
  return true;
}
2054
2055/* Describing Relative Costs of Operations */
2056
2057/* Implements REGISTER_MOVE_COST. We make impossible moves
2058 prohibitively expensive, like trying to put QIs in r2/r3 (there are
2059 no opcodes to do that). We also discourage use of mem* registers
2060 since they're really memory. */
2061int
2062m32c_register_move_cost (enum machine_mode mode, int from, int to)
2063{
2064 int cost = COSTS_N_INSNS (3);
2065 int cc = class_contents[from][0] | class_contents[to][0];
2066 /* FIXME: pick real values, but not 2 for now. */
2067 if (mode == QImode && (cc & class_contents[R23_REGS][0]))
2068 {
2069 if (!(cc & ~class_contents[R23_REGS][0]))
2070 cost = COSTS_N_INSNS (1000);
2071 else
2072 cost = COSTS_N_INSNS (80);
2073 }
2074
2075 if (!class_can_hold_mode (from, mode) || !class_can_hold_mode (to, mode))
2076 cost = COSTS_N_INSNS (1000);
2077
2078 if (classes_intersect (from, CR_REGS))
2079 cost += COSTS_N_INSNS (5);
2080
2081 if (classes_intersect (to, CR_REGS))
2082 cost += COSTS_N_INSNS (5);
2083
2084 if (from == MEM_REGS || to == MEM_REGS)
2085 cost += COSTS_N_INSNS (50);
2086 else if (classes_intersect (from, MEM_REGS)
2087 || classes_intersect (to, MEM_REGS))
2088 cost += COSTS_N_INSNS (10);
2089
2090#if DEBUG0
2091 fprintf (stderr, "register_move_cost %s from %s to %s = %d\n",
2092 mode_name[mode], class_names[from], class_names[to], cost);
2093#endif
2094 return cost;
2095}
2096
/* Implements MEMORY_MOVE_COST.  A flat cost regardless of mode,
   class, or direction.  */
int
m32c_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
		       int reg_class ATTRIBUTE_UNUSED,
		       int in ATTRIBUTE_UNUSED)
{
  /* FIXME: pick real values.  */
  return COSTS_N_INSNS (10);
}
2106
07127a0a
DD
/* Here we try to describe when we use multiple opcodes for one RTX so
   that gcc knows when to use them.  */
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS m32c_rtx_costs
static bool
m32c_rtx_costs (rtx x, int code, int outer_code, int *total)
{
  switch (code)
    {
    case REG:
      /* The mem0..mem7 pseudo-registers are really memory; strongly
	 discourage their use.  */
      if (REGNO (x) >= MEM0_REGNO && REGNO (x) <= MEM7_REGNO)
	*total += COSTS_N_INSNS (500);
      else
	*total += COSTS_N_INSNS (1);
      return true;

    case ASHIFT:
    case LSHIFTRT:
    case ASHIFTRT:
      if (GET_CODE (XEXP (x, 1)) != CONST_INT)
	{
	  /* Variable shift count needs one extra move:
	     mov.b r1l, r1h */
	  *total += COSTS_N_INSNS (1);
	  return true;
	}
      if (INTVAL (XEXP (x, 1)) > 8
	  || INTVAL (XEXP (x, 1)) < -8)
	{
	  /* Counts beyond +/-8 need the count loaded first:
	     mov.b #N, r1l
	     mov.b r1l, r1h */
	  *total += COSTS_N_INSNS (2);
	  return true;
	}
      return true;

    case LE:
    case LEU:
    case LT:
    case LTU:
    case GT:
    case GTU:
    case GE:
    case GEU:
    case NE:
    case EQ:
      /* Storing a comparison result (setcc) costs extra.  */
      if (outer_code == SET)
	{
	  *total += COSTS_N_INSNS (2);
	  return true;
	}
      break;

    case ZERO_EXTRACT:
      {
	/* Cost depends on the addressing mode of the operand the bits
	   come from.  */
	rtx dest = XEXP (x, 0);
	rtx addr = XEXP (dest, 0);
	switch (GET_CODE (addr))
	  {
	  case CONST_INT:
	    *total += COSTS_N_INSNS (1);
	    break;
	  case SYMBOL_REF:
	    *total += COSTS_N_INSNS (3);
	    break;
	  default:
	    *total += COSTS_N_INSNS (2);
	    break;
	  }
	return true;
      }
      break;

    default:
      /* Reasonable default.  */
      if (TARGET_A16 && GET_MODE(x) == SImode)
	*total += COSTS_N_INSNS (2);
      break;
    }
  return false;
}
2187
2188#undef TARGET_ADDRESS_COST
2189#define TARGET_ADDRESS_COST m32c_address_cost
2190static int
2191m32c_address_cost (rtx addr)
2192{
2193 /* fprintf(stderr, "\naddress_cost\n");
2194 debug_rtx(addr);*/
2195 switch (GET_CODE (addr))
2196 {
2197 case CONST_INT:
2198 return COSTS_N_INSNS(1);
2199 case SYMBOL_REF:
2200 return COSTS_N_INSNS(3);
2201 case REG:
2202 return COSTS_N_INSNS(2);
2203 default:
2204 return 0;
2205 }
2206}
2207
38b2d076
DD
2208/* Defining the Output Assembler Language */
2209
2210/* The Overall Framework of an Assembler File */
2211
2212#undef TARGET_HAVE_NAMED_SECTIONS
2213#define TARGET_HAVE_NAMED_SECTIONS true
2214
2215/* Output of Data */
2216
2217/* We may have 24 bit sizes, which is the native address size.
2218 Currently unused, but provided for completeness. */
2219#undef TARGET_ASM_INTEGER
2220#define TARGET_ASM_INTEGER m32c_asm_integer
2221static bool
2222m32c_asm_integer (rtx x, unsigned int size, int aligned_p)
2223{
2224 switch (size)
2225 {
2226 case 3:
2227 fprintf (asm_out_file, "\t.3byte\t");
2228 output_addr_const (asm_out_file, x);
2229 fputc ('\n', asm_out_file);
2230 return true;
e9555b13
DD
2231 case 4:
2232 if (GET_CODE (x) == SYMBOL_REF)
2233 {
2234 fprintf (asm_out_file, "\t.long\t");
2235 output_addr_const (asm_out_file, x);
2236 fputc ('\n', asm_out_file);
2237 return true;
2238 }
2239 break;
38b2d076
DD
2240 }
2241 return default_assemble_integer (x, size, aligned_p);
2242}
2243
2244/* Output of Assembler Instructions */
2245
/* We use a lookup table because the addressing modes are
   non-orthogonal.  Each entry pairs an operand-letter CODE (0 for
   "any") with an encoded operand PATTERN (the string produced by
   encode_pattern) and the output FORMAT to use — presumably the
   digits in FORMAT select sub-operands of the pattern; verify against
   m32c_print_operand.  */
static struct
{
  char code;
  char const *pattern;
  char const *format;
}
const conversions[] = {
  { 0, "r", "0" },

  { 0, "mr", "z[1]" },
  { 0, "m+ri", "3[2]" },
  { 0, "m+rs", "3[2]" },
  { 0, "m+r+si", "4+5[2]" },
  { 0, "ms", "1" },
  { 0, "mi", "1" },
  { 0, "m+si", "2+3" },

  { 0, "mmr", "[z[2]]" },
  { 0, "mm+ri", "[4[3]]" },
  { 0, "mm+rs", "[4[3]]" },
  { 0, "mm+r+si", "[5+6[3]]" },
  { 0, "mms", "[[2]]" },
  { 0, "mmi", "[[2]]" },
  { 0, "mm+si", "[4[3]]" },

  { 0, "i", "#0" },
  { 0, "s", "#0" },
  { 0, "+si", "#1+2" },
  { 0, "l", "#0" },

  { 'l', "l", "0" },
  { 'd', "i", "0" },
  { 'd', "s", "0" },
  { 'd', "+si", "1+2" },
  { 'D', "i", "0" },
  { 'D', "s", "0" },
  { 'D', "+si", "1+2" },
  { 'x', "i", "#0" },
  { 'X', "i", "#0" },
  { 'm', "i", "#0" },
  { 'b', "i", "#0" },
  { 'B', "i", "0" },
  { 'p', "i", "0" },

  { 0, 0, 0 }
};
2294
/* This is in order according to the bitfield that pushm/popm use.
   Bit b of the pushm/popm immediate selects pushm_regs[b]; the 'p'
   case in m32c_print_operand walks bits 7..0 to emit the list.  */
static char const *pushm_regs[] = {
  "fb", "sb", "a1", "a0", "r3", "r2", "r1", "r0"
};
2299
/* Implements PRINT_OPERAND.  Emit operand X to FILE, interpreted
   through modifier letter CODE:
     'u'/'U'  print a constant as a 16-/8-bit unsigned value;
     '!'      debugging trap: abort, flagging an unreviewed pattern;
     '&'      emit "w" (A16) or "l" (A24) size suffix for PSImode ops;
     'C'/'c'  emit the inverted / straight condition-code name;
     'h'/'H'  emit the low / high half of a wider operand;
     'x'/'X'  emit a constant as an unsigned byte / word in hex;
     'b'/'B'  emit the bit position of a (possibly inverted) mask;
     'p'      emit a pushm/popm register list from a bitmask;
     'm'      emit the 16-bit negation of a constant;
     'd'/'D'  displacement handling (unsigned when standalone).
   After modifier preprocessing, the operand is classified by
   encode_pattern() and rendered via the conversions[] table above.  */
void
m32c_print_operand (FILE * file, rtx x, int code)
{
  int i, j, b;
  const char *comma;
  HOST_WIDE_INT ival;
  int unsigned_const = 0;
  int force_sign;

  /* Multiplies; constants are converted to sign-extended format but
     we need unsigned, so 'u' and 'U' tell us what size unsigned we
     need. */
  if (code == 'u')
    {
      unsigned_const = 2;
      code = 0;
    }
  if (code == 'U')
    {
      unsigned_const = 1;
      code = 0;
    }
  /* This one is only for debugging; you can put it in a pattern to
     force this error. */
  if (code == '!')
    {
      fprintf (stderr, "dj: unreviewed pattern:");
      if (current_output_insn)
	debug_rtx (current_output_insn);
      gcc_unreachable ();
    }
  /* PSImode operations are either .w or .l depending on the target. */
  if (code == '&')
    {
      if (TARGET_A16)
	fprintf (file, "w");
      else
	fprintf (file, "l");
      return;
    }
  /* Inverted conditionals. */
  if (code == 'C')
    {
      switch (GET_CODE (x))
	{
	case LE:
	  fputs ("gt", file);
	  break;
	case LEU:
	  fputs ("gtu", file);
	  break;
	case LT:
	  fputs ("ge", file);
	  break;
	case LTU:
	  fputs ("geu", file);
	  break;
	case GT:
	  fputs ("le", file);
	  break;
	case GTU:
	  fputs ("leu", file);
	  break;
	case GE:
	  fputs ("lt", file);
	  break;
	case GEU:
	  fputs ("ltu", file);
	  break;
	case NE:
	  fputs ("eq", file);
	  break;
	case EQ:
	  fputs ("ne", file);
	  break;
	default:
	  gcc_unreachable ();
	}
      return;
    }
  /* Regular conditionals. */
  if (code == 'c')
    {
      switch (GET_CODE (x))
	{
	case LE:
	  fputs ("le", file);
	  break;
	case LEU:
	  fputs ("leu", file);
	  break;
	case LT:
	  fputs ("lt", file);
	  break;
	case LTU:
	  fputs ("ltu", file);
	  break;
	case GT:
	  fputs ("gt", file);
	  break;
	case GTU:
	  fputs ("gtu", file);
	  break;
	case GE:
	  fputs ("ge", file);
	  break;
	case GEU:
	  fputs ("geu", file);
	  break;
	case NE:
	  fputs ("ne", file);
	  break;
	case EQ:
	  fputs ("eq", file);
	  break;
	default:
	  gcc_unreachable ();
	}
      return;
    }
  /* Used in negsi2 to do HImode ops on the two parts of an SImode
     operand. */
  if (code == 'h' && GET_MODE (x) == SImode)
    {
      x = m32c_subreg (HImode, x, SImode, 0);
      code = 0;
    }
  if (code == 'H' && GET_MODE (x) == SImode)
    {
      x = m32c_subreg (HImode, x, SImode, 2);
      code = 0;
    }
  if (code == 'h' && GET_MODE (x) == HImode)
    {
      x = m32c_subreg (QImode, x, HImode, 0);
      code = 0;
    }
  if (code == 'H' && GET_MODE (x) == HImode)
    {
      /* We can't actually represent this as an rtx.  Do it here.
	 (There is no hard register for the high byte of r0/r1, so
	 the "r0h"/"r1h" names are emitted directly.)  */
      if (GET_CODE (x) == REG)
	{
	  switch (REGNO (x))
	    {
	    case R0_REGNO:
	      fputs ("r0h", file);
	      return;
	    case R1_REGNO:
	      fputs ("r1h", file);
	      return;
	    default:
	      gcc_unreachable();
	    }
	}
      /* This should be a MEM. */
      x = m32c_subreg (QImode, x, HImode, 1);
      code = 0;
    }
  /* This is for BMcond, which always wants word register names. */
  if (code == 'h' && GET_MODE (x) == QImode)
    {
      if (GET_CODE (x) == REG)
	x = gen_rtx_REG (HImode, REGNO (x));
      code = 0;
    }
  /* 'x' and 'X' need to be ignored for non-immediates. */
  if ((code == 'x' || code == 'X') && GET_CODE (x) != CONST_INT)
    code = 0;

  encode_pattern (x);
  force_sign = 0;
  /* Find the conversions[] row matching this modifier/pattern pair,
     then interpret its format string (see the table's comment).  */
  for (i = 0; conversions[i].pattern; i++)
    if (conversions[i].code == code
	&& streq (conversions[i].pattern, pattern))
      {
	for (j = 0; conversions[i].format[j]; j++)
	  /* backslash quotes the next character in the output pattern. */
	  if (conversions[i].format[j] == '\\')
	    {
	      fputc (conversions[i].format[j + 1], file);
	      j++;
	    }
	  /* Digits in the output pattern indicate that the
	     corresponding RTX is to be output at that point. */
	  else if (ISDIGIT (conversions[i].format[j]))
	    {
	      rtx r = patternr[conversions[i].format[j] - '0'];
	      switch (GET_CODE (r))
		{
		case REG:
		  fprintf (file, "%s",
			   reg_name_with_mode (REGNO (r), GET_MODE (r)));
		  break;
		case CONST_INT:
		  switch (code)
		    {
		    case 'b':
		    case 'B':
		      {
			/* NOTE: this inner `i' deliberately shadows
			   the outer table index; it holds the bit
			   position of the (possibly inverted) mask.  */
			int v = INTVAL (r);
			int i = (int) exact_log2 (v);
			if (i == -1)
			  i = (int) exact_log2 ((v ^ 0xffff) & 0xffff);
			if (i == -1)
			  i = (int) exact_log2 ((v ^ 0xff) & 0xff);
			/* Bit position. */
			fprintf (file, "%d", i);
		      }
		      break;
		    case 'x':
		      /* Unsigned byte. */
		      fprintf (file, HOST_WIDE_INT_PRINT_HEX,
			       INTVAL (r) & 0xff);
		      break;
		    case 'X':
		      /* Unsigned word. */
		      fprintf (file, HOST_WIDE_INT_PRINT_HEX,
			       INTVAL (r) & 0xffff);
		      break;
		    case 'p':
		      /* pushm and popm encode a register set into a single byte.
			 Walk bits 7..0, naming registers per pushm_regs[].  */
		      comma = "";
		      for (b = 7; b >= 0; b--)
			if (INTVAL (r) & (1 << b))
			  {
			    fprintf (file, "%s%s", comma, pushm_regs[b]);
			    comma = ",";
			  }
		      break;
		    case 'm':
		      /* "Minus".  Output -X  */
		      ival = (-INTVAL (r) & 0xffff);
		      if (ival & 0x8000)
			ival = ival - 0x10000;
		      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
		      break;
		    default:
		      ival = INTVAL (r);
		      if (conversions[i].format[j + 1] == '[' && ival < 0)
			{
			  /* We can simulate negative displacements by
			     taking advantage of address space
			     wrapping when the offset can span the
			     entire address range. */
			  rtx base =
			    patternr[conversions[i].format[j + 2] - '0'];
			  if (GET_CODE (base) == REG)
			    switch (REGNO (base))
			      {
			      case A0_REGNO:
			      case A1_REGNO:
				if (TARGET_A24)
				  ival = 0x1000000 + ival;
				else
				  ival = 0x10000 + ival;
				break;
			      case SB_REGNO:
				if (TARGET_A16)
				  ival = 0x10000 + ival;
				break;
			      }
			}
		      else if (code == 'd' && ival < 0 && j == 0)
			/* The "mova" opcode is used to do addition by
			   computing displacements, but again, we need
			   displacements to be unsigned *if* they're
			   the only component of the displacement
			   (i.e. no "symbol-4" type displacement). */
			ival = (TARGET_A24 ? 0x1000000 : 0x10000) + ival;

		      if (conversions[i].format[j] == '0')
			{
			  /* More conversions to unsigned. */
			  if (unsigned_const == 2)
			    ival &= 0xffff;
			  if (unsigned_const == 1)
			    ival &= 0xff;
			}
		      if (streq (conversions[i].pattern, "mi")
			  || streq (conversions[i].pattern, "mmi"))
			{
			  /* Integers used as addresses are unsigned. */
			  ival &= (TARGET_A24 ? 0xffffff : 0xffff);
			}
		      /* force_sign was set when a '+' preceded this
			 constant in the format; emit the sign
			 explicitly so "sym+4" reads cleanly.  */
		      if (force_sign && ival >= 0)
			fputc ('+', file);
		      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
		      break;
		    }
		  break;
		case CONST_DOUBLE:
		  /* We don't have const_double constants.  If it
		     happens, make it obvious. */
		  fprintf (file, "[const_double 0x%lx]",
			   (unsigned long) CONST_DOUBLE_HIGH (r));
		  break;
		case SYMBOL_REF:
		  assemble_name (file, XSTR (r, 0));
		  break;
		case LABEL_REF:
		  output_asm_label (r);
		  break;
		default:
		  fprintf (stderr, "don't know how to print this operand:");
		  debug_rtx (r);
		  gcc_unreachable ();
		}
	    }
	  else
	    {
	      if (conversions[i].format[j] == 'z')
		{
		  /* Some addressing modes *must* have a displacement,
		     so insert a zero here if needed. */
		  int k;
		  for (k = j + 1; conversions[i].format[k]; k++)
		    if (ISDIGIT (conversions[i].format[k]))
		      {
			rtx reg = patternr[conversions[i].format[k] - '0'];
			if (GET_CODE (reg) == REG
			    && (REGNO (reg) == SB_REGNO
				|| REGNO (reg) == FB_REGNO
				|| REGNO (reg) == SP_REGNO))
			  fputc ('0', file);
		      }
		  continue;
		}
	      /* Signed displacements off symbols need to have signs
		 blended cleanly. */
	      if (conversions[i].format[j] == '+'
		  && (!code || code == 'D' || code == 'd')
		  && ISDIGIT (conversions[i].format[j + 1])
		  && (GET_CODE (patternr[conversions[i].format[j + 1] - '0'])
		      == CONST_INT))
		{
		  force_sign = 1;
		  continue;
		}
	      fputc (conversions[i].format[j], file);
	    }
	break;
      }
  /* No table row matched: report it loudly and leave a marker in the
     assembly output so the failure is visible.  */
  if (!conversions[i].pattern)
    {
      fprintf (stderr, "unconvertible operand %c `%s'", code ? code : '-',
	       pattern);
      debug_rtx (x);
      fprintf (file, "[%c.%s]", code ? code : '-', pattern);
    }

  return;
}
2653
/* Implements PRINT_OPERAND_PUNCT_VALID_P.  Only two punctuation
   modifiers exist: '&' (PSImode size suffix) and '!' (debug trap);
   see m32c_print_operand above for what they do.  */
int
m32c_print_operand_punct_valid_p (int c)
{
  return (c == '&' || c == '!') ? 1 : 0;
}
2663
2664/* Implements PRINT_OPERAND_ADDRESS. Nothing unusual here. */
2665void
2666m32c_print_operand_address (FILE * stream, rtx address)
2667{
2668 gcc_assert (GET_CODE (address) == MEM);
2669 m32c_print_operand (stream, XEXP (address, 0), 0);
2670}
2671
2672/* Implements ASM_OUTPUT_REG_PUSH. Control registers are pushed
2673 differently than general registers. */
2674void
2675m32c_output_reg_push (FILE * s, int regno)
2676{
2677 if (regno == FLG_REGNO)
2678 fprintf (s, "\tpushc\tflg\n");
2679 else
04aff2c0 2680 fprintf (s, "\tpush.%c\t%s\n",
38b2d076
DD
2681 " bwll"[reg_push_size (regno)], reg_names[regno]);
2682}
2683
2684/* Likewise for ASM_OUTPUT_REG_POP. */
2685void
2686m32c_output_reg_pop (FILE * s, int regno)
2687{
2688 if (regno == FLG_REGNO)
2689 fprintf (s, "\tpopc\tflg\n");
2690 else
04aff2c0 2691 fprintf (s, "\tpop.%c\t%s\n",
38b2d076
DD
2692 " bwll"[reg_push_size (regno)], reg_names[regno]);
2693}
2694
/* Defining target-specific uses of `__attribute__' */

/* Used to simplify the logic below. Find the attributes wherever
   they may be: on the type itself, on the decl, or on the decl's
   type.  The whole expansion is parenthesized so the conditional
   expression cannot re-associate with operators at the use site
   (the previous definition expanded to a bare ?: chain).  */
#define M32C_ATTRIBUTES(decl) \
  ((TYPE_P (decl)) ? TYPE_ATTRIBUTES (decl) \
   : DECL_ATTRIBUTES (decl) \
     ? (DECL_ATTRIBUTES (decl)) \
     : TYPE_ATTRIBUTES (TREE_TYPE (decl)))
2704
2705/* Returns TRUE if the given tree has the "interrupt" attribute. */
2706static int
2707interrupt_p (tree node ATTRIBUTE_UNUSED)
2708{
2709 tree list = M32C_ATTRIBUTES (node);
2710 while (list)
2711 {
2712 if (is_attribute_p ("interrupt", TREE_PURPOSE (list)))
2713 return 1;
2714 list = TREE_CHAIN (list);
2715 }
2716 return 0;
2717}
2718
/* Attribute handler for "interrupt": a no-op validator that accepts
   the attribute unconditionally (the attribute takes no arguments,
   so there is nothing to check; consumers query it via
   interrupt_p).  */
static tree
interrupt_handler (tree * node ATTRIBUTE_UNUSED,
		   tree name ATTRIBUTE_UNUSED,
		   tree args ATTRIBUTE_UNUSED,
		   int flags ATTRIBUTE_UNUSED,
		   bool * no_add_attrs ATTRIBUTE_UNUSED)
{
  return NULL_TREE;
}
2728
5abd2125
JS
2729/* Returns TRUE if given tree has the "function_vector" attribute. */
2730int
2731m32c_special_page_vector_p (tree func)
2732{
2733 if (TREE_CODE (func) != FUNCTION_DECL)
2734 return 0;
2735
2736 tree list = M32C_ATTRIBUTES (func);
2737 while (list)
2738 {
2739 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2740 return 1;
2741 list = TREE_CHAIN (list);
2742 }
2743 return 0;
2744}
2745
2746static tree
2747function_vector_handler (tree * node ATTRIBUTE_UNUSED,
2748 tree name ATTRIBUTE_UNUSED,
2749 tree args ATTRIBUTE_UNUSED,
2750 int flags ATTRIBUTE_UNUSED,
2751 bool * no_add_attrs ATTRIBUTE_UNUSED)
2752{
2753 if (TARGET_R8C)
2754 {
2755 /* The attribute is not supported for R8C target. */
2756 warning (OPT_Wattributes,
2757 "`%s' attribute is not supported for R8C target",
2758 IDENTIFIER_POINTER (name));
2759 *no_add_attrs = true;
2760 }
2761 else if (TREE_CODE (*node) != FUNCTION_DECL)
2762 {
2763 /* The attribute must be applied to functions only. */
2764 warning (OPT_Wattributes,
2765 "`%s' attribute applies only to functions",
2766 IDENTIFIER_POINTER (name));
2767 *no_add_attrs = true;
2768 }
2769 else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
2770 {
2771 /* The argument must be a constant integer. */
2772 warning (OPT_Wattributes,
2773 "`%s' attribute argument not an integer constant",
2774 IDENTIFIER_POINTER (name));
2775 *no_add_attrs = true;
2776 }
2777 else if (TREE_INT_CST_LOW (TREE_VALUE (args)) < 18
2778 || TREE_INT_CST_LOW (TREE_VALUE (args)) > 255)
2779 {
2780 /* The argument value must be between 18 to 255. */
2781 warning (OPT_Wattributes,
2782 "`%s' attribute argument should be between 18 to 255",
2783 IDENTIFIER_POINTER (name));
2784 *no_add_attrs = true;
2785 }
2786 return NULL_TREE;
2787}
2788
2789/* If the function is assigned the attribute 'function_vector', it
2790 returns the function vector number, otherwise returns zero. */
2791int
2792current_function_special_page_vector (rtx x)
2793{
2794 int num;
2795
2796 if ((GET_CODE(x) == SYMBOL_REF)
2797 && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
2798 {
2799 tree t = SYMBOL_REF_DECL (x);
2800
2801 if (TREE_CODE (t) != FUNCTION_DECL)
2802 return 0;
2803
2804 tree list = M32C_ATTRIBUTES (t);
2805 while (list)
2806 {
2807 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2808 {
2809 num = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list)));
2810 return num;
2811 }
2812
2813 list = TREE_CHAIN (list);
2814 }
2815
2816 return 0;
2817 }
2818 else
2819 return 0;
2820}
2821
38b2d076
DD
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE m32c_attribute_table
/* Machine attributes for this port.  Fields (per struct
   attribute_spec): name, min args, max args, decl-required,
   type-required, function-type-required, handler.  "interrupt"
   takes no arguments; "function_vector" takes exactly one and must
   be on a decl.  */
static const struct attribute_spec m32c_attribute_table[] = {
  {"interrupt", 0, 0, false, false, false, interrupt_handler},
  {"function_vector", 1, 1, true, false, false, function_vector_handler},
  {0, 0, 0, 0, 0, 0, 0}
};
2829
#undef TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES m32c_comp_type_attributes
/* Implements TARGET_COMP_TYPE_ATTRIBUTES.  This port treats all
   attribute combinations as compatible.  */
static int
m32c_comp_type_attributes (tree type1 ATTRIBUTE_UNUSED,
			   tree type2 ATTRIBUTE_UNUSED)
{
  /* 0=incompatible 1=compatible 2=warning */
  return 1;
}
2839
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES m32c_insert_attributes
/* Implements TARGET_INSERT_ATTRIBUTES; intentionally a no-op for
   this port (no attributes are added implicitly).  */
static void
m32c_insert_attributes (tree node ATTRIBUTE_UNUSED,
			tree * attr_ptr ATTRIBUTE_UNUSED)
{
  /* Nothing to do here. */
}
2848
/* Predicates */

/* This is a list of legal subregs of hard regs.  Each entry gives
   the outer (subreg) mode size, the inner (hard reg) mode size, a
   mask of which SUBREG_BYTE offsets are allowed (bit N set = byte
   offset N allowed), when the entry applies (1 = always, 16 = only
   for TARGET_A16, 24 = only for TARGET_A24 — see the switch in
   m32c_illegal_subreg_p), and the base hard register number.  */
static const struct {
  unsigned char outer_mode_size;
  unsigned char inner_mode_size;
  unsigned char byte_mask;
  unsigned char legal_when;
  unsigned int regno;
} legal_subregs[] = {
  {1, 2, 0x03, 1, R0_REGNO},	/* r0h r0l */
  {1, 2, 0x03, 1, R1_REGNO},	/* r1h r1l */
  {1, 2, 0x01, 1, A0_REGNO},
  {1, 2, 0x01, 1, A1_REGNO},

  {1, 4, 0x01, 1, A0_REGNO},
  {1, 4, 0x01, 1, A1_REGNO},

  {2, 4, 0x05, 1, R0_REGNO},	/* r2 r0 */
  {2, 4, 0x05, 1, R1_REGNO},	/* r3 r1 */
  {2, 4, 0x05, 16, A0_REGNO},	/* a1 a0 */
  {2, 4, 0x01, 24, A0_REGNO},	/* a1 a0 */
  {2, 4, 0x01, 24, A1_REGNO},	/* a1 a0 */

  {4, 8, 0x55, 1, R0_REGNO},	/* r3 r1 r2 r0 */
};
2875
/* Returns TRUE if OP is a subreg of a hard reg which we don't
   support.  Pseudo-registers, memory-backed registers, and
   same-size subregs are always fine; otherwise the subreg must
   match an applicable entry in legal_subregs[] above.  */
bool
m32c_illegal_subreg_p (rtx op)
{
  int offset;
  unsigned int i;
  int src_mode, dest_mode;

  if (GET_CODE (op) != SUBREG)
    return false;

  dest_mode = GET_MODE (op);
  offset = SUBREG_BYTE (op);
  op = SUBREG_REG (op);
  src_mode = GET_MODE (op);

  if (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (src_mode))
    return false;
  if (GET_CODE (op) != REG)
    return false;
  if (REGNO (op) >= MEM0_REGNO)
    return false;

  /* Convert the byte offset into a one-hot mask bit so it can be
     tested against byte_mask.  */
  offset = (1 << offset);

  for (i = 0; i < ARRAY_SIZE (legal_subregs); i ++)
    if (legal_subregs[i].outer_mode_size == GET_MODE_SIZE (dest_mode)
	&& legal_subregs[i].regno == REGNO (op)
	&& legal_subregs[i].inner_mode_size == GET_MODE_SIZE (src_mode)
	&& legal_subregs[i].byte_mask & offset)
      {
	/* legal_when: 1 = always legal, 16 = legal only on A16
	   targets, 24 = legal only on A24 targets.  A matching entry
	   whose condition fails falls through to the next entry.  */
	switch (legal_subregs[i].legal_when)
	  {
	  case 1:
	    return false;
	  case 16:
	    if (TARGET_A16)
	      return false;
	    break;
	  case 24:
	    if (TARGET_A24)
	      return false;
	    break;
	  }
      }
  /* No entry allowed this subreg: it is illegal.  */
  return true;
}
2924
38b2d076
DD
2925/* Returns TRUE if we support a move between the first two operands.
2926 At the moment, we just want to discourage mem to mem moves until
2927 after reload, because reload has a hard time with our limited
2928 number of address registers, and we can get into a situation where
2929 we need three of them when we only have two. */
2930bool
2931m32c_mov_ok (rtx * operands, enum machine_mode mode ATTRIBUTE_UNUSED)
2932{
2933 rtx op0 = operands[0];
2934 rtx op1 = operands[1];
2935
2936 if (TARGET_A24)
2937 return true;
2938
2939#define DEBUG_MOV_OK 0
2940#if DEBUG_MOV_OK
2941 fprintf (stderr, "m32c_mov_ok %s\n", mode_name[mode]);
2942 debug_rtx (op0);
2943 debug_rtx (op1);
2944#endif
2945
2946 if (GET_CODE (op0) == SUBREG)
2947 op0 = XEXP (op0, 0);
2948 if (GET_CODE (op1) == SUBREG)
2949 op1 = XEXP (op1, 0);
2950
2951 if (GET_CODE (op0) == MEM
2952 && GET_CODE (op1) == MEM
2953 && ! reload_completed)
2954 {
2955#if DEBUG_MOV_OK
2956 fprintf (stderr, " - no, mem to mem\n");
2957#endif
2958 return false;
2959 }
2960
2961#if DEBUG_MOV_OK
2962 fprintf (stderr, " - ok\n");
2963#endif
2964 return true;
2965}
2966
ff485e71
DD
/* Returns TRUE if two consecutive HImode mov instructions, generated
   for moving an immediate double data to a double data type variable
   location, can be combined into single SImode mov instruction.
   operands[0]/[2] are the two destination MEMs, operands[1]/[3] the
   two immediates; on success operands[4]/[5] are filled in with the
   combined SImode MEM and constant.  Three destination shapes are
   recognized: a bare symbol (flag 1), symbol+offset (flag 2), and
   FB-relative with constant offset (flag 3).  */
bool
m32c_immd_dbl_mov (rtx * operands,
		   enum machine_mode mode ATTRIBUTE_UNUSED)
{
  int flag = 0, okflag = 0, offset1 = 0, offset2 = 0, offsetsign = 0;
  const char *str1;
  const char *str2;

  /* Shape 1: first store to a plain SYMBOL_REF, second to
     CONST(PLUS(SYMBOL_REF, CONST_INT)).  */
  if (GET_CODE (XEXP (operands[0], 0)) == SYMBOL_REF
      && MEM_SCALAR_P (operands[0])
      && !MEM_IN_STRUCT_P (operands[0])
      && GET_CODE (XEXP (operands[2], 0)) == CONST
      && GET_CODE (XEXP (XEXP (operands[2], 0), 0)) == PLUS
      && GET_CODE (XEXP (XEXP (XEXP (operands[2], 0), 0), 0)) == SYMBOL_REF
      && GET_CODE (XEXP (XEXP (XEXP (operands[2], 0), 0), 1)) == CONST_INT
      && MEM_SCALAR_P (operands[2])
      && !MEM_IN_STRUCT_P (operands[2]))
    flag = 1;

  /* Shape 2: both stores symbol+offset, first offset 4-aligned.  */
  else if (GET_CODE (XEXP (operands[0], 0)) == CONST
	   && GET_CODE (XEXP (XEXP (operands[0], 0), 0)) == PLUS
	   && GET_CODE (XEXP (XEXP (XEXP (operands[0], 0), 0), 0)) == SYMBOL_REF
	   && MEM_SCALAR_P (operands[0])
	   && !MEM_IN_STRUCT_P (operands[0])
	   && !(XINT (XEXP (XEXP (XEXP (operands[0], 0), 0), 1), 0) %4)
	   && GET_CODE (XEXP (operands[2], 0)) == CONST
	   && GET_CODE (XEXP (XEXP (operands[2], 0), 0)) == PLUS
	   && GET_CODE (XEXP (XEXP (XEXP (operands[2], 0), 0), 0)) == SYMBOL_REF
	   && MEM_SCALAR_P (operands[2])
	   && !MEM_IN_STRUCT_P (operands[2]))
    flag = 2;

  /* Shape 3: both stores FB-relative, first offset 4-aligned.  */
  else if (GET_CODE (XEXP (operands[0], 0)) == PLUS
	   && GET_CODE (XEXP (XEXP (operands[0], 0), 0)) == REG
	   && REGNO (XEXP (XEXP (operands[0], 0), 0)) == FB_REGNO
	   && GET_CODE (XEXP (XEXP (operands[0], 0), 1)) == CONST_INT
	   && MEM_SCALAR_P (operands[0])
	   && !MEM_IN_STRUCT_P (operands[0])
	   && !(XINT (XEXP (XEXP (operands[0], 0), 1), 0) %4)
	   && REGNO (XEXP (XEXP (operands[2], 0), 0)) == FB_REGNO
	   && GET_CODE (XEXP (XEXP (operands[2], 0), 1)) == CONST_INT
	   && MEM_SCALAR_P (operands[2])
	   && !MEM_IN_STRUCT_P (operands[2]))
    flag = 3;

  else
    return false;

  /* Verify the two destinations are adjacent halves of the same
     object: same symbol for shapes 1/2, and for shape 3 a negative
     FB offset whose second half is exactly 2 bytes later.  */
  switch (flag)
    {
    case 1:
      str1 = XSTR (XEXP (operands[0], 0), 0);
      str2 = XSTR (XEXP (XEXP (XEXP (operands[2], 0), 0), 0), 0);
      if (strcmp (str1, str2) == 0)
	okflag = 1;
      else
	okflag = 0;
      break;
    case 2:
      str1 = XSTR (XEXP (XEXP (XEXP (operands[0], 0), 0), 0), 0);
      str2 = XSTR (XEXP (XEXP (XEXP (operands[2], 0), 0), 0), 0);
      if (strcmp(str1,str2) == 0)
	okflag = 1;
      else
	okflag = 0;
      break;
    case 3:
      offset1 = XINT (XEXP (XEXP (operands[0], 0), 1), 0);
      offset2 = XINT (XEXP (XEXP (operands[2], 0), 1), 0);
      /* offsetsign is the sign bit of offset1 (arithmetic shift).  */
      offsetsign = offset1 >> ((sizeof (offset1) * 8) -1);
      if (((offset2-offset1) == 2) && offsetsign != 0)
	okflag = 1;
      else
	okflag = 0;
      break;
    default:
      okflag = 0;
    }

  if (okflag == 1)
    {
      HOST_WIDE_INT val;
      operands[4] = gen_rtx_MEM (SImode, XEXP (operands[0], 0));

      /* Combine the two 16-bit immediates into one 32-bit value,
	 second store in the high half.  */
      val = (XINT (operands[3], 0) << 16) + (XINT (operands[1], 0) & 0xFFFF);
      operands[5] = gen_rtx_CONST_INT (VOIDmode, val);

      return true;
    }

  return false;
}
3062
38b2d076
DD
/* Expanders */

/* Subregs are non-orthogonal for us, because our registers are all
   different sizes.  Produce the rtx denoting byte offset BYTE of X
   when viewed in mode OUTER (X itself has mode INNER).  Hard
   registers are mapped explicitly below because the generic subreg
   machinery does not know our register pairing (r0/r2, r1/r3,
   a0/a1); anything else defers to simplify_gen_subreg.  */
static rtx
m32c_subreg (enum machine_mode outer,
	     rtx x, enum machine_mode inner, int byte)
{
  int r, nr = -1;

  /* Converting MEMs to different types that are the same size, we
     just rewrite them. */
  if (GET_CODE (x) == SUBREG
      && SUBREG_BYTE (x) == 0
      && GET_CODE (SUBREG_REG (x)) == MEM
      && (GET_MODE_SIZE (GET_MODE (x))
	  == GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
    {
      rtx oldx = x;
      x = gen_rtx_MEM (GET_MODE (x), XEXP (SUBREG_REG (x), 0));
      MEM_COPY_ATTRIBUTES (x, SUBREG_REG (oldx));
    }

  /* Push/pop get done as smaller push/pops. */
  if (GET_CODE (x) == MEM
      && (GET_CODE (XEXP (x, 0)) == PRE_DEC
	  || GET_CODE (XEXP (x, 0)) == POST_INC))
    return gen_rtx_MEM (outer, XEXP (x, 0));
  if (GET_CODE (x) == SUBREG
      && GET_CODE (XEXP (x, 0)) == MEM
      && (GET_CODE (XEXP (XEXP (x, 0), 0)) == PRE_DEC
	  || GET_CODE (XEXP (XEXP (x, 0), 0)) == POST_INC))
    return gen_rtx_MEM (outer, XEXP (XEXP (x, 0), 0));

  if (GET_CODE (x) != REG)
    return simplify_gen_subreg (outer, x, inner, byte);

  r = REGNO (x);
  if (r >= FIRST_PSEUDO_REGISTER || r == AP_REGNO)
    return simplify_gen_subreg (outer, x, inner, byte);

  if (IS_MEM_REGNO (r))
    return simplify_gen_subreg (outer, x, inner, byte);

  /* This is where the complexities of our register layout are
     described: which hard register holds each 2-byte slice of a
     wider value.  */
  if (byte == 0)
    nr = r;
  else if (outer == HImode)
    {
      if (r == R0_REGNO && byte == 2)
	nr = R2_REGNO;
      else if (r == R0_REGNO && byte == 4)
	nr = R1_REGNO;
      else if (r == R0_REGNO && byte == 6)
	nr = R3_REGNO;
      else if (r == R1_REGNO && byte == 2)
	nr = R3_REGNO;
      else if (r == A0_REGNO && byte == 2)
	nr = A1_REGNO;
    }
  else if (outer == SImode)
    {
      if (r == R0_REGNO && byte == 0)
	nr = R0_REGNO;
      else if (r == R0_REGNO && byte == 4)
	nr = R1_REGNO;
    }
  /* Any unmapped combination is a bug in the caller.  */
  if (nr == -1)
    {
      fprintf (stderr, "m32c_subreg %s %s %d\n",
	       mode_name[outer], mode_name[inner], byte);
      debug_rtx (x);
      gcc_unreachable ();
    }
  return gen_rtx_REG (outer, nr);
}
3140
3141/* Used to emit move instructions. We split some moves,
3142 and avoid mem-mem moves. */
3143int
3144m32c_prepare_move (rtx * operands, enum machine_mode mode)
3145{
3146 if (TARGET_A16 && mode == PSImode)
3147 return m32c_split_move (operands, mode, 1);
3148 if ((GET_CODE (operands[0]) == MEM)
3149 && (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY))
3150 {
3151 rtx pmv = XEXP (operands[0], 0);
3152 rtx dest_reg = XEXP (pmv, 0);
3153 rtx dest_mod = XEXP (pmv, 1);
3154
3155 emit_insn (gen_rtx_SET (Pmode, dest_reg, dest_mod));
3156 operands[0] = gen_rtx_MEM (mode, dest_reg);
3157 }
3158 if (!no_new_pseudos && MEM_P (operands[0]) && MEM_P (operands[1]))
3159 operands[1] = copy_to_mode_reg (mode, operands[1]);
3160 return 0;
3161}
3162
3163#define DEBUG_SPLIT 0
3164
3165/* Returns TRUE if the given PSImode move should be split. We split
3166 for all r8c/m16c moves, since it doesn't support them, and for
3167 POP.L as we can only *push* SImode. */
3168int
3169m32c_split_psi_p (rtx * operands)
3170{
3171#if DEBUG_SPLIT
3172 fprintf (stderr, "\nm32c_split_psi_p\n");
3173 debug_rtx (operands[0]);
3174 debug_rtx (operands[1]);
3175#endif
3176 if (TARGET_A16)
3177 {
3178#if DEBUG_SPLIT
3179 fprintf (stderr, "yes, A16\n");
3180#endif
3181 return 1;
3182 }
3183 if (GET_CODE (operands[1]) == MEM
3184 && GET_CODE (XEXP (operands[1], 0)) == POST_INC)
3185 {
3186#if DEBUG_SPLIT
3187 fprintf (stderr, "yes, pop.l\n");
3188#endif
3189 return 1;
3190 }
3191#if DEBUG_SPLIT
3192 fprintf (stderr, "no, default\n");
3193#endif
3194 return 0;
3195}
3196
/* Split the given move.  SPLIT_ALL is 0 if splitting is optional
   (define_expand), 1 if it is not optional (define_insn_and_split),
   and 3 for define_split (alternate api).  Returns nonzero when the
   move was split.  The move is decomposed into two half-width
   (HImode, or SImode for DImode sources) sub-moves, with pushes and
   pops expanded into sequences of smaller pushes/pops.  */
int
m32c_split_move (rtx * operands, enum machine_mode mode, int split_all)
{
  rtx s[4], d[4];
  int parts, si, di, rev = 0;
  int rv = 0, opi = 2;
  enum machine_mode submode = HImode;
  rtx *ops, local_ops[10];

  /* define_split modifies the existing operands, but the other two
     emit new insns.  OPS is where we store the operand pairs, which
     we emit later.  opi starts at 2: in the define_split case the
     original operands[0]/[1] stay in place and the sub-move pairs
     are appended after them — TODO confirm against the .md split
     patterns.  */
  if (split_all == 3)
    ops = operands;
  else
    ops = local_ops;

  /* Else HImode. */
  if (mode == DImode)
    submode = SImode;

  /* Before splitting mem-mem moves, force one operand into a
     register. */
  if (!no_new_pseudos && MEM_P (operands[0]) && MEM_P (operands[1]))
    {
#if DEBUG0
      fprintf (stderr, "force_reg...\n");
      debug_rtx (operands[1]);
#endif
      operands[1] = force_reg (mode, operands[1]);
#if DEBUG0
      debug_rtx (operands[1]);
#endif
    }

  parts = 2;

#if DEBUG_SPLIT
  fprintf (stderr, "\nsplit_move %d all=%d\n", no_new_pseudos, split_all);
  debug_rtx (operands[0]);
  debug_rtx (operands[1]);
#endif

  /* Note that split_all is not used to select the api after this
     point, so it's safe to set it to 3 even with define_insn. */
  /* None of the chips can move SI operands to sp-relative addresses,
     so we always split those. */
  if (m32c_extra_constraint_p (operands[0], 'S', "Ss"))
    split_all = 3;

  /* We don't need to split these. */
  if (TARGET_A24
      && split_all != 3
      && (mode == SImode || mode == PSImode)
      && !(GET_CODE (operands[1]) == MEM
	   && GET_CODE (XEXP (operands[1], 0)) == POST_INC))
    return 0;

  /* First, enumerate the subregs we'll be dealing with. */
  for (si = 0; si < parts; si++)
    {
      d[si] =
	m32c_subreg (submode, operands[0], mode,
		     si * GET_MODE_SIZE (submode));
      s[si] =
	m32c_subreg (submode, operands[1], mode,
		     si * GET_MODE_SIZE (submode));
    }

  /* Split pushes by emitting a sequence of smaller pushes.
     Highest part is pushed first so the value sits in memory in the
     normal order.  */
  if (GET_CODE (d[0]) == MEM && GET_CODE (XEXP (d[0], 0)) == PRE_DEC)
    {
      for (si = parts - 1; si >= 0; si--)
	{
	  ops[opi++] = gen_rtx_MEM (submode,
				    gen_rtx_PRE_DEC (Pmode,
						     gen_rtx_REG (Pmode,
								  SP_REGNO)));
	  ops[opi++] = s[si];
	}

      rv = 1;
    }
  /* Likewise for pops. */
  else if (GET_CODE (s[0]) == MEM && GET_CODE (XEXP (s[0], 0)) == POST_INC)
    {
      for (di = 0; di < parts; di++)
	{
	  ops[opi++] = d[di];
	  ops[opi++] = gen_rtx_MEM (submode,
				    gen_rtx_POST_INC (Pmode,
						      gen_rtx_REG (Pmode,
								   SP_REGNO)));
	}
      rv = 1;
    }
  else if (split_all)
    {
      /* if d[di] == s[si] for any di < si, we'll early clobber.
	 In that case emit the sub-moves low-part-first instead of
	 high-part-first so no source half is overwritten before it
	 is read.  */
      for (di = 0; di < parts - 1; di++)
	for (si = di + 1; si < parts; si++)
	  if (reg_mentioned_p (d[di], s[si]))
	    rev = 1;

      if (rev)
	for (si = 0; si < parts; si++)
	  {
	    ops[opi++] = d[si];
	    ops[opi++] = s[si];
	  }
      else
	for (si = parts - 1; si >= 0; si--)
	  {
	    ops[opi++] = d[si];
	    ops[opi++] = s[si];
	  }
      rv = 1;
    }
  /* Now emit any moves we may have accumulated. */
  if (rv && split_all != 3)
    {
      int i;
      for (i = 2; i < opi; i += 2)
	emit_move_insn (ops[i], ops[i + 1]);
    }
  return rv;
}
3327
07127a0a
DD
/* The m32c has a number of opcodes that act like memcpy, strcmp, and
   the like.  For the R8C they expect one of the addresses to be in
   R1L:An so we need to arrange for that.  Otherwise, it's just a
   matter of picking out the operands we want and emitting the right
   pattern for them.  All these expanders, which correspond to
   patterns in blkmov.md, must return nonzero if they expand the insn,
   or zero if they should FAIL.  */

/* This is a memset() opcode.  All operands are implied, so we need to
   arrange for them to be in the right registers.  The opcode wants
   addresses, not [mem] syntax.  $0 is the destination (MEM:BLK), $1
   the count (HI), and $2 the value (QI).  */
int
m32c_expand_setmemhi(rtx *operands)
{
  rtx desta, count, val;
  rtx desto, counto;

  desta = XEXP (operands[0], 0);
  count = operands[1];
  val = operands[2];

  /* desto/counto receive the opcode's output values (the advanced
     address and exhausted count).  */
  desto = gen_reg_rtx (Pmode);
  counto = gen_reg_rtx (HImode);

  if (GET_CODE (desta) != REG
      || REGNO (desta) < FIRST_PSEUDO_REGISTER)
    desta = copy_to_mode_reg (Pmode, desta);

  /* This looks like an arbitrary restriction, but this is by far the
     most common case.  For counts 8..14 this actually results in
     smaller code with no speed penalty because the half-sized
     constant can be loaded with a shorter opcode. */
  if (GET_CODE (count) == CONST_INT
      && GET_CODE (val) == CONST_INT
      && ! (INTVAL (count) & 1)
      && (INTVAL (count) > 1)
      && (INTVAL (val) <= 7 && INTVAL (val) >= -8))
    {
      /* Duplicate the byte into both halves of a word and store
	 word-at-a-time, halving the count.  */
      unsigned v = INTVAL (val) & 0xff;
      v = v | (v << 8);
      count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
      val = copy_to_mode_reg (HImode, GEN_INT (v));
      if (TARGET_A16)
	emit_insn (gen_setmemhi_whi_op (desto, counto, val, desta, count));
      else
	emit_insn (gen_setmemhi_wpsi_op (desto, counto, val, desta, count));
      return 1;
    }

  /* This is the generalized memset() case. */
  if (GET_CODE (val) != REG
      || REGNO (val) < FIRST_PSEUDO_REGISTER)
    val = copy_to_mode_reg (QImode, val);

  if (GET_CODE (count) != REG
      || REGNO (count) < FIRST_PSEUDO_REGISTER)
    count = copy_to_mode_reg (HImode, count);

  if (TARGET_A16)
    emit_insn (gen_setmemhi_bhi_op (desto, counto, val, desta, count));
  else
    emit_insn (gen_setmemhi_bpsi_op (desto, counto, val, desta, count));

  return 1;
}
3394
3395/* This is a memcpy() opcode. All operands are implied, so we need to
3396 arrange for them to be in the right registers. The opcode wants
3397 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3398 is the source (MEM:BLK), and $2 the count (HI). */
3399int
3400m32c_expand_movmemhi(rtx *operands)
3401{
3402 rtx desta, srca, count;
3403 rtx desto, srco, counto;
3404
3405 desta = XEXP (operands[0], 0);
3406 srca = XEXP (operands[1], 0);
3407 count = operands[2];
3408
3409 desto = gen_reg_rtx (Pmode);
3410 srco = gen_reg_rtx (Pmode);
3411 counto = gen_reg_rtx (HImode);
3412
3413 if (GET_CODE (desta) != REG
3414 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3415 desta = copy_to_mode_reg (Pmode, desta);
3416
3417 if (GET_CODE (srca) != REG
3418 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3419 srca = copy_to_mode_reg (Pmode, srca);
3420
3421 /* Similar to setmem, but we don't need to check the value. */
3422 if (GET_CODE (count) == CONST_INT
3423 && ! (INTVAL (count) & 1)
3424 && (INTVAL (count) > 1))
3425 {
3426 count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3427 if (TARGET_A16)
3428 emit_insn (gen_movmemhi_whi_op (desto, srco, counto, desta, srca, count));
3429 else
3430 emit_insn (gen_movmemhi_wpsi_op (desto, srco, counto, desta, srca, count));
3431 return 1;
3432 }
3433
3434 /* This is the generalized memset() case. */
3435 if (GET_CODE (count) != REG
3436 || REGNO (count) < FIRST_PSEUDO_REGISTER)
3437 count = copy_to_mode_reg (HImode, count);
3438
3439 if (TARGET_A16)
3440 emit_insn (gen_movmemhi_bhi_op (desto, srco, counto, desta, srca, count));
3441 else
3442 emit_insn (gen_movmemhi_bpsi_op (desto, srco, counto, desta, srca, count));
3443
3444 return 1;
3445}
3446
3447/* This is a stpcpy() opcode. $0 is the destination (MEM:BLK) after
3448 the copy, which should point to the NUL at the end of the string,
3449 $1 is the destination (MEM:BLK), and $2 is the source (MEM:BLK).
3450 Since our opcode leaves the destination pointing *after* the NUL,
3451 we must emit an adjustment. */
3452int
3453m32c_expand_movstr(rtx *operands)
3454{
3455 rtx desta, srca;
3456 rtx desto, srco;
3457
3458 desta = XEXP (operands[1], 0);
3459 srca = XEXP (operands[2], 0);
3460
3461 desto = gen_reg_rtx (Pmode);
3462 srco = gen_reg_rtx (Pmode);
3463
3464 if (GET_CODE (desta) != REG
3465 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3466 desta = copy_to_mode_reg (Pmode, desta);
3467
3468 if (GET_CODE (srca) != REG
3469 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3470 srca = copy_to_mode_reg (Pmode, srca);
3471
3472 emit_insn (gen_movstr_op (desto, srco, desta, srca));
3473 /* desto ends up being a1, which allows this type of add through MOVA. */
3474 emit_insn (gen_addpsi3 (operands[0], desto, GEN_INT (-1)));
3475
3476 return 1;
3477}
3478
3479/* This is a strcmp() opcode. $0 is the destination (HI) which holds
3480 <=>0 depending on the comparison, $1 is one string (MEM:BLK), and
3481 $2 is the other (MEM:BLK). We must do the comparison, and then
3482 convert the flags to a signed integer result. */
3483int
3484m32c_expand_cmpstr(rtx *operands)
3485{
3486 rtx src1a, src2a;
3487
3488 src1a = XEXP (operands[1], 0);
3489 src2a = XEXP (operands[2], 0);
3490
3491 if (GET_CODE (src1a) != REG
3492 || REGNO (src1a) < FIRST_PSEUDO_REGISTER)
3493 src1a = copy_to_mode_reg (Pmode, src1a);
3494
3495 if (GET_CODE (src2a) != REG
3496 || REGNO (src2a) < FIRST_PSEUDO_REGISTER)
3497 src2a = copy_to_mode_reg (Pmode, src2a);
3498
3499 emit_insn (gen_cmpstrhi_op (src1a, src2a, src1a, src2a));
3500 emit_insn (gen_cond_to_int (operands[0]));
3501
3502 return 1;
3503}
3504
3505
23fed240
DD
3506typedef rtx (*shift_gen_func)(rtx, rtx, rtx);
3507
3508static shift_gen_func
3509shift_gen_func_for (int mode, int code)
3510{
3511#define GFF(m,c,f) if (mode == m && code == c) return f
3512 GFF(QImode, ASHIFT, gen_ashlqi3_i);
3513 GFF(QImode, ASHIFTRT, gen_ashrqi3_i);
3514 GFF(QImode, LSHIFTRT, gen_lshrqi3_i);
3515 GFF(HImode, ASHIFT, gen_ashlhi3_i);
3516 GFF(HImode, ASHIFTRT, gen_ashrhi3_i);
3517 GFF(HImode, LSHIFTRT, gen_lshrhi3_i);
3518 GFF(PSImode, ASHIFT, gen_ashlpsi3_i);
3519 GFF(PSImode, ASHIFTRT, gen_ashrpsi3_i);
3520 GFF(PSImode, LSHIFTRT, gen_lshrpsi3_i);
3521 GFF(SImode, ASHIFT, TARGET_A16 ? gen_ashlsi3_16 : gen_ashlsi3_24);
3522 GFF(SImode, ASHIFTRT, TARGET_A16 ? gen_ashrsi3_16 : gen_ashrsi3_24);
3523 GFF(SImode, LSHIFTRT, TARGET_A16 ? gen_lshrsi3_16 : gen_lshrsi3_24);
3524#undef GFF
07127a0a 3525 gcc_unreachable ();
23fed240
DD
3526}
3527
38b2d076
DD
3528/* The m32c only has one shift, but it takes a signed count. GCC
3529 doesn't want this, so we fake it by negating any shift count when
07127a0a
DD
3530 we're pretending to shift the other way. Also, the shift count is
3531 limited to -8..8. It's slightly better to use two shifts for 9..15
3532 than to load the count into r1h, so we do that too. */
38b2d076 3533int
23fed240 3534m32c_prepare_shift (rtx * operands, int scale, int shift_code)
38b2d076 3535{
23fed240
DD
3536 enum machine_mode mode = GET_MODE (operands[0]);
3537 shift_gen_func func = shift_gen_func_for (mode, shift_code);
38b2d076 3538 rtx temp;
23fed240
DD
3539
3540 if (GET_CODE (operands[2]) == CONST_INT)
38b2d076 3541 {
23fed240
DD
3542 int maxc = TARGET_A24 && (mode == PSImode || mode == SImode) ? 32 : 8;
3543 int count = INTVAL (operands[2]) * scale;
3544
3545 while (count > maxc)
3546 {
3547 temp = gen_reg_rtx (mode);
3548 emit_insn (func (temp, operands[1], GEN_INT (maxc)));
3549 operands[1] = temp;
3550 count -= maxc;
3551 }
3552 while (count < -maxc)
3553 {
3554 temp = gen_reg_rtx (mode);
3555 emit_insn (func (temp, operands[1], GEN_INT (-maxc)));
3556 operands[1] = temp;
3557 count += maxc;
3558 }
3559 emit_insn (func (operands[0], operands[1], GEN_INT (count)));
3560 return 1;
38b2d076 3561 }
2e160056
DD
3562
3563 temp = gen_reg_rtx (QImode);
38b2d076 3564 if (scale < 0)
2e160056
DD
3565 /* The pattern has a NEG that corresponds to this. */
3566 emit_move_insn (temp, gen_rtx_NEG (QImode, operands[2]));
3567 else if (TARGET_A16 && mode == SImode)
3568 /* We do this because the code below may modify this, we don't
3569 want to modify the origin of this value. */
3570 emit_move_insn (temp, operands[2]);
38b2d076 3571 else
2e160056 3572 /* We'll only use it for the shift, no point emitting a move. */
38b2d076 3573 temp = operands[2];
2e160056 3574
16659fcf 3575 if (TARGET_A16 && GET_MODE_SIZE (mode) == 4)
2e160056
DD
3576 {
3577 /* The m16c has a limit of -16..16 for SI shifts, even when the
3578 shift count is in a register. Since there are so many targets
3579 of these shifts, it's better to expand the RTL here than to
3580 call a helper function.
3581
3582 The resulting code looks something like this:
3583
3584 cmp.b r1h,-16
3585 jge.b 1f
3586 shl.l -16,dest
3587 add.b r1h,16
3588 1f: cmp.b r1h,16
3589 jle.b 1f
3590 shl.l 16,dest
3591 sub.b r1h,16
3592 1f: shl.l r1h,dest
3593
3594 We take advantage of the fact that "negative" shifts are
3595 undefined to skip one of the comparisons. */
3596
3597 rtx count;
833bf445 3598 rtx label, lref, insn, tempvar;
2e160056 3599
16659fcf
DD
3600 emit_move_insn (operands[0], operands[1]);
3601
2e160056
DD
3602 count = temp;
3603 label = gen_label_rtx ();
3604 lref = gen_rtx_LABEL_REF (VOIDmode, label);
3605 LABEL_NUSES (label) ++;
3606
833bf445
DD
3607 tempvar = gen_reg_rtx (mode);
3608
2e160056
DD
3609 if (shift_code == ASHIFT)
3610 {
3611 /* This is a left shift. We only need check positive counts. */
3612 emit_jump_insn (gen_cbranchqi4 (gen_rtx_LE (VOIDmode, 0, 0),
3613 count, GEN_INT (16), label));
833bf445
DD
3614 emit_insn (func (tempvar, operands[0], GEN_INT (8)));
3615 emit_insn (func (operands[0], tempvar, GEN_INT (8)));
2e160056
DD
3616 insn = emit_insn (gen_addqi3 (count, count, GEN_INT (-16)));
3617 emit_label_after (label, insn);
3618 }
3619 else
3620 {
3621 /* This is a right shift. We only need check negative counts. */
3622 emit_jump_insn (gen_cbranchqi4 (gen_rtx_GE (VOIDmode, 0, 0),
3623 count, GEN_INT (-16), label));
833bf445
DD
3624 emit_insn (func (tempvar, operands[0], GEN_INT (-8)));
3625 emit_insn (func (operands[0], tempvar, GEN_INT (-8)));
2e160056
DD
3626 insn = emit_insn (gen_addqi3 (count, count, GEN_INT (16)));
3627 emit_label_after (label, insn);
3628 }
16659fcf
DD
3629 operands[1] = operands[0];
3630 emit_insn (func (operands[0], operands[0], count));
3631 return 1;
2e160056
DD
3632 }
3633
38b2d076
DD
3634 operands[2] = temp;
3635 return 0;
3636}
3637
12ea2512
DD
3638/* The m32c has a limited range of operations that work on PSImode
3639 values; we have to expand to SI, do the math, and truncate back to
3640 PSI. Yes, this is expensive, but hopefully gcc will learn to avoid
3641 those cases. */
3642void
3643m32c_expand_neg_mulpsi3 (rtx * operands)
3644{
3645 /* operands: a = b * i */
3646 rtx temp1; /* b as SI */
07127a0a
DD
3647 rtx scale /* i as SI */;
3648 rtx temp2; /* a*b as SI */
12ea2512
DD
3649
3650 temp1 = gen_reg_rtx (SImode);
3651 temp2 = gen_reg_rtx (SImode);
07127a0a
DD
3652 if (GET_CODE (operands[2]) != CONST_INT)
3653 {
3654 scale = gen_reg_rtx (SImode);
3655 emit_insn (gen_zero_extendpsisi2 (scale, operands[2]));
3656 }
3657 else
3658 scale = copy_to_mode_reg (SImode, operands[2]);
12ea2512
DD
3659
3660 emit_insn (gen_zero_extendpsisi2 (temp1, operands[1]));
07127a0a
DD
3661 temp2 = expand_simple_binop (SImode, MULT, temp1, scale, temp2, 1, OPTAB_LIB);
3662 emit_insn (gen_truncsipsi2 (operands[0], temp2));
12ea2512
DD
3663}
3664
0166ff05
DD
3665static rtx compare_op0, compare_op1;
3666
3667void
3668m32c_pend_compare (rtx *operands)
3669{
3670 compare_op0 = operands[0];
3671 compare_op1 = operands[1];
3672}
3673
3674void
3675m32c_unpend_compare (void)
3676{
3677 switch (GET_MODE (compare_op0))
3678 {
3679 case QImode:
3680 emit_insn (gen_cmpqi_op (compare_op0, compare_op1));
3681 case HImode:
3682 emit_insn (gen_cmphi_op (compare_op0, compare_op1));
3683 case PSImode:
3684 emit_insn (gen_cmppsi_op (compare_op0, compare_op1));
67fc44cb
DD
3685 default:
3686 /* Just to silence the "missing case" warnings. */ ;
0166ff05
DD
3687 }
3688}
3689
3690void
3691m32c_expand_scc (int code, rtx *operands)
3692{
3693 enum machine_mode mode = TARGET_A16 ? QImode : HImode;
3694
3695 emit_insn (gen_rtx_SET (mode,
3696 operands[0],
3697 gen_rtx_fmt_ee (code,
3698 mode,
3699 compare_op0,
3700 compare_op1)));
3701}
3702
38b2d076
DD
3703/* Pattern Output Functions */
3704
07127a0a
DD
3705/* Returns a (OP (reg:CC FLG_REGNO) (const_int 0)) from some other
3706 match_operand rtx's OP. */
3707rtx
3708m32c_cmp_flg_0 (rtx cmp)
3709{
3710 return gen_rtx_fmt_ee (GET_CODE (cmp),
3711 GET_MODE (cmp),
3712 gen_rtx_REG (CCmode, FLG_REGNO),
3713 GEN_INT (0));
3714}
3715
3716int
3717m32c_expand_movcc (rtx *operands)
3718{
3719 rtx rel = operands[1];
0166ff05
DD
3720 rtx cmp;
3721
07127a0a
DD
3722 if (GET_CODE (rel) != EQ && GET_CODE (rel) != NE)
3723 return 1;
3724 if (GET_CODE (operands[2]) != CONST_INT
3725 || GET_CODE (operands[3]) != CONST_INT)
3726 return 1;
3727 emit_insn (gen_cmpqi(XEXP (rel, 0), XEXP (rel, 1)));
3728 if (GET_CODE (rel) == NE)
3729 {
3730 rtx tmp = operands[2];
3731 operands[2] = operands[3];
3732 operands[3] = tmp;
3733 }
0166ff05
DD
3734
3735 cmp = gen_rtx_fmt_ee (GET_CODE (rel),
3736 GET_MODE (rel),
3737 compare_op0,
3738 compare_op1);
3739
3740 emit_move_insn (operands[0],
3741 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
3742 cmp,
3743 operands[2],
3744 operands[3]));
07127a0a
DD
3745 return 0;
3746}
3747
3748/* Used for the "insv" pattern. Return nonzero to fail, else done. */
3749int
3750m32c_expand_insv (rtx *operands)
3751{
3752 rtx op0, src0, p;
3753 int mask;
3754
3755 if (INTVAL (operands[1]) != 1)
3756 return 1;
3757
9cb96754
N
3758 /* Our insv opcode (bset, bclr) can only insert a one-bit constant. */
3759 if (GET_CODE (operands[3]) != CONST_INT)
3760 return 1;
3761 if (INTVAL (operands[3]) != 0
3762 && INTVAL (operands[3]) != 1
3763 && INTVAL (operands[3]) != -1)
3764 return 1;
3765
07127a0a
DD
3766 mask = 1 << INTVAL (operands[2]);
3767
3768 op0 = operands[0];
3769 if (GET_CODE (op0) == SUBREG
3770 && SUBREG_BYTE (op0) == 0)
3771 {
3772 rtx sub = SUBREG_REG (op0);
3773 if (GET_MODE (sub) == HImode || GET_MODE (sub) == QImode)
3774 op0 = sub;
3775 }
3776
3777 if (no_new_pseudos
3778 || (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0)))
3779 src0 = op0;
3780 else
3781 {
3782 src0 = gen_reg_rtx (GET_MODE (op0));
3783 emit_move_insn (src0, op0);
3784 }
3785
3786 if (GET_MODE (op0) == HImode
3787 && INTVAL (operands[2]) >= 8
3788 && GET_MODE (op0) == MEM)
3789 {
3790 /* We are little endian. */
3791 rtx new_mem = gen_rtx_MEM (QImode, plus_constant (XEXP (op0, 0), 1));
3792 MEM_COPY_ATTRIBUTES (new_mem, op0);
3793 mask >>= 8;
3794 }
3795
8e4edce7
DD
3796 /* First, we generate a mask with the correct polarity. If we are
3797 storing a zero, we want an AND mask, so invert it. */
3798 if (INTVAL (operands[3]) == 0)
07127a0a 3799 {
16659fcf 3800 /* Storing a zero, use an AND mask */
07127a0a
DD
3801 if (GET_MODE (op0) == HImode)
3802 mask ^= 0xffff;
3803 else
3804 mask ^= 0xff;
3805 }
8e4edce7
DD
3806 /* Now we need to properly sign-extend the mask in case we need to
3807 fall back to an AND or OR opcode. */
07127a0a
DD
3808 if (GET_MODE (op0) == HImode)
3809 {
3810 if (mask & 0x8000)
3811 mask -= 0x10000;
3812 }
3813 else
3814 {
3815 if (mask & 0x80)
3816 mask -= 0x100;
3817 }
3818
3819 switch ( (INTVAL (operands[3]) ? 4 : 0)
3820 + ((GET_MODE (op0) == HImode) ? 2 : 0)
3821 + (TARGET_A24 ? 1 : 0))
3822 {
3823 case 0: p = gen_andqi3_16 (op0, src0, GEN_INT (mask)); break;
3824 case 1: p = gen_andqi3_24 (op0, src0, GEN_INT (mask)); break;
3825 case 2: p = gen_andhi3_16 (op0, src0, GEN_INT (mask)); break;
3826 case 3: p = gen_andhi3_24 (op0, src0, GEN_INT (mask)); break;
3827 case 4: p = gen_iorqi3_16 (op0, src0, GEN_INT (mask)); break;
3828 case 5: p = gen_iorqi3_24 (op0, src0, GEN_INT (mask)); break;
3829 case 6: p = gen_iorhi3_16 (op0, src0, GEN_INT (mask)); break;
3830 case 7: p = gen_iorhi3_24 (op0, src0, GEN_INT (mask)); break;
3831 }
3832
3833 emit_insn (p);
3834 return 0;
3835}
3836
3837const char *
3838m32c_scc_pattern(rtx *operands, RTX_CODE code)
3839{
3840 static char buf[30];
3841 if (GET_CODE (operands[0]) == REG
3842 && REGNO (operands[0]) == R0_REGNO)
3843 {
3844 if (code == EQ)
3845 return "stzx\t#1,#0,r0l";
3846 if (code == NE)
3847 return "stzx\t#0,#1,r0l";
3848 }
3849 sprintf(buf, "bm%s\t0,%%h0\n\tand.b\t#1,%%0", GET_RTX_NAME (code));
3850 return buf;
3851}
3852
5abd2125
JS
3853/* Encode symbol attributes of a SYMBOL_REF into its
3854 SYMBOL_REF_FLAGS. */
3855static void
3856m32c_encode_section_info (tree decl, rtx rtl, int first)
3857{
3858 int extra_flags = 0;
3859
3860 default_encode_section_info (decl, rtl, first);
3861 if (TREE_CODE (decl) == FUNCTION_DECL
3862 && m32c_special_page_vector_p (decl))
3863
3864 extra_flags = SYMBOL_FLAG_FUNCVEC_FUNCTION;
3865
3866 if (extra_flags)
3867 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= extra_flags;
3868}
3869
38b2d076
DD
3870/* Returns TRUE if the current function is a leaf, and thus we can
3871 determine which registers an interrupt function really needs to
3872 save. The logic below is mostly about finding the insn sequence
3873 that's the function, versus any sequence that might be open for the
3874 current insn. */
3875static int
3876m32c_leaf_function_p (void)
3877{
3878 rtx saved_first, saved_last;
3879 struct sequence_stack *seq;
3880 int rv;
3881
3882 saved_first = cfun->emit->x_first_insn;
3883 saved_last = cfun->emit->x_last_insn;
3884 for (seq = cfun->emit->sequence_stack; seq && seq->next; seq = seq->next)
3885 ;
3886 if (seq)
3887 {
3888 cfun->emit->x_first_insn = seq->first;
3889 cfun->emit->x_last_insn = seq->last;
3890 }
3891
3892 rv = leaf_function_p ();
3893
3894 cfun->emit->x_first_insn = saved_first;
3895 cfun->emit->x_last_insn = saved_last;
3896 return rv;
3897}
3898
3899/* Returns TRUE if the current function needs to use the ENTER/EXIT
3900 opcodes. If the function doesn't need the frame base or stack
3901 pointer, it can use the simpler RTS opcode. */
3902static bool
3903m32c_function_needs_enter (void)
3904{
3905 rtx insn;
3906 struct sequence_stack *seq;
3907 rtx sp = gen_rtx_REG (Pmode, SP_REGNO);
3908 rtx fb = gen_rtx_REG (Pmode, FB_REGNO);
3909
3910 insn = get_insns ();
3911 for (seq = cfun->emit->sequence_stack;
3912 seq;
3913 insn = seq->first, seq = seq->next);
3914
3915 while (insn)
3916 {
3917 if (reg_mentioned_p (sp, insn))
3918 return true;
3919 if (reg_mentioned_p (fb, insn))
3920 return true;
3921 insn = NEXT_INSN (insn);
3922 }
3923 return false;
3924}
3925
3926/* Mark all the subexpressions of the PARALLEL rtx PAR as
3927 frame-related. Return PAR.
3928
3929 dwarf2out.c:dwarf2out_frame_debug_expr ignores sub-expressions of a
3930 PARALLEL rtx other than the first if they do not have the
3931 FRAME_RELATED flag set on them. So this function is handy for
3932 marking up 'enter' instructions. */
3933static rtx
3934m32c_all_frame_related (rtx par)
3935{
3936 int len = XVECLEN (par, 0);
3937 int i;
3938
3939 for (i = 0; i < len; i++)
3940 F (XVECEXP (par, 0, i));
3941
3942 return par;
3943}
3944
3945/* Emits the prologue. See the frame layout comment earlier in this
3946 file. We can reserve up to 256 bytes with the ENTER opcode, beyond
3947 that we manually update sp. */
3948void
3949m32c_emit_prologue (void)
3950{
3951 int frame_size, extra_frame_size = 0, reg_save_size;
3952 int complex_prologue = 0;
3953
3954 cfun->machine->is_leaf = m32c_leaf_function_p ();
3955 if (interrupt_p (cfun->decl))
3956 {
3957 cfun->machine->is_interrupt = 1;
3958 complex_prologue = 1;
3959 }
3960
3961 reg_save_size = m32c_pushm_popm (PP_justcount);
3962
3963 if (interrupt_p (cfun->decl))
3964 emit_insn (gen_pushm (GEN_INT (cfun->machine->intr_pushm)));
3965
3966 frame_size =
3967 m32c_initial_elimination_offset (FB_REGNO, SP_REGNO) - reg_save_size;
3968 if (frame_size == 0
3969 && !cfun->machine->is_interrupt
3970 && !m32c_function_needs_enter ())
3971 cfun->machine->use_rts = 1;
3972
3973 if (frame_size > 254)
3974 {
3975 extra_frame_size = frame_size - 254;
3976 frame_size = 254;
3977 }
3978 if (cfun->machine->use_rts == 0)
3979 F (emit_insn (m32c_all_frame_related
3980 (TARGET_A16
fa9fd28a
RIL
3981 ? gen_prologue_enter_16 (GEN_INT (frame_size + 2))
3982 : gen_prologue_enter_24 (GEN_INT (frame_size + 4)))));
38b2d076
DD
3983
3984 if (extra_frame_size)
3985 {
3986 complex_prologue = 1;
3987 if (TARGET_A16)
3988 F (emit_insn (gen_addhi3 (gen_rtx_REG (HImode, SP_REGNO),
3989 gen_rtx_REG (HImode, SP_REGNO),
3990 GEN_INT (-extra_frame_size))));
3991 else
3992 F (emit_insn (gen_addpsi3 (gen_rtx_REG (PSImode, SP_REGNO),
3993 gen_rtx_REG (PSImode, SP_REGNO),
3994 GEN_INT (-extra_frame_size))));
3995 }
3996
3997 complex_prologue += m32c_pushm_popm (PP_pushm);
3998
3999 /* This just emits a comment into the .s file for debugging. */
4000 if (complex_prologue)
4001 emit_insn (gen_prologue_end ());
4002}
4003
4004/* Likewise, for the epilogue. The only exception is that, for
4005 interrupts, we must manually unwind the frame as the REIT opcode
4006 doesn't do that. */
4007void
4008m32c_emit_epilogue (void)
4009{
4010 /* This just emits a comment into the .s file for debugging. */
4011 if (m32c_pushm_popm (PP_justcount) > 0 || cfun->machine->is_interrupt)
4012 emit_insn (gen_epilogue_start ());
4013
4014 m32c_pushm_popm (PP_popm);
4015
4016 if (cfun->machine->is_interrupt)
4017 {
4018 enum machine_mode spmode = TARGET_A16 ? HImode : PSImode;
4019
4020 emit_move_insn (gen_rtx_REG (spmode, A0_REGNO),
4021 gen_rtx_REG (spmode, FP_REGNO));
4022 emit_move_insn (gen_rtx_REG (spmode, SP_REGNO),
4023 gen_rtx_REG (spmode, A0_REGNO));
4024 if (TARGET_A16)
4025 emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, FP_REGNO)));
4026 else
4027 emit_insn (gen_poppsi (gen_rtx_REG (PSImode, FP_REGNO)));
4028 emit_insn (gen_popm (GEN_INT (cfun->machine->intr_pushm)));
0e0642aa
RIL
4029 if (TARGET_A16)
4030 emit_jump_insn (gen_epilogue_reit_16 ());
4031 else
4032 emit_jump_insn (gen_epilogue_reit_24 ());
38b2d076
DD
4033 }
4034 else if (cfun->machine->use_rts)
4035 emit_jump_insn (gen_epilogue_rts ());
0e0642aa
RIL
4036 else if (TARGET_A16)
4037 emit_jump_insn (gen_epilogue_exitd_16 ());
38b2d076 4038 else
0e0642aa 4039 emit_jump_insn (gen_epilogue_exitd_24 ());
38b2d076
DD
4040 emit_barrier ();
4041}
4042
4043void
4044m32c_emit_eh_epilogue (rtx ret_addr)
4045{
4046 /* R0[R2] has the stack adjustment. R1[R3] has the address to
4047 return to. We have to fudge the stack, pop everything, pop SP
4048 (fudged), and return (fudged). This is actually easier to do in
4049 assembler, so punt to libgcc. */
4050 emit_jump_insn (gen_eh_epilogue (ret_addr, cfun->machine->eh_stack_adjust));
4051 /* emit_insn (gen_rtx_CLOBBER (HImode, gen_rtx_REG (HImode, R0L_REGNO))); */
4052 emit_barrier ();
4053}
4054
16659fcf
DD
4055/* Indicate which flags must be properly set for a given conditional. */
4056static int
4057flags_needed_for_conditional (rtx cond)
4058{
4059 switch (GET_CODE (cond))
4060 {
4061 case LE:
4062 case GT:
4063 return FLAGS_OSZ;
4064 case LEU:
4065 case GTU:
4066 return FLAGS_ZC;
4067 case LT:
4068 case GE:
4069 return FLAGS_OS;
4070 case LTU:
4071 case GEU:
4072 return FLAGS_C;
4073 case EQ:
4074 case NE:
4075 return FLAGS_Z;
4076 default:
4077 return FLAGS_N;
4078 }
4079}
4080
4081#define DEBUG_CMP 0
4082
4083/* Returns true if a compare insn is redundant because it would only
4084 set flags that are already set correctly. */
4085static bool
4086m32c_compare_redundant (rtx cmp, rtx *operands)
4087{
4088 int flags_needed;
4089 int pflags;
4090 rtx prev, pp, next;
4091 rtx op0, op1, op2;
4092#if DEBUG_CMP
4093 int prev_icode, i;
4094#endif
4095
4096 op0 = operands[0];
4097 op1 = operands[1];
4098 op2 = operands[2];
4099
4100#if DEBUG_CMP
4101 fprintf(stderr, "\n\033[32mm32c_compare_redundant\033[0m\n");
4102 debug_rtx(cmp);
4103 for (i=0; i<2; i++)
4104 {
4105 fprintf(stderr, "operands[%d] = ", i);
4106 debug_rtx(operands[i]);
4107 }
4108#endif
4109
4110 next = next_nonnote_insn (cmp);
4111 if (!next || !INSN_P (next))
4112 {
4113#if DEBUG_CMP
4114 fprintf(stderr, "compare not followed by insn\n");
4115 debug_rtx(next);
4116#endif
4117 return false;
4118 }
4119 if (GET_CODE (PATTERN (next)) == SET
4120 && GET_CODE (XEXP ( PATTERN (next), 1)) == IF_THEN_ELSE)
4121 {
4122 next = XEXP (XEXP (PATTERN (next), 1), 0);
4123 }
4124 else if (GET_CODE (PATTERN (next)) == SET)
4125 {
4126 /* If this is a conditional, flags_needed will be something
4127 other than FLAGS_N, which we test below. */
4128 next = XEXP (PATTERN (next), 1);
4129 }
4130 else
4131 {
4132#if DEBUG_CMP
4133 fprintf(stderr, "compare not followed by conditional\n");
4134 debug_rtx(next);
4135#endif
4136 return false;
4137 }
4138#if DEBUG_CMP
4139 fprintf(stderr, "conditional is: ");
4140 debug_rtx(next);
4141#endif
4142
4143 flags_needed = flags_needed_for_conditional (next);
4144 if (flags_needed == FLAGS_N)
4145 {
4146#if DEBUG_CMP
4147 fprintf(stderr, "compare not followed by conditional\n");
4148 debug_rtx(next);
4149#endif
4150 return false;
4151 }
4152
4153 /* Compare doesn't set overflow and carry the same way that
4154 arithmetic instructions do, so we can't replace those. */
4155 if (flags_needed & FLAGS_OC)
4156 return false;
4157
4158 prev = cmp;
4159 do {
4160 prev = prev_nonnote_insn (prev);
4161 if (!prev)
4162 {
4163#if DEBUG_CMP
4164 fprintf(stderr, "No previous insn.\n");
4165#endif
4166 return false;
4167 }
4168 if (!INSN_P (prev))
4169 {
4170#if DEBUG_CMP
4171 fprintf(stderr, "Previous insn is a non-insn.\n");
4172#endif
4173 return false;
4174 }
4175 pp = PATTERN (prev);
4176 if (GET_CODE (pp) != SET)
4177 {
4178#if DEBUG_CMP
4179 fprintf(stderr, "Previous insn is not a SET.\n");
4180#endif
4181 return false;
4182 }
4183 pflags = get_attr_flags (prev);
4184
4185 /* Looking up attributes of previous insns corrupted the recog
4186 tables. */
4187 INSN_UID (cmp) = -1;
4188 recog (PATTERN (cmp), cmp, 0);
4189
4190 if (pflags == FLAGS_N
4191 && reg_mentioned_p (op0, pp))
4192 {
4193#if DEBUG_CMP
4194 fprintf(stderr, "intermediate non-flags insn uses op:\n");
4195 debug_rtx(prev);
4196#endif
4197 return false;
4198 }
4199 } while (pflags == FLAGS_N);
4200#if DEBUG_CMP
4201 fprintf(stderr, "previous flag-setting insn:\n");
4202 debug_rtx(prev);
4203 debug_rtx(pp);
4204#endif
4205
4206 if (GET_CODE (pp) == SET
4207 && GET_CODE (XEXP (pp, 0)) == REG
4208 && REGNO (XEXP (pp, 0)) == FLG_REGNO
4209 && GET_CODE (XEXP (pp, 1)) == COMPARE)
4210 {
4211 /* Adjacent cbranches must have the same operands to be
4212 redundant. */
4213 rtx pop0 = XEXP (XEXP (pp, 1), 0);
4214 rtx pop1 = XEXP (XEXP (pp, 1), 1);
4215#if DEBUG_CMP
4216 fprintf(stderr, "adjacent cbranches\n");
4217 debug_rtx(pop0);
4218 debug_rtx(pop1);
4219#endif
4220 if (rtx_equal_p (op0, pop0)
4221 && rtx_equal_p (op1, pop1))
4222 return true;
4223#if DEBUG_CMP
4224 fprintf(stderr, "prev cmp not same\n");
4225#endif
4226 return false;
4227 }
4228
4229 /* Else the previous insn must be a SET, with either the source or
4230 dest equal to operands[0], and operands[1] must be zero. */
4231
4232 if (!rtx_equal_p (op1, const0_rtx))
4233 {
4234#if DEBUG_CMP
4235 fprintf(stderr, "operands[1] not const0_rtx\n");
4236#endif
4237 return false;
4238 }
4239 if (GET_CODE (pp) != SET)
4240 {
4241#if DEBUG_CMP
4242 fprintf (stderr, "pp not set\n");
4243#endif
4244 return false;
4245 }
4246 if (!rtx_equal_p (op0, SET_SRC (pp))
4247 && !rtx_equal_p (op0, SET_DEST (pp)))
4248 {
4249#if DEBUG_CMP
4250 fprintf(stderr, "operands[0] not found in set\n");
4251#endif
4252 return false;
4253 }
4254
4255#if DEBUG_CMP
4256 fprintf(stderr, "cmp flags %x prev flags %x\n", flags_needed, pflags);
4257#endif
4258 if ((pflags & flags_needed) == flags_needed)
4259 return true;
4260
4261 return false;
4262}
4263
4264/* Return the pattern for a compare. This will be commented out if
4265 the compare is redundant, else a normal pattern is returned. Thus,
4266 the assembler output says where the compare would have been. */
4267char *
4268m32c_output_compare (rtx insn, rtx *operands)
4269{
4270 static char template[] = ";cmp.b\t%1,%0";
4271 /* ^ 5 */
4272
4273 template[5] = " bwll"[GET_MODE_SIZE(GET_MODE(operands[0]))];
4274 if (m32c_compare_redundant (insn, operands))
4275 {
4276#if DEBUG_CMP
4277 fprintf(stderr, "cbranch: cmp not needed\n");
4278#endif
4279 return template;
4280 }
4281
4282#if DEBUG_CMP
4283 fprintf(stderr, "cbranch: cmp needed: `%s'\n", template);
4284#endif
4285 return template + 1;
4286}
4287
5abd2125
JS
4288#undef TARGET_ENCODE_SECTION_INFO
4289#define TARGET_ENCODE_SECTION_INFO m32c_encode_section_info
4290
38b2d076
DD
4291/* The Global `targetm' Variable. */
4292
4293struct gcc_target targetm = TARGET_INITIALIZER;
4294
4295#include "gt-m32c.h"