1/* Target Code for R8C/M16C/M32C
2 Copyright (C) 2005, 2006, 2007, 2008
3 Free Software Foundation, Inc.
4 Contributed by Red Hat.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it
9 under the terms of the GNU General Public License as published
10 by the Free Software Foundation; either version 3, or (at your
11 option) any later version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT
14 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
16 License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22#include "config.h"
23#include "system.h"
24#include "coretypes.h"
25#include "tm.h"
26#include "rtl.h"
27#include "regs.h"
28#include "hard-reg-set.h"
29#include "real.h"
30#include "insn-config.h"
31#include "conditions.h"
32#include "insn-flags.h"
33#include "output.h"
34#include "insn-attr.h"
35#include "flags.h"
36#include "recog.h"
37#include "reload.h"
38#include "toplev.h"
39#include "obstack.h"
40#include "tree.h"
41#include "expr.h"
42#include "optabs.h"
43#include "except.h"
44#include "function.h"
45#include "ggc.h"
46#include "target.h"
47#include "target-def.h"
48#include "tm_p.h"
49#include "langhooks.h"
50#include "gimple.h"
51#include "df.h"
52
53/* Prototypes */
54
55/* Used by m32c_pushm_popm. */
56typedef enum
57{
58 PP_pushm,
59 PP_popm,
60 PP_justcount
61} Push_Pop_Type;
62
63static tree interrupt_handler (tree *, tree, tree, int, bool *);
64static tree function_vector_handler (tree *, tree, tree, int, bool *);
65static int interrupt_p (tree node);
66static bool m32c_asm_integer (rtx, unsigned int, int);
67static int m32c_comp_type_attributes (const_tree, const_tree);
68static bool m32c_fixed_condition_code_regs (unsigned int *, unsigned int *);
69static struct machine_function *m32c_init_machine_status (void);
70static void m32c_insert_attributes (tree, tree *);
71static bool m32c_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
72 const_tree, bool);
73static bool m32c_promote_prototypes (const_tree);
74static int m32c_pushm_popm (Push_Pop_Type);
75static bool m32c_strict_argument_naming (CUMULATIVE_ARGS *);
76static rtx m32c_struct_value_rtx (tree, int);
77static rtx m32c_subreg (enum machine_mode, rtx, enum machine_mode, int);
78static int need_to_save (int);
79int current_function_special_page_vector (rtx);
80
81#define SYMBOL_FLAG_FUNCVEC_FUNCTION (SYMBOL_FLAG_MACH_DEP << 0)
82
83#define streq(a,b) (strcmp ((a), (b)) == 0)
84
85/* Internal support routines */
86
87/* Debugging statements are tagged with DEBUG0 only so that they can
88 be easily enabled individually, by replacing the '0' with '1' as
89 needed. */
90#define DEBUG0 0
91#define DEBUG1 1
92
93#if DEBUG0
94/* This is needed by some of the commented-out debug statements
95 below. */
96static char const *class_names[LIM_REG_CLASSES] = REG_CLASS_NAMES;
97#endif
98static int class_contents[LIM_REG_CLASSES][1] = REG_CLASS_CONTENTS;
99
100/* These are all to support encode_pattern(). */
101static char pattern[30], *patternp;
102static GTY(()) rtx patternr[30];
103#define RTX_IS(x) (streq (pattern, x))
104
105/* Some macros to simplify the logic throughout this file. */
106#define IS_MEM_REGNO(regno) ((regno) >= MEM0_REGNO && (regno) <= MEM7_REGNO)
107#define IS_MEM_REG(rtx) (GET_CODE (rtx) == REG && IS_MEM_REGNO (REGNO (rtx)))
108
109#define IS_CR_REGNO(regno) ((regno) >= SB_REGNO && (regno) <= PC_REGNO)
110#define IS_CR_REG(rtx) (GET_CODE (rtx) == REG && IS_CR_REGNO (REGNO (rtx)))
111
112/* We do most RTX matching by converting the RTX into a string, and
113 using string compares. This vastly simplifies the logic in many of
114 the functions in this file.
115
116 On exit, pattern[] has the encoded string (use RTX_IS("...") to
117 compare it) and patternr[] has pointers to the nodes in the RTX
118 corresponding to each character in the encoded string. The latter
119 is mostly used by print_operand().
120
121 Unrecognized patterns have '?' in them; this shows up when the
122 assembler complains about syntax errors.
123*/
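/* For illustration (not from the original sources): an address rtx of
   the form (mem (plus (reg a0) (const_int 2))) encodes as "m+ri", with
   patternr[2] pointing at the a0 reg and patternr[3] at the const_int,
   so callers can then test for it with RTX_IS ("m+ri").  */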
124
125static void
126encode_pattern_1 (rtx x)
127{
128 int i;
129
130 if (patternp == pattern + sizeof (pattern) - 2)
131 {
132 patternp[-1] = '?';
133 return;
134 }
135
136 patternr[patternp - pattern] = x;
137
138 switch (GET_CODE (x))
139 {
140 case REG:
141 *patternp++ = 'r';
142 break;
143 case SUBREG:
144 if (GET_MODE_SIZE (GET_MODE (x)) !=
145 GET_MODE_SIZE (GET_MODE (XEXP (x, 0))))
146 *patternp++ = 'S';
147 encode_pattern_1 (XEXP (x, 0));
148 break;
149 case MEM:
150 *patternp++ = 'm';
151 case CONST:
152 encode_pattern_1 (XEXP (x, 0));
153 break;
154 case PLUS:
155 *patternp++ = '+';
156 encode_pattern_1 (XEXP (x, 0));
157 encode_pattern_1 (XEXP (x, 1));
158 break;
159 case PRE_DEC:
160 *patternp++ = '>';
161 encode_pattern_1 (XEXP (x, 0));
162 break;
163 case POST_INC:
164 *patternp++ = '<';
165 encode_pattern_1 (XEXP (x, 0));
166 break;
167 case LO_SUM:
168 *patternp++ = 'L';
169 encode_pattern_1 (XEXP (x, 0));
170 encode_pattern_1 (XEXP (x, 1));
171 break;
172 case HIGH:
173 *patternp++ = 'H';
174 encode_pattern_1 (XEXP (x, 0));
175 break;
176 case SYMBOL_REF:
177 *patternp++ = 's';
178 break;
179 case LABEL_REF:
180 *patternp++ = 'l';
181 break;
182 case CODE_LABEL:
183 *patternp++ = 'c';
184 break;
185 case CONST_INT:
186 case CONST_DOUBLE:
187 *patternp++ = 'i';
188 break;
189 case UNSPEC:
190 *patternp++ = 'u';
191 *patternp++ = '0' + XCINT (x, 1, UNSPEC);
192 for (i = 0; i < XVECLEN (x, 0); i++)
193 encode_pattern_1 (XVECEXP (x, 0, i));
194 break;
195 case USE:
196 *patternp++ = 'U';
197 break;
198 case PARALLEL:
199 *patternp++ = '|';
200 for (i = 0; i < XVECLEN (x, 0); i++)
201 encode_pattern_1 (XVECEXP (x, 0, i));
202 break;
203 case EXPR_LIST:
204 *patternp++ = 'E';
205 encode_pattern_1 (XEXP (x, 0));
206 if (XEXP (x, 1))
207 encode_pattern_1 (XEXP (x, 1));
208 break;
209 default:
210 *patternp++ = '?';
211#if DEBUG0
212 fprintf (stderr, "can't encode pattern %s\n",
213 GET_RTX_NAME (GET_CODE (x)));
214 debug_rtx (x);
215 gcc_unreachable ();
216#endif
217 break;
218 }
219}
220
221static void
222encode_pattern (rtx x)
223{
224 patternp = pattern;
225 encode_pattern_1 (x);
226 *patternp = 0;
227}
228
229/* Since register names indicate the mode they're used in, we need a
230 way to determine which name to refer to the register with. Called
231 by print_operand(). */
232
233static const char *
234reg_name_with_mode (int regno, enum machine_mode mode)
235{
236 int mlen = GET_MODE_SIZE (mode);
237 if (regno == R0_REGNO && mlen == 1)
238 return "r0l";
239 if (regno == R0_REGNO && (mlen == 3 || mlen == 4))
240 return "r2r0";
241 if (regno == R0_REGNO && mlen == 6)
242 return "r2r1r0";
243 if (regno == R0_REGNO && mlen == 8)
244 return "r3r1r2r0";
245 if (regno == R1_REGNO && mlen == 1)
246 return "r1l";
247 if (regno == R1_REGNO && (mlen == 3 || mlen == 4))
248 return "r3r1";
249 if (regno == A0_REGNO && TARGET_A16 && (mlen == 3 || mlen == 4))
250 return "a1a0";
251 return reg_names[regno];
252}
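/* So, for example, r0 is printed as "r0l" when used in QImode, "r2r0"
   in SImode and "r3r1r2r0" in DImode; in HImode it falls through to the
   plain name from reg_names[].  */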
253
254/* How many bytes a register uses on stack when it's pushed. We need
255 to know this because the push opcode needs to explicitly indicate
256 the size of the register, even though the name of the register
257 already tells it that. Used by m32c_output_reg_{push,pop}, which
258 is only used through calls to ASM_OUTPUT_REG_{PUSH,POP}. */
259
260static int
261reg_push_size (int regno)
262{
263 switch (regno)
264 {
265 case R0_REGNO:
266 case R1_REGNO:
267 return 2;
268 case R2_REGNO:
269 case R3_REGNO:
270 case FLG_REGNO:
271 return 2;
272 case A0_REGNO:
273 case A1_REGNO:
274 case SB_REGNO:
275 case FB_REGNO:
276 case SP_REGNO:
277 if (TARGET_A16)
278 return 2;
279 else
280 return 3;
281 default:
282 gcc_unreachable ();
283 }
284}
285
286static int *class_sizes = 0;
287
288/* Given two register classes, find the largest intersection between
289 them. If there is no intersection, return RETURNED_IF_EMPTY
290 instead. */
291static int
292reduce_class (int original_class, int limiting_class, int returned_if_empty)
293{
294 int cc = class_contents[original_class][0];
295 int i, best = NO_REGS;
296 int best_size = 0;
297
298 if (original_class == limiting_class)
299 return original_class;
300
301 if (!class_sizes)
302 {
303 int r;
304 class_sizes = (int *) xmalloc (LIM_REG_CLASSES * sizeof (int));
305 for (i = 0; i < LIM_REG_CLASSES; i++)
306 {
307 class_sizes[i] = 0;
308 for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
309 if (class_contents[i][0] & (1 << r))
310 class_sizes[i]++;
311 }
312 }
313
314 cc &= class_contents[limiting_class][0];
315 for (i = 0; i < LIM_REG_CLASSES; i++)
316 {
317 int ic = class_contents[i][0];
318
319 if ((~cc & ic) == 0)
320 if (best_size < class_sizes[i])
321 {
322 best = i;
323 best_size = class_sizes[i];
324 }
325
326 }
327 if (best == NO_REGS)
328 return returned_if_empty;
329 return best;
330}
331
332/* Returns TRUE if there are any registers that exist in both register
333 classes. */
334static int
335classes_intersect (int class1, int class2)
336{
337 return class_contents[class1][0] & class_contents[class2][0];
338}
339
340/* Used by m32c_register_move_cost to determine if a move is
341 impossibly expensive. */
342static int
343class_can_hold_mode (int rclass, enum machine_mode mode)
344{
345 /* Cache the results: 0=untested 1=no 2=yes */
346 static char results[LIM_REG_CLASSES][MAX_MACHINE_MODE];
347 if (results[rclass][mode] == 0)
348 {
349 int r, n, i;
350 results[rclass][mode] = 1;
351 for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
352 if (class_contents[rclass][0] & (1 << r)
353 && HARD_REGNO_MODE_OK (r, mode))
354 {
355 int ok = 1;
356 n = HARD_REGNO_NREGS (r, mode);
357 for (i = 1; i < n; i++)
358 if (!(class_contents[rclass][0] & (1 << (r + i))))
359 ok = 0;
360 if (ok)
361 {
362 results[rclass][mode] = 2;
363 break;
364 }
365 }
366 }
367#if DEBUG0
368 fprintf (stderr, "class %s can hold %s? %s\n",
369 class_names[rclass], mode_name[mode],
370 (results[rclass][mode] == 2) ? "yes" : "no");
371#endif
372 return results[rclass][mode] == 2;
373}
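/* A quick illustration of the intent: class_can_hold_mode (R23_REGS,
   QImode) comes out false, since r2/r3 have no QImode opcodes, and
   m32c_register_move_cost below relies on that to make such moves
   prohibitively expensive.  */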
374
375/* Run-time Target Specification. */
376
377/* Memregs are memory locations that gcc treats like general
378 registers, as there are a limited number of true registers and the
379 m32c families can use memory in most places that registers can be
380 used.
381
382 However, since memory accesses are more expensive than registers,
383 we allow the user to limit the number of memregs available, in
384 order to try to persuade gcc to try harder to use real registers.
385
386 Memregs are provided by m32c-lib1.S.
387*/
388
389int target_memregs = 16;
390static bool target_memregs_set = FALSE;
391int ok_to_change_target_memregs = TRUE;
392
393#undef TARGET_HANDLE_OPTION
394#define TARGET_HANDLE_OPTION m32c_handle_option
395static bool
396m32c_handle_option (size_t code,
397 const char *arg ATTRIBUTE_UNUSED,
398 int value ATTRIBUTE_UNUSED)
399{
400 if (code == OPT_memregs_)
401 {
402 target_memregs_set = TRUE;
403 target_memregs = atoi (arg);
404 }
405 return TRUE;
406}
407
408/* Implements OVERRIDE_OPTIONS. We limit memregs to 0..16, and
409 provide a default. */
410void
411m32c_override_options (void)
412{
413 if (target_memregs_set)
414 {
415 if (target_memregs < 0 || target_memregs > 16)
416 error ("invalid target memregs value '%d'", target_memregs);
417 }
418 else
419 target_memregs = 16;
420}
421
422/* Defining data structures for per-function information */
423
424/* The usual; we set up our machine_function data. */
425static struct machine_function *
426m32c_init_machine_status (void)
427{
428 struct machine_function *machine;
429 machine =
430 (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
431
432 return machine;
433}
434
435/* Implements INIT_EXPANDERS. We just set up to call the above
436 function. */
437void
438m32c_init_expanders (void)
439{
440 init_machine_status = m32c_init_machine_status;
441}
442
443/* Storage Layout */
444
445/* Register Basics */
446
447/* Basic Characteristics of Registers */
448
449/* Whether a mode fits in a register is complex enough to warrant a
450 table. */
451static struct
452{
453 char qi_regs;
454 char hi_regs;
455 char pi_regs;
456 char si_regs;
457 char di_regs;
458} nregs_table[FIRST_PSEUDO_REGISTER] =
459{
460 { 1, 1, 2, 2, 4 }, /* r0 */
461 { 0, 1, 0, 0, 0 }, /* r2 */
462 { 1, 1, 2, 2, 0 }, /* r1 */
463 { 0, 1, 0, 0, 0 }, /* r3 */
464 { 0, 1, 1, 0, 0 }, /* a0 */
465 { 0, 1, 1, 0, 0 }, /* a1 */
466 { 0, 1, 1, 0, 0 }, /* sb */
467 { 0, 1, 1, 0, 0 }, /* fb */
468 { 0, 1, 1, 0, 0 }, /* sp */
469 { 1, 1, 1, 0, 0 }, /* pc */
470 { 0, 0, 0, 0, 0 }, /* fl */
471 { 1, 1, 1, 0, 0 }, /* ap */
472 { 1, 1, 2, 2, 4 }, /* mem0 */
473 { 1, 1, 2, 2, 4 }, /* mem1 */
474 { 1, 1, 2, 2, 4 }, /* mem2 */
475 { 1, 1, 2, 2, 4 }, /* mem3 */
476 { 1, 1, 2, 2, 4 }, /* mem4 */
477 { 1, 1, 2, 2, 0 }, /* mem5 */
478 { 1, 1, 2, 2, 0 }, /* mem6 */
479 { 1, 1, 0, 0, 0 }, /* mem7 */
480};
481
482/* Implements CONDITIONAL_REGISTER_USAGE. We adjust the number of
483 available memregs, and select which registers need to be preserved
484 across calls based on the chip family. */
485
486void
487m32c_conditional_register_usage (void)
488{
489 int i;
490
491 if (0 <= target_memregs && target_memregs <= 16)
492 {
493 /* The command line option is bytes, but our "registers" are
494 16-bit words. */
495 for (i = target_memregs/2; i < 8; i++)
496 {
497 fixed_regs[MEM0_REGNO + i] = 1;
498 CLEAR_HARD_REG_BIT (reg_class_contents[MEM_REGS], MEM0_REGNO + i);
499 }
500 }
501
502 /* M32CM and M32C preserve more registers across function calls. */
503 if (TARGET_A24)
504 {
505 call_used_regs[R1_REGNO] = 0;
506 call_used_regs[R2_REGNO] = 0;
507 call_used_regs[R3_REGNO] = 0;
508 call_used_regs[A0_REGNO] = 0;
509 call_used_regs[A1_REGNO] = 0;
510 }
511}
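/* Illustrative example: with -memregs=6 (six bytes, i.e. three 16-bit
   words) only mem0..mem2 remain usable; mem3..mem7 are made fixed and
   dropped from MEM_REGS by the loop above.  */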
512
513/* How Values Fit in Registers */
514
515/* Implements HARD_REGNO_NREGS. This is complicated by the fact that
516 different registers are different sizes from each other, *and* may
517 be different sizes in different chip families. */
518static int
519m32c_hard_regno_nregs_1 (int regno, enum machine_mode mode)
520{
521 if (regno == FLG_REGNO && mode == CCmode)
522 return 1;
523 if (regno >= FIRST_PSEUDO_REGISTER)
524 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
525
526 if (regno >= MEM0_REGNO && regno <= MEM7_REGNO)
527 return (GET_MODE_SIZE (mode) + 1) / 2;
528
529 if (GET_MODE_SIZE (mode) <= 1)
530 return nregs_table[regno].qi_regs;
531 if (GET_MODE_SIZE (mode) <= 2)
532 return nregs_table[regno].hi_regs;
533 if (regno == A0_REGNO && mode == PSImode && TARGET_A16)
534 return 2;
535 if ((GET_MODE_SIZE (mode) <= 3 || mode == PSImode) && TARGET_A24)
536 return nregs_table[regno].pi_regs;
537 if (GET_MODE_SIZE (mode) <= 4)
538 return nregs_table[regno].si_regs;
539 if (GET_MODE_SIZE (mode) <= 8)
540 return nregs_table[regno].di_regs;
541 return 0;
542}
543
544int
545m32c_hard_regno_nregs (int regno, enum machine_mode mode)
546{
547 int rv = m32c_hard_regno_nregs_1 (regno, mode);
548 return rv ? rv : 1;
549}
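/* Worked example against nregs_table: an SImode value in r0 really
   occupies r2r0, so m32c_hard_regno_nregs (R0_REGNO, SImode) is 2,
   while QImode in r0 is just r0l and yields 1.  */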
550
551/* Implements HARD_REGNO_MODE_OK. The above function does the work
552 already; just test its return value. */
553int
554m32c_hard_regno_ok (int regno, enum machine_mode mode)
555{
556 return m32c_hard_regno_nregs_1 (regno, mode) != 0;
557}
558
559/* Implements MODES_TIEABLE_P. In general, modes aren't tieable since
560 registers are all different sizes. However, since most modes are
561 bigger than our registers anyway, it's easier to implement this
562 function that way, leaving QImode as the only unique case. */
563int
564m32c_modes_tieable_p (enum machine_mode m1, enum machine_mode m2)
565{
566 if (GET_MODE_SIZE (m1) == GET_MODE_SIZE (m2))
567 return 1;
568
569#if 0
570 if (m1 == QImode || m2 == QImode)
571 return 0;
572#endif
573
574 return 1;
575}
576
577/* Register Classes */
578
579/* Implements REGNO_REG_CLASS. */
580enum machine_mode
581m32c_regno_reg_class (int regno)
582{
583 switch (regno)
584 {
585 case R0_REGNO:
586 return R0_REGS;
587 case R1_REGNO:
588 return R1_REGS;
589 case R2_REGNO:
590 return R2_REGS;
591 case R3_REGNO:
592 return R3_REGS;
593 case A0_REGNO:
594 case A1_REGNO:
595 return A_REGS;
596 case SB_REGNO:
597 return SB_REGS;
598 case FB_REGNO:
599 return FB_REGS;
600 case SP_REGNO:
601 return SP_REGS;
602 case FLG_REGNO:
603 return FLG_REGS;
604 default:
605 if (IS_MEM_REGNO (regno))
606 return MEM_REGS;
607 return ALL_REGS;
608 }
609}
610
611/* Implements REG_CLASS_FROM_CONSTRAINT. Note that some constraints only match
612 for certain chip families. */
613int
614m32c_reg_class_from_constraint (char c ATTRIBUTE_UNUSED, const char *s)
615{
616 if (memcmp (s, "Rsp", 3) == 0)
617 return SP_REGS;
618 if (memcmp (s, "Rfb", 3) == 0)
619 return FB_REGS;
620 if (memcmp (s, "Rsb", 3) == 0)
621 return SB_REGS;
622 if (memcmp (s, "Rcr", 3) == 0)
623 return TARGET_A16 ? CR_REGS : NO_REGS;
624 if (memcmp (s, "Rcl", 3) == 0)
625 return TARGET_A24 ? CR_REGS : NO_REGS;
626 if (memcmp (s, "R0w", 3) == 0)
627 return R0_REGS;
628 if (memcmp (s, "R1w", 3) == 0)
629 return R1_REGS;
630 if (memcmp (s, "R2w", 3) == 0)
631 return R2_REGS;
632 if (memcmp (s, "R3w", 3) == 0)
633 return R3_REGS;
634 if (memcmp (s, "R02", 3) == 0)
635 return R02_REGS;
636 if (memcmp (s, "R03", 3) == 0)
637 return R03_REGS;
638 if (memcmp (s, "Rdi", 3) == 0)
639 return DI_REGS;
640 if (memcmp (s, "Rhl", 3) == 0)
641 return HL_REGS;
642 if (memcmp (s, "R23", 3) == 0)
643 return R23_REGS;
644 if (memcmp (s, "Ra0", 3) == 0)
645 return A0_REGS;
646 if (memcmp (s, "Ra1", 3) == 0)
647 return A1_REGS;
648 if (memcmp (s, "Raa", 3) == 0)
649 return A_REGS;
650 if (memcmp (s, "Raw", 3) == 0)
651 return TARGET_A16 ? A_REGS : NO_REGS;
652 if (memcmp (s, "Ral", 3) == 0)
653 return TARGET_A24 ? A_REGS : NO_REGS;
654 if (memcmp (s, "Rqi", 3) == 0)
655 return QI_REGS;
656 if (memcmp (s, "Rad", 3) == 0)
657 return AD_REGS;
658 if (memcmp (s, "Rsi", 3) == 0)
659 return SI_REGS;
660 if (memcmp (s, "Rhi", 3) == 0)
661 return HI_REGS;
662 if (memcmp (s, "Rhc", 3) == 0)
663 return HC_REGS;
664 if (memcmp (s, "Rra", 3) == 0)
665 return RA_REGS;
666 if (memcmp (s, "Rfl", 3) == 0)
667 return FLG_REGS;
668 if (memcmp (s, "Rmm", 3) == 0)
669 {
670 if (fixed_regs[MEM0_REGNO])
671 return NO_REGS;
672 return MEM_REGS;
673 }
674
675 /* PSImode registers - i.e. whatever can hold a pointer. */
676 if (memcmp (s, "Rpi", 3) == 0)
677 {
678 if (TARGET_A16)
679 return HI_REGS;
680 else
681 return RA_REGS; /* r2r0 and r3r1 can hold pointers. */
682 }
683
684 /* We handle this one as an EXTRA_CONSTRAINT. */
685 if (memcmp (s, "Rpa", 3) == 0)
686 return NO_REGS;
687
688 if (*s == 'R')
689 {
690 fprintf(stderr, "unrecognized R constraint: %.3s\n", s);
691 gcc_unreachable();
692 }
693
694 return NO_REGS;
695}
696
697/* Implements REGNO_OK_FOR_BASE_P. */
698int
699m32c_regno_ok_for_base_p (int regno)
700{
701 if (regno == A0_REGNO
702 || regno == A1_REGNO || regno >= FIRST_PSEUDO_REGISTER)
703 return 1;
704 return 0;
705}
706
707#define DEBUG_RELOAD 0
708
709/* Implements PREFERRED_RELOAD_CLASS. In general, prefer general
710 registers of the appropriate size. */
711int
712m32c_preferred_reload_class (rtx x, int rclass)
713{
714 int newclass = rclass;
715
716#if DEBUG_RELOAD
717 fprintf (stderr, "\npreferred_reload_class for %s is ",
718 class_names[rclass]);
719#endif
720 if (rclass == NO_REGS)
721 rclass = GET_MODE (x) == QImode ? HL_REGS : R03_REGS;
722
723 if (classes_intersect (rclass, CR_REGS))
724 {
725 switch (GET_MODE (x))
726 {
727 case QImode:
728 newclass = HL_REGS;
729 break;
730 default:
731 /* newclass = HI_REGS; */
732 break;
733 }
734 }
735
736 else if (newclass == QI_REGS && GET_MODE_SIZE (GET_MODE (x)) > 2)
737 newclass = SI_REGS;
738 else if (GET_MODE_SIZE (GET_MODE (x)) > 4
739 && ~class_contents[rclass][0] & 0x000f)
740 newclass = DI_REGS;
741
742 rclass = reduce_class (rclass, newclass, rclass);
743
744 if (GET_MODE (x) == QImode)
745 rclass = reduce_class (rclass, HL_REGS, rclass);
746
747#if DEBUG_RELOAD
748 fprintf (stderr, "%s\n", class_names[rclass]);
749 debug_rtx (x);
750
751 if (GET_CODE (x) == MEM
752 && GET_CODE (XEXP (x, 0)) == PLUS
753 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
754 fprintf (stderr, "Glorm!\n");
755#endif
756 return rclass;
757}
758
759/* Implements PREFERRED_OUTPUT_RELOAD_CLASS. */
760int
761m32c_preferred_output_reload_class (rtx x, int rclass)
762{
763 return m32c_preferred_reload_class (x, rclass);
764}
765
766/* Implements LIMIT_RELOAD_CLASS. We basically want to avoid using
767 address registers for reloads since they're needed for address
768 reloads. */
769int
770m32c_limit_reload_class (enum machine_mode mode, int rclass)
771{
772#if DEBUG_RELOAD
773 fprintf (stderr, "limit_reload_class for %s: %s ->",
774 mode_name[mode], class_names[rclass]);
775#endif
776
777 if (mode == QImode)
778 rclass = reduce_class (rclass, HL_REGS, rclass);
779 else if (mode == HImode)
780 rclass = reduce_class (rclass, HI_REGS, rclass);
781 else if (mode == SImode)
782 rclass = reduce_class (rclass, SI_REGS, rclass);
783
784 if (rclass != A_REGS)
785 rclass = reduce_class (rclass, DI_REGS, rclass);
786
787#if DEBUG_RELOAD
788 fprintf (stderr, " %s\n", class_names[rclass]);
789#endif
790 return rclass;
791}
792
793/* Implements SECONDARY_RELOAD_CLASS. QImode values have to be reloaded in
794 r0 or r1, as those are the only real QImode registers. CR regs get
795 reloaded through appropriately sized general or address
796 registers. */
797int
798m32c_secondary_reload_class (int rclass, enum machine_mode mode, rtx x)
799{
800 int cc = class_contents[rclass][0];
801#if DEBUG0
802 fprintf (stderr, "\nsecondary reload class %s %s\n",
803 class_names[rclass], mode_name[mode]);
804 debug_rtx (x);
805#endif
806 if (mode == QImode
807 && GET_CODE (x) == MEM && (cc & ~class_contents[R23_REGS][0]) == 0)
808 return QI_REGS;
809 if (classes_intersect (rclass, CR_REGS)
810 && GET_CODE (x) == REG
811 && REGNO (x) >= SB_REGNO && REGNO (x) <= SP_REGNO)
812 return TARGET_A16 ? HI_REGS : A_REGS;
813 return NO_REGS;
814}
815
816/* Implements CLASS_LIKELY_SPILLED_P. A_REGS is needed for address
817 reloads. */
818int
819m32c_class_likely_spilled_p (int regclass)
820{
821 if (regclass == A_REGS)
822 return 1;
823 return reg_class_size[regclass] == 1;
824}
825
826/* Implements CLASS_MAX_NREGS. We calculate this according to its
827 documented meaning, to avoid potential inconsistencies with actual
828 class definitions. */
829int
830m32c_class_max_nregs (int regclass, enum machine_mode mode)
831{
832 int rn, max = 0;
833
834 for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
835 if (class_contents[regclass][0] & (1 << rn))
836 {
837 int n = m32c_hard_regno_nregs (rn, mode);
838 if (max < n)
839 max = n;
840 }
841 return max;
842}
843
844/* Implements CANNOT_CHANGE_MODE_CLASS. Only r0 and r1 can change to
845 QI (r0l, r1l) because the chip doesn't support QI ops on other
846 registers (well, it does on a0/a1 but if we let gcc do that, reload
847 suffers). Otherwise, we allow changes to larger modes. */
848int
849m32c_cannot_change_mode_class (enum machine_mode from,
850 enum machine_mode to, int rclass)
851{
852 int rn;
853#if DEBUG0
854 fprintf (stderr, "cannot change from %s to %s in %s\n",
855 mode_name[from], mode_name[to], class_names[rclass]);
856#endif
857
858 /* If the larger mode isn't allowed in any of these registers, we
859 can't allow the change. */
860 for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
861 if (class_contents[rclass][0] & (1 << rn))
862 if (! m32c_hard_regno_ok (rn, to))
863 return 1;
864
865 if (to == QImode)
866 return (class_contents[rclass][0] & 0x1ffa);
867
868 if (class_contents[rclass][0] & 0x0005 /* r0, r1 */
869 && GET_MODE_SIZE (from) > 1)
870 return 0;
871 if (GET_MODE_SIZE (from) > 2) /* all other regs */
872 return 0;
873
874 return 1;
875}
876
877/* Helpers for the rest of the file. */
878/* TRUE if the rtx is a REG rtx for the given register. */
879#define IS_REG(rtx,regno) (GET_CODE (rtx) == REG \
880 && REGNO (rtx) == regno)
881/* TRUE if the rtx is a pseudo - specifically, one we can use as a
882 base register in address calculations (hence the "strict"
883 argument). */
884#define IS_PSEUDO(rtx,strict) (!strict && GET_CODE (rtx) == REG \
885 && (REGNO (rtx) == AP_REGNO \
886 || REGNO (rtx) >= FIRST_PSEUDO_REGISTER))
887
888/* Implements CONST_OK_FOR_CONSTRAINT_P. Currently, all constant
889 constraints start with 'I', with the next two characters indicating
890 the type and size of the range allowed. */
891int
892m32c_const_ok_for_constraint_p (HOST_WIDE_INT value,
893 char c ATTRIBUTE_UNUSED, const char *str)
894{
895 /* s=signed u=unsigned n=nonzero m=minus l=log2able,
896 [sun] bits [SUN] bytes, p=pointer size
897 I[-0-9][0-9] matches that number */
898 if (memcmp (str, "Is3", 3) == 0)
899 {
900 return (-8 <= value && value <= 7);
901 }
902 if (memcmp (str, "IS1", 3) == 0)
903 {
904 return (-128 <= value && value <= 127);
905 }
906 if (memcmp (str, "IS2", 3) == 0)
907 {
908 return (-32768 <= value && value <= 32767);
909 }
910 if (memcmp (str, "IU2", 3) == 0)
911 {
912 return (0 <= value && value <= 65535);
913 }
914 if (memcmp (str, "IU3", 3) == 0)
915 {
916 return (0 <= value && value <= 0x00ffffff);
917 }
918 if (memcmp (str, "In4", 3) == 0)
919 {
920 return (-8 <= value && value && value <= 8);
921 }
922 if (memcmp (str, "In5", 3) == 0)
923 {
924 return (-16 <= value && value && value <= 16);
925 }
926 if (memcmp (str, "In6", 3) == 0)
927 {
928 return (-32 <= value && value && value <= 32);
929 }
930 if (memcmp (str, "IM2", 3) == 0)
931 {
932 return (-65536 <= value && value && value <= -1);
933 }
934 if (memcmp (str, "Ilb", 3) == 0)
935 {
936 int b = exact_log2 (value);
937 return (b >= 0 && b <= 7);
938 }
939 if (memcmp (str, "Imb", 3) == 0)
940 {
941 int b = exact_log2 ((value ^ 0xff) & 0xff);
942 return (b >= 0 && b <= 7);
943 }
944 if (memcmp (str, "ImB", 3) == 0)
945 {
946 int b = exact_log2 ((value ^ 0xffff) & 0xffff);
947 return (b >= 0 && b <= 7);
948 }
949 if (memcmp (str, "Ilw", 3) == 0)
950 {
951 int b = exact_log2 (value);
952 return (b >= 0 && b <= 15);
953 }
954 if (memcmp (str, "Imw", 3) == 0)
955 {
956 int b = exact_log2 ((value ^ 0xffff) & 0xffff);
957 return (b >= 0 && b <= 15);
958 }
959 if (memcmp (str, "I00", 3) == 0)
960 {
961 return (value == 0);
962 }
963 return 0;
964}
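/* Illustrative readings of the scheme above: "Is3" accepts the signed
   range -8..7, "IU2" any unsigned 16-bit value, and "Ilb" only powers
   of two whose set bit is in the low byte (e.g. 0x20, for which
   exact_log2 returns 5).  */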
965
966/* Implements EXTRA_CONSTRAINT_STR (see next function too). 'S' is
967 for memory constraints, plus "Rpa" for PARALLEL rtx's we use for
968 call return values. */
969int
970m32c_extra_constraint_p2 (rtx value, char c ATTRIBUTE_UNUSED, const char *str)
971{
972 encode_pattern (value);
973 if (memcmp (str, "Sd", 2) == 0)
974 {
975 /* This is the common "src/dest" address */
976 rtx r;
977 if (GET_CODE (value) == MEM && CONSTANT_P (XEXP (value, 0)))
978 return 1;
979 if (RTX_IS ("ms") || RTX_IS ("m+si"))
980 return 1;
981 if (RTX_IS ("m++rii"))
982 {
983 if (REGNO (patternr[3]) == FB_REGNO
984 && INTVAL (patternr[4]) == 0)
985 return 1;
986 }
987 if (RTX_IS ("mr"))
988 r = patternr[1];
989 else if (RTX_IS ("m+ri") || RTX_IS ("m+rs") || RTX_IS ("m+r+si"))
990 r = patternr[2];
991 else
992 return 0;
993 if (REGNO (r) == SP_REGNO)
994 return 0;
995 return m32c_legitimate_address_p (GET_MODE (value), XEXP (value, 0), 1);
996 }
997 else if (memcmp (str, "Sa", 2) == 0)
998 {
999 rtx r;
1000 if (RTX_IS ("mr"))
1001 r = patternr[1];
1002 else if (RTX_IS ("m+ri"))
1003 r = patternr[2];
1004 else
1005 return 0;
1006 return (IS_REG (r, A0_REGNO) || IS_REG (r, A1_REGNO));
1007 }
1008 else if (memcmp (str, "Si", 2) == 0)
1009 {
1010 return (RTX_IS ("mi") || RTX_IS ("ms") || RTX_IS ("m+si"));
1011 }
1012 else if (memcmp (str, "Ss", 2) == 0)
1013 {
1014 return ((RTX_IS ("mr")
1015 && (IS_REG (patternr[1], SP_REGNO)))
1016 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SP_REGNO))));
1017 }
1018 else if (memcmp (str, "Sf", 2) == 0)
1019 {
1020 return ((RTX_IS ("mr")
1021 && (IS_REG (patternr[1], FB_REGNO)))
1022 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], FB_REGNO))));
1023 }
1024 else if (memcmp (str, "Sb", 2) == 0)
1025 {
1026 return ((RTX_IS ("mr")
1027 && (IS_REG (patternr[1], SB_REGNO)))
1028 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SB_REGNO))));
1029 }
1030 else if (memcmp (str, "Sp", 2) == 0)
1031 {
1032 /* Absolute addresses 0..0x1fff used for bit addressing (I/O ports) */
1033 return (RTX_IS ("mi")
1034 && !(INTVAL (patternr[1]) & ~0x1fff));
1035 }
1036 else if (memcmp (str, "S1", 2) == 0)
1037 {
1038 return r1h_operand (value, QImode);
1039 }
1040
1041 gcc_assert (str[0] != 'S');
1042
1043 if (memcmp (str, "Rpa", 2) == 0)
1044 return GET_CODE (value) == PARALLEL;
1045
1046 return 0;
1047}
1048
1049/* This is for when we're debugging the above. */
1050int
1051m32c_extra_constraint_p (rtx value, char c, const char *str)
1052{
1053 int rv = m32c_extra_constraint_p2 (value, c, str);
1054#if DEBUG0
1055 fprintf (stderr, "\nconstraint %.*s: %d\n", CONSTRAINT_LEN (c, str), str,
1056 rv);
1057 debug_rtx (value);
1058#endif
1059 return rv;
1060}
1061
1062/* Implements EXTRA_MEMORY_CONSTRAINT. Currently, we only use strings
1063 starting with 'S'. */
1064int
1065m32c_extra_memory_constraint (char c, const char *str ATTRIBUTE_UNUSED)
1066{
1067 return c == 'S';
1068}
1069
1070/* Implements EXTRA_ADDRESS_CONSTRAINT. We reserve 'A' strings for these,
1071 but don't currently define any. */
1072int
1073m32c_extra_address_constraint (char c, const char *str ATTRIBUTE_UNUSED)
1074{
1075 return c == 'A';
1076}
1077
1078/* STACK AND CALLING */
1079
1080/* Frame Layout */
1081
1082/* Implements RETURN_ADDR_RTX. Note that R8C and M16C push 24 bits
1083 (yes, THREE bytes) onto the stack for the return address, but we
1084 don't support pointers bigger than 16 bits on those chips. This
1085 will likely wreak havoc with exception unwinding. FIXME. */
1086rtx
1087m32c_return_addr_rtx (int count)
1088{
1089 enum machine_mode mode;
1090 int offset;
1091 rtx ra_mem;
1092
1093 if (count)
1094 return NULL_RTX;
1095 /* we want 2[$fb] */
1096
1097 if (TARGET_A24)
1098 {
1099 /* It's four bytes */
1100 mode = PSImode;
1101 offset = 4;
1102 }
1103 else
1104 {
1105 /* FIXME: it's really 3 bytes */
1106 mode = HImode;
1107 offset = 2;
1108 }
1109
1110 ra_mem =
1111 gen_rtx_MEM (mode, plus_constant (gen_rtx_REG (Pmode, FP_REGNO), offset));
1112 return copy_to_mode_reg (mode, ra_mem);
1113}
1114
1115/* Implements INCOMING_RETURN_ADDR_RTX. See comment above. */
1116rtx
1117m32c_incoming_return_addr_rtx (void)
1118{
1119 /* we want [sp] */
1120 return gen_rtx_MEM (PSImode, gen_rtx_REG (PSImode, SP_REGNO));
1121}
1122
1123/* Exception Handling Support */
1124
1125/* Implements EH_RETURN_DATA_REGNO. Choose registers able to hold
1126 pointers. */
1127int
1128m32c_eh_return_data_regno (int n)
1129{
1130 switch (n)
1131 {
1132 case 0:
1133 return A0_REGNO;
1134 case 1:
1135 if (TARGET_A16)
1136 return R3_REGNO;
1137 else
1138 return R1_REGNO;
1139 default:
1140 return INVALID_REGNUM;
1141 }
1142}
1143
1144/* Implements EH_RETURN_STACKADJ_RTX. Saved and used later in
1145 m32c_emit_eh_epilogue. */
1146rtx
1147m32c_eh_return_stackadj_rtx (void)
1148{
1149 if (!cfun->machine->eh_stack_adjust)
1150 {
1151 rtx sa;
1152
1153 sa = gen_rtx_REG (Pmode, R0_REGNO);
1154 cfun->machine->eh_stack_adjust = sa;
1155 }
1156 return cfun->machine->eh_stack_adjust;
1157}
1158
1159/* Registers That Address the Stack Frame */
1160
1161/* Implements DWARF_FRAME_REGNUM and DBX_REGISTER_NUMBER. Note that
1162 the original spec called for dwarf numbers to vary with register
1163 width as well, for example, r0l, r0, and r2r0 would each have
1164 different dwarf numbers. GCC doesn't support this, and we don't do
1165 it, and gdb seems to like it this way anyway. */
1166unsigned int
1167m32c_dwarf_frame_regnum (int n)
1168{
1169 switch (n)
1170 {
1171 case R0_REGNO:
1172 return 5;
1173 case R1_REGNO:
1174 return 6;
1175 case R2_REGNO:
1176 return 7;
1177 case R3_REGNO:
1178 return 8;
1179 case A0_REGNO:
1180 return 9;
1181 case A1_REGNO:
1182 return 10;
1183 case FB_REGNO:
1184 return 11;
1185 case SB_REGNO:
1186 return 19;
1187
1188 case SP_REGNO:
1189 return 12;
1190 case PC_REGNO:
1191 return 13;
1192 default:
1193 return DWARF_FRAME_REGISTERS + 1;
1194 }
1195}
1196
1197/* The frame looks like this:
1198
1199 ap -> +------------------------------
1200 | Return address (3 or 4 bytes)
1201 | Saved FB (2 or 4 bytes)
1202 fb -> +------------------------------
1203 | local vars
1204 | register saves fb
1205 | through r0 as needed
1206 sp -> +------------------------------
1207*/
1208
1209/* We use this to wrap all emitted insns in the prologue. */
1210static rtx
1211F (rtx x)
1212{
1213 RTX_FRAME_RELATED_P (x) = 1;
1214 return x;
1215}
1216
1217/* This maps register numbers to the PUSHM/POPM bitfield, and tells us
1218 how much the stack pointer moves for each, for each cpu family. */
1219static struct
1220{
1221 int reg1;
1222 int bit;
1223 int a16_bytes;
1224 int a24_bytes;
1225} pushm_info[] =
1226{
1227 /* These are in reverse push (nearest-to-sp) order. */
1228 { R0_REGNO, 0x80, 2, 2 },
1229 { R1_REGNO, 0x40, 2, 2 },
1230 { R2_REGNO, 0x20, 2, 2 },
1231 { R3_REGNO, 0x10, 2, 2 },
1232 { A0_REGNO, 0x08, 2, 4 },
1233 { A1_REGNO, 0x04, 2, 4 },
1234 { SB_REGNO, 0x02, 2, 4 },
1235 { FB_REGNO, 0x01, 2, 4 }
1236};
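/* For example, saving just r0 and r1 uses the PUSHM mask 0xc0 and moves
   $sp by 4 bytes on either family; adding $a0 (mask bit 0x08) costs 2
   more bytes on TARGET_A16 but 4 on TARGET_A24, per the table above.  */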
1237
1238#define PUSHM_N (sizeof(pushm_info)/sizeof(pushm_info[0]))
1239
1240/* Returns TRUE if we need to save/restore the given register. We
1241 save everything for exception handlers, so that any register can be
1242 unwound. For interrupt handlers, we save everything if the handler
1243 calls something else (because we don't know what *that* function
1244 might do), but try to be a bit smarter if the handler is a leaf
1245 function. We always save $a0, though, because we use that in the
1246 epilogue to copy $fb to $sp. */
1247static int
1248need_to_save (int regno)
1249{
1250 if (fixed_regs[regno])
1251 return 0;
1252 if (crtl->calls_eh_return)
1253 return 1;
1254 if (regno == FP_REGNO)
1255 return 0;
1256 if (cfun->machine->is_interrupt
1257 && (!cfun->machine->is_leaf || regno == A0_REGNO))
1258 return 1;
1259 if (df_regs_ever_live_p (regno)
1260 && (!call_used_regs[regno] || cfun->machine->is_interrupt))
1261 return 1;
1262 return 0;
1263}
1264
1265/* This function contains all the intelligence about saving and
1266 restoring registers. It always figures out the register save set.
1267 When called with PP_justcount, it merely returns the size of the
1268 save set (for eliminating the frame pointer, for example). When
1269 called with PP_pushm or PP_popm, it emits the appropriate
1270 instructions for saving (pushm) or restoring (popm) the
1271 registers. */
1272static int
1273m32c_pushm_popm (Push_Pop_Type ppt)
1274{
1275 int reg_mask = 0;
1276 int byte_count = 0, bytes;
1277 int i;
1278 rtx dwarf_set[PUSHM_N];
1279 int n_dwarfs = 0;
1280 int nosave_mask = 0;
1281
1282 if (crtl->return_rtx
1283 && GET_CODE (crtl->return_rtx) == PARALLEL
1284 && !(crtl->calls_eh_return || cfun->machine->is_interrupt))
1285 {
1286 rtx exp = XVECEXP (crtl->return_rtx, 0, 0);
1287 rtx rv = XEXP (exp, 0);
1288 int rv_bytes = GET_MODE_SIZE (GET_MODE (rv));
1289
1290 if (rv_bytes > 2)
1291 nosave_mask |= 0x20; /* PSI, SI */
1292 else
1293 nosave_mask |= 0xf0; /* DF */
1294 if (rv_bytes > 4)
1295 nosave_mask |= 0x50; /* DI */
1296 }
1297
1298 for (i = 0; i < (int) PUSHM_N; i++)
1299 {
1300 /* Skip if neither register needs saving. */
1301 if (!need_to_save (pushm_info[i].reg1))
1302 continue;
1303
1304 if (pushm_info[i].bit & nosave_mask)
1305 continue;
1306
1307 reg_mask |= pushm_info[i].bit;
1308 bytes = TARGET_A16 ? pushm_info[i].a16_bytes : pushm_info[i].a24_bytes;
1309
1310 if (ppt == PP_pushm)
1311 {
1312 enum machine_mode mode = (bytes == 2) ? HImode : SImode;
1313 rtx addr;
1314
1315 /* Always use stack_pointer_rtx instead of calling
1316 rtx_gen_REG ourselves. Code elsewhere in GCC assumes
1317 that there is a single rtx representing the stack pointer,
1318 namely stack_pointer_rtx, and uses == to recognize it. */
1319 addr = stack_pointer_rtx;
1320
1321 if (byte_count != 0)
1322 addr = gen_rtx_PLUS (GET_MODE (addr), addr, GEN_INT (byte_count));
1323
1324 dwarf_set[n_dwarfs++] =
1325 gen_rtx_SET (VOIDmode,
1326 gen_rtx_MEM (mode, addr),
1327 gen_rtx_REG (mode, pushm_info[i].reg1));
1328 F (dwarf_set[n_dwarfs - 1]);
1329
1330 }
1331 byte_count += bytes;
1332 }
1333
1334 if (cfun->machine->is_interrupt)
1335 {
1336 cfun->machine->intr_pushm = reg_mask & 0xfe;
1337 reg_mask = 0;
1338 byte_count = 0;
1339 }
1340
1341 if (cfun->machine->is_interrupt)
1342 for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
1343 if (need_to_save (i))
1344 {
1345 byte_count += 2;
1346 cfun->machine->intr_pushmem[i - MEM0_REGNO] = 1;
1347 }
1348
1349 if (ppt == PP_pushm && byte_count)
1350 {
1351 rtx note = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (n_dwarfs + 1));
1352 rtx pushm;
1353
1354 if (reg_mask)
1355 {
1356 XVECEXP (note, 0, 0)
1357 = gen_rtx_SET (VOIDmode,
1358 stack_pointer_rtx,
1359 gen_rtx_PLUS (GET_MODE (stack_pointer_rtx),
1360 stack_pointer_rtx,
1361 GEN_INT (-byte_count)));
1362 F (XVECEXP (note, 0, 0));
1363
1364 for (i = 0; i < n_dwarfs; i++)
1365 XVECEXP (note, 0, i + 1) = dwarf_set[i];
1366
1367 pushm = F (emit_insn (gen_pushm (GEN_INT (reg_mask))));
1368
1369 REG_NOTES (pushm) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, note,
1370 REG_NOTES (pushm));
1371 }
1372
1373 if (cfun->machine->is_interrupt)
1374 for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
1375 if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
1376 {
1377 if (TARGET_A16)
1378 pushm = emit_insn (gen_pushhi_16 (gen_rtx_REG (HImode, i)));
1379 else
1380 pushm = emit_insn (gen_pushhi_24 (gen_rtx_REG (HImode, i)));
1381 F (pushm);
1382 }
1383 }
1384 if (ppt == PP_popm && byte_count)
1385 {
1386 if (cfun->machine->is_interrupt)
1387 for (i = MEM7_REGNO; i >= MEM0_REGNO; i--)
1388 if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
1389 {
1390 if (TARGET_A16)
1391 emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, i)));
1392 else
1393 emit_insn (gen_pophi_24 (gen_rtx_REG (HImode, i)));
1394 }
1395 if (reg_mask)
1396 emit_insn (gen_popm (GEN_INT (reg_mask)));
1397 }
1398
1399 return byte_count;
1400}
1401
1402/* Implements INITIAL_ELIMINATION_OFFSET. See the comment above that
1403 diagrams our call frame. */
1404int
1405m32c_initial_elimination_offset (int from, int to)
1406{
1407 int ofs = 0;
1408
1409 if (from == AP_REGNO)
1410 {
1411 if (TARGET_A16)
1412 ofs += 5;
1413 else
1414 ofs += 8;
1415 }
1416
1417 if (to == SP_REGNO)
1418 {
1419 ofs += m32c_pushm_popm (PP_justcount);
1420 ofs += get_frame_size ();
1421 }
1422
1423 /* Account for push rounding. */
1424 if (TARGET_A24)
1425 ofs = (ofs + 1) & ~1;
1426#if DEBUG0
1427 fprintf (stderr, "initial_elimination_offset from=%d to=%d, ofs=%d\n", from,
1428 to, ofs);
1429#endif
1430 return ofs;
1431}
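/* Rough illustration: on TARGET_A16, eliminating $ap into $fb gives
   just the 5 bytes of return address plus saved $fb; eliminating into
   $sp additionally adds the pushm save area and the frame size, as
   computed above.  */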
1432
1433/* Passing Function Arguments on the Stack */
1434
1435/* Implements PUSH_ROUNDING. The R8C and M16C have byte stacks, the
1436 M32C has word stacks. */
1437int
1438m32c_push_rounding (int n)
1439{
1440 if (TARGET_R8C || TARGET_M16C)
1441 return n;
1442 return (n + 1) & ~1;
1443}
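/* E.g. pushing a single QImode argument takes 1 byte of stack on the
   byte-stack R8C/M16C but is rounded up to 2 bytes on the word-stack
   M32C.  */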
1444
1445/* Passing Arguments in Registers */
1446
1447/* Implements FUNCTION_ARG. Arguments are passed partly in registers,
1448 partly on stack. If our function returns a struct, a pointer to a
1449 buffer for it is at the top of the stack (last thing pushed). The
1450 first few real arguments may be in registers as follows:
1451
1452 R8C/M16C: arg1 in r1 if it's QI or HI (else it's pushed on stack)
1453 arg2 in r2 if it's HI (else pushed on stack)
1454 rest on stack
1455 M32C: arg1 in r0 if it's QI or HI (else it's pushed on stack)
1456 rest on stack
1457
1458 Structs are not passed in registers, even if they fit. Only
1459 integer and pointer types are passed in registers.
1460
1461 Note that when arg1 doesn't fit in r1, arg2 may still be passed in
1462 r2 if it fits. */
1463rtx
1464m32c_function_arg (CUMULATIVE_ARGS * ca,
1465 enum machine_mode mode, tree type, int named)
1466{
1467 /* Can return a reg, parallel, or 0 for stack */
1468 rtx rv = NULL_RTX;
1469#if DEBUG0
1470 fprintf (stderr, "func_arg %d (%s, %d)\n",
1471 ca->parm_num, mode_name[mode], named);
1472 debug_tree (type);
1473#endif
1474
1475 if (mode == VOIDmode)
1476 return GEN_INT (0);
1477
1478 if (ca->force_mem || !named)
1479 {
1480#if DEBUG0
1481 fprintf (stderr, "func arg: force %d named %d, mem\n", ca->force_mem,
1482 named);
1483#endif
1484 return NULL_RTX;
1485 }
1486
1487 if (type && INTEGRAL_TYPE_P (type) && POINTER_TYPE_P (type))
1488 return NULL_RTX;
1489
1490 if (type && AGGREGATE_TYPE_P (type))
1491 return NULL_RTX;
1492
1493 switch (ca->parm_num)
1494 {
1495 case 1:
1496 if (GET_MODE_SIZE (mode) == 1 || GET_MODE_SIZE (mode) == 2)
1497 rv = gen_rtx_REG (mode, TARGET_A16 ? R1_REGNO : R0_REGNO);
1498 break;
1499
1500 case 2:
1501 if (TARGET_A16 && GET_MODE_SIZE (mode) == 2)
1502 rv = gen_rtx_REG (mode, R2_REGNO);
1503 break;
1504 }
1505
1506#if DEBUG0
1507 debug_rtx (rv);
1508#endif
1509 return rv;
1510}
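/* Putting the rules above together for a call like f (int a, long b)
   (illustrative, assuming 16-bit int): on R8C/M16C the HImode 'a' lands
   in r1 and the SImode 'b' goes to the stack; on M32C 'a' lands in r0
   and 'b' likewise goes to the stack, since only QI/HI values ever use
   argument registers.  */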
1511
1512#undef TARGET_PASS_BY_REFERENCE
1513#define TARGET_PASS_BY_REFERENCE m32c_pass_by_reference
1514static bool
1515m32c_pass_by_reference (CUMULATIVE_ARGS * ca ATTRIBUTE_UNUSED,
1516 enum machine_mode mode ATTRIBUTE_UNUSED,
1517 const_tree type ATTRIBUTE_UNUSED,
1518 bool named ATTRIBUTE_UNUSED)
1519{
1520 return 0;
1521}
1522
1523/* Implements INIT_CUMULATIVE_ARGS. */
1524void
1525m32c_init_cumulative_args (CUMULATIVE_ARGS * ca,
1526 tree fntype,
1527 rtx libname ATTRIBUTE_UNUSED,
1528 tree fndecl,
1529 int n_named_args ATTRIBUTE_UNUSED)
1530{
1531 if (fntype && aggregate_value_p (TREE_TYPE (fntype), fndecl))
1532 ca->force_mem = 1;
1533 else
1534 ca->force_mem = 0;
1535 ca->parm_num = 1;
1536}
1537
1538/* Implements FUNCTION_ARG_ADVANCE. force_mem is set for functions
1539 returning structures, so we always reset that. Otherwise, we only
1540 need to know the sequence number of the argument to know what to do
1541 with it. */
1542void
1543m32c_function_arg_advance (CUMULATIVE_ARGS * ca,
1544 enum machine_mode mode ATTRIBUTE_UNUSED,
1545 tree type ATTRIBUTE_UNUSED,
1546 int named ATTRIBUTE_UNUSED)
1547{
1548 if (ca->force_mem)
1549 ca->force_mem = 0;
1550 else
1551 ca->parm_num++;
1552}
1553
1554/* Implements FUNCTION_ARG_REGNO_P. */
1555int
1556m32c_function_arg_regno_p (int r)
1557{
1558 if (TARGET_A24)
1559 return (r == R0_REGNO);
1560 return (r == R1_REGNO || r == R2_REGNO);
1561}
1562
1563/* HImode and PSImode are the two "native" modes as far as GCC is
1564 concerned, but the chips also support a 32-bit mode which is used
1565 for some opcodes in R8C/M16C and for reset vectors and such. */
1566#undef TARGET_VALID_POINTER_MODE
1567#define TARGET_VALID_POINTER_MODE m32c_valid_pointer_mode
1568static bool
1569m32c_valid_pointer_mode (enum machine_mode mode)
1570{
1571 if (mode == HImode
1572 || mode == PSImode
1573 || mode == SImode
1574 )
1575 return 1;
1576 return 0;
1577}
1578
1579/* How Scalar Function Values Are Returned */
1580
1581/* Implements LIBCALL_VALUE. Most values are returned in $r0, or some
1582 combination of registers starting there (r2r0 for longs, r3r1r2r0
1583 for long long, r3r2r1r0 for doubles), except that that ABI
1584 currently doesn't work because it ends up using all available
1585 general registers and gcc often can't compile it. So, instead, we
1586 return anything bigger than 16 bits in "mem0" (effectively, a
1587 memory location). */
1588rtx
1589m32c_libcall_value (enum machine_mode mode)
1590{
1591 /* return reg or parallel */
1592#if 0
1593 /* FIXME: GCC has difficulty returning large values in registers,
1594 because that ties up most of the general registers and gives the
1595 register allocator little to work with. Until we can resolve
1596 this, large values are returned in memory. */
1597 if (mode == DFmode)
1598 {
1599 rtx rv;
1600
1601 rv = gen_rtx_PARALLEL (mode, rtvec_alloc (4));
1602 XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
1603 gen_rtx_REG (HImode,
1604 R0_REGNO),
1605 GEN_INT (0));
1606 XVECEXP (rv, 0, 1) = gen_rtx_EXPR_LIST (VOIDmode,
1607 gen_rtx_REG (HImode,
1608 R1_REGNO),
1609 GEN_INT (2));
1610 XVECEXP (rv, 0, 2) = gen_rtx_EXPR_LIST (VOIDmode,
1611 gen_rtx_REG (HImode,
1612 R2_REGNO),
1613 GEN_INT (4));
1614 XVECEXP (rv, 0, 3) = gen_rtx_EXPR_LIST (VOIDmode,
1615 gen_rtx_REG (HImode,
1616 R3_REGNO),
1617 GEN_INT (6));
1618 return rv;
1619 }
1620
1621 if (TARGET_A24 && GET_MODE_SIZE (mode) > 2)
1622 {
1623 rtx rv;
1624
1625 rv = gen_rtx_PARALLEL (mode, rtvec_alloc (1));
1626 XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
1627 gen_rtx_REG (mode,
1628 R0_REGNO),
1629 GEN_INT (0));
1630 return rv;
1631 }
1632#endif
1633
1634 if (GET_MODE_SIZE (mode) > 2)
1635 return gen_rtx_REG (mode, MEM0_REGNO);
1636 return gen_rtx_REG (mode, R0_REGNO);
1637}
1638
1639/* Implements FUNCTION_VALUE. Functions and libcalls have the same
1640 conventions. */
1641rtx
1642m32c_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED)
1643{
1644 /* return reg or parallel */
1645 const enum machine_mode mode = TYPE_MODE (valtype);
1646 return m32c_libcall_value (mode);
1647}
1648
1649/* How Large Values Are Returned */
1650
1651/* We return structures by pushing the address on the stack, even if
1652 we use registers for the first few "real" arguments. */
1653#undef TARGET_STRUCT_VALUE_RTX
1654#define TARGET_STRUCT_VALUE_RTX m32c_struct_value_rtx
1655static rtx
1656m32c_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
1657 int incoming ATTRIBUTE_UNUSED)
1658{
1659 return 0;
1660}
1661
1662/* Function Entry and Exit */
1663
1664/* Implements EPILOGUE_USES. Interrupts restore all registers. */
1665int
1666m32c_epilogue_uses (int regno ATTRIBUTE_UNUSED)
1667{
1668 if (cfun->machine->is_interrupt)
1669 return 1;
1670 return 0;
1671}
1672
1673/* Implementing the Varargs Macros */
1674
1675#undef TARGET_STRICT_ARGUMENT_NAMING
1676#define TARGET_STRICT_ARGUMENT_NAMING m32c_strict_argument_naming
1677static bool
1678m32c_strict_argument_naming (CUMULATIVE_ARGS * ca ATTRIBUTE_UNUSED)
1679{
1680 return 1;
1681}
1682
1683/* Trampolines for Nested Functions */
1684
1685/*
1686 m16c:
1687 1 0000 75C43412 mov.w #0x1234,a0
1688 2 0004 FC000000 jmp.a label
1689
1690 m32c:
1691 1 0000 BC563412 mov.l:s #0x123456,a0
1692 2 0004 CC000000 jmp.a label
1693*/
1694
1695/* Implements TRAMPOLINE_SIZE. */
1696int
1697m32c_trampoline_size (void)
1698{
1699 /* Allocate extra space so we can avoid the messy shifts when we
1700 initialize the trampoline; we just write past the end of the
1701 opcode. */
1702 return TARGET_A16 ? 8 : 10;
1703}
1704
1705/* Implements TRAMPOLINE_ALIGNMENT. */
1706int
1707m32c_trampoline_alignment (void)
1708{
1709 return 2;
1710}
1711
1712/* Implements INITIALIZE_TRAMPOLINE. */
1713void
1714m32c_initialize_trampoline (rtx tramp, rtx function, rtx chainval)
1715{
1716#define A0(m,i) gen_rtx_MEM (m, plus_constant (tramp, i))
1717 if (TARGET_A16)
1718 {
1719 /* Note: we subtract a "word" because the moves want signed
1720 constants, not unsigned constants. */
1721 emit_move_insn (A0 (HImode, 0), GEN_INT (0xc475 - 0x10000));
1722 emit_move_insn (A0 (HImode, 2), chainval);
1723 emit_move_insn (A0 (QImode, 4), GEN_INT (0xfc - 0x100));
1724 /* We use 16-bit addresses here, but store the zero to turn it
1725 into a 24-bit offset. */
1726 emit_move_insn (A0 (HImode, 5), function);
1727 emit_move_insn (A0 (QImode, 7), GEN_INT (0x00));
1728 }
1729 else
1730 {
1731 /* Note that the PSI moves actually write 4 bytes. Make sure we
1732 write stuff out in the right order, and leave room for the
1733 extra byte at the end. */
1734 emit_move_insn (A0 (QImode, 0), GEN_INT (0xbc - 0x100));
1735 emit_move_insn (A0 (PSImode, 1), chainval);
1736 emit_move_insn (A0 (QImode, 4), GEN_INT (0xcc - 0x100));
1737 emit_move_insn (A0 (PSImode, 5), function);
1738 }
1739#undef A0
1740}
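/* So on TARGET_A16 the bytes written above match the listing earlier:
   bytes 0-1 hold the mov.w opcode 0x75 0xc4, bytes 2-3 the static
   chain, byte 4 the jmp.a opcode 0xfc, bytes 5-6 the 16-bit function
   address, and byte 7 a zero to extend it to a 24-bit target.  */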
1741
1742/* Implicit Calls to Library Routines */
1743
1744#undef TARGET_INIT_LIBFUNCS
1745#define TARGET_INIT_LIBFUNCS m32c_init_libfuncs
1746static void
1747m32c_init_libfuncs (void)
1748{
1749 if (TARGET_A24)
1750 {
1751 /* We do this because the M32C has an HImode operand, but the
1752 M16C has an 8-bit operand. Since gcc looks at the match data
1753 and not the expanded rtl, we have to reset the array so that
1754 the right modes are found. */
1755 setcc_gen_code[EQ] = CODE_FOR_seq_24;
1756 setcc_gen_code[NE] = CODE_FOR_sne_24;
1757 setcc_gen_code[GT] = CODE_FOR_sgt_24;
1758 setcc_gen_code[GE] = CODE_FOR_sge_24;
1759 setcc_gen_code[LT] = CODE_FOR_slt_24;
1760 setcc_gen_code[LE] = CODE_FOR_sle_24;
1761 setcc_gen_code[GTU] = CODE_FOR_sgtu_24;
1762 setcc_gen_code[GEU] = CODE_FOR_sgeu_24;
1763 setcc_gen_code[LTU] = CODE_FOR_sltu_24;
1764 setcc_gen_code[LEU] = CODE_FOR_sleu_24;
1765 }
1766}
1767
1768/* Addressing Modes */
1769
1770/* Used by GO_IF_LEGITIMATE_ADDRESS. The r8c/m32c family supports a
1771 wide range of non-orthogonal addressing modes, including the
1772 ability to double-indirect on *some* of them. Not all insns
1773 support all modes, either, but we rely on predicates and
1774 constraints to deal with that. */
1775int
1776m32c_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
1777{
1778 int mode_adjust;
1779 if (CONSTANT_P (x))
1780 return 1;
1781
1782 /* Wide references to memory will be split after reload, so we must
1783 ensure that all parts of such splits remain legitimate
1784 addresses. */
1785 mode_adjust = GET_MODE_SIZE (mode) - 1;
1786
1787 /* allowing PLUS yields mem:HI(plus:SI(mem:SI(plus:SI in m32c_split_move */
1788 if (GET_CODE (x) == PRE_DEC
1789 || GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_MODIFY)
1790 {
1791 return (GET_CODE (XEXP (x, 0)) == REG
1792 && REGNO (XEXP (x, 0)) == SP_REGNO);
1793 }
1794
1795#if 0
1796 /* This is the double indirection detection, but it currently
1797 doesn't work as cleanly as this code implies, so until we've had
1798 a chance to debug it, leave it disabled. */
1799 if (TARGET_A24 && GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) != PLUS)
1800 {
1801#if DEBUG_DOUBLE
1802 fprintf (stderr, "double indirect\n");
1803#endif
1804 x = XEXP (x, 0);
1805 }
1806#endif
1807
1808 encode_pattern (x);
1809 if (RTX_IS ("r"))
1810 {
1811 /* Most indexable registers can be used without displacements,
1812 although some of them will be emitted with an explicit zero
1813 to please the assembler. */
1814 switch (REGNO (patternr[0]))
1815 {
1816 case A0_REGNO:
1817 case A1_REGNO:
1818 case SB_REGNO:
1819 case FB_REGNO:
1820 case SP_REGNO:
1821 return 1;
1822
1823 default:
1824 if (IS_PSEUDO (patternr[0], strict))
1825 return 1;
1826 return 0;
1827 }
1828 }
1829 if (RTX_IS ("+ri"))
1830 {
1831 /* This is more interesting, because different base registers
1832 allow for different displacements - both range and signedness
1833 - and it differs from chip series to chip series too. */
1834 int rn = REGNO (patternr[1]);
1835 HOST_WIDE_INT offs = INTVAL (patternr[2]);
1836 switch (rn)
1837 {
1838 case A0_REGNO:
1839 case A1_REGNO:
1840 case SB_REGNO:
1841 /* The syntax only allows positive offsets, but when the
1842 offsets span the entire memory range, we can simulate
1843 negative offsets by wrapping. */
1844 if (TARGET_A16)
1845 return (offs >= -65536 && offs <= 65535 - mode_adjust);
1846 if (rn == SB_REGNO)
1847 return (offs >= 0 && offs <= 65535 - mode_adjust);
1848 /* A0 or A1 */
1849 return (offs >= -16777216 && offs <= 16777215);
1850
1851 case FB_REGNO:
1852 if (TARGET_A16)
1853 return (offs >= -128 && offs <= 127 - mode_adjust);
1854 return (offs >= -65536 && offs <= 65535 - mode_adjust);
1855
1856 case SP_REGNO:
1857 return (offs >= -128 && offs <= 127 - mode_adjust);
1858
1859 default:
1860 if (IS_PSEUDO (patternr[1], strict))
1861 return 1;
1862 return 0;
1863 }
1864 }
1865 if (RTX_IS ("+rs") || RTX_IS ("+r+si"))
1866 {
1867 rtx reg = patternr[1];
1868
1869 /* We don't know where the symbol is, so only allow base
1870 registers which support displacements spanning the whole
1871 address range. */
1872 switch (REGNO (reg))
1873 {
1874 case A0_REGNO:
1875 case A1_REGNO:
1876 /* $sb needs a secondary reload, but since it's involved in
1877 memory address reloads too, we don't deal with it very
1878 well. */
1879 /* case SB_REGNO: */
1880 return 1;
1881 default:
1882 if (IS_PSEUDO (reg, strict))
1883 return 1;
1884 return 0;
1885 }
1886 }
1887 return 0;
1888}
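/* Sketch of the ranges above: on TARGET_A16 a QImode access such as
   12[$fb] is legitimate (-128..127), while 200[$fb] is not and gets
   legitimized through an address register; $a0/$a1 allow much wider
   displacements (roughly +/-64K on A16, +/-16M on A24).  */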
1889
1890/* Implements REG_OK_FOR_BASE_P. */
1891int
1892m32c_reg_ok_for_base_p (rtx x, int strict)
1893{
1894 if (GET_CODE (x) != REG)
1895 return 0;
1896 switch (REGNO (x))
1897 {
1898 case A0_REGNO:
1899 case A1_REGNO:
1900 case SB_REGNO:
1901 case FB_REGNO:
1902 case SP_REGNO:
1903 return 1;
1904 default:
1905 if (IS_PSEUDO (x, strict))
1906 return 1;
1907 return 0;
1908 }
1909}
1910
1911/* We have three choices for choosing fb->aN offsets. If we choose -128,
1912 we need one MOVA -128[fb],aN opcode and 16-bit aN displacements,
1913 like this:
1914 EB 4B FF mova -128[$fb],$a0
1915 D8 0C FF FF mov.w:Q #0,-1[$a0]
1916
1917 Alternately, we subtract the frame size, and hopefully use 8-bit aN
1918 displacements:
1919 7B F4 stc $fb,$a0
1920 77 54 00 01 sub #256,$a0
1921 D8 08 01 mov.w:Q #0,1[$a0]
1922
1923 If we don't offset (i.e. offset by zero), we end up with:
1924 7B F4 stc $fb,$a0
1925 D8 0C 00 FF mov.w:Q #0,-256[$a0]
1926
1927 We have to subtract *something* so that we have a PLUS rtx to mark
1928 that we've done this reload. The -128 offset will never result in
1929 an 8-bit aN offset, and the payoff for the second case is five
1930 loads *if* those loads are within 256 bytes of the other end of the
1931 frame, so the third case seems best. Note that we subtract the
1932 zero, but detect that in the addhi3 pattern. */
1933
1934#define BIG_FB_ADJ 0
1935
1936/* Implements LEGITIMIZE_ADDRESS. The only address we really have to
1937 worry about is frame base offsets, as $fb has a limited
1938 displacement range. We deal with this by attempting to reload $fb
1939 itself into an address register; that seems to result in the best
1940 code. */
1941int
1942m32c_legitimize_address (rtx * x ATTRIBUTE_UNUSED,
1943 rtx oldx ATTRIBUTE_UNUSED,
1944 enum machine_mode mode ATTRIBUTE_UNUSED)
1945{
1946#if DEBUG0
1947 fprintf (stderr, "m32c_legitimize_address for mode %s\n", mode_name[mode]);
1948 debug_rtx (*x);
1949 fprintf (stderr, "\n");
1950#endif
1951
1952 if (GET_CODE (*x) == PLUS
1953 && GET_CODE (XEXP (*x, 0)) == REG
1954 && REGNO (XEXP (*x, 0)) == FB_REGNO
1955 && GET_CODE (XEXP (*x, 1)) == CONST_INT
1956 && (INTVAL (XEXP (*x, 1)) < -128
1957 || INTVAL (XEXP (*x, 1)) > (128 - GET_MODE_SIZE (mode))))
1958 {
1959 /* reload FB to A_REGS */
1960 rtx temp = gen_reg_rtx (Pmode);
1961 *x = copy_rtx (*x);
1962 emit_insn (gen_rtx_SET (VOIDmode, temp, XEXP (*x, 0)));
1963 XEXP (*x, 0) = temp;
1964 return 1;
1965 }
1966
1967 return 0;
1968}
1969
1970/* Implements LEGITIMIZE_RELOAD_ADDRESS. See comment above. */
1971int
1972m32c_legitimize_reload_address (rtx * x,
1973 enum machine_mode mode,
1974 int opnum,
1975 int type, int ind_levels ATTRIBUTE_UNUSED)
1976{
1977#if DEBUG0
1978 fprintf (stderr, "\nm32c_legitimize_reload_address for mode %s\n",
1979 mode_name[mode]);
1980 debug_rtx (*x);
1981#endif
1982
1983 /* At one point, this function tried to get $fb copied to an address
1984 register, which in theory would maximize sharing, but gcc was
1985 *also* still trying to reload the whole address, and we'd run out
1986 of address registers. So we let gcc do the naive (but safe)
1987 reload instead, when the above function doesn't handle it for
04aff2c0
DD
1988 us.
1989
1990 The code below is a second attempt at the above. */
1991
1992 if (GET_CODE (*x) == PLUS
1993 && GET_CODE (XEXP (*x, 0)) == REG
1994 && REGNO (XEXP (*x, 0)) == FB_REGNO
1995 && GET_CODE (XEXP (*x, 1)) == CONST_INT
1996 && (INTVAL (XEXP (*x, 1)) < -128
1997 || INTVAL (XEXP (*x, 1)) > (128 - GET_MODE_SIZE (mode))))
1998 {
1999 rtx sum;
2000 int offset = INTVAL (XEXP (*x, 1));
2001 int adjustment = -BIG_FB_ADJ;
2002
2003 sum = gen_rtx_PLUS (Pmode, XEXP (*x, 0),
2004 GEN_INT (adjustment));
2005 *x = gen_rtx_PLUS (Pmode, sum, GEN_INT (offset - adjustment));
2006 if (type == RELOAD_OTHER)
2007 type = RELOAD_FOR_OTHER_ADDRESS;
2008 push_reload (sum, NULL_RTX, &XEXP (*x, 0), NULL,
2009 A_REGS, Pmode, VOIDmode, 0, 0, opnum,
2010 type);
2011 return 1;
2012 }
2013
2014 if (GET_CODE (*x) == PLUS
2015 && GET_CODE (XEXP (*x, 0)) == PLUS
2016 && GET_CODE (XEXP (XEXP (*x, 0), 0)) == REG
2017 && REGNO (XEXP (XEXP (*x, 0), 0)) == FB_REGNO
2018 && GET_CODE (XEXP (XEXP (*x, 0), 1)) == CONST_INT
2019 && GET_CODE (XEXP (*x, 1)) == CONST_INT
2020 )
2021 {
2022 if (type == RELOAD_OTHER)
2023 type = RELOAD_FOR_OTHER_ADDRESS;
2024 push_reload (XEXP (*x, 0), NULL_RTX, &XEXP (*x, 0), NULL,
2025 A_REGS, Pmode, VOIDmode, 0, 0, opnum,
2026 type);
2027 return 1;
2028 }
38b2d076
DD
2029
2030 return 0;
2031}
2032
38b2d076
DD
2033/* Implements LEGITIMATE_CONSTANT_P. We split large constants anyway,
2034 so we can allow anything. */
2035int
2036m32c_legitimate_constant_p (rtx x ATTRIBUTE_UNUSED)
2037{
2038 return 1;
2039}
2040
2041
2042/* Condition Code Status */
2043
2044#undef TARGET_FIXED_CONDITION_CODE_REGS
2045#define TARGET_FIXED_CONDITION_CODE_REGS m32c_fixed_condition_code_regs
2046static bool
2047m32c_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
2048{
2049 *p1 = FLG_REGNO;
2050 *p2 = INVALID_REGNUM;
2051 return true;
2052}
2053
2054/* Describing Relative Costs of Operations */
2055
2056/* Implements REGISTER_MOVE_COST. We make impossible moves
2057 prohibitively expensive, like trying to put QIs in r2/r3 (there are
2058 no opcodes to do that). We also discourage use of mem* registers
2059 since they're really memory. */
2060int
2061m32c_register_move_cost (enum machine_mode mode, int from, int to)
2062{
2063 int cost = COSTS_N_INSNS (3);
2064 int cc = class_contents[from][0] | class_contents[to][0];
2065 /* FIXME: pick real values, but not 2 for now. */
2066 if (mode == QImode && (cc & class_contents[R23_REGS][0]))
2067 {
2068 if (!(cc & ~class_contents[R23_REGS][0]))
2069 cost = COSTS_N_INSNS (1000);
2070 else
2071 cost = COSTS_N_INSNS (80);
2072 }
2073
2074 if (!class_can_hold_mode (from, mode) || !class_can_hold_mode (to, mode))
2075 cost = COSTS_N_INSNS (1000);
2076
2077 if (classes_intersect (from, CR_REGS))
2078 cost += COSTS_N_INSNS (5);
2079
2080 if (classes_intersect (to, CR_REGS))
2081 cost += COSTS_N_INSNS (5);
2082
2083 if (from == MEM_REGS || to == MEM_REGS)
2084 cost += COSTS_N_INSNS (50);
2085 else if (classes_intersect (from, MEM_REGS)
2086 || classes_intersect (to, MEM_REGS))
2087 cost += COSTS_N_INSNS (10);
2088
2089#if DEBUG0
2090 fprintf (stderr, "register_move_cost %s from %s to %s = %d\n",
2091 mode_name[mode], class_names[from], class_names[to], cost);
2092#endif
2093 return cost;
2094}
2095
2096/* Implements MEMORY_MOVE_COST. */
2097int
2098m32c_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2099 int reg_class ATTRIBUTE_UNUSED,
2100 int in ATTRIBUTE_UNUSED)
2101{
2102 /* FIXME: pick real values. */
2103 return COSTS_N_INSNS (10);
2104}
2105
07127a0a
DD
2106/* Here we try to describe when we use multiple opcodes for one RTX so
2107 that gcc knows when to use them. */
2108#undef TARGET_RTX_COSTS
2109#define TARGET_RTX_COSTS m32c_rtx_costs
2110static bool
f40751dd
JH
2111m32c_rtx_costs (rtx x, int code, int outer_code, int *total,
2112 bool speed ATTRIBUTE_UNUSED)
07127a0a
DD
2113{
2114 switch (code)
2115 {
2116 case REG:
2117 if (REGNO (x) >= MEM0_REGNO && REGNO (x) <= MEM7_REGNO)
2118 *total += COSTS_N_INSNS (500);
2119 else
2120 *total += COSTS_N_INSNS (1);
2121 return true;
2122
2123 case ASHIFT:
2124 case LSHIFTRT:
2125 case ASHIFTRT:
2126 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
2127 {
2128 /* mov.b r1l, r1h */
2129 *total += COSTS_N_INSNS (1);
2130 return true;
2131 }
2132 if (INTVAL (XEXP (x, 1)) > 8
2133 || INTVAL (XEXP (x, 1)) < -8)
2134 {
2135 /* mov.b #N, r1l */
2136 /* mov.b r1l, r1h */
2137 *total += COSTS_N_INSNS (2);
2138 return true;
2139 }
2140 return true;
2141
2142 case LE:
2143 case LEU:
2144 case LT:
2145 case LTU:
2146 case GT:
2147 case GTU:
2148 case GE:
2149 case GEU:
2150 case NE:
2151 case EQ:
2152 if (outer_code == SET)
2153 {
2154 *total += COSTS_N_INSNS (2);
2155 return true;
2156 }
2157 break;
2158
2159 case ZERO_EXTRACT:
2160 {
2161 rtx dest = XEXP (x, 0);
2162 rtx addr = XEXP (dest, 0);
2163 switch (GET_CODE (addr))
2164 {
2165 case CONST_INT:
2166 *total += COSTS_N_INSNS (1);
2167 break;
2168 case SYMBOL_REF:
2169 *total += COSTS_N_INSNS (3);
2170 break;
2171 default:
2172 *total += COSTS_N_INSNS (2);
2173 break;
2174 }
2175 return true;
2176 }
2177 break;
2178
2179 default:
2180 /* Reasonable default. */
2181 if (TARGET_A16 && GET_MODE(x) == SImode)
2182 *total += COSTS_N_INSNS (2);
2183 break;
2184 }
2185 return false;
2186}
2187
2188#undef TARGET_ADDRESS_COST
2189#define TARGET_ADDRESS_COST m32c_address_cost
2190static int
f40751dd 2191m32c_address_cost (rtx addr, bool speed ATTRIBUTE_UNUSED)
07127a0a 2192{
80b093df 2193 int i;
07127a0a
DD
2194 /* fprintf(stderr, "\naddress_cost\n");
2195 debug_rtx(addr);*/
2196 switch (GET_CODE (addr))
2197 {
2198 case CONST_INT:
80b093df
DD
2199 i = INTVAL (addr);
2200 if (i == 0)
2201 return COSTS_N_INSNS(1);
2202 if (0 < i && i <= 255)
2203 return COSTS_N_INSNS(2);
2204 if (0 < i && i <= 65535)
2205 return COSTS_N_INSNS(3);
2206 return COSTS_N_INSNS(4);
07127a0a 2207 case SYMBOL_REF:
80b093df 2208 return COSTS_N_INSNS(4);
07127a0a 2209 case REG:
80b093df
DD
2210 return COSTS_N_INSNS(1);
2211 case PLUS:
2212 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
2213 {
2214 i = INTVAL (XEXP (addr, 1));
2215 if (i == 0)
2216 return COSTS_N_INSNS(1);
2217 if (0 < i && i <= 255)
2218 return COSTS_N_INSNS(2);
2219 if (0 < i && i <= 65535)
2220 return COSTS_N_INSNS(3);
2221 }
2222 return COSTS_N_INSNS(4);
07127a0a
DD
2223 default:
2224 return 0;
2225 }
2226}
2227
38b2d076
DD
2228/* Defining the Output Assembler Language */
2229
2230/* The Overall Framework of an Assembler File */
2231
2232#undef TARGET_HAVE_NAMED_SECTIONS
2233#define TARGET_HAVE_NAMED_SECTIONS true
2234
2235/* Output of Data */
2236
2237/* We may have 24-bit sizes, 24 bits being the native address size.
2238 Currently unused, but provided for completeness. */
2239#undef TARGET_ASM_INTEGER
2240#define TARGET_ASM_INTEGER m32c_asm_integer
2241static bool
2242m32c_asm_integer (rtx x, unsigned int size, int aligned_p)
2243{
2244 switch (size)
2245 {
2246 case 3:
2247 fprintf (asm_out_file, "\t.3byte\t");
2248 output_addr_const (asm_out_file, x);
2249 fputc ('\n', asm_out_file);
2250 return true;
e9555b13
DD
2251 case 4:
2252 if (GET_CODE (x) == SYMBOL_REF)
2253 {
2254 fprintf (asm_out_file, "\t.long\t");
2255 output_addr_const (asm_out_file, x);
2256 fputc ('\n', asm_out_file);
2257 return true;
2258 }
2259 break;
38b2d076
DD
2260 }
2261 return default_assemble_integer (x, size, aligned_p);
2262}
2263
2264/* Output of Assembler Instructions */
2265
a4174ebf 2266/* We use a lookup table because the addressing modes are non-orthogonal. */
38b2d076
DD
2267
2268static struct
2269{
2270 char code;
2271 char const *pattern;
2272 char const *format;
2273}
2274const conversions[] = {
2275 { 0, "r", "0" },
2276
2277 { 0, "mr", "z[1]" },
2278 { 0, "m+ri", "3[2]" },
2279 { 0, "m+rs", "3[2]" },
2280 { 0, "m+r+si", "4+5[2]" },
2281 { 0, "ms", "1" },
2282 { 0, "mi", "1" },
2283 { 0, "m+si", "2+3" },
2284
2285 { 0, "mmr", "[z[2]]" },
2286 { 0, "mm+ri", "[4[3]]" },
2287 { 0, "mm+rs", "[4[3]]" },
2288 { 0, "mm+r+si", "[5+6[3]]" },
2289 { 0, "mms", "[[2]]" },
2290 { 0, "mmi", "[[2]]" },
2291 { 0, "mm+si", "[4[3]]" },
2292
2293 { 0, "i", "#0" },
2294 { 0, "s", "#0" },
2295 { 0, "+si", "#1+2" },
2296 { 0, "l", "#0" },
2297
2298 { 'l', "l", "0" },
2299 { 'd', "i", "0" },
2300 { 'd', "s", "0" },
2301 { 'd', "+si", "1+2" },
2302 { 'D', "i", "0" },
2303 { 'D', "s", "0" },
2304 { 'D', "+si", "1+2" },
2305 { 'x', "i", "#0" },
2306 { 'X', "i", "#0" },
2307 { 'm', "i", "#0" },
2308 { 'b', "i", "#0" },
07127a0a 2309 { 'B', "i", "0" },
38b2d076
DD
2310 { 'p', "i", "0" },
2311
2312 { 0, 0, 0 }
2313};
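/* A worked example of the table (an informal sketch, not part of the
   original comments): for an operand whose encoded pattern is "m+ri",
   e.g. (mem (plus (reg a0) (const_int 4))), patternr[2] is the base
   register and patternr[3] the constant, so the format "3[2]" prints
   the displacement followed by the register in brackets, giving
   something like "4[a0]". */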
2314
2315/* This is in order according to the bitfield that pushm/popm use. */
2316static char const *pushm_regs[] = {
2317 "fb", "sb", "a1", "a0", "r3", "r2", "r1", "r0"
2318};
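/* For example (a sketch based on the 'p' case in m32c_print_operand
   below): a mask of 0x81 has bits 7 and 0 set, so it prints "r0,fb",
   since bit 7 maps to pushm_regs[7] ("r0") and bit 0 to pushm_regs[0]
   ("fb"). */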
2319
2320/* Implements PRINT_OPERAND. */
2321void
2322m32c_print_operand (FILE * file, rtx x, int code)
2323{
2324 int i, j, b;
2325 const char *comma;
2326 HOST_WIDE_INT ival;
2327 int unsigned_const = 0;
ff485e71 2328 int force_sign;
38b2d076
DD
2329
2330 /* Multiplies; constants are converted to sign-extended format but
2331 we need unsigned, so 'u' and 'U' tell us what size unsigned we
2332 need. */
2333 if (code == 'u')
2334 {
2335 unsigned_const = 2;
2336 code = 0;
2337 }
2338 if (code == 'U')
2339 {
2340 unsigned_const = 1;
2341 code = 0;
2342 }
2343 /* This one is only for debugging; you can put it in a pattern to
2344 force this error. */
2345 if (code == '!')
2346 {
2347 fprintf (stderr, "dj: unreviewed pattern:");
2348 if (current_output_insn)
2349 debug_rtx (current_output_insn);
2350 gcc_unreachable ();
2351 }
2352 /* PSImode operations are either .w or .l depending on the target. */
2353 if (code == '&')
2354 {
2355 if (TARGET_A16)
2356 fprintf (file, "w");
2357 else
2358 fprintf (file, "l");
2359 return;
2360 }
2361 /* Inverted conditionals. */
2362 if (code == 'C')
2363 {
2364 switch (GET_CODE (x))
2365 {
2366 case LE:
2367 fputs ("gt", file);
2368 break;
2369 case LEU:
2370 fputs ("gtu", file);
2371 break;
2372 case LT:
2373 fputs ("ge", file);
2374 break;
2375 case LTU:
2376 fputs ("geu", file);
2377 break;
2378 case GT:
2379 fputs ("le", file);
2380 break;
2381 case GTU:
2382 fputs ("leu", file);
2383 break;
2384 case GE:
2385 fputs ("lt", file);
2386 break;
2387 case GEU:
2388 fputs ("ltu", file);
2389 break;
2390 case NE:
2391 fputs ("eq", file);
2392 break;
2393 case EQ:
2394 fputs ("ne", file);
2395 break;
2396 default:
2397 gcc_unreachable ();
2398 }
2399 return;
2400 }
2401 /* Regular conditionals. */
2402 if (code == 'c')
2403 {
2404 switch (GET_CODE (x))
2405 {
2406 case LE:
2407 fputs ("le", file);
2408 break;
2409 case LEU:
2410 fputs ("leu", file);
2411 break;
2412 case LT:
2413 fputs ("lt", file);
2414 break;
2415 case LTU:
2416 fputs ("ltu", file);
2417 break;
2418 case GT:
2419 fputs ("gt", file);
2420 break;
2421 case GTU:
2422 fputs ("gtu", file);
2423 break;
2424 case GE:
2425 fputs ("ge", file);
2426 break;
2427 case GEU:
2428 fputs ("geu", file);
2429 break;
2430 case NE:
2431 fputs ("ne", file);
2432 break;
2433 case EQ:
2434 fputs ("eq", file);
2435 break;
2436 default:
2437 gcc_unreachable ();
2438 }
2439 return;
2440 }
2441 /* Used in negsi2 to do HImode ops on the two parts of an SImode
2442 operand. */
2443 if (code == 'h' && GET_MODE (x) == SImode)
2444 {
2445 x = m32c_subreg (HImode, x, SImode, 0);
2446 code = 0;
2447 }
2448 if (code == 'H' && GET_MODE (x) == SImode)
2449 {
2450 x = m32c_subreg (HImode, x, SImode, 2);
2451 code = 0;
2452 }
07127a0a
DD
2453 if (code == 'h' && GET_MODE (x) == HImode)
2454 {
2455 x = m32c_subreg (QImode, x, HImode, 0);
2456 code = 0;
2457 }
2458 if (code == 'H' && GET_MODE (x) == HImode)
2459 {
2460 /* We can't actually represent this as an rtx. Do it here. */
2461 if (GET_CODE (x) == REG)
2462 {
2463 switch (REGNO (x))
2464 {
2465 case R0_REGNO:
2466 fputs ("r0h", file);
2467 return;
2468 case R1_REGNO:
2469 fputs ("r1h", file);
2470 return;
2471 default:
2472 gcc_unreachable();
2473 }
2474 }
2475 /* This should be a MEM. */
2476 x = m32c_subreg (QImode, x, HImode, 1);
2477 code = 0;
2478 }
2479 /* This is for BMcond, which always wants word register names. */
2480 if (code == 'h' && GET_MODE (x) == QImode)
2481 {
2482 if (GET_CODE (x) == REG)
2483 x = gen_rtx_REG (HImode, REGNO (x));
2484 code = 0;
2485 }
38b2d076
DD
2486 /* 'x' and 'X' need to be ignored for non-immediates. */
2487 if ((code == 'x' || code == 'X') && GET_CODE (x) != CONST_INT)
2488 code = 0;
2489
2490 encode_pattern (x);
ff485e71 2491 force_sign = 0;
38b2d076
DD
2492 for (i = 0; conversions[i].pattern; i++)
2493 if (conversions[i].code == code
2494 && streq (conversions[i].pattern, pattern))
2495 {
2496 for (j = 0; conversions[i].format[j]; j++)
2497 /* backslash quotes the next character in the output pattern. */
2498 if (conversions[i].format[j] == '\\')
2499 {
2500 fputc (conversions[i].format[j + 1], file);
2501 j++;
2502 }
2503 /* Digits in the output pattern indicate that the
2504 corresponding RTX is to be output at that point. */
2505 else if (ISDIGIT (conversions[i].format[j]))
2506 {
2507 rtx r = patternr[conversions[i].format[j] - '0'];
2508 switch (GET_CODE (r))
2509 {
2510 case REG:
2511 fprintf (file, "%s",
2512 reg_name_with_mode (REGNO (r), GET_MODE (r)));
2513 break;
2514 case CONST_INT:
2515 switch (code)
2516 {
2517 case 'b':
07127a0a
DD
2518 case 'B':
2519 {
2520 int v = INTVAL (r);
2521 int i = (int) exact_log2 (v);
2522 if (i == -1)
2523 i = (int) exact_log2 ((v ^ 0xffff) & 0xffff);
2524 if (i == -1)
2525 i = (int) exact_log2 ((v ^ 0xff) & 0xff);
2526 /* Bit position. */
2527 fprintf (file, "%d", i);
2528 }
38b2d076
DD
2529 break;
2530 case 'x':
2531 /* Unsigned byte. */
2532 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2533 INTVAL (r) & 0xff);
2534 break;
2535 case 'X':
2536 /* Unsigned word. */
2537 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2538 INTVAL (r) & 0xffff);
2539 break;
2540 case 'p':
2541 /* pushm and popm encode a register set into a single byte. */
2542 comma = "";
2543 for (b = 7; b >= 0; b--)
2544 if (INTVAL (r) & (1 << b))
2545 {
2546 fprintf (file, "%s%s", comma, pushm_regs[b]);
2547 comma = ",";
2548 }
2549 break;
2550 case 'm':
2551 /* "Minus". Output -X */
2552 ival = (-INTVAL (r) & 0xffff);
2553 if (ival & 0x8000)
2554 ival = ival - 0x10000;
2555 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2556 break;
2557 default:
2558 ival = INTVAL (r);
2559 if (conversions[i].format[j + 1] == '[' && ival < 0)
2560 {
2561 /* We can simulate negative displacements by
2562 taking advantage of address space
2563 wrapping when the offset can span the
2564 entire address range. */
2565 rtx base =
2566 patternr[conversions[i].format[j + 2] - '0'];
2567 if (GET_CODE (base) == REG)
2568 switch (REGNO (base))
2569 {
2570 case A0_REGNO:
2571 case A1_REGNO:
2572 if (TARGET_A24)
2573 ival = 0x1000000 + ival;
2574 else
2575 ival = 0x10000 + ival;
2576 break;
2577 case SB_REGNO:
2578 if (TARGET_A16)
2579 ival = 0x10000 + ival;
2580 break;
2581 }
2582 }
2583 else if (code == 'd' && ival < 0 && j == 0)
2584 /* The "mova" opcode is used to do addition by
2585 computing displacements, but again, we need
2586 displacements to be unsigned *if* they're
2587 the only component of the displacement
2588 (i.e. no "symbol-4" type displacement). */
2589 ival = (TARGET_A24 ? 0x1000000 : 0x10000) + ival;
2590
2591 if (conversions[i].format[j] == '0')
2592 {
2593 /* More conversions to unsigned. */
2594 if (unsigned_const == 2)
2595 ival &= 0xffff;
2596 if (unsigned_const == 1)
2597 ival &= 0xff;
2598 }
2599 if (streq (conversions[i].pattern, "mi")
2600 || streq (conversions[i].pattern, "mmi"))
2601 {
2602 /* Integers used as addresses are unsigned. */
2603 ival &= (TARGET_A24 ? 0xffffff : 0xffff);
2604 }
ff485e71
DD
2605 if (force_sign && ival >= 0)
2606 fputc ('+', file);
38b2d076
DD
2607 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2608 break;
2609 }
2610 break;
2611 case CONST_DOUBLE:
2612 /* We don't have const_double constants. If it
2613 happens, make it obvious. */
2614 fprintf (file, "[const_double 0x%lx]",
2615 (unsigned long) CONST_DOUBLE_HIGH (r));
2616 break;
2617 case SYMBOL_REF:
2618 assemble_name (file, XSTR (r, 0));
2619 break;
2620 case LABEL_REF:
2621 output_asm_label (r);
2622 break;
2623 default:
2624 fprintf (stderr, "don't know how to print this operand:");
2625 debug_rtx (r);
2626 gcc_unreachable ();
2627 }
2628 }
2629 else
2630 {
2631 if (conversions[i].format[j] == 'z')
2632 {
2633 /* Some addressing modes *must* have a displacement,
2634 so insert a zero here if needed. */
2635 int k;
2636 for (k = j + 1; conversions[i].format[k]; k++)
2637 if (ISDIGIT (conversions[i].format[k]))
2638 {
2639 rtx reg = patternr[conversions[i].format[k] - '0'];
2640 if (GET_CODE (reg) == REG
2641 && (REGNO (reg) == SB_REGNO
2642 || REGNO (reg) == FB_REGNO
2643 || REGNO (reg) == SP_REGNO))
2644 fputc ('0', file);
2645 }
2646 continue;
2647 }
2648 /* Signed displacements off symbols need to have signs
2649 blended cleanly. */
2650 if (conversions[i].format[j] == '+'
ff485e71 2651 && (!code || code == 'D' || code == 'd')
38b2d076 2652 && ISDIGIT (conversions[i].format[j + 1])
ff485e71
DD
2653 && (GET_CODE (patternr[conversions[i].format[j + 1] - '0'])
2654 == CONST_INT))
2655 {
2656 force_sign = 1;
2657 continue;
2658 }
38b2d076
DD
2659 fputc (conversions[i].format[j], file);
2660 }
2661 break;
2662 }
2663 if (!conversions[i].pattern)
2664 {
2665 fprintf (stderr, "unconvertible operand %c `%s'", code ? code : '-',
2666 pattern);
2667 debug_rtx (x);
2668 fprintf (file, "[%c.%s]", code ? code : '-', pattern);
2669 }
2670
2671 return;
2672}
2673
2674/* Implements PRINT_OPERAND_PUNCT_VALID_P. See m32c_print_operand
2675 above for descriptions of what these do. */
2676int
2677m32c_print_operand_punct_valid_p (int c)
2678{
2679 if (c == '&' || c == '!')
2680 return 1;
2681 return 0;
2682}
2683
2684/* Implements PRINT_OPERAND_ADDRESS. Nothing unusual here. */
2685void
2686m32c_print_operand_address (FILE * stream, rtx address)
2687{
235e1fe8
NC
2688 if (GET_CODE (address) == MEM)
2689 address = XEXP (address, 0);
2690 else
2691 /* cf: gcc.dg/asm-4.c. */
2692 gcc_assert (GET_CODE (address) == REG);
2693
2694 m32c_print_operand (stream, address, 0);
38b2d076
DD
2695}
2696
2697/* Implements ASM_OUTPUT_REG_PUSH. Control registers are pushed
2698 differently than general registers. */
2699void
2700m32c_output_reg_push (FILE * s, int regno)
2701{
2702 if (regno == FLG_REGNO)
2703 fprintf (s, "\tpushc\tflg\n");
2704 else
04aff2c0 2705 fprintf (s, "\tpush.%c\t%s\n",
38b2d076
DD
2706 " bwll"[reg_push_size (regno)], reg_names[regno]);
2707}
2708
2709/* Likewise for ASM_OUTPUT_REG_POP. */
2710void
2711m32c_output_reg_pop (FILE * s, int regno)
2712{
2713 if (regno == FLG_REGNO)
2714 fprintf (s, "\tpopc\tflg\n");
2715 else
04aff2c0 2716 fprintf (s, "\tpop.%c\t%s\n",
38b2d076
DD
2717 " bwll"[reg_push_size (regno)], reg_names[regno]);
2718}
2719
2720/* Defining target-specific uses of `__attribute__' */
2721
2722/* Used to simplify the logic below. Find the attributes wherever
2723 they may be. */
2724#define M32C_ATTRIBUTES(decl) \
2725 (TYPE_P (decl)) ? TYPE_ATTRIBUTES (decl) \
2726 : DECL_ATTRIBUTES (decl) \
2727 ? (DECL_ATTRIBUTES (decl)) \
2728 : TYPE_ATTRIBUTES (TREE_TYPE (decl))
2729
2730/* Returns TRUE if the given tree has the "interrupt" attribute. */
2731static int
2732interrupt_p (tree node ATTRIBUTE_UNUSED)
2733{
2734 tree list = M32C_ATTRIBUTES (node);
2735 while (list)
2736 {
2737 if (is_attribute_p ("interrupt", TREE_PURPOSE (list)))
2738 return 1;
2739 list = TREE_CHAIN (list);
2740 }
2741 return 0;
2742}
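/* A hypothetical use in user code (not taken from this file):

     void __attribute__ ((interrupt)) timer_isr (void);

   marks timer_isr so that the prologue/epilogue code later in this
   file treats it as an interrupt handler. */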
2743
2744static tree
2745interrupt_handler (tree * node ATTRIBUTE_UNUSED,
2746 tree name ATTRIBUTE_UNUSED,
2747 tree args ATTRIBUTE_UNUSED,
2748 int flags ATTRIBUTE_UNUSED,
2749 bool * no_add_attrs ATTRIBUTE_UNUSED)
2750{
2751 return NULL_TREE;
2752}
2753
5abd2125
JS
2754/* Returns TRUE if given tree has the "function_vector" attribute. */
2755int
2756m32c_special_page_vector_p (tree func)
2757{
653e2568
DD
2758 tree list;
2759
5abd2125
JS
2760 if (TREE_CODE (func) != FUNCTION_DECL)
2761 return 0;
2762
653e2568 2763 list = M32C_ATTRIBUTES (func);
5abd2125
JS
2764 while (list)
2765 {
2766 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2767 return 1;
2768 list = TREE_CHAIN (list);
2769 }
2770 return 0;
2771}
2772
2773static tree
2774function_vector_handler (tree * node ATTRIBUTE_UNUSED,
2775 tree name ATTRIBUTE_UNUSED,
2776 tree args ATTRIBUTE_UNUSED,
2777 int flags ATTRIBUTE_UNUSED,
2778 bool * no_add_attrs ATTRIBUTE_UNUSED)
2779{
2780 if (TARGET_R8C)
2781 {
2782 /* The attribute is not supported for the R8C target. */
2783 warning (OPT_Wattributes,
2784 "`%s' attribute is not supported for the R8C target",
2785 IDENTIFIER_POINTER (name));
2786 *no_add_attrs = true;
2787 }
2788 else if (TREE_CODE (*node) != FUNCTION_DECL)
2789 {
2790 /* The attribute must be applied to functions only. */
2791 warning (OPT_Wattributes,
2792 "`%s' attribute applies only to functions",
2793 IDENTIFIER_POINTER (name));
2794 *no_add_attrs = true;
2795 }
2796 else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
2797 {
2798 /* The argument must be a constant integer. */
2799 warning (OPT_Wattributes,
2800 "`%s' attribute argument not an integer constant",
2801 IDENTIFIER_POINTER (name));
2802 *no_add_attrs = true;
2803 }
2804 else if (TREE_INT_CST_LOW (TREE_VALUE (args)) < 18
2805 || TREE_INT_CST_LOW (TREE_VALUE (args)) > 255)
2806 {
2807 /* The argument value must be between 18 and 255. */
2808 warning (OPT_Wattributes,
2809 "`%s' attribute argument should be between 18 and 255",
2810 IDENTIFIER_POINTER (name));
2811 *no_add_attrs = true;
2812 }
2813 return NULL_TREE;
2814}
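/* A hypothetical use of the attribute (not taken from this file):

     void __attribute__ ((function_vector (0x20))) fast_call (void);

   The handler above only accepts a single integer constant between 18
   and 255, and rejects the attribute entirely for the R8C. */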
2815
2816/* If the function is assigned the attribute 'function_vector', it
2817 returns the function vector number, otherwise returns zero. */
2818int
2819current_function_special_page_vector (rtx x)
2820{
2821 int num;
2822
2823 if ((GET_CODE(x) == SYMBOL_REF)
2824 && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
2825 {
653e2568 2826 tree list;
5abd2125
JS
2827 tree t = SYMBOL_REF_DECL (x);
2828
2829 if (TREE_CODE (t) != FUNCTION_DECL)
2830 return 0;
2831
653e2568 2832 list = M32C_ATTRIBUTES (t);
5abd2125
JS
2833 while (list)
2834 {
2835 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2836 {
2837 num = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list)));
2838 return num;
2839 }
2840
2841 list = TREE_CHAIN (list);
2842 }
2843
2844 return 0;
2845 }
2846 else
2847 return 0;
2848}
2849
38b2d076
DD
2850#undef TARGET_ATTRIBUTE_TABLE
2851#define TARGET_ATTRIBUTE_TABLE m32c_attribute_table
2852static const struct attribute_spec m32c_attribute_table[] = {
2853 {"interrupt", 0, 0, false, false, false, interrupt_handler},
5abd2125 2854 {"function_vector", 1, 1, true, false, false, function_vector_handler},
38b2d076
DD
2855 {0, 0, 0, 0, 0, 0, 0}
2856};
2857
2858#undef TARGET_COMP_TYPE_ATTRIBUTES
2859#define TARGET_COMP_TYPE_ATTRIBUTES m32c_comp_type_attributes
2860static int
3101faab
KG
2861m32c_comp_type_attributes (const_tree type1 ATTRIBUTE_UNUSED,
2862 const_tree type2 ATTRIBUTE_UNUSED)
38b2d076
DD
2863{
2864 /* 0=incompatible 1=compatible 2=warning */
2865 return 1;
2866}
2867
2868#undef TARGET_INSERT_ATTRIBUTES
2869#define TARGET_INSERT_ATTRIBUTES m32c_insert_attributes
2870static void
2871m32c_insert_attributes (tree node ATTRIBUTE_UNUSED,
2872 tree * attr_ptr ATTRIBUTE_UNUSED)
2873{
2874 /* Nothing to do here. */
2875}
2876
2877/* Predicates */
2878
f9b89438 2879/* This is a list of legal subregs of hard regs. */
67fc44cb
DD
2880static const struct {
2881 unsigned char outer_mode_size;
2882 unsigned char inner_mode_size;
2883 unsigned char byte_mask;
2884 unsigned char legal_when;
f9b89438 2885 unsigned int regno;
f9b89438 2886} legal_subregs[] = {
67fc44cb
DD
2887 {1, 2, 0x03, 1, R0_REGNO}, /* r0h r0l */
2888 {1, 2, 0x03, 1, R1_REGNO}, /* r1h r1l */
2889 {1, 2, 0x01, 1, A0_REGNO},
2890 {1, 2, 0x01, 1, A1_REGNO},
f9b89438 2891
67fc44cb
DD
2892 {1, 4, 0x01, 1, A0_REGNO},
2893 {1, 4, 0x01, 1, A1_REGNO},
f9b89438 2894
67fc44cb
DD
2895 {2, 4, 0x05, 1, R0_REGNO}, /* r2 r0 */
2896 {2, 4, 0x05, 1, R1_REGNO}, /* r3 r1 */
2897 {2, 4, 0x05, 16, A0_REGNO}, /* a1 a0 */
2898 {2, 4, 0x01, 24, A0_REGNO}, /* a1 a0 */
2899 {2, 4, 0x01, 24, A1_REGNO}, /* a1 a0 */
f9b89438 2900
67fc44cb 2901 {4, 8, 0x55, 1, R0_REGNO}, /* r3 r1 r2 r0 */
f9b89438
DD
2902};
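/* Reading an entry (informal example): {2, 4, 0x05, 1, R0_REGNO} says
   that an HImode (2-byte) subreg of an SImode (4-byte) value in r0 is
   legal at byte offsets 0 and 2 (byte_mask 0x05 has bits 0 and 2 set),
   on every target (legal_when == 1). */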
2903
2904/* Returns TRUE if OP is a subreg of a hard reg which we don't
2905 support. */
2906bool
2907m32c_illegal_subreg_p (rtx op)
2908{
f9b89438
DD
2909 int offset;
2910 unsigned int i;
2911 int src_mode, dest_mode;
2912
2913 if (GET_CODE (op) != SUBREG)
2914 return false;
2915
2916 dest_mode = GET_MODE (op);
2917 offset = SUBREG_BYTE (op);
2918 op = SUBREG_REG (op);
2919 src_mode = GET_MODE (op);
2920
2921 if (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (src_mode))
2922 return false;
2923 if (GET_CODE (op) != REG)
2924 return false;
2925 if (REGNO (op) >= MEM0_REGNO)
2926 return false;
2927
2928 offset = (1 << offset);
2929
67fc44cb 2930 for (i = 0; i < ARRAY_SIZE (legal_subregs); i ++)
f9b89438
DD
2931 if (legal_subregs[i].outer_mode_size == GET_MODE_SIZE (dest_mode)
2932 && legal_subregs[i].regno == REGNO (op)
2933 && legal_subregs[i].inner_mode_size == GET_MODE_SIZE (src_mode)
2934 && legal_subregs[i].byte_mask & offset)
2935 {
2936 switch (legal_subregs[i].legal_when)
2937 {
2938 case 1:
2939 return false;
2940 case 16:
2941 if (TARGET_A16)
2942 return false;
2943 break;
2944 case 24:
2945 if (TARGET_A24)
2946 return false;
2947 break;
2948 }
2949 }
2950 return true;
2951}
2952
38b2d076
DD
2953/* Returns TRUE if we support a move between the first two operands.
2954 At the moment, we just want to discourage mem to mem moves until
2955 after reload, because reload has a hard time with our limited
2956 number of address registers, and we can get into a situation where
2957 we need three of them when we only have two. */
2958bool
2959m32c_mov_ok (rtx * operands, enum machine_mode mode ATTRIBUTE_UNUSED)
2960{
2961 rtx op0 = operands[0];
2962 rtx op1 = operands[1];
2963
2964 if (TARGET_A24)
2965 return true;
2966
2967#define DEBUG_MOV_OK 0
2968#if DEBUG_MOV_OK
2969 fprintf (stderr, "m32c_mov_ok %s\n", mode_name[mode]);
2970 debug_rtx (op0);
2971 debug_rtx (op1);
2972#endif
2973
2974 if (GET_CODE (op0) == SUBREG)
2975 op0 = XEXP (op0, 0);
2976 if (GET_CODE (op1) == SUBREG)
2977 op1 = XEXP (op1, 0);
2978
2979 if (GET_CODE (op0) == MEM
2980 && GET_CODE (op1) == MEM
2981 && ! reload_completed)
2982 {
2983#if DEBUG_MOV_OK
2984 fprintf (stderr, " - no, mem to mem\n");
2985#endif
2986 return false;
2987 }
2988
2989#if DEBUG_MOV_OK
2990 fprintf (stderr, " - ok\n");
2991#endif
2992 return true;
2993}
2994
ff485e71
DD
2995/* Returns TRUE if two consecutive HImode mov instructions, generated
2996 to move an immediate double-word value into a double-word variable,
2997 can be combined into a single SImode mov instruction. */
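/* An informal sketch of what this enables (symbol name invented for
   illustration):

     mov.w #0x1234,_var
     mov.w #0x5678,_var+2

   can become

     mov.l #0x56781234,_var

   since operands[1] supplies the low 16 bits and operands[3] the high
   16 bits of the combined SImode constant. */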
2998bool
2999m32c_immd_dbl_mov (rtx * operands,
3000 enum machine_mode mode ATTRIBUTE_UNUSED)
3001{
3002 int flag = 0, okflag = 0, offset1 = 0, offset2 = 0, offsetsign = 0;
3003 const char *str1;
3004 const char *str2;
3005
3006 if (GET_CODE (XEXP (operands[0], 0)) == SYMBOL_REF
3007 && MEM_SCALAR_P (operands[0])
3008 && !MEM_IN_STRUCT_P (operands[0])
3009 && GET_CODE (XEXP (operands[2], 0)) == CONST
3010 && GET_CODE (XEXP (XEXP (operands[2], 0), 0)) == PLUS
3011 && GET_CODE (XEXP (XEXP (XEXP (operands[2], 0), 0), 0)) == SYMBOL_REF
3012 && GET_CODE (XEXP (XEXP (XEXP (operands[2], 0), 0), 1)) == CONST_INT
3013 && MEM_SCALAR_P (operands[2])
3014 && !MEM_IN_STRUCT_P (operands[2]))
3015 flag = 1;
3016
3017 else if (GET_CODE (XEXP (operands[0], 0)) == CONST
3018 && GET_CODE (XEXP (XEXP (operands[0], 0), 0)) == PLUS
3019 && GET_CODE (XEXP (XEXP (XEXP (operands[0], 0), 0), 0)) == SYMBOL_REF
3020 && MEM_SCALAR_P (operands[0])
3021 && !MEM_IN_STRUCT_P (operands[0])
f9f3567e 3022 && !(INTVAL (XEXP (XEXP (XEXP (operands[0], 0), 0), 1)) %4)
ff485e71
DD
3023 && GET_CODE (XEXP (operands[2], 0)) == CONST
3024 && GET_CODE (XEXP (XEXP (operands[2], 0), 0)) == PLUS
3025 && GET_CODE (XEXP (XEXP (XEXP (operands[2], 0), 0), 0)) == SYMBOL_REF
3026 && MEM_SCALAR_P (operands[2])
3027 && !MEM_IN_STRUCT_P (operands[2]))
3028 flag = 2;
3029
3030 else if (GET_CODE (XEXP (operands[0], 0)) == PLUS
3031 && GET_CODE (XEXP (XEXP (operands[0], 0), 0)) == REG
3032 && REGNO (XEXP (XEXP (operands[0], 0), 0)) == FB_REGNO
3033 && GET_CODE (XEXP (XEXP (operands[0], 0), 1)) == CONST_INT
3034 && MEM_SCALAR_P (operands[0])
3035 && !MEM_IN_STRUCT_P (operands[0])
f9f3567e 3036 && !(INTVAL (XEXP (XEXP (operands[0], 0), 1)) %4)
ff485e71
DD
3037 && REGNO (XEXP (XEXP (operands[2], 0), 0)) == FB_REGNO
3038 && GET_CODE (XEXP (XEXP (operands[2], 0), 1)) == CONST_INT
3039 && MEM_SCALAR_P (operands[2])
3040 && !MEM_IN_STRUCT_P (operands[2]))
3041 flag = 3;
3042
3043 else
3044 return false;
3045
3046 switch (flag)
3047 {
3048 case 1:
3049 str1 = XSTR (XEXP (operands[0], 0), 0);
3050 str2 = XSTR (XEXP (XEXP (XEXP (operands[2], 0), 0), 0), 0);
3051 if (strcmp (str1, str2) == 0)
3052 okflag = 1;
3053 else
3054 okflag = 0;
3055 break;
3056 case 2:
3057 str1 = XSTR (XEXP (XEXP (XEXP (operands[0], 0), 0), 0), 0);
3058 str2 = XSTR (XEXP (XEXP (XEXP (operands[2], 0), 0), 0), 0);
3059 if (strcmp(str1,str2) == 0)
3060 okflag = 1;
3061 else
3062 okflag = 0;
3063 break;
3064 case 3:
f9f3567e
DD
3065 offset1 = INTVAL (XEXP (XEXP (operands[0], 0), 1));
3066 offset2 = INTVAL (XEXP (XEXP (operands[2], 0), 1));
ff485e71
DD
3067 offsetsign = offset1 >> ((sizeof (offset1) * 8) -1);
3068 if (((offset2-offset1) == 2) && offsetsign != 0)
3069 okflag = 1;
3070 else
3071 okflag = 0;
3072 break;
3073 default:
3074 okflag = 0;
3075 }
3076
3077 if (okflag == 1)
3078 {
3079 HOST_WIDE_INT val;
3080 operands[4] = gen_rtx_MEM (SImode, XEXP (operands[0], 0));
3081
f9f3567e 3082 val = (INTVAL (operands[3]) << 16) + (INTVAL (operands[1]) & 0xFFFF);
ff485e71
DD
3083 operands[5] = gen_rtx_CONST_INT (VOIDmode, val);
3084
3085 return true;
3086 }
3087
3088 return false;
3089}
3090
38b2d076
DD
3091/* Expanders */
3092
3093/* Subregs are non-orthogonal for us, because our registers are all
3094 different sizes. */
3095static rtx
3096m32c_subreg (enum machine_mode outer,
3097 rtx x, enum machine_mode inner, int byte)
3098{
3099 int r, nr = -1;
3100
3101 /* Converting MEMs to different types that are the same size, we
3102 just rewrite them. */
3103 if (GET_CODE (x) == SUBREG
3104 && SUBREG_BYTE (x) == 0
3105 && GET_CODE (SUBREG_REG (x)) == MEM
3106 && (GET_MODE_SIZE (GET_MODE (x))
3107 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
3108 {
3109 rtx oldx = x;
3110 x = gen_rtx_MEM (GET_MODE (x), XEXP (SUBREG_REG (x), 0));
3111 MEM_COPY_ATTRIBUTES (x, SUBREG_REG (oldx));
3112 }
3113
3114 /* Push/pop get done as smaller push/pops. */
3115 if (GET_CODE (x) == MEM
3116 && (GET_CODE (XEXP (x, 0)) == PRE_DEC
3117 || GET_CODE (XEXP (x, 0)) == POST_INC))
3118 return gen_rtx_MEM (outer, XEXP (x, 0));
3119 if (GET_CODE (x) == SUBREG
3120 && GET_CODE (XEXP (x, 0)) == MEM
3121 && (GET_CODE (XEXP (XEXP (x, 0), 0)) == PRE_DEC
3122 || GET_CODE (XEXP (XEXP (x, 0), 0)) == POST_INC))
3123 return gen_rtx_MEM (outer, XEXP (XEXP (x, 0), 0));
3124
3125 if (GET_CODE (x) != REG)
3126 return simplify_gen_subreg (outer, x, inner, byte);
3127
3128 r = REGNO (x);
3129 if (r >= FIRST_PSEUDO_REGISTER || r == AP_REGNO)
3130 return simplify_gen_subreg (outer, x, inner, byte);
3131
3132 if (IS_MEM_REGNO (r))
3133 return simplify_gen_subreg (outer, x, inner, byte);
3134
3135 /* This is where the complexities of our register layout are
3136 described. */
3137 if (byte == 0)
3138 nr = r;
3139 else if (outer == HImode)
3140 {
3141 if (r == R0_REGNO && byte == 2)
3142 nr = R2_REGNO;
3143 else if (r == R0_REGNO && byte == 4)
3144 nr = R1_REGNO;
3145 else if (r == R0_REGNO && byte == 6)
3146 nr = R3_REGNO;
3147 else if (r == R1_REGNO && byte == 2)
3148 nr = R3_REGNO;
3149 else if (r == A0_REGNO && byte == 2)
3150 nr = A1_REGNO;
3151 }
3152 else if (outer == SImode)
3153 {
3154 if (r == R0_REGNO && byte == 0)
3155 nr = R0_REGNO;
3156 else if (r == R0_REGNO && byte == 4)
3157 nr = R1_REGNO;
3158 }
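  /* Reading the tables above (a summary, not new behavior): an SImode
     value in r0 is held as r2:r0, so its byte-2 HImode half lives in
     r2; a DImode value in r0 spans r0/r2/r1/r3, with HImode pieces at
     bytes 0/2/4/6 respectively. */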
3159 if (nr == -1)
3160 {
3161 fprintf (stderr, "m32c_subreg %s %s %d\n",
3162 mode_name[outer], mode_name[inner], byte);
3163 debug_rtx (x);
3164 gcc_unreachable ();
3165 }
3166 return gen_rtx_REG (outer, nr);
3167}
3168
3169/* Used to emit move instructions. We split some moves,
3170 and avoid mem-mem moves. */
3171int
3172m32c_prepare_move (rtx * operands, enum machine_mode mode)
3173{
3174 if (TARGET_A16 && mode == PSImode)
3175 return m32c_split_move (operands, mode, 1);
3176 if ((GET_CODE (operands[0]) == MEM)
3177 && (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY))
3178 {
3179 rtx pmv = XEXP (operands[0], 0);
3180 rtx dest_reg = XEXP (pmv, 0);
3181 rtx dest_mod = XEXP (pmv, 1);
3182
3183 emit_insn (gen_rtx_SET (Pmode, dest_reg, dest_mod));
3184 operands[0] = gen_rtx_MEM (mode, dest_reg);
3185 }
b3a13419 3186 if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
38b2d076
DD
3187 operands[1] = copy_to_mode_reg (mode, operands[1]);
3188 return 0;
3189}
3190
3191#define DEBUG_SPLIT 0
3192
3193/* Returns TRUE if the given PSImode move should be split. We split
3194 for all r8c/m16c moves, since it doesn't support them, and for
3195 POP.L as we can only *push* SImode. */
3196int
3197m32c_split_psi_p (rtx * operands)
3198{
3199#if DEBUG_SPLIT
3200 fprintf (stderr, "\nm32c_split_psi_p\n");
3201 debug_rtx (operands[0]);
3202 debug_rtx (operands[1]);
3203#endif
3204 if (TARGET_A16)
3205 {
3206#if DEBUG_SPLIT
3207 fprintf (stderr, "yes, A16\n");
3208#endif
3209 return 1;
3210 }
3211 if (GET_CODE (operands[1]) == MEM
3212 && GET_CODE (XEXP (operands[1], 0)) == POST_INC)
3213 {
3214#if DEBUG_SPLIT
3215 fprintf (stderr, "yes, pop.l\n");
3216#endif
3217 return 1;
3218 }
3219#if DEBUG_SPLIT
3220 fprintf (stderr, "no, default\n");
3221#endif
3222 return 0;
3223}
3224
3225/* Split the given move. SPLIT_ALL is 0 if splitting is optional
3226 (define_expand), 1 if it is not optional (define_insn_and_split),
3227 and 3 for define_split (alternate api). */
3228int
3229m32c_split_move (rtx * operands, enum machine_mode mode, int split_all)
3230{
3231 rtx s[4], d[4];
3232 int parts, si, di, rev = 0;
3233 int rv = 0, opi = 2;
3234 enum machine_mode submode = HImode;
3235 rtx *ops, local_ops[10];
3236
3237 /* define_split modifies the existing operands, but the other two
3238 emit new insns. OPS is where we store the operand pairs, which
3239 we emit later. */
3240 if (split_all == 3)
3241 ops = operands;
3242 else
3243 ops = local_ops;
3244
3245 /* DImode is split into SImode halves; everything else into HImode. */
3246 if (mode == DImode)
3247 submode = SImode;
3248
3249 /* Before splitting mem-mem moves, force one operand into a
3250 register. */
b3a13419 3251 if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
38b2d076
DD
3252 {
3253#if DEBUG0
3254 fprintf (stderr, "force_reg...\n");
3255 debug_rtx (operands[1]);
3256#endif
3257 operands[1] = force_reg (mode, operands[1]);
3258#if DEBUG0
3259 debug_rtx (operands[1]);
3260#endif
3261 }
3262
3263 parts = 2;
3264
3265#if DEBUG_SPLIT
b3a13419
ILT
3266 fprintf (stderr, "\nsplit_move %d all=%d\n", !can_create_pseudo_p (),
3267 split_all);
38b2d076
DD
3268 debug_rtx (operands[0]);
3269 debug_rtx (operands[1]);
3270#endif
3271
eb5f0c07
DD
3272 /* Note that split_all is not used to select the api after this
3273 point, so it's safe to set it to 3 even with define_insn. */
3274 /* None of the chips can move SI operands to sp-relative addresses,
3275 so we always split those. */
3276 if (m32c_extra_constraint_p (operands[0], 'S', "Ss"))
3277 split_all = 3;
3278
38b2d076
DD
3279 /* We don't need to split these. */
3280 if (TARGET_A24
3281 && split_all != 3
3282 && (mode == SImode || mode == PSImode)
3283 && !(GET_CODE (operands[1]) == MEM
3284 && GET_CODE (XEXP (operands[1], 0)) == POST_INC))
3285 return 0;
3286
3287 /* First, enumerate the subregs we'll be dealing with. */
3288 for (si = 0; si < parts; si++)
3289 {
3290 d[si] =
3291 m32c_subreg (submode, operands[0], mode,
3292 si * GET_MODE_SIZE (submode));
3293 s[si] =
3294 m32c_subreg (submode, operands[1], mode,
3295 si * GET_MODE_SIZE (submode));
3296 }
3297
3298 /* Split pushes by emitting a sequence of smaller pushes. */
3299 if (GET_CODE (d[0]) == MEM && GET_CODE (XEXP (d[0], 0)) == PRE_DEC)
3300 {
3301 for (si = parts - 1; si >= 0; si--)
3302 {
3303 ops[opi++] = gen_rtx_MEM (submode,
3304 gen_rtx_PRE_DEC (Pmode,
3305 gen_rtx_REG (Pmode,
3306 SP_REGNO)));
3307 ops[opi++] = s[si];
3308 }
3309
3310 rv = 1;
3311 }
3312 /* Likewise for pops. */
3313 else if (GET_CODE (s[0]) == MEM && GET_CODE (XEXP (s[0], 0)) == POST_INC)
3314 {
3315 for (di = 0; di < parts; di++)
3316 {
3317 ops[opi++] = d[di];
3318 ops[opi++] = gen_rtx_MEM (submode,
3319 gen_rtx_POST_INC (Pmode,
3320 gen_rtx_REG (Pmode,
3321 SP_REGNO)));
3322 }
3323 rv = 1;
3324 }
3325 else if (split_all)
3326 {
3327 /* if d[di] == s[si] for any di < si, we'll early clobber. */
3328 for (di = 0; di < parts - 1; di++)
3329 for (si = di + 1; si < parts; si++)
3330 if (reg_mentioned_p (d[di], s[si]))
3331 rev = 1;
3332
3333 if (rev)
3334 for (si = 0; si < parts; si++)
3335 {
3336 ops[opi++] = d[si];
3337 ops[opi++] = s[si];
3338 }
3339 else
3340 for (si = parts - 1; si >= 0; si--)
3341 {
3342 ops[opi++] = d[si];
3343 ops[opi++] = s[si];
3344 }
3345 rv = 1;
3346 }
3347 /* Now emit any moves we may have accumulated. */
3348 if (rv && split_all != 3)
3349 {
3350 int i;
3351 for (i = 2; i < opi; i += 2)
3352 emit_move_insn (ops[i], ops[i + 1]);
3353 }
3354 return rv;
3355}
3356
07127a0a
DD
3357/* The m32c has a number of opcodes that act like memcpy, strcmp, and
3358 the like. For the R8C they expect one of the addresses to be in
3359 R1L:An so we need to arrange for that. Otherwise, it's just a
3360 matter of picking out the operands we want and emitting the right
3361 pattern for them. All these expanders, which correspond to
3362 patterns in blkmov.md, must return nonzero if they expand the insn,
3363 or zero if they should FAIL. */
3364
3365/* This is a memset() opcode. All operands are implied, so we need to
3366 arrange for them to be in the right registers. The opcode wants
3367 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3368 the count (HI), and $2 the value (QI). */
3369int
3370m32c_expand_setmemhi(rtx *operands)
3371{
3372 rtx desta, count, val;
3373 rtx desto, counto;
3374
3375 desta = XEXP (operands[0], 0);
3376 count = operands[1];
3377 val = operands[2];
3378
3379 desto = gen_reg_rtx (Pmode);
3380 counto = gen_reg_rtx (HImode);
3381
3382 if (GET_CODE (desta) != REG
3383 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3384 desta = copy_to_mode_reg (Pmode, desta);
3385
3386 /* This looks like an arbitrary restriction, but this is by far the
3387 most common case. For counts 8..14 this actually results in
3388 smaller code with no speed penalty because the half-sized
3389 constant can be loaded with a shorter opcode. */
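     /* For instance (an informal sketch): a memset of 10 bytes with
	the value 5 falls in this case; v becomes 0x0505, the count is
	halved to 5, and the word-wide setmem pattern below is used. */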
3390 if (GET_CODE (count) == CONST_INT
3391 && GET_CODE (val) == CONST_INT
3392 && ! (INTVAL (count) & 1)
3393 && (INTVAL (count) > 1)
3394 && (INTVAL (val) <= 7 && INTVAL (val) >= -8))
3395 {
3396 unsigned v = INTVAL (val) & 0xff;
3397 v = v | (v << 8);
3398 count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3399 val = copy_to_mode_reg (HImode, GEN_INT (v));
3400 if (TARGET_A16)
3401 emit_insn (gen_setmemhi_whi_op (desto, counto, val, desta, count));
3402 else
3403 emit_insn (gen_setmemhi_wpsi_op (desto, counto, val, desta, count));
3404 return 1;
3405 }
3406
3407 /* This is the generalized memset() case. */
3408 if (GET_CODE (val) != REG
3409 || REGNO (val) < FIRST_PSEUDO_REGISTER)
3410 val = copy_to_mode_reg (QImode, val);
3411
3412 if (GET_CODE (count) != REG
3413 || REGNO (count) < FIRST_PSEUDO_REGISTER)
3414 count = copy_to_mode_reg (HImode, count);
3415
3416 if (TARGET_A16)
3417 emit_insn (gen_setmemhi_bhi_op (desto, counto, val, desta, count));
3418 else
3419 emit_insn (gen_setmemhi_bpsi_op (desto, counto, val, desta, count));
3420
3421 return 1;
3422}
3423
3424/* This is a memcpy() opcode. All operands are implied, so we need to
3425 arrange for them to be in the right registers. The opcode wants
3426 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3427 is the source (MEM:BLK), and $2 the count (HI). */
3428int
3429m32c_expand_movmemhi(rtx *operands)
3430{
3431 rtx desta, srca, count;
3432 rtx desto, srco, counto;
3433
3434 desta = XEXP (operands[0], 0);
3435 srca = XEXP (operands[1], 0);
3436 count = operands[2];
3437
3438 desto = gen_reg_rtx (Pmode);
3439 srco = gen_reg_rtx (Pmode);
3440 counto = gen_reg_rtx (HImode);
3441
3442 if (GET_CODE (desta) != REG
3443 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3444 desta = copy_to_mode_reg (Pmode, desta);
3445
3446 if (GET_CODE (srca) != REG
3447 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3448 srca = copy_to_mode_reg (Pmode, srca);
3449
3450 /* Similar to setmem, but we don't need to check the value. */
3451 if (GET_CODE (count) == CONST_INT
3452 && ! (INTVAL (count) & 1)
3453 && (INTVAL (count) > 1))
3454 {
3455 count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3456 if (TARGET_A16)
3457 emit_insn (gen_movmemhi_whi_op (desto, srco, counto, desta, srca, count));
3458 else
3459 emit_insn (gen_movmemhi_wpsi_op (desto, srco, counto, desta, srca, count));
3460 return 1;
3461 }
3462
3463 /* This is the generalized memcpy() case. */
3464 if (GET_CODE (count) != REG
3465 || REGNO (count) < FIRST_PSEUDO_REGISTER)
3466 count = copy_to_mode_reg (HImode, count);
3467
3468 if (TARGET_A16)
3469 emit_insn (gen_movmemhi_bhi_op (desto, srco, counto, desta, srca, count));
3470 else
3471 emit_insn (gen_movmemhi_bpsi_op (desto, srco, counto, desta, srca, count));
3472
3473 return 1;
3474}
3475
3476/* This is a stpcpy() opcode. $0 is the destination (MEM:BLK) after
3477 the copy, which should point to the NUL at the end of the string,
3478 $1 is the destination (MEM:BLK), and $2 is the source (MEM:BLK).
3479 Since our opcode leaves the destination pointing *after* the NUL,
3480 we must emit an adjustment. */
3481int
3482m32c_expand_movstr(rtx *operands)
3483{
3484 rtx desta, srca;
3485 rtx desto, srco;
3486
3487 desta = XEXP (operands[1], 0);
3488 srca = XEXP (operands[2], 0);
3489
3490 desto = gen_reg_rtx (Pmode);
3491 srco = gen_reg_rtx (Pmode);
3492
3493 if (GET_CODE (desta) != REG
3494 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3495 desta = copy_to_mode_reg (Pmode, desta);
3496
3497 if (GET_CODE (srca) != REG
3498 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3499 srca = copy_to_mode_reg (Pmode, srca);
3500
3501 emit_insn (gen_movstr_op (desto, srco, desta, srca));
3502 /* desto ends up being a1, which allows this type of add through MOVA. */
3503 emit_insn (gen_addpsi3 (operands[0], desto, GEN_INT (-1)));
3504
3505 return 1;
3506}
3507
3508/* This is a strcmp() opcode. $0 is the destination (HI) which holds
3509 <=>0 depending on the comparison, $1 is one string (MEM:BLK), and
3510 $2 is the other (MEM:BLK). We must do the comparison, and then
3511 convert the flags to a signed integer result. */
3512int
3513m32c_expand_cmpstr(rtx *operands)
3514{
3515 rtx src1a, src2a;
3516
3517 src1a = XEXP (operands[1], 0);
3518 src2a = XEXP (operands[2], 0);
3519
3520 if (GET_CODE (src1a) != REG
3521 || REGNO (src1a) < FIRST_PSEUDO_REGISTER)
3522 src1a = copy_to_mode_reg (Pmode, src1a);
3523
3524 if (GET_CODE (src2a) != REG
3525 || REGNO (src2a) < FIRST_PSEUDO_REGISTER)
3526 src2a = copy_to_mode_reg (Pmode, src2a);
3527
3528 emit_insn (gen_cmpstrhi_op (src1a, src2a, src1a, src2a));
3529 emit_insn (gen_cond_to_int (operands[0]));
3530
3531 return 1;
3532}
3533
3534
23fed240
DD
3535typedef rtx (*shift_gen_func)(rtx, rtx, rtx);
3536
3537static shift_gen_func
3538shift_gen_func_for (int mode, int code)
3539{
3540#define GFF(m,c,f) if (mode == m && code == c) return f
3541 GFF(QImode, ASHIFT, gen_ashlqi3_i);
3542 GFF(QImode, ASHIFTRT, gen_ashrqi3_i);
3543 GFF(QImode, LSHIFTRT, gen_lshrqi3_i);
3544 GFF(HImode, ASHIFT, gen_ashlhi3_i);
3545 GFF(HImode, ASHIFTRT, gen_ashrhi3_i);
3546 GFF(HImode, LSHIFTRT, gen_lshrhi3_i);
3547 GFF(PSImode, ASHIFT, gen_ashlpsi3_i);
3548 GFF(PSImode, ASHIFTRT, gen_ashrpsi3_i);
3549 GFF(PSImode, LSHIFTRT, gen_lshrpsi3_i);
3550 GFF(SImode, ASHIFT, TARGET_A16 ? gen_ashlsi3_16 : gen_ashlsi3_24);
3551 GFF(SImode, ASHIFTRT, TARGET_A16 ? gen_ashrsi3_16 : gen_ashrsi3_24);
3552 GFF(SImode, LSHIFTRT, TARGET_A16 ? gen_lshrsi3_16 : gen_lshrsi3_24);
3553#undef GFF
07127a0a 3554 gcc_unreachable ();
23fed240
DD
3555}
3556
38b2d076
DD
3557/* The m32c only has one shift, but it takes a signed count. GCC
3558 doesn't want this, so we fake it by negating any shift count when
07127a0a
DD
3559 we're pretending to shift the other way. Also, the shift count is
3560 limited to -8..8. It's slightly better to use two shifts for 9..15
3561 than to load the count into r1h, so we do that too. */
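/* A small worked example (a sketch, assuming the expander passes a
   negative SCALE for right shifts, matching the NEG note below): an
   HImode right shift by 10 yields count == -10, which is outside
   -8..8, so the constant-count loop emits one shift by -8 into a
   temporary and then a final shift by -2, instead of loading the
   count into r1h. */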
38b2d076 3562int
23fed240 3563m32c_prepare_shift (rtx * operands, int scale, int shift_code)
38b2d076 3564{
23fed240
DD
3565 enum machine_mode mode = GET_MODE (operands[0]);
3566 shift_gen_func func = shift_gen_func_for (mode, shift_code);
38b2d076 3567 rtx temp;
23fed240
DD
3568
3569 if (GET_CODE (operands[2]) == CONST_INT)
38b2d076 3570 {
23fed240
DD
3571 int maxc = TARGET_A24 && (mode == PSImode || mode == SImode) ? 32 : 8;
3572 int count = INTVAL (operands[2]) * scale;
3573
3574 while (count > maxc)
3575 {
3576 temp = gen_reg_rtx (mode);
3577 emit_insn (func (temp, operands[1], GEN_INT (maxc)));
3578 operands[1] = temp;
3579 count -= maxc;
3580 }
3581 while (count < -maxc)
3582 {
3583 temp = gen_reg_rtx (mode);
3584 emit_insn (func (temp, operands[1], GEN_INT (-maxc)));
3585 operands[1] = temp;
3586 count += maxc;
3587 }
3588 emit_insn (func (operands[0], operands[1], GEN_INT (count)));
3589 return 1;
38b2d076 3590 }
2e160056
DD
3591
3592 temp = gen_reg_rtx (QImode);
38b2d076 3593 if (scale < 0)
2e160056
DD
3594 /* The pattern has a NEG that corresponds to this. */
3595 emit_move_insn (temp, gen_rtx_NEG (QImode, operands[2]));
3596 else if (TARGET_A16 && mode == SImode)
3597 /* We do this because the code below may modify this, we don't
3598 want to modify the origin of this value. */
3599 emit_move_insn (temp, operands[2]);
38b2d076 3600 else
2e160056 3601 /* We'll only use it for the shift, no point emitting a move. */
38b2d076 3602 temp = operands[2];
2e160056 3603
16659fcf 3604 if (TARGET_A16 && GET_MODE_SIZE (mode) == 4)
2e160056
DD
3605 {
3606 /* The m16c has a limit of -16..16 for SI shifts, even when the
3607 shift count is in a register. Since there are so many targets
3608 of these shifts, it's better to expand the RTL here than to
3609 call a helper function.
3610
3611 The resulting code looks something like this:
3612
3613 cmp.b r1h,-16
3614 jge.b 1f
3615 shl.l -16,dest
3616 add.b r1h,16
3617 1f: cmp.b r1h,16
3618 jle.b 1f
3619 shl.l 16,dest
3620 sub.b r1h,16
3621 1f: shl.l r1h,dest
3622
3623 We take advantage of the fact that "negative" shifts are
3624 undefined to skip one of the comparisons. */
3625
3626 rtx count;
833bf445 3627 rtx label, lref, insn, tempvar;
2e160056 3628
16659fcf
DD
3629 emit_move_insn (operands[0], operands[1]);
3630
2e160056
DD
3631 count = temp;
3632 label = gen_label_rtx ();
3633 lref = gen_rtx_LABEL_REF (VOIDmode, label);
3634 LABEL_NUSES (label) ++;
3635
833bf445
DD
3636 tempvar = gen_reg_rtx (mode);
3637
2e160056
DD
3638 if (shift_code == ASHIFT)
3639 {
3640 /* This is a left shift. We only need check positive counts. */
3641 emit_jump_insn (gen_cbranchqi4 (gen_rtx_LE (VOIDmode, 0, 0),
3642 count, GEN_INT (16), label));
833bf445
DD
3643 emit_insn (func (tempvar, operands[0], GEN_INT (8)));
3644 emit_insn (func (operands[0], tempvar, GEN_INT (8)));
2e160056
DD
3645 insn = emit_insn (gen_addqi3 (count, count, GEN_INT (-16)));
3646 emit_label_after (label, insn);
3647 }
3648 else
3649 {
3650 /* This is a right shift. We only need check negative counts. */
3651 emit_jump_insn (gen_cbranchqi4 (gen_rtx_GE (VOIDmode, 0, 0),
3652 count, GEN_INT (-16), label));
833bf445
DD
3653 emit_insn (func (tempvar, operands[0], GEN_INT (-8)));
3654 emit_insn (func (operands[0], tempvar, GEN_INT (-8)));
2e160056
DD
3655 insn = emit_insn (gen_addqi3 (count, count, GEN_INT (16)));
3656 emit_label_after (label, insn);
3657 }
16659fcf
DD
3658 operands[1] = operands[0];
3659 emit_insn (func (operands[0], operands[0], count));
3660 return 1;
2e160056
DD
3661 }
3662
38b2d076
DD
3663 operands[2] = temp;
3664 return 0;
3665}
3666
12ea2512
DD
3667/* The m32c has a limited range of operations that work on PSImode
3668 values; we have to expand to SI, do the math, and truncate back to
3669 PSI. Yes, this is expensive, but hopefully gcc will learn to avoid
3670 those cases. */
3671void
3672m32c_expand_neg_mulpsi3 (rtx * operands)
3673{
3674 /* operands: a = b * i */
3675 rtx temp1; /* b as SI */
07127a0a
DD
3676 rtx scale /* i as SI */;
3677 rtx temp2; /* a*b as SI */
12ea2512
DD
3678
3679 temp1 = gen_reg_rtx (SImode);
3680 temp2 = gen_reg_rtx (SImode);
07127a0a
DD
3681 if (GET_CODE (operands[2]) != CONST_INT)
3682 {
3683 scale = gen_reg_rtx (SImode);
3684 emit_insn (gen_zero_extendpsisi2 (scale, operands[2]));
3685 }
3686 else
3687 scale = copy_to_mode_reg (SImode, operands[2]);
12ea2512
DD
3688
3689 emit_insn (gen_zero_extendpsisi2 (temp1, operands[1]));
07127a0a
DD
3690 temp2 = expand_simple_binop (SImode, MULT, temp1, scale, temp2, 1, OPTAB_LIB);
3691 emit_insn (gen_truncsipsi2 (operands[0], temp2));
12ea2512
DD
3692}
3693
0166ff05
DD
3694static rtx compare_op0, compare_op1;
3695
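/* The compare expanders record their operands here rather than
   emitting a compare immediately; m32c_unpend_compare emits the
   actual cmp insn later, and m32c_expand_scc / m32c_expand_movcc
   build their conditions from the recorded operands. */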
3696void
3697m32c_pend_compare (rtx *operands)
3698{
3699 compare_op0 = operands[0];
3700 compare_op1 = operands[1];
3701}
3702
3703void
3704m32c_unpend_compare (void)
3705{
3706 switch (GET_MODE (compare_op0))
3707 {
3708 case QImode:
3709 emit_insn (gen_cmpqi_op (compare_op0, compare_op1));
      break;
3710 case HImode:
3711 emit_insn (gen_cmphi_op (compare_op0, compare_op1));
      break;
3712 case PSImode:
3713 emit_insn (gen_cmppsi_op (compare_op0, compare_op1));
      break;
67fc44cb
DD
3714 default:
3715 /* Just to silence the "missing case" warnings. */ ;
0166ff05
DD
3716 }
3717}
3718
3719void
3720m32c_expand_scc (int code, rtx *operands)
3721{
3722 enum machine_mode mode = TARGET_A16 ? QImode : HImode;
3723
3724 emit_insn (gen_rtx_SET (mode,
3725 operands[0],
3726 gen_rtx_fmt_ee (code,
3727 mode,
3728 compare_op0,
3729 compare_op1)));
3730}
3731
38b2d076
DD
3732/* Pattern Output Functions */
3733
07127a0a
DD
3734/* Returns a (OP (reg:CC FLG_REGNO) (const_int 0)) from some other
3735 match_operand rtx's OP. */
3736rtx
3737m32c_cmp_flg_0 (rtx cmp)
3738{
3739 return gen_rtx_fmt_ee (GET_CODE (cmp),
3740 GET_MODE (cmp),
3741 gen_rtx_REG (CCmode, FLG_REGNO),
3742 GEN_INT (0));
3743}
3744
3745int
3746m32c_expand_movcc (rtx *operands)
3747{
3748 rtx rel = operands[1];
0166ff05
DD
3749 rtx cmp;
3750
07127a0a
DD
3751 if (GET_CODE (rel) != EQ && GET_CODE (rel) != NE)
3752 return 1;
3753 if (GET_CODE (operands[2]) != CONST_INT
3754 || GET_CODE (operands[3]) != CONST_INT)
3755 return 1;
3756 emit_insn (gen_cmpqi(XEXP (rel, 0), XEXP (rel, 1)));
3757 if (GET_CODE (rel) == NE)
3758 {
3759 rtx tmp = operands[2];
3760 operands[2] = operands[3];
3761 operands[3] = tmp;
3762 }
0166ff05
DD
3763
3764 cmp = gen_rtx_fmt_ee (GET_CODE (rel),
3765 GET_MODE (rel),
3766 compare_op0,
3767 compare_op1);
3768
3769 emit_move_insn (operands[0],
3770 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
3771 cmp,
3772 operands[2],
3773 operands[3]));
07127a0a
DD
3774 return 0;
3775}
3776
3777/* Used for the "insv" pattern. Return nonzero to fail, else done. */
3778int
3779m32c_expand_insv (rtx *operands)
3780{
3781 rtx op0, src0, p;
3782 int mask;
3783
3784 if (INTVAL (operands[1]) != 1)
3785 return 1;
3786
9cb96754
N
3787 /* Our insv opcode (bset, bclr) can only insert a one-bit constant. */
3788 if (GET_CODE (operands[3]) != CONST_INT)
3789 return 1;
3790 if (INTVAL (operands[3]) != 0
3791 && INTVAL (operands[3]) != 1
3792 && INTVAL (operands[3]) != -1)
3793 return 1;
3794
07127a0a
DD
3795 mask = 1 << INTVAL (operands[2]);
3796
3797 op0 = operands[0];
3798 if (GET_CODE (op0) == SUBREG
3799 && SUBREG_BYTE (op0) == 0)
3800 {
3801 rtx sub = SUBREG_REG (op0);
3802 if (GET_MODE (sub) == HImode || GET_MODE (sub) == QImode)
3803 op0 = sub;
3804 }
3805
b3a13419 3806 if (!can_create_pseudo_p ()
07127a0a
DD
3807 || (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0)))
3808 src0 = op0;
3809 else
3810 {
3811 src0 = gen_reg_rtx (GET_MODE (op0));
3812 emit_move_insn (src0, op0);
3813 }
3814
3815 if (GET_MODE (op0) == HImode
3816 && INTVAL (operands[2]) >= 8
3817 && GET_CODE (op0) == MEM)
3818 {
3819 /* We are little endian. */
3820 rtx new_mem = gen_rtx_MEM (QImode, plus_constant (XEXP (op0, 0), 1));
3821 MEM_COPY_ATTRIBUTES (new_mem, op0);
3822 mask >>= 8;
3823 }
3824
8e4edce7
DD
3825 /* First, we generate a mask with the correct polarity. If we are
3826 storing a zero, we want an AND mask, so invert it. */
3827 if (INTVAL (operands[3]) == 0)
07127a0a 3828 {
16659fcf 3829 /* Storing a zero, use an AND mask */
07127a0a
DD
3830 if (GET_MODE (op0) == HImode)
3831 mask ^= 0xffff;
3832 else
3833 mask ^= 0xff;
3834 }
8e4edce7
DD
3835 /* Now we need to properly sign-extend the mask in case we need to
3836 fall back to an AND or OR opcode. */
07127a0a
DD
3837 if (GET_MODE (op0) == HImode)
3838 {
3839 if (mask & 0x8000)
3840 mask -= 0x10000;
3841 }
3842 else
3843 {
3844 if (mask & 0x80)
3845 mask -= 0x100;
3846 }
3847
3848 switch ( (INTVAL (operands[3]) ? 4 : 0)
3849 + ((GET_MODE (op0) == HImode) ? 2 : 0)
3850 + (TARGET_A24 ? 1 : 0))
3851 {
3852 case 0: p = gen_andqi3_16 (op0, src0, GEN_INT (mask)); break;
3853 case 1: p = gen_andqi3_24 (op0, src0, GEN_INT (mask)); break;
3854 case 2: p = gen_andhi3_16 (op0, src0, GEN_INT (mask)); break;
3855 case 3: p = gen_andhi3_24 (op0, src0, GEN_INT (mask)); break;
3856 case 4: p = gen_iorqi3_16 (op0, src0, GEN_INT (mask)); break;
3857 case 5: p = gen_iorqi3_24 (op0, src0, GEN_INT (mask)); break;
3858 case 6: p = gen_iorhi3_16 (op0, src0, GEN_INT (mask)); break;
3859 case 7: p = gen_iorhi3_24 (op0, src0, GEN_INT (mask)); break;
653e2568 3860 default: p = NULL_RTX; break; /* Not reached, but silences a warning. */
07127a0a
DD
3861 }
3862
3863 emit_insn (p);
3864 return 0;
3865}
3866
3867const char *
3868m32c_scc_pattern(rtx *operands, RTX_CODE code)
3869{
3870 static char buf[30];
3871 if (GET_CODE (operands[0]) == REG
3872 && REGNO (operands[0]) == R0_REGNO)
3873 {
3874 if (code == EQ)
3875 return "stzx\t#1,#0,r0l";
3876 if (code == NE)
3877 return "stzx\t#0,#1,r0l";
3878 }
3879 sprintf(buf, "bm%s\t0,%%h0\n\tand.b\t#1,%%0", GET_RTX_NAME (code));
3880 return buf;
3881}
3882
5abd2125
JS
3883/* Encode symbol attributes of a SYMBOL_REF into its
3884 SYMBOL_REF_FLAGS. */
3885static void
3886m32c_encode_section_info (tree decl, rtx rtl, int first)
3887{
3888 int extra_flags = 0;
3889
3890 default_encode_section_info (decl, rtl, first);
3891 if (TREE_CODE (decl) == FUNCTION_DECL
3892 && m32c_special_page_vector_p (decl))
3893
3894 extra_flags = SYMBOL_FLAG_FUNCVEC_FUNCTION;
3895
3896 if (extra_flags)
3897 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= extra_flags;
3898}
3899
38b2d076
DD
3900/* Returns TRUE if the current function is a leaf, and thus we can
3901 determine which registers an interrupt function really needs to
3902 save. The logic below is mostly about finding the insn sequence
3903 that's the function, versus any sequence that might be open for the
3904 current insn. */
3905static int
3906m32c_leaf_function_p (void)
3907{
3908 rtx saved_first, saved_last;
3909 struct sequence_stack *seq;
3910 int rv;
3911
3e029763
JH
3912 saved_first = crtl->emit.x_first_insn;
3913 saved_last = crtl->emit.x_last_insn;
3914 for (seq = crtl->emit.sequence_stack; seq && seq->next; seq = seq->next)
38b2d076
DD
3915 ;
3916 if (seq)
3917 {
3e029763
JH
3918 crtl->emit.x_first_insn = seq->first;
3919 crtl->emit.x_last_insn = seq->last;
3920 }
3921
3922 rv = leaf_function_p ();
3923
3924 crtl->emit.x_first_insn = saved_first;
3925 crtl->emit.x_last_insn = saved_last;
3926 return rv;
3927}
3928
3929/* Returns TRUE if the current function needs to use the ENTER/EXIT
3930 opcodes. If the function doesn't need the frame base or stack
3931 pointer, it can use the simpler RTS opcode. */
3932static bool
3933m32c_function_needs_enter (void)
3934{
3935 rtx insn;
3936 struct sequence_stack *seq;
3937 rtx sp = gen_rtx_REG (Pmode, SP_REGNO);
3938 rtx fb = gen_rtx_REG (Pmode, FB_REGNO);
3939
3940 insn = get_insns ();
3e029763 3941 for (seq = crtl->emit.sequence_stack;
3942 seq;
3943 insn = seq->first, seq = seq->next);
3944
3945 while (insn)
3946 {
3947 if (reg_mentioned_p (sp, insn))
3948 return true;
3949 if (reg_mentioned_p (fb, insn))
3950 return true;
3951 insn = NEXT_INSN (insn);
3952 }
3953 return false;
3954}
3955
3956/* Mark all the subexpressions of the PARALLEL rtx PAR as
3957 frame-related. Return PAR.
3958
3959 dwarf2out.c:dwarf2out_frame_debug_expr ignores sub-expressions of a
3960 PARALLEL rtx other than the first if they do not have the
3961 FRAME_RELATED flag set on them. So this function is handy for
3962 marking up 'enter' instructions. */
3963static rtx
3964m32c_all_frame_related (rtx par)
3965{
3966 int len = XVECLEN (par, 0);
3967 int i;
3968
3969 for (i = 0; i < len; i++)
3970 F (XVECEXP (par, 0, i));
3971
3972 return par;
3973}
3974
3975/* Emits the prologue. See the frame layout comment earlier in this
3976 file. We can reserve up to 256 bytes with the ENTER opcode; beyond
3977 that, we adjust SP manually. */
3978void
3979m32c_emit_prologue (void)
3980{
3981 int frame_size, extra_frame_size = 0, reg_save_size;
3982 int complex_prologue = 0;
3983
3984 cfun->machine->is_leaf = m32c_leaf_function_p ();
3985 if (interrupt_p (cfun->decl))
3986 {
3987 cfun->machine->is_interrupt = 1;
3988 complex_prologue = 1;
3989 }
3990
3991 reg_save_size = m32c_pushm_popm (PP_justcount);
3992
3993 if (interrupt_p (cfun->decl))
3994 emit_insn (gen_pushm (GEN_INT (cfun->machine->intr_pushm)));
3995
3996 frame_size =
3997 m32c_initial_elimination_offset (FB_REGNO, SP_REGNO) - reg_save_size;
3998 if (frame_size == 0
3999 && !cfun->machine->is_interrupt
4000 && !m32c_function_needs_enter ())
4001 cfun->machine->use_rts = 1;
4002
4003 if (frame_size > 254)
4004 {
4005 extra_frame_size = frame_size - 254;
4006 frame_size = 254;
4007 }
4008 if (cfun->machine->use_rts == 0)
4009 F (emit_insn (m32c_all_frame_related
4010 (TARGET_A16
4011 ? gen_prologue_enter_16 (GEN_INT (frame_size + 2))
4012 : gen_prologue_enter_24 (GEN_INT (frame_size + 4)))));
4013
4014 if (extra_frame_size)
4015 {
4016 complex_prologue = 1;
4017 if (TARGET_A16)
4018 F (emit_insn (gen_addhi3 (gen_rtx_REG (HImode, SP_REGNO),
4019 gen_rtx_REG (HImode, SP_REGNO),
4020 GEN_INT (-extra_frame_size))));
4021 else
4022 F (emit_insn (gen_addpsi3 (gen_rtx_REG (PSImode, SP_REGNO),
4023 gen_rtx_REG (PSImode, SP_REGNO),
4024 GEN_INT (-extra_frame_size))));
4025 }
4026
4027 complex_prologue += m32c_pushm_popm (PP_pushm);
4028
4029 /* This just emits a comment into the .s file for debugging. */
4030 if (complex_prologue)
4031 emit_insn (gen_prologue_end ());
4032}
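/* Worked example (illustrative, not taken from the original sources): a
   300-byte frame on an A16 target is split into frame_size = 254 plus
   extra_frame_size = 46, so the code above emits
   gen_prologue_enter_16 (GEN_INT (254 + 2)), then a gen_addhi3 that
   subtracts the remaining 46 bytes from SP, and only then pushes the saved
   registers via PP_pushm.  */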
4033
4034/* Likewise, for the epilogue. The only exception is that, for
4035 interrupts, we must manually unwind the frame as the REIT opcode
4036 doesn't do that. */
4037void
4038m32c_emit_epilogue (void)
4039{
4040 /* This just emits a comment into the .s file for debugging. */
4041 if (m32c_pushm_popm (PP_justcount) > 0 || cfun->machine->is_interrupt)
4042 emit_insn (gen_epilogue_start ());
4043
4044 m32c_pushm_popm (PP_popm);
4045
4046 if (cfun->machine->is_interrupt)
4047 {
4048 enum machine_mode spmode = TARGET_A16 ? HImode : PSImode;
4049
4050 emit_move_insn (gen_rtx_REG (spmode, A0_REGNO),
4051 gen_rtx_REG (spmode, FP_REGNO));
4052 emit_move_insn (gen_rtx_REG (spmode, SP_REGNO),
4053 gen_rtx_REG (spmode, A0_REGNO));
4054 if (TARGET_A16)
4055 emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, FP_REGNO)));
4056 else
4057 emit_insn (gen_poppsi (gen_rtx_REG (PSImode, FP_REGNO)));
4058 emit_insn (gen_popm (GEN_INT (cfun->machine->intr_pushm)));
4059 if (TARGET_A16)
4060 emit_jump_insn (gen_epilogue_reit_16 ());
4061 else
4062 emit_jump_insn (gen_epilogue_reit_24 ());
4063 }
4064 else if (cfun->machine->use_rts)
4065 emit_jump_insn (gen_epilogue_rts ());
4066 else if (TARGET_A16)
4067 emit_jump_insn (gen_epilogue_exitd_16 ());
38b2d076 4068 else
0e0642aa 4069 emit_jump_insn (gen_epilogue_exitd_24 ());
4070 emit_barrier ();
4071}
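/* Illustrative flow for an interrupt handler: the generic popm above
   restores the ordinary saved registers, the frame base is then copied
   through A0 into SP to unwind the frame, the old frame base is popped,
   the registers saved by the prologue's pushm are popped, and the handler
   returns with REIT.  Non-interrupt functions finish with a single rts or
   exitd.  */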
4072
4073void
4074m32c_emit_eh_epilogue (rtx ret_addr)
4075{
4076 /* R0[R2] has the stack adjustment. R1[R3] has the address to
4077 return to. We have to fudge the stack, pop everything, pop SP
4078 (fudged), and return (fudged). This is actually easier to do in
4079 assembler, so punt to libgcc. */
4080 emit_jump_insn (gen_eh_epilogue (ret_addr, cfun->machine->eh_stack_adjust));
c41c1387 4081 /* emit_clobber (gen_rtx_REG (HImode, R0L_REGNO)); */
4082 emit_barrier ();
4083}
4084
4085/* Indicate which flags must be properly set for a given conditional. */
4086static int
4087flags_needed_for_conditional (rtx cond)
4088{
4089 switch (GET_CODE (cond))
4090 {
4091 case LE:
4092 case GT:
4093 return FLAGS_OSZ;
4094 case LEU:
4095 case GTU:
4096 return FLAGS_ZC;
4097 case LT:
4098 case GE:
4099 return FLAGS_OS;
4100 case LTU:
4101 case GEU:
4102 return FLAGS_C;
4103 case EQ:
4104 case NE:
4105 return FLAGS_Z;
4106 default:
4107 return FLAGS_N;
4108 }
4109}
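/* Illustrative reading: an unsigned greater-than branch (GTU) only needs
   the Z and C flags, so FLAGS_ZC is returned; anything that is not a
   recognized conditional falls through to FLAGS_N, which the caller below
   treats as "no usable conditional follows".  */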
4110
4111#define DEBUG_CMP 0
4112
4113/* Returns true if a compare insn is redundant because it would only
4114 set flags that are already set correctly. */
4115static bool
4116m32c_compare_redundant (rtx cmp, rtx *operands)
4117{
4118 int flags_needed;
4119 int pflags;
4120 rtx prev, pp, next;
4121 rtx op0, op1, op2;
4122#if DEBUG_CMP
4123 int prev_icode, i;
4124#endif
4125
4126 op0 = operands[0];
4127 op1 = operands[1];
4128 op2 = operands[2];
4129
4130#if DEBUG_CMP
4131 fprintf(stderr, "\n\033[32mm32c_compare_redundant\033[0m\n");
4132 debug_rtx(cmp);
4133 for (i=0; i<2; i++)
4134 {
4135 fprintf(stderr, "operands[%d] = ", i);
4136 debug_rtx(operands[i]);
4137 }
4138#endif
4139
4140 next = next_nonnote_insn (cmp);
4141 if (!next || !INSN_P (next))
4142 {
4143#if DEBUG_CMP
4144 fprintf(stderr, "compare not followed by insn\n");
4145 debug_rtx(next);
4146#endif
4147 return false;
4148 }
4149 if (GET_CODE (PATTERN (next)) == SET
4150 && GET_CODE (XEXP ( PATTERN (next), 1)) == IF_THEN_ELSE)
4151 {
4152 next = XEXP (XEXP (PATTERN (next), 1), 0);
4153 }
4154 else if (GET_CODE (PATTERN (next)) == SET)
4155 {
4156 /* If this is a conditional, flags_needed will be something
4157 other than FLAGS_N, which we test below. */
4158 next = XEXP (PATTERN (next), 1);
4159 }
4160 else
4161 {
4162#if DEBUG_CMP
4163 fprintf(stderr, "compare not followed by conditional\n");
4164 debug_rtx(next);
4165#endif
4166 return false;
4167 }
4168#if DEBUG_CMP
4169 fprintf(stderr, "conditional is: ");
4170 debug_rtx(next);
4171#endif
4172
4173 flags_needed = flags_needed_for_conditional (next);
4174 if (flags_needed == FLAGS_N)
4175 {
4176#if DEBUG_CMP
4177 fprintf(stderr, "compare not followed by conditional\n");
4178 debug_rtx(next);
4179#endif
4180 return false;
4181 }
4182
4183 /* Compare doesn't set overflow and carry the same way that
4184 arithmetic instructions do, so we can't replace those. */
4185 if (flags_needed & FLAGS_OC)
4186 return false;
4187
4188 prev = cmp;
4189 do {
4190 prev = prev_nonnote_insn (prev);
4191 if (!prev)
4192 {
4193#if DEBUG_CMP
4194 fprintf(stderr, "No previous insn.\n");
4195#endif
4196 return false;
4197 }
4198 if (!INSN_P (prev))
4199 {
4200#if DEBUG_CMP
4201 fprintf(stderr, "Previous insn is a non-insn.\n");
4202#endif
4203 return false;
4204 }
4205 pp = PATTERN (prev);
4206 if (GET_CODE (pp) != SET)
4207 {
4208#if DEBUG_CMP
4209 fprintf(stderr, "Previous insn is not a SET.\n");
4210#endif
4211 return false;
4212 }
4213 pflags = get_attr_flags (prev);
4214
4215 /* Looking up the attributes of the previous insn clobbers the
4216 recog data cached for CMP, so force CMP to be re-recognized. */
4217 INSN_UID (cmp) = -1;
4218 recog (PATTERN (cmp), cmp, 0);
4219
4220 if (pflags == FLAGS_N
4221 && reg_mentioned_p (op0, pp))
4222 {
4223#if DEBUG_CMP
4224 fprintf(stderr, "intermediate non-flags insn uses op:\n");
4225 debug_rtx(prev);
4226#endif
4227 return false;
4228 }
4229 } while (pflags == FLAGS_N);
4230#if DEBUG_CMP
4231 fprintf(stderr, "previous flag-setting insn:\n");
4232 debug_rtx(prev);
4233 debug_rtx(pp);
4234#endif
4235
4236 if (GET_CODE (pp) == SET
4237 && GET_CODE (XEXP (pp, 0)) == REG
4238 && REGNO (XEXP (pp, 0)) == FLG_REGNO
4239 && GET_CODE (XEXP (pp, 1)) == COMPARE)
4240 {
4241 /* Adjacent cbranches must have the same operands to be
4242 redundant. */
4243 rtx pop0 = XEXP (XEXP (pp, 1), 0);
4244 rtx pop1 = XEXP (XEXP (pp, 1), 1);
4245#if DEBUG_CMP
4246 fprintf(stderr, "adjacent cbranches\n");
4247 debug_rtx(pop0);
4248 debug_rtx(pop1);
4249#endif
4250 if (rtx_equal_p (op0, pop0)
4251 && rtx_equal_p (op1, pop1))
4252 return true;
4253#if DEBUG_CMP
4254 fprintf(stderr, "prev cmp not same\n");
4255#endif
4256 return false;
4257 }
4258
4259 /* Else the previous insn must be a SET, with either the source or
4260 dest equal to operands[0], and operands[1] must be zero. */
4261
4262 if (!rtx_equal_p (op1, const0_rtx))
4263 {
4264#if DEBUG_CMP
4265 fprintf(stderr, "operands[1] not const0_rtx\n");
4266#endif
4267 return false;
4268 }
4269 if (GET_CODE (pp) != SET)
4270 {
4271#if DEBUG_CMP
4272 fprintf (stderr, "pp not set\n");
4273#endif
4274 return false;
4275 }
4276 if (!rtx_equal_p (op0, SET_SRC (pp))
4277 && !rtx_equal_p (op0, SET_DEST (pp)))
4278 {
4279#if DEBUG_CMP
4280 fprintf(stderr, "operands[0] not found in set\n");
4281#endif
4282 return false;
4283 }
4284
4285#if DEBUG_CMP
4286 fprintf(stderr, "cmp flags %x prev flags %x\n", flags_needed, pflags);
4287#endif
4288 if ((pflags & flags_needed) == flags_needed)
4289 return true;
4290
4291 return false;
4292}
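/* Worked example (illustrative, not taken from the original sources): for a
   sequence like

     mov.w  _x,r1      ; the move already sets the Z (and S) flags
     cmp.w  #0,r1
     jeq    .Llabel

   the branch only needs FLAGS_Z, operands[1] is const0_rtx, and the previous
   SET mentions operands[0], so the compare is reported as redundant and is
   emitted commented out by m32c_output_compare.  */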
4293
4294/* Return the assembler template for a compare. If the compare is
4295 redundant, the template is returned commented out, so the assembler
4296 output still shows where the compare would have been. */
4297char *
4298m32c_output_compare (rtx insn, rtx *operands)
4299{
0a2aaacc 4300 static char templ[] = ";cmp.b\t%1,%0";
4301 /* ^ 5 */
4302
0a2aaacc 4303 templ[5] = " bwll"[GET_MODE_SIZE(GET_MODE(operands[0]))];
4304 if (m32c_compare_redundant (insn, operands))
4305 {
4306#if DEBUG_CMP
4307 fprintf(stderr, "cbranch: cmp not needed\n");
4308#endif
0a2aaacc 4309 return templ;
4310 }
4311
4312#if DEBUG_CMP
0a2aaacc 4313 fprintf(stderr, "cbranch: cmp needed: `%s'\n", templ);
16659fcf 4314#endif
0a2aaacc 4315 return templ + 1;
4316}
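/* Example (illustrative): for HImode operands the size letter becomes 'w',
   so a needed compare returns "cmp.w\t%1,%0" (templ + 1 skips the leading
   ';'), while a redundant one returns the whole buffer ";cmp.w\t%1,%0" and
   appears only as a comment in the generated assembly.  */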
4317
4318#undef TARGET_ENCODE_SECTION_INFO
4319#define TARGET_ENCODE_SECTION_INFO m32c_encode_section_info
4320
4321/* The Global `targetm' Variable. */
4322
4323struct gcc_target targetm = TARGET_INITIALIZER;
4324
4325#include "gt-m32c.h"