 1/* Target Code for R8C/M16C/M32C
 2 Copyright (C) 2005, 2006, 2007, 2008
3 Free Software Foundation, Inc.
4 Contributed by Red Hat.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it
9 under the terms of the GNU General Public License as published
 10 by the Free Software Foundation; either version 3, or (at your
11 option) any later version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT
14 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
16 License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22#include "config.h"
23#include "system.h"
24#include "coretypes.h"
25#include "tm.h"
26#include "rtl.h"
27#include "regs.h"
28#include "hard-reg-set.h"
29#include "real.h"
30#include "insn-config.h"
31#include "conditions.h"
32#include "insn-flags.h"
33#include "output.h"
34#include "insn-attr.h"
35#include "flags.h"
36#include "recog.h"
37#include "reload.h"
38#include "toplev.h"
39#include "obstack.h"
40#include "tree.h"
41#include "expr.h"
42#include "optabs.h"
43#include "except.h"
44#include "function.h"
45#include "ggc.h"
46#include "target.h"
47#include "target-def.h"
48#include "tm_p.h"
49#include "langhooks.h"
 50#include "gimple.h"
 51#include "df.h"
52
53/* Prototypes */
54
55/* Used by m32c_pushm_popm. */
56typedef enum
57{
58 PP_pushm,
59 PP_popm,
60 PP_justcount
61} Push_Pop_Type;
62
63static tree interrupt_handler (tree *, tree, tree, int, bool *);
 64static tree function_vector_handler (tree *, tree, tree, int, bool *);
65static int interrupt_p (tree node);
66static bool m32c_asm_integer (rtx, unsigned int, int);
 67static int m32c_comp_type_attributes (const_tree, const_tree);
68static bool m32c_fixed_condition_code_regs (unsigned int *, unsigned int *);
69static struct machine_function *m32c_init_machine_status (void);
70static void m32c_insert_attributes (tree, tree *);
71static bool m32c_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
72 const_tree, bool);
73static bool m32c_promote_prototypes (const_tree);
74static int m32c_pushm_popm (Push_Pop_Type);
75static bool m32c_strict_argument_naming (CUMULATIVE_ARGS *);
76static rtx m32c_struct_value_rtx (tree, int);
77static rtx m32c_subreg (enum machine_mode, rtx, enum machine_mode, int);
78static int need_to_save (int);
79int current_function_special_page_vector (rtx);
80
81#define SYMBOL_FLAG_FUNCVEC_FUNCTION (SYMBOL_FLAG_MACH_DEP << 0)
82
83#define streq(a,b) (strcmp ((a), (b)) == 0)
84
85/* Internal support routines */
86
87/* Debugging statements are tagged with DEBUG0 only so that they can
88 be easily enabled individually, by replacing the '0' with '1' as
89 needed. */
90#define DEBUG0 0
91#define DEBUG1 1
92
93#if DEBUG0
94/* This is needed by some of the commented-out debug statements
95 below. */
96static char const *class_names[LIM_REG_CLASSES] = REG_CLASS_NAMES;
97#endif
98static int class_contents[LIM_REG_CLASSES][1] = REG_CLASS_CONTENTS;
99
100/* These are all to support encode_pattern(). */
101static char pattern[30], *patternp;
102static GTY(()) rtx patternr[30];
103#define RTX_IS(x) (streq (pattern, x))
104
105/* Some macros to simplify the logic throughout this file. */
106#define IS_MEM_REGNO(regno) ((regno) >= MEM0_REGNO && (regno) <= MEM7_REGNO)
107#define IS_MEM_REG(rtx) (GET_CODE (rtx) == REG && IS_MEM_REGNO (REGNO (rtx)))
108
109#define IS_CR_REGNO(regno) ((regno) >= SB_REGNO && (regno) <= PC_REGNO)
110#define IS_CR_REG(rtx) (GET_CODE (rtx) == REG && IS_CR_REGNO (REGNO (rtx)))
111
112/* We do most RTX matching by converting the RTX into a string, and
113 using string compares. This vastly simplifies the logic in many of
114 the functions in this file.
115
116 On exit, pattern[] has the encoded string (use RTX_IS("...") to
117 compare it) and patternr[] has pointers to the nodes in the RTX
118 corresponding to each character in the encoded string. The latter
119 is mostly used by print_operand().
120
121 Unrecognized patterns have '?' in them; this shows up when the
122 assembler complains about syntax errors.
123*/
124
125static void
126encode_pattern_1 (rtx x)
127{
128 int i;
129
130 if (patternp == pattern + sizeof (pattern) - 2)
131 {
132 patternp[-1] = '?';
133 return;
134 }
135
136 patternr[patternp - pattern] = x;
137
138 switch (GET_CODE (x))
139 {
140 case REG:
141 *patternp++ = 'r';
142 break;
143 case SUBREG:
144 if (GET_MODE_SIZE (GET_MODE (x)) !=
145 GET_MODE_SIZE (GET_MODE (XEXP (x, 0))))
146 *patternp++ = 'S';
147 encode_pattern_1 (XEXP (x, 0));
148 break;
149 case MEM:
 150 *patternp++ = 'm';
 /* Fall through: encode the address inside the MEM the same way a
 CONST's operand is encoded. */
 151 case CONST:
152 encode_pattern_1 (XEXP (x, 0));
153 break;
154 case PLUS:
155 *patternp++ = '+';
156 encode_pattern_1 (XEXP (x, 0));
157 encode_pattern_1 (XEXP (x, 1));
158 break;
159 case PRE_DEC:
160 *patternp++ = '>';
161 encode_pattern_1 (XEXP (x, 0));
162 break;
163 case POST_INC:
164 *patternp++ = '<';
165 encode_pattern_1 (XEXP (x, 0));
166 break;
167 case LO_SUM:
168 *patternp++ = 'L';
169 encode_pattern_1 (XEXP (x, 0));
170 encode_pattern_1 (XEXP (x, 1));
171 break;
172 case HIGH:
173 *patternp++ = 'H';
174 encode_pattern_1 (XEXP (x, 0));
175 break;
176 case SYMBOL_REF:
177 *patternp++ = 's';
178 break;
179 case LABEL_REF:
180 *patternp++ = 'l';
181 break;
182 case CODE_LABEL:
183 *patternp++ = 'c';
184 break;
185 case CONST_INT:
186 case CONST_DOUBLE:
187 *patternp++ = 'i';
188 break;
189 case UNSPEC:
190 *patternp++ = 'u';
191 *patternp++ = '0' + XCINT (x, 1, UNSPEC);
192 for (i = 0; i < XVECLEN (x, 0); i++)
193 encode_pattern_1 (XVECEXP (x, 0, i));
194 break;
195 case USE:
196 *patternp++ = 'U';
197 break;
198 case PARALLEL:
199 *patternp++ = '|';
200 for (i = 0; i < XVECLEN (x, 0); i++)
201 encode_pattern_1 (XVECEXP (x, 0, i));
202 break;
203 case EXPR_LIST:
204 *patternp++ = 'E';
205 encode_pattern_1 (XEXP (x, 0));
206 if (XEXP (x, 1))
207 encode_pattern_1 (XEXP (x, 1));
208 break;
209 default:
210 *patternp++ = '?';
211#if DEBUG0
212 fprintf (stderr, "can't encode pattern %s\n",
213 GET_RTX_NAME (GET_CODE (x)));
214 debug_rtx (x);
215 gcc_unreachable ();
216#endif
217 break;
218 }
219}
220
221static void
222encode_pattern (rtx x)
223{
224 patternp = pattern;
225 encode_pattern_1 (x);
226 *patternp = 0;
227}
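/* Illustrative example (added for exposition, not part of the original
   source): a frame-base load such as

     (mem:HI (plus:HI (reg:HI FB_REGNO) (const_int -4)))

   encodes as the string "m+ri": 'm' for the MEM, '+' for the PLUS,
   'r' for the register and 'i' for the constant, so RTX_IS ("m+ri")
   matches it and patternr[2] points at the REG node.  */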
228
229/* Since register names indicate the mode they're used in, we need a
230 way to determine which name to refer to the register with. Called
231 by print_operand(). */
232
233static const char *
234reg_name_with_mode (int regno, enum machine_mode mode)
235{
236 int mlen = GET_MODE_SIZE (mode);
237 if (regno == R0_REGNO && mlen == 1)
238 return "r0l";
239 if (regno == R0_REGNO && (mlen == 3 || mlen == 4))
240 return "r2r0";
241 if (regno == R0_REGNO && mlen == 6)
242 return "r2r1r0";
243 if (regno == R0_REGNO && mlen == 8)
244 return "r3r1r2r0";
245 if (regno == R1_REGNO && mlen == 1)
246 return "r1l";
247 if (regno == R1_REGNO && (mlen == 3 || mlen == 4))
248 return "r3r1";
249 if (regno == A0_REGNO && TARGET_A16 && (mlen == 3 || mlen == 4))
250 return "a1a0";
251 return reg_names[regno];
252}
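/* For example (illustrative, not from the original source):
   reg_name_with_mode (R0_REGNO, QImode) yields "r0l" and
   reg_name_with_mode (R0_REGNO, SImode) yields "r2r0", while any
   combination not special-cased above falls back to reg_names[],
   e.g. plain "r0" for HImode.  */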
253
254/* How many bytes a register uses on stack when it's pushed. We need
255 to know this because the push opcode needs to explicitly indicate
256 the size of the register, even though the name of the register
257 already tells it that. Used by m32c_output_reg_{push,pop}, which
258 is only used through calls to ASM_OUTPUT_REG_{PUSH,POP}. */
259
260static int
261reg_push_size (int regno)
262{
263 switch (regno)
264 {
265 case R0_REGNO:
266 case R1_REGNO:
267 return 2;
268 case R2_REGNO:
269 case R3_REGNO:
270 case FLG_REGNO:
271 return 2;
272 case A0_REGNO:
273 case A1_REGNO:
274 case SB_REGNO:
275 case FB_REGNO:
276 case SP_REGNO:
277 if (TARGET_A16)
278 return 2;
279 else
280 return 3;
281 default:
282 gcc_unreachable ();
283 }
284}
285
286static int *class_sizes = 0;
287
288/* Given two register classes, find the largest intersection between
289 them. If there is no intersection, return RETURNED_IF_EMPTY
290 instead. */
291static int
292reduce_class (int original_class, int limiting_class, int returned_if_empty)
293{
294 int cc = class_contents[original_class][0];
295 int i, best = NO_REGS;
296 int best_size = 0;
297
298 if (original_class == limiting_class)
299 return original_class;
300
301 if (!class_sizes)
302 {
303 int r;
304 class_sizes = (int *) xmalloc (LIM_REG_CLASSES * sizeof (int));
305 for (i = 0; i < LIM_REG_CLASSES; i++)
306 {
307 class_sizes[i] = 0;
308 for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
309 if (class_contents[i][0] & (1 << r))
310 class_sizes[i]++;
311 }
312 }
313
314 cc &= class_contents[limiting_class][0];
315 for (i = 0; i < LIM_REG_CLASSES; i++)
316 {
317 int ic = class_contents[i][0];
318
319 if ((~cc & ic) == 0)
320 if (best_size < class_sizes[i])
321 {
322 best = i;
323 best_size = class_sizes[i];
324 }
325
326 }
327 if (best == NO_REGS)
328 return returned_if_empty;
329 return best;
330}
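/* Illustrative example (added for exposition, not part of the original
   source): if ORIGINAL_CLASS covers r0..r3 plus a0/a1 while
   LIMITING_CLASS covers only a0/a1, the intersection is a0/a1 and the
   largest class wholly contained in it (presumably A_REGS) is
   returned; if the two classes share no registers at all,
   RETURNED_IF_EMPTY is returned instead.  */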
331
 332/* Returns TRUE if there are any registers that exist in both register
333 classes. */
334static int
335classes_intersect (int class1, int class2)
336{
337 return class_contents[class1][0] & class_contents[class2][0];
338}
339
340/* Used by m32c_register_move_cost to determine if a move is
341 impossibly expensive. */
342static int
 343class_can_hold_mode (int rclass, enum machine_mode mode)
344{
345 /* Cache the results: 0=untested 1=no 2=yes */
346 static char results[LIM_REG_CLASSES][MAX_MACHINE_MODE];
 347 if (results[rclass][mode] == 0)
348 {
349 int r, n, i;
 350 results[rclass][mode] = 1;
 351 for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
 352 if (class_contents[rclass][0] & (1 << r)
353 && HARD_REGNO_MODE_OK (r, mode))
354 {
355 int ok = 1;
356 n = HARD_REGNO_NREGS (r, mode);
357 for (i = 1; i < n; i++)
 358 if (!(class_contents[rclass][0] & (1 << (r + i))))
359 ok = 0;
360 if (ok)
361 {
 362 results[rclass][mode] = 2;
363 break;
364 }
365 }
366 }
367#if DEBUG0
368 fprintf (stderr, "class %s can hold %s? %s\n",
369 class_names[rclass], mode_name[mode],
370 (results[rclass][mode] == 2) ? "yes" : "no");
 371#endif
 372 return results[rclass][mode] == 2;
373}
374
375/* Run-time Target Specification. */
376
377/* Memregs are memory locations that gcc treats like general
378 registers, as there are a limited number of true registers and the
379 m32c families can use memory in most places that registers can be
380 used.
381
382 However, since memory accesses are more expensive than registers,
383 we allow the user to limit the number of memregs available, in
384 order to try to persuade gcc to try harder to use real registers.
385
386 Memregs are provided by m32c-lib1.S.
387*/
388
389int target_memregs = 16;
390static bool target_memregs_set = FALSE;
391int ok_to_change_target_memregs = TRUE;
392
393#undef TARGET_HANDLE_OPTION
394#define TARGET_HANDLE_OPTION m32c_handle_option
395static bool
396m32c_handle_option (size_t code,
397 const char *arg ATTRIBUTE_UNUSED,
398 int value ATTRIBUTE_UNUSED)
399{
400 if (code == OPT_memregs_)
401 {
402 target_memregs_set = TRUE;
403 target_memregs = atoi (arg);
404 }
405 return TRUE;
406}
407
408/* Implements OVERRIDE_OPTIONS. We limit memregs to 0..16, and
409 provide a default. */
410void
411m32c_override_options (void)
412{
413 if (target_memregs_set)
414 {
415 if (target_memregs < 0 || target_memregs > 16)
416 error ("invalid target memregs value '%d'", target_memregs);
417 }
418 else
 419 target_memregs = 16;
420}
421
422/* Defining data structures for per-function information */
423
424/* The usual; we set up our machine_function data. */
425static struct machine_function *
426m32c_init_machine_status (void)
427{
428 struct machine_function *machine;
429 machine =
430 (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
431
432 return machine;
433}
434
435/* Implements INIT_EXPANDERS. We just set up to call the above
436 function. */
437void
438m32c_init_expanders (void)
439{
440 init_machine_status = m32c_init_machine_status;
441}
442
443/* Storage Layout */
444
445#undef TARGET_PROMOTE_FUNCTION_RETURN
446#define TARGET_PROMOTE_FUNCTION_RETURN m32c_promote_function_return
447bool
586de218 448m32c_promote_function_return (const_tree fntype ATTRIBUTE_UNUSED)
38b2d076
DD
449{
450 return false;
451}
452
453/* Register Basics */
454
455/* Basic Characteristics of Registers */
456
457/* Whether a mode fits in a register is complex enough to warrant a
458 table. */
459static struct
460{
461 char qi_regs;
462 char hi_regs;
463 char pi_regs;
464 char si_regs;
465 char di_regs;
466} nregs_table[FIRST_PSEUDO_REGISTER] =
467{
468 { 1, 1, 2, 2, 4 }, /* r0 */
469 { 0, 1, 0, 0, 0 }, /* r2 */
470 { 1, 1, 2, 2, 0 }, /* r1 */
471 { 0, 1, 0, 0, 0 }, /* r3 */
472 { 0, 1, 1, 0, 0 }, /* a0 */
473 { 0, 1, 1, 0, 0 }, /* a1 */
474 { 0, 1, 1, 0, 0 }, /* sb */
475 { 0, 1, 1, 0, 0 }, /* fb */
476 { 0, 1, 1, 0, 0 }, /* sp */
477 { 1, 1, 1, 0, 0 }, /* pc */
478 { 0, 0, 0, 0, 0 }, /* fl */
479 { 1, 1, 1, 0, 0 }, /* ap */
480 { 1, 1, 2, 2, 4 }, /* mem0 */
481 { 1, 1, 2, 2, 4 }, /* mem1 */
482 { 1, 1, 2, 2, 4 }, /* mem2 */
483 { 1, 1, 2, 2, 4 }, /* mem3 */
484 { 1, 1, 2, 2, 4 }, /* mem4 */
485 { 1, 1, 2, 2, 0 }, /* mem5 */
486 { 1, 1, 2, 2, 0 }, /* mem6 */
487 { 1, 1, 0, 0, 0 }, /* mem7 */
488};
489
490/* Implements CONDITIONAL_REGISTER_USAGE. We adjust the number of
491 available memregs, and select which registers need to be preserved
492 across calls based on the chip family. */
493
494void
495m32c_conditional_register_usage (void)
496{
497 int i;
498
499 if (0 <= target_memregs && target_memregs <= 16)
500 {
501 /* The command line option is bytes, but our "registers" are
502 16-bit words. */
503 for (i = target_memregs/2; i < 8; i++)
504 {
505 fixed_regs[MEM0_REGNO + i] = 1;
506 CLEAR_HARD_REG_BIT (reg_class_contents[MEM_REGS], MEM0_REGNO + i);
507 }
508 }
509
510 /* M32CM and M32C preserve more registers across function calls. */
511 if (TARGET_A24)
512 {
513 call_used_regs[R1_REGNO] = 0;
514 call_used_regs[R2_REGNO] = 0;
515 call_used_regs[R3_REGNO] = 0;
516 call_used_regs[A0_REGNO] = 0;
517 call_used_regs[A1_REGNO] = 0;
518 }
519}
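/* For instance (illustrative, not from the original source): with
   -memregs=8 the loop above leaves mem0..mem3 available (four 16-bit
   words) and marks mem4..mem7 as fixed, removing them from MEM_REGS.  */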
520
521/* How Values Fit in Registers */
522
523/* Implements HARD_REGNO_NREGS. This is complicated by the fact that
524 different registers are different sizes from each other, *and* may
525 be different sizes in different chip families. */
526static int
527m32c_hard_regno_nregs_1 (int regno, enum machine_mode mode)
528{
529 if (regno == FLG_REGNO && mode == CCmode)
530 return 1;
531 if (regno >= FIRST_PSEUDO_REGISTER)
532 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
533
534 if (regno >= MEM0_REGNO && regno <= MEM7_REGNO)
535 return (GET_MODE_SIZE (mode) + 1) / 2;
536
537 if (GET_MODE_SIZE (mode) <= 1)
538 return nregs_table[regno].qi_regs;
539 if (GET_MODE_SIZE (mode) <= 2)
540 return nregs_table[regno].hi_regs;
541 if (regno == A0_REGNO && mode == PSImode && TARGET_A16)
542 return 2;
543 if ((GET_MODE_SIZE (mode) <= 3 || mode == PSImode) && TARGET_A24)
544 return nregs_table[regno].pi_regs;
545 if (GET_MODE_SIZE (mode) <= 4)
546 return nregs_table[regno].si_regs;
547 if (GET_MODE_SIZE (mode) <= 8)
548 return nregs_table[regno].di_regs;
549 return 0;
550}
551
552int
553m32c_hard_regno_nregs (int regno, enum machine_mode mode)
554{
555 int rv = m32c_hard_regno_nregs_1 (regno, mode);
556 return rv ? rv : 1;
557}
558
559/* Implements HARD_REGNO_MODE_OK. The above function does the work
560 already; just test its return value. */
561int
562m32c_hard_regno_ok (int regno, enum machine_mode mode)
563{
 564 return m32c_hard_regno_nregs_1 (regno, mode) != 0;
565}
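/* Worked example (added for exposition, not part of the original
   source): per nregs_table above, SImode in r0 occupies two hard
   registers (the r2r0 pair) and DImode occupies four (r3r1r2r0), while
   r2 has no SImode entry at all, so m32c_hard_regno_ok (R2_REGNO,
   SImode) returns 0.  */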
566
567/* Implements MODES_TIEABLE_P. In general, modes aren't tieable since
568 registers are all different sizes. However, since most modes are
569 bigger than our registers anyway, it's easier to implement this
570 function that way, leaving QImode as the only unique case. */
571int
572m32c_modes_tieable_p (enum machine_mode m1, enum machine_mode m2)
573{
574 if (GET_MODE_SIZE (m1) == GET_MODE_SIZE (m2))
575 return 1;
576
 577#if 0
578 if (m1 == QImode || m2 == QImode)
579 return 0;
 580#endif
581
582 return 1;
583}
584
585/* Register Classes */
586
587/* Implements REGNO_REG_CLASS. */
588enum machine_mode
589m32c_regno_reg_class (int regno)
590{
591 switch (regno)
592 {
593 case R0_REGNO:
594 return R0_REGS;
595 case R1_REGNO:
596 return R1_REGS;
597 case R2_REGNO:
598 return R2_REGS;
599 case R3_REGNO:
600 return R3_REGS;
601 case A0_REGNO:
602 case A1_REGNO:
603 return A_REGS;
604 case SB_REGNO:
605 return SB_REGS;
606 case FB_REGNO:
607 return FB_REGS;
608 case SP_REGNO:
609 return SP_REGS;
610 case FLG_REGNO:
611 return FLG_REGS;
612 default:
613 if (IS_MEM_REGNO (regno))
614 return MEM_REGS;
615 return ALL_REGS;
616 }
617}
618
619/* Implements REG_CLASS_FROM_CONSTRAINT. Note that some constraints only match
620 for certain chip families. */
621int
622m32c_reg_class_from_constraint (char c ATTRIBUTE_UNUSED, const char *s)
623{
624 if (memcmp (s, "Rsp", 3) == 0)
625 return SP_REGS;
626 if (memcmp (s, "Rfb", 3) == 0)
627 return FB_REGS;
628 if (memcmp (s, "Rsb", 3) == 0)
629 return SB_REGS;
630 if (memcmp (s, "Rcr", 3) == 0)
631 return TARGET_A16 ? CR_REGS : NO_REGS;
632 if (memcmp (s, "Rcl", 3) == 0)
633 return TARGET_A24 ? CR_REGS : NO_REGS;
634 if (memcmp (s, "R0w", 3) == 0)
635 return R0_REGS;
636 if (memcmp (s, "R1w", 3) == 0)
637 return R1_REGS;
638 if (memcmp (s, "R2w", 3) == 0)
639 return R2_REGS;
640 if (memcmp (s, "R3w", 3) == 0)
641 return R3_REGS;
642 if (memcmp (s, "R02", 3) == 0)
643 return R02_REGS;
644 if (memcmp (s, "R03", 3) == 0)
645 return R03_REGS;
646 if (memcmp (s, "Rdi", 3) == 0)
647 return DI_REGS;
648 if (memcmp (s, "Rhl", 3) == 0)
649 return HL_REGS;
650 if (memcmp (s, "R23", 3) == 0)
651 return R23_REGS;
652 if (memcmp (s, "Ra0", 3) == 0)
653 return A0_REGS;
654 if (memcmp (s, "Ra1", 3) == 0)
655 return A1_REGS;
656 if (memcmp (s, "Raa", 3) == 0)
657 return A_REGS;
658 if (memcmp (s, "Raw", 3) == 0)
659 return TARGET_A16 ? A_REGS : NO_REGS;
660 if (memcmp (s, "Ral", 3) == 0)
661 return TARGET_A24 ? A_REGS : NO_REGS;
662 if (memcmp (s, "Rqi", 3) == 0)
663 return QI_REGS;
664 if (memcmp (s, "Rad", 3) == 0)
665 return AD_REGS;
666 if (memcmp (s, "Rsi", 3) == 0)
667 return SI_REGS;
668 if (memcmp (s, "Rhi", 3) == 0)
669 return HI_REGS;
670 if (memcmp (s, "Rhc", 3) == 0)
671 return HC_REGS;
672 if (memcmp (s, "Rra", 3) == 0)
673 return RA_REGS;
674 if (memcmp (s, "Rfl", 3) == 0)
675 return FLG_REGS;
676 if (memcmp (s, "Rmm", 3) == 0)
677 {
678 if (fixed_regs[MEM0_REGNO])
679 return NO_REGS;
680 return MEM_REGS;
681 }
682
683 /* PSImode registers - i.e. whatever can hold a pointer. */
684 if (memcmp (s, "Rpi", 3) == 0)
685 {
686 if (TARGET_A16)
687 return HI_REGS;
688 else
689 return RA_REGS; /* r2r0 and r3r1 can hold pointers. */
690 }
691
692 /* We handle this one as an EXTRA_CONSTRAINT. */
693 if (memcmp (s, "Rpa", 3) == 0)
694 return NO_REGS;
695
696 if (*s == 'R')
697 {
698 fprintf(stderr, "unrecognized R constraint: %.3s\n", s);
699 gcc_unreachable();
700 }
701
702 return NO_REGS;
703}
704
705/* Implements REGNO_OK_FOR_BASE_P. */
706int
707m32c_regno_ok_for_base_p (int regno)
708{
709 if (regno == A0_REGNO
710 || regno == A1_REGNO || regno >= FIRST_PSEUDO_REGISTER)
711 return 1;
712 return 0;
713}
714
715#define DEBUG_RELOAD 0
716
717/* Implements PREFERRED_RELOAD_CLASS. In general, prefer general
718 registers of the appropriate size. */
719int
720m32c_preferred_reload_class (rtx x, int rclass)
721{
722 int newclass = rclass;
723
724#if DEBUG_RELOAD
725 fprintf (stderr, "\npreferred_reload_class for %s is ",
726 class_names[rclass]);
727#endif
728 if (rclass == NO_REGS)
729 rclass = GET_MODE (x) == QImode ? HL_REGS : R03_REGS;
730
731 if (classes_intersect (rclass, CR_REGS))
732 {
733 switch (GET_MODE (x))
734 {
735 case QImode:
736 newclass = HL_REGS;
737 break;
738 default:
739 /* newclass = HI_REGS; */
740 break;
741 }
742 }
743
744 else if (newclass == QI_REGS && GET_MODE_SIZE (GET_MODE (x)) > 2)
745 newclass = SI_REGS;
746 else if (GET_MODE_SIZE (GET_MODE (x)) > 4
747 && ~class_contents[rclass][0] & 0x000f)
748 newclass = DI_REGS;
749
750 rclass = reduce_class (rclass, newclass, rclass);
751
752 if (GET_MODE (x) == QImode)
753 rclass = reduce_class (rclass, HL_REGS, rclass);
754
755#if DEBUG_RELOAD
756 fprintf (stderr, "%s\n", class_names[rclass]);
757 debug_rtx (x);
758
759 if (GET_CODE (x) == MEM
760 && GET_CODE (XEXP (x, 0)) == PLUS
761 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
762 fprintf (stderr, "Glorm!\n");
763#endif
764 return rclass;
765}
766
767/* Implements PREFERRED_OUTPUT_RELOAD_CLASS. */
768int
769m32c_preferred_output_reload_class (rtx x, int rclass)
770{
771 return m32c_preferred_reload_class (x, rclass);
772}
773
774/* Implements LIMIT_RELOAD_CLASS. We basically want to avoid using
775 address registers for reloads since they're needed for address
776 reloads. */
777int
778m32c_limit_reload_class (enum machine_mode mode, int rclass)
779{
780#if DEBUG_RELOAD
781 fprintf (stderr, "limit_reload_class for %s: %s ->",
782 mode_name[mode], class_names[rclass]);
783#endif
784
785 if (mode == QImode)
786 rclass = reduce_class (rclass, HL_REGS, rclass);
787 else if (mode == HImode)
788 rclass = reduce_class (rclass, HI_REGS, rclass);
789 else if (mode == SImode)
790 rclass = reduce_class (rclass, SI_REGS, rclass);
791
792 if (rclass != A_REGS)
793 rclass = reduce_class (rclass, DI_REGS, rclass);
794
795#if DEBUG_RELOAD
796 fprintf (stderr, " %s\n", class_names[rclass]);
797#endif
798 return rclass;
799}
800
 801/* Implements SECONDARY_RELOAD_CLASS. QImode values have to be reloaded in
802 r0 or r1, as those are the only real QImode registers. CR regs get
803 reloaded through appropriately sized general or address
804 registers. */
805int
806m32c_secondary_reload_class (int rclass, enum machine_mode mode, rtx x)
807{
808 int cc = class_contents[rclass][0];
809#if DEBUG0
810 fprintf (stderr, "\nsecondary reload class %s %s\n",
811 class_names[rclass], mode_name[mode]);
812 debug_rtx (x);
813#endif
814 if (mode == QImode
815 && GET_CODE (x) == MEM && (cc & ~class_contents[R23_REGS][0]) == 0)
816 return QI_REGS;
817 if (classes_intersect (rclass, CR_REGS)
818 && GET_CODE (x) == REG
819 && REGNO (x) >= SB_REGNO && REGNO (x) <= SP_REGNO)
820 return TARGET_A16 ? HI_REGS : A_REGS;
821 return NO_REGS;
822}
823
824/* Implements CLASS_LIKELY_SPILLED_P. A_REGS is needed for address
825 reloads. */
826int
827m32c_class_likely_spilled_p (int regclass)
828{
829 if (regclass == A_REGS)
830 return 1;
831 return reg_class_size[regclass] == 1;
832}
833
834/* Implements CLASS_MAX_NREGS. We calculate this according to its
835 documented meaning, to avoid potential inconsistencies with actual
836 class definitions. */
837int
838m32c_class_max_nregs (int regclass, enum machine_mode mode)
839{
840 int rn, max = 0;
841
842 for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
843 if (class_contents[regclass][0] & (1 << rn))
844 {
845 int n = m32c_hard_regno_nregs (rn, mode);
846 if (max < n)
847 max = n;
848 }
849 return max;
850}
851
852/* Implements CANNOT_CHANGE_MODE_CLASS. Only r0 and r1 can change to
853 QI (r0l, r1l) because the chip doesn't support QI ops on other
854 registers (well, it does on a0/a1 but if we let gcc do that, reload
855 suffers). Otherwise, we allow changes to larger modes. */
856int
857m32c_cannot_change_mode_class (enum machine_mode from,
858 enum machine_mode to, int rclass)
859{
 860 int rn;
861#if DEBUG0
862 fprintf (stderr, "cannot change from %s to %s in %s\n",
863 mode_name[from], mode_name[to], class_names[rclass]);
864#endif
865
866 /* If the larger mode isn't allowed in any of these registers, we
867 can't allow the change. */
868 for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
869 if (class_contents[rclass][0] & (1 << rn))
870 if (! m32c_hard_regno_ok (rn, to))
871 return 1;
872
873 if (to == QImode)
874 return (class_contents[rclass][0] & 0x1ffa);
875
876 if (class_contents[rclass][0] & 0x0005 /* r0, r1 */
877 && GET_MODE_SIZE (from) > 1)
878 return 0;
879 if (GET_MODE_SIZE (from) > 2) /* all other regs */
880 return 0;
881
882 return 1;
883}
884
885/* Helpers for the rest of the file. */
886/* TRUE if the rtx is a REG rtx for the given register. */
887#define IS_REG(rtx,regno) (GET_CODE (rtx) == REG \
888 && REGNO (rtx) == regno)
889/* TRUE if the rtx is a pseudo - specifically, one we can use as a
890 base register in address calculations (hence the "strict"
891 argument). */
892#define IS_PSEUDO(rtx,strict) (!strict && GET_CODE (rtx) == REG \
893 && (REGNO (rtx) == AP_REGNO \
894 || REGNO (rtx) >= FIRST_PSEUDO_REGISTER))
895
896/* Implements CONST_OK_FOR_CONSTRAINT_P. Currently, all constant
897 constraints start with 'I', with the next two characters indicating
898 the type and size of the range allowed. */
899int
900m32c_const_ok_for_constraint_p (HOST_WIDE_INT value,
901 char c ATTRIBUTE_UNUSED, const char *str)
902{
903 /* s=signed u=unsigned n=nonzero m=minus l=log2able,
904 [sun] bits [SUN] bytes, p=pointer size
905 I[-0-9][0-9] matches that number */
906 if (memcmp (str, "Is3", 3) == 0)
907 {
908 return (-8 <= value && value <= 7);
909 }
910 if (memcmp (str, "IS1", 3) == 0)
911 {
912 return (-128 <= value && value <= 127);
913 }
914 if (memcmp (str, "IS2", 3) == 0)
915 {
916 return (-32768 <= value && value <= 32767);
917 }
918 if (memcmp (str, "IU2", 3) == 0)
919 {
920 return (0 <= value && value <= 65535);
921 }
922 if (memcmp (str, "IU3", 3) == 0)
923 {
924 return (0 <= value && value <= 0x00ffffff);
925 }
926 if (memcmp (str, "In4", 3) == 0)
927 {
928 return (-8 <= value && value && value <= 8);
929 }
930 if (memcmp (str, "In5", 3) == 0)
931 {
932 return (-16 <= value && value && value <= 16);
933 }
934 if (memcmp (str, "In6", 3) == 0)
935 {
936 return (-32 <= value && value && value <= 32);
937 }
938 if (memcmp (str, "IM2", 3) == 0)
939 {
940 return (-65536 <= value && value && value <= -1);
941 }
942 if (memcmp (str, "Ilb", 3) == 0)
943 {
944 int b = exact_log2 (value);
 945 return (b >= 0 && b <= 7);
 946 }
947 if (memcmp (str, "Imb", 3) == 0)
948 {
949 int b = exact_log2 ((value ^ 0xff) & 0xff);
 950 return (b >= 0 && b <= 7);
 951 }
952 if (memcmp (str, "ImB", 3) == 0)
953 {
954 int b = exact_log2 ((value ^ 0xffff) & 0xffff);
955 return (b >= 0 && b <= 7);
956 }
957 if (memcmp (str, "Ilw", 3) == 0)
958 {
959 int b = exact_log2 (value);
 960 return (b >= 0 && b <= 15);
 961 }
962 if (memcmp (str, "Imw", 3) == 0)
963 {
964 int b = exact_log2 ((value ^ 0xffff) & 0xffff);
 965 return (b >= 0 && b <= 15);
966 }
967 if (memcmp (str, "I00", 3) == 0)
968 {
969 return (value == 0);
970 }
971 return 0;
972}
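/* Examples (illustrative, not from the original source): "Is3" accepts
   the immediates -8..7, so (const_int 5) satisfies it; "Ilb" accepts
   only powers of two whose log2 fits in 0..7, so (const_int 32) matches
   but (const_int 3) does not; "I00" matches nothing but (const_int 0).  */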
973
974/* Implements EXTRA_CONSTRAINT_STR (see next function too). 'S' is
975 for memory constraints, plus "Rpa" for PARALLEL rtx's we use for
976 call return values. */
977int
978m32c_extra_constraint_p2 (rtx value, char c ATTRIBUTE_UNUSED, const char *str)
979{
980 encode_pattern (value);
981 if (memcmp (str, "Sd", 2) == 0)
982 {
983 /* This is the common "src/dest" address */
984 rtx r;
985 if (GET_CODE (value) == MEM && CONSTANT_P (XEXP (value, 0)))
986 return 1;
987 if (RTX_IS ("ms") || RTX_IS ("m+si"))
988 return 1;
989 if (RTX_IS ("m++rii"))
990 {
991 if (REGNO (patternr[3]) == FB_REGNO
992 && INTVAL (patternr[4]) == 0)
993 return 1;
994 }
995 if (RTX_IS ("mr"))
996 r = patternr[1];
997 else if (RTX_IS ("m+ri") || RTX_IS ("m+rs") || RTX_IS ("m+r+si"))
998 r = patternr[2];
999 else
1000 return 0;
1001 if (REGNO (r) == SP_REGNO)
1002 return 0;
1003 return m32c_legitimate_address_p (GET_MODE (value), XEXP (value, 0), 1);
1004 }
1005 else if (memcmp (str, "Sa", 2) == 0)
1006 {
1007 rtx r;
1008 if (RTX_IS ("mr"))
1009 r = patternr[1];
1010 else if (RTX_IS ("m+ri"))
1011 r = patternr[2];
1012 else
1013 return 0;
1014 return (IS_REG (r, A0_REGNO) || IS_REG (r, A1_REGNO));
1015 }
1016 else if (memcmp (str, "Si", 2) == 0)
1017 {
1018 return (RTX_IS ("mi") || RTX_IS ("ms") || RTX_IS ("m+si"));
1019 }
1020 else if (memcmp (str, "Ss", 2) == 0)
1021 {
1022 return ((RTX_IS ("mr")
1023 && (IS_REG (patternr[1], SP_REGNO)))
1024 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SP_REGNO))));
1025 }
1026 else if (memcmp (str, "Sf", 2) == 0)
1027 {
1028 return ((RTX_IS ("mr")
1029 && (IS_REG (patternr[1], FB_REGNO)))
1030 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], FB_REGNO))));
1031 }
1032 else if (memcmp (str, "Sb", 2) == 0)
1033 {
1034 return ((RTX_IS ("mr")
1035 && (IS_REG (patternr[1], SB_REGNO)))
1036 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SB_REGNO))));
1037 }
1038 else if (memcmp (str, "Sp", 2) == 0)
1039 {
1040 /* Absolute addresses 0..0x1fff used for bit addressing (I/O ports) */
1041 return (RTX_IS ("mi")
1042 && !(INTVAL (patternr[1]) & ~0x1fff));
1043 }
1044 else if (memcmp (str, "S1", 2) == 0)
1045 {
1046 return r1h_operand (value, QImode);
1047 }
1048
1049 gcc_assert (str[0] != 'S');
1050
1051 if (memcmp (str, "Rpa", 2) == 0)
1052 return GET_CODE (value) == PARALLEL;
1053
1054 return 0;
1055}
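/* For instance (illustrative, not from the original source): "Sf"
   matches frame-base addressing such as
   (mem (plus (reg FB_REGNO) (const_int -2))), "Ss" matches the same
   form based on the stack pointer, and "Sp" only matches absolute
   addresses in the 0..0x1fff range used for bit addressing.  */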
1056
1057/* This is for when we're debugging the above. */
1058int
1059m32c_extra_constraint_p (rtx value, char c, const char *str)
1060{
1061 int rv = m32c_extra_constraint_p2 (value, c, str);
1062#if DEBUG0
1063 fprintf (stderr, "\nconstraint %.*s: %d\n", CONSTRAINT_LEN (c, str), str,
1064 rv);
1065 debug_rtx (value);
1066#endif
1067 return rv;
1068}
1069
1070/* Implements EXTRA_MEMORY_CONSTRAINT. Currently, we only use strings
1071 starting with 'S'. */
1072int
1073m32c_extra_memory_constraint (char c, const char *str ATTRIBUTE_UNUSED)
1074{
1075 return c == 'S';
1076}
1077
1078/* Implements EXTRA_ADDRESS_CONSTRAINT. We reserve 'A' strings for these,
1079 but don't currently define any. */
1080int
1081m32c_extra_address_constraint (char c, const char *str ATTRIBUTE_UNUSED)
1082{
1083 return c == 'A';
1084}
1085
1086/* STACK AND CALLING */
1087
1088/* Frame Layout */
1089
1090/* Implements RETURN_ADDR_RTX. Note that R8C and M16C push 24 bits
1091 (yes, THREE bytes) onto the stack for the return address, but we
1092 don't support pointers bigger than 16 bits on those chips. This
1093 will likely wreak havoc with exception unwinding. FIXME. */
1094rtx
1095m32c_return_addr_rtx (int count)
1096{
1097 enum machine_mode mode;
1098 int offset;
1099 rtx ra_mem;
1100
1101 if (count)
1102 return NULL_RTX;
1103 /* we want 2[$fb] */
1104
1105 if (TARGET_A24)
1106 {
1107 /* It's four bytes */
1108 mode = PSImode;
1109 offset = 4;
1110 }
1111 else
1112 {
1113 /* FIXME: it's really 3 bytes */
1114 mode = HImode;
1115 offset = 2;
1116 }
1117
1118 ra_mem =
1119 gen_rtx_MEM (mode, plus_constant (gen_rtx_REG (Pmode, FP_REGNO), offset));
1120 return copy_to_mode_reg (mode, ra_mem);
1121}
1122
1123/* Implements INCOMING_RETURN_ADDR_RTX. See comment above. */
1124rtx
1125m32c_incoming_return_addr_rtx (void)
1126{
1127 /* we want [sp] */
1128 return gen_rtx_MEM (PSImode, gen_rtx_REG (PSImode, SP_REGNO));
1129}
1130
1131/* Exception Handling Support */
1132
1133/* Implements EH_RETURN_DATA_REGNO. Choose registers able to hold
1134 pointers. */
1135int
1136m32c_eh_return_data_regno (int n)
1137{
1138 switch (n)
1139 {
1140 case 0:
1141 return A0_REGNO;
1142 case 1:
1143 if (TARGET_A16)
1144 return R3_REGNO;
1145 else
1146 return R1_REGNO;
1147 default:
1148 return INVALID_REGNUM;
1149 }
1150}
1151
1152/* Implements EH_RETURN_STACKADJ_RTX. Saved and used later in
1153 m32c_emit_eh_epilogue. */
1154rtx
1155m32c_eh_return_stackadj_rtx (void)
1156{
1157 if (!cfun->machine->eh_stack_adjust)
1158 {
1159 rtx sa;
1160
 1161 sa = gen_rtx_REG (Pmode, R0_REGNO);
1162 cfun->machine->eh_stack_adjust = sa;
1163 }
1164 return cfun->machine->eh_stack_adjust;
1165}
1166
1167/* Registers That Address the Stack Frame */
1168
1169/* Implements DWARF_FRAME_REGNUM and DBX_REGISTER_NUMBER. Note that
1170 the original spec called for dwarf numbers to vary with register
1171 width as well, for example, r0l, r0, and r2r0 would each have
1172 different dwarf numbers. GCC doesn't support this, and we don't do
1173 it, and gdb seems to like it this way anyway. */
1174unsigned int
1175m32c_dwarf_frame_regnum (int n)
1176{
1177 switch (n)
1178 {
1179 case R0_REGNO:
1180 return 5;
1181 case R1_REGNO:
1182 return 6;
1183 case R2_REGNO:
1184 return 7;
1185 case R3_REGNO:
1186 return 8;
1187 case A0_REGNO:
1188 return 9;
1189 case A1_REGNO:
1190 return 10;
1191 case FB_REGNO:
1192 return 11;
1193 case SB_REGNO:
1194 return 19;
1195
1196 case SP_REGNO:
1197 return 12;
1198 case PC_REGNO:
1199 return 13;
1200 default:
1201 return DWARF_FRAME_REGISTERS + 1;
1202 }
1203}
1204
1205/* The frame looks like this:
1206
1207 ap -> +------------------------------
1208 | Return address (3 or 4 bytes)
1209 | Saved FB (2 or 4 bytes)
1210 fb -> +------------------------------
1211 | local vars
1212 | register saves fb
1213 | through r0 as needed
1214 sp -> +------------------------------
1215*/
1216
1217/* We use this to wrap all emitted insns in the prologue. */
1218static rtx
1219F (rtx x)
1220{
1221 RTX_FRAME_RELATED_P (x) = 1;
1222 return x;
1223}
1224
1225/* This maps register numbers to the PUSHM/POPM bitfield, and tells us
1226 how much the stack pointer moves for each, for each cpu family. */
1227static struct
1228{
1229 int reg1;
1230 int bit;
1231 int a16_bytes;
1232 int a24_bytes;
1233} pushm_info[] =
1234{
1235 /* These are in reverse push (nearest-to-sp) order. */
1236 { R0_REGNO, 0x80, 2, 2 },
 1237 { R1_REGNO, 0x40, 2, 2 },
1238 { R2_REGNO, 0x20, 2, 2 },
1239 { R3_REGNO, 0x10, 2, 2 },
1240 { A0_REGNO, 0x08, 2, 4 },
1241 { A1_REGNO, 0x04, 2, 4 },
1242 { SB_REGNO, 0x02, 2, 4 },
1243 { FB_REGNO, 0x01, 2, 4 }
1244};
1245
1246#define PUSHM_N (sizeof(pushm_info)/sizeof(pushm_info[0]))
1247
1248/* Returns TRUE if we need to save/restore the given register. We
1249 save everything for exception handlers, so that any register can be
1250 unwound. For interrupt handlers, we save everything if the handler
1251 calls something else (because we don't know what *that* function
1252 might do), but try to be a bit smarter if the handler is a leaf
1253 function. We always save $a0, though, because we use that in the
 1254 epilogue to copy $fb to $sp. */
1255static int
1256need_to_save (int regno)
1257{
1258 if (fixed_regs[regno])
1259 return 0;
 1260 if (crtl->calls_eh_return)
1261 return 1;
1262 if (regno == FP_REGNO)
1263 return 0;
1264 if (cfun->machine->is_interrupt
1265 && (!cfun->machine->is_leaf || regno == A0_REGNO))
1266 return 1;
 1267 if (df_regs_ever_live_p (regno)
1268 && (!call_used_regs[regno] || cfun->machine->is_interrupt))
1269 return 1;
1270 return 0;
1271}
1272
1273/* This function contains all the intelligence about saving and
1274 restoring registers. It always figures out the register save set.
1275 When called with PP_justcount, it merely returns the size of the
1276 save set (for eliminating the frame pointer, for example). When
1277 called with PP_pushm or PP_popm, it emits the appropriate
1278 instructions for saving (pushm) or restoring (popm) the
1279 registers. */
1280static int
1281m32c_pushm_popm (Push_Pop_Type ppt)
1282{
1283 int reg_mask = 0;
1284 int byte_count = 0, bytes;
1285 int i;
1286 rtx dwarf_set[PUSHM_N];
1287 int n_dwarfs = 0;
1288 int nosave_mask = 0;
1289
1290 if (crtl->return_rtx
1291 && GET_CODE (crtl->return_rtx) == PARALLEL
 1292 && !(crtl->calls_eh_return || cfun->machine->is_interrupt))
 1293 {
 1294 rtx exp = XVECEXP (crtl->return_rtx, 0, 0);
1295 rtx rv = XEXP (exp, 0);
1296 int rv_bytes = GET_MODE_SIZE (GET_MODE (rv));
1297
1298 if (rv_bytes > 2)
1299 nosave_mask |= 0x20; /* PSI, SI */
1300 else
1301 nosave_mask |= 0xf0; /* DF */
1302 if (rv_bytes > 4)
1303 nosave_mask |= 0x50; /* DI */
1304 }
1305
1306 for (i = 0; i < (int) PUSHM_N; i++)
1307 {
1308 /* Skip if neither register needs saving. */
1309 if (!need_to_save (pushm_info[i].reg1))
1310 continue;
1311
1312 if (pushm_info[i].bit & nosave_mask)
1313 continue;
1314
1315 reg_mask |= pushm_info[i].bit;
1316 bytes = TARGET_A16 ? pushm_info[i].a16_bytes : pushm_info[i].a24_bytes;
1317
1318 if (ppt == PP_pushm)
1319 {
1320 enum machine_mode mode = (bytes == 2) ? HImode : SImode;
1321 rtx addr;
1322
1323 /* Always use stack_pointer_rtx instead of calling
1324 rtx_gen_REG ourselves. Code elsewhere in GCC assumes
1325 that there is a single rtx representing the stack pointer,
1326 namely stack_pointer_rtx, and uses == to recognize it. */
1327 addr = stack_pointer_rtx;
1328
1329 if (byte_count != 0)
1330 addr = gen_rtx_PLUS (GET_MODE (addr), addr, GEN_INT (byte_count));
1331
1332 dwarf_set[n_dwarfs++] =
1333 gen_rtx_SET (VOIDmode,
1334 gen_rtx_MEM (mode, addr),
1335 gen_rtx_REG (mode, pushm_info[i].reg1));
1336 F (dwarf_set[n_dwarfs - 1]);
1337
1338 }
1339 byte_count += bytes;
1340 }
1341
1342 if (cfun->machine->is_interrupt)
1343 {
1344 cfun->machine->intr_pushm = reg_mask & 0xfe;
1345 reg_mask = 0;
1346 byte_count = 0;
1347 }
1348
1349 if (cfun->machine->is_interrupt)
1350 for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
1351 if (need_to_save (i))
1352 {
1353 byte_count += 2;
1354 cfun->machine->intr_pushmem[i - MEM0_REGNO] = 1;
1355 }
1356
1357 if (ppt == PP_pushm && byte_count)
1358 {
1359 rtx note = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (n_dwarfs + 1));
1360 rtx pushm;
1361
1362 if (reg_mask)
1363 {
1364 XVECEXP (note, 0, 0)
1365 = gen_rtx_SET (VOIDmode,
1366 stack_pointer_rtx,
1367 gen_rtx_PLUS (GET_MODE (stack_pointer_rtx),
1368 stack_pointer_rtx,
1369 GEN_INT (-byte_count)));
1370 F (XVECEXP (note, 0, 0));
1371
1372 for (i = 0; i < n_dwarfs; i++)
1373 XVECEXP (note, 0, i + 1) = dwarf_set[i];
1374
1375 pushm = F (emit_insn (gen_pushm (GEN_INT (reg_mask))));
1376
1377 REG_NOTES (pushm) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, note,
1378 REG_NOTES (pushm));
1379 }
1380
1381 if (cfun->machine->is_interrupt)
1382 for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
1383 if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
1384 {
1385 if (TARGET_A16)
1386 pushm = emit_insn (gen_pushhi_16 (gen_rtx_REG (HImode, i)));
1387 else
1388 pushm = emit_insn (gen_pushhi_24 (gen_rtx_REG (HImode, i)));
1389 F (pushm);
1390 }
1391 }
1392 if (ppt == PP_popm && byte_count)
1393 {
1394 if (cfun->machine->is_interrupt)
1395 for (i = MEM7_REGNO; i >= MEM0_REGNO; i--)
1396 if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
1397 {
1398 if (TARGET_A16)
 1399 emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, i)));
 1400 else
 1401 emit_insn (gen_pophi_24 (gen_rtx_REG (HImode, i)));
1402 }
1403 if (reg_mask)
1404 emit_insn (gen_popm (GEN_INT (reg_mask)));
1405 }
1406
1407 return byte_count;
1408}
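/* Worked example (added for exposition, not part of the original
   source): if need_to_save() selects r0, r1 and a0, and none of them is
   excluded by nosave_mask, the pushm_info entries give reg_mask
   0x80|0x40|0x08 = 0xc8 and byte_count 6 on A16 parts (2+2+2) or 8 on
   A24 parts, where the address registers are pushed as four bytes.  */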
1409
1410/* Implements INITIAL_ELIMINATION_OFFSET. See the comment above that
1411 diagrams our call frame. */
1412int
1413m32c_initial_elimination_offset (int from, int to)
1414{
1415 int ofs = 0;
1416
1417 if (from == AP_REGNO)
1418 {
1419 if (TARGET_A16)
1420 ofs += 5;
1421 else
1422 ofs += 8;
1423 }
1424
1425 if (to == SP_REGNO)
1426 {
1427 ofs += m32c_pushm_popm (PP_justcount);
1428 ofs += get_frame_size ();
1429 }
1430
1431 /* Account for push rounding. */
1432 if (TARGET_A24)
1433 ofs = (ofs + 1) & ~1;
1434#if DEBUG0
1435 fprintf (stderr, "initial_elimination_offset from=%d to=%d, ofs=%d\n", from,
1436 to, ofs);
1437#endif
1438 return ofs;
1439}
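/* Example (illustrative, not from the original source): eliminating $ap
   into $fb on an A16 part only skips the 3-byte return address plus the
   2-byte saved $fb, i.e. an offset of 5; eliminating into $sp
   additionally adds the pushm save area and the local frame size,
   rounded up to a word on A24 parts.  */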
1440
1441/* Passing Function Arguments on the Stack */
1442
1443#undef TARGET_PROMOTE_PROTOTYPES
1444#define TARGET_PROMOTE_PROTOTYPES m32c_promote_prototypes
1445static bool
586de218 1446m32c_promote_prototypes (const_tree fntype ATTRIBUTE_UNUSED)
38b2d076
DD
1447{
1448 return 0;
1449}
1450
1451/* Implements PUSH_ROUNDING. The R8C and M16C have byte stacks, the
1452 M32C has word stacks. */
1453int
1454m32c_push_rounding (int n)
1455{
1456 if (TARGET_R8C || TARGET_M16C)
1457 return n;
1458 return (n + 1) & ~1;
1459}
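/* E.g. (illustrative): pushing a 3-byte PSImode value advances the
   stack by 3 bytes on R8C/M16C but by 4 bytes on M32C, where pushes
   stay word-aligned.  */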
1460
1461/* Passing Arguments in Registers */
1462
1463/* Implements FUNCTION_ARG. Arguments are passed partly in registers,
1464 partly on stack. If our function returns a struct, a pointer to a
1465 buffer for it is at the top of the stack (last thing pushed). The
1466 first few real arguments may be in registers as follows:
1467
1468 R8C/M16C: arg1 in r1 if it's QI or HI (else it's pushed on stack)
1469 arg2 in r2 if it's HI (else pushed on stack)
1470 rest on stack
1471 M32C: arg1 in r0 if it's QI or HI (else it's pushed on stack)
1472 rest on stack
1473
1474 Structs are not passed in registers, even if they fit. Only
1475 integer and pointer types are passed in registers.
1476
1477 Note that when arg1 doesn't fit in r1, arg2 may still be passed in
1478 r2 if it fits. */
1479rtx
1480m32c_function_arg (CUMULATIVE_ARGS * ca,
1481 enum machine_mode mode, tree type, int named)
1482{
1483 /* Can return a reg, parallel, or 0 for stack */
1484 rtx rv = NULL_RTX;
1485#if DEBUG0
1486 fprintf (stderr, "func_arg %d (%s, %d)\n",
1487 ca->parm_num, mode_name[mode], named);
1488 debug_tree (type);
1489#endif
1490
1491 if (mode == VOIDmode)
1492 return GEN_INT (0);
1493
1494 if (ca->force_mem || !named)
1495 {
1496#if DEBUG0
1497 fprintf (stderr, "func arg: force %d named %d, mem\n", ca->force_mem,
1498 named);
1499#endif
1500 return NULL_RTX;
1501 }
1502
1503 if (type && INTEGRAL_TYPE_P (type) && POINTER_TYPE_P (type))
1504 return NULL_RTX;
1505
1506 if (type && AGGREGATE_TYPE_P (type))
1507 return NULL_RTX;
1508
1509 switch (ca->parm_num)
1510 {
1511 case 1:
1512 if (GET_MODE_SIZE (mode) == 1 || GET_MODE_SIZE (mode) == 2)
1513 rv = gen_rtx_REG (mode, TARGET_A16 ? R1_REGNO : R0_REGNO);
1514 break;
1515
1516 case 2:
1517 if (TARGET_A16 && GET_MODE_SIZE (mode) == 2)
1518 rv = gen_rtx_REG (mode, R2_REGNO);
1519 break;
1520 }
1521
1522#if DEBUG0
1523 debug_rtx (rv);
1524#endif
1525 return rv;
1526}
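/* Example (illustrative, not from the original source): for
   "int f (int a, int b, int c)", int being HImode here, an A16 part
   passes a in r1 and b in r2 with c on the stack, while an A24 part
   passes only a in a register, namely r0; aggregates and anything wider
   than two bytes always go on the stack.  */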
1527
1528#undef TARGET_PASS_BY_REFERENCE
1529#define TARGET_PASS_BY_REFERENCE m32c_pass_by_reference
1530static bool
1531m32c_pass_by_reference (CUMULATIVE_ARGS * ca ATTRIBUTE_UNUSED,
1532 enum machine_mode mode ATTRIBUTE_UNUSED,
 1533 const_tree type ATTRIBUTE_UNUSED,
1534 bool named ATTRIBUTE_UNUSED)
1535{
1536 return 0;
1537}
1538
1539/* Implements INIT_CUMULATIVE_ARGS. */
1540void
1541m32c_init_cumulative_args (CUMULATIVE_ARGS * ca,
 1542 tree fntype,
 1543 rtx libname ATTRIBUTE_UNUSED,
 1544 tree fndecl,
1545 int n_named_args ATTRIBUTE_UNUSED)
1546{
1547 if (fntype && aggregate_value_p (TREE_TYPE (fntype), fndecl))
1548 ca->force_mem = 1;
1549 else
1550 ca->force_mem = 0;
1551 ca->parm_num = 1;
1552}
1553
1554/* Implements FUNCTION_ARG_ADVANCE. force_mem is set for functions
1555 returning structures, so we always reset that. Otherwise, we only
1556 need to know the sequence number of the argument to know what to do
1557 with it. */
1558void
1559m32c_function_arg_advance (CUMULATIVE_ARGS * ca,
1560 enum machine_mode mode ATTRIBUTE_UNUSED,
1561 tree type ATTRIBUTE_UNUSED,
1562 int named ATTRIBUTE_UNUSED)
1563{
1564 if (ca->force_mem)
1565 ca->force_mem = 0;
1566 else
1567 ca->parm_num++;
1568}
1569
1570/* Implements FUNCTION_ARG_REGNO_P. */
1571int
1572m32c_function_arg_regno_p (int r)
1573{
1574 if (TARGET_A24)
1575 return (r == R0_REGNO);
1576 return (r == R1_REGNO || r == R2_REGNO);
1577}
1578
 1579/* HImode and PSImode are the two "native" modes as far as GCC is
 1580 concerned, but the chips also support a 32-bit mode which is used
1581 for some opcodes in R8C/M16C and for reset vectors and such. */
1582#undef TARGET_VALID_POINTER_MODE
1583#define TARGET_VALID_POINTER_MODE m32c_valid_pointer_mode
 1584static bool
1585m32c_valid_pointer_mode (enum machine_mode mode)
1586{
1587 if (mode == HImode
1588 || mode == PSImode
1589 || mode == SImode
1590 )
1591 return 1;
1592 return 0;
1593}
1594
1595/* How Scalar Function Values Are Returned */
1596
1597/* Implements LIBCALL_VALUE. Most values are returned in $r0, or some
1598 combination of registers starting there (r2r0 for longs, r3r1r2r0
1599 for long long, r3r2r1r0 for doubles), except that that ABI
1600 currently doesn't work because it ends up using all available
1601 general registers and gcc often can't compile it. So, instead, we
1602 return anything bigger than 16 bits in "mem0" (effectively, a
1603 memory location). */
1604rtx
1605m32c_libcall_value (enum machine_mode mode)
1606{
1607 /* return reg or parallel */
1608#if 0
1609 /* FIXME: GCC has difficulty returning large values in registers,
1610 because that ties up most of the general registers and gives the
1611 register allocator little to work with. Until we can resolve
1612 this, large values are returned in memory. */
1613 if (mode == DFmode)
1614 {
1615 rtx rv;
1616
1617 rv = gen_rtx_PARALLEL (mode, rtvec_alloc (4));
1618 XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
1619 gen_rtx_REG (HImode,
1620 R0_REGNO),
1621 GEN_INT (0));
1622 XVECEXP (rv, 0, 1) = gen_rtx_EXPR_LIST (VOIDmode,
1623 gen_rtx_REG (HImode,
1624 R1_REGNO),
1625 GEN_INT (2));
1626 XVECEXP (rv, 0, 2) = gen_rtx_EXPR_LIST (VOIDmode,
1627 gen_rtx_REG (HImode,
1628 R2_REGNO),
1629 GEN_INT (4));
1630 XVECEXP (rv, 0, 3) = gen_rtx_EXPR_LIST (VOIDmode,
1631 gen_rtx_REG (HImode,
1632 R3_REGNO),
1633 GEN_INT (6));
1634 return rv;
1635 }
1636
1637 if (TARGET_A24 && GET_MODE_SIZE (mode) > 2)
1638 {
1639 rtx rv;
1640
1641 rv = gen_rtx_PARALLEL (mode, rtvec_alloc (1));
1642 XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
1643 gen_rtx_REG (mode,
1644 R0_REGNO),
1645 GEN_INT (0));
1646 return rv;
1647 }
1648#endif
1649
1650 if (GET_MODE_SIZE (mode) > 2)
1651 return gen_rtx_REG (mode, MEM0_REGNO);
1652 return gen_rtx_REG (mode, R0_REGNO);
1653}
1654
1655/* Implements FUNCTION_VALUE. Functions and libcalls have the same
1656 conventions. */
1657rtx
 1658m32c_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED)
1659{
1660 /* return reg or parallel */
 1661 const enum machine_mode mode = TYPE_MODE (valtype);
1662 return m32c_libcall_value (mode);
1663}
1664
1665/* How Large Values Are Returned */
1666
1667/* We return structures by pushing the address on the stack, even if
1668 we use registers for the first few "real" arguments. */
1669#undef TARGET_STRUCT_VALUE_RTX
1670#define TARGET_STRUCT_VALUE_RTX m32c_struct_value_rtx
1671static rtx
1672m32c_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
1673 int incoming ATTRIBUTE_UNUSED)
1674{
1675 return 0;
1676}
1677
1678/* Function Entry and Exit */
1679
1680/* Implements EPILOGUE_USES. Interrupts restore all registers. */
1681int
1682m32c_epilogue_uses (int regno ATTRIBUTE_UNUSED)
1683{
1684 if (cfun->machine->is_interrupt)
1685 return 1;
1686 return 0;
1687}
1688
1689/* Implementing the Varargs Macros */
1690
1691#undef TARGET_STRICT_ARGUMENT_NAMING
1692#define TARGET_STRICT_ARGUMENT_NAMING m32c_strict_argument_naming
1693static bool
1694m32c_strict_argument_naming (CUMULATIVE_ARGS * ca ATTRIBUTE_UNUSED)
1695{
1696 return 1;
1697}
1698
1699/* Trampolines for Nested Functions */
1700
1701/*
1702 m16c:
1703 1 0000 75C43412 mov.w #0x1234,a0
1704 2 0004 FC000000 jmp.a label
1705
1706 m32c:
1707 1 0000 BC563412 mov.l:s #0x123456,a0
1708 2 0004 CC000000 jmp.a label
1709*/
1710
1711/* Implements TRAMPOLINE_SIZE. */
1712int
1713m32c_trampoline_size (void)
1714{
1715 /* Allocate extra space so we can avoid the messy shifts when we
1716 initialize the trampoline; we just write past the end of the
1717 opcode. */
1718 return TARGET_A16 ? 8 : 10;
1719}
1720
1721/* Implements TRAMPOLINE_ALIGNMENT. */
1722int
1723m32c_trampoline_alignment (void)
1724{
1725 return 2;
1726}
1727
1728/* Implements INITIALIZE_TRAMPOLINE. */
1729void
1730m32c_initialize_trampoline (rtx tramp, rtx function, rtx chainval)
1731{
1732#define A0(m,i) gen_rtx_MEM (m, plus_constant (tramp, i))
1733 if (TARGET_A16)
1734 {
1735 /* Note: we subtract a "word" because the moves want signed
1736 constants, not unsigned constants. */
1737 emit_move_insn (A0 (HImode, 0), GEN_INT (0xc475 - 0x10000));
1738 emit_move_insn (A0 (HImode, 2), chainval);
1739 emit_move_insn (A0 (QImode, 4), GEN_INT (0xfc - 0x100));
1740 /* We use 16-bit addresses here, but store the zero to turn it
1741 into a 24-bit offset. */
1742 emit_move_insn (A0 (HImode, 5), function);
1743 emit_move_insn (A0 (QImode, 7), GEN_INT (0x00));
1744 }
1745 else
1746 {
1747 /* Note that the PSI moves actually write 4 bytes. Make sure we
1748 write stuff out in the right order, and leave room for the
1749 extra byte at the end. */
1750 emit_move_insn (A0 (QImode, 0), GEN_INT (0xbc - 0x100));
1751 emit_move_insn (A0 (PSImode, 1), chainval);
1752 emit_move_insn (A0 (QImode, 4), GEN_INT (0xcc - 0x100));
1753 emit_move_insn (A0 (PSImode, 5), function);
1754 }
1755#undef A0
1756}
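/* Illustrative byte layout (added for exposition, not part of the
   original source): on an A16 part the initialized trampoline holds
     75 C4 <chain.lo> <chain.hi>      mov.w #chain,a0
     FC <func.lo> <func.hi> 00        jmp.a function
   matching the opcodes in the comment above; the A24 variant instead
   writes the BC (mov.l:s) and CC (jmp.a) forms with a 3-byte chain.  */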
1757
1758/* Implicit Calls to Library Routines */
1759
1760#undef TARGET_INIT_LIBFUNCS
1761#define TARGET_INIT_LIBFUNCS m32c_init_libfuncs
1762static void
1763m32c_init_libfuncs (void)
1764{
1765 if (TARGET_A24)
1766 {
1767 /* We do this because the M32C has an HImode operand, but the
 1768 M16C has an 8-bit operand. Since gcc looks at the match data
1769 and not the expanded rtl, we have to reset the array so that
1770 the right modes are found. */
1771 setcc_gen_code[EQ] = CODE_FOR_seq_24;
1772 setcc_gen_code[NE] = CODE_FOR_sne_24;
1773 setcc_gen_code[GT] = CODE_FOR_sgt_24;
1774 setcc_gen_code[GE] = CODE_FOR_sge_24;
1775 setcc_gen_code[LT] = CODE_FOR_slt_24;
1776 setcc_gen_code[LE] = CODE_FOR_sle_24;
1777 setcc_gen_code[GTU] = CODE_FOR_sgtu_24;
1778 setcc_gen_code[GEU] = CODE_FOR_sgeu_24;
1779 setcc_gen_code[LTU] = CODE_FOR_sltu_24;
1780 setcc_gen_code[LEU] = CODE_FOR_sleu_24;
1781 }
1782}
1783
1784/* Addressing Modes */
1785
1786/* Used by GO_IF_LEGITIMATE_ADDRESS. The r8c/m32c family supports a
1787 wide range of non-orthogonal addressing modes, including the
1788 ability to double-indirect on *some* of them. Not all insns
1789 support all modes, either, but we rely on predicates and
1790 constraints to deal with that. */
1791int
1792m32c_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
1793{
1794 int mode_adjust;
1795 if (CONSTANT_P (x))
1796 return 1;
1797
1798 /* Wide references to memory will be split after reload, so we must
1799 ensure that all parts of such splits remain legitimate
1800 addresses. */
1801 mode_adjust = GET_MODE_SIZE (mode) - 1;
1802
1803 /* allowing PLUS yields mem:HI(plus:SI(mem:SI(plus:SI in m32c_split_move */
1804 if (GET_CODE (x) == PRE_DEC
1805 || GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_MODIFY)
1806 {
1807 return (GET_CODE (XEXP (x, 0)) == REG
1808 && REGNO (XEXP (x, 0)) == SP_REGNO);
1809 }
1810
1811#if 0
1812 /* This is the double indirection detection, but it currently
1813 doesn't work as cleanly as this code implies, so until we've had
1814 a chance to debug it, leave it disabled. */
1815 if (TARGET_A24 && GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) != PLUS)
1816 {
1817#if DEBUG_DOUBLE
1818 fprintf (stderr, "double indirect\n");
1819#endif
1820 x = XEXP (x, 0);
1821 }
1822#endif
1823
1824 encode_pattern (x);
1825 if (RTX_IS ("r"))
1826 {
1827 /* Most indexable registers can be used without displacements,
1828 although some of them will be emitted with an explicit zero
1829 to please the assembler. */
1830 switch (REGNO (patternr[0]))
1831 {
1832 case A0_REGNO:
1833 case A1_REGNO:
1834 case SB_REGNO:
1835 case FB_REGNO:
1836 case SP_REGNO:
1837 return 1;
1838
1839 default:
1840 if (IS_PSEUDO (patternr[0], strict))
1841 return 1;
1842 return 0;
1843 }
1844 }
1845 if (RTX_IS ("+ri"))
1846 {
1847 /* This is more interesting, because different base registers
1848 allow for different displacements - both range and signedness
1849 - and it differs from chip series to chip series too. */
1850 int rn = REGNO (patternr[1]);
1851 HOST_WIDE_INT offs = INTVAL (patternr[2]);
1852 switch (rn)
1853 {
1854 case A0_REGNO:
1855 case A1_REGNO:
1856 case SB_REGNO:
1857 /* The syntax only allows positive offsets, but when the
1858 offsets span the entire memory range, we can simulate
1859 negative offsets by wrapping. */
1860 if (TARGET_A16)
1861 return (offs >= -65536 && offs <= 65535 - mode_adjust);
1862 if (rn == SB_REGNO)
1863 return (offs >= 0 && offs <= 65535 - mode_adjust);
1864 /* A0 or A1 */
1865 return (offs >= -16777216 && offs <= 16777215);
1866
1867 case FB_REGNO:
1868 if (TARGET_A16)
1869 return (offs >= -128 && offs <= 127 - mode_adjust);
1870 return (offs >= -65536 && offs <= 65535 - mode_adjust);
1871
1872 case SP_REGNO:
1873 return (offs >= -128 && offs <= 127 - mode_adjust);
1874
1875 default:
1876 if (IS_PSEUDO (patternr[1], strict))
1877 return 1;
1878 return 0;
1879 }
1880 }
1881 if (RTX_IS ("+rs") || RTX_IS ("+r+si"))
1882 {
1883 rtx reg = patternr[1];
1884
1885 /* We don't know where the symbol is, so only allow base
1886 registers which support displacements spanning the whole
1887 address range. */
1888 switch (REGNO (reg))
1889 {
1890 case A0_REGNO:
1891 case A1_REGNO:
1892 /* $sb needs a secondary reload, but since it's involved in
1893 memory address reloads too, we don't deal with it very
1894 well. */
1895 /* case SB_REGNO: */
1896 return 1;
1897 default:
1898 if (IS_PSEUDO (reg, strict))
1899 return 1;
1900 return 0;
1901 }
1902 }
1903 return 0;
1904}
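/* Examples (illustrative, not from the original source): on an A16 part
   an HImode access through (plus (reg $fb) (const_int -4)) is
   legitimate because the offset lies inside the signed 8-bit window,
   $sp-relative addresses get the same small window on every family, and
   symbolic displacements are only accepted when the base register is
   $a0 or $a1.  */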
1905
1906/* Implements REG_OK_FOR_BASE_P. */
1907int
1908m32c_reg_ok_for_base_p (rtx x, int strict)
1909{
1910 if (GET_CODE (x) != REG)
1911 return 0;
1912 switch (REGNO (x))
1913 {
1914 case A0_REGNO:
1915 case A1_REGNO:
1916 case SB_REGNO:
1917 case FB_REGNO:
1918 case SP_REGNO:
1919 return 1;
1920 default:
1921 if (IS_PSEUDO (x, strict))
1922 return 1;
1923 return 0;
1924 }
1925}
1926
 1927/* We have three choices for choosing fb->aN offsets. If we choose -128,
 1928 we need one MOVA -128[fb],aN opcode and 16-bit aN displacements,
1929 like this:
1930 EB 4B FF mova -128[$fb],$a0
1931 D8 0C FF FF mov.w:Q #0,-1[$a0]
1932
 1933 Alternately, we subtract the frame size, and hopefully use 8-bit aN
1934 displacements:
1935 7B F4 stc $fb,$a0
1936 77 54 00 01 sub #256,$a0
1937 D8 08 01 mov.w:Q #0,1[$a0]
1938
1939 If we don't offset (i.e. offset by zero), we end up with:
1940 7B F4 stc $fb,$a0
1941 D8 0C 00 FF mov.w:Q #0,-256[$a0]
1942
1943 We have to subtract *something* so that we have a PLUS rtx to mark
1944 that we've done this reload. The -128 offset will never result in
 1945 an 8-bit aN offset, and the payoff for the second case is five
1946 loads *if* those loads are within 256 bytes of the other end of the
1947 frame, so the third case seems best. Note that we subtract the
1948 zero, but detect that in the addhi3 pattern. */
1949
1950#define BIG_FB_ADJ 0
1951
1952/* Implements LEGITIMIZE_ADDRESS. The only address we really have to
1953 worry about is frame base offsets, as $fb has a limited
1954 displacement range. We deal with this by attempting to reload $fb
1955 itself into an address register; that seems to result in the best
1956 code. */
1957int
1958m32c_legitimize_address (rtx * x ATTRIBUTE_UNUSED,
1959 rtx oldx ATTRIBUTE_UNUSED,
1960 enum machine_mode mode ATTRIBUTE_UNUSED)
1961{
1962#if DEBUG0
1963 fprintf (stderr, "m32c_legitimize_address for mode %s\n", mode_name[mode]);
1964 debug_rtx (*x);
1965 fprintf (stderr, "\n");
1966#endif
1967
1968 if (GET_CODE (*x) == PLUS
1969 && GET_CODE (XEXP (*x, 0)) == REG
1970 && REGNO (XEXP (*x, 0)) == FB_REGNO
1971 && GET_CODE (XEXP (*x, 1)) == CONST_INT
1972 && (INTVAL (XEXP (*x, 1)) < -128
1973 || INTVAL (XEXP (*x, 1)) > (128 - GET_MODE_SIZE (mode))))
1974 {
1975 /* reload FB to A_REGS */
38b2d076
DD
1976 rtx temp = gen_reg_rtx (Pmode);
1977 *x = copy_rtx (*x);
04aff2c0 1978 emit_insn (gen_rtx_SET (VOIDmode, temp, XEXP (*x, 0)));
38b2d076
DD
1979 XEXP (*x, 0) = temp;
1980 return 1;
1981 }
1982
1983 return 0;
1984}
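/* As a concrete sketch of the transformation above (the mode and
   offset are invented for illustration): given an address such as

       (plus (reg:PSI fb) (const_int 200))

   for a QImode access, 200 is outside the allowed $fb window, so the
   code copies $fb into a fresh pseudo and rewrites the address to

       (plus (reg:PSI <new pseudo>) (const_int 200))

   returning 1; register allocation is then expected to place the
   pseudo in a0/a1, which can carry the larger displacement.  */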
1985
1986/* Implements LEGITIMIZE_RELOAD_ADDRESS. See comment above. */
1987int
1988m32c_legitimize_reload_address (rtx * x,
1989 enum machine_mode mode,
1990 int opnum,
1991 int type, int ind_levels ATTRIBUTE_UNUSED)
1992{
1993#if DEBUG0
1994 fprintf (stderr, "\nm32c_legitimize_reload_address for mode %s\n",
1995 mode_name[mode]);
1996 debug_rtx (*x);
1997#endif
1998
1999 /* At one point, this function tried to get $fb copied to an address
2000 register, which in theory would maximize sharing, but gcc was
2001 *also* still trying to reload the whole address, and we'd run out
2002 of address registers. So we let gcc do the naive (but safe)
2003 reload instead, when the above function doesn't handle it for
04aff2c0
DD
2004 us.
2005
2006 The code below is a second attempt at the above. */
2007
2008 if (GET_CODE (*x) == PLUS
2009 && GET_CODE (XEXP (*x, 0)) == REG
2010 && REGNO (XEXP (*x, 0)) == FB_REGNO
2011 && GET_CODE (XEXP (*x, 1)) == CONST_INT
2012 && (INTVAL (XEXP (*x, 1)) < -128
2013 || INTVAL (XEXP (*x, 1)) > (128 - GET_MODE_SIZE (mode))))
2014 {
2015 rtx sum;
2016 int offset = INTVAL (XEXP (*x, 1));
2017 int adjustment = -BIG_FB_ADJ;
2018
2019 sum = gen_rtx_PLUS (Pmode, XEXP (*x, 0),
2020 GEN_INT (adjustment));
2021 *x = gen_rtx_PLUS (Pmode, sum, GEN_INT (offset - adjustment));
2022 if (type == RELOAD_OTHER)
2023 type = RELOAD_FOR_OTHER_ADDRESS;
2024 push_reload (sum, NULL_RTX, &XEXP (*x, 0), NULL,
2025 A_REGS, Pmode, VOIDmode, 0, 0, opnum,
2026 type);
2027 return 1;
2028 }
2029
2030 if (GET_CODE (*x) == PLUS
2031 && GET_CODE (XEXP (*x, 0)) == PLUS
2032 && GET_CODE (XEXP (XEXP (*x, 0), 0)) == REG
2033 && REGNO (XEXP (XEXP (*x, 0), 0)) == FB_REGNO
2034 && GET_CODE (XEXP (XEXP (*x, 0), 1)) == CONST_INT
2035 && GET_CODE (XEXP (*x, 1)) == CONST_INT
2036 )
2037 {
2038 if (type == RELOAD_OTHER)
2039 type = RELOAD_FOR_OTHER_ADDRESS;
2040 push_reload (XEXP (*x, 0), NULL_RTX, &XEXP (*x, 0), NULL,
2041 A_REGS, Pmode, VOIDmode, 0, 0, opnum,
2042 type);
2043 return 1;
2044 }
38b2d076
DD
2045
2046 return 0;
2047}
2048
38b2d076
DD
2049/* Implements LEGITIMATE_CONSTANT_P. We split large constants anyway,
2050 so we can allow anything. */
2051int
2052m32c_legitimate_constant_p (rtx x ATTRIBUTE_UNUSED)
2053{
2054 return 1;
2055}
2056
2057
2058/* Condition Code Status */
2059
2060#undef TARGET_FIXED_CONDITION_CODE_REGS
2061#define TARGET_FIXED_CONDITION_CODE_REGS m32c_fixed_condition_code_regs
2062static bool
2063m32c_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
2064{
2065 *p1 = FLG_REGNO;
2066 *p2 = INVALID_REGNUM;
2067 return true;
2068}
2069
2070/* Describing Relative Costs of Operations */
2071
2072/* Implements REGISTER_MOVE_COST. We make impossible moves
2073 prohibitively expensive, like trying to put QIs in r2/r3 (there are
2074 no opcodes to do that). We also discourage use of mem* registers
2075 since they're really memory. */
2076int
2077m32c_register_move_cost (enum machine_mode mode, int from, int to)
2078{
2079 int cost = COSTS_N_INSNS (3);
2080 int cc = class_contents[from][0] | class_contents[to][0];
2081 /* FIXME: pick real values, but not 2 for now. */
2082 if (mode == QImode && (cc & class_contents[R23_REGS][0]))
2083 {
2084 if (!(cc & ~class_contents[R23_REGS][0]))
2085 cost = COSTS_N_INSNS (1000);
2086 else
2087 cost = COSTS_N_INSNS (80);
2088 }
2089
2090 if (!class_can_hold_mode (from, mode) || !class_can_hold_mode (to, mode))
2091 cost = COSTS_N_INSNS (1000);
2092
2093 if (classes_intersect (from, CR_REGS))
2094 cost += COSTS_N_INSNS (5);
2095
2096 if (classes_intersect (to, CR_REGS))
2097 cost += COSTS_N_INSNS (5);
2098
2099 if (from == MEM_REGS || to == MEM_REGS)
2100 cost += COSTS_N_INSNS (50);
2101 else if (classes_intersect (from, MEM_REGS)
2102 || classes_intersect (to, MEM_REGS))
2103 cost += COSTS_N_INSNS (10);
2104
2105#if DEBUG0
2106 fprintf (stderr, "register_move_cost %s from %s to %s = %d\n",
2107 mode_name[mode], class_names[from], class_names[to], cost);
2108#endif
2109 return cost;
2110}
2111
2112/* Implements MEMORY_MOVE_COST. */
2113int
2114m32c_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2115 int reg_class ATTRIBUTE_UNUSED,
2116 int in ATTRIBUTE_UNUSED)
2117{
2118 /* FIXME: pick real values. */
2119 return COSTS_N_INSNS (10);
2120}
2121
07127a0a
DD
2122/* Here we try to describe the cases where one RTX expands to multiple
 2123 opcodes, so that gcc knows how expensive they are. */
2124#undef TARGET_RTX_COSTS
2125#define TARGET_RTX_COSTS m32c_rtx_costs
2126static bool
f40751dd
JH
2127m32c_rtx_costs (rtx x, int code, int outer_code, int *total,
2128 bool speed ATTRIBUTE_UNUSED)
07127a0a
DD
2129{
2130 switch (code)
2131 {
2132 case REG:
2133 if (REGNO (x) >= MEM0_REGNO && REGNO (x) <= MEM7_REGNO)
2134 *total += COSTS_N_INSNS (500);
2135 else
2136 *total += COSTS_N_INSNS (1);
2137 return true;
2138
2139 case ASHIFT:
2140 case LSHIFTRT:
2141 case ASHIFTRT:
2142 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
2143 {
2144 /* mov.b r1l, r1h */
2145 *total += COSTS_N_INSNS (1);
2146 return true;
2147 }
2148 if (INTVAL (XEXP (x, 1)) > 8
2149 || INTVAL (XEXP (x, 1)) < -8)
2150 {
2151 /* mov.b #N, r1l */
2152 /* mov.b r1l, r1h */
2153 *total += COSTS_N_INSNS (2);
2154 return true;
2155 }
2156 return true;
2157
2158 case LE:
2159 case LEU:
2160 case LT:
2161 case LTU:
2162 case GT:
2163 case GTU:
2164 case GE:
2165 case GEU:
2166 case NE:
2167 case EQ:
2168 if (outer_code == SET)
2169 {
2170 *total += COSTS_N_INSNS (2);
2171 return true;
2172 }
2173 break;
2174
2175 case ZERO_EXTRACT:
2176 {
2177 rtx dest = XEXP (x, 0);
2178 rtx addr = XEXP (dest, 0);
2179 switch (GET_CODE (addr))
2180 {
2181 case CONST_INT:
2182 *total += COSTS_N_INSNS (1);
2183 break;
2184 case SYMBOL_REF:
2185 *total += COSTS_N_INSNS (3);
2186 break;
2187 default:
2188 *total += COSTS_N_INSNS (2);
2189 break;
2190 }
2191 return true;
2192 }
2193 break;
2194
2195 default:
2196 /* Reasonable default. */
2197 if (TARGET_A16 && GET_MODE(x) == SImode)
2198 *total += COSTS_N_INSNS (2);
2199 break;
2200 }
2201 return false;
2202}
2203
2204#undef TARGET_ADDRESS_COST
2205#define TARGET_ADDRESS_COST m32c_address_cost
2206static int
f40751dd 2207m32c_address_cost (rtx addr, bool speed ATTRIBUTE_UNUSED)
07127a0a 2208{
80b093df 2209 int i;
07127a0a
DD
2210 /* fprintf(stderr, "\naddress_cost\n");
2211 debug_rtx(addr);*/
2212 switch (GET_CODE (addr))
2213 {
2214 case CONST_INT:
80b093df
DD
2215 i = INTVAL (addr);
2216 if (i == 0)
2217 return COSTS_N_INSNS(1);
2218 if (0 < i && i <= 255)
2219 return COSTS_N_INSNS(2);
2220 if (0 < i && i <= 65535)
2221 return COSTS_N_INSNS(3);
2222 return COSTS_N_INSNS(4);
07127a0a 2223 case SYMBOL_REF:
80b093df 2224 return COSTS_N_INSNS(4);
07127a0a 2225 case REG:
80b093df
DD
2226 return COSTS_N_INSNS(1);
2227 case PLUS:
2228 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
2229 {
2230 i = INTVAL (XEXP (addr, 1));
2231 if (i == 0)
2232 return COSTS_N_INSNS(1);
2233 if (0 < i && i <= 255)
2234 return COSTS_N_INSNS(2);
2235 if (0 < i && i <= 65535)
2236 return COSTS_N_INSNS(3);
2237 }
2238 return COSTS_N_INSNS(4);
07127a0a
DD
2239 default:
2240 return 0;
2241 }
2242}
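/* A rough illustration of the cost table above (register and offsets
   chosen arbitrarily): a bare base register such as (reg a0) costs
   COSTS_N_INSNS (1); (plus (reg a0) (const_int 3)) costs
   COSTS_N_INSNS (2) because the displacement fits in a byte;
   (plus (reg a0) (const_int 3000)) needs a 16-bit displacement and
   costs COSTS_N_INSNS (3); a bare SYMBOL_REF costs COSTS_N_INSNS (4).  */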
2243
38b2d076
DD
2244/* Defining the Output Assembler Language */
2245
2246/* The Overall Framework of an Assembler File */
2247
2248#undef TARGET_HAVE_NAMED_SECTIONS
2249#define TARGET_HAVE_NAMED_SECTIONS true
2250
2251/* Output of Data */
2252
2253/* We may have 24-bit sizes, which is the native address size.
2254 Currently unused, but provided for completeness. */
2255#undef TARGET_ASM_INTEGER
2256#define TARGET_ASM_INTEGER m32c_asm_integer
2257static bool
2258m32c_asm_integer (rtx x, unsigned int size, int aligned_p)
2259{
2260 switch (size)
2261 {
2262 case 3:
2263 fprintf (asm_out_file, "\t.3byte\t");
2264 output_addr_const (asm_out_file, x);
2265 fputc ('\n', asm_out_file);
2266 return true;
e9555b13
DD
2267 case 4:
2268 if (GET_CODE (x) == SYMBOL_REF)
2269 {
2270 fprintf (asm_out_file, "\t.long\t");
2271 output_addr_const (asm_out_file, x);
2272 fputc ('\n', asm_out_file);
2273 return true;
2274 }
2275 break;
38b2d076
DD
2276 }
2277 return default_assemble_integer (x, size, aligned_p);
2278}
2279
2280/* Output of Assembler Instructions */
2281
a4174ebf 2282/* We use a lookup table because the addressing modes are non-orthogonal. */
38b2d076
DD
2283
2284static struct
2285{
2286 char code;
2287 char const *pattern;
2288 char const *format;
2289}
2290const conversions[] = {
2291 { 0, "r", "0" },
2292
2293 { 0, "mr", "z[1]" },
2294 { 0, "m+ri", "3[2]" },
2295 { 0, "m+rs", "3[2]" },
2296 { 0, "m+r+si", "4+5[2]" },
2297 { 0, "ms", "1" },
2298 { 0, "mi", "1" },
2299 { 0, "m+si", "2+3" },
2300
2301 { 0, "mmr", "[z[2]]" },
2302 { 0, "mm+ri", "[4[3]]" },
2303 { 0, "mm+rs", "[4[3]]" },
2304 { 0, "mm+r+si", "[5+6[3]]" },
2305 { 0, "mms", "[[2]]" },
2306 { 0, "mmi", "[[2]]" },
2307 { 0, "mm+si", "[4[3]]" },
2308
2309 { 0, "i", "#0" },
2310 { 0, "s", "#0" },
2311 { 0, "+si", "#1+2" },
2312 { 0, "l", "#0" },
2313
2314 { 'l', "l", "0" },
2315 { 'd', "i", "0" },
2316 { 'd', "s", "0" },
2317 { 'd', "+si", "1+2" },
2318 { 'D', "i", "0" },
2319 { 'D', "s", "0" },
2320 { 'D', "+si", "1+2" },
2321 { 'x', "i", "#0" },
2322 { 'X', "i", "#0" },
2323 { 'm', "i", "#0" },
2324 { 'b', "i", "#0" },
07127a0a 2325 { 'B', "i", "0" },
38b2d076
DD
2326 { 'p', "i", "0" },
2327
2328 { 0, 0, 0 }
2329};
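/* Worked example of the table above (the operand is made up): for

       (mem:HI (plus (reg:PSI a0) (const_int 4)))

   encode_pattern () produces the string "m+ri", with patternr[2]
   holding the register and patternr[3] the constant.  The matching
   entry's format "3[2]" therefore prints the constant, a literal
   '[', the register name, and a ']', giving "4[a0]".  Similarly the
   "mr" entry's format "z[1]" prints "0[sb]" for (mem (reg sb)),
   because the 'z' directive inserts the mandatory zero displacement
   for sb/fb/sp-based modes.  */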
2330
2331/* This is in order according to the bitfield that pushm/popm use. */
2332static char const *pushm_regs[] = {
2333 "fb", "sb", "a1", "a0", "r3", "r2", "r1", "r0"
2334};
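/* Example of the 'p' operand code that consumes this table (the
   bitfield value is invented): pushm/popm operands are a single
   byte, printed most-significant bit first, so a value of 0xc1
   (bits 7, 6 and 0 set) comes out as "r0,r1,fb".  */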
2335
2336/* Implements PRINT_OPERAND. */
2337void
2338m32c_print_operand (FILE * file, rtx x, int code)
2339{
2340 int i, j, b;
2341 const char *comma;
2342 HOST_WIDE_INT ival;
2343 int unsigned_const = 0;
ff485e71 2344 int force_sign;
38b2d076
DD
2345
2346 /* Multiplies; constants are converted to sign-extended format but
2347 we need unsigned, so 'u' and 'U' tell us what size unsigned we
2348 need. */
2349 if (code == 'u')
2350 {
2351 unsigned_const = 2;
2352 code = 0;
2353 }
2354 if (code == 'U')
2355 {
2356 unsigned_const = 1;
2357 code = 0;
2358 }
2359 /* This one is only for debugging; you can put it in a pattern to
2360 force this error. */
2361 if (code == '!')
2362 {
2363 fprintf (stderr, "dj: unreviewed pattern:");
2364 if (current_output_insn)
2365 debug_rtx (current_output_insn);
2366 gcc_unreachable ();
2367 }
2368 /* PSImode operations are either .w or .l depending on the target. */
2369 if (code == '&')
2370 {
2371 if (TARGET_A16)
2372 fprintf (file, "w");
2373 else
2374 fprintf (file, "l");
2375 return;
2376 }
2377 /* Inverted conditionals. */
2378 if (code == 'C')
2379 {
2380 switch (GET_CODE (x))
2381 {
2382 case LE:
2383 fputs ("gt", file);
2384 break;
2385 case LEU:
2386 fputs ("gtu", file);
2387 break;
2388 case LT:
2389 fputs ("ge", file);
2390 break;
2391 case LTU:
2392 fputs ("geu", file);
2393 break;
2394 case GT:
2395 fputs ("le", file);
2396 break;
2397 case GTU:
2398 fputs ("leu", file);
2399 break;
2400 case GE:
2401 fputs ("lt", file);
2402 break;
2403 case GEU:
2404 fputs ("ltu", file);
2405 break;
2406 case NE:
2407 fputs ("eq", file);
2408 break;
2409 case EQ:
2410 fputs ("ne", file);
2411 break;
2412 default:
2413 gcc_unreachable ();
2414 }
2415 return;
2416 }
2417 /* Regular conditionals. */
2418 if (code == 'c')
2419 {
2420 switch (GET_CODE (x))
2421 {
2422 case LE:
2423 fputs ("le", file);
2424 break;
2425 case LEU:
2426 fputs ("leu", file);
2427 break;
2428 case LT:
2429 fputs ("lt", file);
2430 break;
2431 case LTU:
2432 fputs ("ltu", file);
2433 break;
2434 case GT:
2435 fputs ("gt", file);
2436 break;
2437 case GTU:
2438 fputs ("gtu", file);
2439 break;
2440 case GE:
2441 fputs ("ge", file);
2442 break;
2443 case GEU:
2444 fputs ("geu", file);
2445 break;
2446 case NE:
2447 fputs ("ne", file);
2448 break;
2449 case EQ:
2450 fputs ("eq", file);
2451 break;
2452 default:
2453 gcc_unreachable ();
2454 }
2455 return;
2456 }
2457 /* Used in negsi2 to do HImode ops on the two parts of an SImode
2458 operand. */
2459 if (code == 'h' && GET_MODE (x) == SImode)
2460 {
2461 x = m32c_subreg (HImode, x, SImode, 0);
2462 code = 0;
2463 }
2464 if (code == 'H' && GET_MODE (x) == SImode)
2465 {
2466 x = m32c_subreg (HImode, x, SImode, 2);
2467 code = 0;
2468 }
07127a0a
DD
2469 if (code == 'h' && GET_MODE (x) == HImode)
2470 {
2471 x = m32c_subreg (QImode, x, HImode, 0);
2472 code = 0;
2473 }
2474 if (code == 'H' && GET_MODE (x) == HImode)
2475 {
2476 /* We can't actually represent this as an rtx. Do it here. */
2477 if (GET_CODE (x) == REG)
2478 {
2479 switch (REGNO (x))
2480 {
2481 case R0_REGNO:
2482 fputs ("r0h", file);
2483 return;
2484 case R1_REGNO:
2485 fputs ("r1h", file);
2486 return;
2487 default:
2488 gcc_unreachable();
2489 }
2490 }
2491 /* This should be a MEM. */
2492 x = m32c_subreg (QImode, x, HImode, 1);
2493 code = 0;
2494 }
2495 /* This is for BMcond, which always wants word register names. */
2496 if (code == 'h' && GET_MODE (x) == QImode)
2497 {
2498 if (GET_CODE (x) == REG)
2499 x = gen_rtx_REG (HImode, REGNO (x));
2500 code = 0;
2501 }
38b2d076
DD
2502 /* 'x' and 'X' need to be ignored for non-immediates. */
2503 if ((code == 'x' || code == 'X') && GET_CODE (x) != CONST_INT)
2504 code = 0;
2505
2506 encode_pattern (x);
ff485e71 2507 force_sign = 0;
38b2d076
DD
2508 for (i = 0; conversions[i].pattern; i++)
2509 if (conversions[i].code == code
2510 && streq (conversions[i].pattern, pattern))
2511 {
2512 for (j = 0; conversions[i].format[j]; j++)
2513 /* backslash quotes the next character in the output pattern. */
2514 if (conversions[i].format[j] == '\\')
2515 {
2516 fputc (conversions[i].format[j + 1], file);
2517 j++;
2518 }
2519 /* Digits in the output pattern indicate that the
2520 corresponding RTX is to be output at that point. */
2521 else if (ISDIGIT (conversions[i].format[j]))
2522 {
2523 rtx r = patternr[conversions[i].format[j] - '0'];
2524 switch (GET_CODE (r))
2525 {
2526 case REG:
2527 fprintf (file, "%s",
2528 reg_name_with_mode (REGNO (r), GET_MODE (r)));
2529 break;
2530 case CONST_INT:
2531 switch (code)
2532 {
2533 case 'b':
07127a0a
DD
2534 case 'B':
2535 {
2536 int v = INTVAL (r);
2537 int i = (int) exact_log2 (v);
2538 if (i == -1)
2539 i = (int) exact_log2 ((v ^ 0xffff) & 0xffff);
2540 if (i == -1)
2541 i = (int) exact_log2 ((v ^ 0xff) & 0xff);
2542 /* Bit position. */
2543 fprintf (file, "%d", i);
2544 }
38b2d076
DD
2545 break;
2546 case 'x':
2547 /* Unsigned byte. */
2548 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2549 INTVAL (r) & 0xff);
2550 break;
2551 case 'X':
2552 /* Unsigned word. */
2553 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2554 INTVAL (r) & 0xffff);
2555 break;
2556 case 'p':
2557 /* pushm and popm encode a register set into a single byte. */
2558 comma = "";
2559 for (b = 7; b >= 0; b--)
2560 if (INTVAL (r) & (1 << b))
2561 {
2562 fprintf (file, "%s%s", comma, pushm_regs[b]);
2563 comma = ",";
2564 }
2565 break;
2566 case 'm':
2567 /* "Minus". Output -X */
2568 ival = (-INTVAL (r) & 0xffff);
2569 if (ival & 0x8000)
2570 ival = ival - 0x10000;
2571 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2572 break;
2573 default:
2574 ival = INTVAL (r);
2575 if (conversions[i].format[j + 1] == '[' && ival < 0)
2576 {
2577 /* We can simulate negative displacements by
2578 taking advantage of address space
2579 wrapping when the offset can span the
2580 entire address range. */
2581 rtx base =
2582 patternr[conversions[i].format[j + 2] - '0'];
2583 if (GET_CODE (base) == REG)
2584 switch (REGNO (base))
2585 {
2586 case A0_REGNO:
2587 case A1_REGNO:
2588 if (TARGET_A24)
2589 ival = 0x1000000 + ival;
2590 else
2591 ival = 0x10000 + ival;
2592 break;
2593 case SB_REGNO:
2594 if (TARGET_A16)
2595 ival = 0x10000 + ival;
2596 break;
2597 }
2598 }
2599 else if (code == 'd' && ival < 0 && j == 0)
2600 /* The "mova" opcode is used to do addition by
2601 computing displacements, but again, we need
2602 displacements to be unsigned *if* they're
2603 the only component of the displacement
2604 (i.e. no "symbol-4" type displacement). */
2605 ival = (TARGET_A24 ? 0x1000000 : 0x10000) + ival;
2606
2607 if (conversions[i].format[j] == '0')
2608 {
2609 /* More conversions to unsigned. */
2610 if (unsigned_const == 2)
2611 ival &= 0xffff;
2612 if (unsigned_const == 1)
2613 ival &= 0xff;
2614 }
2615 if (streq (conversions[i].pattern, "mi")
2616 || streq (conversions[i].pattern, "mmi"))
2617 {
2618 /* Integers used as addresses are unsigned. */
2619 ival &= (TARGET_A24 ? 0xffffff : 0xffff);
2620 }
ff485e71
DD
2621 if (force_sign && ival >= 0)
2622 fputc ('+', file);
38b2d076
DD
2623 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2624 break;
2625 }
2626 break;
2627 case CONST_DOUBLE:
2628 /* We don't have const_double constants. If it
2629 happens, make it obvious. */
2630 fprintf (file, "[const_double 0x%lx]",
2631 (unsigned long) CONST_DOUBLE_HIGH (r));
2632 break;
2633 case SYMBOL_REF:
2634 assemble_name (file, XSTR (r, 0));
2635 break;
2636 case LABEL_REF:
2637 output_asm_label (r);
2638 break;
2639 default:
2640 fprintf (stderr, "don't know how to print this operand:");
2641 debug_rtx (r);
2642 gcc_unreachable ();
2643 }
2644 }
2645 else
2646 {
2647 if (conversions[i].format[j] == 'z')
2648 {
2649 /* Some addressing modes *must* have a displacement,
2650 so insert a zero here if needed. */
2651 int k;
2652 for (k = j + 1; conversions[i].format[k]; k++)
2653 if (ISDIGIT (conversions[i].format[k]))
2654 {
2655 rtx reg = patternr[conversions[i].format[k] - '0'];
2656 if (GET_CODE (reg) == REG
2657 && (REGNO (reg) == SB_REGNO
2658 || REGNO (reg) == FB_REGNO
2659 || REGNO (reg) == SP_REGNO))
2660 fputc ('0', file);
2661 }
2662 continue;
2663 }
2664 /* Signed displacements off symbols need to have signs
2665 blended cleanly. */
2666 if (conversions[i].format[j] == '+'
ff485e71 2667 && (!code || code == 'D' || code == 'd')
38b2d076 2668 && ISDIGIT (conversions[i].format[j + 1])
ff485e71
DD
2669 && (GET_CODE (patternr[conversions[i].format[j + 1] - '0'])
2670 == CONST_INT))
2671 {
2672 force_sign = 1;
2673 continue;
2674 }
38b2d076
DD
2675 fputc (conversions[i].format[j], file);
2676 }
2677 break;
2678 }
2679 if (!conversions[i].pattern)
2680 {
2681 fprintf (stderr, "unconvertible operand %c `%s'", code ? code : '-',
2682 pattern);
2683 debug_rtx (x);
2684 fprintf (file, "[%c.%s]", code ? code : '-', pattern);
2685 }
2686
2687 return;
2688}
2689
2690/* Implements PRINT_OPERAND_PUNCT_VALID_P. See m32c_print_operand
2691 above for descriptions of what these do. */
2692int
2693m32c_print_operand_punct_valid_p (int c)
2694{
2695 if (c == '&' || c == '!')
2696 return 1;
2697 return 0;
2698}
2699
2700/* Implements PRINT_OPERAND_ADDRESS. Nothing unusual here. */
2701void
2702m32c_print_operand_address (FILE * stream, rtx address)
2703{
2704 gcc_assert (GET_CODE (address) == MEM);
2705 m32c_print_operand (stream, XEXP (address, 0), 0);
2706}
2707
2708/* Implements ASM_OUTPUT_REG_PUSH. Control registers are pushed
2709 differently than general registers. */
2710void
2711m32c_output_reg_push (FILE * s, int regno)
2712{
2713 if (regno == FLG_REGNO)
2714 fprintf (s, "\tpushc\tflg\n");
2715 else
04aff2c0 2716 fprintf (s, "\tpush.%c\t%s\n",
38b2d076
DD
2717 " bwll"[reg_push_size (regno)], reg_names[regno]);
2718}
2719
2720/* Likewise for ASM_OUTPUT_REG_POP. */
2721void
2722m32c_output_reg_pop (FILE * s, int regno)
2723{
2724 if (regno == FLG_REGNO)
2725 fprintf (s, "\tpopc\tflg\n");
2726 else
04aff2c0 2727 fprintf (s, "\tpop.%c\t%s\n",
38b2d076
DD
2728 " bwll"[reg_push_size (regno)], reg_names[regno]);
2729}
2730
2731/* Defining target-specific uses of `__attribute__' */
2732
2733/* Used to simplify the logic below. Find the attributes wherever
2734 they may be. */
2735#define M32C_ATTRIBUTES(decl) \
2736 (TYPE_P (decl)) ? TYPE_ATTRIBUTES (decl) \
2737 : DECL_ATTRIBUTES (decl) \
2738 ? (DECL_ATTRIBUTES (decl)) \
2739 : TYPE_ATTRIBUTES (TREE_TYPE (decl))
2740
2741/* Returns TRUE if the given tree has the "interrupt" attribute. */
2742static int
2743interrupt_p (tree node ATTRIBUTE_UNUSED)
2744{
2745 tree list = M32C_ATTRIBUTES (node);
2746 while (list)
2747 {
2748 if (is_attribute_p ("interrupt", TREE_PURPOSE (list)))
2749 return 1;
2750 list = TREE_CHAIN (list);
2751 }
2752 return 0;
2753}
2754
2755static tree
2756interrupt_handler (tree * node ATTRIBUTE_UNUSED,
2757 tree name ATTRIBUTE_UNUSED,
2758 tree args ATTRIBUTE_UNUSED,
2759 int flags ATTRIBUTE_UNUSED,
2760 bool * no_add_attrs ATTRIBUTE_UNUSED)
2761{
2762 return NULL_TREE;
2763}
2764
5abd2125
JS
2765/* Returns TRUE if given tree has the "function_vector" attribute. */
2766int
2767m32c_special_page_vector_p (tree func)
2768{
2769 if (TREE_CODE (func) != FUNCTION_DECL)
2770 return 0;
2771
2772 tree list = M32C_ATTRIBUTES (func);
2773 while (list)
2774 {
2775 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2776 return 1;
2777 list = TREE_CHAIN (list);
2778 }
2779 return 0;
2780}
2781
2782static tree
2783function_vector_handler (tree * node ATTRIBUTE_UNUSED,
2784 tree name ATTRIBUTE_UNUSED,
2785 tree args ATTRIBUTE_UNUSED,
2786 int flags ATTRIBUTE_UNUSED,
2787 bool * no_add_attrs ATTRIBUTE_UNUSED)
2788{
2789 if (TARGET_R8C)
2790 {
2791 /* The attribute is not supported for the R8C target. */
2792 warning (OPT_Wattributes,
2793 "`%s' attribute is not supported for R8C target",
2794 IDENTIFIER_POINTER (name));
2795 *no_add_attrs = true;
2796 }
2797 else if (TREE_CODE (*node) != FUNCTION_DECL)
2798 {
2799 /* The attribute must be applied to functions only. */
2800 warning (OPT_Wattributes,
2801 "`%s' attribute applies only to functions",
2802 IDENTIFIER_POINTER (name));
2803 *no_add_attrs = true;
2804 }
2805 else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
2806 {
2807 /* The argument must be a constant integer. */
2808 warning (OPT_Wattributes,
2809 "`%s' attribute argument not an integer constant",
2810 IDENTIFIER_POINTER (name));
2811 *no_add_attrs = true;
2812 }
2813 else if (TREE_INT_CST_LOW (TREE_VALUE (args)) < 18
2814 || TREE_INT_CST_LOW (TREE_VALUE (args)) > 255)
2815 {
2816 /* The argument value must be between 18 and 255. */
 2817 warning (OPT_Wattributes,
 2818 "`%s' attribute argument should be between 18 and 255",
2819 IDENTIFIER_POINTER (name));
2820 *no_add_attrs = true;
2821 }
2822 return NULL_TREE;
2823}
2824
2825/* If the function has the 'function_vector' attribute, return its
 2826 function vector number; otherwise return zero. */
2827int
2828current_function_special_page_vector (rtx x)
2829{
2830 int num;
2831
2832 if ((GET_CODE(x) == SYMBOL_REF)
2833 && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
2834 {
2835 tree t = SYMBOL_REF_DECL (x);
2836
2837 if (TREE_CODE (t) != FUNCTION_DECL)
2838 return 0;
2839
2840 tree list = M32C_ATTRIBUTES (t);
2841 while (list)
2842 {
2843 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2844 {
2845 num = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list)));
2846 return num;
2847 }
2848
2849 list = TREE_CHAIN (list);
2850 }
2851
2852 return 0;
2853 }
2854 else
2855 return 0;
2856}
2857
38b2d076
DD
2858#undef TARGET_ATTRIBUTE_TABLE
2859#define TARGET_ATTRIBUTE_TABLE m32c_attribute_table
2860static const struct attribute_spec m32c_attribute_table[] = {
2861 {"interrupt", 0, 0, false, false, false, interrupt_handler},
5abd2125 2862 {"function_vector", 1, 1, true, false, false, function_vector_handler},
38b2d076
DD
2863 {0, 0, 0, 0, 0, 0, 0}
2864};
2865
2866#undef TARGET_COMP_TYPE_ATTRIBUTES
2867#define TARGET_COMP_TYPE_ATTRIBUTES m32c_comp_type_attributes
2868static int
3101faab
KG
2869m32c_comp_type_attributes (const_tree type1 ATTRIBUTE_UNUSED,
2870 const_tree type2 ATTRIBUTE_UNUSED)
38b2d076
DD
2871{
2872 /* 0=incompatible 1=compatible 2=warning */
2873 return 1;
2874}
2875
2876#undef TARGET_INSERT_ATTRIBUTES
2877#define TARGET_INSERT_ATTRIBUTES m32c_insert_attributes
2878static void
2879m32c_insert_attributes (tree node ATTRIBUTE_UNUSED,
2880 tree * attr_ptr ATTRIBUTE_UNUSED)
2881{
2882 /* Nothing to do here. */
2883}
2884
2885/* Predicates */
2886
f9b89438 2887/* This is a list of legal subregs of hard regs. */
67fc44cb
DD
2888static const struct {
2889 unsigned char outer_mode_size;
2890 unsigned char inner_mode_size;
2891 unsigned char byte_mask;
2892 unsigned char legal_when;
f9b89438 2893 unsigned int regno;
f9b89438 2894} legal_subregs[] = {
67fc44cb
DD
2895 {1, 2, 0x03, 1, R0_REGNO}, /* r0h r0l */
2896 {1, 2, 0x03, 1, R1_REGNO}, /* r1h r1l */
2897 {1, 2, 0x01, 1, A0_REGNO},
2898 {1, 2, 0x01, 1, A1_REGNO},
f9b89438 2899
67fc44cb
DD
2900 {1, 4, 0x01, 1, A0_REGNO},
2901 {1, 4, 0x01, 1, A1_REGNO},
f9b89438 2902
67fc44cb
DD
2903 {2, 4, 0x05, 1, R0_REGNO}, /* r2 r0 */
2904 {2, 4, 0x05, 1, R1_REGNO}, /* r3 r1 */
2905 {2, 4, 0x05, 16, A0_REGNO}, /* a1 a0 */
2906 {2, 4, 0x01, 24, A0_REGNO}, /* a1 a0 */
2907 {2, 4, 0x01, 24, A1_REGNO}, /* a1 a0 */
f9b89438 2908
67fc44cb 2909 {4, 8, 0x55, 1, R0_REGNO}, /* r3 r1 r2 r0 */
f9b89438
DD
2910};
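/* Two illustrative lookups in the table above (register choices are
   arbitrary): (subreg:QI (reg:HI r0) 1) turns into the offset bit
   (1 << 1) == 2, which is covered by the {1, 2, 0x03, 1, R0_REGNO}
   entry, so the subreg is legal -- it is simply r0h.  A QImode
   subreg of r2, by contrast, matches no entry at all, so
   m32c_illegal_subreg_p () returns true: r2 has no addressable
   byte halves.  */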
2911
2912/* Returns TRUE if OP is a subreg of a hard reg which we don't
2913 support. */
2914bool
2915m32c_illegal_subreg_p (rtx op)
2916{
f9b89438
DD
2917 int offset;
2918 unsigned int i;
2919 int src_mode, dest_mode;
2920
2921 if (GET_CODE (op) != SUBREG)
2922 return false;
2923
2924 dest_mode = GET_MODE (op);
2925 offset = SUBREG_BYTE (op);
2926 op = SUBREG_REG (op);
2927 src_mode = GET_MODE (op);
2928
2929 if (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (src_mode))
2930 return false;
2931 if (GET_CODE (op) != REG)
2932 return false;
2933 if (REGNO (op) >= MEM0_REGNO)
2934 return false;
2935
2936 offset = (1 << offset);
2937
67fc44cb 2938 for (i = 0; i < ARRAY_SIZE (legal_subregs); i ++)
f9b89438
DD
2939 if (legal_subregs[i].outer_mode_size == GET_MODE_SIZE (dest_mode)
2940 && legal_subregs[i].regno == REGNO (op)
2941 && legal_subregs[i].inner_mode_size == GET_MODE_SIZE (src_mode)
2942 && legal_subregs[i].byte_mask & offset)
2943 {
2944 switch (legal_subregs[i].legal_when)
2945 {
2946 case 1:
2947 return false;
2948 case 16:
2949 if (TARGET_A16)
2950 return false;
2951 break;
2952 case 24:
2953 if (TARGET_A24)
2954 return false;
2955 break;
2956 }
2957 }
2958 return true;
2959}
2960
38b2d076
DD
2961/* Returns TRUE if we support a move between the first two operands.
2962 At the moment, we just want to discourage mem to mem moves until
2963 after reload, because reload has a hard time with our limited
2964 number of address registers, and we can get into a situation where
2965 we need three of them when we only have two. */
2966bool
2967m32c_mov_ok (rtx * operands, enum machine_mode mode ATTRIBUTE_UNUSED)
2968{
2969 rtx op0 = operands[0];
2970 rtx op1 = operands[1];
2971
2972 if (TARGET_A24)
2973 return true;
2974
2975#define DEBUG_MOV_OK 0
2976#if DEBUG_MOV_OK
2977 fprintf (stderr, "m32c_mov_ok %s\n", mode_name[mode]);
2978 debug_rtx (op0);
2979 debug_rtx (op1);
2980#endif
2981
2982 if (GET_CODE (op0) == SUBREG)
2983 op0 = XEXP (op0, 0);
2984 if (GET_CODE (op1) == SUBREG)
2985 op1 = XEXP (op1, 0);
2986
2987 if (GET_CODE (op0) == MEM
2988 && GET_CODE (op1) == MEM
2989 && ! reload_completed)
2990 {
2991#if DEBUG_MOV_OK
2992 fprintf (stderr, " - no, mem to mem\n");
2993#endif
2994 return false;
2995 }
2996
2997#if DEBUG_MOV_OK
2998 fprintf (stderr, " - ok\n");
2999#endif
3000 return true;
3001}
3002
ff485e71
DD
3003/* Returns TRUE if two consecutive HImode mov instructions, generated
 3004 to move an immediate double-word value into a double-word memory
 3005 location, can be combined into a single SImode mov instruction. */
3006bool
3007m32c_immd_dbl_mov (rtx * operands,
3008 enum machine_mode mode ATTRIBUTE_UNUSED)
3009{
3010 int flag = 0, okflag = 0, offset1 = 0, offset2 = 0, offsetsign = 0;
3011 const char *str1;
3012 const char *str2;
3013
3014 if (GET_CODE (XEXP (operands[0], 0)) == SYMBOL_REF
3015 && MEM_SCALAR_P (operands[0])
3016 && !MEM_IN_STRUCT_P (operands[0])
3017 && GET_CODE (XEXP (operands[2], 0)) == CONST
3018 && GET_CODE (XEXP (XEXP (operands[2], 0), 0)) == PLUS
3019 && GET_CODE (XEXP (XEXP (XEXP (operands[2], 0), 0), 0)) == SYMBOL_REF
3020 && GET_CODE (XEXP (XEXP (XEXP (operands[2], 0), 0), 1)) == CONST_INT
3021 && MEM_SCALAR_P (operands[2])
3022 && !MEM_IN_STRUCT_P (operands[2]))
3023 flag = 1;
3024
3025 else if (GET_CODE (XEXP (operands[0], 0)) == CONST
3026 && GET_CODE (XEXP (XEXP (operands[0], 0), 0)) == PLUS
3027 && GET_CODE (XEXP (XEXP (XEXP (operands[0], 0), 0), 0)) == SYMBOL_REF
3028 && MEM_SCALAR_P (operands[0])
3029 && !MEM_IN_STRUCT_P (operands[0])
f9f3567e 3030 && !(INTVAL (XEXP (XEXP (XEXP (operands[0], 0), 0), 1)) %4)
ff485e71
DD
3031 && GET_CODE (XEXP (operands[2], 0)) == CONST
3032 && GET_CODE (XEXP (XEXP (operands[2], 0), 0)) == PLUS
3033 && GET_CODE (XEXP (XEXP (XEXP (operands[2], 0), 0), 0)) == SYMBOL_REF
3034 && MEM_SCALAR_P (operands[2])
3035 && !MEM_IN_STRUCT_P (operands[2]))
3036 flag = 2;
3037
3038 else if (GET_CODE (XEXP (operands[0], 0)) == PLUS
3039 && GET_CODE (XEXP (XEXP (operands[0], 0), 0)) == REG
3040 && REGNO (XEXP (XEXP (operands[0], 0), 0)) == FB_REGNO
3041 && GET_CODE (XEXP (XEXP (operands[0], 0), 1)) == CONST_INT
3042 && MEM_SCALAR_P (operands[0])
3043 && !MEM_IN_STRUCT_P (operands[0])
f9f3567e 3044 && !(INTVAL (XEXP (XEXP (operands[0], 0), 1)) %4)
ff485e71
DD
3045 && REGNO (XEXP (XEXP (operands[2], 0), 0)) == FB_REGNO
3046 && GET_CODE (XEXP (XEXP (operands[2], 0), 1)) == CONST_INT
3047 && MEM_SCALAR_P (operands[2])
3048 && !MEM_IN_STRUCT_P (operands[2]))
3049 flag = 3;
3050
3051 else
3052 return false;
3053
3054 switch (flag)
3055 {
3056 case 1:
3057 str1 = XSTR (XEXP (operands[0], 0), 0);
3058 str2 = XSTR (XEXP (XEXP (XEXP (operands[2], 0), 0), 0), 0);
3059 if (strcmp (str1, str2) == 0)
3060 okflag = 1;
3061 else
3062 okflag = 0;
3063 break;
3064 case 2:
3065 str1 = XSTR (XEXP (XEXP (XEXP (operands[0], 0), 0), 0), 0);
3066 str2 = XSTR (XEXP (XEXP (XEXP (operands[2], 0), 0), 0), 0);
3067 if (strcmp(str1,str2) == 0)
3068 okflag = 1;
3069 else
3070 okflag = 0;
3071 break;
3072 case 3:
f9f3567e
DD
3073 offset1 = INTVAL (XEXP (XEXP (operands[0], 0), 1));
3074 offset2 = INTVAL (XEXP (XEXP (operands[2], 0), 1));
ff485e71
DD
3075 offsetsign = offset1 >> ((sizeof (offset1) * 8) -1);
3076 if (((offset2-offset1) == 2) && offsetsign != 0)
3077 okflag = 1;
3078 else
3079 okflag = 0;
3080 break;
3081 default:
3082 okflag = 0;
3083 }
3084
3085 if (okflag == 1)
3086 {
3087 HOST_WIDE_INT val;
3088 operands[4] = gen_rtx_MEM (SImode, XEXP (operands[0], 0));
3089
f9f3567e 3090 val = (INTVAL (operands[3]) << 16) + (INTVAL (operands[1]) & 0xFFFF);
ff485e71
DD
3091 operands[5] = gen_rtx_CONST_INT (VOIDmode, val);
3092
3093 return true;
3094 }
3095
3096 return false;
3097}
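/* A sketch of the case-3 (fb-relative) combination above, with
   invented offsets and constants: two HImode stores of #0x1234 to
   -8[fb] and #0x5678 to -6[fb] pass the offset-difference-of-2 test,
   so operands[4] becomes an SImode MEM at -8[fb] and operands[5]
   becomes (0x5678 << 16) + 0x1234 == 0x56781234 -- a single SImode
   store replaces the pair.  */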
3098
38b2d076
DD
3099/* Expanders */
3100
3101/* Subregs are non-orthogonal for us, because our registers are all
3102 different sizes. */
3103static rtx
3104m32c_subreg (enum machine_mode outer,
3105 rtx x, enum machine_mode inner, int byte)
3106{
3107 int r, nr = -1;
3108
3109 /* Converting MEMs to different types that are the same size, we
3110 just rewrite them. */
3111 if (GET_CODE (x) == SUBREG
3112 && SUBREG_BYTE (x) == 0
3113 && GET_CODE (SUBREG_REG (x)) == MEM
3114 && (GET_MODE_SIZE (GET_MODE (x))
3115 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
3116 {
3117 rtx oldx = x;
3118 x = gen_rtx_MEM (GET_MODE (x), XEXP (SUBREG_REG (x), 0));
3119 MEM_COPY_ATTRIBUTES (x, SUBREG_REG (oldx));
3120 }
3121
3122 /* Push/pop get done as smaller push/pops. */
3123 if (GET_CODE (x) == MEM
3124 && (GET_CODE (XEXP (x, 0)) == PRE_DEC
3125 || GET_CODE (XEXP (x, 0)) == POST_INC))
3126 return gen_rtx_MEM (outer, XEXP (x, 0));
3127 if (GET_CODE (x) == SUBREG
3128 && GET_CODE (XEXP (x, 0)) == MEM
3129 && (GET_CODE (XEXP (XEXP (x, 0), 0)) == PRE_DEC
3130 || GET_CODE (XEXP (XEXP (x, 0), 0)) == POST_INC))
3131 return gen_rtx_MEM (outer, XEXP (XEXP (x, 0), 0));
3132
3133 if (GET_CODE (x) != REG)
3134 return simplify_gen_subreg (outer, x, inner, byte);
3135
3136 r = REGNO (x);
3137 if (r >= FIRST_PSEUDO_REGISTER || r == AP_REGNO)
3138 return simplify_gen_subreg (outer, x, inner, byte);
3139
3140 if (IS_MEM_REGNO (r))
3141 return simplify_gen_subreg (outer, x, inner, byte);
3142
3143 /* This is where the complexities of our register layout are
3144 described. */
3145 if (byte == 0)
3146 nr = r;
3147 else if (outer == HImode)
3148 {
3149 if (r == R0_REGNO && byte == 2)
3150 nr = R2_REGNO;
3151 else if (r == R0_REGNO && byte == 4)
3152 nr = R1_REGNO;
3153 else if (r == R0_REGNO && byte == 6)
3154 nr = R3_REGNO;
3155 else if (r == R1_REGNO && byte == 2)
3156 nr = R3_REGNO;
3157 else if (r == A0_REGNO && byte == 2)
3158 nr = A1_REGNO;
3159 }
3160 else if (outer == SImode)
3161 {
3162 if (r == R0_REGNO && byte == 0)
3163 nr = R0_REGNO;
3164 else if (r == R0_REGNO && byte == 4)
3165 nr = R1_REGNO;
3166 }
3167 if (nr == -1)
3168 {
3169 fprintf (stderr, "m32c_subreg %s %s %d\n",
3170 mode_name[outer], mode_name[inner], byte);
3171 debug_rtx (x);
3172 gcc_unreachable ();
3173 }
3174 return gen_rtx_REG (outer, nr);
3175}
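/* For instance (mode and register chosen for illustration): asking
   for the upper HImode half of an SImode value that lives in r0,
   i.e. m32c_subreg (HImode, <r0>, SImode, 2), yields r2 rather than
   a generic subreg, matching the r2:r0 pairing described in the
   legal_subregs table; byte 0 of the same value is just r0.  */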
3176
3177/* Used to emit move instructions. We split some moves,
3178 and avoid mem-mem moves. */
3179int
3180m32c_prepare_move (rtx * operands, enum machine_mode mode)
3181{
3182 if (TARGET_A16 && mode == PSImode)
3183 return m32c_split_move (operands, mode, 1);
3184 if ((GET_CODE (operands[0]) == MEM)
3185 && (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY))
3186 {
3187 rtx pmv = XEXP (operands[0], 0);
3188 rtx dest_reg = XEXP (pmv, 0);
3189 rtx dest_mod = XEXP (pmv, 1);
3190
3191 emit_insn (gen_rtx_SET (Pmode, dest_reg, dest_mod));
3192 operands[0] = gen_rtx_MEM (mode, dest_reg);
3193 }
b3a13419 3194 if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
38b2d076
DD
3195 operands[1] = copy_to_mode_reg (mode, operands[1]);
3196 return 0;
3197}
3198
3199#define DEBUG_SPLIT 0
3200
3201/* Returns TRUE if the given PSImode move should be split. We split
3202 for all r8c/m16c moves, since it doesn't support them, and for
3203 POP.L as we can only *push* SImode. */
3204int
3205m32c_split_psi_p (rtx * operands)
3206{
3207#if DEBUG_SPLIT
3208 fprintf (stderr, "\nm32c_split_psi_p\n");
3209 debug_rtx (operands[0]);
3210 debug_rtx (operands[1]);
3211#endif
3212 if (TARGET_A16)
3213 {
3214#if DEBUG_SPLIT
3215 fprintf (stderr, "yes, A16\n");
3216#endif
3217 return 1;
3218 }
3219 if (GET_CODE (operands[1]) == MEM
3220 && GET_CODE (XEXP (operands[1], 0)) == POST_INC)
3221 {
3222#if DEBUG_SPLIT
3223 fprintf (stderr, "yes, pop.l\n");
3224#endif
3225 return 1;
3226 }
3227#if DEBUG_SPLIT
3228 fprintf (stderr, "no, default\n");
3229#endif
3230 return 0;
3231}
3232
3233/* Split the given move. SPLIT_ALL is 0 if splitting is optional
3234 (define_expand), 1 if it is not optional (define_insn_and_split),
3235 and 3 for define_split (alternate api). */
3236int
3237m32c_split_move (rtx * operands, enum machine_mode mode, int split_all)
3238{
3239 rtx s[4], d[4];
3240 int parts, si, di, rev = 0;
3241 int rv = 0, opi = 2;
3242 enum machine_mode submode = HImode;
3243 rtx *ops, local_ops[10];
3244
3245 /* define_split modifies the existing operands, but the other two
3246 emit new insns. OPS is where we store the operand pairs, which
3247 we emit later. */
3248 if (split_all == 3)
3249 ops = operands;
3250 else
3251 ops = local_ops;
3252
3253 /* Else HImode. */
3254 if (mode == DImode)
3255 submode = SImode;
3256
3257 /* Before splitting mem-mem moves, force one operand into a
3258 register. */
b3a13419 3259 if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
38b2d076
DD
3260 {
3261#if DEBUG0
3262 fprintf (stderr, "force_reg...\n");
3263 debug_rtx (operands[1]);
3264#endif
3265 operands[1] = force_reg (mode, operands[1]);
3266#if DEBUG0
3267 debug_rtx (operands[1]);
3268#endif
3269 }
3270
3271 parts = 2;
3272
3273#if DEBUG_SPLIT
b3a13419
ILT
3274 fprintf (stderr, "\nsplit_move %d all=%d\n", !can_create_pseudo_p (),
3275 split_all);
38b2d076
DD
3276 debug_rtx (operands[0]);
3277 debug_rtx (operands[1]);
3278#endif
3279
eb5f0c07
DD
3280 /* Note that split_all is not used to select the api after this
3281 point, so it's safe to set it to 3 even with define_insn. */
3282 /* None of the chips can move SI operands to sp-relative addresses,
3283 so we always split those. */
3284 if (m32c_extra_constraint_p (operands[0], 'S', "Ss"))
3285 split_all = 3;
3286
38b2d076
DD
3287 /* We don't need to split these. */
3288 if (TARGET_A24
3289 && split_all != 3
3290 && (mode == SImode || mode == PSImode)
3291 && !(GET_CODE (operands[1]) == MEM
3292 && GET_CODE (XEXP (operands[1], 0)) == POST_INC))
3293 return 0;
3294
3295 /* First, enumerate the subregs we'll be dealing with. */
3296 for (si = 0; si < parts; si++)
3297 {
3298 d[si] =
3299 m32c_subreg (submode, operands[0], mode,
3300 si * GET_MODE_SIZE (submode));
3301 s[si] =
3302 m32c_subreg (submode, operands[1], mode,
3303 si * GET_MODE_SIZE (submode));
3304 }
3305
3306 /* Split pushes by emitting a sequence of smaller pushes. */
3307 if (GET_CODE (d[0]) == MEM && GET_CODE (XEXP (d[0], 0)) == PRE_DEC)
3308 {
3309 for (si = parts - 1; si >= 0; si--)
3310 {
3311 ops[opi++] = gen_rtx_MEM (submode,
3312 gen_rtx_PRE_DEC (Pmode,
3313 gen_rtx_REG (Pmode,
3314 SP_REGNO)));
3315 ops[opi++] = s[si];
3316 }
3317
3318 rv = 1;
3319 }
3320 /* Likewise for pops. */
3321 else if (GET_CODE (s[0]) == MEM && GET_CODE (XEXP (s[0], 0)) == POST_INC)
3322 {
3323 for (di = 0; di < parts; di++)
3324 {
3325 ops[opi++] = d[di];
3326 ops[opi++] = gen_rtx_MEM (submode,
3327 gen_rtx_POST_INC (Pmode,
3328 gen_rtx_REG (Pmode,
3329 SP_REGNO)));
3330 }
3331 rv = 1;
3332 }
3333 else if (split_all)
3334 {
3335 /* if d[di] == s[si] for any di < si, we'll early clobber. */
3336 for (di = 0; di < parts - 1; di++)
3337 for (si = di + 1; si < parts; si++)
3338 if (reg_mentioned_p (d[di], s[si]))
3339 rev = 1;
3340
3341 if (rev)
3342 for (si = 0; si < parts; si++)
3343 {
3344 ops[opi++] = d[si];
3345 ops[opi++] = s[si];
3346 }
3347 else
3348 for (si = parts - 1; si >= 0; si--)
3349 {
3350 ops[opi++] = d[si];
3351 ops[opi++] = s[si];
3352 }
3353 rv = 1;
3354 }
3355 /* Now emit any moves we may have accumulated. */
3356 if (rv && split_all != 3)
3357 {
3358 int i;
3359 for (i = 2; i < opi; i += 2)
3360 emit_move_insn (ops[i], ops[i + 1]);
3361 }
3362 return rv;
3363}
3364
07127a0a
DD
3365/* The m32c has a number of opcodes that act like memcpy, strcmp, and
3366 the like. For the R8C they expect one of the addresses to be in
3367 R1L:An so we need to arrange for that. Otherwise, it's just a
3368 matter of picking out the operands we want and emitting the right
3369 pattern for them. All these expanders, which correspond to
3370 patterns in blkmov.md, must return nonzero if they expand the insn,
3371 or zero if they should FAIL. */
3372
3373/* This is a memset() opcode. All operands are implied, so we need to
3374 arrange for them to be in the right registers. The opcode wants
3375 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3376 the count (HI), and $2 the value (QI). */
3377int
3378m32c_expand_setmemhi(rtx *operands)
3379{
3380 rtx desta, count, val;
3381 rtx desto, counto;
3382
3383 desta = XEXP (operands[0], 0);
3384 count = operands[1];
3385 val = operands[2];
3386
3387 desto = gen_reg_rtx (Pmode);
3388 counto = gen_reg_rtx (HImode);
3389
3390 if (GET_CODE (desta) != REG
3391 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3392 desta = copy_to_mode_reg (Pmode, desta);
3393
3394 /* This looks like an arbitrary restriction, but this is by far the
3395 most common case. For counts 8..14 this actually results in
3396 smaller code with no speed penalty because the half-sized
3397 constant can be loaded with a shorter opcode. */
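/* For example (size and fill value invented): a 10-byte memset with
   value 5 qualifies, so the value is widened to the HImode constant
   0x0505 and the count halved to 5, letting the word-wide variant of
   the opcode do the work.  */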
3398 if (GET_CODE (count) == CONST_INT
3399 && GET_CODE (val) == CONST_INT
3400 && ! (INTVAL (count) & 1)
3401 && (INTVAL (count) > 1)
3402 && (INTVAL (val) <= 7 && INTVAL (val) >= -8))
3403 {
3404 unsigned v = INTVAL (val) & 0xff;
3405 v = v | (v << 8);
3406 count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3407 val = copy_to_mode_reg (HImode, GEN_INT (v));
3408 if (TARGET_A16)
3409 emit_insn (gen_setmemhi_whi_op (desto, counto, val, desta, count));
3410 else
3411 emit_insn (gen_setmemhi_wpsi_op (desto, counto, val, desta, count));
3412 return 1;
3413 }
3414
3415 /* This is the generalized memset() case. */
3416 if (GET_CODE (val) != REG
3417 || REGNO (val) < FIRST_PSEUDO_REGISTER)
3418 val = copy_to_mode_reg (QImode, val);
3419
3420 if (GET_CODE (count) != REG
3421 || REGNO (count) < FIRST_PSEUDO_REGISTER)
3422 count = copy_to_mode_reg (HImode, count);
3423
3424 if (TARGET_A16)
3425 emit_insn (gen_setmemhi_bhi_op (desto, counto, val, desta, count));
3426 else
3427 emit_insn (gen_setmemhi_bpsi_op (desto, counto, val, desta, count));
3428
3429 return 1;
3430}
3431
3432/* This is a memcpy() opcode. All operands are implied, so we need to
3433 arrange for them to be in the right registers. The opcode wants
3434 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3435 is the source (MEM:BLK), and $2 the count (HI). */
3436int
3437m32c_expand_movmemhi(rtx *operands)
3438{
3439 rtx desta, srca, count;
3440 rtx desto, srco, counto;
3441
3442 desta = XEXP (operands[0], 0);
3443 srca = XEXP (operands[1], 0);
3444 count = operands[2];
3445
3446 desto = gen_reg_rtx (Pmode);
3447 srco = gen_reg_rtx (Pmode);
3448 counto = gen_reg_rtx (HImode);
3449
3450 if (GET_CODE (desta) != REG
3451 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3452 desta = copy_to_mode_reg (Pmode, desta);
3453
3454 if (GET_CODE (srca) != REG
3455 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3456 srca = copy_to_mode_reg (Pmode, srca);
3457
3458 /* Similar to setmem, but we don't need to check the value. */
3459 if (GET_CODE (count) == CONST_INT
3460 && ! (INTVAL (count) & 1)
3461 && (INTVAL (count) > 1))
3462 {
3463 count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3464 if (TARGET_A16)
3465 emit_insn (gen_movmemhi_whi_op (desto, srco, counto, desta, srca, count));
3466 else
3467 emit_insn (gen_movmemhi_wpsi_op (desto, srco, counto, desta, srca, count));
3468 return 1;
3469 }
3470
3471 /* This is the generalized memcpy() case. */
3472 if (GET_CODE (count) != REG
3473 || REGNO (count) < FIRST_PSEUDO_REGISTER)
3474 count = copy_to_mode_reg (HImode, count);
3475
3476 if (TARGET_A16)
3477 emit_insn (gen_movmemhi_bhi_op (desto, srco, counto, desta, srca, count));
3478 else
3479 emit_insn (gen_movmemhi_bpsi_op (desto, srco, counto, desta, srca, count));
3480
3481 return 1;
3482}
3483
3484/* This is a stpcpy() opcode. $0 is the destination (MEM:BLK) after
3485 the copy, which should point to the NUL at the end of the string,
3486 $1 is the destination (MEM:BLK), and $2 is the source (MEM:BLK).
3487 Since our opcode leaves the destination pointing *after* the NUL,
3488 we must emit an adjustment. */
3489int
3490m32c_expand_movstr(rtx *operands)
3491{
3492 rtx desta, srca;
3493 rtx desto, srco;
3494
3495 desta = XEXP (operands[1], 0);
3496 srca = XEXP (operands[2], 0);
3497
3498 desto = gen_reg_rtx (Pmode);
3499 srco = gen_reg_rtx (Pmode);
3500
3501 if (GET_CODE (desta) != REG
3502 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3503 desta = copy_to_mode_reg (Pmode, desta);
3504
3505 if (GET_CODE (srca) != REG
3506 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3507 srca = copy_to_mode_reg (Pmode, srca);
3508
3509 emit_insn (gen_movstr_op (desto, srco, desta, srca));
3510 /* desto ends up being a1, which allows this type of add through MOVA. */
3511 emit_insn (gen_addpsi3 (operands[0], desto, GEN_INT (-1)));
3512
3513 return 1;
3514}
3515
3516/* This is a strcmp() opcode. $0 is the destination (HI) which holds
3517 <=>0 depending on the comparison, $1 is one string (MEM:BLK), and
3518 $2 is the other (MEM:BLK). We must do the comparison, and then
3519 convert the flags to a signed integer result. */
3520int
3521m32c_expand_cmpstr(rtx *operands)
3522{
3523 rtx src1a, src2a;
3524
3525 src1a = XEXP (operands[1], 0);
3526 src2a = XEXP (operands[2], 0);
3527
3528 if (GET_CODE (src1a) != REG
3529 || REGNO (src1a) < FIRST_PSEUDO_REGISTER)
3530 src1a = copy_to_mode_reg (Pmode, src1a);
3531
3532 if (GET_CODE (src2a) != REG
3533 || REGNO (src2a) < FIRST_PSEUDO_REGISTER)
3534 src2a = copy_to_mode_reg (Pmode, src2a);
3535
3536 emit_insn (gen_cmpstrhi_op (src1a, src2a, src1a, src2a));
3537 emit_insn (gen_cond_to_int (operands[0]));
3538
3539 return 1;
3540}
3541
3542
23fed240
DD
3543typedef rtx (*shift_gen_func)(rtx, rtx, rtx);
3544
3545static shift_gen_func
3546shift_gen_func_for (int mode, int code)
3547{
3548#define GFF(m,c,f) if (mode == m && code == c) return f
3549 GFF(QImode, ASHIFT, gen_ashlqi3_i);
3550 GFF(QImode, ASHIFTRT, gen_ashrqi3_i);
3551 GFF(QImode, LSHIFTRT, gen_lshrqi3_i);
3552 GFF(HImode, ASHIFT, gen_ashlhi3_i);
3553 GFF(HImode, ASHIFTRT, gen_ashrhi3_i);
3554 GFF(HImode, LSHIFTRT, gen_lshrhi3_i);
3555 GFF(PSImode, ASHIFT, gen_ashlpsi3_i);
3556 GFF(PSImode, ASHIFTRT, gen_ashrpsi3_i);
3557 GFF(PSImode, LSHIFTRT, gen_lshrpsi3_i);
3558 GFF(SImode, ASHIFT, TARGET_A16 ? gen_ashlsi3_16 : gen_ashlsi3_24);
3559 GFF(SImode, ASHIFTRT, TARGET_A16 ? gen_ashrsi3_16 : gen_ashrsi3_24);
3560 GFF(SImode, LSHIFTRT, TARGET_A16 ? gen_lshrsi3_16 : gen_lshrsi3_24);
3561#undef GFF
07127a0a 3562 gcc_unreachable ();
23fed240
DD
3563}
3564
38b2d076
DD
3565/* The m32c only has one shift, but it takes a signed count. GCC
3566 doesn't want this, so we fake it by negating any shift count when
07127a0a
DD
3567 we're pretending to shift the other way. Also, the shift count is
3568 limited to -8..8. It's slightly better to use two shifts for 9..15
3569 than to load the count into r1h, so we do that too. */
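/* To illustrate the constant-count handling below (values invented):
   on an A16 target an HImode left shift by 10 exceeds the +-8 limit,
   so m32c_prepare_shift () emits a shift by 8 into a temporary and
   then a shift by 2, rather than loading the count into r1h.  */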
38b2d076 3570int
23fed240 3571m32c_prepare_shift (rtx * operands, int scale, int shift_code)
38b2d076 3572{
23fed240
DD
3573 enum machine_mode mode = GET_MODE (operands[0]);
3574 shift_gen_func func = shift_gen_func_for (mode, shift_code);
38b2d076 3575 rtx temp;
23fed240
DD
3576
3577 if (GET_CODE (operands[2]) == CONST_INT)
38b2d076 3578 {
23fed240
DD
3579 int maxc = TARGET_A24 && (mode == PSImode || mode == SImode) ? 32 : 8;
3580 int count = INTVAL (operands[2]) * scale;
3581
3582 while (count > maxc)
3583 {
3584 temp = gen_reg_rtx (mode);
3585 emit_insn (func (temp, operands[1], GEN_INT (maxc)));
3586 operands[1] = temp;
3587 count -= maxc;
3588 }
3589 while (count < -maxc)
3590 {
3591 temp = gen_reg_rtx (mode);
3592 emit_insn (func (temp, operands[1], GEN_INT (-maxc)));
3593 operands[1] = temp;
3594 count += maxc;
3595 }
3596 emit_insn (func (operands[0], operands[1], GEN_INT (count)));
3597 return 1;
38b2d076 3598 }
2e160056
DD
3599
3600 temp = gen_reg_rtx (QImode);
38b2d076 3601 if (scale < 0)
2e160056
DD
3602 /* The pattern has a NEG that corresponds to this. */
3603 emit_move_insn (temp, gen_rtx_NEG (QImode, operands[2]));
3604 else if (TARGET_A16 && mode == SImode)
3605 /* We do this because the code below may modify this, we don't
3606 want to modify the origin of this value. */
3607 emit_move_insn (temp, operands[2]);
38b2d076 3608 else
2e160056 3609 /* We'll only use it for the shift, no point emitting a move. */
38b2d076 3610 temp = operands[2];
2e160056 3611
16659fcf 3612 if (TARGET_A16 && GET_MODE_SIZE (mode) == 4)
2e160056
DD
3613 {
3614 /* The m16c has a limit of -16..16 for SI shifts, even when the
3615 shift count is in a register. Since there are so many targets
3616 of these shifts, it's better to expand the RTL here than to
3617 call a helper function.
3618
3619 The resulting code looks something like this:
3620
3621 cmp.b r1h,-16
3622 jge.b 1f
3623 shl.l -16,dest
3624 add.b r1h,16
3625 1f: cmp.b r1h,16
3626 jle.b 1f
3627 shl.l 16,dest
3628 sub.b r1h,16
3629 1f: shl.l r1h,dest
3630
3631 We take advantage of the fact that "negative" shifts are
3632 undefined to skip one of the comparisons. */
3633
3634 rtx count;
833bf445 3635 rtx label, lref, insn, tempvar;
2e160056 3636
16659fcf
DD
3637 emit_move_insn (operands[0], operands[1]);
3638
2e160056
DD
3639 count = temp;
3640 label = gen_label_rtx ();
3641 lref = gen_rtx_LABEL_REF (VOIDmode, label);
3642 LABEL_NUSES (label) ++;
3643
833bf445
DD
3644 tempvar = gen_reg_rtx (mode);
3645
2e160056
DD
3646 if (shift_code == ASHIFT)
3647 {
3648 /* This is a left shift. We only need check positive counts. */
3649 emit_jump_insn (gen_cbranchqi4 (gen_rtx_LE (VOIDmode, 0, 0),
3650 count, GEN_INT (16), label));
833bf445
DD
3651 emit_insn (func (tempvar, operands[0], GEN_INT (8)));
3652 emit_insn (func (operands[0], tempvar, GEN_INT (8)));
2e160056
DD
3653 insn = emit_insn (gen_addqi3 (count, count, GEN_INT (-16)));
3654 emit_label_after (label, insn);
3655 }
3656 else
3657 {
3658 /* This is a right shift. We only need check negative counts. */
3659 emit_jump_insn (gen_cbranchqi4 (gen_rtx_GE (VOIDmode, 0, 0),
3660 count, GEN_INT (-16), label));
833bf445
DD
3661 emit_insn (func (tempvar, operands[0], GEN_INT (-8)));
3662 emit_insn (func (operands[0], tempvar, GEN_INT (-8)));
2e160056
DD
3663 insn = emit_insn (gen_addqi3 (count, count, GEN_INT (16)));
3664 emit_label_after (label, insn);
3665 }
16659fcf
DD
3666 operands[1] = operands[0];
3667 emit_insn (func (operands[0], operands[0], count));
3668 return 1;
2e160056
DD
3669 }
3670
38b2d076
DD
3671 operands[2] = temp;
3672 return 0;
3673}
3674
12ea2512
DD
3675/* The m32c has a limited range of operations that work on PSImode
3676 values; we have to expand to SI, do the math, and truncate back to
3677 PSI. Yes, this is expensive, but hopefully gcc will learn to avoid
3678 those cases. */
3679void
3680m32c_expand_neg_mulpsi3 (rtx * operands)
3681{
3682 /* operands: a = b * i */
3683 rtx temp1; /* b as SI */
07127a0a
DD
3684 rtx scale /* i as SI */;
3685 rtx temp2; /* a*b as SI */
12ea2512
DD
3686
3687 temp1 = gen_reg_rtx (SImode);
3688 temp2 = gen_reg_rtx (SImode);
07127a0a
DD
3689 if (GET_CODE (operands[2]) != CONST_INT)
3690 {
3691 scale = gen_reg_rtx (SImode);
3692 emit_insn (gen_zero_extendpsisi2 (scale, operands[2]));
3693 }
3694 else
3695 scale = copy_to_mode_reg (SImode, operands[2]);
12ea2512
DD
3696
3697 emit_insn (gen_zero_extendpsisi2 (temp1, operands[1]));
07127a0a
DD
3698 temp2 = expand_simple_binop (SImode, MULT, temp1, scale, temp2, 1, OPTAB_LIB);
3699 emit_insn (gen_truncsipsi2 (operands[0], temp2));
12ea2512
DD
3700}
3701
0166ff05
DD
3702static rtx compare_op0, compare_op1;
3703
3704void
3705m32c_pend_compare (rtx *operands)
3706{
3707 compare_op0 = operands[0];
3708 compare_op1 = operands[1];
3709}
3710
3711void
3712m32c_unpend_compare (void)
3713{
3714 switch (GET_MODE (compare_op0))
3715 {
3716 case QImode:
 3717 emit_insn (gen_cmpqi_op (compare_op0, compare_op1));
 break;
 3718 case HImode:
 3719 emit_insn (gen_cmphi_op (compare_op0, compare_op1));
 break;
 3720 case PSImode:
 3721 emit_insn (gen_cmppsi_op (compare_op0, compare_op1));
 break;
 67fc44cb
 DD
 3722 default:
 3723 /* Just to silence the "missing case" warnings. */ ;
0166ff05
DD
3724 }
3725}
3726
3727void
3728m32c_expand_scc (int code, rtx *operands)
3729{
3730 enum machine_mode mode = TARGET_A16 ? QImode : HImode;
3731
3732 emit_insn (gen_rtx_SET (mode,
3733 operands[0],
3734 gen_rtx_fmt_ee (code,
3735 mode,
3736 compare_op0,
3737 compare_op1)));
3738}
3739
38b2d076
DD
3740/* Pattern Output Functions */
3741
07127a0a
DD
3742/* Returns a (OP (reg:CC FLG_REGNO) (const_int 0)) from some other
3743 match_operand rtx's OP. */
3744rtx
3745m32c_cmp_flg_0 (rtx cmp)
3746{
3747 return gen_rtx_fmt_ee (GET_CODE (cmp),
3748 GET_MODE (cmp),
3749 gen_rtx_REG (CCmode, FLG_REGNO),
3750 GEN_INT (0));
3751}
3752
3753int
3754m32c_expand_movcc (rtx *operands)
3755{
3756 rtx rel = operands[1];
0166ff05
DD
3757 rtx cmp;
3758
07127a0a
DD
3759 if (GET_CODE (rel) != EQ && GET_CODE (rel) != NE)
3760 return 1;
3761 if (GET_CODE (operands[2]) != CONST_INT
3762 || GET_CODE (operands[3]) != CONST_INT)
3763 return 1;
3764 emit_insn (gen_cmpqi(XEXP (rel, 0), XEXP (rel, 1)));
3765 if (GET_CODE (rel) == NE)
3766 {
3767 rtx tmp = operands[2];
3768 operands[2] = operands[3];
3769 operands[3] = tmp;
3770 }
0166ff05
DD
3771
3772 cmp = gen_rtx_fmt_ee (GET_CODE (rel),
3773 GET_MODE (rel),
3774 compare_op0,
3775 compare_op1);
3776
3777 emit_move_insn (operands[0],
3778 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
3779 cmp,
3780 operands[2],
3781 operands[3]));
07127a0a
DD
3782 return 0;
3783}
3784
3785/* Used for the "insv" pattern. Returns nonzero to FAIL, zero if the insertion has been emitted. */
3786int
3787m32c_expand_insv (rtx *operands)
3788{
3789 rtx op0, src0, p;
3790 int mask;
3791
3792 if (INTVAL (operands[1]) != 1)
3793 return 1;
3794
9cb96754
N
3795 /* Our insv opcode (bset, bclr) can only insert a one-bit constant. */
3796 if (GET_CODE (operands[3]) != CONST_INT)
3797 return 1;
3798 if (INTVAL (operands[3]) != 0
3799 && INTVAL (operands[3]) != 1
3800 && INTVAL (operands[3]) != -1)
3801 return 1;
3802
07127a0a
DD
3803 mask = 1 << INTVAL (operands[2]);
3804
3805 op0 = operands[0];
3806 if (GET_CODE (op0) == SUBREG
3807 && SUBREG_BYTE (op0) == 0)
3808 {
3809 rtx sub = SUBREG_REG (op0);
3810 if (GET_MODE (sub) == HImode || GET_MODE (sub) == QImode)
3811 op0 = sub;
3812 }
3813
b3a13419 3814 if (!can_create_pseudo_p ()
07127a0a
DD
3815 || (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0)))
3816 src0 = op0;
3817 else
3818 {
3819 src0 = gen_reg_rtx (GET_MODE (op0));
3820 emit_move_insn (src0, op0);
3821 }
3822
3823 if (GET_MODE (op0) == HImode
3824 && INTVAL (operands[2]) >= 8
3825 && GET_MODE (op0) == MEM)
3826 {
3827 /* We are little endian. */
3828 rtx new_mem = gen_rtx_MEM (QImode, plus_constant (XEXP (op0, 0), 1));
3829 MEM_COPY_ATTRIBUTES (new_mem, op0);
3830 mask >>= 8;
3831 }
3832
8e4edce7
DD
3833 /* First, we generate a mask with the correct polarity. If we are
3834 storing a zero, we want an AND mask, so invert it. */
3835 if (INTVAL (operands[3]) == 0)
07127a0a 3836 {
16659fcf 3837 /* Storing a zero, use an AND mask */
07127a0a
DD
3838 if (GET_MODE (op0) == HImode)
3839 mask ^= 0xffff;
3840 else
3841 mask ^= 0xff;
3842 }
8e4edce7
DD
3843 /* Now we need to properly sign-extend the mask in case we need to
3844 fall back to an AND or OR opcode. */
07127a0a
DD
3845 if (GET_MODE (op0) == HImode)
3846 {
3847 if (mask & 0x8000)
3848 mask -= 0x10000;
3849 }
3850 else
3851 {
3852 if (mask & 0x80)
3853 mask -= 0x100;
3854 }
3855
3856 switch ( (INTVAL (operands[3]) ? 4 : 0)
3857 + ((GET_MODE (op0) == HImode) ? 2 : 0)
3858 + (TARGET_A24 ? 1 : 0))
3859 {
3860 case 0: p = gen_andqi3_16 (op0, src0, GEN_INT (mask)); break;
3861 case 1: p = gen_andqi3_24 (op0, src0, GEN_INT (mask)); break;
3862 case 2: p = gen_andhi3_16 (op0, src0, GEN_INT (mask)); break;
3863 case 3: p = gen_andhi3_24 (op0, src0, GEN_INT (mask)); break;
3864 case 4: p = gen_iorqi3_16 (op0, src0, GEN_INT (mask)); break;
3865 case 5: p = gen_iorqi3_24 (op0, src0, GEN_INT (mask)); break;
3866 case 6: p = gen_iorhi3_16 (op0, src0, GEN_INT (mask)); break;
3867 case 7: p = gen_iorhi3_24 (op0, src0, GEN_INT (mask)); break;
3868 }
3869
3870 emit_insn (p);
3871 return 0;
3872}
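/* A worked example of the dispatch above (bit position and target
   invented): setting bit 3 of a QImode operand to 1 on an A24 target
   gives mask == 0x08 and selects case 5, i.e.
   gen_iorqi3_24 (op0, src0, GEN_INT (0x08)); storing a 0 instead
   flips the mask to 0xf7 (sign-extended to -9) and selects the
   corresponding AND pattern.  */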
3873
3874const char *
3875m32c_scc_pattern(rtx *operands, RTX_CODE code)
3876{
3877 static char buf[30];
3878 if (GET_CODE (operands[0]) == REG
3879 && REGNO (operands[0]) == R0_REGNO)
3880 {
3881 if (code == EQ)
3882 return "stzx\t#1,#0,r0l";
3883 if (code == NE)
3884 return "stzx\t#0,#1,r0l";
3885 }
3886 sprintf(buf, "bm%s\t0,%%h0\n\tand.b\t#1,%%0", GET_RTX_NAME (code));
3887 return buf;
3888}
3889
5abd2125
JS
3890/* Encode symbol attributes of a SYMBOL_REF into its
3891 SYMBOL_REF_FLAGS. */
3892static void
3893m32c_encode_section_info (tree decl, rtx rtl, int first)
3894{
3895 int extra_flags = 0;
3896
3897 default_encode_section_info (decl, rtl, first);
3898 if (TREE_CODE (decl) == FUNCTION_DECL
3899 && m32c_special_page_vector_p (decl))
3900
3901 extra_flags = SYMBOL_FLAG_FUNCVEC_FUNCTION;
3902
3903 if (extra_flags)
3904 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= extra_flags;
3905}
3906
38b2d076
DD
3907/* Returns TRUE if the current function is a leaf, and thus we can
3908 determine which registers an interrupt function really needs to
3909 save. The logic below is mostly about finding the insn sequence
3910 that's the function, versus any sequence that might be open for the
3911 current insn. */
3912static int
3913m32c_leaf_function_p (void)
3914{
3915 rtx saved_first, saved_last;
3916 struct sequence_stack *seq;
3917 int rv;
3918
3e029763
JH
3919 saved_first = crtl->emit.x_first_insn;
3920 saved_last = crtl->emit.x_last_insn;
3921 for (seq = crtl->emit.sequence_stack; seq && seq->next; seq = seq->next)
3922 ;
3923 if (seq)
3924 {
3925 crtl->emit.x_first_insn = seq->first;
3926 crtl->emit.x_last_insn = seq->last;
3927 }
3928
3929 rv = leaf_function_p ();
3930
3931 crtl->emit.x_first_insn = saved_first;
3932 crtl->emit.x_last_insn = saved_last;
3933 return rv;
3934}
3935
3936/* Returns TRUE if the current function needs to use the ENTER/EXIT
3937 opcodes. If the function doesn't need the frame base or stack
3938 pointer, it can use the simpler RTS opcode. */
3939static bool
3940m32c_function_needs_enter (void)
3941{
3942 rtx insn;
3943 struct sequence_stack *seq;
3944 rtx sp = gen_rtx_REG (Pmode, SP_REGNO);
3945 rtx fb = gen_rtx_REG (Pmode, FB_REGNO);
3946
3947 insn = get_insns ();
3e029763 3948 for (seq = crtl->emit.sequence_stack;
3949 seq;
3950 insn = seq->first, seq = seq->next);
3951
3952 while (insn)
3953 {
3954 if (reg_mentioned_p (sp, insn))
3955 return true;
3956 if (reg_mentioned_p (fb, insn))
3957 return true;
3958 insn = NEXT_INSN (insn);
3959 }
3960 return false;
3961}
3962
3963/* Mark all the subexpressions of the PARALLEL rtx PAR as
3964 frame-related. Return PAR.
3965
3966 dwarf2out.c:dwarf2out_frame_debug_expr ignores sub-expressions of a
3967 PARALLEL rtx other than the first if they do not have the
3968 FRAME_RELATED flag set on them. So this function is handy for
3969 marking up 'enter' instructions. */
3970static rtx
3971m32c_all_frame_related (rtx par)
3972{
3973 int len = XVECLEN (par, 0);
3974 int i;
3975
3976 for (i = 0; i < len; i++)
3977 F (XVECEXP (par, 0, i));
3978
3979 return par;
3980}
3981
3982/* Emits the prologue.  See the frame layout comment earlier in this
3983 file.  We can reserve up to 256 bytes with the ENTER opcode; beyond
3984 that, we manually update sp. */
3985void
3986m32c_emit_prologue (void)
3987{
3988 int frame_size, extra_frame_size = 0, reg_save_size;
3989 int complex_prologue = 0;
3990
3991 cfun->machine->is_leaf = m32c_leaf_function_p ();
3992 if (interrupt_p (cfun->decl))
3993 {
3994 cfun->machine->is_interrupt = 1;
3995 complex_prologue = 1;
3996 }
3997
3998 reg_save_size = m32c_pushm_popm (PP_justcount);
3999
4000 if (interrupt_p (cfun->decl))
4001 emit_insn (gen_pushm (GEN_INT (cfun->machine->intr_pushm)));
4002
4003 frame_size =
4004 m32c_initial_elimination_offset (FB_REGNO, SP_REGNO) - reg_save_size;
4005 if (frame_size == 0
4006 && !cfun->machine->is_interrupt
4007 && !m32c_function_needs_enter ())
4008 cfun->machine->use_rts = 1;
4009
4010 if (frame_size > 254)
4011 {
4012 extra_frame_size = frame_size - 254;
4013 frame_size = 254;
4014 }
4015 if (cfun->machine->use_rts == 0)
4016 F (emit_insn (m32c_all_frame_related
4017 (TARGET_A16
4018 ? gen_prologue_enter_16 (GEN_INT (frame_size + 2))
4019 : gen_prologue_enter_24 (GEN_INT (frame_size + 4)))));
4020
4021 if (extra_frame_size)
4022 {
4023 complex_prologue = 1;
4024 if (TARGET_A16)
4025 F (emit_insn (gen_addhi3 (gen_rtx_REG (HImode, SP_REGNO),
4026 gen_rtx_REG (HImode, SP_REGNO),
4027 GEN_INT (-extra_frame_size))));
4028 else
4029 F (emit_insn (gen_addpsi3 (gen_rtx_REG (PSImode, SP_REGNO),
4030 gen_rtx_REG (PSImode, SP_REGNO),
4031 GEN_INT (-extra_frame_size))));
4032 }
4033
4034 complex_prologue += m32c_pushm_popm (PP_pushm);
4035
4036 /* This just emits a comment into the .s file for debugging. */
4037 if (complex_prologue)
4038 emit_insn (gen_prologue_end ());
4039}
4040
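/* Sketch (not compiled in): how a large frame is split between the
   ENTER operand and an explicit SP adjustment, mirroring the 254-byte
   cap in m32c_emit_prologue.  The helper and its values are
   hypothetical.  */
#if 0
static void
example_split_frame (int frame_size, int *enter_size, int *extra)
{
  *extra = 0;
  if (frame_size > 254)
    {
      /* The remainder is subtracted from SP with an explicit add.  */
      *extra = frame_size - 254;
      frame_size = 254;
    }
  /* Emitted as enter #(frame_size + 2) on A16, +4 on A24, as above.  */
  *enter_size = frame_size;
}
#endif
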
4041/* Likewise, for the epilogue. The only exception is that, for
4042 interrupts, we must manually unwind the frame as the REIT opcode
4043 doesn't do that. */
4044void
4045m32c_emit_epilogue (void)
4046{
4047 /* This just emits a comment into the .s file for debugging. */
4048 if (m32c_pushm_popm (PP_justcount) > 0 || cfun->machine->is_interrupt)
4049 emit_insn (gen_epilogue_start ());
4050
4051 m32c_pushm_popm (PP_popm);
4052
4053 if (cfun->machine->is_interrupt)
4054 {
4055 enum machine_mode spmode = TARGET_A16 ? HImode : PSImode;
4056
4057 emit_move_insn (gen_rtx_REG (spmode, A0_REGNO),
4058 gen_rtx_REG (spmode, FP_REGNO));
4059 emit_move_insn (gen_rtx_REG (spmode, SP_REGNO),
4060 gen_rtx_REG (spmode, A0_REGNO));
4061 if (TARGET_A16)
4062 emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, FP_REGNO)));
4063 else
4064 emit_insn (gen_poppsi (gen_rtx_REG (PSImode, FP_REGNO)));
4065 emit_insn (gen_popm (GEN_INT (cfun->machine->intr_pushm)));
4066 if (TARGET_A16)
4067 emit_jump_insn (gen_epilogue_reit_16 ());
4068 else
4069 emit_jump_insn (gen_epilogue_reit_24 ());
4070 }
4071 else if (cfun->machine->use_rts)
4072 emit_jump_insn (gen_epilogue_rts ());
4073 else if (TARGET_A16)
4074 emit_jump_insn (gen_epilogue_exitd_16 ());
38b2d076 4075 else
0e0642aa 4076 emit_jump_insn (gen_epilogue_exitd_24 ());
4077 emit_barrier ();
4078}
4079
4080void
4081m32c_emit_eh_epilogue (rtx ret_addr)
4082{
4083 /* R0[R2] has the stack adjustment. R1[R3] has the address to
4084 return to. We have to fudge the stack, pop everything, pop SP
4085 (fudged), and return (fudged). This is actually easier to do in
4086 assembler, so punt to libgcc. */
4087 emit_jump_insn (gen_eh_epilogue (ret_addr, cfun->machine->eh_stack_adjust));
c41c1387 4088 /* emit_clobber (gen_rtx_REG (HImode, R0L_REGNO)); */
4089 emit_barrier ();
4090}
4091
4092/* Indicate which flags must be properly set for a given conditional. */
4093static int
4094flags_needed_for_conditional (rtx cond)
4095{
4096 switch (GET_CODE (cond))
4097 {
4098 case LE:
4099 case GT:
4100 return FLAGS_OSZ;
4101 case LEU:
4102 case GTU:
4103 return FLAGS_ZC;
4104 case LT:
4105 case GE:
4106 return FLAGS_OS;
4107 case LTU:
4108 case GEU:
4109 return FLAGS_C;
4110 case EQ:
4111 case NE:
4112 return FLAGS_Z;
4113 default:
4114 return FLAGS_N;
4115 }
4116}
4117
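/* Example of how the mapping above is used (illustration only,
   assuming FLAGS_OC covers the overflow and carry bits): an EQ/NE
   branch needs just FLAGS_Z, so the compare feeding it may be found
   redundant; LTU/GEU need the carry flag, so m32c_compare_redundant
   below rejects those via its FLAGS_OC check, since arithmetic insns
   set carry differently than a compare.  */
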
4118#define DEBUG_CMP 0
4119
4120/* Returns true if a compare insn is redundant because it would only
4121 set flags that are already set correctly. */
4122static bool
4123m32c_compare_redundant (rtx cmp, rtx *operands)
4124{
4125 int flags_needed;
4126 int pflags;
4127 rtx prev, pp, next;
4128 rtx op0, op1, op2;
4129#if DEBUG_CMP
4130 int prev_icode, i;
4131#endif
4132
4133 op0 = operands[0];
4134 op1 = operands[1];
4135 op2 = operands[2];
4136
4137#if DEBUG_CMP
4138 fprintf(stderr, "\n\033[32mm32c_compare_redundant\033[0m\n");
4139 debug_rtx(cmp);
4140 for (i=0; i<2; i++)
4141 {
4142 fprintf(stderr, "operands[%d] = ", i);
4143 debug_rtx(operands[i]);
4144 }
4145#endif
4146
4147 next = next_nonnote_insn (cmp);
4148 if (!next || !INSN_P (next))
4149 {
4150#if DEBUG_CMP
4151 fprintf(stderr, "compare not followed by insn\n");
4152 debug_rtx(next);
4153#endif
4154 return false;
4155 }
4156 if (GET_CODE (PATTERN (next)) == SET
4157 && GET_CODE (XEXP ( PATTERN (next), 1)) == IF_THEN_ELSE)
4158 {
4159 next = XEXP (XEXP (PATTERN (next), 1), 0);
4160 }
4161 else if (GET_CODE (PATTERN (next)) == SET)
4162 {
4163 /* If this is a conditional, flags_needed will be something
4164 other than FLAGS_N, which we test below. */
4165 next = XEXP (PATTERN (next), 1);
4166 }
4167 else
4168 {
4169#if DEBUG_CMP
4170 fprintf(stderr, "compare not followed by conditional\n");
4171 debug_rtx(next);
4172#endif
4173 return false;
4174 }
4175#if DEBUG_CMP
4176 fprintf(stderr, "conditional is: ");
4177 debug_rtx(next);
4178#endif
4179
4180 flags_needed = flags_needed_for_conditional (next);
4181 if (flags_needed == FLAGS_N)
4182 {
4183#if DEBUG_CMP
4184 fprintf(stderr, "conditional needs no flags\n");
4185 debug_rtx(next);
4186#endif
4187 return false;
4188 }
4189
4190 /* Compare doesn't set overflow and carry the same way that
4191 arithmetic instructions do, so we can't replace those. */
4192 if (flags_needed & FLAGS_OC)
4193 return false;
4194
4195 prev = cmp;
4196 do {
4197 prev = prev_nonnote_insn (prev);
4198 if (!prev)
4199 {
4200#if DEBUG_CMP
4201 fprintf(stderr, "No previous insn.\n");
4202#endif
4203 return false;
4204 }
4205 if (!INSN_P (prev))
4206 {
4207#if DEBUG_CMP
4208 fprintf(stderr, "Previous insn is a non-insn.\n");
4209#endif
4210 return false;
4211 }
4212 pp = PATTERN (prev);
4213 if (GET_CODE (pp) != SET)
4214 {
4215#if DEBUG_CMP
4216 fprintf(stderr, "Previous insn is not a SET.\n");
4217#endif
4218 return false;
4219 }
4220 pflags = get_attr_flags (prev);
4221
4222 /* Looking up the attributes of previous insns corrupted the recog
4223 data for the compare, so re-recognize it before we continue. */
4224 INSN_UID (cmp) = -1;
4225 recog (PATTERN (cmp), cmp, 0);
4226
4227 if (pflags == FLAGS_N
4228 && reg_mentioned_p (op0, pp))
4229 {
4230#if DEBUG_CMP
4231 fprintf(stderr, "intermediate non-flags insn uses op:\n");
4232 debug_rtx(prev);
4233#endif
4234 return false;
4235 }
4236 } while (pflags == FLAGS_N);
4237#if DEBUG_CMP
4238 fprintf(stderr, "previous flag-setting insn:\n");
4239 debug_rtx(prev);
4240 debug_rtx(pp);
4241#endif
4242
4243 if (GET_CODE (pp) == SET
4244 && GET_CODE (XEXP (pp, 0)) == REG
4245 && REGNO (XEXP (pp, 0)) == FLG_REGNO
4246 && GET_CODE (XEXP (pp, 1)) == COMPARE)
4247 {
4248 /* Adjacent cbranches must have the same operands to be
4249 redundant. */
4250 rtx pop0 = XEXP (XEXP (pp, 1), 0);
4251 rtx pop1 = XEXP (XEXP (pp, 1), 1);
4252#if DEBUG_CMP
4253 fprintf(stderr, "adjacent cbranches\n");
4254 debug_rtx(pop0);
4255 debug_rtx(pop1);
4256#endif
4257 if (rtx_equal_p (op0, pop0)
4258 && rtx_equal_p (op1, pop1))
4259 return true;
4260#if DEBUG_CMP
4261 fprintf(stderr, "prev cmp not same\n");
4262#endif
4263 return false;
4264 }
4265
4266 /* Else the previous insn must be a SET, with either the source or
4267 dest equal to operands[0], and operands[1] must be zero. */
4268
4269 if (!rtx_equal_p (op1, const0_rtx))
4270 {
4271#if DEBUG_CMP
4272 fprintf(stderr, "operands[1] not const0_rtx\n");
4273#endif
4274 return false;
4275 }
4276 if (GET_CODE (pp) != SET)
4277 {
4278#if DEBUG_CMP
4279 fprintf (stderr, "pp not set\n");
4280#endif
4281 return false;
4282 }
4283 if (!rtx_equal_p (op0, SET_SRC (pp))
4284 && !rtx_equal_p (op0, SET_DEST (pp)))
4285 {
4286#if DEBUG_CMP
4287 fprintf(stderr, "operands[0] not found in set\n");
4288#endif
4289 return false;
4290 }
4291
4292#if DEBUG_CMP
4293 fprintf(stderr, "cmp flags %x prev flags %x\n", flags_needed, pflags);
4294#endif
4295 if ((pflags & flags_needed) == flags_needed)
4296 return true;
4297
4298 return false;
4299}
4300
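/* Illustrative sketch of the kind of sequence the routine above can
   catch (hypothetical m32c assembly; assumes the mov pattern's flags
   attribute records that it sets Z for this operand):

       mov.w  _x,r0     ; a SET whose dest is the compared operand
       cmp.w  #0,r0     ; candidate for removal
       jeq    .L1       ; EQ only needs FLAGS_Z

   Here operands[1] is zero, operands[0] appears in the preceding SET,
   and the flags the branch needs are covered, so the cmp is dropped
   (emitted as a comment by m32c_output_compare below).  */
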
4301/* Return the pattern for a compare. This will be commented out if
4302 the compare is redundant, else a normal pattern is returned. Thus,
4303 the assembler output says where the compare would have been. */
4304char *
4305m32c_output_compare (rtx insn, rtx *operands)
4306{
0a2aaacc 4307 static char templ[] = ";cmp.b\t%1,%0";
4308 /* ^ 5 */
4309
0a2aaacc 4310 templ[5] = " bwll"[GET_MODE_SIZE(GET_MODE(operands[0]))];
4311 if (m32c_compare_redundant (insn, operands))
4312 {
4313#if DEBUG_CMP
4314 fprintf(stderr, "cbranch: cmp not needed\n");
4315#endif
0a2aaacc 4316 return templ;
4317 }
4318
4319#if DEBUG_CMP
0a2aaacc 4320 fprintf(stderr, "cbranch: cmp needed: `%s'\n", templ);
16659fcf 4321#endif
0a2aaacc 4322 return templ + 1;
4323}
4324
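/* Example of the template selection above (assumed HImode operands):
   GET_MODE_SIZE is 2, so templ[5] becomes 'w' and templ reads
   ";cmp.w\t%1,%0".  A needed compare returns templ + 1, dropping the
   leading ';'; a redundant one returns the commented-out form so the
   .s file still shows where the compare would have been.  */
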
4325#undef TARGET_ENCODE_SECTION_INFO
4326#define TARGET_ENCODE_SECTION_INFO m32c_encode_section_info
4327
4328/* The Global `targetm' Variable. */
4329
4330struct gcc_target targetm = TARGET_INITIALIZER;
4331
4332#include "gt-m32c.h"