1/* Target Code for R8C/M16C/M32C
2 Copyright (C) 2005
3 Free Software Foundation, Inc.
4 Contributed by Red Hat.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it
9 under the terms of the GNU General Public License as published
10 by the Free Software Foundation; either version 2, or (at your
11 option) any later version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT
14 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
16 License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to the Free
20 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
21 02110-1301, USA. */
22
23#include "config.h"
24#include "system.h"
25#include "coretypes.h"
26#include "tm.h"
27#include "rtl.h"
28#include "regs.h"
29#include "hard-reg-set.h"
30#include "real.h"
31#include "insn-config.h"
32#include "conditions.h"
33#include "insn-flags.h"
34#include "output.h"
35#include "insn-attr.h"
36#include "flags.h"
37#include "recog.h"
38#include "reload.h"
39#include "toplev.h"
40#include "obstack.h"
41#include "tree.h"
42#include "expr.h"
43#include "optabs.h"
44#include "except.h"
45#include "function.h"
46#include "ggc.h"
47#include "target.h"
48#include "target-def.h"
49#include "tm_p.h"
50#include "langhooks.h"
51#include "tree-gimple.h"
52
53/* Prototypes */
54
55/* Used by m32c_pushm_popm. */
56typedef enum
57{
58 PP_pushm,
59 PP_popm,
60 PP_justcount
61} Push_Pop_Type;
62
63static tree interrupt_handler (tree *, tree, tree, int, bool *);
64static int interrupt_p (tree node);
65static bool m32c_asm_integer (rtx, unsigned int, int);
66static int m32c_comp_type_attributes (tree, tree);
67static bool m32c_fixed_condition_code_regs (unsigned int *, unsigned int *);
68static struct machine_function *m32c_init_machine_status (void);
69static void m32c_insert_attributes (tree, tree *);
70static bool m32c_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
71 tree, bool);
72static bool m32c_promote_prototypes (tree);
73static int m32c_pushm_popm (Push_Pop_Type);
74static bool m32c_strict_argument_naming (CUMULATIVE_ARGS *);
75static rtx m32c_struct_value_rtx (tree, int);
76static rtx m32c_subreg (enum machine_mode, rtx, enum machine_mode, int);
77static int need_to_save (int);
78
79#define streq(a,b) (strcmp ((a), (b)) == 0)
80
81/* Internal support routines */
82
83/* Debugging statements are tagged with DEBUG0 only so that they can
84 be easily enabled individually, by replacing the '0' with '1' as
85 needed. */
86#define DEBUG0 0
87#define DEBUG1 1
88
89#if DEBUG0
90/* This is needed by some of the commented-out debug statements
91 below. */
92static char const *class_names[LIM_REG_CLASSES] = REG_CLASS_NAMES;
93#endif
94static int class_contents[LIM_REG_CLASSES][1] = REG_CLASS_CONTENTS;
95
96/* These are all to support encode_pattern(). */
97static char pattern[30], *patternp;
98static GTY(()) rtx patternr[30];
99#define RTX_IS(x) (streq (pattern, x))
100
101/* Some macros to simplify the logic throughout this file. */
102#define IS_MEM_REGNO(regno) ((regno) >= MEM0_REGNO && (regno) <= MEM7_REGNO)
103#define IS_MEM_REG(rtx) (GET_CODE (rtx) == REG && IS_MEM_REGNO (REGNO (rtx)))
104
105#define IS_CR_REGNO(regno) ((regno) >= SB_REGNO && (regno) <= PC_REGNO)
106#define IS_CR_REG(rtx) (GET_CODE (rtx) == REG && IS_CR_REGNO (REGNO (rtx)))
107
108/* We do most RTX matching by converting the RTX into a string, and
109 using string compares. This vastly simplifies the logic in many of
110 the functions in this file.
111
112 On exit, pattern[] has the encoded string (use RTX_IS("...") to
113 compare it) and patternr[] has pointers to the nodes in the RTX
114 corresponding to each character in the encoded string. The latter
115 is mostly used by print_operand().
116
117 Unrecognized patterns have '?' in them; this shows up when the
118 assembler complains about syntax errors.
119*/
120
121static void
122encode_pattern_1 (rtx x)
123{
124 int i;
125
126 if (patternp == pattern + sizeof (pattern) - 2)
127 {
128 patternp[-1] = '?';
129 return;
130 }
131
132 patternr[patternp - pattern] = x;
133
134 switch (GET_CODE (x))
135 {
136 case REG:
137 *patternp++ = 'r';
138 break;
139 case SUBREG:
140 if (GET_MODE_SIZE (GET_MODE (x)) !=
141 GET_MODE_SIZE (GET_MODE (XEXP (x, 0))))
142 *patternp++ = 'S';
143 encode_pattern_1 (XEXP (x, 0));
144 break;
145 case MEM:
146 *patternp++ = 'm';
147 case CONST:
148 encode_pattern_1 (XEXP (x, 0));
149 break;
150 case PLUS:
151 *patternp++ = '+';
152 encode_pattern_1 (XEXP (x, 0));
153 encode_pattern_1 (XEXP (x, 1));
154 break;
155 case PRE_DEC:
156 *patternp++ = '>';
157 encode_pattern_1 (XEXP (x, 0));
158 break;
159 case POST_INC:
160 *patternp++ = '<';
161 encode_pattern_1 (XEXP (x, 0));
162 break;
163 case LO_SUM:
164 *patternp++ = 'L';
165 encode_pattern_1 (XEXP (x, 0));
166 encode_pattern_1 (XEXP (x, 1));
167 break;
168 case HIGH:
169 *patternp++ = 'H';
170 encode_pattern_1 (XEXP (x, 0));
171 break;
172 case SYMBOL_REF:
173 *patternp++ = 's';
174 break;
175 case LABEL_REF:
176 *patternp++ = 'l';
177 break;
178 case CODE_LABEL:
179 *patternp++ = 'c';
180 break;
181 case CONST_INT:
182 case CONST_DOUBLE:
183 *patternp++ = 'i';
184 break;
185 case UNSPEC:
186 *patternp++ = 'u';
187 *patternp++ = '0' + XCINT (x, 1, UNSPEC);
188 for (i = 0; i < XVECLEN (x, 0); i++)
189 encode_pattern_1 (XVECEXP (x, 0, i));
190 break;
191 case USE:
192 *patternp++ = 'U';
193 break;
194 case PARALLEL:
195 *patternp++ = '|';
196 for (i = 0; i < XVECLEN (x, 0); i++)
197 encode_pattern_1 (XVECEXP (x, 0, i));
198 break;
199 case EXPR_LIST:
200 *patternp++ = 'E';
201 encode_pattern_1 (XEXP (x, 0));
202 if (XEXP (x, 1))
203 encode_pattern_1 (XEXP (x, 1));
204 break;
205 default:
206 *patternp++ = '?';
207#if DEBUG0
208 fprintf (stderr, "can't encode pattern %s\n",
209 GET_RTX_NAME (GET_CODE (x)));
210 debug_rtx (x);
211 gcc_unreachable ();
212#endif
213 break;
214 }
215}
216
217static void
218encode_pattern (rtx x)
219{
220 patternp = pattern;
221 encode_pattern_1 (x);
222 *patternp = 0;
223}
224
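/* Illustrative sketch, not from the original source: a worked example
   of the encoding above.  For an operand of the form
   (mem:HI (plus:PSI (reg:PSI A0) (const_int 4))), encode_pattern()
   yields pattern = "m+ri", and patternr[] holds
   patternr[0] = the MEM, patternr[1] = the PLUS,
   patternr[2] = the REG, patternr[3] = the CONST_INT, so callers can
   test the shape and then pick nodes out directly ("addr" and
   "offset" below are hypothetical locals):

     encode_pattern (addr);
     if (RTX_IS ("m+ri") && REGNO (patternr[2]) == A0_REGNO)
       offset = INTVAL (patternr[3]);
*/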
225/* Since register names indicate the mode they're used in, we need a
226 way to determine which name to refer to the register with. Called
227 by print_operand(). */
228
229static const char *
230reg_name_with_mode (int regno, enum machine_mode mode)
231{
232 int mlen = GET_MODE_SIZE (mode);
233 if (regno == R0_REGNO && mlen == 1)
234 return "r0l";
235 if (regno == R0_REGNO && (mlen == 3 || mlen == 4))
236 return "r2r0";
237 if (regno == R0_REGNO && mlen == 6)
238 return "r2r1r0";
239 if (regno == R0_REGNO && mlen == 8)
240 return "r3r1r2r0";
241 if (regno == R1_REGNO && mlen == 1)
242 return "r1l";
243 if (regno == R1_REGNO && (mlen == 3 || mlen == 4))
244 return "r3r1";
245 if (regno == A0_REGNO && TARGET_A16 && (mlen == 3 || mlen == 4))
246 return "a1a0";
247 return reg_names[regno];
248}
249
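/* Illustrative note, not from the original source: for example,
   reg_name_with_mode (R0_REGNO, QImode) is "r0l",
   reg_name_with_mode (R0_REGNO, SImode) is "r2r0", and an HImode
   request falls through to the default name in reg_names[]
   (presumably "r0"). */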
250/* How many bytes a register uses on stack when it's pushed. We need
251 to know this because the push opcode needs to explicitly indicate
252 the size of the register, even though the name of the register
253 already tells it that. Used by m32c_output_reg_{push,pop}, which
254 is only used through calls to ASM_OUTPUT_REG_{PUSH,POP}. */
255
256static int
257reg_push_size (int regno)
258{
259 switch (regno)
260 {
261 case R0_REGNO:
262 case R1_REGNO:
263 return 2;
264 case R2_REGNO:
265 case R3_REGNO:
266 case FLG_REGNO:
267 return 2;
268 case A0_REGNO:
269 case A1_REGNO:
270 case SB_REGNO:
271 case FB_REGNO:
272 case SP_REGNO:
273 if (TARGET_A16)
274 return 2;
275 else
276 return 3;
277 default:
278 gcc_unreachable ();
279 }
280}
281
282static int *class_sizes = 0;
283
284/* Given two register classes, find the largest intersection between
285 them. If there is no intersection, return RETURNED_IF_EMPTY
286 instead. */
287static int
288reduce_class (int original_class, int limiting_class, int returned_if_empty)
289{
290 int cc = class_contents[original_class][0];
291 int i, best = NO_REGS;
292 int best_size = 0;
293
294 if (original_class == limiting_class)
295 return original_class;
296
297 if (!class_sizes)
298 {
299 int r;
300 class_sizes = (int *) xmalloc (LIM_REG_CLASSES * sizeof (int));
301 for (i = 0; i < LIM_REG_CLASSES; i++)
302 {
303 class_sizes[i] = 0;
304 for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
305 if (class_contents[i][0] & (1 << r))
306 class_sizes[i]++;
307 }
308 }
309
310 cc &= class_contents[limiting_class][0];
311 for (i = 0; i < LIM_REG_CLASSES; i++)
312 {
313 int ic = class_contents[i][0];
314
315 if ((~cc & ic) == 0)
316 if (best_size < class_sizes[i])
317 {
318 best = i;
319 best_size = class_sizes[i];
320 }
321
322 }
323 if (best == NO_REGS)
324 return returned_if_empty;
325 return best;
326}
327
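/* Illustrative sketch, not from the original source: the heart of
   reduce_class() is a subset test on the class bit masks.  With
   cc = class_contents[original][0] & class_contents[limiting][0],
   a class I qualifies only if (~cc & class_contents[i][0]) == 0,
   i.e. every register in I is also in cc, and the qualifying class
   with the most members wins.  For example (mask values assumed
   purely for illustration), if cc == 0x0005 ({r0, r1}), a class with
   mask 0x0001 ({r0}) qualifies, one with mask 0x0007 does not (it
   also contains r2), and one with mask 0x0005 would be the best
   answer. */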
 328/* Returns TRUE if there are any registers that exist in both register
329 classes. */
330static int
331classes_intersect (int class1, int class2)
332{
333 return class_contents[class1][0] & class_contents[class2][0];
334}
335
336/* Used by m32c_register_move_cost to determine if a move is
337 impossibly expensive. */
338static int
339class_can_hold_mode (int class, enum machine_mode mode)
340{
341 /* Cache the results: 0=untested 1=no 2=yes */
342 static char results[LIM_REG_CLASSES][MAX_MACHINE_MODE];
343 if (results[class][mode] == 0)
344 {
345 int r, n, i;
346 results[class][mode] = 1;
347 for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
348 if (class_contents[class][0] & (1 << r)
349 && HARD_REGNO_MODE_OK (r, mode))
350 {
351 int ok = 1;
352 n = HARD_REGNO_NREGS (r, mode);
353 for (i = 1; i < n; i++)
354 if (!(class_contents[class][0] & (1 << (r + i))))
355 ok = 0;
356 if (ok)
357 {
358 results[class][mode] = 2;
359 break;
360 }
361 }
362 }
363#if DEBUG0
364 fprintf (stderr, "class %s can hold %s? %s\n",
365 class_names[class], mode_name[mode],
366 (results[class][mode] == 2) ? "yes" : "no");
367#endif
368 return results[class][mode] == 2;
369}
370
371/* Run-time Target Specification. */
372
373/* Memregs are memory locations that gcc treats like general
374 registers, as there are a limited number of true registers and the
375 m32c families can use memory in most places that registers can be
376 used.
377
378 However, since memory accesses are more expensive than registers,
379 we allow the user to limit the number of memregs available, in
380 order to try to persuade gcc to try harder to use real registers.
381
382 Memregs are provided by m32c-lib1.S.
383*/
384
385int target_memregs = 16;
386static bool target_memregs_set = FALSE;
387int ok_to_change_target_memregs = TRUE;
388
389#undef TARGET_HANDLE_OPTION
390#define TARGET_HANDLE_OPTION m32c_handle_option
391static bool
392m32c_handle_option (size_t code,
393 const char *arg ATTRIBUTE_UNUSED,
394 int value ATTRIBUTE_UNUSED)
395{
396 if (code == OPT_memregs_)
397 {
398 target_memregs_set = TRUE;
399 target_memregs = atoi (arg);
400 }
401 return TRUE;
402}
403
404/* Implements OVERRIDE_OPTIONS. We limit memregs to 0..16, and
405 provide a default. */
406void
407m32c_override_options (void)
408{
409 if (target_memregs_set)
410 {
411 if (target_memregs < 0 || target_memregs > 16)
412 error ("invalid target memregs value '%d'", target_memregs);
413 }
414 else
 415 target_memregs = 16;
416}
417
418/* Defining data structures for per-function information */
419
420/* The usual; we set up our machine_function data. */
421static struct machine_function *
422m32c_init_machine_status (void)
423{
424 struct machine_function *machine;
425 machine =
426 (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
427
428 return machine;
429}
430
431/* Implements INIT_EXPANDERS. We just set up to call the above
432 function. */
433void
434m32c_init_expanders (void)
435{
436 init_machine_status = m32c_init_machine_status;
437}
438
439/* Storage Layout */
440
441#undef TARGET_PROMOTE_FUNCTION_RETURN
442#define TARGET_PROMOTE_FUNCTION_RETURN m32c_promote_function_return
443bool
444m32c_promote_function_return (tree fntype ATTRIBUTE_UNUSED)
445{
446 return false;
447}
448
449/* Register Basics */
450
451/* Basic Characteristics of Registers */
452
453/* Whether a mode fits in a register is complex enough to warrant a
454 table. */
455static struct
456{
457 char qi_regs;
458 char hi_regs;
459 char pi_regs;
460 char si_regs;
461 char di_regs;
462} nregs_table[FIRST_PSEUDO_REGISTER] =
463{
464 { 1, 1, 2, 2, 4 }, /* r0 */
465 { 0, 1, 0, 0, 0 }, /* r2 */
466 { 1, 1, 2, 2, 0 }, /* r1 */
467 { 0, 1, 0, 0, 0 }, /* r3 */
468 { 0, 1, 1, 0, 0 }, /* a0 */
469 { 0, 1, 1, 0, 0 }, /* a1 */
470 { 0, 1, 1, 0, 0 }, /* sb */
471 { 0, 1, 1, 0, 0 }, /* fb */
472 { 0, 1, 1, 0, 0 }, /* sp */
473 { 1, 1, 1, 0, 0 }, /* pc */
474 { 0, 0, 0, 0, 0 }, /* fl */
475 { 1, 1, 1, 0, 0 }, /* ap */
476 { 1, 1, 2, 2, 4 }, /* mem0 */
477 { 1, 1, 2, 2, 4 }, /* mem1 */
478 { 1, 1, 2, 2, 4 }, /* mem2 */
479 { 1, 1, 2, 2, 4 }, /* mem3 */
480 { 1, 1, 2, 2, 4 }, /* mem4 */
481 { 1, 1, 2, 2, 0 }, /* mem5 */
482 { 1, 1, 2, 2, 0 }, /* mem6 */
483 { 1, 1, 0, 0, 0 }, /* mem7 */
484};
485
486/* Implements CONDITIONAL_REGISTER_USAGE. We adjust the number of
487 available memregs, and select which registers need to be preserved
488 across calls based on the chip family. */
489
490void
491m32c_conditional_register_usage (void)
492{
493 int i;
494
495 if (0 <= target_memregs && target_memregs <= 16)
496 {
497 /* The command line option is bytes, but our "registers" are
498 16-bit words. */
499 for (i = target_memregs/2; i < 8; i++)
500 {
501 fixed_regs[MEM0_REGNO + i] = 1;
502 CLEAR_HARD_REG_BIT (reg_class_contents[MEM_REGS], MEM0_REGNO + i);
503 }
504 }
505
506 /* M32CM and M32C preserve more registers across function calls. */
507 if (TARGET_A24)
508 {
509 call_used_regs[R1_REGNO] = 0;
510 call_used_regs[R2_REGNO] = 0;
511 call_used_regs[R3_REGNO] = 0;
512 call_used_regs[A0_REGNO] = 0;
513 call_used_regs[A1_REGNO] = 0;
514 }
515}
516
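/* Illustrative note, not from the original source: with -memregs=6,
   target_memregs is 6 bytes, so the loop above fixes mem3..mem7 and
   only mem0..mem2 (three 16-bit "registers", i.e. six bytes of the
   m32c-lib1.S area) remain allocatable; the default of 16 leaves all
   eight memregs available. */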
517/* How Values Fit in Registers */
518
519/* Implements HARD_REGNO_NREGS. This is complicated by the fact that
520 different registers are different sizes from each other, *and* may
521 be different sizes in different chip families. */
522int
523m32c_hard_regno_nregs (int regno, enum machine_mode mode)
524{
525 if (regno == FLG_REGNO && mode == CCmode)
526 return 1;
527 if (regno >= FIRST_PSEUDO_REGISTER)
528 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
529
530 if (regno >= MEM0_REGNO && regno <= MEM7_REGNO)
531 return (GET_MODE_SIZE (mode) + 1) / 2;
532
533 if (GET_MODE_SIZE (mode) <= 1)
534 return nregs_table[regno].qi_regs;
535 if (GET_MODE_SIZE (mode) <= 2)
536 return nregs_table[regno].hi_regs;
537 if (regno == A0_REGNO && mode == PSImode && TARGET_A16)
538 return 2;
539 if ((GET_MODE_SIZE (mode) <= 3 || mode == PSImode) && TARGET_A24)
540 return nregs_table[regno].pi_regs;
541 if (GET_MODE_SIZE (mode) <= 4)
542 return nregs_table[regno].si_regs;
543 if (GET_MODE_SIZE (mode) <= 8)
544 return nregs_table[regno].di_regs;
545 return 0;
546}
547
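/* Illustrative sketch, not from the original source: sample results
   of the function above, read off nregs_table[]:
     m32c_hard_regno_nregs (R0_REGNO, SImode)  == 2  (the r2r0 pair)
     m32c_hard_regno_nregs (A0_REGNO, SImode)  == 0  (SI never fits in a0)
     m32c_hard_regno_nregs (A0_REGNO, PSImode) == 2  on TARGET_A16 (a1a0)
   so m32c_hard_regno_ok() accepts (R0_REGNO, SImode) and rejects
   (A0_REGNO, SImode). */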
548/* Implements HARD_REGNO_MODE_OK. The above function does the work
549 already; just test its return value. */
550int
551m32c_hard_regno_ok (int regno, enum machine_mode mode)
552{
553 return m32c_hard_regno_nregs (regno, mode) != 0;
554}
555
556/* Implements MODES_TIEABLE_P. In general, modes aren't tieable since
557 registers are all different sizes. However, since most modes are
558 bigger than our registers anyway, it's easier to implement this
559 function that way, leaving QImode as the only unique case. */
560int
561m32c_modes_tieable_p (enum machine_mode m1, enum machine_mode m2)
562{
563 if (GET_MODE_SIZE (m1) == GET_MODE_SIZE (m2))
564 return 1;
565
 566#if 0
567 if (m1 == QImode || m2 == QImode)
568 return 0;
 569#endif
570
571 return 1;
572}
573
574/* Register Classes */
575
576/* Implements REGNO_REG_CLASS. */
577enum machine_mode
578m32c_regno_reg_class (int regno)
579{
580 switch (regno)
581 {
582 case R0_REGNO:
583 return R0_REGS;
584 case R1_REGNO:
585 return R1_REGS;
586 case R2_REGNO:
587 return R2_REGS;
588 case R3_REGNO:
589 return R3_REGS;
590 case A0_REGNO:
591 case A1_REGNO:
592 return A_REGS;
593 case SB_REGNO:
594 return SB_REGS;
595 case FB_REGNO:
596 return FB_REGS;
597 case SP_REGNO:
598 return SP_REGS;
599 case FLG_REGNO:
600 return FLG_REGS;
601 default:
602 if (IS_MEM_REGNO (regno))
603 return MEM_REGS;
604 return ALL_REGS;
605 }
606}
607
608/* Implements REG_CLASS_FROM_CONSTRAINT. Note that some constraints only match
609 for certain chip families. */
610int
611m32c_reg_class_from_constraint (char c ATTRIBUTE_UNUSED, const char *s)
612{
613 if (memcmp (s, "Rsp", 3) == 0)
614 return SP_REGS;
615 if (memcmp (s, "Rfb", 3) == 0)
616 return FB_REGS;
617 if (memcmp (s, "Rsb", 3) == 0)
618 return SB_REGS;
619 if (memcmp (s, "Rcr", 3) == 0)
620 return TARGET_A16 ? CR_REGS : NO_REGS;
621 if (memcmp (s, "Rcl", 3) == 0)
622 return TARGET_A24 ? CR_REGS : NO_REGS;
623 if (memcmp (s, "R0w", 3) == 0)
624 return R0_REGS;
625 if (memcmp (s, "R1w", 3) == 0)
626 return R1_REGS;
627 if (memcmp (s, "R2w", 3) == 0)
628 return R2_REGS;
629 if (memcmp (s, "R3w", 3) == 0)
630 return R3_REGS;
631 if (memcmp (s, "R02", 3) == 0)
632 return R02_REGS;
633 if (memcmp (s, "R03", 3) == 0)
634 return R03_REGS;
635 if (memcmp (s, "Rdi", 3) == 0)
636 return DI_REGS;
637 if (memcmp (s, "Rhl", 3) == 0)
638 return HL_REGS;
639 if (memcmp (s, "R23", 3) == 0)
640 return R23_REGS;
641 if (memcmp (s, "Ra0", 3) == 0)
642 return A0_REGS;
643 if (memcmp (s, "Ra1", 3) == 0)
644 return A1_REGS;
645 if (memcmp (s, "Raa", 3) == 0)
646 return A_REGS;
647 if (memcmp (s, "Raw", 3) == 0)
648 return TARGET_A16 ? A_REGS : NO_REGS;
649 if (memcmp (s, "Ral", 3) == 0)
650 return TARGET_A24 ? A_REGS : NO_REGS;
651 if (memcmp (s, "Rqi", 3) == 0)
652 return QI_REGS;
653 if (memcmp (s, "Rad", 3) == 0)
654 return AD_REGS;
655 if (memcmp (s, "Rsi", 3) == 0)
656 return SI_REGS;
657 if (memcmp (s, "Rhi", 3) == 0)
658 return HI_REGS;
659 if (memcmp (s, "Rhc", 3) == 0)
660 return HC_REGS;
661 if (memcmp (s, "Rra", 3) == 0)
662 return RA_REGS;
663 if (memcmp (s, "Rfl", 3) == 0)
664 return FLG_REGS;
665 if (memcmp (s, "Rmm", 3) == 0)
666 {
667 if (fixed_regs[MEM0_REGNO])
668 return NO_REGS;
669 return MEM_REGS;
670 }
671
672 /* PSImode registers - i.e. whatever can hold a pointer. */
673 if (memcmp (s, "Rpi", 3) == 0)
674 {
675 if (TARGET_A16)
676 return HI_REGS;
677 else
678 return RA_REGS; /* r2r0 and r3r1 can hold pointers. */
679 }
680
681 /* We handle this one as an EXTRA_CONSTRAINT. */
682 if (memcmp (s, "Rpa", 3) == 0)
683 return NO_REGS;
684
685 if (*s == 'R')
686 {
687 fprintf(stderr, "unrecognized R constraint: %.3s\n", s);
688 gcc_unreachable();
689 }
690
691 return NO_REGS;
692}
693
694/* Implements REGNO_OK_FOR_BASE_P. */
695int
696m32c_regno_ok_for_base_p (int regno)
697{
698 if (regno == A0_REGNO
699 || regno == A1_REGNO || regno >= FIRST_PSEUDO_REGISTER)
700 return 1;
701 return 0;
702}
703
704#define DEBUG_RELOAD 0
705
706/* Implements PREFERRED_RELOAD_CLASS. In general, prefer general
707 registers of the appropriate size. */
708int
709m32c_preferred_reload_class (rtx x, int rclass)
710{
711 int newclass = rclass;
712
713#if DEBUG_RELOAD
714 fprintf (stderr, "\npreferred_reload_class for %s is ",
715 class_names[rclass]);
716#endif
717 if (rclass == NO_REGS)
718 rclass = GET_MODE (x) == QImode ? HL_REGS : R03_REGS;
719
720 if (classes_intersect (rclass, CR_REGS))
721 {
722 switch (GET_MODE (x))
723 {
724 case QImode:
725 newclass = HL_REGS;
726 break;
727 default:
728 /* newclass = HI_REGS; */
729 break;
730 }
731 }
732
733 else if (newclass == QI_REGS && GET_MODE_SIZE (GET_MODE (x)) > 2)
734 newclass = SI_REGS;
735 else if (GET_MODE_SIZE (GET_MODE (x)) > 4
736 && ~class_contents[rclass][0] & 0x000f)
737 newclass = DI_REGS;
738
739 rclass = reduce_class (rclass, newclass, rclass);
740
741 if (GET_MODE (x) == QImode)
742 rclass = reduce_class (rclass, HL_REGS, rclass);
743
744#if DEBUG_RELOAD
745 fprintf (stderr, "%s\n", class_names[rclass]);
746 debug_rtx (x);
747
748 if (GET_CODE (x) == MEM
749 && GET_CODE (XEXP (x, 0)) == PLUS
750 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
751 fprintf (stderr, "Glorm!\n");
752#endif
753 return rclass;
754}
755
756/* Implements PREFERRED_OUTPUT_RELOAD_CLASS. */
757int
758m32c_preferred_output_reload_class (rtx x, int rclass)
759{
760 return m32c_preferred_reload_class (x, rclass);
761}
762
763/* Implements LIMIT_RELOAD_CLASS. We basically want to avoid using
764 address registers for reloads since they're needed for address
765 reloads. */
766int
767m32c_limit_reload_class (enum machine_mode mode, int rclass)
768{
769#if DEBUG_RELOAD
770 fprintf (stderr, "limit_reload_class for %s: %s ->",
771 mode_name[mode], class_names[rclass]);
772#endif
773
774 if (mode == QImode)
775 rclass = reduce_class (rclass, HL_REGS, rclass);
776 else if (mode == HImode)
777 rclass = reduce_class (rclass, HI_REGS, rclass);
778 else if (mode == SImode)
779 rclass = reduce_class (rclass, SI_REGS, rclass);
780
781 if (rclass != A_REGS)
782 rclass = reduce_class (rclass, DI_REGS, rclass);
783
784#if DEBUG_RELOAD
785 fprintf (stderr, " %s\n", class_names[rclass]);
786#endif
787 return rclass;
788}
789
 790/* Implements SECONDARY_RELOAD_CLASS. QImode values have to be reloaded in
791 r0 or r1, as those are the only real QImode registers. CR regs get
792 reloaded through appropriately sized general or address
793 registers. */
794int
795m32c_secondary_reload_class (int rclass, enum machine_mode mode, rtx x)
796{
797 int cc = class_contents[rclass][0];
798#if DEBUG0
799 fprintf (stderr, "\nsecondary reload class %s %s\n",
800 class_names[rclass], mode_name[mode]);
801 debug_rtx (x);
802#endif
803 if (mode == QImode
804 && GET_CODE (x) == MEM && (cc & ~class_contents[R23_REGS][0]) == 0)
805 return QI_REGS;
806 if (classes_intersect (rclass, CR_REGS)
807 && GET_CODE (x) == REG
808 && REGNO (x) >= SB_REGNO && REGNO (x) <= SP_REGNO)
809 return TARGET_A16 ? HI_REGS : A_REGS;
810 return NO_REGS;
811}
812
813/* Implements CLASS_LIKELY_SPILLED_P. A_REGS is needed for address
814 reloads. */
815int
816m32c_class_likely_spilled_p (int regclass)
817{
818 if (regclass == A_REGS)
819 return 1;
820 return reg_class_size[regclass] == 1;
821}
822
823/* Implements CLASS_MAX_NREGS. We calculate this according to its
824 documented meaning, to avoid potential inconsistencies with actual
825 class definitions. */
826int
827m32c_class_max_nregs (int regclass, enum machine_mode mode)
828{
829 int rn, max = 0;
830
831 for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
832 if (class_contents[regclass][0] & (1 << rn))
833 {
834 int n = m32c_hard_regno_nregs (rn, mode);
835 if (max < n)
836 max = n;
837 }
838 return max;
839}
840
841/* Implements CANNOT_CHANGE_MODE_CLASS. Only r0 and r1 can change to
842 QI (r0l, r1l) because the chip doesn't support QI ops on other
843 registers (well, it does on a0/a1 but if we let gcc do that, reload
844 suffers). Otherwise, we allow changes to larger modes. */
845int
846m32c_cannot_change_mode_class (enum machine_mode from,
847 enum machine_mode to, int rclass)
848{
 849 int rn;
850#if DEBUG0
851 fprintf (stderr, "cannot change from %s to %s in %s\n",
852 mode_name[from], mode_name[to], class_names[rclass]);
853#endif
854
855 /* If the larger mode isn't allowed in any of these registers, we
856 can't allow the change. */
857 for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
858 if (class_contents[rclass][0] & (1 << rn))
859 if (! m32c_hard_regno_ok (rn, to))
860 return 1;
861
862 if (to == QImode)
863 return (class_contents[rclass][0] & 0x1ffa);
864
865 if (class_contents[rclass][0] & 0x0005 /* r0, r1 */
866 && GET_MODE_SIZE (from) > 1)
867 return 0;
868 if (GET_MODE_SIZE (from) > 2) /* all other regs */
869 return 0;
870
871 return 1;
872}
873
874/* Helpers for the rest of the file. */
875/* TRUE if the rtx is a REG rtx for the given register. */
876#define IS_REG(rtx,regno) (GET_CODE (rtx) == REG \
877 && REGNO (rtx) == regno)
878/* TRUE if the rtx is a pseudo - specifically, one we can use as a
879 base register in address calculations (hence the "strict"
880 argument). */
881#define IS_PSEUDO(rtx,strict) (!strict && GET_CODE (rtx) == REG \
882 && (REGNO (rtx) == AP_REGNO \
883 || REGNO (rtx) >= FIRST_PSEUDO_REGISTER))
884
885/* Implements CONST_OK_FOR_CONSTRAINT_P. Currently, all constant
886 constraints start with 'I', with the next two characters indicating
887 the type and size of the range allowed. */
888int
889m32c_const_ok_for_constraint_p (HOST_WIDE_INT value,
890 char c ATTRIBUTE_UNUSED, const char *str)
891{
892 /* s=signed u=unsigned n=nonzero m=minus l=log2able,
893 [sun] bits [SUN] bytes, p=pointer size
894 I[-0-9][0-9] matches that number */
895 if (memcmp (str, "Is3", 3) == 0)
896 {
897 return (-8 <= value && value <= 7);
898 }
899 if (memcmp (str, "IS1", 3) == 0)
900 {
901 return (-128 <= value && value <= 127);
902 }
903 if (memcmp (str, "IS2", 3) == 0)
904 {
905 return (-32768 <= value && value <= 32767);
906 }
907 if (memcmp (str, "IU2", 3) == 0)
908 {
909 return (0 <= value && value <= 65535);
910 }
911 if (memcmp (str, "IU3", 3) == 0)
912 {
913 return (0 <= value && value <= 0x00ffffff);
914 }
915 if (memcmp (str, "In4", 3) == 0)
916 {
917 return (-8 <= value && value && value <= 8);
918 }
919 if (memcmp (str, "In5", 3) == 0)
920 {
921 return (-16 <= value && value && value <= 16);
922 }
923 if (memcmp (str, "In6", 3) == 0)
924 {
925 return (-32 <= value && value && value <= 32);
926 }
927 if (memcmp (str, "IM2", 3) == 0)
928 {
929 return (-65536 <= value && value && value <= -1);
930 }
931 if (memcmp (str, "Ilb", 3) == 0)
932 {
933 int b = exact_log2 (value);
 934 return (b >= 0 && b <= 7);
 935 }
936 if (memcmp (str, "Imb", 3) == 0)
937 {
938 int b = exact_log2 ((value ^ 0xff) & 0xff);
 939 return (b >= 0 && b <= 7);
 940 }
941 if (memcmp (str, "Ilw", 3) == 0)
942 {
943 int b = exact_log2 (value);
 944 return (b >= 0 && b <= 15);
 945 }
946 if (memcmp (str, "Imw", 3) == 0)
947 {
948 int b = exact_log2 ((value ^ 0xffff) & 0xffff);
 949 return (b >= 0 && b <= 15);
950 }
951 if (memcmp (str, "I00", 3) == 0)
952 {
953 return (value == 0);
954 }
955 return 0;
956}
957
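/* Illustrative sketch, not from the original source: worked examples
   for the bit-oriented ranges above.  "Ilb" accepts a constant with
   exactly one of bits 0..7 set: 0x40 gives exact_log2() == 6 and
   matches, while 0x41 gives -1 and does not.  "Imb" accepts a byte
   constant with exactly one bit clear: 0xfb gives
   exact_log2 ((0xfb ^ 0xff) & 0xff) == 2, so it matches. */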
958/* Implements EXTRA_CONSTRAINT_STR (see next function too). 'S' is
959 for memory constraints, plus "Rpa" for PARALLEL rtx's we use for
960 call return values. */
961int
962m32c_extra_constraint_p2 (rtx value, char c ATTRIBUTE_UNUSED, const char *str)
963{
964 encode_pattern (value);
965 if (memcmp (str, "Sd", 2) == 0)
966 {
967 /* This is the common "src/dest" address */
968 rtx r;
969 if (GET_CODE (value) == MEM && CONSTANT_P (XEXP (value, 0)))
970 return 1;
971 if (RTX_IS ("ms") || RTX_IS ("m+si"))
972 return 1;
973 if (RTX_IS ("m++rii"))
974 {
975 if (REGNO (patternr[3]) == FB_REGNO
976 && INTVAL (patternr[4]) == 0)
977 return 1;
978 }
979 if (RTX_IS ("mr"))
980 r = patternr[1];
981 else if (RTX_IS ("m+ri") || RTX_IS ("m+rs") || RTX_IS ("m+r+si"))
982 r = patternr[2];
983 else
984 return 0;
985 if (REGNO (r) == SP_REGNO)
986 return 0;
987 return m32c_legitimate_address_p (GET_MODE (value), XEXP (value, 0), 1);
988 }
989 else if (memcmp (str, "Sa", 2) == 0)
990 {
991 rtx r;
992 if (RTX_IS ("mr"))
993 r = patternr[1];
994 else if (RTX_IS ("m+ri"))
995 r = patternr[2];
996 else
997 return 0;
998 return (IS_REG (r, A0_REGNO) || IS_REG (r, A1_REGNO));
999 }
1000 else if (memcmp (str, "Si", 2) == 0)
1001 {
1002 return (RTX_IS ("mi") || RTX_IS ("ms") || RTX_IS ("m+si"));
1003 }
1004 else if (memcmp (str, "Ss", 2) == 0)
1005 {
1006 return ((RTX_IS ("mr")
1007 && (IS_REG (patternr[1], SP_REGNO)))
1008 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SP_REGNO))));
1009 }
1010 else if (memcmp (str, "Sf", 2) == 0)
1011 {
1012 return ((RTX_IS ("mr")
1013 && (IS_REG (patternr[1], FB_REGNO)))
1014 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], FB_REGNO))));
1015 }
1016 else if (memcmp (str, "Sb", 2) == 0)
1017 {
1018 return ((RTX_IS ("mr")
1019 && (IS_REG (patternr[1], SB_REGNO)))
1020 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SB_REGNO))));
1021 }
1022 else if (memcmp (str, "Sp", 2) == 0)
1023 {
1024 /* Absolute addresses 0..0x1fff used for bit addressing (I/O ports) */
1025 return (RTX_IS ("mi")
1026 && !(INTVAL (patternr[1]) & ~0x1fff));
1027 }
1028 else if (memcmp (str, "S1", 2) == 0)
1029 {
1030 return r1h_operand (value, QImode);
1031 }
1032
1033 gcc_assert (str[0] != 'S');
1034
1035 if (memcmp (str, "Rpa", 2) == 0)
1036 return GET_CODE (value) == PARALLEL;
1037
1038 return 0;
1039}
1040
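/* Illustrative sketch, not from the original source: how the "Sd"
   case above plays out for a frame-local access.  The operand
   (mem:HI (plus:PSI (reg:PSI FB) (const_int -4))) encodes as "m+ri",
   so r = patternr[2] is the $fb register; it is not $sp, so the
   verdict comes from m32c_legitimate_address_p().  A stack-relative
   access like (mem:HI (plus (reg SP) (const_int 2))) is rejected by
   "Sd" and is matched by "Ss" instead. */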
1041/* This is for when we're debugging the above. */
1042int
1043m32c_extra_constraint_p (rtx value, char c, const char *str)
1044{
1045 int rv = m32c_extra_constraint_p2 (value, c, str);
1046#if DEBUG0
1047 fprintf (stderr, "\nconstraint %.*s: %d\n", CONSTRAINT_LEN (c, str), str,
1048 rv);
1049 debug_rtx (value);
1050#endif
1051 return rv;
1052}
1053
1054/* Implements EXTRA_MEMORY_CONSTRAINT. Currently, we only use strings
1055 starting with 'S'. */
1056int
1057m32c_extra_memory_constraint (char c, const char *str ATTRIBUTE_UNUSED)
1058{
1059 return c == 'S';
1060}
1061
1062/* Implements EXTRA_ADDRESS_CONSTRAINT. We reserve 'A' strings for these,
1063 but don't currently define any. */
1064int
1065m32c_extra_address_constraint (char c, const char *str ATTRIBUTE_UNUSED)
1066{
1067 return c == 'A';
1068}
1069
1070/* STACK AND CALLING */
1071
1072/* Frame Layout */
1073
1074/* Implements RETURN_ADDR_RTX. Note that R8C and M16C push 24 bits
1075 (yes, THREE bytes) onto the stack for the return address, but we
1076 don't support pointers bigger than 16 bits on those chips. This
1077 will likely wreak havoc with exception unwinding. FIXME. */
1078rtx
1079m32c_return_addr_rtx (int count)
1080{
1081 enum machine_mode mode;
1082 int offset;
1083 rtx ra_mem;
1084
1085 if (count)
1086 return NULL_RTX;
1087 /* we want 2[$fb] */
1088
1089 if (TARGET_A24)
1090 {
1091 mode = SImode;
1092 offset = 4;
1093 }
1094 else
1095 {
1096 /* FIXME: it's really 3 bytes */
1097 mode = HImode;
1098 offset = 2;
1099 }
1100
1101 ra_mem =
1102 gen_rtx_MEM (mode, plus_constant (gen_rtx_REG (Pmode, FP_REGNO), offset));
1103 return copy_to_mode_reg (mode, ra_mem);
1104}
1105
1106/* Implements INCOMING_RETURN_ADDR_RTX. See comment above. */
1107rtx
1108m32c_incoming_return_addr_rtx (void)
1109{
1110 /* we want [sp] */
1111 return gen_rtx_MEM (PSImode, gen_rtx_REG (PSImode, SP_REGNO));
1112}
1113
1114/* Exception Handling Support */
1115
1116/* Implements EH_RETURN_DATA_REGNO. Choose registers able to hold
1117 pointers. */
1118int
1119m32c_eh_return_data_regno (int n)
1120{
1121 switch (n)
1122 {
1123 case 0:
1124 return A0_REGNO;
1125 case 1:
1126 return A1_REGNO;
1127 default:
1128 return INVALID_REGNUM;
1129 }
1130}
1131
1132/* Implements EH_RETURN_STACKADJ_RTX. Saved and used later in
1133 m32c_emit_eh_epilogue. */
1134rtx
1135m32c_eh_return_stackadj_rtx (void)
1136{
1137 if (!cfun->machine->eh_stack_adjust)
1138 {
1139 rtx sa;
1140
1141 sa = gen_reg_rtx (Pmode);
1142 cfun->machine->eh_stack_adjust = sa;
1143 }
1144 return cfun->machine->eh_stack_adjust;
1145}
1146
1147/* Registers That Address the Stack Frame */
1148
1149/* Implements DWARF_FRAME_REGNUM and DBX_REGISTER_NUMBER. Note that
1150 the original spec called for dwarf numbers to vary with register
1151 width as well, for example, r0l, r0, and r2r0 would each have
1152 different dwarf numbers. GCC doesn't support this, and we don't do
1153 it, and gdb seems to like it this way anyway. */
1154unsigned int
1155m32c_dwarf_frame_regnum (int n)
1156{
1157 switch (n)
1158 {
1159 case R0_REGNO:
1160 return 5;
1161 case R1_REGNO:
1162 return 6;
1163 case R2_REGNO:
1164 return 7;
1165 case R3_REGNO:
1166 return 8;
1167 case A0_REGNO:
1168 return 9;
1169 case A1_REGNO:
1170 return 10;
1171 case FB_REGNO:
1172 return 11;
1173 case SB_REGNO:
1174 return 19;
1175
1176 case SP_REGNO:
1177 return 12;
1178 case PC_REGNO:
1179 return 13;
1180 default:
1181 return DWARF_FRAME_REGISTERS + 1;
1182 }
1183}
1184
1185/* The frame looks like this:
1186
1187 ap -> +------------------------------
1188 | Return address (3 or 4 bytes)
1189 | Saved FB (2 or 4 bytes)
1190 fb -> +------------------------------
1191 | local vars
1192 | register saves fb
1193 | through r0 as needed
1194 sp -> +------------------------------
1195*/
1196
1197/* We use this to wrap all emitted insns in the prologue. */
1198static rtx
1199F (rtx x)
1200{
1201 RTX_FRAME_RELATED_P (x) = 1;
1202 return x;
1203}
1204
1205/* This maps register numbers to the PUSHM/POPM bitfield, and tells us
1206 how much the stack pointer moves for each, for each cpu family. */
1207static struct
1208{
1209 int reg1;
1210 int bit;
1211 int a16_bytes;
1212 int a24_bytes;
1213} pushm_info[] =
1214{
1215 /* These are in reverse push (nearest-to-sp) order. */
1216 { R0_REGNO, 0x80, 2, 2 },
 1217 { R1_REGNO, 0x40, 2, 2 },
1218 { R2_REGNO, 0x20, 2, 2 },
1219 { R3_REGNO, 0x10, 2, 2 },
1220 { A0_REGNO, 0x08, 2, 4 },
1221 { A1_REGNO, 0x04, 2, 4 },
1222 { SB_REGNO, 0x02, 2, 4 },
1223 { FB_REGNO, 0x01, 2, 4 }
1224};
1225
1226#define PUSHM_N (sizeof(pushm_info)/sizeof(pushm_info[0]))
1227
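/* Illustrative note, not from the original source: how the table
   above turns into a PUSHM operand.  A function that must save r1
   and a0 gets reg_mask = 0x40 | 0x08 = 0x48; the stack pointer moves
   2 + 2 = 4 bytes on TARGET_A16 but 2 + 4 = 6 bytes on TARGET_A24,
   since address registers are wider there.  A PP_justcount call
   below returns that byte count without emitting any insns. */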
1228/* Returns TRUE if we need to save/restore the given register. We
1229 save everything for exception handlers, so that any register can be
1230 unwound. For interrupt handlers, we save everything if the handler
1231 calls something else (because we don't know what *that* function
1232 might do), but try to be a bit smarter if the handler is a leaf
1233 function. We always save $a0, though, because we use that in the
1234 epilog to copy $fb to $sp. */
1235static int
1236need_to_save (int regno)
1237{
1238 if (fixed_regs[regno])
1239 return 0;
1240 if (cfun->calls_eh_return)
1241 return 1;
1242 if (regno == FP_REGNO)
1243 return 0;
1244 if (cfun->machine->is_interrupt
1245 && (!cfun->machine->is_leaf || regno == A0_REGNO))
1246 return 1;
1247 if (regs_ever_live[regno]
1248 && (!call_used_regs[regno] || cfun->machine->is_interrupt))
1249 return 1;
1250 return 0;
1251}
1252
1253/* This function contains all the intelligence about saving and
1254 restoring registers. It always figures out the register save set.
1255 When called with PP_justcount, it merely returns the size of the
1256 save set (for eliminating the frame pointer, for example). When
1257 called with PP_pushm or PP_popm, it emits the appropriate
1258 instructions for saving (pushm) or restoring (popm) the
1259 registers. */
1260static int
1261m32c_pushm_popm (Push_Pop_Type ppt)
1262{
1263 int reg_mask = 0;
1264 int byte_count = 0, bytes;
1265 int i;
1266 rtx dwarf_set[PUSHM_N];
1267 int n_dwarfs = 0;
1268 int nosave_mask = 0;
1269
1270 if (cfun->return_rtx
1271 && GET_CODE (cfun->return_rtx) == PARALLEL
1272 && !(cfun->calls_eh_return || cfun->machine->is_interrupt))
1273 {
1274 rtx exp = XVECEXP (cfun->return_rtx, 0, 0);
1275 rtx rv = XEXP (exp, 0);
1276 int rv_bytes = GET_MODE_SIZE (GET_MODE (rv));
1277
1278 if (rv_bytes > 2)
1279 nosave_mask |= 0x20; /* PSI, SI */
1280 else
1281 nosave_mask |= 0xf0; /* DF */
1282 if (rv_bytes > 4)
1283 nosave_mask |= 0x50; /* DI */
1284 }
1285
1286 for (i = 0; i < (int) PUSHM_N; i++)
1287 {
1288 /* Skip if neither register needs saving. */
1289 if (!need_to_save (pushm_info[i].reg1))
1290 continue;
1291
1292 if (pushm_info[i].bit & nosave_mask)
1293 continue;
1294
1295 reg_mask |= pushm_info[i].bit;
1296 bytes = TARGET_A16 ? pushm_info[i].a16_bytes : pushm_info[i].a24_bytes;
1297
1298 if (ppt == PP_pushm)
1299 {
1300 enum machine_mode mode = (bytes == 2) ? HImode : SImode;
1301 rtx addr;
1302
1303 /* Always use stack_pointer_rtx instead of calling
1304 rtx_gen_REG ourselves. Code elsewhere in GCC assumes
1305 that there is a single rtx representing the stack pointer,
1306 namely stack_pointer_rtx, and uses == to recognize it. */
1307 addr = stack_pointer_rtx;
1308
1309 if (byte_count != 0)
1310 addr = gen_rtx_PLUS (GET_MODE (addr), addr, GEN_INT (byte_count));
1311
1312 dwarf_set[n_dwarfs++] =
1313 gen_rtx_SET (VOIDmode,
1314 gen_rtx_MEM (mode, addr),
1315 gen_rtx_REG (mode, pushm_info[i].reg1));
1316 F (dwarf_set[n_dwarfs - 1]);
1317
1318 }
1319 byte_count += bytes;
1320 }
1321
1322 if (cfun->machine->is_interrupt)
1323 {
1324 cfun->machine->intr_pushm = reg_mask & 0xfe;
1325 reg_mask = 0;
1326 byte_count = 0;
1327 }
1328
1329 if (cfun->machine->is_interrupt)
1330 for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
1331 if (need_to_save (i))
1332 {
1333 byte_count += 2;
1334 cfun->machine->intr_pushmem[i - MEM0_REGNO] = 1;
1335 }
1336
1337 if (ppt == PP_pushm && byte_count)
1338 {
1339 rtx note = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (n_dwarfs + 1));
1340 rtx pushm;
1341
1342 if (reg_mask)
1343 {
1344 XVECEXP (note, 0, 0)
1345 = gen_rtx_SET (VOIDmode,
1346 stack_pointer_rtx,
1347 gen_rtx_PLUS (GET_MODE (stack_pointer_rtx),
1348 stack_pointer_rtx,
1349 GEN_INT (-byte_count)));
1350 F (XVECEXP (note, 0, 0));
1351
1352 for (i = 0; i < n_dwarfs; i++)
1353 XVECEXP (note, 0, i + 1) = dwarf_set[i];
1354
1355 pushm = F (emit_insn (gen_pushm (GEN_INT (reg_mask))));
1356
1357 REG_NOTES (pushm) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, note,
1358 REG_NOTES (pushm));
1359 }
1360
1361 if (cfun->machine->is_interrupt)
1362 for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
1363 if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
1364 {
1365 if (TARGET_A16)
1366 pushm = emit_insn (gen_pushhi_16 (gen_rtx_REG (HImode, i)));
1367 else
1368 pushm = emit_insn (gen_pushhi_24 (gen_rtx_REG (HImode, i)));
1369 F (pushm);
1370 }
1371 }
1372 if (ppt == PP_popm && byte_count)
1373 {
1374 if (cfun->machine->is_interrupt)
1375 for (i = MEM7_REGNO; i >= MEM0_REGNO; i--)
1376 if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
1377 {
1378 if (TARGET_A16)
 1379 emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, i)));
 1380 else
 1381 emit_insn (gen_pophi_24 (gen_rtx_REG (HImode, i)));
1382 }
1383 if (reg_mask)
1384 emit_insn (gen_popm (GEN_INT (reg_mask)));
1385 }
1386
1387 return byte_count;
1388}
1389
1390/* Implements INITIAL_ELIMINATION_OFFSET. See the comment above that
1391 diagrams our call frame. */
1392int
1393m32c_initial_elimination_offset (int from, int to)
1394{
1395 int ofs = 0;
1396
1397 if (from == AP_REGNO)
1398 {
1399 if (TARGET_A16)
1400 ofs += 5;
1401 else
1402 ofs += 8;
1403 }
1404
1405 if (to == SP_REGNO)
1406 {
1407 ofs += m32c_pushm_popm (PP_justcount);
1408 ofs += get_frame_size ();
1409 }
1410
1411 /* Account for push rounding. */
1412 if (TARGET_A24)
1413 ofs = (ofs + 1) & ~1;
1414#if DEBUG0
1415 fprintf (stderr, "initial_elimination_offset from=%d to=%d, ofs=%d\n", from,
1416 to, ofs);
1417#endif
1418 return ofs;
1419}
1420
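/* Illustrative note, not from the original source: on TARGET_A16,
   eliminating $ap into $sp for a function with a 10-byte frame that
   saves r1 and a0 gives 5 (return address plus saved $fb) + 4
   (pushm bytes, per the table above) + 10 (frame) = 19; the
   word-rounding at the end applies only to TARGET_A24. */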
1421/* Passing Function Arguments on the Stack */
1422
1423#undef TARGET_PROMOTE_PROTOTYPES
1424#define TARGET_PROMOTE_PROTOTYPES m32c_promote_prototypes
1425static bool
1426m32c_promote_prototypes (tree fntype ATTRIBUTE_UNUSED)
1427{
1428 return 0;
1429}
1430
1431/* Implements PUSH_ROUNDING. The R8C and M16C have byte stacks, the
1432 M32C has word stacks. */
1433int
1434m32c_push_rounding (int n)
1435{
1436 if (TARGET_R8C || TARGET_M16C)
1437 return n;
1438 return (n + 1) & ~1;
1439}
1440
1441/* Passing Arguments in Registers */
1442
1443/* Implements FUNCTION_ARG. Arguments are passed partly in registers,
1444 partly on stack. If our function returns a struct, a pointer to a
1445 buffer for it is at the top of the stack (last thing pushed). The
1446 first few real arguments may be in registers as follows:
1447
1448 R8C/M16C: arg1 in r1 if it's QI or HI (else it's pushed on stack)
1449 arg2 in r2 if it's HI (else pushed on stack)
1450 rest on stack
1451 M32C: arg1 in r0 if it's QI or HI (else it's pushed on stack)
1452 rest on stack
1453
1454 Structs are not passed in registers, even if they fit. Only
1455 integer and pointer types are passed in registers.
1456
1457 Note that when arg1 doesn't fit in r1, arg2 may still be passed in
1458 r2 if it fits. */
1459rtx
1460m32c_function_arg (CUMULATIVE_ARGS * ca,
1461 enum machine_mode mode, tree type, int named)
1462{
1463 /* Can return a reg, parallel, or 0 for stack */
1464 rtx rv = NULL_RTX;
1465#if DEBUG0
1466 fprintf (stderr, "func_arg %d (%s, %d)\n",
1467 ca->parm_num, mode_name[mode], named);
1468 debug_tree (type);
1469#endif
1470
1471 if (mode == VOIDmode)
1472 return GEN_INT (0);
1473
1474 if (ca->force_mem || !named)
1475 {
1476#if DEBUG0
1477 fprintf (stderr, "func arg: force %d named %d, mem\n", ca->force_mem,
1478 named);
1479#endif
1480 return NULL_RTX;
1481 }
1482
1483 if (type && INTEGRAL_TYPE_P (type) && POINTER_TYPE_P (type))
1484 return NULL_RTX;
1485
1486 if (type && AGGREGATE_TYPE_P (type))
1487 return NULL_RTX;
1488
1489 switch (ca->parm_num)
1490 {
1491 case 1:
1492 if (GET_MODE_SIZE (mode) == 1 || GET_MODE_SIZE (mode) == 2)
1493 rv = gen_rtx_REG (mode, TARGET_A16 ? R1_REGNO : R0_REGNO);
1494 break;
1495
1496 case 2:
1497 if (TARGET_A16 && GET_MODE_SIZE (mode) == 2)
1498 rv = gen_rtx_REG (mode, R2_REGNO);
1499 break;
1500 }
1501
1502#if DEBUG0
1503 debug_rtx (rv);
1504#endif
1505 return rv;
1506}
1507
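/* Illustrative sketch, not from the original source, assuming 16 bit
   ints on this target: for f (char a, int b, long c) with all
   arguments named, the code above places, on R8C/M16C, a in r1
   (parm 1, QImode), b in r2 (parm 2, HImode) and c on the stack;
   on M32C, a goes in r0 and both b and c go on the stack, since only
   the first argument can use a register there. */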
1508#undef TARGET_PASS_BY_REFERENCE
1509#define TARGET_PASS_BY_REFERENCE m32c_pass_by_reference
1510static bool
1511m32c_pass_by_reference (CUMULATIVE_ARGS * ca ATTRIBUTE_UNUSED,
1512 enum machine_mode mode ATTRIBUTE_UNUSED,
1513 tree type ATTRIBUTE_UNUSED,
1514 bool named ATTRIBUTE_UNUSED)
1515{
1516 return 0;
1517}
1518
1519/* Implements INIT_CUMULATIVE_ARGS. */
1520void
1521m32c_init_cumulative_args (CUMULATIVE_ARGS * ca,
 1522 tree fntype,
 1523 rtx libname ATTRIBUTE_UNUSED,
 1524 tree fndecl,
1525 int n_named_args ATTRIBUTE_UNUSED)
1526{
1527 if (fntype && aggregate_value_p (TREE_TYPE (fntype), fndecl))
1528 ca->force_mem = 1;
1529 else
1530 ca->force_mem = 0;
1531 ca->parm_num = 1;
1532}
1533
1534/* Implements FUNCTION_ARG_ADVANCE. force_mem is set for functions
1535 returning structures, so we always reset that. Otherwise, we only
1536 need to know the sequence number of the argument to know what to do
1537 with it. */
1538void
1539m32c_function_arg_advance (CUMULATIVE_ARGS * ca,
1540 enum machine_mode mode ATTRIBUTE_UNUSED,
1541 tree type ATTRIBUTE_UNUSED,
1542 int named ATTRIBUTE_UNUSED)
1543{
1544 if (ca->force_mem)
1545 ca->force_mem = 0;
1546 else
1547 ca->parm_num++;
1548}
1549
1550/* Implements FUNCTION_ARG_REGNO_P. */
1551int
1552m32c_function_arg_regno_p (int r)
1553{
1554 if (TARGET_A24)
1555 return (r == R0_REGNO);
1556 return (r == R1_REGNO || r == R2_REGNO);
1557}
1558
1559/* HImode and PSImode are the two "native" modes as far as GCC is
1560 concerned, but the chips also support a 32 bit mode which is used
1561 for some opcodes in R8C/M16C and for reset vectors and such. */
1562#undef TARGET_VALID_POINTER_MODE
1563#define TARGET_VALID_POINTER_MODE m32c_valid_pointer_mode
23fed240 1564static bool
1565m32c_valid_pointer_mode (enum machine_mode mode)
1566{
1567 if (mode == HImode
1568 || mode == PSImode
1569 || mode == SImode
1570 )
1571 return 1;
1572 return 0;
1573}
1574
1575/* How Scalar Function Values Are Returned */
1576
1577/* Implements LIBCALL_VALUE. Most values are returned in $r0, or some
1578 combination of registers starting there (r2r0 for longs, r3r1r2r0
1579 for long long, r3r2r1r0 for doubles), except that that ABI
1580 currently doesn't work because it ends up using all available
1581 general registers and gcc often can't compile it. So, instead, we
1582 return anything bigger than 16 bits in "mem0" (effectively, a
1583 memory location). */
1584rtx
1585m32c_libcall_value (enum machine_mode mode)
1586{
1587 /* return reg or parallel */
1588#if 0
1589 /* FIXME: GCC has difficulty returning large values in registers,
1590 because that ties up most of the general registers and gives the
1591 register allocator little to work with. Until we can resolve
1592 this, large values are returned in memory. */
1593 if (mode == DFmode)
1594 {
1595 rtx rv;
1596
1597 rv = gen_rtx_PARALLEL (mode, rtvec_alloc (4));
1598 XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
1599 gen_rtx_REG (HImode,
1600 R0_REGNO),
1601 GEN_INT (0));
1602 XVECEXP (rv, 0, 1) = gen_rtx_EXPR_LIST (VOIDmode,
1603 gen_rtx_REG (HImode,
1604 R1_REGNO),
1605 GEN_INT (2));
1606 XVECEXP (rv, 0, 2) = gen_rtx_EXPR_LIST (VOIDmode,
1607 gen_rtx_REG (HImode,
1608 R2_REGNO),
1609 GEN_INT (4));
1610 XVECEXP (rv, 0, 3) = gen_rtx_EXPR_LIST (VOIDmode,
1611 gen_rtx_REG (HImode,
1612 R3_REGNO),
1613 GEN_INT (6));
1614 return rv;
1615 }
1616
1617 if (TARGET_A24 && GET_MODE_SIZE (mode) > 2)
1618 {
1619 rtx rv;
1620
1621 rv = gen_rtx_PARALLEL (mode, rtvec_alloc (1));
1622 XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
1623 gen_rtx_REG (mode,
1624 R0_REGNO),
1625 GEN_INT (0));
1626 return rv;
1627 }
1628#endif
1629
1630 if (GET_MODE_SIZE (mode) > 2)
1631 return gen_rtx_REG (mode, MEM0_REGNO);
1632 return gen_rtx_REG (mode, R0_REGNO);
1633}
1634
1635/* Implements FUNCTION_VALUE. Functions and libcalls have the same
1636 conventions. */
1637rtx
1638m32c_function_value (tree valtype, tree func ATTRIBUTE_UNUSED)
1639{
1640 /* return reg or parallel */
1641 enum machine_mode mode = TYPE_MODE (valtype);
1642 return m32c_libcall_value (mode);
1643}
1644
1645/* How Large Values Are Returned */
1646
1647/* We return structures by pushing the address on the stack, even if
1648 we use registers for the first few "real" arguments. */
1649#undef TARGET_STRUCT_VALUE_RTX
1650#define TARGET_STRUCT_VALUE_RTX m32c_struct_value_rtx
1651static rtx
1652m32c_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
1653 int incoming ATTRIBUTE_UNUSED)
1654{
1655 return 0;
1656}
1657
1658/* Function Entry and Exit */
1659
1660/* Implements EPILOGUE_USES. Interrupts restore all registers. */
1661int
1662m32c_epilogue_uses (int regno ATTRIBUTE_UNUSED)
1663{
1664 if (cfun->machine->is_interrupt)
1665 return 1;
1666 return 0;
1667}
1668
1669/* Implementing the Varargs Macros */
1670
1671#undef TARGET_STRICT_ARGUMENT_NAMING
1672#define TARGET_STRICT_ARGUMENT_NAMING m32c_strict_argument_naming
1673static bool
1674m32c_strict_argument_naming (CUMULATIVE_ARGS * ca ATTRIBUTE_UNUSED)
1675{
1676 return 1;
1677}
1678
1679/* Trampolines for Nested Functions */
1680
1681/*
1682 m16c:
1683 1 0000 75C43412 mov.w #0x1234,a0
1684 2 0004 FC000000 jmp.a label
1685
1686 m32c:
1687 1 0000 BC563412 mov.l:s #0x123456,a0
1688 2 0004 CC000000 jmp.a label
1689*/
1690
1691/* Implements TRAMPOLINE_SIZE. */
1692int
1693m32c_trampoline_size (void)
1694{
1695 /* Allocate extra space so we can avoid the messy shifts when we
1696 initialize the trampoline; we just write past the end of the
1697 opcode. */
1698 return TARGET_A16 ? 8 : 10;
1699}
1700
1701/* Implements TRAMPOLINE_ALIGNMENT. */
1702int
1703m32c_trampoline_alignment (void)
1704{
1705 return 2;
1706}
1707
1708/* Implements INITIALIZE_TRAMPOLINE. */
1709void
1710m32c_initialize_trampoline (rtx tramp, rtx function, rtx chainval)
1711{
1712#define A0(m,i) gen_rtx_MEM (m, plus_constant (tramp, i))
1713 if (TARGET_A16)
1714 {
1715 /* Note: we subtract a "word" because the moves want signed
1716 constants, not unsigned constants. */
1717 emit_move_insn (A0 (HImode, 0), GEN_INT (0xc475 - 0x10000));
1718 emit_move_insn (A0 (HImode, 2), chainval);
1719 emit_move_insn (A0 (QImode, 4), GEN_INT (0xfc - 0x100));
1720 /* We use 16 bit addresses here, but store the zero to turn it
1721 into a 24 bit offset. */
1722 emit_move_insn (A0 (HImode, 5), function);
1723 emit_move_insn (A0 (QImode, 7), GEN_INT (0x00));
1724 }
1725 else
1726 {
1727 /* Note that the PSI moves actually write 4 bytes. Make sure we
1728 write stuff out in the right order, and leave room for the
1729 extra byte at the end. */
1730 emit_move_insn (A0 (QImode, 0), GEN_INT (0xbc - 0x100));
1731 emit_move_insn (A0 (PSImode, 1), chainval);
1732 emit_move_insn (A0 (QImode, 4), GEN_INT (0xcc - 0x100));
1733 emit_move_insn (A0 (PSImode, 5), function);
1734 }
1735#undef A0
1736}
1737
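/* Illustrative sketch, not from the original source: the byte image
   the TARGET_A16 branch above builds, matching the asm sketch
   earlier.  For a static chain value of 0x1234 and a function at
   0x005678 the trampoline is
     75 C4 34 12    mov.w #0x1234,a0   (0xc475 stored little-endian)
     FC 78 56 00    jmp.a 0x005678     (opcode, 16 bit address, plus
                                         the explicit zero byte that
                                         widens it to 24 bits)
   which is why m32c_trampoline_size() reserves 8 bytes on A16. */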
1738/* Implicit Calls to Library Routines */
1739
1740#undef TARGET_INIT_LIBFUNCS
1741#define TARGET_INIT_LIBFUNCS m32c_init_libfuncs
1742static void
1743m32c_init_libfuncs (void)
1744{
1745 if (TARGET_A24)
1746 {
1747 /* We do this because the M32C has an HImode operand, but the
1748 M16C has an 8 bit operand. Since gcc looks at the match data
1749 and not the expanded rtl, we have to reset the array so that
1750 the right modes are found. */
1751 setcc_gen_code[EQ] = CODE_FOR_seq_24;
1752 setcc_gen_code[NE] = CODE_FOR_sne_24;
1753 setcc_gen_code[GT] = CODE_FOR_sgt_24;
1754 setcc_gen_code[GE] = CODE_FOR_sge_24;
1755 setcc_gen_code[LT] = CODE_FOR_slt_24;
1756 setcc_gen_code[LE] = CODE_FOR_sle_24;
1757 setcc_gen_code[GTU] = CODE_FOR_sgtu_24;
1758 setcc_gen_code[GEU] = CODE_FOR_sgeu_24;
1759 setcc_gen_code[LTU] = CODE_FOR_sltu_24;
1760 setcc_gen_code[LEU] = CODE_FOR_sleu_24;
1761 }
1762}
1763
1764/* Addressing Modes */
1765
1766/* Used by GO_IF_LEGITIMATE_ADDRESS. The r8c/m32c family supports a
1767 wide range of non-orthogonal addressing modes, including the
1768 ability to double-indirect on *some* of them. Not all insns
1769 support all modes, either, but we rely on predicates and
1770 constraints to deal with that. */
1771int
1772m32c_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
1773{
1774 int mode_adjust;
1775 if (CONSTANT_P (x))
1776 return 1;
1777
1778 /* Wide references to memory will be split after reload, so we must
1779 ensure that all parts of such splits remain legitimate
1780 addresses. */
1781 mode_adjust = GET_MODE_SIZE (mode) - 1;
1782
1783 /* allowing PLUS yields mem:HI(plus:SI(mem:SI(plus:SI in m32c_split_move */
1784 if (GET_CODE (x) == PRE_DEC
1785 || GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_MODIFY)
1786 {
1787 return (GET_CODE (XEXP (x, 0)) == REG
1788 && REGNO (XEXP (x, 0)) == SP_REGNO);
1789 }
1790
1791#if 0
1792 /* This is the double indirection detection, but it currently
1793 doesn't work as cleanly as this code implies, so until we've had
1794 a chance to debug it, leave it disabled. */
1795 if (TARGET_A24 && GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) != PLUS)
1796 {
1797#if DEBUG_DOUBLE
1798 fprintf (stderr, "double indirect\n");
1799#endif
1800 x = XEXP (x, 0);
1801 }
1802#endif
1803
1804 encode_pattern (x);
1805 if (RTX_IS ("r"))
1806 {
1807 /* Most indexable registers can be used without displacements,
1808 although some of them will be emitted with an explicit zero
1809 to please the assembler. */
1810 switch (REGNO (patternr[0]))
1811 {
1812 case A0_REGNO:
1813 case A1_REGNO:
1814 case SB_REGNO:
1815 case FB_REGNO:
1816 case SP_REGNO:
1817 return 1;
1818
1819 default:
1820 if (IS_PSEUDO (patternr[0], strict))
1821 return 1;
1822 return 0;
1823 }
1824 }
1825 if (RTX_IS ("+ri"))
1826 {
1827 /* This is more interesting, because different base registers
1828 allow for different displacements - both range and signedness
1829 - and it differs from chip series to chip series too. */
1830 int rn = REGNO (patternr[1]);
1831 HOST_WIDE_INT offs = INTVAL (patternr[2]);
1832 switch (rn)
1833 {
1834 case A0_REGNO:
1835 case A1_REGNO:
1836 case SB_REGNO:
1837 /* The syntax only allows positive offsets, but when the
1838 offsets span the entire memory range, we can simulate
1839 negative offsets by wrapping. */
1840 if (TARGET_A16)
1841 return (offs >= -65536 && offs <= 65535 - mode_adjust);
1842 if (rn == SB_REGNO)
1843 return (offs >= 0 && offs <= 65535 - mode_adjust);
1844 /* A0 or A1 */
1845 return (offs >= -16777216 && offs <= 16777215);
1846
1847 case FB_REGNO:
1848 if (TARGET_A16)
1849 return (offs >= -128 && offs <= 127 - mode_adjust);
1850 return (offs >= -65536 && offs <= 65535 - mode_adjust);
1851
1852 case SP_REGNO:
1853 return (offs >= -128 && offs <= 127 - mode_adjust);
1854
1855 default:
1856 if (IS_PSEUDO (patternr[1], strict))
1857 return 1;
1858 return 0;
1859 }
1860 }
1861 if (RTX_IS ("+rs") || RTX_IS ("+r+si"))
1862 {
1863 rtx reg = patternr[1];
1864
1865 /* We don't know where the symbol is, so only allow base
1866 registers which support displacements spanning the whole
1867 address range. */
1868 switch (REGNO (reg))
1869 {
1870 case A0_REGNO:
1871 case A1_REGNO:
1872 /* $sb needs a secondary reload, but since it's involved in
1873 memory address reloads too, we don't deal with it very
1874 well. */
1875 /* case SB_REGNO: */
1876 return 1;
1877 default:
1878 if (IS_PSEUDO (reg, strict))
1879 return 1;
1880 return 0;
1881 }
1882 }
1883 return 0;
1884}
1885
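/* Illustrative sketch, not from the original source: sample verdicts
   from the function above on TARGET_A16 for HImode (mode_adjust == 1):
     (plus (reg FB) (const_int -4))    accepted  (range -128..126)
     (plus (reg FB) (const_int 200))   rejected, too far for $fb
     (plus (reg A0) (const_int 200))   accepted  (range -65536..65534)
     (plus (reg SP) (const_int -130))  rejected  ($sp range is -128..126)
   The rejected $fb case is what the LEGITIMIZE_ADDRESS code further
   down repairs by copying $fb into an address register. */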
1886/* Implements REG_OK_FOR_BASE_P. */
1887int
1888m32c_reg_ok_for_base_p (rtx x, int strict)
1889{
1890 if (GET_CODE (x) != REG)
1891 return 0;
1892 switch (REGNO (x))
1893 {
1894 case A0_REGNO:
1895 case A1_REGNO:
1896 case SB_REGNO:
1897 case FB_REGNO:
1898 case SP_REGNO:
1899 return 1;
1900 default:
1901 if (IS_PSEUDO (x, strict))
1902 return 1;
1903 return 0;
1904 }
1905}
1906
1907/* We have three choices for choosing fb->aN offsets. If we choose -128,
1908 we need one MOVA -128[fb],aN opcode and 16 bit aN displacements,
1909 like this:
1910 EB 4B FF mova -128[$fb],$a0
1911 D8 0C FF FF mov.w:Q #0,-1[$a0]
1912
1913 Alternately, we subtract the frame size, and hopefully use 8 bit aN
1914 displacements:
1915 7B F4 stc $fb,$a0
1916 77 54 00 01 sub #256,$a0
1917 D8 08 01 mov.w:Q #0,1[$a0]
1918
1919 If we don't offset (i.e. offset by zero), we end up with:
1920 7B F4 stc $fb,$a0
1921 D8 0C 00 FF mov.w:Q #0,-256[$a0]
1922
1923 We have to subtract *something* so that we have a PLUS rtx to mark
1924 that we've done this reload. The -128 offset will never result in
1925 an 8 bit aN offset, and the payoff for the second case is five
1926 loads *if* those loads are within 256 bytes of the other end of the
1927 frame, so the third case seems best. Note that we subtract the
1928 zero, but detect that in the addhi3 pattern. */
1929
1930#define BIG_FB_ADJ 0
1931
1932/* Implements LEGITIMIZE_ADDRESS. The only address we really have to
1933 worry about is frame base offsets, as $fb has a limited
1934 displacement range. We deal with this by attempting to reload $fb
1935 itself into an address register; that seems to result in the best
1936 code. */
1937int
1938m32c_legitimize_address (rtx * x ATTRIBUTE_UNUSED,
1939 rtx oldx ATTRIBUTE_UNUSED,
1940 enum machine_mode mode ATTRIBUTE_UNUSED)
1941{
1942#if DEBUG0
1943 fprintf (stderr, "m32c_legitimize_address for mode %s\n", mode_name[mode]);
1944 debug_rtx (*x);
1945 fprintf (stderr, "\n");
1946#endif
1947
1948 if (GET_CODE (*x) == PLUS
1949 && GET_CODE (XEXP (*x, 0)) == REG
1950 && REGNO (XEXP (*x, 0)) == FB_REGNO
1951 && GET_CODE (XEXP (*x, 1)) == CONST_INT
1952 && (INTVAL (XEXP (*x, 1)) < -128
1953 || INTVAL (XEXP (*x, 1)) > (128 - GET_MODE_SIZE (mode))))
1954 {
1955 /* reload FB to A_REGS */
1956 rtx temp = gen_reg_rtx (Pmode);
1957 *x = copy_rtx (*x);
 1958 emit_insn (gen_rtx_SET (VOIDmode, temp, XEXP (*x, 0)));
1959 XEXP (*x, 0) = temp;
1960 return 1;
1961 }
1962
1963 return 0;
1964}
1965
1966/* Implements LEGITIMIZE_RELOAD_ADDRESS. See comment above. */
1967int
1968m32c_legitimize_reload_address (rtx * x,
1969 enum machine_mode mode,
1970 int opnum,
1971 int type, int ind_levels ATTRIBUTE_UNUSED)
1972{
1973#if DEBUG0
1974 fprintf (stderr, "\nm32c_legitimize_reload_address for mode %s\n",
1975 mode_name[mode]);
1976 debug_rtx (*x);
1977#endif
1978
1979 /* At one point, this function tried to get $fb copied to an address
1980 register, which in theory would maximize sharing, but gcc was
1981 *also* still trying to reload the whole address, and we'd run out
1982 of address registers. So we let gcc do the naive (but safe)
1983 reload instead, when the above function doesn't handle it for
1984 us.
1985
1986 The code below is a second attempt at the above. */
1987
1988 if (GET_CODE (*x) == PLUS
1989 && GET_CODE (XEXP (*x, 0)) == REG
1990 && REGNO (XEXP (*x, 0)) == FB_REGNO
1991 && GET_CODE (XEXP (*x, 1)) == CONST_INT
1992 && (INTVAL (XEXP (*x, 1)) < -128
1993 || INTVAL (XEXP (*x, 1)) > (128 - GET_MODE_SIZE (mode))))
1994 {
1995 rtx sum;
1996 int offset = INTVAL (XEXP (*x, 1));
1997 int adjustment = -BIG_FB_ADJ;
1998
1999 sum = gen_rtx_PLUS (Pmode, XEXP (*x, 0),
2000 GEN_INT (adjustment));
2001 *x = gen_rtx_PLUS (Pmode, sum, GEN_INT (offset - adjustment));
2002 if (type == RELOAD_OTHER)
2003 type = RELOAD_FOR_OTHER_ADDRESS;
2004 push_reload (sum, NULL_RTX, &XEXP (*x, 0), NULL,
2005 A_REGS, Pmode, VOIDmode, 0, 0, opnum,
2006 type);
2007 return 1;
2008 }
2009
2010 if (GET_CODE (*x) == PLUS
2011 && GET_CODE (XEXP (*x, 0)) == PLUS
2012 && GET_CODE (XEXP (XEXP (*x, 0), 0)) == REG
2013 && REGNO (XEXP (XEXP (*x, 0), 0)) == FB_REGNO
2014 && GET_CODE (XEXP (XEXP (*x, 0), 1)) == CONST_INT
2015 && GET_CODE (XEXP (*x, 1)) == CONST_INT
2016 )
2017 {
2018 if (type == RELOAD_OTHER)
2019 type = RELOAD_FOR_OTHER_ADDRESS;
2020 push_reload (XEXP (*x, 0), NULL_RTX, &XEXP (*x, 0), NULL,
2021 A_REGS, Pmode, VOIDmode, 0, 0, opnum,
2022 type);
2023 return 1;
2024 }
2025
2026 return 0;
2027}
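/* Hypothetical example, for illustration only: with BIG_FB_ADJ of 0,
   an address like (plus (reg fb) (const_int 300)) becomes
   (plus (plus (reg fb) (const_int 0)) (const_int 300)) above, and the
   inner PLUS is handed to push_reload so it lands in an address
   register; the second block then matches that already-rewritten form
   if reload sees it again.  */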
2028
2029/* Implements LEGITIMATE_CONSTANT_P. We split large constants anyway,
2030 so we can allow anything. */
2031int
2032m32c_legitimate_constant_p (rtx x ATTRIBUTE_UNUSED)
2033{
2034 return 1;
2035}
2036
2037
2038/* Condition Code Status */
2039
2040#undef TARGET_FIXED_CONDITION_CODE_REGS
2041#define TARGET_FIXED_CONDITION_CODE_REGS m32c_fixed_condition_code_regs
2042static bool
2043m32c_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
2044{
2045 *p1 = FLG_REGNO;
2046 *p2 = INVALID_REGNUM;
2047 return true;
2048}
2049
2050/* Describing Relative Costs of Operations */
2051
2052/* Implements REGISTER_MOVE_COST. We make impossible moves
2053 prohibitively expensive, like trying to put QIs in r2/r3 (there are
2054 no opcodes to do that). We also discourage use of mem* registers
2055 since they're really memory. */
2056int
2057m32c_register_move_cost (enum machine_mode mode, int from, int to)
2058{
2059 int cost = COSTS_N_INSNS (3);
2060 int cc = class_contents[from][0] | class_contents[to][0];
2061 /* FIXME: pick real values, but not 2 for now. */
2062 if (mode == QImode && (cc & class_contents[R23_REGS][0]))
2063 {
2064 if (!(cc & ~class_contents[R23_REGS][0]))
2065 cost = COSTS_N_INSNS (1000);
2066 else
2067 cost = COSTS_N_INSNS (80);
2068 }
2069
2070 if (!class_can_hold_mode (from, mode) || !class_can_hold_mode (to, mode))
2071 cost = COSTS_N_INSNS (1000);
2072
2073 if (classes_intersect (from, CR_REGS))
2074 cost += COSTS_N_INSNS (5);
2075
2076 if (classes_intersect (to, CR_REGS))
2077 cost += COSTS_N_INSNS (5);
2078
2079 if (from == MEM_REGS || to == MEM_REGS)
2080 cost += COSTS_N_INSNS (50);
2081 else if (classes_intersect (from, MEM_REGS)
2082 || classes_intersect (to, MEM_REGS))
2083 cost += COSTS_N_INSNS (10);
2084
2085#if DEBUG0
2086 fprintf (stderr, "register_move_cost %s from %s to %s = %d\n",
2087 mode_name[mode], class_names[from], class_names[to], cost);
2088#endif
2089 return cost;
2090}
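/* Worked example (illustrative): a QImode move where both classes lie
   entirely within R23_REGS costs COSTS_N_INSNS (1000), since r2/r3
   have no byte opcodes; if either side can use other registers the
   cost drops to COSTS_N_INSNS (80), so the allocator strongly prefers
   the latter.  */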
2091
2092/* Implements MEMORY_MOVE_COST. */
2093int
2094m32c_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2095 int reg_class ATTRIBUTE_UNUSED,
2096 int in ATTRIBUTE_UNUSED)
2097{
2098 /* FIXME: pick real values. */
2099 return COSTS_N_INSNS (10);
2100}
2101
2102/* Here we try to describe when we use multiple opcodes for one RTX so
2103 that gcc knows when to use them. */
2104#undef TARGET_RTX_COSTS
2105#define TARGET_RTX_COSTS m32c_rtx_costs
2106static bool
2107m32c_rtx_costs (rtx x, int code, int outer_code, int *total)
2108{
2109 switch (code)
2110 {
2111 case REG:
2112 if (REGNO (x) >= MEM0_REGNO && REGNO (x) <= MEM7_REGNO)
2113 *total += COSTS_N_INSNS (500);
2114 else
2115 *total += COSTS_N_INSNS (1);
2116 return true;
2117
2118 case ASHIFT:
2119 case LSHIFTRT:
2120 case ASHIFTRT:
2121 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
2122 {
2123 /* mov.b r1l, r1h */
2124 *total += COSTS_N_INSNS (1);
2125 return true;
2126 }
2127 if (INTVAL (XEXP (x, 1)) > 8
2128 || INTVAL (XEXP (x, 1)) < -8)
2129 {
2130 /* mov.b #N, r1l */
2131 /* mov.b r1l, r1h */
2132 *total += COSTS_N_INSNS (2);
2133 return true;
2134 }
2135 return true;
2136
2137 case LE:
2138 case LEU:
2139 case LT:
2140 case LTU:
2141 case GT:
2142 case GTU:
2143 case GE:
2144 case GEU:
2145 case NE:
2146 case EQ:
2147 if (outer_code == SET)
2148 {
2149 *total += COSTS_N_INSNS (2);
2150 return true;
2151 }
2152 break;
2153
2154 case ZERO_EXTRACT:
2155 {
2156 rtx dest = XEXP (x, 0);
2157 rtx addr = XEXP (dest, 0);
2158 switch (GET_CODE (addr))
2159 {
2160 case CONST_INT:
2161 *total += COSTS_N_INSNS (1);
2162 break;
2163 case SYMBOL_REF:
2164 *total += COSTS_N_INSNS (3);
2165 break;
2166 default:
2167 *total += COSTS_N_INSNS (2);
2168 break;
2169 }
2170 return true;
2171 }
2172 break;
2173
2174 default:
2175 /* Reasonable default. */
2176 if (TARGET_A16 && GET_MODE(x) == SImode)
2177 *total += COSTS_N_INSNS (2);
2178 break;
2179 }
2180 return false;
2181}
2182
2183#undef TARGET_ADDRESS_COST
2184#define TARGET_ADDRESS_COST m32c_address_cost
2185static int
2186m32c_address_cost (rtx addr)
2187{
2188 /* fprintf(stderr, "\naddress_cost\n");
2189 debug_rtx(addr);*/
2190 switch (GET_CODE (addr))
2191 {
2192 case CONST_INT:
2193 return COSTS_N_INSNS(1);
2194 case SYMBOL_REF:
2195 return COSTS_N_INSNS(3);
2196 case REG:
2197 return COSTS_N_INSNS(2);
2198 default:
2199 return 0;
2200 }
2201}
2202
2203/* Defining the Output Assembler Language */
2204
2205/* The Overall Framework of an Assembler File */
2206
2207#undef TARGET_HAVE_NAMED_SECTIONS
2208#define TARGET_HAVE_NAMED_SECTIONS true
2209
2210/* Output of Data */
2211
2212/* We may have 24 bit sizes, which is the native address size.
2213 Currently unused, but provided for completeness. */
2214#undef TARGET_ASM_INTEGER
2215#define TARGET_ASM_INTEGER m32c_asm_integer
2216static bool
2217m32c_asm_integer (rtx x, unsigned int size, int aligned_p)
2218{
2219 switch (size)
2220 {
2221 case 3:
2222 fprintf (asm_out_file, "\t.3byte\t");
2223 output_addr_const (asm_out_file, x);
2224 fputc ('\n', asm_out_file);
2225 return true;
2226 case 4:
2227 if (GET_CODE (x) == SYMBOL_REF)
2228 {
2229 fprintf (asm_out_file, "\t.long\t");
2230 output_addr_const (asm_out_file, x);
2231 fputc ('\n', asm_out_file);
2232 return true;
2233 }
2234 break;
2235 }
2236 return default_assemble_integer (x, size, aligned_p);
2237}
2238
2239/* Output of Assembler Instructions */
2240
2241/* We use a lookup table because the addressing modes are non-orthogonal. */
2242
2243static struct
2244{
2245 char code;
2246 char const *pattern;
2247 char const *format;
2248}
2249const conversions[] = {
2250 { 0, "r", "0" },
2251
2252 { 0, "mr", "z[1]" },
2253 { 0, "m+ri", "3[2]" },
2254 { 0, "m+rs", "3[2]" },
2255 { 0, "m+r+si", "4+5[2]" },
2256 { 0, "ms", "1" },
2257 { 0, "mi", "1" },
2258 { 0, "m+si", "2+3" },
2259
2260 { 0, "mmr", "[z[2]]" },
2261 { 0, "mm+ri", "[4[3]]" },
2262 { 0, "mm+rs", "[4[3]]" },
2263 { 0, "mm+r+si", "[5+6[3]]" },
2264 { 0, "mms", "[[2]]" },
2265 { 0, "mmi", "[[2]]" },
2266 { 0, "mm+si", "[4[3]]" },
2267
2268 { 0, "i", "#0" },
2269 { 0, "s", "#0" },
2270 { 0, "+si", "#1+2" },
2271 { 0, "l", "#0" },
2272
2273 { 'l', "l", "0" },
2274 { 'd', "i", "0" },
2275 { 'd', "s", "0" },
2276 { 'd', "+si", "1+2" },
2277 { 'D', "i", "0" },
2278 { 'D', "s", "0" },
2279 { 'D', "+si", "1+2" },
2280 { 'x', "i", "#0" },
2281 { 'X', "i", "#0" },
2282 { 'm', "i", "#0" },
2283 { 'b', "i", "#0" },
 2284 { 'B', "i", "0" },
2285 { 'p', "i", "0" },
2286
2287 { 0, 0, 0 }
2288};
2289
2290/* This is in order according to the bitfield that pushm/popm use. */
2291static char const *pushm_regs[] = {
2292 "fb", "sb", "a1", "a0", "r3", "r2", "r1", "r0"
2293};
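/* Illustrative only, hypothetical value: for the 'p' operand code
   below, a constant of 0xc0 has bits 7 and 6 set, which index this
   table as "r0" and "r1", so the operand prints as "r0,r1".  */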
2294
2295/* Implements PRINT_OPERAND. */
2296void
2297m32c_print_operand (FILE * file, rtx x, int code)
2298{
2299 int i, j, b;
2300 const char *comma;
2301 HOST_WIDE_INT ival;
2302 int unsigned_const = 0;
 2303 int force_sign;
2304
2305 /* Multiplies; constants are converted to sign-extended format but
2306 we need unsigned, so 'u' and 'U' tell us what size unsigned we
2307 need. */
2308 if (code == 'u')
2309 {
2310 unsigned_const = 2;
2311 code = 0;
2312 }
2313 if (code == 'U')
2314 {
2315 unsigned_const = 1;
2316 code = 0;
2317 }
2318 /* This one is only for debugging; you can put it in a pattern to
2319 force this error. */
2320 if (code == '!')
2321 {
2322 fprintf (stderr, "dj: unreviewed pattern:");
2323 if (current_output_insn)
2324 debug_rtx (current_output_insn);
2325 gcc_unreachable ();
2326 }
2327 /* PSImode operations are either .w or .l depending on the target. */
2328 if (code == '&')
2329 {
2330 if (TARGET_A16)
2331 fprintf (file, "w");
2332 else
2333 fprintf (file, "l");
2334 return;
2335 }
2336 /* Inverted conditionals. */
2337 if (code == 'C')
2338 {
2339 switch (GET_CODE (x))
2340 {
2341 case LE:
2342 fputs ("gt", file);
2343 break;
2344 case LEU:
2345 fputs ("gtu", file);
2346 break;
2347 case LT:
2348 fputs ("ge", file);
2349 break;
2350 case LTU:
2351 fputs ("geu", file);
2352 break;
2353 case GT:
2354 fputs ("le", file);
2355 break;
2356 case GTU:
2357 fputs ("leu", file);
2358 break;
2359 case GE:
2360 fputs ("lt", file);
2361 break;
2362 case GEU:
2363 fputs ("ltu", file);
2364 break;
2365 case NE:
2366 fputs ("eq", file);
2367 break;
2368 case EQ:
2369 fputs ("ne", file);
2370 break;
2371 default:
2372 gcc_unreachable ();
2373 }
2374 return;
2375 }
2376 /* Regular conditionals. */
2377 if (code == 'c')
2378 {
2379 switch (GET_CODE (x))
2380 {
2381 case LE:
2382 fputs ("le", file);
2383 break;
2384 case LEU:
2385 fputs ("leu", file);
2386 break;
2387 case LT:
2388 fputs ("lt", file);
2389 break;
2390 case LTU:
2391 fputs ("ltu", file);
2392 break;
2393 case GT:
2394 fputs ("gt", file);
2395 break;
2396 case GTU:
2397 fputs ("gtu", file);
2398 break;
2399 case GE:
2400 fputs ("ge", file);
2401 break;
2402 case GEU:
2403 fputs ("geu", file);
2404 break;
2405 case NE:
2406 fputs ("ne", file);
2407 break;
2408 case EQ:
2409 fputs ("eq", file);
2410 break;
2411 default:
2412 gcc_unreachable ();
2413 }
2414 return;
2415 }
2416 /* Used in negsi2 to do HImode ops on the two parts of an SImode
2417 operand. */
2418 if (code == 'h' && GET_MODE (x) == SImode)
2419 {
2420 x = m32c_subreg (HImode, x, SImode, 0);
2421 code = 0;
2422 }
2423 if (code == 'H' && GET_MODE (x) == SImode)
2424 {
2425 x = m32c_subreg (HImode, x, SImode, 2);
2426 code = 0;
2427 }
2428 if (code == 'h' && GET_MODE (x) == HImode)
2429 {
2430 x = m32c_subreg (QImode, x, HImode, 0);
2431 code = 0;
2432 }
2433 if (code == 'H' && GET_MODE (x) == HImode)
2434 {
2435 /* We can't actually represent this as an rtx. Do it here. */
2436 if (GET_CODE (x) == REG)
2437 {
2438 switch (REGNO (x))
2439 {
2440 case R0_REGNO:
2441 fputs ("r0h", file);
2442 return;
2443 case R1_REGNO:
2444 fputs ("r1h", file);
2445 return;
2446 default:
2447 gcc_unreachable();
2448 }
2449 }
2450 /* This should be a MEM. */
2451 x = m32c_subreg (QImode, x, HImode, 1);
2452 code = 0;
2453 }
2454 /* This is for BMcond, which always wants word register names. */
2455 if (code == 'h' && GET_MODE (x) == QImode)
2456 {
2457 if (GET_CODE (x) == REG)
2458 x = gen_rtx_REG (HImode, REGNO (x));
2459 code = 0;
2460 }
2461 /* 'x' and 'X' need to be ignored for non-immediates. */
2462 if ((code == 'x' || code == 'X') && GET_CODE (x) != CONST_INT)
2463 code = 0;
2464
2465 encode_pattern (x);
 2466 force_sign = 0;
2467 for (i = 0; conversions[i].pattern; i++)
2468 if (conversions[i].code == code
2469 && streq (conversions[i].pattern, pattern))
2470 {
2471 for (j = 0; conversions[i].format[j]; j++)
2472 /* backslash quotes the next character in the output pattern. */
2473 if (conversions[i].format[j] == '\\')
2474 {
2475 fputc (conversions[i].format[j + 1], file);
2476 j++;
2477 }
2478 /* Digits in the output pattern indicate that the
2479 corresponding RTX is to be output at that point. */
2480 else if (ISDIGIT (conversions[i].format[j]))
2481 {
2482 rtx r = patternr[conversions[i].format[j] - '0'];
2483 switch (GET_CODE (r))
2484 {
2485 case REG:
2486 fprintf (file, "%s",
2487 reg_name_with_mode (REGNO (r), GET_MODE (r)));
2488 break;
2489 case CONST_INT:
2490 switch (code)
2491 {
2492 case 'b':
2493 case 'B':
2494 {
2495 int v = INTVAL (r);
2496 int i = (int) exact_log2 (v);
2497 if (i == -1)
2498 i = (int) exact_log2 ((v ^ 0xffff) & 0xffff);
2499 if (i == -1)
2500 i = (int) exact_log2 ((v ^ 0xff) & 0xff);
2501 /* Bit position. */
2502 fprintf (file, "%d", i);
2503 }
2504 break;
2505 case 'x':
2506 /* Unsigned byte. */
2507 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2508 INTVAL (r) & 0xff);
2509 break;
2510 case 'X':
2511 /* Unsigned word. */
2512 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2513 INTVAL (r) & 0xffff);
2514 break;
2515 case 'p':
2516 /* pushm and popm encode a register set into a single byte. */
2517 comma = "";
2518 for (b = 7; b >= 0; b--)
2519 if (INTVAL (r) & (1 << b))
2520 {
2521 fprintf (file, "%s%s", comma, pushm_regs[b]);
2522 comma = ",";
2523 }
2524 break;
2525 case 'm':
2526 /* "Minus". Output -X */
2527 ival = (-INTVAL (r) & 0xffff);
2528 if (ival & 0x8000)
2529 ival = ival - 0x10000;
2530 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2531 break;
2532 default:
2533 ival = INTVAL (r);
2534 if (conversions[i].format[j + 1] == '[' && ival < 0)
2535 {
2536 /* We can simulate negative displacements by
2537 taking advantage of address space
2538 wrapping when the offset can span the
2539 entire address range. */
2540 rtx base =
2541 patternr[conversions[i].format[j + 2] - '0'];
2542 if (GET_CODE (base) == REG)
2543 switch (REGNO (base))
2544 {
2545 case A0_REGNO:
2546 case A1_REGNO:
2547 if (TARGET_A24)
2548 ival = 0x1000000 + ival;
2549 else
2550 ival = 0x10000 + ival;
2551 break;
2552 case SB_REGNO:
2553 if (TARGET_A16)
2554 ival = 0x10000 + ival;
2555 break;
2556 }
2557 }
2558 else if (code == 'd' && ival < 0 && j == 0)
2559 /* The "mova" opcode is used to do addition by
2560 computing displacements, but again, we need
2561 displacements to be unsigned *if* they're
2562 the only component of the displacement
2563 (i.e. no "symbol-4" type displacement). */
2564 ival = (TARGET_A24 ? 0x1000000 : 0x10000) + ival;
2565
2566 if (conversions[i].format[j] == '0')
2567 {
2568 /* More conversions to unsigned. */
2569 if (unsigned_const == 2)
2570 ival &= 0xffff;
2571 if (unsigned_const == 1)
2572 ival &= 0xff;
2573 }
2574 if (streq (conversions[i].pattern, "mi")
2575 || streq (conversions[i].pattern, "mmi"))
2576 {
2577 /* Integers used as addresses are unsigned. */
2578 ival &= (TARGET_A24 ? 0xffffff : 0xffff);
2579 }
2580 if (force_sign && ival >= 0)
2581 fputc ('+', file);
2582 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2583 break;
2584 }
2585 break;
2586 case CONST_DOUBLE:
2587 /* We don't have const_double constants. If it
2588 happens, make it obvious. */
2589 fprintf (file, "[const_double 0x%lx]",
2590 (unsigned long) CONST_DOUBLE_HIGH (r));
2591 break;
2592 case SYMBOL_REF:
2593 assemble_name (file, XSTR (r, 0));
2594 break;
2595 case LABEL_REF:
2596 output_asm_label (r);
2597 break;
2598 default:
2599 fprintf (stderr, "don't know how to print this operand:");
2600 debug_rtx (r);
2601 gcc_unreachable ();
2602 }
2603 }
2604 else
2605 {
2606 if (conversions[i].format[j] == 'z')
2607 {
2608 /* Some addressing modes *must* have a displacement,
2609 so insert a zero here if needed. */
2610 int k;
2611 for (k = j + 1; conversions[i].format[k]; k++)
2612 if (ISDIGIT (conversions[i].format[k]))
2613 {
2614 rtx reg = patternr[conversions[i].format[k] - '0'];
2615 if (GET_CODE (reg) == REG
2616 && (REGNO (reg) == SB_REGNO
2617 || REGNO (reg) == FB_REGNO
2618 || REGNO (reg) == SP_REGNO))
2619 fputc ('0', file);
2620 }
2621 continue;
2622 }
2623 /* Signed displacements off symbols need to have signs
2624 blended cleanly. */
2625 if (conversions[i].format[j] == '+'
 2626 && (!code || code == 'D' || code == 'd')
 2627 && ISDIGIT (conversions[i].format[j + 1])
2628 && (GET_CODE (patternr[conversions[i].format[j + 1] - '0'])
2629 == CONST_INT))
2630 {
2631 force_sign = 1;
2632 continue;
2633 }
2634 fputc (conversions[i].format[j], file);
2635 }
2636 break;
2637 }
2638 if (!conversions[i].pattern)
2639 {
2640 fprintf (stderr, "unconvertible operand %c `%s'", code ? code : '-',
2641 pattern);
2642 debug_rtx (x);
2643 fprintf (file, "[%c.%s]", code ? code : '-', pattern);
2644 }
2645
2646 return;
2647}
2648
2649/* Implements PRINT_OPERAND_PUNCT_VALID_P. See m32c_print_operand
2650 above for descriptions of what these do. */
2651int
2652m32c_print_operand_punct_valid_p (int c)
2653{
2654 if (c == '&' || c == '!')
2655 return 1;
2656 return 0;
2657}
2658
2659/* Implements PRINT_OPERAND_ADDRESS. Nothing unusual here. */
2660void
2661m32c_print_operand_address (FILE * stream, rtx address)
2662{
2663 gcc_assert (GET_CODE (address) == MEM);
2664 m32c_print_operand (stream, XEXP (address, 0), 0);
2665}
2666
2667/* Implements ASM_OUTPUT_REG_PUSH. Control registers are pushed
2668 differently than general registers. */
2669void
2670m32c_output_reg_push (FILE * s, int regno)
2671{
2672 if (regno == FLG_REGNO)
2673 fprintf (s, "\tpushc\tflg\n");
2674 else
 2675 fprintf (s, "\tpush.%c\t%s\n",
2676 " bwll"[reg_push_size (regno)], reg_names[regno]);
2677}
2678
2679/* Likewise for ASM_OUTPUT_REG_POP. */
2680void
2681m32c_output_reg_pop (FILE * s, int regno)
2682{
2683 if (regno == FLG_REGNO)
2684 fprintf (s, "\tpopc\tflg\n");
2685 else
 2686 fprintf (s, "\tpop.%c\t%s\n",
2687 " bwll"[reg_push_size (regno)], reg_names[regno]);
2688}
2689
2690/* Defining target-specific uses of `__attribute__' */
2691
2692/* Used to simplify the logic below. Find the attributes wherever
2693 they may be. */
2694#define M32C_ATTRIBUTES(decl) \
2695 (TYPE_P (decl)) ? TYPE_ATTRIBUTES (decl) \
2696 : DECL_ATTRIBUTES (decl) \
2697 ? (DECL_ATTRIBUTES (decl)) \
2698 : TYPE_ATTRIBUTES (TREE_TYPE (decl))
2699
2700/* Returns TRUE if the given tree has the "interrupt" attribute. */
2701static int
2702interrupt_p (tree node ATTRIBUTE_UNUSED)
2703{
2704 tree list = M32C_ATTRIBUTES (node);
2705 while (list)
2706 {
2707 if (is_attribute_p ("interrupt", TREE_PURPOSE (list)))
2708 return 1;
2709 list = TREE_CHAIN (list);
2710 }
2711 return 0;
2712}
2713
2714static tree
2715interrupt_handler (tree * node ATTRIBUTE_UNUSED,
2716 tree name ATTRIBUTE_UNUSED,
2717 tree args ATTRIBUTE_UNUSED,
2718 int flags ATTRIBUTE_UNUSED,
2719 bool * no_add_attrs ATTRIBUTE_UNUSED)
2720{
2721 return NULL_TREE;
2722}
2723
2724#undef TARGET_ATTRIBUTE_TABLE
2725#define TARGET_ATTRIBUTE_TABLE m32c_attribute_table
2726static const struct attribute_spec m32c_attribute_table[] = {
2727 {"interrupt", 0, 0, false, false, false, interrupt_handler},
2728 {0, 0, 0, 0, 0, 0, 0}
2729};
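/* For illustration (user code, not part of this file): a handler
   declared as

     void __attribute__ ((interrupt)) timer_isr (void);

   is matched by the table above, so interrupt_p returns 1 for its decl
   and the interrupt prologue/epilogue later in this file is used.  */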
2730
2731#undef TARGET_COMP_TYPE_ATTRIBUTES
2732#define TARGET_COMP_TYPE_ATTRIBUTES m32c_comp_type_attributes
2733static int
2734m32c_comp_type_attributes (tree type1 ATTRIBUTE_UNUSED,
2735 tree type2 ATTRIBUTE_UNUSED)
2736{
2737 /* 0=incompatible 1=compatible 2=warning */
2738 return 1;
2739}
2740
2741#undef TARGET_INSERT_ATTRIBUTES
2742#define TARGET_INSERT_ATTRIBUTES m32c_insert_attributes
2743static void
2744m32c_insert_attributes (tree node ATTRIBUTE_UNUSED,
2745 tree * attr_ptr ATTRIBUTE_UNUSED)
2746{
2747 /* Nothing to do here. */
2748}
2749
2750/* Predicates */
2751
2752/* Returns TRUE if we support a move between the first two operands.
2753 At the moment, we just want to discourage mem to mem moves until
2754 after reload, because reload has a hard time with our limited
2755 number of address registers, and we can get into a situation where
2756 we need three of them when we only have two. */
2757bool
2758m32c_mov_ok (rtx * operands, enum machine_mode mode ATTRIBUTE_UNUSED)
2759{
2760 rtx op0 = operands[0];
2761 rtx op1 = operands[1];
2762
2763 if (TARGET_A24)
2764 return true;
2765
2766#define DEBUG_MOV_OK 0
2767#if DEBUG_MOV_OK
2768 fprintf (stderr, "m32c_mov_ok %s\n", mode_name[mode]);
2769 debug_rtx (op0);
2770 debug_rtx (op1);
2771#endif
2772
2773 if (GET_CODE (op0) == SUBREG)
2774 op0 = XEXP (op0, 0);
2775 if (GET_CODE (op1) == SUBREG)
2776 op1 = XEXP (op1, 0);
2777
2778 if (GET_CODE (op0) == MEM
2779 && GET_CODE (op1) == MEM
2780 && ! reload_completed)
2781 {
2782#if DEBUG_MOV_OK
2783 fprintf (stderr, " - no, mem to mem\n");
2784#endif
2785 return false;
2786 }
2787
2788#if DEBUG_MOV_OK
2789 fprintf (stderr, " - ok\n");
2790#endif
2791 return true;
2792}
2793
2794/* Returns TRUE if two consecutive HImode mov instructions, generated
2795 for moving an immediate double data to a double data type variable
2796 location, can be combined into single SImode mov instruction. */
2797bool
2798m32c_immd_dbl_mov (rtx * operands,
2799 enum machine_mode mode ATTRIBUTE_UNUSED)
2800{
2801 int flag = 0, okflag = 0, offset1 = 0, offset2 = 0, offsetsign = 0;
2802 const char *str1;
2803 const char *str2;
2804
2805 if (GET_CODE (XEXP (operands[0], 0)) == SYMBOL_REF
2806 && MEM_SCALAR_P (operands[0])
2807 && !MEM_IN_STRUCT_P (operands[0])
2808 && GET_CODE (XEXP (operands[2], 0)) == CONST
2809 && GET_CODE (XEXP (XEXP (operands[2], 0), 0)) == PLUS
2810 && GET_CODE (XEXP (XEXP (XEXP (operands[2], 0), 0), 0)) == SYMBOL_REF
2811 && GET_CODE (XEXP (XEXP (XEXP (operands[2], 0), 0), 1)) == CONST_INT
2812 && MEM_SCALAR_P (operands[2])
2813 && !MEM_IN_STRUCT_P (operands[2]))
2814 flag = 1;
2815
2816 else if (GET_CODE (XEXP (operands[0], 0)) == CONST
2817 && GET_CODE (XEXP (XEXP (operands[0], 0), 0)) == PLUS
2818 && GET_CODE (XEXP (XEXP (XEXP (operands[0], 0), 0), 0)) == SYMBOL_REF
2819 && MEM_SCALAR_P (operands[0])
2820 && !MEM_IN_STRUCT_P (operands[0])
2821 && !(XINT (XEXP (XEXP (XEXP (operands[0], 0), 0), 1), 0) %4)
2822 && GET_CODE (XEXP (operands[2], 0)) == CONST
2823 && GET_CODE (XEXP (XEXP (operands[2], 0), 0)) == PLUS
2824 && GET_CODE (XEXP (XEXP (XEXP (operands[2], 0), 0), 0)) == SYMBOL_REF
2825 && MEM_SCALAR_P (operands[2])
2826 && !MEM_IN_STRUCT_P (operands[2]))
2827 flag = 2;
2828
2829 else if (GET_CODE (XEXP (operands[0], 0)) == PLUS
2830 && GET_CODE (XEXP (XEXP (operands[0], 0), 0)) == REG
2831 && REGNO (XEXP (XEXP (operands[0], 0), 0)) == FB_REGNO
2832 && GET_CODE (XEXP (XEXP (operands[0], 0), 1)) == CONST_INT
2833 && MEM_SCALAR_P (operands[0])
2834 && !MEM_IN_STRUCT_P (operands[0])
2835 && !(XINT (XEXP (XEXP (operands[0], 0), 1), 0) %4)
2836 && REGNO (XEXP (XEXP (operands[2], 0), 0)) == FB_REGNO
2837 && GET_CODE (XEXP (XEXP (operands[2], 0), 1)) == CONST_INT
2838 && MEM_SCALAR_P (operands[2])
2839 && !MEM_IN_STRUCT_P (operands[2]))
2840 flag = 3;
2841
2842 else
2843 return false;
2844
2845 switch (flag)
2846 {
2847 case 1:
2848 str1 = XSTR (XEXP (operands[0], 0), 0);
2849 str2 = XSTR (XEXP (XEXP (XEXP (operands[2], 0), 0), 0), 0);
2850 if (strcmp (str1, str2) == 0)
2851 okflag = 1;
2852 else
2853 okflag = 0;
2854 break;
2855 case 2:
2856 str1 = XSTR (XEXP (XEXP (XEXP (operands[0], 0), 0), 0), 0);
2857 str2 = XSTR (XEXP (XEXP (XEXP (operands[2], 0), 0), 0), 0);
2858 if (strcmp(str1,str2) == 0)
2859 okflag = 1;
2860 else
2861 okflag = 0;
2862 break;
2863 case 3:
2864 offset1 = XINT (XEXP (XEXP (operands[0], 0), 1), 0);
2865 offset2 = XINT (XEXP (XEXP (operands[2], 0), 1), 0);
2866 offsetsign = offset1 >> ((sizeof (offset1) * 8) -1);
2867 if (((offset2-offset1) == 2) && offsetsign != 0)
2868 okflag = 1;
2869 else
2870 okflag = 0;
2871 break;
2872 default:
2873 okflag = 0;
2874 }
2875
2876 if (okflag == 1)
2877 {
2878 HOST_WIDE_INT val;
2879 operands[4] = gen_rtx_MEM (SImode, XEXP (operands[0], 0));
2880
2881 val = (XINT (operands[3], 0) << 16) + (XINT (operands[1], 0) & 0xFFFF);
2882 operands[5] = gen_rtx_CONST_INT (VOIDmode, val);
2883
2884 return true;
2885 }
2886
2887 return false;
2888}
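/* Hypothetical example of the combination above: the two word moves

     mov.w #0x1234,_sym
     mov.w #0x5678,_sym+2

   fall under case 1/2, so operands[4] becomes an SImode MEM at _sym and
   operands[5] the constant 0x56781234 (low word first, as the chip is
   little endian), letting a single mov.l do the job.  */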
2889
2890/* Expanders */
2891
2892/* Subregs are non-orthogonal for us, because our registers are all
2893 different sizes. */
2894static rtx
2895m32c_subreg (enum machine_mode outer,
2896 rtx x, enum machine_mode inner, int byte)
2897{
2898 int r, nr = -1;
2899
2900 /* Converting MEMs to different types that are the same size, we
2901 just rewrite them. */
2902 if (GET_CODE (x) == SUBREG
2903 && SUBREG_BYTE (x) == 0
2904 && GET_CODE (SUBREG_REG (x)) == MEM
2905 && (GET_MODE_SIZE (GET_MODE (x))
2906 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
2907 {
2908 rtx oldx = x;
2909 x = gen_rtx_MEM (GET_MODE (x), XEXP (SUBREG_REG (x), 0));
2910 MEM_COPY_ATTRIBUTES (x, SUBREG_REG (oldx));
2911 }
2912
2913 /* Push/pop get done as smaller push/pops. */
2914 if (GET_CODE (x) == MEM
2915 && (GET_CODE (XEXP (x, 0)) == PRE_DEC
2916 || GET_CODE (XEXP (x, 0)) == POST_INC))
2917 return gen_rtx_MEM (outer, XEXP (x, 0));
2918 if (GET_CODE (x) == SUBREG
2919 && GET_CODE (XEXP (x, 0)) == MEM
2920 && (GET_CODE (XEXP (XEXP (x, 0), 0)) == PRE_DEC
2921 || GET_CODE (XEXP (XEXP (x, 0), 0)) == POST_INC))
2922 return gen_rtx_MEM (outer, XEXP (XEXP (x, 0), 0));
2923
2924 if (GET_CODE (x) != REG)
2925 return simplify_gen_subreg (outer, x, inner, byte);
2926
2927 r = REGNO (x);
2928 if (r >= FIRST_PSEUDO_REGISTER || r == AP_REGNO)
2929 return simplify_gen_subreg (outer, x, inner, byte);
2930
2931 if (IS_MEM_REGNO (r))
2932 return simplify_gen_subreg (outer, x, inner, byte);
2933
2934 /* This is where the complexities of our register layout are
2935 described. */
2936 if (byte == 0)
2937 nr = r;
2938 else if (outer == HImode)
2939 {
2940 if (r == R0_REGNO && byte == 2)
2941 nr = R2_REGNO;
2942 else if (r == R0_REGNO && byte == 4)
2943 nr = R1_REGNO;
2944 else if (r == R0_REGNO && byte == 6)
2945 nr = R3_REGNO;
2946 else if (r == R1_REGNO && byte == 2)
2947 nr = R3_REGNO;
2948 else if (r == A0_REGNO && byte == 2)
2949 nr = A1_REGNO;
2950 }
2951 else if (outer == SImode)
2952 {
2953 if (r == R0_REGNO && byte == 0)
2954 nr = R0_REGNO;
2955 else if (r == R0_REGNO && byte == 4)
2956 nr = R1_REGNO;
2957 }
2958 if (nr == -1)
2959 {
2960 fprintf (stderr, "m32c_subreg %s %s %d\n",
2961 mode_name[outer], mode_name[inner], byte);
2962 debug_rtx (x);
2963 gcc_unreachable ();
2964 }
2965 return gen_rtx_REG (outer, nr);
2966}
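/* A few concrete mappings from the code above, for illustration: an
   HImode subreg of r0 at byte 2 is r2 and at byte 4 is r1, reflecting
   the r0/r2/r1/r3 ordering used for wider values; any combination not
   listed is a bug and aborts via gcc_unreachable.  */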
2967
2968/* Used to emit move instructions. We split some moves,
2969 and avoid mem-mem moves. */
2970int
2971m32c_prepare_move (rtx * operands, enum machine_mode mode)
2972{
2973 if (TARGET_A16 && mode == PSImode)
2974 return m32c_split_move (operands, mode, 1);
2975 if ((GET_CODE (operands[0]) == MEM)
2976 && (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY))
2977 {
2978 rtx pmv = XEXP (operands[0], 0);
2979 rtx dest_reg = XEXP (pmv, 0);
2980 rtx dest_mod = XEXP (pmv, 1);
2981
2982 emit_insn (gen_rtx_SET (Pmode, dest_reg, dest_mod));
2983 operands[0] = gen_rtx_MEM (mode, dest_reg);
2984 }
2985 if (!no_new_pseudos && MEM_P (operands[0]) && MEM_P (operands[1]))
2986 operands[1] = copy_to_mode_reg (mode, operands[1]);
2987 return 0;
2988}
2989
2990#define DEBUG_SPLIT 0
2991
2992/* Returns TRUE if the given PSImode move should be split. We split
 2993 for all r8c/m16c moves, since those chips don't support them, and for
2994 POP.L as we can only *push* SImode. */
2995int
2996m32c_split_psi_p (rtx * operands)
2997{
2998#if DEBUG_SPLIT
2999 fprintf (stderr, "\nm32c_split_psi_p\n");
3000 debug_rtx (operands[0]);
3001 debug_rtx (operands[1]);
3002#endif
3003 if (TARGET_A16)
3004 {
3005#if DEBUG_SPLIT
3006 fprintf (stderr, "yes, A16\n");
3007#endif
3008 return 1;
3009 }
3010 if (GET_CODE (operands[1]) == MEM
3011 && GET_CODE (XEXP (operands[1], 0)) == POST_INC)
3012 {
3013#if DEBUG_SPLIT
3014 fprintf (stderr, "yes, pop.l\n");
3015#endif
3016 return 1;
3017 }
3018#if DEBUG_SPLIT
3019 fprintf (stderr, "no, default\n");
3020#endif
3021 return 0;
3022}
3023
3024/* Split the given move. SPLIT_ALL is 0 if splitting is optional
3025 (define_expand), 1 if it is not optional (define_insn_and_split),
3026 and 3 for define_split (alternate api). */
3027int
3028m32c_split_move (rtx * operands, enum machine_mode mode, int split_all)
3029{
3030 rtx s[4], d[4];
3031 int parts, si, di, rev = 0;
3032 int rv = 0, opi = 2;
3033 enum machine_mode submode = HImode;
3034 rtx *ops, local_ops[10];
3035
3036 /* define_split modifies the existing operands, but the other two
3037 emit new insns. OPS is where we store the operand pairs, which
3038 we emit later. */
3039 if (split_all == 3)
3040 ops = operands;
3041 else
3042 ops = local_ops;
3043
3044 /* Else HImode. */
3045 if (mode == DImode)
3046 submode = SImode;
3047
3048 /* Before splitting mem-mem moves, force one operand into a
3049 register. */
3050 if (!no_new_pseudos && MEM_P (operands[0]) && MEM_P (operands[1]))
3051 {
3052#if DEBUG0
3053 fprintf (stderr, "force_reg...\n");
3054 debug_rtx (operands[1]);
3055#endif
3056 operands[1] = force_reg (mode, operands[1]);
3057#if DEBUG0
3058 debug_rtx (operands[1]);
3059#endif
3060 }
3061
3062 parts = 2;
3063
3064#if DEBUG_SPLIT
3065 fprintf (stderr, "\nsplit_move %d all=%d\n", no_new_pseudos, split_all);
3066 debug_rtx (operands[0]);
3067 debug_rtx (operands[1]);
3068#endif
3069
3070 /* Note that split_all is not used to select the api after this
3071 point, so it's safe to set it to 3 even with define_insn. */
3072 /* None of the chips can move SI operands to sp-relative addresses,
3073 so we always split those. */
3074 if (m32c_extra_constraint_p (operands[0], 'S', "Ss"))
3075 split_all = 3;
3076
38b2d076
DD
3077 /* We don't need to split these. */
3078 if (TARGET_A24
3079 && split_all != 3
3080 && (mode == SImode || mode == PSImode)
3081 && !(GET_CODE (operands[1]) == MEM
3082 && GET_CODE (XEXP (operands[1], 0)) == POST_INC))
3083 return 0;
3084
3085 /* First, enumerate the subregs we'll be dealing with. */
3086 for (si = 0; si < parts; si++)
3087 {
3088 d[si] =
3089 m32c_subreg (submode, operands[0], mode,
3090 si * GET_MODE_SIZE (submode));
3091 s[si] =
3092 m32c_subreg (submode, operands[1], mode,
3093 si * GET_MODE_SIZE (submode));
3094 }
3095
3096 /* Split pushes by emitting a sequence of smaller pushes. */
3097 if (GET_CODE (d[0]) == MEM && GET_CODE (XEXP (d[0], 0)) == PRE_DEC)
3098 {
3099 for (si = parts - 1; si >= 0; si--)
3100 {
3101 ops[opi++] = gen_rtx_MEM (submode,
3102 gen_rtx_PRE_DEC (Pmode,
3103 gen_rtx_REG (Pmode,
3104 SP_REGNO)));
3105 ops[opi++] = s[si];
3106 }
3107
3108 rv = 1;
3109 }
3110 /* Likewise for pops. */
3111 else if (GET_CODE (s[0]) == MEM && GET_CODE (XEXP (s[0], 0)) == POST_INC)
3112 {
3113 for (di = 0; di < parts; di++)
3114 {
3115 ops[opi++] = d[di];
3116 ops[opi++] = gen_rtx_MEM (submode,
3117 gen_rtx_POST_INC (Pmode,
3118 gen_rtx_REG (Pmode,
3119 SP_REGNO)));
3120 }
3121 rv = 1;
3122 }
3123 else if (split_all)
3124 {
3125 /* if d[di] == s[si] for any di < si, we'll early clobber. */
3126 for (di = 0; di < parts - 1; di++)
3127 for (si = di + 1; si < parts; si++)
3128 if (reg_mentioned_p (d[di], s[si]))
3129 rev = 1;
3130
3131 if (rev)
3132 for (si = 0; si < parts; si++)
3133 {
3134 ops[opi++] = d[si];
3135 ops[opi++] = s[si];
3136 }
3137 else
3138 for (si = parts - 1; si >= 0; si--)
3139 {
3140 ops[opi++] = d[si];
3141 ops[opi++] = s[si];
3142 }
3143 rv = 1;
3144 }
3145 /* Now emit any moves we may have accumulated. */
3146 if (rv && split_all != 3)
3147 {
3148 int i;
3149 for (i = 2; i < opi; i += 2)
3150 emit_move_insn (ops[i], ops[i + 1]);
3151 }
3152 return rv;
3153}
3154
3155/* The m32c has a number of opcodes that act like memcpy, strcmp, and
3156 the like. For the R8C they expect one of the addresses to be in
3157 R1L:An so we need to arrange for that. Otherwise, it's just a
3158 matter of picking out the operands we want and emitting the right
3159 pattern for them. All these expanders, which correspond to
3160 patterns in blkmov.md, must return nonzero if they expand the insn,
3161 or zero if they should FAIL. */
3162
3163/* This is a memset() opcode. All operands are implied, so we need to
3164 arrange for them to be in the right registers. The opcode wants
3165 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3166 the count (HI), and $2 the value (QI). */
3167int
3168m32c_expand_setmemhi(rtx *operands)
3169{
3170 rtx desta, count, val;
3171 rtx desto, counto;
3172
3173 desta = XEXP (operands[0], 0);
3174 count = operands[1];
3175 val = operands[2];
3176
3177 desto = gen_reg_rtx (Pmode);
3178 counto = gen_reg_rtx (HImode);
3179
3180 if (GET_CODE (desta) != REG
3181 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3182 desta = copy_to_mode_reg (Pmode, desta);
3183
3184 /* This looks like an arbitrary restriction, but this is by far the
3185 most common case. For counts 8..14 this actually results in
3186 smaller code with no speed penalty because the half-sized
3187 constant can be loaded with a shorter opcode. */
3188 if (GET_CODE (count) == CONST_INT
3189 && GET_CODE (val) == CONST_INT
3190 && ! (INTVAL (count) & 1)
3191 && (INTVAL (count) > 1)
3192 && (INTVAL (val) <= 7 && INTVAL (val) >= -8))
3193 {
3194 unsigned v = INTVAL (val) & 0xff;
3195 v = v | (v << 8);
3196 count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3197 val = copy_to_mode_reg (HImode, GEN_INT (v));
3198 if (TARGET_A16)
3199 emit_insn (gen_setmemhi_whi_op (desto, counto, val, desta, count));
3200 else
3201 emit_insn (gen_setmemhi_wpsi_op (desto, counto, val, desta, count));
3202 return 1;
3203 }
3204
3205 /* This is the generalized memset() case. */
3206 if (GET_CODE (val) != REG
3207 || REGNO (val) < FIRST_PSEUDO_REGISTER)
3208 val = copy_to_mode_reg (QImode, val);
3209
3210 if (GET_CODE (count) != REG
3211 || REGNO (count) < FIRST_PSEUDO_REGISTER)
3212 count = copy_to_mode_reg (HImode, count);
3213
3214 if (TARGET_A16)
3215 emit_insn (gen_setmemhi_bhi_op (desto, counto, val, desta, count));
3216 else
3217 emit_insn (gen_setmemhi_bpsi_op (desto, counto, val, desta, count));
3218
3219 return 1;
3220}
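/* Illustrative expansion, hypothetical arguments: memset (dst, 1, 10)
   takes the even-count fast path above; the value is widened to 0x0101,
   the count halved to 5, and the word-wide store opcode is emitted,
   which is smaller and no slower than the byte-wide form.  */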
3221
3222/* This is a memcpy() opcode. All operands are implied, so we need to
3223 arrange for them to be in the right registers. The opcode wants
3224 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3225 is the source (MEM:BLK), and $2 the count (HI). */
3226int
3227m32c_expand_movmemhi(rtx *operands)
3228{
3229 rtx desta, srca, count;
3230 rtx desto, srco, counto;
3231
3232 desta = XEXP (operands[0], 0);
3233 srca = XEXP (operands[1], 0);
3234 count = operands[2];
3235
3236 desto = gen_reg_rtx (Pmode);
3237 srco = gen_reg_rtx (Pmode);
3238 counto = gen_reg_rtx (HImode);
3239
3240 if (GET_CODE (desta) != REG
3241 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3242 desta = copy_to_mode_reg (Pmode, desta);
3243
3244 if (GET_CODE (srca) != REG
3245 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3246 srca = copy_to_mode_reg (Pmode, srca);
3247
3248 /* Similar to setmem, but we don't need to check the value. */
3249 if (GET_CODE (count) == CONST_INT
3250 && ! (INTVAL (count) & 1)
3251 && (INTVAL (count) > 1))
3252 {
3253 count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3254 if (TARGET_A16)
3255 emit_insn (gen_movmemhi_whi_op (desto, srco, counto, desta, srca, count));
3256 else
3257 emit_insn (gen_movmemhi_wpsi_op (desto, srco, counto, desta, srca, count));
3258 return 1;
3259 }
3260
 3261 /* This is the generalized memcpy() case. */
3262 if (GET_CODE (count) != REG
3263 || REGNO (count) < FIRST_PSEUDO_REGISTER)
3264 count = copy_to_mode_reg (HImode, count);
3265
3266 if (TARGET_A16)
3267 emit_insn (gen_movmemhi_bhi_op (desto, srco, counto, desta, srca, count));
3268 else
3269 emit_insn (gen_movmemhi_bpsi_op (desto, srco, counto, desta, srca, count));
3270
3271 return 1;
3272}
3273
3274/* This is a stpcpy() opcode. $0 is the destination (MEM:BLK) after
3275 the copy, which should point to the NUL at the end of the string,
3276 $1 is the destination (MEM:BLK), and $2 is the source (MEM:BLK).
3277 Since our opcode leaves the destination pointing *after* the NUL,
3278 we must emit an adjustment. */
3279int
3280m32c_expand_movstr(rtx *operands)
3281{
3282 rtx desta, srca;
3283 rtx desto, srco;
3284
3285 desta = XEXP (operands[1], 0);
3286 srca = XEXP (operands[2], 0);
3287
3288 desto = gen_reg_rtx (Pmode);
3289 srco = gen_reg_rtx (Pmode);
3290
3291 if (GET_CODE (desta) != REG
3292 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3293 desta = copy_to_mode_reg (Pmode, desta);
3294
3295 if (GET_CODE (srca) != REG
3296 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3297 srca = copy_to_mode_reg (Pmode, srca);
3298
3299 emit_insn (gen_movstr_op (desto, srco, desta, srca));
3300 /* desto ends up being a1, which allows this type of add through MOVA. */
3301 emit_insn (gen_addpsi3 (operands[0], desto, GEN_INT (-1)));
3302
3303 return 1;
3304}
3305
3306/* This is a strcmp() opcode. $0 is the destination (HI) which holds
3307 <=>0 depending on the comparison, $1 is one string (MEM:BLK), and
3308 $2 is the other (MEM:BLK). We must do the comparison, and then
3309 convert the flags to a signed integer result. */
3310int
3311m32c_expand_cmpstr(rtx *operands)
3312{
3313 rtx src1a, src2a;
3314
3315 src1a = XEXP (operands[1], 0);
3316 src2a = XEXP (operands[2], 0);
3317
3318 if (GET_CODE (src1a) != REG
3319 || REGNO (src1a) < FIRST_PSEUDO_REGISTER)
3320 src1a = copy_to_mode_reg (Pmode, src1a);
3321
3322 if (GET_CODE (src2a) != REG
3323 || REGNO (src2a) < FIRST_PSEUDO_REGISTER)
3324 src2a = copy_to_mode_reg (Pmode, src2a);
3325
3326 emit_insn (gen_cmpstrhi_op (src1a, src2a, src1a, src2a));
3327 emit_insn (gen_cond_to_int (operands[0]));
3328
3329 return 1;
3330}
3331
3332
3333typedef rtx (*shift_gen_func)(rtx, rtx, rtx);
3334
3335static shift_gen_func
3336shift_gen_func_for (int mode, int code)
3337{
3338#define GFF(m,c,f) if (mode == m && code == c) return f
3339 GFF(QImode, ASHIFT, gen_ashlqi3_i);
3340 GFF(QImode, ASHIFTRT, gen_ashrqi3_i);
3341 GFF(QImode, LSHIFTRT, gen_lshrqi3_i);
3342 GFF(HImode, ASHIFT, gen_ashlhi3_i);
3343 GFF(HImode, ASHIFTRT, gen_ashrhi3_i);
3344 GFF(HImode, LSHIFTRT, gen_lshrhi3_i);
3345 GFF(PSImode, ASHIFT, gen_ashlpsi3_i);
3346 GFF(PSImode, ASHIFTRT, gen_ashrpsi3_i);
3347 GFF(PSImode, LSHIFTRT, gen_lshrpsi3_i);
3348 GFF(SImode, ASHIFT, TARGET_A16 ? gen_ashlsi3_16 : gen_ashlsi3_24);
3349 GFF(SImode, ASHIFTRT, TARGET_A16 ? gen_ashrsi3_16 : gen_ashrsi3_24);
3350 GFF(SImode, LSHIFTRT, TARGET_A16 ? gen_lshrsi3_16 : gen_lshrsi3_24);
3351#undef GFF
 3352 gcc_unreachable ();
3353}
3354
3355/* The m32c only has one shift, but it takes a signed count. GCC
3356 doesn't want this, so we fake it by negating any shift count when
3357 we're pretending to shift the other way. Also, the shift count is
3358 limited to -8..8. It's slightly better to use two shifts for 9..15
3359 than to load the count into r1h, so we do that too. */
3360int
3361m32c_prepare_shift (rtx * operands, int scale, int shift_code)
3362{
3363 enum machine_mode mode = GET_MODE (operands[0]);
3364 shift_gen_func func = shift_gen_func_for (mode, shift_code);
 3365 rtx temp;
3366
3367 if (GET_CODE (operands[2]) == CONST_INT)
 3368 {
3369 int maxc = TARGET_A24 && (mode == PSImode || mode == SImode) ? 32 : 8;
3370 int count = INTVAL (operands[2]) * scale;
3371
3372 while (count > maxc)
3373 {
3374 temp = gen_reg_rtx (mode);
3375 emit_insn (func (temp, operands[1], GEN_INT (maxc)));
3376 operands[1] = temp;
3377 count -= maxc;
3378 }
3379 while (count < -maxc)
3380 {
3381 temp = gen_reg_rtx (mode);
3382 emit_insn (func (temp, operands[1], GEN_INT (-maxc)));
3383 operands[1] = temp;
3384 count += maxc;
3385 }
3386 emit_insn (func (operands[0], operands[1], GEN_INT (count)));
3387 return 1;
 3388 }
3389
3390 temp = gen_reg_rtx (QImode);
 3391 if (scale < 0)
3392 /* The pattern has a NEG that corresponds to this. */
3393 emit_move_insn (temp, gen_rtx_NEG (QImode, operands[2]));
3394 else if (TARGET_A16 && mode == SImode)
3395 /* We do this because the code below may modify this, we don't
3396 want to modify the origin of this value. */
3397 emit_move_insn (temp, operands[2]);
 3398 else
 3399 /* We'll only use it for the shift, no point emitting a move. */
 3400 temp = operands[2];
 3401
 3402 if (TARGET_A16 && GET_MODE_SIZE (mode) == 4)
3403 {
3404 /* The m16c has a limit of -16..16 for SI shifts, even when the
3405 shift count is in a register. Since there are so many targets
3406 of these shifts, it's better to expand the RTL here than to
3407 call a helper function.
3408
3409 The resulting code looks something like this:
3410
3411 cmp.b r1h,-16
3412 jge.b 1f
3413 shl.l -16,dest
3414 add.b r1h,16
3415 1f: cmp.b r1h,16
3416 jle.b 1f
3417 shl.l 16,dest
3418 sub.b r1h,16
3419 1f: shl.l r1h,dest
3420
3421 We take advantage of the fact that "negative" shifts are
3422 undefined to skip one of the comparisons. */
3423
3424 rtx count;
 3425 rtx label, lref, insn, tempvar;
 3426
3427 emit_move_insn (operands[0], operands[1]);
3428
3429 count = temp;
3430 label = gen_label_rtx ();
3431 lref = gen_rtx_LABEL_REF (VOIDmode, label);
3432 LABEL_NUSES (label) ++;
3433
3434 tempvar = gen_reg_rtx (mode);
3435
3436 if (shift_code == ASHIFT)
3437 {
3438 /* This is a left shift. We only need check positive counts. */
3439 emit_jump_insn (gen_cbranchqi4 (gen_rtx_LE (VOIDmode, 0, 0),
3440 count, GEN_INT (16), label));
3441 emit_insn (func (tempvar, operands[0], GEN_INT (8)));
3442 emit_insn (func (operands[0], tempvar, GEN_INT (8)));
3443 insn = emit_insn (gen_addqi3 (count, count, GEN_INT (-16)));
3444 emit_label_after (label, insn);
3445 }
3446 else
3447 {
3448 /* This is a right shift. We only need check negative counts. */
3449 emit_jump_insn (gen_cbranchqi4 (gen_rtx_GE (VOIDmode, 0, 0),
3450 count, GEN_INT (-16), label));
3451 emit_insn (func (tempvar, operands[0], GEN_INT (-8)));
3452 emit_insn (func (operands[0], tempvar, GEN_INT (-8)));
3453 insn = emit_insn (gen_addqi3 (count, count, GEN_INT (16)));
3454 emit_label_after (label, insn);
3455 }
3456 operands[1] = operands[0];
3457 emit_insn (func (operands[0], operands[0], count));
3458 return 1;
3459 }
3460
3461 operands[2] = temp;
3462 return 0;
3463}
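/* Worked example (illustrative): an ASHIFT of a QImode value by 10 has
   maxc of 8, so the constant path above emits a shift by 8 into a
   temporary followed by a shift by 2, instead of loading the count
   into r1h.  */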
3464
3465/* The m32c has a limited range of operations that work on PSImode
3466 values; we have to expand to SI, do the math, and truncate back to
3467 PSI. Yes, this is expensive, but hopefully gcc will learn to avoid
3468 those cases. */
3469void
3470m32c_expand_neg_mulpsi3 (rtx * operands)
3471{
3472 /* operands: a = b * i */
3473 rtx temp1; /* b as SI */
3474 rtx scale /* i as SI */;
3475 rtx temp2; /* a*b as SI */
3476
3477 temp1 = gen_reg_rtx (SImode);
3478 temp2 = gen_reg_rtx (SImode);
3479 if (GET_CODE (operands[2]) != CONST_INT)
3480 {
3481 scale = gen_reg_rtx (SImode);
3482 emit_insn (gen_zero_extendpsisi2 (scale, operands[2]));
3483 }
3484 else
3485 scale = copy_to_mode_reg (SImode, operands[2]);
3486
3487 emit_insn (gen_zero_extendpsisi2 (temp1, operands[1]));
3488 temp2 = expand_simple_binop (SImode, MULT, temp1, scale, temp2, 1, OPTAB_LIB);
3489 emit_insn (gen_truncsipsi2 (operands[0], temp2));
3490}
3491
3492static rtx compare_op0, compare_op1;
3493
3494void
3495m32c_pend_compare (rtx *operands)
3496{
3497 compare_op0 = operands[0];
3498 compare_op1 = operands[1];
3499}
3500
3501void
3502m32c_unpend_compare (void)
3503{
3504 switch (GET_MODE (compare_op0))
3505 {
 3506 case QImode:
 3507 emit_insn (gen_cmpqi_op (compare_op0, compare_op1)); break;
 3508 case HImode:
 3509 emit_insn (gen_cmphi_op (compare_op0, compare_op1)); break;
 3510 case PSImode:
 3511 emit_insn (gen_cmppsi_op (compare_op0, compare_op1)); break;
3512 }
3513}
3514
3515void
3516m32c_expand_scc (int code, rtx *operands)
3517{
3518 enum machine_mode mode = TARGET_A16 ? QImode : HImode;
3519
3520 emit_insn (gen_rtx_SET (mode,
3521 operands[0],
3522 gen_rtx_fmt_ee (code,
3523 mode,
3524 compare_op0,
3525 compare_op1)));
3526}
3527
3528/* Pattern Output Functions */
3529
3530/* Returns a (OP (reg:CC FLG_REGNO) (const_int 0)) from some other
3531 match_operand rtx's OP. */
3532rtx
3533m32c_cmp_flg_0 (rtx cmp)
3534{
3535 return gen_rtx_fmt_ee (GET_CODE (cmp),
3536 GET_MODE (cmp),
3537 gen_rtx_REG (CCmode, FLG_REGNO),
3538 GEN_INT (0));
3539}
3540
3541int
3542m32c_expand_movcc (rtx *operands)
3543{
3544 rtx rel = operands[1];
3545 rtx cmp;
3546
07127a0a
DD
3547 if (GET_CODE (rel) != EQ && GET_CODE (rel) != NE)
3548 return 1;
3549 if (GET_CODE (operands[2]) != CONST_INT
3550 || GET_CODE (operands[3]) != CONST_INT)
3551 return 1;
3552 emit_insn (gen_cmpqi(XEXP (rel, 0), XEXP (rel, 1)));
3553 if (GET_CODE (rel) == NE)
3554 {
3555 rtx tmp = operands[2];
3556 operands[2] = operands[3];
3557 operands[3] = tmp;
3558 }
3559
3560 cmp = gen_rtx_fmt_ee (GET_CODE (rel),
3561 GET_MODE (rel),
3562 compare_op0,
3563 compare_op1);
3564
3565 emit_move_insn (operands[0],
3566 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
3567 cmp,
3568 operands[2],
3569 operands[3]));
3570 return 0;
3571}
3572
3573/* Used for the "insv" pattern. Return nonzero to fail, else done. */
3574int
3575m32c_expand_insv (rtx *operands)
3576{
3577 rtx op0, src0, p;
3578 int mask;
3579
3580 if (INTVAL (operands[1]) != 1)
3581 return 1;
3582
3583 /* Our insv opcode (bset, bclr) can only insert a one-bit constant. */
3584 if (GET_CODE (operands[3]) != CONST_INT)
3585 return 1;
3586 if (INTVAL (operands[3]) != 0
3587 && INTVAL (operands[3]) != 1
3588 && INTVAL (operands[3]) != -1)
3589 return 1;
3590
3591 mask = 1 << INTVAL (operands[2]);
3592
3593 op0 = operands[0];
3594 if (GET_CODE (op0) == SUBREG
3595 && SUBREG_BYTE (op0) == 0)
3596 {
3597 rtx sub = SUBREG_REG (op0);
3598 if (GET_MODE (sub) == HImode || GET_MODE (sub) == QImode)
3599 op0 = sub;
3600 }
3601
3602 if (no_new_pseudos
3603 || (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0)))
3604 src0 = op0;
3605 else
3606 {
3607 src0 = gen_reg_rtx (GET_MODE (op0));
3608 emit_move_insn (src0, op0);
3609 }
3610
3611 if (GET_MODE (op0) == HImode
3612 && INTVAL (operands[2]) >= 8
 3613 && GET_CODE (op0) == MEM)
3614 {
3615 /* We are little endian. */
3616 rtx new_mem = gen_rtx_MEM (QImode, plus_constant (XEXP (op0, 0), 1));
3617 MEM_COPY_ATTRIBUTES (new_mem, op0);
3618 mask >>= 8;
3619 }
3620
3621 /* First, we generate a mask with the correct polarity. If we are
3622 storing a zero, we want an AND mask, so invert it. */
3623 if (INTVAL (operands[3]) == 0)
 3624 {
 3625 /* Storing a zero, use an AND mask */
3626 if (GET_MODE (op0) == HImode)
3627 mask ^= 0xffff;
3628 else
3629 mask ^= 0xff;
3630 }
3631 /* Now we need to properly sign-extend the mask in case we need to
3632 fall back to an AND or OR opcode. */
3633 if (GET_MODE (op0) == HImode)
3634 {
3635 if (mask & 0x8000)
3636 mask -= 0x10000;
3637 }
3638 else
3639 {
3640 if (mask & 0x80)
3641 mask -= 0x100;
3642 }
3643
3644 switch ( (INTVAL (operands[3]) ? 4 : 0)
3645 + ((GET_MODE (op0) == HImode) ? 2 : 0)
3646 + (TARGET_A24 ? 1 : 0))
3647 {
3648 case 0: p = gen_andqi3_16 (op0, src0, GEN_INT (mask)); break;
3649 case 1: p = gen_andqi3_24 (op0, src0, GEN_INT (mask)); break;
3650 case 2: p = gen_andhi3_16 (op0, src0, GEN_INT (mask)); break;
3651 case 3: p = gen_andhi3_24 (op0, src0, GEN_INT (mask)); break;
3652 case 4: p = gen_iorqi3_16 (op0, src0, GEN_INT (mask)); break;
3653 case 5: p = gen_iorqi3_24 (op0, src0, GEN_INT (mask)); break;
3654 case 6: p = gen_iorhi3_16 (op0, src0, GEN_INT (mask)); break;
3655 case 7: p = gen_iorhi3_24 (op0, src0, GEN_INT (mask)); break;
3656 }
3657
3658 emit_insn (p);
3659 return 0;
3660}
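/* Illustrative case, hypothetical operands: storing a 1 into bit 3 of
   a QImode register on an A24 target computes mask 0x08 and selects
   case 5 above (an OR with #8); storing a 0 instead inverts the mask
   to 0xf7, sign-extends it to -9, and uses the AND form.  */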
3661
3662const char *
3663m32c_scc_pattern(rtx *operands, RTX_CODE code)
3664{
3665 static char buf[30];
3666 if (GET_CODE (operands[0]) == REG
3667 && REGNO (operands[0]) == R0_REGNO)
3668 {
3669 if (code == EQ)
3670 return "stzx\t#1,#0,r0l";
3671 if (code == NE)
3672 return "stzx\t#0,#1,r0l";
3673 }
3674 sprintf(buf, "bm%s\t0,%%h0\n\tand.b\t#1,%%0", GET_RTX_NAME (code));
3675 return buf;
3676}
3677
3678/* Returns TRUE if the current function is a leaf, and thus we can
3679 determine which registers an interrupt function really needs to
3680 save. The logic below is mostly about finding the insn sequence
3681 that's the function, versus any sequence that might be open for the
3682 current insn. */
3683static int
3684m32c_leaf_function_p (void)
3685{
3686 rtx saved_first, saved_last;
3687 struct sequence_stack *seq;
3688 int rv;
3689
3690 saved_first = cfun->emit->x_first_insn;
3691 saved_last = cfun->emit->x_last_insn;
3692 for (seq = cfun->emit->sequence_stack; seq && seq->next; seq = seq->next)
3693 ;
3694 if (seq)
3695 {
3696 cfun->emit->x_first_insn = seq->first;
3697 cfun->emit->x_last_insn = seq->last;
3698 }
3699
3700 rv = leaf_function_p ();
3701
3702 cfun->emit->x_first_insn = saved_first;
3703 cfun->emit->x_last_insn = saved_last;
3704 return rv;
3705}
3706
3707/* Returns TRUE if the current function needs to use the ENTER/EXIT
3708 opcodes. If the function doesn't need the frame base or stack
3709 pointer, it can use the simpler RTS opcode. */
3710static bool
3711m32c_function_needs_enter (void)
3712{
3713 rtx insn;
3714 struct sequence_stack *seq;
3715 rtx sp = gen_rtx_REG (Pmode, SP_REGNO);
3716 rtx fb = gen_rtx_REG (Pmode, FB_REGNO);
3717
3718 insn = get_insns ();
3719 for (seq = cfun->emit->sequence_stack;
3720 seq;
3721 insn = seq->first, seq = seq->next);
3722
3723 while (insn)
3724 {
3725 if (reg_mentioned_p (sp, insn))
3726 return true;
3727 if (reg_mentioned_p (fb, insn))
3728 return true;
3729 insn = NEXT_INSN (insn);
3730 }
3731 return false;
3732}
3733
3734/* Mark all the subexpressions of the PARALLEL rtx PAR as
3735 frame-related. Return PAR.
3736
3737 dwarf2out.c:dwarf2out_frame_debug_expr ignores sub-expressions of a
3738 PARALLEL rtx other than the first if they do not have the
3739 FRAME_RELATED flag set on them. So this function is handy for
3740 marking up 'enter' instructions. */
3741static rtx
3742m32c_all_frame_related (rtx par)
3743{
3744 int len = XVECLEN (par, 0);
3745 int i;
3746
3747 for (i = 0; i < len; i++)
3748 F (XVECEXP (par, 0, i));
3749
3750 return par;
3751}
3752
3753/* Emits the prologue. See the frame layout comment earlier in this
3754 file. We can reserve up to 256 bytes with the ENTER opcode, beyond
3755 that we manually update sp. */
3756void
3757m32c_emit_prologue (void)
3758{
3759 int frame_size, extra_frame_size = 0, reg_save_size;
3760 int complex_prologue = 0;
3761
3762 cfun->machine->is_leaf = m32c_leaf_function_p ();
3763 if (interrupt_p (cfun->decl))
3764 {
3765 cfun->machine->is_interrupt = 1;
3766 complex_prologue = 1;
3767 }
3768
3769 reg_save_size = m32c_pushm_popm (PP_justcount);
3770
3771 if (interrupt_p (cfun->decl))
3772 emit_insn (gen_pushm (GEN_INT (cfun->machine->intr_pushm)));
3773
3774 frame_size =
3775 m32c_initial_elimination_offset (FB_REGNO, SP_REGNO) - reg_save_size;
3776 if (frame_size == 0
3777 && !cfun->machine->is_interrupt
3778 && !m32c_function_needs_enter ())
3779 cfun->machine->use_rts = 1;
3780
3781 if (frame_size > 254)
3782 {
3783 extra_frame_size = frame_size - 254;
3784 frame_size = 254;
3785 }
3786 if (cfun->machine->use_rts == 0)
3787 F (emit_insn (m32c_all_frame_related
3788 (TARGET_A16
3789 ? gen_prologue_enter_16 (GEN_INT (frame_size))
3790 : gen_prologue_enter_24 (GEN_INT (frame_size)))));
3791
3792 if (extra_frame_size)
3793 {
3794 complex_prologue = 1;
3795 if (TARGET_A16)
3796 F (emit_insn (gen_addhi3 (gen_rtx_REG (HImode, SP_REGNO),
3797 gen_rtx_REG (HImode, SP_REGNO),
3798 GEN_INT (-extra_frame_size))));
3799 else
3800 F (emit_insn (gen_addpsi3 (gen_rtx_REG (PSImode, SP_REGNO),
3801 gen_rtx_REG (PSImode, SP_REGNO),
3802 GEN_INT (-extra_frame_size))));
3803 }
3804
3805 complex_prologue += m32c_pushm_popm (PP_pushm);
3806
3807 /* This just emits a comment into the .s file for debugging. */
3808 if (complex_prologue)
3809 emit_insn (gen_prologue_end ());
3810}
3811
3812/* Likewise, for the epilogue. The only exception is that, for
3813 interrupts, we must manually unwind the frame as the REIT opcode
3814 doesn't do that. */
3815void
3816m32c_emit_epilogue (void)
3817{
3818 /* This just emits a comment into the .s file for debugging. */
3819 if (m32c_pushm_popm (PP_justcount) > 0 || cfun->machine->is_interrupt)
3820 emit_insn (gen_epilogue_start ());
3821
3822 m32c_pushm_popm (PP_popm);
3823
3824 if (cfun->machine->is_interrupt)
3825 {
3826 enum machine_mode spmode = TARGET_A16 ? HImode : PSImode;
3827
3828 emit_move_insn (gen_rtx_REG (spmode, A0_REGNO),
3829 gen_rtx_REG (spmode, FP_REGNO));
3830 emit_move_insn (gen_rtx_REG (spmode, SP_REGNO),
3831 gen_rtx_REG (spmode, A0_REGNO));
3832 if (TARGET_A16)
3833 emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, FP_REGNO)));
3834 else
3835 emit_insn (gen_poppsi (gen_rtx_REG (PSImode, FP_REGNO)));
3836 emit_insn (gen_popm (GEN_INT (cfun->machine->intr_pushm)));
3837 emit_jump_insn (gen_epilogue_reit (GEN_INT (TARGET_A16 ? 4 : 6)));
3838 }
3839 else if (cfun->machine->use_rts)
3840 emit_jump_insn (gen_epilogue_rts ());
3841 else
3842 emit_jump_insn (gen_epilogue_exitd (GEN_INT (TARGET_A16 ? 2 : 4)));
3843 emit_barrier ();
3844}
3845
3846void
3847m32c_emit_eh_epilogue (rtx ret_addr)
3848{
3849 /* R0[R2] has the stack adjustment. R1[R3] has the address to
3850 return to. We have to fudge the stack, pop everything, pop SP
3851 (fudged), and return (fudged). This is actually easier to do in
3852 assembler, so punt to libgcc. */
3853 emit_jump_insn (gen_eh_epilogue (ret_addr, cfun->machine->eh_stack_adjust));
3854 /* emit_insn (gen_rtx_CLOBBER (HImode, gen_rtx_REG (HImode, R0L_REGNO))); */
3855 emit_barrier ();
3856}
3857
3858/* Indicate which flags must be properly set for a given conditional. */
3859static int
3860flags_needed_for_conditional (rtx cond)
3861{
3862 switch (GET_CODE (cond))
3863 {
3864 case LE:
3865 case GT:
3866 return FLAGS_OSZ;
3867 case LEU:
3868 case GTU:
3869 return FLAGS_ZC;
3870 case LT:
3871 case GE:
3872 return FLAGS_OS;
3873 case LTU:
3874 case GEU:
3875 return FLAGS_C;
3876 case EQ:
3877 case NE:
3878 return FLAGS_Z;
3879 default:
3880 return FLAGS_N;
3881 }
3882}
3883
3884#define DEBUG_CMP 0
3885
3886/* Returns true if a compare insn is redundant because it would only
3887 set flags that are already set correctly. */
3888static bool
3889m32c_compare_redundant (rtx cmp, rtx *operands)
3890{
3891 int flags_needed;
3892 int pflags;
3893 rtx prev, pp, next;
3894 rtx op0, op1, op2;
3895#if DEBUG_CMP
3896 int prev_icode, i;
3897#endif
3898
3899 op0 = operands[0];
3900 op1 = operands[1];
3901 op2 = operands[2];
3902
3903#if DEBUG_CMP
3904 fprintf(stderr, "\n\033[32mm32c_compare_redundant\033[0m\n");
3905 debug_rtx(cmp);
3906 for (i=0; i<2; i++)
3907 {
3908 fprintf(stderr, "operands[%d] = ", i);
3909 debug_rtx(operands[i]);
3910 }
3911#endif
3912
3913 next = next_nonnote_insn (cmp);
3914 if (!next || !INSN_P (next))
3915 {
3916#if DEBUG_CMP
3917 fprintf(stderr, "compare not followed by insn\n");
3918 debug_rtx(next);
3919#endif
3920 return false;
3921 }
3922 if (GET_CODE (PATTERN (next)) == SET
3923 && GET_CODE (XEXP ( PATTERN (next), 1)) == IF_THEN_ELSE)
3924 {
3925 next = XEXP (XEXP (PATTERN (next), 1), 0);
3926 }
3927 else if (GET_CODE (PATTERN (next)) == SET)
3928 {
3929 /* If this is a conditional, flags_needed will be something
3930 other than FLAGS_N, which we test below. */
3931 next = XEXP (PATTERN (next), 1);
3932 }
3933 else
3934 {
3935#if DEBUG_CMP
3936 fprintf(stderr, "compare not followed by conditional\n");
3937 debug_rtx(next);
3938#endif
3939 return false;
3940 }
3941#if DEBUG_CMP
3942 fprintf(stderr, "conditional is: ");
3943 debug_rtx(next);
3944#endif
3945
3946 flags_needed = flags_needed_for_conditional (next);
3947 if (flags_needed == FLAGS_N)
3948 {
3949#if DEBUG_CMP
3950 fprintf(stderr, "compare not followed by conditional\n");
3951 debug_rtx(next);
3952#endif
3953 return false;
3954 }
3955
3956 /* Compare doesn't set overflow and carry the same way that
3957 arithmetic instructions do, so we can't replace those. */
3958 if (flags_needed & FLAGS_OC)
3959 return false;
3960
3961 prev = cmp;
3962 do {
3963 prev = prev_nonnote_insn (prev);
3964 if (!prev)
3965 {
3966#if DEBUG_CMP
3967 fprintf(stderr, "No previous insn.\n");
3968#endif
3969 return false;
3970 }
3971 if (!INSN_P (prev))
3972 {
3973#if DEBUG_CMP
3974 fprintf(stderr, "Previous insn is a non-insn.\n");
3975#endif
3976 return false;
3977 }
3978 pp = PATTERN (prev);
3979 if (GET_CODE (pp) != SET)
3980 {
3981#if DEBUG_CMP
3982 fprintf(stderr, "Previous insn is not a SET.\n");
3983#endif
3984 return false;
3985 }
3986 pflags = get_attr_flags (prev);
3987
3988 /* Looking up attributes of previous insns corrupted the recog
3989 tables. */
3990 INSN_UID (cmp) = -1;
3991 recog (PATTERN (cmp), cmp, 0);
3992
3993 if (pflags == FLAGS_N
3994 && reg_mentioned_p (op0, pp))
3995 {
3996#if DEBUG_CMP
3997 fprintf(stderr, "intermediate non-flags insn uses op:\n");
3998 debug_rtx(prev);
3999#endif
4000 return false;
4001 }
4002 } while (pflags == FLAGS_N);
4003#if DEBUG_CMP
4004 fprintf(stderr, "previous flag-setting insn:\n");
4005 debug_rtx(prev);
4006 debug_rtx(pp);
4007#endif
4008
4009 if (GET_CODE (pp) == SET
4010 && GET_CODE (XEXP (pp, 0)) == REG
4011 && REGNO (XEXP (pp, 0)) == FLG_REGNO
4012 && GET_CODE (XEXP (pp, 1)) == COMPARE)
4013 {
4014 /* Adjacent cbranches must have the same operands to be
4015 redundant. */
4016 rtx pop0 = XEXP (XEXP (pp, 1), 0);
4017 rtx pop1 = XEXP (XEXP (pp, 1), 1);
4018#if DEBUG_CMP
4019 fprintf(stderr, "adjacent cbranches\n");
4020 debug_rtx(pop0);
4021 debug_rtx(pop1);
4022#endif
4023 if (rtx_equal_p (op0, pop0)
4024 && rtx_equal_p (op1, pop1))
4025 return true;
4026#if DEBUG_CMP
4027 fprintf(stderr, "prev cmp not same\n");
4028#endif
4029 return false;
4030 }
4031
4032 /* Else the previous insn must be a SET, with either the source or
4033 dest equal to operands[0], and operands[1] must be zero. */
4034
4035 if (!rtx_equal_p (op1, const0_rtx))
4036 {
4037#if DEBUG_CMP
4038 fprintf(stderr, "operands[1] not const0_rtx\n");
4039#endif
4040 return false;
4041 }
4042 if (GET_CODE (pp) != SET)
4043 {
4044#if DEBUG_CMP
4045 fprintf (stderr, "pp not set\n");
4046#endif
4047 return false;
4048 }
4049 if (!rtx_equal_p (op0, SET_SRC (pp))
4050 && !rtx_equal_p (op0, SET_DEST (pp)))
4051 {
4052#if DEBUG_CMP
4053 fprintf(stderr, "operands[0] not found in set\n");
4054#endif
4055 return false;
4056 }
4057
4058#if DEBUG_CMP
4059 fprintf(stderr, "cmp flags %x prev flags %x\n", flags_needed, pflags);
4060#endif
4061 if ((pflags & flags_needed) == flags_needed)
4062 return true;
4063
4064 return false;
4065}
4066
4067/* Return the pattern for a compare. This will be commented out if
4068 the compare is redundant, else a normal pattern is returned. Thus,
4069 the assembler output says where the compare would have been. */
4070char *
4071m32c_output_compare (rtx insn, rtx *operands)
4072{
4073 static char template[] = ";cmp.b\t%1,%0";
4074 /* ^ 5 */
4075
4076 template[5] = " bwll"[GET_MODE_SIZE(GET_MODE(operands[0]))];
4077 if (m32c_compare_redundant (insn, operands))
4078 {
4079#if DEBUG_CMP
4080 fprintf(stderr, "cbranch: cmp not needed\n");
4081#endif
4082 return template;
4083 }
4084
4085#if DEBUG_CMP
4086 fprintf(stderr, "cbranch: cmp needed: `%s'\n", template);
4087#endif
4088 return template + 1;
4089}
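/* For illustration: with HImode operands the template reads
   ";cmp.w\t%1,%0"; if the compare is redundant the leading ';' is kept
   so the dropped compare still appears as a comment in the assembler
   output, otherwise template + 1 skips it and a real cmp.w is
   emitted.  */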
4090
4091/* The Global `targetm' Variable. */
4092
4093struct gcc_target targetm = TARGET_INITIALIZER;
4094
4095#include "gt-m32c.h"