gcc/config/m32c/m32c.c
1/* Target Code for R8C/M16C/M32C
2 Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010
3 Free Software Foundation, Inc.
4 Contributed by Red Hat.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it
9 under the terms of the GNU General Public License as published
10 by the Free Software Foundation; either version 3, or (at your
11 option) any later version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT
14 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
16 License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22#include "config.h"
23#include "system.h"
24#include "coretypes.h"
25#include "tm.h"
26#include "rtl.h"
27#include "regs.h"
28#include "hard-reg-set.h"
29#include "insn-config.h"
30#include "conditions.h"
31#include "insn-flags.h"
32#include "output.h"
33#include "insn-attr.h"
34#include "flags.h"
35#include "recog.h"
36#include "reload.h"
37#include "diagnostic-core.h"
38#include "toplev.h"
39#include "obstack.h"
40#include "tree.h"
41#include "expr.h"
42#include "optabs.h"
43#include "except.h"
44#include "function.h"
45#include "ggc.h"
46#include "target.h"
47#include "target-def.h"
48#include "tm_p.h"
49#include "langhooks.h"
50#include "gimple.h"
51#include "df.h"
52
53/* Prototypes */
54
55/* Used by m32c_pushm_popm. */
56typedef enum
57{
58 PP_pushm,
59 PP_popm,
60 PP_justcount
61} Push_Pop_Type;
62
63static bool m32c_function_needs_enter (void);
64static tree interrupt_handler (tree *, tree, tree, int, bool *);
65static tree function_vector_handler (tree *, tree, tree, int, bool *);
66static int interrupt_p (tree node);
67static int bank_switch_p (tree node);
68static int fast_interrupt_p (tree node);
69static int interrupt_p (tree node);
70static bool m32c_asm_integer (rtx, unsigned int, int);
71static int m32c_comp_type_attributes (const_tree, const_tree);
72static bool m32c_fixed_condition_code_regs (unsigned int *, unsigned int *);
73static struct machine_function *m32c_init_machine_status (void);
74static void m32c_insert_attributes (tree, tree *);
75static bool m32c_legitimate_address_p (enum machine_mode, rtx, bool);
76static rtx m32c_function_arg (CUMULATIVE_ARGS *, enum machine_mode,
77 const_tree, bool);
78static bool m32c_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
79 const_tree, bool);
80static void m32c_function_arg_advance (CUMULATIVE_ARGS *, enum machine_mode,
81 const_tree, bool);
82static bool m32c_promote_prototypes (const_tree);
83static int m32c_pushm_popm (Push_Pop_Type);
84static bool m32c_strict_argument_naming (CUMULATIVE_ARGS *);
85static rtx m32c_struct_value_rtx (tree, int);
86static rtx m32c_subreg (enum machine_mode, rtx, enum machine_mode, int);
87static int need_to_save (int);
88static rtx m32c_function_value (const_tree, const_tree, bool);
89static rtx m32c_libcall_value (enum machine_mode, const_rtx);
90
91/* Returns true if an address is specified, else false. */
92static bool m32c_get_pragma_address (const char *varname, unsigned *addr);
93
94int current_function_special_page_vector (rtx);
95
96#define SYMBOL_FLAG_FUNCVEC_FUNCTION (SYMBOL_FLAG_MACH_DEP << 0)
97
98#define streq(a,b) (strcmp ((a), (b)) == 0)
99
100/* Internal support routines */
101
102/* Debugging statements are tagged with DEBUG0 only so that they can
103 be easily enabled individually, by replacing the '0' with '1' as
104 needed. */
105#define DEBUG0 0
106#define DEBUG1 1
107
108#if DEBUG0
109/* This is needed by some of the commented-out debug statements
110 below. */
111static char const *class_names[LIM_REG_CLASSES] = REG_CLASS_NAMES;
112#endif
113static int class_contents[LIM_REG_CLASSES][1] = REG_CLASS_CONTENTS;
114
115/* These are all to support encode_pattern(). */
116static char pattern[30], *patternp;
117static GTY(()) rtx patternr[30];
118#define RTX_IS(x) (streq (pattern, x))
119
120/* Some macros to simplify the logic throughout this file. */
121#define IS_MEM_REGNO(regno) ((regno) >= MEM0_REGNO && (regno) <= MEM7_REGNO)
122#define IS_MEM_REG(rtx) (GET_CODE (rtx) == REG && IS_MEM_REGNO (REGNO (rtx)))
123
124#define IS_CR_REGNO(regno) ((regno) >= SB_REGNO && (regno) <= PC_REGNO)
125#define IS_CR_REG(rtx) (GET_CODE (rtx) == REG && IS_CR_REGNO (REGNO (rtx)))
126
127/* We do most RTX matching by converting the RTX into a string, and
128 using string compares. This vastly simplifies the logic in many of
129 the functions in this file.
130
131 On exit, pattern[] has the encoded string (use RTX_IS("...") to
132 compare it) and patternr[] has pointers to the nodes in the RTX
133 corresponding to each character in the encoded string. The latter
134 is mostly used by print_operand().
135
136 Unrecognized patterns have '?' in them; this shows up when the
137 assembler complains about syntax errors.
138*/
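/* Example (added for illustration; not in the original source):
     encode_pattern (addr);
     if (RTX_IS ("m+ri")) ...
   matches an address like (mem (plus (reg a0) (const_int 4))) -- per the
   switch below, MEM encodes as 'm', PLUS as '+', REG as 'r' and CONST_INT
   as 'i'.  */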
139
140static void
141encode_pattern_1 (rtx x)
142{
143 int i;
144
145 if (patternp == pattern + sizeof (pattern) - 2)
146 {
147 patternp[-1] = '?';
148 return;
149 }
150
151 patternr[patternp - pattern] = x;
152
153 switch (GET_CODE (x))
154 {
155 case REG:
156 *patternp++ = 'r';
157 break;
158 case SUBREG:
159 if (GET_MODE_SIZE (GET_MODE (x)) !=
160 GET_MODE_SIZE (GET_MODE (XEXP (x, 0))))
161 *patternp++ = 'S';
162 encode_pattern_1 (XEXP (x, 0));
163 break;
164 case MEM:
165 *patternp++ = 'm';
166 case CONST:
167 encode_pattern_1 (XEXP (x, 0));
168 break;
169 case PLUS:
170 *patternp++ = '+';
171 encode_pattern_1 (XEXP (x, 0));
172 encode_pattern_1 (XEXP (x, 1));
173 break;
174 case PRE_DEC:
175 *patternp++ = '>';
176 encode_pattern_1 (XEXP (x, 0));
177 break;
178 case POST_INC:
179 *patternp++ = '<';
180 encode_pattern_1 (XEXP (x, 0));
181 break;
182 case LO_SUM:
183 *patternp++ = 'L';
184 encode_pattern_1 (XEXP (x, 0));
185 encode_pattern_1 (XEXP (x, 1));
186 break;
187 case HIGH:
188 *patternp++ = 'H';
189 encode_pattern_1 (XEXP (x, 0));
190 break;
191 case SYMBOL_REF:
192 *patternp++ = 's';
193 break;
194 case LABEL_REF:
195 *patternp++ = 'l';
196 break;
197 case CODE_LABEL:
198 *patternp++ = 'c';
199 break;
200 case CONST_INT:
201 case CONST_DOUBLE:
202 *patternp++ = 'i';
203 break;
204 case UNSPEC:
205 *patternp++ = 'u';
206 *patternp++ = '0' + XCINT (x, 1, UNSPEC);
207 for (i = 0; i < XVECLEN (x, 0); i++)
208 encode_pattern_1 (XVECEXP (x, 0, i));
209 break;
210 case USE:
211 *patternp++ = 'U';
212 break;
213 case PARALLEL:
214 *patternp++ = '|';
215 for (i = 0; i < XVECLEN (x, 0); i++)
216 encode_pattern_1 (XVECEXP (x, 0, i));
217 break;
218 case EXPR_LIST:
219 *patternp++ = 'E';
220 encode_pattern_1 (XEXP (x, 0));
221 if (XEXP (x, 1))
222 encode_pattern_1 (XEXP (x, 1));
223 break;
224 default:
225 *patternp++ = '?';
226#if DEBUG0
227 fprintf (stderr, "can't encode pattern %s\n",
228 GET_RTX_NAME (GET_CODE (x)));
229 debug_rtx (x);
230 gcc_unreachable ();
231#endif
232 break;
233 }
234}
235
236static void
237encode_pattern (rtx x)
238{
239 patternp = pattern;
240 encode_pattern_1 (x);
241 *patternp = 0;
242}
243
244/* Since register names indicate the mode they're used in, we need a
245 way to determine which name to refer to the register with. Called
246 by print_operand(). */
247
248static const char *
249reg_name_with_mode (int regno, enum machine_mode mode)
250{
251 int mlen = GET_MODE_SIZE (mode);
252 if (regno == R0_REGNO && mlen == 1)
253 return "r0l";
254 if (regno == R0_REGNO && (mlen == 3 || mlen == 4))
255 return "r2r0";
256 if (regno == R0_REGNO && mlen == 6)
257 return "r2r1r0";
258 if (regno == R0_REGNO && mlen == 8)
259 return "r3r1r2r0";
260 if (regno == R1_REGNO && mlen == 1)
261 return "r1l";
262 if (regno == R1_REGNO && (mlen == 3 || mlen == 4))
263 return "r3r1";
264 if (regno == A0_REGNO && TARGET_A16 && (mlen == 3 || mlen == 4))
265 return "a1a0";
266 return reg_names[regno];
267}
268
269/* How many bytes a register uses on stack when it's pushed. We need
270 to know this because the push opcode needs to explicitly indicate
271 the size of the register, even though the name of the register
272 already tells it that. Used by m32c_output_reg_{push,pop}, which
273 is only used through calls to ASM_OUTPUT_REG_{PUSH,POP}. */
274
275static int
276reg_push_size (int regno)
277{
278 switch (regno)
279 {
280 case R0_REGNO:
281 case R1_REGNO:
282 return 2;
283 case R2_REGNO:
284 case R3_REGNO:
285 case FLG_REGNO:
286 return 2;
287 case A0_REGNO:
288 case A1_REGNO:
289 case SB_REGNO:
290 case FB_REGNO:
291 case SP_REGNO:
292 if (TARGET_A16)
293 return 2;
294 else
295 return 3;
296 default:
297 gcc_unreachable ();
298 }
299}
300
301static int *class_sizes = 0;
302
303/* Given two register classes, find the largest intersection between
304 them. If there is no intersection, return RETURNED_IF_EMPTY
305 instead. */
306static int
307reduce_class (int original_class, int limiting_class, int returned_if_empty)
308{
309 int cc = class_contents[original_class][0];
310 int i, best = NO_REGS;
311 int best_size = 0;
312
313 if (original_class == limiting_class)
314 return original_class;
315
316 if (!class_sizes)
317 {
318 int r;
319 class_sizes = (int *) xmalloc (LIM_REG_CLASSES * sizeof (int));
320 for (i = 0; i < LIM_REG_CLASSES; i++)
321 {
322 class_sizes[i] = 0;
323 for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
324 if (class_contents[i][0] & (1 << r))
325 class_sizes[i]++;
326 }
327 }
328
329 cc &= class_contents[limiting_class][0];
330 for (i = 0; i < LIM_REG_CLASSES; i++)
331 {
332 int ic = class_contents[i][0];
333
334 if ((~cc & ic) == 0)
335 if (best_size < class_sizes[i])
336 {
337 best = i;
338 best_size = class_sizes[i];
339 }
340
341 }
342 if (best == NO_REGS)
343 return returned_if_empty;
344 return best;
345}
346
347/* Used by m32c_register_move_cost to determine if a move is
348 impossibly expensive. */
349static bool
350class_can_hold_mode (reg_class_t rclass, enum machine_mode mode)
351{
352 /* Cache the results: 0=untested 1=no 2=yes */
353 static char results[LIM_REG_CLASSES][MAX_MACHINE_MODE];
354
355 if (results[(int) rclass][mode] == 0)
356 {
357 int r;
358 results[rclass][mode] = 1;
359 for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
360 if (in_hard_reg_set_p (reg_class_contents[(int) rclass], mode, r)
361 && HARD_REGNO_MODE_OK (r, mode))
362 {
363 results[rclass][mode] = 2;
364 break;
365 }
366 }
367
368#if DEBUG0
369 fprintf (stderr, "class %s can hold %s? %s\n",
370 class_names[(int) rclass], mode_name[mode],
371 (results[rclass][mode] == 2) ? "yes" : "no");
372#endif
373 return results[(int) rclass][mode] == 2;
374}
375
376/* Run-time Target Specification. */
377
378/* Memregs are memory locations that gcc treats like general
379 registers, as there are a limited number of true registers and the
380 m32c families can use memory in most places that registers can be
381 used.
382
383 However, since memory accesses are more expensive than registers,
384 we allow the user to limit the number of memregs available, in
385 order to try to persuade gcc to try harder to use real registers.
386
387 Memregs are provided by m32c-lib1.S.
388*/
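/* Added note (a sketch, not from the original source): the option value is
   a byte count, so invoking the compiler with "-memregs=6" leaves mem0..mem2
   (three 16-bit memregs) usable and marks the rest fixed; see the
   (target_memregs+1)/2 arithmetic in m32c_conditional_register_usage below.  */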
389
390int target_memregs = 16;
391static bool target_memregs_set = FALSE;
392int ok_to_change_target_memregs = TRUE;
393
394#undef TARGET_HANDLE_OPTION
395#define TARGET_HANDLE_OPTION m32c_handle_option
396static bool
397m32c_handle_option (size_t code,
398 const char *arg ATTRIBUTE_UNUSED,
399 int value ATTRIBUTE_UNUSED)
400{
401 if (code == OPT_memregs_)
402 {
403 target_memregs_set = TRUE;
404 target_memregs = atoi (arg);
405 }
406 return TRUE;
407}
408
409/* Implements TARGET_OPTION_OVERRIDE. */
410
411#undef TARGET_OPTION_OVERRIDE
412#define TARGET_OPTION_OVERRIDE m32c_option_override
413
414static void
415m32c_option_override (void)
416{
417 /* We limit memregs to 0..16, and provide a default. */
418 if (target_memregs_set)
419 {
420 if (target_memregs < 0 || target_memregs > 16)
421 error ("invalid target memregs value '%d'", target_memregs);
422 }
423 else
424 target_memregs = 16;
425
426 if (TARGET_A24)
427 flag_ivopts = 0;
428
429 /* This target defaults to strict volatile bitfields. */
430 if (flag_strict_volatile_bitfields < 0)
431 flag_strict_volatile_bitfields = 1;
432}
433
434/* Defining data structures for per-function information */
435
436/* The usual; we set up our machine_function data. */
437static struct machine_function *
438m32c_init_machine_status (void)
439{
440 return ggc_alloc_cleared_machine_function ();
441}
442
443/* Implements INIT_EXPANDERS. We just set up to call the above
444 function. */
445void
446m32c_init_expanders (void)
447{
448 init_machine_status = m32c_init_machine_status;
449}
450
451/* Storage Layout */
452
453/* Register Basics */
454
455/* Basic Characteristics of Registers */
456
457/* Whether a mode fits in a register is complex enough to warrant a
458 table. */
459static struct
460{
461 char qi_regs;
462 char hi_regs;
463 char pi_regs;
464 char si_regs;
465 char di_regs;
466} nregs_table[FIRST_PSEUDO_REGISTER] =
467{
468 { 1, 1, 2, 2, 4 }, /* r0 */
469 { 0, 1, 0, 0, 0 }, /* r2 */
470 { 1, 1, 2, 2, 0 }, /* r1 */
471 { 0, 1, 0, 0, 0 }, /* r3 */
472 { 0, 1, 1, 0, 0 }, /* a0 */
473 { 0, 1, 1, 0, 0 }, /* a1 */
474 { 0, 1, 1, 0, 0 }, /* sb */
475 { 0, 1, 1, 0, 0 }, /* fb */
476 { 0, 1, 1, 0, 0 }, /* sp */
477 { 1, 1, 1, 0, 0 }, /* pc */
478 { 0, 0, 0, 0, 0 }, /* fl */
479 { 1, 1, 1, 0, 0 }, /* ap */
480 { 1, 1, 2, 2, 4 }, /* mem0 */
481 { 1, 1, 2, 2, 4 }, /* mem1 */
482 { 1, 1, 2, 2, 4 }, /* mem2 */
483 { 1, 1, 2, 2, 4 }, /* mem3 */
484 { 1, 1, 2, 2, 4 }, /* mem4 */
485 { 1, 1, 2, 2, 0 }, /* mem5 */
486 { 1, 1, 2, 2, 0 }, /* mem6 */
487 { 1, 1, 0, 0, 0 }, /* mem7 */
488};
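/* Worked example (added for clarity): for regno R0_REGNO the row above is
   { 1, 1, 2, 2, 4 }, so an SImode value needs two consecutive hard registers
   (r0 plus r2, printed as "r2r0" by reg_name_with_mode) and DImode needs
   four (r3r1r2r0).  */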
489
490/* Implements CONDITIONAL_REGISTER_USAGE. We adjust the number of
491 available memregs, and select which registers need to be preserved
492 across calls based on the chip family. */
493
494void
495m32c_conditional_register_usage (void)
496{
497 int i;
498
499 if (0 <= target_memregs && target_memregs <= 16)
500 {
501 /* The command line option is bytes, but our "registers" are
502 16-bit words. */
503 for (i = (target_memregs+1)/2; i < 8; i++)
504 {
505 fixed_regs[MEM0_REGNO + i] = 1;
506 CLEAR_HARD_REG_BIT (reg_class_contents[MEM_REGS], MEM0_REGNO + i);
507 }
508 }
509
510 /* M32CM and M32C preserve more registers across function calls. */
511 if (TARGET_A24)
512 {
513 call_used_regs[R1_REGNO] = 0;
514 call_used_regs[R2_REGNO] = 0;
515 call_used_regs[R3_REGNO] = 0;
516 call_used_regs[A0_REGNO] = 0;
517 call_used_regs[A1_REGNO] = 0;
518 }
519}
520
521/* How Values Fit in Registers */
522
523/* Implements HARD_REGNO_NREGS. This is complicated by the fact that
524 different registers are different sizes from each other, *and* may
525 be different sizes in different chip families. */
526static int
527m32c_hard_regno_nregs_1 (int regno, enum machine_mode mode)
528{
529 if (regno == FLG_REGNO && mode == CCmode)
530 return 1;
531 if (regno >= FIRST_PSEUDO_REGISTER)
532 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
533
534 if (regno >= MEM0_REGNO && regno <= MEM7_REGNO)
535 return (GET_MODE_SIZE (mode) + 1) / 2;
536
537 if (GET_MODE_SIZE (mode) <= 1)
538 return nregs_table[regno].qi_regs;
539 if (GET_MODE_SIZE (mode) <= 2)
540 return nregs_table[regno].hi_regs;
541 if (regno == A0_REGNO && mode == PSImode && TARGET_A16)
542 return 2;
543 if ((GET_MODE_SIZE (mode) <= 3 || mode == PSImode) && TARGET_A24)
544 return nregs_table[regno].pi_regs;
545 if (GET_MODE_SIZE (mode) <= 4)
546 return nregs_table[regno].si_regs;
547 if (GET_MODE_SIZE (mode) <= 8)
548 return nregs_table[regno].di_regs;
549 return 0;
550}
551
552int
553m32c_hard_regno_nregs (int regno, enum machine_mode mode)
554{
555 int rv = m32c_hard_regno_nregs_1 (regno, mode);
556 return rv ? rv : 1;
557}
558
559/* Implements HARD_REGNO_MODE_OK. The above function does the work
560 already; just test its return value. */
561int
562m32c_hard_regno_ok (int regno, enum machine_mode mode)
563{
564 return m32c_hard_regno_nregs_1 (regno, mode) != 0;
565}
566
567/* Implements MODES_TIEABLE_P. In general, modes aren't tieable since
568 registers are all different sizes. However, since most modes are
569 bigger than our registers anyway, it's easier to implement this
570 function that way, leaving QImode as the only unique case. */
571int
572m32c_modes_tieable_p (enum machine_mode m1, enum machine_mode m2)
573{
574 if (GET_MODE_SIZE (m1) == GET_MODE_SIZE (m2))
575 return 1;
576
577#if 0
578 if (m1 == QImode || m2 == QImode)
579 return 0;
580#endif
581
582 return 1;
583}
584
585/* Register Classes */
586
587/* Implements REGNO_REG_CLASS. */
588enum machine_mode
589m32c_regno_reg_class (int regno)
590{
591 switch (regno)
592 {
593 case R0_REGNO:
594 return R0_REGS;
595 case R1_REGNO:
596 return R1_REGS;
597 case R2_REGNO:
598 return R2_REGS;
599 case R3_REGNO:
600 return R3_REGS;
601 case A0_REGNO:
602 case A1_REGNO:
603 return A_REGS;
604 case SB_REGNO:
605 return SB_REGS;
606 case FB_REGNO:
607 return FB_REGS;
608 case SP_REGNO:
609 return SP_REGS;
610 case FLG_REGNO:
611 return FLG_REGS;
612 default:
613 if (IS_MEM_REGNO (regno))
614 return MEM_REGS;
615 return ALL_REGS;
616 }
617}
618
619/* Implements REG_CLASS_FROM_CONSTRAINT. Note that some constraints only match
620 for certain chip families. */
621int
622m32c_reg_class_from_constraint (char c ATTRIBUTE_UNUSED, const char *s)
623{
624 if (memcmp (s, "Rsp", 3) == 0)
625 return SP_REGS;
626 if (memcmp (s, "Rfb", 3) == 0)
627 return FB_REGS;
628 if (memcmp (s, "Rsb", 3) == 0)
629 return SB_REGS;
630 if (memcmp (s, "Rcr", 3) == 0)
631 return TARGET_A16 ? CR_REGS : NO_REGS;
632 if (memcmp (s, "Rcl", 3) == 0)
633 return TARGET_A24 ? CR_REGS : NO_REGS;
634 if (memcmp (s, "R0w", 3) == 0)
635 return R0_REGS;
636 if (memcmp (s, "R1w", 3) == 0)
637 return R1_REGS;
638 if (memcmp (s, "R2w", 3) == 0)
639 return R2_REGS;
640 if (memcmp (s, "R3w", 3) == 0)
641 return R3_REGS;
642 if (memcmp (s, "R02", 3) == 0)
643 return R02_REGS;
644 if (memcmp (s, "R13", 3) == 0)
645 return R13_REGS;
646 if (memcmp (s, "R03", 3) == 0)
647 return R03_REGS;
648 if (memcmp (s, "Rdi", 3) == 0)
649 return DI_REGS;
650 if (memcmp (s, "Rhl", 3) == 0)
651 return HL_REGS;
652 if (memcmp (s, "R23", 3) == 0)
653 return R23_REGS;
654 if (memcmp (s, "Ra0", 3) == 0)
655 return A0_REGS;
656 if (memcmp (s, "Ra1", 3) == 0)
657 return A1_REGS;
658 if (memcmp (s, "Raa", 3) == 0)
659 return A_REGS;
660 if (memcmp (s, "Raw", 3) == 0)
661 return TARGET_A16 ? A_REGS : NO_REGS;
662 if (memcmp (s, "Ral", 3) == 0)
663 return TARGET_A24 ? A_REGS : NO_REGS;
664 if (memcmp (s, "Rqi", 3) == 0)
665 return QI_REGS;
666 if (memcmp (s, "Rad", 3) == 0)
667 return AD_REGS;
668 if (memcmp (s, "Rsi", 3) == 0)
669 return SI_REGS;
670 if (memcmp (s, "Rhi", 3) == 0)
671 return HI_REGS;
672 if (memcmp (s, "Rhc", 3) == 0)
673 return HC_REGS;
674 if (memcmp (s, "Rra", 3) == 0)
675 return RA_REGS;
676 if (memcmp (s, "Rfl", 3) == 0)
677 return FLG_REGS;
678 if (memcmp (s, "Rmm", 3) == 0)
679 {
680 if (fixed_regs[MEM0_REGNO])
681 return NO_REGS;
682 return MEM_REGS;
683 }
684
685 /* PSImode registers - i.e. whatever can hold a pointer. */
686 if (memcmp (s, "Rpi", 3) == 0)
687 {
688 if (TARGET_A16)
689 return HI_REGS;
690 else
691 return RA_REGS; /* r2r0 and r3r1 can hold pointers. */
692 }
693
694 /* We handle this one as an EXTRA_CONSTRAINT. */
695 if (memcmp (s, "Rpa", 3) == 0)
696 return NO_REGS;
697
698 if (*s == 'R')
699 {
700 fprintf(stderr, "unrecognized R constraint: %.3s\n", s);
701 gcc_unreachable();
702 }
703
704 return NO_REGS;
705}
706
707/* Implements REGNO_OK_FOR_BASE_P. */
708int
709m32c_regno_ok_for_base_p (int regno)
710{
711 if (regno == A0_REGNO
712 || regno == A1_REGNO || regno >= FIRST_PSEUDO_REGISTER)
713 return 1;
714 return 0;
715}
716
717#define DEBUG_RELOAD 0
718
719/* Implements PREFERRED_RELOAD_CLASS. In general, prefer general
720 registers of the appropriate size. */
721int
722m32c_preferred_reload_class (rtx x, int rclass)
723{
724 int newclass = rclass;
725
726#if DEBUG_RELOAD
727 fprintf (stderr, "\npreferred_reload_class for %s is ",
728 class_names[rclass]);
729#endif
730 if (rclass == NO_REGS)
731 rclass = GET_MODE (x) == QImode ? HL_REGS : R03_REGS;
732
733 if (reg_classes_intersect_p (rclass, CR_REGS))
734 {
735 switch (GET_MODE (x))
736 {
737 case QImode:
738 newclass = HL_REGS;
739 break;
740 default:
741 /* newclass = HI_REGS; */
742 break;
743 }
744 }
745
746 else if (newclass == QI_REGS && GET_MODE_SIZE (GET_MODE (x)) > 2)
747 newclass = SI_REGS;
748 else if (GET_MODE_SIZE (GET_MODE (x)) > 4
749 && ~class_contents[rclass][0] & 0x000f)
750 newclass = DI_REGS;
751
752 rclass = reduce_class (rclass, newclass, rclass);
753
754 if (GET_MODE (x) == QImode)
755 rclass = reduce_class (rclass, HL_REGS, rclass);
756
757#if DEBUG_RELOAD
758 fprintf (stderr, "%s\n", class_names[rclass]);
759 debug_rtx (x);
760
761 if (GET_CODE (x) == MEM
762 && GET_CODE (XEXP (x, 0)) == PLUS
763 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
764 fprintf (stderr, "Glorm!\n");
765#endif
766 return rclass;
767}
768
769/* Implements PREFERRED_OUTPUT_RELOAD_CLASS. */
770int
771m32c_preferred_output_reload_class (rtx x, int rclass)
772{
773 return m32c_preferred_reload_class (x, rclass);
774}
775
776/* Implements LIMIT_RELOAD_CLASS. We basically want to avoid using
777 address registers for reloads since they're needed for address
778 reloads. */
779int
780m32c_limit_reload_class (enum machine_mode mode, int rclass)
781{
782#if DEBUG_RELOAD
783 fprintf (stderr, "limit_reload_class for %s: %s ->",
784 mode_name[mode], class_names[rclass]);
785#endif
786
787 if (mode == QImode)
788 rclass = reduce_class (rclass, HL_REGS, rclass);
789 else if (mode == HImode)
790 rclass = reduce_class (rclass, HI_REGS, rclass);
791 else if (mode == SImode)
792 rclass = reduce_class (rclass, SI_REGS, rclass);
793
794 if (rclass != A_REGS)
795 rclass = reduce_class (rclass, DI_REGS, rclass);
796
797#if DEBUG_RELOAD
798 fprintf (stderr, " %s\n", class_names[rclass]);
799#endif
800 return rclass;
801}
802
803/* Implements SECONDARY_RELOAD_CLASS. QImode have to be reloaded in
804 r0 or r1, as those are the only real QImode registers. CR regs get
805 reloaded through appropriately sized general or address
806 registers. */
807int
808m32c_secondary_reload_class (int rclass, enum machine_mode mode, rtx x)
809{
810 int cc = class_contents[rclass][0];
811#if DEBUG0
812 fprintf (stderr, "\nsecondary reload class %s %s\n",
813 class_names[rclass], mode_name[mode]);
814 debug_rtx (x);
815#endif
816 if (mode == QImode
817 && GET_CODE (x) == MEM && (cc & ~class_contents[R23_REGS][0]) == 0)
818 return QI_REGS;
819 if (reg_classes_intersect_p (rclass, CR_REGS)
820 && GET_CODE (x) == REG
821 && REGNO (x) >= SB_REGNO && REGNO (x) <= SP_REGNO)
822 return TARGET_A16 ? HI_REGS : A_REGS;
823 return NO_REGS;
824}
825
826/* Implements TARGET_CLASS_LIKELY_SPILLED_P. A_REGS is needed for address
827 reloads. */
828
829#undef TARGET_CLASS_LIKELY_SPILLED_P
830#define TARGET_CLASS_LIKELY_SPILLED_P m32c_class_likely_spilled_p
831
832static bool
833m32c_class_likely_spilled_p (reg_class_t regclass)
834{
835 if (regclass == A_REGS)
836 return true;
837
838 return (reg_class_size[(int) regclass] == 1);
839}
840
841/* Implements CLASS_MAX_NREGS. We calculate this according to its
842 documented meaning, to avoid potential inconsistencies with actual
843 class definitions. */
844int
845m32c_class_max_nregs (int regclass, enum machine_mode mode)
846{
847 int rn, max = 0;
848
849 for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
850 if (class_contents[regclass][0] & (1 << rn))
851 {
852 int n = m32c_hard_regno_nregs (rn, mode);
853 if (max < n)
854 max = n;
855 }
856 return max;
857}
858
859/* Implements CANNOT_CHANGE_MODE_CLASS. Only r0 and r1 can change to
860 QI (r0l, r1l) because the chip doesn't support QI ops on other
861 registers (well, it does on a0/a1 but if we let gcc do that, reload
862 suffers). Otherwise, we allow changes to larger modes. */
863int
864m32c_cannot_change_mode_class (enum machine_mode from,
865 enum machine_mode to, int rclass)
866{
867 int rn;
868#if DEBUG0
869 fprintf (stderr, "cannot change from %s to %s in %s\n",
870 mode_name[from], mode_name[to], class_names[rclass]);
871#endif
872
873 /* If the larger mode isn't allowed in any of these registers, we
874 can't allow the change. */
875 for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
876 if (class_contents[rclass][0] & (1 << rn))
877 if (! m32c_hard_regno_ok (rn, to))
878 return 1;
879
880 if (to == QImode)
881 return (class_contents[rclass][0] & 0x1ffa);
882
883 if (class_contents[rclass][0] & 0x0005 /* r0, r1 */
884 && GET_MODE_SIZE (from) > 1)
885 return 0;
886 if (GET_MODE_SIZE (from) > 2) /* all other regs */
887 return 0;
888
889 return 1;
890}
891
892/* Helpers for the rest of the file. */
893/* TRUE if the rtx is a REG rtx for the given register. */
894#define IS_REG(rtx,regno) (GET_CODE (rtx) == REG \
895 && REGNO (rtx) == regno)
896/* TRUE if the rtx is a pseudo - specifically, one we can use as a
897 base register in address calculations (hence the "strict"
898 argument). */
899#define IS_PSEUDO(rtx,strict) (!strict && GET_CODE (rtx) == REG \
900 && (REGNO (rtx) == AP_REGNO \
901 || REGNO (rtx) >= FIRST_PSEUDO_REGISTER))
902
903/* Implements CONST_OK_FOR_CONSTRAINT_P. Currently, all constant
904 constraints start with 'I', with the next two characters indicating
905 the type and size of the range allowed. */
906int
907m32c_const_ok_for_constraint_p (HOST_WIDE_INT value,
908 char c ATTRIBUTE_UNUSED, const char *str)
909{
910 /* s=signed u=unsigned n=nonzero m=minus l=log2able,
911 [sun] bits [SUN] bytes, p=pointer size
912 I[-0-9][0-9] matches that number */
913 if (memcmp (str, "Is3", 3) == 0)
914 {
915 return (-8 <= value && value <= 7);
916 }
917 if (memcmp (str, "IS1", 3) == 0)
918 {
919 return (-128 <= value && value <= 127);
920 }
921 if (memcmp (str, "IS2", 3) == 0)
922 {
923 return (-32768 <= value && value <= 32767);
924 }
925 if (memcmp (str, "IU2", 3) == 0)
926 {
927 return (0 <= value && value <= 65535);
928 }
929 if (memcmp (str, "IU3", 3) == 0)
930 {
931 return (0 <= value && value <= 0x00ffffff);
932 }
933 if (memcmp (str, "In4", 3) == 0)
934 {
935 return (-8 <= value && value && value <= 8);
936 }
937 if (memcmp (str, "In5", 3) == 0)
938 {
939 return (-16 <= value && value && value <= 16);
940 }
941 if (memcmp (str, "In6", 3) == 0)
942 {
943 return (-32 <= value && value && value <= 32);
944 }
945 if (memcmp (str, "IM2", 3) == 0)
946 {
947 return (-65536 <= value && value && value <= -1);
948 }
949 if (memcmp (str, "Ilb", 3) == 0)
950 {
951 int b = exact_log2 (value);
952 return (b >= 0 && b <= 7);
953 }
954 if (memcmp (str, "Imb", 3) == 0)
955 {
956 int b = exact_log2 ((value ^ 0xff) & 0xff);
957 return (b >= 0 && b <= 7);
958 }
959 if (memcmp (str, "ImB", 3) == 0)
960 {
961 int b = exact_log2 ((value ^ 0xffff) & 0xffff);
962 return (b >= 0 && b <= 7);
963 }
964 if (memcmp (str, "Ilw", 3) == 0)
965 {
966 int b = exact_log2 (value);
967 return (b >= 0 && b <= 15);
968 }
969 if (memcmp (str, "Imw", 3) == 0)
970 {
971 int b = exact_log2 ((value ^ 0xffff) & 0xffff);
972 return (b >= 0 && b <= 15);
973 }
974 if (memcmp (str, "I00", 3) == 0)
975 {
976 return (value == 0);
977 }
978 return 0;
979}
980
981/* Implements EXTRA_CONSTRAINT_STR (see next function too). 'S' is
982 for memory constraints, plus "Rpa" for PARALLEL rtx's we use for
983 call return values. */
984int
985m32c_extra_constraint_p2 (rtx value, char c ATTRIBUTE_UNUSED, const char *str)
986{
987 encode_pattern (value);
988 if (memcmp (str, "Sd", 2) == 0)
989 {
990 /* This is the common "src/dest" address */
991 rtx r;
992 if (GET_CODE (value) == MEM && CONSTANT_P (XEXP (value, 0)))
993 return 1;
994 if (RTX_IS ("ms") || RTX_IS ("m+si"))
995 return 1;
996 if (RTX_IS ("m++rii"))
997 {
998 if (REGNO (patternr[3]) == FB_REGNO
999 && INTVAL (patternr[4]) == 0)
1000 return 1;
1001 }
1002 if (RTX_IS ("mr"))
1003 r = patternr[1];
1004 else if (RTX_IS ("m+ri") || RTX_IS ("m+rs") || RTX_IS ("m+r+si"))
1005 r = patternr[2];
1006 else
1007 return 0;
1008 if (REGNO (r) == SP_REGNO)
1009 return 0;
1010 return m32c_legitimate_address_p (GET_MODE (value), XEXP (value, 0), 1);
1011 }
1012 else if (memcmp (str, "Sa", 2) == 0)
1013 {
1014 rtx r;
1015 if (RTX_IS ("mr"))
1016 r = patternr[1];
1017 else if (RTX_IS ("m+ri"))
1018 r = patternr[2];
1019 else
1020 return 0;
1021 return (IS_REG (r, A0_REGNO) || IS_REG (r, A1_REGNO));
1022 }
1023 else if (memcmp (str, "Si", 2) == 0)
1024 {
1025 return (RTX_IS ("mi") || RTX_IS ("ms") || RTX_IS ("m+si"));
1026 }
1027 else if (memcmp (str, "Ss", 2) == 0)
1028 {
1029 return ((RTX_IS ("mr")
1030 && (IS_REG (patternr[1], SP_REGNO)))
1031 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SP_REGNO))));
1032 }
1033 else if (memcmp (str, "Sf", 2) == 0)
1034 {
1035 return ((RTX_IS ("mr")
1036 && (IS_REG (patternr[1], FB_REGNO)))
1037 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], FB_REGNO))));
1038 }
1039 else if (memcmp (str, "Sb", 2) == 0)
1040 {
1041 return ((RTX_IS ("mr")
1042 && (IS_REG (patternr[1], SB_REGNO)))
1043 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SB_REGNO))));
1044 }
07127a0a
DD
1045 else if (memcmp (str, "Sp", 2) == 0)
1046 {
1047 /* Absolute addresses 0..0x1fff used for bit addressing (I/O ports) */
1048 return (RTX_IS ("mi")
1049 && !(INTVAL (patternr[1]) & ~0x1fff));
1050 }
1051 else if (memcmp (str, "S1", 2) == 0)
1052 {
1053 return r1h_operand (value, QImode);
1054 }
1055
1056 gcc_assert (str[0] != 'S');
1057
1058 if (memcmp (str, "Rpa", 2) == 0)
1059 return GET_CODE (value) == PARALLEL;
1060
1061 return 0;
1062}
1063
1064/* This is for when we're debugging the above. */
1065int
1066m32c_extra_constraint_p (rtx value, char c, const char *str)
1067{
1068 int rv = m32c_extra_constraint_p2 (value, c, str);
1069#if DEBUG0
1070 fprintf (stderr, "\nconstraint %.*s: %d\n", CONSTRAINT_LEN (c, str), str,
1071 rv);
1072 debug_rtx (value);
1073#endif
1074 return rv;
1075}
1076
1077/* Implements EXTRA_MEMORY_CONSTRAINT. Currently, we only use strings
1078 starting with 'S'. */
1079int
1080m32c_extra_memory_constraint (char c, const char *str ATTRIBUTE_UNUSED)
1081{
1082 return c == 'S';
1083}
1084
1085/* Implements EXTRA_ADDRESS_CONSTRAINT. We reserve 'A' strings for these,
1086 but don't currently define any. */
1087int
1088m32c_extra_address_constraint (char c, const char *str ATTRIBUTE_UNUSED)
1089{
1090 return c == 'A';
1091}
1092
1093/* STACK AND CALLING */
1094
1095/* Frame Layout */
1096
1097/* Implements RETURN_ADDR_RTX. Note that R8C and M16C push 24 bits
1098 (yes, THREE bytes) onto the stack for the return address, but we
1099 don't support pointers bigger than 16 bits on those chips. This
1100 will likely wreak havoc with exception unwinding. FIXME. */
1101rtx
1102m32c_return_addr_rtx (int count)
1103{
1104 enum machine_mode mode;
1105 int offset;
1106 rtx ra_mem;
1107
1108 if (count)
1109 return NULL_RTX;
1110 /* we want 2[$fb] */
1111
1112 if (TARGET_A24)
1113 {
1114 /* It's four bytes */
1115 mode = PSImode;
1116 offset = 4;
1117 }
1118 else
1119 {
1120 /* FIXME: it's really 3 bytes */
1121 mode = HImode;
1122 offset = 2;
1123 }
1124
1125 ra_mem =
1126 gen_rtx_MEM (mode, plus_constant (gen_rtx_REG (Pmode, FP_REGNO), offset));
1127 return copy_to_mode_reg (mode, ra_mem);
1128}
1129
1130/* Implements INCOMING_RETURN_ADDR_RTX. See comment above. */
1131rtx
1132m32c_incoming_return_addr_rtx (void)
1133{
1134 /* we want [sp] */
1135 return gen_rtx_MEM (PSImode, gen_rtx_REG (PSImode, SP_REGNO));
1136}
1137
1138/* Exception Handling Support */
1139
1140/* Implements EH_RETURN_DATA_REGNO. Choose registers able to hold
1141 pointers. */
1142int
1143m32c_eh_return_data_regno (int n)
1144{
1145 switch (n)
1146 {
1147 case 0:
1148 return A0_REGNO;
1149 case 1:
1150 if (TARGET_A16)
1151 return R3_REGNO;
1152 else
1153 return R1_REGNO;
1154 default:
1155 return INVALID_REGNUM;
1156 }
1157}
1158
1159/* Implements EH_RETURN_STACKADJ_RTX. Saved and used later in
1160 m32c_emit_eh_epilogue. */
1161rtx
1162m32c_eh_return_stackadj_rtx (void)
1163{
1164 if (!cfun->machine->eh_stack_adjust)
1165 {
1166 rtx sa;
1167
1168 sa = gen_rtx_REG (Pmode, R0_REGNO);
1169 cfun->machine->eh_stack_adjust = sa;
1170 }
1171 return cfun->machine->eh_stack_adjust;
1172}
1173
1174/* Registers That Address the Stack Frame */
1175
1176/* Implements DWARF_FRAME_REGNUM and DBX_REGISTER_NUMBER. Note that
1177 the original spec called for dwarf numbers to vary with register
1178 width as well, for example, r0l, r0, and r2r0 would each have
1179 different dwarf numbers. GCC doesn't support this, and we don't do
1180 it, and gdb seems to like it this way anyway. */
1181unsigned int
1182m32c_dwarf_frame_regnum (int n)
1183{
1184 switch (n)
1185 {
1186 case R0_REGNO:
1187 return 5;
1188 case R1_REGNO:
1189 return 6;
1190 case R2_REGNO:
1191 return 7;
1192 case R3_REGNO:
1193 return 8;
1194 case A0_REGNO:
1195 return 9;
1196 case A1_REGNO:
1197 return 10;
1198 case FB_REGNO:
1199 return 11;
1200 case SB_REGNO:
1201 return 19;
1202
1203 case SP_REGNO:
1204 return 12;
1205 case PC_REGNO:
1206 return 13;
1207 default:
1208 return DWARF_FRAME_REGISTERS + 1;
1209 }
1210}
1211
1212/* The frame looks like this:
1213
1214 ap -> +------------------------------
1215 | Return address (3 or 4 bytes)
1216 | Saved FB (2 or 4 bytes)
1217 fb -> +------------------------------
1218 | local vars
1219 | register saves fb
1220 | through r0 as needed
1221 sp -> +------------------------------
1222*/
1223
1224/* We use this to wrap all emitted insns in the prologue. */
1225static rtx
1226F (rtx x)
1227{
1228 RTX_FRAME_RELATED_P (x) = 1;
1229 return x;
1230}
1231
1232/* This maps register numbers to the PUSHM/POPM bitfield, and tells us
1233 how much the stack pointer moves for each, for each cpu family. */
1234static struct
1235{
1236 int reg1;
1237 int bit;
1238 int a16_bytes;
1239 int a24_bytes;
1240} pushm_info[] =
1241{
1242 /* These are in reverse push (nearest-to-sp) order. */
1243 { R0_REGNO, 0x80, 2, 2 },
1244 { R1_REGNO, 0x40, 2, 2 },
1245 { R2_REGNO, 0x20, 2, 2 },
1246 { R3_REGNO, 0x10, 2, 2 },
1247 { A0_REGNO, 0x08, 2, 4 },
1248 { A1_REGNO, 0x04, 2, 4 },
1249 { SB_REGNO, 0x02, 2, 4 },
1250 { FB_REGNO, 0x01, 2, 4 }
1251};
1252
1253#define PUSHM_N (sizeof(pushm_info)/sizeof(pushm_info[0]))
1254
1255/* Returns TRUE if we need to save/restore the given register. We
1256 save everything for exception handlers, so that any register can be
1257 unwound. For interrupt handlers, we save everything if the handler
1258 calls something else (because we don't know what *that* function
1259 might do), but try to be a bit smarter if the handler is a leaf
1260 function. We always save $a0, though, because we use that in the
1261 epilogue to copy $fb to $sp. */
1262static int
1263need_to_save (int regno)
1264{
1265 if (fixed_regs[regno])
1266 return 0;
1267 if (crtl->calls_eh_return)
1268 return 1;
1269 if (regno == FP_REGNO)
1270 return 0;
1271 if (cfun->machine->is_interrupt
1272 && (!cfun->machine->is_leaf
1273 || (regno == A0_REGNO
1274 && m32c_function_needs_enter ())
1275 ))
1276 return 1;
1277 if (df_regs_ever_live_p (regno)
1278 && (!call_used_regs[regno] || cfun->machine->is_interrupt))
1279 return 1;
1280 return 0;
1281}
1282
1283/* This function contains all the intelligence about saving and
1284 restoring registers. It always figures out the register save set.
1285 When called with PP_justcount, it merely returns the size of the
1286 save set (for eliminating the frame pointer, for example). When
1287 called with PP_pushm or PP_popm, it emits the appropriate
1288 instructions for saving (pushm) or restoring (popm) the
1289 registers. */
1290static int
1291m32c_pushm_popm (Push_Pop_Type ppt)
1292{
1293 int reg_mask = 0;
1294 int byte_count = 0, bytes;
1295 int i;
1296 rtx dwarf_set[PUSHM_N];
1297 int n_dwarfs = 0;
1298 int nosave_mask = 0;
1299
1300 if (crtl->return_rtx
1301 && GET_CODE (crtl->return_rtx) == PARALLEL
1302 && !(crtl->calls_eh_return || cfun->machine->is_interrupt))
1303 {
1304 rtx exp = XVECEXP (crtl->return_rtx, 0, 0);
1305 rtx rv = XEXP (exp, 0);
1306 int rv_bytes = GET_MODE_SIZE (GET_MODE (rv));
1307
1308 if (rv_bytes > 2)
1309 nosave_mask |= 0x20; /* PSI, SI */
1310 else
1311 nosave_mask |= 0xf0; /* DF */
1312 if (rv_bytes > 4)
1313 nosave_mask |= 0x50; /* DI */
1314 }
1315
1316 for (i = 0; i < (int) PUSHM_N; i++)
1317 {
1318 /* Skip if neither register needs saving. */
1319 if (!need_to_save (pushm_info[i].reg1))
1320 continue;
1321
1322 if (pushm_info[i].bit & nosave_mask)
1323 continue;
1324
1325 reg_mask |= pushm_info[i].bit;
1326 bytes = TARGET_A16 ? pushm_info[i].a16_bytes : pushm_info[i].a24_bytes;
1327
1328 if (ppt == PP_pushm)
1329 {
1330 enum machine_mode mode = (bytes == 2) ? HImode : SImode;
1331 rtx addr;
1332
1333 /* Always use stack_pointer_rtx instead of calling
1334 rtx_gen_REG ourselves. Code elsewhere in GCC assumes
1335 that there is a single rtx representing the stack pointer,
1336 namely stack_pointer_rtx, and uses == to recognize it. */
1337 addr = stack_pointer_rtx;
1338
1339 if (byte_count != 0)
1340 addr = gen_rtx_PLUS (GET_MODE (addr), addr, GEN_INT (byte_count));
1341
1342 dwarf_set[n_dwarfs++] =
1343 gen_rtx_SET (VOIDmode,
1344 gen_rtx_MEM (mode, addr),
1345 gen_rtx_REG (mode, pushm_info[i].reg1));
1346 F (dwarf_set[n_dwarfs - 1]);
1347
1348 }
1349 byte_count += bytes;
1350 }
1351
1352 if (cfun->machine->is_interrupt)
1353 {
1354 cfun->machine->intr_pushm = reg_mask & 0xfe;
1355 reg_mask = 0;
1356 byte_count = 0;
1357 }
1358
1359 if (cfun->machine->is_interrupt)
1360 for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
1361 if (need_to_save (i))
1362 {
1363 byte_count += 2;
1364 cfun->machine->intr_pushmem[i - MEM0_REGNO] = 1;
1365 }
1366
1367 if (ppt == PP_pushm && byte_count)
1368 {
1369 rtx note = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (n_dwarfs + 1));
1370 rtx pushm;
1371
1372 if (reg_mask)
1373 {
1374 XVECEXP (note, 0, 0)
1375 = gen_rtx_SET (VOIDmode,
1376 stack_pointer_rtx,
1377 gen_rtx_PLUS (GET_MODE (stack_pointer_rtx),
1378 stack_pointer_rtx,
1379 GEN_INT (-byte_count)));
1380 F (XVECEXP (note, 0, 0));
1381
1382 for (i = 0; i < n_dwarfs; i++)
1383 XVECEXP (note, 0, i + 1) = dwarf_set[i];
1384
1385 pushm = F (emit_insn (gen_pushm (GEN_INT (reg_mask))));
1386
1387 REG_NOTES (pushm) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, note,
1388 REG_NOTES (pushm));
1389 }
1390
1391 if (cfun->machine->is_interrupt)
1392 for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
1393 if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
1394 {
1395 if (TARGET_A16)
1396 pushm = emit_insn (gen_pushhi_16 (gen_rtx_REG (HImode, i)));
1397 else
1398 pushm = emit_insn (gen_pushhi_24 (gen_rtx_REG (HImode, i)));
1399 F (pushm);
1400 }
1401 }
1402 if (ppt == PP_popm && byte_count)
1403 {
1404 if (cfun->machine->is_interrupt)
1405 for (i = MEM7_REGNO; i >= MEM0_REGNO; i--)
1406 if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
1407 {
1408 if (TARGET_A16)
1409 emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, i)));
1410 else
1411 emit_insn (gen_pophi_24 (gen_rtx_REG (HImode, i)));
1412 }
1413 if (reg_mask)
1414 emit_insn (gen_popm (GEN_INT (reg_mask)));
1415 }
1416
1417 return byte_count;
1418}
1419
1420/* Implements INITIAL_ELIMINATION_OFFSET. See the comment above that
1421 diagrams our call frame. */
1422int
1423m32c_initial_elimination_offset (int from, int to)
1424{
1425 int ofs = 0;
1426
1427 if (from == AP_REGNO)
1428 {
1429 if (TARGET_A16)
1430 ofs += 5;
1431 else
1432 ofs += 8;
1433 }
1434
1435 if (to == SP_REGNO)
1436 {
1437 ofs += m32c_pushm_popm (PP_justcount);
1438 ofs += get_frame_size ();
1439 }
1440
1441 /* Account for push rounding. */
1442 if (TARGET_A24)
1443 ofs = (ofs + 1) & ~1;
1444#if DEBUG0
1445 fprintf (stderr, "initial_elimination_offset from=%d to=%d, ofs=%d\n", from,
1446 to, ofs);
1447#endif
1448 return ofs;
1449}
1450
1451/* Passing Function Arguments on the Stack */
1452
1453/* Implements PUSH_ROUNDING. The R8C and M16C have byte stacks, the
1454 M32C has word stacks. */
1455int
1456m32c_push_rounding (int n)
1457{
1458 if (TARGET_R8C || TARGET_M16C)
1459 return n;
1460 return (n + 1) & ~1;
1461}
1462
1463/* Passing Arguments in Registers */
1464
1465/* Implements TARGET_FUNCTION_ARG. Arguments are passed partly in
1466 registers, partly on stack. If our function returns a struct, a
1467 pointer to a buffer for it is at the top of the stack (last thing
1468 pushed). The first few real arguments may be in registers as
1469 follows:
1470
1471 R8C/M16C: arg1 in r1 if it's QI or HI (else it's pushed on stack)
1472 arg2 in r2 if it's HI (else pushed on stack)
1473 rest on stack
1474 M32C: arg1 in r0 if it's QI or HI (else it's pushed on stack)
1475 rest on stack
1476
1477 Structs are not passed in registers, even if they fit. Only
1478 integer and pointer types are passed in registers.
1479
1480 Note that when arg1 doesn't fit in r1, arg2 may still be passed in
1481 r2 if it fits. */
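/* Illustration (an assumed example, not from the original source): for a
   call f(int a, int b) with 16-bit ints, R8C/M16C pass "a" in $r1 and "b"
   in $r2, while M32C passes "a" in $r0 and pushes "b" on the stack.  */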
1482#undef TARGET_FUNCTION_ARG
1483#define TARGET_FUNCTION_ARG m32c_function_arg
1484static rtx
1485m32c_function_arg (CUMULATIVE_ARGS * ca,
1486 enum machine_mode mode, const_tree type, bool named)
1487{
1488 /* Can return a reg, parallel, or 0 for stack */
1489 rtx rv = NULL_RTX;
1490#if DEBUG0
1491 fprintf (stderr, "func_arg %d (%s, %d)\n",
1492 ca->parm_num, mode_name[mode], named);
1493 debug_tree (type);
1494#endif
1495
1496 if (mode == VOIDmode)
1497 return GEN_INT (0);
1498
1499 if (ca->force_mem || !named)
1500 {
1501#if DEBUG0
1502 fprintf (stderr, "func arg: force %d named %d, mem\n", ca->force_mem,
1503 named);
1504#endif
1505 return NULL_RTX;
1506 }
1507
1508 if (type && INTEGRAL_TYPE_P (type) && POINTER_TYPE_P (type))
1509 return NULL_RTX;
1510
1511 if (type && AGGREGATE_TYPE_P (type))
1512 return NULL_RTX;
1513
38b2d076
DD
1514 switch (ca->parm_num)
1515 {
1516 case 1:
1517 if (GET_MODE_SIZE (mode) == 1 || GET_MODE_SIZE (mode) == 2)
1518 rv = gen_rtx_REG (mode, TARGET_A16 ? R1_REGNO : R0_REGNO);
1519 break;
1520
1521 case 2:
1522 if (TARGET_A16 && GET_MODE_SIZE (mode) == 2)
1523 rv = gen_rtx_REG (mode, R2_REGNO);
1524 break;
1525 }
1526
1527#if DEBUG0
1528 debug_rtx (rv);
1529#endif
1530 return rv;
1531}
1532
1533#undef TARGET_PASS_BY_REFERENCE
1534#define TARGET_PASS_BY_REFERENCE m32c_pass_by_reference
1535static bool
1536m32c_pass_by_reference (CUMULATIVE_ARGS * ca ATTRIBUTE_UNUSED,
1537 enum machine_mode mode ATTRIBUTE_UNUSED,
1538 const_tree type ATTRIBUTE_UNUSED,
1539 bool named ATTRIBUTE_UNUSED)
1540{
1541 return 0;
1542}
1543
1544/* Implements INIT_CUMULATIVE_ARGS. */
1545void
1546m32c_init_cumulative_args (CUMULATIVE_ARGS * ca,
1547 tree fntype,
1548 rtx libname ATTRIBUTE_UNUSED,
1549 tree fndecl,
1550 int n_named_args ATTRIBUTE_UNUSED)
1551{
1552 if (fntype && aggregate_value_p (TREE_TYPE (fntype), fndecl))
1553 ca->force_mem = 1;
1554 else
1555 ca->force_mem = 0;
1556 ca->parm_num = 1;
1557}
1558
1559/* Implements TARGET_FUNCTION_ARG_ADVANCE. force_mem is set for
1560 functions returning structures, so we always reset that. Otherwise,
1561 we only need to know the sequence number of the argument to know what
1562 to do with it. */
1563#undef TARGET_FUNCTION_ARG_ADVANCE
1564#define TARGET_FUNCTION_ARG_ADVANCE m32c_function_arg_advance
1565static void
1566m32c_function_arg_advance (CUMULATIVE_ARGS * ca,
1567 enum machine_mode mode ATTRIBUTE_UNUSED,
1568 const_tree type ATTRIBUTE_UNUSED,
1569 bool named ATTRIBUTE_UNUSED)
1570{
1571 if (ca->force_mem)
1572 ca->force_mem = 0;
1573 else
1574 ca->parm_num++;
1575}
1576
1577/* Implements FUNCTION_ARG_REGNO_P. */
1578int
1579m32c_function_arg_regno_p (int r)
1580{
1581 if (TARGET_A24)
1582 return (r == R0_REGNO);
1583 return (r == R1_REGNO || r == R2_REGNO);
1584}
1585
1586/* HImode and PSImode are the two "native" modes as far as GCC is
1587 concerned, but the chips also support a 32-bit mode which is used
1588 for some opcodes in R8C/M16C and for reset vectors and such. */
1589#undef TARGET_VALID_POINTER_MODE
1590#define TARGET_VALID_POINTER_MODE m32c_valid_pointer_mode
1591static bool
1592m32c_valid_pointer_mode (enum machine_mode mode)
1593{
1594 if (mode == HImode
1595 || mode == PSImode
1596 || mode == SImode
1597 )
1598 return 1;
1599 return 0;
1600}
1601
1602/* How Scalar Function Values Are Returned */
1603
1604/* Implements TARGET_LIBCALL_VALUE. Most values are returned in $r0, or some
1605 combination of registers starting there (r2r0 for longs, r3r1r2r0
1606 for long long, r3r2r1r0 for doubles), except that that ABI
1607 currently doesn't work because it ends up using all available
1608 general registers and gcc often can't compile it. So, instead, we
1609 return anything bigger than 16 bits in "mem0" (effectively, a
1610 memory location). */
1611
1612#undef TARGET_LIBCALL_VALUE
1613#define TARGET_LIBCALL_VALUE m32c_libcall_value
1614
1615static rtx
1616m32c_libcall_value (enum machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
1617{
1618 /* return reg or parallel */
1619#if 0
1620 /* FIXME: GCC has difficulty returning large values in registers,
1621 because that ties up most of the general registers and gives the
1622 register allocator little to work with. Until we can resolve
1623 this, large values are returned in memory. */
1624 if (mode == DFmode)
1625 {
1626 rtx rv;
1627
1628 rv = gen_rtx_PARALLEL (mode, rtvec_alloc (4));
1629 XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
1630 gen_rtx_REG (HImode,
1631 R0_REGNO),
1632 GEN_INT (0));
1633 XVECEXP (rv, 0, 1) = gen_rtx_EXPR_LIST (VOIDmode,
1634 gen_rtx_REG (HImode,
1635 R1_REGNO),
1636 GEN_INT (2));
1637 XVECEXP (rv, 0, 2) = gen_rtx_EXPR_LIST (VOIDmode,
1638 gen_rtx_REG (HImode,
1639 R2_REGNO),
1640 GEN_INT (4));
1641 XVECEXP (rv, 0, 3) = gen_rtx_EXPR_LIST (VOIDmode,
1642 gen_rtx_REG (HImode,
1643 R3_REGNO),
1644 GEN_INT (6));
1645 return rv;
1646 }
1647
1648 if (TARGET_A24 && GET_MODE_SIZE (mode) > 2)
1649 {
1650 rtx rv;
1651
1652 rv = gen_rtx_PARALLEL (mode, rtvec_alloc (1));
1653 XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
1654 gen_rtx_REG (mode,
1655 R0_REGNO),
1656 GEN_INT (0));
1657 return rv;
1658 }
1659#endif
1660
1661 if (GET_MODE_SIZE (mode) > 2)
1662 return gen_rtx_REG (mode, MEM0_REGNO);
1663 return gen_rtx_REG (mode, R0_REGNO);
1664}
1665
1666/* Implements TARGET_FUNCTION_VALUE. Functions and libcalls have the same
1667 conventions. */
1668
1669#undef TARGET_FUNCTION_VALUE
1670#define TARGET_FUNCTION_VALUE m32c_function_value
1671
1672static rtx
1673m32c_function_value (const_tree valtype,
1674 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
1675 bool outgoing ATTRIBUTE_UNUSED)
1676{
1677 /* return reg or parallel */
1678 const enum machine_mode mode = TYPE_MODE (valtype);
1679 return m32c_libcall_value (mode, NULL_RTX);
1680}
1681
1682/* Implements TARGET_FUNCTION_VALUE_REGNO_P. */
1683
1684#undef TARGET_FUNCTION_VALUE_REGNO_P
1685#define TARGET_FUNCTION_VALUE_REGNO_P m32c_function_value_regno_p
1686
1687static bool
1688m32c_function_value_regno_p (const unsigned int regno)
1689{
1690 return (regno == R0_REGNO || regno == MEM0_REGNO);
1691}
1692
1693/* How Large Values Are Returned */
1694
1695/* We return structures by pushing the address on the stack, even if
1696 we use registers for the first few "real" arguments. */
1697#undef TARGET_STRUCT_VALUE_RTX
1698#define TARGET_STRUCT_VALUE_RTX m32c_struct_value_rtx
1699static rtx
1700m32c_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
1701 int incoming ATTRIBUTE_UNUSED)
1702{
1703 return 0;
1704}
1705
1706/* Function Entry and Exit */
1707
1708/* Implements EPILOGUE_USES. Interrupts restore all registers. */
1709int
1710m32c_epilogue_uses (int regno ATTRIBUTE_UNUSED)
1711{
1712 if (cfun->machine->is_interrupt)
1713 return 1;
1714 return 0;
1715}
1716
1717/* Implementing the Varargs Macros */
1718
1719#undef TARGET_STRICT_ARGUMENT_NAMING
1720#define TARGET_STRICT_ARGUMENT_NAMING m32c_strict_argument_naming
1721static bool
1722m32c_strict_argument_naming (CUMULATIVE_ARGS * ca ATTRIBUTE_UNUSED)
1723{
1724 return 1;
1725}
1726
1727/* Trampolines for Nested Functions */
1728
1729/*
1730 m16c:
1731 1 0000 75C43412 mov.w #0x1234,a0
1732 2 0004 FC000000 jmp.a label
1733
1734 m32c:
1735 1 0000 BC563412 mov.l:s #0x123456,a0
1736 2 0004 CC000000 jmp.a label
1737*/
1738
1739/* Implements TRAMPOLINE_SIZE. */
1740int
1741m32c_trampoline_size (void)
1742{
1743 /* Allocate extra space so we can avoid the messy shifts when we
1744 initialize the trampoline; we just write past the end of the
1745 opcode. */
1746 return TARGET_A16 ? 8 : 10;
1747}
1748
1749/* Implements TRAMPOLINE_ALIGNMENT. */
1750int
1751m32c_trampoline_alignment (void)
1752{
1753 return 2;
1754}
1755
1756/* Implements TARGET_TRAMPOLINE_INIT. */
1757
1758#undef TARGET_TRAMPOLINE_INIT
1759#define TARGET_TRAMPOLINE_INIT m32c_trampoline_init
1760static void
1761m32c_trampoline_init (rtx m_tramp, tree fndecl, rtx chainval)
1762{
1763 rtx function = XEXP (DECL_RTL (fndecl), 0);
1764
1765#define A0(m,i) adjust_address (m_tramp, m, i)
1766 if (TARGET_A16)
1767 {
1768 /* Note: we subtract a "word" because the moves want signed
1769 constants, not unsigned constants. */
1770 emit_move_insn (A0 (HImode, 0), GEN_INT (0xc475 - 0x10000));
1771 emit_move_insn (A0 (HImode, 2), chainval);
1772 emit_move_insn (A0 (QImode, 4), GEN_INT (0xfc - 0x100));
1773 /* We use 16-bit addresses here, but store the zero to turn it
1774 into a 24-bit offset. */
1775 emit_move_insn (A0 (HImode, 5), function);
1776 emit_move_insn (A0 (QImode, 7), GEN_INT (0x00));
1777 }
1778 else
1779 {
1780 /* Note that the PSI moves actually write 4 bytes. Make sure we
1781 write stuff out in the right order, and leave room for the
1782 extra byte at the end. */
1783 emit_move_insn (A0 (QImode, 0), GEN_INT (0xbc - 0x100));
1784 emit_move_insn (A0 (PSImode, 1), chainval);
1785 emit_move_insn (A0 (QImode, 4), GEN_INT (0xcc - 0x100));
1786 emit_move_insn (A0 (PSImode, 5), function);
1787 }
1788#undef A0
1789}
1790
1791/* Implicit Calls to Library Routines */
1792
1793#undef TARGET_INIT_LIBFUNCS
1794#define TARGET_INIT_LIBFUNCS m32c_init_libfuncs
1795static void
1796m32c_init_libfuncs (void)
1797{
1798 /* We do this because the M32C has an HImode operand, but the
1799 M16C has an 8-bit operand. Since gcc looks at the match data
1800 and not the expanded rtl, we have to reset the optab so that
1801 the right modes are found. */
1802 if (TARGET_A24)
1803 {
1804 set_optab_handler (cstore_optab, QImode, CODE_FOR_cstoreqi4_24);
1805 set_optab_handler (cstore_optab, HImode, CODE_FOR_cstorehi4_24);
1806 set_optab_handler (cstore_optab, PSImode, CODE_FOR_cstorepsi4_24);
1807 }
1808}
1809
1810/* Addressing Modes */
1811
1812/* The r8c/m32c family supports a wide range of non-orthogonal
1813 addressing modes, including the ability to double-indirect on *some*
1814 of them. Not all insns support all modes, either, but we rely on
1815 predicates and constraints to deal with that. */
1816#undef TARGET_LEGITIMATE_ADDRESS_P
1817#define TARGET_LEGITIMATE_ADDRESS_P m32c_legitimate_address_p
1818bool
1819m32c_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
1820{
1821 int mode_adjust;
1822 if (CONSTANT_P (x))
1823 return 1;
1824
1825 /* Wide references to memory will be split after reload, so we must
1826 ensure that all parts of such splits remain legitimate
1827 addresses. */
1828 mode_adjust = GET_MODE_SIZE (mode) - 1;
1829
1830 /* allowing PLUS yields mem:HI(plus:SI(mem:SI(plus:SI in m32c_split_move */
1831 if (GET_CODE (x) == PRE_DEC
1832 || GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_MODIFY)
1833 {
1834 return (GET_CODE (XEXP (x, 0)) == REG
1835 && REGNO (XEXP (x, 0)) == SP_REGNO);
1836 }
1837
1838#if 0
1839 /* This is the double indirection detection, but it currently
1840 doesn't work as cleanly as this code implies, so until we've had
1841 a chance to debug it, leave it disabled. */
1842 if (TARGET_A24 && GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) != PLUS)
1843 {
1844#if DEBUG_DOUBLE
1845 fprintf (stderr, "double indirect\n");
1846#endif
1847 x = XEXP (x, 0);
1848 }
1849#endif
1850
1851 encode_pattern (x);
1852 if (RTX_IS ("r"))
1853 {
1854 /* Most indexable registers can be used without displacements,
1855 although some of them will be emitted with an explicit zero
1856 to please the assembler. */
1857 switch (REGNO (patternr[0]))
1858 {
1859 case A0_REGNO:
1860 case A1_REGNO:
1861 case SB_REGNO:
1862 case FB_REGNO:
1863 case SP_REGNO:
1864 return 1;
1865
1866 default:
1867 if (IS_PSEUDO (patternr[0], strict))
1868 return 1;
1869 return 0;
1870 }
1871 }
1872 if (RTX_IS ("+ri"))
1873 {
1874 /* This is more interesting, because different base registers
1875 allow for different displacements - both range and signedness
1876 - and it differs from chip series to chip series too. */
1877 int rn = REGNO (patternr[1]);
1878 HOST_WIDE_INT offs = INTVAL (patternr[2]);
1879 switch (rn)
1880 {
1881 case A0_REGNO:
1882 case A1_REGNO:
1883 case SB_REGNO:
1884 /* The syntax only allows positive offsets, but when the
1885 offsets span the entire memory range, we can simulate
1886 negative offsets by wrapping. */
1887 if (TARGET_A16)
1888 return (offs >= -65536 && offs <= 65535 - mode_adjust);
1889 if (rn == SB_REGNO)
1890 return (offs >= 0 && offs <= 65535 - mode_adjust);
1891 /* A0 or A1 */
1892 return (offs >= -16777216 && offs <= 16777215);
1893
1894 case FB_REGNO:
1895 if (TARGET_A16)
1896 return (offs >= -128 && offs <= 127 - mode_adjust);
1897 return (offs >= -65536 && offs <= 65535 - mode_adjust);
1898
1899 case SP_REGNO:
1900 return (offs >= -128 && offs <= 127 - mode_adjust);
1901
1902 default:
1903 if (IS_PSEUDO (patternr[1], strict))
1904 return 1;
1905 return 0;
1906 }
1907 }
1908 if (RTX_IS ("+rs") || RTX_IS ("+r+si"))
1909 {
1910 rtx reg = patternr[1];
1911
1912 /* We don't know where the symbol is, so only allow base
1913 registers which support displacements spanning the whole
1914 address range. */
1915 switch (REGNO (reg))
1916 {
1917 case A0_REGNO:
1918 case A1_REGNO:
1919 /* $sb needs a secondary reload, but since it's involved in
1920 memory address reloads too, we don't deal with it very
1921 well. */
1922 /* case SB_REGNO: */
1923 return 1;
1924 default:
1925 if (IS_PSEUDO (reg, strict))
1926 return 1;
1927 return 0;
1928 }
1929 }
1930 return 0;
1931}
1932
1933/* Implements REG_OK_FOR_BASE_P. */
1934int
1935m32c_reg_ok_for_base_p (rtx x, int strict)
1936{
1937 if (GET_CODE (x) != REG)
1938 return 0;
1939 switch (REGNO (x))
1940 {
1941 case A0_REGNO:
1942 case A1_REGNO:
1943 case SB_REGNO:
1944 case FB_REGNO:
1945 case SP_REGNO:
1946 return 1;
1947 default:
1948 if (IS_PSEUDO (x, strict))
1949 return 1;
1950 return 0;
1951 }
1952}
1953
 1954/* We have three choices for choosing fb->aN offsets. If we choose -128,
 1955 we need one MOVA -128[fb],aN opcode and 16-bit aN displacements,
1956 like this:
1957 EB 4B FF mova -128[$fb],$a0
1958 D8 0C FF FF mov.w:Q #0,-1[$a0]
1959
 1960 Alternatively, we subtract the frame size, and hopefully use 8-bit aN
1961 displacements:
1962 7B F4 stc $fb,$a0
1963 77 54 00 01 sub #256,$a0
1964 D8 08 01 mov.w:Q #0,1[$a0]
1965
1966 If we don't offset (i.e. offset by zero), we end up with:
1967 7B F4 stc $fb,$a0
1968 D8 0C 00 FF mov.w:Q #0,-256[$a0]
1969
1970 We have to subtract *something* so that we have a PLUS rtx to mark
1971 that we've done this reload. The -128 offset will never result in
 1972 an 8-bit aN offset, and the payoff for the second case is five
1973 loads *if* those loads are within 256 bytes of the other end of the
1974 frame, so the third case seems best. Note that we subtract the
1975 zero, but detect that in the addhi3 pattern. */
1976
1977#define BIG_FB_ADJ 0
1978
1979/* Implements LEGITIMIZE_ADDRESS. The only address we really have to
1980 worry about is frame base offsets, as $fb has a limited
1981 displacement range. We deal with this by attempting to reload $fb
1982 itself into an address register; that seems to result in the best
1983 code. */
1984#undef TARGET_LEGITIMIZE_ADDRESS
1985#define TARGET_LEGITIMIZE_ADDRESS m32c_legitimize_address
1986static rtx
1987m32c_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
1988 enum machine_mode mode)
1989{
1990#if DEBUG0
1991 fprintf (stderr, "m32c_legitimize_address for mode %s\n", mode_name[mode]);
 1992 debug_rtx (x);
1993 fprintf (stderr, "\n");
1994#endif
1995
1996 if (GET_CODE (x) == PLUS
1997 && GET_CODE (XEXP (x, 0)) == REG
1998 && REGNO (XEXP (x, 0)) == FB_REGNO
1999 && GET_CODE (XEXP (x, 1)) == CONST_INT
2000 && (INTVAL (XEXP (x, 1)) < -128
2001 || INTVAL (XEXP (x, 1)) > (128 - GET_MODE_SIZE (mode))))
2002 {
2003 /* reload FB to A_REGS */
 2004 rtx temp = gen_reg_rtx (Pmode);
2005 x = copy_rtx (x);
2006 emit_insn (gen_rtx_SET (VOIDmode, temp, XEXP (x, 0)));
2007 XEXP (x, 0) = temp;
2008 }
2009
 2010 return x;
2011}
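/* A sketch of the transformation above (illustrative only): an address
   such as (plus (reg $fb) (const_int 300)) becomes

       (set (reg temp) (reg $fb))
       ... (plus (reg temp) (const_int 300)) ...

   so that the out-of-range displacement is taken from an address
   register instead of $fb.  */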
2012
2013/* Implements LEGITIMIZE_RELOAD_ADDRESS. See comment above. */
2014int
2015m32c_legitimize_reload_address (rtx * x,
2016 enum machine_mode mode,
2017 int opnum,
2018 int type, int ind_levels ATTRIBUTE_UNUSED)
2019{
2020#if DEBUG0
2021 fprintf (stderr, "\nm32c_legitimize_reload_address for mode %s\n",
2022 mode_name[mode]);
2023 debug_rtx (*x);
2024#endif
2025
2026 /* At one point, this function tried to get $fb copied to an address
2027 register, which in theory would maximize sharing, but gcc was
2028 *also* still trying to reload the whole address, and we'd run out
2029 of address registers. So we let gcc do the naive (but safe)
2030 reload instead, when the above function doesn't handle it for
2031 us.
2032
2033 The code below is a second attempt at the above. */
2034
2035 if (GET_CODE (*x) == PLUS
2036 && GET_CODE (XEXP (*x, 0)) == REG
2037 && REGNO (XEXP (*x, 0)) == FB_REGNO
2038 && GET_CODE (XEXP (*x, 1)) == CONST_INT
2039 && (INTVAL (XEXP (*x, 1)) < -128
2040 || INTVAL (XEXP (*x, 1)) > (128 - GET_MODE_SIZE (mode))))
2041 {
2042 rtx sum;
2043 int offset = INTVAL (XEXP (*x, 1));
2044 int adjustment = -BIG_FB_ADJ;
2045
2046 sum = gen_rtx_PLUS (Pmode, XEXP (*x, 0),
2047 GEN_INT (adjustment));
2048 *x = gen_rtx_PLUS (Pmode, sum, GEN_INT (offset - adjustment));
2049 if (type == RELOAD_OTHER)
2050 type = RELOAD_FOR_OTHER_ADDRESS;
2051 push_reload (sum, NULL_RTX, &XEXP (*x, 0), NULL,
2052 A_REGS, Pmode, VOIDmode, 0, 0, opnum,
2053 type);
2054 return 1;
2055 }
2056
2057 if (GET_CODE (*x) == PLUS
2058 && GET_CODE (XEXP (*x, 0)) == PLUS
2059 && GET_CODE (XEXP (XEXP (*x, 0), 0)) == REG
2060 && REGNO (XEXP (XEXP (*x, 0), 0)) == FB_REGNO
2061 && GET_CODE (XEXP (XEXP (*x, 0), 1)) == CONST_INT
2062 && GET_CODE (XEXP (*x, 1)) == CONST_INT
2063 )
2064 {
2065 if (type == RELOAD_OTHER)
2066 type = RELOAD_FOR_OTHER_ADDRESS;
2067 push_reload (XEXP (*x, 0), NULL_RTX, &XEXP (*x, 0), NULL,
2068 A_REGS, Pmode, VOIDmode, 0, 0, opnum,
2069 type);
2070 return 1;
2071 }
2072
2073 return 0;
2074}
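/* Worked example for the first case above (illustrative): with
   BIG_FB_ADJ == 0, an address (plus $fb 300) is rewritten as
   (plus (plus $fb 0) 300) and the inner sum is pushed as a reload
   into A_REGS, so several frame references can share one $fb->aN
   copy.  */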
2075
2076/* Implements LEGITIMATE_CONSTANT_P. We split large constants anyway,
2077 so we can allow anything. */
2078int
2079m32c_legitimate_constant_p (rtx x ATTRIBUTE_UNUSED)
2080{
2081 return 1;
2082}
2083
2084
2085/* Condition Code Status */
2086
2087#undef TARGET_FIXED_CONDITION_CODE_REGS
2088#define TARGET_FIXED_CONDITION_CODE_REGS m32c_fixed_condition_code_regs
2089static bool
2090m32c_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
2091{
2092 *p1 = FLG_REGNO;
2093 *p2 = INVALID_REGNUM;
2094 return true;
2095}
2096
2097/* Describing Relative Costs of Operations */
2098
 2099/* Implements TARGET_REGISTER_MOVE_COST. We make impossible moves
2100 prohibitively expensive, like trying to put QIs in r2/r3 (there are
2101 no opcodes to do that). We also discourage use of mem* registers
2102 since they're really memory. */
2103
2104#undef TARGET_REGISTER_MOVE_COST
2105#define TARGET_REGISTER_MOVE_COST m32c_register_move_cost
2106
2107static int
2108m32c_register_move_cost (enum machine_mode mode, reg_class_t from,
2109 reg_class_t to)
2110{
2111 int cost = COSTS_N_INSNS (3);
2112 HARD_REG_SET cc;
2113
2114/* FIXME: pick real values, but not 2 for now. */
2115 COPY_HARD_REG_SET (cc, reg_class_contents[(int) from]);
2116 IOR_HARD_REG_SET (cc, reg_class_contents[(int) to]);
2117
2118 if (mode == QImode
2119 && hard_reg_set_intersect_p (cc, reg_class_contents[R23_REGS]))
 2120 {
 2121 if (hard_reg_set_subset_p (cc, reg_class_contents[R23_REGS]))
2122 cost = COSTS_N_INSNS (1000);
2123 else
2124 cost = COSTS_N_INSNS (80);
2125 }
2126
2127 if (!class_can_hold_mode (from, mode) || !class_can_hold_mode (to, mode))
2128 cost = COSTS_N_INSNS (1000);
2129
 2130 if (reg_classes_intersect_p (from, CR_REGS))
2131 cost += COSTS_N_INSNS (5);
2132
 2133 if (reg_classes_intersect_p (to, CR_REGS))
2134 cost += COSTS_N_INSNS (5);
2135
2136 if (from == MEM_REGS || to == MEM_REGS)
2137 cost += COSTS_N_INSNS (50);
2138 else if (reg_classes_intersect_p (from, MEM_REGS)
2139 || reg_classes_intersect_p (to, MEM_REGS))
2140 cost += COSTS_N_INSNS (10);
2141
2142#if DEBUG0
2143 fprintf (stderr, "register_move_cost %s from %s to %s = %d\n",
2144 mode_name[mode], class_names[(int) from], class_names[(int) to],
2145 cost);
2146#endif
2147 return cost;
2148}
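/* For example (illustrative): a QImode move whose source and
   destination classes both lie within R23_REGS gets the 1000-insn
   cost above, which effectively stops reload from putting QI values
   in r2/r3, while an HImode move between the r0..r3 registers stays
   at the base COSTS_N_INSNS (3).  */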
2149
2150/* Implements TARGET_MEMORY_MOVE_COST. */
2151
2152#undef TARGET_MEMORY_MOVE_COST
2153#define TARGET_MEMORY_MOVE_COST m32c_memory_move_cost
2154
2155static int
 2156m32c_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2157 reg_class_t rclass ATTRIBUTE_UNUSED,
2158 bool in ATTRIBUTE_UNUSED)
2159{
2160 /* FIXME: pick real values. */
2161 return COSTS_N_INSNS (10);
2162}
2163
2164/* Here we try to describe when we use multiple opcodes for one RTX so
2165 that gcc knows when to use them. */
2166#undef TARGET_RTX_COSTS
2167#define TARGET_RTX_COSTS m32c_rtx_costs
2168static bool
2169m32c_rtx_costs (rtx x, int code, int outer_code, int *total,
2170 bool speed ATTRIBUTE_UNUSED)
2171{
2172 switch (code)
2173 {
2174 case REG:
2175 if (REGNO (x) >= MEM0_REGNO && REGNO (x) <= MEM7_REGNO)
2176 *total += COSTS_N_INSNS (500);
2177 else
2178 *total += COSTS_N_INSNS (1);
2179 return true;
2180
2181 case ASHIFT:
2182 case LSHIFTRT:
2183 case ASHIFTRT:
2184 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
2185 {
2186 /* mov.b r1l, r1h */
2187 *total += COSTS_N_INSNS (1);
2188 return true;
2189 }
2190 if (INTVAL (XEXP (x, 1)) > 8
2191 || INTVAL (XEXP (x, 1)) < -8)
2192 {
2193 /* mov.b #N, r1l */
2194 /* mov.b r1l, r1h */
2195 *total += COSTS_N_INSNS (2);
2196 return true;
2197 }
2198 return true;
2199
2200 case LE:
2201 case LEU:
2202 case LT:
2203 case LTU:
2204 case GT:
2205 case GTU:
2206 case GE:
2207 case GEU:
2208 case NE:
2209 case EQ:
2210 if (outer_code == SET)
2211 {
2212 *total += COSTS_N_INSNS (2);
2213 return true;
2214 }
2215 break;
2216
2217 case ZERO_EXTRACT:
2218 {
2219 rtx dest = XEXP (x, 0);
2220 rtx addr = XEXP (dest, 0);
2221 switch (GET_CODE (addr))
2222 {
2223 case CONST_INT:
2224 *total += COSTS_N_INSNS (1);
2225 break;
2226 case SYMBOL_REF:
2227 *total += COSTS_N_INSNS (3);
2228 break;
2229 default:
2230 *total += COSTS_N_INSNS (2);
2231 break;
2232 }
2233 return true;
2234 }
2235 break;
2236
2237 default:
2238 /* Reasonable default. */
2239 if (TARGET_A16 && GET_MODE(x) == SImode)
2240 *total += COSTS_N_INSNS (2);
2241 break;
2242 }
2243 return false;
2244}
2245
2246#undef TARGET_ADDRESS_COST
2247#define TARGET_ADDRESS_COST m32c_address_cost
2248static int
 2249m32c_address_cost (rtx addr, bool speed ATTRIBUTE_UNUSED)
 2250{
 2251 int i;
2252 /* fprintf(stderr, "\naddress_cost\n");
2253 debug_rtx(addr);*/
2254 switch (GET_CODE (addr))
2255 {
2256 case CONST_INT:
2257 i = INTVAL (addr);
2258 if (i == 0)
2259 return COSTS_N_INSNS(1);
2260 if (0 < i && i <= 255)
2261 return COSTS_N_INSNS(2);
2262 if (0 < i && i <= 65535)
2263 return COSTS_N_INSNS(3);
2264 return COSTS_N_INSNS(4);
 2265 case SYMBOL_REF:
 2266 return COSTS_N_INSNS(4);
 2267 case REG:
2268 return COSTS_N_INSNS(1);
2269 case PLUS:
2270 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
2271 {
2272 i = INTVAL (XEXP (addr, 1));
2273 if (i == 0)
2274 return COSTS_N_INSNS(1);
2275 if (0 < i && i <= 255)
2276 return COSTS_N_INSNS(2);
2277 if (0 < i && i <= 65535)
2278 return COSTS_N_INSNS(3);
2279 }
2280 return COSTS_N_INSNS(4);
2281 default:
2282 return 0;
2283 }
2284}
2285
2286/* Defining the Output Assembler Language */
2287
2288/* The Overall Framework of an Assembler File */
2289
2290#undef TARGET_HAVE_NAMED_SECTIONS
2291#define TARGET_HAVE_NAMED_SECTIONS true
2292
2293/* Output of Data */
2294
2295/* We may have 24 bit sizes, which is the native address size.
2296 Currently unused, but provided for completeness. */
2297#undef TARGET_ASM_INTEGER
2298#define TARGET_ASM_INTEGER m32c_asm_integer
2299static bool
2300m32c_asm_integer (rtx x, unsigned int size, int aligned_p)
2301{
2302 switch (size)
2303 {
2304 case 3:
2305 fprintf (asm_out_file, "\t.3byte\t");
2306 output_addr_const (asm_out_file, x);
2307 fputc ('\n', asm_out_file);
2308 return true;
2309 case 4:
2310 if (GET_CODE (x) == SYMBOL_REF)
2311 {
2312 fprintf (asm_out_file, "\t.long\t");
2313 output_addr_const (asm_out_file, x);
2314 fputc ('\n', asm_out_file);
2315 return true;
2316 }
2317 break;
2318 }
2319 return default_assemble_integer (x, size, aligned_p);
2320}
2321
2322/* Output of Assembler Instructions */
2323
 2324/* We use a lookup table because the addressing modes are non-orthogonal. */
2325
2326static struct
2327{
2328 char code;
2329 char const *pattern;
2330 char const *format;
2331}
2332const conversions[] = {
2333 { 0, "r", "0" },
2334
2335 { 0, "mr", "z[1]" },
2336 { 0, "m+ri", "3[2]" },
2337 { 0, "m+rs", "3[2]" },
2338 { 0, "m+r+si", "4+5[2]" },
2339 { 0, "ms", "1" },
2340 { 0, "mi", "1" },
2341 { 0, "m+si", "2+3" },
2342
2343 { 0, "mmr", "[z[2]]" },
2344 { 0, "mm+ri", "[4[3]]" },
2345 { 0, "mm+rs", "[4[3]]" },
2346 { 0, "mm+r+si", "[5+6[3]]" },
2347 { 0, "mms", "[[2]]" },
2348 { 0, "mmi", "[[2]]" },
2349 { 0, "mm+si", "[4[3]]" },
2350
2351 { 0, "i", "#0" },
2352 { 0, "s", "#0" },
2353 { 0, "+si", "#1+2" },
2354 { 0, "l", "#0" },
2355
2356 { 'l', "l", "0" },
2357 { 'd', "i", "0" },
2358 { 'd', "s", "0" },
2359 { 'd', "+si", "1+2" },
2360 { 'D', "i", "0" },
2361 { 'D', "s", "0" },
2362 { 'D', "+si", "1+2" },
2363 { 'x', "i", "#0" },
2364 { 'X', "i", "#0" },
2365 { 'm', "i", "#0" },
2366 { 'b', "i", "#0" },
 2367 { 'B', "i", "0" },
2368 { 'p', "i", "0" },
2369
2370 { 0, 0, 0 }
2371};
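/* Reading the table (illustrative): for the pattern "m+ri", i.e.
   (mem (plus (reg) (const_int))), patternr[2] is the base register
   and patternr[3] the constant, so the format "3[2]" prints something
   like "-4[fb]".  The 'z' in "mr" inserts an explicit 0 displacement
   for $sb/$fb/$sp-based addresses (see m32c_print_operand below).  */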
2372
2373/* This is in order according to the bitfield that pushm/popm use. */
2374static char const *pushm_regs[] = {
2375 "fb", "sb", "a1", "a0", "r3", "r2", "r1", "r0"
2376};
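/* For the %p conversion below (illustrative): a mask of 0x81 has bits
   7 and 0 set, so the operand is printed as "r0,fb", matching the bit
   order that pushm/popm expect.  */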
2377
2378/* Implements PRINT_OPERAND. */
2379void
2380m32c_print_operand (FILE * file, rtx x, int code)
2381{
2382 int i, j, b;
2383 const char *comma;
2384 HOST_WIDE_INT ival;
2385 int unsigned_const = 0;
 2386 int force_sign;
2387
2388 /* Multiplies; constants are converted to sign-extended format but
2389 we need unsigned, so 'u' and 'U' tell us what size unsigned we
2390 need. */
2391 if (code == 'u')
2392 {
2393 unsigned_const = 2;
2394 code = 0;
2395 }
2396 if (code == 'U')
2397 {
2398 unsigned_const = 1;
2399 code = 0;
2400 }
2401 /* This one is only for debugging; you can put it in a pattern to
2402 force this error. */
2403 if (code == '!')
2404 {
2405 fprintf (stderr, "dj: unreviewed pattern:");
2406 if (current_output_insn)
2407 debug_rtx (current_output_insn);
2408 gcc_unreachable ();
2409 }
2410 /* PSImode operations are either .w or .l depending on the target. */
2411 if (code == '&')
2412 {
2413 if (TARGET_A16)
2414 fprintf (file, "w");
2415 else
2416 fprintf (file, "l");
2417 return;
2418 }
2419 /* Inverted conditionals. */
2420 if (code == 'C')
2421 {
2422 switch (GET_CODE (x))
2423 {
2424 case LE:
2425 fputs ("gt", file);
2426 break;
2427 case LEU:
2428 fputs ("gtu", file);
2429 break;
2430 case LT:
2431 fputs ("ge", file);
2432 break;
2433 case LTU:
2434 fputs ("geu", file);
2435 break;
2436 case GT:
2437 fputs ("le", file);
2438 break;
2439 case GTU:
2440 fputs ("leu", file);
2441 break;
2442 case GE:
2443 fputs ("lt", file);
2444 break;
2445 case GEU:
2446 fputs ("ltu", file);
2447 break;
2448 case NE:
2449 fputs ("eq", file);
2450 break;
2451 case EQ:
2452 fputs ("ne", file);
2453 break;
2454 default:
2455 gcc_unreachable ();
2456 }
2457 return;
2458 }
2459 /* Regular conditionals. */
2460 if (code == 'c')
2461 {
2462 switch (GET_CODE (x))
2463 {
2464 case LE:
2465 fputs ("le", file);
2466 break;
2467 case LEU:
2468 fputs ("leu", file);
2469 break;
2470 case LT:
2471 fputs ("lt", file);
2472 break;
2473 case LTU:
2474 fputs ("ltu", file);
2475 break;
2476 case GT:
2477 fputs ("gt", file);
2478 break;
2479 case GTU:
2480 fputs ("gtu", file);
2481 break;
2482 case GE:
2483 fputs ("ge", file);
2484 break;
2485 case GEU:
2486 fputs ("geu", file);
2487 break;
2488 case NE:
2489 fputs ("ne", file);
2490 break;
2491 case EQ:
2492 fputs ("eq", file);
2493 break;
2494 default:
2495 gcc_unreachable ();
2496 }
2497 return;
2498 }
2499 /* Used in negsi2 to do HImode ops on the two parts of an SImode
2500 operand. */
2501 if (code == 'h' && GET_MODE (x) == SImode)
2502 {
2503 x = m32c_subreg (HImode, x, SImode, 0);
2504 code = 0;
2505 }
2506 if (code == 'H' && GET_MODE (x) == SImode)
2507 {
2508 x = m32c_subreg (HImode, x, SImode, 2);
2509 code = 0;
2510 }
2511 if (code == 'h' && GET_MODE (x) == HImode)
2512 {
2513 x = m32c_subreg (QImode, x, HImode, 0);
2514 code = 0;
2515 }
2516 if (code == 'H' && GET_MODE (x) == HImode)
2517 {
2518 /* We can't actually represent this as an rtx. Do it here. */
2519 if (GET_CODE (x) == REG)
2520 {
2521 switch (REGNO (x))
2522 {
2523 case R0_REGNO:
2524 fputs ("r0h", file);
2525 return;
2526 case R1_REGNO:
2527 fputs ("r1h", file);
2528 return;
2529 default:
2530 gcc_unreachable();
2531 }
2532 }
2533 /* This should be a MEM. */
2534 x = m32c_subreg (QImode, x, HImode, 1);
2535 code = 0;
2536 }
2537 /* This is for BMcond, which always wants word register names. */
2538 if (code == 'h' && GET_MODE (x) == QImode)
2539 {
2540 if (GET_CODE (x) == REG)
2541 x = gen_rtx_REG (HImode, REGNO (x));
2542 code = 0;
2543 }
2544 /* 'x' and 'X' need to be ignored for non-immediates. */
2545 if ((code == 'x' || code == 'X') && GET_CODE (x) != CONST_INT)
2546 code = 0;
2547
2548 encode_pattern (x);
 2549 force_sign = 0;
2550 for (i = 0; conversions[i].pattern; i++)
2551 if (conversions[i].code == code
2552 && streq (conversions[i].pattern, pattern))
2553 {
2554 for (j = 0; conversions[i].format[j]; j++)
2555 /* backslash quotes the next character in the output pattern. */
2556 if (conversions[i].format[j] == '\\')
2557 {
2558 fputc (conversions[i].format[j + 1], file);
2559 j++;
2560 }
2561 /* Digits in the output pattern indicate that the
2562 corresponding RTX is to be output at that point. */
2563 else if (ISDIGIT (conversions[i].format[j]))
2564 {
2565 rtx r = patternr[conversions[i].format[j] - '0'];
2566 switch (GET_CODE (r))
2567 {
2568 case REG:
2569 fprintf (file, "%s",
2570 reg_name_with_mode (REGNO (r), GET_MODE (r)));
2571 break;
2572 case CONST_INT:
2573 switch (code)
2574 {
2575 case 'b':
2576 case 'B':
2577 {
2578 int v = INTVAL (r);
2579 int i = (int) exact_log2 (v);
2580 if (i == -1)
2581 i = (int) exact_log2 ((v ^ 0xffff) & 0xffff);
2582 if (i == -1)
2583 i = (int) exact_log2 ((v ^ 0xff) & 0xff);
2584 /* Bit position. */
2585 fprintf (file, "%d", i);
2586 }
2587 break;
2588 case 'x':
2589 /* Unsigned byte. */
2590 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2591 INTVAL (r) & 0xff);
2592 break;
2593 case 'X':
2594 /* Unsigned word. */
2595 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2596 INTVAL (r) & 0xffff);
2597 break;
2598 case 'p':
2599 /* pushm and popm encode a register set into a single byte. */
2600 comma = "";
2601 for (b = 7; b >= 0; b--)
2602 if (INTVAL (r) & (1 << b))
2603 {
2604 fprintf (file, "%s%s", comma, pushm_regs[b]);
2605 comma = ",";
2606 }
2607 break;
2608 case 'm':
2609 /* "Minus". Output -X */
2610 ival = (-INTVAL (r) & 0xffff);
2611 if (ival & 0x8000)
2612 ival = ival - 0x10000;
2613 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2614 break;
2615 default:
2616 ival = INTVAL (r);
2617 if (conversions[i].format[j + 1] == '[' && ival < 0)
2618 {
2619 /* We can simulate negative displacements by
2620 taking advantage of address space
2621 wrapping when the offset can span the
2622 entire address range. */
2623 rtx base =
2624 patternr[conversions[i].format[j + 2] - '0'];
2625 if (GET_CODE (base) == REG)
2626 switch (REGNO (base))
2627 {
2628 case A0_REGNO:
2629 case A1_REGNO:
2630 if (TARGET_A24)
2631 ival = 0x1000000 + ival;
2632 else
2633 ival = 0x10000 + ival;
2634 break;
2635 case SB_REGNO:
2636 if (TARGET_A16)
2637 ival = 0x10000 + ival;
2638 break;
2639 }
2640 }
2641 else if (code == 'd' && ival < 0 && j == 0)
2642 /* The "mova" opcode is used to do addition by
2643 computing displacements, but again, we need
2644 displacements to be unsigned *if* they're
2645 the only component of the displacement
2646 (i.e. no "symbol-4" type displacement). */
2647 ival = (TARGET_A24 ? 0x1000000 : 0x10000) + ival;
2648
2649 if (conversions[i].format[j] == '0')
2650 {
2651 /* More conversions to unsigned. */
2652 if (unsigned_const == 2)
2653 ival &= 0xffff;
2654 if (unsigned_const == 1)
2655 ival &= 0xff;
2656 }
2657 if (streq (conversions[i].pattern, "mi")
2658 || streq (conversions[i].pattern, "mmi"))
2659 {
2660 /* Integers used as addresses are unsigned. */
2661 ival &= (TARGET_A24 ? 0xffffff : 0xffff);
2662 }
2663 if (force_sign && ival >= 0)
2664 fputc ('+', file);
2665 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2666 break;
2667 }
2668 break;
2669 case CONST_DOUBLE:
2670 /* We don't have const_double constants. If it
2671 happens, make it obvious. */
2672 fprintf (file, "[const_double 0x%lx]",
2673 (unsigned long) CONST_DOUBLE_HIGH (r));
2674 break;
2675 case SYMBOL_REF:
2676 assemble_name (file, XSTR (r, 0));
2677 break;
2678 case LABEL_REF:
2679 output_asm_label (r);
2680 break;
2681 default:
2682 fprintf (stderr, "don't know how to print this operand:");
2683 debug_rtx (r);
2684 gcc_unreachable ();
2685 }
2686 }
2687 else
2688 {
2689 if (conversions[i].format[j] == 'z')
2690 {
2691 /* Some addressing modes *must* have a displacement,
2692 so insert a zero here if needed. */
2693 int k;
2694 for (k = j + 1; conversions[i].format[k]; k++)
2695 if (ISDIGIT (conversions[i].format[k]))
2696 {
2697 rtx reg = patternr[conversions[i].format[k] - '0'];
2698 if (GET_CODE (reg) == REG
2699 && (REGNO (reg) == SB_REGNO
2700 || REGNO (reg) == FB_REGNO
2701 || REGNO (reg) == SP_REGNO))
2702 fputc ('0', file);
2703 }
2704 continue;
2705 }
2706 /* Signed displacements off symbols need to have signs
2707 blended cleanly. */
2708 if (conversions[i].format[j] == '+'
 2709 && (!code || code == 'D' || code == 'd')
 2710 && ISDIGIT (conversions[i].format[j + 1])
2711 && (GET_CODE (patternr[conversions[i].format[j + 1] - '0'])
2712 == CONST_INT))
2713 {
2714 force_sign = 1;
2715 continue;
2716 }
2717 fputc (conversions[i].format[j], file);
2718 }
2719 break;
2720 }
2721 if (!conversions[i].pattern)
2722 {
2723 fprintf (stderr, "unconvertible operand %c `%s'", code ? code : '-',
2724 pattern);
2725 debug_rtx (x);
2726 fprintf (file, "[%c.%s]", code ? code : '-', pattern);
2727 }
2728
2729 return;
2730}
2731
2732/* Implements PRINT_OPERAND_PUNCT_VALID_P. See m32c_print_operand
2733 above for descriptions of what these do. */
2734int
2735m32c_print_operand_punct_valid_p (int c)
2736{
2737 if (c == '&' || c == '!')
2738 return 1;
2739 return 0;
2740}
2741
2742/* Implements PRINT_OPERAND_ADDRESS. Nothing unusual here. */
2743void
2744m32c_print_operand_address (FILE * stream, rtx address)
2745{
2746 if (GET_CODE (address) == MEM)
2747 address = XEXP (address, 0);
2748 else
2749 /* cf: gcc.dg/asm-4.c. */
2750 gcc_assert (GET_CODE (address) == REG);
2751
2752 m32c_print_operand (stream, address, 0);
2753}
2754
2755/* Implements ASM_OUTPUT_REG_PUSH. Control registers are pushed
2756 differently than general registers. */
2757void
2758m32c_output_reg_push (FILE * s, int regno)
2759{
2760 if (regno == FLG_REGNO)
2761 fprintf (s, "\tpushc\tflg\n");
2762 else
 2763 fprintf (s, "\tpush.%c\t%s\n",
2764 " bwll"[reg_push_size (regno)], reg_names[regno]);
2765}
2766
2767/* Likewise for ASM_OUTPUT_REG_POP. */
2768void
2769m32c_output_reg_pop (FILE * s, int regno)
2770{
2771 if (regno == FLG_REGNO)
2772 fprintf (s, "\tpopc\tflg\n");
2773 else
 2774 fprintf (s, "\tpop.%c\t%s\n",
2775 " bwll"[reg_push_size (regno)], reg_names[regno]);
2776}
2777
2778/* Defining target-specific uses of `__attribute__' */
2779
2780/* Used to simplify the logic below. Find the attributes wherever
2781 they may be. */
2782#define M32C_ATTRIBUTES(decl) \
2783 (TYPE_P (decl)) ? TYPE_ATTRIBUTES (decl) \
2784 : DECL_ATTRIBUTES (decl) \
2785 ? (DECL_ATTRIBUTES (decl)) \
2786 : TYPE_ATTRIBUTES (TREE_TYPE (decl))
2787
2788/* Returns TRUE if the given tree has the "interrupt" attribute. */
2789static int
2790interrupt_p (tree node ATTRIBUTE_UNUSED)
2791{
2792 tree list = M32C_ATTRIBUTES (node);
2793 while (list)
2794 {
2795 if (is_attribute_p ("interrupt", TREE_PURPOSE (list)))
2796 return 1;
2797 list = TREE_CHAIN (list);
2798 }
2799 return fast_interrupt_p (node);
2800}
2801
2802/* Returns TRUE if the given tree has the "bank_switch" attribute. */
2803static int
2804bank_switch_p (tree node ATTRIBUTE_UNUSED)
2805{
2806 tree list = M32C_ATTRIBUTES (node);
2807 while (list)
2808 {
2809 if (is_attribute_p ("bank_switch", TREE_PURPOSE (list)))
2810 return 1;
2811 list = TREE_CHAIN (list);
2812 }
2813 return 0;
2814}
2815
2816/* Returns TRUE if the given tree has the "fast_interrupt" attribute. */
2817static int
2818fast_interrupt_p (tree node ATTRIBUTE_UNUSED)
2819{
2820 tree list = M32C_ATTRIBUTES (node);
2821 while (list)
2822 {
2823 if (is_attribute_p ("fast_interrupt", TREE_PURPOSE (list)))
2824 return 1;
2825 list = TREE_CHAIN (list);
2826 }
2827 return 0;
2828}
2829
2830static tree
2831interrupt_handler (tree * node ATTRIBUTE_UNUSED,
2832 tree name ATTRIBUTE_UNUSED,
2833 tree args ATTRIBUTE_UNUSED,
2834 int flags ATTRIBUTE_UNUSED,
2835 bool * no_add_attrs ATTRIBUTE_UNUSED)
2836{
2837 return NULL_TREE;
2838}
2839
 2840/* Returns TRUE if the given tree has the "function_vector" attribute. */
2841int
2842m32c_special_page_vector_p (tree func)
2843{
2844 tree list;
2845
2846 if (TREE_CODE (func) != FUNCTION_DECL)
2847 return 0;
2848
 2849 list = M32C_ATTRIBUTES (func);
2850 while (list)
2851 {
2852 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2853 return 1;
2854 list = TREE_CHAIN (list);
2855 }
2856 return 0;
2857}
2858
2859static tree
2860function_vector_handler (tree * node ATTRIBUTE_UNUSED,
2861 tree name ATTRIBUTE_UNUSED,
2862 tree args ATTRIBUTE_UNUSED,
2863 int flags ATTRIBUTE_UNUSED,
2864 bool * no_add_attrs ATTRIBUTE_UNUSED)
2865{
2866 if (TARGET_R8C)
2867 {
2868 /* The attribute is not supported for R8C target. */
2869 warning (OPT_Wattributes,
2870 "%qE attribute is not supported for R8C target",
2871 name);
2872 *no_add_attrs = true;
2873 }
2874 else if (TREE_CODE (*node) != FUNCTION_DECL)
2875 {
2876 /* The attribute must be applied to functions only. */
2877 warning (OPT_Wattributes,
2878 "%qE attribute applies only to functions",
2879 name);
2880 *no_add_attrs = true;
2881 }
2882 else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
2883 {
2884 /* The argument must be a constant integer. */
2885 warning (OPT_Wattributes,
2886 "%qE attribute argument not an integer constant",
2887 name);
2888 *no_add_attrs = true;
2889 }
2890 else if (TREE_INT_CST_LOW (TREE_VALUE (args)) < 18
2891 || TREE_INT_CST_LOW (TREE_VALUE (args)) > 255)
2892 {
 2893 /* The argument value must be between 18 and 255. */
 2894 warning (OPT_Wattributes,
 2895 "%qE attribute argument should be between 18 and 255",
2896 name);
2897 *no_add_attrs = true;
2898 }
2899 return NULL_TREE;
2900}
2901
 2902/* If the function is assigned the attribute 'function_vector', return
 2903 its function vector number; otherwise return zero. */
2904int
2905current_function_special_page_vector (rtx x)
2906{
2907 int num;
2908
2909 if ((GET_CODE(x) == SYMBOL_REF)
2910 && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
2911 {
 2912 tree list;
2913 tree t = SYMBOL_REF_DECL (x);
2914
2915 if (TREE_CODE (t) != FUNCTION_DECL)
2916 return 0;
2917
 2918 list = M32C_ATTRIBUTES (t);
2919 while (list)
2920 {
2921 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2922 {
2923 num = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list)));
2924 return num;
2925 }
2926
2927 list = TREE_CHAIN (list);
2928 }
2929
2930 return 0;
2931 }
2932 else
2933 return 0;
2934}
2935
2936#undef TARGET_ATTRIBUTE_TABLE
2937#define TARGET_ATTRIBUTE_TABLE m32c_attribute_table
2938static const struct attribute_spec m32c_attribute_table[] = {
2939 {"interrupt", 0, 0, false, false, false, interrupt_handler},
2940 {"bank_switch", 0, 0, false, false, false, interrupt_handler},
2941 {"fast_interrupt", 0, 0, false, false, false, interrupt_handler},
 2942 {"function_vector", 1, 1, true, false, false, function_vector_handler},
2943 {0, 0, 0, 0, 0, 0, 0}
2944};
2945
2946#undef TARGET_COMP_TYPE_ATTRIBUTES
2947#define TARGET_COMP_TYPE_ATTRIBUTES m32c_comp_type_attributes
2948static int
2949m32c_comp_type_attributes (const_tree type1 ATTRIBUTE_UNUSED,
2950 const_tree type2 ATTRIBUTE_UNUSED)
2951{
2952 /* 0=incompatible 1=compatible 2=warning */
2953 return 1;
2954}
2955
2956#undef TARGET_INSERT_ATTRIBUTES
2957#define TARGET_INSERT_ATTRIBUTES m32c_insert_attributes
2958static void
2959m32c_insert_attributes (tree node ATTRIBUTE_UNUSED,
2960 tree * attr_ptr ATTRIBUTE_UNUSED)
2961{
2962 unsigned addr;
2963 /* See if we need to make #pragma address variables volatile. */
2964
2965 if (TREE_CODE (node) == VAR_DECL)
2966 {
2967 char *name = IDENTIFIER_POINTER (DECL_NAME (node));
2968 if (m32c_get_pragma_address (name, &addr))
2969 {
2970 TREE_THIS_VOLATILE (node) = true;
2971 }
2972 }
2973}
2974
2975
2976struct GTY(()) pragma_entry {
2977 const char *varname;
2978 unsigned address;
2979};
2980typedef struct pragma_entry pragma_entry;
2981
2982/* Hash table of pragma info. */
2983static GTY((param_is (pragma_entry))) htab_t pragma_htab;
2984
2985static int
2986pragma_entry_eq (const void *p1, const void *p2)
2987{
2988 const pragma_entry *old = (const pragma_entry *) p1;
2989 const char *new_name = (const char *) p2;
2990
2991 return strcmp (old->varname, new_name) == 0;
2992}
2993
2994static hashval_t
2995pragma_entry_hash (const void *p)
2996{
2997 const pragma_entry *old = (const pragma_entry *) p;
2998 return htab_hash_string (old->varname);
2999}
3000
3001void
3002m32c_note_pragma_address (const char *varname, unsigned address)
3003{
3004 pragma_entry **slot;
3005
3006 if (!pragma_htab)
3007 pragma_htab = htab_create_ggc (31, pragma_entry_hash,
3008 pragma_entry_eq, NULL);
3009
3010 slot = (pragma_entry **)
3011 htab_find_slot_with_hash (pragma_htab, varname,
3012 htab_hash_string (varname), INSERT);
3013
3014 if (!*slot)
3015 {
3016 *slot = ggc_alloc_pragma_entry ();
3017 (*slot)->varname = ggc_strdup (varname);
3018 }
3019 (*slot)->address = address;
3020}
3021
3022static bool
3023m32c_get_pragma_address (const char *varname, unsigned *address)
3024{
3025 pragma_entry **slot;
3026
3027 if (!pragma_htab)
3028 return false;
3029
3030 slot = (pragma_entry **)
3031 htab_find_slot_with_hash (pragma_htab, varname,
3032 htab_hash_string (varname), NO_INSERT);
3033 if (slot && *slot)
3034 {
3035 *address = (*slot)->address;
3036 return true;
3037 }
3038 return false;
3039}
3040
3041void
3042m32c_output_aligned_common (FILE *stream, tree decl, const char *name,
3043 int size, int align, int global)
3044{
3045 unsigned address;
3046
3047 if (m32c_get_pragma_address (name, &address))
3048 {
3049 /* We never output these as global. */
3050 assemble_name (stream, name);
3051 fprintf (stream, " = 0x%04x\n", address);
3052 return;
3053 }
3054 if (!global)
3055 {
3056 fprintf (stream, "\t.local\t");
3057 assemble_name (stream, name);
3058 fprintf (stream, "\n");
3059 }
3060 fprintf (stream, "\t.comm\t");
3061 assemble_name (stream, name);
3062 fprintf (stream, ",%u,%u\n", size, align / BITS_PER_UNIT);
3063}
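/* Example of the #pragma address path (illustrative): a variable
   registered at 0x3c0 via m32c_note_pragma_address is marked volatile
   by m32c_insert_attributes and is emitted here as

       name = 0x03c0

   instead of being placed in common storage with .comm.  */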
3064
3065/* Predicates */
3066
 3067/* This is a list of legal subregs of hard regs. */
3068static const struct {
3069 unsigned char outer_mode_size;
3070 unsigned char inner_mode_size;
3071 unsigned char byte_mask;
3072 unsigned char legal_when;
 3073 unsigned int regno;
 3074} legal_subregs[] = {
3075 {1, 2, 0x03, 1, R0_REGNO}, /* r0h r0l */
3076 {1, 2, 0x03, 1, R1_REGNO}, /* r1h r1l */
3077 {1, 2, 0x01, 1, A0_REGNO},
3078 {1, 2, 0x01, 1, A1_REGNO},
 3079
3080 {1, 4, 0x01, 1, A0_REGNO},
3081 {1, 4, 0x01, 1, A1_REGNO},
 3082
3083 {2, 4, 0x05, 1, R0_REGNO}, /* r2 r0 */
3084 {2, 4, 0x05, 1, R1_REGNO}, /* r3 r1 */
3085 {2, 4, 0x05, 16, A0_REGNO}, /* a1 a0 */
3086 {2, 4, 0x01, 24, A0_REGNO}, /* a1 a0 */
3087 {2, 4, 0x01, 24, A1_REGNO}, /* a1 a0 */
 3088
 3089 {4, 8, 0x55, 1, R0_REGNO}, /* r3 r1 r2 r0 */
3090};
3091
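/* Examples (illustrative): (subreg:QI (reg:HI r0) 1) is legal (it is
   simply r0h), whereas (subreg:QI (reg:HI a0) 1) is not, because the
   byte_mask for $a0 only allows byte 0.  */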
3092/* Returns TRUE if OP is a subreg of a hard reg which we don't
 3093 support. We also bail on MEMs with illegal addresses. */
3094bool
3095m32c_illegal_subreg_p (rtx op)
3096{
3097 int offset;
3098 unsigned int i;
3099 int src_mode, dest_mode;
3100
3101 if (GET_CODE (op) == MEM
3102 && ! m32c_legitimate_address_p (Pmode, XEXP (op, 0), false))
3103 {
3104 return true;
3105 }
3106
3107 if (GET_CODE (op) != SUBREG)
3108 return false;
3109
3110 dest_mode = GET_MODE (op);
3111 offset = SUBREG_BYTE (op);
3112 op = SUBREG_REG (op);
3113 src_mode = GET_MODE (op);
3114
3115 if (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (src_mode))
3116 return false;
3117 if (GET_CODE (op) != REG)
3118 return false;
3119 if (REGNO (op) >= MEM0_REGNO)
3120 return false;
3121
3122 offset = (1 << offset);
3123
 3124 for (i = 0; i < ARRAY_SIZE (legal_subregs); i ++)
3125 if (legal_subregs[i].outer_mode_size == GET_MODE_SIZE (dest_mode)
3126 && legal_subregs[i].regno == REGNO (op)
3127 && legal_subregs[i].inner_mode_size == GET_MODE_SIZE (src_mode)
3128 && legal_subregs[i].byte_mask & offset)
3129 {
3130 switch (legal_subregs[i].legal_when)
3131 {
3132 case 1:
3133 return false;
3134 case 16:
3135 if (TARGET_A16)
3136 return false;
3137 break;
3138 case 24:
3139 if (TARGET_A24)
3140 return false;
3141 break;
3142 }
3143 }
3144 return true;
3145}
3146
3147/* Returns TRUE if we support a move between the first two operands.
3148 At the moment, we just want to discourage mem to mem moves until
3149 after reload, because reload has a hard time with our limited
3150 number of address registers, and we can get into a situation where
3151 we need three of them when we only have two. */
3152bool
3153m32c_mov_ok (rtx * operands, enum machine_mode mode ATTRIBUTE_UNUSED)
3154{
3155 rtx op0 = operands[0];
3156 rtx op1 = operands[1];
3157
3158 if (TARGET_A24)
3159 return true;
3160
3161#define DEBUG_MOV_OK 0
3162#if DEBUG_MOV_OK
3163 fprintf (stderr, "m32c_mov_ok %s\n", mode_name[mode]);
3164 debug_rtx (op0);
3165 debug_rtx (op1);
3166#endif
3167
3168 if (GET_CODE (op0) == SUBREG)
3169 op0 = XEXP (op0, 0);
3170 if (GET_CODE (op1) == SUBREG)
3171 op1 = XEXP (op1, 0);
3172
3173 if (GET_CODE (op0) == MEM
3174 && GET_CODE (op1) == MEM
3175 && ! reload_completed)
3176 {
3177#if DEBUG_MOV_OK
3178 fprintf (stderr, " - no, mem to mem\n");
3179#endif
3180 return false;
3181 }
3182
3183#if DEBUG_MOV_OK
3184 fprintf (stderr, " - ok\n");
3185#endif
3186 return true;
3187}
3188
 3189/* Returns TRUE if two consecutive HImode mov instructions, generated
 3190 to move a 32-bit immediate into a double-word memory location, can
 3191 be combined into a single SImode mov instruction. */
3192bool
3193m32c_immd_dbl_mov (rtx * operands,
3194 enum machine_mode mode ATTRIBUTE_UNUSED)
3195{
3196 int flag = 0, okflag = 0, offset1 = 0, offset2 = 0, offsetsign = 0;
3197 const char *str1;
3198 const char *str2;
3199
3200 if (GET_CODE (XEXP (operands[0], 0)) == SYMBOL_REF
3201 && MEM_SCALAR_P (operands[0])
3202 && !MEM_IN_STRUCT_P (operands[0])
3203 && GET_CODE (XEXP (operands[2], 0)) == CONST
3204 && GET_CODE (XEXP (XEXP (operands[2], 0), 0)) == PLUS
3205 && GET_CODE (XEXP (XEXP (XEXP (operands[2], 0), 0), 0)) == SYMBOL_REF
3206 && GET_CODE (XEXP (XEXP (XEXP (operands[2], 0), 0), 1)) == CONST_INT
3207 && MEM_SCALAR_P (operands[2])
3208 && !MEM_IN_STRUCT_P (operands[2]))
3209 flag = 1;
3210
3211 else if (GET_CODE (XEXP (operands[0], 0)) == CONST
3212 && GET_CODE (XEXP (XEXP (operands[0], 0), 0)) == PLUS
3213 && GET_CODE (XEXP (XEXP (XEXP (operands[0], 0), 0), 0)) == SYMBOL_REF
3214 && MEM_SCALAR_P (operands[0])
3215 && !MEM_IN_STRUCT_P (operands[0])
 3216 && !(INTVAL (XEXP (XEXP (XEXP (operands[0], 0), 0), 1)) %4)
3217 && GET_CODE (XEXP (operands[2], 0)) == CONST
3218 && GET_CODE (XEXP (XEXP (operands[2], 0), 0)) == PLUS
3219 && GET_CODE (XEXP (XEXP (XEXP (operands[2], 0), 0), 0)) == SYMBOL_REF
3220 && MEM_SCALAR_P (operands[2])
3221 && !MEM_IN_STRUCT_P (operands[2]))
3222 flag = 2;
3223
3224 else if (GET_CODE (XEXP (operands[0], 0)) == PLUS
3225 && GET_CODE (XEXP (XEXP (operands[0], 0), 0)) == REG
3226 && REGNO (XEXP (XEXP (operands[0], 0), 0)) == FB_REGNO
3227 && GET_CODE (XEXP (XEXP (operands[0], 0), 1)) == CONST_INT
3228 && MEM_SCALAR_P (operands[0])
3229 && !MEM_IN_STRUCT_P (operands[0])
 3230 && !(INTVAL (XEXP (XEXP (operands[0], 0), 1)) %4)
3231 && REGNO (XEXP (XEXP (operands[2], 0), 0)) == FB_REGNO
3232 && GET_CODE (XEXP (XEXP (operands[2], 0), 1)) == CONST_INT
3233 && MEM_SCALAR_P (operands[2])
3234 && !MEM_IN_STRUCT_P (operands[2]))
3235 flag = 3;
3236
3237 else
3238 return false;
3239
3240 switch (flag)
3241 {
3242 case 1:
3243 str1 = XSTR (XEXP (operands[0], 0), 0);
3244 str2 = XSTR (XEXP (XEXP (XEXP (operands[2], 0), 0), 0), 0);
3245 if (strcmp (str1, str2) == 0)
3246 okflag = 1;
3247 else
3248 okflag = 0;
3249 break;
3250 case 2:
3251 str1 = XSTR (XEXP (XEXP (XEXP (operands[0], 0), 0), 0), 0);
3252 str2 = XSTR (XEXP (XEXP (XEXP (operands[2], 0), 0), 0), 0);
3253 if (strcmp(str1,str2) == 0)
3254 okflag = 1;
3255 else
3256 okflag = 0;
3257 break;
3258 case 3:
3259 offset1 = INTVAL (XEXP (XEXP (operands[0], 0), 1));
3260 offset2 = INTVAL (XEXP (XEXP (operands[2], 0), 1));
3261 offsetsign = offset1 >> ((sizeof (offset1) * 8) -1);
3262 if (((offset2-offset1) == 2) && offsetsign != 0)
3263 okflag = 1;
3264 else
3265 okflag = 0;
3266 break;
3267 default:
3268 okflag = 0;
3269 }
3270
3271 if (okflag == 1)
3272 {
3273 HOST_WIDE_INT val;
3274 operands[4] = gen_rtx_MEM (SImode, XEXP (operands[0], 0));
3275
 3276 val = (INTVAL (operands[3]) << 16) + (INTVAL (operands[1]) & 0xFFFF);
3277 operands[5] = gen_rtx_CONST_INT (VOIDmode, val);
3278
3279 return true;
3280 }
3281
3282 return false;
3283}
3284
3285/* Expanders */
3286
3287/* Subregs are non-orthogonal for us, because our registers are all
3288 different sizes. */
3289static rtx
3290m32c_subreg (enum machine_mode outer,
3291 rtx x, enum machine_mode inner, int byte)
3292{
3293 int r, nr = -1;
3294
3295 /* Converting MEMs to different types that are the same size, we
3296 just rewrite them. */
3297 if (GET_CODE (x) == SUBREG
3298 && SUBREG_BYTE (x) == 0
3299 && GET_CODE (SUBREG_REG (x)) == MEM
3300 && (GET_MODE_SIZE (GET_MODE (x))
3301 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
3302 {
3303 rtx oldx = x;
3304 x = gen_rtx_MEM (GET_MODE (x), XEXP (SUBREG_REG (x), 0));
3305 MEM_COPY_ATTRIBUTES (x, SUBREG_REG (oldx));
3306 }
3307
3308 /* Push/pop get done as smaller push/pops. */
3309 if (GET_CODE (x) == MEM
3310 && (GET_CODE (XEXP (x, 0)) == PRE_DEC
3311 || GET_CODE (XEXP (x, 0)) == POST_INC))
3312 return gen_rtx_MEM (outer, XEXP (x, 0));
3313 if (GET_CODE (x) == SUBREG
3314 && GET_CODE (XEXP (x, 0)) == MEM
3315 && (GET_CODE (XEXP (XEXP (x, 0), 0)) == PRE_DEC
3316 || GET_CODE (XEXP (XEXP (x, 0), 0)) == POST_INC))
3317 return gen_rtx_MEM (outer, XEXP (XEXP (x, 0), 0));
3318
3319 if (GET_CODE (x) != REG)
3320 {
3321 rtx r = simplify_gen_subreg (outer, x, inner, byte);
3322 if (GET_CODE (r) == SUBREG
3323 && GET_CODE (x) == MEM
3324 && MEM_VOLATILE_P (x))
3325 {
3326 /* Volatile MEMs don't get simplified, but we need them to
3327 be. We are little endian, so the subreg byte is the
3328 offset. */
3329 r = adjust_address (x, outer, byte);
3330 }
3331 return r;
3332 }
3333
3334 r = REGNO (x);
3335 if (r >= FIRST_PSEUDO_REGISTER || r == AP_REGNO)
3336 return simplify_gen_subreg (outer, x, inner, byte);
3337
3338 if (IS_MEM_REGNO (r))
3339 return simplify_gen_subreg (outer, x, inner, byte);
3340
3341 /* This is where the complexities of our register layout are
3342 described. */
3343 if (byte == 0)
3344 nr = r;
3345 else if (outer == HImode)
3346 {
3347 if (r == R0_REGNO && byte == 2)
3348 nr = R2_REGNO;
3349 else if (r == R0_REGNO && byte == 4)
3350 nr = R1_REGNO;
3351 else if (r == R0_REGNO && byte == 6)
3352 nr = R3_REGNO;
3353 else if (r == R1_REGNO && byte == 2)
3354 nr = R3_REGNO;
3355 else if (r == A0_REGNO && byte == 2)
3356 nr = A1_REGNO;
3357 }
3358 else if (outer == SImode)
3359 {
3360 if (r == R0_REGNO && byte == 0)
3361 nr = R0_REGNO;
3362 else if (r == R0_REGNO && byte == 4)
3363 nr = R1_REGNO;
3364 }
3365 if (nr == -1)
3366 {
3367 fprintf (stderr, "m32c_subreg %s %s %d\n",
3368 mode_name[outer], mode_name[inner], byte);
3369 debug_rtx (x);
3370 gcc_unreachable ();
3371 }
3372 return gen_rtx_REG (outer, nr);
3373}
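/* For instance (illustrative): with an SImode value living in r2r0,
   m32c_subreg (HImode, <reg:SI r0>, SImode, 2) yields (reg:HI r2),
   the upper half, while byte 0 simply gives back r0.  */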
3374
3375/* Used to emit move instructions. We split some moves,
3376 and avoid mem-mem moves. */
3377int
3378m32c_prepare_move (rtx * operands, enum machine_mode mode)
3379{
3380 if (TARGET_A16 && mode == PSImode)
3381 return m32c_split_move (operands, mode, 1);
3382 if ((GET_CODE (operands[0]) == MEM)
3383 && (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY))
3384 {
3385 rtx pmv = XEXP (operands[0], 0);
3386 rtx dest_reg = XEXP (pmv, 0);
3387 rtx dest_mod = XEXP (pmv, 1);
3388
3389 emit_insn (gen_rtx_SET (Pmode, dest_reg, dest_mod));
3390 operands[0] = gen_rtx_MEM (mode, dest_reg);
3391 }
 3392 if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
3393 operands[1] = copy_to_mode_reg (mode, operands[1]);
3394 return 0;
3395}
3396
3397#define DEBUG_SPLIT 0
3398
3399/* Returns TRUE if the given PSImode move should be split. We split
3400 for all r8c/m16c moves, since it doesn't support them, and for
3401 POP.L as we can only *push* SImode. */
3402int
3403m32c_split_psi_p (rtx * operands)
3404{
3405#if DEBUG_SPLIT
3406 fprintf (stderr, "\nm32c_split_psi_p\n");
3407 debug_rtx (operands[0]);
3408 debug_rtx (operands[1]);
3409#endif
3410 if (TARGET_A16)
3411 {
3412#if DEBUG_SPLIT
3413 fprintf (stderr, "yes, A16\n");
3414#endif
3415 return 1;
3416 }
3417 if (GET_CODE (operands[1]) == MEM
3418 && GET_CODE (XEXP (operands[1], 0)) == POST_INC)
3419 {
3420#if DEBUG_SPLIT
3421 fprintf (stderr, "yes, pop.l\n");
3422#endif
3423 return 1;
3424 }
3425#if DEBUG_SPLIT
3426 fprintf (stderr, "no, default\n");
3427#endif
3428 return 0;
3429}
3430
3431/* Split the given move. SPLIT_ALL is 0 if splitting is optional
3432 (define_expand), 1 if it is not optional (define_insn_and_split),
3433 and 3 for define_split (alternate api). */
3434int
3435m32c_split_move (rtx * operands, enum machine_mode mode, int split_all)
3436{
3437 rtx s[4], d[4];
3438 int parts, si, di, rev = 0;
3439 int rv = 0, opi = 2;
3440 enum machine_mode submode = HImode;
3441 rtx *ops, local_ops[10];
3442
3443 /* define_split modifies the existing operands, but the other two
3444 emit new insns. OPS is where we store the operand pairs, which
3445 we emit later. */
3446 if (split_all == 3)
3447 ops = operands;
3448 else
3449 ops = local_ops;
3450
3451 /* Else HImode. */
3452 if (mode == DImode)
3453 submode = SImode;
3454
3455 /* Before splitting mem-mem moves, force one operand into a
3456 register. */
 3457 if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
3458 {
3459#if DEBUG0
3460 fprintf (stderr, "force_reg...\n");
3461 debug_rtx (operands[1]);
3462#endif
3463 operands[1] = force_reg (mode, operands[1]);
3464#if DEBUG0
3465 debug_rtx (operands[1]);
3466#endif
3467 }
3468
3469 parts = 2;
3470
3471#if DEBUG_SPLIT
3472 fprintf (stderr, "\nsplit_move %d all=%d\n", !can_create_pseudo_p (),
3473 split_all);
3474 debug_rtx (operands[0]);
3475 debug_rtx (operands[1]);
3476#endif
3477
3478 /* Note that split_all is not used to select the api after this
3479 point, so it's safe to set it to 3 even with define_insn. */
3480 /* None of the chips can move SI operands to sp-relative addresses,
3481 so we always split those. */
3482 if (m32c_extra_constraint_p (operands[0], 'S', "Ss"))
3483 split_all = 3;
3484
3485 /* We don't need to split these. */
3486 if (TARGET_A24
3487 && split_all != 3
3488 && (mode == SImode || mode == PSImode)
3489 && !(GET_CODE (operands[1]) == MEM
3490 && GET_CODE (XEXP (operands[1], 0)) == POST_INC))
3491 return 0;
3492
3493 /* First, enumerate the subregs we'll be dealing with. */
3494 for (si = 0; si < parts; si++)
3495 {
3496 d[si] =
3497 m32c_subreg (submode, operands[0], mode,
3498 si * GET_MODE_SIZE (submode));
3499 s[si] =
3500 m32c_subreg (submode, operands[1], mode,
3501 si * GET_MODE_SIZE (submode));
3502 }
3503
3504 /* Split pushes by emitting a sequence of smaller pushes. */
3505 if (GET_CODE (d[0]) == MEM && GET_CODE (XEXP (d[0], 0)) == PRE_DEC)
3506 {
3507 for (si = parts - 1; si >= 0; si--)
3508 {
3509 ops[opi++] = gen_rtx_MEM (submode,
3510 gen_rtx_PRE_DEC (Pmode,
3511 gen_rtx_REG (Pmode,
3512 SP_REGNO)));
3513 ops[opi++] = s[si];
3514 }
3515
3516 rv = 1;
3517 }
3518 /* Likewise for pops. */
3519 else if (GET_CODE (s[0]) == MEM && GET_CODE (XEXP (s[0], 0)) == POST_INC)
3520 {
3521 for (di = 0; di < parts; di++)
3522 {
3523 ops[opi++] = d[di];
3524 ops[opi++] = gen_rtx_MEM (submode,
3525 gen_rtx_POST_INC (Pmode,
3526 gen_rtx_REG (Pmode,
3527 SP_REGNO)));
3528 }
3529 rv = 1;
3530 }
3531 else if (split_all)
3532 {
3533 /* if d[di] == s[si] for any di < si, we'll early clobber. */
3534 for (di = 0; di < parts - 1; di++)
3535 for (si = di + 1; si < parts; si++)
3536 if (reg_mentioned_p (d[di], s[si]))
3537 rev = 1;
3538
3539 if (rev)
3540 for (si = 0; si < parts; si++)
3541 {
3542 ops[opi++] = d[si];
3543 ops[opi++] = s[si];
3544 }
3545 else
3546 for (si = parts - 1; si >= 0; si--)
3547 {
3548 ops[opi++] = d[si];
3549 ops[opi++] = s[si];
3550 }
3551 rv = 1;
3552 }
3553 /* Now emit any moves we may have accumulated. */
3554 if (rv && split_all != 3)
3555 {
3556 int i;
3557 for (i = 2; i < opi; i += 2)
3558 emit_move_insn (ops[i], ops[i + 1]);
3559 }
3560 return rv;
3561}
3562
3563/* The m32c has a number of opcodes that act like memcpy, strcmp, and
3564 the like. For the R8C they expect one of the addresses to be in
3565 R1L:An so we need to arrange for that. Otherwise, it's just a
3566 matter of picking out the operands we want and emitting the right
3567 pattern for them. All these expanders, which correspond to
3568 patterns in blkmov.md, must return nonzero if they expand the insn,
3569 or zero if they should FAIL. */
3570
3571/* This is a memset() opcode. All operands are implied, so we need to
3572 arrange for them to be in the right registers. The opcode wants
3573 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3574 the count (HI), and $2 the value (QI). */
3575int
3576m32c_expand_setmemhi(rtx *operands)
3577{
3578 rtx desta, count, val;
3579 rtx desto, counto;
3580
3581 desta = XEXP (operands[0], 0);
3582 count = operands[1];
3583 val = operands[2];
3584
3585 desto = gen_reg_rtx (Pmode);
3586 counto = gen_reg_rtx (HImode);
3587
3588 if (GET_CODE (desta) != REG
3589 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3590 desta = copy_to_mode_reg (Pmode, desta);
3591
3592 /* This looks like an arbitrary restriction, but this is by far the
3593 most common case. For counts 8..14 this actually results in
3594 smaller code with no speed penalty because the half-sized
3595 constant can be loaded with a shorter opcode. */
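 /* Concretely (illustrative): clearing 12 bytes with the value 0
 becomes a word-wide fill of 6 halfwords of 0x0000, using the
 *_whi_op/_wpsi_op patterns below instead of the byte-wide ones. */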
3596 if (GET_CODE (count) == CONST_INT
3597 && GET_CODE (val) == CONST_INT
3598 && ! (INTVAL (count) & 1)
3599 && (INTVAL (count) > 1)
3600 && (INTVAL (val) <= 7 && INTVAL (val) >= -8))
3601 {
3602 unsigned v = INTVAL (val) & 0xff;
3603 v = v | (v << 8);
3604 count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3605 val = copy_to_mode_reg (HImode, GEN_INT (v));
3606 if (TARGET_A16)
3607 emit_insn (gen_setmemhi_whi_op (desto, counto, val, desta, count));
3608 else
3609 emit_insn (gen_setmemhi_wpsi_op (desto, counto, val, desta, count));
3610 return 1;
3611 }
3612
3613 /* This is the generalized memset() case. */
3614 if (GET_CODE (val) != REG
3615 || REGNO (val) < FIRST_PSEUDO_REGISTER)
3616 val = copy_to_mode_reg (QImode, val);
3617
3618 if (GET_CODE (count) != REG
3619 || REGNO (count) < FIRST_PSEUDO_REGISTER)
3620 count = copy_to_mode_reg (HImode, count);
3621
3622 if (TARGET_A16)
3623 emit_insn (gen_setmemhi_bhi_op (desto, counto, val, desta, count));
3624 else
3625 emit_insn (gen_setmemhi_bpsi_op (desto, counto, val, desta, count));
3626
3627 return 1;
3628}
3629
3630/* This is a memcpy() opcode. All operands are implied, so we need to
3631 arrange for them to be in the right registers. The opcode wants
3632 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3633 is the source (MEM:BLK), and $2 the count (HI). */
3634int
3635m32c_expand_movmemhi(rtx *operands)
3636{
3637 rtx desta, srca, count;
3638 rtx desto, srco, counto;
3639
3640 desta = XEXP (operands[0], 0);
3641 srca = XEXP (operands[1], 0);
3642 count = operands[2];
3643
3644 desto = gen_reg_rtx (Pmode);
3645 srco = gen_reg_rtx (Pmode);
3646 counto = gen_reg_rtx (HImode);
3647
3648 if (GET_CODE (desta) != REG
3649 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3650 desta = copy_to_mode_reg (Pmode, desta);
3651
3652 if (GET_CODE (srca) != REG
3653 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3654 srca = copy_to_mode_reg (Pmode, srca);
3655
3656 /* Similar to setmem, but we don't need to check the value. */
3657 if (GET_CODE (count) == CONST_INT
3658 && ! (INTVAL (count) & 1)
3659 && (INTVAL (count) > 1))
3660 {
3661 count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3662 if (TARGET_A16)
3663 emit_insn (gen_movmemhi_whi_op (desto, srco, counto, desta, srca, count));
3664 else
3665 emit_insn (gen_movmemhi_wpsi_op (desto, srco, counto, desta, srca, count));
3666 return 1;
3667 }
3668
 3669 /* This is the generalized memcpy() case. */
3670 if (GET_CODE (count) != REG
3671 || REGNO (count) < FIRST_PSEUDO_REGISTER)
3672 count = copy_to_mode_reg (HImode, count);
3673
3674 if (TARGET_A16)
3675 emit_insn (gen_movmemhi_bhi_op (desto, srco, counto, desta, srca, count));
3676 else
3677 emit_insn (gen_movmemhi_bpsi_op (desto, srco, counto, desta, srca, count));
3678
3679 return 1;
3680}
3681
3682/* This is a stpcpy() opcode. $0 is the destination (MEM:BLK) after
3683 the copy, which should point to the NUL at the end of the string,
3684 $1 is the destination (MEM:BLK), and $2 is the source (MEM:BLK).
3685 Since our opcode leaves the destination pointing *after* the NUL,
3686 we must emit an adjustment. */
3687int
3688m32c_expand_movstr(rtx *operands)
3689{
3690 rtx desta, srca;
3691 rtx desto, srco;
3692
3693 desta = XEXP (operands[1], 0);
3694 srca = XEXP (operands[2], 0);
3695
3696 desto = gen_reg_rtx (Pmode);
3697 srco = gen_reg_rtx (Pmode);
3698
3699 if (GET_CODE (desta) != REG
3700 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3701 desta = copy_to_mode_reg (Pmode, desta);
3702
3703 if (GET_CODE (srca) != REG
3704 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3705 srca = copy_to_mode_reg (Pmode, srca);
3706
3707 emit_insn (gen_movstr_op (desto, srco, desta, srca));
3708 /* desto ends up being a1, which allows this type of add through MOVA. */
3709 emit_insn (gen_addpsi3 (operands[0], desto, GEN_INT (-1)));
3710
3711 return 1;
3712}
3713
3714/* This is a strcmp() opcode. $0 is the destination (HI) which holds
3715 <=>0 depending on the comparison, $1 is one string (MEM:BLK), and
3716 $2 is the other (MEM:BLK). We must do the comparison, and then
3717 convert the flags to a signed integer result. */
3718int
3719m32c_expand_cmpstr(rtx *operands)
3720{
3721 rtx src1a, src2a;
3722
3723 src1a = XEXP (operands[1], 0);
3724 src2a = XEXP (operands[2], 0);
3725
3726 if (GET_CODE (src1a) != REG
3727 || REGNO (src1a) < FIRST_PSEUDO_REGISTER)
3728 src1a = copy_to_mode_reg (Pmode, src1a);
3729
3730 if (GET_CODE (src2a) != REG
3731 || REGNO (src2a) < FIRST_PSEUDO_REGISTER)
3732 src2a = copy_to_mode_reg (Pmode, src2a);
3733
3734 emit_insn (gen_cmpstrhi_op (src1a, src2a, src1a, src2a));
3735 emit_insn (gen_cond_to_int (operands[0]));
3736
3737 return 1;
3738}
3739
3740
3741typedef rtx (*shift_gen_func)(rtx, rtx, rtx);
3742
3743static shift_gen_func
3744shift_gen_func_for (int mode, int code)
3745{
3746#define GFF(m,c,f) if (mode == m && code == c) return f
3747 GFF(QImode, ASHIFT, gen_ashlqi3_i);
3748 GFF(QImode, ASHIFTRT, gen_ashrqi3_i);
3749 GFF(QImode, LSHIFTRT, gen_lshrqi3_i);
3750 GFF(HImode, ASHIFT, gen_ashlhi3_i);
3751 GFF(HImode, ASHIFTRT, gen_ashrhi3_i);
3752 GFF(HImode, LSHIFTRT, gen_lshrhi3_i);
3753 GFF(PSImode, ASHIFT, gen_ashlpsi3_i);
3754 GFF(PSImode, ASHIFTRT, gen_ashrpsi3_i);
3755 GFF(PSImode, LSHIFTRT, gen_lshrpsi3_i);
3756 GFF(SImode, ASHIFT, TARGET_A16 ? gen_ashlsi3_16 : gen_ashlsi3_24);
3757 GFF(SImode, ASHIFTRT, TARGET_A16 ? gen_ashrsi3_16 : gen_ashrsi3_24);
3758 GFF(SImode, LSHIFTRT, TARGET_A16 ? gen_lshrsi3_16 : gen_lshrsi3_24);
3759#undef GFF
 3760 gcc_unreachable ();
3761}
3762
3763/* The m32c only has one shift, but it takes a signed count. GCC
3764 doesn't want this, so we fake it by negating any shift count when
3765 we're pretending to shift the other way. Also, the shift count is
3766 limited to -8..8. It's slightly better to use two shifts for 9..15
3767 than to load the count into r1h, so we do that too. */
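/* For example (illustrative): an HImode shift left by 12 is emitted
   as a shift by 8 followed by a shift by 4, since the immediate count
   field only reaches 8.  */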
 3768int
 3769m32c_prepare_shift (rtx * operands, int scale, int shift_code)
 3770{
3771 enum machine_mode mode = GET_MODE (operands[0]);
3772 shift_gen_func func = shift_gen_func_for (mode, shift_code);
 3773 rtx temp;
3774
3775 if (GET_CODE (operands[2]) == CONST_INT)
 3776 {
3777 int maxc = TARGET_A24 && (mode == PSImode || mode == SImode) ? 32 : 8;
3778 int count = INTVAL (operands[2]) * scale;
3779
3780 while (count > maxc)
3781 {
3782 temp = gen_reg_rtx (mode);
3783 emit_insn (func (temp, operands[1], GEN_INT (maxc)));
3784 operands[1] = temp;
3785 count -= maxc;
3786 }
3787 while (count < -maxc)
3788 {
3789 temp = gen_reg_rtx (mode);
3790 emit_insn (func (temp, operands[1], GEN_INT (-maxc)));
3791 operands[1] = temp;
3792 count += maxc;
3793 }
3794 emit_insn (func (operands[0], operands[1], GEN_INT (count)));
3795 return 1;
 3796 }
3797
3798 temp = gen_reg_rtx (QImode);
 3799 if (scale < 0)
3800 /* The pattern has a NEG that corresponds to this. */
3801 emit_move_insn (temp, gen_rtx_NEG (QImode, operands[2]));
3802 else if (TARGET_A16 && mode == SImode)
3803 /* We do this because the code below may modify this, we don't
3804 want to modify the origin of this value. */
3805 emit_move_insn (temp, operands[2]);
 3806 else
 3807 /* We'll only use it for the shift, no point emitting a move. */
 3808 temp = operands[2];
 3809
 3810 if (TARGET_A16 && GET_MODE_SIZE (mode) == 4)
3811 {
3812 /* The m16c has a limit of -16..16 for SI shifts, even when the
3813 shift count is in a register. Since there are so many targets
3814 of these shifts, it's better to expand the RTL here than to
3815 call a helper function.
3816
3817 The resulting code looks something like this:
3818
3819 cmp.b r1h,-16
3820 jge.b 1f
3821 shl.l -16,dest
3822 add.b r1h,16
3823 1f: cmp.b r1h,16
3824 jle.b 1f
3825 shl.l 16,dest
3826 sub.b r1h,16
3827 1f: shl.l r1h,dest
3828
3829 We take advantage of the fact that "negative" shifts are
3830 undefined to skip one of the comparisons. */
3831
3832 rtx count;
 3833 rtx label, lref, insn, tempvar;
 3834
3835 emit_move_insn (operands[0], operands[1]);
3836
3837 count = temp;
3838 label = gen_label_rtx ();
3839 lref = gen_rtx_LABEL_REF (VOIDmode, label);
3840 LABEL_NUSES (label) ++;
3841
833bf445
DD
3842 tempvar = gen_reg_rtx (mode);
3843
2e160056
DD
3844 if (shift_code == ASHIFT)
3845 {
3846 /* This is a left shift. We only need check positive counts. */
3847 emit_jump_insn (gen_cbranchqi4 (gen_rtx_LE (VOIDmode, 0, 0),
3848 count, GEN_INT (16), label));
3849 emit_insn (func (tempvar, operands[0], GEN_INT (8)));
3850 emit_insn (func (operands[0], tempvar, GEN_INT (8)));
3851 insn = emit_insn (gen_addqi3 (count, count, GEN_INT (-16)));
3852 emit_label_after (label, insn);
3853 }
3854 else
3855 {
3856 /* This is a right shift. We only need check negative counts. */
3857 emit_jump_insn (gen_cbranchqi4 (gen_rtx_GE (VOIDmode, 0, 0),
3858 count, GEN_INT (-16), label));
3859 emit_insn (func (tempvar, operands[0], GEN_INT (-8)));
3860 emit_insn (func (operands[0], tempvar, GEN_INT (-8)));
3861 insn = emit_insn (gen_addqi3 (count, count, GEN_INT (16)));
3862 emit_label_after (label, insn);
3863 }
3864 operands[1] = operands[0];
3865 emit_insn (func (operands[0], operands[0], count));
3866 return 1;
3867 }
3868
3869 operands[2] = temp;
3870 return 0;
3871}
3872
3873/* The m32c has a limited range of operations that work on PSImode
3874 values; we have to expand to SI, do the math, and truncate back to
3875 PSI. Yes, this is expensive, but hopefully gcc will learn to avoid
3876 those cases. */
3877void
3878m32c_expand_neg_mulpsi3 (rtx * operands)
3879{
3880 /* operands: a = b * i */
3881 rtx temp1; /* b as SI */
07127a0a
DD
 3882 rtx scale; /* i as SI */
3883 rtx temp2; /* a*b as SI */
12ea2512
DD
3884
3885 temp1 = gen_reg_rtx (SImode);
3886 temp2 = gen_reg_rtx (SImode);
07127a0a
DD
3887 if (GET_CODE (operands[2]) != CONST_INT)
3888 {
3889 scale = gen_reg_rtx (SImode);
3890 emit_insn (gen_zero_extendpsisi2 (scale, operands[2]));
3891 }
3892 else
3893 scale = copy_to_mode_reg (SImode, operands[2]);
12ea2512
DD
3894
3895 emit_insn (gen_zero_extendpsisi2 (temp1, operands[1]));
07127a0a
DD
3896 temp2 = expand_simple_binop (SImode, MULT, temp1, scale, temp2, 1, OPTAB_LIB);
3897 emit_insn (gen_truncsipsi2 (operands[0], temp2));
12ea2512
DD
3898}
3899
38b2d076
DD
3900/* Pattern Output Functions */
3901
07127a0a
DD
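/* Expand a conditional move.  Return nonzero to fail, else done.  Only
   EQ/NE tests with constant arms are handled here.  */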
3902int
3903m32c_expand_movcc (rtx *operands)
3904{
3905 rtx rel = operands[1];
0166ff05
DD
3907
07127a0a
DD
3908 if (GET_CODE (rel) != EQ && GET_CODE (rel) != NE)
3909 return 1;
3910 if (GET_CODE (operands[2]) != CONST_INT
3911 || GET_CODE (operands[3]) != CONST_INT)
3912 return 1;
07127a0a
DD
3913 if (GET_CODE (rel) == NE)
3914 {
3915 rtx tmp = operands[2];
3916 operands[2] = operands[3];
3917 operands[3] = tmp;
f90b7a5a 3918 rel = gen_rtx_EQ (GET_MODE (rel), XEXP (rel, 0), XEXP (rel, 1));
07127a0a 3919 }
0166ff05 3920
0166ff05
DD
3921 emit_move_insn (operands[0],
3922 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
f90b7a5a 3923 rel,
0166ff05
DD
3924 operands[2],
3925 operands[3]));
07127a0a
DD
3926 return 0;
3927}
3928
3929/* Used for the "insv" pattern. Return nonzero to fail, else done. */
3930int
3931m32c_expand_insv (rtx *operands)
3932{
3933 rtx op0, src0, p;
3934 int mask;
3935
3936 if (INTVAL (operands[1]) != 1)
3937 return 1;
3938
9cb96754
N
3939 /* Our insv opcode (bset, bclr) can only insert a one-bit constant. */
3940 if (GET_CODE (operands[3]) != CONST_INT)
3941 return 1;
3942 if (INTVAL (operands[3]) != 0
3943 && INTVAL (operands[3]) != 1
3944 && INTVAL (operands[3]) != -1)
3945 return 1;
3946
07127a0a
DD
3947 mask = 1 << INTVAL (operands[2]);
3948
3949 op0 = operands[0];
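 /* If the destination is a lowpart SUBREG of a QImode or HImode
    object, operate on the underlying object directly.  */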
3950 if (GET_CODE (op0) == SUBREG
3951 && SUBREG_BYTE (op0) == 0)
3952 {
3953 rtx sub = SUBREG_REG (op0);
3954 if (GET_MODE (sub) == HImode || GET_MODE (sub) == QImode)
3955 op0 = sub;
3956 }
3957
b3a13419 3958 if (!can_create_pseudo_p ()
07127a0a
DD
3959 || (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0)))
3960 src0 = op0;
3961 else
3962 {
3963 src0 = gen_reg_rtx (GET_MODE (op0));
3964 emit_move_insn (src0, op0);
3965 }
3966
 3967 if (GET_MODE (op0) == HImode
 3968 && INTVAL (operands[2]) >= 8
 3969 && GET_CODE (op0) == MEM)
 3970 {
 3971 /* We are little endian; address the high byte directly.  */
 3972 rtx new_mem = gen_rtx_MEM (QImode, plus_constant (XEXP (op0, 0), 1));
 3973 MEM_COPY_ATTRIBUTES (new_mem, op0);
 3974 mask >>= 8;
 op0 = src0 = new_mem;
 3975 }
3976
8e4edce7
DD
3977 /* First, we generate a mask with the correct polarity. If we are
3978 storing a zero, we want an AND mask, so invert it. */
3979 if (INTVAL (operands[3]) == 0)
07127a0a 3980 {
16659fcf 3981 /* Storing a zero, use an AND mask */
07127a0a
DD
3982 if (GET_MODE (op0) == HImode)
3983 mask ^= 0xffff;
3984 else
3985 mask ^= 0xff;
3986 }
8e4edce7
DD
3987 /* Now we need to properly sign-extend the mask in case we need to
3988 fall back to an AND or OR opcode. */
07127a0a
DD
3989 if (GET_MODE (op0) == HImode)
3990 {
3991 if (mask & 0x8000)
3992 mask -= 0x10000;
3993 }
3994 else
3995 {
3996 if (mask & 0x80)
3997 mask -= 0x100;
3998 }
3999
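 /* Select the and/ior pattern: bit 2 of the index picks IOR (storing a
    nonzero bit) versus AND (storing a zero), bit 1 picks HImode versus
    QImode, and bit 0 picks the A24 variant.  */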
4000 switch ( (INTVAL (operands[3]) ? 4 : 0)
4001 + ((GET_MODE (op0) == HImode) ? 2 : 0)
4002 + (TARGET_A24 ? 1 : 0))
4003 {
4004 case 0: p = gen_andqi3_16 (op0, src0, GEN_INT (mask)); break;
4005 case 1: p = gen_andqi3_24 (op0, src0, GEN_INT (mask)); break;
4006 case 2: p = gen_andhi3_16 (op0, src0, GEN_INT (mask)); break;
4007 case 3: p = gen_andhi3_24 (op0, src0, GEN_INT (mask)); break;
4008 case 4: p = gen_iorqi3_16 (op0, src0, GEN_INT (mask)); break;
4009 case 5: p = gen_iorqi3_24 (op0, src0, GEN_INT (mask)); break;
4010 case 6: p = gen_iorhi3_16 (op0, src0, GEN_INT (mask)); break;
4011 case 7: p = gen_iorhi3_24 (op0, src0, GEN_INT (mask)); break;
653e2568 4012 default: p = NULL_RTX; break; /* Not reached, but silences a warning. */
07127a0a
DD
4013 }
4014
4015 emit_insn (p);
4016 return 0;
4017}
4018
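/* Return the assembler template that stores the result of condition
   CODE into operand 0.  When the destination is r0l, EQ and NE can use
   the short STZX form; otherwise the condition is materialized with
   BMcond and masked down to a single bit.  */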
4019const char *
4020m32c_scc_pattern(rtx *operands, RTX_CODE code)
4021{
4022 static char buf[30];
4023 if (GET_CODE (operands[0]) == REG
4024 && REGNO (operands[0]) == R0_REGNO)
4025 {
4026 if (code == EQ)
4027 return "stzx\t#1,#0,r0l";
4028 if (code == NE)
4029 return "stzx\t#0,#1,r0l";
4030 }
4031 sprintf(buf, "bm%s\t0,%%h0\n\tand.b\t#1,%%0", GET_RTX_NAME (code));
4032 return buf;
4033}
4034
5abd2125
JS
4035/* Encode symbol attributes of a SYMBOL_REF into its
4036 SYMBOL_REF_FLAGS. */
4037static void
4038m32c_encode_section_info (tree decl, rtx rtl, int first)
4039{
4040 int extra_flags = 0;
4041
4042 default_encode_section_info (decl, rtl, first);
4043 if (TREE_CODE (decl) == FUNCTION_DECL
4044 && m32c_special_page_vector_p (decl))
4045
4046 extra_flags = SYMBOL_FLAG_FUNCVEC_FUNCTION;
4047
4048 if (extra_flags)
4049 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= extra_flags;
4050}
4051
38b2d076
DD
4052/* Returns TRUE if the current function is a leaf, and thus we can
4053 determine which registers an interrupt function really needs to
4054 save. The logic below is mostly about finding the insn sequence
4055 that's the function, versus any sequence that might be open for the
4056 current insn. */
4057static int
4058m32c_leaf_function_p (void)
4059{
4060 rtx saved_first, saved_last;
4061 struct sequence_stack *seq;
4062 int rv;
4063
3e029763
JH
4064 saved_first = crtl->emit.x_first_insn;
4065 saved_last = crtl->emit.x_last_insn;
4066 for (seq = crtl->emit.sequence_stack; seq && seq->next; seq = seq->next)
38b2d076
DD
4067 ;
4068 if (seq)
4069 {
3e029763
JH
4070 crtl->emit.x_first_insn = seq->first;
4071 crtl->emit.x_last_insn = seq->last;
38b2d076
DD
4072 }
4073
4074 rv = leaf_function_p ();
4075
3e029763
JH
4076 crtl->emit.x_first_insn = saved_first;
4077 crtl->emit.x_last_insn = saved_last;
38b2d076
DD
4078 return rv;
4079}
4080
4081/* Returns TRUE if the current function needs to use the ENTER/EXIT
4082 opcodes. If the function doesn't need the frame base or stack
4083 pointer, it can use the simpler RTS opcode. */
4084static bool
4085m32c_function_needs_enter (void)
4086{
4087 rtx insn;
4088 struct sequence_stack *seq;
4089 rtx sp = gen_rtx_REG (Pmode, SP_REGNO);
4090 rtx fb = gen_rtx_REG (Pmode, FB_REGNO);
4091
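 /* If insns are being emitted into a nested sequence, walk out to the
    outermost saved sequence so the scan below covers the whole
    function rather than just the sequence currently being built.  */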
4092 insn = get_insns ();
3e029763 4093 for (seq = crtl->emit.sequence_stack;
38b2d076
DD
4094 seq;
4095 insn = seq->first, seq = seq->next);
4096
4097 while (insn)
4098 {
4099 if (reg_mentioned_p (sp, insn))
4100 return true;
4101 if (reg_mentioned_p (fb, insn))
4102 return true;
4103 insn = NEXT_INSN (insn);
4104 }
4105 return false;
4106}
4107
4108/* Mark all the subexpressions of the PARALLEL rtx PAR as
4109 frame-related. Return PAR.
4110
4111 dwarf2out.c:dwarf2out_frame_debug_expr ignores sub-expressions of a
4112 PARALLEL rtx other than the first if they do not have the
4113 FRAME_RELATED flag set on them. So this function is handy for
4114 marking up 'enter' instructions. */
4115static rtx
4116m32c_all_frame_related (rtx par)
4117{
4118 int len = XVECLEN (par, 0);
4119 int i;
4120
4121 for (i = 0; i < len; i++)
4122 F (XVECEXP (par, 0, i));
4123
4124 return par;
4125}
4126
4127/* Emits the prologue. See the frame layout comment earlier in this
4128 file. We can reserve up to 256 bytes with the ENTER opcode, beyond
4129 that we manually update sp. */
4130void
4131m32c_emit_prologue (void)
4132{
4133 int frame_size, extra_frame_size = 0, reg_save_size;
4134 int complex_prologue = 0;
4135
4136 cfun->machine->is_leaf = m32c_leaf_function_p ();
4137 if (interrupt_p (cfun->decl))
4138 {
4139 cfun->machine->is_interrupt = 1;
4140 complex_prologue = 1;
4141 }
65655f79
DD
4142 else if (bank_switch_p (cfun->decl))
4143 warning (OPT_Wattributes,
4144 "%<bank_switch%> has no effect on non-interrupt functions");
38b2d076
DD
4145
4146 reg_save_size = m32c_pushm_popm (PP_justcount);
4147
4148 if (interrupt_p (cfun->decl))
65655f79
DD
4149 {
4150 if (bank_switch_p (cfun->decl))
4151 emit_insn (gen_fset_b ());
4152 else if (cfun->machine->intr_pushm)
4153 emit_insn (gen_pushm (GEN_INT (cfun->machine->intr_pushm)));
4154 }
38b2d076
DD
4155
4156 frame_size =
4157 m32c_initial_elimination_offset (FB_REGNO, SP_REGNO) - reg_save_size;
4158 if (frame_size == 0
38b2d076
DD
4159 && !m32c_function_needs_enter ())
4160 cfun->machine->use_rts = 1;
4161
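 /* ENTER can only reserve a limited frame (see the comment above this
    function); anything larger is allocated with an explicit stack
    pointer adjustment below.  */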
4162 if (frame_size > 254)
4163 {
4164 extra_frame_size = frame_size - 254;
4165 frame_size = 254;
4166 }
4167 if (cfun->machine->use_rts == 0)
4168 F (emit_insn (m32c_all_frame_related
4169 (TARGET_A16
fa9fd28a
RIL
4170 ? gen_prologue_enter_16 (GEN_INT (frame_size + 2))
4171 : gen_prologue_enter_24 (GEN_INT (frame_size + 4)))));
38b2d076
DD
4172
4173 if (extra_frame_size)
4174 {
4175 complex_prologue = 1;
4176 if (TARGET_A16)
4177 F (emit_insn (gen_addhi3 (gen_rtx_REG (HImode, SP_REGNO),
4178 gen_rtx_REG (HImode, SP_REGNO),
4179 GEN_INT (-extra_frame_size))));
4180 else
4181 F (emit_insn (gen_addpsi3 (gen_rtx_REG (PSImode, SP_REGNO),
4182 gen_rtx_REG (PSImode, SP_REGNO),
4183 GEN_INT (-extra_frame_size))));
4184 }
4185
4186 complex_prologue += m32c_pushm_popm (PP_pushm);
4187
4188 /* This just emits a comment into the .s file for debugging. */
4189 if (complex_prologue)
4190 emit_insn (gen_prologue_end ());
4191}
4192
4193/* Likewise, for the epilogue. The only exception is that, for
4194 interrupts, we must manually unwind the frame as the REIT opcode
4195 doesn't do that. */
4196void
4197m32c_emit_epilogue (void)
4198{
4199 /* This just emits a comment into the .s file for debugging. */
4200 if (m32c_pushm_popm (PP_justcount) > 0 || cfun->machine->is_interrupt)
4201 emit_insn (gen_epilogue_start ());
4202
4203 m32c_pushm_popm (PP_popm);
4204
4205 if (cfun->machine->is_interrupt)
4206 {
4207 enum machine_mode spmode = TARGET_A16 ? HImode : PSImode;
4208
65655f79
DD
4209 /* REIT clears B flag and restores $fp for us, but we still
4210 have to fix up the stack. USE_RTS just means we didn't
4211 emit ENTER. */
4212 if (!cfun->machine->use_rts)
4213 {
4214 emit_move_insn (gen_rtx_REG (spmode, A0_REGNO),
4215 gen_rtx_REG (spmode, FP_REGNO));
4216 emit_move_insn (gen_rtx_REG (spmode, SP_REGNO),
4217 gen_rtx_REG (spmode, A0_REGNO));
4218 /* We can't just add this to the POPM because it would be in
4219 the wrong order, and wouldn't fix the stack if we're bank
4220 switching. */
4221 if (TARGET_A16)
4222 emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, FP_REGNO)));
4223 else
4224 emit_insn (gen_poppsi (gen_rtx_REG (PSImode, FP_REGNO)));
4225 }
4226 if (!bank_switch_p (cfun->decl) && cfun->machine->intr_pushm)
4227 emit_insn (gen_popm (GEN_INT (cfun->machine->intr_pushm)));
4228
402f2db8
DD
4229 /* The FREIT (Fast REturn from InTerrupt) instruction should be
4230 generated only for M32C/M32CM targets (generate the REIT
4231 instruction otherwise). */
65655f79 4232 if (fast_interrupt_p (cfun->decl))
402f2db8
DD
4233 {
 4234 /* The target is M32C or M32CM, so FREIT is available.  */
4235 if (TARGET_A24)
4236 {
4237 emit_jump_insn (gen_epilogue_freit ());
4238 }
 4239 /* If the fast_interrupt attribute is set for an R8C or M16C
 4240 target, ignore the attribute and generate a REIT
 4241 instruction instead.  */
4242 else
4243 {
4244 warning (OPT_Wattributes,
4245 "%<fast_interrupt%> attribute directive ignored");
4246 emit_jump_insn (gen_epilogue_reit_16 ());
4247 }
4248 }
65655f79 4249 else if (TARGET_A16)
0e0642aa
RIL
4250 emit_jump_insn (gen_epilogue_reit_16 ());
4251 else
4252 emit_jump_insn (gen_epilogue_reit_24 ());
38b2d076
DD
4253 }
4254 else if (cfun->machine->use_rts)
4255 emit_jump_insn (gen_epilogue_rts ());
0e0642aa
RIL
4256 else if (TARGET_A16)
4257 emit_jump_insn (gen_epilogue_exitd_16 ());
38b2d076 4258 else
0e0642aa 4259 emit_jump_insn (gen_epilogue_exitd_24 ());
38b2d076
DD
4260 emit_barrier ();
4261}
4262
4263void
4264m32c_emit_eh_epilogue (rtx ret_addr)
4265{
4266 /* R0[R2] has the stack adjustment. R1[R3] has the address to
4267 return to. We have to fudge the stack, pop everything, pop SP
4268 (fudged), and return (fudged). This is actually easier to do in
4269 assembler, so punt to libgcc. */
4270 emit_jump_insn (gen_eh_epilogue (ret_addr, cfun->machine->eh_stack_adjust));
c41c1387 4271 /* emit_clobber (gen_rtx_REG (HImode, R0L_REGNO)); */
38b2d076
DD
4272 emit_barrier ();
4273}
4274
16659fcf
DD
4275/* Indicate which flags must be properly set for a given conditional. */
4276static int
4277flags_needed_for_conditional (rtx cond)
4278{
4279 switch (GET_CODE (cond))
4280 {
4281 case LE:
4282 case GT:
4283 return FLAGS_OSZ;
4284 case LEU:
4285 case GTU:
4286 return FLAGS_ZC;
4287 case LT:
4288 case GE:
4289 return FLAGS_OS;
4290 case LTU:
4291 case GEU:
4292 return FLAGS_C;
4293 case EQ:
4294 case NE:
4295 return FLAGS_Z;
4296 default:
4297 return FLAGS_N;
4298 }
4299}
4300
4301#define DEBUG_CMP 0
4302
4303/* Returns true if a compare insn is redundant because it would only
4304 set flags that are already set correctly. */
4305static bool
4306m32c_compare_redundant (rtx cmp, rtx *operands)
4307{
4308 int flags_needed;
4309 int pflags;
4310 rtx prev, pp, next;
4311 rtx op0, op1, op2;
4312#if DEBUG_CMP
4313 int prev_icode, i;
4314#endif
4315
4316 op0 = operands[0];
4317 op1 = operands[1];
4318 op2 = operands[2];
4319
4320#if DEBUG_CMP
4321 fprintf(stderr, "\n\033[32mm32c_compare_redundant\033[0m\n");
4322 debug_rtx(cmp);
4323 for (i=0; i<2; i++)
4324 {
4325 fprintf(stderr, "operands[%d] = ", i);
4326 debug_rtx(operands[i]);
4327 }
4328#endif
4329
4330 next = next_nonnote_insn (cmp);
4331 if (!next || !INSN_P (next))
4332 {
4333#if DEBUG_CMP
4334 fprintf(stderr, "compare not followed by insn\n");
4335 debug_rtx(next);
4336#endif
4337 return false;
4338 }
4339 if (GET_CODE (PATTERN (next)) == SET
4340 && GET_CODE (XEXP ( PATTERN (next), 1)) == IF_THEN_ELSE)
4341 {
4342 next = XEXP (XEXP (PATTERN (next), 1), 0);
4343 }
4344 else if (GET_CODE (PATTERN (next)) == SET)
4345 {
4346 /* If this is a conditional, flags_needed will be something
4347 other than FLAGS_N, which we test below. */
4348 next = XEXP (PATTERN (next), 1);
4349 }
4350 else
4351 {
4352#if DEBUG_CMP
4353 fprintf(stderr, "compare not followed by conditional\n");
4354 debug_rtx(next);
4355#endif
4356 return false;
4357 }
4358#if DEBUG_CMP
4359 fprintf(stderr, "conditional is: ");
4360 debug_rtx(next);
4361#endif
4362
4363 flags_needed = flags_needed_for_conditional (next);
4364 if (flags_needed == FLAGS_N)
4365 {
4366#if DEBUG_CMP
 4367 fprintf(stderr, "compare not followed by a flags-using conditional\n");
4368 debug_rtx(next);
4369#endif
4370 return false;
4371 }
4372
4373 /* Compare doesn't set overflow and carry the same way that
4374 arithmetic instructions do, so we can't replace those. */
4375 if (flags_needed & FLAGS_OC)
4376 return false;
4377
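 /* Scan backwards for the insn that last set the flags, giving up if
    anything in between uses or changes the compared values, or if
    memory is involved.  */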
4378 prev = cmp;
4379 do {
4380 prev = prev_nonnote_insn (prev);
4381 if (!prev)
4382 {
4383#if DEBUG_CMP
4384 fprintf(stderr, "No previous insn.\n");
4385#endif
4386 return false;
4387 }
4388 if (!INSN_P (prev))
4389 {
4390#if DEBUG_CMP
4391 fprintf(stderr, "Previous insn is a non-insn.\n");
4392#endif
4393 return false;
4394 }
4395 pp = PATTERN (prev);
4396 if (GET_CODE (pp) != SET)
4397 {
4398#if DEBUG_CMP
4399 fprintf(stderr, "Previous insn is not a SET.\n");
4400#endif
4401 return false;
4402 }
4403 pflags = get_attr_flags (prev);
4404
4405 /* Looking up attributes of previous insns corrupted the recog
4406 tables. */
4407 INSN_UID (cmp) = -1;
4408 recog (PATTERN (cmp), cmp, 0);
4409
4410 if (pflags == FLAGS_N
4411 && reg_mentioned_p (op0, pp))
4412 {
4413#if DEBUG_CMP
4414 fprintf(stderr, "intermediate non-flags insn uses op:\n");
4415 debug_rtx(prev);
4416#endif
4417 return false;
4418 }
b3c5a409
DD
4419
4420 /* Check for comparisons against memory - between volatiles and
4421 aliases, we just can't risk this one. */
 4422 if (GET_CODE (operands[0]) == MEM
 4423 || GET_CODE (operands[1]) == MEM)
4424 {
4425#if DEBUG_CMP
4426 fprintf(stderr, "comparisons with memory:\n");
4427 debug_rtx(prev);
4428#endif
4429 return false;
4430 }
4431
4432 /* Check for PREV changing a register that's used to compute a
4433 value in CMP, even if it doesn't otherwise change flags. */
4434 if (GET_CODE (operands[0]) == REG
4435 && rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[0]))
4436 {
4437#if DEBUG_CMP
4438 fprintf(stderr, "sub-value affected, op0:\n");
4439 debug_rtx(prev);
4440#endif
4441 return false;
4442 }
4443 if (GET_CODE (operands[1]) == REG
4444 && rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[1]))
4445 {
4446#if DEBUG_CMP
4447 fprintf(stderr, "sub-value affected, op1:\n");
4448 debug_rtx(prev);
4449#endif
4450 return false;
4451 }
4452
16659fcf
DD
4453 } while (pflags == FLAGS_N);
4454#if DEBUG_CMP
4455 fprintf(stderr, "previous flag-setting insn:\n");
4456 debug_rtx(prev);
4457 debug_rtx(pp);
4458#endif
4459
4460 if (GET_CODE (pp) == SET
4461 && GET_CODE (XEXP (pp, 0)) == REG
4462 && REGNO (XEXP (pp, 0)) == FLG_REGNO
4463 && GET_CODE (XEXP (pp, 1)) == COMPARE)
4464 {
4465 /* Adjacent cbranches must have the same operands to be
4466 redundant. */
4467 rtx pop0 = XEXP (XEXP (pp, 1), 0);
4468 rtx pop1 = XEXP (XEXP (pp, 1), 1);
4469#if DEBUG_CMP
4470 fprintf(stderr, "adjacent cbranches\n");
4471 debug_rtx(pop0);
4472 debug_rtx(pop1);
4473#endif
4474 if (rtx_equal_p (op0, pop0)
4475 && rtx_equal_p (op1, pop1))
4476 return true;
4477#if DEBUG_CMP
4478 fprintf(stderr, "prev cmp not same\n");
4479#endif
4480 return false;
4481 }
4482
4483 /* Else the previous insn must be a SET, with either the source or
4484 dest equal to operands[0], and operands[1] must be zero. */
4485
4486 if (!rtx_equal_p (op1, const0_rtx))
4487 {
4488#if DEBUG_CMP
4489 fprintf(stderr, "operands[1] not const0_rtx\n");
4490#endif
4491 return false;
4492 }
4493 if (GET_CODE (pp) != SET)
4494 {
4495#if DEBUG_CMP
4496 fprintf (stderr, "pp not set\n");
4497#endif
4498 return false;
4499 }
4500 if (!rtx_equal_p (op0, SET_SRC (pp))
4501 && !rtx_equal_p (op0, SET_DEST (pp)))
4502 {
4503#if DEBUG_CMP
4504 fprintf(stderr, "operands[0] not found in set\n");
4505#endif
4506 return false;
4507 }
4508
4509#if DEBUG_CMP
4510 fprintf(stderr, "cmp flags %x prev flags %x\n", flags_needed, pflags);
4511#endif
4512 if ((pflags & flags_needed) == flags_needed)
4513 return true;
4514
4515 return false;
4516}
4517
4518/* Return the pattern for a compare. This will be commented out if
4519 the compare is redundant, else a normal pattern is returned. Thus,
4520 the assembler output says where the compare would have been. */
4521char *
4522m32c_output_compare (rtx insn, rtx *operands)
4523{
0a2aaacc 4524 static char templ[] = ";cmp.b\t%1,%0";
16659fcf
DD
4525 /* ^ 5 */
4526
0a2aaacc 4527 templ[5] = " bwll"[GET_MODE_SIZE(GET_MODE(operands[0]))];
16659fcf
DD
4528 if (m32c_compare_redundant (insn, operands))
4529 {
4530#if DEBUG_CMP
4531 fprintf(stderr, "cbranch: cmp not needed\n");
4532#endif
0a2aaacc 4533 return templ;
16659fcf
DD
4534 }
4535
4536#if DEBUG_CMP
b3c5a409 4537 fprintf(stderr, "cbranch: cmp needed: `%s'\n", templ + 1);
16659fcf 4538#endif
0a2aaacc 4539 return templ + 1;
16659fcf
DD
4540}
4541
5abd2125
JS
4542#undef TARGET_ENCODE_SECTION_INFO
4543#define TARGET_ENCODE_SECTION_INFO m32c_encode_section_info
4544
b52b1749
AS
4545/* If the frame pointer isn't used, we detect it manually. But the
4546 stack pointer doesn't have as flexible addressing as the frame
4547 pointer, so we always assume we have it. */
4548
4549#undef TARGET_FRAME_POINTER_REQUIRED
4550#define TARGET_FRAME_POINTER_REQUIRED hook_bool_void_true
4551
38b2d076
DD
4552/* The Global `targetm' Variable. */
4553
4554struct gcc_target targetm = TARGET_INITIALIZER;
4555
4556#include "gt-m32c.h"