]> git.ipfire.org Git - thirdparty/gcc.git/blob - gcc/config/m32c/m32c.c
target.def (legitimate_constant_p): New hook.
[thirdparty/gcc.git] / gcc / config / m32c / m32c.c
1 /* Target Code for R8C/M16C/M32C
2 Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011
3 Free Software Foundation, Inc.
4 Contributed by Red Hat.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it
9 under the terms of the GNU General Public License as published
10 by the Free Software Foundation; either version 3, or (at your
11 option) any later version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT
14 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
16 License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "regs.h"
28 #include "hard-reg-set.h"
29 #include "insn-config.h"
30 #include "conditions.h"
31 #include "insn-flags.h"
32 #include "output.h"
33 #include "insn-attr.h"
34 #include "flags.h"
35 #include "recog.h"
36 #include "reload.h"
37 #include "diagnostic-core.h"
38 #include "obstack.h"
39 #include "tree.h"
40 #include "expr.h"
41 #include "optabs.h"
42 #include "except.h"
43 #include "function.h"
44 #include "ggc.h"
45 #include "target.h"
46 #include "target-def.h"
47 #include "tm_p.h"
48 #include "langhooks.h"
49 #include "gimple.h"
50 #include "df.h"
51
52 /* Prototypes */
53
/* Used by m32c_pushm_popm.  Selects what that helper does: emit a
   pushm insn, emit a popm insn, or just compute the byte count such
   an insn would move (presumably for frame-size math -- the helper
   itself is defined later in this file).  */
typedef enum
{
  PP_pushm,	/* emit a pushm */
  PP_popm,	/* emit a popm */
  PP_justcount	/* emit nothing; only count bytes */
} Push_Pop_Type;
61
62 static bool m32c_function_needs_enter (void);
63 static tree interrupt_handler (tree *, tree, tree, int, bool *);
64 static tree function_vector_handler (tree *, tree, tree, int, bool *);
65 static int interrupt_p (tree node);
66 static int bank_switch_p (tree node);
67 static int fast_interrupt_p (tree node);
68 static int interrupt_p (tree node);
69 static bool m32c_asm_integer (rtx, unsigned int, int);
70 static int m32c_comp_type_attributes (const_tree, const_tree);
71 static bool m32c_fixed_condition_code_regs (unsigned int *, unsigned int *);
72 static struct machine_function *m32c_init_machine_status (void);
73 static void m32c_insert_attributes (tree, tree *);
74 static bool m32c_legitimate_address_p (enum machine_mode, rtx, bool);
75 static bool m32c_addr_space_legitimate_address_p (enum machine_mode, rtx, bool, addr_space_t);
76 static rtx m32c_function_arg (CUMULATIVE_ARGS *, enum machine_mode,
77 const_tree, bool);
78 static bool m32c_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
79 const_tree, bool);
80 static void m32c_function_arg_advance (CUMULATIVE_ARGS *, enum machine_mode,
81 const_tree, bool);
82 static unsigned int m32c_function_arg_boundary (enum machine_mode, const_tree);
83 static int m32c_pushm_popm (Push_Pop_Type);
84 static bool m32c_strict_argument_naming (CUMULATIVE_ARGS *);
85 static rtx m32c_struct_value_rtx (tree, int);
86 static rtx m32c_subreg (enum machine_mode, rtx, enum machine_mode, int);
87 static int need_to_save (int);
88 static rtx m32c_function_value (const_tree, const_tree, bool);
89 static rtx m32c_libcall_value (enum machine_mode, const_rtx);
90
91 /* Returns true if an address is specified, else false. */
92 static bool m32c_get_pragma_address (const char *varname, unsigned *addr);
93
#define SYMBOL_FLAG_FUNCVEC_FUNCTION	(SYMBOL_FLAG_MACH_DEP << 0)

/* NUL-terminated C-string equality.  */
#define streq(a,b) (strcmp ((a), (b)) == 0)

/* Internal support routines */

/* Debugging statements are tagged with DEBUG0 only so that they can
   be easily enabled individually, by replacing the '0' with '1' as
   needed.  */
#define DEBUG0 0
#define DEBUG1 1

#if DEBUG0
/* This is needed by some of the commented-out debug statements
   below.  */
static char const *class_names[LIM_REG_CLASSES] = REG_CLASS_NAMES;
#endif
/* Local copy of the per-class register sets as plain ints for easy
   bit tests.  Only element [0] is ever used (see the (1 << r) tests
   below), so this assumes all hard registers fit in one word.  */
static int class_contents[LIM_REG_CLASSES][1] = REG_CLASS_CONTENTS;

/* These are all to support encode_pattern().  pattern[] holds the
   encoded string, patternp is the write cursor, and patternr[] holds
   the rtx node behind each encoded character.  */
static char pattern[30], *patternp;
static GTY(()) rtx patternr[30];
#define RTX_IS(x) (streq (pattern, x))

/* Some macros to simplify the logic throughout this file.  */
#define IS_MEM_REGNO(regno) ((regno) >= MEM0_REGNO && (regno) <= MEM7_REGNO)
#define IS_MEM_REG(rtx) (GET_CODE (rtx) == REG && IS_MEM_REGNO (REGNO (rtx)))

#define IS_CR_REGNO(regno) ((regno) >= SB_REGNO && (regno) <= PC_REGNO)
#define IS_CR_REG(rtx) (GET_CODE (rtx) == REG && IS_CR_REGNO (REGNO (rtx)))
124
/* Returns nonzero iff X is a MEM in the "far" address space; any
   other rtx (including non-MEMs) yields zero.  */
static int
far_addr_space_p (rtx x)
{
  if (GET_CODE (x) != MEM)
    return 0;
#if DEBUG0
  fprintf(stderr, "\033[35mfar_addr_space: "); debug_rtx(x);
  fprintf(stderr, " = %d\033[0m\n", MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR);
#endif
  return MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR;
}
136
137 /* We do most RTX matching by converting the RTX into a string, and
138 using string compares. This vastly simplifies the logic in many of
139 the functions in this file.
140
141 On exit, pattern[] has the encoded string (use RTX_IS("...") to
142 compare it) and patternr[] has pointers to the nodes in the RTX
143 corresponding to each character in the encoded string. The latter
144 is mostly used by print_operand().
145
146 Unrecognized patterns have '?' in them; this shows up when the
147 assembler complains about syntax errors.
148 */
149
/* Recursive worker for encode_pattern ().  Appends one or more
   characters describing X to pattern[] and records X itself in
   patternr[] at the index of the first character written.  See the
   switch cases for the encoding alphabet.  */
static void
encode_pattern_1 (rtx x)
{
  int i;

  /* Nearly full: overwrite the last character with '?' so the result
     can never accidentally match a valid pattern, and stop.  */
  if (patternp == pattern + sizeof (pattern) - 2)
    {
      patternp[-1] = '?';
      return;
    }

  patternr[patternp - pattern] = x;

  switch (GET_CODE (x))
    {
    case REG:
      *patternp++ = 'r';
      break;
    case SUBREG:
      /* Only a size-changing subreg gets its own 'S' marker; a
	 same-size subreg is transparent in the encoding.  */
      if (GET_MODE_SIZE (GET_MODE (x)) !=
	  GET_MODE_SIZE (GET_MODE (XEXP (x, 0))))
	*patternp++ = 'S';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case MEM:
      *patternp++ = 'm';
      /* FALLTHRU - a MEM is 'm' followed by its address, which is
	 exactly what the CONST case does for its operand.  */
    case CONST:
      encode_pattern_1 (XEXP (x, 0));
      break;
    case SIGN_EXTEND:
      *patternp++ = '^';
      *patternp++ = 'S';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case ZERO_EXTEND:
      *patternp++ = '^';
      *patternp++ = 'Z';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case PLUS:
      *patternp++ = '+';
      encode_pattern_1 (XEXP (x, 0));
      encode_pattern_1 (XEXP (x, 1));
      break;
    case PRE_DEC:
      *patternp++ = '>';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case POST_INC:
      *patternp++ = '<';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case LO_SUM:
      *patternp++ = 'L';
      encode_pattern_1 (XEXP (x, 0));
      encode_pattern_1 (XEXP (x, 1));
      break;
    case HIGH:
      *patternp++ = 'H';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case SYMBOL_REF:
      *patternp++ = 's';
      break;
    case LABEL_REF:
      *patternp++ = 'l';
      break;
    case CODE_LABEL:
      *patternp++ = 'c';
      break;
    case CONST_INT:
    case CONST_DOUBLE:
      *patternp++ = 'i';
      break;
    case UNSPEC:
      /* NOTE(review): an unspec number above 9 encodes as a character
	 past '9'; presumably no pattern matched via RTX_IS uses such
	 an unspec -- confirm if new unspecs are added.  */
      *patternp++ = 'u';
      *patternp++ = '0' + XCINT (x, 1, UNSPEC);
      for (i = 0; i < XVECLEN (x, 0); i++)
	encode_pattern_1 (XVECEXP (x, 0, i));
      break;
    case USE:
      *patternp++ = 'U';
      break;
    case PARALLEL:
      *patternp++ = '|';
      for (i = 0; i < XVECLEN (x, 0); i++)
	encode_pattern_1 (XVECEXP (x, 0, i));
      break;
    case EXPR_LIST:
      *patternp++ = 'E';
      encode_pattern_1 (XEXP (x, 0));
      if (XEXP (x, 1))
	encode_pattern_1 (XEXP (x, 1));
      break;
    default:
      /* Unrecognized codes become '?', which never matches.  */
      *patternp++ = '?';
#if DEBUG0
      fprintf (stderr, "can't encode pattern %s\n",
	       GET_RTX_NAME (GET_CODE (x)));
      debug_rtx (x);
      gcc_unreachable ();
#endif
      break;
    }
}
255
/* Entry point for rtx encoding: reset the cursor to the start of
   pattern[], walk X, then NUL-terminate the encoded string so it can
   be compared with RTX_IS ().  */
static void
encode_pattern (rtx x)
{
  patternp = pattern;
  encode_pattern_1 (x);
  *patternp = 0;
}
263
264 /* Since register names indicate the mode they're used in, we need a
265 way to determine which name to refer to the register with. Called
266 by print_operand(). */
267
268 static const char *
269 reg_name_with_mode (int regno, enum machine_mode mode)
270 {
271 int mlen = GET_MODE_SIZE (mode);
272 if (regno == R0_REGNO && mlen == 1)
273 return "r0l";
274 if (regno == R0_REGNO && (mlen == 3 || mlen == 4))
275 return "r2r0";
276 if (regno == R0_REGNO && mlen == 6)
277 return "r2r1r0";
278 if (regno == R0_REGNO && mlen == 8)
279 return "r3r1r2r0";
280 if (regno == R1_REGNO && mlen == 1)
281 return "r1l";
282 if (regno == R1_REGNO && (mlen == 3 || mlen == 4))
283 return "r3r1";
284 if (regno == A0_REGNO && TARGET_A16 && (mlen == 3 || mlen == 4))
285 return "a1a0";
286 return reg_names[regno];
287 }
288
289 /* How many bytes a register uses on stack when it's pushed. We need
290 to know this because the push opcode needs to explicitly indicate
291 the size of the register, even though the name of the register
292 already tells it that. Used by m32c_output_reg_{push,pop}, which
293 is only used through calls to ASM_OUTPUT_REG_{PUSH,POP}. */
294
295 static int
296 reg_push_size (int regno)
297 {
298 switch (regno)
299 {
300 case R0_REGNO:
301 case R1_REGNO:
302 return 2;
303 case R2_REGNO:
304 case R3_REGNO:
305 case FLG_REGNO:
306 return 2;
307 case A0_REGNO:
308 case A1_REGNO:
309 case SB_REGNO:
310 case FB_REGNO:
311 case SP_REGNO:
312 if (TARGET_A16)
313 return 2;
314 else
315 return 3;
316 default:
317 gcc_unreachable ();
318 }
319 }
320
/* Per-class register counts, computed lazily by reduce_class () and
   cached for the rest of the compilation (the xmalloc'd block is
   deliberately never freed).  */
static int *class_sizes = 0;

/* Given two register classes, find the largest intersection between
   them.  If there is no intersection, return RETURNED_IF_EMPTY
   instead.  More precisely: returns the largest register class whose
   contents lie entirely within both ORIGINAL_CLASS and
   LIMITING_CLASS.  */
static int
reduce_class (int original_class, int limiting_class, int returned_if_empty)
{
  int cc = class_contents[original_class][0];
  int i, best = NO_REGS;
  int best_size = 0;

  if (original_class == limiting_class)
    return original_class;

  /* First call: populate the table of class sizes (population count
     of each class's register set).  */
  if (!class_sizes)
    {
      int r;
      class_sizes = (int *) xmalloc (LIM_REG_CLASSES * sizeof (int));
      for (i = 0; i < LIM_REG_CLASSES; i++)
	{
	  class_sizes[i] = 0;
	  for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
	    if (class_contents[i][0] & (1 << r))
	      class_sizes[i]++;
	}
    }

  /* CC is now the set of registers common to both classes.  */
  cc &= class_contents[limiting_class][0];
  for (i = 0; i < LIM_REG_CLASSES; i++)
    {
      int ic = class_contents[i][0];

      /* (~cc & ic) == 0 means class I is a subset of the
	 intersection; keep the biggest such class.  */
      if ((~cc & ic) == 0)
	if (best_size < class_sizes[i])
	  {
	    best = i;
	    best_size = class_sizes[i];
	  }

    }
  if (best == NO_REGS)
    return returned_if_empty;
  return best;
}
366
/* Used by m32c_register_move_cost to determine if a move is
   impossibly expensive.  Returns true iff at least one register in
   RCLASS can hold a value of mode MODE.  */
static bool
class_can_hold_mode (reg_class_t rclass, enum machine_mode mode)
{
  /* Cache the results:  0=untested 1=no 2=yes */
  static char results[LIM_REG_CLASSES][MAX_MACHINE_MODE];

  if (results[(int) rclass][mode] == 0)
    {
      int r;
      /* Assume "no" until a suitable register is found.  */
      results[rclass][mode] = 1;
      for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
	if (in_hard_reg_set_p (reg_class_contents[(int) rclass], mode, r)
	    && HARD_REGNO_MODE_OK (r, mode))
	  {
	    results[rclass][mode] = 2;
	    break;
	  }
    }

#if DEBUG0
  fprintf (stderr, "class %s can hold %s? %s\n",
	   class_names[(int) rclass], mode_name[mode],
	   (results[rclass][mode] == 2) ? "yes" : "no");
#endif
  return results[(int) rclass][mode] == 2;
}
395
396 /* Run-time Target Specification. */
397
398 /* Memregs are memory locations that gcc treats like general
399 registers, as there are a limited number of true registers and the
400 m32c families can use memory in most places that registers can be
401 used.
402
403 However, since memory accesses are more expensive than registers,
404 we allow the user to limit the number of memregs available, in
405 order to try to persuade gcc to try harder to use real registers.
406
407 Memregs are provided by m32c-lib1.S.
408 */
409
/* NOTE(review): guard flag for adjusting target_memregs; its
   set/clear sites are elsewhere in this file (not visible in this
   chunk) -- confirm the life cycle at the use sites.  */
int ok_to_change_target_memregs = TRUE;
411
/* Implements TARGET_OPTION_OVERRIDE.  Validates -memregs=, and sets
   the option defaults this port requires.  */

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE m32c_option_override

static void
m32c_option_override (void)
{
  /* We limit memregs to 0..16, and provide a default.  */
  if (global_options_set.x_target_memregs)
    {
      /* NOTE(review): on error the invalid value is left in
	 target_memregs; m32c_conditional_register_usage range-checks
	 it again before using it.  */
      if (target_memregs < 0 || target_memregs > 16)
	error ("invalid target memregs value '%d'", target_memregs);
    }
  else
    target_memregs = 16;

  /* Induction-variable optimization is disabled on the A24 chips;
     the reason is not recorded here -- presumably an interaction
     with the wider address modes.  Confirm before re-enabling.  */
  if (TARGET_A24)
    flag_ivopts = 0;

  /* This target defaults to strict volatile bitfields.  */
  if (flag_strict_volatile_bitfields < 0)
    flag_strict_volatile_bitfields = 1;

  /* r8c/m16c have no 16-bit indirect call, so thunks are involved.
     This is always worse than an absolute call.  */
  if (TARGET_A16)
    flag_no_function_cse = 1;

  /* This wants to put insns between compares and their jumps.  */
  /* FIXME: The right solution is to properly trace the flags register
     values, but that is too much work for stage 4.  */
  flag_combine_stack_adjustments = 0;
}
446
#undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m32c_override_options_after_change

/* Implements TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE.  Re-assert the
   setting that must hold whenever options are changed: A16 chips
   cannot CSE function addresses (see m32c_option_override).  */
static void
m32c_override_options_after_change (void)
{
  if (TARGET_A16)
    flag_no_function_cse = 1;
}
456
457 /* Defining data structures for per-function information */
458
/* The usual; we set up our machine_function data.  Returns a
   zero-initialized, garbage-collected machine_function.  */
static struct machine_function *
m32c_init_machine_status (void)
{
  return ggc_alloc_cleared_machine_function ();
}
465
/* Implements INIT_EXPANDERS.  We just set up to call the above
   function.  */
void
m32c_init_expanders (void)
{
  /* Hook our allocator so each function gets its machine_function.  */
  init_machine_status = m32c_init_machine_status;
}
473
474 /* Storage Layout */
475
476 /* Register Basics */
477
478 /* Basic Characteristics of Registers */
479
/* Whether a mode fits in a register is complex enough to warrant a
   table.  Each field is the number of hard registers needed to hold
   a mode of that size class starting at the given register, or 0 if
   the mode is not allowed there (see m32c_hard_regno_nregs_1 for how
   the fields are selected).  Note the hard register order:
   r0, r2, r1, r3, ...  */
static struct
{
  char qi_regs;	/* 1-byte modes */
  char hi_regs;	/* 2-byte modes */
  char pi_regs;	/* pointer-size (PSImode) modes on A24 */
  char si_regs;	/* 4-byte modes */
  char di_regs;	/* 8-byte modes */
} nregs_table[FIRST_PSEUDO_REGISTER] =
{
  { 1, 1, 2, 2, 4 },	/* r0 */
  { 0, 1, 0, 0, 0 },	/* r2 */
  { 1, 1, 2, 2, 0 },	/* r1 */
  { 0, 1, 0, 0, 0 },	/* r3 */
  { 0, 1, 1, 0, 0 },	/* a0 */
  { 0, 1, 1, 0, 0 },	/* a1 */
  { 0, 1, 1, 0, 0 },	/* sb */
  { 0, 1, 1, 0, 0 },	/* fb */
  { 0, 1, 1, 0, 0 },	/* sp */
  { 1, 1, 1, 0, 0 },	/* pc */
  { 0, 0, 0, 0, 0 },	/* fl */
  { 1, 1, 1, 0, 0 },	/* ap */
  { 1, 1, 2, 2, 4 },	/* mem0 */
  { 1, 1, 2, 2, 4 },	/* mem1 */
  { 1, 1, 2, 2, 4 },	/* mem2 */
  { 1, 1, 2, 2, 4 },	/* mem3 */
  { 1, 1, 2, 2, 4 },	/* mem4 */
  { 1, 1, 2, 2, 0 },	/* mem5 */
  { 1, 1, 2, 2, 0 },	/* mem6 */
  { 1, 1, 0, 0, 0 },	/* mem7 */
};
512
/* Implements TARGET_CONDITIONAL_REGISTER_USAGE.  We adjust the number
   of available memregs, and select which registers need to be preserved
   across calls based on the chip family.  */

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE m32c_conditional_register_usage
void
m32c_conditional_register_usage (void)
{
  int i;

  /* Out-of-range values are ignored here; m32c_option_override has
     already diagnosed them.  */
  if (0 <= target_memregs && target_memregs <= 16)
    {
      /* The command line option is bytes, but our "registers" are
	 16-bit words.  Mark the unused tail of the memreg bank fixed
	 and drop it from MEM_REGS.  */
      for (i = (target_memregs+1)/2; i < 8; i++)
	{
	  fixed_regs[MEM0_REGNO + i] = 1;
	  CLEAR_HARD_REG_BIT (reg_class_contents[MEM_REGS], MEM0_REGNO + i);
	}
    }

  /* M32CM and M32C preserve more registers across function calls.  */
  if (TARGET_A24)
    {
      call_used_regs[R1_REGNO] = 0;
      call_used_regs[R2_REGNO] = 0;
      call_used_regs[R3_REGNO] = 0;
      call_used_regs[A0_REGNO] = 0;
      call_used_regs[A1_REGNO] = 0;
    }
}
545
546 /* How Values Fit in Registers */
547
/* Implements HARD_REGNO_NREGS.  This is complicated by the fact that
   different registers are different sizes from each other, *and* may
   be different sizes in different chip families.  Returns 0 when
   REGNO cannot hold MODE at all; callers rely on that (see
   m32c_hard_regno_ok).  */
static int
m32c_hard_regno_nregs_1 (int regno, enum machine_mode mode)
{
  if (regno == FLG_REGNO && mode == CCmode)
    return 1;
  if (regno >= FIRST_PSEUDO_REGISTER)
    return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);

  /* Memregs are 16-bit words.  */
  if (regno >= MEM0_REGNO && regno <= MEM7_REGNO)
    return (GET_MODE_SIZE (mode) + 1) / 2;

  /* The order of the remaining tests matters: the A0/SImode/A16
     special case must be checked after the 1- and 2-byte cases but
     before the generic 4-byte case.  */
  if (GET_MODE_SIZE (mode) <= 1)
    return nregs_table[regno].qi_regs;
  if (GET_MODE_SIZE (mode) <= 2)
    return nregs_table[regno].hi_regs;
  if (regno == A0_REGNO && mode == SImode && TARGET_A16)
    return 2;
  if ((GET_MODE_SIZE (mode) <= 3 || mode == PSImode) && TARGET_A24)
    return nregs_table[regno].pi_regs;
  if (GET_MODE_SIZE (mode) <= 4)
    return nregs_table[regno].si_regs;
  if (GET_MODE_SIZE (mode) <= 8)
    return nregs_table[regno].di_regs;
  return 0;
}
576
577 int
578 m32c_hard_regno_nregs (int regno, enum machine_mode mode)
579 {
580 int rv = m32c_hard_regno_nregs_1 (regno, mode);
581 return rv ? rv : 1;
582 }
583
584 /* Implements HARD_REGNO_MODE_OK. The above function does the work
585 already; just test its return value. */
586 int
587 m32c_hard_regno_ok (int regno, enum machine_mode mode)
588 {
589 return m32c_hard_regno_nregs_1 (regno, mode) != 0;
590 }
591
/* Implements MODES_TIEABLE_P.  In general, modes aren't tieable since
   registers are all different sizes.  However, since most modes are
   bigger than our registers anyway, it's easier to implement this
   function that way, leaving QImode as the only unique case.  As
   written, every pair of modes is reported tieable: the QImode
   exclusion below is disabled.  */
int
m32c_modes_tieable_p (enum machine_mode m1, enum machine_mode m2)
{
  if (GET_MODE_SIZE (m1) == GET_MODE_SIZE (m2))
    return 1;

#if 0
  if (m1 == QImode || m2 == QImode)
    return 0;
#endif

  return 1;
}
609
610 /* Register Classes */
611
612 /* Implements REGNO_REG_CLASS. */
613 enum reg_class
614 m32c_regno_reg_class (int regno)
615 {
616 switch (regno)
617 {
618 case R0_REGNO:
619 return R0_REGS;
620 case R1_REGNO:
621 return R1_REGS;
622 case R2_REGNO:
623 return R2_REGS;
624 case R3_REGNO:
625 return R3_REGS;
626 case A0_REGNO:
627 return A0_REGS;
628 case A1_REGNO:
629 return A1_REGS;
630 case SB_REGNO:
631 return SB_REGS;
632 case FB_REGNO:
633 return FB_REGS;
634 case SP_REGNO:
635 return SP_REGS;
636 case FLG_REGNO:
637 return FLG_REGS;
638 default:
639 if (IS_MEM_REGNO (regno))
640 return MEM_REGS;
641 return ALL_REGS;
642 }
643 }
644
/* Implements REG_CLASS_FROM_CONSTRAINT.  Note that some constraints only match
   for certain chip families; those return NO_REGS on the wrong
   family.  Unknown "R" constraints abort.  */
int
m32c_reg_class_from_constraint (char c ATTRIBUTE_UNUSED, const char *s)
{
  if (memcmp (s, "Rsp", 3) == 0)
    return SP_REGS;
  if (memcmp (s, "Rfb", 3) == 0)
    return FB_REGS;
  if (memcmp (s, "Rsb", 3) == 0)
    return SB_REGS;
  /* Control registers: A16-only vs A24-only variants.  */
  if (memcmp (s, "Rcr", 3) == 0)
    return TARGET_A16 ? CR_REGS : NO_REGS;
  if (memcmp (s, "Rcl", 3) == 0)
    return TARGET_A24 ? CR_REGS : NO_REGS;
  if (memcmp (s, "R0w", 3) == 0)
    return R0_REGS;
  if (memcmp (s, "R1w", 3) == 0)
    return R1_REGS;
  if (memcmp (s, "R2w", 3) == 0)
    return R2_REGS;
  if (memcmp (s, "R3w", 3) == 0)
    return R3_REGS;
  if (memcmp (s, "R02", 3) == 0)
    return R02_REGS;
  if (memcmp (s, "R13", 3) == 0)
    return R13_REGS;
  if (memcmp (s, "R03", 3) == 0)
    return R03_REGS;
  if (memcmp (s, "Rdi", 3) == 0)
    return DI_REGS;
  if (memcmp (s, "Rhl", 3) == 0)
    return HL_REGS;
  if (memcmp (s, "R23", 3) == 0)
    return R23_REGS;
  if (memcmp (s, "Ra0", 3) == 0)
    return A0_REGS;
  if (memcmp (s, "Ra1", 3) == 0)
    return A1_REGS;
  if (memcmp (s, "Raa", 3) == 0)
    return A_REGS;
  /* Address registers, restricted by family.  */
  if (memcmp (s, "Raw", 3) == 0)
    return TARGET_A16 ? A_REGS : NO_REGS;
  if (memcmp (s, "Ral", 3) == 0)
    return TARGET_A24 ? A_REGS : NO_REGS;
  if (memcmp (s, "Rqi", 3) == 0)
    return QI_REGS;
  if (memcmp (s, "Rad", 3) == 0)
    return AD_REGS;
  if (memcmp (s, "Rsi", 3) == 0)
    return SI_REGS;
  if (memcmp (s, "Rhi", 3) == 0)
    return HI_REGS;
  if (memcmp (s, "Rhc", 3) == 0)
    return HC_REGS;
  if (memcmp (s, "Rra", 3) == 0)
    return RA_REGS;
  if (memcmp (s, "Rfl", 3) == 0)
    return FLG_REGS;
  if (memcmp (s, "Rmm", 3) == 0)
    {
      /* No memregs are available if the whole bank has been marked
	 fixed (see m32c_conditional_register_usage).  */
      if (fixed_regs[MEM0_REGNO])
	return NO_REGS;
      return MEM_REGS;
    }

  /* PSImode registers - i.e. whatever can hold a pointer.  */
  if (memcmp (s, "Rpi", 3) == 0)
    {
      if (TARGET_A16)
	return HI_REGS;
      else
	return RA_REGS;	/* r2r0 and r3r1 can hold pointers.  */
    }

  /* We handle this one as an EXTRA_CONSTRAINT.  */
  if (memcmp (s, "Rpa", 3) == 0)
    return NO_REGS;

  if (*s == 'R')
    {
      fprintf(stderr, "unrecognized R constraint: %.3s\n", s);
      gcc_unreachable();
    }

  return NO_REGS;
}
732
733 /* Implements REGNO_OK_FOR_BASE_P. */
734 int
735 m32c_regno_ok_for_base_p (int regno)
736 {
737 if (regno == A0_REGNO
738 || regno == A1_REGNO || regno >= FIRST_PSEUDO_REGISTER)
739 return 1;
740 return 0;
741 }
742
#define DEBUG_RELOAD 0

/* Implements PREFERRED_RELOAD_CLASS.  In general, prefer general
   registers of the appropriate size.  */
int
m32c_preferred_reload_class (rtx x, int rclass)
{
  int newclass = rclass;

#if DEBUG_RELOAD
  fprintf (stderr, "\npreferred_reload_class for %s is ",
	   class_names[rclass]);
#endif
  /* No class at all: pick a sensible default based on the mode.  */
  if (rclass == NO_REGS)
    rclass = GET_MODE (x) == QImode ? HL_REGS : R03_REGS;

  /* Anything headed for a control register: QImode values go through
     HL_REGS; larger modes are left alone.  */
  if (reg_classes_intersect_p (rclass, CR_REGS))
    {
      switch (GET_MODE (x))
	{
	case QImode:
	  newclass = HL_REGS;
	  break;
	default:
	  /* newclass = HI_REGS; */
	  break;
	}
    }

  /* QI_REGS can't hold anything wider than two bytes.  */
  else if (newclass == QI_REGS && GET_MODE_SIZE (GET_MODE (x)) > 2)
    newclass = SI_REGS;
  /* Wider than four bytes: prefer DI_REGS unless the class already
     consists of all four data registers (low mask 0x000f).  */
  else if (GET_MODE_SIZE (GET_MODE (x)) > 4
	   && ~class_contents[rclass][0] & 0x000f)
    newclass = DI_REGS;

  rclass = reduce_class (rclass, newclass, rclass);

  /* QImode always narrows down to the QI-capable registers.  */
  if (GET_MODE (x) == QImode)
    rclass = reduce_class (rclass, HL_REGS, rclass);

#if DEBUG_RELOAD
  fprintf (stderr, "%s\n", class_names[rclass]);
  debug_rtx (x);

  if (GET_CODE (x) == MEM
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
    fprintf (stderr, "Glorm!\n");
#endif
  return rclass;
}
794
795 /* Implements PREFERRED_OUTPUT_RELOAD_CLASS. */
796 int
797 m32c_preferred_output_reload_class (rtx x, int rclass)
798 {
799 return m32c_preferred_reload_class (x, rclass);
800 }
801
/* Implements LIMIT_RELOAD_CLASS.  We basically want to avoid using
   address registers for reloads since they're needed for address
   reloads.  */
int
m32c_limit_reload_class (enum machine_mode mode, int rclass)
{
#if DEBUG_RELOAD
  fprintf (stderr, "limit_reload_class for %s: %s ->",
	   mode_name[mode], class_names[rclass]);
#endif

  /* Restrict the class to registers that can actually hold MODE.  */
  if (mode == QImode)
    rclass = reduce_class (rclass, HL_REGS, rclass);
  else if (mode == HImode)
    rclass = reduce_class (rclass, HI_REGS, rclass);
  else if (mode == SImode)
    rclass = reduce_class (rclass, SI_REGS, rclass);

  /* Unless the class is specifically the address registers, steer
     toward DI_REGS (presumably the data registers) to keep a0/a1
     free for address reloads.  */
  if (rclass != A_REGS)
    rclass = reduce_class (rclass, DI_REGS, rclass);

#if DEBUG_RELOAD
  fprintf (stderr, " %s\n", class_names[rclass]);
#endif
  return rclass;
}
828
/* Implements SECONDARY_RELOAD_CLASS.  QImode have to be reloaded in
   r0 or r1, as those are the only real QImode registers.  CR regs get
   reloaded through appropriately sized general or address
   registers.  */
int
m32c_secondary_reload_class (int rclass, enum machine_mode mode, rtx x)
{
  int cc = class_contents[rclass][0];
#if DEBUG0
  fprintf (stderr, "\nsecondary reload class %s %s\n",
	   class_names[rclass], mode_name[mode]);
  debug_rtx (x);
#endif
  /* A QImode load from memory into a class that is a subset of
     R23_REGS (registers with no QImode access) needs a QI-capable
     intermediate.  */
  if (mode == QImode
      && GET_CODE (x) == MEM && (cc & ~class_contents[R23_REGS][0]) == 0)
    return QI_REGS;
  /* Moving one of sb/fb/sp into a control-register class goes via a
     general register on A16, or an address register on A24.  */
  if (reg_classes_intersect_p (rclass, CR_REGS)
      && GET_CODE (x) == REG
      && REGNO (x) >= SB_REGNO && REGNO (x) <= SP_REGNO)
    return TARGET_A16 ? HI_REGS : A_REGS;
  return NO_REGS;
}
851
852 /* Implements TARGET_CLASS_LIKELY_SPILLED_P. A_REGS is needed for address
853 reloads. */
854
855 #undef TARGET_CLASS_LIKELY_SPILLED_P
856 #define TARGET_CLASS_LIKELY_SPILLED_P m32c_class_likely_spilled_p
857
858 static bool
859 m32c_class_likely_spilled_p (reg_class_t regclass)
860 {
861 if (regclass == A_REGS)
862 return true;
863
864 return (reg_class_size[(int) regclass] == 1);
865 }
866
867 /* Implements CLASS_MAX_NREGS. We calculate this according to its
868 documented meaning, to avoid potential inconsistencies with actual
869 class definitions. */
870 int
871 m32c_class_max_nregs (int regclass, enum machine_mode mode)
872 {
873 int rn, max = 0;
874
875 for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
876 if (class_contents[regclass][0] & (1 << rn))
877 {
878 int n = m32c_hard_regno_nregs (rn, mode);
879 if (max < n)
880 max = n;
881 }
882 return max;
883 }
884
/* Implements CANNOT_CHANGE_MODE_CLASS.  Only r0 and r1 can change to
   QI (r0l, r1l) because the chip doesn't support QI ops on other
   registers (well, it does on a0/a1 but if we let gcc do that, reload
   suffers).  Otherwise, we allow changes to larger modes.  */
int
m32c_cannot_change_mode_class (enum machine_mode from,
			       enum machine_mode to, int rclass)
{
  int rn;
#if DEBUG0
  fprintf (stderr, "cannot change from %s to %s in %s\n",
	   mode_name[from], mode_name[to], class_names[rclass]);
#endif

  /* If the larger mode isn't allowed in any of these registers, we
     can't allow the change.  */
  for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
    if (class_contents[rclass][0] & (1 << rn))
      if (! m32c_hard_regno_ok (rn, to))
	return 1;

  /* 0x1ffa is the set of hard registers other than r0 (bit 0) and
     r1 (bit 2), up through ap; nonzero here means the class contains
     something that can't become QImode.  */
  if (to == QImode)
    return (class_contents[rclass][0] & 0x1ffa);

  if (class_contents[rclass][0] & 0x0005 /* r0, r1 */
      && GET_MODE_SIZE (from) > 1)
    return 0;
  if (GET_MODE_SIZE (from) > 2)	/* all other regs */
    return 0;

  return 1;
}
917
/* Helpers for the rest of the file.  */
/* TRUE if the rtx is a REG rtx for the given register.  Note: the
   rtx argument is evaluated more than once.  */
#define IS_REG(rtx,regno) (GET_CODE (rtx) == REG \
			   && REGNO (rtx) == regno)
/* TRUE if the rtx is a pseudo - specifically, one we can use as a
   base register in address calculations (hence the "strict"
   argument; never true under strict checking).  The ap register is
   treated like a pseudo here.  */
#define IS_PSEUDO(rtx,strict) (!strict && GET_CODE (rtx) == REG \
			       && (REGNO (rtx) == AP_REGNO \
				   || REGNO (rtx) >= FIRST_PSEUDO_REGISTER))
928
/* Implements CONST_OK_FOR_CONSTRAINT_P.  Currently, all constant
   constraints start with 'I', with the next two characters indicating
   the type and size of the range allowed.  */
int
m32c_const_ok_for_constraint_p (HOST_WIDE_INT value,
				char c ATTRIBUTE_UNUSED, const char *str)
{
  /* s=signed u=unsigned n=nonzero m=minus l=log2able,
     [sun] bits [SUN] bytes, p=pointer size
     I[-0-9][0-9] matches that number */
  if (memcmp (str, "Is3", 3) == 0)
    {
      /* signed 4-bit immediate */
      return (-8 <= value && value <= 7);
    }
  if (memcmp (str, "IS1", 3) == 0)
    {
      /* signed one byte */
      return (-128 <= value && value <= 127);
    }
  if (memcmp (str, "IS2", 3) == 0)
    {
      /* signed two bytes */
      return (-32768 <= value && value <= 32767);
    }
  if (memcmp (str, "IU2", 3) == 0)
    {
      /* unsigned two bytes */
      return (0 <= value && value <= 65535);
    }
  if (memcmp (str, "IU3", 3) == 0)
    {
      /* unsigned three bytes */
      return (0 <= value && value <= 0x00ffffff);
    }
  /* The "In" variants additionally exclude zero (the middle "value"
     term).  */
  if (memcmp (str, "In4", 3) == 0)
    {
      return (-8 <= value && value && value <= 8);
    }
  if (memcmp (str, "In5", 3) == 0)
    {
      return (-16 <= value && value && value <= 16);
    }
  if (memcmp (str, "In6", 3) == 0)
    {
      return (-32 <= value && value && value <= 32);
    }
  if (memcmp (str, "IM2", 3) == 0)
    {
      /* strictly negative 16-bit values */
      return (-65536 <= value && value && value <= -1);
    }
  if (memcmp (str, "Ilb", 3) == 0)
    {
      /* exactly one set bit, within a byte */
      int b = exact_log2 (value);
      return (b >= 0 && b <= 7);
    }
  if (memcmp (str, "Imb", 3) == 0)
    {
      /* byte with exactly one clear bit */
      int b = exact_log2 ((value ^ 0xff) & 0xff);
      return (b >= 0 && b <= 7);
    }
  if (memcmp (str, "ImB", 3) == 0)
    {
      /* 16-bit value with exactly one clear bit, in the low byte */
      int b = exact_log2 ((value ^ 0xffff) & 0xffff);
      return (b >= 0 && b <= 7);
    }
  if (memcmp (str, "Ilw", 3) == 0)
    {
      /* exactly one set bit, within a word */
      int b = exact_log2 (value);
      return (b >= 0 && b <= 15);
    }
  if (memcmp (str, "Imw", 3) == 0)
    {
      /* word with exactly one clear bit */
      int b = exact_log2 ((value ^ 0xffff) & 0xffff);
      return (b >= 0 && b <= 15);
    }
  if (memcmp (str, "I00", 3) == 0)
    {
      return (value == 0);
    }
  /* Unknown 'I' constraints match nothing.  */
  return 0;
}
1006
/* TRUE if X is the a0 register or any pseudo.  Note: X is evaluated
   more than once.  */
#define A0_OR_PSEUDO(x) (IS_REG(x, A0_REGNO) || REGNO (x) >= FIRST_PSEUDO_REGISTER)

/* Implements EXTRA_CONSTRAINT_STR (see next function too).  'S' is
   for memory constraints, plus "Rpa" for PARALLEL rtx's we use for
   call return values.  Matching is done against the string produced
   by encode_pattern (); see encode_pattern_1 for the alphabet.  */
int
m32c_extra_constraint_p2 (rtx value, char c ATTRIBUTE_UNUSED, const char *str)
{
  encode_pattern (value);

  /* Memory in the far address space matches only "SF".  */
  if (far_addr_space_p (value))
    {
      if (memcmp (str, "SF", 2) == 0)
	{
	  /* An SImode register/pseudo, an extended HImode register
	     plus offset or symbol, or a bare symbol.  */
	  return ( (RTX_IS ("mr")
		    && A0_OR_PSEUDO (patternr[1])
		    && GET_MODE (patternr[1]) == SImode)
		   || (RTX_IS ("m+^Sri")
		       && A0_OR_PSEUDO (patternr[4])
		       && GET_MODE (patternr[4]) == HImode)
		   || (RTX_IS ("m+^Srs")
		       && A0_OR_PSEUDO (patternr[4])
		       && GET_MODE (patternr[4]) == HImode)
		   || (RTX_IS ("m+^S+ris")
		       && A0_OR_PSEUDO (patternr[5])
		       && GET_MODE (patternr[5]) == HImode)
		   || RTX_IS ("ms")
		   );
	}
      return 0;
    }

  if (memcmp (str, "Sd", 2) == 0)
    {
      /* This is the common "src/dest" address */
      rtx r;
      if (GET_CODE (value) == MEM && CONSTANT_P (XEXP (value, 0)))
	return 1;
      if (RTX_IS ("ms") || RTX_IS ("m+si"))
	return 1;
      if (RTX_IS ("m++rii"))
	{
	  /* Only the 0[0[fb]] double-indirect form is allowed.  */
	  if (REGNO (patternr[3]) == FB_REGNO
	      && INTVAL (patternr[4]) == 0)
	    return 1;
	}
      if (RTX_IS ("mr"))
	r = patternr[1];
      else if (RTX_IS ("m+ri") || RTX_IS ("m+rs") || RTX_IS ("m+r+si"))
	r = patternr[2];
      else
	return 0;
      /* Stack-pointer-relative addresses are matched by "Ss", not
	 here.  */
      if (REGNO (r) == SP_REGNO)
	return 0;
      return m32c_legitimate_address_p (GET_MODE (value), XEXP (value, 0), 1);
    }
  else if (memcmp (str, "Sa", 2) == 0)
    {
      /* Address-register indirect, with optional offset.  */
      rtx r;
      if (RTX_IS ("mr"))
	r = patternr[1];
      else if (RTX_IS ("m+ri"))
	r = patternr[2];
      else
	return 0;
      return (IS_REG (r, A0_REGNO) || IS_REG (r, A1_REGNO));
    }
  else if (memcmp (str, "Si", 2) == 0)
    {
      /* Constant (absolute or symbolic) addresses.  */
      return (RTX_IS ("mi") || RTX_IS ("ms") || RTX_IS ("m+si"));
    }
  else if (memcmp (str, "Ss", 2) == 0)
    {
      /* Stack-pointer relative.  */
      return ((RTX_IS ("mr")
	       && (IS_REG (patternr[1], SP_REGNO)))
	      || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SP_REGNO))));
    }
  else if (memcmp (str, "Sf", 2) == 0)
    {
      /* Frame-base (fb) relative.  */
      return ((RTX_IS ("mr")
	       && (IS_REG (patternr[1], FB_REGNO)))
	      || (RTX_IS ("m+ri") && (IS_REG (patternr[2], FB_REGNO))));
    }
  else if (memcmp (str, "Sb", 2) == 0)
    {
      /* Static-base (sb) relative.  */
      return ((RTX_IS ("mr")
	       && (IS_REG (patternr[1], SB_REGNO)))
	      || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SB_REGNO))));
    }
  else if (memcmp (str, "Sp", 2) == 0)
    {
      /* Absolute addresses 0..0x1fff used for bit addressing (I/O ports) */
      return (RTX_IS ("mi")
	      && !(INTVAL (patternr[1]) & ~0x1fff));
    }
  else if (memcmp (str, "S1", 2) == 0)
    {
      return r1h_operand (value, QImode);
    }
  else if (memcmp (str, "SF", 2) == 0)
    {
      /* "SF" was handled by the far-space branch above; near-space
	 memory never matches it.  */
      return 0;
    }

  gcc_assert (str[0] != 'S');

  /* NOTE(review): only two bytes are compared, so any "Rp..."
     constraint would match here; currently only "Rpa" reaches this
     function ("Rpi" is a register-class constraint).  Confirm if new
     "Rp" constraints are added.  */
  if (memcmp (str, "Rpa", 2) == 0)
    return GET_CODE (value) == PARALLEL;

  return 0;
}
1118
/* Debugging wrapper around m32c_extra_constraint_p2: forwards VALUE,
   C and STR unchanged and, when DEBUG0 is set, dumps the constraint
   string, the result, and the rtx being matched to stderr.  */
int
m32c_extra_constraint_p (rtx value, char c, const char *str)
{
  int rv = m32c_extra_constraint_p2 (value, c, str);
#if DEBUG0
  fprintf (stderr, "\nconstraint %.*s: %d\n", CONSTRAINT_LEN (c, str), str,
	   rv);
  debug_rtx (value);
#endif
  return rv;
}
1131
1132 /* Implements EXTRA_MEMORY_CONSTRAINT. Currently, we only use strings
1133 starting with 'S'. */
1134 int
1135 m32c_extra_memory_constraint (char c, const char *str ATTRIBUTE_UNUSED)
1136 {
1137 return c == 'S';
1138 }
1139
1140 /* Implements EXTRA_ADDRESS_CONSTRAINT. We reserve 'A' strings for these,
1141 but don't currently define any. */
1142 int
1143 m32c_extra_address_constraint (char c, const char *str ATTRIBUTE_UNUSED)
1144 {
1145 return c == 'A';
1146 }
1147
1148 /* STACK AND CALLING */
1149
1150 /* Frame Layout */
1151
/* Implements RETURN_ADDR_RTX.  Note that R8C and M16C push 24 bits
   (yes, THREE bytes) onto the stack for the return address, but we
   don't support pointers bigger than 16 bits on those chips.  This
   will likely wreak havoc with exception unwinding.  FIXME.  */
rtx
m32c_return_addr_rtx (int count)
{
  enum machine_mode mode;
  int offset;
  rtx ra_mem;

  /* Only the current frame's return address is supported.  */
  if (count)
    return NULL_RTX;
  /* we want 2[$fb] */

  if (TARGET_A24)
    {
      /* It's four bytes */
      mode = PSImode;
      offset = 4;
    }
  else
    {
      /* FIXME: it's really 3 bytes */
      mode = HImode;
      offset = 2;
    }

  /* Load the saved return address from offset[$fb] into a fresh
     register and return that.  */
  ra_mem =
    gen_rtx_MEM (mode, plus_constant (gen_rtx_REG (Pmode, FP_REGNO), offset));
  return copy_to_mode_reg (mode, ra_mem);
}
1184
/* Implements INCOMING_RETURN_ADDR_RTX.  See comment above.  On entry
   the return address is the value at the top of the stack.  */
rtx
m32c_incoming_return_addr_rtx (void)
{
  /* we want [sp] */
  return gen_rtx_MEM (PSImode, gen_rtx_REG (PSImode, SP_REGNO));
}
1192
1193 /* Exception Handling Support */
1194
1195 /* Implements EH_RETURN_DATA_REGNO. Choose registers able to hold
1196 pointers. */
1197 int
1198 m32c_eh_return_data_regno (int n)
1199 {
1200 switch (n)
1201 {
1202 case 0:
1203 return A0_REGNO;
1204 case 1:
1205 if (TARGET_A16)
1206 return R3_REGNO;
1207 else
1208 return R1_REGNO;
1209 default:
1210 return INVALID_REGNUM;
1211 }
1212 }
1213
1214 /* Implements EH_RETURN_STACKADJ_RTX. Saved and used later in
1215 m32c_emit_eh_epilogue. */
1216 rtx
1217 m32c_eh_return_stackadj_rtx (void)
1218 {
1219 if (!cfun->machine->eh_stack_adjust)
1220 {
1221 rtx sa;
1222
1223 sa = gen_rtx_REG (Pmode, R0_REGNO);
1224 cfun->machine->eh_stack_adjust = sa;
1225 }
1226 return cfun->machine->eh_stack_adjust;
1227 }
1228
1229 /* Registers That Address the Stack Frame */
1230
1231 /* Implements DWARF_FRAME_REGNUM and DBX_REGISTER_NUMBER. Note that
1232 the original spec called for dwarf numbers to vary with register
1233 width as well, for example, r0l, r0, and r2r0 would each have
1234 different dwarf numbers. GCC doesn't support this, and we don't do
1235 it, and gdb seems to like it this way anyway. */
1236 unsigned int
1237 m32c_dwarf_frame_regnum (int n)
1238 {
1239 switch (n)
1240 {
1241 case R0_REGNO:
1242 return 5;
1243 case R1_REGNO:
1244 return 6;
1245 case R2_REGNO:
1246 return 7;
1247 case R3_REGNO:
1248 return 8;
1249 case A0_REGNO:
1250 return 9;
1251 case A1_REGNO:
1252 return 10;
1253 case FB_REGNO:
1254 return 11;
1255 case SB_REGNO:
1256 return 19;
1257
1258 case SP_REGNO:
1259 return 12;
1260 case PC_REGNO:
1261 return 13;
1262 default:
1263 return DWARF_FRAME_REGISTERS + 1;
1264 }
1265 }
1266
1267 /* The frame looks like this:
1268
1269 ap -> +------------------------------
1270 | Return address (3 or 4 bytes)
1271 | Saved FB (2 or 4 bytes)
1272 fb -> +------------------------------
1273 | local vars
1274 | register saves fb
1275 | through r0 as needed
1276 sp -> +------------------------------
1277 */
1278
/* We use this to wrap all emitted insns in the prologue: it sets
   RTX_FRAME_RELATED_P on X so the insn contributes to the unwind/CFI
   info, and returns X for convenient chaining.  */
static rtx
F (rtx x)
{
  RTX_FRAME_RELATED_P (x) = 1;
  return x;
}
1286
/* This maps register numbers to the PUSHM/POPM bitfield, and tells us
   how much the stack pointer moves for each, for each cpu family.  */
static struct
{
  int reg1;			/* hard register number */
  int bit;			/* bit in the PUSHM/POPM immediate */
  int a16_bytes;		/* stack bytes consumed on A16 chips */
  int a24_bytes;		/* stack bytes consumed on A24 chips */
} pushm_info[] =
{
  /* These are in reverse push (nearest-to-sp) order.  */
  { R0_REGNO, 0x80, 2, 2 },
  { R1_REGNO, 0x40, 2, 2 },
  { R2_REGNO, 0x20, 2, 2 },
  { R3_REGNO, 0x10, 2, 2 },
  { A0_REGNO, 0x08, 2, 4 },
  { A1_REGNO, 0x04, 2, 4 },
  { SB_REGNO, 0x02, 2, 4 },
  { FB_REGNO, 0x01, 2, 4 }
};

/* Number of entries in pushm_info.  */
#define PUSHM_N (sizeof(pushm_info)/sizeof(pushm_info[0]))
1309
/* Returns TRUE if we need to save/restore the given register.  We
   save everything for exception handlers, so that any register can be
   unwound.  For interrupt handlers, we save everything if the handler
   calls something else (because we don't know what *that* function
   might do), but try to be a bit smarter if the handler is a leaf
   function.  We always save $a0, though, because we use that in the
   epilogue to copy $fb to $sp.  */
static int
need_to_save (int regno)
{
  /* Fixed registers are never saved or restored.  */
  if (fixed_regs[regno])
    return 0;
  /* EH unwinding may need any register.  */
  if (crtl->calls_eh_return)
    return 1;
  /* $fb is never saved here (presumably handled by the frame setup
     itself -- see m32c_function_needs_enter).  */
  if (regno == FP_REGNO)
    return 0;
  /* Interrupt handlers: save everything for non-leaf handlers, and
     always $a0 when an enter-style frame is built.  */
  if (cfun->machine->is_interrupt
      && (!cfun->machine->is_leaf
	  || (regno == A0_REGNO
	      && m32c_function_needs_enter ())
	  ))
    return 1;
  /* Otherwise save live call-saved registers, and in interrupt
     handlers live call-used ones too.  */
  if (df_regs_ever_live_p (regno)
      && (!call_used_regs[regno] || cfun->machine->is_interrupt))
    return 1;
  return 0;
}
1337
/* This function contains all the intelligence about saving and
   restoring registers.  It always figures out the register save set.
   When called with PP_justcount, it merely returns the size of the
   save set (for eliminating the frame pointer, for example).  When
   called with PP_pushm or PP_popm, it emits the appropriate
   instructions for saving (pushm) or restoring (popm) the
   registers.  Returns the number of stack bytes the save set
   occupies.  */
static int
m32c_pushm_popm (Push_Pop_Type ppt)
{
  int reg_mask = 0;
  int byte_count = 0, bytes;
  int i;
  rtx dwarf_set[PUSHM_N];
  int n_dwarfs = 0;
  int nosave_mask = 0;

  /* If the return value lives in registers (a PARALLEL), exclude
     those registers from the save set so the epilogue's popm does not
     clobber them.  NOTE(review): the inline "PSI, SI" / "DF"
     annotations below look swapped relative to their branches --
     confirm against the return-value ABI.  */
  if (crtl->return_rtx
      && GET_CODE (crtl->return_rtx) == PARALLEL
      && !(crtl->calls_eh_return || cfun->machine->is_interrupt))
    {
      rtx exp = XVECEXP (crtl->return_rtx, 0, 0);
      rtx rv = XEXP (exp, 0);
      int rv_bytes = GET_MODE_SIZE (GET_MODE (rv));

      if (rv_bytes > 2)
	nosave_mask |= 0x20;	/* PSI, SI */
      else
	nosave_mask |= 0xf0;	/* DF */
      if (rv_bytes > 4)
	nosave_mask |= 0x50;	/* DI */
    }

  /* Build the PUSHM bit mask and byte count, and (for PP_pushm)
     the per-register dwarf SETs describing where each register
     lands on the stack.  */
  for (i = 0; i < (int) PUSHM_N; i++)
    {
      /* Skip if neither register needs saving.  */
      if (!need_to_save (pushm_info[i].reg1))
	continue;

      if (pushm_info[i].bit & nosave_mask)
	continue;

      reg_mask |= pushm_info[i].bit;
      bytes = TARGET_A16 ? pushm_info[i].a16_bytes : pushm_info[i].a24_bytes;

      if (ppt == PP_pushm)
	{
	  enum machine_mode mode = (bytes == 2) ? HImode : SImode;
	  rtx addr;

	  /* Always use stack_pointer_rtx instead of calling
	     rtx_gen_REG ourselves.  Code elsewhere in GCC assumes
	     that there is a single rtx representing the stack pointer,
	     namely stack_pointer_rtx, and uses == to recognize it.  */
	  addr = stack_pointer_rtx;

	  if (byte_count != 0)
	    addr = gen_rtx_PLUS (GET_MODE (addr), addr, GEN_INT (byte_count));

	  dwarf_set[n_dwarfs++] =
	    gen_rtx_SET (VOIDmode,
			 gen_rtx_MEM (mode, addr),
			 gen_rtx_REG (mode, pushm_info[i].reg1));
	  F (dwarf_set[n_dwarfs - 1]);

	}
      byte_count += bytes;
    }

  /* Interrupt handlers save via a dedicated pushm recorded for the
     interrupt prologue; reset the normal mask/count.  */
  if (cfun->machine->is_interrupt)
    {
      cfun->machine->intr_pushm = reg_mask & 0xfe;
      reg_mask = 0;
      byte_count = 0;
    }

  /* Interrupt handlers additionally save any live mem0..mem7
     pseudo-registers, two bytes each.  */
  if (cfun->machine->is_interrupt)
    for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
      if (need_to_save (i))
	{
	  byte_count += 2;
	  cfun->machine->intr_pushmem[i - MEM0_REGNO] = 1;
	}

  if (ppt == PP_pushm && byte_count)
    {
      /* Attach a REG_FRAME_RELATED_EXPR note describing the sp
	 adjustment plus all the individual register stores, so the
	 unwinder can decode the single pushm insn.  */
      rtx note = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (n_dwarfs + 1));
      rtx pushm;

      if (reg_mask)
	{
	  XVECEXP (note, 0, 0)
	    = gen_rtx_SET (VOIDmode,
			   stack_pointer_rtx,
			   gen_rtx_PLUS (GET_MODE (stack_pointer_rtx),
					 stack_pointer_rtx,
					 GEN_INT (-byte_count)));
	  F (XVECEXP (note, 0, 0));

	  for (i = 0; i < n_dwarfs; i++)
	    XVECEXP (note, 0, i + 1) = dwarf_set[i];

	  pushm = F (emit_insn (gen_pushm (GEN_INT (reg_mask))));

	  add_reg_note (pushm, REG_FRAME_RELATED_EXPR, note);
	}

      if (cfun->machine->is_interrupt)
	for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
	  if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
	    {
	      if (TARGET_A16)
		pushm = emit_insn (gen_pushhi_16 (gen_rtx_REG (HImode, i)));
	      else
		pushm = emit_insn (gen_pushhi_24 (gen_rtx_REG (HImode, i)));
	      F (pushm);
	    }
    }
  if (ppt == PP_popm && byte_count)
    {
      /* Pops happen in the reverse order of the pushes above.  */
      if (cfun->machine->is_interrupt)
	for (i = MEM7_REGNO; i >= MEM0_REGNO; i--)
	  if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
	    {
	      if (TARGET_A16)
		emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, i)));
	      else
		emit_insn (gen_pophi_24 (gen_rtx_REG (HImode, i)));
	    }
      if (reg_mask)
	emit_insn (gen_popm (GEN_INT (reg_mask)));
    }

  return byte_count;
}
1473
/* Implements INITIAL_ELIMINATION_OFFSET.  See the comment above that
   diagrams our call frame.  */
int
m32c_initial_elimination_offset (int from, int to)
{
  int ofs = 0;

  /* Eliminating from the arg pointer: skip the return address and
     the saved $fb (5 bytes on A16, 8 on A24 -- see the frame
     diagram).  */
  if (from == AP_REGNO)
    {
      if (TARGET_A16)
	ofs += 5;
      else
	ofs += 8;
    }

  /* Eliminating down to the stack pointer: also skip the register
     save area and the local variables.  */
  if (to == SP_REGNO)
    {
      ofs += m32c_pushm_popm (PP_justcount);
      ofs += get_frame_size ();
    }

  /* Account for push rounding.  */
  if (TARGET_A24)
    ofs = (ofs + 1) & ~1;
#if DEBUG0
  fprintf (stderr, "initial_elimination_offset from=%d to=%d, ofs=%d\n", from,
	   to, ofs);
#endif
  return ofs;
}
1504
1505 /* Passing Function Arguments on the Stack */
1506
1507 /* Implements PUSH_ROUNDING. The R8C and M16C have byte stacks, the
1508 M32C has word stacks. */
1509 unsigned int
1510 m32c_push_rounding (int n)
1511 {
1512 if (TARGET_R8C || TARGET_M16C)
1513 return n;
1514 return (n + 1) & ~1;
1515 }
1516
1517 /* Passing Arguments in Registers */
1518
/* Implements TARGET_FUNCTION_ARG.  Arguments are passed partly in
   registers, partly on stack.  If our function returns a struct, a
   pointer to a buffer for it is at the top of the stack (last thing
   pushed).  The first few real arguments may be in registers as
   follows:

   R8C/M16C:	arg1 in r1 if it's QI or HI (else it's pushed on stack)
		arg2 in r2 if it's HI (else pushed on stack)
		rest on stack
   M32C:	arg1 in r0 if it's QI or HI (else it's pushed on stack)
		rest on stack

   Structs are not passed in registers, even if they fit.  Only
   integer and pointer types are passed in registers.

   Note that when arg1 doesn't fit in r1, arg2 may still be passed in
   r2 if it fits.  */
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG m32c_function_arg
static rtx
m32c_function_arg (CUMULATIVE_ARGS * ca,
		   enum machine_mode mode, const_tree type, bool named)
{
  /* Can return a reg, parallel, or 0 for stack */
  rtx rv = NULL_RTX;
#if DEBUG0
  fprintf (stderr, "func_arg %d (%s, %d)\n",
	   ca->parm_num, mode_name[mode], named);
  debug_tree (type);
#endif

  /* VOIDmode marks the end of the argument list; return a non-null
     dummy.  NOTE(review): presumably so the caller treats it as
     "handled" -- confirm against users of this hook.  */
  if (mode == VOIDmode)
    return GEN_INT (0);

  /* The hidden struct-return slot and all unnamed (variadic)
     arguments go on the stack.  */
  if (ca->force_mem || !named)
    {
#if DEBUG0
      fprintf (stderr, "func arg: force %d named %d, mem\n", ca->force_mem,
	       named);
#endif
      return NULL_RTX;
    }

  /* NOTE(review): this condition can never hold -- a type cannot be
     both integral and a pointer.  The block comment suggests the
     intent was to stack-pass types that are *neither*; changing it
     would alter the ABI, so it is only flagged here.  */
  if (type && INTEGRAL_TYPE_P (type) && POINTER_TYPE_P (type))
    return NULL_RTX;

  /* Aggregates always go on the stack, even when they would fit.  */
  if (type && AGGREGATE_TYPE_P (type))
    return NULL_RTX;

  switch (ca->parm_num)
    {
    case 1:
      /* First argument: r1 on A16, r0 on A24, if it is 1 or 2 bytes.  */
      if (GET_MODE_SIZE (mode) == 1 || GET_MODE_SIZE (mode) == 2)
	rv = gen_rtx_REG (mode, TARGET_A16 ? R1_REGNO : R0_REGNO);
      break;

    case 2:
      /* Second argument: r2, A16 only, 2-byte values only.  */
      if (TARGET_A16 && GET_MODE_SIZE (mode) == 2)
	rv = gen_rtx_REG (mode, R2_REGNO);
      break;
    }

#if DEBUG0
  debug_rtx (rv);
#endif
  return rv;
}
1586
1587 #undef TARGET_PASS_BY_REFERENCE
1588 #define TARGET_PASS_BY_REFERENCE m32c_pass_by_reference
1589 static bool
1590 m32c_pass_by_reference (CUMULATIVE_ARGS * ca ATTRIBUTE_UNUSED,
1591 enum machine_mode mode ATTRIBUTE_UNUSED,
1592 const_tree type ATTRIBUTE_UNUSED,
1593 bool named ATTRIBUTE_UNUSED)
1594 {
1595 return 0;
1596 }
1597
1598 /* Implements INIT_CUMULATIVE_ARGS. */
1599 void
1600 m32c_init_cumulative_args (CUMULATIVE_ARGS * ca,
1601 tree fntype,
1602 rtx libname ATTRIBUTE_UNUSED,
1603 tree fndecl,
1604 int n_named_args ATTRIBUTE_UNUSED)
1605 {
1606 if (fntype && aggregate_value_p (TREE_TYPE (fntype), fndecl))
1607 ca->force_mem = 1;
1608 else
1609 ca->force_mem = 0;
1610 ca->parm_num = 1;
1611 }
1612
1613 /* Implements TARGET_FUNCTION_ARG_ADVANCE. force_mem is set for
1614 functions returning structures, so we always reset that. Otherwise,
1615 we only need to know the sequence number of the argument to know what
1616 to do with it. */
1617 #undef TARGET_FUNCTION_ARG_ADVANCE
1618 #define TARGET_FUNCTION_ARG_ADVANCE m32c_function_arg_advance
1619 static void
1620 m32c_function_arg_advance (CUMULATIVE_ARGS * ca,
1621 enum machine_mode mode ATTRIBUTE_UNUSED,
1622 const_tree type ATTRIBUTE_UNUSED,
1623 bool named ATTRIBUTE_UNUSED)
1624 {
1625 if (ca->force_mem)
1626 ca->force_mem = 0;
1627 else
1628 ca->parm_num++;
1629 }
1630
1631 /* Implements TARGET_FUNCTION_ARG_BOUNDARY. */
1632 #undef TARGET_FUNCTION_ARG_BOUNDARY
1633 #define TARGET_FUNCTION_ARG_BOUNDARY m32c_function_arg_boundary
1634 static unsigned int
1635 m32c_function_arg_boundary (enum machine_mode mode ATTRIBUTE_UNUSED,
1636 const_tree type ATTRIBUTE_UNUSED)
1637 {
1638 return (TARGET_A16 ? 8 : 16);
1639 }
1640
1641 /* Implements FUNCTION_ARG_REGNO_P. */
1642 int
1643 m32c_function_arg_regno_p (int r)
1644 {
1645 if (TARGET_A24)
1646 return (r == R0_REGNO);
1647 return (r == R1_REGNO || r == R2_REGNO);
1648 }
1649
1650 /* HImode and PSImode are the two "native" modes as far as GCC is
1651 concerned, but the chips also support a 32-bit mode which is used
1652 for some opcodes in R8C/M16C and for reset vectors and such. */
1653 #undef TARGET_VALID_POINTER_MODE
1654 #define TARGET_VALID_POINTER_MODE m32c_valid_pointer_mode
1655 static bool
1656 m32c_valid_pointer_mode (enum machine_mode mode)
1657 {
1658 if (mode == HImode
1659 || mode == PSImode
1660 || mode == SImode
1661 )
1662 return 1;
1663 return 0;
1664 }
1665
1666 /* How Scalar Function Values Are Returned */
1667
/* Implements TARGET_LIBCALL_VALUE.  Most values are returned in $r0, or some
   combination of registers starting there (r2r0 for longs, r3r1r2r0
   for long long, r3r2r1r0 for doubles), except that that ABI
   currently doesn't work because it ends up using all available
   general registers and gcc often can't compile it.  So, instead, we
   return anything bigger than 16 bits in "mem0" (effectively, a
   memory location).  */

#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE m32c_libcall_value

static rtx
m32c_libcall_value (enum machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
{
  /* return reg or parallel */
#if 0
  /* FIXME: GCC has difficulty returning large values in registers,
     because that ties up most of the general registers and gives the
     register allocator little to work with.  Until we can resolve
     this, large values are returned in memory.  */
  if (mode == DFmode)
    {
      rtx rv;

      rv = gen_rtx_PARALLEL (mode, rtvec_alloc (4));
      XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R0_REGNO),
					      GEN_INT (0));
      XVECEXP (rv, 0, 1) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R1_REGNO),
					      GEN_INT (2));
      XVECEXP (rv, 0, 2) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R2_REGNO),
					      GEN_INT (4));
      XVECEXP (rv, 0, 3) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R3_REGNO),
					      GEN_INT (6));
      return rv;
    }

  if (TARGET_A24 && GET_MODE_SIZE (mode) > 2)
    {
      rtx rv;

      rv = gen_rtx_PARALLEL (mode, rtvec_alloc (1));
      XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (mode,
							   R0_REGNO),
					      GEN_INT (0));
      return rv;
    }
#endif

  /* 8- and 16-bit values come back in $r0; anything wider lands in
     the mem0 pseudo-register.  */
  if (GET_MODE_SIZE (mode) > 2)
    return gen_rtx_REG (mode, MEM0_REGNO);
  return gen_rtx_REG (mode, R0_REGNO);
}
1729
1730 /* Implements TARGET_FUNCTION_VALUE. Functions and libcalls have the same
1731 conventions. */
1732
1733 #undef TARGET_FUNCTION_VALUE
1734 #define TARGET_FUNCTION_VALUE m32c_function_value
1735
1736 static rtx
1737 m32c_function_value (const_tree valtype,
1738 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
1739 bool outgoing ATTRIBUTE_UNUSED)
1740 {
1741 /* return reg or parallel */
1742 const enum machine_mode mode = TYPE_MODE (valtype);
1743 return m32c_libcall_value (mode, NULL_RTX);
1744 }
1745
1746 /* Implements TARGET_FUNCTION_VALUE_REGNO_P. */
1747
1748 #undef TARGET_FUNCTION_VALUE_REGNO_P
1749 #define TARGET_FUNCTION_VALUE_REGNO_P m32c_function_value_regno_p
1750
1751 static bool
1752 m32c_function_value_regno_p (const unsigned int regno)
1753 {
1754 return (regno == R0_REGNO || regno == MEM0_REGNO);
1755 }
1756
1757 /* How Large Values Are Returned */
1758
1759 /* We return structures by pushing the address on the stack, even if
1760 we use registers for the first few "real" arguments. */
1761 #undef TARGET_STRUCT_VALUE_RTX
1762 #define TARGET_STRUCT_VALUE_RTX m32c_struct_value_rtx
1763 static rtx
1764 m32c_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
1765 int incoming ATTRIBUTE_UNUSED)
1766 {
1767 return 0;
1768 }
1769
1770 /* Function Entry and Exit */
1771
1772 /* Implements EPILOGUE_USES. Interrupts restore all registers. */
1773 int
1774 m32c_epilogue_uses (int regno ATTRIBUTE_UNUSED)
1775 {
1776 if (cfun->machine->is_interrupt)
1777 return 1;
1778 return 0;
1779 }
1780
1781 /* Implementing the Varargs Macros */
1782
1783 #undef TARGET_STRICT_ARGUMENT_NAMING
1784 #define TARGET_STRICT_ARGUMENT_NAMING m32c_strict_argument_naming
1785 static bool
1786 m32c_strict_argument_naming (CUMULATIVE_ARGS * ca ATTRIBUTE_UNUSED)
1787 {
1788 return 1;
1789 }
1790
1791 /* Trampolines for Nested Functions */
1792
1793 /*
1794 m16c:
1795 1 0000 75C43412 mov.w #0x1234,a0
1796 2 0004 FC000000 jmp.a label
1797
1798 m32c:
1799 1 0000 BC563412 mov.l:s #0x123456,a0
1800 2 0004 CC000000 jmp.a label
1801 */
1802
1803 /* Implements TRAMPOLINE_SIZE. */
1804 int
1805 m32c_trampoline_size (void)
1806 {
1807 /* Allocate extra space so we can avoid the messy shifts when we
1808 initialize the trampoline; we just write past the end of the
1809 opcode. */
1810 return TARGET_A16 ? 8 : 10;
1811 }
1812
/* Implements TRAMPOLINE_ALIGNMENT.  Trampolines need only word (2)
   alignment on these chips.  */
int
m32c_trampoline_alignment (void)
{
  return 2;
}
1819
/* Implements TARGET_TRAMPOLINE_INIT.  Fills in the trampoline block
   M_TRAMP with the opcode sequence shown in the comment above: a
   "mov #chain,a0" followed by a "jmp.a function".  */

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT m32c_trampoline_init
static void
m32c_trampoline_init (rtx m_tramp, tree fndecl, rtx chainval)
{
  rtx function = XEXP (DECL_RTL (fndecl), 0);

/* Convenience accessor for a MODE-sized piece of the trampoline at
   byte offset I.  */
#define A0(m,i) adjust_address (m_tramp, m, i)
  if (TARGET_A16)
    {
      /* Note: we subtract a "word" because the moves want signed
	 constants, not unsigned constants.  */
      emit_move_insn (A0 (HImode, 0), GEN_INT (0xc475 - 0x10000));
      emit_move_insn (A0 (HImode, 2), chainval);
      emit_move_insn (A0 (QImode, 4), GEN_INT (0xfc - 0x100));
      /* We use 16-bit addresses here, but store the zero to turn it
	 into a 24-bit offset.  */
      emit_move_insn (A0 (HImode, 5), function);
      emit_move_insn (A0 (QImode, 7), GEN_INT (0x00));
    }
  else
    {
      /* Note that the PSI moves actually write 4 bytes.  Make sure we
	 write stuff out in the right order, and leave room for the
	 extra byte at the end.  */
      emit_move_insn (A0 (QImode, 0), GEN_INT (0xbc - 0x100));
      emit_move_insn (A0 (PSImode, 1), chainval);
      emit_move_insn (A0 (QImode, 4), GEN_INT (0xcc - 0x100));
      emit_move_insn (A0 (PSImode, 5), function);
    }
#undef A0
}
1854
1855 /* Implicit Calls to Library Routines */
1856
/* Implements TARGET_INIT_LIBFUNCS.  Installs the A24-specific cstore
   expanders.  */
#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS m32c_init_libfuncs
static void
m32c_init_libfuncs (void)
{
  /* We do this because the M32C has an HImode operand, but the
     M16C has an 8-bit operand.  Since gcc looks at the match data
     and not the expanded rtl, we have to reset the optab so that
     the right modes are found.  */
  if (TARGET_A24)
    {
      set_optab_handler (cstore_optab, QImode, CODE_FOR_cstoreqi4_24);
      set_optab_handler (cstore_optab, HImode, CODE_FOR_cstorehi4_24);
      set_optab_handler (cstore_optab, PSImode, CODE_FOR_cstorepsi4_24);
    }
}
1873
1874 /* Addressing Modes */
1875
/* The r8c/m32c family supports a wide range of non-orthogonal
   addressing modes, including the ability to double-indirect on *some*
   of them.  Not all insns support all modes, either, but we rely on
   predicates and constraints to deal with that.  */
#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P m32c_legitimate_address_p
bool
m32c_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
{
  int mode_adjust;
  /* All constants are valid (absolute) addresses.  */
  if (CONSTANT_P (x))
    return 1;

  /* The address expression itself must be the chip's pointer width.  */
  if (TARGET_A16 && GET_MODE (x) != HImode && GET_MODE (x) != SImode)
    return 0;
  if (TARGET_A24 && GET_MODE (x) != PSImode)
    return 0;

  /* Wide references to memory will be split after reload, so we must
     ensure that all parts of such splits remain legitimate
     addresses.  */
  mode_adjust = GET_MODE_SIZE (mode) - 1;

  /* allowing PLUS yields mem:HI(plus:SI(mem:SI(plus:SI in m32c_split_move */
  if (GET_CODE (x) == PRE_DEC
      || GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_MODIFY)
    {
      /* Auto-modify addressing is valid only on the stack pointer.  */
      return (GET_CODE (XEXP (x, 0)) == REG
	      && REGNO (XEXP (x, 0)) == SP_REGNO);
    }

#if 0
  /* This is the double indirection detection, but it currently
     doesn't work as cleanly as this code implies, so until we've had
     a chance to debug it, leave it disabled.  */
  if (TARGET_A24 && GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) != PLUS)
    {
#if DEBUG_DOUBLE
      fprintf (stderr, "double indirect\n");
#endif
      x = XEXP (x, 0);
    }
#endif

  encode_pattern (x);
  if (RTX_IS ("r"))
    {
      /* Most indexable registers can be used without displacements,
	 although some of them will be emitted with an explicit zero
	 to please the assembler.  */
      switch (REGNO (patternr[0]))
	{
	case A1_REGNO:
	case SB_REGNO:
	case FB_REGNO:
	case SP_REGNO:
	  if (TARGET_A16 && GET_MODE (x) == SImode)
	    return 0;
	  /* FALLTHRU */
	case A0_REGNO:
	  return 1;

	default:
	  if (IS_PSEUDO (patternr[0], strict))
	    return 1;
	  return 0;
	}
    }

  if (TARGET_A16 && GET_MODE (x) == SImode)
    return 0;

  if (RTX_IS ("+ri"))
    {
      /* This is more interesting, because different base registers
	 allow for different displacements - both range and signedness
	 - and it differs from chip series to chip series too.  */
      int rn = REGNO (patternr[1]);
      HOST_WIDE_INT offs = INTVAL (patternr[2]);
      switch (rn)
	{
	case A0_REGNO:
	case A1_REGNO:
	case SB_REGNO:
	  /* The syntax only allows positive offsets, but when the
	     offsets span the entire memory range, we can simulate
	     negative offsets by wrapping.  */
	  if (TARGET_A16)
	    return (offs >= -65536 && offs <= 65535 - mode_adjust);
	  if (rn == SB_REGNO)
	    return (offs >= 0 && offs <= 65535 - mode_adjust);
	  /* A0 or A1 */
	  return (offs >= -16777216 && offs <= 16777215);

	case FB_REGNO:
	  if (TARGET_A16)
	    return (offs >= -128 && offs <= 127 - mode_adjust);
	  return (offs >= -65536 && offs <= 65535 - mode_adjust);

	case SP_REGNO:
	  return (offs >= -128 && offs <= 127 - mode_adjust);

	default:
	  if (IS_PSEUDO (patternr[1], strict))
	    return 1;
	  return 0;
	}
    }
  if (RTX_IS ("+rs") || RTX_IS ("+r+si"))
    {
      rtx reg = patternr[1];

      /* We don't know where the symbol is, so only allow base
	 registers which support displacements spanning the whole
	 address range.  */
      switch (REGNO (reg))
	{
	case A0_REGNO:
	case A1_REGNO:
	  /* $sb needs a secondary reload, but since it's involved in
	     memory address reloads too, we don't deal with it very
	     well.  */
	  /* case SB_REGNO: */
	  return 1;
	default:
	  if (IS_PSEUDO (reg, strict))
	    return 1;
	  return 0;
	}
    }
  return 0;
}
2007
2008 /* Implements REG_OK_FOR_BASE_P. */
2009 int
2010 m32c_reg_ok_for_base_p (rtx x, int strict)
2011 {
2012 if (GET_CODE (x) != REG)
2013 return 0;
2014 switch (REGNO (x))
2015 {
2016 case A0_REGNO:
2017 case A1_REGNO:
2018 case SB_REGNO:
2019 case FB_REGNO:
2020 case SP_REGNO:
2021 return 1;
2022 default:
2023 if (IS_PSEUDO (x, strict))
2024 return 1;
2025 return 0;
2026 }
2027 }
2028
2029 /* We have three choices for choosing fb->aN offsets. If we choose -128,
2030 we need one MOVA -128[fb],aN opcode and 16-bit aN displacements,
2031 like this:
2032 EB 4B FF mova -128[$fb],$a0
2033 D8 0C FF FF mov.w:Q #0,-1[$a0]
2034
2035 Alternately, we subtract the frame size, and hopefully use 8-bit aN
2036 displacements:
2037 7B F4 stc $fb,$a0
2038 77 54 00 01 sub #256,$a0
2039 D8 08 01 mov.w:Q #0,1[$a0]
2040
2041 If we don't offset (i.e. offset by zero), we end up with:
2042 7B F4 stc $fb,$a0
2043 D8 0C 00 FF mov.w:Q #0,-256[$a0]
2044
2045 We have to subtract *something* so that we have a PLUS rtx to mark
2046 that we've done this reload. The -128 offset will never result in
2047 an 8-bit aN offset, and the payoff for the second case is five
2048 loads *if* those loads are within 256 bytes of the other end of the
2049 frame, so the third case seems best. Note that we subtract the
2050 zero, but detect that in the addhi3 pattern. */
2051
2052 #define BIG_FB_ADJ 0
2053
/* Implements LEGITIMIZE_ADDRESS.  The only address we really have to
   worry about is frame base offsets, as $fb has a limited
   displacement range.  We deal with this by attempting to reload $fb
   itself into an address register; that seems to result in the best
   code.  */
#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS m32c_legitimize_address
static rtx
m32c_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			 enum machine_mode mode)
{
#if DEBUG0
  fprintf (stderr, "m32c_legitimize_address for mode %s\n", mode_name[mode]);
  debug_rtx (x);
  fprintf (stderr, "\n");
#endif

  /* $fb plus a displacement outside the signed 8-bit range supported
     for this access size: copy $fb into a fresh pseudo (reloaded
     into an address register) and rebase the address on that.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && REGNO (XEXP (x, 0)) == FB_REGNO
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && (INTVAL (XEXP (x, 1)) < -128
	  || INTVAL (XEXP (x, 1)) > (128 - GET_MODE_SIZE (mode))))
    {
      /* reload FB to A_REGS */
      rtx temp = gen_reg_rtx (Pmode);
      x = copy_rtx (x);
      emit_insn (gen_rtx_SET (VOIDmode, temp, XEXP (x, 0)));
      XEXP (x, 0) = temp;
    }

  return x;
}
2087
/* Implements LEGITIMIZE_RELOAD_ADDRESS.  See comment above.  Returns
   nonzero when *X has been rewritten and a reload pushed for it.  */
int
m32c_legitimize_reload_address (rtx * x,
				enum machine_mode mode,
				int opnum,
				int type, int ind_levels ATTRIBUTE_UNUSED)
{
#if DEBUG0
  fprintf (stderr, "\nm32c_legitimize_reload_address for mode %s\n",
	   mode_name[mode]);
  debug_rtx (*x);
#endif

  /* At one point, this function tried to get $fb copied to an address
     register, which in theory would maximize sharing, but gcc was
     *also* still trying to reload the whole address, and we'd run out
     of address registers.  So we let gcc do the naive (but safe)
     reload instead, when the above function doesn't handle it for
     us.

     The code below is a second attempt at the above.  */

  /* $fb + out-of-range displacement: reload "$fb + adjustment" into
     an address register, leaving the rest as the displacement.  */
  if (GET_CODE (*x) == PLUS
      && GET_CODE (XEXP (*x, 0)) == REG
      && REGNO (XEXP (*x, 0)) == FB_REGNO
      && GET_CODE (XEXP (*x, 1)) == CONST_INT
      && (INTVAL (XEXP (*x, 1)) < -128
	  || INTVAL (XEXP (*x, 1)) > (128 - GET_MODE_SIZE (mode))))
    {
      rtx sum;
      int offset = INTVAL (XEXP (*x, 1));
      int adjustment = -BIG_FB_ADJ;

      sum = gen_rtx_PLUS (Pmode, XEXP (*x, 0),
			  GEN_INT (adjustment));
      *x = gen_rtx_PLUS (Pmode, sum, GEN_INT (offset - adjustment));
      if (type == RELOAD_OTHER)
	type = RELOAD_FOR_OTHER_ADDRESS;
      push_reload (sum, NULL_RTX, &XEXP (*x, 0), NULL,
		   A_REGS, Pmode, VOIDmode, 0, 0, opnum,
		   (enum reload_type) type);
      return 1;
    }

  /* ($fb + const) + const, as produced by the case above: reload the
     inner sum into an address register.  */
  if (GET_CODE (*x) == PLUS
      && GET_CODE (XEXP (*x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (*x, 0), 0)) == REG
      && REGNO (XEXP (XEXP (*x, 0), 0)) == FB_REGNO
      && GET_CODE (XEXP (XEXP (*x, 0), 1)) == CONST_INT
      && GET_CODE (XEXP (*x, 1)) == CONST_INT
      )
    {
      if (type == RELOAD_OTHER)
	type = RELOAD_FOR_OTHER_ADDRESS;
      push_reload (XEXP (*x, 0), NULL_RTX, &XEXP (*x, 0), NULL,
		   A_REGS, Pmode, VOIDmode, 0, 0, opnum,
		   (enum reload_type) type);
      return 1;
    }

  return 0;
}
2150
2151 /* Return the appropriate mode for a named address pointer. */
2152 #undef TARGET_ADDR_SPACE_POINTER_MODE
2153 #define TARGET_ADDR_SPACE_POINTER_MODE m32c_addr_space_pointer_mode
2154 static enum machine_mode
2155 m32c_addr_space_pointer_mode (addr_space_t addrspace)
2156 {
2157 switch (addrspace)
2158 {
2159 case ADDR_SPACE_GENERIC:
2160 return TARGET_A24 ? PSImode : HImode;
2161 case ADDR_SPACE_FAR:
2162 return SImode;
2163 default:
2164 gcc_unreachable ();
2165 }
2166 }
2167
2168 /* Return the appropriate mode for a named address address. */
2169 #undef TARGET_ADDR_SPACE_ADDRESS_MODE
2170 #define TARGET_ADDR_SPACE_ADDRESS_MODE m32c_addr_space_address_mode
2171 static enum machine_mode
2172 m32c_addr_space_address_mode (addr_space_t addrspace)
2173 {
2174 switch (addrspace)
2175 {
2176 case ADDR_SPACE_GENERIC:
2177 return TARGET_A24 ? PSImode : HImode;
2178 case ADDR_SPACE_FAR:
2179 return SImode;
2180 default:
2181 gcc_unreachable ();
2182 }
2183 }
2184
/* Like m32c_legitimate_address_p, except with named addresses.  */
#undef TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P
#define TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P \
  m32c_addr_space_legitimate_address_p
static bool
m32c_addr_space_legitimate_address_p (enum machine_mode mode, rtx x,
				      bool strict, addr_space_t as)
{
  if (as == ADDR_SPACE_FAR)
    {
      /* NOTE(review): far addresses are rejected wholesale on A24
	 chips here -- confirm this is intentional.  */
      if (TARGET_A24)
	return 0;
      encode_pattern (x);
      /* A bare SImode register: only $a0, or a pseudo.  */
      if (RTX_IS ("r"))
	{
	  if (GET_MODE (x) != SImode)
	    return 0;
	  switch (REGNO (patternr[0]))
	    {
	    case A0_REGNO:
	      return 1;

	    default:
	      if (IS_PSEUDO (patternr[0], strict))
		return 1;
	      return 0;
	    }
	}
      /* An HImode base register plus a constant displacement.  */
      if (RTX_IS ("+^Sri"))
	{
	  int rn = REGNO (patternr[3]);
	  HOST_WIDE_INT offs = INTVAL (patternr[4]);
	  if (GET_MODE (patternr[3]) != HImode)
	    return 0;
	  switch (rn)
	    {
	    case A0_REGNO:
	      /* Only positive offsets within the 20-bit far range.  */
	      return (offs >= 0 && offs <= 0xfffff);

	    default:
	      if (IS_PSEUDO (patternr[3], strict))
		return 1;
	      return 0;
	    }
	}
      /* An HImode base register plus a symbol.  */
      if (RTX_IS ("+^Srs"))
	{
	  int rn = REGNO (patternr[3]);
	  if (GET_MODE (patternr[3]) != HImode)
	    return 0;
	  switch (rn)
	    {
	    case A0_REGNO:
	      return 1;

	    default:
	      if (IS_PSEUDO (patternr[3], strict))
		return 1;
	      return 0;
	    }
	}
      /* An HImode (base register + symbol) plus a constant.  */
      if (RTX_IS ("+^S+ris"))
	{
	  int rn = REGNO (patternr[4]);
	  if (GET_MODE (patternr[4]) != HImode)
	    return 0;
	  switch (rn)
	    {
	    case A0_REGNO:
	      return 1;

	    default:
	      if (IS_PSEUDO (patternr[4], strict))
		return 1;
	      return 0;
	    }
	}
      /* A bare symbol reference is always a valid far address.  */
      if (RTX_IS ("s"))
	{
	  return 1;
	}
      return 0;
    }

  else if (as != ADDR_SPACE_GENERIC)
    gcc_unreachable ();

  /* Generic space: defer to the ordinary legitimacy test.  */
  return m32c_legitimate_address_p (mode, x, strict);
}
2274
2275 /* Like m32c_legitimate_address, except with named address support. */
2276 #undef TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS
2277 #define TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS m32c_addr_space_legitimize_address
2278 static rtx
2279 m32c_addr_space_legitimize_address (rtx x, rtx oldx, enum machine_mode mode,
2280 addr_space_t as)
2281 {
2282 if (as != ADDR_SPACE_GENERIC)
2283 {
2284 #if DEBUG0
2285 fprintf (stderr, "\033[36mm32c_addr_space_legitimize_address for mode %s\033[0m\n", mode_name[mode]);
2286 debug_rtx (x);
2287 fprintf (stderr, "\n");
2288 #endif
2289
2290 if (GET_CODE (x) != REG)
2291 {
2292 x = force_reg (SImode, x);
2293 }
2294 return x;
2295 }
2296
2297 return m32c_legitimize_address (x, oldx, mode);
2298 }
2299
2300 /* Determine if one named address space is a subset of another. */
2301 #undef TARGET_ADDR_SPACE_SUBSET_P
2302 #define TARGET_ADDR_SPACE_SUBSET_P m32c_addr_space_subset_p
2303 static bool
2304 m32c_addr_space_subset_p (addr_space_t subset, addr_space_t superset)
2305 {
2306 gcc_assert (subset == ADDR_SPACE_GENERIC || subset == ADDR_SPACE_FAR);
2307 gcc_assert (superset == ADDR_SPACE_GENERIC || superset == ADDR_SPACE_FAR);
2308
2309 if (subset == superset)
2310 return true;
2311
2312 else
2313 return (subset == ADDR_SPACE_GENERIC && superset == ADDR_SPACE_FAR);
2314 }
2315
2316 #undef TARGET_ADDR_SPACE_CONVERT
2317 #define TARGET_ADDR_SPACE_CONVERT m32c_addr_space_convert
2318 /* Convert from one address space to another. */
2319 static rtx
2320 m32c_addr_space_convert (rtx op, tree from_type, tree to_type)
2321 {
2322 addr_space_t from_as = TYPE_ADDR_SPACE (TREE_TYPE (from_type));
2323 addr_space_t to_as = TYPE_ADDR_SPACE (TREE_TYPE (to_type));
2324 rtx result;
2325
2326 gcc_assert (from_as == ADDR_SPACE_GENERIC || from_as == ADDR_SPACE_FAR);
2327 gcc_assert (to_as == ADDR_SPACE_GENERIC || to_as == ADDR_SPACE_FAR);
2328
2329 if (to_as == ADDR_SPACE_GENERIC && from_as == ADDR_SPACE_FAR)
2330 {
2331 /* This is unpredictable, as we're truncating off usable address
2332 bits. */
2333
2334 result = gen_reg_rtx (HImode);
2335 emit_move_insn (result, simplify_subreg (HImode, op, SImode, 0));
2336 return result;
2337 }
2338 else if (to_as == ADDR_SPACE_FAR && from_as == ADDR_SPACE_GENERIC)
2339 {
2340 /* This always works. */
2341 result = gen_reg_rtx (SImode);
2342 emit_insn (gen_zero_extendhisi2 (result, op));
2343 return result;
2344 }
2345 else
2346 gcc_unreachable ();
2347 }
2348
2349 /* Condition Code Status */
2350
2351 #undef TARGET_FIXED_CONDITION_CODE_REGS
2352 #define TARGET_FIXED_CONDITION_CODE_REGS m32c_fixed_condition_code_regs
2353 static bool
2354 m32c_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
2355 {
2356 *p1 = FLG_REGNO;
2357 *p2 = INVALID_REGNUM;
2358 return true;
2359 }
2360
2361 /* Describing Relative Costs of Operations */
2362
2363 /* Implements TARGET_REGISTER_MOVE_COST. We make impossible moves
2364 prohibitively expensive, like trying to put QIs in r2/r3 (there are
2365 no opcodes to do that). We also discourage use of mem* registers
2366 since they're really memory. */
2367
2368 #undef TARGET_REGISTER_MOVE_COST
2369 #define TARGET_REGISTER_MOVE_COST m32c_register_move_cost
2370
2371 static int
2372 m32c_register_move_cost (enum machine_mode mode, reg_class_t from,
2373 reg_class_t to)
2374 {
2375 int cost = COSTS_N_INSNS (3);
2376 HARD_REG_SET cc;
2377
2378 /* FIXME: pick real values, but not 2 for now. */
2379 COPY_HARD_REG_SET (cc, reg_class_contents[(int) from]);
2380 IOR_HARD_REG_SET (cc, reg_class_contents[(int) to]);
2381
2382 if (mode == QImode
2383 && hard_reg_set_intersect_p (cc, reg_class_contents[R23_REGS]))
2384 {
2385 if (hard_reg_set_subset_p (cc, reg_class_contents[R23_REGS]))
2386 cost = COSTS_N_INSNS (1000);
2387 else
2388 cost = COSTS_N_INSNS (80);
2389 }
2390
2391 if (!class_can_hold_mode (from, mode) || !class_can_hold_mode (to, mode))
2392 cost = COSTS_N_INSNS (1000);
2393
2394 if (reg_classes_intersect_p (from, CR_REGS))
2395 cost += COSTS_N_INSNS (5);
2396
2397 if (reg_classes_intersect_p (to, CR_REGS))
2398 cost += COSTS_N_INSNS (5);
2399
2400 if (from == MEM_REGS || to == MEM_REGS)
2401 cost += COSTS_N_INSNS (50);
2402 else if (reg_classes_intersect_p (from, MEM_REGS)
2403 || reg_classes_intersect_p (to, MEM_REGS))
2404 cost += COSTS_N_INSNS (10);
2405
2406 #if DEBUG0
2407 fprintf (stderr, "register_move_cost %s from %s to %s = %d\n",
2408 mode_name[mode], class_names[(int) from], class_names[(int) to],
2409 cost);
2410 #endif
2411 return cost;
2412 }
2413
2414 /* Implements TARGET_MEMORY_MOVE_COST. */
2415
2416 #undef TARGET_MEMORY_MOVE_COST
2417 #define TARGET_MEMORY_MOVE_COST m32c_memory_move_cost
2418
2419 static int
2420 m32c_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2421 reg_class_t rclass ATTRIBUTE_UNUSED,
2422 bool in ATTRIBUTE_UNUSED)
2423 {
2424 /* FIXME: pick real values. */
2425 return COSTS_N_INSNS (10);
2426 }
2427
/* Here we try to describe when we use multiple opcodes for one RTX so
   that gcc knows when to use them.  Note that *TOTAL is accumulated
   into, not assigned; returning true stops recursion into X.  */
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS m32c_rtx_costs
static bool
m32c_rtx_costs (rtx x, int code, int outer_code, int *total,
		bool speed ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case REG:
      /* The mem0..mem7 "registers" are really memory; make them very
	 expensive to keep values out of them.  */
      if (REGNO (x) >= MEM0_REGNO && REGNO (x) <= MEM7_REGNO)
	*total += COSTS_N_INSNS (500);
      else
	*total += COSTS_N_INSNS (1);
      return true;

    case ASHIFT:
    case LSHIFTRT:
    case ASHIFTRT:
      /* Variable shifts need the count moved into r1h first.  */
      if (GET_CODE (XEXP (x, 1)) != CONST_INT)
	{
	  /* mov.b r1l, r1h */
	  *total += COSTS_N_INSNS (1);
	  return true;
	}
      /* Counts outside [-8, 8] can't be immediate shift operands and
	 need two extra moves.  */
      if (INTVAL (XEXP (x, 1)) > 8
	  || INTVAL (XEXP (x, 1)) < -8)
	{
	  /* mov.b #N, r1l */
	  /* mov.b r1l, r1h */
	  *total += COSTS_N_INSNS (2);
	  return true;
	}
      /* Small constant shifts cost nothing extra here.  */
      return true;

    case LE:
    case LEU:
    case LT:
    case LTU:
    case GT:
    case GTU:
    case GE:
    case GEU:
    case NE:
    case EQ:
      /* A comparison producing a value (rather than a branch) takes a
	 couple of insns.  */
      if (outer_code == SET)
	{
	  *total += COSTS_N_INSNS (2);
	  return true;
	}
      break;

    case ZERO_EXTRACT:
      {
	/* Bit operations: cost depends on the addressing mode of the
	   extracted operand.  NOTE(review): XEXP (dest, 0) is taken
	   unconditionally — this assumes dest is a MEM; confirm.  */
	rtx dest = XEXP (x, 0);
	rtx addr = XEXP (dest, 0);
	switch (GET_CODE (addr))
	  {
	  case CONST_INT:
	    /* Absolute address.  */
	    *total += COSTS_N_INSNS (1);
	    break;
	  case SYMBOL_REF:
	    *total += COSTS_N_INSNS (3);
	    break;
	  default:
	    *total += COSTS_N_INSNS (2);
	    break;
	  }
	return true;
      }
      break;

    default:
      /* Reasonable default.  32-bit values are costly on A16 parts,
	 which only have 16-bit data paths.  */
      if (TARGET_A16 && GET_MODE(x) == SImode)
	*total += COSTS_N_INSNS (2);
      break;
    }
  return false;
}
2509
2510 #undef TARGET_ADDRESS_COST
2511 #define TARGET_ADDRESS_COST m32c_address_cost
2512 static int
2513 m32c_address_cost (rtx addr, bool speed ATTRIBUTE_UNUSED)
2514 {
2515 int i;
2516 /* fprintf(stderr, "\naddress_cost\n");
2517 debug_rtx(addr);*/
2518 switch (GET_CODE (addr))
2519 {
2520 case CONST_INT:
2521 i = INTVAL (addr);
2522 if (i == 0)
2523 return COSTS_N_INSNS(1);
2524 if (0 < i && i <= 255)
2525 return COSTS_N_INSNS(2);
2526 if (0 < i && i <= 65535)
2527 return COSTS_N_INSNS(3);
2528 return COSTS_N_INSNS(4);
2529 case SYMBOL_REF:
2530 return COSTS_N_INSNS(4);
2531 case REG:
2532 return COSTS_N_INSNS(1);
2533 case PLUS:
2534 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
2535 {
2536 i = INTVAL (XEXP (addr, 1));
2537 if (i == 0)
2538 return COSTS_N_INSNS(1);
2539 if (0 < i && i <= 255)
2540 return COSTS_N_INSNS(2);
2541 if (0 < i && i <= 65535)
2542 return COSTS_N_INSNS(3);
2543 }
2544 return COSTS_N_INSNS(4);
2545 default:
2546 return 0;
2547 }
2548 }
2549
2550 /* Defining the Output Assembler Language */
2551
2552 /* The Overall Framework of an Assembler File */
2553
2554 #undef TARGET_HAVE_NAMED_SECTIONS
2555 #define TARGET_HAVE_NAMED_SECTIONS true
2556
2557 /* Output of Data */
2558
2559 /* We may have 24 bit sizes, which is the native address size.
2560 Currently unused, but provided for completeness. */
2561 #undef TARGET_ASM_INTEGER
2562 #define TARGET_ASM_INTEGER m32c_asm_integer
2563 static bool
2564 m32c_asm_integer (rtx x, unsigned int size, int aligned_p)
2565 {
2566 switch (size)
2567 {
2568 case 3:
2569 fprintf (asm_out_file, "\t.3byte\t");
2570 output_addr_const (asm_out_file, x);
2571 fputc ('\n', asm_out_file);
2572 return true;
2573 case 4:
2574 if (GET_CODE (x) == SYMBOL_REF)
2575 {
2576 fprintf (asm_out_file, "\t.long\t");
2577 output_addr_const (asm_out_file, x);
2578 fputc ('\n', asm_out_file);
2579 return true;
2580 }
2581 break;
2582 }
2583 return default_assemble_integer (x, size, aligned_p);
2584 }
2585
/* Output of Assembler Instructions */

/* We use a lookup table because the addressing modes are non-orthogonal.  */

/* Each entry maps an (operand letter CODE, encoded operand PATTERN)
   pair to an output FORMAT.  PATTERN is matched against the string
   produced by encode_pattern ().  Within FORMAT (interpreted by
   m32c_print_operand): a digit N prints patternr[N]; 'z' inserts a
   zero displacement when the base register requires one; '\\' quotes
   the next character; anything else is emitted literally.  */
static struct
{
  char code;
  char const *pattern;
  char const *format;
}
const conversions[] = {
  { 0, "r", "0" },

  { 0, "mr", "z[1]" },
  { 0, "m+ri", "3[2]" },
  { 0, "m+rs", "3[2]" },
  { 0, "m+^Zrs", "5[4]" },
  { 0, "m+^Zri", "5[4]" },
  { 0, "m+^Z+ris", "7+6[5]" },
  { 0, "m+^Srs", "5[4]" },
  { 0, "m+^Sri", "5[4]" },
  { 0, "m+^S+ris", "7+6[5]" },
  { 0, "m+r+si", "4+5[2]" },
  { 0, "ms", "1" },
  { 0, "mi", "1" },
  { 0, "m+si", "2+3" },

  { 0, "mmr", "[z[2]]" },
  { 0, "mm+ri", "[4[3]]" },
  { 0, "mm+rs", "[4[3]]" },
  { 0, "mm+r+si", "[5+6[3]]" },
  { 0, "mms", "[[2]]" },
  { 0, "mmi", "[[2]]" },
  { 0, "mm+si", "[4[3]]" },

  { 0, "i", "#0" },
  { 0, "s", "#0" },
  { 0, "+si", "#1+2" },
  { 0, "l", "#0" },

  { 'l', "l", "0" },
  { 'd', "i", "0" },
  { 'd', "s", "0" },
  { 'd', "+si", "1+2" },
  { 'D', "i", "0" },
  { 'D', "s", "0" },
  { 'D', "+si", "1+2" },
  { 'x', "i", "#0" },
  { 'X', "i", "#0" },
  { 'm', "i", "#0" },
  { 'b', "i", "#0" },
  { 'B', "i", "0" },
  { 'p', "i", "0" },

  { 0, 0, 0 }
};
2642
/* This is in order according to the bitfield that pushm/popm use:
   bit 0 of the operand byte selects fb, bit 7 selects r0 (see the 'p'
   case in m32c_print_operand, which walks bits 7..0).  */
static char const *pushm_regs[] = {
  "fb", "sb", "a1", "a0", "r3", "r2", "r1", "r0"
};
2647
/* Implements PRINT_OPERAND.  CODE is the operand-letter from the
   insn template ('\0' when none).  Letters handled up front either
   rewrite X (subword selection) or print directly and return; the
   remainder are resolved through the conversions[] table, keyed on
   CODE and the encode_pattern () string for X.  */
void
m32c_print_operand (FILE * file, rtx x, int code)
{
  int i, j, b;
  const char *comma;
  HOST_WIDE_INT ival;
  int unsigned_const = 0;
  int force_sign;

  /* Multiplies; constants are converted to sign-extended format but
     we need unsigned, so 'u' and 'U' tell us what size unsigned we
     need.  */
  if (code == 'u')
    {
      unsigned_const = 2;
      code = 0;
    }
  if (code == 'U')
    {
      unsigned_const = 1;
      code = 0;
    }
  /* This one is only for debugging; you can put it in a pattern to
     force this error.  */
  if (code == '!')
    {
      fprintf (stderr, "dj: unreviewed pattern:");
      if (current_output_insn)
	debug_rtx (current_output_insn);
      gcc_unreachable ();
    }
  /* PSImode operations are either .w or .l depending on the target.  */
  if (code == '&')
    {
      if (TARGET_A16)
	fprintf (file, "w");
      else
	fprintf (file, "l");
      return;
    }
  /* Inverted conditionals.  */
  if (code == 'C')
    {
      switch (GET_CODE (x))
	{
	case LE:
	  fputs ("gt", file);
	  break;
	case LEU:
	  fputs ("gtu", file);
	  break;
	case LT:
	  fputs ("ge", file);
	  break;
	case LTU:
	  fputs ("geu", file);
	  break;
	case GT:
	  fputs ("le", file);
	  break;
	case GTU:
	  fputs ("leu", file);
	  break;
	case GE:
	  fputs ("lt", file);
	  break;
	case GEU:
	  fputs ("ltu", file);
	  break;
	case NE:
	  fputs ("eq", file);
	  break;
	case EQ:
	  fputs ("ne", file);
	  break;
	default:
	  gcc_unreachable ();
	}
      return;
    }
  /* Regular conditionals.  */
  if (code == 'c')
    {
      switch (GET_CODE (x))
	{
	case LE:
	  fputs ("le", file);
	  break;
	case LEU:
	  fputs ("leu", file);
	  break;
	case LT:
	  fputs ("lt", file);
	  break;
	case LTU:
	  fputs ("ltu", file);
	  break;
	case GT:
	  fputs ("gt", file);
	  break;
	case GTU:
	  fputs ("gtu", file);
	  break;
	case GE:
	  fputs ("ge", file);
	  break;
	case GEU:
	  fputs ("geu", file);
	  break;
	case NE:
	  fputs ("ne", file);
	  break;
	case EQ:
	  fputs ("eq", file);
	  break;
	default:
	  gcc_unreachable ();
	}
      return;
    }
  /* Used in negsi2 to do HImode ops on the two parts of an SImode
     operand.  'h' selects the low half, 'H' the high half.  */
  if (code == 'h' && GET_MODE (x) == SImode)
    {
      x = m32c_subreg (HImode, x, SImode, 0);
      code = 0;
    }
  if (code == 'H' && GET_MODE (x) == SImode)
    {
      x = m32c_subreg (HImode, x, SImode, 2);
      code = 0;
    }
  if (code == 'h' && GET_MODE (x) == HImode)
    {
      x = m32c_subreg (QImode, x, HImode, 0);
      code = 0;
    }
  if (code == 'H' && GET_MODE (x) == HImode)
    {
      /* We can't actually represent this as an rtx.  Do it here.  */
      if (GET_CODE (x) == REG)
	{
	  switch (REGNO (x))
	    {
	    case R0_REGNO:
	      fputs ("r0h", file);
	      return;
	    case R1_REGNO:
	      fputs ("r1h", file);
	      return;
	    default:
	      gcc_unreachable();
	    }
	}
      /* This should be a MEM.  */
      x = m32c_subreg (QImode, x, HImode, 1);
      code = 0;
    }
  /* This is for BMcond, which always wants word register names.  */
  if (code == 'h' && GET_MODE (x) == QImode)
    {
      if (GET_CODE (x) == REG)
	x = gen_rtx_REG (HImode, REGNO (x));
      code = 0;
    }
  /* 'x' and 'X' need to be ignored for non-immediates.  */
  if ((code == 'x' || code == 'X') && GET_CODE (x) != CONST_INT)
    code = 0;

  /* Fall through to the table-driven printer: find the first entry
     whose code and encoded pattern match, then interpret its format
     string (see the comment on conversions[]).  */
  encode_pattern (x);
  force_sign = 0;
  for (i = 0; conversions[i].pattern; i++)
    if (conversions[i].code == code
	&& streq (conversions[i].pattern, pattern))
      {
	for (j = 0; conversions[i].format[j]; j++)
	  /* backslash quotes the next character in the output pattern.  */
	  if (conversions[i].format[j] == '\\')
	    {
	      fputc (conversions[i].format[j + 1], file);
	      j++;
	    }
	  /* Digits in the output pattern indicate that the
	     corresponding RTX is to be output at that point.  */
	  else if (ISDIGIT (conversions[i].format[j]))
	    {
	      rtx r = patternr[conversions[i].format[j] - '0'];
	      switch (GET_CODE (r))
		{
		case REG:
		  fprintf (file, "%s",
			   reg_name_with_mode (REGNO (r), GET_MODE (r)));
		  break;
		case CONST_INT:
		  switch (code)
		    {
		    case 'b':
		    case 'B':
		      {
			/* NOTE: this `i' deliberately shadows the
			   outer table index; it holds the bit
			   position of the (possibly inverted) mask.  */
			int v = INTVAL (r);
			int i = (int) exact_log2 (v);
			if (i == -1)
			  i = (int) exact_log2 ((v ^ 0xffff) & 0xffff);
			if (i == -1)
			  i = (int) exact_log2 ((v ^ 0xff) & 0xff);
			/* Bit position.  */
			fprintf (file, "%d", i);
		      }
		      break;
		    case 'x':
		      /* Unsigned byte.  */
		      fprintf (file, HOST_WIDE_INT_PRINT_HEX,
			       INTVAL (r) & 0xff);
		      break;
		    case 'X':
		      /* Unsigned word.  */
		      fprintf (file, HOST_WIDE_INT_PRINT_HEX,
			       INTVAL (r) & 0xffff);
		      break;
		    case 'p':
		      /* pushm and popm encode a register set into a single byte.  */
		      comma = "";
		      for (b = 7; b >= 0; b--)
			if (INTVAL (r) & (1 << b))
			  {
			    fprintf (file, "%s%s", comma, pushm_regs[b]);
			    comma = ",";
			  }
		      break;
		    case 'm':
		      /* "Minus".  Output -X  */
		      ival = (-INTVAL (r) & 0xffff);
		      if (ival & 0x8000)
			ival = ival - 0x10000;
		      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
		      break;
		    default:
		      ival = INTVAL (r);
		      if (conversions[i].format[j + 1] == '[' && ival < 0)
			{
			  /* We can simulate negative displacements by
			     taking advantage of address space
			     wrapping when the offset can span the
			     entire address range.  */
			  rtx base =
			    patternr[conversions[i].format[j + 2] - '0'];
			  if (GET_CODE (base) == REG)
			    switch (REGNO (base))
			      {
			      case A0_REGNO:
			      case A1_REGNO:
				if (TARGET_A24)
				  ival = 0x1000000 + ival;
				else
				  ival = 0x10000 + ival;
				break;
			      case SB_REGNO:
				if (TARGET_A16)
				  ival = 0x10000 + ival;
				break;
			      }
			}
		      else if (code == 'd' && ival < 0 && j == 0)
			/* The "mova" opcode is used to do addition by
			   computing displacements, but again, we need
			   displacements to be unsigned *if* they're
			   the only component of the displacement
			   (i.e. no "symbol-4" type displacement).  */
			ival = (TARGET_A24 ? 0x1000000 : 0x10000) + ival;

		      if (conversions[i].format[j] == '0')
			{
			  /* More conversions to unsigned.  */
			  if (unsigned_const == 2)
			    ival &= 0xffff;
			  if (unsigned_const == 1)
			    ival &= 0xff;
			}
		      if (streq (conversions[i].pattern, "mi")
			  || streq (conversions[i].pattern, "mmi"))
			{
			  /* Integers used as addresses are unsigned.  */
			  ival &= (TARGET_A24 ? 0xffffff : 0xffff);
			}
		      /* force_sign is set below when a '+' separator
			 preceded this constant; emit it explicitly
			 for non-negative values.  */
		      if (force_sign && ival >= 0)
			fputc ('+', file);
		      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
		      break;
		    }
		  break;
		case CONST_DOUBLE:
		  /* We don't have const_double constants.  If it
		     happens, make it obvious.  */
		  fprintf (file, "[const_double 0x%lx]",
			   (unsigned long) CONST_DOUBLE_HIGH (r));
		  break;
		case SYMBOL_REF:
		  assemble_name (file, XSTR (r, 0));
		  break;
		case LABEL_REF:
		  output_asm_label (r);
		  break;
		default:
		  fprintf (stderr, "don't know how to print this operand:");
		  debug_rtx (r);
		  gcc_unreachable ();
		}
	    }
	  else
	    {
	      if (conversions[i].format[j] == 'z')
		{
		  /* Some addressing modes *must* have a displacement,
		     so insert a zero here if needed.  */
		  int k;
		  for (k = j + 1; conversions[i].format[k]; k++)
		    if (ISDIGIT (conversions[i].format[k]))
		      {
			rtx reg = patternr[conversions[i].format[k] - '0'];
			if (GET_CODE (reg) == REG
			    && (REGNO (reg) == SB_REGNO
				|| REGNO (reg) == FB_REGNO
				|| REGNO (reg) == SP_REGNO))
			  fputc ('0', file);
		      }
		  continue;
		}
	      /* Signed displacements off symbols need to have signs
	         blended cleanly.  */
	      if (conversions[i].format[j] == '+'
		  && (!code || code == 'D' || code == 'd')
		  && ISDIGIT (conversions[i].format[j + 1])
		  && (GET_CODE (patternr[conversions[i].format[j + 1] - '0'])
		      == CONST_INT))
		{
		  force_sign = 1;
		  continue;
		}
	      fputc (conversions[i].format[j], file);
	    }
	break;
      }
  /* No table entry matched: report it (to stderr and into the
     assembly, so the failure is visible) rather than crashing.  */
  if (!conversions[i].pattern)
    {
      fprintf (stderr, "unconvertible operand %c `%s'", code ? code : '-',
	       pattern);
      debug_rtx (x);
      fprintf (file, "[%c.%s]", code ? code : '-', pattern);
    }

  return;
}
3001
/* Implements PRINT_OPERAND_PUNCT_VALID_P.  See m32c_print_operand
   above for descriptions of what these do.  */
int
m32c_print_operand_punct_valid_p (int c)
{
  /* Only '&' (PSImode size suffix) and '!' (debug trap) are valid
     bare punctuation codes.  */
  return (c == '&' || c == '!') ? 1 : 0;
}
3011
/* Implements PRINT_OPERAND_ADDRESS.  Nothing unusual here.  Strips a
   MEM wrapper if present, then reuses the operand printer with the
   default (0) conversion code.  */
void
m32c_print_operand_address (FILE * stream, rtx address)
{
  if (GET_CODE (address) == MEM)
    address = XEXP (address, 0);
  else
    /* cf: gcc.dg/asm-4.c.  */
    gcc_assert (GET_CODE (address) == REG);

  m32c_print_operand (stream, address, 0);
}
3024
3025 /* Implements ASM_OUTPUT_REG_PUSH. Control registers are pushed
3026 differently than general registers. */
3027 void
3028 m32c_output_reg_push (FILE * s, int regno)
3029 {
3030 if (regno == FLG_REGNO)
3031 fprintf (s, "\tpushc\tflg\n");
3032 else
3033 fprintf (s, "\tpush.%c\t%s\n",
3034 " bwll"[reg_push_size (regno)], reg_names[regno]);
3035 }
3036
3037 /* Likewise for ASM_OUTPUT_REG_POP. */
3038 void
3039 m32c_output_reg_pop (FILE * s, int regno)
3040 {
3041 if (regno == FLG_REGNO)
3042 fprintf (s, "\tpopc\tflg\n");
3043 else
3044 fprintf (s, "\tpop.%c\t%s\n",
3045 " bwll"[reg_push_size (regno)], reg_names[regno]);
3046 }
3047
3048 /* Defining target-specific uses of `__attribute__' */
3049
/* Used to simplify the logic below.  Find the attributes wherever
   they may be: on types directly, or on decls (falling back to the
   decl's type when the decl itself has none).

   The whole replacement list is parenthesized so the conditional
   expression cannot re-associate with operators at the expansion
   site (CERT PRE02-C); previously the expansion was bare.  */
#define M32C_ATTRIBUTES(decl) \
  (TYPE_P (decl) ? TYPE_ATTRIBUTES (decl) \
   : DECL_ATTRIBUTES (decl) \
     ? DECL_ATTRIBUTES (decl) \
     : TYPE_ATTRIBUTES (TREE_TYPE (decl)))
3057
3058 /* Returns TRUE if the given tree has the "interrupt" attribute. */
3059 static int
3060 interrupt_p (tree node ATTRIBUTE_UNUSED)
3061 {
3062 tree list = M32C_ATTRIBUTES (node);
3063 while (list)
3064 {
3065 if (is_attribute_p ("interrupt", TREE_PURPOSE (list)))
3066 return 1;
3067 list = TREE_CHAIN (list);
3068 }
3069 return fast_interrupt_p (node);
3070 }
3071
3072 /* Returns TRUE if the given tree has the "bank_switch" attribute. */
3073 static int
3074 bank_switch_p (tree node ATTRIBUTE_UNUSED)
3075 {
3076 tree list = M32C_ATTRIBUTES (node);
3077 while (list)
3078 {
3079 if (is_attribute_p ("bank_switch", TREE_PURPOSE (list)))
3080 return 1;
3081 list = TREE_CHAIN (list);
3082 }
3083 return 0;
3084 }
3085
3086 /* Returns TRUE if the given tree has the "fast_interrupt" attribute. */
3087 static int
3088 fast_interrupt_p (tree node ATTRIBUTE_UNUSED)
3089 {
3090 tree list = M32C_ATTRIBUTES (node);
3091 while (list)
3092 {
3093 if (is_attribute_p ("fast_interrupt", TREE_PURPOSE (list)))
3094 return 1;
3095 list = TREE_CHAIN (list);
3096 }
3097 return 0;
3098 }
3099
/* Attribute handler for "interrupt", "bank_switch" and
   "fast_interrupt" (see m32c_attribute_table).  No validation is
   performed here and the attribute is always accepted; its presence
   is tested later by interrupt_p () and friends.  */
static tree
interrupt_handler (tree * node ATTRIBUTE_UNUSED,
		   tree name ATTRIBUTE_UNUSED,
		   tree args ATTRIBUTE_UNUSED,
		   int flags ATTRIBUTE_UNUSED,
		   bool * no_add_attrs ATTRIBUTE_UNUSED)
{
  return NULL_TREE;
}
3109
3110 /* Returns TRUE if given tree has the "function_vector" attribute. */
3111 int
3112 m32c_special_page_vector_p (tree func)
3113 {
3114 tree list;
3115
3116 if (TREE_CODE (func) != FUNCTION_DECL)
3117 return 0;
3118
3119 list = M32C_ATTRIBUTES (func);
3120 while (list)
3121 {
3122 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
3123 return 1;
3124 list = TREE_CHAIN (list);
3125 }
3126 return 0;
3127 }
3128
/* Attribute handler for "function_vector".  Rejects the attribute
   (with a warning, by setting *NO_ADD_ATTRS) when it is unsupported
   on the target, applied to a non-function, or given an argument that
   is not an integer constant in [18, 255]; otherwise it is kept.  */
static tree
function_vector_handler (tree * node ATTRIBUTE_UNUSED,
			 tree name ATTRIBUTE_UNUSED,
			 tree args ATTRIBUTE_UNUSED,
			 int flags ATTRIBUTE_UNUSED,
			 bool * no_add_attrs ATTRIBUTE_UNUSED)
{
  if (TARGET_R8C)
    {
      /* The attribute is not supported for R8C target.  */
      warning (OPT_Wattributes,
	       "%qE attribute is not supported for R8C target",
	       name);
      *no_add_attrs = true;
    }
  else if (TREE_CODE (*node) != FUNCTION_DECL)
    {
      /* The attribute must be applied to functions only.  */
      warning (OPT_Wattributes,
	       "%qE attribute applies only to functions",
	       name);
      *no_add_attrs = true;
    }
  else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
    {
      /* The argument must be a constant integer.  */
      warning (OPT_Wattributes,
	       "%qE attribute argument not an integer constant",
	       name);
      *no_add_attrs = true;
    }
  else if (TREE_INT_CST_LOW (TREE_VALUE (args)) < 18
	   || TREE_INT_CST_LOW (TREE_VALUE (args)) > 255)
    {
      /* The argument value must be between 18 to 255.
	 NOTE(review): TREE_INT_CST_LOW is unsigned, so negative
	 arguments wrap to huge values and are caught by the > 255
	 test rather than the < 18 test.  */
      warning (OPT_Wattributes,
	       "%qE attribute argument should be between 18 to 255",
	       name);
      *no_add_attrs = true;
    }
  return NULL_TREE;
}
3171
3172 /* If the function is assigned the attribute 'function_vector', it
3173 returns the function vector number, otherwise returns zero. */
3174 int
3175 current_function_special_page_vector (rtx x)
3176 {
3177 int num;
3178
3179 if ((GET_CODE(x) == SYMBOL_REF)
3180 && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
3181 {
3182 tree list;
3183 tree t = SYMBOL_REF_DECL (x);
3184
3185 if (TREE_CODE (t) != FUNCTION_DECL)
3186 return 0;
3187
3188 list = M32C_ATTRIBUTES (t);
3189 while (list)
3190 {
3191 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
3192 {
3193 num = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list)));
3194 return num;
3195 }
3196
3197 list = TREE_CHAIN (list);
3198 }
3199
3200 return 0;
3201 }
3202 else
3203 return 0;
3204 }
3205
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE m32c_attribute_table
/* Machine attributes.  Fields follow struct attribute_spec (confirm
   against tree.h for this GCC version): name, min args, max args,
   decl required, type required, function-type required, handler,
   affects type identity.  "function_vector" takes exactly one
   argument (the vector number) and requires a decl; the interrupt
   variants take none.  */
static const struct attribute_spec m32c_attribute_table[] = {
  {"interrupt", 0, 0, false, false, false, interrupt_handler, false},
  {"bank_switch", 0, 0, false, false, false, interrupt_handler, false},
  {"fast_interrupt", 0, 0, false, false, false, interrupt_handler, false},
  {"function_vector", 1, 1, true, false, false, function_vector_handler,
   false},
  {0, 0, 0, 0, 0, 0, 0, false}
};
3216
#undef TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES m32c_comp_type_attributes
/* All attribute combinations are treated as compatible on m32c.  */
static int
m32c_comp_type_attributes (const_tree type1 ATTRIBUTE_UNUSED,
			   const_tree type2 ATTRIBUTE_UNUSED)
{
  /* 0=incompatible 1=compatible 2=warning */
  return 1;
}
3226
3227 #undef TARGET_INSERT_ATTRIBUTES
3228 #define TARGET_INSERT_ATTRIBUTES m32c_insert_attributes
3229 static void
3230 m32c_insert_attributes (tree node ATTRIBUTE_UNUSED,
3231 tree * attr_ptr ATTRIBUTE_UNUSED)
3232 {
3233 unsigned addr;
3234 /* See if we need to make #pragma address variables volatile. */
3235
3236 if (TREE_CODE (node) == VAR_DECL)
3237 {
3238 const char *name = IDENTIFIER_POINTER (DECL_NAME (node));
3239 if (m32c_get_pragma_address (name, &addr))
3240 {
3241 TREE_THIS_VOLATILE (node) = true;
3242 }
3243 }
3244 }
3245
3246
/* One "#pragma address" association: VARNAME is to be placed at the
   fixed location ADDRESS.  GTY-marked because entries live in a
   garbage-collected hash table and hold a GC-allocated string.  */
struct GTY(()) pragma_entry {
  const char *varname;
  unsigned address;
};
typedef struct pragma_entry pragma_entry;

/* Hash table of pragma info.  Keyed by variable name; see
   pragma_entry_hash and pragma_entry_eq.  Created lazily by
   m32c_note_pragma_address.  */
static GTY((param_is (pragma_entry))) htab_t pragma_htab;
3255
3256 static int
3257 pragma_entry_eq (const void *p1, const void *p2)
3258 {
3259 const pragma_entry *old = (const pragma_entry *) p1;
3260 const char *new_name = (const char *) p2;
3261
3262 return strcmp (old->varname, new_name) == 0;
3263 }
3264
3265 static hashval_t
3266 pragma_entry_hash (const void *p)
3267 {
3268 const pragma_entry *old = (const pragma_entry *) p;
3269 return htab_hash_string (old->varname);
3270 }
3271
/* Record that VARNAME was assigned the fixed location ADDRESS via
   "#pragma address".  The table is created on first use; a repeated
   pragma for the same name overwrites the previous address.  */
void
m32c_note_pragma_address (const char *varname, unsigned address)
{
  pragma_entry **slot;

  /* Lazily create the GC-managed hash table.  */
  if (!pragma_htab)
    pragma_htab = htab_create_ggc (31, pragma_entry_hash,
				   pragma_entry_eq, NULL);

  slot = (pragma_entry **)
    htab_find_slot_with_hash (pragma_htab, varname,
			      htab_hash_string (varname), INSERT);

  if (!*slot)
    {
      /* First occurrence of this name: allocate and key the entry.  */
      *slot = ggc_alloc_pragma_entry ();
      (*slot)->varname = ggc_strdup (varname);
    }
  (*slot)->address = address;
}
3292
3293 static bool
3294 m32c_get_pragma_address (const char *varname, unsigned *address)
3295 {
3296 pragma_entry **slot;
3297
3298 if (!pragma_htab)
3299 return false;
3300
3301 slot = (pragma_entry **)
3302 htab_find_slot_with_hash (pragma_htab, varname,
3303 htab_hash_string (varname), NO_INSERT);
3304 if (slot && *slot)
3305 {
3306 *address = (*slot)->address;
3307 return true;
3308 }
3309 return false;
3310 }
3311
/* Emit a common symbol, except that variables placed with "#pragma
   address" are emitted as a plain assembler constant assignment to
   their fixed location instead.  */
void
m32c_output_aligned_common (FILE *stream, tree decl ATTRIBUTE_UNUSED,
			    const char *name,
			    int size, int align, int global)
{
  unsigned address;

  if (m32c_get_pragma_address (name, &address))
    {
      /* We never output these as global.  */
      assemble_name (stream, name);
      fprintf (stream, " = 0x%04x\n", address);
      return;
    }
  /* Emit ".local" first so a non-global symbol stays local
     (presumably gas's ".comm" would otherwise export it — confirm).  */
  if (!global)
    {
      fprintf (stream, "\t.local\t");
      assemble_name (stream, name);
      fprintf (stream, "\n");
    }
  fprintf (stream, "\t.comm\t");
  assemble_name (stream, name);
  fprintf (stream, ",%u,%u\n", size, align / BITS_PER_UNIT);
}
3336
/* Predicates */

/* This is a list of legal subregs of hard regs.  Consulted by
   m32c_illegal_subreg_p: outer_mode_size/inner_mode_size are the
   subreg's and full register's sizes in bytes; bit N of byte_mask is
   set when SUBREG_BYTE == N is legal; legal_when is 1 (always),
   16 (TARGET_A16 only) or 24 (TARGET_A24 only).  */
static const struct {
  unsigned char outer_mode_size;
  unsigned char inner_mode_size;
  unsigned char byte_mask;
  unsigned char legal_when;
  unsigned int regno;
} legal_subregs[] = {
  {1, 2, 0x03, 1, R0_REGNO}, /* r0h r0l */
  {1, 2, 0x03, 1, R1_REGNO}, /* r1h r1l */
  {1, 2, 0x01, 1, A0_REGNO},
  {1, 2, 0x01, 1, A1_REGNO},

  {1, 4, 0x01, 1, A0_REGNO},
  {1, 4, 0x01, 1, A1_REGNO},

  {2, 4, 0x05, 1, R0_REGNO}, /* r2 r0 */
  {2, 4, 0x05, 1, R1_REGNO}, /* r3 r1 */
  {2, 4, 0x05, 16, A0_REGNO}, /* a1 a0 */
  {2, 4, 0x01, 24, A0_REGNO}, /* a1 a0 */
  {2, 4, 0x01, 24, A1_REGNO}, /* a1 a0 */

  {4, 8, 0x55, 1, R0_REGNO}, /* r3 r1 r2 r0 */
};
3363
/* Returns TRUE if OP is a subreg of a hard reg which we don't
   support.  We also bail on MEMs with illegal addresses.  */
bool
m32c_illegal_subreg_p (rtx op)
{
  int offset;
  unsigned int i;
  int src_mode, dest_mode;

  /* A MEM whose address we cannot emit is also rejected.  */
  if (GET_CODE (op) == MEM
      && ! m32c_legitimate_address_p (Pmode, XEXP (op, 0), false))
    {
      return true;
    }

  if (GET_CODE (op) != SUBREG)
    return false;

  dest_mode = GET_MODE (op);
  offset = SUBREG_BYTE (op);
  op = SUBREG_REG (op);
  src_mode = GET_MODE (op);

  /* Same-size subregs are plain mode changes; always fine.  */
  if (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (src_mode))
    return false;
  if (GET_CODE (op) != REG)
    return false;
  /* Only the low hard registers are restricted; mem* registers and
     pseudos take any subreg.  */
  if (REGNO (op) >= MEM0_REGNO)
    return false;

  /* Turn the byte offset into a single bit, for matching against
     legal_subregs[].byte_mask.  */
  offset = (1 << offset);

  for (i = 0; i < ARRAY_SIZE (legal_subregs); i ++)
    if (legal_subregs[i].outer_mode_size == GET_MODE_SIZE (dest_mode)
	&& legal_subregs[i].regno == REGNO (op)
	&& legal_subregs[i].inner_mode_size == GET_MODE_SIZE (src_mode)
	&& legal_subregs[i].byte_mask & offset)
      {
	switch (legal_subregs[i].legal_when)
	  {
	  case 1:
	    /* Legal on every target.  */
	    return false;
	  case 16:
	    /* Legal only on 16-bit-address parts.  */
	    if (TARGET_A16)
	      return false;
	    break;
	  case 24:
	    /* Legal only on 24-bit-address parts.  */
	    if (TARGET_A24)
	      return false;
	    break;
	  }
      }
  /* No table entry permits this subreg on this target.  */
  return true;
}
3418
3419 /* Returns TRUE if we support a move between the first two operands.
3420 At the moment, we just want to discourage mem to mem moves until
3421 after reload, because reload has a hard time with our limited
3422 number of address registers, and we can get into a situation where
3423 we need three of them when we only have two. */
3424 bool
3425 m32c_mov_ok (rtx * operands, enum machine_mode mode ATTRIBUTE_UNUSED)
3426 {
3427 rtx op0 = operands[0];
3428 rtx op1 = operands[1];
3429
3430 if (TARGET_A24)
3431 return true;
3432
3433 #define DEBUG_MOV_OK 0
3434 #if DEBUG_MOV_OK
3435 fprintf (stderr, "m32c_mov_ok %s\n", mode_name[mode]);
3436 debug_rtx (op0);
3437 debug_rtx (op1);
3438 #endif
3439
3440 if (GET_CODE (op0) == SUBREG)
3441 op0 = XEXP (op0, 0);
3442 if (GET_CODE (op1) == SUBREG)
3443 op1 = XEXP (op1, 0);
3444
3445 if (GET_CODE (op0) == MEM
3446 && GET_CODE (op1) == MEM
3447 && ! reload_completed)
3448 {
3449 #if DEBUG_MOV_OK
3450 fprintf (stderr, " - no, mem to mem\n");
3451 #endif
3452 return false;
3453 }
3454
3455 #if DEBUG_MOV_OK
3456 fprintf (stderr, " - ok\n");
3457 #endif
3458 return true;
3459 }
3460
3461 /* Returns TRUE if two consecutive HImode mov instructions, generated
3462 for moving an immediate double data to a double data type variable
3463 location, can be combined into single SImode mov instruction. */
3464 bool
3465 m32c_immd_dbl_mov (rtx * operands,
3466 enum machine_mode mode ATTRIBUTE_UNUSED)
3467 {
3468 int flag = 0, okflag = 0, offset1 = 0, offset2 = 0, offsetsign = 0;
3469 const char *str1;
3470 const char *str2;
3471
3472 if (GET_CODE (XEXP (operands[0], 0)) == SYMBOL_REF
3473 && MEM_SCALAR_P (operands[0])
3474 && !MEM_IN_STRUCT_P (operands[0])
3475 && GET_CODE (XEXP (operands[2], 0)) == CONST
3476 && GET_CODE (XEXP (XEXP (operands[2], 0), 0)) == PLUS
3477 && GET_CODE (XEXP (XEXP (XEXP (operands[2], 0), 0), 0)) == SYMBOL_REF
3478 && GET_CODE (XEXP (XEXP (XEXP (operands[2], 0), 0), 1)) == CONST_INT
3479 && MEM_SCALAR_P (operands[2])
3480 && !MEM_IN_STRUCT_P (operands[2]))
3481 flag = 1;
3482
3483 else if (GET_CODE (XEXP (operands[0], 0)) == CONST
3484 && GET_CODE (XEXP (XEXP (operands[0], 0), 0)) == PLUS
3485 && GET_CODE (XEXP (XEXP (XEXP (operands[0], 0), 0), 0)) == SYMBOL_REF
3486 && MEM_SCALAR_P (operands[0])
3487 && !MEM_IN_STRUCT_P (operands[0])
3488 && !(INTVAL (XEXP (XEXP (XEXP (operands[0], 0), 0), 1)) %4)
3489 && GET_CODE (XEXP (operands[2], 0)) == CONST
3490 && GET_CODE (XEXP (XEXP (operands[2], 0), 0)) == PLUS
3491 && GET_CODE (XEXP (XEXP (XEXP (operands[2], 0), 0), 0)) == SYMBOL_REF
3492 && MEM_SCALAR_P (operands[2])
3493 && !MEM_IN_STRUCT_P (operands[2]))
3494 flag = 2;
3495
3496 else if (GET_CODE (XEXP (operands[0], 0)) == PLUS
3497 && GET_CODE (XEXP (XEXP (operands[0], 0), 0)) == REG
3498 && REGNO (XEXP (XEXP (operands[0], 0), 0)) == FB_REGNO
3499 && GET_CODE (XEXP (XEXP (operands[0], 0), 1)) == CONST_INT
3500 && MEM_SCALAR_P (operands[0])
3501 && !MEM_IN_STRUCT_P (operands[0])
3502 && !(INTVAL (XEXP (XEXP (operands[0], 0), 1)) %4)
3503 && REGNO (XEXP (XEXP (operands[2], 0), 0)) == FB_REGNO
3504 && GET_CODE (XEXP (XEXP (operands[2], 0), 1)) == CONST_INT
3505 && MEM_SCALAR_P (operands[2])
3506 && !MEM_IN_STRUCT_P (operands[2]))
3507 flag = 3;
3508
3509 else
3510 return false;
3511
3512 switch (flag)
3513 {
3514 case 1:
3515 str1 = XSTR (XEXP (operands[0], 0), 0);
3516 str2 = XSTR (XEXP (XEXP (XEXP (operands[2], 0), 0), 0), 0);
3517 if (strcmp (str1, str2) == 0)
3518 okflag = 1;
3519 else
3520 okflag = 0;
3521 break;
3522 case 2:
3523 str1 = XSTR (XEXP (XEXP (XEXP (operands[0], 0), 0), 0), 0);
3524 str2 = XSTR (XEXP (XEXP (XEXP (operands[2], 0), 0), 0), 0);
3525 if (strcmp(str1,str2) == 0)
3526 okflag = 1;
3527 else
3528 okflag = 0;
3529 break;
3530 case 3:
3531 offset1 = INTVAL (XEXP (XEXP (operands[0], 0), 1));
3532 offset2 = INTVAL (XEXP (XEXP (operands[2], 0), 1));
3533 offsetsign = offset1 >> ((sizeof (offset1) * 8) -1);
3534 if (((offset2-offset1) == 2) && offsetsign != 0)
3535 okflag = 1;
3536 else
3537 okflag = 0;
3538 break;
3539 default:
3540 okflag = 0;
3541 }
3542
3543 if (okflag == 1)
3544 {
3545 HOST_WIDE_INT val;
3546 operands[4] = gen_rtx_MEM (SImode, XEXP (operands[0], 0));
3547
3548 val = (INTVAL (operands[3]) << 16) + (INTVAL (operands[1]) & 0xFFFF);
3549 operands[5] = gen_rtx_CONST_INT (VOIDmode, val);
3550
3551 return true;
3552 }
3553
3554 return false;
3555 }
3556
3557 /* Expanders */
3558
/* Subregs are non-orthogonal for us, because our registers are all
   different sizes.  Map (subreg:OUTER (X:INNER) BYTE) onto the rtx
   that actually names the right piece of X, or abort when there is
   no such hard-register piece.  */
static rtx
m32c_subreg (enum machine_mode outer,
	     rtx x, enum machine_mode inner, int byte)
{
  int r, nr = -1;

  /* Converting MEMs to different types that are the same size, we
     just rewrite them.  */
  if (GET_CODE (x) == SUBREG
      && SUBREG_BYTE (x) == 0
      && GET_CODE (SUBREG_REG (x)) == MEM
      && (GET_MODE_SIZE (GET_MODE (x))
	  == GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
    {
      rtx oldx = x;
      x = gen_rtx_MEM (GET_MODE (x), XEXP (SUBREG_REG (x), 0));
      MEM_COPY_ATTRIBUTES (x, SUBREG_REG (oldx));
    }

  /* Push/pop get done as smaller push/pops.  */
  if (GET_CODE (x) == MEM
      && (GET_CODE (XEXP (x, 0)) == PRE_DEC
	  || GET_CODE (XEXP (x, 0)) == POST_INC))
    return gen_rtx_MEM (outer, XEXP (x, 0));
  if (GET_CODE (x) == SUBREG
      && GET_CODE (XEXP (x, 0)) == MEM
      && (GET_CODE (XEXP (XEXP (x, 0), 0)) == PRE_DEC
	  || GET_CODE (XEXP (XEXP (x, 0), 0)) == POST_INC))
    return gen_rtx_MEM (outer, XEXP (XEXP (x, 0), 0));

  /* Non-registers (constants, remaining MEMs) go through the generic
     simplifier.  */
  if (GET_CODE (x) != REG)
    {
      rtx r = simplify_gen_subreg (outer, x, inner, byte);
      if (GET_CODE (r) == SUBREG
	  && GET_CODE (x) == MEM
	  && MEM_VOLATILE_P (x))
	{
	  /* Volatile MEMs don't get simplified, but we need them to
	     be.  We are little endian, so the subreg byte is the
	     offset.  */
	  r = adjust_address_nv (x, outer, byte);
	}
      return r;
    }

  r = REGNO (x);
  /* Pseudos, the arg pointer, and memory-mapped registers are also
     handled generically.  */
  if (r >= FIRST_PSEUDO_REGISTER || r == AP_REGNO)
    return simplify_gen_subreg (outer, x, inner, byte);

  if (IS_MEM_REGNO (r))
    return simplify_gen_subreg (outer, x, inner, byte);

  /* This is where the complexities of our register layout are
     described: which hard register holds byte offset BYTE of R when
     viewed in mode OUTER.  */
  if (byte == 0)
    nr = r;
  else if (outer == HImode)
    {
      if (r == R0_REGNO && byte == 2)
	nr = R2_REGNO;
      else if (r == R0_REGNO && byte == 4)
	nr = R1_REGNO;
      else if (r == R0_REGNO && byte == 6)
	nr = R3_REGNO;
      else if (r == R1_REGNO && byte == 2)
	nr = R3_REGNO;
      else if (r == A0_REGNO && byte == 2)
	nr = A1_REGNO;
    }
  else if (outer == SImode)
    {
      if (r == R0_REGNO && byte == 0)
	nr = R0_REGNO;
      else if (r == R0_REGNO && byte == 4)
	nr = R1_REGNO;
    }
  /* Any other combination has no hard-register name; that indicates a
     caller bug, so dump the rtx and abort.  */
  if (nr == -1)
    {
      fprintf (stderr, "m32c_subreg %s %s %d\n",
	       mode_name[outer], mode_name[inner], byte);
      debug_rtx (x);
      gcc_unreachable ();
    }
  return gen_rtx_REG (outer, nr);
}
3646
3647 /* Used to emit move instructions. We split some moves,
3648 and avoid mem-mem moves. */
3649 int
3650 m32c_prepare_move (rtx * operands, enum machine_mode mode)
3651 {
3652 if (far_addr_space_p (operands[0])
3653 && CONSTANT_P (operands[1]))
3654 {
3655 operands[1] = force_reg (GET_MODE (operands[0]), operands[1]);
3656 }
3657 if (TARGET_A16 && mode == PSImode)
3658 return m32c_split_move (operands, mode, 1);
3659 if ((GET_CODE (operands[0]) == MEM)
3660 && (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY))
3661 {
3662 rtx pmv = XEXP (operands[0], 0);
3663 rtx dest_reg = XEXP (pmv, 0);
3664 rtx dest_mod = XEXP (pmv, 1);
3665
3666 emit_insn (gen_rtx_SET (Pmode, dest_reg, dest_mod));
3667 operands[0] = gen_rtx_MEM (mode, dest_reg);
3668 }
3669 if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
3670 operands[1] = copy_to_mode_reg (mode, operands[1]);
3671 return 0;
3672 }
3673
3674 #define DEBUG_SPLIT 0
3675
3676 /* Returns TRUE if the given PSImode move should be split. We split
3677 for all r8c/m16c moves, since it doesn't support them, and for
3678 POP.L as we can only *push* SImode. */
3679 int
3680 m32c_split_psi_p (rtx * operands)
3681 {
3682 #if DEBUG_SPLIT
3683 fprintf (stderr, "\nm32c_split_psi_p\n");
3684 debug_rtx (operands[0]);
3685 debug_rtx (operands[1]);
3686 #endif
3687 if (TARGET_A16)
3688 {
3689 #if DEBUG_SPLIT
3690 fprintf (stderr, "yes, A16\n");
3691 #endif
3692 return 1;
3693 }
3694 if (GET_CODE (operands[1]) == MEM
3695 && GET_CODE (XEXP (operands[1], 0)) == POST_INC)
3696 {
3697 #if DEBUG_SPLIT
3698 fprintf (stderr, "yes, pop.l\n");
3699 #endif
3700 return 1;
3701 }
3702 #if DEBUG_SPLIT
3703 fprintf (stderr, "no, default\n");
3704 #endif
3705 return 0;
3706 }
3707
/* Split the given move.  SPLIT_ALL is 0 if splitting is optional
   (define_expand), 1 if it is not optional (define_insn_and_split),
   and 3 for define_split (alternate api).  Returns nonzero if the
   move was (or must be) split into SUBMODE-sized pieces.  */
int
m32c_split_move (rtx * operands, enum machine_mode mode, int split_all)
{
  rtx s[4], d[4];
  int parts, si, di, rev = 0;
  int rv = 0, opi = 2;
  enum machine_mode submode = HImode;
  rtx *ops, local_ops[10];

  /* define_split modifies the existing operands, but the other two
     emit new insns.  OPS is where we store the operand pairs, which
     we emit later.  */
  if (split_all == 3)
    ops = operands;
  else
    ops = local_ops;

  /* Else HImode.  */
  if (mode == DImode)
    submode = SImode;

  /* Before splitting mem-mem moves, force one operand into a
     register.  */
  if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
    {
#if DEBUG0
      fprintf (stderr, "force_reg...\n");
      debug_rtx (operands[1]);
#endif
      operands[1] = force_reg (mode, operands[1]);
#if DEBUG0
      debug_rtx (operands[1]);
#endif
    }

  parts = 2;

#if DEBUG_SPLIT
  fprintf (stderr, "\nsplit_move %d all=%d\n", !can_create_pseudo_p (),
	   split_all);
  debug_rtx (operands[0]);
  debug_rtx (operands[1]);
#endif

  /* Note that split_all is not used to select the api after this
     point, so it's safe to set it to 3 even with define_insn.  */
  /* None of the chips can move SI operands to sp-relative addresses,
     so we always split those.  */
  if (m32c_extra_constraint_p (operands[0], 'S', "Ss"))
    split_all = 3;

  /* 16-bit chips cannot reach far address spaces in one move.  */
  if (TARGET_A16
      && (far_addr_space_p (operands[0])
	  || far_addr_space_p (operands[1])))
    split_all |= 1;

  /* We don't need to split these.  */
  if (TARGET_A24
      && split_all != 3
      && (mode == SImode || mode == PSImode)
      && !(GET_CODE (operands[1]) == MEM
	   && GET_CODE (XEXP (operands[1], 0)) == POST_INC))
    return 0;

  /* First, enumerate the subregs we'll be dealing with.  */
  for (si = 0; si < parts; si++)
    {
      d[si] =
	m32c_subreg (submode, operands[0], mode,
		     si * GET_MODE_SIZE (submode));
      s[si] =
	m32c_subreg (submode, operands[1], mode,
		     si * GET_MODE_SIZE (submode));
    }

  /* Split pushes by emitting a sequence of smaller pushes.  */
  if (GET_CODE (d[0]) == MEM && GET_CODE (XEXP (d[0], 0)) == PRE_DEC)
    {
      /* Push the high part first so the low part ends up at the lower
	 address (little endian).  */
      for (si = parts - 1; si >= 0; si--)
	{
	  ops[opi++] = gen_rtx_MEM (submode,
				    gen_rtx_PRE_DEC (Pmode,
						     gen_rtx_REG (Pmode,
								  SP_REGNO)));
	  ops[opi++] = s[si];
	}

      rv = 1;
    }
  /* Likewise for pops.  */
  else if (GET_CODE (s[0]) == MEM && GET_CODE (XEXP (s[0], 0)) == POST_INC)
    {
      for (di = 0; di < parts; di++)
	{
	  ops[opi++] = d[di];
	  ops[opi++] = gen_rtx_MEM (submode,
				    gen_rtx_POST_INC (Pmode,
						      gen_rtx_REG (Pmode,
								   SP_REGNO)));
	}
      rv = 1;
    }
  else if (split_all)
    {
      /* if d[di] == s[si] for any di < si, we'll early clobber. */
      for (di = 0; di < parts - 1; di++)
	for (si = di + 1; si < parts; si++)
	  if (reg_mentioned_p (d[di], s[si]))
	    rev = 1;

      /* Order the sub-moves so no destination half clobbers a source
	 half before it has been read.  */
      if (rev)
	for (si = 0; si < parts; si++)
	  {
	    ops[opi++] = d[si];
	    ops[opi++] = s[si];
	  }
      else
	for (si = parts - 1; si >= 0; si--)
	  {
	    ops[opi++] = d[si];
	    ops[opi++] = s[si];
	  }
      rv = 1;
    }
  /* Now emit any moves we may have accumulated.  Pairs start at index
     2 because ops[0..1] belong to the define_split api.  */
  if (rv && split_all != 3)
    {
      int i;
      for (i = 2; i < opi; i += 2)
	emit_move_insn (ops[i], ops[i + 1]);
    }
  return rv;
}
3844
3845 /* The m32c has a number of opcodes that act like memcpy, strcmp, and
3846 the like. For the R8C they expect one of the addresses to be in
3847 R1L:An so we need to arrange for that. Otherwise, it's just a
3848 matter of picking out the operands we want and emitting the right
3849 pattern for them. All these expanders, which correspond to
3850 patterns in blkmov.md, must return nonzero if they expand the insn,
3851 or zero if they should FAIL. */
3852
3853 /* This is a memset() opcode. All operands are implied, so we need to
3854 arrange for them to be in the right registers. The opcode wants
3855 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3856 the count (HI), and $2 the value (QI). */
3857 int
3858 m32c_expand_setmemhi(rtx *operands)
3859 {
3860 rtx desta, count, val;
3861 rtx desto, counto;
3862
3863 desta = XEXP (operands[0], 0);
3864 count = operands[1];
3865 val = operands[2];
3866
3867 desto = gen_reg_rtx (Pmode);
3868 counto = gen_reg_rtx (HImode);
3869
3870 if (GET_CODE (desta) != REG
3871 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3872 desta = copy_to_mode_reg (Pmode, desta);
3873
3874 /* This looks like an arbitrary restriction, but this is by far the
3875 most common case. For counts 8..14 this actually results in
3876 smaller code with no speed penalty because the half-sized
3877 constant can be loaded with a shorter opcode. */
3878 if (GET_CODE (count) == CONST_INT
3879 && GET_CODE (val) == CONST_INT
3880 && ! (INTVAL (count) & 1)
3881 && (INTVAL (count) > 1)
3882 && (INTVAL (val) <= 7 && INTVAL (val) >= -8))
3883 {
3884 unsigned v = INTVAL (val) & 0xff;
3885 v = v | (v << 8);
3886 count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3887 val = copy_to_mode_reg (HImode, GEN_INT (v));
3888 if (TARGET_A16)
3889 emit_insn (gen_setmemhi_whi_op (desto, counto, val, desta, count));
3890 else
3891 emit_insn (gen_setmemhi_wpsi_op (desto, counto, val, desta, count));
3892 return 1;
3893 }
3894
3895 /* This is the generalized memset() case. */
3896 if (GET_CODE (val) != REG
3897 || REGNO (val) < FIRST_PSEUDO_REGISTER)
3898 val = copy_to_mode_reg (QImode, val);
3899
3900 if (GET_CODE (count) != REG
3901 || REGNO (count) < FIRST_PSEUDO_REGISTER)
3902 count = copy_to_mode_reg (HImode, count);
3903
3904 if (TARGET_A16)
3905 emit_insn (gen_setmemhi_bhi_op (desto, counto, val, desta, count));
3906 else
3907 emit_insn (gen_setmemhi_bpsi_op (desto, counto, val, desta, count));
3908
3909 return 1;
3910 }
3911
/* This is a memcpy() opcode.  All operands are implied, so we need to
   arrange for them to be in the right registers.  The opcode wants
   addresses, not [mem] syntax.  $0 is the destination (MEM:BLK), $1
   is the source (MEM:BLK), and $2 the count (HI).  Returns nonzero
   (expanded) so the pattern does not FAIL.  */
int
m32c_expand_movmemhi(rtx *operands)
{
  rtx desta, srca, count;
  rtx desto, srco, counto;

  desta = XEXP (operands[0], 0);
  srca = XEXP (operands[1], 0);
  count = operands[2];

  /* Pseudos to receive the opcode's post-copy pointer/count values.  */
  desto = gen_reg_rtx (Pmode);
  srco = gen_reg_rtx (Pmode);
  counto = gen_reg_rtx (HImode);

  /* Both addresses must be in (pseudo) registers for the opcode.  */
  if (GET_CODE (desta) != REG
      || REGNO (desta) < FIRST_PSEUDO_REGISTER)
    desta = copy_to_mode_reg (Pmode, desta);

  if (GET_CODE (srca) != REG
      || REGNO (srca) < FIRST_PSEUDO_REGISTER)
    srca = copy_to_mode_reg (Pmode, srca);

  /* Similar to setmem, but we don't need to check the value.  An even
     constant count > 1 can be copied as half as many words.  */
  if (GET_CODE (count) == CONST_INT
      && ! (INTVAL (count) & 1)
      && (INTVAL (count) > 1))
    {
      count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
      if (TARGET_A16)
	emit_insn (gen_movmemhi_whi_op (desto, srco, counto, desta, srca, count));
      else
	emit_insn (gen_movmemhi_wpsi_op (desto, srco, counto, desta, srca, count));
      return 1;
    }

  /* This is the generalized byte-wise memcpy() case with a runtime
     count.  */
  if (GET_CODE (count) != REG
      || REGNO (count) < FIRST_PSEUDO_REGISTER)
    count = copy_to_mode_reg (HImode, count);

  if (TARGET_A16)
    emit_insn (gen_movmemhi_bhi_op (desto, srco, counto, desta, srca, count));
  else
    emit_insn (gen_movmemhi_bpsi_op (desto, srco, counto, desta, srca, count));

  return 1;
}
3963
3964 /* This is a stpcpy() opcode. $0 is the destination (MEM:BLK) after
3965 the copy, which should point to the NUL at the end of the string,
3966 $1 is the destination (MEM:BLK), and $2 is the source (MEM:BLK).
3967 Since our opcode leaves the destination pointing *after* the NUL,
3968 we must emit an adjustment. */
3969 int
3970 m32c_expand_movstr(rtx *operands)
3971 {
3972 rtx desta, srca;
3973 rtx desto, srco;
3974
3975 desta = XEXP (operands[1], 0);
3976 srca = XEXP (operands[2], 0);
3977
3978 desto = gen_reg_rtx (Pmode);
3979 srco = gen_reg_rtx (Pmode);
3980
3981 if (GET_CODE (desta) != REG
3982 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3983 desta = copy_to_mode_reg (Pmode, desta);
3984
3985 if (GET_CODE (srca) != REG
3986 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3987 srca = copy_to_mode_reg (Pmode, srca);
3988
3989 emit_insn (gen_movstr_op (desto, srco, desta, srca));
3990 /* desto ends up being a1, which allows this type of add through MOVA. */
3991 emit_insn (gen_addpsi3 (operands[0], desto, GEN_INT (-1)));
3992
3993 return 1;
3994 }
3995
3996 /* This is a strcmp() opcode. $0 is the destination (HI) which holds
3997 <=>0 depending on the comparison, $1 is one string (MEM:BLK), and
3998 $2 is the other (MEM:BLK). We must do the comparison, and then
3999 convert the flags to a signed integer result. */
4000 int
4001 m32c_expand_cmpstr(rtx *operands)
4002 {
4003 rtx src1a, src2a;
4004
4005 src1a = XEXP (operands[1], 0);
4006 src2a = XEXP (operands[2], 0);
4007
4008 if (GET_CODE (src1a) != REG
4009 || REGNO (src1a) < FIRST_PSEUDO_REGISTER)
4010 src1a = copy_to_mode_reg (Pmode, src1a);
4011
4012 if (GET_CODE (src2a) != REG
4013 || REGNO (src2a) < FIRST_PSEUDO_REGISTER)
4014 src2a = copy_to_mode_reg (Pmode, src2a);
4015
4016 emit_insn (gen_cmpstrhi_op (src1a, src2a, src1a, src2a));
4017 emit_insn (gen_cond_to_int (operands[0]));
4018
4019 return 1;
4020 }
4021
4022
4023 typedef rtx (*shift_gen_func)(rtx, rtx, rtx);
4024
4025 static shift_gen_func
4026 shift_gen_func_for (int mode, int code)
4027 {
4028 #define GFF(m,c,f) if (mode == m && code == c) return f
4029 GFF(QImode, ASHIFT, gen_ashlqi3_i);
4030 GFF(QImode, ASHIFTRT, gen_ashrqi3_i);
4031 GFF(QImode, LSHIFTRT, gen_lshrqi3_i);
4032 GFF(HImode, ASHIFT, gen_ashlhi3_i);
4033 GFF(HImode, ASHIFTRT, gen_ashrhi3_i);
4034 GFF(HImode, LSHIFTRT, gen_lshrhi3_i);
4035 GFF(PSImode, ASHIFT, gen_ashlpsi3_i);
4036 GFF(PSImode, ASHIFTRT, gen_ashrpsi3_i);
4037 GFF(PSImode, LSHIFTRT, gen_lshrpsi3_i);
4038 GFF(SImode, ASHIFT, TARGET_A16 ? gen_ashlsi3_16 : gen_ashlsi3_24);
4039 GFF(SImode, ASHIFTRT, TARGET_A16 ? gen_ashrsi3_16 : gen_ashrsi3_24);
4040 GFF(SImode, LSHIFTRT, TARGET_A16 ? gen_lshrsi3_16 : gen_lshrsi3_24);
4041 #undef GFF
4042 gcc_unreachable ();
4043 }
4044
/* The m32c only has one shift, but it takes a signed count.  GCC
   doesn't want this, so we fake it by negating any shift count when
   we're pretending to shift the other way.  Also, the shift count is
   limited to -8..8.  It's slightly better to use two shifts for 9..15
   than to load the count into r1h, so we do that too.  Returns
   nonzero when the whole shift has been emitted here; zero when the
   caller's pattern should proceed with operands[2] as prepared.  */
int
m32c_prepare_shift (rtx * operands, int scale, int shift_code)
{
  enum machine_mode mode = GET_MODE (operands[0]);
  shift_gen_func func = shift_gen_func_for (mode, shift_code);
  rtx temp;

  if (GET_CODE (operands[2]) == CONST_INT)
    {
      /* Constant count: emit as many maximum-width shifts as needed,
	 then one final shift for the remainder.  */
      int maxc = TARGET_A24 && (mode == PSImode || mode == SImode) ? 32 : 8;
      int count = INTVAL (operands[2]) * scale;

      while (count > maxc)
	{
	  temp = gen_reg_rtx (mode);
	  emit_insn (func (temp, operands[1], GEN_INT (maxc)));
	  operands[1] = temp;
	  count -= maxc;
	}
      while (count < -maxc)
	{
	  temp = gen_reg_rtx (mode);
	  emit_insn (func (temp, operands[1], GEN_INT (-maxc)));
	  operands[1] = temp;
	  count += maxc;
	}
      emit_insn (func (operands[0], operands[1], GEN_INT (count)));
      return 1;
    }

  /* Variable count.  */
  temp = gen_reg_rtx (QImode);
  if (scale < 0)
    /* The pattern has a NEG that corresponds to this.  */
    emit_move_insn (temp, gen_rtx_NEG (QImode, operands[2]));
  else if (TARGET_A16 && mode == SImode)
    /* We do this because the code below may modify this, we don't
       want to modify the origin of this value.  */
    emit_move_insn (temp, operands[2]);
  else
    /* We'll only use it for the shift, no point emitting a move.  */
    temp = operands[2];

  if (TARGET_A16 && GET_MODE_SIZE (mode) == 4)
    {
      /* The m16c has a limit of -16..16 for SI shifts, even when the
	 shift count is in a register.  Since there are so many targets
	 of these shifts, it's better to expand the RTL here than to
	 call a helper function.

	 The resulting code looks something like this:

		cmp.b	r1h,-16
		jge.b	1f
		shl.l	-16,dest
		add.b	r1h,16
	 1f:	cmp.b	r1h,16
		jle.b	1f
		shl.l	16,dest
		sub.b	r1h,16
	 1f:	shl.l	r1h,dest

	 We take advantage of the fact that "negative" shifts are
	 undefined to skip one of the comparisons.  */

      rtx count;
      rtx label, insn, tempvar;

      emit_move_insn (operands[0], operands[1]);

      count = temp;
      label = gen_label_rtx ();
      LABEL_NUSES (label) ++;

      tempvar = gen_reg_rtx (mode);

      if (shift_code == ASHIFT)
	{
	  /* This is a left shift.  We only need check positive counts.
	     Pre-shift by 16 (as two 8s) and reduce the count when it
	     exceeds the hardware limit.  */
	  emit_jump_insn (gen_cbranchqi4 (gen_rtx_LE (VOIDmode, 0, 0),
					  count, GEN_INT (16), label));
	  emit_insn (func (tempvar, operands[0], GEN_INT (8)));
	  emit_insn (func (operands[0], tempvar, GEN_INT (8)));
	  insn = emit_insn (gen_addqi3 (count, count, GEN_INT (-16)));
	  emit_label_after (label, insn);
	}
      else
	{
	  /* This is a right shift.  We only need check negative counts.  */
	  emit_jump_insn (gen_cbranchqi4 (gen_rtx_GE (VOIDmode, 0, 0),
					  count, GEN_INT (-16), label));
	  emit_insn (func (tempvar, operands[0], GEN_INT (-8)));
	  emit_insn (func (operands[0], tempvar, GEN_INT (-8)));
	  insn = emit_insn (gen_addqi3 (count, count, GEN_INT (16)));
	  emit_label_after (label, insn);
	}
      operands[1] = operands[0];
      emit_insn (func (operands[0], operands[0], count));
      return 1;
    }

  operands[2] = temp;
  return 0;
}
4153
4154 /* The m32c has a limited range of operations that work on PSImode
4155 values; we have to expand to SI, do the math, and truncate back to
4156 PSI. Yes, this is expensive, but hopefully gcc will learn to avoid
4157 those cases. */
4158 void
4159 m32c_expand_neg_mulpsi3 (rtx * operands)
4160 {
4161 /* operands: a = b * i */
4162 rtx temp1; /* b as SI */
4163 rtx scale /* i as SI */;
4164 rtx temp2; /* a*b as SI */
4165
4166 temp1 = gen_reg_rtx (SImode);
4167 temp2 = gen_reg_rtx (SImode);
4168 if (GET_CODE (operands[2]) != CONST_INT)
4169 {
4170 scale = gen_reg_rtx (SImode);
4171 emit_insn (gen_zero_extendpsisi2 (scale, operands[2]));
4172 }
4173 else
4174 scale = copy_to_mode_reg (SImode, operands[2]);
4175
4176 emit_insn (gen_zero_extendpsisi2 (temp1, operands[1]));
4177 temp2 = expand_simple_binop (SImode, MULT, temp1, scale, temp2, 1, OPTAB_LIB);
4178 emit_insn (gen_truncsipsi2 (operands[0], temp2));
4179 }
4180
4181 /* Pattern Output Functions */
4182
4183 int
4184 m32c_expand_movcc (rtx *operands)
4185 {
4186 rtx rel = operands[1];
4187
4188 if (GET_CODE (rel) != EQ && GET_CODE (rel) != NE)
4189 return 1;
4190 if (GET_CODE (operands[2]) != CONST_INT
4191 || GET_CODE (operands[3]) != CONST_INT)
4192 return 1;
4193 if (GET_CODE (rel) == NE)
4194 {
4195 rtx tmp = operands[2];
4196 operands[2] = operands[3];
4197 operands[3] = tmp;
4198 rel = gen_rtx_EQ (GET_MODE (rel), XEXP (rel, 0), XEXP (rel, 1));
4199 }
4200
4201 emit_move_insn (operands[0],
4202 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
4203 rel,
4204 operands[2],
4205 operands[3]));
4206 return 0;
4207 }
4208
/* Used for the "insv" pattern.  Return nonzero to fail, else done.
   operands[0] is the destination, operands[1] the field width (must
   be 1), operands[2] the bit position, and operands[3] the inserted
   value (must be constant 0, 1, or -1).  Expands to a single-bit
   AND/IOR that maps onto the bset/bclr opcodes.  */
int
m32c_expand_insv (rtx *operands)
{
  rtx op0, src0, p;
  int mask;

  /* Only one-bit inserts are supported.  */
  if (INTVAL (operands[1]) != 1)
    return 1;

  /* Our insv opcode (bset, bclr) can only insert a one-bit constant.  */
  if (GET_CODE (operands[3]) != CONST_INT)
    return 1;
  if (INTVAL (operands[3]) != 0
      && INTVAL (operands[3]) != 1
      && INTVAL (operands[3]) != -1)
    return 1;

  mask = 1 << INTVAL (operands[2]);

  /* Strip a zero-offset subreg so we operate on the underlying
     HImode/QImode object directly.  */
  op0 = operands[0];
  if (GET_CODE (op0) == SUBREG
      && SUBREG_BYTE (op0) == 0)
    {
      rtx sub = SUBREG_REG (op0);
      if (GET_MODE (sub) == HImode || GET_MODE (sub) == QImode)
	op0 = sub;
    }

  /* Read the old value through a pseudo when possible; volatile MEMs
     (or post-reload) must be accessed in place.  */
  if (!can_create_pseudo_p ()
      || (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0)))
    src0 = op0;
  else
    {
      src0 = gen_reg_rtx (GET_MODE (op0));
      emit_move_insn (src0, op0);
    }

  if (GET_MODE (op0) == HImode
      && INTVAL (operands[2]) >= 8
      && GET_CODE (op0) == MEM)
    {
      /* We are little endian.  */
      /* NOTE(review): new_mem is constructed but op0 is never
	 redirected to it, so this branch only shifts the mask down;
	 confirm whether an `op0 = new_mem;` was intended here.  */
      rtx new_mem = gen_rtx_MEM (QImode, plus_constant (XEXP (op0, 0), 1));
      MEM_COPY_ATTRIBUTES (new_mem, op0);
      mask >>= 8;
    }

  /* First, we generate a mask with the correct polarity.  If we are
     storing a zero, we want an AND mask, so invert it.  */
  if (INTVAL (operands[3]) == 0)
    {
      /* Storing a zero, use an AND mask */
      if (GET_MODE (op0) == HImode)
	mask ^= 0xffff;
      else
	mask ^= 0xff;
    }
  /* Now we need to properly sign-extend the mask in case we need to
     fall back to an AND or OR opcode.  */
  if (GET_MODE (op0) == HImode)
    {
      if (mask & 0x8000)
	mask -= 0x10000;
    }
  else
    {
      if (mask & 0x80)
	mask -= 0x100;
    }

  /* Pick the insn by polarity (IOR to set, AND to clear), operand
     mode, and chip family.  */
  switch ( (INTVAL (operands[3]) ? 4 : 0)
	   + ((GET_MODE (op0) == HImode) ? 2 : 0)
	   + (TARGET_A24 ? 1 : 0))
    {
    case 0: p = gen_andqi3_16 (op0, src0, GEN_INT (mask)); break;
    case 1: p = gen_andqi3_24 (op0, src0, GEN_INT (mask)); break;
    case 2: p = gen_andhi3_16 (op0, src0, GEN_INT (mask)); break;
    case 3: p = gen_andhi3_24 (op0, src0, GEN_INT (mask)); break;
    case 4: p = gen_iorqi3_16 (op0, src0, GEN_INT (mask)); break;
    case 5: p = gen_iorqi3_24 (op0, src0, GEN_INT (mask)); break;
    case 6: p = gen_iorhi3_16 (op0, src0, GEN_INT (mask)); break;
    case 7: p = gen_iorhi3_24 (op0, src0, GEN_INT (mask)); break;
    default: p = NULL_RTX; break;	/* Not reached, but silences a warning.  */
    }

  emit_insn (p);
  return 0;
}
4298
4299 const char *
4300 m32c_scc_pattern(rtx *operands, RTX_CODE code)
4301 {
4302 static char buf[30];
4303 if (GET_CODE (operands[0]) == REG
4304 && REGNO (operands[0]) == R0_REGNO)
4305 {
4306 if (code == EQ)
4307 return "stzx\t#1,#0,r0l";
4308 if (code == NE)
4309 return "stzx\t#0,#1,r0l";
4310 }
4311 sprintf(buf, "bm%s\t0,%%h0\n\tand.b\t#1,%%0", GET_RTX_NAME (code));
4312 return buf;
4313 }
4314
4315 /* Encode symbol attributes of a SYMBOL_REF into its
4316 SYMBOL_REF_FLAGS. */
4317 static void
4318 m32c_encode_section_info (tree decl, rtx rtl, int first)
4319 {
4320 int extra_flags = 0;
4321
4322 default_encode_section_info (decl, rtl, first);
4323 if (TREE_CODE (decl) == FUNCTION_DECL
4324 && m32c_special_page_vector_p (decl))
4325
4326 extra_flags = SYMBOL_FLAG_FUNCVEC_FUNCTION;
4327
4328 if (extra_flags)
4329 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= extra_flags;
4330 }
4331
/* Returns TRUE if the current function is a leaf, and thus we can
   determine which registers an interrupt function really needs to
   save.  The logic below is mostly about finding the insn sequence
   that's the function, versus any sequence that might be open for the
   current insn.  */
static int
m32c_leaf_function_p (void)
{
  rtx saved_first, saved_last;
  struct sequence_stack *seq;
  int rv;

  /* Save the current emit state so we can swap in the function-level
     insn stream.  */
  saved_first = crtl->emit.x_first_insn;
  saved_last = crtl->emit.x_last_insn;
  /* The bottom of the sequence stack holds the function's own insn
     list while inner sequences are open.  */
  for (seq = crtl->emit.sequence_stack; seq && seq->next; seq = seq->next)
    ;
  if (seq)
    {
      crtl->emit.x_first_insn = seq->first;
      crtl->emit.x_last_insn = seq->last;
    }

  /* Ask the generic predicate about the function's stream.  */
  rv = leaf_function_p ();

  /* Restore the emit state we saved above.  */
  crtl->emit.x_first_insn = saved_first;
  crtl->emit.x_last_insn = saved_last;
  return rv;
}
4360
4361 /* Returns TRUE if the current function needs to use the ENTER/EXIT
4362 opcodes. If the function doesn't need the frame base or stack
4363 pointer, it can use the simpler RTS opcode. */
4364 static bool
4365 m32c_function_needs_enter (void)
4366 {
4367 rtx insn;
4368 struct sequence_stack *seq;
4369 rtx sp = gen_rtx_REG (Pmode, SP_REGNO);
4370 rtx fb = gen_rtx_REG (Pmode, FB_REGNO);
4371
4372 insn = get_insns ();
4373 for (seq = crtl->emit.sequence_stack;
4374 seq;
4375 insn = seq->first, seq = seq->next);
4376
4377 while (insn)
4378 {
4379 if (reg_mentioned_p (sp, insn))
4380 return true;
4381 if (reg_mentioned_p (fb, insn))
4382 return true;
4383 insn = NEXT_INSN (insn);
4384 }
4385 return false;
4386 }
4387
4388 /* Mark all the subexpressions of the PARALLEL rtx PAR as
4389 frame-related. Return PAR.
4390
4391 dwarf2out.c:dwarf2out_frame_debug_expr ignores sub-expressions of a
4392 PARALLEL rtx other than the first if they do not have the
4393 FRAME_RELATED flag set on them. So this function is handy for
4394 marking up 'enter' instructions. */
4395 static rtx
4396 m32c_all_frame_related (rtx par)
4397 {
4398 int len = XVECLEN (par, 0);
4399 int i;
4400
4401 for (i = 0; i < len; i++)
4402 F (XVECEXP (par, 0, i));
4403
4404 return par;
4405 }
4406
/* Emits the prologue.  See the frame layout comment earlier in this
   file.  We can reserve up to 256 bytes with the ENTER opcode, beyond
   that we manually update sp.  */
void
m32c_emit_prologue (void)
{
  int frame_size, extra_frame_size = 0, reg_save_size;
  int complex_prologue = 0;

  cfun->machine->is_leaf = m32c_leaf_function_p ();
  if (interrupt_p (cfun->decl))
    {
      cfun->machine->is_interrupt = 1;
      complex_prologue = 1;
    }
  else if (bank_switch_p (cfun->decl))
    warning (OPT_Wattributes,
	     "%<bank_switch%> has no effect on non-interrupt functions");

  /* Bytes that PUSHM will occupy below the frame base.  */
  reg_save_size = m32c_pushm_popm (PP_justcount);

  if (interrupt_p (cfun->decl))
    {
      /* Bank-switching interrupts use FSET B instead of pushing the
	 interrupt-clobbered registers.  */
      if (bank_switch_p (cfun->decl))
	emit_insn (gen_fset_b ());
      else if (cfun->machine->intr_pushm)
	emit_insn (gen_pushm (GEN_INT (cfun->machine->intr_pushm)));
    }

  frame_size =
    m32c_initial_elimination_offset (FB_REGNO, SP_REGNO) - reg_save_size;
  /* No frame and no $sp/$fb references: a bare RTS return suffices.  */
  if (frame_size == 0
      && !m32c_function_needs_enter ())
    cfun->machine->use_rts = 1;

  /* ENTER can reserve at most 254 bytes; any excess is allocated with
     an explicit $sp adjustment below.  */
  if (frame_size > 254)
    {
      extra_frame_size = frame_size - 254;
      frame_size = 254;
    }
  if (cfun->machine->use_rts == 0)
    F (emit_insn (m32c_all_frame_related
		  (TARGET_A16
		   ? gen_prologue_enter_16 (GEN_INT (frame_size + 2))
		   : gen_prologue_enter_24 (GEN_INT (frame_size + 4)))));

  if (extra_frame_size)
    {
      complex_prologue = 1;
      if (TARGET_A16)
	F (emit_insn (gen_addhi3 (gen_rtx_REG (HImode, SP_REGNO),
				  gen_rtx_REG (HImode, SP_REGNO),
				  GEN_INT (-extra_frame_size))));
      else
	F (emit_insn (gen_addpsi3 (gen_rtx_REG (PSImode, SP_REGNO),
				   gen_rtx_REG (PSImode, SP_REGNO),
				   GEN_INT (-extra_frame_size))));
    }

  /* Save whatever call-saved registers this function uses.  */
  complex_prologue += m32c_pushm_popm (PP_pushm);

  /* This just emits a comment into the .s file for debugging.  */
  if (complex_prologue)
    emit_insn (gen_prologue_end ());
}
4472
/* Likewise, for the epilogue.  The only exception is that, for
   interrupts, we must manually unwind the frame as the REIT opcode
   doesn't do that.  */
void
m32c_emit_epilogue (void)
{
  int popm_count = m32c_pushm_popm (PP_justcount);

  /* This just emits a comment into the .s file for debugging.  */
  if (popm_count > 0 || cfun->machine->is_interrupt)
    emit_insn (gen_epilogue_start ());

  /* Restore the registers the prologue's PUSHM saved.  */
  if (popm_count > 0)
    m32c_pushm_popm (PP_popm);

  if (cfun->machine->is_interrupt)
    {
      enum machine_mode spmode = TARGET_A16 ? HImode : PSImode;

      /* REIT clears B flag and restores $fp for us, but we still
	 have to fix up the stack.  USE_RTS just means we didn't
	 emit ENTER.  */
      if (!cfun->machine->use_rts)
	{
	  /* Unwind $sp back to the frame base via $a0, then pop the
	     saved frame pointer.  */
	  emit_move_insn (gen_rtx_REG (spmode, A0_REGNO),
			  gen_rtx_REG (spmode, FP_REGNO));
	  emit_move_insn (gen_rtx_REG (spmode, SP_REGNO),
			  gen_rtx_REG (spmode, A0_REGNO));
	  /* We can't just add this to the POPM because it would be in
	     the wrong order, and wouldn't fix the stack if we're bank
	     switching.  */
	  if (TARGET_A16)
	    emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, FP_REGNO)));
	  else
	    emit_insn (gen_poppsi (gen_rtx_REG (PSImode, FP_REGNO)));
	}
      if (!bank_switch_p (cfun->decl) && cfun->machine->intr_pushm)
	emit_insn (gen_popm (GEN_INT (cfun->machine->intr_pushm)));

      /* The FREIT (Fast REturn from InTerrupt) instruction should be
	 generated only for M32C/M32CM targets (generate the REIT
	 instruction otherwise).  */
      if (fast_interrupt_p (cfun->decl))
	{
	  /* Check if fast_attribute is set for M32C or M32CM.  */
	  if (TARGET_A24)
	    {
	      emit_jump_insn (gen_epilogue_freit ());
	    }
	  /* If fast_interrupt attribute is set for an R8C or M16C
	     target ignore this attribute and generated REIT
	     instruction.  */
	  else
	    {
	      warning (OPT_Wattributes,
		       "%<fast_interrupt%> attribute directive ignored");
	      emit_jump_insn (gen_epilogue_reit_16 ());
	    }
	}
      else if (TARGET_A16)
	emit_jump_insn (gen_epilogue_reit_16 ());
      else
	emit_jump_insn (gen_epilogue_reit_24 ());
    }
  else if (cfun->machine->use_rts)
    emit_jump_insn (gen_epilogue_rts ());
  else if (TARGET_A16)
    emit_jump_insn (gen_epilogue_exitd_16 ());
  else
    emit_jump_insn (gen_epilogue_exitd_24 ());
}
4544
4545 void
4546 m32c_emit_eh_epilogue (rtx ret_addr)
4547 {
4548 /* R0[R2] has the stack adjustment. R1[R3] has the address to
4549 return to. We have to fudge the stack, pop everything, pop SP
4550 (fudged), and return (fudged). This is actually easier to do in
4551 assembler, so punt to libgcc. */
4552 emit_jump_insn (gen_eh_epilogue (ret_addr, cfun->machine->eh_stack_adjust));
4553 /* emit_clobber (gen_rtx_REG (HImode, R0L_REGNO)); */
4554 }
4555
4556 /* Indicate which flags must be properly set for a given conditional. */
4557 static int
4558 flags_needed_for_conditional (rtx cond)
4559 {
4560 switch (GET_CODE (cond))
4561 {
4562 case LE:
4563 case GT:
4564 return FLAGS_OSZ;
4565 case LEU:
4566 case GTU:
4567 return FLAGS_ZC;
4568 case LT:
4569 case GE:
4570 return FLAGS_OS;
4571 case LTU:
4572 case GEU:
4573 return FLAGS_C;
4574 case EQ:
4575 case NE:
4576 return FLAGS_Z;
4577 default:
4578 return FLAGS_N;
4579 }
4580 }
4581
4582 #define DEBUG_CMP 0
4583
4584 /* Returns true if a compare insn is redundant because it would only
4585 set flags that are already set correctly. */
4586 static bool
4587 m32c_compare_redundant (rtx cmp, rtx *operands)
4588 {
4589 int flags_needed;
4590 int pflags;
4591 rtx prev, pp, next;
4592 rtx op0, op1;
4593 #if DEBUG_CMP
4594 int prev_icode, i;
4595 #endif
4596
4597 op0 = operands[0];
4598 op1 = operands[1];
4599
4600 #if DEBUG_CMP
4601 fprintf(stderr, "\n\033[32mm32c_compare_redundant\033[0m\n");
4602 debug_rtx(cmp);
4603 for (i=0; i<2; i++)
4604 {
4605 fprintf(stderr, "operands[%d] = ", i);
4606 debug_rtx(operands[i]);
4607 }
4608 #endif
4609
4610 next = next_nonnote_insn (cmp);
4611 if (!next || !INSN_P (next))
4612 {
4613 #if DEBUG_CMP
4614 fprintf(stderr, "compare not followed by insn\n");
4615 debug_rtx(next);
4616 #endif
4617 return false;
4618 }
4619 if (GET_CODE (PATTERN (next)) == SET
4620 && GET_CODE (XEXP ( PATTERN (next), 1)) == IF_THEN_ELSE)
4621 {
4622 next = XEXP (XEXP (PATTERN (next), 1), 0);
4623 }
4624 else if (GET_CODE (PATTERN (next)) == SET)
4625 {
4626 /* If this is a conditional, flags_needed will be something
4627 other than FLAGS_N, which we test below. */
4628 next = XEXP (PATTERN (next), 1);
4629 }
4630 else
4631 {
4632 #if DEBUG_CMP
4633 fprintf(stderr, "compare not followed by conditional\n");
4634 debug_rtx(next);
4635 #endif
4636 return false;
4637 }
4638 #if DEBUG_CMP
4639 fprintf(stderr, "conditional is: ");
4640 debug_rtx(next);
4641 #endif
4642
4643 flags_needed = flags_needed_for_conditional (next);
4644 if (flags_needed == FLAGS_N)
4645 {
4646 #if DEBUG_CMP
4647 fprintf(stderr, "compare not followed by conditional\n");
4648 debug_rtx(next);
4649 #endif
4650 return false;
4651 }
4652
4653 /* Compare doesn't set overflow and carry the same way that
4654 arithmetic instructions do, so we can't replace those. */
4655 if (flags_needed & FLAGS_OC)
4656 return false;
4657
4658 prev = cmp;
4659 do {
4660 prev = prev_nonnote_insn (prev);
4661 if (!prev)
4662 {
4663 #if DEBUG_CMP
4664 fprintf(stderr, "No previous insn.\n");
4665 #endif
4666 return false;
4667 }
4668 if (!INSN_P (prev))
4669 {
4670 #if DEBUG_CMP
4671 fprintf(stderr, "Previous insn is a non-insn.\n");
4672 #endif
4673 return false;
4674 }
4675 pp = PATTERN (prev);
4676 if (GET_CODE (pp) != SET)
4677 {
4678 #if DEBUG_CMP
4679 fprintf(stderr, "Previous insn is not a SET.\n");
4680 #endif
4681 return false;
4682 }
4683 pflags = get_attr_flags (prev);
4684
4685 /* Looking up attributes of previous insns corrupted the recog
4686 tables. */
4687 INSN_UID (cmp) = -1;
4688 recog (PATTERN (cmp), cmp, 0);
4689
4690 if (pflags == FLAGS_N
4691 && reg_mentioned_p (op0, pp))
4692 {
4693 #if DEBUG_CMP
4694 fprintf(stderr, "intermediate non-flags insn uses op:\n");
4695 debug_rtx(prev);
4696 #endif
4697 return false;
4698 }
4699
4700 /* Check for comparisons against memory - between volatiles and
4701 aliases, we just can't risk this one. */
4702 if (GET_CODE (operands[0]) == MEM
4703 || GET_CODE (operands[0]) == MEM)
4704 {
4705 #if DEBUG_CMP
4706 fprintf(stderr, "comparisons with memory:\n");
4707 debug_rtx(prev);
4708 #endif
4709 return false;
4710 }
4711
4712 /* Check for PREV changing a register that's used to compute a
4713 value in CMP, even if it doesn't otherwise change flags. */
4714 if (GET_CODE (operands[0]) == REG
4715 && rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[0]))
4716 {
4717 #if DEBUG_CMP
4718 fprintf(stderr, "sub-value affected, op0:\n");
4719 debug_rtx(prev);
4720 #endif
4721 return false;
4722 }
4723 if (GET_CODE (operands[1]) == REG
4724 && rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[1]))
4725 {
4726 #if DEBUG_CMP
4727 fprintf(stderr, "sub-value affected, op1:\n");
4728 debug_rtx(prev);
4729 #endif
4730 return false;
4731 }
4732
4733 } while (pflags == FLAGS_N);
4734 #if DEBUG_CMP
4735 fprintf(stderr, "previous flag-setting insn:\n");
4736 debug_rtx(prev);
4737 debug_rtx(pp);
4738 #endif
4739
4740 if (GET_CODE (pp) == SET
4741 && GET_CODE (XEXP (pp, 0)) == REG
4742 && REGNO (XEXP (pp, 0)) == FLG_REGNO
4743 && GET_CODE (XEXP (pp, 1)) == COMPARE)
4744 {
4745 /* Adjacent cbranches must have the same operands to be
4746 redundant. */
4747 rtx pop0 = XEXP (XEXP (pp, 1), 0);
4748 rtx pop1 = XEXP (XEXP (pp, 1), 1);
4749 #if DEBUG_CMP
4750 fprintf(stderr, "adjacent cbranches\n");
4751 debug_rtx(pop0);
4752 debug_rtx(pop1);
4753 #endif
4754 if (rtx_equal_p (op0, pop0)
4755 && rtx_equal_p (op1, pop1))
4756 return true;
4757 #if DEBUG_CMP
4758 fprintf(stderr, "prev cmp not same\n");
4759 #endif
4760 return false;
4761 }
4762
4763 /* Else the previous insn must be a SET, with either the source or
4764 dest equal to operands[0], and operands[1] must be zero. */
4765
4766 if (!rtx_equal_p (op1, const0_rtx))
4767 {
4768 #if DEBUG_CMP
4769 fprintf(stderr, "operands[1] not const0_rtx\n");
4770 #endif
4771 return false;
4772 }
4773 if (GET_CODE (pp) != SET)
4774 {
4775 #if DEBUG_CMP
4776 fprintf (stderr, "pp not set\n");
4777 #endif
4778 return false;
4779 }
4780 if (!rtx_equal_p (op0, SET_SRC (pp))
4781 && !rtx_equal_p (op0, SET_DEST (pp)))
4782 {
4783 #if DEBUG_CMP
4784 fprintf(stderr, "operands[0] not found in set\n");
4785 #endif
4786 return false;
4787 }
4788
4789 #if DEBUG_CMP
4790 fprintf(stderr, "cmp flags %x prev flags %x\n", flags_needed, pflags);
4791 #endif
4792 if ((pflags & flags_needed) == flags_needed)
4793 return true;
4794
4795 return false;
4796 }
4797
4798 /* Return the pattern for a compare. This will be commented out if
4799 the compare is redundant, else a normal pattern is returned. Thus,
4800 the assembler output says where the compare would have been. */
4801 char *
4802 m32c_output_compare (rtx insn, rtx *operands)
4803 {
4804 static char templ[] = ";cmp.b\t%1,%0";
4805 /* ^ 5 */
4806
4807 templ[5] = " bwll"[GET_MODE_SIZE(GET_MODE(operands[0]))];
4808 if (m32c_compare_redundant (insn, operands))
4809 {
4810 #if DEBUG_CMP
4811 fprintf(stderr, "cbranch: cmp not needed\n");
4812 #endif
4813 return templ;
4814 }
4815
4816 #if DEBUG_CMP
4817 fprintf(stderr, "cbranch: cmp needed: `%s'\n", templ + 1);
4818 #endif
4819 return templ + 1;
4820 }
4821
/* Override how symbol declarations are annotated; the hook function
   is presumably defined earlier in this file — confirm.  */
#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO m32c_encode_section_info

/* If the frame pointer isn't used, we detect it manually.  But the
   stack pointer doesn't have as flexible addressing as the frame
   pointer, so we always assume we have it.  */

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED hook_bool_void_true

/* The Global `targetm' Variable.  Instantiated from the defaults in
   TARGET_INITIALIZER plus the TARGET_* overrides above.  */

struct gcc_target targetm = TARGET_INITIALIZER;

/* Garbage-collection roots generated from this file's GTY markers.  */
#include "gt-m32c.h"