]> git.ipfire.org Git - thirdparty/gcc.git/blob - gcc/config/m32c/m32c.c
re PR middle-end/46500 (target.h includes tm.h)
[thirdparty/gcc.git] / gcc / config / m32c / m32c.c
1 /* Target Code for R8C/M16C/M32C
2 Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011
3 Free Software Foundation, Inc.
4 Contributed by Red Hat.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it
9 under the terms of the GNU General Public License as published
10 by the Free Software Foundation; either version 3, or (at your
11 option) any later version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT
14 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
16 License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "regs.h"
28 #include "hard-reg-set.h"
29 #include "insn-config.h"
30 #include "conditions.h"
31 #include "insn-flags.h"
32 #include "output.h"
33 #include "insn-attr.h"
34 #include "flags.h"
35 #include "recog.h"
36 #include "reload.h"
37 #include "diagnostic-core.h"
38 #include "obstack.h"
39 #include "tree.h"
40 #include "expr.h"
41 #include "optabs.h"
42 #include "except.h"
43 #include "function.h"
44 #include "ggc.h"
45 #include "target.h"
46 #include "target-def.h"
47 #include "tm_p.h"
48 #include "langhooks.h"
49 #include "gimple.h"
50 #include "df.h"
51
52 /* Prototypes */
53
54 /* Used by m32c_pushm_popm. */
55 typedef enum
56 {
57 PP_pushm,
58 PP_popm,
59 PP_justcount
60 } Push_Pop_Type;
61
62 static bool m32c_function_needs_enter (void);
63 static tree interrupt_handler (tree *, tree, tree, int, bool *);
64 static tree function_vector_handler (tree *, tree, tree, int, bool *);
65 static int interrupt_p (tree node);
66 static int bank_switch_p (tree node);
67 static int fast_interrupt_p (tree node);
68 static int interrupt_p (tree node);
69 static bool m32c_asm_integer (rtx, unsigned int, int);
70 static int m32c_comp_type_attributes (const_tree, const_tree);
71 static bool m32c_fixed_condition_code_regs (unsigned int *, unsigned int *);
72 static struct machine_function *m32c_init_machine_status (void);
73 static void m32c_insert_attributes (tree, tree *);
74 static bool m32c_legitimate_address_p (enum machine_mode, rtx, bool);
75 static bool m32c_addr_space_legitimate_address_p (enum machine_mode, rtx, bool, addr_space_t);
76 static rtx m32c_function_arg (cumulative_args_t, enum machine_mode,
77 const_tree, bool);
78 static bool m32c_pass_by_reference (cumulative_args_t, enum machine_mode,
79 const_tree, bool);
80 static void m32c_function_arg_advance (cumulative_args_t, enum machine_mode,
81 const_tree, bool);
82 static unsigned int m32c_function_arg_boundary (enum machine_mode, const_tree);
83 static int m32c_pushm_popm (Push_Pop_Type);
84 static bool m32c_strict_argument_naming (cumulative_args_t);
85 static rtx m32c_struct_value_rtx (tree, int);
86 static rtx m32c_subreg (enum machine_mode, rtx, enum machine_mode, int);
87 static int need_to_save (int);
88 static rtx m32c_function_value (const_tree, const_tree, bool);
89 static rtx m32c_libcall_value (enum machine_mode, const_rtx);
90
91 /* Returns true if an address is specified, else false. */
92 static bool m32c_get_pragma_address (const char *varname, unsigned *addr);
93
94 #define SYMBOL_FLAG_FUNCVEC_FUNCTION (SYMBOL_FLAG_MACH_DEP << 0)
95
96 #define streq(a,b) (strcmp ((a), (b)) == 0)
97
/* Internal support routines */

/* Debugging statements are tagged with DEBUG0 only so that they can
   be easily enabled individually, by replacing the '0' with '1' as
   needed.  */
#define DEBUG0 0
#define DEBUG1 1

#if DEBUG0
/* This is needed by some of the commented-out debug statements
   below.  */
static char const *class_names[LIM_REG_CLASSES] = REG_CLASS_NAMES;
#endif
/* A plain-int copy of REG_CLASS_CONTENTS, used throughout this file
   for bitmask arithmetic on register classes.  Only the low word is
   kept; all hard registers fit in it on this target.  */
static int class_contents[LIM_REG_CLASSES][1] = REG_CLASS_CONTENTS;

/* These are all to support encode_pattern().  pattern[] holds the
   encoded string, patternp the append cursor, and patternr[] the RTX
   node corresponding to each encoded character.  */
static char pattern[30], *patternp;
static GTY(()) rtx patternr[30];
#define RTX_IS(x) (streq (pattern, x))

/* Some macros to simplify the logic throughout this file.  */
#define IS_MEM_REGNO(regno) ((regno) >= MEM0_REGNO && (regno) <= MEM7_REGNO)
#define IS_MEM_REG(rtx) (GET_CODE (rtx) == REG && IS_MEM_REGNO (REGNO (rtx)))

#define IS_CR_REGNO(regno) ((regno) >= SB_REGNO && (regno) <= PC_REGNO)
#define IS_CR_REG(rtx) (GET_CODE (rtx) == REG && IS_CR_REGNO (REGNO (rtx)))
124
125 static int
126 far_addr_space_p (rtx x)
127 {
128 if (GET_CODE (x) != MEM)
129 return 0;
130 #if DEBUG0
131 fprintf(stderr, "\033[35mfar_addr_space: "); debug_rtx(x);
132 fprintf(stderr, " = %d\033[0m\n", MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR);
133 #endif
134 return MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR;
135 }
136
137 /* We do most RTX matching by converting the RTX into a string, and
138 using string compares. This vastly simplifies the logic in many of
139 the functions in this file.
140
141 On exit, pattern[] has the encoded string (use RTX_IS("...") to
142 compare it) and patternr[] has pointers to the nodes in the RTX
143 corresponding to each character in the encoded string. The latter
144 is mostly used by print_operand().
145
146 Unrecognized patterns have '?' in them; this shows up when the
147 assembler complains about syntax errors.
148 */
149
/* Encode one RTX node, and recursively its operands, appending the
   characters at *patternp and recording each node in patternr[].
   Callers go through encode_pattern(), which resets the cursor.  */
static void
encode_pattern_1 (rtx x)
{
  int i;

  /* Leave room for the trailing NUL; overlong patterns are clipped
     and their last character replaced with the "unrecognized"
     marker '?'.  */
  if (patternp == pattern + sizeof (pattern) - 2)
    {
      patternp[-1] = '?';
      return;
    }

  /* Remember which node produced this character; print_operand()
     looks operands up through patternr[].  */
  patternr[patternp - pattern] = x;

  switch (GET_CODE (x))
    {
    case REG:
      *patternp++ = 'r';
      break;
    case SUBREG:
      /* Only mode-size-changing subregs get an explicit 'S';
         same-size subregs encode as their inner expression.  */
      if (GET_MODE_SIZE (GET_MODE (x)) !=
	  GET_MODE_SIZE (GET_MODE (XEXP (x, 0))))
	*patternp++ = 'S';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case MEM:
      *patternp++ = 'm';
      /* FALLTHRU - a MEM's address operand is encoded just like a
         CONST's operand.  */
    case CONST:
      encode_pattern_1 (XEXP (x, 0));
      break;
    case SIGN_EXTEND:
      *patternp++ = '^';
      *patternp++ = 'S';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case ZERO_EXTEND:
      *patternp++ = '^';
      *patternp++ = 'Z';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case PLUS:
      *patternp++ = '+';
      encode_pattern_1 (XEXP (x, 0));
      encode_pattern_1 (XEXP (x, 1));
      break;
    case PRE_DEC:
      *patternp++ = '>';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case POST_INC:
      *patternp++ = '<';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case LO_SUM:
      *patternp++ = 'L';
      encode_pattern_1 (XEXP (x, 0));
      encode_pattern_1 (XEXP (x, 1));
      break;
    case HIGH:
      *patternp++ = 'H';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case SYMBOL_REF:
      *patternp++ = 's';
      break;
    case LABEL_REF:
      *patternp++ = 'l';
      break;
    case CODE_LABEL:
      *patternp++ = 'c';
      break;
    case CONST_INT:
    case CONST_DOUBLE:
      *patternp++ = 'i';
      break;
    case UNSPEC:
      /* 'u' followed by the unspec number as a single digit, then the
         encoded operands.  */
      *patternp++ = 'u';
      *patternp++ = '0' + XCINT (x, 1, UNSPEC);
      for (i = 0; i < XVECLEN (x, 0); i++)
	encode_pattern_1 (XVECEXP (x, 0, i));
      break;
    case USE:
      *patternp++ = 'U';
      break;
    case PARALLEL:
      *patternp++ = '|';
      for (i = 0; i < XVECLEN (x, 0); i++)
	encode_pattern_1 (XVECEXP (x, 0, i));
      break;
    case EXPR_LIST:
      *patternp++ = 'E';
      encode_pattern_1 (XEXP (x, 0));
      if (XEXP (x, 1))
	encode_pattern_1 (XEXP (x, 1));
      break;
    default:
      /* Anything else is unrecognized; under DEBUG0 this is treated
         as an internal error instead of being silently encoded.  */
      *patternp++ = '?';
#if DEBUG0
      fprintf (stderr, "can't encode pattern %s\n",
	       GET_RTX_NAME (GET_CODE (x)));
      debug_rtx (x);
      gcc_unreachable ();
#endif
      break;
    }
}
255
/* Entry point for the RTX encoder: reset the cursor, encode X into
   pattern[], and NUL-terminate the result so RTX_IS() can strcmp it.  */
static void
encode_pattern (rtx x)
{
  patternp = pattern;
  encode_pattern_1 (x);
  *patternp = 0;
}
263
264 /* Since register names indicate the mode they're used in, we need a
265 way to determine which name to refer to the register with. Called
266 by print_operand(). */
267
268 static const char *
269 reg_name_with_mode (int regno, enum machine_mode mode)
270 {
271 int mlen = GET_MODE_SIZE (mode);
272 if (regno == R0_REGNO && mlen == 1)
273 return "r0l";
274 if (regno == R0_REGNO && (mlen == 3 || mlen == 4))
275 return "r2r0";
276 if (regno == R0_REGNO && mlen == 6)
277 return "r2r1r0";
278 if (regno == R0_REGNO && mlen == 8)
279 return "r3r1r2r0";
280 if (regno == R1_REGNO && mlen == 1)
281 return "r1l";
282 if (regno == R1_REGNO && (mlen == 3 || mlen == 4))
283 return "r3r1";
284 if (regno == A0_REGNO && TARGET_A16 && (mlen == 3 || mlen == 4))
285 return "a1a0";
286 return reg_names[regno];
287 }
288
289 /* How many bytes a register uses on stack when it's pushed. We need
290 to know this because the push opcode needs to explicitly indicate
291 the size of the register, even though the name of the register
292 already tells it that. Used by m32c_output_reg_{push,pop}, which
293 is only used through calls to ASM_OUTPUT_REG_{PUSH,POP}. */
294
295 static int
296 reg_push_size (int regno)
297 {
298 switch (regno)
299 {
300 case R0_REGNO:
301 case R1_REGNO:
302 return 2;
303 case R2_REGNO:
304 case R3_REGNO:
305 case FLG_REGNO:
306 return 2;
307 case A0_REGNO:
308 case A1_REGNO:
309 case SB_REGNO:
310 case FB_REGNO:
311 case SP_REGNO:
312 if (TARGET_A16)
313 return 2;
314 else
315 return 3;
316 default:
317 gcc_unreachable ();
318 }
319 }
320
/* Lazily-built cache of the number of hard registers in each class;
   allocated and filled on the first call to reduce_class().  */
static int *class_sizes = 0;

/* Given two register classes, find the largest intersection between
   them.  If there is no intersection, return RETURNED_IF_EMPTY
   instead.  */
static int
reduce_class (int original_class, int limiting_class, int returned_if_empty)
{
  int cc = class_contents[original_class][0];
  int i, best = NO_REGS;
  int best_size = 0;

  if (original_class == limiting_class)
    return original_class;

  /* First call: count the members of every class once.  */
  if (!class_sizes)
    {
      int r;
      class_sizes = (int *) xmalloc (LIM_REG_CLASSES * sizeof (int));
      for (i = 0; i < LIM_REG_CLASSES; i++)
	{
	  class_sizes[i] = 0;
	  for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
	    if (class_contents[i][0] & (1 << r))
	      class_sizes[i]++;
	}
    }

  /* cc is now the intersection of the two classes; pick the biggest
     class wholly contained in it.  */
  cc &= class_contents[limiting_class][0];
  for (i = 0; i < LIM_REG_CLASSES; i++)
    {
      int ic = class_contents[i][0];

      if ((~cc & ic) == 0)
	if (best_size < class_sizes[i])
	  {
	    best = i;
	    best_size = class_sizes[i];
	  }

    }
  if (best == NO_REGS)
    return returned_if_empty;
  return best;
}
366
/* Used by m32c_register_move_cost to determine if a move is
   impossibly expensive.  Returns true if any register in RCLASS can
   hold a value of mode MODE.  */
static bool
class_can_hold_mode (reg_class_t rclass, enum machine_mode mode)
{
  /* Cache the results:  0=untested  1=no  2=yes */
  static char results[LIM_REG_CLASSES][MAX_MACHINE_MODE];

  if (results[(int) rclass][mode] == 0)
    {
      int r;
      /* Assume "no" until a member register proves otherwise.  */
      results[rclass][mode] = 1;
      for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
	if (in_hard_reg_set_p (reg_class_contents[(int) rclass], mode, r)
	    && HARD_REGNO_MODE_OK (r, mode))
	  {
	    results[rclass][mode] = 2;
	    break;
	  }
    }

#if DEBUG0
  fprintf (stderr, "class %s can hold %s? %s\n",
	   class_names[(int) rclass], mode_name[mode],
	   (results[rclass][mode] == 2) ? "yes" : "no");
#endif
  return results[(int) rclass][mode] == 2;
}
395
/* Run-time Target Specification.  */

/* Memregs are memory locations that gcc treats like general
   registers, as there are a limited number of true registers and the
   m32c families can use memory in most places that registers can be
   used.

   However, since memory accesses are more expensive than registers,
   we allow the user to limit the number of memregs available, in
   order to try to persuade gcc to try harder to use real registers.

   Memregs are provided by m32c-lib1.S.
*/

/* Presumably cleared elsewhere once changing the memreg count is no
   longer safe -- the consumer is not visible in this file.  */
int ok_to_change_target_memregs = TRUE;
412 /* Implements TARGET_OPTION_OVERRIDE. */
413
414 #undef TARGET_OPTION_OVERRIDE
415 #define TARGET_OPTION_OVERRIDE m32c_option_override
416
417 static void
418 m32c_option_override (void)
419 {
420 /* We limit memregs to 0..16, and provide a default. */
421 if (global_options_set.x_target_memregs)
422 {
423 if (target_memregs < 0 || target_memregs > 16)
424 error ("invalid target memregs value '%d'", target_memregs);
425 }
426 else
427 target_memregs = 16;
428
429 if (TARGET_A24)
430 flag_ivopts = 0;
431
432 /* This target defaults to strict volatile bitfields. */
433 if (flag_strict_volatile_bitfields < 0)
434 flag_strict_volatile_bitfields = 1;
435
436 /* r8c/m16c have no 16-bit indirect call, so thunks are involved.
437 This is always worse than an absolute call. */
438 if (TARGET_A16)
439 flag_no_function_cse = 1;
440
441 /* This wants to put insns between compares and their jumps. */
442 /* FIXME: The right solution is to properly trace the flags register
443 values, but that is too much work for stage 4. */
444 flag_combine_stack_adjustments = 0;
445 }
446
#undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m32c_override_options_after_change

/* Re-assert the A16 function-cse restriction after per-function
   option changes (e.g. optimize attributes) may have reset it.  */
static void
m32c_override_options_after_change (void)
{
  if (TARGET_A16)
    flag_no_function_cse = 1;
}
456
/* Defining data structures for per-function information */

/* The usual; we set up our machine_function data.  Returns a
   zero-initialized, GC-allocated machine_function.  */
static struct machine_function *
m32c_init_machine_status (void)
{
  return ggc_alloc_cleared_machine_function ();
}

/* Implements INIT_EXPANDERS.  We just set up to call the above
   function.  */
void
m32c_init_expanders (void)
{
  init_machine_status = m32c_init_machine_status;
}
473
/* Storage Layout */

/* Register Basics */

/* Basic Characteristics of Registers */

/* Whether a mode fits in a register is complex enough to warrant a
   table.  Each field is the number of hard registers needed to hold a
   value of roughly that mode size in the given register, or 0 if the
   mode cannot live there at all (see m32c_hard_regno_nregs_1).  Note
   the row order follows the hard register numbering: r0, r2, r1, r3.  */
static struct
{
  char qi_regs;  /* 1-byte values */
  char hi_regs;  /* 2-byte values */
  char pi_regs;  /* pointer-sized values (A24 parts) */
  char si_regs;  /* 4-byte values */
  char di_regs;  /* 8-byte values */
} nregs_table[FIRST_PSEUDO_REGISTER] =
{
  { 1, 1, 2, 2, 4 },		/* r0 */
  { 0, 1, 0, 0, 0 },		/* r2 */
  { 1, 1, 2, 2, 0 },		/* r1 */
  { 0, 1, 0, 0, 0 },		/* r3 */
  { 0, 1, 1, 0, 0 },		/* a0 */
  { 0, 1, 1, 0, 0 },		/* a1 */
  { 0, 1, 1, 0, 0 },		/* sb */
  { 0, 1, 1, 0, 0 },		/* fb */
  { 0, 1, 1, 0, 0 },		/* sp */
  { 1, 1, 1, 0, 0 },		/* pc */
  { 0, 0, 0, 0, 0 },		/* fl */
  { 1, 1, 1, 0, 0 },		/* ap */
  { 1, 1, 2, 2, 4 },		/* mem0 */
  { 1, 1, 2, 2, 4 },		/* mem1 */
  { 1, 1, 2, 2, 4 },		/* mem2 */
  { 1, 1, 2, 2, 4 },		/* mem3 */
  { 1, 1, 2, 2, 4 },		/* mem4 */
  { 1, 1, 2, 2, 0 },		/* mem5 */
  { 1, 1, 2, 2, 0 },		/* mem6 */
  { 1, 1, 0, 0, 0 },		/* mem7 */
};
512
/* Implements TARGET_CONDITIONAL_REGISTER_USAGE.  We adjust the number
   of available memregs, and select which registers need to be preserved
   across calls based on the chip family.  */

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE m32c_conditional_register_usage
void
m32c_conditional_register_usage (void)
{
  int i;

  if (0 <= target_memregs && target_memregs <= 16)
    {
      /* The command line option is bytes, but our "registers" are
	 16-bit words.  Mark every memreg beyond the requested count
	 fixed and drop it from MEM_REGS.  */
      for (i = (target_memregs+1)/2; i < 8; i++)
	{
	  fixed_regs[MEM0_REGNO + i] = 1;
	  CLEAR_HARD_REG_BIT (reg_class_contents[MEM_REGS], MEM0_REGNO + i);
	}
    }

  /* M32CM and M32C preserve more registers across function calls.  */
  if (TARGET_A24)
    {
      call_used_regs[R1_REGNO] = 0;
      call_used_regs[R2_REGNO] = 0;
      call_used_regs[R3_REGNO] = 0;
      call_used_regs[A0_REGNO] = 0;
      call_used_regs[A1_REGNO] = 0;
    }
}
545
/* How Values Fit in Registers */

/* Implements HARD_REGNO_NREGS.  This is complicated by the fact that
   different registers are different sizes from each other, *and* may
   be different sizes in different chip families.  Returns 0 when MODE
   does not fit in REGNO at all.  */
static int
m32c_hard_regno_nregs_1 (int regno, enum machine_mode mode)
{
  if (regno == FLG_REGNO && mode == CCmode)
    return 1;
  /* Pseudos get the generic words-needed computation.  */
  if (regno >= FIRST_PSEUDO_REGISTER)
    return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);

  /* Memregs are 16-bit words.  */
  if (regno >= MEM0_REGNO && regno <= MEM7_REGNO)
    return (GET_MODE_SIZE (mode) + 1) / 2;

  if (GET_MODE_SIZE (mode) <= 1)
    return nregs_table[regno].qi_regs;
  if (GET_MODE_SIZE (mode) <= 2)
    return nregs_table[regno].hi_regs;
  /* On 16-bit-address parts an SImode value in a0 spans a0/a1.  */
  if (regno == A0_REGNO && mode == SImode && TARGET_A16)
    return 2;
  if ((GET_MODE_SIZE (mode) <= 3 || mode == PSImode) && TARGET_A24)
    return nregs_table[regno].pi_regs;
  if (GET_MODE_SIZE (mode) <= 4)
    return nregs_table[regno].si_regs;
  if (GET_MODE_SIZE (mode) <= 8)
    return nregs_table[regno].di_regs;
  return 0;
}
576
577 int
578 m32c_hard_regno_nregs (int regno, enum machine_mode mode)
579 {
580 int rv = m32c_hard_regno_nregs_1 (regno, mode);
581 return rv ? rv : 1;
582 }
583
584 /* Implements HARD_REGNO_MODE_OK. The above function does the work
585 already; just test its return value. */
586 int
587 m32c_hard_regno_ok (int regno, enum machine_mode mode)
588 {
589 return m32c_hard_regno_nregs_1 (regno, mode) != 0;
590 }
591
592 /* Implements MODES_TIEABLE_P. In general, modes aren't tieable since
593 registers are all different sizes. However, since most modes are
594 bigger than our registers anyway, it's easier to implement this
595 function that way, leaving QImode as the only unique case. */
596 int
597 m32c_modes_tieable_p (enum machine_mode m1, enum machine_mode m2)
598 {
599 if (GET_MODE_SIZE (m1) == GET_MODE_SIZE (m2))
600 return 1;
601
602 #if 0
603 if (m1 == QImode || m2 == QImode)
604 return 0;
605 #endif
606
607 return 1;
608 }
609
/* Register Classes */

/* Implements REGNO_REG_CLASS.  Each real register gets its own
   singleton class; memregs share MEM_REGS; anything else (including
   pseudos) is ALL_REGS.  */
enum reg_class
m32c_regno_reg_class (int regno)
{
  switch (regno)
    {
    case R0_REGNO:
      return R0_REGS;
    case R1_REGNO:
      return R1_REGS;
    case R2_REGNO:
      return R2_REGS;
    case R3_REGNO:
      return R3_REGS;
    case A0_REGNO:
      return A0_REGS;
    case A1_REGNO:
      return A1_REGS;
    case SB_REGNO:
      return SB_REGS;
    case FB_REGNO:
      return FB_REGS;
    case SP_REGNO:
      return SP_REGS;
    case FLG_REGNO:
      return FLG_REGS;
    default:
      if (IS_MEM_REGNO (regno))
	return MEM_REGS;
      return ALL_REGS;
    }
}
644
/* Implements REG_CLASS_FROM_CONSTRAINT.  Note that some constraints only match
   for certain chip families.  All register constraints here are three
   characters starting with 'R'; unknown 'R' constraints are a fatal
   internal error.  */
int
m32c_reg_class_from_constraint (char c ATTRIBUTE_UNUSED, const char *s)
{
  if (memcmp (s, "Rsp", 3) == 0)
    return SP_REGS;
  if (memcmp (s, "Rfb", 3) == 0)
    return FB_REGS;
  if (memcmp (s, "Rsb", 3) == 0)
    return SB_REGS;
  /* Control registers differ between the 16- and 24-bit families.  */
  if (memcmp (s, "Rcr", 3) == 0)
    return TARGET_A16 ? CR_REGS : NO_REGS;
  if (memcmp (s, "Rcl", 3) == 0)
    return TARGET_A24 ? CR_REGS : NO_REGS;
  if (memcmp (s, "R0w", 3) == 0)
    return R0_REGS;
  if (memcmp (s, "R1w", 3) == 0)
    return R1_REGS;
  if (memcmp (s, "R2w", 3) == 0)
    return R2_REGS;
  if (memcmp (s, "R3w", 3) == 0)
    return R3_REGS;
  if (memcmp (s, "R02", 3) == 0)
    return R02_REGS;
  if (memcmp (s, "R13", 3) == 0)
    return R13_REGS;
  if (memcmp (s, "R03", 3) == 0)
    return R03_REGS;
  if (memcmp (s, "Rdi", 3) == 0)
    return DI_REGS;
  if (memcmp (s, "Rhl", 3) == 0)
    return HL_REGS;
  if (memcmp (s, "R23", 3) == 0)
    return R23_REGS;
  if (memcmp (s, "Ra0", 3) == 0)
    return A0_REGS;
  if (memcmp (s, "Ra1", 3) == 0)
    return A1_REGS;
  if (memcmp (s, "Raa", 3) == 0)
    return A_REGS;
  /* Address regs as 16-bit (A16) or 24-bit (A24) quantities.  */
  if (memcmp (s, "Raw", 3) == 0)
    return TARGET_A16 ? A_REGS : NO_REGS;
  if (memcmp (s, "Ral", 3) == 0)
    return TARGET_A24 ? A_REGS : NO_REGS;
  if (memcmp (s, "Rqi", 3) == 0)
    return QI_REGS;
  if (memcmp (s, "Rad", 3) == 0)
    return AD_REGS;
  if (memcmp (s, "Rsi", 3) == 0)
    return SI_REGS;
  if (memcmp (s, "Rhi", 3) == 0)
    return HI_REGS;
  if (memcmp (s, "Rhc", 3) == 0)
    return HC_REGS;
  if (memcmp (s, "Rra", 3) == 0)
    return RA_REGS;
  if (memcmp (s, "Rfl", 3) == 0)
    return FLG_REGS;
  if (memcmp (s, "Rmm", 3) == 0)
    {
      /* No memregs configured means nothing can match.  */
      if (fixed_regs[MEM0_REGNO])
	return NO_REGS;
      return MEM_REGS;
    }

  /* PSImode registers - i.e. whatever can hold a pointer.  */
  if (memcmp (s, "Rpi", 3) == 0)
    {
      if (TARGET_A16)
	return HI_REGS;
      else
	return RA_REGS;		/* r2r0 and r3r1 can hold pointers.  */
    }

  /* We handle this one as an EXTRA_CONSTRAINT.  */
  if (memcmp (s, "Rpa", 3) == 0)
    return NO_REGS;

  if (*s == 'R')
    {
      fprintf(stderr, "unrecognized R constraint: %.3s\n", s);
      gcc_unreachable();
    }

  return NO_REGS;
}
732
733 /* Implements REGNO_OK_FOR_BASE_P. */
734 int
735 m32c_regno_ok_for_base_p (int regno)
736 {
737 if (regno == A0_REGNO
738 || regno == A1_REGNO || regno >= FIRST_PSEUDO_REGISTER)
739 return 1;
740 return 0;
741 }
742
#define DEBUG_RELOAD 0

/* Implements PREFERRED_RELOAD_CLASS.  In general, prefer general
   registers of the appropriate size.  */
int
m32c_preferred_reload_class (rtx x, int rclass)
{
  int newclass = rclass;

#if DEBUG_RELOAD
  fprintf (stderr, "\npreferred_reload_class for %s is ",
	   class_names[rclass]);
#endif
  /* Nothing requested: pick a general class matching the value's
     size.  */
  if (rclass == NO_REGS)
    rclass = GET_MODE (x) == QImode ? HL_REGS : R03_REGS;

  /* Control registers: QImode values go to the r0h/r0l/r1h/r1l
     class.  */
  if (reg_classes_intersect_p (rclass, CR_REGS))
    {
      switch (GET_MODE (x))
	{
	case QImode:
	  newclass = HL_REGS;
	  break;
	default:
	  /*newclass = HI_REGS; */
	  break;
	}
    }

  else if (newclass == QI_REGS && GET_MODE_SIZE (GET_MODE (x)) > 2)
    newclass = SI_REGS;
  else if (GET_MODE_SIZE (GET_MODE (x)) > 4
	   && ~class_contents[rclass][0] & 0x000f)
    newclass = DI_REGS;

  /* Narrow RCLASS to its intersection with the class chosen above,
     keeping RCLASS itself when they don't intersect.  */
  rclass = reduce_class (rclass, newclass, rclass);

  if (GET_MODE (x) == QImode)
    rclass = reduce_class (rclass, HL_REGS, rclass);

#if DEBUG_RELOAD
  fprintf (stderr, "%s\n", class_names[rclass]);
  debug_rtx (x);

  if (GET_CODE (x) == MEM
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
    fprintf (stderr, "Glorm!\n");
#endif
  return rclass;
}
794
/* Implements PREFERRED_OUTPUT_RELOAD_CLASS.  Output reloads get the
   same treatment as input reloads.  */
int
m32c_preferred_output_reload_class (rtx x, int rclass)
{
  return m32c_preferred_reload_class (x, rclass);
}
801
/* Implements LIMIT_RELOAD_CLASS.  We basically want to avoid using
   address registers for reloads since they're needed for address
   reloads.  */
int
m32c_limit_reload_class (enum machine_mode mode, int rclass)
{
#if DEBUG_RELOAD
  fprintf (stderr, "limit_reload_class for %s: %s ->",
	   mode_name[mode], class_names[rclass]);
#endif

  /* Narrow to the general class that matches MODE's size.  */
  if (mode == QImode)
    rclass = reduce_class (rclass, HL_REGS, rclass);
  else if (mode == HImode)
    rclass = reduce_class (rclass, HI_REGS, rclass);
  else if (mode == SImode)
    rclass = reduce_class (rclass, SI_REGS, rclass);

  /* Unless the caller explicitly wants address registers, steer
     reloads away from them.  */
  if (rclass != A_REGS)
    rclass = reduce_class (rclass, DI_REGS, rclass);

#if DEBUG_RELOAD
  fprintf (stderr, " %s\n", class_names[rclass]);
#endif
  return rclass;
}
828
/* Implements SECONDARY_RELOAD_CLASS.  QImode have to be reloaded in
   r0 or r1, as those are the only real QImode registers.  CR regs get
   reloaded through appropriately sized general or address
   registers.  */
int
m32c_secondary_reload_class (int rclass, enum machine_mode mode, rtx x)
{
  int cc = class_contents[rclass][0];
#if DEBUG0
  fprintf (stderr, "\nsecondary reload class %s %s\n",
	   class_names[rclass], mode_name[mode]);
  debug_rtx (x);
#endif
  /* QImode memory into a class with no QImode-capable registers
     (i.e. only r2/r3 members) needs a QI_REGS intermediary.  */
  if (mode == QImode
      && GET_CODE (x) == MEM && (cc & ~class_contents[R23_REGS][0]) == 0)
    return QI_REGS;
  /* Moving sb/fb/sp into a control-register class goes through a
     general register (A16) or an address register (A24).  */
  if (reg_classes_intersect_p (rclass, CR_REGS)
      && GET_CODE (x) == REG
      && REGNO (x) >= SB_REGNO && REGNO (x) <= SP_REGNO)
    return TARGET_A16 ? HI_REGS : A_REGS;
  return NO_REGS;
}
851
852 /* Implements TARGET_CLASS_LIKELY_SPILLED_P. A_REGS is needed for address
853 reloads. */
854
855 #undef TARGET_CLASS_LIKELY_SPILLED_P
856 #define TARGET_CLASS_LIKELY_SPILLED_P m32c_class_likely_spilled_p
857
858 static bool
859 m32c_class_likely_spilled_p (reg_class_t regclass)
860 {
861 if (regclass == A_REGS)
862 return true;
863
864 return (reg_class_size[(int) regclass] == 1);
865 }
866
867 /* Implements CLASS_MAX_NREGS. We calculate this according to its
868 documented meaning, to avoid potential inconsistencies with actual
869 class definitions. */
870 int
871 m32c_class_max_nregs (int regclass, enum machine_mode mode)
872 {
873 int rn, max = 0;
874
875 for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
876 if (class_contents[regclass][0] & (1 << rn))
877 {
878 int n = m32c_hard_regno_nregs (rn, mode);
879 if (max < n)
880 max = n;
881 }
882 return max;
883 }
884
/* Implements CANNOT_CHANGE_MODE_CLASS.  Only r0 and r1 can change to
   QI (r0l, r1l) because the chip doesn't support QI ops on other
   registers (well, it does on a0/a1 but if we let gcc do that, reload
   suffers).  Otherwise, we allow changes to larger modes.  */
int
m32c_cannot_change_mode_class (enum machine_mode from,
			       enum machine_mode to, int rclass)
{
  int rn;
#if DEBUG0
  fprintf (stderr, "cannot change from %s to %s in %s\n",
	   mode_name[from], mode_name[to], class_names[rclass]);
#endif

  /* If the larger mode isn't allowed in any of these registers, we
     can't allow the change.  */
  for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
    if (class_contents[rclass][0] & (1 << rn))
      if (! m32c_hard_regno_ok (rn, to))
	return 1;

  /* 0x1ffa appears to be the mask of all hard regs other than r0 and
     r1 (given the r0,r2,r1,r3,... numbering) -- i.e. forbid changing
     to QImode in any class containing a non-r0/r1 register.  */
  if (to == QImode)
    return (class_contents[rclass][0] & 0x1ffa);

  /* 0x0005 is the r0|r1 mask: those may widen from any multi-byte
     mode.  */
  if (class_contents[rclass][0] & 0x0005 /* r0, r1 */
      && GET_MODE_SIZE (from) > 1)
    return 0;
  if (GET_MODE_SIZE (from) > 2)	/* all other regs */
    return 0;

  return 1;
}
917
/* Helpers for the rest of the file.  */
/* TRUE if the rtx is a REG rtx for the given register.  */
#define IS_REG(rtx,regno) (GET_CODE (rtx) == REG \
			   && REGNO (rtx) == regno)
/* TRUE if the rtx is a pseudo - specifically, one we can use as a
   base register in address calculations (hence the "strict"
   argument).  The arg pointer also counts, since it is eliminated
   to a real register later.  */
#define IS_PSEUDO(rtx,strict) (!strict && GET_CODE (rtx) == REG \
			       && (REGNO (rtx) == AP_REGNO \
				   || REGNO (rtx) >= FIRST_PSEUDO_REGISTER))
928
/* Implements CONST_OK_FOR_CONSTRAINT_P.  Currently, all constant
   constraints start with 'I', with the next two characters indicating
   the type and size of the range allowed.  */
int
m32c_const_ok_for_constraint_p (HOST_WIDE_INT value,
				char c ATTRIBUTE_UNUSED, const char *str)
{
  /* s=signed u=unsigned n=nonzero m=minus l=log2able,
     [sun] bits [SUN] bytes, p=pointer size
     I[-0-9][0-9] matches that number */
  if (memcmp (str, "Is3", 3) == 0)
    {
      return (-8 <= value && value <= 7);
    }
  if (memcmp (str, "IS1", 3) == 0)
    {
      return (-128 <= value && value <= 127);
    }
  if (memcmp (str, "IS2", 3) == 0)
    {
      return (-32768 <= value && value <= 32767);
    }
  if (memcmp (str, "IU2", 3) == 0)
    {
      return (0 <= value && value <= 65535);
    }
  if (memcmp (str, "IU3", 3) == 0)
    {
      return (0 <= value && value <= 0x00ffffff);
    }
  /* The In* forms additionally exclude zero (the middle "value &&").  */
  if (memcmp (str, "In4", 3) == 0)
    {
      return (-8 <= value && value && value <= 8);
    }
  if (memcmp (str, "In5", 3) == 0)
    {
      return (-16 <= value && value && value <= 16);
    }
  if (memcmp (str, "In6", 3) == 0)
    {
      return (-32 <= value && value && value <= 32);
    }
  if (memcmp (str, "IM2", 3) == 0)
    {
      return (-65536 <= value && value && value <= -1);
    }
  /* Single-bit masks (Il*) and single-zero-bit masks (Im*), for byte
     and word widths.  */
  if (memcmp (str, "Ilb", 3) == 0)
    {
      int b = exact_log2 (value);
      return (b >= 0 && b <= 7);
    }
  if (memcmp (str, "Imb", 3) == 0)
    {
      int b = exact_log2 ((value ^ 0xff) & 0xff);
      return (b >= 0 && b <= 7);
    }
  if (memcmp (str, "ImB", 3) == 0)
    {
      int b = exact_log2 ((value ^ 0xffff) & 0xffff);
      return (b >= 0 && b <= 7);
    }
  if (memcmp (str, "Ilw", 3) == 0)
    {
      int b = exact_log2 (value);
      return (b >= 0 && b <= 15);
    }
  if (memcmp (str, "Imw", 3) == 0)
    {
      int b = exact_log2 ((value ^ 0xffff) & 0xffff);
      return (b >= 0 && b <= 15);
    }
  if (memcmp (str, "I00", 3) == 0)
    {
      return (value == 0);
    }
  return 0;
}
1006
/* TRUE if X is the a0 register or a pseudo.  */
#define A0_OR_PSEUDO(x) (IS_REG(x, A0_REGNO) || REGNO (x) >= FIRST_PSEUDO_REGISTER)

/* Implements EXTRA_CONSTRAINT_STR (see next function too).  'S' is
   for memory constraints, plus "Rpa" for PARALLEL rtx's we use for
   call return values.  Matching is done against the string form of
   VALUE produced by encode_pattern().  */
int
m32c_extra_constraint_p2 (rtx value, char c ATTRIBUTE_UNUSED, const char *str)
{
  encode_pattern (value);

  /* Far-space MEMs only ever satisfy "SF"; everything below the far
     check deals with near addresses.  */
  if (far_addr_space_p (value))
    {
      if (memcmp (str, "SF", 2) == 0)
	{
	  return (   (RTX_IS ("mr")
		      && A0_OR_PSEUDO (patternr[1])
		      && GET_MODE (patternr[1]) == SImode)
		     || (RTX_IS ("m+^Sri")
			 && A0_OR_PSEUDO (patternr[4])
			 && GET_MODE (patternr[4]) == HImode)
		     || (RTX_IS ("m+^Srs")
			 && A0_OR_PSEUDO (patternr[4])
			 && GET_MODE (patternr[4]) == HImode)
		     || (RTX_IS ("m+^S+ris")
			 && A0_OR_PSEUDO (patternr[5])
			 && GET_MODE (patternr[5]) == HImode)
		     || RTX_IS ("ms")
		     );
	}
      return 0;
    }

  if (memcmp (str, "Sd", 2) == 0)
    {
      /* This is the common "src/dest" address */
      rtx r;
      if (GET_CODE (value) == MEM && CONSTANT_P (XEXP (value, 0)))
	return 1;
      if (RTX_IS ("ms") || RTX_IS ("m+si"))
	return 1;
      if (RTX_IS ("m++rii"))
	{
	  /* PRE_MODIFY-style fb-relative access at offset zero.  */
	  if (REGNO (patternr[3]) == FB_REGNO
	      && INTVAL (patternr[4]) == 0)
	    return 1;
	}
      if (RTX_IS ("mr"))
	r = patternr[1];
      else if (RTX_IS ("m+ri") || RTX_IS ("m+rs") || RTX_IS ("m+r+si"))
	r = patternr[2];
      else
	return 0;
      /* sp-relative addresses are handled by "Ss" instead.  */
      if (REGNO (r) == SP_REGNO)
	return 0;
      return m32c_legitimate_address_p (GET_MODE (value), XEXP (value, 0), 1);
    }
  else if (memcmp (str, "Sa", 2) == 0)
    {
      /* Addresses based on an address register.  */
      rtx r;
      if (RTX_IS ("mr"))
	r = patternr[1];
      else if (RTX_IS ("m+ri"))
	r = patternr[2];
      else
	return 0;
      return (IS_REG (r, A0_REGNO) || IS_REG (r, A1_REGNO));
    }
  else if (memcmp (str, "Si", 2) == 0)
    {
      /* Constant addresses.  */
      return (RTX_IS ("mi") || RTX_IS ("ms") || RTX_IS ("m+si"));
    }
  else if (memcmp (str, "Ss", 2) == 0)
    {
      /* sp-relative addresses.  */
      return ((RTX_IS ("mr")
	       && (IS_REG (patternr[1], SP_REGNO)))
	      || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SP_REGNO))));
    }
  else if (memcmp (str, "Sf", 2) == 0)
    {
      /* fb-relative addresses.  */
      return ((RTX_IS ("mr")
	       && (IS_REG (patternr[1], FB_REGNO)))
	      || (RTX_IS ("m+ri") && (IS_REG (patternr[2], FB_REGNO))));
    }
  else if (memcmp (str, "Sb", 2) == 0)
    {
      /* sb-relative addresses.  */
      return ((RTX_IS ("mr")
	       && (IS_REG (patternr[1], SB_REGNO)))
	      || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SB_REGNO))));
    }
  else if (memcmp (str, "Sp", 2) == 0)
    {
      /* Absolute addresses 0..0x1fff used for bit addressing (I/O ports) */
      return (RTX_IS ("mi")
	      && !(INTVAL (patternr[1]) & ~0x1fff));
    }
  else if (memcmp (str, "S1", 2) == 0)
    {
      return r1h_operand (value, QImode);
    }
  else if (memcmp (str, "SF", 2) == 0)
    {
      /* "SF" was fully handled in the far-address branch above.  */
      return 0;
    }

  gcc_assert (str[0] != 'S');

  /* NOTE(review): only two bytes are compared here, so any "Rp?"
     constraint would match -- confirm "Rpa" is the only such
     constraint that can reach this point.  */
  if (memcmp (str, "Rpa", 2) == 0)
    return GET_CODE (value) == PARALLEL;

  return 0;
}
1118
/* This is for when we're debugging the above.  Thin wrapper that can
   dump each constraint query and its result when DEBUG0 is on.  */
int
m32c_extra_constraint_p (rtx value, char c, const char *str)
{
  int rv = m32c_extra_constraint_p2 (value, c, str);
#if DEBUG0
  fprintf (stderr, "\nconstraint %.*s: %d\n", CONSTRAINT_LEN (c, str), str,
	   rv);
  debug_rtx (value);
#endif
  return rv;
}
1131
1132 /* Implements EXTRA_MEMORY_CONSTRAINT. Currently, we only use strings
1133 starting with 'S'. */
1134 int
1135 m32c_extra_memory_constraint (char c, const char *str ATTRIBUTE_UNUSED)
1136 {
1137 return c == 'S';
1138 }
1139
1140 /* Implements EXTRA_ADDRESS_CONSTRAINT. We reserve 'A' strings for these,
1141 but don't currently define any. */
1142 int
1143 m32c_extra_address_constraint (char c, const char *str ATTRIBUTE_UNUSED)
1144 {
1145 return c == 'A';
1146 }
1147
1148 /* STACK AND CALLING */
1149
1150 /* Frame Layout */
1151
1152 /* Implements RETURN_ADDR_RTX. Note that R8C and M16C push 24 bits
1153 (yes, THREE bytes) onto the stack for the return address, but we
1154 don't support pointers bigger than 16 bits on those chips. This
1155 will likely wreak havoc with exception unwinding. FIXME. */
1156 rtx
1157 m32c_return_addr_rtx (int count)
1158 {
1159 enum machine_mode mode;
1160 int offset;
1161 rtx ra_mem;
1162
1163 if (count)
1164 return NULL_RTX;
1165 /* we want 2[$fb] */
1166
1167 if (TARGET_A24)
1168 {
1169 /* It's four bytes */
1170 mode = PSImode;
1171 offset = 4;
1172 }
1173 else
1174 {
1175 /* FIXME: it's really 3 bytes */
1176 mode = HImode;
1177 offset = 2;
1178 }
1179
1180 ra_mem =
1181 gen_rtx_MEM (mode, plus_constant (gen_rtx_REG (Pmode, FP_REGNO), offset));
1182 return copy_to_mode_reg (mode, ra_mem);
1183 }
1184
1185 /* Implements INCOMING_RETURN_ADDR_RTX. See comment above. */
1186 rtx
1187 m32c_incoming_return_addr_rtx (void)
1188 {
1189 /* we want [sp] */
1190 return gen_rtx_MEM (PSImode, gen_rtx_REG (PSImode, SP_REGNO));
1191 }
1192
1193 /* Exception Handling Support */
1194
1195 /* Implements EH_RETURN_DATA_REGNO. Choose registers able to hold
1196 pointers. */
1197 int
1198 m32c_eh_return_data_regno (int n)
1199 {
1200 switch (n)
1201 {
1202 case 0:
1203 return A0_REGNO;
1204 case 1:
1205 if (TARGET_A16)
1206 return R3_REGNO;
1207 else
1208 return R1_REGNO;
1209 default:
1210 return INVALID_REGNUM;
1211 }
1212 }
1213
1214 /* Implements EH_RETURN_STACKADJ_RTX. Saved and used later in
1215 m32c_emit_eh_epilogue. */
1216 rtx
1217 m32c_eh_return_stackadj_rtx (void)
1218 {
1219 if (!cfun->machine->eh_stack_adjust)
1220 {
1221 rtx sa;
1222
1223 sa = gen_rtx_REG (Pmode, R0_REGNO);
1224 cfun->machine->eh_stack_adjust = sa;
1225 }
1226 return cfun->machine->eh_stack_adjust;
1227 }
1228
1229 /* Registers That Address the Stack Frame */
1230
1231 /* Implements DWARF_FRAME_REGNUM and DBX_REGISTER_NUMBER. Note that
1232 the original spec called for dwarf numbers to vary with register
1233 width as well, for example, r0l, r0, and r2r0 would each have
1234 different dwarf numbers. GCC doesn't support this, and we don't do
1235 it, and gdb seems to like it this way anyway. */
1236 unsigned int
1237 m32c_dwarf_frame_regnum (int n)
1238 {
1239 switch (n)
1240 {
1241 case R0_REGNO:
1242 return 5;
1243 case R1_REGNO:
1244 return 6;
1245 case R2_REGNO:
1246 return 7;
1247 case R3_REGNO:
1248 return 8;
1249 case A0_REGNO:
1250 return 9;
1251 case A1_REGNO:
1252 return 10;
1253 case FB_REGNO:
1254 return 11;
1255 case SB_REGNO:
1256 return 19;
1257
1258 case SP_REGNO:
1259 return 12;
1260 case PC_REGNO:
1261 return 13;
1262 default:
1263 return DWARF_FRAME_REGISTERS + 1;
1264 }
1265 }
1266
1267 /* The frame looks like this:
1268
1269 ap -> +------------------------------
1270 | Return address (3 or 4 bytes)
1271 | Saved FB (2 or 4 bytes)
1272 fb -> +------------------------------
1273 | local vars
1274 | register saves fb
1275 | through r0 as needed
1276 sp -> +------------------------------
1277 */
1278
/* We use this to wrap all emitted insns in the prologue: marking an
   insn frame-related makes dwarf2out emit CFI for it.  Returns X for
   convenient chaining.  */
static rtx
F (rtx x)
{
  RTX_FRAME_RELATED_P (x) = 1;
  return x;
}
1286
/* This maps register numbers to the PUSHM/POPM bitfield, and tells us
   how much the stack pointer moves for each, for each cpu family.
   On A16 every entry pushes two bytes; on A24 the address-class
   registers push four.  */
static struct
{
  int reg1;	  /* Hard register number.  */
  int bit;	  /* Bit for this register in the PUSHM/POPM operand.  */
  int a16_bytes;  /* Stack bytes consumed on R8C/M16C.  */
  int a24_bytes;  /* Stack bytes consumed on M32C.  */
} pushm_info[] =
{
  /* These are in reverse push (nearest-to-sp) order. */
  { R0_REGNO, 0x80, 2, 2 },
  { R1_REGNO, 0x40, 2, 2 },
  { R2_REGNO, 0x20, 2, 2 },
  { R3_REGNO, 0x10, 2, 2 },
  { A0_REGNO, 0x08, 2, 4 },
  { A1_REGNO, 0x04, 2, 4 },
  { SB_REGNO, 0x02, 2, 4 },
  { FB_REGNO, 0x01, 2, 4 }
};

/* Number of entries in pushm_info.  */
#define PUSHM_N (sizeof(pushm_info)/sizeof(pushm_info[0]))
1309
1310 /* Returns TRUE if we need to save/restore the given register. We
1311 save everything for exception handlers, so that any register can be
1312 unwound. For interrupt handlers, we save everything if the handler
1313 calls something else (because we don't know what *that* function
1314 might do), but try to be a bit smarter if the handler is a leaf
1315 function. We always save $a0, though, because we use that in the
1316 epilogue to copy $fb to $sp. */
1317 static int
1318 need_to_save (int regno)
1319 {
1320 if (fixed_regs[regno])
1321 return 0;
1322 if (crtl->calls_eh_return)
1323 return 1;
1324 if (regno == FP_REGNO)
1325 return 0;
1326 if (cfun->machine->is_interrupt
1327 && (!cfun->machine->is_leaf
1328 || (regno == A0_REGNO
1329 && m32c_function_needs_enter ())
1330 ))
1331 return 1;
1332 if (df_regs_ever_live_p (regno)
1333 && (!call_used_regs[regno] || cfun->machine->is_interrupt))
1334 return 1;
1335 return 0;
1336 }
1337
/* This function contains all the intelligence about saving and
   restoring registers.  It always figures out the register save set.
   When called with PP_justcount, it merely returns the size of the
   save set (for eliminating the frame pointer, for example).  When
   called with PP_pushm or PP_popm, it emits the appropriate
   instructions for saving (pushm) or restoring (popm) the
   registers.  */
static int
m32c_pushm_popm (Push_Pop_Type ppt)
{
  int reg_mask = 0;		/* PUSHM/POPM opcode bitfield being built.  */
  int byte_count = 0, bytes;	/* Total stack bytes moved so far.  */
  int i;
  rtx dwarf_set[PUSHM_N];	/* One CFI SET per pushed register.  */
  int n_dwarfs = 0;
  int nosave_mask = 0;		/* Bits of registers that must NOT be
				   saved (they carry the return value).  */

  /* When the value is returned in registers (a PARALLEL), popm must
     not clobber them, so exclude them from the save set.  eh_return
     and interrupt handlers save everything regardless.  */
  if (crtl->return_rtx
      && GET_CODE (crtl->return_rtx) == PARALLEL
      && !(crtl->calls_eh_return || cfun->machine->is_interrupt))
    {
      rtx exp = XVECEXP (crtl->return_rtx, 0, 0);
      rtx rv = XEXP (exp, 0);
      int rv_bytes = GET_MODE_SIZE (GET_MODE (rv));

      /* NOTE(review): the trailing comments look swapped relative to
	 the conditions (0x20 is $r2's bit, 0xf0 covers $r0-$r3) --
	 confirm which registers each return mode occupies before
	 changing anything here.  */
      if (rv_bytes > 2)
	nosave_mask |= 0x20;	/* PSI, SI */
      else
	nosave_mask |= 0xf0;	/* DF */
      if (rv_bytes > 4)
	nosave_mask |= 0x50;	/* DI */
    }

  /* Walk the pushm table (nearest-to-sp first), accumulating the
     opcode mask, the byte count, and (for PP_pushm) the CFI notes.  */
  for (i = 0; i < (int) PUSHM_N; i++)
    {
      /* Skip if neither register needs saving. */
      if (!need_to_save (pushm_info[i].reg1))
	continue;

      if (pushm_info[i].bit & nosave_mask)
	continue;

      reg_mask |= pushm_info[i].bit;
      bytes = TARGET_A16 ? pushm_info[i].a16_bytes : pushm_info[i].a24_bytes;

      if (ppt == PP_pushm)
	{
	  enum machine_mode mode = (bytes == 2) ? HImode : SImode;
	  rtx addr;

	  /* Always use stack_pointer_rtx instead of calling
	     rtx_gen_REG ourselves.  Code elsewhere in GCC assumes
	     that there is a single rtx representing the stack pointer,
	     namely stack_pointer_rtx, and uses == to recognize it. */
	  addr = stack_pointer_rtx;

	  if (byte_count != 0)
	    addr = gen_rtx_PLUS (GET_MODE (addr), addr, GEN_INT (byte_count));

	  /* Record "mem[sp + offset] = reg" for the unwinder.  */
	  dwarf_set[n_dwarfs++] =
	    gen_rtx_SET (VOIDmode,
			 gen_rtx_MEM (mode, addr),
			 gen_rtx_REG (mode, pushm_info[i].reg1));
	  F (dwarf_set[n_dwarfs - 1]);

	}
      byte_count += bytes;
    }

  /* Interrupt handlers record the mask for the prologue to use and do
     not emit a pushm here.  Bit 0x01 ($fb per pushm_info) is dropped
     -- presumably because the interrupt prologue handles $fb itself;
     confirm against m32c_emit_prologue.  */
  if (cfun->machine->is_interrupt)
    {
      cfun->machine->intr_pushm = reg_mask & 0xfe;
      reg_mask = 0;
      byte_count = 0;
    }

  /* Interrupt handlers additionally save any live memory-mapped
     pseudo-registers (mem0..mem7), two bytes apiece.  */
  if (cfun->machine->is_interrupt)
    for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
      if (need_to_save (i))
	{
	  byte_count += 2;
	  cfun->machine->intr_pushmem[i - MEM0_REGNO] = 1;
	}

  if (ppt == PP_pushm && byte_count)
    {
      rtx note = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (n_dwarfs + 1));
      rtx pushm;

      if (reg_mask)
	{
	  /* Entry 0 of the note is the sp adjustment; entries 1..n are
	     the individual register stores recorded above.  */
	  XVECEXP (note, 0, 0)
	    = gen_rtx_SET (VOIDmode,
			   stack_pointer_rtx,
			   gen_rtx_PLUS (GET_MODE (stack_pointer_rtx),
					 stack_pointer_rtx,
					 GEN_INT (-byte_count)));
	  F (XVECEXP (note, 0, 0));

	  for (i = 0; i < n_dwarfs; i++)
	    XVECEXP (note, 0, i + 1) = dwarf_set[i];

	  pushm = F (emit_insn (gen_pushm (GEN_INT (reg_mask))));

	  add_reg_note (pushm, REG_FRAME_RELATED_EXPR, note);
	}

      /* Push the live mem0..mem7 registers individually.  */
      if (cfun->machine->is_interrupt)
	for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
	  if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
	    {
	      if (TARGET_A16)
		pushm = emit_insn (gen_pushhi_16 (gen_rtx_REG (HImode, i)));
	      else
		pushm = emit_insn (gen_pushhi_24 (gen_rtx_REG (HImode, i)));
	      F (pushm);
	    }
    }
  if (ppt == PP_popm && byte_count)
    {
      /* Restore in reverse order: mem7..mem0 first, then popm.  */
      if (cfun->machine->is_interrupt)
	for (i = MEM7_REGNO; i >= MEM0_REGNO; i--)
	  if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
	    {
	      if (TARGET_A16)
		emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, i)));
	      else
		emit_insn (gen_pophi_24 (gen_rtx_REG (HImode, i)));
	    }
      if (reg_mask)
	emit_insn (gen_popm (GEN_INT (reg_mask)));
    }

  return byte_count;
}
1473
1474 /* Implements INITIAL_ELIMINATION_OFFSET. See the comment above that
1475 diagrams our call frame. */
1476 int
1477 m32c_initial_elimination_offset (int from, int to)
1478 {
1479 int ofs = 0;
1480
1481 if (from == AP_REGNO)
1482 {
1483 if (TARGET_A16)
1484 ofs += 5;
1485 else
1486 ofs += 8;
1487 }
1488
1489 if (to == SP_REGNO)
1490 {
1491 ofs += m32c_pushm_popm (PP_justcount);
1492 ofs += get_frame_size ();
1493 }
1494
1495 /* Account for push rounding. */
1496 if (TARGET_A24)
1497 ofs = (ofs + 1) & ~1;
1498 #if DEBUG0
1499 fprintf (stderr, "initial_elimination_offset from=%d to=%d, ofs=%d\n", from,
1500 to, ofs);
1501 #endif
1502 return ofs;
1503 }
1504
1505 /* Passing Function Arguments on the Stack */
1506
1507 /* Implements PUSH_ROUNDING. The R8C and M16C have byte stacks, the
1508 M32C has word stacks. */
1509 unsigned int
1510 m32c_push_rounding (int n)
1511 {
1512 if (TARGET_R8C || TARGET_M16C)
1513 return n;
1514 return (n + 1) & ~1;
1515 }
1516
1517 /* Passing Arguments in Registers */
1518
/* Implements TARGET_FUNCTION_ARG.  Arguments are passed partly in
   registers, partly on stack.  If our function returns a struct, a
   pointer to a buffer for it is at the top of the stack (last thing
   pushed).  The first few real arguments may be in registers as
   follows:

   R8C/M16C:	arg1 in r1 if it's QI or HI (else it's pushed on stack)
		arg2 in r2 if it's HI (else pushed on stack)
		rest on stack
   M32C:	arg1 in r0 if it's QI or HI (else it's pushed on stack)
		rest on stack

   Structs are not passed in registers, even if they fit.  Only
   integer and pointer types are passed in registers.

   Note that when arg1 doesn't fit in r1, arg2 may still be passed in
   r2 if it fits.  */
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG m32c_function_arg
static rtx
m32c_function_arg (cumulative_args_t ca_v,
		   enum machine_mode mode, const_tree type, bool named)
{
  CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);

  /* Can return a reg, parallel, or 0 for stack */
  rtx rv = NULL_RTX;
#if DEBUG0
  fprintf (stderr, "func_arg %d (%s, %d)\n",
	   ca->parm_num, mode_name[mode], named);
  debug_tree (type);
#endif

  /* VOIDmode marks the end of the argument list.  */
  if (mode == VOIDmode)
    return GEN_INT (0);

  /* A pending hidden struct-return pointer, or an unnamed (variadic)
     argument, always goes on the stack.  */
  if (ca->force_mem || !named)
    {
#if DEBUG0
      fprintf (stderr, "func arg: force %d named %d, mem\n", ca->force_mem,
	       named);
#endif
      return NULL_RTX;
    }

  /* NOTE(review): no type is both INTEGRAL_TYPE_P and POINTER_TYPE_P,
     so this condition can never be true -- the test is dead.  The
     block comment above suggests the intent was to reject types that
     are *neither* integral *nor* pointer.  Confirm against the
     documented ABI before changing: a "fix" would alter how small
     non-integer scalars are passed.  */
  if (type && INTEGRAL_TYPE_P (type) && POINTER_TYPE_P (type))
    return NULL_RTX;

  /* Aggregates never go in registers, even when they would fit.  */
  if (type && AGGREGATE_TYPE_P (type))
    return NULL_RTX;

  switch (ca->parm_num)
    {
    case 1:
      /* First argument: $r1 on A16, $r0 on A24, if 1 or 2 bytes.  */
      if (GET_MODE_SIZE (mode) == 1 || GET_MODE_SIZE (mode) == 2)
	rv = gen_rtx_REG (mode, TARGET_A16 ? R1_REGNO : R0_REGNO);
      break;

    case 2:
      /* Second argument: $r2, A16 only, only if exactly 2 bytes.  */
      if (TARGET_A16 && GET_MODE_SIZE (mode) == 2)
	rv = gen_rtx_REG (mode, R2_REGNO);
      break;
    }

#if DEBUG0
  debug_rtx (rv);
#endif
  return rv;
}
1588
1589 #undef TARGET_PASS_BY_REFERENCE
1590 #define TARGET_PASS_BY_REFERENCE m32c_pass_by_reference
1591 static bool
1592 m32c_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
1593 enum machine_mode mode ATTRIBUTE_UNUSED,
1594 const_tree type ATTRIBUTE_UNUSED,
1595 bool named ATTRIBUTE_UNUSED)
1596 {
1597 return 0;
1598 }
1599
1600 /* Implements INIT_CUMULATIVE_ARGS. */
1601 void
1602 m32c_init_cumulative_args (CUMULATIVE_ARGS * ca,
1603 tree fntype,
1604 rtx libname ATTRIBUTE_UNUSED,
1605 tree fndecl,
1606 int n_named_args ATTRIBUTE_UNUSED)
1607 {
1608 if (fntype && aggregate_value_p (TREE_TYPE (fntype), fndecl))
1609 ca->force_mem = 1;
1610 else
1611 ca->force_mem = 0;
1612 ca->parm_num = 1;
1613 }
1614
1615 /* Implements TARGET_FUNCTION_ARG_ADVANCE. force_mem is set for
1616 functions returning structures, so we always reset that. Otherwise,
1617 we only need to know the sequence number of the argument to know what
1618 to do with it. */
1619 #undef TARGET_FUNCTION_ARG_ADVANCE
1620 #define TARGET_FUNCTION_ARG_ADVANCE m32c_function_arg_advance
1621 static void
1622 m32c_function_arg_advance (cumulative_args_t ca_v,
1623 enum machine_mode mode ATTRIBUTE_UNUSED,
1624 const_tree type ATTRIBUTE_UNUSED,
1625 bool named ATTRIBUTE_UNUSED)
1626 {
1627 CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
1628
1629 if (ca->force_mem)
1630 ca->force_mem = 0;
1631 else
1632 ca->parm_num++;
1633 }
1634
1635 /* Implements TARGET_FUNCTION_ARG_BOUNDARY. */
1636 #undef TARGET_FUNCTION_ARG_BOUNDARY
1637 #define TARGET_FUNCTION_ARG_BOUNDARY m32c_function_arg_boundary
1638 static unsigned int
1639 m32c_function_arg_boundary (enum machine_mode mode ATTRIBUTE_UNUSED,
1640 const_tree type ATTRIBUTE_UNUSED)
1641 {
1642 return (TARGET_A16 ? 8 : 16);
1643 }
1644
1645 /* Implements FUNCTION_ARG_REGNO_P. */
1646 int
1647 m32c_function_arg_regno_p (int r)
1648 {
1649 if (TARGET_A24)
1650 return (r == R0_REGNO);
1651 return (r == R1_REGNO || r == R2_REGNO);
1652 }
1653
1654 /* HImode and PSImode are the two "native" modes as far as GCC is
1655 concerned, but the chips also support a 32-bit mode which is used
1656 for some opcodes in R8C/M16C and for reset vectors and such. */
1657 #undef TARGET_VALID_POINTER_MODE
1658 #define TARGET_VALID_POINTER_MODE m32c_valid_pointer_mode
1659 static bool
1660 m32c_valid_pointer_mode (enum machine_mode mode)
1661 {
1662 if (mode == HImode
1663 || mode == PSImode
1664 || mode == SImode
1665 )
1666 return 1;
1667 return 0;
1668 }
1669
1670 /* How Scalar Function Values Are Returned */
1671
/* Implements TARGET_LIBCALL_VALUE.  Most values are returned in $r0, or some
   combination of registers starting there (r2r0 for longs, r3r1r2r0
   for long long, r3r2r1r0 for doubles), except that that ABI
   currently doesn't work because it ends up using all available
   general registers and gcc often can't compile it.  So, instead, we
   return anything bigger than 16 bits in "mem0" (effectively, a
   memory location).  */

#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE m32c_libcall_value

static rtx
m32c_libcall_value (enum machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
{
  /* return reg or parallel */
#if 0
  /* FIXME: GCC has difficulty returning large values in registers,
     because that ties up most of the general registers and gives the
     register allocator little to work with.  Until we can resolve
     this, large values are returned in memory.  */
  if (mode == DFmode)
    {
      rtx rv;

      rv = gen_rtx_PARALLEL (mode, rtvec_alloc (4));
      XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R0_REGNO),
					      GEN_INT (0));
      XVECEXP (rv, 0, 1) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R1_REGNO),
					      GEN_INT (2));
      XVECEXP (rv, 0, 2) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R2_REGNO),
					      GEN_INT (4));
      XVECEXP (rv, 0, 3) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R3_REGNO),
					      GEN_INT (6));
      return rv;
    }

  if (TARGET_A24 && GET_MODE_SIZE (mode) > 2)
    {
      rtx rv;

      rv = gen_rtx_PARALLEL (mode, rtvec_alloc (1));
      XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (mode,
							   R0_REGNO),
					      GEN_INT (0));
      return rv;
    }
#endif

  /* Anything wider than two bytes comes back in the mem0 pseudo
     register; 8- and 16-bit values come back in $r0.  */
  if (GET_MODE_SIZE (mode) > 2)
    return gen_rtx_REG (mode, MEM0_REGNO);
  return gen_rtx_REG (mode, R0_REGNO);
}
1733
1734 /* Implements TARGET_FUNCTION_VALUE. Functions and libcalls have the same
1735 conventions. */
1736
1737 #undef TARGET_FUNCTION_VALUE
1738 #define TARGET_FUNCTION_VALUE m32c_function_value
1739
1740 static rtx
1741 m32c_function_value (const_tree valtype,
1742 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
1743 bool outgoing ATTRIBUTE_UNUSED)
1744 {
1745 /* return reg or parallel */
1746 const enum machine_mode mode = TYPE_MODE (valtype);
1747 return m32c_libcall_value (mode, NULL_RTX);
1748 }
1749
1750 /* Implements TARGET_FUNCTION_VALUE_REGNO_P. */
1751
1752 #undef TARGET_FUNCTION_VALUE_REGNO_P
1753 #define TARGET_FUNCTION_VALUE_REGNO_P m32c_function_value_regno_p
1754
1755 static bool
1756 m32c_function_value_regno_p (const unsigned int regno)
1757 {
1758 return (regno == R0_REGNO || regno == MEM0_REGNO);
1759 }
1760
1761 /* How Large Values Are Returned */
1762
1763 /* We return structures by pushing the address on the stack, even if
1764 we use registers for the first few "real" arguments. */
1765 #undef TARGET_STRUCT_VALUE_RTX
1766 #define TARGET_STRUCT_VALUE_RTX m32c_struct_value_rtx
1767 static rtx
1768 m32c_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
1769 int incoming ATTRIBUTE_UNUSED)
1770 {
1771 return 0;
1772 }
1773
1774 /* Function Entry and Exit */
1775
1776 /* Implements EPILOGUE_USES. Interrupts restore all registers. */
1777 int
1778 m32c_epilogue_uses (int regno ATTRIBUTE_UNUSED)
1779 {
1780 if (cfun->machine->is_interrupt)
1781 return 1;
1782 return 0;
1783 }
1784
1785 /* Implementing the Varargs Macros */
1786
1787 #undef TARGET_STRICT_ARGUMENT_NAMING
1788 #define TARGET_STRICT_ARGUMENT_NAMING m32c_strict_argument_naming
1789 static bool
1790 m32c_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
1791 {
1792 return 1;
1793 }
1794
1795 /* Trampolines for Nested Functions */
1796
1797 /*
1798 m16c:
1799 1 0000 75C43412 mov.w #0x1234,a0
1800 2 0004 FC000000 jmp.a label
1801
1802 m32c:
1803 1 0000 BC563412 mov.l:s #0x123456,a0
1804 2 0004 CC000000 jmp.a label
1805 */
1806
1807 /* Implements TRAMPOLINE_SIZE. */
1808 int
1809 m32c_trampoline_size (void)
1810 {
1811 /* Allocate extra space so we can avoid the messy shifts when we
1812 initialize the trampoline; we just write past the end of the
1813 opcode. */
1814 return TARGET_A16 ? 8 : 10;
1815 }
1816
/* Implements TRAMPOLINE_ALIGNMENT: trampolines are word aligned.  */
int
m32c_trampoline_alignment (void)
{
  return 2;
}
1823
/* Implements TARGET_TRAMPOLINE_INIT.  Fills in the trampoline at
   M_TRAMP with the mov-then-jmp sequence shown in the comment above:
   load the static chain CHAINVAL into $a0, then jump absolutely to
   FNDECL's address.  */

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT m32c_trampoline_init
static void
m32c_trampoline_init (rtx m_tramp, tree fndecl, rtx chainval)
{
  rtx function = XEXP (DECL_RTL (fndecl), 0);

/* Shorthand: the trampoline memory at byte offset I, in mode M.  */
#define A0(m,i) adjust_address (m_tramp, m, i)
  if (TARGET_A16)
    {
      /* Note: we subtract a "word" because the moves want signed
	 constants, not unsigned constants.  */
      emit_move_insn (A0 (HImode, 0), GEN_INT (0xc475 - 0x10000)); /* mov.w */
      emit_move_insn (A0 (HImode, 2), chainval);
      emit_move_insn (A0 (QImode, 4), GEN_INT (0xfc - 0x100));	   /* jmp.a */
      /* We use 16-bit addresses here, but store the zero to turn it
	 into a 24-bit offset.  */
      emit_move_insn (A0 (HImode, 5), function);
      emit_move_insn (A0 (QImode, 7), GEN_INT (0x00));
    }
  else
    {
      /* Note that the PSI moves actually write 4 bytes.  Make sure we
	 write stuff out in the right order, and leave room for the
	 extra byte at the end.  */
      emit_move_insn (A0 (QImode, 0), GEN_INT (0xbc - 0x100));	 /* mov.l:s */
      emit_move_insn (A0 (PSImode, 1), chainval);
      emit_move_insn (A0 (QImode, 4), GEN_INT (0xcc - 0x100));	 /* jmp.a */
      emit_move_insn (A0 (PSImode, 5), function);
    }
#undef A0
}
1858
1859 /* Implicit Calls to Library Routines */
1860
1861 #undef TARGET_INIT_LIBFUNCS
1862 #define TARGET_INIT_LIBFUNCS m32c_init_libfuncs
1863 static void
1864 m32c_init_libfuncs (void)
1865 {
1866 /* We do this because the M32C has an HImode operand, but the
1867 M16C has an 8-bit operand. Since gcc looks at the match data
1868 and not the expanded rtl, we have to reset the optab so that
1869 the right modes are found. */
1870 if (TARGET_A24)
1871 {
1872 set_optab_handler (cstore_optab, QImode, CODE_FOR_cstoreqi4_24);
1873 set_optab_handler (cstore_optab, HImode, CODE_FOR_cstorehi4_24);
1874 set_optab_handler (cstore_optab, PSImode, CODE_FOR_cstorepsi4_24);
1875 }
1876 }
1877
1878 /* Addressing Modes */
1879
/* The r8c/m32c family supports a wide range of non-orthogonal
   addressing modes, including the ability to double-indirect on *some*
   of them.  Not all insns support all modes, either, but we rely on
   predicates and constraints to deal with that.  */
#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P m32c_legitimate_address_p
bool
m32c_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
{
  int mode_adjust;
  /* Constants (symbols, labels, ints) are always legitimate.  */
  if (CONSTANT_P (x))
    return 1;

  /* The address itself must be in the chip's pointer mode: HImode (or
     SImode for far pointers) on A16, PSImode on A24.  */
  if (TARGET_A16 && GET_MODE (x) != HImode && GET_MODE (x) != SImode)
    return 0;
  if (TARGET_A24 && GET_MODE (x) != PSImode)
    return 0;

  /* Wide references to memory will be split after reload, so we must
     ensure that all parts of such splits remain legitimate
     addresses.  */
  mode_adjust = GET_MODE_SIZE (mode) - 1;

  /* allowing PLUS yields mem:HI(plus:SI(mem:SI(plus:SI in m32c_split_move */
  if (GET_CODE (x) == PRE_DEC
      || GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_MODIFY)
    {
      /* Auto-modification is only supported on the stack pointer.  */
      return (GET_CODE (XEXP (x, 0)) == REG
	      && REGNO (XEXP (x, 0)) == SP_REGNO);
    }

#if 0
  /* This is the double indirection detection, but it currently
     doesn't work as cleanly as this code implies, so until we've had
     a chance to debug it, leave it disabled.  */
  if (TARGET_A24 && GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) != PLUS)
    {
#if DEBUG_DOUBLE
      fprintf (stderr, "double indirect\n");
#endif
      x = XEXP (x, 0);
    }
#endif

  encode_pattern (x);
  if (RTX_IS ("r"))
    {
      /* Most indexable registers can be used without displacements,
	 although some of them will be emitted with an explicit zero
	 to please the assembler.  */
      switch (REGNO (patternr[0]))
	{
	case A1_REGNO:
	case SB_REGNO:
	case FB_REGNO:
	case SP_REGNO:
	  if (TARGET_A16 && GET_MODE (x) == SImode)
	    return 0;
	  /* FALLTHRU -- deliberate: aside from the SImode check these
	     registers are as good a base as $a0.  */
	case A0_REGNO:
	  return 1;

	default:
	  if (IS_PSEUDO (patternr[0], strict))
	    return 1;
	  return 0;
	}
    }

  /* No SImode base+displacement addressing on the 16-bit chips.  */
  if (TARGET_A16 && GET_MODE (x) == SImode)
    return 0;

  if (RTX_IS ("+ri"))
    {
      /* This is more interesting, because different base registers
	 allow for different displacements - both range and signedness
	 - and it differs from chip series to chip series too.  */
      int rn = REGNO (patternr[1]);
      HOST_WIDE_INT offs = INTVAL (patternr[2]);
      switch (rn)
	{
	case A0_REGNO:
	case A1_REGNO:
	case SB_REGNO:
	  /* The syntax only allows positive offsets, but when the
	     offsets span the entire memory range, we can simulate
	     negative offsets by wrapping.  */
	  if (TARGET_A16)
	    return (offs >= -65536 && offs <= 65535 - mode_adjust);
	  if (rn == SB_REGNO)
	    return (offs >= 0 && offs <= 65535 - mode_adjust);
	  /* A0 or A1 */
	  return (offs >= -16777216 && offs <= 16777215);

	case FB_REGNO:
	  /* $fb has an 8-bit displacement on A16, 16-bit on A24.  */
	  if (TARGET_A16)
	    return (offs >= -128 && offs <= 127 - mode_adjust);
	  return (offs >= -65536 && offs <= 65535 - mode_adjust);

	case SP_REGNO:
	  return (offs >= -128 && offs <= 127 - mode_adjust);

	default:
	  if (IS_PSEUDO (patternr[1], strict))
	    return 1;
	  return 0;
	}
    }
  if (RTX_IS ("+rs") || RTX_IS ("+r+si"))
    {
      rtx reg = patternr[1];

      /* We don't know where the symbol is, so only allow base
	 registers which support displacements spanning the whole
	 address range.  */
      switch (REGNO (reg))
	{
	case A0_REGNO:
	case A1_REGNO:
	  /* $sb needs a secondary reload, but since it's involved in
	     memory address reloads too, we don't deal with it very
	     well.  */
	  /*    case SB_REGNO: */
	  return 1;
	default:
	  if (IS_PSEUDO (reg, strict))
	    return 1;
	  return 0;
	}
    }
  return 0;
}
2011
2012 /* Implements REG_OK_FOR_BASE_P. */
2013 int
2014 m32c_reg_ok_for_base_p (rtx x, int strict)
2015 {
2016 if (GET_CODE (x) != REG)
2017 return 0;
2018 switch (REGNO (x))
2019 {
2020 case A0_REGNO:
2021 case A1_REGNO:
2022 case SB_REGNO:
2023 case FB_REGNO:
2024 case SP_REGNO:
2025 return 1;
2026 default:
2027 if (IS_PSEUDO (x, strict))
2028 return 1;
2029 return 0;
2030 }
2031 }
2032
2033 /* We have three choices for choosing fb->aN offsets. If we choose -128,
2034 we need one MOVA -128[fb],aN opcode and 16-bit aN displacements,
2035 like this:
2036 EB 4B FF mova -128[$fb],$a0
2037 D8 0C FF FF mov.w:Q #0,-1[$a0]
2038
2039 Alternately, we subtract the frame size, and hopefully use 8-bit aN
2040 displacements:
2041 7B F4 stc $fb,$a0
2042 77 54 00 01 sub #256,$a0
2043 D8 08 01 mov.w:Q #0,1[$a0]
2044
2045 If we don't offset (i.e. offset by zero), we end up with:
2046 7B F4 stc $fb,$a0
2047 D8 0C 00 FF mov.w:Q #0,-256[$a0]
2048
2049 We have to subtract *something* so that we have a PLUS rtx to mark
2050 that we've done this reload. The -128 offset will never result in
2051 an 8-bit aN offset, and the payoff for the second case is five
2052 loads *if* those loads are within 256 bytes of the other end of the
2053 frame, so the third case seems best. Note that we subtract the
2054 zero, but detect that in the addhi3 pattern. */
2055
2056 #define BIG_FB_ADJ 0
2057
2058 /* Implements LEGITIMIZE_ADDRESS. The only address we really have to
2059 worry about is frame base offsets, as $fb has a limited
2060 displacement range. We deal with this by attempting to reload $fb
2061 itself into an address register; that seems to result in the best
2062 code. */
2063 #undef TARGET_LEGITIMIZE_ADDRESS
2064 #define TARGET_LEGITIMIZE_ADDRESS m32c_legitimize_address
2065 static rtx
2066 m32c_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
2067 enum machine_mode mode)
2068 {
2069 #if DEBUG0
2070 fprintf (stderr, "m32c_legitimize_address for mode %s\n", mode_name[mode]);
2071 debug_rtx (x);
2072 fprintf (stderr, "\n");
2073 #endif
2074
2075 if (GET_CODE (x) == PLUS
2076 && GET_CODE (XEXP (x, 0)) == REG
2077 && REGNO (XEXP (x, 0)) == FB_REGNO
2078 && GET_CODE (XEXP (x, 1)) == CONST_INT
2079 && (INTVAL (XEXP (x, 1)) < -128
2080 || INTVAL (XEXP (x, 1)) > (128 - GET_MODE_SIZE (mode))))
2081 {
2082 /* reload FB to A_REGS */
2083 rtx temp = gen_reg_rtx (Pmode);
2084 x = copy_rtx (x);
2085 emit_insn (gen_rtx_SET (VOIDmode, temp, XEXP (x, 0)));
2086 XEXP (x, 0) = temp;
2087 }
2088
2089 return x;
2090 }
2091
/* Implements LEGITIMIZE_RELOAD_ADDRESS.  See comment above.  Returns
   1 when a reload was pushed for *X, 0 to let reload handle it.  */
int
m32c_legitimize_reload_address (rtx * x,
				enum machine_mode mode,
				int opnum,
				int type, int ind_levels ATTRIBUTE_UNUSED)
{
#if DEBUG0
  fprintf (stderr, "\nm32c_legitimize_reload_address for mode %s\n",
	   mode_name[mode]);
  debug_rtx (*x);
#endif

  /* At one point, this function tried to get $fb copied to an address
     register, which in theory would maximize sharing, but gcc was
     *also* still trying to reload the whole address, and we'd run out
     of address registers.  So we let gcc do the naive (but safe)
     reload instead, when the above function doesn't handle it for
     us.

     The code below is a second attempt at the above.  */

  /* Case 1: ($fb + big constant).  Split the displacement so that the
     ($fb + adjustment) part can be reloaded into an address register,
     leaving the remainder as an in-range offset.  */
  if (GET_CODE (*x) == PLUS
      && GET_CODE (XEXP (*x, 0)) == REG
      && REGNO (XEXP (*x, 0)) == FB_REGNO
      && GET_CODE (XEXP (*x, 1)) == CONST_INT
      && (INTVAL (XEXP (*x, 1)) < -128
	  || INTVAL (XEXP (*x, 1)) > (128 - GET_MODE_SIZE (mode))))
    {
      rtx sum;
      int offset = INTVAL (XEXP (*x, 1));
      int adjustment = -BIG_FB_ADJ;	/* Zero; see BIG_FB_ADJ above.  */

      sum = gen_rtx_PLUS (Pmode, XEXP (*x, 0),
			  GEN_INT (adjustment));
      *x = gen_rtx_PLUS (Pmode, sum, GEN_INT (offset - adjustment));
      if (type == RELOAD_OTHER)
	type = RELOAD_FOR_OTHER_ADDRESS;
      push_reload (sum, NULL_RTX, &XEXP (*x, 0), NULL,
		   A_REGS, Pmode, VOIDmode, 0, 0, opnum,
		   (enum reload_type) type);
      return 1;
    }

  /* Case 2: (($fb + const) + const), the shape produced above --
     reload the inner sum into an address register.  */
  if (GET_CODE (*x) == PLUS
      && GET_CODE (XEXP (*x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (*x, 0), 0)) == REG
      && REGNO (XEXP (XEXP (*x, 0), 0)) == FB_REGNO
      && GET_CODE (XEXP (XEXP (*x, 0), 1)) == CONST_INT
      && GET_CODE (XEXP (*x, 1)) == CONST_INT
      )
    {
      if (type == RELOAD_OTHER)
	type = RELOAD_FOR_OTHER_ADDRESS;
      push_reload (XEXP (*x, 0), NULL_RTX, &XEXP (*x, 0), NULL,
		   A_REGS, Pmode, VOIDmode, 0, 0, opnum,
		   (enum reload_type) type);
      return 1;
    }

  return 0;
}
2154
2155 /* Return the appropriate mode for a named address pointer. */
2156 #undef TARGET_ADDR_SPACE_POINTER_MODE
2157 #define TARGET_ADDR_SPACE_POINTER_MODE m32c_addr_space_pointer_mode
2158 static enum machine_mode
2159 m32c_addr_space_pointer_mode (addr_space_t addrspace)
2160 {
2161 switch (addrspace)
2162 {
2163 case ADDR_SPACE_GENERIC:
2164 return TARGET_A24 ? PSImode : HImode;
2165 case ADDR_SPACE_FAR:
2166 return SImode;
2167 default:
2168 gcc_unreachable ();
2169 }
2170 }
2171
2172 /* Return the appropriate mode for a named address address. */
2173 #undef TARGET_ADDR_SPACE_ADDRESS_MODE
2174 #define TARGET_ADDR_SPACE_ADDRESS_MODE m32c_addr_space_address_mode
2175 static enum machine_mode
2176 m32c_addr_space_address_mode (addr_space_t addrspace)
2177 {
2178 switch (addrspace)
2179 {
2180 case ADDR_SPACE_GENERIC:
2181 return TARGET_A24 ? PSImode : HImode;
2182 case ADDR_SPACE_FAR:
2183 return SImode;
2184 default:
2185 gcc_unreachable ();
2186 }
2187 }
2188
/* Like m32c_legitimate_address_p, except with named addresses.  */
#undef TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P
#define TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P \
  m32c_addr_space_legitimate_address_p
static bool
m32c_addr_space_legitimate_address_p (enum machine_mode mode, rtx x,
				      bool strict, addr_space_t as)
{
  if (as == ADDR_SPACE_FAR)
    {
      /* NOTE(review): far-space addresses are rejected outright on
	 A24; only the 16-bit chips get the patterns below.  Confirm
	 this is intentional.  */
      if (TARGET_A24)
	return 0;
      encode_pattern (x);
      /* A bare SImode register: $a0 only (or a pseudo pre-reload).  */
      if (RTX_IS ("r"))
	{
	  if (GET_MODE (x) != SImode)
	    return 0;
	  switch (REGNO (patternr[0]))
	    {
	    case A0_REGNO:
	      return 1;

	    default:
	      if (IS_PSEUDO (patternr[0], strict))
		return 1;
	      return 0;
	    }
	}
      /* Extended HImode register plus constant: 20-bit unsigned
	 offset off $a0.  (See encode_pattern for the code letters --
	 '^' and 'S' presumably mark the extension wrapper.)  */
      if (RTX_IS ("+^Sri"))
	{
	  int rn = REGNO (patternr[3]);
	  HOST_WIDE_INT offs = INTVAL (patternr[4]);
	  if (GET_MODE (patternr[3]) != HImode)
	    return 0;
	  switch (rn)
	    {
	    case A0_REGNO:
	      return (offs >= 0 && offs <= 0xfffff);

	    default:
	      if (IS_PSEUDO (patternr[3], strict))
		return 1;
	      return 0;
	    }
	}
      /* Extended HImode register plus symbol: $a0 only.  */
      if (RTX_IS ("+^Srs"))
	{
	  int rn = REGNO (patternr[3]);
	  if (GET_MODE (patternr[3]) != HImode)
	    return 0;
	  switch (rn)
	    {
	    case A0_REGNO:
	      return 1;

	    default:
	      if (IS_PSEUDO (patternr[3], strict))
		return 1;
	      return 0;
	    }
	}
      /* Extended (register + int) plus symbol: $a0 only.  */
      if (RTX_IS ("+^S+ris"))
	{
	  int rn = REGNO (patternr[4]);
	  if (GET_MODE (patternr[4]) != HImode)
	    return 0;
	  switch (rn)
	    {
	    case A0_REGNO:
	      return 1;

	    default:
	      if (IS_PSEUDO (patternr[4], strict))
		return 1;
	      return 0;
	    }
	}
      /* A bare symbol reference is always acceptable.  */
      if (RTX_IS ("s"))
	{
	  return 1;
	}
      return 0;
    }

  else if (as != ADDR_SPACE_GENERIC)
    gcc_unreachable ();

  /* Generic space: defer to the ordinary legitimacy check.  */
  return m32c_legitimate_address_p (mode, x, strict);
}
2278
2279 /* Like m32c_legitimate_address, except with named address support. */
2280 #undef TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS
2281 #define TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS m32c_addr_space_legitimize_address
2282 static rtx
2283 m32c_addr_space_legitimize_address (rtx x, rtx oldx, enum machine_mode mode,
2284 addr_space_t as)
2285 {
2286 if (as != ADDR_SPACE_GENERIC)
2287 {
2288 #if DEBUG0
2289 fprintf (stderr, "\033[36mm32c_addr_space_legitimize_address for mode %s\033[0m\n", mode_name[mode]);
2290 debug_rtx (x);
2291 fprintf (stderr, "\n");
2292 #endif
2293
2294 if (GET_CODE (x) != REG)
2295 {
2296 x = force_reg (SImode, x);
2297 }
2298 return x;
2299 }
2300
2301 return m32c_legitimize_address (x, oldx, mode);
2302 }
2303
2304 /* Determine if one named address space is a subset of another. */
2305 #undef TARGET_ADDR_SPACE_SUBSET_P
2306 #define TARGET_ADDR_SPACE_SUBSET_P m32c_addr_space_subset_p
2307 static bool
2308 m32c_addr_space_subset_p (addr_space_t subset, addr_space_t superset)
2309 {
2310 gcc_assert (subset == ADDR_SPACE_GENERIC || subset == ADDR_SPACE_FAR);
2311 gcc_assert (superset == ADDR_SPACE_GENERIC || superset == ADDR_SPACE_FAR);
2312
2313 if (subset == superset)
2314 return true;
2315
2316 else
2317 return (subset == ADDR_SPACE_GENERIC && superset == ADDR_SPACE_FAR);
2318 }
2319
2320 #undef TARGET_ADDR_SPACE_CONVERT
2321 #define TARGET_ADDR_SPACE_CONVERT m32c_addr_space_convert
2322 /* Convert from one address space to another. */
2323 static rtx
2324 m32c_addr_space_convert (rtx op, tree from_type, tree to_type)
2325 {
2326 addr_space_t from_as = TYPE_ADDR_SPACE (TREE_TYPE (from_type));
2327 addr_space_t to_as = TYPE_ADDR_SPACE (TREE_TYPE (to_type));
2328 rtx result;
2329
2330 gcc_assert (from_as == ADDR_SPACE_GENERIC || from_as == ADDR_SPACE_FAR);
2331 gcc_assert (to_as == ADDR_SPACE_GENERIC || to_as == ADDR_SPACE_FAR);
2332
2333 if (to_as == ADDR_SPACE_GENERIC && from_as == ADDR_SPACE_FAR)
2334 {
2335 /* This is unpredictable, as we're truncating off usable address
2336 bits. */
2337
2338 result = gen_reg_rtx (HImode);
2339 emit_move_insn (result, simplify_subreg (HImode, op, SImode, 0));
2340 return result;
2341 }
2342 else if (to_as == ADDR_SPACE_FAR && from_as == ADDR_SPACE_GENERIC)
2343 {
2344 /* This always works. */
2345 result = gen_reg_rtx (SImode);
2346 emit_insn (gen_zero_extendhisi2 (result, op));
2347 return result;
2348 }
2349 else
2350 gcc_unreachable ();
2351 }
2352
/* Condition Code Status */

/* Implements TARGET_FIXED_CONDITION_CODE_REGS.  The FLG register is
   the single condition-code register; there is no secondary one, so
   *P2 gets INVALID_REGNUM.  Always returns true to signal that the
   CC registers are fixed.  */
#undef TARGET_FIXED_CONDITION_CODE_REGS
#define TARGET_FIXED_CONDITION_CODE_REGS m32c_fixed_condition_code_regs
static bool
m32c_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
{
  *p1 = FLG_REGNO;
  *p2 = INVALID_REGNUM;
  return true;
}
2364
/* Describing Relative Costs of Operations */

/* Implements TARGET_REGISTER_MOVE_COST.  We make impossible moves
   prohibitively expensive, like trying to put QIs in r2/r3 (there are
   no opcodes to do that).  We also discourage use of mem* registers
   since they're really memory.

   Note the order of the checks matters: the class_can_hold_mode
   check overwrites (not adds to) any cost set by the QImode/R23
   check, and the CR/MEM penalties are then added on top.  */

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST m32c_register_move_cost

static int
m32c_register_move_cost (enum machine_mode mode, reg_class_t from,
			 reg_class_t to)
{
  int cost = COSTS_N_INSNS (3);
  HARD_REG_SET cc;

  /* FIXME: pick real values, but not 2 for now.  */
  /* cc = union of the source and destination register classes.  */
  COPY_HARD_REG_SET (cc, reg_class_contents[(int) from]);
  IOR_HARD_REG_SET (cc, reg_class_contents[(int) to]);

  /* QImode values cannot live in r2/r3; if the move can only use
     those registers it is effectively impossible.  */
  if (mode == QImode
      && hard_reg_set_intersect_p (cc, reg_class_contents[R23_REGS]))
    {
      if (hard_reg_set_subset_p (cc, reg_class_contents[R23_REGS]))
	cost = COSTS_N_INSNS (1000);
      else
	cost = COSTS_N_INSNS (80);
    }

  if (!class_can_hold_mode (from, mode) || !class_can_hold_mode (to, mode))
    cost = COSTS_N_INSNS (1000);

  /* Control registers are expensive to move through.  */
  if (reg_classes_intersect_p (from, CR_REGS))
    cost += COSTS_N_INSNS (5);

  if (reg_classes_intersect_p (to, CR_REGS))
    cost += COSTS_N_INSNS (5);

  /* mem* pseudo-registers are really memory; penalize heavily when
     the move must use them, lightly when it merely may.  */
  if (from == MEM_REGS || to == MEM_REGS)
    cost += COSTS_N_INSNS (50);
  else if (reg_classes_intersect_p (from, MEM_REGS)
	   || reg_classes_intersect_p (to, MEM_REGS))
    cost += COSTS_N_INSNS (10);

#if DEBUG0
  fprintf (stderr, "register_move_cost %s from %s to %s = %d\n",
	   mode_name[mode], class_names[(int) from], class_names[(int) to],
	   cost);
#endif
  return cost;
}
2417
/* Implements TARGET_MEMORY_MOVE_COST.  A flat placeholder cost for
   any register<->memory move, independent of mode, class, and
   direction.  */

#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST m32c_memory_move_cost

static int
m32c_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
		       reg_class_t rclass ATTRIBUTE_UNUSED,
		       bool in ATTRIBUTE_UNUSED)
{
  /* FIXME: pick real values.  */
  return COSTS_N_INSNS (10);
}
2431
/* Here we try to describe when we use multiple opcodes for one RTX so
   that gcc knows when to use them.  Returns true when *TOTAL is the
   final cost for X (no recursion into sub-expressions), false to let
   the generic code add in sub-expression costs.  */
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS m32c_rtx_costs
static bool
m32c_rtx_costs (rtx x, int code, int outer_code, int *total,
		bool speed ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case REG:
      /* mem0..mem7 are memory-backed pseudo-registers; strongly
	 discourage their use.  */
      if (REGNO (x) >= MEM0_REGNO && REGNO (x) <= MEM7_REGNO)
	*total += COSTS_N_INSNS (500);
      else
	*total += COSTS_N_INSNS (1);
      return true;

    case ASHIFT:
    case LSHIFTRT:
    case ASHIFTRT:
      /* Variable and large shift counts need extra moves to stage
	 the count in r1h.  */
      if (GET_CODE (XEXP (x, 1)) != CONST_INT)
	{
	  /* mov.b r1l, r1h */
	  *total += COSTS_N_INSNS (1);
	  return true;
	}
      if (INTVAL (XEXP (x, 1)) > 8
	  || INTVAL (XEXP (x, 1)) < -8)
	{
	  /* mov.b #N, r1l */
	  /* mov.b r1l, r1h */
	  *total += COSTS_N_INSNS (2);
	  return true;
	}
      return true;

    case LE:
    case LEU:
    case LT:
    case LTU:
    case GT:
    case GTU:
    case GE:
    case GEU:
    case NE:
    case EQ:
      /* Storing a comparison result costs extra; a bare comparison
	 (e.g. in a branch) falls through to the default cost.  */
      if (outer_code == SET)
	{
	  *total += COSTS_N_INSNS (2);
	  return true;
	}
      break;

    case ZERO_EXTRACT:
      {
	/* NOTE(review): this assumes operand 0 of the ZERO_EXTRACT is
	   a MEM, so XEXP (dest, 0) is its address — confirm against
	   the bit-manipulation patterns that generate these.  */
	rtx dest = XEXP (x, 0);
	rtx addr = XEXP (dest, 0);
	switch (GET_CODE (addr))
	  {
	  case CONST_INT:
	    *total += COSTS_N_INSNS (1);
	    break;
	  case SYMBOL_REF:
	    *total += COSTS_N_INSNS (3);
	    break;
	  default:
	    *total += COSTS_N_INSNS (2);
	    break;
	  }
	return true;
      }
      break;

    default:
      /* Reasonable default.  */
      /* SImode operations on 16-bit parts take roughly two insns.  */
      if (TARGET_A16 && GET_MODE(x) == SImode)
	*total += COSTS_N_INSNS (2);
      break;
    }
  return false;
}
2513
2514 #undef TARGET_ADDRESS_COST
2515 #define TARGET_ADDRESS_COST m32c_address_cost
2516 static int
2517 m32c_address_cost (rtx addr, bool speed ATTRIBUTE_UNUSED)
2518 {
2519 int i;
2520 /* fprintf(stderr, "\naddress_cost\n");
2521 debug_rtx(addr);*/
2522 switch (GET_CODE (addr))
2523 {
2524 case CONST_INT:
2525 i = INTVAL (addr);
2526 if (i == 0)
2527 return COSTS_N_INSNS(1);
2528 if (0 < i && i <= 255)
2529 return COSTS_N_INSNS(2);
2530 if (0 < i && i <= 65535)
2531 return COSTS_N_INSNS(3);
2532 return COSTS_N_INSNS(4);
2533 case SYMBOL_REF:
2534 return COSTS_N_INSNS(4);
2535 case REG:
2536 return COSTS_N_INSNS(1);
2537 case PLUS:
2538 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
2539 {
2540 i = INTVAL (XEXP (addr, 1));
2541 if (i == 0)
2542 return COSTS_N_INSNS(1);
2543 if (0 < i && i <= 255)
2544 return COSTS_N_INSNS(2);
2545 if (0 < i && i <= 65535)
2546 return COSTS_N_INSNS(3);
2547 }
2548 return COSTS_N_INSNS(4);
2549 default:
2550 return 0;
2551 }
2552 }
2553
2554 /* Defining the Output Assembler Language */
2555
2556 /* Output of Data */
2557
2558 /* We may have 24 bit sizes, which is the native address size.
2559 Currently unused, but provided for completeness. */
2560 #undef TARGET_ASM_INTEGER
2561 #define TARGET_ASM_INTEGER m32c_asm_integer
2562 static bool
2563 m32c_asm_integer (rtx x, unsigned int size, int aligned_p)
2564 {
2565 switch (size)
2566 {
2567 case 3:
2568 fprintf (asm_out_file, "\t.3byte\t");
2569 output_addr_const (asm_out_file, x);
2570 fputc ('\n', asm_out_file);
2571 return true;
2572 case 4:
2573 if (GET_CODE (x) == SYMBOL_REF)
2574 {
2575 fprintf (asm_out_file, "\t.long\t");
2576 output_addr_const (asm_out_file, x);
2577 fputc ('\n', asm_out_file);
2578 return true;
2579 }
2580 break;
2581 }
2582 return default_assemble_integer (x, size, aligned_p);
2583 }
2584
/* Output of Assembler Instructions */

/* We use a lookup table because the addressing modes are non-orthogonal.
   Each entry is consumed by m32c_print_operand below:

   - CODE is the PRINT_OPERAND letter code this entry applies to
     (0 means "no code").
   - PATTERN is matched against the string produced by encode_pattern
     for the operand ('r' register, 'i' integer, 's' symbol, 'm' mem,
     '+' plus, '^S'/'^Z' extensions — see encode_pattern earlier in
     this file).
   - FORMAT describes the assembler text to emit: digits are indices
     into patternr[] (the sub-rtxes captured by encode_pattern), 'z'
     inserts a zero displacement when the base register requires one,
     '\\' quotes the next character, and any other character is
     emitted literally.  */

static struct
{
  char code;
  char const *pattern;
  char const *format;
}
const conversions[] = {
  { 0, "r", "0" },

  { 0, "mr", "z[1]" },
  { 0, "m+ri", "3[2]" },
  { 0, "m+rs", "3[2]" },
  { 0, "m+^Zrs", "5[4]" },
  { 0, "m+^Zri", "5[4]" },
  { 0, "m+^Z+ris", "7+6[5]" },
  { 0, "m+^Srs", "5[4]" },
  { 0, "m+^Sri", "5[4]" },
  { 0, "m+^S+ris", "7+6[5]" },
  { 0, "m+r+si", "4+5[2]" },
  { 0, "ms", "1" },
  { 0, "mi", "1" },
  { 0, "m+si", "2+3" },

  { 0, "mmr", "[z[2]]" },
  { 0, "mm+ri", "[4[3]]" },
  { 0, "mm+rs", "[4[3]]" },
  { 0, "mm+r+si", "[5+6[3]]" },
  { 0, "mms", "[[2]]" },
  { 0, "mmi", "[[2]]" },
  { 0, "mm+si", "[4[3]]" },

  { 0, "i", "#0" },
  { 0, "s", "#0" },
  { 0, "+si", "#1+2" },
  { 0, "l", "#0" },

  { 'l', "l", "0" },
  { 'd', "i", "0" },
  { 'd', "s", "0" },
  { 'd', "+si", "1+2" },
  { 'D', "i", "0" },
  { 'D', "s", "0" },
  { 'D', "+si", "1+2" },
  { 'x', "i", "#0" },
  { 'X', "i", "#0" },
  { 'm', "i", "#0" },
  { 'b', "i", "#0" },
  { 'B', "i", "0" },
  { 'p', "i", "0" },

  { 0, 0, 0 }
};
2641
/* This is in order according to the bitfield that pushm/popm use.
   Index B (0..7) names the register selected by bit B of the
   pushm/popm mask byte; used by the 'p' code in m32c_print_operand.  */
static char const *pushm_regs[] = {
  "fb", "sb", "a1", "a0", "r3", "r2", "r1", "r0"
};
2646
/* Implements PRINT_OPERAND.  Emits the assembler text for operand X
   under letter CODE into FILE.

   Codes handled up front: 'u'/'U' (print constant as 16-/8-bit
   unsigned), '!' (debug trap for unreviewed patterns), '&' (PSImode
   size suffix, .w or .l), 'C'/'c' (inverted/normal condition names),
   'h'/'H' (low/high half of a wider operand).  All remaining codes
   ('b', 'B', 'x', 'X', 'p', 'm', 'd', 'D', 'l') are dispatched
   through the conversions[] table above after encode_pattern has
   classified X.  */
void
m32c_print_operand (FILE * file, rtx x, int code)
{
  int i, j, b;
  const char *comma;
  HOST_WIDE_INT ival;
  int unsigned_const = 0;
  int force_sign;

  /* Multiplies; constants are converted to sign-extended format but
     we need unsigned, so 'u' and 'U' tell us what size unsigned we
     need.  */
  if (code == 'u')
    {
      unsigned_const = 2;
      code = 0;
    }
  if (code == 'U')
    {
      unsigned_const = 1;
      code = 0;
    }
  /* This one is only for debugging; you can put it in a pattern to
     force this error.  */
  if (code == '!')
    {
      fprintf (stderr, "dj: unreviewed pattern:");
      if (current_output_insn)
	debug_rtx (current_output_insn);
      gcc_unreachable ();
    }
  /* PSImode operations are either .w or .l depending on the target.  */
  if (code == '&')
    {
      if (TARGET_A16)
	fprintf (file, "w");
      else
	fprintf (file, "l");
      return;
    }
  /* Inverted conditionals.  */
  if (code == 'C')
    {
      switch (GET_CODE (x))
	{
	case LE:
	  fputs ("gt", file);
	  break;
	case LEU:
	  fputs ("gtu", file);
	  break;
	case LT:
	  fputs ("ge", file);
	  break;
	case LTU:
	  fputs ("geu", file);
	  break;
	case GT:
	  fputs ("le", file);
	  break;
	case GTU:
	  fputs ("leu", file);
	  break;
	case GE:
	  fputs ("lt", file);
	  break;
	case GEU:
	  fputs ("ltu", file);
	  break;
	case NE:
	  fputs ("eq", file);
	  break;
	case EQ:
	  fputs ("ne", file);
	  break;
	default:
	  gcc_unreachable ();
	}
      return;
    }
  /* Regular conditionals.  */
  if (code == 'c')
    {
      switch (GET_CODE (x))
	{
	case LE:
	  fputs ("le", file);
	  break;
	case LEU:
	  fputs ("leu", file);
	  break;
	case LT:
	  fputs ("lt", file);
	  break;
	case LTU:
	  fputs ("ltu", file);
	  break;
	case GT:
	  fputs ("gt", file);
	  break;
	case GTU:
	  fputs ("gtu", file);
	  break;
	case GE:
	  fputs ("ge", file);
	  break;
	case GEU:
	  fputs ("geu", file);
	  break;
	case NE:
	  fputs ("ne", file);
	  break;
	case EQ:
	  fputs ("eq", file);
	  break;
	default:
	  gcc_unreachable ();
	}
      return;
    }
  /* Used in negsi2 to do HImode ops on the two parts of an SImode
     operand.  */
  if (code == 'h' && GET_MODE (x) == SImode)
    {
      x = m32c_subreg (HImode, x, SImode, 0);
      code = 0;
    }
  if (code == 'H' && GET_MODE (x) == SImode)
    {
      x = m32c_subreg (HImode, x, SImode, 2);
      code = 0;
    }
  if (code == 'h' && GET_MODE (x) == HImode)
    {
      x = m32c_subreg (QImode, x, HImode, 0);
      code = 0;
    }
  if (code == 'H' && GET_MODE (x) == HImode)
    {
      /* We can't actually represent this as an rtx.  Do it here.  */
      if (GET_CODE (x) == REG)
	{
	  switch (REGNO (x))
	    {
	    case R0_REGNO:
	      fputs ("r0h", file);
	      return;
	    case R1_REGNO:
	      fputs ("r1h", file);
	      return;
	    default:
	      gcc_unreachable();
	    }
	}
      /* This should be a MEM.  */
      x = m32c_subreg (QImode, x, HImode, 1);
      code = 0;
    }
  /* This is for BMcond, which always wants word register names.  */
  if (code == 'h' && GET_MODE (x) == QImode)
    {
      if (GET_CODE (x) == REG)
	x = gen_rtx_REG (HImode, REGNO (x));
      code = 0;
    }
  /* 'x' and 'X' need to be ignored for non-immediates.  */
  if ((code == 'x' || code == 'X') && GET_CODE (x) != CONST_INT)
    code = 0;

  encode_pattern (x);
  force_sign = 0;
  /* Scan the conversion table for an entry matching both the operand
     code and the encoded shape of X; the first match wins.  */
  for (i = 0; conversions[i].pattern; i++)
    if (conversions[i].code == code
	&& streq (conversions[i].pattern, pattern))
      {
	for (j = 0; conversions[i].format[j]; j++)
	  /* backslash quotes the next character in the output pattern.  */
	  if (conversions[i].format[j] == '\\')
	    {
	      fputc (conversions[i].format[j + 1], file);
	      j++;
	    }
	  /* Digits in the output pattern indicate that the
	     corresponding RTX is to be output at that point.  */
	  else if (ISDIGIT (conversions[i].format[j]))
	    {
	      rtx r = patternr[conversions[i].format[j] - '0'];
	      switch (GET_CODE (r))
		{
		case REG:
		  fprintf (file, "%s",
			   reg_name_with_mode (REGNO (r), GET_MODE (r)));
		  break;
		case CONST_INT:
		  switch (code)
		    {
		    case 'b':
		    case 'B':
		      {
			/* Single-bit mask: print the bit position,
			   trying the value, its 16-bit complement,
			   then its 8-bit complement.  */
			int v = INTVAL (r);
			int i = (int) exact_log2 (v);
			if (i == -1)
			  i = (int) exact_log2 ((v ^ 0xffff) & 0xffff);
			if (i == -1)
			  i = (int) exact_log2 ((v ^ 0xff) & 0xff);
			/* Bit position.  */
			fprintf (file, "%d", i);
		      }
		      break;
		    case 'x':
		      /* Unsigned byte.  */
		      fprintf (file, HOST_WIDE_INT_PRINT_HEX,
			       INTVAL (r) & 0xff);
		      break;
		    case 'X':
		      /* Unsigned word.  */
		      fprintf (file, HOST_WIDE_INT_PRINT_HEX,
			       INTVAL (r) & 0xffff);
		      break;
		    case 'p':
		      /* pushm and popm encode a register set into a single byte.  */
		      comma = "";
		      for (b = 7; b >= 0; b--)
			if (INTVAL (r) & (1 << b))
			  {
			    fprintf (file, "%s%s", comma, pushm_regs[b]);
			    comma = ",";
			  }
		      break;
		    case 'm':
		      /* "Minus".  Output -X  */
		      ival = (-INTVAL (r) & 0xffff);
		      if (ival & 0x8000)
			ival = ival - 0x10000;
		      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
		      break;
		    default:
		      ival = INTVAL (r);
		      if (conversions[i].format[j + 1] == '[' && ival < 0)
			{
			  /* We can simulate negative displacements by
			     taking advantage of address space
			     wrapping when the offset can span the
			     entire address range.  */
			  rtx base =
			    patternr[conversions[i].format[j + 2] - '0'];
			  if (GET_CODE (base) == REG)
			    switch (REGNO (base))
			      {
			      case A0_REGNO:
			      case A1_REGNO:
				if (TARGET_A24)
				  ival = 0x1000000 + ival;
				else
				  ival = 0x10000 + ival;
				break;
			      case SB_REGNO:
				if (TARGET_A16)
				  ival = 0x10000 + ival;
				break;
			      }
			}
		      else if (code == 'd' && ival < 0 && j == 0)
			/* The "mova" opcode is used to do addition by
			   computing displacements, but again, we need
			   displacements to be unsigned *if* they're
			   the only component of the displacement
			   (i.e. no "symbol-4" type displacement).  */
			ival = (TARGET_A24 ? 0x1000000 : 0x10000) + ival;

		      if (conversions[i].format[j] == '0')
			{
			  /* More conversions to unsigned.  */
			  if (unsigned_const == 2)
			    ival &= 0xffff;
			  if (unsigned_const == 1)
			    ival &= 0xff;
			}
		      if (streq (conversions[i].pattern, "mi")
			  || streq (conversions[i].pattern, "mmi"))
			{
			  /* Integers used as addresses are unsigned.  */
			  ival &= (TARGET_A24 ? 0xffffff : 0xffff);
			}
		      if (force_sign && ival >= 0)
			fputc ('+', file);
		      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
		      break;
		    }
		  break;
		case CONST_DOUBLE:
		  /* We don't have const_double constants.  If it
		     happens, make it obvious.  */
		  fprintf (file, "[const_double 0x%lx]",
			   (unsigned long) CONST_DOUBLE_HIGH (r));
		  break;
		case SYMBOL_REF:
		  assemble_name (file, XSTR (r, 0));
		  break;
		case LABEL_REF:
		  output_asm_label (r);
		  break;
		default:
		  fprintf (stderr, "don't know how to print this operand:");
		  debug_rtx (r);
		  gcc_unreachable ();
		}
	    }
	  else
	    {
	      if (conversions[i].format[j] == 'z')
		{
		  /* Some addressing modes *must* have a displacement,
		     so insert a zero here if needed.  */
		  int k;
		  for (k = j + 1; conversions[i].format[k]; k++)
		    if (ISDIGIT (conversions[i].format[k]))
		      {
			rtx reg = patternr[conversions[i].format[k] - '0'];
			if (GET_CODE (reg) == REG
			    && (REGNO (reg) == SB_REGNO
				|| REGNO (reg) == FB_REGNO
				|| REGNO (reg) == SP_REGNO))
			  fputc ('0', file);
		      }
		  continue;
		}
	      /* Signed displacements off symbols need to have signs
		 blended cleanly.  */
	      if (conversions[i].format[j] == '+'
		  && (!code || code == 'D' || code == 'd')
		  && ISDIGIT (conversions[i].format[j + 1])
		  && (GET_CODE (patternr[conversions[i].format[j + 1] - '0'])
		      == CONST_INT))
		{
		  force_sign = 1;
		  continue;
		}
	      fputc (conversions[i].format[j], file);
	    }
	break;
      }
  /* No table entry matched: emit a diagnostic marker into the
     assembler output rather than crashing.  */
  if (!conversions[i].pattern)
    {
      fprintf (stderr, "unconvertible operand %c `%s'", code ? code : '-',
	       pattern);
      debug_rtx (x);
      fprintf (file, "[%c.%s]", code ? code : '-', pattern);
    }

  return;
}
3000
/* Implements PRINT_OPERAND_PUNCT_VALID_P.  See m32c_print_operand
   above for descriptions of what these do.  */
int
m32c_print_operand_punct_valid_p (int c)
{
  /* Only '&' (PSImode size suffix) and '!' (debug trap) are valid
     punctuation codes.  */
  return (c == '&' || c == '!') ? 1 : 0;
}
3010
3011 /* Implements PRINT_OPERAND_ADDRESS. Nothing unusual here. */
3012 void
3013 m32c_print_operand_address (FILE * stream, rtx address)
3014 {
3015 if (GET_CODE (address) == MEM)
3016 address = XEXP (address, 0);
3017 else
3018 /* cf: gcc.dg/asm-4.c. */
3019 gcc_assert (GET_CODE (address) == REG);
3020
3021 m32c_print_operand (stream, address, 0);
3022 }
3023
3024 /* Implements ASM_OUTPUT_REG_PUSH. Control registers are pushed
3025 differently than general registers. */
3026 void
3027 m32c_output_reg_push (FILE * s, int regno)
3028 {
3029 if (regno == FLG_REGNO)
3030 fprintf (s, "\tpushc\tflg\n");
3031 else
3032 fprintf (s, "\tpush.%c\t%s\n",
3033 " bwll"[reg_push_size (regno)], reg_names[regno]);
3034 }
3035
3036 /* Likewise for ASM_OUTPUT_REG_POP. */
3037 void
3038 m32c_output_reg_pop (FILE * s, int regno)
3039 {
3040 if (regno == FLG_REGNO)
3041 fprintf (s, "\tpopc\tflg\n");
3042 else
3043 fprintf (s, "\tpop.%c\t%s\n",
3044 " bwll"[reg_push_size (regno)], reg_names[regno]);
3045 }
3046
/* Defining target-specific uses of `__attribute__' */

/* Used to simplify the logic below.  Find the attributes wherever
   they may be: on the type itself, on the decl, or on the decl's
   type.  The whole expansion is parenthesized so the macro can be
   embedded safely in larger expressions (the conditional operator
   would otherwise bind unexpectedly).  */
#define M32C_ATTRIBUTES(decl) \
  ((TYPE_P (decl)) ? TYPE_ATTRIBUTES (decl) \
		   : DECL_ATTRIBUTES (decl) \
		   ? (DECL_ATTRIBUTES (decl)) \
		   : TYPE_ATTRIBUTES (TREE_TYPE (decl)))
3056
3057 /* Returns TRUE if the given tree has the "interrupt" attribute. */
3058 static int
3059 interrupt_p (tree node ATTRIBUTE_UNUSED)
3060 {
3061 tree list = M32C_ATTRIBUTES (node);
3062 while (list)
3063 {
3064 if (is_attribute_p ("interrupt", TREE_PURPOSE (list)))
3065 return 1;
3066 list = TREE_CHAIN (list);
3067 }
3068 return fast_interrupt_p (node);
3069 }
3070
3071 /* Returns TRUE if the given tree has the "bank_switch" attribute. */
3072 static int
3073 bank_switch_p (tree node ATTRIBUTE_UNUSED)
3074 {
3075 tree list = M32C_ATTRIBUTES (node);
3076 while (list)
3077 {
3078 if (is_attribute_p ("bank_switch", TREE_PURPOSE (list)))
3079 return 1;
3080 list = TREE_CHAIN (list);
3081 }
3082 return 0;
3083 }
3084
3085 /* Returns TRUE if the given tree has the "fast_interrupt" attribute. */
3086 static int
3087 fast_interrupt_p (tree node ATTRIBUTE_UNUSED)
3088 {
3089 tree list = M32C_ATTRIBUTES (node);
3090 while (list)
3091 {
3092 if (is_attribute_p ("fast_interrupt", TREE_PURPOSE (list)))
3093 return 1;
3094 list = TREE_CHAIN (list);
3095 }
3096 return 0;
3097 }
3098
/* Attribute handler for "interrupt", "bank_switch" and
   "fast_interrupt" (see m32c_attribute_table below).  These
   attributes take no arguments and are always accepted as-is, so
   there is nothing to validate here.  */
static tree
interrupt_handler (tree * node ATTRIBUTE_UNUSED,
		   tree name ATTRIBUTE_UNUSED,
		   tree args ATTRIBUTE_UNUSED,
		   int flags ATTRIBUTE_UNUSED,
		   bool * no_add_attrs ATTRIBUTE_UNUSED)
{
  return NULL_TREE;
}
3108
3109 /* Returns TRUE if given tree has the "function_vector" attribute. */
3110 int
3111 m32c_special_page_vector_p (tree func)
3112 {
3113 tree list;
3114
3115 if (TREE_CODE (func) != FUNCTION_DECL)
3116 return 0;
3117
3118 list = M32C_ATTRIBUTES (func);
3119 while (list)
3120 {
3121 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
3122 return 1;
3123 list = TREE_CHAIN (list);
3124 }
3125 return 0;
3126 }
3127
/* Attribute handler for "function_vector".  Validates that the
   attribute is usable on this target, is applied to a function, and
   that its single argument is an integer constant in the valid
   special-page vector range; on any violation a -Wattributes warning
   is issued and the attribute is dropped via *NO_ADD_ATTRS.  */
static tree
function_vector_handler (tree * node ATTRIBUTE_UNUSED,
			 tree name ATTRIBUTE_UNUSED,
			 tree args ATTRIBUTE_UNUSED,
			 int flags ATTRIBUTE_UNUSED,
			 bool * no_add_attrs ATTRIBUTE_UNUSED)
{
  if (TARGET_R8C)
    {
      /* The attribute is not supported for R8C target.  */
      warning (OPT_Wattributes,
	       "%qE attribute is not supported for R8C target",
	       name);
      *no_add_attrs = true;
    }
  else if (TREE_CODE (*node) != FUNCTION_DECL)
    {
      /* The attribute must be applied to functions only.  */
      warning (OPT_Wattributes,
	       "%qE attribute applies only to functions",
	       name);
      *no_add_attrs = true;
    }
  else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
    {
      /* The argument must be a constant integer.  */
      warning (OPT_Wattributes,
	       "%qE attribute argument not an integer constant",
	       name);
      *no_add_attrs = true;
    }
  else if (TREE_INT_CST_LOW (TREE_VALUE (args)) < 18
	   || TREE_INT_CST_LOW (TREE_VALUE (args)) > 255)
    {
      /* The argument value must be between 18 to 255.  */
      warning (OPT_Wattributes,
	       "%qE attribute argument should be between 18 to 255",
	       name);
      *no_add_attrs = true;
    }
  return NULL_TREE;
}
3170
3171 /* If the function is assigned the attribute 'function_vector', it
3172 returns the function vector number, otherwise returns zero. */
3173 int
3174 current_function_special_page_vector (rtx x)
3175 {
3176 int num;
3177
3178 if ((GET_CODE(x) == SYMBOL_REF)
3179 && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
3180 {
3181 tree list;
3182 tree t = SYMBOL_REF_DECL (x);
3183
3184 if (TREE_CODE (t) != FUNCTION_DECL)
3185 return 0;
3186
3187 list = M32C_ATTRIBUTES (t);
3188 while (list)
3189 {
3190 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
3191 {
3192 num = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list)));
3193 return num;
3194 }
3195
3196 list = TREE_CHAIN (list);
3197 }
3198
3199 return 0;
3200 }
3201 else
3202 return 0;
3203 }
3204
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE m32c_attribute_table
/* Machine attributes: { name, min_args, max_args, decl_required,
   type_required, function_type_required, handler,
   affects_type_identity }.  "function_vector" takes exactly one
   argument (the vector number, validated by function_vector_handler);
   the interrupt-style attributes take none.  */
static const struct attribute_spec m32c_attribute_table[] = {
  {"interrupt", 0, 0, false, false, false, interrupt_handler, false},
  {"bank_switch", 0, 0, false, false, false, interrupt_handler, false},
  {"fast_interrupt", 0, 0, false, false, false, interrupt_handler, false},
  {"function_vector", 1, 1, true, false, false, function_vector_handler,
   false},
  {0, 0, 0, 0, 0, 0, 0, false}
};
3215
#undef TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES m32c_comp_type_attributes
/* Implements TARGET_COMP_TYPE_ATTRIBUTES.  All attribute
   combinations are treated as compatible on this target.  */
static int
m32c_comp_type_attributes (const_tree type1 ATTRIBUTE_UNUSED,
			   const_tree type2 ATTRIBUTE_UNUSED)
{
  /* 0=incompatible 1=compatible 2=warning */
  return 1;
}
3225
3226 #undef TARGET_INSERT_ATTRIBUTES
3227 #define TARGET_INSERT_ATTRIBUTES m32c_insert_attributes
3228 static void
3229 m32c_insert_attributes (tree node ATTRIBUTE_UNUSED,
3230 tree * attr_ptr ATTRIBUTE_UNUSED)
3231 {
3232 unsigned addr;
3233 /* See if we need to make #pragma address variables volatile. */
3234
3235 if (TREE_CODE (node) == VAR_DECL)
3236 {
3237 const char *name = IDENTIFIER_POINTER (DECL_NAME (node));
3238 if (m32c_get_pragma_address (name, &addr))
3239 {
3240 TREE_THIS_VOLATILE (node) = true;
3241 }
3242 }
3243 }
3244
3245
/* One #pragma address association: the variable's name and the fixed
   address assigned to it.  GTY-marked so entries survive garbage
   collection.  */
struct GTY(()) pragma_entry {
  const char *varname;
  unsigned address;
};
typedef struct pragma_entry pragma_entry;

/* Hash table of pragma info.  Keyed by variable name (see
   pragma_entry_hash/pragma_entry_eq below); created lazily by
   m32c_note_pragma_address.  */
static GTY((param_is (pragma_entry))) htab_t pragma_htab;
3254
3255 static int
3256 pragma_entry_eq (const void *p1, const void *p2)
3257 {
3258 const pragma_entry *old = (const pragma_entry *) p1;
3259 const char *new_name = (const char *) p2;
3260
3261 return strcmp (old->varname, new_name) == 0;
3262 }
3263
3264 static hashval_t
3265 pragma_entry_hash (const void *p)
3266 {
3267 const pragma_entry *old = (const pragma_entry *) p;
3268 return htab_hash_string (old->varname);
3269 }
3270
/* Record a #pragma address association of VARNAME with ADDRESS,
   creating the hash table on first use.  A later pragma for the same
   name overwrites the previous address.  Called from the target's
   pragma machinery.  */
void
m32c_note_pragma_address (const char *varname, unsigned address)
{
  pragma_entry **slot;

  if (!pragma_htab)
    pragma_htab = htab_create_ggc (31, pragma_entry_hash,
				   pragma_entry_eq, NULL);

  slot = (pragma_entry **)
    htab_find_slot_with_hash (pragma_htab, varname,
			      htab_hash_string (varname), INSERT);

  if (!*slot)
    {
      /* First time we see this name: GC-allocate the entry and copy
	 the name into GC memory since the caller's string may not
	 outlive us.  */
      *slot = ggc_alloc_pragma_entry ();
      (*slot)->varname = ggc_strdup (varname);
    }
  (*slot)->address = address;
}
3291
3292 static bool
3293 m32c_get_pragma_address (const char *varname, unsigned *address)
3294 {
3295 pragma_entry **slot;
3296
3297 if (!pragma_htab)
3298 return false;
3299
3300 slot = (pragma_entry **)
3301 htab_find_slot_with_hash (pragma_htab, varname,
3302 htab_hash_string (varname), NO_INSERT);
3303 if (slot && *slot)
3304 {
3305 *address = (*slot)->address;
3306 return true;
3307 }
3308 return false;
3309 }
3310
/* Emit a common (BSS-style) symbol NAME of SIZE bytes with the given
   ALIGN (in bits).  Variables placed by #pragma address are instead
   emitted as a plain assembler assignment to their fixed address.
   GLOBAL selects between .comm with and without a preceding
   .local directive.  */
void
m32c_output_aligned_common (FILE *stream, tree decl ATTRIBUTE_UNUSED,
			    const char *name,
			    int size, int align, int global)
{
  unsigned address;

  if (m32c_get_pragma_address (name, &address))
    {
      /* We never output these as global.  */
      assemble_name (stream, name);
      fprintf (stream, " = 0x%04x\n", address);
      return;
    }
  if (!global)
    {
      fprintf (stream, "\t.local\t");
      assemble_name (stream, name);
      fprintf (stream, "\n");
    }
  fprintf (stream, "\t.comm\t");
  assemble_name (stream, name);
  /* .comm takes the size in bytes and the alignment in bytes.  */
  fprintf (stream, ",%u,%u\n", size, align / BITS_PER_UNIT);
}
3335
/* Predicates */

/* This is a list of legal subregs of hard regs.  Consumed by
   m32c_illegal_subreg_p below:

   - outer_mode_size/inner_mode_size are GET_MODE_SIZE of the subreg
     and of the underlying register.
   - byte_mask has bit (1 << SUBREG_BYTE) set for each legal byte
     offset.
   - legal_when: 1 = always legal, 16 = legal only with TARGET_A16,
     24 = legal only with TARGET_A24.  */
static const struct {
  unsigned char outer_mode_size;
  unsigned char inner_mode_size;
  unsigned char byte_mask;
  unsigned char legal_when;
  unsigned int regno;
} legal_subregs[] = {
  {1, 2, 0x03, 1, R0_REGNO}, /* r0h r0l */
  {1, 2, 0x03, 1, R1_REGNO}, /* r1h r1l */
  {1, 2, 0x01, 1, A0_REGNO},
  {1, 2, 0x01, 1, A1_REGNO},

  {1, 4, 0x01, 1, A0_REGNO},
  {1, 4, 0x01, 1, A1_REGNO},

  {2, 4, 0x05, 1, R0_REGNO}, /* r2 r0 */
  {2, 4, 0x05, 1, R1_REGNO}, /* r3 r1 */
  {2, 4, 0x05, 16, A0_REGNO}, /* a1 a0 */
  {2, 4, 0x01, 24, A0_REGNO}, /* a1 a0 */
  {2, 4, 0x01, 24, A1_REGNO}, /* a1 a0 */

  {4, 8, 0x55, 1, R0_REGNO}, /* r3 r1 r2 r0 */
};
3362
/* Returns TRUE if OP is a subreg of a hard reg which we don't
   support.  We also bail on MEMs with illegal addresses.  Anything
   not found in the legal_subregs table above is considered illegal;
   pseudos (regno >= MEM0_REGNO here) and same-size subregs are
   always fine.  */
bool
m32c_illegal_subreg_p (rtx op)
{
  int offset;
  unsigned int i;
  int src_mode, dest_mode;

  /* MEMs with unencodable addresses are rejected outright.  */
  if (GET_CODE (op) == MEM
      && ! m32c_legitimate_address_p (Pmode, XEXP (op, 0), false))
    {
      return true;
    }

  if (GET_CODE (op) != SUBREG)
    return false;

  dest_mode = GET_MODE (op);
  offset = SUBREG_BYTE (op);
  op = SUBREG_REG (op);
  src_mode = GET_MODE (op);

  /* A subreg that doesn't change size is always OK.  */
  if (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (src_mode))
    return false;
  if (GET_CODE (op) != REG)
    return false;
  /* Registers at or above MEM0 are not real hard registers for this
     check.  */
  if (REGNO (op) >= MEM0_REGNO)
    return false;

  /* Convert the byte offset into the single-bit form used by
     legal_subregs[].byte_mask.  */
  offset = (1 << offset);

  for (i = 0; i < ARRAY_SIZE (legal_subregs); i ++)
    if (legal_subregs[i].outer_mode_size == GET_MODE_SIZE (dest_mode)
	&& legal_subregs[i].regno == REGNO (op)
	&& legal_subregs[i].inner_mode_size == GET_MODE_SIZE (src_mode)
	&& legal_subregs[i].byte_mask & offset)
      {
	switch (legal_subregs[i].legal_when)
	  {
	  case 1:
	    return false;
	  case 16:
	    if (TARGET_A16)
	      return false;
	    break;
	  case 24:
	    if (TARGET_A24)
	      return false;
	    break;
	  }
      }
  return true;
}
3417
/* Returns TRUE if we support a move between the first two operands.
   At the moment, we just want to discourage mem to mem moves until
   after reload, because reload has a hard time with our limited
   number of address registers, and we can get into a situation where
   we need three of them when we only have two.  */
bool
m32c_mov_ok (rtx * operands, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  rtx op0 = operands[0];
  rtx op1 = operands[1];

  /* On 24-bit-address parts there are enough address registers that
     mem-to-mem moves are not a problem.  */
  if (TARGET_A24)
    return true;

#define DEBUG_MOV_OK 0
#if DEBUG_MOV_OK
  fprintf (stderr, "m32c_mov_ok %s\n", mode_name[mode]);
  debug_rtx (op0);
  debug_rtx (op1);
#endif

  /* Look through subregs to the underlying operand.  */
  if (GET_CODE (op0) == SUBREG)
    op0 = XEXP (op0, 0);
  if (GET_CODE (op1) == SUBREG)
    op1 = XEXP (op1, 0);

  if (GET_CODE (op0) == MEM
      && GET_CODE (op1) == MEM
      && ! reload_completed)
    {
#if DEBUG_MOV_OK
      fprintf (stderr, " - no, mem to mem\n");
#endif
      return false;
    }

#if DEBUG_MOV_OK
  fprintf (stderr, " - ok\n");
#endif
  return true;
}
3459
3460 /* Returns TRUE if two consecutive HImode mov instructions, generated
3461 for moving an immediate double data to a double data type variable
3462 location, can be combined into single SImode mov instruction. */
bool
m32c_immd_dbl_mov (rtx * operands,
		   enum machine_mode mode ATTRIBUTE_UNUSED)
{
  int flag = 0, okflag = 0, offset1 = 0, offset2 = 0, offsetsign = 0;
  const char *str1;
  const char *str2;

  /* Classify the pair of destination addresses into one of three
     combinable shapes (FLAG records which):
       1: first half at a bare SYMBOL_REF, second at CONST(sym+offset);
       2: both halves at CONST(sym+offset), first offset 4-aligned;
       3: both halves FB-relative (frame base + CONST_INT), first
	  offset 4-aligned.
     operands[0]/operands[2] are the two HImode destination MEMs,
     operands[1]/operands[3] the two immediate halves.  */
  if (GET_CODE (XEXP (operands[0], 0)) == SYMBOL_REF
      && MEM_SCALAR_P (operands[0])
      && !MEM_IN_STRUCT_P (operands[0])
      && GET_CODE (XEXP (operands[2], 0)) == CONST
      && GET_CODE (XEXP (XEXP (operands[2], 0), 0)) == PLUS
      && GET_CODE (XEXP (XEXP (XEXP (operands[2], 0), 0), 0)) == SYMBOL_REF
      && GET_CODE (XEXP (XEXP (XEXP (operands[2], 0), 0), 1)) == CONST_INT
      && MEM_SCALAR_P (operands[2])
      && !MEM_IN_STRUCT_P (operands[2]))
    flag = 1;

  else if (GET_CODE (XEXP (operands[0], 0)) == CONST
	   && GET_CODE (XEXP (XEXP (operands[0], 0), 0)) == PLUS
	   && GET_CODE (XEXP (XEXP (XEXP (operands[0], 0), 0), 0)) == SYMBOL_REF
	   && MEM_SCALAR_P (operands[0])
	   && !MEM_IN_STRUCT_P (operands[0])
	   && !(INTVAL (XEXP (XEXP (XEXP (operands[0], 0), 0), 1)) %4)
	   && GET_CODE (XEXP (operands[2], 0)) == CONST
	   && GET_CODE (XEXP (XEXP (operands[2], 0), 0)) == PLUS
	   && GET_CODE (XEXP (XEXP (XEXP (operands[2], 0), 0), 0)) == SYMBOL_REF
	   && MEM_SCALAR_P (operands[2])
	   && !MEM_IN_STRUCT_P (operands[2]))
    flag = 2;

  else if (GET_CODE (XEXP (operands[0], 0)) == PLUS
           && GET_CODE (XEXP (XEXP (operands[0], 0), 0)) == REG
           && REGNO (XEXP (XEXP (operands[0], 0), 0)) == FB_REGNO
           && GET_CODE (XEXP (XEXP (operands[0], 0), 1)) == CONST_INT
           && MEM_SCALAR_P (operands[0])
           && !MEM_IN_STRUCT_P (operands[0])
           && !(INTVAL (XEXP (XEXP (operands[0], 0), 1)) %4)
           && REGNO (XEXP (XEXP (operands[2], 0), 0)) == FB_REGNO
           && GET_CODE (XEXP (XEXP (operands[2], 0), 1)) == CONST_INT
           && MEM_SCALAR_P (operands[2])
           && !MEM_IN_STRUCT_P (operands[2]))
    flag = 3;

  else
    return false;

  /* Now verify the two destinations really are adjacent halves of
     the same object.  For the symbolic cases we simply require the
     same symbol name; for the FB-relative case, adjacent offsets.  */
  switch (flag)
    {
    case 1:
      str1 = XSTR (XEXP (operands[0], 0), 0);
      str2 = XSTR (XEXP (XEXP (XEXP (operands[2], 0), 0), 0), 0);
      if (strcmp (str1, str2) == 0)
	okflag = 1;
      else
	okflag = 0;
      break;
    case 2:
      str1 = XSTR (XEXP (XEXP (XEXP (operands[0], 0), 0), 0), 0);
      str2 = XSTR (XEXP (XEXP (XEXP (operands[2], 0), 0), 0), 0);
      if (strcmp(str1,str2) == 0)
	okflag = 1;
      else
	okflag = 0;
      break;
    case 3:
      /* OFFSETSIGN is the sign bit of the first offset.  NOTE(review):
	 combining additionally requires offset1 to be negative, i.e.
	 FB-relative locals below the frame base — confirm intended.  */
      offset1 = INTVAL (XEXP (XEXP (operands[0], 0), 1));
      offset2 = INTVAL (XEXP (XEXP (operands[2], 0), 1));
      offsetsign = offset1 >> ((sizeof (offset1) * 8) -1);
      if (((offset2-offset1) == 2) && offsetsign != 0)
	okflag = 1;
      else
	okflag = 0;
      break;
    default:
      okflag = 0;
    }

  if (okflag == 1)
    {
      HOST_WIDE_INT val;
      /* Build the combined SImode destination and the merged 32-bit
	 immediate (low half from operands[1], high from operands[3]);
	 the pattern emits them as operands[4]/operands[5].  */
      operands[4] = gen_rtx_MEM (SImode, XEXP (operands[0], 0));

      val = (INTVAL (operands[3]) << 16) + (INTVAL (operands[1]) & 0xFFFF);
      operands[5] = gen_rtx_CONST_INT (VOIDmode, val);

      return true;
    }

  return false;
}
3555
3556 /* Expanders */
3557
3558 /* Subregs are non-orthogonal for us, because our registers are all
3559 different sizes. */
/* Build a subreg of X (mode INNER) at byte offset BYTE, as mode OUTER,
   honoring the m32c's non-orthogonal register layout.  Aborts via
   gcc_unreachable on register/byte combinations with no hard-register
   equivalent.  */
static rtx
m32c_subreg (enum machine_mode outer,
	     rtx x, enum machine_mode inner, int byte)
{
  int r, nr = -1;

  /* Converting MEMs to different types that are the same size, we
     just rewrite them.  */
  if (GET_CODE (x) == SUBREG
      && SUBREG_BYTE (x) == 0
      && GET_CODE (SUBREG_REG (x)) == MEM
      && (GET_MODE_SIZE (GET_MODE (x))
	  == GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
    {
      rtx oldx = x;
      x = gen_rtx_MEM (GET_MODE (x), XEXP (SUBREG_REG (x), 0));
      MEM_COPY_ATTRIBUTES (x, SUBREG_REG (oldx));
    }

  /* Push/pop get done as smaller push/pops.  */
  if (GET_CODE (x) == MEM
      && (GET_CODE (XEXP (x, 0)) == PRE_DEC
	  || GET_CODE (XEXP (x, 0)) == POST_INC))
    return gen_rtx_MEM (outer, XEXP (x, 0));
  if (GET_CODE (x) == SUBREG
      && GET_CODE (XEXP (x, 0)) == MEM
      && (GET_CODE (XEXP (XEXP (x, 0), 0)) == PRE_DEC
	  || GET_CODE (XEXP (XEXP (x, 0), 0)) == POST_INC))
    return gen_rtx_MEM (outer, XEXP (XEXP (x, 0), 0));

  /* Non-registers: let the generic simplifier do the work.  */
  if (GET_CODE (x) != REG)
    {
      /* Note: this local R (an rtx) shadows the outer int R.  */
      rtx r = simplify_gen_subreg (outer, x, inner, byte);
      if (GET_CODE (r) == SUBREG
	  && GET_CODE (x) == MEM
	  && MEM_VOLATILE_P (x))
	{
	  /* Volatile MEMs don't get simplified, but we need them to
	     be.  We are little endian, so the subreg byte is the
	     offset.  */
	  r = adjust_address_nv (x, outer, byte);
	}
      return r;
    }

  r = REGNO (x);
  /* Pseudos and the arg pointer have no layout quirks; defer.  */
  if (r >= FIRST_PSEUDO_REGISTER || r == AP_REGNO)
    return simplify_gen_subreg (outer, x, inner, byte);

  if (IS_MEM_REGNO (r))
    return simplify_gen_subreg (outer, x, inner, byte);

  /* This is where the complexities of our register layout are
     described.  */
  if (byte == 0)
    nr = r;
  else if (outer == HImode)
    {
      /* HImode pieces of the wider (possibly concatenated) R0..R3
	 and A0/A1 registers.  */
      if (r == R0_REGNO && byte == 2)
	nr = R2_REGNO;
      else if (r == R0_REGNO && byte == 4)
	nr = R1_REGNO;
      else if (r == R0_REGNO && byte == 6)
	nr = R3_REGNO;
      else if (r == R1_REGNO && byte == 2)
	nr = R3_REGNO;
      else if (r == A0_REGNO && byte == 2)
	nr = A1_REGNO;
    }
  else if (outer == SImode)
    {
      if (r == R0_REGNO && byte == 0)
	nr = R0_REGNO;
      else if (r == R0_REGNO && byte == 4)
	nr = R1_REGNO;
    }
  if (nr == -1)
    {
      /* No hard register corresponds to this subreg; this is a
	 backend bug, so dump and abort.  */
      fprintf (stderr, "m32c_subreg %s %s %d\n",
	       mode_name[outer], mode_name[inner], byte);
      debug_rtx (x);
      gcc_unreachable ();
    }
  return gen_rtx_REG (outer, nr);
}
3645
3646 /* Used to emit move instructions. We split some moves,
3647 and avoid mem-mem moves. */
/* Prepare a move for expansion.  Returns nonzero if the move was
   already emitted here (by splitting), zero if the caller should
   emit it with the (possibly rewritten) operands.  */
int
m32c_prepare_move (rtx * operands, enum machine_mode mode)
{
  /* Constants can't be stored directly into far memory; force them
     through a register of the destination's mode.  */
  if (far_addr_space_p (operands[0])
      && CONSTANT_P (operands[1]))
    {
      operands[1] = force_reg (GET_MODE (operands[0]), operands[1]);
    }
  /* The A16 targets can't do PSImode moves directly; split now.  */
  if (TARGET_A16 && mode == PSImode)
    return m32c_split_move (operands, mode, 1);
  /* Rewrite a PRE_MODIFY destination as an explicit address update
     followed by a plain MEM store.  */
  if ((GET_CODE (operands[0]) == MEM)
      && (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY))
    {
      rtx pmv = XEXP (operands[0], 0);
      rtx dest_reg = XEXP (pmv, 0);
      rtx dest_mod = XEXP (pmv, 1);

      emit_insn (gen_rtx_SET (Pmode, dest_reg, dest_mod));
      operands[0] = gen_rtx_MEM (mode, dest_reg);
    }
  /* Avoid mem-to-mem moves by loading the source into a pseudo.  */
  if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
    operands[1] = copy_to_mode_reg (mode, operands[1]);
  return 0;
}
3672
3673 #define DEBUG_SPLIT 0
3674
3675 /* Returns TRUE if the given PSImode move should be split. We split
3676 for all r8c/m16c moves, since it doesn't support them, and for
3677 POP.L as we can only *push* SImode. */
int
m32c_split_psi_p (rtx * operands)
{
#if DEBUG_SPLIT
  fprintf (stderr, "\nm32c_split_psi_p\n");
  debug_rtx (operands[0]);
  debug_rtx (operands[1]);
#endif
  /* The r8c/m16c (A16) has no PSImode moves at all.  */
  if (TARGET_A16)
    {
#if DEBUG_SPLIT
      fprintf (stderr, "yes, A16\n");
#endif
      return 1;
    }
  /* POST_INC source means a pop; we can push SImode but only pop in
     smaller pieces, so split.  */
  if (GET_CODE (operands[1]) == MEM
      && GET_CODE (XEXP (operands[1], 0)) == POST_INC)
    {
#if DEBUG_SPLIT
      fprintf (stderr, "yes, pop.l\n");
#endif
      return 1;
    }
#if DEBUG_SPLIT
  fprintf (stderr, "no, default\n");
#endif
  return 0;
}
3706
3707 /* Split the given move. SPLIT_ALL is 0 if splitting is optional
3708 (define_expand), 1 if it is not optional (define_insn_and_split),
3709 and 3 for define_split (alternate api). */
/* Split the given move.  SPLIT_ALL is 0 if splitting is optional
   (define_expand), 1 if it is not optional (define_insn_and_split),
   and 3 for define_split (alternate api).  Returns nonzero if the
   move was split (and, unless SPLIT_ALL is 3, emitted).  */
int
m32c_split_move (rtx * operands, enum machine_mode mode, int split_all)
{
  rtx s[4], d[4];
  int parts, si, di, rev = 0;
  int rv = 0, opi = 2;
  enum machine_mode submode = HImode;
  rtx *ops, local_ops[10];

  /* define_split modifies the existing operands, but the other two
     emit new insns.  OPS is where we store the operand pairs, which
     we emit later.  */
  if (split_all == 3)
    ops = operands;
  else
    ops = local_ops;

  /* Else HImode.  */
  if (mode == DImode)
    submode = SImode;

  /* Before splitting mem-mem moves, force one operand into a
     register.  */
  if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
    {
#if DEBUG0
      fprintf (stderr, "force_reg...\n");
      debug_rtx (operands[1]);
#endif
      operands[1] = force_reg (mode, operands[1]);
#if DEBUG0
      debug_rtx (operands[1]);
#endif
    }

  /* We always split into exactly two SUBMODE pieces.  */
  parts = 2;

#if DEBUG_SPLIT
  fprintf (stderr, "\nsplit_move %d all=%d\n", !can_create_pseudo_p (),
	   split_all);
  debug_rtx (operands[0]);
  debug_rtx (operands[1]);
#endif

  /* Note that split_all is not used to select the api after this
     point, so it's safe to set it to 3 even with define_insn.  */
  /* None of the chips can move SI operands to sp-relative addresses,
     so we always split those.  */
  if (m32c_extra_constraint_p (operands[0], 'S', "Ss"))
    split_all = 3;

  /* A16 can't move to/from far address spaces whole; force a split.  */
  if (TARGET_A16
      && (far_addr_space_p (operands[0])
	  || far_addr_space_p (operands[1])))
    split_all |= 1;

  /* We don't need to split these.  */
  if (TARGET_A24
      && split_all != 3
      && (mode == SImode || mode == PSImode)
      && !(GET_CODE (operands[1]) == MEM
	   && GET_CODE (XEXP (operands[1], 0)) == POST_INC))
    return 0;

  /* First, enumerate the subregs we'll be dealing with.  */
  for (si = 0; si < parts; si++)
    {
      d[si] =
	m32c_subreg (submode, operands[0], mode,
		     si * GET_MODE_SIZE (submode));
      s[si] =
	m32c_subreg (submode, operands[1], mode,
		     si * GET_MODE_SIZE (submode));
    }

  /* Split pushes by emitting a sequence of smaller pushes.  High
     part is pushed first so the pieces land in memory order.  */
  if (GET_CODE (d[0]) == MEM && GET_CODE (XEXP (d[0], 0)) == PRE_DEC)
    {
      for (si = parts - 1; si >= 0; si--)
	{
	  ops[opi++] = gen_rtx_MEM (submode,
				    gen_rtx_PRE_DEC (Pmode,
						     gen_rtx_REG (Pmode,
								  SP_REGNO)));
	  ops[opi++] = s[si];
	}

      rv = 1;
    }
  /* Likewise for pops.  */
  else if (GET_CODE (s[0]) == MEM && GET_CODE (XEXP (s[0], 0)) == POST_INC)
    {
      for (di = 0; di < parts; di++)
	{
	  ops[opi++] = d[di];
	  ops[opi++] = gen_rtx_MEM (submode,
				    gen_rtx_POST_INC (Pmode,
						      gen_rtx_REG (Pmode,
								   SP_REGNO)));
	}
      rv = 1;
    }
  else if (split_all)
    {
      /* if d[di] == s[si] for any di < si, we'll early clobber.  */
      for (di = 0; di < parts - 1; di++)
	for (si = di + 1; si < parts; si++)
	  if (reg_mentioned_p (d[di], s[si]))
	    rev = 1;

      /* Order the piecewise moves so a destination piece is never
	 written before a later source piece that overlaps it.  */
      if (rev)
	for (si = 0; si < parts; si++)
	  {
	    ops[opi++] = d[si];
	    ops[opi++] = s[si];
	  }
      else
	for (si = parts - 1; si >= 0; si--)
	  {
	    ops[opi++] = d[si];
	    ops[opi++] = s[si];
	  }
      rv = 1;
    }
  /* Now emit any moves we may have accumulated.  */
  if (rv && split_all != 3)
    {
      int i;
      for (i = 2; i < opi; i += 2)
	emit_move_insn (ops[i], ops[i + 1]);
    }
  return rv;
}
3843
3844 /* The m32c has a number of opcodes that act like memcpy, strcmp, and
3845 the like. For the R8C they expect one of the addresses to be in
3846 R1L:An so we need to arrange for that. Otherwise, it's just a
3847 matter of picking out the operands we want and emitting the right
3848 pattern for them. All these expanders, which correspond to
3849 patterns in blkmov.md, must return nonzero if they expand the insn,
3850 or zero if they should FAIL. */
3851
3852 /* This is a memset() opcode. All operands are implied, so we need to
3853 arrange for them to be in the right registers. The opcode wants
3854 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3855 the count (HI), and $2 the value (QI). */
int
m32c_expand_setmemhi(rtx *operands)
{
  rtx desta, count, val;
  rtx desto, counto;

  /* Extract the destination address, byte count, and fill value.  */
  desta = XEXP (operands[0], 0);
  count = operands[1];
  val = operands[2];

  /* Scratch outputs for the clobbered address/count registers.  */
  desto = gen_reg_rtx (Pmode);
  counto = gen_reg_rtx (HImode);

  if (GET_CODE (desta) != REG
      || REGNO (desta) < FIRST_PSEUDO_REGISTER)
    desta = copy_to_mode_reg (Pmode, desta);

  /* This looks like an arbitrary restriction, but this is by far the
     most common case.  For counts 8..14 this actually results in
     smaller code with no speed penalty because the half-sized
     constant can be loaded with a shorter opcode.  */
  if (GET_CODE (count) == CONST_INT
      && GET_CODE (val) == CONST_INT
      && ! (INTVAL (count) & 1)
      && (INTVAL (count) > 1)
      && (INTVAL (val) <= 7 && INTVAL (val) >= -8))
    {
      /* Duplicate the byte into both halves of a HImode value and
	 store word-at-a-time with half the count.  */
      unsigned v = INTVAL (val) & 0xff;
      v = v | (v << 8);
      count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
      val = copy_to_mode_reg (HImode, GEN_INT (v));
      if (TARGET_A16)
	emit_insn (gen_setmemhi_whi_op (desto, counto, val, desta, count));
      else
	emit_insn (gen_setmemhi_wpsi_op (desto, counto, val, desta, count));
      return 1;
    }

  /* This is the generalized memset() case.  */
  if (GET_CODE (val) != REG
      || REGNO (val) < FIRST_PSEUDO_REGISTER)
    val = copy_to_mode_reg (QImode, val);

  if (GET_CODE (count) != REG
      || REGNO (count) < FIRST_PSEUDO_REGISTER)
    count = copy_to_mode_reg (HImode, count);

  if (TARGET_A16)
    emit_insn (gen_setmemhi_bhi_op (desto, counto, val, desta, count));
  else
    emit_insn (gen_setmemhi_bpsi_op (desto, counto, val, desta, count));

  return 1;
}
3910
3911 /* This is a memcpy() opcode. All operands are implied, so we need to
3912 arrange for them to be in the right registers. The opcode wants
3913 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3914 is the source (MEM:BLK), and $2 the count (HI). */
int
m32c_expand_movmemhi(rtx *operands)
{
  rtx desta, srca, count;
  rtx desto, srco, counto;

  /* Extract destination address, source address, and byte count.  */
  desta = XEXP (operands[0], 0);
  srca = XEXP (operands[1], 0);
  count = operands[2];

  /* Scratch outputs for the clobbered address/count registers.  */
  desto = gen_reg_rtx (Pmode);
  srco = gen_reg_rtx (Pmode);
  counto = gen_reg_rtx (HImode);

  if (GET_CODE (desta) != REG
      || REGNO (desta) < FIRST_PSEUDO_REGISTER)
    desta = copy_to_mode_reg (Pmode, desta);

  if (GET_CODE (srca) != REG
      || REGNO (srca) < FIRST_PSEUDO_REGISTER)
    srca = copy_to_mode_reg (Pmode, srca);

  /* Similar to setmem, but we don't need to check the value.  */
  if (GET_CODE (count) == CONST_INT
      && ! (INTVAL (count) & 1)
      && (INTVAL (count) > 1))
    {
      /* Even constant count: copy word-at-a-time with half the count.  */
      count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
      if (TARGET_A16)
	emit_insn (gen_movmemhi_whi_op (desto, srco, counto, desta, srca, count));
      else
	emit_insn (gen_movmemhi_wpsi_op (desto, srco, counto, desta, srca, count));
      return 1;
    }

  /* This is the generalized memcpy() case: byte-wise copy with a
     runtime count.  */
  if (GET_CODE (count) != REG
      || REGNO (count) < FIRST_PSEUDO_REGISTER)
    count = copy_to_mode_reg (HImode, count);

  if (TARGET_A16)
    emit_insn (gen_movmemhi_bhi_op (desto, srco, counto, desta, srca, count));
  else
    emit_insn (gen_movmemhi_bpsi_op (desto, srco, counto, desta, srca, count));

  return 1;
}
3962
3963 /* This is a stpcpy() opcode. $0 is the destination (MEM:BLK) after
3964 the copy, which should point to the NUL at the end of the string,
3965 $1 is the destination (MEM:BLK), and $2 is the source (MEM:BLK).
3966 Since our opcode leaves the destination pointing *after* the NUL,
3967 we must emit an adjustment. */
int
m32c_expand_movstr(rtx *operands)
{
  rtx desta, srca;
  rtx desto, srco;

  /* operands[1] is the destination MEM, operands[2] the source MEM;
     operands[0] receives the final destination pointer.  */
  desta = XEXP (operands[1], 0);
  srca = XEXP (operands[2], 0);

  /* Scratch outputs for the clobbered address registers.  */
  desto = gen_reg_rtx (Pmode);
  srco = gen_reg_rtx (Pmode);

  if (GET_CODE (desta) != REG
      || REGNO (desta) < FIRST_PSEUDO_REGISTER)
    desta = copy_to_mode_reg (Pmode, desta);

  if (GET_CODE (srca) != REG
      || REGNO (srca) < FIRST_PSEUDO_REGISTER)
    srca = copy_to_mode_reg (Pmode, srca);

  emit_insn (gen_movstr_op (desto, srco, desta, srca));
  /* desto ends up being a1, which allows this type of add through MOVA.
     Subtract 1 so the result points at the NUL, as stpcpy requires.  */
  emit_insn (gen_addpsi3 (operands[0], desto, GEN_INT (-1)));

  return 1;
}
3994
3995 /* This is a strcmp() opcode. $0 is the destination (HI) which holds
3996 <=>0 depending on the comparison, $1 is one string (MEM:BLK), and
3997 $2 is the other (MEM:BLK). We must do the comparison, and then
3998 convert the flags to a signed integer result. */
int
m32c_expand_cmpstr(rtx *operands)
{
  rtx src1a, src2a;

  /* Extract the two string addresses.  */
  src1a = XEXP (operands[1], 0);
  src2a = XEXP (operands[2], 0);

  if (GET_CODE (src1a) != REG
      || REGNO (src1a) < FIRST_PSEUDO_REGISTER)
    src1a = copy_to_mode_reg (Pmode, src1a);

  if (GET_CODE (src2a) != REG
      || REGNO (src2a) < FIRST_PSEUDO_REGISTER)
    src2a = copy_to_mode_reg (Pmode, src2a);

  /* The compare opcode clobbers its address operands, so they appear
     as both inputs and outputs; then convert the flags to an int.  */
  emit_insn (gen_cmpstrhi_op (src1a, src2a, src1a, src2a));
  emit_insn (gen_cond_to_int (operands[0]));

  return 1;
}
4020
4021
4022 typedef rtx (*shift_gen_func)(rtx, rtx, rtx);
4023
4024 static shift_gen_func
4025 shift_gen_func_for (int mode, int code)
4026 {
4027 #define GFF(m,c,f) if (mode == m && code == c) return f
4028 GFF(QImode, ASHIFT, gen_ashlqi3_i);
4029 GFF(QImode, ASHIFTRT, gen_ashrqi3_i);
4030 GFF(QImode, LSHIFTRT, gen_lshrqi3_i);
4031 GFF(HImode, ASHIFT, gen_ashlhi3_i);
4032 GFF(HImode, ASHIFTRT, gen_ashrhi3_i);
4033 GFF(HImode, LSHIFTRT, gen_lshrhi3_i);
4034 GFF(PSImode, ASHIFT, gen_ashlpsi3_i);
4035 GFF(PSImode, ASHIFTRT, gen_ashrpsi3_i);
4036 GFF(PSImode, LSHIFTRT, gen_lshrpsi3_i);
4037 GFF(SImode, ASHIFT, TARGET_A16 ? gen_ashlsi3_16 : gen_ashlsi3_24);
4038 GFF(SImode, ASHIFTRT, TARGET_A16 ? gen_ashrsi3_16 : gen_ashrsi3_24);
4039 GFF(SImode, LSHIFTRT, TARGET_A16 ? gen_lshrsi3_16 : gen_lshrsi3_24);
4040 #undef GFF
4041 gcc_unreachable ();
4042 }
4043
4044 /* The m32c only has one shift, but it takes a signed count. GCC
4045 doesn't want this, so we fake it by negating any shift count when
4046 we're pretending to shift the other way. Also, the shift count is
4047 limited to -8..8. It's slightly better to use two shifts for 9..15
4048 than to load the count into r1h, so we do that too. */
/* Prepare/expand a shift.  SCALE is +1 or -1 (negated counts fake the
   "other" direction); SHIFT_CODE is ASHIFT/ASHIFTRT/LSHIFTRT.
   Returns 1 if the shift was fully emitted here, 0 if the caller
   should emit the pattern with the (possibly rewritten) count in
   operands[2].  */
int
m32c_prepare_shift (rtx * operands, int scale, int shift_code)
{
  enum machine_mode mode = GET_MODE (operands[0]);
  shift_gen_func func = shift_gen_func_for (mode, shift_code);
  rtx temp;

  if (GET_CODE (operands[2]) == CONST_INT)
    {
      /* Constant counts: chop into chip-sized shifts.  MAXC is the
	 largest single-insn count for this chip/mode.  */
      int maxc = TARGET_A24 && (mode == PSImode || mode == SImode) ? 32 : 8;
      int count = INTVAL (operands[2]) * scale;

      while (count > maxc)
	{
	  temp = gen_reg_rtx (mode);
	  emit_insn (func (temp, operands[1], GEN_INT (maxc)));
	  operands[1] = temp;
	  count -= maxc;
	}
      while (count < -maxc)
	{
	  temp = gen_reg_rtx (mode);
	  emit_insn (func (temp, operands[1], GEN_INT (-maxc)));
	  operands[1] = temp;
	  count += maxc;
	}
      emit_insn (func (operands[0], operands[1], GEN_INT (count)));
      return 1;
    }

  /* Variable count: materialize the (possibly negated) count.  */
  temp = gen_reg_rtx (QImode);
  if (scale < 0)
    /* The pattern has a NEG that corresponds to this.  */
    emit_move_insn (temp, gen_rtx_NEG (QImode, operands[2]));
  else if (TARGET_A16 && mode == SImode)
    /* We do this because the code below may modify this, we don't
       want to modify the origin of this value.  */
    emit_move_insn (temp, operands[2]);
  else
    /* We'll only use it for the shift, no point emitting a move.  */
    temp = operands[2];

  if (TARGET_A16 && GET_MODE_SIZE (mode) == 4)
    {
      /* The m16c has a limit of -16..16 for SI shifts, even when the
	 shift count is in a register.  Since there are so many targets
	 of these shifts, it's better to expand the RTL here than to
	 call a helper function.

	 The resulting code looks something like this:

	 cmp.b r1h,-16
	 jge.b 1f
	 shl.l -16,dest
	 add.b r1h,16
	 1f: cmp.b r1h,16
	 jle.b 1f
	 shl.l 16,dest
	 sub.b r1h,16
	 1f: shl.l r1h,dest

	 We take advantage of the fact that "negative" shifts are
	 undefined to skip one of the comparisons.  */

      rtx count;
      rtx label, insn, tempvar;

      emit_move_insn (operands[0], operands[1]);

      count = temp;
      label = gen_label_rtx ();
      LABEL_NUSES (label) ++;

      tempvar = gen_reg_rtx (mode);

      if (shift_code == ASHIFT)
	{
	  /* This is a left shift.  We only need check positive counts.  */
	  emit_jump_insn (gen_cbranchqi4 (gen_rtx_LE (VOIDmode, 0, 0),
					  count, GEN_INT (16), label));
	  /* Pre-shift by 16 (as two 8s) and reduce the count.  */
	  emit_insn (func (tempvar, operands[0], GEN_INT (8)));
	  emit_insn (func (operands[0], tempvar, GEN_INT (8)));
	  insn = emit_insn (gen_addqi3 (count, count, GEN_INT (-16)));
	  emit_label_after (label, insn);
	}
      else
	{
	  /* This is a right shift.  We only need check negative counts.  */
	  emit_jump_insn (gen_cbranchqi4 (gen_rtx_GE (VOIDmode, 0, 0),
					  count, GEN_INT (-16), label));
	  emit_insn (func (tempvar, operands[0], GEN_INT (-8)));
	  emit_insn (func (operands[0], tempvar, GEN_INT (-8)));
	  insn = emit_insn (gen_addqi3 (count, count, GEN_INT (16)));
	  emit_label_after (label, insn);
	}
      operands[1] = operands[0];
      emit_insn (func (operands[0], operands[0], count));
      return 1;
    }

  operands[2] = temp;
  return 0;
}
4152
4153 /* The m32c has a limited range of operations that work on PSImode
4154 values; we have to expand to SI, do the math, and truncate back to
4155 PSI. Yes, this is expensive, but hopefully gcc will learn to avoid
4156 those cases. */
void
m32c_expand_neg_mulpsi3 (rtx * operands)
{
  /* operands: a = b * i */
  rtx temp1;			/* b as SI */
  rtx scale /* i as SI */ ;
  rtx temp2;			/* a*b as SI */

  temp1 = gen_reg_rtx (SImode);
  temp2 = gen_reg_rtx (SImode);
  /* Widen the multiplier to SImode: register operands are
     zero-extended via the insn, constants copied directly.  */
  if (GET_CODE (operands[2]) != CONST_INT)
    {
      scale = gen_reg_rtx (SImode);
      emit_insn (gen_zero_extendpsisi2 (scale, operands[2]));
    }
  else
    scale = copy_to_mode_reg (SImode, operands[2]);

  emit_insn (gen_zero_extendpsisi2 (temp1, operands[1]));
  /* expand_simple_binop may return a different rtx than TEMP2; use
     whatever it hands back.  */
  temp2 = expand_simple_binop (SImode, MULT, temp1, scale, temp2, 1, OPTAB_LIB);
  emit_insn (gen_truncsipsi2 (operands[0], temp2));
}
4179
4180 /* Pattern Output Functions */
4181
4182 int
4183 m32c_expand_movcc (rtx *operands)
4184 {
4185 rtx rel = operands[1];
4186
4187 if (GET_CODE (rel) != EQ && GET_CODE (rel) != NE)
4188 return 1;
4189 if (GET_CODE (operands[2]) != CONST_INT
4190 || GET_CODE (operands[3]) != CONST_INT)
4191 return 1;
4192 if (GET_CODE (rel) == NE)
4193 {
4194 rtx tmp = operands[2];
4195 operands[2] = operands[3];
4196 operands[3] = tmp;
4197 rel = gen_rtx_EQ (GET_MODE (rel), XEXP (rel, 0), XEXP (rel, 1));
4198 }
4199
4200 emit_move_insn (operands[0],
4201 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
4202 rel,
4203 operands[2],
4204 operands[3]));
4205 return 0;
4206 }
4207
4208 /* Used for the "insv" pattern. Return nonzero to fail, else done. */
4209 int
4210 m32c_expand_insv (rtx *operands)
4211 {
4212 rtx op0, src0, p;
4213 int mask;
4214
4215 if (INTVAL (operands[1]) != 1)
4216 return 1;
4217
4218 /* Our insv opcode (bset, bclr) can only insert a one-bit constant. */
4219 if (GET_CODE (operands[3]) != CONST_INT)
4220 return 1;
4221 if (INTVAL (operands[3]) != 0
4222 && INTVAL (operands[3]) != 1
4223 && INTVAL (operands[3]) != -1)
4224 return 1;
4225
4226 mask = 1 << INTVAL (operands[2]);
4227
4228 op0 = operands[0];
4229 if (GET_CODE (op0) == SUBREG
4230 && SUBREG_BYTE (op0) == 0)
4231 {
4232 rtx sub = SUBREG_REG (op0);
4233 if (GET_MODE (sub) == HImode || GET_MODE (sub) == QImode)
4234 op0 = sub;
4235 }
4236
4237 if (!can_create_pseudo_p ()
4238 || (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0)))
4239 src0 = op0;
4240 else
4241 {
4242 src0 = gen_reg_rtx (GET_MODE (op0));
4243 emit_move_insn (src0, op0);
4244 }
4245
4246 if (GET_MODE (op0) == HImode
4247 && INTVAL (operands[2]) >= 8
4248 && GET_CODE (op0) == MEM)
4249 {
4250 /* We are little endian. */
4251 rtx new_mem = gen_rtx_MEM (QImode, plus_constant (XEXP (op0, 0), 1));
4252 MEM_COPY_ATTRIBUTES (new_mem, op0);
4253 mask >>= 8;
4254 }
4255
4256 /* First, we generate a mask with the correct polarity. If we are
4257 storing a zero, we want an AND mask, so invert it. */
4258 if (INTVAL (operands[3]) == 0)
4259 {
4260 /* Storing a zero, use an AND mask */
4261 if (GET_MODE (op0) == HImode)
4262 mask ^= 0xffff;
4263 else
4264 mask ^= 0xff;
4265 }
4266 /* Now we need to properly sign-extend the mask in case we need to
4267 fall back to an AND or OR opcode. */
4268 if (GET_MODE (op0) == HImode)
4269 {
4270 if (mask & 0x8000)
4271 mask -= 0x10000;
4272 }
4273 else
4274 {
4275 if (mask & 0x80)
4276 mask -= 0x100;
4277 }
4278
4279 switch ( (INTVAL (operands[3]) ? 4 : 0)
4280 + ((GET_MODE (op0) == HImode) ? 2 : 0)
4281 + (TARGET_A24 ? 1 : 0))
4282 {
4283 case 0: p = gen_andqi3_16 (op0, src0, GEN_INT (mask)); break;
4284 case 1: p = gen_andqi3_24 (op0, src0, GEN_INT (mask)); break;
4285 case 2: p = gen_andhi3_16 (op0, src0, GEN_INT (mask)); break;
4286 case 3: p = gen_andhi3_24 (op0, src0, GEN_INT (mask)); break;
4287 case 4: p = gen_iorqi3_16 (op0, src0, GEN_INT (mask)); break;
4288 case 5: p = gen_iorqi3_24 (op0, src0, GEN_INT (mask)); break;
4289 case 6: p = gen_iorhi3_16 (op0, src0, GEN_INT (mask)); break;
4290 case 7: p = gen_iorhi3_24 (op0, src0, GEN_INT (mask)); break;
4291 default: p = NULL_RTX; break; /* Not reached, but silences a warning. */
4292 }
4293
4294 emit_insn (p);
4295 return 0;
4296 }
4297
4298 const char *
4299 m32c_scc_pattern(rtx *operands, RTX_CODE code)
4300 {
4301 static char buf[30];
4302 if (GET_CODE (operands[0]) == REG
4303 && REGNO (operands[0]) == R0_REGNO)
4304 {
4305 if (code == EQ)
4306 return "stzx\t#1,#0,r0l";
4307 if (code == NE)
4308 return "stzx\t#0,#1,r0l";
4309 }
4310 sprintf(buf, "bm%s\t0,%%h0\n\tand.b\t#1,%%0", GET_RTX_NAME (code));
4311 return buf;
4312 }
4313
4314 /* Encode symbol attributes of a SYMBOL_REF into its
4315 SYMBOL_REF_FLAGS. */
4316 static void
4317 m32c_encode_section_info (tree decl, rtx rtl, int first)
4318 {
4319 int extra_flags = 0;
4320
4321 default_encode_section_info (decl, rtl, first);
4322 if (TREE_CODE (decl) == FUNCTION_DECL
4323 && m32c_special_page_vector_p (decl))
4324
4325 extra_flags = SYMBOL_FLAG_FUNCVEC_FUNCTION;
4326
4327 if (extra_flags)
4328 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= extra_flags;
4329 }
4330
4331 /* Returns TRUE if the current function is a leaf, and thus we can
4332 determine which registers an interrupt function really needs to
4333 save. The logic below is mostly about finding the insn sequence
4334 that's the function, versus any sequence that might be open for the
4335 current insn. */
static int
m32c_leaf_function_p (void)
{
  rtx saved_first, saved_last;
  struct sequence_stack *seq;
  int rv;

  /* Temporarily swap in the outermost (function-level) insn sequence
     so leaf_function_p examines the whole function, not whatever
     sequence happens to be open right now.  */
  saved_first = crtl->emit.x_first_insn;
  saved_last = crtl->emit.x_last_insn;
  for (seq = crtl->emit.sequence_stack; seq && seq->next; seq = seq->next)
    ;
  if (seq)
    {
      crtl->emit.x_first_insn = seq->first;
      crtl->emit.x_last_insn = seq->last;
    }

  rv = leaf_function_p ();

  /* Restore the previously-open sequence before returning.  */
  crtl->emit.x_first_insn = saved_first;
  crtl->emit.x_last_insn = saved_last;
  return rv;
}
4359
4360 /* Returns TRUE if the current function needs to use the ENTER/EXIT
4361 opcodes. If the function doesn't need the frame base or stack
4362 pointer, it can use the simpler RTS opcode. */
static bool
m32c_function_needs_enter (void)
{
  rtx insn;
  struct sequence_stack *seq;
  rtx sp = gen_rtx_REG (Pmode, SP_REGNO);
  rtx fb = gen_rtx_REG (Pmode, FB_REGNO);

  /* Walk to the outermost sequence so INSN starts at the first insn
     of the whole function (the loop body is the comma expression;
     note the intentionally empty statement).  */
  insn = get_insns ();
  for (seq = crtl->emit.sequence_stack;
       seq;
       insn = seq->first, seq = seq->next);

  /* ENTER/EXIT are needed iff any insn touches the stack pointer or
     the frame base register.  */
  while (insn)
    {
      if (reg_mentioned_p (sp, insn))
	return true;
      if (reg_mentioned_p (fb, insn))
	return true;
      insn = NEXT_INSN (insn);
    }
  return false;
}
4386
4387 /* Mark all the subexpressions of the PARALLEL rtx PAR as
4388 frame-related. Return PAR.
4389
4390 dwarf2out.c:dwarf2out_frame_debug_expr ignores sub-expressions of a
4391 PARALLEL rtx other than the first if they do not have the
4392 FRAME_RELATED flag set on them. So this function is handy for
4393 marking up 'enter' instructions. */
4394 static rtx
4395 m32c_all_frame_related (rtx par)
4396 {
4397 int len = XVECLEN (par, 0);
4398 int i;
4399
4400 for (i = 0; i < len; i++)
4401 F (XVECEXP (par, 0, i));
4402
4403 return par;
4404 }
4405
4406 /* Emits the prologue. See the frame layout comment earlier in this
4407 file. We can reserve up to 256 bytes with the ENTER opcode, beyond
4408 that we manually update sp. */
void
m32c_emit_prologue (void)
{
  int frame_size, extra_frame_size = 0, reg_save_size;
  int complex_prologue = 0;

  cfun->machine->is_leaf = m32c_leaf_function_p ();
  if (interrupt_p (cfun->decl))
    {
      cfun->machine->is_interrupt = 1;
      complex_prologue = 1;
    }
  else if (bank_switch_p (cfun->decl))
    warning (OPT_Wattributes,
	     "%<bank_switch%> has no effect on non-interrupt functions");

  /* PP_justcount only measures; nothing is emitted yet.  */
  reg_save_size = m32c_pushm_popm (PP_justcount);

  /* Interrupt entry: either switch register banks or push the
     registers the handler will clobber.  */
  if (interrupt_p (cfun->decl))
    {
      if (bank_switch_p (cfun->decl))
	emit_insn (gen_fset_b ());
      else if (cfun->machine->intr_pushm)
	emit_insn (gen_pushm (GEN_INT (cfun->machine->intr_pushm)));
    }

  frame_size =
    m32c_initial_elimination_offset (FB_REGNO, SP_REGNO) - reg_save_size;
  if (frame_size == 0
      && !m32c_function_needs_enter ())
    cfun->machine->use_rts = 1;

  /* ENTER can only reserve up to 254 bytes; anything beyond that is
     subtracted from SP explicitly below.  */
  if (frame_size > 254)
    {
      extra_frame_size = frame_size - 254;
      frame_size = 254;
    }
  if (cfun->machine->use_rts == 0)
    F (emit_insn (m32c_all_frame_related
		  (TARGET_A16
		   ? gen_prologue_enter_16 (GEN_INT (frame_size + 2))
		   : gen_prologue_enter_24 (GEN_INT (frame_size + 4)))));

  if (extra_frame_size)
    {
      complex_prologue = 1;
      if (TARGET_A16)
	F (emit_insn (gen_addhi3 (gen_rtx_REG (HImode, SP_REGNO),
				  gen_rtx_REG (HImode, SP_REGNO),
				  GEN_INT (-extra_frame_size))));
      else
	F (emit_insn (gen_addpsi3 (gen_rtx_REG (PSImode, SP_REGNO),
				   gen_rtx_REG (PSImode, SP_REGNO),
				   GEN_INT (-extra_frame_size))));
    }

  complex_prologue += m32c_pushm_popm (PP_pushm);

  /* This just emits a comment into the .s file for debugging.  */
  if (complex_prologue)
    emit_insn (gen_prologue_end ());
}
4471
4472 /* Likewise, for the epilogue. The only exception is that, for
4473 interrupts, we must manually unwind the frame as the REIT opcode
4474 doesn't do that. */
void
m32c_emit_epilogue (void)
{
  int popm_count = m32c_pushm_popm (PP_justcount);

  /* This just emits a comment into the .s file for debugging.  */
  if (popm_count > 0 || cfun->machine->is_interrupt)
    emit_insn (gen_epilogue_start ());

  /* Restore call-saved registers pushed by the prologue.  */
  if (popm_count > 0)
    m32c_pushm_popm (PP_popm);

  if (cfun->machine->is_interrupt)
    {
      enum machine_mode spmode = TARGET_A16 ? HImode : PSImode;

      /* REIT clears B flag and restores $fp for us, but we still
	 have to fix up the stack.  USE_RTS just means we didn't
	 emit ENTER.  */
      if (!cfun->machine->use_rts)
	{
	  /* Unwind via A0 as scratch: SP = FP, then pop the saved FP.  */
	  emit_move_insn (gen_rtx_REG (spmode, A0_REGNO),
			  gen_rtx_REG (spmode, FP_REGNO));
	  emit_move_insn (gen_rtx_REG (spmode, SP_REGNO),
			  gen_rtx_REG (spmode, A0_REGNO));
	  /* We can't just add this to the POPM because it would be in
	     the wrong order, and wouldn't fix the stack if we're bank
	     switching.  */
	  if (TARGET_A16)
	    emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, FP_REGNO)));
	  else
	    emit_insn (gen_poppsi (gen_rtx_REG (PSImode, FP_REGNO)));
	}
      if (!bank_switch_p (cfun->decl) && cfun->machine->intr_pushm)
	emit_insn (gen_popm (GEN_INT (cfun->machine->intr_pushm)));

      /* The FREIT (Fast REturn from InTerrupt) instruction should be
	 generated only for M32C/M32CM targets (generate the REIT
	 instruction otherwise).  */
      if (fast_interrupt_p (cfun->decl))
	{
	  /* Check if fast_attribute is set for M32C or M32CM.  */
	  if (TARGET_A24)
	    {
	      emit_jump_insn (gen_epilogue_freit ());
	    }
	  /* If fast_interrupt attribute is set for an R8C or M16C
	     target ignore this attribute and generated REIT
	     instruction.  */
	  else
	    {
	      warning (OPT_Wattributes,
		       "%<fast_interrupt%> attribute directive ignored");
	      emit_jump_insn (gen_epilogue_reit_16 ());
	    }
	}
      else if (TARGET_A16)
	emit_jump_insn (gen_epilogue_reit_16 ());
      else
	emit_jump_insn (gen_epilogue_reit_24 ());
    }
  else if (cfun->machine->use_rts)
    emit_jump_insn (gen_epilogue_rts ());
  else if (TARGET_A16)
    emit_jump_insn (gen_epilogue_exitd_16 ());
  else
    emit_jump_insn (gen_epilogue_exitd_24 ());
}
4543
4544 void
4545 m32c_emit_eh_epilogue (rtx ret_addr)
4546 {
4547 /* R0[R2] has the stack adjustment. R1[R3] has the address to
4548 return to. We have to fudge the stack, pop everything, pop SP
4549 (fudged), and return (fudged). This is actually easier to do in
4550 assembler, so punt to libgcc. */
4551 emit_jump_insn (gen_eh_epilogue (ret_addr, cfun->machine->eh_stack_adjust));
4552 /* emit_clobber (gen_rtx_REG (HImode, R0L_REGNO)); */
4553 }
4554
4555 /* Indicate which flags must be properly set for a given conditional. */
4556 static int
4557 flags_needed_for_conditional (rtx cond)
4558 {
4559 switch (GET_CODE (cond))
4560 {
4561 case LE:
4562 case GT:
4563 return FLAGS_OSZ;
4564 case LEU:
4565 case GTU:
4566 return FLAGS_ZC;
4567 case LT:
4568 case GE:
4569 return FLAGS_OS;
4570 case LTU:
4571 case GEU:
4572 return FLAGS_C;
4573 case EQ:
4574 case NE:
4575 return FLAGS_Z;
4576 default:
4577 return FLAGS_N;
4578 }
4579 }
4580
4581 #define DEBUG_CMP 0
4582
4583 /* Returns true if a compare insn is redundant because it would only
4584 set flags that are already set correctly. */
4585 static bool
4586 m32c_compare_redundant (rtx cmp, rtx *operands)
4587 {
4588 int flags_needed;
4589 int pflags;
4590 rtx prev, pp, next;
4591 rtx op0, op1;
4592 #if DEBUG_CMP
4593 int prev_icode, i;
4594 #endif
4595
4596 op0 = operands[0];
4597 op1 = operands[1];
4598
4599 #if DEBUG_CMP
4600 fprintf(stderr, "\n\033[32mm32c_compare_redundant\033[0m\n");
4601 debug_rtx(cmp);
4602 for (i=0; i<2; i++)
4603 {
4604 fprintf(stderr, "operands[%d] = ", i);
4605 debug_rtx(operands[i]);
4606 }
4607 #endif
4608
4609 next = next_nonnote_insn (cmp);
4610 if (!next || !INSN_P (next))
4611 {
4612 #if DEBUG_CMP
4613 fprintf(stderr, "compare not followed by insn\n");
4614 debug_rtx(next);
4615 #endif
4616 return false;
4617 }
4618 if (GET_CODE (PATTERN (next)) == SET
4619 && GET_CODE (XEXP ( PATTERN (next), 1)) == IF_THEN_ELSE)
4620 {
4621 next = XEXP (XEXP (PATTERN (next), 1), 0);
4622 }
4623 else if (GET_CODE (PATTERN (next)) == SET)
4624 {
4625 /* If this is a conditional, flags_needed will be something
4626 other than FLAGS_N, which we test below. */
4627 next = XEXP (PATTERN (next), 1);
4628 }
4629 else
4630 {
4631 #if DEBUG_CMP
4632 fprintf(stderr, "compare not followed by conditional\n");
4633 debug_rtx(next);
4634 #endif
4635 return false;
4636 }
4637 #if DEBUG_CMP
4638 fprintf(stderr, "conditional is: ");
4639 debug_rtx(next);
4640 #endif
4641
4642 flags_needed = flags_needed_for_conditional (next);
4643 if (flags_needed == FLAGS_N)
4644 {
4645 #if DEBUG_CMP
4646 fprintf(stderr, "compare not followed by conditional\n");
4647 debug_rtx(next);
4648 #endif
4649 return false;
4650 }
4651
4652 /* Compare doesn't set overflow and carry the same way that
4653 arithmetic instructions do, so we can't replace those. */
4654 if (flags_needed & FLAGS_OC)
4655 return false;
4656
4657 prev = cmp;
4658 do {
4659 prev = prev_nonnote_insn (prev);
4660 if (!prev)
4661 {
4662 #if DEBUG_CMP
4663 fprintf(stderr, "No previous insn.\n");
4664 #endif
4665 return false;
4666 }
4667 if (!INSN_P (prev))
4668 {
4669 #if DEBUG_CMP
4670 fprintf(stderr, "Previous insn is a non-insn.\n");
4671 #endif
4672 return false;
4673 }
4674 pp = PATTERN (prev);
4675 if (GET_CODE (pp) != SET)
4676 {
4677 #if DEBUG_CMP
4678 fprintf(stderr, "Previous insn is not a SET.\n");
4679 #endif
4680 return false;
4681 }
4682 pflags = get_attr_flags (prev);
4683
4684 /* Looking up attributes of previous insns corrupted the recog
4685 tables. */
4686 INSN_UID (cmp) = -1;
4687 recog (PATTERN (cmp), cmp, 0);
4688
4689 if (pflags == FLAGS_N
4690 && reg_mentioned_p (op0, pp))
4691 {
4692 #if DEBUG_CMP
4693 fprintf(stderr, "intermediate non-flags insn uses op:\n");
4694 debug_rtx(prev);
4695 #endif
4696 return false;
4697 }
4698
4699 /* Check for comparisons against memory - between volatiles and
4700 aliases, we just can't risk this one. */
4701 if (GET_CODE (operands[0]) == MEM
4702 || GET_CODE (operands[0]) == MEM)
4703 {
4704 #if DEBUG_CMP
4705 fprintf(stderr, "comparisons with memory:\n");
4706 debug_rtx(prev);
4707 #endif
4708 return false;
4709 }
4710
4711 /* Check for PREV changing a register that's used to compute a
4712 value in CMP, even if it doesn't otherwise change flags. */
4713 if (GET_CODE (operands[0]) == REG
4714 && rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[0]))
4715 {
4716 #if DEBUG_CMP
4717 fprintf(stderr, "sub-value affected, op0:\n");
4718 debug_rtx(prev);
4719 #endif
4720 return false;
4721 }
4722 if (GET_CODE (operands[1]) == REG
4723 && rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[1]))
4724 {
4725 #if DEBUG_CMP
4726 fprintf(stderr, "sub-value affected, op1:\n");
4727 debug_rtx(prev);
4728 #endif
4729 return false;
4730 }
4731
4732 } while (pflags == FLAGS_N);
4733 #if DEBUG_CMP
4734 fprintf(stderr, "previous flag-setting insn:\n");
4735 debug_rtx(prev);
4736 debug_rtx(pp);
4737 #endif
4738
4739 if (GET_CODE (pp) == SET
4740 && GET_CODE (XEXP (pp, 0)) == REG
4741 && REGNO (XEXP (pp, 0)) == FLG_REGNO
4742 && GET_CODE (XEXP (pp, 1)) == COMPARE)
4743 {
4744 /* Adjacent cbranches must have the same operands to be
4745 redundant. */
4746 rtx pop0 = XEXP (XEXP (pp, 1), 0);
4747 rtx pop1 = XEXP (XEXP (pp, 1), 1);
4748 #if DEBUG_CMP
4749 fprintf(stderr, "adjacent cbranches\n");
4750 debug_rtx(pop0);
4751 debug_rtx(pop1);
4752 #endif
4753 if (rtx_equal_p (op0, pop0)
4754 && rtx_equal_p (op1, pop1))
4755 return true;
4756 #if DEBUG_CMP
4757 fprintf(stderr, "prev cmp not same\n");
4758 #endif
4759 return false;
4760 }
4761
4762 /* Else the previous insn must be a SET, with either the source or
4763 dest equal to operands[0], and operands[1] must be zero. */
4764
4765 if (!rtx_equal_p (op1, const0_rtx))
4766 {
4767 #if DEBUG_CMP
4768 fprintf(stderr, "operands[1] not const0_rtx\n");
4769 #endif
4770 return false;
4771 }
4772 if (GET_CODE (pp) != SET)
4773 {
4774 #if DEBUG_CMP
4775 fprintf (stderr, "pp not set\n");
4776 #endif
4777 return false;
4778 }
4779 if (!rtx_equal_p (op0, SET_SRC (pp))
4780 && !rtx_equal_p (op0, SET_DEST (pp)))
4781 {
4782 #if DEBUG_CMP
4783 fprintf(stderr, "operands[0] not found in set\n");
4784 #endif
4785 return false;
4786 }
4787
4788 #if DEBUG_CMP
4789 fprintf(stderr, "cmp flags %x prev flags %x\n", flags_needed, pflags);
4790 #endif
4791 if ((pflags & flags_needed) == flags_needed)
4792 return true;
4793
4794 return false;
4795 }
4796
4797 /* Return the pattern for a compare. This will be commented out if
4798 the compare is redundant, else a normal pattern is returned. Thus,
4799 the assembler output says where the compare would have been. */
4800 char *
4801 m32c_output_compare (rtx insn, rtx *operands)
4802 {
4803 static char templ[] = ";cmp.b\t%1,%0";
4804 /* ^ 5 */
4805
4806 templ[5] = " bwll"[GET_MODE_SIZE(GET_MODE(operands[0]))];
4807 if (m32c_compare_redundant (insn, operands))
4808 {
4809 #if DEBUG_CMP
4810 fprintf(stderr, "cbranch: cmp not needed\n");
4811 #endif
4812 return templ;
4813 }
4814
4815 #if DEBUG_CMP
4816 fprintf(stderr, "cbranch: cmp needed: `%s'\n", templ + 1);
4817 #endif
4818 return templ + 1;
4819 }
4820
#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO m32c_encode_section_info

/* If the frame pointer isn't used, we detect it manually.  But the
   stack pointer doesn't have as flexible addressing as the frame
   pointer, so we always assume we have it.  */

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED hook_bool_void_true

/* The Global `targetm' Variable.  Instantiates the target hook
   vector from the TARGET_* macros defined above (and earlier in this
   file), with TARGET_INITIALIZER filling in the defaults.  */

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-m32c.h"