]> git.ipfire.org Git - thirdparty/gcc.git/blob - gcc/config/m32c/m32c.c
genattrtab.c (write_header): Include hash-set.h...
[thirdparty/gcc.git] / gcc / config / m32c / m32c.c
1 /* Target Code for R8C/M16C/M32C
2 Copyright (C) 2005-2015 Free Software Foundation, Inc.
3 Contributed by Red Hat.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published
9 by the Free Software Foundation; either version 3, or (at your
10 option) any later version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "regs.h"
27 #include "hard-reg-set.h"
28 #include "insn-config.h"
29 #include "conditions.h"
30 #include "insn-flags.h"
31 #include "output.h"
32 #include "insn-attr.h"
33 #include "flags.h"
34 #include "recog.h"
35 #include "reload.h"
36 #include "diagnostic-core.h"
37 #include "obstack.h"
38 #include "hash-set.h"
39 #include "machmode.h"
40 #include "vec.h"
41 #include "double-int.h"
42 #include "input.h"
43 #include "alias.h"
44 #include "symtab.h"
45 #include "wide-int.h"
46 #include "inchash.h"
47 #include "tree.h"
48 #include "fold-const.h"
49 #include "stor-layout.h"
50 #include "varasm.h"
51 #include "calls.h"
52 #include "expr.h"
53 #include "insn-codes.h"
54 #include "optabs.h"
55 #include "except.h"
56 #include "input.h"
57 #include "function.h"
58 #include "ggc.h"
59 #include "target.h"
60 #include "target-def.h"
61 #include "tm_p.h"
62 #include "langhooks.h"
63 #include "hash-table.h"
64 #include "predict.h"
65 #include "dominance.h"
66 #include "cfg.h"
67 #include "cfgrtl.h"
68 #include "cfganal.h"
69 #include "lcm.h"
70 #include "cfgbuild.h"
71 #include "cfgcleanup.h"
72 #include "basic-block.h"
73 #include "tree-ssa-alias.h"
74 #include "internal-fn.h"
75 #include "gimple-fold.h"
76 #include "tree-eh.h"
77 #include "gimple-expr.h"
78 #include "is-a.h"
79 #include "gimple.h"
80 #include "df.h"
81 #include "tm-constrs.h"
82 #include "builtins.h"
83
84 /* Prototypes */
85
/* Used by m32c_pushm_popm to select what that function should do:
   emit the save insns, emit the restore insns, or merely compute the
   size of the save area without emitting anything.  */
typedef enum
{
  PP_pushm,	/* Emit PUSHM insns to save registers.  */
  PP_popm,	/* Emit POPM insns to restore registers.  */
  PP_justcount	/* Emit nothing; just return the byte count.  */
} Push_Pop_Type;
93
94 static bool m32c_function_needs_enter (void);
95 static tree interrupt_handler (tree *, tree, tree, int, bool *);
96 static tree function_vector_handler (tree *, tree, tree, int, bool *);
97 static int interrupt_p (tree node);
98 static int bank_switch_p (tree node);
99 static int fast_interrupt_p (tree node);
100 static int interrupt_p (tree node);
101 static bool m32c_asm_integer (rtx, unsigned int, int);
102 static int m32c_comp_type_attributes (const_tree, const_tree);
103 static bool m32c_fixed_condition_code_regs (unsigned int *, unsigned int *);
104 static struct machine_function *m32c_init_machine_status (void);
105 static void m32c_insert_attributes (tree, tree *);
106 static bool m32c_legitimate_address_p (machine_mode, rtx, bool);
107 static bool m32c_addr_space_legitimate_address_p (machine_mode, rtx, bool, addr_space_t);
108 static rtx m32c_function_arg (cumulative_args_t, machine_mode,
109 const_tree, bool);
110 static bool m32c_pass_by_reference (cumulative_args_t, machine_mode,
111 const_tree, bool);
112 static void m32c_function_arg_advance (cumulative_args_t, machine_mode,
113 const_tree, bool);
114 static unsigned int m32c_function_arg_boundary (machine_mode, const_tree);
115 static int m32c_pushm_popm (Push_Pop_Type);
116 static bool m32c_strict_argument_naming (cumulative_args_t);
117 static rtx m32c_struct_value_rtx (tree, int);
118 static rtx m32c_subreg (machine_mode, rtx, machine_mode, int);
119 static int need_to_save (int);
120 static rtx m32c_function_value (const_tree, const_tree, bool);
121 static rtx m32c_libcall_value (machine_mode, const_rtx);
122
123 /* Returns true if an address is specified, else false. */
124 static bool m32c_get_pragma_address (const char *varname, unsigned *addr);
125
/* Marks a SYMBOL_REF as referring to a function reached through the
   function-vector (special page) call mechanism.  */
#define SYMBOL_FLAG_FUNCVEC_FUNCTION (SYMBOL_FLAG_MACH_DEP << 0)

/* Shorthand for string equality.  */
#define streq(a,b) (strcmp ((a), (b)) == 0)

/* Internal support routines */

/* Debugging statements are tagged with DEBUG0 only so that they can
   be easily enabled individually, by replacing the '0' with '1' as
   needed.  */
#define DEBUG0 0
#define DEBUG1 1

#if DEBUG0
/* This is needed by some of the commented-out debug statements
   below.  */
static char const *class_names[LIM_REG_CLASSES] = REG_CLASS_NAMES;
#endif
/* Per-class register bitmasks; indexed by reg class, one word each
   (this port has few enough hard registers for a single word).  */
static int class_contents[LIM_REG_CLASSES][1] = REG_CLASS_CONTENTS;

/* These are all to support encode_pattern().  pattern[] holds the
   encoded string, patternp the write cursor, and patternr[] the rtx
   node corresponding to each character.  */
static char pattern[30], *patternp;
static GTY(()) rtx patternr[30];
#define RTX_IS(x) (streq (pattern, x))

/* Some macros to simplify the logic throughout this file.  */
#define IS_MEM_REGNO(regno) ((regno) >= MEM0_REGNO && (regno) <= MEM7_REGNO)
#define IS_MEM_REG(rtx) (GET_CODE (rtx) == REG && IS_MEM_REGNO (REGNO (rtx)))

#define IS_CR_REGNO(regno) ((regno) >= SB_REGNO && (regno) <= PC_REGNO)
#define IS_CR_REG(rtx) (GET_CODE (rtx) == REG && IS_CR_REGNO (REGNO (rtx)))
156
157 static int
158 far_addr_space_p (rtx x)
159 {
160 if (GET_CODE (x) != MEM)
161 return 0;
162 #if DEBUG0
163 fprintf(stderr, "\033[35mfar_addr_space: "); debug_rtx(x);
164 fprintf(stderr, " = %d\033[0m\n", MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR);
165 #endif
166 return MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR;
167 }
168
169 /* We do most RTX matching by converting the RTX into a string, and
170 using string compares. This vastly simplifies the logic in many of
171 the functions in this file.
172
173 On exit, pattern[] has the encoded string (use RTX_IS("...") to
174 compare it) and patternr[] has pointers to the nodes in the RTX
175 corresponding to each character in the encoded string. The latter
176 is mostly used by print_operand().
177
178 Unrecognized patterns have '?' in them; this shows up when the
179 assembler complains about syntax errors.
180 */
181
/* Recursive worker for encode_pattern: append one character per rtx
   node to pattern[] (via patternp) and record the node itself in
   patternr[] at the same index.  Unknown codes encode as '?'.  */
static void
encode_pattern_1 (rtx x)
{
  int i;

  /* Leave room for the trailing NUL; mark truncation with '?' so the
     result can never match a valid pattern.  */
  if (patternp == pattern + sizeof (pattern) - 2)
    {
      patternp[-1] = '?';
      return;
    }

  patternr[patternp - pattern] = x;

  switch (GET_CODE (x))
    {
    case REG:
      *patternp++ = 'r';
      break;
    case SUBREG:
      /* A size-changing subreg gets an extra 'S'; so does a PSImode
	 subreg of a REG (a mode-punned address register).  */
      if (GET_MODE_SIZE (GET_MODE (x)) !=
	  GET_MODE_SIZE (GET_MODE (XEXP (x, 0))))
	*patternp++ = 'S';
      if (GET_MODE (x) == PSImode
	  && GET_CODE (XEXP (x, 0)) == REG)
	*patternp++ = 'S';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case MEM:
      *patternp++ = 'm';
      /* FALLTHRU - both MEM and CONST encode their single operand.  */
    case CONST:
      encode_pattern_1 (XEXP (x, 0));
      break;
    case SIGN_EXTEND:
      *patternp++ = '^';
      *patternp++ = 'S';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case ZERO_EXTEND:
      *patternp++ = '^';
      *patternp++ = 'Z';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case PLUS:
      *patternp++ = '+';
      encode_pattern_1 (XEXP (x, 0));
      encode_pattern_1 (XEXP (x, 1));
      break;
    case PRE_DEC:
      *patternp++ = '>';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case POST_INC:
      *patternp++ = '<';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case LO_SUM:
      *patternp++ = 'L';
      encode_pattern_1 (XEXP (x, 0));
      encode_pattern_1 (XEXP (x, 1));
      break;
    case HIGH:
      *patternp++ = 'H';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case SYMBOL_REF:
      *patternp++ = 's';
      break;
    case LABEL_REF:
      *patternp++ = 'l';
      break;
    case CODE_LABEL:
      *patternp++ = 'c';
      break;
    case CONST_INT:
    case CONST_DOUBLE:
      *patternp++ = 'i';
      break;
    case UNSPEC:
      /* Encode the unspec number as a digit, then its operands.  */
      *patternp++ = 'u';
      *patternp++ = '0' + XCINT (x, 1, UNSPEC);
      for (i = 0; i < XVECLEN (x, 0); i++)
	encode_pattern_1 (XVECEXP (x, 0, i));
      break;
    case USE:
      *patternp++ = 'U';
      break;
    case PARALLEL:
      *patternp++ = '|';
      for (i = 0; i < XVECLEN (x, 0); i++)
	encode_pattern_1 (XVECEXP (x, 0, i));
      break;
    case EXPR_LIST:
      *patternp++ = 'E';
      encode_pattern_1 (XEXP (x, 0));
      if (XEXP (x, 1))
	encode_pattern_1 (XEXP (x, 1));
      break;
    default:
      *patternp++ = '?';
#if DEBUG0
      fprintf (stderr, "can't encode pattern %s\n",
	       GET_RTX_NAME (GET_CODE (x)));
      debug_rtx (x);
      gcc_unreachable ();
#endif
      break;
    }
}
290
291 static void
292 encode_pattern (rtx x)
293 {
294 patternp = pattern;
295 encode_pattern_1 (x);
296 *patternp = 0;
297 }
298
299 /* Since register names indicate the mode they're used in, we need a
300 way to determine which name to refer to the register with. Called
301 by print_operand(). */
302
303 static const char *
304 reg_name_with_mode (int regno, machine_mode mode)
305 {
306 int mlen = GET_MODE_SIZE (mode);
307 if (regno == R0_REGNO && mlen == 1)
308 return "r0l";
309 if (regno == R0_REGNO && (mlen == 3 || mlen == 4))
310 return "r2r0";
311 if (regno == R0_REGNO && mlen == 6)
312 return "r2r1r0";
313 if (regno == R0_REGNO && mlen == 8)
314 return "r3r1r2r0";
315 if (regno == R1_REGNO && mlen == 1)
316 return "r1l";
317 if (regno == R1_REGNO && (mlen == 3 || mlen == 4))
318 return "r3r1";
319 if (regno == A0_REGNO && TARGET_A16 && (mlen == 3 || mlen == 4))
320 return "a1a0";
321 return reg_names[regno];
322 }
323
324 /* How many bytes a register uses on stack when it's pushed. We need
325 to know this because the push opcode needs to explicitly indicate
326 the size of the register, even though the name of the register
327 already tells it that. Used by m32c_output_reg_{push,pop}, which
328 is only used through calls to ASM_OUTPUT_REG_{PUSH,POP}. */
329
330 static int
331 reg_push_size (int regno)
332 {
333 switch (regno)
334 {
335 case R0_REGNO:
336 case R1_REGNO:
337 return 2;
338 case R2_REGNO:
339 case R3_REGNO:
340 case FLG_REGNO:
341 return 2;
342 case A0_REGNO:
343 case A1_REGNO:
344 case SB_REGNO:
345 case FB_REGNO:
346 case SP_REGNO:
347 if (TARGET_A16)
348 return 2;
349 else
350 return 3;
351 default:
352 gcc_unreachable ();
353 }
354 }
355
356 /* Given two register classes, find the largest intersection between
357 them. If there is no intersection, return RETURNED_IF_EMPTY
358 instead. */
/* Given two register classes, find the largest intersection between
   them.  If there is no intersection, return RETURNED_IF_EMPTY
   instead.  "Largest" means the named class with the most registers
   that is wholly contained in the intersection.  */
static reg_class_t
reduce_class (reg_class_t original_class, reg_class_t limiting_class,
	      reg_class_t returned_if_empty)
{
  HARD_REG_SET cc;
  int i;
  reg_class_t best = NO_REGS;
  unsigned int best_size = 0;

  if (original_class == limiting_class)
    return original_class;

  /* cc = original & limiting.  */
  cc = reg_class_contents[original_class];
  AND_HARD_REG_SET (cc, reg_class_contents[limiting_class]);

  /* Scan all named classes for the biggest one contained in cc.  */
  for (i = 0; i < LIM_REG_CLASSES; i++)
    {
      if (hard_reg_set_subset_p (reg_class_contents[i], cc))
	if (best_size < reg_class_size[i])
	  {
	    best = (reg_class_t) i;
	    best_size = reg_class_size[i];
	  }

    }
  if (best == NO_REGS)
    return returned_if_empty;
  return best;
}
388
389 /* Used by m32c_register_move_cost to determine if a move is
390 impossibly expensive. */
/* Used by m32c_register_move_cost to determine if a move is
   impossibly expensive.  Returns true if some register in RCLASS can
   hold a value of MODE.  */
static bool
class_can_hold_mode (reg_class_t rclass, machine_mode mode)
{
  /* Cache the results:  0=untested  1=no  2=yes */
  static char results[LIM_REG_CLASSES][MAX_MACHINE_MODE];

  if (results[(int) rclass][mode] == 0)
    {
      int r;
      /* Assume "no" until a suitable register is found.  */
      results[rclass][mode] = 1;
      for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
	if (in_hard_reg_set_p (reg_class_contents[(int) rclass], mode, r)
	    && HARD_REGNO_MODE_OK (r, mode))
	  {
	    results[rclass][mode] = 2;
	    break;
	  }
    }

#if DEBUG0
  fprintf (stderr, "class %s can hold %s? %s\n",
	   class_names[(int) rclass], mode_name[mode],
	   (results[rclass][mode] == 2) ? "yes" : "no");
#endif
  return results[(int) rclass][mode] == 2;
}
417
418 /* Run-time Target Specification. */
419
/* Memregs are memory locations that gcc treats like general
   registers, as there are a limited number of true registers and the
   m32c families can use memory in most places that registers can be
   used.

   However, since memory accesses are more expensive than registers,
   we allow the user to limit the number of memregs available, in
   order to try to persuade gcc to try harder to use real registers.

   Memregs are provided by lib1funcs.S.
*/

/* Cleared while target attributes temporarily forbid changing the
   memreg set (see the attribute-handling code later in this file).  */
int ok_to_change_target_memregs = TRUE;
433
434 /* Implements TARGET_OPTION_OVERRIDE. */
435
/* Implements TARGET_OPTION_OVERRIDE.  Validates -memregs=, and
   adjusts generic optimization flags to suit the chip family.  */

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE m32c_option_override

static void
m32c_option_override (void)
{
  /* We limit memregs to 0..16, and provide a default.  */
  if (global_options_set.x_target_memregs)
    {
      if (target_memregs < 0 || target_memregs > 16)
	error ("invalid target memregs value '%d'", target_memregs);
    }
  else
    target_memregs = 16;

  /* NOTE(review): ivopts is disabled on the A24 family here; the
     rationale is not recorded in this file -- presumably the induction
     variable rewrites pessimize A24 addressing.  Confirm before
     changing.  */
  if (TARGET_A24)
    flag_ivopts = 0;

  /* This target defaults to strict volatile bitfields.  */
  if (flag_strict_volatile_bitfields < 0 && abi_version_at_least(2))
    flag_strict_volatile_bitfields = 1;

  /* r8c/m16c have no 16-bit indirect call, so thunks are involved.
     This is always worse than an absolute call.  */
  if (TARGET_A16)
    flag_no_function_cse = 1;

  /* This wants to put insns between compares and their jumps.  */
  /* FIXME: The right solution is to properly trace the flags register
     values, but that is too much work for stage 4.  */
  flag_combine_stack_adjustments = 0;
}
468
/* Implements TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE.  Re-applies the
   A16 no-function-cse adjustment whenever per-function option
   attributes change the option set.  */

#undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m32c_override_options_after_change

static void
m32c_override_options_after_change (void)
{
  /* Same reasoning as in m32c_option_override: no 16-bit indirect
     call on A16 parts, so cse'ing function addresses loses.  */
  if (TARGET_A16)
    flag_no_function_cse = 1;
}
478
479 /* Defining data structures for per-function information */
480
/* The usual; we set up our machine_function data.  Allocates a
   zeroed per-function machine_function in GC'd memory.  */
static struct machine_function *
m32c_init_machine_status (void)
{
  return ggc_cleared_alloc<machine_function> ();
}
487
/* Implements INIT_EXPANDERS.  We just set up to call the above
   function whenever a new function's machine data is needed.  */
void
m32c_init_expanders (void)
{
  init_machine_status = m32c_init_machine_status;
}
495
496 /* Storage Layout */
497
498 /* Register Basics */
499
500 /* Basic Characteristics of Registers */
501
/* Whether a mode fits in a register is complex enough to warrant a
   table.  Each column is the number of hard registers needed to hold
   a value of the given mode class in that register, with 0 meaning
   the mode does not fit there at all (see m32c_hard_regno_nregs_1).
   Rows are in hard register number order.  */
static struct
{
  char qi_regs;	/* 1-byte modes.  */
  char hi_regs;	/* 2-byte modes.  */
  char pi_regs;	/* PSImode / 3-byte pointers (A24).  */
  char si_regs;	/* 4-byte modes.  */
  char di_regs;	/* 8-byte modes.  */
} nregs_table[FIRST_PSEUDO_REGISTER] =
{
  { 1, 1, 2, 2, 4 },	/* r0 */
  { 0, 1, 0, 0, 0 },	/* r2 */
  { 1, 1, 2, 2, 0 },	/* r1 */
  { 0, 1, 0, 0, 0 },	/* r3 */
  { 0, 1, 1, 0, 0 },	/* a0 */
  { 0, 1, 1, 0, 0 },	/* a1 */
  { 0, 1, 1, 0, 0 },	/* sb */
  { 0, 1, 1, 0, 0 },	/* fb */
  { 0, 1, 1, 0, 0 },	/* sp */
  { 1, 1, 1, 0, 0 },	/* pc */
  { 0, 0, 0, 0, 0 },	/* fl */
  { 1, 1, 1, 0, 0 },	/* ap */
  { 1, 1, 2, 2, 4 },	/* mem0 */
  { 1, 1, 2, 2, 4 },	/* mem1 */
  { 1, 1, 2, 2, 4 },	/* mem2 */
  { 1, 1, 2, 2, 4 },	/* mem3 */
  { 1, 1, 2, 2, 4 },	/* mem4 */
  { 1, 1, 2, 2, 0 },	/* mem5 */
  { 1, 1, 2, 2, 0 },	/* mem6 */
  { 1, 1, 0, 0, 0 },	/* mem7 */
};
534
/* Implements TARGET_CONDITIONAL_REGISTER_USAGE.  We adjust the number
   of available memregs, and select which registers need to be preserved
   across calls based on the chip family.  */

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE m32c_conditional_register_usage
void
m32c_conditional_register_usage (void)
{
  int i;

  if (0 <= target_memregs && target_memregs <= 16)
    {
      /* The command line option is bytes, but our "registers" are
	 16-bit words.  Mark each excess memreg word fixed and drop it
	 from MEM_REGS so the allocator never touches it.  */
      for (i = (target_memregs+1)/2; i < 8; i++)
	{
	  fixed_regs[MEM0_REGNO + i] = 1;
	  CLEAR_HARD_REG_BIT (reg_class_contents[MEM_REGS], MEM0_REGNO + i);
	}
    }

  /* M32CM and M32C preserve more registers across function calls.  */
  if (TARGET_A24)
    {
      call_used_regs[R1_REGNO] = 0;
      call_used_regs[R2_REGNO] = 0;
      call_used_regs[R3_REGNO] = 0;
      call_used_regs[A0_REGNO] = 0;
      call_used_regs[A1_REGNO] = 0;
    }
}
567
568 /* How Values Fit in Registers */
569
/* Implements HARD_REGNO_NREGS.  This is complicated by the fact that
   different registers are different sizes from each other, *and* may
   be different sizes in different chip families.  Returns 0 when MODE
   does not fit in REGNO at all (m32c_hard_regno_ok relies on that).
   The order of the size checks below matters: the PSImode/A24 test
   must come before the generic <= 4 test.  */
static int
m32c_hard_regno_nregs_1 (int regno, machine_mode mode)
{
  if (regno == FLG_REGNO && mode == CCmode)
    return 1;
  if (regno >= FIRST_PSEUDO_REGISTER)
    return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);

  /* Memregs are 16-bit words; round the byte size up.  */
  if (regno >= MEM0_REGNO && regno <= MEM7_REGNO)
    return (GET_MODE_SIZE (mode) + 1) / 2;

  if (GET_MODE_SIZE (mode) <= 1)
    return nregs_table[regno].qi_regs;
  if (GET_MODE_SIZE (mode) <= 2)
    return nregs_table[regno].hi_regs;
  /* On A16 parts SImode in a0 takes the a0/a1 pair.  */
  if (regno == A0_REGNO && mode == SImode && TARGET_A16)
    return 2;
  if ((GET_MODE_SIZE (mode) <= 3 || mode == PSImode) && TARGET_A24)
    return nregs_table[regno].pi_regs;
  if (GET_MODE_SIZE (mode) <= 4)
    return nregs_table[regno].si_regs;
  if (GET_MODE_SIZE (mode) <= 8)
    return nregs_table[regno].di_regs;
  return 0;
}
598
599 int
600 m32c_hard_regno_nregs (int regno, machine_mode mode)
601 {
602 int rv = m32c_hard_regno_nregs_1 (regno, mode);
603 return rv ? rv : 1;
604 }
605
606 /* Implements HARD_REGNO_MODE_OK. The above function does the work
607 already; just test its return value. */
608 int
609 m32c_hard_regno_ok (int regno, machine_mode mode)
610 {
611 return m32c_hard_regno_nregs_1 (regno, mode) != 0;
612 }
613
614 /* Implements MODES_TIEABLE_P. In general, modes aren't tieable since
615 registers are all different sizes. However, since most modes are
616 bigger than our registers anyway, it's easier to implement this
617 function that way, leaving QImode as the only unique case. */
618 int
619 m32c_modes_tieable_p (machine_mode m1, machine_mode m2)
620 {
621 if (GET_MODE_SIZE (m1) == GET_MODE_SIZE (m2))
622 return 1;
623
624 #if 0
625 if (m1 == QImode || m2 == QImode)
626 return 0;
627 #endif
628
629 return 1;
630 }
631
632 /* Register Classes */
633
/* Implements REGNO_REG_CLASS.  Each hard register has its own
   singleton class; memregs share MEM_REGS, anything else (including
   pseudos) answers ALL_REGS.  */
enum reg_class
m32c_regno_reg_class (int regno)
{
  switch (regno)
    {
    case R0_REGNO:
      return R0_REGS;
    case R1_REGNO:
      return R1_REGS;
    case R2_REGNO:
      return R2_REGS;
    case R3_REGNO:
      return R3_REGS;
    case A0_REGNO:
      return A0_REGS;
    case A1_REGNO:
      return A1_REGS;
    case SB_REGNO:
      return SB_REGS;
    case FB_REGNO:
      return FB_REGS;
    case SP_REGNO:
      return SP_REGS;
    case FLG_REGNO:
      return FLG_REGS;
    default:
      if (IS_MEM_REGNO (regno))
	return MEM_REGS;
      return ALL_REGS;
    }
}
666
667 /* Implements REGNO_OK_FOR_BASE_P. */
668 int
669 m32c_regno_ok_for_base_p (int regno)
670 {
671 if (regno == A0_REGNO
672 || regno == A1_REGNO || regno >= FIRST_PSEUDO_REGISTER)
673 return 1;
674 return 0;
675 }
676
#define DEBUG_RELOAD 0

/* Implements TARGET_PREFERRED_RELOAD_CLASS.  In general, prefer general
   registers of the appropriate size.  */

#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS m32c_preferred_reload_class

static reg_class_t
m32c_preferred_reload_class (rtx x, reg_class_t rclass)
{
  reg_class_t newclass = rclass;

#if DEBUG_RELOAD
  fprintf (stderr, "\npreferred_reload_class for %s is ",
	   class_names[rclass]);
#endif
  /* With no constraint at all, pick the general class for the size.  */
  if (rclass == NO_REGS)
    rclass = GET_MODE (x) == QImode ? HL_REGS : R03_REGS;

  /* Control registers can only be QImode-loaded through r0l/r1l.  */
  if (reg_classes_intersect_p (rclass, CR_REGS))
    {
      switch (GET_MODE (x))
	{
	case QImode:
	  newclass = HL_REGS;
	  break;
	default:
	  /*      newclass = HI_REGS; */
	  break;
	}
    }

  /* Steer wide values towards classes that can actually hold them.  */
  else if (newclass == QI_REGS && GET_MODE_SIZE (GET_MODE (x)) > 2)
    newclass = SI_REGS;
  else if (GET_MODE_SIZE (GET_MODE (x)) > 4
	   && ! reg_class_subset_p (R03_REGS, rclass))
    newclass = DI_REGS;

  /* Narrow rclass to its intersection with newclass, keeping rclass
     when the intersection is empty.  */
  rclass = reduce_class (rclass, newclass, rclass);

  if (GET_MODE (x) == QImode)
    rclass = reduce_class (rclass, HL_REGS, rclass);

#if DEBUG_RELOAD
  fprintf (stderr, "%s\n", class_names[rclass]);
  debug_rtx (x);

  if (GET_CODE (x) == MEM
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
    fprintf (stderr, "Glorm!\n");
#endif
  return rclass;
}
732
/* Implements TARGET_PREFERRED_OUTPUT_RELOAD_CLASS.  Output reloads
   use the same class preferences as input reloads on this target.  */

#undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
#define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS m32c_preferred_output_reload_class

static reg_class_t
m32c_preferred_output_reload_class (rtx x, reg_class_t rclass)
{
  return m32c_preferred_reload_class (x, rclass);
}
743
/* Implements LIMIT_RELOAD_CLASS.  We basically want to avoid using
   address registers for reloads since they're needed for address
   reloads.  First narrow the class to registers that can hold MODE,
   then (unless the class is exactly A_REGS) exclude address regs.  */
int
m32c_limit_reload_class (machine_mode mode, int rclass)
{
#if DEBUG_RELOAD
  fprintf (stderr, "limit_reload_class for %s: %s ->",
	   mode_name[mode], class_names[rclass]);
#endif

  if (mode == QImode)
    rclass = reduce_class (rclass, HL_REGS, rclass);
  else if (mode == HImode)
    rclass = reduce_class (rclass, HI_REGS, rclass);
  else if (mode == SImode)
    rclass = reduce_class (rclass, SI_REGS, rclass);

  if (rclass != A_REGS)
    rclass = reduce_class (rclass, DI_REGS, rclass);

#if DEBUG_RELOAD
  fprintf (stderr, " %s\n", class_names[rclass]);
#endif
  return rclass;
}
770
/* Implements SECONDARY_RELOAD_CLASS.  QImode have to be reloaded in
   r0 or r1, as those are the only real QImode registers.  CR regs get
   reloaded through appropriately sized general or address
   registers.  */
int
m32c_secondary_reload_class (int rclass, machine_mode mode, rtx x)
{
  int cc = class_contents[rclass][0];
#if DEBUG0
  fprintf (stderr, "\nsecondary reload class %s %s\n",
	   class_names[rclass], mode_name[mode]);
  debug_rtx (x);
#endif
  /* QImode loads from memory into a class with no r0/r1 need an
     intermediate QI register.  */
  if (mode == QImode
      && GET_CODE (x) == MEM && (cc & ~class_contents[R23_REGS][0]) == 0)
    return QI_REGS;
  /* sb/fb/sp sources headed for control registers go via HI_REGS on
     A16 (or for HImode), otherwise via the address registers.  */
  if (reg_classes_intersect_p (rclass, CR_REGS)
      && GET_CODE (x) == REG
      && REGNO (x) >= SB_REGNO && REGNO (x) <= SP_REGNO)
    return (TARGET_A16 || mode == HImode) ? HI_REGS : A_REGS;
  return NO_REGS;
}
793
794 /* Implements TARGET_CLASS_LIKELY_SPILLED_P. A_REGS is needed for address
795 reloads. */
796
797 #undef TARGET_CLASS_LIKELY_SPILLED_P
798 #define TARGET_CLASS_LIKELY_SPILLED_P m32c_class_likely_spilled_p
799
800 static bool
801 m32c_class_likely_spilled_p (reg_class_t regclass)
802 {
803 if (regclass == A_REGS)
804 return true;
805
806 return (reg_class_size[(int) regclass] == 1);
807 }
808
/* Implements TARGET_CLASS_MAX_NREGS.  We calculate this according to its
   documented meaning (the maximum of m32c_hard_regno_nregs over all
   registers in the class), to avoid potential inconsistencies with
   actual class definitions.  */

#undef TARGET_CLASS_MAX_NREGS
#define TARGET_CLASS_MAX_NREGS m32c_class_max_nregs

static unsigned char
m32c_class_max_nregs (reg_class_t regclass, machine_mode mode)
{
  int rn;
  unsigned char max = 0;

  for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
    if (TEST_HARD_REG_BIT (reg_class_contents[(int) regclass], rn))
      {
	unsigned char n = m32c_hard_regno_nregs (rn, mode);
	if (max < n)
	  max = n;
      }
  return max;
}
831
/* Implements CANNOT_CHANGE_MODE_CLASS.  Only r0 and r1 can change to
   QI (r0l, r1l) because the chip doesn't support QI ops on other
   registers (well, it does on a0/a1 but if we let gcc do that, reload
   suffers).  Otherwise, we allow changes to larger modes.  Nonzero
   return means the change is NOT allowed.  */
int
m32c_cannot_change_mode_class (machine_mode from,
			       machine_mode to, int rclass)
{
  int rn;
#if DEBUG0
  fprintf (stderr, "cannot change from %s to %s in %s\n",
	   mode_name[from], mode_name[to], class_names[rclass]);
#endif

  /* If the larger mode isn't allowed in any of these registers, we
     can't allow the change.  */
  for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
    if (class_contents[rclass][0] & (1 << rn))
      if (! m32c_hard_regno_ok (rn, to))
	return 1;

  /* 0x1ffa masks every low register bit except r0 and r1
     (NOTE(review): derived from the bit positions implied by the
     nregs_table ordering above -- confirm against the .h register
     order).  */
  if (to == QImode)
    return (class_contents[rclass][0] & 0x1ffa);

  if (class_contents[rclass][0] & 0x0005 /* r0, r1 */
      && GET_MODE_SIZE (from) > 1)
    return 0;
  if (GET_MODE_SIZE (from) > 2)	/* all other regs */
    return 0;

  return 1;
}
864
/* Helpers for the rest of the file.  */
/* TRUE if the rtx is a REG rtx for the given register.  */
#define IS_REG(rtx,regno) (GET_CODE (rtx) == REG \
			   && REGNO (rtx) == regno)
/* TRUE if the rtx is a pseudo - specifically, one we can use as a
   base register in address calculations (hence the "strict"
   argument).  In strict mode this is always false.  */
#define IS_PSEUDO(rtx,strict) (!strict && GET_CODE (rtx) == REG \
			       && (REGNO (rtx) == AP_REGNO \
				   || REGNO (rtx) >= FIRST_PSEUDO_REGISTER))

/* TRUE if X is $a0 or a pseudo (X must already be known to be a REG).  */
#define A0_OR_PSEUDO(x) (IS_REG(x, A0_REGNO) || REGNO (x) >= FIRST_PSEUDO_REGISTER)
877
/* Implements matching for constraints (see next function too).  'S' is
   for memory constraints, plus "Rpa" for PARALLEL rtx's we use for
   call return values.  The pattern strings below are the encodings
   produced by encode_pattern(): m=MEM, r=REG, s=SYMBOL_REF, i=const,
   +=PLUS, ^S=SIGN_EXTEND, etc.; patternr[] indexes the matched
   subexpressions.  */
bool
m32c_matches_constraint_p (rtx value, int constraint)
{
  encode_pattern (value);

  switch (constraint) {
  case CONSTRAINT_SF:
    /* Far-space memory: a plain SImode pointer, HImode base plus
       offset forms, or an absolute symbol.  */
    return (far_addr_space_p (value)
	    && ((RTX_IS ("mr")
		 && A0_OR_PSEUDO (patternr[1])
		 && GET_MODE (patternr[1]) == SImode)
		|| (RTX_IS ("m+^Sri")
		    && A0_OR_PSEUDO (patternr[4])
		    && GET_MODE (patternr[4]) == HImode)
		|| (RTX_IS ("m+^Srs")
		    && A0_OR_PSEUDO (patternr[4])
		    && GET_MODE (patternr[4]) == HImode)
		|| (RTX_IS ("m+^S+ris")
		    && A0_OR_PSEUDO (patternr[5])
		    && GET_MODE (patternr[5]) == HImode)
		|| RTX_IS ("ms")));
  case CONSTRAINT_Sd:
    {
      /* This is the common "src/dest" address */
      rtx r;
      if (GET_CODE (value) == MEM && CONSTANT_P (XEXP (value, 0)))
	return true;
      if (RTX_IS ("ms") || RTX_IS ("m+si"))
	return true;
      if (RTX_IS ("m++rii"))
	{
	  /* (mem (plus (plus fb 0) const)) — frame-based.  */
	  if (REGNO (patternr[3]) == FB_REGNO
	      && INTVAL (patternr[4]) == 0)
	    return true;
	}
      if (RTX_IS ("mr"))
	r = patternr[1];
      else if (RTX_IS ("m+ri") || RTX_IS ("m+rs") || RTX_IS ("m+r+si"))
	r = patternr[2];
      else
	return false;
      /* SP-relative addresses are excluded from Sd.  */
      if (REGNO (r) == SP_REGNO)
	return false;
      return m32c_legitimate_address_p (GET_MODE (value), XEXP (value, 0), 1);
    }
  case CONSTRAINT_Sa:
    {
      /* Address-register indirect, with optional constant offset.  */
      rtx r;
      if (RTX_IS ("mr"))
	r = patternr[1];
      else if (RTX_IS ("m+ri"))
	r = patternr[2];
      else
	return false;
      return (IS_REG (r, A0_REGNO) || IS_REG (r, A1_REGNO));
    }
  case CONSTRAINT_Si:
    /* Constant (absolute or symbolic) addresses.  */
    return (RTX_IS ("mi") || RTX_IS ("ms") || RTX_IS ("m+si"));
  case CONSTRAINT_Ss:
    /* Stack-pointer relative.  */
    return ((RTX_IS ("mr")
	     && (IS_REG (patternr[1], SP_REGNO)))
	    || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SP_REGNO))));
  case CONSTRAINT_Sf:
    /* Frame-base relative.  */
    return ((RTX_IS ("mr")
	     && (IS_REG (patternr[1], FB_REGNO)))
	    || (RTX_IS ("m+ri") && (IS_REG (patternr[2], FB_REGNO))));
  case CONSTRAINT_Sb:
    /* Static-base relative.  */
    return ((RTX_IS ("mr")
	     && (IS_REG (patternr[1], SB_REGNO)))
	    || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SB_REGNO))));
  case CONSTRAINT_Sp:
    /* Absolute addresses 0..0x1fff used for bit addressing (I/O ports) */
    return (RTX_IS ("mi")
	    && !(INTVAL (patternr[1]) & ~0x1fff));
  case CONSTRAINT_S1:
    return r1h_operand (value, QImode);
  case CONSTRAINT_Rpa:
    return GET_CODE (value) == PARALLEL;
  default:
    return false;
  }
}
963
964 /* STACK AND CALLING */
965
966 /* Frame Layout */
967
968 /* Implements RETURN_ADDR_RTX. Note that R8C and M16C push 24 bits
969 (yes, THREE bytes) onto the stack for the return address, but we
970 don't support pointers bigger than 16 bits on those chips. This
971 will likely wreak havoc with exception unwinding. FIXME. */
/* Returns the return address of the current (COUNT == 0) frame as a
   MEM off $fb, copied into a fresh register.  Outer frames (COUNT
   nonzero) are not supported.  */
rtx
m32c_return_addr_rtx (int count)
{
  machine_mode mode;
  int offset;
  rtx ra_mem;

  if (count)
    return NULL_RTX;
  /* we want 2[$fb] */

  if (TARGET_A24)
    {
      /* It's four bytes */
      mode = PSImode;
      offset = 4;
    }
  else
    {
      /* FIXME: it's really 3 bytes */
      mode = HImode;
      offset = 2;
    }

  ra_mem =
    gen_rtx_MEM (mode, plus_constant (Pmode, gen_rtx_REG (Pmode, FP_REGNO),
				      offset));
  return copy_to_mode_reg (mode, ra_mem);
}
1001
/* Implements INCOMING_RETURN_ADDR_RTX.  See comment above.  On entry
   the return address is at the top of the stack.  */
rtx
m32c_incoming_return_addr_rtx (void)
{
  /* we want [sp] */
  return gen_rtx_MEM (PSImode, gen_rtx_REG (PSImode, SP_REGNO));
}
1009
1010 /* Exception Handling Support */
1011
1012 /* Implements EH_RETURN_DATA_REGNO. Choose registers able to hold
1013 pointers. */
1014 int
1015 m32c_eh_return_data_regno (int n)
1016 {
1017 switch (n)
1018 {
1019 case 0:
1020 return MEM0_REGNO;
1021 case 1:
1022 return MEM0_REGNO+4;
1023 default:
1024 return INVALID_REGNUM;
1025 }
1026 }
1027
1028 /* Implements EH_RETURN_STACKADJ_RTX. Saved and used later in
1029 m32c_emit_eh_epilogue. */
1030 rtx
1031 m32c_eh_return_stackadj_rtx (void)
1032 {
1033 if (!cfun->machine->eh_stack_adjust)
1034 {
1035 rtx sa;
1036
1037 sa = gen_rtx_REG (Pmode, R0_REGNO);
1038 cfun->machine->eh_stack_adjust = sa;
1039 }
1040 return cfun->machine->eh_stack_adjust;
1041 }
1042
1043 /* Registers That Address the Stack Frame */
1044
1045 /* Implements DWARF_FRAME_REGNUM and DBX_REGISTER_NUMBER. Note that
1046 the original spec called for dwarf numbers to vary with register
1047 width as well, for example, r0l, r0, and r2r0 would each have
1048 different dwarf numbers. GCC doesn't support this, and we don't do
1049 it, and gdb seems to like it this way anyway. */
/* Maps hard register numbers to the DWARF register numbers used in
   debug/unwind info; anything unmapped returns an out-of-range value
   (DWARF_FRAME_REGISTERS + 1) meaning "no DWARF number".  */
unsigned int
m32c_dwarf_frame_regnum (int n)
{
  switch (n)
    {
    case R0_REGNO:
      return 5;
    case R1_REGNO:
      return 6;
    case R2_REGNO:
      return 7;
    case R3_REGNO:
      return 8;
    case A0_REGNO:
      return 9;
    case A1_REGNO:
      return 10;
    case FB_REGNO:
      return 11;
    case SB_REGNO:
      return 19;

    case SP_REGNO:
      return 12;
    case PC_REGNO:
      return 13;
    default:
      return DWARF_FRAME_REGISTERS + 1;
    }
}
1080
1081 /* The frame looks like this:
1082
1083 ap -> +------------------------------
1084 | Return address (3 or 4 bytes)
1085 | Saved FB (2 or 4 bytes)
1086 fb -> +------------------------------
1087 | local vars
1088 | register saves fb
1089 | through r0 as needed
1090 sp -> +------------------------------
1091 */
1092
/* We use this to wrap all emitted insns in the prologue, marking
   them frame-related so dwarf2 CFI is generated for them.  Returns X
   for convenient inline use.  */
static rtx
F (rtx x)
{
  RTX_FRAME_RELATED_P (x) = 1;
  return x;
}
1100
/* This maps register numbers to the PUSHM/POPM bitfield, and tells us
   how much the stack pointer moves for each, for each cpu family.
   reg1 is the hard register, bit its position in the PUSHM/POPM
   operand byte, and a16_bytes/a24_bytes the stack space the push
   consumes on each family.  */
static struct
{
  int reg1;
  int bit;
  int a16_bytes;
  int a24_bytes;
} pushm_info[] =
{
  /* These are in reverse push (nearest-to-sp) order.  */
  { R0_REGNO, 0x80, 2, 2 },
  { R1_REGNO, 0x40, 2, 2 },
  { R2_REGNO, 0x20, 2, 2 },
  { R3_REGNO, 0x10, 2, 2 },
  { A0_REGNO, 0x08, 2, 4 },
  { A1_REGNO, 0x04, 2, 4 },
  { SB_REGNO, 0x02, 2, 4 },
  { FB_REGNO, 0x01, 2, 4 }
};

/* Number of entries in pushm_info.  */
#define PUSHM_N (sizeof(pushm_info)/sizeof(pushm_info[0]))
1123
/* Returns TRUE if we need to save/restore the given register.  We
   save everything for exception handlers, so that any register can be
   unwound.  For interrupt handlers, we save everything if the handler
   calls something else (because we don't know what *that* function
   might do), but try to be a bit smarter if the handler is a leaf
   function.  We always save $a0, though, because we use that in the
   epilogue to copy $fb to $sp.  */
static int
need_to_save (int regno)
{
  /* Fixed registers are never saved or restored.  */
  if (fixed_regs[regno])
    return 0;
  /* Exception handlers must be able to unwind any register.  */
  if (crtl->calls_eh_return)
    return 1;
  if (regno == FP_REGNO)
    return 0;
  /* Interrupt handlers: save everything unless this is a leaf
     function; $a0 is saved regardless when the epilogue needs it
     (m32c_function_needs_enter) to copy $fb back into $sp.  */
  if (cfun->machine->is_interrupt
      && (!cfun->machine->is_leaf
	  || (regno == A0_REGNO
	      && m32c_function_needs_enter ())
	  ))
    return 1;
  /* Otherwise save registers that are live and either call-saved, or
     any live register at all inside an interrupt handler.  */
  if (df_regs_ever_live_p (regno)
      && (!call_used_regs[regno] || cfun->machine->is_interrupt))
    return 1;
  return 0;
}
1151
/* This function contains all the intelligence about saving and
   restoring registers.  It always figures out the register save set.
   When called with PP_justcount, it merely returns the size of the
   save set (for eliminating the frame pointer, for example).  When
   called with PP_pushm or PP_popm, it emits the appropriate
   instructions for saving (pushm) or restoring (popm) the
   registers.  Returns the number of stack bytes the save set
   occupies.  */
static int
m32c_pushm_popm (Push_Pop_Type ppt)
{
  int reg_mask = 0;		/* PUSHM/POPM immediate being built.  */
  int byte_count = 0, bytes;	/* Running stack-byte total / per-reg size.  */
  int i;
  rtx dwarf_set[PUSHM_N];	/* One CFI SET per pushed register.  */
  int n_dwarfs = 0;
  int nosave_mask = 0;		/* Bits of registers holding return values.  */

  /* If the return value lives in registers (a PARALLEL return rtx),
     exclude those registers from the save set so the epilogue's popm
     doesn't clobber the value being returned.  Not done for eh-return
     or interrupt functions, which must restore everything.  */
  if (crtl->return_rtx
      && GET_CODE (crtl->return_rtx) == PARALLEL
      && !(crtl->calls_eh_return || cfun->machine->is_interrupt))
    {
      rtx exp = XVECEXP (crtl->return_rtx, 0, 0);
      rtx rv = XEXP (exp, 0);
      int rv_bytes = GET_MODE_SIZE (GET_MODE (rv));

      /* NOTE(review): the mask constants are pushm_info bits (r0=0x80,
	 r1=0x40, r2=0x20, r3=0x10); the else branch marking 0xf0 for a
	 <=2-byte value while the comment says "DF" looks inverted —
	 confirm against the return-value register assignments.  */
      if (rv_bytes > 2)
	nosave_mask |= 0x20;	/* PSI, SI */
      else
	nosave_mask |= 0xf0;	/* DF */
      if (rv_bytes > 4)
	nosave_mask |= 0x50;	/* DI */
    }

  /* Build the PUSHM bit mask, the total byte count, and (for PP_pushm)
     the CFI notes describing where each register lands on the stack.  */
  for (i = 0; i < (int) PUSHM_N; i++)
    {
      /* Skip if neither register needs saving.  */
      if (!need_to_save (pushm_info[i].reg1))
	continue;

      if (pushm_info[i].bit & nosave_mask)
	continue;

      reg_mask |= pushm_info[i].bit;
      bytes = TARGET_A16 ? pushm_info[i].a16_bytes : pushm_info[i].a24_bytes;

      if (ppt == PP_pushm)
	{
	  machine_mode mode = (bytes == 2) ? HImode : SImode;
	  rtx addr;

	  /* Always use stack_pointer_rtx instead of calling
	     rtx_gen_REG ourselves.  Code elsewhere in GCC assumes
	     that there is a single rtx representing the stack pointer,
	     namely stack_pointer_rtx, and uses == to recognize it.  */
	  addr = stack_pointer_rtx;

	  if (byte_count != 0)
	    addr = gen_rtx_PLUS (GET_MODE (addr), addr, GEN_INT (byte_count));

	  /* Record mem[sp + offset] = reg for the unwinder.  */
	  dwarf_set[n_dwarfs++] =
	    gen_rtx_SET (VOIDmode,
			 gen_rtx_MEM (mode, addr),
			 gen_rtx_REG (mode, pushm_info[i].reg1));
	  F (dwarf_set[n_dwarfs - 1]);

	}
      byte_count += bytes;
    }

  /* Interrupt handlers save via the dedicated intr_pushm mechanism
     instead; stash the mask (minus bit 0) and start over.  */
  if (cfun->machine->is_interrupt)
    {
      cfun->machine->intr_pushm = reg_mask & 0xfe;
      reg_mask = 0;
      byte_count = 0;
    }

  /* Interrupt handlers also save the memory-based pseudo registers
     mem0..mem7 individually, 2 bytes each.  */
  if (cfun->machine->is_interrupt)
    for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
      if (need_to_save (i))
	{
	  byte_count += 2;
	  cfun->machine->intr_pushmem[i - MEM0_REGNO] = 1;
	}

  if (ppt == PP_pushm && byte_count)
    {
      /* Attach a REG_FRAME_RELATED_EXPR note describing the sp
	 adjustment plus every individual register store, since a
	 single pushm insn can't express that for dwarf2out.  */
      rtx note = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (n_dwarfs + 1));
      rtx pushm;

      if (reg_mask)
	{
	  XVECEXP (note, 0, 0)
	    = gen_rtx_SET (VOIDmode,
			   stack_pointer_rtx,
			   gen_rtx_PLUS (GET_MODE (stack_pointer_rtx),
					 stack_pointer_rtx,
					 GEN_INT (-byte_count)));
	  F (XVECEXP (note, 0, 0));

	  for (i = 0; i < n_dwarfs; i++)
	    XVECEXP (note, 0, i + 1) = dwarf_set[i];

	  pushm = F (emit_insn (gen_pushm (GEN_INT (reg_mask))));

	  add_reg_note (pushm, REG_FRAME_RELATED_EXPR, note);
	}

      /* Push the interrupt handler's mem0..mem7 saves one at a time.  */
      if (cfun->machine->is_interrupt)
	for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
	  if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
	    {
	      if (TARGET_A16)
		pushm = emit_insn (gen_pushhi_16 (gen_rtx_REG (HImode, i)));
	      else
		pushm = emit_insn (gen_pushhi_24 (gen_rtx_REG (HImode, i)));
	      F (pushm);
	    }
    }
  if (ppt == PP_popm && byte_count)
    {
      /* Pop in the reverse order of the pushes above: mem registers
	 first (highest number first), then the popm mask.  */
      if (cfun->machine->is_interrupt)
	for (i = MEM7_REGNO; i >= MEM0_REGNO; i--)
	  if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
	    {
	      if (TARGET_A16)
		emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, i)));
	      else
		emit_insn (gen_pophi_24 (gen_rtx_REG (HImode, i)));
	    }
      if (reg_mask)
	emit_insn (gen_popm (GEN_INT (reg_mask)));
    }

  return byte_count;
}
1287
1288 /* Implements INITIAL_ELIMINATION_OFFSET. See the comment above that
1289 diagrams our call frame. */
1290 int
1291 m32c_initial_elimination_offset (int from, int to)
1292 {
1293 int ofs = 0;
1294
1295 if (from == AP_REGNO)
1296 {
1297 if (TARGET_A16)
1298 ofs += 5;
1299 else
1300 ofs += 8;
1301 }
1302
1303 if (to == SP_REGNO)
1304 {
1305 ofs += m32c_pushm_popm (PP_justcount);
1306 ofs += get_frame_size ();
1307 }
1308
1309 /* Account for push rounding. */
1310 if (TARGET_A24)
1311 ofs = (ofs + 1) & ~1;
1312 #if DEBUG0
1313 fprintf (stderr, "initial_elimination_offset from=%d to=%d, ofs=%d\n", from,
1314 to, ofs);
1315 #endif
1316 return ofs;
1317 }
1318
1319 /* Passing Function Arguments on the Stack */
1320
1321 /* Implements PUSH_ROUNDING. The R8C and M16C have byte stacks, the
1322 M32C has word stacks. */
1323 unsigned int
1324 m32c_push_rounding (int n)
1325 {
1326 if (TARGET_R8C || TARGET_M16C)
1327 return n;
1328 return (n + 1) & ~1;
1329 }
1330
1331 /* Passing Arguments in Registers */
1332
1333 /* Implements TARGET_FUNCTION_ARG. Arguments are passed partly in
1334 registers, partly on stack. If our function returns a struct, a
1335 pointer to a buffer for it is at the top of the stack (last thing
1336 pushed). The first few real arguments may be in registers as
1337 follows:
1338
1339 R8C/M16C: arg1 in r1 if it's QI or HI (else it's pushed on stack)
1340 arg2 in r2 if it's HI (else pushed on stack)
1341 rest on stack
1342 M32C: arg1 in r0 if it's QI or HI (else it's pushed on stack)
1343 rest on stack
1344
1345 Structs are not passed in registers, even if they fit. Only
1346 integer and pointer types are passed in registers.
1347
1348 Note that when arg1 doesn't fit in r1, arg2 may still be passed in
1349 r2 if it fits. */
1350 #undef TARGET_FUNCTION_ARG
1351 #define TARGET_FUNCTION_ARG m32c_function_arg
1352 static rtx
1353 m32c_function_arg (cumulative_args_t ca_v,
1354 machine_mode mode, const_tree type, bool named)
1355 {
1356 CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
1357
1358 /* Can return a reg, parallel, or 0 for stack */
1359 rtx rv = NULL_RTX;
1360 #if DEBUG0
1361 fprintf (stderr, "func_arg %d (%s, %d)\n",
1362 ca->parm_num, mode_name[mode], named);
1363 debug_tree (type);
1364 #endif
1365
1366 if (mode == VOIDmode)
1367 return GEN_INT (0);
1368
1369 if (ca->force_mem || !named)
1370 {
1371 #if DEBUG0
1372 fprintf (stderr, "func arg: force %d named %d, mem\n", ca->force_mem,
1373 named);
1374 #endif
1375 return NULL_RTX;
1376 }
1377
1378 if (type && INTEGRAL_TYPE_P (type) && POINTER_TYPE_P (type))
1379 return NULL_RTX;
1380
1381 if (type && AGGREGATE_TYPE_P (type))
1382 return NULL_RTX;
1383
1384 switch (ca->parm_num)
1385 {
1386 case 1:
1387 if (GET_MODE_SIZE (mode) == 1 || GET_MODE_SIZE (mode) == 2)
1388 rv = gen_rtx_REG (mode, TARGET_A16 ? R1_REGNO : R0_REGNO);
1389 break;
1390
1391 case 2:
1392 if (TARGET_A16 && GET_MODE_SIZE (mode) == 2)
1393 rv = gen_rtx_REG (mode, R2_REGNO);
1394 break;
1395 }
1396
1397 #if DEBUG0
1398 debug_rtx (rv);
1399 #endif
1400 return rv;
1401 }
1402
1403 #undef TARGET_PASS_BY_REFERENCE
1404 #define TARGET_PASS_BY_REFERENCE m32c_pass_by_reference
1405 static bool
1406 m32c_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
1407 machine_mode mode ATTRIBUTE_UNUSED,
1408 const_tree type ATTRIBUTE_UNUSED,
1409 bool named ATTRIBUTE_UNUSED)
1410 {
1411 return 0;
1412 }
1413
1414 /* Implements INIT_CUMULATIVE_ARGS. */
1415 void
1416 m32c_init_cumulative_args (CUMULATIVE_ARGS * ca,
1417 tree fntype,
1418 rtx libname ATTRIBUTE_UNUSED,
1419 tree fndecl,
1420 int n_named_args ATTRIBUTE_UNUSED)
1421 {
1422 if (fntype && aggregate_value_p (TREE_TYPE (fntype), fndecl))
1423 ca->force_mem = 1;
1424 else
1425 ca->force_mem = 0;
1426 ca->parm_num = 1;
1427 }
1428
1429 /* Implements TARGET_FUNCTION_ARG_ADVANCE. force_mem is set for
1430 functions returning structures, so we always reset that. Otherwise,
1431 we only need to know the sequence number of the argument to know what
1432 to do with it. */
1433 #undef TARGET_FUNCTION_ARG_ADVANCE
1434 #define TARGET_FUNCTION_ARG_ADVANCE m32c_function_arg_advance
1435 static void
1436 m32c_function_arg_advance (cumulative_args_t ca_v,
1437 machine_mode mode ATTRIBUTE_UNUSED,
1438 const_tree type ATTRIBUTE_UNUSED,
1439 bool named ATTRIBUTE_UNUSED)
1440 {
1441 CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
1442
1443 if (ca->force_mem)
1444 ca->force_mem = 0;
1445 else
1446 ca->parm_num++;
1447 }
1448
1449 /* Implements TARGET_FUNCTION_ARG_BOUNDARY. */
1450 #undef TARGET_FUNCTION_ARG_BOUNDARY
1451 #define TARGET_FUNCTION_ARG_BOUNDARY m32c_function_arg_boundary
1452 static unsigned int
1453 m32c_function_arg_boundary (machine_mode mode ATTRIBUTE_UNUSED,
1454 const_tree type ATTRIBUTE_UNUSED)
1455 {
1456 return (TARGET_A16 ? 8 : 16);
1457 }
1458
1459 /* Implements FUNCTION_ARG_REGNO_P. */
1460 int
1461 m32c_function_arg_regno_p (int r)
1462 {
1463 if (TARGET_A24)
1464 return (r == R0_REGNO);
1465 return (r == R1_REGNO || r == R2_REGNO);
1466 }
1467
1468 /* HImode and PSImode are the two "native" modes as far as GCC is
1469 concerned, but the chips also support a 32-bit mode which is used
1470 for some opcodes in R8C/M16C and for reset vectors and such. */
1471 #undef TARGET_VALID_POINTER_MODE
1472 #define TARGET_VALID_POINTER_MODE m32c_valid_pointer_mode
1473 static bool
1474 m32c_valid_pointer_mode (machine_mode mode)
1475 {
1476 if (mode == HImode
1477 || mode == PSImode
1478 || mode == SImode
1479 )
1480 return 1;
1481 return 0;
1482 }
1483
1484 /* How Scalar Function Values Are Returned */
1485
/* Implements TARGET_LIBCALL_VALUE.  Most values are returned in $r0, or some
   combination of registers starting there (r2r0 for longs, r3r1r2r0
   for long long, r3r2r1r0 for doubles), except that that ABI
   currently doesn't work because it ends up using all available
   general registers and gcc often can't compile it.  So, instead, we
   return anything bigger than 16 bits in "mem0" (effectively, a
   memory location).  */

#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE m32c_libcall_value

static rtx
m32c_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
{
  /* return reg or parallel */
#if 0
  /* FIXME: GCC has difficulty returning large values in registers,
     because that ties up most of the general registers and gives the
     register allocator little to work with.  Until we can resolve
     this, large values are returned in memory.  */
  if (mode == DFmode)
    {
      rtx rv;

      rv = gen_rtx_PARALLEL (mode, rtvec_alloc (4));
      XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R0_REGNO),
					      GEN_INT (0));
      XVECEXP (rv, 0, 1) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R1_REGNO),
					      GEN_INT (2));
      XVECEXP (rv, 0, 2) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R2_REGNO),
					      GEN_INT (4));
      XVECEXP (rv, 0, 3) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R3_REGNO),
					      GEN_INT (6));
      return rv;
    }

  if (TARGET_A24 && GET_MODE_SIZE (mode) > 2)
    {
      rtx rv;

      rv = gen_rtx_PARALLEL (mode, rtvec_alloc (1));
      XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (mode,
							   R0_REGNO),
					      GEN_INT (0));
      return rv;
    }
#endif

  /* Anything wider than 16 bits is returned in the mem0 pseudo
     register; 16-bit-or-smaller values come back in $r0.  */
  if (GET_MODE_SIZE (mode) > 2)
    return gen_rtx_REG (mode, MEM0_REGNO);
  return gen_rtx_REG (mode, R0_REGNO);
}
1547
1548 /* Implements TARGET_FUNCTION_VALUE. Functions and libcalls have the same
1549 conventions. */
1550
1551 #undef TARGET_FUNCTION_VALUE
1552 #define TARGET_FUNCTION_VALUE m32c_function_value
1553
1554 static rtx
1555 m32c_function_value (const_tree valtype,
1556 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
1557 bool outgoing ATTRIBUTE_UNUSED)
1558 {
1559 /* return reg or parallel */
1560 const machine_mode mode = TYPE_MODE (valtype);
1561 return m32c_libcall_value (mode, NULL_RTX);
1562 }
1563
1564 /* Implements TARGET_FUNCTION_VALUE_REGNO_P. */
1565
1566 #undef TARGET_FUNCTION_VALUE_REGNO_P
1567 #define TARGET_FUNCTION_VALUE_REGNO_P m32c_function_value_regno_p
1568
1569 static bool
1570 m32c_function_value_regno_p (const unsigned int regno)
1571 {
1572 return (regno == R0_REGNO || regno == MEM0_REGNO);
1573 }
1574
1575 /* How Large Values Are Returned */
1576
1577 /* We return structures by pushing the address on the stack, even if
1578 we use registers for the first few "real" arguments. */
1579 #undef TARGET_STRUCT_VALUE_RTX
1580 #define TARGET_STRUCT_VALUE_RTX m32c_struct_value_rtx
1581 static rtx
1582 m32c_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
1583 int incoming ATTRIBUTE_UNUSED)
1584 {
1585 return 0;
1586 }
1587
1588 /* Function Entry and Exit */
1589
1590 /* Implements EPILOGUE_USES. Interrupts restore all registers. */
1591 int
1592 m32c_epilogue_uses (int regno ATTRIBUTE_UNUSED)
1593 {
1594 if (cfun->machine->is_interrupt)
1595 return 1;
1596 return 0;
1597 }
1598
1599 /* Implementing the Varargs Macros */
1600
1601 #undef TARGET_STRICT_ARGUMENT_NAMING
1602 #define TARGET_STRICT_ARGUMENT_NAMING m32c_strict_argument_naming
1603 static bool
1604 m32c_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
1605 {
1606 return 1;
1607 }
1608
1609 /* Trampolines for Nested Functions */
1610
1611 /*
1612 m16c:
1613 1 0000 75C43412 mov.w #0x1234,a0
1614 2 0004 FC000000 jmp.a label
1615
1616 m32c:
1617 1 0000 BC563412 mov.l:s #0x123456,a0
1618 2 0004 CC000000 jmp.a label
1619 */
1620
1621 /* Implements TRAMPOLINE_SIZE. */
1622 int
1623 m32c_trampoline_size (void)
1624 {
1625 /* Allocate extra space so we can avoid the messy shifts when we
1626 initialize the trampoline; we just write past the end of the
1627 opcode. */
1628 return TARGET_A16 ? 8 : 10;
1629 }
1630
/* Implements TRAMPOLINE_ALIGNMENT.  NOTE(review): GCC's
   TRAMPOLINE_ALIGNMENT is conventionally measured in bits; returning 2
   is unusually small given the HImode stores in m32c_trampoline_init —
   confirm whether this value is intended as bits or bytes.  */
int
m32c_trampoline_alignment (void)
{
  return 2;
}
1637
/* Implements TARGET_TRAMPOLINE_INIT.  Fills in the trampoline template
   shown in the comment above (mov #chain,a0 / jmp.a function), writing
   the instruction bytes and operands into M_TRAMP.  */

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT m32c_trampoline_init
static void
m32c_trampoline_init (rtx m_tramp, tree fndecl, rtx chainval)
{
  rtx function = XEXP (DECL_RTL (fndecl), 0);

/* Shorthand: the trampoline memory at offset I, in mode M.  */
#define A0(m,i) adjust_address (m_tramp, m, i)
  if (TARGET_A16)
    {
      /* Note: we subtract a "word" because the moves want signed
	 constants, not unsigned constants.  */
      emit_move_insn (A0 (HImode, 0), GEN_INT (0xc475 - 0x10000));
      emit_move_insn (A0 (HImode, 2), chainval);
      emit_move_insn (A0 (QImode, 4), GEN_INT (0xfc - 0x100));
      /* We use 16-bit addresses here, but store the zero to turn it
	 into a 24-bit offset.  */
      emit_move_insn (A0 (HImode, 5), function);
      emit_move_insn (A0 (QImode, 7), GEN_INT (0x00));
    }
  else
    {
      /* Note that the PSI moves actually write 4 bytes.  Make sure we
	 write stuff out in the right order, and leave room for the
	 extra byte at the end.  */
      emit_move_insn (A0 (QImode, 0), GEN_INT (0xbc - 0x100));
      emit_move_insn (A0 (PSImode, 1), chainval);
      emit_move_insn (A0 (QImode, 4), GEN_INT (0xcc - 0x100));
      emit_move_insn (A0 (PSImode, 5), function);
    }
#undef A0
}
1672
1673 /* Addressing Modes */
1674
/* The r8c/m32c family supports a wide range of non-orthogonal
   addressing modes, including the ability to double-indirect on *some*
   of them.  Not all insns support all modes, either, but we rely on
   predicates and constraints to deal with that.  */
#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P m32c_legitimate_address_p
bool
m32c_legitimate_address_p (machine_mode mode, rtx x, bool strict)
{
  int mode_adjust;
  if (CONSTANT_P (x))
    return 1;

  /* The address itself must be in the native pointer mode for the
     chip family (HI/SI for 16-bit parts, PSI for the M32C).  */
  if (TARGET_A16 && GET_MODE (x) != HImode && GET_MODE (x) != SImode)
    return 0;
  if (TARGET_A24 && GET_MODE (x) != PSImode)
    return 0;

  /* Wide references to memory will be split after reload, so we must
     ensure that all parts of such splits remain legitimate
     addresses.  */
  mode_adjust = GET_MODE_SIZE (mode) - 1;

  /* allowing PLUS yields mem:HI(plus:SI(mem:SI(plus:SI in m32c_split_move */
  if (GET_CODE (x) == PRE_DEC
      || GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_MODIFY)
    {
      /* Auto-modify addressing is only supported on the stack pointer.  */
      return (GET_CODE (XEXP (x, 0)) == REG
	      && REGNO (XEXP (x, 0)) == SP_REGNO);
    }

#if 0
  /* This is the double indirection detection, but it currently
     doesn't work as cleanly as this code implies, so until we've had
     a chance to debug it, leave it disabled.  */
  if (TARGET_A24 && GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) != PLUS)
    {
#if DEBUG_DOUBLE
      fprintf (stderr, "double indirect\n");
#endif
      x = XEXP (x, 0);
    }
#endif

  /* encode_pattern (defined elsewhere in this file) classifies X and
     fills in patternr[] with its components.  */
  encode_pattern (x);
  if (RTX_IS ("r"))
    {
      /* Most indexable registers can be used without displacements,
	 although some of them will be emitted with an explicit zero
	 to please the assembler.  */
      switch (REGNO (patternr[0]))
	{
	case A1_REGNO:
	case SB_REGNO:
	case FB_REGNO:
	case SP_REGNO:
	  if (TARGET_A16 && GET_MODE (x) == SImode)
	    return 0;
	  /* fall through */
	case A0_REGNO:
	  return 1;

	default:
	  if (IS_PSEUDO (patternr[0], strict))
	    return 1;
	  return 0;
	}
    }

  if (TARGET_A16 && GET_MODE (x) == SImode)
    return 0;

  if (RTX_IS ("+ri"))
    {
      /* This is more interesting, because different base registers
	 allow for different displacements - both range and signedness
	 - and it differs from chip series to chip series too.  */
      int rn = REGNO (patternr[1]);
      HOST_WIDE_INT offs = INTVAL (patternr[2]);
      switch (rn)
	{
	case A0_REGNO:
	case A1_REGNO:
	case SB_REGNO:
	  /* The syntax only allows positive offsets, but when the
	     offsets span the entire memory range, we can simulate
	     negative offsets by wrapping.  */
	  if (TARGET_A16)
	    return (offs >= -65536 && offs <= 65535 - mode_adjust);
	  if (rn == SB_REGNO)
	    return (offs >= 0 && offs <= 65535 - mode_adjust);
	  /* A0 or A1 */
	  return (offs >= -16777216 && offs <= 16777215);

	case FB_REGNO:
	  /* $fb displacements are 8-bit on 16-bit parts, 16-bit on
	     the M32C.  */
	  if (TARGET_A16)
	    return (offs >= -128 && offs <= 127 - mode_adjust);
	  return (offs >= -65536 && offs <= 65535 - mode_adjust);

	case SP_REGNO:
	  return (offs >= -128 && offs <= 127 - mode_adjust);

	default:
	  if (IS_PSEUDO (patternr[1], strict))
	    return 1;
	  return 0;
	}
    }
  if (RTX_IS ("+rs") || RTX_IS ("+r+si"))
    {
      rtx reg = patternr[1];

      /* We don't know where the symbol is, so only allow base
	 registers which support displacements spanning the whole
	 address range.  */
      switch (REGNO (reg))
	{
	case A0_REGNO:
	case A1_REGNO:
	  /* $sb needs a secondary reload, but since it's involved in
	     memory address reloads too, we don't deal with it very
	     well.  */
	  /*    case SB_REGNO: */
	  return 1;
	default:
	  if (GET_CODE (reg) == SUBREG)
	    return 0;
	  if (IS_PSEUDO (reg, strict))
	    return 1;
	  return 0;
	}
    }
  return 0;
}
1808
1809 /* Implements REG_OK_FOR_BASE_P. */
1810 int
1811 m32c_reg_ok_for_base_p (rtx x, int strict)
1812 {
1813 if (GET_CODE (x) != REG)
1814 return 0;
1815 switch (REGNO (x))
1816 {
1817 case A0_REGNO:
1818 case A1_REGNO:
1819 case SB_REGNO:
1820 case FB_REGNO:
1821 case SP_REGNO:
1822 return 1;
1823 default:
1824 if (IS_PSEUDO (x, strict))
1825 return 1;
1826 return 0;
1827 }
1828 }
1829
1830 /* We have three choices for choosing fb->aN offsets. If we choose -128,
1831 we need one MOVA -128[fb],aN opcode and 16-bit aN displacements,
1832 like this:
1833 EB 4B FF mova -128[$fb],$a0
1834 D8 0C FF FF mov.w:Q #0,-1[$a0]
1835
1836 Alternately, we subtract the frame size, and hopefully use 8-bit aN
1837 displacements:
1838 7B F4 stc $fb,$a0
1839 77 54 00 01 sub #256,$a0
1840 D8 08 01 mov.w:Q #0,1[$a0]
1841
1842 If we don't offset (i.e. offset by zero), we end up with:
1843 7B F4 stc $fb,$a0
1844 D8 0C 00 FF mov.w:Q #0,-256[$a0]
1845
1846 We have to subtract *something* so that we have a PLUS rtx to mark
1847 that we've done this reload. The -128 offset will never result in
1848 an 8-bit aN offset, and the payoff for the second case is five
1849 loads *if* those loads are within 256 bytes of the other end of the
1850 frame, so the third case seems best. Note that we subtract the
1851 zero, but detect that in the addhi3 pattern. */
1852
1853 #define BIG_FB_ADJ 0
1854
1855 /* Implements LEGITIMIZE_ADDRESS. The only address we really have to
1856 worry about is frame base offsets, as $fb has a limited
1857 displacement range. We deal with this by attempting to reload $fb
1858 itself into an address register; that seems to result in the best
1859 code. */
1860 #undef TARGET_LEGITIMIZE_ADDRESS
1861 #define TARGET_LEGITIMIZE_ADDRESS m32c_legitimize_address
1862 static rtx
1863 m32c_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
1864 machine_mode mode)
1865 {
1866 #if DEBUG0
1867 fprintf (stderr, "m32c_legitimize_address for mode %s\n", mode_name[mode]);
1868 debug_rtx (x);
1869 fprintf (stderr, "\n");
1870 #endif
1871
1872 if (GET_CODE (x) == PLUS
1873 && GET_CODE (XEXP (x, 0)) == REG
1874 && REGNO (XEXP (x, 0)) == FB_REGNO
1875 && GET_CODE (XEXP (x, 1)) == CONST_INT
1876 && (INTVAL (XEXP (x, 1)) < -128
1877 || INTVAL (XEXP (x, 1)) > (128 - GET_MODE_SIZE (mode))))
1878 {
1879 /* reload FB to A_REGS */
1880 rtx temp = gen_reg_rtx (Pmode);
1881 x = copy_rtx (x);
1882 emit_insn (gen_rtx_SET (VOIDmode, temp, XEXP (x, 0)));
1883 XEXP (x, 0) = temp;
1884 }
1885
1886 return x;
1887 }
1888
/* Implements LEGITIMIZE_RELOAD_ADDRESS.  See comment above.  Returns
   nonzero when a reload was pushed and *X was rewritten; zero lets
   reload handle the address naively.  */
int
m32c_legitimize_reload_address (rtx * x,
				machine_mode mode,
				int opnum,
				int type, int ind_levels ATTRIBUTE_UNUSED)
{
#if DEBUG0
  fprintf (stderr, "\nm32c_legitimize_reload_address for mode %s\n",
	   mode_name[mode]);
  debug_rtx (*x);
#endif

  /* At one point, this function tried to get $fb copied to an address
     register, which in theory would maximize sharing, but gcc was
     *also* still trying to reload the whole address, and we'd run out
     of address registers.  So we let gcc do the naive (but safe)
     reload instead, when the above function doesn't handle it for
     us.

     The code below is a second attempt at the above.  */

  /* Case 1: (plus $fb const) with the constant outside $fb's 8-bit
     displacement range.  Split into (plus (plus $fb adj) rest) and
     reload the inner sum into an address register.  */
  if (GET_CODE (*x) == PLUS
      && GET_CODE (XEXP (*x, 0)) == REG
      && REGNO (XEXP (*x, 0)) == FB_REGNO
      && GET_CODE (XEXP (*x, 1)) == CONST_INT
      && (INTVAL (XEXP (*x, 1)) < -128
	  || INTVAL (XEXP (*x, 1)) > (128 - GET_MODE_SIZE (mode))))
    {
      rtx sum;
      int offset = INTVAL (XEXP (*x, 1));
      int adjustment = -BIG_FB_ADJ;

      /* BIG_FB_ADJ is zero; the subtraction exists only to create a
	 PLUS rtx marking that this reload has been done (see the
	 comment above BIG_FB_ADJ).  */
      sum = gen_rtx_PLUS (Pmode, XEXP (*x, 0),
			  GEN_INT (adjustment));
      *x = gen_rtx_PLUS (Pmode, sum, GEN_INT (offset - adjustment));
      if (type == RELOAD_OTHER)
	type = RELOAD_FOR_OTHER_ADDRESS;
      push_reload (sum, NULL_RTX, &XEXP (*x, 0), NULL,
		   A_REGS, Pmode, VOIDmode, 0, 0, opnum,
		   (enum reload_type) type);
      return 1;
    }

  /* Case 2: an already-split ((plus $fb const) + const); reload the
     inner $fb sum into an address register.  */
  if (GET_CODE (*x) == PLUS
      && GET_CODE (XEXP (*x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (*x, 0), 0)) == REG
      && REGNO (XEXP (XEXP (*x, 0), 0)) == FB_REGNO
      && GET_CODE (XEXP (XEXP (*x, 0), 1)) == CONST_INT
      && GET_CODE (XEXP (*x, 1)) == CONST_INT
      )
    {
      if (type == RELOAD_OTHER)
	type = RELOAD_FOR_OTHER_ADDRESS;
      push_reload (XEXP (*x, 0), NULL_RTX, &XEXP (*x, 0), NULL,
		   A_REGS, Pmode, VOIDmode, 0, 0, opnum,
		   (enum reload_type) type);
      return 1;
    }

  return 0;
}
1951
1952 /* Return the appropriate mode for a named address pointer. */
1953 #undef TARGET_ADDR_SPACE_POINTER_MODE
1954 #define TARGET_ADDR_SPACE_POINTER_MODE m32c_addr_space_pointer_mode
1955 static machine_mode
1956 m32c_addr_space_pointer_mode (addr_space_t addrspace)
1957 {
1958 switch (addrspace)
1959 {
1960 case ADDR_SPACE_GENERIC:
1961 return TARGET_A24 ? PSImode : HImode;
1962 case ADDR_SPACE_FAR:
1963 return SImode;
1964 default:
1965 gcc_unreachable ();
1966 }
1967 }
1968
1969 /* Return the appropriate mode for a named address address. */
1970 #undef TARGET_ADDR_SPACE_ADDRESS_MODE
1971 #define TARGET_ADDR_SPACE_ADDRESS_MODE m32c_addr_space_address_mode
1972 static machine_mode
1973 m32c_addr_space_address_mode (addr_space_t addrspace)
1974 {
1975 switch (addrspace)
1976 {
1977 case ADDR_SPACE_GENERIC:
1978 return TARGET_A24 ? PSImode : HImode;
1979 case ADDR_SPACE_FAR:
1980 return SImode;
1981 default:
1982 gcc_unreachable ();
1983 }
1984 }
1985
/* Like m32c_legitimate_address_p, except with named addresses.  */
#undef TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P
#define TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P \
  m32c_addr_space_legitimate_address_p
static bool
m32c_addr_space_legitimate_address_p (machine_mode mode, rtx x,
				      bool strict, addr_space_t as)
{
  if (as == ADDR_SPACE_FAR)
    {
      /* NOTE(review): far addresses are rejected entirely on TARGET_A24
	 parts here — presumably the far space is only meaningful with
	 16-bit generic addresses; confirm against the m32c ABI docs.  */
      if (TARGET_A24)
	return 0;
      /* Classify X via encode_pattern/patternr (defined elsewhere in
	 this file).  */
      encode_pattern (x);
      /* A plain register: must be SImode; only $a0 (or a pseudo).  */
      if (RTX_IS ("r"))
	{
	  if (GET_MODE (x) != SImode)
	    return 0;
	  switch (REGNO (patternr[0]))
	    {
	    case A0_REGNO:
	      return 1;

	    default:
	      if (IS_PSEUDO (patternr[0], strict))
		return 1;
	      return 0;
	    }
	}
      /* Register plus constant displacement.  */
      if (RTX_IS ("+^Sri"))
	{
	  int rn = REGNO (patternr[3]);
	  HOST_WIDE_INT offs = INTVAL (patternr[4]);
	  if (GET_MODE (patternr[3]) != HImode)
	    return 0;
	  switch (rn)
	    {
	    case A0_REGNO:
	      /* 20-bit unsigned displacement off $a0.  */
	      return (offs >= 0 && offs <= 0xfffff);

	    default:
	      if (IS_PSEUDO (patternr[3], strict))
		return 1;
	      return 0;
	    }
	}
      /* Register plus symbol.  */
      if (RTX_IS ("+^Srs"))
	{
	  int rn = REGNO (patternr[3]);
	  if (GET_MODE (patternr[3]) != HImode)
	    return 0;
	  switch (rn)
	    {
	    case A0_REGNO:
	      return 1;

	    default:
	      if (IS_PSEUDO (patternr[3], strict))
		return 1;
	      return 0;
	    }
	}
      /* Register plus symbol plus constant.  */
      if (RTX_IS ("+^S+ris"))
	{
	  int rn = REGNO (patternr[4]);
	  if (GET_MODE (patternr[4]) != HImode)
	    return 0;
	  switch (rn)
	    {
	    case A0_REGNO:
	      return 1;

	    default:
	      if (IS_PSEUDO (patternr[4], strict))
		return 1;
	      return 0;
	    }
	}
      /* A bare symbol reference is always a valid far address.  */
      if (RTX_IS ("s"))
	{
	  return 1;
	}
      return 0;
    }

  else if (as != ADDR_SPACE_GENERIC)
    gcc_unreachable ();

  /* Generic space: defer to the ordinary legitimacy check.  */
  return m32c_legitimate_address_p (mode, x, strict);
}
2075
2076 /* Like m32c_legitimate_address, except with named address support. */
2077 #undef TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS
2078 #define TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS m32c_addr_space_legitimize_address
2079 static rtx
2080 m32c_addr_space_legitimize_address (rtx x, rtx oldx, machine_mode mode,
2081 addr_space_t as)
2082 {
2083 if (as != ADDR_SPACE_GENERIC)
2084 {
2085 #if DEBUG0
2086 fprintf (stderr, "\033[36mm32c_addr_space_legitimize_address for mode %s\033[0m\n", mode_name[mode]);
2087 debug_rtx (x);
2088 fprintf (stderr, "\n");
2089 #endif
2090
2091 if (GET_CODE (x) != REG)
2092 {
2093 x = force_reg (SImode, x);
2094 }
2095 return x;
2096 }
2097
2098 return m32c_legitimize_address (x, oldx, mode);
2099 }
2100
2101 /* Determine if one named address space is a subset of another. */
2102 #undef TARGET_ADDR_SPACE_SUBSET_P
2103 #define TARGET_ADDR_SPACE_SUBSET_P m32c_addr_space_subset_p
2104 static bool
2105 m32c_addr_space_subset_p (addr_space_t subset, addr_space_t superset)
2106 {
2107 gcc_assert (subset == ADDR_SPACE_GENERIC || subset == ADDR_SPACE_FAR);
2108 gcc_assert (superset == ADDR_SPACE_GENERIC || superset == ADDR_SPACE_FAR);
2109
2110 if (subset == superset)
2111 return true;
2112
2113 else
2114 return (subset == ADDR_SPACE_GENERIC && superset == ADDR_SPACE_FAR);
2115 }
2116
2117 #undef TARGET_ADDR_SPACE_CONVERT
2118 #define TARGET_ADDR_SPACE_CONVERT m32c_addr_space_convert
2119 /* Convert from one address space to another. */
2120 static rtx
2121 m32c_addr_space_convert (rtx op, tree from_type, tree to_type)
2122 {
2123 addr_space_t from_as = TYPE_ADDR_SPACE (TREE_TYPE (from_type));
2124 addr_space_t to_as = TYPE_ADDR_SPACE (TREE_TYPE (to_type));
2125 rtx result;
2126
2127 gcc_assert (from_as == ADDR_SPACE_GENERIC || from_as == ADDR_SPACE_FAR);
2128 gcc_assert (to_as == ADDR_SPACE_GENERIC || to_as == ADDR_SPACE_FAR);
2129
2130 if (to_as == ADDR_SPACE_GENERIC && from_as == ADDR_SPACE_FAR)
2131 {
2132 /* This is unpredictable, as we're truncating off usable address
2133 bits. */
2134
2135 result = gen_reg_rtx (HImode);
2136 emit_move_insn (result, simplify_subreg (HImode, op, SImode, 0));
2137 return result;
2138 }
2139 else if (to_as == ADDR_SPACE_FAR && from_as == ADDR_SPACE_GENERIC)
2140 {
2141 /* This always works. */
2142 result = gen_reg_rtx (SImode);
2143 emit_insn (gen_zero_extendhisi2 (result, op));
2144 return result;
2145 }
2146 else
2147 gcc_unreachable ();
2148 }
2149
2150 /* Condition Code Status */
2151
#undef TARGET_FIXED_CONDITION_CODE_REGS
#define TARGET_FIXED_CONDITION_CODE_REGS m32c_fixed_condition_code_regs
/* Implements TARGET_FIXED_CONDITION_CODE_REGS.  The flags register
   $flg is the single condition-code register; there is no second
   one.  */
static bool
m32c_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
{
  *p1 = FLG_REGNO;
  *p2 = INVALID_REGNUM;
  return true;
}
2161
2162 /* Describing Relative Costs of Operations */
2163
/* Implements TARGET_REGISTER_MOVE_COST.  We make impossible moves
   prohibitively expensive, like trying to put QIs in r2/r3 (there are
   no opcodes to do that).  We also discourage use of mem* registers
   since they're really memory.  */

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST m32c_register_move_cost

static int
m32c_register_move_cost (machine_mode mode, reg_class_t from,
			 reg_class_t to)
{
  /* Base cost; the penalties below accumulate on top of it.  */
  int cost = COSTS_N_INSNS (3);
  HARD_REG_SET cc;

/* FIXME:  pick real values, but not 2 for now.  */
  COPY_HARD_REG_SET (cc, reg_class_contents[(int) from]);
  IOR_HARD_REG_SET (cc, reg_class_contents[(int) to]);

  /* QImode data cannot live in r2/r3; make moves that would require
     it essentially impossible, and moves that merely risk it
     expensive.  */
  if (mode == QImode
      && hard_reg_set_intersect_p (cc, reg_class_contents[R23_REGS]))
    {
      if (hard_reg_set_subset_p (cc, reg_class_contents[R23_REGS]))
	cost = COSTS_N_INSNS (1000);
      else
	cost = COSTS_N_INSNS (80);
    }

  /* Classes that can't hold the mode at all are a non-starter.  */
  if (!class_can_hold_mode (from, mode) || !class_can_hold_mode (to, mode))
    cost = COSTS_N_INSNS (1000);

  /* Control registers are slow to access.  */
  if (reg_classes_intersect_p (from, CR_REGS))
    cost += COSTS_N_INSNS (5);

  if (reg_classes_intersect_p (to, CR_REGS))
    cost += COSTS_N_INSNS (5);

  /* mem0..mem7 are really memory; discourage them, heavily so when
     the move is guaranteed to touch them.  */
  if (from == MEM_REGS || to == MEM_REGS)
    cost += COSTS_N_INSNS (50);
  else if (reg_classes_intersect_p (from, MEM_REGS)
	   || reg_classes_intersect_p (to, MEM_REGS))
    cost += COSTS_N_INSNS (10);

#if DEBUG0
  fprintf (stderr, "register_move_cost %s from %s to %s = %d\n",
	   mode_name[mode], class_names[(int) from], class_names[(int) to],
	   cost);
#endif
  return cost;
}
2214
2215 /* Implements TARGET_MEMORY_MOVE_COST. */
2216
2217 #undef TARGET_MEMORY_MOVE_COST
2218 #define TARGET_MEMORY_MOVE_COST m32c_memory_move_cost
2219
2220 static int
2221 m32c_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
2222 reg_class_t rclass ATTRIBUTE_UNUSED,
2223 bool in ATTRIBUTE_UNUSED)
2224 {
2225 /* FIXME: pick real values. */
2226 return COSTS_N_INSNS (10);
2227 }
2228
/* Here we try to describe when we use multiple opcodes for one RTX so
   that gcc knows when to use them. */
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS m32c_rtx_costs
/* Implements TARGET_RTX_COSTS.  Adds an estimated instruction cost
   for X to *TOTAL.  Returns true when the subexpressions of X should
   NOT be recursed into, false to let the caller continue.  */
static bool
m32c_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
		int *total, bool speed ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case REG:
      /* The mem0..mem7 pseudo-registers are really memory; make them
	 very expensive so the allocator avoids them.  */
      if (REGNO (x) >= MEM0_REGNO && REGNO (x) <= MEM7_REGNO)
	*total += COSTS_N_INSNS (500);
      else
	*total += COSTS_N_INSNS (1);
      return true;

    case ASHIFT:
    case LSHIFTRT:
    case ASHIFTRT:
      if (GET_CODE (XEXP (x, 1)) != CONST_INT)
	{
	  /* Variable shift count: one extra move to set it up.  */
	  /* mov.b r1l, r1h */
	  *total += COSTS_N_INSNS (1);
	  return true;
	}
      if (INTVAL (XEXP (x, 1)) > 8
	  || INTVAL (XEXP (x, 1)) < -8)
	{
	  /* Counts outside [-8, 8] need the count loaded first.  */
	  /* mov.b #N, r1l */
	  /* mov.b r1l, r1h */
	  *total += COSTS_N_INSNS (2);
	  return true;
	}
      /* Small constant shifts add no extra cost here.  */
      return true;

    case LE:
    case LEU:
    case LT:
    case LTU:
    case GT:
    case GTU:
    case GE:
    case GEU:
    case NE:
    case EQ:
      /* A comparison stored into a register (outer SET) costs two
	 insns; in other contexts fall through to the default.  */
      if (outer_code == SET)
	{
	  *total += COSTS_N_INSNS (2);
	  return true;
	}
      break;

    case ZERO_EXTRACT:
      {
	/* Bit operations: cost depends on the addressing mode of the
	   memory operand holding the bit.  */
	rtx dest = XEXP (x, 0);
	rtx addr = XEXP (dest, 0);
	switch (GET_CODE (addr))
	  {
	  case CONST_INT:
	    *total += COSTS_N_INSNS (1);
	    break;
	  case SYMBOL_REF:
	    *total += COSTS_N_INSNS (3);
	    break;
	  default:
	    *total += COSTS_N_INSNS (2);
	    break;
	  }
	return true;
      }
      break;

    default:
      /* Reasonable default. */
      /* On 16-bit-address parts, SImode operations take two steps.  */
      if (TARGET_A16 && GET_MODE(x) == SImode)
	*total += COSTS_N_INSNS (2);
      break;
    }
  return false;
}
2310
2311 #undef TARGET_ADDRESS_COST
2312 #define TARGET_ADDRESS_COST m32c_address_cost
2313 static int
2314 m32c_address_cost (rtx addr, machine_mode mode ATTRIBUTE_UNUSED,
2315 addr_space_t as ATTRIBUTE_UNUSED,
2316 bool speed ATTRIBUTE_UNUSED)
2317 {
2318 int i;
2319 /* fprintf(stderr, "\naddress_cost\n");
2320 debug_rtx(addr);*/
2321 switch (GET_CODE (addr))
2322 {
2323 case CONST_INT:
2324 i = INTVAL (addr);
2325 if (i == 0)
2326 return COSTS_N_INSNS(1);
2327 if (0 < i && i <= 255)
2328 return COSTS_N_INSNS(2);
2329 if (0 < i && i <= 65535)
2330 return COSTS_N_INSNS(3);
2331 return COSTS_N_INSNS(4);
2332 case SYMBOL_REF:
2333 return COSTS_N_INSNS(4);
2334 case REG:
2335 return COSTS_N_INSNS(1);
2336 case PLUS:
2337 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
2338 {
2339 i = INTVAL (XEXP (addr, 1));
2340 if (i == 0)
2341 return COSTS_N_INSNS(1);
2342 if (0 < i && i <= 255)
2343 return COSTS_N_INSNS(2);
2344 if (0 < i && i <= 65535)
2345 return COSTS_N_INSNS(3);
2346 }
2347 return COSTS_N_INSNS(4);
2348 default:
2349 return 0;
2350 }
2351 }
2352
2353 /* Defining the Output Assembler Language */
2354
2355 /* Output of Data */
2356
2357 /* We may have 24 bit sizes, which is the native address size.
2358 Currently unused, but provided for completeness. */
2359 #undef TARGET_ASM_INTEGER
2360 #define TARGET_ASM_INTEGER m32c_asm_integer
2361 static bool
2362 m32c_asm_integer (rtx x, unsigned int size, int aligned_p)
2363 {
2364 switch (size)
2365 {
2366 case 3:
2367 fprintf (asm_out_file, "\t.3byte\t");
2368 output_addr_const (asm_out_file, x);
2369 fputc ('\n', asm_out_file);
2370 return true;
2371 case 4:
2372 if (GET_CODE (x) == SYMBOL_REF)
2373 {
2374 fprintf (asm_out_file, "\t.long\t");
2375 output_addr_const (asm_out_file, x);
2376 fputc ('\n', asm_out_file);
2377 return true;
2378 }
2379 break;
2380 }
2381 return default_assemble_integer (x, size, aligned_p);
2382 }
2383
/* Output of Assembler Instructions */

/* We use a lookup table because the addressing modes are non-orthogonal. */

/* Each entry maps an (operand-code, encoded-pattern) pair to an
   output template.  CODE is the print_operand modifier character (0
   for none).  PATTERN is matched against the string built by
   encode_pattern for the operand.  FORMAT drives the output: digits
   select the corresponding entry of patternr[] to print, 'z' inserts
   a zero displacement when required, '+' blends displacement signs,
   '\\' quotes the next character, and all other characters are
   emitted literally.  See m32c_print_operand below.  */
static struct
{
  char code;
  char const *pattern;
  char const *format;
}
const conversions[] = {
  { 0, "r", "0" },

  { 0, "mr", "z[1]" },
  { 0, "m+ri", "3[2]" },
  { 0, "m+rs", "3[2]" },
  { 0, "m+^Zrs", "5[4]" },
  { 0, "m+^Zri", "5[4]" },
  { 0, "m+^Z+ris", "7+6[5]" },
  { 0, "m+^Srs", "5[4]" },
  { 0, "m+^Sri", "5[4]" },
  { 0, "m+^S+ris", "7+6[5]" },
  { 0, "m+r+si", "4+5[2]" },
  { 0, "ms", "1" },
  { 0, "mi", "1" },
  { 0, "m+si", "2+3" },

  { 0, "mmr", "[z[2]]" },
  { 0, "mm+ri", "[4[3]]" },
  { 0, "mm+rs", "[4[3]]" },
  { 0, "mm+r+si", "[5+6[3]]" },
  { 0, "mms", "[[2]]" },
  { 0, "mmi", "[[2]]" },
  { 0, "mm+si", "[4[3]]" },

  { 0, "i", "#0" },
  { 0, "s", "#0" },
  { 0, "+si", "#1+2" },
  { 0, "l", "#0" },

  { 'l', "l", "0" },
  { 'd', "i", "0" },
  { 'd', "s", "0" },
  { 'd', "+si", "1+2" },
  { 'D', "i", "0" },
  { 'D', "s", "0" },
  { 'D', "+si", "1+2" },
  { 'x', "i", "#0" },
  { 'X', "i", "#0" },
  { 'm', "i", "#0" },
  { 'b', "i", "#0" },
  { 'B', "i", "0" },
  { 'p', "i", "0" },

  { 0, 0, 0 }
};

/* This is in order according to the bitfield that pushm/popm use. */
/* Bit 7 down to bit 0 of the pushm/popm mask byte, respectively.  */
static char const *pushm_regs[] = {
  "fb", "sb", "a1", "a0", "r3", "r2", "r1", "r0"
};
2445
/* Implements TARGET_PRINT_OPERAND.  Prints operand X to FILE,
   modified by CODE.  Codes handled here:
     'u'/'U'  print a constant as unsigned word/byte (for multiplies)
     '!'      debugging aid: abort on unreviewed patterns
     '&'      emit "w" or "l" suffix for PSImode per target
     'C'/'c'  inverted / regular condition name
     'h'/'H'  low / high half of a wider operand
   Other codes ('d', 'D', 'x', 'X', 'm', 'b', 'B', 'p', 'l') are
   dispatched through the conversions[] table above.  */

#undef TARGET_PRINT_OPERAND
#define TARGET_PRINT_OPERAND m32c_print_operand

static void
m32c_print_operand (FILE * file, rtx x, int code)
{
  int i, j, b;
  const char *comma;
  HOST_WIDE_INT ival;
  int unsigned_const = 0;
  int force_sign;

  /* Multiplies; constants are converted to sign-extended format but
     we need unsigned, so 'u' and 'U' tell us what size unsigned we
     need. */
  if (code == 'u')
    {
      unsigned_const = 2;
      code = 0;
    }
  if (code == 'U')
    {
      unsigned_const = 1;
      code = 0;
    }
  /* This one is only for debugging; you can put it in a pattern to
     force this error. */
  if (code == '!')
    {
      fprintf (stderr, "dj: unreviewed pattern:");
      if (current_output_insn)
	debug_rtx (current_output_insn);
      gcc_unreachable ();
    }
  /* PSImode operations are either .w or .l depending on the target. */
  if (code == '&')
    {
      if (TARGET_A16)
	fprintf (file, "w");
      else
	fprintf (file, "l");
      return;
    }
  /* Inverted conditionals. */
  if (code == 'C')
    {
      switch (GET_CODE (x))
	{
	case LE:
	  fputs ("gt", file);
	  break;
	case LEU:
	  fputs ("gtu", file);
	  break;
	case LT:
	  fputs ("ge", file);
	  break;
	case LTU:
	  fputs ("geu", file);
	  break;
	case GT:
	  fputs ("le", file);
	  break;
	case GTU:
	  fputs ("leu", file);
	  break;
	case GE:
	  fputs ("lt", file);
	  break;
	case GEU:
	  fputs ("ltu", file);
	  break;
	case NE:
	  fputs ("eq", file);
	  break;
	case EQ:
	  fputs ("ne", file);
	  break;
	default:
	  gcc_unreachable ();
	}
      return;
    }
  /* Regular conditionals. */
  if (code == 'c')
    {
      switch (GET_CODE (x))
	{
	case LE:
	  fputs ("le", file);
	  break;
	case LEU:
	  fputs ("leu", file);
	  break;
	case LT:
	  fputs ("lt", file);
	  break;
	case LTU:
	  fputs ("ltu", file);
	  break;
	case GT:
	  fputs ("gt", file);
	  break;
	case GTU:
	  fputs ("gtu", file);
	  break;
	case GE:
	  fputs ("ge", file);
	  break;
	case GEU:
	  fputs ("geu", file);
	  break;
	case NE:
	  fputs ("ne", file);
	  break;
	case EQ:
	  fputs ("eq", file);
	  break;
	default:
	  gcc_unreachable ();
	}
      return;
    }
  /* Used in negsi2 to do HImode ops on the two parts of an SImode
     operand. */
  if (code == 'h' && GET_MODE (x) == SImode)
    {
      x = m32c_subreg (HImode, x, SImode, 0);
      code = 0;
    }
  if (code == 'H' && GET_MODE (x) == SImode)
    {
      x = m32c_subreg (HImode, x, SImode, 2);
      code = 0;
    }
  if (code == 'h' && GET_MODE (x) == HImode)
    {
      x = m32c_subreg (QImode, x, HImode, 0);
      code = 0;
    }
  if (code == 'H' && GET_MODE (x) == HImode)
    {
      /* We can't actually represent this as an rtx.  Do it here. */
      if (GET_CODE (x) == REG)
	{
	  switch (REGNO (x))
	    {
	    case R0_REGNO:
	      fputs ("r0h", file);
	      return;
	    case R1_REGNO:
	      fputs ("r1h", file);
	      return;
	    default:
	      gcc_unreachable();
	    }
	}
      /* This should be a MEM. */
      x = m32c_subreg (QImode, x, HImode, 1);
      code = 0;
    }
  /* This is for BMcond, which always wants word register names. */
  if (code == 'h' && GET_MODE (x) == QImode)
    {
      if (GET_CODE (x) == REG)
	x = gen_rtx_REG (HImode, REGNO (x));
      code = 0;
    }
  /* 'x' and 'X' need to be ignored for non-immediates. */
  if ((code == 'x' || code == 'X') && GET_CODE (x) != CONST_INT)
    code = 0;

  /* Classify the operand into a pattern string, then look up the
     matching output template in conversions[].  */
  encode_pattern (x);
  force_sign = 0;
  for (i = 0; conversions[i].pattern; i++)
    if (conversions[i].code == code
	&& streq (conversions[i].pattern, pattern))
      {
	for (j = 0; conversions[i].format[j]; j++)
	  /* backslash quotes the next character in the output pattern. */
	  if (conversions[i].format[j] == '\\')
	    {
	      fputc (conversions[i].format[j + 1], file);
	      j++;
	    }
	  /* Digits in the output pattern indicate that the
	     corresponding RTX is to be output at that point. */
	  else if (ISDIGIT (conversions[i].format[j]))
	    {
	      rtx r = patternr[conversions[i].format[j] - '0'];
	      switch (GET_CODE (r))
		{
		case REG:
		  fprintf (file, "%s",
			   reg_name_with_mode (REGNO (r), GET_MODE (r)));
		  break;
		case CONST_INT:
		  switch (code)
		    {
		    case 'b':
		    case 'B':
		      {
			/* Bit-number operand: print the position of
			   the single set (or single clear) bit.  */
			int v = INTVAL (r);
			int i = (int) exact_log2 (v);
			if (i == -1)
			  i = (int) exact_log2 ((v ^ 0xffff) & 0xffff);
			if (i == -1)
			  i = (int) exact_log2 ((v ^ 0xff) & 0xff);
			/* Bit position. */
			fprintf (file, "%d", i);
		      }
		      break;
		    case 'x':
		      /* Unsigned byte. */
		      fprintf (file, HOST_WIDE_INT_PRINT_HEX,
			       INTVAL (r) & 0xff);
		      break;
		    case 'X':
		      /* Unsigned word. */
		      fprintf (file, HOST_WIDE_INT_PRINT_HEX,
			       INTVAL (r) & 0xffff);
		      break;
		    case 'p':
		      /* pushm and popm encode a register set into a single byte. */
		      comma = "";
		      for (b = 7; b >= 0; b--)
			if (INTVAL (r) & (1 << b))
			  {
			    fprintf (file, "%s%s", comma, pushm_regs[b]);
			    comma = ",";
			  }
		      break;
		    case 'm':
		      /* "Minus".  Output -X  */
		      ival = (-INTVAL (r) & 0xffff);
		      if (ival & 0x8000)
			ival = ival - 0x10000;
		      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
		      break;
		    default:
		      ival = INTVAL (r);
		      if (conversions[i].format[j + 1] == '[' && ival < 0)
			{
			  /* We can simulate negative displacements by
			     taking advantage of address space
			     wrapping when the offset can span the
			     entire address range. */
			  rtx base =
			    patternr[conversions[i].format[j + 2] - '0'];
			  if (GET_CODE (base) == REG)
			    switch (REGNO (base))
			      {
			      case A0_REGNO:
			      case A1_REGNO:
				if (TARGET_A24)
				  ival = 0x1000000 + ival;
				else
				  ival = 0x10000 + ival;
				break;
			      case SB_REGNO:
				if (TARGET_A16)
				  ival = 0x10000 + ival;
				break;
			      }
			}
		      else if (code == 'd' && ival < 0 && j == 0)
			/* The "mova" opcode is used to do addition by
			   computing displacements, but again, we need
			   displacements to be unsigned *if* they're
			   the only component of the displacement
			   (i.e. no "symbol-4" type displacement). */
			ival = (TARGET_A24 ? 0x1000000 : 0x10000) + ival;

		      if (conversions[i].format[j] == '0')
			{
			  /* More conversions to unsigned. */
			  if (unsigned_const == 2)
			    ival &= 0xffff;
			  if (unsigned_const == 1)
			    ival &= 0xff;
			}
		      if (streq (conversions[i].pattern, "mi")
			  || streq (conversions[i].pattern, "mmi"))
			{
			  /* Integers used as addresses are unsigned. */
			  ival &= (TARGET_A24 ? 0xffffff : 0xffff);
			}
		      if (force_sign && ival >= 0)
			fputc ('+', file);
		      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
		      break;
		    }
		  break;
		case CONST_DOUBLE:
		  /* We don't have const_double constants.  If it
		     happens, make it obvious. */
		  fprintf (file, "[const_double 0x%lx]",
			   (unsigned long) CONST_DOUBLE_HIGH (r));
		  break;
		case SYMBOL_REF:
		  assemble_name (file, XSTR (r, 0));
		  break;
		case LABEL_REF:
		  output_asm_label (r);
		  break;
		default:
		  fprintf (stderr, "don't know how to print this operand:");
		  debug_rtx (r);
		  gcc_unreachable ();
		}
	    }
	  else
	    {
	      if (conversions[i].format[j] == 'z')
		{
		  /* Some addressing modes *must* have a displacement,
		     so insert a zero here if needed. */
		  int k;
		  for (k = j + 1; conversions[i].format[k]; k++)
		    if (ISDIGIT (conversions[i].format[k]))
		      {
			rtx reg = patternr[conversions[i].format[k] - '0'];
			if (GET_CODE (reg) == REG
			    && (REGNO (reg) == SB_REGNO
				|| REGNO (reg) == FB_REGNO
				|| REGNO (reg) == SP_REGNO))
			  fputc ('0', file);
		      }
		  continue;
		}
	      /* Signed displacements off symbols need to have signs
		 blended cleanly. */
	      if (conversions[i].format[j] == '+'
		  && (!code || code == 'D' || code == 'd')
		  && ISDIGIT (conversions[i].format[j + 1])
		  && (GET_CODE (patternr[conversions[i].format[j + 1] - '0'])
		      == CONST_INT))
		{
		  force_sign = 1;
		  continue;
		}
	      fputc (conversions[i].format[j], file);
	    }
	break;
      }
  if (!conversions[i].pattern)
    {
      fprintf (stderr, "unconvertible operand %c `%s'", code ? code : '-',
	       pattern);
      debug_rtx (x);
      fprintf (file, "[%c.%s]", code ? code : '-', pattern);
    }

  return;
}
2803
/* Implements TARGET_PRINT_OPERAND_PUNCT_VALID_P.

   See m32c_print_operand above for descriptions of what these do. */

#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P m32c_print_operand_punct_valid_p

static bool
m32c_print_operand_punct_valid_p (unsigned char c)
{
  /* '&' selects the .w/.l suffix; '!' is the debugging trap.  */
  return c == '&' || c == '!';
}
2819
2820 /* Implements TARGET_PRINT_OPERAND_ADDRESS. Nothing unusual here. */
2821
2822 #undef TARGET_PRINT_OPERAND_ADDRESS
2823 #define TARGET_PRINT_OPERAND_ADDRESS m32c_print_operand_address
2824
2825 static void
2826 m32c_print_operand_address (FILE * stream, rtx address)
2827 {
2828 if (GET_CODE (address) == MEM)
2829 address = XEXP (address, 0);
2830 else
2831 /* cf: gcc.dg/asm-4.c. */
2832 gcc_assert (GET_CODE (address) == REG);
2833
2834 m32c_print_operand (stream, address, 0);
2835 }
2836
2837 /* Implements ASM_OUTPUT_REG_PUSH. Control registers are pushed
2838 differently than general registers. */
2839 void
2840 m32c_output_reg_push (FILE * s, int regno)
2841 {
2842 if (regno == FLG_REGNO)
2843 fprintf (s, "\tpushc\tflg\n");
2844 else
2845 fprintf (s, "\tpush.%c\t%s\n",
2846 " bwll"[reg_push_size (regno)], reg_names[regno]);
2847 }
2848
2849 /* Likewise for ASM_OUTPUT_REG_POP. */
2850 void
2851 m32c_output_reg_pop (FILE * s, int regno)
2852 {
2853 if (regno == FLG_REGNO)
2854 fprintf (s, "\tpopc\tflg\n");
2855 else
2856 fprintf (s, "\tpop.%c\t%s\n",
2857 " bwll"[reg_push_size (regno)], reg_names[regno]);
2858 }
2859
/* Defining target-specific uses of `__attribute__' */

/* Used to simplify the logic below.  Find the attributes wherever
   they may be: on a type, use the type's attributes; on a decl, use
   the decl's attributes if present, else the decl's type's.  The
   expansion is parenthesized so the macro is safe inside larger
   expressions (the original expanded to a bare conditional
   expression).  */
#define M32C_ATTRIBUTES(decl) \
  (TYPE_P (decl) ? TYPE_ATTRIBUTES (decl) \
   : DECL_ATTRIBUTES (decl) ? DECL_ATTRIBUTES (decl) \
   : TYPE_ATTRIBUTES (TREE_TYPE (decl)))
2869
2870 /* Returns TRUE if the given tree has the "interrupt" attribute. */
2871 static int
2872 interrupt_p (tree node ATTRIBUTE_UNUSED)
2873 {
2874 tree list = M32C_ATTRIBUTES (node);
2875 while (list)
2876 {
2877 if (is_attribute_p ("interrupt", TREE_PURPOSE (list)))
2878 return 1;
2879 list = TREE_CHAIN (list);
2880 }
2881 return fast_interrupt_p (node);
2882 }
2883
2884 /* Returns TRUE if the given tree has the "bank_switch" attribute. */
2885 static int
2886 bank_switch_p (tree node ATTRIBUTE_UNUSED)
2887 {
2888 tree list = M32C_ATTRIBUTES (node);
2889 while (list)
2890 {
2891 if (is_attribute_p ("bank_switch", TREE_PURPOSE (list)))
2892 return 1;
2893 list = TREE_CHAIN (list);
2894 }
2895 return 0;
2896 }
2897
2898 /* Returns TRUE if the given tree has the "fast_interrupt" attribute. */
2899 static int
2900 fast_interrupt_p (tree node ATTRIBUTE_UNUSED)
2901 {
2902 tree list = M32C_ATTRIBUTES (node);
2903 while (list)
2904 {
2905 if (is_attribute_p ("fast_interrupt", TREE_PURPOSE (list)))
2906 return 1;
2907 list = TREE_CHAIN (list);
2908 }
2909 return 0;
2910 }
2911
/* Attribute handler shared by "interrupt", "bank_switch" and
   "fast_interrupt" (see m32c_attribute_table below).  These
   attributes take no arguments, so there is nothing to validate
   here; the attribute is always accepted as-is.  */
static tree
interrupt_handler (tree * node ATTRIBUTE_UNUSED,
		   tree name ATTRIBUTE_UNUSED,
		   tree args ATTRIBUTE_UNUSED,
		   int flags ATTRIBUTE_UNUSED,
		   bool * no_add_attrs ATTRIBUTE_UNUSED)
{
  return NULL_TREE;
}
2921
2922 /* Returns TRUE if given tree has the "function_vector" attribute. */
2923 int
2924 m32c_special_page_vector_p (tree func)
2925 {
2926 tree list;
2927
2928 if (TREE_CODE (func) != FUNCTION_DECL)
2929 return 0;
2930
2931 list = M32C_ATTRIBUTES (func);
2932 while (list)
2933 {
2934 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2935 return 1;
2936 list = TREE_CHAIN (list);
2937 }
2938 return 0;
2939 }
2940
/* Attribute handler for "function_vector".  Rejects the attribute
   (with a warning and *NO_ADD_ATTRS set) when the target is R8C, when
   it is applied to a non-function, when its argument is not an
   integer constant, or when the argument is outside [18, 255].  */
static tree
function_vector_handler (tree * node ATTRIBUTE_UNUSED,
			 tree name ATTRIBUTE_UNUSED,
			 tree args ATTRIBUTE_UNUSED,
			 int flags ATTRIBUTE_UNUSED,
			 bool * no_add_attrs ATTRIBUTE_UNUSED)
{
  if (TARGET_R8C)
    {
      /* The attribute is not supported for R8C target. */
      warning (OPT_Wattributes,
	       "%qE attribute is not supported for R8C target",
	       name);
      *no_add_attrs = true;
    }
  else if (TREE_CODE (*node) != FUNCTION_DECL)
    {
      /* The attribute must be applied to functions only. */
      warning (OPT_Wattributes,
	       "%qE attribute applies only to functions",
	       name);
      *no_add_attrs = true;
    }
  else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
    {
      /* The argument must be a constant integer. */
      warning (OPT_Wattributes,
	       "%qE attribute argument not an integer constant",
	       name);
      *no_add_attrs = true;
    }
  else if (TREE_INT_CST_LOW (TREE_VALUE (args)) < 18
	   || TREE_INT_CST_LOW (TREE_VALUE (args)) > 255)
    {
      /* The argument value must be between 18 to 255. */
      warning (OPT_Wattributes,
	       "%qE attribute argument should be between 18 to 255",
	       name);
      *no_add_attrs = true;
    }
  return NULL_TREE;
}
2983
2984 /* If the function is assigned the attribute 'function_vector', it
2985 returns the function vector number, otherwise returns zero. */
2986 int
2987 current_function_special_page_vector (rtx x)
2988 {
2989 int num;
2990
2991 if ((GET_CODE(x) == SYMBOL_REF)
2992 && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
2993 {
2994 tree list;
2995 tree t = SYMBOL_REF_DECL (x);
2996
2997 if (TREE_CODE (t) != FUNCTION_DECL)
2998 return 0;
2999
3000 list = M32C_ATTRIBUTES (t);
3001 while (list)
3002 {
3003 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
3004 {
3005 num = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list)));
3006 return num;
3007 }
3008
3009 list = TREE_CHAIN (list);
3010 }
3011
3012 return 0;
3013 }
3014 else
3015 return 0;
3016 }
3017
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE m32c_attribute_table
/* Target attribute table; see struct attribute_spec for the meaning
   of each field.  "function_vector" takes exactly one argument and
   must be on a decl; the other three take none.  */
static const struct attribute_spec m32c_attribute_table[] = {
  {"interrupt", 0, 0, false, false, false, interrupt_handler, false},
  {"bank_switch", 0, 0, false, false, false, interrupt_handler, false},
  {"fast_interrupt", 0, 0, false, false, false, interrupt_handler, false},
  {"function_vector", 1, 1, true, false, false, function_vector_handler,
   false},
  {0, 0, 0, 0, 0, 0, 0, false}
};
3028
#undef TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES m32c_comp_type_attributes
/* Implements TARGET_COMP_TYPE_ATTRIBUTES.  All attribute
   combinations are treated as compatible.  */
static int
m32c_comp_type_attributes (const_tree type1 ATTRIBUTE_UNUSED,
			   const_tree type2 ATTRIBUTE_UNUSED)
{
  /* 0=incompatible 1=compatible 2=warning */
  return 1;
}
3038
3039 #undef TARGET_INSERT_ATTRIBUTES
3040 #define TARGET_INSERT_ATTRIBUTES m32c_insert_attributes
3041 static void
3042 m32c_insert_attributes (tree node ATTRIBUTE_UNUSED,
3043 tree * attr_ptr ATTRIBUTE_UNUSED)
3044 {
3045 unsigned addr;
3046 /* See if we need to make #pragma address variables volatile. */
3047
3048 if (TREE_CODE (node) == VAR_DECL)
3049 {
3050 const char *name = IDENTIFIER_POINTER (DECL_NAME (node));
3051 if (m32c_get_pragma_address (name, &addr))
3052 {
3053 TREE_THIS_VOLATILE (node) = true;
3054 }
3055 }
3056 }
3057
3058
/* Hash-map traits for the #pragma address table: keys are C strings,
   hashed and compared by content rather than by pointer identity.  */
struct pragma_traits : default_hashmap_traits
{
  static hashval_t hash (const char *str) { return htab_hash_string (str); }
  static bool
  equal_keys (const char *a, const char *b)
  {
    return !strcmp (a, b);
  }
};

/* Hash table of pragma info. */
/* Maps variable name -> fixed address, GC-rooted via GTY.  */
static GTY(()) hash_map<const char *, unsigned, pragma_traits> *pragma_htab;
3071
/* Record that variable VARNAME was given fixed location ADDRESS by a
   #pragma address.  The table is created lazily; a later pragma for
   the same name overwrites the earlier address.  The name is copied
   into GC memory so the entry outlives the caller's buffer.  */
void
m32c_note_pragma_address (const char *varname, unsigned address)
{
  if (!pragma_htab)
    pragma_htab
      = hash_map<const char *, unsigned, pragma_traits>::create_ggc (31);

  const char *name = ggc_strdup (varname);
  unsigned int *slot = &pragma_htab->get_or_insert (name);
  *slot = address;
}
3083
3084 static bool
3085 m32c_get_pragma_address (const char *varname, unsigned *address)
3086 {
3087 if (!pragma_htab)
3088 return false;
3089
3090 unsigned int *slot = pragma_htab->get (varname);
3091 if (slot)
3092 {
3093 *address = *slot;
3094 return true;
3095 }
3096 return false;
3097 }
3098
/* Output a common block for NAME of the given SIZE and ALIGN (in
   bits), optionally GLOBAL.  A variable whose address was fixed with
   #pragma address is instead emitted as an absolute symbol
   assignment ("name = 0xNNNN") and is never made global.  */
void
m32c_output_aligned_common (FILE *stream, tree decl ATTRIBUTE_UNUSED,
			    const char *name,
			    int size, int align, int global)
{
  unsigned address;

  if (m32c_get_pragma_address (name, &address))
    {
      /* We never output these as global. */
      assemble_name (stream, name);
      fprintf (stream, " = 0x%04x\n", address);
      return;
    }
  if (!global)
    {
      fprintf (stream, "\t.local\t");
      assemble_name (stream, name);
      fprintf (stream, "\n");
    }
  fprintf (stream, "\t.comm\t");
  assemble_name (stream, name);
  fprintf (stream, ",%u,%u\n", size, align / BITS_PER_UNIT);
}
3123
/* Predicates */

/* This is a list of legal subregs of hard regs. */
/* A subreg of OUTER_MODE_SIZE bytes taken from a register of
   INNER_MODE_SIZE bytes is legal when it starts at a byte offset
   whose bit is set in BYTE_MASK (bit N = offset N).  LEGAL_WHEN is 1
   for always, or 16/24 for only when TARGET_A16/TARGET_A24 holds
   (checked in m32c_illegal_subreg_p below).  */
static const struct {
  unsigned char outer_mode_size;
  unsigned char inner_mode_size;
  unsigned char byte_mask;
  unsigned char legal_when;
  unsigned int regno;
} legal_subregs[] = {
  {1, 2, 0x03, 1, R0_REGNO},	/* r0h r0l */
  {1, 2, 0x03, 1, R1_REGNO},	/* r1h r1l */
  {1, 2, 0x01, 1, A0_REGNO},
  {1, 2, 0x01, 1, A1_REGNO},

  {1, 4, 0x01, 1, A0_REGNO},
  {1, 4, 0x01, 1, A1_REGNO},

  {2, 4, 0x05, 1, R0_REGNO},	/* r2 r0 */
  {2, 4, 0x05, 1, R1_REGNO},	/* r3 r1 */
  {2, 4, 0x05, 16, A0_REGNO},	/* a1 a0 */
  {2, 4, 0x01, 24, A0_REGNO},	/* a1 a0 */
  {2, 4, 0x01, 24, A1_REGNO},	/* a1 a0 */

  {4, 8, 0x55, 1, R0_REGNO},	/* r3 r1 r2 r0 */
};
3150
/* Returns TRUE if OP is a subreg of a hard reg which we don't
   support.  We also bail on MEMs with illegal addresses. */
bool
m32c_illegal_subreg_p (rtx op)
{
  int offset;
  unsigned int i;
  machine_mode src_mode, dest_mode;

  /* Reject MEMs whose address is not legitimate.  */
  if (GET_CODE (op) == MEM
      && ! m32c_legitimate_address_p (Pmode, XEXP (op, 0), false))
    {
      return true;
    }

  if (GET_CODE (op) != SUBREG)
    return false;

  dest_mode = GET_MODE (op);
  offset = SUBREG_BYTE (op);
  op = SUBREG_REG (op);
  src_mode = GET_MODE (op);

  /* Same-size subregs are mode punning, not partial access — fine.  */
  if (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (src_mode))
    return false;
  if (GET_CODE (op) != REG)
    return false;
  /* Pseudos and the mem0..mem7 registers are not restricted here.  */
  if (REGNO (op) >= MEM0_REGNO)
    return false;

  /* Convert the byte offset into the bit-mask form used by
     legal_subregs[].byte_mask (bit N = starting byte N).  */
  offset = (1 << offset);

  /* Scan the whitelist; a matching entry whose legal_when condition
     holds makes the subreg legal.  */
  for (i = 0; i < ARRAY_SIZE (legal_subregs); i ++)
    if (legal_subregs[i].outer_mode_size == GET_MODE_SIZE (dest_mode)
	&& legal_subregs[i].regno == REGNO (op)
	&& legal_subregs[i].inner_mode_size == GET_MODE_SIZE (src_mode)
	&& legal_subregs[i].byte_mask & offset)
      {
	switch (legal_subregs[i].legal_when)
	  {
	  case 1:
	    return false;
	  case 16:
	    if (TARGET_A16)
	      return false;
	    break;
	  case 24:
	    if (TARGET_A24)
	      return false;
	    break;
	  }
      }
  /* No whitelist entry matched: the subreg is unsupported.  */
  return true;
}
3205
/* Returns TRUE if we support a move between the first two operands.
   At the moment, we just want to discourage mem to mem moves until
   after reload, because reload has a hard time with our limited
   number of address registers, and we can get into a situation where
   we need three of them when we only have two. */
bool
m32c_mov_ok (rtx * operands, machine_mode mode ATTRIBUTE_UNUSED)
{
  rtx op0 = operands[0];
  rtx op1 = operands[1];

  /* The restriction only applies to the 16-bit-address parts.  */
  if (TARGET_A24)
    return true;

#define DEBUG_MOV_OK 0
#if DEBUG_MOV_OK
  fprintf (stderr, "m32c_mov_ok %s\n", mode_name[mode]);
  debug_rtx (op0);
  debug_rtx (op1);
#endif

  /* Look through subregs to the underlying operand.  */
  if (GET_CODE (op0) == SUBREG)
    op0 = XEXP (op0, 0);
  if (GET_CODE (op1) == SUBREG)
    op1 = XEXP (op1, 0);

  /* Disallow mem-to-mem moves before reload completes.  */
  if (GET_CODE (op0) == MEM
      && GET_CODE (op1) == MEM
      && ! reload_completed)
    {
#if DEBUG_MOV_OK
      fprintf (stderr, " - no, mem to mem\n");
#endif
      return false;
    }

#if DEBUG_MOV_OK
  fprintf (stderr, " - ok\n");
#endif
  return true;
}
3247
/* Returns TRUE if two consecutive HImode mov instructions, generated
   for moving an immediate double data to a double data type variable
   location, can be combined into single SImode mov instruction. */
/* Always FALSE now: the optimization was disabled when the RTL flags
   it depended on were removed (see comment below).  */
bool
m32c_immd_dbl_mov (rtx * operands ATTRIBUTE_UNUSED,
		   machine_mode mode ATTRIBUTE_UNUSED)
{
  /* ??? This relied on the now-defunct MEM_SCALAR and MEM_IN_STRUCT_P
     flags.  */
  return false;
}
3259
/* Expanders */

/* Subregs are non-orthogonal for us, because our registers are all
   different sizes. */
/* Returns a new rtx accessing the OUTER-mode piece at byte offset
   BYTE of X, which has mode INNER.  For hard registers the piece is
   mapped onto a concrete hard register per the register-pairing
   layout below; unsupported combinations abort.  */
static rtx
m32c_subreg (machine_mode outer,
	     rtx x, machine_mode inner, int byte)
{
  int r, nr = -1;

  /* Converting MEMs to different types that are the same size, we
     just rewrite them. */
  if (GET_CODE (x) == SUBREG
      && SUBREG_BYTE (x) == 0
      && GET_CODE (SUBREG_REG (x)) == MEM
      && (GET_MODE_SIZE (GET_MODE (x))
	  == GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
    {
      rtx oldx = x;
      x = gen_rtx_MEM (GET_MODE (x), XEXP (SUBREG_REG (x), 0));
      MEM_COPY_ATTRIBUTES (x, SUBREG_REG (oldx));
    }

  /* Push/pop get done as smaller push/pops. */
  if (GET_CODE (x) == MEM
      && (GET_CODE (XEXP (x, 0)) == PRE_DEC
	  || GET_CODE (XEXP (x, 0)) == POST_INC))
    return gen_rtx_MEM (outer, XEXP (x, 0));
  if (GET_CODE (x) == SUBREG
      && GET_CODE (XEXP (x, 0)) == MEM
      && (GET_CODE (XEXP (XEXP (x, 0), 0)) == PRE_DEC
	  || GET_CODE (XEXP (XEXP (x, 0), 0)) == POST_INC))
    return gen_rtx_MEM (outer, XEXP (XEXP (x, 0), 0));

  /* Non-register operands can be handled by the generic code.  */
  if (GET_CODE (x) != REG)
    {
      rtx r = simplify_gen_subreg (outer, x, inner, byte);
      if (GET_CODE (r) == SUBREG
	  && GET_CODE (x) == MEM
	  && MEM_VOLATILE_P (x))
	{
	  /* Volatile MEMs don't get simplified, but we need them to
	     be.  We are little endian, so the subreg byte is the
	     offset. */
	  r = adjust_address_nv (x, outer, byte);
	}
      return r;
    }

  r = REGNO (x);
  /* Pseudos, the arg pointer, and the mem* registers can use the
     generic subreg machinery.  */
  if (r >= FIRST_PSEUDO_REGISTER || r == AP_REGNO)
    return simplify_gen_subreg (outer, x, inner, byte);

  if (IS_MEM_REGNO (r))
    return simplify_gen_subreg (outer, x, inner, byte);

  /* This is where the complexities of our register layout are
     described. */
  if (byte == 0)
    nr = r;
  else if (outer == HImode)
    {
      if (r == R0_REGNO && byte == 2)
	nr = R2_REGNO;
      else if (r == R0_REGNO && byte == 4)
	nr = R1_REGNO;
      else if (r == R0_REGNO && byte == 6)
	nr = R3_REGNO;
      else if (r == R1_REGNO && byte == 2)
	nr = R3_REGNO;
      else if (r == A0_REGNO && byte == 2)
	nr = A1_REGNO;
    }
  else if (outer == SImode)
    {
      if (r == R0_REGNO && byte == 0)
	nr = R0_REGNO;
      else if (r == R0_REGNO && byte == 4)
	nr = R1_REGNO;
    }
  /* Any combination not enumerated above is a bug in the caller.  */
  if (nr == -1)
    {
      fprintf (stderr, "m32c_subreg %s %s %d\n",
	       mode_name[outer], mode_name[inner], byte);
      debug_rtx (x);
      gcc_unreachable ();
    }
  return gen_rtx_REG (outer, nr);
}
3349
/* Used to emit move instructions.  We split some moves,
   and avoid mem-mem moves. */
/* Returns nonzero when the expander should consider the move fully
   emitted (only via m32c_split_move here); otherwise the caller
   proceeds with the possibly-rewritten OPERANDS.  */
int
m32c_prepare_move (rtx * operands, machine_mode mode)
{
  /* Constants cannot be stored directly into far address space;
     force them through a register first.  */
  if (far_addr_space_p (operands[0])
      && CONSTANT_P (operands[1]))
    {
      operands[1] = force_reg (GET_MODE (operands[0]), operands[1]);
    }
  /* A16 parts have no PSImode moves; split unconditionally.  */
  if (TARGET_A16 && mode == PSImode)
    return m32c_split_move (operands, mode, 1);
  if ((GET_CODE (operands[0]) == MEM)
      && (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY))
    {
      /* Expand a PRE_MODIFY destination into an explicit address
	 update followed by a plain register-indirect store.  */
      rtx pmv = XEXP (operands[0], 0);
      rtx dest_reg = XEXP (pmv, 0);
      rtx dest_mod = XEXP (pmv, 1);

      emit_insn (gen_rtx_SET (Pmode, dest_reg, dest_mod));
      operands[0] = gen_rtx_MEM (mode, dest_reg);
    }
  /* Avoid mem-mem moves while we can still allocate a scratch.  */
  if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
    operands[1] = copy_to_mode_reg (mode, operands[1]);
  return 0;
}
3376
#define DEBUG_SPLIT 0

/* Returns TRUE if the given PSImode move should be split.  We split
   for all r8c/m16c moves, since it doesn't support them, and for
   POP.L as we can only *push* SImode. */
int
m32c_split_psi_p (rtx * operands)
{
#if DEBUG_SPLIT
  fprintf (stderr, "\nm32c_split_psi_p\n");
  debug_rtx (operands[0]);
  debug_rtx (operands[1]);
#endif
  /* r8c/m16c (TARGET_A16) always split.  */
  if (TARGET_A16)
    {
#if DEBUG_SPLIT
      fprintf (stderr, "yes, A16\n");
#endif
      return 1;
    }
  /* A POST_INC source is a pop, which must also be split.  */
  if (GET_CODE (operands[1]) == MEM
      && GET_CODE (XEXP (operands[1], 0)) == POST_INC)
    {
#if DEBUG_SPLIT
      fprintf (stderr, "yes, pop.l\n");
#endif
      return 1;
    }
#if DEBUG_SPLIT
  fprintf (stderr, "no, default\n");
#endif
  return 0;
}
3410
/* Split the given move.  SPLIT_ALL is 0 if splitting is optional
   (define_expand), 1 if it is not optional (define_insn_and_split),
   and 3 for define_split (alternate api).  Returns nonzero if the
   move was split (or, for the define_split api, if the caller must
   split it), zero if the move may be emitted as-is.  */
int
m32c_split_move (rtx * operands, machine_mode mode, int split_all)
{
  rtx s[4], d[4];
  int parts, si, di, rev = 0;
  /* OPI starts at 2 because ops[0..1] hold the original operand pair
     in the define_split case; accumulated sub-move pairs follow.  */
  int rv = 0, opi = 2;
  machine_mode submode = HImode;
  rtx *ops, local_ops[10];

  /* define_split modifies the existing operands, but the other two
     emit new insns.  OPS is where we store the operand pairs, which
     we emit later.  */
  if (split_all == 3)
    ops = operands;
  else
    ops = local_ops;

  /* Else HImode.  */
  if (mode == DImode)
    submode = SImode;

  /* Before splitting mem-mem moves, force one operand into a
     register.  */
  if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
    {
#if DEBUG0
      fprintf (stderr, "force_reg...\n");
      debug_rtx (operands[1]);
#endif
      operands[1] = force_reg (mode, operands[1]);
#if DEBUG0
      debug_rtx (operands[1]);
#endif
    }

  /* Both SImode->HImode and DImode->SImode splits yield two parts.  */
  parts = 2;

#if DEBUG_SPLIT
  fprintf (stderr, "\nsplit_move %d all=%d\n", !can_create_pseudo_p (),
	   split_all);
  debug_rtx (operands[0]);
  debug_rtx (operands[1]);
#endif

  /* Note that split_all is not used to select the api after this
     point, so it's safe to set it to 3 even with define_insn.  */
  /* None of the chips can move SI operands to sp-relative addresses,
     so we always split those.  */
  if (satisfies_constraint_Ss (operands[0]))
    split_all = 3;

  if (TARGET_A16
      && (far_addr_space_p (operands[0])
	  || far_addr_space_p (operands[1])))
    split_all |= 1;

  /* We don't need to split these.  */
  if (TARGET_A24
      && split_all != 3
      && (mode == SImode || mode == PSImode)
      && !(GET_CODE (operands[1]) == MEM
	   && GET_CODE (XEXP (operands[1], 0)) == POST_INC))
    return 0;

  /* First, enumerate the subregs we'll be dealing with.  */
  for (si = 0; si < parts; si++)
    {
      d[si] =
	m32c_subreg (submode, operands[0], mode,
		     si * GET_MODE_SIZE (submode));
      s[si] =
	m32c_subreg (submode, operands[1], mode,
		     si * GET_MODE_SIZE (submode));
    }

  /* Split pushes by emitting a sequence of smaller pushes.  */
  if (GET_CODE (d[0]) == MEM && GET_CODE (XEXP (d[0], 0)) == PRE_DEC)
    {
      /* High part first, so the pieces end up in memory order.  */
      for (si = parts - 1; si >= 0; si--)
	{
	  ops[opi++] = gen_rtx_MEM (submode,
				    gen_rtx_PRE_DEC (Pmode,
						     gen_rtx_REG (Pmode,
								  SP_REGNO)));
	  ops[opi++] = s[si];
	}

      rv = 1;
    }
  /* Likewise for pops.  */
  else if (GET_CODE (s[0]) == MEM && GET_CODE (XEXP (s[0], 0)) == POST_INC)
    {
      for (di = 0; di < parts; di++)
	{
	  ops[opi++] = d[di];
	  ops[opi++] = gen_rtx_MEM (submode,
				    gen_rtx_POST_INC (Pmode,
						      gen_rtx_REG (Pmode,
								   SP_REGNO)));
	}
      rv = 1;
    }
  else if (split_all)
    {
      /* if d[di] == s[si] for any di < si, we'll early clobber. */
      for (di = 0; di < parts - 1; di++)
	for (si = di + 1; si < parts; si++)
	  if (reg_mentioned_p (d[di], s[si]))
	    rev = 1;

      /* Emit low part first when a later source mentions an earlier
	 destination; otherwise high part first.  */
      if (rev)
	for (si = 0; si < parts; si++)
	  {
	    ops[opi++] = d[si];
	    ops[opi++] = s[si];
	  }
      else
	for (si = parts - 1; si >= 0; si--)
	  {
	    ops[opi++] = d[si];
	    ops[opi++] = s[si];
	  }
      rv = 1;
    }
  /* Now emit any moves we may have accumulated.  */
  if (rv && split_all != 3)
    {
      int i;
      for (i = 2; i < opi; i += 2)
	emit_move_insn (ops[i], ops[i + 1]);
    }
  return rv;
}
3547
3548 /* The m32c has a number of opcodes that act like memcpy, strcmp, and
3549 the like. For the R8C they expect one of the addresses to be in
3550 R1L:An so we need to arrange for that. Otherwise, it's just a
3551 matter of picking out the operands we want and emitting the right
3552 pattern for them. All these expanders, which correspond to
3553 patterns in blkmov.md, must return nonzero if they expand the insn,
3554 or zero if they should FAIL. */
3555
3556 /* This is a memset() opcode. All operands are implied, so we need to
3557 arrange for them to be in the right registers. The opcode wants
3558 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3559 the count (HI), and $2 the value (QI). */
3560 int
3561 m32c_expand_setmemhi(rtx *operands)
3562 {
3563 rtx desta, count, val;
3564 rtx desto, counto;
3565
3566 desta = XEXP (operands[0], 0);
3567 count = operands[1];
3568 val = operands[2];
3569
3570 desto = gen_reg_rtx (Pmode);
3571 counto = gen_reg_rtx (HImode);
3572
3573 if (GET_CODE (desta) != REG
3574 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3575 desta = copy_to_mode_reg (Pmode, desta);
3576
3577 /* This looks like an arbitrary restriction, but this is by far the
3578 most common case. For counts 8..14 this actually results in
3579 smaller code with no speed penalty because the half-sized
3580 constant can be loaded with a shorter opcode. */
3581 if (GET_CODE (count) == CONST_INT
3582 && GET_CODE (val) == CONST_INT
3583 && ! (INTVAL (count) & 1)
3584 && (INTVAL (count) > 1)
3585 && (INTVAL (val) <= 7 && INTVAL (val) >= -8))
3586 {
3587 unsigned v = INTVAL (val) & 0xff;
3588 v = v | (v << 8);
3589 count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3590 val = copy_to_mode_reg (HImode, GEN_INT (v));
3591 if (TARGET_A16)
3592 emit_insn (gen_setmemhi_whi_op (desto, counto, val, desta, count));
3593 else
3594 emit_insn (gen_setmemhi_wpsi_op (desto, counto, val, desta, count));
3595 return 1;
3596 }
3597
3598 /* This is the generalized memset() case. */
3599 if (GET_CODE (val) != REG
3600 || REGNO (val) < FIRST_PSEUDO_REGISTER)
3601 val = copy_to_mode_reg (QImode, val);
3602
3603 if (GET_CODE (count) != REG
3604 || REGNO (count) < FIRST_PSEUDO_REGISTER)
3605 count = copy_to_mode_reg (HImode, count);
3606
3607 if (TARGET_A16)
3608 emit_insn (gen_setmemhi_bhi_op (desto, counto, val, desta, count));
3609 else
3610 emit_insn (gen_setmemhi_bpsi_op (desto, counto, val, desta, count));
3611
3612 return 1;
3613 }
3614
3615 /* This is a memcpy() opcode. All operands are implied, so we need to
3616 arrange for them to be in the right registers. The opcode wants
3617 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3618 is the source (MEM:BLK), and $2 the count (HI). */
3619 int
3620 m32c_expand_movmemhi(rtx *operands)
3621 {
3622 rtx desta, srca, count;
3623 rtx desto, srco, counto;
3624
3625 desta = XEXP (operands[0], 0);
3626 srca = XEXP (operands[1], 0);
3627 count = operands[2];
3628
3629 desto = gen_reg_rtx (Pmode);
3630 srco = gen_reg_rtx (Pmode);
3631 counto = gen_reg_rtx (HImode);
3632
3633 if (GET_CODE (desta) != REG
3634 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3635 desta = copy_to_mode_reg (Pmode, desta);
3636
3637 if (GET_CODE (srca) != REG
3638 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3639 srca = copy_to_mode_reg (Pmode, srca);
3640
3641 /* Similar to setmem, but we don't need to check the value. */
3642 if (GET_CODE (count) == CONST_INT
3643 && ! (INTVAL (count) & 1)
3644 && (INTVAL (count) > 1))
3645 {
3646 count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3647 if (TARGET_A16)
3648 emit_insn (gen_movmemhi_whi_op (desto, srco, counto, desta, srca, count));
3649 else
3650 emit_insn (gen_movmemhi_wpsi_op (desto, srco, counto, desta, srca, count));
3651 return 1;
3652 }
3653
3654 /* This is the generalized memset() case. */
3655 if (GET_CODE (count) != REG
3656 || REGNO (count) < FIRST_PSEUDO_REGISTER)
3657 count = copy_to_mode_reg (HImode, count);
3658
3659 if (TARGET_A16)
3660 emit_insn (gen_movmemhi_bhi_op (desto, srco, counto, desta, srca, count));
3661 else
3662 emit_insn (gen_movmemhi_bpsi_op (desto, srco, counto, desta, srca, count));
3663
3664 return 1;
3665 }
3666
3667 /* This is a stpcpy() opcode. $0 is the destination (MEM:BLK) after
3668 the copy, which should point to the NUL at the end of the string,
3669 $1 is the destination (MEM:BLK), and $2 is the source (MEM:BLK).
3670 Since our opcode leaves the destination pointing *after* the NUL,
3671 we must emit an adjustment. */
3672 int
3673 m32c_expand_movstr(rtx *operands)
3674 {
3675 rtx desta, srca;
3676 rtx desto, srco;
3677
3678 desta = XEXP (operands[1], 0);
3679 srca = XEXP (operands[2], 0);
3680
3681 desto = gen_reg_rtx (Pmode);
3682 srco = gen_reg_rtx (Pmode);
3683
3684 if (GET_CODE (desta) != REG
3685 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3686 desta = copy_to_mode_reg (Pmode, desta);
3687
3688 if (GET_CODE (srca) != REG
3689 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3690 srca = copy_to_mode_reg (Pmode, srca);
3691
3692 emit_insn (gen_movstr_op (desto, srco, desta, srca));
3693 /* desto ends up being a1, which allows this type of add through MOVA. */
3694 emit_insn (gen_addpsi3 (operands[0], desto, GEN_INT (-1)));
3695
3696 return 1;
3697 }
3698
3699 /* This is a strcmp() opcode. $0 is the destination (HI) which holds
3700 <=>0 depending on the comparison, $1 is one string (MEM:BLK), and
3701 $2 is the other (MEM:BLK). We must do the comparison, and then
3702 convert the flags to a signed integer result. */
3703 int
3704 m32c_expand_cmpstr(rtx *operands)
3705 {
3706 rtx src1a, src2a;
3707
3708 src1a = XEXP (operands[1], 0);
3709 src2a = XEXP (operands[2], 0);
3710
3711 if (GET_CODE (src1a) != REG
3712 || REGNO (src1a) < FIRST_PSEUDO_REGISTER)
3713 src1a = copy_to_mode_reg (Pmode, src1a);
3714
3715 if (GET_CODE (src2a) != REG
3716 || REGNO (src2a) < FIRST_PSEUDO_REGISTER)
3717 src2a = copy_to_mode_reg (Pmode, src2a);
3718
3719 emit_insn (gen_cmpstrhi_op (src1a, src2a, src1a, src2a));
3720 emit_insn (gen_cond_to_int (operands[0]));
3721
3722 return 1;
3723 }
3724
3725
3726 typedef rtx (*shift_gen_func)(rtx, rtx, rtx);
3727
3728 static shift_gen_func
3729 shift_gen_func_for (int mode, int code)
3730 {
3731 #define GFF(m,c,f) if (mode == m && code == c) return f
3732 GFF(QImode, ASHIFT, gen_ashlqi3_i);
3733 GFF(QImode, ASHIFTRT, gen_ashrqi3_i);
3734 GFF(QImode, LSHIFTRT, gen_lshrqi3_i);
3735 GFF(HImode, ASHIFT, gen_ashlhi3_i);
3736 GFF(HImode, ASHIFTRT, gen_ashrhi3_i);
3737 GFF(HImode, LSHIFTRT, gen_lshrhi3_i);
3738 GFF(PSImode, ASHIFT, gen_ashlpsi3_i);
3739 GFF(PSImode, ASHIFTRT, gen_ashrpsi3_i);
3740 GFF(PSImode, LSHIFTRT, gen_lshrpsi3_i);
3741 GFF(SImode, ASHIFT, TARGET_A16 ? gen_ashlsi3_16 : gen_ashlsi3_24);
3742 GFF(SImode, ASHIFTRT, TARGET_A16 ? gen_ashrsi3_16 : gen_ashrsi3_24);
3743 GFF(SImode, LSHIFTRT, TARGET_A16 ? gen_lshrsi3_16 : gen_lshrsi3_24);
3744 #undef GFF
3745 gcc_unreachable ();
3746 }
3747
/* The m32c only has one shift, but it takes a signed count.  GCC
   doesn't want this, so we fake it by negating any shift count when
   we're pretending to shift the other way.  Also, the shift count is
   limited to -8..8.  It's slightly better to use two shifts for 9..15
   than to load the count into r1h, so we do that too.

   OPERANDS are the usual dest/src/count triple; SCALE is +1 for left
   shifts and -1 for right shifts (the negation trick above);
   SHIFT_CODE selects the gen function.  Returns 1 if the insn was
   fully emitted here, 0 if the caller should emit it with the
   (possibly rewritten) operands.  */
int
m32c_prepare_shift (rtx * operands, int scale, int shift_code)
{
  machine_mode mode = GET_MODE (operands[0]);
  shift_gen_func func = shift_gen_func_for (mode, shift_code);
  rtx temp;

  if (GET_CODE (operands[2]) == CONST_INT)
    {
      /* Constant count: emit as many max-width shifts as needed.
	 A24 wide modes allow a 32-count; everything else caps at 8.  */
      int maxc = TARGET_A24 && (mode == PSImode || mode == SImode) ? 32 : 8;
      int count = INTVAL (operands[2]) * scale;

      while (count > maxc)
	{
	  temp = gen_reg_rtx (mode);
	  emit_insn (func (temp, operands[1], GEN_INT (maxc)));
	  operands[1] = temp;
	  count -= maxc;
	}
      while (count < -maxc)
	{
	  temp = gen_reg_rtx (mode);
	  emit_insn (func (temp, operands[1], GEN_INT (-maxc)));
	  operands[1] = temp;
	  count += maxc;
	}
      /* Final shift with the in-range remainder.  */
      emit_insn (func (operands[0], operands[1], GEN_INT (count)));
      return 1;
    }

  temp = gen_reg_rtx (QImode);
  if (scale < 0)
    /* The pattern has a NEG that corresponds to this.  */
    emit_move_insn (temp, gen_rtx_NEG (QImode, operands[2]));
  else if (TARGET_A16 && mode == SImode)
    /* We do this because the code below may modify this, we don't
       want to modify the origin of this value.  */
    emit_move_insn (temp, operands[2]);
  else
    /* We'll only use it for the shift, no point emitting a move.  */
    temp = operands[2];

  if (TARGET_A16 && GET_MODE_SIZE (mode) == 4)
    {
      /* The m16c has a limit of -16..16 for SI shifts, even when the
	 shift count is in a register.  Since there are so many targets
	 of these shifts, it's better to expand the RTL here than to
	 call a helper function.

	 The resulting code looks something like this:

	 cmp.b r1h,-16
	 jge.b 1f
	 shl.l -16,dest
	 add.b r1h,16
	 1f: cmp.b r1h,16
	 jle.b 1f
	 shl.l 16,dest
	 sub.b r1h,16
	 1f: shl.l r1h,dest

	 We take advantage of the fact that "negative" shifts are
	 undefined to skip one of the comparisons.  */

      rtx count;
      rtx label, tempvar;
      rtx_insn *insn;

      emit_move_insn (operands[0], operands[1]);

      count = temp;
      label = gen_label_rtx ();
      LABEL_NUSES (label) ++;

      tempvar = gen_reg_rtx (mode);

      if (shift_code == ASHIFT)
	{
	  /* This is a left shift.  We only need check positive counts.  */
	  emit_jump_insn (gen_cbranchqi4 (gen_rtx_LE (VOIDmode, 0, 0),
					  count, GEN_INT (16), label));
	  /* Two 8-bit shifts stand in for the out-of-range 16.  */
	  emit_insn (func (tempvar, operands[0], GEN_INT (8)));
	  emit_insn (func (operands[0], tempvar, GEN_INT (8)));
	  insn = emit_insn (gen_addqi3 (count, count, GEN_INT (-16)));
	  emit_label_after (label, insn);
	}
      else
	{
	  /* This is a right shift.  We only need check negative counts.  */
	  emit_jump_insn (gen_cbranchqi4 (gen_rtx_GE (VOIDmode, 0, 0),
					  count, GEN_INT (-16), label));
	  emit_insn (func (tempvar, operands[0], GEN_INT (-8)));
	  emit_insn (func (operands[0], tempvar, GEN_INT (-8)));
	  insn = emit_insn (gen_addqi3 (count, count, GEN_INT (16)));
	  emit_label_after (label, insn);
	}
      operands[1] = operands[0];
      emit_insn (func (operands[0], operands[0], count));
      return 1;
    }

  /* Hand the (possibly negated/copied) count back to the caller.  */
  operands[2] = temp;
  return 0;
}
3857
/* The m32c has a limited range of operations that work on PSImode
   values; we have to expand to SI, do the math, and truncate back to
   PSI.  Yes, this is expensive, but hopefully gcc will learn to avoid
   those cases.  */
void
m32c_expand_neg_mulpsi3 (rtx * operands)
{
  /* operands: a = b * i */
  rtx temp1; /* b as SI */
  rtx scale /* i as SI */;
  rtx temp2; /* a*b as SI */

  temp1 = gen_reg_rtx (SImode);
  temp2 = gen_reg_rtx (SImode);
  /* A register scale must be zero-extended PSI->SI; a constant can
     simply be copied into an SImode register.  */
  if (GET_CODE (operands[2]) != CONST_INT)
    {
      scale = gen_reg_rtx (SImode);
      emit_insn (gen_zero_extendpsisi2 (scale, operands[2]));
    }
  else
    scale = copy_to_mode_reg (SImode, operands[2]);

  emit_insn (gen_zero_extendpsisi2 (temp1, operands[1]));
  /* expand_simple_binop may return a different pseudo than the TEMP2
     target we suggested, hence the reassignment.  */
  temp2 = expand_simple_binop (SImode, MULT, temp1, scale, temp2, 1, OPTAB_LIB);
  emit_insn (gen_truncsipsi2 (operands[0], temp2));
}
3884
3885 /* Pattern Output Functions */
3886
3887 int
3888 m32c_expand_movcc (rtx *operands)
3889 {
3890 rtx rel = operands[1];
3891
3892 if (GET_CODE (rel) != EQ && GET_CODE (rel) != NE)
3893 return 1;
3894 if (GET_CODE (operands[2]) != CONST_INT
3895 || GET_CODE (operands[3]) != CONST_INT)
3896 return 1;
3897 if (GET_CODE (rel) == NE)
3898 {
3899 rtx tmp = operands[2];
3900 operands[2] = operands[3];
3901 operands[3] = tmp;
3902 rel = gen_rtx_EQ (GET_MODE (rel), XEXP (rel, 0), XEXP (rel, 1));
3903 }
3904
3905 emit_move_insn (operands[0],
3906 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
3907 rel,
3908 operands[2],
3909 operands[3]));
3910 return 0;
3911 }
3912
/* Used for the "insv" pattern.  Return nonzero to fail, else done.
   operands[0] is the field to write, operands[1] the bit width (must
   be 1), operands[2] the bit position, and operands[3] the value.  */
int
m32c_expand_insv (rtx *operands)
{
  rtx op0, src0, p;
  int mask;

  /* Only single-bit inserts can use BSET/BCLR.  */
  if (INTVAL (operands[1]) != 1)
    return 1;

  /* Our insv opcode (bset, bclr) can only insert a one-bit constant.  */
  if (GET_CODE (operands[3]) != CONST_INT)
    return 1;
  if (INTVAL (operands[3]) != 0
      && INTVAL (operands[3]) != 1
      && INTVAL (operands[3]) != -1)
    return 1;

  mask = 1 << INTVAL (operands[2]);

  /* Strip a byte-0 SUBREG of a QI/HI register so we operate on the
     inner register directly.  */
  op0 = operands[0];
  if (GET_CODE (op0) == SUBREG
      && SUBREG_BYTE (op0) == 0)
    {
      rtx sub = SUBREG_REG (op0);
      if (GET_MODE (sub) == HImode || GET_MODE (sub) == QImode)
	op0 = sub;
    }

  /* Use a scratch source when we can, so the AND/OR fallback below
     reads a copy rather than the (possibly volatile) destination.  */
  if (!can_create_pseudo_p ()
      || (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0)))
    src0 = op0;
  else
    {
      src0 = gen_reg_rtx (GET_MODE (op0));
      emit_move_insn (src0, op0);
    }

  if (GET_MODE (op0) == HImode
      && INTVAL (operands[2]) >= 8
      && GET_CODE (op0) == MEM)
    {
      /* We are little endian.  */
      rtx new_mem = gen_rtx_MEM (QImode, plus_constant (Pmode,
							XEXP (op0, 0), 1));
      MEM_COPY_ATTRIBUTES (new_mem, op0);
      /* NOTE(review): NEW_MEM is computed but never used -- OP0 is
	 not replaced with it, so only the mask is narrowed here.
	 This looks like a latent bug; confirm the intent before
	 changing it.  */
      mask >>= 8;
    }

  /* First, we generate a mask with the correct polarity.  If we are
     storing a zero, we want an AND mask, so invert it.  */
  if (INTVAL (operands[3]) == 0)
    {
      /* Storing a zero, use an AND mask */
      if (GET_MODE (op0) == HImode)
	mask ^= 0xffff;
      else
	mask ^= 0xff;
    }
  /* Now we need to properly sign-extend the mask in case we need to
     fall back to an AND or OR opcode.  */
  if (GET_MODE (op0) == HImode)
    {
      if (mask & 0x8000)
	mask -= 0x10000;
    }
  else
    {
      if (mask & 0x80)
	mask -= 0x100;
    }

  /* Dispatch on value polarity (AND vs IOR), operand width, and
     address size, matching the named patterns in the .md files.  */
  switch (  (INTVAL (operands[3]) ? 4 : 0)
	  + ((GET_MODE (op0) == HImode) ? 2 : 0)
	  + (TARGET_A24 ? 1 : 0))
    {
    case 0: p = gen_andqi3_16 (op0, src0, GEN_INT (mask)); break;
    case 1: p = gen_andqi3_24 (op0, src0, GEN_INT (mask)); break;
    case 2: p = gen_andhi3_16 (op0, src0, GEN_INT (mask)); break;
    case 3: p = gen_andhi3_24 (op0, src0, GEN_INT (mask)); break;
    case 4: p = gen_iorqi3_16 (op0, src0, GEN_INT (mask)); break;
    case 5: p = gen_iorqi3_24 (op0, src0, GEN_INT (mask)); break;
    case 6: p = gen_iorhi3_16 (op0, src0, GEN_INT (mask)); break;
    case 7: p = gen_iorhi3_24 (op0, src0, GEN_INT (mask)); break;
    default: p = NULL_RTX; break; /* Not reached, but silences a warning.  */
    }

  emit_insn (p);
  return 0;
}
4003
4004 const char *
4005 m32c_scc_pattern(rtx *operands, RTX_CODE code)
4006 {
4007 static char buf[30];
4008 if (GET_CODE (operands[0]) == REG
4009 && REGNO (operands[0]) == R0_REGNO)
4010 {
4011 if (code == EQ)
4012 return "stzx\t#1,#0,r0l";
4013 if (code == NE)
4014 return "stzx\t#0,#1,r0l";
4015 }
4016 sprintf(buf, "bm%s\t0,%%h0\n\tand.b\t#1,%%0", GET_RTX_NAME (code));
4017 return buf;
4018 }
4019
4020 /* Encode symbol attributes of a SYMBOL_REF into its
4021 SYMBOL_REF_FLAGS. */
4022 static void
4023 m32c_encode_section_info (tree decl, rtx rtl, int first)
4024 {
4025 int extra_flags = 0;
4026
4027 default_encode_section_info (decl, rtl, first);
4028 if (TREE_CODE (decl) == FUNCTION_DECL
4029 && m32c_special_page_vector_p (decl))
4030
4031 extra_flags = SYMBOL_FLAG_FUNCVEC_FUNCTION;
4032
4033 if (extra_flags)
4034 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= extra_flags;
4035 }
4036
/* Returns TRUE if the current function is a leaf, and thus we can
   determine which registers an interrupt function really needs to
   save.  The logic below is mostly about finding the insn sequence
   that's the function, versus any sequence that might be open for the
   current insn.  */
static int
m32c_leaf_function_p (void)
{
  rtx_insn *saved_first, *saved_last;
  struct sequence_stack *seq;
  int rv;

  /* Save the current insn chain so we can temporarily swap in the
     function-level one for leaf_function_p ().  */
  saved_first = crtl->emit.x_first_insn;
  saved_last = crtl->emit.x_last_insn;
  /* Walk to the bottom of the sequence stack; that entry holds the
     function's own insn chain.  */
  for (seq = crtl->emit.sequence_stack; seq && seq->next; seq = seq->next)
    ;
  if (seq)
    {
      crtl->emit.x_first_insn = seq->first;
      crtl->emit.x_last_insn = seq->last;
    }

  rv = leaf_function_p ();

  /* Restore whatever chain was current before.  */
  crtl->emit.x_first_insn = saved_first;
  crtl->emit.x_last_insn = saved_last;
  return rv;
}
4065
/* Returns TRUE if the current function needs to use the ENTER/EXIT
   opcodes.  If the function doesn't need the frame base or stack
   pointer, it can use the simpler RTS opcode.  */
static bool
m32c_function_needs_enter (void)
{
  rtx_insn *insn;
  struct sequence_stack *seq;
  rtx sp = gen_rtx_REG (Pmode, SP_REGNO);
  rtx fb = gen_rtx_REG (Pmode, FB_REGNO);

  /* Start at the bottom of the sequence stack so INSN is the first
     insn of the function-level chain, not of any open sequence.  */
  insn = get_insns ();
  for (seq = crtl->emit.sequence_stack;
       seq;
       insn = seq->first, seq = seq->next);

  /* Any mention of $sp or $fb means ENTER/EXIT is required.  */
  while (insn)
    {
      if (reg_mentioned_p (sp, insn))
	return true;
      if (reg_mentioned_p (fb, insn))
	return true;
      insn = NEXT_INSN (insn);
    }
  return false;
}
4092
4093 /* Mark all the subexpressions of the PARALLEL rtx PAR as
4094 frame-related. Return PAR.
4095
4096 dwarf2out.c:dwarf2out_frame_debug_expr ignores sub-expressions of a
4097 PARALLEL rtx other than the first if they do not have the
4098 FRAME_RELATED flag set on them. So this function is handy for
4099 marking up 'enter' instructions. */
4100 static rtx
4101 m32c_all_frame_related (rtx par)
4102 {
4103 int len = XVECLEN (par, 0);
4104 int i;
4105
4106 for (i = 0; i < len; i++)
4107 F (XVECEXP (par, 0, i));
4108
4109 return par;
4110 }
4111
/* Emits the prologue.  See the frame layout comment earlier in this
   file.  We can reserve up to 256 bytes with the ENTER opcode, beyond
   that we manually update sp.  */
void
m32c_emit_prologue (void)
{
  int frame_size, extra_frame_size = 0, reg_save_size;
  /* Nonzero when anything beyond a plain ENTER is emitted; used only
     to decide whether to emit the prologue_end marker.  */
  int complex_prologue = 0;

  cfun->machine->is_leaf = m32c_leaf_function_p ();
  if (interrupt_p (cfun->decl))
    {
      cfun->machine->is_interrupt = 1;
      complex_prologue = 1;
    }
  else if (bank_switch_p (cfun->decl))
    warning (OPT_Wattributes,
	     "%<bank_switch%> has no effect on non-interrupt functions");

  reg_save_size = m32c_pushm_popm (PP_justcount);

  /* Interrupts save registers via FSET B (bank switch) or PUSHM.  */
  if (interrupt_p (cfun->decl))
    {
      if (bank_switch_p (cfun->decl))
	emit_insn (gen_fset_b ());
      else if (cfun->machine->intr_pushm)
	emit_insn (gen_pushm (GEN_INT (cfun->machine->intr_pushm)));
    }

  frame_size =
    m32c_initial_elimination_offset (FB_REGNO, SP_REGNO) - reg_save_size;
  /* With no frame and no $sp/$fb references, a bare RTS suffices.  */
  if (frame_size == 0
      && !m32c_function_needs_enter ())
    cfun->machine->use_rts = 1;

  /* ENTER's immediate caps at 254 here; the remainder is subtracted
     from $sp explicitly below.  */
  if (frame_size > 254)
    {
      extra_frame_size = frame_size - 254;
      frame_size = 254;
    }
  /* The +2/+4 added to the ENTER operand presumably covers the saved
     frame base -- TODO confirm against the frame layout comment.  */
  if (cfun->machine->use_rts == 0)
    F (emit_insn (m32c_all_frame_related
		  (TARGET_A16
		   ? gen_prologue_enter_16 (GEN_INT (frame_size + 2))
		   : gen_prologue_enter_24 (GEN_INT (frame_size + 4)))));

  if (extra_frame_size)
    {
      complex_prologue = 1;
      if (TARGET_A16)
	F (emit_insn (gen_addhi3 (gen_rtx_REG (HImode, SP_REGNO),
				  gen_rtx_REG (HImode, SP_REGNO),
				  GEN_INT (-extra_frame_size))));
      else
	F (emit_insn (gen_addpsi3 (gen_rtx_REG (PSImode, SP_REGNO),
				   gen_rtx_REG (PSImode, SP_REGNO),
				   GEN_INT (-extra_frame_size))));
    }

  complex_prologue += m32c_pushm_popm (PP_pushm);

  /* This just emits a comment into the .s file for debugging.  */
  if (complex_prologue)
    emit_insn (gen_prologue_end ());
}
4177
/* Likewise, for the epilogue.  The only exception is that, for
   interrupts, we must manually unwind the frame as the REIT opcode
   doesn't do that.  */
void
m32c_emit_epilogue (void)
{
  int popm_count = m32c_pushm_popm (PP_justcount);

  /* This just emits a comment into the .s file for debugging.  */
  if (popm_count > 0 || cfun->machine->is_interrupt)
    emit_insn (gen_epilogue_start ());

  /* Restore the call-saved registers counted above.  */
  if (popm_count > 0)
    m32c_pushm_popm (PP_popm);

  if (cfun->machine->is_interrupt)
    {
      machine_mode spmode = TARGET_A16 ? HImode : PSImode;

      /* REIT clears B flag and restores $fp for us, but we still
	 have to fix up the stack.  USE_RTS just means we didn't
	 emit ENTER.  */
      if (!cfun->machine->use_rts)
	{
	  /* $sp = $fp via $a0, then pop the saved $fp.  */
	  emit_move_insn (gen_rtx_REG (spmode, A0_REGNO),
			  gen_rtx_REG (spmode, FP_REGNO));
	  emit_move_insn (gen_rtx_REG (spmode, SP_REGNO),
			  gen_rtx_REG (spmode, A0_REGNO));
	  /* We can't just add this to the POPM because it would be in
	     the wrong order, and wouldn't fix the stack if we're bank
	     switching.  */
	  if (TARGET_A16)
	    emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, FP_REGNO)));
	  else
	    emit_insn (gen_poppsi (gen_rtx_REG (PSImode, FP_REGNO)));
	}
      if (!bank_switch_p (cfun->decl) && cfun->machine->intr_pushm)
	emit_insn (gen_popm (GEN_INT (cfun->machine->intr_pushm)));

      /* The FREIT (Fast REturn from InTerrupt) instruction should be
	 generated only for M32C/M32CM targets (generate the REIT
	 instruction otherwise).  */
      if (fast_interrupt_p (cfun->decl))
	{
	  /* Check if fast_attribute is set for M32C or M32CM.  */
	  if (TARGET_A24)
	    {
	      emit_jump_insn (gen_epilogue_freit ());
	    }
	  /* If fast_interrupt attribute is set for an R8C or M16C
	     target ignore this attribute and generated REIT
	     instruction.  */
	  else
	    {
	      warning (OPT_Wattributes,
		       "%<fast_interrupt%> attribute directive ignored");
	      emit_jump_insn (gen_epilogue_reit_16 ());
	    }
	}
      else if (TARGET_A16)
	emit_jump_insn (gen_epilogue_reit_16 ());
      else
	emit_jump_insn (gen_epilogue_reit_24 ());
    }
  else if (cfun->machine->use_rts)
    emit_jump_insn (gen_epilogue_rts ());
  else if (TARGET_A16)
    emit_jump_insn (gen_epilogue_exitd_16 ());
  else
    emit_jump_insn (gen_epilogue_exitd_24 ());
}
4249
/* Emit the exception-handling epilogue.  RET_ADDR is the register
   holding the address to return to.  */
void
m32c_emit_eh_epilogue (rtx ret_addr)
{
  /* R0[R2] has the stack adjustment.  R1[R3] has the address to
     return to.  We have to fudge the stack, pop everything, pop SP
     (fudged), and return (fudged).  This is actually easier to do in
     assembler, so punt to libgcc.  */
  emit_jump_insn (gen_eh_epilogue (ret_addr, cfun->machine->eh_stack_adjust));
  /* emit_clobber (gen_rtx_REG (HImode, R0L_REGNO)); */
}
4260
4261 /* Indicate which flags must be properly set for a given conditional. */
4262 static int
4263 flags_needed_for_conditional (rtx cond)
4264 {
4265 switch (GET_CODE (cond))
4266 {
4267 case LE:
4268 case GT:
4269 return FLAGS_OSZ;
4270 case LEU:
4271 case GTU:
4272 return FLAGS_ZC;
4273 case LT:
4274 case GE:
4275 return FLAGS_OS;
4276 case LTU:
4277 case GEU:
4278 return FLAGS_C;
4279 case EQ:
4280 case NE:
4281 return FLAGS_Z;
4282 default:
4283 return FLAGS_N;
4284 }
4285 }
4286
4287 #define DEBUG_CMP 0
4288
4289 /* Returns true if a compare insn is redundant because it would only
4290 set flags that are already set correctly. */
4291 static bool
4292 m32c_compare_redundant (rtx_insn *cmp, rtx *operands)
4293 {
4294 int flags_needed;
4295 int pflags;
4296 rtx_insn *prev;
4297 rtx pp, next;
4298 rtx op0, op1;
4299 #if DEBUG_CMP
4300 int prev_icode, i;
4301 #endif
4302
4303 op0 = operands[0];
4304 op1 = operands[1];
4305
4306 #if DEBUG_CMP
4307 fprintf(stderr, "\n\033[32mm32c_compare_redundant\033[0m\n");
4308 debug_rtx(cmp);
4309 for (i=0; i<2; i++)
4310 {
4311 fprintf(stderr, "operands[%d] = ", i);
4312 debug_rtx(operands[i]);
4313 }
4314 #endif
4315
4316 next = next_nonnote_insn (cmp);
4317 if (!next || !INSN_P (next))
4318 {
4319 #if DEBUG_CMP
4320 fprintf(stderr, "compare not followed by insn\n");
4321 debug_rtx(next);
4322 #endif
4323 return false;
4324 }
4325 if (GET_CODE (PATTERN (next)) == SET
4326 && GET_CODE (XEXP ( PATTERN (next), 1)) == IF_THEN_ELSE)
4327 {
4328 next = XEXP (XEXP (PATTERN (next), 1), 0);
4329 }
4330 else if (GET_CODE (PATTERN (next)) == SET)
4331 {
4332 /* If this is a conditional, flags_needed will be something
4333 other than FLAGS_N, which we test below. */
4334 next = XEXP (PATTERN (next), 1);
4335 }
4336 else
4337 {
4338 #if DEBUG_CMP
4339 fprintf(stderr, "compare not followed by conditional\n");
4340 debug_rtx(next);
4341 #endif
4342 return false;
4343 }
4344 #if DEBUG_CMP
4345 fprintf(stderr, "conditional is: ");
4346 debug_rtx(next);
4347 #endif
4348
4349 flags_needed = flags_needed_for_conditional (next);
4350 if (flags_needed == FLAGS_N)
4351 {
4352 #if DEBUG_CMP
4353 fprintf(stderr, "compare not followed by conditional\n");
4354 debug_rtx(next);
4355 #endif
4356 return false;
4357 }
4358
4359 /* Compare doesn't set overflow and carry the same way that
4360 arithmetic instructions do, so we can't replace those. */
4361 if (flags_needed & FLAGS_OC)
4362 return false;
4363
4364 prev = cmp;
4365 do {
4366 prev = prev_nonnote_insn (prev);
4367 if (!prev)
4368 {
4369 #if DEBUG_CMP
4370 fprintf(stderr, "No previous insn.\n");
4371 #endif
4372 return false;
4373 }
4374 if (!INSN_P (prev))
4375 {
4376 #if DEBUG_CMP
4377 fprintf(stderr, "Previous insn is a non-insn.\n");
4378 #endif
4379 return false;
4380 }
4381 pp = PATTERN (prev);
4382 if (GET_CODE (pp) != SET)
4383 {
4384 #if DEBUG_CMP
4385 fprintf(stderr, "Previous insn is not a SET.\n");
4386 #endif
4387 return false;
4388 }
4389 pflags = get_attr_flags (prev);
4390
4391 /* Looking up attributes of previous insns corrupted the recog
4392 tables. */
4393 INSN_UID (cmp) = -1;
4394 recog (PATTERN (cmp), cmp, 0);
4395
4396 if (pflags == FLAGS_N
4397 && reg_mentioned_p (op0, pp))
4398 {
4399 #if DEBUG_CMP
4400 fprintf(stderr, "intermediate non-flags insn uses op:\n");
4401 debug_rtx(prev);
4402 #endif
4403 return false;
4404 }
4405
4406 /* Check for comparisons against memory - between volatiles and
4407 aliases, we just can't risk this one. */
4408 if (GET_CODE (operands[0]) == MEM
4409 || GET_CODE (operands[0]) == MEM)
4410 {
4411 #if DEBUG_CMP
4412 fprintf(stderr, "comparisons with memory:\n");
4413 debug_rtx(prev);
4414 #endif
4415 return false;
4416 }
4417
4418 /* Check for PREV changing a register that's used to compute a
4419 value in CMP, even if it doesn't otherwise change flags. */
4420 if (GET_CODE (operands[0]) == REG
4421 && rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[0]))
4422 {
4423 #if DEBUG_CMP
4424 fprintf(stderr, "sub-value affected, op0:\n");
4425 debug_rtx(prev);
4426 #endif
4427 return false;
4428 }
4429 if (GET_CODE (operands[1]) == REG
4430 && rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[1]))
4431 {
4432 #if DEBUG_CMP
4433 fprintf(stderr, "sub-value affected, op1:\n");
4434 debug_rtx(prev);
4435 #endif
4436 return false;
4437 }
4438
4439 } while (pflags == FLAGS_N);
4440 #if DEBUG_CMP
4441 fprintf(stderr, "previous flag-setting insn:\n");
4442 debug_rtx(prev);
4443 debug_rtx(pp);
4444 #endif
4445
4446 if (GET_CODE (pp) == SET
4447 && GET_CODE (XEXP (pp, 0)) == REG
4448 && REGNO (XEXP (pp, 0)) == FLG_REGNO
4449 && GET_CODE (XEXP (pp, 1)) == COMPARE)
4450 {
4451 /* Adjacent cbranches must have the same operands to be
4452 redundant. */
4453 rtx pop0 = XEXP (XEXP (pp, 1), 0);
4454 rtx pop1 = XEXP (XEXP (pp, 1), 1);
4455 #if DEBUG_CMP
4456 fprintf(stderr, "adjacent cbranches\n");
4457 debug_rtx(pop0);
4458 debug_rtx(pop1);
4459 #endif
4460 if (rtx_equal_p (op0, pop0)
4461 && rtx_equal_p (op1, pop1))
4462 return true;
4463 #if DEBUG_CMP
4464 fprintf(stderr, "prev cmp not same\n");
4465 #endif
4466 return false;
4467 }
4468
4469 /* Else the previous insn must be a SET, with either the source or
4470 dest equal to operands[0], and operands[1] must be zero. */
4471
4472 if (!rtx_equal_p (op1, const0_rtx))
4473 {
4474 #if DEBUG_CMP
4475 fprintf(stderr, "operands[1] not const0_rtx\n");
4476 #endif
4477 return false;
4478 }
4479 if (GET_CODE (pp) != SET)
4480 {
4481 #if DEBUG_CMP
4482 fprintf (stderr, "pp not set\n");
4483 #endif
4484 return false;
4485 }
4486 if (!rtx_equal_p (op0, SET_SRC (pp))
4487 && !rtx_equal_p (op0, SET_DEST (pp)))
4488 {
4489 #if DEBUG_CMP
4490 fprintf(stderr, "operands[0] not found in set\n");
4491 #endif
4492 return false;
4493 }
4494
4495 #if DEBUG_CMP
4496 fprintf(stderr, "cmp flags %x prev flags %x\n", flags_needed, pflags);
4497 #endif
4498 if ((pflags & flags_needed) == flags_needed)
4499 return true;
4500
4501 return false;
4502 }
4503
/* Return the pattern for a compare.  This will be commented out if
   the compare is redundant, else a normal pattern is returned.  Thus,
   the assembler output says where the compare would have been.  */
char *
m32c_output_compare (rtx_insn *insn, rtx *operands)
{
  static char templ[] = ";cmp.b\t%1,%0";
  /*             ^ 5  */

  /* Patch the size suffix in place: index 5 is the 'b' of "cmp.b";
     " bwll" maps operand sizes 1..4 to b/w/l/l.  */
  templ[5] = " bwll"[GET_MODE_SIZE(GET_MODE(operands[0]))];
  if (m32c_compare_redundant (insn, operands))
    {
#if DEBUG_CMP
      fprintf(stderr, "cbranch: cmp not needed\n");
#endif
      /* Keep the leading ';' so the compare appears as a comment.  */
      return templ;
    }

#if DEBUG_CMP
  fprintf(stderr, "cbranch: cmp needed: `%s'\n", templ + 1);
#endif
  /* Skip the ';' to emit a real compare.  */
  return templ + 1;
}
4527
4528 #undef TARGET_ENCODE_SECTION_INFO
4529 #define TARGET_ENCODE_SECTION_INFO m32c_encode_section_info
4530
4531 /* If the frame pointer isn't used, we detect it manually. But the
4532 stack pointer doesn't have as flexible addressing as the frame
4533 pointer, so we always assume we have it. */
4534
4535 #undef TARGET_FRAME_POINTER_REQUIRED
4536 #define TARGET_FRAME_POINTER_REQUIRED hook_bool_void_true
4537
4538 /* The Global `targetm' Variable. */
4539
4540 struct gcc_target targetm = TARGET_INITIALIZER;
4541
4542 #include "gt-m32c.h"