]> git.ipfire.org Git - thirdparty/gcc.git/blob - gcc/config/m32c/m32c.c
ec30b8d7f9ba1c2fb7c4b2f205a488f97dbed7c3
[thirdparty/gcc.git] / gcc / config / m32c / m32c.c
1 /* Target Code for R8C/M16C/M32C
2 Copyright (C) 2005-2013 Free Software Foundation, Inc.
3 Contributed by Red Hat.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published
9 by the Free Software Foundation; either version 3, or (at your
10 option) any later version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "regs.h"
27 #include "hard-reg-set.h"
28 #include "insn-config.h"
29 #include "conditions.h"
30 #include "insn-flags.h"
31 #include "output.h"
32 #include "insn-attr.h"
33 #include "flags.h"
34 #include "recog.h"
35 #include "reload.h"
36 #include "diagnostic-core.h"
37 #include "obstack.h"
38 #include "tree.h"
39 #include "stor-layout.h"
40 #include "varasm.h"
41 #include "calls.h"
42 #include "expr.h"
43 #include "optabs.h"
44 #include "except.h"
45 #include "function.h"
46 #include "ggc.h"
47 #include "target.h"
48 #include "target-def.h"
49 #include "tm_p.h"
50 #include "langhooks.h"
51 #include "gimple.h"
52 #include "df.h"
53 #include "tm-constrs.h"
54
55 /* Prototypes */
56
57 /* Used by m32c_pushm_popm. */
58 typedef enum
59 {
60 PP_pushm,
61 PP_popm,
62 PP_justcount
63 } Push_Pop_Type;
64
65 static bool m32c_function_needs_enter (void);
66 static tree interrupt_handler (tree *, tree, tree, int, bool *);
67 static tree function_vector_handler (tree *, tree, tree, int, bool *);
68 static int interrupt_p (tree node);
69 static int bank_switch_p (tree node);
70 static int fast_interrupt_p (tree node);
71 static int interrupt_p (tree node);
72 static bool m32c_asm_integer (rtx, unsigned int, int);
73 static int m32c_comp_type_attributes (const_tree, const_tree);
74 static bool m32c_fixed_condition_code_regs (unsigned int *, unsigned int *);
75 static struct machine_function *m32c_init_machine_status (void);
76 static void m32c_insert_attributes (tree, tree *);
77 static bool m32c_legitimate_address_p (enum machine_mode, rtx, bool);
78 static bool m32c_addr_space_legitimate_address_p (enum machine_mode, rtx, bool, addr_space_t);
79 static rtx m32c_function_arg (cumulative_args_t, enum machine_mode,
80 const_tree, bool);
81 static bool m32c_pass_by_reference (cumulative_args_t, enum machine_mode,
82 const_tree, bool);
83 static void m32c_function_arg_advance (cumulative_args_t, enum machine_mode,
84 const_tree, bool);
85 static unsigned int m32c_function_arg_boundary (enum machine_mode, const_tree);
86 static int m32c_pushm_popm (Push_Pop_Type);
87 static bool m32c_strict_argument_naming (cumulative_args_t);
88 static rtx m32c_struct_value_rtx (tree, int);
89 static rtx m32c_subreg (enum machine_mode, rtx, enum machine_mode, int);
90 static int need_to_save (int);
91 static rtx m32c_function_value (const_tree, const_tree, bool);
92 static rtx m32c_libcall_value (enum machine_mode, const_rtx);
93
94 /* Returns true if an address is specified, else false. */
95 static bool m32c_get_pragma_address (const char *varname, unsigned *addr);
96
97 #define SYMBOL_FLAG_FUNCVEC_FUNCTION (SYMBOL_FLAG_MACH_DEP << 0)
98
99 #define streq(a,b) (strcmp ((a), (b)) == 0)
100
101 /* Internal support routines */
102
103 /* Debugging statements are tagged with DEBUG0 only so that they can
104 be easily enabled individually, by replacing the '0' with '1' as
105 needed. */
106 #define DEBUG0 0
107 #define DEBUG1 1
108
109 #if DEBUG0
110 /* This is needed by some of the commented-out debug statements
111 below. */
112 static char const *class_names[LIM_REG_CLASSES] = REG_CLASS_NAMES;
113 #endif
114 static int class_contents[LIM_REG_CLASSES][1] = REG_CLASS_CONTENTS;
115
116 /* These are all to support encode_pattern(). */
117 static char pattern[30], *patternp;
118 static GTY(()) rtx patternr[30];
119 #define RTX_IS(x) (streq (pattern, x))
120
121 /* Some macros to simplify the logic throughout this file. */
122 #define IS_MEM_REGNO(regno) ((regno) >= MEM0_REGNO && (regno) <= MEM7_REGNO)
123 #define IS_MEM_REG(rtx) (GET_CODE (rtx) == REG && IS_MEM_REGNO (REGNO (rtx)))
124
125 #define IS_CR_REGNO(regno) ((regno) >= SB_REGNO && (regno) <= PC_REGNO)
126 #define IS_CR_REG(rtx) (GET_CODE (rtx) == REG && IS_CR_REGNO (REGNO (rtx)))
127
/* Return nonzero if X is a MEM whose address space is ADDR_SPACE_FAR;
   zero for non-MEM rtx or near-space MEMs.  */
static int
far_addr_space_p (rtx x)
{
  if (GET_CODE (x) != MEM)
    return 0;
#if DEBUG0
  fprintf(stderr, "\033[35mfar_addr_space: "); debug_rtx(x);
  fprintf(stderr, " = %d\033[0m\n", MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR);
#endif
  return MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR;
}
139
140 /* We do most RTX matching by converting the RTX into a string, and
141 using string compares. This vastly simplifies the logic in many of
142 the functions in this file.
143
144 On exit, pattern[] has the encoded string (use RTX_IS("...") to
145 compare it) and patternr[] has pointers to the nodes in the RTX
146 corresponding to each character in the encoded string. The latter
147 is mostly used by print_operand().
148
149 Unrecognized patterns have '?' in them; this shows up when the
150 assembler complains about syntax errors.
151 */
152
/* Recursive worker for encode_pattern: append a character code for X
   to pattern[] (via patternp) and record X itself in patternr[] at the
   corresponding index, then recurse into X's operands.  Unknown codes
   encode as '?'.  Guards against overflowing pattern[] by writing '?'
   and stopping when nearly full.  */
static void
encode_pattern_1 (rtx x)
{
  int i;

  /* Leave room for the terminating NUL; overwrite the last slot with
     '?' rather than overflow.  */
  if (patternp == pattern + sizeof (pattern) - 2)
    {
      patternp[-1] = '?';
      return;
    }

  patternr[patternp - pattern] = x;

  switch (GET_CODE (x))
    {
    case REG:
      *patternp++ = 'r';
      break;
    case SUBREG:
      /* Only size-changing subregs get the 'S' marker.  */
      if (GET_MODE_SIZE (GET_MODE (x)) !=
	  GET_MODE_SIZE (GET_MODE (XEXP (x, 0))))
	*patternp++ = 'S';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case MEM:
      *patternp++ = 'm';
      /* FALLTHROUGH - both MEM and CONST recurse into operand 0.  */
    case CONST:
      encode_pattern_1 (XEXP (x, 0));
      break;
    case SIGN_EXTEND:
      *patternp++ = '^';
      *patternp++ = 'S';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case ZERO_EXTEND:
      *patternp++ = '^';
      *patternp++ = 'Z';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case PLUS:
      *patternp++ = '+';
      encode_pattern_1 (XEXP (x, 0));
      encode_pattern_1 (XEXP (x, 1));
      break;
    case PRE_DEC:
      *patternp++ = '>';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case POST_INC:
      *patternp++ = '<';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case LO_SUM:
      *patternp++ = 'L';
      encode_pattern_1 (XEXP (x, 0));
      encode_pattern_1 (XEXP (x, 1));
      break;
    case HIGH:
      *patternp++ = 'H';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case SYMBOL_REF:
      *patternp++ = 's';
      break;
    case LABEL_REF:
      *patternp++ = 'l';
      break;
    case CODE_LABEL:
      *patternp++ = 'c';
      break;
    case CONST_INT:
    case CONST_DOUBLE:
      *patternp++ = 'i';
      break;
    case UNSPEC:
      /* 'u' plus the unspec number as a digit, then all operands.  */
      *patternp++ = 'u';
      *patternp++ = '0' + XCINT (x, 1, UNSPEC);
      for (i = 0; i < XVECLEN (x, 0); i++)
	encode_pattern_1 (XVECEXP (x, 0, i));
      break;
    case USE:
      *patternp++ = 'U';
      break;
    case PARALLEL:
      *patternp++ = '|';
      for (i = 0; i < XVECLEN (x, 0); i++)
	encode_pattern_1 (XVECEXP (x, 0, i));
      break;
    case EXPR_LIST:
      *patternp++ = 'E';
      encode_pattern_1 (XEXP (x, 0));
      if (XEXP (x, 1))
	encode_pattern_1 (XEXP (x, 1));
      break;
    default:
      /* Unhandled codes surface as '?', which will never match any
	 RTX_IS() string.  */
      *patternp++ = '?';
#if DEBUG0
      fprintf (stderr, "can't encode pattern %s\n",
	       GET_RTX_NAME (GET_CODE (x)));
      debug_rtx (x);
      gcc_unreachable ();
#endif
      break;
    }
}
258
259 static void
260 encode_pattern (rtx x)
261 {
262 patternp = pattern;
263 encode_pattern_1 (x);
264 *patternp = 0;
265 }
266
267 /* Since register names indicate the mode they're used in, we need a
268 way to determine which name to refer to the register with. Called
269 by print_operand(). */
270
271 static const char *
272 reg_name_with_mode (int regno, enum machine_mode mode)
273 {
274 int mlen = GET_MODE_SIZE (mode);
275 if (regno == R0_REGNO && mlen == 1)
276 return "r0l";
277 if (regno == R0_REGNO && (mlen == 3 || mlen == 4))
278 return "r2r0";
279 if (regno == R0_REGNO && mlen == 6)
280 return "r2r1r0";
281 if (regno == R0_REGNO && mlen == 8)
282 return "r3r1r2r0";
283 if (regno == R1_REGNO && mlen == 1)
284 return "r1l";
285 if (regno == R1_REGNO && (mlen == 3 || mlen == 4))
286 return "r3r1";
287 if (regno == A0_REGNO && TARGET_A16 && (mlen == 3 || mlen == 4))
288 return "a1a0";
289 return reg_names[regno];
290 }
291
292 /* How many bytes a register uses on stack when it's pushed. We need
293 to know this because the push opcode needs to explicitly indicate
294 the size of the register, even though the name of the register
295 already tells it that. Used by m32c_output_reg_{push,pop}, which
296 is only used through calls to ASM_OUTPUT_REG_{PUSH,POP}. */
297
298 static int
299 reg_push_size (int regno)
300 {
301 switch (regno)
302 {
303 case R0_REGNO:
304 case R1_REGNO:
305 return 2;
306 case R2_REGNO:
307 case R3_REGNO:
308 case FLG_REGNO:
309 return 2;
310 case A0_REGNO:
311 case A1_REGNO:
312 case SB_REGNO:
313 case FB_REGNO:
314 case SP_REGNO:
315 if (TARGET_A16)
316 return 2;
317 else
318 return 3;
319 default:
320 gcc_unreachable ();
321 }
322 }
323
324 /* Given two register classes, find the largest intersection between
325 them. If there is no intersection, return RETURNED_IF_EMPTY
326 instead. */
/* Given two register classes, find the largest intersection between
   them.  If there is no intersection, return RETURNED_IF_EMPTY
   instead.  "Largest" means the named register class with the most
   registers that is wholly contained in the intersection.  */
static reg_class_t
reduce_class (reg_class_t original_class, reg_class_t limiting_class,
	      reg_class_t returned_if_empty)
{
  HARD_REG_SET cc;
  int i;
  reg_class_t best = NO_REGS;
  unsigned int best_size = 0;

  /* Identical classes intersect to themselves; skip the search.  */
  if (original_class == limiting_class)
    return original_class;

  /* cc = registers common to both classes.  */
  cc = reg_class_contents[original_class];
  AND_HARD_REG_SET (cc, reg_class_contents[limiting_class]);

  /* Pick the biggest named class that fits entirely inside cc.  */
  for (i = 0; i < LIM_REG_CLASSES; i++)
    {
      if (hard_reg_set_subset_p (reg_class_contents[i], cc))
	if (best_size < reg_class_size[i])
	  {
	    best = (reg_class_t) i;
	    best_size = reg_class_size[i];
	  }

    }
  if (best == NO_REGS)
    return returned_if_empty;
  return best;
}
356
357 /* Used by m32c_register_move_cost to determine if a move is
358 impossibly expensive. */
/* Used by m32c_register_move_cost to determine if a move is
   impossibly expensive.  Returns true if any register in RCLASS can
   hold a value of MODE.  Results are memoized in a static table since
   both inputs are small enums.  */
static bool
class_can_hold_mode (reg_class_t rclass, enum machine_mode mode)
{
  /* Cache the results:  0=untested 1=no 2=yes */
  static char results[LIM_REG_CLASSES][MAX_MACHINE_MODE];

  if (results[(int) rclass][mode] == 0)
    {
      int r;
      /* Assume "no" until a suitable register is found.  */
      results[rclass][mode] = 1;
      for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
	if (in_hard_reg_set_p (reg_class_contents[(int) rclass], mode, r)
	    && HARD_REGNO_MODE_OK (r, mode))
	  {
	    results[rclass][mode] = 2;
	    break;
	  }
    }

#if DEBUG0
  fprintf (stderr, "class %s can hold %s? %s\n",
	   class_names[(int) rclass], mode_name[mode],
	   (results[rclass][mode] == 2) ? "yes" : "no");
#endif
  return results[(int) rclass][mode] == 2;
}
385
386 /* Run-time Target Specification. */
387
388 /* Memregs are memory locations that gcc treats like general
389 registers, as there are a limited number of true registers and the
390 m32c families can use memory in most places that registers can be
391 used.
392
393 However, since memory accesses are more expensive than registers,
394 we allow the user to limit the number of memregs available, in
395 order to try to persuade gcc to try harder to use real registers.
396
397 Memregs are provided by lib1funcs.S.
398 */
399
400 int ok_to_change_target_memregs = TRUE;
401
/* Implements TARGET_OPTION_OVERRIDE.  Validates -memregs=, sets
   target-dependent optimization defaults, and disables passes that
   interact badly with this port.  */

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE m32c_option_override

static void
m32c_option_override (void)
{
  /* We limit memregs to 0..16, and provide a default.  */
  if (global_options_set.x_target_memregs)
    {
      if (target_memregs < 0 || target_memregs > 16)
	error ("invalid target memregs value '%d'", target_memregs);
    }
  else
    target_memregs = 16;

  if (TARGET_A24)
    flag_ivopts = 0;

  /* This target defaults to strict volatile bitfields.  */
  if (flag_strict_volatile_bitfields < 0 && abi_version_at_least(2))
    flag_strict_volatile_bitfields = 1;

  /* r8c/m16c have no 16-bit indirect call, so thunks are involved.
     This is always worse than an absolute call.  */
  if (TARGET_A16)
    flag_no_function_cse = 1;

  /* This wants to put insns between compares and their jumps.  */
  /* FIXME: The right solution is to properly trace the flags register
     values, but that is too much work for stage 4.  */
  flag_combine_stack_adjustments = 0;
}
436
#undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m32c_override_options_after_change

/* Re-apply the A16 no-function-cse default whenever per-function
   options change (e.g. via optimize attributes), since the generic
   code may have reset it.  */
static void
m32c_override_options_after_change (void)
{
  if (TARGET_A16)
    flag_no_function_cse = 1;
}
446
/* Defining data structures for per-function information */

/* The usual; we set up our machine_function data.  Allocates a
   zero-initialized, garbage-collected machine_function record.  */
static struct machine_function *
m32c_init_machine_status (void)
{
  return ggc_alloc_cleared_machine_function ();
}
455
/* Implements INIT_EXPANDERS.  We just set up to call the above
   function.  */
void
m32c_init_expanders (void)
{
  init_machine_status = m32c_init_machine_status;
}
463
464 /* Storage Layout */
465
466 /* Register Basics */
467
468 /* Basic Characteristics of Registers */
469
/* Whether a mode fits in a register is complex enough to warrant a
   table.  Each entry gives, per hard register, the number of
   registers needed to hold a QI/HI/PSI/SI/DI value starting there;
   0 means the mode is not allowed in that register.  Rows are in
   hard-register order (note: r2 follows r0, matching the port's
   register numbering).  */
static struct
{
  char qi_regs;
  char hi_regs;
  char pi_regs;
  char si_regs;
  char di_regs;
} nregs_table[FIRST_PSEUDO_REGISTER] =
{
  { 1, 1, 2, 2, 4 },		/* r0 */
  { 0, 1, 0, 0, 0 },		/* r2 */
  { 1, 1, 2, 2, 0 },		/* r1 */
  { 0, 1, 0, 0, 0 },		/* r3 */
  { 0, 1, 1, 0, 0 },		/* a0 */
  { 0, 1, 1, 0, 0 },		/* a1 */
  { 0, 1, 1, 0, 0 },		/* sb */
  { 0, 1, 1, 0, 0 },		/* fb */
  { 0, 1, 1, 0, 0 },		/* sp */
  { 1, 1, 1, 0, 0 },		/* pc */
  { 0, 0, 0, 0, 0 },		/* fl */
  { 1, 1, 1, 0, 0 },		/* ap */
  { 1, 1, 2, 2, 4 },		/* mem0 */
  { 1, 1, 2, 2, 4 },		/* mem1 */
  { 1, 1, 2, 2, 4 },		/* mem2 */
  { 1, 1, 2, 2, 4 },		/* mem3 */
  { 1, 1, 2, 2, 4 },		/* mem4 */
  { 1, 1, 2, 2, 0 },		/* mem5 */
  { 1, 1, 2, 2, 0 },		/* mem6 */
  { 1, 1, 0, 0, 0 },		/* mem7 */
};
502
/* Implements TARGET_CONDITIONAL_REGISTER_USAGE.  We adjust the number
   of available memregs, and select which registers need to be preserved
   across calls based on the chip family.  */

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE m32c_conditional_register_usage
void
m32c_conditional_register_usage (void)
{
  int i;

  if (0 <= target_memregs && target_memregs <= 16)
    {
      /* The command line option is bytes, but our "registers" are
	 16-bit words.  Mark the unavailable memregs fixed and remove
	 them from MEM_REGS.  */
      for (i = (target_memregs+1)/2; i < 8; i++)
	{
	  fixed_regs[MEM0_REGNO + i] = 1;
	  CLEAR_HARD_REG_BIT (reg_class_contents[MEM_REGS], MEM0_REGNO + i);
	}
    }

  /* M32CM and M32C preserve more registers across function calls.  */
  if (TARGET_A24)
    {
      call_used_regs[R1_REGNO] = 0;
      call_used_regs[R2_REGNO] = 0;
      call_used_regs[R3_REGNO] = 0;
      call_used_regs[A0_REGNO] = 0;
      call_used_regs[A1_REGNO] = 0;
    }
}
535
536 /* How Values Fit in Registers */
537
/* Implements HARD_REGNO_NREGS.  This is complicated by the fact that
   different registers are different sizes from each other, *and* may
   be different sizes in different chip families.  Returns 0 when MODE
   is not allowed in REGNO at all (used by m32c_hard_regno_ok).  */
static int
m32c_hard_regno_nregs_1 (int regno, enum machine_mode mode)
{
  if (regno == FLG_REGNO && mode == CCmode)
    return 1;
  if (regno >= FIRST_PSEUDO_REGISTER)
    return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);

  /* Memregs are uniform 16-bit words; any mode fits in enough of
     them.  */
  if (regno >= MEM0_REGNO && regno <= MEM7_REGNO)
    return (GET_MODE_SIZE (mode) + 1) / 2;

  if (GET_MODE_SIZE (mode) <= 1)
    return nregs_table[regno].qi_regs;
  if (GET_MODE_SIZE (mode) <= 2)
    return nregs_table[regno].hi_regs;
  /* On A16 parts, SImode in a0 takes the a1a0 pair.  */
  if (regno == A0_REGNO && mode == SImode && TARGET_A16)
    return 2;
  if ((GET_MODE_SIZE (mode) <= 3 || mode == PSImode) && TARGET_A24)
    return nregs_table[regno].pi_regs;
  if (GET_MODE_SIZE (mode) <= 4)
    return nregs_table[regno].si_regs;
  if (GET_MODE_SIZE (mode) <= 8)
    return nregs_table[regno].di_regs;
  return 0;
}
566
567 int
568 m32c_hard_regno_nregs (int regno, enum machine_mode mode)
569 {
570 int rv = m32c_hard_regno_nregs_1 (regno, mode);
571 return rv ? rv : 1;
572 }
573
574 /* Implements HARD_REGNO_MODE_OK. The above function does the work
575 already; just test its return value. */
576 int
577 m32c_hard_regno_ok (int regno, enum machine_mode mode)
578 {
579 return m32c_hard_regno_nregs_1 (regno, mode) != 0;
580 }
581
/* Implements MODES_TIEABLE_P.  In general, modes aren't tieable since
   registers are all different sizes.  However, since most modes are
   bigger than our registers anyway, it's easier to implement this
   function that way, leaving QImode as the only unique case.
   NOTE(review): as written this always returns 1 -- the stricter
   QImode rule below is disabled via #if 0.  */
int
m32c_modes_tieable_p (enum machine_mode m1, enum machine_mode m2)
{
  if (GET_MODE_SIZE (m1) == GET_MODE_SIZE (m2))
    return 1;

#if 0
  /* Disabled: would refuse to tie QImode with anything larger.  */
  if (m1 == QImode || m2 == QImode)
    return 0;
#endif

  return 1;
}
599
/* Register Classes */

/* Implements REGNO_REG_CLASS.  Each named hard register has its own
   singleton class; memregs share MEM_REGS; anything else (including
   pseudos) gets ALL_REGS.  */
enum reg_class
m32c_regno_reg_class (int regno)
{
  switch (regno)
    {
    case R0_REGNO:
      return R0_REGS;
    case R1_REGNO:
      return R1_REGS;
    case R2_REGNO:
      return R2_REGS;
    case R3_REGNO:
      return R3_REGS;
    case A0_REGNO:
      return A0_REGS;
    case A1_REGNO:
      return A1_REGS;
    case SB_REGNO:
      return SB_REGS;
    case FB_REGNO:
      return FB_REGS;
    case SP_REGNO:
      return SP_REGS;
    case FLG_REGNO:
      return FLG_REGS;
    default:
      if (IS_MEM_REGNO (regno))
	return MEM_REGS;
      return ALL_REGS;
    }
}
634
635 /* Implements REGNO_OK_FOR_BASE_P. */
636 int
637 m32c_regno_ok_for_base_p (int regno)
638 {
639 if (regno == A0_REGNO
640 || regno == A1_REGNO || regno >= FIRST_PSEUDO_REGISTER)
641 return 1;
642 return 0;
643 }
644
645 #define DEBUG_RELOAD 0
646
/* Implements TARGET_PREFERRED_RELOAD_CLASS.  In general, prefer general
   registers of the appropriate size.  Narrows RCLASS according to the
   mode of X; never widens beyond the original class (reduce_class
   returns the original when the intersection is empty).  */

#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS m32c_preferred_reload_class

static reg_class_t
m32c_preferred_reload_class (rtx x, reg_class_t rclass)
{
  reg_class_t newclass = rclass;

#if DEBUG_RELOAD
  fprintf (stderr, "\npreferred_reload_class for %s is ",
	   class_names[rclass]);
#endif
  /* With no constraint, start from the general registers that can
     hold X's mode.  */
  if (rclass == NO_REGS)
    rclass = GET_MODE (x) == QImode ? HL_REGS : R03_REGS;

  if (reg_classes_intersect_p (rclass, CR_REGS))
    {
      switch (GET_MODE (x))
	{
	case QImode:
	  newclass = HL_REGS;
	  break;
	default:
	  /*      newclass = HI_REGS; */
	  break;
	}
    }

  else if (newclass == QI_REGS && GET_MODE_SIZE (GET_MODE (x)) > 2)
    newclass = SI_REGS;
  else if (GET_MODE_SIZE (GET_MODE (x)) > 4
	   && ~class_contents[rclass][0] & 0x000f)
    newclass = DI_REGS;

  rclass = reduce_class (rclass, newclass, rclass);

  /* QImode values can only live in r0l/r1l, so narrow further.  */
  if (GET_MODE (x) == QImode)
    rclass = reduce_class (rclass, HL_REGS, rclass);

#if DEBUG_RELOAD
  fprintf (stderr, "%s\n", class_names[rclass]);
  debug_rtx (x);

  if (GET_CODE (x) == MEM
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
    fprintf (stderr, "Glorm!\n");
#endif
  return rclass;
}
700
/* Implements TARGET_PREFERRED_OUTPUT_RELOAD_CLASS.  Outputs use the
   same class-narrowing rules as inputs.  */

#undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
#define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS m32c_preferred_output_reload_class

static reg_class_t
m32c_preferred_output_reload_class (rtx x, reg_class_t rclass)
{
  return m32c_preferred_reload_class (x, rclass);
}
711
/* Implements LIMIT_RELOAD_CLASS.  We basically want to avoid using
   address registers for reloads since they're needed for address
   reloads.  Narrows RCLASS to a size-appropriate general class, then
   (unless the class is exactly A_REGS) intersects away the address
   registers via DI_REGS.  */
int
m32c_limit_reload_class (enum machine_mode mode, int rclass)
{
#if DEBUG_RELOAD
  fprintf (stderr, "limit_reload_class for %s: %s ->",
	   mode_name[mode], class_names[rclass]);
#endif

  if (mode == QImode)
    rclass = reduce_class (rclass, HL_REGS, rclass);
  else if (mode == HImode)
    rclass = reduce_class (rclass, HI_REGS, rclass);
  else if (mode == SImode)
    rclass = reduce_class (rclass, SI_REGS, rclass);

  if (rclass != A_REGS)
    rclass = reduce_class (rclass, DI_REGS, rclass);

#if DEBUG_RELOAD
  fprintf (stderr, " %s\n", class_names[rclass]);
#endif
  return rclass;
}
738
/* Implements SECONDARY_RELOAD_CLASS.  QImode have to be reloaded in
   r0 or r1, as those are the only real QImode registers.  CR regs get
   reloaded through appropriately sized general or address
   registers.  */
int
m32c_secondary_reload_class (int rclass, enum machine_mode mode, rtx x)
{
  int cc = class_contents[rclass][0];
#if DEBUG0
  fprintf (stderr, "\nsecondary reload class %s %s\n",
	   class_names[rclass], mode_name[mode]);
  debug_rtx (x);
#endif
  /* QImode MEM reload into a class with no r0/r1 member needs QI_REGS
     as an intermediary.  */
  if (mode == QImode
      && GET_CODE (x) == MEM && (cc & ~class_contents[R23_REGS][0]) == 0)
    return QI_REGS;
  /* Control registers (sb..sp) go through HI_REGS on A16 or for
     HImode, else through the address registers.  */
  if (reg_classes_intersect_p (rclass, CR_REGS)
      && GET_CODE (x) == REG
      && REGNO (x) >= SB_REGNO && REGNO (x) <= SP_REGNO)
    return (TARGET_A16 || mode == HImode) ? HI_REGS : A_REGS;
  return NO_REGS;
}
761
762 /* Implements TARGET_CLASS_LIKELY_SPILLED_P. A_REGS is needed for address
763 reloads. */
764
765 #undef TARGET_CLASS_LIKELY_SPILLED_P
766 #define TARGET_CLASS_LIKELY_SPILLED_P m32c_class_likely_spilled_p
767
768 static bool
769 m32c_class_likely_spilled_p (reg_class_t regclass)
770 {
771 if (regclass == A_REGS)
772 return true;
773
774 return (reg_class_size[(int) regclass] == 1);
775 }
776
777 /* Implements TARGET_CLASS_MAX_NREGS. We calculate this according to its
778 documented meaning, to avoid potential inconsistencies with actual
779 class definitions. */
780
781 #undef TARGET_CLASS_MAX_NREGS
782 #define TARGET_CLASS_MAX_NREGS m32c_class_max_nregs
783
784 static unsigned char
785 m32c_class_max_nregs (reg_class_t regclass, enum machine_mode mode)
786 {
787 int rn;
788 unsigned char max = 0;
789
790 for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
791 if (TEST_HARD_REG_BIT (reg_class_contents[(int) regclass], rn))
792 {
793 unsigned char n = m32c_hard_regno_nregs (rn, mode);
794 if (max < n)
795 max = n;
796 }
797 return max;
798 }
799
/* Implements CANNOT_CHANGE_MODE_CLASS.  Only r0 and r1 can change to
   QI (r0l, r1l) because the chip doesn't support QI ops on other
   registers (well, it does on a0/a1 but if we let gcc do that, reload
   suffers).  Otherwise, we allow changes to larger modes.  Returns
   nonzero when the FROM->TO change must be rejected for RCLASS.  */
int
m32c_cannot_change_mode_class (enum machine_mode from,
			       enum machine_mode to, int rclass)
{
  int rn;
#if DEBUG0
  fprintf (stderr, "cannot change from %s to %s in %s\n",
	   mode_name[from], mode_name[to], class_names[rclass]);
#endif

  /* If the larger mode isn't allowed in any of these registers, we
     can't allow the change.  */
  for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
    if (class_contents[rclass][0] & (1 << rn))
      if (! m32c_hard_regno_ok (rn, to))
	return 1;

  /* 0x1ffa = every hard register except r0, r1, and the memregs;
     none of those may narrow to QImode.  */
  if (to == QImode)
    return (class_contents[rclass][0] & 0x1ffa);

  /* 0x0005 = r0 and r1 (bits 0 and 2 in hard-register order).  */
  if (class_contents[rclass][0] & 0x0005 /* r0, r1 */
      && GET_MODE_SIZE (from) > 1)
    return 0;
  if (GET_MODE_SIZE (from) > 2)	/* all other regs */
    return 0;

  return 1;
}
832
833 /* Helpers for the rest of the file. */
834 /* TRUE if the rtx is a REG rtx for the given register. */
835 #define IS_REG(rtx,regno) (GET_CODE (rtx) == REG \
836 && REGNO (rtx) == regno)
837 /* TRUE if the rtx is a pseudo - specifically, one we can use as a
838 base register in address calculations (hence the "strict"
839 argument). */
840 #define IS_PSEUDO(rtx,strict) (!strict && GET_CODE (rtx) == REG \
841 && (REGNO (rtx) == AP_REGNO \
842 || REGNO (rtx) >= FIRST_PSEUDO_REGISTER))
843
844 #define A0_OR_PSEUDO(x) (IS_REG(x, A0_REGNO) || REGNO (x) >= FIRST_PSEUDO_REGISTER)
845
/* Implements EXTRA_CONSTRAINT_STR (see next function too).  'S' is
   for memory constraints, plus "Rpa" for PARALLEL rtx's we use for
   call return values.  Matching is done by encoding VALUE into the
   pattern[] string (see encode_pattern) and comparing against the
   encodings each constraint accepts; patternr[] supplies the
   corresponding sub-rtxes for the extra register/value checks.  */
bool
m32c_matches_constraint_p (rtx value, int constraint)
{
  encode_pattern (value);

  switch (constraint) {
  case CONSTRAINT_SF:
    /* Far-space memory: (a0/pseudo), displacement, or symbol forms.  */
    return (far_addr_space_p (value)
	    && ((RTX_IS ("mr")
		 && A0_OR_PSEUDO (patternr[1])
		 && GET_MODE (patternr[1]) == SImode)
		|| (RTX_IS ("m+^Sri")
		    && A0_OR_PSEUDO (patternr[4])
		    && GET_MODE (patternr[4]) == HImode)
		|| (RTX_IS ("m+^Srs")
		    && A0_OR_PSEUDO (patternr[4])
		    && GET_MODE (patternr[4]) == HImode)
		|| (RTX_IS ("m+^S+ris")
		    && A0_OR_PSEUDO (patternr[5])
		    && GET_MODE (patternr[5]) == HImode)
		|| RTX_IS ("ms")));
  case CONSTRAINT_Sd:
    {
      /* This is the common "src/dest" address */
      rtx r;
      if (GET_CODE (value) == MEM && CONSTANT_P (XEXP (value, 0)))
	return true;
      if (RTX_IS ("ms") || RTX_IS ("m+si"))
	return true;
      if (RTX_IS ("m++rii"))
	{
	  /* (fb + const + 0) -- a frame-base reference.  */
	  if (REGNO (patternr[3]) == FB_REGNO
	      && INTVAL (patternr[4]) == 0)
	    return true;
	}
      if (RTX_IS ("mr"))
	r = patternr[1];
      else if (RTX_IS ("m+ri") || RTX_IS ("m+rs") || RTX_IS ("m+r+si"))
	r = patternr[2];
      else
	return false;
      /* SP-relative addresses are not "src/dest" operands.  */
      if (REGNO (r) == SP_REGNO)
	return false;
      return m32c_legitimate_address_p (GET_MODE (value), XEXP (value, 0), 1);
    }
  case CONSTRAINT_Sa:
    {
      /* Address-register indirect, with optional displacement.  */
      rtx r;
      if (RTX_IS ("mr"))
	r = patternr[1];
      else if (RTX_IS ("m+ri"))
	r = patternr[2];
      else
	return false;
      return (IS_REG (r, A0_REGNO) || IS_REG (r, A1_REGNO));
    }
  case CONSTRAINT_Si:
    /* Constant or symbolic addresses.  */
    return (RTX_IS ("mi") || RTX_IS ("ms") || RTX_IS ("m+si"));
  case CONSTRAINT_Ss:
    /* SP-relative memory.  */
    return ((RTX_IS ("mr")
	     && (IS_REG (patternr[1], SP_REGNO)))
	    || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SP_REGNO))));
  case CONSTRAINT_Sf:
    /* FB-relative memory.  */
    return ((RTX_IS ("mr")
	     && (IS_REG (patternr[1], FB_REGNO)))
	    || (RTX_IS ("m+ri") && (IS_REG (patternr[2], FB_REGNO))));
  case CONSTRAINT_Sb:
    /* SB-relative memory.  */
    return ((RTX_IS ("mr")
	     && (IS_REG (patternr[1], SB_REGNO)))
	    || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SB_REGNO))));
  case CONSTRAINT_Sp:
    /* Absolute addresses 0..0x1fff used for bit addressing (I/O ports) */
    return (RTX_IS ("mi")
	    && !(INTVAL (patternr[1]) & ~0x1fff));
  case CONSTRAINT_S1:
    return r1h_operand (value, QImode);
  case CONSTRAINT_Rpa:
    return GET_CODE (value) == PARALLEL;
  default:
    return false;
  }
}
931
932 /* STACK AND CALLING */
933
934 /* Frame Layout */
935
936 /* Implements RETURN_ADDR_RTX. Note that R8C and M16C push 24 bits
937 (yes, THREE bytes) onto the stack for the return address, but we
938 don't support pointers bigger than 16 bits on those chips. This
939 will likely wreak havoc with exception unwinding. FIXME. */
940 rtx
941 m32c_return_addr_rtx (int count)
942 {
943 enum machine_mode mode;
944 int offset;
945 rtx ra_mem;
946
947 if (count)
948 return NULL_RTX;
949 /* we want 2[$fb] */
950
951 if (TARGET_A24)
952 {
953 /* It's four bytes */
954 mode = PSImode;
955 offset = 4;
956 }
957 else
958 {
959 /* FIXME: it's really 3 bytes */
960 mode = HImode;
961 offset = 2;
962 }
963
964 ra_mem =
965 gen_rtx_MEM (mode, plus_constant (Pmode, gen_rtx_REG (Pmode, FP_REGNO),
966 offset));
967 return copy_to_mode_reg (mode, ra_mem);
968 }
969
/* Implements INCOMING_RETURN_ADDR_RTX.  See comment above.  On entry
   the return address sits at the top of the stack.  */
rtx
m32c_incoming_return_addr_rtx (void)
{
  /* we want [sp] */
  return gen_rtx_MEM (PSImode, gen_rtx_REG (PSImode, SP_REGNO));
}
977
978 /* Exception Handling Support */
979
980 /* Implements EH_RETURN_DATA_REGNO. Choose registers able to hold
981 pointers. */
982 int
983 m32c_eh_return_data_regno (int n)
984 {
985 switch (n)
986 {
987 case 0:
988 return A0_REGNO;
989 case 1:
990 if (TARGET_A16)
991 return R3_REGNO;
992 else
993 return R1_REGNO;
994 default:
995 return INVALID_REGNUM;
996 }
997 }
998
999 /* Implements EH_RETURN_STACKADJ_RTX. Saved and used later in
1000 m32c_emit_eh_epilogue. */
1001 rtx
1002 m32c_eh_return_stackadj_rtx (void)
1003 {
1004 if (!cfun->machine->eh_stack_adjust)
1005 {
1006 rtx sa;
1007
1008 sa = gen_rtx_REG (Pmode, R0_REGNO);
1009 cfun->machine->eh_stack_adjust = sa;
1010 }
1011 return cfun->machine->eh_stack_adjust;
1012 }
1013
/* Registers That Address the Stack Frame */

/* Implements DWARF_FRAME_REGNUM and DBX_REGISTER_NUMBER.  Note that
   the original spec called for dwarf numbers to vary with register
   width as well, for example, r0l, r0, and r2r0 would each have
   different dwarf numbers.  GCC doesn't support this, and we don't do
   it, and gdb seems to like it this way anyway.  Unknown registers
   map past DWARF_FRAME_REGISTERS, i.e. "no dwarf number".  */
unsigned int
m32c_dwarf_frame_regnum (int n)
{
  switch (n)
    {
    case R0_REGNO:
      return 5;
    case R1_REGNO:
      return 6;
    case R2_REGNO:
      return 7;
    case R3_REGNO:
      return 8;
    case A0_REGNO:
      return 9;
    case A1_REGNO:
      return 10;
    case FB_REGNO:
      return 11;
    case SB_REGNO:
      return 19;

    case SP_REGNO:
      return 12;
    case PC_REGNO:
      return 13;
    default:
      return DWARF_FRAME_REGISTERS + 1;
    }
}
1051
1052 /* The frame looks like this:
1053
1054 ap -> +------------------------------
1055 | Return address (3 or 4 bytes)
1056 | Saved FB (2 or 4 bytes)
1057 fb -> +------------------------------
1058 | local vars
1059 | register saves fb
1060 | through r0 as needed
1061 sp -> +------------------------------
1062 */
1063
/* We use this to wrap all emitted insns in the prologue: marks X as
   frame-related for dwarf CFI generation and returns it unchanged.  */
static rtx
F (rtx x)
{
  RTX_FRAME_RELATED_P (x) = 1;
  return x;
}
1071
/* This maps register numbers to the PUSHM/POPM bitfield, and tells us
   how much the stack pointer moves for each, for each cpu family.
   BIT is the register's position in the PUSHM/POPM instruction's
   bitfield operand.  */
static struct
{
  int reg1;
  int bit;
  int a16_bytes;
  int a24_bytes;
} pushm_info[] =
{
  /* These are in reverse push (nearest-to-sp) order.  */
  { R0_REGNO, 0x80, 2, 2 },
  { R1_REGNO, 0x40, 2, 2 },
  { R2_REGNO, 0x20, 2, 2 },
  { R3_REGNO, 0x10, 2, 2 },
  { A0_REGNO, 0x08, 2, 4 },
  { A1_REGNO, 0x04, 2, 4 },
  { SB_REGNO, 0x02, 2, 4 },
  { FB_REGNO, 0x01, 2, 4 }
};

/* Number of entries in pushm_info.  */
#define PUSHM_N (sizeof(pushm_info)/sizeof(pushm_info[0]))
1094
/* Returns TRUE if we need to save/restore the given register.  We
   save everything for exception handlers, so that any register can be
   unwound.  For interrupt handlers, we save everything if the handler
   calls something else (because we don't know what *that* function
   might do), but try to be a bit smarter if the handler is a leaf
   function.  We always save $a0, though, because we use that in the
   epilogue to copy $fb to $sp.  */
static int
need_to_save (int regno)
{
  /* Fixed registers are never saved.  */
  if (fixed_regs[regno])
    return 0;
  /* EH return needs every register restorable.  */
  if (crtl->calls_eh_return)
    return 1;
  /* The frame pointer is handled separately by the prologue.  */
  if (regno == FP_REGNO)
    return 0;
  if (cfun->machine->is_interrupt
      && (!cfun->machine->is_leaf
	  || (regno == A0_REGNO
	      && m32c_function_needs_enter ())
	  ))
    return 1;
  /* Ordinary case: live call-saved registers (or any live register
     in an interrupt handler).  */
  if (df_regs_ever_live_p (regno)
      && (!call_used_regs[regno] || cfun->machine->is_interrupt))
    return 1;
  return 0;
}
1122
1123 /* This function contains all the intelligence about saving and
1124 restoring registers. It always figures out the register save set.
1125 When called with PP_justcount, it merely returns the size of the
1126 save set (for eliminating the frame pointer, for example). When
1127 called with PP_pushm or PP_popm, it emits the appropriate
1128 instructions for saving (pushm) or restoring (popm) the
1129 registers. */
static int
m32c_pushm_popm (Push_Pop_Type ppt)
{
  int reg_mask = 0;		/* PUSHM/POPM register mask being built.  */
  int byte_count = 0, bytes;	/* Running stack size; bytes per register.  */
  int i;
  rtx dwarf_set[PUSHM_N];	/* Per-register CFI save expressions.  */
  int n_dwarfs = 0;
  int nosave_mask = 0;		/* Registers excluded: they carry the rv.  */

  /* When the return value lives in general registers (a PARALLEL
     return rtx), those registers must not be clobbered by POPM in the
     epilogue, so they are excluded from the save set.  Not done for
     EH or interrupt returns, which must restore everything.  */
  if (crtl->return_rtx
      && GET_CODE (crtl->return_rtx) == PARALLEL
      && !(crtl->calls_eh_return || cfun->machine->is_interrupt))
    {
      rtx exp = XVECEXP (crtl->return_rtx, 0, 0);
      rtx rv = XEXP (exp, 0);
      int rv_bytes = GET_MODE_SIZE (GET_MODE (rv));

      if (rv_bytes > 2)
	nosave_mask |= 0x20;	/* PSI, SI */
      else
	nosave_mask |= 0xf0;	/* DF */
      if (rv_bytes > 4)
	nosave_mask |= 0x50;	/* DI */
    }

  /* Walk the PUSHM-eligible registers in reverse push order,
     accumulating the instruction mask, the stack-byte total, and (for
     PP_pushm) a DWARF save expression per register.  */
  for (i = 0; i < (int) PUSHM_N; i++)
    {
      /* Skip if neither register needs saving. */
      if (!need_to_save (pushm_info[i].reg1))
	continue;

      if (pushm_info[i].bit & nosave_mask)
	continue;

      reg_mask |= pushm_info[i].bit;
      bytes = TARGET_A16 ? pushm_info[i].a16_bytes : pushm_info[i].a24_bytes;

      if (ppt == PP_pushm)
	{
	  enum machine_mode mode = (bytes == 2) ? HImode : SImode;
	  rtx addr;

	  /* Always use stack_pointer_rtx instead of calling
	     rtx_gen_REG ourselves.  Code elsewhere in GCC assumes
	     that there is a single rtx representing the stack pointer,
	     namely stack_pointer_rtx, and uses == to recognize it. */
	  addr = stack_pointer_rtx;

	  if (byte_count != 0)
	    addr = gen_rtx_PLUS (GET_MODE (addr), addr, GEN_INT (byte_count));

	  /* Record a (mem:M sp+ofs) = (reg) SET for the unwinder.  */
	  dwarf_set[n_dwarfs++] =
	    gen_rtx_SET (VOIDmode,
			 gen_rtx_MEM (mode, addr),
			 gen_rtx_REG (mode, pushm_info[i].reg1));
	  F (dwarf_set[n_dwarfs - 1]);

	}
      byte_count += bytes;
    }

  /* Interrupt handlers save the general registers through a separate
     mechanism; stash the mask (minus bit 0) and start over.  */
  if (cfun->machine->is_interrupt)
    {
      cfun->machine->intr_pushm = reg_mask & 0xfe;
      reg_mask = 0;
      byte_count = 0;
    }

  /* Interrupt handlers additionally push any live mem0..mem7
     pseudo-registers, two bytes each.  */
  if (cfun->machine->is_interrupt)
    for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
      if (need_to_save (i))
	{
	  byte_count += 2;
	  cfun->machine->intr_pushmem[i - MEM0_REGNO] = 1;
	}

  if (ppt == PP_pushm && byte_count)
    {
      /* Attach a REG_FRAME_RELATED_EXPR note describing the sp
	 adjustment plus every individual register save, so the CFI
	 for the single PUSHM insn is complete.  */
      rtx note = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (n_dwarfs + 1));
      rtx pushm;

      if (reg_mask)
	{
	  XVECEXP (note, 0, 0)
	    = gen_rtx_SET (VOIDmode,
			   stack_pointer_rtx,
			   gen_rtx_PLUS (GET_MODE (stack_pointer_rtx),
					 stack_pointer_rtx,
					 GEN_INT (-byte_count)));
	  F (XVECEXP (note, 0, 0));

	  for (i = 0; i < n_dwarfs; i++)
	    XVECEXP (note, 0, i + 1) = dwarf_set[i];

	  pushm = F (emit_insn (gen_pushm (GEN_INT (reg_mask))));

	  add_reg_note (pushm, REG_FRAME_RELATED_EXPR, note);
	}

      /* Individual pushes for the interrupt handler's memregs.  */
      if (cfun->machine->is_interrupt)
	for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
	  if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
	    {
	      if (TARGET_A16)
		pushm = emit_insn (gen_pushhi_16 (gen_rtx_REG (HImode, i)));
	      else
		pushm = emit_insn (gen_pushhi_24 (gen_rtx_REG (HImode, i)));
	      F (pushm);
	    }
    }
  if (ppt == PP_popm && byte_count)
    {
      /* Pop in the exact reverse order of the pushes above.  */
      if (cfun->machine->is_interrupt)
	for (i = MEM7_REGNO; i >= MEM0_REGNO; i--)
	  if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
	    {
	      if (TARGET_A16)
		emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, i)));
	      else
		emit_insn (gen_pophi_24 (gen_rtx_REG (HImode, i)));
	    }
      if (reg_mask)
	emit_insn (gen_popm (GEN_INT (reg_mask)));
    }

  return byte_count;
}
1258
1259 /* Implements INITIAL_ELIMINATION_OFFSET. See the comment above that
1260 diagrams our call frame. */
1261 int
1262 m32c_initial_elimination_offset (int from, int to)
1263 {
1264 int ofs = 0;
1265
1266 if (from == AP_REGNO)
1267 {
1268 if (TARGET_A16)
1269 ofs += 5;
1270 else
1271 ofs += 8;
1272 }
1273
1274 if (to == SP_REGNO)
1275 {
1276 ofs += m32c_pushm_popm (PP_justcount);
1277 ofs += get_frame_size ();
1278 }
1279
1280 /* Account for push rounding. */
1281 if (TARGET_A24)
1282 ofs = (ofs + 1) & ~1;
1283 #if DEBUG0
1284 fprintf (stderr, "initial_elimination_offset from=%d to=%d, ofs=%d\n", from,
1285 to, ofs);
1286 #endif
1287 return ofs;
1288 }
1289
1290 /* Passing Function Arguments on the Stack */
1291
1292 /* Implements PUSH_ROUNDING. The R8C and M16C have byte stacks, the
1293 M32C has word stacks. */
1294 unsigned int
1295 m32c_push_rounding (int n)
1296 {
1297 if (TARGET_R8C || TARGET_M16C)
1298 return n;
1299 return (n + 1) & ~1;
1300 }
1301
1302 /* Passing Arguments in Registers */
1303
1304 /* Implements TARGET_FUNCTION_ARG. Arguments are passed partly in
1305 registers, partly on stack. If our function returns a struct, a
1306 pointer to a buffer for it is at the top of the stack (last thing
1307 pushed). The first few real arguments may be in registers as
1308 follows:
1309
1310 R8C/M16C: arg1 in r1 if it's QI or HI (else it's pushed on stack)
1311 arg2 in r2 if it's HI (else pushed on stack)
1312 rest on stack
1313 M32C: arg1 in r0 if it's QI or HI (else it's pushed on stack)
1314 rest on stack
1315
1316 Structs are not passed in registers, even if they fit. Only
1317 integer and pointer types are passed in registers.
1318
1319 Note that when arg1 doesn't fit in r1, arg2 may still be passed in
1320 r2 if it fits. */
1321 #undef TARGET_FUNCTION_ARG
1322 #define TARGET_FUNCTION_ARG m32c_function_arg
static rtx
m32c_function_arg (cumulative_args_t ca_v,
		   enum machine_mode mode, const_tree type, bool named)
{
  CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);

  /* Can return a reg, parallel, or 0 for stack */
  rtx rv = NULL_RTX;
#if DEBUG0
  fprintf (stderr, "func_arg %d (%s, %d)\n",
	   ca->parm_num, mode_name[mode], named);
  debug_tree (type);
#endif

  /* End-of-arguments marker.  */
  if (mode == VOIDmode)
    return GEN_INT (0);

  /* Struct-return pointer still pending, or an unnamed (variadic)
     argument: always passed on the stack.  */
  if (ca->force_mem || !named)
    {
#if DEBUG0
      fprintf (stderr, "func arg: force %d named %d, mem\n", ca->force_mem,
	       named);
#endif
      return NULL_RTX;
    }

  /* NOTE(review): a type cannot be both integral and pointer, so this
     condition is always false and the test is dead.  The comment above
     the function says only integer and pointer types are passed in
     registers, which suggests "!INTEGRAL && !POINTER" may have been
     intended -- confirm before changing, as altering it changes the
     ABI for other scalar types.  */
  if (type && INTEGRAL_TYPE_P (type) && POINTER_TYPE_P (type))
    return NULL_RTX;

  /* Structs never go in registers, even if they would fit.  */
  if (type && AGGREGATE_TYPE_P (type))
    return NULL_RTX;

  switch (ca->parm_num)
    {
    case 1:
      /* arg1: $r1 on A16 parts, $r0 on A24, for 1- or 2-byte modes.  */
      if (GET_MODE_SIZE (mode) == 1 || GET_MODE_SIZE (mode) == 2)
	rv = gen_rtx_REG (mode, TARGET_A16 ? R1_REGNO : R0_REGNO);
      break;

    case 2:
      /* arg2: $r2, on A16 parts only, and only for 2-byte modes.  */
      if (TARGET_A16 && GET_MODE_SIZE (mode) == 2)
	rv = gen_rtx_REG (mode, R2_REGNO);
      break;
    }

#if DEBUG0
  debug_rtx (rv);
#endif
  return rv;
}
1373
1374 #undef TARGET_PASS_BY_REFERENCE
1375 #define TARGET_PASS_BY_REFERENCE m32c_pass_by_reference
1376 static bool
1377 m32c_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
1378 enum machine_mode mode ATTRIBUTE_UNUSED,
1379 const_tree type ATTRIBUTE_UNUSED,
1380 bool named ATTRIBUTE_UNUSED)
1381 {
1382 return 0;
1383 }
1384
1385 /* Implements INIT_CUMULATIVE_ARGS. */
1386 void
1387 m32c_init_cumulative_args (CUMULATIVE_ARGS * ca,
1388 tree fntype,
1389 rtx libname ATTRIBUTE_UNUSED,
1390 tree fndecl,
1391 int n_named_args ATTRIBUTE_UNUSED)
1392 {
1393 if (fntype && aggregate_value_p (TREE_TYPE (fntype), fndecl))
1394 ca->force_mem = 1;
1395 else
1396 ca->force_mem = 0;
1397 ca->parm_num = 1;
1398 }
1399
1400 /* Implements TARGET_FUNCTION_ARG_ADVANCE. force_mem is set for
1401 functions returning structures, so we always reset that. Otherwise,
1402 we only need to know the sequence number of the argument to know what
1403 to do with it. */
1404 #undef TARGET_FUNCTION_ARG_ADVANCE
1405 #define TARGET_FUNCTION_ARG_ADVANCE m32c_function_arg_advance
1406 static void
1407 m32c_function_arg_advance (cumulative_args_t ca_v,
1408 enum machine_mode mode ATTRIBUTE_UNUSED,
1409 const_tree type ATTRIBUTE_UNUSED,
1410 bool named ATTRIBUTE_UNUSED)
1411 {
1412 CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
1413
1414 if (ca->force_mem)
1415 ca->force_mem = 0;
1416 else
1417 ca->parm_num++;
1418 }
1419
1420 /* Implements TARGET_FUNCTION_ARG_BOUNDARY. */
1421 #undef TARGET_FUNCTION_ARG_BOUNDARY
1422 #define TARGET_FUNCTION_ARG_BOUNDARY m32c_function_arg_boundary
1423 static unsigned int
1424 m32c_function_arg_boundary (enum machine_mode mode ATTRIBUTE_UNUSED,
1425 const_tree type ATTRIBUTE_UNUSED)
1426 {
1427 return (TARGET_A16 ? 8 : 16);
1428 }
1429
1430 /* Implements FUNCTION_ARG_REGNO_P. */
1431 int
1432 m32c_function_arg_regno_p (int r)
1433 {
1434 if (TARGET_A24)
1435 return (r == R0_REGNO);
1436 return (r == R1_REGNO || r == R2_REGNO);
1437 }
1438
1439 /* HImode and PSImode are the two "native" modes as far as GCC is
1440 concerned, but the chips also support a 32-bit mode which is used
1441 for some opcodes in R8C/M16C and for reset vectors and such. */
1442 #undef TARGET_VALID_POINTER_MODE
1443 #define TARGET_VALID_POINTER_MODE m32c_valid_pointer_mode
1444 static bool
1445 m32c_valid_pointer_mode (enum machine_mode mode)
1446 {
1447 if (mode == HImode
1448 || mode == PSImode
1449 || mode == SImode
1450 )
1451 return 1;
1452 return 0;
1453 }
1454
1455 /* How Scalar Function Values Are Returned */
1456
1457 /* Implements TARGET_LIBCALL_VALUE. Most values are returned in $r0, or some
1458 combination of registers starting there (r2r0 for longs, r3r1r2r0
1459 for long long, r3r2r1r0 for doubles), except that that ABI
1460 currently doesn't work because it ends up using all available
1461 general registers and gcc often can't compile it. So, instead, we
1462 return anything bigger than 16 bits in "mem0" (effectively, a
1463 memory location). */
1464
1465 #undef TARGET_LIBCALL_VALUE
1466 #define TARGET_LIBCALL_VALUE m32c_libcall_value
1467
static rtx
m32c_libcall_value (enum machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
{
  /* return reg or parallel */
#if 0
  /* FIXME: GCC has difficulty returning large values in registers,
     because that ties up most of the general registers and gives the
     register allocator little to work with.  Until we can resolve
     this, large values are returned in memory.  */
  if (mode == DFmode)
    {
      /* Doubles would be split r3r1r2r0 across the four HI
	 registers, at offsets 0/2/4/6.  */
      rtx rv;

      rv = gen_rtx_PARALLEL (mode, rtvec_alloc (4));
      XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R0_REGNO),
					      GEN_INT (0));
      XVECEXP (rv, 0, 1) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R1_REGNO),
					      GEN_INT (2));
      XVECEXP (rv, 0, 2) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R2_REGNO),
					      GEN_INT (4));
      XVECEXP (rv, 0, 3) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R3_REGNO),
					      GEN_INT (6));
      return rv;
    }

  if (TARGET_A24 && GET_MODE_SIZE (mode) > 2)
    {
      /* A24 wide values would come back in r2r0 starting at $r0.  */
      rtx rv;

      rv = gen_rtx_PARALLEL (mode, rtvec_alloc (1));
      XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (mode,
							   R0_REGNO),
					      GEN_INT (0));
      return rv;
    }
#endif

  /* Live code: anything wider than 16 bits comes back in "mem0"
     (effectively a memory location); everything else in $r0.  */
  if (GET_MODE_SIZE (mode) > 2)
    return gen_rtx_REG (mode, MEM0_REGNO);
  return gen_rtx_REG (mode, R0_REGNO);
}
1518
1519 /* Implements TARGET_FUNCTION_VALUE. Functions and libcalls have the same
1520 conventions. */
1521
1522 #undef TARGET_FUNCTION_VALUE
1523 #define TARGET_FUNCTION_VALUE m32c_function_value
1524
1525 static rtx
1526 m32c_function_value (const_tree valtype,
1527 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
1528 bool outgoing ATTRIBUTE_UNUSED)
1529 {
1530 /* return reg or parallel */
1531 const enum machine_mode mode = TYPE_MODE (valtype);
1532 return m32c_libcall_value (mode, NULL_RTX);
1533 }
1534
1535 /* Implements TARGET_FUNCTION_VALUE_REGNO_P. */
1536
1537 #undef TARGET_FUNCTION_VALUE_REGNO_P
1538 #define TARGET_FUNCTION_VALUE_REGNO_P m32c_function_value_regno_p
1539
1540 static bool
1541 m32c_function_value_regno_p (const unsigned int regno)
1542 {
1543 return (regno == R0_REGNO || regno == MEM0_REGNO);
1544 }
1545
1546 /* How Large Values Are Returned */
1547
1548 /* We return structures by pushing the address on the stack, even if
1549 we use registers for the first few "real" arguments. */
1550 #undef TARGET_STRUCT_VALUE_RTX
1551 #define TARGET_STRUCT_VALUE_RTX m32c_struct_value_rtx
static rtx
m32c_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
		       int incoming ATTRIBUTE_UNUSED)
{
  /* Returning 0 tells GCC the address of the return buffer is passed
     as an invisible stacked argument rather than in a register.  */
  return 0;
}
1558
1559 /* Function Entry and Exit */
1560
1561 /* Implements EPILOGUE_USES. Interrupts restore all registers. */
1562 int
1563 m32c_epilogue_uses (int regno ATTRIBUTE_UNUSED)
1564 {
1565 if (cfun->machine->is_interrupt)
1566 return 1;
1567 return 0;
1568 }
1569
1570 /* Implementing the Varargs Macros */
1571
1572 #undef TARGET_STRICT_ARGUMENT_NAMING
1573 #define TARGET_STRICT_ARGUMENT_NAMING m32c_strict_argument_naming
1574 static bool
1575 m32c_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
1576 {
1577 return 1;
1578 }
1579
1580 /* Trampolines for Nested Functions */
1581
1582 /*
1583 m16c:
1584 1 0000 75C43412 mov.w #0x1234,a0
1585 2 0004 FC000000 jmp.a label
1586
1587 m32c:
1588 1 0000 BC563412 mov.l:s #0x123456,a0
1589 2 0004 CC000000 jmp.a label
1590 */
1591
1592 /* Implements TRAMPOLINE_SIZE. */
1593 int
1594 m32c_trampoline_size (void)
1595 {
1596 /* Allocate extra space so we can avoid the messy shifts when we
1597 initialize the trampoline; we just write past the end of the
1598 opcode. */
1599 return TARGET_A16 ? 8 : 10;
1600 }
1601
1602 /* Implements TRAMPOLINE_ALIGNMENT. */
int
m32c_trampoline_alignment (void)
{
  /* NOTE(review): TRAMPOLINE_ALIGNMENT is conventionally expressed in
     bits, so this requests only 2-bit (sub-byte) alignment --
     effectively none.  Presumably intentional for a byte-addressed
     target, but confirm 16 (bits) was not intended.  */
  return 2;
}
1608
1609 /* Implements TARGET_TRAMPOLINE_INIT. */
1610
1611 #undef TARGET_TRAMPOLINE_INIT
1612 #define TARGET_TRAMPOLINE_INIT m32c_trampoline_init
static void
m32c_trampoline_init (rtx m_tramp, tree fndecl, rtx chainval)
{
  /* Address of the nested function the trampoline jumps to.  */
  rtx function = XEXP (DECL_RTL (fndecl), 0);

  /* A0(mode, offset) addresses a slot inside the trampoline block.  */
#define A0(m,i) adjust_address (m_tramp, m, i)
  if (TARGET_A16)
    {
      /* m16c layout (see comment above): "mov.w #chain,a0" (75 C4)
	 followed by "jmp.a function" (FC).  */
      /* Note: we subtract a "word" because the moves want signed
	 constants, not unsigned constants.  */
      emit_move_insn (A0 (HImode, 0), GEN_INT (0xc475 - 0x10000));
      emit_move_insn (A0 (HImode, 2), chainval);
      emit_move_insn (A0 (QImode, 4), GEN_INT (0xfc - 0x100));
      /* We use 16-bit addresses here, but store the zero to turn it
	 into a 24-bit offset.  */
      emit_move_insn (A0 (HImode, 5), function);
      emit_move_insn (A0 (QImode, 7), GEN_INT (0x00));
    }
  else
    {
      /* m32c layout (see comment above): "mov.l:s #chain,a0" (BC)
	 followed by "jmp.a function" (CC).  */
      /* Note that the PSI moves actually write 4 bytes.  Make sure we
	 write stuff out in the right order, and leave room for the
	 extra byte at the end.  */
      emit_move_insn (A0 (QImode, 0), GEN_INT (0xbc - 0x100));
      emit_move_insn (A0 (PSImode, 1), chainval);
      emit_move_insn (A0 (QImode, 4), GEN_INT (0xcc - 0x100));
      emit_move_insn (A0 (PSImode, 5), function);
    }
#undef A0
}
1643
1644 /* Addressing Modes */
1645
1646 /* The r8c/m32c family supports a wide range of non-orthogonal
1647 addressing modes, including the ability to double-indirect on *some*
1648 of them. Not all insns support all modes, either, but we rely on
1649 predicates and constraints to deal with that. */
1650 #undef TARGET_LEGITIMATE_ADDRESS_P
1651 #define TARGET_LEGITIMATE_ADDRESS_P m32c_legitimate_address_p
bool
m32c_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
{
  int mode_adjust;

  /* Constant addresses are always legitimate.  */
  if (CONSTANT_P (x))
    return 1;

  /* The address itself must be in the native pointer mode for the
     chip family (HI or SI on A16, PSI on A24).  */
  if (TARGET_A16 && GET_MODE (x) != HImode && GET_MODE (x) != SImode)
    return 0;
  if (TARGET_A24 && GET_MODE (x) != PSImode)
    return 0;

  /* Wide references to memory will be split after reload, so we must
     ensure that all parts of such splits remain legitimate
     addresses.  */
  mode_adjust = GET_MODE_SIZE (mode) - 1;

  /* allowing PLUS yields mem:HI(plus:SI(mem:SI(plus:SI in m32c_split_move */
  /* Auto-modify addressing is only supported on the stack pointer.  */
  if (GET_CODE (x) == PRE_DEC
      || GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_MODIFY)
    {
      return (GET_CODE (XEXP (x, 0)) == REG
	      && REGNO (XEXP (x, 0)) == SP_REGNO);
    }

#if 0
  /* This is the double indirection detection, but it currently
     doesn't work as cleanly as this code implies, so until we've had
     a chance to debug it, leave it disabled.  */
  if (TARGET_A24 && GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) != PLUS)
    {
#if DEBUG_DOUBLE
      fprintf (stderr, "double indirect\n");
#endif
      x = XEXP (x, 0);
    }
#endif

  /* encode_pattern fills patternr[] with the sub-rtxes matched by the
     RTX_IS shape strings below.  */
  encode_pattern (x);
  if (RTX_IS ("r"))
    {
      /* Most indexable registers can be used without displacements,
	 although some of them will be emitted with an explicit zero
	 to please the assembler.  */
      switch (REGNO (patternr[0]))
	{
	case A1_REGNO:
	case SB_REGNO:
	case FB_REGNO:
	case SP_REGNO:
	  if (TARGET_A16 && GET_MODE (x) == SImode)
	    return 0;
	  /* Deliberate fall through: these registers are legitimate
	     bases in all remaining cases, same as $a0 below.  */
	case A0_REGNO:
	  return 1;

	default:
	  if (IS_PSEUDO (patternr[0], strict))
	    return 1;
	  return 0;
	}
    }

  if (TARGET_A16 && GET_MODE (x) == SImode)
    return 0;

  if (RTX_IS ("+ri"))
    {
      /* This is more interesting, because different base registers
	 allow for different displacements - both range and signedness
	 - and it differs from chip series to chip series too.  */
      int rn = REGNO (patternr[1]);
      HOST_WIDE_INT offs = INTVAL (patternr[2]);
      switch (rn)
	{
	case A0_REGNO:
	case A1_REGNO:
	case SB_REGNO:
	  /* The syntax only allows positive offsets, but when the
	     offsets span the entire memory range, we can simulate
	     negative offsets by wrapping.  */
	  if (TARGET_A16)
	    return (offs >= -65536 && offs <= 65535 - mode_adjust);
	  if (rn == SB_REGNO)
	    return (offs >= 0 && offs <= 65535 - mode_adjust);
	  /* A0 or A1 */
	  return (offs >= -16777216 && offs <= 16777215);

	case FB_REGNO:
	  if (TARGET_A16)
	    return (offs >= -128 && offs <= 127 - mode_adjust);
	  return (offs >= -65536 && offs <= 65535 - mode_adjust);

	case SP_REGNO:
	  return (offs >= -128 && offs <= 127 - mode_adjust);

	default:
	  if (IS_PSEUDO (patternr[1], strict))
	    return 1;
	  return 0;
	}
    }
  if (RTX_IS ("+rs") || RTX_IS ("+r+si"))
    {
      rtx reg = patternr[1];

      /* We don't know where the symbol is, so only allow base
	 registers which support displacements spanning the whole
	 address range.  */
      switch (REGNO (reg))
	{
	case A0_REGNO:
	case A1_REGNO:
	  /* $sb needs a secondary reload, but since it's involved in
	     memory address reloads too, we don't deal with it very
	     well.  */
	  /* case SB_REGNO: */
	  return 1;
	default:
	  if (IS_PSEUDO (reg, strict))
	    return 1;
	  return 0;
	}
    }
  return 0;
}
1777
1778 /* Implements REG_OK_FOR_BASE_P. */
1779 int
1780 m32c_reg_ok_for_base_p (rtx x, int strict)
1781 {
1782 if (GET_CODE (x) != REG)
1783 return 0;
1784 switch (REGNO (x))
1785 {
1786 case A0_REGNO:
1787 case A1_REGNO:
1788 case SB_REGNO:
1789 case FB_REGNO:
1790 case SP_REGNO:
1791 return 1;
1792 default:
1793 if (IS_PSEUDO (x, strict))
1794 return 1;
1795 return 0;
1796 }
1797 }
1798
1799 /* We have three choices for choosing fb->aN offsets. If we choose -128,
1800 we need one MOVA -128[fb],aN opcode and 16-bit aN displacements,
1801 like this:
1802 EB 4B FF mova -128[$fb],$a0
1803 D8 0C FF FF mov.w:Q #0,-1[$a0]
1804
1805 Alternately, we subtract the frame size, and hopefully use 8-bit aN
1806 displacements:
1807 7B F4 stc $fb,$a0
1808 77 54 00 01 sub #256,$a0
1809 D8 08 01 mov.w:Q #0,1[$a0]
1810
1811 If we don't offset (i.e. offset by zero), we end up with:
1812 7B F4 stc $fb,$a0
1813 D8 0C 00 FF mov.w:Q #0,-256[$a0]
1814
1815 We have to subtract *something* so that we have a PLUS rtx to mark
1816 that we've done this reload. The -128 offset will never result in
1817 an 8-bit aN offset, and the payoff for the second case is five
1818 loads *if* those loads are within 256 bytes of the other end of the
1819 frame, so the third case seems best. Note that we subtract the
1820 zero, but detect that in the addhi3 pattern. */
1821
1822 #define BIG_FB_ADJ 0
1823
1824 /* Implements LEGITIMIZE_ADDRESS. The only address we really have to
1825 worry about is frame base offsets, as $fb has a limited
1826 displacement range. We deal with this by attempting to reload $fb
1827 itself into an address register; that seems to result in the best
1828 code. */
1829 #undef TARGET_LEGITIMIZE_ADDRESS
1830 #define TARGET_LEGITIMIZE_ADDRESS m32c_legitimize_address
static rtx
m32c_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			 enum machine_mode mode)
{
#if DEBUG0
  fprintf (stderr, "m32c_legitimize_address for mode %s\n", mode_name[mode]);
  debug_rtx (x);
  fprintf (stderr, "\n");
#endif

  /* The only address we need to rewrite is $fb plus an offset outside
     $fb's signed 8-bit displacement range (adjusted so the whole
     access fits); see the BIG_FB_ADJ discussion above.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && REGNO (XEXP (x, 0)) == FB_REGNO
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && (INTVAL (XEXP (x, 1)) < -128
	  || INTVAL (XEXP (x, 1)) > (128 - GET_MODE_SIZE (mode))))
    {
      /* reload FB to A_REGS */
      /* Copy $fb into a fresh pseudo and rebuild the sum on top of
	 it; x is copied first so the original rtx is not mutated.  */
      rtx temp = gen_reg_rtx (Pmode);
      x = copy_rtx (x);
      emit_insn (gen_rtx_SET (VOIDmode, temp, XEXP (x, 0)));
      XEXP (x, 0) = temp;
    }

  return x;
}
1857
1858 /* Implements LEGITIMIZE_RELOAD_ADDRESS. See comment above. */
int
m32c_legitimize_reload_address (rtx * x,
				enum machine_mode mode,
				int opnum,
				int type, int ind_levels ATTRIBUTE_UNUSED)
{
#if DEBUG0
  fprintf (stderr, "\nm32c_legitimize_reload_address for mode %s\n",
	   mode_name[mode]);
  debug_rtx (*x);
#endif

  /* At one point, this function tried to get $fb copied to an address
     register, which in theory would maximize sharing, but gcc was
     *also* still trying to reload the whole address, and we'd run out
     of address registers.  So we let gcc do the naive (but safe)
     reload instead, when the above function doesn't handle it for
     us.

     The code below is a second attempt at the above.  */

  /* Case 1: $fb + out-of-range constant.  Split into
     ($fb + adjustment) + (offset - adjustment) and ask reload to put
     the inner sum into an address register.  With BIG_FB_ADJ == 0 the
     adjustment is zero; the extra PLUS only marks the address as
     already handled (detected in the addhi3 pattern).  */
  if (GET_CODE (*x) == PLUS
      && GET_CODE (XEXP (*x, 0)) == REG
      && REGNO (XEXP (*x, 0)) == FB_REGNO
      && GET_CODE (XEXP (*x, 1)) == CONST_INT
      && (INTVAL (XEXP (*x, 1)) < -128
	  || INTVAL (XEXP (*x, 1)) > (128 - GET_MODE_SIZE (mode))))
    {
      rtx sum;
      int offset = INTVAL (XEXP (*x, 1));
      int adjustment = -BIG_FB_ADJ;

      sum = gen_rtx_PLUS (Pmode, XEXP (*x, 0),
			  GEN_INT (adjustment));
      *x = gen_rtx_PLUS (Pmode, sum, GEN_INT (offset - adjustment));
      if (type == RELOAD_OTHER)
	type = RELOAD_FOR_OTHER_ADDRESS;
      push_reload (sum, NULL_RTX, &XEXP (*x, 0), NULL,
		   A_REGS, Pmode, VOIDmode, 0, 0, opnum,
		   (enum reload_type) type);
      return 1;
    }

  /* Case 2: an already-split (($fb + c1) + c2) form; reload the inner
     sum into an address register.  */
  if (GET_CODE (*x) == PLUS
      && GET_CODE (XEXP (*x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (*x, 0), 0)) == REG
      && REGNO (XEXP (XEXP (*x, 0), 0)) == FB_REGNO
      && GET_CODE (XEXP (XEXP (*x, 0), 1)) == CONST_INT
      && GET_CODE (XEXP (*x, 1)) == CONST_INT
      )
    {
      if (type == RELOAD_OTHER)
	type = RELOAD_FOR_OTHER_ADDRESS;
      push_reload (XEXP (*x, 0), NULL_RTX, &XEXP (*x, 0), NULL,
		   A_REGS, Pmode, VOIDmode, 0, 0, opnum,
		   (enum reload_type) type);
      return 1;
    }

  return 0;
}
1920
1921 /* Return the appropriate mode for a named address pointer. */
1922 #undef TARGET_ADDR_SPACE_POINTER_MODE
1923 #define TARGET_ADDR_SPACE_POINTER_MODE m32c_addr_space_pointer_mode
1924 static enum machine_mode
1925 m32c_addr_space_pointer_mode (addr_space_t addrspace)
1926 {
1927 switch (addrspace)
1928 {
1929 case ADDR_SPACE_GENERIC:
1930 return TARGET_A24 ? PSImode : HImode;
1931 case ADDR_SPACE_FAR:
1932 return SImode;
1933 default:
1934 gcc_unreachable ();
1935 }
1936 }
1937
1938 /* Return the appropriate mode for a named address address. */
1939 #undef TARGET_ADDR_SPACE_ADDRESS_MODE
1940 #define TARGET_ADDR_SPACE_ADDRESS_MODE m32c_addr_space_address_mode
1941 static enum machine_mode
1942 m32c_addr_space_address_mode (addr_space_t addrspace)
1943 {
1944 switch (addrspace)
1945 {
1946 case ADDR_SPACE_GENERIC:
1947 return TARGET_A24 ? PSImode : HImode;
1948 case ADDR_SPACE_FAR:
1949 return SImode;
1950 default:
1951 gcc_unreachable ();
1952 }
1953 }
1954
1955 /* Like m32c_legitimate_address_p, except with named addresses. */
1956 #undef TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P
1957 #define TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P \
1958 m32c_addr_space_legitimate_address_p
static bool
m32c_addr_space_legitimate_address_p (enum machine_mode mode, rtx x,
				      bool strict, addr_space_t as)
{
  if (as == ADDR_SPACE_FAR)
    {
      /* NOTE(review): far addresses are rejected outright on A24
	 parts -- presumably far accesses are synthesized differently
	 there; confirm against the far-space move patterns.  */
      if (TARGET_A24)
	return 0;
      /* encode_pattern fills patternr[] with the sub-rtxes matched by
	 the RTX_IS shape strings below.  */
      encode_pattern (x);
      /* Bare register: only $a0 (or a pseudo) in SImode.  */
      if (RTX_IS ("r"))
	{
	  if (GET_MODE (x) != SImode)
	    return 0;
	  switch (REGNO (patternr[0]))
	    {
	    case A0_REGNO:
	      return 1;

	    default:
	      if (IS_PSEUDO (patternr[0], strict))
		return 1;
	      return 0;
	    }
	}
      /* Zero-extended register plus constant: $a0 with a 20-bit
	 unsigned displacement.  */
      if (RTX_IS ("+^Sri"))
	{
	  int rn = REGNO (patternr[3]);
	  HOST_WIDE_INT offs = INTVAL (patternr[4]);
	  if (GET_MODE (patternr[3]) != HImode)
	    return 0;
	  switch (rn)
	    {
	    case A0_REGNO:
	      return (offs >= 0 && offs <= 0xfffff);

	    default:
	      if (IS_PSEUDO (patternr[3], strict))
		return 1;
	      return 0;
	    }
	}
      /* Zero-extended register plus symbol: $a0 only.  */
      if (RTX_IS ("+^Srs"))
	{
	  int rn = REGNO (patternr[3]);
	  if (GET_MODE (patternr[3]) != HImode)
	    return 0;
	  switch (rn)
	    {
	    case A0_REGNO:
	      return 1;

	    default:
	      if (IS_PSEUDO (patternr[3], strict))
		return 1;
	      return 0;
	    }
	}
      /* Zero-extended (register + constant) plus symbol: $a0 only.  */
      if (RTX_IS ("+^S+ris"))
	{
	  int rn = REGNO (patternr[4]);
	  if (GET_MODE (patternr[4]) != HImode)
	    return 0;
	  switch (rn)
	    {
	    case A0_REGNO:
	      return 1;

	    default:
	      if (IS_PSEUDO (patternr[4], strict))
		return 1;
	      return 0;
	    }
	}
      /* A bare symbol is always addressable in the far space.  */
      if (RTX_IS ("s"))
	{
	  return 1;
	}
      return 0;
    }

  else if (as != ADDR_SPACE_GENERIC)
    gcc_unreachable ();

  /* Generic space: defer to the ordinary legitimacy check.  */
  return m32c_legitimate_address_p (mode, x, strict);
}
2044
2045 /* Like m32c_legitimate_address, except with named address support. */
2046 #undef TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS
2047 #define TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS m32c_addr_space_legitimize_address
2048 static rtx
2049 m32c_addr_space_legitimize_address (rtx x, rtx oldx, enum machine_mode mode,
2050 addr_space_t as)
2051 {
2052 if (as != ADDR_SPACE_GENERIC)
2053 {
2054 #if DEBUG0
2055 fprintf (stderr, "\033[36mm32c_addr_space_legitimize_address for mode %s\033[0m\n", mode_name[mode]);
2056 debug_rtx (x);
2057 fprintf (stderr, "\n");
2058 #endif
2059
2060 if (GET_CODE (x) != REG)
2061 {
2062 x = force_reg (SImode, x);
2063 }
2064 return x;
2065 }
2066
2067 return m32c_legitimize_address (x, oldx, mode);
2068 }
2069
2070 /* Determine if one named address space is a subset of another. */
2071 #undef TARGET_ADDR_SPACE_SUBSET_P
2072 #define TARGET_ADDR_SPACE_SUBSET_P m32c_addr_space_subset_p
2073 static bool
2074 m32c_addr_space_subset_p (addr_space_t subset, addr_space_t superset)
2075 {
2076 gcc_assert (subset == ADDR_SPACE_GENERIC || subset == ADDR_SPACE_FAR);
2077 gcc_assert (superset == ADDR_SPACE_GENERIC || superset == ADDR_SPACE_FAR);
2078
2079 if (subset == superset)
2080 return true;
2081
2082 else
2083 return (subset == ADDR_SPACE_GENERIC && superset == ADDR_SPACE_FAR);
2084 }
2085
2086 #undef TARGET_ADDR_SPACE_CONVERT
2087 #define TARGET_ADDR_SPACE_CONVERT m32c_addr_space_convert
2088 /* Convert from one address space to another. */
static rtx
m32c_addr_space_convert (rtx op, tree from_type, tree to_type)
{
  addr_space_t from_as = TYPE_ADDR_SPACE (TREE_TYPE (from_type));
  addr_space_t to_as = TYPE_ADDR_SPACE (TREE_TYPE (to_type));
  rtx result;

  /* Only the generic and far spaces exist on this target.  */
  gcc_assert (from_as == ADDR_SPACE_GENERIC || from_as == ADDR_SPACE_FAR);
  gcc_assert (to_as == ADDR_SPACE_GENERIC || to_as == ADDR_SPACE_FAR);

  if (to_as == ADDR_SPACE_GENERIC && from_as == ADDR_SPACE_FAR)
    {
      /* Far -> near: keep the low 16 bits.  This is unpredictable, as
	 we're truncating off usable address bits.  */

      result = gen_reg_rtx (HImode);
      emit_move_insn (result, simplify_subreg (HImode, op, SImode, 0));
      return result;
    }
  else if (to_as == ADDR_SPACE_FAR && from_as == ADDR_SPACE_GENERIC)
    {
      /* Near -> far: zero-extend to 32 bits.  This always works.  */
      result = gen_reg_rtx (SImode);
      emit_insn (gen_zero_extendhisi2 (result, op));
      return result;
    }
  else
    gcc_unreachable ();
}
2118
2119 /* Condition Code Status */
2120
2121 #undef TARGET_FIXED_CONDITION_CODE_REGS
2122 #define TARGET_FIXED_CONDITION_CODE_REGS m32c_fixed_condition_code_regs
static bool
m32c_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
{
  /* The FLG register is the single condition-code register; there is
     no secondary one, so report INVALID_REGNUM for it.  */
  *p1 = FLG_REGNO;
  *p2 = INVALID_REGNUM;
  return true;
}
2130
2131 /* Describing Relative Costs of Operations */
2132
2133 /* Implements TARGET_REGISTER_MOVE_COST. We make impossible moves
2134 prohibitively expensive, like trying to put QIs in r2/r3 (there are
2135 no opcodes to do that). We also discourage use of mem* registers
2136 since they're really memory. */
2137
2138 #undef TARGET_REGISTER_MOVE_COST
2139 #define TARGET_REGISTER_MOVE_COST m32c_register_move_cost
2140
static int
m32c_register_move_cost (enum machine_mode mode, reg_class_t from,
			 reg_class_t to)
{
  /* Baseline cost for any register-to-register move.  */
  int cost = COSTS_N_INSNS (3);
  HARD_REG_SET cc;

  /* FIXME: pick real values, but not 2 for now.  */
  /* cc = union of the source and destination class contents.  */
  COPY_HARD_REG_SET (cc, reg_class_contents[(int) from]);
  IOR_HARD_REG_SET (cc, reg_class_contents[(int) to]);

  /* QImode data cannot live in $r2/$r3 (no byte opcodes for them):
     prohibit moves wholly within R23_REGS, and heavily discourage
     moves that merely touch them.  */
  if (mode == QImode
      && hard_reg_set_intersect_p (cc, reg_class_contents[R23_REGS]))
    {
      if (hard_reg_set_subset_p (cc, reg_class_contents[R23_REGS]))
	cost = COSTS_N_INSNS (1000);
      else
	cost = COSTS_N_INSNS (80);
    }

  /* Impossible moves (a class that can't hold the mode at all).  */
  if (!class_can_hold_mode (from, mode) || !class_can_hold_mode (to, mode))
    cost = COSTS_N_INSNS (1000);

  /* Control registers are slow to access.  */
  if (reg_classes_intersect_p (from, CR_REGS))
    cost += COSTS_N_INSNS (5);

  if (reg_classes_intersect_p (to, CR_REGS))
    cost += COSTS_N_INSNS (5);

  /* The mem0..mem7 "registers" are really memory; penalize them,
     strongly when the move is certainly through them.  */
  if (from == MEM_REGS || to == MEM_REGS)
    cost += COSTS_N_INSNS (50);
  else if (reg_classes_intersect_p (from, MEM_REGS)
	   || reg_classes_intersect_p (to, MEM_REGS))
    cost += COSTS_N_INSNS (10);

#if DEBUG0
  fprintf (stderr, "register_move_cost %s from %s to %s = %d\n",
	   mode_name[mode], class_names[(int) from], class_names[(int) to],
	   cost);
#endif
  return cost;
}
2183
2184 /* Implements TARGET_MEMORY_MOVE_COST. */
2185
2186 #undef TARGET_MEMORY_MOVE_COST
2187 #define TARGET_MEMORY_MOVE_COST m32c_memory_move_cost
2188
static int
m32c_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
		       reg_class_t rclass ATTRIBUTE_UNUSED,
		       bool in ATTRIBUTE_UNUSED)
{
  /* FIXME: pick real values.  A flat cost for all classes, modes and
     directions for now.  */
  return COSTS_N_INSNS (10);
}
2197
/* Here we try to describe when we use multiple opcodes for one RTX so
   that gcc knows when to use them.  */
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS m32c_rtx_costs
/* Implements TARGET_RTX_COSTS.  Adds the estimated cost of X to
   *TOTAL.  Returning true means the cost is final; returning false
   lets the generic code recurse into X's operands.  */
static bool
m32c_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
		int *total, bool speed ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case REG:
      /* The mem* pseudo-registers are backed by memory; strongly
	 discourage their use.  */
      if (REGNO (x) >= MEM0_REGNO && REGNO (x) <= MEM7_REGNO)
	*total += COSTS_N_INSNS (500);
      else
	*total += COSTS_N_INSNS (1);
      return true;

    case ASHIFT:
    case LSHIFTRT:
    case ASHIFTRT:
      /* Variable shift counts must be loaded into r1h first.  */
      if (GET_CODE (XEXP (x, 1)) != CONST_INT)
	{
	  /* mov.b r1l, r1h */
	  *total += COSTS_N_INSNS (1);
	  return true;
	}
      /* Shift counts beyond the +/-8 immediate range need an extra
	 constant load.  */
      if (INTVAL (XEXP (x, 1)) > 8
	  || INTVAL (XEXP (x, 1)) < -8)
	{
	  /* mov.b #N, r1l */
	  /* mov.b r1l, r1h */
	  *total += COSTS_N_INSNS (2);
	  return true;
	}
      return true;

    case LE:
    case LEU:
    case LT:
    case LTU:
    case GT:
    case GTU:
    case GE:
    case GEU:
    case NE:
    case EQ:
      /* Storing a comparison result into a register costs extra.  */
      if (outer_code == SET)
	{
	  *total += COSTS_N_INSNS (2);
	  return true;
	}
      break;

    case ZERO_EXTRACT:
      {
	/* Bit operations: cost depends on the addressing mode of the
	   operand being accessed.  */
	rtx dest = XEXP (x, 0);
	rtx addr = XEXP (dest, 0);
	switch (GET_CODE (addr))
	  {
	  case CONST_INT:
	    *total += COSTS_N_INSNS (1);
	    break;
	  case SYMBOL_REF:
	    *total += COSTS_N_INSNS (3);
	    break;
	  default:
	    *total += COSTS_N_INSNS (2);
	    break;
	  }
	return true;
      }
      break;

    default:
      /* Reasonable default.  On 16-bit-address parts, SImode
	 operations generally need two opcodes.  */
      if (TARGET_A16 && GET_MODE(x) == SImode)
	*total += COSTS_N_INSNS (2);
      break;
    }
  return false;
}
2279
2280 #undef TARGET_ADDRESS_COST
2281 #define TARGET_ADDRESS_COST m32c_address_cost
2282 static int
2283 m32c_address_cost (rtx addr, enum machine_mode mode ATTRIBUTE_UNUSED,
2284 addr_space_t as ATTRIBUTE_UNUSED,
2285 bool speed ATTRIBUTE_UNUSED)
2286 {
2287 int i;
2288 /* fprintf(stderr, "\naddress_cost\n");
2289 debug_rtx(addr);*/
2290 switch (GET_CODE (addr))
2291 {
2292 case CONST_INT:
2293 i = INTVAL (addr);
2294 if (i == 0)
2295 return COSTS_N_INSNS(1);
2296 if (0 < i && i <= 255)
2297 return COSTS_N_INSNS(2);
2298 if (0 < i && i <= 65535)
2299 return COSTS_N_INSNS(3);
2300 return COSTS_N_INSNS(4);
2301 case SYMBOL_REF:
2302 return COSTS_N_INSNS(4);
2303 case REG:
2304 return COSTS_N_INSNS(1);
2305 case PLUS:
2306 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
2307 {
2308 i = INTVAL (XEXP (addr, 1));
2309 if (i == 0)
2310 return COSTS_N_INSNS(1);
2311 if (0 < i && i <= 255)
2312 return COSTS_N_INSNS(2);
2313 if (0 < i && i <= 65535)
2314 return COSTS_N_INSNS(3);
2315 }
2316 return COSTS_N_INSNS(4);
2317 default:
2318 return 0;
2319 }
2320 }
2321
2322 /* Defining the Output Assembler Language */
2323
2324 /* Output of Data */
2325
2326 /* We may have 24 bit sizes, which is the native address size.
2327 Currently unused, but provided for completeness. */
2328 #undef TARGET_ASM_INTEGER
2329 #define TARGET_ASM_INTEGER m32c_asm_integer
2330 static bool
2331 m32c_asm_integer (rtx x, unsigned int size, int aligned_p)
2332 {
2333 switch (size)
2334 {
2335 case 3:
2336 fprintf (asm_out_file, "\t.3byte\t");
2337 output_addr_const (asm_out_file, x);
2338 fputc ('\n', asm_out_file);
2339 return true;
2340 case 4:
2341 if (GET_CODE (x) == SYMBOL_REF)
2342 {
2343 fprintf (asm_out_file, "\t.long\t");
2344 output_addr_const (asm_out_file, x);
2345 fputc ('\n', asm_out_file);
2346 return true;
2347 }
2348 break;
2349 }
2350 return default_assemble_integer (x, size, aligned_p);
2351 }
2352
2353 /* Output of Assembler Instructions */
2354
/* We use a lookup table because the addressing modes are non-orthogonal.  */

/* Operand-printing conversion table, consumed by m32c_print_operand.
   CODE is the print modifier letter (0 for none).  PATTERN is matched
   against the string built by encode_pattern () for the operand
   ('r' reg, 'm' mem, 'i' int, 's' symbol, 'l' label, '+' plus,
   '^Z'/'^S' address-space wrappers).  FORMAT drives the output:
   a digit N emits patternr[N], 'z' emits a '0' displacement when the
   base register requires one, '\\' quotes the next character, and any
   other character is copied to the output verbatim.  */
static struct
{
  char code;
  char const *pattern;
  char const *format;
}
const conversions[] = {
  { 0, "r", "0" },

  { 0, "mr", "z[1]" },
  { 0, "m+ri", "3[2]" },
  { 0, "m+rs", "3[2]" },
  { 0, "m+^Zrs", "5[4]" },
  { 0, "m+^Zri", "5[4]" },
  { 0, "m+^Z+ris", "7+6[5]" },
  { 0, "m+^Srs", "5[4]" },
  { 0, "m+^Sri", "5[4]" },
  { 0, "m+^S+ris", "7+6[5]" },
  { 0, "m+r+si", "4+5[2]" },
  { 0, "ms", "1" },
  { 0, "mi", "1" },
  { 0, "m+si", "2+3" },

  { 0, "mmr", "[z[2]]" },
  { 0, "mm+ri", "[4[3]]" },
  { 0, "mm+rs", "[4[3]]" },
  { 0, "mm+r+si", "[5+6[3]]" },
  { 0, "mms", "[[2]]" },
  { 0, "mmi", "[[2]]" },
  { 0, "mm+si", "[4[3]]" },

  { 0, "i", "#0" },
  { 0, "s", "#0" },
  { 0, "+si", "#1+2" },
  { 0, "l", "#0" },

  { 'l', "l", "0" },
  { 'd', "i", "0" },
  { 'd', "s", "0" },
  { 'd', "+si", "1+2" },
  { 'D', "i", "0" },
  { 'D', "s", "0" },
  { 'D', "+si", "1+2" },
  { 'x', "i", "#0" },
  { 'X', "i", "#0" },
  { 'm', "i", "#0" },
  { 'b', "i", "#0" },
  { 'B', "i", "0" },
  { 'p', "i", "0" },

  { 0, 0, 0 }
};
2409
/* This is in order according to the bitfield that pushm/popm use.
   Bit 7 of the opcode's register-set byte selects r0, bit 0 selects
   fb; m32c_print_operand's 'p' case indexes this table by bit
   number.  */
static char const *pushm_regs[] = {
  "fb", "sb", "a1", "a0", "r3", "r2", "r1", "r0"
};
2414
/* Implements TARGET_PRINT_OPERAND.  Prints operand X to FILE under
   print modifier CODE.  A handful of modifiers ('u', 'U', '!', '&',
   'C', 'c', 'h', 'H') are handled inline below; everything else is
   table-driven: the operand is encoded into the global pattern /
   patternr buffers by encode_pattern () and matched against
   conversions[], whose format string is then interpreted.  */

#undef TARGET_PRINT_OPERAND
#define TARGET_PRINT_OPERAND m32c_print_operand

static void
m32c_print_operand (FILE * file, rtx x, int code)
{
  int i, j, b;
  const char *comma;
  HOST_WIDE_INT ival;
  int unsigned_const = 0;
  int force_sign;

  /* Multiplies; constants are converted to sign-extended format but
     we need unsigned, so 'u' and 'U' tell us what size unsigned we
     need. */
  if (code == 'u')
    {
      unsigned_const = 2;
      code = 0;
    }
  if (code == 'U')
    {
      unsigned_const = 1;
      code = 0;
    }
  /* This one is only for debugging; you can put it in a pattern to
     force this error. */
  if (code == '!')
    {
      fprintf (stderr, "dj: unreviewed pattern:");
      if (current_output_insn)
	debug_rtx (current_output_insn);
      gcc_unreachable ();
    }
  /* PSImode operations are either .w or .l depending on the target. */
  if (code == '&')
    {
      if (TARGET_A16)
	fprintf (file, "w");
      else
	fprintf (file, "l");
      return;
    }
  /* Inverted conditionals. */
  if (code == 'C')
    {
      switch (GET_CODE (x))
	{
	case LE:
	  fputs ("gt", file);
	  break;
	case LEU:
	  fputs ("gtu", file);
	  break;
	case LT:
	  fputs ("ge", file);
	  break;
	case LTU:
	  fputs ("geu", file);
	  break;
	case GT:
	  fputs ("le", file);
	  break;
	case GTU:
	  fputs ("leu", file);
	  break;
	case GE:
	  fputs ("lt", file);
	  break;
	case GEU:
	  fputs ("ltu", file);
	  break;
	case NE:
	  fputs ("eq", file);
	  break;
	case EQ:
	  fputs ("ne", file);
	  break;
	default:
	  gcc_unreachable ();
	}
      return;
    }
  /* Regular conditionals. */
  if (code == 'c')
    {
      switch (GET_CODE (x))
	{
	case LE:
	  fputs ("le", file);
	  break;
	case LEU:
	  fputs ("leu", file);
	  break;
	case LT:
	  fputs ("lt", file);
	  break;
	case LTU:
	  fputs ("ltu", file);
	  break;
	case GT:
	  fputs ("gt", file);
	  break;
	case GTU:
	  fputs ("gtu", file);
	  break;
	case GE:
	  fputs ("ge", file);
	  break;
	case GEU:
	  fputs ("geu", file);
	  break;
	case NE:
	  fputs ("ne", file);
	  break;
	case EQ:
	  fputs ("eq", file);
	  break;
	default:
	  gcc_unreachable ();
	}
      return;
    }
  /* Used in negsi2 to do HImode ops on the two parts of an SImode
     operand. */
  if (code == 'h' && GET_MODE (x) == SImode)
    {
      x = m32c_subreg (HImode, x, SImode, 0);
      code = 0;
    }
  if (code == 'H' && GET_MODE (x) == SImode)
    {
      x = m32c_subreg (HImode, x, SImode, 2);
      code = 0;
    }
  if (code == 'h' && GET_MODE (x) == HImode)
    {
      x = m32c_subreg (QImode, x, HImode, 0);
      code = 0;
    }
  if (code == 'H' && GET_MODE (x) == HImode)
    {
      /* We can't actually represent this as an rtx.  Do it here. */
      if (GET_CODE (x) == REG)
	{
	  switch (REGNO (x))
	    {
	    case R0_REGNO:
	      fputs ("r0h", file);
	      return;
	    case R1_REGNO:
	      fputs ("r1h", file);
	      return;
	    default:
	      gcc_unreachable();
	    }
	}
      /* This should be a MEM. */
      x = m32c_subreg (QImode, x, HImode, 1);
      code = 0;
    }
  /* This is for BMcond, which always wants word register names. */
  if (code == 'h' && GET_MODE (x) == QImode)
    {
      if (GET_CODE (x) == REG)
	x = gen_rtx_REG (HImode, REGNO (x));
      code = 0;
    }
  /* 'x' and 'X' need to be ignored for non-immediates. */
  if ((code == 'x' || code == 'X') && GET_CODE (x) != CONST_INT)
    code = 0;

  /* Table-driven part: find the conversions[] row whose modifier and
     encoded pattern match, then interpret its format string.  */
  encode_pattern (x);
  force_sign = 0;
  for (i = 0; conversions[i].pattern; i++)
    if (conversions[i].code == code
	&& streq (conversions[i].pattern, pattern))
      {
	for (j = 0; conversions[i].format[j]; j++)
	  /* backslash quotes the next character in the output pattern. */
	  if (conversions[i].format[j] == '\\')
	    {
	      fputc (conversions[i].format[j + 1], file);
	      j++;
	    }
	  /* Digits in the output pattern indicate that the
	     corresponding RTX is to be output at that point. */
	  else if (ISDIGIT (conversions[i].format[j]))
	    {
	      rtx r = patternr[conversions[i].format[j] - '0'];
	      switch (GET_CODE (r))
		{
		case REG:
		  fprintf (file, "%s",
			   reg_name_with_mode (REGNO (r), GET_MODE (r)));
		  break;
		case CONST_INT:
		  switch (code)
		    {
		    case 'b':
		    case 'B':
		      {
			/* Bit-number form: try the value itself, then
			   its 16-bit and 8-bit complements, since the
			   insn may encode a cleared bit.  */
			int v = INTVAL (r);
			int i = (int) exact_log2 (v);
			if (i == -1)
			  i = (int) exact_log2 ((v ^ 0xffff) & 0xffff);
			if (i == -1)
			  i = (int) exact_log2 ((v ^ 0xff) & 0xff);
			/* Bit position. */
			fprintf (file, "%d", i);
		      }
		      break;
		    case 'x':
		      /* Unsigned byte. */
		      fprintf (file, HOST_WIDE_INT_PRINT_HEX,
			       INTVAL (r) & 0xff);
		      break;
		    case 'X':
		      /* Unsigned word. */
		      fprintf (file, HOST_WIDE_INT_PRINT_HEX,
			       INTVAL (r) & 0xffff);
		      break;
		    case 'p':
		      /* pushm and popm encode a register set into a single byte. */
		      comma = "";
		      for (b = 7; b >= 0; b--)
			if (INTVAL (r) & (1 << b))
			  {
			    fprintf (file, "%s%s", comma, pushm_regs[b]);
			    comma = ",";
			  }
		      break;
		    case 'm':
		      /* "Minus".  Output -X */
		      ival = (-INTVAL (r) & 0xffff);
		      if (ival & 0x8000)
			ival = ival - 0x10000;
		      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
		      break;
		    default:
		      ival = INTVAL (r);
		      if (conversions[i].format[j + 1] == '[' && ival < 0)
			{
			  /* We can simulate negative displacements by
			     taking advantage of address space
			     wrapping when the offset can span the
			     entire address range. */
			  rtx base =
			    patternr[conversions[i].format[j + 2] - '0'];
			  if (GET_CODE (base) == REG)
			    switch (REGNO (base))
			      {
			      case A0_REGNO:
			      case A1_REGNO:
				if (TARGET_A24)
				  ival = 0x1000000 + ival;
				else
				  ival = 0x10000 + ival;
				break;
			      case SB_REGNO:
				if (TARGET_A16)
				  ival = 0x10000 + ival;
				break;
			      }
			}
		      else if (code == 'd' && ival < 0 && j == 0)
			/* The "mova" opcode is used to do addition by
			   computing displacements, but again, we need
			   displacements to be unsigned *if* they're
			   the only component of the displacement
			   (i.e. no "symbol-4" type displacement). */
			ival = (TARGET_A24 ? 0x1000000 : 0x10000) + ival;

		      if (conversions[i].format[j] == '0')
			{
			  /* More conversions to unsigned. */
			  if (unsigned_const == 2)
			    ival &= 0xffff;
			  if (unsigned_const == 1)
			    ival &= 0xff;
			}
		      if (streq (conversions[i].pattern, "mi")
			  || streq (conversions[i].pattern, "mmi"))
			{
			  /* Integers used as addresses are unsigned. */
			  ival &= (TARGET_A24 ? 0xffffff : 0xffff);
			}
		      if (force_sign && ival >= 0)
			fputc ('+', file);
		      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
		      break;
		    }
		  break;
		case CONST_DOUBLE:
		  /* We don't have const_double constants.  If it
		     happens, make it obvious. */
		  fprintf (file, "[const_double 0x%lx]",
			   (unsigned long) CONST_DOUBLE_HIGH (r));
		  break;
		case SYMBOL_REF:
		  assemble_name (file, XSTR (r, 0));
		  break;
		case LABEL_REF:
		  output_asm_label (r);
		  break;
		default:
		  fprintf (stderr, "don't know how to print this operand:");
		  debug_rtx (r);
		  gcc_unreachable ();
		}
	    }
	  else
	    {
	      if (conversions[i].format[j] == 'z')
		{
		  /* Some addressing modes *must* have a displacement,
		     so insert a zero here if needed. */
		  int k;
		  for (k = j + 1; conversions[i].format[k]; k++)
		    if (ISDIGIT (conversions[i].format[k]))
		      {
			rtx reg = patternr[conversions[i].format[k] - '0'];
			if (GET_CODE (reg) == REG
			    && (REGNO (reg) == SB_REGNO
				|| REGNO (reg) == FB_REGNO
				|| REGNO (reg) == SP_REGNO))
			  fputc ('0', file);
		      }
		  continue;
		}
	      /* Signed displacements off symbols need to have signs
		 blended cleanly. */
	      if (conversions[i].format[j] == '+'
		  && (!code || code == 'D' || code == 'd')
		  && ISDIGIT (conversions[i].format[j + 1])
		  && (GET_CODE (patternr[conversions[i].format[j + 1] - '0'])
		      == CONST_INT))
		{
		  force_sign = 1;
		  continue;
		}
	      fputc (conversions[i].format[j], file);
	    }
	break;
      }
  /* No table row matched: complain on stderr and emit a marker into
     the assembly so the failure is visible.  */
  if (!conversions[i].pattern)
    {
      fprintf (stderr, "unconvertible operand %c `%s'", code ? code : '-',
	       pattern);
      debug_rtx (x);
      fprintf (file, "[%c.%s]", code ? code : '-', pattern);
    }

  return;
}
2772
/* Implements TARGET_PRINT_OPERAND_PUNCT_VALID_P.

   See m32c_print_operand above for descriptions of what these do. */

#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P m32c_print_operand_punct_valid_p

static bool
m32c_print_operand_punct_valid_p (unsigned char c)
{
  /* '&' selects the PSImode size suffix; '!' is the debugging trap.  */
  return c == '&' || c == '!';
}
2788
2789 /* Implements TARGET_PRINT_OPERAND_ADDRESS. Nothing unusual here. */
2790
2791 #undef TARGET_PRINT_OPERAND_ADDRESS
2792 #define TARGET_PRINT_OPERAND_ADDRESS m32c_print_operand_address
2793
2794 static void
2795 m32c_print_operand_address (FILE * stream, rtx address)
2796 {
2797 if (GET_CODE (address) == MEM)
2798 address = XEXP (address, 0);
2799 else
2800 /* cf: gcc.dg/asm-4.c. */
2801 gcc_assert (GET_CODE (address) == REG);
2802
2803 m32c_print_operand (stream, address, 0);
2804 }
2805
2806 /* Implements ASM_OUTPUT_REG_PUSH. Control registers are pushed
2807 differently than general registers. */
2808 void
2809 m32c_output_reg_push (FILE * s, int regno)
2810 {
2811 if (regno == FLG_REGNO)
2812 fprintf (s, "\tpushc\tflg\n");
2813 else
2814 fprintf (s, "\tpush.%c\t%s\n",
2815 " bwll"[reg_push_size (regno)], reg_names[regno]);
2816 }
2817
2818 /* Likewise for ASM_OUTPUT_REG_POP. */
2819 void
2820 m32c_output_reg_pop (FILE * s, int regno)
2821 {
2822 if (regno == FLG_REGNO)
2823 fprintf (s, "\tpopc\tflg\n");
2824 else
2825 fprintf (s, "\tpop.%c\t%s\n",
2826 " bwll"[reg_push_size (regno)], reg_names[regno]);
2827 }
2828
2829 /* Defining target-specific uses of `__attribute__' */
2830
/* Used to simplify the logic below.  Find the attributes wherever
   they may be: on the type itself, on the decl, or failing that on
   the decl's type.  NOTE(review): the expansion is an unparenthesized
   conditional expression — only use it as a complete initializer or
   operand, never embedded in a larger expression.  */
#define M32C_ATTRIBUTES(decl) \
  (TYPE_P (decl)) ? TYPE_ATTRIBUTES (decl) \
                : DECL_ATTRIBUTES (decl) \
                  ? (DECL_ATTRIBUTES (decl)) \
                  : TYPE_ATTRIBUTES (TREE_TYPE (decl))
2838
2839 /* Returns TRUE if the given tree has the "interrupt" attribute. */
2840 static int
2841 interrupt_p (tree node ATTRIBUTE_UNUSED)
2842 {
2843 tree list = M32C_ATTRIBUTES (node);
2844 while (list)
2845 {
2846 if (is_attribute_p ("interrupt", TREE_PURPOSE (list)))
2847 return 1;
2848 list = TREE_CHAIN (list);
2849 }
2850 return fast_interrupt_p (node);
2851 }
2852
2853 /* Returns TRUE if the given tree has the "bank_switch" attribute. */
2854 static int
2855 bank_switch_p (tree node ATTRIBUTE_UNUSED)
2856 {
2857 tree list = M32C_ATTRIBUTES (node);
2858 while (list)
2859 {
2860 if (is_attribute_p ("bank_switch", TREE_PURPOSE (list)))
2861 return 1;
2862 list = TREE_CHAIN (list);
2863 }
2864 return 0;
2865 }
2866
2867 /* Returns TRUE if the given tree has the "fast_interrupt" attribute. */
2868 static int
2869 fast_interrupt_p (tree node ATTRIBUTE_UNUSED)
2870 {
2871 tree list = M32C_ATTRIBUTES (node);
2872 while (list)
2873 {
2874 if (is_attribute_p ("fast_interrupt", TREE_PURPOSE (list)))
2875 return 1;
2876 list = TREE_CHAIN (list);
2877 }
2878 return 0;
2879 }
2880
/* Attribute handler shared by "interrupt", "bank_switch" and
   "fast_interrupt" (see m32c_attribute_table below).  These take no
   arguments, so nothing is validated here; the attribute is simply
   recorded for interrupt_p () and friends to find later.  */
static tree
interrupt_handler (tree * node ATTRIBUTE_UNUSED,
		   tree name ATTRIBUTE_UNUSED,
		   tree args ATTRIBUTE_UNUSED,
		   int flags ATTRIBUTE_UNUSED,
		   bool * no_add_attrs ATTRIBUTE_UNUSED)
{
  return NULL_TREE;
}
2890
2891 /* Returns TRUE if given tree has the "function_vector" attribute. */
2892 int
2893 m32c_special_page_vector_p (tree func)
2894 {
2895 tree list;
2896
2897 if (TREE_CODE (func) != FUNCTION_DECL)
2898 return 0;
2899
2900 list = M32C_ATTRIBUTES (func);
2901 while (list)
2902 {
2903 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2904 return 1;
2905 list = TREE_CHAIN (list);
2906 }
2907 return 0;
2908 }
2909
/* Handler for the "function_vector" attribute.  Validates that the
   target supports it (it is rejected on R8C), that it is applied to a
   function declaration, and that its single argument is an integer
   constant in [18, 255]; on any failure a warning is issued and the
   attribute is dropped via *NO_ADD_ATTRS.  */
static tree
function_vector_handler (tree * node ATTRIBUTE_UNUSED,
			 tree name ATTRIBUTE_UNUSED,
			 tree args ATTRIBUTE_UNUSED,
			 int flags ATTRIBUTE_UNUSED,
			 bool * no_add_attrs ATTRIBUTE_UNUSED)
{
  if (TARGET_R8C)
    {
      /* The attribute is not supported for R8C target. */
      warning (OPT_Wattributes,
               "%qE attribute is not supported for R8C target",
               name);
      *no_add_attrs = true;
    }
  else if (TREE_CODE (*node) != FUNCTION_DECL)
    {
      /* The attribute must be applied to functions only. */
      warning (OPT_Wattributes,
               "%qE attribute applies only to functions",
               name);
      *no_add_attrs = true;
    }
  else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
    {
      /* The argument must be a constant integer. */
      warning (OPT_Wattributes,
               "%qE attribute argument not an integer constant",
               name);
      *no_add_attrs = true;
    }
  else if (TREE_INT_CST_LOW (TREE_VALUE (args)) < 18
           || TREE_INT_CST_LOW (TREE_VALUE (args)) > 255)
    {
      /* The argument value must be between 18 to 255. */
      warning (OPT_Wattributes,
               "%qE attribute argument should be between 18 to 255",
               name);
      *no_add_attrs = true;
    }
  return NULL_TREE;
}
2952
2953 /* If the function is assigned the attribute 'function_vector', it
2954 returns the function vector number, otherwise returns zero. */
2955 int
2956 current_function_special_page_vector (rtx x)
2957 {
2958 int num;
2959
2960 if ((GET_CODE(x) == SYMBOL_REF)
2961 && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
2962 {
2963 tree list;
2964 tree t = SYMBOL_REF_DECL (x);
2965
2966 if (TREE_CODE (t) != FUNCTION_DECL)
2967 return 0;
2968
2969 list = M32C_ATTRIBUTES (t);
2970 while (list)
2971 {
2972 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2973 {
2974 num = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list)));
2975 return num;
2976 }
2977
2978 list = TREE_CHAIN (list);
2979 }
2980
2981 return 0;
2982 }
2983 else
2984 return 0;
2985 }
2986
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE m32c_attribute_table
/* Machine-specific attribute table.  Fields per attribute_spec:
   name, min args, max args, decl_required, type_required,
   function_type_required, handler, affects_type_identity.  */
static const struct attribute_spec m32c_attribute_table[] = {
  {"interrupt", 0, 0, false, false, false, interrupt_handler, false},
  {"bank_switch", 0, 0, false, false, false, interrupt_handler, false},
  {"fast_interrupt", 0, 0, false, false, false, interrupt_handler, false},
  {"function_vector", 1, 1, true, false, false, function_vector_handler,
   false},
  {0, 0, 0, 0, 0, 0, 0, false}
};
2997
#undef TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES m32c_comp_type_attributes
/* Implements TARGET_COMP_TYPE_ATTRIBUTES.  All attribute
   combinations are treated as compatible for this target.  */
static int
m32c_comp_type_attributes (const_tree type1 ATTRIBUTE_UNUSED,
			   const_tree type2 ATTRIBUTE_UNUSED)
{
  /* 0=incompatible 1=compatible 2=warning */
  return 1;
}
3007
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES m32c_insert_attributes
/* Implements TARGET_INSERT_ATTRIBUTES.  Variables given a fixed
   location via "#pragma ADDRESS" are marked volatile — presumably
   because such addresses map hardware registers, though that is the
   pragma user's contract, not enforced here.  */
static void
m32c_insert_attributes (tree node ATTRIBUTE_UNUSED,
			tree * attr_ptr ATTRIBUTE_UNUSED)
{
  unsigned addr;
  /* See if we need to make #pragma address variables volatile. */

  if (TREE_CODE (node) == VAR_DECL)
    {
      const char *name = IDENTIFIER_POINTER (DECL_NAME (node));
      if (m32c_get_pragma_address (name, &addr))
	{
	  TREE_THIS_VOLATILE (node) = true;
	}
    }
}
3026
3027
/* One "#pragma ADDRESS" record: maps a variable name to the fixed
   address it must be placed at.  GC-allocated; see
   m32c_note_pragma_address.  */
struct GTY(()) pragma_entry {
  const char *varname;  /* ggc_strdup'd variable name (hash key).  */
  unsigned address;     /* Assigned absolute address.  */
};
typedef struct pragma_entry pragma_entry;

/* Hash table of pragma info.  Created lazily; keyed by varname.  */
static GTY((param_is (pragma_entry))) htab_t pragma_htab;
3036
3037 static int
3038 pragma_entry_eq (const void *p1, const void *p2)
3039 {
3040 const pragma_entry *old = (const pragma_entry *) p1;
3041 const char *new_name = (const char *) p2;
3042
3043 return strcmp (old->varname, new_name) == 0;
3044 }
3045
3046 static hashval_t
3047 pragma_entry_hash (const void *p)
3048 {
3049 const pragma_entry *old = (const pragma_entry *) p;
3050 return htab_hash_string (old->varname);
3051 }
3052
/* Record that "#pragma ADDRESS" assigned ADDRESS to VARNAME.  The
   hash table is created on first use; a repeated pragma for the same
   name simply overwrites the stored address.  */
void
m32c_note_pragma_address (const char *varname, unsigned address)
{
  pragma_entry **slot;

  if (!pragma_htab)
    pragma_htab = htab_create_ggc (31, pragma_entry_hash,
				   pragma_entry_eq, NULL);

  /* INSERT returns the (possibly empty) slot for VARNAME.  */
  slot = (pragma_entry **)
    htab_find_slot_with_hash (pragma_htab, varname,
			      htab_hash_string (varname), INSERT);

  if (!*slot)
    {
      /* First time we've seen this name: allocate and key the entry.  */
      *slot = ggc_alloc_pragma_entry ();
      (*slot)->varname = ggc_strdup (varname);
    }
  (*slot)->address = address;
}
3073
3074 static bool
3075 m32c_get_pragma_address (const char *varname, unsigned *address)
3076 {
3077 pragma_entry **slot;
3078
3079 if (!pragma_htab)
3080 return false;
3081
3082 slot = (pragma_entry **)
3083 htab_find_slot_with_hash (pragma_htab, varname,
3084 htab_hash_string (varname), NO_INSERT);
3085 if (slot && *slot)
3086 {
3087 *address = (*slot)->address;
3088 return true;
3089 }
3090 return false;
3091 }
3092
3093 void
3094 m32c_output_aligned_common (FILE *stream, tree decl ATTRIBUTE_UNUSED,
3095 const char *name,
3096 int size, int align, int global)
3097 {
3098 unsigned address;
3099
3100 if (m32c_get_pragma_address (name, &address))
3101 {
3102 /* We never output these as global. */
3103 assemble_name (stream, name);
3104 fprintf (stream, " = 0x%04x\n", address);
3105 return;
3106 }
3107 if (!global)
3108 {
3109 fprintf (stream, "\t.local\t");
3110 assemble_name (stream, name);
3111 fprintf (stream, "\n");
3112 }
3113 fprintf (stream, "\t.comm\t");
3114 assemble_name (stream, name);
3115 fprintf (stream, ",%u,%u\n", size, align / BITS_PER_UNIT);
3116 }
3117
3118 /* Predicates */
3119
/* This is a list of legal subregs of hard regs.  Consulted by
   m32c_illegal_subreg_p: anything not listed here is rejected.  */
static const struct {
  unsigned char outer_mode_size;  /* Size of the subreg, in bytes.  */
  unsigned char inner_mode_size;  /* Size of the full register, in bytes.  */
  unsigned char byte_mask;        /* Bitmask of allowed SUBREG_BYTE offsets
				     (bit N set => offset N is legal).  */
  unsigned char legal_when;       /* 1 = always, 16 = only TARGET_A16,
				     24 = only TARGET_A24.  */
  unsigned int regno;             /* Hard register the subreg is taken of.  */
} legal_subregs[] = {
  {1, 2, 0x03, 1, R0_REGNO},	/* r0h r0l */
  {1, 2, 0x03, 1, R1_REGNO},	/* r1h r1l */
  {1, 2, 0x01, 1, A0_REGNO},
  {1, 2, 0x01, 1, A1_REGNO},

  {1, 4, 0x01, 1, A0_REGNO},
  {1, 4, 0x01, 1, A1_REGNO},

  {2, 4, 0x05, 1, R0_REGNO},	/* r2 r0 */
  {2, 4, 0x05, 1, R1_REGNO},	/* r3 r1 */
  {2, 4, 0x05, 16, A0_REGNO},	/* a1 a0 */
  {2, 4, 0x01, 24, A0_REGNO},	/* a1 a0 */
  {2, 4, 0x01, 24, A1_REGNO},	/* a1 a0 */

  {4, 8, 0x55, 1, R0_REGNO},	/* r3 r1 r2 r0 */
};
3144
/* Returns TRUE if OP is a subreg of a hard reg which we don't
   support.  We also bail on MEMs with illegal addresses.  */
bool
m32c_illegal_subreg_p (rtx op)
{
  int offset;
  unsigned int i;
  int src_mode, dest_mode;

  /* Reject MEMs whose address isn't legitimate for this target.  */
  if (GET_CODE (op) == MEM
      && ! m32c_legitimate_address_p (Pmode, XEXP (op, 0), false))
    {
      return true;
    }

  if (GET_CODE (op) != SUBREG)
    return false;

  dest_mode = GET_MODE (op);
  offset = SUBREG_BYTE (op);
  op = SUBREG_REG (op);
  src_mode = GET_MODE (op);

  /* Same-size subregs (paradoxical renames) are always fine.  */
  if (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (src_mode))
    return false;
  if (GET_CODE (op) != REG)
    return false;
  /* Pseudos beyond the mem* pseudo-registers are unrestricted.  */
  if (REGNO (op) >= MEM0_REGNO)
    return false;

  /* Convert the byte offset into a single-bit mask so it can be
     tested against legal_subregs[].byte_mask below.  */
  offset = (1 << offset);

  for (i = 0; i < ARRAY_SIZE (legal_subregs); i ++)
    if (legal_subregs[i].outer_mode_size == GET_MODE_SIZE (dest_mode)
	&& legal_subregs[i].regno == REGNO (op)
	&& legal_subregs[i].inner_mode_size == GET_MODE_SIZE (src_mode)
	&& legal_subregs[i].byte_mask & offset)
      {
	/* A matching table entry may still be conditional on the
	   address-size variant of the chip.  */
	switch (legal_subregs[i].legal_when)
	  {
	  case 1:
	    return false;
	  case 16:
	    if (TARGET_A16)
	      return false;
	    break;
	  case 24:
	    if (TARGET_A24)
	      return false;
	    break;
	  }
      }
  /* No table entry allowed this combination: illegal.  */
  return true;
}
3199
/* Returns TRUE if we support a move between the first two operands.
   At the moment, we just want to discourage mem to mem moves until
   after reload, because reload has a hard time with our limited
   number of address registers, and we can get into a situation where
   we need three of them when we only have two.  */
bool
m32c_mov_ok (rtx * operands, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  rtx op0 = operands[0];
  rtx op1 = operands[1];

  /* The A24 variants have enough address registers; no restriction.  */
  if (TARGET_A24)
    return true;

#define DEBUG_MOV_OK 0
#if DEBUG_MOV_OK
  fprintf (stderr, "m32c_mov_ok %s\n", mode_name[mode]);
  debug_rtx (op0);
  debug_rtx (op1);
#endif

  /* Look through subregs to the underlying operand.  */
  if (GET_CODE (op0) == SUBREG)
    op0 = XEXP (op0, 0);
  if (GET_CODE (op1) == SUBREG)
    op1 = XEXP (op1, 0);

  /* Disallow mem-to-mem until reload is done (see comment above).  */
  if (GET_CODE (op0) == MEM
      && GET_CODE (op1) == MEM
      && ! reload_completed)
    {
#if DEBUG_MOV_OK
      fprintf (stderr, " - no, mem to mem\n");
#endif
      return false;
    }

#if DEBUG_MOV_OK
  fprintf (stderr, " - ok\n");
#endif
  return true;
}
3241
/* Returns TRUE if two consecutive HImode mov instructions, generated
   for moving an immediate double data to a double data type variable
   location, can be combined into single SImode mov instruction.
   Currently always FALSE — the optimization is disabled.  */
bool
m32c_immd_dbl_mov (rtx * operands ATTRIBUTE_UNUSED,
		   enum machine_mode mode ATTRIBUTE_UNUSED)
{
  /* ??? This relied on the now-defunct MEM_SCALAR and MEM_IN_STRUCT_P
     flags.  */
  return false;
}
3253
3254 /* Expanders */
3255
/* Subregs are non-orthogonal for us, because our registers are all
   different sizes.  Produce the OUTER-mode piece at byte offset BYTE
   of X (which has mode INNER), mapping hard registers through the
   target's irregular register layout by hand where
   simplify_gen_subreg can't.  */
static rtx
m32c_subreg (enum machine_mode outer,
	     rtx x, enum machine_mode inner, int byte)
{
  int r, nr = -1;

  /* Converting MEMs to different types that are the same size, we
     just rewrite them.  */
  if (GET_CODE (x) == SUBREG
      && SUBREG_BYTE (x) == 0
      && GET_CODE (SUBREG_REG (x)) == MEM
      && (GET_MODE_SIZE (GET_MODE (x))
	  == GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
    {
      rtx oldx = x;
      x = gen_rtx_MEM (GET_MODE (x), XEXP (SUBREG_REG (x), 0));
      MEM_COPY_ATTRIBUTES (x, SUBREG_REG (oldx));
    }

  /* Push/pop get done as smaller push/pops.  */
  if (GET_CODE (x) == MEM
      && (GET_CODE (XEXP (x, 0)) == PRE_DEC
	  || GET_CODE (XEXP (x, 0)) == POST_INC))
    return gen_rtx_MEM (outer, XEXP (x, 0));
  if (GET_CODE (x) == SUBREG
      && GET_CODE (XEXP (x, 0)) == MEM
      && (GET_CODE (XEXP (XEXP (x, 0), 0)) == PRE_DEC
	  || GET_CODE (XEXP (XEXP (x, 0), 0)) == POST_INC))
    return gen_rtx_MEM (outer, XEXP (XEXP (x, 0), 0));

  /* Non-registers are handled by the generic simplifier.  */
  if (GET_CODE (x) != REG)
    {
      rtx r = simplify_gen_subreg (outer, x, inner, byte);
      if (GET_CODE (r) == SUBREG
	  && GET_CODE (x) == MEM
	  && MEM_VOLATILE_P (x))
	{
	  /* Volatile MEMs don't get simplified, but we need them to
	     be.  We are little endian, so the subreg byte is the
	     offset.  */
	  r = adjust_address_nv (x, outer, byte);
	}
      return r;
    }

  r = REGNO (x);
  /* Pseudos, the frame pointer, and the mem* pseudo-registers keep
     the generic handling.  */
  if (r >= FIRST_PSEUDO_REGISTER || r == AP_REGNO)
    return simplify_gen_subreg (outer, x, inner, byte);

  if (IS_MEM_REGNO (r))
    return simplify_gen_subreg (outer, x, inner, byte);

  /* This is where the complexities of our register layout are
     described: which hard register holds each byte offset of a
     wider value.  */
  if (byte == 0)
    nr = r;
  else if (outer == HImode)
    {
      if (r == R0_REGNO && byte == 2)
	nr = R2_REGNO;
      else if (r == R0_REGNO && byte == 4)
	nr = R1_REGNO;
      else if (r == R0_REGNO && byte == 6)
	nr = R3_REGNO;
      else if (r == R1_REGNO && byte == 2)
	nr = R3_REGNO;
      else if (r == A0_REGNO && byte == 2)
	nr = A1_REGNO;
    }
  else if (outer == SImode)
    {
      if (r == R0_REGNO && byte == 0)
	nr = R0_REGNO;
      else if (r == R0_REGNO && byte == 4)
	nr = R1_REGNO;
    }
  /* Any combination not mapped above is a bug in the caller.  */
  if (nr == -1)
    {
      fprintf (stderr, "m32c_subreg %s %s %d\n",
	       mode_name[outer], mode_name[inner], byte);
      debug_rtx (x);
      gcc_unreachable ();
    }
  return gen_rtx_REG (outer, nr);
}
3343
/* Used to emit move instructions.  We split some moves,
   and avoid mem-mem moves.  Returns nonzero when the move has been
   fully emitted here (via m32c_split_move); zero when the caller
   should emit it with the (possibly rewritten) OPERANDS.  */
int
m32c_prepare_move (rtx * operands, enum machine_mode mode)
{
  /* Constants can't be stored directly into the far address space;
     go through a register of the destination's mode first.  */
  if (far_addr_space_p (operands[0])
      && CONSTANT_P (operands[1]))
    {
      operands[1] = force_reg (GET_MODE (operands[0]), operands[1]);
    }
  /* A16 parts can't move PSImode (24-bit) values in one insn.  */
  if (TARGET_A16 && mode == PSImode)
    return m32c_split_move (operands, mode, 1);
  /* Rewrite a PRE_MODIFY destination as an explicit address update
     followed by a plain register-indirect store.  */
  if ((GET_CODE (operands[0]) == MEM)
      && (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY))
    {
      rtx pmv = XEXP (operands[0], 0);
      rtx dest_reg = XEXP (pmv, 0);
      rtx dest_mod = XEXP (pmv, 1);

      emit_insn (gen_rtx_SET (Pmode, dest_reg, dest_mod));
      operands[0] = gen_rtx_MEM (mode, dest_reg);
    }
  /* Avoid mem-to-mem moves before reload (see m32c_mov_ok) by
     copying the source into a fresh pseudo.  */
  if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
    operands[1] = copy_to_mode_reg (mode, operands[1]);
  return 0;
}
3370
#define DEBUG_SPLIT 0

/* Returns TRUE if the given PSImode move should be split.  We split
   for all r8c/m16c moves, since it doesn't support them, and for
   POP.L as we can only *push* SImode.  */
int
m32c_split_psi_p (rtx * operands)
{
#if DEBUG_SPLIT
  fprintf (stderr, "\nm32c_split_psi_p\n");
  debug_rtx (operands[0]);
  debug_rtx (operands[1]);
#endif
  /* A16 (r8c/m16c) parts never handle PSImode moves directly.  */
  if (TARGET_A16)
    {
#if DEBUG_SPLIT
      fprintf (stderr, "yes, A16\n");
#endif
      return 1;
    }
  /* A POST_INC source is a pop; there is no 32-bit pop opcode.  */
  if (GET_CODE (operands[1]) == MEM
      && GET_CODE (XEXP (operands[1], 0)) == POST_INC)
    {
#if DEBUG_SPLIT
      fprintf (stderr, "yes, pop.l\n");
#endif
      return 1;
    }
#if DEBUG_SPLIT
  fprintf (stderr, "no, default\n");
#endif
  return 0;
}
3404
/* Split the given move.  SPLIT_ALL is 0 if splitting is optional
   (define_expand), 1 if it is not optional (define_insn_and_split),
   and 3 for define_split (alternate api).  Returns nonzero if the
   move was handled (split) here, zero if the caller's pattern should
   proceed unchanged.  */
int
m32c_split_move (rtx * operands, enum machine_mode mode, int split_all)
{
  rtx s[4], d[4];
  int parts, si, di, rev = 0;
  /* OPI starts at 2: for the define_split API the original two
     operands stay at ops[0..1] and the new pairs are appended after
     them.  */
  int rv = 0, opi = 2;
  enum machine_mode submode = HImode;
  rtx *ops, local_ops[10];

  /* define_split modifies the existing operands, but the other two
     emit new insns.  OPS is where we store the operand pairs, which
     we emit later.  */
  if (split_all == 3)
    ops = operands;
  else
    ops = local_ops;

  /* Else HImode.  */
  if (mode == DImode)
    submode = SImode;

  /* Before splitting mem-mem moves, force one operand into a
     register.  */
  if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
    {
#if DEBUG0
      fprintf (stderr, "force_reg...\n");
      debug_rtx (operands[1]);
#endif
      operands[1] = force_reg (mode, operands[1]);
#if DEBUG0
      debug_rtx (operands[1]);
#endif
    }

  /* Always exactly two pieces: SI/PSI -> 2 x HI, DI -> 2 x SI.  */
  parts = 2;

#if DEBUG_SPLIT
  fprintf (stderr, "\nsplit_move %d all=%d\n", !can_create_pseudo_p (),
	   split_all);
  debug_rtx (operands[0]);
  debug_rtx (operands[1]);
#endif

  /* Note that split_all is not used to select the api after this
     point, so it's safe to set it to 3 even with define_insn.  */
  /* None of the chips can move SI operands to sp-relative addresses,
     so we always split those.  */
  if (satisfies_constraint_Ss (operands[0]))
    split_all = 3;

  /* Likewise, far-address-space accesses on A16 must always be
     split.  */
  if (TARGET_A16
      && (far_addr_space_p (operands[0])
	  || far_addr_space_p (operands[1])))
    split_all |= 1;

  /* We don't need to split these.  */
  if (TARGET_A24
      && split_all != 3
      && (mode == SImode || mode == PSImode)
      && !(GET_CODE (operands[1]) == MEM
	   && GET_CODE (XEXP (operands[1], 0)) == POST_INC))
    return 0;

  /* First, enumerate the subregs we'll be dealing with.  D[] are the
     destination pieces, S[] the source pieces, low part first.  */
  for (si = 0; si < parts; si++)
    {
      d[si] =
	m32c_subreg (submode, operands[0], mode,
		     si * GET_MODE_SIZE (submode));
      s[si] =
	m32c_subreg (submode, operands[1], mode,
		     si * GET_MODE_SIZE (submode));
    }

  /* Split pushes by emitting a sequence of smaller pushes.  High part
     is pushed first so the value ends up in memory order.  */
  if (GET_CODE (d[0]) == MEM && GET_CODE (XEXP (d[0], 0)) == PRE_DEC)
    {
      for (si = parts - 1; si >= 0; si--)
	{
	  ops[opi++] = gen_rtx_MEM (submode,
				    gen_rtx_PRE_DEC (Pmode,
						     gen_rtx_REG (Pmode,
								  SP_REGNO)));
	  ops[opi++] = s[si];
	}

      rv = 1;
    }
  /* Likewise for pops; low part is popped first.  */
  else if (GET_CODE (s[0]) == MEM && GET_CODE (XEXP (s[0], 0)) == POST_INC)
    {
      for (di = 0; di < parts; di++)
	{
	  ops[opi++] = d[di];
	  ops[opi++] = gen_rtx_MEM (submode,
				    gen_rtx_POST_INC (Pmode,
						      gen_rtx_REG (Pmode,
								   SP_REGNO)));
	}
      rv = 1;
    }
  else if (split_all)
    {
      /* if d[di] == s[si] for any di < si, we'll early clobber.
	 Reverse the emit order in that case so no source piece is
	 overwritten before it is read.  */
      for (di = 0; di < parts - 1; di++)
	for (si = di + 1; si < parts; si++)
	  if (reg_mentioned_p (d[di], s[si]))
	    rev = 1;

      if (rev)
	for (si = 0; si < parts; si++)
	  {
	    ops[opi++] = d[si];
	    ops[opi++] = s[si];
	  }
      else
	for (si = parts - 1; si >= 0; si--)
	  {
	    ops[opi++] = d[si];
	    ops[opi++] = s[si];
	  }
      rv = 1;
    }
  /* Now emit any moves we may have accumulated.  For the define_split
     API (split_all == 3) the caller emits from OPS itself.  */
  if (rv && split_all != 3)
    {
      int i;
      for (i = 2; i < opi; i += 2)
	emit_move_insn (ops[i], ops[i + 1]);
    }
  return rv;
}
3541
3542 /* The m32c has a number of opcodes that act like memcpy, strcmp, and
3543 the like. For the R8C they expect one of the addresses to be in
3544 R1L:An so we need to arrange for that. Otherwise, it's just a
3545 matter of picking out the operands we want and emitting the right
3546 pattern for them. All these expanders, which correspond to
3547 patterns in blkmov.md, must return nonzero if they expand the insn,
3548 or zero if they should FAIL. */
3549
3550 /* This is a memset() opcode. All operands are implied, so we need to
3551 arrange for them to be in the right registers. The opcode wants
3552 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3553 the count (HI), and $2 the value (QI). */
3554 int
3555 m32c_expand_setmemhi(rtx *operands)
3556 {
3557 rtx desta, count, val;
3558 rtx desto, counto;
3559
3560 desta = XEXP (operands[0], 0);
3561 count = operands[1];
3562 val = operands[2];
3563
3564 desto = gen_reg_rtx (Pmode);
3565 counto = gen_reg_rtx (HImode);
3566
3567 if (GET_CODE (desta) != REG
3568 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3569 desta = copy_to_mode_reg (Pmode, desta);
3570
3571 /* This looks like an arbitrary restriction, but this is by far the
3572 most common case. For counts 8..14 this actually results in
3573 smaller code with no speed penalty because the half-sized
3574 constant can be loaded with a shorter opcode. */
3575 if (GET_CODE (count) == CONST_INT
3576 && GET_CODE (val) == CONST_INT
3577 && ! (INTVAL (count) & 1)
3578 && (INTVAL (count) > 1)
3579 && (INTVAL (val) <= 7 && INTVAL (val) >= -8))
3580 {
3581 unsigned v = INTVAL (val) & 0xff;
3582 v = v | (v << 8);
3583 count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3584 val = copy_to_mode_reg (HImode, GEN_INT (v));
3585 if (TARGET_A16)
3586 emit_insn (gen_setmemhi_whi_op (desto, counto, val, desta, count));
3587 else
3588 emit_insn (gen_setmemhi_wpsi_op (desto, counto, val, desta, count));
3589 return 1;
3590 }
3591
3592 /* This is the generalized memset() case. */
3593 if (GET_CODE (val) != REG
3594 || REGNO (val) < FIRST_PSEUDO_REGISTER)
3595 val = copy_to_mode_reg (QImode, val);
3596
3597 if (GET_CODE (count) != REG
3598 || REGNO (count) < FIRST_PSEUDO_REGISTER)
3599 count = copy_to_mode_reg (HImode, count);
3600
3601 if (TARGET_A16)
3602 emit_insn (gen_setmemhi_bhi_op (desto, counto, val, desta, count));
3603 else
3604 emit_insn (gen_setmemhi_bpsi_op (desto, counto, val, desta, count));
3605
3606 return 1;
3607 }
3608
/* This is a memcpy() opcode.  All operands are implied, so we need to
   arrange for them to be in the right registers.  The opcode wants
   addresses, not [mem] syntax.  $0 is the destination (MEM:BLK), $1
   is the source (MEM:BLK), and $2 the count (HI).  Always returns 1
   (the insn is always expanded).  */
int
m32c_expand_movmemhi(rtx *operands)
{
  rtx desta, srca, count;
  rtx desto, srco, counto;

  desta = XEXP (operands[0], 0);
  srca = XEXP (operands[1], 0);
  count = operands[2];

  /* Output pseudos for the opcode's post-copy address/count values.  */
  desto = gen_reg_rtx (Pmode);
  srco = gen_reg_rtx (Pmode);
  counto = gen_reg_rtx (HImode);

  /* Both addresses must be in pseudo registers.  */
  if (GET_CODE (desta) != REG
      || REGNO (desta) < FIRST_PSEUDO_REGISTER)
    desta = copy_to_mode_reg (Pmode, desta);

  if (GET_CODE (srca) != REG
      || REGNO (srca) < FIRST_PSEUDO_REGISTER)
    srca = copy_to_mode_reg (Pmode, srca);

  /* Similar to setmem, but we don't need to check the value: an even
     count > 1 can be copied word-at-a-time with half the count.  */
  if (GET_CODE (count) == CONST_INT
      && ! (INTVAL (count) & 1)
      && (INTVAL (count) > 1))
    {
      count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
      if (TARGET_A16)
	emit_insn (gen_movmemhi_whi_op (desto, srco, counto, desta, srca, count));
      else
	emit_insn (gen_movmemhi_wpsi_op (desto, srco, counto, desta, srca, count));
      return 1;
    }

  /* This is the generalized (byte-wide) memcpy() case.  */
  if (GET_CODE (count) != REG
      || REGNO (count) < FIRST_PSEUDO_REGISTER)
    count = copy_to_mode_reg (HImode, count);

  if (TARGET_A16)
    emit_insn (gen_movmemhi_bhi_op (desto, srco, counto, desta, srca, count));
  else
    emit_insn (gen_movmemhi_bpsi_op (desto, srco, counto, desta, srca, count));

  return 1;
}
3660
3661 /* This is a stpcpy() opcode. $0 is the destination (MEM:BLK) after
3662 the copy, which should point to the NUL at the end of the string,
3663 $1 is the destination (MEM:BLK), and $2 is the source (MEM:BLK).
3664 Since our opcode leaves the destination pointing *after* the NUL,
3665 we must emit an adjustment. */
3666 int
3667 m32c_expand_movstr(rtx *operands)
3668 {
3669 rtx desta, srca;
3670 rtx desto, srco;
3671
3672 desta = XEXP (operands[1], 0);
3673 srca = XEXP (operands[2], 0);
3674
3675 desto = gen_reg_rtx (Pmode);
3676 srco = gen_reg_rtx (Pmode);
3677
3678 if (GET_CODE (desta) != REG
3679 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3680 desta = copy_to_mode_reg (Pmode, desta);
3681
3682 if (GET_CODE (srca) != REG
3683 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3684 srca = copy_to_mode_reg (Pmode, srca);
3685
3686 emit_insn (gen_movstr_op (desto, srco, desta, srca));
3687 /* desto ends up being a1, which allows this type of add through MOVA. */
3688 emit_insn (gen_addpsi3 (operands[0], desto, GEN_INT (-1)));
3689
3690 return 1;
3691 }
3692
3693 /* This is a strcmp() opcode. $0 is the destination (HI) which holds
3694 <=>0 depending on the comparison, $1 is one string (MEM:BLK), and
3695 $2 is the other (MEM:BLK). We must do the comparison, and then
3696 convert the flags to a signed integer result. */
3697 int
3698 m32c_expand_cmpstr(rtx *operands)
3699 {
3700 rtx src1a, src2a;
3701
3702 src1a = XEXP (operands[1], 0);
3703 src2a = XEXP (operands[2], 0);
3704
3705 if (GET_CODE (src1a) != REG
3706 || REGNO (src1a) < FIRST_PSEUDO_REGISTER)
3707 src1a = copy_to_mode_reg (Pmode, src1a);
3708
3709 if (GET_CODE (src2a) != REG
3710 || REGNO (src2a) < FIRST_PSEUDO_REGISTER)
3711 src2a = copy_to_mode_reg (Pmode, src2a);
3712
3713 emit_insn (gen_cmpstrhi_op (src1a, src2a, src1a, src2a));
3714 emit_insn (gen_cond_to_int (operands[0]));
3715
3716 return 1;
3717 }
3718
3719
/* Type of the gen_* functions for single shift insns: (dest, src,
   count) -> insn.  */
typedef rtx (*shift_gen_func)(rtx, rtx, rtx);

/* Return the insn generator for a shift of the given MODE and CODE
   (ASHIFT/ASHIFTRT/LSHIFTRT).  SImode shifts differ between the A16
   and A24 families; any unsupported combination aborts.  */
static shift_gen_func
shift_gen_func_for (int mode, int code)
{
#define GFF(m,c,f) if (mode == m && code == c) return f
  GFF(QImode,  ASHIFT,   gen_ashlqi3_i);
  GFF(QImode,  ASHIFTRT, gen_ashrqi3_i);
  GFF(QImode,  LSHIFTRT, gen_lshrqi3_i);
  GFF(HImode,  ASHIFT,   gen_ashlhi3_i);
  GFF(HImode,  ASHIFTRT, gen_ashrhi3_i);
  GFF(HImode,  LSHIFTRT, gen_lshrhi3_i);
  GFF(PSImode, ASHIFT,   gen_ashlpsi3_i);
  GFF(PSImode, ASHIFTRT, gen_ashrpsi3_i);
  GFF(PSImode, LSHIFTRT, gen_lshrpsi3_i);
  GFF(SImode,  ASHIFT,   TARGET_A16 ? gen_ashlsi3_16 : gen_ashlsi3_24);
  GFF(SImode,  ASHIFTRT, TARGET_A16 ? gen_ashrsi3_16 : gen_ashrsi3_24);
  GFF(SImode,  LSHIFTRT, TARGET_A16 ? gen_lshrsi3_16 : gen_lshrsi3_24);
#undef GFF
  gcc_unreachable ();
}
3741
/* The m32c only has one shift, but it takes a signed count.  GCC
   doesn't want this, so we fake it by negating any shift count when
   we're pretending to shift the other way.  Also, the shift count is
   limited to -8..8.  It's slightly better to use two shifts for 9..15
   than to load the count into r1h, so we do that too.

   SCALE is +1 or -1 (a -1 scale means the expander is implementing
   the "other direction" via a negated count).  Returns 1 if the shift
   was fully expanded here, 0 if the caller's pattern should proceed
   using the (possibly rewritten) operands.  */
int
m32c_prepare_shift (rtx * operands, int scale, int shift_code)
{
  enum machine_mode mode = GET_MODE (operands[0]);
  shift_gen_func func = shift_gen_func_for (mode, shift_code);
  rtx temp;

  if (GET_CODE (operands[2]) == CONST_INT)
    {
      /* A24 wide-mode shifts take up to 32 at once; everything else
	 maxes out at 8 per insn.  */
      int maxc = TARGET_A24 && (mode == PSImode || mode == SImode) ? 32 : 8;
      int count = INTVAL (operands[2]) * scale;

      /* Peel off max-width shifts until the residual count fits in
	 -maxc..maxc, chaining through fresh pseudos.  */
      while (count > maxc)
	{
	  temp = gen_reg_rtx (mode);
	  emit_insn (func (temp, operands[1], GEN_INT (maxc)));
	  operands[1] = temp;
	  count -= maxc;
	}
      while (count < -maxc)
	{
	  temp = gen_reg_rtx (mode);
	  emit_insn (func (temp, operands[1], GEN_INT (-maxc)));
	  operands[1] = temp;
	  count += maxc;
	}
      emit_insn (func (operands[0], operands[1], GEN_INT (count)));
      return 1;
    }

  /* Variable count: the count lives in a QImode register.  */
  temp = gen_reg_rtx (QImode);
  if (scale < 0)
    /* The pattern has a NEG that corresponds to this.  */
    emit_move_insn (temp, gen_rtx_NEG (QImode, operands[2]));
  else if (TARGET_A16 && mode == SImode)
    /* We do this because the code below may modify this, we don't
       want to modify the origin of this value.  */
    emit_move_insn (temp, operands[2]);
  else
    /* We'll only use it for the shift, no point emitting a move.  */
    temp = operands[2];

  if (TARGET_A16 && GET_MODE_SIZE (mode) == 4)
    {
      /* The m16c has a limit of -16..16 for SI shifts, even when the
	 shift count is in a register.  Since there are so many targets
	 of these shifts, it's better to expand the RTL here than to
	 call a helper function.

	 The resulting code looks something like this:

		cmp.b	r1h,-16
		jge.b	1f
		shl.l	-16,dest
		add.b	r1h,16
	1f:	cmp.b	r1h,16
		jle.b	1f
		shl.l	16,dest
		sub.b	r1h,16
	1f:	shl.l	r1h,dest

	 We take advantage of the fact that "negative" shifts are
	 undefined to skip one of the comparisons.  */

      rtx count;
      rtx label, insn, tempvar;

      emit_move_insn (operands[0], operands[1]);

      count = temp;
      label = gen_label_rtx ();
      LABEL_NUSES (label) ++;

      tempvar = gen_reg_rtx (mode);

      if (shift_code == ASHIFT)
	{
	  /* This is a left shift.  We only need check positive counts.
	     Two 8-bit shifts implement the conditional extra 16.  */
	  emit_jump_insn (gen_cbranchqi4 (gen_rtx_LE (VOIDmode, 0, 0),
					  count, GEN_INT (16), label));
	  emit_insn (func (tempvar, operands[0], GEN_INT (8)));
	  emit_insn (func (operands[0], tempvar, GEN_INT (8)));
	  insn = emit_insn (gen_addqi3 (count, count, GEN_INT (-16)));
	  emit_label_after (label, insn);
	}
      else
	{
	  /* This is a right shift.  We only need check negative counts.  */
	  emit_jump_insn (gen_cbranchqi4 (gen_rtx_GE (VOIDmode, 0, 0),
					  count, GEN_INT (-16), label));
	  emit_insn (func (tempvar, operands[0], GEN_INT (-8)));
	  emit_insn (func (operands[0], tempvar, GEN_INT (-8)));
	  insn = emit_insn (gen_addqi3 (count, count, GEN_INT (16)));
	  emit_label_after (label, insn);
	}
      operands[1] = operands[0];
      emit_insn (func (operands[0], operands[0], count));
      return 1;
    }

  /* Hand the rewritten count back to the caller's pattern.  */
  operands[2] = temp;
  return 0;
}
3850
3851 /* The m32c has a limited range of operations that work on PSImode
3852 values; we have to expand to SI, do the math, and truncate back to
3853 PSI. Yes, this is expensive, but hopefully gcc will learn to avoid
3854 those cases. */
3855 void
3856 m32c_expand_neg_mulpsi3 (rtx * operands)
3857 {
3858 /* operands: a = b * i */
3859 rtx temp1; /* b as SI */
3860 rtx scale /* i as SI */;
3861 rtx temp2; /* a*b as SI */
3862
3863 temp1 = gen_reg_rtx (SImode);
3864 temp2 = gen_reg_rtx (SImode);
3865 if (GET_CODE (operands[2]) != CONST_INT)
3866 {
3867 scale = gen_reg_rtx (SImode);
3868 emit_insn (gen_zero_extendpsisi2 (scale, operands[2]));
3869 }
3870 else
3871 scale = copy_to_mode_reg (SImode, operands[2]);
3872
3873 emit_insn (gen_zero_extendpsisi2 (temp1, operands[1]));
3874 temp2 = expand_simple_binop (SImode, MULT, temp1, scale, temp2, 1, OPTAB_LIB);
3875 emit_insn (gen_truncsipsi2 (operands[0], temp2));
3876 }
3877
3878 /* Pattern Output Functions */
3879
3880 int
3881 m32c_expand_movcc (rtx *operands)
3882 {
3883 rtx rel = operands[1];
3884
3885 if (GET_CODE (rel) != EQ && GET_CODE (rel) != NE)
3886 return 1;
3887 if (GET_CODE (operands[2]) != CONST_INT
3888 || GET_CODE (operands[3]) != CONST_INT)
3889 return 1;
3890 if (GET_CODE (rel) == NE)
3891 {
3892 rtx tmp = operands[2];
3893 operands[2] = operands[3];
3894 operands[3] = tmp;
3895 rel = gen_rtx_EQ (GET_MODE (rel), XEXP (rel, 0), XEXP (rel, 1));
3896 }
3897
3898 emit_move_insn (operands[0],
3899 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
3900 rel,
3901 operands[2],
3902 operands[3]));
3903 return 0;
3904 }
3905
/* Used for the "insv" pattern.  Return nonzero to fail, else done.
   Implements single-bit stores via bset/bclr where possible, falling
   back to AND/OR immediate patterns.  operands[0] is the field,
   operands[1] the width (must be 1), operands[2] the bit position,
   operands[3] the value (must be constant 0, 1, or -1).  */
int
m32c_expand_insv (rtx *operands)
{
  rtx op0, src0, p;
  int mask;

  /* Only single-bit fields are handled here.  */
  if (INTVAL (operands[1]) != 1)
    return 1;

  /* Our insv opcode (bset, bclr) can only insert a one-bit constant.  */
  if (GET_CODE (operands[3]) != CONST_INT)
    return 1;
  if (INTVAL (operands[3]) != 0
      && INTVAL (operands[3]) != 1
      && INTVAL (operands[3]) != -1)
    return 1;

  mask = 1 << INTVAL (operands[2]);

  /* Strip a lowpart SUBREG of a QI/HI reg so we operate on the
     underlying object directly.  */
  op0 = operands[0];
  if (GET_CODE (op0) == SUBREG
      && SUBREG_BYTE (op0) == 0)
    {
      rtx sub = SUBREG_REG (op0);
      if (GET_MODE (sub) == HImode || GET_MODE (sub) == QImode)
	op0 = sub;
    }

  /* Read through a copy unless we must access the object in place
     (no pseudos available, or a volatile memory access).  */
  if (!can_create_pseudo_p ()
      || (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0)))
    src0 = op0;
  else
    {
      src0 = gen_reg_rtx (GET_MODE (op0));
      emit_move_insn (src0, op0);
    }

  if (GET_MODE (op0) == HImode
      && INTVAL (operands[2]) >= 8
      && GET_CODE (op0) == MEM)
    {
      /* We are little endian.  */
      /* NOTE(review): NEW_MEM is built and its attributes copied, but
	 it is never assigned back to OP0, so only the mask shift below
	 takes effect.  This looks like a latent bug (op0 = new_mem
	 appears intended) -- confirm against the md patterns before
	 changing.  */
      rtx new_mem = gen_rtx_MEM (QImode, plus_constant (Pmode,
							XEXP (op0, 0), 1));
      MEM_COPY_ATTRIBUTES (new_mem, op0);
      mask >>= 8;
    }

  /* First, we generate a mask with the correct polarity.  If we are
     storing a zero, we want an AND mask, so invert it.  */
  if (INTVAL (operands[3]) == 0)
    {
      /* Storing a zero, use an AND mask */
      if (GET_MODE (op0) == HImode)
	mask ^= 0xffff;
      else
	mask ^= 0xff;
    }
  /* Now we need to properly sign-extend the mask in case we need to
     fall back to an AND or OR opcode.  */
  if (GET_MODE (op0) == HImode)
    {
      if (mask & 0x8000)
	mask -= 0x10000;
    }
  else
    {
      if (mask & 0x80)
	mask -= 0x100;
    }

  /* Dispatch on value polarity (AND vs IOR), operand width, and chip
     family.  */
  switch ( (INTVAL (operands[3]) ? 4 : 0)
	   + ((GET_MODE (op0) == HImode) ? 2 : 0)
	   + (TARGET_A24 ? 1 : 0))
    {
    case 0: p = gen_andqi3_16 (op0, src0, GEN_INT (mask)); break;
    case 1: p = gen_andqi3_24 (op0, src0, GEN_INT (mask)); break;
    case 2: p = gen_andhi3_16 (op0, src0, GEN_INT (mask)); break;
    case 3: p = gen_andhi3_24 (op0, src0, GEN_INT (mask)); break;
    case 4: p = gen_iorqi3_16 (op0, src0, GEN_INT (mask)); break;
    case 5: p = gen_iorqi3_24 (op0, src0, GEN_INT (mask)); break;
    case 6: p = gen_iorhi3_16 (op0, src0, GEN_INT (mask)); break;
    case 7: p = gen_iorhi3_24 (op0, src0, GEN_INT (mask)); break;
    default: p = NULL_RTX; break; /* Not reached, but silences a warning.  */
    }

  emit_insn (p);
  return 0;
}
3996
3997 const char *
3998 m32c_scc_pattern(rtx *operands, RTX_CODE code)
3999 {
4000 static char buf[30];
4001 if (GET_CODE (operands[0]) == REG
4002 && REGNO (operands[0]) == R0_REGNO)
4003 {
4004 if (code == EQ)
4005 return "stzx\t#1,#0,r0l";
4006 if (code == NE)
4007 return "stzx\t#0,#1,r0l";
4008 }
4009 sprintf(buf, "bm%s\t0,%%h0\n\tand.b\t#1,%%0", GET_RTX_NAME (code));
4010 return buf;
4011 }
4012
4013 /* Encode symbol attributes of a SYMBOL_REF into its
4014 SYMBOL_REF_FLAGS. */
4015 static void
4016 m32c_encode_section_info (tree decl, rtx rtl, int first)
4017 {
4018 int extra_flags = 0;
4019
4020 default_encode_section_info (decl, rtl, first);
4021 if (TREE_CODE (decl) == FUNCTION_DECL
4022 && m32c_special_page_vector_p (decl))
4023
4024 extra_flags = SYMBOL_FLAG_FUNCVEC_FUNCTION;
4025
4026 if (extra_flags)
4027 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= extra_flags;
4028 }
4029
/* Returns TRUE if the current function is a leaf, and thus we can
   determine which registers an interrupt function really needs to
   save.  The logic below is mostly about finding the insn sequence
   that's the function, versus any sequence that might be open for the
   current insn.  */
static int
m32c_leaf_function_p (void)
{
  rtx saved_first, saved_last;
  struct sequence_stack *seq;
  int rv;

  /* Temporarily swap in the outermost saved sequence (the bottom of
     the sequence stack, i.e. the function body) so leaf_function_p
     examines the whole function, then restore the emit state.  */
  saved_first = crtl->emit.x_first_insn;
  saved_last = crtl->emit.x_last_insn;
  for (seq = crtl->emit.sequence_stack; seq && seq->next; seq = seq->next)
    ;
  if (seq)
    {
      crtl->emit.x_first_insn = seq->first;
      crtl->emit.x_last_insn = seq->last;
    }

  rv = leaf_function_p ();

  crtl->emit.x_first_insn = saved_first;
  crtl->emit.x_last_insn = saved_last;
  return rv;
}
4058
/* Returns TRUE if the current function needs to use the ENTER/EXIT
   opcodes.  If the function doesn't need the frame base or stack
   pointer, it can use the simpler RTS opcode.  */
static bool
m32c_function_needs_enter (void)
{
  rtx insn;
  struct sequence_stack *seq;
  rtx sp = gen_rtx_REG (Pmode, SP_REGNO);
  rtx fb = gen_rtx_REG (Pmode, FB_REGNO);

  /* Find the first insn of the outermost sequence (the function body
     proper), not whatever nested sequence is currently open.  */
  insn = get_insns ();
  for (seq = crtl->emit.sequence_stack;
       seq;
       insn = seq->first, seq = seq->next);

  /* ENTER is needed iff any insn mentions the stack pointer or the
     frame base register.  */
  while (insn)
    {
      if (reg_mentioned_p (sp, insn))
	return true;
      if (reg_mentioned_p (fb, insn))
	return true;
      insn = NEXT_INSN (insn);
    }
  return false;
}
4085
4086 /* Mark all the subexpressions of the PARALLEL rtx PAR as
4087 frame-related. Return PAR.
4088
4089 dwarf2out.c:dwarf2out_frame_debug_expr ignores sub-expressions of a
4090 PARALLEL rtx other than the first if they do not have the
4091 FRAME_RELATED flag set on them. So this function is handy for
4092 marking up 'enter' instructions. */
4093 static rtx
4094 m32c_all_frame_related (rtx par)
4095 {
4096 int len = XVECLEN (par, 0);
4097 int i;
4098
4099 for (i = 0; i < len; i++)
4100 F (XVECEXP (par, 0, i));
4101
4102 return par;
4103 }
4104
/* Emits the prologue.  See the frame layout comment earlier in this
   file.  We can reserve up to 256 bytes with the ENTER opcode, beyond
   that we manually update sp.  */
void
m32c_emit_prologue (void)
{
  int frame_size, extra_frame_size = 0, reg_save_size;
  /* Nonzero when anything beyond a bare ENTER is emitted; used only
     to emit a prologue_end marker comment into the .s file.  */
  int complex_prologue = 0;

  cfun->machine->is_leaf = m32c_leaf_function_p ();
  if (interrupt_p (cfun->decl))
    {
      cfun->machine->is_interrupt = 1;
      complex_prologue = 1;
    }
  else if (bank_switch_p (cfun->decl))
    warning (OPT_Wattributes,
	     "%<bank_switch%> has no effect on non-interrupt functions");

  /* PP_justcount only computes the size, it emits nothing.  */
  reg_save_size = m32c_pushm_popm (PP_justcount);

  /* Interrupts save registers either by switching banks (FSET B) or
     by pushing the interrupt register set.  */
  if (interrupt_p (cfun->decl))
    {
      if (bank_switch_p (cfun->decl))
	emit_insn (gen_fset_b ());
      else if (cfun->machine->intr_pushm)
	emit_insn (gen_pushm (GEN_INT (cfun->machine->intr_pushm)));
    }

  frame_size =
    m32c_initial_elimination_offset (FB_REGNO, SP_REGNO) - reg_save_size;
  if (frame_size == 0
      && !m32c_function_needs_enter ())
    cfun->machine->use_rts = 1;

  /* ENTER's immediate is one byte; reserve anything past 254 with an
     explicit stack-pointer adjustment below.  */
  if (frame_size > 254)
    {
      extra_frame_size = frame_size - 254;
      frame_size = 254;
    }
  if (cfun->machine->use_rts == 0)
    F (emit_insn (m32c_all_frame_related
		  (TARGET_A16
		   ? gen_prologue_enter_16 (GEN_INT (frame_size + 2))
		   : gen_prologue_enter_24 (GEN_INT (frame_size + 4)))));

  if (extra_frame_size)
    {
      complex_prologue = 1;
      if (TARGET_A16)
	F (emit_insn (gen_addhi3 (gen_rtx_REG (HImode, SP_REGNO),
				  gen_rtx_REG (HImode, SP_REGNO),
				  GEN_INT (-extra_frame_size))));
      else
	F (emit_insn (gen_addpsi3 (gen_rtx_REG (PSImode, SP_REGNO),
				   gen_rtx_REG (PSImode, SP_REGNO),
				   GEN_INT (-extra_frame_size))));
    }

  complex_prologue += m32c_pushm_popm (PP_pushm);

  /* This just emits a comment into the .s file for debugging.  */
  if (complex_prologue)
    emit_insn (gen_prologue_end ());
}
4170
/* Likewise, for the epilogue.  The only exception is that, for
   interrupts, we must manually unwind the frame as the REIT opcode
   doesn't do that.  */
void
m32c_emit_epilogue (void)
{
  int popm_count = m32c_pushm_popm (PP_justcount);

  /* This just emits a comment into the .s file for debugging.  */
  if (popm_count > 0 || cfun->machine->is_interrupt)
    emit_insn (gen_epilogue_start ());

  if (popm_count > 0)
    m32c_pushm_popm (PP_popm);

  if (cfun->machine->is_interrupt)
    {
      enum machine_mode spmode = TARGET_A16 ? HImode : PSImode;

      /* REIT clears B flag and restores $fp for us, but we still
	 have to fix up the stack.  USE_RTS just means we didn't
	 emit ENTER.  */
      if (!cfun->machine->use_rts)
	{
	  /* Unwind via a0: sp = fp, then pop the saved fp.  */
	  emit_move_insn (gen_rtx_REG (spmode, A0_REGNO),
			  gen_rtx_REG (spmode, FP_REGNO));
	  emit_move_insn (gen_rtx_REG (spmode, SP_REGNO),
			  gen_rtx_REG (spmode, A0_REGNO));
	  /* We can't just add this to the POPM because it would be in
	     the wrong order, and wouldn't fix the stack if we're bank
	     switching.  */
	  if (TARGET_A16)
	    emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, FP_REGNO)));
	  else
	    emit_insn (gen_poppsi (gen_rtx_REG (PSImode, FP_REGNO)));
	}
      if (!bank_switch_p (cfun->decl) && cfun->machine->intr_pushm)
	emit_insn (gen_popm (GEN_INT (cfun->machine->intr_pushm)));

      /* The FREIT (Fast REturn from InTerrupt) instruction should be
	 generated only for M32C/M32CM targets (generate the REIT
	 instruction otherwise).  */
      if (fast_interrupt_p (cfun->decl))
	{
	  /* Check if fast_attribute is set for M32C or M32CM.  */
	  if (TARGET_A24)
	    {
	      emit_jump_insn (gen_epilogue_freit ());
	    }
	  /* If fast_interrupt attribute is set for an R8C or M16C
	     target ignore this attribute and generated REIT
	     instruction.  */
	  else
	    {
	      warning (OPT_Wattributes,
		       "%<fast_interrupt%> attribute directive ignored");
	      emit_jump_insn (gen_epilogue_reit_16 ());
	    }
	}
      else if (TARGET_A16)
	emit_jump_insn (gen_epilogue_reit_16 ());
      else
	emit_jump_insn (gen_epilogue_reit_24 ());
    }
  else if (cfun->machine->use_rts)
    emit_jump_insn (gen_epilogue_rts ());
  else if (TARGET_A16)
    emit_jump_insn (gen_epilogue_exitd_16 ());
  else
    emit_jump_insn (gen_epilogue_exitd_24 ());
}
4242
/* Emit the exception-handling epilogue.  RET_ADDR is the address to
   return to; the stack adjustment comes from cfun->machine.  */
void
m32c_emit_eh_epilogue (rtx ret_addr)
{
  /* R0[R2] has the stack adjustment.  R1[R3] has the address to
     return to.  We have to fudge the stack, pop everything, pop SP
     (fudged), and return (fudged).  This is actually easier to do in
     assembler, so punt to libgcc.  */
  emit_jump_insn (gen_eh_epilogue (ret_addr, cfun->machine->eh_stack_adjust));
  /* emit_clobber (gen_rtx_REG (HImode, R0L_REGNO)); */
}
4253
/* Indicate which flags must be properly set for a given conditional.
   Returns a FLAGS_* mask; FLAGS_N (the default) means the condition
   is not one we can reason about.  */
static int
flags_needed_for_conditional (rtx cond)
{
  switch (GET_CODE (cond))
    {
    case LE:
    case GT:
      /* Signed orderings need overflow, sign, and zero.  */
      return FLAGS_OSZ;
    case LEU:
    case GTU:
      /* Unsigned orderings with equality need zero and carry.  */
      return FLAGS_ZC;
    case LT:
    case GE:
      return FLAGS_OS;
    case LTU:
    case GEU:
      return FLAGS_C;
    case EQ:
    case NE:
      return FLAGS_Z;
    default:
      return FLAGS_N;
    }
}
4279
4280 #define DEBUG_CMP 0
4281
4282 /* Returns true if a compare insn is redundant because it would only
4283 set flags that are already set correctly. */
4284 static bool
4285 m32c_compare_redundant (rtx cmp, rtx *operands)
4286 {
4287 int flags_needed;
4288 int pflags;
4289 rtx prev, pp, next;
4290 rtx op0, op1;
4291 #if DEBUG_CMP
4292 int prev_icode, i;
4293 #endif
4294
4295 op0 = operands[0];
4296 op1 = operands[1];
4297
4298 #if DEBUG_CMP
4299 fprintf(stderr, "\n\033[32mm32c_compare_redundant\033[0m\n");
4300 debug_rtx(cmp);
4301 for (i=0; i<2; i++)
4302 {
4303 fprintf(stderr, "operands[%d] = ", i);
4304 debug_rtx(operands[i]);
4305 }
4306 #endif
4307
4308 next = next_nonnote_insn (cmp);
4309 if (!next || !INSN_P (next))
4310 {
4311 #if DEBUG_CMP
4312 fprintf(stderr, "compare not followed by insn\n");
4313 debug_rtx(next);
4314 #endif
4315 return false;
4316 }
4317 if (GET_CODE (PATTERN (next)) == SET
4318 && GET_CODE (XEXP ( PATTERN (next), 1)) == IF_THEN_ELSE)
4319 {
4320 next = XEXP (XEXP (PATTERN (next), 1), 0);
4321 }
4322 else if (GET_CODE (PATTERN (next)) == SET)
4323 {
4324 /* If this is a conditional, flags_needed will be something
4325 other than FLAGS_N, which we test below. */
4326 next = XEXP (PATTERN (next), 1);
4327 }
4328 else
4329 {
4330 #if DEBUG_CMP
4331 fprintf(stderr, "compare not followed by conditional\n");
4332 debug_rtx(next);
4333 #endif
4334 return false;
4335 }
4336 #if DEBUG_CMP
4337 fprintf(stderr, "conditional is: ");
4338 debug_rtx(next);
4339 #endif
4340
4341 flags_needed = flags_needed_for_conditional (next);
4342 if (flags_needed == FLAGS_N)
4343 {
4344 #if DEBUG_CMP
4345 fprintf(stderr, "compare not followed by conditional\n");
4346 debug_rtx(next);
4347 #endif
4348 return false;
4349 }
4350
4351 /* Compare doesn't set overflow and carry the same way that
4352 arithmetic instructions do, so we can't replace those. */
4353 if (flags_needed & FLAGS_OC)
4354 return false;
4355
4356 prev = cmp;
4357 do {
4358 prev = prev_nonnote_insn (prev);
4359 if (!prev)
4360 {
4361 #if DEBUG_CMP
4362 fprintf(stderr, "No previous insn.\n");
4363 #endif
4364 return false;
4365 }
4366 if (!INSN_P (prev))
4367 {
4368 #if DEBUG_CMP
4369 fprintf(stderr, "Previous insn is a non-insn.\n");
4370 #endif
4371 return false;
4372 }
4373 pp = PATTERN (prev);
4374 if (GET_CODE (pp) != SET)
4375 {
4376 #if DEBUG_CMP
4377 fprintf(stderr, "Previous insn is not a SET.\n");
4378 #endif
4379 return false;
4380 }
4381 pflags = get_attr_flags (prev);
4382
4383 /* Looking up attributes of previous insns corrupted the recog
4384 tables. */
4385 INSN_UID (cmp) = -1;
4386 recog (PATTERN (cmp), cmp, 0);
4387
4388 if (pflags == FLAGS_N
4389 && reg_mentioned_p (op0, pp))
4390 {
4391 #if DEBUG_CMP
4392 fprintf(stderr, "intermediate non-flags insn uses op:\n");
4393 debug_rtx(prev);
4394 #endif
4395 return false;
4396 }
4397
4398 /* Check for comparisons against memory - between volatiles and
4399 aliases, we just can't risk this one. */
4400 if (GET_CODE (operands[0]) == MEM
4401 || GET_CODE (operands[0]) == MEM)
4402 {
4403 #if DEBUG_CMP
4404 fprintf(stderr, "comparisons with memory:\n");
4405 debug_rtx(prev);
4406 #endif
4407 return false;
4408 }
4409
4410 /* Check for PREV changing a register that's used to compute a
4411 value in CMP, even if it doesn't otherwise change flags. */
4412 if (GET_CODE (operands[0]) == REG
4413 && rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[0]))
4414 {
4415 #if DEBUG_CMP
4416 fprintf(stderr, "sub-value affected, op0:\n");
4417 debug_rtx(prev);
4418 #endif
4419 return false;
4420 }
4421 if (GET_CODE (operands[1]) == REG
4422 && rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[1]))
4423 {
4424 #if DEBUG_CMP
4425 fprintf(stderr, "sub-value affected, op1:\n");
4426 debug_rtx(prev);
4427 #endif
4428 return false;
4429 }
4430
4431 } while (pflags == FLAGS_N);
4432 #if DEBUG_CMP
4433 fprintf(stderr, "previous flag-setting insn:\n");
4434 debug_rtx(prev);
4435 debug_rtx(pp);
4436 #endif
4437
4438 if (GET_CODE (pp) == SET
4439 && GET_CODE (XEXP (pp, 0)) == REG
4440 && REGNO (XEXP (pp, 0)) == FLG_REGNO
4441 && GET_CODE (XEXP (pp, 1)) == COMPARE)
4442 {
4443 /* Adjacent cbranches must have the same operands to be
4444 redundant. */
4445 rtx pop0 = XEXP (XEXP (pp, 1), 0);
4446 rtx pop1 = XEXP (XEXP (pp, 1), 1);
4447 #if DEBUG_CMP
4448 fprintf(stderr, "adjacent cbranches\n");
4449 debug_rtx(pop0);
4450 debug_rtx(pop1);
4451 #endif
4452 if (rtx_equal_p (op0, pop0)
4453 && rtx_equal_p (op1, pop1))
4454 return true;
4455 #if DEBUG_CMP
4456 fprintf(stderr, "prev cmp not same\n");
4457 #endif
4458 return false;
4459 }
4460
4461 /* Else the previous insn must be a SET, with either the source or
4462 dest equal to operands[0], and operands[1] must be zero. */
4463
4464 if (!rtx_equal_p (op1, const0_rtx))
4465 {
4466 #if DEBUG_CMP
4467 fprintf(stderr, "operands[1] not const0_rtx\n");
4468 #endif
4469 return false;
4470 }
4471 if (GET_CODE (pp) != SET)
4472 {
4473 #if DEBUG_CMP
4474 fprintf (stderr, "pp not set\n");
4475 #endif
4476 return false;
4477 }
4478 if (!rtx_equal_p (op0, SET_SRC (pp))
4479 && !rtx_equal_p (op0, SET_DEST (pp)))
4480 {
4481 #if DEBUG_CMP
4482 fprintf(stderr, "operands[0] not found in set\n");
4483 #endif
4484 return false;
4485 }
4486
4487 #if DEBUG_CMP
4488 fprintf(stderr, "cmp flags %x prev flags %x\n", flags_needed, pflags);
4489 #endif
4490 if ((pflags & flags_needed) == flags_needed)
4491 return true;
4492
4493 return false;
4494 }
4495
/* Return the pattern for a compare.  This will be commented out if
   the compare is redundant, else a normal pattern is returned.  Thus,
   the assembler output says where the compare would have been.  */
char *
m32c_output_compare (rtx insn, rtx *operands)
{
  /* Byte 0 is ';': returning TEMPL emits the compare as a comment
     (redundant case); returning TEMPL + 1 skips the ';' and emits it
     for real.  Byte 5 is the size suffix, patched below.  */
  static char templ[] = ";cmp.b\t%1,%0";
  /*                           ^ 5  */

  /* Indexed by operand byte size: 1 -> 'b', 2 -> 'w', 3/4 -> 'l'.  */
  templ[5] = " bwll"[GET_MODE_SIZE(GET_MODE(operands[0]))];
  if (m32c_compare_redundant (insn, operands))
    {
#if DEBUG_CMP
      fprintf(stderr, "cbranch: cmp not needed\n");
#endif
      return templ;
    }

#if DEBUG_CMP
  fprintf(stderr, "cbranch: cmp needed: `%s'\n", templ + 1);
#endif
  return templ + 1;
}
4519
#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO m32c_encode_section_info

/* If the frame pointer isn't used, we detect it manually.  But the
   stack pointer doesn't have as flexible addressing as the frame
   pointer, so we always assume we have it.  */

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED hook_bool_void_true

/* The Global `targetm' Variable.  Collects all the TARGET_* hook
   macros defined throughout this file.  */

struct gcc_target targetm = TARGET_INITIALIZER;

/* Garbage-collection root tables generated by gengtype for this
   file.  */
#include "gt-m32c.h"