]> git.ipfire.org Git - thirdparty/gcc.git/blame - gcc/config/m32c/m32c.c
Move crtstuff support to toplevel libgcc
[thirdparty/gcc.git] / gcc / config / m32c / m32c.c
CommitLineData
38b2d076 1/* Target Code for R8C/M16C/M32C
96e45421 2 Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011
38b2d076
DD
3 Free Software Foundation, Inc.
4 Contributed by Red Hat.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it
9 under the terms of the GNU General Public License as published
2f83c7d6 10 by the Free Software Foundation; either version 3, or (at your
38b2d076
DD
11 option) any later version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT
14 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
16 License for more details.
17
18 You should have received a copy of the GNU General Public License
2f83c7d6
NC
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
38b2d076
DD
21
22#include "config.h"
23#include "system.h"
24#include "coretypes.h"
25#include "tm.h"
26#include "rtl.h"
27#include "regs.h"
28#include "hard-reg-set.h"
38b2d076
DD
29#include "insn-config.h"
30#include "conditions.h"
31#include "insn-flags.h"
32#include "output.h"
33#include "insn-attr.h"
34#include "flags.h"
35#include "recog.h"
36#include "reload.h"
718f9c0f 37#include "diagnostic-core.h"
38b2d076
DD
38#include "obstack.h"
39#include "tree.h"
40#include "expr.h"
41#include "optabs.h"
42#include "except.h"
43#include "function.h"
44#include "ggc.h"
45#include "target.h"
46#include "target-def.h"
47#include "tm_p.h"
48#include "langhooks.h"
726a989a 49#include "gimple.h"
fa9fd28a 50#include "df.h"
38b2d076
DD
51
52/* Prototypes */
53
54/* Used by m32c_pushm_popm. */
55typedef enum
56{
57 PP_pushm,
58 PP_popm,
59 PP_justcount
60} Push_Pop_Type;
61
65655f79 62static bool m32c_function_needs_enter (void);
38b2d076 63static tree interrupt_handler (tree *, tree, tree, int, bool *);
5abd2125 64static tree function_vector_handler (tree *, tree, tree, int, bool *);
38b2d076 65static int interrupt_p (tree node);
65655f79
DD
66static int bank_switch_p (tree node);
67static int fast_interrupt_p (tree node);
68static int interrupt_p (tree node);
38b2d076 69static bool m32c_asm_integer (rtx, unsigned int, int);
3101faab 70static int m32c_comp_type_attributes (const_tree, const_tree);
38b2d076
DD
71static bool m32c_fixed_condition_code_regs (unsigned int *, unsigned int *);
72static struct machine_function *m32c_init_machine_status (void);
73static void m32c_insert_attributes (tree, tree *);
c6c3dba9 74static bool m32c_legitimate_address_p (enum machine_mode, rtx, bool);
5fd5d713 75static bool m32c_addr_space_legitimate_address_p (enum machine_mode, rtx, bool, addr_space_t);
d5cc9181 76static rtx m32c_function_arg (cumulative_args_t, enum machine_mode,
444d6efe 77 const_tree, bool);
d5cc9181 78static bool m32c_pass_by_reference (cumulative_args_t, enum machine_mode,
586de218 79 const_tree, bool);
d5cc9181 80static void m32c_function_arg_advance (cumulative_args_t, enum machine_mode,
cd34bbe8 81 const_tree, bool);
c2ed6cf8 82static unsigned int m32c_function_arg_boundary (enum machine_mode, const_tree);
38b2d076 83static int m32c_pushm_popm (Push_Pop_Type);
d5cc9181 84static bool m32c_strict_argument_naming (cumulative_args_t);
38b2d076
DD
85static rtx m32c_struct_value_rtx (tree, int);
86static rtx m32c_subreg (enum machine_mode, rtx, enum machine_mode, int);
87static int need_to_save (int);
2a31793e
AS
88static rtx m32c_function_value (const_tree, const_tree, bool);
89static rtx m32c_libcall_value (enum machine_mode, const_rtx);
90
f6052f86
DD
91/* Returns true if an address is specified, else false. */
92static bool m32c_get_pragma_address (const char *varname, unsigned *addr);
93
5abd2125 94#define SYMBOL_FLAG_FUNCVEC_FUNCTION (SYMBOL_FLAG_MACH_DEP << 0)
38b2d076
DD
95
96#define streq(a,b) (strcmp ((a), (b)) == 0)
97
98/* Internal support routines */
99
100/* Debugging statements are tagged with DEBUG0 only so that they can
101 be easily enabled individually, by replacing the '0' with '1' as
102 needed. */
103#define DEBUG0 0
104#define DEBUG1 1
105
106#if DEBUG0
107/* This is needed by some of the commented-out debug statements
108 below. */
109static char const *class_names[LIM_REG_CLASSES] = REG_CLASS_NAMES;
110#endif
111static int class_contents[LIM_REG_CLASSES][1] = REG_CLASS_CONTENTS;
112
113/* These are all to support encode_pattern(). */
114static char pattern[30], *patternp;
115static GTY(()) rtx patternr[30];
116#define RTX_IS(x) (streq (pattern, x))
117
118/* Some macros to simplify the logic throughout this file. */
119#define IS_MEM_REGNO(regno) ((regno) >= MEM0_REGNO && (regno) <= MEM7_REGNO)
120#define IS_MEM_REG(rtx) (GET_CODE (rtx) == REG && IS_MEM_REGNO (REGNO (rtx)))
121
122#define IS_CR_REGNO(regno) ((regno) >= SB_REGNO && (regno) <= PC_REGNO)
123#define IS_CR_REG(rtx) (GET_CODE (rtx) == REG && IS_CR_REGNO (REGNO (rtx)))
124
5fd5d713
DD
125static int
126far_addr_space_p (rtx x)
127{
128 if (GET_CODE (x) != MEM)
129 return 0;
130#if DEBUG0
131 fprintf(stderr, "\033[35mfar_addr_space: "); debug_rtx(x);
132 fprintf(stderr, " = %d\033[0m\n", MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR);
133#endif
134 return MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR;
135}
136
38b2d076
DD
137/* We do most RTX matching by converting the RTX into a string, and
138 using string compares. This vastly simplifies the logic in many of
139 the functions in this file.
140
141 On exit, pattern[] has the encoded string (use RTX_IS("...") to
142 compare it) and patternr[] has pointers to the nodes in the RTX
143 corresponding to each character in the encoded string. The latter
144 is mostly used by print_operand().
145
146 Unrecognized patterns have '?' in them; this shows up when the
147 assembler complains about syntax errors.
148*/
149
150static void
151encode_pattern_1 (rtx x)
152{
153 int i;
154
155 if (patternp == pattern + sizeof (pattern) - 2)
156 {
157 patternp[-1] = '?';
158 return;
159 }
160
161 patternr[patternp - pattern] = x;
162
163 switch (GET_CODE (x))
164 {
165 case REG:
166 *patternp++ = 'r';
167 break;
168 case SUBREG:
169 if (GET_MODE_SIZE (GET_MODE (x)) !=
170 GET_MODE_SIZE (GET_MODE (XEXP (x, 0))))
171 *patternp++ = 'S';
172 encode_pattern_1 (XEXP (x, 0));
173 break;
174 case MEM:
175 *patternp++ = 'm';
176 case CONST:
177 encode_pattern_1 (XEXP (x, 0));
178 break;
5fd5d713
DD
179 case SIGN_EXTEND:
180 *patternp++ = '^';
181 *patternp++ = 'S';
182 encode_pattern_1 (XEXP (x, 0));
183 break;
184 case ZERO_EXTEND:
185 *patternp++ = '^';
186 *patternp++ = 'Z';
187 encode_pattern_1 (XEXP (x, 0));
188 break;
38b2d076
DD
189 case PLUS:
190 *patternp++ = '+';
191 encode_pattern_1 (XEXP (x, 0));
192 encode_pattern_1 (XEXP (x, 1));
193 break;
194 case PRE_DEC:
195 *patternp++ = '>';
196 encode_pattern_1 (XEXP (x, 0));
197 break;
198 case POST_INC:
199 *patternp++ = '<';
200 encode_pattern_1 (XEXP (x, 0));
201 break;
202 case LO_SUM:
203 *patternp++ = 'L';
204 encode_pattern_1 (XEXP (x, 0));
205 encode_pattern_1 (XEXP (x, 1));
206 break;
207 case HIGH:
208 *patternp++ = 'H';
209 encode_pattern_1 (XEXP (x, 0));
210 break;
211 case SYMBOL_REF:
212 *patternp++ = 's';
213 break;
214 case LABEL_REF:
215 *patternp++ = 'l';
216 break;
217 case CODE_LABEL:
218 *patternp++ = 'c';
219 break;
220 case CONST_INT:
221 case CONST_DOUBLE:
222 *patternp++ = 'i';
223 break;
224 case UNSPEC:
225 *patternp++ = 'u';
226 *patternp++ = '0' + XCINT (x, 1, UNSPEC);
227 for (i = 0; i < XVECLEN (x, 0); i++)
228 encode_pattern_1 (XVECEXP (x, 0, i));
229 break;
230 case USE:
231 *patternp++ = 'U';
232 break;
233 case PARALLEL:
234 *patternp++ = '|';
235 for (i = 0; i < XVECLEN (x, 0); i++)
236 encode_pattern_1 (XVECEXP (x, 0, i));
237 break;
238 case EXPR_LIST:
239 *patternp++ = 'E';
240 encode_pattern_1 (XEXP (x, 0));
241 if (XEXP (x, 1))
242 encode_pattern_1 (XEXP (x, 1));
243 break;
244 default:
245 *patternp++ = '?';
246#if DEBUG0
247 fprintf (stderr, "can't encode pattern %s\n",
248 GET_RTX_NAME (GET_CODE (x)));
249 debug_rtx (x);
250 gcc_unreachable ();
251#endif
252 break;
253 }
254}
255
256static void
257encode_pattern (rtx x)
258{
259 patternp = pattern;
260 encode_pattern_1 (x);
261 *patternp = 0;
262}
263
264/* Since register names indicate the mode they're used in, we need a
265 way to determine which name to refer to the register with. Called
266 by print_operand(). */
267
268static const char *
269reg_name_with_mode (int regno, enum machine_mode mode)
270{
271 int mlen = GET_MODE_SIZE (mode);
272 if (regno == R0_REGNO && mlen == 1)
273 return "r0l";
274 if (regno == R0_REGNO && (mlen == 3 || mlen == 4))
275 return "r2r0";
276 if (regno == R0_REGNO && mlen == 6)
277 return "r2r1r0";
278 if (regno == R0_REGNO && mlen == 8)
279 return "r3r1r2r0";
280 if (regno == R1_REGNO && mlen == 1)
281 return "r1l";
282 if (regno == R1_REGNO && (mlen == 3 || mlen == 4))
283 return "r3r1";
284 if (regno == A0_REGNO && TARGET_A16 && (mlen == 3 || mlen == 4))
285 return "a1a0";
286 return reg_names[regno];
287}
288
289/* How many bytes a register uses on stack when it's pushed. We need
290 to know this because the push opcode needs to explicitly indicate
291 the size of the register, even though the name of the register
292 already tells it that. Used by m32c_output_reg_{push,pop}, which
293 is only used through calls to ASM_OUTPUT_REG_{PUSH,POP}. */
294
295static int
296reg_push_size (int regno)
297{
298 switch (regno)
299 {
300 case R0_REGNO:
301 case R1_REGNO:
302 return 2;
303 case R2_REGNO:
304 case R3_REGNO:
305 case FLG_REGNO:
306 return 2;
307 case A0_REGNO:
308 case A1_REGNO:
309 case SB_REGNO:
310 case FB_REGNO:
311 case SP_REGNO:
312 if (TARGET_A16)
313 return 2;
314 else
315 return 3;
316 default:
317 gcc_unreachable ();
318 }
319}
320
38b2d076
DD
321/* Given two register classes, find the largest intersection between
322 them. If there is no intersection, return RETURNED_IF_EMPTY
323 instead. */
35bdbc69
AS
324static reg_class_t
325reduce_class (reg_class_t original_class, reg_class_t limiting_class,
326 reg_class_t returned_if_empty)
38b2d076 327{
35bdbc69
AS
328 HARD_REG_SET cc;
329 int i;
330 reg_class_t best = NO_REGS;
331 unsigned int best_size = 0;
38b2d076
DD
332
333 if (original_class == limiting_class)
334 return original_class;
335
35bdbc69
AS
336 cc = reg_class_contents[original_class];
337 AND_HARD_REG_SET (cc, reg_class_contents[limiting_class]);
38b2d076 338
38b2d076
DD
339 for (i = 0; i < LIM_REG_CLASSES; i++)
340 {
35bdbc69
AS
341 if (hard_reg_set_subset_p (reg_class_contents[i], cc))
342 if (best_size < reg_class_size[i])
38b2d076 343 {
35bdbc69
AS
344 best = (reg_class_t) i;
345 best_size = reg_class_size[i];
38b2d076
DD
346 }
347
348 }
349 if (best == NO_REGS)
350 return returned_if_empty;
351 return best;
352}
353
38b2d076
DD
354/* Used by m32c_register_move_cost to determine if a move is
355 impossibly expensive. */
0e607518
AS
356static bool
357class_can_hold_mode (reg_class_t rclass, enum machine_mode mode)
38b2d076
DD
358{
359 /* Cache the results: 0=untested 1=no 2=yes */
360 static char results[LIM_REG_CLASSES][MAX_MACHINE_MODE];
0e607518
AS
361
362 if (results[(int) rclass][mode] == 0)
38b2d076 363 {
0e607518 364 int r;
0a2aaacc 365 results[rclass][mode] = 1;
38b2d076 366 for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
0e607518 367 if (in_hard_reg_set_p (reg_class_contents[(int) rclass], mode, r)
38b2d076
DD
368 && HARD_REGNO_MODE_OK (r, mode))
369 {
0e607518
AS
370 results[rclass][mode] = 2;
371 break;
38b2d076
DD
372 }
373 }
0e607518 374
38b2d076
DD
375#if DEBUG0
376 fprintf (stderr, "class %s can hold %s? %s\n",
0e607518 377 class_names[(int) rclass], mode_name[mode],
0a2aaacc 378 (results[rclass][mode] == 2) ? "yes" : "no");
38b2d076 379#endif
0e607518 380 return results[(int) rclass][mode] == 2;
38b2d076
DD
381}
382
383/* Run-time Target Specification. */
384
385/* Memregs are memory locations that gcc treats like general
386 registers, as there are a limited number of true registers and the
387 m32c families can use memory in most places that registers can be
388 used.
389
390 However, since memory accesses are more expensive than registers,
391 we allow the user to limit the number of memregs available, in
392 order to try to persuade gcc to try harder to use real registers.
393
394 Memregs are provided by m32c-lib1.S.
395*/
396
38b2d076
DD
397int ok_to_change_target_memregs = TRUE;
398
f28f2337
AS
399/* Implements TARGET_OPTION_OVERRIDE. */
400
401#undef TARGET_OPTION_OVERRIDE
402#define TARGET_OPTION_OVERRIDE m32c_option_override
403
404static void
405m32c_option_override (void)
38b2d076 406{
f28f2337 407 /* We limit memregs to 0..16, and provide a default. */
bbfc9a8c 408 if (global_options_set.x_target_memregs)
38b2d076
DD
409 {
410 if (target_memregs < 0 || target_memregs > 16)
411 error ("invalid target memregs value '%d'", target_memregs);
412 }
413 else
07127a0a 414 target_memregs = 16;
18b80268
DD
415
416 if (TARGET_A24)
417 flag_ivopts = 0;
0685e770
DD
418
419 /* This target defaults to strict volatile bitfields. */
420 if (flag_strict_volatile_bitfields < 0)
421 flag_strict_volatile_bitfields = 1;
d123bf41
DD
422
423 /* r8c/m16c have no 16-bit indirect call, so thunks are involved.
424 This is always worse than an absolute call. */
425 if (TARGET_A16)
426 flag_no_function_cse = 1;
a4403164
DD
427
428 /* This wants to put insns between compares and their jumps. */
429 /* FIXME: The right solution is to properly trace the flags register
430 values, but that is too much work for stage 4. */
431 flag_combine_stack_adjustments = 0;
d123bf41
DD
432}
433
434#undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
435#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m32c_override_options_after_change
436
437static void
438m32c_override_options_after_change (void)
439{
440 if (TARGET_A16)
441 flag_no_function_cse = 1;
38b2d076
DD
442}
443
444/* Defining data structures for per-function information */
445
446/* The usual; we set up our machine_function data. */
447static struct machine_function *
448m32c_init_machine_status (void)
449{
a9429e29 450 return ggc_alloc_cleared_machine_function ();
38b2d076
DD
451}
452
453/* Implements INIT_EXPANDERS. We just set up to call the above
454 function. */
455void
456m32c_init_expanders (void)
457{
458 init_machine_status = m32c_init_machine_status;
459}
460
461/* Storage Layout */
462
38b2d076
DD
463/* Register Basics */
464
465/* Basic Characteristics of Registers */
466
467/* Whether a mode fits in a register is complex enough to warrant a
468 table. */
469static struct
470{
471 char qi_regs;
472 char hi_regs;
473 char pi_regs;
474 char si_regs;
475 char di_regs;
476} nregs_table[FIRST_PSEUDO_REGISTER] =
477{
478 { 1, 1, 2, 2, 4 }, /* r0 */
479 { 0, 1, 0, 0, 0 }, /* r2 */
480 { 1, 1, 2, 2, 0 }, /* r1 */
481 { 0, 1, 0, 0, 0 }, /* r3 */
482 { 0, 1, 1, 0, 0 }, /* a0 */
483 { 0, 1, 1, 0, 0 }, /* a1 */
484 { 0, 1, 1, 0, 0 }, /* sb */
485 { 0, 1, 1, 0, 0 }, /* fb */
486 { 0, 1, 1, 0, 0 }, /* sp */
487 { 1, 1, 1, 0, 0 }, /* pc */
488 { 0, 0, 0, 0, 0 }, /* fl */
489 { 1, 1, 1, 0, 0 }, /* ap */
490 { 1, 1, 2, 2, 4 }, /* mem0 */
491 { 1, 1, 2, 2, 4 }, /* mem1 */
492 { 1, 1, 2, 2, 4 }, /* mem2 */
493 { 1, 1, 2, 2, 4 }, /* mem3 */
494 { 1, 1, 2, 2, 4 }, /* mem4 */
495 { 1, 1, 2, 2, 0 }, /* mem5 */
496 { 1, 1, 2, 2, 0 }, /* mem6 */
497 { 1, 1, 0, 0, 0 }, /* mem7 */
498};
499
5efd84c5
NF
500/* Implements TARGET_CONDITIONAL_REGISTER_USAGE. We adjust the number
501 of available memregs, and select which registers need to be preserved
38b2d076
DD
502 across calls based on the chip family. */
503
5efd84c5
NF
504#undef TARGET_CONDITIONAL_REGISTER_USAGE
505#define TARGET_CONDITIONAL_REGISTER_USAGE m32c_conditional_register_usage
d6d17ae7 506void
38b2d076
DD
507m32c_conditional_register_usage (void)
508{
38b2d076
DD
509 int i;
510
511 if (0 <= target_memregs && target_memregs <= 16)
512 {
513 /* The command line option is bytes, but our "registers" are
514 16-bit words. */
65655f79 515 for (i = (target_memregs+1)/2; i < 8; i++)
38b2d076
DD
516 {
517 fixed_regs[MEM0_REGNO + i] = 1;
518 CLEAR_HARD_REG_BIT (reg_class_contents[MEM_REGS], MEM0_REGNO + i);
519 }
520 }
521
522 /* M32CM and M32C preserve more registers across function calls. */
523 if (TARGET_A24)
524 {
525 call_used_regs[R1_REGNO] = 0;
526 call_used_regs[R2_REGNO] = 0;
527 call_used_regs[R3_REGNO] = 0;
528 call_used_regs[A0_REGNO] = 0;
529 call_used_regs[A1_REGNO] = 0;
530 }
531}
532
533/* How Values Fit in Registers */
534
535/* Implements HARD_REGNO_NREGS. This is complicated by the fact that
536 different registers are different sizes from each other, *and* may
537 be different sizes in different chip families. */
b8a669d0
DD
538static int
539m32c_hard_regno_nregs_1 (int regno, enum machine_mode mode)
38b2d076
DD
540{
541 if (regno == FLG_REGNO && mode == CCmode)
542 return 1;
543 if (regno >= FIRST_PSEUDO_REGISTER)
544 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
545
546 if (regno >= MEM0_REGNO && regno <= MEM7_REGNO)
547 return (GET_MODE_SIZE (mode) + 1) / 2;
548
549 if (GET_MODE_SIZE (mode) <= 1)
550 return nregs_table[regno].qi_regs;
551 if (GET_MODE_SIZE (mode) <= 2)
552 return nregs_table[regno].hi_regs;
5fd5d713 553 if (regno == A0_REGNO && mode == SImode && TARGET_A16)
38b2d076
DD
554 return 2;
555 if ((GET_MODE_SIZE (mode) <= 3 || mode == PSImode) && TARGET_A24)
556 return nregs_table[regno].pi_regs;
557 if (GET_MODE_SIZE (mode) <= 4)
558 return nregs_table[regno].si_regs;
559 if (GET_MODE_SIZE (mode) <= 8)
560 return nregs_table[regno].di_regs;
561 return 0;
562}
563
b8a669d0
DD
564int
565m32c_hard_regno_nregs (int regno, enum machine_mode mode)
566{
567 int rv = m32c_hard_regno_nregs_1 (regno, mode);
568 return rv ? rv : 1;
569}
570
38b2d076
DD
571/* Implements HARD_REGNO_MODE_OK. The above function does the work
572 already; just test its return value. */
573int
574m32c_hard_regno_ok (int regno, enum machine_mode mode)
575{
b8a669d0 576 return m32c_hard_regno_nregs_1 (regno, mode) != 0;
38b2d076
DD
577}
578
579/* Implements MODES_TIEABLE_P. In general, modes aren't tieable since
580 registers are all different sizes. However, since most modes are
581 bigger than our registers anyway, it's easier to implement this
582 function that way, leaving QImode as the only unique case. */
583int
584m32c_modes_tieable_p (enum machine_mode m1, enum machine_mode m2)
585{
586 if (GET_MODE_SIZE (m1) == GET_MODE_SIZE (m2))
587 return 1;
588
07127a0a 589#if 0
38b2d076
DD
590 if (m1 == QImode || m2 == QImode)
591 return 0;
07127a0a 592#endif
38b2d076
DD
593
594 return 1;
595}
596
597/* Register Classes */
598
599/* Implements REGNO_REG_CLASS. */
444d6efe 600enum reg_class
38b2d076
DD
601m32c_regno_reg_class (int regno)
602{
603 switch (regno)
604 {
605 case R0_REGNO:
606 return R0_REGS;
607 case R1_REGNO:
608 return R1_REGS;
609 case R2_REGNO:
610 return R2_REGS;
611 case R3_REGNO:
612 return R3_REGS;
613 case A0_REGNO:
22843acd 614 return A0_REGS;
38b2d076 615 case A1_REGNO:
22843acd 616 return A1_REGS;
38b2d076
DD
617 case SB_REGNO:
618 return SB_REGS;
619 case FB_REGNO:
620 return FB_REGS;
621 case SP_REGNO:
622 return SP_REGS;
623 case FLG_REGNO:
624 return FLG_REGS;
625 default:
626 if (IS_MEM_REGNO (regno))
627 return MEM_REGS;
628 return ALL_REGS;
629 }
630}
631
632/* Implements REG_CLASS_FROM_CONSTRAINT. Note that some constraints only match
633 for certain chip families. */
634int
635m32c_reg_class_from_constraint (char c ATTRIBUTE_UNUSED, const char *s)
636{
637 if (memcmp (s, "Rsp", 3) == 0)
638 return SP_REGS;
639 if (memcmp (s, "Rfb", 3) == 0)
640 return FB_REGS;
641 if (memcmp (s, "Rsb", 3) == 0)
642 return SB_REGS;
07127a0a
DD
643 if (memcmp (s, "Rcr", 3) == 0)
644 return TARGET_A16 ? CR_REGS : NO_REGS;
645 if (memcmp (s, "Rcl", 3) == 0)
646 return TARGET_A24 ? CR_REGS : NO_REGS;
38b2d076
DD
647 if (memcmp (s, "R0w", 3) == 0)
648 return R0_REGS;
649 if (memcmp (s, "R1w", 3) == 0)
650 return R1_REGS;
651 if (memcmp (s, "R2w", 3) == 0)
652 return R2_REGS;
653 if (memcmp (s, "R3w", 3) == 0)
654 return R3_REGS;
655 if (memcmp (s, "R02", 3) == 0)
656 return R02_REGS;
18b80268
DD
657 if (memcmp (s, "R13", 3) == 0)
658 return R13_REGS;
38b2d076
DD
659 if (memcmp (s, "R03", 3) == 0)
660 return R03_REGS;
661 if (memcmp (s, "Rdi", 3) == 0)
662 return DI_REGS;
663 if (memcmp (s, "Rhl", 3) == 0)
664 return HL_REGS;
665 if (memcmp (s, "R23", 3) == 0)
666 return R23_REGS;
07127a0a
DD
667 if (memcmp (s, "Ra0", 3) == 0)
668 return A0_REGS;
669 if (memcmp (s, "Ra1", 3) == 0)
670 return A1_REGS;
38b2d076
DD
671 if (memcmp (s, "Raa", 3) == 0)
672 return A_REGS;
07127a0a
DD
673 if (memcmp (s, "Raw", 3) == 0)
674 return TARGET_A16 ? A_REGS : NO_REGS;
675 if (memcmp (s, "Ral", 3) == 0)
676 return TARGET_A24 ? A_REGS : NO_REGS;
38b2d076
DD
677 if (memcmp (s, "Rqi", 3) == 0)
678 return QI_REGS;
679 if (memcmp (s, "Rad", 3) == 0)
680 return AD_REGS;
681 if (memcmp (s, "Rsi", 3) == 0)
682 return SI_REGS;
683 if (memcmp (s, "Rhi", 3) == 0)
684 return HI_REGS;
685 if (memcmp (s, "Rhc", 3) == 0)
686 return HC_REGS;
687 if (memcmp (s, "Rra", 3) == 0)
688 return RA_REGS;
689 if (memcmp (s, "Rfl", 3) == 0)
690 return FLG_REGS;
691 if (memcmp (s, "Rmm", 3) == 0)
692 {
693 if (fixed_regs[MEM0_REGNO])
694 return NO_REGS;
695 return MEM_REGS;
696 }
697
698 /* PSImode registers - i.e. whatever can hold a pointer. */
699 if (memcmp (s, "Rpi", 3) == 0)
700 {
701 if (TARGET_A16)
702 return HI_REGS;
703 else
704 return RA_REGS; /* r2r0 and r3r1 can hold pointers. */
705 }
706
707 /* We handle this one as an EXTRA_CONSTRAINT. */
708 if (memcmp (s, "Rpa", 3) == 0)
709 return NO_REGS;
710
07127a0a
DD
711 if (*s == 'R')
712 {
713 fprintf(stderr, "unrecognized R constraint: %.3s\n", s);
714 gcc_unreachable();
715 }
716
38b2d076
DD
717 return NO_REGS;
718}
719
720/* Implements REGNO_OK_FOR_BASE_P. */
721int
722m32c_regno_ok_for_base_p (int regno)
723{
724 if (regno == A0_REGNO
725 || regno == A1_REGNO || regno >= FIRST_PSEUDO_REGISTER)
726 return 1;
727 return 0;
728}
729
730#define DEBUG_RELOAD 0
731
b05933f5 732/* Implements TARGET_PREFERRED_RELOAD_CLASS. In general, prefer general
38b2d076 733 registers of the appropriate size. */
b05933f5
AS
734
735#undef TARGET_PREFERRED_RELOAD_CLASS
736#define TARGET_PREFERRED_RELOAD_CLASS m32c_preferred_reload_class
737
738static reg_class_t
739m32c_preferred_reload_class (rtx x, reg_class_t rclass)
38b2d076 740{
b05933f5 741 reg_class_t newclass = rclass;
38b2d076
DD
742
743#if DEBUG_RELOAD
744 fprintf (stderr, "\npreferred_reload_class for %s is ",
745 class_names[rclass]);
746#endif
747 if (rclass == NO_REGS)
748 rclass = GET_MODE (x) == QImode ? HL_REGS : R03_REGS;
749
0e607518 750 if (reg_classes_intersect_p (rclass, CR_REGS))
38b2d076
DD
751 {
752 switch (GET_MODE (x))
753 {
754 case QImode:
755 newclass = HL_REGS;
756 break;
757 default:
758 /* newclass = HI_REGS; */
759 break;
760 }
761 }
762
763 else if (newclass == QI_REGS && GET_MODE_SIZE (GET_MODE (x)) > 2)
764 newclass = SI_REGS;
765 else if (GET_MODE_SIZE (GET_MODE (x)) > 4
b05933f5 766 && ! reg_class_subset_p (R03_REGS, rclass))
38b2d076
DD
767 newclass = DI_REGS;
768
769 rclass = reduce_class (rclass, newclass, rclass);
770
771 if (GET_MODE (x) == QImode)
772 rclass = reduce_class (rclass, HL_REGS, rclass);
773
774#if DEBUG_RELOAD
775 fprintf (stderr, "%s\n", class_names[rclass]);
776 debug_rtx (x);
777
778 if (GET_CODE (x) == MEM
779 && GET_CODE (XEXP (x, 0)) == PLUS
780 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
781 fprintf (stderr, "Glorm!\n");
782#endif
783 return rclass;
784}
785
b05933f5
AS
786/* Implements TARGET_PREFERRED_OUTPUT_RELOAD_CLASS. */
787
788#undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
789#define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS m32c_preferred_output_reload_class
790
791static reg_class_t
792m32c_preferred_output_reload_class (rtx x, reg_class_t rclass)
38b2d076
DD
793{
794 return m32c_preferred_reload_class (x, rclass);
795}
796
797/* Implements LIMIT_RELOAD_CLASS. We basically want to avoid using
798 address registers for reloads since they're needed for address
799 reloads. */
800int
801m32c_limit_reload_class (enum machine_mode mode, int rclass)
802{
803#if DEBUG_RELOAD
804 fprintf (stderr, "limit_reload_class for %s: %s ->",
805 mode_name[mode], class_names[rclass]);
806#endif
807
808 if (mode == QImode)
809 rclass = reduce_class (rclass, HL_REGS, rclass);
810 else if (mode == HImode)
811 rclass = reduce_class (rclass, HI_REGS, rclass);
812 else if (mode == SImode)
813 rclass = reduce_class (rclass, SI_REGS, rclass);
814
815 if (rclass != A_REGS)
816 rclass = reduce_class (rclass, DI_REGS, rclass);
817
818#if DEBUG_RELOAD
819 fprintf (stderr, " %s\n", class_names[rclass]);
820#endif
821 return rclass;
822}
823
824/* Implements SECONDARY_RELOAD_CLASS. QImode have to be reloaded in
825 r0 or r1, as those are the only real QImode registers. CR regs get
826 reloaded through appropriately sized general or address
827 registers. */
828int
829m32c_secondary_reload_class (int rclass, enum machine_mode mode, rtx x)
830{
831 int cc = class_contents[rclass][0];
832#if DEBUG0
833 fprintf (stderr, "\nsecondary reload class %s %s\n",
834 class_names[rclass], mode_name[mode]);
835 debug_rtx (x);
836#endif
837 if (mode == QImode
838 && GET_CODE (x) == MEM && (cc & ~class_contents[R23_REGS][0]) == 0)
839 return QI_REGS;
0e607518 840 if (reg_classes_intersect_p (rclass, CR_REGS)
38b2d076
DD
841 && GET_CODE (x) == REG
842 && REGNO (x) >= SB_REGNO && REGNO (x) <= SP_REGNO)
13a23442 843 return (TARGET_A16 || mode == HImode) ? HI_REGS : A_REGS;
38b2d076
DD
844 return NO_REGS;
845}
846
184866c5 847/* Implements TARGET_CLASS_LIKELY_SPILLED_P. A_REGS is needed for address
38b2d076 848 reloads. */
184866c5
AS
849
850#undef TARGET_CLASS_LIKELY_SPILLED_P
851#define TARGET_CLASS_LIKELY_SPILLED_P m32c_class_likely_spilled_p
852
853static bool
854m32c_class_likely_spilled_p (reg_class_t regclass)
38b2d076
DD
855{
856 if (regclass == A_REGS)
184866c5
AS
857 return true;
858
859 return (reg_class_size[(int) regclass] == 1);
38b2d076
DD
860}
861
c4831cff 862/* Implements TARGET_CLASS_MAX_NREGS. We calculate this according to its
38b2d076
DD
863 documented meaning, to avoid potential inconsistencies with actual
864 class definitions. */
c4831cff
AS
865
866#undef TARGET_CLASS_MAX_NREGS
867#define TARGET_CLASS_MAX_NREGS m32c_class_max_nregs
868
869static unsigned char
870m32c_class_max_nregs (reg_class_t regclass, enum machine_mode mode)
38b2d076 871{
c4831cff
AS
872 int rn;
873 unsigned char max = 0;
38b2d076
DD
874
875 for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
c4831cff 876 if (TEST_HARD_REG_BIT (reg_class_contents[(int) regclass], rn))
38b2d076 877 {
c4831cff 878 unsigned char n = m32c_hard_regno_nregs (rn, mode);
38b2d076
DD
879 if (max < n)
880 max = n;
881 }
882 return max;
883}
884
885/* Implements CANNOT_CHANGE_MODE_CLASS. Only r0 and r1 can change to
886 QI (r0l, r1l) because the chip doesn't support QI ops on other
887 registers (well, it does on a0/a1 but if we let gcc do that, reload
888 suffers). Otherwise, we allow changes to larger modes. */
889int
890m32c_cannot_change_mode_class (enum machine_mode from,
891 enum machine_mode to, int rclass)
892{
db9c8397 893 int rn;
38b2d076
DD
894#if DEBUG0
895 fprintf (stderr, "cannot change from %s to %s in %s\n",
896 mode_name[from], mode_name[to], class_names[rclass]);
897#endif
898
db9c8397
DD
899 /* If the larger mode isn't allowed in any of these registers, we
900 can't allow the change. */
901 for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
902 if (class_contents[rclass][0] & (1 << rn))
903 if (! m32c_hard_regno_ok (rn, to))
904 return 1;
905
38b2d076
DD
906 if (to == QImode)
907 return (class_contents[rclass][0] & 0x1ffa);
908
909 if (class_contents[rclass][0] & 0x0005 /* r0, r1 */
910 && GET_MODE_SIZE (from) > 1)
911 return 0;
912 if (GET_MODE_SIZE (from) > 2) /* all other regs */
913 return 0;
914
915 return 1;
916}
917
918/* Helpers for the rest of the file. */
919/* TRUE if the rtx is a REG rtx for the given register. */
920#define IS_REG(rtx,regno) (GET_CODE (rtx) == REG \
921 && REGNO (rtx) == regno)
922/* TRUE if the rtx is a pseudo - specifically, one we can use as a
923 base register in address calculations (hence the "strict"
924 argument). */
925#define IS_PSEUDO(rtx,strict) (!strict && GET_CODE (rtx) == REG \
926 && (REGNO (rtx) == AP_REGNO \
927 || REGNO (rtx) >= FIRST_PSEUDO_REGISTER))
928
929/* Implements CONST_OK_FOR_CONSTRAINT_P. Currently, all constant
930 constraints start with 'I', with the next two characters indicating
931 the type and size of the range allowed. */
932int
933m32c_const_ok_for_constraint_p (HOST_WIDE_INT value,
934 char c ATTRIBUTE_UNUSED, const char *str)
935{
936 /* s=signed u=unsigned n=nonzero m=minus l=log2able,
937 [sun] bits [SUN] bytes, p=pointer size
938 I[-0-9][0-9] matches that number */
939 if (memcmp (str, "Is3", 3) == 0)
940 {
941 return (-8 <= value && value <= 7);
942 }
943 if (memcmp (str, "IS1", 3) == 0)
944 {
945 return (-128 <= value && value <= 127);
946 }
947 if (memcmp (str, "IS2", 3) == 0)
948 {
949 return (-32768 <= value && value <= 32767);
950 }
951 if (memcmp (str, "IU2", 3) == 0)
952 {
953 return (0 <= value && value <= 65535);
954 }
955 if (memcmp (str, "IU3", 3) == 0)
956 {
957 return (0 <= value && value <= 0x00ffffff);
958 }
959 if (memcmp (str, "In4", 3) == 0)
960 {
961 return (-8 <= value && value && value <= 8);
962 }
963 if (memcmp (str, "In5", 3) == 0)
964 {
965 return (-16 <= value && value && value <= 16);
966 }
23fed240
DD
967 if (memcmp (str, "In6", 3) == 0)
968 {
969 return (-32 <= value && value && value <= 32);
970 }
38b2d076
DD
971 if (memcmp (str, "IM2", 3) == 0)
972 {
973 return (-65536 <= value && value && value <= -1);
974 }
975 if (memcmp (str, "Ilb", 3) == 0)
976 {
977 int b = exact_log2 (value);
8e4edce7 978 return (b >= 0 && b <= 7);
38b2d076 979 }
07127a0a
DD
980 if (memcmp (str, "Imb", 3) == 0)
981 {
982 int b = exact_log2 ((value ^ 0xff) & 0xff);
8e4edce7 983 return (b >= 0 && b <= 7);
07127a0a 984 }
600e668e
DD
985 if (memcmp (str, "ImB", 3) == 0)
986 {
987 int b = exact_log2 ((value ^ 0xffff) & 0xffff);
988 return (b >= 0 && b <= 7);
989 }
38b2d076
DD
990 if (memcmp (str, "Ilw", 3) == 0)
991 {
992 int b = exact_log2 (value);
8e4edce7 993 return (b >= 0 && b <= 15);
38b2d076 994 }
07127a0a
DD
995 if (memcmp (str, "Imw", 3) == 0)
996 {
997 int b = exact_log2 ((value ^ 0xffff) & 0xffff);
8e4edce7 998 return (b >= 0 && b <= 15);
07127a0a
DD
999 }
1000 if (memcmp (str, "I00", 3) == 0)
1001 {
1002 return (value == 0);
1003 }
38b2d076
DD
1004 return 0;
1005}
1006
5fd5d713
DD
1007#define A0_OR_PSEUDO(x) (IS_REG(x, A0_REGNO) || REGNO (x) >= FIRST_PSEUDO_REGISTER)
1008
38b2d076
DD
1009/* Implements EXTRA_CONSTRAINT_STR (see next function too). 'S' is
1010 for memory constraints, plus "Rpa" for PARALLEL rtx's we use for
1011 call return values. */
1012int
1013m32c_extra_constraint_p2 (rtx value, char c ATTRIBUTE_UNUSED, const char *str)
1014{
1015 encode_pattern (value);
5fd5d713
DD
1016
1017 if (far_addr_space_p (value))
1018 {
1019 if (memcmp (str, "SF", 2) == 0)
1020 {
1021 return ( (RTX_IS ("mr")
1022 && A0_OR_PSEUDO (patternr[1])
1023 && GET_MODE (patternr[1]) == SImode)
1024 || (RTX_IS ("m+^Sri")
1025 && A0_OR_PSEUDO (patternr[4])
1026 && GET_MODE (patternr[4]) == HImode)
1027 || (RTX_IS ("m+^Srs")
1028 && A0_OR_PSEUDO (patternr[4])
1029 && GET_MODE (patternr[4]) == HImode)
1030 || (RTX_IS ("m+^S+ris")
1031 && A0_OR_PSEUDO (patternr[5])
1032 && GET_MODE (patternr[5]) == HImode)
1033 || RTX_IS ("ms")
1034 );
1035 }
1036 return 0;
1037 }
1038
38b2d076
DD
1039 if (memcmp (str, "Sd", 2) == 0)
1040 {
1041 /* This is the common "src/dest" address */
1042 rtx r;
1043 if (GET_CODE (value) == MEM && CONSTANT_P (XEXP (value, 0)))
1044 return 1;
1045 if (RTX_IS ("ms") || RTX_IS ("m+si"))
1046 return 1;
07127a0a
DD
1047 if (RTX_IS ("m++rii"))
1048 {
1049 if (REGNO (patternr[3]) == FB_REGNO
1050 && INTVAL (patternr[4]) == 0)
1051 return 1;
1052 }
38b2d076
DD
1053 if (RTX_IS ("mr"))
1054 r = patternr[1];
1055 else if (RTX_IS ("m+ri") || RTX_IS ("m+rs") || RTX_IS ("m+r+si"))
1056 r = patternr[2];
1057 else
1058 return 0;
1059 if (REGNO (r) == SP_REGNO)
1060 return 0;
1061 return m32c_legitimate_address_p (GET_MODE (value), XEXP (value, 0), 1);
1062 }
1063 else if (memcmp (str, "Sa", 2) == 0)
1064 {
1065 rtx r;
1066 if (RTX_IS ("mr"))
1067 r = patternr[1];
1068 else if (RTX_IS ("m+ri"))
1069 r = patternr[2];
1070 else
1071 return 0;
1072 return (IS_REG (r, A0_REGNO) || IS_REG (r, A1_REGNO));
1073 }
1074 else if (memcmp (str, "Si", 2) == 0)
1075 {
1076 return (RTX_IS ("mi") || RTX_IS ("ms") || RTX_IS ("m+si"));
1077 }
1078 else if (memcmp (str, "Ss", 2) == 0)
1079 {
1080 return ((RTX_IS ("mr")
1081 && (IS_REG (patternr[1], SP_REGNO)))
1082 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SP_REGNO))));
1083 }
1084 else if (memcmp (str, "Sf", 2) == 0)
1085 {
1086 return ((RTX_IS ("mr")
1087 && (IS_REG (patternr[1], FB_REGNO)))
1088 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], FB_REGNO))));
1089 }
1090 else if (memcmp (str, "Sb", 2) == 0)
1091 {
1092 return ((RTX_IS ("mr")
1093 && (IS_REG (patternr[1], SB_REGNO)))
1094 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SB_REGNO))));
1095 }
07127a0a
DD
1096 else if (memcmp (str, "Sp", 2) == 0)
1097 {
1098 /* Absolute addresses 0..0x1fff used for bit addressing (I/O ports) */
1099 return (RTX_IS ("mi")
1100 && !(INTVAL (patternr[1]) & ~0x1fff));
1101 }
38b2d076
DD
1102 else if (memcmp (str, "S1", 2) == 0)
1103 {
1104 return r1h_operand (value, QImode);
1105 }
5fd5d713
DD
1106 else if (memcmp (str, "SF", 2) == 0)
1107 {
1108 return 0;
1109 }
38b2d076
DD
1110
1111 gcc_assert (str[0] != 'S');
1112
1113 if (memcmp (str, "Rpa", 2) == 0)
1114 return GET_CODE (value) == PARALLEL;
1115
1116 return 0;
1117}
1118
1119/* This is for when we're debugging the above. */
1120int
1121m32c_extra_constraint_p (rtx value, char c, const char *str)
1122{
1123 int rv = m32c_extra_constraint_p2 (value, c, str);
1124#if DEBUG0
1125 fprintf (stderr, "\nconstraint %.*s: %d\n", CONSTRAINT_LEN (c, str), str,
1126 rv);
1127 debug_rtx (value);
1128#endif
1129 return rv;
1130}
1131
1132/* Implements EXTRA_MEMORY_CONSTRAINT. Currently, we only use strings
1133 starting with 'S'. */
1134int
1135m32c_extra_memory_constraint (char c, const char *str ATTRIBUTE_UNUSED)
1136{
1137 return c == 'S';
1138}
1139
1140/* Implements EXTRA_ADDRESS_CONSTRAINT. We reserve 'A' strings for these,
1141 but don't currently define any. */
1142int
1143m32c_extra_address_constraint (char c, const char *str ATTRIBUTE_UNUSED)
1144{
1145 return c == 'A';
1146}
1147
1148/* STACK AND CALLING */
1149
1150/* Frame Layout */
1151
1152/* Implements RETURN_ADDR_RTX. Note that R8C and M16C push 24 bits
1153 (yes, THREE bytes) onto the stack for the return address, but we
1154 don't support pointers bigger than 16 bits on those chips. This
1155 will likely wreak havoc with exception unwinding. FIXME. */
1156rtx
1157m32c_return_addr_rtx (int count)
1158{
1159 enum machine_mode mode;
1160 int offset;
1161 rtx ra_mem;
1162
1163 if (count)
1164 return NULL_RTX;
1165 /* we want 2[$fb] */
1166
1167 if (TARGET_A24)
1168 {
80b093df
DD
1169 /* It's four bytes */
1170 mode = PSImode;
38b2d076
DD
1171 offset = 4;
1172 }
1173 else
1174 {
1175 /* FIXME: it's really 3 bytes */
1176 mode = HImode;
1177 offset = 2;
1178 }
1179
1180 ra_mem =
1181 gen_rtx_MEM (mode, plus_constant (gen_rtx_REG (Pmode, FP_REGNO), offset));
1182 return copy_to_mode_reg (mode, ra_mem);
1183}
1184
1185/* Implements INCOMING_RETURN_ADDR_RTX. See comment above. */
1186rtx
1187m32c_incoming_return_addr_rtx (void)
1188{
1189 /* we want [sp] */
1190 return gen_rtx_MEM (PSImode, gen_rtx_REG (PSImode, SP_REGNO));
1191}
1192
1193/* Exception Handling Support */
1194
1195/* Implements EH_RETURN_DATA_REGNO. Choose registers able to hold
1196 pointers. */
1197int
1198m32c_eh_return_data_regno (int n)
1199{
1200 switch (n)
1201 {
1202 case 0:
1203 return A0_REGNO;
1204 case 1:
c6004917
RIL
1205 if (TARGET_A16)
1206 return R3_REGNO;
1207 else
1208 return R1_REGNO;
38b2d076
DD
1209 default:
1210 return INVALID_REGNUM;
1211 }
1212}
1213
1214/* Implements EH_RETURN_STACKADJ_RTX. Saved and used later in
1215 m32c_emit_eh_epilogue. */
1216rtx
1217m32c_eh_return_stackadj_rtx (void)
1218{
1219 if (!cfun->machine->eh_stack_adjust)
1220 {
1221 rtx sa;
1222
99920b6f 1223 sa = gen_rtx_REG (Pmode, R0_REGNO);
38b2d076
DD
1224 cfun->machine->eh_stack_adjust = sa;
1225 }
1226 return cfun->machine->eh_stack_adjust;
1227}
1228
1229/* Registers That Address the Stack Frame */
1230
1231/* Implements DWARF_FRAME_REGNUM and DBX_REGISTER_NUMBER. Note that
1232 the original spec called for dwarf numbers to vary with register
1233 width as well, for example, r0l, r0, and r2r0 would each have
1234 different dwarf numbers. GCC doesn't support this, and we don't do
1235 it, and gdb seems to like it this way anyway. */
1236unsigned int
1237m32c_dwarf_frame_regnum (int n)
1238{
1239 switch (n)
1240 {
1241 case R0_REGNO:
1242 return 5;
1243 case R1_REGNO:
1244 return 6;
1245 case R2_REGNO:
1246 return 7;
1247 case R3_REGNO:
1248 return 8;
1249 case A0_REGNO:
1250 return 9;
1251 case A1_REGNO:
1252 return 10;
1253 case FB_REGNO:
1254 return 11;
1255 case SB_REGNO:
1256 return 19;
1257
1258 case SP_REGNO:
1259 return 12;
1260 case PC_REGNO:
1261 return 13;
1262 default:
1263 return DWARF_FRAME_REGISTERS + 1;
1264 }
1265}
1266
1267/* The frame looks like this:
1268
1269 ap -> +------------------------------
1270 | Return address (3 or 4 bytes)
1271 | Saved FB (2 or 4 bytes)
1272 fb -> +------------------------------
1273 | local vars
1274 | register saves fb
1275 | through r0 as needed
1276 sp -> +------------------------------
1277*/
1278
1279/* We use this to wrap all emitted insns in the prologue. */
1280static rtx
1281F (rtx x)
1282{
1283 RTX_FRAME_RELATED_P (x) = 1;
1284 return x;
1285}
1286
1287/* This maps register numbers to the PUSHM/POPM bitfield, and tells us
1288 how much the stack pointer moves for each, for each cpu family. */
1289static struct
1290{
1291 int reg1;
1292 int bit;
1293 int a16_bytes;
1294 int a24_bytes;
1295} pushm_info[] =
1296{
9d746d5e
DD
1297 /* These are in reverse push (nearest-to-sp) order. */
1298 { R0_REGNO, 0x80, 2, 2 },
38b2d076 1299 { R1_REGNO, 0x40, 2, 2 },
9d746d5e
DD
1300 { R2_REGNO, 0x20, 2, 2 },
1301 { R3_REGNO, 0x10, 2, 2 },
1302 { A0_REGNO, 0x08, 2, 4 },
1303 { A1_REGNO, 0x04, 2, 4 },
1304 { SB_REGNO, 0x02, 2, 4 },
1305 { FB_REGNO, 0x01, 2, 4 }
38b2d076
DD
1306};
1307
1308#define PUSHM_N (sizeof(pushm_info)/sizeof(pushm_info[0]))
1309
1310/* Returns TRUE if we need to save/restore the given register. We
1311 save everything for exception handlers, so that any register can be
1312 unwound. For interrupt handlers, we save everything if the handler
1313 calls something else (because we don't know what *that* function
1314 might do), but try to be a bit smarter if the handler is a leaf
1315 function. We always save $a0, though, because we use that in the
85f65093 1316 epilogue to copy $fb to $sp. */
38b2d076
DD
1317static int
1318need_to_save (int regno)
1319{
1320 if (fixed_regs[regno])
1321 return 0;
ad516a74 1322 if (crtl->calls_eh_return)
38b2d076
DD
1323 return 1;
1324 if (regno == FP_REGNO)
1325 return 0;
1326 if (cfun->machine->is_interrupt
65655f79
DD
1327 && (!cfun->machine->is_leaf
1328 || (regno == A0_REGNO
1329 && m32c_function_needs_enter ())
1330 ))
38b2d076 1331 return 1;
6fb5fa3c 1332 if (df_regs_ever_live_p (regno)
38b2d076
DD
1333 && (!call_used_regs[regno] || cfun->machine->is_interrupt))
1334 return 1;
1335 return 0;
1336}
1337
1338/* This function contains all the intelligence about saving and
1339 restoring registers. It always figures out the register save set.
1340 When called with PP_justcount, it merely returns the size of the
1341 save set (for eliminating the frame pointer, for example). When
1342 called with PP_pushm or PP_popm, it emits the appropriate
1343 instructions for saving (pushm) or restoring (popm) the
1344 registers. */
1345static int
1346m32c_pushm_popm (Push_Pop_Type ppt)
1347{
1348 int reg_mask = 0;
1349 int byte_count = 0, bytes;
1350 int i;
1351 rtx dwarf_set[PUSHM_N];
1352 int n_dwarfs = 0;
1353 int nosave_mask = 0;
1354
305da3ec
JH
1355 if (crtl->return_rtx
1356 && GET_CODE (crtl->return_rtx) == PARALLEL
ad516a74 1357 && !(crtl->calls_eh_return || cfun->machine->is_interrupt))
38b2d076 1358 {
305da3ec 1359 rtx exp = XVECEXP (crtl->return_rtx, 0, 0);
38b2d076
DD
1360 rtx rv = XEXP (exp, 0);
1361 int rv_bytes = GET_MODE_SIZE (GET_MODE (rv));
1362
1363 if (rv_bytes > 2)
1364 nosave_mask |= 0x20; /* PSI, SI */
1365 else
1366 nosave_mask |= 0xf0; /* DF */
1367 if (rv_bytes > 4)
1368 nosave_mask |= 0x50; /* DI */
1369 }
1370
1371 for (i = 0; i < (int) PUSHM_N; i++)
1372 {
1373 /* Skip if neither register needs saving. */
1374 if (!need_to_save (pushm_info[i].reg1))
1375 continue;
1376
1377 if (pushm_info[i].bit & nosave_mask)
1378 continue;
1379
1380 reg_mask |= pushm_info[i].bit;
1381 bytes = TARGET_A16 ? pushm_info[i].a16_bytes : pushm_info[i].a24_bytes;
1382
1383 if (ppt == PP_pushm)
1384 {
1385 enum machine_mode mode = (bytes == 2) ? HImode : SImode;
1386 rtx addr;
1387
1388 /* Always use stack_pointer_rtx instead of calling
1389 rtx_gen_REG ourselves. Code elsewhere in GCC assumes
1390 that there is a single rtx representing the stack pointer,
1391 namely stack_pointer_rtx, and uses == to recognize it. */
1392 addr = stack_pointer_rtx;
1393
1394 if (byte_count != 0)
1395 addr = gen_rtx_PLUS (GET_MODE (addr), addr, GEN_INT (byte_count));
1396
1397 dwarf_set[n_dwarfs++] =
1398 gen_rtx_SET (VOIDmode,
1399 gen_rtx_MEM (mode, addr),
1400 gen_rtx_REG (mode, pushm_info[i].reg1));
1401 F (dwarf_set[n_dwarfs - 1]);
1402
1403 }
1404 byte_count += bytes;
1405 }
1406
1407 if (cfun->machine->is_interrupt)
1408 {
1409 cfun->machine->intr_pushm = reg_mask & 0xfe;
1410 reg_mask = 0;
1411 byte_count = 0;
1412 }
1413
1414 if (cfun->machine->is_interrupt)
1415 for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
1416 if (need_to_save (i))
1417 {
1418 byte_count += 2;
1419 cfun->machine->intr_pushmem[i - MEM0_REGNO] = 1;
1420 }
1421
1422 if (ppt == PP_pushm && byte_count)
1423 {
1424 rtx note = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (n_dwarfs + 1));
1425 rtx pushm;
1426
1427 if (reg_mask)
1428 {
1429 XVECEXP (note, 0, 0)
1430 = gen_rtx_SET (VOIDmode,
1431 stack_pointer_rtx,
1432 gen_rtx_PLUS (GET_MODE (stack_pointer_rtx),
1433 stack_pointer_rtx,
1434 GEN_INT (-byte_count)));
1435 F (XVECEXP (note, 0, 0));
1436
1437 for (i = 0; i < n_dwarfs; i++)
1438 XVECEXP (note, 0, i + 1) = dwarf_set[i];
1439
1440 pushm = F (emit_insn (gen_pushm (GEN_INT (reg_mask))));
1441
444d6efe 1442 add_reg_note (pushm, REG_FRAME_RELATED_EXPR, note);
38b2d076
DD
1443 }
1444
1445 if (cfun->machine->is_interrupt)
1446 for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
1447 if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
1448 {
1449 if (TARGET_A16)
1450 pushm = emit_insn (gen_pushhi_16 (gen_rtx_REG (HImode, i)));
1451 else
1452 pushm = emit_insn (gen_pushhi_24 (gen_rtx_REG (HImode, i)));
1453 F (pushm);
1454 }
1455 }
1456 if (ppt == PP_popm && byte_count)
1457 {
38b2d076
DD
1458 if (cfun->machine->is_interrupt)
1459 for (i = MEM7_REGNO; i >= MEM0_REGNO; i--)
1460 if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
1461 {
1462 if (TARGET_A16)
b3fdec9e 1463 emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, i)));
38b2d076 1464 else
b3fdec9e 1465 emit_insn (gen_pophi_24 (gen_rtx_REG (HImode, i)));
38b2d076
DD
1466 }
1467 if (reg_mask)
1468 emit_insn (gen_popm (GEN_INT (reg_mask)));
1469 }
1470
1471 return byte_count;
1472}
1473
1474/* Implements INITIAL_ELIMINATION_OFFSET. See the comment above that
1475 diagrams our call frame. */
1476int
1477m32c_initial_elimination_offset (int from, int to)
1478{
1479 int ofs = 0;
1480
1481 if (from == AP_REGNO)
1482 {
1483 if (TARGET_A16)
1484 ofs += 5;
1485 else
1486 ofs += 8;
1487 }
1488
1489 if (to == SP_REGNO)
1490 {
1491 ofs += m32c_pushm_popm (PP_justcount);
1492 ofs += get_frame_size ();
1493 }
1494
1495 /* Account for push rounding. */
1496 if (TARGET_A24)
1497 ofs = (ofs + 1) & ~1;
1498#if DEBUG0
1499 fprintf (stderr, "initial_elimination_offset from=%d to=%d, ofs=%d\n", from,
1500 to, ofs);
1501#endif
1502 return ofs;
1503}
1504
1505/* Passing Function Arguments on the Stack */
1506
38b2d076
DD
1507/* Implements PUSH_ROUNDING. The R8C and M16C have byte stacks, the
1508 M32C has word stacks. */
444d6efe 1509unsigned int
38b2d076
DD
1510m32c_push_rounding (int n)
1511{
1512 if (TARGET_R8C || TARGET_M16C)
1513 return n;
1514 return (n + 1) & ~1;
1515}
1516
1517/* Passing Arguments in Registers */
1518
cd34bbe8
NF
1519/* Implements TARGET_FUNCTION_ARG. Arguments are passed partly in
1520 registers, partly on stack. If our function returns a struct, a
1521 pointer to a buffer for it is at the top of the stack (last thing
1522 pushed). The first few real arguments may be in registers as
1523 follows:
38b2d076
DD
1524
1525 R8C/M16C: arg1 in r1 if it's QI or HI (else it's pushed on stack)
1526 arg2 in r2 if it's HI (else pushed on stack)
1527 rest on stack
1528 M32C: arg1 in r0 if it's QI or HI (else it's pushed on stack)
1529 rest on stack
1530
1531 Structs are not passed in registers, even if they fit. Only
1532 integer and pointer types are passed in registers.
1533
1534 Note that when arg1 doesn't fit in r1, arg2 may still be passed in
1535 r2 if it fits. */
cd34bbe8
NF
1536#undef TARGET_FUNCTION_ARG
1537#define TARGET_FUNCTION_ARG m32c_function_arg
1538static rtx
d5cc9181 1539m32c_function_arg (cumulative_args_t ca_v,
cd34bbe8 1540 enum machine_mode mode, const_tree type, bool named)
38b2d076 1541{
d5cc9181
JR
1542 CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
1543
38b2d076
DD
1544 /* Can return a reg, parallel, or 0 for stack */
1545 rtx rv = NULL_RTX;
1546#if DEBUG0
1547 fprintf (stderr, "func_arg %d (%s, %d)\n",
1548 ca->parm_num, mode_name[mode], named);
1549 debug_tree (type);
1550#endif
1551
1552 if (mode == VOIDmode)
1553 return GEN_INT (0);
1554
1555 if (ca->force_mem || !named)
1556 {
1557#if DEBUG0
1558 fprintf (stderr, "func arg: force %d named %d, mem\n", ca->force_mem,
1559 named);
1560#endif
1561 return NULL_RTX;
1562 }
1563
1564 if (type && INTEGRAL_TYPE_P (type) && POINTER_TYPE_P (type))
1565 return NULL_RTX;
1566
9d746d5e
DD
1567 if (type && AGGREGATE_TYPE_P (type))
1568 return NULL_RTX;
1569
38b2d076
DD
1570 switch (ca->parm_num)
1571 {
1572 case 1:
1573 if (GET_MODE_SIZE (mode) == 1 || GET_MODE_SIZE (mode) == 2)
1574 rv = gen_rtx_REG (mode, TARGET_A16 ? R1_REGNO : R0_REGNO);
1575 break;
1576
1577 case 2:
1578 if (TARGET_A16 && GET_MODE_SIZE (mode) == 2)
1579 rv = gen_rtx_REG (mode, R2_REGNO);
1580 break;
1581 }
1582
1583#if DEBUG0
1584 debug_rtx (rv);
1585#endif
1586 return rv;
1587}
1588
1589#undef TARGET_PASS_BY_REFERENCE
1590#define TARGET_PASS_BY_REFERENCE m32c_pass_by_reference
1591static bool
d5cc9181 1592m32c_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
38b2d076 1593 enum machine_mode mode ATTRIBUTE_UNUSED,
586de218 1594 const_tree type ATTRIBUTE_UNUSED,
38b2d076
DD
1595 bool named ATTRIBUTE_UNUSED)
1596{
1597 return 0;
1598}
1599
1600/* Implements INIT_CUMULATIVE_ARGS. */
1601void
1602m32c_init_cumulative_args (CUMULATIVE_ARGS * ca,
9d746d5e 1603 tree fntype,
38b2d076 1604 rtx libname ATTRIBUTE_UNUSED,
9d746d5e 1605 tree fndecl,
38b2d076
DD
1606 int n_named_args ATTRIBUTE_UNUSED)
1607{
9d746d5e
DD
1608 if (fntype && aggregate_value_p (TREE_TYPE (fntype), fndecl))
1609 ca->force_mem = 1;
1610 else
1611 ca->force_mem = 0;
38b2d076
DD
1612 ca->parm_num = 1;
1613}
1614
cd34bbe8
NF
1615/* Implements TARGET_FUNCTION_ARG_ADVANCE. force_mem is set for
1616 functions returning structures, so we always reset that. Otherwise,
1617 we only need to know the sequence number of the argument to know what
1618 to do with it. */
1619#undef TARGET_FUNCTION_ARG_ADVANCE
1620#define TARGET_FUNCTION_ARG_ADVANCE m32c_function_arg_advance
1621static void
d5cc9181 1622m32c_function_arg_advance (cumulative_args_t ca_v,
38b2d076 1623 enum machine_mode mode ATTRIBUTE_UNUSED,
cd34bbe8
NF
1624 const_tree type ATTRIBUTE_UNUSED,
1625 bool named ATTRIBUTE_UNUSED)
38b2d076 1626{
d5cc9181
JR
1627 CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
1628
38b2d076
DD
1629 if (ca->force_mem)
1630 ca->force_mem = 0;
9d746d5e
DD
1631 else
1632 ca->parm_num++;
38b2d076
DD
1633}
1634
c2ed6cf8
NF
1635/* Implements TARGET_FUNCTION_ARG_BOUNDARY. */
1636#undef TARGET_FUNCTION_ARG_BOUNDARY
1637#define TARGET_FUNCTION_ARG_BOUNDARY m32c_function_arg_boundary
1638static unsigned int
1639m32c_function_arg_boundary (enum machine_mode mode ATTRIBUTE_UNUSED,
1640 const_tree type ATTRIBUTE_UNUSED)
1641{
1642 return (TARGET_A16 ? 8 : 16);
1643}
1644
38b2d076
DD
1645/* Implements FUNCTION_ARG_REGNO_P. */
1646int
1647m32c_function_arg_regno_p (int r)
1648{
1649 if (TARGET_A24)
1650 return (r == R0_REGNO);
1651 return (r == R1_REGNO || r == R2_REGNO);
1652}
1653
e9555b13 1654/* HImode and PSImode are the two "native" modes as far as GCC is
85f65093 1655 concerned, but the chips also support a 32-bit mode which is used
e9555b13
DD
1656 for some opcodes in R8C/M16C and for reset vectors and such. */
1657#undef TARGET_VALID_POINTER_MODE
1658#define TARGET_VALID_POINTER_MODE m32c_valid_pointer_mode
23fed240 1659static bool
e9555b13
DD
1660m32c_valid_pointer_mode (enum machine_mode mode)
1661{
e9555b13
DD
1662 if (mode == HImode
1663 || mode == PSImode
1664 || mode == SImode
1665 )
1666 return 1;
1667 return 0;
1668}
1669
38b2d076
DD
1670/* How Scalar Function Values Are Returned */
1671
2a31793e 1672/* Implements TARGET_LIBCALL_VALUE. Most values are returned in $r0, or some
38b2d076
DD
1673 combination of registers starting there (r2r0 for longs, r3r1r2r0
1674 for long long, r3r2r1r0 for doubles), except that that ABI
1675 currently doesn't work because it ends up using all available
1676 general registers and gcc often can't compile it. So, instead, we
1677 return anything bigger than 16 bits in "mem0" (effectively, a
1678 memory location). */
2a31793e
AS
1679
1680#undef TARGET_LIBCALL_VALUE
1681#define TARGET_LIBCALL_VALUE m32c_libcall_value
1682
1683static rtx
1684m32c_libcall_value (enum machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
38b2d076
DD
1685{
1686 /* return reg or parallel */
1687#if 0
1688 /* FIXME: GCC has difficulty returning large values in registers,
1689 because that ties up most of the general registers and gives the
1690 register allocator little to work with. Until we can resolve
1691 this, large values are returned in memory. */
1692 if (mode == DFmode)
1693 {
1694 rtx rv;
1695
1696 rv = gen_rtx_PARALLEL (mode, rtvec_alloc (4));
1697 XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
1698 gen_rtx_REG (HImode,
1699 R0_REGNO),
1700 GEN_INT (0));
1701 XVECEXP (rv, 0, 1) = gen_rtx_EXPR_LIST (VOIDmode,
1702 gen_rtx_REG (HImode,
1703 R1_REGNO),
1704 GEN_INT (2));
1705 XVECEXP (rv, 0, 2) = gen_rtx_EXPR_LIST (VOIDmode,
1706 gen_rtx_REG (HImode,
1707 R2_REGNO),
1708 GEN_INT (4));
1709 XVECEXP (rv, 0, 3) = gen_rtx_EXPR_LIST (VOIDmode,
1710 gen_rtx_REG (HImode,
1711 R3_REGNO),
1712 GEN_INT (6));
1713 return rv;
1714 }
1715
1716 if (TARGET_A24 && GET_MODE_SIZE (mode) > 2)
1717 {
1718 rtx rv;
1719
1720 rv = gen_rtx_PARALLEL (mode, rtvec_alloc (1));
1721 XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
1722 gen_rtx_REG (mode,
1723 R0_REGNO),
1724 GEN_INT (0));
1725 return rv;
1726 }
1727#endif
1728
1729 if (GET_MODE_SIZE (mode) > 2)
1730 return gen_rtx_REG (mode, MEM0_REGNO);
1731 return gen_rtx_REG (mode, R0_REGNO);
1732}
1733
2a31793e 1734/* Implements TARGET_FUNCTION_VALUE. Functions and libcalls have the same
38b2d076 1735 conventions. */
2a31793e
AS
1736
1737#undef TARGET_FUNCTION_VALUE
1738#define TARGET_FUNCTION_VALUE m32c_function_value
1739
1740static rtx
1741m32c_function_value (const_tree valtype,
1742 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
1743 bool outgoing ATTRIBUTE_UNUSED)
38b2d076
DD
1744{
1745 /* return reg or parallel */
586de218 1746 const enum machine_mode mode = TYPE_MODE (valtype);
2a31793e
AS
1747 return m32c_libcall_value (mode, NULL_RTX);
1748}
1749
f28f2337
AS
1750/* Implements TARGET_FUNCTION_VALUE_REGNO_P. */
1751
1752#undef TARGET_FUNCTION_VALUE_REGNO_P
1753#define TARGET_FUNCTION_VALUE_REGNO_P m32c_function_value_regno_p
2a31793e 1754
f28f2337 1755static bool
2a31793e
AS
1756m32c_function_value_regno_p (const unsigned int regno)
1757{
1758 return (regno == R0_REGNO || regno == MEM0_REGNO);
38b2d076
DD
1759}
1760
1761/* How Large Values Are Returned */
1762
1763/* We return structures by pushing the address on the stack, even if
1764 we use registers for the first few "real" arguments. */
1765#undef TARGET_STRUCT_VALUE_RTX
1766#define TARGET_STRUCT_VALUE_RTX m32c_struct_value_rtx
1767static rtx
1768m32c_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
1769 int incoming ATTRIBUTE_UNUSED)
1770{
1771 return 0;
1772}
1773
1774/* Function Entry and Exit */
1775
1776/* Implements EPILOGUE_USES. Interrupts restore all registers. */
1777int
1778m32c_epilogue_uses (int regno ATTRIBUTE_UNUSED)
1779{
1780 if (cfun->machine->is_interrupt)
1781 return 1;
1782 return 0;
1783}
1784
1785/* Implementing the Varargs Macros */
1786
1787#undef TARGET_STRICT_ARGUMENT_NAMING
1788#define TARGET_STRICT_ARGUMENT_NAMING m32c_strict_argument_naming
1789static bool
d5cc9181 1790m32c_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
38b2d076
DD
1791{
1792 return 1;
1793}
1794
1795/* Trampolines for Nested Functions */
1796
1797/*
1798 m16c:
1799 1 0000 75C43412 mov.w #0x1234,a0
1800 2 0004 FC000000 jmp.a label
1801
1802 m32c:
1803 1 0000 BC563412 mov.l:s #0x123456,a0
1804 2 0004 CC000000 jmp.a label
1805*/
1806
1807/* Implements TRAMPOLINE_SIZE. */
1808int
1809m32c_trampoline_size (void)
1810{
1811 /* Allocate extra space so we can avoid the messy shifts when we
1812 initialize the trampoline; we just write past the end of the
1813 opcode. */
1814 return TARGET_A16 ? 8 : 10;
1815}
1816
1817/* Implements TRAMPOLINE_ALIGNMENT. */
1818int
1819m32c_trampoline_alignment (void)
1820{
1821 return 2;
1822}
1823
229fbccb
RH
1824/* Implements TARGET_TRAMPOLINE_INIT. */
1825
1826#undef TARGET_TRAMPOLINE_INIT
1827#define TARGET_TRAMPOLINE_INIT m32c_trampoline_init
1828static void
1829m32c_trampoline_init (rtx m_tramp, tree fndecl, rtx chainval)
38b2d076 1830{
229fbccb
RH
1831 rtx function = XEXP (DECL_RTL (fndecl), 0);
1832
1833#define A0(m,i) adjust_address (m_tramp, m, i)
38b2d076
DD
1834 if (TARGET_A16)
1835 {
1836 /* Note: we subtract a "word" because the moves want signed
1837 constants, not unsigned constants. */
1838 emit_move_insn (A0 (HImode, 0), GEN_INT (0xc475 - 0x10000));
1839 emit_move_insn (A0 (HImode, 2), chainval);
1840 emit_move_insn (A0 (QImode, 4), GEN_INT (0xfc - 0x100));
85f65093
KH
1841 /* We use 16-bit addresses here, but store the zero to turn it
1842 into a 24-bit offset. */
38b2d076
DD
1843 emit_move_insn (A0 (HImode, 5), function);
1844 emit_move_insn (A0 (QImode, 7), GEN_INT (0x00));
1845 }
1846 else
1847 {
1848 /* Note that the PSI moves actually write 4 bytes. Make sure we
1849 write stuff out in the right order, and leave room for the
1850 extra byte at the end. */
1851 emit_move_insn (A0 (QImode, 0), GEN_INT (0xbc - 0x100));
1852 emit_move_insn (A0 (PSImode, 1), chainval);
1853 emit_move_insn (A0 (QImode, 4), GEN_INT (0xcc - 0x100));
1854 emit_move_insn (A0 (PSImode, 5), function);
1855 }
1856#undef A0
1857}
1858
07127a0a
DD
1859/* Implicit Calls to Library Routines */
1860
1861#undef TARGET_INIT_LIBFUNCS
1862#define TARGET_INIT_LIBFUNCS m32c_init_libfuncs
1863static void
1864m32c_init_libfuncs (void)
1865{
f90b7a5a
PB
1866 /* We do this because the M32C has an HImode operand, but the
1867 M16C has an 8-bit operand. Since gcc looks at the match data
1868 and not the expanded rtl, we have to reset the optab so that
1869 the right modes are found. */
07127a0a
DD
1870 if (TARGET_A24)
1871 {
947131ba
RS
1872 set_optab_handler (cstore_optab, QImode, CODE_FOR_cstoreqi4_24);
1873 set_optab_handler (cstore_optab, HImode, CODE_FOR_cstorehi4_24);
1874 set_optab_handler (cstore_optab, PSImode, CODE_FOR_cstorepsi4_24);
07127a0a
DD
1875 }
1876}
1877
38b2d076
DD
1878/* Addressing Modes */
1879
c6c3dba9
PB
1880/* The r8c/m32c family supports a wide range of non-orthogonal
1881 addressing modes, including the ability to double-indirect on *some*
1882 of them. Not all insns support all modes, either, but we rely on
1883 predicates and constraints to deal with that. */
1884#undef TARGET_LEGITIMATE_ADDRESS_P
1885#define TARGET_LEGITIMATE_ADDRESS_P m32c_legitimate_address_p
1886bool
1887m32c_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
38b2d076
DD
1888{
1889 int mode_adjust;
1890 if (CONSTANT_P (x))
1891 return 1;
1892
5fd5d713
DD
1893 if (TARGET_A16 && GET_MODE (x) != HImode && GET_MODE (x) != SImode)
1894 return 0;
1895 if (TARGET_A24 && GET_MODE (x) != PSImode)
1896 return 0;
1897
38b2d076
DD
1898 /* Wide references to memory will be split after reload, so we must
1899 ensure that all parts of such splits remain legitimate
1900 addresses. */
1901 mode_adjust = GET_MODE_SIZE (mode) - 1;
1902
1903 /* allowing PLUS yields mem:HI(plus:SI(mem:SI(plus:SI in m32c_split_move */
1904 if (GET_CODE (x) == PRE_DEC
1905 || GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_MODIFY)
1906 {
1907 return (GET_CODE (XEXP (x, 0)) == REG
1908 && REGNO (XEXP (x, 0)) == SP_REGNO);
1909 }
1910
1911#if 0
1912 /* This is the double indirection detection, but it currently
1913 doesn't work as cleanly as this code implies, so until we've had
1914 a chance to debug it, leave it disabled. */
1915 if (TARGET_A24 && GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) != PLUS)
1916 {
1917#if DEBUG_DOUBLE
1918 fprintf (stderr, "double indirect\n");
1919#endif
1920 x = XEXP (x, 0);
1921 }
1922#endif
1923
1924 encode_pattern (x);
1925 if (RTX_IS ("r"))
1926 {
1927 /* Most indexable registers can be used without displacements,
1928 although some of them will be emitted with an explicit zero
1929 to please the assembler. */
1930 switch (REGNO (patternr[0]))
1931 {
38b2d076
DD
1932 case A1_REGNO:
1933 case SB_REGNO:
1934 case FB_REGNO:
1935 case SP_REGNO:
5fd5d713
DD
1936 if (TARGET_A16 && GET_MODE (x) == SImode)
1937 return 0;
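	  /* FALLTHRU - otherwise these are treated like A0.  */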
1938 case A0_REGNO:
38b2d076
DD
1939 return 1;
1940
1941 default:
1942 if (IS_PSEUDO (patternr[0], strict))
1943 return 1;
1944 return 0;
1945 }
1946 }
5fd5d713
DD
1947
1948 if (TARGET_A16 && GET_MODE (x) == SImode)
1949 return 0;
1950
38b2d076
DD
1951 if (RTX_IS ("+ri"))
1952 {
1953 /* This is more interesting, because different base registers
1954 allow for different displacements - both range and signedness
1955 - and it differs from chip series to chip series too. */
1956 int rn = REGNO (patternr[1]);
1957 HOST_WIDE_INT offs = INTVAL (patternr[2]);
1958 switch (rn)
1959 {
1960 case A0_REGNO:
1961 case A1_REGNO:
1962 case SB_REGNO:
1963 /* The syntax only allows positive offsets, but when the
1964 offsets span the entire memory range, we can simulate
1965 negative offsets by wrapping. */
1966 if (TARGET_A16)
1967 return (offs >= -65536 && offs <= 65535 - mode_adjust);
1968 if (rn == SB_REGNO)
1969 return (offs >= 0 && offs <= 65535 - mode_adjust);
1970 /* A0 or A1 */
1971 return (offs >= -16777216 && offs <= 16777215);
1972
1973 case FB_REGNO:
1974 if (TARGET_A16)
1975 return (offs >= -128 && offs <= 127 - mode_adjust);
1976 return (offs >= -65536 && offs <= 65535 - mode_adjust);
1977
1978 case SP_REGNO:
1979 return (offs >= -128 && offs <= 127 - mode_adjust);
1980
1981 default:
1982 if (IS_PSEUDO (patternr[1], strict))
1983 return 1;
1984 return 0;
1985 }
1986 }
1987 if (RTX_IS ("+rs") || RTX_IS ("+r+si"))
1988 {
1989 rtx reg = patternr[1];
1990
1991 /* We don't know where the symbol is, so only allow base
1992 registers which support displacements spanning the whole
1993 address range. */
1994 switch (REGNO (reg))
1995 {
1996 case A0_REGNO:
1997 case A1_REGNO:
1998 /* $sb needs a secondary reload, but since it's involved in
1999 memory address reloads too, we don't deal with it very
2000 well. */
2001 /* case SB_REGNO: */
2002 return 1;
2003 default:
2004 if (IS_PSEUDO (reg, strict))
2005 return 1;
2006 return 0;
2007 }
2008 }
2009 return 0;
2010}
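/* A few concrete examples of what the function above accepts (a
   sketch, not an exhaustive list):

     (reg:HI a0)                            ok - plain indirect
     (plus:HI (reg:HI fb) (const_int 100))  ok on A16 for HImode data
     (plus:HI (reg:HI fb) (const_int -200)) rejected on A16 (< -128)
     (plus:HI (reg:HI sp) (const_int 200))  rejected - SP offsets are
                                            limited to -128..127
     (mem:HI (reg:HI a0))                   rejected here, since the
                                            double-indirect support is
                                            disabled above.  */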
2011
2012/* Implements REG_OK_FOR_BASE_P. */
2013int
2014m32c_reg_ok_for_base_p (rtx x, int strict)
2015{
2016 if (GET_CODE (x) != REG)
2017 return 0;
2018 switch (REGNO (x))
2019 {
2020 case A0_REGNO:
2021 case A1_REGNO:
2022 case SB_REGNO:
2023 case FB_REGNO:
2024 case SP_REGNO:
2025 return 1;
2026 default:
2027 if (IS_PSEUDO (x, strict))
2028 return 1;
2029 return 0;
2030 }
2031}
2032
04aff2c0 2033/* We have three choices of fb->aN offset. If we choose -128,
85f65093 2034 we need one MOVA -128[fb],aN opcode and 16-bit aN displacements,
04aff2c0
DD
2035 like this:
2036 EB 4B FF mova -128[$fb],$a0
2037 D8 0C FF FF mov.w:Q #0,-1[$a0]
2038
85f65093 2039 Alternatively, we subtract the frame size, and hopefully use 8-bit aN
04aff2c0
DD
2040 displacements:
2041 7B F4 stc $fb,$a0
2042 77 54 00 01 sub #256,$a0
2043 D8 08 01 mov.w:Q #0,1[$a0]
2044
2045 If we don't offset (i.e. offset by zero), we end up with:
2046 7B F4 stc $fb,$a0
2047 D8 0C 00 FF mov.w:Q #0,-256[$a0]
2048
2049 We have to subtract *something* so that we have a PLUS rtx to mark
2050 that we've done this reload. The -128 offset will never result in
85f65093 2051 an 8-bit aN offset, and the payoff for the second case is five
04aff2c0
DD
2052 loads *if* those loads are within 256 bytes of the other end of the
2053 frame, so the third case seems best. Note that we subtract the
2054 zero, but detect that in the addhi3 pattern. */
2055
ea471af0
JM
2056#define BIG_FB_ADJ 0
2057
38b2d076
DD
2058/* Implements LEGITIMIZE_ADDRESS. The only address we really have to
2059 worry about is frame base offsets, as $fb has a limited
2060 displacement range. We deal with this by attempting to reload $fb
2061 itself into an address register; that seems to result in the best
2062 code. */
506d7b68
PB
2063#undef TARGET_LEGITIMIZE_ADDRESS
2064#define TARGET_LEGITIMIZE_ADDRESS m32c_legitimize_address
2065static rtx
2066m32c_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
2067 enum machine_mode mode)
38b2d076
DD
2068{
2069#if DEBUG0
2070 fprintf (stderr, "m32c_legitimize_address for mode %s\n", mode_name[mode]);
506d7b68 2071 debug_rtx (x);
38b2d076
DD
2072 fprintf (stderr, "\n");
2073#endif
2074
506d7b68
PB
2075 if (GET_CODE (x) == PLUS
2076 && GET_CODE (XEXP (x, 0)) == REG
2077 && REGNO (XEXP (x, 0)) == FB_REGNO
2078 && GET_CODE (XEXP (x, 1)) == CONST_INT
2079 && (INTVAL (XEXP (x, 1)) < -128
2080 || INTVAL (XEXP (x, 1)) > (128 - GET_MODE_SIZE (mode))))
38b2d076
DD
2081 {
2082 /* reload FB to A_REGS */
38b2d076 2083 rtx temp = gen_reg_rtx (Pmode);
506d7b68
PB
2084 x = copy_rtx (x);
2085 emit_insn (gen_rtx_SET (VOIDmode, temp, XEXP (x, 0)));
2086 XEXP (x, 0) = temp;
38b2d076
DD
2087 }
2088
506d7b68 2089 return x;
38b2d076
DD
2090}
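/* An illustrative transformation (RTL shown schematically): a frame
   access such as

     (mem:HI (plus (reg fb) (const_int 300)))

   exceeds $fb's displacement range, so the code above copies $fb into
   a fresh pseudo first,

     (set (reg temp) (reg fb))
     (mem:HI (plus (reg temp) (const_int 300)))

   and the register allocator can then put TEMP in an address
   register, which accepts much larger displacements.  */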
2091
2092/* Implements LEGITIMIZE_RELOAD_ADDRESS. See comment above. */
2093int
2094m32c_legitimize_reload_address (rtx * x,
2095 enum machine_mode mode,
2096 int opnum,
2097 int type, int ind_levels ATTRIBUTE_UNUSED)
2098{
2099#if DEBUG0
2100 fprintf (stderr, "\nm32c_legitimize_reload_address for mode %s\n",
2101 mode_name[mode]);
2102 debug_rtx (*x);
2103#endif
2104
2105 /* At one point, this function tried to get $fb copied to an address
2106 register, which in theory would maximize sharing, but gcc was
2107 *also* still trying to reload the whole address, and we'd run out
2108 of address registers. So we let gcc do the naive (but safe)
2109 reload instead, when the above function doesn't handle it for
04aff2c0
DD
2110 us.
2111
2112 The code below is a second attempt at the above. */
2113
2114 if (GET_CODE (*x) == PLUS
2115 && GET_CODE (XEXP (*x, 0)) == REG
2116 && REGNO (XEXP (*x, 0)) == FB_REGNO
2117 && GET_CODE (XEXP (*x, 1)) == CONST_INT
2118 && (INTVAL (XEXP (*x, 1)) < -128
2119 || INTVAL (XEXP (*x, 1)) > (128 - GET_MODE_SIZE (mode))))
2120 {
2121 rtx sum;
2122 int offset = INTVAL (XEXP (*x, 1));
2123 int adjustment = -BIG_FB_ADJ;
2124
2125 sum = gen_rtx_PLUS (Pmode, XEXP (*x, 0),
2126 GEN_INT (adjustment));
2127 *x = gen_rtx_PLUS (Pmode, sum, GEN_INT (offset - adjustment));
2128 if (type == RELOAD_OTHER)
2129 type = RELOAD_FOR_OTHER_ADDRESS;
2130 push_reload (sum, NULL_RTX, &XEXP (*x, 0), NULL,
2131 A_REGS, Pmode, VOIDmode, 0, 0, opnum,
444d6efe 2132 (enum reload_type) type);
04aff2c0
DD
2133 return 1;
2134 }
2135
2136 if (GET_CODE (*x) == PLUS
2137 && GET_CODE (XEXP (*x, 0)) == PLUS
2138 && GET_CODE (XEXP (XEXP (*x, 0), 0)) == REG
2139 && REGNO (XEXP (XEXP (*x, 0), 0)) == FB_REGNO
2140 && GET_CODE (XEXP (XEXP (*x, 0), 1)) == CONST_INT
2141 && GET_CODE (XEXP (*x, 1)) == CONST_INT
2142 )
2143 {
2144 if (type == RELOAD_OTHER)
2145 type = RELOAD_FOR_OTHER_ADDRESS;
2146 push_reload (XEXP (*x, 0), NULL_RTX, &XEXP (*x, 0), NULL,
2147 A_REGS, Pmode, VOIDmode, 0, 0, opnum,
444d6efe 2148 (enum reload_type) type);
04aff2c0
DD
2149 return 1;
2150 }
38b2d076
DD
2151
2152 return 0;
2153}
2154
5fd5d713
DD
2155/* Return the appropriate mode for a named address pointer. */
2156#undef TARGET_ADDR_SPACE_POINTER_MODE
2157#define TARGET_ADDR_SPACE_POINTER_MODE m32c_addr_space_pointer_mode
2158static enum machine_mode
2159m32c_addr_space_pointer_mode (addr_space_t addrspace)
2160{
2161 switch (addrspace)
2162 {
2163 case ADDR_SPACE_GENERIC:
2164 return TARGET_A24 ? PSImode : HImode;
2165 case ADDR_SPACE_FAR:
2166 return SImode;
2167 default:
2168 gcc_unreachable ();
2169 }
2170}
2171
2172/* Return the appropriate mode for a named address address. */
2173#undef TARGET_ADDR_SPACE_ADDRESS_MODE
2174#define TARGET_ADDR_SPACE_ADDRESS_MODE m32c_addr_space_address_mode
2175static enum machine_mode
2176m32c_addr_space_address_mode (addr_space_t addrspace)
2177{
2178 switch (addrspace)
2179 {
2180 case ADDR_SPACE_GENERIC:
2181 return TARGET_A24 ? PSImode : HImode;
2182 case ADDR_SPACE_FAR:
2183 return SImode;
2184 default:
2185 gcc_unreachable ();
2186 }
2187}
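/* In practical terms: a generic pointer is HImode (16 bits) on A16
   parts and PSImode (24 bits) on A24 parts, while a pointer into the
   far address space is always SImode (32 bits).  Illustrative user
   code, assuming the __far keyword is how this port spells
   ADDR_SPACE_FAR:

     char *p;          // HImode or PSImode, depending on the target
     char __far *q;    // SImode
*/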
2188
2189/* Like m32c_legitimate_address_p, except with named addresses. */
2190#undef TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P
2191#define TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P \
2192 m32c_addr_space_legitimate_address_p
2193static bool
2194m32c_addr_space_legitimate_address_p (enum machine_mode mode, rtx x,
2195 bool strict, addr_space_t as)
2196{
2197 if (as == ADDR_SPACE_FAR)
2198 {
2199 if (TARGET_A24)
2200 return 0;
2201 encode_pattern (x);
2202 if (RTX_IS ("r"))
2203 {
2204 if (GET_MODE (x) != SImode)
2205 return 0;
2206 switch (REGNO (patternr[0]))
2207 {
2208 case A0_REGNO:
2209 return 1;
2210
2211 default:
2212 if (IS_PSEUDO (patternr[0], strict))
2213 return 1;
2214 return 0;
2215 }
2216 }
2217 if (RTX_IS ("+^Sri"))
2218 {
2219 int rn = REGNO (patternr[3]);
2220 HOST_WIDE_INT offs = INTVAL (patternr[4]);
2221 if (GET_MODE (patternr[3]) != HImode)
2222 return 0;
2223 switch (rn)
2224 {
2225 case A0_REGNO:
2226 return (offs >= 0 && offs <= 0xfffff);
2227
2228 default:
2229 if (IS_PSEUDO (patternr[3], strict))
2230 return 1;
2231 return 0;
2232 }
2233 }
2234 if (RTX_IS ("+^Srs"))
2235 {
2236 int rn = REGNO (patternr[3]);
2237 if (GET_MODE (patternr[3]) != HImode)
2238 return 0;
2239 switch (rn)
2240 {
2241 case A0_REGNO:
2242 return 1;
2243
2244 default:
2245 if (IS_PSEUDO (patternr[3], strict))
2246 return 1;
2247 return 0;
2248 }
2249 }
2250 if (RTX_IS ("+^S+ris"))
2251 {
2252 int rn = REGNO (patternr[4]);
2253 if (GET_MODE (patternr[4]) != HImode)
2254 return 0;
2255 switch (rn)
2256 {
2257 case A0_REGNO:
2258 return 1;
2259
2260 default:
2261 if (IS_PSEUDO (patternr[4], strict))
2262 return 1;
2263 return 0;
2264 }
2265 }
2266 if (RTX_IS ("s"))
2267 {
2268 return 1;
2269 }
2270 return 0;
2271 }
2272
2273 else if (as != ADDR_SPACE_GENERIC)
2274 gcc_unreachable ();
2275
2276 return m32c_legitimate_address_p (mode, x, strict);
2277}
2278
2279/* Like m32c_legitimate_address, except with named address support. */
2280#undef TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS
2281#define TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS m32c_addr_space_legitimize_address
2282static rtx
2283m32c_addr_space_legitimize_address (rtx x, rtx oldx, enum machine_mode mode,
2284 addr_space_t as)
2285{
2286 if (as != ADDR_SPACE_GENERIC)
2287 {
2288#if DEBUG0
2289 fprintf (stderr, "\033[36mm32c_addr_space_legitimize_address for mode %s\033[0m\n", mode_name[mode]);
2290 debug_rtx (x);
2291 fprintf (stderr, "\n");
2292#endif
2293
2294 if (GET_CODE (x) != REG)
2295 {
2296 x = force_reg (SImode, x);
2297 }
2298 return x;
2299 }
2300
2301 return m32c_legitimize_address (x, oldx, mode);
2302}
2303
2304/* Determine if one named address space is a subset of another. */
2305#undef TARGET_ADDR_SPACE_SUBSET_P
2306#define TARGET_ADDR_SPACE_SUBSET_P m32c_addr_space_subset_p
2307static bool
2308m32c_addr_space_subset_p (addr_space_t subset, addr_space_t superset)
2309{
2310 gcc_assert (subset == ADDR_SPACE_GENERIC || subset == ADDR_SPACE_FAR);
2311 gcc_assert (superset == ADDR_SPACE_GENERIC || superset == ADDR_SPACE_FAR);
2312
2313 if (subset == superset)
2314 return true;
2315
2316 else
2317 return (subset == ADDR_SPACE_GENERIC && superset == ADDR_SPACE_FAR);
2318}
2319
2320#undef TARGET_ADDR_SPACE_CONVERT
2321#define TARGET_ADDR_SPACE_CONVERT m32c_addr_space_convert
2322/* Convert from one address space to another. */
2323static rtx
2324m32c_addr_space_convert (rtx op, tree from_type, tree to_type)
2325{
2326 addr_space_t from_as = TYPE_ADDR_SPACE (TREE_TYPE (from_type));
2327 addr_space_t to_as = TYPE_ADDR_SPACE (TREE_TYPE (to_type));
2328 rtx result;
2329
2330 gcc_assert (from_as == ADDR_SPACE_GENERIC || from_as == ADDR_SPACE_FAR);
2331 gcc_assert (to_as == ADDR_SPACE_GENERIC || to_as == ADDR_SPACE_FAR);
2332
2333 if (to_as == ADDR_SPACE_GENERIC && from_as == ADDR_SPACE_FAR)
2334 {
2335 /* This is unpredictable, as we're truncating off usable address
2336 bits. */
2337
2338 result = gen_reg_rtx (HImode);
2339 emit_move_insn (result, simplify_subreg (HImode, op, SImode, 0));
2340 return result;
2341 }
2342 else if (to_as == ADDR_SPACE_FAR && from_as == ADDR_SPACE_GENERIC)
2343 {
2344 /* This always works. */
2345 result = gen_reg_rtx (SImode);
2346 emit_insn (gen_zero_extendhisi2 (result, op));
2347 return result;
2348 }
2349 else
2350 gcc_unreachable ();
2351}
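/* Sketch of what the two branches above do for user-level pointer
   conversions (again assuming __far denotes ADDR_SPACE_FAR):
   converting a generic pointer to a far pointer zero-extends it
   (HImode -> SImode, always safe); converting a far pointer back to a
   generic one truncates it to HImode and silently drops the upper
   address bits, which is why the comment calls it unpredictable.  */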
2352
38b2d076
DD
2353/* Condition Code Status */
2354
2355#undef TARGET_FIXED_CONDITION_CODE_REGS
2356#define TARGET_FIXED_CONDITION_CODE_REGS m32c_fixed_condition_code_regs
2357static bool
2358m32c_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
2359{
2360 *p1 = FLG_REGNO;
2361 *p2 = INVALID_REGNUM;
2362 return true;
2363}
2364
2365/* Describing Relative Costs of Operations */
2366
0e607518 2367/* Implements TARGET_REGISTER_MOVE_COST. We make impossible moves
38b2d076
DD
2368 prohibitively expensive, like trying to put QIs in r2/r3 (there are
2369 no opcodes to do that). We also discourage use of mem* registers
2370 since they're really memory. */
0e607518
AS
2371
2372#undef TARGET_REGISTER_MOVE_COST
2373#define TARGET_REGISTER_MOVE_COST m32c_register_move_cost
2374
2375static int
2376m32c_register_move_cost (enum machine_mode mode, reg_class_t from,
2377 reg_class_t to)
38b2d076
DD
2378{
2379 int cost = COSTS_N_INSNS (3);
0e607518
AS
2380 HARD_REG_SET cc;
2381
2382/* FIXME: pick real values, but not 2 for now. */
2383 COPY_HARD_REG_SET (cc, reg_class_contents[(int) from]);
2384 IOR_HARD_REG_SET (cc, reg_class_contents[(int) to]);
2385
2386 if (mode == QImode
2387 && hard_reg_set_intersect_p (cc, reg_class_contents[R23_REGS]))
38b2d076 2388 {
0e607518 2389 if (hard_reg_set_subset_p (cc, reg_class_contents[R23_REGS]))
38b2d076
DD
2390 cost = COSTS_N_INSNS (1000);
2391 else
2392 cost = COSTS_N_INSNS (80);
2393 }
2394
2395 if (!class_can_hold_mode (from, mode) || !class_can_hold_mode (to, mode))
2396 cost = COSTS_N_INSNS (1000);
2397
0e607518 2398 if (reg_classes_intersect_p (from, CR_REGS))
38b2d076
DD
2399 cost += COSTS_N_INSNS (5);
2400
0e607518 2401 if (reg_classes_intersect_p (to, CR_REGS))
38b2d076
DD
2402 cost += COSTS_N_INSNS (5);
2403
2404 if (from == MEM_REGS || to == MEM_REGS)
2405 cost += COSTS_N_INSNS (50);
0e607518
AS
2406 else if (reg_classes_intersect_p (from, MEM_REGS)
2407 || reg_classes_intersect_p (to, MEM_REGS))
38b2d076
DD
2408 cost += COSTS_N_INSNS (10);
2409
2410#if DEBUG0
2411 fprintf (stderr, "register_move_cost %s from %s to %s = %d\n",
0e607518
AS
2412 mode_name[mode], class_names[(int) from], class_names[(int) to],
2413 cost);
38b2d076
DD
2414#endif
2415 return cost;
2416}
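/* Some resulting costs, for concreteness (these follow directly from
   the code above, they are not measured timings): a QImode move whose
   source and destination classes both lie entirely within R23_REGS
   costs COSTS_N_INSNS (1000), i.e. is effectively forbidden; a move
   where either class is exactly MEM_REGS adds COSTS_N_INSNS (50) to
   the base COSTS_N_INSNS (3); and CR_REGS involvement adds
   COSTS_N_INSNS (5) per side.  */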
2417
0e607518
AS
2418/* Implements TARGET_MEMORY_MOVE_COST. */
2419
2420#undef TARGET_MEMORY_MOVE_COST
2421#define TARGET_MEMORY_MOVE_COST m32c_memory_move_cost
2422
2423static int
38b2d076 2424m32c_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
0e607518
AS
2425 reg_class_t rclass ATTRIBUTE_UNUSED,
2426 bool in ATTRIBUTE_UNUSED)
38b2d076
DD
2427{
2428 /* FIXME: pick real values. */
2429 return COSTS_N_INSNS (10);
2430}
2431
07127a0a
DD
 2432/* Here we describe the cases where one RTX expands to multiple
 2433 opcodes, so that gcc can cost them appropriately. */
2434#undef TARGET_RTX_COSTS
2435#define TARGET_RTX_COSTS m32c_rtx_costs
2436static bool
68f932c4
RS
2437m32c_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
2438 int *total, bool speed ATTRIBUTE_UNUSED)
07127a0a
DD
2439{
2440 switch (code)
2441 {
2442 case REG:
2443 if (REGNO (x) >= MEM0_REGNO && REGNO (x) <= MEM7_REGNO)
2444 *total += COSTS_N_INSNS (500);
2445 else
2446 *total += COSTS_N_INSNS (1);
2447 return true;
2448
2449 case ASHIFT:
2450 case LSHIFTRT:
2451 case ASHIFTRT:
2452 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
2453 {
2454 /* mov.b r1l, r1h */
2455 *total += COSTS_N_INSNS (1);
2456 return true;
2457 }
2458 if (INTVAL (XEXP (x, 1)) > 8
2459 || INTVAL (XEXP (x, 1)) < -8)
2460 {
2461 /* mov.b #N, r1l */
2462 /* mov.b r1l, r1h */
2463 *total += COSTS_N_INSNS (2);
2464 return true;
2465 }
2466 return true;
2467
2468 case LE:
2469 case LEU:
2470 case LT:
2471 case LTU:
2472 case GT:
2473 case GTU:
2474 case GE:
2475 case GEU:
2476 case NE:
2477 case EQ:
2478 if (outer_code == SET)
2479 {
2480 *total += COSTS_N_INSNS (2);
2481 return true;
2482 }
2483 break;
2484
2485 case ZERO_EXTRACT:
2486 {
2487 rtx dest = XEXP (x, 0);
2488 rtx addr = XEXP (dest, 0);
2489 switch (GET_CODE (addr))
2490 {
2491 case CONST_INT:
2492 *total += COSTS_N_INSNS (1);
2493 break;
2494 case SYMBOL_REF:
2495 *total += COSTS_N_INSNS (3);
2496 break;
2497 default:
2498 *total += COSTS_N_INSNS (2);
2499 break;
2500 }
2501 return true;
2502 }
2503 break;
2504
2505 default:
2506 /* Reasonable default. */
2507 if (TARGET_A16 && GET_MODE(x) == SImode)
2508 *total += COSTS_N_INSNS (2);
2509 break;
2510 }
2511 return false;
2512}
2513
2514#undef TARGET_ADDRESS_COST
2515#define TARGET_ADDRESS_COST m32c_address_cost
2516static int
f40751dd 2517m32c_address_cost (rtx addr, bool speed ATTRIBUTE_UNUSED)
07127a0a 2518{
80b093df 2519 int i;
07127a0a
DD
2520 /* fprintf(stderr, "\naddress_cost\n");
2521 debug_rtx(addr);*/
2522 switch (GET_CODE (addr))
2523 {
2524 case CONST_INT:
80b093df
DD
2525 i = INTVAL (addr);
2526 if (i == 0)
2527 return COSTS_N_INSNS(1);
2528 if (0 < i && i <= 255)
2529 return COSTS_N_INSNS(2);
2530 if (0 < i && i <= 65535)
2531 return COSTS_N_INSNS(3);
2532 return COSTS_N_INSNS(4);
07127a0a 2533 case SYMBOL_REF:
80b093df 2534 return COSTS_N_INSNS(4);
07127a0a 2535 case REG:
80b093df
DD
2536 return COSTS_N_INSNS(1);
2537 case PLUS:
2538 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
2539 {
2540 i = INTVAL (XEXP (addr, 1));
2541 if (i == 0)
2542 return COSTS_N_INSNS(1);
2543 if (0 < i && i <= 255)
2544 return COSTS_N_INSNS(2);
2545 if (0 < i && i <= 65535)
2546 return COSTS_N_INSNS(3);
2547 }
2548 return COSTS_N_INSNS(4);
07127a0a
DD
2549 default:
2550 return 0;
2551 }
2552}
2553
38b2d076
DD
2554/* Defining the Output Assembler Language */
2555
38b2d076
DD
2556/* Output of Data */
2557
 2558/* We may have 24-bit sizes, which is the native address size.
2559 Currently unused, but provided for completeness. */
2560#undef TARGET_ASM_INTEGER
2561#define TARGET_ASM_INTEGER m32c_asm_integer
2562static bool
2563m32c_asm_integer (rtx x, unsigned int size, int aligned_p)
2564{
2565 switch (size)
2566 {
2567 case 3:
2568 fprintf (asm_out_file, "\t.3byte\t");
2569 output_addr_const (asm_out_file, x);
2570 fputc ('\n', asm_out_file);
2571 return true;
e9555b13
DD
2572 case 4:
2573 if (GET_CODE (x) == SYMBOL_REF)
2574 {
2575 fprintf (asm_out_file, "\t.long\t");
2576 output_addr_const (asm_out_file, x);
2577 fputc ('\n', asm_out_file);
2578 return true;
2579 }
2580 break;
38b2d076
DD
2581 }
2582 return default_assemble_integer (x, size, aligned_p);
2583}
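/* For example, a 3-byte (native-address-sized) initializer referencing
   a symbol comes out as

	.3byte	sym

   and a 4-byte SYMBOL_REF as ".long sym"; everything else is left to
   default_assemble_integer.  */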
2584
2585/* Output of Assembler Instructions */
2586
a4174ebf 2587/* We use a lookup table because the addressing modes are non-orthogonal. */
38b2d076
DD
2588
2589static struct
2590{
2591 char code;
2592 char const *pattern;
2593 char const *format;
2594}
2595const conversions[] = {
2596 { 0, "r", "0" },
2597
2598 { 0, "mr", "z[1]" },
2599 { 0, "m+ri", "3[2]" },
2600 { 0, "m+rs", "3[2]" },
5fd5d713
DD
2601 { 0, "m+^Zrs", "5[4]" },
2602 { 0, "m+^Zri", "5[4]" },
2603 { 0, "m+^Z+ris", "7+6[5]" },
2604 { 0, "m+^Srs", "5[4]" },
2605 { 0, "m+^Sri", "5[4]" },
2606 { 0, "m+^S+ris", "7+6[5]" },
38b2d076
DD
2607 { 0, "m+r+si", "4+5[2]" },
2608 { 0, "ms", "1" },
2609 { 0, "mi", "1" },
2610 { 0, "m+si", "2+3" },
2611
2612 { 0, "mmr", "[z[2]]" },
2613 { 0, "mm+ri", "[4[3]]" },
2614 { 0, "mm+rs", "[4[3]]" },
2615 { 0, "mm+r+si", "[5+6[3]]" },
2616 { 0, "mms", "[[2]]" },
2617 { 0, "mmi", "[[2]]" },
2618 { 0, "mm+si", "[4[3]]" },
2619
2620 { 0, "i", "#0" },
2621 { 0, "s", "#0" },
2622 { 0, "+si", "#1+2" },
2623 { 0, "l", "#0" },
2624
2625 { 'l', "l", "0" },
2626 { 'd', "i", "0" },
2627 { 'd', "s", "0" },
2628 { 'd', "+si", "1+2" },
2629 { 'D', "i", "0" },
2630 { 'D', "s", "0" },
2631 { 'D', "+si", "1+2" },
2632 { 'x', "i", "#0" },
2633 { 'X', "i", "#0" },
2634 { 'm', "i", "#0" },
2635 { 'b', "i", "#0" },
07127a0a 2636 { 'B', "i", "0" },
38b2d076
DD
2637 { 'p', "i", "0" },
2638
2639 { 0, 0, 0 }
2640};
2641
2642/* This is in order according to the bitfield that pushm/popm use. */
2643static char const *pushm_regs[] = {
2644 "fb", "sb", "a1", "a0", "r3", "r2", "r1", "r0"
2645};
2646
4645179e
AS
2647/* Implements TARGET_PRINT_OPERAND. */
2648
2649#undef TARGET_PRINT_OPERAND
2650#define TARGET_PRINT_OPERAND m32c_print_operand
2651
2652static void
38b2d076
DD
2653m32c_print_operand (FILE * file, rtx x, int code)
2654{
2655 int i, j, b;
2656 const char *comma;
2657 HOST_WIDE_INT ival;
2658 int unsigned_const = 0;
ff485e71 2659 int force_sign;
38b2d076
DD
2660
2661 /* Multiplies; constants are converted to sign-extended format but
2662 we need unsigned, so 'u' and 'U' tell us what size unsigned we
2663 need. */
2664 if (code == 'u')
2665 {
2666 unsigned_const = 2;
2667 code = 0;
2668 }
2669 if (code == 'U')
2670 {
2671 unsigned_const = 1;
2672 code = 0;
2673 }
2674 /* This one is only for debugging; you can put it in a pattern to
2675 force this error. */
2676 if (code == '!')
2677 {
2678 fprintf (stderr, "dj: unreviewed pattern:");
2679 if (current_output_insn)
2680 debug_rtx (current_output_insn);
2681 gcc_unreachable ();
2682 }
2683 /* PSImode operations are either .w or .l depending on the target. */
2684 if (code == '&')
2685 {
2686 if (TARGET_A16)
2687 fprintf (file, "w");
2688 else
2689 fprintf (file, "l");
2690 return;
2691 }
2692 /* Inverted conditionals. */
2693 if (code == 'C')
2694 {
2695 switch (GET_CODE (x))
2696 {
2697 case LE:
2698 fputs ("gt", file);
2699 break;
2700 case LEU:
2701 fputs ("gtu", file);
2702 break;
2703 case LT:
2704 fputs ("ge", file);
2705 break;
2706 case LTU:
2707 fputs ("geu", file);
2708 break;
2709 case GT:
2710 fputs ("le", file);
2711 break;
2712 case GTU:
2713 fputs ("leu", file);
2714 break;
2715 case GE:
2716 fputs ("lt", file);
2717 break;
2718 case GEU:
2719 fputs ("ltu", file);
2720 break;
2721 case NE:
2722 fputs ("eq", file);
2723 break;
2724 case EQ:
2725 fputs ("ne", file);
2726 break;
2727 default:
2728 gcc_unreachable ();
2729 }
2730 return;
2731 }
2732 /* Regular conditionals. */
2733 if (code == 'c')
2734 {
2735 switch (GET_CODE (x))
2736 {
2737 case LE:
2738 fputs ("le", file);
2739 break;
2740 case LEU:
2741 fputs ("leu", file);
2742 break;
2743 case LT:
2744 fputs ("lt", file);
2745 break;
2746 case LTU:
2747 fputs ("ltu", file);
2748 break;
2749 case GT:
2750 fputs ("gt", file);
2751 break;
2752 case GTU:
2753 fputs ("gtu", file);
2754 break;
2755 case GE:
2756 fputs ("ge", file);
2757 break;
2758 case GEU:
2759 fputs ("geu", file);
2760 break;
2761 case NE:
2762 fputs ("ne", file);
2763 break;
2764 case EQ:
2765 fputs ("eq", file);
2766 break;
2767 default:
2768 gcc_unreachable ();
2769 }
2770 return;
2771 }
2772 /* Used in negsi2 to do HImode ops on the two parts of an SImode
2773 operand. */
2774 if (code == 'h' && GET_MODE (x) == SImode)
2775 {
2776 x = m32c_subreg (HImode, x, SImode, 0);
2777 code = 0;
2778 }
2779 if (code == 'H' && GET_MODE (x) == SImode)
2780 {
2781 x = m32c_subreg (HImode, x, SImode, 2);
2782 code = 0;
2783 }
07127a0a
DD
2784 if (code == 'h' && GET_MODE (x) == HImode)
2785 {
2786 x = m32c_subreg (QImode, x, HImode, 0);
2787 code = 0;
2788 }
2789 if (code == 'H' && GET_MODE (x) == HImode)
2790 {
2791 /* We can't actually represent this as an rtx. Do it here. */
2792 if (GET_CODE (x) == REG)
2793 {
2794 switch (REGNO (x))
2795 {
2796 case R0_REGNO:
2797 fputs ("r0h", file);
2798 return;
2799 case R1_REGNO:
2800 fputs ("r1h", file);
2801 return;
2802 default:
2803 gcc_unreachable();
2804 }
2805 }
2806 /* This should be a MEM. */
2807 x = m32c_subreg (QImode, x, HImode, 1);
2808 code = 0;
2809 }
2810 /* This is for BMcond, which always wants word register names. */
2811 if (code == 'h' && GET_MODE (x) == QImode)
2812 {
2813 if (GET_CODE (x) == REG)
2814 x = gen_rtx_REG (HImode, REGNO (x));
2815 code = 0;
2816 }
38b2d076
DD
2817 /* 'x' and 'X' need to be ignored for non-immediates. */
2818 if ((code == 'x' || code == 'X') && GET_CODE (x) != CONST_INT)
2819 code = 0;
2820
2821 encode_pattern (x);
ff485e71 2822 force_sign = 0;
38b2d076
DD
2823 for (i = 0; conversions[i].pattern; i++)
2824 if (conversions[i].code == code
2825 && streq (conversions[i].pattern, pattern))
2826 {
2827 for (j = 0; conversions[i].format[j]; j++)
2828 /* backslash quotes the next character in the output pattern. */
2829 if (conversions[i].format[j] == '\\')
2830 {
2831 fputc (conversions[i].format[j + 1], file);
2832 j++;
2833 }
2834 /* Digits in the output pattern indicate that the
2835 corresponding RTX is to be output at that point. */
2836 else if (ISDIGIT (conversions[i].format[j]))
2837 {
2838 rtx r = patternr[conversions[i].format[j] - '0'];
2839 switch (GET_CODE (r))
2840 {
2841 case REG:
2842 fprintf (file, "%s",
2843 reg_name_with_mode (REGNO (r), GET_MODE (r)));
2844 break;
2845 case CONST_INT:
2846 switch (code)
2847 {
2848 case 'b':
07127a0a
DD
2849 case 'B':
2850 {
2851 int v = INTVAL (r);
2852 int i = (int) exact_log2 (v);
2853 if (i == -1)
2854 i = (int) exact_log2 ((v ^ 0xffff) & 0xffff);
2855 if (i == -1)
2856 i = (int) exact_log2 ((v ^ 0xff) & 0xff);
2857 /* Bit position. */
2858 fprintf (file, "%d", i);
2859 }
38b2d076
DD
2860 break;
2861 case 'x':
2862 /* Unsigned byte. */
2863 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2864 INTVAL (r) & 0xff);
2865 break;
2866 case 'X':
2867 /* Unsigned word. */
2868 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2869 INTVAL (r) & 0xffff);
2870 break;
2871 case 'p':
2872 /* pushm and popm encode a register set into a single byte. */
2873 comma = "";
2874 for (b = 7; b >= 0; b--)
2875 if (INTVAL (r) & (1 << b))
2876 {
2877 fprintf (file, "%s%s", comma, pushm_regs[b]);
2878 comma = ",";
2879 }
2880 break;
2881 case 'm':
2882 /* "Minus". Output -X */
2883 ival = (-INTVAL (r) & 0xffff);
2884 if (ival & 0x8000)
2885 ival = ival - 0x10000;
2886 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2887 break;
2888 default:
2889 ival = INTVAL (r);
2890 if (conversions[i].format[j + 1] == '[' && ival < 0)
2891 {
2892 /* We can simulate negative displacements by
2893 taking advantage of address space
2894 wrapping when the offset can span the
2895 entire address range. */
2896 rtx base =
2897 patternr[conversions[i].format[j + 2] - '0'];
2898 if (GET_CODE (base) == REG)
2899 switch (REGNO (base))
2900 {
2901 case A0_REGNO:
2902 case A1_REGNO:
2903 if (TARGET_A24)
2904 ival = 0x1000000 + ival;
2905 else
2906 ival = 0x10000 + ival;
2907 break;
2908 case SB_REGNO:
2909 if (TARGET_A16)
2910 ival = 0x10000 + ival;
2911 break;
2912 }
2913 }
2914 else if (code == 'd' && ival < 0 && j == 0)
2915 /* The "mova" opcode is used to do addition by
2916 computing displacements, but again, we need
2917 displacements to be unsigned *if* they're
2918 the only component of the displacement
2919 (i.e. no "symbol-4" type displacement). */
2920 ival = (TARGET_A24 ? 0x1000000 : 0x10000) + ival;
2921
2922 if (conversions[i].format[j] == '0')
2923 {
2924 /* More conversions to unsigned. */
2925 if (unsigned_const == 2)
2926 ival &= 0xffff;
2927 if (unsigned_const == 1)
2928 ival &= 0xff;
2929 }
2930 if (streq (conversions[i].pattern, "mi")
2931 || streq (conversions[i].pattern, "mmi"))
2932 {
2933 /* Integers used as addresses are unsigned. */
2934 ival &= (TARGET_A24 ? 0xffffff : 0xffff);
2935 }
ff485e71
DD
2936 if (force_sign && ival >= 0)
2937 fputc ('+', file);
38b2d076
DD
2938 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2939 break;
2940 }
2941 break;
2942 case CONST_DOUBLE:
2943 /* We don't have const_double constants. If it
2944 happens, make it obvious. */
2945 fprintf (file, "[const_double 0x%lx]",
2946 (unsigned long) CONST_DOUBLE_HIGH (r));
2947 break;
2948 case SYMBOL_REF:
2949 assemble_name (file, XSTR (r, 0));
2950 break;
2951 case LABEL_REF:
2952 output_asm_label (r);
2953 break;
2954 default:
2955 fprintf (stderr, "don't know how to print this operand:");
2956 debug_rtx (r);
2957 gcc_unreachable ();
2958 }
2959 }
2960 else
2961 {
2962 if (conversions[i].format[j] == 'z')
2963 {
2964 /* Some addressing modes *must* have a displacement,
2965 so insert a zero here if needed. */
2966 int k;
2967 for (k = j + 1; conversions[i].format[k]; k++)
2968 if (ISDIGIT (conversions[i].format[k]))
2969 {
2970 rtx reg = patternr[conversions[i].format[k] - '0'];
2971 if (GET_CODE (reg) == REG
2972 && (REGNO (reg) == SB_REGNO
2973 || REGNO (reg) == FB_REGNO
2974 || REGNO (reg) == SP_REGNO))
2975 fputc ('0', file);
2976 }
2977 continue;
2978 }
2979 /* Signed displacements off symbols need to have signs
2980 blended cleanly. */
2981 if (conversions[i].format[j] == '+'
ff485e71 2982 && (!code || code == 'D' || code == 'd')
38b2d076 2983 && ISDIGIT (conversions[i].format[j + 1])
ff485e71
DD
2984 && (GET_CODE (patternr[conversions[i].format[j + 1] - '0'])
2985 == CONST_INT))
2986 {
2987 force_sign = 1;
2988 continue;
2989 }
38b2d076
DD
2990 fputc (conversions[i].format[j], file);
2991 }
2992 break;
2993 }
2994 if (!conversions[i].pattern)
2995 {
2996 fprintf (stderr, "unconvertible operand %c `%s'", code ? code : '-',
2997 pattern);
2998 debug_rtx (x);
2999 fprintf (file, "[%c.%s]", code ? code : '-', pattern);
3000 }
3001
3002 return;
3003}
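/* A usage sketch for two of the codes handled above (the template here
   is hypothetical, not quoted from the .md files): "%&" is punctuation
   that expands to "w" when TARGET_A16 and "l" otherwise, so a template
   such as "add.%&\t%1,%0" emits add.w or add.l; "%C2" prints the
   inverse of the comparison code in operand 2, e.g. "ne" for an EQ.  */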
3004
4645179e
AS
3005/* Implements TARGET_PRINT_OPERAND_PUNCT_VALID_P.
3006
3007 See m32c_print_operand above for descriptions of what these do. */
3008
3009#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
3010#define TARGET_PRINT_OPERAND_PUNCT_VALID_P m32c_print_operand_punct_valid_p
3011
3012static bool
3013m32c_print_operand_punct_valid_p (unsigned char c)
38b2d076
DD
3014{
3015 if (c == '&' || c == '!')
4645179e
AS
3016 return true;
3017
3018 return false;
38b2d076
DD
3019}
3020
4645179e
AS
3021/* Implements TARGET_PRINT_OPERAND_ADDRESS. Nothing unusual here. */
3022
3023#undef TARGET_PRINT_OPERAND_ADDRESS
3024#define TARGET_PRINT_OPERAND_ADDRESS m32c_print_operand_address
3025
3026static void
38b2d076
DD
3027m32c_print_operand_address (FILE * stream, rtx address)
3028{
235e1fe8
NC
3029 if (GET_CODE (address) == MEM)
3030 address = XEXP (address, 0);
3031 else
3032 /* cf: gcc.dg/asm-4.c. */
3033 gcc_assert (GET_CODE (address) == REG);
3034
3035 m32c_print_operand (stream, address, 0);
38b2d076
DD
3036}
3037
3038/* Implements ASM_OUTPUT_REG_PUSH. Control registers are pushed
3039 differently than general registers. */
3040void
3041m32c_output_reg_push (FILE * s, int regno)
3042{
3043 if (regno == FLG_REGNO)
3044 fprintf (s, "\tpushc\tflg\n");
3045 else
04aff2c0 3046 fprintf (s, "\tpush.%c\t%s\n",
38b2d076
DD
3047 " bwll"[reg_push_size (regno)], reg_names[regno]);
3048}
3049
3050/* Likewise for ASM_OUTPUT_REG_POP. */
3051void
3052m32c_output_reg_pop (FILE * s, int regno)
3053{
3054 if (regno == FLG_REGNO)
3055 fprintf (s, "\tpopc\tflg\n");
3056 else
04aff2c0 3057 fprintf (s, "\tpop.%c\t%s\n",
38b2d076
DD
3058 " bwll"[reg_push_size (regno)], reg_names[regno]);
3059}
3060
3061/* Defining target-specific uses of `__attribute__' */
3062
3063/* Used to simplify the logic below. Find the attributes wherever
3064 they may be. */
3065#define M32C_ATTRIBUTES(decl) \
3066 (TYPE_P (decl)) ? TYPE_ATTRIBUTES (decl) \
3067 : DECL_ATTRIBUTES (decl) \
3068 ? (DECL_ATTRIBUTES (decl)) \
3069 : TYPE_ATTRIBUTES (TREE_TYPE (decl))
3070
3071/* Returns TRUE if the given tree has the "interrupt" attribute. */
3072static int
3073interrupt_p (tree node ATTRIBUTE_UNUSED)
3074{
3075 tree list = M32C_ATTRIBUTES (node);
3076 while (list)
3077 {
3078 if (is_attribute_p ("interrupt", TREE_PURPOSE (list)))
3079 return 1;
3080 list = TREE_CHAIN (list);
3081 }
65655f79
DD
3082 return fast_interrupt_p (node);
3083}
3084
3085/* Returns TRUE if the given tree has the "bank_switch" attribute. */
3086static int
3087bank_switch_p (tree node ATTRIBUTE_UNUSED)
3088{
3089 tree list = M32C_ATTRIBUTES (node);
3090 while (list)
3091 {
3092 if (is_attribute_p ("bank_switch", TREE_PURPOSE (list)))
3093 return 1;
3094 list = TREE_CHAIN (list);
3095 }
3096 return 0;
3097}
3098
3099/* Returns TRUE if the given tree has the "fast_interrupt" attribute. */
3100static int
3101fast_interrupt_p (tree node ATTRIBUTE_UNUSED)
3102{
3103 tree list = M32C_ATTRIBUTES (node);
3104 while (list)
3105 {
3106 if (is_attribute_p ("fast_interrupt", TREE_PURPOSE (list)))
3107 return 1;
3108 list = TREE_CHAIN (list);
3109 }
38b2d076
DD
3110 return 0;
3111}
3112
3113static tree
3114interrupt_handler (tree * node ATTRIBUTE_UNUSED,
3115 tree name ATTRIBUTE_UNUSED,
3116 tree args ATTRIBUTE_UNUSED,
3117 int flags ATTRIBUTE_UNUSED,
3118 bool * no_add_attrs ATTRIBUTE_UNUSED)
3119{
3120 return NULL_TREE;
3121}
3122
5abd2125
JS
 3123/* Returns TRUE if the given tree has the "function_vector" attribute. */
3124int
3125m32c_special_page_vector_p (tree func)
3126{
653e2568
DD
3127 tree list;
3128
5abd2125
JS
3129 if (TREE_CODE (func) != FUNCTION_DECL)
3130 return 0;
3131
653e2568 3132 list = M32C_ATTRIBUTES (func);
5abd2125
JS
3133 while (list)
3134 {
3135 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
3136 return 1;
3137 list = TREE_CHAIN (list);
3138 }
3139 return 0;
3140}
3141
3142static tree
3143function_vector_handler (tree * node ATTRIBUTE_UNUSED,
3144 tree name ATTRIBUTE_UNUSED,
3145 tree args ATTRIBUTE_UNUSED,
3146 int flags ATTRIBUTE_UNUSED,
3147 bool * no_add_attrs ATTRIBUTE_UNUSED)
3148{
3149 if (TARGET_R8C)
3150 {
3151 /* The attribute is not supported for R8C target. */
3152 warning (OPT_Wattributes,
29d08eba
JM
3153 "%qE attribute is not supported for R8C target",
3154 name);
5abd2125
JS
3155 *no_add_attrs = true;
3156 }
3157 else if (TREE_CODE (*node) != FUNCTION_DECL)
3158 {
3159 /* The attribute must be applied to functions only. */
3160 warning (OPT_Wattributes,
29d08eba
JM
3161 "%qE attribute applies only to functions",
3162 name);
5abd2125
JS
3163 *no_add_attrs = true;
3164 }
3165 else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
3166 {
3167 /* The argument must be a constant integer. */
3168 warning (OPT_Wattributes,
29d08eba
JM
3169 "%qE attribute argument not an integer constant",
3170 name);
5abd2125
JS
3171 *no_add_attrs = true;
3172 }
3173 else if (TREE_INT_CST_LOW (TREE_VALUE (args)) < 18
3174 || TREE_INT_CST_LOW (TREE_VALUE (args)) > 255)
3175 {
 3176 /* The argument value must be between 18 and 255. */
3177 warning (OPT_Wattributes,
29d08eba
JM
 3178 "%qE attribute argument should be between 18 and 255",
3179 name);
5abd2125
JS
3180 *no_add_attrs = true;
3181 }
3182 return NULL_TREE;
3183}
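/* The attribute validated above is used like this in target code
   (illustrative only):

     void isr18 (void) __attribute__ ((function_vector (18)));

   The argument must be an integer constant in the range 18..255, the
   declaration must be a function, and the target must not be the
   R8C.  */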
3184
 3185/* If the function is assigned the 'function_vector' attribute, return
 3186 its function vector number; otherwise return zero. */
3187int
3188current_function_special_page_vector (rtx x)
3189{
3190 int num;
3191
3192 if ((GET_CODE(x) == SYMBOL_REF)
3193 && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
3194 {
653e2568 3195 tree list;
5abd2125
JS
3196 tree t = SYMBOL_REF_DECL (x);
3197
3198 if (TREE_CODE (t) != FUNCTION_DECL)
3199 return 0;
3200
653e2568 3201 list = M32C_ATTRIBUTES (t);
5abd2125
JS
3202 while (list)
3203 {
3204 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
3205 {
3206 num = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list)));
3207 return num;
3208 }
3209
3210 list = TREE_CHAIN (list);
3211 }
3212
3213 return 0;
3214 }
3215 else
3216 return 0;
3217}
3218
38b2d076
DD
3219#undef TARGET_ATTRIBUTE_TABLE
3220#define TARGET_ATTRIBUTE_TABLE m32c_attribute_table
3221static const struct attribute_spec m32c_attribute_table[] = {
62d784f7
KT
3222 {"interrupt", 0, 0, false, false, false, interrupt_handler, false},
3223 {"bank_switch", 0, 0, false, false, false, interrupt_handler, false},
3224 {"fast_interrupt", 0, 0, false, false, false, interrupt_handler, false},
3225 {"function_vector", 1, 1, true, false, false, function_vector_handler,
3226 false},
3227 {0, 0, 0, 0, 0, 0, 0, false}
38b2d076
DD
3228};
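/* Typical uses of the attributes registered above (illustrative user
   code, not part of this file):

     void tick (void) __attribute__ ((interrupt));
     void tick_fast (void) __attribute__ ((fast_interrupt));
     void tick_banked (void) __attribute__ ((bank_switch));

   All three take no arguments and share the no-op interrupt_handler;
   what the rest of the port actually consults are the interrupt_p,
   fast_interrupt_p and bank_switch_p predicates defined earlier.  */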
3229
3230#undef TARGET_COMP_TYPE_ATTRIBUTES
3231#define TARGET_COMP_TYPE_ATTRIBUTES m32c_comp_type_attributes
3232static int
3101faab
KG
3233m32c_comp_type_attributes (const_tree type1 ATTRIBUTE_UNUSED,
3234 const_tree type2 ATTRIBUTE_UNUSED)
38b2d076
DD
3235{
3236 /* 0=incompatible 1=compatible 2=warning */
3237 return 1;
3238}
3239
3240#undef TARGET_INSERT_ATTRIBUTES
3241#define TARGET_INSERT_ATTRIBUTES m32c_insert_attributes
3242static void
3243m32c_insert_attributes (tree node ATTRIBUTE_UNUSED,
3244 tree * attr_ptr ATTRIBUTE_UNUSED)
3245{
f6052f86
DD
3246 unsigned addr;
3247 /* See if we need to make #pragma address variables volatile. */
3248
3249 if (TREE_CODE (node) == VAR_DECL)
3250 {
444d6efe 3251 const char *name = IDENTIFIER_POINTER (DECL_NAME (node));
f6052f86
DD
3252 if (m32c_get_pragma_address (name, &addr))
3253 {
3254 TREE_THIS_VOLATILE (node) = true;
3255 }
3256 }
3257}
3258
3259
3260struct GTY(()) pragma_entry {
3261 const char *varname;
3262 unsigned address;
3263};
3264typedef struct pragma_entry pragma_entry;
3265
3266/* Hash table of pragma info. */
3267static GTY((param_is (pragma_entry))) htab_t pragma_htab;
3268
3269static int
3270pragma_entry_eq (const void *p1, const void *p2)
3271{
3272 const pragma_entry *old = (const pragma_entry *) p1;
3273 const char *new_name = (const char *) p2;
3274
3275 return strcmp (old->varname, new_name) == 0;
3276}
3277
3278static hashval_t
3279pragma_entry_hash (const void *p)
3280{
3281 const pragma_entry *old = (const pragma_entry *) p;
3282 return htab_hash_string (old->varname);
3283}
3284
3285void
3286m32c_note_pragma_address (const char *varname, unsigned address)
3287{
3288 pragma_entry **slot;
3289
3290 if (!pragma_htab)
3291 pragma_htab = htab_create_ggc (31, pragma_entry_hash,
3292 pragma_entry_eq, NULL);
3293
3294 slot = (pragma_entry **)
3295 htab_find_slot_with_hash (pragma_htab, varname,
3296 htab_hash_string (varname), INSERT);
3297
3298 if (!*slot)
3299 {
3300 *slot = ggc_alloc_pragma_entry ();
3301 (*slot)->varname = ggc_strdup (varname);
3302 }
3303 (*slot)->address = address;
3304}
3305
3306static bool
3307m32c_get_pragma_address (const char *varname, unsigned *address)
3308{
3309 pragma_entry **slot;
3310
3311 if (!pragma_htab)
3312 return false;
3313
3314 slot = (pragma_entry **)
3315 htab_find_slot_with_hash (pragma_htab, varname,
3316 htab_hash_string (varname), NO_INSERT);
3317 if (slot && *slot)
3318 {
3319 *address = (*slot)->address;
3320 return true;
3321 }
3322 return false;
3323}
3324
3325void
444d6efe
JR
3326m32c_output_aligned_common (FILE *stream, tree decl ATTRIBUTE_UNUSED,
3327 const char *name,
f6052f86
DD
3328 int size, int align, int global)
3329{
3330 unsigned address;
3331
3332 if (m32c_get_pragma_address (name, &address))
3333 {
3334 /* We never output these as global. */
3335 assemble_name (stream, name);
3336 fprintf (stream, " = 0x%04x\n", address);
3337 return;
3338 }
3339 if (!global)
3340 {
3341 fprintf (stream, "\t.local\t");
3342 assemble_name (stream, name);
3343 fprintf (stream, "\n");
3344 }
3345 fprintf (stream, "\t.comm\t");
3346 assemble_name (stream, name);
3347 fprintf (stream, ",%u,%u\n", size, align / BITS_PER_UNIT);
38b2d076
DD
3348}
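/* For a variable that m32c_note_pragma_address has pinned to a fixed
   location (say a hypothetical PORT0 at 0x03aa), the function above
   emits an absolute assignment instead of .comm:

     port0 = 0x03aa

   (symbol spelling illustrative); ordinary variables still get the
   usual .local (when not global) and .comm directives.  */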
3349
3350/* Predicates */
3351
f9b89438 3352/* This is a list of legal subregs of hard regs. */
67fc44cb
DD
3353static const struct {
3354 unsigned char outer_mode_size;
3355 unsigned char inner_mode_size;
3356 unsigned char byte_mask;
3357 unsigned char legal_when;
f9b89438 3358 unsigned int regno;
f9b89438 3359} legal_subregs[] = {
67fc44cb
DD
3360 {1, 2, 0x03, 1, R0_REGNO}, /* r0h r0l */
3361 {1, 2, 0x03, 1, R1_REGNO}, /* r1h r1l */
3362 {1, 2, 0x01, 1, A0_REGNO},
3363 {1, 2, 0x01, 1, A1_REGNO},
f9b89438 3364
67fc44cb
DD
3365 {1, 4, 0x01, 1, A0_REGNO},
3366 {1, 4, 0x01, 1, A1_REGNO},
f9b89438 3367
67fc44cb
DD
3368 {2, 4, 0x05, 1, R0_REGNO}, /* r2 r0 */
3369 {2, 4, 0x05, 1, R1_REGNO}, /* r3 r1 */
3370 {2, 4, 0x05, 16, A0_REGNO}, /* a1 a0 */
3371 {2, 4, 0x01, 24, A0_REGNO}, /* a1 a0 */
3372 {2, 4, 0x01, 24, A1_REGNO}, /* a1 a0 */
f9b89438 3373
67fc44cb 3374 {4, 8, 0x55, 1, R0_REGNO}, /* r3 r1 r2 r0 */
f9b89438
DD
3375};
3376
3377/* Returns TRUE if OP is a subreg of a hard reg which we don't
f6052f86 3378 support. We also bail on MEMs with illegal addresses. */
f9b89438
DD
3379bool
3380m32c_illegal_subreg_p (rtx op)
3381{
f9b89438
DD
3382 int offset;
3383 unsigned int i;
3384 int src_mode, dest_mode;
3385
f6052f86
DD
3386 if (GET_CODE (op) == MEM
3387 && ! m32c_legitimate_address_p (Pmode, XEXP (op, 0), false))
3388 {
3389 return true;
3390 }
3391
f9b89438
DD
3392 if (GET_CODE (op) != SUBREG)
3393 return false;
3394
3395 dest_mode = GET_MODE (op);
3396 offset = SUBREG_BYTE (op);
3397 op = SUBREG_REG (op);
3398 src_mode = GET_MODE (op);
3399
3400 if (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (src_mode))
3401 return false;
3402 if (GET_CODE (op) != REG)
3403 return false;
3404 if (REGNO (op) >= MEM0_REGNO)
3405 return false;
3406
3407 offset = (1 << offset);
3408
67fc44cb 3409 for (i = 0; i < ARRAY_SIZE (legal_subregs); i ++)
f9b89438
DD
3410 if (legal_subregs[i].outer_mode_size == GET_MODE_SIZE (dest_mode)
3411 && legal_subregs[i].regno == REGNO (op)
3412 && legal_subregs[i].inner_mode_size == GET_MODE_SIZE (src_mode)
3413 && legal_subregs[i].byte_mask & offset)
3414 {
3415 switch (legal_subregs[i].legal_when)
3416 {
3417 case 1:
3418 return false;
3419 case 16:
3420 if (TARGET_A16)
3421 return false;
3422 break;
3423 case 24:
3424 if (TARGET_A24)
3425 return false;
3426 break;
3427 }
3428 }
3429 return true;
3430}
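/* A few checks against the legal_subregs table above:
   (subreg:QI (reg:HI r0) 0) is allowed (it is just r0l),
   (subreg:HI (reg:SI r0) 2) is allowed (it is r2), but
   (subreg:QI (reg:HI r2) 0) has no table entry, so the function
   reports it as unsupported.  */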
3431
38b2d076
DD
3432/* Returns TRUE if we support a move between the first two operands.
3433 At the moment, we just want to discourage mem to mem moves until
3434 after reload, because reload has a hard time with our limited
3435 number of address registers, and we can get into a situation where
3436 we need three of them when we only have two. */
3437bool
3438m32c_mov_ok (rtx * operands, enum machine_mode mode ATTRIBUTE_UNUSED)
3439{
3440 rtx op0 = operands[0];
3441 rtx op1 = operands[1];
3442
3443 if (TARGET_A24)
3444 return true;
3445
3446#define DEBUG_MOV_OK 0
3447#if DEBUG_MOV_OK
3448 fprintf (stderr, "m32c_mov_ok %s\n", mode_name[mode]);
3449 debug_rtx (op0);
3450 debug_rtx (op1);
3451#endif
3452
3453 if (GET_CODE (op0) == SUBREG)
3454 op0 = XEXP (op0, 0);
3455 if (GET_CODE (op1) == SUBREG)
3456 op1 = XEXP (op1, 0);
3457
3458 if (GET_CODE (op0) == MEM
3459 && GET_CODE (op1) == MEM
3460 && ! reload_completed)
3461 {
3462#if DEBUG_MOV_OK
3463 fprintf (stderr, " - no, mem to mem\n");
3464#endif
3465 return false;
3466 }
3467
3468#if DEBUG_MOV_OK
3469 fprintf (stderr, " - ok\n");
3470#endif
3471 return true;
3472}
3473
ff485e71
DD
 3474/* Returns TRUE if two consecutive HImode mov instructions, generated
 3475 for moving an immediate double-word value into a double-word
 3476 variable, can be combined into a single SImode mov instruction. */
3477bool
3478m32c_immd_dbl_mov (rtx * operands,
3479 enum machine_mode mode ATTRIBUTE_UNUSED)
3480{
3481 int flag = 0, okflag = 0, offset1 = 0, offset2 = 0, offsetsign = 0;
3482 const char *str1;
3483 const char *str2;
3484
3485 if (GET_CODE (XEXP (operands[0], 0)) == SYMBOL_REF
3486 && MEM_SCALAR_P (operands[0])
3487 && !MEM_IN_STRUCT_P (operands[0])
3488 && GET_CODE (XEXP (operands[2], 0)) == CONST
3489 && GET_CODE (XEXP (XEXP (operands[2], 0), 0)) == PLUS
3490 && GET_CODE (XEXP (XEXP (XEXP (operands[2], 0), 0), 0)) == SYMBOL_REF
3491 && GET_CODE (XEXP (XEXP (XEXP (operands[2], 0), 0), 1)) == CONST_INT
3492 && MEM_SCALAR_P (operands[2])
3493 && !MEM_IN_STRUCT_P (operands[2]))
3494 flag = 1;
3495
3496 else if (GET_CODE (XEXP (operands[0], 0)) == CONST
3497 && GET_CODE (XEXP (XEXP (operands[0], 0), 0)) == PLUS
3498 && GET_CODE (XEXP (XEXP (XEXP (operands[0], 0), 0), 0)) == SYMBOL_REF
3499 && MEM_SCALAR_P (operands[0])
3500 && !MEM_IN_STRUCT_P (operands[0])
f9f3567e 3501 && !(INTVAL (XEXP (XEXP (XEXP (operands[0], 0), 0), 1)) %4)
ff485e71
DD
3502 && GET_CODE (XEXP (operands[2], 0)) == CONST
3503 && GET_CODE (XEXP (XEXP (operands[2], 0), 0)) == PLUS
3504 && GET_CODE (XEXP (XEXP (XEXP (operands[2], 0), 0), 0)) == SYMBOL_REF
3505 && MEM_SCALAR_P (operands[2])
3506 && !MEM_IN_STRUCT_P (operands[2]))
3507 flag = 2;
3508
3509 else if (GET_CODE (XEXP (operands[0], 0)) == PLUS
3510 && GET_CODE (XEXP (XEXP (operands[0], 0), 0)) == REG
3511 && REGNO (XEXP (XEXP (operands[0], 0), 0)) == FB_REGNO
3512 && GET_CODE (XEXP (XEXP (operands[0], 0), 1)) == CONST_INT
3513 && MEM_SCALAR_P (operands[0])
3514 && !MEM_IN_STRUCT_P (operands[0])
f9f3567e 3515 && !(INTVAL (XEXP (XEXP (operands[0], 0), 1)) %4)
ff485e71
DD
3516 && REGNO (XEXP (XEXP (operands[2], 0), 0)) == FB_REGNO
3517 && GET_CODE (XEXP (XEXP (operands[2], 0), 1)) == CONST_INT
3518 && MEM_SCALAR_P (operands[2])
3519 && !MEM_IN_STRUCT_P (operands[2]))
3520 flag = 3;
3521
3522 else
3523 return false;
3524
3525 switch (flag)
3526 {
3527 case 1:
3528 str1 = XSTR (XEXP (operands[0], 0), 0);
3529 str2 = XSTR (XEXP (XEXP (XEXP (operands[2], 0), 0), 0), 0);
3530 if (strcmp (str1, str2) == 0)
3531 okflag = 1;
3532 else
3533 okflag = 0;
3534 break;
3535 case 2:
3536 str1 = XSTR (XEXP (XEXP (XEXP (operands[0], 0), 0), 0), 0);
3537 str2 = XSTR (XEXP (XEXP (XEXP (operands[2], 0), 0), 0), 0);
3538 if (strcmp(str1,str2) == 0)
3539 okflag = 1;
3540 else
3541 okflag = 0;
3542 break;
3543 case 3:
f9f3567e
DD
3544 offset1 = INTVAL (XEXP (XEXP (operands[0], 0), 1));
3545 offset2 = INTVAL (XEXP (XEXP (operands[2], 0), 1));
ff485e71
DD
3546 offsetsign = offset1 >> ((sizeof (offset1) * 8) -1);
3547 if (((offset2-offset1) == 2) && offsetsign != 0)
3548 okflag = 1;
3549 else
3550 okflag = 0;
3551 break;
3552 default:
3553 okflag = 0;
3554 }
3555
3556 if (okflag == 1)
3557 {
3558 HOST_WIDE_INT val;
3559 operands[4] = gen_rtx_MEM (SImode, XEXP (operands[0], 0));
3560
f9f3567e 3561 val = (INTVAL (operands[3]) << 16) + (INTVAL (operands[1]) & 0xFFFF);
ff485e71
DD
3562 operands[5] = gen_rtx_CONST_INT (VOIDmode, val);
3563
3564 return true;
3565 }
3566
3567 return false;
3568}
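/* Sketch of the case this allows (hypothetical symbols and values):
   storing #0x1234 to _foo and #0x5678 to _foo+2 with two HImode moves
   can be rewritten as one SImode store of 0x56781234 to _foo, since
   the low half lands at the lower address on this little-endian
   target.  */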
3569
38b2d076
DD
3570/* Expanders */
3571
3572/* Subregs are non-orthogonal for us, because our registers are all
3573 different sizes. */
3574static rtx
3575m32c_subreg (enum machine_mode outer,
3576 rtx x, enum machine_mode inner, int byte)
3577{
3578 int r, nr = -1;
3579
 3580 /* When converting a MEM to a different mode of the same size, we
 3581 just rewrite it. */
3582 if (GET_CODE (x) == SUBREG
3583 && SUBREG_BYTE (x) == 0
3584 && GET_CODE (SUBREG_REG (x)) == MEM
3585 && (GET_MODE_SIZE (GET_MODE (x))
3586 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
3587 {
3588 rtx oldx = x;
3589 x = gen_rtx_MEM (GET_MODE (x), XEXP (SUBREG_REG (x), 0));
3590 MEM_COPY_ATTRIBUTES (x, SUBREG_REG (oldx));
3591 }
3592
3593 /* Push/pop get done as smaller push/pops. */
3594 if (GET_CODE (x) == MEM
3595 && (GET_CODE (XEXP (x, 0)) == PRE_DEC
3596 || GET_CODE (XEXP (x, 0)) == POST_INC))
3597 return gen_rtx_MEM (outer, XEXP (x, 0));
3598 if (GET_CODE (x) == SUBREG
3599 && GET_CODE (XEXP (x, 0)) == MEM
3600 && (GET_CODE (XEXP (XEXP (x, 0), 0)) == PRE_DEC
3601 || GET_CODE (XEXP (XEXP (x, 0), 0)) == POST_INC))
3602 return gen_rtx_MEM (outer, XEXP (XEXP (x, 0), 0));
3603
3604 if (GET_CODE (x) != REG)
146456c1
DD
3605 {
3606 rtx r = simplify_gen_subreg (outer, x, inner, byte);
3607 if (GET_CODE (r) == SUBREG
3608 && GET_CODE (x) == MEM
3609 && MEM_VOLATILE_P (x))
3610 {
3611 /* Volatile MEMs don't get simplified, but we need them to
3612 be. We are little endian, so the subreg byte is the
3613 offset. */
91140cd3 3614 r = adjust_address_nv (x, outer, byte);
146456c1
DD
3615 }
3616 return r;
3617 }
38b2d076
DD
3618
3619 r = REGNO (x);
3620 if (r >= FIRST_PSEUDO_REGISTER || r == AP_REGNO)
3621 return simplify_gen_subreg (outer, x, inner, byte);
3622
3623 if (IS_MEM_REGNO (r))
3624 return simplify_gen_subreg (outer, x, inner, byte);
3625
3626 /* This is where the complexities of our register layout are
3627 described. */
3628 if (byte == 0)
3629 nr = r;
3630 else if (outer == HImode)
3631 {
3632 if (r == R0_REGNO && byte == 2)
3633 nr = R2_REGNO;
3634 else if (r == R0_REGNO && byte == 4)
3635 nr = R1_REGNO;
3636 else if (r == R0_REGNO && byte == 6)
3637 nr = R3_REGNO;
3638 else if (r == R1_REGNO && byte == 2)
3639 nr = R3_REGNO;
3640 else if (r == A0_REGNO && byte == 2)
3641 nr = A1_REGNO;
3642 }
3643 else if (outer == SImode)
3644 {
3645 if (r == R0_REGNO && byte == 0)
3646 nr = R0_REGNO;
3647 else if (r == R0_REGNO && byte == 4)
3648 nr = R1_REGNO;
3649 }
3650 if (nr == -1)
3651 {
3652 fprintf (stderr, "m32c_subreg %s %s %d\n",
3653 mode_name[outer], mode_name[inner], byte);
3654 debug_rtx (x);
3655 gcc_unreachable ();
3656 }
3657 return gen_rtx_REG (outer, nr);
3658}
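/* For example, (subreg:HI (reg:SI r0) 2) comes back as (reg:HI r2),
   and a DImode value living in r0 is accessed as r0, r2, r1 and r3
   for bytes 0, 2, 4 and 6 respectively, matching the cases handled
   above.  */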
3659
3660/* Used to emit move instructions. We split some moves,
3661 and avoid mem-mem moves. */
3662int
3663m32c_prepare_move (rtx * operands, enum machine_mode mode)
3664{
5fd5d713
DD
3665 if (far_addr_space_p (operands[0])
3666 && CONSTANT_P (operands[1]))
3667 {
3668 operands[1] = force_reg (GET_MODE (operands[0]), operands[1]);
3669 }
38b2d076
DD
3670 if (TARGET_A16 && mode == PSImode)
3671 return m32c_split_move (operands, mode, 1);
3672 if ((GET_CODE (operands[0]) == MEM)
3673 && (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY))
3674 {
3675 rtx pmv = XEXP (operands[0], 0);
3676 rtx dest_reg = XEXP (pmv, 0);
3677 rtx dest_mod = XEXP (pmv, 1);
3678
3679 emit_insn (gen_rtx_SET (Pmode, dest_reg, dest_mod));
3680 operands[0] = gen_rtx_MEM (mode, dest_reg);
3681 }
b3a13419 3682 if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
38b2d076
DD
3683 operands[1] = copy_to_mode_reg (mode, operands[1]);
3684 return 0;
3685}
3686
3687#define DEBUG_SPLIT 0
3688
3689/* Returns TRUE if the given PSImode move should be split. We split
 3690 for all r8c/m16c moves, since those chips don't support them, and for
3691 POP.L as we can only *push* SImode. */
3692int
3693m32c_split_psi_p (rtx * operands)
3694{
3695#if DEBUG_SPLIT
3696 fprintf (stderr, "\nm32c_split_psi_p\n");
3697 debug_rtx (operands[0]);
3698 debug_rtx (operands[1]);
3699#endif
3700 if (TARGET_A16)
3701 {
3702#if DEBUG_SPLIT
3703 fprintf (stderr, "yes, A16\n");
3704#endif
3705 return 1;
3706 }
3707 if (GET_CODE (operands[1]) == MEM
3708 && GET_CODE (XEXP (operands[1], 0)) == POST_INC)
3709 {
3710#if DEBUG_SPLIT
3711 fprintf (stderr, "yes, pop.l\n");
3712#endif
3713 return 1;
3714 }
3715#if DEBUG_SPLIT
3716 fprintf (stderr, "no, default\n");
3717#endif
3718 return 0;
3719}
3720
3721/* Split the given move. SPLIT_ALL is 0 if splitting is optional
3722 (define_expand), 1 if it is not optional (define_insn_and_split),
3723 and 3 for define_split (alternate api). */
3724int
3725m32c_split_move (rtx * operands, enum machine_mode mode, int split_all)
3726{
3727 rtx s[4], d[4];
3728 int parts, si, di, rev = 0;
3729 int rv = 0, opi = 2;
3730 enum machine_mode submode = HImode;
3731 rtx *ops, local_ops[10];
3732
3733 /* define_split modifies the existing operands, but the other two
3734 emit new insns. OPS is where we store the operand pairs, which
3735 we emit later. */
3736 if (split_all == 3)
3737 ops = operands;
3738 else
3739 ops = local_ops;
3740
3741 /* Else HImode. */
3742 if (mode == DImode)
3743 submode = SImode;
3744
3745 /* Before splitting mem-mem moves, force one operand into a
3746 register. */
b3a13419 3747 if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
38b2d076
DD
3748 {
3749#if DEBUG0
3750 fprintf (stderr, "force_reg...\n");
3751 debug_rtx (operands[1]);
3752#endif
3753 operands[1] = force_reg (mode, operands[1]);
3754#if DEBUG0
3755 debug_rtx (operands[1]);
3756#endif
3757 }
3758
3759 parts = 2;
3760
3761#if DEBUG_SPLIT
b3a13419
ILT
3762 fprintf (stderr, "\nsplit_move %d all=%d\n", !can_create_pseudo_p (),
3763 split_all);
38b2d076
DD
3764 debug_rtx (operands[0]);
3765 debug_rtx (operands[1]);
3766#endif
3767
eb5f0c07
DD
3768 /* Note that split_all is not used to select the api after this
3769 point, so it's safe to set it to 3 even with define_insn. */
3770 /* None of the chips can move SI operands to sp-relative addresses,
3771 so we always split those. */
3772 if (m32c_extra_constraint_p (operands[0], 'S', "Ss"))
3773 split_all = 3;
3774
5fd5d713
DD
3775 if (TARGET_A16
3776 && (far_addr_space_p (operands[0])
3777 || far_addr_space_p (operands[1])))
3778 split_all |= 1;
3779
38b2d076
DD
3780 /* We don't need to split these. */
3781 if (TARGET_A24
3782 && split_all != 3
3783 && (mode == SImode || mode == PSImode)
3784 && !(GET_CODE (operands[1]) == MEM
3785 && GET_CODE (XEXP (operands[1], 0)) == POST_INC))
3786 return 0;
3787
3788 /* First, enumerate the subregs we'll be dealing with. */
3789 for (si = 0; si < parts; si++)
3790 {
3791 d[si] =
3792 m32c_subreg (submode, operands[0], mode,
3793 si * GET_MODE_SIZE (submode));
3794 s[si] =
3795 m32c_subreg (submode, operands[1], mode,
3796 si * GET_MODE_SIZE (submode));
3797 }
3798
3799 /* Split pushes by emitting a sequence of smaller pushes. */
3800 if (GET_CODE (d[0]) == MEM && GET_CODE (XEXP (d[0], 0)) == PRE_DEC)
3801 {
3802 for (si = parts - 1; si >= 0; si--)
3803 {
3804 ops[opi++] = gen_rtx_MEM (submode,
3805 gen_rtx_PRE_DEC (Pmode,
3806 gen_rtx_REG (Pmode,
3807 SP_REGNO)));
3808 ops[opi++] = s[si];
3809 }
3810
3811 rv = 1;
3812 }
3813 /* Likewise for pops. */
3814 else if (GET_CODE (s[0]) == MEM && GET_CODE (XEXP (s[0], 0)) == POST_INC)
3815 {
3816 for (di = 0; di < parts; di++)
3817 {
3818 ops[opi++] = d[di];
3819 ops[opi++] = gen_rtx_MEM (submode,
3820 gen_rtx_POST_INC (Pmode,
3821 gen_rtx_REG (Pmode,
3822 SP_REGNO)));
3823 }
3824 rv = 1;
3825 }
3826 else if (split_all)
3827 {
3828 /* if d[di] == s[si] for any di < si, we'll early clobber. */
3829 for (di = 0; di < parts - 1; di++)
3830 for (si = di + 1; si < parts; si++)
3831 if (reg_mentioned_p (d[di], s[si]))
3832 rev = 1;
3833
3834 if (rev)
3835 for (si = 0; si < parts; si++)
3836 {
3837 ops[opi++] = d[si];
3838 ops[opi++] = s[si];
3839 }
3840 else
3841 for (si = parts - 1; si >= 0; si--)
3842 {
3843 ops[opi++] = d[si];
3844 ops[opi++] = s[si];
3845 }
3846 rv = 1;
3847 }
3848 /* Now emit any moves we may have accumulated. */
3849 if (rv && split_all != 3)
3850 {
3851 int i;
3852 for (i = 2; i < opi; i += 2)
3853 emit_move_insn (ops[i], ops[i + 1]);
3854 }
3855 return rv;
3856}
3857
07127a0a
DD
3858/* The m32c has a number of opcodes that act like memcpy, strcmp, and
3859 the like. For the R8C they expect one of the addresses to be in
3860 R1L:An so we need to arrange for that. Otherwise, it's just a
3861 matter of picking out the operands we want and emitting the right
3862 pattern for them. All these expanders, which correspond to
3863 patterns in blkmov.md, must return nonzero if they expand the insn,
3864 or zero if they should FAIL. */
3865
3866/* This is a memset() opcode. All operands are implied, so we need to
3867 arrange for them to be in the right registers. The opcode wants
3868 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3869 the count (HI), and $2 the value (QI). */
3870int
3871m32c_expand_setmemhi(rtx *operands)
3872{
3873 rtx desta, count, val;
3874 rtx desto, counto;
3875
3876 desta = XEXP (operands[0], 0);
3877 count = operands[1];
3878 val = operands[2];
3879
3880 desto = gen_reg_rtx (Pmode);
3881 counto = gen_reg_rtx (HImode);
3882
3883 if (GET_CODE (desta) != REG
3884 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3885 desta = copy_to_mode_reg (Pmode, desta);
3886
3887 /* This looks like an arbitrary restriction, but this is by far the
3888 most common case. For counts 8..14 this actually results in
3889 smaller code with no speed penalty because the half-sized
3890 constant can be loaded with a shorter opcode. */
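  /* For example, memset (dst, 5, 10) passes the test below: the count
     is halved to 5 and the value widened to 0x0505 so that the
     word-wide pattern can be used.  */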
3891 if (GET_CODE (count) == CONST_INT
3892 && GET_CODE (val) == CONST_INT
3893 && ! (INTVAL (count) & 1)
3894 && (INTVAL (count) > 1)
3895 && (INTVAL (val) <= 7 && INTVAL (val) >= -8))
3896 {
3897 unsigned v = INTVAL (val) & 0xff;
3898 v = v | (v << 8);
3899 count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3900 val = copy_to_mode_reg (HImode, GEN_INT (v));
3901 if (TARGET_A16)
3902 emit_insn (gen_setmemhi_whi_op (desto, counto, val, desta, count));
3903 else
3904 emit_insn (gen_setmemhi_wpsi_op (desto, counto, val, desta, count));
3905 return 1;
3906 }
3907
3908 /* This is the generalized memset() case. */
3909 if (GET_CODE (val) != REG
3910 || REGNO (val) < FIRST_PSEUDO_REGISTER)
3911 val = copy_to_mode_reg (QImode, val);
3912
3913 if (GET_CODE (count) != REG
3914 || REGNO (count) < FIRST_PSEUDO_REGISTER)
3915 count = copy_to_mode_reg (HImode, count);
3916
3917 if (TARGET_A16)
3918 emit_insn (gen_setmemhi_bhi_op (desto, counto, val, desta, count));
3919 else
3920 emit_insn (gen_setmemhi_bpsi_op (desto, counto, val, desta, count));
3921
3922 return 1;
3923}
3924
3925/* This is a memcpy() opcode. All operands are implied, so we need to
3926 arrange for them to be in the right registers. The opcode wants
3927 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3928 is the source (MEM:BLK), and $2 the count (HI). */
3929int
3930m32c_expand_movmemhi(rtx *operands)
3931{
3932 rtx desta, srca, count;
3933 rtx desto, srco, counto;
3934
3935 desta = XEXP (operands[0], 0);
3936 srca = XEXP (operands[1], 0);
3937 count = operands[2];
3938
3939 desto = gen_reg_rtx (Pmode);
3940 srco = gen_reg_rtx (Pmode);
3941 counto = gen_reg_rtx (HImode);
3942
3943 if (GET_CODE (desta) != REG
3944 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3945 desta = copy_to_mode_reg (Pmode, desta);
3946
3947 if (GET_CODE (srca) != REG
3948 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3949 srca = copy_to_mode_reg (Pmode, srca);
3950
3951 /* Similar to setmem, but we don't need to check the value. */
3952 if (GET_CODE (count) == CONST_INT
3953 && ! (INTVAL (count) & 1)
3954 && (INTVAL (count) > 1))
3955 {
3956 count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3957 if (TARGET_A16)
3958 emit_insn (gen_movmemhi_whi_op (desto, srco, counto, desta, srca, count));
3959 else
3960 emit_insn (gen_movmemhi_wpsi_op (desto, srco, counto, desta, srca, count));
3961 return 1;
3962 }
3963
3964  /* This is the generalized memcpy() case.  */
3965 if (GET_CODE (count) != REG
3966 || REGNO (count) < FIRST_PSEUDO_REGISTER)
3967 count = copy_to_mode_reg (HImode, count);
3968
3969 if (TARGET_A16)
3970 emit_insn (gen_movmemhi_bhi_op (desto, srco, counto, desta, srca, count));
3971 else
3972 emit_insn (gen_movmemhi_bpsi_op (desto, srco, counto, desta, srca, count));
3973
3974 return 1;
3975}
3976
3977/* This is a stpcpy() opcode. $0 is the destination (MEM:BLK) after
3978 the copy, which should point to the NUL at the end of the string,
3979 $1 is the destination (MEM:BLK), and $2 is the source (MEM:BLK).
3980 Since our opcode leaves the destination pointing *after* the NUL,
3981 we must emit an adjustment. */
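/* For example, stpcpy (d, "hi") copies 'h', 'i', '\0' and the opcode
   leaves the pointer at d+3; the addpsi3 of -1 below turns that into
   d+2, the address of the NUL that stpcpy() returns.  */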
3982int
3983m32c_expand_movstr(rtx *operands)
3984{
3985 rtx desta, srca;
3986 rtx desto, srco;
3987
3988 desta = XEXP (operands[1], 0);
3989 srca = XEXP (operands[2], 0);
3990
3991 desto = gen_reg_rtx (Pmode);
3992 srco = gen_reg_rtx (Pmode);
3993
3994 if (GET_CODE (desta) != REG
3995 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3996 desta = copy_to_mode_reg (Pmode, desta);
3997
3998 if (GET_CODE (srca) != REG
3999 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
4000 srca = copy_to_mode_reg (Pmode, srca);
4001
4002 emit_insn (gen_movstr_op (desto, srco, desta, srca));
4003 /* desto ends up being a1, which allows this type of add through MOVA. */
4004 emit_insn (gen_addpsi3 (operands[0], desto, GEN_INT (-1)));
4005
4006 return 1;
4007}
4008
4009/* This is a strcmp() opcode. $0 is the destination (HI) which holds
4010 <=>0 depending on the comparison, $1 is one string (MEM:BLK), and
4011 $2 is the other (MEM:BLK). We must do the comparison, and then
4012 convert the flags to a signed integer result. */
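/* (Sketch of the flow below: gen_cmpstrhi_op leaves the comparison
   result in the flags, and gen_cond_to_int then materializes a signed
   HImode value in $0 matching strcmp()'s negative/zero/positive
   contract.)  */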
4013int
4014m32c_expand_cmpstr(rtx *operands)
4015{
4016 rtx src1a, src2a;
4017
4018 src1a = XEXP (operands[1], 0);
4019 src2a = XEXP (operands[2], 0);
4020
4021 if (GET_CODE (src1a) != REG
4022 || REGNO (src1a) < FIRST_PSEUDO_REGISTER)
4023 src1a = copy_to_mode_reg (Pmode, src1a);
4024
4025 if (GET_CODE (src2a) != REG
4026 || REGNO (src2a) < FIRST_PSEUDO_REGISTER)
4027 src2a = copy_to_mode_reg (Pmode, src2a);
4028
4029 emit_insn (gen_cmpstrhi_op (src1a, src2a, src1a, src2a));
4030 emit_insn (gen_cond_to_int (operands[0]));
4031
4032 return 1;
4033}
4034
4035
23fed240
DD
4036typedef rtx (*shift_gen_func)(rtx, rtx, rtx);
4037
4038static shift_gen_func
4039shift_gen_func_for (int mode, int code)
4040{
4041#define GFF(m,c,f) if (mode == m && code == c) return f
4042 GFF(QImode, ASHIFT, gen_ashlqi3_i);
4043 GFF(QImode, ASHIFTRT, gen_ashrqi3_i);
4044 GFF(QImode, LSHIFTRT, gen_lshrqi3_i);
4045 GFF(HImode, ASHIFT, gen_ashlhi3_i);
4046 GFF(HImode, ASHIFTRT, gen_ashrhi3_i);
4047 GFF(HImode, LSHIFTRT, gen_lshrhi3_i);
4048 GFF(PSImode, ASHIFT, gen_ashlpsi3_i);
4049 GFF(PSImode, ASHIFTRT, gen_ashrpsi3_i);
4050 GFF(PSImode, LSHIFTRT, gen_lshrpsi3_i);
4051 GFF(SImode, ASHIFT, TARGET_A16 ? gen_ashlsi3_16 : gen_ashlsi3_24);
4052 GFF(SImode, ASHIFTRT, TARGET_A16 ? gen_ashrsi3_16 : gen_ashrsi3_24);
4053 GFF(SImode, LSHIFTRT, TARGET_A16 ? gen_lshrsi3_16 : gen_lshrsi3_24);
4054#undef GFF
07127a0a 4055 gcc_unreachable ();
23fed240
DD
4056}
4057
38b2d076
DD
4058/* The m32c only has one shift, but it takes a signed count. GCC
4059 doesn't want this, so we fake it by negating any shift count when
07127a0a
DD
4060 we're pretending to shift the other way. Also, the shift count is
4061 limited to -8..8. It's slightly better to use two shifts for 9..15
4062 than to load the count into r1h, so we do that too. */
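/* For example, with the constant-count limit of 8, an HImode shift
   left by 12 is emitted below as a shift by 8 into a temporary
   followed by a shift by 4, rather than loading the count into r1h.  */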
38b2d076 4063int
23fed240 4064m32c_prepare_shift (rtx * operands, int scale, int shift_code)
38b2d076 4065{
23fed240
DD
4066 enum machine_mode mode = GET_MODE (operands[0]);
4067 shift_gen_func func = shift_gen_func_for (mode, shift_code);
38b2d076 4068 rtx temp;
23fed240
DD
4069
4070 if (GET_CODE (operands[2]) == CONST_INT)
38b2d076 4071 {
23fed240
DD
4072 int maxc = TARGET_A24 && (mode == PSImode || mode == SImode) ? 32 : 8;
4073 int count = INTVAL (operands[2]) * scale;
4074
4075 while (count > maxc)
4076 {
4077 temp = gen_reg_rtx (mode);
4078 emit_insn (func (temp, operands[1], GEN_INT (maxc)));
4079 operands[1] = temp;
4080 count -= maxc;
4081 }
4082 while (count < -maxc)
4083 {
4084 temp = gen_reg_rtx (mode);
4085 emit_insn (func (temp, operands[1], GEN_INT (-maxc)));
4086 operands[1] = temp;
4087 count += maxc;
4088 }
4089 emit_insn (func (operands[0], operands[1], GEN_INT (count)));
4090 return 1;
38b2d076 4091 }
2e160056
DD
4092
4093 temp = gen_reg_rtx (QImode);
38b2d076 4094 if (scale < 0)
2e160056
DD
4095 /* The pattern has a NEG that corresponds to this. */
4096 emit_move_insn (temp, gen_rtx_NEG (QImode, operands[2]));
4097 else if (TARGET_A16 && mode == SImode)
4098 /* We do this because the code below may modify this, we don't
4099 want to modify the origin of this value. */
4100 emit_move_insn (temp, operands[2]);
38b2d076 4101 else
2e160056 4102 /* We'll only use it for the shift, no point emitting a move. */
38b2d076 4103 temp = operands[2];
2e160056 4104
16659fcf 4105 if (TARGET_A16 && GET_MODE_SIZE (mode) == 4)
2e160056
DD
4106 {
4107 /* The m16c has a limit of -16..16 for SI shifts, even when the
4108 shift count is in a register. Since there are so many targets
4109 of these shifts, it's better to expand the RTL here than to
4110 call a helper function.
4111
4112 The resulting code looks something like this:
4113
4114 cmp.b r1h,-16
4115 jge.b 1f
4116 shl.l -16,dest
4117 add.b r1h,16
4118 1f: cmp.b r1h,16
4119 jle.b 1f
4120 shl.l 16,dest
4121 sub.b r1h,16
4122 1f: shl.l r1h,dest
4123
4124 We take advantage of the fact that "negative" shifts are
4125 undefined to skip one of the comparisons. */
4126
4127 rtx count;
444d6efe 4128 rtx label, insn, tempvar;
2e160056 4129
16659fcf
DD
4130 emit_move_insn (operands[0], operands[1]);
4131
2e160056
DD
4132 count = temp;
4133 label = gen_label_rtx ();
2e160056
DD
4134 LABEL_NUSES (label) ++;
4135
833bf445
DD
4136 tempvar = gen_reg_rtx (mode);
4137
2e160056
DD
4138 if (shift_code == ASHIFT)
4139 {
4140 /* This is a left shift. We only need check positive counts. */
4141 emit_jump_insn (gen_cbranchqi4 (gen_rtx_LE (VOIDmode, 0, 0),
4142 count, GEN_INT (16), label));
833bf445
DD
4143 emit_insn (func (tempvar, operands[0], GEN_INT (8)));
4144 emit_insn (func (operands[0], tempvar, GEN_INT (8)));
2e160056
DD
4145 insn = emit_insn (gen_addqi3 (count, count, GEN_INT (-16)));
4146 emit_label_after (label, insn);
4147 }
4148 else
4149 {
4150 /* This is a right shift. We only need check negative counts. */
4151 emit_jump_insn (gen_cbranchqi4 (gen_rtx_GE (VOIDmode, 0, 0),
4152 count, GEN_INT (-16), label));
833bf445
DD
4153 emit_insn (func (tempvar, operands[0], GEN_INT (-8)));
4154 emit_insn (func (operands[0], tempvar, GEN_INT (-8)));
2e160056
DD
4155 insn = emit_insn (gen_addqi3 (count, count, GEN_INT (16)));
4156 emit_label_after (label, insn);
4157 }
16659fcf
DD
4158 operands[1] = operands[0];
4159 emit_insn (func (operands[0], operands[0], count));
4160 return 1;
2e160056
DD
4161 }
4162
38b2d076
DD
4163 operands[2] = temp;
4164 return 0;
4165}
4166
12ea2512
DD
4167/* The m32c has a limited range of operations that work on PSImode
4168 values; we have to expand to SI, do the math, and truncate back to
4169 PSI. Yes, this is expensive, but hopefully gcc will learn to avoid
4170 those cases. */
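/* For example, the multiply below is emitted as: widen b and the
   scale to SImode (zero-extending registers), do an SImode multiply,
   and truncate the product back to PSImode with truncsipsi2.  */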
4171void
4172m32c_expand_neg_mulpsi3 (rtx * operands)
4173{
4174 /* operands: a = b * i */
4175 rtx temp1; /* b as SI */
07127a0a
DD
4176 rtx scale /* i as SI */;
4177 rtx temp2; /* a*b as SI */
12ea2512
DD
4178
4179 temp1 = gen_reg_rtx (SImode);
4180 temp2 = gen_reg_rtx (SImode);
07127a0a
DD
4181 if (GET_CODE (operands[2]) != CONST_INT)
4182 {
4183 scale = gen_reg_rtx (SImode);
4184 emit_insn (gen_zero_extendpsisi2 (scale, operands[2]));
4185 }
4186 else
4187 scale = copy_to_mode_reg (SImode, operands[2]);
12ea2512
DD
4188
4189 emit_insn (gen_zero_extendpsisi2 (temp1, operands[1]));
07127a0a
DD
4190 temp2 = expand_simple_binop (SImode, MULT, temp1, scale, temp2, 1, OPTAB_LIB);
4191 emit_insn (gen_truncsipsi2 (operands[0], temp2));
12ea2512
DD
4192}
4193
38b2d076
DD
4194/* Pattern Output Functions */
4195
07127a0a
DD
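/* This expander only handles the conditional moves the hardware can
   express: EQ/NE tests selecting between two constant arms.  It
   returns nonzero for anything else (presumably so the movcc pattern
   in the .md file can FAIL), and for NE it swaps the arms and
   rewrites the condition as EQ.  */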
4196int
4197m32c_expand_movcc (rtx *operands)
4198{
4199 rtx rel = operands[1];
0166ff05 4200
07127a0a
DD
4201 if (GET_CODE (rel) != EQ && GET_CODE (rel) != NE)
4202 return 1;
4203 if (GET_CODE (operands[2]) != CONST_INT
4204 || GET_CODE (operands[3]) != CONST_INT)
4205 return 1;
07127a0a
DD
4206 if (GET_CODE (rel) == NE)
4207 {
4208 rtx tmp = operands[2];
4209 operands[2] = operands[3];
4210 operands[3] = tmp;
f90b7a5a 4211 rel = gen_rtx_EQ (GET_MODE (rel), XEXP (rel, 0), XEXP (rel, 1));
07127a0a 4212 }
0166ff05 4213
0166ff05
DD
4214 emit_move_insn (operands[0],
4215 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
f90b7a5a 4216 rel,
0166ff05
DD
4217 operands[2],
4218 operands[3]));
07127a0a
DD
4219 return 0;
4220}
4221
4222/* Used for the "insv" pattern. Return nonzero to fail, else done. */
4223int
4224m32c_expand_insv (rtx *operands)
4225{
4226 rtx op0, src0, p;
4227 int mask;
4228
4229 if (INTVAL (operands[1]) != 1)
4230 return 1;
4231
9cb96754
N
4232 /* Our insv opcode (bset, bclr) can only insert a one-bit constant. */
4233 if (GET_CODE (operands[3]) != CONST_INT)
4234 return 1;
4235 if (INTVAL (operands[3]) != 0
4236 && INTVAL (operands[3]) != 1
4237 && INTVAL (operands[3]) != -1)
4238 return 1;
4239
07127a0a
DD
4240 mask = 1 << INTVAL (operands[2]);
4241
4242 op0 = operands[0];
4243 if (GET_CODE (op0) == SUBREG
4244 && SUBREG_BYTE (op0) == 0)
4245 {
4246 rtx sub = SUBREG_REG (op0);
4247 if (GET_MODE (sub) == HImode || GET_MODE (sub) == QImode)
4248 op0 = sub;
4249 }
4250
b3a13419 4251 if (!can_create_pseudo_p ()
07127a0a
DD
4252 || (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0)))
4253 src0 = op0;
4254 else
4255 {
4256 src0 = gen_reg_rtx (GET_MODE (op0));
4257 emit_move_insn (src0, op0);
4258 }
4259
4260 if (GET_MODE (op0) == HImode
4261 && INTVAL (operands[2]) >= 8
444d6efe 4262 && GET_CODE (op0) == MEM)
07127a0a
DD
4263 {
4264 /* We are little endian. */
4265 rtx new_mem = gen_rtx_MEM (QImode, plus_constant (XEXP (op0, 0), 1));
4266 MEM_COPY_ATTRIBUTES (new_mem, op0);
4267 mask >>= 8;
4268 }
4269
8e4edce7
DD
4270 /* First, we generate a mask with the correct polarity. If we are
4271 storing a zero, we want an AND mask, so invert it. */
4272 if (INTVAL (operands[3]) == 0)
07127a0a 4273 {
16659fcf 4274 /* Storing a zero, use an AND mask */
07127a0a
DD
4275 if (GET_MODE (op0) == HImode)
4276 mask ^= 0xffff;
4277 else
4278 mask ^= 0xff;
4279 }
8e4edce7
DD
4280 /* Now we need to properly sign-extend the mask in case we need to
4281 fall back to an AND or OR opcode. */
07127a0a
DD
4282 if (GET_MODE (op0) == HImode)
4283 {
4284 if (mask & 0x8000)
4285 mask -= 0x10000;
4286 }
4287 else
4288 {
4289 if (mask & 0x80)
4290 mask -= 0x100;
4291 }
4292
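  /* The switch index below packs three choices into three bits:
     bit 2 selects IOR (inserting a 1) versus AND (inserting a 0),
     bit 1 selects HImode versus QImode, and bit 0 selects the A24
     versus A16 variant of the pattern.  */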
4293 switch ( (INTVAL (operands[3]) ? 4 : 0)
4294 + ((GET_MODE (op0) == HImode) ? 2 : 0)
4295 + (TARGET_A24 ? 1 : 0))
4296 {
4297 case 0: p = gen_andqi3_16 (op0, src0, GEN_INT (mask)); break;
4298 case 1: p = gen_andqi3_24 (op0, src0, GEN_INT (mask)); break;
4299 case 2: p = gen_andhi3_16 (op0, src0, GEN_INT (mask)); break;
4300 case 3: p = gen_andhi3_24 (op0, src0, GEN_INT (mask)); break;
4301 case 4: p = gen_iorqi3_16 (op0, src0, GEN_INT (mask)); break;
4302 case 5: p = gen_iorqi3_24 (op0, src0, GEN_INT (mask)); break;
4303 case 6: p = gen_iorhi3_16 (op0, src0, GEN_INT (mask)); break;
4304 case 7: p = gen_iorhi3_24 (op0, src0, GEN_INT (mask)); break;
653e2568 4305 default: p = NULL_RTX; break; /* Not reached, but silences a warning. */
07127a0a
DD
4306 }
4307
4308 emit_insn (p);
4309 return 0;
4310}
4311
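/* Emit the assembler text for a "store on condition" result: when the
   destination is r0l a single STZX stores 1 or 0 directly, otherwise
   BMcond copies the condition into bit 0 of the destination and the
   AND masks off the remaining bits.  */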
4312const char *
4313m32c_scc_pattern(rtx *operands, RTX_CODE code)
4314{
4315 static char buf[30];
4316 if (GET_CODE (operands[0]) == REG
4317 && REGNO (operands[0]) == R0_REGNO)
4318 {
4319 if (code == EQ)
4320 return "stzx\t#1,#0,r0l";
4321 if (code == NE)
4322 return "stzx\t#0,#1,r0l";
4323 }
4324 sprintf(buf, "bm%s\t0,%%h0\n\tand.b\t#1,%%0", GET_RTX_NAME (code));
4325 return buf;
4326}
4327
5abd2125
JS
4328/* Encode symbol attributes of a SYMBOL_REF into its
4329 SYMBOL_REF_FLAGS. */
4330static void
4331m32c_encode_section_info (tree decl, rtx rtl, int first)
4332{
4333 int extra_flags = 0;
4334
4335 default_encode_section_info (decl, rtl, first);
4336 if (TREE_CODE (decl) == FUNCTION_DECL
4337 && m32c_special_page_vector_p (decl))
4338
4339 extra_flags = SYMBOL_FLAG_FUNCVEC_FUNCTION;
4340
4341 if (extra_flags)
4342 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= extra_flags;
4343}
4344
38b2d076
DD
4345/* Returns TRUE if the current function is a leaf, and thus we can
4346 determine which registers an interrupt function really needs to
4347 save. The logic below is mostly about finding the insn sequence
4348 that's the function, versus any sequence that might be open for the
4349 current insn. */
4350static int
4351m32c_leaf_function_p (void)
4352{
4353 rtx saved_first, saved_last;
4354 struct sequence_stack *seq;
4355 int rv;
4356
3e029763
JH
4357 saved_first = crtl->emit.x_first_insn;
4358 saved_last = crtl->emit.x_last_insn;
4359 for (seq = crtl->emit.sequence_stack; seq && seq->next; seq = seq->next)
38b2d076
DD
4360 ;
4361 if (seq)
4362 {
3e029763
JH
4363 crtl->emit.x_first_insn = seq->first;
4364 crtl->emit.x_last_insn = seq->last;
38b2d076
DD
4365 }
4366
4367 rv = leaf_function_p ();
4368
3e029763
JH
4369 crtl->emit.x_first_insn = saved_first;
4370 crtl->emit.x_last_insn = saved_last;
38b2d076
DD
4371 return rv;
4372}
4373
4374/* Returns TRUE if the current function needs to use the ENTER/EXIT
4375 opcodes. If the function doesn't need the frame base or stack
4376 pointer, it can use the simpler RTS opcode. */
4377static bool
4378m32c_function_needs_enter (void)
4379{
4380 rtx insn;
4381 struct sequence_stack *seq;
4382 rtx sp = gen_rtx_REG (Pmode, SP_REGNO);
4383 rtx fb = gen_rtx_REG (Pmode, FB_REGNO);
4384
4385 insn = get_insns ();
3e029763 4386 for (seq = crtl->emit.sequence_stack;
38b2d076
DD
4387 seq;
4388 insn = seq->first, seq = seq->next);
4389
4390 while (insn)
4391 {
4392 if (reg_mentioned_p (sp, insn))
4393 return true;
4394 if (reg_mentioned_p (fb, insn))
4395 return true;
4396 insn = NEXT_INSN (insn);
4397 }
4398 return false;
4399}
4400
4401/* Mark all the subexpressions of the PARALLEL rtx PAR as
4402 frame-related. Return PAR.
4403
4404 dwarf2out.c:dwarf2out_frame_debug_expr ignores sub-expressions of a
4405 PARALLEL rtx other than the first if they do not have the
4406 FRAME_RELATED flag set on them. So this function is handy for
4407 marking up 'enter' instructions. */
4408static rtx
4409m32c_all_frame_related (rtx par)
4410{
4411 int len = XVECLEN (par, 0);
4412 int i;
4413
4414 for (i = 0; i < len; i++)
4415 F (XVECEXP (par, 0, i));
4416
4417 return par;
4418}
4419
4420/* Emits the prologue. See the frame layout comment earlier in this
4421 file. We can reserve up to 256 bytes with the ENTER opcode, beyond
4422 that we manually update sp. */
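/* For example, a 300-byte frame is handled below as an ENTER covering
   the first 254 bytes followed by an explicit add of -46 to the stack
   pointer.  */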
4423void
4424m32c_emit_prologue (void)
4425{
4426 int frame_size, extra_frame_size = 0, reg_save_size;
4427 int complex_prologue = 0;
4428
4429 cfun->machine->is_leaf = m32c_leaf_function_p ();
4430 if (interrupt_p (cfun->decl))
4431 {
4432 cfun->machine->is_interrupt = 1;
4433 complex_prologue = 1;
4434 }
65655f79
DD
4435 else if (bank_switch_p (cfun->decl))
4436 warning (OPT_Wattributes,
4437 "%<bank_switch%> has no effect on non-interrupt functions");
38b2d076
DD
4438
4439 reg_save_size = m32c_pushm_popm (PP_justcount);
4440
4441 if (interrupt_p (cfun->decl))
65655f79
DD
4442 {
4443 if (bank_switch_p (cfun->decl))
4444 emit_insn (gen_fset_b ());
4445 else if (cfun->machine->intr_pushm)
4446 emit_insn (gen_pushm (GEN_INT (cfun->machine->intr_pushm)));
4447 }
38b2d076
DD
4448
4449 frame_size =
4450 m32c_initial_elimination_offset (FB_REGNO, SP_REGNO) - reg_save_size;
4451 if (frame_size == 0
38b2d076
DD
4452 && !m32c_function_needs_enter ())
4453 cfun->machine->use_rts = 1;
4454
4455 if (frame_size > 254)
4456 {
4457 extra_frame_size = frame_size - 254;
4458 frame_size = 254;
4459 }
4460 if (cfun->machine->use_rts == 0)
4461 F (emit_insn (m32c_all_frame_related
4462 (TARGET_A16
fa9fd28a
RIL
4463 ? gen_prologue_enter_16 (GEN_INT (frame_size + 2))
4464 : gen_prologue_enter_24 (GEN_INT (frame_size + 4)))));
38b2d076
DD
4465
4466 if (extra_frame_size)
4467 {
4468 complex_prologue = 1;
4469 if (TARGET_A16)
4470 F (emit_insn (gen_addhi3 (gen_rtx_REG (HImode, SP_REGNO),
4471 gen_rtx_REG (HImode, SP_REGNO),
4472 GEN_INT (-extra_frame_size))));
4473 else
4474 F (emit_insn (gen_addpsi3 (gen_rtx_REG (PSImode, SP_REGNO),
4475 gen_rtx_REG (PSImode, SP_REGNO),
4476 GEN_INT (-extra_frame_size))));
4477 }
4478
4479 complex_prologue += m32c_pushm_popm (PP_pushm);
4480
4481 /* This just emits a comment into the .s file for debugging. */
4482 if (complex_prologue)
4483 emit_insn (gen_prologue_end ());
4484}
4485
4486/* Likewise, for the epilogue. The only exception is that, for
4487 interrupts, we must manually unwind the frame as the REIT opcode
4488 doesn't do that. */
4489void
4490m32c_emit_epilogue (void)
4491{
f0679612
DD
4492 int popm_count = m32c_pushm_popm (PP_justcount);
4493
38b2d076 4494 /* This just emits a comment into the .s file for debugging. */
f0679612 4495 if (popm_count > 0 || cfun->machine->is_interrupt)
38b2d076
DD
4496 emit_insn (gen_epilogue_start ());
4497
f0679612
DD
4498 if (popm_count > 0)
4499 m32c_pushm_popm (PP_popm);
38b2d076
DD
4500
4501 if (cfun->machine->is_interrupt)
4502 {
4503 enum machine_mode spmode = TARGET_A16 ? HImode : PSImode;
4504
65655f79
DD
4505 /* REIT clears B flag and restores $fp for us, but we still
4506 have to fix up the stack. USE_RTS just means we didn't
4507 emit ENTER. */
4508 if (!cfun->machine->use_rts)
4509 {
4510 emit_move_insn (gen_rtx_REG (spmode, A0_REGNO),
4511 gen_rtx_REG (spmode, FP_REGNO));
4512 emit_move_insn (gen_rtx_REG (spmode, SP_REGNO),
4513 gen_rtx_REG (spmode, A0_REGNO));
4514 /* We can't just add this to the POPM because it would be in
4515 the wrong order, and wouldn't fix the stack if we're bank
4516 switching. */
4517 if (TARGET_A16)
4518 emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, FP_REGNO)));
4519 else
4520 emit_insn (gen_poppsi (gen_rtx_REG (PSImode, FP_REGNO)));
4521 }
4522 if (!bank_switch_p (cfun->decl) && cfun->machine->intr_pushm)
4523 emit_insn (gen_popm (GEN_INT (cfun->machine->intr_pushm)));
4524
402f2db8
DD
4525 /* The FREIT (Fast REturn from InTerrupt) instruction should be
4526 generated only for M32C/M32CM targets (generate the REIT
4527 instruction otherwise). */
65655f79 4528 if (fast_interrupt_p (cfun->decl))
402f2db8
DD
4529 {
4530	  /* Check if the fast_interrupt attribute is set for M32C or M32CM.  */
4531 if (TARGET_A24)
4532 {
4533 emit_jump_insn (gen_epilogue_freit ());
4534 }
4535 /* If fast_interrupt attribute is set for an R8C or M16C
4536	     target, ignore this attribute and generate a REIT
4537 instruction. */
4538 else
4539 {
4540 warning (OPT_Wattributes,
4541 "%<fast_interrupt%> attribute directive ignored");
4542 emit_jump_insn (gen_epilogue_reit_16 ());
4543 }
4544 }
65655f79 4545 else if (TARGET_A16)
0e0642aa
RIL
4546 emit_jump_insn (gen_epilogue_reit_16 ());
4547 else
4548 emit_jump_insn (gen_epilogue_reit_24 ());
38b2d076
DD
4549 }
4550 else if (cfun->machine->use_rts)
4551 emit_jump_insn (gen_epilogue_rts ());
0e0642aa
RIL
4552 else if (TARGET_A16)
4553 emit_jump_insn (gen_epilogue_exitd_16 ());
38b2d076 4554 else
0e0642aa 4555 emit_jump_insn (gen_epilogue_exitd_24 ());
38b2d076
DD
4556}
4557
4558void
4559m32c_emit_eh_epilogue (rtx ret_addr)
4560{
4561 /* R0[R2] has the stack adjustment. R1[R3] has the address to
4562 return to. We have to fudge the stack, pop everything, pop SP
4563 (fudged), and return (fudged). This is actually easier to do in
4564 assembler, so punt to libgcc. */
4565 emit_jump_insn (gen_eh_epilogue (ret_addr, cfun->machine->eh_stack_adjust));
c41c1387 4566 /* emit_clobber (gen_rtx_REG (HImode, R0L_REGNO)); */
38b2d076
DD
4567}
4568
16659fcf
DD
4569/* Indicate which flags must be properly set for a given conditional. */
4570static int
4571flags_needed_for_conditional (rtx cond)
4572{
4573 switch (GET_CODE (cond))
4574 {
4575 case LE:
4576 case GT:
4577 return FLAGS_OSZ;
4578 case LEU:
4579 case GTU:
4580 return FLAGS_ZC;
4581 case LT:
4582 case GE:
4583 return FLAGS_OS;
4584 case LTU:
4585 case GEU:
4586 return FLAGS_C;
4587 case EQ:
4588 case NE:
4589 return FLAGS_Z;
4590 default:
4591 return FLAGS_N;
4592 }
4593}
4594
4595#define DEBUG_CMP 0
4596
4597/* Returns true if a compare insn is redundant because it would only
4598 set flags that are already set correctly. */
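/* (Illustration, not from the original sources: for two back-to-back
   cbranches on the same operands, say "cmp.w r1,r0 / jlt" immediately
   followed by "cmp.w r1,r0 / jeq", the second compare is redundant
   because the first already set every flag the EQ test needs.)  */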
4599static bool
4600m32c_compare_redundant (rtx cmp, rtx *operands)
4601{
4602 int flags_needed;
4603 int pflags;
4604 rtx prev, pp, next;
444d6efe 4605 rtx op0, op1;
16659fcf
DD
4606#if DEBUG_CMP
4607 int prev_icode, i;
4608#endif
4609
4610 op0 = operands[0];
4611 op1 = operands[1];
16659fcf
DD
4612
4613#if DEBUG_CMP
4614 fprintf(stderr, "\n\033[32mm32c_compare_redundant\033[0m\n");
4615 debug_rtx(cmp);
4616 for (i=0; i<2; i++)
4617 {
4618 fprintf(stderr, "operands[%d] = ", i);
4619 debug_rtx(operands[i]);
4620 }
4621#endif
4622
4623 next = next_nonnote_insn (cmp);
4624 if (!next || !INSN_P (next))
4625 {
4626#if DEBUG_CMP
4627 fprintf(stderr, "compare not followed by insn\n");
4628 debug_rtx(next);
4629#endif
4630 return false;
4631 }
4632 if (GET_CODE (PATTERN (next)) == SET
4633 && GET_CODE (XEXP ( PATTERN (next), 1)) == IF_THEN_ELSE)
4634 {
4635 next = XEXP (XEXP (PATTERN (next), 1), 0);
4636 }
4637 else if (GET_CODE (PATTERN (next)) == SET)
4638 {
4639 /* If this is a conditional, flags_needed will be something
4640 other than FLAGS_N, which we test below. */
4641 next = XEXP (PATTERN (next), 1);
4642 }
4643 else
4644 {
4645#if DEBUG_CMP
4646 fprintf(stderr, "compare not followed by conditional\n");
4647 debug_rtx(next);
4648#endif
4649 return false;
4650 }
4651#if DEBUG_CMP
4652 fprintf(stderr, "conditional is: ");
4653 debug_rtx(next);
4654#endif
4655
4656 flags_needed = flags_needed_for_conditional (next);
4657 if (flags_needed == FLAGS_N)
4658 {
4659#if DEBUG_CMP
4660 fprintf(stderr, "compare not followed by conditional\n");
4661 debug_rtx(next);
4662#endif
4663 return false;
4664 }
4665
4666 /* Compare doesn't set overflow and carry the same way that
4667 arithmetic instructions do, so we can't replace those. */
4668 if (flags_needed & FLAGS_OC)
4669 return false;
4670
4671 prev = cmp;
4672 do {
4673 prev = prev_nonnote_insn (prev);
4674 if (!prev)
4675 {
4676#if DEBUG_CMP
4677 fprintf(stderr, "No previous insn.\n");
4678#endif
4679 return false;
4680 }
4681 if (!INSN_P (prev))
4682 {
4683#if DEBUG_CMP
4684 fprintf(stderr, "Previous insn is a non-insn.\n");
4685#endif
4686 return false;
4687 }
4688 pp = PATTERN (prev);
4689 if (GET_CODE (pp) != SET)
4690 {
4691#if DEBUG_CMP
4692 fprintf(stderr, "Previous insn is not a SET.\n");
4693#endif
4694 return false;
4695 }
4696 pflags = get_attr_flags (prev);
4697
4698      /* Looking up attributes of previous insns corrupts the recog
4699	 tables, so re-recognize CMP before it is used again.  */
4700 INSN_UID (cmp) = -1;
4701 recog (PATTERN (cmp), cmp, 0);
4702
4703 if (pflags == FLAGS_N
4704 && reg_mentioned_p (op0, pp))
4705 {
4706#if DEBUG_CMP
4707 fprintf(stderr, "intermediate non-flags insn uses op:\n");
4708 debug_rtx(prev);
4709#endif
4710 return false;
4711 }
b3c5a409
DD
4712
4713 /* Check for comparisons against memory - between volatiles and
4714 aliases, we just can't risk this one. */
4715 if (GET_CODE (operands[0]) == MEM
4716	  || GET_CODE (operands[1]) == MEM)
4717 {
4718#if DEBUG_CMP
4719 fprintf(stderr, "comparisons with memory:\n");
4720 debug_rtx(prev);
4721#endif
4722 return false;
4723 }
4724
4725 /* Check for PREV changing a register that's used to compute a
4726 value in CMP, even if it doesn't otherwise change flags. */
4727 if (GET_CODE (operands[0]) == REG
4728 && rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[0]))
4729 {
4730#if DEBUG_CMP
4731 fprintf(stderr, "sub-value affected, op0:\n");
4732 debug_rtx(prev);
4733#endif
4734 return false;
4735 }
4736 if (GET_CODE (operands[1]) == REG
4737 && rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[1]))
4738 {
4739#if DEBUG_CMP
4740 fprintf(stderr, "sub-value affected, op1:\n");
4741 debug_rtx(prev);
4742#endif
4743 return false;
4744 }
4745
16659fcf
DD
4746 } while (pflags == FLAGS_N);
4747#if DEBUG_CMP
4748 fprintf(stderr, "previous flag-setting insn:\n");
4749 debug_rtx(prev);
4750 debug_rtx(pp);
4751#endif
4752
4753 if (GET_CODE (pp) == SET
4754 && GET_CODE (XEXP (pp, 0)) == REG
4755 && REGNO (XEXP (pp, 0)) == FLG_REGNO
4756 && GET_CODE (XEXP (pp, 1)) == COMPARE)
4757 {
4758 /* Adjacent cbranches must have the same operands to be
4759 redundant. */
4760 rtx pop0 = XEXP (XEXP (pp, 1), 0);
4761 rtx pop1 = XEXP (XEXP (pp, 1), 1);
4762#if DEBUG_CMP
4763 fprintf(stderr, "adjacent cbranches\n");
4764 debug_rtx(pop0);
4765 debug_rtx(pop1);
4766#endif
4767 if (rtx_equal_p (op0, pop0)
4768 && rtx_equal_p (op1, pop1))
4769 return true;
4770#if DEBUG_CMP
4771 fprintf(stderr, "prev cmp not same\n");
4772#endif
4773 return false;
4774 }
4775
4776 /* Else the previous insn must be a SET, with either the source or
4777 dest equal to operands[0], and operands[1] must be zero. */
4778
4779 if (!rtx_equal_p (op1, const0_rtx))
4780 {
4781#if DEBUG_CMP
4782 fprintf(stderr, "operands[1] not const0_rtx\n");
4783#endif
4784 return false;
4785 }
4786 if (GET_CODE (pp) != SET)
4787 {
4788#if DEBUG_CMP
4789 fprintf (stderr, "pp not set\n");
4790#endif
4791 return false;
4792 }
4793 if (!rtx_equal_p (op0, SET_SRC (pp))
4794 && !rtx_equal_p (op0, SET_DEST (pp)))
4795 {
4796#if DEBUG_CMP
4797 fprintf(stderr, "operands[0] not found in set\n");
4798#endif
4799 return false;
4800 }
4801
4802#if DEBUG_CMP
4803 fprintf(stderr, "cmp flags %x prev flags %x\n", flags_needed, pflags);
4804#endif
4805 if ((pflags & flags_needed) == flags_needed)
4806 return true;
4807
4808 return false;
4809}
4810
4811/* Return the pattern for a compare. This will be commented out if
4812 the compare is redundant, else a normal pattern is returned. Thus,
4813 the assembler output says where the compare would have been. */
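/* Note that templ below is ";cmp.b\t%1,%0": returning templ keeps the
   leading ';' so the assembler sees only a comment, while returning
   templ + 1 drops the ';' and emits a real compare.  */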
4814char *
4815m32c_output_compare (rtx insn, rtx *operands)
4816{
0a2aaacc 4817 static char templ[] = ";cmp.b\t%1,%0";
16659fcf
DD
4818 /* ^ 5 */
4819
0a2aaacc 4820 templ[5] = " bwll"[GET_MODE_SIZE(GET_MODE(operands[0]))];
16659fcf
DD
4821 if (m32c_compare_redundant (insn, operands))
4822 {
4823#if DEBUG_CMP
4824 fprintf(stderr, "cbranch: cmp not needed\n");
4825#endif
0a2aaacc 4826 return templ;
16659fcf
DD
4827 }
4828
4829#if DEBUG_CMP
b3c5a409 4830 fprintf(stderr, "cbranch: cmp needed: `%s'\n", templ + 1);
16659fcf 4831#endif
0a2aaacc 4832 return templ + 1;
16659fcf
DD
4833}
4834
5abd2125
JS
4835#undef TARGET_ENCODE_SECTION_INFO
4836#define TARGET_ENCODE_SECTION_INFO m32c_encode_section_info
4837
b52b1749
AS
4838/* If the frame pointer isn't used, we detect it manually. But the
4839 stack pointer doesn't have as flexible addressing as the frame
4840 pointer, so we always assume we have it. */
4841
4842#undef TARGET_FRAME_POINTER_REQUIRED
4843#define TARGET_FRAME_POINTER_REQUIRED hook_bool_void_true
4844
38b2d076
DD
4845/* The Global `targetm' Variable. */
4846
4847struct gcc_target targetm = TARGET_INITIALIZER;
4848
4849#include "gt-m32c.h"