/* Target Code for R8C/M16C/M32C
   Copyright (C) 2005-2014 Free Software Foundation, Inc.
   Contributed by Red Hat.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

21#include "config.h"
22#include "system.h"
23#include "coretypes.h"
24#include "tm.h"
25#include "rtl.h"
26#include "regs.h"
27#include "hard-reg-set.h"
38b2d076
DD
28#include "insn-config.h"
29#include "conditions.h"
30#include "insn-flags.h"
31#include "output.h"
32#include "insn-attr.h"
33#include "flags.h"
34#include "recog.h"
35#include "reload.h"
718f9c0f 36#include "diagnostic-core.h"
38b2d076
DD
37#include "obstack.h"
38#include "tree.h"
d8a2d370
DN
39#include "stor-layout.h"
40#include "varasm.h"
41#include "calls.h"
38b2d076
DD
42#include "expr.h"
43#include "optabs.h"
44#include "except.h"
45#include "function.h"
46#include "ggc.h"
47#include "target.h"
48#include "target-def.h"
49#include "tm_p.h"
50#include "langhooks.h"
2fb9a547
AM
51#include "pointer-set.h"
52#include "hash-table.h"
53#include "vec.h"
54#include "basic-block.h"
55#include "tree-ssa-alias.h"
56#include "internal-fn.h"
57#include "gimple-fold.h"
58#include "tree-eh.h"
59#include "gimple-expr.h"
60#include "is-a.h"
726a989a 61#include "gimple.h"
fa9fd28a 62#include "df.h"
03dd17b1 63#include "tm-constrs.h"
38b2d076
DD
64
/* Prototypes */

/* Used by m32c_pushm_popm.  */
typedef enum
{
  PP_pushm,
  PP_popm,
  PP_justcount
} Push_Pop_Type;

static bool m32c_function_needs_enter (void);
static tree interrupt_handler (tree *, tree, tree, int, bool *);
static tree function_vector_handler (tree *, tree, tree, int, bool *);
static int interrupt_p (tree node);
static int bank_switch_p (tree node);
static int fast_interrupt_p (tree node);
static int interrupt_p (tree node);
static bool m32c_asm_integer (rtx, unsigned int, int);
static int m32c_comp_type_attributes (const_tree, const_tree);
static bool m32c_fixed_condition_code_regs (unsigned int *, unsigned int *);
static struct machine_function *m32c_init_machine_status (void);
static void m32c_insert_attributes (tree, tree *);
static bool m32c_legitimate_address_p (enum machine_mode, rtx, bool);
static bool m32c_addr_space_legitimate_address_p (enum machine_mode, rtx, bool, addr_space_t);
static rtx m32c_function_arg (cumulative_args_t, enum machine_mode,
			      const_tree, bool);
static bool m32c_pass_by_reference (cumulative_args_t, enum machine_mode,
				    const_tree, bool);
static void m32c_function_arg_advance (cumulative_args_t, enum machine_mode,
				       const_tree, bool);
static unsigned int m32c_function_arg_boundary (enum machine_mode, const_tree);
static int m32c_pushm_popm (Push_Pop_Type);
static bool m32c_strict_argument_naming (cumulative_args_t);
static rtx m32c_struct_value_rtx (tree, int);
static rtx m32c_subreg (enum machine_mode, rtx, enum machine_mode, int);
static int need_to_save (int);
static rtx m32c_function_value (const_tree, const_tree, bool);
static rtx m32c_libcall_value (enum machine_mode, const_rtx);

/* Returns true if an address is specified, else false.  */
static bool m32c_get_pragma_address (const char *varname, unsigned *addr);

#define SYMBOL_FLAG_FUNCVEC_FUNCTION	(SYMBOL_FLAG_MACH_DEP << 0)

#define streq(a,b) (strcmp ((a), (b)) == 0)

/* Internal support routines */

/* Debugging statements are tagged with DEBUG0 only so that they can
   be easily enabled individually, by replacing the '0' with '1' as
   needed.  */
#define DEBUG0 0
#define DEBUG1 1

#if DEBUG0
/* This is needed by some of the commented-out debug statements
   below.  */
static char const *class_names[LIM_REG_CLASSES] = REG_CLASS_NAMES;
#endif
static int class_contents[LIM_REG_CLASSES][1] = REG_CLASS_CONTENTS;

/* These are all to support encode_pattern().  */
static char pattern[30], *patternp;
static GTY(()) rtx patternr[30];
#define RTX_IS(x) (streq (pattern, x))

/* Some macros to simplify the logic throughout this file.  */
#define IS_MEM_REGNO(regno) ((regno) >= MEM0_REGNO && (regno) <= MEM7_REGNO)
#define IS_MEM_REG(rtx) (GET_CODE (rtx) == REG && IS_MEM_REGNO (REGNO (rtx)))

#define IS_CR_REGNO(regno) ((regno) >= SB_REGNO && (regno) <= PC_REGNO)
#define IS_CR_REG(rtx) (GET_CODE (rtx) == REG && IS_CR_REGNO (REGNO (rtx)))

static int
far_addr_space_p (rtx x)
{
  if (GET_CODE (x) != MEM)
    return 0;
#if DEBUG0
  fprintf(stderr, "\033[35mfar_addr_space: "); debug_rtx(x);
  fprintf(stderr, " = %d\033[0m\n", MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR);
#endif
  return MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR;
}

/* We do most RTX matching by converting the RTX into a string, and
   using string compares.  This vastly simplifies the logic in many of
   the functions in this file.

   On exit, pattern[] has the encoded string (use RTX_IS("...") to
   compare it) and patternr[] has pointers to the nodes in the RTX
   corresponding to each character in the encoded string.  The latter
   is mostly used by print_operand().

   Unrecognized patterns have '?' in them; this shows up when the
   assembler complains about syntax errors.
*/

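/* For example (an illustrative sketch, not an exhaustive list): the
   address (mem:HI (plus (reg a0) (const_int 4))) encodes as "m+ri",
   with patternr[0] pointing at the MEM, patternr[1] at the PLUS,
   patternr[2] at the REG and patternr[3] at the CONST_INT; that is
   how the RTX_IS ("m+ri") tests later in this file pick operands
   back out of patternr[].  */
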
163static void
164encode_pattern_1 (rtx x)
165{
166 int i;
167
168 if (patternp == pattern + sizeof (pattern) - 2)
169 {
170 patternp[-1] = '?';
171 return;
172 }
173
174 patternr[patternp - pattern] = x;
175
176 switch (GET_CODE (x))
177 {
178 case REG:
179 *patternp++ = 'r';
180 break;
181 case SUBREG:
182 if (GET_MODE_SIZE (GET_MODE (x)) !=
183 GET_MODE_SIZE (GET_MODE (XEXP (x, 0))))
184 *patternp++ = 'S';
185 encode_pattern_1 (XEXP (x, 0));
186 break;
187 case MEM:
188 *patternp++ = 'm';
189 case CONST:
190 encode_pattern_1 (XEXP (x, 0));
191 break;
5fd5d713
DD
192 case SIGN_EXTEND:
193 *patternp++ = '^';
194 *patternp++ = 'S';
195 encode_pattern_1 (XEXP (x, 0));
196 break;
197 case ZERO_EXTEND:
198 *patternp++ = '^';
199 *patternp++ = 'Z';
200 encode_pattern_1 (XEXP (x, 0));
201 break;
38b2d076
DD
202 case PLUS:
203 *patternp++ = '+';
204 encode_pattern_1 (XEXP (x, 0));
205 encode_pattern_1 (XEXP (x, 1));
206 break;
207 case PRE_DEC:
208 *patternp++ = '>';
209 encode_pattern_1 (XEXP (x, 0));
210 break;
211 case POST_INC:
212 *patternp++ = '<';
213 encode_pattern_1 (XEXP (x, 0));
214 break;
215 case LO_SUM:
216 *patternp++ = 'L';
217 encode_pattern_1 (XEXP (x, 0));
218 encode_pattern_1 (XEXP (x, 1));
219 break;
220 case HIGH:
221 *patternp++ = 'H';
222 encode_pattern_1 (XEXP (x, 0));
223 break;
224 case SYMBOL_REF:
225 *patternp++ = 's';
226 break;
227 case LABEL_REF:
228 *patternp++ = 'l';
229 break;
230 case CODE_LABEL:
231 *patternp++ = 'c';
232 break;
233 case CONST_INT:
234 case CONST_DOUBLE:
235 *patternp++ = 'i';
236 break;
237 case UNSPEC:
238 *patternp++ = 'u';
239 *patternp++ = '0' + XCINT (x, 1, UNSPEC);
240 for (i = 0; i < XVECLEN (x, 0); i++)
241 encode_pattern_1 (XVECEXP (x, 0, i));
242 break;
243 case USE:
244 *patternp++ = 'U';
245 break;
246 case PARALLEL:
247 *patternp++ = '|';
248 for (i = 0; i < XVECLEN (x, 0); i++)
249 encode_pattern_1 (XVECEXP (x, 0, i));
250 break;
251 case EXPR_LIST:
252 *patternp++ = 'E';
253 encode_pattern_1 (XEXP (x, 0));
254 if (XEXP (x, 1))
255 encode_pattern_1 (XEXP (x, 1));
256 break;
257 default:
258 *patternp++ = '?';
259#if DEBUG0
260 fprintf (stderr, "can't encode pattern %s\n",
261 GET_RTX_NAME (GET_CODE (x)));
262 debug_rtx (x);
263 gcc_unreachable ();
264#endif
265 break;
266 }
267}
268
269static void
270encode_pattern (rtx x)
271{
272 patternp = pattern;
273 encode_pattern_1 (x);
274 *patternp = 0;
275}
276
/* Since register names indicate the mode they're used in, we need a
   way to determine which name to use for a register in a given mode.
   Called by print_operand().  */

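/* Illustrative examples, derived from the cases below: R0 prints as
   "r0l" in QImode, "r2r0" in SImode and "r3r1r2r0" in DImode; sizes
   with no special entry simply fall back to reg_names[regno].  */
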
281static const char *
282reg_name_with_mode (int regno, enum machine_mode mode)
283{
284 int mlen = GET_MODE_SIZE (mode);
285 if (regno == R0_REGNO && mlen == 1)
286 return "r0l";
287 if (regno == R0_REGNO && (mlen == 3 || mlen == 4))
288 return "r2r0";
289 if (regno == R0_REGNO && mlen == 6)
290 return "r2r1r0";
291 if (regno == R0_REGNO && mlen == 8)
292 return "r3r1r2r0";
293 if (regno == R1_REGNO && mlen == 1)
294 return "r1l";
295 if (regno == R1_REGNO && (mlen == 3 || mlen == 4))
296 return "r3r1";
297 if (regno == A0_REGNO && TARGET_A16 && (mlen == 3 || mlen == 4))
298 return "a1a0";
299 return reg_names[regno];
300}
301
302/* How many bytes a register uses on stack when it's pushed. We need
303 to know this because the push opcode needs to explicitly indicate
304 the size of the register, even though the name of the register
305 already tells it that. Used by m32c_output_reg_{push,pop}, which
306 is only used through calls to ASM_OUTPUT_REG_{PUSH,POP}. */
307
308static int
309reg_push_size (int regno)
310{
311 switch (regno)
312 {
313 case R0_REGNO:
314 case R1_REGNO:
315 return 2;
316 case R2_REGNO:
317 case R3_REGNO:
318 case FLG_REGNO:
319 return 2;
320 case A0_REGNO:
321 case A1_REGNO:
322 case SB_REGNO:
323 case FB_REGNO:
324 case SP_REGNO:
325 if (TARGET_A16)
326 return 2;
327 else
328 return 3;
329 default:
330 gcc_unreachable ();
331 }
332}
333
/* Given two register classes, find the largest intersection between
   them.  If there is no intersection, return RETURNED_IF_EMPTY
   instead.  */
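/* A hypothetical example: if ORIGINAL_CLASS covers {r0,r1,r2,r3,a0,a1}
   and LIMITING_CLASS covers {a0,a1}, their intersection is {a0,a1} and
   the largest predefined class contained in it (the address-register
   class A_REGS) is returned; if the two classes share no registers,
   RETURNED_IF_EMPTY is returned instead.  */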
337static reg_class_t
338reduce_class (reg_class_t original_class, reg_class_t limiting_class,
339 reg_class_t returned_if_empty)
38b2d076 340{
35bdbc69
AS
341 HARD_REG_SET cc;
342 int i;
343 reg_class_t best = NO_REGS;
344 unsigned int best_size = 0;
38b2d076
DD
345
346 if (original_class == limiting_class)
347 return original_class;
348
35bdbc69
AS
349 cc = reg_class_contents[original_class];
350 AND_HARD_REG_SET (cc, reg_class_contents[limiting_class]);
38b2d076 351
38b2d076
DD
352 for (i = 0; i < LIM_REG_CLASSES; i++)
353 {
35bdbc69
AS
354 if (hard_reg_set_subset_p (reg_class_contents[i], cc))
355 if (best_size < reg_class_size[i])
38b2d076 356 {
35bdbc69
AS
357 best = (reg_class_t) i;
358 best_size = reg_class_size[i];
38b2d076
DD
359 }
360
361 }
362 if (best == NO_REGS)
363 return returned_if_empty;
364 return best;
365}
366
38b2d076
DD
367/* Used by m32c_register_move_cost to determine if a move is
368 impossibly expensive. */
0e607518
AS
369static bool
370class_can_hold_mode (reg_class_t rclass, enum machine_mode mode)
38b2d076
DD
371{
372 /* Cache the results: 0=untested 1=no 2=yes */
373 static char results[LIM_REG_CLASSES][MAX_MACHINE_MODE];
0e607518
AS
374
375 if (results[(int) rclass][mode] == 0)
38b2d076 376 {
0e607518 377 int r;
0a2aaacc 378 results[rclass][mode] = 1;
38b2d076 379 for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
0e607518 380 if (in_hard_reg_set_p (reg_class_contents[(int) rclass], mode, r)
38b2d076
DD
381 && HARD_REGNO_MODE_OK (r, mode))
382 {
0e607518
AS
383 results[rclass][mode] = 2;
384 break;
38b2d076
DD
385 }
386 }
0e607518 387
38b2d076
DD
388#if DEBUG0
389 fprintf (stderr, "class %s can hold %s? %s\n",
0e607518 390 class_names[(int) rclass], mode_name[mode],
0a2aaacc 391 (results[rclass][mode] == 2) ? "yes" : "no");
38b2d076 392#endif
0e607518 393 return results[(int) rclass][mode] == 2;
38b2d076
DD
394}
395
/* Run-time Target Specification.  */

/* Memregs are memory locations that gcc treats like general
   registers, as there are a limited number of true registers and the
   m32c families can use memory in most places that registers can be
   used.

   However, since memory accesses are more expensive than registers,
   we allow the user to limit the number of memregs available, in
   order to try to persuade gcc to try harder to use real registers.

   Memregs are provided by lib1funcs.S.
*/
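/* An illustrative example (assuming the -memregs= command-line option
   that sets target_memregs below): -memregs=4 requests 4 bytes of
   memreg space, i.e. the first two 16-bit memregs (mem0 and mem1);
   m32c_conditional_register_usage then marks mem2..mem7 as fixed.  */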
409
410int ok_to_change_target_memregs = TRUE;
411
f28f2337
AS
412/* Implements TARGET_OPTION_OVERRIDE. */
413
414#undef TARGET_OPTION_OVERRIDE
415#define TARGET_OPTION_OVERRIDE m32c_option_override
416
417static void
418m32c_option_override (void)
38b2d076 419{
f28f2337 420 /* We limit memregs to 0..16, and provide a default. */
bbfc9a8c 421 if (global_options_set.x_target_memregs)
38b2d076
DD
422 {
423 if (target_memregs < 0 || target_memregs > 16)
424 error ("invalid target memregs value '%d'", target_memregs);
425 }
426 else
07127a0a 427 target_memregs = 16;
18b80268
DD
428
429 if (TARGET_A24)
430 flag_ivopts = 0;
0685e770
DD
431
432 /* This target defaults to strict volatile bitfields. */
36acc1a2 433 if (flag_strict_volatile_bitfields < 0 && abi_version_at_least(2))
0685e770 434 flag_strict_volatile_bitfields = 1;
d123bf41
DD
435
436 /* r8c/m16c have no 16-bit indirect call, so thunks are involved.
437 This is always worse than an absolute call. */
438 if (TARGET_A16)
439 flag_no_function_cse = 1;
a4403164
DD
440
441 /* This wants to put insns between compares and their jumps. */
442 /* FIXME: The right solution is to properly trace the flags register
443 values, but that is too much work for stage 4. */
444 flag_combine_stack_adjustments = 0;
d123bf41
DD
445}
446
447#undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
448#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m32c_override_options_after_change
449
450static void
451m32c_override_options_after_change (void)
452{
453 if (TARGET_A16)
454 flag_no_function_cse = 1;
38b2d076
DD
455}
456
457/* Defining data structures for per-function information */
458
459/* The usual; we set up our machine_function data. */
460static struct machine_function *
461m32c_init_machine_status (void)
462{
766090c2 463 return ggc_cleared_alloc<machine_function> ();
38b2d076
DD
464}
465
466/* Implements INIT_EXPANDERS. We just set up to call the above
467 function. */
468void
469m32c_init_expanders (void)
470{
471 init_machine_status = m32c_init_machine_status;
472}
473
474/* Storage Layout */
475
38b2d076
DD
476/* Register Basics */
477
478/* Basic Characteristics of Registers */
479
480/* Whether a mode fits in a register is complex enough to warrant a
481 table. */
482static struct
483{
484 char qi_regs;
485 char hi_regs;
486 char pi_regs;
487 char si_regs;
488 char di_regs;
489} nregs_table[FIRST_PSEUDO_REGISTER] =
490{
491 { 1, 1, 2, 2, 4 }, /* r0 */
492 { 0, 1, 0, 0, 0 }, /* r2 */
493 { 1, 1, 2, 2, 0 }, /* r1 */
494 { 0, 1, 0, 0, 0 }, /* r3 */
495 { 0, 1, 1, 0, 0 }, /* a0 */
496 { 0, 1, 1, 0, 0 }, /* a1 */
497 { 0, 1, 1, 0, 0 }, /* sb */
498 { 0, 1, 1, 0, 0 }, /* fb */
499 { 0, 1, 1, 0, 0 }, /* sp */
500 { 1, 1, 1, 0, 0 }, /* pc */
501 { 0, 0, 0, 0, 0 }, /* fl */
502 { 1, 1, 1, 0, 0 }, /* ap */
503 { 1, 1, 2, 2, 4 }, /* mem0 */
504 { 1, 1, 2, 2, 4 }, /* mem1 */
505 { 1, 1, 2, 2, 4 }, /* mem2 */
506 { 1, 1, 2, 2, 4 }, /* mem3 */
507 { 1, 1, 2, 2, 4 }, /* mem4 */
508 { 1, 1, 2, 2, 0 }, /* mem5 */
509 { 1, 1, 2, 2, 0 }, /* mem6 */
510 { 1, 1, 0, 0, 0 }, /* mem7 */
511};
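/* An illustrative reading of the table above: each entry is the number
   of hard registers a value of that width occupies when it starts at
   that register, and 0 means the mode doesn't fit there.  For example,
   the si_regs entry for r0 is 2 because an SImode value starting at r0
   spans r0 and r2 (printed as "r2r0" by reg_name_with_mode).  */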
512
513/* Implements TARGET_CONDITIONAL_REGISTER_USAGE. We adjust the number
514 of available memregs, and select which registers need to be preserved
38b2d076
DD
515 across calls based on the chip family. */
516
5efd84c5
NF
517#undef TARGET_CONDITIONAL_REGISTER_USAGE
518#define TARGET_CONDITIONAL_REGISTER_USAGE m32c_conditional_register_usage
d6d17ae7 519void
38b2d076
DD
520m32c_conditional_register_usage (void)
521{
38b2d076
DD
522 int i;
523
524 if (0 <= target_memregs && target_memregs <= 16)
525 {
526 /* The command line option is bytes, but our "registers" are
527 16-bit words. */
65655f79 528 for (i = (target_memregs+1)/2; i < 8; i++)
38b2d076
DD
529 {
530 fixed_regs[MEM0_REGNO + i] = 1;
531 CLEAR_HARD_REG_BIT (reg_class_contents[MEM_REGS], MEM0_REGNO + i);
532 }
533 }
534
535 /* M32CM and M32C preserve more registers across function calls. */
536 if (TARGET_A24)
537 {
538 call_used_regs[R1_REGNO] = 0;
539 call_used_regs[R2_REGNO] = 0;
540 call_used_regs[R3_REGNO] = 0;
541 call_used_regs[A0_REGNO] = 0;
542 call_used_regs[A1_REGNO] = 0;
543 }
544}
545
546/* How Values Fit in Registers */
547
548/* Implements HARD_REGNO_NREGS. This is complicated by the fact that
549 different registers are different sizes from each other, *and* may
550 be different sizes in different chip families. */
b8a669d0
DD
551static int
552m32c_hard_regno_nregs_1 (int regno, enum machine_mode mode)
38b2d076
DD
553{
554 if (regno == FLG_REGNO && mode == CCmode)
555 return 1;
556 if (regno >= FIRST_PSEUDO_REGISTER)
557 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
558
559 if (regno >= MEM0_REGNO && regno <= MEM7_REGNO)
560 return (GET_MODE_SIZE (mode) + 1) / 2;
561
562 if (GET_MODE_SIZE (mode) <= 1)
563 return nregs_table[regno].qi_regs;
564 if (GET_MODE_SIZE (mode) <= 2)
565 return nregs_table[regno].hi_regs;
5fd5d713 566 if (regno == A0_REGNO && mode == SImode && TARGET_A16)
38b2d076
DD
567 return 2;
568 if ((GET_MODE_SIZE (mode) <= 3 || mode == PSImode) && TARGET_A24)
569 return nregs_table[regno].pi_regs;
570 if (GET_MODE_SIZE (mode) <= 4)
571 return nregs_table[regno].si_regs;
572 if (GET_MODE_SIZE (mode) <= 8)
573 return nregs_table[regno].di_regs;
574 return 0;
575}
576
b8a669d0
DD
577int
578m32c_hard_regno_nregs (int regno, enum machine_mode mode)
579{
580 int rv = m32c_hard_regno_nregs_1 (regno, mode);
581 return rv ? rv : 1;
582}
583
38b2d076
DD
584/* Implements HARD_REGNO_MODE_OK. The above function does the work
585 already; just test its return value. */
586int
587m32c_hard_regno_ok (int regno, enum machine_mode mode)
588{
b8a669d0 589 return m32c_hard_regno_nregs_1 (regno, mode) != 0;
38b2d076
DD
590}
591
592/* Implements MODES_TIEABLE_P. In general, modes aren't tieable since
593 registers are all different sizes. However, since most modes are
594 bigger than our registers anyway, it's easier to implement this
595 function that way, leaving QImode as the only unique case. */
596int
597m32c_modes_tieable_p (enum machine_mode m1, enum machine_mode m2)
598{
599 if (GET_MODE_SIZE (m1) == GET_MODE_SIZE (m2))
600 return 1;
601
07127a0a 602#if 0
38b2d076
DD
603 if (m1 == QImode || m2 == QImode)
604 return 0;
07127a0a 605#endif
38b2d076
DD
606
607 return 1;
608}
609
610/* Register Classes */
611
612/* Implements REGNO_REG_CLASS. */
444d6efe 613enum reg_class
38b2d076
DD
614m32c_regno_reg_class (int regno)
615{
616 switch (regno)
617 {
618 case R0_REGNO:
619 return R0_REGS;
620 case R1_REGNO:
621 return R1_REGS;
622 case R2_REGNO:
623 return R2_REGS;
624 case R3_REGNO:
625 return R3_REGS;
626 case A0_REGNO:
22843acd 627 return A0_REGS;
38b2d076 628 case A1_REGNO:
22843acd 629 return A1_REGS;
38b2d076
DD
630 case SB_REGNO:
631 return SB_REGS;
632 case FB_REGNO:
633 return FB_REGS;
634 case SP_REGNO:
635 return SP_REGS;
636 case FLG_REGNO:
637 return FLG_REGS;
638 default:
639 if (IS_MEM_REGNO (regno))
640 return MEM_REGS;
641 return ALL_REGS;
642 }
643}
644
38b2d076
DD
645/* Implements REGNO_OK_FOR_BASE_P. */
646int
647m32c_regno_ok_for_base_p (int regno)
648{
649 if (regno == A0_REGNO
650 || regno == A1_REGNO || regno >= FIRST_PSEUDO_REGISTER)
651 return 1;
652 return 0;
653}
654
655#define DEBUG_RELOAD 0
656
b05933f5 657/* Implements TARGET_PREFERRED_RELOAD_CLASS. In general, prefer general
38b2d076 658 registers of the appropriate size. */
b05933f5
AS
659
660#undef TARGET_PREFERRED_RELOAD_CLASS
661#define TARGET_PREFERRED_RELOAD_CLASS m32c_preferred_reload_class
662
663static reg_class_t
664m32c_preferred_reload_class (rtx x, reg_class_t rclass)
38b2d076 665{
b05933f5 666 reg_class_t newclass = rclass;
38b2d076
DD
667
668#if DEBUG_RELOAD
669 fprintf (stderr, "\npreferred_reload_class for %s is ",
670 class_names[rclass]);
671#endif
672 if (rclass == NO_REGS)
673 rclass = GET_MODE (x) == QImode ? HL_REGS : R03_REGS;
674
0e607518 675 if (reg_classes_intersect_p (rclass, CR_REGS))
38b2d076
DD
676 {
677 switch (GET_MODE (x))
678 {
679 case QImode:
680 newclass = HL_REGS;
681 break;
682 default:
683 /* newclass = HI_REGS; */
684 break;
685 }
686 }
687
688 else if (newclass == QI_REGS && GET_MODE_SIZE (GET_MODE (x)) > 2)
689 newclass = SI_REGS;
690 else if (GET_MODE_SIZE (GET_MODE (x)) > 4
b05933f5 691 && ! reg_class_subset_p (R03_REGS, rclass))
38b2d076
DD
692 newclass = DI_REGS;
693
694 rclass = reduce_class (rclass, newclass, rclass);
695
696 if (GET_MODE (x) == QImode)
697 rclass = reduce_class (rclass, HL_REGS, rclass);
698
699#if DEBUG_RELOAD
700 fprintf (stderr, "%s\n", class_names[rclass]);
701 debug_rtx (x);
702
703 if (GET_CODE (x) == MEM
704 && GET_CODE (XEXP (x, 0)) == PLUS
705 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
706 fprintf (stderr, "Glorm!\n");
707#endif
708 return rclass;
709}
710
b05933f5
AS
711/* Implements TARGET_PREFERRED_OUTPUT_RELOAD_CLASS. */
712
713#undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
714#define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS m32c_preferred_output_reload_class
715
716static reg_class_t
717m32c_preferred_output_reload_class (rtx x, reg_class_t rclass)
38b2d076
DD
718{
719 return m32c_preferred_reload_class (x, rclass);
720}
721
/* Implements LIMIT_RELOAD_CLASS.  We basically want to avoid using
   address registers for data reloads, since they're needed for address
   reloads.  */
725int
726m32c_limit_reload_class (enum machine_mode mode, int rclass)
727{
728#if DEBUG_RELOAD
729 fprintf (stderr, "limit_reload_class for %s: %s ->",
730 mode_name[mode], class_names[rclass]);
731#endif
732
733 if (mode == QImode)
734 rclass = reduce_class (rclass, HL_REGS, rclass);
735 else if (mode == HImode)
736 rclass = reduce_class (rclass, HI_REGS, rclass);
737 else if (mode == SImode)
738 rclass = reduce_class (rclass, SI_REGS, rclass);
739
740 if (rclass != A_REGS)
741 rclass = reduce_class (rclass, DI_REGS, rclass);
742
743#if DEBUG_RELOAD
744 fprintf (stderr, " %s\n", class_names[rclass]);
745#endif
746 return rclass;
747}
748
/* Implements SECONDARY_RELOAD_CLASS.  QImode values have to be
   reloaded in r0 or r1, as those are the only real QImode registers.
   CR regs get reloaded through appropriately sized general or address
   registers.  */
753int
754m32c_secondary_reload_class (int rclass, enum machine_mode mode, rtx x)
755{
756 int cc = class_contents[rclass][0];
757#if DEBUG0
758 fprintf (stderr, "\nsecondary reload class %s %s\n",
759 class_names[rclass], mode_name[mode]);
760 debug_rtx (x);
761#endif
762 if (mode == QImode
763 && GET_CODE (x) == MEM && (cc & ~class_contents[R23_REGS][0]) == 0)
764 return QI_REGS;
0e607518 765 if (reg_classes_intersect_p (rclass, CR_REGS)
38b2d076
DD
766 && GET_CODE (x) == REG
767 && REGNO (x) >= SB_REGNO && REGNO (x) <= SP_REGNO)
13a23442 768 return (TARGET_A16 || mode == HImode) ? HI_REGS : A_REGS;
38b2d076
DD
769 return NO_REGS;
770}
771
184866c5 772/* Implements TARGET_CLASS_LIKELY_SPILLED_P. A_REGS is needed for address
38b2d076 773 reloads. */
184866c5
AS
774
775#undef TARGET_CLASS_LIKELY_SPILLED_P
776#define TARGET_CLASS_LIKELY_SPILLED_P m32c_class_likely_spilled_p
777
778static bool
779m32c_class_likely_spilled_p (reg_class_t regclass)
38b2d076
DD
780{
781 if (regclass == A_REGS)
184866c5
AS
782 return true;
783
784 return (reg_class_size[(int) regclass] == 1);
38b2d076
DD
785}
786
c4831cff 787/* Implements TARGET_CLASS_MAX_NREGS. We calculate this according to its
38b2d076
DD
788 documented meaning, to avoid potential inconsistencies with actual
789 class definitions. */
c4831cff
AS
790
791#undef TARGET_CLASS_MAX_NREGS
792#define TARGET_CLASS_MAX_NREGS m32c_class_max_nregs
793
794static unsigned char
795m32c_class_max_nregs (reg_class_t regclass, enum machine_mode mode)
38b2d076 796{
c4831cff
AS
797 int rn;
798 unsigned char max = 0;
38b2d076
DD
799
800 for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
c4831cff 801 if (TEST_HARD_REG_BIT (reg_class_contents[(int) regclass], rn))
38b2d076 802 {
c4831cff 803 unsigned char n = m32c_hard_regno_nregs (rn, mode);
38b2d076
DD
804 if (max < n)
805 max = n;
806 }
807 return max;
808}
809
810/* Implements CANNOT_CHANGE_MODE_CLASS. Only r0 and r1 can change to
811 QI (r0l, r1l) because the chip doesn't support QI ops on other
812 registers (well, it does on a0/a1 but if we let gcc do that, reload
813 suffers). Otherwise, we allow changes to larger modes. */
814int
815m32c_cannot_change_mode_class (enum machine_mode from,
816 enum machine_mode to, int rclass)
817{
db9c8397 818 int rn;
38b2d076
DD
819#if DEBUG0
820 fprintf (stderr, "cannot change from %s to %s in %s\n",
821 mode_name[from], mode_name[to], class_names[rclass]);
822#endif
823
db9c8397
DD
824 /* If the larger mode isn't allowed in any of these registers, we
825 can't allow the change. */
826 for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
827 if (class_contents[rclass][0] & (1 << rn))
828 if (! m32c_hard_regno_ok (rn, to))
829 return 1;
830
38b2d076
DD
831 if (to == QImode)
832 return (class_contents[rclass][0] & 0x1ffa);
833
834 if (class_contents[rclass][0] & 0x0005 /* r0, r1 */
835 && GET_MODE_SIZE (from) > 1)
836 return 0;
837 if (GET_MODE_SIZE (from) > 2) /* all other regs */
838 return 0;
839
840 return 1;
841}
842
843/* Helpers for the rest of the file. */
844/* TRUE if the rtx is a REG rtx for the given register. */
845#define IS_REG(rtx,regno) (GET_CODE (rtx) == REG \
846 && REGNO (rtx) == regno)
847/* TRUE if the rtx is a pseudo - specifically, one we can use as a
848 base register in address calculations (hence the "strict"
849 argument). */
850#define IS_PSEUDO(rtx,strict) (!strict && GET_CODE (rtx) == REG \
851 && (REGNO (rtx) == AP_REGNO \
852 || REGNO (rtx) >= FIRST_PSEUDO_REGISTER))
853
5fd5d713
DD
854#define A0_OR_PSEUDO(x) (IS_REG(x, A0_REGNO) || REGNO (x) >= FIRST_PSEUDO_REGISTER)
855
38b2d076
DD
856/* Implements EXTRA_CONSTRAINT_STR (see next function too). 'S' is
857 for memory constraints, plus "Rpa" for PARALLEL rtx's we use for
858 call return values. */
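/* Illustrative examples (a sketch): an fb-relative reference such as
   -4[$fb] satisfies "Sd" (the generic src/dest address) as well as
   "Sf", while a stack-relative reference like 2[$sp] satisfies only
   "Ss", because the Sd case below explicitly rejects SP_REGNO
   bases.  */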
859bool
860m32c_matches_constraint_p (rtx value, int constraint)
38b2d076
DD
861{
862 encode_pattern (value);
5fd5d713 863
03dd17b1
NF
864 switch (constraint) {
865 case CONSTRAINT_SF:
866 return (far_addr_space_p (value)
867 && ((RTX_IS ("mr")
868 && A0_OR_PSEUDO (patternr[1])
869 && GET_MODE (patternr[1]) == SImode)
870 || (RTX_IS ("m+^Sri")
871 && A0_OR_PSEUDO (patternr[4])
872 && GET_MODE (patternr[4]) == HImode)
873 || (RTX_IS ("m+^Srs")
874 && A0_OR_PSEUDO (patternr[4])
875 && GET_MODE (patternr[4]) == HImode)
876 || (RTX_IS ("m+^S+ris")
877 && A0_OR_PSEUDO (patternr[5])
878 && GET_MODE (patternr[5]) == HImode)
879 || RTX_IS ("ms")));
880 case CONSTRAINT_Sd:
38b2d076
DD
881 {
882 /* This is the common "src/dest" address */
883 rtx r;
884 if (GET_CODE (value) == MEM && CONSTANT_P (XEXP (value, 0)))
03dd17b1 885 return true;
38b2d076 886 if (RTX_IS ("ms") || RTX_IS ("m+si"))
03dd17b1 887 return true;
07127a0a
DD
888 if (RTX_IS ("m++rii"))
889 {
890 if (REGNO (patternr[3]) == FB_REGNO
891 && INTVAL (patternr[4]) == 0)
03dd17b1 892 return true;
07127a0a 893 }
38b2d076
DD
894 if (RTX_IS ("mr"))
895 r = patternr[1];
896 else if (RTX_IS ("m+ri") || RTX_IS ("m+rs") || RTX_IS ("m+r+si"))
897 r = patternr[2];
898 else
03dd17b1 899 return false;
38b2d076 900 if (REGNO (r) == SP_REGNO)
03dd17b1 901 return false;
38b2d076
DD
902 return m32c_legitimate_address_p (GET_MODE (value), XEXP (value, 0), 1);
903 }
03dd17b1 904 case CONSTRAINT_Sa:
38b2d076
DD
905 {
906 rtx r;
907 if (RTX_IS ("mr"))
908 r = patternr[1];
909 else if (RTX_IS ("m+ri"))
910 r = patternr[2];
911 else
03dd17b1 912 return false;
38b2d076
DD
913 return (IS_REG (r, A0_REGNO) || IS_REG (r, A1_REGNO));
914 }
03dd17b1
NF
915 case CONSTRAINT_Si:
916 return (RTX_IS ("mi") || RTX_IS ("ms") || RTX_IS ("m+si"));
917 case CONSTRAINT_Ss:
918 return ((RTX_IS ("mr")
919 && (IS_REG (patternr[1], SP_REGNO)))
920 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SP_REGNO))));
921 case CONSTRAINT_Sf:
922 return ((RTX_IS ("mr")
923 && (IS_REG (patternr[1], FB_REGNO)))
924 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], FB_REGNO))));
925 case CONSTRAINT_Sb:
926 return ((RTX_IS ("mr")
927 && (IS_REG (patternr[1], SB_REGNO)))
928 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SB_REGNO))));
929 case CONSTRAINT_Sp:
930 /* Absolute addresses 0..0x1fff used for bit addressing (I/O ports) */
931 return (RTX_IS ("mi")
932 && !(INTVAL (patternr[1]) & ~0x1fff));
933 case CONSTRAINT_S1:
934 return r1h_operand (value, QImode);
935 case CONSTRAINT_Rpa:
38b2d076 936 return GET_CODE (value) == PARALLEL;
03dd17b1
NF
937 default:
938 return false;
939 }
38b2d076
DD
940}
941
942/* STACK AND CALLING */
943
944/* Frame Layout */
945
946/* Implements RETURN_ADDR_RTX. Note that R8C and M16C push 24 bits
947 (yes, THREE bytes) onto the stack for the return address, but we
948 don't support pointers bigger than 16 bits on those chips. This
949 will likely wreak havoc with exception unwinding. FIXME. */
950rtx
951m32c_return_addr_rtx (int count)
952{
953 enum machine_mode mode;
954 int offset;
955 rtx ra_mem;
956
957 if (count)
958 return NULL_RTX;
959 /* we want 2[$fb] */
960
961 if (TARGET_A24)
962 {
80b093df
DD
963 /* It's four bytes */
964 mode = PSImode;
38b2d076
DD
965 offset = 4;
966 }
967 else
968 {
969 /* FIXME: it's really 3 bytes */
970 mode = HImode;
971 offset = 2;
972 }
973
974 ra_mem =
0a81f074
RS
975 gen_rtx_MEM (mode, plus_constant (Pmode, gen_rtx_REG (Pmode, FP_REGNO),
976 offset));
38b2d076
DD
977 return copy_to_mode_reg (mode, ra_mem);
978}
979
980/* Implements INCOMING_RETURN_ADDR_RTX. See comment above. */
981rtx
982m32c_incoming_return_addr_rtx (void)
983{
984 /* we want [sp] */
985 return gen_rtx_MEM (PSImode, gen_rtx_REG (PSImode, SP_REGNO));
986}
987
988/* Exception Handling Support */
989
990/* Implements EH_RETURN_DATA_REGNO. Choose registers able to hold
991 pointers. */
992int
993m32c_eh_return_data_regno (int n)
994{
995 switch (n)
996 {
997 case 0:
998 return A0_REGNO;
999 case 1:
c6004917
RIL
1000 if (TARGET_A16)
1001 return R3_REGNO;
1002 else
1003 return R1_REGNO;
38b2d076
DD
1004 default:
1005 return INVALID_REGNUM;
1006 }
1007}
1008
1009/* Implements EH_RETURN_STACKADJ_RTX. Saved and used later in
1010 m32c_emit_eh_epilogue. */
1011rtx
1012m32c_eh_return_stackadj_rtx (void)
1013{
1014 if (!cfun->machine->eh_stack_adjust)
1015 {
1016 rtx sa;
1017
99920b6f 1018 sa = gen_rtx_REG (Pmode, R0_REGNO);
38b2d076
DD
1019 cfun->machine->eh_stack_adjust = sa;
1020 }
1021 return cfun->machine->eh_stack_adjust;
1022}
1023
1024/* Registers That Address the Stack Frame */
1025
1026/* Implements DWARF_FRAME_REGNUM and DBX_REGISTER_NUMBER. Note that
1027 the original spec called for dwarf numbers to vary with register
1028 width as well, for example, r0l, r0, and r2r0 would each have
1029 different dwarf numbers. GCC doesn't support this, and we don't do
1030 it, and gdb seems to like it this way anyway. */
1031unsigned int
1032m32c_dwarf_frame_regnum (int n)
1033{
1034 switch (n)
1035 {
1036 case R0_REGNO:
1037 return 5;
1038 case R1_REGNO:
1039 return 6;
1040 case R2_REGNO:
1041 return 7;
1042 case R3_REGNO:
1043 return 8;
1044 case A0_REGNO:
1045 return 9;
1046 case A1_REGNO:
1047 return 10;
1048 case FB_REGNO:
1049 return 11;
1050 case SB_REGNO:
1051 return 19;
1052
1053 case SP_REGNO:
1054 return 12;
1055 case PC_REGNO:
1056 return 13;
1057 default:
1058 return DWARF_FRAME_REGISTERS + 1;
1059 }
1060}
1061
1062/* The frame looks like this:
1063
1064 ap -> +------------------------------
1065 | Return address (3 or 4 bytes)
1066 | Saved FB (2 or 4 bytes)
1067 fb -> +------------------------------
1068 | local vars
1069 | register saves fb
1070 | through r0 as needed
1071 sp -> +------------------------------
1072*/
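/* A worked example of the layout above (illustrative): on A16 parts,
   eliminating $ap to $fb gives an offset of 5 bytes (3-byte return
   address plus 2-byte saved $fb); on A24 parts it is 8 bytes (4 + 4).
   Eliminating down to $sp additionally adds the pushed-register area
   and the local frame size -- see m32c_initial_elimination_offset.  */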
1073
1074/* We use this to wrap all emitted insns in the prologue. */
1075static rtx
1076F (rtx x)
1077{
1078 RTX_FRAME_RELATED_P (x) = 1;
1079 return x;
1080}
1081
1082/* This maps register numbers to the PUSHM/POPM bitfield, and tells us
1083 how much the stack pointer moves for each, for each cpu family. */
1084static struct
1085{
1086 int reg1;
1087 int bit;
1088 int a16_bytes;
1089 int a24_bytes;
1090} pushm_info[] =
1091{
9d746d5e
DD
1092 /* These are in reverse push (nearest-to-sp) order. */
1093 { R0_REGNO, 0x80, 2, 2 },
38b2d076 1094 { R1_REGNO, 0x40, 2, 2 },
9d746d5e
DD
1095 { R2_REGNO, 0x20, 2, 2 },
1096 { R3_REGNO, 0x10, 2, 2 },
1097 { A0_REGNO, 0x08, 2, 4 },
1098 { A1_REGNO, 0x04, 2, 4 },
1099 { SB_REGNO, 0x02, 2, 4 },
1100 { FB_REGNO, 0x01, 2, 4 }
38b2d076
DD
1101};
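/* For example (a sketch): a pushm saving r1, r2 and fb would use the
   mask 0x40 | 0x20 | 0x01 = 0x61, and would move $sp by 6 bytes on
   A16 (2+2+2) or 8 bytes on A24 (2+2+4), per the table above.  */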
1102
1103#define PUSHM_N (sizeof(pushm_info)/sizeof(pushm_info[0]))
1104
1105/* Returns TRUE if we need to save/restore the given register. We
1106 save everything for exception handlers, so that any register can be
1107 unwound. For interrupt handlers, we save everything if the handler
1108 calls something else (because we don't know what *that* function
1109 might do), but try to be a bit smarter if the handler is a leaf
1110 function. We always save $a0, though, because we use that in the
85f65093 1111 epilogue to copy $fb to $sp. */
38b2d076
DD
1112static int
1113need_to_save (int regno)
1114{
1115 if (fixed_regs[regno])
1116 return 0;
ad516a74 1117 if (crtl->calls_eh_return)
38b2d076
DD
1118 return 1;
1119 if (regno == FP_REGNO)
1120 return 0;
1121 if (cfun->machine->is_interrupt
65655f79
DD
1122 && (!cfun->machine->is_leaf
1123 || (regno == A0_REGNO
1124 && m32c_function_needs_enter ())
1125 ))
38b2d076 1126 return 1;
6fb5fa3c 1127 if (df_regs_ever_live_p (regno)
38b2d076
DD
1128 && (!call_used_regs[regno] || cfun->machine->is_interrupt))
1129 return 1;
1130 return 0;
1131}
1132
1133/* This function contains all the intelligence about saving and
1134 restoring registers. It always figures out the register save set.
1135 When called with PP_justcount, it merely returns the size of the
1136 save set (for eliminating the frame pointer, for example). When
1137 called with PP_pushm or PP_popm, it emits the appropriate
1138 instructions for saving (pushm) or restoring (popm) the
1139 registers. */
1140static int
1141m32c_pushm_popm (Push_Pop_Type ppt)
1142{
1143 int reg_mask = 0;
1144 int byte_count = 0, bytes;
1145 int i;
1146 rtx dwarf_set[PUSHM_N];
1147 int n_dwarfs = 0;
1148 int nosave_mask = 0;
1149
305da3ec
JH
1150 if (crtl->return_rtx
1151 && GET_CODE (crtl->return_rtx) == PARALLEL
ad516a74 1152 && !(crtl->calls_eh_return || cfun->machine->is_interrupt))
38b2d076 1153 {
305da3ec 1154 rtx exp = XVECEXP (crtl->return_rtx, 0, 0);
38b2d076
DD
1155 rtx rv = XEXP (exp, 0);
1156 int rv_bytes = GET_MODE_SIZE (GET_MODE (rv));
1157
1158 if (rv_bytes > 2)
1159 nosave_mask |= 0x20; /* PSI, SI */
1160 else
1161 nosave_mask |= 0xf0; /* DF */
1162 if (rv_bytes > 4)
1163 nosave_mask |= 0x50; /* DI */
1164 }
1165
1166 for (i = 0; i < (int) PUSHM_N; i++)
1167 {
1168 /* Skip if neither register needs saving. */
1169 if (!need_to_save (pushm_info[i].reg1))
1170 continue;
1171
1172 if (pushm_info[i].bit & nosave_mask)
1173 continue;
1174
1175 reg_mask |= pushm_info[i].bit;
1176 bytes = TARGET_A16 ? pushm_info[i].a16_bytes : pushm_info[i].a24_bytes;
1177
1178 if (ppt == PP_pushm)
1179 {
1180 enum machine_mode mode = (bytes == 2) ? HImode : SImode;
1181 rtx addr;
1182
1183 /* Always use stack_pointer_rtx instead of calling
1184 rtx_gen_REG ourselves. Code elsewhere in GCC assumes
1185 that there is a single rtx representing the stack pointer,
1186 namely stack_pointer_rtx, and uses == to recognize it. */
1187 addr = stack_pointer_rtx;
1188
1189 if (byte_count != 0)
1190 addr = gen_rtx_PLUS (GET_MODE (addr), addr, GEN_INT (byte_count));
1191
1192 dwarf_set[n_dwarfs++] =
1193 gen_rtx_SET (VOIDmode,
1194 gen_rtx_MEM (mode, addr),
1195 gen_rtx_REG (mode, pushm_info[i].reg1));
1196 F (dwarf_set[n_dwarfs - 1]);
1197
1198 }
1199 byte_count += bytes;
1200 }
1201
1202 if (cfun->machine->is_interrupt)
1203 {
1204 cfun->machine->intr_pushm = reg_mask & 0xfe;
1205 reg_mask = 0;
1206 byte_count = 0;
1207 }
1208
1209 if (cfun->machine->is_interrupt)
1210 for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
1211 if (need_to_save (i))
1212 {
1213 byte_count += 2;
1214 cfun->machine->intr_pushmem[i - MEM0_REGNO] = 1;
1215 }
1216
1217 if (ppt == PP_pushm && byte_count)
1218 {
1219 rtx note = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (n_dwarfs + 1));
1220 rtx pushm;
1221
1222 if (reg_mask)
1223 {
1224 XVECEXP (note, 0, 0)
1225 = gen_rtx_SET (VOIDmode,
1226 stack_pointer_rtx,
1227 gen_rtx_PLUS (GET_MODE (stack_pointer_rtx),
1228 stack_pointer_rtx,
1229 GEN_INT (-byte_count)));
1230 F (XVECEXP (note, 0, 0));
1231
1232 for (i = 0; i < n_dwarfs; i++)
1233 XVECEXP (note, 0, i + 1) = dwarf_set[i];
1234
1235 pushm = F (emit_insn (gen_pushm (GEN_INT (reg_mask))));
1236
444d6efe 1237 add_reg_note (pushm, REG_FRAME_RELATED_EXPR, note);
38b2d076
DD
1238 }
1239
1240 if (cfun->machine->is_interrupt)
1241 for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
1242 if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
1243 {
1244 if (TARGET_A16)
1245 pushm = emit_insn (gen_pushhi_16 (gen_rtx_REG (HImode, i)));
1246 else
1247 pushm = emit_insn (gen_pushhi_24 (gen_rtx_REG (HImode, i)));
1248 F (pushm);
1249 }
1250 }
1251 if (ppt == PP_popm && byte_count)
1252 {
38b2d076
DD
1253 if (cfun->machine->is_interrupt)
1254 for (i = MEM7_REGNO; i >= MEM0_REGNO; i--)
1255 if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
1256 {
1257 if (TARGET_A16)
b3fdec9e 1258 emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, i)));
38b2d076 1259 else
b3fdec9e 1260 emit_insn (gen_pophi_24 (gen_rtx_REG (HImode, i)));
38b2d076
DD
1261 }
1262 if (reg_mask)
1263 emit_insn (gen_popm (GEN_INT (reg_mask)));
1264 }
1265
1266 return byte_count;
1267}
1268
1269/* Implements INITIAL_ELIMINATION_OFFSET. See the comment above that
1270 diagrams our call frame. */
1271int
1272m32c_initial_elimination_offset (int from, int to)
1273{
1274 int ofs = 0;
1275
1276 if (from == AP_REGNO)
1277 {
1278 if (TARGET_A16)
1279 ofs += 5;
1280 else
1281 ofs += 8;
1282 }
1283
1284 if (to == SP_REGNO)
1285 {
1286 ofs += m32c_pushm_popm (PP_justcount);
1287 ofs += get_frame_size ();
1288 }
1289
1290 /* Account for push rounding. */
1291 if (TARGET_A24)
1292 ofs = (ofs + 1) & ~1;
1293#if DEBUG0
1294 fprintf (stderr, "initial_elimination_offset from=%d to=%d, ofs=%d\n", from,
1295 to, ofs);
1296#endif
1297 return ofs;
1298}
1299
1300/* Passing Function Arguments on the Stack */
1301
38b2d076
DD
1302/* Implements PUSH_ROUNDING. The R8C and M16C have byte stacks, the
1303 M32C has word stacks. */
444d6efe 1304unsigned int
38b2d076
DD
1305m32c_push_rounding (int n)
1306{
1307 if (TARGET_R8C || TARGET_M16C)
1308 return n;
1309 return (n + 1) & ~1;
1310}
1311
1312/* Passing Arguments in Registers */
1313
cd34bbe8
NF
1314/* Implements TARGET_FUNCTION_ARG. Arguments are passed partly in
1315 registers, partly on stack. If our function returns a struct, a
1316 pointer to a buffer for it is at the top of the stack (last thing
1317 pushed). The first few real arguments may be in registers as
1318 follows:
38b2d076
DD
1319
1320 R8C/M16C: arg1 in r1 if it's QI or HI (else it's pushed on stack)
1321 arg2 in r2 if it's HI (else pushed on stack)
1322 rest on stack
1323 M32C: arg1 in r0 if it's QI or HI (else it's pushed on stack)
1324 rest on stack
1325
1326 Structs are not passed in registers, even if they fit. Only
1327 integer and pointer types are passed in registers.
1328
1329 Note that when arg1 doesn't fit in r1, arg2 may still be passed in
1330 r2 if it fits. */
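/* A concrete illustration (a sketch, not ABI documentation): for a
   call f (char a, int b, long c) on R8C/M16C, 'a' is passed in r1 and
   'b' in r2 (both satisfy the QI/HI rules above), while 'c' goes on
   the stack; on M32C only 'a' would be passed in a register (r0), and
   'b' and 'c' both go on the stack.  */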
1331#undef TARGET_FUNCTION_ARG
1332#define TARGET_FUNCTION_ARG m32c_function_arg
1333static rtx
d5cc9181 1334m32c_function_arg (cumulative_args_t ca_v,
cd34bbe8 1335 enum machine_mode mode, const_tree type, bool named)
38b2d076 1336{
d5cc9181
JR
1337 CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
1338
38b2d076
DD
1339 /* Can return a reg, parallel, or 0 for stack */
1340 rtx rv = NULL_RTX;
1341#if DEBUG0
1342 fprintf (stderr, "func_arg %d (%s, %d)\n",
1343 ca->parm_num, mode_name[mode], named);
1344 debug_tree (type);
1345#endif
1346
1347 if (mode == VOIDmode)
1348 return GEN_INT (0);
1349
1350 if (ca->force_mem || !named)
1351 {
1352#if DEBUG0
1353 fprintf (stderr, "func arg: force %d named %d, mem\n", ca->force_mem,
1354 named);
1355#endif
1356 return NULL_RTX;
1357 }
1358
1359 if (type && INTEGRAL_TYPE_P (type) && POINTER_TYPE_P (type))
1360 return NULL_RTX;
1361
9d746d5e
DD
1362 if (type && AGGREGATE_TYPE_P (type))
1363 return NULL_RTX;
1364
38b2d076
DD
1365 switch (ca->parm_num)
1366 {
1367 case 1:
1368 if (GET_MODE_SIZE (mode) == 1 || GET_MODE_SIZE (mode) == 2)
1369 rv = gen_rtx_REG (mode, TARGET_A16 ? R1_REGNO : R0_REGNO);
1370 break;
1371
1372 case 2:
1373 if (TARGET_A16 && GET_MODE_SIZE (mode) == 2)
1374 rv = gen_rtx_REG (mode, R2_REGNO);
1375 break;
1376 }
1377
1378#if DEBUG0
1379 debug_rtx (rv);
1380#endif
1381 return rv;
1382}
1383
1384#undef TARGET_PASS_BY_REFERENCE
1385#define TARGET_PASS_BY_REFERENCE m32c_pass_by_reference
1386static bool
d5cc9181 1387m32c_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
38b2d076 1388 enum machine_mode mode ATTRIBUTE_UNUSED,
586de218 1389 const_tree type ATTRIBUTE_UNUSED,
38b2d076
DD
1390 bool named ATTRIBUTE_UNUSED)
1391{
1392 return 0;
1393}
1394
1395/* Implements INIT_CUMULATIVE_ARGS. */
1396void
1397m32c_init_cumulative_args (CUMULATIVE_ARGS * ca,
9d746d5e 1398 tree fntype,
38b2d076 1399 rtx libname ATTRIBUTE_UNUSED,
9d746d5e 1400 tree fndecl,
38b2d076
DD
1401 int n_named_args ATTRIBUTE_UNUSED)
1402{
9d746d5e
DD
1403 if (fntype && aggregate_value_p (TREE_TYPE (fntype), fndecl))
1404 ca->force_mem = 1;
1405 else
1406 ca->force_mem = 0;
38b2d076
DD
1407 ca->parm_num = 1;
1408}
1409
cd34bbe8
NF
1410/* Implements TARGET_FUNCTION_ARG_ADVANCE. force_mem is set for
1411 functions returning structures, so we always reset that. Otherwise,
1412 we only need to know the sequence number of the argument to know what
1413 to do with it. */
1414#undef TARGET_FUNCTION_ARG_ADVANCE
1415#define TARGET_FUNCTION_ARG_ADVANCE m32c_function_arg_advance
1416static void
d5cc9181 1417m32c_function_arg_advance (cumulative_args_t ca_v,
38b2d076 1418 enum machine_mode mode ATTRIBUTE_UNUSED,
cd34bbe8
NF
1419 const_tree type ATTRIBUTE_UNUSED,
1420 bool named ATTRIBUTE_UNUSED)
38b2d076 1421{
d5cc9181
JR
1422 CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
1423
38b2d076
DD
1424 if (ca->force_mem)
1425 ca->force_mem = 0;
9d746d5e
DD
1426 else
1427 ca->parm_num++;
38b2d076
DD
1428}
1429
c2ed6cf8
NF
1430/* Implements TARGET_FUNCTION_ARG_BOUNDARY. */
1431#undef TARGET_FUNCTION_ARG_BOUNDARY
1432#define TARGET_FUNCTION_ARG_BOUNDARY m32c_function_arg_boundary
1433static unsigned int
1434m32c_function_arg_boundary (enum machine_mode mode ATTRIBUTE_UNUSED,
1435 const_tree type ATTRIBUTE_UNUSED)
1436{
1437 return (TARGET_A16 ? 8 : 16);
1438}
1439
38b2d076
DD
1440/* Implements FUNCTION_ARG_REGNO_P. */
1441int
1442m32c_function_arg_regno_p (int r)
1443{
1444 if (TARGET_A24)
1445 return (r == R0_REGNO);
1446 return (r == R1_REGNO || r == R2_REGNO);
1447}
1448
e9555b13 1449/* HImode and PSImode are the two "native" modes as far as GCC is
85f65093 1450 concerned, but the chips also support a 32-bit mode which is used
e9555b13
DD
1451 for some opcodes in R8C/M16C and for reset vectors and such. */
1452#undef TARGET_VALID_POINTER_MODE
1453#define TARGET_VALID_POINTER_MODE m32c_valid_pointer_mode
23fed240 1454static bool
e9555b13
DD
1455m32c_valid_pointer_mode (enum machine_mode mode)
1456{
e9555b13
DD
1457 if (mode == HImode
1458 || mode == PSImode
1459 || mode == SImode
1460 )
1461 return 1;
1462 return 0;
1463}
1464
38b2d076
DD
1465/* How Scalar Function Values Are Returned */
1466
2a31793e 1467/* Implements TARGET_LIBCALL_VALUE. Most values are returned in $r0, or some
38b2d076
DD
1468 combination of registers starting there (r2r0 for longs, r3r1r2r0
1469 for long long, r3r2r1r0 for doubles), except that that ABI
1470 currently doesn't work because it ends up using all available
1471 general registers and gcc often can't compile it. So, instead, we
1472 return anything bigger than 16 bits in "mem0" (effectively, a
1473 memory location). */
2a31793e
AS
1474
1475#undef TARGET_LIBCALL_VALUE
1476#define TARGET_LIBCALL_VALUE m32c_libcall_value
1477
1478static rtx
1479m32c_libcall_value (enum machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
38b2d076
DD
1480{
1481 /* return reg or parallel */
1482#if 0
1483 /* FIXME: GCC has difficulty returning large values in registers,
1484 because that ties up most of the general registers and gives the
1485 register allocator little to work with. Until we can resolve
1486 this, large values are returned in memory. */
1487 if (mode == DFmode)
1488 {
1489 rtx rv;
1490
1491 rv = gen_rtx_PARALLEL (mode, rtvec_alloc (4));
1492 XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
1493 gen_rtx_REG (HImode,
1494 R0_REGNO),
1495 GEN_INT (0));
1496 XVECEXP (rv, 0, 1) = gen_rtx_EXPR_LIST (VOIDmode,
1497 gen_rtx_REG (HImode,
1498 R1_REGNO),
1499 GEN_INT (2));
1500 XVECEXP (rv, 0, 2) = gen_rtx_EXPR_LIST (VOIDmode,
1501 gen_rtx_REG (HImode,
1502 R2_REGNO),
1503 GEN_INT (4));
1504 XVECEXP (rv, 0, 3) = gen_rtx_EXPR_LIST (VOIDmode,
1505 gen_rtx_REG (HImode,
1506 R3_REGNO),
1507 GEN_INT (6));
1508 return rv;
1509 }
1510
1511 if (TARGET_A24 && GET_MODE_SIZE (mode) > 2)
1512 {
1513 rtx rv;
1514
1515 rv = gen_rtx_PARALLEL (mode, rtvec_alloc (1));
1516 XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
1517 gen_rtx_REG (mode,
1518 R0_REGNO),
1519 GEN_INT (0));
1520 return rv;
1521 }
1522#endif
1523
1524 if (GET_MODE_SIZE (mode) > 2)
1525 return gen_rtx_REG (mode, MEM0_REGNO);
1526 return gen_rtx_REG (mode, R0_REGNO);
1527}
1528
2a31793e 1529/* Implements TARGET_FUNCTION_VALUE. Functions and libcalls have the same
38b2d076 1530 conventions. */
2a31793e
AS
1531
1532#undef TARGET_FUNCTION_VALUE
1533#define TARGET_FUNCTION_VALUE m32c_function_value
1534
1535static rtx
1536m32c_function_value (const_tree valtype,
1537 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
1538 bool outgoing ATTRIBUTE_UNUSED)
38b2d076
DD
1539{
1540 /* return reg or parallel */
586de218 1541 const enum machine_mode mode = TYPE_MODE (valtype);
2a31793e
AS
1542 return m32c_libcall_value (mode, NULL_RTX);
1543}
1544
f28f2337
AS
1545/* Implements TARGET_FUNCTION_VALUE_REGNO_P. */
1546
1547#undef TARGET_FUNCTION_VALUE_REGNO_P
1548#define TARGET_FUNCTION_VALUE_REGNO_P m32c_function_value_regno_p
2a31793e 1549
f28f2337 1550static bool
2a31793e
AS
1551m32c_function_value_regno_p (const unsigned int regno)
1552{
1553 return (regno == R0_REGNO || regno == MEM0_REGNO);
38b2d076
DD
1554}
1555
1556/* How Large Values Are Returned */
1557
1558/* We return structures by pushing the address on the stack, even if
1559 we use registers for the first few "real" arguments. */
1560#undef TARGET_STRUCT_VALUE_RTX
1561#define TARGET_STRUCT_VALUE_RTX m32c_struct_value_rtx
1562static rtx
1563m32c_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
1564 int incoming ATTRIBUTE_UNUSED)
1565{
1566 return 0;
1567}
1568
1569/* Function Entry and Exit */
1570
1571/* Implements EPILOGUE_USES. Interrupts restore all registers. */
1572int
1573m32c_epilogue_uses (int regno ATTRIBUTE_UNUSED)
1574{
1575 if (cfun->machine->is_interrupt)
1576 return 1;
1577 return 0;
1578}
1579
1580/* Implementing the Varargs Macros */
1581
1582#undef TARGET_STRICT_ARGUMENT_NAMING
1583#define TARGET_STRICT_ARGUMENT_NAMING m32c_strict_argument_naming
1584static bool
d5cc9181 1585m32c_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
38b2d076
DD
1586{
1587 return 1;
1588}
1589
1590/* Trampolines for Nested Functions */
1591
1592/*
1593 m16c:
1594 1 0000 75C43412 mov.w #0x1234,a0
1595 2 0004 FC000000 jmp.a label
1596
1597 m32c:
1598 1 0000 BC563412 mov.l:s #0x123456,a0
1599 2 0004 CC000000 jmp.a label
1600*/
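/* In words (an illustrative note): both sequences above load an
   immediate -- the static chain value -- into $a0 and then jmp.a to
   the nested function.  m32c_trampoline_init below fills in the chain
   value (at offset 2 on A16, offset 1 on A24) and the function
   address (at offset 5 on both).  */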
1601
1602/* Implements TRAMPOLINE_SIZE. */
1603int
1604m32c_trampoline_size (void)
1605{
1606 /* Allocate extra space so we can avoid the messy shifts when we
1607 initialize the trampoline; we just write past the end of the
1608 opcode. */
1609 return TARGET_A16 ? 8 : 10;
1610}
1611
1612/* Implements TRAMPOLINE_ALIGNMENT. */
1613int
1614m32c_trampoline_alignment (void)
1615{
1616 return 2;
1617}
1618
229fbccb
RH
1619/* Implements TARGET_TRAMPOLINE_INIT. */
1620
1621#undef TARGET_TRAMPOLINE_INIT
1622#define TARGET_TRAMPOLINE_INIT m32c_trampoline_init
1623static void
1624m32c_trampoline_init (rtx m_tramp, tree fndecl, rtx chainval)
38b2d076 1625{
229fbccb
RH
1626 rtx function = XEXP (DECL_RTL (fndecl), 0);
1627
1628#define A0(m,i) adjust_address (m_tramp, m, i)
38b2d076
DD
1629 if (TARGET_A16)
1630 {
1631 /* Note: we subtract a "word" because the moves want signed
1632 constants, not unsigned constants. */
1633 emit_move_insn (A0 (HImode, 0), GEN_INT (0xc475 - 0x10000));
1634 emit_move_insn (A0 (HImode, 2), chainval);
1635 emit_move_insn (A0 (QImode, 4), GEN_INT (0xfc - 0x100));
85f65093
KH
1636 /* We use 16-bit addresses here, but store the zero to turn it
1637 into a 24-bit offset. */
38b2d076
DD
1638 emit_move_insn (A0 (HImode, 5), function);
1639 emit_move_insn (A0 (QImode, 7), GEN_INT (0x00));
1640 }
1641 else
1642 {
1643 /* Note that the PSI moves actually write 4 bytes. Make sure we
1644 write stuff out in the right order, and leave room for the
1645 extra byte at the end. */
1646 emit_move_insn (A0 (QImode, 0), GEN_INT (0xbc - 0x100));
1647 emit_move_insn (A0 (PSImode, 1), chainval);
1648 emit_move_insn (A0 (QImode, 4), GEN_INT (0xcc - 0x100));
1649 emit_move_insn (A0 (PSImode, 5), function);
1650 }
1651#undef A0
1652}
1653
1654/* Addressing Modes */
1655
c6c3dba9
PB
1656/* The r8c/m32c family supports a wide range of non-orthogonal
1657 addressing modes, including the ability to double-indirect on *some*
1658 of them. Not all insns support all modes, either, but we rely on
1659 predicates and constraints to deal with that. */
1660#undef TARGET_LEGITIMATE_ADDRESS_P
1661#define TARGET_LEGITIMATE_ADDRESS_P m32c_legitimate_address_p
1662bool
1663m32c_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
38b2d076
DD
1664{
1665 int mode_adjust;
1666 if (CONSTANT_P (x))
1667 return 1;
1668
5fd5d713
DD
1669 if (TARGET_A16 && GET_MODE (x) != HImode && GET_MODE (x) != SImode)
1670 return 0;
1671 if (TARGET_A24 && GET_MODE (x) != PSImode)
1672 return 0;
1673
38b2d076
DD
1674 /* Wide references to memory will be split after reload, so we must
1675 ensure that all parts of such splits remain legitimate
1676 addresses. */
1677 mode_adjust = GET_MODE_SIZE (mode) - 1;
1678
1679 /* allowing PLUS yields mem:HI(plus:SI(mem:SI(plus:SI in m32c_split_move */
1680 if (GET_CODE (x) == PRE_DEC
1681 || GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_MODIFY)
1682 {
1683 return (GET_CODE (XEXP (x, 0)) == REG
1684 && REGNO (XEXP (x, 0)) == SP_REGNO);
1685 }
1686
1687#if 0
1688 /* This is the double indirection detection, but it currently
1689 doesn't work as cleanly as this code implies, so until we've had
1690 a chance to debug it, leave it disabled. */
1691 if (TARGET_A24 && GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) != PLUS)
1692 {
1693#if DEBUG_DOUBLE
1694 fprintf (stderr, "double indirect\n");
1695#endif
1696 x = XEXP (x, 0);
1697 }
1698#endif
1699
1700 encode_pattern (x);
1701 if (RTX_IS ("r"))
1702 {
1703 /* Most indexable registers can be used without displacements,
1704 although some of them will be emitted with an explicit zero
1705 to please the assembler. */
1706 switch (REGNO (patternr[0]))
1707 {
38b2d076
DD
1708 case A1_REGNO:
1709 case SB_REGNO:
1710 case FB_REGNO:
1711 case SP_REGNO:
5fd5d713
DD
1712 if (TARGET_A16 && GET_MODE (x) == SImode)
1713 return 0;
1714 case A0_REGNO:
38b2d076
DD
1715 return 1;
1716
1717 default:
1718 if (IS_PSEUDO (patternr[0], strict))
1719 return 1;
1720 return 0;
1721 }
1722 }
5fd5d713
DD
1723
1724 if (TARGET_A16 && GET_MODE (x) == SImode)
1725 return 0;
1726
38b2d076
DD
1727 if (RTX_IS ("+ri"))
1728 {
1729 /* This is more interesting, because different base registers
1730 allow for different displacements - both range and signedness
1731 - and it differs from chip series to chip series too. */
1732 int rn = REGNO (patternr[1]);
1733 HOST_WIDE_INT offs = INTVAL (patternr[2]);
1734 switch (rn)
1735 {
1736 case A0_REGNO:
1737 case A1_REGNO:
1738 case SB_REGNO:
1739 /* The syntax only allows positive offsets, but when the
1740 offsets span the entire memory range, we can simulate
1741 negative offsets by wrapping. */
1742 if (TARGET_A16)
1743 return (offs >= -65536 && offs <= 65535 - mode_adjust);
1744 if (rn == SB_REGNO)
1745 return (offs >= 0 && offs <= 65535 - mode_adjust);
1746 /* A0 or A1 */
1747 return (offs >= -16777216 && offs <= 16777215);
1748
1749 case FB_REGNO:
1750 if (TARGET_A16)
1751 return (offs >= -128 && offs <= 127 - mode_adjust);
1752 return (offs >= -65536 && offs <= 65535 - mode_adjust);
1753
1754 case SP_REGNO:
1755 return (offs >= -128 && offs <= 127 - mode_adjust);
1756
1757 default:
1758 if (IS_PSEUDO (patternr[1], strict))
1759 return 1;
1760 return 0;
1761 }
1762 }
1763 if (RTX_IS ("+rs") || RTX_IS ("+r+si"))
1764 {
1765 rtx reg = patternr[1];
1766
1767 /* We don't know where the symbol is, so only allow base
1768 registers which support displacements spanning the whole
1769 address range. */
1770 switch (REGNO (reg))
1771 {
1772 case A0_REGNO:
1773 case A1_REGNO:
1774 /* $sb needs a secondary reload, but since it's involved in
1775 memory address reloads too, we don't deal with it very
1776 well. */
1777 /* case SB_REGNO: */
1778 return 1;
1779 default:
1780 if (IS_PSEUDO (reg, strict))
1781 return 1;
1782 return 0;
1783 }
1784 }
1785 return 0;
1786}
1787
1788/* Implements REG_OK_FOR_BASE_P. */
1789int
1790m32c_reg_ok_for_base_p (rtx x, int strict)
1791{
1792 if (GET_CODE (x) != REG)
1793 return 0;
1794 switch (REGNO (x))
1795 {
1796 case A0_REGNO:
1797 case A1_REGNO:
1798 case SB_REGNO:
1799 case FB_REGNO:
1800 case SP_REGNO:
1801 return 1;
1802 default:
1803 if (IS_PSEUDO (x, strict))
1804 return 1;
1805 return 0;
1806 }
1807}
1808
04aff2c0 1809/* We have three choices for choosing fb->aN offsets. If we choose -128,
85f65093 1810 we need one MOVA -128[fb],aN opcode and 16-bit aN displacements,
04aff2c0
DD
1811 like this:
1812 EB 4B FF mova -128[$fb],$a0
1813 D8 0C FF FF mov.w:Q #0,-1[$a0]
1814
85f65093 1815 Alternately, we subtract the frame size, and hopefully use 8-bit aN
04aff2c0
DD
1816 displacements:
1817 7B F4 stc $fb,$a0
1818 77 54 00 01 sub #256,$a0
1819 D8 08 01 mov.w:Q #0,1[$a0]
1820
1821 If we don't offset (i.e. offset by zero), we end up with:
1822 7B F4 stc $fb,$a0
1823 D8 0C 00 FF mov.w:Q #0,-256[$a0]
1824
1825 We have to subtract *something* so that we have a PLUS rtx to mark
1826 that we've done this reload. The -128 offset will never result in
85f65093 1827 an 8-bit aN offset, and the payoff for the second case is five
04aff2c0
DD
1828 loads *if* those loads are within 256 bytes of the other end of the
1829 frame, so the third case seems best. Note that we subtract the
1830 zero, but detect that in the addhi3 pattern. */
1831
ea471af0
JM
1832#define BIG_FB_ADJ 0
1833
38b2d076
DD
1834/* Implements LEGITIMIZE_ADDRESS. The only address we really have to
1835 worry about is frame base offsets, as $fb has a limited
1836 displacement range. We deal with this by attempting to reload $fb
1837 itself into an address register; that seems to result in the best
1838 code. */
506d7b68
PB
1839#undef TARGET_LEGITIMIZE_ADDRESS
1840#define TARGET_LEGITIMIZE_ADDRESS m32c_legitimize_address
1841static rtx
1842m32c_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
1843 enum machine_mode mode)
38b2d076
DD
1844{
1845#if DEBUG0
1846 fprintf (stderr, "m32c_legitimize_address for mode %s\n", mode_name[mode]);
506d7b68 1847 debug_rtx (x);
38b2d076
DD
1848 fprintf (stderr, "\n");
1849#endif
1850
506d7b68
PB
1851 if (GET_CODE (x) == PLUS
1852 && GET_CODE (XEXP (x, 0)) == REG
1853 && REGNO (XEXP (x, 0)) == FB_REGNO
1854 && GET_CODE (XEXP (x, 1)) == CONST_INT
1855 && (INTVAL (XEXP (x, 1)) < -128
1856 || INTVAL (XEXP (x, 1)) > (128 - GET_MODE_SIZE (mode))))
1857 {
1858 /* reload FB to A_REGS */
1859       rtx temp = gen_reg_rtx (Pmode);
1860 x = copy_rtx (x);
1861 emit_insn (gen_rtx_SET (VOIDmode, temp, XEXP (x, 0)));
1862 XEXP (x, 0) = temp;
1863 }
1864
1865   return x;
1866}
1867
1868/* Implements LEGITIMIZE_RELOAD_ADDRESS. See comment above. */
1869int
1870m32c_legitimize_reload_address (rtx * x,
1871 enum machine_mode mode,
1872 int opnum,
1873 int type, int ind_levels ATTRIBUTE_UNUSED)
1874{
1875#if DEBUG0
1876 fprintf (stderr, "\nm32c_legitimize_reload_address for mode %s\n",
1877 mode_name[mode]);
1878 debug_rtx (*x);
1879#endif
1880
1881 /* At one point, this function tried to get $fb copied to an address
1882 register, which in theory would maximize sharing, but gcc was
1883 *also* still trying to reload the whole address, and we'd run out
1884 of address registers. So we let gcc do the naive (but safe)
1885 reload instead, when the above function doesn't handle it for
1886 us.
1887
1888 The code below is a second attempt at the above. */
1889
1890 if (GET_CODE (*x) == PLUS
1891 && GET_CODE (XEXP (*x, 0)) == REG
1892 && REGNO (XEXP (*x, 0)) == FB_REGNO
1893 && GET_CODE (XEXP (*x, 1)) == CONST_INT
1894 && (INTVAL (XEXP (*x, 1)) < -128
1895 || INTVAL (XEXP (*x, 1)) > (128 - GET_MODE_SIZE (mode))))
1896 {
1897 rtx sum;
1898 int offset = INTVAL (XEXP (*x, 1));
1899 int adjustment = -BIG_FB_ADJ;
1900
1901 sum = gen_rtx_PLUS (Pmode, XEXP (*x, 0),
1902 GEN_INT (adjustment));
1903 *x = gen_rtx_PLUS (Pmode, sum, GEN_INT (offset - adjustment));
1904 if (type == RELOAD_OTHER)
1905 type = RELOAD_FOR_OTHER_ADDRESS;
1906 push_reload (sum, NULL_RTX, &XEXP (*x, 0), NULL,
1907 A_REGS, Pmode, VOIDmode, 0, 0, opnum,
1908 		   (enum reload_type) type);
1909 return 1;
1910 }
1911
1912 if (GET_CODE (*x) == PLUS
1913 && GET_CODE (XEXP (*x, 0)) == PLUS
1914 && GET_CODE (XEXP (XEXP (*x, 0), 0)) == REG
1915 && REGNO (XEXP (XEXP (*x, 0), 0)) == FB_REGNO
1916 && GET_CODE (XEXP (XEXP (*x, 0), 1)) == CONST_INT
1917 && GET_CODE (XEXP (*x, 1)) == CONST_INT
1918 )
1919 {
1920 if (type == RELOAD_OTHER)
1921 type = RELOAD_FOR_OTHER_ADDRESS;
1922 push_reload (XEXP (*x, 0), NULL_RTX, &XEXP (*x, 0), NULL,
1923 A_REGS, Pmode, VOIDmode, 0, 0, opnum,
1924 		   (enum reload_type) type);
1925 return 1;
1926 }
1927
1928 return 0;
1929}
1930
1931/* Return the appropriate mode for a named address pointer. */
1932#undef TARGET_ADDR_SPACE_POINTER_MODE
1933#define TARGET_ADDR_SPACE_POINTER_MODE m32c_addr_space_pointer_mode
1934static enum machine_mode
1935m32c_addr_space_pointer_mode (addr_space_t addrspace)
1936{
1937 switch (addrspace)
1938 {
1939 case ADDR_SPACE_GENERIC:
1940 return TARGET_A24 ? PSImode : HImode;
1941 case ADDR_SPACE_FAR:
1942 return SImode;
1943 default:
1944 gcc_unreachable ();
1945 }
1946}
1947
1948/* Return the appropriate mode for a named address address. */
1949#undef TARGET_ADDR_SPACE_ADDRESS_MODE
1950#define TARGET_ADDR_SPACE_ADDRESS_MODE m32c_addr_space_address_mode
1951static enum machine_mode
1952m32c_addr_space_address_mode (addr_space_t addrspace)
1953{
1954 switch (addrspace)
1955 {
1956 case ADDR_SPACE_GENERIC:
1957 return TARGET_A24 ? PSImode : HImode;
1958 case ADDR_SPACE_FAR:
1959 return SImode;
1960 default:
1961 gcc_unreachable ();
1962 }
1963}
1964
1965/* Like m32c_legitimate_address_p, except with named addresses. */
1966#undef TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P
1967#define TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P \
1968 m32c_addr_space_legitimate_address_p
1969static bool
1970m32c_addr_space_legitimate_address_p (enum machine_mode mode, rtx x,
1971 bool strict, addr_space_t as)
1972{
1973 if (as == ADDR_SPACE_FAR)
1974 {
1975 if (TARGET_A24)
1976 return 0;
1977 encode_pattern (x);
1978 if (RTX_IS ("r"))
1979 {
1980 if (GET_MODE (x) != SImode)
1981 return 0;
1982 switch (REGNO (patternr[0]))
1983 {
1984 case A0_REGNO:
1985 return 1;
1986
1987 default:
1988 if (IS_PSEUDO (patternr[0], strict))
1989 return 1;
1990 return 0;
1991 }
1992 }
1993 if (RTX_IS ("+^Sri"))
1994 {
1995 int rn = REGNO (patternr[3]);
1996 HOST_WIDE_INT offs = INTVAL (patternr[4]);
1997 if (GET_MODE (patternr[3]) != HImode)
1998 return 0;
1999 switch (rn)
2000 {
2001 case A0_REGNO:
2002 return (offs >= 0 && offs <= 0xfffff);
2003
2004 default:
2005 if (IS_PSEUDO (patternr[3], strict))
2006 return 1;
2007 return 0;
2008 }
2009 }
2010 if (RTX_IS ("+^Srs"))
2011 {
2012 int rn = REGNO (patternr[3]);
2013 if (GET_MODE (patternr[3]) != HImode)
2014 return 0;
2015 switch (rn)
2016 {
2017 case A0_REGNO:
2018 return 1;
2019
2020 default:
2021 if (IS_PSEUDO (patternr[3], strict))
2022 return 1;
2023 return 0;
2024 }
2025 }
2026 if (RTX_IS ("+^S+ris"))
2027 {
2028 int rn = REGNO (patternr[4]);
2029 if (GET_MODE (patternr[4]) != HImode)
2030 return 0;
2031 switch (rn)
2032 {
2033 case A0_REGNO:
2034 return 1;
2035
2036 default:
2037 if (IS_PSEUDO (patternr[4], strict))
2038 return 1;
2039 return 0;
2040 }
2041 }
2042 if (RTX_IS ("s"))
2043 {
2044 return 1;
2045 }
2046 return 0;
2047 }
2048
2049 else if (as != ADDR_SPACE_GENERIC)
2050 gcc_unreachable ();
2051
2052 return m32c_legitimate_address_p (mode, x, strict);
2053}
2054
2055/* Like m32c_legitimate_address, except with named address support. */
2056#undef TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS
2057#define TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS m32c_addr_space_legitimize_address
2058static rtx
2059m32c_addr_space_legitimize_address (rtx x, rtx oldx, enum machine_mode mode,
2060 addr_space_t as)
2061{
2062 if (as != ADDR_SPACE_GENERIC)
2063 {
2064#if DEBUG0
2065 fprintf (stderr, "\033[36mm32c_addr_space_legitimize_address for mode %s\033[0m\n", mode_name[mode]);
2066 debug_rtx (x);
2067 fprintf (stderr, "\n");
2068#endif
2069
2070 if (GET_CODE (x) != REG)
2071 {
2072 x = force_reg (SImode, x);
2073 }
2074 return x;
2075 }
2076
2077 return m32c_legitimize_address (x, oldx, mode);
2078}
2079
2080/* Determine if one named address space is a subset of another. */
2081#undef TARGET_ADDR_SPACE_SUBSET_P
2082#define TARGET_ADDR_SPACE_SUBSET_P m32c_addr_space_subset_p
2083static bool
2084m32c_addr_space_subset_p (addr_space_t subset, addr_space_t superset)
2085{
2086 gcc_assert (subset == ADDR_SPACE_GENERIC || subset == ADDR_SPACE_FAR);
2087 gcc_assert (superset == ADDR_SPACE_GENERIC || superset == ADDR_SPACE_FAR);
2088
2089 if (subset == superset)
2090 return true;
2091
2092 else
2093 return (subset == ADDR_SPACE_GENERIC && superset == ADDR_SPACE_FAR);
2094}
2095
2096#undef TARGET_ADDR_SPACE_CONVERT
2097#define TARGET_ADDR_SPACE_CONVERT m32c_addr_space_convert
2098/* Convert from one address space to another. */
2099static rtx
2100m32c_addr_space_convert (rtx op, tree from_type, tree to_type)
2101{
2102 addr_space_t from_as = TYPE_ADDR_SPACE (TREE_TYPE (from_type));
2103 addr_space_t to_as = TYPE_ADDR_SPACE (TREE_TYPE (to_type));
2104 rtx result;
2105
2106 gcc_assert (from_as == ADDR_SPACE_GENERIC || from_as == ADDR_SPACE_FAR);
2107 gcc_assert (to_as == ADDR_SPACE_GENERIC || to_as == ADDR_SPACE_FAR);
2108
2109 if (to_as == ADDR_SPACE_GENERIC && from_as == ADDR_SPACE_FAR)
2110 {
2111 /* This is unpredictable, as we're truncating off usable address
2112 bits. */
2113
2114 result = gen_reg_rtx (HImode);
2115 emit_move_insn (result, simplify_subreg (HImode, op, SImode, 0));
2116 return result;
2117 }
2118 else if (to_as == ADDR_SPACE_FAR && from_as == ADDR_SPACE_GENERIC)
2119 {
2120 /* This always works. */
2121 result = gen_reg_rtx (SImode);
2122 emit_insn (gen_zero_extendhisi2 (result, op));
2123 return result;
2124 }
2125 else
2126 gcc_unreachable ();
2127}
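/* For illustration, a minimal source-level sketch of the two
   conversions handled above, assuming the port exposes
   ADDR_SPACE_FAR through a "__far" qualifier (the spelling of the
   qualifier is an assumption here, registered elsewhere in the port):

     char c;
     char __far *fp = &c;      // generic -> far: zero-extend the
                               // 16-bit pointer (always safe).
     char *np = (char *) fp;   // far -> generic: truncate to HImode,
                               // possibly discarding address bits.  */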
2128
2129/* Condition Code Status */
2130
2131#undef TARGET_FIXED_CONDITION_CODE_REGS
2132#define TARGET_FIXED_CONDITION_CODE_REGS m32c_fixed_condition_code_regs
2133static bool
2134m32c_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
2135{
2136 *p1 = FLG_REGNO;
2137 *p2 = INVALID_REGNUM;
2138 return true;
2139}
2140
2141/* Describing Relative Costs of Operations */
2142
2143/* Implements TARGET_REGISTER_MOVE_COST.  We make impossible moves
2144 prohibitively expensive, like trying to put QIs in r2/r3 (there are
2145 no opcodes to do that). We also discourage use of mem* registers
2146 since they're really memory. */
2147
2148#undef TARGET_REGISTER_MOVE_COST
2149#define TARGET_REGISTER_MOVE_COST m32c_register_move_cost
2150
2151static int
2152m32c_register_move_cost (enum machine_mode mode, reg_class_t from,
2153 reg_class_t to)
2154{
2155 int cost = COSTS_N_INSNS (3);
2156 HARD_REG_SET cc;
2157
2158/* FIXME: pick real values, but not 2 for now. */
2159 COPY_HARD_REG_SET (cc, reg_class_contents[(int) from]);
2160 IOR_HARD_REG_SET (cc, reg_class_contents[(int) to]);
2161
2162 if (mode == QImode
2163 && hard_reg_set_intersect_p (cc, reg_class_contents[R23_REGS]))
2164     {
2165       if (hard_reg_set_subset_p (cc, reg_class_contents[R23_REGS]))
2166 cost = COSTS_N_INSNS (1000);
2167 else
2168 cost = COSTS_N_INSNS (80);
2169 }
2170
2171 if (!class_can_hold_mode (from, mode) || !class_can_hold_mode (to, mode))
2172 cost = COSTS_N_INSNS (1000);
2173
2174   if (reg_classes_intersect_p (from, CR_REGS))
2175 cost += COSTS_N_INSNS (5);
2176
2177   if (reg_classes_intersect_p (to, CR_REGS))
2178 cost += COSTS_N_INSNS (5);
2179
2180 if (from == MEM_REGS || to == MEM_REGS)
2181 cost += COSTS_N_INSNS (50);
2182 else if (reg_classes_intersect_p (from, MEM_REGS)
2183 || reg_classes_intersect_p (to, MEM_REGS))
2184 cost += COSTS_N_INSNS (10);
2185
2186#if DEBUG0
2187 fprintf (stderr, "register_move_cost %s from %s to %s = %d\n",
2188 mode_name[mode], class_names[(int) from], class_names[(int) to],
2189 cost);
2190#endif
2191 return cost;
2192}
2193
2194/* Implements TARGET_MEMORY_MOVE_COST. */
2195
2196#undef TARGET_MEMORY_MOVE_COST
2197#define TARGET_MEMORY_MOVE_COST m32c_memory_move_cost
2198
2199static int
2200 m32c_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2201 reg_class_t rclass ATTRIBUTE_UNUSED,
2202 bool in ATTRIBUTE_UNUSED)
2203{
2204 /* FIXME: pick real values. */
2205 return COSTS_N_INSNS (10);
2206}
2207
2208/* Here we try to describe when we use multiple opcodes for one RTX so
2209 that gcc knows when to use them. */
2210#undef TARGET_RTX_COSTS
2211#define TARGET_RTX_COSTS m32c_rtx_costs
2212static bool
2213m32c_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
2214 int *total, bool speed ATTRIBUTE_UNUSED)
2215{
2216 switch (code)
2217 {
2218 case REG:
2219 if (REGNO (x) >= MEM0_REGNO && REGNO (x) <= MEM7_REGNO)
2220 *total += COSTS_N_INSNS (500);
2221 else
2222 *total += COSTS_N_INSNS (1);
2223 return true;
2224
2225 case ASHIFT:
2226 case LSHIFTRT:
2227 case ASHIFTRT:
2228 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
2229 {
2230 /* mov.b r1l, r1h */
2231 *total += COSTS_N_INSNS (1);
2232 return true;
2233 }
2234 if (INTVAL (XEXP (x, 1)) > 8
2235 || INTVAL (XEXP (x, 1)) < -8)
2236 {
2237 /* mov.b #N, r1l */
2238 /* mov.b r1l, r1h */
2239 *total += COSTS_N_INSNS (2);
2240 return true;
2241 }
2242 return true;
2243
2244 case LE:
2245 case LEU:
2246 case LT:
2247 case LTU:
2248 case GT:
2249 case GTU:
2250 case GE:
2251 case GEU:
2252 case NE:
2253 case EQ:
2254 if (outer_code == SET)
2255 {
2256 *total += COSTS_N_INSNS (2);
2257 return true;
2258 }
2259 break;
2260
2261 case ZERO_EXTRACT:
2262 {
2263 rtx dest = XEXP (x, 0);
2264 rtx addr = XEXP (dest, 0);
2265 switch (GET_CODE (addr))
2266 {
2267 case CONST_INT:
2268 *total += COSTS_N_INSNS (1);
2269 break;
2270 case SYMBOL_REF:
2271 *total += COSTS_N_INSNS (3);
2272 break;
2273 default:
2274 *total += COSTS_N_INSNS (2);
2275 break;
2276 }
2277 return true;
2278 }
2279 break;
2280
2281 default:
2282 /* Reasonable default. */
2283 if (TARGET_A16 && GET_MODE(x) == SImode)
2284 *total += COSTS_N_INSNS (2);
2285 break;
2286 }
2287 return false;
2288}
2289
2290#undef TARGET_ADDRESS_COST
2291#define TARGET_ADDRESS_COST m32c_address_cost
2292static int
2293m32c_address_cost (rtx addr, enum machine_mode mode ATTRIBUTE_UNUSED,
2294 addr_space_t as ATTRIBUTE_UNUSED,
2295 bool speed ATTRIBUTE_UNUSED)
2296 {
2297   int i;
2298 /* fprintf(stderr, "\naddress_cost\n");
2299 debug_rtx(addr);*/
2300 switch (GET_CODE (addr))
2301 {
2302 case CONST_INT:
2303 i = INTVAL (addr);
2304 if (i == 0)
2305 return COSTS_N_INSNS(1);
2306 if (0 < i && i <= 255)
2307 return COSTS_N_INSNS(2);
2308 if (0 < i && i <= 65535)
2309 return COSTS_N_INSNS(3);
2310 return COSTS_N_INSNS(4);
2311     case SYMBOL_REF:
2312       return COSTS_N_INSNS(4);
2313     case REG:
2314 return COSTS_N_INSNS(1);
2315 case PLUS:
2316 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
2317 {
2318 i = INTVAL (XEXP (addr, 1));
2319 if (i == 0)
2320 return COSTS_N_INSNS(1);
2321 if (0 < i && i <= 255)
2322 return COSTS_N_INSNS(2);
2323 if (0 < i && i <= 65535)
2324 return COSTS_N_INSNS(3);
2325 }
2326 return COSTS_N_INSNS(4);
2327 default:
2328 return 0;
2329 }
2330}
2331
2332/* Defining the Output Assembler Language */
2333
2334/* Output of Data */
2335
2336/* We may have 24 bit sizes, which is the native address size.
2337 Currently unused, but provided for completeness. */
2338#undef TARGET_ASM_INTEGER
2339#define TARGET_ASM_INTEGER m32c_asm_integer
2340static bool
2341m32c_asm_integer (rtx x, unsigned int size, int aligned_p)
2342{
2343 switch (size)
2344 {
2345 case 3:
2346 fprintf (asm_out_file, "\t.3byte\t");
2347 output_addr_const (asm_out_file, x);
2348 fputc ('\n', asm_out_file);
2349 return true;
2350 case 4:
2351 if (GET_CODE (x) == SYMBOL_REF)
2352 {
2353 fprintf (asm_out_file, "\t.long\t");
2354 output_addr_const (asm_out_file, x);
2355 fputc ('\n', asm_out_file);
2356 return true;
2357 }
2358 break;
2359 }
2360 return default_assemble_integer (x, size, aligned_p);
2361}
2362
2363/* Output of Assembler Instructions */
2364
2365/* We use a lookup table because the addressing modes are non-orthogonal. */
2366
2367static struct
2368{
2369 char code;
2370 char const *pattern;
2371 char const *format;
2372}
2373const conversions[] = {
2374 { 0, "r", "0" },
2375
2376 { 0, "mr", "z[1]" },
2377 { 0, "m+ri", "3[2]" },
2378 { 0, "m+rs", "3[2]" },
2379 { 0, "m+^Zrs", "5[4]" },
2380 { 0, "m+^Zri", "5[4]" },
2381 { 0, "m+^Z+ris", "7+6[5]" },
2382 { 0, "m+^Srs", "5[4]" },
2383 { 0, "m+^Sri", "5[4]" },
2384 { 0, "m+^S+ris", "7+6[5]" },
2385 { 0, "m+r+si", "4+5[2]" },
2386 { 0, "ms", "1" },
2387 { 0, "mi", "1" },
2388 { 0, "m+si", "2+3" },
2389
2390 { 0, "mmr", "[z[2]]" },
2391 { 0, "mm+ri", "[4[3]]" },
2392 { 0, "mm+rs", "[4[3]]" },
2393 { 0, "mm+r+si", "[5+6[3]]" },
2394 { 0, "mms", "[[2]]" },
2395 { 0, "mmi", "[[2]]" },
2396 { 0, "mm+si", "[4[3]]" },
2397
2398 { 0, "i", "#0" },
2399 { 0, "s", "#0" },
2400 { 0, "+si", "#1+2" },
2401 { 0, "l", "#0" },
2402
2403 { 'l', "l", "0" },
2404 { 'd', "i", "0" },
2405 { 'd', "s", "0" },
2406 { 'd', "+si", "1+2" },
2407 { 'D', "i", "0" },
2408 { 'D', "s", "0" },
2409 { 'D', "+si", "1+2" },
2410 { 'x', "i", "#0" },
2411 { 'X', "i", "#0" },
2412 { 'm', "i", "#0" },
2413 { 'b', "i", "#0" },
2414   { 'B', "i", "0" },
2415 { 'p', "i", "0" },
2416
2417 { 0, 0, 0 }
2418};
2419
2420/* This is in order according to the bitfield that pushm/popm use. */
2421static char const *pushm_regs[] = {
2422 "fb", "sb", "a1", "a0", "r3", "r2", "r1", "r0"
2423};
2424
2425/* Implements TARGET_PRINT_OPERAND. */
2426
2427#undef TARGET_PRINT_OPERAND
2428#define TARGET_PRINT_OPERAND m32c_print_operand
2429
2430static void
2431m32c_print_operand (FILE * file, rtx x, int code)
2432{
2433 int i, j, b;
2434 const char *comma;
2435 HOST_WIDE_INT ival;
2436 int unsigned_const = 0;
2437   int force_sign;
2438
2439 /* Multiplies; constants are converted to sign-extended format but
2440 we need unsigned, so 'u' and 'U' tell us what size unsigned we
2441 need. */
2442 if (code == 'u')
2443 {
2444 unsigned_const = 2;
2445 code = 0;
2446 }
2447 if (code == 'U')
2448 {
2449 unsigned_const = 1;
2450 code = 0;
2451 }
2452 /* This one is only for debugging; you can put it in a pattern to
2453 force this error. */
2454 if (code == '!')
2455 {
2456 fprintf (stderr, "dj: unreviewed pattern:");
2457 if (current_output_insn)
2458 debug_rtx (current_output_insn);
2459 gcc_unreachable ();
2460 }
2461 /* PSImode operations are either .w or .l depending on the target. */
2462 if (code == '&')
2463 {
2464 if (TARGET_A16)
2465 fprintf (file, "w");
2466 else
2467 fprintf (file, "l");
2468 return;
2469 }
2470 /* Inverted conditionals. */
2471 if (code == 'C')
2472 {
2473 switch (GET_CODE (x))
2474 {
2475 case LE:
2476 fputs ("gt", file);
2477 break;
2478 case LEU:
2479 fputs ("gtu", file);
2480 break;
2481 case LT:
2482 fputs ("ge", file);
2483 break;
2484 case LTU:
2485 fputs ("geu", file);
2486 break;
2487 case GT:
2488 fputs ("le", file);
2489 break;
2490 case GTU:
2491 fputs ("leu", file);
2492 break;
2493 case GE:
2494 fputs ("lt", file);
2495 break;
2496 case GEU:
2497 fputs ("ltu", file);
2498 break;
2499 case NE:
2500 fputs ("eq", file);
2501 break;
2502 case EQ:
2503 fputs ("ne", file);
2504 break;
2505 default:
2506 gcc_unreachable ();
2507 }
2508 return;
2509 }
2510 /* Regular conditionals. */
2511 if (code == 'c')
2512 {
2513 switch (GET_CODE (x))
2514 {
2515 case LE:
2516 fputs ("le", file);
2517 break;
2518 case LEU:
2519 fputs ("leu", file);
2520 break;
2521 case LT:
2522 fputs ("lt", file);
2523 break;
2524 case LTU:
2525 fputs ("ltu", file);
2526 break;
2527 case GT:
2528 fputs ("gt", file);
2529 break;
2530 case GTU:
2531 fputs ("gtu", file);
2532 break;
2533 case GE:
2534 fputs ("ge", file);
2535 break;
2536 case GEU:
2537 fputs ("geu", file);
2538 break;
2539 case NE:
2540 fputs ("ne", file);
2541 break;
2542 case EQ:
2543 fputs ("eq", file);
2544 break;
2545 default:
2546 gcc_unreachable ();
2547 }
2548 return;
2549 }
2550 /* Used in negsi2 to do HImode ops on the two parts of an SImode
2551 operand. */
2552 if (code == 'h' && GET_MODE (x) == SImode)
2553 {
2554 x = m32c_subreg (HImode, x, SImode, 0);
2555 code = 0;
2556 }
2557 if (code == 'H' && GET_MODE (x) == SImode)
2558 {
2559 x = m32c_subreg (HImode, x, SImode, 2);
2560 code = 0;
2561 }
2562 if (code == 'h' && GET_MODE (x) == HImode)
2563 {
2564 x = m32c_subreg (QImode, x, HImode, 0);
2565 code = 0;
2566 }
2567 if (code == 'H' && GET_MODE (x) == HImode)
2568 {
2569 /* We can't actually represent this as an rtx. Do it here. */
2570 if (GET_CODE (x) == REG)
2571 {
2572 switch (REGNO (x))
2573 {
2574 case R0_REGNO:
2575 fputs ("r0h", file);
2576 return;
2577 case R1_REGNO:
2578 fputs ("r1h", file);
2579 return;
2580 default:
2581 gcc_unreachable();
2582 }
2583 }
2584 /* This should be a MEM. */
2585 x = m32c_subreg (QImode, x, HImode, 1);
2586 code = 0;
2587 }
2588 /* This is for BMcond, which always wants word register names. */
2589 if (code == 'h' && GET_MODE (x) == QImode)
2590 {
2591 if (GET_CODE (x) == REG)
2592 x = gen_rtx_REG (HImode, REGNO (x));
2593 code = 0;
2594 }
2595 /* 'x' and 'X' need to be ignored for non-immediates. */
2596 if ((code == 'x' || code == 'X') && GET_CODE (x) != CONST_INT)
2597 code = 0;
2598
2599 encode_pattern (x);
2600   force_sign = 0;
2601 for (i = 0; conversions[i].pattern; i++)
2602 if (conversions[i].code == code
2603 && streq (conversions[i].pattern, pattern))
2604 {
2605 for (j = 0; conversions[i].format[j]; j++)
2606 /* backslash quotes the next character in the output pattern. */
2607 if (conversions[i].format[j] == '\\')
2608 {
2609 fputc (conversions[i].format[j + 1], file);
2610 j++;
2611 }
2612 /* Digits in the output pattern indicate that the
2613 corresponding RTX is to be output at that point. */
2614 else if (ISDIGIT (conversions[i].format[j]))
2615 {
2616 rtx r = patternr[conversions[i].format[j] - '0'];
2617 switch (GET_CODE (r))
2618 {
2619 case REG:
2620 fprintf (file, "%s",
2621 reg_name_with_mode (REGNO (r), GET_MODE (r)));
2622 break;
2623 case CONST_INT:
2624 switch (code)
2625 {
2626 case 'b':
2627 case 'B':
2628 {
2629 int v = INTVAL (r);
2630 int i = (int) exact_log2 (v);
2631 if (i == -1)
2632 i = (int) exact_log2 ((v ^ 0xffff) & 0xffff);
2633 if (i == -1)
2634 i = (int) exact_log2 ((v ^ 0xff) & 0xff);
2635 /* Bit position. */
2636 fprintf (file, "%d", i);
2637 }
2638 break;
2639 case 'x':
2640 /* Unsigned byte. */
2641 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2642 INTVAL (r) & 0xff);
2643 break;
2644 case 'X':
2645 /* Unsigned word. */
2646 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2647 INTVAL (r) & 0xffff);
2648 break;
2649 case 'p':
2650 /* pushm and popm encode a register set into a single byte. */
2651 comma = "";
2652 for (b = 7; b >= 0; b--)
2653 if (INTVAL (r) & (1 << b))
2654 {
2655 fprintf (file, "%s%s", comma, pushm_regs[b]);
2656 comma = ",";
2657 }
2658 break;
2659 case 'm':
2660 /* "Minus". Output -X */
2661 ival = (-INTVAL (r) & 0xffff);
2662 if (ival & 0x8000)
2663 ival = ival - 0x10000;
2664 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2665 break;
2666 default:
2667 ival = INTVAL (r);
2668 if (conversions[i].format[j + 1] == '[' && ival < 0)
2669 {
2670 /* We can simulate negative displacements by
2671 taking advantage of address space
2672 wrapping when the offset can span the
2673 entire address range. */
2674 rtx base =
2675 patternr[conversions[i].format[j + 2] - '0'];
2676 if (GET_CODE (base) == REG)
2677 switch (REGNO (base))
2678 {
2679 case A0_REGNO:
2680 case A1_REGNO:
2681 if (TARGET_A24)
2682 ival = 0x1000000 + ival;
2683 else
2684 ival = 0x10000 + ival;
2685 break;
2686 case SB_REGNO:
2687 if (TARGET_A16)
2688 ival = 0x10000 + ival;
2689 break;
2690 }
2691 }
2692 else if (code == 'd' && ival < 0 && j == 0)
2693 /* The "mova" opcode is used to do addition by
2694 computing displacements, but again, we need
2695 displacements to be unsigned *if* they're
2696 the only component of the displacement
2697 (i.e. no "symbol-4" type displacement). */
2698 ival = (TARGET_A24 ? 0x1000000 : 0x10000) + ival;
2699
2700 if (conversions[i].format[j] == '0')
2701 {
2702 /* More conversions to unsigned. */
2703 if (unsigned_const == 2)
2704 ival &= 0xffff;
2705 if (unsigned_const == 1)
2706 ival &= 0xff;
2707 }
2708 if (streq (conversions[i].pattern, "mi")
2709 || streq (conversions[i].pattern, "mmi"))
2710 {
2711 /* Integers used as addresses are unsigned. */
2712 ival &= (TARGET_A24 ? 0xffffff : 0xffff);
2713 }
2714 if (force_sign && ival >= 0)
2715 fputc ('+', file);
2716 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2717 break;
2718 }
2719 break;
2720 case CONST_DOUBLE:
2721 /* We don't have const_double constants. If it
2722 happens, make it obvious. */
2723 fprintf (file, "[const_double 0x%lx]",
2724 (unsigned long) CONST_DOUBLE_HIGH (r));
2725 break;
2726 case SYMBOL_REF:
2727 assemble_name (file, XSTR (r, 0));
2728 break;
2729 case LABEL_REF:
2730 output_asm_label (r);
2731 break;
2732 default:
2733 fprintf (stderr, "don't know how to print this operand:");
2734 debug_rtx (r);
2735 gcc_unreachable ();
2736 }
2737 }
2738 else
2739 {
2740 if (conversions[i].format[j] == 'z')
2741 {
2742 /* Some addressing modes *must* have a displacement,
2743 so insert a zero here if needed. */
2744 int k;
2745 for (k = j + 1; conversions[i].format[k]; k++)
2746 if (ISDIGIT (conversions[i].format[k]))
2747 {
2748 rtx reg = patternr[conversions[i].format[k] - '0'];
2749 if (GET_CODE (reg) == REG
2750 && (REGNO (reg) == SB_REGNO
2751 || REGNO (reg) == FB_REGNO
2752 || REGNO (reg) == SP_REGNO))
2753 fputc ('0', file);
2754 }
2755 continue;
2756 }
2757 /* Signed displacements off symbols need to have signs
2758 blended cleanly. */
2759 if (conversions[i].format[j] == '+'
2760 	    && (!code || code == 'D' || code == 'd')
2761 	    && ISDIGIT (conversions[i].format[j + 1])
2762 && (GET_CODE (patternr[conversions[i].format[j + 1] - '0'])
2763 == CONST_INT))
2764 {
2765 force_sign = 1;
2766 continue;
2767 }
2768 fputc (conversions[i].format[j], file);
2769 }
2770 break;
2771 }
2772 if (!conversions[i].pattern)
2773 {
2774 fprintf (stderr, "unconvertible operand %c `%s'", code ? code : '-',
2775 pattern);
2776 debug_rtx (x);
2777 fprintf (file, "[%c.%s]", code ? code : '-', pattern);
2778 }
2779
2780 return;
2781}
2782
2783/* Implements TARGET_PRINT_OPERAND_PUNCT_VALID_P.
2784
2785 See m32c_print_operand above for descriptions of what these do. */
2786
2787#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
2788#define TARGET_PRINT_OPERAND_PUNCT_VALID_P m32c_print_operand_punct_valid_p
2789
2790static bool
2791m32c_print_operand_punct_valid_p (unsigned char c)
2792{
2793 if (c == '&' || c == '!')
2794 return true;
2795
2796 return false;
2797}
2798
2799/* Implements TARGET_PRINT_OPERAND_ADDRESS. Nothing unusual here. */
2800
2801#undef TARGET_PRINT_OPERAND_ADDRESS
2802#define TARGET_PRINT_OPERAND_ADDRESS m32c_print_operand_address
2803
2804static void
2805m32c_print_operand_address (FILE * stream, rtx address)
2806{
2807 if (GET_CODE (address) == MEM)
2808 address = XEXP (address, 0);
2809 else
2810 /* cf: gcc.dg/asm-4.c. */
2811 gcc_assert (GET_CODE (address) == REG);
2812
2813 m32c_print_operand (stream, address, 0);
2814}
2815
2816/* Implements ASM_OUTPUT_REG_PUSH. Control registers are pushed
2817 differently than general registers. */
2818void
2819m32c_output_reg_push (FILE * s, int regno)
2820{
2821 if (regno == FLG_REGNO)
2822 fprintf (s, "\tpushc\tflg\n");
2823 else
2824     fprintf (s, "\tpush.%c\t%s\n",
2825 " bwll"[reg_push_size (regno)], reg_names[regno]);
2826}
2827
2828/* Likewise for ASM_OUTPUT_REG_POP. */
2829void
2830m32c_output_reg_pop (FILE * s, int regno)
2831{
2832 if (regno == FLG_REGNO)
2833 fprintf (s, "\tpopc\tflg\n");
2834 else
2835     fprintf (s, "\tpop.%c\t%s\n",
2836 " bwll"[reg_push_size (regno)], reg_names[regno]);
2837}
2838
2839/* Defining target-specific uses of `__attribute__' */
2840
2841/* Used to simplify the logic below. Find the attributes wherever
2842 they may be. */
2843#define M32C_ATTRIBUTES(decl) \
2844 (TYPE_P (decl)) ? TYPE_ATTRIBUTES (decl) \
2845 : DECL_ATTRIBUTES (decl) \
2846 ? (DECL_ATTRIBUTES (decl)) \
2847 : TYPE_ATTRIBUTES (TREE_TYPE (decl))
2848
2849/* Returns TRUE if the given tree has the "interrupt" attribute. */
2850static int
2851interrupt_p (tree node ATTRIBUTE_UNUSED)
2852{
2853 tree list = M32C_ATTRIBUTES (node);
2854 while (list)
2855 {
2856 if (is_attribute_p ("interrupt", TREE_PURPOSE (list)))
2857 return 1;
2858 list = TREE_CHAIN (list);
2859 }
2860 return fast_interrupt_p (node);
2861}
2862
2863/* Returns TRUE if the given tree has the "bank_switch" attribute. */
2864static int
2865bank_switch_p (tree node ATTRIBUTE_UNUSED)
2866{
2867 tree list = M32C_ATTRIBUTES (node);
2868 while (list)
2869 {
2870 if (is_attribute_p ("bank_switch", TREE_PURPOSE (list)))
2871 return 1;
2872 list = TREE_CHAIN (list);
2873 }
2874 return 0;
2875}
2876
2877/* Returns TRUE if the given tree has the "fast_interrupt" attribute. */
2878static int
2879fast_interrupt_p (tree node ATTRIBUTE_UNUSED)
2880{
2881 tree list = M32C_ATTRIBUTES (node);
2882 while (list)
2883 {
2884 if (is_attribute_p ("fast_interrupt", TREE_PURPOSE (list)))
2885 return 1;
2886 list = TREE_CHAIN (list);
2887 }
2888 return 0;
2889}
2890
2891static tree
2892interrupt_handler (tree * node ATTRIBUTE_UNUSED,
2893 tree name ATTRIBUTE_UNUSED,
2894 tree args ATTRIBUTE_UNUSED,
2895 int flags ATTRIBUTE_UNUSED,
2896 bool * no_add_attrs ATTRIBUTE_UNUSED)
2897{
2898 return NULL_TREE;
2899}
2900
2901/* Returns TRUE if given tree has the "function_vector" attribute. */
2902int
2903m32c_special_page_vector_p (tree func)
2904{
2905 tree list;
2906
2907 if (TREE_CODE (func) != FUNCTION_DECL)
2908 return 0;
2909
2910   list = M32C_ATTRIBUTES (func);
2911 while (list)
2912 {
2913 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2914 return 1;
2915 list = TREE_CHAIN (list);
2916 }
2917 return 0;
2918}
2919
2920static tree
2921function_vector_handler (tree * node ATTRIBUTE_UNUSED,
2922 tree name ATTRIBUTE_UNUSED,
2923 tree args ATTRIBUTE_UNUSED,
2924 int flags ATTRIBUTE_UNUSED,
2925 bool * no_add_attrs ATTRIBUTE_UNUSED)
2926{
2927 if (TARGET_R8C)
2928 {
2929 /* The attribute is not supported for R8C target. */
2930 warning (OPT_Wattributes,
2931 "%qE attribute is not supported for R8C target",
2932 name);
2933 *no_add_attrs = true;
2934 }
2935 else if (TREE_CODE (*node) != FUNCTION_DECL)
2936 {
2937 /* The attribute must be applied to functions only. */
2938 warning (OPT_Wattributes,
2939 "%qE attribute applies only to functions",
2940 name);
2941 *no_add_attrs = true;
2942 }
2943 else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
2944 {
2945 /* The argument must be a constant integer. */
2946 warning (OPT_Wattributes,
2947 "%qE attribute argument not an integer constant",
2948 name);
2949 *no_add_attrs = true;
2950 }
2951 else if (TREE_INT_CST_LOW (TREE_VALUE (args)) < 18
2952 || TREE_INT_CST_LOW (TREE_VALUE (args)) > 255)
2953 {
2954 	  /* The argument value must be between 18 and 255. */
2955 warning (OPT_Wattributes,
2956 "%qE attribute argument should be between 18 to 255",
2957 name);
2958 *no_add_attrs = true;
2959 }
2960 return NULL_TREE;
2961}
2962
2963/* If the function is assigned the attribute 'function_vector', it
2964 returns the function vector number, otherwise returns zero. */
2965int
2966current_function_special_page_vector (rtx x)
2967{
2968 int num;
2969
2970 if ((GET_CODE(x) == SYMBOL_REF)
2971 && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
2972 {
2973       tree list;
2974 tree t = SYMBOL_REF_DECL (x);
2975
2976 if (TREE_CODE (t) != FUNCTION_DECL)
2977 return 0;
2978
2979       list = M32C_ATTRIBUTES (t);
2980 while (list)
2981 {
2982 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2983 {
2984 num = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list)));
2985 return num;
2986 }
2987
2988 list = TREE_CHAIN (list);
2989 }
2990
2991 return 0;
2992 }
2993 else
2994 return 0;
2995}
2996
2997#undef TARGET_ATTRIBUTE_TABLE
2998#define TARGET_ATTRIBUTE_TABLE m32c_attribute_table
2999static const struct attribute_spec m32c_attribute_table[] = {
3000 {"interrupt", 0, 0, false, false, false, interrupt_handler, false},
3001 {"bank_switch", 0, 0, false, false, false, interrupt_handler, false},
3002 {"fast_interrupt", 0, 0, false, false, false, interrupt_handler, false},
3003 {"function_vector", 1, 1, true, false, false, function_vector_handler,
3004 false},
3005 {0, 0, 0, 0, 0, 0, 0, false}
3006};
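/* A user-level sketch of these attributes (a hedged example, not
   taken from this file; the vector number 50 is arbitrary but lies
   in the 18..255 range checked by function_vector_handler above):

     void __attribute__((interrupt))           timer_isr (void);
     void __attribute__((fast_interrupt))      dma_isr (void);
     void __attribute__((bank_switch))         uart_isr (void);
     void __attribute__((function_vector(50))) jsrs_target (void);  */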
3007
3008#undef TARGET_COMP_TYPE_ATTRIBUTES
3009#define TARGET_COMP_TYPE_ATTRIBUTES m32c_comp_type_attributes
3010static int
3011m32c_comp_type_attributes (const_tree type1 ATTRIBUTE_UNUSED,
3012 const_tree type2 ATTRIBUTE_UNUSED)
3013{
3014 /* 0=incompatible 1=compatible 2=warning */
3015 return 1;
3016}
3017
3018#undef TARGET_INSERT_ATTRIBUTES
3019#define TARGET_INSERT_ATTRIBUTES m32c_insert_attributes
3020static void
3021m32c_insert_attributes (tree node ATTRIBUTE_UNUSED,
3022 tree * attr_ptr ATTRIBUTE_UNUSED)
3023{
3024 unsigned addr;
3025 /* See if we need to make #pragma address variables volatile. */
3026
3027 if (TREE_CODE (node) == VAR_DECL)
3028 {
3029       const char *name = IDENTIFIER_POINTER (DECL_NAME (node));
3030 if (m32c_get_pragma_address (name, &addr))
3031 {
3032 TREE_THIS_VOLATILE (node) = true;
3033 }
3034 }
3035}
3036
3037
3038struct GTY(()) pragma_entry {
3039 const char *varname;
3040 unsigned address;
3041};
3042typedef struct pragma_entry pragma_entry;
3043
3044/* Hash table of pragma info. */
3045static GTY((param_is (pragma_entry))) htab_t pragma_htab;
3046
3047static int
3048pragma_entry_eq (const void *p1, const void *p2)
3049{
3050 const pragma_entry *old = (const pragma_entry *) p1;
3051 const char *new_name = (const char *) p2;
3052
3053 return strcmp (old->varname, new_name) == 0;
3054}
3055
3056static hashval_t
3057pragma_entry_hash (const void *p)
3058{
3059 const pragma_entry *old = (const pragma_entry *) p;
3060 return htab_hash_string (old->varname);
3061}
3062
3063void
3064m32c_note_pragma_address (const char *varname, unsigned address)
3065{
3066 pragma_entry **slot;
3067
3068 if (!pragma_htab)
3069 pragma_htab = htab_create_ggc (31, pragma_entry_hash,
3070 pragma_entry_eq, NULL);
3071
3072 slot = (pragma_entry **)
3073 htab_find_slot_with_hash (pragma_htab, varname,
3074 htab_hash_string (varname), INSERT);
3075
3076 if (!*slot)
3077 {
3078       *slot = ggc_alloc<pragma_entry> ();
3079 (*slot)->varname = ggc_strdup (varname);
3080 }
3081 (*slot)->address = address;
3082}
3083
3084static bool
3085m32c_get_pragma_address (const char *varname, unsigned *address)
3086{
3087 pragma_entry **slot;
3088
3089 if (!pragma_htab)
3090 return false;
3091
3092 slot = (pragma_entry **)
3093 htab_find_slot_with_hash (pragma_htab, varname,
3094 htab_hash_string (varname), NO_INSERT);
3095 if (slot && *slot)
3096 {
3097 *address = (*slot)->address;
3098 return true;
3099 }
3100 return false;
3101}
3102
3103void
3104m32c_output_aligned_common (FILE *stream, tree decl ATTRIBUTE_UNUSED,
3105 const char *name,
3106 int size, int align, int global)
3107{
3108 unsigned address;
3109
3110 if (m32c_get_pragma_address (name, &address))
3111 {
3112 /* We never output these as global. */
3113 assemble_name (stream, name);
3114 fprintf (stream, " = 0x%04x\n", address);
3115 return;
3116 }
3117 if (!global)
3118 {
3119 fprintf (stream, "\t.local\t");
3120 assemble_name (stream, name);
3121 fprintf (stream, "\n");
3122 }
3123 fprintf (stream, "\t.comm\t");
3124 assemble_name (stream, name);
3125 fprintf (stream, ",%u,%u\n", size, align / BITS_PER_UNIT);
3126}
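/* A sketch of the user-visible effect of the pragma machinery above,
   assuming the pragma spelling registered elsewhere in the port is
   "#pragma ADDRESS" (the spelling is an assumption here):

     #pragma ADDRESS (port0, 0x3e0)
     unsigned char port0;

   m32c_insert_attributes() marks port0 volatile, and
   m32c_output_aligned_common() emits "port0 = 0x03e0" instead of a
   .comm directive, so the object lands at the fixed address.  */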
3127
3128/* Predicates */
3129
3130/* This is a list of legal subregs of hard regs. */
3131static const struct {
3132 unsigned char outer_mode_size;
3133 unsigned char inner_mode_size;
3134 unsigned char byte_mask;
3135 unsigned char legal_when;
3136   unsigned int regno;
3137} legal_subregs[] = {
3138 {1, 2, 0x03, 1, R0_REGNO}, /* r0h r0l */
3139 {1, 2, 0x03, 1, R1_REGNO}, /* r1h r1l */
3140 {1, 2, 0x01, 1, A0_REGNO},
3141 {1, 2, 0x01, 1, A1_REGNO},
3142
3143 {1, 4, 0x01, 1, A0_REGNO},
3144 {1, 4, 0x01, 1, A1_REGNO},
3145
3146 {2, 4, 0x05, 1, R0_REGNO}, /* r2 r0 */
3147 {2, 4, 0x05, 1, R1_REGNO}, /* r3 r1 */
3148 {2, 4, 0x05, 16, A0_REGNO}, /* a1 a0 */
3149 {2, 4, 0x01, 24, A0_REGNO}, /* a1 a0 */
3150 {2, 4, 0x01, 24, A1_REGNO}, /* a1 a0 */
3151
3152   {4, 8, 0x55, 1, R0_REGNO},	/* r3 r1 r2 r0 */
3153};
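/* Reading one row of the table above, as used by
   m32c_illegal_subreg_p() below: {2, 4, 0x05, 1, R0_REGNO} says a
   2-byte (HImode) subreg of a 4-byte value living in r0 is legal at
   byte offsets 0 and 2 (byte_mask 0x05 = bits 0 and 2) on every
   target (legal_when == 1); those two offsets are the r2r0 pair.  */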
3154
3155/* Returns TRUE if OP is a subreg of a hard reg which we don't
3156    support.  We also bail on MEMs with illegal addresses.  */
3157bool
3158m32c_illegal_subreg_p (rtx op)
3159{
3160 int offset;
3161 unsigned int i;
3162   enum machine_mode src_mode, dest_mode;
3163
3164 if (GET_CODE (op) == MEM
3165 && ! m32c_legitimate_address_p (Pmode, XEXP (op, 0), false))
3166 {
3167 return true;
3168 }
3169
3170 if (GET_CODE (op) != SUBREG)
3171 return false;
3172
3173 dest_mode = GET_MODE (op);
3174 offset = SUBREG_BYTE (op);
3175 op = SUBREG_REG (op);
3176 src_mode = GET_MODE (op);
3177
3178 if (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (src_mode))
3179 return false;
3180 if (GET_CODE (op) != REG)
3181 return false;
3182 if (REGNO (op) >= MEM0_REGNO)
3183 return false;
3184
3185 offset = (1 << offset);
3186
3187   for (i = 0; i < ARRAY_SIZE (legal_subregs); i ++)
3188 if (legal_subregs[i].outer_mode_size == GET_MODE_SIZE (dest_mode)
3189 && legal_subregs[i].regno == REGNO (op)
3190 && legal_subregs[i].inner_mode_size == GET_MODE_SIZE (src_mode)
3191 && legal_subregs[i].byte_mask & offset)
3192 {
3193 switch (legal_subregs[i].legal_when)
3194 {
3195 case 1:
3196 return false;
3197 case 16:
3198 if (TARGET_A16)
3199 return false;
3200 break;
3201 case 24:
3202 if (TARGET_A24)
3203 return false;
3204 break;
3205 }
3206 }
3207 return true;
3208}
3209
3210/* Returns TRUE if we support a move between the first two operands.
3211 At the moment, we just want to discourage mem to mem moves until
3212 after reload, because reload has a hard time with our limited
3213 number of address registers, and we can get into a situation where
3214 we need three of them when we only have two. */
3215bool
3216m32c_mov_ok (rtx * operands, enum machine_mode mode ATTRIBUTE_UNUSED)
3217{
3218 rtx op0 = operands[0];
3219 rtx op1 = operands[1];
3220
3221 if (TARGET_A24)
3222 return true;
3223
3224#define DEBUG_MOV_OK 0
3225#if DEBUG_MOV_OK
3226 fprintf (stderr, "m32c_mov_ok %s\n", mode_name[mode]);
3227 debug_rtx (op0);
3228 debug_rtx (op1);
3229#endif
3230
3231 if (GET_CODE (op0) == SUBREG)
3232 op0 = XEXP (op0, 0);
3233 if (GET_CODE (op1) == SUBREG)
3234 op1 = XEXP (op1, 0);
3235
3236 if (GET_CODE (op0) == MEM
3237 && GET_CODE (op1) == MEM
3238 && ! reload_completed)
3239 {
3240#if DEBUG_MOV_OK
3241 fprintf (stderr, " - no, mem to mem\n");
3242#endif
3243 return false;
3244 }
3245
3246#if DEBUG_MOV_OK
3247 fprintf (stderr, " - ok\n");
3248#endif
3249 return true;
3250}
3251
3252/* Returns TRUE if two consecutive HImode mov instructions, generated
3253 for moving an immediate double data to a double data type variable
3254 location, can be combined into single SImode mov instruction. */
3255bool
3256 m32c_immd_dbl_mov (rtx * operands ATTRIBUTE_UNUSED,
3257 enum machine_mode mode ATTRIBUTE_UNUSED)
3258{
3259 /* ??? This relied on the now-defunct MEM_SCALAR and MEM_IN_STRUCT_P
3260 flags. */
3261 return false;
3262}
3263
3264/* Expanders */
3265
3266/* Subregs are non-orthogonal for us, because our registers are all
3267 different sizes. */
3268static rtx
3269m32c_subreg (enum machine_mode outer,
3270 rtx x, enum machine_mode inner, int byte)
3271{
3272 int r, nr = -1;
3273
3274 /* Converting MEMs to different types that are the same size, we
3275 just rewrite them. */
3276 if (GET_CODE (x) == SUBREG
3277 && SUBREG_BYTE (x) == 0
3278 && GET_CODE (SUBREG_REG (x)) == MEM
3279 && (GET_MODE_SIZE (GET_MODE (x))
3280 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
3281 {
3282 rtx oldx = x;
3283 x = gen_rtx_MEM (GET_MODE (x), XEXP (SUBREG_REG (x), 0));
3284 MEM_COPY_ATTRIBUTES (x, SUBREG_REG (oldx));
3285 }
3286
3287 /* Push/pop get done as smaller push/pops. */
3288 if (GET_CODE (x) == MEM
3289 && (GET_CODE (XEXP (x, 0)) == PRE_DEC
3290 || GET_CODE (XEXP (x, 0)) == POST_INC))
3291 return gen_rtx_MEM (outer, XEXP (x, 0));
3292 if (GET_CODE (x) == SUBREG
3293 && GET_CODE (XEXP (x, 0)) == MEM
3294 && (GET_CODE (XEXP (XEXP (x, 0), 0)) == PRE_DEC
3295 || GET_CODE (XEXP (XEXP (x, 0), 0)) == POST_INC))
3296 return gen_rtx_MEM (outer, XEXP (XEXP (x, 0), 0));
3297
3298 if (GET_CODE (x) != REG)
3299 {
3300 rtx r = simplify_gen_subreg (outer, x, inner, byte);
3301 if (GET_CODE (r) == SUBREG
3302 && GET_CODE (x) == MEM
3303 && MEM_VOLATILE_P (x))
3304 {
3305 /* Volatile MEMs don't get simplified, but we need them to
3306 be. We are little endian, so the subreg byte is the
3307 offset. */
3308 	  r = adjust_address_nv (x, outer, byte);
3309 }
3310 return r;
3311 }
3312
3313 r = REGNO (x);
3314 if (r >= FIRST_PSEUDO_REGISTER || r == AP_REGNO)
3315 return simplify_gen_subreg (outer, x, inner, byte);
3316
3317 if (IS_MEM_REGNO (r))
3318 return simplify_gen_subreg (outer, x, inner, byte);
3319
3320 /* This is where the complexities of our register layout are
3321 described. */
3322 if (byte == 0)
3323 nr = r;
3324 else if (outer == HImode)
3325 {
3326 if (r == R0_REGNO && byte == 2)
3327 nr = R2_REGNO;
3328 else if (r == R0_REGNO && byte == 4)
3329 nr = R1_REGNO;
3330 else if (r == R0_REGNO && byte == 6)
3331 nr = R3_REGNO;
3332 else if (r == R1_REGNO && byte == 2)
3333 nr = R3_REGNO;
3334 else if (r == A0_REGNO && byte == 2)
3335 nr = A1_REGNO;
3336 }
3337 else if (outer == SImode)
3338 {
3339 if (r == R0_REGNO && byte == 0)
3340 nr = R0_REGNO;
3341 else if (r == R0_REGNO && byte == 4)
3342 nr = R1_REGNO;
3343 }
3344 if (nr == -1)
3345 {
3346 fprintf (stderr, "m32c_subreg %s %s %d\n",
3347 mode_name[outer], mode_name[inner], byte);
3348 debug_rtx (x);
3349 gcc_unreachable ();
3350 }
3351 return gen_rtx_REG (outer, nr);
3352}
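/* For example (a sketch of the mapping above, under the register
   layout described in the comments): the HImode piece of an SImode
   value held in r0 is r0 itself at byte 0 and r2 at byte 2, matching
   the r2r0 pairing; likewise byte 2 of a value based at a0 maps to
   a1.  */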
3353
3354/* Used to emit move instructions. We split some moves,
3355 and avoid mem-mem moves. */
3356int
3357m32c_prepare_move (rtx * operands, enum machine_mode mode)
3358{
3359 if (far_addr_space_p (operands[0])
3360 && CONSTANT_P (operands[1]))
3361 {
3362 operands[1] = force_reg (GET_MODE (operands[0]), operands[1]);
3363 }
3364 if (TARGET_A16 && mode == PSImode)
3365 return m32c_split_move (operands, mode, 1);
3366 if ((GET_CODE (operands[0]) == MEM)
3367 && (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY))
3368 {
3369 rtx pmv = XEXP (operands[0], 0);
3370 rtx dest_reg = XEXP (pmv, 0);
3371 rtx dest_mod = XEXP (pmv, 1);
3372
3373 emit_insn (gen_rtx_SET (Pmode, dest_reg, dest_mod));
3374 operands[0] = gen_rtx_MEM (mode, dest_reg);
3375 }
3376   if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
3377 operands[1] = copy_to_mode_reg (mode, operands[1]);
3378 return 0;
3379}
3380
3381#define DEBUG_SPLIT 0
3382
3383/* Returns TRUE if the given PSImode move should be split. We split
3384 for all r8c/m16c moves, since it doesn't support them, and for
3385 POP.L as we can only *push* SImode. */
3386int
3387m32c_split_psi_p (rtx * operands)
3388{
3389#if DEBUG_SPLIT
3390 fprintf (stderr, "\nm32c_split_psi_p\n");
3391 debug_rtx (operands[0]);
3392 debug_rtx (operands[1]);
3393#endif
3394 if (TARGET_A16)
3395 {
3396#if DEBUG_SPLIT
3397 fprintf (stderr, "yes, A16\n");
3398#endif
3399 return 1;
3400 }
3401 if (GET_CODE (operands[1]) == MEM
3402 && GET_CODE (XEXP (operands[1], 0)) == POST_INC)
3403 {
3404#if DEBUG_SPLIT
3405 fprintf (stderr, "yes, pop.l\n");
3406#endif
3407 return 1;
3408 }
3409#if DEBUG_SPLIT
3410 fprintf (stderr, "no, default\n");
3411#endif
3412 return 0;
3413}
3414
3415/* Split the given move. SPLIT_ALL is 0 if splitting is optional
3416 (define_expand), 1 if it is not optional (define_insn_and_split),
3417 and 3 for define_split (alternate api). */
3418int
3419m32c_split_move (rtx * operands, enum machine_mode mode, int split_all)
3420{
3421 rtx s[4], d[4];
3422 int parts, si, di, rev = 0;
3423 int rv = 0, opi = 2;
3424 enum machine_mode submode = HImode;
3425 rtx *ops, local_ops[10];
3426
3427 /* define_split modifies the existing operands, but the other two
3428 emit new insns. OPS is where we store the operand pairs, which
3429 we emit later. */
3430 if (split_all == 3)
3431 ops = operands;
3432 else
3433 ops = local_ops;
3434
3435 /* Else HImode. */
3436 if (mode == DImode)
3437 submode = SImode;
3438
3439 /* Before splitting mem-mem moves, force one operand into a
3440 register. */
3441   if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
3442 {
3443#if DEBUG0
3444 fprintf (stderr, "force_reg...\n");
3445 debug_rtx (operands[1]);
3446#endif
3447 operands[1] = force_reg (mode, operands[1]);
3448#if DEBUG0
3449 debug_rtx (operands[1]);
3450#endif
3451 }
3452
3453 parts = 2;
3454
3455#if DEBUG_SPLIT
3456 fprintf (stderr, "\nsplit_move %d all=%d\n", !can_create_pseudo_p (),
3457 split_all);
3458 debug_rtx (operands[0]);
3459 debug_rtx (operands[1]);
3460#endif
3461
3462 /* Note that split_all is not used to select the api after this
3463 point, so it's safe to set it to 3 even with define_insn. */
3464 /* None of the chips can move SI operands to sp-relative addresses,
3465 so we always split those. */
3466   if (satisfies_constraint_Ss (operands[0]))
3467 split_all = 3;
3468
3469 if (TARGET_A16
3470 && (far_addr_space_p (operands[0])
3471 || far_addr_space_p (operands[1])))
3472 split_all |= 1;
3473
3474 /* We don't need to split these. */
3475 if (TARGET_A24
3476 && split_all != 3
3477 && (mode == SImode || mode == PSImode)
3478 && !(GET_CODE (operands[1]) == MEM
3479 && GET_CODE (XEXP (operands[1], 0)) == POST_INC))
3480 return 0;
3481
3482 /* First, enumerate the subregs we'll be dealing with. */
3483 for (si = 0; si < parts; si++)
3484 {
3485 d[si] =
3486 m32c_subreg (submode, operands[0], mode,
3487 si * GET_MODE_SIZE (submode));
3488 s[si] =
3489 m32c_subreg (submode, operands[1], mode,
3490 si * GET_MODE_SIZE (submode));
3491 }
3492
3493 /* Split pushes by emitting a sequence of smaller pushes. */
3494 if (GET_CODE (d[0]) == MEM && GET_CODE (XEXP (d[0], 0)) == PRE_DEC)
3495 {
3496 for (si = parts - 1; si >= 0; si--)
3497 {
3498 ops[opi++] = gen_rtx_MEM (submode,
3499 gen_rtx_PRE_DEC (Pmode,
3500 gen_rtx_REG (Pmode,
3501 SP_REGNO)));
3502 ops[opi++] = s[si];
3503 }
3504
3505 rv = 1;
3506 }
3507 /* Likewise for pops. */
3508 else if (GET_CODE (s[0]) == MEM && GET_CODE (XEXP (s[0], 0)) == POST_INC)
3509 {
3510 for (di = 0; di < parts; di++)
3511 {
3512 ops[opi++] = d[di];
3513 ops[opi++] = gen_rtx_MEM (submode,
3514 gen_rtx_POST_INC (Pmode,
3515 gen_rtx_REG (Pmode,
3516 SP_REGNO)));
3517 }
3518 rv = 1;
3519 }
3520 else if (split_all)
3521 {
3522 /* if d[di] == s[si] for any di < si, we'll early clobber. */
3523 for (di = 0; di < parts - 1; di++)
3524 for (si = di + 1; si < parts; si++)
3525 if (reg_mentioned_p (d[di], s[si]))
3526 rev = 1;
3527
3528 if (rev)
3529 for (si = 0; si < parts; si++)
3530 {
3531 ops[opi++] = d[si];
3532 ops[opi++] = s[si];
3533 }
3534 else
3535 for (si = parts - 1; si >= 0; si--)
3536 {
3537 ops[opi++] = d[si];
3538 ops[opi++] = s[si];
3539 }
3540 rv = 1;
3541 }
3542 /* Now emit any moves we may have accumulated. */
3543 if (rv && split_all != 3)
3544 {
3545 int i;
3546 for (i = 2; i < opi; i += 2)
3547 emit_move_insn (ops[i], ops[i + 1]);
3548 }
3549 return rv;
3550}
3551
3552/* The m32c has a number of opcodes that act like memcpy, strcmp, and
3553 the like. For the R8C they expect one of the addresses to be in
3554 R1L:An so we need to arrange for that. Otherwise, it's just a
3555 matter of picking out the operands we want and emitting the right
3556 pattern for them. All these expanders, which correspond to
3557 patterns in blkmov.md, must return nonzero if they expand the insn,
3558 or zero if they should FAIL. */
3559
3560/* This is a memset() opcode. All operands are implied, so we need to
3561 arrange for them to be in the right registers. The opcode wants
3562 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3563 the count (HI), and $2 the value (QI). */
3564int
3565m32c_expand_setmemhi(rtx *operands)
3566{
3567 rtx desta, count, val;
3568 rtx desto, counto;
3569
3570 desta = XEXP (operands[0], 0);
3571 count = operands[1];
3572 val = operands[2];
3573
3574 desto = gen_reg_rtx (Pmode);
3575 counto = gen_reg_rtx (HImode);
3576
3577 if (GET_CODE (desta) != REG
3578 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3579 desta = copy_to_mode_reg (Pmode, desta);
3580
3581 /* This looks like an arbitrary restriction, but this is by far the
3582 most common case. For counts 8..14 this actually results in
3583 smaller code with no speed penalty because the half-sized
3584 constant can be loaded with a shorter opcode. */
3585 if (GET_CODE (count) == CONST_INT
3586 && GET_CODE (val) == CONST_INT
3587 && ! (INTVAL (count) & 1)
3588 && (INTVAL (count) > 1)
3589 && (INTVAL (val) <= 7 && INTVAL (val) >= -8))
3590 {
3591 unsigned v = INTVAL (val) & 0xff;
3592 v = v | (v << 8);
3593 count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3594 val = copy_to_mode_reg (HImode, GEN_INT (v));
3595 if (TARGET_A16)
3596 emit_insn (gen_setmemhi_whi_op (desto, counto, val, desta, count));
3597 else
3598 emit_insn (gen_setmemhi_wpsi_op (desto, counto, val, desta, count));
3599 return 1;
3600 }
3601
3602 /* This is the generalized memset() case. */
3603 if (GET_CODE (val) != REG
3604 || REGNO (val) < FIRST_PSEUDO_REGISTER)
3605 val = copy_to_mode_reg (QImode, val);
3606
3607 if (GET_CODE (count) != REG
3608 || REGNO (count) < FIRST_PSEUDO_REGISTER)
3609 count = copy_to_mode_reg (HImode, count);
3610
3611 if (TARGET_A16)
3612 emit_insn (gen_setmemhi_bhi_op (desto, counto, val, desta, count));
3613 else
3614 emit_insn (gen_setmemhi_bpsi_op (desto, counto, val, desta, count));
3615
3616 return 1;
3617}
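/* A worked example of the even-count shortcut above (a sketch, using
   made-up values): memset (dst, 0x05, 10) has val 0x05 and count 10,
   so the expander stores 0x0505 with a halved count of 5 and uses
   the word-wide opcode; an odd count or an out-of-range value falls
   through to the byte-wide form instead.  */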
3618
3619/* This is a memcpy() opcode. All operands are implied, so we need to
3620 arrange for them to be in the right registers. The opcode wants
3621 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3622 is the source (MEM:BLK), and $2 the count (HI). */
3623int
3624m32c_expand_movmemhi(rtx *operands)
3625{
3626 rtx desta, srca, count;
3627 rtx desto, srco, counto;
3628
3629 desta = XEXP (operands[0], 0);
3630 srca = XEXP (operands[1], 0);
3631 count = operands[2];
3632
3633 desto = gen_reg_rtx (Pmode);
3634 srco = gen_reg_rtx (Pmode);
3635 counto = gen_reg_rtx (HImode);
3636
3637 if (GET_CODE (desta) != REG
3638 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3639 desta = copy_to_mode_reg (Pmode, desta);
3640
3641 if (GET_CODE (srca) != REG
3642 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3643 srca = copy_to_mode_reg (Pmode, srca);
3644
3645 /* Similar to setmem, but we don't need to check the value. */
3646 if (GET_CODE (count) == CONST_INT
3647 && ! (INTVAL (count) & 1)
3648 && (INTVAL (count) > 1))
3649 {
3650 count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3651 if (TARGET_A16)
3652 emit_insn (gen_movmemhi_whi_op (desto, srco, counto, desta, srca, count));
3653 else
3654 emit_insn (gen_movmemhi_wpsi_op (desto, srco, counto, desta, srca, count));
3655 return 1;
3656 }
3657
3658   /* This is the generalized memcpy() case. */
3659 if (GET_CODE (count) != REG
3660 || REGNO (count) < FIRST_PSEUDO_REGISTER)
3661 count = copy_to_mode_reg (HImode, count);
3662
3663 if (TARGET_A16)
3664 emit_insn (gen_movmemhi_bhi_op (desto, srco, counto, desta, srca, count));
3665 else
3666 emit_insn (gen_movmemhi_bpsi_op (desto, srco, counto, desta, srca, count));
3667
3668 return 1;
3669}
3670
3671/* This is a stpcpy() opcode. $0 is the destination (MEM:BLK) after
3672 the copy, which should point to the NUL at the end of the string,
3673 $1 is the destination (MEM:BLK), and $2 is the source (MEM:BLK).
3674 Since our opcode leaves the destination pointing *after* the NUL,
3675 we must emit an adjustment. */
3676int
3677m32c_expand_movstr(rtx *operands)
3678{
3679 rtx desta, srca;
3680 rtx desto, srco;
3681
3682 desta = XEXP (operands[1], 0);
3683 srca = XEXP (operands[2], 0);
3684
3685 desto = gen_reg_rtx (Pmode);
3686 srco = gen_reg_rtx (Pmode);
3687
3688 if (GET_CODE (desta) != REG
3689 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3690 desta = copy_to_mode_reg (Pmode, desta);
3691
3692 if (GET_CODE (srca) != REG
3693 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3694 srca = copy_to_mode_reg (Pmode, srca);
3695
3696 emit_insn (gen_movstr_op (desto, srco, desta, srca));
3697 /* desto ends up being a1, which allows this type of add through MOVA. */
3698 emit_insn (gen_addpsi3 (operands[0], desto, GEN_INT (-1)));
3699
3700 return 1;
3701}
3702
3703/* This is a strcmp() opcode. $0 is the destination (HI) which holds
3704 <=>0 depending on the comparison, $1 is one string (MEM:BLK), and
3705 $2 is the other (MEM:BLK). We must do the comparison, and then
3706 convert the flags to a signed integer result. */
3707int
3708m32c_expand_cmpstr(rtx *operands)
3709{
3710 rtx src1a, src2a;
3711
3712 src1a = XEXP (operands[1], 0);
3713 src2a = XEXP (operands[2], 0);
3714
3715 if (GET_CODE (src1a) != REG
3716 || REGNO (src1a) < FIRST_PSEUDO_REGISTER)
3717 src1a = copy_to_mode_reg (Pmode, src1a);
3718
3719 if (GET_CODE (src2a) != REG
3720 || REGNO (src2a) < FIRST_PSEUDO_REGISTER)
3721 src2a = copy_to_mode_reg (Pmode, src2a);
3722
3723 emit_insn (gen_cmpstrhi_op (src1a, src2a, src1a, src2a));
3724 emit_insn (gen_cond_to_int (operands[0]));
3725
3726 return 1;
3727}
3728
3729
3730typedef rtx (*shift_gen_func)(rtx, rtx, rtx);
3731
3732static shift_gen_func
3733shift_gen_func_for (int mode, int code)
3734{
3735#define GFF(m,c,f) if (mode == m && code == c) return f
3736 GFF(QImode, ASHIFT, gen_ashlqi3_i);
3737 GFF(QImode, ASHIFTRT, gen_ashrqi3_i);
3738 GFF(QImode, LSHIFTRT, gen_lshrqi3_i);
3739 GFF(HImode, ASHIFT, gen_ashlhi3_i);
3740 GFF(HImode, ASHIFTRT, gen_ashrhi3_i);
3741 GFF(HImode, LSHIFTRT, gen_lshrhi3_i);
3742 GFF(PSImode, ASHIFT, gen_ashlpsi3_i);
3743 GFF(PSImode, ASHIFTRT, gen_ashrpsi3_i);
3744 GFF(PSImode, LSHIFTRT, gen_lshrpsi3_i);
3745 GFF(SImode, ASHIFT, TARGET_A16 ? gen_ashlsi3_16 : gen_ashlsi3_24);
3746 GFF(SImode, ASHIFTRT, TARGET_A16 ? gen_ashrsi3_16 : gen_ashrsi3_24);
3747 GFF(SImode, LSHIFTRT, TARGET_A16 ? gen_lshrsi3_16 : gen_lshrsi3_24);
3748#undef GFF
3749   gcc_unreachable ();
3750}
3751
3752/* The m32c only has one shift, but it takes a signed count. GCC
3753 doesn't want this, so we fake it by negating any shift count when
3754 we're pretending to shift the other way. Also, the shift count is
3755 limited to -8..8. It's slightly better to use two shifts for 9..15
3756 than to load the count into r1h, so we do that too. */
3757 int
3758 m32c_prepare_shift (rtx * operands, int scale, int shift_code)
3759 {
3760 enum machine_mode mode = GET_MODE (operands[0]);
3761 shift_gen_func func = shift_gen_func_for (mode, shift_code);
3762   rtx temp;
3763
3764 if (GET_CODE (operands[2]) == CONST_INT)
3765     {
3766 int maxc = TARGET_A24 && (mode == PSImode || mode == SImode) ? 32 : 8;
3767 int count = INTVAL (operands[2]) * scale;
3768
3769 while (count > maxc)
3770 {
3771 temp = gen_reg_rtx (mode);
3772 emit_insn (func (temp, operands[1], GEN_INT (maxc)));
3773 operands[1] = temp;
3774 count -= maxc;
3775 }
3776 while (count < -maxc)
3777 {
3778 temp = gen_reg_rtx (mode);
3779 emit_insn (func (temp, operands[1], GEN_INT (-maxc)));
3780 operands[1] = temp;
3781 count += maxc;
3782 }
3783 emit_insn (func (operands[0], operands[1], GEN_INT (count)));
3784 return 1;
3785     }
3786
3787 temp = gen_reg_rtx (QImode);
3788   if (scale < 0)
3789 /* The pattern has a NEG that corresponds to this. */
3790 emit_move_insn (temp, gen_rtx_NEG (QImode, operands[2]));
3791 else if (TARGET_A16 && mode == SImode)
3792 /* We do this because the code below may modify this, we don't
3793 want to modify the origin of this value. */
3794 emit_move_insn (temp, operands[2]);
3795   else
3796     /* We'll only use it for the shift, no point emitting a move. */
3797     temp = operands[2];
3798
3799   if (TARGET_A16 && GET_MODE_SIZE (mode) == 4)
3800 {
3801 /* The m16c has a limit of -16..16 for SI shifts, even when the
3802 shift count is in a register. Since there are so many targets
3803 of these shifts, it's better to expand the RTL here than to
3804 call a helper function.
3805
3806 The resulting code looks something like this:
3807
3808 cmp.b r1h,-16
3809 jge.b 1f
3810 shl.l -16,dest
3811 add.b r1h,16
3812 1f: cmp.b r1h,16
3813 jle.b 1f
3814 shl.l 16,dest
3815 sub.b r1h,16
3816 1f: shl.l r1h,dest
3817
3818 We take advantage of the fact that "negative" shifts are
3819 undefined to skip one of the comparisons. */
3820
3821 rtx count;
3822       rtx label, insn, tempvar;
3823
3824 emit_move_insn (operands[0], operands[1]);
3825
3826 count = temp;
3827 label = gen_label_rtx ();
3828 LABEL_NUSES (label) ++;
3829
3830 tempvar = gen_reg_rtx (mode);
3831
3832 if (shift_code == ASHIFT)
3833 {
3834 /* This is a left shift. We only need check positive counts. */
3835 emit_jump_insn (gen_cbranchqi4 (gen_rtx_LE (VOIDmode, 0, 0),
3836 count, GEN_INT (16), label));
833bf445
DD
3837 emit_insn (func (tempvar, operands[0], GEN_INT (8)));
3838 emit_insn (func (operands[0], tempvar, GEN_INT (8)));
2e160056
DD
3839 insn = emit_insn (gen_addqi3 (count, count, GEN_INT (-16)));
3840 emit_label_after (label, insn);
3841 }
3842 else
3843 {
 3844	  /* This is a right shift.  We only need to check negative counts.  */
3845 emit_jump_insn (gen_cbranchqi4 (gen_rtx_GE (VOIDmode, 0, 0),
3846 count, GEN_INT (-16), label));
833bf445
DD
3847 emit_insn (func (tempvar, operands[0], GEN_INT (-8)));
3848 emit_insn (func (operands[0], tempvar, GEN_INT (-8)));
2e160056
DD
3849 insn = emit_insn (gen_addqi3 (count, count, GEN_INT (16)));
3850 emit_label_after (label, insn);
3851 }
16659fcf
DD
3852 operands[1] = operands[0];
3853 emit_insn (func (operands[0], operands[0], count));
3854 return 1;
2e160056
DD
3855 }
3856
38b2d076
DD
3857 operands[2] = temp;
3858 return 0;
3859}
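/* An illustrative walk-through of the constant-count path above (not
   generated code, just the shifts the expander would emit): on an A16
   target, an SImode left shift by 20 has maxc == 8, so it is split into

       t1   = op1 << 8;
       t2   = t1  << 8;
       dest = t2  << 4;

   with each step emitted through the mode's shift_gen_func.  Negative
   (right) counts are split the same way in steps of -maxc.  */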
3860
12ea2512
DD
3861/* The m32c has a limited range of operations that work on PSImode
3862 values; we have to expand to SI, do the math, and truncate back to
3863 PSI. Yes, this is expensive, but hopefully gcc will learn to avoid
3864 those cases. */
3865void
3866m32c_expand_neg_mulpsi3 (rtx * operands)
3867{
3868 /* operands: a = b * i */
3869 rtx temp1; /* b as SI */
07127a0a
DD
3870 rtx scale /* i as SI */;
3871 rtx temp2; /* a*b as SI */
12ea2512
DD
3872
3873 temp1 = gen_reg_rtx (SImode);
3874 temp2 = gen_reg_rtx (SImode);
07127a0a
DD
3875 if (GET_CODE (operands[2]) != CONST_INT)
3876 {
3877 scale = gen_reg_rtx (SImode);
3878 emit_insn (gen_zero_extendpsisi2 (scale, operands[2]));
3879 }
3880 else
3881 scale = copy_to_mode_reg (SImode, operands[2]);
12ea2512
DD
3882
3883 emit_insn (gen_zero_extendpsisi2 (temp1, operands[1]));
07127a0a
DD
3884 temp2 = expand_simple_binop (SImode, MULT, temp1, scale, temp2, 1, OPTAB_LIB);
3885 emit_insn (gen_truncsipsi2 (operands[0], temp2));
12ea2512
DD
3886}
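/* A rough sketch of what the expansion above computes, in C terms (the
   cast notation is only illustrative):

       a = (psi) ((si) b * (si) i);

   i.e. both PSImode inputs are widened to SImode (zero-extended, or
   copied for a constant scale), the multiply is done in SImode, and
   the product is truncated back to PSImode.  */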
3887
38b2d076
DD
3888/* Pattern Output Functions */
3889
07127a0a
DD
3890int
3891m32c_expand_movcc (rtx *operands)
3892{
3893 rtx rel = operands[1];
0166ff05 3894
07127a0a
DD
3895 if (GET_CODE (rel) != EQ && GET_CODE (rel) != NE)
3896 return 1;
3897 if (GET_CODE (operands[2]) != CONST_INT
3898 || GET_CODE (operands[3]) != CONST_INT)
3899 return 1;
07127a0a
DD
3900 if (GET_CODE (rel) == NE)
3901 {
3902 rtx tmp = operands[2];
3903 operands[2] = operands[3];
3904 operands[3] = tmp;
f90b7a5a 3905 rel = gen_rtx_EQ (GET_MODE (rel), XEXP (rel, 0), XEXP (rel, 1));
07127a0a 3906 }
0166ff05 3907
0166ff05
DD
3908 emit_move_insn (operands[0],
3909 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
f90b7a5a 3910 rel,
0166ff05
DD
3911 operands[2],
3912 operands[3]));
07127a0a
DD
3913 return 0;
3914}
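/* For example, a conditional move such as

       r = (a != b) ? 3 : 7;

   is accepted above: the NE is rewritten as EQ with the two constant
   arms swapped, so the emitted IF_THEN_ELSE is effectively
   r = (a == b) ? 7 : 3.  Any comparison other than EQ/NE, or arms that
   are not constants, make the expander return 1 and fall back to the
   generic code.  */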
3915
3916/* Used for the "insv" pattern. Return nonzero to fail, else done. */
3917int
3918m32c_expand_insv (rtx *operands)
3919{
3920 rtx op0, src0, p;
3921 int mask;
3922
3923 if (INTVAL (operands[1]) != 1)
3924 return 1;
3925
9cb96754
N
3926 /* Our insv opcode (bset, bclr) can only insert a one-bit constant. */
3927 if (GET_CODE (operands[3]) != CONST_INT)
3928 return 1;
3929 if (INTVAL (operands[3]) != 0
3930 && INTVAL (operands[3]) != 1
3931 && INTVAL (operands[3]) != -1)
3932 return 1;
3933
07127a0a
DD
3934 mask = 1 << INTVAL (operands[2]);
3935
3936 op0 = operands[0];
3937 if (GET_CODE (op0) == SUBREG
3938 && SUBREG_BYTE (op0) == 0)
3939 {
3940 rtx sub = SUBREG_REG (op0);
3941 if (GET_MODE (sub) == HImode || GET_MODE (sub) == QImode)
3942 op0 = sub;
3943 }
3944
b3a13419 3945 if (!can_create_pseudo_p ()
07127a0a
DD
3946 || (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0)))
3947 src0 = op0;
3948 else
3949 {
3950 src0 = gen_reg_rtx (GET_MODE (op0));
3951 emit_move_insn (src0, op0);
3952 }
3953
3954 if (GET_MODE (op0) == HImode
3955 && INTVAL (operands[2]) >= 8
444d6efe 3956 && GET_CODE (op0) == MEM)
07127a0a
DD
3957 {
3958 /* We are little endian. */
0a81f074
RS
3959 rtx new_mem = gen_rtx_MEM (QImode, plus_constant (Pmode,
3960 XEXP (op0, 0), 1));
07127a0a
DD
3961 MEM_COPY_ATTRIBUTES (new_mem, op0);
3962 mask >>= 8;
3963 }
3964
8e4edce7
DD
3965 /* First, we generate a mask with the correct polarity. If we are
3966 storing a zero, we want an AND mask, so invert it. */
3967 if (INTVAL (operands[3]) == 0)
07127a0a 3968 {
16659fcf 3969	  /* Storing a zero, use an AND mask.  */
07127a0a
DD
3970 if (GET_MODE (op0) == HImode)
3971 mask ^= 0xffff;
3972 else
3973 mask ^= 0xff;
3974 }
8e4edce7
DD
3975 /* Now we need to properly sign-extend the mask in case we need to
3976 fall back to an AND or OR opcode. */
07127a0a
DD
3977 if (GET_MODE (op0) == HImode)
3978 {
3979 if (mask & 0x8000)
3980 mask -= 0x10000;
3981 }
3982 else
3983 {
3984 if (mask & 0x80)
3985 mask -= 0x100;
3986 }
3987
3988 switch ( (INTVAL (operands[3]) ? 4 : 0)
3989 + ((GET_MODE (op0) == HImode) ? 2 : 0)
3990 + (TARGET_A24 ? 1 : 0))
3991 {
3992 case 0: p = gen_andqi3_16 (op0, src0, GEN_INT (mask)); break;
3993 case 1: p = gen_andqi3_24 (op0, src0, GEN_INT (mask)); break;
3994 case 2: p = gen_andhi3_16 (op0, src0, GEN_INT (mask)); break;
3995 case 3: p = gen_andhi3_24 (op0, src0, GEN_INT (mask)); break;
3996 case 4: p = gen_iorqi3_16 (op0, src0, GEN_INT (mask)); break;
3997 case 5: p = gen_iorqi3_24 (op0, src0, GEN_INT (mask)); break;
3998 case 6: p = gen_iorhi3_16 (op0, src0, GEN_INT (mask)); break;
3999 case 7: p = gen_iorhi3_24 (op0, src0, GEN_INT (mask)); break;
653e2568 4000 default: p = NULL_RTX; break; /* Not reached, but silences a warning. */
07127a0a
DD
4001 }
4002
4003 emit_insn (p);
4004 return 0;
4005}
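/* A worked example of the mask logic above (values shown only for
   illustration): storing the constant 0 into bit 3 of an HImode
   destination on an A24 target gives

       mask  = 1 << 3      =  0x0008
       mask ^= 0xffff      =  0xfff7     (AND mask, since we store 0)
       sign-extended       =  -9

   and selects case 3, gen_andhi3_24 (op0, src0, GEN_INT (-9)), which
   the machine description can match as a single bclr.  Storing a 1
   keeps the 0x0008 mask and takes one of the IOR (bset) cases.  */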
4006
4007const char *
4008m32c_scc_pattern(rtx *operands, RTX_CODE code)
4009{
4010 static char buf[30];
4011 if (GET_CODE (operands[0]) == REG
4012 && REGNO (operands[0]) == R0_REGNO)
4013 {
4014 if (code == EQ)
4015 return "stzx\t#1,#0,r0l";
4016 if (code == NE)
4017 return "stzx\t#0,#1,r0l";
4018 }
4019 sprintf(buf, "bm%s\t0,%%h0\n\tand.b\t#1,%%0", GET_RTX_NAME (code));
4020 return buf;
4021}
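/* Example outputs of the above: with r0 as the destination, EQ and NE
   get the two-operand store forms "stzx #1,#0,r0l" and
   "stzx #0,#1,r0l"; any other case, say GEU, produces the generic
   sequence "bmgeu 0,%h0" followed by "and.b #1,%0", with %h0 and %0
   filled in as the usual operand placeholders.  */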
4022
5abd2125
JS
4023/* Encode symbol attributes of a SYMBOL_REF into its
4024 SYMBOL_REF_FLAGS. */
4025static void
4026m32c_encode_section_info (tree decl, rtx rtl, int first)
4027{
4028 int extra_flags = 0;
4029
4030 default_encode_section_info (decl, rtl, first);
4031 if (TREE_CODE (decl) == FUNCTION_DECL
4032 && m32c_special_page_vector_p (decl))
4033
4034 extra_flags = SYMBOL_FLAG_FUNCVEC_FUNCTION;
4035
4036 if (extra_flags)
4037 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= extra_flags;
4038}
4039
38b2d076
DD
4040/* Returns TRUE if the current function is a leaf, and thus we can
4041 determine which registers an interrupt function really needs to
4042 save. The logic below is mostly about finding the insn sequence
4043 that's the function, versus any sequence that might be open for the
4044 current insn. */
4045static int
4046m32c_leaf_function_p (void)
4047{
4048 rtx saved_first, saved_last;
4049 struct sequence_stack *seq;
4050 int rv;
4051
3e029763
JH
4052 saved_first = crtl->emit.x_first_insn;
4053 saved_last = crtl->emit.x_last_insn;
4054 for (seq = crtl->emit.sequence_stack; seq && seq->next; seq = seq->next)
38b2d076
DD
4055 ;
4056 if (seq)
4057 {
3e029763
JH
4058 crtl->emit.x_first_insn = seq->first;
4059 crtl->emit.x_last_insn = seq->last;
38b2d076
DD
4060 }
4061
4062 rv = leaf_function_p ();
4063
3e029763
JH
4064 crtl->emit.x_first_insn = saved_first;
4065 crtl->emit.x_last_insn = saved_last;
38b2d076
DD
4066 return rv;
4067}
4068
4069/* Returns TRUE if the current function needs to use the ENTER/EXIT
4070 opcodes. If the function doesn't need the frame base or stack
4071 pointer, it can use the simpler RTS opcode. */
4072static bool
4073m32c_function_needs_enter (void)
4074{
4075 rtx insn;
4076 struct sequence_stack *seq;
4077 rtx sp = gen_rtx_REG (Pmode, SP_REGNO);
4078 rtx fb = gen_rtx_REG (Pmode, FB_REGNO);
4079
4080 insn = get_insns ();
3e029763 4081 for (seq = crtl->emit.sequence_stack;
38b2d076
DD
4082 seq;
4083 insn = seq->first, seq = seq->next);
4084
4085 while (insn)
4086 {
4087 if (reg_mentioned_p (sp, insn))
4088 return true;
4089 if (reg_mentioned_p (fb, insn))
4090 return true;
4091 insn = NEXT_INSN (insn);
4092 }
4093 return false;
4094}
4095
4096/* Mark all the subexpressions of the PARALLEL rtx PAR as
4097 frame-related. Return PAR.
4098
4099 dwarf2out.c:dwarf2out_frame_debug_expr ignores sub-expressions of a
4100 PARALLEL rtx other than the first if they do not have the
4101 FRAME_RELATED flag set on them. So this function is handy for
4102 marking up 'enter' instructions. */
4103static rtx
4104m32c_all_frame_related (rtx par)
4105{
4106 int len = XVECLEN (par, 0);
4107 int i;
4108
4109 for (i = 0; i < len; i++)
4110 F (XVECEXP (par, 0, i));
4111
4112 return par;
4113}
4114
4115/* Emits the prologue. See the frame layout comment earlier in this
 4116   file.  We can reserve up to 256 bytes with the ENTER opcode; beyond
 4117   that, we manually update sp.  */
4118void
4119m32c_emit_prologue (void)
4120{
4121 int frame_size, extra_frame_size = 0, reg_save_size;
4122 int complex_prologue = 0;
4123
4124 cfun->machine->is_leaf = m32c_leaf_function_p ();
4125 if (interrupt_p (cfun->decl))
4126 {
4127 cfun->machine->is_interrupt = 1;
4128 complex_prologue = 1;
4129 }
65655f79
DD
4130 else if (bank_switch_p (cfun->decl))
4131 warning (OPT_Wattributes,
4132 "%<bank_switch%> has no effect on non-interrupt functions");
38b2d076
DD
4133
4134 reg_save_size = m32c_pushm_popm (PP_justcount);
4135
4136 if (interrupt_p (cfun->decl))
65655f79
DD
4137 {
4138 if (bank_switch_p (cfun->decl))
4139 emit_insn (gen_fset_b ());
4140 else if (cfun->machine->intr_pushm)
4141 emit_insn (gen_pushm (GEN_INT (cfun->machine->intr_pushm)));
4142 }
38b2d076
DD
4143
4144 frame_size =
4145 m32c_initial_elimination_offset (FB_REGNO, SP_REGNO) - reg_save_size;
4146 if (frame_size == 0
38b2d076
DD
4147 && !m32c_function_needs_enter ())
4148 cfun->machine->use_rts = 1;
4149
4150 if (frame_size > 254)
4151 {
4152 extra_frame_size = frame_size - 254;
4153 frame_size = 254;
4154 }
4155 if (cfun->machine->use_rts == 0)
4156 F (emit_insn (m32c_all_frame_related
4157 (TARGET_A16
fa9fd28a
RIL
4158 ? gen_prologue_enter_16 (GEN_INT (frame_size + 2))
4159 : gen_prologue_enter_24 (GEN_INT (frame_size + 4)))));
38b2d076
DD
4160
4161 if (extra_frame_size)
4162 {
4163 complex_prologue = 1;
4164 if (TARGET_A16)
4165 F (emit_insn (gen_addhi3 (gen_rtx_REG (HImode, SP_REGNO),
4166 gen_rtx_REG (HImode, SP_REGNO),
4167 GEN_INT (-extra_frame_size))));
4168 else
4169 F (emit_insn (gen_addpsi3 (gen_rtx_REG (PSImode, SP_REGNO),
4170 gen_rtx_REG (PSImode, SP_REGNO),
4171 GEN_INT (-extra_frame_size))));
4172 }
4173
4174 complex_prologue += m32c_pushm_popm (PP_pushm);
4175
4176 /* This just emits a comment into the .s file for debugging. */
4177 if (complex_prologue)
4178 emit_insn (gen_prologue_end ());
4179}
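/* A frame-size example for the prologue above: a function needing a
   300-byte frame exceeds the 254-byte cap checked above, so the
   expander emits ENTER with an operand of 254 + 2 (A16) or 254 + 4
   (A24), and then a separate add of -46 to the stack pointer, in
   HImode on A16 targets and PSImode otherwise.  */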
4180
4181/* Likewise, for the epilogue. The only exception is that, for
4182 interrupts, we must manually unwind the frame as the REIT opcode
4183 doesn't do that. */
4184void
4185m32c_emit_epilogue (void)
4186{
f0679612
DD
4187 int popm_count = m32c_pushm_popm (PP_justcount);
4188
38b2d076 4189 /* This just emits a comment into the .s file for debugging. */
f0679612 4190 if (popm_count > 0 || cfun->machine->is_interrupt)
38b2d076
DD
4191 emit_insn (gen_epilogue_start ());
4192
f0679612
DD
4193 if (popm_count > 0)
4194 m32c_pushm_popm (PP_popm);
38b2d076
DD
4195
4196 if (cfun->machine->is_interrupt)
4197 {
4198 enum machine_mode spmode = TARGET_A16 ? HImode : PSImode;
4199
65655f79
DD
4200 /* REIT clears B flag and restores $fp for us, but we still
4201 have to fix up the stack. USE_RTS just means we didn't
4202 emit ENTER. */
4203 if (!cfun->machine->use_rts)
4204 {
4205 emit_move_insn (gen_rtx_REG (spmode, A0_REGNO),
4206 gen_rtx_REG (spmode, FP_REGNO));
4207 emit_move_insn (gen_rtx_REG (spmode, SP_REGNO),
4208 gen_rtx_REG (spmode, A0_REGNO));
4209 /* We can't just add this to the POPM because it would be in
4210 the wrong order, and wouldn't fix the stack if we're bank
4211 switching. */
4212 if (TARGET_A16)
4213 emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, FP_REGNO)));
4214 else
4215 emit_insn (gen_poppsi (gen_rtx_REG (PSImode, FP_REGNO)));
4216 }
4217 if (!bank_switch_p (cfun->decl) && cfun->machine->intr_pushm)
4218 emit_insn (gen_popm (GEN_INT (cfun->machine->intr_pushm)));
4219
402f2db8
DD
4220 /* The FREIT (Fast REturn from InTerrupt) instruction should be
4221 generated only for M32C/M32CM targets (generate the REIT
4222 instruction otherwise). */
65655f79 4223 if (fast_interrupt_p (cfun->decl))
402f2db8
DD
4224 {
 4225	      /* Check if the fast_interrupt attribute is set for M32C or M32CM.  */
4226 if (TARGET_A24)
4227 {
4228 emit_jump_insn (gen_epilogue_freit ());
4229 }
 4230	      /* If the fast_interrupt attribute is set for an R8C or M16C
 4231		 target, ignore the attribute and generate a REIT
 4232		 instruction instead.  */
4233 else
4234 {
4235 warning (OPT_Wattributes,
4236 "%<fast_interrupt%> attribute directive ignored");
4237 emit_jump_insn (gen_epilogue_reit_16 ());
4238 }
4239 }
65655f79 4240 else if (TARGET_A16)
0e0642aa
RIL
4241 emit_jump_insn (gen_epilogue_reit_16 ());
4242 else
4243 emit_jump_insn (gen_epilogue_reit_24 ());
38b2d076
DD
4244 }
4245 else if (cfun->machine->use_rts)
4246 emit_jump_insn (gen_epilogue_rts ());
0e0642aa
RIL
4247 else if (TARGET_A16)
4248 emit_jump_insn (gen_epilogue_exitd_16 ());
38b2d076 4249 else
0e0642aa 4250 emit_jump_insn (gen_epilogue_exitd_24 ());
38b2d076
DD
4251}
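/* The return opcode chosen above, in summary: a fast_interrupt
   function on an M32C/M32CM (A24) target returns with FREIT; other
   interrupt handlers return with REIT (16- or 24-bit variant); a
   non-interrupt function that needed neither a frame nor any sp/fb
   reference returns with a plain RTS; everything else uses EXITD.  */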
4252
4253void
4254m32c_emit_eh_epilogue (rtx ret_addr)
4255{
4256 /* R0[R2] has the stack adjustment. R1[R3] has the address to
4257 return to. We have to fudge the stack, pop everything, pop SP
4258 (fudged), and return (fudged). This is actually easier to do in
4259 assembler, so punt to libgcc. */
4260 emit_jump_insn (gen_eh_epilogue (ret_addr, cfun->machine->eh_stack_adjust));
c41c1387 4261 /* emit_clobber (gen_rtx_REG (HImode, R0L_REGNO)); */
38b2d076
DD
4262}
4263
16659fcf
DD
4264/* Indicate which flags must be properly set for a given conditional. */
4265static int
4266flags_needed_for_conditional (rtx cond)
4267{
4268 switch (GET_CODE (cond))
4269 {
4270 case LE:
4271 case GT:
4272 return FLAGS_OSZ;
4273 case LEU:
4274 case GTU:
4275 return FLAGS_ZC;
4276 case LT:
4277 case GE:
4278 return FLAGS_OS;
4279 case LTU:
4280 case GEU:
4281 return FLAGS_C;
4282 case EQ:
4283 case NE:
4284 return FLAGS_Z;
4285 default:
4286 return FLAGS_N;
4287 }
4288}
4289
4290#define DEBUG_CMP 0
4291
4292/* Returns true if a compare insn is redundant because it would only
4293 set flags that are already set correctly. */
4294static bool
4295m32c_compare_redundant (rtx cmp, rtx *operands)
4296{
4297 int flags_needed;
4298 int pflags;
4299 rtx prev, pp, next;
444d6efe 4300 rtx op0, op1;
16659fcf
DD
4301#if DEBUG_CMP
4302 int prev_icode, i;
4303#endif
4304
4305 op0 = operands[0];
4306 op1 = operands[1];
16659fcf
DD
4307
4308#if DEBUG_CMP
4309 fprintf(stderr, "\n\033[32mm32c_compare_redundant\033[0m\n");
4310 debug_rtx(cmp);
4311 for (i=0; i<2; i++)
4312 {
4313 fprintf(stderr, "operands[%d] = ", i);
4314 debug_rtx(operands[i]);
4315 }
4316#endif
4317
4318 next = next_nonnote_insn (cmp);
4319 if (!next || !INSN_P (next))
4320 {
4321#if DEBUG_CMP
4322 fprintf(stderr, "compare not followed by insn\n");
4323 debug_rtx(next);
4324#endif
4325 return false;
4326 }
4327 if (GET_CODE (PATTERN (next)) == SET
4328 && GET_CODE (XEXP ( PATTERN (next), 1)) == IF_THEN_ELSE)
4329 {
4330 next = XEXP (XEXP (PATTERN (next), 1), 0);
4331 }
4332 else if (GET_CODE (PATTERN (next)) == SET)
4333 {
4334 /* If this is a conditional, flags_needed will be something
4335 other than FLAGS_N, which we test below. */
4336 next = XEXP (PATTERN (next), 1);
4337 }
4338 else
4339 {
4340#if DEBUG_CMP
4341 fprintf(stderr, "compare not followed by conditional\n");
4342 debug_rtx(next);
4343#endif
4344 return false;
4345 }
4346#if DEBUG_CMP
4347 fprintf(stderr, "conditional is: ");
4348 debug_rtx(next);
4349#endif
4350
4351 flags_needed = flags_needed_for_conditional (next);
4352 if (flags_needed == FLAGS_N)
4353 {
4354#if DEBUG_CMP
 4355      fprintf(stderr, "conditional needs no flags we track\n");
4356 debug_rtx(next);
4357#endif
4358 return false;
4359 }
4360
4361 /* Compare doesn't set overflow and carry the same way that
4362 arithmetic instructions do, so we can't replace those. */
4363 if (flags_needed & FLAGS_OC)
4364 return false;
4365
4366 prev = cmp;
4367 do {
4368 prev = prev_nonnote_insn (prev);
4369 if (!prev)
4370 {
4371#if DEBUG_CMP
4372 fprintf(stderr, "No previous insn.\n");
4373#endif
4374 return false;
4375 }
4376 if (!INSN_P (prev))
4377 {
4378#if DEBUG_CMP
4379 fprintf(stderr, "Previous insn is a non-insn.\n");
4380#endif
4381 return false;
4382 }
4383 pp = PATTERN (prev);
4384 if (GET_CODE (pp) != SET)
4385 {
4386#if DEBUG_CMP
4387 fprintf(stderr, "Previous insn is not a SET.\n");
4388#endif
4389 return false;
4390 }
4391 pflags = get_attr_flags (prev);
4392
4393 /* Looking up attributes of previous insns corrupted the recog
4394 tables. */
4395 INSN_UID (cmp) = -1;
4396 recog (PATTERN (cmp), cmp, 0);
4397
4398 if (pflags == FLAGS_N
4399 && reg_mentioned_p (op0, pp))
4400 {
4401#if DEBUG_CMP
4402 fprintf(stderr, "intermediate non-flags insn uses op:\n");
4403 debug_rtx(prev);
4404#endif
4405 return false;
4406 }
b3c5a409
DD
4407
4408 /* Check for comparisons against memory - between volatiles and
4409 aliases, we just can't risk this one. */
 4410      if (GET_CODE (operands[0]) == MEM
 4411	  || GET_CODE (operands[1]) == MEM)
4412 {
4413#if DEBUG_CMP
4414 fprintf(stderr, "comparisons with memory:\n");
4415 debug_rtx(prev);
4416#endif
4417 return false;
4418 }
4419
4420 /* Check for PREV changing a register that's used to compute a
4421 value in CMP, even if it doesn't otherwise change flags. */
4422 if (GET_CODE (operands[0]) == REG
4423 && rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[0]))
4424 {
4425#if DEBUG_CMP
4426 fprintf(stderr, "sub-value affected, op0:\n");
4427 debug_rtx(prev);
4428#endif
4429 return false;
4430 }
4431 if (GET_CODE (operands[1]) == REG
4432 && rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[1]))
4433 {
4434#if DEBUG_CMP
4435 fprintf(stderr, "sub-value affected, op1:\n");
4436 debug_rtx(prev);
4437#endif
4438 return false;
4439 }
4440
16659fcf
DD
4441 } while (pflags == FLAGS_N);
4442#if DEBUG_CMP
4443 fprintf(stderr, "previous flag-setting insn:\n");
4444 debug_rtx(prev);
4445 debug_rtx(pp);
4446#endif
4447
4448 if (GET_CODE (pp) == SET
4449 && GET_CODE (XEXP (pp, 0)) == REG
4450 && REGNO (XEXP (pp, 0)) == FLG_REGNO
4451 && GET_CODE (XEXP (pp, 1)) == COMPARE)
4452 {
4453 /* Adjacent cbranches must have the same operands to be
4454 redundant. */
4455 rtx pop0 = XEXP (XEXP (pp, 1), 0);
4456 rtx pop1 = XEXP (XEXP (pp, 1), 1);
4457#if DEBUG_CMP
4458 fprintf(stderr, "adjacent cbranches\n");
4459 debug_rtx(pop0);
4460 debug_rtx(pop1);
4461#endif
4462 if (rtx_equal_p (op0, pop0)
4463 && rtx_equal_p (op1, pop1))
4464 return true;
4465#if DEBUG_CMP
4466 fprintf(stderr, "prev cmp not same\n");
4467#endif
4468 return false;
4469 }
4470
4471 /* Else the previous insn must be a SET, with either the source or
4472 dest equal to operands[0], and operands[1] must be zero. */
4473
4474 if (!rtx_equal_p (op1, const0_rtx))
4475 {
4476#if DEBUG_CMP
4477 fprintf(stderr, "operands[1] not const0_rtx\n");
4478#endif
4479 return false;
4480 }
4481 if (GET_CODE (pp) != SET)
4482 {
4483#if DEBUG_CMP
4484 fprintf (stderr, "pp not set\n");
4485#endif
4486 return false;
4487 }
4488 if (!rtx_equal_p (op0, SET_SRC (pp))
4489 && !rtx_equal_p (op0, SET_DEST (pp)))
4490 {
4491#if DEBUG_CMP
4492 fprintf(stderr, "operands[0] not found in set\n");
4493#endif
4494 return false;
4495 }
4496
4497#if DEBUG_CMP
4498 fprintf(stderr, "cmp flags %x prev flags %x\n", flags_needed, pflags);
4499#endif
4500 if ((pflags & flags_needed) == flags_needed)
4501 return true;
4502
4503 return false;
4504}
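/* A hypothetical instance of what the test above recognizes (assuming
   the machine description marks the move below as setting the S and Z
   flags):

       mov.w  r0,_foo      ; previous SET whose source is r0
       cmp.w  #0,r0        ; would only set Z again -- redundant
       jeq    .L1          ; the branch needs FLAGS_Z only

   Compares against memory, or against anything other than zero
   (except an exact repeat of an adjacent compare), are never treated
   as redundant.  */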
4505
4506/* Return the pattern for a compare. This will be commented out if
4507 the compare is redundant, else a normal pattern is returned. Thus,
4508 the assembler output says where the compare would have been. */
4509char *
4510m32c_output_compare (rtx insn, rtx *operands)
4511{
0a2aaacc 4512 static char templ[] = ";cmp.b\t%1,%0";
16659fcf
DD
4513 /* ^ 5 */
4514
0a2aaacc 4515 templ[5] = " bwll"[GET_MODE_SIZE(GET_MODE(operands[0]))];
16659fcf
DD
4516 if (m32c_compare_redundant (insn, operands))
4517 {
4518#if DEBUG_CMP
4519 fprintf(stderr, "cbranch: cmp not needed\n");
4520#endif
0a2aaacc 4521 return templ;
16659fcf
DD
4522 }
4523
4524#if DEBUG_CMP
b3c5a409 4525 fprintf(stderr, "cbranch: cmp needed: `%s'\n", templ + 1);
16659fcf 4526#endif
0a2aaacc 4527 return templ + 1;
16659fcf
DD
4528}
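/* Illustration of the template trick above: templ[5] is indexed into
   " bwll" by the operand size, giving ";cmp.b", ";cmp.w" or ";cmp.l"
   for 1-, 2- and 4-byte compares.  A redundant compare returns the
   whole string, so the leading ';' turns it into a comment in the .s
   output; otherwise templ + 1 skips the ';' and a real cmp is
   emitted.  */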
4529
5abd2125
JS
4530#undef TARGET_ENCODE_SECTION_INFO
4531#define TARGET_ENCODE_SECTION_INFO m32c_encode_section_info
4532
b52b1749
AS
4533/* If the frame pointer isn't used, we detect it manually. But the
4534 stack pointer doesn't have as flexible addressing as the frame
4535 pointer, so we always assume we have it. */
4536
4537#undef TARGET_FRAME_POINTER_REQUIRED
4538#define TARGET_FRAME_POINTER_REQUIRED hook_bool_void_true
4539
38b2d076
DD
4540/* The Global `targetm' Variable. */
4541
4542struct gcc_target targetm = TARGET_INITIALIZER;
4543
4544#include "gt-m32c.h"