38b2d076 1/* Target Code for R8C/M16C/M32C
23a5b65a 2 Copyright (C) 2005-2014 Free Software Foundation, Inc.
3 Contributed by Red Hat.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published
2f83c7d6 9 by the Free Software Foundation; either version 3, or (at your
10 option) any later version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21#include "config.h"
22#include "system.h"
23#include "coretypes.h"
24#include "tm.h"
25#include "rtl.h"
26#include "regs.h"
27#include "hard-reg-set.h"
28#include "insn-config.h"
29#include "conditions.h"
30#include "insn-flags.h"
31#include "output.h"
32#include "insn-attr.h"
33#include "flags.h"
34#include "recog.h"
35#include "reload.h"
718f9c0f 36#include "diagnostic-core.h"
37#include "obstack.h"
38#include "tree.h"
39#include "stor-layout.h"
40#include "varasm.h"
41#include "calls.h"
38b2d076 42#include "expr.h"
b0710fe1 43#include "insn-codes.h"
44#include "optabs.h"
45#include "except.h"
46#include "hashtab.h"
47#include "hash-set.h"
48#include "vec.h"
49#include "machmode.h"
50#include "input.h"
51#include "function.h"
52#include "ggc.h"
53#include "target.h"
54#include "target-def.h"
55#include "tm_p.h"
56#include "langhooks.h"
2fb9a547 57#include "hash-table.h"
58#include "predict.h"
59#include "dominance.h"
60#include "cfg.h"
61#include "cfgrtl.h"
62#include "cfganal.h"
63#include "lcm.h"
64#include "cfgbuild.h"
65#include "cfgcleanup.h"
66#include "basic-block.h"
67#include "tree-ssa-alias.h"
68#include "internal-fn.h"
69#include "gimple-fold.h"
70#include "tree-eh.h"
71#include "gimple-expr.h"
72#include "is-a.h"
726a989a 73#include "gimple.h"
fa9fd28a 74#include "df.h"
03dd17b1 75#include "tm-constrs.h"
9b2b7279 76#include "builtins.h"
77
78/* Prototypes */
79
80/* Used by m32c_pushm_popm. */
81typedef enum
82{
83 PP_pushm,
84 PP_popm,
85 PP_justcount
86} Push_Pop_Type;
87
65655f79 88static bool m32c_function_needs_enter (void);
38b2d076 89static tree interrupt_handler (tree *, tree, tree, int, bool *);
5abd2125 90static tree function_vector_handler (tree *, tree, tree, int, bool *);
38b2d076 91static int interrupt_p (tree node);
92static int bank_switch_p (tree node);
93static int fast_interrupt_p (tree node);
94static int interrupt_p (tree node);
38b2d076 95static bool m32c_asm_integer (rtx, unsigned int, int);
3101faab 96static int m32c_comp_type_attributes (const_tree, const_tree);
97static bool m32c_fixed_condition_code_regs (unsigned int *, unsigned int *);
98static struct machine_function *m32c_init_machine_status (void);
99static void m32c_insert_attributes (tree, tree *);
100static bool m32c_legitimate_address_p (machine_mode, rtx, bool);
101static bool m32c_addr_space_legitimate_address_p (machine_mode, rtx, bool, addr_space_t);
102static rtx m32c_function_arg (cumulative_args_t, machine_mode,
444d6efe 103 const_tree, bool);
ef4bddc2 104static bool m32c_pass_by_reference (cumulative_args_t, machine_mode,
586de218 105 const_tree, bool);
ef4bddc2 106static void m32c_function_arg_advance (cumulative_args_t, machine_mode,
cd34bbe8 107 const_tree, bool);
ef4bddc2 108static unsigned int m32c_function_arg_boundary (machine_mode, const_tree);
38b2d076 109static int m32c_pushm_popm (Push_Pop_Type);
d5cc9181 110static bool m32c_strict_argument_naming (cumulative_args_t);
38b2d076 111static rtx m32c_struct_value_rtx (tree, int);
ef4bddc2 112static rtx m32c_subreg (machine_mode, rtx, machine_mode, int);
38b2d076 113static int need_to_save (int);
2a31793e 114static rtx m32c_function_value (const_tree, const_tree, bool);
ef4bddc2 115static rtx m32c_libcall_value (machine_mode, const_rtx);
2a31793e 116
117/* Returns true if an address is specified, else false. */
118static bool m32c_get_pragma_address (const char *varname, unsigned *addr);
119
5abd2125 120#define SYMBOL_FLAG_FUNCVEC_FUNCTION (SYMBOL_FLAG_MACH_DEP << 0)
121
122#define streq(a,b) (strcmp ((a), (b)) == 0)
123
124/* Internal support routines */
125
126/* Debugging statements are tagged with DEBUG0 only so that they can
127 be easily enabled individually, by replacing the '0' with '1' as
128 needed. */
129#define DEBUG0 0
130#define DEBUG1 1
131
132#if DEBUG0
133/* This is needed by some of the commented-out debug statements
134 below. */
135static char const *class_names[LIM_REG_CLASSES] = REG_CLASS_NAMES;
136#endif
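/* A run-time-indexable copy of REG_CLASS_CONTENTS; used below by
   m32c_secondary_reload_class and m32c_cannot_change_mode_class.  */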
137static int class_contents[LIM_REG_CLASSES][1] = REG_CLASS_CONTENTS;
138
139/* These are all to support encode_pattern(). */
140static char pattern[30], *patternp;
141static GTY(()) rtx patternr[30];
142#define RTX_IS(x) (streq (pattern, x))
143
144/* Some macros to simplify the logic throughout this file. */
145#define IS_MEM_REGNO(regno) ((regno) >= MEM0_REGNO && (regno) <= MEM7_REGNO)
146#define IS_MEM_REG(rtx) (GET_CODE (rtx) == REG && IS_MEM_REGNO (REGNO (rtx)))
147
148#define IS_CR_REGNO(regno) ((regno) >= SB_REGNO && (regno) <= PC_REGNO)
149#define IS_CR_REG(rtx) (GET_CODE (rtx) == REG && IS_CR_REGNO (REGNO (rtx)))
150
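/* Returns nonzero if X is a MEM in the "far" address space.  */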
151static int
152far_addr_space_p (rtx x)
153{
154 if (GET_CODE (x) != MEM)
155 return 0;
156#if DEBUG0
157 fprintf(stderr, "\033[35mfar_addr_space: "); debug_rtx(x);
158 fprintf(stderr, " = %d\033[0m\n", MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR);
159#endif
160 return MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR;
161}
162
163/* We do most RTX matching by converting the RTX into a string, and
164 using string compares. This vastly simplifies the logic in many of
165 the functions in this file.
166
167 On exit, pattern[] has the encoded string (use RTX_IS("...") to
168 compare it) and patternr[] has pointers to the nodes in the RTX
169 corresponding to each character in the encoded string. The latter
170 is mostly used by print_operand().
171
172 Unrecognized patterns have '?' in them; this shows up when the
173 assembler complains about syntax errors.
174*/
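/* For example, (mem (plus (reg) (const_int))) encodes as "m+ri", with
   patternr[2] pointing at the inner REG and patternr[3] at the CONST_INT.  */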
175
176static void
177encode_pattern_1 (rtx x)
178{
179 int i;
180
181 if (patternp == pattern + sizeof (pattern) - 2)
182 {
183 patternp[-1] = '?';
184 return;
185 }
186
187 patternr[patternp - pattern] = x;
188
189 switch (GET_CODE (x))
190 {
191 case REG:
192 *patternp++ = 'r';
193 break;
194 case SUBREG:
195 if (GET_MODE_SIZE (GET_MODE (x)) !=
196 GET_MODE_SIZE (GET_MODE (XEXP (x, 0))))
197 *patternp++ = 'S';
198 encode_pattern_1 (XEXP (x, 0));
199 break;
200 case MEM:
201 *patternp++ = 'm';
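      /* Fall through: a MEM's address operand is encoded the same way a
	 CONST's operand is.  */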
202 case CONST:
203 encode_pattern_1 (XEXP (x, 0));
204 break;
205 case SIGN_EXTEND:
206 *patternp++ = '^';
207 *patternp++ = 'S';
208 encode_pattern_1 (XEXP (x, 0));
209 break;
210 case ZERO_EXTEND:
211 *patternp++ = '^';
212 *patternp++ = 'Z';
213 encode_pattern_1 (XEXP (x, 0));
214 break;
215 case PLUS:
216 *patternp++ = '+';
217 encode_pattern_1 (XEXP (x, 0));
218 encode_pattern_1 (XEXP (x, 1));
219 break;
220 case PRE_DEC:
221 *patternp++ = '>';
222 encode_pattern_1 (XEXP (x, 0));
223 break;
224 case POST_INC:
225 *patternp++ = '<';
226 encode_pattern_1 (XEXP (x, 0));
227 break;
228 case LO_SUM:
229 *patternp++ = 'L';
230 encode_pattern_1 (XEXP (x, 0));
231 encode_pattern_1 (XEXP (x, 1));
232 break;
233 case HIGH:
234 *patternp++ = 'H';
235 encode_pattern_1 (XEXP (x, 0));
236 break;
237 case SYMBOL_REF:
238 *patternp++ = 's';
239 break;
240 case LABEL_REF:
241 *patternp++ = 'l';
242 break;
243 case CODE_LABEL:
244 *patternp++ = 'c';
245 break;
246 case CONST_INT:
247 case CONST_DOUBLE:
248 *patternp++ = 'i';
249 break;
250 case UNSPEC:
251 *patternp++ = 'u';
252 *patternp++ = '0' + XCINT (x, 1, UNSPEC);
253 for (i = 0; i < XVECLEN (x, 0); i++)
254 encode_pattern_1 (XVECEXP (x, 0, i));
255 break;
256 case USE:
257 *patternp++ = 'U';
258 break;
259 case PARALLEL:
260 *patternp++ = '|';
261 for (i = 0; i < XVECLEN (x, 0); i++)
262 encode_pattern_1 (XVECEXP (x, 0, i));
263 break;
264 case EXPR_LIST:
265 *patternp++ = 'E';
266 encode_pattern_1 (XEXP (x, 0));
267 if (XEXP (x, 1))
268 encode_pattern_1 (XEXP (x, 1));
269 break;
270 default:
271 *patternp++ = '?';
272#if DEBUG0
273 fprintf (stderr, "can't encode pattern %s\n",
274 GET_RTX_NAME (GET_CODE (x)));
275 debug_rtx (x);
276 gcc_unreachable ();
277#endif
278 break;
279 }
280}
281
282static void
283encode_pattern (rtx x)
284{
285 patternp = pattern;
286 encode_pattern_1 (x);
287 *patternp = 0;
288}
289
290/* Since register names indicate the mode they're used in, we need a
291 way to determine which name to refer to the register with. Called
292 by print_operand(). */
293
294static const char *
ef4bddc2 295reg_name_with_mode (int regno, machine_mode mode)
296{
297 int mlen = GET_MODE_SIZE (mode);
298 if (regno == R0_REGNO && mlen == 1)
299 return "r0l";
300 if (regno == R0_REGNO && (mlen == 3 || mlen == 4))
301 return "r2r0";
302 if (regno == R0_REGNO && mlen == 6)
303 return "r2r1r0";
304 if (regno == R0_REGNO && mlen == 8)
305 return "r3r1r2r0";
306 if (regno == R1_REGNO && mlen == 1)
307 return "r1l";
308 if (regno == R1_REGNO && (mlen == 3 || mlen == 4))
309 return "r3r1";
310 if (regno == A0_REGNO && TARGET_A16 && (mlen == 3 || mlen == 4))
311 return "a1a0";
312 return reg_names[regno];
313}
314
315/* How many bytes a register uses on stack when it's pushed. We need
316 to know this because the push opcode needs to explicitly indicate
317 the size of the register, even though the name of the register
318 already tells it that. Used by m32c_output_reg_{push,pop}, which
319 is only used through calls to ASM_OUTPUT_REG_{PUSH,POP}. */
320
321static int
322reg_push_size (int regno)
323{
324 switch (regno)
325 {
326 case R0_REGNO:
327 case R1_REGNO:
328 return 2;
329 case R2_REGNO:
330 case R3_REGNO:
331 case FLG_REGNO:
332 return 2;
333 case A0_REGNO:
334 case A1_REGNO:
335 case SB_REGNO:
336 case FB_REGNO:
337 case SP_REGNO:
338 if (TARGET_A16)
339 return 2;
340 else
341 return 3;
342 default:
343 gcc_unreachable ();
344 }
345}
346
347/* Given two register classes, find the largest intersection between
348 them. If there is no intersection, return RETURNED_IF_EMPTY
349 instead. */
350static reg_class_t
351reduce_class (reg_class_t original_class, reg_class_t limiting_class,
352 reg_class_t returned_if_empty)
38b2d076 353{
354 HARD_REG_SET cc;
355 int i;
356 reg_class_t best = NO_REGS;
357 unsigned int best_size = 0;
358
359 if (original_class == limiting_class)
360 return original_class;
361
362 cc = reg_class_contents[original_class];
363 AND_HARD_REG_SET (cc, reg_class_contents[limiting_class]);
38b2d076 364
365 for (i = 0; i < LIM_REG_CLASSES; i++)
366 {
367 if (hard_reg_set_subset_p (reg_class_contents[i], cc))
368 if (best_size < reg_class_size[i])
38b2d076 369 {
370 best = (reg_class_t) i;
371 best_size = reg_class_size[i];
372 }
373
374 }
375 if (best == NO_REGS)
376 return returned_if_empty;
377 return best;
378}
379
380/* Used by m32c_register_move_cost to determine if a move is
381 impossibly expensive. */
0e607518 382static bool
ef4bddc2 383class_can_hold_mode (reg_class_t rclass, machine_mode mode)
384{
385 /* Cache the results: 0=untested 1=no 2=yes */
386 static char results[LIM_REG_CLASSES][MAX_MACHINE_MODE];
387
388 if (results[(int) rclass][mode] == 0)
38b2d076 389 {
0e607518 390 int r;
0a2aaacc 391 results[rclass][mode] = 1;
38b2d076 392 for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
0e607518 393 if (in_hard_reg_set_p (reg_class_contents[(int) rclass], mode, r)
394 && HARD_REGNO_MODE_OK (r, mode))
395 {
396 results[rclass][mode] = 2;
397 break;
398 }
399 }
0e607518 400
401#if DEBUG0
402 fprintf (stderr, "class %s can hold %s? %s\n",
0e607518 403 class_names[(int) rclass], mode_name[mode],
0a2aaacc 404 (results[rclass][mode] == 2) ? "yes" : "no");
38b2d076 405#endif
0e607518 406 return results[(int) rclass][mode] == 2;
407}
408
409/* Run-time Target Specification. */
410
411/* Memregs are memory locations that gcc treats like general
412 registers, as there are a limited number of true registers and the
413 m32c families can use memory in most places that registers can be
414 used.
415
416 However, since memory accesses are more expensive than registers,
417 we allow the user to limit the number of memregs available, in
418 order to try to persuade gcc to try harder to use real registers.
419
45b86625 420 Memregs are provided by lib1funcs.S.
421*/
422
423int ok_to_change_target_memregs = TRUE;
424
425/* Implements TARGET_OPTION_OVERRIDE. */
426
427#undef TARGET_OPTION_OVERRIDE
428#define TARGET_OPTION_OVERRIDE m32c_option_override
429
430static void
431m32c_option_override (void)
38b2d076 432{
f28f2337 433 /* We limit memregs to 0..16, and provide a default. */
bbfc9a8c 434 if (global_options_set.x_target_memregs)
435 {
436 if (target_memregs < 0 || target_memregs > 16)
437 error ("invalid target memregs value '%d'", target_memregs);
438 }
439 else
07127a0a 440 target_memregs = 16;
441
442 if (TARGET_A24)
443 flag_ivopts = 0;
444
445 /* This target defaults to strict volatile bitfields. */
36acc1a2 446 if (flag_strict_volatile_bitfields < 0 && abi_version_at_least(2))
0685e770 447 flag_strict_volatile_bitfields = 1;
448
449 /* r8c/m16c have no 16-bit indirect call, so thunks are involved.
450 This is always worse than an absolute call. */
451 if (TARGET_A16)
452 flag_no_function_cse = 1;
453
454 /* This wants to put insns between compares and their jumps. */
455 /* FIXME: The right solution is to properly trace the flags register
456 values, but that is too much work for stage 4. */
457 flag_combine_stack_adjustments = 0;
458}
459
460#undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
461#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m32c_override_options_after_change
462
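/* Implements TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE.  Re-apply the A16
   setting from m32c_option_override above when options change, since
   an indirect call is always worse than an absolute one there.  */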
463static void
464m32c_override_options_after_change (void)
465{
466 if (TARGET_A16)
467 flag_no_function_cse = 1;
468}
469
470/* Defining data structures for per-function information */
471
472/* The usual; we set up our machine_function data. */
473static struct machine_function *
474m32c_init_machine_status (void)
475{
766090c2 476 return ggc_cleared_alloc<machine_function> ();
477}
478
479/* Implements INIT_EXPANDERS. We just set up to call the above
480 function. */
481void
482m32c_init_expanders (void)
483{
484 init_machine_status = m32c_init_machine_status;
485}
486
487/* Storage Layout */
488
489/* Register Basics */
490
491/* Basic Characteristics of Registers */
492
493/* Whether a mode fits in a register is complex enough to warrant a
494 table. */
495static struct
496{
497 char qi_regs;
498 char hi_regs;
499 char pi_regs;
500 char si_regs;
501 char di_regs;
502} nregs_table[FIRST_PSEUDO_REGISTER] =
503{
504 { 1, 1, 2, 2, 4 }, /* r0 */
505 { 0, 1, 0, 0, 0 }, /* r2 */
506 { 1, 1, 2, 2, 0 }, /* r1 */
507 { 0, 1, 0, 0, 0 }, /* r3 */
508 { 0, 1, 1, 0, 0 }, /* a0 */
509 { 0, 1, 1, 0, 0 }, /* a1 */
510 { 0, 1, 1, 0, 0 }, /* sb */
511 { 0, 1, 1, 0, 0 }, /* fb */
512 { 0, 1, 1, 0, 0 }, /* sp */
513 { 1, 1, 1, 0, 0 }, /* pc */
514 { 0, 0, 0, 0, 0 }, /* fl */
515 { 1, 1, 1, 0, 0 }, /* ap */
516 { 1, 1, 2, 2, 4 }, /* mem0 */
517 { 1, 1, 2, 2, 4 }, /* mem1 */
518 { 1, 1, 2, 2, 4 }, /* mem2 */
519 { 1, 1, 2, 2, 4 }, /* mem3 */
520 { 1, 1, 2, 2, 4 }, /* mem4 */
521 { 1, 1, 2, 2, 0 }, /* mem5 */
522 { 1, 1, 2, 2, 0 }, /* mem6 */
523 { 1, 1, 0, 0, 0 }, /* mem7 */
524};
525
526/* Implements TARGET_CONDITIONAL_REGISTER_USAGE. We adjust the number
527 of available memregs, and select which registers need to be preserved
528 across calls based on the chip family. */
529
530#undef TARGET_CONDITIONAL_REGISTER_USAGE
531#define TARGET_CONDITIONAL_REGISTER_USAGE m32c_conditional_register_usage
d6d17ae7 532void
533m32c_conditional_register_usage (void)
534{
535 int i;
536
537 if (0 <= target_memregs && target_memregs <= 16)
538 {
539 /* The command line option is bytes, but our "registers" are
540 16-bit words. */
65655f79 541 for (i = (target_memregs+1)/2; i < 8; i++)
542 {
543 fixed_regs[MEM0_REGNO + i] = 1;
544 CLEAR_HARD_REG_BIT (reg_class_contents[MEM_REGS], MEM0_REGNO + i);
545 }
546 }
547
548 /* M32CM and M32C preserve more registers across function calls. */
549 if (TARGET_A24)
550 {
551 call_used_regs[R1_REGNO] = 0;
552 call_used_regs[R2_REGNO] = 0;
553 call_used_regs[R3_REGNO] = 0;
554 call_used_regs[A0_REGNO] = 0;
555 call_used_regs[A1_REGNO] = 0;
556 }
557}
558
559/* How Values Fit in Registers */
560
561/* Implements HARD_REGNO_NREGS. This is complicated by the fact that
562 different registers are different sizes from each other, *and* may
563 be different sizes in different chip families. */
b8a669d0 564static int
ef4bddc2 565m32c_hard_regno_nregs_1 (int regno, machine_mode mode)
566{
567 if (regno == FLG_REGNO && mode == CCmode)
568 return 1;
569 if (regno >= FIRST_PSEUDO_REGISTER)
570 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
571
572 if (regno >= MEM0_REGNO && regno <= MEM7_REGNO)
573 return (GET_MODE_SIZE (mode) + 1) / 2;
574
575 if (GET_MODE_SIZE (mode) <= 1)
576 return nregs_table[regno].qi_regs;
577 if (GET_MODE_SIZE (mode) <= 2)
578 return nregs_table[regno].hi_regs;
5fd5d713 579 if (regno == A0_REGNO && mode == SImode && TARGET_A16)
580 return 2;
581 if ((GET_MODE_SIZE (mode) <= 3 || mode == PSImode) && TARGET_A24)
582 return nregs_table[regno].pi_regs;
583 if (GET_MODE_SIZE (mode) <= 4)
584 return nregs_table[regno].si_regs;
585 if (GET_MODE_SIZE (mode) <= 8)
586 return nregs_table[regno].di_regs;
587 return 0;
588}
589
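/* Implements HARD_REGNO_NREGS.  The helper above returns 0 for
   unsupported (regno, mode) pairs; callers expect at least 1.  */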
b8a669d0 590int
ef4bddc2 591m32c_hard_regno_nregs (int regno, machine_mode mode)
592{
593 int rv = m32c_hard_regno_nregs_1 (regno, mode);
594 return rv ? rv : 1;
595}
596
597/* Implements HARD_REGNO_MODE_OK. The above function does the work
598 already; just test its return value. */
599int
ef4bddc2 600m32c_hard_regno_ok (int regno, machine_mode mode)
38b2d076 601{
b8a669d0 602 return m32c_hard_regno_nregs_1 (regno, mode) != 0;
603}
604
605/* Implements MODES_TIEABLE_P. In general, modes aren't tieable since
606 registers are all different sizes. However, since most modes are
607 bigger than our registers anyway, it's easier to implement this
608 function that way, leaving QImode as the only unique case. */
609int
ef4bddc2 610m32c_modes_tieable_p (machine_mode m1, machine_mode m2)
611{
612 if (GET_MODE_SIZE (m1) == GET_MODE_SIZE (m2))
613 return 1;
614
07127a0a 615#if 0
616 if (m1 == QImode || m2 == QImode)
617 return 0;
07127a0a 618#endif
619
620 return 1;
621}
622
623/* Register Classes */
624
625/* Implements REGNO_REG_CLASS. */
444d6efe 626enum reg_class
627m32c_regno_reg_class (int regno)
628{
629 switch (regno)
630 {
631 case R0_REGNO:
632 return R0_REGS;
633 case R1_REGNO:
634 return R1_REGS;
635 case R2_REGNO:
636 return R2_REGS;
637 case R3_REGNO:
638 return R3_REGS;
639 case A0_REGNO:
22843acd 640 return A0_REGS;
38b2d076 641 case A1_REGNO:
22843acd 642 return A1_REGS;
643 case SB_REGNO:
644 return SB_REGS;
645 case FB_REGNO:
646 return FB_REGS;
647 case SP_REGNO:
648 return SP_REGS;
649 case FLG_REGNO:
650 return FLG_REGS;
651 default:
652 if (IS_MEM_REGNO (regno))
653 return MEM_REGS;
654 return ALL_REGS;
655 }
656}
657
658/* Implements REGNO_OK_FOR_BASE_P. */
659int
660m32c_regno_ok_for_base_p (int regno)
661{
662 if (regno == A0_REGNO
663 || regno == A1_REGNO || regno >= FIRST_PSEUDO_REGISTER)
664 return 1;
665 return 0;
666}
667
668#define DEBUG_RELOAD 0
669
b05933f5 670/* Implements TARGET_PREFERRED_RELOAD_CLASS. In general, prefer general
38b2d076 671 registers of the appropriate size. */
672
673#undef TARGET_PREFERRED_RELOAD_CLASS
674#define TARGET_PREFERRED_RELOAD_CLASS m32c_preferred_reload_class
675
676static reg_class_t
677m32c_preferred_reload_class (rtx x, reg_class_t rclass)
38b2d076 678{
b05933f5 679 reg_class_t newclass = rclass;
680
681#if DEBUG_RELOAD
682 fprintf (stderr, "\npreferred_reload_class for %s is ",
683 class_names[rclass]);
684#endif
685 if (rclass == NO_REGS)
686 rclass = GET_MODE (x) == QImode ? HL_REGS : R03_REGS;
687
0e607518 688 if (reg_classes_intersect_p (rclass, CR_REGS))
689 {
690 switch (GET_MODE (x))
691 {
692 case QImode:
693 newclass = HL_REGS;
694 break;
695 default:
696 /* newclass = HI_REGS; */
697 break;
698 }
699 }
700
701 else if (newclass == QI_REGS && GET_MODE_SIZE (GET_MODE (x)) > 2)
702 newclass = SI_REGS;
703 else if (GET_MODE_SIZE (GET_MODE (x)) > 4
b05933f5 704 && ! reg_class_subset_p (R03_REGS, rclass))
705 newclass = DI_REGS;
706
707 rclass = reduce_class (rclass, newclass, rclass);
708
709 if (GET_MODE (x) == QImode)
710 rclass = reduce_class (rclass, HL_REGS, rclass);
711
712#if DEBUG_RELOAD
713 fprintf (stderr, "%s\n", class_names[rclass]);
714 debug_rtx (x);
715
716 if (GET_CODE (x) == MEM
717 && GET_CODE (XEXP (x, 0)) == PLUS
718 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
719 fprintf (stderr, "Glorm!\n");
720#endif
721 return rclass;
722}
723
724/* Implements TARGET_PREFERRED_OUTPUT_RELOAD_CLASS. */
725
726#undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
727#define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS m32c_preferred_output_reload_class
728
729static reg_class_t
730m32c_preferred_output_reload_class (rtx x, reg_class_t rclass)
731{
732 return m32c_preferred_reload_class (x, rclass);
733}
734
735/* Implements LIMIT_RELOAD_CLASS. We basically want to avoid using
736 address registers for reloads since they're needed for address
737 reloads. */
738int
ef4bddc2 739m32c_limit_reload_class (machine_mode mode, int rclass)
740{
741#if DEBUG_RELOAD
742 fprintf (stderr, "limit_reload_class for %s: %s ->",
743 mode_name[mode], class_names[rclass]);
744#endif
745
746 if (mode == QImode)
747 rclass = reduce_class (rclass, HL_REGS, rclass);
748 else if (mode == HImode)
749 rclass = reduce_class (rclass, HI_REGS, rclass);
750 else if (mode == SImode)
751 rclass = reduce_class (rclass, SI_REGS, rclass);
752
753 if (rclass != A_REGS)
754 rclass = reduce_class (rclass, DI_REGS, rclass);
755
756#if DEBUG_RELOAD
757 fprintf (stderr, " %s\n", class_names[rclass]);
758#endif
759 return rclass;
760}
761
 762/* Implements SECONDARY_RELOAD_CLASS. QImode values have to be reloaded in
763 r0 or r1, as those are the only real QImode registers. CR regs get
764 reloaded through appropriately sized general or address
765 registers. */
766int
ef4bddc2 767m32c_secondary_reload_class (int rclass, machine_mode mode, rtx x)
768{
769 int cc = class_contents[rclass][0];
770#if DEBUG0
771 fprintf (stderr, "\nsecondary reload class %s %s\n",
772 class_names[rclass], mode_name[mode]);
773 debug_rtx (x);
774#endif
775 if (mode == QImode
776 && GET_CODE (x) == MEM && (cc & ~class_contents[R23_REGS][0]) == 0)
777 return QI_REGS;
0e607518 778 if (reg_classes_intersect_p (rclass, CR_REGS)
779 && GET_CODE (x) == REG
780 && REGNO (x) >= SB_REGNO && REGNO (x) <= SP_REGNO)
13a23442 781 return (TARGET_A16 || mode == HImode) ? HI_REGS : A_REGS;
782 return NO_REGS;
783}
784
184866c5 785/* Implements TARGET_CLASS_LIKELY_SPILLED_P. A_REGS is needed for address
38b2d076 786 reloads. */
787
788#undef TARGET_CLASS_LIKELY_SPILLED_P
789#define TARGET_CLASS_LIKELY_SPILLED_P m32c_class_likely_spilled_p
790
791static bool
792m32c_class_likely_spilled_p (reg_class_t regclass)
793{
794 if (regclass == A_REGS)
184866c5
AS
795 return true;
796
797 return (reg_class_size[(int) regclass] == 1);
798}
799
c4831cff 800/* Implements TARGET_CLASS_MAX_NREGS. We calculate this according to its
801 documented meaning, to avoid potential inconsistencies with actual
802 class definitions. */
803
804#undef TARGET_CLASS_MAX_NREGS
805#define TARGET_CLASS_MAX_NREGS m32c_class_max_nregs
806
807static unsigned char
ef4bddc2 808m32c_class_max_nregs (reg_class_t regclass, machine_mode mode)
38b2d076 809{
810 int rn;
811 unsigned char max = 0;
812
813 for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
c4831cff 814 if (TEST_HARD_REG_BIT (reg_class_contents[(int) regclass], rn))
38b2d076 815 {
c4831cff 816 unsigned char n = m32c_hard_regno_nregs (rn, mode);
817 if (max < n)
818 max = n;
819 }
820 return max;
821}
822
823/* Implements CANNOT_CHANGE_MODE_CLASS. Only r0 and r1 can change to
824 QI (r0l, r1l) because the chip doesn't support QI ops on other
825 registers (well, it does on a0/a1 but if we let gcc do that, reload
826 suffers). Otherwise, we allow changes to larger modes. */
827int
828m32c_cannot_change_mode_class (machine_mode from,
829 machine_mode to, int rclass)
38b2d076 830{
db9c8397 831 int rn;
832#if DEBUG0
833 fprintf (stderr, "cannot change from %s to %s in %s\n",
834 mode_name[from], mode_name[to], class_names[rclass]);
835#endif
836
837 /* If the larger mode isn't allowed in any of these registers, we
838 can't allow the change. */
839 for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
840 if (class_contents[rclass][0] & (1 << rn))
841 if (! m32c_hard_regno_ok (rn, to))
842 return 1;
843
844 if (to == QImode)
845 return (class_contents[rclass][0] & 0x1ffa);
846
847 if (class_contents[rclass][0] & 0x0005 /* r0, r1 */
848 && GET_MODE_SIZE (from) > 1)
849 return 0;
850 if (GET_MODE_SIZE (from) > 2) /* all other regs */
851 return 0;
852
853 return 1;
854}
855
856/* Helpers for the rest of the file. */
857/* TRUE if the rtx is a REG rtx for the given register. */
858#define IS_REG(rtx,regno) (GET_CODE (rtx) == REG \
859 && REGNO (rtx) == regno)
860/* TRUE if the rtx is a pseudo - specifically, one we can use as a
861 base register in address calculations (hence the "strict"
862 argument). */
863#define IS_PSEUDO(rtx,strict) (!strict && GET_CODE (rtx) == REG \
864 && (REGNO (rtx) == AP_REGNO \
865 || REGNO (rtx) >= FIRST_PSEUDO_REGISTER))
866
867#define A0_OR_PSEUDO(x) (IS_REG(x, A0_REGNO) || REGNO (x) >= FIRST_PSEUDO_REGISTER)
868
777e635f 869/* Implements matching for constraints (see next function too). 'S' is
38b2d076
DD
870 for memory constraints, plus "Rpa" for PARALLEL rtx's we use for
871 call return values. */
872bool
873m32c_matches_constraint_p (rtx value, int constraint)
874{
875 encode_pattern (value);
5fd5d713 876
877 switch (constraint) {
878 case CONSTRAINT_SF:
879 return (far_addr_space_p (value)
880 && ((RTX_IS ("mr")
881 && A0_OR_PSEUDO (patternr[1])
882 && GET_MODE (patternr[1]) == SImode)
883 || (RTX_IS ("m+^Sri")
884 && A0_OR_PSEUDO (patternr[4])
885 && GET_MODE (patternr[4]) == HImode)
886 || (RTX_IS ("m+^Srs")
887 && A0_OR_PSEUDO (patternr[4])
888 && GET_MODE (patternr[4]) == HImode)
889 || (RTX_IS ("m+^S+ris")
890 && A0_OR_PSEUDO (patternr[5])
891 && GET_MODE (patternr[5]) == HImode)
892 || RTX_IS ("ms")));
893 case CONSTRAINT_Sd:
894 {
895 /* This is the common "src/dest" address */
896 rtx r;
897 if (GET_CODE (value) == MEM && CONSTANT_P (XEXP (value, 0)))
03dd17b1 898 return true;
38b2d076 899 if (RTX_IS ("ms") || RTX_IS ("m+si"))
03dd17b1 900 return true;
901 if (RTX_IS ("m++rii"))
902 {
903 if (REGNO (patternr[3]) == FB_REGNO
904 && INTVAL (patternr[4]) == 0)
03dd17b1 905 return true;
07127a0a 906 }
907 if (RTX_IS ("mr"))
908 r = patternr[1];
909 else if (RTX_IS ("m+ri") || RTX_IS ("m+rs") || RTX_IS ("m+r+si"))
910 r = patternr[2];
911 else
03dd17b1 912 return false;
38b2d076 913 if (REGNO (r) == SP_REGNO)
03dd17b1 914 return false;
915 return m32c_legitimate_address_p (GET_MODE (value), XEXP (value, 0), 1);
916 }
03dd17b1 917 case CONSTRAINT_Sa:
918 {
919 rtx r;
920 if (RTX_IS ("mr"))
921 r = patternr[1];
922 else if (RTX_IS ("m+ri"))
923 r = patternr[2];
924 else
03dd17b1 925 return false;
926 return (IS_REG (r, A0_REGNO) || IS_REG (r, A1_REGNO));
927 }
928 case CONSTRAINT_Si:
929 return (RTX_IS ("mi") || RTX_IS ("ms") || RTX_IS ("m+si"));
930 case CONSTRAINT_Ss:
931 return ((RTX_IS ("mr")
932 && (IS_REG (patternr[1], SP_REGNO)))
933 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SP_REGNO))));
934 case CONSTRAINT_Sf:
935 return ((RTX_IS ("mr")
936 && (IS_REG (patternr[1], FB_REGNO)))
937 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], FB_REGNO))));
938 case CONSTRAINT_Sb:
939 return ((RTX_IS ("mr")
940 && (IS_REG (patternr[1], SB_REGNO)))
941 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SB_REGNO))));
942 case CONSTRAINT_Sp:
943 /* Absolute addresses 0..0x1fff used for bit addressing (I/O ports) */
944 return (RTX_IS ("mi")
945 && !(INTVAL (patternr[1]) & ~0x1fff));
946 case CONSTRAINT_S1:
947 return r1h_operand (value, QImode);
948 case CONSTRAINT_Rpa:
38b2d076 949 return GET_CODE (value) == PARALLEL;
950 default:
951 return false;
952 }
953}
954
955/* STACK AND CALLING */
956
957/* Frame Layout */
958
959/* Implements RETURN_ADDR_RTX. Note that R8C and M16C push 24 bits
960 (yes, THREE bytes) onto the stack for the return address, but we
961 don't support pointers bigger than 16 bits on those chips. This
962 will likely wreak havoc with exception unwinding. FIXME. */
963rtx
964m32c_return_addr_rtx (int count)
965{
ef4bddc2 966 machine_mode mode;
967 int offset;
968 rtx ra_mem;
969
970 if (count)
971 return NULL_RTX;
972 /* we want 2[$fb] */
973
974 if (TARGET_A24)
975 {
976 /* It's four bytes */
977 mode = PSImode;
978 offset = 4;
979 }
980 else
981 {
982 /* FIXME: it's really 3 bytes */
983 mode = HImode;
984 offset = 2;
985 }
986
987 ra_mem =
988 gen_rtx_MEM (mode, plus_constant (Pmode, gen_rtx_REG (Pmode, FP_REGNO),
989 offset));
990 return copy_to_mode_reg (mode, ra_mem);
991}
992
993/* Implements INCOMING_RETURN_ADDR_RTX. See comment above. */
994rtx
995m32c_incoming_return_addr_rtx (void)
996{
997 /* we want [sp] */
998 return gen_rtx_MEM (PSImode, gen_rtx_REG (PSImode, SP_REGNO));
999}
1000
1001/* Exception Handling Support */
1002
1003/* Implements EH_RETURN_DATA_REGNO. Choose registers able to hold
1004 pointers. */
1005int
1006m32c_eh_return_data_regno (int n)
1007{
1008 switch (n)
1009 {
1010 case 0:
1011 return A0_REGNO;
1012 case 1:
1013 if (TARGET_A16)
1014 return R3_REGNO;
1015 else
1016 return R1_REGNO;
1017 default:
1018 return INVALID_REGNUM;
1019 }
1020}
1021
1022/* Implements EH_RETURN_STACKADJ_RTX. Saved and used later in
1023 m32c_emit_eh_epilogue. */
1024rtx
1025m32c_eh_return_stackadj_rtx (void)
1026{
1027 if (!cfun->machine->eh_stack_adjust)
1028 {
1029 rtx sa;
1030
99920b6f 1031 sa = gen_rtx_REG (Pmode, R0_REGNO);
1032 cfun->machine->eh_stack_adjust = sa;
1033 }
1034 return cfun->machine->eh_stack_adjust;
1035}
1036
1037/* Registers That Address the Stack Frame */
1038
1039/* Implements DWARF_FRAME_REGNUM and DBX_REGISTER_NUMBER. Note that
1040 the original spec called for dwarf numbers to vary with register
1041 width as well, for example, r0l, r0, and r2r0 would each have
1042 different dwarf numbers. GCC doesn't support this, and we don't do
1043 it, and gdb seems to like it this way anyway. */
1044unsigned int
1045m32c_dwarf_frame_regnum (int n)
1046{
1047 switch (n)
1048 {
1049 case R0_REGNO:
1050 return 5;
1051 case R1_REGNO:
1052 return 6;
1053 case R2_REGNO:
1054 return 7;
1055 case R3_REGNO:
1056 return 8;
1057 case A0_REGNO:
1058 return 9;
1059 case A1_REGNO:
1060 return 10;
1061 case FB_REGNO:
1062 return 11;
1063 case SB_REGNO:
1064 return 19;
1065
1066 case SP_REGNO:
1067 return 12;
1068 case PC_REGNO:
1069 return 13;
1070 default:
1071 return DWARF_FRAME_REGISTERS + 1;
1072 }
1073}
1074
1075/* The frame looks like this:
1076
1077 ap -> +------------------------------
1078 | Return address (3 or 4 bytes)
1079 | Saved FB (2 or 4 bytes)
1080 fb -> +------------------------------
1081 | local vars
1082 | register saves fb
1083 | through r0 as needed
1084 sp -> +------------------------------
1085*/
1086
1087/* We use this to wrap all emitted insns in the prologue. */
1088static rtx
1089F (rtx x)
1090{
1091 RTX_FRAME_RELATED_P (x) = 1;
1092 return x;
1093}
1094
1095/* This maps register numbers to the PUSHM/POPM bitfield, and tells us
1096 how much the stack pointer moves for each, for each cpu family. */
1097static struct
1098{
1099 int reg1;
1100 int bit;
1101 int a16_bytes;
1102 int a24_bytes;
1103} pushm_info[] =
1104{
1105 /* These are in reverse push (nearest-to-sp) order. */
1106 { R0_REGNO, 0x80, 2, 2 },
38b2d076 1107 { R1_REGNO, 0x40, 2, 2 },
1108 { R2_REGNO, 0x20, 2, 2 },
1109 { R3_REGNO, 0x10, 2, 2 },
1110 { A0_REGNO, 0x08, 2, 4 },
1111 { A1_REGNO, 0x04, 2, 4 },
1112 { SB_REGNO, 0x02, 2, 4 },
1113 { FB_REGNO, 0x01, 2, 4 }
1114};
1115
1116#define PUSHM_N (sizeof(pushm_info)/sizeof(pushm_info[0]))
1117
1118/* Returns TRUE if we need to save/restore the given register. We
1119 save everything for exception handlers, so that any register can be
1120 unwound. For interrupt handlers, we save everything if the handler
1121 calls something else (because we don't know what *that* function
1122 might do), but try to be a bit smarter if the handler is a leaf
1123 function. We always save $a0, though, because we use that in the
85f65093 1124 epilogue to copy $fb to $sp. */
1125static int
1126need_to_save (int regno)
1127{
1128 if (fixed_regs[regno])
1129 return 0;
ad516a74 1130 if (crtl->calls_eh_return)
38b2d076
DD
1131 return 1;
1132 if (regno == FP_REGNO)
1133 return 0;
1134 if (cfun->machine->is_interrupt
65655f79
DD
1135 && (!cfun->machine->is_leaf
1136 || (regno == A0_REGNO
1137 && m32c_function_needs_enter ())
1138 ))
38b2d076 1139 return 1;
6fb5fa3c 1140 if (df_regs_ever_live_p (regno)
1141 && (!call_used_regs[regno] || cfun->machine->is_interrupt))
1142 return 1;
1143 return 0;
1144}
1145
1146/* This function contains all the intelligence about saving and
1147 restoring registers. It always figures out the register save set.
1148 When called with PP_justcount, it merely returns the size of the
1149 save set (for eliminating the frame pointer, for example). When
1150 called with PP_pushm or PP_popm, it emits the appropriate
1151 instructions for saving (pushm) or restoring (popm) the
1152 registers. */
1153static int
1154m32c_pushm_popm (Push_Pop_Type ppt)
1155{
1156 int reg_mask = 0;
1157 int byte_count = 0, bytes;
1158 int i;
1159 rtx dwarf_set[PUSHM_N];
1160 int n_dwarfs = 0;
1161 int nosave_mask = 0;
1162
1163 if (crtl->return_rtx
1164 && GET_CODE (crtl->return_rtx) == PARALLEL
ad516a74 1165 && !(crtl->calls_eh_return || cfun->machine->is_interrupt))
38b2d076 1166 {
305da3ec 1167 rtx exp = XVECEXP (crtl->return_rtx, 0, 0);
1168 rtx rv = XEXP (exp, 0);
1169 int rv_bytes = GET_MODE_SIZE (GET_MODE (rv));
1170
1171 if (rv_bytes > 2)
1172 nosave_mask |= 0x20; /* PSI, SI */
1173 else
1174 nosave_mask |= 0xf0; /* DF */
1175 if (rv_bytes > 4)
1176 nosave_mask |= 0x50; /* DI */
1177 }
1178
1179 for (i = 0; i < (int) PUSHM_N; i++)
1180 {
1181 /* Skip if neither register needs saving. */
1182 if (!need_to_save (pushm_info[i].reg1))
1183 continue;
1184
1185 if (pushm_info[i].bit & nosave_mask)
1186 continue;
1187
1188 reg_mask |= pushm_info[i].bit;
1189 bytes = TARGET_A16 ? pushm_info[i].a16_bytes : pushm_info[i].a24_bytes;
1190
1191 if (ppt == PP_pushm)
1192 {
ef4bddc2 1193 machine_mode mode = (bytes == 2) ? HImode : SImode;
1194 rtx addr;
1195
1196 /* Always use stack_pointer_rtx instead of calling
1197 rtx_gen_REG ourselves. Code elsewhere in GCC assumes
1198 that there is a single rtx representing the stack pointer,
1199 namely stack_pointer_rtx, and uses == to recognize it. */
1200 addr = stack_pointer_rtx;
1201
1202 if (byte_count != 0)
1203 addr = gen_rtx_PLUS (GET_MODE (addr), addr, GEN_INT (byte_count));
1204
1205 dwarf_set[n_dwarfs++] =
1206 gen_rtx_SET (VOIDmode,
1207 gen_rtx_MEM (mode, addr),
1208 gen_rtx_REG (mode, pushm_info[i].reg1));
1209 F (dwarf_set[n_dwarfs - 1]);
1210
1211 }
1212 byte_count += bytes;
1213 }
1214
1215 if (cfun->machine->is_interrupt)
1216 {
1217 cfun->machine->intr_pushm = reg_mask & 0xfe;
1218 reg_mask = 0;
1219 byte_count = 0;
1220 }
1221
1222 if (cfun->machine->is_interrupt)
1223 for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
1224 if (need_to_save (i))
1225 {
1226 byte_count += 2;
1227 cfun->machine->intr_pushmem[i - MEM0_REGNO] = 1;
1228 }
1229
1230 if (ppt == PP_pushm && byte_count)
1231 {
1232 rtx note = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (n_dwarfs + 1));
1233 rtx pushm;
1234
1235 if (reg_mask)
1236 {
1237 XVECEXP (note, 0, 0)
1238 = gen_rtx_SET (VOIDmode,
1239 stack_pointer_rtx,
1240 gen_rtx_PLUS (GET_MODE (stack_pointer_rtx),
1241 stack_pointer_rtx,
1242 GEN_INT (-byte_count)));
1243 F (XVECEXP (note, 0, 0));
1244
1245 for (i = 0; i < n_dwarfs; i++)
1246 XVECEXP (note, 0, i + 1) = dwarf_set[i];
1247
1248 pushm = F (emit_insn (gen_pushm (GEN_INT (reg_mask))));
1249
444d6efe 1250 add_reg_note (pushm, REG_FRAME_RELATED_EXPR, note);
1251 }
1252
1253 if (cfun->machine->is_interrupt)
1254 for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
1255 if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
1256 {
1257 if (TARGET_A16)
1258 pushm = emit_insn (gen_pushhi_16 (gen_rtx_REG (HImode, i)));
1259 else
1260 pushm = emit_insn (gen_pushhi_24 (gen_rtx_REG (HImode, i)));
1261 F (pushm);
1262 }
1263 }
1264 if (ppt == PP_popm && byte_count)
1265 {
1266 if (cfun->machine->is_interrupt)
1267 for (i = MEM7_REGNO; i >= MEM0_REGNO; i--)
1268 if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
1269 {
1270 if (TARGET_A16)
b3fdec9e 1271 emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, i)));
38b2d076 1272 else
b3fdec9e 1273 emit_insn (gen_pophi_24 (gen_rtx_REG (HImode, i)));
1274 }
1275 if (reg_mask)
1276 emit_insn (gen_popm (GEN_INT (reg_mask)));
1277 }
1278
1279 return byte_count;
1280}
1281
1282/* Implements INITIAL_ELIMINATION_OFFSET. See the comment above that
1283 diagrams our call frame. */
1284int
1285m32c_initial_elimination_offset (int from, int to)
1286{
1287 int ofs = 0;
1288
1289 if (from == AP_REGNO)
1290 {
1291 if (TARGET_A16)
1292 ofs += 5;
1293 else
1294 ofs += 8;
1295 }
1296
1297 if (to == SP_REGNO)
1298 {
1299 ofs += m32c_pushm_popm (PP_justcount);
1300 ofs += get_frame_size ();
1301 }
1302
1303 /* Account for push rounding. */
1304 if (TARGET_A24)
1305 ofs = (ofs + 1) & ~1;
1306#if DEBUG0
1307 fprintf (stderr, "initial_elimination_offset from=%d to=%d, ofs=%d\n", from,
1308 to, ofs);
1309#endif
1310 return ofs;
1311}
1312
1313/* Passing Function Arguments on the Stack */
1314
1315/* Implements PUSH_ROUNDING. The R8C and M16C have byte stacks, the
1316 M32C has word stacks. */
444d6efe 1317unsigned int
1318m32c_push_rounding (int n)
1319{
1320 if (TARGET_R8C || TARGET_M16C)
1321 return n;
1322 return (n + 1) & ~1;
1323}
1324
1325/* Passing Arguments in Registers */
1326
1327/* Implements TARGET_FUNCTION_ARG. Arguments are passed partly in
1328 registers, partly on stack. If our function returns a struct, a
1329 pointer to a buffer for it is at the top of the stack (last thing
1330 pushed). The first few real arguments may be in registers as
1331 follows:
1332
1333 R8C/M16C: arg1 in r1 if it's QI or HI (else it's pushed on stack)
1334 arg2 in r2 if it's HI (else pushed on stack)
1335 rest on stack
1336 M32C: arg1 in r0 if it's QI or HI (else it's pushed on stack)
1337 rest on stack
1338
1339 Structs are not passed in registers, even if they fit. Only
1340 integer and pointer types are passed in registers.
1341
1342 Note that when arg1 doesn't fit in r1, arg2 may still be passed in
1343 r2 if it fits. */
1344#undef TARGET_FUNCTION_ARG
1345#define TARGET_FUNCTION_ARG m32c_function_arg
1346static rtx
d5cc9181 1347m32c_function_arg (cumulative_args_t ca_v,
ef4bddc2 1348 machine_mode mode, const_tree type, bool named)
38b2d076 1349{
1350 CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
1351
1352 /* Can return a reg, parallel, or 0 for stack */
1353 rtx rv = NULL_RTX;
1354#if DEBUG0
1355 fprintf (stderr, "func_arg %d (%s, %d)\n",
1356 ca->parm_num, mode_name[mode], named);
1357 debug_tree (type);
1358#endif
1359
1360 if (mode == VOIDmode)
1361 return GEN_INT (0);
1362
1363 if (ca->force_mem || !named)
1364 {
1365#if DEBUG0
1366 fprintf (stderr, "func arg: force %d named %d, mem\n", ca->force_mem,
1367 named);
1368#endif
1369 return NULL_RTX;
1370 }
1371
1372 if (type && INTEGRAL_TYPE_P (type) && POINTER_TYPE_P (type))
1373 return NULL_RTX;
1374
1375 if (type && AGGREGATE_TYPE_P (type))
1376 return NULL_RTX;
1377
1378 switch (ca->parm_num)
1379 {
1380 case 1:
1381 if (GET_MODE_SIZE (mode) == 1 || GET_MODE_SIZE (mode) == 2)
1382 rv = gen_rtx_REG (mode, TARGET_A16 ? R1_REGNO : R0_REGNO);
1383 break;
1384
1385 case 2:
1386 if (TARGET_A16 && GET_MODE_SIZE (mode) == 2)
1387 rv = gen_rtx_REG (mode, R2_REGNO);
1388 break;
1389 }
1390
1391#if DEBUG0
1392 debug_rtx (rv);
1393#endif
1394 return rv;
1395}
1396
1397#undef TARGET_PASS_BY_REFERENCE
1398#define TARGET_PASS_BY_REFERENCE m32c_pass_by_reference
1399static bool
d5cc9181 1400m32c_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
ef4bddc2 1401 machine_mode mode ATTRIBUTE_UNUSED,
586de218 1402 const_tree type ATTRIBUTE_UNUSED,
1403 bool named ATTRIBUTE_UNUSED)
1404{
1405 return 0;
1406}
1407
1408/* Implements INIT_CUMULATIVE_ARGS. */
1409void
1410m32c_init_cumulative_args (CUMULATIVE_ARGS * ca,
9d746d5e 1411 tree fntype,
38b2d076 1412 rtx libname ATTRIBUTE_UNUSED,
9d746d5e 1413 tree fndecl,
1414 int n_named_args ATTRIBUTE_UNUSED)
1415{
1416 if (fntype && aggregate_value_p (TREE_TYPE (fntype), fndecl))
1417 ca->force_mem = 1;
1418 else
1419 ca->force_mem = 0;
1420 ca->parm_num = 1;
1421}
1422
1423/* Implements TARGET_FUNCTION_ARG_ADVANCE. force_mem is set for
1424 functions returning structures, so we always reset that. Otherwise,
1425 we only need to know the sequence number of the argument to know what
1426 to do with it. */
1427#undef TARGET_FUNCTION_ARG_ADVANCE
1428#define TARGET_FUNCTION_ARG_ADVANCE m32c_function_arg_advance
1429static void
d5cc9181 1430m32c_function_arg_advance (cumulative_args_t ca_v,
ef4bddc2 1431 machine_mode mode ATTRIBUTE_UNUSED,
1432 const_tree type ATTRIBUTE_UNUSED,
1433 bool named ATTRIBUTE_UNUSED)
38b2d076 1434{
1435 CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
1436
1437 if (ca->force_mem)
1438 ca->force_mem = 0;
1439 else
1440 ca->parm_num++;
1441}
1442
1443/* Implements TARGET_FUNCTION_ARG_BOUNDARY. */
1444#undef TARGET_FUNCTION_ARG_BOUNDARY
1445#define TARGET_FUNCTION_ARG_BOUNDARY m32c_function_arg_boundary
1446static unsigned int
ef4bddc2 1447m32c_function_arg_boundary (machine_mode mode ATTRIBUTE_UNUSED,
1448 const_tree type ATTRIBUTE_UNUSED)
1449{
1450 return (TARGET_A16 ? 8 : 16);
1451}
1452
1453/* Implements FUNCTION_ARG_REGNO_P. */
1454int
1455m32c_function_arg_regno_p (int r)
1456{
1457 if (TARGET_A24)
1458 return (r == R0_REGNO);
1459 return (r == R1_REGNO || r == R2_REGNO);
1460}
1461
e9555b13 1462/* HImode and PSImode are the two "native" modes as far as GCC is
85f65093 1463 concerned, but the chips also support a 32-bit mode which is used
1464 for some opcodes in R8C/M16C and for reset vectors and such. */
1465#undef TARGET_VALID_POINTER_MODE
1466#define TARGET_VALID_POINTER_MODE m32c_valid_pointer_mode
23fed240 1467static bool
ef4bddc2 1468m32c_valid_pointer_mode (machine_mode mode)
e9555b13 1469{
1470 if (mode == HImode
1471 || mode == PSImode
1472 || mode == SImode
1473 )
1474 return 1;
1475 return 0;
1476}
1477
1478/* How Scalar Function Values Are Returned */
1479
2a31793e 1480/* Implements TARGET_LIBCALL_VALUE. Most values are returned in $r0, or some
1481 combination of registers starting there (r2r0 for longs, r3r1r2r0
1482 for long long, r3r2r1r0 for doubles), except that that ABI
1483 currently doesn't work because it ends up using all available
1484 general registers and gcc often can't compile it. So, instead, we
1485 return anything bigger than 16 bits in "mem0" (effectively, a
1486 memory location). */
1487
1488#undef TARGET_LIBCALL_VALUE
1489#define TARGET_LIBCALL_VALUE m32c_libcall_value
1490
1491static rtx
ef4bddc2 1492m32c_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
1493{
1494 /* return reg or parallel */
1495#if 0
1496 /* FIXME: GCC has difficulty returning large values in registers,
1497 because that ties up most of the general registers and gives the
1498 register allocator little to work with. Until we can resolve
1499 this, large values are returned in memory. */
1500 if (mode == DFmode)
1501 {
1502 rtx rv;
1503
1504 rv = gen_rtx_PARALLEL (mode, rtvec_alloc (4));
1505 XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
1506 gen_rtx_REG (HImode,
1507 R0_REGNO),
1508 GEN_INT (0));
1509 XVECEXP (rv, 0, 1) = gen_rtx_EXPR_LIST (VOIDmode,
1510 gen_rtx_REG (HImode,
1511 R1_REGNO),
1512 GEN_INT (2));
1513 XVECEXP (rv, 0, 2) = gen_rtx_EXPR_LIST (VOIDmode,
1514 gen_rtx_REG (HImode,
1515 R2_REGNO),
1516 GEN_INT (4));
1517 XVECEXP (rv, 0, 3) = gen_rtx_EXPR_LIST (VOIDmode,
1518 gen_rtx_REG (HImode,
1519 R3_REGNO),
1520 GEN_INT (6));
1521 return rv;
1522 }
1523
1524 if (TARGET_A24 && GET_MODE_SIZE (mode) > 2)
1525 {
1526 rtx rv;
1527
1528 rv = gen_rtx_PARALLEL (mode, rtvec_alloc (1));
1529 XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
1530 gen_rtx_REG (mode,
1531 R0_REGNO),
1532 GEN_INT (0));
1533 return rv;
1534 }
1535#endif
1536
1537 if (GET_MODE_SIZE (mode) > 2)
1538 return gen_rtx_REG (mode, MEM0_REGNO);
1539 return gen_rtx_REG (mode, R0_REGNO);
1540}
1541
2a31793e 1542/* Implements TARGET_FUNCTION_VALUE. Functions and libcalls have the same
38b2d076 1543 conventions. */
1544
1545#undef TARGET_FUNCTION_VALUE
1546#define TARGET_FUNCTION_VALUE m32c_function_value
1547
1548static rtx
1549m32c_function_value (const_tree valtype,
1550 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
1551 bool outgoing ATTRIBUTE_UNUSED)
1552{
1553 /* return reg or parallel */
ef4bddc2 1554 const machine_mode mode = TYPE_MODE (valtype);
1555 return m32c_libcall_value (mode, NULL_RTX);
1556}
1557
1558/* Implements TARGET_FUNCTION_VALUE_REGNO_P. */
1559
1560#undef TARGET_FUNCTION_VALUE_REGNO_P
1561#define TARGET_FUNCTION_VALUE_REGNO_P m32c_function_value_regno_p
2a31793e 1562
f28f2337 1563static bool
1564m32c_function_value_regno_p (const unsigned int regno)
1565{
1566 return (regno == R0_REGNO || regno == MEM0_REGNO);
1567}
1568
1569/* How Large Values Are Returned */
1570
1571/* We return structures by pushing the address on the stack, even if
1572 we use registers for the first few "real" arguments. */
1573#undef TARGET_STRUCT_VALUE_RTX
1574#define TARGET_STRUCT_VALUE_RTX m32c_struct_value_rtx
1575static rtx
1576m32c_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
1577 int incoming ATTRIBUTE_UNUSED)
1578{
1579 return 0;
1580}
1581
1582/* Function Entry and Exit */
1583
1584/* Implements EPILOGUE_USES. Interrupts restore all registers. */
1585int
1586m32c_epilogue_uses (int regno ATTRIBUTE_UNUSED)
1587{
1588 if (cfun->machine->is_interrupt)
1589 return 1;
1590 return 0;
1591}
1592
1593/* Implementing the Varargs Macros */
1594
1595#undef TARGET_STRICT_ARGUMENT_NAMING
1596#define TARGET_STRICT_ARGUMENT_NAMING m32c_strict_argument_naming
1597static bool
d5cc9181 1598m32c_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
1599{
1600 return 1;
1601}
1602
1603/* Trampolines for Nested Functions */
1604
1605/*
1606 m16c:
1607 1 0000 75C43412 mov.w #0x1234,a0
1608 2 0004 FC000000 jmp.a label
1609
1610 m32c:
1611 1 0000 BC563412 mov.l:s #0x123456,a0
1612 2 0004 CC000000 jmp.a label
1613*/
1614
1615/* Implements TRAMPOLINE_SIZE. */
1616int
1617m32c_trampoline_size (void)
1618{
1619 /* Allocate extra space so we can avoid the messy shifts when we
1620 initialize the trampoline; we just write past the end of the
1621 opcode. */
1622 return TARGET_A16 ? 8 : 10;
1623}
1624
1625/* Implements TRAMPOLINE_ALIGNMENT. */
1626int
1627m32c_trampoline_alignment (void)
1628{
1629 return 2;
1630}
1631
1632/* Implements TARGET_TRAMPOLINE_INIT. */
1633
1634#undef TARGET_TRAMPOLINE_INIT
1635#define TARGET_TRAMPOLINE_INIT m32c_trampoline_init
1636static void
1637m32c_trampoline_init (rtx m_tramp, tree fndecl, rtx chainval)
38b2d076 1638{
1639 rtx function = XEXP (DECL_RTL (fndecl), 0);
1640
1641#define A0(m,i) adjust_address (m_tramp, m, i)
1642 if (TARGET_A16)
1643 {
1644 /* Note: we subtract a "word" because the moves want signed
1645 constants, not unsigned constants. */
1646 emit_move_insn (A0 (HImode, 0), GEN_INT (0xc475 - 0x10000));
1647 emit_move_insn (A0 (HImode, 2), chainval);
1648 emit_move_insn (A0 (QImode, 4), GEN_INT (0xfc - 0x100));
1649 /* We use 16-bit addresses here, but store the zero to turn it
1650 into a 24-bit offset. */
1651 emit_move_insn (A0 (HImode, 5), function);
1652 emit_move_insn (A0 (QImode, 7), GEN_INT (0x00));
1653 }
1654 else
1655 {
1656 /* Note that the PSI moves actually write 4 bytes. Make sure we
1657 write stuff out in the right order, and leave room for the
1658 extra byte at the end. */
1659 emit_move_insn (A0 (QImode, 0), GEN_INT (0xbc - 0x100));
1660 emit_move_insn (A0 (PSImode, 1), chainval);
1661 emit_move_insn (A0 (QImode, 4), GEN_INT (0xcc - 0x100));
1662 emit_move_insn (A0 (PSImode, 5), function);
1663 }
1664#undef A0
1665}
1666
1667/* Addressing Modes */
1668
1669/* The r8c/m32c family supports a wide range of non-orthogonal
1670 addressing modes, including the ability to double-indirect on *some*
1671 of them. Not all insns support all modes, either, but we rely on
1672 predicates and constraints to deal with that. */
1673#undef TARGET_LEGITIMATE_ADDRESS_P
1674#define TARGET_LEGITIMATE_ADDRESS_P m32c_legitimate_address_p
1675bool
ef4bddc2 1676m32c_legitimate_address_p (machine_mode mode, rtx x, bool strict)
1677{
1678 int mode_adjust;
1679 if (CONSTANT_P (x))
1680 return 1;
1681
1682 if (TARGET_A16 && GET_MODE (x) != HImode && GET_MODE (x) != SImode)
1683 return 0;
1684 if (TARGET_A24 && GET_MODE (x) != PSImode)
1685 return 0;
1686
1687 /* Wide references to memory will be split after reload, so we must
1688 ensure that all parts of such splits remain legitimate
1689 addresses. */
1690 mode_adjust = GET_MODE_SIZE (mode) - 1;
1691
1692 /* allowing PLUS yields mem:HI(plus:SI(mem:SI(plus:SI in m32c_split_move */
1693 if (GET_CODE (x) == PRE_DEC
1694 || GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_MODIFY)
1695 {
1696 return (GET_CODE (XEXP (x, 0)) == REG
1697 && REGNO (XEXP (x, 0)) == SP_REGNO);
1698 }
1699
1700#if 0
1701 /* This is the double indirection detection, but it currently
1702 doesn't work as cleanly as this code implies, so until we've had
1703 a chance to debug it, leave it disabled. */
1704 if (TARGET_A24 && GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) != PLUS)
1705 {
1706#if DEBUG_DOUBLE
1707 fprintf (stderr, "double indirect\n");
1708#endif
1709 x = XEXP (x, 0);
1710 }
1711#endif
1712
1713 encode_pattern (x);
1714 if (RTX_IS ("r"))
1715 {
1716 /* Most indexable registers can be used without displacements,
1717 although some of them will be emitted with an explicit zero
1718 to please the assembler. */
1719 switch (REGNO (patternr[0]))
1720 {
1721 case A1_REGNO:
1722 case SB_REGNO:
1723 case FB_REGNO:
1724 case SP_REGNO:
1725 if (TARGET_A16 && GET_MODE (x) == SImode)
1726 return 0;
1727 case A0_REGNO:
1728 return 1;
1729
1730 default:
1731 if (IS_PSEUDO (patternr[0], strict))
1732 return 1;
1733 return 0;
1734 }
1735 }
1736
1737 if (TARGET_A16 && GET_MODE (x) == SImode)
1738 return 0;
1739
1740 if (RTX_IS ("+ri"))
1741 {
1742 /* This is more interesting, because different base registers
1743 allow for different displacements - both range and signedness
1744 - and it differs from chip series to chip series too. */
1745 int rn = REGNO (patternr[1]);
1746 HOST_WIDE_INT offs = INTVAL (patternr[2]);
1747 switch (rn)
1748 {
1749 case A0_REGNO:
1750 case A1_REGNO:
1751 case SB_REGNO:
1752 /* The syntax only allows positive offsets, but when the
1753 offsets span the entire memory range, we can simulate
1754 negative offsets by wrapping. */
1755 if (TARGET_A16)
1756 return (offs >= -65536 && offs <= 65535 - mode_adjust);
1757 if (rn == SB_REGNO)
1758 return (offs >= 0 && offs <= 65535 - mode_adjust);
1759 /* A0 or A1 */
1760 return (offs >= -16777216 && offs <= 16777215);
1761
1762 case FB_REGNO:
1763 if (TARGET_A16)
1764 return (offs >= -128 && offs <= 127 - mode_adjust);
1765 return (offs >= -65536 && offs <= 65535 - mode_adjust);
1766
1767 case SP_REGNO:
1768 return (offs >= -128 && offs <= 127 - mode_adjust);
1769
1770 default:
1771 if (IS_PSEUDO (patternr[1], strict))
1772 return 1;
1773 return 0;
1774 }
1775 }
1776 if (RTX_IS ("+rs") || RTX_IS ("+r+si"))
1777 {
1778 rtx reg = patternr[1];
1779
1780 /* We don't know where the symbol is, so only allow base
1781 registers which support displacements spanning the whole
1782 address range. */
1783 switch (REGNO (reg))
1784 {
1785 case A0_REGNO:
1786 case A1_REGNO:
1787 /* $sb needs a secondary reload, but since it's involved in
1788 memory address reloads too, we don't deal with it very
1789 well. */
1790 /* case SB_REGNO: */
1791 return 1;
1792 default:
1793 if (IS_PSEUDO (reg, strict))
1794 return 1;
1795 return 0;
1796 }
1797 }
1798 return 0;
1799}
1800
1801/* Implements REG_OK_FOR_BASE_P. */
1802int
1803m32c_reg_ok_for_base_p (rtx x, int strict)
1804{
1805 if (GET_CODE (x) != REG)
1806 return 0;
1807 switch (REGNO (x))
1808 {
1809 case A0_REGNO:
1810 case A1_REGNO:
1811 case SB_REGNO:
1812 case FB_REGNO:
1813 case SP_REGNO:
1814 return 1;
1815 default:
1816 if (IS_PSEUDO (x, strict))
1817 return 1;
1818 return 0;
1819 }
1820}
1821
04aff2c0 1822/* We have three choices for choosing fb->aN offsets. If we choose -128,
85f65093 1823 we need one MOVA -128[fb],aN opcode and 16-bit aN displacements,
1824 like this:
1825 EB 4B FF mova -128[$fb],$a0
1826 D8 0C FF FF mov.w:Q #0,-1[$a0]
1827
85f65093 1828 Alternately, we subtract the frame size, and hopefully use 8-bit aN
1829 displacements:
1830 7B F4 stc $fb,$a0
1831 77 54 00 01 sub #256,$a0
1832 D8 08 01 mov.w:Q #0,1[$a0]
1833
1834 If we don't offset (i.e. offset by zero), we end up with:
1835 7B F4 stc $fb,$a0
1836 D8 0C 00 FF mov.w:Q #0,-256[$a0]
1837
1838 We have to subtract *something* so that we have a PLUS rtx to mark
1839 that we've done this reload. The -128 offset will never result in
85f65093 1840 an 8-bit aN offset, and the payoff for the second case is five
1841 loads *if* those loads are within 256 bytes of the other end of the
1842 frame, so the third case seems best. Note that we subtract the
1843 zero, but detect that in the addhi3 pattern. */
1844
1845#define BIG_FB_ADJ 0
1846
1847/* Implements LEGITIMIZE_ADDRESS. The only address we really have to
1848 worry about is frame base offsets, as $fb has a limited
1849 displacement range. We deal with this by attempting to reload $fb
1850 itself into an address register; that seems to result in the best
1851 code. */
1852#undef TARGET_LEGITIMIZE_ADDRESS
1853#define TARGET_LEGITIMIZE_ADDRESS m32c_legitimize_address
1854static rtx
1855m32c_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
ef4bddc2 1856 machine_mode mode)
1857{
1858#if DEBUG0
1859 fprintf (stderr, "m32c_legitimize_address for mode %s\n", mode_name[mode]);
506d7b68 1860 debug_rtx (x);
1861 fprintf (stderr, "\n");
1862#endif
1863
1864 if (GET_CODE (x) == PLUS
1865 && GET_CODE (XEXP (x, 0)) == REG
1866 && REGNO (XEXP (x, 0)) == FB_REGNO
1867 && GET_CODE (XEXP (x, 1)) == CONST_INT
1868 && (INTVAL (XEXP (x, 1)) < -128
1869 || INTVAL (XEXP (x, 1)) > (128 - GET_MODE_SIZE (mode))))
1870 {
1871 /* reload FB to A_REGS */
38b2d076 1872 rtx temp = gen_reg_rtx (Pmode);
1873 x = copy_rtx (x);
1874 emit_insn (gen_rtx_SET (VOIDmode, temp, XEXP (x, 0)));
1875 XEXP (x, 0) = temp;
1876 }
1877
506d7b68 1878 return x;
1879}
1880
1881/* Implements LEGITIMIZE_RELOAD_ADDRESS. See comment above. */
1882int
1883m32c_legitimize_reload_address (rtx * x,
ef4bddc2 1884 machine_mode mode,
1885 int opnum,
1886 int type, int ind_levels ATTRIBUTE_UNUSED)
1887{
1888#if DEBUG0
1889 fprintf (stderr, "\nm32c_legitimize_reload_address for mode %s\n",
1890 mode_name[mode]);
1891 debug_rtx (*x);
1892#endif
1893
1894 /* At one point, this function tried to get $fb copied to an address
1895 register, which in theory would maximize sharing, but gcc was
1896 *also* still trying to reload the whole address, and we'd run out
1897 of address registers. So we let gcc do the naive (but safe)
1898 reload instead, when the above function doesn't handle it for
1899 us.
1900
1901 The code below is a second attempt at the above. */
1902
1903 if (GET_CODE (*x) == PLUS
1904 && GET_CODE (XEXP (*x, 0)) == REG
1905 && REGNO (XEXP (*x, 0)) == FB_REGNO
1906 && GET_CODE (XEXP (*x, 1)) == CONST_INT
1907 && (INTVAL (XEXP (*x, 1)) < -128
1908 || INTVAL (XEXP (*x, 1)) > (128 - GET_MODE_SIZE (mode))))
1909 {
1910 rtx sum;
1911 int offset = INTVAL (XEXP (*x, 1));
1912 int adjustment = -BIG_FB_ADJ;
1913
1914 sum = gen_rtx_PLUS (Pmode, XEXP (*x, 0),
1915 GEN_INT (adjustment));
1916 *x = gen_rtx_PLUS (Pmode, sum, GEN_INT (offset - adjustment));
1917 if (type == RELOAD_OTHER)
1918 type = RELOAD_FOR_OTHER_ADDRESS;
1919 push_reload (sum, NULL_RTX, &XEXP (*x, 0), NULL,
1920 A_REGS, Pmode, VOIDmode, 0, 0, opnum,
444d6efe 1921 (enum reload_type) type);
1922 return 1;
1923 }
1924
1925 if (GET_CODE (*x) == PLUS
1926 && GET_CODE (XEXP (*x, 0)) == PLUS
1927 && GET_CODE (XEXP (XEXP (*x, 0), 0)) == REG
1928 && REGNO (XEXP (XEXP (*x, 0), 0)) == FB_REGNO
1929 && GET_CODE (XEXP (XEXP (*x, 0), 1)) == CONST_INT
1930 && GET_CODE (XEXP (*x, 1)) == CONST_INT
1931 )
1932 {
1933 if (type == RELOAD_OTHER)
1934 type = RELOAD_FOR_OTHER_ADDRESS;
1935 push_reload (XEXP (*x, 0), NULL_RTX, &XEXP (*x, 0), NULL,
1936 A_REGS, Pmode, VOIDmode, 0, 0, opnum,
444d6efe 1937 (enum reload_type) type);
1938 return 1;
1939 }
1940
1941 return 0;
1942}
1943
1944/* Return the appropriate mode for a named address pointer. */
1945#undef TARGET_ADDR_SPACE_POINTER_MODE
1946#define TARGET_ADDR_SPACE_POINTER_MODE m32c_addr_space_pointer_mode
ef4bddc2 1947static machine_mode
1948m32c_addr_space_pointer_mode (addr_space_t addrspace)
1949{
1950 switch (addrspace)
1951 {
1952 case ADDR_SPACE_GENERIC:
1953 return TARGET_A24 ? PSImode : HImode;
1954 case ADDR_SPACE_FAR:
1955 return SImode;
1956 default:
1957 gcc_unreachable ();
1958 }
1959}
1960
1961/* Return the appropriate mode for a named address address. */
1962#undef TARGET_ADDR_SPACE_ADDRESS_MODE
1963#define TARGET_ADDR_SPACE_ADDRESS_MODE m32c_addr_space_address_mode
ef4bddc2 1964static machine_mode
1965m32c_addr_space_address_mode (addr_space_t addrspace)
1966{
1967 switch (addrspace)
1968 {
1969 case ADDR_SPACE_GENERIC:
1970 return TARGET_A24 ? PSImode : HImode;
1971 case ADDR_SPACE_FAR:
1972 return SImode;
1973 default:
1974 gcc_unreachable ();
1975 }
1976}
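
/* Illustrative note (added here; not from the original source): ADDR_SPACE_FAR
   is the target's named address space, conventionally exposed to C as a
   __far qualifier (an assumption; see the GCC manual for the exact spelling).
   The modes above mean, for example:

       char __far *p;   // pointer value carried in SImode (32 bits)
       char *q;         // HImode on A16 parts (R8C/M16C), PSImode on A24 parts

   so a far pointer is always a full 32-bit quantity regardless of TARGET_A24. */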
1977
1978/* Like m32c_legitimate_address_p, except with named addresses. */
1979#undef TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P
1980#define TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P \
1981 m32c_addr_space_legitimate_address_p
1982static bool
ef4bddc2 1983m32c_addr_space_legitimate_address_p (machine_mode mode, rtx x,
1984 bool strict, addr_space_t as)
1985{
1986 if (as == ADDR_SPACE_FAR)
1987 {
1988 if (TARGET_A24)
1989 return 0;
1990 encode_pattern (x);
1991 if (RTX_IS ("r"))
1992 {
1993 if (GET_MODE (x) != SImode)
1994 return 0;
1995 switch (REGNO (patternr[0]))
1996 {
1997 case A0_REGNO:
1998 return 1;
1999
2000 default:
2001 if (IS_PSEUDO (patternr[0], strict))
2002 return 1;
2003 return 0;
2004 }
2005 }
2006 if (RTX_IS ("+^Sri"))
2007 {
2008 int rn = REGNO (patternr[3]);
2009 HOST_WIDE_INT offs = INTVAL (patternr[4]);
2010 if (GET_MODE (patternr[3]) != HImode)
2011 return 0;
2012 switch (rn)
2013 {
2014 case A0_REGNO:
2015 return (offs >= 0 && offs <= 0xfffff);
2016
2017 default:
2018 if (IS_PSEUDO (patternr[3], strict))
2019 return 1;
2020 return 0;
2021 }
2022 }
2023 if (RTX_IS ("+^Srs"))
2024 {
2025 int rn = REGNO (patternr[3]);
2026 if (GET_MODE (patternr[3]) != HImode)
2027 return 0;
2028 switch (rn)
2029 {
2030 case A0_REGNO:
2031 return 1;
2032
2033 default:
2034 if (IS_PSEUDO (patternr[3], strict))
2035 return 1;
2036 return 0;
2037 }
2038 }
2039 if (RTX_IS ("+^S+ris"))
2040 {
2041 int rn = REGNO (patternr[4]);
2042 if (GET_MODE (patternr[4]) != HImode)
2043 return 0;
2044 switch (rn)
2045 {
2046 case A0_REGNO:
2047 return 1;
2048
2049 default:
2050 if (IS_PSEUDO (patternr[4], strict))
2051 return 1;
2052 return 0;
2053 }
2054 }
2055 if (RTX_IS ("s"))
2056 {
2057 return 1;
2058 }
2059 return 0;
2060 }
2061
2062 else if (as != ADDR_SPACE_GENERIC)
2063 gcc_unreachable ();
2064
2065 return m32c_legitimate_address_p (mode, x, strict);
2066}
2067
2068/* Like m32c_legitimate_address, except with named address support. */
2069#undef TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS
2070#define TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS m32c_addr_space_legitimize_address
2071static rtx
ef4bddc2 2072m32c_addr_space_legitimize_address (rtx x, rtx oldx, machine_mode mode,
2073 addr_space_t as)
2074{
2075 if (as != ADDR_SPACE_GENERIC)
2076 {
2077#if DEBUG0
2078 fprintf (stderr, "\033[36mm32c_addr_space_legitimize_address for mode %s\033[0m\n", mode_name[mode]);
2079 debug_rtx (x);
2080 fprintf (stderr, "\n");
2081#endif
2082
2083 if (GET_CODE (x) != REG)
2084 {
2085 x = force_reg (SImode, x);
2086 }
2087 return x;
2088 }
2089
2090 return m32c_legitimize_address (x, oldx, mode);
2091}
2092
2093/* Determine if one named address space is a subset of another. */
2094#undef TARGET_ADDR_SPACE_SUBSET_P
2095#define TARGET_ADDR_SPACE_SUBSET_P m32c_addr_space_subset_p
2096static bool
2097m32c_addr_space_subset_p (addr_space_t subset, addr_space_t superset)
2098{
2099 gcc_assert (subset == ADDR_SPACE_GENERIC || subset == ADDR_SPACE_FAR);
2100 gcc_assert (superset == ADDR_SPACE_GENERIC || superset == ADDR_SPACE_FAR);
2101
2102 if (subset == superset)
2103 return true;
2104
2105 else
2106 return (subset == ADDR_SPACE_GENERIC && superset == ADDR_SPACE_FAR);
2107}
2108
2109#undef TARGET_ADDR_SPACE_CONVERT
2110#define TARGET_ADDR_SPACE_CONVERT m32c_addr_space_convert
2111/* Convert from one address space to another. */
2112static rtx
2113m32c_addr_space_convert (rtx op, tree from_type, tree to_type)
2114{
2115 addr_space_t from_as = TYPE_ADDR_SPACE (TREE_TYPE (from_type));
2116 addr_space_t to_as = TYPE_ADDR_SPACE (TREE_TYPE (to_type));
2117 rtx result;
2118
2119 gcc_assert (from_as == ADDR_SPACE_GENERIC || from_as == ADDR_SPACE_FAR);
2120 gcc_assert (to_as == ADDR_SPACE_GENERIC || to_as == ADDR_SPACE_FAR);
2121
2122 if (to_as == ADDR_SPACE_GENERIC && from_as == ADDR_SPACE_FAR)
2123 {
2124 /* This is unpredictable, as we're truncating off usable address
2125 bits. */
2126
2127 result = gen_reg_rtx (HImode);
2128 emit_move_insn (result, simplify_subreg (HImode, op, SImode, 0));
2129 return result;
2130 }
2131 else if (to_as == ADDR_SPACE_FAR && from_as == ADDR_SPACE_GENERIC)
2132 {
2133 /* This always works. */
2134 result = gen_reg_rtx (SImode);
2135 emit_insn (gen_zero_extendhisi2 (result, op));
2136 return result;
2137 }
2138 else
2139 gcc_unreachable ();
2140}
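
/* Example (added for illustration, not part of the original file): converting
   generic -> far zero-extends the 16-bit pointer into SImode and always
   preserves the address, whereas far -> generic simply truncates to HImode,
   so it only round-trips for objects that actually live in the first 64K of
   the address space, as the "unpredictable" comment above warns. */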
2141
2142/* Condition Code Status */
2143
2144#undef TARGET_FIXED_CONDITION_CODE_REGS
2145#define TARGET_FIXED_CONDITION_CODE_REGS m32c_fixed_condition_code_regs
2146static bool
2147m32c_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
2148{
2149 *p1 = FLG_REGNO;
2150 *p2 = INVALID_REGNUM;
2151 return true;
2152}
2153
2154/* Describing Relative Costs of Operations */
2155
0e607518 2156/* Implements TARGET_REGISTER_MOVE_COST. We make impossible moves
2157 prohibitively expensive, like trying to put QIs in r2/r3 (there are
2158 no opcodes to do that). We also discourage use of mem* registers
2159 since they're really memory. */
2160
2161#undef TARGET_REGISTER_MOVE_COST
2162#define TARGET_REGISTER_MOVE_COST m32c_register_move_cost
2163
2164static int
ef4bddc2 2165m32c_register_move_cost (machine_mode mode, reg_class_t from,
0e607518 2166 reg_class_t to)
2167{
2168 int cost = COSTS_N_INSNS (3);
2169 HARD_REG_SET cc;
2170
2171/* FIXME: pick real values, but not 2 for now. */
2172 COPY_HARD_REG_SET (cc, reg_class_contents[(int) from]);
2173 IOR_HARD_REG_SET (cc, reg_class_contents[(int) to]);
2174
2175 if (mode == QImode
2176 && hard_reg_set_intersect_p (cc, reg_class_contents[R23_REGS]))
38b2d076 2177 {
0e607518 2178 if (hard_reg_set_subset_p (cc, reg_class_contents[R23_REGS]))
2179 cost = COSTS_N_INSNS (1000);
2180 else
2181 cost = COSTS_N_INSNS (80);
2182 }
2183
2184 if (!class_can_hold_mode (from, mode) || !class_can_hold_mode (to, mode))
2185 cost = COSTS_N_INSNS (1000);
2186
0e607518 2187 if (reg_classes_intersect_p (from, CR_REGS))
2188 cost += COSTS_N_INSNS (5);
2189
0e607518 2190 if (reg_classes_intersect_p (to, CR_REGS))
2191 cost += COSTS_N_INSNS (5);
2192
2193 if (from == MEM_REGS || to == MEM_REGS)
2194 cost += COSTS_N_INSNS (50);
2195 else if (reg_classes_intersect_p (from, MEM_REGS)
2196 || reg_classes_intersect_p (to, MEM_REGS))
2197 cost += COSTS_N_INSNS (10);
2198
2199#if DEBUG0
2200 fprintf (stderr, "register_move_cost %s from %s to %s = %d\n",
2201 mode_name[mode], class_names[(int) from], class_names[(int) to],
2202 cost);
2203#endif
2204 return cost;
2205}
2206
2207/* Implements TARGET_MEMORY_MOVE_COST. */
2208
2209#undef TARGET_MEMORY_MOVE_COST
2210#define TARGET_MEMORY_MOVE_COST m32c_memory_move_cost
2211
2212static int
ef4bddc2 2213m32c_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
2214 reg_class_t rclass ATTRIBUTE_UNUSED,
2215 bool in ATTRIBUTE_UNUSED)
2216{
2217 /* FIXME: pick real values. */
2218 return COSTS_N_INSNS (10);
2219}
2220
2221/* Here we try to describe when we use multiple opcodes for one RTX so
2222 that gcc knows when to use them. */
2223#undef TARGET_RTX_COSTS
2224#define TARGET_RTX_COSTS m32c_rtx_costs
2225static bool
2226m32c_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
2227 int *total, bool speed ATTRIBUTE_UNUSED)
2228{
2229 switch (code)
2230 {
2231 case REG:
2232 if (REGNO (x) >= MEM0_REGNO && REGNO (x) <= MEM7_REGNO)
2233 *total += COSTS_N_INSNS (500);
2234 else
2235 *total += COSTS_N_INSNS (1);
2236 return true;
2237
2238 case ASHIFT:
2239 case LSHIFTRT:
2240 case ASHIFTRT:
2241 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
2242 {
2243 /* mov.b r1l, r1h */
2244 *total += COSTS_N_INSNS (1);
2245 return true;
2246 }
2247 if (INTVAL (XEXP (x, 1)) > 8
2248 || INTVAL (XEXP (x, 1)) < -8)
2249 {
2250 /* mov.b #N, r1l */
2251 /* mov.b r1l, r1h */
2252 *total += COSTS_N_INSNS (2);
2253 return true;
2254 }
2255 return true;
2256
2257 case LE:
2258 case LEU:
2259 case LT:
2260 case LTU:
2261 case GT:
2262 case GTU:
2263 case GE:
2264 case GEU:
2265 case NE:
2266 case EQ:
2267 if (outer_code == SET)
2268 {
2269 *total += COSTS_N_INSNS (2);
2270 return true;
2271 }
2272 break;
2273
2274 case ZERO_EXTRACT:
2275 {
2276 rtx dest = XEXP (x, 0);
2277 rtx addr = XEXP (dest, 0);
2278 switch (GET_CODE (addr))
2279 {
2280 case CONST_INT:
2281 *total += COSTS_N_INSNS (1);
2282 break;
2283 case SYMBOL_REF:
2284 *total += COSTS_N_INSNS (3);
2285 break;
2286 default:
2287 *total += COSTS_N_INSNS (2);
2288 break;
2289 }
2290 return true;
2291 }
2292 break;
2293
2294 default:
2295 /* Reasonable default. */
2296 if (TARGET_A16 && GET_MODE(x) == SImode)
2297 *total += COSTS_N_INSNS (2);
2298 break;
2299 }
2300 return false;
2301}
2302
2303#undef TARGET_ADDRESS_COST
2304#define TARGET_ADDRESS_COST m32c_address_cost
2305static int
ef4bddc2 2306m32c_address_cost (rtx addr, machine_mode mode ATTRIBUTE_UNUSED,
2307 addr_space_t as ATTRIBUTE_UNUSED,
2308 bool speed ATTRIBUTE_UNUSED)
07127a0a 2309{
80b093df 2310 int i;
2311 /* fprintf(stderr, "\naddress_cost\n");
2312 debug_rtx(addr);*/
2313 switch (GET_CODE (addr))
2314 {
2315 case CONST_INT:
2316 i = INTVAL (addr);
2317 if (i == 0)
2318 return COSTS_N_INSNS(1);
2319 if (0 < i && i <= 255)
2320 return COSTS_N_INSNS(2);
2321 if (0 < i && i <= 65535)
2322 return COSTS_N_INSNS(3);
2323 return COSTS_N_INSNS(4);
07127a0a 2324 case SYMBOL_REF:
80b093df 2325 return COSTS_N_INSNS(4);
07127a0a 2326 case REG:
2327 return COSTS_N_INSNS(1);
2328 case PLUS:
2329 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
2330 {
2331 i = INTVAL (XEXP (addr, 1));
2332 if (i == 0)
2333 return COSTS_N_INSNS(1);
2334 if (0 < i && i <= 255)
2335 return COSTS_N_INSNS(2);
2336 if (0 < i && i <= 65535)
2337 return COSTS_N_INSNS(3);
2338 }
2339 return COSTS_N_INSNS(4);
2340 default:
2341 return 0;
2342 }
2343}
2344
2345/* Defining the Output Assembler Language */
2346
2347/* Output of Data */
2348
2349/* We may have 24 bit sizes, which is the native address size.
2350 Currently unused, but provided for completeness. */
2351#undef TARGET_ASM_INTEGER
2352#define TARGET_ASM_INTEGER m32c_asm_integer
2353static bool
2354m32c_asm_integer (rtx x, unsigned int size, int aligned_p)
2355{
2356 switch (size)
2357 {
2358 case 3:
2359 fprintf (asm_out_file, "\t.3byte\t");
2360 output_addr_const (asm_out_file, x);
2361 fputc ('\n', asm_out_file);
2362 return true;
2363 case 4:
2364 if (GET_CODE (x) == SYMBOL_REF)
2365 {
2366 fprintf (asm_out_file, "\t.long\t");
2367 output_addr_const (asm_out_file, x);
2368 fputc ('\n', asm_out_file);
2369 return true;
2370 }
2371 break;
2372 }
2373 return default_assemble_integer (x, size, aligned_p);
2374}
2375
2376/* Output of Assembler Instructions */
2377
a4174ebf 2378/* We use a lookup table because the addressing modes are non-orthogonal. */
2379
2380static struct
2381{
2382 char code;
2383 char const *pattern;
2384 char const *format;
2385}
2386const conversions[] = {
2387 { 0, "r", "0" },
2388
2389 { 0, "mr", "z[1]" },
2390 { 0, "m+ri", "3[2]" },
2391 { 0, "m+rs", "3[2]" },
2392 { 0, "m+^Zrs", "5[4]" },
2393 { 0, "m+^Zri", "5[4]" },
2394 { 0, "m+^Z+ris", "7+6[5]" },
2395 { 0, "m+^Srs", "5[4]" },
2396 { 0, "m+^Sri", "5[4]" },
2397 { 0, "m+^S+ris", "7+6[5]" },
2398 { 0, "m+r+si", "4+5[2]" },
2399 { 0, "ms", "1" },
2400 { 0, "mi", "1" },
2401 { 0, "m+si", "2+3" },
2402
2403 { 0, "mmr", "[z[2]]" },
2404 { 0, "mm+ri", "[4[3]]" },
2405 { 0, "mm+rs", "[4[3]]" },
2406 { 0, "mm+r+si", "[5+6[3]]" },
2407 { 0, "mms", "[[2]]" },
2408 { 0, "mmi", "[[2]]" },
2409 { 0, "mm+si", "[4[3]]" },
2410
2411 { 0, "i", "#0" },
2412 { 0, "s", "#0" },
2413 { 0, "+si", "#1+2" },
2414 { 0, "l", "#0" },
2415
2416 { 'l', "l", "0" },
2417 { 'd', "i", "0" },
2418 { 'd', "s", "0" },
2419 { 'd', "+si", "1+2" },
2420 { 'D', "i", "0" },
2421 { 'D', "s", "0" },
2422 { 'D', "+si", "1+2" },
2423 { 'x', "i", "#0" },
2424 { 'X', "i", "#0" },
2425 { 'm', "i", "#0" },
2426 { 'b', "i", "#0" },
07127a0a 2427 { 'B', "i", "0" },
2428 { 'p', "i", "0" },
2429
2430 { 0, 0, 0 }
2431};
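
/* Worked example (illustrative, added): for the pattern "m+ri" the encoder
   fills patternr[] with { MEM, PLUS, reg, const_int }, and the format "3[2]"
   prints patternr[3], then "[", then patternr[2], then "]".  So
   (mem (plus (reg a0) (const_int 4))) is emitted as "4[a0]". */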
2432
2433/* This is in order according to the bitfield that pushm/popm use. */
2434static char const *pushm_regs[] = {
2435 "fb", "sb", "a1", "a0", "r3", "r2", "r1", "r0"
2436};
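
/* Example (illustrative, added): the 'p' case in m32c_print_operand below
   walks this table from bit 7 down to bit 0, so a PUSHM/POPM mask of 0x81
   (bits 7 and 0 set) is printed as "r0,fb". */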
2437
2438/* Implements TARGET_PRINT_OPERAND. */
2439
2440#undef TARGET_PRINT_OPERAND
2441#define TARGET_PRINT_OPERAND m32c_print_operand
2442
2443static void
2444m32c_print_operand (FILE * file, rtx x, int code)
2445{
2446 int i, j, b;
2447 const char *comma;
2448 HOST_WIDE_INT ival;
2449 int unsigned_const = 0;
ff485e71 2450 int force_sign;
2451
2452 /* Multiplies; constants are converted to sign-extended format but
2453 we need unsigned, so 'u' and 'U' tell us what size unsigned we
2454 need. */
2455 if (code == 'u')
2456 {
2457 unsigned_const = 2;
2458 code = 0;
2459 }
2460 if (code == 'U')
2461 {
2462 unsigned_const = 1;
2463 code = 0;
2464 }
2465 /* This one is only for debugging; you can put it in a pattern to
2466 force this error. */
2467 if (code == '!')
2468 {
2469 fprintf (stderr, "dj: unreviewed pattern:");
2470 if (current_output_insn)
2471 debug_rtx (current_output_insn);
2472 gcc_unreachable ();
2473 }
2474 /* PSImode operations are either .w or .l depending on the target. */
2475 if (code == '&')
2476 {
2477 if (TARGET_A16)
2478 fprintf (file, "w");
2479 else
2480 fprintf (file, "l");
2481 return;
2482 }
2483 /* Inverted conditionals. */
2484 if (code == 'C')
2485 {
2486 switch (GET_CODE (x))
2487 {
2488 case LE:
2489 fputs ("gt", file);
2490 break;
2491 case LEU:
2492 fputs ("gtu", file);
2493 break;
2494 case LT:
2495 fputs ("ge", file);
2496 break;
2497 case LTU:
2498 fputs ("geu", file);
2499 break;
2500 case GT:
2501 fputs ("le", file);
2502 break;
2503 case GTU:
2504 fputs ("leu", file);
2505 break;
2506 case GE:
2507 fputs ("lt", file);
2508 break;
2509 case GEU:
2510 fputs ("ltu", file);
2511 break;
2512 case NE:
2513 fputs ("eq", file);
2514 break;
2515 case EQ:
2516 fputs ("ne", file);
2517 break;
2518 default:
2519 gcc_unreachable ();
2520 }
2521 return;
2522 }
2523 /* Regular conditionals. */
2524 if (code == 'c')
2525 {
2526 switch (GET_CODE (x))
2527 {
2528 case LE:
2529 fputs ("le", file);
2530 break;
2531 case LEU:
2532 fputs ("leu", file);
2533 break;
2534 case LT:
2535 fputs ("lt", file);
2536 break;
2537 case LTU:
2538 fputs ("ltu", file);
2539 break;
2540 case GT:
2541 fputs ("gt", file);
2542 break;
2543 case GTU:
2544 fputs ("gtu", file);
2545 break;
2546 case GE:
2547 fputs ("ge", file);
2548 break;
2549 case GEU:
2550 fputs ("geu", file);
2551 break;
2552 case NE:
2553 fputs ("ne", file);
2554 break;
2555 case EQ:
2556 fputs ("eq", file);
2557 break;
2558 default:
2559 gcc_unreachable ();
2560 }
2561 return;
2562 }
2563 /* Used in negsi2 to do HImode ops on the two parts of an SImode
2564 operand. */
2565 if (code == 'h' && GET_MODE (x) == SImode)
2566 {
2567 x = m32c_subreg (HImode, x, SImode, 0);
2568 code = 0;
2569 }
2570 if (code == 'H' && GET_MODE (x) == SImode)
2571 {
2572 x = m32c_subreg (HImode, x, SImode, 2);
2573 code = 0;
2574 }
2575 if (code == 'h' && GET_MODE (x) == HImode)
2576 {
2577 x = m32c_subreg (QImode, x, HImode, 0);
2578 code = 0;
2579 }
2580 if (code == 'H' && GET_MODE (x) == HImode)
2581 {
2582 /* We can't actually represent this as an rtx. Do it here. */
2583 if (GET_CODE (x) == REG)
2584 {
2585 switch (REGNO (x))
2586 {
2587 case R0_REGNO:
2588 fputs ("r0h", file);
2589 return;
2590 case R1_REGNO:
2591 fputs ("r1h", file);
2592 return;
2593 default:
2594 gcc_unreachable();
2595 }
2596 }
2597 /* This should be a MEM. */
2598 x = m32c_subreg (QImode, x, HImode, 1);
2599 code = 0;
2600 }
2601 /* This is for BMcond, which always wants word register names. */
2602 if (code == 'h' && GET_MODE (x) == QImode)
2603 {
2604 if (GET_CODE (x) == REG)
2605 x = gen_rtx_REG (HImode, REGNO (x));
2606 code = 0;
2607 }
2608 /* 'x' and 'X' need to be ignored for non-immediates. */
2609 if ((code == 'x' || code == 'X') && GET_CODE (x) != CONST_INT)
2610 code = 0;
2611
2612 encode_pattern (x);
ff485e71 2613 force_sign = 0;
2614 for (i = 0; conversions[i].pattern; i++)
2615 if (conversions[i].code == code
2616 && streq (conversions[i].pattern, pattern))
2617 {
2618 for (j = 0; conversions[i].format[j]; j++)
2619 /* backslash quotes the next character in the output pattern. */
2620 if (conversions[i].format[j] == '\\')
2621 {
2622 fputc (conversions[i].format[j + 1], file);
2623 j++;
2624 }
2625 /* Digits in the output pattern indicate that the
2626 corresponding RTX is to be output at that point. */
2627 else if (ISDIGIT (conversions[i].format[j]))
2628 {
2629 rtx r = patternr[conversions[i].format[j] - '0'];
2630 switch (GET_CODE (r))
2631 {
2632 case REG:
2633 fprintf (file, "%s",
2634 reg_name_with_mode (REGNO (r), GET_MODE (r)));
2635 break;
2636 case CONST_INT:
2637 switch (code)
2638 {
2639 case 'b':
2640 case 'B':
2641 {
2642 int v = INTVAL (r);
2643 int i = (int) exact_log2 (v);
2644 if (i == -1)
2645 i = (int) exact_log2 ((v ^ 0xffff) & 0xffff);
2646 if (i == -1)
2647 i = (int) exact_log2 ((v ^ 0xff) & 0xff);
2648 /* Bit position. */
2649 fprintf (file, "%d", i);
2650 }
2651 break;
2652 case 'x':
2653 /* Unsigned byte. */
2654 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2655 INTVAL (r) & 0xff);
2656 break;
2657 case 'X':
2658 /* Unsigned word. */
2659 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2660 INTVAL (r) & 0xffff);
2661 break;
2662 case 'p':
2663 /* pushm and popm encode a register set into a single byte. */
2664 comma = "";
2665 for (b = 7; b >= 0; b--)
2666 if (INTVAL (r) & (1 << b))
2667 {
2668 fprintf (file, "%s%s", comma, pushm_regs[b]);
2669 comma = ",";
2670 }
2671 break;
2672 case 'm':
2673 /* "Minus". Output -X */
2674 ival = (-INTVAL (r) & 0xffff);
2675 if (ival & 0x8000)
2676 ival = ival - 0x10000;
2677 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2678 break;
2679 default:
2680 ival = INTVAL (r);
2681 if (conversions[i].format[j + 1] == '[' && ival < 0)
2682 {
2683 /* We can simulate negative displacements by
2684 taking advantage of address space
2685 wrapping when the offset can span the
2686 entire address range. */
2687 rtx base =
2688 patternr[conversions[i].format[j + 2] - '0'];
2689 if (GET_CODE (base) == REG)
2690 switch (REGNO (base))
2691 {
2692 case A0_REGNO:
2693 case A1_REGNO:
2694 if (TARGET_A24)
2695 ival = 0x1000000 + ival;
2696 else
2697 ival = 0x10000 + ival;
2698 break;
2699 case SB_REGNO:
2700 if (TARGET_A16)
2701 ival = 0x10000 + ival;
2702 break;
2703 }
2704 }
2705 else if (code == 'd' && ival < 0 && j == 0)
2706 /* The "mova" opcode is used to do addition by
2707 computing displacements, but again, we need
2708 displacements to be unsigned *if* they're
2709 the only component of the displacement
2710 (i.e. no "symbol-4" type displacement). */
2711 ival = (TARGET_A24 ? 0x1000000 : 0x10000) + ival;
2712
2713 if (conversions[i].format[j] == '0')
2714 {
2715 /* More conversions to unsigned. */
2716 if (unsigned_const == 2)
2717 ival &= 0xffff;
2718 if (unsigned_const == 1)
2719 ival &= 0xff;
2720 }
2721 if (streq (conversions[i].pattern, "mi")
2722 || streq (conversions[i].pattern, "mmi"))
2723 {
2724 /* Integers used as addresses are unsigned. */
2725 ival &= (TARGET_A24 ? 0xffffff : 0xffff);
2726 }
2727 if (force_sign && ival >= 0)
2728 fputc ('+', file);
2729 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2730 break;
2731 }
2732 break;
2733 case CONST_DOUBLE:
2734 /* We don't have const_double constants. If it
2735 happens, make it obvious. */
2736 fprintf (file, "[const_double 0x%lx]",
2737 (unsigned long) CONST_DOUBLE_HIGH (r));
2738 break;
2739 case SYMBOL_REF:
2740 assemble_name (file, XSTR (r, 0));
2741 break;
2742 case LABEL_REF:
2743 output_asm_label (r);
2744 break;
2745 default:
2746 fprintf (stderr, "don't know how to print this operand:");
2747 debug_rtx (r);
2748 gcc_unreachable ();
2749 }
2750 }
2751 else
2752 {
2753 if (conversions[i].format[j] == 'z')
2754 {
2755 /* Some addressing modes *must* have a displacement,
2756 so insert a zero here if needed. */
2757 int k;
2758 for (k = j + 1; conversions[i].format[k]; k++)
2759 if (ISDIGIT (conversions[i].format[k]))
2760 {
2761 rtx reg = patternr[conversions[i].format[k] - '0'];
2762 if (GET_CODE (reg) == REG
2763 && (REGNO (reg) == SB_REGNO
2764 || REGNO (reg) == FB_REGNO
2765 || REGNO (reg) == SP_REGNO))
2766 fputc ('0', file);
2767 }
2768 continue;
2769 }
2770 /* Signed displacements off symbols need to have signs
2771 blended cleanly. */
2772 if (conversions[i].format[j] == '+'
ff485e71 2773 && (!code || code == 'D' || code == 'd')
38b2d076 2774 && ISDIGIT (conversions[i].format[j + 1])
2775 && (GET_CODE (patternr[conversions[i].format[j + 1] - '0'])
2776 == CONST_INT))
2777 {
2778 force_sign = 1;
2779 continue;
2780 }
2781 fputc (conversions[i].format[j], file);
2782 }
2783 break;
2784 }
2785 if (!conversions[i].pattern)
2786 {
2787 fprintf (stderr, "unconvertible operand %c `%s'", code ? code : '-',
2788 pattern);
2789 debug_rtx (x);
2790 fprintf (file, "[%c.%s]", code ? code : '-', pattern);
2791 }
2792
2793 return;
2794}
2795
2796/* Implements TARGET_PRINT_OPERAND_PUNCT_VALID_P.
2797
2798 See m32c_print_operand above for descriptions of what these do. */
2799
2800#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
2801#define TARGET_PRINT_OPERAND_PUNCT_VALID_P m32c_print_operand_punct_valid_p
2802
2803static bool
2804m32c_print_operand_punct_valid_p (unsigned char c)
2805{
2806 if (c == '&' || c == '!')
2807 return true;
2808
2809 return false;
2810}
2811
2812/* Implements TARGET_PRINT_OPERAND_ADDRESS. Nothing unusual here. */
2813
2814#undef TARGET_PRINT_OPERAND_ADDRESS
2815#define TARGET_PRINT_OPERAND_ADDRESS m32c_print_operand_address
2816
2817static void
2818m32c_print_operand_address (FILE * stream, rtx address)
2819{
2820 if (GET_CODE (address) == MEM)
2821 address = XEXP (address, 0);
2822 else
2823 /* cf: gcc.dg/asm-4.c. */
2824 gcc_assert (GET_CODE (address) == REG);
2825
2826 m32c_print_operand (stream, address, 0);
2827}
2828
2829/* Implements ASM_OUTPUT_REG_PUSH. Control registers are pushed
2830 differently than general registers. */
2831void
2832m32c_output_reg_push (FILE * s, int regno)
2833{
2834 if (regno == FLG_REGNO)
2835 fprintf (s, "\tpushc\tflg\n");
2836 else
04aff2c0 2837 fprintf (s, "\tpush.%c\t%s\n",
2838 " bwll"[reg_push_size (regno)], reg_names[regno]);
2839}
2840
2841/* Likewise for ASM_OUTPUT_REG_POP. */
2842void
2843m32c_output_reg_pop (FILE * s, int regno)
2844{
2845 if (regno == FLG_REGNO)
2846 fprintf (s, "\tpopc\tflg\n");
2847 else
04aff2c0 2848 fprintf (s, "\tpop.%c\t%s\n",
2849 " bwll"[reg_push_size (regno)], reg_names[regno]);
2850}
2851
2852/* Defining target-specific uses of `__attribute__' */
2853
2854/* Used to simplify the logic below. Find the attributes wherever
2855 they may be. */
2856#define M32C_ATTRIBUTES(decl) \
2857 (TYPE_P (decl)) ? TYPE_ATTRIBUTES (decl) \
2858 : DECL_ATTRIBUTES (decl) \
2859 ? (DECL_ATTRIBUTES (decl)) \
2860 : TYPE_ATTRIBUTES (TREE_TYPE (decl))
2861
2862/* Returns TRUE if the given tree has the "interrupt" attribute. */
2863static int
2864interrupt_p (tree node ATTRIBUTE_UNUSED)
2865{
2866 tree list = M32C_ATTRIBUTES (node);
2867 while (list)
2868 {
2869 if (is_attribute_p ("interrupt", TREE_PURPOSE (list)))
2870 return 1;
2871 list = TREE_CHAIN (list);
2872 }
2873 return fast_interrupt_p (node);
2874}
2875
2876/* Returns TRUE if the given tree has the "bank_switch" attribute. */
2877static int
2878bank_switch_p (tree node ATTRIBUTE_UNUSED)
2879{
2880 tree list = M32C_ATTRIBUTES (node);
2881 while (list)
2882 {
2883 if (is_attribute_p ("bank_switch", TREE_PURPOSE (list)))
2884 return 1;
2885 list = TREE_CHAIN (list);
2886 }
2887 return 0;
2888}
2889
2890/* Returns TRUE if the given tree has the "fast_interrupt" attribute. */
2891static int
2892fast_interrupt_p (tree node ATTRIBUTE_UNUSED)
2893{
2894 tree list = M32C_ATTRIBUTES (node);
2895 while (list)
2896 {
2897 if (is_attribute_p ("fast_interrupt", TREE_PURPOSE (list)))
2898 return 1;
2899 list = TREE_CHAIN (list);
2900 }
2901 return 0;
2902}
2903
2904static tree
2905interrupt_handler (tree * node ATTRIBUTE_UNUSED,
2906 tree name ATTRIBUTE_UNUSED,
2907 tree args ATTRIBUTE_UNUSED,
2908 int flags ATTRIBUTE_UNUSED,
2909 bool * no_add_attrs ATTRIBUTE_UNUSED)
2910{
2911 return NULL_TREE;
2912}
2913
2914/* Returns TRUE if given tree has the "function_vector" attribute. */
2915int
2916m32c_special_page_vector_p (tree func)
2917{
2918 tree list;
2919
2920 if (TREE_CODE (func) != FUNCTION_DECL)
2921 return 0;
2922
653e2568 2923 list = M32C_ATTRIBUTES (func);
2924 while (list)
2925 {
2926 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2927 return 1;
2928 list = TREE_CHAIN (list);
2929 }
2930 return 0;
2931}
2932
2933static tree
2934function_vector_handler (tree * node ATTRIBUTE_UNUSED,
2935 tree name ATTRIBUTE_UNUSED,
2936 tree args ATTRIBUTE_UNUSED,
2937 int flags ATTRIBUTE_UNUSED,
2938 bool * no_add_attrs ATTRIBUTE_UNUSED)
2939{
2940 if (TARGET_R8C)
2941 {
2942 /* The attribute is not supported for R8C target. */
2943 warning (OPT_Wattributes,
2944 "%qE attribute is not supported for R8C target",
2945 name);
2946 *no_add_attrs = true;
2947 }
2948 else if (TREE_CODE (*node) != FUNCTION_DECL)
2949 {
2950 /* The attribute must be applied to functions only. */
2951 warning (OPT_Wattributes,
2952 "%qE attribute applies only to functions",
2953 name);
2954 *no_add_attrs = true;
2955 }
2956 else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
2957 {
2958 /* The argument must be a constant integer. */
2959 warning (OPT_Wattributes,
2960 "%qE attribute argument not an integer constant",
2961 name);
2962 *no_add_attrs = true;
2963 }
2964 else if (TREE_INT_CST_LOW (TREE_VALUE (args)) < 18
2965 || TREE_INT_CST_LOW (TREE_VALUE (args)) > 255)
2966 {
 2967	      /* The argument value must be between 18 and 255. */
2968 warning (OPT_Wattributes,
2969 "%qE attribute argument should be between 18 to 255",
2970 name);
2971 *no_add_attrs = true;
2972 }
2973 return NULL_TREE;
2974}
2975
2976/* If the function is assigned the attribute 'function_vector', it
2977 returns the function vector number, otherwise returns zero. */
2978int
2979current_function_special_page_vector (rtx x)
2980{
2981 int num;
2982
2983 if ((GET_CODE(x) == SYMBOL_REF)
2984 && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
2985 {
653e2568 2986 tree list;
2987 tree t = SYMBOL_REF_DECL (x);
2988
2989 if (TREE_CODE (t) != FUNCTION_DECL)
2990 return 0;
2991
653e2568 2992 list = M32C_ATTRIBUTES (t);
2993 while (list)
2994 {
2995 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2996 {
2997 num = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list)));
2998 return num;
2999 }
3000
3001 list = TREE_CHAIN (list);
3002 }
3003
3004 return 0;
3005 }
3006 else
3007 return 0;
3008}
3009
3010#undef TARGET_ATTRIBUTE_TABLE
3011#define TARGET_ATTRIBUTE_TABLE m32c_attribute_table
3012static const struct attribute_spec m32c_attribute_table[] = {
3013 {"interrupt", 0, 0, false, false, false, interrupt_handler, false},
3014 {"bank_switch", 0, 0, false, false, false, interrupt_handler, false},
3015 {"fast_interrupt", 0, 0, false, false, false, interrupt_handler, false},
3016 {"function_vector", 1, 1, true, false, false, function_vector_handler,
3017 false},
3018 {0, 0, 0, 0, 0, 0, 0, false}
3019};
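
/* Usage sketch (added for illustration; see the GCC manual for the
   authoritative syntax and semantics of these M32C attributes):

       void __attribute__((interrupt))           timer_isr (void);
       void __attribute__((fast_interrupt))      dma_isr (void);
       void __attribute__((bank_switch))         uart_isr (void);
       void __attribute__((function_vector(18))) svc_18 (void);

   The function_vector argument must be a constant between 18 and 255 and
   is rejected on R8C, as enforced by function_vector_handler above. */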
3020
3021#undef TARGET_COMP_TYPE_ATTRIBUTES
3022#define TARGET_COMP_TYPE_ATTRIBUTES m32c_comp_type_attributes
3023static int
3024m32c_comp_type_attributes (const_tree type1 ATTRIBUTE_UNUSED,
3025 const_tree type2 ATTRIBUTE_UNUSED)
3026{
3027 /* 0=incompatible 1=compatible 2=warning */
3028 return 1;
3029}
3030
3031#undef TARGET_INSERT_ATTRIBUTES
3032#define TARGET_INSERT_ATTRIBUTES m32c_insert_attributes
3033static void
3034m32c_insert_attributes (tree node ATTRIBUTE_UNUSED,
3035 tree * attr_ptr ATTRIBUTE_UNUSED)
3036{
3037 unsigned addr;
3038 /* See if we need to make #pragma address variables volatile. */
3039
3040 if (TREE_CODE (node) == VAR_DECL)
3041 {
444d6efe 3042 const char *name = IDENTIFIER_POINTER (DECL_NAME (node));
3043 if (m32c_get_pragma_address (name, &addr))
3044 {
3045 TREE_THIS_VOLATILE (node) = true;
3046 }
3047 }
3048}
3049
3050
3051struct pragma_traits : default_hashmap_traits
3052{
3053 static hashval_t hash (const char *str) { return htab_hash_string (str); }
3054 static bool
3055 equal_keys (const char *a, const char *b)
3056 {
3057 return !strcmp (a, b);
3058 }
f6052f86 3059};
3060
3061/* Hash table of pragma info. */
2a22f99c 3062static GTY(()) hash_map<const char *, unsigned, pragma_traits> *pragma_htab;
3063
3064void
3065m32c_note_pragma_address (const char *varname, unsigned address)
3066{
f6052f86 3067 if (!pragma_htab)
3068 pragma_htab
3069 = hash_map<const char *, unsigned, pragma_traits>::create_ggc (31);
f6052f86 3070
3071 const char *name = ggc_strdup (varname);
3072 unsigned int *slot = &pragma_htab->get_or_insert (name);
3073 *slot = address;
3074}
3075
3076static bool
3077m32c_get_pragma_address (const char *varname, unsigned *address)
3078{
3079 if (!pragma_htab)
3080 return false;
3081
3082 unsigned int *slot = pragma_htab->get (varname);
3083 if (slot)
f6052f86 3084 {
2a22f99c 3085 *address = *slot;
3086 return true;
3087 }
3088 return false;
3089}
3090
3091void
3092m32c_output_aligned_common (FILE *stream, tree decl ATTRIBUTE_UNUSED,
3093 const char *name,
3094 int size, int align, int global)
3095{
3096 unsigned address;
3097
3098 if (m32c_get_pragma_address (name, &address))
3099 {
3100 /* We never output these as global. */
3101 assemble_name (stream, name);
3102 fprintf (stream, " = 0x%04x\n", address);
3103 return;
3104 }
3105 if (!global)
3106 {
3107 fprintf (stream, "\t.local\t");
3108 assemble_name (stream, name);
3109 fprintf (stream, "\n");
3110 }
3111 fprintf (stream, "\t.comm\t");
3112 assemble_name (stream, name);
3113 fprintf (stream, ",%u,%u\n", size, align / BITS_PER_UNIT);
3114}
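
/* Flow sketch (illustrative, added; "port0" is a made-up name): a variable
   registered through m32c_note_pragma_address is marked volatile by
   m32c_insert_attributes, and instead of a .comm directive it is emitted
   here as an absolute assignment, e.g.

       port0 = 0x03aa

   matching the "name = 0x%04x" format above; such symbols are never
   emitted as global. */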
3115
3116/* Predicates */
3117
f9b89438 3118/* This is a list of legal subregs of hard regs. */
3119static const struct {
3120 unsigned char outer_mode_size;
3121 unsigned char inner_mode_size;
3122 unsigned char byte_mask;
3123 unsigned char legal_when;
f9b89438 3124 unsigned int regno;
f9b89438 3125} legal_subregs[] = {
3126 {1, 2, 0x03, 1, R0_REGNO}, /* r0h r0l */
3127 {1, 2, 0x03, 1, R1_REGNO}, /* r1h r1l */
3128 {1, 2, 0x01, 1, A0_REGNO},
3129 {1, 2, 0x01, 1, A1_REGNO},
f9b89438 3130
3131 {1, 4, 0x01, 1, A0_REGNO},
3132 {1, 4, 0x01, 1, A1_REGNO},
f9b89438 3133
3134 {2, 4, 0x05, 1, R0_REGNO}, /* r2 r0 */
3135 {2, 4, 0x05, 1, R1_REGNO}, /* r3 r1 */
3136 {2, 4, 0x05, 16, A0_REGNO}, /* a1 a0 */
3137 {2, 4, 0x01, 24, A0_REGNO}, /* a1 a0 */
3138 {2, 4, 0x01, 24, A1_REGNO}, /* a1 a0 */
f9b89438 3139
67fc44cb 3140 {4, 8, 0x55, 1, R0_REGNO}, /* r3 r1 r2 r0 */
3141};
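
/* Reading the table (illustrative note, added): the first row
   {1, 2, 0x03, 1, R0_REGNO} says a 1-byte (QImode) subreg of the 2-byte r0
   is legal at byte offsets 0 and 1 (mask 0x03) on every chip
   (legal_when == 1), while {2, 4, 0x05, 16, A0_REGNO} allows the HImode
   halves of the 4-byte a1:a0 pair (offsets 0 and 2) only when TARGET_A16,
   per the legal_when switch in m32c_illegal_subreg_p below. */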
3142
3143/* Returns TRUE if OP is a subreg of a hard reg which we don't
f6052f86 3144 support. We also bail on MEMs with illegal addresses. */
3145bool
3146m32c_illegal_subreg_p (rtx op)
3147{
3148 int offset;
3149 unsigned int i;
ef4bddc2 3150 machine_mode src_mode, dest_mode;
f9b89438 3151
3152 if (GET_CODE (op) == MEM
3153 && ! m32c_legitimate_address_p (Pmode, XEXP (op, 0), false))
3154 {
3155 return true;
3156 }
3157
3158 if (GET_CODE (op) != SUBREG)
3159 return false;
3160
3161 dest_mode = GET_MODE (op);
3162 offset = SUBREG_BYTE (op);
3163 op = SUBREG_REG (op);
3164 src_mode = GET_MODE (op);
3165
3166 if (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (src_mode))
3167 return false;
3168 if (GET_CODE (op) != REG)
3169 return false;
3170 if (REGNO (op) >= MEM0_REGNO)
3171 return false;
3172
3173 offset = (1 << offset);
3174
67fc44cb 3175 for (i = 0; i < ARRAY_SIZE (legal_subregs); i ++)
3176 if (legal_subregs[i].outer_mode_size == GET_MODE_SIZE (dest_mode)
3177 && legal_subregs[i].regno == REGNO (op)
3178 && legal_subregs[i].inner_mode_size == GET_MODE_SIZE (src_mode)
3179 && legal_subregs[i].byte_mask & offset)
3180 {
3181 switch (legal_subregs[i].legal_when)
3182 {
3183 case 1:
3184 return false;
3185 case 16:
3186 if (TARGET_A16)
3187 return false;
3188 break;
3189 case 24:
3190 if (TARGET_A24)
3191 return false;
3192 break;
3193 }
3194 }
3195 return true;
3196}
3197
3198/* Returns TRUE if we support a move between the first two operands.
3199 At the moment, we just want to discourage mem to mem moves until
3200 after reload, because reload has a hard time with our limited
3201 number of address registers, and we can get into a situation where
3202 we need three of them when we only have two. */
3203bool
ef4bddc2 3204m32c_mov_ok (rtx * operands, machine_mode mode ATTRIBUTE_UNUSED)
3205{
3206 rtx op0 = operands[0];
3207 rtx op1 = operands[1];
3208
3209 if (TARGET_A24)
3210 return true;
3211
3212#define DEBUG_MOV_OK 0
3213#if DEBUG_MOV_OK
3214 fprintf (stderr, "m32c_mov_ok %s\n", mode_name[mode]);
3215 debug_rtx (op0);
3216 debug_rtx (op1);
3217#endif
3218
3219 if (GET_CODE (op0) == SUBREG)
3220 op0 = XEXP (op0, 0);
3221 if (GET_CODE (op1) == SUBREG)
3222 op1 = XEXP (op1, 0);
3223
3224 if (GET_CODE (op0) == MEM
3225 && GET_CODE (op1) == MEM
3226 && ! reload_completed)
3227 {
3228#if DEBUG_MOV_OK
3229 fprintf (stderr, " - no, mem to mem\n");
3230#endif
3231 return false;
3232 }
3233
3234#if DEBUG_MOV_OK
3235 fprintf (stderr, " - ok\n");
3236#endif
3237 return true;
3238}
3239
3240/* Returns TRUE if two consecutive HImode mov instructions, generated
 3241   for moving an immediate into a doubleword-sized variable, can be
 3242   combined into a single SImode mov instruction. */
3243bool
55356334 3244m32c_immd_dbl_mov (rtx * operands ATTRIBUTE_UNUSED,
ef4bddc2 3245 machine_mode mode ATTRIBUTE_UNUSED)
ff485e71 3246{
3247 /* ??? This relied on the now-defunct MEM_SCALAR and MEM_IN_STRUCT_P
3248 flags. */
3249 return false;
3250}
3251
3252/* Expanders */
3253
3254/* Subregs are non-orthogonal for us, because our registers are all
3255 different sizes. */
3256static rtx
3257m32c_subreg (machine_mode outer,
3258 rtx x, machine_mode inner, int byte)
3259{
3260 int r, nr = -1;
3261
3262 /* Converting MEMs to different types that are the same size, we
3263 just rewrite them. */
3264 if (GET_CODE (x) == SUBREG
3265 && SUBREG_BYTE (x) == 0
3266 && GET_CODE (SUBREG_REG (x)) == MEM
3267 && (GET_MODE_SIZE (GET_MODE (x))
3268 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
3269 {
3270 rtx oldx = x;
3271 x = gen_rtx_MEM (GET_MODE (x), XEXP (SUBREG_REG (x), 0));
3272 MEM_COPY_ATTRIBUTES (x, SUBREG_REG (oldx));
3273 }
3274
3275 /* Push/pop get done as smaller push/pops. */
3276 if (GET_CODE (x) == MEM
3277 && (GET_CODE (XEXP (x, 0)) == PRE_DEC
3278 || GET_CODE (XEXP (x, 0)) == POST_INC))
3279 return gen_rtx_MEM (outer, XEXP (x, 0));
3280 if (GET_CODE (x) == SUBREG
3281 && GET_CODE (XEXP (x, 0)) == MEM
3282 && (GET_CODE (XEXP (XEXP (x, 0), 0)) == PRE_DEC
3283 || GET_CODE (XEXP (XEXP (x, 0), 0)) == POST_INC))
3284 return gen_rtx_MEM (outer, XEXP (XEXP (x, 0), 0));
3285
3286 if (GET_CODE (x) != REG)
3287 {
3288 rtx r = simplify_gen_subreg (outer, x, inner, byte);
3289 if (GET_CODE (r) == SUBREG
3290 && GET_CODE (x) == MEM
3291 && MEM_VOLATILE_P (x))
3292 {
3293 /* Volatile MEMs don't get simplified, but we need them to
3294 be. We are little endian, so the subreg byte is the
3295 offset. */
91140cd3 3296 r = adjust_address_nv (x, outer, byte);
3297 }
3298 return r;
3299 }
3300
3301 r = REGNO (x);
3302 if (r >= FIRST_PSEUDO_REGISTER || r == AP_REGNO)
3303 return simplify_gen_subreg (outer, x, inner, byte);
3304
3305 if (IS_MEM_REGNO (r))
3306 return simplify_gen_subreg (outer, x, inner, byte);
3307
3308 /* This is where the complexities of our register layout are
3309 described. */
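 /* For example (illustrative, added): a DImode value rooted at r0 is spread
    across r0, r2, r1, r3, so the HImode piece at byte offset 2 is r2 and
    the piece at byte offset 4 is r1, matching the mapping below and the
    "r3 r1 r2 r0" row in legal_subregs above. */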
3310 if (byte == 0)
3311 nr = r;
3312 else if (outer == HImode)
3313 {
3314 if (r == R0_REGNO && byte == 2)
3315 nr = R2_REGNO;
3316 else if (r == R0_REGNO && byte == 4)
3317 nr = R1_REGNO;
3318 else if (r == R0_REGNO && byte == 6)
3319 nr = R3_REGNO;
3320 else if (r == R1_REGNO && byte == 2)
3321 nr = R3_REGNO;
3322 else if (r == A0_REGNO && byte == 2)
3323 nr = A1_REGNO;
3324 }
3325 else if (outer == SImode)
3326 {
3327 if (r == R0_REGNO && byte == 0)
3328 nr = R0_REGNO;
3329 else if (r == R0_REGNO && byte == 4)
3330 nr = R1_REGNO;
3331 }
3332 if (nr == -1)
3333 {
3334 fprintf (stderr, "m32c_subreg %s %s %d\n",
3335 mode_name[outer], mode_name[inner], byte);
3336 debug_rtx (x);
3337 gcc_unreachable ();
3338 }
3339 return gen_rtx_REG (outer, nr);
3340}
3341
3342/* Used to emit move instructions. We split some moves,
3343 and avoid mem-mem moves. */
3344int
ef4bddc2 3345m32c_prepare_move (rtx * operands, machine_mode mode)
38b2d076 3346{
3347 if (far_addr_space_p (operands[0])
3348 && CONSTANT_P (operands[1]))
3349 {
3350 operands[1] = force_reg (GET_MODE (operands[0]), operands[1]);
3351 }
3352 if (TARGET_A16 && mode == PSImode)
3353 return m32c_split_move (operands, mode, 1);
3354 if ((GET_CODE (operands[0]) == MEM)
3355 && (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY))
3356 {
3357 rtx pmv = XEXP (operands[0], 0);
3358 rtx dest_reg = XEXP (pmv, 0);
3359 rtx dest_mod = XEXP (pmv, 1);
3360
3361 emit_insn (gen_rtx_SET (Pmode, dest_reg, dest_mod));
3362 operands[0] = gen_rtx_MEM (mode, dest_reg);
3363 }
b3a13419 3364 if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
3365 operands[1] = copy_to_mode_reg (mode, operands[1]);
3366 return 0;
3367}
3368
3369#define DEBUG_SPLIT 0
3370
3371/* Returns TRUE if the given PSImode move should be split. We split
3372 for all r8c/m16c moves, since it doesn't support them, and for
3373 POP.L as we can only *push* SImode. */
3374int
3375m32c_split_psi_p (rtx * operands)
3376{
3377#if DEBUG_SPLIT
3378 fprintf (stderr, "\nm32c_split_psi_p\n");
3379 debug_rtx (operands[0]);
3380 debug_rtx (operands[1]);
3381#endif
3382 if (TARGET_A16)
3383 {
3384#if DEBUG_SPLIT
3385 fprintf (stderr, "yes, A16\n");
3386#endif
3387 return 1;
3388 }
3389 if (GET_CODE (operands[1]) == MEM
3390 && GET_CODE (XEXP (operands[1], 0)) == POST_INC)
3391 {
3392#if DEBUG_SPLIT
3393 fprintf (stderr, "yes, pop.l\n");
3394#endif
3395 return 1;
3396 }
3397#if DEBUG_SPLIT
3398 fprintf (stderr, "no, default\n");
3399#endif
3400 return 0;
3401}
3402
3403/* Split the given move. SPLIT_ALL is 0 if splitting is optional
3404 (define_expand), 1 if it is not optional (define_insn_and_split),
3405 and 3 for define_split (alternate api). */
3406int
ef4bddc2 3407m32c_split_move (rtx * operands, machine_mode mode, int split_all)
3408{
3409 rtx s[4], d[4];
3410 int parts, si, di, rev = 0;
3411 int rv = 0, opi = 2;
ef4bddc2 3412 machine_mode submode = HImode;
3413 rtx *ops, local_ops[10];
3414
3415 /* define_split modifies the existing operands, but the other two
3416 emit new insns. OPS is where we store the operand pairs, which
3417 we emit later. */
3418 if (split_all == 3)
3419 ops = operands;
3420 else
3421 ops = local_ops;
3422
3423 /* Else HImode. */
3424 if (mode == DImode)
3425 submode = SImode;
3426
3427 /* Before splitting mem-mem moves, force one operand into a
3428 register. */
b3a13419 3429 if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
3430 {
3431#if DEBUG0
3432 fprintf (stderr, "force_reg...\n");
3433 debug_rtx (operands[1]);
3434#endif
3435 operands[1] = force_reg (mode, operands[1]);
3436#if DEBUG0
3437 debug_rtx (operands[1]);
3438#endif
3439 }
3440
3441 parts = 2;
3442
3443#if DEBUG_SPLIT
3444 fprintf (stderr, "\nsplit_move %d all=%d\n", !can_create_pseudo_p (),
3445 split_all);
3446 debug_rtx (operands[0]);
3447 debug_rtx (operands[1]);
3448#endif
3449
3450 /* Note that split_all is not used to select the api after this
3451 point, so it's safe to set it to 3 even with define_insn. */
3452 /* None of the chips can move SI operands to sp-relative addresses,
3453 so we always split those. */
03dd17b1 3454 if (satisfies_constraint_Ss (operands[0]))
3455 split_all = 3;
3456
3457 if (TARGET_A16
3458 && (far_addr_space_p (operands[0])
3459 || far_addr_space_p (operands[1])))
3460 split_all |= 1;
3461
3462 /* We don't need to split these. */
3463 if (TARGET_A24
3464 && split_all != 3
3465 && (mode == SImode || mode == PSImode)
3466 && !(GET_CODE (operands[1]) == MEM
3467 && GET_CODE (XEXP (operands[1], 0)) == POST_INC))
3468 return 0;
3469
3470 /* First, enumerate the subregs we'll be dealing with. */
3471 for (si = 0; si < parts; si++)
3472 {
3473 d[si] =
3474 m32c_subreg (submode, operands[0], mode,
3475 si * GET_MODE_SIZE (submode));
3476 s[si] =
3477 m32c_subreg (submode, operands[1], mode,
3478 si * GET_MODE_SIZE (submode));
3479 }
3480
3481 /* Split pushes by emitting a sequence of smaller pushes. */
3482 if (GET_CODE (d[0]) == MEM && GET_CODE (XEXP (d[0], 0)) == PRE_DEC)
3483 {
3484 for (si = parts - 1; si >= 0; si--)
3485 {
3486 ops[opi++] = gen_rtx_MEM (submode,
3487 gen_rtx_PRE_DEC (Pmode,
3488 gen_rtx_REG (Pmode,
3489 SP_REGNO)));
3490 ops[opi++] = s[si];
3491 }
3492
3493 rv = 1;
3494 }
3495 /* Likewise for pops. */
3496 else if (GET_CODE (s[0]) == MEM && GET_CODE (XEXP (s[0], 0)) == POST_INC)
3497 {
3498 for (di = 0; di < parts; di++)
3499 {
3500 ops[opi++] = d[di];
3501 ops[opi++] = gen_rtx_MEM (submode,
3502 gen_rtx_POST_INC (Pmode,
3503 gen_rtx_REG (Pmode,
3504 SP_REGNO)));
3505 }
3506 rv = 1;
3507 }
3508 else if (split_all)
3509 {
3510 /* if d[di] == s[si] for any di < si, we'll early clobber. */
3511 for (di = 0; di < parts - 1; di++)
3512 for (si = di + 1; si < parts; si++)
3513 if (reg_mentioned_p (d[di], s[si]))
3514 rev = 1;
3515
3516 if (rev)
3517 for (si = 0; si < parts; si++)
3518 {
3519 ops[opi++] = d[si];
3520 ops[opi++] = s[si];
3521 }
3522 else
3523 for (si = parts - 1; si >= 0; si--)
3524 {
3525 ops[opi++] = d[si];
3526 ops[opi++] = s[si];
3527 }
3528 rv = 1;
3529 }
3530 /* Now emit any moves we may have accumulated. */
3531 if (rv && split_all != 3)
3532 {
3533 int i;
3534 for (i = 2; i < opi; i += 2)
3535 emit_move_insn (ops[i], ops[i + 1]);
3536 }
3537 return rv;
3538}
3539
3540/* The m32c has a number of opcodes that act like memcpy, strcmp, and
3541 the like. For the R8C they expect one of the addresses to be in
3542 R1L:An so we need to arrange for that. Otherwise, it's just a
3543 matter of picking out the operands we want and emitting the right
3544 pattern for them. All these expanders, which correspond to
3545 patterns in blkmov.md, must return nonzero if they expand the insn,
3546 or zero if they should FAIL. */
3547
3548/* This is a memset() opcode. All operands are implied, so we need to
3549 arrange for them to be in the right registers. The opcode wants
3550 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3551 the count (HI), and $2 the value (QI). */
3552int
3553m32c_expand_setmemhi(rtx *operands)
3554{
3555 rtx desta, count, val;
3556 rtx desto, counto;
3557
3558 desta = XEXP (operands[0], 0);
3559 count = operands[1];
3560 val = operands[2];
3561
3562 desto = gen_reg_rtx (Pmode);
3563 counto = gen_reg_rtx (HImode);
3564
3565 if (GET_CODE (desta) != REG
3566 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3567 desta = copy_to_mode_reg (Pmode, desta);
3568
3569 /* This looks like an arbitrary restriction, but this is by far the
3570 most common case. For counts 8..14 this actually results in
3571 smaller code with no speed penalty because the half-sized
3572 constant can be loaded with a shorter opcode. */
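 /* Example (illustrative, added): memset (dst, 1, 10) arrives here with
    val == 1 and count == 10, so v becomes 0x0101, the count is halved to 5,
    and the word-wide setmemhi_w*_op pattern is used instead of the
    byte-wide one. */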
3573 if (GET_CODE (count) == CONST_INT
3574 && GET_CODE (val) == CONST_INT
3575 && ! (INTVAL (count) & 1)
3576 && (INTVAL (count) > 1)
3577 && (INTVAL (val) <= 7 && INTVAL (val) >= -8))
3578 {
3579 unsigned v = INTVAL (val) & 0xff;
3580 v = v | (v << 8);
3581 count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3582 val = copy_to_mode_reg (HImode, GEN_INT (v));
3583 if (TARGET_A16)
3584 emit_insn (gen_setmemhi_whi_op (desto, counto, val, desta, count));
3585 else
3586 emit_insn (gen_setmemhi_wpsi_op (desto, counto, val, desta, count));
3587 return 1;
3588 }
3589
3590 /* This is the generalized memset() case. */
3591 if (GET_CODE (val) != REG
3592 || REGNO (val) < FIRST_PSEUDO_REGISTER)
3593 val = copy_to_mode_reg (QImode, val);
3594
3595 if (GET_CODE (count) != REG
3596 || REGNO (count) < FIRST_PSEUDO_REGISTER)
3597 count = copy_to_mode_reg (HImode, count);
3598
3599 if (TARGET_A16)
3600 emit_insn (gen_setmemhi_bhi_op (desto, counto, val, desta, count));
3601 else
3602 emit_insn (gen_setmemhi_bpsi_op (desto, counto, val, desta, count));
3603
3604 return 1;
3605}
3606
3607/* This is a memcpy() opcode. All operands are implied, so we need to
3608 arrange for them to be in the right registers. The opcode wants
3609 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3610 is the source (MEM:BLK), and $2 the count (HI). */
3611int
3612m32c_expand_movmemhi(rtx *operands)
3613{
3614 rtx desta, srca, count;
3615 rtx desto, srco, counto;
3616
3617 desta = XEXP (operands[0], 0);
3618 srca = XEXP (operands[1], 0);
3619 count = operands[2];
3620
3621 desto = gen_reg_rtx (Pmode);
3622 srco = gen_reg_rtx (Pmode);
3623 counto = gen_reg_rtx (HImode);
3624
3625 if (GET_CODE (desta) != REG
3626 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3627 desta = copy_to_mode_reg (Pmode, desta);
3628
3629 if (GET_CODE (srca) != REG
3630 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3631 srca = copy_to_mode_reg (Pmode, srca);
3632
3633 /* Similar to setmem, but we don't need to check the value. */
3634 if (GET_CODE (count) == CONST_INT
3635 && ! (INTVAL (count) & 1)
3636 && (INTVAL (count) > 1))
3637 {
3638 count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3639 if (TARGET_A16)
3640 emit_insn (gen_movmemhi_whi_op (desto, srco, counto, desta, srca, count));
3641 else
3642 emit_insn (gen_movmemhi_wpsi_op (desto, srco, counto, desta, srca, count));
3643 return 1;
3644 }
3645
 3646  /* This is the generalized memcpy() case. */
3647 if (GET_CODE (count) != REG
3648 || REGNO (count) < FIRST_PSEUDO_REGISTER)
3649 count = copy_to_mode_reg (HImode, count);
3650
3651 if (TARGET_A16)
3652 emit_insn (gen_movmemhi_bhi_op (desto, srco, counto, desta, srca, count));
3653 else
3654 emit_insn (gen_movmemhi_bpsi_op (desto, srco, counto, desta, srca, count));
3655
3656 return 1;
3657}
3658
3659/* This is a stpcpy() opcode. $0 is the destination (MEM:BLK) after
3660 the copy, which should point to the NUL at the end of the string,
3661 $1 is the destination (MEM:BLK), and $2 is the source (MEM:BLK).
3662 Since our opcode leaves the destination pointing *after* the NUL,
3663 we must emit an adjustment. */
3664int
3665m32c_expand_movstr(rtx *operands)
3666{
3667 rtx desta, srca;
3668 rtx desto, srco;
3669
3670 desta = XEXP (operands[1], 0);
3671 srca = XEXP (operands[2], 0);
3672
3673 desto = gen_reg_rtx (Pmode);
3674 srco = gen_reg_rtx (Pmode);
3675
3676 if (GET_CODE (desta) != REG
3677 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3678 desta = copy_to_mode_reg (Pmode, desta);
3679
3680 if (GET_CODE (srca) != REG
3681 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3682 srca = copy_to_mode_reg (Pmode, srca);
3683
3684 emit_insn (gen_movstr_op (desto, srco, desta, srca));
3685 /* desto ends up being a1, which allows this type of add through MOVA. */
3686 emit_insn (gen_addpsi3 (operands[0], desto, GEN_INT (-1)));
3687
3688 return 1;
3689}
3690
3691/* This is a strcmp() opcode. $0 is the destination (HI) which holds
3692 <=>0 depending on the comparison, $1 is one string (MEM:BLK), and
3693 $2 is the other (MEM:BLK). We must do the comparison, and then
3694 convert the flags to a signed integer result. */
3695int
3696m32c_expand_cmpstr(rtx *operands)
3697{
3698 rtx src1a, src2a;
3699
3700 src1a = XEXP (operands[1], 0);
3701 src2a = XEXP (operands[2], 0);
3702
3703 if (GET_CODE (src1a) != REG
3704 || REGNO (src1a) < FIRST_PSEUDO_REGISTER)
3705 src1a = copy_to_mode_reg (Pmode, src1a);
3706
3707 if (GET_CODE (src2a) != REG
3708 || REGNO (src2a) < FIRST_PSEUDO_REGISTER)
3709 src2a = copy_to_mode_reg (Pmode, src2a);
3710
3711 emit_insn (gen_cmpstrhi_op (src1a, src2a, src1a, src2a));
3712 emit_insn (gen_cond_to_int (operands[0]));
3713
3714 return 1;
3715}
3716
3717
3718typedef rtx (*shift_gen_func)(rtx, rtx, rtx);
3719
3720static shift_gen_func
3721shift_gen_func_for (int mode, int code)
3722{
3723#define GFF(m,c,f) if (mode == m && code == c) return f
3724 GFF(QImode, ASHIFT, gen_ashlqi3_i);
3725 GFF(QImode, ASHIFTRT, gen_ashrqi3_i);
3726 GFF(QImode, LSHIFTRT, gen_lshrqi3_i);
3727 GFF(HImode, ASHIFT, gen_ashlhi3_i);
3728 GFF(HImode, ASHIFTRT, gen_ashrhi3_i);
3729 GFF(HImode, LSHIFTRT, gen_lshrhi3_i);
3730 GFF(PSImode, ASHIFT, gen_ashlpsi3_i);
3731 GFF(PSImode, ASHIFTRT, gen_ashrpsi3_i);
3732 GFF(PSImode, LSHIFTRT, gen_lshrpsi3_i);
3733 GFF(SImode, ASHIFT, TARGET_A16 ? gen_ashlsi3_16 : gen_ashlsi3_24);
3734 GFF(SImode, ASHIFTRT, TARGET_A16 ? gen_ashrsi3_16 : gen_ashrsi3_24);
3735 GFF(SImode, LSHIFTRT, TARGET_A16 ? gen_lshrsi3_16 : gen_lshrsi3_24);
3736#undef GFF
07127a0a 3737 gcc_unreachable ();
3738}
3739
3740/* The m32c only has one shift, but it takes a signed count. GCC
3741 doesn't want this, so we fake it by negating any shift count when
3742 we're pretending to shift the other way. Also, the shift count is
3743 limited to -8..8. It's slightly better to use two shifts for 9..15
3744 than to load the count into r1h, so we do that too. */
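/* Example (illustrative, added): with a constant count, an HImode shift
   left by 12 is emitted as a shift by 8 into a temporary followed by a
   shift by 4, since maxc is 8 for HImode in the loop below. */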
38b2d076 3745int
23fed240 3746m32c_prepare_shift (rtx * operands, int scale, int shift_code)
38b2d076 3747{
ef4bddc2 3748 machine_mode mode = GET_MODE (operands[0]);
23fed240 3749 shift_gen_func func = shift_gen_func_for (mode, shift_code);
38b2d076 3750 rtx temp;
3751
3752 if (GET_CODE (operands[2]) == CONST_INT)
38b2d076 3753 {
3754 int maxc = TARGET_A24 && (mode == PSImode || mode == SImode) ? 32 : 8;
3755 int count = INTVAL (operands[2]) * scale;
3756
3757 while (count > maxc)
3758 {
3759 temp = gen_reg_rtx (mode);
3760 emit_insn (func (temp, operands[1], GEN_INT (maxc)));
3761 operands[1] = temp;
3762 count -= maxc;
3763 }
3764 while (count < -maxc)
3765 {
3766 temp = gen_reg_rtx (mode);
3767 emit_insn (func (temp, operands[1], GEN_INT (-maxc)));
3768 operands[1] = temp;
3769 count += maxc;
3770 }
3771 emit_insn (func (operands[0], operands[1], GEN_INT (count)));
3772 return 1;
38b2d076 3773 }
3774
3775 temp = gen_reg_rtx (QImode);
38b2d076 3776 if (scale < 0)
3777 /* The pattern has a NEG that corresponds to this. */
3778 emit_move_insn (temp, gen_rtx_NEG (QImode, operands[2]));
3779 else if (TARGET_A16 && mode == SImode)
3780 /* We do this because the code below may modify this, we don't
3781 want to modify the origin of this value. */
3782 emit_move_insn (temp, operands[2]);
38b2d076 3783 else
2e160056 3784 /* We'll only use it for the shift, no point emitting a move. */
38b2d076 3785 temp = operands[2];
2e160056 3786
16659fcf 3787 if (TARGET_A16 && GET_MODE_SIZE (mode) == 4)
3788 {
3789 /* The m16c has a limit of -16..16 for SI shifts, even when the
3790 shift count is in a register. Since there are so many targets
3791 of these shifts, it's better to expand the RTL here than to
3792 call a helper function.
3793
3794 The resulting code looks something like this:
3795
3796 cmp.b r1h,-16
3797 jge.b 1f
3798 shl.l -16,dest
3799 add.b r1h,16
3800 1f: cmp.b r1h,16
3801 jle.b 1f
3802 shl.l 16,dest
3803 sub.b r1h,16
3804 1f: shl.l r1h,dest
3805
3806 We take advantage of the fact that "negative" shifts are
3807 undefined to skip one of the comparisons. */
3808
3809 rtx count;
3810 rtx label, tempvar;
3811 rtx_insn *insn;
2e160056 3812
3813 emit_move_insn (operands[0], operands[1]);
3814
3815 count = temp;
3816 label = gen_label_rtx ();
2e160056
DD
3817 LABEL_NUSES (label) ++;
3818
833bf445
DD
3819 tempvar = gen_reg_rtx (mode);
3820
2e160056
DD
3821 if (shift_code == ASHIFT)
3822 {
3823 /* This is a left shift. We only need check positive counts. */
3824 emit_jump_insn (gen_cbranchqi4 (gen_rtx_LE (VOIDmode, 0, 0),
3825 count, GEN_INT (16), label));
833bf445
DD
3826 emit_insn (func (tempvar, operands[0], GEN_INT (8)));
3827 emit_insn (func (operands[0], tempvar, GEN_INT (8)));
2e160056
DD
3828 insn = emit_insn (gen_addqi3 (count, count, GEN_INT (-16)));
3829 emit_label_after (label, insn);
3830 }
3831 else
3832 {
3833 /* This is a right shift. We only need check negative counts. */
3834 emit_jump_insn (gen_cbranchqi4 (gen_rtx_GE (VOIDmode, 0, 0),
3835 count, GEN_INT (-16), label));
833bf445
DD
3836 emit_insn (func (tempvar, operands[0], GEN_INT (-8)));
3837 emit_insn (func (operands[0], tempvar, GEN_INT (-8)));
2e160056
DD
3838 insn = emit_insn (gen_addqi3 (count, count, GEN_INT (16)));
3839 emit_label_after (label, insn);
3840 }
16659fcf
DD
3841 operands[1] = operands[0];
3842 emit_insn (func (operands[0], operands[0], count));
3843 return 1;
2e160056
DD
3844 }
3845
38b2d076
DD
3846 operands[2] = temp;
3847 return 0;
3848}
3849
/* The m32c has a limited range of operations that work on PSImode
   values; we have to expand to SI, do the math, and truncate back to
   PSI.  Yes, this is expensive, but hopefully gcc will learn to avoid
   those cases.  */
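/* A sketch of the expansion below (register numbers illustrative):
   for "a = b * i" with a non-constant i, we emit roughly

     (set (reg:SI t1) (zero_extend:SI (reg:PSI b)))
     (set (reg:SI t2) (zero_extend:SI (reg:PSI i)))
     (set (reg:SI t3) (mult:SI (reg:SI t1) (reg:SI t2)))
     (set (reg:PSI a) (truncate:PSI (reg:SI t3)))

   where the multiply may instead become a libcall via OPTAB_LIB.  */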
void
m32c_expand_neg_mulpsi3 (rtx * operands)
{
  /* operands: a = b * i */
  rtx temp1; /* b as SI */
  rtx scale; /* i as SI */
  rtx temp2; /* a*b as SI */

  temp1 = gen_reg_rtx (SImode);
  temp2 = gen_reg_rtx (SImode);
  if (GET_CODE (operands[2]) != CONST_INT)
    {
      scale = gen_reg_rtx (SImode);
      emit_insn (gen_zero_extendpsisi2 (scale, operands[2]));
    }
  else
    scale = copy_to_mode_reg (SImode, operands[2]);

  emit_insn (gen_zero_extendpsisi2 (temp1, operands[1]));
  temp2 = expand_simple_binop (SImode, MULT, temp1, scale, temp2, 1, OPTAB_LIB);
  emit_insn (gen_truncsipsi2 (operands[0], temp2));
}

/* Pattern Output Functions */

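/* Expand a conditional move.  Only EQ/NE tests with CONST_INT arms
   are handled; an NE test is canonicalized to EQ by swapping the
   arms.  Returns nonzero to make the caller FAIL, zero once the
   IF_THEN_ELSE move has been emitted.  */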
int
m32c_expand_movcc (rtx *operands)
{
  rtx rel = operands[1];

  if (GET_CODE (rel) != EQ && GET_CODE (rel) != NE)
    return 1;
  if (GET_CODE (operands[2]) != CONST_INT
      || GET_CODE (operands[3]) != CONST_INT)
    return 1;
  if (GET_CODE (rel) == NE)
    {
      rtx tmp = operands[2];
      operands[2] = operands[3];
      operands[3] = tmp;
      rel = gen_rtx_EQ (GET_MODE (rel), XEXP (rel, 0), XEXP (rel, 1));
    }

  emit_move_insn (operands[0],
                  gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
                                        rel,
                                        operands[2],
                                        operands[3]));
  return 0;
}

/* Used for the "insv" pattern.  Return nonzero to fail, else done.  */
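/* Worked example (illustrative): inserting a 1 into bit 2 of a QImode
   register on an A24 target gives mask = 0x04; the switch index below
   is 4 + 0 + 1 = 5, so gen_iorqi3_24 is used.  Inserting a 0 instead
   selects index 1 (gen_andqi3_24), with the mask inverted to 0xfb and
   then sign-extended to -5.  */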
int
m32c_expand_insv (rtx *operands)
{
  rtx op0, src0, p;
  int mask;

  if (INTVAL (operands[1]) != 1)
    return 1;

  /* Our insv opcode (bset, bclr) can only insert a one-bit constant.  */
  if (GET_CODE (operands[3]) != CONST_INT)
    return 1;
  if (INTVAL (operands[3]) != 0
      && INTVAL (operands[3]) != 1
      && INTVAL (operands[3]) != -1)
    return 1;

  mask = 1 << INTVAL (operands[2]);

  op0 = operands[0];
  if (GET_CODE (op0) == SUBREG
      && SUBREG_BYTE (op0) == 0)
    {
      rtx sub = SUBREG_REG (op0);
      if (GET_MODE (sub) == HImode || GET_MODE (sub) == QImode)
        op0 = sub;
    }

  if (!can_create_pseudo_p ()
      || (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0)))
    src0 = op0;
  else
    {
      src0 = gen_reg_rtx (GET_MODE (op0));
      emit_move_insn (src0, op0);
    }

  if (GET_MODE (op0) == HImode
      && INTVAL (operands[2]) >= 8
      && GET_CODE (op0) == MEM)
    {
      /* We are little endian.  */
      rtx new_mem = gen_rtx_MEM (QImode, plus_constant (Pmode,
                                                        XEXP (op0, 0), 1));
      MEM_COPY_ATTRIBUTES (new_mem, op0);
      mask >>= 8;
    }

  /* First, we generate a mask with the correct polarity.  If we are
     storing a zero, we want an AND mask, so invert it.  */
  if (INTVAL (operands[3]) == 0)
    {
      /* Storing a zero, use an AND mask.  */
      if (GET_MODE (op0) == HImode)
        mask ^= 0xffff;
      else
        mask ^= 0xff;
    }
  /* Now we need to properly sign-extend the mask in case we need to
     fall back to an AND or OR opcode.  */
  if (GET_MODE (op0) == HImode)
    {
      if (mask & 0x8000)
        mask -= 0x10000;
    }
  else
    {
      if (mask & 0x80)
        mask -= 0x100;
    }

  switch ( (INTVAL (operands[3]) ? 4 : 0)
           + ((GET_MODE (op0) == HImode) ? 2 : 0)
           + (TARGET_A24 ? 1 : 0))
    {
    case 0: p = gen_andqi3_16 (op0, src0, GEN_INT (mask)); break;
    case 1: p = gen_andqi3_24 (op0, src0, GEN_INT (mask)); break;
    case 2: p = gen_andhi3_16 (op0, src0, GEN_INT (mask)); break;
    case 3: p = gen_andhi3_24 (op0, src0, GEN_INT (mask)); break;
    case 4: p = gen_iorqi3_16 (op0, src0, GEN_INT (mask)); break;
    case 5: p = gen_iorqi3_24 (op0, src0, GEN_INT (mask)); break;
    case 6: p = gen_iorhi3_16 (op0, src0, GEN_INT (mask)); break;
    case 7: p = gen_iorhi3_24 (op0, src0, GEN_INT (mask)); break;
    default: p = NULL_RTX; break; /* Not reached, but silences a warning.  */
    }

  emit_insn (p);
  return 0;
}

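/* Output helper for scc (store condition code) patterns.  If the
   destination is r0, one STZX conditional store is enough; otherwise
   the condition is stored into bit 0 with a BMcnd and the remaining
   bits are masked off.  E.g. for code == GEU the returned template is
   "bmgeu\t0,%h0\n\tand.b\t#1,%0" (illustrative; the suffix comes from
   GET_RTX_NAME).  */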
const char *
m32c_scc_pattern(rtx *operands, RTX_CODE code)
{
  static char buf[30];
  if (GET_CODE (operands[0]) == REG
      && REGNO (operands[0]) == R0_REGNO)
    {
      if (code == EQ)
        return "stzx\t#1,#0,r0l";
      if (code == NE)
        return "stzx\t#0,#1,r0l";
    }
  sprintf(buf, "bm%s\t0,%%h0\n\tand.b\t#1,%%0", GET_RTX_NAME (code));
  return buf;
}

/* Encode symbol attributes of a SYMBOL_REF into its
   SYMBOL_REF_FLAGS.  */
static void
m32c_encode_section_info (tree decl, rtx rtl, int first)
{
  int extra_flags = 0;

  default_encode_section_info (decl, rtl, first);
  if (TREE_CODE (decl) == FUNCTION_DECL
      && m32c_special_page_vector_p (decl))
    extra_flags = SYMBOL_FLAG_FUNCVEC_FUNCTION;

  if (extra_flags)
    SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= extra_flags;
}

/* Returns TRUE if the current function is a leaf, and thus we can
   determine which registers an interrupt function really needs to
   save.  The logic below is mostly about finding the insn sequence
   that's the function, versus any sequence that might be open for the
   current insn.  */
static int
m32c_leaf_function_p (void)
{
  rtx_insn *saved_first, *saved_last;
  struct sequence_stack *seq;
  int rv;

  saved_first = crtl->emit.x_first_insn;
  saved_last = crtl->emit.x_last_insn;
  for (seq = crtl->emit.sequence_stack; seq && seq->next; seq = seq->next)
    ;
  if (seq)
    {
      crtl->emit.x_first_insn = seq->first;
      crtl->emit.x_last_insn = seq->last;
    }

  rv = leaf_function_p ();

  crtl->emit.x_first_insn = saved_first;
  crtl->emit.x_last_insn = saved_last;
  return rv;
}

/* Returns TRUE if the current function needs to use the ENTER/EXIT
   opcodes.  If the function doesn't need the frame base or stack
   pointer, it can use the simpler RTS opcode.  */
static bool
m32c_function_needs_enter (void)
{
  rtx_insn *insn;
  struct sequence_stack *seq;
  rtx sp = gen_rtx_REG (Pmode, SP_REGNO);
  rtx fb = gen_rtx_REG (Pmode, FB_REGNO);

  insn = get_insns ();
  for (seq = crtl->emit.sequence_stack;
       seq;
       insn = seq->first, seq = seq->next)
    ;

  while (insn)
    {
      if (reg_mentioned_p (sp, insn))
        return true;
      if (reg_mentioned_p (fb, insn))
        return true;
      insn = NEXT_INSN (insn);
    }
  return false;
}

/* Mark all the subexpressions of the PARALLEL rtx PAR as
   frame-related.  Return PAR.

   dwarf2out.c:dwarf2out_frame_debug_expr ignores sub-expressions of a
   PARALLEL rtx other than the first if they do not have the
   FRAME_RELATED flag set on them.  So this function is handy for
   marking up 'enter' instructions.  */
static rtx
m32c_all_frame_related (rtx par)
{
  int len = XVECLEN (par, 0);
  int i;

  for (i = 0; i < len; i++)
    F (XVECEXP (par, 0, i));

  return par;
}

/* Emits the prologue.  See the frame layout comment earlier in this
   file.  We can reserve up to 256 bytes with the ENTER opcode, beyond
   that we manually update sp.  */
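/* For example (sizes illustrative): a 300-byte frame is handled below
   by reserving 254 bytes via ENTER and then emitting an explicit
   subtraction of the remaining 46 bytes from the stack pointer.  */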
void
m32c_emit_prologue (void)
{
  int frame_size, extra_frame_size = 0, reg_save_size;
  int complex_prologue = 0;

  cfun->machine->is_leaf = m32c_leaf_function_p ();
  if (interrupt_p (cfun->decl))
    {
      cfun->machine->is_interrupt = 1;
      complex_prologue = 1;
    }
  else if (bank_switch_p (cfun->decl))
    warning (OPT_Wattributes,
             "%<bank_switch%> has no effect on non-interrupt functions");

  reg_save_size = m32c_pushm_popm (PP_justcount);

  if (interrupt_p (cfun->decl))
    {
      if (bank_switch_p (cfun->decl))
        emit_insn (gen_fset_b ());
      else if (cfun->machine->intr_pushm)
        emit_insn (gen_pushm (GEN_INT (cfun->machine->intr_pushm)));
    }

  frame_size =
    m32c_initial_elimination_offset (FB_REGNO, SP_REGNO) - reg_save_size;
  if (frame_size == 0
      && !m32c_function_needs_enter ())
    cfun->machine->use_rts = 1;

  if (frame_size > 254)
    {
      extra_frame_size = frame_size - 254;
      frame_size = 254;
    }
  if (cfun->machine->use_rts == 0)
    F (emit_insn (m32c_all_frame_related
                  (TARGET_A16
                   ? gen_prologue_enter_16 (GEN_INT (frame_size + 2))
                   : gen_prologue_enter_24 (GEN_INT (frame_size + 4)))));

  if (extra_frame_size)
    {
      complex_prologue = 1;
      if (TARGET_A16)
        F (emit_insn (gen_addhi3 (gen_rtx_REG (HImode, SP_REGNO),
                                  gen_rtx_REG (HImode, SP_REGNO),
                                  GEN_INT (-extra_frame_size))));
      else
        F (emit_insn (gen_addpsi3 (gen_rtx_REG (PSImode, SP_REGNO),
                                   gen_rtx_REG (PSImode, SP_REGNO),
                                   GEN_INT (-extra_frame_size))));
    }

  complex_prologue += m32c_pushm_popm (PP_pushm);

  /* This just emits a comment into the .s file for debugging.  */
  if (complex_prologue)
    emit_insn (gen_prologue_end ());
}

/* Likewise, for the epilogue.  The only exception is that, for
   interrupts, we must manually unwind the frame as the REIT opcode
   doesn't do that.  */
void
m32c_emit_epilogue (void)
{
  int popm_count = m32c_pushm_popm (PP_justcount);

  /* This just emits a comment into the .s file for debugging.  */
  if (popm_count > 0 || cfun->machine->is_interrupt)
    emit_insn (gen_epilogue_start ());

  if (popm_count > 0)
    m32c_pushm_popm (PP_popm);

  if (cfun->machine->is_interrupt)
    {
      machine_mode spmode = TARGET_A16 ? HImode : PSImode;

      /* REIT clears B flag and restores $fp for us, but we still
         have to fix up the stack.  USE_RTS just means we didn't
         emit ENTER.  */
      if (!cfun->machine->use_rts)
        {
          emit_move_insn (gen_rtx_REG (spmode, A0_REGNO),
                          gen_rtx_REG (spmode, FP_REGNO));
          emit_move_insn (gen_rtx_REG (spmode, SP_REGNO),
                          gen_rtx_REG (spmode, A0_REGNO));
          /* We can't just add this to the POPM because it would be in
             the wrong order, and wouldn't fix the stack if we're bank
             switching.  */
          if (TARGET_A16)
            emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, FP_REGNO)));
          else
            emit_insn (gen_poppsi (gen_rtx_REG (PSImode, FP_REGNO)));
        }
      if (!bank_switch_p (cfun->decl) && cfun->machine->intr_pushm)
        emit_insn (gen_popm (GEN_INT (cfun->machine->intr_pushm)));

      /* The FREIT (Fast REturn from InTerrupt) instruction should be
         generated only for M32C/M32CM targets (generate the REIT
         instruction otherwise).  */
      if (fast_interrupt_p (cfun->decl))
        {
          /* The fast_interrupt attribute is honored only on M32C or
             M32CM (TARGET_A24) targets.  */
          if (TARGET_A24)
            {
              emit_jump_insn (gen_epilogue_freit ());
            }
          /* If the fast_interrupt attribute is set for an R8C or M16C
             target, ignore the attribute and generate a REIT
             instruction instead.  */
          else
            {
              warning (OPT_Wattributes,
                       "%<fast_interrupt%> attribute directive ignored");
              emit_jump_insn (gen_epilogue_reit_16 ());
            }
        }
      else if (TARGET_A16)
        emit_jump_insn (gen_epilogue_reit_16 ());
      else
        emit_jump_insn (gen_epilogue_reit_24 ());
    }
  else if (cfun->machine->use_rts)
    emit_jump_insn (gen_epilogue_rts ());
  else if (TARGET_A16)
    emit_jump_insn (gen_epilogue_exitd_16 ());
  else
    emit_jump_insn (gen_epilogue_exitd_24 ());
}

void
m32c_emit_eh_epilogue (rtx ret_addr)
{
  /* R0[R2] has the stack adjustment.  R1[R3] has the address to
     return to.  We have to fudge the stack, pop everything, pop SP
     (fudged), and return (fudged).  This is actually easier to do in
     assembler, so punt to libgcc.  */
  emit_jump_insn (gen_eh_epilogue (ret_addr, cfun->machine->eh_stack_adjust));
  /* emit_clobber (gen_rtx_REG (HImode, R0L_REGNO)); */
}

/* Indicate which flags must be properly set for a given conditional.  */
static int
flags_needed_for_conditional (rtx cond)
{
  switch (GET_CODE (cond))
    {
    case LE:
    case GT:
      return FLAGS_OSZ;
    case LEU:
    case GTU:
      return FLAGS_ZC;
    case LT:
    case GE:
      return FLAGS_OS;
    case LTU:
    case GEU:
      return FLAGS_C;
    case EQ:
    case NE:
      return FLAGS_Z;
    default:
      return FLAGS_N;
    }
}

#define DEBUG_CMP 0

/* Returns true if a compare insn is redundant because it would only
   set flags that are already set correctly.  */
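/* A sketch of the typical case this catches (insns shown
   schematically): a SET of some register whose insn already sets the
   Z flag, immediately followed by a compare of that same register
   against zero and a branch on EQ.  The EQ test only needs Z, the
   previous SET mentions the same register, and the comparison is
   against zero, so the compare can be dropped.  */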
static bool
m32c_compare_redundant (rtx_insn *cmp, rtx *operands)
{
  int flags_needed;
  int pflags;
  rtx_insn *prev;
  rtx pp, next;
  rtx op0, op1;
#if DEBUG_CMP
  int prev_icode, i;
#endif

  op0 = operands[0];
  op1 = operands[1];

#if DEBUG_CMP
  fprintf(stderr, "\n\033[32mm32c_compare_redundant\033[0m\n");
  debug_rtx(cmp);
  for (i=0; i<2; i++)
    {
      fprintf(stderr, "operands[%d] = ", i);
      debug_rtx(operands[i]);
    }
#endif

  next = next_nonnote_insn (cmp);
  if (!next || !INSN_P (next))
    {
#if DEBUG_CMP
      fprintf(stderr, "compare not followed by insn\n");
      debug_rtx(next);
#endif
      return false;
    }
  if (GET_CODE (PATTERN (next)) == SET
      && GET_CODE (XEXP (PATTERN (next), 1)) == IF_THEN_ELSE)
    {
      next = XEXP (XEXP (PATTERN (next), 1), 0);
    }
  else if (GET_CODE (PATTERN (next)) == SET)
    {
      /* If this is a conditional, flags_needed will be something
         other than FLAGS_N, which we test below.  */
      next = XEXP (PATTERN (next), 1);
    }
  else
    {
#if DEBUG_CMP
      fprintf(stderr, "compare not followed by conditional\n");
      debug_rtx(next);
#endif
      return false;
    }
#if DEBUG_CMP
  fprintf(stderr, "conditional is: ");
  debug_rtx(next);
#endif

  flags_needed = flags_needed_for_conditional (next);
  if (flags_needed == FLAGS_N)
    {
#if DEBUG_CMP
      fprintf(stderr, "conditional doesn't need any flags we track\n");
      debug_rtx(next);
#endif
      return false;
    }

  /* Compare doesn't set overflow and carry the same way that
     arithmetic instructions do, so we can't replace those.  */
  if (flags_needed & FLAGS_OC)
    return false;

  prev = cmp;
  do {
    prev = prev_nonnote_insn (prev);
    if (!prev)
      {
#if DEBUG_CMP
        fprintf(stderr, "No previous insn.\n");
#endif
        return false;
      }
    if (!INSN_P (prev))
      {
#if DEBUG_CMP
        fprintf(stderr, "Previous insn is a non-insn.\n");
#endif
        return false;
      }
    pp = PATTERN (prev);
    if (GET_CODE (pp) != SET)
      {
#if DEBUG_CMP
        fprintf(stderr, "Previous insn is not a SET.\n");
#endif
        return false;
      }
    pflags = get_attr_flags (prev);

    /* Looking up attributes of previous insns corrupted the recog
       tables.  */
    INSN_UID (cmp) = -1;
    recog (PATTERN (cmp), cmp, 0);

    if (pflags == FLAGS_N
        && reg_mentioned_p (op0, pp))
      {
#if DEBUG_CMP
        fprintf(stderr, "intermediate non-flags insn uses op:\n");
        debug_rtx(prev);
#endif
        return false;
      }

    /* Check for comparisons against memory - between volatiles and
       aliases, we just can't risk this one.  */
    if (GET_CODE (operands[0]) == MEM
        || GET_CODE (operands[1]) == MEM)
      {
#if DEBUG_CMP
        fprintf(stderr, "comparisons with memory:\n");
        debug_rtx(prev);
#endif
        return false;
      }

    /* Check for PREV changing a register that's used to compute a
       value in CMP, even if it doesn't otherwise change flags.  */
    if (GET_CODE (operands[0]) == REG
        && rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[0]))
      {
#if DEBUG_CMP
        fprintf(stderr, "sub-value affected, op0:\n");
        debug_rtx(prev);
#endif
        return false;
      }
    if (GET_CODE (operands[1]) == REG
        && rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[1]))
      {
#if DEBUG_CMP
        fprintf(stderr, "sub-value affected, op1:\n");
        debug_rtx(prev);
#endif
        return false;
      }

  } while (pflags == FLAGS_N);
#if DEBUG_CMP
  fprintf(stderr, "previous flag-setting insn:\n");
  debug_rtx(prev);
  debug_rtx(pp);
#endif

  if (GET_CODE (pp) == SET
      && GET_CODE (XEXP (pp, 0)) == REG
      && REGNO (XEXP (pp, 0)) == FLG_REGNO
      && GET_CODE (XEXP (pp, 1)) == COMPARE)
    {
      /* Adjacent cbranches must have the same operands to be
         redundant.  */
      rtx pop0 = XEXP (XEXP (pp, 1), 0);
      rtx pop1 = XEXP (XEXP (pp, 1), 1);
#if DEBUG_CMP
      fprintf(stderr, "adjacent cbranches\n");
      debug_rtx(pop0);
      debug_rtx(pop1);
#endif
      if (rtx_equal_p (op0, pop0)
          && rtx_equal_p (op1, pop1))
        return true;
#if DEBUG_CMP
      fprintf(stderr, "prev cmp not same\n");
#endif
      return false;
    }

  /* Else the previous insn must be a SET, with either the source or
     dest equal to operands[0], and operands[1] must be zero.  */

  if (!rtx_equal_p (op1, const0_rtx))
    {
#if DEBUG_CMP
      fprintf(stderr, "operands[1] not const0_rtx\n");
#endif
      return false;
    }
  if (GET_CODE (pp) != SET)
    {
#if DEBUG_CMP
      fprintf (stderr, "pp not set\n");
#endif
      return false;
    }
  if (!rtx_equal_p (op0, SET_SRC (pp))
      && !rtx_equal_p (op0, SET_DEST (pp)))
    {
#if DEBUG_CMP
      fprintf(stderr, "operands[0] not found in set\n");
#endif
      return false;
    }

#if DEBUG_CMP
  fprintf(stderr, "cmp flags %x prev flags %x\n", flags_needed, pflags);
#endif
  if ((pflags & flags_needed) == flags_needed)
    return true;

  return false;
}

/* Return the pattern for a compare.  This will be commented out if
   the compare is redundant, else a normal pattern is returned.  Thus,
   the assembler output says where the compare would have been.  */
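/* The trick below: templ[] starts with a ';' so the whole string is
   an assembler comment.  templ[5] is patched to the mode-size suffix
   (b/w/l), and we return either templ (still commented out, when the
   compare is redundant) or templ + 1 (skipping the ';', so a real
   cmp is emitted).  For HImode operands, for instance, the emitted
   text is "cmp.w\t%1,%0".  */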
char *
m32c_output_compare (rtx_insn *insn, rtx *operands)
{
  static char templ[] = ";cmp.b\t%1,%0";
  /*                          ^ 5  */

  templ[5] = " bwll"[GET_MODE_SIZE(GET_MODE(operands[0]))];
  if (m32c_compare_redundant (insn, operands))
    {
#if DEBUG_CMP
      fprintf(stderr, "cbranch: cmp not needed\n");
#endif
      return templ;
    }

#if DEBUG_CMP
  fprintf(stderr, "cbranch: cmp needed: `%s'\n", templ + 1);
#endif
  return templ + 1;
}

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO m32c_encode_section_info

/* If the frame pointer isn't used, we detect it manually.  But the
   stack pointer doesn't have as flexible addressing as the frame
   pointer, so we always assume we have it.  */

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED hook_bool_void_true

/* The Global `targetm' Variable.  */

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-m32c.h"