]> git.ipfire.org Git - thirdparty/gcc.git/blame - gcc/config/m32c/m32c.c
2015-06-17 Andrew MacLeod <amacleod@redhat.com>
[thirdparty/gcc.git] / gcc / config / m32c / m32c.c
CommitLineData
85c84d5c 1/* Target Code for R8C/M16C/M32C
d353bf18 2 Copyright (C) 2005-2015 Free Software Foundation, Inc.
85c84d5c 3 Contributed by Red Hat.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published
038d1e19 9 by the Free Software Foundation; either version 3, or (at your
85c84d5c 10 option) any later version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
038d1e19 18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
85c84d5c 20
21#include "config.h"
22#include "system.h"
23#include "coretypes.h"
24#include "tm.h"
25#include "rtl.h"
26#include "regs.h"
27#include "hard-reg-set.h"
85c84d5c 28#include "insn-config.h"
29#include "conditions.h"
30#include "insn-flags.h"
31#include "output.h"
32#include "insn-attr.h"
33#include "flags.h"
34#include "recog.h"
35#include "reload.h"
0b205f4c 36#include "diagnostic-core.h"
85c84d5c 37#include "obstack.h"
b20a8bb4 38#include "alias.h"
39#include "symtab.h"
85c84d5c 40#include "tree.h"
b20a8bb4 41#include "fold-const.h"
9ed99284 42#include "stor-layout.h"
43#include "varasm.h"
44#include "calls.h"
d53441c8 45#include "function.h"
d53441c8 46#include "expmed.h"
47#include "dojump.h"
48#include "explow.h"
49#include "emit-rtl.h"
50#include "stmt.h"
85c84d5c 51#include "expr.h"
34517c64 52#include "insn-codes.h"
85c84d5c 53#include "optabs.h"
54#include "except.h"
85c84d5c 55#include "target.h"
56#include "target-def.h"
57#include "tm_p.h"
58#include "langhooks.h"
94ea8568 59#include "predict.h"
60#include "dominance.h"
61#include "cfg.h"
62#include "cfgrtl.h"
63#include "cfganal.h"
64#include "lcm.h"
65#include "cfgbuild.h"
66#include "cfgcleanup.h"
bc61cadb 67#include "basic-block.h"
68#include "tree-ssa-alias.h"
69#include "internal-fn.h"
70#include "gimple-fold.h"
71#include "tree-eh.h"
72#include "gimple-expr.h"
75a70cf9 73#include "gimple.h"
97678fce 74#include "df.h"
4ead5e30 75#include "tm-constrs.h"
f7715905 76#include "builtins.h"
85c84d5c 77
78/* Prototypes */
79
/* Used by m32c_pushm_popm.  Selects whether that function emits a
   PUSHM insn, a POPM insn, or merely computes the byte count the
   push/pop would move.  */
typedef enum
{
  PP_pushm,
  PP_popm,
  PP_justcount
} Push_Pop_Type;

/* Forward declarations for the static helpers defined later in this
   file.  */
static bool m32c_function_needs_enter (void);
static tree interrupt_handler (tree *, tree, tree, int, bool *);
static tree function_vector_handler (tree *, tree, tree, int, bool *);
static int interrupt_p (tree node);
static int bank_switch_p (tree node);
static int fast_interrupt_p (tree node);
/* NOTE(review): interrupt_p is declared twice (also three lines up);
   harmless but the duplicate could be removed.  */
static int interrupt_p (tree node);
static bool m32c_asm_integer (rtx, unsigned int, int);
static int m32c_comp_type_attributes (const_tree, const_tree);
static bool m32c_fixed_condition_code_regs (unsigned int *, unsigned int *);
static struct machine_function *m32c_init_machine_status (void);
static void m32c_insert_attributes (tree, tree *);
static bool m32c_legitimate_address_p (machine_mode, rtx, bool);
static bool m32c_addr_space_legitimate_address_p (machine_mode, rtx, bool, addr_space_t);
static rtx m32c_function_arg (cumulative_args_t, machine_mode,
			      const_tree, bool);
static bool m32c_pass_by_reference (cumulative_args_t, machine_mode,
				    const_tree, bool);
static void m32c_function_arg_advance (cumulative_args_t, machine_mode,
				       const_tree, bool);
static unsigned int m32c_function_arg_boundary (machine_mode, const_tree);
static int m32c_pushm_popm (Push_Pop_Type);
static bool m32c_strict_argument_naming (cumulative_args_t);
static rtx m32c_struct_value_rtx (tree, int);
static rtx m32c_subreg (machine_mode, rtx, machine_mode, int);
static int need_to_save (int);
static rtx m32c_function_value (const_tree, const_tree, bool);
static rtx m32c_libcall_value (machine_mode, const_rtx);

/* Returns true if an address is specified, else false.  */
static bool m32c_get_pragma_address (const char *varname, unsigned *addr);

/* Machine-dependent SYMBOL_REF flag marking functions reached through
   a function vector (see function_vector_handler).  */
#define SYMBOL_FLAG_FUNCVEC_FUNCTION	(SYMBOL_FLAG_MACH_DEP << 0)

/* Shorthand for string equality.  */
#define streq(a,b) (strcmp ((a), (b)) == 0)
/* Internal support routines */

/* Debugging statements are tagged with DEBUG0 only so that they can
   be easily enabled individually, by replacing the '0' with '1' as
   needed.  */
#define DEBUG0 0
/* NOTE(review): DEBUG1 appears unused in the visible portion of this
   file; confirm before removing.  */
#define DEBUG1 1

#if DEBUG0
#include "print-tree.h"
/* This is needed by some of the commented-out debug statements
   below.  */
static char const *class_names[LIM_REG_CLASSES] = REG_CLASS_NAMES;
#endif
/* First word of each register class's hard-reg bitmask; consulted by
   m32c_secondary_reload_class and m32c_cannot_change_mode_class.  */
static int class_contents[LIM_REG_CLASSES][1] = REG_CLASS_CONTENTS;

/* These are all to support encode_pattern().  pattern[] holds the
   encoded string, patternp the write cursor, and patternr[] the RTX
   node corresponding to each encoded character.  */
static char pattern[30], *patternp;
static GTY(()) rtx patternr[30];
/* Compare the most recently encoded pattern against literal X.  */
#define RTX_IS(x) (streq (pattern, x))

/* Some macros to simplify the logic throughout this file.  */
#define IS_MEM_REGNO(regno) ((regno) >= MEM0_REGNO && (regno) <= MEM7_REGNO)
#define IS_MEM_REG(rtx) (GET_CODE (rtx) == REG && IS_MEM_REGNO (REGNO (rtx)))

#define IS_CR_REGNO(regno) ((regno) >= SB_REGNO && (regno) <= PC_REGNO)
#define IS_CR_REG(rtx) (GET_CODE (rtx) == REG && IS_CR_REGNO (REGNO (rtx)))
151
d9530df8 152static int
153far_addr_space_p (rtx x)
154{
155 if (GET_CODE (x) != MEM)
156 return 0;
157#if DEBUG0
158 fprintf(stderr, "\033[35mfar_addr_space: "); debug_rtx(x);
159 fprintf(stderr, " = %d\033[0m\n", MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR);
160#endif
161 return MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR;
162}
163
/* We do most RTX matching by converting the RTX into a string, and
   using string compares.  This vastly simplifies the logic in many of
   the functions in this file.

   On exit, pattern[] has the encoded string (use RTX_IS("...") to
   compare it) and patternr[] has pointers to the nodes in the RTX
   corresponding to each character in the encoded string.  The latter
   is mostly used by print_operand().

   Unrecognized patterns have '?' in them; this shows up when the
   assembler complains about syntax errors.
*/

/* Recursive worker for encode_pattern(): append the encoding of X and
   its operands to pattern[] via the patternp cursor, recording each
   visited node in patternr[].  */
static void
encode_pattern_1 (rtx x)
{
  int i;

  /* Stop one short of the end so encode_pattern() can still write the
     terminating NUL; mark truncated patterns unrecognizable.  */
  if (patternp == pattern + sizeof (pattern) - 2)
    {
      patternp[-1] = '?';
      return;
    }

  patternr[patternp - pattern] = x;

  switch (GET_CODE (x))
    {
    case REG:
      *patternp++ = 'r';
      break;
    case SUBREG:
      /* A size-changing subreg, or a PSImode subreg of a register,
	 gets an extra 'S' marker before its operand.  */
      if (GET_MODE_SIZE (GET_MODE (x)) !=
	  GET_MODE_SIZE (GET_MODE (XEXP (x, 0))))
	*patternp++ = 'S';
      if (GET_MODE (x) == PSImode
	  && GET_CODE (XEXP (x, 0)) == REG)
	*patternp++ = 'S';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case MEM:
      *patternp++ = 'm';
      /* FALLTHRU - the MEM's address is encoded just like a CONST's
	 operand, with no additional tag.  */
    case CONST:
      encode_pattern_1 (XEXP (x, 0));
      break;
    case SIGN_EXTEND:
      *patternp++ = '^';
      *patternp++ = 'S';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case ZERO_EXTEND:
      *patternp++ = '^';
      *patternp++ = 'Z';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case PLUS:
      *patternp++ = '+';
      encode_pattern_1 (XEXP (x, 0));
      encode_pattern_1 (XEXP (x, 1));
      break;
    case PRE_DEC:
      *patternp++ = '>';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case POST_INC:
      *patternp++ = '<';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case LO_SUM:
      *patternp++ = 'L';
      encode_pattern_1 (XEXP (x, 0));
      encode_pattern_1 (XEXP (x, 1));
      break;
    case HIGH:
      *patternp++ = 'H';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case SYMBOL_REF:
      *patternp++ = 's';
      break;
    case LABEL_REF:
      *patternp++ = 'l';
      break;
    case CODE_LABEL:
      *patternp++ = 'c';
      break;
    case CONST_INT:
    case CONST_DOUBLE:
      *patternp++ = 'i';
      break;
    case UNSPEC:
      /* 'u' plus the unspec number as a single digit, then the
	 operands.  NOTE(review): assumes the unspec number is 0..9.  */
      *patternp++ = 'u';
      *patternp++ = '0' + XCINT (x, 1, UNSPEC);
      for (i = 0; i < XVECLEN (x, 0); i++)
	encode_pattern_1 (XVECEXP (x, 0, i));
      break;
    case USE:
      *patternp++ = 'U';
      break;
    case PARALLEL:
      *patternp++ = '|';
      for (i = 0; i < XVECLEN (x, 0); i++)
	encode_pattern_1 (XVECEXP (x, 0, i));
      break;
    case EXPR_LIST:
      *patternp++ = 'E';
      encode_pattern_1 (XEXP (x, 0));
      if (XEXP (x, 1))
	encode_pattern_1 (XEXP (x, 1));
      break;
    default:
      *patternp++ = '?';
#if DEBUG0
      fprintf (stderr, "can't encode pattern %s\n",
	       GET_RTX_NAME (GET_CODE (x)));
      debug_rtx (x);
#endif
      break;
    }
}
284
285static void
286encode_pattern (rtx x)
287{
288 patternp = pattern;
289 encode_pattern_1 (x);
290 *patternp = 0;
291}
292
293/* Since register names indicate the mode they're used in, we need a
294 way to determine which name to refer to the register with. Called
295 by print_operand(). */
296
297static const char *
3754d046 298reg_name_with_mode (int regno, machine_mode mode)
85c84d5c 299{
300 int mlen = GET_MODE_SIZE (mode);
301 if (regno == R0_REGNO && mlen == 1)
302 return "r0l";
303 if (regno == R0_REGNO && (mlen == 3 || mlen == 4))
304 return "r2r0";
305 if (regno == R0_REGNO && mlen == 6)
306 return "r2r1r0";
307 if (regno == R0_REGNO && mlen == 8)
308 return "r3r1r2r0";
309 if (regno == R1_REGNO && mlen == 1)
310 return "r1l";
311 if (regno == R1_REGNO && (mlen == 3 || mlen == 4))
312 return "r3r1";
313 if (regno == A0_REGNO && TARGET_A16 && (mlen == 3 || mlen == 4))
314 return "a1a0";
315 return reg_names[regno];
316}
317
318/* How many bytes a register uses on stack when it's pushed. We need
319 to know this because the push opcode needs to explicitly indicate
320 the size of the register, even though the name of the register
321 already tells it that. Used by m32c_output_reg_{push,pop}, which
322 is only used through calls to ASM_OUTPUT_REG_{PUSH,POP}. */
323
324static int
325reg_push_size (int regno)
326{
327 switch (regno)
328 {
329 case R0_REGNO:
330 case R1_REGNO:
331 return 2;
332 case R2_REGNO:
333 case R3_REGNO:
334 case FLG_REGNO:
335 return 2;
336 case A0_REGNO:
337 case A1_REGNO:
338 case SB_REGNO:
339 case FB_REGNO:
340 case SP_REGNO:
341 if (TARGET_A16)
342 return 2;
343 else
344 return 3;
345 default:
346 gcc_unreachable ();
347 }
348}
349
85c84d5c 350/* Given two register classes, find the largest intersection between
351 them. If there is no intersection, return RETURNED_IF_EMPTY
352 instead. */
afe8797e 353static reg_class_t
354reduce_class (reg_class_t original_class, reg_class_t limiting_class,
355 reg_class_t returned_if_empty)
85c84d5c 356{
afe8797e 357 HARD_REG_SET cc;
358 int i;
359 reg_class_t best = NO_REGS;
360 unsigned int best_size = 0;
85c84d5c 361
362 if (original_class == limiting_class)
363 return original_class;
364
afe8797e 365 cc = reg_class_contents[original_class];
366 AND_HARD_REG_SET (cc, reg_class_contents[limiting_class]);
85c84d5c 367
85c84d5c 368 for (i = 0; i < LIM_REG_CLASSES; i++)
369 {
afe8797e 370 if (hard_reg_set_subset_p (reg_class_contents[i], cc))
371 if (best_size < reg_class_size[i])
85c84d5c 372 {
afe8797e 373 best = (reg_class_t) i;
374 best_size = reg_class_size[i];
85c84d5c 375 }
376
377 }
378 if (best == NO_REGS)
379 return returned_if_empty;
380 return best;
381}
382
85c84d5c 383/* Used by m32c_register_move_cost to determine if a move is
384 impossibly expensive. */
4cf1a89b 385static bool
3754d046 386class_can_hold_mode (reg_class_t rclass, machine_mode mode)
85c84d5c 387{
388 /* Cache the results: 0=untested 1=no 2=yes */
389 static char results[LIM_REG_CLASSES][MAX_MACHINE_MODE];
4cf1a89b 390
391 if (results[(int) rclass][mode] == 0)
85c84d5c 392 {
4cf1a89b 393 int r;
8deb3959 394 results[rclass][mode] = 1;
85c84d5c 395 for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
4cf1a89b 396 if (in_hard_reg_set_p (reg_class_contents[(int) rclass], mode, r)
85c84d5c 397 && HARD_REGNO_MODE_OK (r, mode))
398 {
4cf1a89b 399 results[rclass][mode] = 2;
400 break;
85c84d5c 401 }
402 }
4cf1a89b 403
85c84d5c 404#if DEBUG0
405 fprintf (stderr, "class %s can hold %s? %s\n",
4cf1a89b 406 class_names[(int) rclass], mode_name[mode],
8deb3959 407 (results[rclass][mode] == 2) ? "yes" : "no");
85c84d5c 408#endif
4cf1a89b 409 return results[(int) rclass][mode] == 2;
85c84d5c 410}
411
/* Run-time Target Specification.  */

/* Memregs are memory locations that gcc treats like general
   registers, as there are a limited number of true registers and the
   m32c families can use memory in most places that registers can be
   used.

   However, since memory accesses are more expensive than registers,
   we allow the user to limit the number of memregs available, in
   order to try to persuade gcc to try harder to use real registers.

   Memregs are provided by lib1funcs.S.
*/

/* NOTE(review): presumably cleared elsewhere once it becomes unsafe
   to change the memreg count -- confirm against the rest of the
   file.  */
int ok_to_change_target_memregs = TRUE;
427
/* Implements TARGET_OPTION_OVERRIDE.  Validates -memregs=, sets
   target-dependent defaults, and disables passes that interact badly
   with this port.  */

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE m32c_option_override

static void
m32c_option_override (void)
{
  /* We limit memregs to 0..16, and provide a default.  */
  if (global_options_set.x_target_memregs)
    {
      if (target_memregs < 0 || target_memregs > 16)
	error ("invalid target memregs value '%d'", target_memregs);
    }
  else
    target_memregs = 16;

  /* NOTE(review): rationale for disabling ivopts on A24 is not stated
     here -- confirm against the ChangeLog before relying on it.  */
  if (TARGET_A24)
    flag_ivopts = 0;

  /* This target defaults to strict volatile bitfields.  */
  if (flag_strict_volatile_bitfields < 0 && abi_version_at_least(2))
    flag_strict_volatile_bitfields = 1;

  /* r8c/m16c have no 16-bit indirect call, so thunks are involved.
     This is always worse than an absolute call.  */
  if (TARGET_A16)
    flag_no_function_cse = 1;

  /* This wants to put insns between compares and their jumps.  */
  /* FIXME: The right solution is to properly trace the flags register
     values, but that is too much work for stage 4.  */
  flag_combine_stack_adjustments = 0;
}
462
#undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m32c_override_options_after_change

/* Re-apply the A16 no-function-cse setting whenever options change
   (e.g. via optimize attributes/pragmas), mirroring the logic in
   m32c_option_override.  */
static void
m32c_override_options_after_change (void)
{
  if (TARGET_A16)
    flag_no_function_cse = 1;
}
472
/* Defining data structures for per-function information */

/* The usual; we set up our machine_function data.  Allocates a
   zero-initialized, GC-tracked machine_function record.  */
static struct machine_function *
m32c_init_machine_status (void)
{
  return ggc_cleared_alloc<machine_function> ();
}
481
/* Implements INIT_EXPANDERS.  We just set up to call the above
   function.  */
void
m32c_init_expanders (void)
{
  init_machine_status = m32c_init_machine_status;
}
489
/* Storage Layout */

/* Register Basics */

/* Basic Characteristics of Registers */

/* Whether a mode fits in a register is complex enough to warrant a
   table.  Each field holds the number of hard registers needed to
   hold a value of that mode class starting at this register, with 0
   meaning the mode does not fit there at all (see
   m32c_hard_regno_nregs_1, which returns these values).  */
static struct
{
  char qi_regs;		/* QImode (1 byte) */
  char hi_regs;		/* HImode (2 bytes) */
  char pi_regs;		/* PSImode / 3-byte values */
  char si_regs;		/* SImode (4 bytes) */
  char di_regs;		/* DImode (8 bytes) */
} nregs_table[FIRST_PSEUDO_REGISTER] =
{
  { 1, 1, 2, 2, 4 },		/* r0 */
  { 0, 1, 0, 0, 0 },		/* r2 */
  { 1, 1, 2, 2, 0 },		/* r1 */
  { 0, 1, 0, 0, 0 },		/* r3 */
  { 0, 1, 1, 0, 0 },		/* a0 */
  { 0, 1, 1, 0, 0 },		/* a1 */
  { 0, 1, 1, 0, 0 },		/* sb */
  { 0, 1, 1, 0, 0 },		/* fb */
  { 0, 1, 1, 0, 0 },		/* sp */
  { 1, 1, 1, 0, 0 },		/* pc */
  { 0, 0, 0, 0, 0 },		/* fl */
  { 1, 1, 1, 0, 0 },		/* ap */
  { 1, 1, 2, 2, 4 },		/* mem0 */
  { 1, 1, 2, 2, 4 },		/* mem1 */
  { 1, 1, 2, 2, 4 },		/* mem2 */
  { 1, 1, 2, 2, 4 },		/* mem3 */
  { 1, 1, 2, 2, 4 },		/* mem4 */
  { 1, 1, 2, 2, 0 },		/* mem5 */
  { 1, 1, 2, 2, 0 },		/* mem6 */
  { 1, 1, 0, 0, 0 },		/* mem7 */
};
528
b2d7ede1 529/* Implements TARGET_CONDITIONAL_REGISTER_USAGE. We adjust the number
530 of available memregs, and select which registers need to be preserved
85c84d5c 531 across calls based on the chip family. */
532
b2d7ede1 533#undef TARGET_CONDITIONAL_REGISTER_USAGE
534#define TARGET_CONDITIONAL_REGISTER_USAGE m32c_conditional_register_usage
7182acf5 535void
85c84d5c 536m32c_conditional_register_usage (void)
537{
85c84d5c 538 int i;
539
540 if (0 <= target_memregs && target_memregs <= 16)
541 {
542 /* The command line option is bytes, but our "registers" are
543 16-bit words. */
cc24427c 544 for (i = (target_memregs+1)/2; i < 8; i++)
85c84d5c 545 {
546 fixed_regs[MEM0_REGNO + i] = 1;
547 CLEAR_HARD_REG_BIT (reg_class_contents[MEM_REGS], MEM0_REGNO + i);
548 }
549 }
550
551 /* M32CM and M32C preserve more registers across function calls. */
552 if (TARGET_A24)
553 {
554 call_used_regs[R1_REGNO] = 0;
555 call_used_regs[R2_REGNO] = 0;
556 call_used_regs[R3_REGNO] = 0;
557 call_used_regs[A0_REGNO] = 0;
558 call_used_regs[A1_REGNO] = 0;
559 }
560}
561
/* How Values Fit in Registers */

/* Implements HARD_REGNO_NREGS.  This is complicated by the fact that
   different registers are different sizes from each other, *and* may
   be different sizes in different chip families.  Returns 0 when MODE
   does not fit in REGNO at all; the tests below are order-dependent.  */
static int
m32c_hard_regno_nregs_1 (int regno, machine_mode mode)
{
  /* The flags register only ever holds the condition code.  */
  if (regno == FLG_REGNO && mode == CCmode)
    return 1;
  /* Pseudos: plain word arithmetic.  */
  if (regno >= FIRST_PSEUDO_REGISTER)
    return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);

  /* Memregs are uniform 16-bit words.  */
  if (regno >= MEM0_REGNO && regno <= MEM7_REGNO)
    return (GET_MODE_SIZE (mode) + 1) / 2;

  if (GET_MODE_SIZE (mode) <= 1)
    return nregs_table[regno].qi_regs;
  if (GET_MODE_SIZE (mode) <= 2)
    return nregs_table[regno].hi_regs;
  /* Special case: SImode in a0 on the 16-bit chips spans a0/a1.  */
  if (regno == A0_REGNO && mode == SImode && TARGET_A16)
    return 2;
  if ((GET_MODE_SIZE (mode) <= 3 || mode == PSImode) && TARGET_A24)
    return nregs_table[regno].pi_regs;
  if (GET_MODE_SIZE (mode) <= 4)
    return nregs_table[regno].si_regs;
  if (GET_MODE_SIZE (mode) <= 8)
    return nregs_table[regno].di_regs;
  return 0;
}
592
4a6a8336 593int
3754d046 594m32c_hard_regno_nregs (int regno, machine_mode mode)
4a6a8336 595{
596 int rv = m32c_hard_regno_nregs_1 (regno, mode);
597 return rv ? rv : 1;
598}
599
85c84d5c 600/* Implements HARD_REGNO_MODE_OK. The above function does the work
601 already; just test its return value. */
602int
3754d046 603m32c_hard_regno_ok (int regno, machine_mode mode)
85c84d5c 604{
4a6a8336 605 return m32c_hard_regno_nregs_1 (regno, mode) != 0;
85c84d5c 606}
607
/* Implements MODES_TIEABLE_P.  In general, modes aren't tieable since
   registers are all different sizes.  However, since most modes are
   bigger than our registers anyway, it's easier to implement this
   function that way, leaving QImode as the only unique case.
   NOTE(review): with the #if 0 block disabled, this currently returns
   1 for every pair of modes.  */
int
m32c_modes_tieable_p (machine_mode m1, machine_mode m2)
{
  if (GET_MODE_SIZE (m1) == GET_MODE_SIZE (m2))
    return 1;

  /* Disabled QImode exception; kept for reference.  */
#if 0
  if (m1 == QImode || m2 == QImode)
    return 0;
#endif

  return 1;
}
625
626/* Register Classes */
627
628/* Implements REGNO_REG_CLASS. */
1675aa0a 629enum reg_class
85c84d5c 630m32c_regno_reg_class (int regno)
631{
632 switch (regno)
633 {
634 case R0_REGNO:
635 return R0_REGS;
636 case R1_REGNO:
637 return R1_REGS;
638 case R2_REGNO:
639 return R2_REGS;
640 case R3_REGNO:
641 return R3_REGS;
642 case A0_REGNO:
1facaf0d 643 return A0_REGS;
85c84d5c 644 case A1_REGNO:
1facaf0d 645 return A1_REGS;
85c84d5c 646 case SB_REGNO:
647 return SB_REGS;
648 case FB_REGNO:
649 return FB_REGS;
650 case SP_REGNO:
651 return SP_REGS;
652 case FLG_REGNO:
653 return FLG_REGS;
654 default:
655 if (IS_MEM_REGNO (regno))
656 return MEM_REGS;
657 return ALL_REGS;
658 }
659}
660
85c84d5c 661/* Implements REGNO_OK_FOR_BASE_P. */
662int
663m32c_regno_ok_for_base_p (int regno)
664{
665 if (regno == A0_REGNO
666 || regno == A1_REGNO || regno >= FIRST_PSEUDO_REGISTER)
667 return 1;
668 return 0;
669}
670
/* Implements TARGET_PREFERRED_RELOAD_CLASS.  In general, prefer general
   registers of the appropriate size.  Narrows RCLASS (via
   reduce_class) toward a class matching the size of X's mode.  */

#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS m32c_preferred_reload_class

static reg_class_t
m32c_preferred_reload_class (rtx x, reg_class_t rclass)
{
  reg_class_t newclass = rclass;

#if DEBUG0
  fprintf (stderr, "\npreferred_reload_class for %s is ",
	   class_names[rclass]);
#endif
  /* With no starting class, pick a size-appropriate default.  */
  if (rclass == NO_REGS)
    rclass = GET_MODE (x) == QImode ? HL_REGS : R03_REGS;

  /* Control registers reload through r0l/r1l when byte-sized.  */
  if (reg_classes_intersect_p (rclass, CR_REGS))
    {
      switch (GET_MODE (x))
	{
	case QImode:
	  newclass = HL_REGS;
	  break;
	default:
	  /* newclass = HI_REGS; */
	  break;
	}
    }

  else if (newclass == QI_REGS && GET_MODE_SIZE (GET_MODE (x)) > 2)
    newclass = SI_REGS;
  else if (GET_MODE_SIZE (GET_MODE (x)) > 4
	   && ! reg_class_subset_p (R03_REGS, rclass))
    newclass = DI_REGS;

  /* Intersect the original class with the size-derived preference,
     keeping the original if they don't overlap.  */
  rclass = reduce_class (rclass, newclass, rclass);

  if (GET_MODE (x) == QImode)
    rclass = reduce_class (rclass, HL_REGS, rclass);

#if DEBUG0
  fprintf (stderr, "%s\n", class_names[rclass]);
  debug_rtx (x);

  if (GET_CODE (x) == MEM
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
    fprintf (stderr, "Glorm!\n");
#endif
  return rclass;
}
724
/* Implements TARGET_PREFERRED_OUTPUT_RELOAD_CLASS.  Output reloads
   use the same class preferences as input reloads.  */

#undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
#define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS m32c_preferred_output_reload_class

static reg_class_t
m32c_preferred_output_reload_class (rtx x, reg_class_t rclass)
{
  return m32c_preferred_reload_class (x, rclass);
}
735
736/* Implements LIMIT_RELOAD_CLASS. We basically want to avoid using
737 address registers for reloads since they're needed for address
738 reloads. */
739int
3754d046 740m32c_limit_reload_class (machine_mode mode, int rclass)
85c84d5c 741{
2cd6aee1 742#if DEBUG0
85c84d5c 743 fprintf (stderr, "limit_reload_class for %s: %s ->",
744 mode_name[mode], class_names[rclass]);
745#endif
746
747 if (mode == QImode)
748 rclass = reduce_class (rclass, HL_REGS, rclass);
749 else if (mode == HImode)
750 rclass = reduce_class (rclass, HI_REGS, rclass);
751 else if (mode == SImode)
752 rclass = reduce_class (rclass, SI_REGS, rclass);
753
754 if (rclass != A_REGS)
755 rclass = reduce_class (rclass, DI_REGS, rclass);
756
2cd6aee1 757#if DEBUG0
85c84d5c 758 fprintf (stderr, " %s\n", class_names[rclass]);
759#endif
760 return rclass;
761}
762
/* Implements SECONDARY_RELOAD_CLASS.  QImode have to be reloaded in
   r0 or r1, as those are the only real QImode registers.  CR regs get
   reloaded through appropriately sized general or address
   registers.  */
int
m32c_secondary_reload_class (int rclass, machine_mode mode, rtx x)
{
  /* First word of RCLASS's hard-register bitmask.  */
  int cc = class_contents[rclass][0];
#if DEBUG0
  fprintf (stderr, "\nsecondary reload class %s %s\n",
	   class_names[rclass], mode_name[mode]);
  debug_rtx (x);
#endif
  /* A QImode memory reload into a class containing only non-QI
     registers (nothing outside r2/r3) needs a QI-capable scratch.  */
  if (mode == QImode
      && GET_CODE (x) == MEM && (cc & ~class_contents[R23_REGS][0]) == 0)
    return QI_REGS;
  /* Moves between control registers (sb..sp) and CR classes go
     through a general or address register of matching width.  */
  if (reg_classes_intersect_p (rclass, CR_REGS)
      && GET_CODE (x) == REG
      && REGNO (x) >= SB_REGNO && REGNO (x) <= SP_REGNO)
    return (TARGET_A16 || mode == HImode) ? HI_REGS : A_REGS;
  return NO_REGS;
}
785
cac9b7c7 786/* Implements TARGET_CLASS_LIKELY_SPILLED_P. A_REGS is needed for address
85c84d5c 787 reloads. */
cac9b7c7 788
789#undef TARGET_CLASS_LIKELY_SPILLED_P
790#define TARGET_CLASS_LIKELY_SPILLED_P m32c_class_likely_spilled_p
791
792static bool
793m32c_class_likely_spilled_p (reg_class_t regclass)
85c84d5c 794{
795 if (regclass == A_REGS)
cac9b7c7 796 return true;
797
798 return (reg_class_size[(int) regclass] == 1);
85c84d5c 799}
800
c3271fdb 801/* Implements TARGET_CLASS_MAX_NREGS. We calculate this according to its
85c84d5c 802 documented meaning, to avoid potential inconsistencies with actual
803 class definitions. */
c3271fdb 804
805#undef TARGET_CLASS_MAX_NREGS
806#define TARGET_CLASS_MAX_NREGS m32c_class_max_nregs
807
808static unsigned char
3754d046 809m32c_class_max_nregs (reg_class_t regclass, machine_mode mode)
85c84d5c 810{
c3271fdb 811 int rn;
812 unsigned char max = 0;
85c84d5c 813
814 for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
c3271fdb 815 if (TEST_HARD_REG_BIT (reg_class_contents[(int) regclass], rn))
85c84d5c 816 {
c3271fdb 817 unsigned char n = m32c_hard_regno_nregs (rn, mode);
85c84d5c 818 if (max < n)
819 max = n;
820 }
821 return max;
822}
823
/* Implements CANNOT_CHANGE_MODE_CLASS.  Only r0 and r1 can change to
   QI (r0l, r1l) because the chip doesn't support QI ops on other
   registers (well, it does on a0/a1 but if we let gcc do that, reload
   suffers).  Otherwise, we allow changes to larger modes.  */
int
m32c_cannot_change_mode_class (machine_mode from,
			       machine_mode to, int rclass)
{
  int rn;
#if DEBUG0
  fprintf (stderr, "cannot change from %s to %s in %s\n",
	   mode_name[from], mode_name[to], class_names[rclass]);
#endif

  /* If the larger mode isn't allowed in any of these registers, we
     can't allow the change.  */
  for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
    if (class_contents[rclass][0] & (1 << rn))
      if (! m32c_hard_regno_ok (rn, to))
	return 1;

  /* 0x1ffa masks every register bit except r0 (bit 0) and r1 (bit 2);
     changing TO QImode is forbidden if the class contains any other
     register.  */
  if (to == QImode)
    return (class_contents[rclass][0] & 0x1ffa);

  /* 0x0005 = r0 | r1: those two may grow to any multi-byte mode.  */
  if (class_contents[rclass][0] & 0x0005 /* r0, r1 */
      && GET_MODE_SIZE (from) > 1)
    return 0;
  /* All other registers may grow beyond two bytes.  */
  if (GET_MODE_SIZE (from) > 2)	/* all other regs */
    return 0;

  return 1;
}
856
/* Helpers for the rest of the file.  */
/* TRUE if the rtx is a REG rtx for the given register.  */
#define IS_REG(rtx,regno) (GET_CODE (rtx) == REG \
			   && REGNO (rtx) == regno)
/* TRUE if the rtx is a pseudo - specifically, one we can use as a
   base register in address calculations (hence the "strict"
   argument).  */
#define IS_PSEUDO(rtx,strict) (!strict && GET_CODE (rtx) == REG \
			       && (REGNO (rtx) == AP_REGNO \
				   || REGNO (rtx) >= FIRST_PSEUDO_REGISTER))

/* TRUE if X is the a0 hard register or a pseudo.  NOTE(review): the
   second arm applies REGNO to X without checking it is a REG; callers
   below only use this on operands already matched as registers by the
   encoded pattern -- confirm before reusing elsewhere.  */
#define A0_OR_PSEUDO(x) (IS_REG(x, A0_REGNO) || REGNO (x) >= FIRST_PSEUDO_REGISTER)
869
/* Implements matching for constraints (see next function too).  'S' is
   for memory constraints, plus "Rpa" for PARALLEL rtx's we use for
   call return values.  Each case tests VALUE against the encoded
   pattern string produced by encode_pattern (see RTX_IS and
   patternr[] above).  */
bool
m32c_matches_constraint_p (rtx value, int constraint)
{
  encode_pattern (value);

  switch (constraint) {
  case CONSTRAINT_SF:
    /* Far-space memory: a0/pseudo base, optionally with index and/or
       symbol displacement, or a bare symbol address.  */
    return (far_addr_space_p (value)
	    && ((RTX_IS ("mr")
		 && A0_OR_PSEUDO (patternr[1])
		 && GET_MODE (patternr[1]) == SImode)
		|| (RTX_IS ("m+^Sri")
		    && A0_OR_PSEUDO (patternr[4])
		    && GET_MODE (patternr[4]) == HImode)
		|| (RTX_IS ("m+^Srs")
		    && A0_OR_PSEUDO (patternr[4])
		    && GET_MODE (patternr[4]) == HImode)
		|| (RTX_IS ("m+^S+ris")
		    && A0_OR_PSEUDO (patternr[5])
		    && GET_MODE (patternr[5]) == HImode)
		|| RTX_IS ("ms")));
  case CONSTRAINT_Sd:
    {
      /* This is the common "src/dest" address */
      rtx r;
      if (GET_CODE (value) == MEM && CONSTANT_P (XEXP (value, 0)))
	return true;
      if (RTX_IS ("ms") || RTX_IS ("m+si"))
	return true;
      if (RTX_IS ("m++rii"))
	{
	  if (REGNO (patternr[3]) == FB_REGNO
	      && INTVAL (patternr[4]) == 0)
	    return true;
	}
      if (RTX_IS ("mr"))
	r = patternr[1];
      else if (RTX_IS ("m+ri") || RTX_IS ("m+rs") || RTX_IS ("m+r+si"))
	r = patternr[2];
      else
	return false;
      /* Stack-pointer-relative addresses are handled by Ss below.  */
      if (REGNO (r) == SP_REGNO)
	return false;
      return m32c_legitimate_address_p (GET_MODE (value), XEXP (value, 0), 1);
    }
  case CONSTRAINT_Sa:
    {
      /* Address-register indirect, with optional constant offset.  */
      rtx r;
      if (RTX_IS ("mr"))
	r = patternr[1];
      else if (RTX_IS ("m+ri"))
	r = patternr[2];
      else
	return false;
      return (IS_REG (r, A0_REGNO) || IS_REG (r, A1_REGNO));
    }
  case CONSTRAINT_Si:
    /* Constant (absolute or symbolic) addresses.  */
    return (RTX_IS ("mi") || RTX_IS ("ms") || RTX_IS ("m+si"));
  case CONSTRAINT_Ss:
    /* Stack-pointer relative.  */
    return ((RTX_IS ("mr")
	     && (IS_REG (patternr[1], SP_REGNO)))
	    || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SP_REGNO))));
  case CONSTRAINT_Sf:
    /* Frame-base relative.  */
    return ((RTX_IS ("mr")
	     && (IS_REG (patternr[1], FB_REGNO)))
	    || (RTX_IS ("m+ri") && (IS_REG (patternr[2], FB_REGNO))));
  case CONSTRAINT_Sb:
    /* Static-base relative.  */
    return ((RTX_IS ("mr")
	     && (IS_REG (patternr[1], SB_REGNO)))
	    || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SB_REGNO))));
  case CONSTRAINT_Sp:
    /* Absolute addresses 0..0x1fff used for bit addressing (I/O ports) */
    return (RTX_IS ("mi")
	    && !(INTVAL (patternr[1]) & ~0x1fff));
  case CONSTRAINT_S1:
    return r1h_operand (value, QImode);
  case CONSTRAINT_Rpa:
    return GET_CODE (value) == PARALLEL;
  default:
    return false;
  }
}
955
/* STACK AND CALLING */

/* Frame Layout */

/* Implements RETURN_ADDR_RTX.  Note that R8C and M16C push 24 bits
   (yes, THREE bytes) onto the stack for the return address, but we
   don't support pointers bigger than 16 bits on those chips.  This
   will likely wreak havoc with exception unwinding.  FIXME.  */
rtx
m32c_return_addr_rtx (int count)
{
  machine_mode mode;
  int offset;
  rtx ra_mem;

  /* Only the current frame's return address is available.  */
  if (count)
    return NULL_RTX;
  /* we want 2[$fb] */

  if (TARGET_A24)
    {
      /* It's four bytes */
      mode = PSImode;
      offset = 4;
    }
  else
    {
      /* FIXME: it's really 3 bytes */
      mode = HImode;
      offset = 2;
    }

  /* Load the saved return address from offset[$fb] into a fresh
     register of the appropriate mode.  */
  ra_mem =
    gen_rtx_MEM (mode, plus_constant (Pmode, gen_rtx_REG (Pmode, FP_REGNO),
				      offset));
  return copy_to_mode_reg (mode, ra_mem);
}
993
/* Implements INCOMING_RETURN_ADDR_RTX.  See comment above.  On entry
   the return address sits at the top of stack.  */
rtx
m32c_incoming_return_addr_rtx (void)
{
  /* we want [sp] */
  return gen_rtx_MEM (PSImode, gen_rtx_REG (PSImode, SP_REGNO));
}
1001
1002/* Exception Handling Support */
1003
1004/* Implements EH_RETURN_DATA_REGNO. Choose registers able to hold
1005 pointers. */
1006int
1007m32c_eh_return_data_regno (int n)
1008{
1009 switch (n)
1010 {
1011 case 0:
09a686a0 1012 return MEM0_REGNO;
85c84d5c 1013 case 1:
09a686a0 1014 return MEM0_REGNO+4;
85c84d5c 1015 default:
1016 return INVALID_REGNUM;
1017 }
1018}
1019
1020/* Implements EH_RETURN_STACKADJ_RTX. Saved and used later in
1021 m32c_emit_eh_epilogue. */
1022rtx
1023m32c_eh_return_stackadj_rtx (void)
1024{
1025 if (!cfun->machine->eh_stack_adjust)
1026 {
1027 rtx sa;
1028
afde7ac7 1029 sa = gen_rtx_REG (Pmode, R0_REGNO);
85c84d5c 1030 cfun->machine->eh_stack_adjust = sa;
1031 }
1032 return cfun->machine->eh_stack_adjust;
1033}
1034
1035/* Registers That Address the Stack Frame */
1036
/* Implements DWARF_FRAME_REGNUM and DBX_REGISTER_NUMBER.  Note that
   the original spec called for dwarf numbers to vary with register
   width as well, for example, r0l, r0, and r2r0 would each have
   different dwarf numbers.  GCC doesn't support this, and we don't do
   it, and gdb seems to like it this way anyway.  */
unsigned int
m32c_dwarf_frame_regnum (int n)
{
  switch (n)
    {
    case R0_REGNO:
      return 5;
    case R1_REGNO:
      return 6;
    case R2_REGNO:
      return 7;
    case R3_REGNO:
      return 8;
    case A0_REGNO:
      return 9;
    case A1_REGNO:
      return 10;
    case FB_REGNO:
      return 11;
    case SB_REGNO:
      /* $sb maps to 19, out of sequence with the other registers.  */
      return 19;

    case SP_REGNO:
      return 12;
    case PC_REGNO:
      return 13;
    default:
      /* Registers with no DWARF mapping fall past the end of the
	 DWARF register table.  */
      return DWARF_FRAME_REGISTERS + 1;
    }
}
1072
1073/* The frame looks like this:
1074
1075 ap -> +------------------------------
1076 | Return address (3 or 4 bytes)
1077 | Saved FB (2 or 4 bytes)
1078 fb -> +------------------------------
1079 | local vars
1080 | register saves fb
1081 | through r0 as needed
1082 sp -> +------------------------------
1083*/
1084
/* We use this to wrap all emitted insns in the prologue.  Marks X as
   frame-related (so it contributes to the DWARF CFI) and returns it
   unchanged, allowing calls to be wrapped inline.  */
static rtx
F (rtx x)
{
  RTX_FRAME_RELATED_P (x) = 1;
  return x;
}
1092
/* This maps register numbers to the PUSHM/POPM bitfield, and tells us
   how much the stack pointer moves for each, for each cpu family.  */
static struct
{
  int reg1;		/* Hard register number.  */
  int bit;		/* Bit in the PUSHM/POPM operand byte.  */
  int a16_bytes;	/* Bytes pushed on A16 (R8C/M16C) parts.  */
  int a24_bytes;	/* Bytes pushed on A24 (M32C) parts.  */
} pushm_info[] =
{
  /* These are in reverse push (nearest-to-sp) order.  */
  { R0_REGNO, 0x80, 2, 2 },
  { R1_REGNO, 0x40, 2, 2 },
  { R2_REGNO, 0x20, 2, 2 },
  { R3_REGNO, 0x10, 2, 2 },
  { A0_REGNO, 0x08, 2, 4 },
  { A1_REGNO, 0x04, 2, 4 },
  { SB_REGNO, 0x02, 2, 4 },
  { FB_REGNO, 0x01, 2, 4 }
};

/* Number of entries in pushm_info.  */
#define PUSHM_N (sizeof(pushm_info)/sizeof(pushm_info[0]))
1115
/* Returns TRUE if we need to save/restore the given register.  We
   save everything for exception handlers, so that any register can be
   unwound.  For interrupt handlers, we save everything if the handler
   calls something else (because we don't know what *that* function
   might do), but try to be a bit smarter if the handler is a leaf
   function.  We always save $a0, though, because we use that in the
   epilogue to copy $fb to $sp.  */
static int
need_to_save (int regno)
{
  if (fixed_regs[regno])
    return 0;
  if (crtl->calls_eh_return)
    return 1;
  /* $fb is excluded from the pushm save set; the frame diagram above
     shows it saved as part of the frame setup itself.  */
  if (regno == FP_REGNO)
    return 0;
  if (cfun->machine->is_interrupt
      && (!cfun->machine->is_leaf
	  || (regno == A0_REGNO
	      && m32c_function_needs_enter ())
	  ))
    return 1;
  /* The usual case: registers live in this function that are not
     call-clobbered (or anything live, for interrupt handlers).  */
  if (df_regs_ever_live_p (regno)
      && (!call_used_regs[regno] || cfun->machine->is_interrupt))
    return 1;
  return 0;
}
1143
/* This function contains all the intelligence about saving and
   restoring registers.  It always figures out the register save set.
   When called with PP_justcount, it merely returns the size of the
   save set (for eliminating the frame pointer, for example).  When
   called with PP_pushm or PP_popm, it emits the appropriate
   instructions for saving (pushm) or restoring (popm) the
   registers.  */
static int
m32c_pushm_popm (Push_Pop_Type ppt)
{
  int reg_mask = 0;		/* PUSHM/POPM operand byte being built.  */
  int byte_count = 0, bytes;	/* Total stack bytes moved so far.  */
  int i;
  rtx dwarf_set[PUSHM_N];	/* Per-register SETs for the CFI note.  */
  int n_dwarfs = 0;
  int nosave_mask = 0;		/* Registers that must NOT be restored.  */

  /* When the return value comes back in registers (a PARALLEL return
     rtx), mask those registers out of the save set so the epilogue's
     popm doesn't clobber them.  EH and interrupt returns still save
     everything.  */
  if (crtl->return_rtx
      && GET_CODE (crtl->return_rtx) == PARALLEL
      && !(crtl->calls_eh_return || cfun->machine->is_interrupt))
    {
      rtx exp = XVECEXP (crtl->return_rtx, 0, 0);
      rtx rv = XEXP (exp, 0);
      int rv_bytes = GET_MODE_SIZE (GET_MODE (rv));

      if (rv_bytes > 2)
	nosave_mask |= 0x20;	/* PSI, SI */
      else
	nosave_mask |= 0xf0;	/* DF */
      if (rv_bytes > 4)
	nosave_mask |= 0x50;	/* DI */
    }

  for (i = 0; i < (int) PUSHM_N; i++)
    {
      /* Skip registers that don't need saving.  */
      if (!need_to_save (pushm_info[i].reg1))
	continue;

      if (pushm_info[i].bit & nosave_mask)
	continue;

      reg_mask |= pushm_info[i].bit;
      bytes = TARGET_A16 ? pushm_info[i].a16_bytes : pushm_info[i].a24_bytes;

      if (ppt == PP_pushm)
	{
	  machine_mode mode = (bytes == 2) ? HImode : SImode;
	  rtx addr;

	  /* Always use stack_pointer_rtx instead of calling
	     rtx_gen_REG ourselves.  Code elsewhere in GCC assumes
	     that there is a single rtx representing the stack pointer,
	     namely stack_pointer_rtx, and uses == to recognize it.  */
	  addr = stack_pointer_rtx;

	  if (byte_count != 0)
	    addr = gen_rtx_PLUS (GET_MODE (addr), addr, GEN_INT (byte_count));

	  /* Record where this register lands, relative to the final
	     $sp, for the REG_FRAME_RELATED_EXPR note below.  */
	  dwarf_set[n_dwarfs++] =
	    gen_rtx_SET (gen_rtx_MEM (mode, addr),
			 gen_rtx_REG (mode, pushm_info[i].reg1));
	  F (dwarf_set[n_dwarfs - 1]);

	}
      byte_count += bytes;
    }

  if (cfun->machine->is_interrupt)
    {
      /* For interrupt handlers, stash the mask in intr_pushm and
	 restart the count; the mem* registers below are handled
	 separately from the pushm block.  */
      cfun->machine->intr_pushm = reg_mask & 0xfe;
      reg_mask = 0;
      byte_count = 0;
    }

  if (cfun->machine->is_interrupt)
    for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
      if (need_to_save (i))
	{
	  byte_count += 2;
	  cfun->machine->intr_pushmem[i - MEM0_REGNO] = 1;
	}

  if (ppt == PP_pushm && byte_count)
    {
      rtx note = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (n_dwarfs + 1));
      rtx pushm;

      if (reg_mask)
	{
	  /* Element 0 of the note is the $sp adjustment; the rest
	     describe the individual register stores.  */
	  XVECEXP (note, 0, 0)
	    = gen_rtx_SET (stack_pointer_rtx,
			   gen_rtx_PLUS (GET_MODE (stack_pointer_rtx),
					 stack_pointer_rtx,
					 GEN_INT (-byte_count)));
	  F (XVECEXP (note, 0, 0));

	  for (i = 0; i < n_dwarfs; i++)
	    XVECEXP (note, 0, i + 1) = dwarf_set[i];

	  pushm = F (emit_insn (gen_pushm (GEN_INT (reg_mask))));

	  add_reg_note (pushm, REG_FRAME_RELATED_EXPR, note);
	}

      /* The mem* registers are not covered by the pushm mask; push
	 them one at a time.  */
      if (cfun->machine->is_interrupt)
	for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
	  if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
	    {
	      if (TARGET_A16)
		pushm = emit_insn (gen_pushhi_16 (gen_rtx_REG (HImode, i)));
	      else
		pushm = emit_insn (gen_pushhi_24 (gen_rtx_REG (HImode, i)));
	      F (pushm);
	    }
    }
  if (ppt == PP_popm && byte_count)
    {
      /* Restore in reverse push order: mem* registers first, then the
	 popm block.  */
      if (cfun->machine->is_interrupt)
	for (i = MEM7_REGNO; i >= MEM0_REGNO; i--)
	  if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
	    {
	      if (TARGET_A16)
		emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, i)));
	      else
		emit_insn (gen_pophi_24 (gen_rtx_REG (HImode, i)));
	    }
      if (reg_mask)
	emit_insn (gen_popm (GEN_INT (reg_mask)));
    }

  return byte_count;
}
1277
1278/* Implements INITIAL_ELIMINATION_OFFSET. See the comment above that
1279 diagrams our call frame. */
1280int
1281m32c_initial_elimination_offset (int from, int to)
1282{
1283 int ofs = 0;
1284
1285 if (from == AP_REGNO)
1286 {
1287 if (TARGET_A16)
1288 ofs += 5;
1289 else
1290 ofs += 8;
1291 }
1292
1293 if (to == SP_REGNO)
1294 {
1295 ofs += m32c_pushm_popm (PP_justcount);
1296 ofs += get_frame_size ();
1297 }
1298
1299 /* Account for push rounding. */
1300 if (TARGET_A24)
1301 ofs = (ofs + 1) & ~1;
1302#if DEBUG0
1303 fprintf (stderr, "initial_elimination_offset from=%d to=%d, ofs=%d\n", from,
1304 to, ofs);
1305#endif
1306 return ofs;
1307}
1308
1309/* Passing Function Arguments on the Stack */
1310
85c84d5c 1311/* Implements PUSH_ROUNDING. The R8C and M16C have byte stacks, the
1312 M32C has word stacks. */
1675aa0a 1313unsigned int
85c84d5c 1314m32c_push_rounding (int n)
1315{
1316 if (TARGET_R8C || TARGET_M16C)
1317 return n;
1318 return (n + 1) & ~1;
1319}
1320
1321/* Passing Arguments in Registers */
1322
/* Implements TARGET_FUNCTION_ARG.  Arguments are passed partly in
   registers, partly on stack.  If our function returns a struct, a
   pointer to a buffer for it is at the top of the stack (last thing
   pushed).  The first few real arguments may be in registers as
   follows:

   R8C/M16C:	arg1 in r1 if it's QI or HI (else it's pushed on stack)
		arg2 in r2 if it's HI (else pushed on stack)
		rest on stack
   M32C:	arg1 in r0 if it's QI or HI (else it's pushed on stack)
		rest on stack

   Structs are not passed in registers, even if they fit.  Only
   integer and pointer types are passed in registers.

   Note that when arg1 doesn't fit in r1, arg2 may still be passed in
   r2 if it fits.  */
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG m32c_function_arg
static rtx
m32c_function_arg (cumulative_args_t ca_v,
		   machine_mode mode, const_tree type, bool named)
{
  CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);

  /* Can return a reg, parallel, or 0 for stack */
  rtx rv = NULL_RTX;
#if DEBUG0
  fprintf (stderr, "func_arg %d (%s, %d)\n",
	   ca->parm_num, mode_name[mode], named);
  debug_tree ((tree)type);
#endif

  /* VOIDmode marks the end of the argument list.  */
  if (mode == VOIDmode)
    return GEN_INT (0);

  /* The hidden struct-return pointer slot and unnamed (variadic)
     arguments always go on the stack.  */
  if (ca->force_mem || !named)
    {
#if DEBUG0
      fprintf (stderr, "func arg: force %d named %d, mem\n", ca->force_mem,
	       named);
#endif
      return NULL_RTX;
    }

  /* NOTE(review): a type cannot be both integral AND a pointer, so
     this condition can never be true; given the comment above ("Only
     integer and pointer types are passed in registers") this looks
     like it was meant to be !INTEGRAL_TYPE_P && !POINTER_TYPE_P.
     Changing it would change the calling convention, so verify
     against the documented ABI before touching it.  */
  if (type && INTEGRAL_TYPE_P (type) && POINTER_TYPE_P (type))
    return NULL_RTX;

  /* Aggregates go on the stack even when they would fit in a
     register.  */
  if (type && AGGREGATE_TYPE_P (type))
    return NULL_RTX;

  switch (ca->parm_num)
    {
    case 1:
      /* First argument: $r1 on A16, $r0 on A24, if 1 or 2 bytes.  */
      if (GET_MODE_SIZE (mode) == 1 || GET_MODE_SIZE (mode) == 2)
	rv = gen_rtx_REG (mode, TARGET_A16 ? R1_REGNO : R0_REGNO);
      break;

    case 2:
      /* Second argument: $r2, A16 only, if exactly 2 bytes.  */
      if (TARGET_A16 && GET_MODE_SIZE (mode) == 2)
	rv = gen_rtx_REG (mode, R2_REGNO);
      break;
    }

#if DEBUG0
  debug_rtx (rv);
#endif
  return rv;
}
1392
1393#undef TARGET_PASS_BY_REFERENCE
1394#define TARGET_PASS_BY_REFERENCE m32c_pass_by_reference
1395static bool
39cba157 1396m32c_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
3754d046 1397 machine_mode mode ATTRIBUTE_UNUSED,
fb80456a 1398 const_tree type ATTRIBUTE_UNUSED,
85c84d5c 1399 bool named ATTRIBUTE_UNUSED)
1400{
1401 return 0;
1402}
1403
1404/* Implements INIT_CUMULATIVE_ARGS. */
1405void
1406m32c_init_cumulative_args (CUMULATIVE_ARGS * ca,
87eb9cbf 1407 tree fntype,
85c84d5c 1408 rtx libname ATTRIBUTE_UNUSED,
87eb9cbf 1409 tree fndecl,
85c84d5c 1410 int n_named_args ATTRIBUTE_UNUSED)
1411{
87eb9cbf 1412 if (fntype && aggregate_value_p (TREE_TYPE (fntype), fndecl))
1413 ca->force_mem = 1;
1414 else
1415 ca->force_mem = 0;
85c84d5c 1416 ca->parm_num = 1;
1417}
1418
/* Implements TARGET_FUNCTION_ARG_ADVANCE.  force_mem is set for
   functions returning structures, so we always reset that.  Otherwise,
   we only need to know the sequence number of the argument to know what
   to do with it.  */
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE m32c_function_arg_advance
static void
m32c_function_arg_advance (cumulative_args_t ca_v,
			   machine_mode mode ATTRIBUTE_UNUSED,
			   const_tree type ATTRIBUTE_UNUSED,
			   bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);

  /* The first slot consumed is the hidden struct-return pointer, when
     present; it does not count toward parm_num.  */
  if (ca->force_mem)
    ca->force_mem = 0;
  else
    ca->parm_num++;
}
1438
bd99ba64 1439/* Implements TARGET_FUNCTION_ARG_BOUNDARY. */
1440#undef TARGET_FUNCTION_ARG_BOUNDARY
1441#define TARGET_FUNCTION_ARG_BOUNDARY m32c_function_arg_boundary
1442static unsigned int
3754d046 1443m32c_function_arg_boundary (machine_mode mode ATTRIBUTE_UNUSED,
bd99ba64 1444 const_tree type ATTRIBUTE_UNUSED)
1445{
1446 return (TARGET_A16 ? 8 : 16);
1447}
1448
85c84d5c 1449/* Implements FUNCTION_ARG_REGNO_P. */
1450int
1451m32c_function_arg_regno_p (int r)
1452{
1453 if (TARGET_A24)
1454 return (r == R0_REGNO);
1455 return (r == R1_REGNO || r == R2_REGNO);
1456}
1457
0a8d9665 1458/* HImode and PSImode are the two "native" modes as far as GCC is
c910419d 1459 concerned, but the chips also support a 32-bit mode which is used
0a8d9665 1460 for some opcodes in R8C/M16C and for reset vectors and such. */
1461#undef TARGET_VALID_POINTER_MODE
1462#define TARGET_VALID_POINTER_MODE m32c_valid_pointer_mode
3fd11504 1463static bool
3754d046 1464m32c_valid_pointer_mode (machine_mode mode)
0a8d9665 1465{
0a8d9665 1466 if (mode == HImode
1467 || mode == PSImode
1468 || mode == SImode
1469 )
1470 return 1;
1471 return 0;
1472}
1473
85c84d5c 1474/* How Scalar Function Values Are Returned */
1475
/* Implements TARGET_LIBCALL_VALUE.  Most values are returned in $r0, or some
   combination of registers starting there (r2r0 for longs, r3r1r2r0
   for long long, r3r2r1r0 for doubles), except that that ABI
   currently doesn't work because it ends up using all available
   general registers and gcc often can't compile it.  So, instead, we
   return anything bigger than 16 bits in "mem0" (effectively, a
   memory location).  */

#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE m32c_libcall_value

static rtx
m32c_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
{
  /* return reg or parallel */
#if 0
  /* FIXME: GCC has difficulty returning large values in registers,
     because that ties up most of the general registers and gives the
     register allocator little to work with.  Until we can resolve
     this, large values are returned in memory.  */
  if (mode == DFmode)
    {
      rtx rv;

      rv = gen_rtx_PARALLEL (mode, rtvec_alloc (4));
      XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R0_REGNO),
					      GEN_INT (0));
      XVECEXP (rv, 0, 1) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R1_REGNO),
					      GEN_INT (2));
      XVECEXP (rv, 0, 2) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R2_REGNO),
					      GEN_INT (4));
      XVECEXP (rv, 0, 3) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R3_REGNO),
					      GEN_INT (6));
      return rv;
    }

  if (TARGET_A24 && GET_MODE_SIZE (mode) > 2)
    {
      rtx rv;

      rv = gen_rtx_PARALLEL (mode, rtvec_alloc (1));
      XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (mode,
							   R0_REGNO),
					      GEN_INT (0));
      return rv;
    }
#endif

  /* Anything wider than 16 bits comes back in mem0; 16 bits or
     narrower comes back in $r0.  */
  if (GET_MODE_SIZE (mode) > 2)
    return gen_rtx_REG (mode, MEM0_REGNO);
  return gen_rtx_REG (mode, R0_REGNO);
}
1537
f57d8b49 1538/* Implements TARGET_FUNCTION_VALUE. Functions and libcalls have the same
85c84d5c 1539 conventions. */
f57d8b49 1540
1541#undef TARGET_FUNCTION_VALUE
1542#define TARGET_FUNCTION_VALUE m32c_function_value
1543
1544static rtx
1545m32c_function_value (const_tree valtype,
1546 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
1547 bool outgoing ATTRIBUTE_UNUSED)
85c84d5c 1548{
1549 /* return reg or parallel */
3754d046 1550 const machine_mode mode = TYPE_MODE (valtype);
f57d8b49 1551 return m32c_libcall_value (mode, NULL_RTX);
1552}
1553
1722522a 1554/* Implements TARGET_FUNCTION_VALUE_REGNO_P. */
1555
1556#undef TARGET_FUNCTION_VALUE_REGNO_P
1557#define TARGET_FUNCTION_VALUE_REGNO_P m32c_function_value_regno_p
f57d8b49 1558
1722522a 1559static bool
f57d8b49 1560m32c_function_value_regno_p (const unsigned int regno)
1561{
1562 return (regno == R0_REGNO || regno == MEM0_REGNO);
85c84d5c 1563}
1564
1565/* How Large Values Are Returned */
1566
1567/* We return structures by pushing the address on the stack, even if
1568 we use registers for the first few "real" arguments. */
1569#undef TARGET_STRUCT_VALUE_RTX
1570#define TARGET_STRUCT_VALUE_RTX m32c_struct_value_rtx
1571static rtx
1572m32c_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
1573 int incoming ATTRIBUTE_UNUSED)
1574{
1575 return 0;
1576}
1577
1578/* Function Entry and Exit */
1579
1580/* Implements EPILOGUE_USES. Interrupts restore all registers. */
1581int
1582m32c_epilogue_uses (int regno ATTRIBUTE_UNUSED)
1583{
1584 if (cfun->machine->is_interrupt)
1585 return 1;
1586 return 0;
1587}
1588
1589/* Implementing the Varargs Macros */
1590
1591#undef TARGET_STRICT_ARGUMENT_NAMING
1592#define TARGET_STRICT_ARGUMENT_NAMING m32c_strict_argument_naming
1593static bool
39cba157 1594m32c_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
85c84d5c 1595{
1596 return 1;
1597}
1598
1599/* Trampolines for Nested Functions */
1600
1601/*
1602 m16c:
1603 1 0000 75C43412 mov.w #0x1234,a0
1604 2 0004 FC000000 jmp.a label
1605
1606 m32c:
1607 1 0000 BC563412 mov.l:s #0x123456,a0
1608 2 0004 CC000000 jmp.a label
1609*/
1610
1611/* Implements TRAMPOLINE_SIZE. */
1612int
1613m32c_trampoline_size (void)
1614{
1615 /* Allocate extra space so we can avoid the messy shifts when we
1616 initialize the trampoline; we just write past the end of the
1617 opcode. */
1618 return TARGET_A16 ? 8 : 10;
1619}
1620
/* Implements TRAMPOLINE_ALIGNMENT.  */
int
m32c_trampoline_alignment (void)
{
  /* Trampolines only need 16-bit (2-byte) alignment.  */
  return 2;
}
1627
/* Implements TARGET_TRAMPOLINE_INIT.  */

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT m32c_trampoline_init
static void
m32c_trampoline_init (rtx m_tramp, tree fndecl, rtx chainval)
{
  rtx function = XEXP (DECL_RTL (fndecl), 0);

  /* Shorthand: byte offset I into the trampoline block, accessed in
     mode M.  */
#define A0(m,i) adjust_address (m_tramp, m, i)
  if (TARGET_A16)
    {
      /* Note: we subtract a "word" because the moves want signed
	 constants, not unsigned constants.  */
      /* 0x..75 0xc4 = "mov.w #imm,a0", 0xfc = "jmp.a" -- see the
	 assembly listing in the comment above.  */
      emit_move_insn (A0 (HImode, 0), GEN_INT (0xc475 - 0x10000));
      emit_move_insn (A0 (HImode, 2), chainval);
      emit_move_insn (A0 (QImode, 4), GEN_INT (0xfc - 0x100));
      /* We use 16-bit addresses here, but store the zero to turn it
	 into a 24-bit offset.  */
      emit_move_insn (A0 (HImode, 5), function);
      emit_move_insn (A0 (QImode, 7), GEN_INT (0x00));
    }
  else
    {
      /* Note that the PSI moves actually write 4 bytes.  Make sure we
	 write stuff out in the right order, and leave room for the
	 extra byte at the end.  */
      /* 0xbc = "mov.l:s #imm,a0", 0xcc = "jmp.a" -- see the assembly
	 listing in the comment above.  */
      emit_move_insn (A0 (QImode, 0), GEN_INT (0xbc - 0x100));
      emit_move_insn (A0 (PSImode, 1), chainval);
      emit_move_insn (A0 (QImode, 4), GEN_INT (0xcc - 0x100));
      emit_move_insn (A0 (PSImode, 5), function);
    }
#undef A0
}
1662
1663/* Addressing Modes */
1664
/* The r8c/m32c family supports a wide range of non-orthogonal
   addressing modes, including the ability to double-indirect on *some*
   of them.  Not all insns support all modes, either, but we rely on
   predicates and constraints to deal with that.  */
#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P m32c_legitimate_address_p
bool
m32c_legitimate_address_p (machine_mode mode, rtx x, bool strict)
{
  int mode_adjust;
  if (CONSTANT_P (x))
    return 1;

  /* Addresses must be the native pointer width (SImode is also
     allowed on A16, for far addressing).  */
  if (TARGET_A16 && GET_MODE (x) != HImode && GET_MODE (x) != SImode)
    return 0;
  if (TARGET_A24 && GET_MODE (x) != PSImode)
    return 0;

  /* Wide references to memory will be split after reload, so we must
     ensure that all parts of such splits remain legitimate
     addresses.  */
  mode_adjust = GET_MODE_SIZE (mode) - 1;

  /* allowing PLUS yields mem:HI(plus:SI(mem:SI(plus:SI in m32c_split_move */
  if (GET_CODE (x) == PRE_DEC
      || GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_MODIFY)
    {
      /* Auto-modification is only allowed on the stack pointer.  */
      return (GET_CODE (XEXP (x, 0)) == REG
	      && REGNO (XEXP (x, 0)) == SP_REGNO);
    }

#if 0
  /* This is the double indirection detection, but it currently
     doesn't work as cleanly as this code implies, so until we've had
     a chance to debug it, leave it disabled.  */
  if (TARGET_A24 && GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) != PLUS)
    {
#if DEBUG_DOUBLE
      fprintf (stderr, "double indirect\n");
#endif
      x = XEXP (x, 0);
    }
#endif

  encode_pattern (x);
  if (RTX_IS ("r"))
    {
      /* Most indexable registers can be used without displacements,
	 although some of them will be emitted with an explicit zero
	 to please the assembler.  */
      switch (REGNO (patternr[0]))
	{
	case A1_REGNO:
	case SB_REGNO:
	case FB_REGNO:
	case SP_REGNO:
	  if (TARGET_A16 && GET_MODE (x) == SImode)
	    return 0;
	  /* FALLTHRU */
	case A0_REGNO:
	  return 1;

	default:
	  if (IS_PSEUDO (patternr[0], strict))
	    return 1;
	  return 0;
	}
    }

  /* SImode addresses on A16 are only valid as a bare register,
     handled above.  */
  if (TARGET_A16 && GET_MODE (x) == SImode)
    return 0;

  if (RTX_IS ("+ri"))
    {
      /* This is more interesting, because different base registers
	 allow for different displacements - both range and signedness
	 - and it differs from chip series to chip series too.  */
      int rn = REGNO (patternr[1]);
      HOST_WIDE_INT offs = INTVAL (patternr[2]);
      switch (rn)
	{
	case A0_REGNO:
	case A1_REGNO:
	case SB_REGNO:
	  /* The syntax only allows positive offsets, but when the
	     offsets span the entire memory range, we can simulate
	     negative offsets by wrapping.  */
	  if (TARGET_A16)
	    return (offs >= -65536 && offs <= 65535 - mode_adjust);
	  if (rn == SB_REGNO)
	    return (offs >= 0 && offs <= 65535 - mode_adjust);
	  /* A0 or A1 */
	  return (offs >= -16777216 && offs <= 16777215);

	case FB_REGNO:
	  /* $fb takes a signed 8-bit displacement on A16, 16 bits on
	     A24.  */
	  if (TARGET_A16)
	    return (offs >= -128 && offs <= 127 - mode_adjust);
	  return (offs >= -65536 && offs <= 65535 - mode_adjust);

	case SP_REGNO:
	  return (offs >= -128 && offs <= 127 - mode_adjust);

	default:
	  if (IS_PSEUDO (patternr[1], strict))
	    return 1;
	  return 0;
	}
    }
  if (RTX_IS ("+rs") || RTX_IS ("+r+si"))
    {
      rtx reg = patternr[1];

      /* We don't know where the symbol is, so only allow base
	 registers which support displacements spanning the whole
	 address range.  */
      switch (REGNO (reg))
	{
	case A0_REGNO:
	case A1_REGNO:
	  /* $sb needs a secondary reload, but since it's involved in
	     memory address reloads too, we don't deal with it very
	     well.  */
	  /* case SB_REGNO: */
	  return 1;
	default:
	  if (GET_CODE (reg) == SUBREG)
	    return 0;
	  if (IS_PSEUDO (reg, strict))
	    return 1;
	  return 0;
	}
    }
  return 0;
}
1798
1799/* Implements REG_OK_FOR_BASE_P. */
1800int
1801m32c_reg_ok_for_base_p (rtx x, int strict)
1802{
1803 if (GET_CODE (x) != REG)
1804 return 0;
1805 switch (REGNO (x))
1806 {
1807 case A0_REGNO:
1808 case A1_REGNO:
1809 case SB_REGNO:
1810 case FB_REGNO:
1811 case SP_REGNO:
1812 return 1;
1813 default:
1814 if (IS_PSEUDO (x, strict))
1815 return 1;
1816 return 0;
1817 }
1818}
1819
71d46ffa 1820/* We have three choices for choosing fb->aN offsets. If we choose -128,
c910419d 1821 we need one MOVA -128[fb],aN opcode and 16-bit aN displacements,
71d46ffa 1822 like this:
1823 EB 4B FF mova -128[$fb],$a0
1824 D8 0C FF FF mov.w:Q #0,-1[$a0]
1825
c910419d 1826 Alternately, we subtract the frame size, and hopefully use 8-bit aN
71d46ffa 1827 displacements:
1828 7B F4 stc $fb,$a0
1829 77 54 00 01 sub #256,$a0
1830 D8 08 01 mov.w:Q #0,1[$a0]
1831
1832 If we don't offset (i.e. offset by zero), we end up with:
1833 7B F4 stc $fb,$a0
1834 D8 0C 00 FF mov.w:Q #0,-256[$a0]
1835
1836 We have to subtract *something* so that we have a PLUS rtx to mark
1837 that we've done this reload. The -128 offset will never result in
c910419d 1838 an 8-bit aN offset, and the payoff for the second case is five
71d46ffa 1839 loads *if* those loads are within 256 bytes of the other end of the
1840 frame, so the third case seems best. Note that we subtract the
1841 zero, but detect that in the addhi3 pattern. */
1842
25fe2cca 1843#define BIG_FB_ADJ 0
1844
/* Implements LEGITIMIZE_ADDRESS.  The only address we really have to
   worry about is frame base offsets, as $fb has a limited
   displacement range.  We deal with this by attempting to reload $fb
   itself into an address register; that seems to result in the best
   code.  */
#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS m32c_legitimize_address
static rtx
m32c_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			 machine_mode mode)
{
#if DEBUG0
  fprintf (stderr, "m32c_legitimize_address for mode %s\n", mode_name[mode]);
  debug_rtx (x);
  fprintf (stderr, "\n");
#endif

  /* Rewrite $fb+const addresses whose displacement doesn't fit the
     signed 8-bit $fb offset range (see m32c_legitimate_address_p).  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && REGNO (XEXP (x, 0)) == FB_REGNO
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && (INTVAL (XEXP (x, 1)) < -128
	  || INTVAL (XEXP (x, 1)) > (128 - GET_MODE_SIZE (mode))))
    {
      /* reload FB to A_REGS */
      rtx temp = gen_reg_rtx (Pmode);
      /* Copy before mutating, so the original address rtx is left
	 untouched.  */
      x = copy_rtx (x);
      emit_insn (gen_rtx_SET (temp, XEXP (x, 0)));
      XEXP (x, 0) = temp;
    }

  return x;
}
1878
/* Implements LEGITIMIZE_RELOAD_ADDRESS.  See comment above.  */
int
m32c_legitimize_reload_address (rtx * x,
				machine_mode mode,
				int opnum,
				int type, int ind_levels ATTRIBUTE_UNUSED)
{
#if DEBUG0
  fprintf (stderr, "\nm32c_legitimize_reload_address for mode %s\n",
	   mode_name[mode]);
  debug_rtx (*x);
#endif

  /* At one point, this function tried to get $fb copied to an address
     register, which in theory would maximize sharing, but gcc was
     *also* still trying to reload the whole address, and we'd run out
     of address registers.  So we let gcc do the naive (but safe)
     reload instead, when the above function doesn't handle it for
     us.

     The code below is a second attempt at the above.  */

  /* Case 1: $fb + out-of-range constant.  Split into
     ($fb - BIG_FB_ADJ) reloaded into an address register, plus the
     remaining displacement.  */
  if (GET_CODE (*x) == PLUS
      && GET_CODE (XEXP (*x, 0)) == REG
      && REGNO (XEXP (*x, 0)) == FB_REGNO
      && GET_CODE (XEXP (*x, 1)) == CONST_INT
      && (INTVAL (XEXP (*x, 1)) < -128
	  || INTVAL (XEXP (*x, 1)) > (128 - GET_MODE_SIZE (mode))))
    {
      rtx sum;
      int offset = INTVAL (XEXP (*x, 1));
      int adjustment = -BIG_FB_ADJ;

      sum = gen_rtx_PLUS (Pmode, XEXP (*x, 0),
			  GEN_INT (adjustment));
      *x = gen_rtx_PLUS (Pmode, sum, GEN_INT (offset - adjustment));
      if (type == RELOAD_OTHER)
	type = RELOAD_FOR_OTHER_ADDRESS;
      push_reload (sum, NULL_RTX, &XEXP (*x, 0), NULL,
		   A_REGS, Pmode, VOIDmode, 0, 0, opnum,
		   (enum reload_type) type);
      return 1;
    }

  /* Case 2: an address already split as (($fb + const) + const);
     reload the inner sum into an address register.  */
  if (GET_CODE (*x) == PLUS
      && GET_CODE (XEXP (*x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (*x, 0), 0)) == REG
      && REGNO (XEXP (XEXP (*x, 0), 0)) == FB_REGNO
      && GET_CODE (XEXP (XEXP (*x, 0), 1)) == CONST_INT
      && GET_CODE (XEXP (*x, 1)) == CONST_INT
      )
    {
      if (type == RELOAD_OTHER)
	type = RELOAD_FOR_OTHER_ADDRESS;
      push_reload (XEXP (*x, 0), NULL_RTX, &XEXP (*x, 0), NULL,
		   A_REGS, Pmode, VOIDmode, 0, 0, opnum,
		   (enum reload_type) type);
      return 1;
    }

  /* Case 3: on A24, reload any PSImode address into an address
     register.  */
  if (TARGET_A24 && GET_MODE (*x) == PSImode)
    {
      push_reload (*x, NULL_RTX, x, NULL,
		   A_REGS, PSImode, VOIDmode, 0, 0, opnum,
		   (enum reload_type) type);
      return 1;
    }

  return 0;
}
1949
d9530df8 1950/* Return the appropriate mode for a named address pointer. */
1951#undef TARGET_ADDR_SPACE_POINTER_MODE
1952#define TARGET_ADDR_SPACE_POINTER_MODE m32c_addr_space_pointer_mode
3754d046 1953static machine_mode
d9530df8 1954m32c_addr_space_pointer_mode (addr_space_t addrspace)
1955{
1956 switch (addrspace)
1957 {
1958 case ADDR_SPACE_GENERIC:
1959 return TARGET_A24 ? PSImode : HImode;
1960 case ADDR_SPACE_FAR:
1961 return SImode;
1962 default:
1963 gcc_unreachable ();
1964 }
1965}
1966
1967/* Return the appropriate mode for a named address address. */
1968#undef TARGET_ADDR_SPACE_ADDRESS_MODE
1969#define TARGET_ADDR_SPACE_ADDRESS_MODE m32c_addr_space_address_mode
3754d046 1970static machine_mode
d9530df8 1971m32c_addr_space_address_mode (addr_space_t addrspace)
1972{
1973 switch (addrspace)
1974 {
1975 case ADDR_SPACE_GENERIC:
1976 return TARGET_A24 ? PSImode : HImode;
1977 case ADDR_SPACE_FAR:
1978 return SImode;
1979 default:
1980 gcc_unreachable ();
1981 }
1982}
1983
/* Like m32c_legitimate_address_p, except with named addresses.  */
#undef TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P
#define TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P \
  m32c_addr_space_legitimate_address_p
static bool
m32c_addr_space_legitimate_address_p (machine_mode mode, rtx x,
				      bool strict, addr_space_t as)
{
  if (as == ADDR_SPACE_FAR)
    {
      /* Far addressing is only supported on A16 parts.  */
      if (TARGET_A24)
	return 0;
      encode_pattern (x);
      /* Bare register: must be a 32-bit $a0 (or a pseudo).  */
      if (RTX_IS ("r"))
	{
	  if (GET_MODE (x) != SImode)
	    return 0;
	  switch (REGNO (patternr[0]))
	    {
	    case A0_REGNO:
	      return 1;

	    default:
	      if (IS_PSEUDO (patternr[0], strict))
		return 1;
	      return 0;
	    }
	}
      /* Register plus constant displacement: $a0 takes a 20-bit
	 unsigned offset.  */
      if (RTX_IS ("+^Sri"))
	{
	  int rn = REGNO (patternr[3]);
	  HOST_WIDE_INT offs = INTVAL (patternr[4]);
	  if (GET_MODE (patternr[3]) != HImode)
	    return 0;
	  switch (rn)
	    {
	    case A0_REGNO:
	      return (offs >= 0 && offs <= 0xfffff);

	    default:
	      if (IS_PSEUDO (patternr[3], strict))
		return 1;
	      return 0;
	    }
	}
      /* Register plus symbolic displacement: $a0 (or pseudo) only.  */
      if (RTX_IS ("+^Srs"))
	{
	  int rn = REGNO (patternr[3]);
	  if (GET_MODE (patternr[3]) != HImode)
	    return 0;
	  switch (rn)
	    {
	    case A0_REGNO:
	      return 1;

	    default:
	      if (IS_PSEUDO (patternr[3], strict))
		return 1;
	      return 0;
	    }
	}
      /* Register plus symbol plus constant: likewise $a0 only.  */
      if (RTX_IS ("+^S+ris"))
	{
	  int rn = REGNO (patternr[4]);
	  if (GET_MODE (patternr[4]) != HImode)
	    return 0;
	  switch (rn)
	    {
	    case A0_REGNO:
	      return 1;

	    default:
	      if (IS_PSEUDO (patternr[4], strict))
		return 1;
	      return 0;
	    }
	}
      /* A bare symbol is always acceptable.  */
      if (RTX_IS ("s"))
	{
	  return 1;
	}
      return 0;
    }

  else if (as != ADDR_SPACE_GENERIC)
    gcc_unreachable ();

  /* Generic addresses follow the normal rules.  */
  return m32c_legitimate_address_p (mode, x, strict);
}
2073
2074/* Like m32c_legitimate_address, except with named address support. */
2075#undef TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS
2076#define TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS m32c_addr_space_legitimize_address
2077static rtx
3754d046 2078m32c_addr_space_legitimize_address (rtx x, rtx oldx, machine_mode mode,
d9530df8 2079 addr_space_t as)
2080{
2081 if (as != ADDR_SPACE_GENERIC)
2082 {
2083#if DEBUG0
2084 fprintf (stderr, "\033[36mm32c_addr_space_legitimize_address for mode %s\033[0m\n", mode_name[mode]);
2085 debug_rtx (x);
2086 fprintf (stderr, "\n");
2087#endif
2088
2089 if (GET_CODE (x) != REG)
2090 {
2091 x = force_reg (SImode, x);
2092 }
2093 return x;
2094 }
2095
2096 return m32c_legitimize_address (x, oldx, mode);
2097}
2098
2099/* Determine if one named address space is a subset of another. */
2100#undef TARGET_ADDR_SPACE_SUBSET_P
2101#define TARGET_ADDR_SPACE_SUBSET_P m32c_addr_space_subset_p
2102static bool
2103m32c_addr_space_subset_p (addr_space_t subset, addr_space_t superset)
2104{
2105 gcc_assert (subset == ADDR_SPACE_GENERIC || subset == ADDR_SPACE_FAR);
2106 gcc_assert (superset == ADDR_SPACE_GENERIC || superset == ADDR_SPACE_FAR);
2107
2108 if (subset == superset)
2109 return true;
2110
2111 else
2112 return (subset == ADDR_SPACE_GENERIC && superset == ADDR_SPACE_FAR);
2113}
2114
#undef TARGET_ADDR_SPACE_CONVERT
#define TARGET_ADDR_SPACE_CONVERT m32c_addr_space_convert
/* Convert from one address space to another.  Implements
   TARGET_ADDR_SPACE_CONVERT.  Near (generic) pointers are HImode and
   far pointers are SImode, so conversion is a truncation or a
   zero-extension.  FROM_TYPE and TO_TYPE are the pointer types; OP is
   the pointer value to convert.  Returns a fresh pseudo holding the
   converted value.  */
static rtx
m32c_addr_space_convert (rtx op, tree from_type, tree to_type)
{
  addr_space_t from_as = TYPE_ADDR_SPACE (TREE_TYPE (from_type));
  addr_space_t to_as = TYPE_ADDR_SPACE (TREE_TYPE (to_type));
  rtx result;

  gcc_assert (from_as == ADDR_SPACE_GENERIC || from_as == ADDR_SPACE_FAR);
  gcc_assert (to_as == ADDR_SPACE_GENERIC || to_as == ADDR_SPACE_FAR);

  if (to_as == ADDR_SPACE_GENERIC && from_as == ADDR_SPACE_FAR)
    {
      /* This is unpredictable, as we're truncating off usable address
	 bits.  */

      result = gen_reg_rtx (HImode);
      emit_move_insn (result, simplify_subreg (HImode, op, SImode, 0));
      return result;
    }
  else if (to_as == ADDR_SPACE_FAR && from_as == ADDR_SPACE_GENERIC)
    {
      /* This always works.  */
      result = gen_reg_rtx (SImode);
      emit_insn (gen_zero_extendhisi2 (result, op));
      return result;
    }
  else
    gcc_unreachable ();
}
2147
85c84d5c 2148/* Condition Code Status */
2149
2150#undef TARGET_FIXED_CONDITION_CODE_REGS
2151#define TARGET_FIXED_CONDITION_CODE_REGS m32c_fixed_condition_code_regs
2152static bool
2153m32c_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
2154{
2155 *p1 = FLG_REGNO;
2156 *p2 = INVALID_REGNUM;
2157 return true;
2158}
2159
2160/* Describing Relative Costs of Operations */
2161
4cf1a89b 2162/* Implements TARGET_REGISTER_MOVE_COST. We make impossible moves
85c84d5c 2163 prohibitively expensive, like trying to put QIs in r2/r3 (there are
2164 no opcodes to do that). We also discourage use of mem* registers
2165 since they're really memory. */
4cf1a89b 2166
2167#undef TARGET_REGISTER_MOVE_COST
2168#define TARGET_REGISTER_MOVE_COST m32c_register_move_cost
2169
2170static int
3754d046 2171m32c_register_move_cost (machine_mode mode, reg_class_t from,
4cf1a89b 2172 reg_class_t to)
85c84d5c 2173{
2174 int cost = COSTS_N_INSNS (3);
4cf1a89b 2175 HARD_REG_SET cc;
2176
2177/* FIXME: pick real values, but not 2 for now. */
2178 COPY_HARD_REG_SET (cc, reg_class_contents[(int) from]);
2179 IOR_HARD_REG_SET (cc, reg_class_contents[(int) to]);
2180
2181 if (mode == QImode
2182 && hard_reg_set_intersect_p (cc, reg_class_contents[R23_REGS]))
85c84d5c 2183 {
4cf1a89b 2184 if (hard_reg_set_subset_p (cc, reg_class_contents[R23_REGS]))
85c84d5c 2185 cost = COSTS_N_INSNS (1000);
2186 else
2187 cost = COSTS_N_INSNS (80);
2188 }
2189
2190 if (!class_can_hold_mode (from, mode) || !class_can_hold_mode (to, mode))
2191 cost = COSTS_N_INSNS (1000);
2192
4cf1a89b 2193 if (reg_classes_intersect_p (from, CR_REGS))
85c84d5c 2194 cost += COSTS_N_INSNS (5);
2195
4cf1a89b 2196 if (reg_classes_intersect_p (to, CR_REGS))
85c84d5c 2197 cost += COSTS_N_INSNS (5);
2198
2199 if (from == MEM_REGS || to == MEM_REGS)
2200 cost += COSTS_N_INSNS (50);
4cf1a89b 2201 else if (reg_classes_intersect_p (from, MEM_REGS)
2202 || reg_classes_intersect_p (to, MEM_REGS))
85c84d5c 2203 cost += COSTS_N_INSNS (10);
2204
2205#if DEBUG0
2206 fprintf (stderr, "register_move_cost %s from %s to %s = %d\n",
4cf1a89b 2207 mode_name[mode], class_names[(int) from], class_names[(int) to],
2208 cost);
85c84d5c 2209#endif
2210 return cost;
2211}
2212
4cf1a89b 2213/* Implements TARGET_MEMORY_MOVE_COST. */
2214
2215#undef TARGET_MEMORY_MOVE_COST
2216#define TARGET_MEMORY_MOVE_COST m32c_memory_move_cost
2217
2218static int
3754d046 2219m32c_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
4cf1a89b 2220 reg_class_t rclass ATTRIBUTE_UNUSED,
2221 bool in ATTRIBUTE_UNUSED)
85c84d5c 2222{
2223 /* FIXME: pick real values. */
2224 return COSTS_N_INSNS (10);
2225}
2226
fedc146b 2227/* Here we try to describe when we use multiple opcodes for one RTX so
2228 that gcc knows when to use them. */
2229#undef TARGET_RTX_COSTS
2230#define TARGET_RTX_COSTS m32c_rtx_costs
2231static bool
20d892d1 2232m32c_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
2233 int *total, bool speed ATTRIBUTE_UNUSED)
fedc146b 2234{
2235 switch (code)
2236 {
2237 case REG:
2238 if (REGNO (x) >= MEM0_REGNO && REGNO (x) <= MEM7_REGNO)
2239 *total += COSTS_N_INSNS (500);
2240 else
2241 *total += COSTS_N_INSNS (1);
2242 return true;
2243
2244 case ASHIFT:
2245 case LSHIFTRT:
2246 case ASHIFTRT:
2247 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
2248 {
2249 /* mov.b r1l, r1h */
2250 *total += COSTS_N_INSNS (1);
2251 return true;
2252 }
2253 if (INTVAL (XEXP (x, 1)) > 8
2254 || INTVAL (XEXP (x, 1)) < -8)
2255 {
2256 /* mov.b #N, r1l */
2257 /* mov.b r1l, r1h */
2258 *total += COSTS_N_INSNS (2);
2259 return true;
2260 }
2261 return true;
2262
2263 case LE:
2264 case LEU:
2265 case LT:
2266 case LTU:
2267 case GT:
2268 case GTU:
2269 case GE:
2270 case GEU:
2271 case NE:
2272 case EQ:
2273 if (outer_code == SET)
2274 {
2275 *total += COSTS_N_INSNS (2);
2276 return true;
2277 }
2278 break;
2279
2280 case ZERO_EXTRACT:
2281 {
2282 rtx dest = XEXP (x, 0);
2283 rtx addr = XEXP (dest, 0);
2284 switch (GET_CODE (addr))
2285 {
2286 case CONST_INT:
2287 *total += COSTS_N_INSNS (1);
2288 break;
2289 case SYMBOL_REF:
2290 *total += COSTS_N_INSNS (3);
2291 break;
2292 default:
2293 *total += COSTS_N_INSNS (2);
2294 break;
2295 }
2296 return true;
2297 }
2298 break;
2299
2300 default:
2301 /* Reasonable default. */
2302 if (TARGET_A16 && GET_MODE(x) == SImode)
2303 *total += COSTS_N_INSNS (2);
2304 break;
2305 }
2306 return false;
2307}
2308
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST m32c_address_cost
/* Implements TARGET_ADDRESS_COST.  Addresses with small (or zero)
   displacements encode more compactly, so they're cheaper; the cost
   steps up as the displacement needs 8, 16, or more bits.  */
static int
m32c_address_cost (rtx addr, machine_mode mode ATTRIBUTE_UNUSED,
		   addr_space_t as ATTRIBUTE_UNUSED,
		   bool speed ATTRIBUTE_UNUSED)
{
  int i;
  /*  fprintf(stderr, "\naddress_cost\n");
      debug_rtx(addr);*/
  switch (GET_CODE (addr))
    {
    case CONST_INT:
      /* Absolute address: cost by how many bytes the constant needs.
	 Note that negative constants get the worst-case cost.  */
      i = INTVAL (addr);
      if (i == 0)
	return COSTS_N_INSNS(1);
      if (0 < i && i <= 255)
	return COSTS_N_INSNS(2);
      if (0 < i && i <= 65535)
	return COSTS_N_INSNS(3);
      return COSTS_N_INSNS(4);
    case SYMBOL_REF:
      return COSTS_N_INSNS(4);
    case REG:
      return COSTS_N_INSNS(1);
    case PLUS:
      /* Register plus displacement: cost by displacement size.  */
      if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
	{
	  i = INTVAL (XEXP (addr, 1));
	  if (i == 0)
	    return COSTS_N_INSNS(1);
	  if (0 < i && i <= 255)
	    return COSTS_N_INSNS(2);
	  if (0 < i && i <= 65535)
	    return COSTS_N_INSNS(3);
	}
      return COSTS_N_INSNS(4);
    default:
      return 0;
    }
}
2350
85c84d5c 2351/* Defining the Output Assembler Language */
2352
85c84d5c 2353/* Output of Data */
2354
2355/* We may have 24 bit sizes, which is the native address size.
2356 Currently unused, but provided for completeness. */
2357#undef TARGET_ASM_INTEGER
2358#define TARGET_ASM_INTEGER m32c_asm_integer
2359static bool
2360m32c_asm_integer (rtx x, unsigned int size, int aligned_p)
2361{
2362 switch (size)
2363 {
2364 case 3:
2365 fprintf (asm_out_file, "\t.3byte\t");
2366 output_addr_const (asm_out_file, x);
2367 fputc ('\n', asm_out_file);
2368 return true;
0a8d9665 2369 case 4:
2370 if (GET_CODE (x) == SYMBOL_REF)
2371 {
2372 fprintf (asm_out_file, "\t.long\t");
2373 output_addr_const (asm_out_file, x);
2374 fputc ('\n', asm_out_file);
2375 return true;
2376 }
2377 break;
85c84d5c 2378 }
2379 return default_assemble_integer (x, size, aligned_p);
2380}
2381
2382/* Output of Assembler Instructions */
2383
/* We use a lookup table because the addressing modes are non-orthogonal.  */

/* Each entry maps (operand-modifier CODE, encoded operand PATTERN) to
   an output FORMAT string, consumed by m32c_print_operand:
     - a digit N prints patternr[N] (filled in by encode_pattern);
     - '\\' quotes the next format character literally;
     - 'z' emits a "0" displacement when the base register needs one;
     - '+' marks a displacement whose sign must be blended with a
       preceding symbol;
     - any other character is emitted as-is (e.g. '#', '[', ']').  */
static struct
{
  char code;
  char const *pattern;
  char const *format;
}
const conversions[] = {
  { 0, "r", "0" },

  { 0, "mr", "z[1]" },
  { 0, "m+ri", "3[2]" },
  { 0, "m+rs", "3[2]" },
  { 0, "m+^Zrs", "5[4]" },
  { 0, "m+^Zri", "5[4]" },
  { 0, "m+^Z+ris", "7+6[5]" },
  { 0, "m+^Srs", "5[4]" },
  { 0, "m+^Sri", "5[4]" },
  { 0, "m+^S+ris", "7+6[5]" },
  { 0, "m+r+si", "4+5[2]" },
  { 0, "ms", "1" },
  { 0, "mi", "1" },
  { 0, "m+si", "2+3" },

  { 0, "mmr", "[z[2]]" },
  { 0, "mm+ri", "[4[3]]" },
  { 0, "mm+rs", "[4[3]]" },
  { 0, "mm+r+si", "[5+6[3]]" },
  { 0, "mms", "[[2]]" },
  { 0, "mmi", "[[2]]" },
  { 0, "mm+si", "[4[3]]" },

  { 0, "i", "#0" },
  { 0, "s", "#0" },
  { 0, "+si", "#1+2" },
  { 0, "l", "#0" },

  { 'l', "l", "0" },
  { 'd', "i", "0" },
  { 'd', "s", "0" },
  { 'd', "+si", "1+2" },
  { 'D', "i", "0" },
  { 'D', "s", "0" },
  { 'D', "+si", "1+2" },
  { 'x', "i", "#0" },
  { 'X', "i", "#0" },
  { 'm', "i", "#0" },
  { 'b', "i", "#0" },
  { 'B', "i", "0" },
  { 'p', "i", "0" },

  { 0, 0, 0 }
};
2438
/* This is in order according to the bitfield that pushm/popm use.
   Bit 7 (MSB) selects r0, down to bit 0 which selects fb; see the 'p'
   modifier in m32c_print_operand.  */
static char const *pushm_regs[] = {
  "fb", "sb", "a1", "a0", "r3", "r2", "r1", "r0"
};
2443
b9e1ef49 2444/* Implements TARGET_PRINT_OPERAND. */
2445
2446#undef TARGET_PRINT_OPERAND
2447#define TARGET_PRINT_OPERAND m32c_print_operand
2448
2449static void
85c84d5c 2450m32c_print_operand (FILE * file, rtx x, int code)
2451{
2452 int i, j, b;
2453 const char *comma;
2454 HOST_WIDE_INT ival;
2455 int unsigned_const = 0;
54536dfe 2456 int force_sign;
85c84d5c 2457
2458 /* Multiplies; constants are converted to sign-extended format but
2459 we need unsigned, so 'u' and 'U' tell us what size unsigned we
2460 need. */
2461 if (code == 'u')
2462 {
2463 unsigned_const = 2;
2464 code = 0;
2465 }
2466 if (code == 'U')
2467 {
2468 unsigned_const = 1;
2469 code = 0;
2470 }
2471 /* This one is only for debugging; you can put it in a pattern to
2472 force this error. */
2473 if (code == '!')
2474 {
2475 fprintf (stderr, "dj: unreviewed pattern:");
2476 if (current_output_insn)
2477 debug_rtx (current_output_insn);
2478 gcc_unreachable ();
2479 }
2480 /* PSImode operations are either .w or .l depending on the target. */
2481 if (code == '&')
2482 {
2483 if (TARGET_A16)
2484 fprintf (file, "w");
2485 else
2486 fprintf (file, "l");
2487 return;
2488 }
2489 /* Inverted conditionals. */
2490 if (code == 'C')
2491 {
2492 switch (GET_CODE (x))
2493 {
2494 case LE:
2495 fputs ("gt", file);
2496 break;
2497 case LEU:
2498 fputs ("gtu", file);
2499 break;
2500 case LT:
2501 fputs ("ge", file);
2502 break;
2503 case LTU:
2504 fputs ("geu", file);
2505 break;
2506 case GT:
2507 fputs ("le", file);
2508 break;
2509 case GTU:
2510 fputs ("leu", file);
2511 break;
2512 case GE:
2513 fputs ("lt", file);
2514 break;
2515 case GEU:
2516 fputs ("ltu", file);
2517 break;
2518 case NE:
2519 fputs ("eq", file);
2520 break;
2521 case EQ:
2522 fputs ("ne", file);
2523 break;
2524 default:
2525 gcc_unreachable ();
2526 }
2527 return;
2528 }
2529 /* Regular conditionals. */
2530 if (code == 'c')
2531 {
2532 switch (GET_CODE (x))
2533 {
2534 case LE:
2535 fputs ("le", file);
2536 break;
2537 case LEU:
2538 fputs ("leu", file);
2539 break;
2540 case LT:
2541 fputs ("lt", file);
2542 break;
2543 case LTU:
2544 fputs ("ltu", file);
2545 break;
2546 case GT:
2547 fputs ("gt", file);
2548 break;
2549 case GTU:
2550 fputs ("gtu", file);
2551 break;
2552 case GE:
2553 fputs ("ge", file);
2554 break;
2555 case GEU:
2556 fputs ("geu", file);
2557 break;
2558 case NE:
2559 fputs ("ne", file);
2560 break;
2561 case EQ:
2562 fputs ("eq", file);
2563 break;
2564 default:
2565 gcc_unreachable ();
2566 }
2567 return;
2568 }
2569 /* Used in negsi2 to do HImode ops on the two parts of an SImode
2570 operand. */
2571 if (code == 'h' && GET_MODE (x) == SImode)
2572 {
2573 x = m32c_subreg (HImode, x, SImode, 0);
2574 code = 0;
2575 }
2576 if (code == 'H' && GET_MODE (x) == SImode)
2577 {
2578 x = m32c_subreg (HImode, x, SImode, 2);
2579 code = 0;
2580 }
fedc146b 2581 if (code == 'h' && GET_MODE (x) == HImode)
2582 {
2583 x = m32c_subreg (QImode, x, HImode, 0);
2584 code = 0;
2585 }
2586 if (code == 'H' && GET_MODE (x) == HImode)
2587 {
2588 /* We can't actually represent this as an rtx. Do it here. */
2589 if (GET_CODE (x) == REG)
2590 {
2591 switch (REGNO (x))
2592 {
2593 case R0_REGNO:
2594 fputs ("r0h", file);
2595 return;
2596 case R1_REGNO:
2597 fputs ("r1h", file);
2598 return;
2599 default:
2600 gcc_unreachable();
2601 }
2602 }
2603 /* This should be a MEM. */
2604 x = m32c_subreg (QImode, x, HImode, 1);
2605 code = 0;
2606 }
2607 /* This is for BMcond, which always wants word register names. */
2608 if (code == 'h' && GET_MODE (x) == QImode)
2609 {
2610 if (GET_CODE (x) == REG)
2611 x = gen_rtx_REG (HImode, REGNO (x));
2612 code = 0;
2613 }
85c84d5c 2614 /* 'x' and 'X' need to be ignored for non-immediates. */
2615 if ((code == 'x' || code == 'X') && GET_CODE (x) != CONST_INT)
2616 code = 0;
2617
2618 encode_pattern (x);
54536dfe 2619 force_sign = 0;
85c84d5c 2620 for (i = 0; conversions[i].pattern; i++)
2621 if (conversions[i].code == code
2622 && streq (conversions[i].pattern, pattern))
2623 {
2624 for (j = 0; conversions[i].format[j]; j++)
2625 /* backslash quotes the next character in the output pattern. */
2626 if (conversions[i].format[j] == '\\')
2627 {
2628 fputc (conversions[i].format[j + 1], file);
2629 j++;
2630 }
2631 /* Digits in the output pattern indicate that the
2632 corresponding RTX is to be output at that point. */
2633 else if (ISDIGIT (conversions[i].format[j]))
2634 {
2635 rtx r = patternr[conversions[i].format[j] - '0'];
2636 switch (GET_CODE (r))
2637 {
2638 case REG:
2639 fprintf (file, "%s",
2640 reg_name_with_mode (REGNO (r), GET_MODE (r)));
2641 break;
2642 case CONST_INT:
2643 switch (code)
2644 {
2645 case 'b':
fedc146b 2646 case 'B':
2647 {
2648 int v = INTVAL (r);
2649 int i = (int) exact_log2 (v);
2650 if (i == -1)
2651 i = (int) exact_log2 ((v ^ 0xffff) & 0xffff);
2652 if (i == -1)
2653 i = (int) exact_log2 ((v ^ 0xff) & 0xff);
2654 /* Bit position. */
2655 fprintf (file, "%d", i);
2656 }
85c84d5c 2657 break;
2658 case 'x':
2659 /* Unsigned byte. */
2660 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2661 INTVAL (r) & 0xff);
2662 break;
2663 case 'X':
2664 /* Unsigned word. */
2665 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2666 INTVAL (r) & 0xffff);
2667 break;
2668 case 'p':
2669 /* pushm and popm encode a register set into a single byte. */
2670 comma = "";
2671 for (b = 7; b >= 0; b--)
2672 if (INTVAL (r) & (1 << b))
2673 {
2674 fprintf (file, "%s%s", comma, pushm_regs[b]);
2675 comma = ",";
2676 }
2677 break;
2678 case 'm':
2679 /* "Minus". Output -X */
2680 ival = (-INTVAL (r) & 0xffff);
2681 if (ival & 0x8000)
2682 ival = ival - 0x10000;
2683 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2684 break;
2685 default:
2686 ival = INTVAL (r);
2687 if (conversions[i].format[j + 1] == '[' && ival < 0)
2688 {
2689 /* We can simulate negative displacements by
2690 taking advantage of address space
2691 wrapping when the offset can span the
2692 entire address range. */
2693 rtx base =
2694 patternr[conversions[i].format[j + 2] - '0'];
2695 if (GET_CODE (base) == REG)
2696 switch (REGNO (base))
2697 {
2698 case A0_REGNO:
2699 case A1_REGNO:
2700 if (TARGET_A24)
2701 ival = 0x1000000 + ival;
2702 else
2703 ival = 0x10000 + ival;
2704 break;
2705 case SB_REGNO:
2706 if (TARGET_A16)
2707 ival = 0x10000 + ival;
2708 break;
2709 }
2710 }
2711 else if (code == 'd' && ival < 0 && j == 0)
2712 /* The "mova" opcode is used to do addition by
2713 computing displacements, but again, we need
2714 displacements to be unsigned *if* they're
2715 the only component of the displacement
2716 (i.e. no "symbol-4" type displacement). */
2717 ival = (TARGET_A24 ? 0x1000000 : 0x10000) + ival;
2718
2719 if (conversions[i].format[j] == '0')
2720 {
2721 /* More conversions to unsigned. */
2722 if (unsigned_const == 2)
2723 ival &= 0xffff;
2724 if (unsigned_const == 1)
2725 ival &= 0xff;
2726 }
2727 if (streq (conversions[i].pattern, "mi")
2728 || streq (conversions[i].pattern, "mmi"))
2729 {
2730 /* Integers used as addresses are unsigned. */
2731 ival &= (TARGET_A24 ? 0xffffff : 0xffff);
2732 }
54536dfe 2733 if (force_sign && ival >= 0)
2734 fputc ('+', file);
85c84d5c 2735 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2736 break;
2737 }
2738 break;
2739 case CONST_DOUBLE:
2740 /* We don't have const_double constants. If it
2741 happens, make it obvious. */
2742 fprintf (file, "[const_double 0x%lx]",
2743 (unsigned long) CONST_DOUBLE_HIGH (r));
2744 break;
2745 case SYMBOL_REF:
2746 assemble_name (file, XSTR (r, 0));
2747 break;
2748 case LABEL_REF:
2749 output_asm_label (r);
2750 break;
2751 default:
2752 fprintf (stderr, "don't know how to print this operand:");
2753 debug_rtx (r);
2754 gcc_unreachable ();
2755 }
2756 }
2757 else
2758 {
2759 if (conversions[i].format[j] == 'z')
2760 {
2761 /* Some addressing modes *must* have a displacement,
2762 so insert a zero here if needed. */
2763 int k;
2764 for (k = j + 1; conversions[i].format[k]; k++)
2765 if (ISDIGIT (conversions[i].format[k]))
2766 {
2767 rtx reg = patternr[conversions[i].format[k] - '0'];
2768 if (GET_CODE (reg) == REG
2769 && (REGNO (reg) == SB_REGNO
2770 || REGNO (reg) == FB_REGNO
2771 || REGNO (reg) == SP_REGNO))
2772 fputc ('0', file);
2773 }
2774 continue;
2775 }
2776 /* Signed displacements off symbols need to have signs
2777 blended cleanly. */
2778 if (conversions[i].format[j] == '+'
54536dfe 2779 && (!code || code == 'D' || code == 'd')
85c84d5c 2780 && ISDIGIT (conversions[i].format[j + 1])
54536dfe 2781 && (GET_CODE (patternr[conversions[i].format[j + 1] - '0'])
2782 == CONST_INT))
2783 {
2784 force_sign = 1;
2785 continue;
2786 }
85c84d5c 2787 fputc (conversions[i].format[j], file);
2788 }
2789 break;
2790 }
2791 if (!conversions[i].pattern)
2792 {
2793 fprintf (stderr, "unconvertible operand %c `%s'", code ? code : '-',
2794 pattern);
2795 debug_rtx (x);
2796 fprintf (file, "[%c.%s]", code ? code : '-', pattern);
2797 }
2798
2799 return;
2800}
2801
b9e1ef49 2802/* Implements TARGET_PRINT_OPERAND_PUNCT_VALID_P.
2803
2804 See m32c_print_operand above for descriptions of what these do. */
2805
2806#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
2807#define TARGET_PRINT_OPERAND_PUNCT_VALID_P m32c_print_operand_punct_valid_p
2808
2809static bool
2810m32c_print_operand_punct_valid_p (unsigned char c)
85c84d5c 2811{
2812 if (c == '&' || c == '!')
b9e1ef49 2813 return true;
2814
2815 return false;
85c84d5c 2816}
2817
b9e1ef49 2818/* Implements TARGET_PRINT_OPERAND_ADDRESS. Nothing unusual here. */
2819
2820#undef TARGET_PRINT_OPERAND_ADDRESS
2821#define TARGET_PRINT_OPERAND_ADDRESS m32c_print_operand_address
2822
2823static void
85c84d5c 2824m32c_print_operand_address (FILE * stream, rtx address)
2825{
c46bf770 2826 if (GET_CODE (address) == MEM)
2827 address = XEXP (address, 0);
2828 else
2829 /* cf: gcc.dg/asm-4.c. */
2830 gcc_assert (GET_CODE (address) == REG);
2831
2832 m32c_print_operand (stream, address, 0);
85c84d5c 2833}
2834
2835/* Implements ASM_OUTPUT_REG_PUSH. Control registers are pushed
2836 differently than general registers. */
2837void
2838m32c_output_reg_push (FILE * s, int regno)
2839{
2840 if (regno == FLG_REGNO)
2841 fprintf (s, "\tpushc\tflg\n");
2842 else
71d46ffa 2843 fprintf (s, "\tpush.%c\t%s\n",
85c84d5c 2844 " bwll"[reg_push_size (regno)], reg_names[regno]);
2845}
2846
2847/* Likewise for ASM_OUTPUT_REG_POP. */
2848void
2849m32c_output_reg_pop (FILE * s, int regno)
2850{
2851 if (regno == FLG_REGNO)
2852 fprintf (s, "\tpopc\tflg\n");
2853 else
71d46ffa 2854 fprintf (s, "\tpop.%c\t%s\n",
85c84d5c 2855 " bwll"[reg_push_size (regno)], reg_names[regno]);
2856}
2857
2858/* Defining target-specific uses of `__attribute__' */
2859
/* Used to simplify the logic below.  Find the attributes wherever
   they may be: on the type itself, on the decl, or failing that on
   the decl's type.  */
#define M32C_ATTRIBUTES(decl) \
  (TYPE_P (decl)) ? TYPE_ATTRIBUTES (decl) \
                  : DECL_ATTRIBUTES (decl) \
                    ? (DECL_ATTRIBUTES (decl)) \
                    : TYPE_ATTRIBUTES (TREE_TYPE (decl))
2867
2868/* Returns TRUE if the given tree has the "interrupt" attribute. */
2869static int
2870interrupt_p (tree node ATTRIBUTE_UNUSED)
2871{
2872 tree list = M32C_ATTRIBUTES (node);
2873 while (list)
2874 {
2875 if (is_attribute_p ("interrupt", TREE_PURPOSE (list)))
2876 return 1;
2877 list = TREE_CHAIN (list);
2878 }
cc24427c 2879 return fast_interrupt_p (node);
2880}
2881
2882/* Returns TRUE if the given tree has the "bank_switch" attribute. */
2883static int
2884bank_switch_p (tree node ATTRIBUTE_UNUSED)
2885{
2886 tree list = M32C_ATTRIBUTES (node);
2887 while (list)
2888 {
2889 if (is_attribute_p ("bank_switch", TREE_PURPOSE (list)))
2890 return 1;
2891 list = TREE_CHAIN (list);
2892 }
2893 return 0;
2894}
2895
2896/* Returns TRUE if the given tree has the "fast_interrupt" attribute. */
2897static int
2898fast_interrupt_p (tree node ATTRIBUTE_UNUSED)
2899{
2900 tree list = M32C_ATTRIBUTES (node);
2901 while (list)
2902 {
2903 if (is_attribute_p ("fast_interrupt", TREE_PURPOSE (list)))
2904 return 1;
2905 list = TREE_CHAIN (list);
2906 }
85c84d5c 2907 return 0;
2908}
2909
/* Attribute handler for "interrupt", "bank_switch" and
   "fast_interrupt".  These attributes take no arguments and need no
   validation, so this accepts them unconditionally.  */
static tree
interrupt_handler (tree * node ATTRIBUTE_UNUSED,
		   tree name ATTRIBUTE_UNUSED,
		   tree args ATTRIBUTE_UNUSED,
		   int flags ATTRIBUTE_UNUSED,
		   bool * no_add_attrs ATTRIBUTE_UNUSED)
{
  return NULL_TREE;
}
2919
2efce110 2920/* Returns TRUE if given tree has the "function_vector" attribute. */
2921int
2922m32c_special_page_vector_p (tree func)
2923{
6276c4d1 2924 tree list;
2925
2efce110 2926 if (TREE_CODE (func) != FUNCTION_DECL)
2927 return 0;
2928
6276c4d1 2929 list = M32C_ATTRIBUTES (func);
2efce110 2930 while (list)
2931 {
2932 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2933 return 1;
2934 list = TREE_CHAIN (list);
2935 }
2936 return 0;
2937}
2938
/* Attribute handler for "function_vector".  Validates that the target
   supports it, that it is applied to a function, and that its single
   argument is an integer constant in [18, 255]; otherwise warns and
   drops the attribute via *NO_ADD_ATTRS.  */
static tree
function_vector_handler (tree * node ATTRIBUTE_UNUSED,
			 tree name ATTRIBUTE_UNUSED,
			 tree args ATTRIBUTE_UNUSED,
			 int flags ATTRIBUTE_UNUSED,
			 bool * no_add_attrs ATTRIBUTE_UNUSED)
{
  if (TARGET_R8C)
    {
      /* The attribute is not supported for R8C target.  */
      warning (OPT_Wattributes,
                "%qE attribute is not supported for R8C target",
                name);
      *no_add_attrs = true;
    }
  else if (TREE_CODE (*node) != FUNCTION_DECL)
    {
      /* The attribute must be applied to functions only.  */
      warning (OPT_Wattributes,
                "%qE attribute applies only to functions",
                name);
      *no_add_attrs = true;
    }
  else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
    {
      /* The argument must be a constant integer.  */
      warning (OPT_Wattributes,
                "%qE attribute argument not an integer constant",
                name);
      *no_add_attrs = true;
    }
  else if (TREE_INT_CST_LOW (TREE_VALUE (args)) < 18
           || TREE_INT_CST_LOW (TREE_VALUE (args)) > 255)
    {
      /* The argument value must be between 18 to 255.  */
      warning (OPT_Wattributes,
                "%qE attribute argument should be between 18 to 255",
                name);
      *no_add_attrs = true;
    }
  return NULL_TREE;
}
2981
2982/* If the function is assigned the attribute 'function_vector', it
2983 returns the function vector number, otherwise returns zero. */
2984int
2985current_function_special_page_vector (rtx x)
2986{
2987 int num;
2988
2989 if ((GET_CODE(x) == SYMBOL_REF)
2990 && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
2991 {
6276c4d1 2992 tree list;
2efce110 2993 tree t = SYMBOL_REF_DECL (x);
2994
2995 if (TREE_CODE (t) != FUNCTION_DECL)
2996 return 0;
2997
6276c4d1 2998 list = M32C_ATTRIBUTES (t);
2efce110 2999 while (list)
3000 {
3001 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
3002 {
3003 num = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list)));
3004 return num;
3005 }
3006
3007 list = TREE_CHAIN (list);
3008 }
3009
3010 return 0;
3011 }
3012 else
3013 return 0;
3014}
3015
85c84d5c 3016#undef TARGET_ATTRIBUTE_TABLE
3017#define TARGET_ATTRIBUTE_TABLE m32c_attribute_table
3018static const struct attribute_spec m32c_attribute_table[] = {
ac86af5d 3019 {"interrupt", 0, 0, false, false, false, interrupt_handler, false},
3020 {"bank_switch", 0, 0, false, false, false, interrupt_handler, false},
3021 {"fast_interrupt", 0, 0, false, false, false, interrupt_handler, false},
3022 {"function_vector", 1, 1, true, false, false, function_vector_handler,
3023 false},
3024 {0, 0, 0, 0, 0, 0, 0, false}
85c84d5c 3025};
3026
3027#undef TARGET_COMP_TYPE_ATTRIBUTES
3028#define TARGET_COMP_TYPE_ATTRIBUTES m32c_comp_type_attributes
3029static int
a9f1838b 3030m32c_comp_type_attributes (const_tree type1 ATTRIBUTE_UNUSED,
3031 const_tree type2 ATTRIBUTE_UNUSED)
85c84d5c 3032{
3033 /* 0=incompatible 1=compatible 2=warning */
3034 return 1;
3035}
3036
3037#undef TARGET_INSERT_ATTRIBUTES
3038#define TARGET_INSERT_ATTRIBUTES m32c_insert_attributes
3039static void
3040m32c_insert_attributes (tree node ATTRIBUTE_UNUSED,
3041 tree * attr_ptr ATTRIBUTE_UNUSED)
3042{
e3d4e41e 3043 unsigned addr;
3044 /* See if we need to make #pragma address variables volatile. */
3045
3046 if (TREE_CODE (node) == VAR_DECL)
3047 {
1675aa0a 3048 const char *name = IDENTIFIER_POINTER (DECL_NAME (node));
e3d4e41e 3049 if (m32c_get_pragma_address (name, &addr))
3050 {
3051 TREE_THIS_VOLATILE (node) = true;
3052 }
3053 }
3054}
3055
3056
2ef51f0e 3057struct pragma_traits : default_hashmap_traits
3058{
3059 static hashval_t hash (const char *str) { return htab_hash_string (str); }
3060 static bool
3061 equal_keys (const char *a, const char *b)
3062 {
3063 return !strcmp (a, b);
3064 }
e3d4e41e 3065};
e3d4e41e 3066
/* Hash table of pragma info: maps variable name -> fixed address.
   GC-rooted; created lazily by m32c_note_pragma_address.  */
static GTY(()) hash_map<const char *, unsigned, pragma_traits> *pragma_htab;
e3d4e41e 3069
3070void
3071m32c_note_pragma_address (const char *varname, unsigned address)
3072{
e3d4e41e 3073 if (!pragma_htab)
2ef51f0e 3074 pragma_htab
3075 = hash_map<const char *, unsigned, pragma_traits>::create_ggc (31);
e3d4e41e 3076
2ef51f0e 3077 const char *name = ggc_strdup (varname);
3078 unsigned int *slot = &pragma_htab->get_or_insert (name);
3079 *slot = address;
e3d4e41e 3080}
3081
3082static bool
3083m32c_get_pragma_address (const char *varname, unsigned *address)
3084{
e3d4e41e 3085 if (!pragma_htab)
3086 return false;
3087
2ef51f0e 3088 unsigned int *slot = pragma_htab->get (varname);
3089 if (slot)
e3d4e41e 3090 {
2ef51f0e 3091 *address = *slot;
e3d4e41e 3092 return true;
3093 }
3094 return false;
3095}
3096
/* Emit assembly for a common (or local common) symbol NAME of SIZE
   bytes and ALIGN bits.  Variables pinned with #pragma address are
   emitted as a plain symbol assignment instead of .comm; GLOBAL
   selects between .comm and .local+.comm.  */
void
m32c_output_aligned_common (FILE *stream, tree decl ATTRIBUTE_UNUSED,
			    const char *name,
			    int size, int align, int global)
{
  unsigned address;

  if (m32c_get_pragma_address (name, &address))
    {
      /* We never output these as global.  */
      assemble_name (stream, name);
      fprintf (stream, " = 0x%04x\n", address);
      return;
    }
  if (!global)
    {
      fprintf (stream, "\t.local\t");
      assemble_name (stream, name);
      fprintf (stream, "\n");
    }
  fprintf (stream, "\t.comm\t");
  assemble_name (stream, name);
  fprintf (stream, ",%u,%u\n", size, align / BITS_PER_UNIT);
}
3121
3122/* Predicates */
3123
80be3ac5 3124/* This is a list of legal subregs of hard regs. */
89adc165 3125static const struct {
3126 unsigned char outer_mode_size;
3127 unsigned char inner_mode_size;
3128 unsigned char byte_mask;
3129 unsigned char legal_when;
80be3ac5 3130 unsigned int regno;
80be3ac5 3131} legal_subregs[] = {
89adc165 3132 {1, 2, 0x03, 1, R0_REGNO}, /* r0h r0l */
3133 {1, 2, 0x03, 1, R1_REGNO}, /* r1h r1l */
3134 {1, 2, 0x01, 1, A0_REGNO},
3135 {1, 2, 0x01, 1, A1_REGNO},
80be3ac5 3136
89adc165 3137 {1, 4, 0x01, 1, A0_REGNO},
3138 {1, 4, 0x01, 1, A1_REGNO},
80be3ac5 3139
89adc165 3140 {2, 4, 0x05, 1, R0_REGNO}, /* r2 r0 */
3141 {2, 4, 0x05, 1, R1_REGNO}, /* r3 r1 */
3142 {2, 4, 0x05, 16, A0_REGNO}, /* a1 a0 */
3143 {2, 4, 0x01, 24, A0_REGNO}, /* a1 a0 */
3144 {2, 4, 0x01, 24, A1_REGNO}, /* a1 a0 */
80be3ac5 3145
89adc165 3146 {4, 8, 0x55, 1, R0_REGNO}, /* r3 r1 r2 r0 */
80be3ac5 3147};
3148
/* Returns TRUE if OP is a subreg of a hard reg which we don't
   support.  We also bail on MEMs with illegal addresses.  Anything
   not matched by an applicable legal_subregs[] entry is illegal.  */
bool
m32c_illegal_subreg_p (rtx op)
{
  int offset;
  unsigned int i;
  machine_mode src_mode, dest_mode;

  if (GET_CODE (op) == MEM
      && ! m32c_legitimate_address_p (Pmode, XEXP (op, 0), false))
    {
      return true;
    }

  if (GET_CODE (op) != SUBREG)
    return false;

  dest_mode = GET_MODE (op);
  offset = SUBREG_BYTE (op);
  op = SUBREG_REG (op);
  src_mode = GET_MODE (op);

  /* Same-size subregs are mode punning, not partial access.  */
  if (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (src_mode))
    return false;
  if (GET_CODE (op) != REG)
    return false;
  /* Pseudos and mem* registers are unrestricted.  */
  if (REGNO (op) >= MEM0_REGNO)
    return false;

  /* Convert the byte offset into a bit for matching byte_mask.  */
  offset = (1 << offset);

  for (i = 0; i < ARRAY_SIZE (legal_subregs); i ++)
    if (legal_subregs[i].outer_mode_size == GET_MODE_SIZE (dest_mode)
	&& legal_subregs[i].regno == REGNO (op)
	&& legal_subregs[i].inner_mode_size == GET_MODE_SIZE (src_mode)
	&& legal_subregs[i].byte_mask & offset)
      {
	switch (legal_subregs[i].legal_when)
	  {
	  case 1:
	    return false;
	  case 16:
	    if (TARGET_A16)
	      return false;
	    break;
	  case 24:
	    if (TARGET_A24)
	      return false;
	    break;
	  }
      }
  return true;
}
3203
85c84d5c 3204/* Returns TRUE if we support a move between the first two operands.
3205 At the moment, we just want to discourage mem to mem moves until
3206 after reload, because reload has a hard time with our limited
3207 number of address registers, and we can get into a situation where
3208 we need three of them when we only have two. */
3209bool
3754d046 3210m32c_mov_ok (rtx * operands, machine_mode mode ATTRIBUTE_UNUSED)
85c84d5c 3211{
3212 rtx op0 = operands[0];
3213 rtx op1 = operands[1];
3214
3215 if (TARGET_A24)
3216 return true;
3217
3218#define DEBUG_MOV_OK 0
3219#if DEBUG_MOV_OK
3220 fprintf (stderr, "m32c_mov_ok %s\n", mode_name[mode]);
3221 debug_rtx (op0);
3222 debug_rtx (op1);
3223#endif
3224
3225 if (GET_CODE (op0) == SUBREG)
3226 op0 = XEXP (op0, 0);
3227 if (GET_CODE (op1) == SUBREG)
3228 op1 = XEXP (op1, 0);
3229
3230 if (GET_CODE (op0) == MEM
3231 && GET_CODE (op1) == MEM
3232 && ! reload_completed)
3233 {
3234#if DEBUG_MOV_OK
3235 fprintf (stderr, " - no, mem to mem\n");
3236#endif
3237 return false;
3238 }
3239
3240#if DEBUG_MOV_OK
3241 fprintf (stderr, " - ok\n");
3242#endif
3243 return true;
3244}
3245
54536dfe 3246/* Returns TRUE if two consecutive HImode mov instructions, generated
3247 for moving an immediate double data to a double data type variable
3248 location, can be combined into single SImode mov instruction. */
3249bool
402f6a9e 3250m32c_immd_dbl_mov (rtx * operands ATTRIBUTE_UNUSED,
3754d046 3251 machine_mode mode ATTRIBUTE_UNUSED)
54536dfe 3252{
402f6a9e 3253 /* ??? This relied on the now-defunct MEM_SCALAR and MEM_IN_STRUCT_P
3254 flags. */
54536dfe 3255 return false;
3256}
3257
/* Expanders */

/* Subregs are non-orthogonal for us, because our registers are all
   different sizes.  Return an rtx for the piece of X (whose mode is
   INNER) selected by a subreg of mode OUTER at byte offset BYTE.
   For hard registers this maps (reg, byte) pairs onto the specific
   hard register that holds those bytes; for everything else we defer
   to simplify_gen_subreg.  */
static rtx
m32c_subreg (machine_mode outer,
	     rtx x, machine_mode inner, int byte)
{
  int r, nr = -1;

  /* Converting MEMs to different types that are the same size, we
     just rewrite them.  */
  if (GET_CODE (x) == SUBREG
      && SUBREG_BYTE (x) == 0
      && GET_CODE (SUBREG_REG (x)) == MEM
      && (GET_MODE_SIZE (GET_MODE (x))
	  == GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
    {
      rtx oldx = x;
      x = gen_rtx_MEM (GET_MODE (x), XEXP (SUBREG_REG (x), 0));
      MEM_COPY_ATTRIBUTES (x, SUBREG_REG (oldx));
    }

  /* Push/pop get done as smaller push/pops.  */
  if (GET_CODE (x) == MEM
      && (GET_CODE (XEXP (x, 0)) == PRE_DEC
	  || GET_CODE (XEXP (x, 0)) == POST_INC))
    return gen_rtx_MEM (outer, XEXP (x, 0));
  if (GET_CODE (x) == SUBREG
      && GET_CODE (XEXP (x, 0)) == MEM
      && (GET_CODE (XEXP (XEXP (x, 0), 0)) == PRE_DEC
	  || GET_CODE (XEXP (XEXP (x, 0), 0)) == POST_INC))
    return gen_rtx_MEM (outer, XEXP (XEXP (x, 0), 0));

  if (GET_CODE (x) != REG)
    {
      /* Note: this inner R (an rtx) deliberately shadows the outer
	 int R declared above; only this branch uses it.  */
      rtx r = simplify_gen_subreg (outer, x, inner, byte);
      if (GET_CODE (r) == SUBREG
	  && GET_CODE (x) == MEM
	  && MEM_VOLATILE_P (x))
	{
	  /* Volatile MEMs don't get simplified, but we need them to
	     be.  We are little endian, so the subreg byte is the
	     offset.  */
	  r = adjust_address_nv (x, outer, byte);
	}
      return r;
    }

  r = REGNO (x);
  /* Pseudos and the arg pointer have no special layout; let the
     generic code handle them.  */
  if (r >= FIRST_PSEUDO_REGISTER || r == AP_REGNO)
    return simplify_gen_subreg (outer, x, inner, byte);

  if (IS_MEM_REGNO (r))
    return simplify_gen_subreg (outer, x, inner, byte);

  /* This is where the complexities of our register layout are
     described.  */
  if (byte == 0)
    nr = r;
  else if (outer == HImode)
    {
      if (r == R0_REGNO && byte == 2)
	nr = R2_REGNO;
      else if (r == R0_REGNO && byte == 4)
	nr = R1_REGNO;
      else if (r == R0_REGNO && byte == 6)
	nr = R3_REGNO;
      else if (r == R1_REGNO && byte == 2)
	nr = R3_REGNO;
      else if (r == A0_REGNO && byte == 2)
	nr = A1_REGNO;
    }
  else if (outer == SImode)
    {
      if (r == R0_REGNO && byte == 0)
	nr = R0_REGNO;
      else if (r == R0_REGNO && byte == 4)
	nr = R1_REGNO;
    }
  /* Any combination not listed above is unsupported; dump it so the
     missing case can be added.  */
  if (nr == -1)
    {
      fprintf (stderr, "m32c_subreg %s %s %d\n",
	       mode_name[outer], mode_name[inner], byte);
      debug_rtx (x);
      gcc_unreachable ();
    }
  return gen_rtx_REG (outer, nr);
}
3347
3348/* Used to emit move instructions. We split some moves,
3349 and avoid mem-mem moves. */
3350int
3754d046 3351m32c_prepare_move (rtx * operands, machine_mode mode)
85c84d5c 3352{
d9530df8 3353 if (far_addr_space_p (operands[0])
3354 && CONSTANT_P (operands[1]))
3355 {
3356 operands[1] = force_reg (GET_MODE (operands[0]), operands[1]);
3357 }
85c84d5c 3358 if (TARGET_A16 && mode == PSImode)
3359 return m32c_split_move (operands, mode, 1);
3360 if ((GET_CODE (operands[0]) == MEM)
3361 && (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY))
3362 {
3363 rtx pmv = XEXP (operands[0], 0);
3364 rtx dest_reg = XEXP (pmv, 0);
3365 rtx dest_mod = XEXP (pmv, 1);
3366
d1f9b275 3367 emit_insn (gen_rtx_SET (dest_reg, dest_mod));
85c84d5c 3368 operands[0] = gen_rtx_MEM (mode, dest_reg);
3369 }
e1ba4a27 3370 if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
85c84d5c 3371 operands[1] = copy_to_mode_reg (mode, operands[1]);
3372 return 0;
3373}
3374
3375#define DEBUG_SPLIT 0
3376
3377/* Returns TRUE if the given PSImode move should be split. We split
3378 for all r8c/m16c moves, since it doesn't support them, and for
3379 POP.L as we can only *push* SImode. */
3380int
3381m32c_split_psi_p (rtx * operands)
3382{
3383#if DEBUG_SPLIT
3384 fprintf (stderr, "\nm32c_split_psi_p\n");
3385 debug_rtx (operands[0]);
3386 debug_rtx (operands[1]);
3387#endif
3388 if (TARGET_A16)
3389 {
3390#if DEBUG_SPLIT
3391 fprintf (stderr, "yes, A16\n");
3392#endif
3393 return 1;
3394 }
3395 if (GET_CODE (operands[1]) == MEM
3396 && GET_CODE (XEXP (operands[1], 0)) == POST_INC)
3397 {
3398#if DEBUG_SPLIT
3399 fprintf (stderr, "yes, pop.l\n");
3400#endif
3401 return 1;
3402 }
3403#if DEBUG_SPLIT
3404 fprintf (stderr, "no, default\n");
3405#endif
3406 return 0;
3407}
3408
/* Split the given move.  SPLIT_ALL is 0 if splitting is optional
   (define_expand), 1 if it is not optional (define_insn_and_split),
   and 3 for define_split (alternate api).  OPERANDS[0] is the
   destination and OPERANDS[1] the source, both of mode MODE.
   Returns nonzero if the move was (or must be) split.  */
int
m32c_split_move (rtx * operands, machine_mode mode, int split_all)
{
  rtx s[4], d[4];
  int parts, si, di, rev = 0;
  /* OPI starts at 2 so that, in the define_split case where OPS
     aliases OPERANDS, the original two operands are preserved and the
     generated pairs land in operands[2..].  */
  int rv = 0, opi = 2;
  machine_mode submode = HImode;
  rtx *ops, local_ops[10];

  /* define_split modifies the existing operands, but the other two
     emit new insns.  OPS is where we store the operand pairs, which
     we emit later.  */
  if (split_all == 3)
    ops = operands;
  else
    ops = local_ops;

  /* Else HImode.  */
  if (mode == DImode)
    submode = SImode;

  /* Before splitting mem-mem moves, force one operand into a
     register.  */
  if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
    {
#if DEBUG0
      fprintf (stderr, "force_reg...\n");
      debug_rtx (operands[1]);
#endif
      operands[1] = force_reg (mode, operands[1]);
#if DEBUG0
      debug_rtx (operands[1]);
#endif
    }

  /* Every supported mode splits into exactly two SUBMODE pieces.  */
  parts = 2;

#if DEBUG_SPLIT
  fprintf (stderr, "\nsplit_move %d all=%d\n", !can_create_pseudo_p (),
	   split_all);
  debug_rtx (operands[0]);
  debug_rtx (operands[1]);
#endif

  /* Note that split_all is not used to select the api after this
     point, so it's safe to set it to 3 even with define_insn.  */
  /* None of the chips can move SI operands to sp-relative addresses,
     so we always split those.  */
  if (satisfies_constraint_Ss (operands[0]))
    split_all = 3;

  /* On A16, far-address-space accesses must be split too.  */
  if (TARGET_A16
      && (far_addr_space_p (operands[0])
	  || far_addr_space_p (operands[1])))
    split_all |= 1;

  /* We don't need to split these.  */
  if (TARGET_A24
      && split_all != 3
      && (mode == SImode || mode == PSImode)
      && !(GET_CODE (operands[1]) == MEM
	   && GET_CODE (XEXP (operands[1], 0)) == POST_INC))
    return 0;

  /* First, enumerate the subregs we'll be dealing with.  */
  for (si = 0; si < parts; si++)
    {
      d[si] =
	m32c_subreg (submode, operands[0], mode,
		     si * GET_MODE_SIZE (submode));
      s[si] =
	m32c_subreg (submode, operands[1], mode,
		     si * GET_MODE_SIZE (submode));
    }

  /* Split pushes by emitting a sequence of smaller pushes.  */
  if (GET_CODE (d[0]) == MEM && GET_CODE (XEXP (d[0], 0)) == PRE_DEC)
    {
      /* Most-significant part is pushed first (stack grows down).  */
      for (si = parts - 1; si >= 0; si--)
	{
	  ops[opi++] = gen_rtx_MEM (submode,
				    gen_rtx_PRE_DEC (Pmode,
						     gen_rtx_REG (Pmode,
								  SP_REGNO)));
	  ops[opi++] = s[si];
	}

      rv = 1;
    }
  /* Likewise for pops.  */
  else if (GET_CODE (s[0]) == MEM && GET_CODE (XEXP (s[0], 0)) == POST_INC)
    {
      for (di = 0; di < parts; di++)
	{
	  ops[opi++] = d[di];
	  ops[opi++] = gen_rtx_MEM (submode,
				    gen_rtx_POST_INC (Pmode,
						      gen_rtx_REG (Pmode,
								   SP_REGNO)));
	}
      rv = 1;
    }
  else if (split_all)
    {
      /* if d[di] == s[si] for any di < si, we'll early clobber.  */
      for (di = 0; di < parts - 1; di++)
	for (si = di + 1; si < parts; si++)
	  if (reg_mentioned_p (d[di], s[si]))
	    rev = 1;

      /* Emit the part-moves in whichever order avoids clobbering a
	 source piece before it has been read.  */
      if (rev)
	for (si = 0; si < parts; si++)
	  {
	    ops[opi++] = d[si];
	    ops[opi++] = s[si];
	  }
      else
	for (si = parts - 1; si >= 0; si--)
	  {
	    ops[opi++] = d[si];
	    ops[opi++] = s[si];
	  }
      rv = 1;
    }
  /* Now emit any moves we may have accumulated.  */
  if (rv && split_all != 3)
    {
      int i;
      for (i = 2; i < opi; i += 2)
	emit_move_insn (ops[i], ops[i + 1]);
    }
  return rv;
}
3545
fedc146b 3546/* The m32c has a number of opcodes that act like memcpy, strcmp, and
3547 the like. For the R8C they expect one of the addresses to be in
3548 R1L:An so we need to arrange for that. Otherwise, it's just a
3549 matter of picking out the operands we want and emitting the right
3550 pattern for them. All these expanders, which correspond to
3551 patterns in blkmov.md, must return nonzero if they expand the insn,
3552 or zero if they should FAIL. */
3553
3554/* This is a memset() opcode. All operands are implied, so we need to
3555 arrange for them to be in the right registers. The opcode wants
3556 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3557 the count (HI), and $2 the value (QI). */
3558int
3559m32c_expand_setmemhi(rtx *operands)
3560{
3561 rtx desta, count, val;
3562 rtx desto, counto;
3563
3564 desta = XEXP (operands[0], 0);
3565 count = operands[1];
3566 val = operands[2];
3567
3568 desto = gen_reg_rtx (Pmode);
3569 counto = gen_reg_rtx (HImode);
3570
3571 if (GET_CODE (desta) != REG
3572 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3573 desta = copy_to_mode_reg (Pmode, desta);
3574
3575 /* This looks like an arbitrary restriction, but this is by far the
3576 most common case. For counts 8..14 this actually results in
3577 smaller code with no speed penalty because the half-sized
3578 constant can be loaded with a shorter opcode. */
3579 if (GET_CODE (count) == CONST_INT
3580 && GET_CODE (val) == CONST_INT
3581 && ! (INTVAL (count) & 1)
3582 && (INTVAL (count) > 1)
3583 && (INTVAL (val) <= 7 && INTVAL (val) >= -8))
3584 {
3585 unsigned v = INTVAL (val) & 0xff;
3586 v = v | (v << 8);
3587 count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3588 val = copy_to_mode_reg (HImode, GEN_INT (v));
3589 if (TARGET_A16)
3590 emit_insn (gen_setmemhi_whi_op (desto, counto, val, desta, count));
3591 else
3592 emit_insn (gen_setmemhi_wpsi_op (desto, counto, val, desta, count));
3593 return 1;
3594 }
3595
3596 /* This is the generalized memset() case. */
3597 if (GET_CODE (val) != REG
3598 || REGNO (val) < FIRST_PSEUDO_REGISTER)
3599 val = copy_to_mode_reg (QImode, val);
3600
3601 if (GET_CODE (count) != REG
3602 || REGNO (count) < FIRST_PSEUDO_REGISTER)
3603 count = copy_to_mode_reg (HImode, count);
3604
3605 if (TARGET_A16)
3606 emit_insn (gen_setmemhi_bhi_op (desto, counto, val, desta, count));
3607 else
3608 emit_insn (gen_setmemhi_bpsi_op (desto, counto, val, desta, count));
3609
3610 return 1;
3611}
3612
3613/* This is a memcpy() opcode. All operands are implied, so we need to
3614 arrange for them to be in the right registers. The opcode wants
3615 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3616 is the source (MEM:BLK), and $2 the count (HI). */
3617int
3618m32c_expand_movmemhi(rtx *operands)
3619{
3620 rtx desta, srca, count;
3621 rtx desto, srco, counto;
3622
3623 desta = XEXP (operands[0], 0);
3624 srca = XEXP (operands[1], 0);
3625 count = operands[2];
3626
3627 desto = gen_reg_rtx (Pmode);
3628 srco = gen_reg_rtx (Pmode);
3629 counto = gen_reg_rtx (HImode);
3630
3631 if (GET_CODE (desta) != REG
3632 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3633 desta = copy_to_mode_reg (Pmode, desta);
3634
3635 if (GET_CODE (srca) != REG
3636 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3637 srca = copy_to_mode_reg (Pmode, srca);
3638
3639 /* Similar to setmem, but we don't need to check the value. */
3640 if (GET_CODE (count) == CONST_INT
3641 && ! (INTVAL (count) & 1)
3642 && (INTVAL (count) > 1))
3643 {
3644 count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3645 if (TARGET_A16)
3646 emit_insn (gen_movmemhi_whi_op (desto, srco, counto, desta, srca, count));
3647 else
3648 emit_insn (gen_movmemhi_wpsi_op (desto, srco, counto, desta, srca, count));
3649 return 1;
3650 }
3651
3652 /* This is the generalized memset() case. */
3653 if (GET_CODE (count) != REG
3654 || REGNO (count) < FIRST_PSEUDO_REGISTER)
3655 count = copy_to_mode_reg (HImode, count);
3656
3657 if (TARGET_A16)
3658 emit_insn (gen_movmemhi_bhi_op (desto, srco, counto, desta, srca, count));
3659 else
3660 emit_insn (gen_movmemhi_bpsi_op (desto, srco, counto, desta, srca, count));
3661
3662 return 1;
3663}
3664
3665/* This is a stpcpy() opcode. $0 is the destination (MEM:BLK) after
3666 the copy, which should point to the NUL at the end of the string,
3667 $1 is the destination (MEM:BLK), and $2 is the source (MEM:BLK).
3668 Since our opcode leaves the destination pointing *after* the NUL,
3669 we must emit an adjustment. */
3670int
3671m32c_expand_movstr(rtx *operands)
3672{
3673 rtx desta, srca;
3674 rtx desto, srco;
3675
3676 desta = XEXP (operands[1], 0);
3677 srca = XEXP (operands[2], 0);
3678
3679 desto = gen_reg_rtx (Pmode);
3680 srco = gen_reg_rtx (Pmode);
3681
3682 if (GET_CODE (desta) != REG
3683 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3684 desta = copy_to_mode_reg (Pmode, desta);
3685
3686 if (GET_CODE (srca) != REG
3687 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3688 srca = copy_to_mode_reg (Pmode, srca);
3689
3690 emit_insn (gen_movstr_op (desto, srco, desta, srca));
3691 /* desto ends up being a1, which allows this type of add through MOVA. */
3692 emit_insn (gen_addpsi3 (operands[0], desto, GEN_INT (-1)));
3693
3694 return 1;
3695}
3696
3697/* This is a strcmp() opcode. $0 is the destination (HI) which holds
3698 <=>0 depending on the comparison, $1 is one string (MEM:BLK), and
3699 $2 is the other (MEM:BLK). We must do the comparison, and then
3700 convert the flags to a signed integer result. */
3701int
3702m32c_expand_cmpstr(rtx *operands)
3703{
3704 rtx src1a, src2a;
3705
3706 src1a = XEXP (operands[1], 0);
3707 src2a = XEXP (operands[2], 0);
3708
3709 if (GET_CODE (src1a) != REG
3710 || REGNO (src1a) < FIRST_PSEUDO_REGISTER)
3711 src1a = copy_to_mode_reg (Pmode, src1a);
3712
3713 if (GET_CODE (src2a) != REG
3714 || REGNO (src2a) < FIRST_PSEUDO_REGISTER)
3715 src2a = copy_to_mode_reg (Pmode, src2a);
3716
3717 emit_insn (gen_cmpstrhi_op (src1a, src2a, src1a, src2a));
3718 emit_insn (gen_cond_to_int (operands[0]));
3719
3720 return 1;
3721}
3722
3723
3fd11504 3724typedef rtx (*shift_gen_func)(rtx, rtx, rtx);
3725
3726static shift_gen_func
3727shift_gen_func_for (int mode, int code)
3728{
3729#define GFF(m,c,f) if (mode == m && code == c) return f
3730 GFF(QImode, ASHIFT, gen_ashlqi3_i);
3731 GFF(QImode, ASHIFTRT, gen_ashrqi3_i);
3732 GFF(QImode, LSHIFTRT, gen_lshrqi3_i);
3733 GFF(HImode, ASHIFT, gen_ashlhi3_i);
3734 GFF(HImode, ASHIFTRT, gen_ashrhi3_i);
3735 GFF(HImode, LSHIFTRT, gen_lshrhi3_i);
3736 GFF(PSImode, ASHIFT, gen_ashlpsi3_i);
3737 GFF(PSImode, ASHIFTRT, gen_ashrpsi3_i);
3738 GFF(PSImode, LSHIFTRT, gen_lshrpsi3_i);
3739 GFF(SImode, ASHIFT, TARGET_A16 ? gen_ashlsi3_16 : gen_ashlsi3_24);
3740 GFF(SImode, ASHIFTRT, TARGET_A16 ? gen_ashrsi3_16 : gen_ashrsi3_24);
3741 GFF(SImode, LSHIFTRT, TARGET_A16 ? gen_lshrsi3_16 : gen_lshrsi3_24);
3742#undef GFF
fedc146b 3743 gcc_unreachable ();
3fd11504 3744}
3745
/* The m32c only has one shift, but it takes a signed count.  GCC
   doesn't want this, so we fake it by negating any shift count when
   we're pretending to shift the other way.  Also, the shift count is
   limited to -8..8.  It's slightly better to use two shifts for 9..15
   than to load the count into r1h, so we do that too.

   SCALE multiplies a constant count; when SCALE is negative a
   register count is negated instead (the shift pattern contains the
   matching NEG).  Returns 1 if the whole shift was emitted here, or
   0 if the caller should emit it using the (possibly rewritten)
   OPERANDS.  */
int
m32c_prepare_shift (rtx * operands, int scale, int shift_code)
{
  machine_mode mode = GET_MODE (operands[0]);
  shift_gen_func func = shift_gen_func_for (mode, shift_code);
  rtx temp;

  if (GET_CODE (operands[2]) == CONST_INT)
    {
      /* Constant count: emit a chain of shifts of at most MAXC bits
	 each until the remainder fits in one insn.  */
      int maxc = TARGET_A24 && (mode == PSImode || mode == SImode) ? 32 : 8;
      int count = INTVAL (operands[2]) * scale;

      while (count > maxc)
	{
	  temp = gen_reg_rtx (mode);
	  emit_insn (func (temp, operands[1], GEN_INT (maxc)));
	  operands[1] = temp;
	  count -= maxc;
	}
      while (count < -maxc)
	{
	  temp = gen_reg_rtx (mode);
	  emit_insn (func (temp, operands[1], GEN_INT (-maxc)));
	  operands[1] = temp;
	  count += maxc;
	}
      emit_insn (func (operands[0], operands[1], GEN_INT (count)));
      return 1;
    }

  /* Variable count: get it into a QImode temp, negated if needed.  */
  temp = gen_reg_rtx (QImode);
  if (scale < 0)
    /* The pattern has a NEG that corresponds to this.  */
    emit_move_insn (temp, gen_rtx_NEG (QImode, operands[2]));
  else if (TARGET_A16 && mode == SImode)
    /* We do this because the code below may modify this, we don't
       want to modify the origin of this value.  */
    emit_move_insn (temp, operands[2]);
  else
    /* We'll only use it for the shift, no point emitting a move.  */
    temp = operands[2];

  if (TARGET_A16 && GET_MODE_SIZE (mode) == 4)
    {
      /* The m16c has a limit of -16..16 for SI shifts, even when the
	 shift count is in a register.  Since there are so many targets
	 of these shifts, it's better to expand the RTL here than to
	 call a helper function.

	 The resulting code looks something like this:

	 cmp.b r1h,-16
	 jge.b 1f
	 shl.l -16,dest
	 add.b r1h,16
	 1f: cmp.b r1h,16
	 jle.b 1f
	 shl.l 16,dest
	 sub.b r1h,16
	 1f: shl.l r1h,dest

	 We take advantage of the fact that "negative" shifts are
	 undefined to skip one of the comparisons.  */

      rtx count;
      rtx label, tempvar;
      rtx_insn *insn;

      emit_move_insn (operands[0], operands[1]);

      count = temp;
      label = gen_label_rtx ();
      LABEL_NUSES (label) ++;

      tempvar = gen_reg_rtx (mode);

      if (shift_code == ASHIFT)
	{
	  /* This is a left shift.  We only need check positive counts.  */
	  emit_jump_insn (gen_cbranchqi4 (gen_rtx_LE (VOIDmode, 0, 0),
					  count, GEN_INT (16), label));
	  /* The 16-bit pre-shift is done as two 8-bit shifts.  */
	  emit_insn (func (tempvar, operands[0], GEN_INT (8)));
	  emit_insn (func (operands[0], tempvar, GEN_INT (8)));
	  insn = emit_insn (gen_addqi3 (count, count, GEN_INT (-16)));
	  emit_label_after (label, insn);
	}
      else
	{
	  /* This is a right shift.  We only need check negative counts.  */
	  emit_jump_insn (gen_cbranchqi4 (gen_rtx_GE (VOIDmode, 0, 0),
					  count, GEN_INT (-16), label));
	  emit_insn (func (tempvar, operands[0], GEN_INT (-8)));
	  emit_insn (func (operands[0], tempvar, GEN_INT (-8)));
	  insn = emit_insn (gen_addqi3 (count, count, GEN_INT (16)));
	  emit_label_after (label, insn);
	}
      operands[1] = operands[0];
      emit_insn (func (operands[0], operands[0], count));
      return 1;
    }

  operands[2] = temp;
  return 0;
}
3855
c47b8642 3856/* The m32c has a limited range of operations that work on PSImode
3857 values; we have to expand to SI, do the math, and truncate back to
3858 PSI. Yes, this is expensive, but hopefully gcc will learn to avoid
3859 those cases. */
3860void
3861m32c_expand_neg_mulpsi3 (rtx * operands)
3862{
3863 /* operands: a = b * i */
3864 rtx temp1; /* b as SI */
fedc146b 3865 rtx scale /* i as SI */;
3866 rtx temp2; /* a*b as SI */
c47b8642 3867
3868 temp1 = gen_reg_rtx (SImode);
3869 temp2 = gen_reg_rtx (SImode);
fedc146b 3870 if (GET_CODE (operands[2]) != CONST_INT)
3871 {
3872 scale = gen_reg_rtx (SImode);
3873 emit_insn (gen_zero_extendpsisi2 (scale, operands[2]));
3874 }
3875 else
3876 scale = copy_to_mode_reg (SImode, operands[2]);
c47b8642 3877
3878 emit_insn (gen_zero_extendpsisi2 (temp1, operands[1]));
fedc146b 3879 temp2 = expand_simple_binop (SImode, MULT, temp1, scale, temp2, 1, OPTAB_LIB);
3880 emit_insn (gen_truncsipsi2 (operands[0], temp2));
c47b8642 3881}
3882
85c84d5c 3883/* Pattern Output Functions */
3884
fedc146b 3885int
3886m32c_expand_movcc (rtx *operands)
3887{
3888 rtx rel = operands[1];
3d594561 3889
fedc146b 3890 if (GET_CODE (rel) != EQ && GET_CODE (rel) != NE)
3891 return 1;
3892 if (GET_CODE (operands[2]) != CONST_INT
3893 || GET_CODE (operands[3]) != CONST_INT)
3894 return 1;
fedc146b 3895 if (GET_CODE (rel) == NE)
3896 {
3897 rtx tmp = operands[2];
3898 operands[2] = operands[3];
3899 operands[3] = tmp;
74f4459c 3900 rel = gen_rtx_EQ (GET_MODE (rel), XEXP (rel, 0), XEXP (rel, 1));
fedc146b 3901 }
3d594561 3902
3d594561 3903 emit_move_insn (operands[0],
3904 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
74f4459c 3905 rel,
3d594561 3906 operands[2],
3907 operands[3]));
fedc146b 3908 return 0;
3909}
3910
/* Used for the "insv" pattern.  Return nonzero to fail, else done.
   operands[0] is the field container, operands[1] the field width
   (only 1 is supported), operands[2] the bit position, and
   operands[3] the value to insert (must be constant 0, 1 or -1).  */
int
m32c_expand_insv (rtx *operands)
{
  rtx op0, src0, p;
  int mask;

  /* Only single-bit insertions are handled.  */
  if (INTVAL (operands[1]) != 1)
    return 1;

  /* Our insv opcode (bset, bclr) can only insert a one-bit constant.  */
  if (GET_CODE (operands[3]) != CONST_INT)
    return 1;
  if (INTVAL (operands[3]) != 0
      && INTVAL (operands[3]) != 1
      && INTVAL (operands[3]) != -1)
    return 1;

  mask = 1 << INTVAL (operands[2]);

  /* If op0 is a zero-offset SUBREG of a QI/HI object, operate on the
     inner object directly.  */
  op0 = operands[0];
  if (GET_CODE (op0) == SUBREG
      && SUBREG_BYTE (op0) == 0)
    {
      rtx sub = SUBREG_REG (op0);
      if (GET_MODE (sub) == HImode || GET_MODE (sub) == QImode)
	op0 = sub;
    }

  /* Read through a fresh pseudo unless the object must be touched
     exactly once (volatile MEM) or we can no longer make pseudos.  */
  if (!can_create_pseudo_p ()
      || (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0)))
    src0 = op0;
  else
    {
      src0 = gen_reg_rtx (GET_MODE (op0));
      emit_move_insn (src0, op0);
    }

  if (GET_MODE (op0) == HImode
      && INTVAL (operands[2]) >= 8
      && GET_CODE (op0) == MEM)
    {
      /* We are little endian.  */
      rtx new_mem = gen_rtx_MEM (QImode, plus_constant (Pmode,
							XEXP (op0, 0), 1));
      MEM_COPY_ATTRIBUTES (new_mem, op0);
      /* NOTE(review): NEW_MEM is built but never used -- neither OP0
	 nor SRC0 is redirected to it, so only the mask shift below
	 takes effect.  Presumably "op0 = new_mem;" was intended;
	 confirm against upstream history before changing.  */
      mask >>= 8;
    }

  /* First, we generate a mask with the correct polarity.  If we are
     storing a zero, we want an AND mask, so invert it.  */
  if (INTVAL (operands[3]) == 0)
    {
      /* Storing a zero, use an AND mask */
      if (GET_MODE (op0) == HImode)
	mask ^= 0xffff;
      else
	mask ^= 0xff;
    }
  /* Now we need to properly sign-extend the mask in case we need to
     fall back to an AND or OR opcode.  */
  if (GET_MODE (op0) == HImode)
    {
      if (mask & 0x8000)
	mask -= 0x10000;
    }
  else
    {
      if (mask & 0x80)
	mask -= 0x100;
    }

  /* Select the and/ior pattern by polarity (clear vs. set), operand
     mode, and target family.  */
  switch ( (INTVAL (operands[3]) ? 4 : 0)
	   + ((GET_MODE (op0) == HImode) ? 2 : 0)
	   + (TARGET_A24 ? 1 : 0))
    {
    case 0: p = gen_andqi3_16 (op0, src0, GEN_INT (mask)); break;
    case 1: p = gen_andqi3_24 (op0, src0, GEN_INT (mask)); break;
    case 2: p = gen_andhi3_16 (op0, src0, GEN_INT (mask)); break;
    case 3: p = gen_andhi3_24 (op0, src0, GEN_INT (mask)); break;
    case 4: p = gen_iorqi3_16 (op0, src0, GEN_INT (mask)); break;
    case 5: p = gen_iorqi3_24 (op0, src0, GEN_INT (mask)); break;
    case 6: p = gen_iorhi3_16 (op0, src0, GEN_INT (mask)); break;
    case 7: p = gen_iorhi3_24 (op0, src0, GEN_INT (mask)); break;
    default: p = NULL_RTX; break; /* Not reached, but silences a warning.  */
    }

  emit_insn (p);
  return 0;
}
4001
4002const char *
4003m32c_scc_pattern(rtx *operands, RTX_CODE code)
4004{
4005 static char buf[30];
4006 if (GET_CODE (operands[0]) == REG
4007 && REGNO (operands[0]) == R0_REGNO)
4008 {
4009 if (code == EQ)
4010 return "stzx\t#1,#0,r0l";
4011 if (code == NE)
4012 return "stzx\t#0,#1,r0l";
4013 }
4014 sprintf(buf, "bm%s\t0,%%h0\n\tand.b\t#1,%%0", GET_RTX_NAME (code));
4015 return buf;
4016}
4017
2efce110 4018/* Encode symbol attributes of a SYMBOL_REF into its
4019 SYMBOL_REF_FLAGS. */
4020static void
4021m32c_encode_section_info (tree decl, rtx rtl, int first)
4022{
4023 int extra_flags = 0;
4024
4025 default_encode_section_info (decl, rtl, first);
4026 if (TREE_CODE (decl) == FUNCTION_DECL
4027 && m32c_special_page_vector_p (decl))
4028
4029 extra_flags = SYMBOL_FLAG_FUNCVEC_FUNCTION;
4030
4031 if (extra_flags)
4032 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= extra_flags;
4033}
4034
/* Returns TRUE if the current function is a leaf, and thus we can
   determine which registers an interrupt function really needs to
   save.  The logic below is mostly about finding the insn sequence
   that's the function, versus any sequence that might be open for the
   current insn.  */
static int
m32c_leaf_function_p (void)
{
  int leafp;

  /* Inspect the function's own insn stream, not whatever sequence
     happens to be open right now.  */
  push_topmost_sequence ();
  leafp = leaf_function_p ();
  pop_topmost_sequence ();

  return leafp;
}
4050
4051/* Returns TRUE if the current function needs to use the ENTER/EXIT
4052 opcodes. If the function doesn't need the frame base or stack
4053 pointer, it can use the simpler RTS opcode. */
4054static bool
4055m32c_function_needs_enter (void)
4056{
91a55c11 4057 rtx_insn *insn;
85c84d5c 4058 rtx sp = gen_rtx_REG (Pmode, SP_REGNO);
4059 rtx fb = gen_rtx_REG (Pmode, FB_REGNO);
4060
c36aa54b 4061 for (insn = get_topmost_sequence ()->first; insn; insn = NEXT_INSN (insn))
4062 if (NONDEBUG_INSN_P (insn))
4063 {
4064 if (reg_mentioned_p (sp, insn))
4065 return true;
4066 if (reg_mentioned_p (fb, insn))
4067 return true;
4068 }
85c84d5c 4069 return false;
4070}
4071
4072/* Mark all the subexpressions of the PARALLEL rtx PAR as
4073 frame-related. Return PAR.
4074
4075 dwarf2out.c:dwarf2out_frame_debug_expr ignores sub-expressions of a
4076 PARALLEL rtx other than the first if they do not have the
4077 FRAME_RELATED flag set on them. So this function is handy for
4078 marking up 'enter' instructions. */
4079static rtx
4080m32c_all_frame_related (rtx par)
4081{
4082 int len = XVECLEN (par, 0);
4083 int i;
4084
4085 for (i = 0; i < len; i++)
4086 F (XVECEXP (par, 0, i));
4087
4088 return par;
4089}
4090
/* Emits the prologue.  See the frame layout comment earlier in this
   file.  We can reserve up to 256 bytes with the ENTER opcode, beyond
   that we manually update sp.  */
void
m32c_emit_prologue (void)
{
  int frame_size, extra_frame_size = 0, reg_save_size;
  int complex_prologue = 0;

  cfun->machine->is_leaf = m32c_leaf_function_p ();
  if (interrupt_p (cfun->decl))
    {
      cfun->machine->is_interrupt = 1;
      complex_prologue = 1;
    }
  else if (bank_switch_p (cfun->decl))
    warning (OPT_Wattributes,
	     "%<bank_switch%> has no effect on non-interrupt functions");

  /* Size consumed by the register saves, which ENTER need not
     reserve again.  */
  reg_save_size = m32c_pushm_popm (PP_justcount);

  /* Interrupts save registers either by switching banks (FSET B) or
     with an explicit PUSHM.  */
  if (interrupt_p (cfun->decl))
    {
      if (bank_switch_p (cfun->decl))
	emit_insn (gen_fset_b ());
      else if (cfun->machine->intr_pushm)
	emit_insn (gen_pushm (GEN_INT (cfun->machine->intr_pushm)));
    }

  frame_size =
    m32c_initial_elimination_offset (FB_REGNO, SP_REGNO) - reg_save_size;
  if (frame_size == 0
      && !m32c_function_needs_enter ())
    cfun->machine->use_rts = 1;

  /* ENTER can reserve at most 254 bytes; the remainder is subtracted
     from SP explicitly below.  */
  if (frame_size > 254)
    {
      extra_frame_size = frame_size - 254;
      frame_size = 254;
    }
  if (cfun->machine->use_rts == 0)
    F (emit_insn (m32c_all_frame_related
		  (TARGET_A16
		   ? gen_prologue_enter_16 (GEN_INT (frame_size + 2))
		   : gen_prologue_enter_24 (GEN_INT (frame_size + 4)))));

  if (extra_frame_size)
    {
      complex_prologue = 1;
      if (TARGET_A16)
	F (emit_insn (gen_addhi3 (gen_rtx_REG (HImode, SP_REGNO),
				  gen_rtx_REG (HImode, SP_REGNO),
				  GEN_INT (-extra_frame_size))));
      else
	F (emit_insn (gen_addpsi3 (gen_rtx_REG (PSImode, SP_REGNO),
				   gen_rtx_REG (PSImode, SP_REGNO),
				   GEN_INT (-extra_frame_size))));
    }

  complex_prologue += m32c_pushm_popm (PP_pushm);

  /* This just emits a comment into the .s file for debugging.  */
  if (complex_prologue)
    emit_insn (gen_prologue_end ());
}
4156
/* Likewise, for the epilogue.  The only exception is that, for
   interrupts, we must manually unwind the frame as the REIT opcode
   doesn't do that.  */
void
m32c_emit_epilogue (void)
{
  int popm_count = m32c_pushm_popm (PP_justcount);

  /* This just emits a comment into the .s file for debugging.  */
  if (popm_count > 0 || cfun->machine->is_interrupt)
    emit_insn (gen_epilogue_start ());

  /* Restore whatever the prologue's PUSHM saved.  */
  if (popm_count > 0)
    m32c_pushm_popm (PP_popm);

  if (cfun->machine->is_interrupt)
    {
      machine_mode spmode = TARGET_A16 ? HImode : PSImode;

      /* REIT clears B flag and restores $fp for us, but we still
	 have to fix up the stack.  USE_RTS just means we didn't
	 emit ENTER.  */
      if (!cfun->machine->use_rts)
	{
	  /* SP <- FP via A0 as scratch, then pop the saved FP.  */
	  emit_move_insn (gen_rtx_REG (spmode, A0_REGNO),
			  gen_rtx_REG (spmode, FP_REGNO));
	  emit_move_insn (gen_rtx_REG (spmode, SP_REGNO),
			  gen_rtx_REG (spmode, A0_REGNO));
	  /* We can't just add this to the POPM because it would be in
	     the wrong order, and wouldn't fix the stack if we're bank
	     switching.  */
	  if (TARGET_A16)
	    emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, FP_REGNO)));
	  else
	    emit_insn (gen_poppsi (gen_rtx_REG (PSImode, FP_REGNO)));
	}
      /* Bank-switched interrupts saved registers with FSET B, not
	 PUSHM, so only the non-bank-switched case needs POPM here.  */
      if (!bank_switch_p (cfun->decl) && cfun->machine->intr_pushm)
	emit_insn (gen_popm (GEN_INT (cfun->machine->intr_pushm)));

      /* The FREIT (Fast REturn from InTerrupt) instruction should be
	 generated only for M32C/M32CM targets (generate the REIT
	 instruction otherwise).  */
      if (fast_interrupt_p (cfun->decl))
	{
	  /* Check if fast_attribute is set for M32C or M32CM.  */
	  if (TARGET_A24)
	    {
	      emit_jump_insn (gen_epilogue_freit ());
	    }
	  /* If fast_interrupt attribute is set for an R8C or M16C
	     target ignore this attribute and generated REIT
	     instruction.  */
	  else
	    {
	      warning (OPT_Wattributes,
		       "%<fast_interrupt%> attribute directive ignored");
	      emit_jump_insn (gen_epilogue_reit_16 ());
	    }
	}
      else if (TARGET_A16)
	emit_jump_insn (gen_epilogue_reit_16 ());
      else
	emit_jump_insn (gen_epilogue_reit_24 ());
    }
  else if (cfun->machine->use_rts)
    emit_jump_insn (gen_epilogue_rts ());
  else if (TARGET_A16)
    emit_jump_insn (gen_epilogue_exitd_16 ());
  else
    emit_jump_insn (gen_epilogue_exitd_24 ());
}
4228
4229void
4230m32c_emit_eh_epilogue (rtx ret_addr)
4231{
4232 /* R0[R2] has the stack adjustment. R1[R3] has the address to
4233 return to. We have to fudge the stack, pop everything, pop SP
4234 (fudged), and return (fudged). This is actually easier to do in
4235 assembler, so punt to libgcc. */
4236 emit_jump_insn (gen_eh_epilogue (ret_addr, cfun->machine->eh_stack_adjust));
18b42941 4237 /* emit_clobber (gen_rtx_REG (HImode, R0L_REGNO)); */
85c84d5c 4238}
4239
992bd98c 4240/* Indicate which flags must be properly set for a given conditional. */
4241static int
4242flags_needed_for_conditional (rtx cond)
4243{
4244 switch (GET_CODE (cond))
4245 {
4246 case LE:
4247 case GT:
4248 return FLAGS_OSZ;
4249 case LEU:
4250 case GTU:
4251 return FLAGS_ZC;
4252 case LT:
4253 case GE:
4254 return FLAGS_OS;
4255 case LTU:
4256 case GEU:
4257 return FLAGS_C;
4258 case EQ:
4259 case NE:
4260 return FLAGS_Z;
4261 default:
4262 return FLAGS_N;
4263 }
4264}
4265
4266#define DEBUG_CMP 0
4267
4268/* Returns true if a compare insn is redundant because it would only
4269 set flags that are already set correctly. */
4270static bool
d3ffa7b4 4271m32c_compare_redundant (rtx_insn *cmp, rtx *operands)
992bd98c 4272{
4273 int flags_needed;
4274 int pflags;
d3ffa7b4 4275 rtx_insn *prev;
4276 rtx pp, next;
1675aa0a 4277 rtx op0, op1;
992bd98c 4278#if DEBUG_CMP
4279 int prev_icode, i;
4280#endif
4281
4282 op0 = operands[0];
4283 op1 = operands[1];
992bd98c 4284
4285#if DEBUG_CMP
4286 fprintf(stderr, "\n\033[32mm32c_compare_redundant\033[0m\n");
4287 debug_rtx(cmp);
4288 for (i=0; i<2; i++)
4289 {
4290 fprintf(stderr, "operands[%d] = ", i);
4291 debug_rtx(operands[i]);
4292 }
4293#endif
4294
4295 next = next_nonnote_insn (cmp);
4296 if (!next || !INSN_P (next))
4297 {
4298#if DEBUG_CMP
4299 fprintf(stderr, "compare not followed by insn\n");
4300 debug_rtx(next);
4301#endif
4302 return false;
4303 }
4304 if (GET_CODE (PATTERN (next)) == SET
4305 && GET_CODE (XEXP ( PATTERN (next), 1)) == IF_THEN_ELSE)
4306 {
4307 next = XEXP (XEXP (PATTERN (next), 1), 0);
4308 }
4309 else if (GET_CODE (PATTERN (next)) == SET)
4310 {
4311 /* If this is a conditional, flags_needed will be something
4312 other than FLAGS_N, which we test below. */
4313 next = XEXP (PATTERN (next), 1);
4314 }
4315 else
4316 {
4317#if DEBUG_CMP
4318 fprintf(stderr, "compare not followed by conditional\n");
4319 debug_rtx(next);
4320#endif
4321 return false;
4322 }
4323#if DEBUG_CMP
4324 fprintf(stderr, "conditional is: ");
4325 debug_rtx(next);
4326#endif
4327
4328 flags_needed = flags_needed_for_conditional (next);
4329 if (flags_needed == FLAGS_N)
4330 {
4331#if DEBUG_CMP
4332 fprintf(stderr, "compare not followed by conditional\n");
4333 debug_rtx(next);
4334#endif
4335 return false;
4336 }
4337
4338 /* Compare doesn't set overflow and carry the same way that
4339 arithmetic instructions do, so we can't replace those. */
4340 if (flags_needed & FLAGS_OC)
4341 return false;
4342
4343 prev = cmp;
4344 do {
4345 prev = prev_nonnote_insn (prev);
4346 if (!prev)
4347 {
4348#if DEBUG_CMP
4349 fprintf(stderr, "No previous insn.\n");
4350#endif
4351 return false;
4352 }
4353 if (!INSN_P (prev))
4354 {
4355#if DEBUG_CMP
4356 fprintf(stderr, "Previous insn is a non-insn.\n");
4357#endif
4358 return false;
4359 }
4360 pp = PATTERN (prev);
4361 if (GET_CODE (pp) != SET)
4362 {
4363#if DEBUG_CMP
4364 fprintf(stderr, "Previous insn is not a SET.\n");
4365#endif
4366 return false;
4367 }
4368 pflags = get_attr_flags (prev);
4369
4370 /* Looking up attributes of previous insns corrupted the recog
4371 tables. */
4372 INSN_UID (cmp) = -1;
4373 recog (PATTERN (cmp), cmp, 0);
4374
4375 if (pflags == FLAGS_N
4376 && reg_mentioned_p (op0, pp))
4377 {
4378#if DEBUG_CMP
4379 fprintf(stderr, "intermediate non-flags insn uses op:\n");
4380 debug_rtx(prev);
4381#endif
4382 return false;
4383 }
f3269732 4384
4385 /* Check for comparisons against memory - between volatiles and
4386 aliases, we just can't risk this one. */
4387 if (GET_CODE (operands[0]) == MEM
4388 || GET_CODE (operands[0]) == MEM)
4389 {
4390#if DEBUG_CMP
4391 fprintf(stderr, "comparisons with memory:\n");
4392 debug_rtx(prev);
4393#endif
4394 return false;
4395 }
4396
4397 /* Check for PREV changing a register that's used to compute a
4398 value in CMP, even if it doesn't otherwise change flags. */
4399 if (GET_CODE (operands[0]) == REG
4400 && rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[0]))
4401 {
4402#if DEBUG_CMP
4403 fprintf(stderr, "sub-value affected, op0:\n");
4404 debug_rtx(prev);
4405#endif
4406 return false;
4407 }
4408 if (GET_CODE (operands[1]) == REG
4409 && rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[1]))
4410 {
4411#if DEBUG_CMP
4412 fprintf(stderr, "sub-value affected, op1:\n");
4413 debug_rtx(prev);
4414#endif
4415 return false;
4416 }
4417
992bd98c 4418 } while (pflags == FLAGS_N);
4419#if DEBUG_CMP
4420 fprintf(stderr, "previous flag-setting insn:\n");
4421 debug_rtx(prev);
4422 debug_rtx(pp);
4423#endif
4424
4425 if (GET_CODE (pp) == SET
4426 && GET_CODE (XEXP (pp, 0)) == REG
4427 && REGNO (XEXP (pp, 0)) == FLG_REGNO
4428 && GET_CODE (XEXP (pp, 1)) == COMPARE)
4429 {
4430 /* Adjacent cbranches must have the same operands to be
4431 redundant. */
4432 rtx pop0 = XEXP (XEXP (pp, 1), 0);
4433 rtx pop1 = XEXP (XEXP (pp, 1), 1);
4434#if DEBUG_CMP
4435 fprintf(stderr, "adjacent cbranches\n");
4436 debug_rtx(pop0);
4437 debug_rtx(pop1);
4438#endif
4439 if (rtx_equal_p (op0, pop0)
4440 && rtx_equal_p (op1, pop1))
4441 return true;
4442#if DEBUG_CMP
4443 fprintf(stderr, "prev cmp not same\n");
4444#endif
4445 return false;
4446 }
4447
4448 /* Else the previous insn must be a SET, with either the source or
4449 dest equal to operands[0], and operands[1] must be zero. */
4450
4451 if (!rtx_equal_p (op1, const0_rtx))
4452 {
4453#if DEBUG_CMP
4454 fprintf(stderr, "operands[1] not const0_rtx\n");
4455#endif
4456 return false;
4457 }
4458 if (GET_CODE (pp) != SET)
4459 {
4460#if DEBUG_CMP
4461 fprintf (stderr, "pp not set\n");
4462#endif
4463 return false;
4464 }
4465 if (!rtx_equal_p (op0, SET_SRC (pp))
4466 && !rtx_equal_p (op0, SET_DEST (pp)))
4467 {
4468#if DEBUG_CMP
4469 fprintf(stderr, "operands[0] not found in set\n");
4470#endif
4471 return false;
4472 }
4473
4474#if DEBUG_CMP
4475 fprintf(stderr, "cmp flags %x prev flags %x\n", flags_needed, pflags);
4476#endif
4477 if ((pflags & flags_needed) == flags_needed)
4478 return true;
4479
4480 return false;
4481}
4482
4483/* Return the pattern for a compare. This will be commented out if
4484 the compare is redundant, else a normal pattern is returned. Thus,
4485 the assembler output says where the compare would have been. */
4486char *
d3ffa7b4 4487m32c_output_compare (rtx_insn *insn, rtx *operands)
992bd98c 4488{
8deb3959 4489 static char templ[] = ";cmp.b\t%1,%0";
992bd98c 4490 /* ^ 5 */
4491
8deb3959 4492 templ[5] = " bwll"[GET_MODE_SIZE(GET_MODE(operands[0]))];
992bd98c 4493 if (m32c_compare_redundant (insn, operands))
4494 {
4495#if DEBUG_CMP
4496 fprintf(stderr, "cbranch: cmp not needed\n");
4497#endif
8deb3959 4498 return templ;
992bd98c 4499 }
4500
4501#if DEBUG_CMP
f3269732 4502 fprintf(stderr, "cbranch: cmp needed: `%s'\n", templ + 1);
992bd98c 4503#endif
8deb3959 4504 return templ + 1;
992bd98c 4505}
4506
2efce110 4507#undef TARGET_ENCODE_SECTION_INFO
4508#define TARGET_ENCODE_SECTION_INFO m32c_encode_section_info
4509
5a1c68c3 4510/* If the frame pointer isn't used, we detect it manually. But the
4511 stack pointer doesn't have as flexible addressing as the frame
4512 pointer, so we always assume we have it. */
4513
4514#undef TARGET_FRAME_POINTER_REQUIRED
4515#define TARGET_FRAME_POINTER_REQUIRED hook_bool_void_true
4516
85c84d5c 4517/* The Global `targetm' Variable. */
4518
4519struct gcc_target targetm = TARGET_INITIALIZER;
4520
4521#include "gt-m32c.h"