85c84d5c 1/* Target Code for R8C/M16C/M32C
3aea1f79 2 Copyright (C) 2005-2014 Free Software Foundation, Inc.
85c84d5c 3 Contributed by Red Hat.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published
038d1e19 9 by the Free Software Foundation; either version 3, or (at your
85c84d5c 10 option) any later version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
038d1e19 18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
85c84d5c 20
21#include "config.h"
22#include "system.h"
23#include "coretypes.h"
24#include "tm.h"
25#include "rtl.h"
26#include "regs.h"
27#include "hard-reg-set.h"
85c84d5c 28#include "insn-config.h"
29#include "conditions.h"
30#include "insn-flags.h"
31#include "output.h"
32#include "insn-attr.h"
33#include "flags.h"
34#include "recog.h"
35#include "reload.h"
0b205f4c 36#include "diagnostic-core.h"
85c84d5c 37#include "obstack.h"
38#include "tree.h"
9ed99284 39#include "stor-layout.h"
40#include "varasm.h"
41#include "calls.h"
85c84d5c 42#include "expr.h"
43#include "optabs.h"
44#include "except.h"
45#include "function.h"
46#include "ggc.h"
47#include "target.h"
48#include "target-def.h"
49#include "tm_p.h"
50#include "langhooks.h"
bc61cadb 51#include "pointer-set.h"
52#include "hash-table.h"
53#include "vec.h"
54#include "basic-block.h"
55#include "tree-ssa-alias.h"
56#include "internal-fn.h"
57#include "gimple-fold.h"
58#include "tree-eh.h"
59#include "gimple-expr.h"
60#include "is-a.h"
75a70cf9 61#include "gimple.h"
97678fce 62#include "df.h"
4ead5e30 63#include "tm-constrs.h"
f7715905 64#include "builtins.h"
85c84d5c 65
66/* Prototypes */
67
68/* Used by m32c_pushm_popm. */
69typedef enum
70{
71 PP_pushm,
72 PP_popm,
73 PP_justcount
74} Push_Pop_Type;
75
cc24427c 76static bool m32c_function_needs_enter (void);
85c84d5c 77static tree interrupt_handler (tree *, tree, tree, int, bool *);
2efce110 78static tree function_vector_handler (tree *, tree, tree, int, bool *);
85c84d5c 79static int interrupt_p (tree node);
cc24427c 80static int bank_switch_p (tree node);
81static int fast_interrupt_p (tree node);
85c84d5c 83static bool m32c_asm_integer (rtx, unsigned int, int);
a9f1838b 84static int m32c_comp_type_attributes (const_tree, const_tree);
85c84d5c 85static bool m32c_fixed_condition_code_regs (unsigned int *, unsigned int *);
86static struct machine_function *m32c_init_machine_status (void);
87static void m32c_insert_attributes (tree, tree *);
fd50b071 88static bool m32c_legitimate_address_p (enum machine_mode, rtx, bool);
d9530df8 89static bool m32c_addr_space_legitimate_address_p (enum machine_mode, rtx, bool, addr_space_t);
39cba157 90static rtx m32c_function_arg (cumulative_args_t, enum machine_mode,
1675aa0a 91 const_tree, bool);
39cba157 92static bool m32c_pass_by_reference (cumulative_args_t, enum machine_mode,
fb80456a 93 const_tree, bool);
39cba157 94static void m32c_function_arg_advance (cumulative_args_t, enum machine_mode,
8e2cc24f 95 const_tree, bool);
bd99ba64 96static unsigned int m32c_function_arg_boundary (enum machine_mode, const_tree);
85c84d5c 97static int m32c_pushm_popm (Push_Pop_Type);
39cba157 98static bool m32c_strict_argument_naming (cumulative_args_t);
85c84d5c 99static rtx m32c_struct_value_rtx (tree, int);
100static rtx m32c_subreg (enum machine_mode, rtx, enum machine_mode, int);
101static int need_to_save (int);
f57d8b49 102static rtx m32c_function_value (const_tree, const_tree, bool);
103static rtx m32c_libcall_value (enum machine_mode, const_rtx);
104
e3d4e41e 105/* Returns true if an address is specified, else false. */
106static bool m32c_get_pragma_address (const char *varname, unsigned *addr);
107
2efce110 108#define SYMBOL_FLAG_FUNCVEC_FUNCTION (SYMBOL_FLAG_MACH_DEP << 0)
85c84d5c 109
110#define streq(a,b) (strcmp ((a), (b)) == 0)
111
112/* Internal support routines */
113
114/* Debugging statements are tagged with DEBUG0 only so that they can
115 be easily enabled individually, by replacing the '0' with '1' as
116 needed. */
117#define DEBUG0 0
118#define DEBUG1 1
119
120#if DEBUG0
121/* This is needed by some of the commented-out debug statements
122 below. */
123static char const *class_names[LIM_REG_CLASSES] = REG_CLASS_NAMES;
124#endif
125static int class_contents[LIM_REG_CLASSES][1] = REG_CLASS_CONTENTS;
126
127/* These are all to support encode_pattern(). */
128static char pattern[30], *patternp;
129static GTY(()) rtx patternr[30];
130#define RTX_IS(x) (streq (pattern, x))
131
132/* Some macros to simplify the logic throughout this file. */
133#define IS_MEM_REGNO(regno) ((regno) >= MEM0_REGNO && (regno) <= MEM7_REGNO)
134#define IS_MEM_REG(rtx) (GET_CODE (rtx) == REG && IS_MEM_REGNO (REGNO (rtx)))
135
136#define IS_CR_REGNO(regno) ((regno) >= SB_REGNO && (regno) <= PC_REGNO)
137#define IS_CR_REG(rtx) (GET_CODE (rtx) == REG && IS_CR_REGNO (REGNO (rtx)))
138
d9530df8 139static int
140far_addr_space_p (rtx x)
141{
142 if (GET_CODE (x) != MEM)
143 return 0;
144#if DEBUG0
145 fprintf(stderr, "\033[35mfar_addr_space: "); debug_rtx(x);
146 fprintf(stderr, " = %d\033[0m\n", MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR);
147#endif
148 return MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR;
149}
150
85c84d5c 151/* We do most RTX matching by converting the RTX into a string, and
152 using string compares. This vastly simplifies the logic in many of
153 the functions in this file.
154
155 On exit, pattern[] has the encoded string (use RTX_IS("...") to
156 compare it) and patternr[] has pointers to the nodes in the RTX
157 corresponding to each character in the encoded string. The latter
158 is mostly used by print_operand().
159
160 Unrecognized patterns have '?' in them; this shows up when the
161 assembler complains about syntax errors.
162*/
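/* A few illustrative encodings, derived from the cases handled in
   encode_pattern_1 below (REG -> 'r', MEM -> 'm', PLUS -> '+',
   CONST_INT -> 'i', SYMBOL_REF -> 's'):

     (reg:HI r0)                                   -> "r"
     (mem:QI (reg:PSI a0))                         -> "mr"
     (mem:HI (plus:HI (reg:HI fb) (const_int -4))) -> "m+ri"
     (mem:QI (symbol_ref "x"))                     -> "ms"  */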
163
164static void
165encode_pattern_1 (rtx x)
166{
167 int i;
168
169 if (patternp == pattern + sizeof (pattern) - 2)
170 {
171 patternp[-1] = '?';
172 return;
173 }
174
175 patternr[patternp - pattern] = x;
176
177 switch (GET_CODE (x))
178 {
179 case REG:
180 *patternp++ = 'r';
181 break;
182 case SUBREG:
183 if (GET_MODE_SIZE (GET_MODE (x)) !=
184 GET_MODE_SIZE (GET_MODE (XEXP (x, 0))))
185 *patternp++ = 'S';
186 encode_pattern_1 (XEXP (x, 0));
187 break;
188 case MEM:
189 *patternp++ = 'm';
190 case CONST:
191 encode_pattern_1 (XEXP (x, 0));
192 break;
d9530df8 193 case SIGN_EXTEND:
194 *patternp++ = '^';
195 *patternp++ = 'S';
196 encode_pattern_1 (XEXP (x, 0));
197 break;
198 case ZERO_EXTEND:
199 *patternp++ = '^';
200 *patternp++ = 'Z';
201 encode_pattern_1 (XEXP (x, 0));
202 break;
85c84d5c 203 case PLUS:
204 *patternp++ = '+';
205 encode_pattern_1 (XEXP (x, 0));
206 encode_pattern_1 (XEXP (x, 1));
207 break;
208 case PRE_DEC:
209 *patternp++ = '>';
210 encode_pattern_1 (XEXP (x, 0));
211 break;
212 case POST_INC:
213 *patternp++ = '<';
214 encode_pattern_1 (XEXP (x, 0));
215 break;
216 case LO_SUM:
217 *patternp++ = 'L';
218 encode_pattern_1 (XEXP (x, 0));
219 encode_pattern_1 (XEXP (x, 1));
220 break;
221 case HIGH:
222 *patternp++ = 'H';
223 encode_pattern_1 (XEXP (x, 0));
224 break;
225 case SYMBOL_REF:
226 *patternp++ = 's';
227 break;
228 case LABEL_REF:
229 *patternp++ = 'l';
230 break;
231 case CODE_LABEL:
232 *patternp++ = 'c';
233 break;
234 case CONST_INT:
235 case CONST_DOUBLE:
236 *patternp++ = 'i';
237 break;
238 case UNSPEC:
239 *patternp++ = 'u';
240 *patternp++ = '0' + XCINT (x, 1, UNSPEC);
241 for (i = 0; i < XVECLEN (x, 0); i++)
242 encode_pattern_1 (XVECEXP (x, 0, i));
243 break;
244 case USE:
245 *patternp++ = 'U';
246 break;
247 case PARALLEL:
248 *patternp++ = '|';
249 for (i = 0; i < XVECLEN (x, 0); i++)
250 encode_pattern_1 (XVECEXP (x, 0, i));
251 break;
252 case EXPR_LIST:
253 *patternp++ = 'E';
254 encode_pattern_1 (XEXP (x, 0));
255 if (XEXP (x, 1))
256 encode_pattern_1 (XEXP (x, 1));
257 break;
258 default:
259 *patternp++ = '?';
260#if DEBUG0
261 fprintf (stderr, "can't encode pattern %s\n",
262 GET_RTX_NAME (GET_CODE (x)));
263 debug_rtx (x);
264 gcc_unreachable ();
265#endif
266 break;
267 }
268}
269
270static void
271encode_pattern (rtx x)
272{
273 patternp = pattern;
274 encode_pattern_1 (x);
275 *patternp = 0;
276}
277
 278/* Since register names indicate the mode they're used in, we need a
 279 way to determine which name to use for a register in a given mode.
 280 Called by print_operand(). */
281
282static const char *
283reg_name_with_mode (int regno, enum machine_mode mode)
284{
285 int mlen = GET_MODE_SIZE (mode);
286 if (regno == R0_REGNO && mlen == 1)
287 return "r0l";
288 if (regno == R0_REGNO && (mlen == 3 || mlen == 4))
289 return "r2r0";
290 if (regno == R0_REGNO && mlen == 6)
291 return "r2r1r0";
292 if (regno == R0_REGNO && mlen == 8)
293 return "r3r1r2r0";
294 if (regno == R1_REGNO && mlen == 1)
295 return "r1l";
296 if (regno == R1_REGNO && (mlen == 3 || mlen == 4))
297 return "r3r1";
298 if (regno == A0_REGNO && TARGET_A16 && (mlen == 3 || mlen == 4))
299 return "a1a0";
300 return reg_names[regno];
301}
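/* For example, reg_name_with_mode (R0_REGNO, QImode) yields "r0l" and
   reg_name_with_mode (R0_REGNO, SImode) yields "r2r0", per the cases
   above.  */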
302
 303/* How many bytes a register uses on the stack when it's pushed. We need
304 to know this because the push opcode needs to explicitly indicate
305 the size of the register, even though the name of the register
306 already tells it that. Used by m32c_output_reg_{push,pop}, which
307 is only used through calls to ASM_OUTPUT_REG_{PUSH,POP}. */
308
309static int
310reg_push_size (int regno)
311{
312 switch (regno)
313 {
314 case R0_REGNO:
315 case R1_REGNO:
316 return 2;
317 case R2_REGNO:
318 case R3_REGNO:
319 case FLG_REGNO:
320 return 2;
321 case A0_REGNO:
322 case A1_REGNO:
323 case SB_REGNO:
324 case FB_REGNO:
325 case SP_REGNO:
326 if (TARGET_A16)
327 return 2;
328 else
329 return 3;
330 default:
331 gcc_unreachable ();
332 }
333}
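/* For example, reg_push_size (A0_REGNO) is 2 bytes on TARGET_A16 parts
   and 3 bytes otherwise (TARGET_A24), per the switch above.  */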
334
85c84d5c 335/* Given two register classes, find the largest intersection between
336 them. If there is no intersection, return RETURNED_IF_EMPTY
337 instead. */
afe8797e 338static reg_class_t
339reduce_class (reg_class_t original_class, reg_class_t limiting_class,
340 reg_class_t returned_if_empty)
85c84d5c 341{
afe8797e 342 HARD_REG_SET cc;
343 int i;
344 reg_class_t best = NO_REGS;
345 unsigned int best_size = 0;
85c84d5c 346
347 if (original_class == limiting_class)
348 return original_class;
349
afe8797e 350 cc = reg_class_contents[original_class];
351 AND_HARD_REG_SET (cc, reg_class_contents[limiting_class]);
85c84d5c 352
85c84d5c 353 for (i = 0; i < LIM_REG_CLASSES; i++)
354 {
afe8797e 355 if (hard_reg_set_subset_p (reg_class_contents[i], cc))
356 if (best_size < reg_class_size[i])
85c84d5c 357 {
afe8797e 358 best = (reg_class_t) i;
359 best_size = reg_class_size[i];
85c84d5c 360 }
361
362 }
363 if (best == NO_REGS)
364 return returned_if_empty;
365 return best;
366}
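/* For example, if ORIGINAL_CLASS and LIMITING_CLASS overlap only in r0,
   reduce_class returns the largest register class contained entirely in
   that intersection (presumably R0_REGS, assuming R0_REGS holds only
   r0); if the two classes don't overlap at all, it returns
   RETURNED_IF_EMPTY instead.  */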
367
85c84d5c 368/* Used by m32c_register_move_cost to determine if a move is
369 impossibly expensive. */
4cf1a89b 370static bool
371class_can_hold_mode (reg_class_t rclass, enum machine_mode mode)
85c84d5c 372{
373 /* Cache the results: 0=untested 1=no 2=yes */
374 static char results[LIM_REG_CLASSES][MAX_MACHINE_MODE];
4cf1a89b 375
376 if (results[(int) rclass][mode] == 0)
85c84d5c 377 {
4cf1a89b 378 int r;
8deb3959 379 results[rclass][mode] = 1;
85c84d5c 380 for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
4cf1a89b 381 if (in_hard_reg_set_p (reg_class_contents[(int) rclass], mode, r)
85c84d5c 382 && HARD_REGNO_MODE_OK (r, mode))
383 {
4cf1a89b 384 results[rclass][mode] = 2;
385 break;
85c84d5c 386 }
387 }
4cf1a89b 388
85c84d5c 389#if DEBUG0
390 fprintf (stderr, "class %s can hold %s? %s\n",
4cf1a89b 391 class_names[(int) rclass], mode_name[mode],
8deb3959 392 (results[rclass][mode] == 2) ? "yes" : "no");
85c84d5c 393#endif
4cf1a89b 394 return results[(int) rclass][mode] == 2;
85c84d5c 395}
396
397/* Run-time Target Specification. */
398
399/* Memregs are memory locations that gcc treats like general
400 registers, as there are a limited number of true registers and the
401 m32c families can use memory in most places that registers can be
402 used.
403
404 However, since memory accesses are more expensive than registers,
405 we allow the user to limit the number of memregs available, in
406 order to try to persuade gcc to try harder to use real registers.
407
9213d2eb 408 Memregs are provided by lib1funcs.S.
85c84d5c 409*/
410
85c84d5c 411int ok_to_change_target_memregs = TRUE;
412
1722522a 413/* Implements TARGET_OPTION_OVERRIDE. */
414
415#undef TARGET_OPTION_OVERRIDE
416#define TARGET_OPTION_OVERRIDE m32c_option_override
417
418static void
419m32c_option_override (void)
85c84d5c 420{
1722522a 421 /* We limit memregs to 0..16, and provide a default. */
eea6e787 422 if (global_options_set.x_target_memregs)
85c84d5c 423 {
424 if (target_memregs < 0 || target_memregs > 16)
425 error ("invalid target memregs value '%d'", target_memregs);
426 }
427 else
fedc146b 428 target_memregs = 16;
f8e7cebd 429
430 if (TARGET_A24)
431 flag_ivopts = 0;
1af17d44 432
433 /* This target defaults to strict volatile bitfields. */
941a2396 434 if (flag_strict_volatile_bitfields < 0 && abi_version_at_least(2))
1af17d44 435 flag_strict_volatile_bitfields = 1;
54f36750 436
437 /* r8c/m16c have no 16-bit indirect call, so thunks are involved.
438 This is always worse than an absolute call. */
439 if (TARGET_A16)
440 flag_no_function_cse = 1;
45bba533 441
442 /* This wants to put insns between compares and their jumps. */
443 /* FIXME: The right solution is to properly trace the flags register
444 values, but that is too much work for stage 4. */
445 flag_combine_stack_adjustments = 0;
54f36750 446}
447
448#undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
449#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m32c_override_options_after_change
450
451static void
452m32c_override_options_after_change (void)
453{
454 if (TARGET_A16)
455 flag_no_function_cse = 1;
85c84d5c 456}
457
458/* Defining data structures for per-function information */
459
460/* The usual; we set up our machine_function data. */
461static struct machine_function *
462m32c_init_machine_status (void)
463{
25a27413 464 return ggc_cleared_alloc<machine_function> ();
85c84d5c 465}
466
467/* Implements INIT_EXPANDERS. We just set up to call the above
468 function. */
469void
470m32c_init_expanders (void)
471{
472 init_machine_status = m32c_init_machine_status;
473}
474
475/* Storage Layout */
476
85c84d5c 477/* Register Basics */
478
479/* Basic Characteristics of Registers */
480
481/* Whether a mode fits in a register is complex enough to warrant a
482 table. */
483static struct
484{
485 char qi_regs;
486 char hi_regs;
487 char pi_regs;
488 char si_regs;
489 char di_regs;
490} nregs_table[FIRST_PSEUDO_REGISTER] =
491{
492 { 1, 1, 2, 2, 4 }, /* r0 */
493 { 0, 1, 0, 0, 0 }, /* r2 */
494 { 1, 1, 2, 2, 0 }, /* r1 */
495 { 0, 1, 0, 0, 0 }, /* r3 */
496 { 0, 1, 1, 0, 0 }, /* a0 */
497 { 0, 1, 1, 0, 0 }, /* a1 */
498 { 0, 1, 1, 0, 0 }, /* sb */
499 { 0, 1, 1, 0, 0 }, /* fb */
500 { 0, 1, 1, 0, 0 }, /* sp */
501 { 1, 1, 1, 0, 0 }, /* pc */
502 { 0, 0, 0, 0, 0 }, /* fl */
503 { 1, 1, 1, 0, 0 }, /* ap */
504 { 1, 1, 2, 2, 4 }, /* mem0 */
505 { 1, 1, 2, 2, 4 }, /* mem1 */
506 { 1, 1, 2, 2, 4 }, /* mem2 */
507 { 1, 1, 2, 2, 4 }, /* mem3 */
508 { 1, 1, 2, 2, 4 }, /* mem4 */
509 { 1, 1, 2, 2, 0 }, /* mem5 */
510 { 1, 1, 2, 2, 0 }, /* mem6 */
511 { 1, 1, 0, 0, 0 }, /* mem7 */
512};
513
b2d7ede1 514/* Implements TARGET_CONDITIONAL_REGISTER_USAGE. We adjust the number
515 of available memregs, and select which registers need to be preserved
85c84d5c 516 across calls based on the chip family. */
517
b2d7ede1 518#undef TARGET_CONDITIONAL_REGISTER_USAGE
519#define TARGET_CONDITIONAL_REGISTER_USAGE m32c_conditional_register_usage
7182acf5 520void
85c84d5c 521m32c_conditional_register_usage (void)
522{
85c84d5c 523 int i;
524
525 if (0 <= target_memregs && target_memregs <= 16)
526 {
527 /* The command line option is bytes, but our "registers" are
528 16-bit words. */
cc24427c 529 for (i = (target_memregs+1)/2; i < 8; i++)
85c84d5c 530 {
531 fixed_regs[MEM0_REGNO + i] = 1;
532 CLEAR_HARD_REG_BIT (reg_class_contents[MEM_REGS], MEM0_REGNO + i);
533 }
534 }
535
536 /* M32CM and M32C preserve more registers across function calls. */
537 if (TARGET_A24)
538 {
539 call_used_regs[R1_REGNO] = 0;
540 call_used_regs[R2_REGNO] = 0;
541 call_used_regs[R3_REGNO] = 0;
542 call_used_regs[A0_REGNO] = 0;
543 call_used_regs[A1_REGNO] = 0;
544 }
545}
546
547/* How Values Fit in Registers */
548
549/* Implements HARD_REGNO_NREGS. This is complicated by the fact that
550 different registers are different sizes from each other, *and* may
551 be different sizes in different chip families. */
4a6a8336 552static int
553m32c_hard_regno_nregs_1 (int regno, enum machine_mode mode)
85c84d5c 554{
555 if (regno == FLG_REGNO && mode == CCmode)
556 return 1;
557 if (regno >= FIRST_PSEUDO_REGISTER)
558 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
559
560 if (regno >= MEM0_REGNO && regno <= MEM7_REGNO)
561 return (GET_MODE_SIZE (mode) + 1) / 2;
562
563 if (GET_MODE_SIZE (mode) <= 1)
564 return nregs_table[regno].qi_regs;
565 if (GET_MODE_SIZE (mode) <= 2)
566 return nregs_table[regno].hi_regs;
d9530df8 567 if (regno == A0_REGNO && mode == SImode && TARGET_A16)
85c84d5c 568 return 2;
569 if ((GET_MODE_SIZE (mode) <= 3 || mode == PSImode) && TARGET_A24)
570 return nregs_table[regno].pi_regs;
571 if (GET_MODE_SIZE (mode) <= 4)
572 return nregs_table[regno].si_regs;
573 if (GET_MODE_SIZE (mode) <= 8)
574 return nregs_table[regno].di_regs;
575 return 0;
576}
577
4a6a8336 578int
579m32c_hard_regno_nregs (int regno, enum machine_mode mode)
580{
581 int rv = m32c_hard_regno_nregs_1 (regno, mode);
582 return rv ? rv : 1;
583}
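/* For example, m32c_hard_regno_nregs (R0_REGNO, SImode) is 2: an SImode
   value starting in r0 spills over into the neighboring hard register
   (r2 in this file's register order), matching the "r2r0" name used by
   reg_name_with_mode above.  */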
584
85c84d5c 585/* Implements HARD_REGNO_MODE_OK. The above function does the work
586 already; just test its return value. */
587int
588m32c_hard_regno_ok (int regno, enum machine_mode mode)
589{
4a6a8336 590 return m32c_hard_regno_nregs_1 (regno, mode) != 0;
85c84d5c 591}
592
593/* Implements MODES_TIEABLE_P. In general, modes aren't tieable since
594 registers are all different sizes. However, since most modes are
 595 bigger than our registers anyway, it's easier to simply report
 596 everything as tieable, leaving QImode as the only unique case. */
597int
598m32c_modes_tieable_p (enum machine_mode m1, enum machine_mode m2)
599{
600 if (GET_MODE_SIZE (m1) == GET_MODE_SIZE (m2))
601 return 1;
602
fedc146b 603#if 0
85c84d5c 604 if (m1 == QImode || m2 == QImode)
605 return 0;
fedc146b 606#endif
85c84d5c 607
608 return 1;
609}
610
611/* Register Classes */
612
613/* Implements REGNO_REG_CLASS. */
1675aa0a 614enum reg_class
85c84d5c 615m32c_regno_reg_class (int regno)
616{
617 switch (regno)
618 {
619 case R0_REGNO:
620 return R0_REGS;
621 case R1_REGNO:
622 return R1_REGS;
623 case R2_REGNO:
624 return R2_REGS;
625 case R3_REGNO:
626 return R3_REGS;
627 case A0_REGNO:
1facaf0d 628 return A0_REGS;
85c84d5c 629 case A1_REGNO:
1facaf0d 630 return A1_REGS;
85c84d5c 631 case SB_REGNO:
632 return SB_REGS;
633 case FB_REGNO:
634 return FB_REGS;
635 case SP_REGNO:
636 return SP_REGS;
637 case FLG_REGNO:
638 return FLG_REGS;
639 default:
640 if (IS_MEM_REGNO (regno))
641 return MEM_REGS;
642 return ALL_REGS;
643 }
644}
645
85c84d5c 646/* Implements REGNO_OK_FOR_BASE_P. */
647int
648m32c_regno_ok_for_base_p (int regno)
649{
650 if (regno == A0_REGNO
651 || regno == A1_REGNO || regno >= FIRST_PSEUDO_REGISTER)
652 return 1;
653 return 0;
654}
655
656#define DEBUG_RELOAD 0
657
7d7d4922 658/* Implements TARGET_PREFERRED_RELOAD_CLASS. In general, prefer general
85c84d5c 659 registers of the appropriate size. */
7d7d4922 660
661#undef TARGET_PREFERRED_RELOAD_CLASS
662#define TARGET_PREFERRED_RELOAD_CLASS m32c_preferred_reload_class
663
664static reg_class_t
665m32c_preferred_reload_class (rtx x, reg_class_t rclass)
85c84d5c 666{
7d7d4922 667 reg_class_t newclass = rclass;
85c84d5c 668
669#if DEBUG_RELOAD
670 fprintf (stderr, "\npreferred_reload_class for %s is ",
671 class_names[rclass]);
672#endif
673 if (rclass == NO_REGS)
674 rclass = GET_MODE (x) == QImode ? HL_REGS : R03_REGS;
675
4cf1a89b 676 if (reg_classes_intersect_p (rclass, CR_REGS))
85c84d5c 677 {
678 switch (GET_MODE (x))
679 {
680 case QImode:
681 newclass = HL_REGS;
682 break;
683 default:
684 /* newclass = HI_REGS; */
685 break;
686 }
687 }
688
689 else if (newclass == QI_REGS && GET_MODE_SIZE (GET_MODE (x)) > 2)
690 newclass = SI_REGS;
691 else if (GET_MODE_SIZE (GET_MODE (x)) > 4
7d7d4922 692 && ! reg_class_subset_p (R03_REGS, rclass))
85c84d5c 693 newclass = DI_REGS;
694
695 rclass = reduce_class (rclass, newclass, rclass);
696
697 if (GET_MODE (x) == QImode)
698 rclass = reduce_class (rclass, HL_REGS, rclass);
699
700#if DEBUG_RELOAD
701 fprintf (stderr, "%s\n", class_names[rclass]);
702 debug_rtx (x);
703
704 if (GET_CODE (x) == MEM
705 && GET_CODE (XEXP (x, 0)) == PLUS
706 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
707 fprintf (stderr, "Glorm!\n");
708#endif
709 return rclass;
710}
711
7d7d4922 712/* Implements TARGET_PREFERRED_OUTPUT_RELOAD_CLASS. */
713
714#undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
715#define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS m32c_preferred_output_reload_class
716
717static reg_class_t
718m32c_preferred_output_reload_class (rtx x, reg_class_t rclass)
85c84d5c 719{
720 return m32c_preferred_reload_class (x, rclass);
721}
722
723/* Implements LIMIT_RELOAD_CLASS. We basically want to avoid using
 724 address registers for data reloads, since they're needed for address
725 reloads. */
726int
727m32c_limit_reload_class (enum machine_mode mode, int rclass)
728{
729#if DEBUG_RELOAD
730 fprintf (stderr, "limit_reload_class for %s: %s ->",
731 mode_name[mode], class_names[rclass]);
732#endif
733
734 if (mode == QImode)
735 rclass = reduce_class (rclass, HL_REGS, rclass);
736 else if (mode == HImode)
737 rclass = reduce_class (rclass, HI_REGS, rclass);
738 else if (mode == SImode)
739 rclass = reduce_class (rclass, SI_REGS, rclass);
740
741 if (rclass != A_REGS)
742 rclass = reduce_class (rclass, DI_REGS, rclass);
743
744#if DEBUG_RELOAD
745 fprintf (stderr, " %s\n", class_names[rclass]);
746#endif
747 return rclass;
748}
749
 750/* Implements SECONDARY_RELOAD_CLASS. QImode values have to be reloaded in
751 r0 or r1, as those are the only real QImode registers. CR regs get
752 reloaded through appropriately sized general or address
753 registers. */
754int
755m32c_secondary_reload_class (int rclass, enum machine_mode mode, rtx x)
756{
757 int cc = class_contents[rclass][0];
758#if DEBUG0
759 fprintf (stderr, "\nsecondary reload class %s %s\n",
760 class_names[rclass], mode_name[mode]);
761 debug_rtx (x);
762#endif
763 if (mode == QImode
764 && GET_CODE (x) == MEM && (cc & ~class_contents[R23_REGS][0]) == 0)
765 return QI_REGS;
4cf1a89b 766 if (reg_classes_intersect_p (rclass, CR_REGS)
85c84d5c 767 && GET_CODE (x) == REG
768 && REGNO (x) >= SB_REGNO && REGNO (x) <= SP_REGNO)
5a4f6e8c 769 return (TARGET_A16 || mode == HImode) ? HI_REGS : A_REGS;
85c84d5c 770 return NO_REGS;
771}
772
cac9b7c7 773/* Implements TARGET_CLASS_LIKELY_SPILLED_P. A_REGS is needed for address
85c84d5c 774 reloads. */
cac9b7c7 775
776#undef TARGET_CLASS_LIKELY_SPILLED_P
777#define TARGET_CLASS_LIKELY_SPILLED_P m32c_class_likely_spilled_p
778
779static bool
780m32c_class_likely_spilled_p (reg_class_t regclass)
85c84d5c 781{
782 if (regclass == A_REGS)
cac9b7c7 783 return true;
784
785 return (reg_class_size[(int) regclass] == 1);
85c84d5c 786}
787
c3271fdb 788/* Implements TARGET_CLASS_MAX_NREGS. We calculate this according to its
85c84d5c 789 documented meaning, to avoid potential inconsistencies with actual
790 class definitions. */
c3271fdb 791
792#undef TARGET_CLASS_MAX_NREGS
793#define TARGET_CLASS_MAX_NREGS m32c_class_max_nregs
794
795static unsigned char
796m32c_class_max_nregs (reg_class_t regclass, enum machine_mode mode)
85c84d5c 797{
c3271fdb 798 int rn;
799 unsigned char max = 0;
85c84d5c 800
801 for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
c3271fdb 802 if (TEST_HARD_REG_BIT (reg_class_contents[(int) regclass], rn))
85c84d5c 803 {
c3271fdb 804 unsigned char n = m32c_hard_regno_nregs (rn, mode);
85c84d5c 805 if (max < n)
806 max = n;
807 }
808 return max;
809}
810
811/* Implements CANNOT_CHANGE_MODE_CLASS. Only r0 and r1 can change to
812 QI (r0l, r1l) because the chip doesn't support QI ops on other
813 registers (well, it does on a0/a1 but if we let gcc do that, reload
814 suffers). Otherwise, we allow changes to larger modes. */
815int
816m32c_cannot_change_mode_class (enum machine_mode from,
817 enum machine_mode to, int rclass)
818{
ced4068a 819 int rn;
85c84d5c 820#if DEBUG0
821 fprintf (stderr, "cannot change from %s to %s in %s\n",
822 mode_name[from], mode_name[to], class_names[rclass]);
823#endif
824
ced4068a 825 /* If the larger mode isn't allowed in any of these registers, we
826 can't allow the change. */
827 for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
828 if (class_contents[rclass][0] & (1 << rn))
829 if (! m32c_hard_regno_ok (rn, to))
830 return 1;
831
85c84d5c 832 if (to == QImode)
833 return (class_contents[rclass][0] & 0x1ffa);
834
835 if (class_contents[rclass][0] & 0x0005 /* r0, r1 */
836 && GET_MODE_SIZE (from) > 1)
837 return 0;
838 if (GET_MODE_SIZE (from) > 2) /* all other regs */
839 return 0;
840
841 return 1;
842}
843
844/* Helpers for the rest of the file. */
845/* TRUE if the rtx is a REG rtx for the given register. */
846#define IS_REG(rtx,regno) (GET_CODE (rtx) == REG \
847 && REGNO (rtx) == regno)
848/* TRUE if the rtx is a pseudo - specifically, one we can use as a
849 base register in address calculations (hence the "strict"
850 argument). */
851#define IS_PSEUDO(rtx,strict) (!strict && GET_CODE (rtx) == REG \
852 && (REGNO (rtx) == AP_REGNO \
853 || REGNO (rtx) >= FIRST_PSEUDO_REGISTER))
854
d9530df8 855#define A0_OR_PSEUDO(x) (IS_REG(x, A0_REGNO) || REGNO (x) >= FIRST_PSEUDO_REGISTER)
856
85c84d5c 857/* Implements matching for the machine-specific constraints. 'S' is
858 for memory constraints, plus "Rpa" for PARALLEL rtx's we use for
859 call return values. */
4ead5e30 860bool
861m32c_matches_constraint_p (rtx value, int constraint)
85c84d5c 862{
863 encode_pattern (value);
d9530df8 864
4ead5e30 865 switch (constraint) {
866 case CONSTRAINT_SF:
867 return (far_addr_space_p (value)
868 && ((RTX_IS ("mr")
869 && A0_OR_PSEUDO (patternr[1])
870 && GET_MODE (patternr[1]) == SImode)
871 || (RTX_IS ("m+^Sri")
872 && A0_OR_PSEUDO (patternr[4])
873 && GET_MODE (patternr[4]) == HImode)
874 || (RTX_IS ("m+^Srs")
875 && A0_OR_PSEUDO (patternr[4])
876 && GET_MODE (patternr[4]) == HImode)
877 || (RTX_IS ("m+^S+ris")
878 && A0_OR_PSEUDO (patternr[5])
879 && GET_MODE (patternr[5]) == HImode)
880 || RTX_IS ("ms")));
881 case CONSTRAINT_Sd:
85c84d5c 882 {
883 /* This is the common "src/dest" address */
884 rtx r;
885 if (GET_CODE (value) == MEM && CONSTANT_P (XEXP (value, 0)))
4ead5e30 886 return true;
85c84d5c 887 if (RTX_IS ("ms") || RTX_IS ("m+si"))
4ead5e30 888 return true;
fedc146b 889 if (RTX_IS ("m++rii"))
890 {
891 if (REGNO (patternr[3]) == FB_REGNO
892 && INTVAL (patternr[4]) == 0)
4ead5e30 893 return true;
fedc146b 894 }
85c84d5c 895 if (RTX_IS ("mr"))
896 r = patternr[1];
897 else if (RTX_IS ("m+ri") || RTX_IS ("m+rs") || RTX_IS ("m+r+si"))
898 r = patternr[2];
899 else
4ead5e30 900 return false;
85c84d5c 901 if (REGNO (r) == SP_REGNO)
4ead5e30 902 return false;
85c84d5c 903 return m32c_legitimate_address_p (GET_MODE (value), XEXP (value, 0), 1);
904 }
4ead5e30 905 case CONSTRAINT_Sa:
85c84d5c 906 {
907 rtx r;
908 if (RTX_IS ("mr"))
909 r = patternr[1];
910 else if (RTX_IS ("m+ri"))
911 r = patternr[2];
912 else
4ead5e30 913 return false;
85c84d5c 914 return (IS_REG (r, A0_REGNO) || IS_REG (r, A1_REGNO));
915 }
4ead5e30 916 case CONSTRAINT_Si:
917 return (RTX_IS ("mi") || RTX_IS ("ms") || RTX_IS ("m+si"));
918 case CONSTRAINT_Ss:
919 return ((RTX_IS ("mr")
920 && (IS_REG (patternr[1], SP_REGNO)))
921 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SP_REGNO))));
922 case CONSTRAINT_Sf:
923 return ((RTX_IS ("mr")
924 && (IS_REG (patternr[1], FB_REGNO)))
925 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], FB_REGNO))));
926 case CONSTRAINT_Sb:
927 return ((RTX_IS ("mr")
928 && (IS_REG (patternr[1], SB_REGNO)))
929 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SB_REGNO))));
930 case CONSTRAINT_Sp:
931 /* Absolute addresses 0..0x1fff used for bit addressing (I/O ports) */
932 return (RTX_IS ("mi")
933 && !(INTVAL (patternr[1]) & ~0x1fff));
934 case CONSTRAINT_S1:
935 return r1h_operand (value, QImode);
936 case CONSTRAINT_Rpa:
85c84d5c 937 return GET_CODE (value) == PARALLEL;
4ead5e30 938 default:
939 return false;
940 }
85c84d5c 941}
942
943/* STACK AND CALLING */
944
945/* Frame Layout */
946
947/* Implements RETURN_ADDR_RTX. Note that R8C and M16C push 24 bits
948 (yes, THREE bytes) onto the stack for the return address, but we
949 don't support pointers bigger than 16 bits on those chips. This
950 will likely wreak havoc with exception unwinding. FIXME. */
951rtx
952m32c_return_addr_rtx (int count)
953{
954 enum machine_mode mode;
955 int offset;
956 rtx ra_mem;
957
958 if (count)
959 return NULL_RTX;
960 /* we want 2[$fb] */
961
962 if (TARGET_A24)
963 {
a8651e7d 964 /* It's four bytes */
965 mode = PSImode;
85c84d5c 966 offset = 4;
967 }
968 else
969 {
970 /* FIXME: it's really 3 bytes */
971 mode = HImode;
972 offset = 2;
973 }
974
975 ra_mem =
29c05e22 976 gen_rtx_MEM (mode, plus_constant (Pmode, gen_rtx_REG (Pmode, FP_REGNO),
977 offset));
85c84d5c 978 return copy_to_mode_reg (mode, ra_mem);
979}
980
981/* Implements INCOMING_RETURN_ADDR_RTX. See comment above. */
982rtx
983m32c_incoming_return_addr_rtx (void)
984{
985 /* we want [sp] */
986 return gen_rtx_MEM (PSImode, gen_rtx_REG (PSImode, SP_REGNO));
987}
988
989/* Exception Handling Support */
990
991/* Implements EH_RETURN_DATA_REGNO. Choose registers able to hold
992 pointers. */
993int
994m32c_eh_return_data_regno (int n)
995{
996 switch (n)
997 {
998 case 0:
999 return A0_REGNO;
1000 case 1:
f09e0f74 1001 if (TARGET_A16)
1002 return R3_REGNO;
1003 else
1004 return R1_REGNO;
85c84d5c 1005 default:
1006 return INVALID_REGNUM;
1007 }
1008}
1009
1010/* Implements EH_RETURN_STACKADJ_RTX. Saved and used later in
1011 m32c_emit_eh_epilogue. */
1012rtx
1013m32c_eh_return_stackadj_rtx (void)
1014{
1015 if (!cfun->machine->eh_stack_adjust)
1016 {
1017 rtx sa;
1018
afde7ac7 1019 sa = gen_rtx_REG (Pmode, R0_REGNO);
85c84d5c 1020 cfun->machine->eh_stack_adjust = sa;
1021 }
1022 return cfun->machine->eh_stack_adjust;
1023}
1024
1025/* Registers That Address the Stack Frame */
1026
1027/* Implements DWARF_FRAME_REGNUM and DBX_REGISTER_NUMBER. Note that
1028 the original spec called for dwarf numbers to vary with register
1029 width as well, for example, r0l, r0, and r2r0 would each have
1030 different dwarf numbers. GCC doesn't support this, and we don't do
1031 it, and gdb seems to like it this way anyway. */
1032unsigned int
1033m32c_dwarf_frame_regnum (int n)
1034{
1035 switch (n)
1036 {
1037 case R0_REGNO:
1038 return 5;
1039 case R1_REGNO:
1040 return 6;
1041 case R2_REGNO:
1042 return 7;
1043 case R3_REGNO:
1044 return 8;
1045 case A0_REGNO:
1046 return 9;
1047 case A1_REGNO:
1048 return 10;
1049 case FB_REGNO:
1050 return 11;
1051 case SB_REGNO:
1052 return 19;
1053
1054 case SP_REGNO:
1055 return 12;
1056 case PC_REGNO:
1057 return 13;
1058 default:
1059 return DWARF_FRAME_REGISTERS + 1;
1060 }
1061}
1062
1063/* The frame looks like this:
1064
1065 ap -> +------------------------------
1066 | Return address (3 or 4 bytes)
1067 | Saved FB (2 or 4 bytes)
1068 fb -> +------------------------------
1069 | local vars
1070 | register saves fb
1071 | through r0 as needed
1072 sp -> +------------------------------
1073*/
1074
1075/* We use this to wrap all emitted insns in the prologue. */
1076static rtx
1077F (rtx x)
1078{
1079 RTX_FRAME_RELATED_P (x) = 1;
1080 return x;
1081}
1082
1083/* This maps register numbers to the PUSHM/POPM bitfield, and tells us
1084 how much the stack pointer moves for each, for each cpu family. */
1085static struct
1086{
1087 int reg1;
1088 int bit;
1089 int a16_bytes;
1090 int a24_bytes;
1091} pushm_info[] =
1092{
87eb9cbf 1093 /* These are in reverse push (nearest-to-sp) order. */
1094 { R0_REGNO, 0x80, 2, 2 },
85c84d5c 1095 { R1_REGNO, 0x40, 2, 2 },
87eb9cbf 1096 { R2_REGNO, 0x20, 2, 2 },
1097 { R3_REGNO, 0x10, 2, 2 },
1098 { A0_REGNO, 0x08, 2, 4 },
1099 { A1_REGNO, 0x04, 2, 4 },
1100 { SB_REGNO, 0x02, 2, 4 },
1101 { FB_REGNO, 0x01, 2, 4 }
85c84d5c 1102};
1103
1104#define PUSHM_N (sizeof(pushm_info)/sizeof(pushm_info[0]))
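/* For example, pushing just r0 and a0 on a TARGET_A24 part uses the
   PUSHM mask 0x80|0x08 = 0x88 and moves $sp by 2 + 4 = 6 bytes, per the
   table above.  */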
1105
1106/* Returns TRUE if we need to save/restore the given register. We
1107 save everything for exception handlers, so that any register can be
1108 unwound. For interrupt handlers, we save everything if the handler
1109 calls something else (because we don't know what *that* function
1110 might do), but try to be a bit smarter if the handler is a leaf
1111 function. We always save $a0, though, because we use that in the
c910419d 1112 epilogue to copy $fb to $sp. */
85c84d5c 1113static int
1114need_to_save (int regno)
1115{
1116 if (fixed_regs[regno])
1117 return 0;
6025a5e6 1118 if (crtl->calls_eh_return)
85c84d5c 1119 return 1;
1120 if (regno == FP_REGNO)
1121 return 0;
1122 if (cfun->machine->is_interrupt
cc24427c 1123 && (!cfun->machine->is_leaf
1124 || (regno == A0_REGNO
1125 && m32c_function_needs_enter ())
1126 ))
85c84d5c 1127 return 1;
3072d30e 1128 if (df_regs_ever_live_p (regno)
85c84d5c 1129 && (!call_used_regs[regno] || cfun->machine->is_interrupt))
1130 return 1;
1131 return 0;
1132}
1133
1134/* This function contains all the intelligence about saving and
1135 restoring registers. It always figures out the register save set.
1136 When called with PP_justcount, it merely returns the size of the
1137 save set (for eliminating the frame pointer, for example). When
1138 called with PP_pushm or PP_popm, it emits the appropriate
1139 instructions for saving (pushm) or restoring (popm) the
1140 registers. */
1141static int
1142m32c_pushm_popm (Push_Pop_Type ppt)
1143{
1144 int reg_mask = 0;
1145 int byte_count = 0, bytes;
1146 int i;
1147 rtx dwarf_set[PUSHM_N];
1148 int n_dwarfs = 0;
1149 int nosave_mask = 0;
1150
393edb51 1151 if (crtl->return_rtx
1152 && GET_CODE (crtl->return_rtx) == PARALLEL
6025a5e6 1153 && !(crtl->calls_eh_return || cfun->machine->is_interrupt))
85c84d5c 1154 {
393edb51 1155 rtx exp = XVECEXP (crtl->return_rtx, 0, 0);
85c84d5c 1156 rtx rv = XEXP (exp, 0);
1157 int rv_bytes = GET_MODE_SIZE (GET_MODE (rv));
1158
1159 if (rv_bytes > 2)
1160 nosave_mask |= 0x20; /* PSI, SI */
1161 else
1162 nosave_mask |= 0xf0; /* DF */
1163 if (rv_bytes > 4)
1164 nosave_mask |= 0x50; /* DI */
1165 }
1166
1167 for (i = 0; i < (int) PUSHM_N; i++)
1168 {
1169 /* Skip if neither register needs saving. */
1170 if (!need_to_save (pushm_info[i].reg1))
1171 continue;
1172
1173 if (pushm_info[i].bit & nosave_mask)
1174 continue;
1175
1176 reg_mask |= pushm_info[i].bit;
1177 bytes = TARGET_A16 ? pushm_info[i].a16_bytes : pushm_info[i].a24_bytes;
1178
1179 if (ppt == PP_pushm)
1180 {
1181 enum machine_mode mode = (bytes == 2) ? HImode : SImode;
1182 rtx addr;
1183
1184 /* Always use stack_pointer_rtx instead of calling
1185 rtx_gen_REG ourselves. Code elsewhere in GCC assumes
1186 that there is a single rtx representing the stack pointer,
1187 namely stack_pointer_rtx, and uses == to recognize it. */
1188 addr = stack_pointer_rtx;
1189
1190 if (byte_count != 0)
1191 addr = gen_rtx_PLUS (GET_MODE (addr), addr, GEN_INT (byte_count));
1192
1193 dwarf_set[n_dwarfs++] =
1194 gen_rtx_SET (VOIDmode,
1195 gen_rtx_MEM (mode, addr),
1196 gen_rtx_REG (mode, pushm_info[i].reg1));
1197 F (dwarf_set[n_dwarfs - 1]);
1198
1199 }
1200 byte_count += bytes;
1201 }
1202
1203 if (cfun->machine->is_interrupt)
1204 {
1205 cfun->machine->intr_pushm = reg_mask & 0xfe;
1206 reg_mask = 0;
1207 byte_count = 0;
1208 }
1209
1210 if (cfun->machine->is_interrupt)
1211 for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
1212 if (need_to_save (i))
1213 {
1214 byte_count += 2;
1215 cfun->machine->intr_pushmem[i - MEM0_REGNO] = 1;
1216 }
1217
1218 if (ppt == PP_pushm && byte_count)
1219 {
1220 rtx note = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (n_dwarfs + 1));
1221 rtx pushm;
1222
1223 if (reg_mask)
1224 {
1225 XVECEXP (note, 0, 0)
1226 = gen_rtx_SET (VOIDmode,
1227 stack_pointer_rtx,
1228 gen_rtx_PLUS (GET_MODE (stack_pointer_rtx),
1229 stack_pointer_rtx,
1230 GEN_INT (-byte_count)));
1231 F (XVECEXP (note, 0, 0));
1232
1233 for (i = 0; i < n_dwarfs; i++)
1234 XVECEXP (note, 0, i + 1) = dwarf_set[i];
1235
1236 pushm = F (emit_insn (gen_pushm (GEN_INT (reg_mask))));
1237
1675aa0a 1238 add_reg_note (pushm, REG_FRAME_RELATED_EXPR, note);
85c84d5c 1239 }
1240
1241 if (cfun->machine->is_interrupt)
1242 for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
1243 if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
1244 {
1245 if (TARGET_A16)
1246 pushm = emit_insn (gen_pushhi_16 (gen_rtx_REG (HImode, i)));
1247 else
1248 pushm = emit_insn (gen_pushhi_24 (gen_rtx_REG (HImode, i)));
1249 F (pushm);
1250 }
1251 }
1252 if (ppt == PP_popm && byte_count)
1253 {
85c84d5c 1254 if (cfun->machine->is_interrupt)
1255 for (i = MEM7_REGNO; i >= MEM0_REGNO; i--)
1256 if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
1257 {
1258 if (TARGET_A16)
84bb0cc5 1259 emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, i)));
85c84d5c 1260 else
84bb0cc5 1261 emit_insn (gen_pophi_24 (gen_rtx_REG (HImode, i)));
85c84d5c 1262 }
1263 if (reg_mask)
1264 emit_insn (gen_popm (GEN_INT (reg_mask)));
1265 }
1266
1267 return byte_count;
1268}
1269
1270/* Implements INITIAL_ELIMINATION_OFFSET. See the comment above that
1271 diagrams our call frame. */
1272int
1273m32c_initial_elimination_offset (int from, int to)
1274{
1275 int ofs = 0;
1276
1277 if (from == AP_REGNO)
1278 {
1279 if (TARGET_A16)
1280 ofs += 5;
1281 else
1282 ofs += 8;
1283 }
1284
1285 if (to == SP_REGNO)
1286 {
1287 ofs += m32c_pushm_popm (PP_justcount);
1288 ofs += get_frame_size ();
1289 }
1290
1291 /* Account for push rounding. */
1292 if (TARGET_A24)
1293 ofs = (ofs + 1) & ~1;
1294#if DEBUG0
1295 fprintf (stderr, "initial_elimination_offset from=%d to=%d, ofs=%d\n", from,
1296 to, ofs);
1297#endif
1298 return ofs;
1299}
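/* For example, eliminating AP into FB (TO != SP_REGNO) on a TARGET_A16
   part yields 5: the 3-byte return address plus the 2-byte saved $fb
   shown in the frame diagram above.  */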
1300
1301/* Passing Function Arguments on the Stack */
1302
85c84d5c 1303/* Implements PUSH_ROUNDING. The R8C and M16C have byte stacks, the
1304 M32C has word stacks. */
1675aa0a 1305unsigned int
85c84d5c 1306m32c_push_rounding (int n)
1307{
1308 if (TARGET_R8C || TARGET_M16C)
1309 return n;
1310 return (n + 1) & ~1;
1311}
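/* For example, m32c_push_rounding (1) is 1 on R8C/M16C (byte stack) but
   2 on M32C (word stack).  */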
1312
1313/* Passing Arguments in Registers */
1314
8e2cc24f 1315/* Implements TARGET_FUNCTION_ARG. Arguments are passed partly in
1316 registers, partly on stack. If our function returns a struct, a
1317 pointer to a buffer for it is at the top of the stack (last thing
1318 pushed). The first few real arguments may be in registers as
1319 follows:
85c84d5c 1320
1321 R8C/M16C: arg1 in r1 if it's QI or HI (else it's pushed on stack)
1322 arg2 in r2 if it's HI (else pushed on stack)
1323 rest on stack
1324 M32C: arg1 in r0 if it's QI or HI (else it's pushed on stack)
1325 rest on stack
1326
1327 Structs are not passed in registers, even if they fit. Only
1328 integer and pointer types are passed in registers.
1329
1330 Note that when arg1 doesn't fit in r1, arg2 may still be passed in
1331 r2 if it fits. */
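/* Illustrative example (assuming the default 16-bit int on these
   parts): for "void f (int a, int b, long c)", R8C/M16C passes A in r1
   and B in r2 with C on the stack, while M32C passes A in r0 and both B
   and C on the stack.  */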
8e2cc24f 1332#undef TARGET_FUNCTION_ARG
1333#define TARGET_FUNCTION_ARG m32c_function_arg
1334static rtx
39cba157 1335m32c_function_arg (cumulative_args_t ca_v,
8e2cc24f 1336 enum machine_mode mode, const_tree type, bool named)
85c84d5c 1337{
39cba157 1338 CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
1339
85c84d5c 1340 /* Can return a reg, parallel, or 0 for stack */
1341 rtx rv = NULL_RTX;
1342#if DEBUG0
1343 fprintf (stderr, "func_arg %d (%s, %d)\n",
1344 ca->parm_num, mode_name[mode], named);
1345 debug_tree (type);
1346#endif
1347
1348 if (mode == VOIDmode)
1349 return GEN_INT (0);
1350
1351 if (ca->force_mem || !named)
1352 {
1353#if DEBUG0
1354 fprintf (stderr, "func arg: force %d named %d, mem\n", ca->force_mem,
1355 named);
1356#endif
1357 return NULL_RTX;
1358 }
1359
1360 if (type && INTEGRAL_TYPE_P (type) && POINTER_TYPE_P (type))
1361 return NULL_RTX;
1362
87eb9cbf 1363 if (type && AGGREGATE_TYPE_P (type))
1364 return NULL_RTX;
1365
85c84d5c 1366 switch (ca->parm_num)
1367 {
1368 case 1:
1369 if (GET_MODE_SIZE (mode) == 1 || GET_MODE_SIZE (mode) == 2)
1370 rv = gen_rtx_REG (mode, TARGET_A16 ? R1_REGNO : R0_REGNO);
1371 break;
1372
1373 case 2:
1374 if (TARGET_A16 && GET_MODE_SIZE (mode) == 2)
1375 rv = gen_rtx_REG (mode, R2_REGNO);
1376 break;
1377 }
1378
1379#if DEBUG0
1380 debug_rtx (rv);
1381#endif
1382 return rv;
1383}
1384
1385#undef TARGET_PASS_BY_REFERENCE
1386#define TARGET_PASS_BY_REFERENCE m32c_pass_by_reference
1387static bool
39cba157 1388m32c_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
85c84d5c 1389 enum machine_mode mode ATTRIBUTE_UNUSED,
fb80456a 1390 const_tree type ATTRIBUTE_UNUSED,
85c84d5c 1391 bool named ATTRIBUTE_UNUSED)
1392{
1393 return 0;
1394}
1395
1396/* Implements INIT_CUMULATIVE_ARGS. */
1397void
1398m32c_init_cumulative_args (CUMULATIVE_ARGS * ca,
87eb9cbf 1399 tree fntype,
85c84d5c 1400 rtx libname ATTRIBUTE_UNUSED,
87eb9cbf 1401 tree fndecl,
85c84d5c 1402 int n_named_args ATTRIBUTE_UNUSED)
1403{
87eb9cbf 1404 if (fntype && aggregate_value_p (TREE_TYPE (fntype), fndecl))
1405 ca->force_mem = 1;
1406 else
1407 ca->force_mem = 0;
85c84d5c 1408 ca->parm_num = 1;
1409}
1410
8e2cc24f 1411/* Implements TARGET_FUNCTION_ARG_ADVANCE. force_mem is set for
1412 functions returning structures, so we always reset that. Otherwise,
1413 we only need to know the sequence number of the argument to know what
1414 to do with it. */
1415#undef TARGET_FUNCTION_ARG_ADVANCE
1416#define TARGET_FUNCTION_ARG_ADVANCE m32c_function_arg_advance
1417static void
39cba157 1418m32c_function_arg_advance (cumulative_args_t ca_v,
85c84d5c 1419 enum machine_mode mode ATTRIBUTE_UNUSED,
8e2cc24f 1420 const_tree type ATTRIBUTE_UNUSED,
1421 bool named ATTRIBUTE_UNUSED)
85c84d5c 1422{
39cba157 1423 CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
1424
85c84d5c 1425 if (ca->force_mem)
1426 ca->force_mem = 0;
87eb9cbf 1427 else
1428 ca->parm_num++;
85c84d5c 1429}
1430
bd99ba64 1431/* Implements TARGET_FUNCTION_ARG_BOUNDARY. */
1432#undef TARGET_FUNCTION_ARG_BOUNDARY
1433#define TARGET_FUNCTION_ARG_BOUNDARY m32c_function_arg_boundary
1434static unsigned int
1435m32c_function_arg_boundary (enum machine_mode mode ATTRIBUTE_UNUSED,
1436 const_tree type ATTRIBUTE_UNUSED)
1437{
1438 return (TARGET_A16 ? 8 : 16);
1439}
1440
85c84d5c 1441/* Implements FUNCTION_ARG_REGNO_P. */
1442int
1443m32c_function_arg_regno_p (int r)
1444{
1445 if (TARGET_A24)
1446 return (r == R0_REGNO);
1447 return (r == R1_REGNO || r == R2_REGNO);
1448}
1449
0a8d9665 1450/* HImode and PSImode are the two "native" modes as far as GCC is
c910419d 1451 concerned, but the chips also support a 32-bit mode which is used
0a8d9665 1452 for some opcodes in R8C/M16C and for reset vectors and such. */
1453#undef TARGET_VALID_POINTER_MODE
1454#define TARGET_VALID_POINTER_MODE m32c_valid_pointer_mode
3fd11504 1455static bool
0a8d9665 1456m32c_valid_pointer_mode (enum machine_mode mode)
1457{
0a8d9665 1458 if (mode == HImode
1459 || mode == PSImode
1460 || mode == SImode
1461 )
1462 return 1;
1463 return 0;
1464}
1465
85c84d5c 1466/* How Scalar Function Values Are Returned */
1467
f57d8b49 1468/* Implements TARGET_LIBCALL_VALUE. Most values are returned in $r0, or some
85c84d5c 1469 combination of registers starting there (r2r0 for longs, r3r1r2r0
1470 for long long, r3r2r1r0 for doubles), except that that ABI
1471 currently doesn't work because it ends up using all available
1472 general registers and gcc often can't compile it. So, instead, we
1473 return anything bigger than 16 bits in "mem0" (effectively, a
1474 memory location). */
f57d8b49 1475
1476#undef TARGET_LIBCALL_VALUE
1477#define TARGET_LIBCALL_VALUE m32c_libcall_value
1478
1479static rtx
1480m32c_libcall_value (enum machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
85c84d5c 1481{
1482 /* return reg or parallel */
1483#if 0
1484 /* FIXME: GCC has difficulty returning large values in registers,
1485 because that ties up most of the general registers and gives the
1486 register allocator little to work with. Until we can resolve
1487 this, large values are returned in memory. */
1488 if (mode == DFmode)
1489 {
1490 rtx rv;
1491
1492 rv = gen_rtx_PARALLEL (mode, rtvec_alloc (4));
1493 XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
1494 gen_rtx_REG (HImode,
1495 R0_REGNO),
1496 GEN_INT (0));
1497 XVECEXP (rv, 0, 1) = gen_rtx_EXPR_LIST (VOIDmode,
1498 gen_rtx_REG (HImode,
1499 R1_REGNO),
1500 GEN_INT (2));
1501 XVECEXP (rv, 0, 2) = gen_rtx_EXPR_LIST (VOIDmode,
1502 gen_rtx_REG (HImode,
1503 R2_REGNO),
1504 GEN_INT (4));
1505 XVECEXP (rv, 0, 3) = gen_rtx_EXPR_LIST (VOIDmode,
1506 gen_rtx_REG (HImode,
1507 R3_REGNO),
1508 GEN_INT (6));
1509 return rv;
1510 }
1511
1512 if (TARGET_A24 && GET_MODE_SIZE (mode) > 2)
1513 {
1514 rtx rv;
1515
1516 rv = gen_rtx_PARALLEL (mode, rtvec_alloc (1));
1517 XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
1518 gen_rtx_REG (mode,
1519 R0_REGNO),
1520 GEN_INT (0));
1521 return rv;
1522 }
1523#endif
1524
1525 if (GET_MODE_SIZE (mode) > 2)
1526 return gen_rtx_REG (mode, MEM0_REGNO);
1527 return gen_rtx_REG (mode, R0_REGNO);
1528}
1529
f57d8b49 1530/* Implements TARGET_FUNCTION_VALUE. Functions and libcalls have the same
85c84d5c 1531 conventions. */
f57d8b49 1532
1533#undef TARGET_FUNCTION_VALUE
1534#define TARGET_FUNCTION_VALUE m32c_function_value
1535
1536static rtx
1537m32c_function_value (const_tree valtype,
1538 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
1539 bool outgoing ATTRIBUTE_UNUSED)
85c84d5c 1540{
1541 /* return reg or parallel */
fb80456a 1542 const enum machine_mode mode = TYPE_MODE (valtype);
f57d8b49 1543 return m32c_libcall_value (mode, NULL_RTX);
1544}
1545
1722522a 1546/* Implements TARGET_FUNCTION_VALUE_REGNO_P. */
1547
1548#undef TARGET_FUNCTION_VALUE_REGNO_P
1549#define TARGET_FUNCTION_VALUE_REGNO_P m32c_function_value_regno_p
f57d8b49 1550
1722522a 1551static bool
f57d8b49 1552m32c_function_value_regno_p (const unsigned int regno)
1553{
1554 return (regno == R0_REGNO || regno == MEM0_REGNO);
85c84d5c 1555}
1556
1557/* How Large Values Are Returned */
1558
1559/* We return structures by pushing the address on the stack, even if
1560 we use registers for the first few "real" arguments. */
1561#undef TARGET_STRUCT_VALUE_RTX
1562#define TARGET_STRUCT_VALUE_RTX m32c_struct_value_rtx
1563static rtx
1564m32c_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
1565 int incoming ATTRIBUTE_UNUSED)
1566{
1567 return 0;
1568}
1569
1570/* Function Entry and Exit */
1571
1572/* Implements EPILOGUE_USES. Interrupts restore all registers. */
1573int
1574m32c_epilogue_uses (int regno ATTRIBUTE_UNUSED)
1575{
1576 if (cfun->machine->is_interrupt)
1577 return 1;
1578 return 0;
1579}
1580
1581/* Implementing the Varargs Macros */
1582
1583#undef TARGET_STRICT_ARGUMENT_NAMING
1584#define TARGET_STRICT_ARGUMENT_NAMING m32c_strict_argument_naming
1585static bool
39cba157 1586m32c_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
85c84d5c 1587{
1588 return 1;
1589}
1590
1591/* Trampolines for Nested Functions */
1592
1593/*
1594 m16c:
1595 1 0000 75C43412 mov.w #0x1234,a0
1596 2 0004 FC000000 jmp.a label
1597
1598 m32c:
1599 1 0000 BC563412 mov.l:s #0x123456,a0
1600 2 0004 CC000000 jmp.a label
1601*/
1602
1603/* Implements TRAMPOLINE_SIZE. */
1604int
1605m32c_trampoline_size (void)
1606{
1607 /* Allocate extra space so we can avoid the messy shifts when we
1608 initialize the trampoline; we just write past the end of the
1609 opcode. */
1610 return TARGET_A16 ? 8 : 10;
1611}
1612
1613/* Implements TRAMPOLINE_ALIGNMENT. */
1614int
1615m32c_trampoline_alignment (void)
1616{
1617 return 2;
1618}
1619
557e8bdb 1620/* Implements TARGET_TRAMPOLINE_INIT. */
1621
1622#undef TARGET_TRAMPOLINE_INIT
1623#define TARGET_TRAMPOLINE_INIT m32c_trampoline_init
1624static void
1625m32c_trampoline_init (rtx m_tramp, tree fndecl, rtx chainval)
85c84d5c 1626{
557e8bdb 1627 rtx function = XEXP (DECL_RTL (fndecl), 0);
1628
1629#define A0(m,i) adjust_address (m_tramp, m, i)
85c84d5c 1630 if (TARGET_A16)
1631 {
1632 /* Note: we subtract a "word" because the moves want signed
1633 constants, not unsigned constants. */
1634 emit_move_insn (A0 (HImode, 0), GEN_INT (0xc475 - 0x10000));
1635 emit_move_insn (A0 (HImode, 2), chainval);
1636 emit_move_insn (A0 (QImode, 4), GEN_INT (0xfc - 0x100));
c910419d 1637 /* We use 16-bit addresses here, but store the zero to turn it
1638 into a 24-bit offset. */
85c84d5c 1639 emit_move_insn (A0 (HImode, 5), function);
1640 emit_move_insn (A0 (QImode, 7), GEN_INT (0x00));
1641 }
1642 else
1643 {
1644 /* Note that the PSI moves actually write 4 bytes. Make sure we
1645 write stuff out in the right order, and leave room for the
1646 extra byte at the end. */
1647 emit_move_insn (A0 (QImode, 0), GEN_INT (0xbc - 0x100));
1648 emit_move_insn (A0 (PSImode, 1), chainval);
1649 emit_move_insn (A0 (QImode, 4), GEN_INT (0xcc - 0x100));
1650 emit_move_insn (A0 (PSImode, 5), function);
1651 }
1652#undef A0
1653}
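/* The constants stored above line up with the opcode listings in the
   comment before m32c_trampoline_size: bytes 0x75 0xc4 (mov.w #imm,a0)
   and 0xfc (jmp.a) for the A16 parts, 0xbc (mov.l:s #imm,a0) and 0xcc
   (jmp.a) for the A24 parts, with the static chain and the function
   address written into the immediate fields.  */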
1654
1655/* Addressing Modes */
1656
fd50b071 1657/* The r8c/m32c family supports a wide range of non-orthogonal
1658 addressing modes, including the ability to double-indirect on *some*
1659 of them. Not all insns support all modes, either, but we rely on
1660 predicates and constraints to deal with that. */
1661#undef TARGET_LEGITIMATE_ADDRESS_P
1662#define TARGET_LEGITIMATE_ADDRESS_P m32c_legitimate_address_p
1663bool
1664m32c_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
85c84d5c 1665{
1666 int mode_adjust;
1667 if (CONSTANT_P (x))
1668 return 1;
1669
d9530df8 1670 if (TARGET_A16 && GET_MODE (x) != HImode && GET_MODE (x) != SImode)
1671 return 0;
1672 if (TARGET_A24 && GET_MODE (x) != PSImode)
1673 return 0;
1674
85c84d5c 1675 /* Wide references to memory will be split after reload, so we must
1676 ensure that all parts of such splits remain legitimate
1677 addresses. */
1678 mode_adjust = GET_MODE_SIZE (mode) - 1;
1679
1680 /* allowing PLUS yields mem:HI(plus:SI(mem:SI(plus:SI in m32c_split_move */
1681 if (GET_CODE (x) == PRE_DEC
1682 || GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_MODIFY)
1683 {
1684 return (GET_CODE (XEXP (x, 0)) == REG
1685 && REGNO (XEXP (x, 0)) == SP_REGNO);
1686 }
1687
1688#if 0
1689 /* This is the double indirection detection, but it currently
1690 doesn't work as cleanly as this code implies, so until we've had
1691 a chance to debug it, leave it disabled. */
1692 if (TARGET_A24 && GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) != PLUS)
1693 {
1694#if DEBUG_DOUBLE
1695 fprintf (stderr, "double indirect\n");
1696#endif
1697 x = XEXP (x, 0);
1698 }
1699#endif
1700
1701 encode_pattern (x);
1702 if (RTX_IS ("r"))
1703 {
1704 /* Most indexable registers can be used without displacements,
1705 although some of them will be emitted with an explicit zero
1706 to please the assembler. */
1707 switch (REGNO (patternr[0]))
1708 {
85c84d5c 1709 case A1_REGNO:
1710 case SB_REGNO:
1711 case FB_REGNO:
1712 case SP_REGNO:
d9530df8 1713 if (TARGET_A16 && GET_MODE (x) == SImode)
1714 return 0;
1715 case A0_REGNO:
85c84d5c 1716 return 1;
1717
1718 default:
1719 if (IS_PSEUDO (patternr[0], strict))
1720 return 1;
1721 return 0;
1722 }
1723 }
d9530df8 1724
1725 if (TARGET_A16 && GET_MODE (x) == SImode)
1726 return 0;
1727
85c84d5c 1728 if (RTX_IS ("+ri"))
1729 {
1730 /* This is more interesting, because different base registers
1731 allow for different displacements - both range and signedness
1732 - and it differs from chip series to chip series too. */
1733 int rn = REGNO (patternr[1]);
1734 HOST_WIDE_INT offs = INTVAL (patternr[2]);
1735 switch (rn)
1736 {
1737 case A0_REGNO:
1738 case A1_REGNO:
1739 case SB_REGNO:
1740 /* The syntax only allows positive offsets, but when the
1741 offsets span the entire memory range, we can simulate
1742 negative offsets by wrapping. */
1743 if (TARGET_A16)
1744 return (offs >= -65536 && offs <= 65535 - mode_adjust);
1745 if (rn == SB_REGNO)
1746 return (offs >= 0 && offs <= 65535 - mode_adjust);
1747 /* A0 or A1 */
1748 return (offs >= -16777216 && offs <= 16777215);
1749
1750 case FB_REGNO:
1751 if (TARGET_A16)
1752 return (offs >= -128 && offs <= 127 - mode_adjust);
1753 return (offs >= -65536 && offs <= 65535 - mode_adjust);
1754
1755 case SP_REGNO:
1756 return (offs >= -128 && offs <= 127 - mode_adjust);
1757
1758 default:
1759 if (IS_PSEUDO (patternr[1], strict))
1760 return 1;
1761 return 0;
1762 }
1763 }
1764 if (RTX_IS ("+rs") || RTX_IS ("+r+si"))
1765 {
1766 rtx reg = patternr[1];
1767
1768 /* We don't know where the symbol is, so only allow base
1769 registers which support displacements spanning the whole
1770 address range. */
1771 switch (REGNO (reg))
1772 {
1773 case A0_REGNO:
1774 case A1_REGNO:
1775 /* $sb needs a secondary reload, but since it's involved in
1776 memory address reloads too, we don't deal with it very
1777 well. */
1778 /* case SB_REGNO: */
1779 return 1;
1780 default:
1781 if (IS_PSEUDO (reg, strict))
1782 return 1;
1783 return 0;
1784 }
1785 }
1786 return 0;
1787}
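/* For example, with TARGET_A16 an HImode access at -100[$fb] is accepted
   by the FB_REGNO case above (-128 <= -100 <= 127 - 1), while -200[$fb]
   is rejected here and instead handled by m32c_legitimize_address
   further below.  */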
1788
1789/* Implements REG_OK_FOR_BASE_P. */
1790int
1791m32c_reg_ok_for_base_p (rtx x, int strict)
1792{
1793 if (GET_CODE (x) != REG)
1794 return 0;
1795 switch (REGNO (x))
1796 {
1797 case A0_REGNO:
1798 case A1_REGNO:
1799 case SB_REGNO:
1800 case FB_REGNO:
1801 case SP_REGNO:
1802 return 1;
1803 default:
1804 if (IS_PSEUDO (x, strict))
1805 return 1;
1806 return 0;
1807 }
1808}
1809
71d46ffa 1810/* We have three ways of choosing fb->aN offsets. If we choose -128,
c910419d 1811 we need one MOVA -128[fb],aN opcode and 16-bit aN displacements,
71d46ffa 1812 like this:
1813 EB 4B FF mova -128[$fb],$a0
1814 D8 0C FF FF mov.w:Q #0,-1[$a0]
1815
c910419d 1816 Alternately, we subtract the frame size, and hopefully use 8-bit aN
71d46ffa 1817 displacements:
1818 7B F4 stc $fb,$a0
1819 77 54 00 01 sub #256,$a0
1820 D8 08 01 mov.w:Q #0,1[$a0]
1821
1822 If we don't offset (i.e. offset by zero), we end up with:
1823 7B F4 stc $fb,$a0
1824 D8 0C 00 FF mov.w:Q #0,-256[$a0]
1825
1826 We have to subtract *something* so that we have a PLUS rtx to mark
1827 that we've done this reload. The -128 offset will never result in
c910419d 1828 an 8-bit aN offset, and the payoff for the second case is five
71d46ffa 1829 loads *if* those loads are within 256 bytes of the other end of the
1830 frame, so the third case seems best. Note that we subtract the
1831 zero, but detect that in the addhi3 pattern. */
1832
25fe2cca 1833#define BIG_FB_ADJ 0
1834
85c84d5c 1835/* Implements LEGITIMIZE_ADDRESS. The only address we really have to
1836 worry about is frame base offsets, as $fb has a limited
1837 displacement range. We deal with this by attempting to reload $fb
1838 itself into an address register; that seems to result in the best
1839 code. */
41e3a0c7 1840#undef TARGET_LEGITIMIZE_ADDRESS
1841#define TARGET_LEGITIMIZE_ADDRESS m32c_legitimize_address
1842static rtx
1843m32c_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
1844 enum machine_mode mode)
85c84d5c 1845{
1846#if DEBUG0
1847 fprintf (stderr, "m32c_legitimize_address for mode %s\n", mode_name[mode]);
41e3a0c7 1848 debug_rtx (x);
85c84d5c 1849 fprintf (stderr, "\n");
1850#endif
1851
41e3a0c7 1852 if (GET_CODE (x) == PLUS
1853 && GET_CODE (XEXP (x, 0)) == REG
1854 && REGNO (XEXP (x, 0)) == FB_REGNO
1855 && GET_CODE (XEXP (x, 1)) == CONST_INT
1856 && (INTVAL (XEXP (x, 1)) < -128
1857 || INTVAL (XEXP (x, 1)) > (128 - GET_MODE_SIZE (mode))))
85c84d5c 1858 {
1859 /* reload FB to A_REGS */
85c84d5c 1860 rtx temp = gen_reg_rtx (Pmode);
41e3a0c7 1861 x = copy_rtx (x);
1862 emit_insn (gen_rtx_SET (VOIDmode, temp, XEXP (x, 0)));
1863 XEXP (x, 0) = temp;
85c84d5c 1864 }
1865
41e3a0c7 1866 return x;
85c84d5c 1867}
1868
1869/* Implements LEGITIMIZE_RELOAD_ADDRESS. See comment above. */
1870int
1871m32c_legitimize_reload_address (rtx * x,
1872 enum machine_mode mode,
1873 int opnum,
1874 int type, int ind_levels ATTRIBUTE_UNUSED)
1875{
1876#if DEBUG0
1877 fprintf (stderr, "\nm32c_legitimize_reload_address for mode %s\n",
1878 mode_name[mode]);
1879 debug_rtx (*x);
1880#endif
1881
1882 /* At one point, this function tried to get $fb copied to an address
1883 register, which in theory would maximize sharing, but gcc was
1884 *also* still trying to reload the whole address, and we'd run out
1885 of address registers. So we let gcc do the naive (but safe)
1886 reload instead, when the above function doesn't handle it for
71d46ffa 1887 us.
1888
1889 The code below is a second attempt at the above. */
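/* A sketch of what the first case below produces (the offset 200 is a
   made-up example; BIG_FB_ADJ is currently 0): (plus $fb 200) is
   rewritten as (plus (plus $fb 0) 200) and the inner sum is pushed as
   a reload into A_REGS, so the large displacement ends up applied to
   an address register rather than to $fb directly.  */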
1890
1891 if (GET_CODE (*x) == PLUS
1892 && GET_CODE (XEXP (*x, 0)) == REG
1893 && REGNO (XEXP (*x, 0)) == FB_REGNO
1894 && GET_CODE (XEXP (*x, 1)) == CONST_INT
1895 && (INTVAL (XEXP (*x, 1)) < -128
1896 || INTVAL (XEXP (*x, 1)) > (128 - GET_MODE_SIZE (mode))))
1897 {
1898 rtx sum;
1899 int offset = INTVAL (XEXP (*x, 1));
1900 int adjustment = -BIG_FB_ADJ;
1901
1902 sum = gen_rtx_PLUS (Pmode, XEXP (*x, 0),
1903 GEN_INT (adjustment));
1904 *x = gen_rtx_PLUS (Pmode, sum, GEN_INT (offset - adjustment));
1905 if (type == RELOAD_OTHER)
1906 type = RELOAD_FOR_OTHER_ADDRESS;
1907 push_reload (sum, NULL_RTX, &XEXP (*x, 0), NULL,
1908 A_REGS, Pmode, VOIDmode, 0, 0, opnum,
1675aa0a 1909 (enum reload_type) type);
71d46ffa 1910 return 1;
1911 }
1912
1913 if (GET_CODE (*x) == PLUS
1914 && GET_CODE (XEXP (*x, 0)) == PLUS
1915 && GET_CODE (XEXP (XEXP (*x, 0), 0)) == REG
1916 && REGNO (XEXP (XEXP (*x, 0), 0)) == FB_REGNO
1917 && GET_CODE (XEXP (XEXP (*x, 0), 1)) == CONST_INT
1918 && GET_CODE (XEXP (*x, 1)) == CONST_INT
1919 )
1920 {
1921 if (type == RELOAD_OTHER)
1922 type = RELOAD_FOR_OTHER_ADDRESS;
1923 push_reload (XEXP (*x, 0), NULL_RTX, &XEXP (*x, 0), NULL,
1924 A_REGS, Pmode, VOIDmode, 0, 0, opnum,
1675aa0a 1925 (enum reload_type) type);
71d46ffa 1926 return 1;
1927 }
85c84d5c 1928
1929 return 0;
1930}
1931
d9530df8 1932/* Return the appropriate mode for a named address pointer. */
1933#undef TARGET_ADDR_SPACE_POINTER_MODE
1934#define TARGET_ADDR_SPACE_POINTER_MODE m32c_addr_space_pointer_mode
1935static enum machine_mode
1936m32c_addr_space_pointer_mode (addr_space_t addrspace)
1937{
1938 switch (addrspace)
1939 {
1940 case ADDR_SPACE_GENERIC:
1941 return TARGET_A24 ? PSImode : HImode;
1942 case ADDR_SPACE_FAR:
1943 return SImode;
1944 default:
1945 gcc_unreachable ();
1946 }
1947}
1948
1949/* Return the appropriate mode for a named address address. */
1950#undef TARGET_ADDR_SPACE_ADDRESS_MODE
1951#define TARGET_ADDR_SPACE_ADDRESS_MODE m32c_addr_space_address_mode
1952static enum machine_mode
1953m32c_addr_space_address_mode (addr_space_t addrspace)
1954{
1955 switch (addrspace)
1956 {
1957 case ADDR_SPACE_GENERIC:
1958 return TARGET_A24 ? PSImode : HImode;
1959 case ADDR_SPACE_FAR:
1960 return SImode;
1961 default:
1962 gcc_unreachable ();
1963 }
1964}
1965
1966/* Like m32c_legitimate_address_p, except with named addresses. */
1967#undef TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P
1968#define TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P \
1969 m32c_addr_space_legitimate_address_p
1970static bool
1971m32c_addr_space_legitimate_address_p (enum machine_mode mode, rtx x,
1972 bool strict, addr_space_t as)
1973{
1974 if (as == ADDR_SPACE_FAR)
1975 {
1976 if (TARGET_A24)
1977 return 0;
1978 encode_pattern (x);
1979 if (RTX_IS ("r"))
1980 {
1981 if (GET_MODE (x) != SImode)
1982 return 0;
1983 switch (REGNO (patternr[0]))
1984 {
1985 case A0_REGNO:
1986 return 1;
1987
1988 default:
1989 if (IS_PSEUDO (patternr[0], strict))
1990 return 1;
1991 return 0;
1992 }
1993 }
1994 if (RTX_IS ("+^Sri"))
1995 {
1996 int rn = REGNO (patternr[3]);
1997 HOST_WIDE_INT offs = INTVAL (patternr[4]);
1998 if (GET_MODE (patternr[3]) != HImode)
1999 return 0;
2000 switch (rn)
2001 {
2002 case A0_REGNO:
2003 return (offs >= 0 && offs <= 0xfffff);
2004
2005 default:
2006 if (IS_PSEUDO (patternr[3], strict))
2007 return 1;
2008 return 0;
2009 }
2010 }
2011 if (RTX_IS ("+^Srs"))
2012 {
2013 int rn = REGNO (patternr[3]);
2014 if (GET_MODE (patternr[3]) != HImode)
2015 return 0;
2016 switch (rn)
2017 {
2018 case A0_REGNO:
2019 return 1;
2020
2021 default:
2022 if (IS_PSEUDO (patternr[3], strict))
2023 return 1;
2024 return 0;
2025 }
2026 }
2027 if (RTX_IS ("+^S+ris"))
2028 {
2029 int rn = REGNO (patternr[4]);
2030 if (GET_MODE (patternr[4]) != HImode)
2031 return 0;
2032 switch (rn)
2033 {
2034 case A0_REGNO:
2035 return 1;
2036
2037 default:
2038 if (IS_PSEUDO (patternr[4], strict))
2039 return 1;
2040 return 0;
2041 }
2042 }
2043 if (RTX_IS ("s"))
2044 {
2045 return 1;
2046 }
2047 return 0;
2048 }
2049
2050 else if (as != ADDR_SPACE_GENERIC)
2051 gcc_unreachable ();
2052
2053 return m32c_legitimate_address_p (mode, x, strict);
2054}
2055
2056/* Like m32c_legitimate_address, except with named address support. */
2057#undef TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS
2058#define TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS m32c_addr_space_legitimize_address
2059static rtx
2060m32c_addr_space_legitimize_address (rtx x, rtx oldx, enum machine_mode mode,
2061 addr_space_t as)
2062{
2063 if (as != ADDR_SPACE_GENERIC)
2064 {
2065#if DEBUG0
2066 fprintf (stderr, "\033[36mm32c_addr_space_legitimize_address for mode %s\033[0m\n", mode_name[mode]);
2067 debug_rtx (x);
2068 fprintf (stderr, "\n");
2069#endif
2070
2071 if (GET_CODE (x) != REG)
2072 {
2073 x = force_reg (SImode, x);
2074 }
2075 return x;
2076 }
2077
2078 return m32c_legitimize_address (x, oldx, mode);
2079}
2080
2081/* Determine if one named address space is a subset of another. */
2082#undef TARGET_ADDR_SPACE_SUBSET_P
2083#define TARGET_ADDR_SPACE_SUBSET_P m32c_addr_space_subset_p
2084static bool
2085m32c_addr_space_subset_p (addr_space_t subset, addr_space_t superset)
2086{
2087 gcc_assert (subset == ADDR_SPACE_GENERIC || subset == ADDR_SPACE_FAR);
2088 gcc_assert (superset == ADDR_SPACE_GENERIC || superset == ADDR_SPACE_FAR);
2089
2090 if (subset == superset)
2091 return true;
2092
2093 else
2094 return (subset == ADDR_SPACE_GENERIC && superset == ADDR_SPACE_FAR);
2095}
2096
2097#undef TARGET_ADDR_SPACE_CONVERT
2098#define TARGET_ADDR_SPACE_CONVERT m32c_addr_space_convert
2099/* Convert from one address space to another. */
2100static rtx
2101m32c_addr_space_convert (rtx op, tree from_type, tree to_type)
2102{
2103 addr_space_t from_as = TYPE_ADDR_SPACE (TREE_TYPE (from_type));
2104 addr_space_t to_as = TYPE_ADDR_SPACE (TREE_TYPE (to_type));
2105 rtx result;
2106
2107 gcc_assert (from_as == ADDR_SPACE_GENERIC || from_as == ADDR_SPACE_FAR);
2108 gcc_assert (to_as == ADDR_SPACE_GENERIC || to_as == ADDR_SPACE_FAR);
2109
2110 if (to_as == ADDR_SPACE_GENERIC && from_as == ADDR_SPACE_FAR)
2111 {
2112 /* This is unpredictable, as we're truncating off usable address
2113 bits. */
2114
2115 result = gen_reg_rtx (HImode);
2116 emit_move_insn (result, simplify_subreg (HImode, op, SImode, 0));
2117 return result;
2118 }
2119 else if (to_as == ADDR_SPACE_FAR && from_as == ADDR_SPACE_GENERIC)
2120 {
2121 /* This always works. */
2122 result = gen_reg_rtx (SImode);
2123 emit_insn (gen_zero_extendhisi2 (result, op));
2124 return result;
2125 }
2126 else
2127 gcc_unreachable ();
2128}
2129
85c84d5c 2130/* Condition Code Status */
2131
2132#undef TARGET_FIXED_CONDITION_CODE_REGS
2133#define TARGET_FIXED_CONDITION_CODE_REGS m32c_fixed_condition_code_regs
2134static bool
2135m32c_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
2136{
2137 *p1 = FLG_REGNO;
2138 *p2 = INVALID_REGNUM;
2139 return true;
2140}
2141
2142/* Describing Relative Costs of Operations */
2143
4cf1a89b 2144/* Implements TARGET_REGISTER_MOVE_COST. We make impossible moves
85c84d5c 2145 prohibitively expensive, like trying to put QIs in r2/r3 (there are
2146 no opcodes to do that). We also discourage use of mem* registers
2147 since they're really memory. */
4cf1a89b 2148
2149#undef TARGET_REGISTER_MOVE_COST
2150#define TARGET_REGISTER_MOVE_COST m32c_register_move_cost
2151
2152static int
2153m32c_register_move_cost (enum machine_mode mode, reg_class_t from,
2154 reg_class_t to)
85c84d5c 2155{
2156 int cost = COSTS_N_INSNS (3);
4cf1a89b 2157 HARD_REG_SET cc;
2158
2159/* FIXME: pick real values, but not 2 for now. */
2160 COPY_HARD_REG_SET (cc, reg_class_contents[(int) from]);
2161 IOR_HARD_REG_SET (cc, reg_class_contents[(int) to]);
2162
2163 if (mode == QImode
2164 && hard_reg_set_intersect_p (cc, reg_class_contents[R23_REGS]))
85c84d5c 2165 {
4cf1a89b 2166 if (hard_reg_set_subset_p (cc, reg_class_contents[R23_REGS]))
85c84d5c 2167 cost = COSTS_N_INSNS (1000);
2168 else
2169 cost = COSTS_N_INSNS (80);
2170 }
2171
2172 if (!class_can_hold_mode (from, mode) || !class_can_hold_mode (to, mode))
2173 cost = COSTS_N_INSNS (1000);
2174
4cf1a89b 2175 if (reg_classes_intersect_p (from, CR_REGS))
85c84d5c 2176 cost += COSTS_N_INSNS (5);
2177
4cf1a89b 2178 if (reg_classes_intersect_p (to, CR_REGS))
85c84d5c 2179 cost += COSTS_N_INSNS (5);
2180
2181 if (from == MEM_REGS || to == MEM_REGS)
2182 cost += COSTS_N_INSNS (50);
4cf1a89b 2183 else if (reg_classes_intersect_p (from, MEM_REGS)
2184 || reg_classes_intersect_p (to, MEM_REGS))
85c84d5c 2185 cost += COSTS_N_INSNS (10);
2186
2187#if DEBUG0
2188 fprintf (stderr, "register_move_cost %s from %s to %s = %d\n",
4cf1a89b 2189 mode_name[mode], class_names[(int) from], class_names[(int) to],
2190 cost);
85c84d5c 2191#endif
2192 return cost;
2193}
2194
4cf1a89b 2195/* Implements TARGET_MEMORY_MOVE_COST. */
2196
2197#undef TARGET_MEMORY_MOVE_COST
2198#define TARGET_MEMORY_MOVE_COST m32c_memory_move_cost
2199
2200static int
85c84d5c 2201m32c_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
4cf1a89b 2202 reg_class_t rclass ATTRIBUTE_UNUSED,
2203 bool in ATTRIBUTE_UNUSED)
85c84d5c 2204{
2205 /* FIXME: pick real values. */
2206 return COSTS_N_INSNS (10);
2207}
2208
fedc146b 2209/* Here we try to describe when we use multiple opcodes for one RTX so
2210 that gcc knows when to use them. */
2211#undef TARGET_RTX_COSTS
2212#define TARGET_RTX_COSTS m32c_rtx_costs
2213static bool
20d892d1 2214m32c_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
2215 int *total, bool speed ATTRIBUTE_UNUSED)
fedc146b 2216{
2217 switch (code)
2218 {
2219 case REG:
2220 if (REGNO (x) >= MEM0_REGNO && REGNO (x) <= MEM7_REGNO)
2221 *total += COSTS_N_INSNS (500);
2222 else
2223 *total += COSTS_N_INSNS (1);
2224 return true;
2225
2226 case ASHIFT:
2227 case LSHIFTRT:
2228 case ASHIFTRT:
2229 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
2230 {
2231 /* mov.b r1l, r1h */
2232 *total += COSTS_N_INSNS (1);
2233 return true;
2234 }
2235 if (INTVAL (XEXP (x, 1)) > 8
2236 || INTVAL (XEXP (x, 1)) < -8)
2237 {
2238 /* mov.b #N, r1l */
2239 /* mov.b r1l, r1h */
2240 *total += COSTS_N_INSNS (2);
2241 return true;
2242 }
2243 return true;
2244
2245 case LE:
2246 case LEU:
2247 case LT:
2248 case LTU:
2249 case GT:
2250 case GTU:
2251 case GE:
2252 case GEU:
2253 case NE:
2254 case EQ:
2255 if (outer_code == SET)
2256 {
2257 *total += COSTS_N_INSNS (2);
2258 return true;
2259 }
2260 break;
2261
2262 case ZERO_EXTRACT:
2263 {
2264 rtx dest = XEXP (x, 0);
2265 rtx addr = XEXP (dest, 0);
2266 switch (GET_CODE (addr))
2267 {
2268 case CONST_INT:
2269 *total += COSTS_N_INSNS (1);
2270 break;
2271 case SYMBOL_REF:
2272 *total += COSTS_N_INSNS (3);
2273 break;
2274 default:
2275 *total += COSTS_N_INSNS (2);
2276 break;
2277 }
2278 return true;
2279 }
2280 break;
2281
2282 default:
2283 /* Reasonable default. */
2284 if (TARGET_A16 && GET_MODE(x) == SImode)
2285 *total += COSTS_N_INSNS (2);
2286 break;
2287 }
2288 return false;
2289}
2290
2291#undef TARGET_ADDRESS_COST
2292#define TARGET_ADDRESS_COST m32c_address_cost
2293static int
d9c5e5f4 2294m32c_address_cost (rtx addr, enum machine_mode mode ATTRIBUTE_UNUSED,
2295 addr_space_t as ATTRIBUTE_UNUSED,
2296 bool speed ATTRIBUTE_UNUSED)
fedc146b 2297{
a8651e7d 2298 int i;
fedc146b 2299 /* fprintf(stderr, "\naddress_cost\n");
2300 debug_rtx(addr);*/
2301 switch (GET_CODE (addr))
2302 {
2303 case CONST_INT:
a8651e7d 2304 i = INTVAL (addr);
2305 if (i == 0)
2306 return COSTS_N_INSNS(1);
2307 if (0 < i && i <= 255)
2308 return COSTS_N_INSNS(2);
2309 if (0 < i && i <= 65535)
2310 return COSTS_N_INSNS(3);
2311 return COSTS_N_INSNS(4);
fedc146b 2312 case SYMBOL_REF:
a8651e7d 2313 return COSTS_N_INSNS(4);
fedc146b 2314 case REG:
a8651e7d 2315 return COSTS_N_INSNS(1);
2316 case PLUS:
2317 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
2318 {
2319 i = INTVAL (XEXP (addr, 1));
2320 if (i == 0)
2321 return COSTS_N_INSNS(1);
2322 if (0 < i && i <= 255)
2323 return COSTS_N_INSNS(2);
2324 if (0 < i && i <= 65535)
2325 return COSTS_N_INSNS(3);
2326 }
2327 return COSTS_N_INSNS(4);
fedc146b 2328 default:
2329 return 0;
2330 }
2331}
2332
85c84d5c 2333/* Defining the Output Assembler Language */
2334
85c84d5c 2335/* Output of Data */
2336
2337/* We may have 24-bit sizes, which is the native address size.
2338 Currently unused, but provided for completeness. */
2339#undef TARGET_ASM_INTEGER
2340#define TARGET_ASM_INTEGER m32c_asm_integer
2341static bool
2342m32c_asm_integer (rtx x, unsigned int size, int aligned_p)
2343{
2344 switch (size)
2345 {
2346 case 3:
2347 fprintf (asm_out_file, "\t.3byte\t");
2348 output_addr_const (asm_out_file, x);
2349 fputc ('\n', asm_out_file);
2350 return true;
0a8d9665 2351 case 4:
2352 if (GET_CODE (x) == SYMBOL_REF)
2353 {
2354 fprintf (asm_out_file, "\t.long\t");
2355 output_addr_const (asm_out_file, x);
2356 fputc ('\n', asm_out_file);
2357 return true;
2358 }
2359 break;
85c84d5c 2360 }
2361 return default_assemble_integer (x, size, aligned_p);
2362}
2363
2364/* Output of Assembler Instructions */
2365
23943319 2366/* We use a lookup table because the addressing modes are non-orthogonal. */
85c84d5c 2367
2368static struct
2369{
2370 char code;
2371 char const *pattern;
2372 char const *format;
2373}
2374const conversions[] = {
2375 { 0, "r", "0" },
2376
2377 { 0, "mr", "z[1]" },
2378 { 0, "m+ri", "3[2]" },
2379 { 0, "m+rs", "3[2]" },
d9530df8 2380 { 0, "m+^Zrs", "5[4]" },
2381 { 0, "m+^Zri", "5[4]" },
2382 { 0, "m+^Z+ris", "7+6[5]" },
2383 { 0, "m+^Srs", "5[4]" },
2384 { 0, "m+^Sri", "5[4]" },
2385 { 0, "m+^S+ris", "7+6[5]" },
85c84d5c 2386 { 0, "m+r+si", "4+5[2]" },
2387 { 0, "ms", "1" },
2388 { 0, "mi", "1" },
2389 { 0, "m+si", "2+3" },
2390
2391 { 0, "mmr", "[z[2]]" },
2392 { 0, "mm+ri", "[4[3]]" },
2393 { 0, "mm+rs", "[4[3]]" },
2394 { 0, "mm+r+si", "[5+6[3]]" },
2395 { 0, "mms", "[[2]]" },
2396 { 0, "mmi", "[[2]]" },
2397 { 0, "mm+si", "[4[3]]" },
2398
2399 { 0, "i", "#0" },
2400 { 0, "s", "#0" },
2401 { 0, "+si", "#1+2" },
2402 { 0, "l", "#0" },
2403
2404 { 'l', "l", "0" },
2405 { 'd', "i", "0" },
2406 { 'd', "s", "0" },
2407 { 'd', "+si", "1+2" },
2408 { 'D', "i", "0" },
2409 { 'D', "s", "0" },
2410 { 'D', "+si", "1+2" },
2411 { 'x', "i", "#0" },
2412 { 'X', "i", "#0" },
2413 { 'm', "i", "#0" },
2414 { 'b', "i", "#0" },
fedc146b 2415 { 'B', "i", "0" },
85c84d5c 2416 { 'p', "i", "0" },
2417
2418 { 0, 0, 0 }
2419};
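/* An illustrative reading of one entry above (the operand values are
   hypothetical): encode_pattern turns (mem (plus (reg) (const_int)))
   into the pattern "m+ri", with patternr[2] holding the base register
   and patternr[3] the constant, so the format "3[2]" emits the
   constant followed by the register in brackets, giving "4[a0]"-style
   syntax.  Digits in a format index patternr[]; most other characters
   are copied literally (see m32c_print_operand below).  */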
2420
2421/* This is in order according to the bitfield that pushm/popm use. */
2422static char const *pushm_regs[] = {
2423 "fb", "sb", "a1", "a0", "r3", "r2", "r1", "r0"
2424};
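/* A worked example of the 'p' operand code below (the mask values are
   made up): pushm_regs[] is indexed by bit number, so bit 0 selects
   "fb" and bit 7 selects "r0".  The printing loop walks from bit 7
   down to bit 0, so a mask of 0xc0 prints "r0,r1" and a mask of 0x03
   prints "sb,fb".  */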
2425
b9e1ef49 2426/* Implements TARGET_PRINT_OPERAND. */
2427
2428#undef TARGET_PRINT_OPERAND
2429#define TARGET_PRINT_OPERAND m32c_print_operand
2430
2431static void
85c84d5c 2432m32c_print_operand (FILE * file, rtx x, int code)
2433{
2434 int i, j, b;
2435 const char *comma;
2436 HOST_WIDE_INT ival;
2437 int unsigned_const = 0;
54536dfe 2438 int force_sign;
85c84d5c 2439
2440 /* Multiplies; constants are converted to sign-extended format but
2441 we need unsigned, so 'u' and 'U' tell us what size unsigned we
2442 need. */
2443 if (code == 'u')
2444 {
2445 unsigned_const = 2;
2446 code = 0;
2447 }
2448 if (code == 'U')
2449 {
2450 unsigned_const = 1;
2451 code = 0;
2452 }
2453 /* This one is only for debugging; you can put it in a pattern to
2454 force this error. */
2455 if (code == '!')
2456 {
2457 fprintf (stderr, "dj: unreviewed pattern:");
2458 if (current_output_insn)
2459 debug_rtx (current_output_insn);
2460 gcc_unreachable ();
2461 }
2462 /* PSImode operations are either .w or .l depending on the target. */
2463 if (code == '&')
2464 {
2465 if (TARGET_A16)
2466 fprintf (file, "w");
2467 else
2468 fprintf (file, "l");
2469 return;
2470 }
2471 /* Inverted conditionals. */
2472 if (code == 'C')
2473 {
2474 switch (GET_CODE (x))
2475 {
2476 case LE:
2477 fputs ("gt", file);
2478 break;
2479 case LEU:
2480 fputs ("gtu", file);
2481 break;
2482 case LT:
2483 fputs ("ge", file);
2484 break;
2485 case LTU:
2486 fputs ("geu", file);
2487 break;
2488 case GT:
2489 fputs ("le", file);
2490 break;
2491 case GTU:
2492 fputs ("leu", file);
2493 break;
2494 case GE:
2495 fputs ("lt", file);
2496 break;
2497 case GEU:
2498 fputs ("ltu", file);
2499 break;
2500 case NE:
2501 fputs ("eq", file);
2502 break;
2503 case EQ:
2504 fputs ("ne", file);
2505 break;
2506 default:
2507 gcc_unreachable ();
2508 }
2509 return;
2510 }
2511 /* Regular conditionals. */
2512 if (code == 'c')
2513 {
2514 switch (GET_CODE (x))
2515 {
2516 case LE:
2517 fputs ("le", file);
2518 break;
2519 case LEU:
2520 fputs ("leu", file);
2521 break;
2522 case LT:
2523 fputs ("lt", file);
2524 break;
2525 case LTU:
2526 fputs ("ltu", file);
2527 break;
2528 case GT:
2529 fputs ("gt", file);
2530 break;
2531 case GTU:
2532 fputs ("gtu", file);
2533 break;
2534 case GE:
2535 fputs ("ge", file);
2536 break;
2537 case GEU:
2538 fputs ("geu", file);
2539 break;
2540 case NE:
2541 fputs ("ne", file);
2542 break;
2543 case EQ:
2544 fputs ("eq", file);
2545 break;
2546 default:
2547 gcc_unreachable ();
2548 }
2549 return;
2550 }
2551 /* Used in negsi2 to do HImode ops on the two parts of an SImode
2552 operand. */
2553 if (code == 'h' && GET_MODE (x) == SImode)
2554 {
2555 x = m32c_subreg (HImode, x, SImode, 0);
2556 code = 0;
2557 }
2558 if (code == 'H' && GET_MODE (x) == SImode)
2559 {
2560 x = m32c_subreg (HImode, x, SImode, 2);
2561 code = 0;
2562 }
fedc146b 2563 if (code == 'h' && GET_MODE (x) == HImode)
2564 {
2565 x = m32c_subreg (QImode, x, HImode, 0);
2566 code = 0;
2567 }
2568 if (code == 'H' && GET_MODE (x) == HImode)
2569 {
2570 /* We can't actually represent this as an rtx. Do it here. */
2571 if (GET_CODE (x) == REG)
2572 {
2573 switch (REGNO (x))
2574 {
2575 case R0_REGNO:
2576 fputs ("r0h", file);
2577 return;
2578 case R1_REGNO:
2579 fputs ("r1h", file);
2580 return;
2581 default:
2582 gcc_unreachable();
2583 }
2584 }
2585 /* This should be a MEM. */
2586 x = m32c_subreg (QImode, x, HImode, 1);
2587 code = 0;
2588 }
2589 /* This is for BMcond, which always wants word register names. */
2590 if (code == 'h' && GET_MODE (x) == QImode)
2591 {
2592 if (GET_CODE (x) == REG)
2593 x = gen_rtx_REG (HImode, REGNO (x));
2594 code = 0;
2595 }
85c84d5c 2596 /* 'x' and 'X' need to be ignored for non-immediates. */
2597 if ((code == 'x' || code == 'X') && GET_CODE (x) != CONST_INT)
2598 code = 0;
2599
2600 encode_pattern (x);
54536dfe 2601 force_sign = 0;
85c84d5c 2602 for (i = 0; conversions[i].pattern; i++)
2603 if (conversions[i].code == code
2604 && streq (conversions[i].pattern, pattern))
2605 {
2606 for (j = 0; conversions[i].format[j]; j++)
2607 /* backslash quotes the next character in the output pattern. */
2608 if (conversions[i].format[j] == '\\')
2609 {
2610 fputc (conversions[i].format[j + 1], file);
2611 j++;
2612 }
2613 /* Digits in the output pattern indicate that the
2614 corresponding RTX is to be output at that point. */
2615 else if (ISDIGIT (conversions[i].format[j]))
2616 {
2617 rtx r = patternr[conversions[i].format[j] - '0'];
2618 switch (GET_CODE (r))
2619 {
2620 case REG:
2621 fprintf (file, "%s",
2622 reg_name_with_mode (REGNO (r), GET_MODE (r)));
2623 break;
2624 case CONST_INT:
2625 switch (code)
2626 {
2627 case 'b':
fedc146b 2628 case 'B':
2629 {
2630 int v = INTVAL (r);
2631 int i = (int) exact_log2 (v);
2632 if (i == -1)
2633 i = (int) exact_log2 ((v ^ 0xffff) & 0xffff);
2634 if (i == -1)
2635 i = (int) exact_log2 ((v ^ 0xff) & 0xff);
2636 /* Bit position. */
2637 fprintf (file, "%d", i);
2638 }
85c84d5c 2639 break;
2640 case 'x':
2641 /* Unsigned byte. */
2642 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2643 INTVAL (r) & 0xff);
2644 break;
2645 case 'X':
2646 /* Unsigned word. */
2647 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2648 INTVAL (r) & 0xffff);
2649 break;
2650 case 'p':
2651 /* pushm and popm encode a register set into a single byte. */
2652 comma = "";
2653 for (b = 7; b >= 0; b--)
2654 if (INTVAL (r) & (1 << b))
2655 {
2656 fprintf (file, "%s%s", comma, pushm_regs[b]);
2657 comma = ",";
2658 }
2659 break;
2660 case 'm':
2661 /* "Minus". Output -X */
2662 ival = (-INTVAL (r) & 0xffff);
2663 if (ival & 0x8000)
2664 ival = ival - 0x10000;
2665 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2666 break;
2667 default:
2668 ival = INTVAL (r);
2669 if (conversions[i].format[j + 1] == '[' && ival < 0)
2670 {
2671 /* We can simulate negative displacements by
2672 taking advantage of address space
2673 wrapping when the offset can span the
2674 entire address range. */
2675 rtx base =
2676 patternr[conversions[i].format[j + 2] - '0'];
2677 if (GET_CODE (base) == REG)
2678 switch (REGNO (base))
2679 {
2680 case A0_REGNO:
2681 case A1_REGNO:
2682 if (TARGET_A24)
2683 ival = 0x1000000 + ival;
2684 else
2685 ival = 0x10000 + ival;
2686 break;
2687 case SB_REGNO:
2688 if (TARGET_A16)
2689 ival = 0x10000 + ival;
2690 break;
2691 }
2692 }
2693 else if (code == 'd' && ival < 0 && j == 0)
2694 /* The "mova" opcode is used to do addition by
2695 computing displacements, but again, we need
2696 displacements to be unsigned *if* they're
2697 the only component of the displacement
2698 (i.e. no "symbol-4" type displacement). */
2699 ival = (TARGET_A24 ? 0x1000000 : 0x10000) + ival;
2700
2701 if (conversions[i].format[j] == '0')
2702 {
2703 /* More conversions to unsigned. */
2704 if (unsigned_const == 2)
2705 ival &= 0xffff;
2706 if (unsigned_const == 1)
2707 ival &= 0xff;
2708 }
2709 if (streq (conversions[i].pattern, "mi")
2710 || streq (conversions[i].pattern, "mmi"))
2711 {
2712 /* Integers used as addresses are unsigned. */
2713 ival &= (TARGET_A24 ? 0xffffff : 0xffff);
2714 }
54536dfe 2715 if (force_sign && ival >= 0)
2716 fputc ('+', file);
85c84d5c 2717 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2718 break;
2719 }
2720 break;
2721 case CONST_DOUBLE:
2722 /* We don't have const_double constants. If it
2723 happens, make it obvious. */
2724 fprintf (file, "[const_double 0x%lx]",
2725 (unsigned long) CONST_DOUBLE_HIGH (r));
2726 break;
2727 case SYMBOL_REF:
2728 assemble_name (file, XSTR (r, 0));
2729 break;
2730 case LABEL_REF:
2731 output_asm_label (r);
2732 break;
2733 default:
2734 fprintf (stderr, "don't know how to print this operand:");
2735 debug_rtx (r);
2736 gcc_unreachable ();
2737 }
2738 }
2739 else
2740 {
2741 if (conversions[i].format[j] == 'z')
2742 {
2743 /* Some addressing modes *must* have a displacement,
2744 so insert a zero here if needed. */
2745 int k;
2746 for (k = j + 1; conversions[i].format[k]; k++)
2747 if (ISDIGIT (conversions[i].format[k]))
2748 {
2749 rtx reg = patternr[conversions[i].format[k] - '0'];
2750 if (GET_CODE (reg) == REG
2751 && (REGNO (reg) == SB_REGNO
2752 || REGNO (reg) == FB_REGNO
2753 || REGNO (reg) == SP_REGNO))
2754 fputc ('0', file);
2755 }
2756 continue;
2757 }
2758 /* Signed displacements off symbols need to have signs
2759 blended cleanly. */
2760 if (conversions[i].format[j] == '+'
54536dfe 2761 && (!code || code == 'D' || code == 'd')
85c84d5c 2762 && ISDIGIT (conversions[i].format[j + 1])
54536dfe 2763 && (GET_CODE (patternr[conversions[i].format[j + 1] - '0'])
2764 == CONST_INT))
2765 {
2766 force_sign = 1;
2767 continue;
2768 }
85c84d5c 2769 fputc (conversions[i].format[j], file);
2770 }
2771 break;
2772 }
2773 if (!conversions[i].pattern)
2774 {
2775 fprintf (stderr, "unconvertible operand %c `%s'", code ? code : '-',
2776 pattern);
2777 debug_rtx (x);
2778 fprintf (file, "[%c.%s]", code ? code : '-', pattern);
2779 }
2780
2781 return;
2782}
2783
b9e1ef49 2784/* Implements TARGET_PRINT_OPERAND_PUNCT_VALID_P.
2785
2786 See m32c_print_operand above for descriptions of what these do. */
2787
2788#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
2789#define TARGET_PRINT_OPERAND_PUNCT_VALID_P m32c_print_operand_punct_valid_p
2790
2791static bool
2792m32c_print_operand_punct_valid_p (unsigned char c)
85c84d5c 2793{
2794 if (c == '&' || c == '!')
b9e1ef49 2795 return true;
2796
2797 return false;
85c84d5c 2798}
2799
b9e1ef49 2800/* Implements TARGET_PRINT_OPERAND_ADDRESS. Nothing unusual here. */
2801
2802#undef TARGET_PRINT_OPERAND_ADDRESS
2803#define TARGET_PRINT_OPERAND_ADDRESS m32c_print_operand_address
2804
2805static void
85c84d5c 2806m32c_print_operand_address (FILE * stream, rtx address)
2807{
c46bf770 2808 if (GET_CODE (address) == MEM)
2809 address = XEXP (address, 0);
2810 else
2811 /* cf: gcc.dg/asm-4.c. */
2812 gcc_assert (GET_CODE (address) == REG);
2813
2814 m32c_print_operand (stream, address, 0);
85c84d5c 2815}
2816
2817/* Implements ASM_OUTPUT_REG_PUSH. Control registers are pushed
2818 differently than general registers. */
2819void
2820m32c_output_reg_push (FILE * s, int regno)
2821{
2822 if (regno == FLG_REGNO)
2823 fprintf (s, "\tpushc\tflg\n");
2824 else
71d46ffa 2825 fprintf (s, "\tpush.%c\t%s\n",
85c84d5c 2826 " bwll"[reg_push_size (regno)], reg_names[regno]);
2827}
2828
2829/* Likewise for ASM_OUTPUT_REG_POP. */
2830void
2831m32c_output_reg_pop (FILE * s, int regno)
2832{
2833 if (regno == FLG_REGNO)
2834 fprintf (s, "\tpopc\tflg\n");
2835 else
71d46ffa 2836 fprintf (s, "\tpop.%c\t%s\n",
85c84d5c 2837 " bwll"[reg_push_size (regno)], reg_names[regno]);
2838}
2839
2840/* Defining target-specific uses of `__attribute__' */
2841
2842/* Used to simplify the logic below. Find the attributes wherever
2843 they may be. */
2844#define M32C_ATTRIBUTES(decl) \
2845 (TYPE_P (decl)) ? TYPE_ATTRIBUTES (decl) \
2846 : DECL_ATTRIBUTES (decl) \
2847 ? (DECL_ATTRIBUTES (decl)) \
2848 : TYPE_ATTRIBUTES (TREE_TYPE (decl))
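/* For example (a typical use, not a requirement): for a FUNCTION_DECL
   whose own DECL_ATTRIBUTES list is empty, the macro falls back to
   the attributes on the function's type, so an attribute attached to
   a typedef of the function type is still found.  */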
2849
2850/* Returns TRUE if the given tree has the "interrupt" (or "fast_interrupt") attribute. */
2851static int
2852interrupt_p (tree node ATTRIBUTE_UNUSED)
2853{
2854 tree list = M32C_ATTRIBUTES (node);
2855 while (list)
2856 {
2857 if (is_attribute_p ("interrupt", TREE_PURPOSE (list)))
2858 return 1;
2859 list = TREE_CHAIN (list);
2860 }
cc24427c 2861 return fast_interrupt_p (node);
2862}
2863
2864/* Returns TRUE if the given tree has the "bank_switch" attribute. */
2865static int
2866bank_switch_p (tree node ATTRIBUTE_UNUSED)
2867{
2868 tree list = M32C_ATTRIBUTES (node);
2869 while (list)
2870 {
2871 if (is_attribute_p ("bank_switch", TREE_PURPOSE (list)))
2872 return 1;
2873 list = TREE_CHAIN (list);
2874 }
2875 return 0;
2876}
2877
2878/* Returns TRUE if the given tree has the "fast_interrupt" attribute. */
2879static int
2880fast_interrupt_p (tree node ATTRIBUTE_UNUSED)
2881{
2882 tree list = M32C_ATTRIBUTES (node);
2883 while (list)
2884 {
2885 if (is_attribute_p ("fast_interrupt", TREE_PURPOSE (list)))
2886 return 1;
2887 list = TREE_CHAIN (list);
2888 }
85c84d5c 2889 return 0;
2890}
2891
2892static tree
2893interrupt_handler (tree * node ATTRIBUTE_UNUSED,
2894 tree name ATTRIBUTE_UNUSED,
2895 tree args ATTRIBUTE_UNUSED,
2896 int flags ATTRIBUTE_UNUSED,
2897 bool * no_add_attrs ATTRIBUTE_UNUSED)
2898{
2899 return NULL_TREE;
2900}
2901
2efce110 2902/* Returns TRUE if the given tree has the "function_vector" attribute. */
2903int
2904m32c_special_page_vector_p (tree func)
2905{
6276c4d1 2906 tree list;
2907
2efce110 2908 if (TREE_CODE (func) != FUNCTION_DECL)
2909 return 0;
2910
6276c4d1 2911 list = M32C_ATTRIBUTES (func);
2efce110 2912 while (list)
2913 {
2914 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2915 return 1;
2916 list = TREE_CHAIN (list);
2917 }
2918 return 0;
2919}
2920
2921static tree
2922function_vector_handler (tree * node ATTRIBUTE_UNUSED,
2923 tree name ATTRIBUTE_UNUSED,
2924 tree args ATTRIBUTE_UNUSED,
2925 int flags ATTRIBUTE_UNUSED,
2926 bool * no_add_attrs ATTRIBUTE_UNUSED)
2927{
2928 if (TARGET_R8C)
2929 {
2930 /* The attribute is not supported for the R8C target. */
2931 warning (OPT_Wattributes,
67a779df 2932 "%qE attribute is not supported for R8C target",
2933 name);
2efce110 2934 *no_add_attrs = true;
2935 }
2936 else if (TREE_CODE (*node) != FUNCTION_DECL)
2937 {
2938 /* The attribute must be applied to functions only. */
2939 warning (OPT_Wattributes,
67a779df 2940 "%qE attribute applies only to functions",
2941 name);
2efce110 2942 *no_add_attrs = true;
2943 }
2944 else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
2945 {
2946 /* The argument must be a constant integer. */
2947 warning (OPT_Wattributes,
67a779df 2948 "%qE attribute argument not an integer constant",
2949 name);
2efce110 2950 *no_add_attrs = true;
2951 }
2952 else if (TREE_INT_CST_LOW (TREE_VALUE (args)) < 18
2953 || TREE_INT_CST_LOW (TREE_VALUE (args)) > 255)
2954 {
2955 /* The argument value must be between 18 and 255. */
2956 warning (OPT_Wattributes,
67a779df 2957 "%qE attribute argument should be between 18 to 255",
2958 name);
2efce110 2959 *no_add_attrs = true;
2960 }
2961 return NULL_TREE;
2962}
2963
2964/* If the function referenced by X has the 'function_vector' attribute,
2965 return its function vector number; otherwise return zero. */
2966int
2967current_function_special_page_vector (rtx x)
2968{
2969 int num;
2970
2971 if ((GET_CODE(x) == SYMBOL_REF)
2972 && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
2973 {
6276c4d1 2974 tree list;
2efce110 2975 tree t = SYMBOL_REF_DECL (x);
2976
2977 if (TREE_CODE (t) != FUNCTION_DECL)
2978 return 0;
2979
6276c4d1 2980 list = M32C_ATTRIBUTES (t);
2efce110 2981 while (list)
2982 {
2983 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2984 {
2985 num = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list)));
2986 return num;
2987 }
2988
2989 list = TREE_CHAIN (list);
2990 }
2991
2992 return 0;
2993 }
2994 else
2995 return 0;
2996}
2997
85c84d5c 2998#undef TARGET_ATTRIBUTE_TABLE
2999#define TARGET_ATTRIBUTE_TABLE m32c_attribute_table
3000static const struct attribute_spec m32c_attribute_table[] = {
ac86af5d 3001 {"interrupt", 0, 0, false, false, false, interrupt_handler, false},
3002 {"bank_switch", 0, 0, false, false, false, interrupt_handler, false},
3003 {"fast_interrupt", 0, 0, false, false, false, interrupt_handler, false},
3004 {"function_vector", 1, 1, true, false, false, function_vector_handler,
3005 false},
3006 {0, 0, 0, 0, 0, 0, 0, false}
85c84d5c 3007};
3008
3009#undef TARGET_COMP_TYPE_ATTRIBUTES
3010#define TARGET_COMP_TYPE_ATTRIBUTES m32c_comp_type_attributes
3011static int
a9f1838b 3012m32c_comp_type_attributes (const_tree type1 ATTRIBUTE_UNUSED,
3013 const_tree type2 ATTRIBUTE_UNUSED)
85c84d5c 3014{
3015 /* 0=incompatible 1=compatible 2=warning */
3016 return 1;
3017}
3018
3019#undef TARGET_INSERT_ATTRIBUTES
3020#define TARGET_INSERT_ATTRIBUTES m32c_insert_attributes
3021static void
3022m32c_insert_attributes (tree node ATTRIBUTE_UNUSED,
3023 tree * attr_ptr ATTRIBUTE_UNUSED)
3024{
e3d4e41e 3025 unsigned addr;
3026 /* See if we need to make #pragma address variables volatile. */
3027
3028 if (TREE_CODE (node) == VAR_DECL)
3029 {
1675aa0a 3030 const char *name = IDENTIFIER_POINTER (DECL_NAME (node));
e3d4e41e 3031 if (m32c_get_pragma_address (name, &addr))
3032 {
3033 TREE_THIS_VOLATILE (node) = true;
3034 }
3035 }
3036}
3037
3038
3039struct GTY(()) pragma_entry {
3040 const char *varname;
3041 unsigned address;
3042};
3043typedef struct pragma_entry pragma_entry;
3044
3045/* Hash table of pragma info. */
3046static GTY((param_is (pragma_entry))) htab_t pragma_htab;
3047
3048static int
3049pragma_entry_eq (const void *p1, const void *p2)
3050{
3051 const pragma_entry *old = (const pragma_entry *) p1;
3052 const char *new_name = (const char *) p2;
3053
3054 return strcmp (old->varname, new_name) == 0;
3055}
3056
3057static hashval_t
3058pragma_entry_hash (const void *p)
3059{
3060 const pragma_entry *old = (const pragma_entry *) p;
3061 return htab_hash_string (old->varname);
3062}
3063
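/* A hypothetical example of how the pragma-address machinery fits
   together (the name and address are made up): if the #pragma handler
   records m32c_note_pragma_address ("port0", 0x3aa), then
   m32c_insert_attributes above marks a VAR_DECL named "port0" as
   volatile, and m32c_output_aligned_common below emits
   "port0 = 0x03aa" instead of a .comm directive.  */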
3064void
3065m32c_note_pragma_address (const char *varname, unsigned address)
3066{
3067 pragma_entry **slot;
3068
3069 if (!pragma_htab)
3070 pragma_htab = htab_create_ggc (31, pragma_entry_hash,
3071 pragma_entry_eq, NULL);
3072
3073 slot = (pragma_entry **)
3074 htab_find_slot_with_hash (pragma_htab, varname,
3075 htab_hash_string (varname), INSERT);
3076
3077 if (!*slot)
3078 {
25a27413 3079 *slot = ggc_alloc<pragma_entry> ();
e3d4e41e 3080 (*slot)->varname = ggc_strdup (varname);
3081 }
3082 (*slot)->address = address;
3083}
3084
3085static bool
3086m32c_get_pragma_address (const char *varname, unsigned *address)
3087{
3088 pragma_entry **slot;
3089
3090 if (!pragma_htab)
3091 return false;
3092
3093 slot = (pragma_entry **)
3094 htab_find_slot_with_hash (pragma_htab, varname,
3095 htab_hash_string (varname), NO_INSERT);
3096 if (slot && *slot)
3097 {
3098 *address = (*slot)->address;
3099 return true;
3100 }
3101 return false;
3102}
3103
3104void
1675aa0a 3105m32c_output_aligned_common (FILE *stream, tree decl ATTRIBUTE_UNUSED,
3106 const char *name,
e3d4e41e 3107 int size, int align, int global)
3108{
3109 unsigned address;
3110
3111 if (m32c_get_pragma_address (name, &address))
3112 {
3113 /* We never output these as global. */
3114 assemble_name (stream, name);
3115 fprintf (stream, " = 0x%04x\n", address);
3116 return;
3117 }
3118 if (!global)
3119 {
3120 fprintf (stream, "\t.local\t");
3121 assemble_name (stream, name);
3122 fprintf (stream, "\n");
3123 }
3124 fprintf (stream, "\t.comm\t");
3125 assemble_name (stream, name);
3126 fprintf (stream, ",%u,%u\n", size, align / BITS_PER_UNIT);
85c84d5c 3127}
3128
3129/* Predicates */
3130
80be3ac5 3131/* This is a list of legal subregs of hard regs. */
89adc165 3132static const struct {
3133 unsigned char outer_mode_size;
3134 unsigned char inner_mode_size;
3135 unsigned char byte_mask;
3136 unsigned char legal_when;
80be3ac5 3137 unsigned int regno;
80be3ac5 3138} legal_subregs[] = {
89adc165 3139 {1, 2, 0x03, 1, R0_REGNO}, /* r0h r0l */
3140 {1, 2, 0x03, 1, R1_REGNO}, /* r1h r1l */
3141 {1, 2, 0x01, 1, A0_REGNO},
3142 {1, 2, 0x01, 1, A1_REGNO},
80be3ac5 3143
89adc165 3144 {1, 4, 0x01, 1, A0_REGNO},
3145 {1, 4, 0x01, 1, A1_REGNO},
80be3ac5 3146
89adc165 3147 {2, 4, 0x05, 1, R0_REGNO}, /* r2 r0 */
3148 {2, 4, 0x05, 1, R1_REGNO}, /* r3 r1 */
3149 {2, 4, 0x05, 16, A0_REGNO}, /* a1 a0 */
3150 {2, 4, 0x01, 24, A0_REGNO}, /* a1 a0 */
3151 {2, 4, 0x01, 24, A1_REGNO}, /* a1 a0 */
80be3ac5 3152
89adc165 3153 {4, 8, 0x55, 1, R0_REGNO}, /* r3 r1 r2 r0 */
80be3ac5 3154};
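/* A worked example of how the table above is used (the rtx is made
   up): for (subreg:QI (reg:HI r0) 1), m32c_illegal_subreg_p below
   turns the byte offset 1 into the bit 1<<1 == 0x02, which falls
   within the 0x03 byte_mask of the {1, 2, 0x03, 1, R0_REGNO} entry;
   legal_when == 1 means always legal, so the subreg (r0h) is
   accepted.  */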
3155
3156/* Returns TRUE if OP is a subreg of a hard reg which we don't
e3d4e41e 3157 support. We also bail on MEMs with illegal addresses. */
80be3ac5 3158bool
3159m32c_illegal_subreg_p (rtx op)
3160{
80be3ac5 3161 int offset;
3162 unsigned int i;
92e46ab1 3163 enum machine_mode src_mode, dest_mode;
80be3ac5 3164
e3d4e41e 3165 if (GET_CODE (op) == MEM
3166 && ! m32c_legitimate_address_p (Pmode, XEXP (op, 0), false))
3167 {
3168 return true;
3169 }
3170
80be3ac5 3171 if (GET_CODE (op) != SUBREG)
3172 return false;
3173
3174 dest_mode = GET_MODE (op);
3175 offset = SUBREG_BYTE (op);
3176 op = SUBREG_REG (op);
3177 src_mode = GET_MODE (op);
3178
3179 if (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (src_mode))
3180 return false;
3181 if (GET_CODE (op) != REG)
3182 return false;
3183 if (REGNO (op) >= MEM0_REGNO)
3184 return false;
3185
3186 offset = (1 << offset);
3187
89adc165 3188 for (i = 0; i < ARRAY_SIZE (legal_subregs); i ++)
80be3ac5 3189 if (legal_subregs[i].outer_mode_size == GET_MODE_SIZE (dest_mode)
3190 && legal_subregs[i].regno == REGNO (op)
3191 && legal_subregs[i].inner_mode_size == GET_MODE_SIZE (src_mode)
3192 && legal_subregs[i].byte_mask & offset)
3193 {
3194 switch (legal_subregs[i].legal_when)
3195 {
3196 case 1:
3197 return false;
3198 case 16:
3199 if (TARGET_A16)
3200 return false;
3201 break;
3202 case 24:
3203 if (TARGET_A24)
3204 return false;
3205 break;
3206 }
3207 }
3208 return true;
3209}
3210
85c84d5c 3211/* Returns TRUE if we support a move between the first two operands.
3212 At the moment, we just want to discourage mem to mem moves until
3213 after reload, because reload has a hard time with our limited
3214 number of address registers, and we can get into a situation where
3215 we need three of them when we only have two. */
3216bool
3217m32c_mov_ok (rtx * operands, enum machine_mode mode ATTRIBUTE_UNUSED)
3218{
3219 rtx op0 = operands[0];
3220 rtx op1 = operands[1];
3221
3222 if (TARGET_A24)
3223 return true;
3224
3225#define DEBUG_MOV_OK 0
3226#if DEBUG_MOV_OK
3227 fprintf (stderr, "m32c_mov_ok %s\n", mode_name[mode]);
3228 debug_rtx (op0);
3229 debug_rtx (op1);
3230#endif
3231
3232 if (GET_CODE (op0) == SUBREG)
3233 op0 = XEXP (op0, 0);
3234 if (GET_CODE (op1) == SUBREG)
3235 op1 = XEXP (op1, 0);
3236
3237 if (GET_CODE (op0) == MEM
3238 && GET_CODE (op1) == MEM
3239 && ! reload_completed)
3240 {
3241#if DEBUG_MOV_OK
3242 fprintf (stderr, " - no, mem to mem\n");
3243#endif
3244 return false;
3245 }
3246
3247#if DEBUG_MOV_OK
3248 fprintf (stderr, " - ok\n");
3249#endif
3250 return true;
3251}
3252
54536dfe 3253/* Returns TRUE if two consecutive HImode mov instructions, generated
3254 for moving immediate double data to a double data type variable
3255 location, can be combined into a single SImode mov instruction. */
3256bool
402f6a9e 3257m32c_immd_dbl_mov (rtx * operands ATTRIBUTE_UNUSED,
54536dfe 3258 enum machine_mode mode ATTRIBUTE_UNUSED)
3259{
402f6a9e 3260 /* ??? This relied on the now-defunct MEM_SCALAR and MEM_IN_STRUCT_P
3261 flags. */
54536dfe 3262 return false;
3263}
3264
85c84d5c 3265/* Expanders */
3266
3267/* Subregs are non-orthogonal for us, because our registers are all
3268 different sizes. */
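/* For example (derived from the mapping below): the HImode piece at
   byte 2 of r0 is r2, and the HImode piece at byte 2 of a0 is a1;
   pseudos and the memory-mapped registers simply go through
   simplify_gen_subreg.  */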
3269static rtx
3270m32c_subreg (enum machine_mode outer,
3271 rtx x, enum machine_mode inner, int byte)
3272{
3273 int r, nr = -1;
3274
3275 /* When converting a MEM to a different mode of the same size, we
3276 just rewrite it in the new mode. */
3277 if (GET_CODE (x) == SUBREG
3278 && SUBREG_BYTE (x) == 0
3279 && GET_CODE (SUBREG_REG (x)) == MEM
3280 && (GET_MODE_SIZE (GET_MODE (x))
3281 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
3282 {
3283 rtx oldx = x;
3284 x = gen_rtx_MEM (GET_MODE (x), XEXP (SUBREG_REG (x), 0));
3285 MEM_COPY_ATTRIBUTES (x, SUBREG_REG (oldx));
3286 }
3287
3288 /* Push/pop get done as smaller push/pops. */
3289 if (GET_CODE (x) == MEM
3290 && (GET_CODE (XEXP (x, 0)) == PRE_DEC
3291 || GET_CODE (XEXP (x, 0)) == POST_INC))
3292 return gen_rtx_MEM (outer, XEXP (x, 0));
3293 if (GET_CODE (x) == SUBREG
3294 && GET_CODE (XEXP (x, 0)) == MEM
3295 && (GET_CODE (XEXP (XEXP (x, 0), 0)) == PRE_DEC
3296 || GET_CODE (XEXP (XEXP (x, 0), 0)) == POST_INC))
3297 return gen_rtx_MEM (outer, XEXP (XEXP (x, 0), 0));
3298
3299 if (GET_CODE (x) != REG)
dbbdf510 3300 {
3301 rtx r = simplify_gen_subreg (outer, x, inner, byte);
3302 if (GET_CODE (r) == SUBREG
3303 && GET_CODE (x) == MEM
3304 && MEM_VOLATILE_P (x))
3305 {
3306 /* Volatile MEMs don't get simplified, but we need them to
3307 be. We are little endian, so the subreg byte is the
3308 offset. */
ee5533ba 3309 r = adjust_address_nv (x, outer, byte);
dbbdf510 3310 }
3311 return r;
3312 }
85c84d5c 3313
3314 r = REGNO (x);
3315 if (r >= FIRST_PSEUDO_REGISTER || r == AP_REGNO)
3316 return simplify_gen_subreg (outer, x, inner, byte);
3317
3318 if (IS_MEM_REGNO (r))
3319 return simplify_gen_subreg (outer, x, inner, byte);
3320
3321 /* This is where the complexities of our register layout are
3322 described. */
3323 if (byte == 0)
3324 nr = r;
3325 else if (outer == HImode)
3326 {
3327 if (r == R0_REGNO && byte == 2)
3328 nr = R2_REGNO;
3329 else if (r == R0_REGNO && byte == 4)
3330 nr = R1_REGNO;
3331 else if (r == R0_REGNO && byte == 6)
3332 nr = R3_REGNO;
3333 else if (r == R1_REGNO && byte == 2)
3334 nr = R3_REGNO;
3335 else if (r == A0_REGNO && byte == 2)
3336 nr = A1_REGNO;
3337 }
3338 else if (outer == SImode)
3339 {
3340 if (r == R0_REGNO && byte == 0)
3341 nr = R0_REGNO;
3342 else if (r == R0_REGNO && byte == 4)
3343 nr = R1_REGNO;
3344 }
3345 if (nr == -1)
3346 {
3347 fprintf (stderr, "m32c_subreg %s %s %d\n",
3348 mode_name[outer], mode_name[inner], byte);
3349 debug_rtx (x);
3350 gcc_unreachable ();
3351 }
3352 return gen_rtx_REG (outer, nr);
3353}
3354
3355/* Used to emit move instructions. We split some moves,
3356 and avoid mem-mem moves. */
3357int
3358m32c_prepare_move (rtx * operands, enum machine_mode mode)
3359{
d9530df8 3360 if (far_addr_space_p (operands[0])
3361 && CONSTANT_P (operands[1]))
3362 {
3363 operands[1] = force_reg (GET_MODE (operands[0]), operands[1]);
3364 }
85c84d5c 3365 if (TARGET_A16 && mode == PSImode)
3366 return m32c_split_move (operands, mode, 1);
3367 if ((GET_CODE (operands[0]) == MEM)
3368 && (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY))
3369 {
3370 rtx pmv = XEXP (operands[0], 0);
3371 rtx dest_reg = XEXP (pmv, 0);
3372 rtx dest_mod = XEXP (pmv, 1);
3373
3374 emit_insn (gen_rtx_SET (Pmode, dest_reg, dest_mod));
3375 operands[0] = gen_rtx_MEM (mode, dest_reg);
3376 }
e1ba4a27 3377 if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
85c84d5c 3378 operands[1] = copy_to_mode_reg (mode, operands[1]);
3379 return 0;
3380}
3381
3382#define DEBUG_SPLIT 0
3383
3384/* Returns TRUE if the given PSImode move should be split. We split
3385 all r8c/m16c PSImode moves, since those chips don't support them
3386 directly, and POP.L moves, since we can only *push* SImode. */
3387int
3388m32c_split_psi_p (rtx * operands)
3389{
3390#if DEBUG_SPLIT
3391 fprintf (stderr, "\nm32c_split_psi_p\n");
3392 debug_rtx (operands[0]);
3393 debug_rtx (operands[1]);
3394#endif
3395 if (TARGET_A16)
3396 {
3397#if DEBUG_SPLIT
3398 fprintf (stderr, "yes, A16\n");
3399#endif
3400 return 1;
3401 }
3402 if (GET_CODE (operands[1]) == MEM
3403 && GET_CODE (XEXP (operands[1], 0)) == POST_INC)
3404 {
3405#if DEBUG_SPLIT
3406 fprintf (stderr, "yes, pop.l\n");
3407#endif
3408 return 1;
3409 }
3410#if DEBUG_SPLIT
3411 fprintf (stderr, "no, default\n");
3412#endif
3413 return 0;
3414}
3415
3416/* Split the given move. SPLIT_ALL is 0 if splitting is optional
3417 (define_expand), 1 if it is not optional (define_insn_and_split),
3418 and 3 for define_split (alternate api). */
3419int
3420m32c_split_move (rtx * operands, enum machine_mode mode, int split_all)
3421{
3422 rtx s[4], d[4];
3423 int parts, si, di, rev = 0;
3424 int rv = 0, opi = 2;
3425 enum machine_mode submode = HImode;
3426 rtx *ops, local_ops[10];
3427
3428 /* define_split modifies the existing operands, but the other two
3429 emit new insns. OPS is where we store the operand pairs, which
3430 we emit later. */
3431 if (split_all == 3)
3432 ops = operands;
3433 else
3434 ops = local_ops;
3435
3436 /* Else HImode. */
3437 if (mode == DImode)
3438 submode = SImode;
3439
3440 /* Before splitting mem-mem moves, force one operand into a
3441 register. */
e1ba4a27 3442 if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
85c84d5c 3443 {
3444#if DEBUG0
3445 fprintf (stderr, "force_reg...\n");
3446 debug_rtx (operands[1]);
3447#endif
3448 operands[1] = force_reg (mode, operands[1]);
3449#if DEBUG0
3450 debug_rtx (operands[1]);
3451#endif
3452 }
3453
3454 parts = 2;
3455
3456#if DEBUG_SPLIT
e1ba4a27 3457 fprintf (stderr, "\nsplit_move %d all=%d\n", !can_create_pseudo_p (),
3458 split_all);
85c84d5c 3459 debug_rtx (operands[0]);
3460 debug_rtx (operands[1]);
3461#endif
3462
53c07d79 3463 /* Note that split_all is not used to select the api after this
3464 point, so it's safe to set it to 3 even with define_insn. */
3465 /* None of the chips can move SI operands to sp-relative addresses,
3466 so we always split those. */
4ead5e30 3467 if (satisfies_constraint_Ss (operands[0]))
53c07d79 3468 split_all = 3;
3469
d9530df8 3470 if (TARGET_A16
3471 && (far_addr_space_p (operands[0])
3472 || far_addr_space_p (operands[1])))
3473 split_all |= 1;
3474
85c84d5c 3475 /* We don't need to split these. */
3476 if (TARGET_A24
3477 && split_all != 3
3478 && (mode == SImode || mode == PSImode)
3479 && !(GET_CODE (operands[1]) == MEM
3480 && GET_CODE (XEXP (operands[1], 0)) == POST_INC))
3481 return 0;
3482
3483 /* First, enumerate the subregs we'll be dealing with. */
3484 for (si = 0; si < parts; si++)
3485 {
3486 d[si] =
3487 m32c_subreg (submode, operands[0], mode,
3488 si * GET_MODE_SIZE (submode));
3489 s[si] =
3490 m32c_subreg (submode, operands[1], mode,
3491 si * GET_MODE_SIZE (submode));
3492 }
3493
3494 /* Split pushes by emitting a sequence of smaller pushes. */
3495 if (GET_CODE (d[0]) == MEM && GET_CODE (XEXP (d[0], 0)) == PRE_DEC)
3496 {
3497 for (si = parts - 1; si >= 0; si--)
3498 {
3499 ops[opi++] = gen_rtx_MEM (submode,
3500 gen_rtx_PRE_DEC (Pmode,
3501 gen_rtx_REG (Pmode,
3502 SP_REGNO)));
3503 ops[opi++] = s[si];
3504 }
3505
3506 rv = 1;
3507 }
3508 /* Likewise for pops. */
3509 else if (GET_CODE (s[0]) == MEM && GET_CODE (XEXP (s[0], 0)) == POST_INC)
3510 {
3511 for (di = 0; di < parts; di++)
3512 {
3513 ops[opi++] = d[di];
3514 ops[opi++] = gen_rtx_MEM (submode,
3515 gen_rtx_POST_INC (Pmode,
3516 gen_rtx_REG (Pmode,
3517 SP_REGNO)));
3518 }
3519 rv = 1;
3520 }
3521 else if (split_all)
3522 {
3523 /* if d[di] == s[si] for any di < si, we'll early clobber. */
3524 for (di = 0; di < parts - 1; di++)
3525 for (si = di + 1; si < parts; si++)
3526 if (reg_mentioned_p (d[di], s[si]))
3527 rev = 1;
3528
3529 if (rev)
3530 for (si = 0; si < parts; si++)
3531 {
3532 ops[opi++] = d[si];
3533 ops[opi++] = s[si];
3534 }
3535 else
3536 for (si = parts - 1; si >= 0; si--)
3537 {
3538 ops[opi++] = d[si];
3539 ops[opi++] = s[si];
3540 }
3541 rv = 1;
3542 }
3543 /* Now emit any moves we may have accumulated. */
3544 if (rv && split_all != 3)
3545 {
3546 int i;
3547 for (i = 2; i < opi; i += 2)
3548 emit_move_insn (ops[i], ops[i + 1]);
3549 }
3550 return rv;
3551}
3552
fedc146b 3553/* The m32c has a number of opcodes that act like memcpy, strcmp, and
3554 the like. For the R8C they expect one of the addresses to be in
3555 R1L:An so we need to arrange for that. Otherwise, it's just a
3556 matter of picking out the operands we want and emitting the right
3557 pattern for them. All these expanders, which correspond to
3558 patterns in blkmov.md, must return nonzero if they expand the insn,
3559 or zero if they should FAIL. */
3560
3561/* This is a memset() opcode. All operands are implied, so we need to
3562 arrange for them to be in the right registers. The opcode wants
3563 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3564 the count (HI), and $2 the value (QI). */
3565int
3566m32c_expand_setmemhi(rtx *operands)
3567{
3568 rtx desta, count, val;
3569 rtx desto, counto;
3570
3571 desta = XEXP (operands[0], 0);
3572 count = operands[1];
3573 val = operands[2];
3574
3575 desto = gen_reg_rtx (Pmode);
3576 counto = gen_reg_rtx (HImode);
3577
3578 if (GET_CODE (desta) != REG
3579 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3580 desta = copy_to_mode_reg (Pmode, desta);
3581
3582 /* This looks like an arbitrary restriction, but this is by far the
3583 most common case. For counts 8..14 this actually results in
3584 smaller code with no speed penalty because the half-sized
3585 constant can be loaded with a shorter opcode. */
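/* For example (made-up operands): a memset of 10 bytes with the value
   0x05 takes this path; v becomes 0x0505, the count is halved to 5,
   and the word-wide setmemhi_whi_op / setmemhi_wpsi_op pattern is
   used.  */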
3586 if (GET_CODE (count) == CONST_INT
3587 && GET_CODE (val) == CONST_INT
3588 && ! (INTVAL (count) & 1)
3589 && (INTVAL (count) > 1)
3590 && (INTVAL (val) <= 7 && INTVAL (val) >= -8))
3591 {
3592 unsigned v = INTVAL (val) & 0xff;
3593 v = v | (v << 8);
3594 count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3595 val = copy_to_mode_reg (HImode, GEN_INT (v));
3596 if (TARGET_A16)
3597 emit_insn (gen_setmemhi_whi_op (desto, counto, val, desta, count));
3598 else
3599 emit_insn (gen_setmemhi_wpsi_op (desto, counto, val, desta, count));
3600 return 1;
3601 }
3602
3603 /* This is the generalized memset() case. */
3604 if (GET_CODE (val) != REG
3605 || REGNO (val) < FIRST_PSEUDO_REGISTER)
3606 val = copy_to_mode_reg (QImode, val);
3607
3608 if (GET_CODE (count) != REG
3609 || REGNO (count) < FIRST_PSEUDO_REGISTER)
3610 count = copy_to_mode_reg (HImode, count);
3611
3612 if (TARGET_A16)
3613 emit_insn (gen_setmemhi_bhi_op (desto, counto, val, desta, count));
3614 else
3615 emit_insn (gen_setmemhi_bpsi_op (desto, counto, val, desta, count));
3616
3617 return 1;
3618}
3619
3620/* This is a memcpy() opcode. All operands are implied, so we need to
3621 arrange for them to be in the right registers. The opcode wants
3622 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3623 is the source (MEM:BLK), and $2 the count (HI). */
3624int
3625m32c_expand_movmemhi(rtx *operands)
3626{
3627 rtx desta, srca, count;
3628 rtx desto, srco, counto;
3629
3630 desta = XEXP (operands[0], 0);
3631 srca = XEXP (operands[1], 0);
3632 count = operands[2];
3633
3634 desto = gen_reg_rtx (Pmode);
3635 srco = gen_reg_rtx (Pmode);
3636 counto = gen_reg_rtx (HImode);
3637
3638 if (GET_CODE (desta) != REG
3639 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3640 desta = copy_to_mode_reg (Pmode, desta);
3641
3642 if (GET_CODE (srca) != REG
3643 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3644 srca = copy_to_mode_reg (Pmode, srca);
3645
3646 /* Similar to setmem, but we don't need to check the value. */
3647 if (GET_CODE (count) == CONST_INT
3648 && ! (INTVAL (count) & 1)
3649 && (INTVAL (count) > 1))
3650 {
3651 count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3652 if (TARGET_A16)
3653 emit_insn (gen_movmemhi_whi_op (desto, srco, counto, desta, srca, count));
3654 else
3655 emit_insn (gen_movmemhi_wpsi_op (desto, srco, counto, desta, srca, count));
3656 return 1;
3657 }
3658
3659 /* This is the generalized memcpy() case. */
3660 if (GET_CODE (count) != REG
3661 || REGNO (count) < FIRST_PSEUDO_REGISTER)
3662 count = copy_to_mode_reg (HImode, count);
3663
3664 if (TARGET_A16)
3665 emit_insn (gen_movmemhi_bhi_op (desto, srco, counto, desta, srca, count));
3666 else
3667 emit_insn (gen_movmemhi_bpsi_op (desto, srco, counto, desta, srca, count));
3668
3669 return 1;
3670}
3671
3672/* This is a stpcpy() opcode. $0 is the destination (MEM:BLK) after
3673 the copy, which should point to the NUL at the end of the string,
3674 $1 is the destination (MEM:BLK), and $2 is the source (MEM:BLK).
3675 Since our opcode leaves the destination pointing *after* the NUL,
3676 we must emit an adjustment. */
3677int
3678m32c_expand_movstr(rtx *operands)
3679{
3680 rtx desta, srca;
3681 rtx desto, srco;
3682
3683 desta = XEXP (operands[1], 0);
3684 srca = XEXP (operands[2], 0);
3685
3686 desto = gen_reg_rtx (Pmode);
3687 srco = gen_reg_rtx (Pmode);
3688
3689 if (GET_CODE (desta) != REG
3690 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3691 desta = copy_to_mode_reg (Pmode, desta);
3692
3693 if (GET_CODE (srca) != REG
3694 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3695 srca = copy_to_mode_reg (Pmode, srca);
3696
3697 emit_insn (gen_movstr_op (desto, srco, desta, srca));
3698 /* desto ends up being a1, which allows this type of add through MOVA. */
3699 emit_insn (gen_addpsi3 (operands[0], desto, GEN_INT (-1)));
3700
3701 return 1;
3702}
3703
3704/* This is a strcmp() opcode. $0 is the destination (HI) which holds
3705 <=>0 depending on the comparison, $1 is one string (MEM:BLK), and
3706 $2 is the other (MEM:BLK). We must do the comparison, and then
3707 convert the flags to a signed integer result. */
3708int
3709m32c_expand_cmpstr(rtx *operands)
3710{
3711 rtx src1a, src2a;
3712
3713 src1a = XEXP (operands[1], 0);
3714 src2a = XEXP (operands[2], 0);
3715
3716 if (GET_CODE (src1a) != REG
3717 || REGNO (src1a) < FIRST_PSEUDO_REGISTER)
3718 src1a = copy_to_mode_reg (Pmode, src1a);
3719
3720 if (GET_CODE (src2a) != REG
3721 || REGNO (src2a) < FIRST_PSEUDO_REGISTER)
3722 src2a = copy_to_mode_reg (Pmode, src2a);
3723
3724 emit_insn (gen_cmpstrhi_op (src1a, src2a, src1a, src2a));
3725 emit_insn (gen_cond_to_int (operands[0]));
3726
3727 return 1;
3728}
3729
3730
3fd11504 3731typedef rtx (*shift_gen_func)(rtx, rtx, rtx);
3732
3733static shift_gen_func
3734shift_gen_func_for (int mode, int code)
3735{
3736#define GFF(m,c,f) if (mode == m && code == c) return f
3737 GFF(QImode, ASHIFT, gen_ashlqi3_i);
3738 GFF(QImode, ASHIFTRT, gen_ashrqi3_i);
3739 GFF(QImode, LSHIFTRT, gen_lshrqi3_i);
3740 GFF(HImode, ASHIFT, gen_ashlhi3_i);
3741 GFF(HImode, ASHIFTRT, gen_ashrhi3_i);
3742 GFF(HImode, LSHIFTRT, gen_lshrhi3_i);
3743 GFF(PSImode, ASHIFT, gen_ashlpsi3_i);
3744 GFF(PSImode, ASHIFTRT, gen_ashrpsi3_i);
3745 GFF(PSImode, LSHIFTRT, gen_lshrpsi3_i);
3746 GFF(SImode, ASHIFT, TARGET_A16 ? gen_ashlsi3_16 : gen_ashlsi3_24);
3747 GFF(SImode, ASHIFTRT, TARGET_A16 ? gen_ashrsi3_16 : gen_ashrsi3_24);
3748 GFF(SImode, LSHIFTRT, TARGET_A16 ? gen_lshrsi3_16 : gen_lshrsi3_24);
3749#undef GFF
fedc146b 3750 gcc_unreachable ();
3fd11504 3751}
3752
85c84d5c 3753/* The m32c only has one shift, but it takes a signed count. GCC
3754 doesn't want this, so we fake it by negating any shift count when
fedc146b 3755 we're pretending to shift the other way. Also, the shift count is
3756 limited to -8..8. It's slightly better to use two shifts for 9..15
3757 than to load the count into r1h, so we do that too. */
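/* A small example of the constant-count handling below (the count is
   made up): for an HImode shift with a scaled count of 10, maxc is 8,
   so the loop emits a shift by 8 into a temporary and then a shift of
   that temporary by the remaining 2.  */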
85c84d5c 3758int
3fd11504 3759m32c_prepare_shift (rtx * operands, int scale, int shift_code)
85c84d5c 3760{
3fd11504 3761 enum machine_mode mode = GET_MODE (operands[0]);
3762 shift_gen_func func = shift_gen_func_for (mode, shift_code);
85c84d5c 3763 rtx temp;
3fd11504 3764
3765 if (GET_CODE (operands[2]) == CONST_INT)
85c84d5c 3766 {
3fd11504 3767 int maxc = TARGET_A24 && (mode == PSImode || mode == SImode) ? 32 : 8;
3768 int count = INTVAL (operands[2]) * scale;
3769
3770 while (count > maxc)
3771 {
3772 temp = gen_reg_rtx (mode);
3773 emit_insn (func (temp, operands[1], GEN_INT (maxc)));
3774 operands[1] = temp;
3775 count -= maxc;
3776 }
3777 while (count < -maxc)
3778 {
3779 temp = gen_reg_rtx (mode);
3780 emit_insn (func (temp, operands[1], GEN_INT (-maxc)));
3781 operands[1] = temp;
3782 count += maxc;
3783 }
3784 emit_insn (func (operands[0], operands[1], GEN_INT (count)));
3785 return 1;
85c84d5c 3786 }
7636ec8f 3787
3788 temp = gen_reg_rtx (QImode);
85c84d5c 3789 if (scale < 0)
7636ec8f 3790 /* The pattern has a NEG that corresponds to this. */
3791 emit_move_insn (temp, gen_rtx_NEG (QImode, operands[2]));
3792 else if (TARGET_A16 && mode == SImode)
3793 /* We do this because the code below may modify this, and we don't
3794 want to modify the origin of this value. */
3795 emit_move_insn (temp, operands[2]);
85c84d5c 3796 else
7636ec8f 3797 /* We'll only use it for the shift, no point emitting a move. */
85c84d5c 3798 temp = operands[2];
7636ec8f 3799
992bd98c 3800 if (TARGET_A16 && GET_MODE_SIZE (mode) == 4)
7636ec8f 3801 {
3802 /* The m16c has a limit of -16..16 for SI shifts, even when the
3803 shift count is in a register. Since there are so many targets
3804 of these shifts, it's better to expand the RTL here than to
3805 call a helper function.
3806
3807 The resulting code looks something like this:
3808
3809 cmp.b r1h,-16
3810 jge.b 1f
3811 shl.l -16,dest
3812 add.b r1h,16
3813 1f: cmp.b r1h,16
3814 jle.b 1f
3815 shl.l 16,dest
3816 sub.b r1h,16
3817 1f: shl.l r1h,dest
3818
3819 We take advantage of the fact that "negative" shifts are
3820 undefined to skip one of the comparisons. */
3821
3822 rtx count;
1675aa0a 3823 rtx label, insn, tempvar;
7636ec8f 3824
992bd98c 3825 emit_move_insn (operands[0], operands[1]);
3826
7636ec8f 3827 count = temp;
3828 label = gen_label_rtx ();
7636ec8f 3829 LABEL_NUSES (label) ++;
3830
db049e78 3831 tempvar = gen_reg_rtx (mode);
3832
7636ec8f 3833 if (shift_code == ASHIFT)
3834 {
3835 /* This is a left shift. We only need check positive counts. */
3836 emit_jump_insn (gen_cbranchqi4 (gen_rtx_LE (VOIDmode, 0, 0),
3837 count, GEN_INT (16), label));
db049e78 3838 emit_insn (func (tempvar, operands[0], GEN_INT (8)));
3839 emit_insn (func (operands[0], tempvar, GEN_INT (8)));
7636ec8f 3840 insn = emit_insn (gen_addqi3 (count, count, GEN_INT (-16)));
3841 emit_label_after (label, insn);
3842 }
3843 else
3844 {
3845 /* This is a right shift. We only need check negative counts. */
3846 emit_jump_insn (gen_cbranchqi4 (gen_rtx_GE (VOIDmode, 0, 0),
3847 count, GEN_INT (-16), label));
db049e78 3848 emit_insn (func (tempvar, operands[0], GEN_INT (-8)));
3849 emit_insn (func (operands[0], tempvar, GEN_INT (-8)));
7636ec8f 3850 insn = emit_insn (gen_addqi3 (count, count, GEN_INT (16)));
3851 emit_label_after (label, insn);
3852 }
992bd98c 3853 operands[1] = operands[0];
3854 emit_insn (func (operands[0], operands[0], count));
3855 return 1;
7636ec8f 3856 }
3857
85c84d5c 3858 operands[2] = temp;
3859 return 0;
3860}
3861
c47b8642 3862/* The m32c has a limited range of operations that work on PSImode
3863 values; we have to expand to SI, do the math, and truncate back to
3864 PSI. Yes, this is expensive, but hopefully gcc will learn to avoid
3865 those cases. */
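/* Roughly, for a = b * i the emitted sequence is:
     t1 = zero_extend (b)		PSI -> SI
     s  = i widened to SI		(or just copied, for a constant i)
     t2 = t1 * s			SImode multiply
     a  = truncate (t2)			SI -> PSI  */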
3866void
3867m32c_expand_neg_mulpsi3 (rtx * operands)
3868{
3869 /* operands: a = b * i */
3870 rtx temp1; /* b as SI */
fedc146b 3871  rtx scale;	/* i as SI */
3872 rtx temp2; /* a*b as SI */
c47b8642 3873
3874 temp1 = gen_reg_rtx (SImode);
3875 temp2 = gen_reg_rtx (SImode);
fedc146b 3876 if (GET_CODE (operands[2]) != CONST_INT)
3877 {
3878 scale = gen_reg_rtx (SImode);
3879 emit_insn (gen_zero_extendpsisi2 (scale, operands[2]));
3880 }
3881 else
3882 scale = copy_to_mode_reg (SImode, operands[2]);
c47b8642 3883
3884 emit_insn (gen_zero_extendpsisi2 (temp1, operands[1]));
fedc146b 3885 temp2 = expand_simple_binop (SImode, MULT, temp1, scale, temp2, 1, OPTAB_LIB);
3886 emit_insn (gen_truncsipsi2 (operands[0], temp2));
c47b8642 3887}
3888
85c84d5c 3889/* Pattern Output Functions */
3890
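/* Expand a conditional move.  We only handle an EQ or NE condition with
   two CONST_INT arms; anything else returns nonzero so the caller can
   punt (presumably by FAILing the expander).  For NE we swap the arms
   so the emitted IF_THEN_ELSE always tests EQ.  */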
fedc146b 3891int
3892m32c_expand_movcc (rtx *operands)
3893{
3894 rtx rel = operands[1];
3d594561 3895
fedc146b 3896 if (GET_CODE (rel) != EQ && GET_CODE (rel) != NE)
3897 return 1;
3898 if (GET_CODE (operands[2]) != CONST_INT
3899 || GET_CODE (operands[3]) != CONST_INT)
3900 return 1;
fedc146b 3901 if (GET_CODE (rel) == NE)
3902 {
3903 rtx tmp = operands[2];
3904 operands[2] = operands[3];
3905 operands[3] = tmp;
74f4459c 3906 rel = gen_rtx_EQ (GET_MODE (rel), XEXP (rel, 0), XEXP (rel, 1));
fedc146b 3907 }
3d594561 3908
3d594561 3909 emit_move_insn (operands[0],
3910 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
74f4459c 3911 rel,
3d594561 3912 operands[2],
3913 operands[3]));
fedc146b 3914 return 0;
3915}
3916
3917/* Used for the "insv" pattern. Return nonzero to fail, else done. */
3918int
3919m32c_expand_insv (rtx *operands)
3920{
3921 rtx op0, src0, p;
3922 int mask;
3923
3924 if (INTVAL (operands[1]) != 1)
3925 return 1;
3926
7b4716cf 3927 /* Our insv opcode (bset, bclr) can only insert a one-bit constant. */
3928 if (GET_CODE (operands[3]) != CONST_INT)
3929 return 1;
3930 if (INTVAL (operands[3]) != 0
3931 && INTVAL (operands[3]) != 1
3932 && INTVAL (operands[3]) != -1)
3933 return 1;
3934
fedc146b 3935 mask = 1 << INTVAL (operands[2]);
3936
3937 op0 = operands[0];
3938 if (GET_CODE (op0) == SUBREG
3939 && SUBREG_BYTE (op0) == 0)
3940 {
3941 rtx sub = SUBREG_REG (op0);
3942 if (GET_MODE (sub) == HImode || GET_MODE (sub) == QImode)
3943 op0 = sub;
3944 }
3945
e1ba4a27 3946 if (!can_create_pseudo_p ()
fedc146b 3947 || (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0)))
3948 src0 = op0;
3949 else
3950 {
3951 src0 = gen_reg_rtx (GET_MODE (op0));
3952 emit_move_insn (src0, op0);
3953 }
3954
3955 if (GET_MODE (op0) == HImode
3956 && INTVAL (operands[2]) >= 8
1675aa0a 3957 && GET_CODE (op0) == MEM)
fedc146b 3958 {
3959 /* We are little endian. */
29c05e22 3960 rtx new_mem = gen_rtx_MEM (QImode, plus_constant (Pmode,
3961 XEXP (op0, 0), 1));
fedc146b 3962 MEM_COPY_ATTRIBUTES (new_mem, op0);
3963 mask >>= 8;
3964 }
3965
e282424f 3966 /* First, we generate a mask with the correct polarity. If we are
3967 storing a zero, we want an AND mask, so invert it. */
3968 if (INTVAL (operands[3]) == 0)
fedc146b 3969 {
992bd98c 3970 /* Storing a zero, use an AND mask */
fedc146b 3971 if (GET_MODE (op0) == HImode)
3972 mask ^= 0xffff;
3973 else
3974 mask ^= 0xff;
3975 }
e282424f 3976 /* Now we need to properly sign-extend the mask in case we need to
3977 fall back to an AND or OR opcode. */
fedc146b 3978 if (GET_MODE (op0) == HImode)
3979 {
3980 if (mask & 0x8000)
3981 mask -= 0x10000;
3982 }
3983 else
3984 {
3985 if (mask & 0x80)
3986 mask -= 0x100;
3987 }
3988
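  /* The switch index is: bit 2 = inserting a set bit (IOR) vs. a clear
     bit (AND), bit 1 = HImode vs. QImode, bit 0 = A24 vs. A16 pattern.
     E.g. setting bit 3 of a QImode destination on an A24 part is case 5,
     gen_iorqi3_24 with mask 0x08, i.e. a bset.  */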
3989 switch ( (INTVAL (operands[3]) ? 4 : 0)
3990 + ((GET_MODE (op0) == HImode) ? 2 : 0)
3991 + (TARGET_A24 ? 1 : 0))
3992 {
3993 case 0: p = gen_andqi3_16 (op0, src0, GEN_INT (mask)); break;
3994 case 1: p = gen_andqi3_24 (op0, src0, GEN_INT (mask)); break;
3995 case 2: p = gen_andhi3_16 (op0, src0, GEN_INT (mask)); break;
3996 case 3: p = gen_andhi3_24 (op0, src0, GEN_INT (mask)); break;
3997 case 4: p = gen_iorqi3_16 (op0, src0, GEN_INT (mask)); break;
3998 case 5: p = gen_iorqi3_24 (op0, src0, GEN_INT (mask)); break;
3999 case 6: p = gen_iorhi3_16 (op0, src0, GEN_INT (mask)); break;
4000 case 7: p = gen_iorhi3_24 (op0, src0, GEN_INT (mask)); break;
6276c4d1 4001 default: p = NULL_RTX; break; /* Not reached, but silences a warning. */
fedc146b 4002 }
4003
4004 emit_insn (p);
4005 return 0;
4006}
4007
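/* Return the assembler template for an scc (store condition) pattern.
   When the destination is r0l, an EQ or NE result can be produced with
   a single STZX; otherwise we move the condition into bit 0 with BMcnd
   and mask off the rest with and.b.  */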
4008const char *
4009m32c_scc_pattern(rtx *operands, RTX_CODE code)
4010{
4011 static char buf[30];
4012 if (GET_CODE (operands[0]) == REG
4013 && REGNO (operands[0]) == R0_REGNO)
4014 {
4015 if (code == EQ)
4016 return "stzx\t#1,#0,r0l";
4017 if (code == NE)
4018 return "stzx\t#0,#1,r0l";
4019 }
4020 sprintf(buf, "bm%s\t0,%%h0\n\tand.b\t#1,%%0", GET_RTX_NAME (code));
4021 return buf;
4022}
4023
2efce110 4024/* Encode symbol attributes of a SYMBOL_REF into its
4025 SYMBOL_REF_FLAGS. */
4026static void
4027m32c_encode_section_info (tree decl, rtx rtl, int first)
4028{
4029 int extra_flags = 0;
4030
4031 default_encode_section_info (decl, rtl, first);
4032 if (TREE_CODE (decl) == FUNCTION_DECL
 4033      && m32c_special_page_vector_p (decl))
 4035    extra_flags = SYMBOL_FLAG_FUNCVEC_FUNCTION;
4036
4037 if (extra_flags)
4038 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= extra_flags;
4039}
4040
85c84d5c 4041/* Returns TRUE if the current function is a leaf, and thus we can
4042 determine which registers an interrupt function really needs to
4043 save. The logic below is mostly about finding the insn sequence
4044 that's the function, versus any sequence that might be open for the
4045 current insn. */
4046static int
4047m32c_leaf_function_p (void)
4048{
4049 rtx saved_first, saved_last;
4050 struct sequence_stack *seq;
4051 int rv;
4052
fd6ffb7c 4053 saved_first = crtl->emit.x_first_insn;
4054 saved_last = crtl->emit.x_last_insn;
4055 for (seq = crtl->emit.sequence_stack; seq && seq->next; seq = seq->next)
85c84d5c 4056 ;
4057 if (seq)
4058 {
fd6ffb7c 4059 crtl->emit.x_first_insn = seq->first;
4060 crtl->emit.x_last_insn = seq->last;
85c84d5c 4061 }
4062
4063 rv = leaf_function_p ();
4064
fd6ffb7c 4065 crtl->emit.x_first_insn = saved_first;
4066 crtl->emit.x_last_insn = saved_last;
85c84d5c 4067 return rv;
4068}
4069
4070/* Returns TRUE if the current function needs to use the ENTER/EXIT
4071 opcodes. If the function doesn't need the frame base or stack
4072 pointer, it can use the simpler RTS opcode. */
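/* We walk to the bottom of the sequence stack to find the insns of the
   function body itself, then scan them for any mention of SP or FB.  */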
4073static bool
4074m32c_function_needs_enter (void)
4075{
4076 rtx insn;
4077 struct sequence_stack *seq;
4078 rtx sp = gen_rtx_REG (Pmode, SP_REGNO);
4079 rtx fb = gen_rtx_REG (Pmode, FB_REGNO);
4080
4081 insn = get_insns ();
fd6ffb7c 4082 for (seq = crtl->emit.sequence_stack;
85c84d5c 4083 seq;
4084 insn = seq->first, seq = seq->next);
4085
4086 while (insn)
4087 {
4088 if (reg_mentioned_p (sp, insn))
4089 return true;
4090 if (reg_mentioned_p (fb, insn))
4091 return true;
4092 insn = NEXT_INSN (insn);
4093 }
4094 return false;
4095}
4096
4097/* Mark all the subexpressions of the PARALLEL rtx PAR as
4098 frame-related. Return PAR.
4099
4100 dwarf2out.c:dwarf2out_frame_debug_expr ignores sub-expressions of a
4101 PARALLEL rtx other than the first if they do not have the
4102 FRAME_RELATED flag set on them. So this function is handy for
4103 marking up 'enter' instructions. */
4104static rtx
4105m32c_all_frame_related (rtx par)
4106{
4107 int len = XVECLEN (par, 0);
4108 int i;
4109
4110 for (i = 0; i < len; i++)
4111 F (XVECEXP (par, 0, i));
4112
4113 return par;
4114}
4115
4116/* Emits the prologue. See the frame layout comment earlier in this
 4117   file.  We can reserve up to 256 bytes with the ENTER opcode; beyond
 4118   that we manually update sp.  */
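/* In order, we may emit: FSET B or PUSHM for interrupt handlers, ENTER
   (unless the function can get away with a plain RTS), an explicit SP
   adjustment for frames larger than 254 bytes, and the PUSHM for the
   call-saved registers.  */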
4119void
4120m32c_emit_prologue (void)
4121{
4122 int frame_size, extra_frame_size = 0, reg_save_size;
4123 int complex_prologue = 0;
4124
4125 cfun->machine->is_leaf = m32c_leaf_function_p ();
4126 if (interrupt_p (cfun->decl))
4127 {
4128 cfun->machine->is_interrupt = 1;
4129 complex_prologue = 1;
4130 }
cc24427c 4131 else if (bank_switch_p (cfun->decl))
4132 warning (OPT_Wattributes,
4133 "%<bank_switch%> has no effect on non-interrupt functions");
85c84d5c 4134
4135 reg_save_size = m32c_pushm_popm (PP_justcount);
4136
4137 if (interrupt_p (cfun->decl))
cc24427c 4138 {
4139 if (bank_switch_p (cfun->decl))
4140 emit_insn (gen_fset_b ());
4141 else if (cfun->machine->intr_pushm)
4142 emit_insn (gen_pushm (GEN_INT (cfun->machine->intr_pushm)));
4143 }
85c84d5c 4144
4145 frame_size =
4146 m32c_initial_elimination_offset (FB_REGNO, SP_REGNO) - reg_save_size;
4147 if (frame_size == 0
85c84d5c 4148 && !m32c_function_needs_enter ())
4149 cfun->machine->use_rts = 1;
4150
4151 if (frame_size > 254)
4152 {
4153 extra_frame_size = frame_size - 254;
4154 frame_size = 254;
4155 }
4156 if (cfun->machine->use_rts == 0)
4157 F (emit_insn (m32c_all_frame_related
4158 (TARGET_A16
97678fce 4159 ? gen_prologue_enter_16 (GEN_INT (frame_size + 2))
4160 : gen_prologue_enter_24 (GEN_INT (frame_size + 4)))));
85c84d5c 4161
4162 if (extra_frame_size)
4163 {
4164 complex_prologue = 1;
4165 if (TARGET_A16)
4166 F (emit_insn (gen_addhi3 (gen_rtx_REG (HImode, SP_REGNO),
4167 gen_rtx_REG (HImode, SP_REGNO),
4168 GEN_INT (-extra_frame_size))));
4169 else
4170 F (emit_insn (gen_addpsi3 (gen_rtx_REG (PSImode, SP_REGNO),
4171 gen_rtx_REG (PSImode, SP_REGNO),
4172 GEN_INT (-extra_frame_size))));
4173 }
4174
4175 complex_prologue += m32c_pushm_popm (PP_pushm);
4176
4177 /* This just emits a comment into the .s file for debugging. */
4178 if (complex_prologue)
4179 emit_insn (gen_prologue_end ());
4180}
4181
4182/* Likewise, for the epilogue. The only exception is that, for
4183 interrupts, we must manually unwind the frame as the REIT opcode
4184 doesn't do that. */
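/* Roughly: pop the call-saved registers, then for interrupt handlers
   restore SP from FB (via A0), pop FB, pop the interrupt-saved
   registers (unless we are bank switching), and finish with FREIT or
   REIT; ordinary functions finish with RTS or EXITD.  */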
4185void
4186m32c_emit_epilogue (void)
4187{
5dfa0f5a 4188 int popm_count = m32c_pushm_popm (PP_justcount);
4189
85c84d5c 4190 /* This just emits a comment into the .s file for debugging. */
5dfa0f5a 4191 if (popm_count > 0 || cfun->machine->is_interrupt)
85c84d5c 4192 emit_insn (gen_epilogue_start ());
4193
5dfa0f5a 4194 if (popm_count > 0)
4195 m32c_pushm_popm (PP_popm);
85c84d5c 4196
4197 if (cfun->machine->is_interrupt)
4198 {
4199 enum machine_mode spmode = TARGET_A16 ? HImode : PSImode;
4200
cc24427c 4201 /* REIT clears B flag and restores $fp for us, but we still
4202 have to fix up the stack. USE_RTS just means we didn't
4203 emit ENTER. */
4204 if (!cfun->machine->use_rts)
4205 {
4206 emit_move_insn (gen_rtx_REG (spmode, A0_REGNO),
4207 gen_rtx_REG (spmode, FP_REGNO));
4208 emit_move_insn (gen_rtx_REG (spmode, SP_REGNO),
4209 gen_rtx_REG (spmode, A0_REGNO));
4210 /* We can't just add this to the POPM because it would be in
4211 the wrong order, and wouldn't fix the stack if we're bank
4212 switching. */
4213 if (TARGET_A16)
4214 emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, FP_REGNO)));
4215 else
4216 emit_insn (gen_poppsi (gen_rtx_REG (PSImode, FP_REGNO)));
4217 }
4218 if (!bank_switch_p (cfun->decl) && cfun->machine->intr_pushm)
4219 emit_insn (gen_popm (GEN_INT (cfun->machine->intr_pushm)));
4220
bd2fe2f1 4221 /* The FREIT (Fast REturn from InTerrupt) instruction should be
4222 generated only for M32C/M32CM targets (generate the REIT
4223 instruction otherwise). */
cc24427c 4224 if (fast_interrupt_p (cfun->decl))
bd2fe2f1 4225 {
 4226	  /* Check whether the fast_interrupt attribute is set for M32C or M32CM.  */
4227 if (TARGET_A24)
4228 {
4229 emit_jump_insn (gen_epilogue_freit ());
4230 }
 4231	  /* If the fast_interrupt attribute is set for an R8C or M16C
 4232	     target, ignore the attribute and generate the REIT
 4233	     instruction instead.  */
4234 else
4235 {
4236 warning (OPT_Wattributes,
4237 "%<fast_interrupt%> attribute directive ignored");
4238 emit_jump_insn (gen_epilogue_reit_16 ());
4239 }
4240 }
cc24427c 4241 else if (TARGET_A16)
ed16d658 4242 emit_jump_insn (gen_epilogue_reit_16 ());
4243 else
4244 emit_jump_insn (gen_epilogue_reit_24 ());
85c84d5c 4245 }
4246 else if (cfun->machine->use_rts)
4247 emit_jump_insn (gen_epilogue_rts ());
ed16d658 4248 else if (TARGET_A16)
4249 emit_jump_insn (gen_epilogue_exitd_16 ());
85c84d5c 4250 else
ed16d658 4251 emit_jump_insn (gen_epilogue_exitd_24 ());
85c84d5c 4252}
4253
4254void
4255m32c_emit_eh_epilogue (rtx ret_addr)
4256{
4257 /* R0[R2] has the stack adjustment. R1[R3] has the address to
4258 return to. We have to fudge the stack, pop everything, pop SP
4259 (fudged), and return (fudged). This is actually easier to do in
4260 assembler, so punt to libgcc. */
4261 emit_jump_insn (gen_eh_epilogue (ret_addr, cfun->machine->eh_stack_adjust));
18b42941 4262 /* emit_clobber (gen_rtx_REG (HImode, R0L_REGNO)); */
85c84d5c 4263}
4264
992bd98c 4265/* Indicate which flags must be properly set for a given conditional. */
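/* E.g. an unsigned less-than (LTU) needs only the carry flag, while a
   signed less-or-equal (LE) needs the O, S and Z flags.  */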
4266static int
4267flags_needed_for_conditional (rtx cond)
4268{
4269 switch (GET_CODE (cond))
4270 {
4271 case LE:
4272 case GT:
4273 return FLAGS_OSZ;
4274 case LEU:
4275 case GTU:
4276 return FLAGS_ZC;
4277 case LT:
4278 case GE:
4279 return FLAGS_OS;
4280 case LTU:
4281 case GEU:
4282 return FLAGS_C;
4283 case EQ:
4284 case NE:
4285 return FLAGS_Z;
4286 default:
4287 return FLAGS_N;
4288 }
4289}
4290
4291#define DEBUG_CMP 0
4292
4293/* Returns true if a compare insn is redundant because it would only
4294 set flags that are already set correctly. */
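/* Two cases are recognized: a compare whose operands match those of the
   immediately preceding compare, and a compare against zero that
   directly follows a SET which already left the needed flags set for
   the same operand.  */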
4295static bool
4296m32c_compare_redundant (rtx cmp, rtx *operands)
4297{
4298 int flags_needed;
4299 int pflags;
4300 rtx prev, pp, next;
1675aa0a 4301 rtx op0, op1;
992bd98c 4302#if DEBUG_CMP
4303 int prev_icode, i;
4304#endif
4305
4306 op0 = operands[0];
4307 op1 = operands[1];
992bd98c 4308
4309#if DEBUG_CMP
4310 fprintf(stderr, "\n\033[32mm32c_compare_redundant\033[0m\n");
4311 debug_rtx(cmp);
4312 for (i=0; i<2; i++)
4313 {
4314 fprintf(stderr, "operands[%d] = ", i);
4315 debug_rtx(operands[i]);
4316 }
4317#endif
4318
4319 next = next_nonnote_insn (cmp);
4320 if (!next || !INSN_P (next))
4321 {
4322#if DEBUG_CMP
4323 fprintf(stderr, "compare not followed by insn\n");
4324 debug_rtx(next);
4325#endif
4326 return false;
4327 }
4328 if (GET_CODE (PATTERN (next)) == SET
4329 && GET_CODE (XEXP ( PATTERN (next), 1)) == IF_THEN_ELSE)
4330 {
4331 next = XEXP (XEXP (PATTERN (next), 1), 0);
4332 }
4333 else if (GET_CODE (PATTERN (next)) == SET)
4334 {
4335 /* If this is a conditional, flags_needed will be something
4336 other than FLAGS_N, which we test below. */
4337 next = XEXP (PATTERN (next), 1);
4338 }
4339 else
4340 {
4341#if DEBUG_CMP
4342 fprintf(stderr, "compare not followed by conditional\n");
4343 debug_rtx(next);
4344#endif
4345 return false;
4346 }
4347#if DEBUG_CMP
4348 fprintf(stderr, "conditional is: ");
4349 debug_rtx(next);
4350#endif
4351
4352 flags_needed = flags_needed_for_conditional (next);
4353 if (flags_needed == FLAGS_N)
4354 {
4355#if DEBUG_CMP
4356 fprintf(stderr, "compare not followed by conditional\n");
4357 debug_rtx(next);
4358#endif
4359 return false;
4360 }
4361
4362 /* Compare doesn't set overflow and carry the same way that
4363 arithmetic instructions do, so we can't replace those. */
4364 if (flags_needed & FLAGS_OC)
4365 return false;
4366
4367 prev = cmp;
4368 do {
4369 prev = prev_nonnote_insn (prev);
4370 if (!prev)
4371 {
4372#if DEBUG_CMP
4373 fprintf(stderr, "No previous insn.\n");
4374#endif
4375 return false;
4376 }
4377 if (!INSN_P (prev))
4378 {
4379#if DEBUG_CMP
4380 fprintf(stderr, "Previous insn is a non-insn.\n");
4381#endif
4382 return false;
4383 }
4384 pp = PATTERN (prev);
4385 if (GET_CODE (pp) != SET)
4386 {
4387#if DEBUG_CMP
4388 fprintf(stderr, "Previous insn is not a SET.\n");
4389#endif
4390 return false;
4391 }
4392 pflags = get_attr_flags (prev);
4393
 4394    /* Looking up the attributes of previous insns corrupts the recog
 4395       tables, so force CMP to be recognized again.  */
4396 INSN_UID (cmp) = -1;
4397 recog (PATTERN (cmp), cmp, 0);
4398
4399 if (pflags == FLAGS_N
4400 && reg_mentioned_p (op0, pp))
4401 {
4402#if DEBUG_CMP
4403 fprintf(stderr, "intermediate non-flags insn uses op:\n");
4404 debug_rtx(prev);
4405#endif
4406 return false;
4407 }
f3269732 4408
4409 /* Check for comparisons against memory - between volatiles and
4410 aliases, we just can't risk this one. */
 4411    if (GET_CODE (operands[0]) == MEM
 4412	|| GET_CODE (operands[1]) == MEM)
4413 {
4414#if DEBUG_CMP
4415 fprintf(stderr, "comparisons with memory:\n");
4416 debug_rtx(prev);
4417#endif
4418 return false;
4419 }
4420
4421 /* Check for PREV changing a register that's used to compute a
4422 value in CMP, even if it doesn't otherwise change flags. */
4423 if (GET_CODE (operands[0]) == REG
4424 && rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[0]))
4425 {
4426#if DEBUG_CMP
4427 fprintf(stderr, "sub-value affected, op0:\n");
4428 debug_rtx(prev);
4429#endif
4430 return false;
4431 }
4432 if (GET_CODE (operands[1]) == REG
4433 && rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[1]))
4434 {
4435#if DEBUG_CMP
4436 fprintf(stderr, "sub-value affected, op1:\n");
4437 debug_rtx(prev);
4438#endif
4439 return false;
4440 }
4441
992bd98c 4442 } while (pflags == FLAGS_N);
4443#if DEBUG_CMP
4444 fprintf(stderr, "previous flag-setting insn:\n");
4445 debug_rtx(prev);
4446 debug_rtx(pp);
4447#endif
4448
4449 if (GET_CODE (pp) == SET
4450 && GET_CODE (XEXP (pp, 0)) == REG
4451 && REGNO (XEXP (pp, 0)) == FLG_REGNO
4452 && GET_CODE (XEXP (pp, 1)) == COMPARE)
4453 {
4454 /* Adjacent cbranches must have the same operands to be
4455 redundant. */
4456 rtx pop0 = XEXP (XEXP (pp, 1), 0);
4457 rtx pop1 = XEXP (XEXP (pp, 1), 1);
4458#if DEBUG_CMP
4459 fprintf(stderr, "adjacent cbranches\n");
4460 debug_rtx(pop0);
4461 debug_rtx(pop1);
4462#endif
4463 if (rtx_equal_p (op0, pop0)
4464 && rtx_equal_p (op1, pop1))
4465 return true;
4466#if DEBUG_CMP
4467 fprintf(stderr, "prev cmp not same\n");
4468#endif
4469 return false;
4470 }
4471
4472 /* Else the previous insn must be a SET, with either the source or
4473 dest equal to operands[0], and operands[1] must be zero. */
4474
4475 if (!rtx_equal_p (op1, const0_rtx))
4476 {
4477#if DEBUG_CMP
4478 fprintf(stderr, "operands[1] not const0_rtx\n");
4479#endif
4480 return false;
4481 }
4482 if (GET_CODE (pp) != SET)
4483 {
4484#if DEBUG_CMP
4485 fprintf (stderr, "pp not set\n");
4486#endif
4487 return false;
4488 }
4489 if (!rtx_equal_p (op0, SET_SRC (pp))
4490 && !rtx_equal_p (op0, SET_DEST (pp)))
4491 {
4492#if DEBUG_CMP
4493 fprintf(stderr, "operands[0] not found in set\n");
4494#endif
4495 return false;
4496 }
4497
4498#if DEBUG_CMP
4499 fprintf(stderr, "cmp flags %x prev flags %x\n", flags_needed, pflags);
4500#endif
4501 if ((pflags & flags_needed) == flags_needed)
4502 return true;
4503
4504 return false;
4505}
4506
4507/* Return the pattern for a compare. This will be commented out if
4508 the compare is redundant, else a normal pattern is returned. Thus,
4509 the assembler output says where the compare would have been. */
4510char *
4511m32c_output_compare (rtx insn, rtx *operands)
4512{
8deb3959 4513 static char templ[] = ";cmp.b\t%1,%0";
992bd98c 4514 /* ^ 5 */
4515
8deb3959 4516 templ[5] = " bwll"[GET_MODE_SIZE(GET_MODE(operands[0]))];
992bd98c 4517 if (m32c_compare_redundant (insn, operands))
4518 {
4519#if DEBUG_CMP
4520 fprintf(stderr, "cbranch: cmp not needed\n");
4521#endif
8deb3959 4522 return templ;
992bd98c 4523 }
4524
4525#if DEBUG_CMP
f3269732 4526 fprintf(stderr, "cbranch: cmp needed: `%s'\n", templ + 1);
992bd98c 4527#endif
8deb3959 4528 return templ + 1;
992bd98c 4529}
4530
2efce110 4531#undef TARGET_ENCODE_SECTION_INFO
4532#define TARGET_ENCODE_SECTION_INFO m32c_encode_section_info
4533
5a1c68c3 4534/* If the frame pointer isn't actually used, we detect that ourselves.
 4535   But the stack pointer's addressing is not as flexible as the frame
 4536   pointer's, so we always claim the frame pointer is required.  */
4537
4538#undef TARGET_FRAME_POINTER_REQUIRED
4539#define TARGET_FRAME_POINTER_REQUIRED hook_bool_void_true
4540
85c84d5c 4541/* The Global `targetm' Variable. */
4542
4543struct gcc_target targetm = TARGET_INITIALIZER;
4544
4545#include "gt-m32c.h"