/* Target Code for R8C/M16C/M32C
   Copyright (C) 2005-2015 Free Software Foundation, Inc.
   Contributed by Red Hat.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "cfghooks.h"
#include "tree.h"
#include "gimple.h"
#include "rtl.h"
#include "df.h"
#include "regs.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-flags.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "recog.h"
#include "reload.h"
#include "diagnostic-core.h"
#include "alias.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "varasm.h"
#include "calls.h"
#include "expmed.h"
#include "dojump.h"
#include "explow.h"
#include "emit-rtl.h"
#include "stmt.h"
#include "expr.h"
#include "insn-codes.h"
#include "optabs.h"
#include "except.h"
#include "target.h"
#include "tm_p.h"
#include "langhooks.h"
#include "cfgrtl.h"
#include "cfganal.h"
#include "lcm.h"
#include "cfgbuild.h"
#include "cfgcleanup.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "tm-constrs.h"
#include "builtins.h"

/* This file should be included last.  */
#include "target-def.h"

/* Prototypes */

/* Used by m32c_pushm_popm.  */
typedef enum
{
  PP_pushm,
  PP_popm,
  PP_justcount
} Push_Pop_Type;

static bool m32c_function_needs_enter (void);
static tree interrupt_handler (tree *, tree, tree, int, bool *);
static tree function_vector_handler (tree *, tree, tree, int, bool *);
static int interrupt_p (tree node);
static int bank_switch_p (tree node);
static int fast_interrupt_p (tree node);
static int interrupt_p (tree node);
static bool m32c_asm_integer (rtx, unsigned int, int);
static int m32c_comp_type_attributes (const_tree, const_tree);
static bool m32c_fixed_condition_code_regs (unsigned int *, unsigned int *);
static struct machine_function *m32c_init_machine_status (void);
static void m32c_insert_attributes (tree, tree *);
static bool m32c_legitimate_address_p (machine_mode, rtx, bool);
static bool m32c_addr_space_legitimate_address_p (machine_mode, rtx, bool, addr_space_t);
static rtx m32c_function_arg (cumulative_args_t, machine_mode,
			      const_tree, bool);
static bool m32c_pass_by_reference (cumulative_args_t, machine_mode,
				    const_tree, bool);
static void m32c_function_arg_advance (cumulative_args_t, machine_mode,
				       const_tree, bool);
static unsigned int m32c_function_arg_boundary (machine_mode, const_tree);
static int m32c_pushm_popm (Push_Pop_Type);
static bool m32c_strict_argument_naming (cumulative_args_t);
static rtx m32c_struct_value_rtx (tree, int);
static rtx m32c_subreg (machine_mode, rtx, machine_mode, int);
static int need_to_save (int);
static rtx m32c_function_value (const_tree, const_tree, bool);
static rtx m32c_libcall_value (machine_mode, const_rtx);

/* Returns true if an address is specified, else false.  */
static bool m32c_get_pragma_address (const char *varname, unsigned *addr);

#define SYMBOL_FLAG_FUNCVEC_FUNCTION	(SYMBOL_FLAG_MACH_DEP << 0)

#define streq(a,b) (strcmp ((a), (b)) == 0)

/* Internal support routines */

/* Debugging statements are tagged with DEBUG0 only so that they can
   be easily enabled individually, by replacing the '0' with '1' as
   needed.  */
#define DEBUG0 0
#define DEBUG1 1

#if DEBUG0
#include "print-tree.h"
/* This is needed by some of the commented-out debug statements
   below.  */
static char const *class_names[LIM_REG_CLASSES] = REG_CLASS_NAMES;
#endif
static int class_contents[LIM_REG_CLASSES][1] = REG_CLASS_CONTENTS;

/* These are all to support encode_pattern().  */
static char pattern[30], *patternp;
static GTY(()) rtx patternr[30];
#define RTX_IS(x) (streq (pattern, x))

/* Some macros to simplify the logic throughout this file.  */
#define IS_MEM_REGNO(regno) ((regno) >= MEM0_REGNO && (regno) <= MEM7_REGNO)
#define IS_MEM_REG(rtx) (GET_CODE (rtx) == REG && IS_MEM_REGNO (REGNO (rtx)))

#define IS_CR_REGNO(regno) ((regno) >= SB_REGNO && (regno) <= PC_REGNO)
#define IS_CR_REG(rtx) (GET_CODE (rtx) == REG && IS_CR_REGNO (REGNO (rtx)))

static int
far_addr_space_p (rtx x)
{
  if (GET_CODE (x) != MEM)
    return 0;
#if DEBUG0
  fprintf(stderr, "\033[35mfar_addr_space: "); debug_rtx(x);
  fprintf(stderr, " = %d\033[0m\n", MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR);
#endif
  return MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR;
}

/* We do most RTX matching by converting the RTX into a string, and
   using string compares.  This vastly simplifies the logic in many of
   the functions in this file.

   On exit, pattern[] has the encoded string (use RTX_IS("...") to
   compare it) and patternr[] has pointers to the nodes in the RTX
   corresponding to each character in the encoded string.  The latter
   is mostly used by print_operand().

   Unrecognized patterns have '?' in them; this shows up when the
   assembler complains about syntax errors.
*/
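/* For example, (mem (plus (reg a0) (const_int 4))) encodes as "m+ri":
   'm' for the MEM, '+' for the PLUS, then one character per operand.
   A (mem (symbol_ref)) encodes as "ms".  */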

static void
encode_pattern_1 (rtx x)
{
  int i;

  if (patternp == pattern + sizeof (pattern) - 2)
    {
      patternp[-1] = '?';
      return;
    }

  patternr[patternp - pattern] = x;

  switch (GET_CODE (x))
    {
    case REG:
      *patternp++ = 'r';
      break;
    case SUBREG:
      if (GET_MODE_SIZE (GET_MODE (x)) !=
	  GET_MODE_SIZE (GET_MODE (XEXP (x, 0))))
	*patternp++ = 'S';
      if (GET_MODE (x) == PSImode
	  && GET_CODE (XEXP (x, 0)) == REG)
	*patternp++ = 'S';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case MEM:
      *patternp++ = 'm';
    case CONST:
      encode_pattern_1 (XEXP (x, 0));
      break;
    case SIGN_EXTEND:
      *patternp++ = '^';
      *patternp++ = 'S';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case ZERO_EXTEND:
      *patternp++ = '^';
      *patternp++ = 'Z';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case PLUS:
      *patternp++ = '+';
      encode_pattern_1 (XEXP (x, 0));
      encode_pattern_1 (XEXP (x, 1));
      break;
    case PRE_DEC:
      *patternp++ = '>';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case POST_INC:
      *patternp++ = '<';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case LO_SUM:
      *patternp++ = 'L';
      encode_pattern_1 (XEXP (x, 0));
      encode_pattern_1 (XEXP (x, 1));
      break;
    case HIGH:
      *patternp++ = 'H';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case SYMBOL_REF:
      *patternp++ = 's';
      break;
    case LABEL_REF:
      *patternp++ = 'l';
      break;
    case CODE_LABEL:
      *patternp++ = 'c';
      break;
    case CONST_INT:
    case CONST_DOUBLE:
      *patternp++ = 'i';
      break;
    case UNSPEC:
      *patternp++ = 'u';
      *patternp++ = '0' + XCINT (x, 1, UNSPEC);
      for (i = 0; i < XVECLEN (x, 0); i++)
	encode_pattern_1 (XVECEXP (x, 0, i));
      break;
    case USE:
      *patternp++ = 'U';
      break;
    case PARALLEL:
      *patternp++ = '|';
      for (i = 0; i < XVECLEN (x, 0); i++)
	encode_pattern_1 (XVECEXP (x, 0, i));
      break;
    case EXPR_LIST:
      *patternp++ = 'E';
      encode_pattern_1 (XEXP (x, 0));
      if (XEXP (x, 1))
	encode_pattern_1 (XEXP (x, 1));
      break;
    default:
      *patternp++ = '?';
#if DEBUG0
      fprintf (stderr, "can't encode pattern %s\n",
	       GET_RTX_NAME (GET_CODE (x)));
      debug_rtx (x);
#endif
      break;
    }
}

static void
encode_pattern (rtx x)
{
  patternp = pattern;
  encode_pattern_1 (x);
  *patternp = 0;
}

/* Since register names indicate the mode they're used in, we need a
   way to determine which name to refer to the register with.  Called
   by print_operand().  */
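/* For example, R0_REGNO is printed as "r0l" when used in QImode, as
   "r2r0" when a 3- or 4-byte value spans r0 and r2, and as plain
   "r0" otherwise.  */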

static const char *
reg_name_with_mode (int regno, machine_mode mode)
{
  int mlen = GET_MODE_SIZE (mode);
  if (regno == R0_REGNO && mlen == 1)
    return "r0l";
  if (regno == R0_REGNO && (mlen == 3 || mlen == 4))
    return "r2r0";
  if (regno == R0_REGNO && mlen == 6)
    return "r2r1r0";
  if (regno == R0_REGNO && mlen == 8)
    return "r3r1r2r0";
  if (regno == R1_REGNO && mlen == 1)
    return "r1l";
  if (regno == R1_REGNO && (mlen == 3 || mlen == 4))
    return "r3r1";
  if (regno == A0_REGNO && TARGET_A16 && (mlen == 3 || mlen == 4))
    return "a1a0";
  return reg_names[regno];
}

/* How many bytes a register uses on stack when it's pushed.  We need
   to know this because the push opcode needs to explicitly indicate
   the size of the register, even though the name of the register
   already tells it that.  Used by m32c_output_reg_{push,pop}, which
   is only used through calls to ASM_OUTPUT_REG_{PUSH,POP}.  */

static int
reg_push_size (int regno)
{
  switch (regno)
    {
    case R0_REGNO:
    case R1_REGNO:
      return 2;
    case R2_REGNO:
    case R3_REGNO:
    case FLG_REGNO:
      return 2;
    case A0_REGNO:
    case A1_REGNO:
    case SB_REGNO:
    case FB_REGNO:
    case SP_REGNO:
      if (TARGET_A16)
	return 2;
      else
	return 3;
    default:
      gcc_unreachable ();
    }
}

/* Given two register classes, find the largest intersection between
   them.  If there is no intersection, return RETURNED_IF_EMPTY
   instead.  */
static reg_class_t
reduce_class (reg_class_t original_class, reg_class_t limiting_class,
	      reg_class_t returned_if_empty)
{
  HARD_REG_SET cc;
  int i;
  reg_class_t best = NO_REGS;
  unsigned int best_size = 0;

  if (original_class == limiting_class)
    return original_class;

  cc = reg_class_contents[original_class];
  AND_HARD_REG_SET (cc, reg_class_contents[limiting_class]);

  for (i = 0; i < LIM_REG_CLASSES; i++)
    {
      if (hard_reg_set_subset_p (reg_class_contents[i], cc))
	if (best_size < reg_class_size[i])
	  {
	    best = (reg_class_t) i;
	    best_size = reg_class_size[i];
	  }

    }
  if (best == NO_REGS)
    return returned_if_empty;
  return best;
}

/* Used by m32c_register_move_cost to determine if a move is
   impossibly expensive.  */
static bool
class_can_hold_mode (reg_class_t rclass, machine_mode mode)
{
  /* Cache the results:  0=untested 1=no 2=yes */
  static char results[LIM_REG_CLASSES][MAX_MACHINE_MODE];

  if (results[(int) rclass][mode] == 0)
    {
      int r;
      results[rclass][mode] = 1;
      for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
	if (in_hard_reg_set_p (reg_class_contents[(int) rclass], mode, r)
	    && HARD_REGNO_MODE_OK (r, mode))
	  {
	    results[rclass][mode] = 2;
	    break;
	  }
    }

#if DEBUG0
  fprintf (stderr, "class %s can hold %s? %s\n",
	   class_names[(int) rclass], mode_name[mode],
	   (results[rclass][mode] == 2) ? "yes" : "no");
#endif
  return results[(int) rclass][mode] == 2;
}

/* Run-time Target Specification.  */

/* Memregs are memory locations that gcc treats like general
   registers, as there are a limited number of true registers and the
   m32c families can use memory in most places that registers can be
   used.

   However, since memory accesses are more expensive than registers,
   we allow the user to limit the number of memregs available, in
   order to try to persuade gcc to try harder to use real registers.

   Memregs are provided by lib1funcs.S.
*/
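/* The limit itself comes from the -memregs= command-line option;
   m32c_option_override below rejects values outside 0..16 and
   defaults to 16 when the option is not given.  */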

int ok_to_change_target_memregs = TRUE;

/* Implements TARGET_OPTION_OVERRIDE.  */

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE m32c_option_override

static void
m32c_option_override (void)
{
  /* We limit memregs to 0..16, and provide a default.  */
  if (global_options_set.x_target_memregs)
    {
      if (target_memregs < 0 || target_memregs > 16)
	error ("invalid target memregs value '%d'", target_memregs);
    }
  else
    target_memregs = 16;

  if (TARGET_A24)
    flag_ivopts = 0;

  /* This target defaults to strict volatile bitfields.  */
  if (flag_strict_volatile_bitfields < 0 && abi_version_at_least(2))
    flag_strict_volatile_bitfields = 1;

  /* r8c/m16c have no 16-bit indirect call, so thunks are involved.
     This is always worse than an absolute call.  */
  if (TARGET_A16)
    flag_no_function_cse = 1;

  /* This wants to put insns between compares and their jumps.  */
  /* FIXME: The right solution is to properly trace the flags register
     values, but that is too much work for stage 4.  */
  flag_combine_stack_adjustments = 0;
}

#undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m32c_override_options_after_change

static void
m32c_override_options_after_change (void)
{
  if (TARGET_A16)
    flag_no_function_cse = 1;
}

/* Defining data structures for per-function information */

/* The usual; we set up our machine_function data.  */
static struct machine_function *
m32c_init_machine_status (void)
{
  return ggc_cleared_alloc<machine_function> ();
}

/* Implements INIT_EXPANDERS.  We just set up to call the above
   function.  */
void
m32c_init_expanders (void)
{
  init_machine_status = m32c_init_machine_status;
}

/* Storage Layout */

/* Register Basics */

/* Basic Characteristics of Registers */

/* Whether a mode fits in a register is complex enough to warrant a
   table.  */
static struct
{
  char qi_regs;
  char hi_regs;
  char pi_regs;
  char si_regs;
  char di_regs;
} nregs_table[FIRST_PSEUDO_REGISTER] =
{
  { 1, 1, 2, 2, 4 },		/* r0 */
  { 0, 1, 0, 0, 0 },		/* r2 */
  { 1, 1, 2, 2, 0 },		/* r1 */
  { 0, 1, 0, 0, 0 },		/* r3 */
  { 0, 1, 1, 0, 0 },		/* a0 */
  { 0, 1, 1, 0, 0 },		/* a1 */
  { 0, 1, 1, 0, 0 },		/* sb */
  { 0, 1, 1, 0, 0 },		/* fb */
  { 0, 1, 1, 0, 0 },		/* sp */
  { 1, 1, 1, 0, 0 },		/* pc */
  { 0, 0, 0, 0, 0 },		/* fl */
  { 1, 1, 1, 0, 0 },		/* ap */
  { 1, 1, 2, 2, 4 },		/* mem0 */
  { 1, 1, 2, 2, 4 },		/* mem1 */
  { 1, 1, 2, 2, 4 },		/* mem2 */
  { 1, 1, 2, 2, 4 },		/* mem3 */
  { 1, 1, 2, 2, 4 },		/* mem4 */
  { 1, 1, 2, 2, 0 },		/* mem5 */
  { 1, 1, 2, 2, 0 },		/* mem6 */
  { 1, 1, 0, 0, 0 },		/* mem7 */
};
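/* Read each row as the number of hard registers a value of that mode
   occupies starting at that register: e.g. the r0 row says QImode and
   HImode values take one register, PSImode and SImode values take two
   (r2r0), and DImode values take four (r3r1r2r0).  */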

/* Implements TARGET_CONDITIONAL_REGISTER_USAGE.  We adjust the number
   of available memregs, and select which registers need to be preserved
   across calls based on the chip family.  */

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE m32c_conditional_register_usage
void
m32c_conditional_register_usage (void)
{
  int i;

  if (0 <= target_memregs && target_memregs <= 16)
    {
      /* The command line option is bytes, but our "registers" are
	 16-bit words.  */
      for (i = (target_memregs+1)/2; i < 8; i++)
	{
	  fixed_regs[MEM0_REGNO + i] = 1;
	  CLEAR_HARD_REG_BIT (reg_class_contents[MEM_REGS], MEM0_REGNO + i);
	}
    }

  /* M32CM and M32C preserve more registers across function calls.  */
  if (TARGET_A24)
    {
      call_used_regs[R1_REGNO] = 0;
      call_used_regs[R2_REGNO] = 0;
      call_used_regs[R3_REGNO] = 0;
      call_used_regs[A0_REGNO] = 0;
      call_used_regs[A1_REGNO] = 0;
    }
}

/* How Values Fit in Registers */

/* Implements HARD_REGNO_NREGS.  This is complicated by the fact that
   different registers are different sizes from each other, *and* may
   be different sizes in different chip families.  */
static int
m32c_hard_regno_nregs_1 (int regno, machine_mode mode)
{
  if (regno == FLG_REGNO && mode == CCmode)
    return 1;
  if (regno >= FIRST_PSEUDO_REGISTER)
    return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);

  if (regno >= MEM0_REGNO && regno <= MEM7_REGNO)
    return (GET_MODE_SIZE (mode) + 1) / 2;

  if (GET_MODE_SIZE (mode) <= 1)
    return nregs_table[regno].qi_regs;
  if (GET_MODE_SIZE (mode) <= 2)
    return nregs_table[regno].hi_regs;
  if (regno == A0_REGNO && mode == SImode && TARGET_A16)
    return 2;
  if ((GET_MODE_SIZE (mode) <= 3 || mode == PSImode) && TARGET_A24)
    return nregs_table[regno].pi_regs;
  if (GET_MODE_SIZE (mode) <= 4)
    return nregs_table[regno].si_regs;
  if (GET_MODE_SIZE (mode) <= 8)
    return nregs_table[regno].di_regs;
  return 0;
}

int
m32c_hard_regno_nregs (int regno, machine_mode mode)
{
  int rv = m32c_hard_regno_nregs_1 (regno, mode);
  return rv ? rv : 1;
}

/* Implements HARD_REGNO_MODE_OK.  The above function does the work
   already; just test its return value.  */
int
m32c_hard_regno_ok (int regno, machine_mode mode)
{
  return m32c_hard_regno_nregs_1 (regno, mode) != 0;
}

/* Implements MODES_TIEABLE_P.  In general, modes aren't tieable since
   registers are all different sizes.  However, since most modes are
   bigger than our registers anyway, it's easier to implement this
   function that way, leaving QImode as the only unique case.  */
int
m32c_modes_tieable_p (machine_mode m1, machine_mode m2)
{
  if (GET_MODE_SIZE (m1) == GET_MODE_SIZE (m2))
    return 1;

#if 0
  if (m1 == QImode || m2 == QImode)
    return 0;
#endif

  return 1;
}

/* Register Classes */

/* Implements REGNO_REG_CLASS.  */
enum reg_class
m32c_regno_reg_class (int regno)
{
  switch (regno)
    {
    case R0_REGNO:
      return R0_REGS;
    case R1_REGNO:
      return R1_REGS;
    case R2_REGNO:
      return R2_REGS;
    case R3_REGNO:
      return R3_REGS;
    case A0_REGNO:
      return A0_REGS;
    case A1_REGNO:
      return A1_REGS;
    case SB_REGNO:
      return SB_REGS;
    case FB_REGNO:
      return FB_REGS;
    case SP_REGNO:
      return SP_REGS;
    case FLG_REGNO:
      return FLG_REGS;
    default:
      if (IS_MEM_REGNO (regno))
	return MEM_REGS;
      return ALL_REGS;
    }
}

/* Implements REGNO_OK_FOR_BASE_P.  */
int
m32c_regno_ok_for_base_p (int regno)
{
  if (regno == A0_REGNO
      || regno == A1_REGNO || regno >= FIRST_PSEUDO_REGISTER)
    return 1;
  return 0;
}

/* Implements TARGET_PREFERRED_RELOAD_CLASS.  In general, prefer general
   registers of the appropriate size.  */

#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS m32c_preferred_reload_class

static reg_class_t
m32c_preferred_reload_class (rtx x, reg_class_t rclass)
{
  reg_class_t newclass = rclass;

#if DEBUG0
  fprintf (stderr, "\npreferred_reload_class for %s is ",
	   class_names[rclass]);
#endif
  if (rclass == NO_REGS)
    rclass = GET_MODE (x) == QImode ? HL_REGS : R03_REGS;

  if (reg_classes_intersect_p (rclass, CR_REGS))
    {
      switch (GET_MODE (x))
	{
	case QImode:
	  newclass = HL_REGS;
	  break;
	default:
	  /* newclass = HI_REGS; */
	  break;
	}
    }

  else if (newclass == QI_REGS && GET_MODE_SIZE (GET_MODE (x)) > 2)
    newclass = SI_REGS;
  else if (GET_MODE_SIZE (GET_MODE (x)) > 4
	   && ! reg_class_subset_p (R03_REGS, rclass))
    newclass = DI_REGS;

  rclass = reduce_class (rclass, newclass, rclass);

  if (GET_MODE (x) == QImode)
    rclass = reduce_class (rclass, HL_REGS, rclass);

#if DEBUG0
  fprintf (stderr, "%s\n", class_names[rclass]);
  debug_rtx (x);

  if (GET_CODE (x) == MEM
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
    fprintf (stderr, "Glorm!\n");
#endif
  return rclass;
}

/* Implements TARGET_PREFERRED_OUTPUT_RELOAD_CLASS.  */

#undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
#define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS m32c_preferred_output_reload_class

static reg_class_t
m32c_preferred_output_reload_class (rtx x, reg_class_t rclass)
{
  return m32c_preferred_reload_class (x, rclass);
}

/* Implements LIMIT_RELOAD_CLASS.  We basically want to avoid using
   address registers for reloads since they're needed for address
   reloads.  */
int
m32c_limit_reload_class (machine_mode mode, int rclass)
{
#if DEBUG0
  fprintf (stderr, "limit_reload_class for %s: %s ->",
	   mode_name[mode], class_names[rclass]);
#endif

  if (mode == QImode)
    rclass = reduce_class (rclass, HL_REGS, rclass);
  else if (mode == HImode)
    rclass = reduce_class (rclass, HI_REGS, rclass);
  else if (mode == SImode)
    rclass = reduce_class (rclass, SI_REGS, rclass);

  if (rclass != A_REGS)
    rclass = reduce_class (rclass, DI_REGS, rclass);

#if DEBUG0
  fprintf (stderr, " %s\n", class_names[rclass]);
#endif
  return rclass;
}

/* Implements SECONDARY_RELOAD_CLASS.  QImode values have to be reloaded
   in r0 or r1, as those are the only real QImode registers.  CR regs get
   reloaded through appropriately sized general or address
   registers.  */
int
m32c_secondary_reload_class (int rclass, machine_mode mode, rtx x)
{
  int cc = class_contents[rclass][0];
#if DEBUG0
  fprintf (stderr, "\nsecondary reload class %s %s\n",
	   class_names[rclass], mode_name[mode]);
  debug_rtx (x);
#endif
  if (mode == QImode
      && GET_CODE (x) == MEM && (cc & ~class_contents[R23_REGS][0]) == 0)
    return QI_REGS;
  if (reg_classes_intersect_p (rclass, CR_REGS)
      && GET_CODE (x) == REG
      && REGNO (x) >= SB_REGNO && REGNO (x) <= SP_REGNO)
    return (TARGET_A16 || mode == HImode) ? HI_REGS : A_REGS;
  return NO_REGS;
}

/* Implements TARGET_CLASS_LIKELY_SPILLED_P.  A_REGS is needed for address
   reloads.  */

#undef TARGET_CLASS_LIKELY_SPILLED_P
#define TARGET_CLASS_LIKELY_SPILLED_P m32c_class_likely_spilled_p

static bool
m32c_class_likely_spilled_p (reg_class_t regclass)
{
  if (regclass == A_REGS)
    return true;

  return (reg_class_size[(int) regclass] == 1);
}

/* Implements TARGET_CLASS_MAX_NREGS.  We calculate this according to its
   documented meaning, to avoid potential inconsistencies with actual
   class definitions.  */

#undef TARGET_CLASS_MAX_NREGS
#define TARGET_CLASS_MAX_NREGS m32c_class_max_nregs

static unsigned char
m32c_class_max_nregs (reg_class_t regclass, machine_mode mode)
{
  int rn;
  unsigned char max = 0;

  for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
    if (TEST_HARD_REG_BIT (reg_class_contents[(int) regclass], rn))
      {
	unsigned char n = m32c_hard_regno_nregs (rn, mode);
	if (max < n)
	  max = n;
      }
  return max;
}

/* Implements CANNOT_CHANGE_MODE_CLASS.  Only r0 and r1 can change to
   QI (r0l, r1l) because the chip doesn't support QI ops on other
   registers (well, it does on a0/a1 but if we let gcc do that, reload
   suffers).  Otherwise, we allow changes to larger modes.  */
int
m32c_cannot_change_mode_class (machine_mode from,
			       machine_mode to, int rclass)
{
  int rn;
#if DEBUG0
  fprintf (stderr, "cannot change from %s to %s in %s\n",
	   mode_name[from], mode_name[to], class_names[rclass]);
#endif

  /* If the larger mode isn't allowed in any of these registers, we
     can't allow the change.  */
  for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
    if (class_contents[rclass][0] & (1 << rn))
      if (! m32c_hard_regno_ok (rn, to))
	return 1;

  if (to == QImode)
    return (class_contents[rclass][0] & 0x1ffa);

  if (class_contents[rclass][0] & 0x0005 /* r0, r1 */
      && GET_MODE_SIZE (from) > 1)
    return 0;
  if (GET_MODE_SIZE (from) > 2)	/* all other regs */
    return 0;

  return 1;
}

/* Helpers for the rest of the file.  */
/* TRUE if the rtx is a REG rtx for the given register.  */
#define IS_REG(rtx,regno) (GET_CODE (rtx) == REG \
			   && REGNO (rtx) == regno)
/* TRUE if the rtx is a pseudo - specifically, one we can use as a
   base register in address calculations (hence the "strict"
   argument).  */
#define IS_PSEUDO(rtx,strict) (!strict && GET_CODE (rtx) == REG \
			       && (REGNO (rtx) == AP_REGNO \
				   || REGNO (rtx) >= FIRST_PSEUDO_REGISTER))

#define A0_OR_PSEUDO(x) (IS_REG(x, A0_REGNO) || REGNO (x) >= FIRST_PSEUDO_REGISTER)

/* Implements matching for constraints (see next function too).  'S' is
   for memory constraints, plus "Rpa" for PARALLEL rtx's we use for
   call return values.  */
bool
m32c_matches_constraint_p (rtx value, int constraint)
{
  encode_pattern (value);

  switch (constraint) {
  case CONSTRAINT_SF:
    return (far_addr_space_p (value)
	    && ((RTX_IS ("mr")
		 && A0_OR_PSEUDO (patternr[1])
		 && GET_MODE (patternr[1]) == SImode)
		|| (RTX_IS ("m+^Sri")
		    && A0_OR_PSEUDO (patternr[4])
		    && GET_MODE (patternr[4]) == HImode)
		|| (RTX_IS ("m+^Srs")
		    && A0_OR_PSEUDO (patternr[4])
		    && GET_MODE (patternr[4]) == HImode)
		|| (RTX_IS ("m+^S+ris")
		    && A0_OR_PSEUDO (patternr[5])
		    && GET_MODE (patternr[5]) == HImode)
		|| RTX_IS ("ms")));
  case CONSTRAINT_Sd:
    {
      /* This is the common "src/dest" address */
      rtx r;
      if (GET_CODE (value) == MEM && CONSTANT_P (XEXP (value, 0)))
	return true;
      if (RTX_IS ("ms") || RTX_IS ("m+si"))
	return true;
      if (RTX_IS ("m++rii"))
	{
	  if (REGNO (patternr[3]) == FB_REGNO
	      && INTVAL (patternr[4]) == 0)
	    return true;
	}
      if (RTX_IS ("mr"))
	r = patternr[1];
      else if (RTX_IS ("m+ri") || RTX_IS ("m+rs") || RTX_IS ("m+r+si"))
	r = patternr[2];
      else
	return false;
      if (REGNO (r) == SP_REGNO)
	return false;
      return m32c_legitimate_address_p (GET_MODE (value), XEXP (value, 0), 1);
    }
  case CONSTRAINT_Sa:
    {
      rtx r;
      if (RTX_IS ("mr"))
	r = patternr[1];
      else if (RTX_IS ("m+ri"))
	r = patternr[2];
      else
	return false;
      return (IS_REG (r, A0_REGNO) || IS_REG (r, A1_REGNO));
    }
  case CONSTRAINT_Si:
    return (RTX_IS ("mi") || RTX_IS ("ms") || RTX_IS ("m+si"));
  case CONSTRAINT_Ss:
    return ((RTX_IS ("mr")
	     && (IS_REG (patternr[1], SP_REGNO)))
	    || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SP_REGNO))));
  case CONSTRAINT_Sf:
    return ((RTX_IS ("mr")
	     && (IS_REG (patternr[1], FB_REGNO)))
	    || (RTX_IS ("m+ri") && (IS_REG (patternr[2], FB_REGNO))));
  case CONSTRAINT_Sb:
    return ((RTX_IS ("mr")
	     && (IS_REG (patternr[1], SB_REGNO)))
	    || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SB_REGNO))));
  case CONSTRAINT_Sp:
    /* Absolute addresses 0..0x1fff used for bit addressing (I/O ports) */
    return (RTX_IS ("mi")
	    && !(INTVAL (patternr[1]) & ~0x1fff));
  case CONSTRAINT_S1:
    return r1h_operand (value, QImode);
  case CONSTRAINT_Rpa:
    return GET_CODE (value) == PARALLEL;
  default:
    return false;
  }
}

/* STACK AND CALLING */

/* Frame Layout */

/* Implements RETURN_ADDR_RTX.  Note that R8C and M16C push 24 bits
   (yes, THREE bytes) onto the stack for the return address, but we
   don't support pointers bigger than 16 bits on those chips.  This
   will likely wreak havoc with exception unwinding.  FIXME.  */
rtx
m32c_return_addr_rtx (int count)
{
  machine_mode mode;
  int offset;
  rtx ra_mem;

  if (count)
    return NULL_RTX;
  /* we want 2[$fb] */

  if (TARGET_A24)
    {
      /* It's four bytes */
      mode = PSImode;
      offset = 4;
    }
  else
    {
      /* FIXME: it's really 3 bytes */
      mode = HImode;
      offset = 2;
    }

  ra_mem =
    gen_rtx_MEM (mode, plus_constant (Pmode, gen_rtx_REG (Pmode, FP_REGNO),
				      offset));
  return copy_to_mode_reg (mode, ra_mem);
}

/* Implements INCOMING_RETURN_ADDR_RTX.  See comment above.  */
rtx
m32c_incoming_return_addr_rtx (void)
{
  /* we want [sp] */
  return gen_rtx_MEM (PSImode, gen_rtx_REG (PSImode, SP_REGNO));
}

/* Exception Handling Support */

/* Implements EH_RETURN_DATA_REGNO.  Choose registers able to hold
   pointers.  */
int
m32c_eh_return_data_regno (int n)
{
  switch (n)
    {
    case 0:
      return MEM0_REGNO;
    case 1:
      return MEM0_REGNO+4;
    default:
      return INVALID_REGNUM;
    }
}

/* Implements EH_RETURN_STACKADJ_RTX.  Saved and used later in
   m32c_emit_eh_epilogue.  */
rtx
m32c_eh_return_stackadj_rtx (void)
{
  if (!cfun->machine->eh_stack_adjust)
    {
      rtx sa;

      sa = gen_rtx_REG (Pmode, R0_REGNO);
      cfun->machine->eh_stack_adjust = sa;
    }
  return cfun->machine->eh_stack_adjust;
}

/* Registers That Address the Stack Frame */

/* Implements DWARF_FRAME_REGNUM and DBX_REGISTER_NUMBER.  Note that
   the original spec called for dwarf numbers to vary with register
   width as well, for example, r0l, r0, and r2r0 would each have
   different dwarf numbers.  GCC doesn't support this, and we don't do
   it, and gdb seems to like it this way anyway.  */
unsigned int
m32c_dwarf_frame_regnum (int n)
{
  switch (n)
    {
    case R0_REGNO:
      return 5;
    case R1_REGNO:
      return 6;
    case R2_REGNO:
      return 7;
    case R3_REGNO:
      return 8;
    case A0_REGNO:
      return 9;
    case A1_REGNO:
      return 10;
    case FB_REGNO:
      return 11;
    case SB_REGNO:
      return 19;

    case SP_REGNO:
      return 12;
    case PC_REGNO:
      return 13;
    default:
      return DWARF_FRAME_REGISTERS + 1;
    }
}

/* The frame looks like this:

   ap -> +------------------------------
         | Return address (3 or 4 bytes)
         | Saved FB (2 or 4 bytes)
   fb -> +------------------------------
         | local vars
         | register saves fb
         |        through r0 as needed
   sp -> +------------------------------
*/

/* We use this to wrap all emitted insns in the prologue.  */
static rtx
F (rtx x)
{
  RTX_FRAME_RELATED_P (x) = 1;
  return x;
}

/* This maps register numbers to the PUSHM/POPM bitfield, and tells us
   how much the stack pointer moves for each, for each cpu family.  */
static struct
{
  int reg1;
  int bit;
  int a16_bytes;
  int a24_bytes;
} pushm_info[] =
{
  /* These are in reverse push (nearest-to-sp) order.  */
  { R0_REGNO, 0x80, 2, 2 },
  { R1_REGNO, 0x40, 2, 2 },
  { R2_REGNO, 0x20, 2, 2 },
  { R3_REGNO, 0x10, 2, 2 },
  { A0_REGNO, 0x08, 2, 4 },
  { A1_REGNO, 0x04, 2, 4 },
  { SB_REGNO, 0x02, 2, 4 },
  { FB_REGNO, 0x01, 2, 4 }
};
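/* For example, a reg_mask of 0x30 in m32c_pushm_popm below names r2
   and r3; pushing both moves $sp by 4 bytes on either chip family,
   since their entries are 2 bytes each.  */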

#define PUSHM_N (sizeof(pushm_info)/sizeof(pushm_info[0]))

/* Returns TRUE if we need to save/restore the given register.  We
   save everything for exception handlers, so that any register can be
   unwound.  For interrupt handlers, we save everything if the handler
   calls something else (because we don't know what *that* function
   might do), but try to be a bit smarter if the handler is a leaf
   function.  We always save $a0, though, because we use that in the
   epilogue to copy $fb to $sp.  */
static int
need_to_save (int regno)
{
  if (fixed_regs[regno])
    return 0;
  if (crtl->calls_eh_return)
    return 1;
  if (regno == FP_REGNO)
    return 0;
  if (cfun->machine->is_interrupt
      && (!cfun->machine->is_leaf
	  || (regno == A0_REGNO
	      && m32c_function_needs_enter ())
	  ))
    return 1;
  if (df_regs_ever_live_p (regno)
      && (!call_used_regs[regno] || cfun->machine->is_interrupt))
    return 1;
  return 0;
}

/* This function contains all the intelligence about saving and
   restoring registers.  It always figures out the register save set.
   When called with PP_justcount, it merely returns the size of the
   save set (for eliminating the frame pointer, for example).  When
   called with PP_pushm or PP_popm, it emits the appropriate
   instructions for saving (pushm) or restoring (popm) the
   registers.  */
static int
m32c_pushm_popm (Push_Pop_Type ppt)
{
  int reg_mask = 0;
  int byte_count = 0, bytes;
  int i;
  rtx dwarf_set[PUSHM_N];
  int n_dwarfs = 0;
  int nosave_mask = 0;

  if (crtl->return_rtx
      && GET_CODE (crtl->return_rtx) == PARALLEL
      && !(crtl->calls_eh_return || cfun->machine->is_interrupt))
    {
      rtx exp = XVECEXP (crtl->return_rtx, 0, 0);
      rtx rv = XEXP (exp, 0);
      int rv_bytes = GET_MODE_SIZE (GET_MODE (rv));

      if (rv_bytes > 2)
	nosave_mask |= 0x20;	/* PSI, SI */
      else
	nosave_mask |= 0xf0;	/* DF */
      if (rv_bytes > 4)
	nosave_mask |= 0x50;	/* DI */
    }

  for (i = 0; i < (int) PUSHM_N; i++)
    {
      /* Skip if neither register needs saving.  */
      if (!need_to_save (pushm_info[i].reg1))
	continue;

      if (pushm_info[i].bit & nosave_mask)
	continue;

      reg_mask |= pushm_info[i].bit;
      bytes = TARGET_A16 ? pushm_info[i].a16_bytes : pushm_info[i].a24_bytes;

      if (ppt == PP_pushm)
	{
	  machine_mode mode = (bytes == 2) ? HImode : SImode;
	  rtx addr;

	  /* Always use stack_pointer_rtx instead of calling
	     rtx_gen_REG ourselves.  Code elsewhere in GCC assumes
	     that there is a single rtx representing the stack pointer,
	     namely stack_pointer_rtx, and uses == to recognize it.  */
	  addr = stack_pointer_rtx;

	  if (byte_count != 0)
	    addr = gen_rtx_PLUS (GET_MODE (addr), addr, GEN_INT (byte_count));

	  dwarf_set[n_dwarfs++] =
	    gen_rtx_SET (gen_rtx_MEM (mode, addr),
			 gen_rtx_REG (mode, pushm_info[i].reg1));
	  F (dwarf_set[n_dwarfs - 1]);

	}
      byte_count += bytes;
    }

  if (cfun->machine->is_interrupt)
    {
      cfun->machine->intr_pushm = reg_mask & 0xfe;
      reg_mask = 0;
      byte_count = 0;
    }

  if (cfun->machine->is_interrupt)
    for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
      if (need_to_save (i))
	{
	  byte_count += 2;
	  cfun->machine->intr_pushmem[i - MEM0_REGNO] = 1;
	}

  if (ppt == PP_pushm && byte_count)
    {
      rtx note = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (n_dwarfs + 1));
      rtx pushm;

      if (reg_mask)
	{
	  XVECEXP (note, 0, 0)
	    = gen_rtx_SET (stack_pointer_rtx,
			   gen_rtx_PLUS (GET_MODE (stack_pointer_rtx),
					 stack_pointer_rtx,
					 GEN_INT (-byte_count)));
	  F (XVECEXP (note, 0, 0));

	  for (i = 0; i < n_dwarfs; i++)
	    XVECEXP (note, 0, i + 1) = dwarf_set[i];

	  pushm = F (emit_insn (gen_pushm (GEN_INT (reg_mask))));

	  add_reg_note (pushm, REG_FRAME_RELATED_EXPR, note);
	}

      if (cfun->machine->is_interrupt)
	for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
	  if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
	    {
	      if (TARGET_A16)
		pushm = emit_insn (gen_pushhi_16 (gen_rtx_REG (HImode, i)));
	      else
		pushm = emit_insn (gen_pushhi_24 (gen_rtx_REG (HImode, i)));
	      F (pushm);
	    }
    }
  if (ppt == PP_popm && byte_count)
    {
      if (cfun->machine->is_interrupt)
	for (i = MEM7_REGNO; i >= MEM0_REGNO; i--)
	  if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
	    {
	      if (TARGET_A16)
		emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, i)));
	      else
		emit_insn (gen_pophi_24 (gen_rtx_REG (HImode, i)));
	    }
      if (reg_mask)
	emit_insn (gen_popm (GEN_INT (reg_mask)));
    }

  return byte_count;
}

/* Implements INITIAL_ELIMINATION_OFFSET.  See the comment above that
   diagrams our call frame.  */
int
m32c_initial_elimination_offset (int from, int to)
{
  int ofs = 0;

  if (from == AP_REGNO)
    {
      if (TARGET_A16)
	ofs += 5;
      else
	ofs += 8;
    }

  if (to == SP_REGNO)
    {
      ofs += m32c_pushm_popm (PP_justcount);
      ofs += get_frame_size ();
    }

  /* Account for push rounding.  */
  if (TARGET_A24)
    ofs = (ofs + 1) & ~1;
#if DEBUG0
  fprintf (stderr, "initial_elimination_offset from=%d to=%d, ofs=%d\n", from,
	   to, ofs);
#endif
  return ofs;
}

/* Passing Function Arguments on the Stack */

/* Implements PUSH_ROUNDING.  The R8C and M16C have byte stacks, the
   M32C has word stacks.  */
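/* For example, m32c_push_rounding (1) is 1 on R8C/M16C but 2 on M32C,
   so a pushed QImode argument still moves $sp by two there.  */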
unsigned int
m32c_push_rounding (int n)
{
  if (TARGET_R8C || TARGET_M16C)
    return n;
  return (n + 1) & ~1;
}

/* Passing Arguments in Registers */

/* Implements TARGET_FUNCTION_ARG.  Arguments are passed partly in
   registers, partly on stack.  If our function returns a struct, a
   pointer to a buffer for it is at the top of the stack (last thing
   pushed).  The first few real arguments may be in registers as
   follows:

   R8C/M16C:	arg1 in r1 if it's QI or HI (else it's pushed on stack)
		arg2 in r2 if it's HI (else pushed on stack)
		rest on stack
   M32C:	arg1 in r0 if it's QI or HI (else it's pushed on stack)
		rest on stack

   Structs are not passed in registers, even if they fit.  Only
   integer and pointer types are passed in registers.

   Note that when arg1 doesn't fit in r1, arg2 may still be passed in
   r2 if it fits.  */
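/* For example, for f (char c, short s): on R8C/M16C, c (QImode) is
   passed in r1 and s (HImode) in r2; on M32C, c is passed in r0 and
   s goes on the stack.  */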
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG m32c_function_arg
static rtx
m32c_function_arg (cumulative_args_t ca_v,
		   machine_mode mode, const_tree type, bool named)
{
  CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);

  /* Can return a reg, parallel, or 0 for stack */
  rtx rv = NULL_RTX;
#if DEBUG0
  fprintf (stderr, "func_arg %d (%s, %d)\n",
	   ca->parm_num, mode_name[mode], named);
  debug_tree ((tree)type);
#endif

  if (mode == VOIDmode)
    return GEN_INT (0);

  if (ca->force_mem || !named)
    {
#if DEBUG0
      fprintf (stderr, "func arg: force %d named %d, mem\n", ca->force_mem,
	       named);
#endif
      return NULL_RTX;
    }

  if (type && INTEGRAL_TYPE_P (type) && POINTER_TYPE_P (type))
    return NULL_RTX;

  if (type && AGGREGATE_TYPE_P (type))
    return NULL_RTX;

  switch (ca->parm_num)
    {
    case 1:
      if (GET_MODE_SIZE (mode) == 1 || GET_MODE_SIZE (mode) == 2)
	rv = gen_rtx_REG (mode, TARGET_A16 ? R1_REGNO : R0_REGNO);
      break;

    case 2:
      if (TARGET_A16 && GET_MODE_SIZE (mode) == 2)
	rv = gen_rtx_REG (mode, R2_REGNO);
      break;
    }

#if DEBUG0
  debug_rtx (rv);
#endif
  return rv;
}

#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE m32c_pass_by_reference
static bool
m32c_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
			machine_mode mode ATTRIBUTE_UNUSED,
			const_tree type ATTRIBUTE_UNUSED,
			bool named ATTRIBUTE_UNUSED)
{
  return 0;
}

/* Implements INIT_CUMULATIVE_ARGS.  */
void
m32c_init_cumulative_args (CUMULATIVE_ARGS * ca,
			   tree fntype,
			   rtx libname ATTRIBUTE_UNUSED,
			   tree fndecl,
			   int n_named_args ATTRIBUTE_UNUSED)
{
  if (fntype && aggregate_value_p (TREE_TYPE (fntype), fndecl))
    ca->force_mem = 1;
  else
    ca->force_mem = 0;
  ca->parm_num = 1;
}

/* Implements TARGET_FUNCTION_ARG_ADVANCE.  force_mem is set for
   functions returning structures, so we always reset that.  Otherwise,
   we only need to know the sequence number of the argument to know what
   to do with it.  */
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE m32c_function_arg_advance
static void
m32c_function_arg_advance (cumulative_args_t ca_v,
			   machine_mode mode ATTRIBUTE_UNUSED,
			   const_tree type ATTRIBUTE_UNUSED,
			   bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);

  if (ca->force_mem)
    ca->force_mem = 0;
  else
    ca->parm_num++;
}

/* Implements TARGET_FUNCTION_ARG_BOUNDARY.  */
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY m32c_function_arg_boundary
static unsigned int
m32c_function_arg_boundary (machine_mode mode ATTRIBUTE_UNUSED,
			    const_tree type ATTRIBUTE_UNUSED)
{
  return (TARGET_A16 ? 8 : 16);
}

/* Implements FUNCTION_ARG_REGNO_P.  */
int
m32c_function_arg_regno_p (int r)
{
  if (TARGET_A24)
    return (r == R0_REGNO);
  return (r == R1_REGNO || r == R2_REGNO);
}

/* HImode and PSImode are the two "native" modes as far as GCC is
   concerned, but the chips also support a 32-bit mode which is used
   for some opcodes in R8C/M16C and for reset vectors and such.  */
#undef TARGET_VALID_POINTER_MODE
#define TARGET_VALID_POINTER_MODE m32c_valid_pointer_mode
static bool
m32c_valid_pointer_mode (machine_mode mode)
{
  if (mode == HImode
      || mode == PSImode
      || mode == SImode
      )
    return 1;
  return 0;
}

/* How Scalar Function Values Are Returned */

/* Implements TARGET_LIBCALL_VALUE.  Most values are returned in $r0, or some
   combination of registers starting there (r2r0 for longs, r3r1r2r0
   for long long, r3r2r1r0 for doubles), except that that ABI
   currently doesn't work because it ends up using all available
   general registers and gcc often can't compile it.  So, instead, we
   return anything bigger than 16 bits in "mem0" (effectively, a
   memory location).  */

#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE m32c_libcall_value

static rtx
m32c_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
{
  /* return reg or parallel */
#if 0
  /* FIXME: GCC has difficulty returning large values in registers,
     because that ties up most of the general registers and gives the
     register allocator little to work with.  Until we can resolve
     this, large values are returned in memory.  */
  if (mode == DFmode)
    {
      rtx rv;

      rv = gen_rtx_PARALLEL (mode, rtvec_alloc (4));
      XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R0_REGNO),
					      GEN_INT (0));
      XVECEXP (rv, 0, 1) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R1_REGNO),
					      GEN_INT (2));
      XVECEXP (rv, 0, 2) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R2_REGNO),
					      GEN_INT (4));
      XVECEXP (rv, 0, 3) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R3_REGNO),
					      GEN_INT (6));
      return rv;
    }

  if (TARGET_A24 && GET_MODE_SIZE (mode) > 2)
    {
      rtx rv;

      rv = gen_rtx_PARALLEL (mode, rtvec_alloc (1));
      XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (mode,
							   R0_REGNO),
					      GEN_INT (0));
      return rv;
    }
#endif

  if (GET_MODE_SIZE (mode) > 2)
    return gen_rtx_REG (mode, MEM0_REGNO);
  return gen_rtx_REG (mode, R0_REGNO);
}

/* Implements TARGET_FUNCTION_VALUE.  Functions and libcalls have the same
   conventions.  */

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE m32c_function_value

static rtx
m32c_function_value (const_tree valtype,
		     const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
		     bool outgoing ATTRIBUTE_UNUSED)
{
  /* return reg or parallel */
  const machine_mode mode = TYPE_MODE (valtype);
  return m32c_libcall_value (mode, NULL_RTX);
}

/* Implements TARGET_FUNCTION_VALUE_REGNO_P.  */

#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P m32c_function_value_regno_p

static bool
m32c_function_value_regno_p (const unsigned int regno)
{
  return (regno == R0_REGNO || regno == MEM0_REGNO);
}

/* How Large Values Are Returned */

/* We return structures by pushing the address on the stack, even if
   we use registers for the first few "real" arguments.  */
#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX m32c_struct_value_rtx
static rtx
m32c_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
		       int incoming ATTRIBUTE_UNUSED)
{
  return 0;
}

/* Function Entry and Exit */

/* Implements EPILOGUE_USES.  Interrupts restore all registers.  */
int
m32c_epilogue_uses (int regno ATTRIBUTE_UNUSED)
{
  if (cfun->machine->is_interrupt)
    return 1;
  return 0;
}

/* Implementing the Varargs Macros */

#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING m32c_strict_argument_naming
static bool
m32c_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
{
  return 1;
}

/* Trampolines for Nested Functions */

/*
   m16c:
   1 0000 75C43412              mov.w   #0x1234,a0
   2 0004 FC000000              jmp.a   label

   m32c:
   1 0000 BC563412              mov.l:s #0x123456,a0
   2 0004 CC000000              jmp.a   label
*/

/* Implements TRAMPOLINE_SIZE.  */
int
m32c_trampoline_size (void)
{
  /* Allocate extra space so we can avoid the messy shifts when we
     initialize the trampoline; we just write past the end of the
     opcode.  */
  return TARGET_A16 ? 8 : 10;
}

/* Implements TRAMPOLINE_ALIGNMENT.  */
int
m32c_trampoline_alignment (void)
{
  return 2;
}

/* Implements TARGET_TRAMPOLINE_INIT.  */

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT m32c_trampoline_init
static void
m32c_trampoline_init (rtx m_tramp, tree fndecl, rtx chainval)
{
  rtx function = XEXP (DECL_RTL (fndecl), 0);

#define A0(m,i) adjust_address (m_tramp, m, i)
  if (TARGET_A16)
    {
      /* Note: we subtract a "word" because the moves want signed
	 constants, not unsigned constants.  */
      emit_move_insn (A0 (HImode, 0), GEN_INT (0xc475 - 0x10000));
      emit_move_insn (A0 (HImode, 2), chainval);
      emit_move_insn (A0 (QImode, 4), GEN_INT (0xfc - 0x100));
      /* We use 16-bit addresses here, but store the zero to turn it
	 into a 24-bit offset.  */
      emit_move_insn (A0 (HImode, 5), function);
      emit_move_insn (A0 (QImode, 7), GEN_INT (0x00));
    }
  else
    {
      /* Note that the PSI moves actually write 4 bytes.  Make sure we
	 write stuff out in the right order, and leave room for the
	 extra byte at the end.  */
      emit_move_insn (A0 (QImode, 0), GEN_INT (0xbc - 0x100));
      emit_move_insn (A0 (PSImode, 1), chainval);
      emit_move_insn (A0 (QImode, 4), GEN_INT (0xcc - 0x100));
      emit_move_insn (A0 (PSImode, 5), function);
    }
#undef A0
}

/* Addressing Modes */

/* The r8c/m32c family supports a wide range of non-orthogonal
   addressing modes, including the ability to double-indirect on *some*
   of them.  Not all insns support all modes, either, but we rely on
   predicates and constraints to deal with that.  */
#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P m32c_legitimate_address_p
bool
m32c_legitimate_address_p (machine_mode mode, rtx x, bool strict)
{
  int mode_adjust;
  if (CONSTANT_P (x))
    return 1;

  if (TARGET_A16 && GET_MODE (x) != HImode && GET_MODE (x) != SImode)
    return 0;
  if (TARGET_A24 && GET_MODE (x) != PSImode)
    return 0;

  /* Wide references to memory will be split after reload, so we must
     ensure that all parts of such splits remain legitimate
     addresses.  */
  mode_adjust = GET_MODE_SIZE (mode) - 1;

  /* allowing PLUS yields mem:HI(plus:SI(mem:SI(plus:SI in m32c_split_move */
  if (GET_CODE (x) == PRE_DEC
      || GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_MODIFY)
    {
      return (GET_CODE (XEXP (x, 0)) == REG
	      && REGNO (XEXP (x, 0)) == SP_REGNO);
    }

#if 0
  /* This is the double indirection detection, but it currently
     doesn't work as cleanly as this code implies, so until we've had
     a chance to debug it, leave it disabled.  */
  if (TARGET_A24 && GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) != PLUS)
    {
#if DEBUG_DOUBLE
      fprintf (stderr, "double indirect\n");
#endif
      x = XEXP (x, 0);
    }
#endif

  encode_pattern (x);
  if (RTX_IS ("r"))
    {
      /* Most indexable registers can be used without displacements,
	 although some of them will be emitted with an explicit zero
	 to please the assembler.  */
      switch (REGNO (patternr[0]))
	{
	case A1_REGNO:
	case SB_REGNO:
	case FB_REGNO:
	case SP_REGNO:
	  if (TARGET_A16 && GET_MODE (x) == SImode)
	    return 0;
	case A0_REGNO:
	  return 1;

	default:
	  if (IS_PSEUDO (patternr[0], strict))
	    return 1;
	  return 0;
	}
    }

  if (TARGET_A16 && GET_MODE (x) == SImode)
    return 0;

  if (RTX_IS ("+ri"))
    {
      /* This is more interesting, because different base registers
	 allow for different displacements - both range and signedness
	 - and it differs from chip series to chip series too.  */
      int rn = REGNO (patternr[1]);
      HOST_WIDE_INT offs = INTVAL (patternr[2]);
      switch (rn)
	{
	case A0_REGNO:
	case A1_REGNO:
	case SB_REGNO:
	  /* The syntax only allows positive offsets, but when the
	     offsets span the entire memory range, we can simulate
	     negative offsets by wrapping.  */
	  if (TARGET_A16)
	    return (offs >= -65536 && offs <= 65535 - mode_adjust);
	  if (rn == SB_REGNO)
	    return (offs >= 0 && offs <= 65535 - mode_adjust);
	  /* A0 or A1 */
	  return (offs >= -16777216 && offs <= 16777215);

	case FB_REGNO:
	  if (TARGET_A16)
	    return (offs >= -128 && offs <= 127 - mode_adjust);
	  return (offs >= -65536 && offs <= 65535 - mode_adjust);

	case SP_REGNO:
	  return (offs >= -128 && offs <= 127 - mode_adjust);

	default:
	  if (IS_PSEUDO (patternr[1], strict))
	    return 1;
	  return 0;
	}
    }
  if (RTX_IS ("+rs") || RTX_IS ("+r+si"))
    {
      rtx reg = patternr[1];

      /* We don't know where the symbol is, so only allow base
	 registers which support displacements spanning the whole
	 address range.  */
      switch (REGNO (reg))
	{
	case A0_REGNO:
	case A1_REGNO:
	  /* $sb needs a secondary reload, but since it's involved in
	     memory address reloads too, we don't deal with it very
	     well.  */
	  /* case SB_REGNO: */
	  return 1;
	default:
	  if (GET_CODE (reg) == SUBREG)
	    return 0;
	  if (IS_PSEUDO (reg, strict))
	    return 1;
	  return 0;
	}
    }
  return 0;
}

/* Implements REG_OK_FOR_BASE_P.  */
int
m32c_reg_ok_for_base_p (rtx x, int strict)
{
  if (GET_CODE (x) != REG)
    return 0;
  switch (REGNO (x))
    {
    case A0_REGNO:
    case A1_REGNO:
    case SB_REGNO:
    case FB_REGNO:
    case SP_REGNO:
      return 1;
    default:
      if (IS_PSEUDO (x, strict))
	return 1;
      return 0;
    }
}

/* We have three choices for choosing fb->aN offsets.  If we choose -128,
   we need one MOVA -128[fb],aN opcode and 16-bit aN displacements,
   like this:
       EB 4B FF    mova    -128[$fb],$a0
       D8 0C FF FF mov.w:Q #0,-1[$a0]

   Alternately, we subtract the frame size, and hopefully use 8-bit aN
   displacements:
       7B F4       stc $fb,$a0
       77 54 00 01 sub #256,$a0
       D8 08 01    mov.w:Q #0,1[$a0]

   If we don't offset (i.e. offset by zero), we end up with:
       7B F4       stc $fb,$a0
       D8 0C 00 FF mov.w:Q #0,-256[$a0]

   We have to subtract *something* so that we have a PLUS rtx to mark
   that we've done this reload.  The -128 offset will never result in
   an 8-bit aN offset, and the payoff for the second case is five
   loads *if* those loads are within 256 bytes of the other end of the
   frame, so the third case seems best.  Note that we subtract the
   zero, but detect that in the addhi3 pattern.  */

#define BIG_FB_ADJ 0
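/* A BIG_FB_ADJ of zero selects the third case above: we still emit
   the PLUS (subtracting zero) and rely on the addhi3 pattern to
   detect and drop it.  */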
1837
38b2d076
DD
1838/* Implements LEGITIMIZE_ADDRESS. The only address we really have to
1839 worry about is frame base offsets, as $fb has a limited
1840 displacement range. We deal with this by attempting to reload $fb
1841 itself into an address register; that seems to result in the best
1842 code. */
506d7b68
PB
1843#undef TARGET_LEGITIMIZE_ADDRESS
1844#define TARGET_LEGITIMIZE_ADDRESS m32c_legitimize_address
1845static rtx
1846m32c_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
ef4bddc2 1847 machine_mode mode)
38b2d076
DD
1848{
1849#if DEBUG0
1850 fprintf (stderr, "m32c_legitimize_address for mode %s\n", mode_name[mode]);
506d7b68 1851 debug_rtx (x);
38b2d076
DD
1852 fprintf (stderr, "\n");
1853#endif
1854
506d7b68
PB
1855 if (GET_CODE (x) == PLUS
1856 && GET_CODE (XEXP (x, 0)) == REG
1857 && REGNO (XEXP (x, 0)) == FB_REGNO
1858 && GET_CODE (XEXP (x, 1)) == CONST_INT
1859 && (INTVAL (XEXP (x, 1)) < -128
1860 || INTVAL (XEXP (x, 1)) > (128 - GET_MODE_SIZE (mode))))
38b2d076
DD
1861 {
1862 /* reload FB to A_REGS */
38b2d076 1863 rtx temp = gen_reg_rtx (Pmode);
506d7b68 1864 x = copy_rtx (x);
f7df4a84 1865 emit_insn (gen_rtx_SET (temp, XEXP (x, 0)));
506d7b68 1866 XEXP (x, 0) = temp;
38b2d076
DD
1867 }
1868
506d7b68 1869 return x;
38b2d076
DD
1870}
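/* Roughly, the effect of the rewrite above: a frame access whose
   offset does not fit $fb's -128..127 window, say
   (mem:HI (plus (reg fb) (const_int 200))), becomes

       temp = fb;
       (mem:HI (plus (reg temp) (const_int 200)))

   and the fresh pseudo can then be allocated to a0/a1, whose
   displacement range is much wider.  */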
1871
1872/* Implements LEGITIMIZE_RELOAD_ADDRESS. See comment above. */
1873int
1874m32c_legitimize_reload_address (rtx * x,
ef4bddc2 1875 machine_mode mode,
38b2d076
DD
1876 int opnum,
1877 int type, int ind_levels ATTRIBUTE_UNUSED)
1878{
1879#if DEBUG0
1880 fprintf (stderr, "\nm32c_legitimize_reload_address for mode %s\n",
1881 mode_name[mode]);
1882 debug_rtx (*x);
1883#endif
1884
1885 /* At one point, this function tried to get $fb copied to an address
1886 register, which in theory would maximize sharing, but gcc was
1887 *also* still trying to reload the whole address, and we'd run out
1888 of address registers. So we let gcc do the naive (but safe)
1889 reload instead, when the above function doesn't handle it for
04aff2c0
DD
1890 us.
1891
1892 The code below is a second attempt at the above. */
1893
1894 if (GET_CODE (*x) == PLUS
1895 && GET_CODE (XEXP (*x, 0)) == REG
1896 && REGNO (XEXP (*x, 0)) == FB_REGNO
1897 && GET_CODE (XEXP (*x, 1)) == CONST_INT
1898 && (INTVAL (XEXP (*x, 1)) < -128
1899 || INTVAL (XEXP (*x, 1)) > (128 - GET_MODE_SIZE (mode))))
1900 {
1901 rtx sum;
1902 int offset = INTVAL (XEXP (*x, 1));
1903 int adjustment = -BIG_FB_ADJ;
1904
1905 sum = gen_rtx_PLUS (Pmode, XEXP (*x, 0),
1906 GEN_INT (adjustment));
1907 *x = gen_rtx_PLUS (Pmode, sum, GEN_INT (offset - adjustment));
1908 if (type == RELOAD_OTHER)
1909 type = RELOAD_FOR_OTHER_ADDRESS;
1910 push_reload (sum, NULL_RTX, &XEXP (*x, 0), NULL,
1911 A_REGS, Pmode, VOIDmode, 0, 0, opnum,
444d6efe 1912 (enum reload_type) type);
04aff2c0
DD
1913 return 1;
1914 }
1915
1916 if (GET_CODE (*x) == PLUS
1917 && GET_CODE (XEXP (*x, 0)) == PLUS
1918 && GET_CODE (XEXP (XEXP (*x, 0), 0)) == REG
1919 && REGNO (XEXP (XEXP (*x, 0), 0)) == FB_REGNO
1920 && GET_CODE (XEXP (XEXP (*x, 0), 1)) == CONST_INT
1921 && GET_CODE (XEXP (*x, 1)) == CONST_INT
1922 )
1923 {
1924 if (type == RELOAD_OTHER)
1925 type = RELOAD_FOR_OTHER_ADDRESS;
1926 push_reload (XEXP (*x, 0), NULL_RTX, &XEXP (*x, 0), NULL,
1927 A_REGS, Pmode, VOIDmode, 0, 0, opnum,
444d6efe 1928 (enum reload_type) type);
f75e07bc
BE
1929 return 1;
1930 }
1931
1932 if (TARGET_A24 && GET_MODE (*x) == PSImode)
1933 {
1934 push_reload (*x, NULL_RTX, x, NULL,
1935 A_REGS, PSImode, VOIDmode, 0, 0, opnum,
1936 (enum reload_type) type);
04aff2c0
DD
1937 return 1;
1938 }
38b2d076
DD
1939
1940 return 0;
1941}
1942
5fd5d713
DD
 1943/* Return the appropriate mode for a pointer in a named address space. */
1944#undef TARGET_ADDR_SPACE_POINTER_MODE
1945#define TARGET_ADDR_SPACE_POINTER_MODE m32c_addr_space_pointer_mode
ef4bddc2 1946static machine_mode
5fd5d713
DD
1947m32c_addr_space_pointer_mode (addr_space_t addrspace)
1948{
1949 switch (addrspace)
1950 {
1951 case ADDR_SPACE_GENERIC:
1952 return TARGET_A24 ? PSImode : HImode;
1953 case ADDR_SPACE_FAR:
1954 return SImode;
1955 default:
1956 gcc_unreachable ();
1957 }
1958}
1959
 1960/* Return the appropriate mode for an address in a named address space. */
1961#undef TARGET_ADDR_SPACE_ADDRESS_MODE
1962#define TARGET_ADDR_SPACE_ADDRESS_MODE m32c_addr_space_address_mode
ef4bddc2 1963static machine_mode
5fd5d713
DD
1964m32c_addr_space_address_mode (addr_space_t addrspace)
1965{
1966 switch (addrspace)
1967 {
1968 case ADDR_SPACE_GENERIC:
1969 return TARGET_A24 ? PSImode : HImode;
1970 case ADDR_SPACE_FAR:
1971 return SImode;
1972 default:
1973 gcc_unreachable ();
1974 }
1975}
1976
1977/* Like m32c_legitimate_address_p, except with named addresses. */
1978#undef TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P
1979#define TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P \
1980 m32c_addr_space_legitimate_address_p
1981static bool
ef4bddc2 1982m32c_addr_space_legitimate_address_p (machine_mode mode, rtx x,
5fd5d713
DD
1983 bool strict, addr_space_t as)
1984{
1985 if (as == ADDR_SPACE_FAR)
1986 {
1987 if (TARGET_A24)
1988 return 0;
1989 encode_pattern (x);
1990 if (RTX_IS ("r"))
1991 {
1992 if (GET_MODE (x) != SImode)
1993 return 0;
1994 switch (REGNO (patternr[0]))
1995 {
1996 case A0_REGNO:
1997 return 1;
1998
1999 default:
2000 if (IS_PSEUDO (patternr[0], strict))
2001 return 1;
2002 return 0;
2003 }
2004 }
2005 if (RTX_IS ("+^Sri"))
2006 {
2007 int rn = REGNO (patternr[3]);
2008 HOST_WIDE_INT offs = INTVAL (patternr[4]);
2009 if (GET_MODE (patternr[3]) != HImode)
2010 return 0;
2011 switch (rn)
2012 {
2013 case A0_REGNO:
2014 return (offs >= 0 && offs <= 0xfffff);
2015
2016 default:
2017 if (IS_PSEUDO (patternr[3], strict))
2018 return 1;
2019 return 0;
2020 }
2021 }
2022 if (RTX_IS ("+^Srs"))
2023 {
2024 int rn = REGNO (patternr[3]);
2025 if (GET_MODE (patternr[3]) != HImode)
2026 return 0;
2027 switch (rn)
2028 {
2029 case A0_REGNO:
2030 return 1;
2031
2032 default:
2033 if (IS_PSEUDO (patternr[3], strict))
2034 return 1;
2035 return 0;
2036 }
2037 }
2038 if (RTX_IS ("+^S+ris"))
2039 {
2040 int rn = REGNO (patternr[4]);
2041 if (GET_MODE (patternr[4]) != HImode)
2042 return 0;
2043 switch (rn)
2044 {
2045 case A0_REGNO:
2046 return 1;
2047
2048 default:
2049 if (IS_PSEUDO (patternr[4], strict))
2050 return 1;
2051 return 0;
2052 }
2053 }
2054 if (RTX_IS ("s"))
2055 {
2056 return 1;
2057 }
2058 return 0;
2059 }
2060
2061 else if (as != ADDR_SPACE_GENERIC)
2062 gcc_unreachable ();
2063
2064 return m32c_legitimate_address_p (mode, x, strict);
2065}
2066
 2067/* Like m32c_legitimize_address, except with named address support. */
2068#undef TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS
2069#define TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS m32c_addr_space_legitimize_address
2070static rtx
ef4bddc2 2071m32c_addr_space_legitimize_address (rtx x, rtx oldx, machine_mode mode,
5fd5d713
DD
2072 addr_space_t as)
2073{
2074 if (as != ADDR_SPACE_GENERIC)
2075 {
2076#if DEBUG0
2077 fprintf (stderr, "\033[36mm32c_addr_space_legitimize_address for mode %s\033[0m\n", mode_name[mode]);
2078 debug_rtx (x);
2079 fprintf (stderr, "\n");
2080#endif
2081
2082 if (GET_CODE (x) != REG)
2083 {
2084 x = force_reg (SImode, x);
2085 }
2086 return x;
2087 }
2088
2089 return m32c_legitimize_address (x, oldx, mode);
2090}
2091
2092/* Determine if one named address space is a subset of another. */
2093#undef TARGET_ADDR_SPACE_SUBSET_P
2094#define TARGET_ADDR_SPACE_SUBSET_P m32c_addr_space_subset_p
2095static bool
2096m32c_addr_space_subset_p (addr_space_t subset, addr_space_t superset)
2097{
2098 gcc_assert (subset == ADDR_SPACE_GENERIC || subset == ADDR_SPACE_FAR);
2099 gcc_assert (superset == ADDR_SPACE_GENERIC || superset == ADDR_SPACE_FAR);
2100
2101 if (subset == superset)
2102 return true;
2103
2104 else
2105 return (subset == ADDR_SPACE_GENERIC && superset == ADDR_SPACE_FAR);
2106}
2107
2108#undef TARGET_ADDR_SPACE_CONVERT
2109#define TARGET_ADDR_SPACE_CONVERT m32c_addr_space_convert
2110/* Convert from one address space to another. */
2111static rtx
2112m32c_addr_space_convert (rtx op, tree from_type, tree to_type)
2113{
2114 addr_space_t from_as = TYPE_ADDR_SPACE (TREE_TYPE (from_type));
2115 addr_space_t to_as = TYPE_ADDR_SPACE (TREE_TYPE (to_type));
2116 rtx result;
2117
2118 gcc_assert (from_as == ADDR_SPACE_GENERIC || from_as == ADDR_SPACE_FAR);
2119 gcc_assert (to_as == ADDR_SPACE_GENERIC || to_as == ADDR_SPACE_FAR);
2120
2121 if (to_as == ADDR_SPACE_GENERIC && from_as == ADDR_SPACE_FAR)
2122 {
2123 /* This is unpredictable, as we're truncating off usable address
2124 bits. */
2125
2126 result = gen_reg_rtx (HImode);
2127 emit_move_insn (result, simplify_subreg (HImode, op, SImode, 0));
2128 return result;
2129 }
2130 else if (to_as == ADDR_SPACE_FAR && from_as == ADDR_SPACE_GENERIC)
2131 {
2132 /* This always works. */
2133 result = gen_reg_rtx (SImode);
2134 emit_insn (gen_zero_extendhisi2 (result, op));
2135 return result;
2136 }
2137 else
2138 gcc_unreachable ();
2139}
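/* A source-level sketch of the two conversions handled above, assuming
   the __far address-space keyword this port exposes (pointer sizes as
   on the 16-bit-address parts):

       int __far *fp;
       int       *np;

       fp = (int __far *) np;   generic -> far: always safe, zero-extend
       np = (int *) fp;         far -> generic: truncates the upper bits
                                and is only meaningful if the object is
                                reachable through a generic pointer.  */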
2140
38b2d076
DD
2141/* Condition Code Status */
2142
2143#undef TARGET_FIXED_CONDITION_CODE_REGS
2144#define TARGET_FIXED_CONDITION_CODE_REGS m32c_fixed_condition_code_regs
2145static bool
2146m32c_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
2147{
2148 *p1 = FLG_REGNO;
2149 *p2 = INVALID_REGNUM;
2150 return true;
2151}
2152
2153/* Describing Relative Costs of Operations */
2154
0e607518 2155/* Implements TARGET_REGISTER_MOVE_COST. We make impossible moves
38b2d076
DD
2156 prohibitively expensive, like trying to put QIs in r2/r3 (there are
2157 no opcodes to do that). We also discourage use of mem* registers
2158 since they're really memory. */
0e607518
AS
2159
2160#undef TARGET_REGISTER_MOVE_COST
2161#define TARGET_REGISTER_MOVE_COST m32c_register_move_cost
2162
2163static int
ef4bddc2 2164m32c_register_move_cost (machine_mode mode, reg_class_t from,
0e607518 2165 reg_class_t to)
38b2d076
DD
2166{
2167 int cost = COSTS_N_INSNS (3);
0e607518
AS
2168 HARD_REG_SET cc;
2169
2170/* FIXME: pick real values, but not 2 for now. */
2171 COPY_HARD_REG_SET (cc, reg_class_contents[(int) from]);
2172 IOR_HARD_REG_SET (cc, reg_class_contents[(int) to]);
2173
2174 if (mode == QImode
2175 && hard_reg_set_intersect_p (cc, reg_class_contents[R23_REGS]))
38b2d076 2176 {
0e607518 2177 if (hard_reg_set_subset_p (cc, reg_class_contents[R23_REGS]))
38b2d076
DD
2178 cost = COSTS_N_INSNS (1000);
2179 else
2180 cost = COSTS_N_INSNS (80);
2181 }
2182
2183 if (!class_can_hold_mode (from, mode) || !class_can_hold_mode (to, mode))
2184 cost = COSTS_N_INSNS (1000);
2185
0e607518 2186 if (reg_classes_intersect_p (from, CR_REGS))
38b2d076
DD
2187 cost += COSTS_N_INSNS (5);
2188
0e607518 2189 if (reg_classes_intersect_p (to, CR_REGS))
38b2d076
DD
2190 cost += COSTS_N_INSNS (5);
2191
2192 if (from == MEM_REGS || to == MEM_REGS)
2193 cost += COSTS_N_INSNS (50);
0e607518
AS
2194 else if (reg_classes_intersect_p (from, MEM_REGS)
2195 || reg_classes_intersect_p (to, MEM_REGS))
38b2d076
DD
2196 cost += COSTS_N_INSNS (10);
2197
2198#if DEBUG0
2199 fprintf (stderr, "register_move_cost %s from %s to %s = %d\n",
0e607518
AS
2200 mode_name[mode], class_names[(int) from], class_names[(int) to],
2201 cost);
38b2d076
DD
2202#endif
2203 return cost;
2204}
2205
0e607518
AS
2206/* Implements TARGET_MEMORY_MOVE_COST. */
2207
2208#undef TARGET_MEMORY_MOVE_COST
2209#define TARGET_MEMORY_MOVE_COST m32c_memory_move_cost
2210
2211static int
ef4bddc2 2212m32c_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
0e607518
AS
2213 reg_class_t rclass ATTRIBUTE_UNUSED,
2214 bool in ATTRIBUTE_UNUSED)
38b2d076
DD
2215{
2216 /* FIXME: pick real values. */
2217 return COSTS_N_INSNS (10);
2218}
2219
07127a0a
DD
2220/* Here we try to describe when we use multiple opcodes for one RTX so
2221 that gcc knows when to use them. */
2222#undef TARGET_RTX_COSTS
2223#define TARGET_RTX_COSTS m32c_rtx_costs
2224static bool
e548c9df
AM
2225m32c_rtx_costs (rtx x, machine_mode mode, int outer_code,
2226 int opno ATTRIBUTE_UNUSED,
68f932c4 2227 int *total, bool speed ATTRIBUTE_UNUSED)
07127a0a 2228{
e548c9df 2229 int code = GET_CODE (x);
07127a0a
DD
2230 switch (code)
2231 {
2232 case REG:
2233 if (REGNO (x) >= MEM0_REGNO && REGNO (x) <= MEM7_REGNO)
2234 *total += COSTS_N_INSNS (500);
2235 else
2236 *total += COSTS_N_INSNS (1);
2237 return true;
2238
2239 case ASHIFT:
2240 case LSHIFTRT:
2241 case ASHIFTRT:
2242 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
2243 {
2244 /* mov.b r1l, r1h */
2245 *total += COSTS_N_INSNS (1);
2246 return true;
2247 }
2248 if (INTVAL (XEXP (x, 1)) > 8
2249 || INTVAL (XEXP (x, 1)) < -8)
2250 {
2251 /* mov.b #N, r1l */
2252 /* mov.b r1l, r1h */
2253 *total += COSTS_N_INSNS (2);
2254 return true;
2255 }
2256 return true;
2257
2258 case LE:
2259 case LEU:
2260 case LT:
2261 case LTU:
2262 case GT:
2263 case GTU:
2264 case GE:
2265 case GEU:
2266 case NE:
2267 case EQ:
2268 if (outer_code == SET)
2269 {
2270 *total += COSTS_N_INSNS (2);
2271 return true;
2272 }
2273 break;
2274
2275 case ZERO_EXTRACT:
2276 {
2277 rtx dest = XEXP (x, 0);
2278 rtx addr = XEXP (dest, 0);
2279 switch (GET_CODE (addr))
2280 {
2281 case CONST_INT:
2282 *total += COSTS_N_INSNS (1);
2283 break;
2284 case SYMBOL_REF:
2285 *total += COSTS_N_INSNS (3);
2286 break;
2287 default:
2288 *total += COSTS_N_INSNS (2);
2289 break;
2290 }
2291 return true;
2292 }
2293 break;
2294
2295 default:
2296 /* Reasonable default. */
e548c9df 2297 if (TARGET_A16 && mode == SImode)
07127a0a
DD
2298 *total += COSTS_N_INSNS (2);
2299 break;
2300 }
2301 return false;
2302}
2303
2304#undef TARGET_ADDRESS_COST
2305#define TARGET_ADDRESS_COST m32c_address_cost
2306static int
ef4bddc2 2307m32c_address_cost (rtx addr, machine_mode mode ATTRIBUTE_UNUSED,
b413068c
OE
2308 addr_space_t as ATTRIBUTE_UNUSED,
2309 bool speed ATTRIBUTE_UNUSED)
07127a0a 2310{
80b093df 2311 int i;
07127a0a
DD
2312 /* fprintf(stderr, "\naddress_cost\n");
2313 debug_rtx(addr);*/
2314 switch (GET_CODE (addr))
2315 {
2316 case CONST_INT:
80b093df
DD
2317 i = INTVAL (addr);
2318 if (i == 0)
2319 return COSTS_N_INSNS(1);
2320 if (0 < i && i <= 255)
2321 return COSTS_N_INSNS(2);
2322 if (0 < i && i <= 65535)
2323 return COSTS_N_INSNS(3);
2324 return COSTS_N_INSNS(4);
07127a0a 2325 case SYMBOL_REF:
80b093df 2326 return COSTS_N_INSNS(4);
07127a0a 2327 case REG:
80b093df
DD
2328 return COSTS_N_INSNS(1);
2329 case PLUS:
2330 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
2331 {
2332 i = INTVAL (XEXP (addr, 1));
2333 if (i == 0)
2334 return COSTS_N_INSNS(1);
2335 if (0 < i && i <= 255)
2336 return COSTS_N_INSNS(2);
2337 if (0 < i && i <= 65535)
2338 return COSTS_N_INSNS(3);
2339 }
2340 return COSTS_N_INSNS(4);
07127a0a
DD
2341 default:
2342 return 0;
2343 }
2344}
2345
38b2d076
DD
2346/* Defining the Output Assembler Language */
2347
38b2d076
DD
2348/* Output of Data */
2349
 2350/* We may have 24-bit sizes, which is the native address size.
2351 Currently unused, but provided for completeness. */
2352#undef TARGET_ASM_INTEGER
2353#define TARGET_ASM_INTEGER m32c_asm_integer
2354static bool
2355m32c_asm_integer (rtx x, unsigned int size, int aligned_p)
2356{
2357 switch (size)
2358 {
2359 case 3:
2360 fprintf (asm_out_file, "\t.3byte\t");
2361 output_addr_const (asm_out_file, x);
2362 fputc ('\n', asm_out_file);
2363 return true;
e9555b13
DD
2364 case 4:
2365 if (GET_CODE (x) == SYMBOL_REF)
2366 {
2367 fprintf (asm_out_file, "\t.long\t");
2368 output_addr_const (asm_out_file, x);
2369 fputc ('\n', asm_out_file);
2370 return true;
2371 }
2372 break;
38b2d076
DD
2373 }
2374 return default_assemble_integer (x, size, aligned_p);
2375}
2376
2377/* Output of Assembler Instructions */
2378
a4174ebf 2379/* We use a lookup table because the addressing modes are non-orthogonal. */
38b2d076
DD
2380
2381static struct
2382{
2383 char code;
2384 char const *pattern;
2385 char const *format;
2386}
2387const conversions[] = {
2388 { 0, "r", "0" },
2389
2390 { 0, "mr", "z[1]" },
2391 { 0, "m+ri", "3[2]" },
2392 { 0, "m+rs", "3[2]" },
5fd5d713
DD
2393 { 0, "m+^Zrs", "5[4]" },
2394 { 0, "m+^Zri", "5[4]" },
2395 { 0, "m+^Z+ris", "7+6[5]" },
2396 { 0, "m+^Srs", "5[4]" },
2397 { 0, "m+^Sri", "5[4]" },
2398 { 0, "m+^S+ris", "7+6[5]" },
38b2d076
DD
2399 { 0, "m+r+si", "4+5[2]" },
2400 { 0, "ms", "1" },
2401 { 0, "mi", "1" },
2402 { 0, "m+si", "2+3" },
2403
2404 { 0, "mmr", "[z[2]]" },
2405 { 0, "mm+ri", "[4[3]]" },
2406 { 0, "mm+rs", "[4[3]]" },
2407 { 0, "mm+r+si", "[5+6[3]]" },
2408 { 0, "mms", "[[2]]" },
2409 { 0, "mmi", "[[2]]" },
2410 { 0, "mm+si", "[4[3]]" },
2411
2412 { 0, "i", "#0" },
2413 { 0, "s", "#0" },
2414 { 0, "+si", "#1+2" },
2415 { 0, "l", "#0" },
2416
2417 { 'l', "l", "0" },
2418 { 'd', "i", "0" },
2419 { 'd', "s", "0" },
2420 { 'd', "+si", "1+2" },
2421 { 'D', "i", "0" },
2422 { 'D', "s", "0" },
2423 { 'D', "+si", "1+2" },
2424 { 'x', "i", "#0" },
2425 { 'X', "i", "#0" },
2426 { 'm', "i", "#0" },
2427 { 'b', "i", "#0" },
07127a0a 2428 { 'B', "i", "0" },
38b2d076
DD
2429 { 'p', "i", "0" },
2430
2431 { 0, 0, 0 }
2432};
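/* To read the table: each digit in FORMAT indexes patternr[], which
   encode_pattern fills in the order the nodes appear in PATTERN.  So
   for (mem (plus (reg fb) (const_int -4))) the pattern should be
   "m+ri", with patternr[2] the register and patternr[3] the constant,
   and the format "3[2]" prints the familiar "-4[fb]" operand syntax.
   m32c_print_operand below is the interpreter for these strings.  */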
2433
2434/* This is in order according to the bitfield that pushm/popm use. */
2435static char const *pushm_regs[] = {
2436 "fb", "sb", "a1", "a0", "r3", "r2", "r1", "r0"
2437};
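/* For example, with the decode loop in m32c_print_operand's 'p' case
   (bit 7 down to bit 0), an operand mask of 0x83 should be printed as
   "r0,sb,fb": bit 7 selects pushm_regs[7] ("r0") and bits 1 and 0
   select "sb" and "fb".  */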
2438
4645179e
AS
2439/* Implements TARGET_PRINT_OPERAND. */
2440
2441#undef TARGET_PRINT_OPERAND
2442#define TARGET_PRINT_OPERAND m32c_print_operand
2443
2444static void
38b2d076
DD
2445m32c_print_operand (FILE * file, rtx x, int code)
2446{
2447 int i, j, b;
2448 const char *comma;
2449 HOST_WIDE_INT ival;
2450 int unsigned_const = 0;
ff485e71 2451 int force_sign;
38b2d076
DD
2452
2453 /* Multiplies; constants are converted to sign-extended format but
2454 we need unsigned, so 'u' and 'U' tell us what size unsigned we
2455 need. */
2456 if (code == 'u')
2457 {
2458 unsigned_const = 2;
2459 code = 0;
2460 }
2461 if (code == 'U')
2462 {
2463 unsigned_const = 1;
2464 code = 0;
2465 }
2466 /* This one is only for debugging; you can put it in a pattern to
2467 force this error. */
2468 if (code == '!')
2469 {
2470 fprintf (stderr, "dj: unreviewed pattern:");
2471 if (current_output_insn)
2472 debug_rtx (current_output_insn);
2473 gcc_unreachable ();
2474 }
2475 /* PSImode operations are either .w or .l depending on the target. */
2476 if (code == '&')
2477 {
2478 if (TARGET_A16)
2479 fprintf (file, "w");
2480 else
2481 fprintf (file, "l");
2482 return;
2483 }
2484 /* Inverted conditionals. */
2485 if (code == 'C')
2486 {
2487 switch (GET_CODE (x))
2488 {
2489 case LE:
2490 fputs ("gt", file);
2491 break;
2492 case LEU:
2493 fputs ("gtu", file);
2494 break;
2495 case LT:
2496 fputs ("ge", file);
2497 break;
2498 case LTU:
2499 fputs ("geu", file);
2500 break;
2501 case GT:
2502 fputs ("le", file);
2503 break;
2504 case GTU:
2505 fputs ("leu", file);
2506 break;
2507 case GE:
2508 fputs ("lt", file);
2509 break;
2510 case GEU:
2511 fputs ("ltu", file);
2512 break;
2513 case NE:
2514 fputs ("eq", file);
2515 break;
2516 case EQ:
2517 fputs ("ne", file);
2518 break;
2519 default:
2520 gcc_unreachable ();
2521 }
2522 return;
2523 }
2524 /* Regular conditionals. */
2525 if (code == 'c')
2526 {
2527 switch (GET_CODE (x))
2528 {
2529 case LE:
2530 fputs ("le", file);
2531 break;
2532 case LEU:
2533 fputs ("leu", file);
2534 break;
2535 case LT:
2536 fputs ("lt", file);
2537 break;
2538 case LTU:
2539 fputs ("ltu", file);
2540 break;
2541 case GT:
2542 fputs ("gt", file);
2543 break;
2544 case GTU:
2545 fputs ("gtu", file);
2546 break;
2547 case GE:
2548 fputs ("ge", file);
2549 break;
2550 case GEU:
2551 fputs ("geu", file);
2552 break;
2553 case NE:
2554 fputs ("ne", file);
2555 break;
2556 case EQ:
2557 fputs ("eq", file);
2558 break;
2559 default:
2560 gcc_unreachable ();
2561 }
2562 return;
2563 }
2564 /* Used in negsi2 to do HImode ops on the two parts of an SImode
2565 operand. */
2566 if (code == 'h' && GET_MODE (x) == SImode)
2567 {
2568 x = m32c_subreg (HImode, x, SImode, 0);
2569 code = 0;
2570 }
2571 if (code == 'H' && GET_MODE (x) == SImode)
2572 {
2573 x = m32c_subreg (HImode, x, SImode, 2);
2574 code = 0;
2575 }
07127a0a
DD
2576 if (code == 'h' && GET_MODE (x) == HImode)
2577 {
2578 x = m32c_subreg (QImode, x, HImode, 0);
2579 code = 0;
2580 }
2581 if (code == 'H' && GET_MODE (x) == HImode)
2582 {
2583 /* We can't actually represent this as an rtx. Do it here. */
2584 if (GET_CODE (x) == REG)
2585 {
2586 switch (REGNO (x))
2587 {
2588 case R0_REGNO:
2589 fputs ("r0h", file);
2590 return;
2591 case R1_REGNO:
2592 fputs ("r1h", file);
2593 return;
2594 default:
2595 gcc_unreachable();
2596 }
2597 }
2598 /* This should be a MEM. */
2599 x = m32c_subreg (QImode, x, HImode, 1);
2600 code = 0;
2601 }
2602 /* This is for BMcond, which always wants word register names. */
2603 if (code == 'h' && GET_MODE (x) == QImode)
2604 {
2605 if (GET_CODE (x) == REG)
2606 x = gen_rtx_REG (HImode, REGNO (x));
2607 code = 0;
2608 }
38b2d076
DD
2609 /* 'x' and 'X' need to be ignored for non-immediates. */
2610 if ((code == 'x' || code == 'X') && GET_CODE (x) != CONST_INT)
2611 code = 0;
2612
2613 encode_pattern (x);
ff485e71 2614 force_sign = 0;
38b2d076
DD
2615 for (i = 0; conversions[i].pattern; i++)
2616 if (conversions[i].code == code
2617 && streq (conversions[i].pattern, pattern))
2618 {
2619 for (j = 0; conversions[i].format[j]; j++)
2620 /* backslash quotes the next character in the output pattern. */
2621 if (conversions[i].format[j] == '\\')
2622 {
2623 fputc (conversions[i].format[j + 1], file);
2624 j++;
2625 }
2626 /* Digits in the output pattern indicate that the
2627 corresponding RTX is to be output at that point. */
2628 else if (ISDIGIT (conversions[i].format[j]))
2629 {
2630 rtx r = patternr[conversions[i].format[j] - '0'];
2631 switch (GET_CODE (r))
2632 {
2633 case REG:
2634 fprintf (file, "%s",
2635 reg_name_with_mode (REGNO (r), GET_MODE (r)));
2636 break;
2637 case CONST_INT:
2638 switch (code)
2639 {
2640 case 'b':
07127a0a
DD
2641 case 'B':
2642 {
2643 int v = INTVAL (r);
2644 int i = (int) exact_log2 (v);
2645 if (i == -1)
2646 i = (int) exact_log2 ((v ^ 0xffff) & 0xffff);
2647 if (i == -1)
2648 i = (int) exact_log2 ((v ^ 0xff) & 0xff);
2649 /* Bit position. */
2650 fprintf (file, "%d", i);
2651 }
38b2d076
DD
2652 break;
2653 case 'x':
2654 /* Unsigned byte. */
2655 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2656 INTVAL (r) & 0xff);
2657 break;
2658 case 'X':
2659 /* Unsigned word. */
2660 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2661 INTVAL (r) & 0xffff);
2662 break;
2663 case 'p':
2664 /* pushm and popm encode a register set into a single byte. */
2665 comma = "";
2666 for (b = 7; b >= 0; b--)
2667 if (INTVAL (r) & (1 << b))
2668 {
2669 fprintf (file, "%s%s", comma, pushm_regs[b]);
2670 comma = ",";
2671 }
2672 break;
2673 case 'm':
2674 /* "Minus". Output -X */
2675 ival = (-INTVAL (r) & 0xffff);
2676 if (ival & 0x8000)
2677 ival = ival - 0x10000;
2678 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2679 break;
2680 default:
2681 ival = INTVAL (r);
2682 if (conversions[i].format[j + 1] == '[' && ival < 0)
2683 {
2684 /* We can simulate negative displacements by
2685 taking advantage of address space
2686 wrapping when the offset can span the
2687 entire address range. */
2688 rtx base =
2689 patternr[conversions[i].format[j + 2] - '0'];
2690 if (GET_CODE (base) == REG)
2691 switch (REGNO (base))
2692 {
2693 case A0_REGNO:
2694 case A1_REGNO:
2695 if (TARGET_A24)
2696 ival = 0x1000000 + ival;
2697 else
2698 ival = 0x10000 + ival;
2699 break;
2700 case SB_REGNO:
2701 if (TARGET_A16)
2702 ival = 0x10000 + ival;
2703 break;
2704 }
2705 }
2706 else if (code == 'd' && ival < 0 && j == 0)
2707 /* The "mova" opcode is used to do addition by
2708 computing displacements, but again, we need
2709 displacements to be unsigned *if* they're
2710 the only component of the displacement
2711 (i.e. no "symbol-4" type displacement). */
2712 ival = (TARGET_A24 ? 0x1000000 : 0x10000) + ival;
2713
2714 if (conversions[i].format[j] == '0')
2715 {
2716 /* More conversions to unsigned. */
2717 if (unsigned_const == 2)
2718 ival &= 0xffff;
2719 if (unsigned_const == 1)
2720 ival &= 0xff;
2721 }
2722 if (streq (conversions[i].pattern, "mi")
2723 || streq (conversions[i].pattern, "mmi"))
2724 {
2725 /* Integers used as addresses are unsigned. */
2726 ival &= (TARGET_A24 ? 0xffffff : 0xffff);
2727 }
ff485e71
DD
2728 if (force_sign && ival >= 0)
2729 fputc ('+', file);
38b2d076
DD
2730 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2731 break;
2732 }
2733 break;
2734 case CONST_DOUBLE:
 2735 /* We don't have const_double constants. If one
 2736 shows up, make it obvious. */
2737 fprintf (file, "[const_double 0x%lx]",
2738 (unsigned long) CONST_DOUBLE_HIGH (r));
2739 break;
2740 case SYMBOL_REF:
2741 assemble_name (file, XSTR (r, 0));
2742 break;
2743 case LABEL_REF:
2744 output_asm_label (r);
2745 break;
2746 default:
2747 fprintf (stderr, "don't know how to print this operand:");
2748 debug_rtx (r);
2749 gcc_unreachable ();
2750 }
2751 }
2752 else
2753 {
2754 if (conversions[i].format[j] == 'z')
2755 {
2756 /* Some addressing modes *must* have a displacement,
2757 so insert a zero here if needed. */
2758 int k;
2759 for (k = j + 1; conversions[i].format[k]; k++)
2760 if (ISDIGIT (conversions[i].format[k]))
2761 {
2762 rtx reg = patternr[conversions[i].format[k] - '0'];
2763 if (GET_CODE (reg) == REG
2764 && (REGNO (reg) == SB_REGNO
2765 || REGNO (reg) == FB_REGNO
2766 || REGNO (reg) == SP_REGNO))
2767 fputc ('0', file);
2768 }
2769 continue;
2770 }
2771 /* Signed displacements off symbols need to have signs
2772 blended cleanly. */
2773 if (conversions[i].format[j] == '+'
ff485e71 2774 && (!code || code == 'D' || code == 'd')
38b2d076 2775 && ISDIGIT (conversions[i].format[j + 1])
ff485e71
DD
2776 && (GET_CODE (patternr[conversions[i].format[j + 1] - '0'])
2777 == CONST_INT))
2778 {
2779 force_sign = 1;
2780 continue;
2781 }
38b2d076
DD
2782 fputc (conversions[i].format[j], file);
2783 }
2784 break;
2785 }
2786 if (!conversions[i].pattern)
2787 {
2788 fprintf (stderr, "unconvertible operand %c `%s'", code ? code : '-',
2789 pattern);
2790 debug_rtx (x);
2791 fprintf (file, "[%c.%s]", code ? code : '-', pattern);
2792 }
2793
2794 return;
2795}
2796
4645179e
AS
2797/* Implements TARGET_PRINT_OPERAND_PUNCT_VALID_P.
2798
2799 See m32c_print_operand above for descriptions of what these do. */
2800
2801#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
2802#define TARGET_PRINT_OPERAND_PUNCT_VALID_P m32c_print_operand_punct_valid_p
2803
2804static bool
2805m32c_print_operand_punct_valid_p (unsigned char c)
38b2d076
DD
2806{
2807 if (c == '&' || c == '!')
4645179e
AS
2808 return true;
2809
2810 return false;
38b2d076
DD
2811}
2812
4645179e
AS
2813/* Implements TARGET_PRINT_OPERAND_ADDRESS. Nothing unusual here. */
2814
2815#undef TARGET_PRINT_OPERAND_ADDRESS
2816#define TARGET_PRINT_OPERAND_ADDRESS m32c_print_operand_address
2817
2818static void
38b2d076
DD
2819m32c_print_operand_address (FILE * stream, rtx address)
2820{
235e1fe8
NC
2821 if (GET_CODE (address) == MEM)
2822 address = XEXP (address, 0);
2823 else
2824 /* cf: gcc.dg/asm-4.c. */
2825 gcc_assert (GET_CODE (address) == REG);
2826
2827 m32c_print_operand (stream, address, 0);
38b2d076
DD
2828}
2829
2830/* Implements ASM_OUTPUT_REG_PUSH. Control registers are pushed
2831 differently than general registers. */
2832void
2833m32c_output_reg_push (FILE * s, int regno)
2834{
2835 if (regno == FLG_REGNO)
2836 fprintf (s, "\tpushc\tflg\n");
2837 else
04aff2c0 2838 fprintf (s, "\tpush.%c\t%s\n",
38b2d076
DD
2839 " bwll"[reg_push_size (regno)], reg_names[regno]);
2840}
2841
2842/* Likewise for ASM_OUTPUT_REG_POP. */
2843void
2844m32c_output_reg_pop (FILE * s, int regno)
2845{
2846 if (regno == FLG_REGNO)
2847 fprintf (s, "\tpopc\tflg\n");
2848 else
04aff2c0 2849 fprintf (s, "\tpop.%c\t%s\n",
38b2d076
DD
2850 " bwll"[reg_push_size (regno)], reg_names[regno]);
2851}
2852
2853/* Defining target-specific uses of `__attribute__' */
2854
2855/* Used to simplify the logic below. Find the attributes wherever
2856 they may be. */
2857#define M32C_ATTRIBUTES(decl) \
2858 (TYPE_P (decl)) ? TYPE_ATTRIBUTES (decl) \
2859 : DECL_ATTRIBUTES (decl) \
2860 ? (DECL_ATTRIBUTES (decl)) \
2861 : TYPE_ATTRIBUTES (TREE_TYPE (decl))
2862
2863/* Returns TRUE if the given tree has the "interrupt" attribute. */
2864static int
2865interrupt_p (tree node ATTRIBUTE_UNUSED)
2866{
2867 tree list = M32C_ATTRIBUTES (node);
2868 while (list)
2869 {
2870 if (is_attribute_p ("interrupt", TREE_PURPOSE (list)))
2871 return 1;
2872 list = TREE_CHAIN (list);
2873 }
65655f79
DD
2874 return fast_interrupt_p (node);
2875}
2876
2877/* Returns TRUE if the given tree has the "bank_switch" attribute. */
2878static int
2879bank_switch_p (tree node ATTRIBUTE_UNUSED)
2880{
2881 tree list = M32C_ATTRIBUTES (node);
2882 while (list)
2883 {
2884 if (is_attribute_p ("bank_switch", TREE_PURPOSE (list)))
2885 return 1;
2886 list = TREE_CHAIN (list);
2887 }
2888 return 0;
2889}
2890
2891/* Returns TRUE if the given tree has the "fast_interrupt" attribute. */
2892static int
2893fast_interrupt_p (tree node ATTRIBUTE_UNUSED)
2894{
2895 tree list = M32C_ATTRIBUTES (node);
2896 while (list)
2897 {
2898 if (is_attribute_p ("fast_interrupt", TREE_PURPOSE (list)))
2899 return 1;
2900 list = TREE_CHAIN (list);
2901 }
38b2d076
DD
2902 return 0;
2903}
2904
2905static tree
2906interrupt_handler (tree * node ATTRIBUTE_UNUSED,
2907 tree name ATTRIBUTE_UNUSED,
2908 tree args ATTRIBUTE_UNUSED,
2909 int flags ATTRIBUTE_UNUSED,
2910 bool * no_add_attrs ATTRIBUTE_UNUSED)
2911{
2912 return NULL_TREE;
2913}
2914
5abd2125
JS
2915/* Returns TRUE if given tree has the "function_vector" attribute. */
2916int
2917m32c_special_page_vector_p (tree func)
2918{
653e2568
DD
2919 tree list;
2920
5abd2125
JS
2921 if (TREE_CODE (func) != FUNCTION_DECL)
2922 return 0;
2923
653e2568 2924 list = M32C_ATTRIBUTES (func);
5abd2125
JS
2925 while (list)
2926 {
2927 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2928 return 1;
2929 list = TREE_CHAIN (list);
2930 }
2931 return 0;
2932}
2933
2934static tree
2935function_vector_handler (tree * node ATTRIBUTE_UNUSED,
2936 tree name ATTRIBUTE_UNUSED,
2937 tree args ATTRIBUTE_UNUSED,
2938 int flags ATTRIBUTE_UNUSED,
2939 bool * no_add_attrs ATTRIBUTE_UNUSED)
2940{
2941 if (TARGET_R8C)
2942 {
 2943 /* The attribute is not supported for the R8C target. */
2944 warning (OPT_Wattributes,
29d08eba
JM
2945 "%qE attribute is not supported for R8C target",
2946 name);
5abd2125
JS
2947 *no_add_attrs = true;
2948 }
2949 else if (TREE_CODE (*node) != FUNCTION_DECL)
2950 {
2951 /* The attribute must be applied to functions only. */
2952 warning (OPT_Wattributes,
29d08eba
JM
2953 "%qE attribute applies only to functions",
2954 name);
5abd2125
JS
2955 *no_add_attrs = true;
2956 }
2957 else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
2958 {
2959 /* The argument must be a constant integer. */
2960 warning (OPT_Wattributes,
29d08eba
JM
2961 "%qE attribute argument not an integer constant",
2962 name);
5abd2125
JS
2963 *no_add_attrs = true;
2964 }
2965 else if (TREE_INT_CST_LOW (TREE_VALUE (args)) < 18
2966 || TREE_INT_CST_LOW (TREE_VALUE (args)) > 255)
2967 {
 2968 /* The argument value must be between 18 and 255. */
2969 warning (OPT_Wattributes,
29d08eba
JM
2970 "%qE attribute argument should be between 18 to 255",
2971 name);
5abd2125
JS
2972 *no_add_attrs = true;
2973 }
2974 return NULL_TREE;
2975}
2976
 2977/* If the function referenced by X is assigned the 'function_vector'
 2978 attribute, return its function vector number; otherwise return zero. */
2979int
2980current_function_special_page_vector (rtx x)
2981{
2982 int num;
2983
2984 if ((GET_CODE(x) == SYMBOL_REF)
2985 && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
2986 {
653e2568 2987 tree list;
5abd2125
JS
2988 tree t = SYMBOL_REF_DECL (x);
2989
2990 if (TREE_CODE (t) != FUNCTION_DECL)
2991 return 0;
2992
653e2568 2993 list = M32C_ATTRIBUTES (t);
5abd2125
JS
2994 while (list)
2995 {
2996 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2997 {
2998 num = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list)));
2999 return num;
3000 }
3001
3002 list = TREE_CHAIN (list);
3003 }
3004
3005 return 0;
3006 }
3007 else
3008 return 0;
3009}
3010
38b2d076
DD
3011#undef TARGET_ATTRIBUTE_TABLE
3012#define TARGET_ATTRIBUTE_TABLE m32c_attribute_table
3013static const struct attribute_spec m32c_attribute_table[] = {
62d784f7
KT
3014 {"interrupt", 0, 0, false, false, false, interrupt_handler, false},
3015 {"bank_switch", 0, 0, false, false, false, interrupt_handler, false},
3016 {"fast_interrupt", 0, 0, false, false, false, interrupt_handler, false},
3017 {"function_vector", 1, 1, true, false, false, function_vector_handler,
3018 false},
3019 {0, 0, 0, 0, 0, 0, 0, false}
38b2d076
DD
3020};
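/* Typical uses of these attributes in source code (an illustrative
   sketch only; the vector number is arbitrary, but must lie in 18..255,
   and function_vector is rejected for R8C):

     void timer_isr (void) __attribute__ ((interrupt));
     void dma_isr (void)   __attribute__ ((fast_interrupt));
     void bank_isr (void)  __attribute__ ((bank_switch));
     void helper (void)    __attribute__ ((function_vector (18)));  */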
3021
3022#undef TARGET_COMP_TYPE_ATTRIBUTES
3023#define TARGET_COMP_TYPE_ATTRIBUTES m32c_comp_type_attributes
3024static int
3101faab
KG
3025m32c_comp_type_attributes (const_tree type1 ATTRIBUTE_UNUSED,
3026 const_tree type2 ATTRIBUTE_UNUSED)
38b2d076
DD
3027{
3028 /* 0=incompatible 1=compatible 2=warning */
3029 return 1;
3030}
3031
3032#undef TARGET_INSERT_ATTRIBUTES
3033#define TARGET_INSERT_ATTRIBUTES m32c_insert_attributes
3034static void
3035m32c_insert_attributes (tree node ATTRIBUTE_UNUSED,
3036 tree * attr_ptr ATTRIBUTE_UNUSED)
3037{
f6052f86
DD
3038 unsigned addr;
3039 /* See if we need to make #pragma address variables volatile. */
3040
3041 if (TREE_CODE (node) == VAR_DECL)
3042 {
444d6efe 3043 const char *name = IDENTIFIER_POINTER (DECL_NAME (node));
f6052f86
DD
3044 if (m32c_get_pragma_address (name, &addr))
3045 {
3046 TREE_THIS_VOLATILE (node) = true;
3047 }
3048 }
3049}
3050
f6052f86 3051/* Hash table of pragma info. */
fb5c464a 3052static GTY(()) hash_map<nofree_string_hash, unsigned> *pragma_htab;
f6052f86
DD
3053
3054void
3055m32c_note_pragma_address (const char *varname, unsigned address)
3056{
f6052f86 3057 if (!pragma_htab)
fb5c464a 3058 pragma_htab = hash_map<nofree_string_hash, unsigned>::create_ggc (31);
f6052f86 3059
2a22f99c
TS
3060 const char *name = ggc_strdup (varname);
3061 unsigned int *slot = &pragma_htab->get_or_insert (name);
3062 *slot = address;
f6052f86
DD
3063}
3064
3065static bool
3066m32c_get_pragma_address (const char *varname, unsigned *address)
3067{
f6052f86
DD
3068 if (!pragma_htab)
3069 return false;
3070
2a22f99c
TS
3071 unsigned int *slot = pragma_htab->get (varname);
3072 if (slot)
f6052f86 3073 {
2a22f99c 3074 *address = *slot;
f6052f86
DD
3075 return true;
3076 }
3077 return false;
3078}
3079
3080void
444d6efe
JR
3081m32c_output_aligned_common (FILE *stream, tree decl ATTRIBUTE_UNUSED,
3082 const char *name,
f6052f86
DD
3083 int size, int align, int global)
3084{
3085 unsigned address;
3086
3087 if (m32c_get_pragma_address (name, &address))
3088 {
3089 /* We never output these as global. */
3090 assemble_name (stream, name);
3091 fprintf (stream, " = 0x%04x\n", address);
3092 return;
3093 }
3094 if (!global)
3095 {
3096 fprintf (stream, "\t.local\t");
3097 assemble_name (stream, name);
3098 fprintf (stream, "\n");
3099 }
3100 fprintf (stream, "\t.comm\t");
3101 assemble_name (stream, name);
3102 fprintf (stream, ",%u,%u\n", size, align / BITS_PER_UNIT);
38b2d076
DD
3103}
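/* Concretely, the two paths above emit output along these lines
   (symbol decoration elided): a variable pinned with the address
   pragma becomes a plain assembler assignment,

       port0 = 0x03c0

   while an ordinary common variable gets the usual

       .comm   buf,16,2

   preceded by ".local buf" when it is not global.  */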
3104
3105/* Predicates */
3106
f9b89438 3107/* This is a list of legal subregs of hard regs. */
67fc44cb
DD
3108static const struct {
3109 unsigned char outer_mode_size;
3110 unsigned char inner_mode_size;
3111 unsigned char byte_mask;
3112 unsigned char legal_when;
f9b89438 3113 unsigned int regno;
f9b89438 3114} legal_subregs[] = {
67fc44cb
DD
3115 {1, 2, 0x03, 1, R0_REGNO}, /* r0h r0l */
3116 {1, 2, 0x03, 1, R1_REGNO}, /* r1h r1l */
3117 {1, 2, 0x01, 1, A0_REGNO},
3118 {1, 2, 0x01, 1, A1_REGNO},
f9b89438 3119
67fc44cb
DD
3120 {1, 4, 0x01, 1, A0_REGNO},
3121 {1, 4, 0x01, 1, A1_REGNO},
f9b89438 3122
67fc44cb
DD
3123 {2, 4, 0x05, 1, R0_REGNO}, /* r2 r0 */
3124 {2, 4, 0x05, 1, R1_REGNO}, /* r3 r1 */
3125 {2, 4, 0x05, 16, A0_REGNO}, /* a1 a0 */
3126 {2, 4, 0x01, 24, A0_REGNO}, /* a1 a0 */
3127 {2, 4, 0x01, 24, A1_REGNO}, /* a1 a0 */
f9b89438 3128
67fc44cb 3129 {4, 8, 0x55, 1, R0_REGNO}, /* r3 r1 r2 r0 */
f9b89438
DD
3130};
3131
3132/* Returns TRUE if OP is a subreg of a hard reg which we don't
f6052f86 3133 support. We also bail on MEMs with illegal addresses. */
f9b89438
DD
3134bool
3135m32c_illegal_subreg_p (rtx op)
3136{
f9b89438
DD
3137 int offset;
3138 unsigned int i;
ef4bddc2 3139 machine_mode src_mode, dest_mode;
f9b89438 3140
f6052f86
DD
3141 if (GET_CODE (op) == MEM
3142 && ! m32c_legitimate_address_p (Pmode, XEXP (op, 0), false))
3143 {
3144 return true;
3145 }
3146
f9b89438
DD
3147 if (GET_CODE (op) != SUBREG)
3148 return false;
3149
3150 dest_mode = GET_MODE (op);
3151 offset = SUBREG_BYTE (op);
3152 op = SUBREG_REG (op);
3153 src_mode = GET_MODE (op);
3154
3155 if (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (src_mode))
3156 return false;
3157 if (GET_CODE (op) != REG)
3158 return false;
3159 if (REGNO (op) >= MEM0_REGNO)
3160 return false;
3161
3162 offset = (1 << offset);
3163
67fc44cb 3164 for (i = 0; i < ARRAY_SIZE (legal_subregs); i ++)
f9b89438
DD
3165 if (legal_subregs[i].outer_mode_size == GET_MODE_SIZE (dest_mode)
3166 && legal_subregs[i].regno == REGNO (op)
3167 && legal_subregs[i].inner_mode_size == GET_MODE_SIZE (src_mode)
3168 && legal_subregs[i].byte_mask & offset)
3169 {
3170 switch (legal_subregs[i].legal_when)
3171 {
3172 case 1:
3173 return false;
3174 case 16:
3175 if (TARGET_A16)
3176 return false;
3177 break;
3178 case 24:
3179 if (TARGET_A24)
3180 return false;
3181 break;
3182 }
3183 }
3184 return true;
3185}
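/* A worked example of the table lookup above: for
   (subreg:QI (reg:HI r0) 1) the outer size is 1, the inner size is 2,
   and the byte offset 1 becomes the mask bit 0x02, which matches the
   {1, 2, 0x03, 1, R0_REGNO} entry (r0h), so the subreg is accepted.
   The same QImode subreg of r2 finds no entry and is rejected.  */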
3186
38b2d076
DD
3187/* Returns TRUE if we support a move between the first two operands.
3188 At the moment, we just want to discourage mem to mem moves until
3189 after reload, because reload has a hard time with our limited
3190 number of address registers, and we can get into a situation where
3191 we need three of them when we only have two. */
3192bool
ef4bddc2 3193m32c_mov_ok (rtx * operands, machine_mode mode ATTRIBUTE_UNUSED)
38b2d076
DD
3194{
3195 rtx op0 = operands[0];
3196 rtx op1 = operands[1];
3197
3198 if (TARGET_A24)
3199 return true;
3200
3201#define DEBUG_MOV_OK 0
3202#if DEBUG_MOV_OK
3203 fprintf (stderr, "m32c_mov_ok %s\n", mode_name[mode]);
3204 debug_rtx (op0);
3205 debug_rtx (op1);
3206#endif
3207
3208 if (GET_CODE (op0) == SUBREG)
3209 op0 = XEXP (op0, 0);
3210 if (GET_CODE (op1) == SUBREG)
3211 op1 = XEXP (op1, 0);
3212
3213 if (GET_CODE (op0) == MEM
3214 && GET_CODE (op1) == MEM
3215 && ! reload_completed)
3216 {
3217#if DEBUG_MOV_OK
3218 fprintf (stderr, " - no, mem to mem\n");
3219#endif
3220 return false;
3221 }
3222
3223#if DEBUG_MOV_OK
3224 fprintf (stderr, " - ok\n");
3225#endif
3226 return true;
3227}
3228
ff485e71
DD
3229/* Returns TRUE if two consecutive HImode mov instructions, generated
 3230 to move a 32-bit immediate into a 32-bit variable, can be combined
 3231 into a single SImode mov instruction. */
3232bool
55356334 3233m32c_immd_dbl_mov (rtx * operands ATTRIBUTE_UNUSED,
ef4bddc2 3234 machine_mode mode ATTRIBUTE_UNUSED)
ff485e71 3235{
55356334
RS
3236 /* ??? This relied on the now-defunct MEM_SCALAR and MEM_IN_STRUCT_P
3237 flags. */
ff485e71
DD
3238 return false;
3239}
3240
38b2d076
DD
3241/* Expanders */
3242
3243/* Subregs are non-orthogonal for us, because our registers are all
3244 different sizes. */
3245static rtx
ef4bddc2
RS
3246m32c_subreg (machine_mode outer,
3247 rtx x, machine_mode inner, int byte)
38b2d076
DD
3248{
3249 int r, nr = -1;
3250
3251 /* Converting MEMs to different types that are the same size, we
3252 just rewrite them. */
3253 if (GET_CODE (x) == SUBREG
3254 && SUBREG_BYTE (x) == 0
3255 && GET_CODE (SUBREG_REG (x)) == MEM
3256 && (GET_MODE_SIZE (GET_MODE (x))
3257 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
3258 {
3259 rtx oldx = x;
3260 x = gen_rtx_MEM (GET_MODE (x), XEXP (SUBREG_REG (x), 0));
3261 MEM_COPY_ATTRIBUTES (x, SUBREG_REG (oldx));
3262 }
3263
3264 /* Push/pop get done as smaller push/pops. */
3265 if (GET_CODE (x) == MEM
3266 && (GET_CODE (XEXP (x, 0)) == PRE_DEC
3267 || GET_CODE (XEXP (x, 0)) == POST_INC))
3268 return gen_rtx_MEM (outer, XEXP (x, 0));
3269 if (GET_CODE (x) == SUBREG
3270 && GET_CODE (XEXP (x, 0)) == MEM
3271 && (GET_CODE (XEXP (XEXP (x, 0), 0)) == PRE_DEC
3272 || GET_CODE (XEXP (XEXP (x, 0), 0)) == POST_INC))
3273 return gen_rtx_MEM (outer, XEXP (XEXP (x, 0), 0));
3274
3275 if (GET_CODE (x) != REG)
146456c1
DD
3276 {
3277 rtx r = simplify_gen_subreg (outer, x, inner, byte);
3278 if (GET_CODE (r) == SUBREG
3279 && GET_CODE (x) == MEM
3280 && MEM_VOLATILE_P (x))
3281 {
3282 /* Volatile MEMs don't get simplified, but we need them to
3283 be. We are little endian, so the subreg byte is the
3284 offset. */
91140cd3 3285 r = adjust_address_nv (x, outer, byte);
146456c1
DD
3286 }
3287 return r;
3288 }
38b2d076
DD
3289
3290 r = REGNO (x);
3291 if (r >= FIRST_PSEUDO_REGISTER || r == AP_REGNO)
3292 return simplify_gen_subreg (outer, x, inner, byte);
3293
3294 if (IS_MEM_REGNO (r))
3295 return simplify_gen_subreg (outer, x, inner, byte);
3296
3297 /* This is where the complexities of our register layout are
3298 described. */
3299 if (byte == 0)
3300 nr = r;
3301 else if (outer == HImode)
3302 {
3303 if (r == R0_REGNO && byte == 2)
3304 nr = R2_REGNO;
3305 else if (r == R0_REGNO && byte == 4)
3306 nr = R1_REGNO;
3307 else if (r == R0_REGNO && byte == 6)
3308 nr = R3_REGNO;
3309 else if (r == R1_REGNO && byte == 2)
3310 nr = R3_REGNO;
3311 else if (r == A0_REGNO && byte == 2)
3312 nr = A1_REGNO;
3313 }
3314 else if (outer == SImode)
3315 {
3316 if (r == R0_REGNO && byte == 0)
3317 nr = R0_REGNO;
3318 else if (r == R0_REGNO && byte == 4)
3319 nr = R1_REGNO;
3320 }
3321 if (nr == -1)
3322 {
3323 fprintf (stderr, "m32c_subreg %s %s %d\n",
3324 mode_name[outer], mode_name[inner], byte);
3325 debug_rtx (x);
3326 gcc_unreachable ();
3327 }
3328 return gen_rtx_REG (outer, nr);
3329}
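/* To make the mapping above concrete: a DImode value rooted at r0 is
   laid out, low word first, as r0, r2, r1, r3, so its HImode pieces
   are r0 (byte 0), r2 (byte 2), r1 (byte 4) and r3 (byte 6), and its
   SImode halves are rooted at r0 and r1.  This matches the
   "r3 r1 r2 r0" entry in legal_subregs above.  */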
3330
3331/* Used to emit move instructions. We split some moves,
3332 and avoid mem-mem moves. */
3333int
ef4bddc2 3334m32c_prepare_move (rtx * operands, machine_mode mode)
38b2d076 3335{
5fd5d713
DD
3336 if (far_addr_space_p (operands[0])
3337 && CONSTANT_P (operands[1]))
3338 {
3339 operands[1] = force_reg (GET_MODE (operands[0]), operands[1]);
3340 }
38b2d076
DD
3341 if (TARGET_A16 && mode == PSImode)
3342 return m32c_split_move (operands, mode, 1);
3343 if ((GET_CODE (operands[0]) == MEM)
3344 && (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY))
3345 {
3346 rtx pmv = XEXP (operands[0], 0);
3347 rtx dest_reg = XEXP (pmv, 0);
3348 rtx dest_mod = XEXP (pmv, 1);
3349
f7df4a84 3350 emit_insn (gen_rtx_SET (dest_reg, dest_mod));
38b2d076
DD
3351 operands[0] = gen_rtx_MEM (mode, dest_reg);
3352 }
b3a13419 3353 if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
38b2d076
DD
3354 operands[1] = copy_to_mode_reg (mode, operands[1]);
3355 return 0;
3356}
3357
3358#define DEBUG_SPLIT 0
3359
 3360/* Returns TRUE if the given PSImode move should be split. We split
 3361 all r8c/m16c moves, since those chips don't support them, and all
 3362 POP.L moves, as we can only *push* SImode. */
3363int
3364m32c_split_psi_p (rtx * operands)
3365{
3366#if DEBUG_SPLIT
3367 fprintf (stderr, "\nm32c_split_psi_p\n");
3368 debug_rtx (operands[0]);
3369 debug_rtx (operands[1]);
3370#endif
3371 if (TARGET_A16)
3372 {
3373#if DEBUG_SPLIT
3374 fprintf (stderr, "yes, A16\n");
3375#endif
3376 return 1;
3377 }
3378 if (GET_CODE (operands[1]) == MEM
3379 && GET_CODE (XEXP (operands[1], 0)) == POST_INC)
3380 {
3381#if DEBUG_SPLIT
3382 fprintf (stderr, "yes, pop.l\n");
3383#endif
3384 return 1;
3385 }
3386#if DEBUG_SPLIT
3387 fprintf (stderr, "no, default\n");
3388#endif
3389 return 0;
3390}
3391
3392/* Split the given move. SPLIT_ALL is 0 if splitting is optional
3393 (define_expand), 1 if it is not optional (define_insn_and_split),
3394 and 3 for define_split (alternate api). */
3395int
ef4bddc2 3396m32c_split_move (rtx * operands, machine_mode mode, int split_all)
38b2d076
DD
3397{
3398 rtx s[4], d[4];
3399 int parts, si, di, rev = 0;
3400 int rv = 0, opi = 2;
ef4bddc2 3401 machine_mode submode = HImode;
38b2d076
DD
3402 rtx *ops, local_ops[10];
3403
3404 /* define_split modifies the existing operands, but the other two
3405 emit new insns. OPS is where we store the operand pairs, which
3406 we emit later. */
3407 if (split_all == 3)
3408 ops = operands;
3409 else
3410 ops = local_ops;
3411
3412 /* Else HImode. */
3413 if (mode == DImode)
3414 submode = SImode;
3415
3416 /* Before splitting mem-mem moves, force one operand into a
3417 register. */
b3a13419 3418 if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
38b2d076
DD
3419 {
3420#if DEBUG0
3421 fprintf (stderr, "force_reg...\n");
3422 debug_rtx (operands[1]);
3423#endif
3424 operands[1] = force_reg (mode, operands[1]);
3425#if DEBUG0
3426 debug_rtx (operands[1]);
3427#endif
3428 }
3429
3430 parts = 2;
3431
3432#if DEBUG_SPLIT
b3a13419
ILT
3433 fprintf (stderr, "\nsplit_move %d all=%d\n", !can_create_pseudo_p (),
3434 split_all);
38b2d076
DD
3435 debug_rtx (operands[0]);
3436 debug_rtx (operands[1]);
3437#endif
3438
eb5f0c07
DD
3439 /* Note that split_all is not used to select the api after this
3440 point, so it's safe to set it to 3 even with define_insn. */
3441 /* None of the chips can move SI operands to sp-relative addresses,
3442 so we always split those. */
03dd17b1 3443 if (satisfies_constraint_Ss (operands[0]))
eb5f0c07
DD
3444 split_all = 3;
3445
5fd5d713
DD
3446 if (TARGET_A16
3447 && (far_addr_space_p (operands[0])
3448 || far_addr_space_p (operands[1])))
3449 split_all |= 1;
3450
38b2d076
DD
3451 /* We don't need to split these. */
3452 if (TARGET_A24
3453 && split_all != 3
3454 && (mode == SImode || mode == PSImode)
3455 && !(GET_CODE (operands[1]) == MEM
3456 && GET_CODE (XEXP (operands[1], 0)) == POST_INC))
3457 return 0;
3458
3459 /* First, enumerate the subregs we'll be dealing with. */
3460 for (si = 0; si < parts; si++)
3461 {
3462 d[si] =
3463 m32c_subreg (submode, operands[0], mode,
3464 si * GET_MODE_SIZE (submode));
3465 s[si] =
3466 m32c_subreg (submode, operands[1], mode,
3467 si * GET_MODE_SIZE (submode));
3468 }
3469
3470 /* Split pushes by emitting a sequence of smaller pushes. */
3471 if (GET_CODE (d[0]) == MEM && GET_CODE (XEXP (d[0], 0)) == PRE_DEC)
3472 {
3473 for (si = parts - 1; si >= 0; si--)
3474 {
3475 ops[opi++] = gen_rtx_MEM (submode,
3476 gen_rtx_PRE_DEC (Pmode,
3477 gen_rtx_REG (Pmode,
3478 SP_REGNO)));
3479 ops[opi++] = s[si];
3480 }
3481
3482 rv = 1;
3483 }
3484 /* Likewise for pops. */
3485 else if (GET_CODE (s[0]) == MEM && GET_CODE (XEXP (s[0], 0)) == POST_INC)
3486 {
3487 for (di = 0; di < parts; di++)
3488 {
3489 ops[opi++] = d[di];
3490 ops[opi++] = gen_rtx_MEM (submode,
3491 gen_rtx_POST_INC (Pmode,
3492 gen_rtx_REG (Pmode,
3493 SP_REGNO)));
3494 }
3495 rv = 1;
3496 }
3497 else if (split_all)
3498 {
3499 /* if d[di] == s[si] for any di < si, we'll early clobber. */
3500 for (di = 0; di < parts - 1; di++)
3501 for (si = di + 1; si < parts; si++)
3502 if (reg_mentioned_p (d[di], s[si]))
3503 rev = 1;
3504
3505 if (rev)
3506 for (si = 0; si < parts; si++)
3507 {
3508 ops[opi++] = d[si];
3509 ops[opi++] = s[si];
3510 }
3511 else
3512 for (si = parts - 1; si >= 0; si--)
3513 {
3514 ops[opi++] = d[si];
3515 ops[opi++] = s[si];
3516 }
3517 rv = 1;
3518 }
3519 /* Now emit any moves we may have accumulated. */
3520 if (rv && split_all != 3)
3521 {
3522 int i;
3523 for (i = 2; i < opi; i += 2)
3524 emit_move_insn (ops[i], ops[i + 1]);
3525 }
3526 return rv;
3527}
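/* A sketch of what the splitter produces: on the A16 parts an SImode
   load such as (set (reg:SI r0) (mem:SI (reg:HI a0))) is emitted as
   two HImode moves, roughly r0 <- 0[a0] and r2 <- 2[a0], since an
   SImode value rooted at r0 lives in r2:r0.  The overlap check on
   d[] and s[] reverses the emission order when a destination half
   would clobber a source half that has not been read yet.  */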
3528
07127a0a
DD
3529/* The m32c has a number of opcodes that act like memcpy, strcmp, and
3530 the like. For the R8C they expect one of the addresses to be in
3531 R1L:An so we need to arrange for that. Otherwise, it's just a
3532 matter of picking out the operands we want and emitting the right
3533 pattern for them. All these expanders, which correspond to
3534 patterns in blkmov.md, must return nonzero if they expand the insn,
3535 or zero if they should FAIL. */
3536
3537/* This is a memset() opcode. All operands are implied, so we need to
3538 arrange for them to be in the right registers. The opcode wants
3539 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3540 the count (HI), and $2 the value (QI). */
3541int
3542m32c_expand_setmemhi(rtx *operands)
3543{
3544 rtx desta, count, val;
3545 rtx desto, counto;
3546
3547 desta = XEXP (operands[0], 0);
3548 count = operands[1];
3549 val = operands[2];
3550
3551 desto = gen_reg_rtx (Pmode);
3552 counto = gen_reg_rtx (HImode);
3553
3554 if (GET_CODE (desta) != REG
3555 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3556 desta = copy_to_mode_reg (Pmode, desta);
3557
3558 /* This looks like an arbitrary restriction, but this is by far the
3559 most common case. For counts 8..14 this actually results in
3560 smaller code with no speed penalty because the half-sized
3561 constant can be loaded with a shorter opcode. */
3562 if (GET_CODE (count) == CONST_INT
3563 && GET_CODE (val) == CONST_INT
3564 && ! (INTVAL (count) & 1)
3565 && (INTVAL (count) > 1)
3566 && (INTVAL (val) <= 7 && INTVAL (val) >= -8))
3567 {
3568 unsigned v = INTVAL (val) & 0xff;
3569 v = v | (v << 8);
3570 count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3571 val = copy_to_mode_reg (HImode, GEN_INT (v));
3572 if (TARGET_A16)
3573 emit_insn (gen_setmemhi_whi_op (desto, counto, val, desta, count));
3574 else
3575 emit_insn (gen_setmemhi_wpsi_op (desto, counto, val, desta, count));
3576 return 1;
3577 }
3578
3579 /* This is the generalized memset() case. */
3580 if (GET_CODE (val) != REG
3581 || REGNO (val) < FIRST_PSEUDO_REGISTER)
3582 val = copy_to_mode_reg (QImode, val);
3583
3584 if (GET_CODE (count) != REG
3585 || REGNO (count) < FIRST_PSEUDO_REGISTER)
3586 count = copy_to_mode_reg (HImode, count);
3587
3588 if (TARGET_A16)
3589 emit_insn (gen_setmemhi_bhi_op (desto, counto, val, desta, count));
3590 else
3591 emit_insn (gen_setmemhi_bpsi_op (desto, counto, val, desta, count));
3592
3593 return 1;
3594}
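/* A worked example of the fast path above: for memset (dst, 5, 10)
   the value fits in -8..7 and the count is even, so the expander
   loads the 16-bit constant 0x0505 (the byte duplicated into both
   halves), halves the count to 5, and uses the word-wise opcode;
   memset (dst, 5, 9) falls through to the byte-wise path instead.  */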
3595
3596/* This is a memcpy() opcode. All operands are implied, so we need to
3597 arrange for them to be in the right registers. The opcode wants
3598 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3599 is the source (MEM:BLK), and $2 the count (HI). */
3600int
3601m32c_expand_movmemhi(rtx *operands)
3602{
3603 rtx desta, srca, count;
3604 rtx desto, srco, counto;
3605
3606 desta = XEXP (operands[0], 0);
3607 srca = XEXP (operands[1], 0);
3608 count = operands[2];
3609
3610 desto = gen_reg_rtx (Pmode);
3611 srco = gen_reg_rtx (Pmode);
3612 counto = gen_reg_rtx (HImode);
3613
3614 if (GET_CODE (desta) != REG
3615 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3616 desta = copy_to_mode_reg (Pmode, desta);
3617
3618 if (GET_CODE (srca) != REG
3619 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3620 srca = copy_to_mode_reg (Pmode, srca);
3621
3622 /* Similar to setmem, but we don't need to check the value. */
3623 if (GET_CODE (count) == CONST_INT
3624 && ! (INTVAL (count) & 1)
3625 && (INTVAL (count) > 1))
3626 {
3627 count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3628 if (TARGET_A16)
3629 emit_insn (gen_movmemhi_whi_op (desto, srco, counto, desta, srca, count));
3630 else
3631 emit_insn (gen_movmemhi_wpsi_op (desto, srco, counto, desta, srca, count));
3632 return 1;
3633 }
3634
 3635 /* This is the generalized memcpy() case. */
3636 if (GET_CODE (count) != REG
3637 || REGNO (count) < FIRST_PSEUDO_REGISTER)
3638 count = copy_to_mode_reg (HImode, count);
3639
3640 if (TARGET_A16)
3641 emit_insn (gen_movmemhi_bhi_op (desto, srco, counto, desta, srca, count));
3642 else
3643 emit_insn (gen_movmemhi_bpsi_op (desto, srco, counto, desta, srca, count));
3644
3645 return 1;
3646}
3647
 3648/* This is a stpcpy() opcode. $0 is the result pointer after
 3649 the copy, which should point to the NUL at the end of the string,
3650 $1 is the destination (MEM:BLK), and $2 is the source (MEM:BLK).
3651 Since our opcode leaves the destination pointing *after* the NUL,
3652 we must emit an adjustment. */
3653int
3654m32c_expand_movstr(rtx *operands)
3655{
3656 rtx desta, srca;
3657 rtx desto, srco;
3658
3659 desta = XEXP (operands[1], 0);
3660 srca = XEXP (operands[2], 0);
3661
3662 desto = gen_reg_rtx (Pmode);
3663 srco = gen_reg_rtx (Pmode);
3664
3665 if (GET_CODE (desta) != REG
3666 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3667 desta = copy_to_mode_reg (Pmode, desta);
3668
3669 if (GET_CODE (srca) != REG
3670 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3671 srca = copy_to_mode_reg (Pmode, srca);
3672
3673 emit_insn (gen_movstr_op (desto, srco, desta, srca));
3674 /* desto ends up being a1, which allows this type of add through MOVA. */
3675 emit_insn (gen_addpsi3 (operands[0], desto, GEN_INT (-1)));
3676
3677 return 1;
3678}
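/* Concretely: after copying "abc" the string-move opcode leaves the
   destination register pointing just past the NUL (dst + 4), while
   stpcpy() must return a pointer to the NUL itself (dst + 3), hence
   the final addpsi3 of -1 above.  */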
3679
3680/* This is a strcmp() opcode. $0 is the destination (HI) which holds
3681 <=>0 depending on the comparison, $1 is one string (MEM:BLK), and
3682 $2 is the other (MEM:BLK). We must do the comparison, and then
3683 convert the flags to a signed integer result. */
3684int
3685m32c_expand_cmpstr(rtx *operands)
3686{
3687 rtx src1a, src2a;
3688
3689 src1a = XEXP (operands[1], 0);
3690 src2a = XEXP (operands[2], 0);
3691
3692 if (GET_CODE (src1a) != REG
3693 || REGNO (src1a) < FIRST_PSEUDO_REGISTER)
3694 src1a = copy_to_mode_reg (Pmode, src1a);
3695
3696 if (GET_CODE (src2a) != REG
3697 || REGNO (src2a) < FIRST_PSEUDO_REGISTER)
3698 src2a = copy_to_mode_reg (Pmode, src2a);
3699
3700 emit_insn (gen_cmpstrhi_op (src1a, src2a, src1a, src2a));
3701 emit_insn (gen_cond_to_int (operands[0]));
3702
3703 return 1;
3704}
3705
3706
23fed240
DD
3707typedef rtx (*shift_gen_func)(rtx, rtx, rtx);
3708
3709static shift_gen_func
3710shift_gen_func_for (int mode, int code)
3711{
3712#define GFF(m,c,f) if (mode == m && code == c) return f
3713 GFF(QImode, ASHIFT, gen_ashlqi3_i);
3714 GFF(QImode, ASHIFTRT, gen_ashrqi3_i);
3715 GFF(QImode, LSHIFTRT, gen_lshrqi3_i);
3716 GFF(HImode, ASHIFT, gen_ashlhi3_i);
3717 GFF(HImode, ASHIFTRT, gen_ashrhi3_i);
3718 GFF(HImode, LSHIFTRT, gen_lshrhi3_i);
3719 GFF(PSImode, ASHIFT, gen_ashlpsi3_i);
3720 GFF(PSImode, ASHIFTRT, gen_ashrpsi3_i);
3721 GFF(PSImode, LSHIFTRT, gen_lshrpsi3_i);
3722 GFF(SImode, ASHIFT, TARGET_A16 ? gen_ashlsi3_16 : gen_ashlsi3_24);
3723 GFF(SImode, ASHIFTRT, TARGET_A16 ? gen_ashrsi3_16 : gen_ashrsi3_24);
3724 GFF(SImode, LSHIFTRT, TARGET_A16 ? gen_lshrsi3_16 : gen_lshrsi3_24);
3725#undef GFF
07127a0a 3726 gcc_unreachable ();
23fed240
DD
3727}
3728
38b2d076
DD
3729/* The m32c only has one shift, but it takes a signed count. GCC
3730 doesn't want this, so we fake it by negating any shift count when
07127a0a
DD
3731 we're pretending to shift the other way. Also, the shift count is
3732 limited to -8..8. It's slightly better to use two shifts for 9..15
3733 than to load the count into r1h, so we do that too. */
38b2d076 3734int
23fed240 3735m32c_prepare_shift (rtx * operands, int scale, int shift_code)
38b2d076 3736{
ef4bddc2 3737 machine_mode mode = GET_MODE (operands[0]);
23fed240 3738 shift_gen_func func = shift_gen_func_for (mode, shift_code);
38b2d076 3739 rtx temp;
23fed240
DD
3740
3741 if (GET_CODE (operands[2]) == CONST_INT)
38b2d076 3742 {
23fed240
DD
3743 int maxc = TARGET_A24 && (mode == PSImode || mode == SImode) ? 32 : 8;
3744 int count = INTVAL (operands[2]) * scale;
3745
3746 while (count > maxc)
3747 {
3748 temp = gen_reg_rtx (mode);
3749 emit_insn (func (temp, operands[1], GEN_INT (maxc)));
3750 operands[1] = temp;
3751 count -= maxc;
3752 }
3753 while (count < -maxc)
3754 {
3755 temp = gen_reg_rtx (mode);
3756 emit_insn (func (temp, operands[1], GEN_INT (-maxc)));
3757 operands[1] = temp;
3758 count += maxc;
3759 }
3760 emit_insn (func (operands[0], operands[1], GEN_INT (count)));
3761 return 1;
38b2d076 3762 }
2e160056
DD
3763
3764 temp = gen_reg_rtx (QImode);
38b2d076 3765 if (scale < 0)
2e160056
DD
3766 /* The pattern has a NEG that corresponds to this. */
3767 emit_move_insn (temp, gen_rtx_NEG (QImode, operands[2]));
3768 else if (TARGET_A16 && mode == SImode)
3769 /* We do this because the code below may modify this, we don't
3770 want to modify the origin of this value. */
3771 emit_move_insn (temp, operands[2]);
38b2d076 3772 else
2e160056 3773 /* We'll only use it for the shift, no point emitting a move. */
38b2d076 3774 temp = operands[2];
2e160056 3775
16659fcf 3776 if (TARGET_A16 && GET_MODE_SIZE (mode) == 4)
2e160056
DD
3777 {
3778 /* The m16c has a limit of -16..16 for SI shifts, even when the
3779 shift count is in a register. Since there are so many targets
3780 of these shifts, it's better to expand the RTL here than to
3781 call a helper function.
3782
3783 The resulting code looks something like this:
3784
3785 cmp.b r1h,-16
3786 jge.b 1f
3787 shl.l -16,dest
3788 add.b r1h,16
3789 1f: cmp.b r1h,16
3790 jle.b 1f
3791 shl.l 16,dest
3792 sub.b r1h,16
3793 1f: shl.l r1h,dest
3794
3795 We take advantage of the fact that "negative" shifts are
3796 undefined to skip one of the comparisons. */
3797
3798 rtx count;
e60365d3
TS
3799 rtx label, tempvar;
3800 rtx_insn *insn;
2e160056 3801
16659fcf
DD
3802 emit_move_insn (operands[0], operands[1]);
3803
2e160056
DD
3804 count = temp;
3805 label = gen_label_rtx ();
2e160056
DD
3806 LABEL_NUSES (label) ++;
3807
833bf445
DD
3808 tempvar = gen_reg_rtx (mode);
3809
2e160056
DD
3810 if (shift_code == ASHIFT)
3811 {
 3812 /* This is a left shift. We only need to check positive counts. */
3813 emit_jump_insn (gen_cbranchqi4 (gen_rtx_LE (VOIDmode, 0, 0),
3814 count, GEN_INT (16), label));
833bf445
DD
3815 emit_insn (func (tempvar, operands[0], GEN_INT (8)));
3816 emit_insn (func (operands[0], tempvar, GEN_INT (8)));
2e160056
DD
3817 insn = emit_insn (gen_addqi3 (count, count, GEN_INT (-16)));
3818 emit_label_after (label, insn);
3819 }
3820 else
3821 {
 3822 /* This is a right shift. We only need to check negative counts. */
3823 emit_jump_insn (gen_cbranchqi4 (gen_rtx_GE (VOIDmode, 0, 0),
3824 count, GEN_INT (-16), label));
833bf445
DD
3825 emit_insn (func (tempvar, operands[0], GEN_INT (-8)));
3826 emit_insn (func (operands[0], tempvar, GEN_INT (-8)));
2e160056
DD
3827 insn = emit_insn (gen_addqi3 (count, count, GEN_INT (16)));
3828 emit_label_after (label, insn);
3829 }
16659fcf
DD
3830 operands[1] = operands[0];
3831 emit_insn (func (operands[0], operands[0], count));
3832 return 1;
2e160056
DD
3833 }
3834
38b2d076
DD
3835 operands[2] = temp;
3836 return 0;
3837}
3838
12ea2512
DD
3839/* The m32c has a limited range of operations that work on PSImode
3840 values; we have to expand to SI, do the math, and truncate back to
3841 PSI. Yes, this is expensive, but hopefully gcc will learn to avoid
3842 those cases. */
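/* Roughly, for a = b * c in PSImode the expander below produces
   t1 = (SImode) b;  t2 = t1 * (SImode) c;  a = truncate (t2).  */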
3843void
3844m32c_expand_neg_mulpsi3 (rtx * operands)
3845{
3846 /* operands: a = b * i */
3847 rtx temp1; /* b as SI */
07127a0a
DD
3848 rtx scale /* i as SI */;
3849 rtx temp2; /* a*b as SI */
12ea2512
DD
3850
3851 temp1 = gen_reg_rtx (SImode);
3852 temp2 = gen_reg_rtx (SImode);
07127a0a
DD
3853 if (GET_CODE (operands[2]) != CONST_INT)
3854 {
3855 scale = gen_reg_rtx (SImode);
3856 emit_insn (gen_zero_extendpsisi2 (scale, operands[2]));
3857 }
3858 else
3859 scale = copy_to_mode_reg (SImode, operands[2]);
12ea2512
DD
3860
3861 emit_insn (gen_zero_extendpsisi2 (temp1, operands[1]));
07127a0a
DD
3862 temp2 = expand_simple_binop (SImode, MULT, temp1, scale, temp2, 1, OPTAB_LIB);
3863 emit_insn (gen_truncsipsi2 (operands[0], temp2));
12ea2512
DD
3864}
3865
38b2d076
DD
3866/* Pattern Output Functions */
3867
07127a0a
DD
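/* Expand a conditional move.  Only EQ/NE comparisons whose two arms are
   CONST_INTs are handled here; anything else returns nonzero so the
   caller can give up on this pattern.  An NE test is rewritten as the
   equivalent EQ test with the arms swapped before the IF_THEN_ELSE is
   emitted.  */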
3868int
3869m32c_expand_movcc (rtx *operands)
3870{
3871 rtx rel = operands[1];
0166ff05 3872
07127a0a
DD
3873 if (GET_CODE (rel) != EQ && GET_CODE (rel) != NE)
3874 return 1;
3875 if (GET_CODE (operands[2]) != CONST_INT
3876 || GET_CODE (operands[3]) != CONST_INT)
3877 return 1;
07127a0a
DD
3878 if (GET_CODE (rel) == NE)
3879 {
3880 rtx tmp = operands[2];
3881 operands[2] = operands[3];
3882 operands[3] = tmp;
f90b7a5a 3883 rel = gen_rtx_EQ (GET_MODE (rel), XEXP (rel, 0), XEXP (rel, 1));
07127a0a 3884 }
0166ff05 3885
0166ff05
DD
3886 emit_move_insn (operands[0],
3887 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
f90b7a5a 3888 rel,
0166ff05
DD
3889 operands[2],
3890 operands[3]));
07127a0a
DD
3891 return 0;
3892}
3893
 3894/* Used for the "insv" pattern. Return nonzero to make the pattern fail, zero once the insertion has been emitted. */
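/* As a sketch of what the code below produces: inserting a constant 1
   into bit 2 of a QImode register on an A24 part takes "case 5" and
   emits gen_iorqi3_24 (op0, src0, GEN_INT (0x04)); inserting a 0 takes
   "case 1" and emits gen_andqi3_24 (op0, src0, GEN_INT (-5)), i.e. an
   AND with ~0x04 sign-extended to a valid QImode constant.  */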
3895int
3896m32c_expand_insv (rtx *operands)
3897{
3898 rtx op0, src0, p;
3899 int mask;
3900
3901 if (INTVAL (operands[1]) != 1)
3902 return 1;
3903
9cb96754
N
3904 /* Our insv opcode (bset, bclr) can only insert a one-bit constant. */
3905 if (GET_CODE (operands[3]) != CONST_INT)
3906 return 1;
3907 if (INTVAL (operands[3]) != 0
3908 && INTVAL (operands[3]) != 1
3909 && INTVAL (operands[3]) != -1)
3910 return 1;
3911
07127a0a
DD
3912 mask = 1 << INTVAL (operands[2]);
3913
3914 op0 = operands[0];
3915 if (GET_CODE (op0) == SUBREG
3916 && SUBREG_BYTE (op0) == 0)
3917 {
3918 rtx sub = SUBREG_REG (op0);
3919 if (GET_MODE (sub) == HImode || GET_MODE (sub) == QImode)
3920 op0 = sub;
3921 }
3922
b3a13419 3923 if (!can_create_pseudo_p ()
07127a0a
DD
3924 || (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0)))
3925 src0 = op0;
3926 else
3927 {
3928 src0 = gen_reg_rtx (GET_MODE (op0));
3929 emit_move_insn (src0, op0);
3930 }
3931
3932 if (GET_MODE (op0) == HImode
3933 && INTVAL (operands[2]) >= 8
444d6efe 3934 && GET_CODE (op0) == MEM)
07127a0a
DD
3935 {
3936 /* We are little endian. */
0a81f074
RS
3937 rtx new_mem = gen_rtx_MEM (QImode, plus_constant (Pmode,
3938 XEXP (op0, 0), 1));
07127a0a
DD
3939 MEM_COPY_ATTRIBUTES (new_mem, op0);
3940 mask >>= 8;
3941 }
3942
8e4edce7
DD
3943 /* First, we generate a mask with the correct polarity. If we are
3944 storing a zero, we want an AND mask, so invert it. */
3945 if (INTVAL (operands[3]) == 0)
07127a0a 3946 {
16659fcf 3947 /* Storing a zero, use an AND mask */
07127a0a
DD
3948 if (GET_MODE (op0) == HImode)
3949 mask ^= 0xffff;
3950 else
3951 mask ^= 0xff;
3952 }
8e4edce7
DD
3953 /* Now we need to properly sign-extend the mask in case we need to
3954 fall back to an AND or OR opcode. */
07127a0a
DD
3955 if (GET_MODE (op0) == HImode)
3956 {
3957 if (mask & 0x8000)
3958 mask -= 0x10000;
3959 }
3960 else
3961 {
3962 if (mask & 0x80)
3963 mask -= 0x100;
3964 }
3965
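/* Pick the insn generator.  Bit 2 of the index means we are storing a
   one (IOR with the mask) rather than a zero (AND with the inverted
   mask), bit 1 means HImode rather than QImode, and bit 0 means an A24
   rather than an A16 part.  */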
3966 switch ( (INTVAL (operands[3]) ? 4 : 0)
3967 + ((GET_MODE (op0) == HImode) ? 2 : 0)
3968 + (TARGET_A24 ? 1 : 0))
3969 {
3970 case 0: p = gen_andqi3_16 (op0, src0, GEN_INT (mask)); break;
3971 case 1: p = gen_andqi3_24 (op0, src0, GEN_INT (mask)); break;
3972 case 2: p = gen_andhi3_16 (op0, src0, GEN_INT (mask)); break;
3973 case 3: p = gen_andhi3_24 (op0, src0, GEN_INT (mask)); break;
3974 case 4: p = gen_iorqi3_16 (op0, src0, GEN_INT (mask)); break;
3975 case 5: p = gen_iorqi3_24 (op0, src0, GEN_INT (mask)); break;
3976 case 6: p = gen_iorhi3_16 (op0, src0, GEN_INT (mask)); break;
3977 case 7: p = gen_iorhi3_24 (op0, src0, GEN_INT (mask)); break;
653e2568 3978 default: p = NULL_RTX; break; /* Not reached, but silences a warning. */
07127a0a
DD
3979 }
3980
3981 emit_insn (p);
3982 return 0;
3983}
3984
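/* Output assembler for a store-condition pattern.  When the result goes
   to r0l and the test is EQ or NE, a single STZX stores a 1 or a 0
   according to the Z flag; otherwise we set bit 0 of the destination
   with a BMcnd and then AND away the remaining bits.  */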
3985const char *
3986m32c_scc_pattern(rtx *operands, RTX_CODE code)
3987{
3988 static char buf[30];
3989 if (GET_CODE (operands[0]) == REG
3990 && REGNO (operands[0]) == R0_REGNO)
3991 {
3992 if (code == EQ)
3993 return "stzx\t#1,#0,r0l";
3994 if (code == NE)
3995 return "stzx\t#0,#1,r0l";
3996 }
3997 sprintf(buf, "bm%s\t0,%%h0\n\tand.b\t#1,%%0", GET_RTX_NAME (code));
3998 return buf;
3999}
4000
5abd2125
JS
4001/* Encode symbol attributes of a SYMBOL_REF into its
4002 SYMBOL_REF_FLAGS. */
4003static void
4004m32c_encode_section_info (tree decl, rtx rtl, int first)
4005{
4006 int extra_flags = 0;
4007
4008 default_encode_section_info (decl, rtl, first);
4009 if (TREE_CODE (decl) == FUNCTION_DECL
4010 && m32c_special_page_vector_p (decl))
4011
4012 extra_flags = SYMBOL_FLAG_FUNCVEC_FUNCTION;
4013
4014 if (extra_flags)
4015 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= extra_flags;
4016}
4017
38b2d076
DD
4018/* Returns TRUE if the current function is a leaf, and thus we can
4019 determine which registers an interrupt function really needs to
4020 save. The logic below is mostly about finding the insn sequence
4021 that's the function, versus any sequence that might be open for the
4022 current insn. */
4023static int
4024m32c_leaf_function_p (void)
4025{
38b2d076
DD
4026 int rv;
4027
614d5bd8 4028 push_topmost_sequence ();
38b2d076 4029 rv = leaf_function_p ();
614d5bd8 4030 pop_topmost_sequence ();
38b2d076
DD
4031 return rv;
4032}
4033
4034/* Returns TRUE if the current function needs to use the ENTER/EXIT
4035 opcodes. If the function doesn't need the frame base or stack
4036 pointer, it can use the simpler RTS opcode. */
4037static bool
4038m32c_function_needs_enter (void)
4039{
b32d5189 4040 rtx_insn *insn;
38b2d076
DD
4041 rtx sp = gen_rtx_REG (Pmode, SP_REGNO);
4042 rtx fb = gen_rtx_REG (Pmode, FB_REGNO);
4043
614d5bd8
AM
4044 for (insn = get_topmost_sequence ()->first; insn; insn = NEXT_INSN (insn))
4045 if (NONDEBUG_INSN_P (insn))
4046 {
4047 if (reg_mentioned_p (sp, insn))
4048 return true;
4049 if (reg_mentioned_p (fb, insn))
4050 return true;
4051 }
38b2d076
DD
4052 return false;
4053}
4054
4055/* Mark all the subexpressions of the PARALLEL rtx PAR as
4056 frame-related. Return PAR.
4057
4058 dwarf2out.c:dwarf2out_frame_debug_expr ignores sub-expressions of a
4059 PARALLEL rtx other than the first if they do not have the
4060 FRAME_RELATED flag set on them. So this function is handy for
4061 marking up 'enter' instructions. */
4062static rtx
4063m32c_all_frame_related (rtx par)
4064{
4065 int len = XVECLEN (par, 0);
4066 int i;
4067
4068 for (i = 0; i < len; i++)
4069 F (XVECEXP (par, 0, i));
4070
4071 return par;
4072}
4073
4074/* Emits the prologue. See the frame layout comment earlier in this
 4075 file. We can reserve up to 256 bytes with the ENTER opcode; beyond
 4076 that we manually update sp. */
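/* For instance, a 300-byte frame does not fit ENTER's range, so the
   code below lets ENTER reserve the first 254 bytes and then explicitly
   subtracts the remaining 46 from the stack pointer.  */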
4077void
4078m32c_emit_prologue (void)
4079{
4080 int frame_size, extra_frame_size = 0, reg_save_size;
4081 int complex_prologue = 0;
4082
4083 cfun->machine->is_leaf = m32c_leaf_function_p ();
4084 if (interrupt_p (cfun->decl))
4085 {
4086 cfun->machine->is_interrupt = 1;
4087 complex_prologue = 1;
4088 }
65655f79
DD
4089 else if (bank_switch_p (cfun->decl))
4090 warning (OPT_Wattributes,
4091 "%<bank_switch%> has no effect on non-interrupt functions");
38b2d076
DD
4092
4093 reg_save_size = m32c_pushm_popm (PP_justcount);
4094
4095 if (interrupt_p (cfun->decl))
65655f79
DD
4096 {
4097 if (bank_switch_p (cfun->decl))
4098 emit_insn (gen_fset_b ());
4099 else if (cfun->machine->intr_pushm)
4100 emit_insn (gen_pushm (GEN_INT (cfun->machine->intr_pushm)));
4101 }
38b2d076
DD
4102
4103 frame_size =
4104 m32c_initial_elimination_offset (FB_REGNO, SP_REGNO) - reg_save_size;
4105 if (frame_size == 0
38b2d076
DD
4106 && !m32c_function_needs_enter ())
4107 cfun->machine->use_rts = 1;
4108
ed1332ee
NC
4109 if (flag_stack_usage_info)
4110 current_function_static_stack_size = frame_size;
4111
38b2d076
DD
4112 if (frame_size > 254)
4113 {
4114 extra_frame_size = frame_size - 254;
4115 frame_size = 254;
4116 }
4117 if (cfun->machine->use_rts == 0)
4118 F (emit_insn (m32c_all_frame_related
4119 (TARGET_A16
fa9fd28a
RIL
4120 ? gen_prologue_enter_16 (GEN_INT (frame_size + 2))
4121 : gen_prologue_enter_24 (GEN_INT (frame_size + 4)))));
38b2d076
DD
4122
4123 if (extra_frame_size)
4124 {
4125 complex_prologue = 1;
4126 if (TARGET_A16)
4127 F (emit_insn (gen_addhi3 (gen_rtx_REG (HImode, SP_REGNO),
4128 gen_rtx_REG (HImode, SP_REGNO),
4129 GEN_INT (-extra_frame_size))));
4130 else
4131 F (emit_insn (gen_addpsi3 (gen_rtx_REG (PSImode, SP_REGNO),
4132 gen_rtx_REG (PSImode, SP_REGNO),
4133 GEN_INT (-extra_frame_size))));
4134 }
4135
4136 complex_prologue += m32c_pushm_popm (PP_pushm);
4137
4138 /* This just emits a comment into the .s file for debugging. */
4139 if (complex_prologue)
4140 emit_insn (gen_prologue_end ());
4141}
4142
4143/* Likewise, for the epilogue. The only exception is that, for
4144 interrupts, we must manually unwind the frame as the REIT opcode
4145 doesn't do that. */
4146void
4147m32c_emit_epilogue (void)
4148{
f0679612
DD
4149 int popm_count = m32c_pushm_popm (PP_justcount);
4150
38b2d076 4151 /* This just emits a comment into the .s file for debugging. */
f0679612 4152 if (popm_count > 0 || cfun->machine->is_interrupt)
38b2d076
DD
4153 emit_insn (gen_epilogue_start ());
4154
f0679612
DD
4155 if (popm_count > 0)
4156 m32c_pushm_popm (PP_popm);
38b2d076
DD
4157
4158 if (cfun->machine->is_interrupt)
4159 {
ef4bddc2 4160 machine_mode spmode = TARGET_A16 ? HImode : PSImode;
38b2d076 4161
65655f79
DD
4162 /* REIT clears B flag and restores $fp for us, but we still
4163 have to fix up the stack. USE_RTS just means we didn't
4164 emit ENTER. */
4165 if (!cfun->machine->use_rts)
4166 {
4167 emit_move_insn (gen_rtx_REG (spmode, A0_REGNO),
4168 gen_rtx_REG (spmode, FP_REGNO));
4169 emit_move_insn (gen_rtx_REG (spmode, SP_REGNO),
4170 gen_rtx_REG (spmode, A0_REGNO));
4171 /* We can't just add this to the POPM because it would be in
4172 the wrong order, and wouldn't fix the stack if we're bank
4173 switching. */
4174 if (TARGET_A16)
4175 emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, FP_REGNO)));
4176 else
4177 emit_insn (gen_poppsi (gen_rtx_REG (PSImode, FP_REGNO)));
4178 }
4179 if (!bank_switch_p (cfun->decl) && cfun->machine->intr_pushm)
4180 emit_insn (gen_popm (GEN_INT (cfun->machine->intr_pushm)));
4181
402f2db8
DD
4182 /* The FREIT (Fast REturn from InTerrupt) instruction should be
4183 generated only for M32C/M32CM targets (generate the REIT
4184 instruction otherwise). */
65655f79 4185 if (fast_interrupt_p (cfun->decl))
402f2db8
DD
4186 {
4187 /* Check if fast_attribute is set for M32C or M32CM. */
4188 if (TARGET_A24)
4189 {
4190 emit_jump_insn (gen_epilogue_freit ());
4191 }
4192 /* If fast_interrupt attribute is set for an R8C or M16C
 4193 target, ignore this attribute and generate the REIT
4194 instruction. */
4195 else
4196 {
4197 warning (OPT_Wattributes,
4198 "%<fast_interrupt%> attribute directive ignored");
4199 emit_jump_insn (gen_epilogue_reit_16 ());
4200 }
4201 }
65655f79 4202 else if (TARGET_A16)
0e0642aa
RIL
4203 emit_jump_insn (gen_epilogue_reit_16 ());
4204 else
4205 emit_jump_insn (gen_epilogue_reit_24 ());
38b2d076
DD
4206 }
4207 else if (cfun->machine->use_rts)
4208 emit_jump_insn (gen_epilogue_rts ());
0e0642aa
RIL
4209 else if (TARGET_A16)
4210 emit_jump_insn (gen_epilogue_exitd_16 ());
38b2d076 4211 else
0e0642aa 4212 emit_jump_insn (gen_epilogue_exitd_24 ());
38b2d076
DD
4213}
4214
4215void
4216m32c_emit_eh_epilogue (rtx ret_addr)
4217{
4218 /* R0[R2] has the stack adjustment. R1[R3] has the address to
4219 return to. We have to fudge the stack, pop everything, pop SP
4220 (fudged), and return (fudged). This is actually easier to do in
4221 assembler, so punt to libgcc. */
4222 emit_jump_insn (gen_eh_epilogue (ret_addr, cfun->machine->eh_stack_adjust));
c41c1387 4223 /* emit_clobber (gen_rtx_REG (HImode, R0L_REGNO)); */
38b2d076
DD
4224}
4225
16659fcf
DD
4226/* Indicate which flags must be properly set for a given conditional. */
4227static int
4228flags_needed_for_conditional (rtx cond)
4229{
4230 switch (GET_CODE (cond))
4231 {
4232 case LE:
4233 case GT:
4234 return FLAGS_OSZ;
4235 case LEU:
4236 case GTU:
4237 return FLAGS_ZC;
4238 case LT:
4239 case GE:
4240 return FLAGS_OS;
4241 case LTU:
4242 case GEU:
4243 return FLAGS_C;
4244 case EQ:
4245 case NE:
4246 return FLAGS_Z;
4247 default:
4248 return FLAGS_N;
4249 }
4250}
4251
4252#define DEBUG_CMP 0
4253
4254/* Returns true if a compare insn is redundant because it would only
4255 set flags that are already set correctly. */
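/* For example, a cmp.w #0,r0 that only feeds an EQ/NE branch can be
   dropped when the insn just before it was a mov.w whose source or
   destination is r0, since that mov already left the Z flag describing
   the value.  */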
4256static bool
84034c69 4257m32c_compare_redundant (rtx_insn *cmp, rtx *operands)
16659fcf
DD
4258{
4259 int flags_needed;
4260 int pflags;
84034c69
DM
4261 rtx_insn *prev;
4262 rtx pp, next;
444d6efe 4263 rtx op0, op1;
16659fcf
DD
4264#if DEBUG_CMP
4265 int prev_icode, i;
4266#endif
4267
4268 op0 = operands[0];
4269 op1 = operands[1];
16659fcf
DD
4270
4271#if DEBUG_CMP
4272 fprintf(stderr, "\n\033[32mm32c_compare_redundant\033[0m\n");
4273 debug_rtx(cmp);
4274 for (i=0; i<2; i++)
4275 {
4276 fprintf(stderr, "operands[%d] = ", i);
4277 debug_rtx(operands[i]);
4278 }
4279#endif
4280
4281 next = next_nonnote_insn (cmp);
4282 if (!next || !INSN_P (next))
4283 {
4284#if DEBUG_CMP
4285 fprintf(stderr, "compare not followed by insn\n");
4286 debug_rtx(next);
4287#endif
4288 return false;
4289 }
4290 if (GET_CODE (PATTERN (next)) == SET
4291 && GET_CODE (XEXP ( PATTERN (next), 1)) == IF_THEN_ELSE)
4292 {
4293 next = XEXP (XEXP (PATTERN (next), 1), 0);
4294 }
4295 else if (GET_CODE (PATTERN (next)) == SET)
4296 {
4297 /* If this is a conditional, flags_needed will be something
4298 other than FLAGS_N, which we test below. */
4299 next = XEXP (PATTERN (next), 1);
4300 }
4301 else
4302 {
4303#if DEBUG_CMP
4304 fprintf(stderr, "compare not followed by conditional\n");
4305 debug_rtx(next);
4306#endif
4307 return false;
4308 }
4309#if DEBUG_CMP
4310 fprintf(stderr, "conditional is: ");
4311 debug_rtx(next);
4312#endif
4313
4314 flags_needed = flags_needed_for_conditional (next);
4315 if (flags_needed == FLAGS_N)
4316 {
4317#if DEBUG_CMP
4318 fprintf(stderr, "compare not followed by conditional\n");
4319 debug_rtx(next);
4320#endif
4321 return false;
4322 }
4323
4324 /* Compare doesn't set overflow and carry the same way that
4325 arithmetic instructions do, so we can't replace those. */
4326 if (flags_needed & FLAGS_OC)
4327 return false;
4328
4329 prev = cmp;
4330 do {
4331 prev = prev_nonnote_insn (prev);
4332 if (!prev)
4333 {
4334#if DEBUG_CMP
4335 fprintf(stderr, "No previous insn.\n");
4336#endif
4337 return false;
4338 }
4339 if (!INSN_P (prev))
4340 {
4341#if DEBUG_CMP
4342 fprintf(stderr, "Previous insn is a non-insn.\n");
4343#endif
4344 return false;
4345 }
4346 pp = PATTERN (prev);
4347 if (GET_CODE (pp) != SET)
4348 {
4349#if DEBUG_CMP
4350 fprintf(stderr, "Previous insn is not a SET.\n");
4351#endif
4352 return false;
4353 }
4354 pflags = get_attr_flags (prev);
4355
4356 /* Looking up attributes of previous insns corrupted the recog
4357 tables. */
4358 INSN_UID (cmp) = -1;
4359 recog (PATTERN (cmp), cmp, 0);
4360
4361 if (pflags == FLAGS_N
4362 && reg_mentioned_p (op0, pp))
4363 {
4364#if DEBUG_CMP
4365 fprintf(stderr, "intermediate non-flags insn uses op:\n");
4366 debug_rtx(prev);
4367#endif
4368 return false;
4369 }
b3c5a409
DD
4370
4371 /* Check for comparisons against memory - between volatiles and
4372 aliases, we just can't risk this one. */
4373 if (GET_CODE (operands[0]) == MEM
 4374 || GET_CODE (operands[1]) == MEM)
4375 {
4376#if DEBUG_CMP
4377 fprintf(stderr, "comparisons with memory:\n");
4378 debug_rtx(prev);
4379#endif
4380 return false;
4381 }
4382
4383 /* Check for PREV changing a register that's used to compute a
4384 value in CMP, even if it doesn't otherwise change flags. */
4385 if (GET_CODE (operands[0]) == REG
4386 && rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[0]))
4387 {
4388#if DEBUG_CMP
4389 fprintf(stderr, "sub-value affected, op0:\n");
4390 debug_rtx(prev);
4391#endif
4392 return false;
4393 }
4394 if (GET_CODE (operands[1]) == REG
4395 && rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[1]))
4396 {
4397#if DEBUG_CMP
4398 fprintf(stderr, "sub-value affected, op1:\n");
4399 debug_rtx(prev);
4400#endif
4401 return false;
4402 }
4403
16659fcf
DD
4404 } while (pflags == FLAGS_N);
4405#if DEBUG_CMP
4406 fprintf(stderr, "previous flag-setting insn:\n");
4407 debug_rtx(prev);
4408 debug_rtx(pp);
4409#endif
4410
4411 if (GET_CODE (pp) == SET
4412 && GET_CODE (XEXP (pp, 0)) == REG
4413 && REGNO (XEXP (pp, 0)) == FLG_REGNO
4414 && GET_CODE (XEXP (pp, 1)) == COMPARE)
4415 {
4416 /* Adjacent cbranches must have the same operands to be
4417 redundant. */
4418 rtx pop0 = XEXP (XEXP (pp, 1), 0);
4419 rtx pop1 = XEXP (XEXP (pp, 1), 1);
4420#if DEBUG_CMP
4421 fprintf(stderr, "adjacent cbranches\n");
4422 debug_rtx(pop0);
4423 debug_rtx(pop1);
4424#endif
4425 if (rtx_equal_p (op0, pop0)
4426 && rtx_equal_p (op1, pop1))
4427 return true;
4428#if DEBUG_CMP
4429 fprintf(stderr, "prev cmp not same\n");
4430#endif
4431 return false;
4432 }
4433
4434 /* Else the previous insn must be a SET, with either the source or
4435 dest equal to operands[0], and operands[1] must be zero. */
4436
4437 if (!rtx_equal_p (op1, const0_rtx))
4438 {
4439#if DEBUG_CMP
4440 fprintf(stderr, "operands[1] not const0_rtx\n");
4441#endif
4442 return false;
4443 }
4444 if (GET_CODE (pp) != SET)
4445 {
4446#if DEBUG_CMP
4447 fprintf (stderr, "pp not set\n");
4448#endif
4449 return false;
4450 }
4451 if (!rtx_equal_p (op0, SET_SRC (pp))
4452 && !rtx_equal_p (op0, SET_DEST (pp)))
4453 {
4454#if DEBUG_CMP
4455 fprintf(stderr, "operands[0] not found in set\n");
4456#endif
4457 return false;
4458 }
4459
4460#if DEBUG_CMP
4461 fprintf(stderr, "cmp flags %x prev flags %x\n", flags_needed, pflags);
4462#endif
4463 if ((pflags & flags_needed) == flags_needed)
4464 return true;
4465
4466 return false;
4467}
4468
4469/* Return the pattern for a compare. This will be commented out if
4470 the compare is redundant, else a normal pattern is returned. Thus,
4471 the assembler output says where the compare would have been. */
4472char *
84034c69 4473m32c_output_compare (rtx_insn *insn, rtx *operands)
16659fcf 4474{
0a2aaacc 4475 static char templ[] = ";cmp.b\t%1,%0";
16659fcf
DD
4476 /* ^ 5 */
4477
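 /* templ[5] is the size suffix: GET_MODE_SIZE is 1, 2 or 4 here,
    indexing 'b', 'w' or 'l' in the lookup string.  */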
0a2aaacc 4478 templ[5] = " bwll"[GET_MODE_SIZE(GET_MODE(operands[0]))];
16659fcf
DD
4479 if (m32c_compare_redundant (insn, operands))
4480 {
4481#if DEBUG_CMP
4482 fprintf(stderr, "cbranch: cmp not needed\n");
4483#endif
0a2aaacc 4484 return templ;
16659fcf
DD
4485 }
4486
4487#if DEBUG_CMP
b3c5a409 4488 fprintf(stderr, "cbranch: cmp needed: `%s'\n", templ + 1);
16659fcf 4489#endif
0a2aaacc 4490 return templ + 1;
16659fcf
DD
4491}
4492
5abd2125
JS
4493#undef TARGET_ENCODE_SECTION_INFO
4494#define TARGET_ENCODE_SECTION_INFO m32c_encode_section_info
4495
b52b1749
AS
4496/* If the frame pointer isn't used, we detect it manually. But the
4497 stack pointer doesn't have as flexible addressing as the frame
 4498 pointer, so we always tell GCC that the frame pointer is required. */
4499
4500#undef TARGET_FRAME_POINTER_REQUIRED
4501#define TARGET_FRAME_POINTER_REQUIRED hook_bool_void_true
4502
38b2d076
DD
4503/* The Global `targetm' Variable. */
4504
4505struct gcc_target targetm = TARGET_INITIALIZER;
4506
4507#include "gt-m32c.h"