/* Target Code for R8C/M16C/M32C
   Copyright (C) 2005-2017 Free Software Foundation, Inc.
   Contributed by Red Hat.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "stringpool.h"
#include "attribs.h"
#include "df.h"
#include "memmodel.h"
#include "tm_p.h"
#include "optabs.h"
#include "regs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "reload.h"
#include "stor-layout.h"
#include "varasm.h"
#include "calls.h"
#include "explow.h"
#include "expr.h"
#include "tm-constrs.h"
#include "builtins.h"

/* This file should be included last.  */
#include "target-def.h"

/* Prototypes */

/* Used by m32c_pushm_popm.  */
typedef enum
{
  PP_pushm,
  PP_popm,
  PP_justcount
} Push_Pop_Type;

static bool m32c_function_needs_enter (void);
static tree interrupt_handler (tree *, tree, tree, int, bool *);
static tree function_vector_handler (tree *, tree, tree, int, bool *);
static int interrupt_p (tree node);
static int bank_switch_p (tree node);
static int fast_interrupt_p (tree node);
static int interrupt_p (tree node);
static bool m32c_asm_integer (rtx, unsigned int, int);
static int m32c_comp_type_attributes (const_tree, const_tree);
static bool m32c_fixed_condition_code_regs (unsigned int *, unsigned int *);
static struct machine_function *m32c_init_machine_status (void);
static void m32c_insert_attributes (tree, tree *);
static bool m32c_legitimate_address_p (machine_mode, rtx, bool);
static bool m32c_addr_space_legitimate_address_p (machine_mode, rtx, bool, addr_space_t);
static rtx m32c_function_arg (cumulative_args_t, machine_mode,
                              const_tree, bool);
static bool m32c_pass_by_reference (cumulative_args_t, machine_mode,
                                    const_tree, bool);
static void m32c_function_arg_advance (cumulative_args_t, machine_mode,
                                       const_tree, bool);
static unsigned int m32c_function_arg_boundary (machine_mode, const_tree);
static int m32c_pushm_popm (Push_Pop_Type);
static bool m32c_strict_argument_naming (cumulative_args_t);
static rtx m32c_struct_value_rtx (tree, int);
static rtx m32c_subreg (machine_mode, rtx, machine_mode, int);
static int need_to_save (int);
static rtx m32c_function_value (const_tree, const_tree, bool);
static rtx m32c_libcall_value (machine_mode, const_rtx);

/* Returns true if an address is specified, else false.  */
static bool m32c_get_pragma_address (const char *varname, unsigned *addr);

#define SYMBOL_FLAG_FUNCVEC_FUNCTION    (SYMBOL_FLAG_MACH_DEP << 0)

#define streq(a,b) (strcmp ((a), (b)) == 0)

/* Internal support routines */

/* Debugging statements are tagged with DEBUG0 only so that they can
   be easily enabled individually, by replacing the '0' with '1' as
   needed.  */
#define DEBUG0 0
#define DEBUG1 1

#if DEBUG0
#include "print-tree.h"
/* This is needed by some of the commented-out debug statements
   below.  */
static char const *class_names[LIM_REG_CLASSES] = REG_CLASS_NAMES;
#endif
static int class_contents[LIM_REG_CLASSES][1] = REG_CLASS_CONTENTS;

/* These are all to support encode_pattern().  */
static char pattern[30], *patternp;
static GTY(()) rtx patternr[30];
#define RTX_IS(x) (streq (pattern, x))

/* Some macros to simplify the logic throughout this file.  */
#define IS_MEM_REGNO(regno) ((regno) >= MEM0_REGNO && (regno) <= MEM7_REGNO)
#define IS_MEM_REG(rtx) (GET_CODE (rtx) == REG && IS_MEM_REGNO (REGNO (rtx)))

#define IS_CR_REGNO(regno) ((regno) >= SB_REGNO && (regno) <= PC_REGNO)
#define IS_CR_REG(rtx) (GET_CODE (rtx) == REG && IS_CR_REGNO (REGNO (rtx)))

static int
far_addr_space_p (rtx x)
{
  if (GET_CODE (x) != MEM)
    return 0;
#if DEBUG0
  fprintf (stderr, "\033[35mfar_addr_space: "); debug_rtx (x);
  fprintf (stderr, " = %d\033[0m\n", MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR);
#endif
  return MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR;
}
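
/* For example (illustrative): a MEM whose address was tagged with the
   __far named address space - say, a load through a "char __far *"
   pointer - has MEM_ADDR_SPACE equal to ADDR_SPACE_FAR and makes this
   return nonzero, while ordinary generic-space MEMs return 0.  */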
138
38b2d076
DD
139/* We do most RTX matching by converting the RTX into a string, and
140 using string compares. This vastly simplifies the logic in many of
141 the functions in this file.
142
143 On exit, pattern[] has the encoded string (use RTX_IS("...") to
144 compare it) and patternr[] has pointers to the nodes in the RTX
145 corresponding to each character in the encoded string. The latter
146 is mostly used by print_operand().
147
148 Unrecognized patterns have '?' in them; this shows up when the
149 assembler complains about syntax errors.
150*/
151
152static void
153encode_pattern_1 (rtx x)
154{
155 int i;
156
157 if (patternp == pattern + sizeof (pattern) - 2)
158 {
159 patternp[-1] = '?';
160 return;
161 }
162
163 patternr[patternp - pattern] = x;
164
165 switch (GET_CODE (x))
166 {
167 case REG:
168 *patternp++ = 'r';
169 break;
170 case SUBREG:
171 if (GET_MODE_SIZE (GET_MODE (x)) !=
172 GET_MODE_SIZE (GET_MODE (XEXP (x, 0))))
173 *patternp++ = 'S';
45d898e4
DD
174 if (GET_MODE (x) == PSImode
175 && GET_CODE (XEXP (x, 0)) == REG)
176 *patternp++ = 'S';
38b2d076
DD
177 encode_pattern_1 (XEXP (x, 0));
178 break;
179 case MEM:
180 *patternp++ = 'm';
0c57f4bf 181 /* FALLTHRU */
38b2d076
DD
182 case CONST:
183 encode_pattern_1 (XEXP (x, 0));
184 break;
5fd5d713
DD
185 case SIGN_EXTEND:
186 *patternp++ = '^';
187 *patternp++ = 'S';
188 encode_pattern_1 (XEXP (x, 0));
189 break;
190 case ZERO_EXTEND:
191 *patternp++ = '^';
192 *patternp++ = 'Z';
193 encode_pattern_1 (XEXP (x, 0));
194 break;
38b2d076
DD
195 case PLUS:
196 *patternp++ = '+';
197 encode_pattern_1 (XEXP (x, 0));
198 encode_pattern_1 (XEXP (x, 1));
199 break;
200 case PRE_DEC:
201 *patternp++ = '>';
202 encode_pattern_1 (XEXP (x, 0));
203 break;
204 case POST_INC:
205 *patternp++ = '<';
206 encode_pattern_1 (XEXP (x, 0));
207 break;
208 case LO_SUM:
209 *patternp++ = 'L';
210 encode_pattern_1 (XEXP (x, 0));
211 encode_pattern_1 (XEXP (x, 1));
212 break;
213 case HIGH:
214 *patternp++ = 'H';
215 encode_pattern_1 (XEXP (x, 0));
216 break;
217 case SYMBOL_REF:
218 *patternp++ = 's';
219 break;
220 case LABEL_REF:
221 *patternp++ = 'l';
222 break;
223 case CODE_LABEL:
224 *patternp++ = 'c';
225 break;
226 case CONST_INT:
227 case CONST_DOUBLE:
228 *patternp++ = 'i';
229 break;
230 case UNSPEC:
231 *patternp++ = 'u';
232 *patternp++ = '0' + XCINT (x, 1, UNSPEC);
233 for (i = 0; i < XVECLEN (x, 0); i++)
234 encode_pattern_1 (XVECEXP (x, 0, i));
235 break;
236 case USE:
237 *patternp++ = 'U';
238 break;
239 case PARALLEL:
240 *patternp++ = '|';
241 for (i = 0; i < XVECLEN (x, 0); i++)
242 encode_pattern_1 (XVECEXP (x, 0, i));
243 break;
244 case EXPR_LIST:
245 *patternp++ = 'E';
246 encode_pattern_1 (XEXP (x, 0));
247 if (XEXP (x, 1))
248 encode_pattern_1 (XEXP (x, 1));
249 break;
250 default:
251 *patternp++ = '?';
252#if DEBUG0
253 fprintf (stderr, "can't encode pattern %s\n",
254 GET_RTX_NAME (GET_CODE (x)));
255 debug_rtx (x);
38b2d076
DD
256#endif
257 break;
258 }
259}
260
static void
encode_pattern (rtx x)
{
  patternp = pattern;
  encode_pattern_1 (x);
  *patternp = 0;
}
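
/* For example (a sketch for illustration): encoding
   (mem:HI (plus:HI (reg:HI fb) (const_int -4))) visits MEM, PLUS, REG
   and CONST_INT in turn and leaves "m+ri" in pattern[], with
   patternr[1] pointing at the PLUS, patternr[2] at the register and
   patternr[3] at the constant - exactly the string and slots that the
   RTX_IS ("m+ri") users later in this file rely on.  */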
268
269/* Since register names indicate the mode they're used in, we need a
270 way to determine which name to refer to the register with. Called
271 by print_operand(). */
272
273static const char *
ef4bddc2 274reg_name_with_mode (int regno, machine_mode mode)
38b2d076
DD
275{
276 int mlen = GET_MODE_SIZE (mode);
277 if (regno == R0_REGNO && mlen == 1)
278 return "r0l";
279 if (regno == R0_REGNO && (mlen == 3 || mlen == 4))
280 return "r2r0";
281 if (regno == R0_REGNO && mlen == 6)
282 return "r2r1r0";
283 if (regno == R0_REGNO && mlen == 8)
284 return "r3r1r2r0";
285 if (regno == R1_REGNO && mlen == 1)
286 return "r1l";
287 if (regno == R1_REGNO && (mlen == 3 || mlen == 4))
288 return "r3r1";
289 if (regno == A0_REGNO && TARGET_A16 && (mlen == 3 || mlen == 4))
290 return "a1a0";
291 return reg_names[regno];
292}
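
/* For example: with the tests above, R0_REGNO prints as "r0l" when
   used in QImode, falls through to the plain reg_names[] entry for
   HImode, and becomes the register pair "r2r0" for SImode values.  */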
293
294/* How many bytes a register uses on stack when it's pushed. We need
295 to know this because the push opcode needs to explicitly indicate
296 the size of the register, even though the name of the register
297 already tells it that. Used by m32c_output_reg_{push,pop}, which
298 is only used through calls to ASM_OUTPUT_REG_{PUSH,POP}. */
299
300static int
301reg_push_size (int regno)
302{
303 switch (regno)
304 {
305 case R0_REGNO:
306 case R1_REGNO:
307 return 2;
308 case R2_REGNO:
309 case R3_REGNO:
310 case FLG_REGNO:
311 return 2;
312 case A0_REGNO:
313 case A1_REGNO:
314 case SB_REGNO:
315 case FB_REGNO:
316 case SP_REGNO:
317 if (TARGET_A16)
318 return 2;
319 else
320 return 3;
321 default:
322 gcc_unreachable ();
323 }
324}
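
/* For example: reg_push_size (A0_REGNO) is 2 on the 16-bit-address
   parts (TARGET_A16) but 3 on the 24-bit families, while the data
   registers r0-r3 always take 2 bytes on the stack.  */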
325
38b2d076
DD
326/* Given two register classes, find the largest intersection between
327 them. If there is no intersection, return RETURNED_IF_EMPTY
328 instead. */
35bdbc69
AS
329static reg_class_t
330reduce_class (reg_class_t original_class, reg_class_t limiting_class,
331 reg_class_t returned_if_empty)
38b2d076 332{
35bdbc69
AS
333 HARD_REG_SET cc;
334 int i;
335 reg_class_t best = NO_REGS;
336 unsigned int best_size = 0;
38b2d076
DD
337
338 if (original_class == limiting_class)
339 return original_class;
340
35bdbc69
AS
341 cc = reg_class_contents[original_class];
342 AND_HARD_REG_SET (cc, reg_class_contents[limiting_class]);
38b2d076 343
38b2d076
DD
344 for (i = 0; i < LIM_REG_CLASSES; i++)
345 {
35bdbc69
AS
346 if (hard_reg_set_subset_p (reg_class_contents[i], cc))
347 if (best_size < reg_class_size[i])
38b2d076 348 {
35bdbc69
AS
349 best = (reg_class_t) i;
350 best_size = reg_class_size[i];
38b2d076
DD
351 }
352
353 }
354 if (best == NO_REGS)
355 return returned_if_empty;
356 return best;
357}
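
/* Usage note: the reload-class hooks below call this to narrow a class
   to the members that can really hold a value; e.g.
   m32c_preferred_reload_class does
   reduce_class (rclass, HL_REGS, rclass) for QImode operands, keeping
   only byte-capable registers and falling back to the original class
   when the intersection is empty.  */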
358
38b2d076
DD
359/* Used by m32c_register_move_cost to determine if a move is
360 impossibly expensive. */
0e607518 361static bool
ef4bddc2 362class_can_hold_mode (reg_class_t rclass, machine_mode mode)
38b2d076
DD
363{
364 /* Cache the results: 0=untested 1=no 2=yes */
365 static char results[LIM_REG_CLASSES][MAX_MACHINE_MODE];
0e607518
AS
366
367 if (results[(int) rclass][mode] == 0)
38b2d076 368 {
0e607518 369 int r;
0a2aaacc 370 results[rclass][mode] = 1;
38b2d076 371 for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
0e607518 372 if (in_hard_reg_set_p (reg_class_contents[(int) rclass], mode, r)
38b2d076
DD
373 && HARD_REGNO_MODE_OK (r, mode))
374 {
0e607518
AS
375 results[rclass][mode] = 2;
376 break;
38b2d076
DD
377 }
378 }
0e607518 379
38b2d076
DD
380#if DEBUG0
381 fprintf (stderr, "class %s can hold %s? %s\n",
0e607518 382 class_names[(int) rclass], mode_name[mode],
0a2aaacc 383 (results[rclass][mode] == 2) ? "yes" : "no");
38b2d076 384#endif
0e607518 385 return results[(int) rclass][mode] == 2;
38b2d076
DD
386}
387
388/* Run-time Target Specification. */
389
/* Memregs are memory locations that gcc treats like general
   registers, as there are a limited number of true registers and the
   m32c families can use memory in most places that registers can be
   used.

   However, since memory accesses are more expensive than registers,
   we allow the user to limit the number of memregs available, in
   order to try to persuade gcc to try harder to use real registers.

   Memregs are provided by lib1funcs.S.  */

int ok_to_change_target_memregs = TRUE;
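
/* The limit above is exposed on the command line: -memregs=N sets
   target_memregs (validated in m32c_option_override below).  For
   example, -memregs=8 leaves only the first four 16-bit memreg words
   usable; the rest are marked fixed in
   m32c_conditional_register_usage.  */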
403
f28f2337
AS
404/* Implements TARGET_OPTION_OVERRIDE. */
405
406#undef TARGET_OPTION_OVERRIDE
407#define TARGET_OPTION_OVERRIDE m32c_option_override
408
409static void
410m32c_option_override (void)
38b2d076 411{
f28f2337 412 /* We limit memregs to 0..16, and provide a default. */
bbfc9a8c 413 if (global_options_set.x_target_memregs)
38b2d076
DD
414 {
415 if (target_memregs < 0 || target_memregs > 16)
416 error ("invalid target memregs value '%d'", target_memregs);
417 }
418 else
07127a0a 419 target_memregs = 16;
18b80268
DD
420
421 if (TARGET_A24)
422 flag_ivopts = 0;
0685e770
DD
423
424 /* This target defaults to strict volatile bitfields. */
36acc1a2 425 if (flag_strict_volatile_bitfields < 0 && abi_version_at_least(2))
0685e770 426 flag_strict_volatile_bitfields = 1;
d123bf41
DD
427
428 /* r8c/m16c have no 16-bit indirect call, so thunks are involved.
429 This is always worse than an absolute call. */
430 if (TARGET_A16)
431 flag_no_function_cse = 1;
a4403164
DD
432
433 /* This wants to put insns between compares and their jumps. */
434 /* FIXME: The right solution is to properly trace the flags register
435 values, but that is too much work for stage 4. */
436 flag_combine_stack_adjustments = 0;
d123bf41
DD
437}
438
439#undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
440#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m32c_override_options_after_change
441
442static void
443m32c_override_options_after_change (void)
444{
445 if (TARGET_A16)
446 flag_no_function_cse = 1;
38b2d076
DD
447}
448
449/* Defining data structures for per-function information */
450
451/* The usual; we set up our machine_function data. */
452static struct machine_function *
453m32c_init_machine_status (void)
454{
766090c2 455 return ggc_cleared_alloc<machine_function> ();
38b2d076
DD
456}
457
458/* Implements INIT_EXPANDERS. We just set up to call the above
459 function. */
460void
461m32c_init_expanders (void)
462{
463 init_machine_status = m32c_init_machine_status;
464}
465
466/* Storage Layout */
467
38b2d076
DD
468/* Register Basics */
469
470/* Basic Characteristics of Registers */
471
472/* Whether a mode fits in a register is complex enough to warrant a
473 table. */
474static struct
475{
476 char qi_regs;
477 char hi_regs;
478 char pi_regs;
479 char si_regs;
480 char di_regs;
481} nregs_table[FIRST_PSEUDO_REGISTER] =
482{
483 { 1, 1, 2, 2, 4 }, /* r0 */
484 { 0, 1, 0, 0, 0 }, /* r2 */
485 { 1, 1, 2, 2, 0 }, /* r1 */
486 { 0, 1, 0, 0, 0 }, /* r3 */
487 { 0, 1, 1, 0, 0 }, /* a0 */
488 { 0, 1, 1, 0, 0 }, /* a1 */
489 { 0, 1, 1, 0, 0 }, /* sb */
490 { 0, 1, 1, 0, 0 }, /* fb */
491 { 0, 1, 1, 0, 0 }, /* sp */
492 { 1, 1, 1, 0, 0 }, /* pc */
493 { 0, 0, 0, 0, 0 }, /* fl */
494 { 1, 1, 1, 0, 0 }, /* ap */
495 { 1, 1, 2, 2, 4 }, /* mem0 */
496 { 1, 1, 2, 2, 4 }, /* mem1 */
497 { 1, 1, 2, 2, 4 }, /* mem2 */
498 { 1, 1, 2, 2, 4 }, /* mem3 */
499 { 1, 1, 2, 2, 4 }, /* mem4 */
500 { 1, 1, 2, 2, 0 }, /* mem5 */
501 { 1, 1, 2, 2, 0 }, /* mem6 */
502 { 1, 1, 0, 0, 0 }, /* mem7 */
503};
504
5efd84c5
NF
505/* Implements TARGET_CONDITIONAL_REGISTER_USAGE. We adjust the number
506 of available memregs, and select which registers need to be preserved
38b2d076
DD
507 across calls based on the chip family. */
508
5efd84c5
NF
509#undef TARGET_CONDITIONAL_REGISTER_USAGE
510#define TARGET_CONDITIONAL_REGISTER_USAGE m32c_conditional_register_usage
d6d17ae7 511void
38b2d076
DD
512m32c_conditional_register_usage (void)
513{
38b2d076
DD
514 int i;
515
516 if (0 <= target_memregs && target_memregs <= 16)
517 {
518 /* The command line option is bytes, but our "registers" are
519 16-bit words. */
65655f79 520 for (i = (target_memregs+1)/2; i < 8; i++)
38b2d076
DD
521 {
522 fixed_regs[MEM0_REGNO + i] = 1;
523 CLEAR_HARD_REG_BIT (reg_class_contents[MEM_REGS], MEM0_REGNO + i);
524 }
525 }
526
527 /* M32CM and M32C preserve more registers across function calls. */
528 if (TARGET_A24)
529 {
530 call_used_regs[R1_REGNO] = 0;
531 call_used_regs[R2_REGNO] = 0;
532 call_used_regs[R3_REGNO] = 0;
533 call_used_regs[A0_REGNO] = 0;
534 call_used_regs[A1_REGNO] = 0;
535 }
536}
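
/* For example: with the default -memregs=16 all eight memreg words
   stay available, and on the A24 (M32CM/M32C) families r1-r3 and
   a0/a1 become call-saved, so need_to_save/m32c_pushm_popm below will
   push them whenever an ordinary function uses them.  */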
537
538/* How Values Fit in Registers */
539
540/* Implements HARD_REGNO_NREGS. This is complicated by the fact that
541 different registers are different sizes from each other, *and* may
542 be different sizes in different chip families. */
b8a669d0 543static int
ef4bddc2 544m32c_hard_regno_nregs_1 (int regno, machine_mode mode)
38b2d076
DD
545{
546 if (regno == FLG_REGNO && mode == CCmode)
547 return 1;
548 if (regno >= FIRST_PSEUDO_REGISTER)
549 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
550
551 if (regno >= MEM0_REGNO && regno <= MEM7_REGNO)
552 return (GET_MODE_SIZE (mode) + 1) / 2;
553
554 if (GET_MODE_SIZE (mode) <= 1)
555 return nregs_table[regno].qi_regs;
556 if (GET_MODE_SIZE (mode) <= 2)
557 return nregs_table[regno].hi_regs;
5fd5d713 558 if (regno == A0_REGNO && mode == SImode && TARGET_A16)
38b2d076
DD
559 return 2;
560 if ((GET_MODE_SIZE (mode) <= 3 || mode == PSImode) && TARGET_A24)
561 return nregs_table[regno].pi_regs;
562 if (GET_MODE_SIZE (mode) <= 4)
563 return nregs_table[regno].si_regs;
564 if (GET_MODE_SIZE (mode) <= 8)
565 return nregs_table[regno].di_regs;
566 return 0;
567}
568
int
m32c_hard_regno_nregs (int regno, machine_mode mode)
{
  int rv = m32c_hard_regno_nregs_1 (regno, mode);
  return rv ? rv : 1;
}
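
/* For example: m32c_hard_regno_nregs (R0_REGNO, SImode) is 2 (the
   value lives in the r2r0 pair), while A0_REGNO in QImode yields 0
   from the helper and is reported as 1 here; the "cannot hold it at
   all" case is instead detected by m32c_hard_regno_ok, which looks at
   the raw helper result.  */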
575
38b2d076
DD
576/* Implements HARD_REGNO_MODE_OK. The above function does the work
577 already; just test its return value. */
578int
ef4bddc2 579m32c_hard_regno_ok (int regno, machine_mode mode)
38b2d076 580{
b8a669d0 581 return m32c_hard_regno_nregs_1 (regno, mode) != 0;
38b2d076
DD
582}
583
584/* Implements MODES_TIEABLE_P. In general, modes aren't tieable since
585 registers are all different sizes. However, since most modes are
586 bigger than our registers anyway, it's easier to implement this
587 function that way, leaving QImode as the only unique case. */
588int
ef4bddc2 589m32c_modes_tieable_p (machine_mode m1, machine_mode m2)
38b2d076
DD
590{
591 if (GET_MODE_SIZE (m1) == GET_MODE_SIZE (m2))
592 return 1;
593
07127a0a 594#if 0
38b2d076
DD
595 if (m1 == QImode || m2 == QImode)
596 return 0;
07127a0a 597#endif
38b2d076
DD
598
599 return 1;
600}
601
602/* Register Classes */
603
604/* Implements REGNO_REG_CLASS. */
444d6efe 605enum reg_class
38b2d076
DD
606m32c_regno_reg_class (int regno)
607{
608 switch (regno)
609 {
610 case R0_REGNO:
611 return R0_REGS;
612 case R1_REGNO:
613 return R1_REGS;
614 case R2_REGNO:
615 return R2_REGS;
616 case R3_REGNO:
617 return R3_REGS;
618 case A0_REGNO:
22843acd 619 return A0_REGS;
38b2d076 620 case A1_REGNO:
22843acd 621 return A1_REGS;
38b2d076
DD
622 case SB_REGNO:
623 return SB_REGS;
624 case FB_REGNO:
625 return FB_REGS;
626 case SP_REGNO:
627 return SP_REGS;
628 case FLG_REGNO:
629 return FLG_REGS;
630 default:
631 if (IS_MEM_REGNO (regno))
632 return MEM_REGS;
633 return ALL_REGS;
634 }
635}
636
38b2d076
DD
637/* Implements REGNO_OK_FOR_BASE_P. */
638int
639m32c_regno_ok_for_base_p (int regno)
640{
641 if (regno == A0_REGNO
642 || regno == A1_REGNO || regno >= FIRST_PSEUDO_REGISTER)
643 return 1;
644 return 0;
645}
646
b05933f5 647/* Implements TARGET_PREFERRED_RELOAD_CLASS. In general, prefer general
38b2d076 648 registers of the appropriate size. */
b05933f5
AS
649
650#undef TARGET_PREFERRED_RELOAD_CLASS
651#define TARGET_PREFERRED_RELOAD_CLASS m32c_preferred_reload_class
652
653static reg_class_t
654m32c_preferred_reload_class (rtx x, reg_class_t rclass)
38b2d076 655{
b05933f5 656 reg_class_t newclass = rclass;
38b2d076 657
f75e07bc 658#if DEBUG0
38b2d076
DD
659 fprintf (stderr, "\npreferred_reload_class for %s is ",
660 class_names[rclass]);
661#endif
662 if (rclass == NO_REGS)
663 rclass = GET_MODE (x) == QImode ? HL_REGS : R03_REGS;
664
0e607518 665 if (reg_classes_intersect_p (rclass, CR_REGS))
38b2d076
DD
666 {
667 switch (GET_MODE (x))
668 {
4e10a5a7 669 case E_QImode:
38b2d076
DD
670 newclass = HL_REGS;
671 break;
672 default:
673 /* newclass = HI_REGS; */
674 break;
675 }
676 }
677
678 else if (newclass == QI_REGS && GET_MODE_SIZE (GET_MODE (x)) > 2)
679 newclass = SI_REGS;
680 else if (GET_MODE_SIZE (GET_MODE (x)) > 4
b05933f5 681 && ! reg_class_subset_p (R03_REGS, rclass))
38b2d076
DD
682 newclass = DI_REGS;
683
684 rclass = reduce_class (rclass, newclass, rclass);
685
686 if (GET_MODE (x) == QImode)
687 rclass = reduce_class (rclass, HL_REGS, rclass);
688
f75e07bc 689#if DEBUG0
38b2d076
DD
690 fprintf (stderr, "%s\n", class_names[rclass]);
691 debug_rtx (x);
692
693 if (GET_CODE (x) == MEM
694 && GET_CODE (XEXP (x, 0)) == PLUS
695 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
696 fprintf (stderr, "Glorm!\n");
697#endif
698 return rclass;
699}
700
b05933f5
AS
701/* Implements TARGET_PREFERRED_OUTPUT_RELOAD_CLASS. */
702
703#undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
704#define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS m32c_preferred_output_reload_class
705
706static reg_class_t
707m32c_preferred_output_reload_class (rtx x, reg_class_t rclass)
38b2d076
DD
708{
709 return m32c_preferred_reload_class (x, rclass);
710}
711
712/* Implements LIMIT_RELOAD_CLASS. We basically want to avoid using
713 address registers for reloads since they're needed for address
714 reloads. */
715int
ef4bddc2 716m32c_limit_reload_class (machine_mode mode, int rclass)
38b2d076 717{
f75e07bc 718#if DEBUG0
38b2d076
DD
719 fprintf (stderr, "limit_reload_class for %s: %s ->",
720 mode_name[mode], class_names[rclass]);
721#endif
722
723 if (mode == QImode)
724 rclass = reduce_class (rclass, HL_REGS, rclass);
725 else if (mode == HImode)
726 rclass = reduce_class (rclass, HI_REGS, rclass);
727 else if (mode == SImode)
728 rclass = reduce_class (rclass, SI_REGS, rclass);
729
730 if (rclass != A_REGS)
731 rclass = reduce_class (rclass, DI_REGS, rclass);
732
f75e07bc 733#if DEBUG0
38b2d076
DD
734 fprintf (stderr, " %s\n", class_names[rclass]);
735#endif
736 return rclass;
737}
738
/* Implements SECONDARY_RELOAD_CLASS.  QImode values have to be
   reloaded in r0 or r1, as those are the only real QImode registers.
   CR regs get reloaded through appropriately sized general or address
   registers.  */
743int
ef4bddc2 744m32c_secondary_reload_class (int rclass, machine_mode mode, rtx x)
38b2d076
DD
745{
746 int cc = class_contents[rclass][0];
747#if DEBUG0
748 fprintf (stderr, "\nsecondary reload class %s %s\n",
749 class_names[rclass], mode_name[mode]);
750 debug_rtx (x);
751#endif
752 if (mode == QImode
753 && GET_CODE (x) == MEM && (cc & ~class_contents[R23_REGS][0]) == 0)
754 return QI_REGS;
0e607518 755 if (reg_classes_intersect_p (rclass, CR_REGS)
38b2d076
DD
756 && GET_CODE (x) == REG
757 && REGNO (x) >= SB_REGNO && REGNO (x) <= SP_REGNO)
13a23442 758 return (TARGET_A16 || mode == HImode) ? HI_REGS : A_REGS;
38b2d076
DD
759 return NO_REGS;
760}
761
184866c5 762/* Implements TARGET_CLASS_LIKELY_SPILLED_P. A_REGS is needed for address
38b2d076 763 reloads. */
184866c5
AS
764
765#undef TARGET_CLASS_LIKELY_SPILLED_P
766#define TARGET_CLASS_LIKELY_SPILLED_P m32c_class_likely_spilled_p
767
768static bool
769m32c_class_likely_spilled_p (reg_class_t regclass)
38b2d076
DD
770{
771 if (regclass == A_REGS)
184866c5
AS
772 return true;
773
774 return (reg_class_size[(int) regclass] == 1);
38b2d076
DD
775}
776
c4831cff 777/* Implements TARGET_CLASS_MAX_NREGS. We calculate this according to its
38b2d076
DD
778 documented meaning, to avoid potential inconsistencies with actual
779 class definitions. */
c4831cff
AS
780
781#undef TARGET_CLASS_MAX_NREGS
782#define TARGET_CLASS_MAX_NREGS m32c_class_max_nregs
783
784static unsigned char
ef4bddc2 785m32c_class_max_nregs (reg_class_t regclass, machine_mode mode)
38b2d076 786{
c4831cff
AS
787 int rn;
788 unsigned char max = 0;
38b2d076
DD
789
790 for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
c4831cff 791 if (TEST_HARD_REG_BIT (reg_class_contents[(int) regclass], rn))
38b2d076 792 {
c4831cff 793 unsigned char n = m32c_hard_regno_nregs (rn, mode);
38b2d076
DD
794 if (max < n)
795 max = n;
796 }
797 return max;
798}
799
800/* Implements CANNOT_CHANGE_MODE_CLASS. Only r0 and r1 can change to
801 QI (r0l, r1l) because the chip doesn't support QI ops on other
802 registers (well, it does on a0/a1 but if we let gcc do that, reload
803 suffers). Otherwise, we allow changes to larger modes. */
804int
ef4bddc2
RS
805m32c_cannot_change_mode_class (machine_mode from,
806 machine_mode to, int rclass)
38b2d076 807{
db9c8397 808 int rn;
38b2d076
DD
809#if DEBUG0
810 fprintf (stderr, "cannot change from %s to %s in %s\n",
811 mode_name[from], mode_name[to], class_names[rclass]);
812#endif
813
db9c8397
DD
814 /* If the larger mode isn't allowed in any of these registers, we
815 can't allow the change. */
816 for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
817 if (class_contents[rclass][0] & (1 << rn))
818 if (! m32c_hard_regno_ok (rn, to))
819 return 1;
820
38b2d076
DD
821 if (to == QImode)
822 return (class_contents[rclass][0] & 0x1ffa);
823
824 if (class_contents[rclass][0] & 0x0005 /* r0, r1 */
825 && GET_MODE_SIZE (from) > 1)
826 return 0;
827 if (GET_MODE_SIZE (from) > 2) /* all other regs */
828 return 0;
829
830 return 1;
831}
832
833/* Helpers for the rest of the file. */
834/* TRUE if the rtx is a REG rtx for the given register. */
835#define IS_REG(rtx,regno) (GET_CODE (rtx) == REG \
836 && REGNO (rtx) == regno)
837/* TRUE if the rtx is a pseudo - specifically, one we can use as a
838 base register in address calculations (hence the "strict"
839 argument). */
840#define IS_PSEUDO(rtx,strict) (!strict && GET_CODE (rtx) == REG \
841 && (REGNO (rtx) == AP_REGNO \
842 || REGNO (rtx) >= FIRST_PSEUDO_REGISTER))
843
5fd5d713
DD
844#define A0_OR_PSEUDO(x) (IS_REG(x, A0_REGNO) || REGNO (x) >= FIRST_PSEUDO_REGISTER)
845
777e635f 846/* Implements matching for constraints (see next function too). 'S' is
38b2d076
DD
847 for memory constraints, plus "Rpa" for PARALLEL rtx's we use for
848 call return values. */
03dd17b1
NF
849bool
850m32c_matches_constraint_p (rtx value, int constraint)
38b2d076
DD
851{
852 encode_pattern (value);
5fd5d713 853
03dd17b1
NF
854 switch (constraint) {
855 case CONSTRAINT_SF:
856 return (far_addr_space_p (value)
857 && ((RTX_IS ("mr")
858 && A0_OR_PSEUDO (patternr[1])
859 && GET_MODE (patternr[1]) == SImode)
860 || (RTX_IS ("m+^Sri")
861 && A0_OR_PSEUDO (patternr[4])
862 && GET_MODE (patternr[4]) == HImode)
863 || (RTX_IS ("m+^Srs")
864 && A0_OR_PSEUDO (patternr[4])
865 && GET_MODE (patternr[4]) == HImode)
866 || (RTX_IS ("m+^S+ris")
867 && A0_OR_PSEUDO (patternr[5])
868 && GET_MODE (patternr[5]) == HImode)
869 || RTX_IS ("ms")));
870 case CONSTRAINT_Sd:
38b2d076
DD
871 {
872 /* This is the common "src/dest" address */
873 rtx r;
874 if (GET_CODE (value) == MEM && CONSTANT_P (XEXP (value, 0)))
03dd17b1 875 return true;
38b2d076 876 if (RTX_IS ("ms") || RTX_IS ("m+si"))
03dd17b1 877 return true;
07127a0a
DD
878 if (RTX_IS ("m++rii"))
879 {
880 if (REGNO (patternr[3]) == FB_REGNO
881 && INTVAL (patternr[4]) == 0)
03dd17b1 882 return true;
07127a0a 883 }
38b2d076
DD
884 if (RTX_IS ("mr"))
885 r = patternr[1];
886 else if (RTX_IS ("m+ri") || RTX_IS ("m+rs") || RTX_IS ("m+r+si"))
887 r = patternr[2];
888 else
03dd17b1 889 return false;
38b2d076 890 if (REGNO (r) == SP_REGNO)
03dd17b1 891 return false;
38b2d076
DD
892 return m32c_legitimate_address_p (GET_MODE (value), XEXP (value, 0), 1);
893 }
03dd17b1 894 case CONSTRAINT_Sa:
38b2d076
DD
895 {
896 rtx r;
897 if (RTX_IS ("mr"))
898 r = patternr[1];
899 else if (RTX_IS ("m+ri"))
900 r = patternr[2];
901 else
03dd17b1 902 return false;
38b2d076
DD
903 return (IS_REG (r, A0_REGNO) || IS_REG (r, A1_REGNO));
904 }
03dd17b1
NF
905 case CONSTRAINT_Si:
906 return (RTX_IS ("mi") || RTX_IS ("ms") || RTX_IS ("m+si"));
907 case CONSTRAINT_Ss:
908 return ((RTX_IS ("mr")
909 && (IS_REG (patternr[1], SP_REGNO)))
910 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SP_REGNO))));
911 case CONSTRAINT_Sf:
912 return ((RTX_IS ("mr")
913 && (IS_REG (patternr[1], FB_REGNO)))
914 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], FB_REGNO))));
915 case CONSTRAINT_Sb:
916 return ((RTX_IS ("mr")
917 && (IS_REG (patternr[1], SB_REGNO)))
918 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SB_REGNO))));
919 case CONSTRAINT_Sp:
920 /* Absolute addresses 0..0x1fff used for bit addressing (I/O ports) */
921 return (RTX_IS ("mi")
922 && !(INTVAL (patternr[1]) & ~0x1fff));
923 case CONSTRAINT_S1:
924 return r1h_operand (value, QImode);
925 case CONSTRAINT_Rpa:
38b2d076 926 return GET_CODE (value) == PARALLEL;
03dd17b1
NF
927 default:
928 return false;
929 }
38b2d076
DD
930}
931
932/* STACK AND CALLING */
933
934/* Frame Layout */
935
936/* Implements RETURN_ADDR_RTX. Note that R8C and M16C push 24 bits
937 (yes, THREE bytes) onto the stack for the return address, but we
938 don't support pointers bigger than 16 bits on those chips. This
939 will likely wreak havoc with exception unwinding. FIXME. */
940rtx
941m32c_return_addr_rtx (int count)
942{
ef4bddc2 943 machine_mode mode;
38b2d076
DD
944 int offset;
945 rtx ra_mem;
946
947 if (count)
948 return NULL_RTX;
949 /* we want 2[$fb] */
950
951 if (TARGET_A24)
952 {
80b093df
DD
953 /* It's four bytes */
954 mode = PSImode;
38b2d076
DD
955 offset = 4;
956 }
957 else
958 {
959 /* FIXME: it's really 3 bytes */
960 mode = HImode;
961 offset = 2;
962 }
963
964 ra_mem =
0a81f074
RS
965 gen_rtx_MEM (mode, plus_constant (Pmode, gen_rtx_REG (Pmode, FP_REGNO),
966 offset));
38b2d076
DD
967 return copy_to_mode_reg (mode, ra_mem);
968}
969
970/* Implements INCOMING_RETURN_ADDR_RTX. See comment above. */
971rtx
972m32c_incoming_return_addr_rtx (void)
973{
974 /* we want [sp] */
975 return gen_rtx_MEM (PSImode, gen_rtx_REG (PSImode, SP_REGNO));
976}
977
978/* Exception Handling Support */
979
980/* Implements EH_RETURN_DATA_REGNO. Choose registers able to hold
981 pointers. */
982int
983m32c_eh_return_data_regno (int n)
984{
985 switch (n)
986 {
987 case 0:
45d898e4 988 return MEM0_REGNO;
38b2d076 989 case 1:
45d898e4 990 return MEM0_REGNO+4;
38b2d076
DD
991 default:
992 return INVALID_REGNUM;
993 }
994}
995
996/* Implements EH_RETURN_STACKADJ_RTX. Saved and used later in
997 m32c_emit_eh_epilogue. */
998rtx
999m32c_eh_return_stackadj_rtx (void)
1000{
1001 if (!cfun->machine->eh_stack_adjust)
1002 {
1003 rtx sa;
1004
99920b6f 1005 sa = gen_rtx_REG (Pmode, R0_REGNO);
38b2d076
DD
1006 cfun->machine->eh_stack_adjust = sa;
1007 }
1008 return cfun->machine->eh_stack_adjust;
1009}
1010
1011/* Registers That Address the Stack Frame */
1012
1013/* Implements DWARF_FRAME_REGNUM and DBX_REGISTER_NUMBER. Note that
1014 the original spec called for dwarf numbers to vary with register
1015 width as well, for example, r0l, r0, and r2r0 would each have
1016 different dwarf numbers. GCC doesn't support this, and we don't do
1017 it, and gdb seems to like it this way anyway. */
1018unsigned int
1019m32c_dwarf_frame_regnum (int n)
1020{
1021 switch (n)
1022 {
1023 case R0_REGNO:
1024 return 5;
1025 case R1_REGNO:
1026 return 6;
1027 case R2_REGNO:
1028 return 7;
1029 case R3_REGNO:
1030 return 8;
1031 case A0_REGNO:
1032 return 9;
1033 case A1_REGNO:
1034 return 10;
1035 case FB_REGNO:
1036 return 11;
1037 case SB_REGNO:
1038 return 19;
1039
1040 case SP_REGNO:
1041 return 12;
1042 case PC_REGNO:
1043 return 13;
1044 default:
1045 return DWARF_FRAME_REGISTERS + 1;
1046 }
1047}
1048
1049/* The frame looks like this:
1050
1051 ap -> +------------------------------
1052 | Return address (3 or 4 bytes)
1053 | Saved FB (2 or 4 bytes)
1054 fb -> +------------------------------
1055 | local vars
1056 | register saves fb
1057 | through r0 as needed
1058 sp -> +------------------------------
1059*/
1060
1061/* We use this to wrap all emitted insns in the prologue. */
1062static rtx
1063F (rtx x)
1064{
1065 RTX_FRAME_RELATED_P (x) = 1;
1066 return x;
1067}
1068
/* This maps register numbers to the PUSHM/POPM bitfield, and tells us
   how much the stack pointer moves for each, for each cpu family.  */
static struct
{
  int reg1;
  int bit;
  int a16_bytes;
  int a24_bytes;
} pushm_info[] =
{
  /* These are in reverse push (nearest-to-sp) order.  */
  { R0_REGNO, 0x80, 2, 2 },
  { R1_REGNO, 0x40, 2, 2 },
  { R2_REGNO, 0x20, 2, 2 },
  { R3_REGNO, 0x10, 2, 2 },
  { A0_REGNO, 0x08, 2, 4 },
  { A1_REGNO, 0x04, 2, 4 },
  { SB_REGNO, 0x02, 2, 4 },
  { FB_REGNO, 0x01, 2, 4 }
};

#define PUSHM_N (sizeof(pushm_info)/sizeof(pushm_info[0]))
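
/* For example (illustrative): a function that only needs to save $r1
   and $fb ends up with reg_mask 0x41 (0x40 | 0x01) in m32c_pushm_popm
   below, and the stack pointer moves 4 bytes on A16 parts and 6 bytes
   on A24 parts, per the byte columns above.  */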
1091
1092/* Returns TRUE if we need to save/restore the given register. We
1093 save everything for exception handlers, so that any register can be
1094 unwound. For interrupt handlers, we save everything if the handler
1095 calls something else (because we don't know what *that* function
1096 might do), but try to be a bit smarter if the handler is a leaf
1097 function. We always save $a0, though, because we use that in the
85f65093 1098 epilogue to copy $fb to $sp. */
38b2d076
DD
1099static int
1100need_to_save (int regno)
1101{
1102 if (fixed_regs[regno])
1103 return 0;
ad516a74 1104 if (crtl->calls_eh_return)
38b2d076
DD
1105 return 1;
1106 if (regno == FP_REGNO)
1107 return 0;
1108 if (cfun->machine->is_interrupt
65655f79
DD
1109 && (!cfun->machine->is_leaf
1110 || (regno == A0_REGNO
1111 && m32c_function_needs_enter ())
1112 ))
38b2d076 1113 return 1;
6fb5fa3c 1114 if (df_regs_ever_live_p (regno)
38b2d076
DD
1115 && (!call_used_regs[regno] || cfun->machine->is_interrupt))
1116 return 1;
1117 return 0;
1118}
1119
1120/* This function contains all the intelligence about saving and
1121 restoring registers. It always figures out the register save set.
1122 When called with PP_justcount, it merely returns the size of the
1123 save set (for eliminating the frame pointer, for example). When
1124 called with PP_pushm or PP_popm, it emits the appropriate
1125 instructions for saving (pushm) or restoring (popm) the
1126 registers. */
1127static int
1128m32c_pushm_popm (Push_Pop_Type ppt)
1129{
1130 int reg_mask = 0;
1131 int byte_count = 0, bytes;
1132 int i;
1133 rtx dwarf_set[PUSHM_N];
1134 int n_dwarfs = 0;
1135 int nosave_mask = 0;
1136
305da3ec
JH
1137 if (crtl->return_rtx
1138 && GET_CODE (crtl->return_rtx) == PARALLEL
ad516a74 1139 && !(crtl->calls_eh_return || cfun->machine->is_interrupt))
38b2d076 1140 {
305da3ec 1141 rtx exp = XVECEXP (crtl->return_rtx, 0, 0);
38b2d076
DD
1142 rtx rv = XEXP (exp, 0);
1143 int rv_bytes = GET_MODE_SIZE (GET_MODE (rv));
1144
1145 if (rv_bytes > 2)
1146 nosave_mask |= 0x20; /* PSI, SI */
1147 else
1148 nosave_mask |= 0xf0; /* DF */
1149 if (rv_bytes > 4)
1150 nosave_mask |= 0x50; /* DI */
1151 }
1152
1153 for (i = 0; i < (int) PUSHM_N; i++)
1154 {
1155 /* Skip if neither register needs saving. */
1156 if (!need_to_save (pushm_info[i].reg1))
1157 continue;
1158
1159 if (pushm_info[i].bit & nosave_mask)
1160 continue;
1161
1162 reg_mask |= pushm_info[i].bit;
1163 bytes = TARGET_A16 ? pushm_info[i].a16_bytes : pushm_info[i].a24_bytes;
1164
1165 if (ppt == PP_pushm)
1166 {
ef4bddc2 1167 machine_mode mode = (bytes == 2) ? HImode : SImode;
38b2d076
DD
1168 rtx addr;
1169
1170 /* Always use stack_pointer_rtx instead of calling
1171 rtx_gen_REG ourselves. Code elsewhere in GCC assumes
1172 that there is a single rtx representing the stack pointer,
1173 namely stack_pointer_rtx, and uses == to recognize it. */
1174 addr = stack_pointer_rtx;
1175
1176 if (byte_count != 0)
1177 addr = gen_rtx_PLUS (GET_MODE (addr), addr, GEN_INT (byte_count));
1178
1179 dwarf_set[n_dwarfs++] =
f7df4a84 1180 gen_rtx_SET (gen_rtx_MEM (mode, addr),
38b2d076
DD
1181 gen_rtx_REG (mode, pushm_info[i].reg1));
1182 F (dwarf_set[n_dwarfs - 1]);
1183
1184 }
1185 byte_count += bytes;
1186 }
1187
1188 if (cfun->machine->is_interrupt)
1189 {
1190 cfun->machine->intr_pushm = reg_mask & 0xfe;
1191 reg_mask = 0;
1192 byte_count = 0;
1193 }
1194
1195 if (cfun->machine->is_interrupt)
1196 for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
1197 if (need_to_save (i))
1198 {
1199 byte_count += 2;
1200 cfun->machine->intr_pushmem[i - MEM0_REGNO] = 1;
1201 }
1202
1203 if (ppt == PP_pushm && byte_count)
1204 {
1205 rtx note = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (n_dwarfs + 1));
1206 rtx pushm;
1207
1208 if (reg_mask)
1209 {
1210 XVECEXP (note, 0, 0)
f7df4a84 1211 = gen_rtx_SET (stack_pointer_rtx,
38b2d076
DD
1212 gen_rtx_PLUS (GET_MODE (stack_pointer_rtx),
1213 stack_pointer_rtx,
1214 GEN_INT (-byte_count)));
1215 F (XVECEXP (note, 0, 0));
1216
1217 for (i = 0; i < n_dwarfs; i++)
1218 XVECEXP (note, 0, i + 1) = dwarf_set[i];
1219
1220 pushm = F (emit_insn (gen_pushm (GEN_INT (reg_mask))));
1221
444d6efe 1222 add_reg_note (pushm, REG_FRAME_RELATED_EXPR, note);
38b2d076
DD
1223 }
1224
1225 if (cfun->machine->is_interrupt)
1226 for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
1227 if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
1228 {
1229 if (TARGET_A16)
1230 pushm = emit_insn (gen_pushhi_16 (gen_rtx_REG (HImode, i)));
1231 else
1232 pushm = emit_insn (gen_pushhi_24 (gen_rtx_REG (HImode, i)));
1233 F (pushm);
1234 }
1235 }
1236 if (ppt == PP_popm && byte_count)
1237 {
38b2d076
DD
1238 if (cfun->machine->is_interrupt)
1239 for (i = MEM7_REGNO; i >= MEM0_REGNO; i--)
1240 if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
1241 {
1242 if (TARGET_A16)
b3fdec9e 1243 emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, i)));
38b2d076 1244 else
b3fdec9e 1245 emit_insn (gen_pophi_24 (gen_rtx_REG (HImode, i)));
38b2d076
DD
1246 }
1247 if (reg_mask)
1248 emit_insn (gen_popm (GEN_INT (reg_mask)));
1249 }
1250
1251 return byte_count;
1252}
1253
1254/* Implements INITIAL_ELIMINATION_OFFSET. See the comment above that
1255 diagrams our call frame. */
1256int
1257m32c_initial_elimination_offset (int from, int to)
1258{
1259 int ofs = 0;
1260
1261 if (from == AP_REGNO)
1262 {
1263 if (TARGET_A16)
1264 ofs += 5;
1265 else
1266 ofs += 8;
1267 }
1268
1269 if (to == SP_REGNO)
1270 {
1271 ofs += m32c_pushm_popm (PP_justcount);
1272 ofs += get_frame_size ();
1273 }
1274
1275 /* Account for push rounding. */
1276 if (TARGET_A24)
1277 ofs = (ofs + 1) & ~1;
1278#if DEBUG0
1279 fprintf (stderr, "initial_elimination_offset from=%d to=%d, ofs=%d\n", from,
1280 to, ofs);
1281#endif
1282 return ofs;
1283}
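
/* For example: eliminating $ap into $sp on an A16 part yields 5 bytes
   (3-byte return address plus 2-byte saved $fb, per the frame diagram
   above) plus the pushm save area plus the frame size; on A24 the
   fixed part is 8 bytes and the total is rounded up to a word.  */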
1284
1285/* Passing Function Arguments on the Stack */
1286
38b2d076
DD
/* Implements PUSH_ROUNDING.  The R8C and M16C have byte stacks, the
   M32C has word stacks.  */
unsigned int
m32c_push_rounding (int n)
{
  if (TARGET_R8C || TARGET_M16C)
    return n;
  return (n + 1) & ~1;
}
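
/* For example: pushing a 3-byte PSImode value reserves 3 bytes on the
   byte-stacked R8C/M16C but 4 bytes on the word-stacked M32C.  */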
1296
1297/* Passing Arguments in Registers */
1298
cd34bbe8
NF
1299/* Implements TARGET_FUNCTION_ARG. Arguments are passed partly in
1300 registers, partly on stack. If our function returns a struct, a
1301 pointer to a buffer for it is at the top of the stack (last thing
1302 pushed). The first few real arguments may be in registers as
1303 follows:
38b2d076
DD
1304
1305 R8C/M16C: arg1 in r1 if it's QI or HI (else it's pushed on stack)
1306 arg2 in r2 if it's HI (else pushed on stack)
1307 rest on stack
1308 M32C: arg1 in r0 if it's QI or HI (else it's pushed on stack)
1309 rest on stack
1310
1311 Structs are not passed in registers, even if they fit. Only
1312 integer and pointer types are passed in registers.
1313
1314 Note that when arg1 doesn't fit in r1, arg2 may still be passed in
1315 r2 if it fits. */
cd34bbe8
NF
1316#undef TARGET_FUNCTION_ARG
1317#define TARGET_FUNCTION_ARG m32c_function_arg
1318static rtx
d5cc9181 1319m32c_function_arg (cumulative_args_t ca_v,
ef4bddc2 1320 machine_mode mode, const_tree type, bool named)
38b2d076 1321{
d5cc9181
JR
1322 CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
1323
38b2d076
DD
1324 /* Can return a reg, parallel, or 0 for stack */
1325 rtx rv = NULL_RTX;
1326#if DEBUG0
1327 fprintf (stderr, "func_arg %d (%s, %d)\n",
1328 ca->parm_num, mode_name[mode], named);
f75e07bc 1329 debug_tree ((tree)type);
38b2d076
DD
1330#endif
1331
1332 if (mode == VOIDmode)
1333 return GEN_INT (0);
1334
1335 if (ca->force_mem || !named)
1336 {
1337#if DEBUG0
1338 fprintf (stderr, "func arg: force %d named %d, mem\n", ca->force_mem,
1339 named);
1340#endif
1341 return NULL_RTX;
1342 }
1343
1344 if (type && INTEGRAL_TYPE_P (type) && POINTER_TYPE_P (type))
1345 return NULL_RTX;
1346
9d746d5e
DD
1347 if (type && AGGREGATE_TYPE_P (type))
1348 return NULL_RTX;
1349
38b2d076
DD
1350 switch (ca->parm_num)
1351 {
1352 case 1:
1353 if (GET_MODE_SIZE (mode) == 1 || GET_MODE_SIZE (mode) == 2)
1354 rv = gen_rtx_REG (mode, TARGET_A16 ? R1_REGNO : R0_REGNO);
1355 break;
1356
1357 case 2:
1358 if (TARGET_A16 && GET_MODE_SIZE (mode) == 2)
1359 rv = gen_rtx_REG (mode, R2_REGNO);
1360 break;
1361 }
1362
1363#if DEBUG0
1364 debug_rtx (rv);
1365#endif
1366 return rv;
1367}
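
/* For example (illustrative): for a call like f (char c, short s), the
   R8C/M16C rules above put c in $r1 and s in $r2, while on M32C only
   the first QI/HI argument lands in $r0 and everything else goes on
   the stack; aggregates are always passed in memory.  */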
1368
1369#undef TARGET_PASS_BY_REFERENCE
1370#define TARGET_PASS_BY_REFERENCE m32c_pass_by_reference
1371static bool
d5cc9181 1372m32c_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
ef4bddc2 1373 machine_mode mode ATTRIBUTE_UNUSED,
586de218 1374 const_tree type ATTRIBUTE_UNUSED,
38b2d076
DD
1375 bool named ATTRIBUTE_UNUSED)
1376{
1377 return 0;
1378}
1379
1380/* Implements INIT_CUMULATIVE_ARGS. */
1381void
1382m32c_init_cumulative_args (CUMULATIVE_ARGS * ca,
9d746d5e 1383 tree fntype,
38b2d076 1384 rtx libname ATTRIBUTE_UNUSED,
9d746d5e 1385 tree fndecl,
38b2d076
DD
1386 int n_named_args ATTRIBUTE_UNUSED)
1387{
9d746d5e
DD
1388 if (fntype && aggregate_value_p (TREE_TYPE (fntype), fndecl))
1389 ca->force_mem = 1;
1390 else
1391 ca->force_mem = 0;
38b2d076
DD
1392 ca->parm_num = 1;
1393}
1394
cd34bbe8
NF
1395/* Implements TARGET_FUNCTION_ARG_ADVANCE. force_mem is set for
1396 functions returning structures, so we always reset that. Otherwise,
1397 we only need to know the sequence number of the argument to know what
1398 to do with it. */
1399#undef TARGET_FUNCTION_ARG_ADVANCE
1400#define TARGET_FUNCTION_ARG_ADVANCE m32c_function_arg_advance
1401static void
d5cc9181 1402m32c_function_arg_advance (cumulative_args_t ca_v,
ef4bddc2 1403 machine_mode mode ATTRIBUTE_UNUSED,
cd34bbe8
NF
1404 const_tree type ATTRIBUTE_UNUSED,
1405 bool named ATTRIBUTE_UNUSED)
38b2d076 1406{
d5cc9181
JR
1407 CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
1408
38b2d076
DD
1409 if (ca->force_mem)
1410 ca->force_mem = 0;
9d746d5e
DD
1411 else
1412 ca->parm_num++;
38b2d076
DD
1413}
1414
c2ed6cf8
NF
1415/* Implements TARGET_FUNCTION_ARG_BOUNDARY. */
1416#undef TARGET_FUNCTION_ARG_BOUNDARY
1417#define TARGET_FUNCTION_ARG_BOUNDARY m32c_function_arg_boundary
1418static unsigned int
ef4bddc2 1419m32c_function_arg_boundary (machine_mode mode ATTRIBUTE_UNUSED,
c2ed6cf8
NF
1420 const_tree type ATTRIBUTE_UNUSED)
1421{
1422 return (TARGET_A16 ? 8 : 16);
1423}
1424
38b2d076
DD
1425/* Implements FUNCTION_ARG_REGNO_P. */
1426int
1427m32c_function_arg_regno_p (int r)
1428{
1429 if (TARGET_A24)
1430 return (r == R0_REGNO);
1431 return (r == R1_REGNO || r == R2_REGNO);
1432}
1433
e9555b13 1434/* HImode and PSImode are the two "native" modes as far as GCC is
85f65093 1435 concerned, but the chips also support a 32-bit mode which is used
e9555b13
DD
1436 for some opcodes in R8C/M16C and for reset vectors and such. */
1437#undef TARGET_VALID_POINTER_MODE
1438#define TARGET_VALID_POINTER_MODE m32c_valid_pointer_mode
23fed240 1439static bool
095a2d76 1440m32c_valid_pointer_mode (scalar_int_mode mode)
e9555b13 1441{
e9555b13
DD
1442 if (mode == HImode
1443 || mode == PSImode
1444 || mode == SImode
1445 )
1446 return 1;
1447 return 0;
1448}
1449
38b2d076
DD
1450/* How Scalar Function Values Are Returned */
1451
2a31793e 1452/* Implements TARGET_LIBCALL_VALUE. Most values are returned in $r0, or some
38b2d076
DD
1453 combination of registers starting there (r2r0 for longs, r3r1r2r0
1454 for long long, r3r2r1r0 for doubles), except that that ABI
1455 currently doesn't work because it ends up using all available
1456 general registers and gcc often can't compile it. So, instead, we
1457 return anything bigger than 16 bits in "mem0" (effectively, a
1458 memory location). */
2a31793e
AS
1459
1460#undef TARGET_LIBCALL_VALUE
1461#define TARGET_LIBCALL_VALUE m32c_libcall_value
1462
1463static rtx
ef4bddc2 1464m32c_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
38b2d076
DD
1465{
1466 /* return reg or parallel */
1467#if 0
1468 /* FIXME: GCC has difficulty returning large values in registers,
1469 because that ties up most of the general registers and gives the
1470 register allocator little to work with. Until we can resolve
1471 this, large values are returned in memory. */
1472 if (mode == DFmode)
1473 {
1474 rtx rv;
1475
1476 rv = gen_rtx_PARALLEL (mode, rtvec_alloc (4));
1477 XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
1478 gen_rtx_REG (HImode,
1479 R0_REGNO),
1480 GEN_INT (0));
1481 XVECEXP (rv, 0, 1) = gen_rtx_EXPR_LIST (VOIDmode,
1482 gen_rtx_REG (HImode,
1483 R1_REGNO),
1484 GEN_INT (2));
1485 XVECEXP (rv, 0, 2) = gen_rtx_EXPR_LIST (VOIDmode,
1486 gen_rtx_REG (HImode,
1487 R2_REGNO),
1488 GEN_INT (4));
1489 XVECEXP (rv, 0, 3) = gen_rtx_EXPR_LIST (VOIDmode,
1490 gen_rtx_REG (HImode,
1491 R3_REGNO),
1492 GEN_INT (6));
1493 return rv;
1494 }
1495
1496 if (TARGET_A24 && GET_MODE_SIZE (mode) > 2)
1497 {
1498 rtx rv;
1499
1500 rv = gen_rtx_PARALLEL (mode, rtvec_alloc (1));
1501 XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
1502 gen_rtx_REG (mode,
1503 R0_REGNO),
1504 GEN_INT (0));
1505 return rv;
1506 }
1507#endif
1508
1509 if (GET_MODE_SIZE (mode) > 2)
1510 return gen_rtx_REG (mode, MEM0_REGNO);
1511 return gen_rtx_REG (mode, R0_REGNO);
1512}
1513
2a31793e 1514/* Implements TARGET_FUNCTION_VALUE. Functions and libcalls have the same
38b2d076 1515 conventions. */
2a31793e
AS
1516
1517#undef TARGET_FUNCTION_VALUE
1518#define TARGET_FUNCTION_VALUE m32c_function_value
1519
1520static rtx
1521m32c_function_value (const_tree valtype,
1522 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
1523 bool outgoing ATTRIBUTE_UNUSED)
38b2d076
DD
1524{
1525 /* return reg or parallel */
ef4bddc2 1526 const machine_mode mode = TYPE_MODE (valtype);
2a31793e
AS
1527 return m32c_libcall_value (mode, NULL_RTX);
1528}
1529
f28f2337
AS
1530/* Implements TARGET_FUNCTION_VALUE_REGNO_P. */
1531
1532#undef TARGET_FUNCTION_VALUE_REGNO_P
1533#define TARGET_FUNCTION_VALUE_REGNO_P m32c_function_value_regno_p
2a31793e 1534
f28f2337 1535static bool
2a31793e
AS
1536m32c_function_value_regno_p (const unsigned int regno)
1537{
1538 return (regno == R0_REGNO || regno == MEM0_REGNO);
38b2d076
DD
1539}
1540
1541/* How Large Values Are Returned */
1542
1543/* We return structures by pushing the address on the stack, even if
1544 we use registers for the first few "real" arguments. */
1545#undef TARGET_STRUCT_VALUE_RTX
1546#define TARGET_STRUCT_VALUE_RTX m32c_struct_value_rtx
1547static rtx
1548m32c_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
1549 int incoming ATTRIBUTE_UNUSED)
1550{
1551 return 0;
1552}
1553
1554/* Function Entry and Exit */
1555
1556/* Implements EPILOGUE_USES. Interrupts restore all registers. */
1557int
1558m32c_epilogue_uses (int regno ATTRIBUTE_UNUSED)
1559{
1560 if (cfun->machine->is_interrupt)
1561 return 1;
1562 return 0;
1563}
1564
1565/* Implementing the Varargs Macros */
1566
1567#undef TARGET_STRICT_ARGUMENT_NAMING
1568#define TARGET_STRICT_ARGUMENT_NAMING m32c_strict_argument_naming
1569static bool
d5cc9181 1570m32c_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
38b2d076
DD
1571{
1572 return 1;
1573}
1574
1575/* Trampolines for Nested Functions */
1576
1577/*
1578 m16c:
1579 1 0000 75C43412 mov.w #0x1234,a0
1580 2 0004 FC000000 jmp.a label
1581
1582 m32c:
1583 1 0000 BC563412 mov.l:s #0x123456,a0
1584 2 0004 CC000000 jmp.a label
1585*/
1586
1587/* Implements TRAMPOLINE_SIZE. */
1588int
1589m32c_trampoline_size (void)
1590{
1591 /* Allocate extra space so we can avoid the messy shifts when we
1592 initialize the trampoline; we just write past the end of the
1593 opcode. */
1594 return TARGET_A16 ? 8 : 10;
1595}
1596
1597/* Implements TRAMPOLINE_ALIGNMENT. */
1598int
1599m32c_trampoline_alignment (void)
1600{
1601 return 2;
1602}
1603
229fbccb
RH
1604/* Implements TARGET_TRAMPOLINE_INIT. */
1605
1606#undef TARGET_TRAMPOLINE_INIT
1607#define TARGET_TRAMPOLINE_INIT m32c_trampoline_init
1608static void
1609m32c_trampoline_init (rtx m_tramp, tree fndecl, rtx chainval)
38b2d076 1610{
229fbccb
RH
1611 rtx function = XEXP (DECL_RTL (fndecl), 0);
1612
1613#define A0(m,i) adjust_address (m_tramp, m, i)
38b2d076
DD
1614 if (TARGET_A16)
1615 {
1616 /* Note: we subtract a "word" because the moves want signed
1617 constants, not unsigned constants. */
1618 emit_move_insn (A0 (HImode, 0), GEN_INT (0xc475 - 0x10000));
1619 emit_move_insn (A0 (HImode, 2), chainval);
1620 emit_move_insn (A0 (QImode, 4), GEN_INT (0xfc - 0x100));
85f65093
KH
1621 /* We use 16-bit addresses here, but store the zero to turn it
1622 into a 24-bit offset. */
38b2d076
DD
1623 emit_move_insn (A0 (HImode, 5), function);
1624 emit_move_insn (A0 (QImode, 7), GEN_INT (0x00));
1625 }
1626 else
1627 {
1628 /* Note that the PSI moves actually write 4 bytes. Make sure we
1629 write stuff out in the right order, and leave room for the
1630 extra byte at the end. */
1631 emit_move_insn (A0 (QImode, 0), GEN_INT (0xbc - 0x100));
1632 emit_move_insn (A0 (PSImode, 1), chainval);
1633 emit_move_insn (A0 (QImode, 4), GEN_INT (0xcc - 0x100));
1634 emit_move_insn (A0 (PSImode, 5), function);
1635 }
1636#undef A0
1637}
1638
d81db636
SB
1639#undef TARGET_LRA_P
1640#define TARGET_LRA_P hook_bool_void_false
1641
38b2d076
DD
1642/* Addressing Modes */
1643
c6c3dba9
PB
1644/* The r8c/m32c family supports a wide range of non-orthogonal
1645 addressing modes, including the ability to double-indirect on *some*
1646 of them. Not all insns support all modes, either, but we rely on
1647 predicates and constraints to deal with that. */
1648#undef TARGET_LEGITIMATE_ADDRESS_P
1649#define TARGET_LEGITIMATE_ADDRESS_P m32c_legitimate_address_p
1650bool
ef4bddc2 1651m32c_legitimate_address_p (machine_mode mode, rtx x, bool strict)
38b2d076
DD
1652{
1653 int mode_adjust;
1654 if (CONSTANT_P (x))
1655 return 1;
1656
5fd5d713
DD
1657 if (TARGET_A16 && GET_MODE (x) != HImode && GET_MODE (x) != SImode)
1658 return 0;
1659 if (TARGET_A24 && GET_MODE (x) != PSImode)
1660 return 0;
1661
38b2d076
DD
1662 /* Wide references to memory will be split after reload, so we must
1663 ensure that all parts of such splits remain legitimate
1664 addresses. */
1665 mode_adjust = GET_MODE_SIZE (mode) - 1;
1666
1667 /* allowing PLUS yields mem:HI(plus:SI(mem:SI(plus:SI in m32c_split_move */
1668 if (GET_CODE (x) == PRE_DEC
1669 || GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_MODIFY)
1670 {
1671 return (GET_CODE (XEXP (x, 0)) == REG
1672 && REGNO (XEXP (x, 0)) == SP_REGNO);
1673 }
1674
1675#if 0
1676 /* This is the double indirection detection, but it currently
1677 doesn't work as cleanly as this code implies, so until we've had
1678 a chance to debug it, leave it disabled. */
1679 if (TARGET_A24 && GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) != PLUS)
1680 {
1681#if DEBUG_DOUBLE
1682 fprintf (stderr, "double indirect\n");
1683#endif
1684 x = XEXP (x, 0);
1685 }
1686#endif
1687
1688 encode_pattern (x);
1689 if (RTX_IS ("r"))
1690 {
1691 /* Most indexable registers can be used without displacements,
1692 although some of them will be emitted with an explicit zero
1693 to please the assembler. */
1694 switch (REGNO (patternr[0]))
1695 {
38b2d076
DD
1696 case A1_REGNO:
1697 case SB_REGNO:
1698 case FB_REGNO:
1699 case SP_REGNO:
5fd5d713
DD
1700 if (TARGET_A16 && GET_MODE (x) == SImode)
1701 return 0;
0c57f4bf 1702 /* FALLTHRU */
5fd5d713 1703 case A0_REGNO:
38b2d076
DD
1704 return 1;
1705
1706 default:
1707 if (IS_PSEUDO (patternr[0], strict))
1708 return 1;
1709 return 0;
1710 }
1711 }
5fd5d713
DD
1712
1713 if (TARGET_A16 && GET_MODE (x) == SImode)
1714 return 0;
1715
38b2d076
DD
1716 if (RTX_IS ("+ri"))
1717 {
1718 /* This is more interesting, because different base registers
1719 allow for different displacements - both range and signedness
1720 - and it differs from chip series to chip series too. */
1721 int rn = REGNO (patternr[1]);
1722 HOST_WIDE_INT offs = INTVAL (patternr[2]);
1723 switch (rn)
1724 {
1725 case A0_REGNO:
1726 case A1_REGNO:
1727 case SB_REGNO:
1728 /* The syntax only allows positive offsets, but when the
1729 offsets span the entire memory range, we can simulate
1730 negative offsets by wrapping. */
1731 if (TARGET_A16)
1732 return (offs >= -65536 && offs <= 65535 - mode_adjust);
1733 if (rn == SB_REGNO)
1734 return (offs >= 0 && offs <= 65535 - mode_adjust);
1735 /* A0 or A1 */
1736 return (offs >= -16777216 && offs <= 16777215);
1737
1738 case FB_REGNO:
1739 if (TARGET_A16)
1740 return (offs >= -128 && offs <= 127 - mode_adjust);
1741 return (offs >= -65536 && offs <= 65535 - mode_adjust);
1742
1743 case SP_REGNO:
1744 return (offs >= -128 && offs <= 127 - mode_adjust);
1745
1746 default:
1747 if (IS_PSEUDO (patternr[1], strict))
1748 return 1;
1749 return 0;
1750 }
1751 }
1752 if (RTX_IS ("+rs") || RTX_IS ("+r+si"))
1753 {
1754 rtx reg = patternr[1];
1755
1756 /* We don't know where the symbol is, so only allow base
1757 registers which support displacements spanning the whole
1758 address range. */
1759 switch (REGNO (reg))
1760 {
1761 case A0_REGNO:
1762 case A1_REGNO:
1763 /* $sb needs a secondary reload, but since it's involved in
1764 memory address reloads too, we don't deal with it very
1765 well. */
1766 /* case SB_REGNO: */
1767 return 1;
1768 default:
45d898e4
DD
1769 if (GET_CODE (reg) == SUBREG)
1770 return 0;
38b2d076
DD
1771 if (IS_PSEUDO (reg, strict))
1772 return 1;
1773 return 0;
1774 }
1775 }
1776 return 0;
1777}
1778
1779/* Implements REG_OK_FOR_BASE_P. */
1780int
1781m32c_reg_ok_for_base_p (rtx x, int strict)
1782{
1783 if (GET_CODE (x) != REG)
1784 return 0;
1785 switch (REGNO (x))
1786 {
1787 case A0_REGNO:
1788 case A1_REGNO:
1789 case SB_REGNO:
1790 case FB_REGNO:
1791 case SP_REGNO:
1792 return 1;
1793 default:
1794 if (IS_PSEUDO (x, strict))
1795 return 1;
1796 return 0;
1797 }
1798}
1799
04aff2c0 1800/* We have three choices for choosing fb->aN offsets. If we choose -128,
85f65093 1801 we need one MOVA -128[fb],aN opcode and 16-bit aN displacements,
04aff2c0
DD
1802 like this:
1803 EB 4B FF mova -128[$fb],$a0
1804 D8 0C FF FF mov.w:Q #0,-1[$a0]
1805
85f65093 1806 Alternately, we subtract the frame size, and hopefully use 8-bit aN
04aff2c0
DD
1807 displacements:
1808 7B F4 stc $fb,$a0
1809 77 54 00 01 sub #256,$a0
1810 D8 08 01 mov.w:Q #0,1[$a0]
1811
1812 If we don't offset (i.e. offset by zero), we end up with:
1813 7B F4 stc $fb,$a0
1814 D8 0C 00 FF mov.w:Q #0,-256[$a0]
1815
1816 We have to subtract *something* so that we have a PLUS rtx to mark
1817 that we've done this reload. The -128 offset will never result in
85f65093 1818 an 8-bit aN offset, and the payoff for the second case is five
04aff2c0
DD
1819 loads *if* those loads are within 256 bytes of the other end of the
1820 frame, so the third case seems best. Note that we subtract the
1821 zero, but detect that in the addhi3 pattern. */
1822
ea471af0
JM
1823#define BIG_FB_ADJ 0
1824
38b2d076
DD
1825/* Implements LEGITIMIZE_ADDRESS. The only address we really have to
1826 worry about is frame base offsets, as $fb has a limited
1827 displacement range. We deal with this by attempting to reload $fb
1828 itself into an address register; that seems to result in the best
1829 code. */
506d7b68
PB
1830#undef TARGET_LEGITIMIZE_ADDRESS
1831#define TARGET_LEGITIMIZE_ADDRESS m32c_legitimize_address
1832static rtx
1833m32c_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
ef4bddc2 1834 machine_mode mode)
38b2d076
DD
1835{
1836#if DEBUG0
1837 fprintf (stderr, "m32c_legitimize_address for mode %s\n", mode_name[mode]);
506d7b68 1838 debug_rtx (x);
38b2d076
DD
1839 fprintf (stderr, "\n");
1840#endif
1841
506d7b68
PB
1842 if (GET_CODE (x) == PLUS
1843 && GET_CODE (XEXP (x, 0)) == REG
1844 && REGNO (XEXP (x, 0)) == FB_REGNO
1845 && GET_CODE (XEXP (x, 1)) == CONST_INT
1846 && (INTVAL (XEXP (x, 1)) < -128
1847 || INTVAL (XEXP (x, 1)) > (128 - GET_MODE_SIZE (mode))))
1848 {
1849 /* reload FB to A_REGS */
38b2d076 1850 rtx temp = gen_reg_rtx (Pmode);
506d7b68 1851 x = copy_rtx (x);
f7df4a84 1852 emit_insn (gen_rtx_SET (temp, XEXP (x, 0)));
506d7b68 1853 XEXP (x, 0) = temp;
38b2d076
DD
1854 }
1855
506d7b68 1856 return x;
38b2d076
DD
1857}
1858
1859/* Implements LEGITIMIZE_RELOAD_ADDRESS. See comment above. */
1860int
1861m32c_legitimize_reload_address (rtx * x,
ef4bddc2 1862 machine_mode mode,
1863 int opnum,
1864 int type, int ind_levels ATTRIBUTE_UNUSED)
1865{
1866#if DEBUG0
1867 fprintf (stderr, "\nm32c_legitimize_reload_address for mode %s\n",
1868 mode_name[mode]);
1869 debug_rtx (*x);
1870#endif
1871
1872 /* At one point, this function tried to get $fb copied to an address
1873 register, which in theory would maximize sharing, but gcc was
1874 *also* still trying to reload the whole address, and we'd run out
1875 of address registers. So we let gcc do the naive (but safe)
1876 reload instead, when the above function doesn't handle it for
1877 us.
1878
1879 The code below is a second attempt at the above. */
1880
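/* Illustrative note (recall BIG_FB_ADJ is currently 0): a frame
   access such as fb+300 is re-expressed below as
   (fb + adjustment) + (300 - adjustment), and only the inner
   (fb + adjustment) part is handed to push_reload with A_REGS, so
   reload copies $fb into an address register and keeps the large
   constant as a displacement off that register.  */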
1881 if (GET_CODE (*x) == PLUS
1882 && GET_CODE (XEXP (*x, 0)) == REG
1883 && REGNO (XEXP (*x, 0)) == FB_REGNO
1884 && GET_CODE (XEXP (*x, 1)) == CONST_INT
1885 && (INTVAL (XEXP (*x, 1)) < -128
1886 || INTVAL (XEXP (*x, 1)) > (128 - GET_MODE_SIZE (mode))))
1887 {
1888 rtx sum;
1889 int offset = INTVAL (XEXP (*x, 1));
1890 int adjustment = -BIG_FB_ADJ;
1891
1892 sum = gen_rtx_PLUS (Pmode, XEXP (*x, 0),
1893 GEN_INT (adjustment));
1894 *x = gen_rtx_PLUS (Pmode, sum, GEN_INT (offset - adjustment));
1895 if (type == RELOAD_OTHER)
1896 type = RELOAD_FOR_OTHER_ADDRESS;
1897 push_reload (sum, NULL_RTX, &XEXP (*x, 0), NULL,
1898 A_REGS, Pmode, VOIDmode, 0, 0, opnum,
444d6efe 1899 (enum reload_type) type);
04aff2c0
DD
1900 return 1;
1901 }
1902
1903 if (GET_CODE (*x) == PLUS
1904 && GET_CODE (XEXP (*x, 0)) == PLUS
1905 && GET_CODE (XEXP (XEXP (*x, 0), 0)) == REG
1906 && REGNO (XEXP (XEXP (*x, 0), 0)) == FB_REGNO
1907 && GET_CODE (XEXP (XEXP (*x, 0), 1)) == CONST_INT
1908 && GET_CODE (XEXP (*x, 1)) == CONST_INT
1909 )
1910 {
1911 if (type == RELOAD_OTHER)
1912 type = RELOAD_FOR_OTHER_ADDRESS;
1913 push_reload (XEXP (*x, 0), NULL_RTX, &XEXP (*x, 0), NULL,
1914 A_REGS, Pmode, VOIDmode, 0, 0, opnum,
444d6efe 1915 (enum reload_type) type);
f75e07bc
BE
1916 return 1;
1917 }
1918
1919 if (TARGET_A24 && GET_MODE (*x) == PSImode)
1920 {
1921 push_reload (*x, NULL_RTX, x, NULL,
1922 A_REGS, PSImode, VOIDmode, 0, 0, opnum,
1923 (enum reload_type) type);
04aff2c0
DD
1924 return 1;
1925 }
38b2d076
DD
1926
1927 return 0;
1928}
1929
5fd5d713
DD
1930/* Return the appropriate mode for a named address pointer. */
1931#undef TARGET_ADDR_SPACE_POINTER_MODE
1932#define TARGET_ADDR_SPACE_POINTER_MODE m32c_addr_space_pointer_mode
095a2d76 1933static scalar_int_mode
1934m32c_addr_space_pointer_mode (addr_space_t addrspace)
1935{
1936 switch (addrspace)
1937 {
1938 case ADDR_SPACE_GENERIC:
1939 return TARGET_A24 ? PSImode : HImode;
1940 case ADDR_SPACE_FAR:
1941 return SImode;
1942 default:
1943 gcc_unreachable ();
1944 }
1945}
1946
1947/* Return the appropriate mode for a named address address. */
1948#undef TARGET_ADDR_SPACE_ADDRESS_MODE
1949#define TARGET_ADDR_SPACE_ADDRESS_MODE m32c_addr_space_address_mode
095a2d76 1950static scalar_int_mode
1951m32c_addr_space_address_mode (addr_space_t addrspace)
1952{
1953 switch (addrspace)
1954 {
1955 case ADDR_SPACE_GENERIC:
1956 return TARGET_A24 ? PSImode : HImode;
1957 case ADDR_SPACE_FAR:
1958 return SImode;
1959 default:
1960 gcc_unreachable ();
1961 }
1962}
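/* Taken together, the two hooks above mean that generic pointers are
   16 bits wide (HImode) on TARGET_A16 parts and 24 bits wide
   (PSImode) on TARGET_A24 parts, while pointers into the far address
   space are always carried as 32-bit SImode values.  */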
1963
1964/* Like m32c_legitimate_address_p, except with named addresses. */
1965#undef TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P
1966#define TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P \
1967 m32c_addr_space_legitimate_address_p
1968static bool
ef4bddc2 1969m32c_addr_space_legitimate_address_p (machine_mode mode, rtx x,
1970 bool strict, addr_space_t as)
1971{
1972 if (as == ADDR_SPACE_FAR)
1973 {
1974 if (TARGET_A24)
1975 return 0;
1976 encode_pattern (x);
1977 if (RTX_IS ("r"))
1978 {
1979 if (GET_MODE (x) != SImode)
1980 return 0;
1981 switch (REGNO (patternr[0]))
1982 {
1983 case A0_REGNO:
1984 return 1;
1985
1986 default:
1987 if (IS_PSEUDO (patternr[0], strict))
1988 return 1;
1989 return 0;
1990 }
1991 }
1992 if (RTX_IS ("+^Sri"))
1993 {
1994 int rn = REGNO (patternr[3]);
1995 HOST_WIDE_INT offs = INTVAL (patternr[4]);
1996 if (GET_MODE (patternr[3]) != HImode)
1997 return 0;
1998 switch (rn)
1999 {
2000 case A0_REGNO:
2001 return (offs >= 0 && offs <= 0xfffff);
2002
2003 default:
2004 if (IS_PSEUDO (patternr[3], strict))
2005 return 1;
2006 return 0;
2007 }
2008 }
2009 if (RTX_IS ("+^Srs"))
2010 {
2011 int rn = REGNO (patternr[3]);
2012 if (GET_MODE (patternr[3]) != HImode)
2013 return 0;
2014 switch (rn)
2015 {
2016 case A0_REGNO:
2017 return 1;
2018
2019 default:
2020 if (IS_PSEUDO (patternr[3], strict))
2021 return 1;
2022 return 0;
2023 }
2024 }
2025 if (RTX_IS ("+^S+ris"))
2026 {
2027 int rn = REGNO (patternr[4]);
2028 if (GET_MODE (patternr[4]) != HImode)
2029 return 0;
2030 switch (rn)
2031 {
2032 case A0_REGNO:
2033 return 1;
2034
2035 default:
2036 if (IS_PSEUDO (patternr[4], strict))
2037 return 1;
2038 return 0;
2039 }
2040 }
2041 if (RTX_IS ("s"))
2042 {
2043 return 1;
2044 }
2045 return 0;
2046 }
2047
2048 else if (as != ADDR_SPACE_GENERIC)
2049 gcc_unreachable ();
2050
2051 return m32c_legitimate_address_p (mode, x, strict);
2052}
2053
2054/* Like m32c_legitimate_address, except with named address support. */
2055#undef TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS
2056#define TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS m32c_addr_space_legitimize_address
2057static rtx
ef4bddc2 2058m32c_addr_space_legitimize_address (rtx x, rtx oldx, machine_mode mode,
2059 addr_space_t as)
2060{
2061 if (as != ADDR_SPACE_GENERIC)
2062 {
2063#if DEBUG0
2064 fprintf (stderr, "\033[36mm32c_addr_space_legitimize_address for mode %s\033[0m\n", mode_name[mode]);
2065 debug_rtx (x);
2066 fprintf (stderr, "\n");
2067#endif
2068
2069 if (GET_CODE (x) != REG)
2070 {
2071 x = force_reg (SImode, x);
2072 }
2073 return x;
2074 }
2075
2076 return m32c_legitimize_address (x, oldx, mode);
2077}
2078
2079/* Determine if one named address space is a subset of another. */
2080#undef TARGET_ADDR_SPACE_SUBSET_P
2081#define TARGET_ADDR_SPACE_SUBSET_P m32c_addr_space_subset_p
2082static bool
2083m32c_addr_space_subset_p (addr_space_t subset, addr_space_t superset)
2084{
2085 gcc_assert (subset == ADDR_SPACE_GENERIC || subset == ADDR_SPACE_FAR);
2086 gcc_assert (superset == ADDR_SPACE_GENERIC || superset == ADDR_SPACE_FAR);
2087
2088 if (subset == superset)
2089 return true;
2090
2091 else
2092 return (subset == ADDR_SPACE_GENERIC && superset == ADDR_SPACE_FAR);
2093}
2094
2095#undef TARGET_ADDR_SPACE_CONVERT
2096#define TARGET_ADDR_SPACE_CONVERT m32c_addr_space_convert
2097/* Convert from one address space to another. */
2098static rtx
2099m32c_addr_space_convert (rtx op, tree from_type, tree to_type)
2100{
2101 addr_space_t from_as = TYPE_ADDR_SPACE (TREE_TYPE (from_type));
2102 addr_space_t to_as = TYPE_ADDR_SPACE (TREE_TYPE (to_type));
2103 rtx result;
2104
2105 gcc_assert (from_as == ADDR_SPACE_GENERIC || from_as == ADDR_SPACE_FAR);
2106 gcc_assert (to_as == ADDR_SPACE_GENERIC || to_as == ADDR_SPACE_FAR);
2107
2108 if (to_as == ADDR_SPACE_GENERIC && from_as == ADDR_SPACE_FAR)
2109 {
2110 /* This is unpredictable, as we're truncating off usable address
2111 bits. */
2112
2113 result = gen_reg_rtx (HImode);
2114 emit_move_insn (result, simplify_subreg (HImode, op, SImode, 0));
2115 return result;
2116 }
2117 else if (to_as == ADDR_SPACE_FAR && from_as == ADDR_SPACE_GENERIC)
2118 {
2119 /* This always works. */
2120 result = gen_reg_rtx (SImode);
2121 emit_insn (gen_zero_extendhisi2 (result, op));
2122 return result;
2123 }
2124 else
2125 gcc_unreachable ();
2126}
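/* Roughly speaking: converting a generic pointer to a far pointer is
   always safe and is done by zero-extending the 16-bit value to
   SImode, while converting a far pointer back to a generic one just
   keeps the low 16 bits and may therefore end up pointing somewhere
   else entirely, as the comment above warns.  */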
2127
38b2d076
DD
2128/* Condition Code Status */
2129
2130#undef TARGET_FIXED_CONDITION_CODE_REGS
2131#define TARGET_FIXED_CONDITION_CODE_REGS m32c_fixed_condition_code_regs
2132static bool
2133m32c_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
2134{
2135 *p1 = FLG_REGNO;
2136 *p2 = INVALID_REGNUM;
2137 return true;
2138}
2139
2140/* Describing Relative Costs of Operations */
2141
0e607518 2142/* Implements TARGET_REGISTER_MOVE_COST. We make impossible moves
2143 prohibitively expensive, like trying to put QIs in r2/r3 (there are
2144 no opcodes to do that). We also discourage use of mem* registers
2145 since they're really memory. */
0e607518
AS
2146
2147#undef TARGET_REGISTER_MOVE_COST
2148#define TARGET_REGISTER_MOVE_COST m32c_register_move_cost
2149
2150static int
ef4bddc2 2151m32c_register_move_cost (machine_mode mode, reg_class_t from,
0e607518 2152 reg_class_t to)
38b2d076
DD
2153{
2154 int cost = COSTS_N_INSNS (3);
0e607518
AS
2155 HARD_REG_SET cc;
2156
2157/* FIXME: pick real values, but not 2 for now. */
2158 COPY_HARD_REG_SET (cc, reg_class_contents[(int) from]);
2159 IOR_HARD_REG_SET (cc, reg_class_contents[(int) to]);
2160
2161 if (mode == QImode
2162 && hard_reg_set_intersect_p (cc, reg_class_contents[R23_REGS]))
38b2d076 2163 {
0e607518 2164 if (hard_reg_set_subset_p (cc, reg_class_contents[R23_REGS]))
2165 cost = COSTS_N_INSNS (1000);
2166 else
2167 cost = COSTS_N_INSNS (80);
2168 }
2169
2170 if (!class_can_hold_mode (from, mode) || !class_can_hold_mode (to, mode))
2171 cost = COSTS_N_INSNS (1000);
2172
0e607518 2173 if (reg_classes_intersect_p (from, CR_REGS))
2174 cost += COSTS_N_INSNS (5);
2175
0e607518 2176 if (reg_classes_intersect_p (to, CR_REGS))
2177 cost += COSTS_N_INSNS (5);
2178
2179 if (from == MEM_REGS || to == MEM_REGS)
2180 cost += COSTS_N_INSNS (50);
0e607518
AS
2181 else if (reg_classes_intersect_p (from, MEM_REGS)
2182 || reg_classes_intersect_p (to, MEM_REGS))
2183 cost += COSTS_N_INSNS (10);
2184
2185#if DEBUG0
2186 fprintf (stderr, "register_move_cost %s from %s to %s = %d\n",
0e607518
AS
2187 mode_name[mode], class_names[(int) from], class_names[(int) to],
2188 cost);
38b2d076
DD
2189#endif
2190 return cost;
2191}
2192
0e607518
AS
2193/* Implements TARGET_MEMORY_MOVE_COST. */
2194
2195#undef TARGET_MEMORY_MOVE_COST
2196#define TARGET_MEMORY_MOVE_COST m32c_memory_move_cost
2197
2198static int
ef4bddc2 2199m32c_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
2200 reg_class_t rclass ATTRIBUTE_UNUSED,
2201 bool in ATTRIBUTE_UNUSED)
38b2d076
DD
2202{
2203 /* FIXME: pick real values. */
2204 return COSTS_N_INSNS (10);
2205}
2206
07127a0a
DD
2207/* Here we try to describe when we use multiple opcodes for one RTX so
2208 that gcc knows when to use them. */
2209#undef TARGET_RTX_COSTS
2210#define TARGET_RTX_COSTS m32c_rtx_costs
2211static bool
2212m32c_rtx_costs (rtx x, machine_mode mode, int outer_code,
2213 int opno ATTRIBUTE_UNUSED,
68f932c4 2214 int *total, bool speed ATTRIBUTE_UNUSED)
07127a0a 2215{
e548c9df 2216 int code = GET_CODE (x);
07127a0a
DD
2217 switch (code)
2218 {
2219 case REG:
2220 if (REGNO (x) >= MEM0_REGNO && REGNO (x) <= MEM7_REGNO)
2221 *total += COSTS_N_INSNS (500);
2222 else
2223 *total += COSTS_N_INSNS (1);
2224 return true;
2225
2226 case ASHIFT:
2227 case LSHIFTRT:
2228 case ASHIFTRT:
2229 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
2230 {
2231 /* mov.b r1l, r1h */
2232 *total += COSTS_N_INSNS (1);
2233 return true;
2234 }
2235 if (INTVAL (XEXP (x, 1)) > 8
2236 || INTVAL (XEXP (x, 1)) < -8)
2237 {
2238 /* mov.b #N, r1l */
2239 /* mov.b r1l, r1h */
2240 *total += COSTS_N_INSNS (2);
2241 return true;
2242 }
2243 return true;
2244
2245 case LE:
2246 case LEU:
2247 case LT:
2248 case LTU:
2249 case GT:
2250 case GTU:
2251 case GE:
2252 case GEU:
2253 case NE:
2254 case EQ:
2255 if (outer_code == SET)
2256 {
2257 *total += COSTS_N_INSNS (2);
2258 return true;
2259 }
2260 break;
2261
2262 case ZERO_EXTRACT:
2263 {
2264 rtx dest = XEXP (x, 0);
2265 rtx addr = XEXP (dest, 0);
2266 switch (GET_CODE (addr))
2267 {
2268 case CONST_INT:
2269 *total += COSTS_N_INSNS (1);
2270 break;
2271 case SYMBOL_REF:
2272 *total += COSTS_N_INSNS (3);
2273 break;
2274 default:
2275 *total += COSTS_N_INSNS (2);
2276 break;
2277 }
2278 return true;
2279 }
2280 break;
2281
2282 default:
2283 /* Reasonable default. */
e548c9df 2284 if (TARGET_A16 && mode == SImode)
2285 *total += COSTS_N_INSNS (2);
2286 break;
2287 }
2288 return false;
2289}
2290
2291#undef TARGET_ADDRESS_COST
2292#define TARGET_ADDRESS_COST m32c_address_cost
2293static int
ef4bddc2 2294m32c_address_cost (rtx addr, machine_mode mode ATTRIBUTE_UNUSED,
2295 addr_space_t as ATTRIBUTE_UNUSED,
2296 bool speed ATTRIBUTE_UNUSED)
07127a0a 2297{
80b093df 2298 int i;
07127a0a
DD
2299 /* fprintf(stderr, "\naddress_cost\n");
2300 debug_rtx(addr);*/
2301 switch (GET_CODE (addr))
2302 {
2303 case CONST_INT:
80b093df
DD
2304 i = INTVAL (addr);
2305 if (i == 0)
2306 return COSTS_N_INSNS(1);
2307 if (0 < i && i <= 255)
2308 return COSTS_N_INSNS(2);
2309 if (0 < i && i <= 65535)
2310 return COSTS_N_INSNS(3);
2311 return COSTS_N_INSNS(4);
07127a0a 2312 case SYMBOL_REF:
80b093df 2313 return COSTS_N_INSNS(4);
07127a0a 2314 case REG:
80b093df
DD
2315 return COSTS_N_INSNS(1);
2316 case PLUS:
2317 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
2318 {
2319 i = INTVAL (XEXP (addr, 1));
2320 if (i == 0)
2321 return COSTS_N_INSNS(1);
2322 if (0 < i && i <= 255)
2323 return COSTS_N_INSNS(2);
2324 if (0 < i && i <= 65535)
2325 return COSTS_N_INSNS(3);
2326 }
2327 return COSTS_N_INSNS(4);
07127a0a
DD
2328 default:
2329 return 0;
2330 }
2331}
2332
38b2d076
DD
2333/* Defining the Output Assembler Language */
2334
38b2d076
DD
2335/* Output of Data */
2336
2337/* We may have 24-bit sizes, as 24 bits is the native address size.
2338 Currently unused, but provided for completeness. */
2339#undef TARGET_ASM_INTEGER
2340#define TARGET_ASM_INTEGER m32c_asm_integer
2341static bool
2342m32c_asm_integer (rtx x, unsigned int size, int aligned_p)
2343{
2344 switch (size)
2345 {
2346 case 3:
2347 fprintf (asm_out_file, "\t.3byte\t");
2348 output_addr_const (asm_out_file, x);
2349 fputc ('\n', asm_out_file);
2350 return true;
e9555b13
DD
2351 case 4:
2352 if (GET_CODE (x) == SYMBOL_REF)
2353 {
2354 fprintf (asm_out_file, "\t.long\t");
2355 output_addr_const (asm_out_file, x);
2356 fputc ('\n', asm_out_file);
2357 return true;
2358 }
2359 break;
38b2d076
DD
2360 }
2361 return default_assemble_integer (x, size, aligned_p);
2362}
2363
2364/* Output of Assembler Instructions */
2365
a4174ebf 2366/* We use a lookup table because the addressing modes are non-orthogonal. */
38b2d076
DD
2367
2368static struct
2369{
2370 char code;
2371 char const *pattern;
2372 char const *format;
2373}
2374const conversions[] = {
2375 { 0, "r", "0" },
2376
2377 { 0, "mr", "z[1]" },
2378 { 0, "m+ri", "3[2]" },
2379 { 0, "m+rs", "3[2]" },
5fd5d713
DD
2380 { 0, "m+^Zrs", "5[4]" },
2381 { 0, "m+^Zri", "5[4]" },
2382 { 0, "m+^Z+ris", "7+6[5]" },
2383 { 0, "m+^Srs", "5[4]" },
2384 { 0, "m+^Sri", "5[4]" },
2385 { 0, "m+^S+ris", "7+6[5]" },
38b2d076
DD
2386 { 0, "m+r+si", "4+5[2]" },
2387 { 0, "ms", "1" },
2388 { 0, "mi", "1" },
2389 { 0, "m+si", "2+3" },
2390
2391 { 0, "mmr", "[z[2]]" },
2392 { 0, "mm+ri", "[4[3]]" },
2393 { 0, "mm+rs", "[4[3]]" },
2394 { 0, "mm+r+si", "[5+6[3]]" },
2395 { 0, "mms", "[[2]]" },
2396 { 0, "mmi", "[[2]]" },
2397 { 0, "mm+si", "[4[3]]" },
2398
2399 { 0, "i", "#0" },
2400 { 0, "s", "#0" },
2401 { 0, "+si", "#1+2" },
2402 { 0, "l", "#0" },
2403
2404 { 'l', "l", "0" },
2405 { 'd', "i", "0" },
2406 { 'd', "s", "0" },
2407 { 'd', "+si", "1+2" },
2408 { 'D', "i", "0" },
2409 { 'D', "s", "0" },
2410 { 'D', "+si", "1+2" },
2411 { 'x', "i", "#0" },
2412 { 'X', "i", "#0" },
2413 { 'm', "i", "#0" },
2414 { 'b', "i", "#0" },
07127a0a 2415 { 'B', "i", "0" },
38b2d076
DD
2416 { 'p', "i", "0" },
2417
2418 { 0, 0, 0 }
2419};
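/* A worked example of the table above (purely illustrative): the
   address (mem (plus (reg a0) (const_int 5))) encodes as the pattern
   "m+ri", and its format "3[2]" tells m32c_print_operand below to
   emit patternr[3] (the constant), a literal '[', patternr[2] (the
   register) and ']', i.e. "5[a0]".  */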
2420
2421/* This is in order according to the bitfield that pushm/popm use. */
2422static char const *pushm_regs[] = {
2423 "fb", "sb", "a1", "a0", "r3", "r2", "r1", "r0"
2424};
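/* As an illustration, the 'p' case in m32c_print_operand below walks
   bits 7..0 of the mask and prints pushm_regs[b] for each set bit,
   so a mask of 0xc0 comes out as "r0,r1" and 0x03 as "sb,fb".  */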
2425
4645179e
AS
2426/* Implements TARGET_PRINT_OPERAND. */
2427
2428#undef TARGET_PRINT_OPERAND
2429#define TARGET_PRINT_OPERAND m32c_print_operand
2430
2431static void
38b2d076
DD
2432m32c_print_operand (FILE * file, rtx x, int code)
2433{
2434 int i, j, b;
2435 const char *comma;
2436 HOST_WIDE_INT ival;
2437 int unsigned_const = 0;
ff485e71 2438 int force_sign;
38b2d076
DD
2439
2440 /* Multiplies; constants are converted to sign-extended format but
2441 we need unsigned, so 'u' and 'U' tell us what size unsigned we
2442 need. */
2443 if (code == 'u')
2444 {
2445 unsigned_const = 2;
2446 code = 0;
2447 }
2448 if (code == 'U')
2449 {
2450 unsigned_const = 1;
2451 code = 0;
2452 }
2453 /* This one is only for debugging; you can put it in a pattern to
2454 force this error. */
2455 if (code == '!')
2456 {
2457 fprintf (stderr, "dj: unreviewed pattern:");
2458 if (current_output_insn)
2459 debug_rtx (current_output_insn);
2460 gcc_unreachable ();
2461 }
2462 /* PSImode operations are either .w or .l depending on the target. */
2463 if (code == '&')
2464 {
2465 if (TARGET_A16)
2466 fprintf (file, "w");
2467 else
2468 fprintf (file, "l");
2469 return;
2470 }
2471 /* Inverted conditionals. */
2472 if (code == 'C')
2473 {
2474 switch (GET_CODE (x))
2475 {
2476 case LE:
2477 fputs ("gt", file);
2478 break;
2479 case LEU:
2480 fputs ("gtu", file);
2481 break;
2482 case LT:
2483 fputs ("ge", file);
2484 break;
2485 case LTU:
2486 fputs ("geu", file);
2487 break;
2488 case GT:
2489 fputs ("le", file);
2490 break;
2491 case GTU:
2492 fputs ("leu", file);
2493 break;
2494 case GE:
2495 fputs ("lt", file);
2496 break;
2497 case GEU:
2498 fputs ("ltu", file);
2499 break;
2500 case NE:
2501 fputs ("eq", file);
2502 break;
2503 case EQ:
2504 fputs ("ne", file);
2505 break;
2506 default:
2507 gcc_unreachable ();
2508 }
2509 return;
2510 }
2511 /* Regular conditionals. */
2512 if (code == 'c')
2513 {
2514 switch (GET_CODE (x))
2515 {
2516 case LE:
2517 fputs ("le", file);
2518 break;
2519 case LEU:
2520 fputs ("leu", file);
2521 break;
2522 case LT:
2523 fputs ("lt", file);
2524 break;
2525 case LTU:
2526 fputs ("ltu", file);
2527 break;
2528 case GT:
2529 fputs ("gt", file);
2530 break;
2531 case GTU:
2532 fputs ("gtu", file);
2533 break;
2534 case GE:
2535 fputs ("ge", file);
2536 break;
2537 case GEU:
2538 fputs ("geu", file);
2539 break;
2540 case NE:
2541 fputs ("ne", file);
2542 break;
2543 case EQ:
2544 fputs ("eq", file);
2545 break;
2546 default:
2547 gcc_unreachable ();
2548 }
2549 return;
2550 }
2551 /* Used in negsi2 to do HImode ops on the two parts of an SImode
2552 operand. */
2553 if (code == 'h' && GET_MODE (x) == SImode)
2554 {
2555 x = m32c_subreg (HImode, x, SImode, 0);
2556 code = 0;
2557 }
2558 if (code == 'H' && GET_MODE (x) == SImode)
2559 {
2560 x = m32c_subreg (HImode, x, SImode, 2);
2561 code = 0;
2562 }
07127a0a
DD
2563 if (code == 'h' && GET_MODE (x) == HImode)
2564 {
2565 x = m32c_subreg (QImode, x, HImode, 0);
2566 code = 0;
2567 }
2568 if (code == 'H' && GET_MODE (x) == HImode)
2569 {
2570 /* We can't actually represent this as an rtx. Do it here. */
2571 if (GET_CODE (x) == REG)
2572 {
2573 switch (REGNO (x))
2574 {
2575 case R0_REGNO:
2576 fputs ("r0h", file);
2577 return;
2578 case R1_REGNO:
2579 fputs ("r1h", file);
2580 return;
2581 default:
2582 gcc_unreachable();
2583 }
2584 }
2585 /* This should be a MEM. */
2586 x = m32c_subreg (QImode, x, HImode, 1);
2587 code = 0;
2588 }
2589 /* This is for BMcond, which always wants word register names. */
2590 if (code == 'h' && GET_MODE (x) == QImode)
2591 {
2592 if (GET_CODE (x) == REG)
2593 x = gen_rtx_REG (HImode, REGNO (x));
2594 code = 0;
2595 }
38b2d076
DD
2596 /* 'x' and 'X' need to be ignored for non-immediates. */
2597 if ((code == 'x' || code == 'X') && GET_CODE (x) != CONST_INT)
2598 code = 0;
2599
2600 encode_pattern (x);
ff485e71 2601 force_sign = 0;
38b2d076
DD
2602 for (i = 0; conversions[i].pattern; i++)
2603 if (conversions[i].code == code
2604 && streq (conversions[i].pattern, pattern))
2605 {
2606 for (j = 0; conversions[i].format[j]; j++)
2607 /* backslash quotes the next character in the output pattern. */
2608 if (conversions[i].format[j] == '\\')
2609 {
2610 fputc (conversions[i].format[j + 1], file);
2611 j++;
2612 }
2613 /* Digits in the output pattern indicate that the
2614 corresponding RTX is to be output at that point. */
2615 else if (ISDIGIT (conversions[i].format[j]))
2616 {
2617 rtx r = patternr[conversions[i].format[j] - '0'];
2618 switch (GET_CODE (r))
2619 {
2620 case REG:
2621 fprintf (file, "%s",
2622 reg_name_with_mode (REGNO (r), GET_MODE (r)));
2623 break;
2624 case CONST_INT:
2625 switch (code)
2626 {
2627 case 'b':
2628 case 'B':
2629 {
2630 int v = INTVAL (r);
2631 int i = (int) exact_log2 (v);
2632 if (i == -1)
2633 i = (int) exact_log2 ((v ^ 0xffff) & 0xffff);
2634 if (i == -1)
2635 i = (int) exact_log2 ((v ^ 0xff) & 0xff);
2636 /* Bit position. */
2637 fprintf (file, "%d", i);
2638 }
38b2d076
DD
2639 break;
2640 case 'x':
2641 /* Unsigned byte. */
2642 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2643 INTVAL (r) & 0xff);
2644 break;
2645 case 'X':
2646 /* Unsigned word. */
2647 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2648 INTVAL (r) & 0xffff);
2649 break;
2650 case 'p':
2651 /* pushm and popm encode a register set into a single byte. */
2652 comma = "";
2653 for (b = 7; b >= 0; b--)
2654 if (INTVAL (r) & (1 << b))
2655 {
2656 fprintf (file, "%s%s", comma, pushm_regs[b]);
2657 comma = ",";
2658 }
2659 break;
2660 case 'm':
2661 /* "Minus". Output -X */
2662 ival = (-INTVAL (r) & 0xffff);
2663 if (ival & 0x8000)
2664 ival = ival - 0x10000;
2665 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2666 break;
2667 default:
2668 ival = INTVAL (r);
2669 if (conversions[i].format[j + 1] == '[' && ival < 0)
2670 {
2671 /* We can simulate negative displacements by
2672 taking advantage of address space
2673 wrapping when the offset can span the
2674 entire address range. */
2675 rtx base =
2676 patternr[conversions[i].format[j + 2] - '0'];
2677 if (GET_CODE (base) == REG)
2678 switch (REGNO (base))
2679 {
2680 case A0_REGNO:
2681 case A1_REGNO:
2682 if (TARGET_A24)
2683 ival = 0x1000000 + ival;
2684 else
2685 ival = 0x10000 + ival;
2686 break;
2687 case SB_REGNO:
2688 if (TARGET_A16)
2689 ival = 0x10000 + ival;
2690 break;
2691 }
2692 }
2693 else if (code == 'd' && ival < 0 && j == 0)
2694 /* The "mova" opcode is used to do addition by
2695 computing displacements, but again, we need
2696 displacements to be unsigned *if* they're
2697 the only component of the displacement
2698 (i.e. no "symbol-4" type displacement). */
2699 ival = (TARGET_A24 ? 0x1000000 : 0x10000) + ival;
2700
2701 if (conversions[i].format[j] == '0')
2702 {
2703 /* More conversions to unsigned. */
2704 if (unsigned_const == 2)
2705 ival &= 0xffff;
2706 if (unsigned_const == 1)
2707 ival &= 0xff;
2708 }
2709 if (streq (conversions[i].pattern, "mi")
2710 || streq (conversions[i].pattern, "mmi"))
2711 {
2712 /* Integers used as addresses are unsigned. */
2713 ival &= (TARGET_A24 ? 0xffffff : 0xffff);
2714 }
ff485e71
DD
2715 if (force_sign && ival >= 0)
2716 fputc ('+', file);
38b2d076
DD
2717 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2718 break;
2719 }
2720 break;
2721 case CONST_DOUBLE:
2722 /* We don't have const_double constants. If it
2723 happens, make it obvious. */
2724 fprintf (file, "[const_double 0x%lx]",
2725 (unsigned long) CONST_DOUBLE_HIGH (r));
2726 break;
2727 case SYMBOL_REF:
2728 assemble_name (file, XSTR (r, 0));
2729 break;
2730 case LABEL_REF:
2731 output_asm_label (r);
2732 break;
2733 default:
2734 fprintf (stderr, "don't know how to print this operand:");
2735 debug_rtx (r);
2736 gcc_unreachable ();
2737 }
2738 }
2739 else
2740 {
2741 if (conversions[i].format[j] == 'z')
2742 {
2743 /* Some addressing modes *must* have a displacement,
2744 so insert a zero here if needed. */
2745 int k;
2746 for (k = j + 1; conversions[i].format[k]; k++)
2747 if (ISDIGIT (conversions[i].format[k]))
2748 {
2749 rtx reg = patternr[conversions[i].format[k] - '0'];
2750 if (GET_CODE (reg) == REG
2751 && (REGNO (reg) == SB_REGNO
2752 || REGNO (reg) == FB_REGNO
2753 || REGNO (reg) == SP_REGNO))
2754 fputc ('0', file);
2755 }
2756 continue;
2757 }
2758 /* Signed displacements off symbols need to have signs
2759 blended cleanly. */
2760 if (conversions[i].format[j] == '+'
ff485e71 2761 && (!code || code == 'D' || code == 'd')
38b2d076 2762 && ISDIGIT (conversions[i].format[j + 1])
2763 && (GET_CODE (patternr[conversions[i].format[j + 1] - '0'])
2764 == CONST_INT))
2765 {
2766 force_sign = 1;
2767 continue;
2768 }
38b2d076
DD
2769 fputc (conversions[i].format[j], file);
2770 }
2771 break;
2772 }
2773 if (!conversions[i].pattern)
2774 {
2775 fprintf (stderr, "unconvertible operand %c `%s'", code ? code : '-',
2776 pattern);
2777 debug_rtx (x);
2778 fprintf (file, "[%c.%s]", code ? code : '-', pattern);
2779 }
2780
2781 return;
2782}
2783
4645179e
AS
2784/* Implements TARGET_PRINT_OPERAND_PUNCT_VALID_P.
2785
2786 See m32c_print_operand above for descriptions of what these do. */
2787
2788#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
2789#define TARGET_PRINT_OPERAND_PUNCT_VALID_P m32c_print_operand_punct_valid_p
2790
2791static bool
2792m32c_print_operand_punct_valid_p (unsigned char c)
38b2d076
DD
2793{
2794 if (c == '&' || c == '!')
4645179e
AS
2795 return true;
2796
2797 return false;
38b2d076
DD
2798}
2799
4645179e
AS
2800/* Implements TARGET_PRINT_OPERAND_ADDRESS. Nothing unusual here. */
2801
2802#undef TARGET_PRINT_OPERAND_ADDRESS
2803#define TARGET_PRINT_OPERAND_ADDRESS m32c_print_operand_address
2804
2805static void
cc8ca59e 2806m32c_print_operand_address (FILE * stream, machine_mode /*mode*/, rtx address)
38b2d076 2807{
235e1fe8
NC
2808 if (GET_CODE (address) == MEM)
2809 address = XEXP (address, 0);
2810 else
2811 /* cf: gcc.dg/asm-4.c. */
2812 gcc_assert (GET_CODE (address) == REG);
2813
2814 m32c_print_operand (stream, address, 0);
38b2d076
DD
2815}
2816
2817/* Implements ASM_OUTPUT_REG_PUSH. Control registers are pushed
2818 differently than general registers. */
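/* For instance, pushing the 16-bit register r0 is emitted as
   "push.w r0" (a push size of 2 indexes 'w' in " bwll"), while the
   flags register takes the separate "pushc flg" form handled
   first.  */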
2819void
2820m32c_output_reg_push (FILE * s, int regno)
2821{
2822 if (regno == FLG_REGNO)
2823 fprintf (s, "\tpushc\tflg\n");
2824 else
04aff2c0 2825 fprintf (s, "\tpush.%c\t%s\n",
2826 " bwll"[reg_push_size (regno)], reg_names[regno]);
2827}
2828
2829/* Likewise for ASM_OUTPUT_REG_POP. */
2830void
2831m32c_output_reg_pop (FILE * s, int regno)
2832{
2833 if (regno == FLG_REGNO)
2834 fprintf (s, "\tpopc\tflg\n");
2835 else
04aff2c0 2836 fprintf (s, "\tpop.%c\t%s\n",
2837 " bwll"[reg_push_size (regno)], reg_names[regno]);
2838}
2839
2840/* Defining target-specific uses of `__attribute__' */
2841
2842/* Used to simplify the logic below. Find the attributes wherever
2843 they may be. */
2844#define M32C_ATTRIBUTES(decl) \
2845 (TYPE_P (decl)) ? TYPE_ATTRIBUTES (decl) \
2846 : DECL_ATTRIBUTES (decl) \
2847 ? (DECL_ATTRIBUTES (decl)) \
2848 : TYPE_ATTRIBUTES (TREE_TYPE (decl))
2849
2850/* Returns TRUE if the given tree has the "interrupt" attribute. */
2851static int
2852interrupt_p (tree node ATTRIBUTE_UNUSED)
2853{
2854 tree list = M32C_ATTRIBUTES (node);
2855 while (list)
2856 {
2857 if (is_attribute_p ("interrupt", TREE_PURPOSE (list)))
2858 return 1;
2859 list = TREE_CHAIN (list);
2860 }
65655f79
DD
2861 return fast_interrupt_p (node);
2862}
2863
2864/* Returns TRUE if the given tree has the "bank_switch" attribute. */
2865static int
2866bank_switch_p (tree node ATTRIBUTE_UNUSED)
2867{
2868 tree list = M32C_ATTRIBUTES (node);
2869 while (list)
2870 {
2871 if (is_attribute_p ("bank_switch", TREE_PURPOSE (list)))
2872 return 1;
2873 list = TREE_CHAIN (list);
2874 }
2875 return 0;
2876}
2877
2878/* Returns TRUE if the given tree has the "fast_interrupt" attribute. */
2879static int
2880fast_interrupt_p (tree node ATTRIBUTE_UNUSED)
2881{
2882 tree list = M32C_ATTRIBUTES (node);
2883 while (list)
2884 {
2885 if (is_attribute_p ("fast_interrupt", TREE_PURPOSE (list)))
2886 return 1;
2887 list = TREE_CHAIN (list);
2888 }
38b2d076
DD
2889 return 0;
2890}
2891
2892static tree
2893interrupt_handler (tree * node ATTRIBUTE_UNUSED,
2894 tree name ATTRIBUTE_UNUSED,
2895 tree args ATTRIBUTE_UNUSED,
2896 int flags ATTRIBUTE_UNUSED,
2897 bool * no_add_attrs ATTRIBUTE_UNUSED)
2898{
2899 return NULL_TREE;
2900}
2901
5abd2125
JS
2902/* Returns TRUE if given tree has the "function_vector" attribute. */
2903int
2904m32c_special_page_vector_p (tree func)
2905{
653e2568
DD
2906 tree list;
2907
5abd2125
JS
2908 if (TREE_CODE (func) != FUNCTION_DECL)
2909 return 0;
2910
653e2568 2911 list = M32C_ATTRIBUTES (func);
5abd2125
JS
2912 while (list)
2913 {
2914 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2915 return 1;
2916 list = TREE_CHAIN (list);
2917 }
2918 return 0;
2919}
2920
2921static tree
2922function_vector_handler (tree * node ATTRIBUTE_UNUSED,
2923 tree name ATTRIBUTE_UNUSED,
2924 tree args ATTRIBUTE_UNUSED,
2925 int flags ATTRIBUTE_UNUSED,
2926 bool * no_add_attrs ATTRIBUTE_UNUSED)
2927{
2928 if (TARGET_R8C)
2929 {
2930 /* The attribute is not supported for the R8C target. */
2931 warning (OPT_Wattributes,
2932 "%qE attribute is not supported for R8C target",
2933 name);
5abd2125
JS
2934 *no_add_attrs = true;
2935 }
2936 else if (TREE_CODE (*node) != FUNCTION_DECL)
2937 {
2938 /* The attribute must be applied to functions only. */
2939 warning (OPT_Wattributes,
2940 "%qE attribute applies only to functions",
2941 name);
5abd2125
JS
2942 *no_add_attrs = true;
2943 }
2944 else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
2945 {
2946 /* The argument must be a constant integer. */
2947 warning (OPT_Wattributes,
2948 "%qE attribute argument not an integer constant",
2949 name);
5abd2125
JS
2950 *no_add_attrs = true;
2951 }
2952 else if (TREE_INT_CST_LOW (TREE_VALUE (args)) < 18
2953 || TREE_INT_CST_LOW (TREE_VALUE (args)) > 255)
2954 {
2955 /* The argument value must be between 18 and 255. */
2956 warning (OPT_Wattributes,
2957 "%qE attribute argument should be between 18 to 255",
2958 name);
5abd2125
JS
2959 *no_add_attrs = true;
2960 }
2961 return NULL_TREE;
2962}
2963
2964/* If the function is assigned the attribute 'function_vector', it
2965 returns the function vector number, otherwise returns zero. */
2966int
2967current_function_special_page_vector (rtx x)
2968{
2969 int num;
2970
2971 if ((GET_CODE(x) == SYMBOL_REF)
2972 && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
2973 {
653e2568 2974 tree list;
5abd2125
JS
2975 tree t = SYMBOL_REF_DECL (x);
2976
2977 if (TREE_CODE (t) != FUNCTION_DECL)
2978 return 0;
2979
653e2568 2980 list = M32C_ATTRIBUTES (t);
5abd2125
JS
2981 while (list)
2982 {
2983 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2984 {
2985 num = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list)));
2986 return num;
2987 }
2988
2989 list = TREE_CHAIN (list);
2990 }
2991
2992 return 0;
2993 }
2994 else
2995 return 0;
2996}
2997
38b2d076
DD
2998#undef TARGET_ATTRIBUTE_TABLE
2999#define TARGET_ATTRIBUTE_TABLE m32c_attribute_table
3000static const struct attribute_spec m32c_attribute_table[] = {
62d784f7
KT
3001 {"interrupt", 0, 0, false, false, false, interrupt_handler, false},
3002 {"bank_switch", 0, 0, false, false, false, interrupt_handler, false},
3003 {"fast_interrupt", 0, 0, false, false, false, interrupt_handler, false},
3004 {"function_vector", 1, 1, true, false, false, function_vector_handler,
3005 false},
3006 {0, 0, 0, 0, 0, 0, 0, false}
38b2d076
DD
3007};
3008
3009#undef TARGET_COMP_TYPE_ATTRIBUTES
3010#define TARGET_COMP_TYPE_ATTRIBUTES m32c_comp_type_attributes
3011static int
3101faab
KG
3012m32c_comp_type_attributes (const_tree type1 ATTRIBUTE_UNUSED,
3013 const_tree type2 ATTRIBUTE_UNUSED)
38b2d076
DD
3014{
3015 /* 0=incompatible 1=compatible 2=warning */
3016 return 1;
3017}
3018
3019#undef TARGET_INSERT_ATTRIBUTES
3020#define TARGET_INSERT_ATTRIBUTES m32c_insert_attributes
3021static void
3022m32c_insert_attributes (tree node ATTRIBUTE_UNUSED,
3023 tree * attr_ptr ATTRIBUTE_UNUSED)
3024{
f6052f86
DD
3025 unsigned addr;
3026 /* See if we need to make #pragma address variables volatile. */
3027
3028 if (TREE_CODE (node) == VAR_DECL)
3029 {
444d6efe 3030 const char *name = IDENTIFIER_POINTER (DECL_NAME (node));
f6052f86
DD
3031 if (m32c_get_pragma_address (name, &addr))
3032 {
3033 TREE_THIS_VOLATILE (node) = true;
3034 }
3035 }
3036}
3037
f6052f86 3038/* Hash table of pragma info. */
fb5c464a 3039static GTY(()) hash_map<nofree_string_hash, unsigned> *pragma_htab;
f6052f86
DD
3040
3041void
3042m32c_note_pragma_address (const char *varname, unsigned address)
3043{
f6052f86 3044 if (!pragma_htab)
fb5c464a 3045 pragma_htab = hash_map<nofree_string_hash, unsigned>::create_ggc (31);
f6052f86 3046
2a22f99c
TS
3047 const char *name = ggc_strdup (varname);
3048 unsigned int *slot = &pragma_htab->get_or_insert (name);
3049 *slot = address;
f6052f86
DD
3050}
3051
3052static bool
3053m32c_get_pragma_address (const char *varname, unsigned *address)
3054{
f6052f86
DD
3055 if (!pragma_htab)
3056 return false;
3057
2a22f99c
TS
3058 unsigned int *slot = pragma_htab->get (varname);
3059 if (slot)
f6052f86 3060 {
2a22f99c 3061 *address = *slot;
f6052f86
DD
3062 return true;
3063 }
3064 return false;
3065}
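/* Sketch of the net effect of the pragma machinery above, assuming a
   hypothetical variable registered via
   m32c_note_pragma_address ("port0", 0x2e0): the output routine
   below emits

       port0 = 0x02e0

   instead of a .comm directive, and m32c_insert_attributes above has
   already marked the variable volatile.  */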
3066
3067void
444d6efe
JR
3068m32c_output_aligned_common (FILE *stream, tree decl ATTRIBUTE_UNUSED,
3069 const char *name,
f6052f86
DD
3070 int size, int align, int global)
3071{
3072 unsigned address;
3073
3074 if (m32c_get_pragma_address (name, &address))
3075 {
3076 /* We never output these as global. */
3077 assemble_name (stream, name);
3078 fprintf (stream, " = 0x%04x\n", address);
3079 return;
3080 }
3081 if (!global)
3082 {
3083 fprintf (stream, "\t.local\t");
3084 assemble_name (stream, name);
3085 fprintf (stream, "\n");
3086 }
3087 fprintf (stream, "\t.comm\t");
3088 assemble_name (stream, name);
3089 fprintf (stream, ",%u,%u\n", size, align / BITS_PER_UNIT);
38b2d076
DD
3090}
3091
3092/* Predicates */
3093
f9b89438 3094/* This is a list of legal subregs of hard regs. */
67fc44cb
DD
3095static const struct {
3096 unsigned char outer_mode_size;
3097 unsigned char inner_mode_size;
3098 unsigned char byte_mask;
3099 unsigned char legal_when;
f9b89438 3100 unsigned int regno;
f9b89438 3101} legal_subregs[] = {
67fc44cb
DD
3102 {1, 2, 0x03, 1, R0_REGNO}, /* r0h r0l */
3103 {1, 2, 0x03, 1, R1_REGNO}, /* r1h r1l */
3104 {1, 2, 0x01, 1, A0_REGNO},
3105 {1, 2, 0x01, 1, A1_REGNO},
f9b89438 3106
67fc44cb
DD
3107 {1, 4, 0x01, 1, A0_REGNO},
3108 {1, 4, 0x01, 1, A1_REGNO},
f9b89438 3109
67fc44cb
DD
3110 {2, 4, 0x05, 1, R0_REGNO}, /* r2 r0 */
3111 {2, 4, 0x05, 1, R1_REGNO}, /* r3 r1 */
3112 {2, 4, 0x05, 16, A0_REGNO}, /* a1 a0 */
3113 {2, 4, 0x01, 24, A0_REGNO}, /* a1 a0 */
3114 {2, 4, 0x01, 24, A1_REGNO}, /* a1 a0 */
f9b89438 3115
67fc44cb 3116 {4, 8, 0x55, 1, R0_REGNO}, /* r3 r1 r2 r0 */
f9b89438
DD
3117};
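/* Reading the table above, as an example: {1, 2, 0x03, 1, R0_REGNO}
   says a one-byte subreg of the two-byte r0 is legal at byte offsets
   0 and 1 (mask 0x03), i.e. r0l and r0h, on every target
   (legal_when == 1), while the {2, 4, ...} A0/A1 entries are legal
   only on 16- or 24-bit address targets as selected by
   legal_when.  */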
3118
3119/* Returns TRUE if OP is a subreg of a hard reg which we don't
f6052f86 3120 support. We also bail on MEMs with illegal addresses. */
f9b89438
DD
3121bool
3122m32c_illegal_subreg_p (rtx op)
3123{
f9b89438
DD
3124 int offset;
3125 unsigned int i;
ef4bddc2 3126 machine_mode src_mode, dest_mode;
f9b89438 3127
f6052f86
DD
3128 if (GET_CODE (op) == MEM
3129 && ! m32c_legitimate_address_p (Pmode, XEXP (op, 0), false))
3130 {
3131 return true;
3132 }
3133
f9b89438
DD
3134 if (GET_CODE (op) != SUBREG)
3135 return false;
3136
3137 dest_mode = GET_MODE (op);
3138 offset = SUBREG_BYTE (op);
3139 op = SUBREG_REG (op);
3140 src_mode = GET_MODE (op);
3141
3142 if (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (src_mode))
3143 return false;
3144 if (GET_CODE (op) != REG)
3145 return false;
3146 if (REGNO (op) >= MEM0_REGNO)
3147 return false;
3148
3149 offset = (1 << offset);
3150
67fc44cb 3151 for (i = 0; i < ARRAY_SIZE (legal_subregs); i ++)
f9b89438
DD
3152 if (legal_subregs[i].outer_mode_size == GET_MODE_SIZE (dest_mode)
3153 && legal_subregs[i].regno == REGNO (op)
3154 && legal_subregs[i].inner_mode_size == GET_MODE_SIZE (src_mode)
3155 && legal_subregs[i].byte_mask & offset)
3156 {
3157 switch (legal_subregs[i].legal_when)
3158 {
3159 case 1:
3160 return false;
3161 case 16:
3162 if (TARGET_A16)
3163 return false;
3164 break;
3165 case 24:
3166 if (TARGET_A24)
3167 return false;
3168 break;
3169 }
3170 }
3171 return true;
3172}
3173
38b2d076
DD
3174/* Returns TRUE if we support a move between the first two operands.
3175 At the moment, we just want to discourage mem to mem moves until
3176 after reload, because reload has a hard time with our limited
3177 number of address registers, and we can get into a situation where
3178 we need three of them when we only have two. */
3179bool
ef4bddc2 3180m32c_mov_ok (rtx * operands, machine_mode mode ATTRIBUTE_UNUSED)
38b2d076
DD
3181{
3182 rtx op0 = operands[0];
3183 rtx op1 = operands[1];
3184
3185 if (TARGET_A24)
3186 return true;
3187
3188#define DEBUG_MOV_OK 0
3189#if DEBUG_MOV_OK
3190 fprintf (stderr, "m32c_mov_ok %s\n", mode_name[mode]);
3191 debug_rtx (op0);
3192 debug_rtx (op1);
3193#endif
3194
3195 if (GET_CODE (op0) == SUBREG)
3196 op0 = XEXP (op0, 0);
3197 if (GET_CODE (op1) == SUBREG)
3198 op1 = XEXP (op1, 0);
3199
3200 if (GET_CODE (op0) == MEM
3201 && GET_CODE (op1) == MEM
3202 && ! reload_completed)
3203 {
3204#if DEBUG_MOV_OK
3205 fprintf (stderr, " - no, mem to mem\n");
3206#endif
3207 return false;
3208 }
3209
3210#if DEBUG_MOV_OK
3211 fprintf (stderr, " - ok\n");
3212#endif
3213 return true;
3214}
3215
ff485e71
DD
3216/* Returns TRUE if two consecutive HImode mov instructions, generated
3217 for moving an immediate double data to a double data type variable
3218 location, can be combined into single SImode mov instruction. */
3219bool
55356334 3220m32c_immd_dbl_mov (rtx * operands ATTRIBUTE_UNUSED,
ef4bddc2 3221 machine_mode mode ATTRIBUTE_UNUSED)
ff485e71 3222{
55356334
RS
3223 /* ??? This relied on the now-defunct MEM_SCALAR and MEM_IN_STRUCT_P
3224 flags. */
ff485e71
DD
3225 return false;
3226}
3227
38b2d076
DD
3228/* Expanders */
3229
3230/* Subregs are non-orthogonal for us, because our registers are all
3231 different sizes. */
3232static rtx
ef4bddc2
RS
3233m32c_subreg (machine_mode outer,
3234 rtx x, machine_mode inner, int byte)
38b2d076
DD
3235{
3236 int r, nr = -1;
3237
3238 /* Converting MEMs to different types that are the same size, we
3239 just rewrite them. */
3240 if (GET_CODE (x) == SUBREG
3241 && SUBREG_BYTE (x) == 0
3242 && GET_CODE (SUBREG_REG (x)) == MEM
3243 && (GET_MODE_SIZE (GET_MODE (x))
3244 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
3245 {
3246 rtx oldx = x;
3247 x = gen_rtx_MEM (GET_MODE (x), XEXP (SUBREG_REG (x), 0));
3248 MEM_COPY_ATTRIBUTES (x, SUBREG_REG (oldx));
3249 }
3250
3251 /* Push/pop get done as smaller push/pops. */
3252 if (GET_CODE (x) == MEM
3253 && (GET_CODE (XEXP (x, 0)) == PRE_DEC
3254 || GET_CODE (XEXP (x, 0)) == POST_INC))
3255 return gen_rtx_MEM (outer, XEXP (x, 0));
3256 if (GET_CODE (x) == SUBREG
3257 && GET_CODE (XEXP (x, 0)) == MEM
3258 && (GET_CODE (XEXP (XEXP (x, 0), 0)) == PRE_DEC
3259 || GET_CODE (XEXP (XEXP (x, 0), 0)) == POST_INC))
3260 return gen_rtx_MEM (outer, XEXP (XEXP (x, 0), 0));
3261
3262 if (GET_CODE (x) != REG)
146456c1
DD
3263 {
3264 rtx r = simplify_gen_subreg (outer, x, inner, byte);
3265 if (GET_CODE (r) == SUBREG
3266 && GET_CODE (x) == MEM
3267 && MEM_VOLATILE_P (x))
3268 {
3269 /* Volatile MEMs don't get simplified, but we need them to
3270 be. We are little endian, so the subreg byte is the
3271 offset. */
91140cd3 3272 r = adjust_address_nv (x, outer, byte);
146456c1
DD
3273 }
3274 return r;
3275 }
38b2d076
DD
3276
3277 r = REGNO (x);
3278 if (r >= FIRST_PSEUDO_REGISTER || r == AP_REGNO)
3279 return simplify_gen_subreg (outer, x, inner, byte);
3280
3281 if (IS_MEM_REGNO (r))
3282 return simplify_gen_subreg (outer, x, inner, byte);
3283
3284 /* This is where the complexities of our register layout are
3285 described. */
3286 if (byte == 0)
3287 nr = r;
3288 else if (outer == HImode)
3289 {
3290 if (r == R0_REGNO && byte == 2)
3291 nr = R2_REGNO;
3292 else if (r == R0_REGNO && byte == 4)
3293 nr = R1_REGNO;
3294 else if (r == R0_REGNO && byte == 6)
3295 nr = R3_REGNO;
3296 else if (r == R1_REGNO && byte == 2)
3297 nr = R3_REGNO;
3298 else if (r == A0_REGNO && byte == 2)
3299 nr = A1_REGNO;
3300 }
3301 else if (outer == SImode)
3302 {
3303 if (r == R0_REGNO && byte == 0)
3304 nr = R0_REGNO;
3305 else if (r == R0_REGNO && byte == 4)
3306 nr = R1_REGNO;
3307 }
3308 if (nr == -1)
3309 {
3310 fprintf (stderr, "m32c_subreg %s %s %d\n",
3311 mode_name[outer], mode_name[inner], byte);
3312 debug_rtx (x);
3313 gcc_unreachable ();
3314 }
3315 return gen_rtx_REG (outer, nr);
3316}
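/* For example, with the mapping above an HImode subreg of r0 at byte
   2 comes back as r2 and at byte 4 as r1: wide values are laid out
   across r0/r2/r1/r3 rather than in simple register-number
   order.  */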
3317
3318/* Used to emit move instructions. We split some moves,
3319 and avoid mem-mem moves. */
3320int
ef4bddc2 3321m32c_prepare_move (rtx * operands, machine_mode mode)
38b2d076 3322{
5fd5d713
DD
3323 if (far_addr_space_p (operands[0])
3324 && CONSTANT_P (operands[1]))
3325 {
3326 operands[1] = force_reg (GET_MODE (operands[0]), operands[1]);
3327 }
38b2d076
DD
3328 if (TARGET_A16 && mode == PSImode)
3329 return m32c_split_move (operands, mode, 1);
3330 if ((GET_CODE (operands[0]) == MEM)
3331 && (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY))
3332 {
3333 rtx pmv = XEXP (operands[0], 0);
3334 rtx dest_reg = XEXP (pmv, 0);
3335 rtx dest_mod = XEXP (pmv, 1);
3336
f7df4a84 3337 emit_insn (gen_rtx_SET (dest_reg, dest_mod));
38b2d076
DD
3338 operands[0] = gen_rtx_MEM (mode, dest_reg);
3339 }
b3a13419 3340 if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
38b2d076
DD
3341 operands[1] = copy_to_mode_reg (mode, operands[1]);
3342 return 0;
3343}
3344
3345#define DEBUG_SPLIT 0
3346
3347/* Returns TRUE if the given PSImode move should be split. We split
3348 for all r8c/m16c moves, since it doesn't support them, and for
3349 POP.L as we can only *push* SImode. */
3350int
3351m32c_split_psi_p (rtx * operands)
3352{
3353#if DEBUG_SPLIT
3354 fprintf (stderr, "\nm32c_split_psi_p\n");
3355 debug_rtx (operands[0]);
3356 debug_rtx (operands[1]);
3357#endif
3358 if (TARGET_A16)
3359 {
3360#if DEBUG_SPLIT
3361 fprintf (stderr, "yes, A16\n");
3362#endif
3363 return 1;
3364 }
3365 if (GET_CODE (operands[1]) == MEM
3366 && GET_CODE (XEXP (operands[1], 0)) == POST_INC)
3367 {
3368#if DEBUG_SPLIT
3369 fprintf (stderr, "yes, pop.l\n");
3370#endif
3371 return 1;
3372 }
3373#if DEBUG_SPLIT
3374 fprintf (stderr, "no, default\n");
3375#endif
3376 return 0;
3377}
3378
3379/* Split the given move. SPLIT_ALL is 0 if splitting is optional
3380 (define_expand), 1 if it is not optional (define_insn_and_split),
3381 and 3 for define_split (alternate api). */
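/* For instance, on a 16-bit target an SImode register move is
   rewritten here as two HImode moves of its halves, and an SImode
   push becomes two HImode pushes emitted high half first so the
   value lands in memory in the usual little-endian layout.  */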
3382int
ef4bddc2 3383m32c_split_move (rtx * operands, machine_mode mode, int split_all)
38b2d076
DD
3384{
3385 rtx s[4], d[4];
3386 int parts, si, di, rev = 0;
3387 int rv = 0, opi = 2;
ef4bddc2 3388 machine_mode submode = HImode;
38b2d076
DD
3389 rtx *ops, local_ops[10];
3390
3391 /* define_split modifies the existing operands, but the other two
3392 emit new insns. OPS is where we store the operand pairs, which
3393 we emit later. */
3394 if (split_all == 3)
3395 ops = operands;
3396 else
3397 ops = local_ops;
3398
3399 /* Else HImode. */
3400 if (mode == DImode)
3401 submode = SImode;
3402
3403 /* Before splitting mem-mem moves, force one operand into a
3404 register. */
b3a13419 3405 if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
38b2d076
DD
3406 {
3407#if DEBUG0
3408 fprintf (stderr, "force_reg...\n");
3409 debug_rtx (operands[1]);
3410#endif
3411 operands[1] = force_reg (mode, operands[1]);
3412#if DEBUG0
3413 debug_rtx (operands[1]);
3414#endif
3415 }
3416
3417 parts = 2;
3418
3419#if DEBUG_SPLIT
b3a13419
ILT
3420 fprintf (stderr, "\nsplit_move %d all=%d\n", !can_create_pseudo_p (),
3421 split_all);
38b2d076
DD
3422 debug_rtx (operands[0]);
3423 debug_rtx (operands[1]);
3424#endif
3425
eb5f0c07
DD
3426 /* Note that split_all is not used to select the api after this
3427 point, so it's safe to set it to 3 even with define_insn. */
3428 /* None of the chips can move SI operands to sp-relative addresses,
3429 so we always split those. */
03dd17b1 3430 if (satisfies_constraint_Ss (operands[0]))
eb5f0c07
DD
3431 split_all = 3;
3432
5fd5d713
DD
3433 if (TARGET_A16
3434 && (far_addr_space_p (operands[0])
3435 || far_addr_space_p (operands[1])))
3436 split_all |= 1;
3437
38b2d076
DD
3438 /* We don't need to split these. */
3439 if (TARGET_A24
3440 && split_all != 3
3441 && (mode == SImode || mode == PSImode)
3442 && !(GET_CODE (operands[1]) == MEM
3443 && GET_CODE (XEXP (operands[1], 0)) == POST_INC))
3444 return 0;
3445
3446 /* First, enumerate the subregs we'll be dealing with. */
3447 for (si = 0; si < parts; si++)
3448 {
3449 d[si] =
3450 m32c_subreg (submode, operands[0], mode,
3451 si * GET_MODE_SIZE (submode));
3452 s[si] =
3453 m32c_subreg (submode, operands[1], mode,
3454 si * GET_MODE_SIZE (submode));
3455 }
3456
3457 /* Split pushes by emitting a sequence of smaller pushes. */
3458 if (GET_CODE (d[0]) == MEM && GET_CODE (XEXP (d[0], 0)) == PRE_DEC)
3459 {
3460 for (si = parts - 1; si >= 0; si--)
3461 {
3462 ops[opi++] = gen_rtx_MEM (submode,
3463 gen_rtx_PRE_DEC (Pmode,
3464 gen_rtx_REG (Pmode,
3465 SP_REGNO)));
3466 ops[opi++] = s[si];
3467 }
3468
3469 rv = 1;
3470 }
3471 /* Likewise for pops. */
3472 else if (GET_CODE (s[0]) == MEM && GET_CODE (XEXP (s[0], 0)) == POST_INC)
3473 {
3474 for (di = 0; di < parts; di++)
3475 {
3476 ops[opi++] = d[di];
3477 ops[opi++] = gen_rtx_MEM (submode,
3478 gen_rtx_POST_INC (Pmode,
3479 gen_rtx_REG (Pmode,
3480 SP_REGNO)));
3481 }
3482 rv = 1;
3483 }
3484 else if (split_all)
3485 {
3486 /* if d[di] == s[si] for any di < si, we'll early clobber. */
3487 for (di = 0; di < parts - 1; di++)
3488 for (si = di + 1; si < parts; si++)
3489 if (reg_mentioned_p (d[di], s[si]))
3490 rev = 1;
3491
3492 if (rev)
3493 for (si = 0; si < parts; si++)
3494 {
3495 ops[opi++] = d[si];
3496 ops[opi++] = s[si];
3497 }
3498 else
3499 for (si = parts - 1; si >= 0; si--)
3500 {
3501 ops[opi++] = d[si];
3502 ops[opi++] = s[si];
3503 }
3504 rv = 1;
3505 }
3506 /* Now emit any moves we may have accumulated. */
3507 if (rv && split_all != 3)
3508 {
3509 int i;
3510 for (i = 2; i < opi; i += 2)
3511 emit_move_insn (ops[i], ops[i + 1]);
3512 }
3513 return rv;
3514}
3515
07127a0a
DD
3516/* The m32c has a number of opcodes that act like memcpy, strcmp, and
3517 the like. For the R8C they expect one of the addresses to be in
3518 R1L:An so we need to arrange for that. Otherwise, it's just a
3519 matter of picking out the operands we want and emitting the right
3520 pattern for them. All these expanders, which correspond to
3521 patterns in blkmov.md, must return nonzero if they expand the insn,
3522 or zero if they should FAIL. */
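/* As a sketch of the simplest case handled below (the constants are
   illustrative): memset (dst, 0, 8) with an even, known count takes
   the fast path in m32c_expand_setmemhi, which halves the count,
   duplicates the byte value into a 16-bit immediate and emits the
   word-sized setmemhi_w* pattern instead of the byte-sized one.  */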
3523
3524/* This is a memset() opcode. All operands are implied, so we need to
3525 arrange for them to be in the right registers. The opcode wants
3526 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3527 the count (HI), and $2 the value (QI). */
3528int
3529m32c_expand_setmemhi(rtx *operands)
3530{
3531 rtx desta, count, val;
3532 rtx desto, counto;
3533
3534 desta = XEXP (operands[0], 0);
3535 count = operands[1];
3536 val = operands[2];
3537
3538 desto = gen_reg_rtx (Pmode);
3539 counto = gen_reg_rtx (HImode);
3540
3541 if (GET_CODE (desta) != REG
3542 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3543 desta = copy_to_mode_reg (Pmode, desta);
3544
3545 /* This looks like an arbitrary restriction, but this is by far the
3546 most common case. For counts 8..14 this actually results in
3547 smaller code with no speed penalty because the half-sized
3548 constant can be loaded with a shorter opcode. */
3549 if (GET_CODE (count) == CONST_INT
3550 && GET_CODE (val) == CONST_INT
3551 && ! (INTVAL (count) & 1)
3552 && (INTVAL (count) > 1)
3553 && (INTVAL (val) <= 7 && INTVAL (val) >= -8))
3554 {
3555 unsigned v = INTVAL (val) & 0xff;
3556 v = v | (v << 8);
3557 count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3558 val = copy_to_mode_reg (HImode, GEN_INT (v));
3559 if (TARGET_A16)
3560 emit_insn (gen_setmemhi_whi_op (desto, counto, val, desta, count));
3561 else
3562 emit_insn (gen_setmemhi_wpsi_op (desto, counto, val, desta, count));
3563 return 1;
3564 }
3565
3566 /* This is the generalized memset() case. */
3567 if (GET_CODE (val) != REG
3568 || REGNO (val) < FIRST_PSEUDO_REGISTER)
3569 val = copy_to_mode_reg (QImode, val);
3570
3571 if (GET_CODE (count) != REG
3572 || REGNO (count) < FIRST_PSEUDO_REGISTER)
3573 count = copy_to_mode_reg (HImode, count);
3574
3575 if (TARGET_A16)
3576 emit_insn (gen_setmemhi_bhi_op (desto, counto, val, desta, count));
3577 else
3578 emit_insn (gen_setmemhi_bpsi_op (desto, counto, val, desta, count));
3579
3580 return 1;
3581}
3582
3583/* This is a memcpy() opcode. All operands are implied, so we need to
3584 arrange for them to be in the right registers. The opcode wants
3585 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3586 is the source (MEM:BLK), and $2 the count (HI). */
3587int
3588m32c_expand_movmemhi(rtx *operands)
3589{
3590 rtx desta, srca, count;
3591 rtx desto, srco, counto;
3592
3593 desta = XEXP (operands[0], 0);
3594 srca = XEXP (operands[1], 0);
3595 count = operands[2];
3596
3597 desto = gen_reg_rtx (Pmode);
3598 srco = gen_reg_rtx (Pmode);
3599 counto = gen_reg_rtx (HImode);
3600
3601 if (GET_CODE (desta) != REG
3602 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3603 desta = copy_to_mode_reg (Pmode, desta);
3604
3605 if (GET_CODE (srca) != REG
3606 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3607 srca = copy_to_mode_reg (Pmode, srca);
3608
3609 /* Similar to setmem, but we don't need to check the value. */
3610 if (GET_CODE (count) == CONST_INT
3611 && ! (INTVAL (count) & 1)
3612 && (INTVAL (count) > 1))
3613 {
3614 count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3615 if (TARGET_A16)
3616 emit_insn (gen_movmemhi_whi_op (desto, srco, counto, desta, srca, count));
3617 else
3618 emit_insn (gen_movmemhi_wpsi_op (desto, srco, counto, desta, srca, count));
3619 return 1;
3620 }
3621
3622 /* This is the generalized memcpy() case. */
3623 if (GET_CODE (count) != REG
3624 || REGNO (count) < FIRST_PSEUDO_REGISTER)
3625 count = copy_to_mode_reg (HImode, count);
3626
3627 if (TARGET_A16)
3628 emit_insn (gen_movmemhi_bhi_op (desto, srco, counto, desta, srca, count));
3629 else
3630 emit_insn (gen_movmemhi_bpsi_op (desto, srco, counto, desta, srca, count));
3631
3632 return 1;
3633}
3634
3635/* This is a stpcpy() opcode. $0 is the destination (MEM:BLK) after
3636 the copy, which should point to the NUL at the end of the string,
3637 $1 is the destination (MEM:BLK), and $2 is the source (MEM:BLK).
3638 Since our opcode leaves the destination pointing *after* the NUL,
3639 we must emit an adjustment. */
3640int
3641m32c_expand_movstr(rtx *operands)
3642{
3643 rtx desta, srca;
3644 rtx desto, srco;
3645
3646 desta = XEXP (operands[1], 0);
3647 srca = XEXP (operands[2], 0);
3648
3649 desto = gen_reg_rtx (Pmode);
3650 srco = gen_reg_rtx (Pmode);
3651
3652 if (GET_CODE (desta) != REG
3653 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3654 desta = copy_to_mode_reg (Pmode, desta);
3655
3656 if (GET_CODE (srca) != REG
3657 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3658 srca = copy_to_mode_reg (Pmode, srca);
3659
3660 emit_insn (gen_movstr_op (desto, srco, desta, srca));
3661 /* desto ends up being a1, which allows this type of add through MOVA. */
3662 emit_insn (gen_addpsi3 (operands[0], desto, GEN_INT (-1)));
3663
3664 return 1;
3665}
3666
3667/* This is a strcmp() opcode. $0 is the destination (HI) which holds
3668 <=>0 depending on the comparison, $1 is one string (MEM:BLK), and
3669 $2 is the other (MEM:BLK). We must do the comparison, and then
3670 convert the flags to a signed integer result. */
3671int
3672m32c_expand_cmpstr(rtx *operands)
3673{
3674 rtx src1a, src2a;
3675
3676 src1a = XEXP (operands[1], 0);
3677 src2a = XEXP (operands[2], 0);
3678
3679 if (GET_CODE (src1a) != REG
3680 || REGNO (src1a) < FIRST_PSEUDO_REGISTER)
3681 src1a = copy_to_mode_reg (Pmode, src1a);
3682
3683 if (GET_CODE (src2a) != REG
3684 || REGNO (src2a) < FIRST_PSEUDO_REGISTER)
3685 src2a = copy_to_mode_reg (Pmode, src2a);
3686
3687 emit_insn (gen_cmpstrhi_op (src1a, src2a, src1a, src2a));
3688 emit_insn (gen_cond_to_int (operands[0]));
3689
3690 return 1;
3691}
3692
3693
23fed240
DD
3694typedef rtx (*shift_gen_func)(rtx, rtx, rtx);
3695
3696static shift_gen_func
3697shift_gen_func_for (int mode, int code)
3698{
3699#define GFF(m,c,f) if (mode == m && code == c) return f
3700 GFF(QImode, ASHIFT, gen_ashlqi3_i);
3701 GFF(QImode, ASHIFTRT, gen_ashrqi3_i);
3702 GFF(QImode, LSHIFTRT, gen_lshrqi3_i);
3703 GFF(HImode, ASHIFT, gen_ashlhi3_i);
3704 GFF(HImode, ASHIFTRT, gen_ashrhi3_i);
3705 GFF(HImode, LSHIFTRT, gen_lshrhi3_i);
3706 GFF(PSImode, ASHIFT, gen_ashlpsi3_i);
3707 GFF(PSImode, ASHIFTRT, gen_ashrpsi3_i);
3708 GFF(PSImode, LSHIFTRT, gen_lshrpsi3_i);
3709 GFF(SImode, ASHIFT, TARGET_A16 ? gen_ashlsi3_16 : gen_ashlsi3_24);
3710 GFF(SImode, ASHIFTRT, TARGET_A16 ? gen_ashrsi3_16 : gen_ashrsi3_24);
3711 GFF(SImode, LSHIFTRT, TARGET_A16 ? gen_lshrsi3_16 : gen_lshrsi3_24);
3712#undef GFF
07127a0a 3713 gcc_unreachable ();
23fed240
DD
3714}
3715
38b2d076
DD
3716/* The m32c only has one shift, but it takes a signed count. GCC
3717 doesn't want this, so we fake it by negating any shift count when
07127a0a
DD
3718 we're pretending to shift the other way. Also, the shift count is
3719 limited to -8..8. It's slightly better to use two shifts for 9..15
3720 than to load the count into r1h, so we do that too. */
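/* Concretely (one illustrative case): a constant left shift of a
   QImode value by 10 is emitted below as a shift by 8 into a
   temporary followed by a shift by 2, staying inside the hardware's
   -8..8 immediate range.  */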
38b2d076 3721int
23fed240 3722m32c_prepare_shift (rtx * operands, int scale, int shift_code)
38b2d076 3723{
ef4bddc2 3724 machine_mode mode = GET_MODE (operands[0]);
23fed240 3725 shift_gen_func func = shift_gen_func_for (mode, shift_code);
38b2d076 3726 rtx temp;
23fed240
DD
3727
3728 if (GET_CODE (operands[2]) == CONST_INT)
38b2d076 3729 {
23fed240
DD
3730 int maxc = TARGET_A24 && (mode == PSImode || mode == SImode) ? 32 : 8;
3731 int count = INTVAL (operands[2]) * scale;
3732
3733 while (count > maxc)
3734 {
3735 temp = gen_reg_rtx (mode);
3736 emit_insn (func (temp, operands[1], GEN_INT (maxc)));
3737 operands[1] = temp;
3738 count -= maxc;
3739 }
3740 while (count < -maxc)
3741 {
3742 temp = gen_reg_rtx (mode);
3743 emit_insn (func (temp, operands[1], GEN_INT (-maxc)));
3744 operands[1] = temp;
3745 count += maxc;
3746 }
3747 emit_insn (func (operands[0], operands[1], GEN_INT (count)));
3748 return 1;
38b2d076 3749 }
2e160056
DD
3750
3751 temp = gen_reg_rtx (QImode);
38b2d076 3752 if (scale < 0)
2e160056
DD
3753 /* The pattern has a NEG that corresponds to this. */
3754 emit_move_insn (temp, gen_rtx_NEG (QImode, operands[2]));
3755 else if (TARGET_A16 && mode == SImode)
3756 /* We do this because the code below may modify this, we don't
3757 want to modify the origin of this value. */
3758 emit_move_insn (temp, operands[2]);
38b2d076 3759 else
2e160056 3760 /* We'll only use it for the shift, no point emitting a move. */
38b2d076 3761 temp = operands[2];
2e160056 3762
16659fcf 3763 if (TARGET_A16 && GET_MODE_SIZE (mode) == 4)
2e160056
DD
3764 {
3765 /* The m16c has a limit of -16..16 for SI shifts, even when the
3766 shift count is in a register. Since there are so many targets
3767 of these shifts, it's better to expand the RTL here than to
3768 call a helper function.
3769
3770 The resulting code looks something like this:
3771
3772 cmp.b r1h,-16
3773 jge.b 1f
3774 shl.l -16,dest
3775 add.b r1h,16
3776 1f: cmp.b r1h,16
3777 jle.b 1f
3778 shl.l 16,dest
3779 sub.b r1h,16
3780 1f: shl.l r1h,dest
3781
3782 We take advantage of the fact that "negative" shifts are
3783 undefined to skip one of the comparisons. */
3784
3785 rtx count;
9b2ea071 3786 rtx tempvar;
e60365d3 3787 rtx_insn *insn;
2e160056 3788
16659fcf
DD
3789 emit_move_insn (operands[0], operands[1]);
3790
2e160056 3791 count = temp;
9b2ea071 3792 rtx_code_label *label = gen_label_rtx ();
2e160056
DD
3793 LABEL_NUSES (label) ++;
3794
833bf445
DD
3795 tempvar = gen_reg_rtx (mode);
3796
3797 if (shift_code == ASHIFT)
3798 {
3799 /* This is a left shift. We only need check positive counts. */
3800 emit_jump_insn (gen_cbranchqi4 (gen_rtx_LE (VOIDmode, 0, 0),
3801 count, GEN_INT (16), label));
3802 emit_insn (func (tempvar, operands[0], GEN_INT (8)));
3803 emit_insn (func (operands[0], tempvar, GEN_INT (8)));
3804 insn = emit_insn (gen_addqi3 (count, count, GEN_INT (-16)));
3805 emit_label_after (label, insn);
3806 }
3807 else
3808 {
3809 /* This is a right shift. We only need check negative counts. */
3810 emit_jump_insn (gen_cbranchqi4 (gen_rtx_GE (VOIDmode, 0, 0),
3811 count, GEN_INT (-16), label));
3812 emit_insn (func (tempvar, operands[0], GEN_INT (-8)));
3813 emit_insn (func (operands[0], tempvar, GEN_INT (-8)));
3814 insn = emit_insn (gen_addqi3 (count, count, GEN_INT (16)));
3815 emit_label_after (label, insn);
3816 }
3817 operands[1] = operands[0];
3818 emit_insn (func (operands[0], operands[0], count));
3819 return 1;
3820 }
3821
3822 operands[2] = temp;
3823 return 0;
3824}
3825
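/* Added sketch (for exposition only; not part of the original file,
   and the helper name is made up).  It models the constant-count loops
   in m32c_prepare_shift above: a left shift by COUNT is performed as
   repeated shifts of at most MAXC bits each, just like the emitted
   insn sequence.  Assumes count >= 0 and 0 < maxc < 32.  */
static unsigned int
m32c_doc_split_left_shift (unsigned int value, int count, int maxc)
{
  while (count > maxc)
    {
      value <<= maxc;		/* one shift insn of the maximum width */
      count -= maxc;
    }
  return value << count;	/* final shift by the remainder */
}
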
3826/* The m32c has a limited range of operations that work on PSImode
3827 values; we have to expand to SI, do the math, and truncate back to
3828 PSI. Yes, this is expensive, but hopefully gcc will learn to avoid
3829 those cases. */
3830void
3831m32c_expand_neg_mulpsi3 (rtx * operands)
3832{
3833 /* operands: a = b * i */
3834 rtx temp1; /* b as SI */
 3835 rtx scale; /* i as SI */
3836 rtx temp2; /* a*b as SI */
3837
3838 temp1 = gen_reg_rtx (SImode);
3839 temp2 = gen_reg_rtx (SImode);
3840 if (GET_CODE (operands[2]) != CONST_INT)
3841 {
3842 scale = gen_reg_rtx (SImode);
3843 emit_insn (gen_zero_extendpsisi2 (scale, operands[2]));
3844 }
3845 else
3846 scale = copy_to_mode_reg (SImode, operands[2]);
3847
3848 emit_insn (gen_zero_extendpsisi2 (temp1, operands[1]));
3849 temp2 = expand_simple_binop (SImode, MULT, temp1, scale, temp2, 1, OPTAB_LIB);
3850 emit_insn (gen_truncsipsi2 (operands[0], temp2));
3851}
3852
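/* Added sketch (for exposition only; not part of the original file,
   and the helper name is made up).  PSImode is the 24-bit
   pointer-sized mode, so the expander above widens both operands to 32
   bits, multiplies there, and truncates the product back to 24 bits;
   this plain-C model of a*b in PSImode does the same thing.  */
static unsigned long
m32c_doc_mulpsi (unsigned long a, unsigned long b)
{
  unsigned long a_si = a & 0xffffffUL;		     /* zero_extendpsisi2 */
  unsigned long b_si = b & 0xffffffUL;		     /* zero_extendpsisi2 */
  unsigned long prod = (a_si * b_si) & 0xffffffffUL; /* SImode multiply */
  return prod & 0xffffffUL;			     /* truncsipsi2 */
}
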
3853/* Pattern Output Functions */
3854
3855int
3856m32c_expand_movcc (rtx *operands)
3857{
3858 rtx rel = operands[1];
0166ff05 3859
3860 if (GET_CODE (rel) != EQ && GET_CODE (rel) != NE)
3861 return 1;
3862 if (GET_CODE (operands[2]) != CONST_INT
3863 || GET_CODE (operands[3]) != CONST_INT)
3864 return 1;
3865 if (GET_CODE (rel) == NE)
3866 {
3867 rtx tmp = operands[2];
3868 operands[2] = operands[3];
3869 operands[3] = tmp;
f90b7a5a 3870 rel = gen_rtx_EQ (GET_MODE (rel), XEXP (rel, 0), XEXP (rel, 1));
07127a0a 3871 }
0166ff05 3872
3873 emit_move_insn (operands[0],
3874 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
f90b7a5a 3875 rel,
3876 operands[2],
3877 operands[3]));
3878 return 0;
3879}
3880
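/* Added sketch (for exposition only; not part of the original file,
   and the helper name is made up).  m32c_expand_movcc above only emits
   EQ-based conditional moves; an NE condition is handled by swapping
   the two value operands, which relies on the identity modelled
   here.  */
static int
m32c_doc_movcc_ne_as_eq (int x, int y, int if_true, int if_false)
{
  /* (x != y) ? if_true : if_false  is the same as
     (x == y) ? if_false : if_true.  */
  return (x == y) ? if_false : if_true;
}
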
3881/* Used for the "insv" pattern. Return nonzero to make the pattern FAIL; return zero once the insertion has been emitted. */
3882int
3883m32c_expand_insv (rtx *operands)
3884{
3885 rtx op0, src0, p;
3886 int mask;
3887
3888 if (INTVAL (operands[1]) != 1)
3889 return 1;
3890
3891 /* Our insv opcode (bset, bclr) can only insert a one-bit constant. */
3892 if (GET_CODE (operands[3]) != CONST_INT)
3893 return 1;
3894 if (INTVAL (operands[3]) != 0
3895 && INTVAL (operands[3]) != 1
3896 && INTVAL (operands[3]) != -1)
3897 return 1;
3898
3899 mask = 1 << INTVAL (operands[2]);
3900
3901 op0 = operands[0];
3902 if (GET_CODE (op0) == SUBREG
3903 && SUBREG_BYTE (op0) == 0)
3904 {
3905 rtx sub = SUBREG_REG (op0);
3906 if (GET_MODE (sub) == HImode || GET_MODE (sub) == QImode)
3907 op0 = sub;
3908 }
3909
b3a13419 3910 if (!can_create_pseudo_p ()
3911 || (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0)))
3912 src0 = op0;
3913 else
3914 {
3915 src0 = gen_reg_rtx (GET_MODE (op0));
3916 emit_move_insn (src0, op0);
3917 }
3918
3919 if (GET_MODE (op0) == HImode
3920 && INTVAL (operands[2]) >= 8
444d6efe 3921 && GET_CODE (op0) == MEM)
3922 {
3923 /* We are little endian. */
3924 rtx new_mem = gen_rtx_MEM (QImode, plus_constant (Pmode,
3925 XEXP (op0, 0), 1));
3926 MEM_COPY_ATTRIBUTES (new_mem, op0);
3927 mask >>= 8;
3928 }
3929
3930 /* First, we generate a mask with the correct polarity. If we are
3931 storing a zero, we want an AND mask, so invert it. */
3932 if (INTVAL (operands[3]) == 0)
07127a0a 3933 {
16659fcf 3934 /* Storing a zero, use an AND mask */
3935 if (GET_MODE (op0) == HImode)
3936 mask ^= 0xffff;
3937 else
3938 mask ^= 0xff;
3939 }
3940 /* Now we need to properly sign-extend the mask in case we need to
3941 fall back to an AND or OR opcode. */
3942 if (GET_MODE (op0) == HImode)
3943 {
3944 if (mask & 0x8000)
3945 mask -= 0x10000;
3946 }
3947 else
3948 {
3949 if (mask & 0x80)
3950 mask -= 0x100;
3951 }
3952
3953 switch ( (INTVAL (operands[3]) ? 4 : 0)
3954 + ((GET_MODE (op0) == HImode) ? 2 : 0)
3955 + (TARGET_A24 ? 1 : 0))
3956 {
3957 case 0: p = gen_andqi3_16 (op0, src0, GEN_INT (mask)); break;
3958 case 1: p = gen_andqi3_24 (op0, src0, GEN_INT (mask)); break;
3959 case 2: p = gen_andhi3_16 (op0, src0, GEN_INT (mask)); break;
3960 case 3: p = gen_andhi3_24 (op0, src0, GEN_INT (mask)); break;
3961 case 4: p = gen_iorqi3_16 (op0, src0, GEN_INT (mask)); break;
3962 case 5: p = gen_iorqi3_24 (op0, src0, GEN_INT (mask)); break;
3963 case 6: p = gen_iorhi3_16 (op0, src0, GEN_INT (mask)); break;
3964 case 7: p = gen_iorhi3_24 (op0, src0, GEN_INT (mask)); break;
653e2568 3965 default: p = NULL_RTX; break; /* Not reached, but silences a warning. */
3966 }
3967
3968 emit_insn (p);
3969 return 0;
3970}
3971
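/* Added sketch (for exposition only; not part of the original file,
   and the helper name is made up).  Inserting a single constant bit,
   as m32c_expand_insv above arranges via bset/bclr or the and/ior
   fallbacks, amounts to an OR with the bit mask when storing a 1 and
   an AND with the inverted mask when storing a 0.  */
static int
m32c_doc_insert_bit (int word, int bitpos, int bitval)
{
  int mask = 1 << bitpos;
  if (bitval)
    return word | mask;		/* the ior / bset case */
  else
    return word & ~mask;	/* the and / bclr case (inverted mask) */
}
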
3972const char *
3973m32c_scc_pattern(rtx *operands, RTX_CODE code)
3974{
3975 static char buf[30];
3976 if (GET_CODE (operands[0]) == REG
3977 && REGNO (operands[0]) == R0_REGNO)
3978 {
3979 if (code == EQ)
3980 return "stzx\t#1,#0,r0l";
3981 if (code == NE)
3982 return "stzx\t#0,#1,r0l";
3983 }
3984 sprintf(buf, "bm%s\t0,%%h0\n\tand.b\t#1,%%0", GET_RTX_NAME (code));
3985 return buf;
3986}
3987
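/* Added examples (for exposition only; not part of the original file)
   of what m32c_scc_pattern above returns:

     EQ into r0l:   "stzx\t#1,#0,r0l"
     NE into r0l:   "stzx\t#0,#1,r0l"
     any other case, e.g. GEU into a non-r0 destination:
                    "bmgeu\t0,%h0\n\tand.b\t#1,%0"

   where the "bm<cond>" mnemonic comes from GET_RTX_NAME (code).  */
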
3988/* Encode symbol attributes of a SYMBOL_REF into its
3989 SYMBOL_REF_FLAGS. */
3990static void
3991m32c_encode_section_info (tree decl, rtx rtl, int first)
3992{
3993 int extra_flags = 0;
3994
3995 default_encode_section_info (decl, rtl, first);
3996 if (TREE_CODE (decl) == FUNCTION_DECL
3997 && m32c_special_page_vector_p (decl))
3998
3999 extra_flags = SYMBOL_FLAG_FUNCVEC_FUNCTION;
4000
4001 if (extra_flags)
4002 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= extra_flags;
4003}
4004
4005/* Returns TRUE if the current function is a leaf, and thus we can
4006 determine which registers an interrupt function really needs to
4007 save. The logic below is mostly about finding the insn sequence
4008 that's the function, versus any sequence that might be open for the
4009 current insn. */
4010static int
4011m32c_leaf_function_p (void)
4012{
4013 int rv;
4014
614d5bd8 4015 push_topmost_sequence ();
38b2d076 4016 rv = leaf_function_p ();
614d5bd8 4017 pop_topmost_sequence ();
4018 return rv;
4019}
4020
4021/* Returns TRUE if the current function needs to use the ENTER/EXIT
4022 opcodes. If the function doesn't need the frame base or stack
4023 pointer, it can use the simpler RTS opcode. */
4024static bool
4025m32c_function_needs_enter (void)
4026{
b32d5189 4027 rtx_insn *insn;
4028 rtx sp = gen_rtx_REG (Pmode, SP_REGNO);
4029 rtx fb = gen_rtx_REG (Pmode, FB_REGNO);
4030
4031 for (insn = get_topmost_sequence ()->first; insn; insn = NEXT_INSN (insn))
4032 if (NONDEBUG_INSN_P (insn))
4033 {
4034 if (reg_mentioned_p (sp, insn))
4035 return true;
4036 if (reg_mentioned_p (fb, insn))
4037 return true;
4038 }
4039 return false;
4040}
4041
4042/* Mark all the subexpressions of the PARALLEL rtx PAR as
4043 frame-related. Return PAR.
4044
4045 dwarf2out.c:dwarf2out_frame_debug_expr ignores sub-expressions of a
4046 PARALLEL rtx other than the first if they do not have the
4047 FRAME_RELATED flag set on them. So this function is handy for
4048 marking up 'enter' instructions. */
4049static rtx
4050m32c_all_frame_related (rtx par)
4051{
4052 int len = XVECLEN (par, 0);
4053 int i;
4054
4055 for (i = 0; i < len; i++)
4056 F (XVECEXP (par, 0, i));
4057
4058 return par;
4059}
4060
4061/* Emits the prologue. See the frame layout comment earlier in this
4062 file. We can reserve up to 256 bytes with the ENTER opcode, beyond
4063 that we manually update sp. */
4064void
4065m32c_emit_prologue (void)
4066{
4067 int frame_size, extra_frame_size = 0, reg_save_size;
4068 int complex_prologue = 0;
4069
4070 cfun->machine->is_leaf = m32c_leaf_function_p ();
4071 if (interrupt_p (cfun->decl))
4072 {
4073 cfun->machine->is_interrupt = 1;
4074 complex_prologue = 1;
4075 }
4076 else if (bank_switch_p (cfun->decl))
4077 warning (OPT_Wattributes,
4078 "%<bank_switch%> has no effect on non-interrupt functions");
4079
4080 reg_save_size = m32c_pushm_popm (PP_justcount);
4081
4082 if (interrupt_p (cfun->decl))
4083 {
4084 if (bank_switch_p (cfun->decl))
4085 emit_insn (gen_fset_b ());
4086 else if (cfun->machine->intr_pushm)
4087 emit_insn (gen_pushm (GEN_INT (cfun->machine->intr_pushm)));
4088 }
4089
4090 frame_size =
4091 m32c_initial_elimination_offset (FB_REGNO, SP_REGNO) - reg_save_size;
4092 if (frame_size == 0
4093 && !m32c_function_needs_enter ())
4094 cfun->machine->use_rts = 1;
4095
4096 if (flag_stack_usage_info)
4097 current_function_static_stack_size = frame_size;
4098
4099 if (frame_size > 254)
4100 {
4101 extra_frame_size = frame_size - 254;
4102 frame_size = 254;
4103 }
4104 if (cfun->machine->use_rts == 0)
4105 F (emit_insn (m32c_all_frame_related
4106 (TARGET_A16
4107 ? gen_prologue_enter_16 (GEN_INT (frame_size + 2))
4108 : gen_prologue_enter_24 (GEN_INT (frame_size + 4)))));
4109
4110 if (extra_frame_size)
4111 {
4112 complex_prologue = 1;
4113 if (TARGET_A16)
4114 F (emit_insn (gen_addhi3 (gen_rtx_REG (HImode, SP_REGNO),
4115 gen_rtx_REG (HImode, SP_REGNO),
4116 GEN_INT (-extra_frame_size))));
4117 else
4118 F (emit_insn (gen_addpsi3 (gen_rtx_REG (PSImode, SP_REGNO),
4119 gen_rtx_REG (PSImode, SP_REGNO),
4120 GEN_INT (-extra_frame_size))));
4121 }
4122
4123 complex_prologue += m32c_pushm_popm (PP_pushm);
4124
4125 /* This just emits a comment into the .s file for debugging. */
4126 if (complex_prologue)
4127 emit_insn (gen_prologue_end ());
4128}
4129
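/* Added sketch (for exposition only; not part of the original file,
   and the helper name is made up).  ENTER's displacement is limited,
   so the prologue above reserves at most 254 bytes through ENTER and
   subtracts the remainder from SP explicitly; this helper models that
   split.  */
static void
m32c_doc_split_frame (int frame_size, int *enter_size, int *extra_size)
{
  if (frame_size > 254)
    {
      *enter_size = 254;
      *extra_size = frame_size - 254;	/* separate addhi3/addpsi3 on SP */
    }
  else
    {
      *enter_size = frame_size;
      *extra_size = 0;
    }
}
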
4130/* Likewise, for the epilogue. The only exception is that, for
4131 interrupts, we must manually unwind the frame as the REIT opcode
4132 doesn't do that. */
4133void
4134m32c_emit_epilogue (void)
4135{
4136 int popm_count = m32c_pushm_popm (PP_justcount);
4137
38b2d076 4138 /* This just emits a comment into the .s file for debugging. */
f0679612 4139 if (popm_count > 0 || cfun->machine->is_interrupt)
4140 emit_insn (gen_epilogue_start ());
4141
4142 if (popm_count > 0)
4143 m32c_pushm_popm (PP_popm);
4144
4145 if (cfun->machine->is_interrupt)
4146 {
ef4bddc2 4147 machine_mode spmode = TARGET_A16 ? HImode : PSImode;
38b2d076 4148
4149 /* REIT clears B flag and restores $fp for us, but we still
4150 have to fix up the stack. USE_RTS just means we didn't
4151 emit ENTER. */
4152 if (!cfun->machine->use_rts)
4153 {
4154 emit_move_insn (gen_rtx_REG (spmode, A0_REGNO),
4155 gen_rtx_REG (spmode, FP_REGNO));
4156 emit_move_insn (gen_rtx_REG (spmode, SP_REGNO),
4157 gen_rtx_REG (spmode, A0_REGNO));
4158 /* We can't just add this to the POPM because it would be in
4159 the wrong order, and wouldn't fix the stack if we're bank
4160 switching. */
4161 if (TARGET_A16)
4162 emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, FP_REGNO)));
4163 else
4164 emit_insn (gen_poppsi (gen_rtx_REG (PSImode, FP_REGNO)));
4165 }
4166 if (!bank_switch_p (cfun->decl) && cfun->machine->intr_pushm)
4167 emit_insn (gen_popm (GEN_INT (cfun->machine->intr_pushm)));
4168
4169 /* The FREIT (Fast REturn from InTerrupt) instruction should be
4170 generated only for M32C/M32CM targets (generate the REIT
4171 instruction otherwise). */
65655f79 4172 if (fast_interrupt_p (cfun->decl))
4173 {
4174 /* Check if fast_attribute is set for M32C or M32CM. */
4175 if (TARGET_A24)
4176 {
4177 emit_jump_insn (gen_epilogue_freit ());
4178 }
 4179 /* If the fast_interrupt attribute is set for an R8C or M16C
 4180 target, ignore the attribute and generate the REIT
 4181 instruction instead. */
4182 else
4183 {
4184 warning (OPT_Wattributes,
4185 "%<fast_interrupt%> attribute directive ignored");
4186 emit_jump_insn (gen_epilogue_reit_16 ());
4187 }
4188 }
65655f79 4189 else if (TARGET_A16)
4190 emit_jump_insn (gen_epilogue_reit_16 ());
4191 else
4192 emit_jump_insn (gen_epilogue_reit_24 ());
4193 }
4194 else if (cfun->machine->use_rts)
4195 emit_jump_insn (gen_epilogue_rts ());
4196 else if (TARGET_A16)
4197 emit_jump_insn (gen_epilogue_exitd_16 ());
38b2d076 4198 else
0e0642aa 4199 emit_jump_insn (gen_epilogue_exitd_24 ());
4200}
4201
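/* Added sketch (for exposition only; not part of the original file,
   and the helper name is made up).  It models the return-instruction
   choice made by m32c_emit_epilogue above: FREIT only for
   fast_interrupt functions on A24 parts, REIT for other interrupt
   functions, RTS when no frame was set up, and EXITD otherwise.  */
static const char *
m32c_doc_return_insn (int is_interrupt, int is_fast_interrupt,
		      int target_a24, int use_rts)
{
  if (is_interrupt)
    return (is_fast_interrupt && target_a24) ? "freit" : "reit";
  if (use_rts)
    return "rts";
  return "exitd";
}
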
4202void
4203m32c_emit_eh_epilogue (rtx ret_addr)
4204{
4205 /* R0[R2] has the stack adjustment. R1[R3] has the address to
4206 return to. We have to fudge the stack, pop everything, pop SP
4207 (fudged), and return (fudged). This is actually easier to do in
4208 assembler, so punt to libgcc. */
4209 emit_jump_insn (gen_eh_epilogue (ret_addr, cfun->machine->eh_stack_adjust));
c41c1387 4210 /* emit_clobber (gen_rtx_REG (HImode, R0L_REGNO)); */
4211}
4212
4213/* Indicate which flags must be properly set for a given conditional. */
4214static int
4215flags_needed_for_conditional (rtx cond)
4216{
4217 switch (GET_CODE (cond))
4218 {
4219 case LE:
4220 case GT:
4221 return FLAGS_OSZ;
4222 case LEU:
4223 case GTU:
4224 return FLAGS_ZC;
4225 case LT:
4226 case GE:
4227 return FLAGS_OS;
4228 case LTU:
4229 case GEU:
4230 return FLAGS_C;
4231 case EQ:
4232 case NE:
4233 return FLAGS_Z;
4234 default:
4235 return FLAGS_N;
4236 }
4237}
4238
4239#define DEBUG_CMP 0
4240
4241/* Returns true if a compare insn is redundant because it would only
4242 set flags that are already set correctly. */
4243static bool
84034c69 4244m32c_compare_redundant (rtx_insn *cmp, rtx *operands)
4245{
4246 int flags_needed;
4247 int pflags;
4248 rtx_insn *prev;
4249 rtx pp, next;
444d6efe 4250 rtx op0, op1;
4251#if DEBUG_CMP
4252 int prev_icode, i;
4253#endif
4254
4255 op0 = operands[0];
4256 op1 = operands[1];
4257
4258#if DEBUG_CMP
4259 fprintf(stderr, "\n\033[32mm32c_compare_redundant\033[0m\n");
4260 debug_rtx(cmp);
4261 for (i=0; i<2; i++)
4262 {
4263 fprintf(stderr, "operands[%d] = ", i);
4264 debug_rtx(operands[i]);
4265 }
4266#endif
4267
4268 next = next_nonnote_insn (cmp);
4269 if (!next || !INSN_P (next))
4270 {
4271#if DEBUG_CMP
4272 fprintf(stderr, "compare not followed by insn\n");
4273 debug_rtx(next);
4274#endif
4275 return false;
4276 }
4277 if (GET_CODE (PATTERN (next)) == SET
4278 && GET_CODE (XEXP ( PATTERN (next), 1)) == IF_THEN_ELSE)
4279 {
4280 next = XEXP (XEXP (PATTERN (next), 1), 0);
4281 }
4282 else if (GET_CODE (PATTERN (next)) == SET)
4283 {
4284 /* If this is a conditional, flags_needed will be something
4285 other than FLAGS_N, which we test below. */
4286 next = XEXP (PATTERN (next), 1);
4287 }
4288 else
4289 {
4290#if DEBUG_CMP
4291 fprintf(stderr, "compare not followed by conditional\n");
4292 debug_rtx(next);
4293#endif
4294 return false;
4295 }
4296#if DEBUG_CMP
4297 fprintf(stderr, "conditional is: ");
4298 debug_rtx(next);
4299#endif
4300
4301 flags_needed = flags_needed_for_conditional (next);
4302 if (flags_needed == FLAGS_N)
4303 {
4304#if DEBUG_CMP
4305 fprintf(stderr, "compare not followed by conditional\n");
4306 debug_rtx(next);
4307#endif
4308 return false;
4309 }
4310
4311 /* Compare doesn't set overflow and carry the same way that
4312 arithmetic instructions do, so we can't replace those. */
4313 if (flags_needed & FLAGS_OC)
4314 return false;
4315
4316 prev = cmp;
4317 do {
4318 prev = prev_nonnote_insn (prev);
4319 if (!prev)
4320 {
4321#if DEBUG_CMP
4322 fprintf(stderr, "No previous insn.\n");
4323#endif
4324 return false;
4325 }
4326 if (!INSN_P (prev))
4327 {
4328#if DEBUG_CMP
4329 fprintf(stderr, "Previous insn is a non-insn.\n");
4330#endif
4331 return false;
4332 }
4333 pp = PATTERN (prev);
4334 if (GET_CODE (pp) != SET)
4335 {
4336#if DEBUG_CMP
4337 fprintf(stderr, "Previous insn is not a SET.\n");
4338#endif
4339 return false;
4340 }
4341 pflags = get_attr_flags (prev);
4342
 4343 /* Looking up attributes of previous insns corrupts the recog
 4344 tables, so force CMP to be re-recognized here. */
4345 INSN_UID (cmp) = -1;
4346 recog (PATTERN (cmp), cmp, 0);
4347
4348 if (pflags == FLAGS_N
4349 && reg_mentioned_p (op0, pp))
4350 {
4351#if DEBUG_CMP
4352 fprintf(stderr, "intermediate non-flags insn uses op:\n");
4353 debug_rtx(prev);
4354#endif
4355 return false;
4356 }
4357
4358 /* Check for comparisons against memory - between volatiles and
4359 aliases, we just can't risk this one. */
 4360 if (GET_CODE (operands[0]) == MEM
 4361 || GET_CODE (operands[1]) == MEM)
4362 {
4363#if DEBUG_CMP
4364 fprintf(stderr, "comparisons with memory:\n");
4365 debug_rtx(prev);
4366#endif
4367 return false;
4368 }
4369
4370 /* Check for PREV changing a register that's used to compute a
4371 value in CMP, even if it doesn't otherwise change flags. */
4372 if (GET_CODE (operands[0]) == REG
4373 && rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[0]))
4374 {
4375#if DEBUG_CMP
4376 fprintf(stderr, "sub-value affected, op0:\n");
4377 debug_rtx(prev);
4378#endif
4379 return false;
4380 }
4381 if (GET_CODE (operands[1]) == REG
4382 && rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[1]))
4383 {
4384#if DEBUG_CMP
4385 fprintf(stderr, "sub-value affected, op1:\n");
4386 debug_rtx(prev);
4387#endif
4388 return false;
4389 }
4390
4391 } while (pflags == FLAGS_N);
4392#if DEBUG_CMP
4393 fprintf(stderr, "previous flag-setting insn:\n");
4394 debug_rtx(prev);
4395 debug_rtx(pp);
4396#endif
4397
4398 if (GET_CODE (pp) == SET
4399 && GET_CODE (XEXP (pp, 0)) == REG
4400 && REGNO (XEXP (pp, 0)) == FLG_REGNO
4401 && GET_CODE (XEXP (pp, 1)) == COMPARE)
4402 {
4403 /* Adjacent cbranches must have the same operands to be
4404 redundant. */
4405 rtx pop0 = XEXP (XEXP (pp, 1), 0);
4406 rtx pop1 = XEXP (XEXP (pp, 1), 1);
4407#if DEBUG_CMP
4408 fprintf(stderr, "adjacent cbranches\n");
4409 debug_rtx(pop0);
4410 debug_rtx(pop1);
4411#endif
4412 if (rtx_equal_p (op0, pop0)
4413 && rtx_equal_p (op1, pop1))
4414 return true;
4415#if DEBUG_CMP
4416 fprintf(stderr, "prev cmp not same\n");
4417#endif
4418 return false;
4419 }
4420
4421 /* Else the previous insn must be a SET, with either the source or
4422 dest equal to operands[0], and operands[1] must be zero. */
4423
4424 if (!rtx_equal_p (op1, const0_rtx))
4425 {
4426#if DEBUG_CMP
4427 fprintf(stderr, "operands[1] not const0_rtx\n");
4428#endif
4429 return false;
4430 }
4431 if (GET_CODE (pp) != SET)
4432 {
4433#if DEBUG_CMP
4434 fprintf (stderr, "pp not set\n");
4435#endif
4436 return false;
4437 }
4438 if (!rtx_equal_p (op0, SET_SRC (pp))
4439 && !rtx_equal_p (op0, SET_DEST (pp)))
4440 {
4441#if DEBUG_CMP
4442 fprintf(stderr, "operands[0] not found in set\n");
4443#endif
4444 return false;
4445 }
4446
4447#if DEBUG_CMP
4448 fprintf(stderr, "cmp flags %x prev flags %x\n", flags_needed, pflags);
4449#endif
4450 if ((pflags & flags_needed) == flags_needed)
4451 return true;
4452
4453 return false;
4454}
4455
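/* Added sketch (for exposition only; not part of the original file,
   and the helper name is made up).  The decisive test at the end of
   m32c_compare_redundant above is a subset check on flag masks: the
   compare may be dropped only when every flag the conditional needs is
   already set correctly by the previous flag-setting insn.  */
static int
m32c_doc_flags_already_valid (int flags_needed, int prev_flags)
{
  return (prev_flags & flags_needed) == flags_needed;
}
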
4456/* Return the pattern for a compare. This will be commented out if
4457 the compare is redundant, else a normal pattern is returned. Thus,
4458 the assembler output says where the compare would have been. */
4459char *
84034c69 4460m32c_output_compare (rtx_insn *insn, rtx *operands)
16659fcf 4461{
0a2aaacc 4462 static char templ[] = ";cmp.b\t%1,%0";
4463 /* ^ 5 */
4464
0a2aaacc 4465 templ[5] = " bwll"[GET_MODE_SIZE(GET_MODE(operands[0]))];
4466 if (m32c_compare_redundant (insn, operands))
4467 {
4468#if DEBUG_CMP
4469 fprintf(stderr, "cbranch: cmp not needed\n");
4470#endif
0a2aaacc 4471 return templ;
4472 }
4473
4474#if DEBUG_CMP
b3c5a409 4475 fprintf(stderr, "cbranch: cmp needed: `%s'\n", templ + 1);
16659fcf 4476#endif
0a2aaacc 4477 return templ + 1;
4478}
4479
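/* Added sketch (for exposition only; not part of the original file,
   and the helper name is made up).  m32c_output_compare above patches
   the size suffix into a static template and, when the compare is
   redundant, keeps the leading ';' so the compare shows up only as a
   comment in the assembler output.  The suffix lookup maps an operand
   size of 1 to 'b', 2 to 'w', and 3 or 4 to 'l'.  */
static char
m32c_doc_cmp_suffix (int size_in_bytes)
{
  return " bwll"[size_in_bytes];	/* same lookup string as above */
}
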
4480#undef TARGET_ENCODE_SECTION_INFO
4481#define TARGET_ENCODE_SECTION_INFO m32c_encode_section_info
4482
4483/* We can detect by hand when the frame pointer isn't really used, but
4484 the stack pointer's addressing is not as flexible as the frame
4485 pointer's, so we always require a frame pointer anyway. */
4486
4487#undef TARGET_FRAME_POINTER_REQUIRED
4488#define TARGET_FRAME_POINTER_REQUIRED hook_bool_void_true
4489
4490/* The Global `targetm' Variable. */
4491
4492struct gcc_target targetm = TARGET_INITIALIZER;
4493
4494#include "gt-m32c.h"