/* Target Code for R8C/M16C/M32C
   Copyright (C) 2005-2019 Free Software Foundation, Inc.
   Contributed by Red Hat.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#define IN_TARGET_CODE 1

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "stringpool.h"
#include "attribs.h"
#include "df.h"
#include "memmodel.h"
#include "tm_p.h"
#include "optabs.h"
#include "regs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "reload.h"
#include "stor-layout.h"
#include "varasm.h"
#include "calls.h"
#include "explow.h"
#include "expr.h"
#include "tm-constrs.h"
#include "builtins.h"

/* This file should be included last.  */
#include "target-def.h"

/* Prototypes */

/* Used by m32c_pushm_popm.  */
typedef enum
{
  PP_pushm,
  PP_popm,
  PP_justcount
} Push_Pop_Type;

static bool m32c_function_needs_enter (void);
static tree interrupt_handler (tree *, tree, tree, int, bool *);
static tree function_vector_handler (tree *, tree, tree, int, bool *);
static int interrupt_p (tree node);
static int bank_switch_p (tree node);
static int fast_interrupt_p (tree node);
static int interrupt_p (tree node);
static bool m32c_asm_integer (rtx, unsigned int, int);
static int m32c_comp_type_attributes (const_tree, const_tree);
static bool m32c_fixed_condition_code_regs (unsigned int *, unsigned int *);
static struct machine_function *m32c_init_machine_status (void);
static void m32c_insert_attributes (tree, tree *);
static bool m32c_legitimate_address_p (machine_mode, rtx, bool);
static bool m32c_addr_space_legitimate_address_p (machine_mode, rtx, bool, addr_space_t);
static rtx m32c_function_arg (cumulative_args_t, const function_arg_info &);
static bool m32c_pass_by_reference (cumulative_args_t,
				    const function_arg_info &);
static void m32c_function_arg_advance (cumulative_args_t,
				       const function_arg_info &);
static unsigned int m32c_function_arg_boundary (machine_mode, const_tree);
static int m32c_pushm_popm (Push_Pop_Type);
static bool m32c_strict_argument_naming (cumulative_args_t);
static rtx m32c_struct_value_rtx (tree, int);
static rtx m32c_subreg (machine_mode, rtx, machine_mode, int);
static int need_to_save (int);
static rtx m32c_function_value (const_tree, const_tree, bool);
static rtx m32c_libcall_value (machine_mode, const_rtx);

/* Returns true if an address is specified, else false.  */
static bool m32c_get_pragma_address (const char *varname, unsigned *addr);

static bool m32c_hard_regno_mode_ok (unsigned int, machine_mode);

#define SYMBOL_FLAG_FUNCVEC_FUNCTION	(SYMBOL_FLAG_MACH_DEP << 0)

#define streq(a,b) (strcmp ((a), (b)) == 0)

/* Internal support routines */

/* Debugging statements are tagged with DEBUG0 only so that they can
   be easily enabled individually, by replacing the '0' with '1' as
   needed.  */
#define DEBUG0 0
#define DEBUG1 1

#if DEBUG0
#include "print-tree.h"
/* This is needed by some of the commented-out debug statements
   below.  */
static char const *class_names[LIM_REG_CLASSES] = REG_CLASS_NAMES;
#endif
static int class_contents[LIM_REG_CLASSES][1] = REG_CLASS_CONTENTS;

/* These are all to support encode_pattern().  */
static char pattern[30], *patternp;
static GTY(()) rtx patternr[30];
#define RTX_IS(x) (streq (pattern, x))

/* Some macros to simplify the logic throughout this file.  */
#define IS_MEM_REGNO(regno) ((regno) >= MEM0_REGNO && (regno) <= MEM7_REGNO)
#define IS_MEM_REG(rtx) (GET_CODE (rtx) == REG && IS_MEM_REGNO (REGNO (rtx)))

#define IS_CR_REGNO(regno) ((regno) >= SB_REGNO && (regno) <= PC_REGNO)
#define IS_CR_REG(rtx) (GET_CODE (rtx) == REG && IS_CR_REGNO (REGNO (rtx)))

static int
far_addr_space_p (rtx x)
{
  if (GET_CODE (x) != MEM)
    return 0;
#if DEBUG0
  fprintf(stderr, "\033[35mfar_addr_space: "); debug_rtx(x);
  fprintf(stderr, " = %d\033[0m\n", MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR);
#endif
  return MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR;
}

/* We do most RTX matching by converting the RTX into a string, and
   using string compares.  This vastly simplifies the logic in many of
   the functions in this file.

   On exit, pattern[] has the encoded string (use RTX_IS("...") to
   compare it) and patternr[] has pointers to the nodes in the RTX
   corresponding to each character in the encoded string.  The latter
   is mostly used by print_operand().

   Unrecognized patterns have '?' in them; this shows up when the
   assembler complains about syntax errors.
*/

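/* For example, an address such as (mem:HI (plus:HI (reg:HI a0)
   (const_int 4))) encodes as "m+ri": 'm' for the MEM, '+' for the
   PLUS, 'r' for the register, and 'i' for the constant.  Predicates
   later in this file then test for it with RTX_IS ("m+ri").  */
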
155static void
156encode_pattern_1 (rtx x)
157{
158 int i;
159
160 if (patternp == pattern + sizeof (pattern) - 2)
161 {
162 patternp[-1] = '?';
163 return;
164 }
165
166 patternr[patternp - pattern] = x;
167
168 switch (GET_CODE (x))
169 {
170 case REG:
171 *patternp++ = 'r';
172 break;
173 case SUBREG:
174 if (GET_MODE_SIZE (GET_MODE (x)) !=
175 GET_MODE_SIZE (GET_MODE (XEXP (x, 0))))
176 *patternp++ = 'S';
45d898e4
DD
177 if (GET_MODE (x) == PSImode
178 && GET_CODE (XEXP (x, 0)) == REG)
179 *patternp++ = 'S';
38b2d076
DD
180 encode_pattern_1 (XEXP (x, 0));
181 break;
182 case MEM:
183 *patternp++ = 'm';
0c57f4bf 184 /* FALLTHRU */
38b2d076
DD
185 case CONST:
186 encode_pattern_1 (XEXP (x, 0));
187 break;
5fd5d713
DD
188 case SIGN_EXTEND:
189 *patternp++ = '^';
190 *patternp++ = 'S';
191 encode_pattern_1 (XEXP (x, 0));
192 break;
193 case ZERO_EXTEND:
194 *patternp++ = '^';
195 *patternp++ = 'Z';
196 encode_pattern_1 (XEXP (x, 0));
197 break;
38b2d076
DD
198 case PLUS:
199 *patternp++ = '+';
200 encode_pattern_1 (XEXP (x, 0));
201 encode_pattern_1 (XEXP (x, 1));
202 break;
203 case PRE_DEC:
204 *patternp++ = '>';
205 encode_pattern_1 (XEXP (x, 0));
206 break;
207 case POST_INC:
208 *patternp++ = '<';
209 encode_pattern_1 (XEXP (x, 0));
210 break;
211 case LO_SUM:
212 *patternp++ = 'L';
213 encode_pattern_1 (XEXP (x, 0));
214 encode_pattern_1 (XEXP (x, 1));
215 break;
216 case HIGH:
217 *patternp++ = 'H';
218 encode_pattern_1 (XEXP (x, 0));
219 break;
220 case SYMBOL_REF:
221 *patternp++ = 's';
222 break;
223 case LABEL_REF:
224 *patternp++ = 'l';
225 break;
226 case CODE_LABEL:
227 *patternp++ = 'c';
228 break;
229 case CONST_INT:
230 case CONST_DOUBLE:
231 *patternp++ = 'i';
232 break;
233 case UNSPEC:
234 *patternp++ = 'u';
235 *patternp++ = '0' + XCINT (x, 1, UNSPEC);
236 for (i = 0; i < XVECLEN (x, 0); i++)
237 encode_pattern_1 (XVECEXP (x, 0, i));
238 break;
239 case USE:
240 *patternp++ = 'U';
241 break;
242 case PARALLEL:
243 *patternp++ = '|';
244 for (i = 0; i < XVECLEN (x, 0); i++)
245 encode_pattern_1 (XVECEXP (x, 0, i));
246 break;
247 case EXPR_LIST:
248 *patternp++ = 'E';
249 encode_pattern_1 (XEXP (x, 0));
250 if (XEXP (x, 1))
251 encode_pattern_1 (XEXP (x, 1));
252 break;
253 default:
254 *patternp++ = '?';
255#if DEBUG0
256 fprintf (stderr, "can't encode pattern %s\n",
257 GET_RTX_NAME (GET_CODE (x)));
258 debug_rtx (x);
38b2d076
DD
259#endif
260 break;
261 }
262}
263
264static void
265encode_pattern (rtx x)
266{
267 patternp = pattern;
268 encode_pattern_1 (x);
269 *patternp = 0;
270}
271
272/* Since register names indicate the mode they're used in, we need a
273 way to determine which name to refer to the register with. Called
274 by print_operand(). */
275
276static const char *
ef4bddc2 277reg_name_with_mode (int regno, machine_mode mode)
38b2d076
DD
278{
279 int mlen = GET_MODE_SIZE (mode);
280 if (regno == R0_REGNO && mlen == 1)
281 return "r0l";
282 if (regno == R0_REGNO && (mlen == 3 || mlen == 4))
283 return "r2r0";
284 if (regno == R0_REGNO && mlen == 6)
285 return "r2r1r0";
286 if (regno == R0_REGNO && mlen == 8)
287 return "r3r1r2r0";
288 if (regno == R1_REGNO && mlen == 1)
289 return "r1l";
290 if (regno == R1_REGNO && (mlen == 3 || mlen == 4))
291 return "r3r1";
292 if (regno == A0_REGNO && TARGET_A16 && (mlen == 3 || mlen == 4))
293 return "a1a0";
294 return reg_names[regno];
295}
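/* For example, $r0 is printed as "r0l" when holding a QImode value,
   as "r2r0" when holding an SImode value, and as "r3r1r2r0" for
   DImode; anything not special-cased above falls back to the plain
   name in reg_names[].  */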
296
297/* How many bytes a register uses on stack when it's pushed. We need
298 to know this because the push opcode needs to explicitly indicate
299 the size of the register, even though the name of the register
300 already tells it that. Used by m32c_output_reg_{push,pop}, which
301 is only used through calls to ASM_OUTPUT_REG_{PUSH,POP}. */
302
303static int
304reg_push_size (int regno)
305{
306 switch (regno)
307 {
308 case R0_REGNO:
309 case R1_REGNO:
310 return 2;
311 case R2_REGNO:
312 case R3_REGNO:
313 case FLG_REGNO:
314 return 2;
315 case A0_REGNO:
316 case A1_REGNO:
317 case SB_REGNO:
318 case FB_REGNO:
319 case SP_REGNO:
320 if (TARGET_A16)
321 return 2;
322 else
323 return 3;
324 default:
325 gcc_unreachable ();
326 }
327}
328
/* Given two register classes, find the largest intersection between
   them.  If there is no intersection, return RETURNED_IF_EMPTY
   instead.  */
332static reg_class_t
333reduce_class (reg_class_t original_class, reg_class_t limiting_class,
334 reg_class_t returned_if_empty)
38b2d076 335{
35bdbc69
AS
336 HARD_REG_SET cc;
337 int i;
338 reg_class_t best = NO_REGS;
339 unsigned int best_size = 0;
38b2d076
DD
340
341 if (original_class == limiting_class)
342 return original_class;
343
35bdbc69
AS
344 cc = reg_class_contents[original_class];
345 AND_HARD_REG_SET (cc, reg_class_contents[limiting_class]);
38b2d076 346
38b2d076
DD
347 for (i = 0; i < LIM_REG_CLASSES; i++)
348 {
35bdbc69
AS
349 if (hard_reg_set_subset_p (reg_class_contents[i], cc))
350 if (best_size < reg_class_size[i])
38b2d076 351 {
35bdbc69
AS
352 best = (reg_class_t) i;
353 best_size = reg_class_size[i];
38b2d076
DD
354 }
355
356 }
357 if (best == NO_REGS)
358 return returned_if_empty;
359 return best;
360}
361
/* Used by m32c_register_move_cost to determine if a move is
   impossibly expensive.  */
static bool
ef4bddc2 365class_can_hold_mode (reg_class_t rclass, machine_mode mode)
38b2d076
DD
366{
367 /* Cache the results: 0=untested 1=no 2=yes */
368 static char results[LIM_REG_CLASSES][MAX_MACHINE_MODE];
0e607518
AS
369
370 if (results[(int) rclass][mode] == 0)
38b2d076 371 {
0e607518 372 int r;
0a2aaacc 373 results[rclass][mode] = 1;
38b2d076 374 for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
0e607518 375 if (in_hard_reg_set_p (reg_class_contents[(int) rclass], mode, r)
f939c3e6 376 && m32c_hard_regno_mode_ok (r, mode))
38b2d076 377 {
0e607518
AS
378 results[rclass][mode] = 2;
379 break;
38b2d076
DD
380 }
381 }
0e607518 382
38b2d076
DD
383#if DEBUG0
384 fprintf (stderr, "class %s can hold %s? %s\n",
0e607518 385 class_names[(int) rclass], mode_name[mode],
0a2aaacc 386 (results[rclass][mode] == 2) ? "yes" : "no");
38b2d076 387#endif
0e607518 388 return results[(int) rclass][mode] == 2;
38b2d076
DD
389}
390
/* Run-time Target Specification.  */

/* Memregs are memory locations that gcc treats like general
   registers, as there are a limited number of true registers and the
   m32c families can use memory in most places that registers can be
   used.

   However, since memory accesses are more expensive than registers,
   we allow the user to limit the number of memregs available, in
   order to try to persuade gcc to try harder to use real registers.

   Memregs are provided by lib1funcs.S.
*/

int ok_to_change_target_memregs = TRUE;
406
f28f2337
AS
407/* Implements TARGET_OPTION_OVERRIDE. */
408
409#undef TARGET_OPTION_OVERRIDE
410#define TARGET_OPTION_OVERRIDE m32c_option_override
411
412static void
413m32c_option_override (void)
38b2d076 414{
f28f2337 415 /* We limit memregs to 0..16, and provide a default. */
bbfc9a8c 416 if (global_options_set.x_target_memregs)
38b2d076
DD
417 {
418 if (target_memregs < 0 || target_memregs > 16)
904f3daa 419 error ("invalid target memregs value %<%d%>", target_memregs);
38b2d076
DD
420 }
421 else
07127a0a 422 target_memregs = 16;
18b80268
DD
423
424 if (TARGET_A24)
425 flag_ivopts = 0;
0685e770
DD
426
427 /* This target defaults to strict volatile bitfields. */
36acc1a2 428 if (flag_strict_volatile_bitfields < 0 && abi_version_at_least(2))
0685e770 429 flag_strict_volatile_bitfields = 1;
d123bf41
DD
430
431 /* r8c/m16c have no 16-bit indirect call, so thunks are involved.
432 This is always worse than an absolute call. */
433 if (TARGET_A16)
434 flag_no_function_cse = 1;
a4403164
DD
435
436 /* This wants to put insns between compares and their jumps. */
437 /* FIXME: The right solution is to properly trace the flags register
438 values, but that is too much work for stage 4. */
439 flag_combine_stack_adjustments = 0;
d123bf41
DD
440}
441
442#undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
443#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m32c_override_options_after_change
444
445static void
446m32c_override_options_after_change (void)
447{
448 if (TARGET_A16)
449 flag_no_function_cse = 1;
38b2d076
DD
450}
451
452/* Defining data structures for per-function information */
453
454/* The usual; we set up our machine_function data. */
455static struct machine_function *
456m32c_init_machine_status (void)
457{
766090c2 458 return ggc_cleared_alloc<machine_function> ();
38b2d076
DD
459}
460
461/* Implements INIT_EXPANDERS. We just set up to call the above
462 function. */
463void
464m32c_init_expanders (void)
465{
466 init_machine_status = m32c_init_machine_status;
467}
468
469/* Storage Layout */
470
38b2d076
DD
471/* Register Basics */
472
473/* Basic Characteristics of Registers */
474
475/* Whether a mode fits in a register is complex enough to warrant a
476 table. */
477static struct
478{
479 char qi_regs;
480 char hi_regs;
481 char pi_regs;
482 char si_regs;
483 char di_regs;
484} nregs_table[FIRST_PSEUDO_REGISTER] =
485{
486 { 1, 1, 2, 2, 4 }, /* r0 */
487 { 0, 1, 0, 0, 0 }, /* r2 */
488 { 1, 1, 2, 2, 0 }, /* r1 */
489 { 0, 1, 0, 0, 0 }, /* r3 */
490 { 0, 1, 1, 0, 0 }, /* a0 */
491 { 0, 1, 1, 0, 0 }, /* a1 */
492 { 0, 1, 1, 0, 0 }, /* sb */
493 { 0, 1, 1, 0, 0 }, /* fb */
494 { 0, 1, 1, 0, 0 }, /* sp */
495 { 1, 1, 1, 0, 0 }, /* pc */
496 { 0, 0, 0, 0, 0 }, /* fl */
497 { 1, 1, 1, 0, 0 }, /* ap */
498 { 1, 1, 2, 2, 4 }, /* mem0 */
499 { 1, 1, 2, 2, 4 }, /* mem1 */
500 { 1, 1, 2, 2, 4 }, /* mem2 */
501 { 1, 1, 2, 2, 4 }, /* mem3 */
502 { 1, 1, 2, 2, 4 }, /* mem4 */
503 { 1, 1, 2, 2, 0 }, /* mem5 */
504 { 1, 1, 2, 2, 0 }, /* mem6 */
505 { 1, 1, 0, 0, 0 }, /* mem7 */
506};
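/* Read each row as the number of hard registers the named register
   needs in order to hold a QI, HI, PSI, SI, or DI value, with 0
   meaning the mode does not fit in that register at all.  For
   example, the r0 row { 1, 1, 2, 2, 4 } says an SImode value in r0
   spans two registers (the r2r0 pair) and a DImode value spans four
   (r3r1r2r0).  */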
507
/* Implements TARGET_CONDITIONAL_REGISTER_USAGE.  We adjust the number
   of available memregs, and select which registers need to be preserved
   across calls based on the chip family.  */

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE m32c_conditional_register_usage
void
m32c_conditional_register_usage (void)
516{
38b2d076
DD
517 int i;
518
01512446 519 if (target_memregs >= 0 && target_memregs <= 16)
38b2d076
DD
520 {
521 /* The command line option is bytes, but our "registers" are
522 16-bit words. */
65655f79 523 for (i = (target_memregs+1)/2; i < 8; i++)
38b2d076
DD
524 {
525 fixed_regs[MEM0_REGNO + i] = 1;
526 CLEAR_HARD_REG_BIT (reg_class_contents[MEM_REGS], MEM0_REGNO + i);
527 }
528 }
529
530 /* M32CM and M32C preserve more registers across function calls. */
531 if (TARGET_A24)
532 {
533 call_used_regs[R1_REGNO] = 0;
534 call_used_regs[R2_REGNO] = 0;
535 call_used_regs[R3_REGNO] = 0;
536 call_used_regs[A0_REGNO] = 0;
537 call_used_regs[A1_REGNO] = 0;
538 }
539}
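/* As a worked example of the bytes-to-words conversion above: with
   target_memregs set to 6 (the value is in bytes), the loop starts at
   (6+1)/2 == 3, leaving mem0..mem2 usable and fixing mem3..mem7 so
   the register allocator never sees them.  */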
540
541/* How Values Fit in Registers */
542
/* Implements TARGET_HARD_REGNO_NREGS.  This is complicated by the fact that
   different registers are different sizes from each other, *and* may
   be different sizes in different chip families.  */
static unsigned int
m32c_hard_regno_nregs_1 (unsigned int regno, machine_mode mode)
{
549 if (regno == FLG_REGNO && mode == CCmode)
550 return 1;
551 if (regno >= FIRST_PSEUDO_REGISTER)
552 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
553
554 if (regno >= MEM0_REGNO && regno <= MEM7_REGNO)
555 return (GET_MODE_SIZE (mode) + 1) / 2;
556
557 if (GET_MODE_SIZE (mode) <= 1)
558 return nregs_table[regno].qi_regs;
559 if (GET_MODE_SIZE (mode) <= 2)
560 return nregs_table[regno].hi_regs;
5fd5d713 561 if (regno == A0_REGNO && mode == SImode && TARGET_A16)
38b2d076
DD
562 return 2;
563 if ((GET_MODE_SIZE (mode) <= 3 || mode == PSImode) && TARGET_A24)
564 return nregs_table[regno].pi_regs;
565 if (GET_MODE_SIZE (mode) <= 4)
566 return nregs_table[regno].si_regs;
567 if (GET_MODE_SIZE (mode) <= 8)
568 return nregs_table[regno].di_regs;
569 return 0;
570}
571
c43f4279
RS
572static unsigned int
573m32c_hard_regno_nregs (unsigned int regno, machine_mode mode)
b8a669d0 574{
c43f4279 575 unsigned int rv = m32c_hard_regno_nregs_1 (regno, mode);
b8a669d0
DD
576 return rv ? rv : 1;
577}
578
f939c3e6 579/* Implement TARGET_HARD_REGNO_MODE_OK. The above function does the work
38b2d076 580 already; just test its return value. */
f939c3e6
RS
581static bool
582m32c_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
38b2d076 583{
b8a669d0 584 return m32c_hard_regno_nregs_1 (regno, mode) != 0;
38b2d076
DD
585}
586
/* Implement TARGET_MODES_TIEABLE_P.  In general, modes aren't tieable since
   registers are all different sizes.  However, since most modes are
   bigger than our registers anyway, it's easier to implement this
   function that way, leaving QImode as the only unique case.  */
static bool
m32c_modes_tieable_p (machine_mode m1, machine_mode m2)
{
594 if (GET_MODE_SIZE (m1) == GET_MODE_SIZE (m2))
595 return 1;
596
07127a0a 597#if 0
38b2d076
DD
598 if (m1 == QImode || m2 == QImode)
599 return 0;
07127a0a 600#endif
38b2d076
DD
601
602 return 1;
603}
604
605/* Register Classes */
606
607/* Implements REGNO_REG_CLASS. */
444d6efe 608enum reg_class
38b2d076
DD
609m32c_regno_reg_class (int regno)
610{
611 switch (regno)
612 {
613 case R0_REGNO:
614 return R0_REGS;
615 case R1_REGNO:
616 return R1_REGS;
617 case R2_REGNO:
618 return R2_REGS;
619 case R3_REGNO:
620 return R3_REGS;
621 case A0_REGNO:
22843acd 622 return A0_REGS;
38b2d076 623 case A1_REGNO:
22843acd 624 return A1_REGS;
38b2d076
DD
625 case SB_REGNO:
626 return SB_REGS;
627 case FB_REGNO:
628 return FB_REGS;
629 case SP_REGNO:
630 return SP_REGS;
631 case FLG_REGNO:
632 return FLG_REGS;
633 default:
634 if (IS_MEM_REGNO (regno))
635 return MEM_REGS;
636 return ALL_REGS;
637 }
638}
639
38b2d076
DD
640/* Implements REGNO_OK_FOR_BASE_P. */
641int
642m32c_regno_ok_for_base_p (int regno)
643{
644 if (regno == A0_REGNO
645 || regno == A1_REGNO || regno >= FIRST_PSEUDO_REGISTER)
646 return 1;
647 return 0;
648}
649
b05933f5 650/* Implements TARGET_PREFERRED_RELOAD_CLASS. In general, prefer general
38b2d076 651 registers of the appropriate size. */
b05933f5
AS
652
653#undef TARGET_PREFERRED_RELOAD_CLASS
654#define TARGET_PREFERRED_RELOAD_CLASS m32c_preferred_reload_class
655
656static reg_class_t
657m32c_preferred_reload_class (rtx x, reg_class_t rclass)
38b2d076 658{
b05933f5 659 reg_class_t newclass = rclass;
38b2d076 660
f75e07bc 661#if DEBUG0
38b2d076
DD
662 fprintf (stderr, "\npreferred_reload_class for %s is ",
663 class_names[rclass]);
664#endif
665 if (rclass == NO_REGS)
666 rclass = GET_MODE (x) == QImode ? HL_REGS : R03_REGS;
667
0e607518 668 if (reg_classes_intersect_p (rclass, CR_REGS))
38b2d076
DD
669 {
670 switch (GET_MODE (x))
671 {
4e10a5a7 672 case E_QImode:
38b2d076
DD
673 newclass = HL_REGS;
674 break;
675 default:
676 /* newclass = HI_REGS; */
677 break;
678 }
679 }
680
681 else if (newclass == QI_REGS && GET_MODE_SIZE (GET_MODE (x)) > 2)
682 newclass = SI_REGS;
683 else if (GET_MODE_SIZE (GET_MODE (x)) > 4
b05933f5 684 && ! reg_class_subset_p (R03_REGS, rclass))
38b2d076
DD
685 newclass = DI_REGS;
686
687 rclass = reduce_class (rclass, newclass, rclass);
688
689 if (GET_MODE (x) == QImode)
690 rclass = reduce_class (rclass, HL_REGS, rclass);
691
f75e07bc 692#if DEBUG0
38b2d076
DD
693 fprintf (stderr, "%s\n", class_names[rclass]);
694 debug_rtx (x);
695
696 if (GET_CODE (x) == MEM
697 && GET_CODE (XEXP (x, 0)) == PLUS
698 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
699 fprintf (stderr, "Glorm!\n");
700#endif
701 return rclass;
702}
703
b05933f5
AS
704/* Implements TARGET_PREFERRED_OUTPUT_RELOAD_CLASS. */
705
706#undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
707#define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS m32c_preferred_output_reload_class
708
709static reg_class_t
710m32c_preferred_output_reload_class (rtx x, reg_class_t rclass)
38b2d076
DD
711{
712 return m32c_preferred_reload_class (x, rclass);
713}
714
715/* Implements LIMIT_RELOAD_CLASS. We basically want to avoid using
716 address registers for reloads since they're needed for address
717 reloads. */
718int
ef4bddc2 719m32c_limit_reload_class (machine_mode mode, int rclass)
38b2d076 720{
f75e07bc 721#if DEBUG0
38b2d076
DD
722 fprintf (stderr, "limit_reload_class for %s: %s ->",
723 mode_name[mode], class_names[rclass]);
724#endif
725
726 if (mode == QImode)
727 rclass = reduce_class (rclass, HL_REGS, rclass);
728 else if (mode == HImode)
729 rclass = reduce_class (rclass, HI_REGS, rclass);
730 else if (mode == SImode)
731 rclass = reduce_class (rclass, SI_REGS, rclass);
732
733 if (rclass != A_REGS)
734 rclass = reduce_class (rclass, DI_REGS, rclass);
735
f75e07bc 736#if DEBUG0
38b2d076
DD
737 fprintf (stderr, " %s\n", class_names[rclass]);
738#endif
739 return rclass;
740}
741
/* Implements SECONDARY_RELOAD_CLASS.  QImode values have to be reloaded in
   r0 or r1, as those are the only real QImode registers.  CR regs get
   reloaded through appropriately sized general or address
   registers.  */
746int
ef4bddc2 747m32c_secondary_reload_class (int rclass, machine_mode mode, rtx x)
38b2d076
DD
748{
749 int cc = class_contents[rclass][0];
750#if DEBUG0
751 fprintf (stderr, "\nsecondary reload class %s %s\n",
752 class_names[rclass], mode_name[mode]);
753 debug_rtx (x);
754#endif
755 if (mode == QImode
756 && GET_CODE (x) == MEM && (cc & ~class_contents[R23_REGS][0]) == 0)
757 return QI_REGS;
0e607518 758 if (reg_classes_intersect_p (rclass, CR_REGS)
38b2d076
DD
759 && GET_CODE (x) == REG
760 && REGNO (x) >= SB_REGNO && REGNO (x) <= SP_REGNO)
13a23442 761 return (TARGET_A16 || mode == HImode) ? HI_REGS : A_REGS;
38b2d076
DD
762 return NO_REGS;
763}
764
184866c5 765/* Implements TARGET_CLASS_LIKELY_SPILLED_P. A_REGS is needed for address
38b2d076 766 reloads. */
184866c5
AS
767
768#undef TARGET_CLASS_LIKELY_SPILLED_P
769#define TARGET_CLASS_LIKELY_SPILLED_P m32c_class_likely_spilled_p
770
771static bool
772m32c_class_likely_spilled_p (reg_class_t regclass)
38b2d076
DD
773{
774 if (regclass == A_REGS)
184866c5
AS
775 return true;
776
777 return (reg_class_size[(int) regclass] == 1);
38b2d076
DD
778}
779
/* Implements TARGET_CLASS_MAX_NREGS.  We calculate this according to its
   documented meaning, to avoid potential inconsistencies with actual
   class definitions.  */
783
784#undef TARGET_CLASS_MAX_NREGS
785#define TARGET_CLASS_MAX_NREGS m32c_class_max_nregs
786
787static unsigned char
ef4bddc2 788m32c_class_max_nregs (reg_class_t regclass, machine_mode mode)
38b2d076 789{
c4831cff
AS
790 int rn;
791 unsigned char max = 0;
38b2d076
DD
792
793 for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
c4831cff 794 if (TEST_HARD_REG_BIT (reg_class_contents[(int) regclass], rn))
38b2d076 795 {
c4831cff 796 unsigned char n = m32c_hard_regno_nregs (rn, mode);
38b2d076
DD
797 if (max < n)
798 max = n;
799 }
800 return max;
801}
802
/* Implements TARGET_CAN_CHANGE_MODE_CLASS.  Only r0 and r1 can change to
   QI (r0l, r1l) because the chip doesn't support QI ops on other
   registers (well, it does on a0/a1 but if we let gcc do that, reload
   suffers).  Otherwise, we allow changes to larger modes.  */
static bool
m32c_can_change_mode_class (machine_mode from,
			    machine_mode to, reg_class_t rclass)
{
db9c8397 811 int rn;
38b2d076 812#if DEBUG0
0d803030 813 fprintf (stderr, "can change from %s to %s in %s\n",
38b2d076
DD
814 mode_name[from], mode_name[to], class_names[rclass]);
815#endif
816
db9c8397
DD
817 /* If the larger mode isn't allowed in any of these registers, we
818 can't allow the change. */
819 for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
820 if (class_contents[rclass][0] & (1 << rn))
f939c3e6 821 if (! m32c_hard_regno_mode_ok (rn, to))
0d803030 822 return false;
db9c8397 823
38b2d076 824 if (to == QImode)
0d803030 825 return (class_contents[rclass][0] & 0x1ffa) == 0;
38b2d076
DD
826
827 if (class_contents[rclass][0] & 0x0005 /* r0, r1 */
828 && GET_MODE_SIZE (from) > 1)
0d803030 829 return true;
38b2d076 830 if (GET_MODE_SIZE (from) > 2) /* all other regs */
0d803030 831 return true;
38b2d076 832
0d803030 833 return false;
38b2d076
DD
834}
835
836/* Helpers for the rest of the file. */
837/* TRUE if the rtx is a REG rtx for the given register. */
838#define IS_REG(rtx,regno) (GET_CODE (rtx) == REG \
839 && REGNO (rtx) == regno)
840/* TRUE if the rtx is a pseudo - specifically, one we can use as a
841 base register in address calculations (hence the "strict"
842 argument). */
843#define IS_PSEUDO(rtx,strict) (!strict && GET_CODE (rtx) == REG \
844 && (REGNO (rtx) == AP_REGNO \
845 || REGNO (rtx) >= FIRST_PSEUDO_REGISTER))
846
5fd5d713
DD
847#define A0_OR_PSEUDO(x) (IS_REG(x, A0_REGNO) || REGNO (x) >= FIRST_PSEUDO_REGISTER)
848
/* Implements matching for constraints (see next function too).  'S' is
   for memory constraints, plus "Rpa" for PARALLEL rtx's we use for
   call return values.  */
bool
853m32c_matches_constraint_p (rtx value, int constraint)
38b2d076
DD
854{
855 encode_pattern (value);
5fd5d713 856
03dd17b1
NF
857 switch (constraint) {
858 case CONSTRAINT_SF:
859 return (far_addr_space_p (value)
860 && ((RTX_IS ("mr")
861 && A0_OR_PSEUDO (patternr[1])
862 && GET_MODE (patternr[1]) == SImode)
863 || (RTX_IS ("m+^Sri")
864 && A0_OR_PSEUDO (patternr[4])
865 && GET_MODE (patternr[4]) == HImode)
866 || (RTX_IS ("m+^Srs")
867 && A0_OR_PSEUDO (patternr[4])
868 && GET_MODE (patternr[4]) == HImode)
869 || (RTX_IS ("m+^S+ris")
870 && A0_OR_PSEUDO (patternr[5])
871 && GET_MODE (patternr[5]) == HImode)
872 || RTX_IS ("ms")));
873 case CONSTRAINT_Sd:
38b2d076
DD
874 {
875 /* This is the common "src/dest" address */
876 rtx r;
877 if (GET_CODE (value) == MEM && CONSTANT_P (XEXP (value, 0)))
03dd17b1 878 return true;
38b2d076 879 if (RTX_IS ("ms") || RTX_IS ("m+si"))
03dd17b1 880 return true;
07127a0a
DD
881 if (RTX_IS ("m++rii"))
882 {
883 if (REGNO (patternr[3]) == FB_REGNO
884 && INTVAL (patternr[4]) == 0)
03dd17b1 885 return true;
07127a0a 886 }
38b2d076
DD
887 if (RTX_IS ("mr"))
888 r = patternr[1];
889 else if (RTX_IS ("m+ri") || RTX_IS ("m+rs") || RTX_IS ("m+r+si"))
890 r = patternr[2];
891 else
03dd17b1 892 return false;
38b2d076 893 if (REGNO (r) == SP_REGNO)
03dd17b1 894 return false;
38b2d076
DD
895 return m32c_legitimate_address_p (GET_MODE (value), XEXP (value, 0), 1);
896 }
03dd17b1 897 case CONSTRAINT_Sa:
38b2d076
DD
898 {
899 rtx r;
900 if (RTX_IS ("mr"))
901 r = patternr[1];
902 else if (RTX_IS ("m+ri"))
903 r = patternr[2];
904 else
03dd17b1 905 return false;
38b2d076
DD
906 return (IS_REG (r, A0_REGNO) || IS_REG (r, A1_REGNO));
907 }
03dd17b1
NF
908 case CONSTRAINT_Si:
909 return (RTX_IS ("mi") || RTX_IS ("ms") || RTX_IS ("m+si"));
910 case CONSTRAINT_Ss:
911 return ((RTX_IS ("mr")
912 && (IS_REG (patternr[1], SP_REGNO)))
913 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SP_REGNO))));
914 case CONSTRAINT_Sf:
915 return ((RTX_IS ("mr")
916 && (IS_REG (patternr[1], FB_REGNO)))
917 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], FB_REGNO))));
918 case CONSTRAINT_Sb:
919 return ((RTX_IS ("mr")
920 && (IS_REG (patternr[1], SB_REGNO)))
921 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SB_REGNO))));
922 case CONSTRAINT_Sp:
923 /* Absolute addresses 0..0x1fff used for bit addressing (I/O ports) */
924 return (RTX_IS ("mi")
925 && !(INTVAL (patternr[1]) & ~0x1fff));
926 case CONSTRAINT_S1:
927 return r1h_operand (value, QImode);
928 case CONSTRAINT_Rpa:
38b2d076 929 return GET_CODE (value) == PARALLEL;
03dd17b1
NF
930 default:
931 return false;
932 }
38b2d076
DD
933}
934
935/* STACK AND CALLING */
936
937/* Frame Layout */
938
939/* Implements RETURN_ADDR_RTX. Note that R8C and M16C push 24 bits
940 (yes, THREE bytes) onto the stack for the return address, but we
941 don't support pointers bigger than 16 bits on those chips. This
942 will likely wreak havoc with exception unwinding. FIXME. */
943rtx
944m32c_return_addr_rtx (int count)
945{
ef4bddc2 946 machine_mode mode;
38b2d076
DD
947 int offset;
948 rtx ra_mem;
949
950 if (count)
951 return NULL_RTX;
952 /* we want 2[$fb] */
953
954 if (TARGET_A24)
955 {
80b093df
DD
956 /* It's four bytes */
957 mode = PSImode;
38b2d076
DD
958 offset = 4;
959 }
960 else
961 {
962 /* FIXME: it's really 3 bytes */
963 mode = HImode;
964 offset = 2;
965 }
966
967 ra_mem =
0a81f074
RS
968 gen_rtx_MEM (mode, plus_constant (Pmode, gen_rtx_REG (Pmode, FP_REGNO),
969 offset));
38b2d076
DD
970 return copy_to_mode_reg (mode, ra_mem);
971}
972
973/* Implements INCOMING_RETURN_ADDR_RTX. See comment above. */
974rtx
975m32c_incoming_return_addr_rtx (void)
976{
977 /* we want [sp] */
978 return gen_rtx_MEM (PSImode, gen_rtx_REG (PSImode, SP_REGNO));
979}
980
981/* Exception Handling Support */
982
983/* Implements EH_RETURN_DATA_REGNO. Choose registers able to hold
984 pointers. */
985int
986m32c_eh_return_data_regno (int n)
987{
988 switch (n)
989 {
990 case 0:
45d898e4 991 return MEM0_REGNO;
38b2d076 992 case 1:
45d898e4 993 return MEM0_REGNO+4;
38b2d076
DD
994 default:
995 return INVALID_REGNUM;
996 }
997}
998
999/* Implements EH_RETURN_STACKADJ_RTX. Saved and used later in
1000 m32c_emit_eh_epilogue. */
1001rtx
1002m32c_eh_return_stackadj_rtx (void)
1003{
1004 if (!cfun->machine->eh_stack_adjust)
1005 {
1006 rtx sa;
1007
99920b6f 1008 sa = gen_rtx_REG (Pmode, R0_REGNO);
38b2d076
DD
1009 cfun->machine->eh_stack_adjust = sa;
1010 }
1011 return cfun->machine->eh_stack_adjust;
1012}
1013
1014/* Registers That Address the Stack Frame */
1015
1016/* Implements DWARF_FRAME_REGNUM and DBX_REGISTER_NUMBER. Note that
1017 the original spec called for dwarf numbers to vary with register
1018 width as well, for example, r0l, r0, and r2r0 would each have
1019 different dwarf numbers. GCC doesn't support this, and we don't do
1020 it, and gdb seems to like it this way anyway. */
1021unsigned int
1022m32c_dwarf_frame_regnum (int n)
1023{
1024 switch (n)
1025 {
1026 case R0_REGNO:
1027 return 5;
1028 case R1_REGNO:
1029 return 6;
1030 case R2_REGNO:
1031 return 7;
1032 case R3_REGNO:
1033 return 8;
1034 case A0_REGNO:
1035 return 9;
1036 case A1_REGNO:
1037 return 10;
1038 case FB_REGNO:
1039 return 11;
1040 case SB_REGNO:
1041 return 19;
1042
1043 case SP_REGNO:
1044 return 12;
1045 case PC_REGNO:
1046 return 13;
1047 default:
1048 return DWARF_FRAME_REGISTERS + 1;
1049 }
1050}
1051
1052/* The frame looks like this:
1053
1054 ap -> +------------------------------
1055 | Return address (3 or 4 bytes)
1056 | Saved FB (2 or 4 bytes)
1057 fb -> +------------------------------
1058 | local vars
1059 | register saves fb
1060 | through r0 as needed
1061 sp -> +------------------------------
1062*/
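/* Working through the diagram above: eliminating $ap to $fb skips
   just the return address and saved $fb, i.e. a fixed 3+2 = 5 bytes
   on A16 parts or 4+4 = 8 bytes on A24 parts; eliminating down to
   $sp additionally adds the register save area and the frame size
   (see m32c_initial_elimination_offset below).  */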
1063
1064/* We use this to wrap all emitted insns in the prologue. */
1065static rtx
1066F (rtx x)
1067{
1068 RTX_FRAME_RELATED_P (x) = 1;
1069 return x;
1070}
1071
1072/* This maps register numbers to the PUSHM/POPM bitfield, and tells us
1073 how much the stack pointer moves for each, for each cpu family. */
1074static struct
1075{
1076 int reg1;
1077 int bit;
1078 int a16_bytes;
1079 int a24_bytes;
1080} pushm_info[] =
1081{
9d746d5e
DD
1082 /* These are in reverse push (nearest-to-sp) order. */
1083 { R0_REGNO, 0x80, 2, 2 },
38b2d076 1084 { R1_REGNO, 0x40, 2, 2 },
9d746d5e
DD
1085 { R2_REGNO, 0x20, 2, 2 },
1086 { R3_REGNO, 0x10, 2, 2 },
1087 { A0_REGNO, 0x08, 2, 4 },
1088 { A1_REGNO, 0x04, 2, 4 },
1089 { SB_REGNO, 0x02, 2, 4 },
1090 { FB_REGNO, 0x01, 2, 4 }
38b2d076
DD
1091};
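/* For example, a function that must preserve r1, r2, and $fb would
   emit PUSHM with the mask 0x40|0x20|0x01 == 0x61, moving the stack
   pointer by 6 bytes on A16 or 8 bytes on A24.  */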
1092
1093#define PUSHM_N (sizeof(pushm_info)/sizeof(pushm_info[0]))
1094
1095/* Returns TRUE if we need to save/restore the given register. We
1096 save everything for exception handlers, so that any register can be
1097 unwound. For interrupt handlers, we save everything if the handler
1098 calls something else (because we don't know what *that* function
1099 might do), but try to be a bit smarter if the handler is a leaf
1100 function. We always save $a0, though, because we use that in the
85f65093 1101 epilogue to copy $fb to $sp. */
38b2d076
DD
1102static int
1103need_to_save (int regno)
1104{
1105 if (fixed_regs[regno])
1106 return 0;
ad516a74 1107 if (crtl->calls_eh_return)
38b2d076
DD
1108 return 1;
1109 if (regno == FP_REGNO)
1110 return 0;
1111 if (cfun->machine->is_interrupt
65655f79
DD
1112 && (!cfun->machine->is_leaf
1113 || (regno == A0_REGNO
1114 && m32c_function_needs_enter ())
1115 ))
38b2d076 1116 return 1;
6fb5fa3c 1117 if (df_regs_ever_live_p (regno)
38b2d076
DD
1118 && (!call_used_regs[regno] || cfun->machine->is_interrupt))
1119 return 1;
1120 return 0;
1121}
1122
1123/* This function contains all the intelligence about saving and
1124 restoring registers. It always figures out the register save set.
1125 When called with PP_justcount, it merely returns the size of the
1126 save set (for eliminating the frame pointer, for example). When
1127 called with PP_pushm or PP_popm, it emits the appropriate
1128 instructions for saving (pushm) or restoring (popm) the
1129 registers. */
1130static int
1131m32c_pushm_popm (Push_Pop_Type ppt)
1132{
1133 int reg_mask = 0;
1134 int byte_count = 0, bytes;
1135 int i;
1136 rtx dwarf_set[PUSHM_N];
1137 int n_dwarfs = 0;
1138 int nosave_mask = 0;
1139
305da3ec
JH
1140 if (crtl->return_rtx
1141 && GET_CODE (crtl->return_rtx) == PARALLEL
ad516a74 1142 && !(crtl->calls_eh_return || cfun->machine->is_interrupt))
38b2d076 1143 {
305da3ec 1144 rtx exp = XVECEXP (crtl->return_rtx, 0, 0);
38b2d076
DD
1145 rtx rv = XEXP (exp, 0);
1146 int rv_bytes = GET_MODE_SIZE (GET_MODE (rv));
1147
1148 if (rv_bytes > 2)
1149 nosave_mask |= 0x20; /* PSI, SI */
1150 else
1151 nosave_mask |= 0xf0; /* DF */
1152 if (rv_bytes > 4)
1153 nosave_mask |= 0x50; /* DI */
1154 }
1155
1156 for (i = 0; i < (int) PUSHM_N; i++)
1157 {
1158 /* Skip if neither register needs saving. */
1159 if (!need_to_save (pushm_info[i].reg1))
1160 continue;
1161
1162 if (pushm_info[i].bit & nosave_mask)
1163 continue;
1164
1165 reg_mask |= pushm_info[i].bit;
1166 bytes = TARGET_A16 ? pushm_info[i].a16_bytes : pushm_info[i].a24_bytes;
1167
1168 if (ppt == PP_pushm)
1169 {
ef4bddc2 1170 machine_mode mode = (bytes == 2) ? HImode : SImode;
38b2d076
DD
1171 rtx addr;
1172
1173 /* Always use stack_pointer_rtx instead of calling
1174 rtx_gen_REG ourselves. Code elsewhere in GCC assumes
1175 that there is a single rtx representing the stack pointer,
1176 namely stack_pointer_rtx, and uses == to recognize it. */
1177 addr = stack_pointer_rtx;
1178
1179 if (byte_count != 0)
1180 addr = gen_rtx_PLUS (GET_MODE (addr), addr, GEN_INT (byte_count));
1181
1182 dwarf_set[n_dwarfs++] =
f7df4a84 1183 gen_rtx_SET (gen_rtx_MEM (mode, addr),
38b2d076
DD
1184 gen_rtx_REG (mode, pushm_info[i].reg1));
1185 F (dwarf_set[n_dwarfs - 1]);
1186
1187 }
1188 byte_count += bytes;
1189 }
1190
1191 if (cfun->machine->is_interrupt)
1192 {
1193 cfun->machine->intr_pushm = reg_mask & 0xfe;
1194 reg_mask = 0;
1195 byte_count = 0;
1196 }
1197
1198 if (cfun->machine->is_interrupt)
1199 for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
1200 if (need_to_save (i))
1201 {
1202 byte_count += 2;
1203 cfun->machine->intr_pushmem[i - MEM0_REGNO] = 1;
1204 }
1205
1206 if (ppt == PP_pushm && byte_count)
1207 {
1208 rtx note = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (n_dwarfs + 1));
1209 rtx pushm;
1210
1211 if (reg_mask)
1212 {
1213 XVECEXP (note, 0, 0)
f7df4a84 1214 = gen_rtx_SET (stack_pointer_rtx,
38b2d076
DD
1215 gen_rtx_PLUS (GET_MODE (stack_pointer_rtx),
1216 stack_pointer_rtx,
1217 GEN_INT (-byte_count)));
1218 F (XVECEXP (note, 0, 0));
1219
1220 for (i = 0; i < n_dwarfs; i++)
1221 XVECEXP (note, 0, i + 1) = dwarf_set[i];
1222
1223 pushm = F (emit_insn (gen_pushm (GEN_INT (reg_mask))));
1224
444d6efe 1225 add_reg_note (pushm, REG_FRAME_RELATED_EXPR, note);
38b2d076
DD
1226 }
1227
1228 if (cfun->machine->is_interrupt)
1229 for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
1230 if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
1231 {
1232 if (TARGET_A16)
1233 pushm = emit_insn (gen_pushhi_16 (gen_rtx_REG (HImode, i)));
1234 else
1235 pushm = emit_insn (gen_pushhi_24 (gen_rtx_REG (HImode, i)));
1236 F (pushm);
1237 }
1238 }
1239 if (ppt == PP_popm && byte_count)
1240 {
38b2d076
DD
1241 if (cfun->machine->is_interrupt)
1242 for (i = MEM7_REGNO; i >= MEM0_REGNO; i--)
1243 if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
1244 {
1245 if (TARGET_A16)
b3fdec9e 1246 emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, i)));
38b2d076 1247 else
b3fdec9e 1248 emit_insn (gen_pophi_24 (gen_rtx_REG (HImode, i)));
38b2d076
DD
1249 }
1250 if (reg_mask)
1251 emit_insn (gen_popm (GEN_INT (reg_mask)));
1252 }
1253
1254 return byte_count;
1255}
1256
1257/* Implements INITIAL_ELIMINATION_OFFSET. See the comment above that
1258 diagrams our call frame. */
1259int
1260m32c_initial_elimination_offset (int from, int to)
1261{
1262 int ofs = 0;
1263
1264 if (from == AP_REGNO)
1265 {
1266 if (TARGET_A16)
1267 ofs += 5;
1268 else
1269 ofs += 8;
1270 }
1271
1272 if (to == SP_REGNO)
1273 {
1274 ofs += m32c_pushm_popm (PP_justcount);
1275 ofs += get_frame_size ();
1276 }
1277
1278 /* Account for push rounding. */
1279 if (TARGET_A24)
1280 ofs = (ofs + 1) & ~1;
1281#if DEBUG0
1282 fprintf (stderr, "initial_elimination_offset from=%d to=%d, ofs=%d\n", from,
1283 to, ofs);
1284#endif
1285 return ofs;
1286}
1287
1288/* Passing Function Arguments on the Stack */
1289
/* Implements PUSH_ROUNDING.  The R8C and M16C have byte stacks, the
   M32C has word stacks.  */
poly_int64
m32c_push_rounding (poly_int64 n)
{
  if (TARGET_R8C || TARGET_M16C)
    return n;
  return (n + 1) & ~1;
}
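/* For example, pushing a single QImode byte moves $sp by 1 on the
   byte-stacked R8C/M16C, but by 2 on the word-stacked M32C, where
   m32c_push_rounding (1) returns 2.  */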
1299
1300/* Passing Arguments in Registers */
1301
/* Implements TARGET_FUNCTION_ARG.  Arguments are passed partly in
   registers, partly on stack.  If our function returns a struct, a
   pointer to a buffer for it is at the top of the stack (last thing
   pushed).  The first few real arguments may be in registers as
   follows:

   R8C/M16C: arg1 in r1 if it's QI or HI (else it's pushed on stack)
	     arg2 in r2 if it's HI (else pushed on stack)
	     rest on stack
   M32C: arg1 in r0 if it's QI or HI (else it's pushed on stack)
	 rest on stack

   Structs are not passed in registers, even if they fit.  Only
   integer and pointer types are passed in registers.

   Note that when arg1 doesn't fit in r1, arg2 may still be passed in
   r2 if it fits.  */
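/* As a rough illustration of the rules above (ints are 16 bits on
   these parts): for a call f (int a, int b, long c), R8C/M16C pass A
   in r1 and B in r2 while C goes on the stack; M32C passes only A in
   a register (r0) and pushes the rest.  */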
1319#undef TARGET_FUNCTION_ARG
1320#define TARGET_FUNCTION_ARG m32c_function_arg
1321static rtx
6783fdb7 1322m32c_function_arg (cumulative_args_t ca_v, const function_arg_info &arg)
38b2d076 1323{
d5cc9181
JR
1324 CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
1325
38b2d076
DD
1326 /* Can return a reg, parallel, or 0 for stack */
1327 rtx rv = NULL_RTX;
1328#if DEBUG0
1329 fprintf (stderr, "func_arg %d (%s, %d)\n",
6783fdb7
RS
1330 ca->parm_num, mode_name[arg.mode], arg.named);
1331 debug_tree (arg.type);
38b2d076
DD
1332#endif
1333
6783fdb7 1334 if (arg.end_marker_p ())
38b2d076
DD
1335 return GEN_INT (0);
1336
6783fdb7 1337 if (ca->force_mem || !arg.named)
38b2d076
DD
1338 {
1339#if DEBUG0
1340 fprintf (stderr, "func arg: force %d named %d, mem\n", ca->force_mem,
6783fdb7 1341 arg.named);
38b2d076
DD
1342#endif
1343 return NULL_RTX;
1344 }
1345
6783fdb7 1346 if (arg.type && INTEGRAL_TYPE_P (arg.type) && POINTER_TYPE_P (arg.type))
38b2d076
DD
1347 return NULL_RTX;
1348
6783fdb7 1349 if (arg.aggregate_type_p ())
9d746d5e
DD
1350 return NULL_RTX;
1351
38b2d076
DD
1352 switch (ca->parm_num)
1353 {
1354 case 1:
6783fdb7
RS
1355 if (GET_MODE_SIZE (arg.mode) == 1 || GET_MODE_SIZE (arg.mode) == 2)
1356 rv = gen_rtx_REG (arg.mode, TARGET_A16 ? R1_REGNO : R0_REGNO);
38b2d076
DD
1357 break;
1358
1359 case 2:
6783fdb7
RS
1360 if (TARGET_A16 && GET_MODE_SIZE (arg.mode) == 2)
1361 rv = gen_rtx_REG (arg.mode, R2_REGNO);
38b2d076
DD
1362 break;
1363 }
1364
1365#if DEBUG0
1366 debug_rtx (rv);
1367#endif
1368 return rv;
1369}
1370
1371#undef TARGET_PASS_BY_REFERENCE
1372#define TARGET_PASS_BY_REFERENCE m32c_pass_by_reference
1373static bool
52090e4d 1374m32c_pass_by_reference (cumulative_args_t, const function_arg_info &)
38b2d076
DD
1375{
1376 return 0;
1377}
1378
1379/* Implements INIT_CUMULATIVE_ARGS. */
1380void
1381m32c_init_cumulative_args (CUMULATIVE_ARGS * ca,
9d746d5e 1382 tree fntype,
38b2d076 1383 rtx libname ATTRIBUTE_UNUSED,
9d746d5e 1384 tree fndecl,
38b2d076
DD
1385 int n_named_args ATTRIBUTE_UNUSED)
1386{
9d746d5e
DD
1387 if (fntype && aggregate_value_p (TREE_TYPE (fntype), fndecl))
1388 ca->force_mem = 1;
1389 else
1390 ca->force_mem = 0;
38b2d076
DD
1391 ca->parm_num = 1;
1392}
1393
cd34bbe8
NF
1394/* Implements TARGET_FUNCTION_ARG_ADVANCE. force_mem is set for
1395 functions returning structures, so we always reset that. Otherwise,
1396 we only need to know the sequence number of the argument to know what
1397 to do with it. */
1398#undef TARGET_FUNCTION_ARG_ADVANCE
1399#define TARGET_FUNCTION_ARG_ADVANCE m32c_function_arg_advance
1400static void
d5cc9181 1401m32c_function_arg_advance (cumulative_args_t ca_v,
6930c98c 1402 const function_arg_info &)
38b2d076 1403{
d5cc9181
JR
1404 CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
1405
38b2d076
DD
1406 if (ca->force_mem)
1407 ca->force_mem = 0;
9d746d5e
DD
1408 else
1409 ca->parm_num++;
38b2d076
DD
1410}
1411
c2ed6cf8
NF
1412/* Implements TARGET_FUNCTION_ARG_BOUNDARY. */
1413#undef TARGET_FUNCTION_ARG_BOUNDARY
1414#define TARGET_FUNCTION_ARG_BOUNDARY m32c_function_arg_boundary
1415static unsigned int
ef4bddc2 1416m32c_function_arg_boundary (machine_mode mode ATTRIBUTE_UNUSED,
c2ed6cf8
NF
1417 const_tree type ATTRIBUTE_UNUSED)
1418{
1419 return (TARGET_A16 ? 8 : 16);
1420}
1421
38b2d076
DD
1422/* Implements FUNCTION_ARG_REGNO_P. */
1423int
1424m32c_function_arg_regno_p (int r)
1425{
1426 if (TARGET_A24)
1427 return (r == R0_REGNO);
1428 return (r == R1_REGNO || r == R2_REGNO);
1429}
1430
/* HImode and PSImode are the two "native" modes as far as GCC is
   concerned, but the chips also support a 32-bit mode which is used
   for some opcodes in R8C/M16C and for reset vectors and such.  */
1434#undef TARGET_VALID_POINTER_MODE
1435#define TARGET_VALID_POINTER_MODE m32c_valid_pointer_mode
23fed240 1436static bool
095a2d76 1437m32c_valid_pointer_mode (scalar_int_mode mode)
e9555b13 1438{
e9555b13
DD
1439 if (mode == HImode
1440 || mode == PSImode
1441 || mode == SImode
1442 )
1443 return 1;
1444 return 0;
1445}
1446
38b2d076
DD
1447/* How Scalar Function Values Are Returned */
1448
/* Implements TARGET_LIBCALL_VALUE.  Most values are returned in $r0, or some
   combination of registers starting there (r2r0 for longs, r3r1r2r0
   for long long, r3r2r1r0 for doubles), except that that ABI
   currently doesn't work because it ends up using all available
   general registers and gcc often can't compile it.  So, instead, we
   return anything bigger than 16 bits in "mem0" (effectively, a
   memory location).  */
2a31793e
AS
1456
1457#undef TARGET_LIBCALL_VALUE
1458#define TARGET_LIBCALL_VALUE m32c_libcall_value
1459
1460static rtx
ef4bddc2 1461m32c_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
38b2d076
DD
1462{
1463 /* return reg or parallel */
1464#if 0
1465 /* FIXME: GCC has difficulty returning large values in registers,
1466 because that ties up most of the general registers and gives the
1467 register allocator little to work with. Until we can resolve
1468 this, large values are returned in memory. */
1469 if (mode == DFmode)
1470 {
1471 rtx rv;
1472
1473 rv = gen_rtx_PARALLEL (mode, rtvec_alloc (4));
1474 XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
1475 gen_rtx_REG (HImode,
1476 R0_REGNO),
1477 GEN_INT (0));
1478 XVECEXP (rv, 0, 1) = gen_rtx_EXPR_LIST (VOIDmode,
1479 gen_rtx_REG (HImode,
1480 R1_REGNO),
1481 GEN_INT (2));
1482 XVECEXP (rv, 0, 2) = gen_rtx_EXPR_LIST (VOIDmode,
1483 gen_rtx_REG (HImode,
1484 R2_REGNO),
1485 GEN_INT (4));
1486 XVECEXP (rv, 0, 3) = gen_rtx_EXPR_LIST (VOIDmode,
1487 gen_rtx_REG (HImode,
1488 R3_REGNO),
1489 GEN_INT (6));
1490 return rv;
1491 }
1492
1493 if (TARGET_A24 && GET_MODE_SIZE (mode) > 2)
1494 {
1495 rtx rv;
1496
1497 rv = gen_rtx_PARALLEL (mode, rtvec_alloc (1));
1498 XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
1499 gen_rtx_REG (mode,
1500 R0_REGNO),
1501 GEN_INT (0));
1502 return rv;
1503 }
1504#endif
1505
1506 if (GET_MODE_SIZE (mode) > 2)
1507 return gen_rtx_REG (mode, MEM0_REGNO);
1508 return gen_rtx_REG (mode, R0_REGNO);
1509}
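/* So, for example, an HImode libcall result comes back in $r0, while
   an SImode or larger result comes back in mem0.  */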
1510
2a31793e 1511/* Implements TARGET_FUNCTION_VALUE. Functions and libcalls have the same
38b2d076 1512 conventions. */
2a31793e
AS
1513
1514#undef TARGET_FUNCTION_VALUE
1515#define TARGET_FUNCTION_VALUE m32c_function_value
1516
1517static rtx
1518m32c_function_value (const_tree valtype,
1519 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
1520 bool outgoing ATTRIBUTE_UNUSED)
38b2d076
DD
1521{
1522 /* return reg or parallel */
ef4bddc2 1523 const machine_mode mode = TYPE_MODE (valtype);
2a31793e
AS
1524 return m32c_libcall_value (mode, NULL_RTX);
1525}
1526
f28f2337
AS
1527/* Implements TARGET_FUNCTION_VALUE_REGNO_P. */
1528
1529#undef TARGET_FUNCTION_VALUE_REGNO_P
1530#define TARGET_FUNCTION_VALUE_REGNO_P m32c_function_value_regno_p
2a31793e 1531
f28f2337 1532static bool
2a31793e
AS
1533m32c_function_value_regno_p (const unsigned int regno)
1534{
1535 return (regno == R0_REGNO || regno == MEM0_REGNO);
38b2d076
DD
1536}
1537
1538/* How Large Values Are Returned */
1539
1540/* We return structures by pushing the address on the stack, even if
1541 we use registers for the first few "real" arguments. */
1542#undef TARGET_STRUCT_VALUE_RTX
1543#define TARGET_STRUCT_VALUE_RTX m32c_struct_value_rtx
1544static rtx
1545m32c_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
1546 int incoming ATTRIBUTE_UNUSED)
1547{
1548 return 0;
1549}
1550
1551/* Function Entry and Exit */
1552
1553/* Implements EPILOGUE_USES. Interrupts restore all registers. */
1554int
1555m32c_epilogue_uses (int regno ATTRIBUTE_UNUSED)
1556{
1557 if (cfun->machine->is_interrupt)
1558 return 1;
1559 return 0;
1560}
1561
1562/* Implementing the Varargs Macros */
1563
1564#undef TARGET_STRICT_ARGUMENT_NAMING
1565#define TARGET_STRICT_ARGUMENT_NAMING m32c_strict_argument_naming
1566static bool
d5cc9181 1567m32c_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
38b2d076
DD
1568{
1569 return 1;
1570}
1571
1572/* Trampolines for Nested Functions */
1573
1574/*
1575 m16c:
1576 1 0000 75C43412 mov.w #0x1234,a0
1577 2 0004 FC000000 jmp.a label
1578
1579 m32c:
1580 1 0000 BC563412 mov.l:s #0x123456,a0
1581 2 0004 CC000000 jmp.a label
1582*/
1583
1584/* Implements TRAMPOLINE_SIZE. */
1585int
1586m32c_trampoline_size (void)
1587{
1588 /* Allocate extra space so we can avoid the messy shifts when we
1589 initialize the trampoline; we just write past the end of the
1590 opcode. */
1591 return TARGET_A16 ? 8 : 10;
1592}
1593
1594/* Implements TRAMPOLINE_ALIGNMENT. */
1595int
1596m32c_trampoline_alignment (void)
1597{
1598 return 2;
1599}
1600
229fbccb
RH
1601/* Implements TARGET_TRAMPOLINE_INIT. */
1602
1603#undef TARGET_TRAMPOLINE_INIT
1604#define TARGET_TRAMPOLINE_INIT m32c_trampoline_init
1605static void
1606m32c_trampoline_init (rtx m_tramp, tree fndecl, rtx chainval)
38b2d076 1607{
229fbccb
RH
1608 rtx function = XEXP (DECL_RTL (fndecl), 0);
1609
1610#define A0(m,i) adjust_address (m_tramp, m, i)
38b2d076
DD
1611 if (TARGET_A16)
1612 {
1613 /* Note: we subtract a "word" because the moves want signed
1614 constants, not unsigned constants. */
1615 emit_move_insn (A0 (HImode, 0), GEN_INT (0xc475 - 0x10000));
1616 emit_move_insn (A0 (HImode, 2), chainval);
1617 emit_move_insn (A0 (QImode, 4), GEN_INT (0xfc - 0x100));
85f65093
KH
1618 /* We use 16-bit addresses here, but store the zero to turn it
1619 into a 24-bit offset. */
38b2d076
DD
1620 emit_move_insn (A0 (HImode, 5), function);
1621 emit_move_insn (A0 (QImode, 7), GEN_INT (0x00));
1622 }
1623 else
1624 {
1625 /* Note that the PSI moves actually write 4 bytes. Make sure we
1626 write stuff out in the right order, and leave room for the
1627 extra byte at the end. */
1628 emit_move_insn (A0 (QImode, 0), GEN_INT (0xbc - 0x100));
1629 emit_move_insn (A0 (PSImode, 1), chainval);
1630 emit_move_insn (A0 (QImode, 4), GEN_INT (0xcc - 0x100));
1631 emit_move_insn (A0 (PSImode, 5), function);
1632 }
1633#undef A0
1634}
1635
d81db636
SB
1636#undef TARGET_LRA_P
1637#define TARGET_LRA_P hook_bool_void_false
1638
38b2d076
DD
1639/* Addressing Modes */
1640
c6c3dba9
PB
1641/* The r8c/m32c family supports a wide range of non-orthogonal
1642 addressing modes, including the ability to double-indirect on *some*
1643 of them. Not all insns support all modes, either, but we rely on
1644 predicates and constraints to deal with that. */
1645#undef TARGET_LEGITIMATE_ADDRESS_P
1646#define TARGET_LEGITIMATE_ADDRESS_P m32c_legitimate_address_p
1647bool
ef4bddc2 1648m32c_legitimate_address_p (machine_mode mode, rtx x, bool strict)
38b2d076
DD
1649{
1650 int mode_adjust;
1651 if (CONSTANT_P (x))
1652 return 1;
1653
5fd5d713
DD
1654 if (TARGET_A16 && GET_MODE (x) != HImode && GET_MODE (x) != SImode)
1655 return 0;
1656 if (TARGET_A24 && GET_MODE (x) != PSImode)
1657 return 0;
1658
38b2d076
DD
1659 /* Wide references to memory will be split after reload, so we must
1660 ensure that all parts of such splits remain legitimate
1661 addresses. */
1662 mode_adjust = GET_MODE_SIZE (mode) - 1;
1663
1664 /* allowing PLUS yields mem:HI(plus:SI(mem:SI(plus:SI in m32c_split_move */
1665 if (GET_CODE (x) == PRE_DEC
1666 || GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_MODIFY)
1667 {
1668 return (GET_CODE (XEXP (x, 0)) == REG
1669 && REGNO (XEXP (x, 0)) == SP_REGNO);
1670 }
1671
1672#if 0
1673 /* This is the double indirection detection, but it currently
1674 doesn't work as cleanly as this code implies, so until we've had
1675 a chance to debug it, leave it disabled. */
1676 if (TARGET_A24 && GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) != PLUS)
1677 {
1678#if DEBUG_DOUBLE
1679 fprintf (stderr, "double indirect\n");
1680#endif
1681 x = XEXP (x, 0);
1682 }
1683#endif
1684
1685 encode_pattern (x);
1686 if (RTX_IS ("r"))
1687 {
1688 /* Most indexable registers can be used without displacements,
1689 although some of them will be emitted with an explicit zero
1690 to please the assembler. */
1691 switch (REGNO (patternr[0]))
1692 {
38b2d076
DD
1693 case A1_REGNO:
1694 case SB_REGNO:
1695 case FB_REGNO:
1696 case SP_REGNO:
5fd5d713
DD
1697 if (TARGET_A16 && GET_MODE (x) == SImode)
1698 return 0;
0c57f4bf 1699 /* FALLTHRU */
5fd5d713 1700 case A0_REGNO:
38b2d076
DD
1701 return 1;
1702
1703 default:
1704 if (IS_PSEUDO (patternr[0], strict))
1705 return 1;
1706 return 0;
1707 }
1708 }
5fd5d713
DD
1709
1710 if (TARGET_A16 && GET_MODE (x) == SImode)
1711 return 0;
1712
38b2d076
DD
1713 if (RTX_IS ("+ri"))
1714 {
1715 /* This is more interesting, because different base registers
1716 allow for different displacements - both range and signedness
1717 - and it differs from chip series to chip series too. */
1718 int rn = REGNO (patternr[1]);
1719 HOST_WIDE_INT offs = INTVAL (patternr[2]);
1720 switch (rn)
1721 {
1722 case A0_REGNO:
1723 case A1_REGNO:
1724 case SB_REGNO:
1725 /* The syntax only allows positive offsets, but when the
1726 offsets span the entire memory range, we can simulate
1727 negative offsets by wrapping. */
1728 if (TARGET_A16)
1729 return (offs >= -65536 && offs <= 65535 - mode_adjust);
1730 if (rn == SB_REGNO)
1731 return (offs >= 0 && offs <= 65535 - mode_adjust);
1732 /* A0 or A1 */
1733 return (offs >= -16777216 && offs <= 16777215);
1734
1735 case FB_REGNO:
1736 if (TARGET_A16)
1737 return (offs >= -128 && offs <= 127 - mode_adjust);
1738 return (offs >= -65536 && offs <= 65535 - mode_adjust);
1739
1740 case SP_REGNO:
1741 return (offs >= -128 && offs <= 127 - mode_adjust);
1742
1743 default:
1744 if (IS_PSEUDO (patternr[1], strict))
1745 return 1;
1746 return 0;
1747 }
1748 }
1749 if (RTX_IS ("+rs") || RTX_IS ("+r+si"))
1750 {
1751 rtx reg = patternr[1];
1752
1753 /* We don't know where the symbol is, so only allow base
1754 registers which support displacements spanning the whole
1755 address range. */
1756 switch (REGNO (reg))
1757 {
1758 case A0_REGNO:
1759 case A1_REGNO:
1760 /* $sb needs a secondary reload, but since it's involved in
1761 memory address reloads too, we don't deal with it very
1762 well. */
1763 /* case SB_REGNO: */
1764 return 1;
1765 default:
45d898e4
DD
1766 if (GET_CODE (reg) == SUBREG)
1767 return 0;
38b2d076
DD
1768 if (IS_PSEUDO (reg, strict))
1769 return 1;
1770 return 0;
1771 }
1772 }
1773 return 0;
1774}
1775
1776/* Implements REG_OK_FOR_BASE_P. */
1777int
1778m32c_reg_ok_for_base_p (rtx x, int strict)
1779{
1780 if (GET_CODE (x) != REG)
1781 return 0;
1782 switch (REGNO (x))
1783 {
1784 case A0_REGNO:
1785 case A1_REGNO:
1786 case SB_REGNO:
1787 case FB_REGNO:
1788 case SP_REGNO:
1789 return 1;
1790 default:
1791 if (IS_PSEUDO (x, strict))
1792 return 1;
1793 return 0;
1794 }
1795}
1796
/* We have three choices for choosing fb->aN offsets.  If we choose -128,
   we need one MOVA -128[fb],aN opcode and 16-bit aN displacements,
   like this:
       EB 4B FF          mova    -128[$fb],$a0
       D8 0C FF FF       mov.w:Q #0,-1[$a0]

   Alternately, we subtract the frame size, and hopefully use 8-bit aN
   displacements:
       7B F4             stc     $fb,$a0
       77 54 00 01       sub     #256,$a0
       D8 08 01          mov.w:Q #0,1[$a0]

   If we don't offset (i.e. offset by zero), we end up with:
       7B F4             stc     $fb,$a0
       D8 0C 00 FF       mov.w:Q #0,-256[$a0]

   We have to subtract *something* so that we have a PLUS rtx to mark
   that we've done this reload.  The -128 offset will never result in
   an 8-bit aN offset, and the payoff for the second case is five
   loads *if* those loads are within 256 bytes of the other end of the
   frame, so the third case seems best.  Note that we subtract the
   zero, but detect that in the addhi3 pattern.  */

#define BIG_FB_ADJ 0
1821
38b2d076
DD
1822/* Implements LEGITIMIZE_ADDRESS. The only address we really have to
1823 worry about is frame base offsets, as $fb has a limited
1824 displacement range. We deal with this by attempting to reload $fb
1825 itself into an address register; that seems to result in the best
1826 code. */
506d7b68
PB
1827#undef TARGET_LEGITIMIZE_ADDRESS
1828#define TARGET_LEGITIMIZE_ADDRESS m32c_legitimize_address
1829static rtx
1830m32c_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
ef4bddc2 1831 machine_mode mode)
38b2d076
DD
1832{
1833#if DEBUG0
1834 fprintf (stderr, "m32c_legitimize_address for mode %s\n", mode_name[mode]);
506d7b68 1835 debug_rtx (x);
38b2d076
DD
1836 fprintf (stderr, "\n");
1837#endif
1838
506d7b68
PB
1839 if (GET_CODE (x) == PLUS
1840 && GET_CODE (XEXP (x, 0)) == REG
1841 && REGNO (XEXP (x, 0)) == FB_REGNO
1842 && GET_CODE (XEXP (x, 1)) == CONST_INT
1843 && (INTVAL (XEXP (x, 1)) < -128
1844 || INTVAL (XEXP (x, 1)) > (128 - GET_MODE_SIZE (mode))))
38b2d076
DD
1845 {
1846 /* reload FB to A_REGS */
38b2d076 1847 rtx temp = gen_reg_rtx (Pmode);
506d7b68 1848 x = copy_rtx (x);
f7df4a84 1849 emit_insn (gen_rtx_SET (temp, XEXP (x, 0)));
506d7b68 1850 XEXP (x, 0) = temp;
38b2d076
DD
1851 }
1852
506d7b68 1853 return x;
38b2d076
DD
1854}
1855
1856/* Implements LEGITIMIZE_RELOAD_ADDRESS. See comment above. */
1857int
1858m32c_legitimize_reload_address (rtx * x,
ef4bddc2 1859 machine_mode mode,
38b2d076
DD
1860 int opnum,
1861 int type, int ind_levels ATTRIBUTE_UNUSED)
1862{
1863#if DEBUG0
1864 fprintf (stderr, "\nm32c_legitimize_reload_address for mode %s\n",
1865 mode_name[mode]);
1866 debug_rtx (*x);
1867#endif
1868
1869 /* At one point, this function tried to get $fb copied to an address
1870 register, which in theory would maximize sharing, but gcc was
1871 *also* still trying to reload the whole address, and we'd run out
1872 of address registers. So we let gcc do the naive (but safe)
1873 reload instead, when the above function doesn't handle it for
04aff2c0
DD
1874 us.
1875
1876 The code below is a second attempt at the above. */
1877
1878 if (GET_CODE (*x) == PLUS
1879 && GET_CODE (XEXP (*x, 0)) == REG
1880 && REGNO (XEXP (*x, 0)) == FB_REGNO
1881 && GET_CODE (XEXP (*x, 1)) == CONST_INT
1882 && (INTVAL (XEXP (*x, 1)) < -128
1883 || INTVAL (XEXP (*x, 1)) > (128 - GET_MODE_SIZE (mode))))
1884 {
1885 rtx sum;
1886 int offset = INTVAL (XEXP (*x, 1));
1887 int adjustment = -BIG_FB_ADJ;
1888
1889 sum = gen_rtx_PLUS (Pmode, XEXP (*x, 0),
1890 GEN_INT (adjustment));
1891 *x = gen_rtx_PLUS (Pmode, sum, GEN_INT (offset - adjustment));
1892 if (type == RELOAD_OTHER)
1893 type = RELOAD_FOR_OTHER_ADDRESS;
1894 push_reload (sum, NULL_RTX, &XEXP (*x, 0), NULL,
1895 A_REGS, Pmode, VOIDmode, 0, 0, opnum,
444d6efe 1896 (enum reload_type) type);
04aff2c0
DD
1897 return 1;
1898 }
1899
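 /* The address may already have been split into ((fb + const) + const);
 in that case, reload the inner fb-plus-constant part into an
 address register. */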
1900 if (GET_CODE (*x) == PLUS
1901 && GET_CODE (XEXP (*x, 0)) == PLUS
1902 && GET_CODE (XEXP (XEXP (*x, 0), 0)) == REG
1903 && REGNO (XEXP (XEXP (*x, 0), 0)) == FB_REGNO
1904 && GET_CODE (XEXP (XEXP (*x, 0), 1)) == CONST_INT
1905 && GET_CODE (XEXP (*x, 1)) == CONST_INT
1906 )
1907 {
1908 if (type == RELOAD_OTHER)
1909 type = RELOAD_FOR_OTHER_ADDRESS;
1910 push_reload (XEXP (*x, 0), NULL_RTX, &XEXP (*x, 0), NULL,
1911 A_REGS, Pmode, VOIDmode, 0, 0, opnum,
444d6efe 1912 (enum reload_type) type);
f75e07bc
BE
1913 return 1;
1914 }
1915
1916 if (TARGET_A24 && GET_MODE (*x) == PSImode)
1917 {
1918 push_reload (*x, NULL_RTX, x, NULL,
1919 A_REGS, PSImode, VOIDmode, 0, 0, opnum,
1920 (enum reload_type) type);
04aff2c0
DD
1921 return 1;
1922 }
38b2d076
DD
1923
1924 return 0;
1925}
1926
5fd5d713
DD
1927/* Return the appropriate mode for a named address pointer. */
1928#undef TARGET_ADDR_SPACE_POINTER_MODE
1929#define TARGET_ADDR_SPACE_POINTER_MODE m32c_addr_space_pointer_mode
095a2d76 1930static scalar_int_mode
5fd5d713
DD
1931m32c_addr_space_pointer_mode (addr_space_t addrspace)
1932{
1933 switch (addrspace)
1934 {
1935 case ADDR_SPACE_GENERIC:
1936 return TARGET_A24 ? PSImode : HImode;
1937 case ADDR_SPACE_FAR:
1938 return SImode;
1939 default:
1940 gcc_unreachable ();
1941 }
1942}
1943
1944/* Return the appropriate mode for a named address address. */
1945#undef TARGET_ADDR_SPACE_ADDRESS_MODE
1946#define TARGET_ADDR_SPACE_ADDRESS_MODE m32c_addr_space_address_mode
095a2d76 1947static scalar_int_mode
5fd5d713
DD
1948m32c_addr_space_address_mode (addr_space_t addrspace)
1949{
1950 switch (addrspace)
1951 {
1952 case ADDR_SPACE_GENERIC:
1953 return TARGET_A24 ? PSImode : HImode;
1954 case ADDR_SPACE_FAR:
1955 return SImode;
1956 default:
1957 gcc_unreachable ();
1958 }
1959}
1960
1961/* Like m32c_legitimate_address_p, except with named addresses. */
1962#undef TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P
1963#define TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P \
1964 m32c_addr_space_legitimate_address_p
1965static bool
ef4bddc2 1966m32c_addr_space_legitimate_address_p (machine_mode mode, rtx x,
5fd5d713
DD
1967 bool strict, addr_space_t as)
1968{
1969 if (as == ADDR_SPACE_FAR)
1970 {
1971 if (TARGET_A24)
1972 return 0;
1973 encode_pattern (x);
1974 if (RTX_IS ("r"))
1975 {
1976 if (GET_MODE (x) != SImode)
1977 return 0;
1978 switch (REGNO (patternr[0]))
1979 {
1980 case A0_REGNO:
1981 return 1;
1982
1983 default:
1984 if (IS_PSEUDO (patternr[0], strict))
1985 return 1;
1986 return 0;
1987 }
1988 }
1989 if (RTX_IS ("+^Sri"))
1990 {
1991 int rn = REGNO (patternr[3]);
1992 HOST_WIDE_INT offs = INTVAL (patternr[4]);
1993 if (GET_MODE (patternr[3]) != HImode)
1994 return 0;
1995 switch (rn)
1996 {
1997 case A0_REGNO:
1998 return (offs >= 0 && offs <= 0xfffff);
1999
2000 default:
2001 if (IS_PSEUDO (patternr[3], strict))
2002 return 1;
2003 return 0;
2004 }
2005 }
2006 if (RTX_IS ("+^Srs"))
2007 {
2008 int rn = REGNO (patternr[3]);
2009 if (GET_MODE (patternr[3]) != HImode)
2010 return 0;
2011 switch (rn)
2012 {
2013 case A0_REGNO:
2014 return 1;
2015
2016 default:
2017 if (IS_PSEUDO (patternr[3], strict))
2018 return 1;
2019 return 0;
2020 }
2021 }
2022 if (RTX_IS ("+^S+ris"))
2023 {
2024 int rn = REGNO (patternr[4]);
2025 if (GET_MODE (patternr[4]) != HImode)
2026 return 0;
2027 switch (rn)
2028 {
2029 case A0_REGNO:
2030 return 1;
2031
2032 default:
2033 if (IS_PSEUDO (patternr[4], strict))
2034 return 1;
2035 return 0;
2036 }
2037 }
2038 if (RTX_IS ("s"))
2039 {
2040 return 1;
2041 }
2042 return 0;
2043 }
2044
2045 else if (as != ADDR_SPACE_GENERIC)
2046 gcc_unreachable ();
2047
2048 return m32c_legitimate_address_p (mode, x, strict);
2049}
2050
2051/* Like m32c_legitimate_address, except with named address support. */
2052#undef TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS
2053#define TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS m32c_addr_space_legitimize_address
2054static rtx
ef4bddc2 2055m32c_addr_space_legitimize_address (rtx x, rtx oldx, machine_mode mode,
5fd5d713
DD
2056 addr_space_t as)
2057{
2058 if (as != ADDR_SPACE_GENERIC)
2059 {
2060#if DEBUG0
2061 fprintf (stderr, "\033[36mm32c_addr_space_legitimize_address for mode %s\033[0m\n", mode_name[mode]);
2062 debug_rtx (x);
2063 fprintf (stderr, "\n");
2064#endif
2065
2066 if (GET_CODE (x) != REG)
2067 {
2068 x = force_reg (SImode, x);
2069 }
2070 return x;
2071 }
2072
2073 return m32c_legitimize_address (x, oldx, mode);
2074}
2075
2076/* Determine if one named address space is a subset of another. */
2077#undef TARGET_ADDR_SPACE_SUBSET_P
2078#define TARGET_ADDR_SPACE_SUBSET_P m32c_addr_space_subset_p
2079static bool
2080m32c_addr_space_subset_p (addr_space_t subset, addr_space_t superset)
2081{
2082 gcc_assert (subset == ADDR_SPACE_GENERIC || subset == ADDR_SPACE_FAR);
2083 gcc_assert (superset == ADDR_SPACE_GENERIC || superset == ADDR_SPACE_FAR);
2084
2085 if (subset == superset)
2086 return true;
2087
2088 else
2089 return (subset == ADDR_SPACE_GENERIC && superset == ADDR_SPACE_FAR);
2090}
2091
2092#undef TARGET_ADDR_SPACE_CONVERT
2093#define TARGET_ADDR_SPACE_CONVERT m32c_addr_space_convert
2094/* Convert from one address space to another. */
2095static rtx
2096m32c_addr_space_convert (rtx op, tree from_type, tree to_type)
2097{
2098 addr_space_t from_as = TYPE_ADDR_SPACE (TREE_TYPE (from_type));
2099 addr_space_t to_as = TYPE_ADDR_SPACE (TREE_TYPE (to_type));
2100 rtx result;
2101
2102 gcc_assert (from_as == ADDR_SPACE_GENERIC || from_as == ADDR_SPACE_FAR);
2103 gcc_assert (to_as == ADDR_SPACE_GENERIC || to_as == ADDR_SPACE_FAR);
2104
2105 if (to_as == ADDR_SPACE_GENERIC && from_as == ADDR_SPACE_FAR)
2106 {
2107 /* This is unpredictable, as we're truncating off usable address
2108 bits. */
2109
2110 result = gen_reg_rtx (HImode);
2111 emit_move_insn (result, simplify_subreg (HImode, op, SImode, 0));
2112 return result;
2113 }
2114 else if (to_as == ADDR_SPACE_FAR && from_as == ADDR_SPACE_GENERIC)
2115 {
2116 /* This always works. */
2117 result = gen_reg_rtx (SImode);
2118 emit_insn (gen_zero_extendhisi2 (result, op));
2119 return result;
2120 }
2121 else
2122 gcc_unreachable ();
2123}
2124
38b2d076
DD
2125/* Condition Code Status */
2126
2127#undef TARGET_FIXED_CONDITION_CODE_REGS
2128#define TARGET_FIXED_CONDITION_CODE_REGS m32c_fixed_condition_code_regs
2129static bool
2130m32c_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
2131{
2132 *p1 = FLG_REGNO;
2133 *p2 = INVALID_REGNUM;
2134 return true;
2135}
2136
2137/* Describing Relative Costs of Operations */
2138
0e607518 2139/* Implements TARGET_REGISTER_MOVE_COST. We make impossible moves
38b2d076
DD
2140 prohibitively expensive, like trying to put QIs in r2/r3 (there are
2141 no opcodes to do that). We also discourage use of mem* registers
2142 since they're really memory. */
0e607518
AS
2143
2144#undef TARGET_REGISTER_MOVE_COST
2145#define TARGET_REGISTER_MOVE_COST m32c_register_move_cost
2146
2147static int
ef4bddc2 2148m32c_register_move_cost (machine_mode mode, reg_class_t from,
0e607518 2149 reg_class_t to)
38b2d076
DD
2150{
2151 int cost = COSTS_N_INSNS (3);
0e607518
AS
2152 HARD_REG_SET cc;
2153
2154/* FIXME: pick real values, but not 2 for now. */
2155 COPY_HARD_REG_SET (cc, reg_class_contents[(int) from]);
2156 IOR_HARD_REG_SET (cc, reg_class_contents[(int) to]);
2157
2158 if (mode == QImode
2159 && hard_reg_set_intersect_p (cc, reg_class_contents[R23_REGS]))
38b2d076 2160 {
0e607518 2161 if (hard_reg_set_subset_p (cc, reg_class_contents[R23_REGS]))
38b2d076
DD
2162 cost = COSTS_N_INSNS (1000);
2163 else
2164 cost = COSTS_N_INSNS (80);
2165 }
2166
2167 if (!class_can_hold_mode (from, mode) || !class_can_hold_mode (to, mode))
2168 cost = COSTS_N_INSNS (1000);
2169
0e607518 2170 if (reg_classes_intersect_p (from, CR_REGS))
38b2d076
DD
2171 cost += COSTS_N_INSNS (5);
2172
0e607518 2173 if (reg_classes_intersect_p (to, CR_REGS))
38b2d076
DD
2174 cost += COSTS_N_INSNS (5);
2175
2176 if (from == MEM_REGS || to == MEM_REGS)
2177 cost += COSTS_N_INSNS (50);
0e607518
AS
2178 else if (reg_classes_intersect_p (from, MEM_REGS)
2179 || reg_classes_intersect_p (to, MEM_REGS))
38b2d076
DD
2180 cost += COSTS_N_INSNS (10);
2181
2182#if DEBUG0
2183 fprintf (stderr, "register_move_cost %s from %s to %s = %d\n",
0e607518
AS
2184 mode_name[mode], class_names[(int) from], class_names[(int) to],
2185 cost);
38b2d076
DD
2186#endif
2187 return cost;
2188}
2189
0e607518
AS
2190/* Implements TARGET_MEMORY_MOVE_COST. */
2191
2192#undef TARGET_MEMORY_MOVE_COST
2193#define TARGET_MEMORY_MOVE_COST m32c_memory_move_cost
2194
2195static int
ef4bddc2 2196m32c_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
0e607518
AS
2197 reg_class_t rclass ATTRIBUTE_UNUSED,
2198 bool in ATTRIBUTE_UNUSED)
38b2d076
DD
2199{
2200 /* FIXME: pick real values. */
2201 return COSTS_N_INSNS (10);
2202}
2203
07127a0a
DD
2204/* Here we try to describe when we use multiple opcodes for one RTX so
2205 that gcc knows when to use them. */
2206#undef TARGET_RTX_COSTS
2207#define TARGET_RTX_COSTS m32c_rtx_costs
2208static bool
e548c9df
AM
2209m32c_rtx_costs (rtx x, machine_mode mode, int outer_code,
2210 int opno ATTRIBUTE_UNUSED,
68f932c4 2211 int *total, bool speed ATTRIBUTE_UNUSED)
07127a0a 2212{
e548c9df 2213 int code = GET_CODE (x);
07127a0a
DD
2214 switch (code)
2215 {
2216 case REG:
2217 if (REGNO (x) >= MEM0_REGNO && REGNO (x) <= MEM7_REGNO)
2218 *total += COSTS_N_INSNS (500);
2219 else
2220 *total += COSTS_N_INSNS (1);
2221 return true;
2222
2223 case ASHIFT:
2224 case LSHIFTRT:
2225 case ASHIFTRT:
2226 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
2227 {
2228 /* mov.b r1l, r1h */
2229 *total += COSTS_N_INSNS (1);
2230 return true;
2231 }
2232 if (INTVAL (XEXP (x, 1)) > 8
2233 || INTVAL (XEXP (x, 1)) < -8)
2234 {
2235 /* mov.b #N, r1l */
2236 /* mov.b r1l, r1h */
2237 *total += COSTS_N_INSNS (2);
2238 return true;
2239 }
2240 return true;
2241
2242 case LE:
2243 case LEU:
2244 case LT:
2245 case LTU:
2246 case GT:
2247 case GTU:
2248 case GE:
2249 case GEU:
2250 case NE:
2251 case EQ:
2252 if (outer_code == SET)
2253 {
2254 *total += COSTS_N_INSNS (2);
2255 return true;
2256 }
2257 break;
2258
2259 case ZERO_EXTRACT:
2260 {
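 /* The cost of accessing a bit depends on how the containing
 operand's address is expressed: constant, symbolic, or
 something else. */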
2261 rtx dest = XEXP (x, 0);
2262 rtx addr = XEXP (dest, 0);
2263 switch (GET_CODE (addr))
2264 {
2265 case CONST_INT:
2266 *total += COSTS_N_INSNS (1);
2267 break;
2268 case SYMBOL_REF:
2269 *total += COSTS_N_INSNS (3);
2270 break;
2271 default:
2272 *total += COSTS_N_INSNS (2);
2273 break;
2274 }
2275 return true;
2276 }
2277 break;
2278
2279 default:
2280 /* Reasonable default. */
e548c9df 2281 if (TARGET_A16 && mode == SImode)
07127a0a
DD
2282 *total += COSTS_N_INSNS (2);
2283 break;
2284 }
2285 return false;
2286}
2287
2288#undef TARGET_ADDRESS_COST
2289#define TARGET_ADDRESS_COST m32c_address_cost
2290static int
ef4bddc2 2291m32c_address_cost (rtx addr, machine_mode mode ATTRIBUTE_UNUSED,
b413068c
OE
2292 addr_space_t as ATTRIBUTE_UNUSED,
2293 bool speed ATTRIBUTE_UNUSED)
07127a0a 2294{
80b093df 2295 int i;
07127a0a
DD
2296 /* fprintf(stderr, "\naddress_cost\n");
2297 debug_rtx(addr);*/
2298 switch (GET_CODE (addr))
2299 {
2300 case CONST_INT:
80b093df
DD
2301 i = INTVAL (addr);
2302 if (i == 0)
2303 return COSTS_N_INSNS(1);
01512446 2304 if (i > 0 && i <= 255)
80b093df 2305 return COSTS_N_INSNS(2);
01512446 2306 if (i > 0 && i <= 65535)
80b093df
DD
2307 return COSTS_N_INSNS(3);
2308 return COSTS_N_INSNS(4);
07127a0a 2309 case SYMBOL_REF:
80b093df 2310 return COSTS_N_INSNS(4);
07127a0a 2311 case REG:
80b093df
DD
2312 return COSTS_N_INSNS(1);
2313 case PLUS:
2314 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
2315 {
2316 i = INTVAL (XEXP (addr, 1));
2317 if (i == 0)
2318 return COSTS_N_INSNS(1);
01512446 2319 if (i > 0 && i <= 255)
80b093df 2320 return COSTS_N_INSNS(2);
01512446 2321 if (i > 0 && i <= 65535)
80b093df
DD
2322 return COSTS_N_INSNS(3);
2323 }
2324 return COSTS_N_INSNS(4);
07127a0a
DD
2325 default:
2326 return 0;
2327 }
2328}
2329
38b2d076
DD
2330/* Defining the Output Assembler Language */
2331
38b2d076
DD
2332/* Output of Data */
2333
 2334/* We may have 24-bit sizes, which is the native address size.
2335 Currently unused, but provided for completeness. */
2336#undef TARGET_ASM_INTEGER
2337#define TARGET_ASM_INTEGER m32c_asm_integer
2338static bool
2339m32c_asm_integer (rtx x, unsigned int size, int aligned_p)
2340{
2341 switch (size)
2342 {
2343 case 3:
2344 fprintf (asm_out_file, "\t.3byte\t");
2345 output_addr_const (asm_out_file, x);
2346 fputc ('\n', asm_out_file);
2347 return true;
e9555b13
DD
2348 case 4:
2349 if (GET_CODE (x) == SYMBOL_REF)
2350 {
2351 fprintf (asm_out_file, "\t.long\t");
2352 output_addr_const (asm_out_file, x);
2353 fputc ('\n', asm_out_file);
2354 return true;
2355 }
2356 break;
38b2d076
DD
2357 }
2358 return default_assemble_integer (x, size, aligned_p);
2359}
2360
2361/* Output of Assembler Instructions */
2362
a4174ebf 2363/* We use a lookup table because the addressing modes are non-orthogonal. */
38b2d076
DD
2364
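/* Each entry maps an operand pattern (as produced by encode_pattern)
 and an optional operand-code letter to an output template. In the
 template, a digit N prints patternr[N], 'z' inserts an explicit zero
 displacement for sb/fb/sp bases, and '\' quotes the next character;
 see m32c_print_operand below. */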
2365static struct
2366{
2367 char code;
2368 char const *pattern;
2369 char const *format;
2370}
2371const conversions[] = {
2372 { 0, "r", "0" },
2373
2374 { 0, "mr", "z[1]" },
2375 { 0, "m+ri", "3[2]" },
2376 { 0, "m+rs", "3[2]" },
5fd5d713
DD
2377 { 0, "m+^Zrs", "5[4]" },
2378 { 0, "m+^Zri", "5[4]" },
2379 { 0, "m+^Z+ris", "7+6[5]" },
2380 { 0, "m+^Srs", "5[4]" },
2381 { 0, "m+^Sri", "5[4]" },
2382 { 0, "m+^S+ris", "7+6[5]" },
38b2d076
DD
2383 { 0, "m+r+si", "4+5[2]" },
2384 { 0, "ms", "1" },
2385 { 0, "mi", "1" },
2386 { 0, "m+si", "2+3" },
2387
2388 { 0, "mmr", "[z[2]]" },
2389 { 0, "mm+ri", "[4[3]]" },
2390 { 0, "mm+rs", "[4[3]]" },
2391 { 0, "mm+r+si", "[5+6[3]]" },
2392 { 0, "mms", "[[2]]" },
2393 { 0, "mmi", "[[2]]" },
2394 { 0, "mm+si", "[4[3]]" },
2395
2396 { 0, "i", "#0" },
2397 { 0, "s", "#0" },
2398 { 0, "+si", "#1+2" },
2399 { 0, "l", "#0" },
2400
2401 { 'l', "l", "0" },
2402 { 'd', "i", "0" },
2403 { 'd', "s", "0" },
2404 { 'd', "+si", "1+2" },
2405 { 'D', "i", "0" },
2406 { 'D', "s", "0" },
2407 { 'D', "+si", "1+2" },
2408 { 'x', "i", "#0" },
2409 { 'X', "i", "#0" },
2410 { 'm', "i", "#0" },
2411 { 'b', "i", "#0" },
07127a0a 2412 { 'B', "i", "0" },
38b2d076
DD
2413 { 'p', "i", "0" },
2414
2415 { 0, 0, 0 }
2416};
2417
2418/* This is in order according to the bitfield that pushm/popm use. */
2419static char const *pushm_regs[] = {
2420 "fb", "sb", "a1", "a0", "r3", "r2", "r1", "r0"
2421};
2422
4645179e
AS
2423/* Implements TARGET_PRINT_OPERAND. */
2424
2425#undef TARGET_PRINT_OPERAND
2426#define TARGET_PRINT_OPERAND m32c_print_operand
2427
2428static void
38b2d076
DD
2429m32c_print_operand (FILE * file, rtx x, int code)
2430{
2431 int i, j, b;
2432 const char *comma;
2433 HOST_WIDE_INT ival;
2434 int unsigned_const = 0;
ff485e71 2435 int force_sign;
38b2d076
DD
2436
2437 /* Multiplies; constants are converted to sign-extended format but
2438 we need unsigned, so 'u' and 'U' tell us what size unsigned we
2439 need. */
2440 if (code == 'u')
2441 {
2442 unsigned_const = 2;
2443 code = 0;
2444 }
2445 if (code == 'U')
2446 {
2447 unsigned_const = 1;
2448 code = 0;
2449 }
2450 /* This one is only for debugging; you can put it in a pattern to
2451 force this error. */
2452 if (code == '!')
2453 {
2454 fprintf (stderr, "dj: unreviewed pattern:");
2455 if (current_output_insn)
2456 debug_rtx (current_output_insn);
2457 gcc_unreachable ();
2458 }
2459 /* PSImode operations are either .w or .l depending on the target. */
2460 if (code == '&')
2461 {
2462 if (TARGET_A16)
2463 fprintf (file, "w");
2464 else
2465 fprintf (file, "l");
2466 return;
2467 }
2468 /* Inverted conditionals. */
2469 if (code == 'C')
2470 {
2471 switch (GET_CODE (x))
2472 {
2473 case LE:
2474 fputs ("gt", file);
2475 break;
2476 case LEU:
2477 fputs ("gtu", file);
2478 break;
2479 case LT:
2480 fputs ("ge", file);
2481 break;
2482 case LTU:
2483 fputs ("geu", file);
2484 break;
2485 case GT:
2486 fputs ("le", file);
2487 break;
2488 case GTU:
2489 fputs ("leu", file);
2490 break;
2491 case GE:
2492 fputs ("lt", file);
2493 break;
2494 case GEU:
2495 fputs ("ltu", file);
2496 break;
2497 case NE:
2498 fputs ("eq", file);
2499 break;
2500 case EQ:
2501 fputs ("ne", file);
2502 break;
2503 default:
2504 gcc_unreachable ();
2505 }
2506 return;
2507 }
2508 /* Regular conditionals. */
2509 if (code == 'c')
2510 {
2511 switch (GET_CODE (x))
2512 {
2513 case LE:
2514 fputs ("le", file);
2515 break;
2516 case LEU:
2517 fputs ("leu", file);
2518 break;
2519 case LT:
2520 fputs ("lt", file);
2521 break;
2522 case LTU:
2523 fputs ("ltu", file);
2524 break;
2525 case GT:
2526 fputs ("gt", file);
2527 break;
2528 case GTU:
2529 fputs ("gtu", file);
2530 break;
2531 case GE:
2532 fputs ("ge", file);
2533 break;
2534 case GEU:
2535 fputs ("geu", file);
2536 break;
2537 case NE:
2538 fputs ("ne", file);
2539 break;
2540 case EQ:
2541 fputs ("eq", file);
2542 break;
2543 default:
2544 gcc_unreachable ();
2545 }
2546 return;
2547 }
2548 /* Used in negsi2 to do HImode ops on the two parts of an SImode
2549 operand. */
2550 if (code == 'h' && GET_MODE (x) == SImode)
2551 {
2552 x = m32c_subreg (HImode, x, SImode, 0);
2553 code = 0;
2554 }
2555 if (code == 'H' && GET_MODE (x) == SImode)
2556 {
2557 x = m32c_subreg (HImode, x, SImode, 2);
2558 code = 0;
2559 }
07127a0a
DD
2560 if (code == 'h' && GET_MODE (x) == HImode)
2561 {
2562 x = m32c_subreg (QImode, x, HImode, 0);
2563 code = 0;
2564 }
2565 if (code == 'H' && GET_MODE (x) == HImode)
2566 {
2567 /* We can't actually represent this as an rtx. Do it here. */
2568 if (GET_CODE (x) == REG)
2569 {
2570 switch (REGNO (x))
2571 {
2572 case R0_REGNO:
2573 fputs ("r0h", file);
2574 return;
2575 case R1_REGNO:
2576 fputs ("r1h", file);
2577 return;
2578 default:
2579 gcc_unreachable();
2580 }
2581 }
2582 /* This should be a MEM. */
2583 x = m32c_subreg (QImode, x, HImode, 1);
2584 code = 0;
2585 }
2586 /* This is for BMcond, which always wants word register names. */
2587 if (code == 'h' && GET_MODE (x) == QImode)
2588 {
2589 if (GET_CODE (x) == REG)
2590 x = gen_rtx_REG (HImode, REGNO (x));
2591 code = 0;
2592 }
38b2d076
DD
2593 /* 'x' and 'X' need to be ignored for non-immediates. */
2594 if ((code == 'x' || code == 'X') && GET_CODE (x) != CONST_INT)
2595 code = 0;
2596
2597 encode_pattern (x);
ff485e71 2598 force_sign = 0;
38b2d076
DD
2599 for (i = 0; conversions[i].pattern; i++)
2600 if (conversions[i].code == code
2601 && streq (conversions[i].pattern, pattern))
2602 {
2603 for (j = 0; conversions[i].format[j]; j++)
2604 /* backslash quotes the next character in the output pattern. */
2605 if (conversions[i].format[j] == '\\')
2606 {
2607 fputc (conversions[i].format[j + 1], file);
2608 j++;
2609 }
2610 /* Digits in the output pattern indicate that the
2611 corresponding RTX is to be output at that point. */
2612 else if (ISDIGIT (conversions[i].format[j]))
2613 {
2614 rtx r = patternr[conversions[i].format[j] - '0'];
2615 switch (GET_CODE (r))
2616 {
2617 case REG:
2618 fprintf (file, "%s",
2619 reg_name_with_mode (REGNO (r), GET_MODE (r)));
2620 break;
2621 case CONST_INT:
2622 switch (code)
2623 {
2624 case 'b':
07127a0a
DD
2625 case 'B':
2626 {
2627 int v = INTVAL (r);
2628 int i = (int) exact_log2 (v);
2629 if (i == -1)
2630 i = (int) exact_log2 ((v ^ 0xffff) & 0xffff);
2631 if (i == -1)
2632 i = (int) exact_log2 ((v ^ 0xff) & 0xff);
2633 /* Bit position. */
2634 fprintf (file, "%d", i);
2635 }
38b2d076
DD
2636 break;
2637 case 'x':
2638 /* Unsigned byte. */
2639 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2640 INTVAL (r) & 0xff);
2641 break;
2642 case 'X':
2643 /* Unsigned word. */
2644 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2645 INTVAL (r) & 0xffff);
2646 break;
2647 case 'p':
2648 /* pushm and popm encode a register set into a single byte. */
2649 comma = "";
2650 for (b = 7; b >= 0; b--)
2651 if (INTVAL (r) & (1 << b))
2652 {
2653 fprintf (file, "%s%s", comma, pushm_regs[b]);
2654 comma = ",";
2655 }
2656 break;
2657 case 'm':
2658 /* "Minus". Output -X */
2659 ival = (-INTVAL (r) & 0xffff);
2660 if (ival & 0x8000)
2661 ival = ival - 0x10000;
2662 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2663 break;
2664 default:
2665 ival = INTVAL (r);
2666 if (conversions[i].format[j + 1] == '[' && ival < 0)
2667 {
2668 /* We can simulate negative displacements by
2669 taking advantage of address space
2670 wrapping when the offset can span the
2671 entire address range. */
2672 rtx base =
2673 patternr[conversions[i].format[j + 2] - '0'];
2674 if (GET_CODE (base) == REG)
2675 switch (REGNO (base))
2676 {
2677 case A0_REGNO:
2678 case A1_REGNO:
2679 if (TARGET_A24)
2680 ival = 0x1000000 + ival;
2681 else
2682 ival = 0x10000 + ival;
2683 break;
2684 case SB_REGNO:
2685 if (TARGET_A16)
2686 ival = 0x10000 + ival;
2687 break;
2688 }
2689 }
2690 else if (code == 'd' && ival < 0 && j == 0)
2691 /* The "mova" opcode is used to do addition by
2692 computing displacements, but again, we need
2693 displacements to be unsigned *if* they're
2694 the only component of the displacement
2695 (i.e. no "symbol-4" type displacement). */
2696 ival = (TARGET_A24 ? 0x1000000 : 0x10000) + ival;
2697
2698 if (conversions[i].format[j] == '0')
2699 {
2700 /* More conversions to unsigned. */
2701 if (unsigned_const == 2)
2702 ival &= 0xffff;
2703 if (unsigned_const == 1)
2704 ival &= 0xff;
2705 }
2706 if (streq (conversions[i].pattern, "mi")
2707 || streq (conversions[i].pattern, "mmi"))
2708 {
2709 /* Integers used as addresses are unsigned. */
2710 ival &= (TARGET_A24 ? 0xffffff : 0xffff);
2711 }
ff485e71
DD
2712 if (force_sign && ival >= 0)
2713 fputc ('+', file);
38b2d076
DD
2714 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2715 break;
2716 }
2717 break;
2718 case CONST_DOUBLE:
2719 /* We don't have const_double constants. If it
2720 happens, make it obvious. */
2721 fprintf (file, "[const_double 0x%lx]",
2722 (unsigned long) CONST_DOUBLE_HIGH (r));
2723 break;
2724 case SYMBOL_REF:
2725 assemble_name (file, XSTR (r, 0));
2726 break;
2727 case LABEL_REF:
2728 output_asm_label (r);
2729 break;
2730 default:
2731 fprintf (stderr, "don't know how to print this operand:");
2732 debug_rtx (r);
2733 gcc_unreachable ();
2734 }
2735 }
2736 else
2737 {
2738 if (conversions[i].format[j] == 'z')
2739 {
2740 /* Some addressing modes *must* have a displacement,
2741 so insert a zero here if needed. */
2742 int k;
2743 for (k = j + 1; conversions[i].format[k]; k++)
2744 if (ISDIGIT (conversions[i].format[k]))
2745 {
2746 rtx reg = patternr[conversions[i].format[k] - '0'];
2747 if (GET_CODE (reg) == REG
2748 && (REGNO (reg) == SB_REGNO
2749 || REGNO (reg) == FB_REGNO
2750 || REGNO (reg) == SP_REGNO))
2751 fputc ('0', file);
2752 }
2753 continue;
2754 }
2755 /* Signed displacements off symbols need to have signs
2756 blended cleanly. */
2757 if (conversions[i].format[j] == '+'
ff485e71 2758 && (!code || code == 'D' || code == 'd')
38b2d076 2759 && ISDIGIT (conversions[i].format[j + 1])
ff485e71
DD
2760 && (GET_CODE (patternr[conversions[i].format[j + 1] - '0'])
2761 == CONST_INT))
2762 {
2763 force_sign = 1;
2764 continue;
2765 }
38b2d076
DD
2766 fputc (conversions[i].format[j], file);
2767 }
2768 break;
2769 }
2770 if (!conversions[i].pattern)
2771 {
2772 fprintf (stderr, "unconvertible operand %c `%s'", code ? code : '-',
2773 pattern);
2774 debug_rtx (x);
2775 fprintf (file, "[%c.%s]", code ? code : '-', pattern);
2776 }
2777
2778 return;
2779}
2780
4645179e
AS
2781/* Implements TARGET_PRINT_OPERAND_PUNCT_VALID_P.
2782
2783 See m32c_print_operand above for descriptions of what these do. */
2784
2785#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
2786#define TARGET_PRINT_OPERAND_PUNCT_VALID_P m32c_print_operand_punct_valid_p
2787
2788static bool
2789m32c_print_operand_punct_valid_p (unsigned char c)
38b2d076
DD
2790{
2791 if (c == '&' || c == '!')
4645179e
AS
2792 return true;
2793
2794 return false;
38b2d076
DD
2795}
2796
4645179e
AS
2797/* Implements TARGET_PRINT_OPERAND_ADDRESS. Nothing unusual here. */
2798
2799#undef TARGET_PRINT_OPERAND_ADDRESS
2800#define TARGET_PRINT_OPERAND_ADDRESS m32c_print_operand_address
2801
2802static void
cc8ca59e 2803m32c_print_operand_address (FILE * stream, machine_mode /*mode*/, rtx address)
38b2d076 2804{
235e1fe8
NC
2805 if (GET_CODE (address) == MEM)
2806 address = XEXP (address, 0);
2807 else
2808 /* cf: gcc.dg/asm-4.c. */
2809 gcc_assert (GET_CODE (address) == REG);
2810
2811 m32c_print_operand (stream, address, 0);
38b2d076
DD
2812}
2813
2814/* Implements ASM_OUTPUT_REG_PUSH. Control registers are pushed
2815 differently than general registers. */
2816void
2817m32c_output_reg_push (FILE * s, int regno)
2818{
2819 if (regno == FLG_REGNO)
2820 fprintf (s, "\tpushc\tflg\n");
2821 else
04aff2c0 2822 fprintf (s, "\tpush.%c\t%s\n",
38b2d076
DD
2823 " bwll"[reg_push_size (regno)], reg_names[regno]);
2824}
2825
2826/* Likewise for ASM_OUTPUT_REG_POP. */
2827void
2828m32c_output_reg_pop (FILE * s, int regno)
2829{
2830 if (regno == FLG_REGNO)
2831 fprintf (s, "\tpopc\tflg\n");
2832 else
04aff2c0 2833 fprintf (s, "\tpop.%c\t%s\n",
38b2d076
DD
2834 " bwll"[reg_push_size (regno)], reg_names[regno]);
2835}
2836
2837/* Defining target-specific uses of `__attribute__' */
2838
2839/* Used to simplify the logic below. Find the attributes wherever
2840 they may be. */
2841#define M32C_ATTRIBUTES(decl) \
2842 (TYPE_P (decl)) ? TYPE_ATTRIBUTES (decl) \
2843 : DECL_ATTRIBUTES (decl) \
2844 ? (DECL_ATTRIBUTES (decl)) \
2845 : TYPE_ATTRIBUTES (TREE_TYPE (decl))
2846
2847/* Returns TRUE if the given tree has the "interrupt" attribute. */
2848static int
2849interrupt_p (tree node ATTRIBUTE_UNUSED)
2850{
2851 tree list = M32C_ATTRIBUTES (node);
2852 while (list)
2853 {
2854 if (is_attribute_p ("interrupt", TREE_PURPOSE (list)))
2855 return 1;
2856 list = TREE_CHAIN (list);
2857 }
65655f79
DD
2858 return fast_interrupt_p (node);
2859}
2860
2861/* Returns TRUE if the given tree has the "bank_switch" attribute. */
2862static int
2863bank_switch_p (tree node ATTRIBUTE_UNUSED)
2864{
2865 tree list = M32C_ATTRIBUTES (node);
2866 while (list)
2867 {
2868 if (is_attribute_p ("bank_switch", TREE_PURPOSE (list)))
2869 return 1;
2870 list = TREE_CHAIN (list);
2871 }
2872 return 0;
2873}
2874
2875/* Returns TRUE if the given tree has the "fast_interrupt" attribute. */
2876static int
2877fast_interrupt_p (tree node ATTRIBUTE_UNUSED)
2878{
2879 tree list = M32C_ATTRIBUTES (node);
2880 while (list)
2881 {
2882 if (is_attribute_p ("fast_interrupt", TREE_PURPOSE (list)))
2883 return 1;
2884 list = TREE_CHAIN (list);
2885 }
38b2d076
DD
2886 return 0;
2887}
2888
2889static tree
2890interrupt_handler (tree * node ATTRIBUTE_UNUSED,
2891 tree name ATTRIBUTE_UNUSED,
2892 tree args ATTRIBUTE_UNUSED,
2893 int flags ATTRIBUTE_UNUSED,
2894 bool * no_add_attrs ATTRIBUTE_UNUSED)
2895{
2896 return NULL_TREE;
2897}
2898
5abd2125
JS
2899/* Returns TRUE if given tree has the "function_vector" attribute. */
2900int
2901m32c_special_page_vector_p (tree func)
2902{
653e2568
DD
2903 tree list;
2904
5abd2125
JS
2905 if (TREE_CODE (func) != FUNCTION_DECL)
2906 return 0;
2907
653e2568 2908 list = M32C_ATTRIBUTES (func);
5abd2125
JS
2909 while (list)
2910 {
2911 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2912 return 1;
2913 list = TREE_CHAIN (list);
2914 }
2915 return 0;
2916}
2917
2918static tree
2919function_vector_handler (tree * node ATTRIBUTE_UNUSED,
2920 tree name ATTRIBUTE_UNUSED,
2921 tree args ATTRIBUTE_UNUSED,
2922 int flags ATTRIBUTE_UNUSED,
2923 bool * no_add_attrs ATTRIBUTE_UNUSED)
2924{
2925 if (TARGET_R8C)
2926 {
2927 /* The attribute is not supported for R8C target. */
2928 warning (OPT_Wattributes,
29d08eba
JM
2929 "%qE attribute is not supported for R8C target",
2930 name);
5abd2125
JS
2931 *no_add_attrs = true;
2932 }
2933 else if (TREE_CODE (*node) != FUNCTION_DECL)
2934 {
2935 /* The attribute must be applied to functions only. */
2936 warning (OPT_Wattributes,
29d08eba
JM
2937 "%qE attribute applies only to functions",
2938 name);
5abd2125
JS
2939 *no_add_attrs = true;
2940 }
2941 else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
2942 {
2943 /* The argument must be a constant integer. */
2944 warning (OPT_Wattributes,
29d08eba
JM
2945 "%qE attribute argument not an integer constant",
2946 name);
5abd2125
JS
2947 *no_add_attrs = true;
2948 }
2949 else if (TREE_INT_CST_LOW (TREE_VALUE (args)) < 18
2950 || TREE_INT_CST_LOW (TREE_VALUE (args)) > 255)
2951 {
 2952 /* The argument value must be between 18 and 255. */
2953 warning (OPT_Wattributes,
29d08eba
JM
2954 "%qE attribute argument should be between 18 to 255",
2955 name);
5abd2125
JS
2956 *no_add_attrs = true;
2957 }
2958 return NULL_TREE;
2959}
2960
2961/* If the function is assigned the attribute 'function_vector', it
2962 returns the function vector number, otherwise returns zero. */
2963int
2964current_function_special_page_vector (rtx x)
2965{
2966 int num;
2967
2968 if ((GET_CODE(x) == SYMBOL_REF)
2969 && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
2970 {
653e2568 2971 tree list;
5abd2125
JS
2972 tree t = SYMBOL_REF_DECL (x);
2973
2974 if (TREE_CODE (t) != FUNCTION_DECL)
2975 return 0;
2976
653e2568 2977 list = M32C_ATTRIBUTES (t);
5abd2125
JS
2978 while (list)
2979 {
2980 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2981 {
2982 num = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list)));
2983 return num;
2984 }
2985
2986 list = TREE_CHAIN (list);
2987 }
2988
2989 return 0;
2990 }
2991 else
2992 return 0;
2993}
2994
38b2d076
DD
2995#undef TARGET_ATTRIBUTE_TABLE
2996#define TARGET_ATTRIBUTE_TABLE m32c_attribute_table
2997static const struct attribute_spec m32c_attribute_table[] = {
4849deb1
JJ
2998 /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
2999 affects_type_identity, handler, exclude } */
3000 { "interrupt", 0, 0, false, false, false, false, interrupt_handler, NULL },
3001 { "bank_switch", 0, 0, false, false, false, false, interrupt_handler, NULL },
3002 { "fast_interrupt", 0, 0, false, false, false, false,
3003 interrupt_handler, NULL },
3004 { "function_vector", 1, 1, true, false, false, false,
3005 function_vector_handler, NULL },
3006 { NULL, 0, 0, false, false, false, false, NULL, NULL }
38b2d076
DD
3007};
3008
3009#undef TARGET_COMP_TYPE_ATTRIBUTES
3010#define TARGET_COMP_TYPE_ATTRIBUTES m32c_comp_type_attributes
3011static int
3101faab
KG
3012m32c_comp_type_attributes (const_tree type1 ATTRIBUTE_UNUSED,
3013 const_tree type2 ATTRIBUTE_UNUSED)
38b2d076
DD
3014{
3015 /* 0=incompatible 1=compatible 2=warning */
3016 return 1;
3017}
3018
3019#undef TARGET_INSERT_ATTRIBUTES
3020#define TARGET_INSERT_ATTRIBUTES m32c_insert_attributes
3021static void
3022m32c_insert_attributes (tree node ATTRIBUTE_UNUSED,
3023 tree * attr_ptr ATTRIBUTE_UNUSED)
3024{
f6052f86
DD
3025 unsigned addr;
3026 /* See if we need to make #pragma address variables volatile. */
3027
3028 if (TREE_CODE (node) == VAR_DECL)
3029 {
444d6efe 3030 const char *name = IDENTIFIER_POINTER (DECL_NAME (node));
f6052f86
DD
3031 if (m32c_get_pragma_address (name, &addr))
3032 {
3033 TREE_THIS_VOLATILE (node) = true;
3034 }
3035 }
3036}
3037
f6052f86 3038/* Hash table of pragma info. */
fb5c464a 3039static GTY(()) hash_map<nofree_string_hash, unsigned> *pragma_htab;
f6052f86
DD
3040
3041void
3042m32c_note_pragma_address (const char *varname, unsigned address)
3043{
f6052f86 3044 if (!pragma_htab)
fb5c464a 3045 pragma_htab = hash_map<nofree_string_hash, unsigned>::create_ggc (31);
f6052f86 3046
2a22f99c
TS
3047 const char *name = ggc_strdup (varname);
3048 unsigned int *slot = &pragma_htab->get_or_insert (name);
3049 *slot = address;
f6052f86
DD
3050}
3051
3052static bool
3053m32c_get_pragma_address (const char *varname, unsigned *address)
3054{
f6052f86
DD
3055 if (!pragma_htab)
3056 return false;
3057
2a22f99c
TS
3058 unsigned int *slot = pragma_htab->get (varname);
3059 if (slot)
f6052f86 3060 {
2a22f99c 3061 *address = *slot;
f6052f86
DD
3062 return true;
3063 }
3064 return false;
3065}
3066
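/* Emit a common (or local) symbol definition. Variables placed with
 #pragma address are instead emitted as plain assembler equates to
 their absolute addresses and are never made global. */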
3067void
444d6efe
JR
3068m32c_output_aligned_common (FILE *stream, tree decl ATTRIBUTE_UNUSED,
3069 const char *name,
f6052f86
DD
3070 int size, int align, int global)
3071{
3072 unsigned address;
3073
3074 if (m32c_get_pragma_address (name, &address))
3075 {
3076 /* We never output these as global. */
3077 assemble_name (stream, name);
3078 fprintf (stream, " = 0x%04x\n", address);
3079 return;
3080 }
3081 if (!global)
3082 {
3083 fprintf (stream, "\t.local\t");
3084 assemble_name (stream, name);
3085 fprintf (stream, "\n");
3086 }
3087 fprintf (stream, "\t.comm\t");
3088 assemble_name (stream, name);
3089 fprintf (stream, ",%u,%u\n", size, align / BITS_PER_UNIT);
38b2d076
DD
3090}
3091
3092/* Predicates */
3093
f9b89438 3094/* This is a list of legal subregs of hard regs. */
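/* byte_mask has one bit set for each SUBREG_BYTE that is allowed;
 legal_when is 1 for always, 16 for TARGET_A16 only, or 24 for
 TARGET_A24 only. */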
67fc44cb
DD
3095static const struct {
3096 unsigned char outer_mode_size;
3097 unsigned char inner_mode_size;
3098 unsigned char byte_mask;
3099 unsigned char legal_when;
f9b89438 3100 unsigned int regno;
f9b89438 3101} legal_subregs[] = {
67fc44cb
DD
3102 {1, 2, 0x03, 1, R0_REGNO}, /* r0h r0l */
3103 {1, 2, 0x03, 1, R1_REGNO}, /* r1h r1l */
3104 {1, 2, 0x01, 1, A0_REGNO},
3105 {1, 2, 0x01, 1, A1_REGNO},
f9b89438 3106
67fc44cb
DD
3107 {1, 4, 0x01, 1, A0_REGNO},
3108 {1, 4, 0x01, 1, A1_REGNO},
f9b89438 3109
67fc44cb
DD
3110 {2, 4, 0x05, 1, R0_REGNO}, /* r2 r0 */
3111 {2, 4, 0x05, 1, R1_REGNO}, /* r3 r1 */
3112 {2, 4, 0x05, 16, A0_REGNO}, /* a1 a0 */
3113 {2, 4, 0x01, 24, A0_REGNO}, /* a1 a0 */
3114 {2, 4, 0x01, 24, A1_REGNO}, /* a1 a0 */
f9b89438 3115
67fc44cb 3116 {4, 8, 0x55, 1, R0_REGNO}, /* r3 r1 r2 r0 */
f9b89438
DD
3117};
3118
3119/* Returns TRUE if OP is a subreg of a hard reg which we don't
f6052f86 3120 support. We also bail on MEMs with illegal addresses. */
f9b89438
DD
3121bool
3122m32c_illegal_subreg_p (rtx op)
3123{
f9b89438
DD
3124 int offset;
3125 unsigned int i;
ef4bddc2 3126 machine_mode src_mode, dest_mode;
f9b89438 3127
f6052f86
DD
3128 if (GET_CODE (op) == MEM
3129 && ! m32c_legitimate_address_p (Pmode, XEXP (op, 0), false))
3130 {
3131 return true;
3132 }
3133
f9b89438
DD
3134 if (GET_CODE (op) != SUBREG)
3135 return false;
3136
3137 dest_mode = GET_MODE (op);
3138 offset = SUBREG_BYTE (op);
3139 op = SUBREG_REG (op);
3140 src_mode = GET_MODE (op);
3141
3142 if (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (src_mode))
3143 return false;
3144 if (GET_CODE (op) != REG)
3145 return false;
3146 if (REGNO (op) >= MEM0_REGNO)
3147 return false;
3148
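 /* Convert the byte offset into the bit used by byte_mask above. */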
3149 offset = (1 << offset);
3150
67fc44cb 3151 for (i = 0; i < ARRAY_SIZE (legal_subregs); i ++)
f9b89438
DD
3152 if (legal_subregs[i].outer_mode_size == GET_MODE_SIZE (dest_mode)
3153 && legal_subregs[i].regno == REGNO (op)
3154 && legal_subregs[i].inner_mode_size == GET_MODE_SIZE (src_mode)
3155 && legal_subregs[i].byte_mask & offset)
3156 {
3157 switch (legal_subregs[i].legal_when)
3158 {
3159 case 1:
3160 return false;
3161 case 16:
3162 if (TARGET_A16)
3163 return false;
3164 break;
3165 case 24:
3166 if (TARGET_A24)
3167 return false;
3168 break;
3169 }
3170 }
3171 return true;
3172}
3173
38b2d076
DD
3174/* Returns TRUE if we support a move between the first two operands.
3175 At the moment, we just want to discourage mem to mem moves until
3176 after reload, because reload has a hard time with our limited
3177 number of address registers, and we can get into a situation where
3178 we need three of them when we only have two. */
3179bool
ef4bddc2 3180m32c_mov_ok (rtx * operands, machine_mode mode ATTRIBUTE_UNUSED)
38b2d076
DD
3181{
3182 rtx op0 = operands[0];
3183 rtx op1 = operands[1];
3184
3185 if (TARGET_A24)
3186 return true;
3187
3188#define DEBUG_MOV_OK 0
3189#if DEBUG_MOV_OK
3190 fprintf (stderr, "m32c_mov_ok %s\n", mode_name[mode]);
3191 debug_rtx (op0);
3192 debug_rtx (op1);
3193#endif
3194
3195 if (GET_CODE (op0) == SUBREG)
3196 op0 = XEXP (op0, 0);
3197 if (GET_CODE (op1) == SUBREG)
3198 op1 = XEXP (op1, 0);
3199
3200 if (GET_CODE (op0) == MEM
3201 && GET_CODE (op1) == MEM
3202 && ! reload_completed)
3203 {
3204#if DEBUG_MOV_OK
3205 fprintf (stderr, " - no, mem to mem\n");
3206#endif
3207 return false;
3208 }
3209
3210#if DEBUG_MOV_OK
3211 fprintf (stderr, " - ok\n");
3212#endif
3213 return true;
3214}
3215
ff485e71
DD
3216/* Returns TRUE if two consecutive HImode mov instructions, generated
3217 for moving an immediate double data to a double data type variable
3218 location, can be combined into single SImode mov instruction. */
3219bool
55356334 3220m32c_immd_dbl_mov (rtx * operands ATTRIBUTE_UNUSED,
ef4bddc2 3221 machine_mode mode ATTRIBUTE_UNUSED)
ff485e71 3222{
55356334
RS
3223 /* ??? This relied on the now-defunct MEM_SCALAR and MEM_IN_STRUCT_P
3224 flags. */
ff485e71
DD
3225 return false;
3226}
3227
38b2d076
DD
3228/* Expanders */
3229
3230/* Subregs are non-orthogonal for us, because our registers are all
3231 different sizes. */
3232static rtx
ef4bddc2
RS
3233m32c_subreg (machine_mode outer,
3234 rtx x, machine_mode inner, int byte)
38b2d076
DD
3235{
3236 int r, nr = -1;
3237
3238 /* Converting MEMs to different types that are the same size, we
3239 just rewrite them. */
3240 if (GET_CODE (x) == SUBREG
3241 && SUBREG_BYTE (x) == 0
3242 && GET_CODE (SUBREG_REG (x)) == MEM
3243 && (GET_MODE_SIZE (GET_MODE (x))
3244 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
3245 {
3246 rtx oldx = x;
3247 x = gen_rtx_MEM (GET_MODE (x), XEXP (SUBREG_REG (x), 0));
3248 MEM_COPY_ATTRIBUTES (x, SUBREG_REG (oldx));
3249 }
3250
3251 /* Push/pop get done as smaller push/pops. */
3252 if (GET_CODE (x) == MEM
3253 && (GET_CODE (XEXP (x, 0)) == PRE_DEC
3254 || GET_CODE (XEXP (x, 0)) == POST_INC))
3255 return gen_rtx_MEM (outer, XEXP (x, 0));
3256 if (GET_CODE (x) == SUBREG
3257 && GET_CODE (XEXP (x, 0)) == MEM
3258 && (GET_CODE (XEXP (XEXP (x, 0), 0)) == PRE_DEC
3259 || GET_CODE (XEXP (XEXP (x, 0), 0)) == POST_INC))
3260 return gen_rtx_MEM (outer, XEXP (XEXP (x, 0), 0));
3261
3262 if (GET_CODE (x) != REG)
146456c1
DD
3263 {
3264 rtx r = simplify_gen_subreg (outer, x, inner, byte);
3265 if (GET_CODE (r) == SUBREG
3266 && GET_CODE (x) == MEM
3267 && MEM_VOLATILE_P (x))
3268 {
3269 /* Volatile MEMs don't get simplified, but we need them to
3270 be. We are little endian, so the subreg byte is the
3271 offset. */
91140cd3 3272 r = adjust_address_nv (x, outer, byte);
146456c1
DD
3273 }
3274 return r;
3275 }
38b2d076
DD
3276
3277 r = REGNO (x);
3278 if (r >= FIRST_PSEUDO_REGISTER || r == AP_REGNO)
3279 return simplify_gen_subreg (outer, x, inner, byte);
3280
3281 if (IS_MEM_REGNO (r))
3282 return simplify_gen_subreg (outer, x, inner, byte);
3283
3284 /* This is where the complexities of our register layout are
3285 described. */
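 /* For example, the HImode halves of an SImode value in r0 live in
 r0 (low) and r2 (high), matching the legal_subregs table above. */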
3286 if (byte == 0)
3287 nr = r;
3288 else if (outer == HImode)
3289 {
3290 if (r == R0_REGNO && byte == 2)
3291 nr = R2_REGNO;
3292 else if (r == R0_REGNO && byte == 4)
3293 nr = R1_REGNO;
3294 else if (r == R0_REGNO && byte == 6)
3295 nr = R3_REGNO;
3296 else if (r == R1_REGNO && byte == 2)
3297 nr = R3_REGNO;
3298 else if (r == A0_REGNO && byte == 2)
3299 nr = A1_REGNO;
3300 }
3301 else if (outer == SImode)
3302 {
3303 if (r == R0_REGNO && byte == 0)
3304 nr = R0_REGNO;
3305 else if (r == R0_REGNO && byte == 4)
3306 nr = R1_REGNO;
3307 }
3308 if (nr == -1)
3309 {
3310 fprintf (stderr, "m32c_subreg %s %s %d\n",
3311 mode_name[outer], mode_name[inner], byte);
3312 debug_rtx (x);
3313 gcc_unreachable ();
3314 }
3315 return gen_rtx_REG (outer, nr);
3316}
3317
3318/* Used to emit move instructions. We split some moves,
3319 and avoid mem-mem moves. */
3320int
ef4bddc2 3321m32c_prepare_move (rtx * operands, machine_mode mode)
38b2d076 3322{
5fd5d713
DD
3323 if (far_addr_space_p (operands[0])
3324 && CONSTANT_P (operands[1]))
3325 {
3326 operands[1] = force_reg (GET_MODE (operands[0]), operands[1]);
3327 }
38b2d076
DD
3328 if (TARGET_A16 && mode == PSImode)
3329 return m32c_split_move (operands, mode, 1);
3330 if ((GET_CODE (operands[0]) == MEM)
3331 && (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY))
3332 {
3333 rtx pmv = XEXP (operands[0], 0);
3334 rtx dest_reg = XEXP (pmv, 0);
3335 rtx dest_mod = XEXP (pmv, 1);
3336
f7df4a84 3337 emit_insn (gen_rtx_SET (dest_reg, dest_mod));
38b2d076
DD
3338 operands[0] = gen_rtx_MEM (mode, dest_reg);
3339 }
b3a13419 3340 if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
38b2d076
DD
3341 operands[1] = copy_to_mode_reg (mode, operands[1]);
3342 return 0;
3343}
3344
3345#define DEBUG_SPLIT 0
3346
3347/* Returns TRUE if the given PSImode move should be split. We split
3348 for all r8c/m16c moves, since it doesn't support them, and for
3349 POP.L as we can only *push* SImode. */
3350int
3351m32c_split_psi_p (rtx * operands)
3352{
3353#if DEBUG_SPLIT
3354 fprintf (stderr, "\nm32c_split_psi_p\n");
3355 debug_rtx (operands[0]);
3356 debug_rtx (operands[1]);
3357#endif
3358 if (TARGET_A16)
3359 {
3360#if DEBUG_SPLIT
3361 fprintf (stderr, "yes, A16\n");
3362#endif
3363 return 1;
3364 }
3365 if (GET_CODE (operands[1]) == MEM
3366 && GET_CODE (XEXP (operands[1], 0)) == POST_INC)
3367 {
3368#if DEBUG_SPLIT
3369 fprintf (stderr, "yes, pop.l\n");
3370#endif
3371 return 1;
3372 }
3373#if DEBUG_SPLIT
3374 fprintf (stderr, "no, default\n");
3375#endif
3376 return 0;
3377}
3378
3379/* Split the given move. SPLIT_ALL is 0 if splitting is optional
3380 (define_expand), 1 if it is not optional (define_insn_and_split),
3381 and 3 for define_split (alternate api). */
3382int
ef4bddc2 3383m32c_split_move (rtx * operands, machine_mode mode, int split_all)
38b2d076
DD
3384{
3385 rtx s[4], d[4];
3386 int parts, si, di, rev = 0;
3387 int rv = 0, opi = 2;
ef4bddc2 3388 machine_mode submode = HImode;
38b2d076
DD
3389 rtx *ops, local_ops[10];
3390
3391 /* define_split modifies the existing operands, but the other two
3392 emit new insns. OPS is where we store the operand pairs, which
3393 we emit later. */
3394 if (split_all == 3)
3395 ops = operands;
3396 else
3397 ops = local_ops;
3398
 3399 /* Split into two HImode pieces by default; DImode is split into
 two SImode pieces. */
3400 if (mode == DImode)
3401 submode = SImode;
3402
3403 /* Before splitting mem-mem moves, force one operand into a
3404 register. */
b3a13419 3405 if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
38b2d076
DD
3406 {
3407#if DEBUG0
3408 fprintf (stderr, "force_reg...\n");
3409 debug_rtx (operands[1]);
3410#endif
3411 operands[1] = force_reg (mode, operands[1]);
3412#if DEBUG0
3413 debug_rtx (operands[1]);
3414#endif
3415 }
3416
3417 parts = 2;
3418
3419#if DEBUG_SPLIT
b3a13419
ILT
3420 fprintf (stderr, "\nsplit_move %d all=%d\n", !can_create_pseudo_p (),
3421 split_all);
38b2d076
DD
3422 debug_rtx (operands[0]);
3423 debug_rtx (operands[1]);
3424#endif
3425
eb5f0c07
DD
3426 /* Note that split_all is not used to select the api after this
3427 point, so it's safe to set it to 3 even with define_insn. */
3428 /* None of the chips can move SI operands to sp-relative addresses,
3429 so we always split those. */
03dd17b1 3430 if (satisfies_constraint_Ss (operands[0]))
eb5f0c07
DD
3431 split_all = 3;
3432
5fd5d713
DD
3433 if (TARGET_A16
3434 && (far_addr_space_p (operands[0])
3435 || far_addr_space_p (operands[1])))
3436 split_all |= 1;
3437
38b2d076
DD
3438 /* We don't need to split these. */
3439 if (TARGET_A24
3440 && split_all != 3
3441 && (mode == SImode || mode == PSImode)
3442 && !(GET_CODE (operands[1]) == MEM
3443 && GET_CODE (XEXP (operands[1], 0)) == POST_INC))
3444 return 0;
3445
3446 /* First, enumerate the subregs we'll be dealing with. */
3447 for (si = 0; si < parts; si++)
3448 {
3449 d[si] =
3450 m32c_subreg (submode, operands[0], mode,
3451 si * GET_MODE_SIZE (submode));
3452 s[si] =
3453 m32c_subreg (submode, operands[1], mode,
3454 si * GET_MODE_SIZE (submode));
3455 }
3456
3457 /* Split pushes by emitting a sequence of smaller pushes. */
3458 if (GET_CODE (d[0]) == MEM && GET_CODE (XEXP (d[0], 0)) == PRE_DEC)
3459 {
3460 for (si = parts - 1; si >= 0; si--)
3461 {
3462 ops[opi++] = gen_rtx_MEM (submode,
3463 gen_rtx_PRE_DEC (Pmode,
3464 gen_rtx_REG (Pmode,
3465 SP_REGNO)));
3466 ops[opi++] = s[si];
3467 }
3468
3469 rv = 1;
3470 }
3471 /* Likewise for pops. */
3472 else if (GET_CODE (s[0]) == MEM && GET_CODE (XEXP (s[0], 0)) == POST_INC)
3473 {
3474 for (di = 0; di < parts; di++)
3475 {
3476 ops[opi++] = d[di];
3477 ops[opi++] = gen_rtx_MEM (submode,
3478 gen_rtx_POST_INC (Pmode,
3479 gen_rtx_REG (Pmode,
3480 SP_REGNO)));
3481 }
3482 rv = 1;
3483 }
3484 else if (split_all)
3485 {
3486 /* if d[di] == s[si] for any di < si, we'll early clobber. */
3487 for (di = 0; di < parts - 1; di++)
3488 for (si = di + 1; si < parts; si++)
3489 if (reg_mentioned_p (d[di], s[si]))
3490 rev = 1;
3491
3492 if (rev)
3493 for (si = 0; si < parts; si++)
3494 {
3495 ops[opi++] = d[si];
3496 ops[opi++] = s[si];
3497 }
3498 else
3499 for (si = parts - 1; si >= 0; si--)
3500 {
3501 ops[opi++] = d[si];
3502 ops[opi++] = s[si];
3503 }
3504 rv = 1;
3505 }
3506 /* Now emit any moves we may have accumulated. */
3507 if (rv && split_all != 3)
3508 {
3509 int i;
3510 for (i = 2; i < opi; i += 2)
3511 emit_move_insn (ops[i], ops[i + 1]);
3512 }
3513 return rv;
3514}
3515
07127a0a
DD
3516/* The m32c has a number of opcodes that act like memcpy, strcmp, and
3517 the like. For the R8C they expect one of the addresses to be in
3518 R1L:An so we need to arrange for that. Otherwise, it's just a
3519 matter of picking out the operands we want and emitting the right
3520 pattern for them. All these expanders, which correspond to
3521 patterns in blkmov.md, must return nonzero if they expand the insn,
3522 or zero if they should FAIL. */
3523
3524/* This is a memset() opcode. All operands are implied, so we need to
3525 arrange for them to be in the right registers. The opcode wants
3526 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3527 the count (HI), and $2 the value (QI). */
3528int
3529m32c_expand_setmemhi(rtx *operands)
3530{
3531 rtx desta, count, val;
3532 rtx desto, counto;
3533
3534 desta = XEXP (operands[0], 0);
3535 count = operands[1];
3536 val = operands[2];
3537
3538 desto = gen_reg_rtx (Pmode);
3539 counto = gen_reg_rtx (HImode);
3540
3541 if (GET_CODE (desta) != REG
3542 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3543 desta = copy_to_mode_reg (Pmode, desta);
3544
3545 /* This looks like an arbitrary restriction, but this is by far the
3546 most common case. For counts 8..14 this actually results in
3547 smaller code with no speed penalty because the half-sized
3548 constant can be loaded with a shorter opcode. */
3549 if (GET_CODE (count) == CONST_INT
3550 && GET_CODE (val) == CONST_INT
3551 && ! (INTVAL (count) & 1)
3552 && (INTVAL (count) > 1)
3553 && (INTVAL (val) <= 7 && INTVAL (val) >= -8))
3554 {
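 /* Duplicate the byte into both halves of a word so the
 word-wide memset opcode can store two bytes per iteration. */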
3555 unsigned v = INTVAL (val) & 0xff;
3556 v = v | (v << 8);
3557 count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3558 val = copy_to_mode_reg (HImode, GEN_INT (v));
3559 if (TARGET_A16)
3560 emit_insn (gen_setmemhi_whi_op (desto, counto, val, desta, count));
3561 else
3562 emit_insn (gen_setmemhi_wpsi_op (desto, counto, val, desta, count));
3563 return 1;
3564 }
3565
3566 /* This is the generalized memset() case. */
3567 if (GET_CODE (val) != REG
3568 || REGNO (val) < FIRST_PSEUDO_REGISTER)
3569 val = copy_to_mode_reg (QImode, val);
3570
3571 if (GET_CODE (count) != REG
3572 || REGNO (count) < FIRST_PSEUDO_REGISTER)
3573 count = copy_to_mode_reg (HImode, count);
3574
3575 if (TARGET_A16)
3576 emit_insn (gen_setmemhi_bhi_op (desto, counto, val, desta, count));
3577 else
3578 emit_insn (gen_setmemhi_bpsi_op (desto, counto, val, desta, count));
3579
3580 return 1;
3581}
3582
3583/* This is a memcpy() opcode. All operands are implied, so we need to
3584 arrange for them to be in the right registers. The opcode wants
3585 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3586 is the source (MEM:BLK), and $2 the count (HI). */
3587int
76715c32 3588m32c_expand_cpymemhi(rtx *operands)
07127a0a
DD
3589{
3590 rtx desta, srca, count;
3591 rtx desto, srco, counto;
3592
3593 desta = XEXP (operands[0], 0);
3594 srca = XEXP (operands[1], 0);
3595 count = operands[2];
3596
3597 desto = gen_reg_rtx (Pmode);
3598 srco = gen_reg_rtx (Pmode);
3599 counto = gen_reg_rtx (HImode);
3600
3601 if (GET_CODE (desta) != REG
3602 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3603 desta = copy_to_mode_reg (Pmode, desta);
3604
3605 if (GET_CODE (srca) != REG
3606 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3607 srca = copy_to_mode_reg (Pmode, srca);
3608
3609 /* Similar to setmem, but we don't need to check the value. */
3610 if (GET_CODE (count) == CONST_INT
3611 && ! (INTVAL (count) & 1)
3612 && (INTVAL (count) > 1))
3613 {
3614 count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3615 if (TARGET_A16)
76715c32 3616 emit_insn (gen_cpymemhi_whi_op (desto, srco, counto, desta, srca, count));
07127a0a 3617 else
76715c32 3618 emit_insn (gen_cpymemhi_wpsi_op (desto, srco, counto, desta, srca, count));
07127a0a
DD
3619 return 1;
3620 }
3621
 3622 /* This is the generalized memcpy() case. */
3623 if (GET_CODE (count) != REG
3624 || REGNO (count) < FIRST_PSEUDO_REGISTER)
3625 count = copy_to_mode_reg (HImode, count);
3626
3627 if (TARGET_A16)
76715c32 3628 emit_insn (gen_cpymemhi_bhi_op (desto, srco, counto, desta, srca, count));
07127a0a 3629 else
76715c32 3630 emit_insn (gen_cpymemhi_bpsi_op (desto, srco, counto, desta, srca, count));
07127a0a
DD
3631
3632 return 1;
3633}
3634
3635/* This is a stpcpy() opcode. $0 is the destination (MEM:BLK) after
3636 the copy, which should point to the NUL at the end of the string,
3637 $1 is the destination (MEM:BLK), and $2 is the source (MEM:BLK).
3638 Since our opcode leaves the destination pointing *after* the NUL,
3639 we must emit an adjustment. */
3640int
3641m32c_expand_movstr(rtx *operands)
3642{
3643 rtx desta, srca;
3644 rtx desto, srco;
3645
3646 desta = XEXP (operands[1], 0);
3647 srca = XEXP (operands[2], 0);
3648
3649 desto = gen_reg_rtx (Pmode);
3650 srco = gen_reg_rtx (Pmode);
3651
3652 if (GET_CODE (desta) != REG
3653 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3654 desta = copy_to_mode_reg (Pmode, desta);
3655
3656 if (GET_CODE (srca) != REG
3657 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3658 srca = copy_to_mode_reg (Pmode, srca);
3659
3660 emit_insn (gen_movstr_op (desto, srco, desta, srca));
3661 /* desto ends up being a1, which allows this type of add through MOVA. */
3662 emit_insn (gen_addpsi3 (operands[0], desto, GEN_INT (-1)));
3663
3664 return 1;
3665}
3666
3667/* This is a strcmp() opcode. $0 is the destination (HI) which holds
3668 <=>0 depending on the comparison, $1 is one string (MEM:BLK), and
3669 $2 is the other (MEM:BLK). We must do the comparison, and then
3670 convert the flags to a signed integer result. */
3671int
3672m32c_expand_cmpstr(rtx *operands)
3673{
3674 rtx src1a, src2a;
3675
3676 src1a = XEXP (operands[1], 0);
3677 src2a = XEXP (operands[2], 0);
3678
3679 if (GET_CODE (src1a) != REG
3680 || REGNO (src1a) < FIRST_PSEUDO_REGISTER)
3681 src1a = copy_to_mode_reg (Pmode, src1a);
3682
3683 if (GET_CODE (src2a) != REG
3684 || REGNO (src2a) < FIRST_PSEUDO_REGISTER)
3685 src2a = copy_to_mode_reg (Pmode, src2a);
3686
3687 emit_insn (gen_cmpstrhi_op (src1a, src2a, src1a, src2a));
3688 emit_insn (gen_cond_to_int (operands[0]));
3689
3690 return 1;
3691}
3692
3693
23fed240
DD
3694typedef rtx (*shift_gen_func)(rtx, rtx, rtx);
3695
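/* Return the insn generator for a shift of the given mode and code
 (ASHIFT, ASHIFTRT, or LSHIFTRT). */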
3696static shift_gen_func
3697shift_gen_func_for (int mode, int code)
3698{
3699#define GFF(m,c,f) if (mode == m && code == c) return f
3700 GFF(QImode, ASHIFT, gen_ashlqi3_i);
3701 GFF(QImode, ASHIFTRT, gen_ashrqi3_i);
3702 GFF(QImode, LSHIFTRT, gen_lshrqi3_i);
3703 GFF(HImode, ASHIFT, gen_ashlhi3_i);
3704 GFF(HImode, ASHIFTRT, gen_ashrhi3_i);
3705 GFF(HImode, LSHIFTRT, gen_lshrhi3_i);
3706 GFF(PSImode, ASHIFT, gen_ashlpsi3_i);
3707 GFF(PSImode, ASHIFTRT, gen_ashrpsi3_i);
3708 GFF(PSImode, LSHIFTRT, gen_lshrpsi3_i);
3709 GFF(SImode, ASHIFT, TARGET_A16 ? gen_ashlsi3_16 : gen_ashlsi3_24);
3710 GFF(SImode, ASHIFTRT, TARGET_A16 ? gen_ashrsi3_16 : gen_ashrsi3_24);
3711 GFF(SImode, LSHIFTRT, TARGET_A16 ? gen_lshrsi3_16 : gen_lshrsi3_24);
3712#undef GFF
07127a0a 3713 gcc_unreachable ();
23fed240
DD
3714}
3715
38b2d076
DD
3716/* The m32c only has one shift, but it takes a signed count. GCC
3717 doesn't want this, so we fake it by negating any shift count when
07127a0a
DD
3718 we're pretending to shift the other way. Also, the shift count is
3719 limited to -8..8. It's slightly better to use two shifts for 9..15
3720 than to load the count into r1h, so we do that too. */
38b2d076 3721int
23fed240 3722m32c_prepare_shift (rtx * operands, int scale, int shift_code)
38b2d076 3723{
ef4bddc2 3724 machine_mode mode = GET_MODE (operands[0]);
23fed240 3725 shift_gen_func func = shift_gen_func_for (mode, shift_code);
38b2d076 3726 rtx temp;
23fed240
DD
3727
3728 if (GET_CODE (operands[2]) == CONST_INT)
38b2d076 3729 {
23fed240
DD
3730 int maxc = TARGET_A24 && (mode == PSImode || mode == SImode) ? 32 : 8;
3731 int count = INTVAL (operands[2]) * scale;
3732
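 /* A constant count beyond the hardware limit is handled by
 emitting a sequence of shifts, each within the limit. */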
3733 while (count > maxc)
3734 {
3735 temp = gen_reg_rtx (mode);
3736 emit_insn (func (temp, operands[1], GEN_INT (maxc)));
3737 operands[1] = temp;
3738 count -= maxc;
3739 }
3740 while (count < -maxc)
3741 {
3742 temp = gen_reg_rtx (mode);
3743 emit_insn (func (temp, operands[1], GEN_INT (-maxc)));
3744 operands[1] = temp;
3745 count += maxc;
3746 }
3747 emit_insn (func (operands[0], operands[1], GEN_INT (count)));
3748 return 1;
38b2d076 3749 }
2e160056
DD
3750
3751 temp = gen_reg_rtx (QImode);
38b2d076 3752 if (scale < 0)
2e160056
DD
3753 /* The pattern has a NEG that corresponds to this. */
3754 emit_move_insn (temp, gen_rtx_NEG (QImode, operands[2]));
3755 else if (TARGET_A16 && mode == SImode)
 3756 /* We do this because the code below may modify this value, and
 3757 we don't want to modify the original. */
3758 emit_move_insn (temp, operands[2]);
38b2d076 3759 else
2e160056 3760 /* We'll only use it for the shift, no point emitting a move. */
38b2d076 3761 temp = operands[2];
2e160056 3762
16659fcf 3763 if (TARGET_A16 && GET_MODE_SIZE (mode) == 4)
2e160056
DD
3764 {
3765 /* The m16c has a limit of -16..16 for SI shifts, even when the
3766 shift count is in a register. Since there are so many targets
3767 of these shifts, it's better to expand the RTL here than to
3768 call a helper function.
3769
3770 The resulting code looks something like this:
3771
3772 cmp.b r1h,-16
3773 jge.b 1f
3774 shl.l -16,dest
3775 add.b r1h,16
3776 1f: cmp.b r1h,16
3777 jle.b 1f
3778 shl.l 16,dest
3779 sub.b r1h,16
3780 1f: shl.l r1h,dest
3781
3782 We take advantage of the fact that "negative" shifts are
3783 undefined to skip one of the comparisons. */
3784
3785 rtx count;
9b2ea071 3786 rtx tempvar;
e60365d3 3787 rtx_insn *insn;
2e160056 3788
16659fcf
DD
3789 emit_move_insn (operands[0], operands[1]);
3790
2e160056 3791 count = temp;
9b2ea071 3792 rtx_code_label *label = gen_label_rtx ();
2e160056
DD
3793 LABEL_NUSES (label) ++;
3794
833bf445
DD
3795 tempvar = gen_reg_rtx (mode);
3796
2e160056
DD
3797 if (shift_code == ASHIFT)
3798 {
3799	  /* This is a left shift.  We need only check positive counts.  */
3800 emit_jump_insn (gen_cbranchqi4 (gen_rtx_LE (VOIDmode, 0, 0),
3801 count, GEN_INT (16), label));
833bf445
DD
3802 emit_insn (func (tempvar, operands[0], GEN_INT (8)));
3803 emit_insn (func (operands[0], tempvar, GEN_INT (8)));
2e160056
DD
3804 insn = emit_insn (gen_addqi3 (count, count, GEN_INT (-16)));
3805 emit_label_after (label, insn);
3806 }
3807 else
3808 {
3809	  /* This is a right shift.  We need only check negative counts.  */
3810 emit_jump_insn (gen_cbranchqi4 (gen_rtx_GE (VOIDmode, 0, 0),
3811 count, GEN_INT (-16), label));
833bf445
DD
3812 emit_insn (func (tempvar, operands[0], GEN_INT (-8)));
3813 emit_insn (func (operands[0], tempvar, GEN_INT (-8)));
2e160056
DD
3814 insn = emit_insn (gen_addqi3 (count, count, GEN_INT (16)));
3815 emit_label_after (label, insn);
3816 }
16659fcf
DD
3817 operands[1] = operands[0];
3818 emit_insn (func (operands[0], operands[0], count));
3819 return 1;
2e160056
DD
3820 }
3821
38b2d076
DD
3822 operands[2] = temp;
3823 return 0;
3824}
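
/* A worked example of the constant-count path above, written as plain
   C.  This is an illustration only and is never called: for HImode the
   immediate count is capped at maxc = 8, so a left shift by 12 is
   split into a shift by 8 followed by a shift by 4.  */
static unsigned short ATTRIBUTE_UNUSED
m32c_shift_by_12_sketch (unsigned short x)
{
  x = (unsigned short) (x << 8);	/* first chunk, capped at maxc */
  x = (unsigned short) (x << 4);	/* remainder of the count */
  return x;
}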
3825
12ea2512
DD
3826/* The m32c has a limited range of operations that work on PSImode
3827 values; we have to expand to SI, do the math, and truncate back to
3828 PSI. Yes, this is expensive, but hopefully gcc will learn to avoid
3829 those cases. */
3830void
3831m32c_expand_neg_mulpsi3 (rtx * operands)
3832{
3833 /* operands: a = b * i */
3834 rtx temp1; /* b as SI */
07127a0a
DD
3835  rtx scale;		/* i as SI */
3836 rtx temp2; /* a*b as SI */
12ea2512
DD
3837
3838 temp1 = gen_reg_rtx (SImode);
3839 temp2 = gen_reg_rtx (SImode);
07127a0a
DD
3840 if (GET_CODE (operands[2]) != CONST_INT)
3841 {
3842 scale = gen_reg_rtx (SImode);
3843 emit_insn (gen_zero_extendpsisi2 (scale, operands[2]));
3844 }
3845 else
3846 scale = copy_to_mode_reg (SImode, operands[2]);
12ea2512
DD
3847
3848 emit_insn (gen_zero_extendpsisi2 (temp1, operands[1]));
07127a0a
DD
3849 temp2 = expand_simple_binop (SImode, MULT, temp1, scale, temp2, 1, OPTAB_LIB);
3850 emit_insn (gen_truncsipsi2 (operands[0], temp2));
12ea2512
DD
3851}
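
/* A plain-C sketch of the PSImode (24-bit) multiply above; this is an
   illustration only and is never called.  The 0xffffff masks stand in
   for the PSI<->SI zero-extend and truncate steps.  */
static unsigned int ATTRIBUTE_UNUSED
m32c_mulpsi_sketch (unsigned int b, unsigned int i)
{
  unsigned int wb = b & 0xffffff;	/* zero_extendpsisi2 on operand 1 */
  unsigned int wi = i & 0xffffff;	/* operand 2, widened to SImode */
  unsigned int prod = wb * wi;		/* full SImode multiply */
  return prod & 0xffffff;		/* truncsipsi2 back to 24 bits */
}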
3852
38b2d076
DD
3853/* Pattern Output Functions */
3854
07127a0a
DD
3855int
3856m32c_expand_movcc (rtx *operands)
3857{
3858 rtx rel = operands[1];
0166ff05 3859
07127a0a
DD
3860 if (GET_CODE (rel) != EQ && GET_CODE (rel) != NE)
3861 return 1;
3862 if (GET_CODE (operands[2]) != CONST_INT
3863 || GET_CODE (operands[3]) != CONST_INT)
3864 return 1;
07127a0a
DD
3865 if (GET_CODE (rel) == NE)
3866 {
3867 rtx tmp = operands[2];
3868 operands[2] = operands[3];
3869 operands[3] = tmp;
f90b7a5a 3870 rel = gen_rtx_EQ (GET_MODE (rel), XEXP (rel, 0), XEXP (rel, 1));
07127a0a 3871 }
0166ff05 3872
0166ff05
DD
3873 emit_move_insn (operands[0],
3874 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
f90b7a5a 3875 rel,
0166ff05
DD
3876 operands[2],
3877 operands[3]));
07127a0a
DD
3878 return 0;
3879}
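
/* An illustration of the swap above: a source-level "a != b ? 4 : 7"
   arrives here with a NE condition and is rewritten as the equivalent
   "a == b ? 7 : 4", presumably so only the EQ form of the underlying
   conditional-move pattern needs to exist.  */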
3880
3881/* Used for the "insv" pattern.  Return nonzero on failure, zero once
   the insert has been emitted.  */
3882int
3883m32c_expand_insv (rtx *operands)
3884{
3885 rtx op0, src0, p;
3886 int mask;
3887
3888 if (INTVAL (operands[1]) != 1)
3889 return 1;
3890
9cb96754
N
3891 /* Our insv opcode (bset, bclr) can only insert a one-bit constant. */
3892 if (GET_CODE (operands[3]) != CONST_INT)
3893 return 1;
3894 if (INTVAL (operands[3]) != 0
3895 && INTVAL (operands[3]) != 1
3896 && INTVAL (operands[3]) != -1)
3897 return 1;
3898
07127a0a
DD
3899 mask = 1 << INTVAL (operands[2]);
3900
3901 op0 = operands[0];
3902 if (GET_CODE (op0) == SUBREG
3903 && SUBREG_BYTE (op0) == 0)
3904 {
3905 rtx sub = SUBREG_REG (op0);
3906 if (GET_MODE (sub) == HImode || GET_MODE (sub) == QImode)
3907 op0 = sub;
3908 }
3909
b3a13419 3910 if (!can_create_pseudo_p ()
07127a0a
DD
3911 || (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0)))
3912 src0 = op0;
3913 else
3914 {
3915 src0 = gen_reg_rtx (GET_MODE (op0));
3916 emit_move_insn (src0, op0);
3917 }
3918
3919 if (GET_MODE (op0) == HImode
3920 && INTVAL (operands[2]) >= 8
444d6efe 3921 && GET_CODE (op0) == MEM)
07127a0a
DD
3922 {
3923 /* We are little endian. */
0a81f074
RS
3924 rtx new_mem = gen_rtx_MEM (QImode, plus_constant (Pmode,
3925 XEXP (op0, 0), 1));
07127a0a
DD
3926 MEM_COPY_ATTRIBUTES (new_mem, op0);
3927 mask >>= 8;
3928 }
3929
8e4edce7
DD
3930 /* First, we generate a mask with the correct polarity. If we are
3931 storing a zero, we want an AND mask, so invert it. */
3932 if (INTVAL (operands[3]) == 0)
07127a0a 3933 {
16659fcf 3934 /* Storing a zero, use an AND mask */
07127a0a
DD
3935 if (GET_MODE (op0) == HImode)
3936 mask ^= 0xffff;
3937 else
3938 mask ^= 0xff;
3939 }
8e4edce7
DD
3940 /* Now we need to properly sign-extend the mask in case we need to
3941 fall back to an AND or OR opcode. */
07127a0a
DD
3942 if (GET_MODE (op0) == HImode)
3943 {
3944 if (mask & 0x8000)
3945 mask -= 0x10000;
3946 }
3947 else
3948 {
3949 if (mask & 0x80)
3950 mask -= 0x100;
3951 }
3952
3953 switch ( (INTVAL (operands[3]) ? 4 : 0)
3954 + ((GET_MODE (op0) == HImode) ? 2 : 0)
3955 + (TARGET_A24 ? 1 : 0))
3956 {
3957 case 0: p = gen_andqi3_16 (op0, src0, GEN_INT (mask)); break;
3958 case 1: p = gen_andqi3_24 (op0, src0, GEN_INT (mask)); break;
3959 case 2: p = gen_andhi3_16 (op0, src0, GEN_INT (mask)); break;
3960 case 3: p = gen_andhi3_24 (op0, src0, GEN_INT (mask)); break;
3961 case 4: p = gen_iorqi3_16 (op0, src0, GEN_INT (mask)); break;
3962 case 5: p = gen_iorqi3_24 (op0, src0, GEN_INT (mask)); break;
3963 case 6: p = gen_iorhi3_16 (op0, src0, GEN_INT (mask)); break;
3964 case 7: p = gen_iorhi3_24 (op0, src0, GEN_INT (mask)); break;
653e2568 3965 default: p = NULL_RTX; break; /* Not reached, but silences a warning. */
07127a0a
DD
3966 }
3967
3968 emit_insn (p);
3969 return 0;
3970}
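
/* A standalone sketch of the mask computation above; illustration
   only, never called, and it ignores the HImode-MEM byte-split case.
   E.g. inserting a constant 0 into bit 2 of a QImode destination gives
   1 << 2 = 0x04, inverted to 0xfb for the AND form and sign-extended
   to -5; inserting a 1 keeps 0x04 and uses the IOR form.  */
static int ATTRIBUTE_UNUSED
m32c_insv_mask_sketch (int bitpos, int value, bool himode)
{
  int mask = 1 << bitpos;
  if (value == 0)
    mask ^= himode ? 0xffff : 0xff;	/* build an AND (clear) mask */
  /* Sign-extend so the CONST_INT matches what the and/ior patterns
     expect.  */
  if (mask & (himode ? 0x8000 : 0x80))
    mask -= himode ? 0x10000 : 0x100;
  return mask;
}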
3971
3972const char *
3973m32c_scc_pattern(rtx *operands, RTX_CODE code)
3974{
3975 static char buf[30];
3976 if (GET_CODE (operands[0]) == REG
3977 && REGNO (operands[0]) == R0_REGNO)
3978 {
3979 if (code == EQ)
3980 return "stzx\t#1,#0,r0l";
3981 if (code == NE)
3982 return "stzx\t#0,#1,r0l";
3983 }
3984 sprintf(buf, "bm%s\t0,%%h0\n\tand.b\t#1,%%0", GET_RTX_NAME (code));
3985 return buf;
3986}
3987
5abd2125
JS
3988/* Encode symbol attributes of a SYMBOL_REF into its
3989 SYMBOL_REF_FLAGS. */
3990static void
3991m32c_encode_section_info (tree decl, rtx rtl, int first)
3992{
3993 int extra_flags = 0;
3994
3995 default_encode_section_info (decl, rtl, first);
3996 if (TREE_CODE (decl) == FUNCTION_DECL
3997 && m32c_special_page_vector_p (decl))
3998
3999 extra_flags = SYMBOL_FLAG_FUNCVEC_FUNCTION;
4000
4001 if (extra_flags)
4002 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= extra_flags;
4003}
4004
38b2d076
DD
4005/* Returns TRUE if the current function is a leaf, and thus we can
4006   determine which registers an interrupt function really needs to
4007   save.  The logic below is mostly about finding the insn sequence
4008   that makes up the function itself, as opposed to any nested
4009   sequence that might currently be open.  */
4010static int
4011m32c_leaf_function_p (void)
4012{
38b2d076
DD
4013 int rv;
4014
614d5bd8 4015 push_topmost_sequence ();
38b2d076 4016 rv = leaf_function_p ();
614d5bd8 4017 pop_topmost_sequence ();
38b2d076
DD
4018 return rv;
4019}
4020
4021/* Returns TRUE if the current function needs to use the ENTER/EXIT
4022 opcodes. If the function doesn't need the frame base or stack
4023 pointer, it can use the simpler RTS opcode. */
4024static bool
4025m32c_function_needs_enter (void)
4026{
b32d5189 4027 rtx_insn *insn;
38b2d076
DD
4028 rtx sp = gen_rtx_REG (Pmode, SP_REGNO);
4029 rtx fb = gen_rtx_REG (Pmode, FB_REGNO);
4030
614d5bd8
AM
4031 for (insn = get_topmost_sequence ()->first; insn; insn = NEXT_INSN (insn))
4032 if (NONDEBUG_INSN_P (insn))
4033 {
4034 if (reg_mentioned_p (sp, insn))
4035 return true;
4036 if (reg_mentioned_p (fb, insn))
4037 return true;
4038 }
38b2d076
DD
4039 return false;
4040}
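
/* For example, a leaf function that never mentions fb or sp in its
   body (say, one that simply returns a value already sitting in r0)
   falls through both tests above, so the prologue/epilogue code can
   use a bare RTS instead of ENTER/EXITD.  */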
4041
4042/* Mark all the subexpressions of the PARALLEL rtx PAR as
4043 frame-related. Return PAR.
4044
4045 dwarf2out.c:dwarf2out_frame_debug_expr ignores sub-expressions of a
4046 PARALLEL rtx other than the first if they do not have the
4047 FRAME_RELATED flag set on them. So this function is handy for
4048 marking up 'enter' instructions. */
4049static rtx
4050m32c_all_frame_related (rtx par)
4051{
4052 int len = XVECLEN (par, 0);
4053 int i;
4054
4055 for (i = 0; i < len; i++)
4056 F (XVECEXP (par, 0, i));
4057
4058 return par;
4059}
4060
4061/* Emits the prologue. See the frame layout comment earlier in this
4062 file. We can reserve up to 256 bytes with the ENTER opcode, beyond
4063 that we manually update sp. */
4064void
4065m32c_emit_prologue (void)
4066{
4067 int frame_size, extra_frame_size = 0, reg_save_size;
4068 int complex_prologue = 0;
4069
4070 cfun->machine->is_leaf = m32c_leaf_function_p ();
4071 if (interrupt_p (cfun->decl))
4072 {
4073 cfun->machine->is_interrupt = 1;
4074 complex_prologue = 1;
4075 }
65655f79
DD
4076 else if (bank_switch_p (cfun->decl))
4077 warning (OPT_Wattributes,
4078 "%<bank_switch%> has no effect on non-interrupt functions");
38b2d076
DD
4079
4080 reg_save_size = m32c_pushm_popm (PP_justcount);
4081
4082 if (interrupt_p (cfun->decl))
65655f79
DD
4083 {
4084 if (bank_switch_p (cfun->decl))
4085 emit_insn (gen_fset_b ());
4086 else if (cfun->machine->intr_pushm)
4087 emit_insn (gen_pushm (GEN_INT (cfun->machine->intr_pushm)));
4088 }
38b2d076
DD
4089
4090 frame_size =
4091 m32c_initial_elimination_offset (FB_REGNO, SP_REGNO) - reg_save_size;
4092 if (frame_size == 0
38b2d076
DD
4093 && !m32c_function_needs_enter ())
4094 cfun->machine->use_rts = 1;
4095
ed1332ee
NC
4096 if (flag_stack_usage_info)
4097 current_function_static_stack_size = frame_size;
4098
38b2d076
DD
4099 if (frame_size > 254)
4100 {
4101 extra_frame_size = frame_size - 254;
4102 frame_size = 254;
4103 }
4104 if (cfun->machine->use_rts == 0)
4105 F (emit_insn (m32c_all_frame_related
4106 (TARGET_A16
fa9fd28a
RIL
4107 ? gen_prologue_enter_16 (GEN_INT (frame_size + 2))
4108 : gen_prologue_enter_24 (GEN_INT (frame_size + 4)))));
38b2d076
DD
4109
4110 if (extra_frame_size)
4111 {
4112 complex_prologue = 1;
4113 if (TARGET_A16)
4114 F (emit_insn (gen_addhi3 (gen_rtx_REG (HImode, SP_REGNO),
4115 gen_rtx_REG (HImode, SP_REGNO),
4116 GEN_INT (-extra_frame_size))));
4117 else
4118 F (emit_insn (gen_addpsi3 (gen_rtx_REG (PSImode, SP_REGNO),
4119 gen_rtx_REG (PSImode, SP_REGNO),
4120 GEN_INT (-extra_frame_size))));
4121 }
4122
4123 complex_prologue += m32c_pushm_popm (PP_pushm);
4124
4125 /* This just emits a comment into the .s file for debugging. */
4126 if (complex_prologue)
4127 emit_insn (gen_prologue_end ());
4128}
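
/* A worked example of the split above (illustration only): a 300-byte
   frame becomes an ENTER that reserves the first 254 bytes (plus the
   small fixed amount the enter pattern adds), followed by an explicit
   46-byte subtraction from sp.  */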
4129
4130/* Likewise, for the epilogue. The only exception is that, for
4131 interrupts, we must manually unwind the frame as the REIT opcode
4132 doesn't do that. */
4133void
4134m32c_emit_epilogue (void)
4135{
f0679612
DD
4136 int popm_count = m32c_pushm_popm (PP_justcount);
4137
38b2d076 4138 /* This just emits a comment into the .s file for debugging. */
f0679612 4139 if (popm_count > 0 || cfun->machine->is_interrupt)
38b2d076
DD
4140 emit_insn (gen_epilogue_start ());
4141
f0679612
DD
4142 if (popm_count > 0)
4143 m32c_pushm_popm (PP_popm);
38b2d076
DD
4144
4145 if (cfun->machine->is_interrupt)
4146 {
ef4bddc2 4147 machine_mode spmode = TARGET_A16 ? HImode : PSImode;
38b2d076 4148
65655f79
DD
4149 /* REIT clears B flag and restores $fp for us, but we still
4150 have to fix up the stack. USE_RTS just means we didn't
4151 emit ENTER. */
4152 if (!cfun->machine->use_rts)
4153 {
4154 emit_move_insn (gen_rtx_REG (spmode, A0_REGNO),
4155 gen_rtx_REG (spmode, FP_REGNO));
4156 emit_move_insn (gen_rtx_REG (spmode, SP_REGNO),
4157 gen_rtx_REG (spmode, A0_REGNO));
4158 /* We can't just add this to the POPM because it would be in
4159 the wrong order, and wouldn't fix the stack if we're bank
4160 switching. */
4161 if (TARGET_A16)
4162 emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, FP_REGNO)));
4163 else
4164 emit_insn (gen_poppsi (gen_rtx_REG (PSImode, FP_REGNO)));
4165 }
4166 if (!bank_switch_p (cfun->decl) && cfun->machine->intr_pushm)
4167 emit_insn (gen_popm (GEN_INT (cfun->machine->intr_pushm)));
4168
402f2db8
DD
4169 /* The FREIT (Fast REturn from InTerrupt) instruction should be
4170 generated only for M32C/M32CM targets (generate the REIT
4171 instruction otherwise). */
65655f79 4172 if (fast_interrupt_p (cfun->decl))
402f2db8
DD
4173 {
4174	  /* Check if the fast_interrupt attribute is set for an M32C or
	     M32CM target.  */
4175 if (TARGET_A24)
4176 {
4177 emit_jump_insn (gen_epilogue_freit ());
4178 }
4179	  /* If the fast_interrupt attribute is set for an R8C or M16C
4180	     target, ignore the attribute and generate a REIT
4181	     instruction instead.  */
4182 else
4183 {
4184 warning (OPT_Wattributes,
4185 "%<fast_interrupt%> attribute directive ignored");
4186 emit_jump_insn (gen_epilogue_reit_16 ());
4187 }
4188 }
65655f79 4189 else if (TARGET_A16)
0e0642aa
RIL
4190 emit_jump_insn (gen_epilogue_reit_16 ());
4191 else
4192 emit_jump_insn (gen_epilogue_reit_24 ());
38b2d076
DD
4193 }
4194 else if (cfun->machine->use_rts)
4195 emit_jump_insn (gen_epilogue_rts ());
0e0642aa
RIL
4196 else if (TARGET_A16)
4197 emit_jump_insn (gen_epilogue_exitd_16 ());
38b2d076 4198 else
0e0642aa 4199 emit_jump_insn (gen_epilogue_exitd_24 ());
38b2d076
DD
4200}
4201
4202void
4203m32c_emit_eh_epilogue (rtx ret_addr)
4204{
4205 /* R0[R2] has the stack adjustment. R1[R3] has the address to
4206 return to. We have to fudge the stack, pop everything, pop SP
4207 (fudged), and return (fudged). This is actually easier to do in
4208 assembler, so punt to libgcc. */
4209 emit_jump_insn (gen_eh_epilogue (ret_addr, cfun->machine->eh_stack_adjust));
c41c1387 4210 /* emit_clobber (gen_rtx_REG (HImode, R0L_REGNO)); */
38b2d076
DD
4211}
4212
16659fcf
DD
4213/* Indicate which flags must be properly set for a given conditional. */
4214static int
4215flags_needed_for_conditional (rtx cond)
4216{
4217 switch (GET_CODE (cond))
4218 {
4219 case LE:
4220 case GT:
4221 return FLAGS_OSZ;
4222 case LEU:
4223 case GTU:
4224 return FLAGS_ZC;
4225 case LT:
4226 case GE:
4227 return FLAGS_OS;
4228 case LTU:
4229 case GEU:
4230 return FLAGS_C;
4231 case EQ:
4232 case NE:
4233 return FLAGS_Z;
4234 default:
4235 return FLAGS_N;
4236 }
4237}
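
/* For example, an EQ or NE branch needs only the Z flag, so a
   preceding insn whose flags attribute guarantees Z can make the
   following compare redundant; a GTU branch also needs C, which
   m32c_compare_redundant below deliberately refuses to rely on.  */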
4238
4239#define DEBUG_CMP 0
4240
4241/* Returns true if a compare insn is redundant because it would only
4242 set flags that are already set correctly. */
4243static bool
84034c69 4244m32c_compare_redundant (rtx_insn *cmp, rtx *operands)
16659fcf
DD
4245{
4246 int flags_needed;
4247 int pflags;
84034c69
DM
4248 rtx_insn *prev;
4249 rtx pp, next;
444d6efe 4250 rtx op0, op1;
16659fcf
DD
4251#if DEBUG_CMP
4252 int prev_icode, i;
4253#endif
4254
4255 op0 = operands[0];
4256 op1 = operands[1];
16659fcf
DD
4257
4258#if DEBUG_CMP
4259 fprintf(stderr, "\n\033[32mm32c_compare_redundant\033[0m\n");
4260 debug_rtx(cmp);
4261 for (i=0; i<2; i++)
4262 {
4263 fprintf(stderr, "operands[%d] = ", i);
4264 debug_rtx(operands[i]);
4265 }
4266#endif
4267
4268 next = next_nonnote_insn (cmp);
4269 if (!next || !INSN_P (next))
4270 {
4271#if DEBUG_CMP
4272 fprintf(stderr, "compare not followed by insn\n");
4273 debug_rtx(next);
4274#endif
4275 return false;
4276 }
4277 if (GET_CODE (PATTERN (next)) == SET
4278 && GET_CODE (XEXP ( PATTERN (next), 1)) == IF_THEN_ELSE)
4279 {
4280 next = XEXP (XEXP (PATTERN (next), 1), 0);
4281 }
4282 else if (GET_CODE (PATTERN (next)) == SET)
4283 {
4284 /* If this is a conditional, flags_needed will be something
4285 other than FLAGS_N, which we test below. */
4286 next = XEXP (PATTERN (next), 1);
4287 }
4288 else
4289 {
4290#if DEBUG_CMP
4291 fprintf(stderr, "compare not followed by conditional\n");
4292 debug_rtx(next);
4293#endif
4294 return false;
4295 }
4296#if DEBUG_CMP
4297 fprintf(stderr, "conditional is: ");
4298 debug_rtx(next);
4299#endif
4300
4301 flags_needed = flags_needed_for_conditional (next);
4302 if (flags_needed == FLAGS_N)
4303 {
4304#if DEBUG_CMP
4305 fprintf(stderr, "compare not followed by conditional\n");
4306 debug_rtx(next);
4307#endif
4308 return false;
4309 }
4310
4311 /* Compare doesn't set overflow and carry the same way that
4312 arithmetic instructions do, so we can't replace those. */
4313 if (flags_needed & FLAGS_OC)
4314 return false;
4315
4316 prev = cmp;
4317 do {
4318 prev = prev_nonnote_insn (prev);
4319 if (!prev)
4320 {
4321#if DEBUG_CMP
4322 fprintf(stderr, "No previous insn.\n");
4323#endif
4324 return false;
4325 }
4326 if (!INSN_P (prev))
4327 {
4328#if DEBUG_CMP
4329 fprintf(stderr, "Previous insn is a non-insn.\n");
4330#endif
4331 return false;
4332 }
4333 pp = PATTERN (prev);
4334 if (GET_CODE (pp) != SET)
4335 {
4336#if DEBUG_CMP
4337 fprintf(stderr, "Previous insn is not a SET.\n");
4338#endif
4339 return false;
4340 }
4341 pflags = get_attr_flags (prev);
4342
4343	 /* Looking up the attributes of previous insns corrupts the recog
4344	    tables, so re-recognize CMP before we use it again.  */
4345 INSN_UID (cmp) = -1;
4346 recog (PATTERN (cmp), cmp, 0);
4347
4348 if (pflags == FLAGS_N
4349 && reg_mentioned_p (op0, pp))
4350 {
4351#if DEBUG_CMP
4352 fprintf(stderr, "intermediate non-flags insn uses op:\n");
4353 debug_rtx(prev);
4354#endif
4355 return false;
4356 }
b3c5a409
DD
4357
4358 /* Check for comparisons against memory - between volatiles and
4359 aliases, we just can't risk this one. */
4360 if (GET_CODE (operands[0]) == MEM
4361	 || GET_CODE (operands[1]) == MEM)
4362 {
4363#if DEBUG_CMP
4364 fprintf(stderr, "comparisons with memory:\n");
4365 debug_rtx(prev);
4366#endif
4367 return false;
4368 }
4369
4370 /* Check for PREV changing a register that's used to compute a
4371 value in CMP, even if it doesn't otherwise change flags. */
4372 if (GET_CODE (operands[0]) == REG
4373 && rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[0]))
4374 {
4375#if DEBUG_CMP
4376 fprintf(stderr, "sub-value affected, op0:\n");
4377 debug_rtx(prev);
4378#endif
4379 return false;
4380 }
4381 if (GET_CODE (operands[1]) == REG
4382 && rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[1]))
4383 {
4384#if DEBUG_CMP
4385 fprintf(stderr, "sub-value affected, op1:\n");
4386 debug_rtx(prev);
4387#endif
4388 return false;
4389 }
4390
16659fcf
DD
4391 } while (pflags == FLAGS_N);
4392#if DEBUG_CMP
4393 fprintf(stderr, "previous flag-setting insn:\n");
4394 debug_rtx(prev);
4395 debug_rtx(pp);
4396#endif
4397
4398 if (GET_CODE (pp) == SET
4399 && GET_CODE (XEXP (pp, 0)) == REG
4400 && REGNO (XEXP (pp, 0)) == FLG_REGNO
4401 && GET_CODE (XEXP (pp, 1)) == COMPARE)
4402 {
4403 /* Adjacent cbranches must have the same operands to be
4404 redundant. */
4405 rtx pop0 = XEXP (XEXP (pp, 1), 0);
4406 rtx pop1 = XEXP (XEXP (pp, 1), 1);
4407#if DEBUG_CMP
4408 fprintf(stderr, "adjacent cbranches\n");
4409 debug_rtx(pop0);
4410 debug_rtx(pop1);
4411#endif
4412 if (rtx_equal_p (op0, pop0)
4413 && rtx_equal_p (op1, pop1))
4414 return true;
4415#if DEBUG_CMP
4416 fprintf(stderr, "prev cmp not same\n");
4417#endif
4418 return false;
4419 }
4420
4421 /* Else the previous insn must be a SET, with either the source or
4422 dest equal to operands[0], and operands[1] must be zero. */
4423
4424 if (!rtx_equal_p (op1, const0_rtx))
4425 {
4426#if DEBUG_CMP
4427 fprintf(stderr, "operands[1] not const0_rtx\n");
4428#endif
4429 return false;
4430 }
4431 if (GET_CODE (pp) != SET)
4432 {
4433#if DEBUG_CMP
4434 fprintf (stderr, "pp not set\n");
4435#endif
4436 return false;
4437 }
4438 if (!rtx_equal_p (op0, SET_SRC (pp))
4439 && !rtx_equal_p (op0, SET_DEST (pp)))
4440 {
4441#if DEBUG_CMP
4442 fprintf(stderr, "operands[0] not found in set\n");
4443#endif
4444 return false;
4445 }
4446
4447#if DEBUG_CMP
4448 fprintf(stderr, "cmp flags %x prev flags %x\n", flags_needed, pflags);
4449#endif
4450 if ((pflags & flags_needed) == flags_needed)
4451 return true;
4452
4453 return false;
4454}
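
/* A shape the function above can accept (assuming the md file's flags
   attribute says MOV sets Z from the value it moves):

	mov.w  r0,r1
	cmp.w  #0,r0
	jeq    .L5

   Here the branch needs only the Z flag, operand 1 is zero, and
   operand 0 (r0) is the source of the preceding SET, so the cmp can
   be emitted as a comment rather than an instruction.  The other
   accepted shape is two adjacent compares with identical operands.  */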
4455
4456/* Return the pattern for a compare. This will be commented out if
4457 the compare is redundant, else a normal pattern is returned. Thus,
4458 the assembler output says where the compare would have been. */
4459char *
84034c69 4460m32c_output_compare (rtx_insn *insn, rtx *operands)
16659fcf 4461{
0a2aaacc 4462 static char templ[] = ";cmp.b\t%1,%0";
16659fcf
DD
4463 /* ^ 5 */
4464
0a2aaacc 4465 templ[5] = " bwll"[GET_MODE_SIZE(GET_MODE(operands[0]))];
16659fcf
DD
4466 if (m32c_compare_redundant (insn, operands))
4467 {
4468#if DEBUG_CMP
4469 fprintf(stderr, "cbranch: cmp not needed\n");
4470#endif
0a2aaacc 4471 return templ;
16659fcf
DD
4472 }
4473
4474#if DEBUG_CMP
b3c5a409 4475 fprintf(stderr, "cbranch: cmp needed: `%s'\n", templ + 1);
16659fcf 4476#endif
0a2aaacc 4477 return templ + 1;
16659fcf
DD
4478}
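
/* Worked example (illustration only): for an HImode comparison,
   GET_MODE_SIZE is 2, so templ[5] becomes " bwll"[2], i.e. 'w'.  A
   needed compare then returns templ + 1 ("cmp.w\t%1,%0"), while a
   redundant one returns templ itself (";cmp.w\t%1,%0"), which the
   assembler treats as a comment, leaving a marker where the compare
   would have been.  */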
4479
5abd2125
JS
4480#undef TARGET_ENCODE_SECTION_INFO
4481#define TARGET_ENCODE_SECTION_INFO m32c_encode_section_info
4482
b52b1749
AS
4483/* Even when the frame pointer ends up unused, we detect that ourselves
4484   in the prologue code.  The stack pointer's addressing isn't as
4485   flexible as the frame pointer's, so we always say we need one.  */
4486
4487#undef TARGET_FRAME_POINTER_REQUIRED
4488#define TARGET_FRAME_POINTER_REQUIRED hook_bool_void_true
4489
c43f4279
RS
4490#undef TARGET_HARD_REGNO_NREGS
4491#define TARGET_HARD_REGNO_NREGS m32c_hard_regno_nregs
f939c3e6
RS
4492#undef TARGET_HARD_REGNO_MODE_OK
4493#define TARGET_HARD_REGNO_MODE_OK m32c_hard_regno_mode_ok
99e1629f
RS
4494#undef TARGET_MODES_TIEABLE_P
4495#define TARGET_MODES_TIEABLE_P m32c_modes_tieable_p
f939c3e6 4496
0d803030
RS
4497#undef TARGET_CAN_CHANGE_MODE_CLASS
4498#define TARGET_CAN_CHANGE_MODE_CLASS m32c_can_change_mode_class
4499
38b2d076
DD
4500/* The Global `targetm' Variable. */
4501
4502struct gcc_target targetm = TARGET_INITIALIZER;
4503
4504#include "gt-m32c.h"