/* Target Code for R8C/M16C/M32C
   Copyright (C) 2005-2020 Free Software Foundation, Inc.
   Contributed by Red Hat.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#define IN_TARGET_CODE 1

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "stringpool.h"
#include "attribs.h"
#include "df.h"
#include "memmodel.h"
#include "tm_p.h"
#include "optabs.h"
#include "regs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "reload.h"
#include "stor-layout.h"
#include "varasm.h"
#include "calls.h"
#include "explow.h"
#include "expr.h"
#include "tm-constrs.h"
#include "builtins.h"

/* This file should be included last.  */
#include "target-def.h"

/* Prototypes */

/* Used by m32c_pushm_popm.  */
typedef enum
{
  PP_pushm,
  PP_popm,
  PP_justcount
} Push_Pop_Type;

static bool m32c_function_needs_enter (void);
static tree interrupt_handler (tree *, tree, tree, int, bool *);
static tree function_vector_handler (tree *, tree, tree, int, bool *);
static int interrupt_p (tree node);
static int bank_switch_p (tree node);
static int fast_interrupt_p (tree node);
static int interrupt_p (tree node);
static bool m32c_asm_integer (rtx, unsigned int, int);
static int m32c_comp_type_attributes (const_tree, const_tree);
static bool m32c_fixed_condition_code_regs (unsigned int *, unsigned int *);
static struct machine_function *m32c_init_machine_status (void);
static void m32c_insert_attributes (tree, tree *);
static bool m32c_legitimate_address_p (machine_mode, rtx, bool);
static bool m32c_addr_space_legitimate_address_p (machine_mode, rtx, bool, addr_space_t);
static rtx m32c_function_arg (cumulative_args_t, const function_arg_info &);
static bool m32c_pass_by_reference (cumulative_args_t,
				    const function_arg_info &);
static void m32c_function_arg_advance (cumulative_args_t,
				       const function_arg_info &);
static unsigned int m32c_function_arg_boundary (machine_mode, const_tree);
static int m32c_pushm_popm (Push_Pop_Type);
static bool m32c_strict_argument_naming (cumulative_args_t);
static rtx m32c_struct_value_rtx (tree, int);
static rtx m32c_subreg (machine_mode, rtx, machine_mode, int);
static int need_to_save (int);
static rtx m32c_function_value (const_tree, const_tree, bool);
static rtx m32c_libcall_value (machine_mode, const_rtx);

/* Returns true if an address is specified, else false.  */
static bool m32c_get_pragma_address (const char *varname, unsigned *addr);

static bool m32c_hard_regno_mode_ok (unsigned int, machine_mode);

#define SYMBOL_FLAG_FUNCVEC_FUNCTION	(SYMBOL_FLAG_MACH_DEP << 0)

#define streq(a,b) (strcmp ((a), (b)) == 0)

/* Internal support routines */

/* Debugging statements are tagged with DEBUG0 only so that they can
   be easily enabled individually, by replacing the '0' with '1' as
   needed.  */
#define DEBUG0 0
#define DEBUG1 1

#if DEBUG0
#include "print-tree.h"
/* This is needed by some of the commented-out debug statements
   below.  */
static char const *class_names[LIM_REG_CLASSES] = REG_CLASS_NAMES;
#endif
static int class_contents[LIM_REG_CLASSES][1] = REG_CLASS_CONTENTS;

/* These are all to support encode_pattern().  */
static char pattern[30], *patternp;
static GTY(()) rtx patternr[30];
#define RTX_IS(x) (streq (pattern, x))

/* Some macros to simplify the logic throughout this file.  */
#define IS_MEM_REGNO(regno) ((regno) >= MEM0_REGNO && (regno) <= MEM7_REGNO)
#define IS_MEM_REG(rtx) (GET_CODE (rtx) == REG && IS_MEM_REGNO (REGNO (rtx)))

#define IS_CR_REGNO(regno) ((regno) >= SB_REGNO && (regno) <= PC_REGNO)
#define IS_CR_REG(rtx) (GET_CODE (rtx) == REG && IS_CR_REGNO (REGNO (rtx)))

static int
far_addr_space_p (rtx x)
{
  if (GET_CODE (x) != MEM)
    return 0;
#if DEBUG0
  fprintf(stderr, "\033[35mfar_addr_space: "); debug_rtx(x);
  fprintf(stderr, " = %d\033[0m\n", MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR);
#endif
  return MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR;
}

/* We do most RTX matching by converting the RTX into a string, and
   using string compares.  This vastly simplifies the logic in many of
   the functions in this file.

   On exit, pattern[] has the encoded string (use RTX_IS("...") to
   compare it) and patternr[] has pointers to the nodes in the RTX
   corresponding to each character in the encoded string.  The latter
   is mostly used by print_operand().

   Unrecognized patterns have '?' in them; this shows up when the
   assembler complains about syntax errors.
*/

static void
encode_pattern_1 (rtx x)
{
  int i;

  if (patternp == pattern + sizeof (pattern) - 2)
    {
      patternp[-1] = '?';
      return;
    }

  patternr[patternp - pattern] = x;

  switch (GET_CODE (x))
    {
    case REG:
      *patternp++ = 'r';
      break;
    case SUBREG:
      if (GET_MODE_SIZE (GET_MODE (x)) !=
	  GET_MODE_SIZE (GET_MODE (XEXP (x, 0))))
	*patternp++ = 'S';
      if (GET_MODE (x) == PSImode
	  && GET_CODE (XEXP (x, 0)) == REG)
	*patternp++ = 'S';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case MEM:
      *patternp++ = 'm';
      /* FALLTHRU */
    case CONST:
      encode_pattern_1 (XEXP (x, 0));
      break;
    case SIGN_EXTEND:
      *patternp++ = '^';
      *patternp++ = 'S';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case ZERO_EXTEND:
      *patternp++ = '^';
      *patternp++ = 'Z';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case PLUS:
      *patternp++ = '+';
      encode_pattern_1 (XEXP (x, 0));
      encode_pattern_1 (XEXP (x, 1));
      break;
    case PRE_DEC:
      *patternp++ = '>';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case POST_INC:
      *patternp++ = '<';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case LO_SUM:
      *patternp++ = 'L';
      encode_pattern_1 (XEXP (x, 0));
      encode_pattern_1 (XEXP (x, 1));
      break;
    case HIGH:
      *patternp++ = 'H';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case SYMBOL_REF:
      *patternp++ = 's';
      break;
    case LABEL_REF:
      *patternp++ = 'l';
      break;
    case CODE_LABEL:
      *patternp++ = 'c';
      break;
    case CONST_INT:
    case CONST_DOUBLE:
      *patternp++ = 'i';
      break;
    case UNSPEC:
      *patternp++ = 'u';
      *patternp++ = '0' + XCINT (x, 1, UNSPEC);
      for (i = 0; i < XVECLEN (x, 0); i++)
	encode_pattern_1 (XVECEXP (x, 0, i));
      break;
    case USE:
      *patternp++ = 'U';
      break;
    case PARALLEL:
      *patternp++ = '|';
      for (i = 0; i < XVECLEN (x, 0); i++)
	encode_pattern_1 (XVECEXP (x, 0, i));
      break;
    case EXPR_LIST:
      *patternp++ = 'E';
      encode_pattern_1 (XEXP (x, 0));
      if (XEXP (x, 1))
	encode_pattern_1 (XEXP (x, 1));
      break;
    default:
      *patternp++ = '?';
#if DEBUG0
      fprintf (stderr, "can't encode pattern %s\n",
	       GET_RTX_NAME (GET_CODE (x)));
      debug_rtx (x);
#endif
      break;
    }
}

static void
encode_pattern (rtx x)
{
  patternp = pattern;
  encode_pattern_1 (x);
  *patternp = 0;
}
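/* Worked example (added for illustration): the address
   (mem (plus (reg a0) (const_int 4))) encodes as "m+ri" -- 'm' for
   the MEM, '+' for the PLUS, 'r' for the REG and 'i' for the
   CONST_INT -- with patternr[2] pointing at the REG and patternr[3]
   at the CONST_INT, which is exactly how callers below index into
   patternr[] after testing RTX_IS ("m+ri").  A minimal sketch of such
   a caller (hypothetical helper, not part of the port):  */
#if 0
static bool
is_reg_plus_const_mem (rtx x)
{
  encode_pattern (x);
  return RTX_IS ("m+ri");
}
#endif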

/* Since register names indicate the mode they're used in, we need a
   way to determine which name to refer to the register with.  Called
   by print_operand().  */

static const char *
reg_name_with_mode (int regno, machine_mode mode)
{
  int mlen = GET_MODE_SIZE (mode);
  if (regno == R0_REGNO && mlen == 1)
    return "r0l";
  if (regno == R0_REGNO && (mlen == 3 || mlen == 4))
    return "r2r0";
  if (regno == R0_REGNO && mlen == 6)
    return "r2r1r0";
  if (regno == R0_REGNO && mlen == 8)
    return "r3r1r2r0";
  if (regno == R1_REGNO && mlen == 1)
    return "r1l";
  if (regno == R1_REGNO && (mlen == 3 || mlen == 4))
    return "r3r1";
  if (regno == A0_REGNO && TARGET_A16 && (mlen == 3 || mlen == 4))
    return "a1a0";
  return reg_names[regno];
}
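/* For instance, reg_name_with_mode (R0_REGNO, QImode) returns "r0l"
   and an SImode use of the same register returns "r2r0", while
   anything not special-cased above (e.g. HImode) falls back to
   reg_names[] and prints as plain "r0".  (Illustrative summary of the
   table above.)  */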

/* How many bytes a register uses on stack when it's pushed.  We need
   to know this because the push opcode needs to explicitly indicate
   the size of the register, even though the name of the register
   already tells it that.  Used by m32c_output_reg_{push,pop}, which
   is only used through calls to ASM_OUTPUT_REG_{PUSH,POP}.  */

static int
reg_push_size (int regno)
{
  switch (regno)
    {
    case R0_REGNO:
    case R1_REGNO:
      return 2;
    case R2_REGNO:
    case R3_REGNO:
    case FLG_REGNO:
      return 2;
    case A0_REGNO:
    case A1_REGNO:
    case SB_REGNO:
    case FB_REGNO:
    case SP_REGNO:
      if (TARGET_A16)
	return 2;
      else
	return 3;
    default:
      gcc_unreachable ();
    }
}

/* Given two register classes, find the largest intersection between
   them.  If there is no intersection, return RETURNED_IF_EMPTY
   instead.  */
static reg_class_t
reduce_class (reg_class_t original_class, reg_class_t limiting_class,
	      reg_class_t returned_if_empty)
{
  HARD_REG_SET cc;
  int i;
  reg_class_t best = NO_REGS;
  unsigned int best_size = 0;

  if (original_class == limiting_class)
    return original_class;

  cc = reg_class_contents[original_class] & reg_class_contents[limiting_class];

  for (i = 0; i < LIM_REG_CLASSES; i++)
    {
      if (hard_reg_set_subset_p (reg_class_contents[i], cc))
	if (best_size < reg_class_size[i])
	  {
	    best = (reg_class_t) i;
	    best_size = reg_class_size[i];
	  }

    }
  if (best == NO_REGS)
    return returned_if_empty;
  return best;
}

/* Used by m32c_register_move_cost to determine if a move is
   impossibly expensive.  */
static bool
class_can_hold_mode (reg_class_t rclass, machine_mode mode)
{
  /* Cache the results:  0=untested 1=no 2=yes */
  static char results[LIM_REG_CLASSES][MAX_MACHINE_MODE];

  if (results[(int) rclass][mode] == 0)
    {
      int r;
      results[rclass][mode] = 1;
      for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
	if (in_hard_reg_set_p (reg_class_contents[(int) rclass], mode, r)
	    && m32c_hard_regno_mode_ok (r, mode))
	  {
	    results[rclass][mode] = 2;
	    break;
	  }
    }

#if DEBUG0
  fprintf (stderr, "class %s can hold %s? %s\n",
	   class_names[(int) rclass], mode_name[mode],
	   (results[rclass][mode] == 2) ? "yes" : "no");
#endif
  return results[(int) rclass][mode] == 2;
}

/* Run-time Target Specification.  */

/* Memregs are memory locations that gcc treats like general
   registers, as there are a limited number of true registers and the
   m32c families can use memory in most places that registers can be
   used.

   However, since memory accesses are more expensive than registers,
   we allow the user to limit the number of memregs available, in
   order to try to persuade gcc to try harder to use real registers.

   Memregs are provided by lib1funcs.S.
*/

int ok_to_change_target_memregs = TRUE;

/* Implements TARGET_OPTION_OVERRIDE.  */

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE m32c_option_override

static void
m32c_option_override (void)
{
  /* We limit memregs to 0..16, and provide a default.  */
  if (global_options_set.x_target_memregs)
    {
      if (target_memregs < 0 || target_memregs > 16)
	error ("invalid target memregs value %<%d%>", target_memregs);
    }
  else
    target_memregs = 16;

  if (TARGET_A24)
    flag_ivopts = 0;

  /* This target defaults to strict volatile bitfields.  */
  if (flag_strict_volatile_bitfields < 0 && abi_version_at_least(2))
    flag_strict_volatile_bitfields = 1;

  /* r8c/m16c have no 16-bit indirect call, so thunks are involved.
     This is always worse than an absolute call.  */
  if (TARGET_A16)
    flag_no_function_cse = 1;

  /* This wants to put insns between compares and their jumps.  */
  /* FIXME: The right solution is to properly trace the flags register
     values, but that is too much work for stage 4.  */
  flag_combine_stack_adjustments = 0;
}

#undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m32c_override_options_after_change

static void
m32c_override_options_after_change (void)
{
  if (TARGET_A16)
    flag_no_function_cse = 1;
}

/* Defining data structures for per-function information */

/* The usual; we set up our machine_function data.  */
static struct machine_function *
m32c_init_machine_status (void)
{
  return ggc_cleared_alloc<machine_function> ();
}

/* Implements INIT_EXPANDERS.  We just set up to call the above
   function.  */
void
m32c_init_expanders (void)
{
  init_machine_status = m32c_init_machine_status;
}

/* Storage Layout */

/* Register Basics */

/* Basic Characteristics of Registers */

/* Whether a mode fits in a register is complex enough to warrant a
   table.  */
static struct
{
  char qi_regs;
  char hi_regs;
  char pi_regs;
  char si_regs;
  char di_regs;
} nregs_table[FIRST_PSEUDO_REGISTER] =
{
  { 1, 1, 2, 2, 4 },	/* r0 */
  { 0, 1, 0, 0, 0 },	/* r2 */
  { 1, 1, 2, 2, 0 },	/* r1 */
  { 0, 1, 0, 0, 0 },	/* r3 */
  { 0, 1, 1, 0, 0 },	/* a0 */
  { 0, 1, 1, 0, 0 },	/* a1 */
  { 0, 1, 1, 0, 0 },	/* sb */
  { 0, 1, 1, 0, 0 },	/* fb */
  { 0, 1, 1, 0, 0 },	/* sp */
  { 1, 1, 1, 0, 0 },	/* pc */
  { 0, 0, 0, 0, 0 },	/* fl */
  { 1, 1, 1, 0, 0 },	/* ap */
  { 1, 1, 2, 2, 4 },	/* mem0 */
  { 1, 1, 2, 2, 4 },	/* mem1 */
  { 1, 1, 2, 2, 4 },	/* mem2 */
  { 1, 1, 2, 2, 4 },	/* mem3 */
  { 1, 1, 2, 2, 4 },	/* mem4 */
  { 1, 1, 2, 2, 0 },	/* mem5 */
  { 1, 1, 2, 2, 0 },	/* mem6 */
  { 1, 1, 0, 0, 0 },	/* mem7 */
};

/* Implements TARGET_CONDITIONAL_REGISTER_USAGE.  We adjust the number
   of available memregs, and select which registers need to be preserved
   across calls based on the chip family.  */

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE m32c_conditional_register_usage
void
m32c_conditional_register_usage (void)
{
  int i;

  if (target_memregs >= 0 && target_memregs <= 16)
    {
      /* The command line option is bytes, but our "registers" are
	 16-bit words.  */
      for (i = (target_memregs+1)/2; i < 8; i++)
	{
	  fixed_regs[MEM0_REGNO + i] = 1;
	  CLEAR_HARD_REG_BIT (reg_class_contents[MEM_REGS], MEM0_REGNO + i);
	}
    }

  /* M32CM and M32C preserve more registers across function calls.  */
  if (TARGET_A24)
    {
      call_used_regs[R1_REGNO] = 0;
      call_used_regs[R2_REGNO] = 0;
      call_used_regs[R3_REGNO] = 0;
      call_used_regs[A0_REGNO] = 0;
      call_used_regs[A1_REGNO] = 0;
    }
}
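/* Illustrative example: with -memregs=6 the loop above starts at
   (6+1)/2 == 3, so the word-sized memory "registers" mem3..mem7 are
   marked fixed and removed from MEM_REGS, leaving only mem0..mem2
   (six bytes' worth) available to the register allocator.  */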

/* How Values Fit in Registers */

/* Implements TARGET_HARD_REGNO_NREGS.  This is complicated by the fact that
   different registers are different sizes from each other, *and* may
   be different sizes in different chip families.  */
static unsigned int
m32c_hard_regno_nregs_1 (unsigned int regno, machine_mode mode)
{
  if (regno == FLG_REGNO && mode == CCmode)
    return 1;
  if (regno >= FIRST_PSEUDO_REGISTER)
    return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);

  if (regno >= MEM0_REGNO && regno <= MEM7_REGNO)
    return (GET_MODE_SIZE (mode) + 1) / 2;

  if (GET_MODE_SIZE (mode) <= 1)
    return nregs_table[regno].qi_regs;
  if (GET_MODE_SIZE (mode) <= 2)
    return nregs_table[regno].hi_regs;
  if (regno == A0_REGNO && mode == SImode && TARGET_A16)
    return 2;
  if ((GET_MODE_SIZE (mode) <= 3 || mode == PSImode) && TARGET_A24)
    return nregs_table[regno].pi_regs;
  if (GET_MODE_SIZE (mode) <= 4)
    return nregs_table[regno].si_regs;
  if (GET_MODE_SIZE (mode) <= 8)
    return nregs_table[regno].di_regs;
  return 0;
}

static unsigned int
m32c_hard_regno_nregs (unsigned int regno, machine_mode mode)
{
  unsigned int rv = m32c_hard_regno_nregs_1 (regno, mode);
  return rv ? rv : 1;
}
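/* Reading the nregs_table above: SImode in $r0 takes two hard
   registers (the r2r0 pair), HImode in $a0 takes one, and QImode in
   $a0 yields 0 from m32c_hard_regno_nregs_1 -- meaning QImode is not
   allowed there at all, which is what m32c_hard_regno_mode_ok below
   tests for.  (Worked example added for illustration.)  */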

/* Implement TARGET_HARD_REGNO_MODE_OK.  The above function does the work
   already; just test its return value.  */
static bool
m32c_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
{
  return m32c_hard_regno_nregs_1 (regno, mode) != 0;
}

/* Implement TARGET_MODES_TIEABLE_P.  In general, modes aren't tieable since
   registers are all different sizes.  However, since most modes are
   bigger than our registers anyway, it's easier to implement this
   function that way, leaving QImode as the only unique case.  */
static bool
m32c_modes_tieable_p (machine_mode m1, machine_mode m2)
{
  if (GET_MODE_SIZE (m1) == GET_MODE_SIZE (m2))
    return 1;

#if 0
  if (m1 == QImode || m2 == QImode)
    return 0;
#endif

  return 1;
}

/* Register Classes */

/* Implements REGNO_REG_CLASS.  */
enum reg_class
m32c_regno_reg_class (int regno)
{
  switch (regno)
    {
    case R0_REGNO:
      return R0_REGS;
    case R1_REGNO:
      return R1_REGS;
    case R2_REGNO:
      return R2_REGS;
    case R3_REGNO:
      return R3_REGS;
    case A0_REGNO:
      return A0_REGS;
    case A1_REGNO:
      return A1_REGS;
    case SB_REGNO:
      return SB_REGS;
    case FB_REGNO:
      return FB_REGS;
    case SP_REGNO:
      return SP_REGS;
    case FLG_REGNO:
      return FLG_REGS;
    default:
      if (IS_MEM_REGNO (regno))
	return MEM_REGS;
      return ALL_REGS;
    }
}

/* Implements REGNO_OK_FOR_BASE_P.  */
int
m32c_regno_ok_for_base_p (int regno)
{
  if (regno == A0_REGNO
      || regno == A1_REGNO || regno >= FIRST_PSEUDO_REGISTER)
    return 1;
  return 0;
}

/* Implements TARGET_PREFERRED_RELOAD_CLASS.  In general, prefer general
   registers of the appropriate size.  */

#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS m32c_preferred_reload_class

static reg_class_t
m32c_preferred_reload_class (rtx x, reg_class_t rclass)
{
  reg_class_t newclass = rclass;

#if DEBUG0
  fprintf (stderr, "\npreferred_reload_class for %s is ",
	   class_names[rclass]);
#endif
  if (rclass == NO_REGS)
    rclass = GET_MODE (x) == QImode ? HL_REGS : R03_REGS;

  if (reg_classes_intersect_p (rclass, CR_REGS))
    {
      switch (GET_MODE (x))
	{
	case E_QImode:
	  newclass = HL_REGS;
	  break;
	default:
	  /* newclass = HI_REGS; */
	  break;
	}
    }

  else if (newclass == QI_REGS && GET_MODE_SIZE (GET_MODE (x)) > 2)
    newclass = SI_REGS;
  else if (GET_MODE_SIZE (GET_MODE (x)) > 4
	   && ! reg_class_subset_p (R03_REGS, rclass))
    newclass = DI_REGS;

  rclass = reduce_class (rclass, newclass, rclass);

  if (GET_MODE (x) == QImode)
    rclass = reduce_class (rclass, HL_REGS, rclass);

#if DEBUG0
  fprintf (stderr, "%s\n", class_names[rclass]);
  debug_rtx (x);

  if (GET_CODE (x) == MEM
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
    fprintf (stderr, "Glorm!\n");
#endif
  return rclass;
}

/* Implements TARGET_PREFERRED_OUTPUT_RELOAD_CLASS.  */

#undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
#define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS m32c_preferred_output_reload_class

static reg_class_t
m32c_preferred_output_reload_class (rtx x, reg_class_t rclass)
{
  return m32c_preferred_reload_class (x, rclass);
}

/* Implements LIMIT_RELOAD_CLASS.  We basically want to avoid using
   address registers for reloads since they're needed for address
   reloads.  */
int
m32c_limit_reload_class (machine_mode mode, int rclass)
{
#if DEBUG0
  fprintf (stderr, "limit_reload_class for %s: %s ->",
	   mode_name[mode], class_names[rclass]);
#endif

  if (mode == QImode)
    rclass = reduce_class (rclass, HL_REGS, rclass);
  else if (mode == HImode)
    rclass = reduce_class (rclass, HI_REGS, rclass);
  else if (mode == SImode)
    rclass = reduce_class (rclass, SI_REGS, rclass);

  if (rclass != A_REGS)
    rclass = reduce_class (rclass, DI_REGS, rclass);

#if DEBUG0
  fprintf (stderr, " %s\n", class_names[rclass]);
#endif
  return rclass;
}

/* Implements SECONDARY_RELOAD_CLASS.  QImode values have to be reloaded
   in r0 or r1, as those are the only real QImode registers.  CR regs
   get reloaded through appropriately sized general or address
   registers.  */
int
m32c_secondary_reload_class (int rclass, machine_mode mode, rtx x)
{
  int cc = class_contents[rclass][0];
#if DEBUG0
  fprintf (stderr, "\nsecondary reload class %s %s\n",
	   class_names[rclass], mode_name[mode]);
  debug_rtx (x);
#endif
  if (mode == QImode
      && GET_CODE (x) == MEM && (cc & ~class_contents[R23_REGS][0]) == 0)
    return QI_REGS;
  if (reg_classes_intersect_p (rclass, CR_REGS)
      && GET_CODE (x) == REG
      && REGNO (x) >= SB_REGNO && REGNO (x) <= SP_REGNO)
    return (TARGET_A16 || mode == HImode) ? HI_REGS : A_REGS;
  return NO_REGS;
}

/* Implements TARGET_CLASS_LIKELY_SPILLED_P.  A_REGS is needed for address
   reloads.  */

#undef TARGET_CLASS_LIKELY_SPILLED_P
#define TARGET_CLASS_LIKELY_SPILLED_P m32c_class_likely_spilled_p

static bool
m32c_class_likely_spilled_p (reg_class_t regclass)
{
  if (regclass == A_REGS)
    return true;

  return (reg_class_size[(int) regclass] == 1);
}

/* Implements TARGET_CLASS_MAX_NREGS.  We calculate this according to its
   documented meaning, to avoid potential inconsistencies with actual
   class definitions.  */

#undef TARGET_CLASS_MAX_NREGS
#define TARGET_CLASS_MAX_NREGS m32c_class_max_nregs

static unsigned char
m32c_class_max_nregs (reg_class_t regclass, machine_mode mode)
{
  int rn;
  unsigned char max = 0;

  for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
    if (TEST_HARD_REG_BIT (reg_class_contents[(int) regclass], rn))
      {
	unsigned char n = m32c_hard_regno_nregs (rn, mode);
	if (max < n)
	  max = n;
      }
  return max;
}

/* Implements TARGET_CAN_CHANGE_MODE_CLASS.  Only r0 and r1 can change to
   QI (r0l, r1l) because the chip doesn't support QI ops on other
   registers (well, it does on a0/a1 but if we let gcc do that, reload
   suffers).  Otherwise, we allow changes to larger modes.  */
static bool
m32c_can_change_mode_class (machine_mode from,
			    machine_mode to, reg_class_t rclass)
{
  int rn;
#if DEBUG0
  fprintf (stderr, "can change from %s to %s in %s\n",
	   mode_name[from], mode_name[to], class_names[rclass]);
#endif

  /* If the larger mode isn't allowed in any of these registers, we
     can't allow the change.  */
  for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
    if (class_contents[rclass][0] & (1 << rn))
      if (! m32c_hard_regno_mode_ok (rn, to))
	return false;

  if (to == QImode)
    return (class_contents[rclass][0] & 0x1ffa) == 0;

  if (class_contents[rclass][0] & 0x0005 /* r0, r1 */
      && GET_MODE_SIZE (from) > 1)
    return true;
  if (GET_MODE_SIZE (from) > 2)	/* all other regs */
    return true;

  return false;
}

/* Helpers for the rest of the file.  */
/* TRUE if the rtx is a REG rtx for the given register.  */
#define IS_REG(rtx,regno) (GET_CODE (rtx) == REG \
			   && REGNO (rtx) == regno)
/* TRUE if the rtx is a pseudo - specifically, one we can use as a
   base register in address calculations (hence the "strict"
   argument).  */
#define IS_PSEUDO(rtx,strict) (!strict && GET_CODE (rtx) == REG \
			       && (REGNO (rtx) == AP_REGNO \
				   || REGNO (rtx) >= FIRST_PSEUDO_REGISTER))

#define A0_OR_PSEUDO(x) (IS_REG(x, A0_REGNO) || REGNO (x) >= FIRST_PSEUDO_REGISTER)

/* Implements matching for constraints (see next function too).  'S' is
   for memory constraints, plus "Rpa" for PARALLEL rtx's we use for
   call return values.  */
bool
m32c_matches_constraint_p (rtx value, int constraint)
{
  encode_pattern (value);

  switch (constraint) {
  case CONSTRAINT_SF:
    return (far_addr_space_p (value)
	    && ((RTX_IS ("mr")
		 && A0_OR_PSEUDO (patternr[1])
		 && GET_MODE (patternr[1]) == SImode)
		|| (RTX_IS ("m+^Sri")
		    && A0_OR_PSEUDO (patternr[4])
		    && GET_MODE (patternr[4]) == HImode)
		|| (RTX_IS ("m+^Srs")
		    && A0_OR_PSEUDO (patternr[4])
		    && GET_MODE (patternr[4]) == HImode)
		|| (RTX_IS ("m+^S+ris")
		    && A0_OR_PSEUDO (patternr[5])
		    && GET_MODE (patternr[5]) == HImode)
		|| RTX_IS ("ms")));
  case CONSTRAINT_Sd:
    {
      /* This is the common "src/dest" address */
      rtx r;
      if (GET_CODE (value) == MEM && CONSTANT_P (XEXP (value, 0)))
	return true;
      if (RTX_IS ("ms") || RTX_IS ("m+si"))
	return true;
      if (RTX_IS ("m++rii"))
	{
	  if (REGNO (patternr[3]) == FB_REGNO
	      && INTVAL (patternr[4]) == 0)
	    return true;
	}
      if (RTX_IS ("mr"))
	r = patternr[1];
      else if (RTX_IS ("m+ri") || RTX_IS ("m+rs") || RTX_IS ("m+r+si"))
	r = patternr[2];
      else
	return false;
      if (REGNO (r) == SP_REGNO)
	return false;
      return m32c_legitimate_address_p (GET_MODE (value), XEXP (value, 0), 1);
    }
  case CONSTRAINT_Sa:
    {
      rtx r;
      if (RTX_IS ("mr"))
	r = patternr[1];
      else if (RTX_IS ("m+ri"))
	r = patternr[2];
      else
	return false;
      return (IS_REG (r, A0_REGNO) || IS_REG (r, A1_REGNO));
    }
  case CONSTRAINT_Si:
    return (RTX_IS ("mi") || RTX_IS ("ms") || RTX_IS ("m+si"));
  case CONSTRAINT_Ss:
    return ((RTX_IS ("mr")
	     && (IS_REG (patternr[1], SP_REGNO)))
	    || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SP_REGNO))));
  case CONSTRAINT_Sf:
    return ((RTX_IS ("mr")
	     && (IS_REG (patternr[1], FB_REGNO)))
	    || (RTX_IS ("m+ri") && (IS_REG (patternr[2], FB_REGNO))));
  case CONSTRAINT_Sb:
    return ((RTX_IS ("mr")
	     && (IS_REG (patternr[1], SB_REGNO)))
	    || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SB_REGNO))));
  case CONSTRAINT_Sp:
    /* Absolute addresses 0..0x1fff used for bit addressing (I/O ports) */
    return (RTX_IS ("mi")
	    && !(INTVAL (patternr[1]) & ~0x1fff));
  case CONSTRAINT_S1:
    return r1h_operand (value, QImode);
  case CONSTRAINT_Rpa:
    return GET_CODE (value) == PARALLEL;
  default:
    return false;
  }
}
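/* Example of the constraint logic above (values are illustrative
   only): for 'Sp', an rtx like (mem (const_int 0x340)) encodes as
   "mi", patternr[1] is the CONST_INT, and the test accepts it since
   0x340 fits in 0..0x1fff; (mem (const_int 0x2000)) fails the mask
   check and is rejected.  */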

/* STACK AND CALLING */

/* Frame Layout */

/* Implements RETURN_ADDR_RTX.  Note that R8C and M16C push 24 bits
   (yes, THREE bytes) onto the stack for the return address, but we
   don't support pointers bigger than 16 bits on those chips.  This
   will likely wreak havoc with exception unwinding.  FIXME.  */
rtx
m32c_return_addr_rtx (int count)
{
  machine_mode mode;
  int offset;
  rtx ra_mem;

  if (count)
    return NULL_RTX;
  /* we want 2[$fb] */

  if (TARGET_A24)
    {
      /* It's four bytes */
      mode = PSImode;
      offset = 4;
    }
  else
    {
      /* FIXME: it's really 3 bytes */
      mode = HImode;
      offset = 2;
    }

  ra_mem =
    gen_rtx_MEM (mode, plus_constant (Pmode, gen_rtx_REG (Pmode, FP_REGNO),
				      offset));
  return copy_to_mode_reg (mode, ra_mem);
}

/* Implements INCOMING_RETURN_ADDR_RTX.  See comment above.  */
rtx
m32c_incoming_return_addr_rtx (void)
{
  /* we want [sp] */
  return gen_rtx_MEM (PSImode, gen_rtx_REG (PSImode, SP_REGNO));
}

/* Exception Handling Support */

/* Implements EH_RETURN_DATA_REGNO.  Choose registers able to hold
   pointers.  */
int
m32c_eh_return_data_regno (int n)
{
  switch (n)
    {
    case 0:
      return MEM0_REGNO;
    case 1:
      return MEM0_REGNO+4;
    default:
      return INVALID_REGNUM;
    }
}

/* Implements EH_RETURN_STACKADJ_RTX.  Saved and used later in
   m32c_emit_eh_epilogue.  */
rtx
m32c_eh_return_stackadj_rtx (void)
{
  if (!cfun->machine->eh_stack_adjust)
    {
      rtx sa;

      sa = gen_rtx_REG (Pmode, R0_REGNO);
      cfun->machine->eh_stack_adjust = sa;
    }
  return cfun->machine->eh_stack_adjust;
}

/* Registers That Address the Stack Frame */

/* Implements DWARF_FRAME_REGNUM and DBX_REGISTER_NUMBER.  Note that
   the original spec called for dwarf numbers to vary with register
   width as well, for example, r0l, r0, and r2r0 would each have
   different dwarf numbers.  GCC doesn't support this, and we don't do
   it, and gdb seems to like it this way anyway.  */
unsigned int
m32c_dwarf_frame_regnum (int n)
{
  switch (n)
    {
    case R0_REGNO:
      return 5;
    case R1_REGNO:
      return 6;
    case R2_REGNO:
      return 7;
    case R3_REGNO:
      return 8;
    case A0_REGNO:
      return 9;
    case A1_REGNO:
      return 10;
    case FB_REGNO:
      return 11;
    case SB_REGNO:
      return 19;

    case SP_REGNO:
      return 12;
    case PC_REGNO:
      return 13;
    default:
      return DWARF_FRAME_REGISTERS + 1;
    }
}

/* The frame looks like this:

   ap -> +------------------------------
	 | Return address (3 or 4 bytes)
	 | Saved FB (2 or 4 bytes)
   fb -> +------------------------------
	 | local vars
	 | register saves fb
	 |   through r0 as needed
   sp -> +------------------------------
*/

/* We use this to wrap all emitted insns in the prologue.  */
static rtx
F (rtx x)
{
  RTX_FRAME_RELATED_P (x) = 1;
  return x;
}

/* This maps register numbers to the PUSHM/POPM bitfield, and tells us
   how much the stack pointer moves for each, for each cpu family.  */
static struct
{
  int reg1;
  int bit;
  int a16_bytes;
  int a24_bytes;
} pushm_info[] =
{
  /* These are in reverse push (nearest-to-sp) order.  */
  { R0_REGNO, 0x80, 2, 2 },
  { R1_REGNO, 0x40, 2, 2 },
  { R2_REGNO, 0x20, 2, 2 },
  { R3_REGNO, 0x10, 2, 2 },
  { A0_REGNO, 0x08, 2, 4 },
  { A1_REGNO, 0x04, 2, 4 },
  { SB_REGNO, 0x02, 2, 4 },
  { FB_REGNO, 0x01, 2, 4 }
};

#define PUSHM_N (sizeof(pushm_info)/sizeof(pushm_info[0]))

/* Returns TRUE if we need to save/restore the given register.  We
   save everything for exception handlers, so that any register can be
   unwound.  For interrupt handlers, we save everything if the handler
   calls something else (because we don't know what *that* function
   might do), but try to be a bit smarter if the handler is a leaf
   function.  We always save $a0, though, because we use that in the
   epilogue to copy $fb to $sp.  */
static int
need_to_save (int regno)
{
  if (fixed_regs[regno])
    return 0;
  if (crtl->calls_eh_return)
    return 1;
  if (regno == FP_REGNO)
    return 0;
  if (cfun->machine->is_interrupt
      && (!cfun->machine->is_leaf
	  || (regno == A0_REGNO
	      && m32c_function_needs_enter ())
	  ))
    return 1;
  if (df_regs_ever_live_p (regno)
      && (!call_used_or_fixed_reg_p (regno) || cfun->machine->is_interrupt))
    return 1;
  return 0;
}

/* This function contains all the intelligence about saving and
   restoring registers.  It always figures out the register save set.
   When called with PP_justcount, it merely returns the size of the
   save set (for eliminating the frame pointer, for example).  When
   called with PP_pushm or PP_popm, it emits the appropriate
   instructions for saving (pushm) or restoring (popm) the
   registers.  */
static int
m32c_pushm_popm (Push_Pop_Type ppt)
{
  int reg_mask = 0;
  int byte_count = 0, bytes;
  int i;
  rtx dwarf_set[PUSHM_N];
  int n_dwarfs = 0;
  int nosave_mask = 0;

  if (crtl->return_rtx
      && GET_CODE (crtl->return_rtx) == PARALLEL
      && !(crtl->calls_eh_return || cfun->machine->is_interrupt))
    {
      rtx exp = XVECEXP (crtl->return_rtx, 0, 0);
      rtx rv = XEXP (exp, 0);
      int rv_bytes = GET_MODE_SIZE (GET_MODE (rv));

      if (rv_bytes > 2)
	nosave_mask |= 0x20;	/* PSI, SI */
      else
	nosave_mask |= 0xf0;	/* DF */
      if (rv_bytes > 4)
	nosave_mask |= 0x50;	/* DI */
    }

  for (i = 0; i < (int) PUSHM_N; i++)
    {
      /* Skip if neither register needs saving.  */
      if (!need_to_save (pushm_info[i].reg1))
	continue;

      if (pushm_info[i].bit & nosave_mask)
	continue;

      reg_mask |= pushm_info[i].bit;
      bytes = TARGET_A16 ? pushm_info[i].a16_bytes : pushm_info[i].a24_bytes;

      if (ppt == PP_pushm)
	{
	  machine_mode mode = (bytes == 2) ? HImode : SImode;
	  rtx addr;

	  /* Always use stack_pointer_rtx instead of calling
	     rtx_gen_REG ourselves.  Code elsewhere in GCC assumes
	     that there is a single rtx representing the stack pointer,
	     namely stack_pointer_rtx, and uses == to recognize it.  */
	  addr = stack_pointer_rtx;

	  if (byte_count != 0)
	    addr = gen_rtx_PLUS (GET_MODE (addr), addr, GEN_INT (byte_count));

	  dwarf_set[n_dwarfs++] =
	    gen_rtx_SET (gen_rtx_MEM (mode, addr),
			 gen_rtx_REG (mode, pushm_info[i].reg1));
	  F (dwarf_set[n_dwarfs - 1]);

	}
      byte_count += bytes;
    }

  if (cfun->machine->is_interrupt)
    {
      cfun->machine->intr_pushm = reg_mask & 0xfe;
      reg_mask = 0;
      byte_count = 0;
    }

  if (cfun->machine->is_interrupt)
    for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
      if (need_to_save (i))
	{
	  byte_count += 2;
	  cfun->machine->intr_pushmem[i - MEM0_REGNO] = 1;
	}

  if (ppt == PP_pushm && byte_count)
    {
      rtx note = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (n_dwarfs + 1));
      rtx pushm;

      if (reg_mask)
	{
	  XVECEXP (note, 0, 0)
	    = gen_rtx_SET (stack_pointer_rtx,
			   gen_rtx_PLUS (GET_MODE (stack_pointer_rtx),
					 stack_pointer_rtx,
					 GEN_INT (-byte_count)));
	  F (XVECEXP (note, 0, 0));

	  for (i = 0; i < n_dwarfs; i++)
	    XVECEXP (note, 0, i + 1) = dwarf_set[i];

	  pushm = F (emit_insn (gen_pushm (GEN_INT (reg_mask))));

	  add_reg_note (pushm, REG_FRAME_RELATED_EXPR, note);
	}

      if (cfun->machine->is_interrupt)
	for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
	  if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
	    {
	      if (TARGET_A16)
		pushm = emit_insn (gen_pushhi_16 (gen_rtx_REG (HImode, i)));
	      else
		pushm = emit_insn (gen_pushhi_24 (gen_rtx_REG (HImode, i)));
	      F (pushm);
	    }
    }
  if (ppt == PP_popm && byte_count)
    {
      if (cfun->machine->is_interrupt)
	for (i = MEM7_REGNO; i >= MEM0_REGNO; i--)
	  if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
	    {
	      if (TARGET_A16)
		emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, i)));
	      else
		emit_insn (gen_pophi_24 (gen_rtx_REG (HImode, i)));
	    }
      if (reg_mask)
	emit_insn (gen_popm (GEN_INT (reg_mask)));
    }

  return byte_count;
}
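/* Worked example (illustrative): in a non-interrupt function where
   need_to_save() is true only for $r1 and $fb, the loop above builds
   reg_mask = 0x40 | 0x01 = 0x41 and, on TARGET_A16, byte_count =
   2 + 2 = 4.  PP_justcount simply returns that 4; PP_pushm emits one
   pushm insn with operand 0x41 plus the matching dwarf notes.  */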

/* Implements INITIAL_ELIMINATION_OFFSET.  See the comment above that
   diagrams our call frame.  */
int
m32c_initial_elimination_offset (int from, int to)
{
  int ofs = 0;

  if (from == AP_REGNO)
    {
      if (TARGET_A16)
	ofs += 5;
      else
	ofs += 8;
    }

  if (to == SP_REGNO)
    {
      ofs += m32c_pushm_popm (PP_justcount);
      ofs += get_frame_size ();
    }

  /* Account for push rounding.  */
  if (TARGET_A24)
    ofs = (ofs + 1) & ~1;
#if DEBUG0
  fprintf (stderr, "initial_elimination_offset from=%d to=%d, ofs=%d\n", from,
	   to, ofs);
#endif
  return ofs;
}
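/* For example, on TARGET_A16 the gap between $ap and $fb is always 5
   bytes (3-byte return address plus 2-byte saved $fb, matching the
   frame diagram above); eliminating all the way to $sp additionally
   adds the pushm save area and get_frame_size ().  */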

/* Passing Function Arguments on the Stack */

/* Implements PUSH_ROUNDING.  The R8C and M16C have byte stacks, the
   M32C has word stacks.  */
poly_int64
m32c_push_rounding (poly_int64 n)
{
  if (TARGET_R8C || TARGET_M16C)
    return n;
  return (n + 1) & ~1;
}
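/* E.g. pushing a single QImode byte reserves one byte of stack on the
   byte-stacked R8C/M16C but is rounded up to two bytes on the
   word-stacked M32C.  */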

/* Passing Arguments in Registers */

/* Implements TARGET_FUNCTION_ARG.  Arguments are passed partly in
   registers, partly on stack.  If our function returns a struct, a
   pointer to a buffer for it is at the top of the stack (last thing
   pushed).  The first few real arguments may be in registers as
   follows:

   R8C/M16C:	arg1 in r1 if it's QI or HI (else it's pushed on stack)
		arg2 in r2 if it's HI (else pushed on stack)
		rest on stack
   M32C:	arg1 in r0 if it's QI or HI (else it's pushed on stack)
		rest on stack

   Structs are not passed in registers, even if they fit.  Only
   integer and pointer types are passed in registers.

   Note that when arg1 doesn't fit in r1, arg2 may still be passed in
   r2 if it fits.  */
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG m32c_function_arg
static rtx
m32c_function_arg (cumulative_args_t ca_v, const function_arg_info &arg)
{
  CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);

  /* Can return a reg, parallel, or 0 for stack */
  rtx rv = NULL_RTX;
#if DEBUG0
  fprintf (stderr, "func_arg %d (%s, %d)\n",
	   ca->parm_num, mode_name[arg.mode], arg.named);
  debug_tree (arg.type);
#endif

  if (arg.end_marker_p ())
    return GEN_INT (0);

  if (ca->force_mem || !arg.named)
    {
#if DEBUG0
      fprintf (stderr, "func arg: force %d named %d, mem\n", ca->force_mem,
	       arg.named);
#endif
      return NULL_RTX;
    }

  if (arg.type && INTEGRAL_TYPE_P (arg.type) && POINTER_TYPE_P (arg.type))
    return NULL_RTX;

  if (arg.aggregate_type_p ())
    return NULL_RTX;

  switch (ca->parm_num)
    {
    case 1:
      if (GET_MODE_SIZE (arg.mode) == 1 || GET_MODE_SIZE (arg.mode) == 2)
	rv = gen_rtx_REG (arg.mode, TARGET_A16 ? R1_REGNO : R0_REGNO);
      break;

    case 2:
      if (TARGET_A16 && GET_MODE_SIZE (arg.mode) == 2)
	rv = gen_rtx_REG (arg.mode, R2_REGNO);
      break;
    }

#if DEBUG0
  debug_rtx (rv);
#endif
  return rv;
}
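/* Illustrative sketch (the prototype is made up, not from this file):
   for f (char a, short b, long c) on R8C/M16C, parm 1 is QImode and
   lands in $r1, parm 2 is HImode and lands in $r2, and the four-byte
   parm 3 falls through to the stack; on M32C only parm 1 is passed in
   a register ($r0).  */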

#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE m32c_pass_by_reference
static bool
m32c_pass_by_reference (cumulative_args_t, const function_arg_info &)
{
  return 0;
}

/* Implements INIT_CUMULATIVE_ARGS.  */
void
m32c_init_cumulative_args (CUMULATIVE_ARGS * ca,
			   tree fntype,
			   rtx libname ATTRIBUTE_UNUSED,
			   tree fndecl,
			   int n_named_args ATTRIBUTE_UNUSED)
{
  if (fntype && aggregate_value_p (TREE_TYPE (fntype), fndecl))
    ca->force_mem = 1;
  else
    ca->force_mem = 0;
  ca->parm_num = 1;
}

/* Implements TARGET_FUNCTION_ARG_ADVANCE.  force_mem is set for
   functions returning structures, so we always reset that.  Otherwise,
   we only need to know the sequence number of the argument to know what
   to do with it.  */
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE m32c_function_arg_advance
static void
m32c_function_arg_advance (cumulative_args_t ca_v,
			   const function_arg_info &)
{
  CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);

  if (ca->force_mem)
    ca->force_mem = 0;
  else
    ca->parm_num++;
}

/* Implements TARGET_FUNCTION_ARG_BOUNDARY.  */
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY m32c_function_arg_boundary
static unsigned int
m32c_function_arg_boundary (machine_mode mode ATTRIBUTE_UNUSED,
			    const_tree type ATTRIBUTE_UNUSED)
{
  return (TARGET_A16 ? 8 : 16);
}

/* Implements FUNCTION_ARG_REGNO_P.  */
int
m32c_function_arg_regno_p (int r)
{
  if (TARGET_A24)
    return (r == R0_REGNO);
  return (r == R1_REGNO || r == R2_REGNO);
}

/* HImode and PSImode are the two "native" modes as far as GCC is
   concerned, but the chips also support a 32-bit mode which is used
   for some opcodes in R8C/M16C and for reset vectors and such.  */
#undef TARGET_VALID_POINTER_MODE
#define TARGET_VALID_POINTER_MODE m32c_valid_pointer_mode
static bool
m32c_valid_pointer_mode (scalar_int_mode mode)
{
  if (mode == HImode
      || mode == PSImode
      || mode == SImode
      )
    return 1;
  return 0;
}

/* How Scalar Function Values Are Returned */

/* Implements TARGET_LIBCALL_VALUE.  Most values are returned in $r0, or some
   combination of registers starting there (r2r0 for longs, r3r1r2r0
   for long long, r3r2r1r0 for doubles), except that that ABI
   currently doesn't work because it ends up using all available
   general registers and gcc often can't compile it.  So, instead, we
   return anything bigger than 16 bits in "mem0" (effectively, a
   memory location).  */

#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE m32c_libcall_value

static rtx
m32c_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
{
  /* return reg or parallel */
#if 0
  /* FIXME: GCC has difficulty returning large values in registers,
     because that ties up most of the general registers and gives the
     register allocator little to work with.  Until we can resolve
     this, large values are returned in memory.  */
  if (mode == DFmode)
    {
      rtx rv;

      rv = gen_rtx_PARALLEL (mode, rtvec_alloc (4));
      XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R0_REGNO),
					      GEN_INT (0));
      XVECEXP (rv, 0, 1) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R1_REGNO),
					      GEN_INT (2));
      XVECEXP (rv, 0, 2) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R2_REGNO),
					      GEN_INT (4));
      XVECEXP (rv, 0, 3) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R3_REGNO),
					      GEN_INT (6));
      return rv;
    }

  if (TARGET_A24 && GET_MODE_SIZE (mode) > 2)
    {
      rtx rv;

      rv = gen_rtx_PARALLEL (mode, rtvec_alloc (1));
      XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (mode,
							   R0_REGNO),
					      GEN_INT (0));
      return rv;
    }
#endif

  if (GET_MODE_SIZE (mode) > 2)
    return gen_rtx_REG (mode, MEM0_REGNO);
  return gen_rtx_REG (mode, R0_REGNO);
}

/* Implements TARGET_FUNCTION_VALUE.  Functions and libcalls have the same
   conventions.  */

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE m32c_function_value

static rtx
m32c_function_value (const_tree valtype,
		     const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
		     bool outgoing ATTRIBUTE_UNUSED)
{
  /* return reg or parallel */
  const machine_mode mode = TYPE_MODE (valtype);
  return m32c_libcall_value (mode, NULL_RTX);
}

/* Implements TARGET_FUNCTION_VALUE_REGNO_P.  */

#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P m32c_function_value_regno_p

static bool
m32c_function_value_regno_p (const unsigned int regno)
{
  return (regno == R0_REGNO || regno == MEM0_REGNO);
}

/* How Large Values Are Returned */

/* We return structures by pushing the address on the stack, even if
   we use registers for the first few "real" arguments.  */
#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX m32c_struct_value_rtx
static rtx
m32c_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
		       int incoming ATTRIBUTE_UNUSED)
{
  return 0;
}

/* Function Entry and Exit */

/* Implements EPILOGUE_USES.  Interrupts restore all registers.  */
int
m32c_epilogue_uses (int regno ATTRIBUTE_UNUSED)
{
  if (cfun->machine->is_interrupt)
    return 1;
  return 0;
}

/* Implementing the Varargs Macros */

#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING m32c_strict_argument_naming
static bool
m32c_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
{
  return 1;
}

/* Trampolines for Nested Functions */

/*
   m16c:
   1 0000 75C43412		mov.w	#0x1234,a0
   2 0004 FC000000		jmp.a	label

   m32c:
   1 0000 BC563412		mov.l:s	#0x123456,a0
   2 0004 CC000000		jmp.a	label
*/

/* Implements TRAMPOLINE_SIZE.  */
int
m32c_trampoline_size (void)
{
  /* Allocate extra space so we can avoid the messy shifts when we
     initialize the trampoline; we just write past the end of the
     opcode.  */
  return TARGET_A16 ? 8 : 10;
}

/* Implements TRAMPOLINE_ALIGNMENT.  */
int
m32c_trampoline_alignment (void)
{
  return 2;
}

/* Implements TARGET_TRAMPOLINE_INIT.  */

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT m32c_trampoline_init
static void
m32c_trampoline_init (rtx m_tramp, tree fndecl, rtx chainval)
{
  rtx function = XEXP (DECL_RTL (fndecl), 0);

#define A0(m,i) adjust_address (m_tramp, m, i)
  if (TARGET_A16)
    {
      /* Note: we subtract a "word" because the moves want signed
	 constants, not unsigned constants.  */
      emit_move_insn (A0 (HImode, 0), GEN_INT (0xc475 - 0x10000));
      emit_move_insn (A0 (HImode, 2), chainval);
      emit_move_insn (A0 (QImode, 4), GEN_INT (0xfc - 0x100));
      /* We use 16-bit addresses here, but store the zero to turn it
	 into a 24-bit offset.  */
      emit_move_insn (A0 (HImode, 5), function);
      emit_move_insn (A0 (QImode, 7), GEN_INT (0x00));
    }
  else
    {
      /* Note that the PSI moves actually write 4 bytes.  Make sure we
	 write stuff out in the right order, and leave room for the
	 extra byte at the end.  */
      emit_move_insn (A0 (QImode, 0), GEN_INT (0xbc - 0x100));
      emit_move_insn (A0 (PSImode, 1), chainval);
      emit_move_insn (A0 (QImode, 4), GEN_INT (0xcc - 0x100));
      emit_move_insn (A0 (PSImode, 5), function);
    }
#undef A0
}

#undef TARGET_LRA_P
#define TARGET_LRA_P hook_bool_void_false

/* Addressing Modes */

/* The r8c/m32c family supports a wide range of non-orthogonal
   addressing modes, including the ability to double-indirect on *some*
   of them.  Not all insns support all modes, either, but we rely on
   predicates and constraints to deal with that.  */
#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P m32c_legitimate_address_p
bool
m32c_legitimate_address_p (machine_mode mode, rtx x, bool strict)
{
  int mode_adjust;
  if (CONSTANT_P (x))
    return 1;

  if (TARGET_A16 && GET_MODE (x) != HImode && GET_MODE (x) != SImode)
    return 0;
  if (TARGET_A24 && GET_MODE (x) != PSImode)
    return 0;

  /* Wide references to memory will be split after reload, so we must
     ensure that all parts of such splits remain legitimate
     addresses.  */
  mode_adjust = GET_MODE_SIZE (mode) - 1;

  /* allowing PLUS yields mem:HI(plus:SI(mem:SI(plus:SI in m32c_split_move */
  if (GET_CODE (x) == PRE_DEC
      || GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_MODIFY)
    {
      return (GET_CODE (XEXP (x, 0)) == REG
	      && REGNO (XEXP (x, 0)) == SP_REGNO);
    }

#if 0
  /* This is the double indirection detection, but it currently
     doesn't work as cleanly as this code implies, so until we've had
     a chance to debug it, leave it disabled.  */
  if (TARGET_A24 && GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) != PLUS)
    {
#if DEBUG_DOUBLE
      fprintf (stderr, "double indirect\n");
#endif
      x = XEXP (x, 0);
    }
#endif

  encode_pattern (x);
  if (RTX_IS ("r"))
    {
      /* Most indexable registers can be used without displacements,
	 although some of them will be emitted with an explicit zero
	 to please the assembler.  */
      switch (REGNO (patternr[0]))
	{
	case A1_REGNO:
	case SB_REGNO:
	case FB_REGNO:
	case SP_REGNO:
	  if (TARGET_A16 && GET_MODE (x) == SImode)
	    return 0;
	  /* FALLTHRU */
	case A0_REGNO:
	  return 1;

	default:
	  if (IS_PSEUDO (patternr[0], strict))
	    return 1;
	  return 0;
	}
    }

  if (TARGET_A16 && GET_MODE (x) == SImode)
    return 0;

  if (RTX_IS ("+ri"))
    {
      /* This is more interesting, because different base registers
	 allow for different displacements - both range and signedness
	 - and it differs from chip series to chip series too.  */
      int rn = REGNO (patternr[1]);
      HOST_WIDE_INT offs = INTVAL (patternr[2]);
      switch (rn)
	{
	case A0_REGNO:
	case A1_REGNO:
	case SB_REGNO:
	  /* The syntax only allows positive offsets, but when the
	     offsets span the entire memory range, we can simulate
	     negative offsets by wrapping.  */
	  if (TARGET_A16)
	    return (offs >= -65536 && offs <= 65535 - mode_adjust);
	  if (rn == SB_REGNO)
	    return (offs >= 0 && offs <= 65535 - mode_adjust);
	  /* A0 or A1 */
	  return (offs >= -16777216 && offs <= 16777215);

	case FB_REGNO:
	  if (TARGET_A16)
	    return (offs >= -128 && offs <= 127 - mode_adjust);
	  return (offs >= -65536 && offs <= 65535 - mode_adjust);

	case SP_REGNO:
	  return (offs >= -128 && offs <= 127 - mode_adjust);

	default:
	  if (IS_PSEUDO (patternr[1], strict))
	    return 1;
	  return 0;
	}
    }
  if (RTX_IS ("+rs") || RTX_IS ("+r+si"))
    {
      rtx reg = patternr[1];

      /* We don't know where the symbol is, so only allow base
	 registers which support displacements spanning the whole
	 address range.  */
      switch (REGNO (reg))
	{
	case A0_REGNO:
	case A1_REGNO:
	  /* $sb needs a secondary reload, but since it's involved in
	     memory address reloads too, we don't deal with it very
	     well.  */
	  /* case SB_REGNO: */
	  return 1;
	default:
	  if (GET_CODE (reg) == SUBREG)
	    return 0;
	  if (IS_PSEUDO (reg, strict))
	    return 1;
	  return 0;
	}
    }
  return 0;
}
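/* Concrete illustration of the ranges above (offsets chosen only as
   examples): with TARGET_A16 and an HImode access (mode_adjust == 1),
   the address (plus $fb 100) encodes as "+ri" and is accepted
   (-128 <= 100 <= 126), while (plus $fb 200) is rejected here and is
   instead fixed up by m32c_legitimize_address below.  */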
1774
1775/* Implements REG_OK_FOR_BASE_P. */
1776int
1777m32c_reg_ok_for_base_p (rtx x, int strict)
1778{
1779 if (GET_CODE (x) != REG)
1780 return 0;
1781 switch (REGNO (x))
1782 {
1783 case A0_REGNO:
1784 case A1_REGNO:
1785 case SB_REGNO:
1786 case FB_REGNO:
1787 case SP_REGNO:
1788 return 1;
1789 default:
1790 if (IS_PSEUDO (x, strict))
1791 return 1;
1792 return 0;
1793 }
1794}
1795
04aff2c0 1796/* We have three choices for choosing fb->aN offsets. If we choose -128,
85f65093 1797 we need one MOVA -128[fb],aN opcode and 16-bit aN displacements,
04aff2c0
DD
1798 like this:
1799 EB 4B FF mova -128[$fb],$a0
1800 D8 0C FF FF mov.w:Q #0,-1[$a0]
1801
85f65093 1802 Alternately, we subtract the frame size, and hopefully use 8-bit aN
04aff2c0
DD
1803 displacements:
1804 7B F4 stc $fb,$a0
1805 77 54 00 01 sub #256,$a0
1806 D8 08 01 mov.w:Q #0,1[$a0]
1807
1808 If we don't offset (i.e. offset by zero), we end up with:
1809 7B F4 stc $fb,$a0
1810 D8 0C 00 FF mov.w:Q #0,-256[$a0]
1811
1812 We have to subtract *something* so that we have a PLUS rtx to mark
1813 that we've done this reload. The -128 offset will never result in
85f65093 1814 an 8-bit aN offset, and the payoff for the second case is five
04aff2c0
DD
1815 loads *if* those loads are within 256 bytes of the other end of the
1816 frame, so the third case seems best. Note that we subtract the
1817 zero, but detect that in the addhi3 pattern. */
1818
ea471af0
JM
1819#define BIG_FB_ADJ 0
1820
38b2d076
DD
1821/* Implements LEGITIMIZE_ADDRESS. The only address we really have to
1822 worry about is frame base offsets, as $fb has a limited
1823 displacement range. We deal with this by attempting to reload $fb
1824 itself into an address register; that seems to result in the best
1825 code. */
506d7b68
PB
1826#undef TARGET_LEGITIMIZE_ADDRESS
1827#define TARGET_LEGITIMIZE_ADDRESS m32c_legitimize_address
1828static rtx
1829m32c_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
ef4bddc2 1830 machine_mode mode)
38b2d076
DD
1831{
1832#if DEBUG0
1833 fprintf (stderr, "m32c_legitimize_address for mode %s\n", mode_name[mode]);
506d7b68 1834 debug_rtx (x);
38b2d076
DD
1835 fprintf (stderr, "\n");
1836#endif
1837
506d7b68
PB
1838 if (GET_CODE (x) == PLUS
1839 && GET_CODE (XEXP (x, 0)) == REG
1840 && REGNO (XEXP (x, 0)) == FB_REGNO
1841 && GET_CODE (XEXP (x, 1)) == CONST_INT
1842 && (INTVAL (XEXP (x, 1)) < -128
1843 || INTVAL (XEXP (x, 1)) > (128 - GET_MODE_SIZE (mode))))
38b2d076
DD
1844 {
1845 /* reload FB to A_REGS */
38b2d076 1846 rtx temp = gen_reg_rtx (Pmode);
506d7b68 1847 x = copy_rtx (x);
f7df4a84 1848 emit_insn (gen_rtx_SET (temp, XEXP (x, 0)));
506d7b68 1849 XEXP (x, 0) = temp;
38b2d076
DD
1850 }
1851
506d7b68 1852 return x;
38b2d076
DD
1853}
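/* Purely illustrative (not from the original sources): on a 16-bit
   address target the hook above turns an out-of-range frame access
   such as

     (plus:HI (reg:HI fb) (const_int 300))

   into

     (set (reg:HI tmp) (reg:HI fb))
     (plus:HI (reg:HI tmp) (const_int 300))

   where "tmp" is a fresh pseudo that can later land in an address
   register with a wider displacement range.  */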
1854
1855/* Implements LEGITIMIZE_RELOAD_ADDRESS. See comment above. */
1856int
1857m32c_legitimize_reload_address (rtx * x,
ef4bddc2 1858 machine_mode mode,
38b2d076
DD
1859 int opnum,
1860 int type, int ind_levels ATTRIBUTE_UNUSED)
1861{
1862#if DEBUG0
1863 fprintf (stderr, "\nm32c_legitimize_reload_address for mode %s\n",
1864 mode_name[mode]);
1865 debug_rtx (*x);
1866#endif
1867
1868 /* At one point, this function tried to get $fb copied to an address
1869 register, which in theory would maximize sharing, but gcc was
1870 *also* still trying to reload the whole address, and we'd run out
1871 of address registers. So we let gcc do the naive (but safe)
1872 reload instead, when the above function doesn't handle it for
04aff2c0
DD
1873 us.
1874
1875 The code below is a second attempt at the above. */
1876
1877 if (GET_CODE (*x) == PLUS
1878 && GET_CODE (XEXP (*x, 0)) == REG
1879 && REGNO (XEXP (*x, 0)) == FB_REGNO
1880 && GET_CODE (XEXP (*x, 1)) == CONST_INT
1881 && (INTVAL (XEXP (*x, 1)) < -128
1882 || INTVAL (XEXP (*x, 1)) > (128 - GET_MODE_SIZE (mode))))
1883 {
1884 rtx sum;
1885 int offset = INTVAL (XEXP (*x, 1));
1886 int adjustment = -BIG_FB_ADJ;
1887
1888 sum = gen_rtx_PLUS (Pmode, XEXP (*x, 0),
1889 GEN_INT (adjustment));
1890 *x = gen_rtx_PLUS (Pmode, sum, GEN_INT (offset - adjustment));
1891 if (type == RELOAD_OTHER)
1892 type = RELOAD_FOR_OTHER_ADDRESS;
1893 push_reload (sum, NULL_RTX, &XEXP (*x, 0), NULL,
1894 A_REGS, Pmode, VOIDmode, 0, 0, opnum,
444d6efe 1895 (enum reload_type) type);
04aff2c0
DD
1896 return 1;
1897 }
1898
1899 if (GET_CODE (*x) == PLUS
1900 && GET_CODE (XEXP (*x, 0)) == PLUS
1901 && GET_CODE (XEXP (XEXP (*x, 0), 0)) == REG
1902 && REGNO (XEXP (XEXP (*x, 0), 0)) == FB_REGNO
1903 && GET_CODE (XEXP (XEXP (*x, 0), 1)) == CONST_INT
1904 && GET_CODE (XEXP (*x, 1)) == CONST_INT
1905 )
1906 {
1907 if (type == RELOAD_OTHER)
1908 type = RELOAD_FOR_OTHER_ADDRESS;
1909 push_reload (XEXP (*x, 0), NULL_RTX, &XEXP (*x, 0), NULL,
1910 A_REGS, Pmode, VOIDmode, 0, 0, opnum,
444d6efe 1911 (enum reload_type) type);
f75e07bc
BE
1912 return 1;
1913 }
1914
1915 if (TARGET_A24 && GET_MODE (*x) == PSImode)
1916 {
1917 push_reload (*x, NULL_RTX, x, NULL,
1918 A_REGS, PSImode, VOIDmode, 0, 0, opnum,
1919 (enum reload_type) type);
04aff2c0
DD
1920 return 1;
1921 }
38b2d076
DD
1922
1923 return 0;
1924}
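/* Illustrative sketch: with BIG_FB_ADJ == 0, the first case above
   rewrites

     (plus:HI (reg:HI fb) (const_int 300))

   as

     (plus:HI (plus:HI (reg:HI fb) (const_int 0)) (const_int 300))

   and pushes a reload of the inner PLUS into class A_REGS, so the
   large displacement ends up relative to an address register rather
   than $fb.  */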
1925
5fd5d713
DD
1926/* Return the appropriate mode for a named address pointer. */
1927#undef TARGET_ADDR_SPACE_POINTER_MODE
1928#define TARGET_ADDR_SPACE_POINTER_MODE m32c_addr_space_pointer_mode
095a2d76 1929static scalar_int_mode
5fd5d713
DD
1930m32c_addr_space_pointer_mode (addr_space_t addrspace)
1931{
1932 switch (addrspace)
1933 {
1934 case ADDR_SPACE_GENERIC:
1935 return TARGET_A24 ? PSImode : HImode;
1936 case ADDR_SPACE_FAR:
1937 return SImode;
1938 default:
1939 gcc_unreachable ();
1940 }
1941}
1942
1943/* Return the appropriate mode for a named address address. */
1944#undef TARGET_ADDR_SPACE_ADDRESS_MODE
1945#define TARGET_ADDR_SPACE_ADDRESS_MODE m32c_addr_space_address_mode
095a2d76 1946static scalar_int_mode
5fd5d713
DD
1947m32c_addr_space_address_mode (addr_space_t addrspace)
1948{
1949 switch (addrspace)
1950 {
1951 case ADDR_SPACE_GENERIC:
1952 return TARGET_A24 ? PSImode : HImode;
1953 case ADDR_SPACE_FAR:
1954 return SImode;
1955 default:
1956 gcc_unreachable ();
1957 }
1958}
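/* Example of the effect of the two hooks above (illustrative; assumes
   the usual -mcpu=r8c/m16c vs. -mcpu=m32cm/m32c split and the __far
   keyword): a generic "char *p" is HImode (16 bits) on A16 parts and
   PSImode (24 bits) on A24 parts, while "char __far *q" is always
   SImode (32 bits).  */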
1959
1960/* Like m32c_legitimate_address_p, except with named addresses. */
1961#undef TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P
1962#define TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P \
1963 m32c_addr_space_legitimate_address_p
1964static bool
ef4bddc2 1965m32c_addr_space_legitimate_address_p (machine_mode mode, rtx x,
5fd5d713
DD
1966 bool strict, addr_space_t as)
1967{
1968 if (as == ADDR_SPACE_FAR)
1969 {
1970 if (TARGET_A24)
1971 return 0;
1972 encode_pattern (x);
1973 if (RTX_IS ("r"))
1974 {
1975 if (GET_MODE (x) != SImode)
1976 return 0;
1977 switch (REGNO (patternr[0]))
1978 {
1979 case A0_REGNO:
1980 return 1;
1981
1982 default:
1983 if (IS_PSEUDO (patternr[0], strict))
1984 return 1;
1985 return 0;
1986 }
1987 }
1988 if (RTX_IS ("+^Sri"))
1989 {
1990 int rn = REGNO (patternr[3]);
1991 HOST_WIDE_INT offs = INTVAL (patternr[4]);
1992 if (GET_MODE (patternr[3]) != HImode)
1993 return 0;
1994 switch (rn)
1995 {
1996 case A0_REGNO:
1997 return (offs >= 0 && offs <= 0xfffff);
1998
1999 default:
2000 if (IS_PSEUDO (patternr[3], strict))
2001 return 1;
2002 return 0;
2003 }
2004 }
2005 if (RTX_IS ("+^Srs"))
2006 {
2007 int rn = REGNO (patternr[3]);
2008 if (GET_MODE (patternr[3]) != HImode)
2009 return 0;
2010 switch (rn)
2011 {
2012 case A0_REGNO:
2013 return 1;
2014
2015 default:
2016 if (IS_PSEUDO (patternr[3], strict))
2017 return 1;
2018 return 0;
2019 }
2020 }
2021 if (RTX_IS ("+^S+ris"))
2022 {
2023 int rn = REGNO (patternr[4]);
2024 if (GET_MODE (patternr[4]) != HImode)
2025 return 0;
2026 switch (rn)
2027 {
2028 case A0_REGNO:
2029 return 1;
2030
2031 default:
2032 if (IS_PSEUDO (patternr[4], strict))
2033 return 1;
2034 return 0;
2035 }
2036 }
2037 if (RTX_IS ("s"))
2038 {
2039 return 1;
2040 }
2041 return 0;
2042 }
2043
2044 else if (as != ADDR_SPACE_GENERIC)
2045 gcc_unreachable ();
2046
2047 return m32c_legitimate_address_p (mode, x, strict);
2048}
2049
2050/* Like m32c_legitimate_address, except with named address support. */
2051#undef TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS
2052#define TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS m32c_addr_space_legitimize_address
2053static rtx
ef4bddc2 2054m32c_addr_space_legitimize_address (rtx x, rtx oldx, machine_mode mode,
5fd5d713
DD
2055 addr_space_t as)
2056{
2057 if (as != ADDR_SPACE_GENERIC)
2058 {
2059#if DEBUG0
2060 fprintf (stderr, "\033[36mm32c_addr_space_legitimize_address for mode %s\033[0m\n", mode_name[mode]);
2061 debug_rtx (x);
2062 fprintf (stderr, "\n");
2063#endif
2064
2065 if (GET_CODE (x) != REG)
2066 {
2067 x = force_reg (SImode, x);
2068 }
2069 return x;
2070 }
2071
2072 return m32c_legitimize_address (x, oldx, mode);
2073}
2074
2075/* Determine if one named address space is a subset of another. */
2076#undef TARGET_ADDR_SPACE_SUBSET_P
2077#define TARGET_ADDR_SPACE_SUBSET_P m32c_addr_space_subset_p
2078static bool
2079m32c_addr_space_subset_p (addr_space_t subset, addr_space_t superset)
2080{
2081 gcc_assert (subset == ADDR_SPACE_GENERIC || subset == ADDR_SPACE_FAR);
2082 gcc_assert (superset == ADDR_SPACE_GENERIC || superset == ADDR_SPACE_FAR);
2083
2084 if (subset == superset)
2085 return true;
2086
2087 else
2088 return (subset == ADDR_SPACE_GENERIC && superset == ADDR_SPACE_FAR);
2089}
2090
2091#undef TARGET_ADDR_SPACE_CONVERT
2092#define TARGET_ADDR_SPACE_CONVERT m32c_addr_space_convert
2093/* Convert from one address space to another. */
2094static rtx
2095m32c_addr_space_convert (rtx op, tree from_type, tree to_type)
2096{
2097 addr_space_t from_as = TYPE_ADDR_SPACE (TREE_TYPE (from_type));
2098 addr_space_t to_as = TYPE_ADDR_SPACE (TREE_TYPE (to_type));
2099 rtx result;
2100
2101 gcc_assert (from_as == ADDR_SPACE_GENERIC || from_as == ADDR_SPACE_FAR);
2102 gcc_assert (to_as == ADDR_SPACE_GENERIC || to_as == ADDR_SPACE_FAR);
2103
2104 if (to_as == ADDR_SPACE_GENERIC && from_as == ADDR_SPACE_FAR)
2105 {
2106 /* This is unpredictable, as we're truncating off usable address
2107 bits. */
2108
2109 result = gen_reg_rtx (HImode);
2110 emit_move_insn (result, simplify_subreg (HImode, op, SImode, 0));
2111 return result;
2112 }
2113 else if (to_as == ADDR_SPACE_FAR && from_as == ADDR_SPACE_GENERIC)
2114 {
2115 /* This always works. */
2116 result = gen_reg_rtx (SImode);
2117 emit_insn (gen_zero_extendhisi2 (result, op));
2118 return result;
2119 }
2120 else
2121 gcc_unreachable ();
2122}
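/* For illustration: converting a far pointer holding 0x12345 to a
   generic pointer keeps only the low 16 bits (0x2345), since we are
   little endian and take subreg 0; converting the other way merely
   zero-extends, so a generic address survives a round trip through
   the far space.  */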
2123
38b2d076
DD
2124/* Condition Code Status */
2125
2126#undef TARGET_FIXED_CONDITION_CODE_REGS
2127#define TARGET_FIXED_CONDITION_CODE_REGS m32c_fixed_condition_code_regs
2128static bool
2129m32c_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
2130{
2131 *p1 = FLG_REGNO;
2132 *p2 = INVALID_REGNUM;
2133 return true;
2134}
2135
2136/* Describing Relative Costs of Operations */
2137
0e607518 2138/* Implements TARGET_REGISTER_MOVE_COST. We make impossible moves
38b2d076
DD
2139 prohibitively expensive, like trying to put QIs in r2/r3 (there are
2140 no opcodes to do that). We also discourage use of mem* registers
2141 since they're really memory. */
0e607518
AS
2142
2143#undef TARGET_REGISTER_MOVE_COST
2144#define TARGET_REGISTER_MOVE_COST m32c_register_move_cost
2145
2146static int
ef4bddc2 2147m32c_register_move_cost (machine_mode mode, reg_class_t from,
0e607518 2148 reg_class_t to)
38b2d076
DD
2149{
2150 int cost = COSTS_N_INSNS (3);
0e607518
AS
2151 HARD_REG_SET cc;
2152
2153/* FIXME: pick real values, but not 2 for now. */
44942965 2154 cc = reg_class_contents[from] | reg_class_contents[(int) to];
0e607518
AS
2155
2156 if (mode == QImode
2157 && hard_reg_set_intersect_p (cc, reg_class_contents[R23_REGS]))
38b2d076 2158 {
0e607518 2159 if (hard_reg_set_subset_p (cc, reg_class_contents[R23_REGS]))
38b2d076
DD
2160 cost = COSTS_N_INSNS (1000);
2161 else
2162 cost = COSTS_N_INSNS (80);
2163 }
2164
2165 if (!class_can_hold_mode (from, mode) || !class_can_hold_mode (to, mode))
2166 cost = COSTS_N_INSNS (1000);
2167
0e607518 2168 if (reg_classes_intersect_p (from, CR_REGS))
38b2d076
DD
2169 cost += COSTS_N_INSNS (5);
2170
0e607518 2171 if (reg_classes_intersect_p (to, CR_REGS))
38b2d076
DD
2172 cost += COSTS_N_INSNS (5);
2173
2174 if (from == MEM_REGS || to == MEM_REGS)
2175 cost += COSTS_N_INSNS (50);
0e607518
AS
2176 else if (reg_classes_intersect_p (from, MEM_REGS)
2177 || reg_classes_intersect_p (to, MEM_REGS))
38b2d076
DD
2178 cost += COSTS_N_INSNS (10);
2179
2180#if DEBUG0
2181 fprintf (stderr, "register_move_cost %s from %s to %s = %d\n",
0e607518
AS
2182 mode_name[mode], class_names[(int) from], class_names[(int) to],
2183 cost);
38b2d076
DD
2184#endif
2185 return cost;
2186}
2187
0e607518
AS
2188/* Implements TARGET_MEMORY_MOVE_COST. */
2189
2190#undef TARGET_MEMORY_MOVE_COST
2191#define TARGET_MEMORY_MOVE_COST m32c_memory_move_cost
2192
2193static int
ef4bddc2 2194m32c_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
0e607518
AS
2195 reg_class_t rclass ATTRIBUTE_UNUSED,
2196 bool in ATTRIBUTE_UNUSED)
38b2d076
DD
2197{
2198 /* FIXME: pick real values. */
2199 return COSTS_N_INSNS (10);
2200}
2201
07127a0a
DD
2202/* Here we try to describe when we use multiple opcodes for one RTX so
2203 that gcc knows when to use them. */
2204#undef TARGET_RTX_COSTS
2205#define TARGET_RTX_COSTS m32c_rtx_costs
2206static bool
e548c9df
AM
2207m32c_rtx_costs (rtx x, machine_mode mode, int outer_code,
2208 int opno ATTRIBUTE_UNUSED,
68f932c4 2209 int *total, bool speed ATTRIBUTE_UNUSED)
07127a0a 2210{
e548c9df 2211 int code = GET_CODE (x);
07127a0a
DD
2212 switch (code)
2213 {
2214 case REG:
2215 if (REGNO (x) >= MEM0_REGNO && REGNO (x) <= MEM7_REGNO)
2216 *total += COSTS_N_INSNS (500);
2217 else
2218 *total += COSTS_N_INSNS (1);
2219 return true;
2220
2221 case ASHIFT:
2222 case LSHIFTRT:
2223 case ASHIFTRT:
2224 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
2225 {
2226 /* mov.b r1l, r1h */
2227 *total += COSTS_N_INSNS (1);
2228 return true;
2229 }
2230 if (INTVAL (XEXP (x, 1)) > 8
2231 || INTVAL (XEXP (x, 1)) < -8)
2232 {
2233 /* mov.b #N, r1l */
2234 /* mov.b r1l, r1h */
2235 *total += COSTS_N_INSNS (2);
2236 return true;
2237 }
2238 return true;
2239
2240 case LE:
2241 case LEU:
2242 case LT:
2243 case LTU:
2244 case GT:
2245 case GTU:
2246 case GE:
2247 case GEU:
2248 case NE:
2249 case EQ:
2250 if (outer_code == SET)
2251 {
2252 *total += COSTS_N_INSNS (2);
2253 return true;
2254 }
2255 break;
2256
2257 case ZERO_EXTRACT:
2258 {
2259 rtx dest = XEXP (x, 0);
2260 rtx addr = XEXP (dest, 0);
2261 switch (GET_CODE (addr))
2262 {
2263 case CONST_INT:
2264 *total += COSTS_N_INSNS (1);
2265 break;
2266 case SYMBOL_REF:
2267 *total += COSTS_N_INSNS (3);
2268 break;
2269 default:
2270 *total += COSTS_N_INSNS (2);
2271 break;
2272 }
2273 return true;
2274 }
2275 break;
2276
2277 default:
2278 /* Reasonable default. */
e548c9df 2279 if (TARGET_A16 && mode == SImode)
07127a0a
DD
2280 *total += COSTS_N_INSNS (2);
2281 break;
2282 }
2283 return false;
2284}
2285
2286#undef TARGET_ADDRESS_COST
2287#define TARGET_ADDRESS_COST m32c_address_cost
2288static int
ef4bddc2 2289m32c_address_cost (rtx addr, machine_mode mode ATTRIBUTE_UNUSED,
b413068c
OE
2290 addr_space_t as ATTRIBUTE_UNUSED,
2291 bool speed ATTRIBUTE_UNUSED)
07127a0a 2292{
80b093df 2293 int i;
07127a0a
DD
2294 /* fprintf(stderr, "\naddress_cost\n");
2295 debug_rtx(addr);*/
2296 switch (GET_CODE (addr))
2297 {
2298 case CONST_INT:
80b093df
DD
2299 i = INTVAL (addr);
2300 if (i == 0)
2301 return COSTS_N_INSNS(1);
01512446 2302 if (i > 0 && i <= 255)
80b093df 2303 return COSTS_N_INSNS(2);
01512446 2304 if (i > 0 && i <= 65535)
80b093df
DD
2305 return COSTS_N_INSNS(3);
2306 return COSTS_N_INSNS(4);
07127a0a 2307 case SYMBOL_REF:
80b093df 2308 return COSTS_N_INSNS(4);
07127a0a 2309 case REG:
80b093df
DD
2310 return COSTS_N_INSNS(1);
2311 case PLUS:
2312 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
2313 {
2314 i = INTVAL (XEXP (addr, 1));
2315 if (i == 0)
2316 return COSTS_N_INSNS(1);
01512446 2317 if (i > 0 && i <= 255)
80b093df 2318 return COSTS_N_INSNS(2);
01512446 2319 if (i > 0 && i <= 65535)
80b093df
DD
2320 return COSTS_N_INSNS(3);
2321 }
2322 return COSTS_N_INSNS(4);
07127a0a
DD
2323 default:
2324 return 0;
2325 }
2326}
2327
38b2d076
DD
2328/* Defining the Output Assembler Language */
2329
38b2d076
DD
2330/* Output of Data */
2331
2332/* We may have 24 bit sizes, which is the native address size.
2333 Currently unused, but provided for completeness. */
2334#undef TARGET_ASM_INTEGER
2335#define TARGET_ASM_INTEGER m32c_asm_integer
2336static bool
2337m32c_asm_integer (rtx x, unsigned int size, int aligned_p)
2338{
2339 switch (size)
2340 {
2341 case 3:
2342 fprintf (asm_out_file, "\t.3byte\t");
2343 output_addr_const (asm_out_file, x);
2344 fputc ('\n', asm_out_file);
2345 return true;
e9555b13
DD
2346 case 4:
2347 if (GET_CODE (x) == SYMBOL_REF)
2348 {
2349 fprintf (asm_out_file, "\t.long\t");
2350 output_addr_const (asm_out_file, x);
2351 fputc ('\n', asm_out_file);
2352 return true;
2353 }
2354 break;
38b2d076
DD
2355 }
2356 return default_assemble_integer (x, size, aligned_p);
2357}
2358
2359/* Output of Assembler Instructions */
2360
a4174ebf 2361/* We use a lookup table because the addressing modes are non-orthogonal. */
38b2d076
DD
2362
2363static struct
2364{
2365 char code;
2366 char const *pattern;
2367 char const *format;
2368}
2369const conversions[] = {
2370 { 0, "r", "0" },
2371
2372 { 0, "mr", "z[1]" },
2373 { 0, "m+ri", "3[2]" },
2374 { 0, "m+rs", "3[2]" },
5fd5d713
DD
2375 { 0, "m+^Zrs", "5[4]" },
2376 { 0, "m+^Zri", "5[4]" },
2377 { 0, "m+^Z+ris", "7+6[5]" },
2378 { 0, "m+^Srs", "5[4]" },
2379 { 0, "m+^Sri", "5[4]" },
2380 { 0, "m+^S+ris", "7+6[5]" },
38b2d076
DD
2381 { 0, "m+r+si", "4+5[2]" },
2382 { 0, "ms", "1" },
2383 { 0, "mi", "1" },
2384 { 0, "m+si", "2+3" },
2385
2386 { 0, "mmr", "[z[2]]" },
2387 { 0, "mm+ri", "[4[3]]" },
2388 { 0, "mm+rs", "[4[3]]" },
2389 { 0, "mm+r+si", "[5+6[3]]" },
2390 { 0, "mms", "[[2]]" },
2391 { 0, "mmi", "[[2]]" },
2392 { 0, "mm+si", "[4[3]]" },
2393
2394 { 0, "i", "#0" },
2395 { 0, "s", "#0" },
2396 { 0, "+si", "#1+2" },
2397 { 0, "l", "#0" },
2398
2399 { 'l', "l", "0" },
2400 { 'd', "i", "0" },
2401 { 'd', "s", "0" },
2402 { 'd', "+si", "1+2" },
2403 { 'D', "i", "0" },
2404 { 'D', "s", "0" },
2405 { 'D', "+si", "1+2" },
2406 { 'x', "i", "#0" },
2407 { 'X', "i", "#0" },
2408 { 'm', "i", "#0" },
2409 { 'b', "i", "#0" },
07127a0a 2410 { 'B', "i", "0" },
38b2d076
DD
2411 { 'p', "i", "0" },
2412
2413 { 0, 0, 0 }
2414};
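/* Worked example (illustrative): for an operand
   (mem:HI (plus:HI (reg:HI a0) (const_int 4))), encode_pattern
   produces the pattern "m+ri" with patternr[2] = the A0 reg and
   patternr[3] = the constant, so the matching format "3[2]" prints
   "4[a0]".  Digits in a format select the corresponding patternr[]
   entry; everything else is emitted literally (modulo the special
   'z' and '+' handling below).  */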
2415
2416/* This is in order according to the bitfield that pushm/popm use. */
2417static char const *pushm_regs[] = {
2418 "fb", "sb", "a1", "a0", "r3", "r2", "r1", "r0"
2419};
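/* For example (illustrative), the 'p' operand code walks this table
   from bit 7 down to bit 0, so a pushm/popm mask of 0x81 (bits 7 and
   0 set) prints "r0,fb" and 0xc0 prints "r0,r1".  */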
2420
4645179e
AS
2421/* Implements TARGET_PRINT_OPERAND. */
2422
2423#undef TARGET_PRINT_OPERAND
2424#define TARGET_PRINT_OPERAND m32c_print_operand
2425
2426static void
38b2d076
DD
2427m32c_print_operand (FILE * file, rtx x, int code)
2428{
2429 int i, j, b;
2430 const char *comma;
2431 HOST_WIDE_INT ival;
2432 int unsigned_const = 0;
ff485e71 2433 int force_sign;
38b2d076
DD
2434
2435 /* Multiplies; constants are converted to sign-extended format but
2436 we need unsigned, so 'u' and 'U' tell us what size unsigned we
2437 need. */
2438 if (code == 'u')
2439 {
2440 unsigned_const = 2;
2441 code = 0;
2442 }
2443 if (code == 'U')
2444 {
2445 unsigned_const = 1;
2446 code = 0;
2447 }
2448 /* This one is only for debugging; you can put it in a pattern to
2449 force this error. */
2450 if (code == '!')
2451 {
2452 fprintf (stderr, "dj: unreviewed pattern:");
2453 if (current_output_insn)
2454 debug_rtx (current_output_insn);
2455 gcc_unreachable ();
2456 }
2457 /* PSImode operations are either .w or .l depending on the target. */
2458 if (code == '&')
2459 {
2460 if (TARGET_A16)
2461 fprintf (file, "w");
2462 else
2463 fprintf (file, "l");
2464 return;
2465 }
2466 /* Inverted conditionals. */
2467 if (code == 'C')
2468 {
2469 switch (GET_CODE (x))
2470 {
2471 case LE:
2472 fputs ("gt", file);
2473 break;
2474 case LEU:
2475 fputs ("gtu", file);
2476 break;
2477 case LT:
2478 fputs ("ge", file);
2479 break;
2480 case LTU:
2481 fputs ("geu", file);
2482 break;
2483 case GT:
2484 fputs ("le", file);
2485 break;
2486 case GTU:
2487 fputs ("leu", file);
2488 break;
2489 case GE:
2490 fputs ("lt", file);
2491 break;
2492 case GEU:
2493 fputs ("ltu", file);
2494 break;
2495 case NE:
2496 fputs ("eq", file);
2497 break;
2498 case EQ:
2499 fputs ("ne", file);
2500 break;
2501 default:
2502 gcc_unreachable ();
2503 }
2504 return;
2505 }
2506 /* Regular conditionals. */
2507 if (code == 'c')
2508 {
2509 switch (GET_CODE (x))
2510 {
2511 case LE:
2512 fputs ("le", file);
2513 break;
2514 case LEU:
2515 fputs ("leu", file);
2516 break;
2517 case LT:
2518 fputs ("lt", file);
2519 break;
2520 case LTU:
2521 fputs ("ltu", file);
2522 break;
2523 case GT:
2524 fputs ("gt", file);
2525 break;
2526 case GTU:
2527 fputs ("gtu", file);
2528 break;
2529 case GE:
2530 fputs ("ge", file);
2531 break;
2532 case GEU:
2533 fputs ("geu", file);
2534 break;
2535 case NE:
2536 fputs ("ne", file);
2537 break;
2538 case EQ:
2539 fputs ("eq", file);
2540 break;
2541 default:
2542 gcc_unreachable ();
2543 }
2544 return;
2545 }
2546 /* Used in negsi2 to do HImode ops on the two parts of an SImode
2547 operand. */
2548 if (code == 'h' && GET_MODE (x) == SImode)
2549 {
2550 x = m32c_subreg (HImode, x, SImode, 0);
2551 code = 0;
2552 }
2553 if (code == 'H' && GET_MODE (x) == SImode)
2554 {
2555 x = m32c_subreg (HImode, x, SImode, 2);
2556 code = 0;
2557 }
07127a0a
DD
2558 if (code == 'h' && GET_MODE (x) == HImode)
2559 {
2560 x = m32c_subreg (QImode, x, HImode, 0);
2561 code = 0;
2562 }
2563 if (code == 'H' && GET_MODE (x) == HImode)
2564 {
2565 /* We can't actually represent this as an rtx. Do it here. */
2566 if (GET_CODE (x) == REG)
2567 {
2568 switch (REGNO (x))
2569 {
2570 case R0_REGNO:
2571 fputs ("r0h", file);
2572 return;
2573 case R1_REGNO:
2574 fputs ("r1h", file);
2575 return;
2576 default:
2577 gcc_unreachable();
2578 }
2579 }
2580 /* This should be a MEM. */
2581 x = m32c_subreg (QImode, x, HImode, 1);
2582 code = 0;
2583 }
2584 /* This is for BMcond, which always wants word register names. */
2585 if (code == 'h' && GET_MODE (x) == QImode)
2586 {
2587 if (GET_CODE (x) == REG)
2588 x = gen_rtx_REG (HImode, REGNO (x));
2589 code = 0;
2590 }
38b2d076
DD
2591 /* 'x' and 'X' need to be ignored for non-immediates. */
2592 if ((code == 'x' || code == 'X') && GET_CODE (x) != CONST_INT)
2593 code = 0;
2594
2595 encode_pattern (x);
ff485e71 2596 force_sign = 0;
38b2d076
DD
2597 for (i = 0; conversions[i].pattern; i++)
2598 if (conversions[i].code == code
2599 && streq (conversions[i].pattern, pattern))
2600 {
2601 for (j = 0; conversions[i].format[j]; j++)
2602 /* backslash quotes the next character in the output pattern. */
2603 if (conversions[i].format[j] == '\\')
2604 {
2605 fputc (conversions[i].format[j + 1], file);
2606 j++;
2607 }
2608 /* Digits in the output pattern indicate that the
2609 corresponding RTX is to be output at that point. */
2610 else if (ISDIGIT (conversions[i].format[j]))
2611 {
2612 rtx r = patternr[conversions[i].format[j] - '0'];
2613 switch (GET_CODE (r))
2614 {
2615 case REG:
2616 fprintf (file, "%s",
2617 reg_name_with_mode (REGNO (r), GET_MODE (r)));
2618 break;
2619 case CONST_INT:
2620 switch (code)
2621 {
2622 case 'b':
07127a0a
DD
2623 case 'B':
2624 {
2625 int v = INTVAL (r);
2626 int i = (int) exact_log2 (v);
2627 if (i == -1)
2628 i = (int) exact_log2 ((v ^ 0xffff) & 0xffff);
2629 if (i == -1)
2630 i = (int) exact_log2 ((v ^ 0xff) & 0xff);
2631 /* Bit position. */
2632 fprintf (file, "%d", i);
2633 }
38b2d076
DD
2634 break;
2635 case 'x':
2636 /* Unsigned byte. */
2637 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2638 INTVAL (r) & 0xff);
2639 break;
2640 case 'X':
2641 /* Unsigned word. */
2642 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2643 INTVAL (r) & 0xffff);
2644 break;
2645 case 'p':
2646 /* pushm and popm encode a register set into a single byte. */
2647 comma = "";
2648 for (b = 7; b >= 0; b--)
2649 if (INTVAL (r) & (1 << b))
2650 {
2651 fprintf (file, "%s%s", comma, pushm_regs[b]);
2652 comma = ",";
2653 }
2654 break;
2655 case 'm':
2656 /* "Minus". Output -X */
2657 ival = (-INTVAL (r) & 0xffff);
2658 if (ival & 0x8000)
2659 ival = ival - 0x10000;
2660 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2661 break;
2662 default:
2663 ival = INTVAL (r);
2664 if (conversions[i].format[j + 1] == '[' && ival < 0)
2665 {
2666 /* We can simulate negative displacements by
2667 taking advantage of address space
2668 wrapping when the offset can span the
2669 entire address range. */
2670 rtx base =
2671 patternr[conversions[i].format[j + 2] - '0'];
2672 if (GET_CODE (base) == REG)
2673 switch (REGNO (base))
2674 {
2675 case A0_REGNO:
2676 case A1_REGNO:
2677 if (TARGET_A24)
2678 ival = 0x1000000 + ival;
2679 else
2680 ival = 0x10000 + ival;
2681 break;
2682 case SB_REGNO:
2683 if (TARGET_A16)
2684 ival = 0x10000 + ival;
2685 break;
2686 }
2687 }
2688 else if (code == 'd' && ival < 0 && j == 0)
2689 /* The "mova" opcode is used to do addition by
2690 computing displacements, but again, we need
2691 displacements to be unsigned *if* they're
2692 the only component of the displacement
2693 (i.e. no "symbol-4" type displacement). */
2694 ival = (TARGET_A24 ? 0x1000000 : 0x10000) + ival;
2695
2696 if (conversions[i].format[j] == '0')
2697 {
2698 /* More conversions to unsigned. */
2699 if (unsigned_const == 2)
2700 ival &= 0xffff;
2701 if (unsigned_const == 1)
2702 ival &= 0xff;
2703 }
2704 if (streq (conversions[i].pattern, "mi")
2705 || streq (conversions[i].pattern, "mmi"))
2706 {
2707 /* Integers used as addresses are unsigned. */
2708 ival &= (TARGET_A24 ? 0xffffff : 0xffff);
2709 }
ff485e71
DD
2710 if (force_sign && ival >= 0)
2711 fputc ('+', file);
38b2d076
DD
2712 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2713 break;
2714 }
2715 break;
2716 case CONST_DOUBLE:
2717 /* We don't have const_double constants. If it
2718 happens, make it obvious. */
2719 fprintf (file, "[const_double 0x%lx]",
2720 (unsigned long) CONST_DOUBLE_HIGH (r));
2721 break;
2722 case SYMBOL_REF:
2723 assemble_name (file, XSTR (r, 0));
2724 break;
2725 case LABEL_REF:
2726 output_asm_label (r);
2727 break;
2728 default:
2729 fprintf (stderr, "don't know how to print this operand:");
2730 debug_rtx (r);
2731 gcc_unreachable ();
2732 }
2733 }
2734 else
2735 {
2736 if (conversions[i].format[j] == 'z')
2737 {
2738 /* Some addressing modes *must* have a displacement,
2739 so insert a zero here if needed. */
2740 int k;
2741 for (k = j + 1; conversions[i].format[k]; k++)
2742 if (ISDIGIT (conversions[i].format[k]))
2743 {
2744 rtx reg = patternr[conversions[i].format[k] - '0'];
2745 if (GET_CODE (reg) == REG
2746 && (REGNO (reg) == SB_REGNO
2747 || REGNO (reg) == FB_REGNO
2748 || REGNO (reg) == SP_REGNO))
2749 fputc ('0', file);
2750 }
2751 continue;
2752 }
2753 /* Signed displacements off symbols need to have signs
2754 blended cleanly. */
2755 if (conversions[i].format[j] == '+'
ff485e71 2756 && (!code || code == 'D' || code == 'd')
38b2d076 2757 && ISDIGIT (conversions[i].format[j + 1])
ff485e71
DD
2758 && (GET_CODE (patternr[conversions[i].format[j + 1] - '0'])
2759 == CONST_INT))
2760 {
2761 force_sign = 1;
2762 continue;
2763 }
38b2d076
DD
2764 fputc (conversions[i].format[j], file);
2765 }
2766 break;
2767 }
2768 if (!conversions[i].pattern)
2769 {
2770 fprintf (stderr, "unconvertible operand %c `%s'", code ? code : '-',
2771 pattern);
2772 debug_rtx (x);
2773 fprintf (file, "[%c.%s]", code ? code : '-', pattern);
2774 }
2775
2776 return;
2777}
2778
4645179e
AS
2779/* Implements TARGET_PRINT_OPERAND_PUNCT_VALID_P.
2780
2781 See m32c_print_operand above for descriptions of what these do. */
2782
2783#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
2784#define TARGET_PRINT_OPERAND_PUNCT_VALID_P m32c_print_operand_punct_valid_p
2785
2786static bool
2787m32c_print_operand_punct_valid_p (unsigned char c)
38b2d076
DD
2788{
2789 if (c == '&' || c == '!')
4645179e
AS
2790 return true;
2791
2792 return false;
38b2d076
DD
2793}
2794
4645179e
AS
2795/* Implements TARGET_PRINT_OPERAND_ADDRESS. Nothing unusual here. */
2796
2797#undef TARGET_PRINT_OPERAND_ADDRESS
2798#define TARGET_PRINT_OPERAND_ADDRESS m32c_print_operand_address
2799
2800static void
cc8ca59e 2801m32c_print_operand_address (FILE * stream, machine_mode /*mode*/, rtx address)
38b2d076 2802{
235e1fe8
NC
2803 if (GET_CODE (address) == MEM)
2804 address = XEXP (address, 0);
2805 else
2806 /* cf: gcc.dg/asm-4.c. */
2807 gcc_assert (GET_CODE (address) == REG);
2808
2809 m32c_print_operand (stream, address, 0);
38b2d076
DD
2810}
2811
2812/* Implements ASM_OUTPUT_REG_PUSH. Control registers are pushed
2813 differently than general registers. */
2814void
2815m32c_output_reg_push (FILE * s, int regno)
2816{
2817 if (regno == FLG_REGNO)
2818 fprintf (s, "\tpushc\tflg\n");
2819 else
04aff2c0 2820 fprintf (s, "\tpush.%c\t%s\n",
38b2d076
DD
2821 " bwll"[reg_push_size (regno)], reg_names[regno]);
2822}
2823
2824/* Likewise for ASM_OUTPUT_REG_POP. */
2825void
2826m32c_output_reg_pop (FILE * s, int regno)
2827{
2828 if (regno == FLG_REGNO)
2829 fprintf (s, "\tpopc\tflg\n");
2830 else
04aff2c0 2831 fprintf (s, "\tpop.%c\t%s\n",
38b2d076
DD
2832 " bwll"[reg_push_size (regno)], reg_names[regno]);
2833}
2834
2835/* Defining target-specific uses of `__attribute__' */
2836
2837/* Used to simplify the logic below. Find the attributes wherever
2838 they may be. */
2839#define M32C_ATTRIBUTES(decl) \
2840 (TYPE_P (decl)) ? TYPE_ATTRIBUTES (decl) \
2841 : DECL_ATTRIBUTES (decl) \
2842 ? (DECL_ATTRIBUTES (decl)) \
2843 : TYPE_ATTRIBUTES (TREE_TYPE (decl))
2844
2845/* Returns TRUE if the given tree has the "interrupt" or "fast_interrupt" attribute. */
2846static int
2847interrupt_p (tree node ATTRIBUTE_UNUSED)
2848{
2849 tree list = M32C_ATTRIBUTES (node);
2850 while (list)
2851 {
2852 if (is_attribute_p ("interrupt", TREE_PURPOSE (list)))
2853 return 1;
2854 list = TREE_CHAIN (list);
2855 }
65655f79
DD
2856 return fast_interrupt_p (node);
2857}
2858
2859/* Returns TRUE if the given tree has the "bank_switch" attribute. */
2860static int
2861bank_switch_p (tree node ATTRIBUTE_UNUSED)
2862{
2863 tree list = M32C_ATTRIBUTES (node);
2864 while (list)
2865 {
2866 if (is_attribute_p ("bank_switch", TREE_PURPOSE (list)))
2867 return 1;
2868 list = TREE_CHAIN (list);
2869 }
2870 return 0;
2871}
2872
2873/* Returns TRUE if the given tree has the "fast_interrupt" attribute. */
2874static int
2875fast_interrupt_p (tree node ATTRIBUTE_UNUSED)
2876{
2877 tree list = M32C_ATTRIBUTES (node);
2878 while (list)
2879 {
2880 if (is_attribute_p ("fast_interrupt", TREE_PURPOSE (list)))
2881 return 1;
2882 list = TREE_CHAIN (list);
2883 }
38b2d076
DD
2884 return 0;
2885}
2886
2887static tree
2888interrupt_handler (tree * node ATTRIBUTE_UNUSED,
2889 tree name ATTRIBUTE_UNUSED,
2890 tree args ATTRIBUTE_UNUSED,
2891 int flags ATTRIBUTE_UNUSED,
2892 bool * no_add_attrs ATTRIBUTE_UNUSED)
2893{
2894 return NULL_TREE;
2895}
2896
5abd2125
JS
2897/* Returns TRUE if the given tree has the "function_vector" attribute. */
2898int
2899m32c_special_page_vector_p (tree func)
2900{
653e2568
DD
2901 tree list;
2902
5abd2125
JS
2903 if (TREE_CODE (func) != FUNCTION_DECL)
2904 return 0;
2905
653e2568 2906 list = M32C_ATTRIBUTES (func);
5abd2125
JS
2907 while (list)
2908 {
2909 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2910 return 1;
2911 list = TREE_CHAIN (list);
2912 }
2913 return 0;
2914}
2915
2916static tree
2917function_vector_handler (tree * node ATTRIBUTE_UNUSED,
2918 tree name ATTRIBUTE_UNUSED,
2919 tree args ATTRIBUTE_UNUSED,
2920 int flags ATTRIBUTE_UNUSED,
2921 bool * no_add_attrs ATTRIBUTE_UNUSED)
2922{
2923 if (TARGET_R8C)
2924 {
2925 /* The attribute is not supported for R8C target. */
2926 warning (OPT_Wattributes,
29d08eba
JM
2927 "%qE attribute is not supported for R8C target",
2928 name);
5abd2125
JS
2929 *no_add_attrs = true;
2930 }
2931 else if (TREE_CODE (*node) != FUNCTION_DECL)
2932 {
2933 /* The attribute must be applied to functions only. */
2934 warning (OPT_Wattributes,
29d08eba
JM
2935 "%qE attribute applies only to functions",
2936 name);
5abd2125
JS
2937 *no_add_attrs = true;
2938 }
2939 else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
2940 {
2941 /* The argument must be a constant integer. */
2942 warning (OPT_Wattributes,
29d08eba
JM
2943 "%qE attribute argument not an integer constant",
2944 name);
5abd2125
JS
2945 *no_add_attrs = true;
2946 }
2947 else if (TREE_INT_CST_LOW (TREE_VALUE (args)) < 18
2948 || TREE_INT_CST_LOW (TREE_VALUE (args)) > 255)
2949 {
2950 /* The argument value must be between 18 and 255. */
2951 warning (OPT_Wattributes,
29d08eba
JM
2952 "%qE attribute argument should be between 18 to 255",
2953 name);
5abd2125
JS
2954 *no_add_attrs = true;
2955 }
2956 return NULL_TREE;
2957}
2958
2959/* If the function is assigned the attribute 'function_vector', it
2960 returns the function vector number, otherwise returns zero. */
2961int
2962current_function_special_page_vector (rtx x)
2963{
2964 int num;
2965
2966 if ((GET_CODE(x) == SYMBOL_REF)
2967 && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
2968 {
653e2568 2969 tree list;
5abd2125
JS
2970 tree t = SYMBOL_REF_DECL (x);
2971
2972 if (TREE_CODE (t) != FUNCTION_DECL)
2973 return 0;
2974
653e2568 2975 list = M32C_ATTRIBUTES (t);
5abd2125
JS
2976 while (list)
2977 {
2978 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2979 {
2980 num = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list)));
2981 return num;
2982 }
2983
2984 list = TREE_CHAIN (list);
2985 }
2986
2987 return 0;
2988 }
2989 else
2990 return 0;
2991}
2992
38b2d076
DD
2993#undef TARGET_ATTRIBUTE_TABLE
2994#define TARGET_ATTRIBUTE_TABLE m32c_attribute_table
2995static const struct attribute_spec m32c_attribute_table[] = {
4849deb1
JJ
2996 /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
2997 affects_type_identity, handler, exclude } */
2998 { "interrupt", 0, 0, false, false, false, false, interrupt_handler, NULL },
2999 { "bank_switch", 0, 0, false, false, false, false, interrupt_handler, NULL },
3000 { "fast_interrupt", 0, 0, false, false, false, false,
3001 interrupt_handler, NULL },
3002 { "function_vector", 1, 1, true, false, false, false,
3003 function_vector_handler, NULL },
3004 { NULL, 0, 0, false, false, false, false, NULL, NULL }
38b2d076
DD
3005};
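/* Typical source-level uses of these attributes (illustrative
   declarations only, not part of this file):

     void __attribute__((interrupt)) timer_isr (void);
     void __attribute__((bank_switch)) bank_isr (void);
     void __attribute__((function_vector (18))) quick_call (void);

   The first two mark interrupt handlers; the third requests a special
   page vector slot, with the argument range 18..255 enforced by
   function_vector_handler above.  */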
3006
3007#undef TARGET_COMP_TYPE_ATTRIBUTES
3008#define TARGET_COMP_TYPE_ATTRIBUTES m32c_comp_type_attributes
3009static int
3101faab
KG
3010m32c_comp_type_attributes (const_tree type1 ATTRIBUTE_UNUSED,
3011 const_tree type2 ATTRIBUTE_UNUSED)
38b2d076
DD
3012{
3013 /* 0=incompatible 1=compatible 2=warning */
3014 return 1;
3015}
3016
3017#undef TARGET_INSERT_ATTRIBUTES
3018#define TARGET_INSERT_ATTRIBUTES m32c_insert_attributes
3019static void
3020m32c_insert_attributes (tree node ATTRIBUTE_UNUSED,
3021 tree * attr_ptr ATTRIBUTE_UNUSED)
3022{
f6052f86
DD
3023 unsigned addr;
3024 /* See if we need to make #pragma address variables volatile. */
3025
3026 if (TREE_CODE (node) == VAR_DECL)
3027 {
444d6efe 3028 const char *name = IDENTIFIER_POINTER (DECL_NAME (node));
f6052f86
DD
3029 if (m32c_get_pragma_address (name, &addr))
3030 {
3031 TREE_THIS_VOLATILE (node) = true;
3032 }
3033 }
3034}
3035
f6052f86 3036/* Hash table of pragma info. */
fb5c464a 3037static GTY(()) hash_map<nofree_string_hash, unsigned> *pragma_htab;
f6052f86
DD
3038
3039void
3040m32c_note_pragma_address (const char *varname, unsigned address)
3041{
f6052f86 3042 if (!pragma_htab)
fb5c464a 3043 pragma_htab = hash_map<nofree_string_hash, unsigned>::create_ggc (31);
f6052f86 3044
2a22f99c
TS
3045 const char *name = ggc_strdup (varname);
3046 unsigned int *slot = &pragma_htab->get_or_insert (name);
3047 *slot = address;
f6052f86
DD
3048}
3049
3050static bool
3051m32c_get_pragma_address (const char *varname, unsigned *address)
3052{
f6052f86
DD
3053 if (!pragma_htab)
3054 return false;
3055
2a22f99c
TS
3056 unsigned int *slot = pragma_htab->get (varname);
3057 if (slot)
f6052f86 3058 {
2a22f99c 3059 *address = *slot;
f6052f86
DD
3060 return true;
3061 }
3062 return false;
3063}
3064
3065void
444d6efe
JR
3066m32c_output_aligned_common (FILE *stream, tree decl ATTRIBUTE_UNUSED,
3067 const char *name,
f6052f86
DD
3068 int size, int align, int global)
3069{
3070 unsigned address;
3071
3072 if (m32c_get_pragma_address (name, &address))
3073 {
3074 /* We never output these as global. */
3075 assemble_name (stream, name);
3076 fprintf (stream, " = 0x%04x\n", address);
3077 return;
3078 }
3079 if (!global)
3080 {
3081 fprintf (stream, "\t.local\t");
3082 assemble_name (stream, name);
3083 fprintf (stream, "\n");
3084 }
3085 fprintf (stream, "\t.comm\t");
3086 assemble_name (stream, name);
3087 fprintf (stream, ",%u,%u\n", size, align / BITS_PER_UNIT);
38b2d076
DD
3088}
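/* Illustrative example of the #pragma ADDRESS support above:

     #pragma ADDRESS port0 0x3b0
     char port0;

   makes m32c_insert_attributes mark port0 volatile, and
   m32c_output_aligned_common then emits "port0 = 0x03b0" instead of a
   .comm directive, pinning the variable at that address.  */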
3089
3090/* Predicates */
3091
f9b89438 3092/* This is a list of legal subregs of hard regs. */
67fc44cb
DD
3093static const struct {
3094 unsigned char outer_mode_size;
3095 unsigned char inner_mode_size;
3096 unsigned char byte_mask;
3097 unsigned char legal_when;
f9b89438 3098 unsigned int regno;
f9b89438 3099} legal_subregs[] = {
67fc44cb
DD
3100 {1, 2, 0x03, 1, R0_REGNO}, /* r0h r0l */
3101 {1, 2, 0x03, 1, R1_REGNO}, /* r1h r1l */
3102 {1, 2, 0x01, 1, A0_REGNO},
3103 {1, 2, 0x01, 1, A1_REGNO},
f9b89438 3104
67fc44cb
DD
3105 {1, 4, 0x01, 1, A0_REGNO},
3106 {1, 4, 0x01, 1, A1_REGNO},
f9b89438 3107
67fc44cb
DD
3108 {2, 4, 0x05, 1, R0_REGNO}, /* r2 r0 */
3109 {2, 4, 0x05, 1, R1_REGNO}, /* r3 r1 */
3110 {2, 4, 0x05, 16, A0_REGNO}, /* a1 a0 */
3111 {2, 4, 0x01, 24, A0_REGNO}, /* a1 a0 */
3112 {2, 4, 0x01, 24, A1_REGNO}, /* a1 a0 */
f9b89438 3113
67fc44cb 3114 {4, 8, 0x55, 1, R0_REGNO}, /* r3 r1 r2 r0 */
f9b89438
DD
3115};
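/* Reading the table above (for illustration): (subreg:QI (reg:HI r0) 0)
   and (subreg:QI (reg:HI r0) 1) are legal (r0l and r0h), but
   (subreg:QI (reg:HI a0) 1) is not, because the address registers have
   no separately addressable high byte; m32c_illegal_subreg_p below
   therefore rejects it.  */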
3116
3117/* Returns TRUE if OP is a subreg of a hard reg which we don't
f6052f86 3118 support. We also bail on MEMs with illegal addresses. */
f9b89438
DD
3119bool
3120m32c_illegal_subreg_p (rtx op)
3121{
f9b89438
DD
3122 int offset;
3123 unsigned int i;
ef4bddc2 3124 machine_mode src_mode, dest_mode;
f9b89438 3125
f6052f86
DD
3126 if (GET_CODE (op) == MEM
3127 && ! m32c_legitimate_address_p (Pmode, XEXP (op, 0), false))
3128 {
3129 return true;
3130 }
3131
f9b89438
DD
3132 if (GET_CODE (op) != SUBREG)
3133 return false;
3134
3135 dest_mode = GET_MODE (op);
3136 offset = SUBREG_BYTE (op);
3137 op = SUBREG_REG (op);
3138 src_mode = GET_MODE (op);
3139
3140 if (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (src_mode))
3141 return false;
3142 if (GET_CODE (op) != REG)
3143 return false;
3144 if (REGNO (op) >= MEM0_REGNO)
3145 return false;
3146
3147 offset = (1 << offset);
3148
67fc44cb 3149 for (i = 0; i < ARRAY_SIZE (legal_subregs); i ++)
f9b89438
DD
3150 if (legal_subregs[i].outer_mode_size == GET_MODE_SIZE (dest_mode)
3151 && legal_subregs[i].regno == REGNO (op)
3152 && legal_subregs[i].inner_mode_size == GET_MODE_SIZE (src_mode)
3153 && legal_subregs[i].byte_mask & offset)
3154 {
3155 switch (legal_subregs[i].legal_when)
3156 {
3157 case 1:
3158 return false;
3159 case 16:
3160 if (TARGET_A16)
3161 return false;
3162 break;
3163 case 24:
3164 if (TARGET_A24)
3165 return false;
3166 break;
3167 }
3168 }
3169 return true;
3170}
3171
38b2d076
DD
3172/* Returns TRUE if we support a move between the first two operands.
3173 At the moment, we just want to discourage mem to mem moves until
3174 after reload, because reload has a hard time with our limited
3175 number of address registers, and we can get into a situation where
3176 we need three of them when we only have two. */
3177bool
ef4bddc2 3178m32c_mov_ok (rtx * operands, machine_mode mode ATTRIBUTE_UNUSED)
38b2d076
DD
3179{
3180 rtx op0 = operands[0];
3181 rtx op1 = operands[1];
3182
3183 if (TARGET_A24)
3184 return true;
3185
3186#define DEBUG_MOV_OK 0
3187#if DEBUG_MOV_OK
3188 fprintf (stderr, "m32c_mov_ok %s\n", mode_name[mode]);
3189 debug_rtx (op0);
3190 debug_rtx (op1);
3191#endif
3192
3193 if (GET_CODE (op0) == SUBREG)
3194 op0 = XEXP (op0, 0);
3195 if (GET_CODE (op1) == SUBREG)
3196 op1 = XEXP (op1, 0);
3197
3198 if (GET_CODE (op0) == MEM
3199 && GET_CODE (op1) == MEM
3200 && ! reload_completed)
3201 {
3202#if DEBUG_MOV_OK
3203 fprintf (stderr, " - no, mem to mem\n");
3204#endif
3205 return false;
3206 }
3207
3208#if DEBUG_MOV_OK
3209 fprintf (stderr, " - ok\n");
3210#endif
3211 return true;
3212}
3213
ff485e71
DD
3214/* Returns TRUE if two consecutive HImode mov instructions, generated
3215 for moving an immediate double data to a double data type variable
3216 location, can be combined into single SImode mov instruction. */
3217bool
55356334 3218m32c_immd_dbl_mov (rtx * operands ATTRIBUTE_UNUSED,
ef4bddc2 3219 machine_mode mode ATTRIBUTE_UNUSED)
ff485e71 3220{
55356334
RS
3221 /* ??? This relied on the now-defunct MEM_SCALAR and MEM_IN_STRUCT_P
3222 flags. */
ff485e71
DD
3223 return false;
3224}
3225
38b2d076
DD
3226/* Expanders */
3227
3228/* Subregs are non-orthogonal for us, because our registers are all
3229 different sizes. */
3230static rtx
ef4bddc2
RS
3231m32c_subreg (machine_mode outer,
3232 rtx x, machine_mode inner, int byte)
38b2d076
DD
3233{
3234 int r, nr = -1;
3235
3236 /* Converting MEMs to different types that are the same size, we
3237 just rewrite them. */
3238 if (GET_CODE (x) == SUBREG
3239 && SUBREG_BYTE (x) == 0
3240 && GET_CODE (SUBREG_REG (x)) == MEM
3241 && (GET_MODE_SIZE (GET_MODE (x))
3242 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
3243 {
3244 rtx oldx = x;
3245 x = gen_rtx_MEM (GET_MODE (x), XEXP (SUBREG_REG (x), 0));
3246 MEM_COPY_ATTRIBUTES (x, SUBREG_REG (oldx));
3247 }
3248
3249 /* Push/pop get done as smaller push/pops. */
3250 if (GET_CODE (x) == MEM
3251 && (GET_CODE (XEXP (x, 0)) == PRE_DEC
3252 || GET_CODE (XEXP (x, 0)) == POST_INC))
3253 return gen_rtx_MEM (outer, XEXP (x, 0));
3254 if (GET_CODE (x) == SUBREG
3255 && GET_CODE (XEXP (x, 0)) == MEM
3256 && (GET_CODE (XEXP (XEXP (x, 0), 0)) == PRE_DEC
3257 || GET_CODE (XEXP (XEXP (x, 0), 0)) == POST_INC))
3258 return gen_rtx_MEM (outer, XEXP (XEXP (x, 0), 0));
3259
3260 if (GET_CODE (x) != REG)
146456c1
DD
3261 {
3262 rtx r = simplify_gen_subreg (outer, x, inner, byte);
3263 if (GET_CODE (r) == SUBREG
3264 && GET_CODE (x) == MEM
3265 && MEM_VOLATILE_P (x))
3266 {
3267 /* Volatile MEMs don't get simplified, but we need them to
3268 be. We are little endian, so the subreg byte is the
3269 offset. */
91140cd3 3270 r = adjust_address_nv (x, outer, byte);
146456c1
DD
3271 }
3272 return r;
3273 }
38b2d076
DD
3274
3275 r = REGNO (x);
3276 if (r >= FIRST_PSEUDO_REGISTER || r == AP_REGNO)
3277 return simplify_gen_subreg (outer, x, inner, byte);
3278
3279 if (IS_MEM_REGNO (r))
3280 return simplify_gen_subreg (outer, x, inner, byte);
3281
3282 /* This is where the complexities of our register layout are
3283 described. */
3284 if (byte == 0)
3285 nr = r;
3286 else if (outer == HImode)
3287 {
3288 if (r == R0_REGNO && byte == 2)
3289 nr = R2_REGNO;
3290 else if (r == R0_REGNO && byte == 4)
3291 nr = R1_REGNO;
3292 else if (r == R0_REGNO && byte == 6)
3293 nr = R3_REGNO;
3294 else if (r == R1_REGNO && byte == 2)
3295 nr = R3_REGNO;
3296 else if (r == A0_REGNO && byte == 2)
3297 nr = A1_REGNO;
3298 }
3299 else if (outer == SImode)
3300 {
3301 if (r == R0_REGNO && byte == 0)
3302 nr = R0_REGNO;
3303 else if (r == R0_REGNO && byte == 4)
3304 nr = R1_REGNO;
3305 }
3306 if (nr == -1)
3307 {
3308 fprintf (stderr, "m32c_subreg %s %s %d\n",
3309 mode_name[outer], mode_name[inner], byte);
3310 debug_rtx (x);
3311 gcc_unreachable ();
3312 }
3313 return gen_rtx_REG (outer, nr);
3314}
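/* For example (illustrative): asking for the HImode piece at byte 2 of
   an SImode value living in r0 returns hard register r2, matching the
   r2:r0 pair layout described above, while the same request on a
   pseudo simply falls through to simplify_gen_subreg.  */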
3315
3316/* Used to emit move instructions. We split some moves,
3317 and avoid mem-mem moves. */
3318int
ef4bddc2 3319m32c_prepare_move (rtx * operands, machine_mode mode)
38b2d076 3320{
5fd5d713
DD
3321 if (far_addr_space_p (operands[0])
3322 && CONSTANT_P (operands[1]))
3323 {
3324 operands[1] = force_reg (GET_MODE (operands[0]), operands[1]);
3325 }
38b2d076
DD
3326 if (TARGET_A16 && mode == PSImode)
3327 return m32c_split_move (operands, mode, 1);
3328 if ((GET_CODE (operands[0]) == MEM)
3329 && (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY))
3330 {
3331 rtx pmv = XEXP (operands[0], 0);
3332 rtx dest_reg = XEXP (pmv, 0);
3333 rtx dest_mod = XEXP (pmv, 1);
3334
f7df4a84 3335 emit_insn (gen_rtx_SET (dest_reg, dest_mod));
38b2d076
DD
3336 operands[0] = gen_rtx_MEM (mode, dest_reg);
3337 }
b3a13419 3338 if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
38b2d076
DD
3339 operands[1] = copy_to_mode_reg (mode, operands[1]);
3340 return 0;
3341}
3342
3343#define DEBUG_SPLIT 0
3344
3345/* Returns TRUE if the given PSImode move should be split. We split
3346 for all r8c/m16c moves, since it doesn't support them, and for
3347 POP.L as we can only *push* SImode. */
3348int
3349m32c_split_psi_p (rtx * operands)
3350{
3351#if DEBUG_SPLIT
3352 fprintf (stderr, "\nm32c_split_psi_p\n");
3353 debug_rtx (operands[0]);
3354 debug_rtx (operands[1]);
3355#endif
3356 if (TARGET_A16)
3357 {
3358#if DEBUG_SPLIT
3359 fprintf (stderr, "yes, A16\n");
3360#endif
3361 return 1;
3362 }
3363 if (GET_CODE (operands[1]) == MEM
3364 && GET_CODE (XEXP (operands[1], 0)) == POST_INC)
3365 {
3366#if DEBUG_SPLIT
3367 fprintf (stderr, "yes, pop.l\n");
3368#endif
3369 return 1;
3370 }
3371#if DEBUG_SPLIT
3372 fprintf (stderr, "no, default\n");
3373#endif
3374 return 0;
3375}
3376
3377/* Split the given move. SPLIT_ALL is 0 if splitting is optional
3378 (define_expand), 1 if it is not optional (define_insn_and_split),
3379 and 3 for define_split (alternate api). */
3380int
ef4bddc2 3381m32c_split_move (rtx * operands, machine_mode mode, int split_all)
38b2d076
DD
3382{
3383 rtx s[4], d[4];
3384 int parts, si, di, rev = 0;
3385 int rv = 0, opi = 2;
ef4bddc2 3386 machine_mode submode = HImode;
38b2d076
DD
3387 rtx *ops, local_ops[10];
3388
3389 /* define_split modifies the existing operands, but the other two
3390 emit new insns. OPS is where we store the operand pairs, which
3391 we emit later. */
3392 if (split_all == 3)
3393 ops = operands;
3394 else
3395 ops = local_ops;
3396
3397 /* Else HImode. */
3398 if (mode == DImode)
3399 submode = SImode;
3400
3401 /* Before splitting mem-mem moves, force one operand into a
3402 register. */
b3a13419 3403 if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
38b2d076
DD
3404 {
3405#if DEBUG0
3406 fprintf (stderr, "force_reg...\n");
3407 debug_rtx (operands[1]);
3408#endif
3409 operands[1] = force_reg (mode, operands[1]);
3410#if DEBUG0
3411 debug_rtx (operands[1]);
3412#endif
3413 }
3414
3415 parts = 2;
3416
3417#if DEBUG_SPLIT
b3a13419
ILT
3418 fprintf (stderr, "\nsplit_move %d all=%d\n", !can_create_pseudo_p (),
3419 split_all);
38b2d076
DD
3420 debug_rtx (operands[0]);
3421 debug_rtx (operands[1]);
3422#endif
3423
eb5f0c07
DD
3424 /* Note that split_all is not used to select the api after this
3425 point, so it's safe to set it to 3 even with define_insn. */
3426 /* None of the chips can move SI operands to sp-relative addresses,
3427 so we always split those. */
03dd17b1 3428 if (satisfies_constraint_Ss (operands[0]))
eb5f0c07
DD
3429 split_all = 3;
3430
5fd5d713
DD
3431 if (TARGET_A16
3432 && (far_addr_space_p (operands[0])
3433 || far_addr_space_p (operands[1])))
3434 split_all |= 1;
3435
38b2d076
DD
3436 /* We don't need to split these. */
3437 if (TARGET_A24
3438 && split_all != 3
3439 && (mode == SImode || mode == PSImode)
3440 && !(GET_CODE (operands[1]) == MEM
3441 && GET_CODE (XEXP (operands[1], 0)) == POST_INC))
3442 return 0;
3443
3444 /* First, enumerate the subregs we'll be dealing with. */
3445 for (si = 0; si < parts; si++)
3446 {
3447 d[si] =
3448 m32c_subreg (submode, operands[0], mode,
3449 si * GET_MODE_SIZE (submode));
3450 s[si] =
3451 m32c_subreg (submode, operands[1], mode,
3452 si * GET_MODE_SIZE (submode));
3453 }
3454
3455 /* Split pushes by emitting a sequence of smaller pushes. */
3456 if (GET_CODE (d[0]) == MEM && GET_CODE (XEXP (d[0], 0)) == PRE_DEC)
3457 {
3458 for (si = parts - 1; si >= 0; si--)
3459 {
3460 ops[opi++] = gen_rtx_MEM (submode,
3461 gen_rtx_PRE_DEC (Pmode,
3462 gen_rtx_REG (Pmode,
3463 SP_REGNO)));
3464 ops[opi++] = s[si];
3465 }
3466
3467 rv = 1;
3468 }
3469 /* Likewise for pops. */
3470 else if (GET_CODE (s[0]) == MEM && GET_CODE (XEXP (s[0], 0)) == POST_INC)
3471 {
3472 for (di = 0; di < parts; di++)
3473 {
3474 ops[opi++] = d[di];
3475 ops[opi++] = gen_rtx_MEM (submode,
3476 gen_rtx_POST_INC (Pmode,
3477 gen_rtx_REG (Pmode,
3478 SP_REGNO)));
3479 }
3480 rv = 1;
3481 }
3482 else if (split_all)
3483 {
3484 /* if d[di] == s[si] for any di < si, we'll early clobber. */
3485 for (di = 0; di < parts - 1; di++)
3486 for (si = di + 1; si < parts; si++)
3487 if (reg_mentioned_p (d[di], s[si]))
3488 rev = 1;
3489
3490 if (rev)
3491 for (si = 0; si < parts; si++)
3492 {
3493 ops[opi++] = d[si];
3494 ops[opi++] = s[si];
3495 }
3496 else
3497 for (si = parts - 1; si >= 0; si--)
3498 {
3499 ops[opi++] = d[si];
3500 ops[opi++] = s[si];
3501 }
3502 rv = 1;
3503 }
3504 /* Now emit any moves we may have accumulated. */
3505 if (rv && split_all != 3)
3506 {
3507 int i;
3508 for (i = 2; i < opi; i += 2)
3509 emit_move_insn (ops[i], ops[i + 1]);
3510 }
3511 return rv;
3512}
3513
07127a0a
DD
3514/* The m32c has a number of opcodes that act like memcpy, strcmp, and
3515 the like. For the R8C they expect one of the addresses to be in
3516 R1L:An so we need to arrange for that. Otherwise, it's just a
3517 matter of picking out the operands we want and emitting the right
3518 pattern for them. All these expanders, which correspond to
3519 patterns in blkmov.md, must return nonzero if they expand the insn,
3520 or zero if they should FAIL. */
3521
3522/* This is a memset() opcode. All operands are implied, so we need to
3523 arrange for them to be in the right registers. The opcode wants
3524 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3525 the count (HI), and $2 the value (QI). */
3526int
3527m32c_expand_setmemhi(rtx *operands)
3528{
3529 rtx desta, count, val;
3530 rtx desto, counto;
3531
3532 desta = XEXP (operands[0], 0);
3533 count = operands[1];
3534 val = operands[2];
3535
3536 desto = gen_reg_rtx (Pmode);
3537 counto = gen_reg_rtx (HImode);
3538
3539 if (GET_CODE (desta) != REG
3540 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3541 desta = copy_to_mode_reg (Pmode, desta);
3542
3543 /* This looks like an arbitrary restriction, but this is by far the
3544 most common case. For counts 8..14 this actually results in
3545 smaller code with no speed penalty because the half-sized
3546 constant can be loaded with a shorter opcode. */
3547 if (GET_CODE (count) == CONST_INT
3548 && GET_CODE (val) == CONST_INT
3549 && ! (INTVAL (count) & 1)
3550 && (INTVAL (count) > 1)
3551 && (INTVAL (val) <= 7 && INTVAL (val) >= -8))
3552 {
3553 unsigned v = INTVAL (val) & 0xff;
3554 v = v | (v << 8);
3555 count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3556 val = copy_to_mode_reg (HImode, GEN_INT (v));
3557 if (TARGET_A16)
3558 emit_insn (gen_setmemhi_whi_op (desto, counto, val, desta, count));
3559 else
3560 emit_insn (gen_setmemhi_wpsi_op (desto, counto, val, desta, count));
3561 return 1;
3562 }
3563
3564 /* This is the generalized memset() case. */
3565 if (GET_CODE (val) != REG
3566 || REGNO (val) < FIRST_PSEUDO_REGISTER)
3567 val = copy_to_mode_reg (QImode, val);
3568
3569 if (GET_CODE (count) != REG
3570 || REGNO (count) < FIRST_PSEUDO_REGISTER)
3571 count = copy_to_mode_reg (HImode, count);
3572
3573 if (TARGET_A16)
3574 emit_insn (gen_setmemhi_bhi_op (desto, counto, val, desta, count));
3575 else
3576 emit_insn (gen_setmemhi_bpsi_op (desto, counto, val, desta, count));
3577
3578 return 1;
3579}
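/* Worked example (illustrative): for memset (dst, 1, 10) the constant
   path above widens the fill byte to 0x0101 and halves the count to 5,
   so the word-wide opcode stores five HImode words instead of ten
   bytes.  */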
3580
3581/* This is a memcpy() opcode. All operands are implied, so we need to
3582 arrange for them to be in the right registers. The opcode wants
3583 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3584 is the source (MEM:BLK), and $2 the count (HI). */
3585int
76715c32 3586m32c_expand_cpymemhi(rtx *operands)
07127a0a
DD
3587{
3588 rtx desta, srca, count;
3589 rtx desto, srco, counto;
3590
3591 desta = XEXP (operands[0], 0);
3592 srca = XEXP (operands[1], 0);
3593 count = operands[2];
3594
3595 desto = gen_reg_rtx (Pmode);
3596 srco = gen_reg_rtx (Pmode);
3597 counto = gen_reg_rtx (HImode);
3598
3599 if (GET_CODE (desta) != REG
3600 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3601 desta = copy_to_mode_reg (Pmode, desta);
3602
3603 if (GET_CODE (srca) != REG
3604 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3605 srca = copy_to_mode_reg (Pmode, srca);
3606
3607 /* Similar to setmem, but we don't need to check the value. */
3608 if (GET_CODE (count) == CONST_INT
3609 && ! (INTVAL (count) & 1)
3610 && (INTVAL (count) > 1))
3611 {
3612 count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3613 if (TARGET_A16)
76715c32 3614 emit_insn (gen_cpymemhi_whi_op (desto, srco, counto, desta, srca, count));
07127a0a 3615 else
76715c32 3616 emit_insn (gen_cpymemhi_wpsi_op (desto, srco, counto, desta, srca, count));
07127a0a
DD
3617 return 1;
3618 }
3619
3620 /* This is the generalized memcpy() case. */
3621 if (GET_CODE (count) != REG
3622 || REGNO (count) < FIRST_PSEUDO_REGISTER)
3623 count = copy_to_mode_reg (HImode, count);
3624
3625 if (TARGET_A16)
76715c32 3626 emit_insn (gen_cpymemhi_bhi_op (desto, srco, counto, desta, srca, count));
07127a0a 3627 else
76715c32 3628 emit_insn (gen_cpymemhi_bpsi_op (desto, srco, counto, desta, srca, count));
07127a0a
DD
3629
3630 return 1;
3631}
3632
3633/* This is a stpcpy() opcode. $0 is the destination (MEM:BLK) after
3634 the copy, which should point to the NUL at the end of the string,
3635 $1 is the destination (MEM:BLK), and $2 is the source (MEM:BLK).
3636 Since our opcode leaves the destination pointing *after* the NUL,
3637 we must emit an adjustment. */
3638int
3639m32c_expand_movstr(rtx *operands)
3640{
3641 rtx desta, srca;
3642 rtx desto, srco;
3643
3644 desta = XEXP (operands[1], 0);
3645 srca = XEXP (operands[2], 0);
3646
3647 desto = gen_reg_rtx (Pmode);
3648 srco = gen_reg_rtx (Pmode);
3649
3650 if (GET_CODE (desta) != REG
3651 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3652 desta = copy_to_mode_reg (Pmode, desta);
3653
3654 if (GET_CODE (srca) != REG
3655 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3656 srca = copy_to_mode_reg (Pmode, srca);
3657
3658 emit_insn (gen_movstr_op (desto, srco, desta, srca));
3659 /* desto ends up being a1, which allows this type of add through MOVA. */
3660 emit_insn (gen_addpsi3 (operands[0], desto, GEN_INT (-1)));
3661
3662 return 1;
3663}
3664
3665/* This is a strcmp() opcode. $0 is the destination (HI) which holds
3666 <=>0 depending on the comparison, $1 is one string (MEM:BLK), and
3667 $2 is the other (MEM:BLK). We must do the comparison, and then
3668 convert the flags to a signed integer result. */
3669int
3670m32c_expand_cmpstr(rtx *operands)
3671{
3672 rtx src1a, src2a;
3673
3674 src1a = XEXP (operands[1], 0);
3675 src2a = XEXP (operands[2], 0);
3676
3677 if (GET_CODE (src1a) != REG
3678 || REGNO (src1a) < FIRST_PSEUDO_REGISTER)
3679 src1a = copy_to_mode_reg (Pmode, src1a);
3680
3681 if (GET_CODE (src2a) != REG
3682 || REGNO (src2a) < FIRST_PSEUDO_REGISTER)
3683 src2a = copy_to_mode_reg (Pmode, src2a);
3684
3685 emit_insn (gen_cmpstrhi_op (src1a, src2a, src1a, src2a));
3686 emit_insn (gen_cond_to_int (operands[0]));
3687
3688 return 1;
3689}
3690
3691
23fed240
DD
3692typedef rtx (*shift_gen_func)(rtx, rtx, rtx);
3693
3694static shift_gen_func
3695shift_gen_func_for (int mode, int code)
3696{
3697#define GFF(m,c,f) if (mode == m && code == c) return f
3698 GFF(QImode, ASHIFT, gen_ashlqi3_i);
3699 GFF(QImode, ASHIFTRT, gen_ashrqi3_i);
3700 GFF(QImode, LSHIFTRT, gen_lshrqi3_i);
3701 GFF(HImode, ASHIFT, gen_ashlhi3_i);
3702 GFF(HImode, ASHIFTRT, gen_ashrhi3_i);
3703 GFF(HImode, LSHIFTRT, gen_lshrhi3_i);
3704 GFF(PSImode, ASHIFT, gen_ashlpsi3_i);
3705 GFF(PSImode, ASHIFTRT, gen_ashrpsi3_i);
3706 GFF(PSImode, LSHIFTRT, gen_lshrpsi3_i);
3707 GFF(SImode, ASHIFT, TARGET_A16 ? gen_ashlsi3_16 : gen_ashlsi3_24);
3708 GFF(SImode, ASHIFTRT, TARGET_A16 ? gen_ashrsi3_16 : gen_ashrsi3_24);
3709 GFF(SImode, LSHIFTRT, TARGET_A16 ? gen_lshrsi3_16 : gen_lshrsi3_24);
3710#undef GFF
07127a0a 3711 gcc_unreachable ();
23fed240
DD
3712}
3713
38b2d076
DD
3714/* The m32c only has one shift, but it takes a signed count. GCC
3715 doesn't want this, so we fake it by negating any shift count when
07127a0a
DD
3716 we're pretending to shift the other way. Also, the shift count is
3717 limited to -8..8. It's slightly better to use two shifts for 9..15
3718 than to load the count into r1h, so we do that too. */
int
m32c_prepare_shift (rtx * operands, int scale, int shift_code)
{
  machine_mode mode = GET_MODE (operands[0]);
  shift_gen_func func = shift_gen_func_for (mode, shift_code);
  rtx temp;

  if (GET_CODE (operands[2]) == CONST_INT)
    {
      int maxc = TARGET_A24 && (mode == PSImode || mode == SImode) ? 32 : 8;
      int count = INTVAL (operands[2]) * scale;

      while (count > maxc)
        {
          temp = gen_reg_rtx (mode);
          emit_insn (func (temp, operands[1], GEN_INT (maxc)));
          operands[1] = temp;
          count -= maxc;
        }
      while (count < -maxc)
        {
          temp = gen_reg_rtx (mode);
          emit_insn (func (temp, operands[1], GEN_INT (-maxc)));
          operands[1] = temp;
          count += maxc;
        }
      emit_insn (func (operands[0], operands[1], GEN_INT (count)));
      return 1;
    }

  temp = gen_reg_rtx (QImode);
  if (scale < 0)
    /* The pattern has a NEG that corresponds to this.  */
    emit_move_insn (temp, gen_rtx_NEG (QImode, operands[2]));
  else if (TARGET_A16 && mode == SImode)
    /* We copy the count because the code below may modify it, and we
       don't want to clobber the original value.  */
    emit_move_insn (temp, operands[2]);
  else
    /* We'll only use it for the shift, no point emitting a move.  */
    temp = operands[2];

  if (TARGET_A16 && GET_MODE_SIZE (mode) == 4)
    {
      /* The m16c has a limit of -16..16 for SI shifts, even when the
         shift count is in a register.  Since there are so many targets
         of these shifts, it's better to expand the RTL here than to
         call a helper function.

         The resulting code looks something like this:

               cmp.b   r1h,-16
               jge.b   1f
               shl.l   -16,dest
               add.b   r1h,16
         1f:   cmp.b   r1h,16
               jle.b   1f
               shl.l   16,dest
               sub.b   r1h,16
         1f:   shl.l   r1h,dest

         We take advantage of the fact that "negative" shifts are
         undefined to skip one of the comparisons.  */
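      /* Concretely: a left shift never reaches this point with a
         negative count (reversed shifts were negated above), so only
         the count > 16 case needs the fix-up; a right shift likewise
         only needs the count < -16 case.  */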

      rtx count;
      rtx tempvar;
      rtx_insn *insn;

      emit_move_insn (operands[0], operands[1]);

      count = temp;
      rtx_code_label *label = gen_label_rtx ();
      LABEL_NUSES (label) ++;

      tempvar = gen_reg_rtx (mode);

      if (shift_code == ASHIFT)
        {
          /* This is a left shift.  We only need check positive counts.  */
          emit_jump_insn (gen_cbranchqi4 (gen_rtx_LE (VOIDmode, 0, 0),
                                          count, GEN_INT (16), label));
          emit_insn (func (tempvar, operands[0], GEN_INT (8)));
          emit_insn (func (operands[0], tempvar, GEN_INT (8)));
          insn = emit_insn (gen_addqi3 (count, count, GEN_INT (-16)));
          emit_label_after (label, insn);
        }
      else
        {
          /* This is a right shift.  We only need check negative counts.  */
          emit_jump_insn (gen_cbranchqi4 (gen_rtx_GE (VOIDmode, 0, 0),
                                          count, GEN_INT (-16), label));
          emit_insn (func (tempvar, operands[0], GEN_INT (-8)));
          emit_insn (func (operands[0], tempvar, GEN_INT (-8)));
          insn = emit_insn (gen_addqi3 (count, count, GEN_INT (16)));
          emit_label_after (label, insn);
        }
      operands[1] = operands[0];
      emit_insn (func (operands[0], operands[0], count));
      return 1;
    }

  operands[2] = temp;
  return 0;
}

/* The m32c has a limited range of operations that work on PSImode
   values; we have to expand to SI, do the math, and truncate back to
   PSI.  Yes, this is expensive, but hopefully gcc will learn to avoid
   those cases.  */
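/* Roughly, the expansion below computes a = (PSI) ((SI) b * (SI) i),
   using expand_simple_binop with OPTAB_LIB so the SImode multiply may
   fall back to a library call if no insn pattern matches.  */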
void
m32c_expand_neg_mulpsi3 (rtx * operands)
{
  /* operands: a = b * i */
  rtx temp1; /* b as SI */
  rtx scale; /* i as SI */
  rtx temp2; /* a*b as SI */

  temp1 = gen_reg_rtx (SImode);
  temp2 = gen_reg_rtx (SImode);
  if (GET_CODE (operands[2]) != CONST_INT)
    {
      scale = gen_reg_rtx (SImode);
      emit_insn (gen_zero_extendpsisi2 (scale, operands[2]));
    }
  else
    scale = copy_to_mode_reg (SImode, operands[2]);

  emit_insn (gen_zero_extendpsisi2 (temp1, operands[1]));
  temp2 = expand_simple_binop (SImode, MULT, temp1, scale, temp2, 1, OPTAB_LIB);
  emit_insn (gen_truncsipsi2 (operands[0], temp2));
}

/* Pattern Output Functions */

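/* Expand a conditional move.  Returns nonzero when the caller should
   fall back to some other expansion, zero when the move has been
   emitted here.  Only EQ/NE conditions selecting between two constant
   integers are handled; an NE is first canonicalized to EQ by swapping
   the two arms.  */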
int
m32c_expand_movcc (rtx *operands)
{
  rtx rel = operands[1];

  if (GET_CODE (rel) != EQ && GET_CODE (rel) != NE)
    return 1;
  if (GET_CODE (operands[2]) != CONST_INT
      || GET_CODE (operands[3]) != CONST_INT)
    return 1;
  if (GET_CODE (rel) == NE)
    {
      rtx tmp = operands[2];
      operands[2] = operands[3];
      operands[3] = tmp;
      rel = gen_rtx_EQ (GET_MODE (rel), XEXP (rel, 0), XEXP (rel, 1));
    }

  emit_move_insn (operands[0],
                  gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
                                        rel,
                                        operands[2],
                                        operands[3]));
  return 0;
}

/* Used for the "insv" pattern.  Return nonzero to fail, else done.  */
int
m32c_expand_insv (rtx *operands)
{
  rtx op0, src0, p;
  int mask;

  if (INTVAL (operands[1]) != 1)
    return 1;

  /* Our insv opcode (bset, bclr) can only insert a one-bit constant.  */
  if (GET_CODE (operands[3]) != CONST_INT)
    return 1;
  if (INTVAL (operands[3]) != 0
      && INTVAL (operands[3]) != 1
      && INTVAL (operands[3]) != -1)
    return 1;

  mask = 1 << INTVAL (operands[2]);

  op0 = operands[0];
  if (GET_CODE (op0) == SUBREG
      && SUBREG_BYTE (op0) == 0)
    {
      rtx sub = SUBREG_REG (op0);
      if (GET_MODE (sub) == HImode || GET_MODE (sub) == QImode)
        op0 = sub;
    }

  if (!can_create_pseudo_p ()
      || (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0)))
    src0 = op0;
  else
    {
      src0 = gen_reg_rtx (GET_MODE (op0));
      emit_move_insn (src0, op0);
    }

  if (GET_MODE (op0) == HImode
      && INTVAL (operands[2]) >= 8
      && GET_CODE (op0) == MEM)
    {
      /* We are little endian.  */
      rtx new_mem = gen_rtx_MEM (QImode, plus_constant (Pmode,
                                                        XEXP (op0, 0), 1));
      MEM_COPY_ATTRIBUTES (new_mem, op0);
      mask >>= 8;
    }

  /* First, we generate a mask with the correct polarity.  If we are
     storing a zero, we want an AND mask, so invert it.  */
  if (INTVAL (operands[3]) == 0)
    {
      /* Storing a zero, use an AND mask.  */
      if (GET_MODE (op0) == HImode)
        mask ^= 0xffff;
      else
        mask ^= 0xff;
    }
  /* Now we need to properly sign-extend the mask in case we need to
     fall back to an AND or OR opcode.  */
  if (GET_MODE (op0) == HImode)
    {
      if (mask & 0x8000)
        mask -= 0x10000;
    }
  else
    {
      if (mask & 0x80)
        mask -= 0x100;
    }

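  /* The switch index below packs three independent choices into a
     3-bit number: bit 2 selects OR (storing a one) versus AND
     (storing a zero), bit 1 selects HImode versus QImode, and bit 0
     selects the A24 versus A16 variant of the pattern.  */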
  switch ( (INTVAL (operands[3]) ? 4 : 0)
           + ((GET_MODE (op0) == HImode) ? 2 : 0)
           + (TARGET_A24 ? 1 : 0))
    {
    case 0: p = gen_andqi3_16 (op0, src0, GEN_INT (mask)); break;
    case 1: p = gen_andqi3_24 (op0, src0, GEN_INT (mask)); break;
    case 2: p = gen_andhi3_16 (op0, src0, GEN_INT (mask)); break;
    case 3: p = gen_andhi3_24 (op0, src0, GEN_INT (mask)); break;
    case 4: p = gen_iorqi3_16 (op0, src0, GEN_INT (mask)); break;
    case 5: p = gen_iorqi3_24 (op0, src0, GEN_INT (mask)); break;
    case 6: p = gen_iorhi3_16 (op0, src0, GEN_INT (mask)); break;
    case 7: p = gen_iorhi3_24 (op0, src0, GEN_INT (mask)); break;
    default: p = NULL_RTX; break; /* Not reached, but silences a warning.  */
    }

  emit_insn (p);
  return 0;
}

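/* Return the assembler template for an scc-style operation: store the
   result of a comparison as 0 or 1.  EQ/NE into r0l can be done with a
   single STZX; otherwise we emit a bm<cond> of bit 0 of the destination
   followed by an and.b #1 to clear the remaining bits.  */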
const char *
m32c_scc_pattern(rtx *operands, RTX_CODE code)
{
  static char buf[30];
  if (GET_CODE (operands[0]) == REG
      && REGNO (operands[0]) == R0_REGNO)
    {
      if (code == EQ)
        return "stzx\t#1,#0,r0l";
      if (code == NE)
        return "stzx\t#0,#1,r0l";
    }
  sprintf(buf, "bm%s\t0,%%h0\n\tand.b\t#1,%%0", GET_RTX_NAME (code));
  return buf;
}

/* Encode symbol attributes of a SYMBOL_REF into its
   SYMBOL_REF_FLAGS.  */
static void
m32c_encode_section_info (tree decl, rtx rtl, int first)
{
  int extra_flags = 0;

  default_encode_section_info (decl, rtl, first);
  if (TREE_CODE (decl) == FUNCTION_DECL
      && m32c_special_page_vector_p (decl))
    extra_flags = SYMBOL_FLAG_FUNCVEC_FUNCTION;

  if (extra_flags)
    SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= extra_flags;
}

/* Returns TRUE if the current function is a leaf, and thus we can
   determine which registers an interrupt function really needs to
   save.  The logic below is mostly about finding the insn sequence
   that's the function, versus any sequence that might be open for the
   current insn.  */
static int
m32c_leaf_function_p (void)
{
  int rv;

  push_topmost_sequence ();
  rv = leaf_function_p ();
  pop_topmost_sequence ();
  return rv;
}

/* Returns TRUE if the current function needs to use the ENTER/EXIT
   opcodes.  If the function doesn't need the frame base or stack
   pointer, it can use the simpler RTS opcode.  */
static bool
m32c_function_needs_enter (void)
{
  rtx_insn *insn;
  rtx sp = gen_rtx_REG (Pmode, SP_REGNO);
  rtx fb = gen_rtx_REG (Pmode, FB_REGNO);

  for (insn = get_topmost_sequence ()->first; insn; insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn))
      {
        if (reg_mentioned_p (sp, insn))
          return true;
        if (reg_mentioned_p (fb, insn))
          return true;
      }
  return false;
}

/* Mark all the subexpressions of the PARALLEL rtx PAR as
   frame-related.  Return PAR.

   dwarf2out.c:dwarf2out_frame_debug_expr ignores sub-expressions of a
   PARALLEL rtx other than the first if they do not have the
   FRAME_RELATED flag set on them.  So this function is handy for
   marking up 'enter' instructions.  */
static rtx
m32c_all_frame_related (rtx par)
{
  int len = XVECLEN (par, 0);
  int i;

  for (i = 0; i < len; i++)
    F (XVECEXP (par, 0, i));

  return par;
}

/* Emits the prologue.  See the frame layout comment earlier in this
   file.  We can reserve up to 256 bytes with the ENTER opcode, beyond
   that we manually update sp.  */
void
m32c_emit_prologue (void)
{
  int frame_size, extra_frame_size = 0, reg_save_size;
  int complex_prologue = 0;

  cfun->machine->is_leaf = m32c_leaf_function_p ();
  if (interrupt_p (cfun->decl))
    {
      cfun->machine->is_interrupt = 1;
      complex_prologue = 1;
    }
  else if (bank_switch_p (cfun->decl))
    warning (OPT_Wattributes,
             "%<bank_switch%> has no effect on non-interrupt functions");

  reg_save_size = m32c_pushm_popm (PP_justcount);

  if (interrupt_p (cfun->decl))
    {
      if (bank_switch_p (cfun->decl))
        emit_insn (gen_fset_b ());
      else if (cfun->machine->intr_pushm)
        emit_insn (gen_pushm (GEN_INT (cfun->machine->intr_pushm)));
    }

  frame_size =
    m32c_initial_elimination_offset (FB_REGNO, SP_REGNO) - reg_save_size;
  if (frame_size == 0
      && !m32c_function_needs_enter ())
    cfun->machine->use_rts = 1;

  if (flag_stack_usage_info)
    current_function_static_stack_size = frame_size;

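  /* For example (a sketch of the intent, not literal output): a
     300-byte frame is reserved as an ENTER covering 254 bytes plus an
     explicit 46-byte subtraction from the stack pointer below.  */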
  if (frame_size > 254)
    {
      extra_frame_size = frame_size - 254;
      frame_size = 254;
    }
  if (cfun->machine->use_rts == 0)
    F (emit_insn (m32c_all_frame_related
                  (TARGET_A16
                   ? gen_prologue_enter_16 (GEN_INT (frame_size + 2))
                   : gen_prologue_enter_24 (GEN_INT (frame_size + 4)))));

  if (extra_frame_size)
    {
      complex_prologue = 1;
      if (TARGET_A16)
        F (emit_insn (gen_addhi3 (gen_rtx_REG (HImode, SP_REGNO),
                                  gen_rtx_REG (HImode, SP_REGNO),
                                  GEN_INT (-extra_frame_size))));
      else
        F (emit_insn (gen_addpsi3 (gen_rtx_REG (PSImode, SP_REGNO),
                                   gen_rtx_REG (PSImode, SP_REGNO),
                                   GEN_INT (-extra_frame_size))));
    }

  complex_prologue += m32c_pushm_popm (PP_pushm);

  /* This just emits a comment into the .s file for debugging.  */
  if (complex_prologue)
    emit_insn (gen_prologue_end ());
}

/* Likewise, for the epilogue.  The only exception is that, for
   interrupts, we must manually unwind the frame as the REIT opcode
   doesn't do that.  */
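/* Roughly, the interrupt path below is: pop the saved registers, copy
   the frame base back into the stack pointer via a0, pop the old frame
   base, then return with REIT (or FREIT where available); ordinary
   functions just use EXITD or RTS.  */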
void
m32c_emit_epilogue (void)
{
  int popm_count = m32c_pushm_popm (PP_justcount);

  /* This just emits a comment into the .s file for debugging.  */
  if (popm_count > 0 || cfun->machine->is_interrupt)
    emit_insn (gen_epilogue_start ());

  if (popm_count > 0)
    m32c_pushm_popm (PP_popm);

  if (cfun->machine->is_interrupt)
    {
      machine_mode spmode = TARGET_A16 ? HImode : PSImode;

      /* REIT clears B flag and restores $fp for us, but we still
         have to fix up the stack.  USE_RTS just means we didn't
         emit ENTER.  */
      if (!cfun->machine->use_rts)
        {
          emit_move_insn (gen_rtx_REG (spmode, A0_REGNO),
                          gen_rtx_REG (spmode, FP_REGNO));
          emit_move_insn (gen_rtx_REG (spmode, SP_REGNO),
                          gen_rtx_REG (spmode, A0_REGNO));
          /* We can't just add this to the POPM because it would be in
             the wrong order, and wouldn't fix the stack if we're bank
             switching.  */
          if (TARGET_A16)
            emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, FP_REGNO)));
          else
            emit_insn (gen_poppsi (gen_rtx_REG (PSImode, FP_REGNO)));
        }
      if (!bank_switch_p (cfun->decl) && cfun->machine->intr_pushm)
        emit_insn (gen_popm (GEN_INT (cfun->machine->intr_pushm)));

      /* The FREIT (Fast REturn from InTerrupt) instruction should be
         generated only for M32C/M32CM targets (generate the REIT
         instruction otherwise).  */
      if (fast_interrupt_p (cfun->decl))
        {
          /* Check if the fast_interrupt attribute is set for an M32C
             or M32CM target.  */
          if (TARGET_A24)
            {
              emit_jump_insn (gen_epilogue_freit ());
            }
          /* If the fast_interrupt attribute is set for an R8C or M16C
             target, ignore the attribute and generate a REIT
             instruction instead.  */
          else
            {
              warning (OPT_Wattributes,
                       "%<fast_interrupt%> attribute directive ignored");
              emit_jump_insn (gen_epilogue_reit_16 ());
            }
        }
      else if (TARGET_A16)
        emit_jump_insn (gen_epilogue_reit_16 ());
      else
        emit_jump_insn (gen_epilogue_reit_24 ());
    }
  else if (cfun->machine->use_rts)
    emit_jump_insn (gen_epilogue_rts ());
  else if (TARGET_A16)
    emit_jump_insn (gen_epilogue_exitd_16 ());
  else
    emit_jump_insn (gen_epilogue_exitd_24 ());
}

void
m32c_emit_eh_epilogue (rtx ret_addr)
{
  /* R0[R2] has the stack adjustment.  R1[R3] has the address to
     return to.  We have to fudge the stack, pop everything, pop SP
     (fudged), and return (fudged).  This is actually easier to do in
     assembler, so punt to libgcc.  */
  emit_jump_insn (gen_eh_epilogue (ret_addr, cfun->machine->eh_stack_adjust));
  /* emit_clobber (gen_rtx_REG (HImode, R0L_REGNO)); */
}

/* Indicate which flags must be properly set for a given conditional.  */
static int
flags_needed_for_conditional (rtx cond)
{
  switch (GET_CODE (cond))
    {
    case LE:
    case GT:
      return FLAGS_OSZ;
    case LEU:
    case GTU:
      return FLAGS_ZC;
    case LT:
    case GE:
      return FLAGS_OS;
    case LTU:
    case GEU:
      return FLAGS_C;
    case EQ:
    case NE:
      return FLAGS_Z;
    default:
      return FLAGS_N;
    }
}

#define DEBUG_CMP 0

/* Returns true if a compare insn is redundant because it would only
   set flags that are already set correctly.  */
static bool
m32c_compare_redundant (rtx_insn *cmp, rtx *operands)
{
  int flags_needed;
  int pflags;
  rtx_insn *prev;
  rtx pp, next;
  rtx op0, op1;
#if DEBUG_CMP
  int prev_icode, i;
#endif

  op0 = operands[0];
  op1 = operands[1];

#if DEBUG_CMP
  fprintf(stderr, "\n\033[32mm32c_compare_redundant\033[0m\n");
  debug_rtx(cmp);
  for (i=0; i<2; i++)
    {
      fprintf(stderr, "operands[%d] = ", i);
      debug_rtx(operands[i]);
    }
#endif

  next = next_nonnote_insn (cmp);
  if (!next || !INSN_P (next))
    {
#if DEBUG_CMP
      fprintf(stderr, "compare not followed by insn\n");
      debug_rtx(next);
#endif
      return false;
    }
  if (GET_CODE (PATTERN (next)) == SET
      && GET_CODE (XEXP (PATTERN (next), 1)) == IF_THEN_ELSE)
    {
      next = XEXP (XEXP (PATTERN (next), 1), 0);
    }
  else if (GET_CODE (PATTERN (next)) == SET)
    {
      /* If this is a conditional, flags_needed will be something
         other than FLAGS_N, which we test below.  */
      next = XEXP (PATTERN (next), 1);
    }
  else
    {
#if DEBUG_CMP
      fprintf(stderr, "compare not followed by conditional\n");
      debug_rtx(next);
#endif
      return false;
    }
#if DEBUG_CMP
  fprintf(stderr, "conditional is: ");
  debug_rtx(next);
#endif

  flags_needed = flags_needed_for_conditional (next);
  if (flags_needed == FLAGS_N)
    {
#if DEBUG_CMP
      fprintf(stderr, "conditional needs no flags\n");
      debug_rtx(next);
#endif
      return false;
    }

  /* Compare doesn't set overflow and carry the same way that
     arithmetic instructions do, so we can't replace those.  */
  if (flags_needed & FLAGS_OC)
    return false;

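  /* Walk backwards from the compare to the most recent flag-setting
     insn, giving up if any intermediate insn uses or changes the values
     being compared, or if the comparison involves memory at all.  */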
  prev = cmp;
  do {
    prev = prev_nonnote_insn (prev);
    if (!prev)
      {
#if DEBUG_CMP
        fprintf(stderr, "No previous insn.\n");
#endif
        return false;
      }
    if (!INSN_P (prev))
      {
#if DEBUG_CMP
        fprintf(stderr, "Previous insn is a non-insn.\n");
#endif
        return false;
      }
    pp = PATTERN (prev);
    if (GET_CODE (pp) != SET)
      {
#if DEBUG_CMP
        fprintf(stderr, "Previous insn is not a SET.\n");
#endif
        return false;
      }
    pflags = get_attr_flags (prev);

    /* Looking up attributes of previous insns corrupted the recog
       tables.  */
    INSN_UID (cmp) = -1;
    recog (PATTERN (cmp), cmp, 0);

    if (pflags == FLAGS_N
        && reg_mentioned_p (op0, pp))
      {
#if DEBUG_CMP
        fprintf(stderr, "intermediate non-flags insn uses op:\n");
        debug_rtx(prev);
#endif
        return false;
      }

    /* Check for comparisons against memory - between volatiles and
       aliases, we just can't risk this one.  */
    if (GET_CODE (operands[0]) == MEM
        || GET_CODE (operands[1]) == MEM)
      {
#if DEBUG_CMP
        fprintf(stderr, "comparisons with memory:\n");
        debug_rtx(prev);
#endif
        return false;
      }

    /* Check for PREV changing a register that's used to compute a
       value in CMP, even if it doesn't otherwise change flags.  */
    if (GET_CODE (operands[0]) == REG
        && rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[0]))
      {
#if DEBUG_CMP
        fprintf(stderr, "sub-value affected, op0:\n");
        debug_rtx(prev);
#endif
        return false;
      }
    if (GET_CODE (operands[1]) == REG
        && rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[1]))
      {
#if DEBUG_CMP
        fprintf(stderr, "sub-value affected, op1:\n");
        debug_rtx(prev);
#endif
        return false;
      }

  } while (pflags == FLAGS_N);
#if DEBUG_CMP
  fprintf(stderr, "previous flag-setting insn:\n");
  debug_rtx(prev);
  debug_rtx(pp);
#endif

  if (GET_CODE (pp) == SET
      && GET_CODE (XEXP (pp, 0)) == REG
      && REGNO (XEXP (pp, 0)) == FLG_REGNO
      && GET_CODE (XEXP (pp, 1)) == COMPARE)
    {
      /* Adjacent cbranches must have the same operands to be
         redundant.  */
      rtx pop0 = XEXP (XEXP (pp, 1), 0);
      rtx pop1 = XEXP (XEXP (pp, 1), 1);
#if DEBUG_CMP
      fprintf(stderr, "adjacent cbranches\n");
      debug_rtx(pop0);
      debug_rtx(pop1);
#endif
      if (rtx_equal_p (op0, pop0)
          && rtx_equal_p (op1, pop1))
        return true;
#if DEBUG_CMP
      fprintf(stderr, "prev cmp not same\n");
#endif
      return false;
    }

  /* Else the previous insn must be a SET, with either the source or
     dest equal to operands[0], and operands[1] must be zero.  */

  if (!rtx_equal_p (op1, const0_rtx))
    {
#if DEBUG_CMP
      fprintf(stderr, "operands[1] not const0_rtx\n");
#endif
      return false;
    }
  if (GET_CODE (pp) != SET)
    {
#if DEBUG_CMP
      fprintf (stderr, "pp not set\n");
#endif
      return false;
    }
  if (!rtx_equal_p (op0, SET_SRC (pp))
      && !rtx_equal_p (op0, SET_DEST (pp)))
    {
#if DEBUG_CMP
      fprintf(stderr, "operands[0] not found in set\n");
#endif
      return false;
    }

#if DEBUG_CMP
  fprintf(stderr, "cmp flags %x prev flags %x\n", flags_needed, pflags);
#endif
  if ((pflags & flags_needed) == flags_needed)
    return true;

  return false;
}

/* Return the pattern for a compare.  This will be commented out if
   the compare is redundant, else a normal pattern is returned.  Thus,
   the assembler output says where the compare would have been.  */
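/* For instance, comparing two HImode operands selects the "w" size
   letter below, giving "cmp.w\t%1,%0"; when the compare turns out to be
   redundant the same template is returned with its leading ';' kept, so
   it shows up in the assembler output only as a comment.  */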
char *
m32c_output_compare (rtx_insn *insn, rtx *operands)
{
  static char templ[] = ";cmp.b\t%1,%0";
  /*                          ^ 5  */

  templ[5] = " bwll"[GET_MODE_SIZE(GET_MODE(operands[0]))];
  if (m32c_compare_redundant (insn, operands))
    {
#if DEBUG_CMP
      fprintf(stderr, "cbranch: cmp not needed\n");
#endif
      return templ;
    }

#if DEBUG_CMP
  fprintf(stderr, "cbranch: cmp needed: `%s'\n", templ + 1);
#endif
  return templ + 1;
}

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO m32c_encode_section_info

/* If the frame pointer isn't used, we detect it manually.  But the
   stack pointer doesn't have as flexible addressing as the frame
   pointer, so we always assume we have it.  */

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED hook_bool_void_true

#undef TARGET_HARD_REGNO_NREGS
#define TARGET_HARD_REGNO_NREGS m32c_hard_regno_nregs
#undef TARGET_HARD_REGNO_MODE_OK
#define TARGET_HARD_REGNO_MODE_OK m32c_hard_regno_mode_ok
#undef TARGET_MODES_TIEABLE_P
#define TARGET_MODES_TIEABLE_P m32c_modes_tieable_p

#undef TARGET_CAN_CHANGE_MODE_CLASS
#define TARGET_CAN_CHANGE_MODE_CLASS m32c_can_change_mode_class

/* The Global `targetm' Variable.  */

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-m32c.h"