/* Target Code for R8C/M16C/M32C
   Copyright (C) 2005-2015 Free Software Foundation, Inc.
   Contributed by Red Hat.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "df.h"
#include "tm_p.h"
#include "optabs.h"
#include "regs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "reload.h"
#include "stor-layout.h"
#include "varasm.h"
#include "calls.h"
#include "explow.h"
#include "expr.h"
#include "tm-constrs.h"
#include "builtins.h"

/* This file should be included last.  */
#include "target-def.h"

/* Prototypes */

/* Used by m32c_pushm_popm.  */
typedef enum
{
  PP_pushm,
  PP_popm,
  PP_justcount
} Push_Pop_Type;

static bool m32c_function_needs_enter (void);
static tree interrupt_handler (tree *, tree, tree, int, bool *);
static tree function_vector_handler (tree *, tree, tree, int, bool *);
static int interrupt_p (tree node);
static int bank_switch_p (tree node);
static int fast_interrupt_p (tree node);
static int interrupt_p (tree node);
static bool m32c_asm_integer (rtx, unsigned int, int);
static int m32c_comp_type_attributes (const_tree, const_tree);
static bool m32c_fixed_condition_code_regs (unsigned int *, unsigned int *);
static struct machine_function *m32c_init_machine_status (void);
static void m32c_insert_attributes (tree, tree *);
static bool m32c_legitimate_address_p (machine_mode, rtx, bool);
static bool m32c_addr_space_legitimate_address_p (machine_mode, rtx, bool, addr_space_t);
static rtx m32c_function_arg (cumulative_args_t, machine_mode,
			      const_tree, bool);
static bool m32c_pass_by_reference (cumulative_args_t, machine_mode,
				    const_tree, bool);
static void m32c_function_arg_advance (cumulative_args_t, machine_mode,
				       const_tree, bool);
static unsigned int m32c_function_arg_boundary (machine_mode, const_tree);
static int m32c_pushm_popm (Push_Pop_Type);
static bool m32c_strict_argument_naming (cumulative_args_t);
static rtx m32c_struct_value_rtx (tree, int);
static rtx m32c_subreg (machine_mode, rtx, machine_mode, int);
static int need_to_save (int);
static rtx m32c_function_value (const_tree, const_tree, bool);
static rtx m32c_libcall_value (machine_mode, const_rtx);

/* Returns true if an address is specified, else false.  */
static bool m32c_get_pragma_address (const char *varname, unsigned *addr);

#define SYMBOL_FLAG_FUNCVEC_FUNCTION	(SYMBOL_FLAG_MACH_DEP << 0)

#define streq(a,b) (strcmp ((a), (b)) == 0)

/* Internal support routines */

/* Debugging statements are tagged with DEBUG0 only so that they can
   be easily enabled individually, by replacing the '0' with '1' as
   needed.  */
#define DEBUG0 0
#define DEBUG1 1

#if DEBUG0
#include "print-tree.h"
/* This is needed by some of the commented-out debug statements
   below.  */
static char const *class_names[LIM_REG_CLASSES] = REG_CLASS_NAMES;
#endif
static int class_contents[LIM_REG_CLASSES][1] = REG_CLASS_CONTENTS;

/* These are all to support encode_pattern().  */
static char pattern[30], *patternp;
static GTY(()) rtx patternr[30];
#define RTX_IS(x) (streq (pattern, x))

/* Some macros to simplify the logic throughout this file.  */
#define IS_MEM_REGNO(regno) ((regno) >= MEM0_REGNO && (regno) <= MEM7_REGNO)
#define IS_MEM_REG(rtx) (GET_CODE (rtx) == REG && IS_MEM_REGNO (REGNO (rtx)))

#define IS_CR_REGNO(regno) ((regno) >= SB_REGNO && (regno) <= PC_REGNO)
#define IS_CR_REG(rtx) (GET_CODE (rtx) == REG && IS_CR_REGNO (REGNO (rtx)))

static int
far_addr_space_p (rtx x)
{
  if (GET_CODE (x) != MEM)
    return 0;
#if DEBUG0
  fprintf(stderr, "\033[35mfar_addr_space: "); debug_rtx(x);
  fprintf(stderr, " = %d\033[0m\n", MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR);
#endif
  return MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR;
}

/* We do most RTX matching by converting the RTX into a string, and
   using string compares.  This vastly simplifies the logic in many of
   the functions in this file.

   On exit, pattern[] has the encoded string (use RTX_IS("...") to
   compare it) and patternr[] has pointers to the nodes in the RTX
   corresponding to each character in the encoded string.  The latter
   is mostly used by print_operand().

   Unrecognized patterns have '?' in them; this shows up when the
   assembler complains about syntax errors.
*/

static void
encode_pattern_1 (rtx x)
{
  int i;

  if (patternp == pattern + sizeof (pattern) - 2)
    {
      patternp[-1] = '?';
      return;
    }

  patternr[patternp - pattern] = x;

  switch (GET_CODE (x))
    {
    case REG:
      *patternp++ = 'r';
      break;
    case SUBREG:
      if (GET_MODE_SIZE (GET_MODE (x)) !=
	  GET_MODE_SIZE (GET_MODE (XEXP (x, 0))))
	*patternp++ = 'S';
      if (GET_MODE (x) == PSImode
	  && GET_CODE (XEXP (x, 0)) == REG)
	*patternp++ = 'S';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case MEM:
      *patternp++ = 'm';
    case CONST:
      encode_pattern_1 (XEXP (x, 0));
      break;
    case SIGN_EXTEND:
      *patternp++ = '^';
      *patternp++ = 'S';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case ZERO_EXTEND:
      *patternp++ = '^';
      *patternp++ = 'Z';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case PLUS:
      *patternp++ = '+';
      encode_pattern_1 (XEXP (x, 0));
      encode_pattern_1 (XEXP (x, 1));
      break;
    case PRE_DEC:
      *patternp++ = '>';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case POST_INC:
      *patternp++ = '<';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case LO_SUM:
      *patternp++ = 'L';
      encode_pattern_1 (XEXP (x, 0));
      encode_pattern_1 (XEXP (x, 1));
      break;
    case HIGH:
      *patternp++ = 'H';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case SYMBOL_REF:
      *patternp++ = 's';
      break;
    case LABEL_REF:
      *patternp++ = 'l';
      break;
    case CODE_LABEL:
      *patternp++ = 'c';
      break;
    case CONST_INT:
    case CONST_DOUBLE:
      *patternp++ = 'i';
      break;
    case UNSPEC:
      *patternp++ = 'u';
      *patternp++ = '0' + XCINT (x, 1, UNSPEC);
      for (i = 0; i < XVECLEN (x, 0); i++)
	encode_pattern_1 (XVECEXP (x, 0, i));
      break;
    case USE:
      *patternp++ = 'U';
      break;
    case PARALLEL:
      *patternp++ = '|';
      for (i = 0; i < XVECLEN (x, 0); i++)
	encode_pattern_1 (XVECEXP (x, 0, i));
      break;
    case EXPR_LIST:
      *patternp++ = 'E';
      encode_pattern_1 (XEXP (x, 0));
      if (XEXP (x, 1))
	encode_pattern_1 (XEXP (x, 1));
      break;
    default:
      *patternp++ = '?';
#if DEBUG0
      fprintf (stderr, "can't encode pattern %s\n",
	       GET_RTX_NAME (GET_CODE (x)));
      debug_rtx (x);
#endif
      break;
    }
}

static void
encode_pattern (rtx x)
{
  patternp = pattern;
  encode_pattern_1 (x);
  *patternp = 0;
}

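/* Illustrative sketch (not part of the original m32c port): a tiny,
   compiled-out example of how the encoder above is used.  The helper
   below is hypothetical and only exists when DEBUG0 is enabled; the
   names it uses (A0_REGNO, RTX_IS, encode_pattern) come from this file
   and m32c.h.  */
#if DEBUG0
static void
encode_pattern_example (void)
{
  /* (mem:HI (plus:HI (reg:HI a0) (const_int 4))) encodes as "m+ri",
     with patternr[2] pointing at the register and patternr[3] at the
     constant, which is what the RTX_IS ("m+ri") tests later in this
     file rely on.  */
  rtx addr = gen_rtx_PLUS (HImode, gen_rtx_REG (HImode, A0_REGNO),
			   GEN_INT (4));
  encode_pattern (gen_rtx_MEM (HImode, addr));
  gcc_assert (RTX_IS ("m+ri"));
}
#endif
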
/* Since register names indicate the mode they're used in, we need a
   way to determine which name to refer to the register with.  Called
   by print_operand().  */

static const char *
reg_name_with_mode (int regno, machine_mode mode)
{
  int mlen = GET_MODE_SIZE (mode);
  if (regno == R0_REGNO && mlen == 1)
    return "r0l";
  if (regno == R0_REGNO && (mlen == 3 || mlen == 4))
    return "r2r0";
  if (regno == R0_REGNO && mlen == 6)
    return "r2r1r0";
  if (regno == R0_REGNO && mlen == 8)
    return "r3r1r2r0";
  if (regno == R1_REGNO && mlen == 1)
    return "r1l";
  if (regno == R1_REGNO && (mlen == 3 || mlen == 4))
    return "r3r1";
  if (regno == A0_REGNO && TARGET_A16 && (mlen == 3 || mlen == 4))
    return "a1a0";
  return reg_names[regno];
}

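/* Illustrative examples (added for exposition, derived directly from
   the checks above): reg_name_with_mode (R0_REGNO, QImode) yields
   "r0l", reg_name_with_mode (R0_REGNO, SImode) yields "r2r0", and a
   plain HImode request falls through to reg_names[] ("r0").  */
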
/* How many bytes a register uses on stack when it's pushed.  We need
   to know this because the push opcode needs to explicitly indicate
   the size of the register, even though the name of the register
   already tells it that.  Used by m32c_output_reg_{push,pop}, which
   is only used through calls to ASM_OUTPUT_REG_{PUSH,POP}.  */

static int
reg_push_size (int regno)
{
  switch (regno)
    {
    case R0_REGNO:
    case R1_REGNO:
      return 2;
    case R2_REGNO:
    case R3_REGNO:
    case FLG_REGNO:
      return 2;
    case A0_REGNO:
    case A1_REGNO:
    case SB_REGNO:
    case FB_REGNO:
    case SP_REGNO:
      if (TARGET_A16)
	return 2;
      else
	return 3;
    default:
      gcc_unreachable ();
    }
}

/* Given two register classes, find the largest intersection between
   them.  If there is no intersection, return RETURNED_IF_EMPTY
   instead.  */
static reg_class_t
reduce_class (reg_class_t original_class, reg_class_t limiting_class,
	      reg_class_t returned_if_empty)
{
  HARD_REG_SET cc;
  int i;
  reg_class_t best = NO_REGS;
  unsigned int best_size = 0;

  if (original_class == limiting_class)
    return original_class;

  cc = reg_class_contents[original_class];
  AND_HARD_REG_SET (cc, reg_class_contents[limiting_class]);

  for (i = 0; i < LIM_REG_CLASSES; i++)
    {
      if (hard_reg_set_subset_p (reg_class_contents[i], cc))
	if (best_size < reg_class_size[i])
	  {
	    best = (reg_class_t) i;
	    best_size = reg_class_size[i];
	  }

    }
  if (best == NO_REGS)
    return returned_if_empty;
  return best;
}

355/* Used by m32c_register_move_cost to determine if a move is
356 impossibly expensive. */
0e607518 357static bool
ef4bddc2 358class_can_hold_mode (reg_class_t rclass, machine_mode mode)
359{
360 /* Cache the results: 0=untested 1=no 2=yes */
361 static char results[LIM_REG_CLASSES][MAX_MACHINE_MODE];
362
363 if (results[(int) rclass][mode] == 0)
38b2d076 364 {
0e607518 365 int r;
0a2aaacc 366 results[rclass][mode] = 1;
38b2d076 367 for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
0e607518 368 if (in_hard_reg_set_p (reg_class_contents[(int) rclass], mode, r)
369 && HARD_REGNO_MODE_OK (r, mode))
370 {
371 results[rclass][mode] = 2;
372 break;
373 }
374 }
0e607518 375
376#if DEBUG0
377 fprintf (stderr, "class %s can hold %s? %s\n",
0e607518 378 class_names[(int) rclass], mode_name[mode],
0a2aaacc 379 (results[rclass][mode] == 2) ? "yes" : "no");
38b2d076 380#endif
0e607518 381 return results[(int) rclass][mode] == 2;
382}
383
384/* Run-time Target Specification. */
385
386/* Memregs are memory locations that gcc treats like general
387 registers, as there are a limited number of true registers and the
388 m32c families can use memory in most places that registers can be
389 used.
390
391 However, since memory accesses are more expensive than registers,
392 we allow the user to limit the number of memregs available, in
393 order to try to persuade gcc to try harder to use real registers.
394
45b86625 395 Memregs are provided by lib1funcs.S.
396*/
397
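/* Usage note (an assumption added for illustration, based on the
   option handling in m32c_option_override and
   m32c_conditional_register_usage below): the limit is given in bytes
   on the command line, so for example -memregs=8 leaves mem0..mem3
   usable and marks the remaining memreg words as fixed.  */
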
398int ok_to_change_target_memregs = TRUE;
399
400/* Implements TARGET_OPTION_OVERRIDE. */
401
402#undef TARGET_OPTION_OVERRIDE
403#define TARGET_OPTION_OVERRIDE m32c_option_override
404
405static void
406m32c_option_override (void)
38b2d076 407{
f28f2337 408 /* We limit memregs to 0..16, and provide a default. */
bbfc9a8c 409 if (global_options_set.x_target_memregs)
410 {
411 if (target_memregs < 0 || target_memregs > 16)
412 error ("invalid target memregs value '%d'", target_memregs);
413 }
414 else
07127a0a 415 target_memregs = 16;
416
417 if (TARGET_A24)
418 flag_ivopts = 0;
419
420 /* This target defaults to strict volatile bitfields. */
36acc1a2 421 if (flag_strict_volatile_bitfields < 0 && abi_version_at_least(2))
0685e770 422 flag_strict_volatile_bitfields = 1;
423
424 /* r8c/m16c have no 16-bit indirect call, so thunks are involved.
425 This is always worse than an absolute call. */
426 if (TARGET_A16)
427 flag_no_function_cse = 1;
428
429 /* This wants to put insns between compares and their jumps. */
430 /* FIXME: The right solution is to properly trace the flags register
431 values, but that is too much work for stage 4. */
432 flag_combine_stack_adjustments = 0;
433}
434
435#undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
436#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m32c_override_options_after_change
437
438static void
439m32c_override_options_after_change (void)
440{
441 if (TARGET_A16)
442 flag_no_function_cse = 1;
443}
444
445/* Defining data structures for per-function information */
446
447/* The usual; we set up our machine_function data. */
448static struct machine_function *
449m32c_init_machine_status (void)
450{
766090c2 451 return ggc_cleared_alloc<machine_function> ();
452}
453
454/* Implements INIT_EXPANDERS. We just set up to call the above
455 function. */
456void
457m32c_init_expanders (void)
458{
459 init_machine_status = m32c_init_machine_status;
460}
461
462/* Storage Layout */
463
464/* Register Basics */
465
466/* Basic Characteristics of Registers */
467
468/* Whether a mode fits in a register is complex enough to warrant a
469 table. */
470static struct
471{
472 char qi_regs;
473 char hi_regs;
474 char pi_regs;
475 char si_regs;
476 char di_regs;
477} nregs_table[FIRST_PSEUDO_REGISTER] =
478{
479 { 1, 1, 2, 2, 4 }, /* r0 */
480 { 0, 1, 0, 0, 0 }, /* r2 */
481 { 1, 1, 2, 2, 0 }, /* r1 */
482 { 0, 1, 0, 0, 0 }, /* r3 */
483 { 0, 1, 1, 0, 0 }, /* a0 */
484 { 0, 1, 1, 0, 0 }, /* a1 */
485 { 0, 1, 1, 0, 0 }, /* sb */
486 { 0, 1, 1, 0, 0 }, /* fb */
487 { 0, 1, 1, 0, 0 }, /* sp */
488 { 1, 1, 1, 0, 0 }, /* pc */
489 { 0, 0, 0, 0, 0 }, /* fl */
490 { 1, 1, 1, 0, 0 }, /* ap */
491 { 1, 1, 2, 2, 4 }, /* mem0 */
492 { 1, 1, 2, 2, 4 }, /* mem1 */
493 { 1, 1, 2, 2, 4 }, /* mem2 */
494 { 1, 1, 2, 2, 4 }, /* mem3 */
495 { 1, 1, 2, 2, 4 }, /* mem4 */
496 { 1, 1, 2, 2, 0 }, /* mem5 */
497 { 1, 1, 2, 2, 0 }, /* mem6 */
498 { 1, 1, 0, 0, 0 }, /* mem7 */
499};
500
501/* Implements TARGET_CONDITIONAL_REGISTER_USAGE. We adjust the number
502 of available memregs, and select which registers need to be preserved
503 across calls based on the chip family. */
504
505#undef TARGET_CONDITIONAL_REGISTER_USAGE
506#define TARGET_CONDITIONAL_REGISTER_USAGE m32c_conditional_register_usage
d6d17ae7 507void
508m32c_conditional_register_usage (void)
509{
510 int i;
511
512 if (0 <= target_memregs && target_memregs <= 16)
513 {
514 /* The command line option is bytes, but our "registers" are
515 16-bit words. */
65655f79 516 for (i = (target_memregs+1)/2; i < 8; i++)
517 {
518 fixed_regs[MEM0_REGNO + i] = 1;
519 CLEAR_HARD_REG_BIT (reg_class_contents[MEM_REGS], MEM0_REGNO + i);
520 }
521 }
522
523 /* M32CM and M32C preserve more registers across function calls. */
524 if (TARGET_A24)
525 {
526 call_used_regs[R1_REGNO] = 0;
527 call_used_regs[R2_REGNO] = 0;
528 call_used_regs[R3_REGNO] = 0;
529 call_used_regs[A0_REGNO] = 0;
530 call_used_regs[A1_REGNO] = 0;
531 }
532}
533
534/* How Values Fit in Registers */
535
536/* Implements HARD_REGNO_NREGS. This is complicated by the fact that
537 different registers are different sizes from each other, *and* may
538 be different sizes in different chip families. */
b8a669d0 539static int
ef4bddc2 540m32c_hard_regno_nregs_1 (int regno, machine_mode mode)
541{
542 if (regno == FLG_REGNO && mode == CCmode)
543 return 1;
544 if (regno >= FIRST_PSEUDO_REGISTER)
545 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
546
547 if (regno >= MEM0_REGNO && regno <= MEM7_REGNO)
548 return (GET_MODE_SIZE (mode) + 1) / 2;
549
550 if (GET_MODE_SIZE (mode) <= 1)
551 return nregs_table[regno].qi_regs;
552 if (GET_MODE_SIZE (mode) <= 2)
553 return nregs_table[regno].hi_regs;
5fd5d713 554 if (regno == A0_REGNO && mode == SImode && TARGET_A16)
555 return 2;
556 if ((GET_MODE_SIZE (mode) <= 3 || mode == PSImode) && TARGET_A24)
557 return nregs_table[regno].pi_regs;
558 if (GET_MODE_SIZE (mode) <= 4)
559 return nregs_table[regno].si_regs;
560 if (GET_MODE_SIZE (mode) <= 8)
561 return nregs_table[regno].di_regs;
562 return 0;
563}
564
b8a669d0 565int
ef4bddc2 566m32c_hard_regno_nregs (int regno, machine_mode mode)
567{
568 int rv = m32c_hard_regno_nregs_1 (regno, mode);
569 return rv ? rv : 1;
570}
571
572/* Implements HARD_REGNO_MODE_OK. The above function does the work
573 already; just test its return value. */
574int
ef4bddc2 575m32c_hard_regno_ok (int regno, machine_mode mode)
38b2d076 576{
b8a669d0 577 return m32c_hard_regno_nregs_1 (regno, mode) != 0;
578}
579
580/* Implements MODES_TIEABLE_P. In general, modes aren't tieable since
581 registers are all different sizes. However, since most modes are
582 bigger than our registers anyway, it's easier to implement this
583 function that way, leaving QImode as the only unique case. */
584int
ef4bddc2 585m32c_modes_tieable_p (machine_mode m1, machine_mode m2)
586{
587 if (GET_MODE_SIZE (m1) == GET_MODE_SIZE (m2))
588 return 1;
589
07127a0a 590#if 0
591 if (m1 == QImode || m2 == QImode)
592 return 0;
07127a0a 593#endif
594
595 return 1;
596}
597
598/* Register Classes */
599
600/* Implements REGNO_REG_CLASS. */
444d6efe 601enum reg_class
602m32c_regno_reg_class (int regno)
603{
604 switch (regno)
605 {
606 case R0_REGNO:
607 return R0_REGS;
608 case R1_REGNO:
609 return R1_REGS;
610 case R2_REGNO:
611 return R2_REGS;
612 case R3_REGNO:
613 return R3_REGS;
614 case A0_REGNO:
22843acd 615 return A0_REGS;
38b2d076 616 case A1_REGNO:
22843acd 617 return A1_REGS;
618 case SB_REGNO:
619 return SB_REGS;
620 case FB_REGNO:
621 return FB_REGS;
622 case SP_REGNO:
623 return SP_REGS;
624 case FLG_REGNO:
625 return FLG_REGS;
626 default:
627 if (IS_MEM_REGNO (regno))
628 return MEM_REGS;
629 return ALL_REGS;
630 }
631}
632
633/* Implements REGNO_OK_FOR_BASE_P. */
634int
635m32c_regno_ok_for_base_p (int regno)
636{
637 if (regno == A0_REGNO
638 || regno == A1_REGNO || regno >= FIRST_PSEUDO_REGISTER)
639 return 1;
640 return 0;
641}
642
b05933f5 643/* Implements TARGET_PREFERRED_RELOAD_CLASS. In general, prefer general
38b2d076 644 registers of the appropriate size. */
645
646#undef TARGET_PREFERRED_RELOAD_CLASS
647#define TARGET_PREFERRED_RELOAD_CLASS m32c_preferred_reload_class
648
649static reg_class_t
650m32c_preferred_reload_class (rtx x, reg_class_t rclass)
38b2d076 651{
b05933f5 652 reg_class_t newclass = rclass;
38b2d076 653
f75e07bc 654#if DEBUG0
655 fprintf (stderr, "\npreferred_reload_class for %s is ",
656 class_names[rclass]);
657#endif
658 if (rclass == NO_REGS)
659 rclass = GET_MODE (x) == QImode ? HL_REGS : R03_REGS;
660
0e607518 661 if (reg_classes_intersect_p (rclass, CR_REGS))
662 {
663 switch (GET_MODE (x))
664 {
665 case QImode:
666 newclass = HL_REGS;
667 break;
668 default:
669 /* newclass = HI_REGS; */
670 break;
671 }
672 }
673
674 else if (newclass == QI_REGS && GET_MODE_SIZE (GET_MODE (x)) > 2)
675 newclass = SI_REGS;
676 else if (GET_MODE_SIZE (GET_MODE (x)) > 4
b05933f5 677 && ! reg_class_subset_p (R03_REGS, rclass))
678 newclass = DI_REGS;
679
680 rclass = reduce_class (rclass, newclass, rclass);
681
682 if (GET_MODE (x) == QImode)
683 rclass = reduce_class (rclass, HL_REGS, rclass);
684
f75e07bc 685#if DEBUG0
686 fprintf (stderr, "%s\n", class_names[rclass]);
687 debug_rtx (x);
688
689 if (GET_CODE (x) == MEM
690 && GET_CODE (XEXP (x, 0)) == PLUS
691 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
692 fprintf (stderr, "Glorm!\n");
693#endif
694 return rclass;
695}
696
697/* Implements TARGET_PREFERRED_OUTPUT_RELOAD_CLASS. */
698
699#undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
700#define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS m32c_preferred_output_reload_class
701
702static reg_class_t
703m32c_preferred_output_reload_class (rtx x, reg_class_t rclass)
704{
705 return m32c_preferred_reload_class (x, rclass);
706}
707
708/* Implements LIMIT_RELOAD_CLASS. We basically want to avoid using
709 address registers for reloads since they're needed for address
710 reloads. */
711int
ef4bddc2 712m32c_limit_reload_class (machine_mode mode, int rclass)
38b2d076 713{
f75e07bc 714#if DEBUG0
715 fprintf (stderr, "limit_reload_class for %s: %s ->",
716 mode_name[mode], class_names[rclass]);
717#endif
718
719 if (mode == QImode)
720 rclass = reduce_class (rclass, HL_REGS, rclass);
721 else if (mode == HImode)
722 rclass = reduce_class (rclass, HI_REGS, rclass);
723 else if (mode == SImode)
724 rclass = reduce_class (rclass, SI_REGS, rclass);
725
726 if (rclass != A_REGS)
727 rclass = reduce_class (rclass, DI_REGS, rclass);
728
f75e07bc 729#if DEBUG0
730 fprintf (stderr, " %s\n", class_names[rclass]);
731#endif
732 return rclass;
733}
734
735/* Implements SECONDARY_RELOAD_CLASS. QImode have to be reloaded in
736 r0 or r1, as those are the only real QImode registers. CR regs get
737 reloaded through appropriately sized general or address
738 registers. */
739int
ef4bddc2 740m32c_secondary_reload_class (int rclass, machine_mode mode, rtx x)
741{
742 int cc = class_contents[rclass][0];
743#if DEBUG0
744 fprintf (stderr, "\nsecondary reload class %s %s\n",
745 class_names[rclass], mode_name[mode]);
746 debug_rtx (x);
747#endif
748 if (mode == QImode
749 && GET_CODE (x) == MEM && (cc & ~class_contents[R23_REGS][0]) == 0)
750 return QI_REGS;
0e607518 751 if (reg_classes_intersect_p (rclass, CR_REGS)
752 && GET_CODE (x) == REG
753 && REGNO (x) >= SB_REGNO && REGNO (x) <= SP_REGNO)
13a23442 754 return (TARGET_A16 || mode == HImode) ? HI_REGS : A_REGS;
755 return NO_REGS;
756}
757
184866c5 758/* Implements TARGET_CLASS_LIKELY_SPILLED_P. A_REGS is needed for address
38b2d076 759 reloads. */
760
761#undef TARGET_CLASS_LIKELY_SPILLED_P
762#define TARGET_CLASS_LIKELY_SPILLED_P m32c_class_likely_spilled_p
763
764static bool
765m32c_class_likely_spilled_p (reg_class_t regclass)
766{
767 if (regclass == A_REGS)
768 return true;
769
770 return (reg_class_size[(int) regclass] == 1);
771}
772
c4831cff 773/* Implements TARGET_CLASS_MAX_NREGS. We calculate this according to its
774 documented meaning, to avoid potential inconsistencies with actual
775 class definitions. */
776
777#undef TARGET_CLASS_MAX_NREGS
778#define TARGET_CLASS_MAX_NREGS m32c_class_max_nregs
779
780static unsigned char
ef4bddc2 781m32c_class_max_nregs (reg_class_t regclass, machine_mode mode)
38b2d076 782{
783 int rn;
784 unsigned char max = 0;
785
786 for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
c4831cff 787 if (TEST_HARD_REG_BIT (reg_class_contents[(int) regclass], rn))
38b2d076 788 {
c4831cff 789 unsigned char n = m32c_hard_regno_nregs (rn, mode);
790 if (max < n)
791 max = n;
792 }
793 return max;
794}
795
796/* Implements CANNOT_CHANGE_MODE_CLASS. Only r0 and r1 can change to
797 QI (r0l, r1l) because the chip doesn't support QI ops on other
798 registers (well, it does on a0/a1 but if we let gcc do that, reload
799 suffers). Otherwise, we allow changes to larger modes. */
800int
ef4bddc2
RS
801m32c_cannot_change_mode_class (machine_mode from,
802 machine_mode to, int rclass)
38b2d076 803{
db9c8397 804 int rn;
38b2d076
DD
805#if DEBUG0
806 fprintf (stderr, "cannot change from %s to %s in %s\n",
807 mode_name[from], mode_name[to], class_names[rclass]);
808#endif
809
db9c8397
DD
810 /* If the larger mode isn't allowed in any of these registers, we
811 can't allow the change. */
812 for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
813 if (class_contents[rclass][0] & (1 << rn))
814 if (! m32c_hard_regno_ok (rn, to))
815 return 1;
816
38b2d076
DD
817 if (to == QImode)
818 return (class_contents[rclass][0] & 0x1ffa);
819
820 if (class_contents[rclass][0] & 0x0005 /* r0, r1 */
821 && GET_MODE_SIZE (from) > 1)
822 return 0;
823 if (GET_MODE_SIZE (from) > 2) /* all other regs */
824 return 0;
825
826 return 1;
827}
828
829/* Helpers for the rest of the file. */
830/* TRUE if the rtx is a REG rtx for the given register. */
831#define IS_REG(rtx,regno) (GET_CODE (rtx) == REG \
832 && REGNO (rtx) == regno)
833/* TRUE if the rtx is a pseudo - specifically, one we can use as a
834 base register in address calculations (hence the "strict"
835 argument). */
836#define IS_PSEUDO(rtx,strict) (!strict && GET_CODE (rtx) == REG \
837 && (REGNO (rtx) == AP_REGNO \
838 || REGNO (rtx) >= FIRST_PSEUDO_REGISTER))
839
5fd5d713
DD
840#define A0_OR_PSEUDO(x) (IS_REG(x, A0_REGNO) || REGNO (x) >= FIRST_PSEUDO_REGISTER)
841
777e635f 842/* Implements matching for constraints (see next function too). 'S' is
38b2d076
DD
843 for memory constraints, plus "Rpa" for PARALLEL rtx's we use for
844 call return values. */
03dd17b1
NF
845bool
846m32c_matches_constraint_p (rtx value, int constraint)
38b2d076
DD
847{
848 encode_pattern (value);
5fd5d713 849
03dd17b1
NF
850 switch (constraint) {
851 case CONSTRAINT_SF:
852 return (far_addr_space_p (value)
853 && ((RTX_IS ("mr")
854 && A0_OR_PSEUDO (patternr[1])
855 && GET_MODE (patternr[1]) == SImode)
856 || (RTX_IS ("m+^Sri")
857 && A0_OR_PSEUDO (patternr[4])
858 && GET_MODE (patternr[4]) == HImode)
859 || (RTX_IS ("m+^Srs")
860 && A0_OR_PSEUDO (patternr[4])
861 && GET_MODE (patternr[4]) == HImode)
862 || (RTX_IS ("m+^S+ris")
863 && A0_OR_PSEUDO (patternr[5])
864 && GET_MODE (patternr[5]) == HImode)
865 || RTX_IS ("ms")));
866 case CONSTRAINT_Sd:
38b2d076
DD
867 {
868 /* This is the common "src/dest" address */
869 rtx r;
870 if (GET_CODE (value) == MEM && CONSTANT_P (XEXP (value, 0)))
03dd17b1 871 return true;
38b2d076 872 if (RTX_IS ("ms") || RTX_IS ("m+si"))
03dd17b1 873 return true;
07127a0a
DD
874 if (RTX_IS ("m++rii"))
875 {
876 if (REGNO (patternr[3]) == FB_REGNO
877 && INTVAL (patternr[4]) == 0)
03dd17b1 878 return true;
07127a0a 879 }
38b2d076
DD
880 if (RTX_IS ("mr"))
881 r = patternr[1];
882 else if (RTX_IS ("m+ri") || RTX_IS ("m+rs") || RTX_IS ("m+r+si"))
883 r = patternr[2];
884 else
03dd17b1 885 return false;
38b2d076 886 if (REGNO (r) == SP_REGNO)
03dd17b1 887 return false;
38b2d076
DD
888 return m32c_legitimate_address_p (GET_MODE (value), XEXP (value, 0), 1);
889 }
03dd17b1 890 case CONSTRAINT_Sa:
38b2d076
DD
891 {
892 rtx r;
893 if (RTX_IS ("mr"))
894 r = patternr[1];
895 else if (RTX_IS ("m+ri"))
896 r = patternr[2];
897 else
03dd17b1 898 return false;
38b2d076
DD
899 return (IS_REG (r, A0_REGNO) || IS_REG (r, A1_REGNO));
900 }
03dd17b1
NF
901 case CONSTRAINT_Si:
902 return (RTX_IS ("mi") || RTX_IS ("ms") || RTX_IS ("m+si"));
903 case CONSTRAINT_Ss:
904 return ((RTX_IS ("mr")
905 && (IS_REG (patternr[1], SP_REGNO)))
906 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SP_REGNO))));
907 case CONSTRAINT_Sf:
908 return ((RTX_IS ("mr")
909 && (IS_REG (patternr[1], FB_REGNO)))
910 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], FB_REGNO))));
911 case CONSTRAINT_Sb:
912 return ((RTX_IS ("mr")
913 && (IS_REG (patternr[1], SB_REGNO)))
914 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SB_REGNO))));
915 case CONSTRAINT_Sp:
916 /* Absolute addresses 0..0x1fff used for bit addressing (I/O ports) */
917 return (RTX_IS ("mi")
918 && !(INTVAL (patternr[1]) & ~0x1fff));
919 case CONSTRAINT_S1:
920 return r1h_operand (value, QImode);
921 case CONSTRAINT_Rpa:
38b2d076 922 return GET_CODE (value) == PARALLEL;
03dd17b1
NF
923 default:
924 return false;
925 }
38b2d076
DD
926}
927
928/* STACK AND CALLING */
929
930/* Frame Layout */
931
932/* Implements RETURN_ADDR_RTX. Note that R8C and M16C push 24 bits
933 (yes, THREE bytes) onto the stack for the return address, but we
934 don't support pointers bigger than 16 bits on those chips. This
935 will likely wreak havoc with exception unwinding. FIXME. */
936rtx
937m32c_return_addr_rtx (int count)
938{
ef4bddc2 939 machine_mode mode;
38b2d076
DD
940 int offset;
941 rtx ra_mem;
942
943 if (count)
944 return NULL_RTX;
945 /* we want 2[$fb] */
946
947 if (TARGET_A24)
948 {
80b093df
DD
949 /* It's four bytes */
950 mode = PSImode;
38b2d076
DD
951 offset = 4;
952 }
953 else
954 {
955 /* FIXME: it's really 3 bytes */
956 mode = HImode;
957 offset = 2;
958 }
959
960 ra_mem =
0a81f074
RS
961 gen_rtx_MEM (mode, plus_constant (Pmode, gen_rtx_REG (Pmode, FP_REGNO),
962 offset));
38b2d076
DD
963 return copy_to_mode_reg (mode, ra_mem);
964}
965
966/* Implements INCOMING_RETURN_ADDR_RTX. See comment above. */
967rtx
968m32c_incoming_return_addr_rtx (void)
969{
970 /* we want [sp] */
971 return gen_rtx_MEM (PSImode, gen_rtx_REG (PSImode, SP_REGNO));
972}
973
974/* Exception Handling Support */
975
976/* Implements EH_RETURN_DATA_REGNO. Choose registers able to hold
977 pointers. */
978int
979m32c_eh_return_data_regno (int n)
980{
981 switch (n)
982 {
983 case 0:
45d898e4 984 return MEM0_REGNO;
38b2d076 985 case 1:
45d898e4 986 return MEM0_REGNO+4;
38b2d076
DD
987 default:
988 return INVALID_REGNUM;
989 }
990}
991
992/* Implements EH_RETURN_STACKADJ_RTX. Saved and used later in
993 m32c_emit_eh_epilogue. */
994rtx
995m32c_eh_return_stackadj_rtx (void)
996{
997 if (!cfun->machine->eh_stack_adjust)
998 {
999 rtx sa;
1000
99920b6f 1001 sa = gen_rtx_REG (Pmode, R0_REGNO);
38b2d076
DD
1002 cfun->machine->eh_stack_adjust = sa;
1003 }
1004 return cfun->machine->eh_stack_adjust;
1005}
1006
1007/* Registers That Address the Stack Frame */
1008
1009/* Implements DWARF_FRAME_REGNUM and DBX_REGISTER_NUMBER. Note that
1010 the original spec called for dwarf numbers to vary with register
1011 width as well, for example, r0l, r0, and r2r0 would each have
1012 different dwarf numbers. GCC doesn't support this, and we don't do
1013 it, and gdb seems to like it this way anyway. */
1014unsigned int
1015m32c_dwarf_frame_regnum (int n)
1016{
1017 switch (n)
1018 {
1019 case R0_REGNO:
1020 return 5;
1021 case R1_REGNO:
1022 return 6;
1023 case R2_REGNO:
1024 return 7;
1025 case R3_REGNO:
1026 return 8;
1027 case A0_REGNO:
1028 return 9;
1029 case A1_REGNO:
1030 return 10;
1031 case FB_REGNO:
1032 return 11;
1033 case SB_REGNO:
1034 return 19;
1035
1036 case SP_REGNO:
1037 return 12;
1038 case PC_REGNO:
1039 return 13;
1040 default:
1041 return DWARF_FRAME_REGISTERS + 1;
1042 }
1043}
1044
1045/* The frame looks like this:
1046
1047 ap -> +------------------------------
1048 | Return address (3 or 4 bytes)
1049 | Saved FB (2 or 4 bytes)
1050 fb -> +------------------------------
1051 | local vars
1052 | register saves fb
1053 | through r0 as needed
1054 sp -> +------------------------------
1055*/
1056
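/* Worked example (added for illustration, not in the original): with
   the frame laid out as above on an A16 part,
   m32c_initial_elimination_offset() below returns 5 for AP->FB (a
   3-byte return address plus the 2-byte saved FB), and FB->SP adds
   the pushm save area plus the frame size; on A24 parts the AP
   adjustment is 8 and the result is rounded up to an even number of
   bytes.  */
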
1057/* We use this to wrap all emitted insns in the prologue. */
1058static rtx
1059F (rtx x)
1060{
1061 RTX_FRAME_RELATED_P (x) = 1;
1062 return x;
1063}
1064
1065/* This maps register numbers to the PUSHM/POPM bitfield, and tells us
1066 how much the stack pointer moves for each, for each cpu family. */
1067static struct
1068{
1069 int reg1;
1070 int bit;
1071 int a16_bytes;
1072 int a24_bytes;
1073} pushm_info[] =
1074{
9d746d5e
DD
1075 /* These are in reverse push (nearest-to-sp) order. */
1076 { R0_REGNO, 0x80, 2, 2 },
38b2d076 1077 { R1_REGNO, 0x40, 2, 2 },
9d746d5e
DD
1078 { R2_REGNO, 0x20, 2, 2 },
1079 { R3_REGNO, 0x10, 2, 2 },
1080 { A0_REGNO, 0x08, 2, 4 },
1081 { A1_REGNO, 0x04, 2, 4 },
1082 { SB_REGNO, 0x02, 2, 4 },
1083 { FB_REGNO, 0x01, 2, 4 }
38b2d076
DD
1084};
1085
1086#define PUSHM_N (sizeof(pushm_info)/sizeof(pushm_info[0]))
1087
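/* Illustrative note (added for exposition): the bit values above form
   the PUSHM/POPM operand, so a reg_mask of 0x81 requests $r0 and $fb.
   Per the table, that push costs 4 bytes of stack on A16 (2+2) and
   6 bytes on A24 (2+4).  */
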
1088/* Returns TRUE if we need to save/restore the given register. We
1089 save everything for exception handlers, so that any register can be
1090 unwound. For interrupt handlers, we save everything if the handler
1091 calls something else (because we don't know what *that* function
1092 might do), but try to be a bit smarter if the handler is a leaf
1093 function. We always save $a0, though, because we use that in the
85f65093 1094 epilogue to copy $fb to $sp. */
38b2d076
DD
1095static int
1096need_to_save (int regno)
1097{
1098 if (fixed_regs[regno])
1099 return 0;
ad516a74 1100 if (crtl->calls_eh_return)
38b2d076
DD
1101 return 1;
1102 if (regno == FP_REGNO)
1103 return 0;
1104 if (cfun->machine->is_interrupt
65655f79
DD
1105 && (!cfun->machine->is_leaf
1106 || (regno == A0_REGNO
1107 && m32c_function_needs_enter ())
1108 ))
38b2d076 1109 return 1;
6fb5fa3c 1110 if (df_regs_ever_live_p (regno)
38b2d076
DD
1111 && (!call_used_regs[regno] || cfun->machine->is_interrupt))
1112 return 1;
1113 return 0;
1114}
1115
1116/* This function contains all the intelligence about saving and
1117 restoring registers. It always figures out the register save set.
1118 When called with PP_justcount, it merely returns the size of the
1119 save set (for eliminating the frame pointer, for example). When
1120 called with PP_pushm or PP_popm, it emits the appropriate
1121 instructions for saving (pushm) or restoring (popm) the
1122 registers. */
1123static int
1124m32c_pushm_popm (Push_Pop_Type ppt)
1125{
1126 int reg_mask = 0;
1127 int byte_count = 0, bytes;
1128 int i;
1129 rtx dwarf_set[PUSHM_N];
1130 int n_dwarfs = 0;
1131 int nosave_mask = 0;
1132
305da3ec
JH
1133 if (crtl->return_rtx
1134 && GET_CODE (crtl->return_rtx) == PARALLEL
ad516a74 1135 && !(crtl->calls_eh_return || cfun->machine->is_interrupt))
38b2d076 1136 {
305da3ec 1137 rtx exp = XVECEXP (crtl->return_rtx, 0, 0);
38b2d076
DD
1138 rtx rv = XEXP (exp, 0);
1139 int rv_bytes = GET_MODE_SIZE (GET_MODE (rv));
1140
1141 if (rv_bytes > 2)
1142 nosave_mask |= 0x20; /* PSI, SI */
1143 else
1144 nosave_mask |= 0xf0; /* DF */
1145 if (rv_bytes > 4)
1146 nosave_mask |= 0x50; /* DI */
1147 }
1148
1149 for (i = 0; i < (int) PUSHM_N; i++)
1150 {
1151 /* Skip if neither register needs saving. */
1152 if (!need_to_save (pushm_info[i].reg1))
1153 continue;
1154
1155 if (pushm_info[i].bit & nosave_mask)
1156 continue;
1157
1158 reg_mask |= pushm_info[i].bit;
1159 bytes = TARGET_A16 ? pushm_info[i].a16_bytes : pushm_info[i].a24_bytes;
1160
1161 if (ppt == PP_pushm)
1162 {
ef4bddc2 1163 machine_mode mode = (bytes == 2) ? HImode : SImode;
38b2d076
DD
1164 rtx addr;
1165
1166 /* Always use stack_pointer_rtx instead of calling
1167 rtx_gen_REG ourselves. Code elsewhere in GCC assumes
1168 that there is a single rtx representing the stack pointer,
1169 namely stack_pointer_rtx, and uses == to recognize it. */
1170 addr = stack_pointer_rtx;
1171
1172 if (byte_count != 0)
1173 addr = gen_rtx_PLUS (GET_MODE (addr), addr, GEN_INT (byte_count));
1174
1175 dwarf_set[n_dwarfs++] =
f7df4a84 1176 gen_rtx_SET (gen_rtx_MEM (mode, addr),
38b2d076
DD
1177 gen_rtx_REG (mode, pushm_info[i].reg1));
1178 F (dwarf_set[n_dwarfs - 1]);
1179
1180 }
1181 byte_count += bytes;
1182 }
1183
1184 if (cfun->machine->is_interrupt)
1185 {
1186 cfun->machine->intr_pushm = reg_mask & 0xfe;
1187 reg_mask = 0;
1188 byte_count = 0;
1189 }
1190
1191 if (cfun->machine->is_interrupt)
1192 for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
1193 if (need_to_save (i))
1194 {
1195 byte_count += 2;
1196 cfun->machine->intr_pushmem[i - MEM0_REGNO] = 1;
1197 }
1198
1199 if (ppt == PP_pushm && byte_count)
1200 {
1201 rtx note = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (n_dwarfs + 1));
1202 rtx pushm;
1203
1204 if (reg_mask)
1205 {
1206 XVECEXP (note, 0, 0)
f7df4a84 1207 = gen_rtx_SET (stack_pointer_rtx,
38b2d076
DD
1208 gen_rtx_PLUS (GET_MODE (stack_pointer_rtx),
1209 stack_pointer_rtx,
1210 GEN_INT (-byte_count)));
1211 F (XVECEXP (note, 0, 0));
1212
1213 for (i = 0; i < n_dwarfs; i++)
1214 XVECEXP (note, 0, i + 1) = dwarf_set[i];
1215
1216 pushm = F (emit_insn (gen_pushm (GEN_INT (reg_mask))));
1217
444d6efe 1218 add_reg_note (pushm, REG_FRAME_RELATED_EXPR, note);
38b2d076
DD
1219 }
1220
1221 if (cfun->machine->is_interrupt)
1222 for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
1223 if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
1224 {
1225 if (TARGET_A16)
1226 pushm = emit_insn (gen_pushhi_16 (gen_rtx_REG (HImode, i)));
1227 else
1228 pushm = emit_insn (gen_pushhi_24 (gen_rtx_REG (HImode, i)));
1229 F (pushm);
1230 }
1231 }
1232 if (ppt == PP_popm && byte_count)
1233 {
38b2d076
DD
1234 if (cfun->machine->is_interrupt)
1235 for (i = MEM7_REGNO; i >= MEM0_REGNO; i--)
1236 if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
1237 {
1238 if (TARGET_A16)
b3fdec9e 1239 emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, i)));
38b2d076 1240 else
b3fdec9e 1241 emit_insn (gen_pophi_24 (gen_rtx_REG (HImode, i)));
38b2d076
DD
1242 }
1243 if (reg_mask)
1244 emit_insn (gen_popm (GEN_INT (reg_mask)));
1245 }
1246
1247 return byte_count;
1248}
1249
1250/* Implements INITIAL_ELIMINATION_OFFSET. See the comment above that
1251 diagrams our call frame. */
1252int
1253m32c_initial_elimination_offset (int from, int to)
1254{
1255 int ofs = 0;
1256
1257 if (from == AP_REGNO)
1258 {
1259 if (TARGET_A16)
1260 ofs += 5;
1261 else
1262 ofs += 8;
1263 }
1264
1265 if (to == SP_REGNO)
1266 {
1267 ofs += m32c_pushm_popm (PP_justcount);
1268 ofs += get_frame_size ();
1269 }
1270
1271 /* Account for push rounding. */
1272 if (TARGET_A24)
1273 ofs = (ofs + 1) & ~1;
1274#if DEBUG0
1275 fprintf (stderr, "initial_elimination_offset from=%d to=%d, ofs=%d\n", from,
1276 to, ofs);
1277#endif
1278 return ofs;
1279}
1280
1281/* Passing Function Arguments on the Stack */
1282
38b2d076
DD
1283/* Implements PUSH_ROUNDING. The R8C and M16C have byte stacks, the
1284 M32C has word stacks. */
444d6efe 1285unsigned int
38b2d076
DD
1286m32c_push_rounding (int n)
1287{
1288 if (TARGET_R8C || TARGET_M16C)
1289 return n;
1290 return (n + 1) & ~1;
1291}
1292
1293/* Passing Arguments in Registers */
1294
cd34bbe8
NF
1295/* Implements TARGET_FUNCTION_ARG. Arguments are passed partly in
1296 registers, partly on stack. If our function returns a struct, a
1297 pointer to a buffer for it is at the top of the stack (last thing
1298 pushed). The first few real arguments may be in registers as
1299 follows:
38b2d076
DD
1300
1301 R8C/M16C: arg1 in r1 if it's QI or HI (else it's pushed on stack)
1302 arg2 in r2 if it's HI (else pushed on stack)
1303 rest on stack
1304 M32C: arg1 in r0 if it's QI or HI (else it's pushed on stack)
1305 rest on stack
1306
1307 Structs are not passed in registers, even if they fit. Only
1308 integer and pointer types are passed in registers.
1309
1310 Note that when arg1 doesn't fit in r1, arg2 may still be passed in
1311 r2 if it fits. */
cd34bbe8
NF
1312#undef TARGET_FUNCTION_ARG
1313#define TARGET_FUNCTION_ARG m32c_function_arg
1314static rtx
d5cc9181 1315m32c_function_arg (cumulative_args_t ca_v,
ef4bddc2 1316 machine_mode mode, const_tree type, bool named)
38b2d076 1317{
d5cc9181
JR
1318 CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
1319
38b2d076
DD
1320 /* Can return a reg, parallel, or 0 for stack */
1321 rtx rv = NULL_RTX;
1322#if DEBUG0
1323 fprintf (stderr, "func_arg %d (%s, %d)\n",
1324 ca->parm_num, mode_name[mode], named);
f75e07bc 1325 debug_tree ((tree)type);
38b2d076
DD
1326#endif
1327
1328 if (mode == VOIDmode)
1329 return GEN_INT (0);
1330
1331 if (ca->force_mem || !named)
1332 {
1333#if DEBUG0
1334 fprintf (stderr, "func arg: force %d named %d, mem\n", ca->force_mem,
1335 named);
1336#endif
1337 return NULL_RTX;
1338 }
1339
1340 if (type && INTEGRAL_TYPE_P (type) && POINTER_TYPE_P (type))
1341 return NULL_RTX;
1342
9d746d5e
DD
1343 if (type && AGGREGATE_TYPE_P (type))
1344 return NULL_RTX;
1345
38b2d076
DD
1346 switch (ca->parm_num)
1347 {
1348 case 1:
1349 if (GET_MODE_SIZE (mode) == 1 || GET_MODE_SIZE (mode) == 2)
1350 rv = gen_rtx_REG (mode, TARGET_A16 ? R1_REGNO : R0_REGNO);
1351 break;
1352
1353 case 2:
1354 if (TARGET_A16 && GET_MODE_SIZE (mode) == 2)
1355 rv = gen_rtx_REG (mode, R2_REGNO);
1356 break;
1357 }
1358
1359#if DEBUG0
1360 debug_rtx (rv);
1361#endif
1362 return rv;
1363}
1364
1365#undef TARGET_PASS_BY_REFERENCE
1366#define TARGET_PASS_BY_REFERENCE m32c_pass_by_reference
1367static bool
d5cc9181 1368m32c_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
ef4bddc2 1369 machine_mode mode ATTRIBUTE_UNUSED,
586de218 1370 const_tree type ATTRIBUTE_UNUSED,
38b2d076
DD
1371 bool named ATTRIBUTE_UNUSED)
1372{
1373 return 0;
1374}
1375
1376/* Implements INIT_CUMULATIVE_ARGS. */
1377void
1378m32c_init_cumulative_args (CUMULATIVE_ARGS * ca,
9d746d5e 1379 tree fntype,
38b2d076 1380 rtx libname ATTRIBUTE_UNUSED,
9d746d5e 1381 tree fndecl,
38b2d076
DD
1382 int n_named_args ATTRIBUTE_UNUSED)
1383{
9d746d5e
DD
1384 if (fntype && aggregate_value_p (TREE_TYPE (fntype), fndecl))
1385 ca->force_mem = 1;
1386 else
1387 ca->force_mem = 0;
38b2d076
DD
1388 ca->parm_num = 1;
1389}
1390
cd34bbe8
NF
1391/* Implements TARGET_FUNCTION_ARG_ADVANCE. force_mem is set for
1392 functions returning structures, so we always reset that. Otherwise,
1393 we only need to know the sequence number of the argument to know what
1394 to do with it. */
1395#undef TARGET_FUNCTION_ARG_ADVANCE
1396#define TARGET_FUNCTION_ARG_ADVANCE m32c_function_arg_advance
1397static void
d5cc9181 1398m32c_function_arg_advance (cumulative_args_t ca_v,
ef4bddc2 1399 machine_mode mode ATTRIBUTE_UNUSED,
cd34bbe8
NF
1400 const_tree type ATTRIBUTE_UNUSED,
1401 bool named ATTRIBUTE_UNUSED)
38b2d076 1402{
d5cc9181
JR
1403 CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
1404
38b2d076
DD
1405 if (ca->force_mem)
1406 ca->force_mem = 0;
9d746d5e
DD
1407 else
1408 ca->parm_num++;
38b2d076
DD
1409}
1410
c2ed6cf8
NF
1411/* Implements TARGET_FUNCTION_ARG_BOUNDARY. */
1412#undef TARGET_FUNCTION_ARG_BOUNDARY
1413#define TARGET_FUNCTION_ARG_BOUNDARY m32c_function_arg_boundary
1414static unsigned int
ef4bddc2 1415m32c_function_arg_boundary (machine_mode mode ATTRIBUTE_UNUSED,
c2ed6cf8
NF
1416 const_tree type ATTRIBUTE_UNUSED)
1417{
1418 return (TARGET_A16 ? 8 : 16);
1419}
1420
38b2d076
DD
1421/* Implements FUNCTION_ARG_REGNO_P. */
1422int
1423m32c_function_arg_regno_p (int r)
1424{
1425 if (TARGET_A24)
1426 return (r == R0_REGNO);
1427 return (r == R1_REGNO || r == R2_REGNO);
1428}
1429
e9555b13 1430/* HImode and PSImode are the two "native" modes as far as GCC is
85f65093 1431 concerned, but the chips also support a 32-bit mode which is used
e9555b13
DD
1432 for some opcodes in R8C/M16C and for reset vectors and such. */
1433#undef TARGET_VALID_POINTER_MODE
1434#define TARGET_VALID_POINTER_MODE m32c_valid_pointer_mode
23fed240 1435static bool
ef4bddc2 1436m32c_valid_pointer_mode (machine_mode mode)
e9555b13 1437{
e9555b13
DD
1438 if (mode == HImode
1439 || mode == PSImode
1440 || mode == SImode
1441 )
1442 return 1;
1443 return 0;
1444}
1445
38b2d076
DD
1446/* How Scalar Function Values Are Returned */
1447
2a31793e 1448/* Implements TARGET_LIBCALL_VALUE. Most values are returned in $r0, or some
38b2d076
DD
1449 combination of registers starting there (r2r0 for longs, r3r1r2r0
1450 for long long, r3r2r1r0 for doubles), except that that ABI
1451 currently doesn't work because it ends up using all available
1452 general registers and gcc often can't compile it. So, instead, we
1453 return anything bigger than 16 bits in "mem0" (effectively, a
1454 memory location). */
2a31793e
AS
1455
1456#undef TARGET_LIBCALL_VALUE
1457#define TARGET_LIBCALL_VALUE m32c_libcall_value
1458
1459static rtx
ef4bddc2 1460m32c_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
38b2d076
DD
1461{
1462 /* return reg or parallel */
1463#if 0
1464 /* FIXME: GCC has difficulty returning large values in registers,
1465 because that ties up most of the general registers and gives the
1466 register allocator little to work with. Until we can resolve
1467 this, large values are returned in memory. */
1468 if (mode == DFmode)
1469 {
1470 rtx rv;
1471
1472 rv = gen_rtx_PARALLEL (mode, rtvec_alloc (4));
1473 XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
1474 gen_rtx_REG (HImode,
1475 R0_REGNO),
1476 GEN_INT (0));
1477 XVECEXP (rv, 0, 1) = gen_rtx_EXPR_LIST (VOIDmode,
1478 gen_rtx_REG (HImode,
1479 R1_REGNO),
1480 GEN_INT (2));
1481 XVECEXP (rv, 0, 2) = gen_rtx_EXPR_LIST (VOIDmode,
1482 gen_rtx_REG (HImode,
1483 R2_REGNO),
1484 GEN_INT (4));
1485 XVECEXP (rv, 0, 3) = gen_rtx_EXPR_LIST (VOIDmode,
1486 gen_rtx_REG (HImode,
1487 R3_REGNO),
1488 GEN_INT (6));
1489 return rv;
1490 }
1491
1492 if (TARGET_A24 && GET_MODE_SIZE (mode) > 2)
1493 {
1494 rtx rv;
1495
1496 rv = gen_rtx_PARALLEL (mode, rtvec_alloc (1));
1497 XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
1498 gen_rtx_REG (mode,
1499 R0_REGNO),
1500 GEN_INT (0));
1501 return rv;
1502 }
1503#endif
1504
1505 if (GET_MODE_SIZE (mode) > 2)
1506 return gen_rtx_REG (mode, MEM0_REGNO);
1507 return gen_rtx_REG (mode, R0_REGNO);
1508}
1509
2a31793e 1510/* Implements TARGET_FUNCTION_VALUE. Functions and libcalls have the same
38b2d076 1511 conventions. */
2a31793e
AS
1512
1513#undef TARGET_FUNCTION_VALUE
1514#define TARGET_FUNCTION_VALUE m32c_function_value
1515
1516static rtx
1517m32c_function_value (const_tree valtype,
1518 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
1519 bool outgoing ATTRIBUTE_UNUSED)
38b2d076
DD
1520{
1521 /* return reg or parallel */
ef4bddc2 1522 const machine_mode mode = TYPE_MODE (valtype);
2a31793e
AS
1523 return m32c_libcall_value (mode, NULL_RTX);
1524}
1525
f28f2337
AS
1526/* Implements TARGET_FUNCTION_VALUE_REGNO_P. */
1527
1528#undef TARGET_FUNCTION_VALUE_REGNO_P
1529#define TARGET_FUNCTION_VALUE_REGNO_P m32c_function_value_regno_p
2a31793e 1530
f28f2337 1531static bool
2a31793e
AS
1532m32c_function_value_regno_p (const unsigned int regno)
1533{
1534 return (regno == R0_REGNO || regno == MEM0_REGNO);
38b2d076
DD
1535}
1536
1537/* How Large Values Are Returned */
1538
1539/* We return structures by pushing the address on the stack, even if
1540 we use registers for the first few "real" arguments. */
1541#undef TARGET_STRUCT_VALUE_RTX
1542#define TARGET_STRUCT_VALUE_RTX m32c_struct_value_rtx
1543static rtx
1544m32c_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
1545 int incoming ATTRIBUTE_UNUSED)
1546{
1547 return 0;
1548}
1549
1550/* Function Entry and Exit */
1551
1552/* Implements EPILOGUE_USES. Interrupts restore all registers. */
1553int
1554m32c_epilogue_uses (int regno ATTRIBUTE_UNUSED)
1555{
1556 if (cfun->machine->is_interrupt)
1557 return 1;
1558 return 0;
1559}
1560
1561/* Implementing the Varargs Macros */
1562
1563#undef TARGET_STRICT_ARGUMENT_NAMING
1564#define TARGET_STRICT_ARGUMENT_NAMING m32c_strict_argument_naming
1565static bool
d5cc9181 1566m32c_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
38b2d076
DD
1567{
1568 return 1;
1569}
1570
1571/* Trampolines for Nested Functions */
1572
1573/*
1574 m16c:
1575 1 0000 75C43412 mov.w #0x1234,a0
1576 2 0004 FC000000 jmp.a label
1577
1578 m32c:
1579 1 0000 BC563412 mov.l:s #0x123456,a0
1580 2 0004 CC000000 jmp.a label
1581*/
1582
1583/* Implements TRAMPOLINE_SIZE. */
1584int
1585m32c_trampoline_size (void)
1586{
1587 /* Allocate extra space so we can avoid the messy shifts when we
1588 initialize the trampoline; we just write past the end of the
1589 opcode. */
1590 return TARGET_A16 ? 8 : 10;
1591}
1592
1593/* Implements TRAMPOLINE_ALIGNMENT. */
1594int
1595m32c_trampoline_alignment (void)
1596{
1597 return 2;
1598}
1599
229fbccb
RH
1600/* Implements TARGET_TRAMPOLINE_INIT. */
1601
1602#undef TARGET_TRAMPOLINE_INIT
1603#define TARGET_TRAMPOLINE_INIT m32c_trampoline_init
1604static void
1605m32c_trampoline_init (rtx m_tramp, tree fndecl, rtx chainval)
38b2d076 1606{
229fbccb
RH
1607 rtx function = XEXP (DECL_RTL (fndecl), 0);
1608
1609#define A0(m,i) adjust_address (m_tramp, m, i)
38b2d076
DD
1610 if (TARGET_A16)
1611 {
1612 /* Note: we subtract a "word" because the moves want signed
1613 constants, not unsigned constants. */
1614 emit_move_insn (A0 (HImode, 0), GEN_INT (0xc475 - 0x10000));
1615 emit_move_insn (A0 (HImode, 2), chainval);
1616 emit_move_insn (A0 (QImode, 4), GEN_INT (0xfc - 0x100));
85f65093
KH
1617 /* We use 16-bit addresses here, but store the zero to turn it
1618 into a 24-bit offset. */
38b2d076
DD
1619 emit_move_insn (A0 (HImode, 5), function);
1620 emit_move_insn (A0 (QImode, 7), GEN_INT (0x00));
1621 }
1622 else
1623 {
1624 /* Note that the PSI moves actually write 4 bytes. Make sure we
1625 write stuff out in the right order, and leave room for the
1626 extra byte at the end. */
1627 emit_move_insn (A0 (QImode, 0), GEN_INT (0xbc - 0x100));
1628 emit_move_insn (A0 (PSImode, 1), chainval);
1629 emit_move_insn (A0 (QImode, 4), GEN_INT (0xcc - 0x100));
1630 emit_move_insn (A0 (PSImode, 5), function);
1631 }
1632#undef A0
1633}
1634
1635/* Addressing Modes */
1636
c6c3dba9
PB
1637/* The r8c/m32c family supports a wide range of non-orthogonal
1638 addressing modes, including the ability to double-indirect on *some*
1639 of them. Not all insns support all modes, either, but we rely on
1640 predicates and constraints to deal with that. */
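/* Examples (added for illustration, matching the checks below): on an
   A16 part a frame access such as (plus:HI (reg:HI fb) (const_int -4))
   is accepted because the offset fits in [-128, 127 - mode_adjust],
   while $sp-relative addresses are limited to that same 8-bit range on
   both chip families.  */
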
1641#undef TARGET_LEGITIMATE_ADDRESS_P
1642#define TARGET_LEGITIMATE_ADDRESS_P m32c_legitimate_address_p
1643bool
ef4bddc2 1644m32c_legitimate_address_p (machine_mode mode, rtx x, bool strict)
38b2d076
DD
1645{
1646 int mode_adjust;
1647 if (CONSTANT_P (x))
1648 return 1;
1649
5fd5d713
DD
1650 if (TARGET_A16 && GET_MODE (x) != HImode && GET_MODE (x) != SImode)
1651 return 0;
1652 if (TARGET_A24 && GET_MODE (x) != PSImode)
1653 return 0;
1654
38b2d076
DD
1655 /* Wide references to memory will be split after reload, so we must
1656 ensure that all parts of such splits remain legitimate
1657 addresses. */
1658 mode_adjust = GET_MODE_SIZE (mode) - 1;
1659
1660 /* allowing PLUS yields mem:HI(plus:SI(mem:SI(plus:SI in m32c_split_move */
1661 if (GET_CODE (x) == PRE_DEC
1662 || GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_MODIFY)
1663 {
1664 return (GET_CODE (XEXP (x, 0)) == REG
1665 && REGNO (XEXP (x, 0)) == SP_REGNO);
1666 }
1667
1668#if 0
1669 /* This is the double indirection detection, but it currently
1670 doesn't work as cleanly as this code implies, so until we've had
1671 a chance to debug it, leave it disabled. */
1672 if (TARGET_A24 && GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) != PLUS)
1673 {
1674#if DEBUG_DOUBLE
1675 fprintf (stderr, "double indirect\n");
1676#endif
1677 x = XEXP (x, 0);
1678 }
1679#endif
1680
1681 encode_pattern (x);
1682 if (RTX_IS ("r"))
1683 {
1684 /* Most indexable registers can be used without displacements,
1685 although some of them will be emitted with an explicit zero
1686 to please the assembler. */
1687 switch (REGNO (patternr[0]))
1688 {
38b2d076
DD
1689 case A1_REGNO:
1690 case SB_REGNO:
1691 case FB_REGNO:
1692 case SP_REGNO:
5fd5d713
DD
1693 if (TARGET_A16 && GET_MODE (x) == SImode)
1694 return 0;
1695 case A0_REGNO:
38b2d076
DD
1696 return 1;
1697
1698 default:
1699 if (IS_PSEUDO (patternr[0], strict))
1700 return 1;
1701 return 0;
1702 }
1703 }
5fd5d713
DD
1704
1705 if (TARGET_A16 && GET_MODE (x) == SImode)
1706 return 0;
1707
38b2d076
DD
1708 if (RTX_IS ("+ri"))
1709 {
1710 /* This is more interesting, because different base registers
1711 allow for different displacements - both range and signedness
1712 - and it differs from chip series to chip series too. */
1713 int rn = REGNO (patternr[1]);
1714 HOST_WIDE_INT offs = INTVAL (patternr[2]);
1715 switch (rn)
1716 {
1717 case A0_REGNO:
1718 case A1_REGNO:
1719 case SB_REGNO:
1720 /* The syntax only allows positive offsets, but when the
1721 offsets span the entire memory range, we can simulate
1722 negative offsets by wrapping. */
1723 if (TARGET_A16)
1724 return (offs >= -65536 && offs <= 65535 - mode_adjust);
1725 if (rn == SB_REGNO)
1726 return (offs >= 0 && offs <= 65535 - mode_adjust);
1727 /* A0 or A1 */
1728 return (offs >= -16777216 && offs <= 16777215);
1729
1730 case FB_REGNO:
1731 if (TARGET_A16)
1732 return (offs >= -128 && offs <= 127 - mode_adjust);
1733 return (offs >= -65536 && offs <= 65535 - mode_adjust);
1734
1735 case SP_REGNO:
1736 return (offs >= -128 && offs <= 127 - mode_adjust);
1737
1738 default:
1739 if (IS_PSEUDO (patternr[1], strict))
1740 return 1;
1741 return 0;
1742 }
1743 }
1744 if (RTX_IS ("+rs") || RTX_IS ("+r+si"))
1745 {
1746 rtx reg = patternr[1];
1747
1748 /* We don't know where the symbol is, so only allow base
1749 registers which support displacements spanning the whole
1750 address range. */
1751 switch (REGNO (reg))
1752 {
1753 case A0_REGNO:
1754 case A1_REGNO:
1755 /* $sb needs a secondary reload, but since it's involved in
1756 memory address reloads too, we don't deal with it very
1757 well. */
1758 /* case SB_REGNO: */
1759 return 1;
1760 default:
45d898e4
DD
1761 if (GET_CODE (reg) == SUBREG)
1762 return 0;
38b2d076
DD
1763 if (IS_PSEUDO (reg, strict))
1764 return 1;
1765 return 0;
1766 }
1767 }
1768 return 0;
1769}
1770
1771/* Implements REG_OK_FOR_BASE_P. */
1772int
1773m32c_reg_ok_for_base_p (rtx x, int strict)
1774{
1775 if (GET_CODE (x) != REG)
1776 return 0;
1777 switch (REGNO (x))
1778 {
1779 case A0_REGNO:
1780 case A1_REGNO:
1781 case SB_REGNO:
1782 case FB_REGNO:
1783 case SP_REGNO:
1784 return 1;
1785 default:
1786 if (IS_PSEUDO (x, strict))
1787 return 1;
1788 return 0;
1789 }
1790}
1791
04aff2c0 1792/* We have three choices for choosing fb->aN offsets. If we choose -128,
85f65093 1793 we need one MOVA -128[fb],aN opcode and 16-bit aN displacements,
04aff2c0
DD
1794 like this:
1795 EB 4B FF mova -128[$fb],$a0
1796 D8 0C FF FF mov.w:Q #0,-1[$a0]
1797
85f65093 1798 Alternately, we subtract the frame size, and hopefully use 8-bit aN
04aff2c0
DD
1799 displacements:
1800 7B F4 stc $fb,$a0
1801 77 54 00 01 sub #256,$a0
1802 D8 08 01 mov.w:Q #0,1[$a0]
1803
1804 If we don't offset (i.e. offset by zero), we end up with:
1805 7B F4 stc $fb,$a0
1806 D8 0C 00 FF mov.w:Q #0,-256[$a0]
1807
1808 We have to subtract *something* so that we have a PLUS rtx to mark
1809 that we've done this reload. The -128 offset will never result in
85f65093 1810 an 8-bit aN offset, and the payoff for the second case is five
04aff2c0
DD
1811 loads *if* those loads are within 256 bytes of the other end of the
1812 frame, so the third case seems best. Note that we subtract the
1813 zero, but detect that in the addhi3 pattern. */
1814
ea471af0
JM
1815#define BIG_FB_ADJ 0
1816
38b2d076
DD
1817/* Implements LEGITIMIZE_ADDRESS. The only address we really have to
1818 worry about is frame base offsets, as $fb has a limited
1819 displacement range. We deal with this by attempting to reload $fb
1820 itself into an address register; that seems to result in the best
1821 code. */
506d7b68
PB
1822#undef TARGET_LEGITIMIZE_ADDRESS
1823#define TARGET_LEGITIMIZE_ADDRESS m32c_legitimize_address
1824static rtx
1825m32c_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
ef4bddc2 1826 machine_mode mode)
38b2d076
DD
1827{
1828#if DEBUG0
1829 fprintf (stderr, "m32c_legitimize_address for mode %s\n", mode_name[mode]);
506d7b68 1830 debug_rtx (x);
38b2d076
DD
1831 fprintf (stderr, "\n");
1832#endif
1833
506d7b68
PB
1834 if (GET_CODE (x) == PLUS
1835 && GET_CODE (XEXP (x, 0)) == REG
1836 && REGNO (XEXP (x, 0)) == FB_REGNO
1837 && GET_CODE (XEXP (x, 1)) == CONST_INT
1838 && (INTVAL (XEXP (x, 1)) < -128
1839 || INTVAL (XEXP (x, 1)) > (128 - GET_MODE_SIZE (mode))))
38b2d076
DD
1840 {
1841 /* reload FB to A_REGS */
38b2d076 1842 rtx temp = gen_reg_rtx (Pmode);
506d7b68 1843 x = copy_rtx (x);
f7df4a84 1844 emit_insn (gen_rtx_SET (temp, XEXP (x, 0)));
506d7b68 1845 XEXP (x, 0) = temp;
38b2d076
DD
1846 }
1847
506d7b68 1848 return x;
38b2d076
DD
1849}
1850
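/* Editor's sketch (illustrative only, not part of the port): with a
   16-bit frame base, an access such as

     (mem:HI (plus:HI (reg:HI FB) (const_int 200)))

   is outside the -128..127 $fb displacement range, so the hook above
   rewrites it roughly as

     (set (reg:HI temp) (reg:HI FB))
     ... (mem:HI (plus:HI (reg:HI temp) (const_int 200))) ...

   so that TEMP can later be allocated to an address register, which
   accepts the larger displacement.  */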
1851/* Implements LEGITIMIZE_RELOAD_ADDRESS. See comment above. */
1852int
1853m32c_legitimize_reload_address (rtx * x,
ef4bddc2 1854 machine_mode mode,
38b2d076
DD
1855 int opnum,
1856 int type, int ind_levels ATTRIBUTE_UNUSED)
1857{
1858#if DEBUG0
1859 fprintf (stderr, "\nm32c_legitimize_reload_address for mode %s\n",
1860 mode_name[mode]);
1861 debug_rtx (*x);
1862#endif
1863
1864 /* At one point, this function tried to get $fb copied to an address
1865 register, which in theory would maximize sharing, but gcc was
1866 *also* still trying to reload the whole address, and we'd run out
1867 of address registers. So we let gcc do the naive (but safe)
1868 reload instead, when the above function doesn't handle it for
04aff2c0
DD
1869 us.
1870
1871 The code below is a second attempt at the above. */
1872
1873 if (GET_CODE (*x) == PLUS
1874 && GET_CODE (XEXP (*x, 0)) == REG
1875 && REGNO (XEXP (*x, 0)) == FB_REGNO
1876 && GET_CODE (XEXP (*x, 1)) == CONST_INT
1877 && (INTVAL (XEXP (*x, 1)) < -128
1878 || INTVAL (XEXP (*x, 1)) > (128 - GET_MODE_SIZE (mode))))
1879 {
1880 rtx sum;
1881 int offset = INTVAL (XEXP (*x, 1));
1882 int adjustment = -BIG_FB_ADJ;
1883
1884 sum = gen_rtx_PLUS (Pmode, XEXP (*x, 0),
1885 GEN_INT (adjustment));
1886 *x = gen_rtx_PLUS (Pmode, sum, GEN_INT (offset - adjustment));
1887 if (type == RELOAD_OTHER)
1888 type = RELOAD_FOR_OTHER_ADDRESS;
1889 push_reload (sum, NULL_RTX, &XEXP (*x, 0), NULL,
1890 A_REGS, Pmode, VOIDmode, 0, 0, opnum,
444d6efe 1891 (enum reload_type) type);
04aff2c0
DD
1892 return 1;
1893 }
1894
1895 if (GET_CODE (*x) == PLUS
1896 && GET_CODE (XEXP (*x, 0)) == PLUS
1897 && GET_CODE (XEXP (XEXP (*x, 0), 0)) == REG
1898 && REGNO (XEXP (XEXP (*x, 0), 0)) == FB_REGNO
1899 && GET_CODE (XEXP (XEXP (*x, 0), 1)) == CONST_INT
1900 && GET_CODE (XEXP (*x, 1)) == CONST_INT
1901 )
1902 {
1903 if (type == RELOAD_OTHER)
1904 type = RELOAD_FOR_OTHER_ADDRESS;
1905 push_reload (XEXP (*x, 0), NULL_RTX, &XEXP (*x, 0), NULL,
1906 A_REGS, Pmode, VOIDmode, 0, 0, opnum,
444d6efe 1907 (enum reload_type) type);
f75e07bc
BE
1908 return 1;
1909 }
1910
1911 if (TARGET_A24 && GET_MODE (*x) == PSImode)
1912 {
1913 push_reload (*x, NULL_RTX, x, NULL,
1914 A_REGS, PSImode, VOIDmode, 0, 0, opnum,
1915 (enum reload_type) type);
04aff2c0
DD
1916 return 1;
1917 }
38b2d076
DD
1918
1919 return 0;
1920}
1921
5fd5d713
DD
1922/* Return the appropriate mode for a named address pointer. */
1923#undef TARGET_ADDR_SPACE_POINTER_MODE
1924#define TARGET_ADDR_SPACE_POINTER_MODE m32c_addr_space_pointer_mode
ef4bddc2 1925static machine_mode
5fd5d713
DD
1926m32c_addr_space_pointer_mode (addr_space_t addrspace)
1927{
1928 switch (addrspace)
1929 {
1930 case ADDR_SPACE_GENERIC:
1931 return TARGET_A24 ? PSImode : HImode;
1932 case ADDR_SPACE_FAR:
1933 return SImode;
1934 default:
1935 gcc_unreachable ();
1936 }
1937}
1938
1939/* Return the appropriate mode for a named address address. */
1940#undef TARGET_ADDR_SPACE_ADDRESS_MODE
1941#define TARGET_ADDR_SPACE_ADDRESS_MODE m32c_addr_space_address_mode
ef4bddc2 1942static machine_mode
5fd5d713
DD
1943m32c_addr_space_address_mode (addr_space_t addrspace)
1944{
1945 switch (addrspace)
1946 {
1947 case ADDR_SPACE_GENERIC:
1948 return TARGET_A24 ? PSImode : HImode;
1949 case ADDR_SPACE_FAR:
1950 return SImode;
1951 default:
1952 gcc_unreachable ();
1953 }
1954}
1955
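/* Rough user-level illustration of the two hooks above (assuming the
   __far keyword is what maps to ADDR_SPACE_FAR in this port):

     char *p;         -- HImode pointer on -mcpu=r8c/m16c,
                         PSImode pointer on -mcpu=m32c
     char __far *q;   -- SImode pointer on every variant

   The byte sizes of the pointers follow directly from those modes.  */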
1956/* Like m32c_legitimate_address_p, except with named addresses. */
1957#undef TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P
1958#define TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P \
1959 m32c_addr_space_legitimate_address_p
1960static bool
ef4bddc2 1961m32c_addr_space_legitimate_address_p (machine_mode mode, rtx x,
5fd5d713
DD
1962 bool strict, addr_space_t as)
1963{
1964 if (as == ADDR_SPACE_FAR)
1965 {
1966 if (TARGET_A24)
1967 return 0;
1968 encode_pattern (x);
1969 if (RTX_IS ("r"))
1970 {
1971 if (GET_MODE (x) != SImode)
1972 return 0;
1973 switch (REGNO (patternr[0]))
1974 {
1975 case A0_REGNO:
1976 return 1;
1977
1978 default:
1979 if (IS_PSEUDO (patternr[0], strict))
1980 return 1;
1981 return 0;
1982 }
1983 }
1984 if (RTX_IS ("+^Sri"))
1985 {
1986 int rn = REGNO (patternr[3]);
1987 HOST_WIDE_INT offs = INTVAL (patternr[4]);
1988 if (GET_MODE (patternr[3]) != HImode)
1989 return 0;
1990 switch (rn)
1991 {
1992 case A0_REGNO:
1993 return (offs >= 0 && offs <= 0xfffff);
1994
1995 default:
1996 if (IS_PSEUDO (patternr[3], strict))
1997 return 1;
1998 return 0;
1999 }
2000 }
2001 if (RTX_IS ("+^Srs"))
2002 {
2003 int rn = REGNO (patternr[3]);
2004 if (GET_MODE (patternr[3]) != HImode)
2005 return 0;
2006 switch (rn)
2007 {
2008 case A0_REGNO:
2009 return 1;
2010
2011 default:
2012 if (IS_PSEUDO (patternr[3], strict))
2013 return 1;
2014 return 0;
2015 }
2016 }
2017 if (RTX_IS ("+^S+ris"))
2018 {
2019 int rn = REGNO (patternr[4]);
2020 if (GET_MODE (patternr[4]) != HImode)
2021 return 0;
2022 switch (rn)
2023 {
2024 case A0_REGNO:
2025 return 1;
2026
2027 default:
2028 if (IS_PSEUDO (patternr[4], strict))
2029 return 1;
2030 return 0;
2031 }
2032 }
2033 if (RTX_IS ("s"))
2034 {
2035 return 1;
2036 }
2037 return 0;
2038 }
2039
2040 else if (as != ADDR_SPACE_GENERIC)
2041 gcc_unreachable ();
2042
2043 return m32c_legitimate_address_p (mode, x, strict);
2044}
2045
2046/* Like m32c_legitimate_address, except with named address support. */
2047#undef TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS
2048#define TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS m32c_addr_space_legitimize_address
2049static rtx
ef4bddc2 2050m32c_addr_space_legitimize_address (rtx x, rtx oldx, machine_mode mode,
5fd5d713
DD
2051 addr_space_t as)
2052{
2053 if (as != ADDR_SPACE_GENERIC)
2054 {
2055#if DEBUG0
2056 fprintf (stderr, "\033[36mm32c_addr_space_legitimize_address for mode %s\033[0m\n", mode_name[mode]);
2057 debug_rtx (x);
2058 fprintf (stderr, "\n");
2059#endif
2060
2061 if (GET_CODE (x) != REG)
2062 {
2063 x = force_reg (SImode, x);
2064 }
2065 return x;
2066 }
2067
2068 return m32c_legitimize_address (x, oldx, mode);
2069}
2070
2071/* Determine if one named address space is a subset of another. */
2072#undef TARGET_ADDR_SPACE_SUBSET_P
2073#define TARGET_ADDR_SPACE_SUBSET_P m32c_addr_space_subset_p
2074static bool
2075m32c_addr_space_subset_p (addr_space_t subset, addr_space_t superset)
2076{
2077 gcc_assert (subset == ADDR_SPACE_GENERIC || subset == ADDR_SPACE_FAR);
2078 gcc_assert (superset == ADDR_SPACE_GENERIC || superset == ADDR_SPACE_FAR);
2079
2080 if (subset == superset)
2081 return true;
2082
2083 else
2084 return (subset == ADDR_SPACE_GENERIC && superset == ADDR_SPACE_FAR);
2085}
2086
2087#undef TARGET_ADDR_SPACE_CONVERT
2088#define TARGET_ADDR_SPACE_CONVERT m32c_addr_space_convert
2089/* Convert from one address space to another. */
2090static rtx
2091m32c_addr_space_convert (rtx op, tree from_type, tree to_type)
2092{
2093 addr_space_t from_as = TYPE_ADDR_SPACE (TREE_TYPE (from_type));
2094 addr_space_t to_as = TYPE_ADDR_SPACE (TREE_TYPE (to_type));
2095 rtx result;
2096
2097 gcc_assert (from_as == ADDR_SPACE_GENERIC || from_as == ADDR_SPACE_FAR);
2098 gcc_assert (to_as == ADDR_SPACE_GENERIC || to_as == ADDR_SPACE_FAR);
2099
2100 if (to_as == ADDR_SPACE_GENERIC && from_as == ADDR_SPACE_FAR)
2101 {
2102 /* This is unpredictable, as we're truncating off usable address
2103 bits. */
2104
2105 result = gen_reg_rtx (HImode);
2106 emit_move_insn (result, simplify_subreg (HImode, op, SImode, 0));
2107 return result;
2108 }
2109 else if (to_as == ADDR_SPACE_FAR && from_as == ADDR_SPACE_GENERIC)
2110 {
2111 /* This always works. */
2112 result = gen_reg_rtx (SImode);
2113 emit_insn (gen_zero_extendhisi2 (result, op));
2114 return result;
2115 }
2116 else
2117 gcc_unreachable ();
2118}
2119
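/* Hedged example of what the conversion hook above produces (user
   source, with __far assumed to name ADDR_SPACE_FAR).  Casting a far
   pointer down to a generic pointer truncates to the low 16 bits
   (the "unpredictable" case); the other direction zero-extends and is
   always safe:

     char __far *f;
     char *g = (char *) f;              -- keeps only the low 16 bits
     char __far *h = (char __far *) g;  -- zero-extended
*/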
38b2d076
DD
2120/* Condition Code Status */
2121
2122#undef TARGET_FIXED_CONDITION_CODE_REGS
2123#define TARGET_FIXED_CONDITION_CODE_REGS m32c_fixed_condition_code_regs
2124static bool
2125m32c_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
2126{
2127 *p1 = FLG_REGNO;
2128 *p2 = INVALID_REGNUM;
2129 return true;
2130}
2131
2132/* Describing Relative Costs of Operations */
2133
0e607518 2134/* Implements TARGET_REGISTER_MOVE_COST. We make impossible moves
38b2d076
DD
2135 prohibitively expensive, like trying to put QIs in r2/r3 (there are
2136 no opcodes to do that). We also discourage use of mem* registers
2137 since they're really memory. */
0e607518
AS
2138
2139#undef TARGET_REGISTER_MOVE_COST
2140#define TARGET_REGISTER_MOVE_COST m32c_register_move_cost
2141
2142static int
ef4bddc2 2143m32c_register_move_cost (machine_mode mode, reg_class_t from,
0e607518 2144 reg_class_t to)
38b2d076
DD
2145{
2146 int cost = COSTS_N_INSNS (3);
0e607518
AS
2147 HARD_REG_SET cc;
2148
2149/* FIXME: pick real values, but not 2 for now. */
2150 COPY_HARD_REG_SET (cc, reg_class_contents[(int) from]);
2151 IOR_HARD_REG_SET (cc, reg_class_contents[(int) to]);
2152
2153 if (mode == QImode
2154 && hard_reg_set_intersect_p (cc, reg_class_contents[R23_REGS]))
38b2d076 2155 {
0e607518 2156 if (hard_reg_set_subset_p (cc, reg_class_contents[R23_REGS]))
38b2d076
DD
2157 cost = COSTS_N_INSNS (1000);
2158 else
2159 cost = COSTS_N_INSNS (80);
2160 }
2161
2162 if (!class_can_hold_mode (from, mode) || !class_can_hold_mode (to, mode))
2163 cost = COSTS_N_INSNS (1000);
2164
0e607518 2165 if (reg_classes_intersect_p (from, CR_REGS))
38b2d076
DD
2166 cost += COSTS_N_INSNS (5);
2167
0e607518 2168 if (reg_classes_intersect_p (to, CR_REGS))
38b2d076
DD
2169 cost += COSTS_N_INSNS (5);
2170
2171 if (from == MEM_REGS || to == MEM_REGS)
2172 cost += COSTS_N_INSNS (50);
0e607518
AS
2173 else if (reg_classes_intersect_p (from, MEM_REGS)
2174 || reg_classes_intersect_p (to, MEM_REGS))
38b2d076
DD
2175 cost += COSTS_N_INSNS (10);
2176
2177#if DEBUG0
2178 fprintf (stderr, "register_move_cost %s from %s to %s = %d\n",
0e607518
AS
2179 mode_name[mode], class_names[(int) from], class_names[(int) to],
2180 cost);
38b2d076
DD
2181#endif
2182 return cost;
2183}
2184
0e607518
AS
2185/* Implements TARGET_MEMORY_MOVE_COST. */
2186
2187#undef TARGET_MEMORY_MOVE_COST
2188#define TARGET_MEMORY_MOVE_COST m32c_memory_move_cost
2189
2190static int
ef4bddc2 2191m32c_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
0e607518
AS
2192 reg_class_t rclass ATTRIBUTE_UNUSED,
2193 bool in ATTRIBUTE_UNUSED)
38b2d076
DD
2194{
2195 /* FIXME: pick real values. */
2196 return COSTS_N_INSNS (10);
2197}
2198
07127a0a
DD
2199/* Here we try to describe when we use multiple opcodes for one RTX so
2200 that gcc knows when to use them. */
2201#undef TARGET_RTX_COSTS
2202#define TARGET_RTX_COSTS m32c_rtx_costs
2203static bool
e548c9df
AM
2204m32c_rtx_costs (rtx x, machine_mode mode, int outer_code,
2205 int opno ATTRIBUTE_UNUSED,
68f932c4 2206 int *total, bool speed ATTRIBUTE_UNUSED)
07127a0a 2207{
e548c9df 2208 int code = GET_CODE (x);
07127a0a
DD
2209 switch (code)
2210 {
2211 case REG:
2212 if (REGNO (x) >= MEM0_REGNO && REGNO (x) <= MEM7_REGNO)
2213 *total += COSTS_N_INSNS (500);
2214 else
2215 *total += COSTS_N_INSNS (1);
2216 return true;
2217
2218 case ASHIFT:
2219 case LSHIFTRT:
2220 case ASHIFTRT:
2221 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
2222 {
2223 /* mov.b r1l, r1h */
2224 *total += COSTS_N_INSNS (1);
2225 return true;
2226 }
2227 if (INTVAL (XEXP (x, 1)) > 8
2228 || INTVAL (XEXP (x, 1)) < -8)
2229 {
2230 /* mov.b #N, r1l */
2231 /* mov.b r1l, r1h */
2232 *total += COSTS_N_INSNS (2);
2233 return true;
2234 }
2235 return true;
2236
2237 case LE:
2238 case LEU:
2239 case LT:
2240 case LTU:
2241 case GT:
2242 case GTU:
2243 case GE:
2244 case GEU:
2245 case NE:
2246 case EQ:
2247 if (outer_code == SET)
2248 {
2249 *total += COSTS_N_INSNS (2);
2250 return true;
2251 }
2252 break;
2253
2254 case ZERO_EXTRACT:
2255 {
2256 rtx dest = XEXP (x, 0);
2257 rtx addr = XEXP (dest, 0);
2258 switch (GET_CODE (addr))
2259 {
2260 case CONST_INT:
2261 *total += COSTS_N_INSNS (1);
2262 break;
2263 case SYMBOL_REF:
2264 *total += COSTS_N_INSNS (3);
2265 break;
2266 default:
2267 *total += COSTS_N_INSNS (2);
2268 break;
2269 }
2270 return true;
2271 }
2272 break;
2273
2274 default:
2275 /* Reasonable default. */
e548c9df 2276 if (TARGET_A16 && mode == SImode)
07127a0a
DD
2277 *total += COSTS_N_INSNS (2);
2278 break;
2279 }
2280 return false;
2281}
2282
2283#undef TARGET_ADDRESS_COST
2284#define TARGET_ADDRESS_COST m32c_address_cost
2285static int
ef4bddc2 2286m32c_address_cost (rtx addr, machine_mode mode ATTRIBUTE_UNUSED,
b413068c
OE
2287 addr_space_t as ATTRIBUTE_UNUSED,
2288 bool speed ATTRIBUTE_UNUSED)
07127a0a 2289{
80b093df 2290 int i;
07127a0a
DD
2291 /* fprintf(stderr, "\naddress_cost\n");
2292 debug_rtx(addr);*/
2293 switch (GET_CODE (addr))
2294 {
2295 case CONST_INT:
80b093df
DD
2296 i = INTVAL (addr);
2297 if (i == 0)
2298 return COSTS_N_INSNS(1);
2299 if (0 < i && i <= 255)
2300 return COSTS_N_INSNS(2);
2301 if (0 < i && i <= 65535)
2302 return COSTS_N_INSNS(3);
2303 return COSTS_N_INSNS(4);
07127a0a 2304 case SYMBOL_REF:
80b093df 2305 return COSTS_N_INSNS(4);
07127a0a 2306 case REG:
80b093df
DD
2307 return COSTS_N_INSNS(1);
2308 case PLUS:
2309 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
2310 {
2311 i = INTVAL (XEXP (addr, 1));
2312 if (i == 0)
2313 return COSTS_N_INSNS(1);
2314 if (0 < i && i <= 255)
2315 return COSTS_N_INSNS(2);
2316 if (0 < i && i <= 65535)
2317 return COSTS_N_INSNS(3);
2318 }
2319 return COSTS_N_INSNS(4);
07127a0a
DD
2320 default:
2321 return 0;
2322 }
2323}
2324
38b2d076
DD
2325/* Defining the Output Assembler Language */
2326
38b2d076
DD
2327/* Output of Data */
2328
2329/* We may have 24-bit sizes, which is the native address size.
2330 Currently unused, but provided for completeness. */
2331#undef TARGET_ASM_INTEGER
2332#define TARGET_ASM_INTEGER m32c_asm_integer
2333static bool
2334m32c_asm_integer (rtx x, unsigned int size, int aligned_p)
2335{
2336 switch (size)
2337 {
2338 case 3:
2339 fprintf (asm_out_file, "\t.3byte\t");
2340 output_addr_const (asm_out_file, x);
2341 fputc ('\n', asm_out_file);
2342 return true;
e9555b13
DD
2343 case 4:
2344 if (GET_CODE (x) == SYMBOL_REF)
2345 {
2346 fprintf (asm_out_file, "\t.long\t");
2347 output_addr_const (asm_out_file, x);
2348 fputc ('\n', asm_out_file);
2349 return true;
2350 }
2351 break;
38b2d076
DD
2352 }
2353 return default_assemble_integer (x, size, aligned_p);
2354}
2355
2356/* Output of Assembler Instructions */
2357
a4174ebf 2358/* We use a lookup table because the addressing modes are non-orthogonal. */
38b2d076
DD
2359
2360static struct
2361{
2362 char code;
2363 char const *pattern;
2364 char const *format;
2365}
2366const conversions[] = {
2367 { 0, "r", "0" },
2368
2369 { 0, "mr", "z[1]" },
2370 { 0, "m+ri", "3[2]" },
2371 { 0, "m+rs", "3[2]" },
5fd5d713
DD
2372 { 0, "m+^Zrs", "5[4]" },
2373 { 0, "m+^Zri", "5[4]" },
2374 { 0, "m+^Z+ris", "7+6[5]" },
2375 { 0, "m+^Srs", "5[4]" },
2376 { 0, "m+^Sri", "5[4]" },
2377 { 0, "m+^S+ris", "7+6[5]" },
38b2d076
DD
2378 { 0, "m+r+si", "4+5[2]" },
2379 { 0, "ms", "1" },
2380 { 0, "mi", "1" },
2381 { 0, "m+si", "2+3" },
2382
2383 { 0, "mmr", "[z[2]]" },
2384 { 0, "mm+ri", "[4[3]]" },
2385 { 0, "mm+rs", "[4[3]]" },
2386 { 0, "mm+r+si", "[5+6[3]]" },
2387 { 0, "mms", "[[2]]" },
2388 { 0, "mmi", "[[2]]" },
2389 { 0, "mm+si", "[4[3]]" },
2390
2391 { 0, "i", "#0" },
2392 { 0, "s", "#0" },
2393 { 0, "+si", "#1+2" },
2394 { 0, "l", "#0" },
2395
2396 { 'l', "l", "0" },
2397 { 'd', "i", "0" },
2398 { 'd', "s", "0" },
2399 { 'd', "+si", "1+2" },
2400 { 'D', "i", "0" },
2401 { 'D', "s", "0" },
2402 { 'D', "+si", "1+2" },
2403 { 'x', "i", "#0" },
2404 { 'X', "i", "#0" },
2405 { 'm', "i", "#0" },
2406 { 'b', "i", "#0" },
07127a0a 2407 { 'B', "i", "0" },
38b2d076
DD
2408 { 'p', "i", "0" },
2409
2410 { 0, 0, 0 }
2411};
2412
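/* Worked example of the table above (illustrative): an operand such as

     (mem:HI (plus:HI (reg:HI A0) (const_int 4)))

   encodes (via encode_pattern) as "m+ri", so the matching format
   string "3[2]" prints patternr[3], '[', patternr[2], ']', giving

     4[a0]

   which is the assembler's displacement-plus-register syntax.  */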
2413/* This is in order according to the bitfield that pushm/popm use. */
2414static char const *pushm_regs[] = {
2415 "fb", "sb", "a1", "a0", "r3", "r2", "r1", "r0"
2416};
2417
4645179e
AS
2418/* Implements TARGET_PRINT_OPERAND. */
2419
2420#undef TARGET_PRINT_OPERAND
2421#define TARGET_PRINT_OPERAND m32c_print_operand
2422
2423static void
38b2d076
DD
2424m32c_print_operand (FILE * file, rtx x, int code)
2425{
2426 int i, j, b;
2427 const char *comma;
2428 HOST_WIDE_INT ival;
2429 int unsigned_const = 0;
ff485e71 2430 int force_sign;
38b2d076
DD
2431
2432 /* Multiplies; constants are converted to sign-extended format but
2433 we need unsigned, so 'u' and 'U' tell us what size unsigned we
2434 need. */
2435 if (code == 'u')
2436 {
2437 unsigned_const = 2;
2438 code = 0;
2439 }
2440 if (code == 'U')
2441 {
2442 unsigned_const = 1;
2443 code = 0;
2444 }
2445 /* This one is only for debugging; you can put it in a pattern to
2446 force this error. */
2447 if (code == '!')
2448 {
2449 fprintf (stderr, "dj: unreviewed pattern:");
2450 if (current_output_insn)
2451 debug_rtx (current_output_insn);
2452 gcc_unreachable ();
2453 }
2454 /* PSImode operations are either .w or .l depending on the target. */
2455 if (code == '&')
2456 {
2457 if (TARGET_A16)
2458 fprintf (file, "w");
2459 else
2460 fprintf (file, "l");
2461 return;
2462 }
2463 /* Inverted conditionals. */
2464 if (code == 'C')
2465 {
2466 switch (GET_CODE (x))
2467 {
2468 case LE:
2469 fputs ("gt", file);
2470 break;
2471 case LEU:
2472 fputs ("gtu", file);
2473 break;
2474 case LT:
2475 fputs ("ge", file);
2476 break;
2477 case LTU:
2478 fputs ("geu", file);
2479 break;
2480 case GT:
2481 fputs ("le", file);
2482 break;
2483 case GTU:
2484 fputs ("leu", file);
2485 break;
2486 case GE:
2487 fputs ("lt", file);
2488 break;
2489 case GEU:
2490 fputs ("ltu", file);
2491 break;
2492 case NE:
2493 fputs ("eq", file);
2494 break;
2495 case EQ:
2496 fputs ("ne", file);
2497 break;
2498 default:
2499 gcc_unreachable ();
2500 }
2501 return;
2502 }
2503 /* Regular conditionals. */
2504 if (code == 'c')
2505 {
2506 switch (GET_CODE (x))
2507 {
2508 case LE:
2509 fputs ("le", file);
2510 break;
2511 case LEU:
2512 fputs ("leu", file);
2513 break;
2514 case LT:
2515 fputs ("lt", file);
2516 break;
2517 case LTU:
2518 fputs ("ltu", file);
2519 break;
2520 case GT:
2521 fputs ("gt", file);
2522 break;
2523 case GTU:
2524 fputs ("gtu", file);
2525 break;
2526 case GE:
2527 fputs ("ge", file);
2528 break;
2529 case GEU:
2530 fputs ("geu", file);
2531 break;
2532 case NE:
2533 fputs ("ne", file);
2534 break;
2535 case EQ:
2536 fputs ("eq", file);
2537 break;
2538 default:
2539 gcc_unreachable ();
2540 }
2541 return;
2542 }
2543 /* Used in negsi2 to do HImode ops on the two parts of an SImode
2544 operand. */
2545 if (code == 'h' && GET_MODE (x) == SImode)
2546 {
2547 x = m32c_subreg (HImode, x, SImode, 0);
2548 code = 0;
2549 }
2550 if (code == 'H' && GET_MODE (x) == SImode)
2551 {
2552 x = m32c_subreg (HImode, x, SImode, 2);
2553 code = 0;
2554 }
07127a0a
DD
2555 if (code == 'h' && GET_MODE (x) == HImode)
2556 {
2557 x = m32c_subreg (QImode, x, HImode, 0);
2558 code = 0;
2559 }
2560 if (code == 'H' && GET_MODE (x) == HImode)
2561 {
2562 /* We can't actually represent this as an rtx. Do it here. */
2563 if (GET_CODE (x) == REG)
2564 {
2565 switch (REGNO (x))
2566 {
2567 case R0_REGNO:
2568 fputs ("r0h", file);
2569 return;
2570 case R1_REGNO:
2571 fputs ("r1h", file);
2572 return;
2573 default:
2574 gcc_unreachable();
2575 }
2576 }
2577 /* This should be a MEM. */
2578 x = m32c_subreg (QImode, x, HImode, 1);
2579 code = 0;
2580 }
2581 /* This is for BMcond, which always wants word register names. */
2582 if (code == 'h' && GET_MODE (x) == QImode)
2583 {
2584 if (GET_CODE (x) == REG)
2585 x = gen_rtx_REG (HImode, REGNO (x));
2586 code = 0;
2587 }
38b2d076
DD
2588 /* 'x' and 'X' need to be ignored for non-immediates. */
2589 if ((code == 'x' || code == 'X') && GET_CODE (x) != CONST_INT)
2590 code = 0;
2591
2592 encode_pattern (x);
ff485e71 2593 force_sign = 0;
38b2d076
DD
2594 for (i = 0; conversions[i].pattern; i++)
2595 if (conversions[i].code == code
2596 && streq (conversions[i].pattern, pattern))
2597 {
2598 for (j = 0; conversions[i].format[j]; j++)
2599 /* backslash quotes the next character in the output pattern. */
2600 if (conversions[i].format[j] == '\\')
2601 {
2602 fputc (conversions[i].format[j + 1], file);
2603 j++;
2604 }
2605 /* Digits in the output pattern indicate that the
2606 corresponding RTX is to be output at that point. */
2607 else if (ISDIGIT (conversions[i].format[j]))
2608 {
2609 rtx r = patternr[conversions[i].format[j] - '0'];
2610 switch (GET_CODE (r))
2611 {
2612 case REG:
2613 fprintf (file, "%s",
2614 reg_name_with_mode (REGNO (r), GET_MODE (r)));
2615 break;
2616 case CONST_INT:
2617 switch (code)
2618 {
2619 case 'b':
07127a0a
DD
2620 case 'B':
2621 {
2622 int v = INTVAL (r);
2623 int i = (int) exact_log2 (v);
2624 if (i == -1)
2625 i = (int) exact_log2 ((v ^ 0xffff) & 0xffff);
2626 if (i == -1)
2627 i = (int) exact_log2 ((v ^ 0xff) & 0xff);
2628 /* Bit position. */
2629 fprintf (file, "%d", i);
2630 }
38b2d076
DD
2631 break;
2632 case 'x':
2633 /* Unsigned byte. */
2634 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2635 INTVAL (r) & 0xff);
2636 break;
2637 case 'X':
2638 /* Unsigned word. */
2639 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2640 INTVAL (r) & 0xffff);
2641 break;
2642 case 'p':
2643 /* pushm and popm encode a register set into a single byte. */
2644 comma = "";
2645 for (b = 7; b >= 0; b--)
2646 if (INTVAL (r) & (1 << b))
2647 {
2648 fprintf (file, "%s%s", comma, pushm_regs[b]);
2649 comma = ",";
2650 }
2651 break;
2652 case 'm':
2653 /* "Minus". Output -X */
2654 ival = (-INTVAL (r) & 0xffff);
2655 if (ival & 0x8000)
2656 ival = ival - 0x10000;
2657 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2658 break;
2659 default:
2660 ival = INTVAL (r);
2661 if (conversions[i].format[j + 1] == '[' && ival < 0)
2662 {
2663 /* We can simulate negative displacements by
2664 taking advantage of address space
2665 wrapping when the offset can span the
2666 entire address range. */
2667 rtx base =
2668 patternr[conversions[i].format[j + 2] - '0'];
2669 if (GET_CODE (base) == REG)
2670 switch (REGNO (base))
2671 {
2672 case A0_REGNO:
2673 case A1_REGNO:
2674 if (TARGET_A24)
2675 ival = 0x1000000 + ival;
2676 else
2677 ival = 0x10000 + ival;
2678 break;
2679 case SB_REGNO:
2680 if (TARGET_A16)
2681 ival = 0x10000 + ival;
2682 break;
2683 }
2684 }
2685 else if (code == 'd' && ival < 0 && j == 0)
2686 /* The "mova" opcode is used to do addition by
2687 computing displacements, but again, we need
2688 displacements to be unsigned *if* they're
2689 the only component of the displacement
2690 (i.e. no "symbol-4" type displacement). */
2691 ival = (TARGET_A24 ? 0x1000000 : 0x10000) + ival;
2692
2693 if (conversions[i].format[j] == '0')
2694 {
2695 /* More conversions to unsigned. */
2696 if (unsigned_const == 2)
2697 ival &= 0xffff;
2698 if (unsigned_const == 1)
2699 ival &= 0xff;
2700 }
2701 if (streq (conversions[i].pattern, "mi")
2702 || streq (conversions[i].pattern, "mmi"))
2703 {
2704 /* Integers used as addresses are unsigned. */
2705 ival &= (TARGET_A24 ? 0xffffff : 0xffff);
2706 }
ff485e71
DD
2707 if (force_sign && ival >= 0)
2708 fputc ('+', file);
38b2d076
DD
2709 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2710 break;
2711 }
2712 break;
2713 case CONST_DOUBLE:
2714 /* We don't have const_double constants. If it
2715 happens, make it obvious. */
2716 fprintf (file, "[const_double 0x%lx]",
2717 (unsigned long) CONST_DOUBLE_HIGH (r));
2718 break;
2719 case SYMBOL_REF:
2720 assemble_name (file, XSTR (r, 0));
2721 break;
2722 case LABEL_REF:
2723 output_asm_label (r);
2724 break;
2725 default:
2726 fprintf (stderr, "don't know how to print this operand:");
2727 debug_rtx (r);
2728 gcc_unreachable ();
2729 }
2730 }
2731 else
2732 {
2733 if (conversions[i].format[j] == 'z')
2734 {
2735 /* Some addressing modes *must* have a displacement,
2736 so insert a zero here if needed. */
2737 int k;
2738 for (k = j + 1; conversions[i].format[k]; k++)
2739 if (ISDIGIT (conversions[i].format[k]))
2740 {
2741 rtx reg = patternr[conversions[i].format[k] - '0'];
2742 if (GET_CODE (reg) == REG
2743 && (REGNO (reg) == SB_REGNO
2744 || REGNO (reg) == FB_REGNO
2745 || REGNO (reg) == SP_REGNO))
2746 fputc ('0', file);
2747 }
2748 continue;
2749 }
2750 /* Signed displacements off symbols need to have signs
2751 blended cleanly. */
2752 if (conversions[i].format[j] == '+'
ff485e71 2753 && (!code || code == 'D' || code == 'd')
38b2d076 2754 && ISDIGIT (conversions[i].format[j + 1])
ff485e71
DD
2755 && (GET_CODE (patternr[conversions[i].format[j + 1] - '0'])
2756 == CONST_INT))
2757 {
2758 force_sign = 1;
2759 continue;
2760 }
38b2d076
DD
2761 fputc (conversions[i].format[j], file);
2762 }
2763 break;
2764 }
2765 if (!conversions[i].pattern)
2766 {
2767 fprintf (stderr, "unconvertible operand %c `%s'", code ? code : '-',
2768 pattern);
2769 debug_rtx (x);
2770 fprintf (file, "[%c.%s]", code ? code : '-', pattern);
2771 }
2772
2773 return;
2774}
2775
4645179e
AS
2776/* Implements TARGET_PRINT_OPERAND_PUNCT_VALID_P.
2777
2778 See m32c_print_operand above for descriptions of what these do. */
2779
2780#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
2781#define TARGET_PRINT_OPERAND_PUNCT_VALID_P m32c_print_operand_punct_valid_p
2782
2783static bool
2784m32c_print_operand_punct_valid_p (unsigned char c)
38b2d076
DD
2785{
2786 if (c == '&' || c == '!')
4645179e
AS
2787 return true;
2788
2789 return false;
38b2d076
DD
2790}
2791
4645179e
AS
2792/* Implements TARGET_PRINT_OPERAND_ADDRESS. Nothing unusual here. */
2793
2794#undef TARGET_PRINT_OPERAND_ADDRESS
2795#define TARGET_PRINT_OPERAND_ADDRESS m32c_print_operand_address
2796
2797static void
cc8ca59e 2798m32c_print_operand_address (FILE * stream, machine_mode /*mode*/, rtx address)
38b2d076 2799{
235e1fe8
NC
2800 if (GET_CODE (address) == MEM)
2801 address = XEXP (address, 0);
2802 else
2803 /* cf: gcc.dg/asm-4.c. */
2804 gcc_assert (GET_CODE (address) == REG);
2805
2806 m32c_print_operand (stream, address, 0);
38b2d076
DD
2807}
2808
2809/* Implements ASM_OUTPUT_REG_PUSH. Control registers are pushed
2810 differently than general registers. */
2811void
2812m32c_output_reg_push (FILE * s, int regno)
2813{
2814 if (regno == FLG_REGNO)
2815 fprintf (s, "\tpushc\tflg\n");
2816 else
04aff2c0 2817 fprintf (s, "\tpush.%c\t%s\n",
38b2d076
DD
2818 " bwll"[reg_push_size (regno)], reg_names[regno]);
2819}
2820
2821/* Likewise for ASM_OUTPUT_REG_POP. */
2822void
2823m32c_output_reg_pop (FILE * s, int regno)
2824{
2825 if (regno == FLG_REGNO)
2826 fprintf (s, "\tpopc\tflg\n");
2827 else
04aff2c0 2828 fprintf (s, "\tpop.%c\t%s\n",
38b2d076
DD
2829 " bwll"[reg_push_size (regno)], reg_names[regno]);
2830}
2831
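/* Example of the output above, assuming reg_push_size (R0_REGNO) is 2
   (a word-sized push):

     push.w  r0        -- general register
     pushc   flg       -- the flags register uses pushc/popc

   The " bwll" string simply maps push sizes 1/2/3/4 to the
   .b/.w/.l/.l suffixes.  */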
2832/* Defining target-specific uses of `__attribute__' */
2833
2834/* Used to simplify the logic below. Find the attributes wherever
2835 they may be. */
2836#define M32C_ATTRIBUTES(decl) \
2837 (TYPE_P (decl)) ? TYPE_ATTRIBUTES (decl) \
2838 : DECL_ATTRIBUTES (decl) \
2839 ? (DECL_ATTRIBUTES (decl)) \
2840 : TYPE_ATTRIBUTES (TREE_TYPE (decl))
2841
2842/* Returns TRUE if the given tree has the "interrupt" attribute. */
2843static int
2844interrupt_p (tree node ATTRIBUTE_UNUSED)
2845{
2846 tree list = M32C_ATTRIBUTES (node);
2847 while (list)
2848 {
2849 if (is_attribute_p ("interrupt", TREE_PURPOSE (list)))
2850 return 1;
2851 list = TREE_CHAIN (list);
2852 }
65655f79
DD
2853 return fast_interrupt_p (node);
2854}
2855
2856/* Returns TRUE if the given tree has the "bank_switch" attribute. */
2857static int
2858bank_switch_p (tree node ATTRIBUTE_UNUSED)
2859{
2860 tree list = M32C_ATTRIBUTES (node);
2861 while (list)
2862 {
2863 if (is_attribute_p ("bank_switch", TREE_PURPOSE (list)))
2864 return 1;
2865 list = TREE_CHAIN (list);
2866 }
2867 return 0;
2868}
2869
2870/* Returns TRUE if the given tree has the "fast_interrupt" attribute. */
2871static int
2872fast_interrupt_p (tree node ATTRIBUTE_UNUSED)
2873{
2874 tree list = M32C_ATTRIBUTES (node);
2875 while (list)
2876 {
2877 if (is_attribute_p ("fast_interrupt", TREE_PURPOSE (list)))
2878 return 1;
2879 list = TREE_CHAIN (list);
2880 }
38b2d076
DD
2881 return 0;
2882}
2883
2884static tree
2885interrupt_handler (tree * node ATTRIBUTE_UNUSED,
2886 tree name ATTRIBUTE_UNUSED,
2887 tree args ATTRIBUTE_UNUSED,
2888 int flags ATTRIBUTE_UNUSED,
2889 bool * no_add_attrs ATTRIBUTE_UNUSED)
2890{
2891 return NULL_TREE;
2892}
2893
5abd2125
JS
2894/* Returns TRUE if the given tree has the "function_vector" attribute. */
2895int
2896m32c_special_page_vector_p (tree func)
2897{
653e2568
DD
2898 tree list;
2899
5abd2125
JS
2900 if (TREE_CODE (func) != FUNCTION_DECL)
2901 return 0;
2902
653e2568 2903 list = M32C_ATTRIBUTES (func);
5abd2125
JS
2904 while (list)
2905 {
2906 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2907 return 1;
2908 list = TREE_CHAIN (list);
2909 }
2910 return 0;
2911}
2912
2913static tree
2914function_vector_handler (tree * node ATTRIBUTE_UNUSED,
2915 tree name ATTRIBUTE_UNUSED,
2916 tree args ATTRIBUTE_UNUSED,
2917 int flags ATTRIBUTE_UNUSED,
2918 bool * no_add_attrs ATTRIBUTE_UNUSED)
2919{
2920 if (TARGET_R8C)
2921 {
2922	  /* The attribute is not supported for the R8C target.  */
2923 warning (OPT_Wattributes,
29d08eba
JM
2924 "%qE attribute is not supported for R8C target",
2925 name);
5abd2125
JS
2926 *no_add_attrs = true;
2927 }
2928 else if (TREE_CODE (*node) != FUNCTION_DECL)
2929 {
2930 /* The attribute must be applied to functions only. */
2931 warning (OPT_Wattributes,
29d08eba
JM
2932 "%qE attribute applies only to functions",
2933 name);
5abd2125
JS
2934 *no_add_attrs = true;
2935 }
2936 else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
2937 {
2938 /* The argument must be a constant integer. */
2939 warning (OPT_Wattributes,
29d08eba
JM
2940 "%qE attribute argument not an integer constant",
2941 name);
5abd2125
JS
2942 *no_add_attrs = true;
2943 }
2944 else if (TREE_INT_CST_LOW (TREE_VALUE (args)) < 18
2945 || TREE_INT_CST_LOW (TREE_VALUE (args)) > 255)
2946 {
2947	  /* The argument value must be between 18 and 255.  */
2948 warning (OPT_Wattributes,
29d08eba
JM
2949 "%qE attribute argument should be between 18 to 255",
2950 name);
5abd2125
JS
2951 *no_add_attrs = true;
2952 }
2953 return NULL_TREE;
2954}
2955
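/* Illustrative use of the attribute validated above (user source, not
   part of this file); the argument must be an integer constant in the
   range 18..255, and the target must not be R8C:

     void isr (void) __attribute__ ((function_vector (0x20)));

   The intent is that calls to such a function can go through the
   special page vector table instead of an ordinary jsr.  */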
2956/* If the function referenced by X carries the 'function_vector'
2957 attribute, return its vector number; otherwise return zero. */
2958int
2959current_function_special_page_vector (rtx x)
2960{
2961 int num;
2962
2963 if ((GET_CODE(x) == SYMBOL_REF)
2964 && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
2965 {
653e2568 2966 tree list;
5abd2125
JS
2967 tree t = SYMBOL_REF_DECL (x);
2968
2969 if (TREE_CODE (t) != FUNCTION_DECL)
2970 return 0;
2971
653e2568 2972 list = M32C_ATTRIBUTES (t);
5abd2125
JS
2973 while (list)
2974 {
2975 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2976 {
2977 num = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list)));
2978 return num;
2979 }
2980
2981 list = TREE_CHAIN (list);
2982 }
2983
2984 return 0;
2985 }
2986 else
2987 return 0;
2988}
2989
38b2d076
DD
2990#undef TARGET_ATTRIBUTE_TABLE
2991#define TARGET_ATTRIBUTE_TABLE m32c_attribute_table
2992static const struct attribute_spec m32c_attribute_table[] = {
62d784f7
KT
2993 {"interrupt", 0, 0, false, false, false, interrupt_handler, false},
2994 {"bank_switch", 0, 0, false, false, false, interrupt_handler, false},
2995 {"fast_interrupt", 0, 0, false, false, false, interrupt_handler, false},
2996 {"function_vector", 1, 1, true, false, false, function_vector_handler,
2997 false},
2998 {0, 0, 0, 0, 0, 0, 0, false}
38b2d076
DD
2999};
3000
3001#undef TARGET_COMP_TYPE_ATTRIBUTES
3002#define TARGET_COMP_TYPE_ATTRIBUTES m32c_comp_type_attributes
3003static int
3101faab
KG
3004m32c_comp_type_attributes (const_tree type1 ATTRIBUTE_UNUSED,
3005 const_tree type2 ATTRIBUTE_UNUSED)
38b2d076
DD
3006{
3007 /* 0=incompatible 1=compatible 2=warning */
3008 return 1;
3009}
3010
3011#undef TARGET_INSERT_ATTRIBUTES
3012#define TARGET_INSERT_ATTRIBUTES m32c_insert_attributes
3013static void
3014m32c_insert_attributes (tree node ATTRIBUTE_UNUSED,
3015 tree * attr_ptr ATTRIBUTE_UNUSED)
3016{
f6052f86
DD
3017 unsigned addr;
3018 /* See if we need to make #pragma address variables volatile. */
3019
3020 if (TREE_CODE (node) == VAR_DECL)
3021 {
444d6efe 3022 const char *name = IDENTIFIER_POINTER (DECL_NAME (node));
f6052f86
DD
3023 if (m32c_get_pragma_address (name, &addr))
3024 {
3025 TREE_THIS_VOLATILE (node) = true;
3026 }
3027 }
3028}
3029
f6052f86 3030/* Hash table of pragma info. */
fb5c464a 3031static GTY(()) hash_map<nofree_string_hash, unsigned> *pragma_htab;
f6052f86
DD
3032
3033void
3034m32c_note_pragma_address (const char *varname, unsigned address)
3035{
f6052f86 3036 if (!pragma_htab)
fb5c464a 3037 pragma_htab = hash_map<nofree_string_hash, unsigned>::create_ggc (31);
f6052f86 3038
2a22f99c
TS
3039 const char *name = ggc_strdup (varname);
3040 unsigned int *slot = &pragma_htab->get_or_insert (name);
3041 *slot = address;
f6052f86
DD
3042}
3043
3044static bool
3045m32c_get_pragma_address (const char *varname, unsigned *address)
3046{
f6052f86
DD
3047 if (!pragma_htab)
3048 return false;
3049
2a22f99c
TS
3050 unsigned int *slot = pragma_htab->get (varname);
3051 if (slot)
f6052f86 3052 {
2a22f99c 3053 *address = *slot;
f6052f86
DD
3054 return true;
3055 }
3056 return false;
3057}
3058
3059void
444d6efe
JR
3060m32c_output_aligned_common (FILE *stream, tree decl ATTRIBUTE_UNUSED,
3061 const char *name,
f6052f86
DD
3062 int size, int align, int global)
3063{
3064 unsigned address;
3065
3066 if (m32c_get_pragma_address (name, &address))
3067 {
3068 /* We never output these as global. */
3069 assemble_name (stream, name);
3070 fprintf (stream, " = 0x%04x\n", address);
3071 return;
3072 }
3073 if (!global)
3074 {
3075 fprintf (stream, "\t.local\t");
3076 assemble_name (stream, name);
3077 fprintf (stream, "\n");
3078 }
3079 fprintf (stream, "\t.comm\t");
3080 assemble_name (stream, name);
3081 fprintf (stream, ",%u,%u\n", size, align / BITS_PER_UNIT);
38b2d076
DD
3082}
3083
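/* Hedged usage sketch for the pragma data handled above.  The pragma
   itself is registered in m32c-pragma.c; the exact spelling below is
   an assumption for illustration:

     #pragma ADDRESS port0 0x3d2
     unsigned char port0;

   With that, PORT0 is made volatile by m32c_insert_attributes and is
   emitted as "port0 = 0x03d2" instead of a .comm/.local definition.  */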
3084/* Predicates */
3085
f9b89438 3086/* This is a list of legal subregs of hard regs. */
67fc44cb
DD
3087static const struct {
3088 unsigned char outer_mode_size;
3089 unsigned char inner_mode_size;
3090 unsigned char byte_mask;
3091 unsigned char legal_when;
f9b89438 3092 unsigned int regno;
f9b89438 3093} legal_subregs[] = {
67fc44cb
DD
3094 {1, 2, 0x03, 1, R0_REGNO}, /* r0h r0l */
3095 {1, 2, 0x03, 1, R1_REGNO}, /* r1h r1l */
3096 {1, 2, 0x01, 1, A0_REGNO},
3097 {1, 2, 0x01, 1, A1_REGNO},
f9b89438 3098
67fc44cb
DD
3099 {1, 4, 0x01, 1, A0_REGNO},
3100 {1, 4, 0x01, 1, A1_REGNO},
f9b89438 3101
67fc44cb
DD
3102 {2, 4, 0x05, 1, R0_REGNO}, /* r2 r0 */
3103 {2, 4, 0x05, 1, R1_REGNO}, /* r3 r1 */
3104 {2, 4, 0x05, 16, A0_REGNO}, /* a1 a0 */
3105 {2, 4, 0x01, 24, A0_REGNO}, /* a1 a0 */
3106 {2, 4, 0x01, 24, A1_REGNO}, /* a1 a0 */
f9b89438 3107
67fc44cb 3108 {4, 8, 0x55, 1, R0_REGNO}, /* r3 r1 r2 r0 */
f9b89438
DD
3109};
3110
3111/* Returns TRUE if OP is a subreg of a hard reg which we don't
f6052f86 3112 support. We also bail on MEMs with illegal addresses. */
f9b89438
DD
3113bool
3114m32c_illegal_subreg_p (rtx op)
3115{
f9b89438
DD
3116 int offset;
3117 unsigned int i;
ef4bddc2 3118 machine_mode src_mode, dest_mode;
f9b89438 3119
f6052f86
DD
3120 if (GET_CODE (op) == MEM
3121 && ! m32c_legitimate_address_p (Pmode, XEXP (op, 0), false))
3122 {
3123 return true;
3124 }
3125
f9b89438
DD
3126 if (GET_CODE (op) != SUBREG)
3127 return false;
3128
3129 dest_mode = GET_MODE (op);
3130 offset = SUBREG_BYTE (op);
3131 op = SUBREG_REG (op);
3132 src_mode = GET_MODE (op);
3133
3134 if (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (src_mode))
3135 return false;
3136 if (GET_CODE (op) != REG)
3137 return false;
3138 if (REGNO (op) >= MEM0_REGNO)
3139 return false;
3140
3141 offset = (1 << offset);
3142
67fc44cb 3143 for (i = 0; i < ARRAY_SIZE (legal_subregs); i ++)
f9b89438
DD
3144 if (legal_subregs[i].outer_mode_size == GET_MODE_SIZE (dest_mode)
3145 && legal_subregs[i].regno == REGNO (op)
3146 && legal_subregs[i].inner_mode_size == GET_MODE_SIZE (src_mode)
3147 && legal_subregs[i].byte_mask & offset)
3148 {
3149 switch (legal_subregs[i].legal_when)
3150 {
3151 case 1:
3152 return false;
3153 case 16:
3154 if (TARGET_A16)
3155 return false;
3156 break;
3157 case 24:
3158 if (TARGET_A24)
3159 return false;
3160 break;
3161 }
3162 }
3163 return true;
3164}
3165
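/* Sketch of what the table above accepts, in RTL terms:

     (subreg:QI (reg:HI r0) 0)   -- r0l, accepted (byte_mask 0x03)
     (subreg:QI (reg:HI r0) 1)   -- r0h, accepted
     (subreg:QI (reg:HI a0) 1)   -- rejected; only byte 0 of a0 is listed

   A hard-register subreg that matches no table entry is reported as
   illegal.  */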
38b2d076
DD
3166/* Returns TRUE if we support a move between the first two operands.
3167 At the moment, we just want to discourage mem to mem moves until
3168 after reload, because reload has a hard time with our limited
3169 number of address registers, and we can get into a situation where
3170 we need three of them when we only have two. */
3171bool
ef4bddc2 3172m32c_mov_ok (rtx * operands, machine_mode mode ATTRIBUTE_UNUSED)
38b2d076
DD
3173{
3174 rtx op0 = operands[0];
3175 rtx op1 = operands[1];
3176
3177 if (TARGET_A24)
3178 return true;
3179
3180#define DEBUG_MOV_OK 0
3181#if DEBUG_MOV_OK
3182 fprintf (stderr, "m32c_mov_ok %s\n", mode_name[mode]);
3183 debug_rtx (op0);
3184 debug_rtx (op1);
3185#endif
3186
3187 if (GET_CODE (op0) == SUBREG)
3188 op0 = XEXP (op0, 0);
3189 if (GET_CODE (op1) == SUBREG)
3190 op1 = XEXP (op1, 0);
3191
3192 if (GET_CODE (op0) == MEM
3193 && GET_CODE (op1) == MEM
3194 && ! reload_completed)
3195 {
3196#if DEBUG_MOV_OK
3197 fprintf (stderr, " - no, mem to mem\n");
3198#endif
3199 return false;
3200 }
3201
3202#if DEBUG_MOV_OK
3203 fprintf (stderr, " - ok\n");
3204#endif
3205 return true;
3206}
3207
ff485e71
DD
3208/* Returns TRUE if two consecutive HImode mov instructions, generated
3209 to move an immediate into a double-word variable, can be combined
3210 into a single SImode mov instruction. */
3211bool
55356334 3212m32c_immd_dbl_mov (rtx * operands ATTRIBUTE_UNUSED,
ef4bddc2 3213 machine_mode mode ATTRIBUTE_UNUSED)
ff485e71 3214{
55356334
RS
3215 /* ??? This relied on the now-defunct MEM_SCALAR and MEM_IN_STRUCT_P
3216 flags. */
ff485e71
DD
3217 return false;
3218}
3219
38b2d076
DD
3220/* Expanders */
3221
3222/* Subregs are non-orthogonal for us, because our registers are all
3223 different sizes. */
3224static rtx
ef4bddc2
RS
3225m32c_subreg (machine_mode outer,
3226 rtx x, machine_mode inner, int byte)
38b2d076
DD
3227{
3228 int r, nr = -1;
3229
3230 /* Converting MEMs to different types that are the same size, we
3231 just rewrite them. */
3232 if (GET_CODE (x) == SUBREG
3233 && SUBREG_BYTE (x) == 0
3234 && GET_CODE (SUBREG_REG (x)) == MEM
3235 && (GET_MODE_SIZE (GET_MODE (x))
3236 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
3237 {
3238 rtx oldx = x;
3239 x = gen_rtx_MEM (GET_MODE (x), XEXP (SUBREG_REG (x), 0));
3240 MEM_COPY_ATTRIBUTES (x, SUBREG_REG (oldx));
3241 }
3242
3243 /* Push/pop get done as smaller push/pops. */
3244 if (GET_CODE (x) == MEM
3245 && (GET_CODE (XEXP (x, 0)) == PRE_DEC
3246 || GET_CODE (XEXP (x, 0)) == POST_INC))
3247 return gen_rtx_MEM (outer, XEXP (x, 0));
3248 if (GET_CODE (x) == SUBREG
3249 && GET_CODE (XEXP (x, 0)) == MEM
3250 && (GET_CODE (XEXP (XEXP (x, 0), 0)) == PRE_DEC
3251 || GET_CODE (XEXP (XEXP (x, 0), 0)) == POST_INC))
3252 return gen_rtx_MEM (outer, XEXP (XEXP (x, 0), 0));
3253
3254 if (GET_CODE (x) != REG)
146456c1
DD
3255 {
3256 rtx r = simplify_gen_subreg (outer, x, inner, byte);
3257 if (GET_CODE (r) == SUBREG
3258 && GET_CODE (x) == MEM
3259 && MEM_VOLATILE_P (x))
3260 {
3261 /* Volatile MEMs don't get simplified, but we need them to
3262 be. We are little endian, so the subreg byte is the
3263 offset. */
91140cd3 3264 r = adjust_address_nv (x, outer, byte);
146456c1
DD
3265 }
3266 return r;
3267 }
38b2d076
DD
3268
3269 r = REGNO (x);
3270 if (r >= FIRST_PSEUDO_REGISTER || r == AP_REGNO)
3271 return simplify_gen_subreg (outer, x, inner, byte);
3272
3273 if (IS_MEM_REGNO (r))
3274 return simplify_gen_subreg (outer, x, inner, byte);
3275
3276 /* This is where the complexities of our register layout are
3277 described. */
3278 if (byte == 0)
3279 nr = r;
3280 else if (outer == HImode)
3281 {
3282 if (r == R0_REGNO && byte == 2)
3283 nr = R2_REGNO;
3284 else if (r == R0_REGNO && byte == 4)
3285 nr = R1_REGNO;
3286 else if (r == R0_REGNO && byte == 6)
3287 nr = R3_REGNO;
3288 else if (r == R1_REGNO && byte == 2)
3289 nr = R3_REGNO;
3290 else if (r == A0_REGNO && byte == 2)
3291 nr = A1_REGNO;
3292 }
3293 else if (outer == SImode)
3294 {
3295 if (r == R0_REGNO && byte == 0)
3296 nr = R0_REGNO;
3297 else if (r == R0_REGNO && byte == 4)
3298 nr = R1_REGNO;
3299 }
3300 if (nr == -1)
3301 {
3302 fprintf (stderr, "m32c_subreg %s %s %d\n",
3303 mode_name[outer], mode_name[inner], byte);
3304 debug_rtx (x);
3305 gcc_unreachable ();
3306 }
3307 return gen_rtx_REG (outer, nr);
3308}
3309
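/* Worked example of the register layout handled above: a 32-bit value
   in r0 occupies the pair r0/r2 (and a 64-bit value r0/r2/r1/r3), so
   the high word of an SImode value in r0 is r2, not r1:

     m32c_subreg (HImode, gen_rtx_REG (SImode, R0_REGNO), SImode, 2)
       => (reg:HI r2)

   Purely illustrative; the mapping is the byte/regno table in the
   function body.  */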
3310/* Used to emit move instructions. We split some moves,
3311 and avoid mem-mem moves. */
3312int
ef4bddc2 3313m32c_prepare_move (rtx * operands, machine_mode mode)
38b2d076 3314{
5fd5d713
DD
3315 if (far_addr_space_p (operands[0])
3316 && CONSTANT_P (operands[1]))
3317 {
3318 operands[1] = force_reg (GET_MODE (operands[0]), operands[1]);
3319 }
38b2d076
DD
3320 if (TARGET_A16 && mode == PSImode)
3321 return m32c_split_move (operands, mode, 1);
3322 if ((GET_CODE (operands[0]) == MEM)
3323 && (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY))
3324 {
3325 rtx pmv = XEXP (operands[0], 0);
3326 rtx dest_reg = XEXP (pmv, 0);
3327 rtx dest_mod = XEXP (pmv, 1);
3328
f7df4a84 3329 emit_insn (gen_rtx_SET (dest_reg, dest_mod));
38b2d076
DD
3330 operands[0] = gen_rtx_MEM (mode, dest_reg);
3331 }
b3a13419 3332 if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
38b2d076
DD
3333 operands[1] = copy_to_mode_reg (mode, operands[1]);
3334 return 0;
3335}
3336
3337#define DEBUG_SPLIT 0
3338
3339/* Returns TRUE if the given PSImode move should be split. We split
3340 for all r8c/m16c moves, since those chips don't support them, and
3341 for POP.L, as we can only *push* SImode. */
3342int
3343m32c_split_psi_p (rtx * operands)
3344{
3345#if DEBUG_SPLIT
3346 fprintf (stderr, "\nm32c_split_psi_p\n");
3347 debug_rtx (operands[0]);
3348 debug_rtx (operands[1]);
3349#endif
3350 if (TARGET_A16)
3351 {
3352#if DEBUG_SPLIT
3353 fprintf (stderr, "yes, A16\n");
3354#endif
3355 return 1;
3356 }
3357 if (GET_CODE (operands[1]) == MEM
3358 && GET_CODE (XEXP (operands[1], 0)) == POST_INC)
3359 {
3360#if DEBUG_SPLIT
3361 fprintf (stderr, "yes, pop.l\n");
3362#endif
3363 return 1;
3364 }
3365#if DEBUG_SPLIT
3366 fprintf (stderr, "no, default\n");
3367#endif
3368 return 0;
3369}
3370
3371/* Split the given move. SPLIT_ALL is 0 if splitting is optional
3372 (define_expand), 1 if it is not optional (define_insn_and_split),
3373 and 3 for define_split (alternate api). */
3374int
ef4bddc2 3375m32c_split_move (rtx * operands, machine_mode mode, int split_all)
38b2d076
DD
3376{
3377 rtx s[4], d[4];
3378 int parts, si, di, rev = 0;
3379 int rv = 0, opi = 2;
ef4bddc2 3380 machine_mode submode = HImode;
38b2d076
DD
3381 rtx *ops, local_ops[10];
3382
3383 /* define_split modifies the existing operands, but the other two
3384 emit new insns. OPS is where we store the operand pairs, which
3385 we emit later. */
3386 if (split_all == 3)
3387 ops = operands;
3388 else
3389 ops = local_ops;
3390
3391 /* Else HImode. */
3392 if (mode == DImode)
3393 submode = SImode;
3394
3395 /* Before splitting mem-mem moves, force one operand into a
3396 register. */
b3a13419 3397 if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
38b2d076
DD
3398 {
3399#if DEBUG0
3400 fprintf (stderr, "force_reg...\n");
3401 debug_rtx (operands[1]);
3402#endif
3403 operands[1] = force_reg (mode, operands[1]);
3404#if DEBUG0
3405 debug_rtx (operands[1]);
3406#endif
3407 }
3408
3409 parts = 2;
3410
3411#if DEBUG_SPLIT
b3a13419
ILT
3412 fprintf (stderr, "\nsplit_move %d all=%d\n", !can_create_pseudo_p (),
3413 split_all);
38b2d076
DD
3414 debug_rtx (operands[0]);
3415 debug_rtx (operands[1]);
3416#endif
3417
eb5f0c07
DD
3418 /* Note that split_all is not used to select the api after this
3419 point, so it's safe to set it to 3 even with define_insn. */
3420 /* None of the chips can move SI operands to sp-relative addresses,
3421 so we always split those. */
03dd17b1 3422 if (satisfies_constraint_Ss (operands[0]))
eb5f0c07
DD
3423 split_all = 3;
3424
5fd5d713
DD
3425 if (TARGET_A16
3426 && (far_addr_space_p (operands[0])
3427 || far_addr_space_p (operands[1])))
3428 split_all |= 1;
3429
38b2d076
DD
3430 /* We don't need to split these. */
3431 if (TARGET_A24
3432 && split_all != 3
3433 && (mode == SImode || mode == PSImode)
3434 && !(GET_CODE (operands[1]) == MEM
3435 && GET_CODE (XEXP (operands[1], 0)) == POST_INC))
3436 return 0;
3437
3438 /* First, enumerate the subregs we'll be dealing with. */
3439 for (si = 0; si < parts; si++)
3440 {
3441 d[si] =
3442 m32c_subreg (submode, operands[0], mode,
3443 si * GET_MODE_SIZE (submode));
3444 s[si] =
3445 m32c_subreg (submode, operands[1], mode,
3446 si * GET_MODE_SIZE (submode));
3447 }
3448
3449 /* Split pushes by emitting a sequence of smaller pushes. */
3450 if (GET_CODE (d[0]) == MEM && GET_CODE (XEXP (d[0], 0)) == PRE_DEC)
3451 {
3452 for (si = parts - 1; si >= 0; si--)
3453 {
3454 ops[opi++] = gen_rtx_MEM (submode,
3455 gen_rtx_PRE_DEC (Pmode,
3456 gen_rtx_REG (Pmode,
3457 SP_REGNO)));
3458 ops[opi++] = s[si];
3459 }
3460
3461 rv = 1;
3462 }
3463 /* Likewise for pops. */
3464 else if (GET_CODE (s[0]) == MEM && GET_CODE (XEXP (s[0], 0)) == POST_INC)
3465 {
3466 for (di = 0; di < parts; di++)
3467 {
3468 ops[opi++] = d[di];
3469 ops[opi++] = gen_rtx_MEM (submode,
3470 gen_rtx_POST_INC (Pmode,
3471 gen_rtx_REG (Pmode,
3472 SP_REGNO)));
3473 }
3474 rv = 1;
3475 }
3476 else if (split_all)
3477 {
3478 /* if d[di] == s[si] for any di < si, we'll early clobber. */
3479 for (di = 0; di < parts - 1; di++)
3480 for (si = di + 1; si < parts; si++)
3481 if (reg_mentioned_p (d[di], s[si]))
3482 rev = 1;
3483
3484 if (rev)
3485 for (si = 0; si < parts; si++)
3486 {
3487 ops[opi++] = d[si];
3488 ops[opi++] = s[si];
3489 }
3490 else
3491 for (si = parts - 1; si >= 0; si--)
3492 {
3493 ops[opi++] = d[si];
3494 ops[opi++] = s[si];
3495 }
3496 rv = 1;
3497 }
3498 /* Now emit any moves we may have accumulated. */
3499 if (rv && split_all != 3)
3500 {
3501 int i;
3502 for (i = 2; i < opi; i += 2)
3503 emit_move_insn (ops[i], ops[i + 1]);
3504 }
3505 return rv;
3506}
3507
07127a0a
DD
3508/* The m32c has a number of opcodes that act like memcpy, strcmp, and
3509 the like. For the R8C they expect one of the addresses to be in
3510 R1L:An so we need to arrange for that. Otherwise, it's just a
3511 matter of picking out the operands we want and emitting the right
3512 pattern for them. All these expanders, which correspond to
3513 patterns in blkmov.md, must return nonzero if they expand the insn,
3514 or zero if they should FAIL. */
3515
3516/* This is a memset() opcode. All operands are implied, so we need to
3517 arrange for them to be in the right registers. The opcode wants
3518 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3519 the count (HI), and $2 the value (QI). */
3520int
3521m32c_expand_setmemhi(rtx *operands)
3522{
3523 rtx desta, count, val;
3524 rtx desto, counto;
3525
3526 desta = XEXP (operands[0], 0);
3527 count = operands[1];
3528 val = operands[2];
3529
3530 desto = gen_reg_rtx (Pmode);
3531 counto = gen_reg_rtx (HImode);
3532
3533 if (GET_CODE (desta) != REG
3534 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3535 desta = copy_to_mode_reg (Pmode, desta);
3536
3537 /* This looks like an arbitrary restriction, but this is by far the
3538 most common case. For counts 8..14 this actually results in
3539 smaller code with no speed penalty because the half-sized
3540 constant can be loaded with a shorter opcode. */
3541 if (GET_CODE (count) == CONST_INT
3542 && GET_CODE (val) == CONST_INT
3543 && ! (INTVAL (count) & 1)
3544 && (INTVAL (count) > 1)
3545 && (INTVAL (val) <= 7 && INTVAL (val) >= -8))
3546 {
3547 unsigned v = INTVAL (val) & 0xff;
3548 v = v | (v << 8);
3549 count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3550 val = copy_to_mode_reg (HImode, GEN_INT (v));
3551 if (TARGET_A16)
3552 emit_insn (gen_setmemhi_whi_op (desto, counto, val, desta, count));
3553 else
3554 emit_insn (gen_setmemhi_wpsi_op (desto, counto, val, desta, count));
3555 return 1;
3556 }
3557
3558 /* This is the generalized memset() case. */
3559 if (GET_CODE (val) != REG
3560 || REGNO (val) < FIRST_PSEUDO_REGISTER)
3561 val = copy_to_mode_reg (QImode, val);
3562
3563 if (GET_CODE (count) != REG
3564 || REGNO (count) < FIRST_PSEUDO_REGISTER)
3565 count = copy_to_mode_reg (HImode, count);
3566
3567 if (TARGET_A16)
3568 emit_insn (gen_setmemhi_bhi_op (desto, counto, val, desta, count));
3569 else
3570 emit_insn (gen_setmemhi_bpsi_op (desto, counto, val, desta, count));
3571
3572 return 1;
3573}
3574
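/* Example of the even-count fast path above (a sketch of the operand
   values, not literal output): for a call like memset (p, 1, 10) the
   byte value is doubled into a word and the count halved,

     val   = 0x0101   (HImode)
     count = 5        (HImode word count)

   so the word-wide block-set form is used instead of the byte-wide
   one.  */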
3575/* This is a memcpy() opcode. All operands are implied, so we need to
3576 arrange for them to be in the right registers. The opcode wants
3577 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3578 is the source (MEM:BLK), and $2 the count (HI). */
3579int
3580m32c_expand_movmemhi(rtx *operands)
3581{
3582 rtx desta, srca, count;
3583 rtx desto, srco, counto;
3584
3585 desta = XEXP (operands[0], 0);
3586 srca = XEXP (operands[1], 0);
3587 count = operands[2];
3588
3589 desto = gen_reg_rtx (Pmode);
3590 srco = gen_reg_rtx (Pmode);
3591 counto = gen_reg_rtx (HImode);
3592
3593 if (GET_CODE (desta) != REG
3594 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3595 desta = copy_to_mode_reg (Pmode, desta);
3596
3597 if (GET_CODE (srca) != REG
3598 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3599 srca = copy_to_mode_reg (Pmode, srca);
3600
3601 /* Similar to setmem, but we don't need to check the value. */
3602 if (GET_CODE (count) == CONST_INT
3603 && ! (INTVAL (count) & 1)
3604 && (INTVAL (count) > 1))
3605 {
3606 count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3607 if (TARGET_A16)
3608 emit_insn (gen_movmemhi_whi_op (desto, srco, counto, desta, srca, count));
3609 else
3610 emit_insn (gen_movmemhi_wpsi_op (desto, srco, counto, desta, srca, count));
3611 return 1;
3612 }
3613
3614 /* This is the generalized memcpy() case. */
3615 if (GET_CODE (count) != REG
3616 || REGNO (count) < FIRST_PSEUDO_REGISTER)
3617 count = copy_to_mode_reg (HImode, count);
3618
3619 if (TARGET_A16)
3620 emit_insn (gen_movmemhi_bhi_op (desto, srco, counto, desta, srca, count));
3621 else
3622 emit_insn (gen_movmemhi_bpsi_op (desto, srco, counto, desta, srca, count));
3623
3624 return 1;
3625}
3626
3627/* This is a stpcpy() opcode. $0 is the destination (MEM:BLK) after
3628 the copy, which should point to the NUL at the end of the string,
3629 $1 is the destination (MEM:BLK), and $2 is the source (MEM:BLK).
3630 Since our opcode leaves the destination pointing *after* the NUL,
3631 we must emit an adjustment. */
3632int
3633m32c_expand_movstr(rtx *operands)
3634{
3635 rtx desta, srca;
3636 rtx desto, srco;
3637
3638 desta = XEXP (operands[1], 0);
3639 srca = XEXP (operands[2], 0);
3640
3641 desto = gen_reg_rtx (Pmode);
3642 srco = gen_reg_rtx (Pmode);
3643
3644 if (GET_CODE (desta) != REG
3645 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3646 desta = copy_to_mode_reg (Pmode, desta);
3647
3648 if (GET_CODE (srca) != REG
3649 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3650 srca = copy_to_mode_reg (Pmode, srca);
3651
3652 emit_insn (gen_movstr_op (desto, srco, desta, srca));
3653 /* desto ends up being a1, which allows this type of add through MOVA. */
3654 emit_insn (gen_addpsi3 (operands[0], desto, GEN_INT (-1)));
3655
3656 return 1;
3657}
3658
3659/* This is a strcmp() opcode. $0 is the destination (HI) which holds
3660 <=>0 depending on the comparison, $1 is one string (MEM:BLK), and
3661 $2 is the other (MEM:BLK). We must do the comparison, and then
3662 convert the flags to a signed integer result. */
3663int
3664m32c_expand_cmpstr(rtx *operands)
3665{
3666 rtx src1a, src2a;
3667
3668 src1a = XEXP (operands[1], 0);
3669 src2a = XEXP (operands[2], 0);
3670
3671 if (GET_CODE (src1a) != REG
3672 || REGNO (src1a) < FIRST_PSEUDO_REGISTER)
3673 src1a = copy_to_mode_reg (Pmode, src1a);
3674
3675 if (GET_CODE (src2a) != REG
3676 || REGNO (src2a) < FIRST_PSEUDO_REGISTER)
3677 src2a = copy_to_mode_reg (Pmode, src2a);
3678
3679 emit_insn (gen_cmpstrhi_op (src1a, src2a, src1a, src2a));
3680 emit_insn (gen_cond_to_int (operands[0]));
3681
3682 return 1;
3683}
3684
3685
23fed240
DD
3686typedef rtx (*shift_gen_func)(rtx, rtx, rtx);
3687
3688static shift_gen_func
3689shift_gen_func_for (int mode, int code)
3690{
3691#define GFF(m,c,f) if (mode == m && code == c) return f
3692 GFF(QImode, ASHIFT, gen_ashlqi3_i);
3693 GFF(QImode, ASHIFTRT, gen_ashrqi3_i);
3694 GFF(QImode, LSHIFTRT, gen_lshrqi3_i);
3695 GFF(HImode, ASHIFT, gen_ashlhi3_i);
3696 GFF(HImode, ASHIFTRT, gen_ashrhi3_i);
3697 GFF(HImode, LSHIFTRT, gen_lshrhi3_i);
3698 GFF(PSImode, ASHIFT, gen_ashlpsi3_i);
3699 GFF(PSImode, ASHIFTRT, gen_ashrpsi3_i);
3700 GFF(PSImode, LSHIFTRT, gen_lshrpsi3_i);
3701 GFF(SImode, ASHIFT, TARGET_A16 ? gen_ashlsi3_16 : gen_ashlsi3_24);
3702 GFF(SImode, ASHIFTRT, TARGET_A16 ? gen_ashrsi3_16 : gen_ashrsi3_24);
3703 GFF(SImode, LSHIFTRT, TARGET_A16 ? gen_lshrsi3_16 : gen_lshrsi3_24);
3704#undef GFF
07127a0a 3705 gcc_unreachable ();
23fed240
DD
3706}
3707
38b2d076
DD
3708/* The m32c only has one shift, but it takes a signed count. GCC
3709 doesn't want this, so we fake it by negating any shift count when
07127a0a
DD
3710 we're pretending to shift the other way. Also, the shift count is
3711 limited to -8..8. It's slightly better to use two shifts for 9..15
3712 than to load the count into r1h, so we do that too. */
38b2d076 3713int
23fed240 3714m32c_prepare_shift (rtx * operands, int scale, int shift_code)
38b2d076 3715{
ef4bddc2 3716 machine_mode mode = GET_MODE (operands[0]);
23fed240 3717 shift_gen_func func = shift_gen_func_for (mode, shift_code);
38b2d076 3718 rtx temp;
23fed240
DD
3719
3720 if (GET_CODE (operands[2]) == CONST_INT)
38b2d076 3721 {
23fed240
DD
3722 int maxc = TARGET_A24 && (mode == PSImode || mode == SImode) ? 32 : 8;
3723 int count = INTVAL (operands[2]) * scale;
3724
3725 while (count > maxc)
3726 {
3727 temp = gen_reg_rtx (mode);
3728 emit_insn (func (temp, operands[1], GEN_INT (maxc)));
3729 operands[1] = temp;
3730 count -= maxc;
3731 }
3732 while (count < -maxc)
3733 {
3734 temp = gen_reg_rtx (mode);
3735 emit_insn (func (temp, operands[1], GEN_INT (-maxc)));
3736 operands[1] = temp;
3737 count += maxc;
3738 }
3739 emit_insn (func (operands[0], operands[1], GEN_INT (count)));
3740 return 1;
38b2d076 3741 }
2e160056
DD
3742
3743 temp = gen_reg_rtx (QImode);
38b2d076 3744 if (scale < 0)
2e160056
DD
3745 /* The pattern has a NEG that corresponds to this. */
3746 emit_move_insn (temp, gen_rtx_NEG (QImode, operands[2]));
3747 else if (TARGET_A16 && mode == SImode)
3748 /* We do this because the code below may modify this, we don't
3749 want to modify the origin of this value. */
3750 emit_move_insn (temp, operands[2]);
38b2d076 3751 else
2e160056 3752 /* We'll only use it for the shift, no point emitting a move. */
38b2d076 3753 temp = operands[2];
2e160056 3754
  if (TARGET_A16 && GET_MODE_SIZE (mode) == 4)
    {
      /* The m16c has a limit of -16..16 for SI shifts, even when the
         shift count is in a register.  Since there are so many targets
         of these shifts, it's better to expand the RTL here than to
         call a helper function.

         The resulting code looks something like this:

                cmp.b   r1h,-16
                jge.b   1f
                shl.l   -16,dest
                add.b   r1h,16
        1f:     cmp.b   r1h,16
                jle.b   1f
                shl.l   16,dest
                sub.b   r1h,16
        1f:     shl.l   r1h,dest

         We take advantage of the fact that "negative" shifts are
         undefined to skip one of the comparisons.  */

      rtx count;
      rtx label, tempvar;
      rtx_insn *insn;

      emit_move_insn (operands[0], operands[1]);

      count = temp;
      label = gen_label_rtx ();
      LABEL_NUSES (label) ++;

      tempvar = gen_reg_rtx (mode);

      if (shift_code == ASHIFT)
        {
          /* This is a left shift.  We only need to check positive counts.  */
          emit_jump_insn (gen_cbranchqi4 (gen_rtx_LE (VOIDmode, 0, 0),
                                          count, GEN_INT (16), label));
          emit_insn (func (tempvar, operands[0], GEN_INT (8)));
          emit_insn (func (operands[0], tempvar, GEN_INT (8)));
          insn = emit_insn (gen_addqi3 (count, count, GEN_INT (-16)));
          emit_label_after (label, insn);
        }
      else
        {
          /* This is a right shift.  We only need to check negative counts.  */
          emit_jump_insn (gen_cbranchqi4 (gen_rtx_GE (VOIDmode, 0, 0),
                                          count, GEN_INT (-16), label));
          emit_insn (func (tempvar, operands[0], GEN_INT (-8)));
          emit_insn (func (operands[0], tempvar, GEN_INT (-8)));
          insn = emit_insn (gen_addqi3 (count, count, GEN_INT (16)));
          emit_label_after (label, insn);
        }
      operands[1] = operands[0];
      emit_insn (func (operands[0], operands[0], count));
      return 1;
    }

  operands[2] = temp;
  return 0;
}

/* The m32c has a limited range of operations that work on PSImode
   values; we have to expand to SI, do the math, and truncate back to
   PSI.  Yes, this is expensive, but hopefully gcc will learn to avoid
   those cases.  */
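/* For example, for "a = b * i" in PSImode this emits a zero-extension
   of B (and of I, unless I is a constant, which is just copied into
   an SImode register), an SImode multiply, and a truncation of the
   product back to PSImode.  */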
void
m32c_expand_neg_mulpsi3 (rtx * operands)
{
  /* operands: a = b * i */
  rtx temp1; /* b as SI */
  rtx scale; /* i as SI */
  rtx temp2; /* a*b as SI */

  temp1 = gen_reg_rtx (SImode);
  temp2 = gen_reg_rtx (SImode);
  if (GET_CODE (operands[2]) != CONST_INT)
    {
      scale = gen_reg_rtx (SImode);
      emit_insn (gen_zero_extendpsisi2 (scale, operands[2]));
    }
  else
    scale = copy_to_mode_reg (SImode, operands[2]);

  emit_insn (gen_zero_extendpsisi2 (temp1, operands[1]));
  temp2 = expand_simple_binop (SImode, MULT, temp1, scale, temp2, 1, OPTAB_LIB);
  emit_insn (gen_truncsipsi2 (operands[0], temp2));
}

/* Pattern Output Functions */

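/* Used for the conditional-move ("movcc") patterns.  Returns nonzero
   if the operands cannot be handled here: only EQ/NE conditions with
   CONST_INT arms are supported.  An NE is first canonicalized to EQ
   by swapping the two arms, then the move is emitted as an
   IF_THEN_ELSE set.  */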
int
m32c_expand_movcc (rtx *operands)
{
  rtx rel = operands[1];

  if (GET_CODE (rel) != EQ && GET_CODE (rel) != NE)
    return 1;
  if (GET_CODE (operands[2]) != CONST_INT
      || GET_CODE (operands[3]) != CONST_INT)
    return 1;
  if (GET_CODE (rel) == NE)
    {
      rtx tmp = operands[2];
      operands[2] = operands[3];
      operands[3] = tmp;
      rel = gen_rtx_EQ (GET_MODE (rel), XEXP (rel, 0), XEXP (rel, 1));
    }

  emit_move_insn (operands[0],
                  gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
                                        rel,
                                        operands[2],
                                        operands[3]));
  return 0;
}

/* Used for the "insv" pattern.  Returns nonzero to fail; returns zero
   once the insertion has been emitted.  */
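/* For example, storing a constant 1 into bit 3 of a QImode operand
   becomes an IOR with mask 0x08 (a "bset"), and storing a 0 becomes
   an AND with the complement mask ~0x08 (a "bclr").  */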
int
m32c_expand_insv (rtx *operands)
{
  rtx op0, src0, p;
  int mask;

  if (INTVAL (operands[1]) != 1)
    return 1;

  /* Our insv opcode (bset, bclr) can only insert a one-bit constant.  */
  if (GET_CODE (operands[3]) != CONST_INT)
    return 1;
  if (INTVAL (operands[3]) != 0
      && INTVAL (operands[3]) != 1
      && INTVAL (operands[3]) != -1)
    return 1;

  mask = 1 << INTVAL (operands[2]);

  op0 = operands[0];
  if (GET_CODE (op0) == SUBREG
      && SUBREG_BYTE (op0) == 0)
    {
      rtx sub = SUBREG_REG (op0);
      if (GET_MODE (sub) == HImode || GET_MODE (sub) == QImode)
        op0 = sub;
    }

  if (!can_create_pseudo_p ()
      || (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0)))
    src0 = op0;
  else
    {
      src0 = gen_reg_rtx (GET_MODE (op0));
      emit_move_insn (src0, op0);
    }

  if (GET_MODE (op0) == HImode
      && INTVAL (operands[2]) >= 8
      && GET_CODE (op0) == MEM)
    {
      /* We are little endian.  */
      rtx new_mem = gen_rtx_MEM (QImode, plus_constant (Pmode,
                                                        XEXP (op0, 0), 1));
      MEM_COPY_ATTRIBUTES (new_mem, op0);
      mask >>= 8;
    }

  /* First, we generate a mask with the correct polarity.  If we are
     storing a zero, we want an AND mask, so invert it.  */
  if (INTVAL (operands[3]) == 0)
    {
      /* Storing a zero, use an AND mask.  */
      if (GET_MODE (op0) == HImode)
        mask ^= 0xffff;
      else
        mask ^= 0xff;
    }
  /* Now we need to properly sign-extend the mask in case we need to
     fall back to an AND or OR opcode.  */
  if (GET_MODE (op0) == HImode)
    {
      if (mask & 0x8000)
        mask -= 0x10000;
    }
  else
    {
      if (mask & 0x80)
        mask -= 0x100;
    }

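  /* The switch index packs three choices into three bits: bit 2 is
     set when storing a one (IOR) and clear when storing a zero (AND),
     bit 1 is set for HImode and clear for QImode, and bit 0 is set
     for A24 and clear for A16.  */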
  switch ( (INTVAL (operands[3]) ? 4 : 0)
           + ((GET_MODE (op0) == HImode) ? 2 : 0)
           + (TARGET_A24 ? 1 : 0))
    {
    case 0: p = gen_andqi3_16 (op0, src0, GEN_INT (mask)); break;
    case 1: p = gen_andqi3_24 (op0, src0, GEN_INT (mask)); break;
    case 2: p = gen_andhi3_16 (op0, src0, GEN_INT (mask)); break;
    case 3: p = gen_andhi3_24 (op0, src0, GEN_INT (mask)); break;
    case 4: p = gen_iorqi3_16 (op0, src0, GEN_INT (mask)); break;
    case 5: p = gen_iorqi3_24 (op0, src0, GEN_INT (mask)); break;
    case 6: p = gen_iorhi3_16 (op0, src0, GEN_INT (mask)); break;
    case 7: p = gen_iorhi3_24 (op0, src0, GEN_INT (mask)); break;
    default: p = NULL_RTX; break; /* Not reached, but silences a warning.  */
    }

  emit_insn (p);
  return 0;
}

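/* Returns the assembler template for an scc ("store condition code")
   result.  An EQ or NE result landing in r0l can use a single STZX;
   anything else falls back to a BMcond followed by an AND with 1.  */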
const char *
m32c_scc_pattern(rtx *operands, RTX_CODE code)
{
  static char buf[30];
  if (GET_CODE (operands[0]) == REG
      && REGNO (operands[0]) == R0_REGNO)
    {
      if (code == EQ)
        return "stzx\t#1,#0,r0l";
      if (code == NE)
        return "stzx\t#0,#1,r0l";
    }
  sprintf(buf, "bm%s\t0,%%h0\n\tand.b\t#1,%%0", GET_RTX_NAME (code));
  return buf;
}

/* Encode symbol attributes of a SYMBOL_REF into its
   SYMBOL_REF_FLAGS.  */
static void
m32c_encode_section_info (tree decl, rtx rtl, int first)
{
  int extra_flags = 0;

  default_encode_section_info (decl, rtl, first);
  if (TREE_CODE (decl) == FUNCTION_DECL
      && m32c_special_page_vector_p (decl))
    extra_flags = SYMBOL_FLAG_FUNCVEC_FUNCTION;

  if (extra_flags)
    SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= extra_flags;
}

/* Returns TRUE if the current function is a leaf, and thus we can
   determine which registers an interrupt function really needs to
   save.  The logic below is mostly about finding the insn sequence
   that's the function, versus any sequence that might be open for the
   current insn.  */
static int
m32c_leaf_function_p (void)
{
  int rv;

  push_topmost_sequence ();
  rv = leaf_function_p ();
  pop_topmost_sequence ();
  return rv;
}

/* Returns TRUE if the current function needs to use the ENTER/EXIT
   opcodes.  If the function doesn't need the frame base or stack
   pointer, it can use the simpler RTS opcode.  */
static bool
m32c_function_needs_enter (void)
{
  rtx_insn *insn;
  rtx sp = gen_rtx_REG (Pmode, SP_REGNO);
  rtx fb = gen_rtx_REG (Pmode, FB_REGNO);

  for (insn = get_topmost_sequence ()->first; insn; insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn))
      {
        if (reg_mentioned_p (sp, insn))
          return true;
        if (reg_mentioned_p (fb, insn))
          return true;
      }
  return false;
}

/* Mark all the subexpressions of the PARALLEL rtx PAR as
   frame-related.  Return PAR.

   dwarf2out.c:dwarf2out_frame_debug_expr ignores sub-expressions of a
   PARALLEL rtx other than the first if they do not have the
   FRAME_RELATED flag set on them.  So this function is handy for
   marking up 'enter' instructions.  */
static rtx
m32c_all_frame_related (rtx par)
{
  int len = XVECLEN (par, 0);
  int i;

  for (i = 0; i < len; i++)
    F (XVECEXP (par, 0, i));

  return par;
}

/* Emits the prologue.  See the frame layout comment earlier in this
   file.  We can reserve up to 256 bytes with the ENTER opcode; beyond
   that we manually update sp.  */
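/* For example, with a 300-byte frame, ENTER reserves only 254 bytes
   of it and the remaining 46 bytes are subtracted from the stack
   pointer explicitly below.  */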
void
m32c_emit_prologue (void)
{
  int frame_size, extra_frame_size = 0, reg_save_size;
  int complex_prologue = 0;

  cfun->machine->is_leaf = m32c_leaf_function_p ();
  if (interrupt_p (cfun->decl))
    {
      cfun->machine->is_interrupt = 1;
      complex_prologue = 1;
    }
  else if (bank_switch_p (cfun->decl))
    warning (OPT_Wattributes,
             "%<bank_switch%> has no effect on non-interrupt functions");

  reg_save_size = m32c_pushm_popm (PP_justcount);

  if (interrupt_p (cfun->decl))
    {
      if (bank_switch_p (cfun->decl))
        emit_insn (gen_fset_b ());
      else if (cfun->machine->intr_pushm)
        emit_insn (gen_pushm (GEN_INT (cfun->machine->intr_pushm)));
    }

  frame_size =
    m32c_initial_elimination_offset (FB_REGNO, SP_REGNO) - reg_save_size;
  if (frame_size == 0
      && !m32c_function_needs_enter ())
    cfun->machine->use_rts = 1;

  if (flag_stack_usage_info)
    current_function_static_stack_size = frame_size;

  if (frame_size > 254)
    {
      extra_frame_size = frame_size - 254;
      frame_size = 254;
    }
  if (cfun->machine->use_rts == 0)
    F (emit_insn (m32c_all_frame_related
                  (TARGET_A16
                   ? gen_prologue_enter_16 (GEN_INT (frame_size + 2))
                   : gen_prologue_enter_24 (GEN_INT (frame_size + 4)))));

  if (extra_frame_size)
    {
      complex_prologue = 1;
      if (TARGET_A16)
        F (emit_insn (gen_addhi3 (gen_rtx_REG (HImode, SP_REGNO),
                                  gen_rtx_REG (HImode, SP_REGNO),
                                  GEN_INT (-extra_frame_size))));
      else
        F (emit_insn (gen_addpsi3 (gen_rtx_REG (PSImode, SP_REGNO),
                                   gen_rtx_REG (PSImode, SP_REGNO),
                                   GEN_INT (-extra_frame_size))));
    }

  complex_prologue += m32c_pushm_popm (PP_pushm);

  /* This just emits a comment into the .s file for debugging.  */
  if (complex_prologue)
    emit_insn (gen_prologue_end ());
}

/* Likewise, for the epilogue.  The only exception is that, for
   interrupts, we must manually unwind the frame as the REIT opcode
   doesn't do that.  */
void
m32c_emit_epilogue (void)
{
  int popm_count = m32c_pushm_popm (PP_justcount);

  /* This just emits a comment into the .s file for debugging.  */
  if (popm_count > 0 || cfun->machine->is_interrupt)
    emit_insn (gen_epilogue_start ());

  if (popm_count > 0)
    m32c_pushm_popm (PP_popm);

  if (cfun->machine->is_interrupt)
    {
      machine_mode spmode = TARGET_A16 ? HImode : PSImode;

      /* REIT clears the B flag and restores $fp for us, but we still
         have to fix up the stack.  USE_RTS just means we didn't
         emit ENTER.  */
      if (!cfun->machine->use_rts)
        {
          emit_move_insn (gen_rtx_REG (spmode, A0_REGNO),
                          gen_rtx_REG (spmode, FP_REGNO));
          emit_move_insn (gen_rtx_REG (spmode, SP_REGNO),
                          gen_rtx_REG (spmode, A0_REGNO));
          /* We can't just add this to the POPM because it would be in
             the wrong order, and wouldn't fix the stack if we're bank
             switching.  */
          if (TARGET_A16)
            emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, FP_REGNO)));
          else
            emit_insn (gen_poppsi (gen_rtx_REG (PSImode, FP_REGNO)));
        }
      if (!bank_switch_p (cfun->decl) && cfun->machine->intr_pushm)
        emit_insn (gen_popm (GEN_INT (cfun->machine->intr_pushm)));

      /* The FREIT (Fast REturn from InTerrupt) instruction should be
         generated only for M32C/M32CM targets (generate the REIT
         instruction otherwise).  */
      if (fast_interrupt_p (cfun->decl))
        {
          /* The fast_interrupt attribute is only honored on M32C and
             M32CM (A24) targets.  */
          if (TARGET_A24)
            {
              emit_jump_insn (gen_epilogue_freit ());
            }
          /* If the fast_interrupt attribute is set for an R8C or M16C
             target, ignore the attribute and generate a REIT
             instruction instead.  */
          else
            {
              warning (OPT_Wattributes,
                       "%<fast_interrupt%> attribute directive ignored");
              emit_jump_insn (gen_epilogue_reit_16 ());
            }
        }
      else if (TARGET_A16)
        emit_jump_insn (gen_epilogue_reit_16 ());
      else
        emit_jump_insn (gen_epilogue_reit_24 ());
    }
  else if (cfun->machine->use_rts)
    emit_jump_insn (gen_epilogue_rts ());
  else if (TARGET_A16)
    emit_jump_insn (gen_epilogue_exitd_16 ());
  else
    emit_jump_insn (gen_epilogue_exitd_24 ());
}

void
m32c_emit_eh_epilogue (rtx ret_addr)
{
  /* R0[R2] has the stack adjustment.  R1[R3] has the address to
     return to.  We have to fudge the stack, pop everything, pop SP
     (fudged), and return (fudged).  This is actually easier to do in
     assembler, so punt to libgcc.  */
  emit_jump_insn (gen_eh_epilogue (ret_addr, cfun->machine->eh_stack_adjust));
  /* emit_clobber (gen_rtx_REG (HImode, R0L_REGNO)); */
}

/* Indicate which flags must be properly set for a given conditional.  */
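/* E.g. a signed LE or GT branch needs the O, S and Z flags
   (FLAGS_OSZ) while EQ/NE needs only Z (FLAGS_Z); FLAGS_N is returned
   for anything we don't recognize, and the caller treats that as
   "cannot prove the flags are already correct".  */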
static int
flags_needed_for_conditional (rtx cond)
{
  switch (GET_CODE (cond))
    {
    case LE:
    case GT:
      return FLAGS_OSZ;
    case LEU:
    case GTU:
      return FLAGS_ZC;
    case LT:
    case GE:
      return FLAGS_OS;
    case LTU:
    case GEU:
      return FLAGS_C;
    case EQ:
    case NE:
      return FLAGS_Z;
    default:
      return FLAGS_N;
    }
}

#define DEBUG_CMP 0

/* Returns true if a compare insn is redundant because it would only
   set flags that are already set correctly.  */
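/* Typical case: the insn right before the compare is a SET whose
   flags attribute already covers every flag the following branch
   tests, and the compare checks that SET's source or destination
   against zero; the compare is then dropped (emitted as a comment by
   m32c_output_compare below).  */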
static bool
m32c_compare_redundant (rtx_insn *cmp, rtx *operands)
{
  int flags_needed;
  int pflags;
  rtx_insn *prev;
  rtx pp, next;
  rtx op0, op1;
#if DEBUG_CMP
  int prev_icode, i;
#endif

  op0 = operands[0];
  op1 = operands[1];

#if DEBUG_CMP
  fprintf(stderr, "\n\033[32mm32c_compare_redundant\033[0m\n");
  debug_rtx(cmp);
  for (i=0; i<2; i++)
    {
      fprintf(stderr, "operands[%d] = ", i);
      debug_rtx(operands[i]);
    }
#endif

  next = next_nonnote_insn (cmp);
  if (!next || !INSN_P (next))
    {
#if DEBUG_CMP
      fprintf(stderr, "compare not followed by insn\n");
      debug_rtx(next);
#endif
      return false;
    }
  if (GET_CODE (PATTERN (next)) == SET
      && GET_CODE (XEXP ( PATTERN (next), 1)) == IF_THEN_ELSE)
    {
      next = XEXP (XEXP (PATTERN (next), 1), 0);
    }
  else if (GET_CODE (PATTERN (next)) == SET)
    {
      /* If this is a conditional, flags_needed will be something
         other than FLAGS_N, which we test below.  */
      next = XEXP (PATTERN (next), 1);
    }
  else
    {
#if DEBUG_CMP
      fprintf(stderr, "compare not followed by conditional\n");
      debug_rtx(next);
#endif
      return false;
    }
#if DEBUG_CMP
  fprintf(stderr, "conditional is: ");
  debug_rtx(next);
#endif

  flags_needed = flags_needed_for_conditional (next);
  if (flags_needed == FLAGS_N)
    {
#if DEBUG_CMP
      fprintf(stderr, "compare not followed by conditional\n");
      debug_rtx(next);
#endif
      return false;
    }

  /* Compare doesn't set overflow and carry the same way that
     arithmetic instructions do, so we can't replace those.  */
  if (flags_needed & FLAGS_OC)
    return false;

  prev = cmp;
  do {
    prev = prev_nonnote_insn (prev);
    if (!prev)
      {
#if DEBUG_CMP
        fprintf(stderr, "No previous insn.\n");
#endif
        return false;
      }
    if (!INSN_P (prev))
      {
#if DEBUG_CMP
        fprintf(stderr, "Previous insn is a non-insn.\n");
#endif
        return false;
      }
    pp = PATTERN (prev);
    if (GET_CODE (pp) != SET)
      {
#if DEBUG_CMP
        fprintf(stderr, "Previous insn is not a SET.\n");
#endif
        return false;
      }
    pflags = get_attr_flags (prev);

    /* Looking up attributes of previous insns corrupted the recog
       tables.  */
    INSN_UID (cmp) = -1;
    recog (PATTERN (cmp), cmp, 0);

    if (pflags == FLAGS_N
        && reg_mentioned_p (op0, pp))
      {
#if DEBUG_CMP
        fprintf(stderr, "intermediate non-flags insn uses op:\n");
        debug_rtx(prev);
#endif
        return false;
      }

    /* Check for comparisons against memory; between volatiles and
       aliases, we just can't risk it.  */
    if (GET_CODE (operands[0]) == MEM
        || GET_CODE (operands[1]) == MEM)
      {
#if DEBUG_CMP
        fprintf(stderr, "comparisons with memory:\n");
        debug_rtx(prev);
#endif
        return false;
      }

    /* Check for PREV changing a register that's used to compute a
       value in CMP, even if it doesn't otherwise change flags.  */
    if (GET_CODE (operands[0]) == REG
        && rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[0]))
      {
#if DEBUG_CMP
        fprintf(stderr, "sub-value affected, op0:\n");
        debug_rtx(prev);
#endif
        return false;
      }
    if (GET_CODE (operands[1]) == REG
        && rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[1]))
      {
#if DEBUG_CMP
        fprintf(stderr, "sub-value affected, op1:\n");
        debug_rtx(prev);
#endif
        return false;
      }

  } while (pflags == FLAGS_N);
#if DEBUG_CMP
  fprintf(stderr, "previous flag-setting insn:\n");
  debug_rtx(prev);
  debug_rtx(pp);
#endif

  if (GET_CODE (pp) == SET
      && GET_CODE (XEXP (pp, 0)) == REG
      && REGNO (XEXP (pp, 0)) == FLG_REGNO
      && GET_CODE (XEXP (pp, 1)) == COMPARE)
    {
      /* Adjacent cbranches must have the same operands to be
         redundant.  */
      rtx pop0 = XEXP (XEXP (pp, 1), 0);
      rtx pop1 = XEXP (XEXP (pp, 1), 1);
#if DEBUG_CMP
      fprintf(stderr, "adjacent cbranches\n");
      debug_rtx(pop0);
      debug_rtx(pop1);
#endif
      if (rtx_equal_p (op0, pop0)
          && rtx_equal_p (op1, pop1))
        return true;
#if DEBUG_CMP
      fprintf(stderr, "prev cmp not same\n");
#endif
      return false;
    }

  /* Else the previous insn must be a SET, with either the source or
     dest equal to operands[0], and operands[1] must be zero.  */

  if (!rtx_equal_p (op1, const0_rtx))
    {
#if DEBUG_CMP
      fprintf(stderr, "operands[1] not const0_rtx\n");
#endif
      return false;
    }
  if (GET_CODE (pp) != SET)
    {
#if DEBUG_CMP
      fprintf (stderr, "pp not set\n");
#endif
      return false;
    }
  if (!rtx_equal_p (op0, SET_SRC (pp))
      && !rtx_equal_p (op0, SET_DEST (pp)))
    {
#if DEBUG_CMP
      fprintf(stderr, "operands[0] not found in set\n");
#endif
      return false;
    }

#if DEBUG_CMP
  fprintf(stderr, "cmp flags %x prev flags %x\n", flags_needed, pflags);
#endif
  if ((pflags & flags_needed) == flags_needed)
    return true;

  return false;
}

/* Return the pattern for a compare.  This will be commented out if
   the compare is redundant, else a normal pattern is returned.  Thus,
   the assembler output says where the compare would have been.  */
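/* The leading ';' in templ below turns the whole line into an
   assembler comment; a needed compare is emitted by returning
   templ + 1, which skips the ';', after templ[5] has been patched
   with the operand-size suffix (b, w or l).  */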
char *
m32c_output_compare (rtx_insn *insn, rtx *operands)
{
  static char templ[] = ";cmp.b\t%1,%0";
  /*                        ^ 5  */

  templ[5] = " bwll"[GET_MODE_SIZE(GET_MODE(operands[0]))];
  if (m32c_compare_redundant (insn, operands))
    {
#if DEBUG_CMP
      fprintf(stderr, "cbranch: cmp not needed\n");
#endif
      return templ;
    }

#if DEBUG_CMP
  fprintf(stderr, "cbranch: cmp needed: `%s'\n", templ + 1);
#endif
  return templ + 1;
}

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO m32c_encode_section_info

/* We detect an unused frame pointer manually, but the stack pointer
   doesn't have addressing modes as flexible as the frame pointer's,
   so we always claim the frame pointer is required.  */

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED hook_bool_void_true

/* The Global `targetm' Variable.  */

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-m32c.h"