]> git.ipfire.org Git - thirdparty/gcc.git/blame - gcc/config/mn10300/mn10300.c
re PR middle-end/46500 (target.h includes tm.h)
[thirdparty/gcc.git] / gcc / config / mn10300 / mn10300.c
CommitLineData
11bb1f11 1/* Subroutines for insn-output.c for Matsushita MN10300 series
6fb5fa3c 2 Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004,
298362c8 3 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
11bb1f11
JL
4 Contributed by Jeff Law (law@cygnus.com).
5
e7ab5593 6 This file is part of GCC.
11bb1f11 7
e7ab5593
NC
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
11bb1f11 12
e7ab5593
NC
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
11bb1f11 17
e7ab5593
NC
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
11bb1f11 21
11bb1f11 22#include "config.h"
c5c76735 23#include "system.h"
4977bab6
ZW
24#include "coretypes.h"
25#include "tm.h"
11bb1f11 26#include "rtl.h"
bf6bb899 27#include "tree.h"
11bb1f11
JL
28#include "regs.h"
29#include "hard-reg-set.h"
11bb1f11
JL
30#include "insn-config.h"
31#include "conditions.h"
11bb1f11
JL
32#include "output.h"
33#include "insn-attr.h"
34#include "flags.h"
35#include "recog.h"
6528281d 36#include "reload.h"
11bb1f11 37#include "expr.h"
e78d8e51 38#include "optabs.h"
bf6bb899 39#include "function.h"
11bb1f11 40#include "obstack.h"
718f9c0f 41#include "diagnostic-core.h"
69bc71fa 42#include "tm_p.h"
a45d420a 43#include "tm-constrs.h"
672a6f42
NB
44#include "target.h"
45#include "target-def.h"
4af476d7 46#include "df.h"
96e45421 47#include "opts.h"
662c03f4 48#include "cfgloop.h"
11bb1f11 49
2beef00e
AO
50/* This is used in the am33_2.0-linux-gnu port, in which global symbol
51 names are not prefixed by underscores, to tell whether to prefix a
52 label with a plus sign or not, so that the assembler can tell
53 symbol names from register names. */
54int mn10300_protect_label;
55
f3f63737
NC
56/* Selected processor type for tuning. */
57enum processor_type mn10300_tune_cpu = PROCESSOR_DEFAULT;
58
4d1a91c2
JL
59/* The size of the callee register save area. Right now we save everything
60 on entry since it costs us nothing in code size. It does cost us from a
61 speed standpoint, so we want to optimize this sooner or later. */
e7ab5593
NC
62#define REG_SAVE_BYTES (4 * df_regs_ever_live_p (2) \
63 + 4 * df_regs_ever_live_p (3) \
64 + 4 * df_regs_ever_live_p (6) \
65 + 4 * df_regs_ever_live_p (7) \
66 + 16 * (df_regs_ever_live_p (14) \
67 || df_regs_ever_live_p (15) \
68 || df_regs_ever_live_p (16) \
69 || df_regs_ever_live_p (17)))
990dc016 70
bad41521
RH
71#define CC_FLAG_Z 1
72#define CC_FLAG_N 2
73#define CC_FLAG_C 4
74#define CC_FLAG_V 8
75
76static int cc_flags_for_mode(enum machine_mode);
77static int cc_flags_for_code(enum rtx_code);
672a6f42 78\f
c5387660 79/* Implement TARGET_OPTION_OVERRIDE. */
13dd556c 80
c5387660
JM
81static void
82mn10300_option_override (void)
13dd556c
RS
83{
84 if (TARGET_AM33)
85 target_flags &= ~MASK_MULT_BUG;
f3f63737
NC
86 else
87 {
88 /* Disable scheduling for the MN10300 as we do
89 not have timing information available for it. */
90 flag_schedule_insns = 0;
91 flag_schedule_insns_after_reload = 0;
ec815d65
RH
92
93 /* Force enable splitting of wide types, as otherwise it is trivial
94 to run out of registers. Indeed, this works so well that register
95 allocation problems are now more common *without* optimization,
96 when this flag is not enabled by default. */
97 flag_split_wide_types = 1;
f3f63737 98 }
bad41521 99
f3f63737
NC
100 if (mn10300_tune_string)
101 {
102 if (strcasecmp (mn10300_tune_string, "mn10300") == 0)
103 mn10300_tune_cpu = PROCESSOR_MN10300;
104 else if (strcasecmp (mn10300_tune_string, "am33") == 0)
105 mn10300_tune_cpu = PROCESSOR_AM33;
106 else if (strcasecmp (mn10300_tune_string, "am33-2") == 0)
107 mn10300_tune_cpu = PROCESSOR_AM33_2;
108 else if (strcasecmp (mn10300_tune_string, "am34") == 0)
109 mn10300_tune_cpu = PROCESSOR_AM34;
110 else
111 error ("-mtune= expects mn10300, am33, am33-2, or am34");
112 }
13dd556c
RS
113}
114
1bc7c5b6 115static void
f1777882 116mn10300_file_start (void)
11bb1f11 117{
1bc7c5b6 118 default_file_start ();
705ac34f 119
18e9d2f9
AO
120 if (TARGET_AM33_2)
121 fprintf (asm_out_file, "\t.am33_2\n");
122 else if (TARGET_AM33)
1bc7c5b6 123 fprintf (asm_out_file, "\t.am33\n");
11bb1f11
JL
124}
125\f
/* Mnemonics for the LIW operations, indexed by the liw_op attribute
   value.  Note: This list must match the liw_op attribute in
   mn10300.md.  */

static const char *liw_op_names[] =
{
  "add", "cmp", "sub", "mov",
  "and", "or", "xor",
  "asr", "lsr", "asl",
  "none", "max"
};
135
11bb1f11
JL
136/* Print operand X using operand code CODE to assembly language output file
137 FILE. */
138
139void
e7ab5593 140mn10300_print_operand (FILE *file, rtx x, int code)
11bb1f11
JL
141{
142 switch (code)
143 {
298362c8
NC
144 case 'W':
145 {
146 unsigned int liw_op = UINTVAL (x);
bad41521 147
298362c8
NC
148 gcc_assert (TARGET_ALLOW_LIW);
149 gcc_assert (liw_op < LIW_OP_MAX);
150 fputs (liw_op_names[liw_op], file);
11bb1f11 151 break;
298362c8 152 }
bad41521 153
298362c8
NC
154 case 'b':
155 case 'B':
156 {
157 enum rtx_code cmp = GET_CODE (x);
158 enum machine_mode mode = GET_MODE (XEXP (x, 0));
159 const char *str;
160 int have_flags;
161
162 if (code == 'B')
163 cmp = reverse_condition (cmp);
164 have_flags = cc_flags_for_mode (mode);
5abc5de9 165
298362c8 166 switch (cmp)
18e9d2f9 167 {
298362c8
NC
168 case NE:
169 str = "ne";
18e9d2f9 170 break;
298362c8
NC
171 case EQ:
172 str = "eq";
173 break;
174 case GE:
175 /* bge is smaller than bnc. */
176 str = (have_flags & CC_FLAG_V ? "ge" : "nc");
177 break;
178 case LT:
179 str = (have_flags & CC_FLAG_V ? "lt" : "ns");
180 break;
181 case GT:
182 str = "gt";
183 break;
184 case LE:
185 str = "le";
186 break;
187 case GEU:
188 str = "cc";
189 break;
190 case GTU:
191 str = "hi";
192 break;
193 case LEU:
194 str = "ls";
195 break;
196 case LTU:
197 str = "cs";
198 break;
199 case ORDERED:
200 str = "lge";
201 break;
202 case UNORDERED:
203 str = "uo";
204 break;
205 case LTGT:
206 str = "lg";
207 break;
208 case UNEQ:
209 str = "ue";
210 break;
211 case UNGE:
212 str = "uge";
213 break;
214 case UNGT:
215 str = "ug";
216 break;
217 case UNLE:
218 str = "ule";
219 break;
220 case UNLT:
221 str = "ul";
18e9d2f9 222 break;
18e9d2f9 223 default:
dc759020 224 gcc_unreachable ();
18e9d2f9 225 }
298362c8
NC
226
227 gcc_checking_assert ((cc_flags_for_code (cmp) & ~have_flags) == 0);
228 fputs (str, file);
229 }
230 break;
231
232 case 'C':
233 /* This is used for the operand to a call instruction;
234 if it's a REG, enclose it in parens, else output
235 the operand normally. */
236 if (REG_P (x))
237 {
238 fputc ('(', file);
239 mn10300_print_operand (file, x, 0);
240 fputc (')', file);
241 }
242 else
243 mn10300_print_operand (file, x, 0);
244 break;
245
246 case 'D':
247 switch (GET_CODE (x))
248 {
249 case MEM:
250 fputc ('(', file);
251 output_address (XEXP (x, 0));
252 fputc (')', file);
253 break;
254
255 case REG:
256 fprintf (file, "fd%d", REGNO (x) - 18);
257 break;
258
259 default:
260 gcc_unreachable ();
261 }
262 break;
18e9d2f9 263
38c37a0e 264 /* These are the least significant word in a 64bit value. */
298362c8
NC
265 case 'L':
266 switch (GET_CODE (x))
267 {
268 case MEM:
269 fputc ('(', file);
270 output_address (XEXP (x, 0));
271 fputc (')', file);
272 break;
38c37a0e 273
298362c8
NC
274 case REG:
275 fprintf (file, "%s", reg_names[REGNO (x)]);
276 break;
38c37a0e 277
298362c8
NC
278 case SUBREG:
279 fprintf (file, "%s", reg_names[subreg_regno (x)]);
280 break;
38c37a0e 281
298362c8
NC
282 case CONST_DOUBLE:
283 {
284 long val[2];
285 REAL_VALUE_TYPE rv;
38c37a0e 286
298362c8
NC
287 switch (GET_MODE (x))
288 {
289 case DFmode:
290 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
291 REAL_VALUE_TO_TARGET_DOUBLE (rv, val);
292 fprintf (file, "0x%lx", val[0]);
293 break;;
294 case SFmode:
295 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
296 REAL_VALUE_TO_TARGET_SINGLE (rv, val[0]);
297 fprintf (file, "0x%lx", val[0]);
298 break;;
299 case VOIDmode:
300 case DImode:
301 mn10300_print_operand_address (file,
302 GEN_INT (CONST_DOUBLE_LOW (x)));
303 break;
304 default:
38c37a0e
JL
305 break;
306 }
298362c8
NC
307 break;
308 }
38c37a0e 309
298362c8
NC
310 case CONST_INT:
311 {
312 rtx low, high;
313 split_double (x, &low, &high);
314 fprintf (file, "%ld", (long)INTVAL (low));
315 break;
212bc5fa 316 }
38c37a0e 317
298362c8
NC
318 default:
319 gcc_unreachable ();
320 }
321 break;
38c37a0e
JL
322
323 /* Similarly, but for the most significant word. */
298362c8
NC
324 case 'H':
325 switch (GET_CODE (x))
326 {
327 case MEM:
328 fputc ('(', file);
329 x = adjust_address (x, SImode, 4);
330 output_address (XEXP (x, 0));
331 fputc (')', file);
332 break;
38c37a0e 333
298362c8
NC
334 case REG:
335 fprintf (file, "%s", reg_names[REGNO (x) + 1]);
336 break;
38c37a0e 337
298362c8
NC
338 case SUBREG:
339 fprintf (file, "%s", reg_names[subreg_regno (x) + 1]);
340 break;
38c37a0e 341
298362c8
NC
342 case CONST_DOUBLE:
343 {
344 long val[2];
345 REAL_VALUE_TYPE rv;
38c37a0e 346
298362c8
NC
347 switch (GET_MODE (x))
348 {
349 case DFmode:
350 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
351 REAL_VALUE_TO_TARGET_DOUBLE (rv, val);
352 fprintf (file, "0x%lx", val[1]);
353 break;;
354 case SFmode:
355 gcc_unreachable ();
356 case VOIDmode:
357 case DImode:
358 mn10300_print_operand_address (file,
359 GEN_INT (CONST_DOUBLE_HIGH (x)));
360 break;
361 default:
38c37a0e
JL
362 break;
363 }
298362c8
NC
364 break;
365 }
38c37a0e 366
298362c8
NC
367 case CONST_INT:
368 {
369 rtx low, high;
370 split_double (x, &low, &high);
371 fprintf (file, "%ld", (long)INTVAL (high));
372 break;
38c37a0e 373 }
38c37a0e 374
298362c8
NC
375 default:
376 gcc_unreachable ();
377 }
378 break;
38c37a0e 379
298362c8
NC
380 case 'A':
381 fputc ('(', file);
382 if (REG_P (XEXP (x, 0)))
383 output_address (gen_rtx_PLUS (SImode, XEXP (x, 0), const0_rtx));
384 else
385 output_address (XEXP (x, 0));
386 fputc (')', file);
387 break;
a58be199 388
298362c8
NC
389 case 'N':
390 gcc_assert (INTVAL (x) >= -128 && INTVAL (x) <= 255);
391 fprintf (file, "%d", (int)((~INTVAL (x)) & 0xff));
392 break;
393
394 case 'U':
395 gcc_assert (INTVAL (x) >= -128 && INTVAL (x) <= 255);
396 fprintf (file, "%d", (int)(INTVAL (x) & 0xff));
397 break;
6fafc523 398
576e5acc
JL
399 /* For shift counts. The hardware ignores the upper bits of
400 any immediate, but the assembler will flag an out of range
401 shift count as an error. So we mask off the high bits
402 of the immediate here. */
298362c8
NC
403 case 'S':
404 if (CONST_INT_P (x))
405 {
406 fprintf (file, "%d", (int)(INTVAL (x) & 0x1f));
407 break;
408 }
409 /* FALL THROUGH */
576e5acc 410
298362c8
NC
411 default:
412 switch (GET_CODE (x))
413 {
414 case MEM:
415 fputc ('(', file);
416 output_address (XEXP (x, 0));
417 fputc (')', file);
418 break;
11bb1f11 419
298362c8
NC
420 case PLUS:
421 output_address (x);
422 break;
38c37a0e 423
298362c8
NC
424 case REG:
425 fprintf (file, "%s", reg_names[REGNO (x)]);
426 break;
11bb1f11 427
298362c8
NC
428 case SUBREG:
429 fprintf (file, "%s", reg_names[subreg_regno (x)]);
430 break;
11bb1f11 431
38c37a0e 432 /* This will only be single precision.... */
298362c8
NC
433 case CONST_DOUBLE:
434 {
435 unsigned long val;
436 REAL_VALUE_TYPE rv;
38c37a0e 437
298362c8
NC
438 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
439 REAL_VALUE_TO_TARGET_SINGLE (rv, val);
440 fprintf (file, "0x%lx", val);
11bb1f11 441 break;
11bb1f11 442 }
298362c8
NC
443
444 case CONST_INT:
445 case SYMBOL_REF:
446 case CONST:
447 case LABEL_REF:
448 case CODE_LABEL:
449 case UNSPEC:
450 mn10300_print_operand_address (file, x);
451 break;
452 default:
453 gcc_unreachable ();
454 }
455 break;
456 }
11bb1f11
JL
457}
458
459/* Output assembly language output for the address ADDR to FILE. */
460
461void
e7ab5593 462mn10300_print_operand_address (FILE *file, rtx addr)
11bb1f11
JL
463{
464 switch (GET_CODE (addr))
465 {
705ac34f 466 case POST_INC:
36846b26 467 mn10300_print_operand (file, XEXP (addr, 0), 0);
705ac34f
JL
468 fputc ('+', file);
469 break;
36846b26
RH
470
471 case POST_MODIFY:
472 mn10300_print_operand (file, XEXP (addr, 0), 0);
473 fputc ('+', file);
474 fputc (',', file);
475 mn10300_print_operand (file, XEXP (addr, 1), 0);
476 break;
477
11bb1f11 478 case REG:
e7ab5593 479 mn10300_print_operand (file, addr, 0);
11bb1f11
JL
480 break;
481 case PLUS:
482 {
36846b26
RH
483 rtx base = XEXP (addr, 0);
484 rtx index = XEXP (addr, 1);
485
486 if (REG_P (index) && !REG_OK_FOR_INDEX_P (index))
487 {
488 rtx x = base;
489 base = index;
490 index = x;
491
492 gcc_assert (REG_P (index) && REG_OK_FOR_INDEX_P (index));
493 }
494 gcc_assert (REG_OK_FOR_BASE_P (base));
495
e7ab5593 496 mn10300_print_operand (file, index, 0);
11bb1f11 497 fputc (',', file);
36846b26 498 mn10300_print_operand (file, base, 0);
11bb1f11
JL
499 break;
500 }
501 case SYMBOL_REF:
502 output_addr_const (file, addr);
503 break;
504 default:
505 output_addr_const (file, addr);
506 break;
507 }
508}
509
535bd17c
AS
510/* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA.
511
512 Used for PIC-specific UNSPECs. */
513
514static bool
515mn10300_asm_output_addr_const_extra (FILE *file, rtx x)
516{
517 if (GET_CODE (x) == UNSPEC)
518 {
519 switch (XINT (x, 1))
520 {
535bd17c
AS
521 case UNSPEC_PIC:
522 /* GLOBAL_OFFSET_TABLE or local symbols, no suffix. */
523 output_addr_const (file, XVECEXP (x, 0, 0));
524 break;
525 case UNSPEC_GOT:
526 output_addr_const (file, XVECEXP (x, 0, 0));
527 fputs ("@GOT", file);
528 break;
529 case UNSPEC_GOTOFF:
530 output_addr_const (file, XVECEXP (x, 0, 0));
531 fputs ("@GOTOFF", file);
532 break;
533 case UNSPEC_PLT:
534 output_addr_const (file, XVECEXP (x, 0, 0));
535 fputs ("@PLT", file);
536 break;
537 case UNSPEC_GOTSYM_OFF:
538 assemble_name (file, GOT_SYMBOL_NAME);
539 fputs ("-(", file);
540 output_addr_const (file, XVECEXP (x, 0, 0));
541 fputs ("-.)", file);
542 break;
543 default:
544 return false;
545 }
546 return true;
547 }
548 else
549 return false;
550}
551
18e9d2f9
AO
552/* Count the number of FP registers that have to be saved. */
553static int
f1777882 554fp_regs_to_save (void)
18e9d2f9
AO
555{
556 int i, n = 0;
557
558 if (! TARGET_AM33_2)
559 return 0;
560
561 for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
9d54866d 562 if (df_regs_ever_live_p (i) && ! call_really_used_regs[i])
18e9d2f9
AO
563 ++n;
564
565 return n;
566}
567
f6cd7c62
RS
568/* Print a set of registers in the format required by "movm" and "ret".
569 Register K is saved if bit K of MASK is set. The data and address
570 registers can be stored individually, but the extended registers cannot.
9f5ed61a 571 We assume that the mask already takes that into account. For instance,
8596d0a1 572 bits 14 to 17 must have the same value. */
f6cd7c62
RS
573
574void
f1777882 575mn10300_print_reg_list (FILE *file, int mask)
f6cd7c62
RS
576{
577 int need_comma;
578 int i;
579
580 need_comma = 0;
581 fputc ('[', file);
582
583 for (i = 0; i < FIRST_EXTENDED_REGNUM; i++)
584 if ((mask & (1 << i)) != 0)
585 {
586 if (need_comma)
587 fputc (',', file);
588 fputs (reg_names [i], file);
589 need_comma = 1;
590 }
591
592 if ((mask & 0x3c000) != 0)
593 {
dc759020 594 gcc_assert ((mask & 0x3c000) == 0x3c000);
f6cd7c62
RS
595 if (need_comma)
596 fputc (',', file);
597 fputs ("exreg1", file);
598 need_comma = 1;
599 }
600
601 fputc (']', file);
602}
603
37a185d7
RH
604/* If the MDR register is never clobbered, we can use the RETF instruction
605 which takes the address from the MDR register. This is 3 cycles faster
606 than having to load the address from the stack. */
607
608bool
609mn10300_can_use_retf_insn (void)
610{
611 /* Don't bother if we're not optimizing. In this case we won't
612 have proper access to df_regs_ever_live_p. */
613 if (!optimize)
614 return false;
615
616 /* EH returns alter the saved return address; MDR is not current. */
617 if (crtl->calls_eh_return)
618 return false;
619
620 /* Obviously not if MDR is ever clobbered. */
621 if (df_regs_ever_live_p (MDR_REG))
622 return false;
623
624 /* ??? Careful not to use this during expand_epilogue etc. */
625 gcc_assert (!in_sequence_p ());
626 return leaf_function_p ();
627}
628
629bool
630mn10300_can_use_rets_insn (void)
38c37a0e 631{
040c5757 632 return !mn10300_initial_offset (ARG_POINTER_REGNUM, STACK_POINTER_REGNUM);
38c37a0e
JL
633}
634
f6cd7c62
RS
635/* Returns the set of live, callee-saved registers as a bitmask. The
636 callee-saved extended registers cannot be stored individually, so
8596d0a1 637 all of them will be included in the mask if any one of them is used. */
f6cd7c62
RS
638
639int
f1777882 640mn10300_get_live_callee_saved_regs (void)
f6cd7c62
RS
641{
642 int mask;
643 int i;
644
645 mask = 0;
18e9d2f9 646 for (i = 0; i <= LAST_EXTENDED_REGNUM; i++)
9d54866d 647 if (df_regs_ever_live_p (i) && ! call_really_used_regs[i])
f6cd7c62
RS
648 mask |= (1 << i);
649 if ((mask & 0x3c000) != 0)
650 mask |= 0x3c000;
651
652 return mask;
653}
654
2720cc47
NC
655static rtx
656F (rtx r)
657{
658 RTX_FRAME_RELATED_P (r) = 1;
659 return r;
660}
661
f6cd7c62
RS
662/* Generate an instruction that pushes several registers onto the stack.
663 Register K will be saved if bit K in MASK is set. The function does
664 nothing if MASK is zero.
665
666 To be compatible with the "movm" instruction, the lowest-numbered
667 register must be stored in the lowest slot. If MASK is the set
668 { R1,...,RN }, where R1...RN are ordered least first, the generated
669 instruction will have the form:
670
671 (parallel
672 (set (reg:SI 9) (plus:SI (reg:SI 9) (const_int -N*4)))
673 (set (mem:SI (plus:SI (reg:SI 9)
674 (const_int -1*4)))
675 (reg:SI RN))
676 ...
677 (set (mem:SI (plus:SI (reg:SI 9)
678 (const_int -N*4)))
679 (reg:SI R1))) */
680
cc909bba
RH
681static void
682mn10300_gen_multiple_store (unsigned int mask)
f6cd7c62 683{
cc909bba
RH
684 /* The order in which registers are stored, from SP-4 through SP-N*4. */
685 static const unsigned int store_order[8] = {
686 /* e2, e3: never saved */
687 FIRST_EXTENDED_REGNUM + 4,
688 FIRST_EXTENDED_REGNUM + 5,
689 FIRST_EXTENDED_REGNUM + 6,
690 FIRST_EXTENDED_REGNUM + 7,
691 /* e0, e1, mdrq, mcrh, mcrl, mcvf: never saved. */
692 FIRST_DATA_REGNUM + 2,
693 FIRST_DATA_REGNUM + 3,
694 FIRST_ADDRESS_REGNUM + 2,
695 FIRST_ADDRESS_REGNUM + 3,
696 /* d0, d1, a0, a1, mdr, lir, lar: never saved. */
697 };
698
699 rtx x, elts[9];
700 unsigned int i;
701 int count;
702
703 if (mask == 0)
704 return;
705
706 for (i = count = 0; i < ARRAY_SIZE(store_order); ++i)
f6cd7c62 707 {
cc909bba
RH
708 unsigned regno = store_order[i];
709
710 if (((mask >> regno) & 1) == 0)
711 continue;
f6cd7c62 712
cc909bba
RH
713 ++count;
714 x = plus_constant (stack_pointer_rtx, count * -4);
715 x = gen_frame_mem (SImode, x);
716 x = gen_rtx_SET (VOIDmode, x, gen_rtx_REG (SImode, regno));
717 elts[count] = F(x);
718
719 /* Remove the register from the mask so that... */
720 mask &= ~(1u << regno);
f6cd7c62 721 }
cc909bba
RH
722
723 /* ... we can make sure that we didn't try to use a register
724 not listed in the store order. */
725 gcc_assert (mask == 0);
726
727 /* Create the instruction that updates the stack pointer. */
728 x = plus_constant (stack_pointer_rtx, count * -4);
729 x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
730 elts[0] = F(x);
731
732 /* We need one PARALLEL element to update the stack pointer and
733 an additional element for each register that is stored. */
734 x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (count + 1, elts));
735 F (emit_insn (x));
f6cd7c62
RS
736}
737
11bb1f11 738void
e7ab5593 739mn10300_expand_prologue (void)
11bb1f11 740{
040c5757 741 HOST_WIDE_INT size = mn10300_frame_size ();
11bb1f11 742
8596d0a1 743 /* If we use any of the callee-saved registers, save them now. */
f6cd7c62 744 mn10300_gen_multiple_store (mn10300_get_live_callee_saved_regs ());
777fbf09 745
18e9d2f9
AO
746 if (TARGET_AM33_2 && fp_regs_to_save ())
747 {
748 int num_regs_to_save = fp_regs_to_save (), i;
749 HOST_WIDE_INT xsize;
e7ab5593
NC
750 enum
751 {
752 save_sp_merge,
753 save_sp_no_merge,
754 save_sp_partial_merge,
755 save_a0_merge,
756 save_a0_no_merge
757 } strategy;
18e9d2f9
AO
758 unsigned int strategy_size = (unsigned)-1, this_strategy_size;
759 rtx reg;
18e9d2f9
AO
760
761 /* We have several different strategies to save FP registers.
762 We can store them using SP offsets, which is beneficial if
763 there are just a few registers to save, or we can use `a0' in
764 post-increment mode (`a0' is the only call-clobbered address
765 register that is never used to pass information to a
766 function). Furthermore, if we don't need a frame pointer, we
767 can merge the two SP adds into a single one, but this isn't
768 always beneficial; sometimes we can just split the two adds
769 so that we don't exceed a 16-bit constant size. The code
770 below will select which strategy to use, so as to generate
771 smallest code. Ties are broken in favor or shorter sequences
772 (in terms of number of instructions). */
773
774#define SIZE_ADD_AX(S) ((((S) >= (1 << 15)) || ((S) < -(1 << 15))) ? 6 \
775 : (((S) >= (1 << 7)) || ((S) < -(1 << 7))) ? 4 : 2)
776#define SIZE_ADD_SP(S) ((((S) >= (1 << 15)) || ((S) < -(1 << 15))) ? 6 \
777 : (((S) >= (1 << 7)) || ((S) < -(1 << 7))) ? 4 : 3)
f4a88680
JR
778
779/* We add 0 * (S) in two places to promote to the type of S,
780 so that all arms of the conditional have the same type. */
18e9d2f9 781#define SIZE_FMOV_LIMIT(S,N,L,SIZE1,SIZE2,ELSE) \
f4a88680 782 (((S) >= (L)) ? 0 * (S) + (SIZE1) * (N) \
18e9d2f9
AO
783 : ((S) + 4 * (N) >= (L)) ? (((L) - (S)) / 4 * (SIZE2) \
784 + ((S) + 4 * (N) - (L)) / 4 * (SIZE1)) \
f4a88680 785 : 0 * (S) + (ELSE))
18e9d2f9
AO
786#define SIZE_FMOV_SP_(S,N) \
787 (SIZE_FMOV_LIMIT ((S), (N), (1 << 24), 7, 6, \
788 SIZE_FMOV_LIMIT ((S), (N), (1 << 8), 6, 4, \
789 (S) ? 4 * (N) : 3 + 4 * ((N) - 1))))
790#define SIZE_FMOV_SP(S,N) (SIZE_FMOV_SP_ ((unsigned HOST_WIDE_INT)(S), (N)))
791
792 /* Consider alternative save_sp_merge only if we don't need the
4375e090 793 frame pointer and size is nonzero. */
18e9d2f9
AO
794 if (! frame_pointer_needed && size)
795 {
796 /* Insn: add -(size + 4 * num_regs_to_save), sp. */
797 this_strategy_size = SIZE_ADD_SP (-(size + 4 * num_regs_to_save));
798 /* Insn: fmov fs#, (##, sp), for each fs# to be saved. */
799 this_strategy_size += SIZE_FMOV_SP (size, num_regs_to_save);
800
801 if (this_strategy_size < strategy_size)
802 {
803 strategy = save_sp_merge;
804 strategy_size = this_strategy_size;
805 }
806 }
807
808 /* Consider alternative save_sp_no_merge unconditionally. */
809 /* Insn: add -4 * num_regs_to_save, sp. */
810 this_strategy_size = SIZE_ADD_SP (-4 * num_regs_to_save);
811 /* Insn: fmov fs#, (##, sp), for each fs# to be saved. */
812 this_strategy_size += SIZE_FMOV_SP (0, num_regs_to_save);
813 if (size)
814 {
815 /* Insn: add -size, sp. */
816 this_strategy_size += SIZE_ADD_SP (-size);
817 }
818
819 if (this_strategy_size < strategy_size)
820 {
821 strategy = save_sp_no_merge;
822 strategy_size = this_strategy_size;
823 }
824
825 /* Consider alternative save_sp_partial_merge only if we don't
826 need a frame pointer and size is reasonably large. */
827 if (! frame_pointer_needed && size + 4 * num_regs_to_save > 128)
828 {
829 /* Insn: add -128, sp. */
830 this_strategy_size = SIZE_ADD_SP (-128);
831 /* Insn: fmov fs#, (##, sp), for each fs# to be saved. */
832 this_strategy_size += SIZE_FMOV_SP (128 - 4 * num_regs_to_save,
833 num_regs_to_save);
834 if (size)
835 {
836 /* Insn: add 128-size, sp. */
837 this_strategy_size += SIZE_ADD_SP (128 - size);
838 }
839
840 if (this_strategy_size < strategy_size)
841 {
842 strategy = save_sp_partial_merge;
843 strategy_size = this_strategy_size;
844 }
845 }
846
847 /* Consider alternative save_a0_merge only if we don't need a
4375e090 848 frame pointer, size is nonzero and the user hasn't
18e9d2f9
AO
849 changed the calling conventions of a0. */
850 if (! frame_pointer_needed && size
9d54866d 851 && call_really_used_regs [FIRST_ADDRESS_REGNUM]
18e9d2f9
AO
852 && ! fixed_regs[FIRST_ADDRESS_REGNUM])
853 {
854 /* Insn: add -(size + 4 * num_regs_to_save), sp. */
855 this_strategy_size = SIZE_ADD_SP (-(size + 4 * num_regs_to_save));
856 /* Insn: mov sp, a0. */
857 this_strategy_size++;
858 if (size)
859 {
860 /* Insn: add size, a0. */
861 this_strategy_size += SIZE_ADD_AX (size);
862 }
863 /* Insn: fmov fs#, (a0+), for each fs# to be saved. */
864 this_strategy_size += 3 * num_regs_to_save;
865
866 if (this_strategy_size < strategy_size)
867 {
868 strategy = save_a0_merge;
869 strategy_size = this_strategy_size;
870 }
871 }
872
873 /* Consider alternative save_a0_no_merge if the user hasn't
8596d0a1 874 changed the calling conventions of a0. */
9d54866d 875 if (call_really_used_regs [FIRST_ADDRESS_REGNUM]
18e9d2f9
AO
876 && ! fixed_regs[FIRST_ADDRESS_REGNUM])
877 {
878 /* Insn: add -4 * num_regs_to_save, sp. */
879 this_strategy_size = SIZE_ADD_SP (-4 * num_regs_to_save);
880 /* Insn: mov sp, a0. */
881 this_strategy_size++;
882 /* Insn: fmov fs#, (a0+), for each fs# to be saved. */
883 this_strategy_size += 3 * num_regs_to_save;
884 if (size)
885 {
886 /* Insn: add -size, sp. */
887 this_strategy_size += SIZE_ADD_SP (-size);
888 }
889
890 if (this_strategy_size < strategy_size)
891 {
892 strategy = save_a0_no_merge;
893 strategy_size = this_strategy_size;
894 }
895 }
896
897 /* Emit the initial SP add, common to all strategies. */
898 switch (strategy)
899 {
900 case save_sp_no_merge:
901 case save_a0_no_merge:
2720cc47
NC
902 F (emit_insn (gen_addsi3 (stack_pointer_rtx,
903 stack_pointer_rtx,
904 GEN_INT (-4 * num_regs_to_save))));
18e9d2f9
AO
905 xsize = 0;
906 break;
907
908 case save_sp_partial_merge:
2720cc47
NC
909 F (emit_insn (gen_addsi3 (stack_pointer_rtx,
910 stack_pointer_rtx,
911 GEN_INT (-128))));
18e9d2f9
AO
912 xsize = 128 - 4 * num_regs_to_save;
913 size -= xsize;
914 break;
915
916 case save_sp_merge:
917 case save_a0_merge:
2720cc47
NC
918 F (emit_insn (gen_addsi3 (stack_pointer_rtx,
919 stack_pointer_rtx,
920 GEN_INT (-(size + 4 * num_regs_to_save)))));
18e9d2f9 921 /* We'll have to adjust FP register saves according to the
8596d0a1 922 frame size. */
18e9d2f9
AO
923 xsize = size;
924 /* Since we've already created the stack frame, don't do it
8596d0a1 925 again at the end of the function. */
18e9d2f9
AO
926 size = 0;
927 break;
928
929 default:
dc759020 930 gcc_unreachable ();
18e9d2f9 931 }
5abc5de9 932
18e9d2f9
AO
933 /* Now prepare register a0, if we have decided to use it. */
934 switch (strategy)
935 {
936 case save_sp_merge:
937 case save_sp_no_merge:
938 case save_sp_partial_merge:
939 reg = 0;
940 break;
941
942 case save_a0_merge:
943 case save_a0_no_merge:
944 reg = gen_rtx_REG (SImode, FIRST_ADDRESS_REGNUM);
2720cc47 945 F (emit_insn (gen_movsi (reg, stack_pointer_rtx)));
18e9d2f9 946 if (xsize)
2720cc47 947 F (emit_insn (gen_addsi3 (reg, reg, GEN_INT (xsize))));
18e9d2f9
AO
948 reg = gen_rtx_POST_INC (SImode, reg);
949 break;
5abc5de9 950
18e9d2f9 951 default:
dc759020 952 gcc_unreachable ();
18e9d2f9 953 }
5abc5de9 954
18e9d2f9
AO
955 /* Now actually save the FP registers. */
956 for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
9d54866d 957 if (df_regs_ever_live_p (i) && ! call_really_used_regs [i])
18e9d2f9
AO
958 {
959 rtx addr;
960
961 if (reg)
962 addr = reg;
963 else
964 {
965 /* If we aren't using `a0', use an SP offset. */
966 if (xsize)
967 {
968 addr = gen_rtx_PLUS (SImode,
969 stack_pointer_rtx,
970 GEN_INT (xsize));
971 }
972 else
973 addr = stack_pointer_rtx;
5abc5de9 974
18e9d2f9
AO
975 xsize += 4;
976 }
977
2720cc47
NC
978 F (emit_insn (gen_movsf (gen_rtx_MEM (SFmode, addr),
979 gen_rtx_REG (SFmode, i))));
18e9d2f9
AO
980 }
981 }
982
777fbf09 983 /* Now put the frame pointer into the frame pointer register. */
11bb1f11 984 if (frame_pointer_needed)
2720cc47 985 F (emit_move_insn (frame_pointer_rtx, stack_pointer_rtx));
11bb1f11 986
777fbf09 987 /* Allocate stack for this frame. */
11bb1f11 988 if (size)
2720cc47
NC
989 F (emit_insn (gen_addsi3 (stack_pointer_rtx,
990 stack_pointer_rtx,
991 GEN_INT (-size))));
992
6fb5fa3c 993 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
040c5757 994 emit_insn (gen_load_pic ());
11bb1f11
JL
995}
996
997void
e7ab5593 998mn10300_expand_epilogue (void)
11bb1f11 999{
040c5757 1000 HOST_WIDE_INT size = mn10300_frame_size ();
37a185d7 1001 int reg_save_bytes = REG_SAVE_BYTES;
4af476d7 1002
18e9d2f9
AO
1003 if (TARGET_AM33_2 && fp_regs_to_save ())
1004 {
1005 int num_regs_to_save = fp_regs_to_save (), i;
1006 rtx reg = 0;
1007
1008 /* We have several options to restore FP registers. We could
1009 load them from SP offsets, but, if there are enough FP
1010 registers to restore, we win if we use a post-increment
1011 addressing mode. */
1012
1013 /* If we have a frame pointer, it's the best option, because we
1014 already know it has the value we want. */
1015 if (frame_pointer_needed)
1016 reg = gen_rtx_REG (SImode, FRAME_POINTER_REGNUM);
1017 /* Otherwise, we may use `a1', since it's call-clobbered and
1018 it's never used for return values. But only do so if it's
1019 smaller than using SP offsets. */
1020 else
1021 {
1022 enum { restore_sp_post_adjust,
1023 restore_sp_pre_adjust,
1024 restore_sp_partial_adjust,
1025 restore_a1 } strategy;
1026 unsigned int this_strategy_size, strategy_size = (unsigned)-1;
1027
1028 /* Consider using sp offsets before adjusting sp. */
1029 /* Insn: fmov (##,sp),fs#, for each fs# to be restored. */
1030 this_strategy_size = SIZE_FMOV_SP (size, num_regs_to_save);
1031 /* If size is too large, we'll have to adjust SP with an
1032 add. */
37a185d7 1033 if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
18e9d2f9
AO
1034 {
1035 /* Insn: add size + 4 * num_regs_to_save, sp. */
1036 this_strategy_size += SIZE_ADD_SP (size + 4 * num_regs_to_save);
1037 }
1038 /* If we don't have to restore any non-FP registers,
1039 we'll be able to save one byte by using rets. */
37a185d7 1040 if (! reg_save_bytes)
18e9d2f9
AO
1041 this_strategy_size--;
1042
1043 if (this_strategy_size < strategy_size)
1044 {
1045 strategy = restore_sp_post_adjust;
1046 strategy_size = this_strategy_size;
1047 }
1048
1049 /* Consider using sp offsets after adjusting sp. */
1050 /* Insn: add size, sp. */
1051 this_strategy_size = SIZE_ADD_SP (size);
1052 /* Insn: fmov (##,sp),fs#, for each fs# to be restored. */
1053 this_strategy_size += SIZE_FMOV_SP (0, num_regs_to_save);
1054 /* We're going to use ret to release the FP registers
8596d0a1 1055 save area, so, no savings. */
18e9d2f9
AO
1056
1057 if (this_strategy_size < strategy_size)
1058 {
1059 strategy = restore_sp_pre_adjust;
1060 strategy_size = this_strategy_size;
1061 }
1062
1063 /* Consider using sp offsets after partially adjusting sp.
1064 When size is close to 32Kb, we may be able to adjust SP
1065 with an imm16 add instruction while still using fmov
1066 (d8,sp). */
37a185d7 1067 if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
18e9d2f9
AO
1068 {
1069 /* Insn: add size + 4 * num_regs_to_save
37a185d7 1070 + reg_save_bytes - 252,sp. */
18e9d2f9 1071 this_strategy_size = SIZE_ADD_SP (size + 4 * num_regs_to_save
37a185d7 1072 + reg_save_bytes - 252);
18e9d2f9 1073 /* Insn: fmov (##,sp),fs#, fo each fs# to be restored. */
37a185d7 1074 this_strategy_size += SIZE_FMOV_SP (252 - reg_save_bytes
18e9d2f9
AO
1075 - 4 * num_regs_to_save,
1076 num_regs_to_save);
1077 /* We're going to use ret to release the FP registers
8596d0a1 1078 save area, so, no savings. */
18e9d2f9
AO
1079
1080 if (this_strategy_size < strategy_size)
1081 {
1082 strategy = restore_sp_partial_adjust;
1083 strategy_size = this_strategy_size;
1084 }
1085 }
1086
1087 /* Consider using a1 in post-increment mode, as long as the
1088 user hasn't changed the calling conventions of a1. */
9d54866d 1089 if (call_really_used_regs [FIRST_ADDRESS_REGNUM + 1]
18e9d2f9
AO
1090 && ! fixed_regs[FIRST_ADDRESS_REGNUM+1])
1091 {
1092 /* Insn: mov sp,a1. */
1093 this_strategy_size = 1;
1094 if (size)
1095 {
1096 /* Insn: add size,a1. */
1097 this_strategy_size += SIZE_ADD_AX (size);
1098 }
1099 /* Insn: fmov (a1+),fs#, for each fs# to be restored. */
1100 this_strategy_size += 3 * num_regs_to_save;
1101 /* If size is large enough, we may be able to save a
1102 couple of bytes. */
37a185d7 1103 if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
18e9d2f9
AO
1104 {
1105 /* Insn: mov a1,sp. */
1106 this_strategy_size += 2;
1107 }
1108 /* If we don't have to restore any non-FP registers,
1109 we'll be able to save one byte by using rets. */
37a185d7 1110 if (! reg_save_bytes)
18e9d2f9
AO
1111 this_strategy_size--;
1112
1113 if (this_strategy_size < strategy_size)
1114 {
1115 strategy = restore_a1;
1116 strategy_size = this_strategy_size;
1117 }
1118 }
1119
1120 switch (strategy)
1121 {
1122 case restore_sp_post_adjust:
1123 break;
1124
1125 case restore_sp_pre_adjust:
1126 emit_insn (gen_addsi3 (stack_pointer_rtx,
1127 stack_pointer_rtx,
1128 GEN_INT (size)));
1129 size = 0;
1130 break;
1131
1132 case restore_sp_partial_adjust:
1133 emit_insn (gen_addsi3 (stack_pointer_rtx,
1134 stack_pointer_rtx,
1135 GEN_INT (size + 4 * num_regs_to_save
37a185d7
RH
1136 + reg_save_bytes - 252)));
1137 size = 252 - reg_save_bytes - 4 * num_regs_to_save;
18e9d2f9 1138 break;
5abc5de9 1139
18e9d2f9
AO
1140 case restore_a1:
1141 reg = gen_rtx_REG (SImode, FIRST_ADDRESS_REGNUM + 1);
1142 emit_insn (gen_movsi (reg, stack_pointer_rtx));
1143 if (size)
1144 emit_insn (gen_addsi3 (reg, reg, GEN_INT (size)));
1145 break;
1146
1147 default:
dc759020 1148 gcc_unreachable ();
18e9d2f9
AO
1149 }
1150 }
1151
1152 /* Adjust the selected register, if any, for post-increment. */
1153 if (reg)
1154 reg = gen_rtx_POST_INC (SImode, reg);
1155
1156 for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
9d54866d 1157 if (df_regs_ever_live_p (i) && ! call_really_used_regs [i])
18e9d2f9
AO
1158 {
1159 rtx addr;
5abc5de9 1160
18e9d2f9
AO
1161 if (reg)
1162 addr = reg;
1163 else if (size)
1164 {
1165 /* If we aren't using a post-increment register, use an
8596d0a1 1166 SP offset. */
18e9d2f9
AO
1167 addr = gen_rtx_PLUS (SImode,
1168 stack_pointer_rtx,
1169 GEN_INT (size));
1170 }
1171 else
1172 addr = stack_pointer_rtx;
1173
1174 size += 4;
1175
2720cc47
NC
1176 emit_insn (gen_movsf (gen_rtx_REG (SFmode, i),
1177 gen_rtx_MEM (SFmode, addr)));
18e9d2f9
AO
1178 }
1179
1180 /* If we were using the restore_a1 strategy and the number of
1181 bytes to be released won't fit in the `ret' byte, copy `a1'
1182 to `sp', to avoid having to use `add' to adjust it. */
37a185d7 1183 if (! frame_pointer_needed && reg && size + reg_save_bytes > 255)
18e9d2f9
AO
1184 {
1185 emit_move_insn (stack_pointer_rtx, XEXP (reg, 0));
1186 size = 0;
1187 }
1188 }
1189
5d29a95f
JL
1190 /* Maybe cut back the stack, except for the register save area.
1191
1192 If the frame pointer exists, then use the frame pointer to
1193 cut back the stack.
1194
1195 If the stack size + register save area is more than 255 bytes,
1196 then the stack must be cut back here since the size + register
5abc5de9 1197 save size is too big for a ret/retf instruction.
5d29a95f
JL
1198
1199 Else leave it alone, it will be cut back as part of the
1200 ret/retf instruction, or there wasn't any stack to begin with.
1201
dab66575 1202 Under no circumstances should the register save area be
5d29a95f
JL
1203 deallocated here, that would leave a window where an interrupt
1204 could occur and trash the register save area. */
11bb1f11
JL
1205 if (frame_pointer_needed)
1206 {
11bb1f11 1207 emit_move_insn (stack_pointer_rtx, frame_pointer_rtx);
4246e0c5
JL
1208 size = 0;
1209 }
37a185d7 1210 else if (size + reg_save_bytes > 255)
4246e0c5
JL
1211 {
1212 emit_insn (gen_addsi3 (stack_pointer_rtx,
1213 stack_pointer_rtx,
1214 GEN_INT (size)));
1215 size = 0;
11bb1f11 1216 }
11bb1f11 1217
ed6089d6 1218 /* Adjust the stack and restore callee-saved registers, if any. */
37a185d7 1219 if (mn10300_can_use_rets_insn ())
3810076b 1220 emit_jump_insn (ret_rtx);
777fbf09 1221 else
37a185d7 1222 emit_jump_insn (gen_return_ret (GEN_INT (size + REG_SAVE_BYTES)));
11bb1f11
JL
1223}
1224
05713b80 1225/* Recognize the PARALLEL rtx generated by mn10300_gen_multiple_store().
f6cd7c62
RS
1226 This function is for MATCH_PARALLEL and so assumes OP is known to be
1227 parallel. If OP is a multiple store, return a mask indicating which
1228 registers it saves. Return 0 otherwise. */
1229
1230int
e7ab5593
NC
1231mn10300_store_multiple_operation (rtx op,
1232 enum machine_mode mode ATTRIBUTE_UNUSED)
f6cd7c62
RS
1233{
1234 int count;
1235 int mask;
1236 int i;
1237 unsigned int last;
1238 rtx elt;
1239
1240 count = XVECLEN (op, 0);
1241 if (count < 2)
1242 return 0;
1243
1244 /* Check that first instruction has the form (set (sp) (plus A B)) */
1245 elt = XVECEXP (op, 0, 0);
1246 if (GET_CODE (elt) != SET
e7ab5593 1247 || (! REG_P (SET_DEST (elt)))
f6cd7c62
RS
1248 || REGNO (SET_DEST (elt)) != STACK_POINTER_REGNUM
1249 || GET_CODE (SET_SRC (elt)) != PLUS)
1250 return 0;
1251
1252 /* Check that A is the stack pointer and B is the expected stack size.
1253 For OP to match, each subsequent instruction should push a word onto
1254 the stack. We therefore expect the first instruction to create
8596d0a1 1255 COUNT-1 stack slots. */
f6cd7c62 1256 elt = SET_SRC (elt);
e7ab5593 1257 if ((! REG_P (XEXP (elt, 0)))
f6cd7c62 1258 || REGNO (XEXP (elt, 0)) != STACK_POINTER_REGNUM
e7ab5593 1259 || (! CONST_INT_P (XEXP (elt, 1)))
f6cd7c62
RS
1260 || INTVAL (XEXP (elt, 1)) != -(count - 1) * 4)
1261 return 0;
1262
f6cd7c62
RS
1263 mask = 0;
1264 for (i = 1; i < count; i++)
1265 {
cc909bba
RH
1266 /* Check that element i is a (set (mem M) R). */
1267 /* ??? Validate the register order a-la mn10300_gen_multiple_store.
1268 Remember: the ordering is *not* monotonic. */
f6cd7c62
RS
1269 elt = XVECEXP (op, 0, i);
1270 if (GET_CODE (elt) != SET
e7ab5593 1271 || (! MEM_P (SET_DEST (elt)))
cc909bba 1272 || (! REG_P (SET_SRC (elt))))
f6cd7c62
RS
1273 return 0;
1274
cc909bba 1275 /* Remember which registers are to be saved. */
f6cd7c62
RS
1276 last = REGNO (SET_SRC (elt));
1277 mask |= (1 << last);
1278
1279 /* Check that M has the form (plus (sp) (const_int -I*4)) */
1280 elt = XEXP (SET_DEST (elt), 0);
1281 if (GET_CODE (elt) != PLUS
e7ab5593 1282 || (! REG_P (XEXP (elt, 0)))
f6cd7c62 1283 || REGNO (XEXP (elt, 0)) != STACK_POINTER_REGNUM
e7ab5593 1284 || (! CONST_INT_P (XEXP (elt, 1)))
f6cd7c62
RS
1285 || INTVAL (XEXP (elt, 1)) != -i * 4)
1286 return 0;
1287 }
1288
8596d0a1 1289 /* All or none of the callee-saved extended registers must be in the set. */
f6cd7c62
RS
1290 if ((mask & 0x3c000) != 0
1291 && (mask & 0x3c000) != 0x3c000)
1292 return 0;
1293
1294 return mask;
1295}
1296
f2831cc9
AS
1297/* Implement TARGET_PREFERRED_RELOAD_CLASS. */
1298
1299static reg_class_t
1300mn10300_preferred_reload_class (rtx x, reg_class_t rclass)
1301{
1302 if (x == stack_pointer_rtx && rclass != SP_REGS)
8b119bb6 1303 return (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
f2831cc9
AS
1304 else if (MEM_P (x)
1305 || (REG_P (x)
1306 && !HARD_REGISTER_P (x))
1307 || (GET_CODE (x) == SUBREG
1308 && REG_P (SUBREG_REG (x))
1309 && !HARD_REGISTER_P (SUBREG_REG (x))))
1310 return LIMIT_RELOAD_CLASS (GET_MODE (x), rclass);
1311 else
1312 return rclass;
1313}
1314
1315/* Implement TARGET_PREFERRED_OUTPUT_RELOAD_CLASS. */
1316
1317static reg_class_t
1318mn10300_preferred_output_reload_class (rtx x, reg_class_t rclass)
1319{
1320 if (x == stack_pointer_rtx && rclass != SP_REGS)
8b119bb6 1321 return (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
f2831cc9
AS
1322 return rclass;
1323}
1324
8b119bb6 1325/* Implement TARGET_SECONDARY_RELOAD. */
e7ab5593 1326
8b119bb6
RH
1327static reg_class_t
1328mn10300_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
1329 enum machine_mode mode, secondary_reload_info *sri)
11bb1f11 1330{
8b119bb6
RH
1331 enum reg_class rclass = (enum reg_class) rclass_i;
1332 enum reg_class xclass = NO_REGS;
1333 unsigned int xregno = INVALID_REGNUM;
1334
1335 if (REG_P (x))
4d1a91c2 1336 {
8b119bb6
RH
1337 xregno = REGNO (x);
1338 if (xregno >= FIRST_PSEUDO_REGISTER)
1339 xregno = true_regnum (x);
1340 if (xregno != INVALID_REGNUM)
1341 xclass = REGNO_REG_CLASS (xregno);
1342 }
1343
1344 if (!TARGET_AM33)
1345 {
1346 /* Memory load/stores less than a full word wide can't have an
1347 address or stack pointer destination. They must use a data
1348 register as an intermediate register. */
1349 if (rclass != DATA_REGS
1350 && (mode == QImode || mode == HImode)
1351 && xclass == NO_REGS)
1352 return DATA_REGS;
1353
1354 /* We can only move SP to/from an address register. */
1355 if (in_p
1356 && rclass == SP_REGS
1357 && xclass != ADDRESS_REGS)
1358 return ADDRESS_REGS;
1359 if (!in_p
1360 && xclass == SP_REGS
1361 && rclass != ADDRESS_REGS
1362 && rclass != SP_OR_ADDRESS_REGS)
1363 return ADDRESS_REGS;
4d1a91c2 1364 }
11bb1f11 1365
8b119bb6
RH
1366 /* We can't directly load sp + const_int into a register;
1367 we must use an address register as an scratch. */
1368 if (in_p
1369 && rclass != SP_REGS
0a2aaacc 1370 && rclass != SP_OR_ADDRESS_REGS
36846b26 1371 && rclass != SP_OR_GENERAL_REGS
8b119bb6
RH
1372 && GET_CODE (x) == PLUS
1373 && (XEXP (x, 0) == stack_pointer_rtx
1374 || XEXP (x, 1) == stack_pointer_rtx))
1375 {
1376 sri->icode = CODE_FOR_reload_plus_sp_const;
1377 return NO_REGS;
1378 }
11bb1f11 1379
c25a21f5
RH
1380 /* We can only move MDR to/from a data register. */
1381 if (rclass == MDR_REGS && xclass != DATA_REGS)
1382 return DATA_REGS;
1383 if (xclass == MDR_REGS && rclass != DATA_REGS)
1384 return DATA_REGS;
1385
8b119bb6 1386 /* We can't load/store an FP register from a constant address. */
6528281d 1387 if (TARGET_AM33_2
8b119bb6
RH
1388 && (rclass == FP_REGS || xclass == FP_REGS)
1389 && (xclass == NO_REGS || rclass == NO_REGS))
18e9d2f9 1390 {
8b119bb6
RH
1391 rtx addr = NULL;
1392
1393 if (xregno >= FIRST_PSEUDO_REGISTER && xregno != INVALID_REGNUM)
1394 {
f2034d06 1395 addr = reg_equiv_mem (xregno);
8b119bb6
RH
1396 if (addr)
1397 addr = XEXP (addr, 0);
1398 }
1399 else if (MEM_P (x))
1400 addr = XEXP (x, 0);
6528281d 1401
8b119bb6 1402 if (addr && CONSTANT_ADDRESS_P (addr))
36846b26 1403 return GENERAL_REGS;
18e9d2f9
AO
1404 }
1405
777fbf09
JL
1406 /* Otherwise assume no secondary reloads are needed. */
1407 return NO_REGS;
1408}
1409
040c5757
RH
1410int
1411mn10300_frame_size (void)
1412{
1413 /* size includes the fixed stack space needed for function calls. */
1414 int size = get_frame_size () + crtl->outgoing_args_size;
1415
1416 /* And space for the return pointer. */
1417 size += crtl->outgoing_args_size ? 4 : 0;
1418
1419 return size;
1420}
1421
777fbf09 1422int
e7ab5593 1423mn10300_initial_offset (int from, int to)
777fbf09 1424{
040c5757
RH
1425 int diff = 0;
1426
1427 gcc_assert (from == ARG_POINTER_REGNUM || from == FRAME_POINTER_REGNUM);
1428 gcc_assert (to == FRAME_POINTER_REGNUM || to == STACK_POINTER_REGNUM);
1429
1430 if (to == STACK_POINTER_REGNUM)
1431 diff = mn10300_frame_size ();
1432
3dbc43d1
JL
1433 /* The difference between the argument pointer and the frame pointer
1434 is the size of the callee register save area. */
040c5757 1435 if (from == ARG_POINTER_REGNUM)
11bb1f11 1436 {
040c5757
RH
1437 diff += REG_SAVE_BYTES;
1438 diff += 4 * fp_regs_to_save ();
11bb1f11
JL
1439 }
1440
040c5757 1441 return diff;
11bb1f11 1442}
22ef4e9b 1443
bd5bd7ac
KH
1444/* Worker function for TARGET_RETURN_IN_MEMORY. */
1445
9024ea92 1446static bool
586de218 1447mn10300_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
9024ea92
KH
1448{
1449 /* Return values > 8 bytes in length in memory. */
b1eb8119
DD
1450 return (int_size_in_bytes (type) > 8
1451 || int_size_in_bytes (type) == 0
1452 || TYPE_MODE (type) == BLKmode);
9024ea92
KH
1453}
1454
22ef4e9b
JL
1455/* Flush the argument registers to the stack for a stdarg function;
1456 return the new argument pointer. */
9024ea92 1457static rtx
f1777882 1458mn10300_builtin_saveregs (void)
22ef4e9b 1459{
fc2acc87 1460 rtx offset, mem;
22ef4e9b 1461 tree fntype = TREE_TYPE (current_function_decl);
f38958e8 1462 int argadj = ((!stdarg_p (fntype))
22ef4e9b 1463 ? UNITS_PER_WORD : 0);
4862826d 1464 alias_set_type set = get_varargs_alias_set ();
22ef4e9b
JL
1465
1466 if (argadj)
38173d38 1467 offset = plus_constant (crtl->args.arg_offset_rtx, argadj);
22ef4e9b 1468 else
38173d38 1469 offset = crtl->args.arg_offset_rtx;
22ef4e9b 1470
38173d38 1471 mem = gen_rtx_MEM (SImode, crtl->args.internal_arg_pointer);
ba4828e0 1472 set_mem_alias_set (mem, set);
fc2acc87
RH
1473 emit_move_insn (mem, gen_rtx_REG (SImode, 0));
1474
1475 mem = gen_rtx_MEM (SImode,
38173d38 1476 plus_constant (crtl->args.internal_arg_pointer, 4));
ba4828e0 1477 set_mem_alias_set (mem, set);
fc2acc87
RH
1478 emit_move_insn (mem, gen_rtx_REG (SImode, 1));
1479
22ef4e9b 1480 return copy_to_reg (expand_binop (Pmode, add_optab,
38173d38 1481 crtl->args.internal_arg_pointer,
22ef4e9b
JL
1482 offset, 0, 0, OPTAB_LIB_WIDEN));
1483}
1484
d7bd8aeb 1485static void
f1777882 1486mn10300_va_start (tree valist, rtx nextarg)
fc2acc87 1487{
6c535c69 1488 nextarg = expand_builtin_saveregs ();
e5faf155 1489 std_expand_builtin_va_start (valist, nextarg);
fc2acc87
RH
1490}
1491
8cd5a4e0
RH
1492/* Return true when a parameter should be passed by reference. */
1493
1494static bool
d5cc9181 1495mn10300_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
586de218 1496 enum machine_mode mode, const_tree type,
8cd5a4e0
RH
1497 bool named ATTRIBUTE_UNUSED)
1498{
1499 unsigned HOST_WIDE_INT size;
1500
1501 if (type)
1502 size = int_size_in_bytes (type);
1503 else
1504 size = GET_MODE_SIZE (mode);
1505
b1eb8119 1506 return (size > 8 || size == 0);
8cd5a4e0
RH
1507}
1508
22ef4e9b 1509/* Return an RTX to represent where a value with mode MODE will be returned
990dc016 1510 from a function. If the result is NULL_RTX, the argument is pushed. */
22ef4e9b 1511
ce236858 1512static rtx
d5cc9181 1513mn10300_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
ce236858 1514 const_tree type, bool named ATTRIBUTE_UNUSED)
22ef4e9b 1515{
d5cc9181 1516 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
990dc016 1517 rtx result = NULL_RTX;
f4a88680 1518 int size;
22ef4e9b
JL
1519
1520 /* We only support using 2 data registers as argument registers. */
1521 int nregs = 2;
1522
1523 /* Figure out the size of the object to be passed. */
1524 if (mode == BLKmode)
1525 size = int_size_in_bytes (type);
1526 else
1527 size = GET_MODE_SIZE (mode);
1528
22ef4e9b
JL
1529 cum->nbytes = (cum->nbytes + 3) & ~3;
1530
1531 /* Don't pass this arg via a register if all the argument registers
1532 are used up. */
1533 if (cum->nbytes > nregs * UNITS_PER_WORD)
990dc016 1534 return result;
22ef4e9b
JL
1535
1536 /* Don't pass this arg via a register if it would be split between
1537 registers and memory. */
1538 if (type == NULL_TREE
1539 && cum->nbytes + size > nregs * UNITS_PER_WORD)
990dc016 1540 return result;
22ef4e9b
JL
1541
1542 switch (cum->nbytes / UNITS_PER_WORD)
1543 {
1544 case 0:
990dc016 1545 result = gen_rtx_REG (mode, FIRST_ARGUMENT_REGNUM);
22ef4e9b
JL
1546 break;
1547 case 1:
990dc016 1548 result = gen_rtx_REG (mode, FIRST_ARGUMENT_REGNUM + 1);
22ef4e9b
JL
1549 break;
1550 default:
990dc016 1551 break;
22ef4e9b
JL
1552 }
1553
1554 return result;
1555}
1556
ce236858
NF
1557/* Update the data in CUM to advance over an argument
1558 of mode MODE and data type TYPE.
1559 (TYPE is null for libcalls where that information may not be available.) */
1560
1561static void
d5cc9181 1562mn10300_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
ce236858
NF
1563 const_tree type, bool named ATTRIBUTE_UNUSED)
1564{
d5cc9181
JR
1565 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
1566
ce236858
NF
1567 cum->nbytes += (mode != BLKmode
1568 ? (GET_MODE_SIZE (mode) + 3) & ~3
1569 : (int_size_in_bytes (type) + 3) & ~3);
1570}
1571
78a52f11
RH
1572/* Return the number of bytes of registers to use for an argument passed
1573 partially in registers and partially in memory. */
22ef4e9b 1574
78a52f11 1575static int
d5cc9181 1576mn10300_arg_partial_bytes (cumulative_args_t cum_v, enum machine_mode mode,
78a52f11 1577 tree type, bool named ATTRIBUTE_UNUSED)
22ef4e9b 1578{
d5cc9181 1579 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
f4a88680 1580 int size;
22ef4e9b
JL
1581
1582 /* We only support using 2 data registers as argument registers. */
1583 int nregs = 2;
1584
1585 /* Figure out the size of the object to be passed. */
1586 if (mode == BLKmode)
1587 size = int_size_in_bytes (type);
1588 else
1589 size = GET_MODE_SIZE (mode);
1590
22ef4e9b
JL
1591 cum->nbytes = (cum->nbytes + 3) & ~3;
1592
1593 /* Don't pass this arg via a register if all the argument registers
1594 are used up. */
1595 if (cum->nbytes > nregs * UNITS_PER_WORD)
1596 return 0;
1597
1598 if (cum->nbytes + size <= nregs * UNITS_PER_WORD)
1599 return 0;
1600
1601 /* Don't pass this arg via a register if it would be split between
1602 registers and memory. */
1603 if (type == NULL_TREE
1604 && cum->nbytes + size > nregs * UNITS_PER_WORD)
1605 return 0;
1606
78a52f11 1607 return nregs * UNITS_PER_WORD - cum->nbytes;
22ef4e9b
JL
1608}
1609
b1eb8119
DD
1610/* Return the location of the function's value. This will be either
1611 $d0 for integer functions, $a0 for pointers, or a PARALLEL of both
1612 $d0 and $a0 if the -mreturn-pointer-on-do flag is set. Note that
1613 we only return the PARALLEL for outgoing values; we do not want
1614 callers relying on this extra copy. */
1615
34732b0a
AS
1616static rtx
1617mn10300_function_value (const_tree valtype,
1618 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
1619 bool outgoing)
b1eb8119
DD
1620{
1621 rtx rv;
1622 enum machine_mode mode = TYPE_MODE (valtype);
1623
1624 if (! POINTER_TYPE_P (valtype))
1625 return gen_rtx_REG (mode, FIRST_DATA_REGNUM);
1626 else if (! TARGET_PTR_A0D0 || ! outgoing
e3b5732b 1627 || cfun->returns_struct)
b1eb8119
DD
1628 return gen_rtx_REG (mode, FIRST_ADDRESS_REGNUM);
1629
1630 rv = gen_rtx_PARALLEL (mode, rtvec_alloc (2));
1631 XVECEXP (rv, 0, 0)
1632 = gen_rtx_EXPR_LIST (VOIDmode,
1633 gen_rtx_REG (mode, FIRST_ADDRESS_REGNUM),
1634 GEN_INT (0));
5abc5de9 1635
b1eb8119
DD
1636 XVECEXP (rv, 0, 1)
1637 = gen_rtx_EXPR_LIST (VOIDmode,
1638 gen_rtx_REG (mode, FIRST_DATA_REGNUM),
1639 GEN_INT (0));
1640 return rv;
1641}
1642
34732b0a
AS
1643/* Implements TARGET_LIBCALL_VALUE. */
1644
1645static rtx
1646mn10300_libcall_value (enum machine_mode mode,
1647 const_rtx fun ATTRIBUTE_UNUSED)
1648{
1649 return gen_rtx_REG (mode, FIRST_DATA_REGNUM);
1650}
1651
1652/* Implements FUNCTION_VALUE_REGNO_P. */
1653
1654bool
1655mn10300_function_value_regno_p (const unsigned int regno)
1656{
1657 return (regno == FIRST_DATA_REGNUM || regno == FIRST_ADDRESS_REGNUM);
1658}
1659
bad41521 1660/* Output an addition operation. */
4af476d7 1661
1943c2c1 1662const char *
bad41521 1663mn10300_output_add (rtx operands[3], bool need_flags)
22ef4e9b 1664{
bad41521
RH
1665 rtx dest, src1, src2;
1666 unsigned int dest_regnum, src1_regnum, src2_regnum;
1667 enum reg_class src1_class, src2_class, dest_class;
22ef4e9b 1668
bad41521
RH
1669 dest = operands[0];
1670 src1 = operands[1];
1671 src2 = operands[2];
22ef4e9b 1672
bad41521
RH
1673 dest_regnum = true_regnum (dest);
1674 src1_regnum = true_regnum (src1);
22ef4e9b 1675
bad41521
RH
1676 dest_class = REGNO_REG_CLASS (dest_regnum);
1677 src1_class = REGNO_REG_CLASS (src1_regnum);
22ef4e9b 1678
298362c8 1679 if (CONST_INT_P (src2))
bad41521
RH
1680 {
1681 gcc_assert (dest_regnum == src1_regnum);
22ef4e9b 1682
bad41521
RH
1683 if (src2 == const1_rtx && !need_flags)
1684 return "inc %0";
1685 if (INTVAL (src2) == 4 && !need_flags && dest_class != DATA_REGS)
1686 return "inc4 %0";
705ac34f 1687
bad41521
RH
1688 gcc_assert (!need_flags || dest_class != SP_REGS);
1689 return "add %2,%0";
1690 }
1691 else if (CONSTANT_P (src2))
1692 return "add %2,%0";
1693
1694 src2_regnum = true_regnum (src2);
1695 src2_class = REGNO_REG_CLASS (src2_regnum);
1696
1697 if (dest_regnum == src1_regnum)
1698 return "add %2,%0";
1699 if (dest_regnum == src2_regnum)
1700 return "add %1,%0";
1701
1702 /* The rest of the cases are reg = reg+reg. For AM33, we can implement
1703 this directly, as below, but when optimizing for space we can sometimes
1704 do better by using a mov+add. For MN103, we claimed that we could
1705 implement a three-operand add because the various move and add insns
1706 change sizes across register classes, and we can often do better than
1707 reload in choosing which operand to move. */
1708 if (TARGET_AM33 && optimize_insn_for_speed_p ())
1709 return "add %2,%1,%0";
1710
1711 /* Catch cases where no extended register was used. */
1712 if (src1_class != EXTENDED_REGS
1713 && src2_class != EXTENDED_REGS
1714 && dest_class != EXTENDED_REGS)
1715 {
1716 /* We have to copy one of the sources into the destination, then
1717 add the other source to the destination.
1718
1719 Carefully select which source to copy to the destination; a
1720 naive implementation will waste a byte when the source classes
1721 are different and the destination is an address register.
1722 Selecting the lowest cost register copy will optimize this
1723 sequence. */
1724 if (src1_class == dest_class)
1725 return "mov %1,%0\n\tadd %2,%0";
1726 else
1727 return "mov %2,%0\n\tadd %1,%0";
1728 }
705ac34f 1729
bad41521 1730 /* At least one register is an extended register. */
22ef4e9b 1731
bad41521
RH
1732 /* The three operand add instruction on the am33 is a win iff the
1733 output register is an extended register, or if both source
1734 registers are extended registers. */
1735 if (dest_class == EXTENDED_REGS || src1_class == src2_class)
1736 return "add %2,%1,%0";
1737
1738 /* It is better to copy one of the sources to the destination, then
1739 perform a 2 address add. The destination in this case must be
1740 an address or data register and one of the sources must be an
1741 extended register and the remaining source must not be an extended
1742 register.
1743
1744 The best code for this case is to copy the extended reg to the
1745 destination, then emit a two address add. */
1746 if (src1_class == EXTENDED_REGS)
1747 return "mov %1,%0\n\tadd %2,%0";
1748 else
1749 return "mov %2,%0\n\tadd %1,%0";
22ef4e9b 1750}
460f4b9d 1751
e9ad4573
JL
1752/* Return 1 if X contains a symbolic expression. We know these
1753 expressions will have one of a few well defined forms, so
1754 we need only check those forms. */
e7ab5593 1755
e9ad4573 1756int
e7ab5593
NC
1757mn10300_symbolic_operand (rtx op,
1758 enum machine_mode mode ATTRIBUTE_UNUSED)
e9ad4573
JL
1759{
1760 switch (GET_CODE (op))
1761 {
1762 case SYMBOL_REF:
1763 case LABEL_REF:
1764 return 1;
1765 case CONST:
1766 op = XEXP (op, 0);
1767 return ((GET_CODE (XEXP (op, 0)) == SYMBOL_REF
1768 || GET_CODE (XEXP (op, 0)) == LABEL_REF)
f3f63737 1769 && CONST_INT_P (XEXP (op, 1)));
e9ad4573
JL
1770 default:
1771 return 0;
1772 }
1773}
1774
1775/* Try machine dependent ways of modifying an illegitimate address
1776 to be legitimate. If we find one, return the new valid address.
1777 This macro is used in only one place: `memory_address' in explow.c.
1778
1779 OLDX is the address as it was before break_out_memory_refs was called.
1780 In some cases it is useful to look at this to decide what needs to be done.
1781
e9ad4573
JL
1782 Normally it is always safe for this macro to do nothing. It exists to
1783 recognize opportunities to optimize the output.
1784
1785 But on a few ports with segmented architectures and indexed addressing
1786 (mn10300, hppa) it is used to rewrite certain problematical addresses. */
e7ab5593 1787
4af476d7 1788static rtx
506d7b68
PB
1789mn10300_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
1790 enum machine_mode mode ATTRIBUTE_UNUSED)
e9ad4573 1791{
e7ab5593
NC
1792 if (flag_pic && ! mn10300_legitimate_pic_operand_p (x))
1793 x = mn10300_legitimize_pic_address (oldx, NULL_RTX);
d1776069 1794
e9ad4573
JL
1795 /* Uh-oh. We might have an address for x[n-100000]. This needs
1796 special handling to avoid creating an indexed memory address
1797 with x-100000 as the base. */
1798 if (GET_CODE (x) == PLUS
e7ab5593 1799 && mn10300_symbolic_operand (XEXP (x, 1), VOIDmode))
e9ad4573
JL
1800 {
1801 /* Ugly. We modify things here so that the address offset specified
1802 by the index expression is computed first, then added to x to form
1803 the entire address. */
1804
69bc71fa 1805 rtx regx1, regy1, regy2, y;
e9ad4573
JL
1806
1807 /* Strip off any CONST. */
1808 y = XEXP (x, 1);
1809 if (GET_CODE (y) == CONST)
1810 y = XEXP (y, 0);
1811
bf4219f0
JL
1812 if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
1813 {
1814 regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
1815 regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
1816 regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
1817 regx1 = force_reg (Pmode,
e7ab5593
NC
1818 gen_rtx_fmt_ee (GET_CODE (y), Pmode, regx1,
1819 regy2));
c5c76735 1820 return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
bf4219f0 1821 }
e9ad4573 1822 }
371036e0 1823 return x;
e9ad4573 1824}
460ad325 1825
d1776069 1826/* Convert a non-PIC address in `orig' to a PIC address using @GOT or
8596d0a1 1827 @GOTOFF in `reg'. */
e7ab5593 1828
d1776069 1829rtx
e7ab5593 1830mn10300_legitimize_pic_address (rtx orig, rtx reg)
d1776069 1831{
53855940
RH
1832 rtx x;
1833
d1776069
AO
1834 if (GET_CODE (orig) == LABEL_REF
1835 || (GET_CODE (orig) == SYMBOL_REF
1836 && (CONSTANT_POOL_ADDRESS_P (orig)
1837 || ! MN10300_GLOBAL_P (orig))))
1838 {
53855940 1839 if (reg == NULL)
d1776069
AO
1840 reg = gen_reg_rtx (Pmode);
1841
53855940
RH
1842 x = gen_rtx_UNSPEC (SImode, gen_rtvec (1, orig), UNSPEC_GOTOFF);
1843 x = gen_rtx_CONST (SImode, x);
1844 emit_move_insn (reg, x);
1845
1846 x = emit_insn (gen_addsi3 (reg, reg, pic_offset_table_rtx));
d1776069
AO
1847 }
1848 else if (GET_CODE (orig) == SYMBOL_REF)
1849 {
53855940 1850 if (reg == NULL)
d1776069
AO
1851 reg = gen_reg_rtx (Pmode);
1852
53855940
RH
1853 x = gen_rtx_UNSPEC (SImode, gen_rtvec (1, orig), UNSPEC_GOT);
1854 x = gen_rtx_CONST (SImode, x);
1855 x = gen_rtx_PLUS (SImode, pic_offset_table_rtx, x);
1856 x = gen_const_mem (SImode, x);
1857
1858 x = emit_move_insn (reg, x);
d1776069 1859 }
53855940
RH
1860 else
1861 return orig;
1862
1863 set_unique_reg_note (x, REG_EQUAL, orig);
1864 return reg;
d1776069
AO
1865}
1866
1867/* Return zero if X references a SYMBOL_REF or LABEL_REF whose symbol
4375e090 1868 isn't protected by a PIC unspec; nonzero otherwise. */
e7ab5593 1869
d1776069 1870int
e7ab5593 1871mn10300_legitimate_pic_operand_p (rtx x)
d1776069 1872{
e7ab5593
NC
1873 const char *fmt;
1874 int i;
d1776069
AO
1875
1876 if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
1877 return 0;
1878
1879 if (GET_CODE (x) == UNSPEC
1880 && (XINT (x, 1) == UNSPEC_PIC
1881 || XINT (x, 1) == UNSPEC_GOT
1882 || XINT (x, 1) == UNSPEC_GOTOFF
d4e2d7d2
RS
1883 || XINT (x, 1) == UNSPEC_PLT
1884 || XINT (x, 1) == UNSPEC_GOTSYM_OFF))
d1776069
AO
1885 return 1;
1886
d1776069
AO
1887 fmt = GET_RTX_FORMAT (GET_CODE (x));
1888 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
1889 {
1890 if (fmt[i] == 'E')
1891 {
4af476d7 1892 int j;
d1776069
AO
1893
1894 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
e7ab5593 1895 if (! mn10300_legitimate_pic_operand_p (XVECEXP (x, i, j)))
d1776069
AO
1896 return 0;
1897 }
e7ab5593
NC
1898 else if (fmt[i] == 'e'
1899 && ! mn10300_legitimate_pic_operand_p (XEXP (x, i)))
d1776069
AO
1900 return 0;
1901 }
1902
1903 return 1;
1904}
1905
e733134f 1906/* Return TRUE if the address X, taken from a (MEM:MODE X) rtx, is
c6c3dba9
PB
1907 legitimate, and FALSE otherwise.
1908
1909 On the mn10300, the value in the address register must be
1910 in the same memory space/segment as the effective address.
1911
1912 This is problematical for reload since it does not understand
1913 that base+index != index+base in a memory reference.
1914
1915 Note it is still possible to use reg+reg addressing modes,
1916 it's just much more difficult. For a discussion of a possible
1917 workaround and solution, see the comments in pa.c before the
1918 function record_unscaled_index_insn_codes. */
1919
4af476d7 1920static bool
c6c3dba9 1921mn10300_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
e733134f 1922{
36846b26
RH
1923 rtx base, index;
1924
1925 if (CONSTANT_ADDRESS_P (x))
1926 return !flag_pic || mn10300_legitimate_pic_operand_p (x);
e733134f
AO
1927
1928 if (RTX_OK_FOR_BASE_P (x, strict))
36846b26
RH
1929 return true;
1930
1931 if (TARGET_AM33 && (mode == SImode || mode == SFmode || mode == HImode))
1932 {
1933 if (GET_CODE (x) == POST_INC)
1934 return RTX_OK_FOR_BASE_P (XEXP (x, 0), strict);
1935 if (GET_CODE (x) == POST_MODIFY)
1936 return (RTX_OK_FOR_BASE_P (XEXP (x, 0), strict)
1937 && CONSTANT_ADDRESS_P (XEXP (x, 1)));
1938 }
1939
1940 if (GET_CODE (x) != PLUS)
1941 return false;
e733134f 1942
36846b26
RH
1943 base = XEXP (x, 0);
1944 index = XEXP (x, 1);
e733134f 1945
36846b26
RH
1946 if (!REG_P (base))
1947 return false;
1948 if (REG_P (index))
e733134f 1949 {
36846b26
RH
1950 /* ??? Without AM33 generalized (Ri,Rn) addressing, reg+reg
1951 addressing is hard to satisfy. */
1952 if (!TARGET_AM33)
1953 return false;
e733134f 1954
36846b26
RH
1955 return (REGNO_GENERAL_P (REGNO (base), strict)
1956 && REGNO_GENERAL_P (REGNO (index), strict));
1957 }
e733134f 1958
36846b26
RH
1959 if (!REGNO_STRICT_OK_FOR_BASE_P (REGNO (base), strict))
1960 return false;
e733134f 1961
36846b26
RH
1962 if (CONST_INT_P (index))
1963 return IN_RANGE (INTVAL (index), -1 - 0x7fffffff, 0x7fffffff);
1964
1965 if (CONSTANT_ADDRESS_P (index))
1966 return !flag_pic || mn10300_legitimate_pic_operand_p (index);
1967
1968 return false;
1969}
1970
1971bool
1972mn10300_regno_in_class_p (unsigned regno, int rclass, bool strict)
1973{
1974 if (regno >= FIRST_PSEUDO_REGISTER)
1975 {
1976 if (!strict)
1977 return true;
1978 if (!reg_renumber)
1979 return false;
1980 regno = reg_renumber[regno];
ba4ec0e0
NC
1981 if (regno == INVALID_REGNUM)
1982 return false;
36846b26
RH
1983 }
1984 return TEST_HARD_REG_BIT (reg_class_contents[rclass], regno);
1985}
1986
1987rtx
1988mn10300_legitimize_reload_address (rtx x,
1989 enum machine_mode mode ATTRIBUTE_UNUSED,
1990 int opnum, int type,
1991 int ind_levels ATTRIBUTE_UNUSED)
1992{
1993 bool any_change = false;
1994
1995 /* See above re disabling reg+reg addressing for MN103. */
1996 if (!TARGET_AM33)
1997 return NULL_RTX;
1998
1999 if (GET_CODE (x) != PLUS)
2000 return NULL_RTX;
2001
2002 if (XEXP (x, 0) == stack_pointer_rtx)
2003 {
2004 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
2005 GENERAL_REGS, GET_MODE (x), VOIDmode, 0, 0,
2006 opnum, (enum reload_type) type);
2007 any_change = true;
2008 }
2009 if (XEXP (x, 1) == stack_pointer_rtx)
2010 {
2011 push_reload (XEXP (x, 1), NULL_RTX, &XEXP (x, 1), NULL,
2012 GENERAL_REGS, GET_MODE (x), VOIDmode, 0, 0,
2013 opnum, (enum reload_type) type);
2014 any_change = true;
e733134f
AO
2015 }
2016
36846b26 2017 return any_change ? x : NULL_RTX;
e733134f
AO
2018}
2019
1a627b35 2020/* Implement TARGET_LEGITIMATE_CONSTANT_P. Returns TRUE if X is a valid
4af476d7
NC
2021 constant. Note that some "constants" aren't valid, such as TLS
2022 symbols and unconverted GOT-based references, so we eliminate
2023 those here. */
2024
1a627b35
RS
2025static bool
2026mn10300_legitimate_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
4af476d7
NC
2027{
2028 switch (GET_CODE (x))
2029 {
2030 case CONST:
2031 x = XEXP (x, 0);
2032
2033 if (GET_CODE (x) == PLUS)
2034 {
e7ab5593 2035 if (! CONST_INT_P (XEXP (x, 1)))
4af476d7
NC
2036 return false;
2037 x = XEXP (x, 0);
2038 }
2039
2040 /* Only some unspecs are valid as "constants". */
2041 if (GET_CODE (x) == UNSPEC)
2042 {
4af476d7
NC
2043 switch (XINT (x, 1))
2044 {
4af476d7
NC
2045 case UNSPEC_PIC:
2046 case UNSPEC_GOT:
2047 case UNSPEC_GOTOFF:
2048 case UNSPEC_PLT:
2049 return true;
2050 default:
2051 return false;
2052 }
2053 }
2054
2055 /* We must have drilled down to a symbol. */
e7ab5593 2056 if (! mn10300_symbolic_operand (x, Pmode))
4af476d7
NC
2057 return false;
2058 break;
2059
2060 default:
2061 break;
2062 }
2063
2064 return true;
2065}
2066
/* Undo pic address legitimization for the benefit of debug info.

   ORIG_X is expected to be (possibly a MEM containing)
   (plus PIC_REG (const (unspec [SYM] UNSPEC_GOT/UNSPEC_GOTOFF))),
   perhaps with an extra addend register folded in by var-tracking.
   Return the underlying SYMBOL_REF (plus the addend, if any), or
   ORIG_X unchanged when the pattern is not recognized.  */

static rtx
mn10300_delegitimize_address (rtx orig_x)
{
  rtx x = orig_x, ret, addend = NULL;
  bool need_mem;

  /* Look through a MEM wrapper (a GOT-indirect load).  */
  if (MEM_P (x))
    x = XEXP (x, 0);
  if (GET_CODE (x) != PLUS || GET_MODE (x) != Pmode)
    return orig_x;

  if (XEXP (x, 0) == pic_offset_table_rtx)
    ;
  /* With the REG+REG addressing of AM33, var-tracking can re-assemble
     some odd-looking "addresses" that were never valid in the first place.
     We need to look harder to avoid warnings being emitted.  */
  else if (GET_CODE (XEXP (x, 0)) == PLUS)
    {
      rtx x0 = XEXP (x, 0);
      rtx x00 = XEXP (x0, 0);
      rtx x01 = XEXP (x0, 1);

      /* The PIC register may sit on either side of the inner PLUS;
	 whatever is on the other side is an addend to re-apply at
	 the end.  */
      if (x00 == pic_offset_table_rtx)
	addend = x01;
      else if (x01 == pic_offset_table_rtx)
	addend = x00;
      else
	return orig_x;

    }
  else
    return orig_x;
  x = XEXP (x, 1);

  if (GET_CODE (x) != CONST)
    return orig_x;
  x = XEXP (x, 0);
  if (GET_CODE (x) != UNSPEC)
    return orig_x;

  /* Only GOT and GOTOFF references are handled: a GOT reference must
     have appeared as a memory load, a GOTOFF reference must not.  */
  ret = XVECEXP (x, 0, 0);
  if (XINT (x, 1) == UNSPEC_GOTOFF)
    need_mem = false;
  else if (XINT (x, 1) == UNSPEC_GOT)
    need_mem = true;
  else
    return orig_x;

  gcc_assert (GET_CODE (ret) == SYMBOL_REF);
  if (need_mem != MEM_P (orig_x))
    return orig_x;
  if (need_mem && addend)
    return orig_x;
  if (addend)
    ret = gen_rtx_PLUS (Pmode, addend, ret);
  return ret;
}
2126
72d6e3c5
RH
2127/* For addresses, costs are relative to "MOV (Rm),Rn". For AM33 this is
2128 the 3-byte fully general instruction; for MN103 this is the 2-byte form
2129 with an address register. */
2130
dcefdf67 2131static int
72d6e3c5 2132mn10300_address_cost (rtx x, bool speed)
460ad325 2133{
72d6e3c5
RH
2134 HOST_WIDE_INT i;
2135 rtx base, index;
2136
460ad325
AO
2137 switch (GET_CODE (x))
2138 {
72d6e3c5
RH
2139 case CONST:
2140 case SYMBOL_REF:
2141 case LABEL_REF:
2142 /* We assume all of these require a 32-bit constant, even though
2143 some symbol and label references can be relaxed. */
2144 return speed ? 1 : 4;
2145
460ad325 2146 case REG:
72d6e3c5
RH
2147 case SUBREG:
2148 case POST_INC:
2149 return 0;
2150
2151 case POST_MODIFY:
2152 /* Assume any symbolic offset is a 32-bit constant. */
2153 i = (CONST_INT_P (XEXP (x, 1)) ? INTVAL (XEXP (x, 1)) : 0x12345678);
2154 if (IN_RANGE (i, -128, 127))
2155 return speed ? 0 : 1;
2156 if (speed)
2157 return 1;
2158 if (IN_RANGE (i, -0x800000, 0x7fffff))
2159 return 3;
2160 return 4;
2161
2162 case PLUS:
2163 base = XEXP (x, 0);
2164 index = XEXP (x, 1);
2165 if (register_operand (index, SImode))
460ad325 2166 {
72d6e3c5
RH
2167 /* Attempt to minimize the number of registers in the address.
2168 This is similar to what other ports do. */
2169 if (register_operand (base, SImode))
2170 return 1;
460ad325 2171
72d6e3c5
RH
2172 base = XEXP (x, 1);
2173 index = XEXP (x, 0);
2174 }
460ad325 2175
72d6e3c5
RH
2176 /* Assume any symbolic offset is a 32-bit constant. */
2177 i = (CONST_INT_P (XEXP (x, 1)) ? INTVAL (XEXP (x, 1)) : 0x12345678);
2178 if (IN_RANGE (i, -128, 127))
2179 return speed ? 0 : 1;
2180 if (IN_RANGE (i, -32768, 32767))
2181 return speed ? 0 : 2;
2182 return speed ? 2 : 6;
460ad325 2183
72d6e3c5
RH
2184 default:
2185 return rtx_cost (x, MEM, speed);
2186 }
2187}
460ad325 2188
/* Implement the TARGET_REGISTER_MOVE_COST hook.

   Recall that the base value of 2 is required by assumptions elsewhere
   in the body of the compiler, and that cost 2 is special-cased as an
   early exit from reload meaning no work is required.  */

static int
mn10300_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
			    reg_class_t ifrom, reg_class_t ito)
{
  enum reg_class from = (enum reg_class) ifrom;
  enum reg_class to = (enum reg_class) ito;
  enum reg_class scratch, test;

  /* Simplify the following code by unifying the fp register classes.  */
  if (to == FP_ACC_REGS)
    to = FP_REGS;
  if (from == FP_ACC_REGS)
    from = FP_REGS;

  /* Diagnose invalid moves by costing them as two moves: pick a
     scratch class through which the move must be staged, and recurse
     for each half.  */

  scratch = NO_REGS;
  test = from;
  if (to == SP_REGS)
    scratch = (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
  else if (to == MDR_REGS)
    scratch = DATA_REGS;
  else if (to == FP_REGS && to != from)
    scratch = GENERAL_REGS;
  else
    {
      test = to;
      if (from == SP_REGS)
	scratch = (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
      else if (from == MDR_REGS)
	scratch = DATA_REGS;
      else if (from == FP_REGS && to != from)
	scratch = GENERAL_REGS;
    }
  if (scratch != NO_REGS && !reg_class_subset_p (test, scratch))
    return (mn10300_register_move_cost (VOIDmode, from, scratch)
	    + mn10300_register_move_cost (VOIDmode, scratch, to));

  /* From here on, all we need consider are legal combinations.  */

  if (optimize_size)
    {
      /* The scale here is bytes * 2.  */

      if (from == to && (to == ADDRESS_REGS || to == DATA_REGS))
	return 2;

      if (from == SP_REGS)
	return (to == ADDRESS_REGS ? 2 : 6);

      /* For MN103, all remaining legal moves are two bytes.  */
      /* NOTE(review): the comment above refers to MN103 but the guard
	 tests TARGET_AM33, and the EXTENDED_REGS handling below is
	 then only reachable for MN103, where the extended registers
	 are fixed (see mn10300_conditional_register_usage).  One of
	 the two looks inverted -- verify before changing.  */
      if (TARGET_AM33)
	return 4;

      if (to == SP_REGS)
	return (from == ADDRESS_REGS ? 4 : 6);

      if ((from == ADDRESS_REGS || from == DATA_REGS)
	  && (to == ADDRESS_REGS || to == DATA_REGS))
	return 4;

      if (to == EXTENDED_REGS)
	return (to == from ? 6 : 4);

      /* What's left are SP_REGS, FP_REGS, or combinations of the above.  */
      return 6;
    }
  else
    {
      /* The scale here is cycles * 2.  */

      if (to == FP_REGS)
	return 8;
      if (from == FP_REGS)
	return 4;

      /* All legal moves between integral registers are single cycle.  */
      return 2;
    }
}
3c50106f 2275
72d6e3c5
RH
2276/* Implement the TARGET_MEMORY_MOVE_COST hook.
2277
2278 Given lack of the form of the address, this must be speed-relative,
2279 though we should never be less expensive than a size-relative register
2280 move cost above. This is not a problem. */
2281
dcefdf67 2282static int
72d6e3c5
RH
2283mn10300_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2284 reg_class_t iclass, bool in ATTRIBUTE_UNUSED)
dcefdf67 2285{
72d6e3c5
RH
2286 enum reg_class rclass = (enum reg_class) iclass;
2287
2288 if (rclass == FP_REGS)
2289 return 8;
2290 return 6;
dcefdf67
RH
2291}
2292
/* Implement the TARGET_RTX_COSTS hook.

   Speed-relative costs are relative to COSTS_N_INSNS, which is intended
   to represent cycles.  Size-relative costs are in bytes.  Returning
   true (the "alldone" exit) tells the caller *PTOTAL is final;
   returning false lets it add the costs of sub-expressions.  */

static bool
mn10300_rtx_costs (rtx x, int code, int outer_code, int *ptotal, bool speed)
{
  /* This value is used for SYMBOL_REF etc where we want to pretend
     we have a full 32-bit constant.  */
  HOST_WIDE_INT i = 0x12345678;
  int total;

  switch (code)
    {
    case CONST_INT:
      i = INTVAL (x);
    do_int_costs:
      if (speed)
	{
	  if (outer_code == SET)
	    {
	      /* 16-bit integer loads have latency 1, 32-bit loads 2.  */
	      if (IN_RANGE (i, -32768, 32767))
		total = COSTS_N_INSNS (1);
	      else
		total = COSTS_N_INSNS (2);
	    }
	  else
	    {
	      /* 16-bit integer operands don't affect latency;
		 24-bit and 32-bit operands add a cycle.  */
	      if (IN_RANGE (i, -32768, 32767))
		total = 0;
	      else
		total = COSTS_N_INSNS (1);
	    }
	}
      else
	{
	  if (outer_code == SET)
	    {
	      if (i == 0)
		total = 1;
	      else if (IN_RANGE (i, -128, 127))
		total = 2;
	      else if (IN_RANGE (i, -32768, 32767))
		total = 3;
	      else
		total = 6;
	    }
	  else
	    {
	      /* Reference here is ADD An,Dn, vs ADD imm,Dn.  */
	      if (IN_RANGE (i, -128, 127))
		total = 0;
	      else if (IN_RANGE (i, -32768, 32767))
		total = 2;
	      else if (TARGET_AM33 && IN_RANGE (i, -0x01000000, 0x00ffffff))
		total = 3;
	      else
		total = 4;
	    }
	}
      goto alldone;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST_DOUBLE:
      /* We assume all of these require a 32-bit constant, even though
	 some symbol and label references can be relaxed.  (I was left
	 at 0x12345678, which falls through to the 32-bit cases.)  */
      goto do_int_costs;

    case UNSPEC:
      switch (XINT (x, 1))
	{
	case UNSPEC_PIC:
	case UNSPEC_GOT:
	case UNSPEC_GOTOFF:
	case UNSPEC_PLT:
	case UNSPEC_GOTSYM_OFF:
	  /* The PIC unspecs also resolve to a 32-bit constant.  */
	  goto do_int_costs;

	default:
	  /* Assume any non-listed unspec is some sort of arithmetic.  */
	  goto do_arith_costs;
	}

    case PLUS:
      /* Notice the size difference of INC and INC4.  */
      if (!speed && outer_code == SET && CONST_INT_P (XEXP (x, 1)))
	{
	  i = INTVAL (XEXP (x, 1));
	  if (i == 1 || i == 4)
	    {
	      total = 1 + rtx_cost (XEXP (x, 0), PLUS, speed);
	      goto alldone;
	    }
	}
      goto do_arith_costs;

    case MINUS:
    case AND:
    case IOR:
    case XOR:
    case NOT:
    case NEG:
    case ZERO_EXTEND:
    case SIGN_EXTEND:
    case COMPARE:
    case BSWAP:
    case CLZ:
    do_arith_costs:
      total = (speed ? COSTS_N_INSNS (1) : 2);
      break;

    case ASHIFT:
      /* Notice the size difference of ASL2 and variants.  */
      if (!speed && CONST_INT_P (XEXP (x, 1)))
	switch (INTVAL (XEXP (x, 1)))
	  {
	  case 1:
	  case 2:
	    total = 1;
	    goto alldone;
	  case 3:
	  case 4:
	    total = 2;
	    goto alldone;
	  }
      /* FALLTHRU */

    case ASHIFTRT:
    case LSHIFTRT:
      total = (speed ? COSTS_N_INSNS (1) : 3);
      goto alldone;

    case MULT:
      total = (speed ? COSTS_N_INSNS (3) : 2);
      break;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      total = (speed ? COSTS_N_INSNS (39)
	       /* Include space to load+retrieve MDR.  */
	       : code == MOD || code == UMOD ? 6 : 4);
      break;

    case MEM:
      total = mn10300_address_cost (XEXP (x, 0), speed);
      if (speed)
	total = COSTS_N_INSNS (2 + total);
      goto alldone;

    default:
      /* Probably not implemented.  Assume external call.  */
      total = (speed ? COSTS_N_INSNS (10) : 7);
      break;
    }

  *ptotal = total;
  return false;

 alldone:
  *ptotal = total;
  return true;
}
72d6e3c5 2464
d1776069
AO
2465/* If using PIC, mark a SYMBOL_REF for a non-global symbol so that we
2466 may access it using GOTOFF instead of GOT. */
2467
2468static void
3bc5e4ef 2469mn10300_encode_section_info (tree decl, rtx rtl, int first ATTRIBUTE_UNUSED)
d1776069
AO
2470{
2471 rtx symbol;
2472
e7ab5593 2473 if (! MEM_P (rtl))
d1776069
AO
2474 return;
2475 symbol = XEXP (rtl, 0);
2476 if (GET_CODE (symbol) != SYMBOL_REF)
2477 return;
2478
2479 if (flag_pic)
2480 SYMBOL_REF_FLAG (symbol) = (*targetm.binds_local_p) (decl);
2481}
/* Dispatch tables on the mn10300 are extremely expensive in terms of code
   and readonly data size.  So we crank up the case threshold value to
   encourage a series of if/else comparisons to implement many small switch
   statements.  In theory, this value could be increased much more if we
   were solely optimizing for space, but we keep it "reasonable" to avoid
   serious code efficiency lossage.  */

/* Implement TARGET_CASE_VALUES_THRESHOLD: use if/else chains for
   switches with up to this many case labels.  */

static unsigned int
mn10300_case_values_threshold (void)
{
  return 6;
}
bdeb5f0c 2495
bdeb5f0c
RH
2496/* Worker function for TARGET_TRAMPOLINE_INIT. */
2497
2498static void
2499mn10300_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
2500{
d6a3e264
RH
2501 rtx mem, disp, fnaddr = XEXP (DECL_RTL (fndecl), 0);
2502
2503 /* This is a strict alignment target, which means that we play
2504 some games to make sure that the locations at which we need
2505 to store <chain> and <disp> wind up at aligned addresses.
2506
2507 0x28 0x00 add 0,d0
2508 0xfc 0xdd mov chain,a1
2509 <chain>
2510 0xf8 0xed 0x00 btst 0,d1
2511 0xdc jmp fnaddr
2512 <disp>
2513
2514 Note that the two extra insns are effectively nops; they
2515 clobber the flags but do not affect the contents of D0 or D1. */
bdeb5f0c 2516
d6a3e264
RH
2517 disp = expand_binop (SImode, sub_optab, fnaddr,
2518 plus_constant (XEXP (m_tramp, 0), 11),
2519 NULL_RTX, 1, OPTAB_DIRECT);
bdeb5f0c 2520
d6a3e264
RH
2521 mem = adjust_address (m_tramp, SImode, 0);
2522 emit_move_insn (mem, gen_int_mode (0xddfc0028, SImode));
2523 mem = adjust_address (m_tramp, SImode, 4);
bdeb5f0c 2524 emit_move_insn (mem, chain_value);
d6a3e264
RH
2525 mem = adjust_address (m_tramp, SImode, 8);
2526 emit_move_insn (mem, gen_int_mode (0xdc00edf8, SImode));
2527 mem = adjust_address (m_tramp, SImode, 12);
2528 emit_move_insn (mem, disp);
bdeb5f0c 2529}
/* Output the assembler code for a C++ thunk function.
   THUNK_DECL is the declaration for the thunk function itself, FUNCTION
   is the decl for the target function.  DELTA is an immediate constant
   offset to be added to the THIS parameter.  If VCALL_OFFSET is nonzero
   the word at the adjusted address *(*THIS' + VCALL_OFFSET) should be
   additionally added to THIS.  Finally jump to the entry point of
   FUNCTION.  */

static void
mn10300_asm_output_mi_thunk (FILE *        file,
			     tree          thunk_fndecl ATTRIBUTE_UNUSED,
			     HOST_WIDE_INT delta,
			     HOST_WIDE_INT vcall_offset,
			     tree          function)
{
  const char * _this;

  /* Get the register holding the THIS parameter.  Handle the case
     where there is a hidden first argument for a returned structure.  */
  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    _this = reg_names [FIRST_ARGUMENT_REGNUM + 1];
  else
    _this = reg_names [FIRST_ARGUMENT_REGNUM];

  fprintf (file, "\t%s Thunk Entry Point:\n", ASM_COMMENT_START);

  /* THIS += DELTA.  */
  if (delta)
    fprintf (file, "\tadd %d, %s\n", (int) delta, _this);

  /* THIS += *(*THIS + VCALL_OFFSET), via a scratch address register.  */
  if (vcall_offset)
    {
      const char * scratch = reg_names [FIRST_ADDRESS_REGNUM + 1];

      fprintf (file, "\tmov %s, %s\n", _this, scratch);
      fprintf (file, "\tmov (%s), %s\n", scratch, scratch);
      fprintf (file, "\tadd %d, %s\n", (int) vcall_offset, scratch);
      fprintf (file, "\tmov (%s), %s\n", scratch, scratch);
      fprintf (file, "\tadd %s, %s\n", scratch, _this);
    }

  /* Tail-jump to the real function.  */
  fputs ("\tjmp ", file);
  assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
  putc ('\n', file);
}
/* Return true if mn10300_output_mi_thunk would be able to output the
   assembler code for the thunk function specified by the arguments
   it is passed, and false otherwise.  On this target every thunk the
   middle-end can describe is supported, so always return true.  */

static bool
mn10300_can_output_mi_thunk (const_tree    thunk_fndecl ATTRIBUTE_UNUSED,
			     HOST_WIDE_INT delta        ATTRIBUTE_UNUSED,
			     HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
			     const_tree    function     ATTRIBUTE_UNUSED)
{
  return true;
}
4af476d7
NC
2588
2589bool
2590mn10300_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
2591{
2592 if (REGNO_REG_CLASS (regno) == FP_REGS
2593 || REGNO_REG_CLASS (regno) == FP_ACC_REGS)
2594 /* Do not store integer values in FP registers. */
2595 return GET_MODE_CLASS (mode) == MODE_FLOAT && ((regno & 1) == 0);
2596
2597 if (((regno) & 1) == 0 || GET_MODE_SIZE (mode) == 4)
2598 return true;
2599
2600 if (REGNO_REG_CLASS (regno) == DATA_REGS
2601 || (TARGET_AM33 && REGNO_REG_CLASS (regno) == ADDRESS_REGS)
2602 || REGNO_REG_CLASS (regno) == EXTENDED_REGS)
2603 return GET_MODE_SIZE (mode) <= 4;
2604
2605 return false;
2606}
2607
2608bool
2609mn10300_modes_tieable (enum machine_mode mode1, enum machine_mode mode2)
2610{
2611 if (GET_MODE_CLASS (mode1) == MODE_FLOAT
2612 && GET_MODE_CLASS (mode2) != MODE_FLOAT)
2613 return false;
2614
2615 if (GET_MODE_CLASS (mode2) == MODE_FLOAT
2616 && GET_MODE_CLASS (mode1) != MODE_FLOAT)
2617 return false;
2618
2619 if (TARGET_AM33
2620 || mode1 == mode2
2621 || (GET_MODE_SIZE (mode1) <= 4 && GET_MODE_SIZE (mode2) <= 4))
2622 return true;
2623
2624 return false;
2625}
2626
bad41521
RH
2627static int
2628cc_flags_for_mode (enum machine_mode mode)
2629{
2630 switch (mode)
2631 {
2632 case CCmode:
2633 return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_C | CC_FLAG_V;
2634 case CCZNCmode:
2635 return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_C;
2636 case CCZNmode:
2637 return CC_FLAG_Z | CC_FLAG_N;
2638 case CC_FLOATmode:
2639 return -1;
2640 default:
2641 gcc_unreachable ();
2642 }
2643}
2644
2645static int
2646cc_flags_for_code (enum rtx_code code)
2647{
2648 switch (code)
2649 {
2650 case EQ: /* Z */
2651 case NE: /* ~Z */
2652 return CC_FLAG_Z;
2653
2654 case LT: /* N */
2655 case GE: /* ~N */
2656 return CC_FLAG_N;
2657 break;
2658
2659 case GT: /* ~(Z|(N^V)) */
2660 case LE: /* Z|(N^V) */
2661 return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_V;
2662
2663 case GEU: /* ~C */
2664 case LTU: /* C */
2665 return CC_FLAG_C;
2666
2667 case GTU: /* ~(C | Z) */
2668 case LEU: /* C | Z */
2669 return CC_FLAG_Z | CC_FLAG_C;
2670
2671 case ORDERED:
2672 case UNORDERED:
2673 case LTGT:
2674 case UNEQ:
2675 case UNGE:
2676 case UNGT:
2677 case UNLE:
2678 case UNLT:
2679 return -1;
2680
2681 default:
2682 gcc_unreachable ();
2683 }
2684}
2685
4af476d7 2686enum machine_mode
bad41521 2687mn10300_select_cc_mode (enum rtx_code code, rtx x, rtx y ATTRIBUTE_UNUSED)
4af476d7 2688{
bad41521
RH
2689 int req;
2690
2691 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2692 return CC_FLOATmode;
2693
2694 req = cc_flags_for_code (code);
2695
2696 if (req & CC_FLAG_V)
2697 return CCmode;
2698 if (req & CC_FLAG_C)
2699 return CCZNCmode;
2700 return CCZNmode;
4af476d7 2701}
f3f63737
NC
2702
2703static inline bool
2704is_load_insn (rtx insn)
2705{
2706 if (GET_CODE (PATTERN (insn)) != SET)
2707 return false;
2708
2709 return MEM_P (SET_SRC (PATTERN (insn)));
2710}
2711
2712static inline bool
2713is_store_insn (rtx insn)
2714{
2715 if (GET_CODE (PATTERN (insn)) != SET)
2716 return false;
2717
2718 return MEM_P (SET_DEST (PATTERN (insn)));
2719}
2720
/* Update scheduling costs for situations that cannot be
   described using the attributes and DFA machinery.
   DEP is the insn being scheduled.
   INSN is the previous insn.
   COST is the current cycle cost for DEP.  */

static int
mn10300_adjust_sched_cost (rtx insn, rtx link, rtx dep, int cost)
{
  /* NOTE(review): TIMINGS is read from INSN before the PARALLEL
     unwrapping below -- confirm get_attr_timings copes with
     PARALLEL insns.  */
  int timings = get_attr_timings (insn);

  /* NOTE(review): this discards COST entirely for MN103; verify
     a constant cost of 1 is intended rather than "return cost".  */
  if (!TARGET_AM33)
    return 1;

  /* Look at the first element of a PARALLEL (e.g. an insn plus its
     flags clobber) for the checks below.  */
  if (GET_CODE (insn) == PARALLEL)
    insn = XVECEXP (insn, 0, 0);

  if (GET_CODE (dep) == PARALLEL)
    dep = XVECEXP (dep, 0, 0);

  /* For the AM34 a load instruction that follows a
     store instruction incurs an extra cycle of delay.  */
  if (mn10300_tune_cpu == PROCESSOR_AM34
      && is_load_insn (dep)
      && is_store_insn (insn))
    cost += 1;

  /* For the AM34 a non-store, non-branch FPU insn that follows
     another FPU insn incurs a one cycle throughput increase.  */
  else if (mn10300_tune_cpu == PROCESSOR_AM34
	   && ! is_store_insn (insn)
	   && ! JUMP_P (insn)
	   && GET_CODE (PATTERN (dep)) == SET
	   && GET_CODE (PATTERN (insn)) == SET
	   && GET_MODE_CLASS (GET_MODE (SET_SRC (PATTERN (dep)))) == MODE_FLOAT
	   && GET_MODE_CLASS (GET_MODE (SET_SRC (PATTERN (insn)))) == MODE_FLOAT)
    cost += 1;

  /* Resolve the conflict described in section 1-7-4 of
     Chapter 3 of the MN103E Series Instruction Manual
     where it says:

     "When the preceeding instruction is a CPU load or
      store instruction, a following FPU instruction
      cannot be executed until the CPU completes the
      latency period even though there are no register
      or flag dependencies between them."  */

  /* Only the AM33-2 (and later) CPUs have FPU instructions.  */
  if (! TARGET_AM33_2)
    return cost;

  /* If a data dependence already exists then the cost is correct.  */
  if (REG_NOTE_KIND (link) == 0)
    return cost;

  /* Check that the instruction about to scheduled is an FPU instruction.  */
  if (GET_CODE (PATTERN (dep)) != SET)
    return cost;

  if (GET_MODE_CLASS (GET_MODE (SET_SRC (PATTERN (dep)))) != MODE_FLOAT)
    return cost;

  /* Now check to see if the previous instruction is a load or store.  */
  if (! is_load_insn (insn) && ! is_store_insn (insn))
    return cost;

  /* XXX: Verify: The text of 1-7-4 implies that the restriction
     only applies when an INTEGER load/store preceeds an FPU
     instruction, but is this true ?  For now we assume that it is.  */
  if (GET_MODE_CLASS (GET_MODE (SET_SRC (PATTERN (insn)))) != MODE_INT)
    return cost;

  /* Extract the latency value from the timings attribute.  */
  return timings < 100 ? (timings % 10) : (timings % 100);
}
5efd84c5
NF
2797
2798static void
2799mn10300_conditional_register_usage (void)
2800{
2801 unsigned int i;
2802
2803 if (!TARGET_AM33)
2804 {
2805 for (i = FIRST_EXTENDED_REGNUM;
2806 i <= LAST_EXTENDED_REGNUM; i++)
2807 fixed_regs[i] = call_used_regs[i] = 1;
2808 }
2809 if (!TARGET_AM33_2)
2810 {
2811 for (i = FIRST_FP_REGNUM;
2812 i <= LAST_FP_REGNUM; i++)
2813 fixed_regs[i] = call_used_regs[i] = 1;
2814 }
2815 if (flag_pic)
2816 fixed_regs[PIC_OFFSET_TABLE_REGNUM] =
2817 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
2818}
a49b692a
RH
2819
2820/* Worker function for TARGET_MD_ASM_CLOBBERS.
2821 We do this in the mn10300 backend to maintain source compatibility
2822 with the old cc0-based compiler. */
2823
2824static tree
2825mn10300_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
2826 tree inputs ATTRIBUTE_UNUSED,
2827 tree clobbers)
2828{
2829 clobbers = tree_cons (NULL_TREE, build_string (5, "EPSW"),
2830 clobbers);
2831 return clobbers;
2832}
4af476d7 2833\f
/* A helper function for splitting cbranch patterns after reload.
   Emit a COMPARE of CMP_OP's two operands into the flags register
   (in mode CMP_MODE), followed by a conditional branch to LABEL_REF
   that tests the flags with CMP_OP's comparison code.  */

void
mn10300_split_cbranch (enum machine_mode cmp_mode, rtx cmp_op, rtx label_ref)
{
  rtx flags, x;

  /* flags:CMP_MODE = compare (op0, op1)  */
  flags = gen_rtx_REG (cmp_mode, CC_REG);
  x = gen_rtx_COMPARE (cmp_mode, XEXP (cmp_op, 0), XEXP (cmp_op, 1));
  x = gen_rtx_SET (VOIDmode, flags, x);
  emit_insn (x);

  /* pc = (code flags 0) ? label : pc  */
  x = gen_rtx_fmt_ee (GET_CODE (cmp_op), VOIDmode, flags, const0_rtx);
  x = gen_rtx_IF_THEN_ELSE (VOIDmode, x, label_ref, pc_rtx);
  x = gen_rtx_SET (VOIDmode, pc_rtx, x);
  emit_jump_insn (x);
}
2851
/* A helper function for matching parallels that set the flags.
   INSN must be a two-element PARALLEL whose second element sets a
   flags register from a COMPARE.  Return true when the mode of that
   flags register is a CC mode compatible with CC_MODE, i.e. it
   records no flag bits beyond those CC_MODE supplies.  */

bool
mn10300_match_ccmode (rtx insn, enum machine_mode cc_mode)
{
  rtx op1, flags;
  enum machine_mode flags_mode;

  gcc_checking_assert (XVECLEN (PATTERN (insn), 0) == 2);

  op1 = XVECEXP (PATTERN (insn), 0, 1);
  gcc_checking_assert (GET_CODE (SET_SRC (op1)) == COMPARE);

  flags = SET_DEST (op1);
  flags_mode = GET_MODE (flags);

  /* The COMPARE must produce exactly the mode the flags register holds,
     and that mode must be a condition-code mode.  */
  if (GET_MODE (SET_SRC (op1)) != flags_mode)
    return false;
  if (GET_MODE_CLASS (flags_mode) != MODE_CC)
    return false;

  /* Ensure that the mode of FLAGS is compatible with CC_MODE.  */
  if (cc_flags_for_mode (flags_mode) & ~cc_flags_for_mode (cc_mode))
    return false;

  return true;
}
2879
2880int
2881mn10300_split_and_operand_count (rtx op)
2882{
2883 HOST_WIDE_INT val = INTVAL (op);
2884 int count;
2885
2886 if (val < 0)
2887 {
2888 /* High bit is set, look for bits clear at the bottom. */
2889 count = exact_log2 (-val);
2890 if (count < 0)
2891 return 0;
2892 /* This is only size win if we can use the asl2 insn. Otherwise we
2893 would be replacing 1 6-byte insn with 2 3-byte insns. */
2894 if (count > (optimize_insn_for_speed_p () ? 2 : 4))
2895 return 0;
2896 return -count;
2897 }
2898 else
2899 {
2900 /* High bit is clear, look for bits set at the bottom. */
2901 count = exact_log2 (val + 1);
2902 count = 32 - count;
2903 /* Again, this is only a size win with asl2. */
2904 if (count > (optimize_insn_for_speed_p () ? 2 : 4))
2905 return 0;
2906 return -count;
2907 }
2908}
2909\f
/* A bundling candidate for the AM33 LIW (long instruction word)
   pass, as extracted by extract_bundle below.  */
struct liw_data
{
  enum attr_liw slot;		/* Pipeline slot(s) the insn may occupy;
				   LIW_BOTH means it cannot be bundled.  */
  enum attr_liw_op op;		/* The LIW operation performed.  */
  rtx dest;			/* Destination register (for compares,
				   really the first input).  */
  rtx src;			/* Source operand: a register, or a
				   constant satisfying constraint O.  */
};
2917
/* Decide if the given insn is a candidate for LIW bundling.  If it is then
   extract the operands and LIW attributes from the insn and use them to fill
   in the liw_data structure.  Return true upon success or false if the insn
   cannot be bundled.  INSN may be NULL_RTX (rejected).  */

static bool
extract_bundle (rtx insn, struct liw_data * pdata)
{
  bool allow_consts = true;
  rtx p;

  gcc_assert (pdata != NULL);

  if (insn == NULL_RTX)
    return false;
  /* Make sure that we are dealing with a simple SET insn.  */
  p = single_set (insn);
  if (p == NULL_RTX)
    return false;

  /* Make sure that it could go into one of the LIW pipelines.  */
  pdata->slot = get_attr_liw (insn);
  if (pdata->slot == LIW_BOTH)
    return false;

  pdata->op = get_attr_liw_op (insn);

  switch (pdata->op)
    {
    case LIW_OP_MOV:
      /* A move: dest = src.  */
      pdata->dest = SET_DEST (p);
      pdata->src = SET_SRC (p);
      break;
    case LIW_OP_CMP:
      /* A compare: both operands live inside the COMPARE rtx;
	 "dest" is really the first input.  */
      pdata->dest = XEXP (SET_SRC (p), 0);
      pdata->src = XEXP (SET_SRC (p), 1);
      break;
    case LIW_OP_NONE:
      return false;
    case LIW_OP_AND:
    case LIW_OP_OR:
    case LIW_OP_XOR:
      /* The AND, OR and XOR long instruction words only accept register arguments.  */
      allow_consts = false;
      /* Fall through.  */
    default:
      /* Two-operand arithmetic: dest = dest OP src.  */
      pdata->dest = SET_DEST (p);
      pdata->src = XEXP (SET_SRC (p), 1);
      break;
    }

  /* The destination must be a register; the source must be a register
     or, where permitted, a constant satisfying constraint O.  */
  if (! REG_P (pdata->dest))
    return false;

  if (REG_P (pdata->src))
    return true;

  return allow_consts && satisfies_constraint_O (pdata->src);
}
2977
a45d420a
NC
2978/* Make sure that it is OK to execute LIW1 and LIW2 in parallel. GCC generated
2979 the instructions with the assumption that LIW1 would be executed before LIW2
2980 so we must check for overlaps between their sources and destinations. */
298362c8
NC
2981
2982static bool
a45d420a
NC
2983check_liw_constraints (struct liw_data * pliw1, struct liw_data * pliw2)
2984{
2985 /* Check for slot conflicts. */
2986 if (pliw2->slot == pliw1->slot && pliw1->slot != LIW_EITHER)
298362c8
NC
2987 return false;
2988
a45d420a
NC
2989 /* If either operation is a compare, then "dest" is really an input; the real
2990 destination is CC_REG. So these instructions need different checks. */
2991
2992 /* Changing "CMP ; OP" into "CMP | OP" is OK because the comparison will
2993 check its values prior to any changes made by OP. */
2994 if (pliw1->op == LIW_OP_CMP)
2995 {
2996 /* Two sequential comparisons means dead code, which ought to
2997 have been eliminated given that bundling only happens with
2998 optimization. We cannot bundle them in any case. */
2999 gcc_assert (pliw1->op != pliw2->op);
3000 return true;
3001 }
298362c8 3002
a45d420a
NC
3003 /* Changing "OP ; CMP" into "OP | CMP" does not work if the value being compared
3004 is the destination of OP, as the CMP will look at the old value, not the new
3005 one. */
3006 if (pliw2->op == LIW_OP_CMP)
298362c8 3007 {
a45d420a
NC
3008 if (REGNO (pliw2->dest) == REGNO (pliw1->dest))
3009 return false;
3010
3011 if (REG_P (pliw2->src))
3012 return REGNO (pliw2->src) != REGNO (pliw1->dest);
3013
3014 return true;
3015 }
3016
3017 /* Changing "OP1 ; OP2" into "OP1 | OP2" does not work if they both write to the
3018 same destination register. */
3019 if (REGNO (pliw2->dest) == REGNO (pliw1->dest))
3020 return false;
3021
3022 /* Changing "OP1 ; OP2" into "OP1 | OP2" generally does not work if the destination
3023 of OP1 is the source of OP2. The exception is when OP1 is a MOVE instruction when
3024 we can replace the source in OP2 with the source of OP1. */
3025 if (REG_P (pliw2->src) && REGNO (pliw2->src) == REGNO (pliw1->dest))
3026 {
3027 if (pliw1->op == LIW_OP_MOV && REG_P (pliw1->src))
298362c8 3028 {
a45d420a
NC
3029 if (! REG_P (pliw1->src)
3030 && (pliw2->op == LIW_OP_AND
3031 || pliw2->op == LIW_OP_OR
3032 || pliw2->op == LIW_OP_XOR))
3033 return false;
3034
3035 pliw2->src = pliw1->src;
298362c8
NC
3036 return true;
3037 }
3038 return false;
3039 }
3040
a45d420a 3041 /* Everything else is OK. */
298362c8
NC
3042 return true;
3043}
3044
/* Combine pairs of insns into LIW bundles.  Walk the insn stream and,
   whenever two adjacent candidate insns may legally issue together,
   replace them with a single LIW pattern.  */

static void
mn10300_bundle_liw (void)
{
  rtx r;

  for (r = get_insns (); r != NULL_RTX; r = next_nonnote_nondebug_insn (r))
    {
      rtx insn1, insn2;
      struct liw_data liw1, liw2;

      insn1 = r;
      if (! extract_bundle (insn1, & liw1))
	continue;

      /* extract_bundle rejects a NULL insn, so no end-of-stream check
	 is needed here.  */
      insn2 = next_nonnote_nondebug_insn (insn1);
      if (! extract_bundle (insn2, & liw2))
	continue;

      /* Check for source/destination overlap.  */
      if (! check_liw_constraints (& liw1, & liw2))
	continue;

      /* Put the insn constrained to slot 1 first.  */
      if (liw1.slot == LIW_OP2 || liw2.slot == LIW_OP1)
	{
	  struct liw_data temp;

	  temp = liw1;
	  liw1 = liw2;
	  liw2 = temp;
	}

      delete_insn (insn2);

      /* Compares have dedicated bundle patterns.  */
      if (liw1.op == LIW_OP_CMP)
	insn2 = gen_cmp_liw (liw2.dest, liw2.src, liw1.dest, liw1.src,
			     GEN_INT (liw2.op));
      else if (liw2.op == LIW_OP_CMP)
	insn2 = gen_liw_cmp (liw1.dest, liw1.src, liw2.dest, liw2.src,
			     GEN_INT (liw1.op));
      else
	insn2 = gen_liw (liw1.dest, liw2.dest, liw1.src, liw2.src,
			 GEN_INT (liw1.op), GEN_INT (liw2.op));

      /* Replace the original pair with the bundle and continue the
	 scan after it.  */
      insn2 = emit_insn_after (insn2, insn1);
      delete_insn (insn1);
      r = insn2;
    }
}
3095
/* Debug helper for the SETLB/Lcc pass below: print REASON (a string
   literal) to the current dump file and, when INSN is not NULL_RTX,
   the insn itself as single-insn RTL.  No-op when dumping is disabled
   (dump_file is NULL).  */
#define DUMP(reason, insn)			\
  do						\
    {						\
      if (dump_file)				\
	{					\
	  fprintf (dump_file, reason "\n");	\
	  if (insn != NULL_RTX)			\
	    print_rtl_single (dump_file, insn);	\
	  fprintf(dump_file, "\n");		\
	}					\
    }						\
  while (0)
3108
/* Replace the BRANCH insn with a Lcc insn that goes to LABEL.
   Insert a SETLB insn just before LABEL.  BRANCH must be a
   conditional jump whose comparison has already been split so that
   it tests the CC register directly (asserted below).  */

static void
mn10300_insert_setlb_lcc (rtx label, rtx branch)
{
  rtx lcc, comparison, cmp_reg;

  if (LABEL_NUSES (label) > 1)
    {
      rtx insn;

      /* This label is used both as an entry point to the loop
	 and as a loop-back point for the loop.  We need to separate
	 these two functions so that the SETLB happens upon entry,
	 but the loop-back does not go to the SETLB instruction.  */
      DUMP ("Inserting SETLB insn after:", label);
      insn = emit_insn_after (gen_setlb (), label);
      label = gen_label_rtx ();
      emit_label_after (label, insn);
      DUMP ("Created new loop-back label:", label);
    }
  else
    {
      DUMP ("Inserting SETLB insn before:", label);
      emit_insn_before (gen_setlb (), label);
    }

  comparison = XEXP (SET_SRC (PATTERN (branch)), 0);
  cmp_reg = XEXP (comparison, 0);
  gcc_assert (REG_P (cmp_reg));

  /* If the comparison has not already been split out of the branch
     then do so now.  */
  gcc_assert (REGNO (cmp_reg) == CC_REG);

  /* Float comparisons use the FLcc form, integer ones Lcc.  */
  if (GET_MODE (cmp_reg) == CC_FLOATmode)
    lcc = gen_FLcc (comparison, label);
  else
    lcc = gen_Lcc (comparison, label);

  lcc = emit_jump_insn_before (lcc, branch);
  mark_jump_label (XVECEXP (PATTERN (lcc), 0, 0), lcc, 0);
  DUMP ("Replacing branch insn...", branch);
  DUMP ("... with Lcc insn:", lcc);
  delete_insn (branch);
}
3156
3157static bool
3158mn10300_block_contains_call (struct basic_block_def * block)
3159{
3160 rtx insn;
3161
3162 FOR_BB_INSNS (block, insn)
3163 if (CALL_P (insn))
3164 return true;
3165
3166 return false;
3167}
3168
3169static bool
3170mn10300_loop_contains_call_insn (loop_p loop)
3171{
3172 basic_block * bbs;
3173 bool result = false;
3174 unsigned int i;
3175
3176 bbs = get_loop_body (loop);
3177
3178 for (i = 0; i < loop->num_nodes; i++)
3179 if (mn10300_block_contains_call (bbs[i]))
3180 {
3181 result = true;
3182 break;
3183 }
3184
3185 free (bbs);
3186 return result;
3187}
3188
/* Scan the current function for loops that can use the AM33's
   SETLB/Lcc zero-overhead looping instructions.  Each innermost loop
   that consists of a single, call-free basic block ending in a simple
   conditional branch has a SETLB emitted at its head and its loop-back
   branch replaced by an Lcc/FLcc insn (see mn10300_insert_setlb_lcc).  */

static void
mn10300_scan_for_setlb_lcc (void)
{
  struct loops loops;
  loop_iterator liter;
  loop_p loop;

  DUMP ("Looking for loops that can use the SETLB insn", NULL_RTX);

  df_analyze ();
  compute_bb_for_insn ();

  /* Find the loops.  */
  if (flow_loops_find (& loops) < 1)
    DUMP ("No loops found", NULL_RTX);
  /* Make the discovered loop tree visible to the FOR_EACH_LOOP
     iterator below.  */
  current_loops = & loops;

  /* FIXME: For now we only investigate innermost loops.  In practice however
     if an inner loop is not suitable for use with the SETLB/Lcc insns, it may
     be the case that its parent loop is suitable.  Thus we should check all
     loops, but work from the innermost outwards.  */
  FOR_EACH_LOOP (liter, loop, LI_ONLY_INNERMOST)
    {
      const char * reason = NULL;

      /* Check to see if we can modify this loop.  If we cannot
	 then set 'reason' to describe why it could not be done.  */
      if (loop->latch == NULL)
	reason = "it contains multiple latches";
      else if (loop->header != loop->latch)
	/* FIXME: We could handle loops that span multiple blocks,
	   but this requires a lot more work tracking down the branches
	   that need altering, so for now keep things simple.  */
	reason = "the loop spans multiple blocks";
      else if (mn10300_loop_contains_call_insn (loop))
	reason = "it contains CALL insns";
      else
	{
	  rtx branch = BB_END (loop->latch);

	  gcc_assert (JUMP_P (branch));
	  if (single_set (branch) == NULL_RTX || ! any_condjump_p (branch))
	    /* We cannot optimize tablejumps and the like.  */
	    /* FIXME: We could handle unconditional jumps.  */
	    reason = "it is not a simple loop";
	  else
	    {
	      rtx label;

	      if (dump_file)
		flow_loop_dump (loop, dump_file, NULL, 0);

	      label = BB_HEAD (loop->header);
	      gcc_assert (LABEL_P (label));

	      mn10300_insert_setlb_lcc (label, branch);
	    }
	}

      if (dump_file && reason != NULL)
	fprintf (dump_file, "Loop starting with insn %d is not suitable because %s\n",
		 INSN_UID (BB_HEAD (loop->header)),
		 reason);
    }

#if 0 /* FIXME: We should free the storage we allocated, but
	 for some unknown reason this leads to seg-faults.  */
  FOR_EACH_LOOP (liter, loop, 0)
    free_simple_loop_desc (loop);

  flow_loops_free (current_loops);
#endif

  current_loops = NULL;

  df_finish_pass (false);

  DUMP ("SETLB scan complete", NULL_RTX);
}
3268
298362c8
NC
3269static void
3270mn10300_reorg (void)
3271{
662c03f4
NC
3272 /* These are optimizations, so only run them if optimizing. */
3273 if (TARGET_AM33 && (optimize > 0 || optimize_size))
298362c8 3274 {
662c03f4
NC
3275 if (TARGET_ALLOW_SETLB)
3276 mn10300_scan_for_setlb_lcc ();
3277
298362c8
NC
3278 if (TARGET_ALLOW_LIW)
3279 mn10300_bundle_liw ();
3280 }
3281}
3282\f
/* Initialize the GCC target structure.  */

/* Machine-dependent reorganization pass (SETLB/LIW, see above).  */
#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG mn10300_reorg

/* Assembler output.  */
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.hword\t"

/* Addressing and cost model hooks.  */
#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS mn10300_legitimize_address

#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST mn10300_address_cost
#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST mn10300_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST mn10300_memory_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS mn10300_rtx_costs

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START mn10300_file_start
#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA mn10300_asm_output_addr_const_extra

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE mn10300_option_override

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO mn10300_encode_section_info

/* Calling convention hooks.  */
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY mn10300_return_in_memory
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE mn10300_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES mn10300_arg_partial_bytes
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG mn10300_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE mn10300_function_arg_advance

/* Varargs support.  */
#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS mn10300_builtin_saveregs
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START mn10300_va_start

#undef TARGET_CASE_VALUES_THRESHOLD
#define TARGET_CASE_VALUES_THRESHOLD mn10300_case_values_threshold

/* Address and constant legitimacy.  */
#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P mn10300_legitimate_address_p
#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS mn10300_delegitimize_address
#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P mn10300_legitimate_constant_p

/* Reload/register-class hooks.  */
#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS mn10300_preferred_reload_class
#undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
#define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS \
  mn10300_preferred_output_reload_class
#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD mn10300_secondary_reload

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT mn10300_trampoline_init

/* Function return values.  */
#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE mn10300_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE mn10300_libcall_value

/* Thunk generation.  */
#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK mn10300_asm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK mn10300_can_output_mi_thunk

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST mn10300_adjust_sched_cost

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE mn10300_conditional_register_usage

#undef TARGET_MD_ASM_CLOBBERS
#define TARGET_MD_ASM_CLOBBERS mn10300_md_asm_clobbers

/* The flags register is clobbered by most insns; CC_REG is declared
   in the backend's header.  */
#undef TARGET_FLAGS_REGNUM
#define TARGET_FLAGS_REGNUM CC_REG

struct gcc_target targetm = TARGET_INITIALIZER;