/* Subroutines for insn-output.c for Matsushita MN10300 series
   Copyright (C) 1996-2014 Free Software Foundation, Inc.
   Contributed by Jeff Law (law@cygnus.com).

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "stor-layout.h"
#include "varasm.h"
#include "calls.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "recog.h"
#include "reload.h"
#include "expr.h"
#include "optabs.h"
#include "function.h"
#include "obstack.h"
#include "diagnostic-core.h"
#include "tm_p.h"
#include "tm-constrs.h"
#include "target.h"
#include "target-def.h"
#include "df.h"
#include "opts.h"
#include "cfgloop.h"
#include "dumpfile.h"

/* This is used in the am33_2.0-linux-gnu port, in which global symbol
   names are not prefixed by underscores, to tell whether to prefix a
   label with a plus sign or not, so that the assembler can tell
   symbol names from register names.  */
int mn10300_protect_label;

/* Selected processor type for tuning.  */
enum processor_type mn10300_tune_cpu = PROCESSOR_DEFAULT;

#define CC_FLAG_Z	1
#define CC_FLAG_N	2
#define CC_FLAG_C	4
#define CC_FLAG_V	8

static int cc_flags_for_mode (enum machine_mode);
static int cc_flags_for_code (enum rtx_code);

/* Implement TARGET_OPTION_OVERRIDE.  */

static void
mn10300_option_override (void)
{
  if (TARGET_AM33)
    target_flags &= ~MASK_MULT_BUG;
  else
    {
      /* Disable scheduling for the MN10300 as we do
	 not have timing information available for it.  */
      flag_schedule_insns = 0;
      flag_schedule_insns_after_reload = 0;

      /* Force enable splitting of wide types, as otherwise it is trivial
	 to run out of registers.  Indeed, this works so well that register
	 allocation problems are now more common *without* optimization,
	 when this flag is not enabled by default.  */
      flag_split_wide_types = 1;
    }

  if (mn10300_tune_string)
    {
      if (strcasecmp (mn10300_tune_string, "mn10300") == 0)
	mn10300_tune_cpu = PROCESSOR_MN10300;
      else if (strcasecmp (mn10300_tune_string, "am33") == 0)
	mn10300_tune_cpu = PROCESSOR_AM33;
      else if (strcasecmp (mn10300_tune_string, "am33-2") == 0)
	mn10300_tune_cpu = PROCESSOR_AM33_2;
      else if (strcasecmp (mn10300_tune_string, "am34") == 0)
	mn10300_tune_cpu = PROCESSOR_AM34;
      else
	error ("-mtune= expects mn10300, am33, am33-2, or am34");
    }
}

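/* Illustration (editor's note, not in the original source): with the
   mapping above, compiling with "-mtune=am34" selects PROCESSOR_AM34
   for tuning, while an unrecognized value such as "-mtune=am35" is
   rejected with the error message above.  */
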
static void
mn10300_file_start (void)
{
  default_file_start ();

  if (TARGET_AM33_2)
    fprintf (asm_out_file, "\t.am33_2\n");
  else if (TARGET_AM33)
    fprintf (asm_out_file, "\t.am33\n");
}

/* Note: This list must match the liw_op attribute in mn10300.md.  */

static const char *liw_op_names[] =
{
  "add", "cmp", "sub", "mov",
  "and", "or", "xor",
  "asr", "lsr", "asl",
  "none", "max"
};

/* Print operand X using operand code CODE to assembly language output file
   FILE.  */

void
mn10300_print_operand (FILE *file, rtx x, int code)
{
  switch (code)
    {
    case 'W':
      {
	unsigned int liw_op = UINTVAL (x);

	gcc_assert (TARGET_ALLOW_LIW);
	gcc_assert (liw_op < LIW_OP_MAX);
	fputs (liw_op_names[liw_op], file);
	break;
      }

    case 'b':
    case 'B':
      {
	enum rtx_code cmp = GET_CODE (x);
	enum machine_mode mode = GET_MODE (XEXP (x, 0));
	const char *str;
	int have_flags;

	if (code == 'B')
	  cmp = reverse_condition (cmp);
	have_flags = cc_flags_for_mode (mode);

	switch (cmp)
	  {
	  case NE:
	    str = "ne";
	    break;
	  case EQ:
	    str = "eq";
	    break;
	  case GE:
	    /* bge is smaller than bnc.  */
	    str = (have_flags & CC_FLAG_V ? "ge" : "nc");
	    break;
	  case LT:
	    str = (have_flags & CC_FLAG_V ? "lt" : "ns");
	    break;
	  case GT:
	    str = "gt";
	    break;
	  case LE:
	    str = "le";
	    break;
	  case GEU:
	    str = "cc";
	    break;
	  case GTU:
	    str = "hi";
	    break;
	  case LEU:
	    str = "ls";
	    break;
	  case LTU:
	    str = "cs";
	    break;
	  case ORDERED:
	    str = "lge";
	    break;
	  case UNORDERED:
	    str = "uo";
	    break;
	  case LTGT:
	    str = "lg";
	    break;
	  case UNEQ:
	    str = "ue";
	    break;
	  case UNGE:
	    str = "uge";
	    break;
	  case UNGT:
	    str = "ug";
	    break;
	  case UNLE:
	    str = "ule";
	    break;
	  case UNLT:
	    str = "ul";
	    break;
	  default:
	    gcc_unreachable ();
	  }

	gcc_checking_assert ((cc_flags_for_code (cmp) & ~have_flags) == 0);
	fputs (str, file);
      }
      break;

    case 'C':
      /* This is used for the operand to a call instruction;
	 if it's a REG, enclose it in parens, else output
	 the operand normally.  */
      if (REG_P (x))
	{
	  fputc ('(', file);
	  mn10300_print_operand (file, x, 0);
	  fputc (')', file);
	}
      else
	mn10300_print_operand (file, x, 0);
      break;

    case 'D':
      switch (GET_CODE (x))
	{
	case MEM:
	  fputc ('(', file);
	  output_address (XEXP (x, 0));
	  fputc (')', file);
	  break;

	case REG:
	  fprintf (file, "fd%d", REGNO (x) - 18);
	  break;

	default:
	  gcc_unreachable ();
	}
      break;

      /* This is the least significant word in a 64-bit value.  */
    case 'L':
      switch (GET_CODE (x))
	{
	case MEM:
	  fputc ('(', file);
	  output_address (XEXP (x, 0));
	  fputc (')', file);
	  break;

	case REG:
	  fprintf (file, "%s", reg_names[REGNO (x)]);
	  break;

	case SUBREG:
	  fprintf (file, "%s", reg_names[subreg_regno (x)]);
	  break;

	case CONST_DOUBLE:
	  {
	    long val[2];
	    REAL_VALUE_TYPE rv;

	    switch (GET_MODE (x))
	      {
	      case DFmode:
		REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
		REAL_VALUE_TO_TARGET_DOUBLE (rv, val);
		fprintf (file, "0x%lx", val[0]);
		break;
	      case SFmode:
		REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
		REAL_VALUE_TO_TARGET_SINGLE (rv, val[0]);
		fprintf (file, "0x%lx", val[0]);
		break;
	      case VOIDmode:
	      case DImode:
		mn10300_print_operand_address (file,
					       GEN_INT (CONST_DOUBLE_LOW (x)));
		break;
	      default:
		break;
	      }
	    break;
	  }

	case CONST_INT:
	  {
	    rtx low, high;
	    split_double (x, &low, &high);
	    fprintf (file, "%ld", (long)INTVAL (low));
	    break;
	  }

	default:
	  gcc_unreachable ();
	}
      break;

      /* Similarly, but for the most significant word.  */
    case 'H':
      switch (GET_CODE (x))
	{
	case MEM:
	  fputc ('(', file);
	  x = adjust_address (x, SImode, 4);
	  output_address (XEXP (x, 0));
	  fputc (')', file);
	  break;

	case REG:
	  fprintf (file, "%s", reg_names[REGNO (x) + 1]);
	  break;

	case SUBREG:
	  fprintf (file, "%s", reg_names[subreg_regno (x) + 1]);
	  break;

	case CONST_DOUBLE:
	  {
	    long val[2];
	    REAL_VALUE_TYPE rv;

	    switch (GET_MODE (x))
	      {
	      case DFmode:
		REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
		REAL_VALUE_TO_TARGET_DOUBLE (rv, val);
		fprintf (file, "0x%lx", val[1]);
		break;
	      case SFmode:
		gcc_unreachable ();
	      case VOIDmode:
	      case DImode:
		mn10300_print_operand_address (file,
					       GEN_INT (CONST_DOUBLE_HIGH (x)));
		break;
	      default:
		break;
	      }
	    break;
	  }

	case CONST_INT:
	  {
	    rtx low, high;
	    split_double (x, &low, &high);
	    fprintf (file, "%ld", (long)INTVAL (high));
	    break;
	  }

	default:
	  gcc_unreachable ();
	}
      break;

    case 'A':
      fputc ('(', file);
      if (REG_P (XEXP (x, 0)))
	output_address (gen_rtx_PLUS (SImode, XEXP (x, 0), const0_rtx));
      else
	output_address (XEXP (x, 0));
      fputc (')', file);
      break;

    case 'N':
      gcc_assert (INTVAL (x) >= -128 && INTVAL (x) <= 255);
      fprintf (file, "%d", (int)((~INTVAL (x)) & 0xff));
      break;

    case 'U':
      gcc_assert (INTVAL (x) >= -128 && INTVAL (x) <= 255);
      fprintf (file, "%d", (int)(INTVAL (x) & 0xff));
      break;

      /* For shift counts.  The hardware ignores the upper bits of
	 any immediate, but the assembler will flag an out-of-range
	 shift count as an error.  So we mask off the high bits
	 of the immediate here.  */
    case 'S':
      if (CONST_INT_P (x))
	{
	  fprintf (file, "%d", (int)(INTVAL (x) & 0x1f));
	  break;
	}
      /* FALL THROUGH */

    default:
      switch (GET_CODE (x))
	{
	case MEM:
	  fputc ('(', file);
	  output_address (XEXP (x, 0));
	  fputc (')', file);
	  break;

	case PLUS:
	  output_address (x);
	  break;

	case REG:
	  fprintf (file, "%s", reg_names[REGNO (x)]);
	  break;

	case SUBREG:
	  fprintf (file, "%s", reg_names[subreg_regno (x)]);
	  break;

	  /* This will only be single precision....  */
	case CONST_DOUBLE:
	  {
	    unsigned long val;
	    REAL_VALUE_TYPE rv;

	    REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
	    REAL_VALUE_TO_TARGET_SINGLE (rv, val);
	    fprintf (file, "0x%lx", val);
	    break;
	  }

	case CONST_INT:
	case SYMBOL_REF:
	case CONST:
	case LABEL_REF:
	case CODE_LABEL:
	case UNSPEC:
	  mn10300_print_operand_address (file, x);
	  break;
	default:
	  gcc_unreachable ();
	}
      break;
    }
}

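/* Illustrative outputs (editor's note, not in the original source;
   these follow directly from the cases above, assuming the usual
   "d0"/"a0" register naming):

     code 'C' on (reg:SI d0)       -> "(d0)"
     code 'N' on (const_int 0x0f)  -> "240"  (~0x0f & 0xff)
     code 'S' on (const_int 35)    -> "3"    (35 & 0x1f)
     code 'W' on (const_int 3)     -> "mov"  (liw_op_names[3])  */
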
/* Output assembly language for the address ADDR to FILE.  */

void
mn10300_print_operand_address (FILE *file, rtx addr)
{
  switch (GET_CODE (addr))
    {
    case POST_INC:
      mn10300_print_operand (file, XEXP (addr, 0), 0);
      fputc ('+', file);
      break;

    case POST_MODIFY:
      mn10300_print_operand (file, XEXP (addr, 0), 0);
      fputc ('+', file);
      fputc (',', file);
      mn10300_print_operand (file, XEXP (addr, 1), 0);
      break;

    case REG:
      mn10300_print_operand (file, addr, 0);
      break;
    case PLUS:
      {
	rtx base = XEXP (addr, 0);
	rtx index = XEXP (addr, 1);

	if (REG_P (index) && !REG_OK_FOR_INDEX_P (index))
	  {
	    rtx x = base;
	    base = index;
	    index = x;

	    gcc_assert (REG_P (index) && REG_OK_FOR_INDEX_P (index));
	  }
	gcc_assert (REG_OK_FOR_BASE_P (base));

	mn10300_print_operand (file, index, 0);
	fputc (',', file);
	mn10300_print_operand (file, base, 0);
	break;
      }
    case SYMBOL_REF:
      output_addr_const (file, addr);
      break;
    default:
      output_addr_const (file, addr);
      break;
    }
}

/* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA.

   Used for PIC-specific UNSPECs.  */

static bool
mn10300_asm_output_addr_const_extra (FILE *file, rtx x)
{
  if (GET_CODE (x) == UNSPEC)
    {
      switch (XINT (x, 1))
	{
	case UNSPEC_PIC:
	  /* GLOBAL_OFFSET_TABLE or local symbols, no suffix.  */
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  break;
	case UNSPEC_GOT:
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("@GOT", file);
	  break;
	case UNSPEC_GOTOFF:
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("@GOTOFF", file);
	  break;
	case UNSPEC_PLT:
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("@PLT", file);
	  break;
	case UNSPEC_GOTSYM_OFF:
	  assemble_name (file, GOT_SYMBOL_NAME);
	  fputs ("-(", file);
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("-.)", file);
	  break;
	default:
	  return false;
	}
      return true;
    }
  else
    return false;
}

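/* For example (editor's note): an UNSPEC_GOT wrapping the symbol "foo"
   prints as "foo@GOT", and UNSPEC_GOTSYM_OFF prints
   "_GLOBAL_OFFSET_TABLE_-(foo-.)", assuming the default GOT symbol
   name.  */
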
/* Count the number of FP registers that have to be saved.  */
static int
fp_regs_to_save (void)
{
  int i, n = 0;

  if (! TARGET_AM33_2)
    return 0;

  for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
    if (df_regs_ever_live_p (i) && ! call_really_used_regs[i])
      ++n;

  return n;
}

/* Print a set of registers in the format required by "movm" and "ret".
   Register K is saved if bit K of MASK is set.  The data and address
   registers can be stored individually, but the extended registers cannot.
   We assume that the mask already takes that into account.  For instance,
   bits 14 to 17 must have the same value.  */

void
mn10300_print_reg_list (FILE *file, int mask)
{
  int need_comma;
  int i;

  need_comma = 0;
  fputc ('[', file);

  for (i = 0; i < FIRST_EXTENDED_REGNUM; i++)
    if ((mask & (1 << i)) != 0)
      {
	if (need_comma)
	  fputc (',', file);
	fputs (reg_names [i], file);
	need_comma = 1;
      }

  if ((mask & 0x3c000) != 0)
    {
      gcc_assert ((mask & 0x3c000) == 0x3c000);
      if (need_comma)
	fputc (',', file);
      fputs ("exreg1", file);
      need_comma = 1;
    }

  fputc (']', file);
}

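/* Example (editor's note): assuming the standard MN10300 register
   naming, where bits 2-3 are d2/d3 and bits 6-7 are a2/a3, a MASK of
   0x3c0cc prints as "[d2,d3,a2,a3,exreg1]".  */
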
/* If the MDR register is never clobbered, we can use the RETF instruction
   which takes the address from the MDR register.  This is 3 cycles faster
   than having to load the address from the stack.  */

bool
mn10300_can_use_retf_insn (void)
{
  /* Don't bother if we're not optimizing.  In this case we won't
     have proper access to df_regs_ever_live_p.  */
  if (!optimize)
    return false;

  /* EH returns alter the saved return address; MDR is not current.  */
  if (crtl->calls_eh_return)
    return false;

  /* Obviously not if MDR is ever clobbered.  */
  if (df_regs_ever_live_p (MDR_REG))
    return false;

  /* ??? Careful not to use this during expand_epilogue etc.  */
  gcc_assert (!in_sequence_p ());
  return leaf_function_p ();
}

bool
mn10300_can_use_rets_insn (void)
{
  return !mn10300_initial_offset (ARG_POINTER_REGNUM, STACK_POINTER_REGNUM);
}

/* Returns the set of live, callee-saved registers as a bitmask.  The
   callee-saved extended registers cannot be stored individually, so
   all of them will be included in the mask if any one of them is used.
   Also returns the number of bytes in the registers in the mask if
   BYTES_SAVED is not NULL.  */

unsigned int
mn10300_get_live_callee_saved_regs (unsigned int * bytes_saved)
{
  int mask;
  int i;
  unsigned int count;

  count = mask = 0;
  for (i = 0; i <= LAST_EXTENDED_REGNUM; i++)
    if (df_regs_ever_live_p (i) && ! call_really_used_regs[i])
      {
	mask |= (1 << i);
	++ count;
      }

  if ((mask & 0x3c000) != 0)
    {
      for (i = 0x04000; i < 0x40000; i <<= 1)
	if ((mask & i) == 0)
	  ++ count;

      mask |= 0x3c000;
    }

  if (bytes_saved)
    * bytes_saved = count * UNITS_PER_WORD;

  return mask;
}

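/* Example (editor's note): if only d2 and d3 are live callee-saved
   registers, the returned mask is 0x0c and *BYTES_SAVED is 8.  If any
   one of the extended registers in bits 14-17 is live, all four are
   added to the mask and counted, since they can only be saved as a
   group.  */
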
static rtx
F (rtx r)
{
  RTX_FRAME_RELATED_P (r) = 1;
  return r;
}

/* Generate an instruction that pushes several registers onto the stack.
   Register K will be saved if bit K in MASK is set.  The function does
   nothing if MASK is zero.

   To be compatible with the "movm" instruction, the lowest-numbered
   register must be stored in the lowest slot.  If MASK is the set
   { R1,...,RN }, where R1...RN are ordered least first, the generated
   instruction will have the form:

       (parallel
	 (set (reg:SI 9) (plus:SI (reg:SI 9) (const_int -N*4)))
	 (set (mem:SI (plus:SI (reg:SI 9)
			       (const_int -1*4)))
	      (reg:SI RN))
	 ...
	 (set (mem:SI (plus:SI (reg:SI 9)
			       (const_int -N*4)))
	      (reg:SI R1))) */

static void
mn10300_gen_multiple_store (unsigned int mask)
{
  /* The order in which registers are stored, from SP-4 through SP-N*4.  */
  static const unsigned int store_order[8] = {
    /* e2, e3: never saved */
    FIRST_EXTENDED_REGNUM + 4,
    FIRST_EXTENDED_REGNUM + 5,
    FIRST_EXTENDED_REGNUM + 6,
    FIRST_EXTENDED_REGNUM + 7,
    /* e0, e1, mdrq, mcrh, mcrl, mcvf: never saved.  */
    FIRST_DATA_REGNUM + 2,
    FIRST_DATA_REGNUM + 3,
    FIRST_ADDRESS_REGNUM + 2,
    FIRST_ADDRESS_REGNUM + 3,
    /* d0, d1, a0, a1, mdr, lir, lar: never saved.  */
  };

  rtx x, elts[9];
  unsigned int i;
  int count;

  if (mask == 0)
    return;

  for (i = count = 0; i < ARRAY_SIZE(store_order); ++i)
    {
      unsigned regno = store_order[i];

      if (((mask >> regno) & 1) == 0)
	continue;

      ++count;
      x = plus_constant (Pmode, stack_pointer_rtx, count * -4);
      x = gen_frame_mem (SImode, x);
      x = gen_rtx_SET (VOIDmode, x, gen_rtx_REG (SImode, regno));
      elts[count] = F(x);

      /* Remove the register from the mask so that... */
      mask &= ~(1u << regno);
    }

  /* ... we can make sure that we didn't try to use a register
     not listed in the store order.  */
  gcc_assert (mask == 0);

  /* Create the instruction that updates the stack pointer.  */
  x = plus_constant (Pmode, stack_pointer_rtx, count * -4);
  x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
  elts[0] = F(x);

  /* We need one PARALLEL element to update the stack pointer and
     an additional element for each register that is stored.  */
  x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (count + 1, elts));
  F (emit_insn (x));
}

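/* Example (editor's note, derived from STORE_ORDER above): for a MASK
   covering just d2 and d3, the emitted insn is

     (parallel
       (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -8)))
       (set (mem:SI (plus:SI (reg:SI sp) (const_int -4))) (reg:SI d2))
       (set (mem:SI (plus:SI (reg:SI sp) (const_int -8))) (reg:SI d3)))  */
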
void
mn10300_expand_prologue (void)
{
  HOST_WIDE_INT size = mn10300_frame_size ();

  if (flag_stack_usage_info)
    current_function_static_stack_size = size;

  /* If we use any of the callee-saved registers, save them now.  */
  mn10300_gen_multiple_store (mn10300_get_live_callee_saved_regs (NULL));

  if (TARGET_AM33_2 && fp_regs_to_save ())
    {
      int num_regs_to_save = fp_regs_to_save (), i;
      HOST_WIDE_INT xsize;
      enum
      {
	save_sp_merge,
	save_sp_no_merge,
	save_sp_partial_merge,
	save_a0_merge,
	save_a0_no_merge
      } strategy;
      unsigned int strategy_size = (unsigned)-1, this_strategy_size;
      rtx reg;

      /* We have several different strategies to save FP registers.
	 We can store them using SP offsets, which is beneficial if
	 there are just a few registers to save, or we can use `a0' in
	 post-increment mode (`a0' is the only call-clobbered address
	 register that is never used to pass information to a
	 function).  Furthermore, if we don't need a frame pointer, we
	 can merge the two SP adds into a single one, but this isn't
	 always beneficial; sometimes we can just split the two adds
	 so that we don't exceed a 16-bit constant size.  The code
	 below will select which strategy to use, so as to generate
	 the smallest code.  Ties are broken in favor of shorter sequences
	 (in terms of number of instructions).  */

#define SIZE_ADD_AX(S) ((((S) >= (1 << 15)) || ((S) < -(1 << 15))) ? 6 \
			: (((S) >= (1 << 7)) || ((S) < -(1 << 7))) ? 4 : 2)
#define SIZE_ADD_SP(S) ((((S) >= (1 << 15)) || ((S) < -(1 << 15))) ? 6 \
			: (((S) >= (1 << 7)) || ((S) < -(1 << 7))) ? 4 : 3)

/* We add 0 * (S) in two places to promote to the type of S,
   so that all arms of the conditional have the same type.  */
#define SIZE_FMOV_LIMIT(S,N,L,SIZE1,SIZE2,ELSE) \
  (((S) >= (L)) ? 0 * (S) + (SIZE1) * (N) \
   : ((S) + 4 * (N) >= (L)) ? (((L) - (S)) / 4 * (SIZE2) \
			       + ((S) + 4 * (N) - (L)) / 4 * (SIZE1)) \
   : 0 * (S) + (ELSE))
#define SIZE_FMOV_SP_(S,N) \
  (SIZE_FMOV_LIMIT ((S), (N), (1 << 24), 7, 6, \
   SIZE_FMOV_LIMIT ((S), (N), (1 << 8), 6, 4, \
		    (S) ? 4 * (N) : 3 + 4 * ((N) - 1))))
#define SIZE_FMOV_SP(S,N) (SIZE_FMOV_SP_ ((unsigned HOST_WIDE_INT)(S), (N)))

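/* Worked examples (editor's note, derived from the macros above):

     SIZE_ADD_SP (-4)     == 3   -- offset fits in 8 bits
     SIZE_ADD_SP (-200)   == 4   -- needs a 16-bit immediate
     SIZE_ADD_SP (-40000) == 6   -- needs a 32-bit immediate
     SIZE_ADD_AX (-200)   == 4   -- same thresholds, 2-byte minimum

   SIZE_FMOV_SP (S, N) estimates the bytes for N fmov saves starting at
   offset S; e.g. SIZE_FMOV_SP (0, 2) == 3 + 4 == 7, the first store
   using the short (sp) form and the second a (d8,sp) form.  */
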
      /* Consider alternative save_sp_merge only if we don't need the
	 frame pointer and size is nonzero.  */
      if (! frame_pointer_needed && size)
	{
	  /* Insn: add -(size + 4 * num_regs_to_save), sp.  */
	  this_strategy_size = SIZE_ADD_SP (-(size + 4 * num_regs_to_save));
	  /* Insn: fmov fs#, (##, sp), for each fs# to be saved.  */
	  this_strategy_size += SIZE_FMOV_SP (size, num_regs_to_save);

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = save_sp_merge;
	      strategy_size = this_strategy_size;
	    }
	}

      /* Consider alternative save_sp_no_merge unconditionally.  */
      /* Insn: add -4 * num_regs_to_save, sp.  */
      this_strategy_size = SIZE_ADD_SP (-4 * num_regs_to_save);
      /* Insn: fmov fs#, (##, sp), for each fs# to be saved.  */
      this_strategy_size += SIZE_FMOV_SP (0, num_regs_to_save);
      if (size)
	{
	  /* Insn: add -size, sp.  */
	  this_strategy_size += SIZE_ADD_SP (-size);
	}

      if (this_strategy_size < strategy_size)
	{
	  strategy = save_sp_no_merge;
	  strategy_size = this_strategy_size;
	}

      /* Consider alternative save_sp_partial_merge only if we don't
	 need a frame pointer and size is reasonably large.  */
      if (! frame_pointer_needed && size + 4 * num_regs_to_save > 128)
	{
	  /* Insn: add -128, sp.  */
	  this_strategy_size = SIZE_ADD_SP (-128);
	  /* Insn: fmov fs#, (##, sp), for each fs# to be saved.  */
	  this_strategy_size += SIZE_FMOV_SP (128 - 4 * num_regs_to_save,
					      num_regs_to_save);
	  if (size)
	    {
	      /* Insn: add 128-size, sp.  */
	      this_strategy_size += SIZE_ADD_SP (128 - size);
	    }

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = save_sp_partial_merge;
	      strategy_size = this_strategy_size;
	    }
	}

      /* Consider alternative save_a0_merge only if we don't need a
	 frame pointer, size is nonzero and the user hasn't
	 changed the calling conventions of a0.  */
      if (! frame_pointer_needed && size
	  && call_really_used_regs [FIRST_ADDRESS_REGNUM]
	  && ! fixed_regs[FIRST_ADDRESS_REGNUM])
	{
	  /* Insn: add -(size + 4 * num_regs_to_save), sp.  */
	  this_strategy_size = SIZE_ADD_SP (-(size + 4 * num_regs_to_save));
	  /* Insn: mov sp, a0.  */
	  this_strategy_size++;
	  if (size)
	    {
	      /* Insn: add size, a0.  */
	      this_strategy_size += SIZE_ADD_AX (size);
	    }
	  /* Insn: fmov fs#, (a0+), for each fs# to be saved.  */
	  this_strategy_size += 3 * num_regs_to_save;

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = save_a0_merge;
	      strategy_size = this_strategy_size;
	    }
	}

      /* Consider alternative save_a0_no_merge if the user hasn't
	 changed the calling conventions of a0.  */
      if (call_really_used_regs [FIRST_ADDRESS_REGNUM]
	  && ! fixed_regs[FIRST_ADDRESS_REGNUM])
	{
	  /* Insn: add -4 * num_regs_to_save, sp.  */
	  this_strategy_size = SIZE_ADD_SP (-4 * num_regs_to_save);
	  /* Insn: mov sp, a0.  */
	  this_strategy_size++;
	  /* Insn: fmov fs#, (a0+), for each fs# to be saved.  */
	  this_strategy_size += 3 * num_regs_to_save;
	  if (size)
	    {
	      /* Insn: add -size, sp.  */
	      this_strategy_size += SIZE_ADD_SP (-size);
	    }

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = save_a0_no_merge;
	      strategy_size = this_strategy_size;
	    }
	}

      /* Emit the initial SP add, common to all strategies.  */
      switch (strategy)
	{
	case save_sp_no_merge:
	case save_a0_no_merge:
	  F (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-4 * num_regs_to_save))));
	  xsize = 0;
	  break;

	case save_sp_partial_merge:
	  F (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-128))));
	  xsize = 128 - 4 * num_regs_to_save;
	  size -= xsize;
	  break;

	case save_sp_merge:
	case save_a0_merge:
	  F (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-(size + 4 * num_regs_to_save)))));
	  /* We'll have to adjust FP register saves according to the
	     frame size.  */
	  xsize = size;
	  /* Since we've already created the stack frame, don't do it
	     again at the end of the function.  */
	  size = 0;
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Now prepare register a0, if we have decided to use it.  */
      switch (strategy)
	{
	case save_sp_merge:
	case save_sp_no_merge:
	case save_sp_partial_merge:
	  reg = 0;
	  break;

	case save_a0_merge:
	case save_a0_no_merge:
	  reg = gen_rtx_REG (SImode, FIRST_ADDRESS_REGNUM);
	  F (emit_insn (gen_movsi (reg, stack_pointer_rtx)));
	  if (xsize)
	    F (emit_insn (gen_addsi3 (reg, reg, GEN_INT (xsize))));
	  reg = gen_rtx_POST_INC (SImode, reg);
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Now actually save the FP registers.  */
      for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
	if (df_regs_ever_live_p (i) && ! call_really_used_regs [i])
	  {
	    rtx addr;

	    if (reg)
	      addr = reg;
	    else
	      {
		/* If we aren't using `a0', use an SP offset.  */
		if (xsize)
		  {
		    addr = gen_rtx_PLUS (SImode,
					 stack_pointer_rtx,
					 GEN_INT (xsize));
		  }
		else
		  addr = stack_pointer_rtx;

		xsize += 4;
	      }

	    F (emit_insn (gen_movsf (gen_rtx_MEM (SFmode, addr),
				     gen_rtx_REG (SFmode, i))));
	  }
    }

  /* Now put the frame pointer into the frame pointer register.  */
  if (frame_pointer_needed)
    F (emit_move_insn (frame_pointer_rtx, stack_pointer_rtx));

  /* Allocate stack for this frame.  */
  if (size)
    F (emit_insn (gen_addsi3 (stack_pointer_rtx,
			      stack_pointer_rtx,
			      GEN_INT (-size))));

  if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
    emit_insn (gen_load_pic ());
}

void
mn10300_expand_epilogue (void)
{
  HOST_WIDE_INT size = mn10300_frame_size ();
  unsigned int reg_save_bytes;

  mn10300_get_live_callee_saved_regs (& reg_save_bytes);

  if (TARGET_AM33_2 && fp_regs_to_save ())
    {
      int num_regs_to_save = fp_regs_to_save (), i;
      rtx reg = 0;

      /* We have several options to restore FP registers.  We could
	 load them from SP offsets, but, if there are enough FP
	 registers to restore, we win if we use a post-increment
	 addressing mode.  */

      /* If we have a frame pointer, it's the best option, because we
	 already know it has the value we want.  */
      if (frame_pointer_needed)
	reg = gen_rtx_REG (SImode, FRAME_POINTER_REGNUM);
      /* Otherwise, we may use `a1', since it's call-clobbered and
	 it's never used for return values.  But only do so if it's
	 smaller than using SP offsets.  */
      else
	{
	  enum { restore_sp_post_adjust,
		 restore_sp_pre_adjust,
		 restore_sp_partial_adjust,
		 restore_a1 } strategy;
	  unsigned int this_strategy_size, strategy_size = (unsigned)-1;

	  /* Consider using sp offsets before adjusting sp.  */
	  /* Insn: fmov (##,sp),fs#, for each fs# to be restored.  */
	  this_strategy_size = SIZE_FMOV_SP (size, num_regs_to_save);
	  /* If size is too large, we'll have to adjust SP with an
	     add.  */
	  if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
	    {
	      /* Insn: add size + 4 * num_regs_to_save, sp.  */
	      this_strategy_size += SIZE_ADD_SP (size + 4 * num_regs_to_save);
	    }
	  /* If we don't have to restore any non-FP registers,
	     we'll be able to save one byte by using rets.  */
	  if (! reg_save_bytes)
	    this_strategy_size--;

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = restore_sp_post_adjust;
	      strategy_size = this_strategy_size;
	    }

	  /* Consider using sp offsets after adjusting sp.  */
	  /* Insn: add size, sp.  */
	  this_strategy_size = SIZE_ADD_SP (size);
	  /* Insn: fmov (##,sp),fs#, for each fs# to be restored.  */
	  this_strategy_size += SIZE_FMOV_SP (0, num_regs_to_save);
	  /* We're going to use ret to release the FP registers
	     save area, so, no savings.  */

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = restore_sp_pre_adjust;
	      strategy_size = this_strategy_size;
	    }

	  /* Consider using sp offsets after partially adjusting sp.
	     When size is close to 32Kb, we may be able to adjust SP
	     with an imm16 add instruction while still using fmov
	     (d8,sp).  */
	  if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
	    {
	      /* Insn: add size + 4 * num_regs_to_save
			    + reg_save_bytes - 252,sp.  */
	      this_strategy_size = SIZE_ADD_SP (size + 4 * num_regs_to_save
						+ (int) reg_save_bytes - 252);
	      /* Insn: fmov (##,sp),fs#, for each fs# to be restored.  */
	      this_strategy_size += SIZE_FMOV_SP (252 - reg_save_bytes
						  - 4 * num_regs_to_save,
						  num_regs_to_save);
	      /* We're going to use ret to release the FP registers
		 save area, so, no savings.  */

	      if (this_strategy_size < strategy_size)
		{
		  strategy = restore_sp_partial_adjust;
		  strategy_size = this_strategy_size;
		}
	    }

	  /* Consider using a1 in post-increment mode, as long as the
	     user hasn't changed the calling conventions of a1.  */
	  if (call_really_used_regs [FIRST_ADDRESS_REGNUM + 1]
	      && ! fixed_regs[FIRST_ADDRESS_REGNUM+1])
	    {
	      /* Insn: mov sp,a1.  */
	      this_strategy_size = 1;
	      if (size)
		{
		  /* Insn: add size,a1.  */
		  this_strategy_size += SIZE_ADD_AX (size);
		}
	      /* Insn: fmov (a1+),fs#, for each fs# to be restored.  */
	      this_strategy_size += 3 * num_regs_to_save;
	      /* If size is large enough, we may be able to save a
		 couple of bytes.  */
	      if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
		{
		  /* Insn: mov a1,sp.  */
		  this_strategy_size += 2;
		}
	      /* If we don't have to restore any non-FP registers,
		 we'll be able to save one byte by using rets.  */
	      if (! reg_save_bytes)
		this_strategy_size--;

	      if (this_strategy_size < strategy_size)
		{
		  strategy = restore_a1;
		  strategy_size = this_strategy_size;
		}
	    }

	  switch (strategy)
	    {
	    case restore_sp_post_adjust:
	      break;

	    case restore_sp_pre_adjust:
	      emit_insn (gen_addsi3 (stack_pointer_rtx,
				     stack_pointer_rtx,
				     GEN_INT (size)));
	      size = 0;
	      break;

	    case restore_sp_partial_adjust:
	      emit_insn (gen_addsi3 (stack_pointer_rtx,
				     stack_pointer_rtx,
				     GEN_INT (size + 4 * num_regs_to_save
					      + reg_save_bytes - 252)));
	      size = 252 - reg_save_bytes - 4 * num_regs_to_save;
	      break;

	    case restore_a1:
	      reg = gen_rtx_REG (SImode, FIRST_ADDRESS_REGNUM + 1);
	      emit_insn (gen_movsi (reg, stack_pointer_rtx));
	      if (size)
		emit_insn (gen_addsi3 (reg, reg, GEN_INT (size)));
	      break;

	    default:
	      gcc_unreachable ();
	    }
	}

      /* Adjust the selected register, if any, for post-increment.  */
      if (reg)
	reg = gen_rtx_POST_INC (SImode, reg);

      for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
	if (df_regs_ever_live_p (i) && ! call_really_used_regs [i])
	  {
	    rtx addr;

	    if (reg)
	      addr = reg;
	    else if (size)
	      {
		/* If we aren't using a post-increment register, use an
		   SP offset.  */
		addr = gen_rtx_PLUS (SImode,
				     stack_pointer_rtx,
				     GEN_INT (size));
	      }
	    else
	      addr = stack_pointer_rtx;

	    size += 4;

	    emit_insn (gen_movsf (gen_rtx_REG (SFmode, i),
				  gen_rtx_MEM (SFmode, addr)));
	  }

      /* If we were using the restore_a1 strategy and the number of
	 bytes to be released won't fit in the `ret' byte, copy `a1'
	 to `sp', to avoid having to use `add' to adjust it.  */
      if (! frame_pointer_needed && reg && size + reg_save_bytes > 255)
	{
	  emit_move_insn (stack_pointer_rtx, XEXP (reg, 0));
	  size = 0;
	}
    }

  /* Maybe cut back the stack, except for the register save area.

     If the frame pointer exists, then use the frame pointer to
     cut back the stack.

     If the stack size + register save area is more than 255 bytes,
     then the stack must be cut back here since the size + register
     save size is too big for a ret/retf instruction.

     Else leave it alone, it will be cut back as part of the
     ret/retf instruction, or there wasn't any stack to begin with.

     Under no circumstances should the register save area be
     deallocated here, that would leave a window where an interrupt
     could occur and trash the register save area.  */
  if (frame_pointer_needed)
    {
      emit_move_insn (stack_pointer_rtx, frame_pointer_rtx);
      size = 0;
    }
  else if (size + reg_save_bytes > 255)
    {
      emit_insn (gen_addsi3 (stack_pointer_rtx,
			     stack_pointer_rtx,
			     GEN_INT (size)));
      size = 0;
    }

  /* Adjust the stack and restore callee-saved registers, if any.  */
  if (mn10300_can_use_rets_insn ())
    emit_jump_insn (ret_rtx);
  else
    emit_jump_insn (gen_return_ret (GEN_INT (size + reg_save_bytes)));
}

/* Recognize the PARALLEL rtx generated by mn10300_gen_multiple_store().
   This function is for MATCH_PARALLEL and so assumes OP is known to be
   a PARALLEL.  If OP is a multiple store, return a mask indicating which
   registers it saves.  Return 0 otherwise.  */

unsigned int
mn10300_store_multiple_regs (rtx op)
{
  int count;
  int mask;
  int i;
  unsigned int last;
  rtx elt;

  count = XVECLEN (op, 0);
  if (count < 2)
    return 0;

  /* Check that the first instruction has the form (set (sp) (plus A B)).  */
  elt = XVECEXP (op, 0, 0);
  if (GET_CODE (elt) != SET
      || (! REG_P (SET_DEST (elt)))
      || REGNO (SET_DEST (elt)) != STACK_POINTER_REGNUM
      || GET_CODE (SET_SRC (elt)) != PLUS)
    return 0;

  /* Check that A is the stack pointer and B is the expected stack size.
     For OP to match, each subsequent instruction should push a word onto
     the stack.  We therefore expect the first instruction to create
     COUNT-1 stack slots.  */
  elt = SET_SRC (elt);
  if ((! REG_P (XEXP (elt, 0)))
      || REGNO (XEXP (elt, 0)) != STACK_POINTER_REGNUM
      || (! CONST_INT_P (XEXP (elt, 1)))
      || INTVAL (XEXP (elt, 1)) != -(count - 1) * 4)
    return 0;

  mask = 0;
  for (i = 1; i < count; i++)
    {
      /* Check that element i is a (set (mem M) R).  */
      /* ??? Validate the register order a-la mn10300_gen_multiple_store.
	 Remember: the ordering is *not* monotonic.  */
      elt = XVECEXP (op, 0, i);
      if (GET_CODE (elt) != SET
	  || (! MEM_P (SET_DEST (elt)))
	  || (! REG_P (SET_SRC (elt))))
	return 0;

      /* Remember which registers are to be saved.  */
      last = REGNO (SET_SRC (elt));
      mask |= (1 << last);

      /* Check that M has the form (plus (sp) (const_int -I*4)) */
      elt = XEXP (SET_DEST (elt), 0);
      if (GET_CODE (elt) != PLUS
	  || (! REG_P (XEXP (elt, 0)))
	  || REGNO (XEXP (elt, 0)) != STACK_POINTER_REGNUM
	  || (! CONST_INT_P (XEXP (elt, 1)))
	  || INTVAL (XEXP (elt, 1)) != -i * 4)
	return 0;
    }

  /* All or none of the callee-saved extended registers must be in the set.  */
  if ((mask & 0x3c000) != 0
      && (mask & 0x3c000) != 0x3c000)
    return 0;

  return mask;
}

/* Implement TARGET_PREFERRED_RELOAD_CLASS.  */

static reg_class_t
mn10300_preferred_reload_class (rtx x, reg_class_t rclass)
{
  if (x == stack_pointer_rtx && rclass != SP_REGS)
    return (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
  else if (MEM_P (x)
	   || (REG_P (x)
	       && !HARD_REGISTER_P (x))
	   || (GET_CODE (x) == SUBREG
	       && REG_P (SUBREG_REG (x))
	       && !HARD_REGISTER_P (SUBREG_REG (x))))
    return LIMIT_RELOAD_CLASS (GET_MODE (x), rclass);
  else
    return rclass;
}

/* Implement TARGET_PREFERRED_OUTPUT_RELOAD_CLASS.  */

static reg_class_t
mn10300_preferred_output_reload_class (rtx x, reg_class_t rclass)
{
  if (x == stack_pointer_rtx && rclass != SP_REGS)
    return (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
  return rclass;
}

/* Implement TARGET_SECONDARY_RELOAD.  */

static reg_class_t
mn10300_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
			  enum machine_mode mode, secondary_reload_info *sri)
{
  enum reg_class rclass = (enum reg_class) rclass_i;
  enum reg_class xclass = NO_REGS;
  unsigned int xregno = INVALID_REGNUM;

  if (REG_P (x))
    {
      xregno = REGNO (x);
      if (xregno >= FIRST_PSEUDO_REGISTER)
	xregno = true_regnum (x);
      if (xregno != INVALID_REGNUM)
	xclass = REGNO_REG_CLASS (xregno);
    }

  if (!TARGET_AM33)
    {
      /* Memory load/stores less than a full word wide can't have an
	 address or stack pointer destination.  They must use a data
	 register as an intermediate register.  */
      if (rclass != DATA_REGS
	  && (mode == QImode || mode == HImode)
	  && xclass == NO_REGS)
	return DATA_REGS;

      /* We can only move SP to/from an address register.  */
      if (in_p
	  && rclass == SP_REGS
	  && xclass != ADDRESS_REGS)
	return ADDRESS_REGS;
      if (!in_p
	  && xclass == SP_REGS
	  && rclass != ADDRESS_REGS
	  && rclass != SP_OR_ADDRESS_REGS)
	return ADDRESS_REGS;
    }

  /* We can't directly load sp + const_int into a register;
     we must use an address register as a scratch.  */
  if (in_p
      && rclass != SP_REGS
      && rclass != SP_OR_ADDRESS_REGS
      && rclass != SP_OR_GENERAL_REGS
      && GET_CODE (x) == PLUS
      && (XEXP (x, 0) == stack_pointer_rtx
	  || XEXP (x, 1) == stack_pointer_rtx))
    {
      sri->icode = CODE_FOR_reload_plus_sp_const;
      return NO_REGS;
    }

  /* We can only move MDR to/from a data register.  */
  if (rclass == MDR_REGS && xclass != DATA_REGS)
    return DATA_REGS;
  if (xclass == MDR_REGS && rclass != DATA_REGS)
    return DATA_REGS;

  /* We can't load/store an FP register from a constant address.  */
  if (TARGET_AM33_2
      && (rclass == FP_REGS || xclass == FP_REGS)
      && (xclass == NO_REGS || rclass == NO_REGS))
    {
      rtx addr = NULL;

      if (xregno >= FIRST_PSEUDO_REGISTER && xregno != INVALID_REGNUM)
	{
	  addr = reg_equiv_mem (xregno);
	  if (addr)
	    addr = XEXP (addr, 0);
	}
      else if (MEM_P (x))
	addr = XEXP (x, 0);

      if (addr && CONSTANT_ADDRESS_P (addr))
	return GENERAL_REGS;
    }

  /* Otherwise assume no secondary reloads are needed.  */
  return NO_REGS;
}

int
mn10300_frame_size (void)
{
  /* size includes the fixed stack space needed for function calls.  */
  int size = get_frame_size () + crtl->outgoing_args_size;

  /* And space for the return pointer.  */
  size += crtl->outgoing_args_size ? 4 : 0;

  return size;
}

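/* Worked example (editor's note): with 16 bytes of locals and 8 bytes
   of outgoing argument space, mn10300_frame_size returns
   16 + 8 + 4 = 28; the extra 4 bytes are the return pointer slot that
   accompanies outgoing arguments.  */
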
int
mn10300_initial_offset (int from, int to)
{
  int diff = 0;

  gcc_assert (from == ARG_POINTER_REGNUM || from == FRAME_POINTER_REGNUM);
  gcc_assert (to == FRAME_POINTER_REGNUM || to == STACK_POINTER_REGNUM);

  if (to == STACK_POINTER_REGNUM)
    diff = mn10300_frame_size ();

  /* The difference between the argument pointer and the frame pointer
     is the size of the callee register save area.  */
  if (from == ARG_POINTER_REGNUM)
    {
      unsigned int reg_save_bytes;

      mn10300_get_live_callee_saved_regs (& reg_save_bytes);
      diff += reg_save_bytes;
      diff += 4 * fp_regs_to_save ();
    }

  return diff;
}

/* Worker function for TARGET_RETURN_IN_MEMORY.  */

static bool
mn10300_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  /* Return values > 8 bytes in length in memory.  */
  return (int_size_in_bytes (type) > 8
	  || int_size_in_bytes (type) == 0
	  || TYPE_MODE (type) == BLKmode);
}

/* Flush the argument registers to the stack for a stdarg function;
   return the new argument pointer.  */
static rtx
mn10300_builtin_saveregs (void)
{
  rtx offset, mem;
  tree fntype = TREE_TYPE (current_function_decl);
  int argadj = ((!stdarg_p (fntype))
		? UNITS_PER_WORD : 0);
  alias_set_type set = get_varargs_alias_set ();

  if (argadj)
    offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, argadj);
  else
    offset = crtl->args.arg_offset_rtx;

  mem = gen_rtx_MEM (SImode, crtl->args.internal_arg_pointer);
  set_mem_alias_set (mem, set);
  emit_move_insn (mem, gen_rtx_REG (SImode, 0));

  mem = gen_rtx_MEM (SImode,
		     plus_constant (Pmode,
				    crtl->args.internal_arg_pointer, 4));
  set_mem_alias_set (mem, set);
  emit_move_insn (mem, gen_rtx_REG (SImode, 1));

  return copy_to_reg (expand_binop (Pmode, add_optab,
				    crtl->args.internal_arg_pointer,
				    offset, 0, 0, OPTAB_LIB_WIDEN));
}

static void
mn10300_va_start (tree valist, rtx nextarg)
{
  nextarg = expand_builtin_saveregs ();
  std_expand_builtin_va_start (valist, nextarg);
}

/* Return true when a parameter should be passed by reference.  */

static bool
mn10300_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
			   enum machine_mode mode, const_tree type,
			   bool named ATTRIBUTE_UNUSED)
{
  unsigned HOST_WIDE_INT size;

  if (type)
    size = int_size_in_bytes (type);
  else
    size = GET_MODE_SIZE (mode);

  return (size > 8 || size == 0);
}

/* Return an RTX to represent where a value with mode MODE will be returned
   from a function.  If the result is NULL_RTX, the argument is pushed.  */

static rtx
mn10300_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
		      const_tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  rtx result = NULL_RTX;
  int size;

  /* We only support using 2 data registers as argument registers.  */
  int nregs = 2;

  /* Figure out the size of the object to be passed.  */
  if (mode == BLKmode)
    size = int_size_in_bytes (type);
  else
    size = GET_MODE_SIZE (mode);

  cum->nbytes = (cum->nbytes + 3) & ~3;

  /* Don't pass this arg via a register if all the argument registers
     are used up.  */
  if (cum->nbytes > nregs * UNITS_PER_WORD)
    return result;

  /* Don't pass this arg via a register if it would be split between
     registers and memory.  */
  if (type == NULL_TREE
      && cum->nbytes + size > nregs * UNITS_PER_WORD)
    return result;

  switch (cum->nbytes / UNITS_PER_WORD)
    {
    case 0:
      result = gen_rtx_REG (mode, FIRST_ARGUMENT_REGNUM);
      break;
    case 1:
      result = gen_rtx_REG (mode, FIRST_ARGUMENT_REGNUM + 1);
      break;
    default:
      break;
    }

  return result;
}

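/* Example (editor's note): for a call f(int, int, int), the first
   SImode argument is passed in the register named by
   FIRST_ARGUMENT_REGNUM (d0 on this port), the second in the next
   data register, and for the third this function returns NULL_RTX,
   so it is pushed on the stack.  */
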
/* Update the data in CUM to advance over an argument
   of mode MODE and data type TYPE.
   (TYPE is null for libcalls where that information may not be available.)  */

static void
mn10300_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
			      const_tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);

  cum->nbytes += (mode != BLKmode
		  ? (GET_MODE_SIZE (mode) + 3) & ~3
		  : (int_size_in_bytes (type) + 3) & ~3);
}

/* Return the number of bytes of registers to use for an argument passed
   partially in registers and partially in memory.  */

static int
mn10300_arg_partial_bytes (cumulative_args_t cum_v, enum machine_mode mode,
			   tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  int size;

  /* We only support using 2 data registers as argument registers.  */
  int nregs = 2;

  /* Figure out the size of the object to be passed.  */
  if (mode == BLKmode)
    size = int_size_in_bytes (type);
  else
    size = GET_MODE_SIZE (mode);

  cum->nbytes = (cum->nbytes + 3) & ~3;

  /* Don't pass this arg via a register if all the argument registers
     are used up.  */
  if (cum->nbytes > nregs * UNITS_PER_WORD)
    return 0;

  if (cum->nbytes + size <= nregs * UNITS_PER_WORD)
    return 0;

  /* Don't pass this arg via a register if it would be split between
     registers and memory.  */
  if (type == NULL_TREE
      && cum->nbytes + size > nregs * UNITS_PER_WORD)
    return 0;

  return nregs * UNITS_PER_WORD - cum->nbytes;
}

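/* Worked example (editor's note): with one SImode argument already
   counted (cum->nbytes == 4), a typed DImode argument (size 8) yields
   2 * UNITS_PER_WORD - 4 == 4, i.e. one word passed in a register and
   the remaining word in memory.  */
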
/* Return the location of the function's value.  This will be either
   $d0 for integer functions, $a0 for pointers, or a PARALLEL of both
   $d0 and $a0 if the -mreturn-pointer-on-d0 flag is set.  Note that
   we only return the PARALLEL for outgoing values; we do not want
   callers relying on this extra copy.  */

static rtx
mn10300_function_value (const_tree valtype,
			const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
			bool outgoing)
{
  rtx rv;
  enum machine_mode mode = TYPE_MODE (valtype);

  if (! POINTER_TYPE_P (valtype))
    return gen_rtx_REG (mode, FIRST_DATA_REGNUM);
  else if (! TARGET_PTR_A0D0 || ! outgoing
	   || cfun->returns_struct)
    return gen_rtx_REG (mode, FIRST_ADDRESS_REGNUM);

  rv = gen_rtx_PARALLEL (mode, rtvec_alloc (2));
  XVECEXP (rv, 0, 0)
    = gen_rtx_EXPR_LIST (VOIDmode,
			 gen_rtx_REG (mode, FIRST_ADDRESS_REGNUM),
			 GEN_INT (0));

  XVECEXP (rv, 0, 1)
    = gen_rtx_EXPR_LIST (VOIDmode,
			 gen_rtx_REG (mode, FIRST_DATA_REGNUM),
			 GEN_INT (0));
  return rv;
}

/* Implements TARGET_LIBCALL_VALUE.  */

static rtx
mn10300_libcall_value (enum machine_mode mode,
		       const_rtx fun ATTRIBUTE_UNUSED)
{
  return gen_rtx_REG (mode, FIRST_DATA_REGNUM);
}

/* Implements FUNCTION_VALUE_REGNO_P.  */

bool
mn10300_function_value_regno_p (const unsigned int regno)
{
  return (regno == FIRST_DATA_REGNUM || regno == FIRST_ADDRESS_REGNUM);
}

/* Output an addition operation.  */

const char *
mn10300_output_add (rtx operands[3], bool need_flags)
{
  rtx dest, src1, src2;
  unsigned int dest_regnum, src1_regnum, src2_regnum;
  enum reg_class src1_class, src2_class, dest_class;

  dest = operands[0];
  src1 = operands[1];
  src2 = operands[2];

  dest_regnum = true_regnum (dest);
  src1_regnum = true_regnum (src1);

  dest_class = REGNO_REG_CLASS (dest_regnum);
  src1_class = REGNO_REG_CLASS (src1_regnum);

  if (CONST_INT_P (src2))
    {
      gcc_assert (dest_regnum == src1_regnum);

      if (src2 == const1_rtx && !need_flags)
	return "inc %0";
      if (INTVAL (src2) == 4 && !need_flags && dest_class != DATA_REGS)
	return "inc4 %0";

      gcc_assert (!need_flags || dest_class != SP_REGS);
      return "add %2,%0";
    }
  else if (CONSTANT_P (src2))
    return "add %2,%0";

  src2_regnum = true_regnum (src2);
  src2_class = REGNO_REG_CLASS (src2_regnum);

  if (dest_regnum == src1_regnum)
    return "add %2,%0";
  if (dest_regnum == src2_regnum)
    return "add %1,%0";

  /* The rest of the cases are reg = reg+reg.  For AM33, we can implement
     this directly, as below, but when optimizing for space we can sometimes
     do better by using a mov+add.  For MN103, we claimed that we could
     implement a three-operand add because the various move and add insns
     change sizes across register classes, and we can often do better than
     reload in choosing which operand to move.  */
  if (TARGET_AM33 && optimize_insn_for_speed_p ())
    return "add %2,%1,%0";

  /* Catch cases where no extended register was used.  */
  if (src1_class != EXTENDED_REGS
      && src2_class != EXTENDED_REGS
      && dest_class != EXTENDED_REGS)
    {
      /* We have to copy one of the sources into the destination, then
	 add the other source to the destination.

	 Carefully select which source to copy to the destination; a
	 naive implementation will waste a byte when the source classes
	 are different and the destination is an address register.
	 Selecting the lowest cost register copy will optimize this
	 sequence.  */
      if (src1_class == dest_class)
	return "mov %1,%0\n\tadd %2,%0";
      else
	return "mov %2,%0\n\tadd %1,%0";
    }

  /* At least one register is an extended register.  */

  /* The three operand add instruction on the am33 is a win iff the
     output register is an extended register, or if both source
     registers are extended registers.  */
  if (dest_class == EXTENDED_REGS || src1_class == src2_class)
    return "add %2,%1,%0";

  /* It is better to copy one of the sources to the destination, then
     perform a 2 address add.  The destination in this case must be
     an address or data register and one of the sources must be an
     extended register and the remaining source must not be an extended
     register.

     The best code for this case is to copy the extended reg to the
     destination, then emit a two address add.  */
  if (src1_class == EXTENDED_REGS)
    return "mov %1,%0\n\tadd %2,%0";
  else
    return "mov %2,%0\n\tadd %1,%0";
}

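/* Illustrative outputs (editor's note): with NEED_FLAGS false,
   "d0 = d0 + 1" emits "inc d0"; "a0 = a0 + d1" emits "add d1,a0"; and
   a three-register "r0 = d0 + d1" on the AM33, when optimizing for
   speed, emits "add d1,d0,r0".  */
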
/* Return 1 if X contains a symbolic expression.  We know these
   expressions will have one of a few well-defined forms, so
   we need only check those forms.  */
3626e955 1770
c4cd8f6a 1771int
3626e955 1772mn10300_symbolic_operand (rtx op,
1773 enum machine_mode mode ATTRIBUTE_UNUSED)
c4cd8f6a 1774{
1775 switch (GET_CODE (op))
1776 {
1777 case SYMBOL_REF:
1778 case LABEL_REF:
1779 return 1;
1780 case CONST:
1781 op = XEXP (op, 0);
1782 return ((GET_CODE (XEXP (op, 0)) == SYMBOL_REF
1783 || GET_CODE (XEXP (op, 0)) == LABEL_REF)
4879b320 1784 && CONST_INT_P (XEXP (op, 1)));
c4cd8f6a 1785 default:
1786 return 0;
1787 }
1788}
1789
1790/* Try machine dependent ways of modifying an illegitimate address
1791 to be legitimate. If we find one, return the new valid address.
1792 This macro is used in only one place: `memory_address' in explow.c.
1793
1794 OLDX is the address as it was before break_out_memory_refs was called.
1795 In some cases it is useful to look at this to decide what needs to be done.
1796
c4cd8f6a 1797 Normally it is always safe for this macro to do nothing. It exists to
1798 recognize opportunities to optimize the output.
1799
1800 But on a few ports with segmented architectures and indexed addressing
1801 (mn10300, hppa) it is used to rewrite certain problematical addresses. */
3626e955 1802
5574dbdd 1803static rtx
41e3a0c7 1804mn10300_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
1805 enum machine_mode mode ATTRIBUTE_UNUSED)
c4cd8f6a 1806{
3626e955 1807 if (flag_pic && ! mn10300_legitimate_pic_operand_p (x))
1808 x = mn10300_legitimize_pic_address (oldx, NULL_RTX);
b87a151a 1809
c4cd8f6a 1810 /* Uh-oh. We might have an address for x[n-100000]. This needs
1811 special handling to avoid creating an indexed memory address
1812 with x-100000 as the base. */
1813 if (GET_CODE (x) == PLUS
3626e955 1814 && mn10300_symbolic_operand (XEXP (x, 1), VOIDmode))
c4cd8f6a 1815 {
1816 /* Ugly. We modify things here so that the address offset specified
1817 by the index expression is computed first, then added to x to form
1818 the entire address. */
1819
59086782 1820 rtx regx1, regy1, regy2, y;
c4cd8f6a 1821
1822 /* Strip off any CONST. */
1823 y = XEXP (x, 1);
1824 if (GET_CODE (y) == CONST)
1825 y = XEXP (y, 0);
1826
c927a8ab 1827 if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
1828 {
1829 regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
1830 regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
1831 regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
1832 regx1 = force_reg (Pmode,
3626e955 1833 gen_rtx_fmt_ee (GET_CODE (y), Pmode, regx1,
1834 regy2));
7014838c 1835 return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
c927a8ab 1836 }
c4cd8f6a 1837 }
11b4605c 1838 return x;
c4cd8f6a 1839}
e2aead91 1840
b87a151a 1841/* Convert a non-PIC address in `orig' to a PIC address using @GOT or
09e5ce26 1842 @GOTOFF in `reg'. */
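/* For example (a sketch): a local symbol SYM is reached relative to
   the PIC register,

     reg = (const (unspec [SYM] UNSPEC_GOTOFF))
     reg += pic_offset_table_rtx

   while a global symbol is loaded through its GOT slot:

     reg = (mem (plus pic_offset_table_rtx
                      (const (unspec [SYM] UNSPEC_GOT))))  */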
3626e955 1843
b87a151a 1844rtx
3626e955 1845mn10300_legitimize_pic_address (rtx orig, rtx reg)
b87a151a 1846{
d92c1383 1847 rtx x;
1848
b87a151a 1849 if (GET_CODE (orig) == LABEL_REF
1850 || (GET_CODE (orig) == SYMBOL_REF
1851 && (CONSTANT_POOL_ADDRESS_P (orig)
1852 || ! MN10300_GLOBAL_P (orig))))
1853 {
d92c1383 1854 if (reg == NULL)
b87a151a 1855 reg = gen_reg_rtx (Pmode);
1856
d92c1383 1857 x = gen_rtx_UNSPEC (SImode, gen_rtvec (1, orig), UNSPEC_GOTOFF);
1858 x = gen_rtx_CONST (SImode, x);
1859 emit_move_insn (reg, x);
1860
1861 x = emit_insn (gen_addsi3 (reg, reg, pic_offset_table_rtx));
b87a151a 1862 }
1863 else if (GET_CODE (orig) == SYMBOL_REF)
1864 {
d92c1383 1865 if (reg == NULL)
b87a151a 1866 reg = gen_reg_rtx (Pmode);
1867
d92c1383 1868 x = gen_rtx_UNSPEC (SImode, gen_rtvec (1, orig), UNSPEC_GOT);
1869 x = gen_rtx_CONST (SImode, x);
1870 x = gen_rtx_PLUS (SImode, pic_offset_table_rtx, x);
1871 x = gen_const_mem (SImode, x);
1872
1873 x = emit_move_insn (reg, x);
b87a151a 1874 }
d92c1383 1875 else
1876 return orig;
1877
1878 set_unique_reg_note (x, REG_EQUAL, orig);
1879 return reg;
b87a151a 1880}
1881
1882/* Return zero if X references a SYMBOL_REF or LABEL_REF whose symbol
fa483857 1883 isn't protected by a PIC unspec; nonzero otherwise. */
3626e955 1884
b87a151a 1885int
3626e955 1886mn10300_legitimate_pic_operand_p (rtx x)
b87a151a 1887{
3626e955 1888 const char *fmt;
1889 int i;
b87a151a 1890
1891 if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
1892 return 0;
1893
1894 if (GET_CODE (x) == UNSPEC
1895 && (XINT (x, 1) == UNSPEC_PIC
1896 || XINT (x, 1) == UNSPEC_GOT
1897 || XINT (x, 1) == UNSPEC_GOTOFF
b6e3379c 1898 || XINT (x, 1) == UNSPEC_PLT
1899 || XINT (x, 1) == UNSPEC_GOTSYM_OFF))
b87a151a 1900 return 1;
1901
b87a151a 1902 fmt = GET_RTX_FORMAT (GET_CODE (x));
1903 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
1904 {
1905 if (fmt[i] == 'E')
1906 {
5574dbdd 1907 int j;
b87a151a 1908
1909 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3626e955 1910 if (! mn10300_legitimate_pic_operand_p (XVECEXP (x, i, j)))
b87a151a 1911 return 0;
1912 }
3626e955 1913 else if (fmt[i] == 'e'
1914 && ! mn10300_legitimate_pic_operand_p (XEXP (x, i)))
b87a151a 1915 return 0;
1916 }
1917
1918 return 1;
1919}
1920
5411aa8c 1921/* Return TRUE if the address X, taken from a (MEM:MODE X) rtx, is
fd50b071 1922 legitimate, and FALSE otherwise.
1923
1924 On the mn10300, the value in the address register must be
1925 in the same memory space/segment as the effective address.
1926
1927 This is problematical for reload since it does not understand
1928 that base+index != index+base in a memory reference.
1929
1930 Note it is still possible to use reg+reg addressing modes,
1931 it's just much more difficult. For a discussion of a possible
1932 workaround and solution, see the comments in pa.c before the
1933 function record_unscaled_index_insn_codes. */
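/* By way of example, the checks below accept forms such as

     (reg An)                        register indirect
     (plus (reg An) (const_int D))   base plus displacement
     (plus (reg) (reg))              reg+reg, AM33 only
     (post_inc (reg An))             AM33 only, SI/SF/HImode

   subject to the register-class checks done by RTX_OK_FOR_BASE_P.  */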
1934
5574dbdd 1935static bool
fd50b071 1936mn10300_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
5411aa8c 1937{
c8a596d6 1938 rtx base, index;
1939
1940 if (CONSTANT_ADDRESS_P (x))
1941 return !flag_pic || mn10300_legitimate_pic_operand_p (x);
5411aa8c 1942
1943 if (RTX_OK_FOR_BASE_P (x, strict))
c8a596d6 1944 return true;
1945
1946 if (TARGET_AM33 && (mode == SImode || mode == SFmode || mode == HImode))
1947 {
1948 if (GET_CODE (x) == POST_INC)
1949 return RTX_OK_FOR_BASE_P (XEXP (x, 0), strict);
1950 if (GET_CODE (x) == POST_MODIFY)
1951 return (RTX_OK_FOR_BASE_P (XEXP (x, 0), strict)
1952 && CONSTANT_ADDRESS_P (XEXP (x, 1)));
1953 }
1954
1955 if (GET_CODE (x) != PLUS)
1956 return false;
5411aa8c 1957
c8a596d6 1958 base = XEXP (x, 0);
1959 index = XEXP (x, 1);
5411aa8c 1960
c8a596d6 1961 if (!REG_P (base))
1962 return false;
1963 if (REG_P (index))
5411aa8c 1964 {
c8a596d6 1965 /* ??? Without AM33 generalized (Ri,Rn) addressing, reg+reg
1966 addressing is hard to satisfy. */
1967 if (!TARGET_AM33)
1968 return false;
5411aa8c 1969
c8a596d6 1970 return (REGNO_GENERAL_P (REGNO (base), strict)
1971 && REGNO_GENERAL_P (REGNO (index), strict));
1972 }
5411aa8c 1973
c8a596d6 1974 if (!REGNO_STRICT_OK_FOR_BASE_P (REGNO (base), strict))
1975 return false;
5411aa8c 1976
c8a596d6 1977 if (CONST_INT_P (index))
1978 return IN_RANGE (INTVAL (index), -1 - 0x7fffffff, 0x7fffffff);
1979
1980 if (CONSTANT_ADDRESS_P (index))
1981 return !flag_pic || mn10300_legitimate_pic_operand_p (index);
1982
1983 return false;
1984}
1985
1986bool
1987mn10300_regno_in_class_p (unsigned regno, int rclass, bool strict)
1988{
1989 if (regno >= FIRST_PSEUDO_REGISTER)
1990 {
1991 if (!strict)
1992 return true;
1993 if (!reg_renumber)
1994 return false;
1995 regno = reg_renumber[regno];
c2fa9c24 1996 if (regno == INVALID_REGNUM)
1997 return false;
c8a596d6 1998 }
1999 return TEST_HARD_REG_BIT (reg_class_contents[rclass], regno);
2000}
2001
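/* Used when reloading addresses, presumably invoked via the
   LEGITIMIZE_RELOAD_ADDRESS macro.  As an illustrative sketch: for an
   AM33 reg+reg address such as

     (plus (reg SP) (reg D0))

   the stack pointer operand is pushed into a GENERAL_REGS reload, so
   that reload copies SP into a general register before the use.  */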
2002rtx
2003mn10300_legitimize_reload_address (rtx x,
2004 enum machine_mode mode ATTRIBUTE_UNUSED,
2005 int opnum, int type,
2006 int ind_levels ATTRIBUTE_UNUSED)
2007{
2008 bool any_change = false;
2009
2010 /* See above re disabling reg+reg addressing for MN103. */
2011 if (!TARGET_AM33)
2012 return NULL_RTX;
2013
2014 if (GET_CODE (x) != PLUS)
2015 return NULL_RTX;
2016
2017 if (XEXP (x, 0) == stack_pointer_rtx)
2018 {
2019 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
2020 GENERAL_REGS, GET_MODE (x), VOIDmode, 0, 0,
2021 opnum, (enum reload_type) type);
2022 any_change = true;
2023 }
2024 if (XEXP (x, 1) == stack_pointer_rtx)
2025 {
2026 push_reload (XEXP (x, 1), NULL_RTX, &XEXP (x, 1), NULL,
2027 GENERAL_REGS, GET_MODE (x), VOIDmode, 0, 0,
2028 opnum, (enum reload_type) type);
2029 any_change = true;
5411aa8c 2030 }
2031
c8a596d6 2032 return any_change ? x : NULL_RTX;
5411aa8c 2033}
2034
ca316360 2035/* Implement TARGET_LEGITIMATE_CONSTANT_P. Returns TRUE if X is a valid
5574dbdd 2036 constant. Note that some "constants" aren't valid, such as TLS
2037 symbols and unconverted GOT-based references, so we eliminate
2038 those here. */
2039
ca316360 2040static bool
2041mn10300_legitimate_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
5574dbdd 2042{
2043 switch (GET_CODE (x))
2044 {
2045 case CONST:
2046 x = XEXP (x, 0);
2047
2048 if (GET_CODE (x) == PLUS)
2049 {
3626e955 2050 if (! CONST_INT_P (XEXP (x, 1)))
5574dbdd 2051 return false;
2052 x = XEXP (x, 0);
2053 }
2054
2055 /* Only some unspecs are valid as "constants". */
2056 if (GET_CODE (x) == UNSPEC)
2057 {
5574dbdd 2058 switch (XINT (x, 1))
2059 {
5574dbdd 2060 case UNSPEC_PIC:
2061 case UNSPEC_GOT:
2062 case UNSPEC_GOTOFF:
2063 case UNSPEC_PLT:
2064 return true;
2065 default:
2066 return false;
2067 }
2068 }
2069
2070 /* We must have drilled down to a symbol. */
3626e955 2071 if (! mn10300_symbolic_operand (x, Pmode))
5574dbdd 2072 return false;
2073 break;
2074
2075 default:
2076 break;
2077 }
2078
2079 return true;
2080}
2081
4c6c308e 2082/* Undo pic address legitimization for the benefit of debug info. */
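/* For example, (mem (plus (reg PIC) (const (unspec [SYM] UNSPEC_GOT))))
   is mapped back to plain SYM, and a GOTOFF sum back to SYM or
   (plus ADDEND SYM).  */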
2083
2084static rtx
2085mn10300_delegitimize_address (rtx orig_x)
2086{
2087 rtx x = orig_x, ret, addend = NULL;
2088 bool need_mem;
2089
2090 if (MEM_P (x))
2091 x = XEXP (x, 0);
2092 if (GET_CODE (x) != PLUS || GET_MODE (x) != Pmode)
2093 return orig_x;
2094
2095 if (XEXP (x, 0) == pic_offset_table_rtx)
2096 ;
2097 /* With the REG+REG addressing of AM33, var-tracking can re-assemble
2098 some odd-looking "addresses" that were never valid in the first place.
2099 We need to look harder to avoid warnings being emitted. */
2100 else if (GET_CODE (XEXP (x, 0)) == PLUS)
2101 {
2102 rtx x0 = XEXP (x, 0);
2103 rtx x00 = XEXP (x0, 0);
2104 rtx x01 = XEXP (x0, 1);
2105
2106 if (x00 == pic_offset_table_rtx)
2107 addend = x01;
2108 else if (x01 == pic_offset_table_rtx)
2109 addend = x00;
2110 else
2111 return orig_x;
2112
2113 }
2114 else
2115 return orig_x;
2116 x = XEXP (x, 1);
2117
2118 if (GET_CODE (x) != CONST)
2119 return orig_x;
2120 x = XEXP (x, 0);
2121 if (GET_CODE (x) != UNSPEC)
2122 return orig_x;
2123
2124 ret = XVECEXP (x, 0, 0);
2125 if (XINT (x, 1) == UNSPEC_GOTOFF)
2126 need_mem = false;
2127 else if (XINT (x, 1) == UNSPEC_GOT)
2128 need_mem = true;
2129 else
2130 return orig_x;
2131
2132 gcc_assert (GET_CODE (ret) == SYMBOL_REF);
2133 if (need_mem != MEM_P (orig_x))
2134 return orig_x;
2135 if (need_mem && addend)
2136 return orig_x;
2137 if (addend)
2138 ret = gen_rtx_PLUS (Pmode, addend, ret);
2139 return ret;
2140}
2141
28f32607 2142/* For addresses, costs are relative to "MOV (Rm),Rn". For AM33 this is
2143 the 3-byte fully general instruction; for MN103 this is the 2-byte form
2144 with an address register. */
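/* Illustrating the scale: (reg An) costs 0; (plus (reg An) (const_int 4))
   costs 0 for speed and 1 for size; a bare (symbol_ref "x") costs 1 for
   speed and 4 for size, being assumed to need a 32-bit constant.  */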
2145
ec0457a8 2146static int
d9c5e5f4 2147mn10300_address_cost (rtx x, enum machine_mode mode ATTRIBUTE_UNUSED,
2148 addr_space_t as ATTRIBUTE_UNUSED, bool speed)
e2aead91 2149{
28f32607 2150 HOST_WIDE_INT i;
2151 rtx base, index;
2152
e2aead91 2153 switch (GET_CODE (x))
2154 {
28f32607 2155 case CONST:
2156 case SYMBOL_REF:
2157 case LABEL_REF:
2158 /* We assume all of these require a 32-bit constant, even though
2159 some symbol and label references can be relaxed. */
2160 return speed ? 1 : 4;
2161
e2aead91 2162 case REG:
28f32607 2163 case SUBREG:
2164 case POST_INC:
2165 return 0;
2166
2167 case POST_MODIFY:
2168 /* Assume any symbolic offset is a 32-bit constant. */
2169 i = (CONST_INT_P (XEXP (x, 1)) ? INTVAL (XEXP (x, 1)) : 0x12345678);
2170 if (IN_RANGE (i, -128, 127))
2171 return speed ? 0 : 1;
2172 if (speed)
2173 return 1;
2174 if (IN_RANGE (i, -0x800000, 0x7fffff))
2175 return 3;
2176 return 4;
2177
2178 case PLUS:
2179 base = XEXP (x, 0);
2180 index = XEXP (x, 1);
2181 if (register_operand (index, SImode))
e2aead91 2182 {
28f32607 2183 /* Attempt to minimize the number of registers in the address.
2184 This is similar to what other ports do. */
2185 if (register_operand (base, SImode))
2186 return 1;
e2aead91 2187
28f32607 2188 base = XEXP (x, 1);
2189 index = XEXP (x, 0);
2190 }
e2aead91 2191
28f32607 2192 /* Assume any symbolic offset is a 32-bit constant. */
2193 i = (CONST_INT_P (XEXP (x, 1)) ? INTVAL (XEXP (x, 1)) : 0x12345678);
2194 if (IN_RANGE (i, -128, 127))
2195 return speed ? 0 : 1;
2196 if (IN_RANGE (i, -32768, 32767))
2197 return speed ? 0 : 2;
2198 return speed ? 2 : 6;
e2aead91 2199
28f32607 2200 default:
20d892d1 2201 return rtx_cost (x, MEM, 0, speed);
28f32607 2202 }
2203}
e2aead91 2204
28f32607 2205/* Implement the TARGET_REGISTER_MOVE_COST hook.
e2aead91 2206
28f32607 2207 Recall that the base value of 2 is required by assumptions elsewhere
2208 in the body of the compiler, and that cost 2 is special-cased as an
2209 early exit from reload meaning no work is required. */
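/* For example, with -Os (scale bytes * 2) an ADDRESS->ADDRESS copy
   costs 2, an SP->DATA copy costs 6, and an illegal DATA->SP move on
   MN103 is costed as two moves through an ADDRESS_REGS scratch.  */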
e2aead91 2210
28f32607 2211static int
2212mn10300_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2213 reg_class_t ifrom, reg_class_t ito)
2214{
2215 enum reg_class from = (enum reg_class) ifrom;
2216 enum reg_class to = (enum reg_class) ito;
2217 enum reg_class scratch, test;
2218
2219 /* Simplify the following code by unifying the fp register classes. */
2220 if (to == FP_ACC_REGS)
2221 to = FP_REGS;
2222 if (from == FP_ACC_REGS)
2223 from = FP_REGS;
2224
2225 /* Diagnose invalid moves by costing them as two moves. */
2226
2227 scratch = NO_REGS;
2228 test = from;
2229 if (to == SP_REGS)
2230 scratch = (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
85a6eed4 2231 else if (to == MDR_REGS)
2232 scratch = DATA_REGS;
28f32607 2233 else if (to == FP_REGS && to != from)
2234 scratch = GENERAL_REGS;
2235 else
2236 {
2237 test = to;
2238 if (from == SP_REGS)
2239 scratch = (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
85a6eed4 2240 else if (from == MDR_REGS)
2241 scratch = DATA_REGS;
28f32607 2242 else if (from == FP_REGS && to != from)
2243 scratch = GENERAL_REGS;
2244 }
2245 if (scratch != NO_REGS && !reg_class_subset_p (test, scratch))
2246 return (mn10300_register_move_cost (VOIDmode, from, scratch)
2247 + mn10300_register_move_cost (VOIDmode, scratch, to));
e2aead91 2248
28f32607 2249 /* From here on, all we need consider are legal combinations. */
e2aead91 2250
28f32607 2251 if (optimize_size)
2252 {
2253 /* The scale here is bytes * 2. */
e2aead91 2254
28f32607 2255 if (from == to && (to == ADDRESS_REGS || to == DATA_REGS))
2256 return 2;
e2aead91 2257
28f32607 2258 if (from == SP_REGS)
2259 return (to == ADDRESS_REGS ? 2 : 6);
2260
      /* On the AM33, all remaining legal moves are two bytes.  */
2262 if (TARGET_AM33)
2263 return 4;
2264
2265 if (to == SP_REGS)
2266 return (from == ADDRESS_REGS ? 4 : 6);
2267
2268 if ((from == ADDRESS_REGS || from == DATA_REGS)
2269 && (to == ADDRESS_REGS || to == DATA_REGS))
2270 return 4;
2271
2272 if (to == EXTENDED_REGS)
2273 return (to == from ? 6 : 4);
e2aead91 2274
28f32607 2275 /* What's left are SP_REGS, FP_REGS, or combinations of the above. */
2276 return 6;
2277 }
2278 else
2279 {
2280 /* The scale here is cycles * 2. */
2281
2282 if (to == FP_REGS)
2283 return 8;
2284 if (from == FP_REGS)
2285 return 4;
2286
2287 /* All legal moves between integral registers are single cycle. */
2288 return 2;
e2aead91 2289 }
2290}
fab7adbf 2291
28f32607 2292/* Implement the TARGET_MEMORY_MOVE_COST hook.
2293
   Since we do not know the form of the address, this must be speed-relative,
2295 though we should never be less expensive than a size-relative register
2296 move cost above. This is not a problem. */
2297
ec0457a8 2298static int
28f32607 2299mn10300_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2300 reg_class_t iclass, bool in ATTRIBUTE_UNUSED)
ec0457a8 2301{
28f32607 2302 enum reg_class rclass = (enum reg_class) iclass;
2303
2304 if (rclass == FP_REGS)
2305 return 8;
2306 return 6;
ec0457a8 2307}
2308
28f32607 2309/* Implement the TARGET_RTX_COSTS hook.
2310
2311 Speed-relative costs are relative to COSTS_N_INSNS, which is intended
2312 to represent cycles. Size-relative costs are in bytes. */
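/* For instance, under the size metric below (set (reg) (const_int 0))
   costs 1 (a one-byte clear), while under the speed metric a division
   is charged COSTS_N_INSNS (39).  */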
2313
fab7adbf 2314static bool
20d892d1 2315mn10300_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
2316 int *ptotal, bool speed)
fab7adbf 2317{
28f32607 2318 /* This value is used for SYMBOL_REF etc where we want to pretend
2319 we have a full 32-bit constant. */
2320 HOST_WIDE_INT i = 0x12345678;
2321 int total;
2322
fab7adbf 2323 switch (code)
2324 {
2325 case CONST_INT:
28f32607 2326 i = INTVAL (x);
2327 do_int_costs:
2328 if (speed)
2329 {
2330 if (outer_code == SET)
2331 {
2332 /* 16-bit integer loads have latency 1, 32-bit loads 2. */
2333 if (IN_RANGE (i, -32768, 32767))
2334 total = COSTS_N_INSNS (1);
2335 else
2336 total = COSTS_N_INSNS (2);
2337 }
2338 else
2339 {
2340 /* 16-bit integer operands don't affect latency;
2341 24-bit and 32-bit operands add a cycle. */
2342 if (IN_RANGE (i, -32768, 32767))
2343 total = 0;
2344 else
2345 total = COSTS_N_INSNS (1);
2346 }
2347 }
fab7adbf 2348 else
28f32607 2349 {
2350 if (outer_code == SET)
2351 {
2352 if (i == 0)
2353 total = 1;
2354 else if (IN_RANGE (i, -128, 127))
2355 total = 2;
2356 else if (IN_RANGE (i, -32768, 32767))
2357 total = 3;
2358 else
2359 total = 6;
2360 }
2361 else
2362 {
2363 /* Reference here is ADD An,Dn, vs ADD imm,Dn. */
2364 if (IN_RANGE (i, -128, 127))
2365 total = 0;
2366 else if (IN_RANGE (i, -32768, 32767))
2367 total = 2;
2368 else if (TARGET_AM33 && IN_RANGE (i, -0x01000000, 0x00ffffff))
2369 total = 3;
2370 else
2371 total = 4;
2372 }
2373 }
2374 goto alldone;
fab7adbf 2375
2376 case CONST:
2377 case LABEL_REF:
2378 case SYMBOL_REF:
fab7adbf 2379 case CONST_DOUBLE:
28f32607 2380 /* We assume all of these require a 32-bit constant, even though
2381 some symbol and label references can be relaxed. */
2382 goto do_int_costs;
74f4459c 2383
28f32607 2384 case UNSPEC:
2385 switch (XINT (x, 1))
2386 {
2387 case UNSPEC_PIC:
2388 case UNSPEC_GOT:
2389 case UNSPEC_GOTOFF:
2390 case UNSPEC_PLT:
2391 case UNSPEC_GOTSYM_OFF:
2392 /* The PIC unspecs also resolve to a 32-bit constant. */
2393 goto do_int_costs;
fab7adbf 2394
28f32607 2395 default:
2396 /* Assume any non-listed unspec is some sort of arithmetic. */
2397 goto do_arith_costs;
2398 }
8935d57c 2399
28f32607 2400 case PLUS:
2401 /* Notice the size difference of INC and INC4. */
2402 if (!speed && outer_code == SET && CONST_INT_P (XEXP (x, 1)))
2403 {
2404 i = INTVAL (XEXP (x, 1));
2405 if (i == 1 || i == 4)
2406 {
20d892d1 2407 total = 1 + rtx_cost (XEXP (x, 0), PLUS, 0, speed);
28f32607 2408 goto alldone;
2409 }
2410 }
2411 goto do_arith_costs;
2412
2413 case MINUS:
2414 case AND:
2415 case IOR:
2416 case XOR:
2417 case NOT:
2418 case NEG:
2419 case ZERO_EXTEND:
2420 case SIGN_EXTEND:
2421 case COMPARE:
2422 case BSWAP:
2423 case CLZ:
2424 do_arith_costs:
2425 total = (speed ? COSTS_N_INSNS (1) : 2);
2426 break;
8935d57c 2427
28f32607 2428 case ASHIFT:
2429 /* Notice the size difference of ASL2 and variants. */
2430 if (!speed && CONST_INT_P (XEXP (x, 1)))
2431 switch (INTVAL (XEXP (x, 1)))
2432 {
2433 case 1:
2434 case 2:
2435 total = 1;
2436 goto alldone;
2437 case 3:
2438 case 4:
2439 total = 2;
2440 goto alldone;
2441 }
2442 /* FALLTHRU */
8935d57c 2443
28f32607 2444 case ASHIFTRT:
2445 case LSHIFTRT:
2446 total = (speed ? COSTS_N_INSNS (1) : 3);
2447 goto alldone;
8935d57c 2448
28f32607 2449 case MULT:
2450 total = (speed ? COSTS_N_INSNS (3) : 2);
8935d57c 2451 break;
fb16c776 2452
28f32607 2453 case DIV:
2454 case UDIV:
2455 case MOD:
2456 case UMOD:
2457 total = (speed ? COSTS_N_INSNS (39)
2458 /* Include space to load+retrieve MDR. */
2459 : code == MOD || code == UMOD ? 6 : 4);
8935d57c 2460 break;
fb16c776 2461
28f32607 2462 case MEM:
d9c5e5f4 2463 total = mn10300_address_cost (XEXP (x, 0), GET_MODE (x),
2464 MEM_ADDR_SPACE (x), speed);
28f32607 2465 if (speed)
2466 total = COSTS_N_INSNS (2 + total);
2467 goto alldone;
2468
8935d57c 2469 default:
28f32607 2470 /* Probably not implemented. Assume external call. */
2471 total = (speed ? COSTS_N_INSNS (10) : 7);
2472 break;
8935d57c 2473 }
2474
28f32607 2475 *ptotal = total;
2476 return false;
2477
2478 alldone:
2479 *ptotal = total;
2480 return true;
8935d57c 2481}
28f32607 2482
b87a151a 2483/* If using PIC, mark a SYMBOL_REF for a non-global symbol so that we
2484 may access it using GOTOFF instead of GOT. */
2485
2486static void
b4d5791b 2487mn10300_encode_section_info (tree decl, rtx rtl, int first)
b87a151a 2488{
2489 rtx symbol;
2490
b4d5791b 2491 default_encode_section_info (decl, rtl, first);
2492
3626e955 2493 if (! MEM_P (rtl))
b87a151a 2494 return;
b4d5791b 2495
b87a151a 2496 symbol = XEXP (rtl, 0);
2497 if (GET_CODE (symbol) != SYMBOL_REF)
2498 return;
2499
2500 if (flag_pic)
2501 SYMBOL_REF_FLAG (symbol) = (*targetm.binds_local_p) (decl);
2502}
906bb5c3 2503
2504/* Dispatch tables on the mn10300 are extremely expensive in terms of code
2505 and readonly data size. So we crank up the case threshold value to
2506 encourage a series of if/else comparisons to implement many small switch
2507 statements. In theory, this value could be increased much more if we
2508 were solely optimizing for space, but we keep it "reasonable" to avoid
2509 serious code efficiency lossage. */
2510
5574dbdd 2511static unsigned int
2512mn10300_case_values_threshold (void)
906bb5c3 2513{
2514 return 6;
2515}
3e16f982 2516
3e16f982 2517/* Worker function for TARGET_TRAMPOLINE_INIT. */
2518
2519static void
2520mn10300_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
2521{
3562cea7 2522 rtx mem, disp, fnaddr = XEXP (DECL_RTL (fndecl), 0);
2523
2524 /* This is a strict alignment target, which means that we play
2525 some games to make sure that the locations at which we need
2526 to store <chain> and <disp> wind up at aligned addresses.
2527
2528 0x28 0x00 add 0,d0
2529 0xfc 0xdd mov chain,a1
2530 <chain>
2531 0xf8 0xed 0x00 btst 0,d1
2532 0xdc jmp fnaddr
2533 <disp>
2534
2535 Note that the two extra insns are effectively nops; they
2536 clobber the flags but do not affect the contents of D0 or D1. */
3e16f982 2537
3562cea7 2538 disp = expand_binop (SImode, sub_optab, fnaddr,
29c05e22 2539 plus_constant (Pmode, XEXP (m_tramp, 0), 11),
3562cea7 2540 NULL_RTX, 1, OPTAB_DIRECT);
3e16f982 2541
3562cea7 2542 mem = adjust_address (m_tramp, SImode, 0);
2543 emit_move_insn (mem, gen_int_mode (0xddfc0028, SImode));
2544 mem = adjust_address (m_tramp, SImode, 4);
3e16f982 2545 emit_move_insn (mem, chain_value);
3562cea7 2546 mem = adjust_address (m_tramp, SImode, 8);
2547 emit_move_insn (mem, gen_int_mode (0xdc00edf8, SImode));
2548 mem = adjust_address (m_tramp, SImode, 12);
2549 emit_move_insn (mem, disp);
3e16f982 2550}
e92d3ba8 2551
2552/* Output the assembler code for a C++ thunk function.
2553 THUNK_DECL is the declaration for the thunk function itself, FUNCTION
2554 is the decl for the target function. DELTA is an immediate constant
2555 offset to be added to the THIS parameter. If VCALL_OFFSET is nonzero
2556 the word at the adjusted address *(*THIS' + VCALL_OFFSET) should be
2557 additionally added to THIS. Finally jump to the entry point of
2558 FUNCTION. */
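/* As a sketch, for DELTA == 4 and VCALL_OFFSET == 0, with THIS in d0,
   the code emitted below is simply:

       add 4, d0
       jmp <function>  */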
2559
2560static void
2561mn10300_asm_output_mi_thunk (FILE * file,
2562 tree thunk_fndecl ATTRIBUTE_UNUSED,
2563 HOST_WIDE_INT delta,
2564 HOST_WIDE_INT vcall_offset,
2565 tree function)
2566{
2567 const char * _this;
2568
2569 /* Get the register holding the THIS parameter. Handle the case
2570 where there is a hidden first argument for a returned structure. */
2571 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
2572 _this = reg_names [FIRST_ARGUMENT_REGNUM + 1];
2573 else
2574 _this = reg_names [FIRST_ARGUMENT_REGNUM];
2575
2576 fprintf (file, "\t%s Thunk Entry Point:\n", ASM_COMMENT_START);
2577
2578 if (delta)
2579 fprintf (file, "\tadd %d, %s\n", (int) delta, _this);
2580
2581 if (vcall_offset)
2582 {
2583 const char * scratch = reg_names [FIRST_ADDRESS_REGNUM + 1];
2584
2585 fprintf (file, "\tmov %s, %s\n", _this, scratch);
2586 fprintf (file, "\tmov (%s), %s\n", scratch, scratch);
2587 fprintf (file, "\tadd %d, %s\n", (int) vcall_offset, scratch);
2588 fprintf (file, "\tmov (%s), %s\n", scratch, scratch);
2589 fprintf (file, "\tadd %s, %s\n", scratch, _this);
2590 }
2591
2592 fputs ("\tjmp ", file);
2593 assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
2594 putc ('\n', file);
2595}
2596
2597/* Return true if mn10300_output_mi_thunk would be able to output the
2598 assembler code for the thunk function specified by the arguments
2599 it is passed, and false otherwise. */
2600
2601static bool
2602mn10300_can_output_mi_thunk (const_tree thunk_fndecl ATTRIBUTE_UNUSED,
2603 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
2604 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
2605 const_tree function ATTRIBUTE_UNUSED)
2606{
2607 return true;
2608}
5574dbdd 2609
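/* Worker for HARD_REGNO_MODE_OK.  For example, an 8-byte value must
   live at an even register number, and FP registers may only hold
   floating point modes (again at even register numbers).  */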
2610bool
2611mn10300_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
2612{
2613 if (REGNO_REG_CLASS (regno) == FP_REGS
2614 || REGNO_REG_CLASS (regno) == FP_ACC_REGS)
2615 /* Do not store integer values in FP registers. */
2616 return GET_MODE_CLASS (mode) == MODE_FLOAT && ((regno & 1) == 0);
2617
  if ((regno & 1) == 0 || GET_MODE_SIZE (mode) == 4)
2619 return true;
2620
2621 if (REGNO_REG_CLASS (regno) == DATA_REGS
2622 || (TARGET_AM33 && REGNO_REG_CLASS (regno) == ADDRESS_REGS)
2623 || REGNO_REG_CLASS (regno) == EXTENDED_REGS)
2624 return GET_MODE_SIZE (mode) <= 4;
2625
2626 return false;
2627}
2628
2629bool
2630mn10300_modes_tieable (enum machine_mode mode1, enum machine_mode mode2)
2631{
2632 if (GET_MODE_CLASS (mode1) == MODE_FLOAT
2633 && GET_MODE_CLASS (mode2) != MODE_FLOAT)
2634 return false;
2635
2636 if (GET_MODE_CLASS (mode2) == MODE_FLOAT
2637 && GET_MODE_CLASS (mode1) != MODE_FLOAT)
2638 return false;
2639
2640 if (TARGET_AM33
2641 || mode1 == mode2
2642 || (GET_MODE_SIZE (mode1) <= 4 && GET_MODE_SIZE (mode2) <= 4))
2643 return true;
2644
2645 return false;
2646}
2647
990679af 2648static int
2649cc_flags_for_mode (enum machine_mode mode)
2650{
2651 switch (mode)
2652 {
2653 case CCmode:
2654 return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_C | CC_FLAG_V;
2655 case CCZNCmode:
2656 return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_C;
2657 case CCZNmode:
2658 return CC_FLAG_Z | CC_FLAG_N;
2659 case CC_FLOATmode:
2660 return -1;
2661 default:
2662 gcc_unreachable ();
2663 }
2664}
2665
2666static int
2667cc_flags_for_code (enum rtx_code code)
2668{
2669 switch (code)
2670 {
2671 case EQ: /* Z */
2672 case NE: /* ~Z */
2673 return CC_FLAG_Z;
2674
2675 case LT: /* N */
2676 case GE: /* ~N */
2677 return CC_FLAG_N;
2679
2680 case GT: /* ~(Z|(N^V)) */
2681 case LE: /* Z|(N^V) */
2682 return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_V;
2683
2684 case GEU: /* ~C */
2685 case LTU: /* C */
2686 return CC_FLAG_C;
2687
2688 case GTU: /* ~(C | Z) */
2689 case LEU: /* C | Z */
2690 return CC_FLAG_Z | CC_FLAG_C;
2691
2692 case ORDERED:
2693 case UNORDERED:
2694 case LTGT:
2695 case UNEQ:
2696 case UNGE:
2697 case UNGT:
2698 case UNLE:
2699 case UNLT:
2700 return -1;
2701
2702 default:
2703 gcc_unreachable ();
2704 }
2705}
2706
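/* Pick the cheapest CC mode that still supplies all the flags CODE
   needs.  For example, (gt x y) needs Z, N and V and so selects CCmode,
   (ltu x y) only needs C and selects CCZNCmode, and (eq x y) gets by
   with CCZNmode.  */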
5574dbdd 2707enum machine_mode
990679af 2708mn10300_select_cc_mode (enum rtx_code code, rtx x, rtx y ATTRIBUTE_UNUSED)
5574dbdd 2709{
990679af 2710 int req;
2711
2712 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2713 return CC_FLOATmode;
2714
2715 req = cc_flags_for_code (code);
2716
2717 if (req & CC_FLAG_V)
2718 return CCmode;
2719 if (req & CC_FLAG_C)
2720 return CCZNCmode;
2721 return CCZNmode;
5574dbdd 2722}
4879b320 2723
2724static inline bool
2725is_load_insn (rtx insn)
2726{
2727 if (GET_CODE (PATTERN (insn)) != SET)
2728 return false;
2729
2730 return MEM_P (SET_SRC (PATTERN (insn)));
2731}
2732
2733static inline bool
2734is_store_insn (rtx insn)
2735{
2736 if (GET_CODE (PATTERN (insn)) != SET)
2737 return false;
2738
2739 return MEM_P (SET_DEST (PATTERN (insn)));
2740}
2741
2742/* Update scheduling costs for situations that cannot be
2743 described using the attributes and DFA machinery.
2744 DEP is the insn being scheduled.
2745 INSN is the previous insn.
2746 COST is the current cycle cost for DEP. */
2747
2748static int
2749mn10300_adjust_sched_cost (rtx insn, rtx link, rtx dep, int cost)
2750{
2751 int timings = get_attr_timings (insn);
2752
2753 if (!TARGET_AM33)
2754 return 1;
2755
2756 if (GET_CODE (insn) == PARALLEL)
2757 insn = XVECEXP (insn, 0, 0);
2758
2759 if (GET_CODE (dep) == PARALLEL)
2760 dep = XVECEXP (dep, 0, 0);
2761
2762 /* For the AM34 a load instruction that follows a
2763 store instruction incurs an extra cycle of delay. */
2764 if (mn10300_tune_cpu == PROCESSOR_AM34
2765 && is_load_insn (dep)
2766 && is_store_insn (insn))
2767 cost += 1;
2768
2769 /* For the AM34 a non-store, non-branch FPU insn that follows
2770 another FPU insn incurs a one cycle throughput increase. */
2771 else if (mn10300_tune_cpu == PROCESSOR_AM34
2772 && ! is_store_insn (insn)
2773 && ! JUMP_P (insn)
2774 && GET_CODE (PATTERN (dep)) == SET
2775 && GET_CODE (PATTERN (insn)) == SET
2776 && GET_MODE_CLASS (GET_MODE (SET_SRC (PATTERN (dep)))) == MODE_FLOAT
2777 && GET_MODE_CLASS (GET_MODE (SET_SRC (PATTERN (insn)))) == MODE_FLOAT)
2778 cost += 1;
2779
2780 /* Resolve the conflict described in section 1-7-4 of
2781 Chapter 3 of the MN103E Series Instruction Manual
2782 where it says:
2783
9d75589a 2784 "When the preceding instruction is a CPU load or
4879b320 2785 store instruction, a following FPU instruction
2786 cannot be executed until the CPU completes the
2787 latency period even though there are no register
2788 or flag dependencies between them." */
2789
2790 /* Only the AM33-2 (and later) CPUs have FPU instructions. */
2791 if (! TARGET_AM33_2)
2792 return cost;
2793
2794 /* If a data dependence already exists then the cost is correct. */
2795 if (REG_NOTE_KIND (link) == 0)
2796 return cost;
2797
2798 /* Check that the instruction about to scheduled is an FPU instruction. */
2799 if (GET_CODE (PATTERN (dep)) != SET)
2800 return cost;
2801
2802 if (GET_MODE_CLASS (GET_MODE (SET_SRC (PATTERN (dep)))) != MODE_FLOAT)
2803 return cost;
2804
2805 /* Now check to see if the previous instruction is a load or store. */
2806 if (! is_load_insn (insn) && ! is_store_insn (insn))
2807 return cost;
2808
2809 /* XXX: Verify: The text of 1-7-4 implies that the restriction
9d75589a 2810 only applies when an INTEGER load/store precedes an FPU
     instruction, but is this true?  For now we assume that it is.  */
2812 if (GET_MODE_CLASS (GET_MODE (SET_SRC (PATTERN (insn)))) != MODE_INT)
2813 return cost;
2814
2815 /* Extract the latency value from the timings attribute. */
2816 return timings < 100 ? (timings % 10) : (timings % 100);
2817}
b2d7ede1 2818
2819static void
2820mn10300_conditional_register_usage (void)
2821{
2822 unsigned int i;
2823
2824 if (!TARGET_AM33)
2825 {
2826 for (i = FIRST_EXTENDED_REGNUM;
2827 i <= LAST_EXTENDED_REGNUM; i++)
2828 fixed_regs[i] = call_used_regs[i] = 1;
2829 }
2830 if (!TARGET_AM33_2)
2831 {
2832 for (i = FIRST_FP_REGNUM;
2833 i <= LAST_FP_REGNUM; i++)
2834 fixed_regs[i] = call_used_regs[i] = 1;
2835 }
2836 if (flag_pic)
2837 fixed_regs[PIC_OFFSET_TABLE_REGNUM] =
2838 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
2839}
7de3ada8 2840
2841/* Worker function for TARGET_MD_ASM_CLOBBERS.
2842 We do this in the mn10300 backend to maintain source compatibility
2843 with the old cc0-based compiler. */
2844
2845static tree
2846mn10300_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
2847 tree inputs ATTRIBUTE_UNUSED,
2848 tree clobbers)
2849{
2850 clobbers = tree_cons (NULL_TREE, build_string (5, "EPSW"),
2851 clobbers);
2852 return clobbers;
2853}
5574dbdd 2854\f
990679af 2855/* A helper function for splitting cbranch patterns after reload. */
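/* As a sketch, a combined compare-and-branch

     (set (pc) (if_then_else (lt (reg X) (reg Y)) (label_ref L) (pc)))

   is split below into an explicit flags compare followed by a jump
   testing CC_REG:

     (set (reg:CC CC_REG) (compare (reg X) (reg Y)))
     (set (pc) (if_then_else (lt (reg:CC CC_REG) (const_int 0))
                             (label_ref L) (pc)))  */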
2856
2857void
2858mn10300_split_cbranch (enum machine_mode cmp_mode, rtx cmp_op, rtx label_ref)
2859{
2860 rtx flags, x;
2861
2862 flags = gen_rtx_REG (cmp_mode, CC_REG);
2863 x = gen_rtx_COMPARE (cmp_mode, XEXP (cmp_op, 0), XEXP (cmp_op, 1));
2864 x = gen_rtx_SET (VOIDmode, flags, x);
2865 emit_insn (x);
2866
2867 x = gen_rtx_fmt_ee (GET_CODE (cmp_op), VOIDmode, flags, const0_rtx);
2868 x = gen_rtx_IF_THEN_ELSE (VOIDmode, x, label_ref, pc_rtx);
2869 x = gen_rtx_SET (VOIDmode, pc_rtx, x);
2870 emit_jump_insn (x);
2871}
2872
2873/* A helper function for matching parallels that set the flags. */
2874
2875bool
2876mn10300_match_ccmode (rtx insn, enum machine_mode cc_mode)
2877{
2878 rtx op1, flags;
2879 enum machine_mode flags_mode;
2880
2881 gcc_checking_assert (XVECLEN (PATTERN (insn), 0) == 2);
2882
2883 op1 = XVECEXP (PATTERN (insn), 0, 1);
2884 gcc_checking_assert (GET_CODE (SET_SRC (op1)) == COMPARE);
2885
2886 flags = SET_DEST (op1);
2887 flags_mode = GET_MODE (flags);
2888
2889 if (GET_MODE (SET_SRC (op1)) != flags_mode)
2890 return false;
2891 if (GET_MODE_CLASS (flags_mode) != MODE_CC)
2892 return false;
2893
2894 /* Ensure that the mode of FLAGS is compatible with CC_MODE. */
2895 if (cc_flags_for_mode (flags_mode) & ~cc_flags_for_mode (cc_mode))
2896 return false;
2897
2898 return true;
2899}
2900
35c2a6c6 2901/* This function is used to help split:
2902
2903 (set (reg) (and (reg) (int)))
2904
2905 into:
2906
     (set (reg) (shift (reg) (int)))
     (set (reg) (shift (reg) (int)))

   where the shifts will be shorter than the "and" insn.

   It returns the number of bits that should be shifted.  A positive
   value means that the low bits are to be cleared (and hence the
2914 shifts should be right followed by left) whereas a negative value
2915 means that the high bits are to be cleared (left followed by right).
2916 Zero is returned when it would not be economical to split the AND. */
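/* For example, an AND with 0xfffffff0 yields 4: clear the low four bits
   by shifting right four then left four.  An AND with 0x0fffffff yields
   -4: clear the high four bits by shifting left four then right four.  */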
2917
990679af 2918int
2919mn10300_split_and_operand_count (rtx op)
2920{
2921 HOST_WIDE_INT val = INTVAL (op);
2922 int count;
2923
2924 if (val < 0)
2925 {
2926 /* High bit is set, look for bits clear at the bottom. */
2927 count = exact_log2 (-val);
2928 if (count < 0)
2929 return 0;
      /* This is only a size win if we can use the asl2 insn.  Otherwise
	 we would be replacing one 6-byte insn with two 3-byte insns.  */
2932 if (count > (optimize_insn_for_speed_p () ? 2 : 4))
2933 return 0;
35c2a6c6 2934 return count;
990679af 2935 }
2936 else
2937 {
2938 /* High bit is clear, look for bits set at the bottom. */
2939 count = exact_log2 (val + 1);
2940 count = 32 - count;
2941 /* Again, this is only a size win with asl2. */
2942 if (count > (optimize_insn_for_speed_p () ? 2 : 4))
2943 return 0;
2944 return -count;
2945 }
2946}
2947\f
e7076c21 2948struct liw_data
2949{
2950 enum attr_liw slot;
2951 enum attr_liw_op op;
2952 rtx dest;
2953 rtx src;
2954};
2955
2956/* Decide if the given insn is a candidate for LIW bundling. If it is then
2957 extract the operands and LIW attributes from the insn and use them to fill
2958 in the liw_data structure. Return true upon success or false if the insn
2959 cannot be bundled. */
f9e46c25 2960
2961static bool
e7076c21 2962extract_bundle (rtx insn, struct liw_data * pdata)
f9e46c25 2963{
e7076c21 2964 bool allow_consts = true;
81705807 2965 rtx p;
f9e46c25 2966
e7076c21 2967 gcc_assert (pdata != NULL);
2968
2969 if (insn == NULL_RTX)
2970 return false;
2971 /* Make sure that we are dealing with a simple SET insn. */
f9e46c25 2972 p = single_set (insn);
e7076c21 2973 if (p == NULL_RTX)
2974 return false;
2975
2976 /* Make sure that it could go into one of the LIW pipelines. */
2977 pdata->slot = get_attr_liw (insn);
2978 if (pdata->slot == LIW_BOTH)
2979 return false;
2980
2981 pdata->op = get_attr_liw_op (insn);
2982
e7076c21 2983 switch (pdata->op)
f9e46c25 2984 {
2985 case LIW_OP_MOV:
e7076c21 2986 pdata->dest = SET_DEST (p);
2987 pdata->src = SET_SRC (p);
f9e46c25 2988 break;
2989 case LIW_OP_CMP:
e7076c21 2990 pdata->dest = XEXP (SET_SRC (p), 0);
2991 pdata->src = XEXP (SET_SRC (p), 1);
f9e46c25 2992 break;
2993 case LIW_OP_NONE:
2994 return false;
e7076c21 2995 case LIW_OP_AND:
2996 case LIW_OP_OR:
2997 case LIW_OP_XOR:
2998 /* The AND, OR and XOR long instruction words only accept register arguments. */
2999 allow_consts = false;
3000 /* Fall through. */
f9e46c25 3001 default:
e7076c21 3002 pdata->dest = SET_DEST (p);
3003 pdata->src = XEXP (SET_SRC (p), 1);
f9e46c25 3004 break;
3005 }
3006
e7076c21 3007 if (! REG_P (pdata->dest))
3008 return false;
3009
3010 if (REG_P (pdata->src))
3011 return true;
3012
3013 return allow_consts && satisfies_constraint_O (pdata->src);
f9e46c25 3014}
3015
e7076c21 3016/* Make sure that it is OK to execute LIW1 and LIW2 in parallel. GCC generated
3017 the instructions with the assumption that LIW1 would be executed before LIW2
3018 so we must check for overlaps between their sources and destinations. */
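/* For instance, an ADD writing d0 followed by a CMP reading d0 must not
   be bundled, as the CMP would then see d0's old value; the opposite
   order, CMP then ADD, bundles fine.  */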
f9e46c25 3019
3020static bool
e7076c21 3021check_liw_constraints (struct liw_data * pliw1, struct liw_data * pliw2)
3022{
3023 /* Check for slot conflicts. */
3024 if (pliw2->slot == pliw1->slot && pliw1->slot != LIW_EITHER)
f9e46c25 3025 return false;
3026
e7076c21 3027 /* If either operation is a compare, then "dest" is really an input; the real
3028 destination is CC_REG. So these instructions need different checks. */
3029
3030 /* Changing "CMP ; OP" into "CMP | OP" is OK because the comparison will
3031 check its values prior to any changes made by OP. */
3032 if (pliw1->op == LIW_OP_CMP)
3033 {
3034 /* Two sequential comparisons means dead code, which ought to
3035 have been eliminated given that bundling only happens with
3036 optimization. We cannot bundle them in any case. */
3037 gcc_assert (pliw1->op != pliw2->op);
3038 return true;
3039 }
f9e46c25 3040
e7076c21 3041 /* Changing "OP ; CMP" into "OP | CMP" does not work if the value being compared
3042 is the destination of OP, as the CMP will look at the old value, not the new
3043 one. */
3044 if (pliw2->op == LIW_OP_CMP)
f9e46c25 3045 {
e7076c21 3046 if (REGNO (pliw2->dest) == REGNO (pliw1->dest))
3047 return false;
3048
3049 if (REG_P (pliw2->src))
3050 return REGNO (pliw2->src) != REGNO (pliw1->dest);
3051
3052 return true;
3053 }
3054
3055 /* Changing "OP1 ; OP2" into "OP1 | OP2" does not work if they both write to the
3056 same destination register. */
3057 if (REGNO (pliw2->dest) == REGNO (pliw1->dest))
3058 return false;
3059
3060 /* Changing "OP1 ; OP2" into "OP1 | OP2" generally does not work if the destination
3061 of OP1 is the source of OP2. The exception is when OP1 is a MOVE instruction when
3062 we can replace the source in OP2 with the source of OP1. */
3063 if (REG_P (pliw2->src) && REGNO (pliw2->src) == REGNO (pliw1->dest))
3064 {
3065 if (pliw1->op == LIW_OP_MOV && REG_P (pliw1->src))
f9e46c25 3066 {
	  /* PLIW1's source is known to be a register here, so the
	     substitution is always safe, even when PLIW2 is an AND, OR
	     or XOR, which only accept register operands.  */
3073 pliw2->src = pliw1->src;
f9e46c25 3074 return true;
3075 }
3076 return false;
3077 }
3078
e7076c21 3079 /* Everything else is OK. */
f9e46c25 3080 return true;
3081}
3082
f9e46c25 3083/* Combine pairs of insns into LIW bundles. */
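/* For example (a sketch): two adjacent, independent SETs such as

     (set (reg a0) (reg d0))
     (set (reg a1) (reg d1))

   are replaced below with a single gen_liw instruction that executes
   both operations in one long instruction word, provided that
   check_liw_constraints approves the pairing.  */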
3084
3085static void
3086mn10300_bundle_liw (void)
3087{
3088 rtx r;
3089
3090 for (r = get_insns (); r != NULL_RTX; r = next_nonnote_nondebug_insn (r))
3091 {
e7076c21 3092 rtx insn1, insn2;
3093 struct liw_data liw1, liw2;
f9e46c25 3094
3095 insn1 = r;
e7076c21 3096 if (! extract_bundle (insn1, & liw1))
f9e46c25 3097 continue;
3098
3099 insn2 = next_nonnote_nondebug_insn (insn1);
e7076c21 3100 if (! extract_bundle (insn2, & liw2))
f9e46c25 3101 continue;
3102
e7076c21 3103 /* Check for source/destination overlap. */
3104 if (! check_liw_constraints (& liw1, & liw2))
f9e46c25 3105 continue;
3106
e7076c21 3107 if (liw1.slot == LIW_OP2 || liw2.slot == LIW_OP1)
f9e46c25 3108 {
e7076c21 3109 struct liw_data temp;
3110
3111 temp = liw1;
f9e46c25 3112 liw1 = liw2;
e7076c21 3113 liw2 = temp;
f9e46c25 3114 }
3115
f9e46c25 3116 delete_insn (insn2);
3117
e7076c21 3118 if (liw1.op == LIW_OP_CMP)
3119 insn2 = gen_cmp_liw (liw2.dest, liw2.src, liw1.dest, liw1.src,
3120 GEN_INT (liw2.op));
3121 else if (liw2.op == LIW_OP_CMP)
3122 insn2 = gen_liw_cmp (liw1.dest, liw1.src, liw2.dest, liw2.src,
3123 GEN_INT (liw1.op));
f9e46c25 3124 else
e7076c21 3125 insn2 = gen_liw (liw1.dest, liw2.dest, liw1.src, liw2.src,
3126 GEN_INT (liw1.op), GEN_INT (liw2.op));
f9e46c25 3127
3128 insn2 = emit_insn_after (insn2, insn1);
3129 delete_insn (insn1);
3130 r = insn2;
3131 }
3132}
3133
f9b3e8f5 3134#define DUMP(reason, insn) \
3135 do \
3136 { \
3137 if (dump_file) \
3138 { \
3139 fprintf (dump_file, reason "\n"); \
3140 if (insn != NULL_RTX) \
3141 print_rtl_single (dump_file, insn); \
	  fprintf (dump_file, "\n");		\
3143 } \
3144 } \
3145 while (0)
3146
3147/* Replace the BRANCH insn with a Lcc insn that goes to LABEL.
3148 Insert a SETLB insn just before LABEL. */
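/* A sketch of the rewrite, with illustrative mnemonics: a loop

       L:   ...body...
            bne L

   becomes

            setlb
       L:   ...body...
            lne

   so that subsequent iterations execute from the loop buffer set up
   by the SETLB instruction.  */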
3149
3150static void
3151mn10300_insert_setlb_lcc (rtx label, rtx branch)
3152{
3153 rtx lcc, comparison, cmp_reg;
3154
3155 if (LABEL_NUSES (label) > 1)
3156 {
3157 rtx insn;
3158
3159 /* This label is used both as an entry point to the loop
3160 and as a loop-back point for the loop. We need to separate
3161 these two functions so that the SETLB happens upon entry,
3162 but the loop-back does not go to the SETLB instruction. */
3163 DUMP ("Inserting SETLB insn after:", label);
3164 insn = emit_insn_after (gen_setlb (), label);
3165 label = gen_label_rtx ();
3166 emit_label_after (label, insn);
3167 DUMP ("Created new loop-back label:", label);
3168 }
3169 else
3170 {
3171 DUMP ("Inserting SETLB insn before:", label);
3172 emit_insn_before (gen_setlb (), label);
3173 }
3174
3175 comparison = XEXP (SET_SRC (PATTERN (branch)), 0);
3176 cmp_reg = XEXP (comparison, 0);
3177 gcc_assert (REG_P (cmp_reg));
3178
  /* The comparison should already have been split out of the branch,
     leaving a compare against the flags register.  */
3181 gcc_assert (REGNO (cmp_reg) == CC_REG);
3182
3183 if (GET_MODE (cmp_reg) == CC_FLOATmode)
3184 lcc = gen_FLcc (comparison, label);
3185 else
3186 lcc = gen_Lcc (comparison, label);
3187
3188 lcc = emit_jump_insn_before (lcc, branch);
3189 mark_jump_label (XVECEXP (PATTERN (lcc), 0, 0), lcc, 0);
bd2b2481 3190 JUMP_LABEL (lcc) = label;
f9b3e8f5 3191 DUMP ("Replacing branch insn...", branch);
3192 DUMP ("... with Lcc insn:", lcc);
3193 delete_insn (branch);
3194}
3195
3196static bool
161dfa6e 3197mn10300_block_contains_call (basic_block block)
f9b3e8f5 3198{
3199 rtx insn;
3200
3201 FOR_BB_INSNS (block, insn)
3202 if (CALL_P (insn))
3203 return true;
3204
3205 return false;
3206}
3207
3208static bool
3209mn10300_loop_contains_call_insn (loop_p loop)
3210{
3211 basic_block * bbs;
3212 bool result = false;
3213 unsigned int i;
3214
3215 bbs = get_loop_body (loop);
3216
3217 for (i = 0; i < loop->num_nodes; i++)
3218 if (mn10300_block_contains_call (bbs[i]))
3219 {
3220 result = true;
3221 break;
3222 }
3223
3224 free (bbs);
3225 return result;
3226}
3227
3228static void
3229mn10300_scan_for_setlb_lcc (void)
3230{
f9b3e8f5 3231 loop_p loop;
3232
3233 DUMP ("Looking for loops that can use the SETLB insn", NULL_RTX);
3234
3235 df_analyze ();
3236 compute_bb_for_insn ();
3237
3238 /* Find the loops. */
319f4d7d 3239 loop_optimizer_init (AVOID_CFG_MODIFICATIONS);
f9b3e8f5 3240
3241 /* FIXME: For now we only investigate innermost loops. In practice however
3242 if an inner loop is not suitable for use with the SETLB/Lcc insns, it may
3243 be the case that its parent loop is suitable. Thus we should check all
3244 loops, but work from the innermost outwards. */
f21d4d00 3245 FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
f9b3e8f5 3246 {
3247 const char * reason = NULL;
3248
3249 /* Check to see if we can modify this loop. If we cannot
3250 then set 'reason' to describe why it could not be done. */
3251 if (loop->latch == NULL)
3252 reason = "it contains multiple latches";
3253 else if (loop->header != loop->latch)
3254 /* FIXME: We could handle loops that span multiple blocks,
3255 but this requires a lot more work tracking down the branches
3256 that need altering, so for now keep things simple. */
3257 reason = "the loop spans multiple blocks";
3258 else if (mn10300_loop_contains_call_insn (loop))
3259 reason = "it contains CALL insns";
3260 else
3261 {
3262 rtx branch = BB_END (loop->latch);
3263
3264 gcc_assert (JUMP_P (branch));
3265 if (single_set (branch) == NULL_RTX || ! any_condjump_p (branch))
3266 /* We cannot optimize tablejumps and the like. */
3267 /* FIXME: We could handle unconditional jumps. */
3268 reason = "it is not a simple loop";
3269 else
3270 {
3271 rtx label;
3272
3273 if (dump_file)
3274 flow_loop_dump (loop, dump_file, NULL, 0);
3275
3276 label = BB_HEAD (loop->header);
3277 gcc_assert (LABEL_P (label));
3278
3279 mn10300_insert_setlb_lcc (label, branch);
3280 }
3281 }
3282
3283 if (dump_file && reason != NULL)
3284 fprintf (dump_file, "Loop starting with insn %d is not suitable because %s\n",
3285 INSN_UID (BB_HEAD (loop->header)),
3286 reason);
3287 }
3288
319f4d7d 3289 loop_optimizer_finalize ();
f9b3e8f5 3290
3291 df_finish_pass (false);
3292
3293 DUMP ("SETLB scan complete", NULL_RTX);
3294}
3295
f9e46c25 3296static void
3297mn10300_reorg (void)
3298{
f9b3e8f5 3299 /* These are optimizations, so only run them if optimizing. */
3300 if (TARGET_AM33 && (optimize > 0 || optimize_size))
f9e46c25 3301 {
f9b3e8f5 3302 if (TARGET_ALLOW_SETLB)
3303 mn10300_scan_for_setlb_lcc ();
3304
f9e46c25 3305 if (TARGET_ALLOW_LIW)
3306 mn10300_bundle_liw ();
3307 }
3308}
3309\f
3626e955 3310/* Initialize the GCC target structure. */
3311
f9e46c25 3312#undef TARGET_MACHINE_DEPENDENT_REORG
3313#define TARGET_MACHINE_DEPENDENT_REORG mn10300_reorg
3314
3626e955 3315#undef TARGET_ASM_ALIGNED_HI_OP
3316#define TARGET_ASM_ALIGNED_HI_OP "\t.hword\t"
3317
3318#undef TARGET_LEGITIMIZE_ADDRESS
3319#define TARGET_LEGITIMIZE_ADDRESS mn10300_legitimize_address
3320
28f32607 3321#undef TARGET_ADDRESS_COST
3322#define TARGET_ADDRESS_COST mn10300_address_cost
3323#undef TARGET_REGISTER_MOVE_COST
3324#define TARGET_REGISTER_MOVE_COST mn10300_register_move_cost
3325#undef TARGET_MEMORY_MOVE_COST
3326#define TARGET_MEMORY_MOVE_COST mn10300_memory_move_cost
3626e955 3327#undef TARGET_RTX_COSTS
3328#define TARGET_RTX_COSTS mn10300_rtx_costs
3626e955 3329
3330#undef TARGET_ASM_FILE_START
3331#define TARGET_ASM_FILE_START mn10300_file_start
3332#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
3333#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
3334
22680c28 3335#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
3336#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA mn10300_asm_output_addr_const_extra
3337
3626e955 3338#undef TARGET_OPTION_OVERRIDE
3339#define TARGET_OPTION_OVERRIDE mn10300_option_override
3340
3341#undef TARGET_ENCODE_SECTION_INFO
3342#define TARGET_ENCODE_SECTION_INFO mn10300_encode_section_info
3343
3344#undef TARGET_PROMOTE_PROTOTYPES
3345#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
3346#undef TARGET_RETURN_IN_MEMORY
3347#define TARGET_RETURN_IN_MEMORY mn10300_return_in_memory
3348#undef TARGET_PASS_BY_REFERENCE
3349#define TARGET_PASS_BY_REFERENCE mn10300_pass_by_reference
3350#undef TARGET_CALLEE_COPIES
3351#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
3352#undef TARGET_ARG_PARTIAL_BYTES
3353#define TARGET_ARG_PARTIAL_BYTES mn10300_arg_partial_bytes
dc67179a 3354#undef TARGET_FUNCTION_ARG
3355#define TARGET_FUNCTION_ARG mn10300_function_arg
3356#undef TARGET_FUNCTION_ARG_ADVANCE
3357#define TARGET_FUNCTION_ARG_ADVANCE mn10300_function_arg_advance
3626e955 3358
3359#undef TARGET_EXPAND_BUILTIN_SAVEREGS
3360#define TARGET_EXPAND_BUILTIN_SAVEREGS mn10300_builtin_saveregs
3361#undef TARGET_EXPAND_BUILTIN_VA_START
3362#define TARGET_EXPAND_BUILTIN_VA_START mn10300_va_start
3363
3364#undef TARGET_CASE_VALUES_THRESHOLD
3365#define TARGET_CASE_VALUES_THRESHOLD mn10300_case_values_threshold
3366
3367#undef TARGET_LEGITIMATE_ADDRESS_P
3368#define TARGET_LEGITIMATE_ADDRESS_P mn10300_legitimate_address_p
4c6c308e 3369#undef TARGET_DELEGITIMIZE_ADDRESS
3370#define TARGET_DELEGITIMIZE_ADDRESS mn10300_delegitimize_address
ca316360 3371#undef TARGET_LEGITIMATE_CONSTANT_P
3372#define TARGET_LEGITIMATE_CONSTANT_P mn10300_legitimate_constant_p
3626e955 3373
029ca87f 3374#undef TARGET_PREFERRED_RELOAD_CLASS
3375#define TARGET_PREFERRED_RELOAD_CLASS mn10300_preferred_reload_class
3376#undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
c78ac668 3377#define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS \
3378 mn10300_preferred_output_reload_class
3379#undef TARGET_SECONDARY_RELOAD
3380#define TARGET_SECONDARY_RELOAD mn10300_secondary_reload
029ca87f 3381
3626e955 3382#undef TARGET_TRAMPOLINE_INIT
3383#define TARGET_TRAMPOLINE_INIT mn10300_trampoline_init
3384
3385#undef TARGET_FUNCTION_VALUE
3386#define TARGET_FUNCTION_VALUE mn10300_function_value
3387#undef TARGET_LIBCALL_VALUE
3388#define TARGET_LIBCALL_VALUE mn10300_libcall_value
3389
3390#undef TARGET_ASM_OUTPUT_MI_THUNK
3391#define TARGET_ASM_OUTPUT_MI_THUNK mn10300_asm_output_mi_thunk
3392#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
3393#define TARGET_ASM_CAN_OUTPUT_MI_THUNK mn10300_can_output_mi_thunk
3394
4879b320 3395#undef TARGET_SCHED_ADJUST_COST
3396#define TARGET_SCHED_ADJUST_COST mn10300_adjust_sched_cost
3397
b2d7ede1 3398#undef TARGET_CONDITIONAL_REGISTER_USAGE
3399#define TARGET_CONDITIONAL_REGISTER_USAGE mn10300_conditional_register_usage
3400
7de3ada8 3401#undef TARGET_MD_ASM_CLOBBERS
3402#define TARGET_MD_ASM_CLOBBERS mn10300_md_asm_clobbers
3403
08207c2f 3404#undef TARGET_FLAGS_REGNUM
3405#define TARGET_FLAGS_REGNUM CC_REG
3406
3626e955 3407struct gcc_target targetm = TARGET_INITIALIZER;