/* Subroutines for insn-output.c for Matsushita MN10300 series
   Copyright (C) 1996-2014 Free Software Foundation, Inc.
   Contributed by Jeff Law (law@cygnus.com).

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "stor-layout.h"
#include "varasm.h"
#include "calls.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "recog.h"
#include "reload.h"
#include "expr.h"
#include "insn-codes.h"
#include "optabs.h"
#include "hashtab.h"
#include "hash-set.h"
#include "vec.h"
#include "machmode.h"
#include "input.h"
#include "function.h"
#include "obstack.h"
#include "diagnostic-core.h"
#include "tm_p.h"
#include "tm-constrs.h"
#include "target.h"
#include "target-def.h"
#include "dominance.h"
#include "cfg.h"
#include "cfgrtl.h"
#include "cfganal.h"
#include "lcm.h"
#include "cfgbuild.h"
#include "cfgcleanup.h"
#include "predict.h"
#include "basic-block.h"
#include "df.h"
#include "opts.h"
#include "cfgloop.h"
#include "dumpfile.h"
#include "builtins.h"

/* This is used in the am33_2.0-linux-gnu port, in which global symbol
   names are not prefixed by underscores, to tell whether to prefix a
   label with a plus sign or not, so that the assembler can tell
   symbol names from register names.  */
int mn10300_protect_label;

/* Selected processor type for tuning.  */
enum processor_type mn10300_tune_cpu = PROCESSOR_DEFAULT;

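/* Flag bits used by cc_flags_for_mode and cc_flags_for_code to describe
   which of the Z (zero), N (negative), C (carry) and V (overflow)
   condition flags a comparison mode provides or a branch condition
   needs.  */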
#define CC_FLAG_Z 1
#define CC_FLAG_N 2
#define CC_FLAG_C 4
#define CC_FLAG_V 8

static int cc_flags_for_mode(machine_mode);
static int cc_flags_for_code(enum rtx_code);

/* Implement TARGET_OPTION_OVERRIDE.  */

static void
mn10300_option_override (void)
{
  if (TARGET_AM33)
    target_flags &= ~MASK_MULT_BUG;
  else
    {
      /* Disable scheduling for the MN10300 as we do
	 not have timing information available for it.  */
      flag_schedule_insns = 0;
      flag_schedule_insns_after_reload = 0;

      /* Force enable splitting of wide types, as otherwise it is trivial
	 to run out of registers.  Indeed, this works so well that register
	 allocation problems are now more common *without* optimization,
	 when this flag is not enabled by default.  */
      flag_split_wide_types = 1;
    }

  if (mn10300_tune_string)
    {
      if (strcasecmp (mn10300_tune_string, "mn10300") == 0)
	mn10300_tune_cpu = PROCESSOR_MN10300;
      else if (strcasecmp (mn10300_tune_string, "am33") == 0)
	mn10300_tune_cpu = PROCESSOR_AM33;
      else if (strcasecmp (mn10300_tune_string, "am33-2") == 0)
	mn10300_tune_cpu = PROCESSOR_AM33_2;
      else if (strcasecmp (mn10300_tune_string, "am34") == 0)
	mn10300_tune_cpu = PROCESSOR_AM34;
      else
	error ("-mtune= expects mn10300, am33, am33-2, or am34");
    }
}

static void
mn10300_file_start (void)
{
  default_file_start ();

  if (TARGET_AM33_2)
    fprintf (asm_out_file, "\t.am33_2\n");
  else if (TARGET_AM33)
    fprintf (asm_out_file, "\t.am33\n");
}

/* Note: This list must match the liw_op attribute in mn10300.md.  */

static const char *liw_op_names[] =
{
  "add", "cmp", "sub", "mov",
  "and", "or", "xor",
  "asr", "lsr", "asl",
  "none", "max"
};

/* Print operand X using operand code CODE to assembly language output file
   FILE.  */

void
mn10300_print_operand (FILE *file, rtx x, int code)
{
  switch (code)
    {
    case 'W':
      {
	unsigned int liw_op = UINTVAL (x);

	gcc_assert (TARGET_ALLOW_LIW);
	gcc_assert (liw_op < LIW_OP_MAX);
	fputs (liw_op_names[liw_op], file);
	break;
      }

    case 'b':
    case 'B':
      {
	enum rtx_code cmp = GET_CODE (x);
	machine_mode mode = GET_MODE (XEXP (x, 0));
	const char *str;
	int have_flags;

	if (code == 'B')
	  cmp = reverse_condition (cmp);
	have_flags = cc_flags_for_mode (mode);

	switch (cmp)
	  {
	  case NE:
	    str = "ne";
	    break;
	  case EQ:
	    str = "eq";
	    break;
	  case GE:
	    /* bge is smaller than bnc.  */
	    str = (have_flags & CC_FLAG_V ? "ge" : "nc");
	    break;
	  case LT:
	    str = (have_flags & CC_FLAG_V ? "lt" : "ns");
	    break;
	  case GT:
	    str = "gt";
	    break;
	  case LE:
	    str = "le";
	    break;
	  case GEU:
	    str = "cc";
	    break;
	  case GTU:
	    str = "hi";
	    break;
	  case LEU:
	    str = "ls";
	    break;
	  case LTU:
	    str = "cs";
	    break;
	  case ORDERED:
	    str = "lge";
	    break;
	  case UNORDERED:
	    str = "uo";
	    break;
	  case LTGT:
	    str = "lg";
	    break;
	  case UNEQ:
	    str = "ue";
	    break;
	  case UNGE:
	    str = "uge";
	    break;
	  case UNGT:
	    str = "ug";
	    break;
	  case UNLE:
	    str = "ule";
	    break;
	  case UNLT:
	    str = "ul";
	    break;
	  default:
	    gcc_unreachable ();
	  }

	gcc_checking_assert ((cc_flags_for_code (cmp) & ~have_flags) == 0);
	fputs (str, file);
      }
      break;

    case 'C':
      /* This is used for the operand to a call instruction;
	 if it's a REG, enclose it in parens, else output
	 the operand normally.  */
      if (REG_P (x))
	{
	  fputc ('(', file);
	  mn10300_print_operand (file, x, 0);
	  fputc (')', file);
	}
      else
	mn10300_print_operand (file, x, 0);
      break;

    case 'D':
      switch (GET_CODE (x))
	{
	case MEM:
	  fputc ('(', file);
	  output_address (XEXP (x, 0));
	  fputc (')', file);
	  break;

	case REG:
	  fprintf (file, "fd%d", REGNO (x) - 18);
	  break;

	default:
	  gcc_unreachable ();
	}
      break;

      /* This is the least significant word of a 64-bit value.  */
    case 'L':
      switch (GET_CODE (x))
	{
	case MEM:
	  fputc ('(', file);
	  output_address (XEXP (x, 0));
	  fputc (')', file);
	  break;

	case REG:
	  fprintf (file, "%s", reg_names[REGNO (x)]);
	  break;

	case SUBREG:
	  fprintf (file, "%s", reg_names[subreg_regno (x)]);
	  break;

	case CONST_DOUBLE:
	  {
	    long val[2];
	    REAL_VALUE_TYPE rv;

	    switch (GET_MODE (x))
	      {
	      case DFmode:
		REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
		REAL_VALUE_TO_TARGET_DOUBLE (rv, val);
		fprintf (file, "0x%lx", val[0]);
		break;
	      case SFmode:
		REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
		REAL_VALUE_TO_TARGET_SINGLE (rv, val[0]);
		fprintf (file, "0x%lx", val[0]);
		break;
	      case VOIDmode:
	      case DImode:
		mn10300_print_operand_address (file,
					       GEN_INT (CONST_DOUBLE_LOW (x)));
		break;
	      default:
		break;
	      }
	    break;
	  }

	case CONST_INT:
	  {
	    rtx low, high;
	    split_double (x, &low, &high);
	    fprintf (file, "%ld", (long)INTVAL (low));
	    break;
	  }

	default:
	  gcc_unreachable ();
	}
      break;

      /* Similarly, but for the most significant word.  */
    case 'H':
      switch (GET_CODE (x))
	{
	case MEM:
	  fputc ('(', file);
	  x = adjust_address (x, SImode, 4);
	  output_address (XEXP (x, 0));
	  fputc (')', file);
	  break;

	case REG:
	  fprintf (file, "%s", reg_names[REGNO (x) + 1]);
	  break;

	case SUBREG:
	  fprintf (file, "%s", reg_names[subreg_regno (x) + 1]);
	  break;

	case CONST_DOUBLE:
	  {
	    long val[2];
	    REAL_VALUE_TYPE rv;

	    switch (GET_MODE (x))
	      {
	      case DFmode:
		REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
		REAL_VALUE_TO_TARGET_DOUBLE (rv, val);
		fprintf (file, "0x%lx", val[1]);
		break;
	      case SFmode:
		gcc_unreachable ();
	      case VOIDmode:
	      case DImode:
		mn10300_print_operand_address (file,
					       GEN_INT (CONST_DOUBLE_HIGH (x)));
		break;
	      default:
		break;
	      }
	    break;
	  }

	case CONST_INT:
	  {
	    rtx low, high;
	    split_double (x, &low, &high);
	    fprintf (file, "%ld", (long)INTVAL (high));
	    break;
	  }

	default:
	  gcc_unreachable ();
	}
      break;

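      /* Print a memory reference; a bare register base is printed as
	 the explicit (0,Rn) form.  */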
    case 'A':
      fputc ('(', file);
      if (REG_P (XEXP (x, 0)))
	output_address (gen_rtx_PLUS (SImode, XEXP (x, 0), const0_rtx));
      else
	output_address (XEXP (x, 0));
      fputc (')', file);
      break;

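      /* An 8-bit immediate: 'N' prints its bitwise complement, 'U' the
	 value itself, in both cases masked to 8 bits.  */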
    case 'N':
      gcc_assert (INTVAL (x) >= -128 && INTVAL (x) <= 255);
      fprintf (file, "%d", (int)((~INTVAL (x)) & 0xff));
      break;

    case 'U':
      gcc_assert (INTVAL (x) >= -128 && INTVAL (x) <= 255);
      fprintf (file, "%d", (int)(INTVAL (x) & 0xff));
      break;

      /* For shift counts.  The hardware ignores the upper bits of
	 any immediate, but the assembler will flag an out of range
	 shift count as an error.  So we mask off the high bits
	 of the immediate here.  */
    case 'S':
      if (CONST_INT_P (x))
	{
	  fprintf (file, "%d", (int)(INTVAL (x) & 0x1f));
	  break;
	}
      /* FALL THROUGH */

    default:
      switch (GET_CODE (x))
	{
	case MEM:
	  fputc ('(', file);
	  output_address (XEXP (x, 0));
	  fputc (')', file);
	  break;

	case PLUS:
	  output_address (x);
	  break;

	case REG:
	  fprintf (file, "%s", reg_names[REGNO (x)]);
	  break;

	case SUBREG:
	  fprintf (file, "%s", reg_names[subreg_regno (x)]);
	  break;

	  /* This will only be single precision....  */
	case CONST_DOUBLE:
	  {
	    unsigned long val;
	    REAL_VALUE_TYPE rv;

	    REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
	    REAL_VALUE_TO_TARGET_SINGLE (rv, val);
	    fprintf (file, "0x%lx", val);
	    break;
	  }

	case CONST_INT:
	case SYMBOL_REF:
	case CONST:
	case LABEL_REF:
	case CODE_LABEL:
	case UNSPEC:
	  mn10300_print_operand_address (file, x);
	  break;
	default:
	  gcc_unreachable ();
	}
      break;
    }
}

/* Output assembly language output for the address ADDR to FILE.  */

void
mn10300_print_operand_address (FILE *file, rtx addr)
{
  switch (GET_CODE (addr))
    {
    case POST_INC:
      mn10300_print_operand (file, XEXP (addr, 0), 0);
      fputc ('+', file);
      break;

    case POST_MODIFY:
      mn10300_print_operand (file, XEXP (addr, 0), 0);
      fputc ('+', file);
      fputc (',', file);
      mn10300_print_operand (file, XEXP (addr, 1), 0);
      break;

    case REG:
      mn10300_print_operand (file, addr, 0);
      break;
    case PLUS:
      {
	rtx base = XEXP (addr, 0);
	rtx index = XEXP (addr, 1);

	if (REG_P (index) && !REG_OK_FOR_INDEX_P (index))
	  {
	    rtx x = base;
	    base = index;
	    index = x;

	    gcc_assert (REG_P (index) && REG_OK_FOR_INDEX_P (index));
	  }
	gcc_assert (REG_OK_FOR_BASE_P (base));

	mn10300_print_operand (file, index, 0);
	fputc (',', file);
	mn10300_print_operand (file, base, 0);
	break;
      }
    case SYMBOL_REF:
      output_addr_const (file, addr);
      break;
    default:
      output_addr_const (file, addr);
      break;
    }
}

/* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA.

   Used for PIC-specific UNSPECs.  */

static bool
mn10300_asm_output_addr_const_extra (FILE *file, rtx x)
{
  if (GET_CODE (x) == UNSPEC)
    {
      switch (XINT (x, 1))
	{
	case UNSPEC_PIC:
	  /* GLOBAL_OFFSET_TABLE or local symbols, no suffix.  */
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  break;
	case UNSPEC_GOT:
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("@GOT", file);
	  break;
	case UNSPEC_GOTOFF:
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("@GOTOFF", file);
	  break;
	case UNSPEC_PLT:
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("@PLT", file);
	  break;
	case UNSPEC_GOTSYM_OFF:
	  assemble_name (file, GOT_SYMBOL_NAME);
	  fputs ("-(", file);
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("-.)", file);
	  break;
	default:
	  return false;
	}
      return true;
    }
  else
    return false;
}

/* Count the number of FP registers that have to be saved.  */
static int
fp_regs_to_save (void)
{
  int i, n = 0;

  if (! TARGET_AM33_2)
    return 0;

  for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
    if (df_regs_ever_live_p (i) && ! call_really_used_regs[i])
      ++n;

  return n;
}

/* Print a set of registers in the format required by "movm" and "ret".
   Register K is saved if bit K of MASK is set.  The data and address
   registers can be stored individually, but the extended registers cannot.
   We assume that the mask already takes that into account.  For instance,
   bits 14 to 17 must have the same value.  */

void
mn10300_print_reg_list (FILE *file, int mask)
{
  int need_comma;
  int i;

  need_comma = 0;
  fputc ('[', file);

  for (i = 0; i < FIRST_EXTENDED_REGNUM; i++)
    if ((mask & (1 << i)) != 0)
      {
	if (need_comma)
	  fputc (',', file);
	fputs (reg_names [i], file);
	need_comma = 1;
      }

  if ((mask & 0x3c000) != 0)
    {
      gcc_assert ((mask & 0x3c000) == 0x3c000);
      if (need_comma)
	fputc (',', file);
      fputs ("exreg1", file);
      need_comma = 1;
    }

  fputc (']', file);
}

/* If the MDR register is never clobbered, we can use the RETF instruction
   which takes the address from the MDR register.  This is 3 cycles faster
   than having to load the address from the stack.  */

bool
mn10300_can_use_retf_insn (void)
{
  /* Don't bother if we're not optimizing.  In this case we won't
     have proper access to df_regs_ever_live_p.  */
  if (!optimize)
    return false;

  /* EH returns alter the saved return address; MDR is not current.  */
  if (crtl->calls_eh_return)
    return false;

  /* Obviously not if MDR is ever clobbered.  */
  if (df_regs_ever_live_p (MDR_REG))
    return false;

  /* ??? Careful not to use this during expand_epilogue etc.  */
  gcc_assert (!in_sequence_p ());
  return leaf_function_p ();
}

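/* The RETS instruction does not restore registers or release stack
   space, so it can only be used when the offset between the argument
   pointer and the stack pointer is zero, i.e. when there is nothing
   to deallocate on return.  */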
bool
mn10300_can_use_rets_insn (void)
{
  return !mn10300_initial_offset (ARG_POINTER_REGNUM, STACK_POINTER_REGNUM);
}

/* Returns the set of live, callee-saved registers as a bitmask.  The
   callee-saved extended registers cannot be stored individually, so
   all of them will be included in the mask if any one of them is used.
   Also returns the number of bytes in the registers in the mask if
   BYTES_SAVED is not NULL.  */

unsigned int
mn10300_get_live_callee_saved_regs (unsigned int * bytes_saved)
{
  int mask;
  int i;
  unsigned int count;

  count = mask = 0;
  for (i = 0; i <= LAST_EXTENDED_REGNUM; i++)
    if (df_regs_ever_live_p (i) && ! call_really_used_regs[i])
      {
	mask |= (1 << i);
	++ count;
      }

  if ((mask & 0x3c000) != 0)
    {
      for (i = 0x04000; i < 0x40000; i <<= 1)
	if ((mask & i) == 0)
	  ++ count;

      mask |= 0x3c000;
    }

  if (bytes_saved)
    * bytes_saved = count * UNITS_PER_WORD;

  return mask;
}

678F (rtx r)
679{
680 RTX_FRAME_RELATED_P (r) = 1;
681 return r;
682}
683
4caa3669 684/* Generate an instruction that pushes several registers onto the stack.
685 Register K will be saved if bit K in MASK is set. The function does
686 nothing if MASK is zero.
687
688 To be compatible with the "movm" instruction, the lowest-numbered
689 register must be stored in the lowest slot. If MASK is the set
690 { R1,...,RN }, where R1...RN are ordered least first, the generated
691 instruction will have the form:
692
693 (parallel
694 (set (reg:SI 9) (plus:SI (reg:SI 9) (const_int -N*4)))
695 (set (mem:SI (plus:SI (reg:SI 9)
696 (const_int -1*4)))
697 (reg:SI RN))
698 ...
699 (set (mem:SI (plus:SI (reg:SI 9)
700 (const_int -N*4)))
701 (reg:SI R1))) */
702
32f9c04a 703static void
704mn10300_gen_multiple_store (unsigned int mask)
4caa3669 705{
32f9c04a 706 /* The order in which registers are stored, from SP-4 through SP-N*4. */
707 static const unsigned int store_order[8] = {
708 /* e2, e3: never saved */
709 FIRST_EXTENDED_REGNUM + 4,
710 FIRST_EXTENDED_REGNUM + 5,
711 FIRST_EXTENDED_REGNUM + 6,
712 FIRST_EXTENDED_REGNUM + 7,
713 /* e0, e1, mdrq, mcrh, mcrl, mcvf: never saved. */
714 FIRST_DATA_REGNUM + 2,
715 FIRST_DATA_REGNUM + 3,
716 FIRST_ADDRESS_REGNUM + 2,
717 FIRST_ADDRESS_REGNUM + 3,
718 /* d0, d1, a0, a1, mdr, lir, lar: never saved. */
719 };
720
721 rtx x, elts[9];
722 unsigned int i;
723 int count;
724
725 if (mask == 0)
726 return;
727
728 for (i = count = 0; i < ARRAY_SIZE(store_order); ++i)
4caa3669 729 {
32f9c04a 730 unsigned regno = store_order[i];
731
732 if (((mask >> regno) & 1) == 0)
733 continue;
4caa3669 734
32f9c04a 735 ++count;
29c05e22 736 x = plus_constant (Pmode, stack_pointer_rtx, count * -4);
32f9c04a 737 x = gen_frame_mem (SImode, x);
738 x = gen_rtx_SET (VOIDmode, x, gen_rtx_REG (SImode, regno));
739 elts[count] = F(x);
740
741 /* Remove the register from the mask so that... */
742 mask &= ~(1u << regno);
4caa3669 743 }
32f9c04a 744
745 /* ... we can make sure that we didn't try to use a register
746 not listed in the store order. */
747 gcc_assert (mask == 0);
748
749 /* Create the instruction that updates the stack pointer. */
29c05e22 750 x = plus_constant (Pmode, stack_pointer_rtx, count * -4);
32f9c04a 751 x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
752 elts[0] = F(x);
753
754 /* We need one PARALLEL element to update the stack pointer and
755 an additional element for each register that is stored. */
756 x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (count + 1, elts));
757 F (emit_insn (x));
4caa3669 758}
759
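/* Count the number of bits set in MASK; `mask & -mask' isolates the
   lowest set bit, which each iteration then clears.  */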
static inline unsigned int
popcount (unsigned int mask)
{
  unsigned int count = 0;

  while (mask)
    {
      ++ count;
      mask &= ~ (mask & - mask);
    }
  return count;
}

void
mn10300_expand_prologue (void)
{
  HOST_WIDE_INT size = mn10300_frame_size ();
  unsigned int mask;

  mask = mn10300_get_live_callee_saved_regs (NULL);
  /* If we use any of the callee-saved registers, save them now.  */
  mn10300_gen_multiple_store (mask);

  if (flag_stack_usage_info)
    current_function_static_stack_size = size + popcount (mask) * 4;

  if (TARGET_AM33_2 && fp_regs_to_save ())
    {
      int num_regs_to_save = fp_regs_to_save (), i;
      HOST_WIDE_INT xsize;
      enum
      {
	save_sp_merge,
	save_sp_no_merge,
	save_sp_partial_merge,
	save_a0_merge,
	save_a0_no_merge
      } strategy;
      unsigned int strategy_size = (unsigned)-1, this_strategy_size;
      rtx reg;

      if (flag_stack_usage_info)
	current_function_static_stack_size += num_regs_to_save * 4;

      /* We have several different strategies to save FP registers.
	 We can store them using SP offsets, which is beneficial if
	 there are just a few registers to save, or we can use `a0' in
	 post-increment mode (`a0' is the only call-clobbered address
	 register that is never used to pass information to a
	 function).  Furthermore, if we don't need a frame pointer, we
	 can merge the two SP adds into a single one, but this isn't
	 always beneficial; sometimes we can just split the two adds
	 so that we don't exceed a 16-bit constant size.  The code
	 below will select which strategy to use, so as to generate the
	 smallest code.  Ties are broken in favor of shorter sequences
	 (in terms of number of instructions).  */

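/* Estimated sizes, in bytes, of an `add S' to an address register
   (SIZE_ADD_AX) or to SP (SIZE_ADD_SP), depending on whether S fits in
   8 or 16 bits; SIZE_FMOV_SP estimates the fmov sequence that saves N
   FP registers at SP offsets starting at S.  */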
#define SIZE_ADD_AX(S) ((((S) >= (1 << 15)) || ((S) < -(1 << 15))) ? 6 \
			: (((S) >= (1 << 7)) || ((S) < -(1 << 7))) ? 4 : 2)
#define SIZE_ADD_SP(S) ((((S) >= (1 << 15)) || ((S) < -(1 << 15))) ? 6 \
			: (((S) >= (1 << 7)) || ((S) < -(1 << 7))) ? 4 : 3)

/* We add 0 * (S) in two places to promote to the type of S,
   so that all arms of the conditional have the same type.  */
#define SIZE_FMOV_LIMIT(S,N,L,SIZE1,SIZE2,ELSE) \
  (((S) >= (L)) ? 0 * (S) + (SIZE1) * (N) \
   : ((S) + 4 * (N) >= (L)) ? (((L) - (S)) / 4 * (SIZE2) \
			       + ((S) + 4 * (N) - (L)) / 4 * (SIZE1)) \
   : 0 * (S) + (ELSE))
#define SIZE_FMOV_SP_(S,N) \
  (SIZE_FMOV_LIMIT ((S), (N), (1 << 24), 7, 6, \
   SIZE_FMOV_LIMIT ((S), (N), (1 << 8), 6, 4, \
		    (S) ? 4 * (N) : 3 + 4 * ((N) - 1))))
#define SIZE_FMOV_SP(S,N) (SIZE_FMOV_SP_ ((unsigned HOST_WIDE_INT)(S), (N)))

      /* Consider alternative save_sp_merge only if we don't need the
	 frame pointer and size is nonzero.  */
      if (! frame_pointer_needed && size)
	{
	  /* Insn: add -(size + 4 * num_regs_to_save), sp.  */
	  this_strategy_size = SIZE_ADD_SP (-(size + 4 * num_regs_to_save));
	  /* Insn: fmov fs#, (##, sp), for each fs# to be saved.  */
	  this_strategy_size += SIZE_FMOV_SP (size, num_regs_to_save);

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = save_sp_merge;
	      strategy_size = this_strategy_size;
	    }
	}

      /* Consider alternative save_sp_no_merge unconditionally.  */
      /* Insn: add -4 * num_regs_to_save, sp.  */
      this_strategy_size = SIZE_ADD_SP (-4 * num_regs_to_save);
      /* Insn: fmov fs#, (##, sp), for each fs# to be saved.  */
      this_strategy_size += SIZE_FMOV_SP (0, num_regs_to_save);
      if (size)
	{
	  /* Insn: add -size, sp.  */
	  this_strategy_size += SIZE_ADD_SP (-size);
	}

      if (this_strategy_size < strategy_size)
	{
	  strategy = save_sp_no_merge;
	  strategy_size = this_strategy_size;
	}

      /* Consider alternative save_sp_partial_merge only if we don't
	 need a frame pointer and size is reasonably large.  */
      if (! frame_pointer_needed && size + 4 * num_regs_to_save > 128)
	{
	  /* Insn: add -128, sp.  */
	  this_strategy_size = SIZE_ADD_SP (-128);
	  /* Insn: fmov fs#, (##, sp), for each fs# to be saved.  */
	  this_strategy_size += SIZE_FMOV_SP (128 - 4 * num_regs_to_save,
					      num_regs_to_save);
	  if (size)
	    {
	      /* Insn: add 128-size, sp.  */
	      this_strategy_size += SIZE_ADD_SP (128 - size);
	    }

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = save_sp_partial_merge;
	      strategy_size = this_strategy_size;
	    }
	}

      /* Consider alternative save_a0_merge only if we don't need a
	 frame pointer, size is nonzero and the user hasn't
	 changed the calling conventions of a0.  */
      if (! frame_pointer_needed && size
	  && call_really_used_regs [FIRST_ADDRESS_REGNUM]
	  && ! fixed_regs[FIRST_ADDRESS_REGNUM])
	{
	  /* Insn: add -(size + 4 * num_regs_to_save), sp.  */
	  this_strategy_size = SIZE_ADD_SP (-(size + 4 * num_regs_to_save));
	  /* Insn: mov sp, a0.  */
	  this_strategy_size++;
	  if (size)
	    {
	      /* Insn: add size, a0.  */
	      this_strategy_size += SIZE_ADD_AX (size);
	    }
	  /* Insn: fmov fs#, (a0+), for each fs# to be saved.  */
	  this_strategy_size += 3 * num_regs_to_save;

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = save_a0_merge;
	      strategy_size = this_strategy_size;
	    }
	}

      /* Consider alternative save_a0_no_merge if the user hasn't
	 changed the calling conventions of a0.  */
      if (call_really_used_regs [FIRST_ADDRESS_REGNUM]
	  && ! fixed_regs[FIRST_ADDRESS_REGNUM])
	{
	  /* Insn: add -4 * num_regs_to_save, sp.  */
	  this_strategy_size = SIZE_ADD_SP (-4 * num_regs_to_save);
	  /* Insn: mov sp, a0.  */
	  this_strategy_size++;
	  /* Insn: fmov fs#, (a0+), for each fs# to be saved.  */
	  this_strategy_size += 3 * num_regs_to_save;
	  if (size)
	    {
	      /* Insn: add -size, sp.  */
	      this_strategy_size += SIZE_ADD_SP (-size);
	    }

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = save_a0_no_merge;
	      strategy_size = this_strategy_size;
	    }
	}

      /* Emit the initial SP add, common to all strategies.  */
      switch (strategy)
	{
	case save_sp_no_merge:
	case save_a0_no_merge:
	  F (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-4 * num_regs_to_save))));
	  xsize = 0;
	  break;

	case save_sp_partial_merge:
	  F (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-128))));
	  xsize = 128 - 4 * num_regs_to_save;
	  size -= xsize;
	  break;

	case save_sp_merge:
	case save_a0_merge:
	  F (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-(size + 4 * num_regs_to_save)))));
	  /* We'll have to adjust FP register saves according to the
	     frame size.  */
	  xsize = size;
	  /* Since we've already created the stack frame, don't do it
	     again at the end of the function.  */
	  size = 0;
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Now prepare register a0, if we have decided to use it.  */
      switch (strategy)
	{
	case save_sp_merge:
	case save_sp_no_merge:
	case save_sp_partial_merge:
	  reg = 0;
	  break;

	case save_a0_merge:
	case save_a0_no_merge:
	  reg = gen_rtx_REG (SImode, FIRST_ADDRESS_REGNUM);
	  F (emit_insn (gen_movsi (reg, stack_pointer_rtx)));
	  if (xsize)
	    F (emit_insn (gen_addsi3 (reg, reg, GEN_INT (xsize))));
	  reg = gen_rtx_POST_INC (SImode, reg);
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Now actually save the FP registers.  */
      for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
	if (df_regs_ever_live_p (i) && ! call_really_used_regs [i])
	  {
	    rtx addr;

	    if (reg)
	      addr = reg;
	    else
	      {
		/* If we aren't using `a0', use an SP offset.  */
		if (xsize)
		  {
		    addr = gen_rtx_PLUS (SImode,
					 stack_pointer_rtx,
					 GEN_INT (xsize));
		  }
		else
		  addr = stack_pointer_rtx;

		xsize += 4;
	      }

	    F (emit_insn (gen_movsf (gen_rtx_MEM (SFmode, addr),
				     gen_rtx_REG (SFmode, i))));
	  }
    }

  /* Now put the frame pointer into the frame pointer register.  */
  if (frame_pointer_needed)
    F (emit_move_insn (frame_pointer_rtx, stack_pointer_rtx));

  /* Allocate stack for this frame.  */
  if (size)
    F (emit_insn (gen_addsi3 (stack_pointer_rtx,
			      stack_pointer_rtx,
			      GEN_INT (-size))));

  if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
    emit_insn (gen_load_pic ());
}

void
mn10300_expand_epilogue (void)
{
  HOST_WIDE_INT size = mn10300_frame_size ();
  unsigned int reg_save_bytes;

  mn10300_get_live_callee_saved_regs (& reg_save_bytes);

  if (TARGET_AM33_2 && fp_regs_to_save ())
    {
      int num_regs_to_save = fp_regs_to_save (), i;
      rtx reg = 0;

      /* We have several options to restore FP registers.  We could
	 load them from SP offsets, but, if there are enough FP
	 registers to restore, we win if we use a post-increment
	 addressing mode.  */

      /* If we have a frame pointer, it's the best option, because we
	 already know it has the value we want.  */
      if (frame_pointer_needed)
	reg = gen_rtx_REG (SImode, FRAME_POINTER_REGNUM);
      /* Otherwise, we may use `a1', since it's call-clobbered and
	 it's never used for return values.  But only do so if it's
	 smaller than using SP offsets.  */
      else
	{
	  enum { restore_sp_post_adjust,
		 restore_sp_pre_adjust,
		 restore_sp_partial_adjust,
		 restore_a1 } strategy;
	  unsigned int this_strategy_size, strategy_size = (unsigned)-1;

	  /* Consider using sp offsets before adjusting sp.  */
	  /* Insn: fmov (##,sp),fs#, for each fs# to be restored.  */
	  this_strategy_size = SIZE_FMOV_SP (size, num_regs_to_save);
	  /* If size is too large, we'll have to adjust SP with an
	     add.  */
	  if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
	    {
	      /* Insn: add size + 4 * num_regs_to_save, sp.  */
	      this_strategy_size += SIZE_ADD_SP (size + 4 * num_regs_to_save);
	    }
	  /* If we don't have to restore any non-FP registers,
	     we'll be able to save one byte by using rets.  */
	  if (! reg_save_bytes)
	    this_strategy_size--;

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = restore_sp_post_adjust;
	      strategy_size = this_strategy_size;
	    }

	  /* Consider using sp offsets after adjusting sp.  */
	  /* Insn: add size, sp.  */
	  this_strategy_size = SIZE_ADD_SP (size);
	  /* Insn: fmov (##,sp),fs#, for each fs# to be restored.  */
	  this_strategy_size += SIZE_FMOV_SP (0, num_regs_to_save);
	  /* We're going to use ret to release the FP registers
	     save area, so, no savings.  */

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = restore_sp_pre_adjust;
	      strategy_size = this_strategy_size;
	    }

	  /* Consider using sp offsets after partially adjusting sp.
	     When size is close to 32Kb, we may be able to adjust SP
	     with an imm16 add instruction while still using fmov
	     (d8,sp).  */
	  if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
	    {
	      /* Insn: add size + 4 * num_regs_to_save
		       + reg_save_bytes - 252,sp.  */
	      this_strategy_size = SIZE_ADD_SP (size + 4 * num_regs_to_save
						+ (int) reg_save_bytes - 252);
	      /* Insn: fmov (##,sp),fs#, for each fs# to be restored.  */
	      this_strategy_size += SIZE_FMOV_SP (252 - reg_save_bytes
						  - 4 * num_regs_to_save,
						  num_regs_to_save);
	      /* We're going to use ret to release the FP registers
		 save area, so, no savings.  */

	      if (this_strategy_size < strategy_size)
		{
		  strategy = restore_sp_partial_adjust;
		  strategy_size = this_strategy_size;
		}
	    }

	  /* Consider using a1 in post-increment mode, as long as the
	     user hasn't changed the calling conventions of a1.  */
	  if (call_really_used_regs [FIRST_ADDRESS_REGNUM + 1]
	      && ! fixed_regs[FIRST_ADDRESS_REGNUM+1])
	    {
	      /* Insn: mov sp,a1.  */
	      this_strategy_size = 1;
	      if (size)
		{
		  /* Insn: add size,a1.  */
		  this_strategy_size += SIZE_ADD_AX (size);
		}
	      /* Insn: fmov (a1+),fs#, for each fs# to be restored.  */
	      this_strategy_size += 3 * num_regs_to_save;
	      /* If size is large enough, we may be able to save a
		 couple of bytes.  */
	      if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
		{
		  /* Insn: mov a1,sp.  */
		  this_strategy_size += 2;
		}
	      /* If we don't have to restore any non-FP registers,
		 we'll be able to save one byte by using rets.  */
	      if (! reg_save_bytes)
		this_strategy_size--;

	      if (this_strategy_size < strategy_size)
		{
		  strategy = restore_a1;
		  strategy_size = this_strategy_size;
		}
	    }

	  switch (strategy)
	    {
	    case restore_sp_post_adjust:
	      break;

	    case restore_sp_pre_adjust:
	      emit_insn (gen_addsi3 (stack_pointer_rtx,
				     stack_pointer_rtx,
				     GEN_INT (size)));
	      size = 0;
	      break;

	    case restore_sp_partial_adjust:
	      emit_insn (gen_addsi3 (stack_pointer_rtx,
				     stack_pointer_rtx,
				     GEN_INT (size + 4 * num_regs_to_save
					      + reg_save_bytes - 252)));
	      size = 252 - reg_save_bytes - 4 * num_regs_to_save;
	      break;

	    case restore_a1:
	      reg = gen_rtx_REG (SImode, FIRST_ADDRESS_REGNUM + 1);
	      emit_insn (gen_movsi (reg, stack_pointer_rtx));
	      if (size)
		emit_insn (gen_addsi3 (reg, reg, GEN_INT (size)));
	      break;

	    default:
	      gcc_unreachable ();
	    }
	}

      /* Adjust the selected register, if any, for post-increment.  */
      if (reg)
	reg = gen_rtx_POST_INC (SImode, reg);

      for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
	if (df_regs_ever_live_p (i) && ! call_really_used_regs [i])
	  {
	    rtx addr;

	    if (reg)
	      addr = reg;
	    else if (size)
	      {
		/* If we aren't using a post-increment register, use an
		   SP offset.  */
		addr = gen_rtx_PLUS (SImode,
				     stack_pointer_rtx,
				     GEN_INT (size));
	      }
	    else
	      addr = stack_pointer_rtx;

	    size += 4;

	    emit_insn (gen_movsf (gen_rtx_REG (SFmode, i),
				  gen_rtx_MEM (SFmode, addr)));
	  }

      /* If we were using the restore_a1 strategy and the number of
	 bytes to be released won't fit in the `ret' byte, copy `a1'
	 to `sp', to avoid having to use `add' to adjust it.  */
      if (! frame_pointer_needed && reg && size + reg_save_bytes > 255)
	{
	  emit_move_insn (stack_pointer_rtx, XEXP (reg, 0));
	  size = 0;
	}
    }

  /* Maybe cut back the stack, except for the register save area.

     If the frame pointer exists, then use the frame pointer to
     cut back the stack.

     If the stack size + register save area is more than 255 bytes,
     then the stack must be cut back here since the size + register
     save size is too big for a ret/retf instruction.

     Else leave it alone, it will be cut back as part of the
     ret/retf instruction, or there wasn't any stack to begin with.

     Under no circumstances should the register save area be
     deallocated here, that would leave a window where an interrupt
     could occur and trash the register save area.  */
  if (frame_pointer_needed)
    {
      emit_move_insn (stack_pointer_rtx, frame_pointer_rtx);
      size = 0;
    }
  else if (size + reg_save_bytes > 255)
    {
      emit_insn (gen_addsi3 (stack_pointer_rtx,
			     stack_pointer_rtx,
			     GEN_INT (size)));
      size = 0;
    }

  /* Adjust the stack and restore callee-saved registers, if any.  */
  if (mn10300_can_use_rets_insn ())
    emit_jump_insn (ret_rtx);
  else
    emit_jump_insn (gen_return_ret (GEN_INT (size + reg_save_bytes)));
}

/* Recognize the PARALLEL rtx generated by mn10300_gen_multiple_store().
   This function is for MATCH_PARALLEL and so assumes OP is known to be
   parallel.  If OP is a multiple store, return a mask indicating which
   registers it saves.  Return 0 otherwise.  */

unsigned int
mn10300_store_multiple_regs (rtx op)
{
  int count;
  int mask;
  int i;
  unsigned int last;
  rtx elt;

  count = XVECLEN (op, 0);
  if (count < 2)
    return 0;

  /* Check that first instruction has the form (set (sp) (plus A B))  */
  elt = XVECEXP (op, 0, 0);
  if (GET_CODE (elt) != SET
      || (! REG_P (SET_DEST (elt)))
      || REGNO (SET_DEST (elt)) != STACK_POINTER_REGNUM
      || GET_CODE (SET_SRC (elt)) != PLUS)
    return 0;

  /* Check that A is the stack pointer and B is the expected stack size.
     For OP to match, each subsequent instruction should push a word onto
     the stack.  We therefore expect the first instruction to create
     COUNT-1 stack slots.  */
  elt = SET_SRC (elt);
  if ((! REG_P (XEXP (elt, 0)))
      || REGNO (XEXP (elt, 0)) != STACK_POINTER_REGNUM
      || (! CONST_INT_P (XEXP (elt, 1)))
      || INTVAL (XEXP (elt, 1)) != -(count - 1) * 4)
    return 0;

  mask = 0;
  for (i = 1; i < count; i++)
    {
      /* Check that element i is a (set (mem M) R).  */
      /* ??? Validate the register order a-la mn10300_gen_multiple_store.
	 Remember: the ordering is *not* monotonic.  */
      elt = XVECEXP (op, 0, i);
      if (GET_CODE (elt) != SET
	  || (! MEM_P (SET_DEST (elt)))
	  || (! REG_P (SET_SRC (elt))))
	return 0;

      /* Remember which registers are to be saved.  */
      last = REGNO (SET_SRC (elt));
      mask |= (1 << last);

      /* Check that M has the form (plus (sp) (const_int -I*4))  */
      elt = XEXP (SET_DEST (elt), 0);
      if (GET_CODE (elt) != PLUS
	  || (! REG_P (XEXP (elt, 0)))
	  || REGNO (XEXP (elt, 0)) != STACK_POINTER_REGNUM
	  || (! CONST_INT_P (XEXP (elt, 1)))
	  || INTVAL (XEXP (elt, 1)) != -i * 4)
	return 0;
    }

  /* All or none of the callee-saved extended registers must be in the set.  */
  if ((mask & 0x3c000) != 0
      && (mask & 0x3c000) != 0x3c000)
    return 0;

  return mask;
}

/* Implement TARGET_PREFERRED_RELOAD_CLASS.  */

static reg_class_t
mn10300_preferred_reload_class (rtx x, reg_class_t rclass)
{
  if (x == stack_pointer_rtx && rclass != SP_REGS)
    return (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
  else if (MEM_P (x)
	   || (REG_P (x)
	       && !HARD_REGISTER_P (x))
	   || (GET_CODE (x) == SUBREG
	       && REG_P (SUBREG_REG (x))
	       && !HARD_REGISTER_P (SUBREG_REG (x))))
    return LIMIT_RELOAD_CLASS (GET_MODE (x), rclass);
  else
    return rclass;
}

/* Implement TARGET_PREFERRED_OUTPUT_RELOAD_CLASS.  */

static reg_class_t
mn10300_preferred_output_reload_class (rtx x, reg_class_t rclass)
{
  if (x == stack_pointer_rtx && rclass != SP_REGS)
    return (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
  return rclass;
}

/* Implement TARGET_SECONDARY_RELOAD.  */

static reg_class_t
mn10300_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
			  machine_mode mode, secondary_reload_info *sri)
{
  enum reg_class rclass = (enum reg_class) rclass_i;
  enum reg_class xclass = NO_REGS;
  unsigned int xregno = INVALID_REGNUM;

  if (REG_P (x))
    {
      xregno = REGNO (x);
      if (xregno >= FIRST_PSEUDO_REGISTER)
	xregno = true_regnum (x);
      if (xregno != INVALID_REGNUM)
	xclass = REGNO_REG_CLASS (xregno);
    }

  if (!TARGET_AM33)
    {
      /* Memory load/stores less than a full word wide can't have an
	 address or stack pointer destination.  They must use a data
	 register as an intermediate register.  */
      if (rclass != DATA_REGS
	  && (mode == QImode || mode == HImode)
	  && xclass == NO_REGS)
	return DATA_REGS;

      /* We can only move SP to/from an address register.  */
      if (in_p
	  && rclass == SP_REGS
	  && xclass != ADDRESS_REGS)
	return ADDRESS_REGS;
      if (!in_p
	  && xclass == SP_REGS
	  && rclass != ADDRESS_REGS
	  && rclass != SP_OR_ADDRESS_REGS)
	return ADDRESS_REGS;
    }

  /* We can't directly load sp + const_int into a register;
     we must use an address register as a scratch.  */
  if (in_p
      && rclass != SP_REGS
      && rclass != SP_OR_ADDRESS_REGS
      && rclass != SP_OR_GENERAL_REGS
      && GET_CODE (x) == PLUS
      && (XEXP (x, 0) == stack_pointer_rtx
	  || XEXP (x, 1) == stack_pointer_rtx))
    {
      sri->icode = CODE_FOR_reload_plus_sp_const;
      return NO_REGS;
    }

  /* We can only move MDR to/from a data register.  */
  if (rclass == MDR_REGS && xclass != DATA_REGS)
    return DATA_REGS;
  if (xclass == MDR_REGS && rclass != DATA_REGS)
    return DATA_REGS;

  /* We can't load/store an FP register from a constant address.  */
  if (TARGET_AM33_2
      && (rclass == FP_REGS || xclass == FP_REGS)
      && (xclass == NO_REGS || rclass == NO_REGS))
    {
      rtx addr = NULL;

      if (xregno >= FIRST_PSEUDO_REGISTER && xregno != INVALID_REGNUM)
	{
	  addr = reg_equiv_mem (xregno);
	  if (addr)
	    addr = XEXP (addr, 0);
	}
      else if (MEM_P (x))
	addr = XEXP (x, 0);

      if (addr && CONSTANT_ADDRESS_P (addr))
	return GENERAL_REGS;
    }
  /* Otherwise assume no secondary reloads are needed.  */
  return NO_REGS;
}

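/* Return the size in bytes of the current function's stack frame:
   local variables plus outgoing argument space, plus four bytes for
   the saved return pointer when calls are made.  */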
int
mn10300_frame_size (void)
{
  /* size includes the fixed stack space needed for function calls.  */
  int size = get_frame_size () + crtl->outgoing_args_size;

  /* And space for the return pointer.  */
  size += crtl->outgoing_args_size ? 4 : 0;

  return size;
}

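/* Compute the offset between registers FROM and TO for the purposes of
   register elimination.  */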
int
mn10300_initial_offset (int from, int to)
{
  int diff = 0;

  gcc_assert (from == ARG_POINTER_REGNUM || from == FRAME_POINTER_REGNUM);
  gcc_assert (to == FRAME_POINTER_REGNUM || to == STACK_POINTER_REGNUM);

  if (to == STACK_POINTER_REGNUM)
    diff = mn10300_frame_size ();

  /* The difference between the argument pointer and the frame pointer
     is the size of the callee register save area.  */
  if (from == ARG_POINTER_REGNUM)
    {
      unsigned int reg_save_bytes;

      mn10300_get_live_callee_saved_regs (& reg_save_bytes);
      diff += reg_save_bytes;
      diff += 4 * fp_regs_to_save ();
    }

  return diff;
}

/* Worker function for TARGET_RETURN_IN_MEMORY.  */

static bool
mn10300_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  /* Return values > 8 bytes in length in memory.  */
  return (int_size_in_bytes (type) > 8
	  || int_size_in_bytes (type) == 0
	  || TYPE_MODE (type) == BLKmode);
}

/* Flush the argument registers to the stack for a stdarg function;
   return the new argument pointer.  */
static rtx
mn10300_builtin_saveregs (void)
{
  rtx offset, mem;
  tree fntype = TREE_TYPE (current_function_decl);
  int argadj = ((!stdarg_p (fntype))
		? UNITS_PER_WORD : 0);
  alias_set_type set = get_varargs_alias_set ();

  if (argadj)
    offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, argadj);
  else
    offset = crtl->args.arg_offset_rtx;

  mem = gen_rtx_MEM (SImode, crtl->args.internal_arg_pointer);
  set_mem_alias_set (mem, set);
  emit_move_insn (mem, gen_rtx_REG (SImode, 0));

  mem = gen_rtx_MEM (SImode,
		     plus_constant (Pmode,
				    crtl->args.internal_arg_pointer, 4));
  set_mem_alias_set (mem, set);
  emit_move_insn (mem, gen_rtx_REG (SImode, 1));

  return copy_to_reg (expand_binop (Pmode, add_optab,
				    crtl->args.internal_arg_pointer,
				    offset, 0, 0, OPTAB_LIB_WIDEN));
}

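/* Expand va_start: spill the anonymous argument registers to the stack
   via expand_builtin_saveregs, then use the standard expansion to point
   VALIST at the first anonymous argument.  */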
static void
mn10300_va_start (tree valist, rtx nextarg)
{
  nextarg = expand_builtin_saveregs ();
  std_expand_builtin_va_start (valist, nextarg);
}

/* Return true when a parameter should be passed by reference.  */

static bool
mn10300_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
			   machine_mode mode, const_tree type,
			   bool named ATTRIBUTE_UNUSED)
{
  unsigned HOST_WIDE_INT size;

  if (type)
    size = int_size_in_bytes (type);
  else
    size = GET_MODE_SIZE (mode);

  return (size > 8 || size == 0);
}

/* Return an RTX to represent where an argument with mode MODE will be
   passed to a function.  If the result is NULL_RTX, the argument is
   pushed.  */

static rtx
mn10300_function_arg (cumulative_args_t cum_v, machine_mode mode,
		      const_tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  rtx result = NULL_RTX;
  int size;

  /* We only support using 2 data registers as argument registers.  */
  int nregs = 2;

  /* Figure out the size of the object to be passed.  */
  if (mode == BLKmode)
    size = int_size_in_bytes (type);
  else
    size = GET_MODE_SIZE (mode);

  cum->nbytes = (cum->nbytes + 3) & ~3;

  /* Don't pass this arg via a register if all the argument registers
     are used up.  */
  if (cum->nbytes > nregs * UNITS_PER_WORD)
    return result;

  /* Don't pass this arg via a register if it would be split between
     registers and memory.  */
  if (type == NULL_TREE
      && cum->nbytes + size > nregs * UNITS_PER_WORD)
    return result;

  switch (cum->nbytes / UNITS_PER_WORD)
    {
    case 0:
      result = gen_rtx_REG (mode, FIRST_ARGUMENT_REGNUM);
      break;
    case 1:
      result = gen_rtx_REG (mode, FIRST_ARGUMENT_REGNUM + 1);
      break;
    default:
      break;
    }

  return result;
}

/* Update the data in CUM to advance over an argument
   of mode MODE and data type TYPE.
   (TYPE is null for libcalls where that information may not be available.)  */

static void
mn10300_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
			      const_tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);

  cum->nbytes += (mode != BLKmode
		  ? (GET_MODE_SIZE (mode) + 3) & ~3
		  : (int_size_in_bytes (type) + 3) & ~3);
}

/* Return the number of bytes of registers to use for an argument passed
   partially in registers and partially in memory.  */

static int
mn10300_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
			   tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  int size;

  /* We only support using 2 data registers as argument registers.  */
  int nregs = 2;

  /* Figure out the size of the object to be passed.  */
  if (mode == BLKmode)
    size = int_size_in_bytes (type);
  else
    size = GET_MODE_SIZE (mode);

  cum->nbytes = (cum->nbytes + 3) & ~3;

  /* Don't pass this arg via a register if all the argument registers
     are used up.  */
  if (cum->nbytes > nregs * UNITS_PER_WORD)
    return 0;

  if (cum->nbytes + size <= nregs * UNITS_PER_WORD)
    return 0;

  /* Don't pass this arg via a register if it would be split between
     registers and memory.  */
  if (type == NULL_TREE
      && cum->nbytes + size > nregs * UNITS_PER_WORD)
    return 0;

  return nregs * UNITS_PER_WORD - cum->nbytes;
}

/* Return the location of the function's value.  This will be either
   $d0 for integer functions, $a0 for pointers, or a PARALLEL of both
   $d0 and $a0 if the -mreturn-pointer-on-d0 flag is set.  Note that
   we only return the PARALLEL for outgoing values; we do not want
   callers relying on this extra copy.  */
1662
b6713ba6 1663static rtx
1664mn10300_function_value (const_tree valtype,
1665 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
1666 bool outgoing)
00b1da0e 1667{
1668 rtx rv;
3754d046 1669 machine_mode mode = TYPE_MODE (valtype);
00b1da0e 1670
1671 if (! POINTER_TYPE_P (valtype))
1672 return gen_rtx_REG (mode, FIRST_DATA_REGNUM);
1673 else if (! TARGET_PTR_A0D0 || ! outgoing
18d50ae6 1674 || cfun->returns_struct)
00b1da0e 1675 return gen_rtx_REG (mode, FIRST_ADDRESS_REGNUM);
1676
1677 rv = gen_rtx_PARALLEL (mode, rtvec_alloc (2));
1678 XVECEXP (rv, 0, 0)
1679 = gen_rtx_EXPR_LIST (VOIDmode,
1680 gen_rtx_REG (mode, FIRST_ADDRESS_REGNUM),
1681 GEN_INT (0));
fb16c776 1682
00b1da0e 1683 XVECEXP (rv, 0, 1)
1684 = gen_rtx_EXPR_LIST (VOIDmode,
1685 gen_rtx_REG (mode, FIRST_DATA_REGNUM),
1686 GEN_INT (0));
1687 return rv;
1688}
1689
b6713ba6 1690/* Implements TARGET_LIBCALL_VALUE. */
1691
1692static rtx
3754d046 1693mn10300_libcall_value (machine_mode mode,
b6713ba6 1694 const_rtx fun ATTRIBUTE_UNUSED)
1695{
1696 return gen_rtx_REG (mode, FIRST_DATA_REGNUM);
1697}
1698
1699/* Implements FUNCTION_VALUE_REGNO_P. */
1700
1701bool
1702mn10300_function_value_regno_p (const unsigned int regno)
1703{
1704 return (regno == FIRST_DATA_REGNUM || regno == FIRST_ADDRESS_REGNUM);
1705}
1706
990679af 1707/* Output an addition operation. */
5574dbdd 1708
feb9af9f 1709const char *
990679af 1710mn10300_output_add (rtx operands[3], bool need_flags)
bb4959a8 1711{
990679af 1712 rtx dest, src1, src2;
1713 unsigned int dest_regnum, src1_regnum, src2_regnum;
1714 enum reg_class src1_class, src2_class, dest_class;
bb4959a8 1715
990679af 1716 dest = operands[0];
1717 src1 = operands[1];
1718 src2 = operands[2];
bb4959a8 1719
990679af 1720 dest_regnum = true_regnum (dest);
1721 src1_regnum = true_regnum (src1);
bb4959a8 1722
990679af 1723 dest_class = REGNO_REG_CLASS (dest_regnum);
1724 src1_class = REGNO_REG_CLASS (src1_regnum);
bb4959a8 1725
f9e46c25 1726 if (CONST_INT_P (src2))
990679af 1727 {
1728 gcc_assert (dest_regnum == src1_regnum);
bb4959a8 1729
990679af 1730 if (src2 == const1_rtx && !need_flags)
1731 return "inc %0";
1732 if (INTVAL (src2) == 4 && !need_flags && dest_class != DATA_REGS)
1733 return "inc4 %0";
911517ac 1734
990679af 1735 gcc_assert (!need_flags || dest_class != SP_REGS);
1736 return "add %2,%0";
1737 }
1738 else if (CONSTANT_P (src2))
1739 return "add %2,%0";
1740
1741 src2_regnum = true_regnum (src2);
1742 src2_class = REGNO_REG_CLASS (src2_regnum);
1743
1744 if (dest_regnum == src1_regnum)
1745 return "add %2,%0";
1746 if (dest_regnum == src2_regnum)
1747 return "add %1,%0";
1748
1749 /* The rest of the cases are reg = reg+reg. For AM33, we can implement
1750 this directly, as below, but when optimizing for space we can sometimes
1751 do better by using a mov+add. For MN103, we claimed that we could
1752 implement a three-operand add because the various move and add insns
1753 change sizes across register classes, and we can often do better than
1754 reload in choosing which operand to move. */
1755 if (TARGET_AM33 && optimize_insn_for_speed_p ())
1756 return "add %2,%1,%0";
1757
1758 /* Catch cases where no extended register was used. */
1759 if (src1_class != EXTENDED_REGS
1760 && src2_class != EXTENDED_REGS
1761 && dest_class != EXTENDED_REGS)
1762 {
1763 /* We have to copy one of the sources into the destination, then
1764 add the other source to the destination.
1765
1766 Carefully select which source to copy to the destination; a
1767 naive implementation will waste a byte when the source classes
1768 are different and the destination is an address register.
1769 Selecting the lowest cost register copy will optimize this
1770 sequence. */
1771 if (src1_class == dest_class)
1772 return "mov %1,%0\n\tadd %2,%0";
1773 else
1774 return "mov %2,%0\n\tadd %1,%0";
1775 }
911517ac 1776
990679af 1777 /* At least one register is an extended register. */
bb4959a8 1778
990679af 1779 /* The three operand add instruction on the am33 is a win iff the
1780 output register is an extended register, or if both source
1781 registers are extended registers. */
1782 if (dest_class == EXTENDED_REGS || src1_class == src2_class)
1783 return "add %2,%1,%0";
1784
1785 /* It is better to copy one of the sources to the destination, then
1786 perform a 2 address add. The destination in this case must be
1787 an address or data register and one of the sources must be an
1788 extended register and the remaining source must not be an extended
1789 register.
1790
1791 The best code for this case is to copy the extended reg to the
1792 destination, then emit a two address add. */
1793 if (src1_class == EXTENDED_REGS)
1794 return "mov %1,%0\n\tadd %2,%0";
1795 else
1796 return "mov %2,%0\n\tadd %1,%0";
bb4959a8 1797}
36ed4406 1798
c4cd8f6a 1799/* Return 1 if X contains a symbolic expression. We know these
1800 expressions will have one of a few well defined forms, so
1801 we need only check those forms. */
3626e955 1802
c4cd8f6a 1803int
3626e955 1804mn10300_symbolic_operand (rtx op,
3754d046 1805 machine_mode mode ATTRIBUTE_UNUSED)
c4cd8f6a 1806{
1807 switch (GET_CODE (op))
1808 {
1809 case SYMBOL_REF:
1810 case LABEL_REF:
1811 return 1;
1812 case CONST:
1813 op = XEXP (op, 0);
1814 return ((GET_CODE (XEXP (op, 0)) == SYMBOL_REF
1815 || GET_CODE (XEXP (op, 0)) == LABEL_REF)
4879b320 1816 && CONST_INT_P (XEXP (op, 1)));
c4cd8f6a 1817 default:
1818 return 0;
1819 }
1820}
1821
1822/* Try machine dependent ways of modifying an illegitimate address
1823 to be legitimate. If we find one, return the new valid address.
1824 This macro is used in only one place: `memory_address' in explow.c.
1825
1826 OLDX is the address as it was before break_out_memory_refs was called.
1827 In some cases it is useful to look at this to decide what needs to be done.
1828
c4cd8f6a 1829 Normally it is always safe for this macro to do nothing. It exists to
1830 recognize opportunities to optimize the output.
1831
1832 But on a few ports with segmented architectures and indexed addressing
1833 (mn10300, hppa) it is used to rewrite certain problematical addresses. */
3626e955 1834
5574dbdd 1835static rtx
41e3a0c7 1836mn10300_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
3754d046 1837 machine_mode mode ATTRIBUTE_UNUSED)
c4cd8f6a 1838{
3626e955 1839 if (flag_pic && ! mn10300_legitimate_pic_operand_p (x))
1840 x = mn10300_legitimize_pic_address (oldx, NULL_RTX);
b87a151a 1841
c4cd8f6a 1842 /* Uh-oh. We might have an address for x[n-100000]. This needs
1843 special handling to avoid creating an indexed memory address
1844 with x-100000 as the base. */
1845 if (GET_CODE (x) == PLUS
3626e955 1846 && mn10300_symbolic_operand (XEXP (x, 1), VOIDmode))
c4cd8f6a 1847 {
1848 /* Ugly. We modify things here so that the address offset specified
1849 by the index expression is computed first, then added to x to form
1850 the entire address. */
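      /* An illustrative example (derived from the code below): given

           (plus:SI (reg:SI N)
                    (const:SI (plus:SI (symbol_ref "x")
                                       (const_int -100000))))

         we compute N' = N + -100000 in a fresh register and return
         (plus:SI N' Rx), where Rx holds "x"; the large offset is
         combined with the base before the symbol is added.  */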
1851
59086782 1852 rtx regx1, regy1, regy2, y;
c4cd8f6a 1853
1854 /* Strip off any CONST. */
1855 y = XEXP (x, 1);
1856 if (GET_CODE (y) == CONST)
1857 y = XEXP (y, 0);
1858
c927a8ab 1859 if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
1860 {
1861 regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
1862 regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
1863 regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
1864 regx1 = force_reg (Pmode,
3626e955 1865 gen_rtx_fmt_ee (GET_CODE (y), Pmode, regx1,
1866 regy2));
7014838c 1867 return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
c927a8ab 1868 }
c4cd8f6a 1869 }
11b4605c 1870 return x;
c4cd8f6a 1871}
e2aead91 1872
b87a151a 1873/* Convert a non-PIC address in `orig' to a PIC address using @GOT or
09e5ce26 1874 @GOTOFF in `reg'. */
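/* A sketch of the RTL generated below (derived from the code):

     local symbol S (GOTOFF case):
       reg = (const (unspec [S] UNSPEC_GOTOFF))
       reg = reg + pic_offset_table_rtx

     global symbol S (GOT case):
       reg = (mem (plus pic_offset_table_rtx
                        (const (unspec [S] UNSPEC_GOT))))  */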
3626e955 1875
b87a151a 1876rtx
3626e955 1877mn10300_legitimize_pic_address (rtx orig, rtx reg)
b87a151a 1878{
d92c1383 1879 rtx x;
1880
b87a151a 1881 if (GET_CODE (orig) == LABEL_REF
1882 || (GET_CODE (orig) == SYMBOL_REF
1883 && (CONSTANT_POOL_ADDRESS_P (orig)
1884 || ! MN10300_GLOBAL_P (orig))))
1885 {
d92c1383 1886 if (reg == NULL)
b87a151a 1887 reg = gen_reg_rtx (Pmode);
1888
d92c1383 1889 x = gen_rtx_UNSPEC (SImode, gen_rtvec (1, orig), UNSPEC_GOTOFF);
1890 x = gen_rtx_CONST (SImode, x);
1891 emit_move_insn (reg, x);
1892
1893 x = emit_insn (gen_addsi3 (reg, reg, pic_offset_table_rtx));
b87a151a 1894 }
1895 else if (GET_CODE (orig) == SYMBOL_REF)
1896 {
d92c1383 1897 if (reg == NULL)
b87a151a 1898 reg = gen_reg_rtx (Pmode);
1899
d92c1383 1900 x = gen_rtx_UNSPEC (SImode, gen_rtvec (1, orig), UNSPEC_GOT);
1901 x = gen_rtx_CONST (SImode, x);
1902 x = gen_rtx_PLUS (SImode, pic_offset_table_rtx, x);
1903 x = gen_const_mem (SImode, x);
1904
1905 x = emit_move_insn (reg, x);
b87a151a 1906 }
d92c1383 1907 else
1908 return orig;
1909
1910 set_unique_reg_note (x, REG_EQUAL, orig);
1911 return reg;
b87a151a 1912}
1913
1914/* Return zero if X references a SYMBOL_REF or LABEL_REF whose symbol
fa483857 1915 isn't protected by a PIC unspec; nonzero otherwise. */
3626e955 1916
b87a151a 1917int
3626e955 1918mn10300_legitimate_pic_operand_p (rtx x)
b87a151a 1919{
3626e955 1920 const char *fmt;
1921 int i;
b87a151a 1922
1923 if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
1924 return 0;
1925
1926 if (GET_CODE (x) == UNSPEC
1927 && (XINT (x, 1) == UNSPEC_PIC
1928 || XINT (x, 1) == UNSPEC_GOT
1929 || XINT (x, 1) == UNSPEC_GOTOFF
b6e3379c 1930 || XINT (x, 1) == UNSPEC_PLT
1931 || XINT (x, 1) == UNSPEC_GOTSYM_OFF))
b87a151a 1932 return 1;
1933
b87a151a 1934 fmt = GET_RTX_FORMAT (GET_CODE (x));
1935 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
1936 {
1937 if (fmt[i] == 'E')
1938 {
5574dbdd 1939 int j;
b87a151a 1940
1941 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3626e955 1942 if (! mn10300_legitimate_pic_operand_p (XVECEXP (x, i, j)))
b87a151a 1943 return 0;
1944 }
3626e955 1945 else if (fmt[i] == 'e'
1946 && ! mn10300_legitimate_pic_operand_p (XEXP (x, i)))
b87a151a 1947 return 0;
1948 }
1949
1950 return 1;
1951}
1952
5411aa8c 1953/* Return TRUE if the address X, taken from a (MEM:MODE X) rtx, is
fd50b071 1954 legitimate, and FALSE otherwise.
1955
1956 On the mn10300, the value in the address register must be
1957 in the same memory space/segment as the effective address.
1958
1959 This is problematical for reload since it does not understand
1960 that base+index != index+base in a memory reference.
1961
1962 Note it is still possible to use reg+reg addressing modes,
1963 it's just much more difficult. For a discussion of a possible
1964 workaround and solution, see the comments in pa.c before the
1965 function record_unscaled_index_insn_codes. */
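/* For example (derived from the checks below): (reg), a constant
   address and reg+const are accepted on all targets; (post_inc (reg))
   and (post_modify (reg) (const)) only on AM33 and only for SImode,
   SFmode or HImode accesses; and reg+reg only on AM33 with both
   registers in the general class.  */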
1966
5574dbdd 1967static bool
3754d046 1968mn10300_legitimate_address_p (machine_mode mode, rtx x, bool strict)
5411aa8c 1969{
c8a596d6 1970 rtx base, index;
1971
1972 if (CONSTANT_ADDRESS_P (x))
1973 return !flag_pic || mn10300_legitimate_pic_operand_p (x);
5411aa8c 1974
1975 if (RTX_OK_FOR_BASE_P (x, strict))
c8a596d6 1976 return true;
1977
1978 if (TARGET_AM33 && (mode == SImode || mode == SFmode || mode == HImode))
1979 {
1980 if (GET_CODE (x) == POST_INC)
1981 return RTX_OK_FOR_BASE_P (XEXP (x, 0), strict);
1982 if (GET_CODE (x) == POST_MODIFY)
1983 return (RTX_OK_FOR_BASE_P (XEXP (x, 0), strict)
1984 && CONSTANT_ADDRESS_P (XEXP (x, 1)));
1985 }
1986
1987 if (GET_CODE (x) != PLUS)
1988 return false;
5411aa8c 1989
c8a596d6 1990 base = XEXP (x, 0);
1991 index = XEXP (x, 1);
5411aa8c 1992
c8a596d6 1993 if (!REG_P (base))
1994 return false;
1995 if (REG_P (index))
5411aa8c 1996 {
c8a596d6 1997 /* ??? Without AM33 generalized (Ri,Rn) addressing, reg+reg
1998 addressing is hard to satisfy. */
1999 if (!TARGET_AM33)
2000 return false;
5411aa8c 2001
c8a596d6 2002 return (REGNO_GENERAL_P (REGNO (base), strict)
2003 && REGNO_GENERAL_P (REGNO (index), strict));
2004 }
5411aa8c 2005
c8a596d6 2006 if (!REGNO_STRICT_OK_FOR_BASE_P (REGNO (base), strict))
2007 return false;
5411aa8c 2008
c8a596d6 2009 if (CONST_INT_P (index))
2010 return IN_RANGE (INTVAL (index), -1 - 0x7fffffff, 0x7fffffff);
2011
2012 if (CONSTANT_ADDRESS_P (index))
2013 return !flag_pic || mn10300_legitimate_pic_operand_p (index);
2014
2015 return false;
2016}
2017
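/* Return true if register REGNO, which may be a pseudo, is (or, once
   renumbered, will be) a member of register class RCLASS.  In the
   non-strict case unallocated pseudos are assumed to fit anywhere.  */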
2018bool
2019mn10300_regno_in_class_p (unsigned regno, int rclass, bool strict)
2020{
2021 if (regno >= FIRST_PSEUDO_REGISTER)
2022 {
2023 if (!strict)
2024 return true;
2025 if (!reg_renumber)
2026 return false;
2027 regno = reg_renumber[regno];
c2fa9c24 2028 if (regno == INVALID_REGNUM)
2029 return false;
c8a596d6 2030 }
2031 return TEST_HARD_REG_BIT (reg_class_contents[rclass], regno);
2032}
2033
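/* A sketch of the contract, derived from the code: used during reload
   on AM33 (where reg+reg addressing exists); if X is a sum with the
   stack pointer as one operand, reload the SP operand into a general
   register and return the modified X, or NULL_RTX if no change was
   made.  */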
2034rtx
2035mn10300_legitimize_reload_address (rtx x,
3754d046 2036 machine_mode mode ATTRIBUTE_UNUSED,
c8a596d6 2037 int opnum, int type,
2038 int ind_levels ATTRIBUTE_UNUSED)
2039{
2040 bool any_change = false;
2041
2042 /* See above re disabling reg+reg addressing for MN103. */
2043 if (!TARGET_AM33)
2044 return NULL_RTX;
2045
2046 if (GET_CODE (x) != PLUS)
2047 return NULL_RTX;
2048
2049 if (XEXP (x, 0) == stack_pointer_rtx)
2050 {
2051 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
2052 GENERAL_REGS, GET_MODE (x), VOIDmode, 0, 0,
2053 opnum, (enum reload_type) type);
2054 any_change = true;
2055 }
2056 if (XEXP (x, 1) == stack_pointer_rtx)
2057 {
2058 push_reload (XEXP (x, 1), NULL_RTX, &XEXP (x, 1), NULL,
2059 GENERAL_REGS, GET_MODE (x), VOIDmode, 0, 0,
2060 opnum, (enum reload_type) type);
2061 any_change = true;
5411aa8c 2062 }
2063
c8a596d6 2064 return any_change ? x : NULL_RTX;
5411aa8c 2065}
2066
ca316360 2067/* Implement TARGET_LEGITIMATE_CONSTANT_P. Returns TRUE if X is a valid
5574dbdd 2068 constant. Note that some "constants" aren't valid, such as TLS
2069 symbols and unconverted GOT-based references, so we eliminate
2070 those here. */
2071
ca316360 2072static bool
3754d046 2073mn10300_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
5574dbdd 2074{
2075 switch (GET_CODE (x))
2076 {
2077 case CONST:
2078 x = XEXP (x, 0);
2079
2080 if (GET_CODE (x) == PLUS)
2081 {
3626e955 2082 if (! CONST_INT_P (XEXP (x, 1)))
5574dbdd 2083 return false;
2084 x = XEXP (x, 0);
2085 }
2086
2087 /* Only some unspecs are valid as "constants". */
2088 if (GET_CODE (x) == UNSPEC)
2089 {
5574dbdd 2090 switch (XINT (x, 1))
2091 {
5574dbdd 2092 case UNSPEC_PIC:
2093 case UNSPEC_GOT:
2094 case UNSPEC_GOTOFF:
2095 case UNSPEC_PLT:
2096 return true;
2097 default:
2098 return false;
2099 }
2100 }
2101
2102 /* We must have drilled down to a symbol. */
3626e955 2103 if (! mn10300_symbolic_operand (x, Pmode))
5574dbdd 2104 return false;
2105 break;
2106
2107 default:
2108 break;
2109 }
2110
2111 return true;
2112}
2113
4c6c308e 2114/* Undo pic address legitimization for the benefit of debug info. */
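/* For example (derived from the code below):

     (plus:SI (reg PIC) (const (unspec [(symbol_ref "x")] GOTOFF)))
       --> (symbol_ref "x")

     (mem:SI (plus:SI (reg PIC) (const (unspec [(symbol_ref "x")] GOT))))
       --> (symbol_ref "x")

   where PIC denotes the PIC register.  */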
2115
2116static rtx
2117mn10300_delegitimize_address (rtx orig_x)
2118{
2119 rtx x = orig_x, ret, addend = NULL;
2120 bool need_mem;
2121
2122 if (MEM_P (x))
2123 x = XEXP (x, 0);
2124 if (GET_CODE (x) != PLUS || GET_MODE (x) != Pmode)
2125 return orig_x;
2126
2127 if (XEXP (x, 0) == pic_offset_table_rtx)
2128 ;
2129 /* With the REG+REG addressing of AM33, var-tracking can re-assemble
2130 some odd-looking "addresses" that were never valid in the first place.
2131 We need to look harder to avoid warnings being emitted. */
2132 else if (GET_CODE (XEXP (x, 0)) == PLUS)
2133 {
2134 rtx x0 = XEXP (x, 0);
2135 rtx x00 = XEXP (x0, 0);
2136 rtx x01 = XEXP (x0, 1);
2137
2138 if (x00 == pic_offset_table_rtx)
2139 addend = x01;
2140 else if (x01 == pic_offset_table_rtx)
2141 addend = x00;
2142 else
2143 return orig_x;
2145 }
2146 else
2147 return orig_x;
2148 x = XEXP (x, 1);
2149
2150 if (GET_CODE (x) != CONST)
2151 return orig_x;
2152 x = XEXP (x, 0);
2153 if (GET_CODE (x) != UNSPEC)
2154 return orig_x;
2155
2156 ret = XVECEXP (x, 0, 0);
2157 if (XINT (x, 1) == UNSPEC_GOTOFF)
2158 need_mem = false;
2159 else if (XINT (x, 1) == UNSPEC_GOT)
2160 need_mem = true;
2161 else
2162 return orig_x;
2163
2164 gcc_assert (GET_CODE (ret) == SYMBOL_REF);
2165 if (need_mem != MEM_P (orig_x))
2166 return orig_x;
2167 if (need_mem && addend)
2168 return orig_x;
2169 if (addend)
2170 ret = gen_rtx_PLUS (Pmode, addend, ret);
2171 return ret;
2172}
2173
28f32607 2174/* For addresses, costs are relative to "MOV (Rm),Rn". For AM33 this is
2175 the 3-byte fully general instruction; for MN103 this is the 2-byte form
2176 with an address register. */
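/* As a size-cost illustration (derived from the cases below): (reg)
   and (post_inc (reg)) add 0 bytes; reg plus an 8-bit displacement
   adds 1; reg plus a 16-bit displacement adds 2; a bare 32-bit
   symbolic address adds 4; and reg plus a 32-bit offset adds 6.  */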
2177
ec0457a8 2178static int
3754d046 2179mn10300_address_cost (rtx x, machine_mode mode ATTRIBUTE_UNUSED,
d9c5e5f4 2180 addr_space_t as ATTRIBUTE_UNUSED, bool speed)
e2aead91 2181{
28f32607 2182 HOST_WIDE_INT i;
2183 rtx base, index;
2184
e2aead91 2185 switch (GET_CODE (x))
2186 {
28f32607 2187 case CONST:
2188 case SYMBOL_REF:
2189 case LABEL_REF:
2190 /* We assume all of these require a 32-bit constant, even though
2191 some symbol and label references can be relaxed. */
2192 return speed ? 1 : 4;
2193
e2aead91 2194 case REG:
28f32607 2195 case SUBREG:
2196 case POST_INC:
2197 return 0;
2198
2199 case POST_MODIFY:
2200 /* Assume any symbolic offset is a 32-bit constant. */
2201 i = (CONST_INT_P (XEXP (x, 1)) ? INTVAL (XEXP (x, 1)) : 0x12345678);
2202 if (IN_RANGE (i, -128, 127))
2203 return speed ? 0 : 1;
2204 if (speed)
2205 return 1;
2206 if (IN_RANGE (i, -0x800000, 0x7fffff))
2207 return 3;
2208 return 4;
2209
2210 case PLUS:
2211 base = XEXP (x, 0);
2212 index = XEXP (x, 1);
2213 if (register_operand (index, SImode))
e2aead91 2214 {
28f32607 2215 /* Attempt to minimize the number of registers in the address.
2216 This is similar to what other ports do. */
2217 if (register_operand (base, SImode))
2218 return 1;
e2aead91 2219
28f32607 2220 base = XEXP (x, 1);
2221 index = XEXP (x, 0);
2222 }
e2aead91 2223
28f32607 2224 /* Assume any symbolic offset is a 32-bit constant. */
2225 i = (CONST_INT_P (XEXP (x, 1)) ? INTVAL (XEXP (x, 1)) : 0x12345678);
2226 if (IN_RANGE (i, -128, 127))
2227 return speed ? 0 : 1;
2228 if (IN_RANGE (i, -32768, 32767))
2229 return speed ? 0 : 2;
2230 return speed ? 2 : 6;
e2aead91 2231
28f32607 2232 default:
20d892d1 2233 return rtx_cost (x, MEM, 0, speed);
28f32607 2234 }
2235}
e2aead91 2236
28f32607 2237/* Implement the TARGET_REGISTER_MOVE_COST hook.
e2aead91 2238
28f32607 2239 Recall that the base value of 2 is required by assumptions elsewhere
2240 in the body of the compiler, and that cost 2 is special-cased as an
2241 early exit from reload meaning no work is required. */
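/* E.g. (illustrative): when optimizing for size, an ADDRESS_REGS to
   ADDRESS_REGS copy costs 2, i.e. one byte at the bytes*2 scale used
   below, while a move with no direct encoding, such as MDR_REGS to
   SP_REGS, is costed as the sum of the two moves through the scratch
   class.  */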
e2aead91 2242
28f32607 2243static int
3754d046 2244mn10300_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
28f32607 2245 reg_class_t ifrom, reg_class_t ito)
2246{
2247 enum reg_class from = (enum reg_class) ifrom;
2248 enum reg_class to = (enum reg_class) ito;
2249 enum reg_class scratch, test;
2250
2251 /* Simplify the following code by unifying the fp register classes. */
2252 if (to == FP_ACC_REGS)
2253 to = FP_REGS;
2254 if (from == FP_ACC_REGS)
2255 from = FP_REGS;
2256
2257 /* Diagnose invalid moves by costing them as two moves. */
2258
2259 scratch = NO_REGS;
2260 test = from;
2261 if (to == SP_REGS)
2262 scratch = (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
85a6eed4 2263 else if (to == MDR_REGS)
2264 scratch = DATA_REGS;
28f32607 2265 else if (to == FP_REGS && to != from)
2266 scratch = GENERAL_REGS;
2267 else
2268 {
2269 test = to;
2270 if (from == SP_REGS)
2271 scratch = (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
85a6eed4 2272 else if (from == MDR_REGS)
2273 scratch = DATA_REGS;
28f32607 2274 else if (from == FP_REGS && to != from)
2275 scratch = GENERAL_REGS;
2276 }
2277 if (scratch != NO_REGS && !reg_class_subset_p (test, scratch))
2278 return (mn10300_register_move_cost (VOIDmode, from, scratch)
2279 + mn10300_register_move_cost (VOIDmode, scratch, to));
e2aead91 2280
28f32607 2281 /* From here on, all we need consider are legal combinations. */
e2aead91 2282
28f32607 2283 if (optimize_size)
2284 {
2285 /* The scale here is bytes * 2. */
e2aead91 2286
28f32607 2287 if (from == to && (to == ADDRESS_REGS || to == DATA_REGS))
2288 return 2;
e2aead91 2289
28f32607 2290 if (from == SP_REGS)
2291 return (to == ADDRESS_REGS ? 2 : 6);
2292
2293 /* For AM33, all remaining legal moves are two bytes. */
2294 if (TARGET_AM33)
2295 return 4;
2296
2297 if (to == SP_REGS)
2298 return (from == ADDRESS_REGS ? 4 : 6);
2299
2300 if ((from == ADDRESS_REGS || from == DATA_REGS)
2301 && (to == ADDRESS_REGS || to == DATA_REGS))
2302 return 4;
2303
2304 if (to == EXTENDED_REGS)
2305 return (to == from ? 6 : 4);
e2aead91 2306
28f32607 2307 /* What's left are SP_REGS, FP_REGS, or combinations of the above. */
2308 return 6;
2309 }
2310 else
2311 {
2312 /* The scale here is cycles * 2. */
2313
2314 if (to == FP_REGS)
2315 return 8;
2316 if (from == FP_REGS)
2317 return 4;
2318
2319 /* All legal moves between integral registers are single cycle. */
2320 return 2;
e2aead91 2321 }
2322}
fab7adbf 2323
28f32607 2324/* Implement the TARGET_MEMORY_MOVE_COST hook.
2325
2326 Given lack of the form of the address, this must be speed-relative,
2327 though we should never be less expensive than a size-relative register
2328 move cost above. This is not a problem. */
2329
ec0457a8 2330static int
3754d046 2331mn10300_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
28f32607 2332 reg_class_t iclass, bool in ATTRIBUTE_UNUSED)
ec0457a8 2333{
28f32607 2334 enum reg_class rclass = (enum reg_class) iclass;
2335
2336 if (rclass == FP_REGS)
2337 return 8;
2338 return 6;
ec0457a8 2339}
2340
28f32607 2341/* Implement the TARGET_RTX_COSTS hook.
2342
2343 Speed-relative costs are relative to COSTS_N_INSNS, which is intended
2344 to represent cycles. Size-relative costs are in bytes. */
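/* For example (derived from the CONST_INT cases below): for size,
   loading constant 0 into a register costs 1 byte, a 16-bit immediate
   3 bytes and a full 32-bit immediate 6 bytes; for speed the same
   loads cost COSTS_N_INSNS (1) and COSTS_N_INSNS (2) respectively.  */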
2345
fab7adbf 2346static bool
20d892d1 2347mn10300_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
2348 int *ptotal, bool speed)
fab7adbf 2349{
28f32607 2350 /* This value is used for SYMBOL_REF etc where we want to pretend
2351 we have a full 32-bit constant. */
2352 HOST_WIDE_INT i = 0x12345678;
2353 int total;
2354
fab7adbf 2355 switch (code)
2356 {
2357 case CONST_INT:
28f32607 2358 i = INTVAL (x);
2359 do_int_costs:
2360 if (speed)
2361 {
2362 if (outer_code == SET)
2363 {
2364 /* 16-bit integer loads have latency 1, 32-bit loads 2. */
2365 if (IN_RANGE (i, -32768, 32767))
2366 total = COSTS_N_INSNS (1);
2367 else
2368 total = COSTS_N_INSNS (2);
2369 }
2370 else
2371 {
2372 /* 16-bit integer operands don't affect latency;
2373 24-bit and 32-bit operands add a cycle. */
2374 if (IN_RANGE (i, -32768, 32767))
2375 total = 0;
2376 else
2377 total = COSTS_N_INSNS (1);
2378 }
2379 }
fab7adbf 2380 else
28f32607 2381 {
2382 if (outer_code == SET)
2383 {
2384 if (i == 0)
2385 total = 1;
2386 else if (IN_RANGE (i, -128, 127))
2387 total = 2;
2388 else if (IN_RANGE (i, -32768, 32767))
2389 total = 3;
2390 else
2391 total = 6;
2392 }
2393 else
2394 {
2395 /* Reference here is ADD An,Dn, vs ADD imm,Dn. */
2396 if (IN_RANGE (i, -128, 127))
2397 total = 0;
2398 else if (IN_RANGE (i, -32768, 32767))
2399 total = 2;
2400 else if (TARGET_AM33 && IN_RANGE (i, -0x01000000, 0x00ffffff))
2401 total = 3;
2402 else
2403 total = 4;
2404 }
2405 }
2406 goto alldone;
fab7adbf 2407
2408 case CONST:
2409 case LABEL_REF:
2410 case SYMBOL_REF:
fab7adbf 2411 case CONST_DOUBLE:
28f32607 2412 /* We assume all of these require a 32-bit constant, even though
2413 some symbol and label references can be relaxed. */
2414 goto do_int_costs;
74f4459c 2415
28f32607 2416 case UNSPEC:
2417 switch (XINT (x, 1))
2418 {
2419 case UNSPEC_PIC:
2420 case UNSPEC_GOT:
2421 case UNSPEC_GOTOFF:
2422 case UNSPEC_PLT:
2423 case UNSPEC_GOTSYM_OFF:
2424 /* The PIC unspecs also resolve to a 32-bit constant. */
2425 goto do_int_costs;
fab7adbf 2426
28f32607 2427 default:
2428 /* Assume any non-listed unspec is some sort of arithmetic. */
2429 goto do_arith_costs;
2430 }
8935d57c 2431
28f32607 2432 case PLUS:
2433 /* Notice the size difference of INC and INC4. */
2434 if (!speed && outer_code == SET && CONST_INT_P (XEXP (x, 1)))
2435 {
2436 i = INTVAL (XEXP (x, 1));
2437 if (i == 1 || i == 4)
2438 {
20d892d1 2439 total = 1 + rtx_cost (XEXP (x, 0), PLUS, 0, speed);
28f32607 2440 goto alldone;
2441 }
2442 }
2443 goto do_arith_costs;
2444
2445 case MINUS:
2446 case AND:
2447 case IOR:
2448 case XOR:
2449 case NOT:
2450 case NEG:
2451 case ZERO_EXTEND:
2452 case SIGN_EXTEND:
2453 case COMPARE:
2454 case BSWAP:
2455 case CLZ:
2456 do_arith_costs:
2457 total = (speed ? COSTS_N_INSNS (1) : 2);
2458 break;
8935d57c 2459
28f32607 2460 case ASHIFT:
2461 /* Notice the size difference of ASL2 and variants. */
2462 if (!speed && CONST_INT_P (XEXP (x, 1)))
2463 switch (INTVAL (XEXP (x, 1)))
2464 {
2465 case 1:
2466 case 2:
2467 total = 1;
2468 goto alldone;
2469 case 3:
2470 case 4:
2471 total = 2;
2472 goto alldone;
2473 }
2474 /* FALLTHRU */
8935d57c 2475
28f32607 2476 case ASHIFTRT:
2477 case LSHIFTRT:
2478 total = (speed ? COSTS_N_INSNS (1) : 3);
2479 goto alldone;
8935d57c 2480
28f32607 2481 case MULT:
2482 total = (speed ? COSTS_N_INSNS (3) : 2);
8935d57c 2483 break;
fb16c776 2484
28f32607 2485 case DIV:
2486 case UDIV:
2487 case MOD:
2488 case UMOD:
2489 total = (speed ? COSTS_N_INSNS (39)
2490 /* Include space to load+retrieve MDR. */
2491 : code == MOD || code == UMOD ? 6 : 4);
8935d57c 2492 break;
fb16c776 2493
28f32607 2494 case MEM:
d9c5e5f4 2495 total = mn10300_address_cost (XEXP (x, 0), GET_MODE (x),
2496 MEM_ADDR_SPACE (x), speed);
28f32607 2497 if (speed)
2498 total = COSTS_N_INSNS (2 + total);
2499 goto alldone;
2500
8935d57c 2501 default:
28f32607 2502 /* Probably not implemented. Assume external call. */
2503 total = (speed ? COSTS_N_INSNS (10) : 7);
2504 break;
8935d57c 2505 }
2506
28f32607 2507 *ptotal = total;
2508 return false;
2509
2510 alldone:
2511 *ptotal = total;
2512 return true;
8935d57c 2513}
28f32607 2514
b87a151a 2515/* If using PIC, mark a SYMBOL_REF for a non-global symbol so that we
2516 may access it using GOTOFF instead of GOT. */
2517
2518static void
b4d5791b 2519mn10300_encode_section_info (tree decl, rtx rtl, int first)
b87a151a 2520{
2521 rtx symbol;
2522
b4d5791b 2523 default_encode_section_info (decl, rtl, first);
2524
3626e955 2525 if (! MEM_P (rtl))
b87a151a 2526 return;
b4d5791b 2527
b87a151a 2528 symbol = XEXP (rtl, 0);
2529 if (GET_CODE (symbol) != SYMBOL_REF)
2530 return;
2531
2532 if (flag_pic)
2533 SYMBOL_REF_FLAG (symbol) = (*targetm.binds_local_p) (decl);
2534}
906bb5c3 2535
2536/* Dispatch tables on the mn10300 are extremely expensive in terms of code
2537 and readonly data size. So we crank up the case threshold value to
2538 encourage a series of if/else comparisons to implement many small switch
2539 statements. In theory, this value could be increased much more if we
2540 were solely optimizing for space, but we keep it "reasonable" to avoid
2541 serious code efficiency lossage. */
2542
5574dbdd 2543static unsigned int
2544mn10300_case_values_threshold (void)
906bb5c3 2545{
2546 return 6;
2547}
3e16f982 2548
3e16f982 2549/* Worker function for TARGET_TRAMPOLINE_INIT. */
2550
2551static void
2552mn10300_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
2553{
3562cea7 2554 rtx mem, disp, fnaddr = XEXP (DECL_RTL (fndecl), 0);
2555
2556 /* This is a strict alignment target, which means that we play
2557 some games to make sure that the locations at which we need
2558 to store <chain> and <disp> wind up at aligned addresses.
2559
2560 0x28 0x00 add 0,d0
2561 0xfc 0xdd mov chain,a1
2562 <chain>
2563 0xf8 0xed 0x00 btst 0,d1
2564 0xdc jmp fnaddr
2565 <disp>
2566
2567 Note that the two extra insns are effectively nops; they
2568 clobber the flags but do not affect the contents of D0 or D1. */
3e16f982 2569
3562cea7 2570 disp = expand_binop (SImode, sub_optab, fnaddr,
29c05e22 2571 plus_constant (Pmode, XEXP (m_tramp, 0), 11),
3562cea7 2572 NULL_RTX, 1, OPTAB_DIRECT);
3e16f982 2573
3562cea7 2574 mem = adjust_address (m_tramp, SImode, 0);
2575 emit_move_insn (mem, gen_int_mode (0xddfc0028, SImode));
2576 mem = adjust_address (m_tramp, SImode, 4);
3e16f982 2577 emit_move_insn (mem, chain_value);
3562cea7 2578 mem = adjust_address (m_tramp, SImode, 8);
2579 emit_move_insn (mem, gen_int_mode (0xdc00edf8, SImode));
2580 mem = adjust_address (m_tramp, SImode, 12);
2581 emit_move_insn (mem, disp);
3e16f982 2582}
e92d3ba8 2583
2584/* Output the assembler code for a C++ thunk function.
2585 THUNK_DECL is the declaration for the thunk function itself, FUNCTION
2586 is the decl for the target function. DELTA is an immediate constant
2587 offset to be added to the THIS parameter. If VCALL_OFFSET is nonzero
2588 the word at the adjusted address *(*THIS' + VCALL_OFFSET) should be
2589 additionally added to THIS. Finally jump to the entry point of
2590 FUNCTION. */
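/* E.g. for DELTA == 4 and VCALL_OFFSET == 0 the emitted code would be
   (illustrative, assuming the THIS pointer arrives in d0):

        add 4, d0
        jmp function  */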
2591
2592static void
2593mn10300_asm_output_mi_thunk (FILE * file,
2594 tree thunk_fndecl ATTRIBUTE_UNUSED,
2595 HOST_WIDE_INT delta,
2596 HOST_WIDE_INT vcall_offset,
2597 tree function)
2598{
2599 const char * _this;
2600
2601 /* Get the register holding the THIS parameter. Handle the case
2602 where there is a hidden first argument for a returned structure. */
2603 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
2604 _this = reg_names [FIRST_ARGUMENT_REGNUM + 1];
2605 else
2606 _this = reg_names [FIRST_ARGUMENT_REGNUM];
2607
2608 fprintf (file, "\t%s Thunk Entry Point:\n", ASM_COMMENT_START);
2609
2610 if (delta)
2611 fprintf (file, "\tadd %d, %s\n", (int) delta, _this);
2612
2613 if (vcall_offset)
2614 {
2615 const char * scratch = reg_names [FIRST_ADDRESS_REGNUM + 1];
2616
2617 fprintf (file, "\tmov %s, %s\n", _this, scratch);
2618 fprintf (file, "\tmov (%s), %s\n", scratch, scratch);
2619 fprintf (file, "\tadd %d, %s\n", (int) vcall_offset, scratch);
2620 fprintf (file, "\tmov (%s), %s\n", scratch, scratch);
2621 fprintf (file, "\tadd %s, %s\n", scratch, _this);
2622 }
2623
2624 fputs ("\tjmp ", file);
2625 assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
2626 putc ('\n', file);
2627}
2628
2629/* Return true if mn10300_output_mi_thunk would be able to output the
2630 assembler code for the thunk function specified by the arguments
2631 it is passed, and false otherwise. */
2632
2633static bool
2634mn10300_can_output_mi_thunk (const_tree thunk_fndecl ATTRIBUTE_UNUSED,
2635 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
2636 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
2637 const_tree function ATTRIBUTE_UNUSED)
2638{
2639 return true;
2640}
5574dbdd 2641
2642bool
3754d046 2643mn10300_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
5574dbdd 2644{
2645 if (REGNO_REG_CLASS (regno) == FP_REGS
2646 || REGNO_REG_CLASS (regno) == FP_ACC_REGS)
2647 /* Do not store integer values in FP registers. */
2648 return GET_MODE_CLASS (mode) == MODE_FLOAT && ((regno & 1) == 0);
f70166f6 2649
2650 if (! TARGET_AM33 && REGNO_REG_CLASS (regno) == EXTENDED_REGS)
2651 return false;
2652
5574dbdd 2653 if (((regno) & 1) == 0 || GET_MODE_SIZE (mode) == 4)
2654 return true;
2655
2656 if (REGNO_REG_CLASS (regno) == DATA_REGS
2657 || (TARGET_AM33 && REGNO_REG_CLASS (regno) == ADDRESS_REGS)
2658 || REGNO_REG_CLASS (regno) == EXTENDED_REGS)
2659 return GET_MODE_SIZE (mode) <= 4;
2660
2661 return false;
2662}
2663
2664bool
3754d046 2665mn10300_modes_tieable (machine_mode mode1, machine_mode mode2)
5574dbdd 2666{
2667 if (GET_MODE_CLASS (mode1) == MODE_FLOAT
2668 && GET_MODE_CLASS (mode2) != MODE_FLOAT)
2669 return false;
2670
2671 if (GET_MODE_CLASS (mode2) == MODE_FLOAT
2672 && GET_MODE_CLASS (mode1) != MODE_FLOAT)
2673 return false;
2674
2675 if (TARGET_AM33
2676 || mode1 == mode2
2677 || (GET_MODE_SIZE (mode1) <= 4 && GET_MODE_SIZE (mode2) <= 4))
2678 return true;
2679
2680 return false;
2681}
2682
990679af 2683static int
3754d046 2684cc_flags_for_mode (machine_mode mode)
990679af 2685{
2686 switch (mode)
2687 {
2688 case CCmode:
2689 return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_C | CC_FLAG_V;
2690 case CCZNCmode:
2691 return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_C;
2692 case CCZNmode:
2693 return CC_FLAG_Z | CC_FLAG_N;
2694 case CC_FLOATmode:
2695 return -1;
2696 default:
2697 gcc_unreachable ();
2698 }
2699}
2700
2701static int
2702cc_flags_for_code (enum rtx_code code)
2703{
2704 switch (code)
2705 {
2706 case EQ: /* Z */
2707 case NE: /* ~Z */
2708 return CC_FLAG_Z;
2709
2710 case LT: /* N */
2711 case GE: /* ~N */
2712 return CC_FLAG_N;
2714
2715 case GT: /* ~(Z|(N^V)) */
2716 case LE: /* Z|(N^V) */
2717 return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_V;
2718
2719 case GEU: /* ~C */
2720 case LTU: /* C */
2721 return CC_FLAG_C;
2722
2723 case GTU: /* ~(C | Z) */
2724 case LEU: /* C | Z */
2725 return CC_FLAG_Z | CC_FLAG_C;
2726
2727 case ORDERED:
2728 case UNORDERED:
2729 case LTGT:
2730 case UNEQ:
2731 case UNGE:
2732 case UNGT:
2733 case UNLE:
2734 case UNLT:
2735 return -1;
2736
2737 default:
2738 gcc_unreachable ();
2739 }
2740}
2741
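/* Select the least expressive flags mode that still supplies the
   flags required by comparison CODE; e.g. (illustrative) EQ needs
   only Z and gets CCZNmode, LTU needs the carry flag and gets
   CCZNCmode, while signed GT needs the overflow flag and forces the
   full CCmode.  */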
3754d046 2742machine_mode
990679af 2743mn10300_select_cc_mode (enum rtx_code code, rtx x, rtx y ATTRIBUTE_UNUSED)
5574dbdd 2744{
990679af 2745 int req;
2746
2747 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2748 return CC_FLOATmode;
2749
2750 req = cc_flags_for_code (code);
2751
2752 if (req & CC_FLAG_V)
2753 return CCmode;
2754 if (req & CC_FLAG_C)
2755 return CCZNCmode;
2756 return CCZNmode;
5574dbdd 2757}
4879b320 2758
2759static inline bool
627683ab 2760set_is_load_p (rtx set)
4879b320 2761{
627683ab 2762 return MEM_P (SET_SRC (set));
4879b320 2763}
2764
2765static inline bool
627683ab 2766set_is_store_p (rtx set)
4879b320 2767{
627683ab 2768 return MEM_P (SET_DEST (set));
4879b320 2769}
2770
2771/* Update scheduling costs for situations that cannot be
2772 described using the attributes and DFA machinery.
2773 DEP is the insn being scheduled.
2774 INSN is the previous insn.
2775 COST is the current cycle cost for DEP. */
2776
2777static int
18282db0 2778mn10300_adjust_sched_cost (rtx_insn *insn, rtx link, rtx_insn *dep, int cost)
4879b320 2779{
627683ab 2780 rtx insn_set;
2781 rtx dep_set;
2782 int timings;
4879b320 2783
2784 if (!TARGET_AM33)
2785 return 1;
2786
627683ab 2787 /* We are only interested in pairs of SET. */
2788 insn_set = single_set (insn);
2789 if (!insn_set)
2790 return cost;
4879b320 2791
627683ab 2792 dep_set = single_set (dep);
2793 if (!dep_set)
2794 return cost;
4879b320 2795
2796 /* For the AM34 a load instruction that follows a
2797 store instruction incurs an extra cycle of delay. */
2798 if (mn10300_tune_cpu == PROCESSOR_AM34
627683ab 2799 && set_is_load_p (dep_set)
2800 && set_is_store_p (insn_set))
4879b320 2801 cost += 1;
2802
2803 /* For the AM34 a non-store, non-branch FPU insn that follows
2804 another FPU insn incurs a one cycle throughput increase. */
2805 else if (mn10300_tune_cpu == PROCESSOR_AM34
627683ab 2806 && ! set_is_store_p (insn_set)
4879b320 2807 && ! JUMP_P (insn)
627683ab 2808 && GET_MODE_CLASS (GET_MODE (SET_SRC (dep_set))) == MODE_FLOAT
2809 && GET_MODE_CLASS (GET_MODE (SET_SRC (insn_set))) == MODE_FLOAT)
4879b320 2810 cost += 1;
2811
2812 /* Resolve the conflict described in section 1-7-4 of
2813 Chapter 3 of the MN103E Series Instruction Manual
2814 where it says:
2815
9d75589a 2816 "When the preceding instruction is a CPU load or
4879b320 2817 store instruction, a following FPU instruction
2818 cannot be executed until the CPU completes the
2819 latency period even though there are no register
2820 or flag dependencies between them." */
2821
2822 /* Only the AM33-2 (and later) CPUs have FPU instructions. */
2823 if (! TARGET_AM33_2)
2824 return cost;
2825
2826 /* If a data dependence already exists then the cost is correct. */
2827 if (REG_NOTE_KIND (link) == 0)
2828 return cost;
2829
2830 /* Check that the instruction about to be scheduled is an FPU instruction. */
627683ab 2831 if (GET_MODE_CLASS (GET_MODE (SET_SRC (dep_set))) != MODE_FLOAT)
4879b320 2832 return cost;
2833
2834 /* Now check to see if the previous instruction is a load or store. */
627683ab 2835 if (! set_is_load_p (insn_set) && ! set_is_store_p (insn_set))
4879b320 2836 return cost;
2837
2838 /* XXX: Verify: The text of 1-7-4 implies that the restriction
9d75589a 2839 only applies when an INTEGER load/store precedes an FPU
4879b320 2840 instruction, but is this true? For now we assume that it is. */
627683ab 2841 if (GET_MODE_CLASS (GET_MODE (SET_SRC (insn_set))) != MODE_INT)
4879b320 2842 return cost;
2843
2844 /* Extract the latency value from the timings attribute. */
627683ab 2845 timings = get_attr_timings (insn);
4879b320 2846 return timings < 100 ? (timings % 10) : (timings % 100);
2847}
b2d7ede1 2848
2849static void
2850mn10300_conditional_register_usage (void)
2851{
2852 unsigned int i;
2853
2854 if (!TARGET_AM33)
2855 {
2856 for (i = FIRST_EXTENDED_REGNUM;
2857 i <= LAST_EXTENDED_REGNUM; i++)
2858 fixed_regs[i] = call_used_regs[i] = 1;
2859 }
2860 if (!TARGET_AM33_2)
2861 {
2862 for (i = FIRST_FP_REGNUM;
2863 i <= LAST_FP_REGNUM; i++)
2864 fixed_regs[i] = call_used_regs[i] = 1;
2865 }
2866 if (flag_pic)
2867 fixed_regs[PIC_OFFSET_TABLE_REGNUM] =
2868 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
2869}
7de3ada8 2870
2871/* Worker function for TARGET_MD_ASM_CLOBBERS.
2872 We do this in the mn10300 backend to maintain source compatibility
2873 with the old cc0-based compiler. */
2874
2875static tree
2876mn10300_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
2877 tree inputs ATTRIBUTE_UNUSED,
2878 tree clobbers)
2879{
2880 clobbers = tree_cons (NULL_TREE, build_string (5, "EPSW"),
2881 clobbers);
2882 return clobbers;
2883}
5574dbdd 2884\f
990679af 2885/* A helper function for splitting cbranch patterns after reload. */
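/* E.g. (illustrative) a combined compare-and-branch

     (set (pc) (if_then_else (lt (reg X) (reg Y))
                             (label_ref L) (pc)))

   is rewritten as

     (set (reg CC_REG) (compare (reg X) (reg Y)))
     (set (pc) (if_then_else (lt (reg CC_REG) (const_int 0))
                             (label_ref L) (pc)))  */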
2886
2887void
3754d046 2888mn10300_split_cbranch (machine_mode cmp_mode, rtx cmp_op, rtx label_ref)
990679af 2889{
2890 rtx flags, x;
2891
2892 flags = gen_rtx_REG (cmp_mode, CC_REG);
2893 x = gen_rtx_COMPARE (cmp_mode, XEXP (cmp_op, 0), XEXP (cmp_op, 1));
2894 x = gen_rtx_SET (VOIDmode, flags, x);
2895 emit_insn (x);
2896
2897 x = gen_rtx_fmt_ee (GET_CODE (cmp_op), VOIDmode, flags, const0_rtx);
2898 x = gen_rtx_IF_THEN_ELSE (VOIDmode, x, label_ref, pc_rtx);
2899 x = gen_rtx_SET (VOIDmode, pc_rtx, x);
2900 emit_jump_insn (x);
2901}
2902
2903/* A helper function for matching parallels that set the flags. */
2904
2905bool
3754d046 2906mn10300_match_ccmode (rtx insn, machine_mode cc_mode)
990679af 2907{
2908 rtx op1, flags;
3754d046 2909 machine_mode flags_mode;
990679af 2910
2911 gcc_checking_assert (XVECLEN (PATTERN (insn), 0) == 2);
2912
2913 op1 = XVECEXP (PATTERN (insn), 0, 1);
2914 gcc_checking_assert (GET_CODE (SET_SRC (op1)) == COMPARE);
2915
2916 flags = SET_DEST (op1);
2917 flags_mode = GET_MODE (flags);
2918
2919 if (GET_MODE (SET_SRC (op1)) != flags_mode)
2920 return false;
2921 if (GET_MODE_CLASS (flags_mode) != MODE_CC)
2922 return false;
2923
2924 /* Ensure that the mode of FLAGS is compatible with CC_MODE. */
2925 if (cc_flags_for_mode (flags_mode) & ~cc_flags_for_mode (cc_mode))
2926 return false;
2927
2928 return true;
2929}
2930
35c2a6c6 2931/* This function is used to help split:
2932
2933 (set (reg) (and (reg) (int)))
2934
2935 into:
2936
2937 (set (reg) (shift (reg) (int)))
2938 (set (reg) (shift (reg) (int)))
2939
2940 where the shifts will be shorter than the "and" insn.
2941
2942 It returns the number of bits that should be shifted. A positive
2943 value means that the low bits are to be cleared (and hence the
2944 shifts should be right followed by left) whereas a negative value
2945 means that the high bits are to be cleared (left followed by right).
2946 Zero is returned when it would not be economical to split the AND. */
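/* A worked example (illustrative): val == 0xfffffffc gives
   exact_log2 (4) == 2, so 2 is returned and the two low bits are
   cleared by shifting right then left by 2.  val == 0x0fffffff gives
   count == 32 - exact_log2 (0x10000000) == 4, so -4 is returned when
   optimizing for size; for speed the threshold drops to 2 and 0 is
   returned instead.  */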
2947
990679af 2948int
2949mn10300_split_and_operand_count (rtx op)
2950{
2951 HOST_WIDE_INT val = INTVAL (op);
2952 int count;
2953
2954 if (val < 0)
2955 {
2956 /* High bit is set, look for bits clear at the bottom. */
2957 count = exact_log2 (-val);
2958 if (count < 0)
2959 return 0;
2960 /* This is only a size win if we can use the asl2 insn. Otherwise we
2961 would be replacing 1 6-byte insn with 2 3-byte insns. */
2962 if (count > (optimize_insn_for_speed_p () ? 2 : 4))
2963 return 0;
35c2a6c6 2964 return count;
990679af 2965 }
2966 else
2967 {
2968 /* High bit is clear, look for bits set at the bottom. */
2969 count = exact_log2 (val + 1);
2970 count = 32 - count;
2971 /* Again, this is only a size win with asl2. */
2972 if (count > (optimize_insn_for_speed_p () ? 2 : 4))
2973 return 0;
2974 return -count;
2975 }
2976}
2977\f
e7076c21 2978struct liw_data
2979{
2980 enum attr_liw slot;
2981 enum attr_liw_op op;
2982 rtx dest;
2983 rtx src;
2984};
2985
2986/* Decide if the given insn is a candidate for LIW bundling. If it is then
2987 extract the operands and LIW attributes from the insn and use them to fill
2988 in the liw_data structure. Return true upon success or false if the insn
2989 cannot be bundled. */
f9e46c25 2990
2991static bool
50fc2d35 2992extract_bundle (rtx_insn *insn, struct liw_data * pdata)
f9e46c25 2993{
e7076c21 2994 bool allow_consts = true;
81705807 2995 rtx p;
f9e46c25 2996
e7076c21 2997 gcc_assert (pdata != NULL);
2998
50fc2d35 2999 if (insn == NULL)
e7076c21 3000 return false;
3001 /* Make sure that we are dealing with a simple SET insn. */
f9e46c25 3002 p = single_set (insn);
e7076c21 3003 if (p == NULL_RTX)
3004 return false;
3005
3006 /* Make sure that it could go into one of the LIW pipelines. */
3007 pdata->slot = get_attr_liw (insn);
3008 if (pdata->slot == LIW_BOTH)
3009 return false;
3010
3011 pdata->op = get_attr_liw_op (insn);
3012
e7076c21 3013 switch (pdata->op)
f9e46c25 3014 {
3015 case LIW_OP_MOV:
e7076c21 3016 pdata->dest = SET_DEST (p);
3017 pdata->src = SET_SRC (p);
f9e46c25 3018 break;
3019 case LIW_OP_CMP:
e7076c21 3020 pdata->dest = XEXP (SET_SRC (p), 0);
3021 pdata->src = XEXP (SET_SRC (p), 1);
f9e46c25 3022 break;
3023 case LIW_OP_NONE:
3024 return false;
e7076c21 3025 case LIW_OP_AND:
3026 case LIW_OP_OR:
3027 case LIW_OP_XOR:
3028 /* The AND, OR and XOR long instruction words only accept register arguments. */
3029 allow_consts = false;
3030 /* Fall through. */
f9e46c25 3031 default:
e7076c21 3032 pdata->dest = SET_DEST (p);
3033 pdata->src = XEXP (SET_SRC (p), 1);
f9e46c25 3034 break;
3035 }
3036
e7076c21 3037 if (! REG_P (pdata->dest))
3038 return false;
3039
3040 if (REG_P (pdata->src))
3041 return true;
3042
3043 return allow_consts && satisfies_constraint_O (pdata->src);
f9e46c25 3044}
3045
e7076c21 3046/* Make sure that it is OK to execute LIW1 and LIW2 in parallel. GCC generated
3047 the instructions with the assumption that LIW1 would be executed before LIW2,
3048 so we must check for overlaps between their sources and destinations. */
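/* For example (illustrative): "mov d0,d1 ; mov d1,a0" can be bundled,
   since the first insn is a register-to-register MOV and the second
   insn's source is rewritten to d0; "add d0,d1 ; cmp 4,d1" cannot,
   since executed in parallel the CMP would test the old value of d1.  */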
f9e46c25 3049
3050static bool
e7076c21 3051check_liw_constraints (struct liw_data * pliw1, struct liw_data * pliw2)
3052{
3053 /* Check for slot conflicts. */
3054 if (pliw2->slot == pliw1->slot && pliw1->slot != LIW_EITHER)
f9e46c25 3055 return false;
3056
e7076c21 3057 /* If either operation is a compare, then "dest" is really an input; the real
3058 destination is CC_REG. So these instructions need different checks. */
3059
3060 /* Changing "CMP ; OP" into "CMP | OP" is OK because the comparison will
3061 check its values prior to any changes made by OP. */
3062 if (pliw1->op == LIW_OP_CMP)
3063 {
3064 /* Two sequential comparisons mean dead code, which ought to
3065 have been eliminated given that bundling only happens with
3066 optimization. We cannot bundle them in any case. */
3067 gcc_assert (pliw1->op != pliw2->op);
3068 return true;
3069 }
f9e46c25 3070
e7076c21 3071 /* Changing "OP ; CMP" into "OP | CMP" does not work if the value being compared
3072 is the destination of OP, as the CMP will look at the old value, not the new
3073 one. */
3074 if (pliw2->op == LIW_OP_CMP)
f9e46c25 3075 {
e7076c21 3076 if (REGNO (pliw2->dest) == REGNO (pliw1->dest))
3077 return false;
3078
3079 if (REG_P (pliw2->src))
3080 return REGNO (pliw2->src) != REGNO (pliw1->dest);
3081
3082 return true;
3083 }
3084
3085 /* Changing "OP1 ; OP2" into "OP1 | OP2" does not work if they both write to the
3086 same destination register. */
3087 if (REGNO (pliw2->dest) == REGNO (pliw1->dest))
3088 return false;
3089
3090 /* Changing "OP1 ; OP2" into "OP1 | OP2" generally does not work if the destination
3091 of OP1 is the source of OP2. The exception is when OP1 is a MOVE instruction when
3092 we can replace the source in OP2 with the source of OP1. */
3093 if (REG_P (pliw2->src) && REGNO (pliw2->src) == REGNO (pliw1->dest))
3094 {
3095 if (pliw1->op == LIW_OP_MOV)
f9e46c25 3096 {
e7076c21 3097 if (! REG_P (pliw1->src)
3098 && (pliw2->op == LIW_OP_AND
3099 || pliw2->op == LIW_OP_OR
3100 || pliw2->op == LIW_OP_XOR))
3101 return false;
3102
3103 pliw2->src = pliw1->src;
f9e46c25 3104 return true;
3105 }
3106 return false;
3107 }
3108
e7076c21 3109 /* Everything else is OK. */
f9e46c25 3110 return true;
3111}
3112
f9e46c25 3113/* Combine pairs of insns into LIW bundles. */
3114
3115static void
3116mn10300_bundle_liw (void)
3117{
50fc2d35 3118 rtx_insn *r;
f9e46c25 3119
50fc2d35 3120 for (r = get_insns (); r != NULL; r = next_nonnote_nondebug_insn (r))
f9e46c25 3121 {
50fc2d35 3122 rtx_insn *insn1, *insn2;
e7076c21 3123 struct liw_data liw1, liw2;
f9e46c25 3124
3125 insn1 = r;
e7076c21 3126 if (! extract_bundle (insn1, & liw1))
f9e46c25 3127 continue;
3128
3129 insn2 = next_nonnote_nondebug_insn (insn1);
e7076c21 3130 if (! extract_bundle (insn2, & liw2))
f9e46c25 3131 continue;
3132
e7076c21 3133 /* Check for source/destination overlap. */
3134 if (! check_liw_constraints (& liw1, & liw2))
f9e46c25 3135 continue;
3136
e7076c21 3137 if (liw1.slot == LIW_OP2 || liw2.slot == LIW_OP1)
f9e46c25 3138 {
e7076c21 3139 struct liw_data temp;
3140
3141 temp = liw1;
f9e46c25 3142 liw1 = liw2;
e7076c21 3143 liw2 = temp;
f9e46c25 3144 }
3145
f9e46c25 3146 delete_insn (insn2);
3147
50fc2d35 3148 rtx insn2_pat;
e7076c21 3149 if (liw1.op == LIW_OP_CMP)
50fc2d35 3150 insn2_pat = gen_cmp_liw (liw2.dest, liw2.src, liw1.dest, liw1.src,
3151 GEN_INT (liw2.op));
e7076c21 3152 else if (liw2.op == LIW_OP_CMP)
50fc2d35 3153 insn2_pat = gen_liw_cmp (liw1.dest, liw1.src, liw2.dest, liw2.src,
3154 GEN_INT (liw1.op));
f9e46c25 3155 else
50fc2d35 3156 insn2_pat = gen_liw (liw1.dest, liw2.dest, liw1.src, liw2.src,
3157 GEN_INT (liw1.op), GEN_INT (liw2.op));
f9e46c25 3158
50fc2d35 3159 insn2 = emit_insn_after (insn2_pat, insn1);
f9e46c25 3160 delete_insn (insn1);
3161 r = insn2;
3162 }
3163}
3164
f9b3e8f5 3165#define DUMP(reason, insn) \
3166 do \
3167 { \
3168 if (dump_file) \
3169 { \
3170 fprintf (dump_file, reason "\n"); \
3171 if (insn != NULL_RTX) \
3172 print_rtl_single (dump_file, insn); \
3173 fprintf (dump_file, "\n"); \
3174 } \
3175 } \
3176 while (0)
3177
3178/* Replace the BRANCH insn with a Lcc insn that goes to LABEL.
3179 Insert a SETLB insn just before LABEL. */
3180
3181static void
3182mn10300_insert_setlb_lcc (rtx label, rtx branch)
3183{
3184 rtx lcc, comparison, cmp_reg;
3185
3186 if (LABEL_NUSES (label) > 1)
3187 {
158a522b 3188 rtx_insn *insn;
f9b3e8f5 3189
3190 /* This label is used both as an entry point to the loop
3191 and as a loop-back point for the loop. We need to separate
3192 these two functions so that the SETLB happens upon entry,
3193 but the loop-back does not go to the SETLB instruction. */
3194 DUMP ("Inserting SETLB insn after:", label);
3195 insn = emit_insn_after (gen_setlb (), label);
3196 label = gen_label_rtx ();
3197 emit_label_after (label, insn);
3198 DUMP ("Created new loop-back label:", label);
3199 }
3200 else
3201 {
3202 DUMP ("Inserting SETLB insn before:", label);
3203 emit_insn_before (gen_setlb (), label);
3204 }
3205
3206 comparison = XEXP (SET_SRC (PATTERN (branch)), 0);
3207 cmp_reg = XEXP (comparison, 0);
3208 gcc_assert (REG_P (cmp_reg));
3209
3210 /* The comparison should already have been split out of the branch;
3211 verify here that it now tests the flags register. */
3212 gcc_assert (REGNO (cmp_reg) == CC_REG);
3213
3214 if (GET_MODE (cmp_reg) == CC_FLOATmode)
3215 lcc = gen_FLcc (comparison, label);
3216 else
3217 lcc = gen_Lcc (comparison, label);
3218
c6d14fbf 3219 rtx_insn *jump = emit_jump_insn_before (lcc, branch);
3220 mark_jump_label (XVECEXP (PATTERN (lcc), 0, 0), jump, 0);
3221 JUMP_LABEL (jump) = label;
f9b3e8f5 3222 DUMP ("Replacing branch insn...", branch);
c6d14fbf 3223 DUMP ("... with Lcc insn:", jump);
f9b3e8f5 3224 delete_insn (branch);
3225}
3226
3227static bool
161dfa6e 3228mn10300_block_contains_call (basic_block block)
f9b3e8f5 3229{
91a55c11 3230 rtx_insn *insn;
f9b3e8f5 3231
3232 FOR_BB_INSNS (block, insn)
3233 if (CALL_P (insn))
3234 return true;
3235
3236 return false;
3237}
3238
3239static bool
3240mn10300_loop_contains_call_insn (loop_p loop)
3241{
3242 basic_block * bbs;
3243 bool result = false;
3244 unsigned int i;
3245
3246 bbs = get_loop_body (loop);
3247
3248 for (i = 0; i < loop->num_nodes; i++)
3249 if (mn10300_block_contains_call (bbs[i]))
3250 {
3251 result = true;
3252 break;
3253 }
3254
3255 free (bbs);
3256 return result;
3257}
3258
3259static void
3260mn10300_scan_for_setlb_lcc (void)
3261{
f9b3e8f5 3262 loop_p loop;
3263
3264 DUMP ("Looking for loops that can use the SETLB insn", NULL_RTX);
3265
3266 df_analyze ();
3267 compute_bb_for_insn ();
3268
3269 /* Find the loops. */
319f4d7d 3270 loop_optimizer_init (AVOID_CFG_MODIFICATIONS);
f9b3e8f5 3271
3272 /* FIXME: For now we only investigate innermost loops. In practice, however,
3273 if an inner loop is not suitable for use with the SETLB/Lcc insns, it may
3274 be the case that its parent loop is suitable. Thus we should check all
3275 loops, but work from the innermost outwards. */
f21d4d00 3276 FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
f9b3e8f5 3277 {
3278 const char * reason = NULL;
3279
3280 /* Check to see if we can modify this loop. If we cannot
3281 then set 'reason' to describe why it could not be done. */
3282 if (loop->latch == NULL)
3283 reason = "it contains multiple latches";
3284 else if (loop->header != loop->latch)
3285 /* FIXME: We could handle loops that span multiple blocks,
3286 but this requires a lot more work tracking down the branches
3287 that need altering, so for now keep things simple. */
3288 reason = "the loop spans multiple blocks";
3289 else if (mn10300_loop_contains_call_insn (loop))
3290 reason = "it contains CALL insns";
3291 else
3292 {
93ee8dfb 3293 rtx_insn *branch = BB_END (loop->latch);
f9b3e8f5 3294
3295 gcc_assert (JUMP_P (branch));
3296 if (single_set (branch) == NULL_RTX || ! any_condjump_p (branch))
3297 /* We cannot optimize tablejumps and the like. */
3298 /* FIXME: We could handle unconditional jumps. */
3299 reason = "it is not a simple loop";
3300 else
3301 {
93ee8dfb 3302 rtx_insn *label;
f9b3e8f5 3303
3304 if (dump_file)
3305 flow_loop_dump (loop, dump_file, NULL, 0);
3306
3307 label = BB_HEAD (loop->header);
3308 gcc_assert (LABEL_P (label));
3309
3310 mn10300_insert_setlb_lcc (label, branch);
3311 }
3312 }
3313
3314 if (dump_file && reason != NULL)
3315 fprintf (dump_file, "Loop starting with insn %d is not suitable because %s\n",
3316 INSN_UID (BB_HEAD (loop->header)),
3317 reason);
3318 }
3319
319f4d7d 3320 loop_optimizer_finalize ();
f9b3e8f5 3321
3322 df_finish_pass (false);
3323
3324 DUMP ("SETLB scan complete", NULL_RTX);
3325}
3326
f9e46c25 3327static void
3328mn10300_reorg (void)
3329{
f9b3e8f5 3330 /* These are optimizations, so only run them if optimizing. */
3331 if (TARGET_AM33 && (optimize > 0 || optimize_size))
f9e46c25 3332 {
f9b3e8f5 3333 if (TARGET_ALLOW_SETLB)
3334 mn10300_scan_for_setlb_lcc ();
3335
f9e46c25 3336 if (TARGET_ALLOW_LIW)
3337 mn10300_bundle_liw ();
3338 }
3339}
3340\f
3626e955 3341/* Initialize the GCC target structure. */
3342
f9e46c25 3343#undef TARGET_MACHINE_DEPENDENT_REORG
3344#define TARGET_MACHINE_DEPENDENT_REORG mn10300_reorg
3345
3626e955 3346#undef TARGET_ASM_ALIGNED_HI_OP
3347#define TARGET_ASM_ALIGNED_HI_OP "\t.hword\t"
3348
3349#undef TARGET_LEGITIMIZE_ADDRESS
3350#define TARGET_LEGITIMIZE_ADDRESS mn10300_legitimize_address
3351
28f32607 3352#undef TARGET_ADDRESS_COST
3353#define TARGET_ADDRESS_COST mn10300_address_cost
3354#undef TARGET_REGISTER_MOVE_COST
3355#define TARGET_REGISTER_MOVE_COST mn10300_register_move_cost
3356#undef TARGET_MEMORY_MOVE_COST
3357#define TARGET_MEMORY_MOVE_COST mn10300_memory_move_cost
3626e955 3358#undef TARGET_RTX_COSTS
3359#define TARGET_RTX_COSTS mn10300_rtx_costs
3626e955 3360
3361#undef TARGET_ASM_FILE_START
3362#define TARGET_ASM_FILE_START mn10300_file_start
3363#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
3364#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
3365
22680c28 3366#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
3367#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA mn10300_asm_output_addr_const_extra
3368
3626e955 3369#undef TARGET_OPTION_OVERRIDE
3370#define TARGET_OPTION_OVERRIDE mn10300_option_override
3371
3372#undef TARGET_ENCODE_SECTION_INFO
3373#define TARGET_ENCODE_SECTION_INFO mn10300_encode_section_info
3374
3375#undef TARGET_PROMOTE_PROTOTYPES
3376#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
3377#undef TARGET_RETURN_IN_MEMORY
3378#define TARGET_RETURN_IN_MEMORY mn10300_return_in_memory
3379#undef TARGET_PASS_BY_REFERENCE
3380#define TARGET_PASS_BY_REFERENCE mn10300_pass_by_reference
3381#undef TARGET_CALLEE_COPIES
3382#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
3383#undef TARGET_ARG_PARTIAL_BYTES
3384#define TARGET_ARG_PARTIAL_BYTES mn10300_arg_partial_bytes
dc67179a 3385#undef TARGET_FUNCTION_ARG
3386#define TARGET_FUNCTION_ARG mn10300_function_arg
3387#undef TARGET_FUNCTION_ARG_ADVANCE
3388#define TARGET_FUNCTION_ARG_ADVANCE mn10300_function_arg_advance
3626e955 3389
3390#undef TARGET_EXPAND_BUILTIN_SAVEREGS
3391#define TARGET_EXPAND_BUILTIN_SAVEREGS mn10300_builtin_saveregs
3392#undef TARGET_EXPAND_BUILTIN_VA_START
3393#define TARGET_EXPAND_BUILTIN_VA_START mn10300_va_start
3394
3395#undef TARGET_CASE_VALUES_THRESHOLD
3396#define TARGET_CASE_VALUES_THRESHOLD mn10300_case_values_threshold
3397
3398#undef TARGET_LEGITIMATE_ADDRESS_P
3399#define TARGET_LEGITIMATE_ADDRESS_P mn10300_legitimate_address_p
4c6c308e 3400#undef TARGET_DELEGITIMIZE_ADDRESS
3401#define TARGET_DELEGITIMIZE_ADDRESS mn10300_delegitimize_address
ca316360 3402#undef TARGET_LEGITIMATE_CONSTANT_P
3403#define TARGET_LEGITIMATE_CONSTANT_P mn10300_legitimate_constant_p
3626e955 3404
029ca87f 3405#undef TARGET_PREFERRED_RELOAD_CLASS
3406#define TARGET_PREFERRED_RELOAD_CLASS mn10300_preferred_reload_class
3407#undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
c78ac668 3408#define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS \
3409 mn10300_preferred_output_reload_class
3410#undef TARGET_SECONDARY_RELOAD
3411#define TARGET_SECONDARY_RELOAD mn10300_secondary_reload
029ca87f 3412
3626e955 3413#undef TARGET_TRAMPOLINE_INIT
3414#define TARGET_TRAMPOLINE_INIT mn10300_trampoline_init
3415
3416#undef TARGET_FUNCTION_VALUE
3417#define TARGET_FUNCTION_VALUE mn10300_function_value
3418#undef TARGET_LIBCALL_VALUE
3419#define TARGET_LIBCALL_VALUE mn10300_libcall_value
3420
3421#undef TARGET_ASM_OUTPUT_MI_THUNK
3422#define TARGET_ASM_OUTPUT_MI_THUNK mn10300_asm_output_mi_thunk
3423#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
3424#define TARGET_ASM_CAN_OUTPUT_MI_THUNK mn10300_can_output_mi_thunk
3425
4879b320 3426#undef TARGET_SCHED_ADJUST_COST
3427#define TARGET_SCHED_ADJUST_COST mn10300_adjust_sched_cost
3428
b2d7ede1 3429#undef TARGET_CONDITIONAL_REGISTER_USAGE
3430#define TARGET_CONDITIONAL_REGISTER_USAGE mn10300_conditional_register_usage
3431
7de3ada8 3432#undef TARGET_MD_ASM_CLOBBERS
3433#define TARGET_MD_ASM_CLOBBERS mn10300_md_asm_clobbers
3434
08207c2f 3435#undef TARGET_FLAGS_REGNUM
3436#define TARGET_FLAGS_REGNUM CC_REG
3437
3626e955 3438struct gcc_target targetm = TARGET_INITIALIZER;