gcc/config/mn10300/mn10300.c
1 /* Subroutines for insn-output.c for Matsushita MN10300 series
2 Copyright (C) 1996-2014 Free Software Foundation, Inc.
3 Contributed by Jeff Law (law@cygnus.com).
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "stor-layout.h"
28 #include "varasm.h"
29 #include "calls.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "output.h"
35 #include "insn-attr.h"
36 #include "flags.h"
37 #include "recog.h"
38 #include "reload.h"
39 #include "expr.h"
40 #include "optabs.h"
41 #include "hashtab.h"
42 #include "hash-set.h"
43 #include "vec.h"
44 #include "machmode.h"
45 #include "input.h"
46 #include "function.h"
47 #include "obstack.h"
48 #include "diagnostic-core.h"
49 #include "tm_p.h"
50 #include "tm-constrs.h"
51 #include "target.h"
52 #include "target-def.h"
53 #include "dominance.h"
54 #include "cfg.h"
55 #include "cfgrtl.h"
56 #include "cfganal.h"
57 #include "lcm.h"
58 #include "cfgbuild.h"
59 #include "cfgcleanup.h"
60 #include "predict.h"
61 #include "basic-block.h"
62 #include "df.h"
63 #include "opts.h"
64 #include "cfgloop.h"
65 #include "dumpfile.h"
66 #include "builtins.h"
67
68 /* This is used in the am33_2.0-linux-gnu port, in which global symbol
69 names are not prefixed by underscores, to tell whether to prefix a
70 label with a plus sign or not, so that the assembler can tell
71 symbol names from register names. */
72 int mn10300_protect_label;
73
74 /* Selected processor type for tuning. */
75 enum processor_type mn10300_tune_cpu = PROCESSOR_DEFAULT;
76
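/* Flag bits describing which condition codes (zero, negative, carry
   and overflow, respectively) a comparison needs or a flags-setting
   mode provides; see cc_flags_for_mode and cc_flags_for_code below.  */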
77 #define CC_FLAG_Z 1
78 #define CC_FLAG_N 2
79 #define CC_FLAG_C 4
80 #define CC_FLAG_V 8
81
82 static int cc_flags_for_mode (machine_mode);
83 static int cc_flags_for_code (enum rtx_code);
84 \f
85 /* Implement TARGET_OPTION_OVERRIDE. */
86 static void
87 mn10300_option_override (void)
88 {
89 if (TARGET_AM33)
90 target_flags &= ~MASK_MULT_BUG;
91 else
92 {
93 /* Disable scheduling for the MN10300 as we do
94 not have timing information available for it. */
95 flag_schedule_insns = 0;
96 flag_schedule_insns_after_reload = 0;
97
98 /* Force enable splitting of wide types, as otherwise it is trivial
99 to run out of registers. Indeed, this works so well that register
100 allocation problems are now more common *without* optimization,
101 when this flag is not enabled by default. */
102 flag_split_wide_types = 1;
103 }
104
105 if (mn10300_tune_string)
106 {
107 if (strcasecmp (mn10300_tune_string, "mn10300") == 0)
108 mn10300_tune_cpu = PROCESSOR_MN10300;
109 else if (strcasecmp (mn10300_tune_string, "am33") == 0)
110 mn10300_tune_cpu = PROCESSOR_AM33;
111 else if (strcasecmp (mn10300_tune_string, "am33-2") == 0)
112 mn10300_tune_cpu = PROCESSOR_AM33_2;
113 else if (strcasecmp (mn10300_tune_string, "am34") == 0)
114 mn10300_tune_cpu = PROCESSOR_AM34;
115 else
116 error ("-mtune= expects mn10300, am33, am33-2, or am34");
117 }
118 }
119
120 static void
121 mn10300_file_start (void)
122 {
123 default_file_start ();
124
125 if (TARGET_AM33_2)
126 fprintf (asm_out_file, "\t.am33_2\n");
127 else if (TARGET_AM33)
128 fprintf (asm_out_file, "\t.am33\n");
129 }
130 \f
131 /* Note: This list must match the liw_op attribute in mn10300.md. */
132
133 static const char *liw_op_names[] =
134 {
135 "add", "cmp", "sub", "mov",
136 "and", "or", "xor",
137 "asr", "lsr", "asl",
138 "none", "max"
139 };
140
141 /* Print operand X using operand code CODE to assembly language output file
142 FILE. */
143
144 void
145 mn10300_print_operand (FILE *file, rtx x, int code)
146 {
147 switch (code)
148 {
149 case 'W':
150 {
151 unsigned int liw_op = UINTVAL (x);
152
153 gcc_assert (TARGET_ALLOW_LIW);
154 gcc_assert (liw_op < LIW_OP_MAX);
155 fputs (liw_op_names[liw_op], file);
156 break;
157 }
158
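      /* 'b' and 'B' print the condition-name suffix for a branch:
         'b' uses the comparison in X as-is, 'B' its reverse.  For
         example, %b on an (eq ...) operand prints "eq", while %B
         prints "ne".  */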
159 case 'b':
160 case 'B':
161 {
162 enum rtx_code cmp = GET_CODE (x);
163 machine_mode mode = GET_MODE (XEXP (x, 0));
164 const char *str;
165 int have_flags;
166
167 if (code == 'B')
168 cmp = reverse_condition (cmp);
169 have_flags = cc_flags_for_mode (mode);
170
171 switch (cmp)
172 {
173 case NE:
174 str = "ne";
175 break;
176 case EQ:
177 str = "eq";
178 break;
179 case GE:
180 /* bge is smaller than bnc. */
181 str = (have_flags & CC_FLAG_V ? "ge" : "nc");
182 break;
183 case LT:
184 str = (have_flags & CC_FLAG_V ? "lt" : "ns");
185 break;
186 case GT:
187 str = "gt";
188 break;
189 case LE:
190 str = "le";
191 break;
192 case GEU:
193 str = "cc";
194 break;
195 case GTU:
196 str = "hi";
197 break;
198 case LEU:
199 str = "ls";
200 break;
201 case LTU:
202 str = "cs";
203 break;
204 case ORDERED:
205 str = "lge";
206 break;
207 case UNORDERED:
208 str = "uo";
209 break;
210 case LTGT:
211 str = "lg";
212 break;
213 case UNEQ:
214 str = "ue";
215 break;
216 case UNGE:
217 str = "uge";
218 break;
219 case UNGT:
220 str = "ug";
221 break;
222 case UNLE:
223 str = "ule";
224 break;
225 case UNLT:
226 str = "ul";
227 break;
228 default:
229 gcc_unreachable ();
230 }
231
232 gcc_checking_assert ((cc_flags_for_code (cmp) & ~have_flags) == 0);
233 fputs (str, file);
234 }
235 break;
236
237 case 'C':
238 /* This is used for the operand to a call instruction;
239 if it's a REG, enclose it in parens, else output
240 the operand normally. */
241 if (REG_P (x))
242 {
243 fputc ('(', file);
244 mn10300_print_operand (file, x, 0);
245 fputc (')', file);
246 }
247 else
248 mn10300_print_operand (file, x, 0);
249 break;
250
251 case 'D':
252 switch (GET_CODE (x))
253 {
254 case MEM:
255 fputc ('(', file);
256 output_address (XEXP (x, 0));
257 fputc (')', file);
258 break;
259
260 case REG:
261 fprintf (file, "fd%d", REGNO (x) - 18);
262 break;
263
264 default:
265 gcc_unreachable ();
266 }
267 break;
268
269     /* This is the least significant word in a 64-bit value.  */
270 case 'L':
271 switch (GET_CODE (x))
272 {
273 case MEM:
274 fputc ('(', file);
275 output_address (XEXP (x, 0));
276 fputc (')', file);
277 break;
278
279 case REG:
280 fprintf (file, "%s", reg_names[REGNO (x)]);
281 break;
282
283 case SUBREG:
284 fprintf (file, "%s", reg_names[subreg_regno (x)]);
285 break;
286
287 case CONST_DOUBLE:
288 {
289 long val[2];
290 REAL_VALUE_TYPE rv;
291
292 switch (GET_MODE (x))
293 {
294 case DFmode:
295 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
296 REAL_VALUE_TO_TARGET_DOUBLE (rv, val);
297 fprintf (file, "0x%lx", val[0]);
298 	      break;
299 case SFmode:
300 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
301 REAL_VALUE_TO_TARGET_SINGLE (rv, val[0]);
302 fprintf (file, "0x%lx", val[0]);
303 	      break;
304 case VOIDmode:
305 case DImode:
306 mn10300_print_operand_address (file,
307 GEN_INT (CONST_DOUBLE_LOW (x)));
308 break;
309 default:
310 break;
311 }
312 break;
313 }
314
315 case CONST_INT:
316 {
317 rtx low, high;
318 split_double (x, &low, &high);
319 fprintf (file, "%ld", (long)INTVAL (low));
320 break;
321 }
322
323 default:
324 gcc_unreachable ();
325 }
326 break;
327
328 /* Similarly, but for the most significant word. */
329 case 'H':
330 switch (GET_CODE (x))
331 {
332 case MEM:
333 fputc ('(', file);
334 x = adjust_address (x, SImode, 4);
335 output_address (XEXP (x, 0));
336 fputc (')', file);
337 break;
338
339 case REG:
340 fprintf (file, "%s", reg_names[REGNO (x) + 1]);
341 break;
342
343 case SUBREG:
344 fprintf (file, "%s", reg_names[subreg_regno (x) + 1]);
345 break;
346
347 case CONST_DOUBLE:
348 {
349 long val[2];
350 REAL_VALUE_TYPE rv;
351
352 switch (GET_MODE (x))
353 {
354 case DFmode:
355 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
356 REAL_VALUE_TO_TARGET_DOUBLE (rv, val);
357 fprintf (file, "0x%lx", val[1]);
358 	      break;
359 case SFmode:
360 gcc_unreachable ();
361 case VOIDmode:
362 case DImode:
363 mn10300_print_operand_address (file,
364 GEN_INT (CONST_DOUBLE_HIGH (x)));
365 break;
366 default:
367 break;
368 }
369 break;
370 }
371
372 case CONST_INT:
373 {
374 rtx low, high;
375 split_double (x, &low, &high);
376 fprintf (file, "%ld", (long)INTVAL (high));
377 break;
378 }
379
380 default:
381 gcc_unreachable ();
382 }
383 break;
384
385 case 'A':
386 fputc ('(', file);
387 if (REG_P (XEXP (x, 0)))
388 output_address (gen_rtx_PLUS (SImode, XEXP (x, 0), const0_rtx));
389 else
390 output_address (XEXP (x, 0));
391 fputc (')', file);
392 break;
393
394 case 'N':
395 gcc_assert (INTVAL (x) >= -128 && INTVAL (x) <= 255);
396 fprintf (file, "%d", (int)((~INTVAL (x)) & 0xff));
397 break;
398
399 case 'U':
400 gcc_assert (INTVAL (x) >= -128 && INTVAL (x) <= 255);
401 fprintf (file, "%d", (int)(INTVAL (x) & 0xff));
402 break;
403
404 /* For shift counts. The hardware ignores the upper bits of
405 any immediate, but the assembler will flag an out of range
406 shift count as an error. So we mask off the high bits
407 of the immediate here. */
408 case 'S':
409 if (CONST_INT_P (x))
410 {
411 fprintf (file, "%d", (int)(INTVAL (x) & 0x1f));
412 break;
413 }
414 /* FALL THROUGH */
415
416 default:
417 switch (GET_CODE (x))
418 {
419 case MEM:
420 fputc ('(', file);
421 output_address (XEXP (x, 0));
422 fputc (')', file);
423 break;
424
425 case PLUS:
426 output_address (x);
427 break;
428
429 case REG:
430 fprintf (file, "%s", reg_names[REGNO (x)]);
431 break;
432
433 case SUBREG:
434 fprintf (file, "%s", reg_names[subreg_regno (x)]);
435 break;
436
437 	  /* This will only be single precision.  */
438 case CONST_DOUBLE:
439 {
440 unsigned long val;
441 REAL_VALUE_TYPE rv;
442
443 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
444 REAL_VALUE_TO_TARGET_SINGLE (rv, val);
445 fprintf (file, "0x%lx", val);
446 break;
447 }
448
449 case CONST_INT:
450 case SYMBOL_REF:
451 case CONST:
452 case LABEL_REF:
453 case CODE_LABEL:
454 case UNSPEC:
455 mn10300_print_operand_address (file, x);
456 break;
457 default:
458 gcc_unreachable ();
459 }
460 break;
461 }
462 }
463
464 /* Output assembly language for the address ADDR to FILE.  */
465
466 void
467 mn10300_print_operand_address (FILE *file, rtx addr)
468 {
469 switch (GET_CODE (addr))
470 {
471 case POST_INC:
472 mn10300_print_operand (file, XEXP (addr, 0), 0);
473 fputc ('+', file);
474 break;
475
476 case POST_MODIFY:
477 mn10300_print_operand (file, XEXP (addr, 0), 0);
478 fputc ('+', file);
479 fputc (',', file);
480 mn10300_print_operand (file, XEXP (addr, 1), 0);
481 break;
482
483 case REG:
484 mn10300_print_operand (file, addr, 0);
485 break;
486 case PLUS:
487 {
488 rtx base = XEXP (addr, 0);
489 rtx index = XEXP (addr, 1);
490
491 if (REG_P (index) && !REG_OK_FOR_INDEX_P (index))
492 {
493 rtx x = base;
494 base = index;
495 index = x;
496
497 gcc_assert (REG_P (index) && REG_OK_FOR_INDEX_P (index));
498 }
499 gcc_assert (REG_OK_FOR_BASE_P (base));
500
501 mn10300_print_operand (file, index, 0);
502 fputc (',', file);
503 mn10300_print_operand (file, base, 0);
504 break;
505 }
506 case SYMBOL_REF:
507 output_addr_const (file, addr);
508 break;
509 default:
510 output_addr_const (file, addr);
511 break;
512 }
513 }
514
515 /* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA.
516
517 Used for PIC-specific UNSPECs. */
518
519 static bool
520 mn10300_asm_output_addr_const_extra (FILE *file, rtx x)
521 {
522 if (GET_CODE (x) == UNSPEC)
523 {
524 switch (XINT (x, 1))
525 {
526 case UNSPEC_PIC:
527 /* GLOBAL_OFFSET_TABLE or local symbols, no suffix. */
528 output_addr_const (file, XVECEXP (x, 0, 0));
529 break;
530 case UNSPEC_GOT:
531 output_addr_const (file, XVECEXP (x, 0, 0));
532 fputs ("@GOT", file);
533 break;
534 case UNSPEC_GOTOFF:
535 output_addr_const (file, XVECEXP (x, 0, 0));
536 fputs ("@GOTOFF", file);
537 break;
538 case UNSPEC_PLT:
539 output_addr_const (file, XVECEXP (x, 0, 0));
540 fputs ("@PLT", file);
541 break;
542 case UNSPEC_GOTSYM_OFF:
543 assemble_name (file, GOT_SYMBOL_NAME);
544 fputs ("-(", file);
545 output_addr_const (file, XVECEXP (x, 0, 0));
546 fputs ("-.)", file);
547 break;
548 default:
549 return false;
550 }
551 return true;
552 }
553 else
554 return false;
555 }
556
557 /* Count the number of FP registers that have to be saved. */
558 static int
559 fp_regs_to_save (void)
560 {
561 int i, n = 0;
562
563 if (! TARGET_AM33_2)
564 return 0;
565
566 for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
567 if (df_regs_ever_live_p (i) && ! call_really_used_regs[i])
568 ++n;
569
570 return n;
571 }
572
573 /* Print a set of registers in the format required by "movm" and "ret".
574 Register K is saved if bit K of MASK is set. The data and address
575 registers can be stored individually, but the extended registers cannot.
576 We assume that the mask already takes that into account. For instance,
577 bits 14 to 17 must have the same value. */
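/* For example, with the usual register numbering, a MASK of 0x3c00c
   (d2, d3 and all of e4..e7) prints as "[d2,d3,exreg1]".  */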
578
579 void
580 mn10300_print_reg_list (FILE *file, int mask)
581 {
582 int need_comma;
583 int i;
584
585 need_comma = 0;
586 fputc ('[', file);
587
588 for (i = 0; i < FIRST_EXTENDED_REGNUM; i++)
589 if ((mask & (1 << i)) != 0)
590 {
591 if (need_comma)
592 fputc (',', file);
593 fputs (reg_names [i], file);
594 need_comma = 1;
595 }
596
597 if ((mask & 0x3c000) != 0)
598 {
599 gcc_assert ((mask & 0x3c000) == 0x3c000);
600 if (need_comma)
601 fputc (',', file);
602 fputs ("exreg1", file);
603 need_comma = 1;
604 }
605
606 fputc (']', file);
607 }
608
609 /* If the MDR register is never clobbered, we can use the RETF instruction
610 which takes the address from the MDR register. This is 3 cycles faster
611 than having to load the address from the stack. */
612
613 bool
614 mn10300_can_use_retf_insn (void)
615 {
616 /* Don't bother if we're not optimizing. In this case we won't
617 have proper access to df_regs_ever_live_p. */
618 if (!optimize)
619 return false;
620
621 /* EH returns alter the saved return address; MDR is not current. */
622 if (crtl->calls_eh_return)
623 return false;
624
625 /* Obviously not if MDR is ever clobbered. */
626 if (df_regs_ever_live_p (MDR_REG))
627 return false;
628
629 /* ??? Careful not to use this during expand_epilogue etc. */
630 gcc_assert (!in_sequence_p ());
631 return leaf_function_p ();
632 }
633
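/* RETS is usable only when there is no stack to release and no saved
   registers to restore, i.e. when the offset from the argument pointer
   to the stack pointer is zero.  */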
634 bool
635 mn10300_can_use_rets_insn (void)
636 {
637 return !mn10300_initial_offset (ARG_POINTER_REGNUM, STACK_POINTER_REGNUM);
638 }
639
640 /* Returns the set of live, callee-saved registers as a bitmask. The
641 callee-saved extended registers cannot be stored individually, so
642 all of them will be included in the mask if any one of them is used.
643 Also returns the number of bytes in the registers in the mask if
644 BYTES_SAVED is not NULL. */
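/* For example, if e5 is the only extended register that is ever live,
   the returned mask still has all of bits 14..17 set, and the count
   behind BYTES_SAVED includes all four extended registers.  */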
645
646 unsigned int
647 mn10300_get_live_callee_saved_regs (unsigned int * bytes_saved)
648 {
649 int mask;
650 int i;
651 unsigned int count;
652
653 count = mask = 0;
654 for (i = 0; i <= LAST_EXTENDED_REGNUM; i++)
655 if (df_regs_ever_live_p (i) && ! call_really_used_regs[i])
656 {
657 mask |= (1 << i);
658 ++ count;
659 }
660
661 if ((mask & 0x3c000) != 0)
662 {
663 for (i = 0x04000; i < 0x40000; i <<= 1)
664 if ((mask & i) == 0)
665 ++ count;
666
667 mask |= 0x3c000;
668 }
669
670 if (bytes_saved)
671 * bytes_saved = count * UNITS_PER_WORD;
672
673 return mask;
674 }
675
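/* Mark an rtx (typically an insn) as frame-related for the prologue's
   unwind annotations and return it.  The one-letter name keeps the
   code below compact.  */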
676 static rtx
677 F (rtx r)
678 {
679 RTX_FRAME_RELATED_P (r) = 1;
680 return r;
681 }
682
683 /* Generate an instruction that pushes several registers onto the stack.
684 Register K will be saved if bit K in MASK is set. The function does
685 nothing if MASK is zero.
686
687 To be compatible with the "movm" instruction, the lowest-numbered
688 register must be stored in the lowest slot. If MASK is the set
689 { R1,...,RN }, where R1...RN are ordered least first, the generated
690 instruction will have the form:
691
692 (parallel
693 (set (reg:SI 9) (plus:SI (reg:SI 9) (const_int -N*4)))
694 (set (mem:SI (plus:SI (reg:SI 9)
695 (const_int -1*4)))
696 (reg:SI RN))
697 ...
698 (set (mem:SI (plus:SI (reg:SI 9)
699 (const_int -N*4)))
700 (reg:SI R1))) */
701
702 static void
703 mn10300_gen_multiple_store (unsigned int mask)
704 {
705 /* The order in which registers are stored, from SP-4 through SP-N*4. */
706 static const unsigned int store_order[8] = {
707 /* e2, e3: never saved */
708 FIRST_EXTENDED_REGNUM + 4,
709 FIRST_EXTENDED_REGNUM + 5,
710 FIRST_EXTENDED_REGNUM + 6,
711 FIRST_EXTENDED_REGNUM + 7,
712 /* e0, e1, mdrq, mcrh, mcrl, mcvf: never saved. */
713 FIRST_DATA_REGNUM + 2,
714 FIRST_DATA_REGNUM + 3,
715 FIRST_ADDRESS_REGNUM + 2,
716 FIRST_ADDRESS_REGNUM + 3,
717 /* d0, d1, a0, a1, mdr, lir, lar: never saved. */
718 };
719
720 rtx x, elts[9];
721 unsigned int i;
722 int count;
723
724 if (mask == 0)
725 return;
726
727 for (i = count = 0; i < ARRAY_SIZE(store_order); ++i)
728 {
729 unsigned regno = store_order[i];
730
731 if (((mask >> regno) & 1) == 0)
732 continue;
733
734 ++count;
735 x = plus_constant (Pmode, stack_pointer_rtx, count * -4);
736 x = gen_frame_mem (SImode, x);
737 x = gen_rtx_SET (VOIDmode, x, gen_rtx_REG (SImode, regno));
738 elts[count] = F(x);
739
740 /* Remove the register from the mask so that... */
741 mask &= ~(1u << regno);
742 }
743
744 /* ... we can make sure that we didn't try to use a register
745 not listed in the store order. */
746 gcc_assert (mask == 0);
747
748 /* Create the instruction that updates the stack pointer. */
749 x = plus_constant (Pmode, stack_pointer_rtx, count * -4);
750 x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
751 elts[0] = F(x);
752
753 /* We need one PARALLEL element to update the stack pointer and
754 an additional element for each register that is stored. */
755 x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (count + 1, elts));
756 F (emit_insn (x));
757 }
758
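/* Return the number of bits set in MASK.  MASK & -MASK isolates the
   lowest set bit, so each iteration of the loop clears one bit.  */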
759 static inline unsigned int
760 popcount (unsigned int mask)
761 {
762 unsigned int count = 0;
763
764 while (mask)
765 {
766 ++ count;
767 mask &= ~ (mask & - mask);
768 }
769 return count;
770 }
771
772 void
773 mn10300_expand_prologue (void)
774 {
775 HOST_WIDE_INT size = mn10300_frame_size ();
776 unsigned int mask;
777
778 mask = mn10300_get_live_callee_saved_regs (NULL);
779 /* If we use any of the callee-saved registers, save them now. */
780 mn10300_gen_multiple_store (mask);
781
782 if (flag_stack_usage_info)
783 current_function_static_stack_size = size + popcount (mask) * 4;
784
785 if (TARGET_AM33_2 && fp_regs_to_save ())
786 {
787 int num_regs_to_save = fp_regs_to_save (), i;
788 HOST_WIDE_INT xsize;
789 enum
790 {
791 save_sp_merge,
792 save_sp_no_merge,
793 save_sp_partial_merge,
794 save_a0_merge,
795 save_a0_no_merge
796 } strategy;
797 unsigned int strategy_size = (unsigned)-1, this_strategy_size;
798 rtx reg;
799
800 if (flag_stack_usage_info)
801 current_function_static_stack_size += num_regs_to_save * 4;
802
803 /* We have several different strategies to save FP registers.
804 We can store them using SP offsets, which is beneficial if
805 there are just a few registers to save, or we can use `a0' in
806 post-increment mode (`a0' is the only call-clobbered address
807 register that is never used to pass information to a
808 function). Furthermore, if we don't need a frame pointer, we
809 can merge the two SP adds into a single one, but this isn't
810 always beneficial; sometimes we can just split the two adds
811 so that we don't exceed a 16-bit constant size. The code
812 	 below will select which strategy to use, so as to generate the
813 	 smallest code.  Ties are broken in favor of shorter sequences
814 (in terms of number of instructions). */
815
816 #define SIZE_ADD_AX(S) ((((S) >= (1 << 15)) || ((S) < -(1 << 15))) ? 6 \
817 : (((S) >= (1 << 7)) || ((S) < -(1 << 7))) ? 4 : 2)
818 #define SIZE_ADD_SP(S) ((((S) >= (1 << 15)) || ((S) < -(1 << 15))) ? 6 \
819 : (((S) >= (1 << 7)) || ((S) < -(1 << 7))) ? 4 : 3)
820
821 /* We add 0 * (S) in two places to promote to the type of S,
822 so that all arms of the conditional have the same type. */
823 #define SIZE_FMOV_LIMIT(S,N,L,SIZE1,SIZE2,ELSE) \
824 (((S) >= (L)) ? 0 * (S) + (SIZE1) * (N) \
825 : ((S) + 4 * (N) >= (L)) ? (((L) - (S)) / 4 * (SIZE2) \
826 + ((S) + 4 * (N) - (L)) / 4 * (SIZE1)) \
827 : 0 * (S) + (ELSE))
828 #define SIZE_FMOV_SP_(S,N) \
829 (SIZE_FMOV_LIMIT ((S), (N), (1 << 24), 7, 6, \
830 SIZE_FMOV_LIMIT ((S), (N), (1 << 8), 6, 4, \
831 (S) ? 4 * (N) : 3 + 4 * ((N) - 1))))
832 #define SIZE_FMOV_SP(S,N) (SIZE_FMOV_SP_ ((unsigned HOST_WIDE_INT)(S), (N)))
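/* As a worked example of the encodings above: SIZE_ADD_SP (-4)
   evaluates to 3, SIZE_ADD_SP (-200) to 4 and SIZE_ADD_SP (-40000)
   to 6; the cutoffs at 1 << 7 and 1 << 15 correspond to the 8-, 16-
   and 32-bit immediate forms of the add instruction.  */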
833
834 /* Consider alternative save_sp_merge only if we don't need the
835 frame pointer and size is nonzero. */
836 if (! frame_pointer_needed && size)
837 {
838 /* Insn: add -(size + 4 * num_regs_to_save), sp. */
839 this_strategy_size = SIZE_ADD_SP (-(size + 4 * num_regs_to_save));
840 /* Insn: fmov fs#, (##, sp), for each fs# to be saved. */
841 this_strategy_size += SIZE_FMOV_SP (size, num_regs_to_save);
842
843 if (this_strategy_size < strategy_size)
844 {
845 strategy = save_sp_merge;
846 strategy_size = this_strategy_size;
847 }
848 }
849
850 /* Consider alternative save_sp_no_merge unconditionally. */
851 /* Insn: add -4 * num_regs_to_save, sp. */
852 this_strategy_size = SIZE_ADD_SP (-4 * num_regs_to_save);
853 /* Insn: fmov fs#, (##, sp), for each fs# to be saved. */
854 this_strategy_size += SIZE_FMOV_SP (0, num_regs_to_save);
855 if (size)
856 {
857 /* Insn: add -size, sp. */
858 this_strategy_size += SIZE_ADD_SP (-size);
859 }
860
861 if (this_strategy_size < strategy_size)
862 {
863 strategy = save_sp_no_merge;
864 strategy_size = this_strategy_size;
865 }
866
867 /* Consider alternative save_sp_partial_merge only if we don't
868 need a frame pointer and size is reasonably large. */
869 if (! frame_pointer_needed && size + 4 * num_regs_to_save > 128)
870 {
871 /* Insn: add -128, sp. */
872 this_strategy_size = SIZE_ADD_SP (-128);
873 /* Insn: fmov fs#, (##, sp), for each fs# to be saved. */
874 this_strategy_size += SIZE_FMOV_SP (128 - 4 * num_regs_to_save,
875 num_regs_to_save);
876 if (size)
877 {
878 /* Insn: add 128-size, sp. */
879 this_strategy_size += SIZE_ADD_SP (128 - size);
880 }
881
882 if (this_strategy_size < strategy_size)
883 {
884 strategy = save_sp_partial_merge;
885 strategy_size = this_strategy_size;
886 }
887 }
888
889 /* Consider alternative save_a0_merge only if we don't need a
890 frame pointer, size is nonzero and the user hasn't
891 changed the calling conventions of a0. */
892 if (! frame_pointer_needed && size
893 && call_really_used_regs [FIRST_ADDRESS_REGNUM]
894 && ! fixed_regs[FIRST_ADDRESS_REGNUM])
895 {
896 /* Insn: add -(size + 4 * num_regs_to_save), sp. */
897 this_strategy_size = SIZE_ADD_SP (-(size + 4 * num_regs_to_save));
898 /* Insn: mov sp, a0. */
899 this_strategy_size++;
900 if (size)
901 {
902 /* Insn: add size, a0. */
903 this_strategy_size += SIZE_ADD_AX (size);
904 }
905 /* Insn: fmov fs#, (a0+), for each fs# to be saved. */
906 this_strategy_size += 3 * num_regs_to_save;
907
908 if (this_strategy_size < strategy_size)
909 {
910 strategy = save_a0_merge;
911 strategy_size = this_strategy_size;
912 }
913 }
914
915 /* Consider alternative save_a0_no_merge if the user hasn't
916 changed the calling conventions of a0. */
917 if (call_really_used_regs [FIRST_ADDRESS_REGNUM]
918 && ! fixed_regs[FIRST_ADDRESS_REGNUM])
919 {
920 /* Insn: add -4 * num_regs_to_save, sp. */
921 this_strategy_size = SIZE_ADD_SP (-4 * num_regs_to_save);
922 /* Insn: mov sp, a0. */
923 this_strategy_size++;
924 /* Insn: fmov fs#, (a0+), for each fs# to be saved. */
925 this_strategy_size += 3 * num_regs_to_save;
926 if (size)
927 {
928 /* Insn: add -size, sp. */
929 this_strategy_size += SIZE_ADD_SP (-size);
930 }
931
932 if (this_strategy_size < strategy_size)
933 {
934 strategy = save_a0_no_merge;
935 strategy_size = this_strategy_size;
936 }
937 }
938
939 /* Emit the initial SP add, common to all strategies. */
940 switch (strategy)
941 {
942 case save_sp_no_merge:
943 case save_a0_no_merge:
944 F (emit_insn (gen_addsi3 (stack_pointer_rtx,
945 stack_pointer_rtx,
946 GEN_INT (-4 * num_regs_to_save))));
947 xsize = 0;
948 break;
949
950 case save_sp_partial_merge:
951 F (emit_insn (gen_addsi3 (stack_pointer_rtx,
952 stack_pointer_rtx,
953 GEN_INT (-128))));
954 xsize = 128 - 4 * num_regs_to_save;
955 size -= xsize;
956 break;
957
958 case save_sp_merge:
959 case save_a0_merge:
960 F (emit_insn (gen_addsi3 (stack_pointer_rtx,
961 stack_pointer_rtx,
962 GEN_INT (-(size + 4 * num_regs_to_save)))));
963 /* We'll have to adjust FP register saves according to the
964 frame size. */
965 xsize = size;
966 /* Since we've already created the stack frame, don't do it
967 again at the end of the function. */
968 size = 0;
969 break;
970
971 default:
972 gcc_unreachable ();
973 }
974
975 /* Now prepare register a0, if we have decided to use it. */
976 switch (strategy)
977 {
978 case save_sp_merge:
979 case save_sp_no_merge:
980 case save_sp_partial_merge:
981 reg = 0;
982 break;
983
984 case save_a0_merge:
985 case save_a0_no_merge:
986 reg = gen_rtx_REG (SImode, FIRST_ADDRESS_REGNUM);
987 F (emit_insn (gen_movsi (reg, stack_pointer_rtx)));
988 if (xsize)
989 F (emit_insn (gen_addsi3 (reg, reg, GEN_INT (xsize))));
990 reg = gen_rtx_POST_INC (SImode, reg);
991 break;
992
993 default:
994 gcc_unreachable ();
995 }
996
997 /* Now actually save the FP registers. */
998 for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
999 if (df_regs_ever_live_p (i) && ! call_really_used_regs [i])
1000 {
1001 rtx addr;
1002
1003 if (reg)
1004 addr = reg;
1005 else
1006 {
1007 /* If we aren't using `a0', use an SP offset. */
1008 if (xsize)
1009 {
1010 addr = gen_rtx_PLUS (SImode,
1011 stack_pointer_rtx,
1012 GEN_INT (xsize));
1013 }
1014 else
1015 addr = stack_pointer_rtx;
1016
1017 xsize += 4;
1018 }
1019
1020 F (emit_insn (gen_movsf (gen_rtx_MEM (SFmode, addr),
1021 gen_rtx_REG (SFmode, i))));
1022 }
1023 }
1024
1025 /* Now put the frame pointer into the frame pointer register. */
1026 if (frame_pointer_needed)
1027 F (emit_move_insn (frame_pointer_rtx, stack_pointer_rtx));
1028
1029 /* Allocate stack for this frame. */
1030 if (size)
1031 F (emit_insn (gen_addsi3 (stack_pointer_rtx,
1032 stack_pointer_rtx,
1033 GEN_INT (-size))));
1034
1035 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
1036 emit_insn (gen_load_pic ());
1037 }
1038
1039 void
1040 mn10300_expand_epilogue (void)
1041 {
1042 HOST_WIDE_INT size = mn10300_frame_size ();
1043 unsigned int reg_save_bytes;
1044
1045 mn10300_get_live_callee_saved_regs (& reg_save_bytes);
1046
1047 if (TARGET_AM33_2 && fp_regs_to_save ())
1048 {
1049 int num_regs_to_save = fp_regs_to_save (), i;
1050 rtx reg = 0;
1051
1052 /* We have several options to restore FP registers. We could
1053 load them from SP offsets, but, if there are enough FP
1054 registers to restore, we win if we use a post-increment
1055 addressing mode. */
1056
1057 /* If we have a frame pointer, it's the best option, because we
1058 already know it has the value we want. */
1059 if (frame_pointer_needed)
1060 reg = gen_rtx_REG (SImode, FRAME_POINTER_REGNUM);
1061 /* Otherwise, we may use `a1', since it's call-clobbered and
1062 it's never used for return values. But only do so if it's
1063 smaller than using SP offsets. */
1064 else
1065 {
1066 enum { restore_sp_post_adjust,
1067 restore_sp_pre_adjust,
1068 restore_sp_partial_adjust,
1069 restore_a1 } strategy;
1070 unsigned int this_strategy_size, strategy_size = (unsigned)-1;
1071
1072 /* Consider using sp offsets before adjusting sp. */
1073 /* Insn: fmov (##,sp),fs#, for each fs# to be restored. */
1074 this_strategy_size = SIZE_FMOV_SP (size, num_regs_to_save);
1075 /* If size is too large, we'll have to adjust SP with an
1076 add. */
1077 if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
1078 {
1079 /* Insn: add size + 4 * num_regs_to_save, sp. */
1080 this_strategy_size += SIZE_ADD_SP (size + 4 * num_regs_to_save);
1081 }
1082 /* If we don't have to restore any non-FP registers,
1083 we'll be able to save one byte by using rets. */
1084 if (! reg_save_bytes)
1085 this_strategy_size--;
1086
1087 if (this_strategy_size < strategy_size)
1088 {
1089 strategy = restore_sp_post_adjust;
1090 strategy_size = this_strategy_size;
1091 }
1092
1093 /* Consider using sp offsets after adjusting sp. */
1094 /* Insn: add size, sp. */
1095 this_strategy_size = SIZE_ADD_SP (size);
1096 /* Insn: fmov (##,sp),fs#, for each fs# to be restored. */
1097 this_strategy_size += SIZE_FMOV_SP (0, num_regs_to_save);
1098 /* We're going to use ret to release the FP registers
1099 	     save area, so there are no savings.  */
1100
1101 if (this_strategy_size < strategy_size)
1102 {
1103 strategy = restore_sp_pre_adjust;
1104 strategy_size = this_strategy_size;
1105 }
1106
1107 /* Consider using sp offsets after partially adjusting sp.
1108 When size is close to 32Kb, we may be able to adjust SP
1109 with an imm16 add instruction while still using fmov
1110 (d8,sp). */
1111 if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
1112 {
1113 /* Insn: add size + 4 * num_regs_to_save
1114 + reg_save_bytes - 252,sp. */
1115 this_strategy_size = SIZE_ADD_SP (size + 4 * num_regs_to_save
1116 + (int) reg_save_bytes - 252);
1117 	      /* Insn: fmov (##,sp),fs#, for each fs# to be restored.  */
1118 this_strategy_size += SIZE_FMOV_SP (252 - reg_save_bytes
1119 - 4 * num_regs_to_save,
1120 num_regs_to_save);
1121 /* We're going to use ret to release the FP registers
1122 		 save area, so there are no savings.  */
1123
1124 if (this_strategy_size < strategy_size)
1125 {
1126 strategy = restore_sp_partial_adjust;
1127 strategy_size = this_strategy_size;
1128 }
1129 }
1130
1131 /* Consider using a1 in post-increment mode, as long as the
1132 user hasn't changed the calling conventions of a1. */
1133 if (call_really_used_regs [FIRST_ADDRESS_REGNUM + 1]
1134 && ! fixed_regs[FIRST_ADDRESS_REGNUM+1])
1135 {
1136 /* Insn: mov sp,a1. */
1137 this_strategy_size = 1;
1138 if (size)
1139 {
1140 /* Insn: add size,a1. */
1141 this_strategy_size += SIZE_ADD_AX (size);
1142 }
1143 /* Insn: fmov (a1+),fs#, for each fs# to be restored. */
1144 this_strategy_size += 3 * num_regs_to_save;
1145 /* If size is large enough, we may be able to save a
1146 couple of bytes. */
1147 if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
1148 {
1149 /* Insn: mov a1,sp. */
1150 this_strategy_size += 2;
1151 }
1152 /* If we don't have to restore any non-FP registers,
1153 we'll be able to save one byte by using rets. */
1154 if (! reg_save_bytes)
1155 this_strategy_size--;
1156
1157 if (this_strategy_size < strategy_size)
1158 {
1159 strategy = restore_a1;
1160 strategy_size = this_strategy_size;
1161 }
1162 }
1163
1164 switch (strategy)
1165 {
1166 case restore_sp_post_adjust:
1167 break;
1168
1169 case restore_sp_pre_adjust:
1170 emit_insn (gen_addsi3 (stack_pointer_rtx,
1171 stack_pointer_rtx,
1172 GEN_INT (size)));
1173 size = 0;
1174 break;
1175
1176 case restore_sp_partial_adjust:
1177 emit_insn (gen_addsi3 (stack_pointer_rtx,
1178 stack_pointer_rtx,
1179 GEN_INT (size + 4 * num_regs_to_save
1180 + reg_save_bytes - 252)));
1181 size = 252 - reg_save_bytes - 4 * num_regs_to_save;
1182 break;
1183
1184 case restore_a1:
1185 reg = gen_rtx_REG (SImode, FIRST_ADDRESS_REGNUM + 1);
1186 emit_insn (gen_movsi (reg, stack_pointer_rtx));
1187 if (size)
1188 emit_insn (gen_addsi3 (reg, reg, GEN_INT (size)));
1189 break;
1190
1191 default:
1192 gcc_unreachable ();
1193 }
1194 }
1195
1196 /* Adjust the selected register, if any, for post-increment. */
1197 if (reg)
1198 reg = gen_rtx_POST_INC (SImode, reg);
1199
1200 for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
1201 if (df_regs_ever_live_p (i) && ! call_really_used_regs [i])
1202 {
1203 rtx addr;
1204
1205 if (reg)
1206 addr = reg;
1207 else if (size)
1208 {
1209 /* If we aren't using a post-increment register, use an
1210 SP offset. */
1211 addr = gen_rtx_PLUS (SImode,
1212 stack_pointer_rtx,
1213 GEN_INT (size));
1214 }
1215 else
1216 addr = stack_pointer_rtx;
1217
1218 size += 4;
1219
1220 emit_insn (gen_movsf (gen_rtx_REG (SFmode, i),
1221 gen_rtx_MEM (SFmode, addr)));
1222 }
1223
1224 /* If we were using the restore_a1 strategy and the number of
1225 bytes to be released won't fit in the `ret' byte, copy `a1'
1226 to `sp', to avoid having to use `add' to adjust it. */
1227 if (! frame_pointer_needed && reg && size + reg_save_bytes > 255)
1228 {
1229 emit_move_insn (stack_pointer_rtx, XEXP (reg, 0));
1230 size = 0;
1231 }
1232 }
1233
1234 /* Maybe cut back the stack, except for the register save area.
1235
1236 If the frame pointer exists, then use the frame pointer to
1237 cut back the stack.
1238
1239 If the stack size + register save area is more than 255 bytes,
1240 then the stack must be cut back here since the size + register
1241 save size is too big for a ret/retf instruction.
1242
1243 Else leave it alone, it will be cut back as part of the
1244 ret/retf instruction, or there wasn't any stack to begin with.
1245
1246 Under no circumstances should the register save area be
1247 deallocated here, that would leave a window where an interrupt
1248 could occur and trash the register save area. */
1249 if (frame_pointer_needed)
1250 {
1251 emit_move_insn (stack_pointer_rtx, frame_pointer_rtx);
1252 size = 0;
1253 }
1254 else if (size + reg_save_bytes > 255)
1255 {
1256 emit_insn (gen_addsi3 (stack_pointer_rtx,
1257 stack_pointer_rtx,
1258 GEN_INT (size)));
1259 size = 0;
1260 }
1261
1262 /* Adjust the stack and restore callee-saved registers, if any. */
1263 if (mn10300_can_use_rets_insn ())
1264 emit_jump_insn (ret_rtx);
1265 else
1266 emit_jump_insn (gen_return_ret (GEN_INT (size + reg_save_bytes)));
1267 }
1268
1269 /* Recognize the PARALLEL rtx generated by mn10300_gen_multiple_store().
1270 This function is for MATCH_PARALLEL and so assumes OP is known to be
1271 parallel. If OP is a multiple store, return a mask indicating which
1272 registers it saves. Return 0 otherwise. */
1273
1274 unsigned int
1275 mn10300_store_multiple_regs (rtx op)
1276 {
1277 int count;
1278 int mask;
1279 int i;
1280 unsigned int last;
1281 rtx elt;
1282
1283 count = XVECLEN (op, 0);
1284 if (count < 2)
1285 return 0;
1286
1287   /* Check that the first instruction has the form (set (sp) (plus A B)).  */
1288 elt = XVECEXP (op, 0, 0);
1289 if (GET_CODE (elt) != SET
1290 || (! REG_P (SET_DEST (elt)))
1291 || REGNO (SET_DEST (elt)) != STACK_POINTER_REGNUM
1292 || GET_CODE (SET_SRC (elt)) != PLUS)
1293 return 0;
1294
1295 /* Check that A is the stack pointer and B is the expected stack size.
1296 For OP to match, each subsequent instruction should push a word onto
1297 the stack. We therefore expect the first instruction to create
1298 COUNT-1 stack slots. */
1299 elt = SET_SRC (elt);
1300 if ((! REG_P (XEXP (elt, 0)))
1301 || REGNO (XEXP (elt, 0)) != STACK_POINTER_REGNUM
1302 || (! CONST_INT_P (XEXP (elt, 1)))
1303 || INTVAL (XEXP (elt, 1)) != -(count - 1) * 4)
1304 return 0;
1305
1306 mask = 0;
1307 for (i = 1; i < count; i++)
1308 {
1309 /* Check that element i is a (set (mem M) R). */
1310 /* ??? Validate the register order a-la mn10300_gen_multiple_store.
1311 Remember: the ordering is *not* monotonic. */
1312 elt = XVECEXP (op, 0, i);
1313 if (GET_CODE (elt) != SET
1314 || (! MEM_P (SET_DEST (elt)))
1315 || (! REG_P (SET_SRC (elt))))
1316 return 0;
1317
1318 /* Remember which registers are to be saved. */
1319 last = REGNO (SET_SRC (elt));
1320 mask |= (1 << last);
1321
1322 /* Check that M has the form (plus (sp) (const_int -I*4)) */
1323 elt = XEXP (SET_DEST (elt), 0);
1324 if (GET_CODE (elt) != PLUS
1325 || (! REG_P (XEXP (elt, 0)))
1326 || REGNO (XEXP (elt, 0)) != STACK_POINTER_REGNUM
1327 || (! CONST_INT_P (XEXP (elt, 1)))
1328 || INTVAL (XEXP (elt, 1)) != -i * 4)
1329 return 0;
1330 }
1331
1332 /* All or none of the callee-saved extended registers must be in the set. */
1333 if ((mask & 0x3c000) != 0
1334 && (mask & 0x3c000) != 0x3c000)
1335 return 0;
1336
1337 return mask;
1338 }
1339
1340 /* Implement TARGET_PREFERRED_RELOAD_CLASS. */
1341
1342 static reg_class_t
1343 mn10300_preferred_reload_class (rtx x, reg_class_t rclass)
1344 {
1345 if (x == stack_pointer_rtx && rclass != SP_REGS)
1346 return (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
1347 else if (MEM_P (x)
1348 || (REG_P (x)
1349 && !HARD_REGISTER_P (x))
1350 || (GET_CODE (x) == SUBREG
1351 && REG_P (SUBREG_REG (x))
1352 && !HARD_REGISTER_P (SUBREG_REG (x))))
1353 return LIMIT_RELOAD_CLASS (GET_MODE (x), rclass);
1354 else
1355 return rclass;
1356 }
1357
1358 /* Implement TARGET_PREFERRED_OUTPUT_RELOAD_CLASS. */
1359
1360 static reg_class_t
1361 mn10300_preferred_output_reload_class (rtx x, reg_class_t rclass)
1362 {
1363 if (x == stack_pointer_rtx && rclass != SP_REGS)
1364 return (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
1365 return rclass;
1366 }
1367
1368 /* Implement TARGET_SECONDARY_RELOAD. */
1369
1370 static reg_class_t
1371 mn10300_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
1372 machine_mode mode, secondary_reload_info *sri)
1373 {
1374 enum reg_class rclass = (enum reg_class) rclass_i;
1375 enum reg_class xclass = NO_REGS;
1376 unsigned int xregno = INVALID_REGNUM;
1377
1378 if (REG_P (x))
1379 {
1380 xregno = REGNO (x);
1381 if (xregno >= FIRST_PSEUDO_REGISTER)
1382 xregno = true_regnum (x);
1383 if (xregno != INVALID_REGNUM)
1384 xclass = REGNO_REG_CLASS (xregno);
1385 }
1386
1387 if (!TARGET_AM33)
1388 {
1389 /* Memory load/stores less than a full word wide can't have an
1390 address or stack pointer destination. They must use a data
1391 register as an intermediate register. */
1392 if (rclass != DATA_REGS
1393 && (mode == QImode || mode == HImode)
1394 && xclass == NO_REGS)
1395 return DATA_REGS;
1396
1397 /* We can only move SP to/from an address register. */
1398 if (in_p
1399 && rclass == SP_REGS
1400 && xclass != ADDRESS_REGS)
1401 return ADDRESS_REGS;
1402 if (!in_p
1403 && xclass == SP_REGS
1404 && rclass != ADDRESS_REGS
1405 && rclass != SP_OR_ADDRESS_REGS)
1406 return ADDRESS_REGS;
1407 }
1408
1409 /* We can't directly load sp + const_int into a register;
1410      we must use an address register as a scratch.  */
1411 if (in_p
1412 && rclass != SP_REGS
1413 && rclass != SP_OR_ADDRESS_REGS
1414 && rclass != SP_OR_GENERAL_REGS
1415 && GET_CODE (x) == PLUS
1416 && (XEXP (x, 0) == stack_pointer_rtx
1417 || XEXP (x, 1) == stack_pointer_rtx))
1418 {
1419 sri->icode = CODE_FOR_reload_plus_sp_const;
1420 return NO_REGS;
1421 }
1422
1423 /* We can only move MDR to/from a data register. */
1424 if (rclass == MDR_REGS && xclass != DATA_REGS)
1425 return DATA_REGS;
1426 if (xclass == MDR_REGS && rclass != DATA_REGS)
1427 return DATA_REGS;
1428
1429 /* We can't load/store an FP register from a constant address. */
1430 if (TARGET_AM33_2
1431 && (rclass == FP_REGS || xclass == FP_REGS)
1432 && (xclass == NO_REGS || rclass == NO_REGS))
1433 {
1434 rtx addr = NULL;
1435
1436 if (xregno >= FIRST_PSEUDO_REGISTER && xregno != INVALID_REGNUM)
1437 {
1438 addr = reg_equiv_mem (xregno);
1439 if (addr)
1440 addr = XEXP (addr, 0);
1441 }
1442 else if (MEM_P (x))
1443 addr = XEXP (x, 0);
1444
1445 if (addr && CONSTANT_ADDRESS_P (addr))
1446 return GENERAL_REGS;
1447 }
1448 /* Otherwise assume no secondary reloads are needed. */
1449 return NO_REGS;
1450 }
1451
1452 int
1453 mn10300_frame_size (void)
1454 {
1455 /* size includes the fixed stack space needed for function calls. */
1456 int size = get_frame_size () + crtl->outgoing_args_size;
1457
1458 /* And space for the return pointer. */
1459 size += crtl->outgoing_args_size ? 4 : 0;
1460
1461 return size;
1462 }
1463
1464 int
1465 mn10300_initial_offset (int from, int to)
1466 {
1467 int diff = 0;
1468
1469 gcc_assert (from == ARG_POINTER_REGNUM || from == FRAME_POINTER_REGNUM);
1470 gcc_assert (to == FRAME_POINTER_REGNUM || to == STACK_POINTER_REGNUM);
1471
1472 if (to == STACK_POINTER_REGNUM)
1473 diff = mn10300_frame_size ();
1474
1475 /* The difference between the argument pointer and the frame pointer
1476 is the size of the callee register save area. */
1477 if (from == ARG_POINTER_REGNUM)
1478 {
1479 unsigned int reg_save_bytes;
1480
1481 mn10300_get_live_callee_saved_regs (& reg_save_bytes);
1482 diff += reg_save_bytes;
1483 diff += 4 * fp_regs_to_save ();
1484 }
1485
1486 return diff;
1487 }
1488
1489 /* Worker function for TARGET_RETURN_IN_MEMORY. */
1490
1491 static bool
1492 mn10300_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
1493 {
1494 /* Return values > 8 bytes in length in memory. */
1495 return (int_size_in_bytes (type) > 8
1496 || int_size_in_bytes (type) == 0
1497 || TYPE_MODE (type) == BLKmode);
1498 }
1499
1500 /* Flush the argument registers to the stack for a stdarg function;
1501 return the new argument pointer. */
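/* Concretely, d0 is stored at the incoming argument pointer and d1 at
   4 bytes above it, so the register-passed words become contiguous
   with any arguments already passed on the stack.  */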
1502 static rtx
1503 mn10300_builtin_saveregs (void)
1504 {
1505 rtx offset, mem;
1506 tree fntype = TREE_TYPE (current_function_decl);
1507 int argadj = ((!stdarg_p (fntype))
1508 ? UNITS_PER_WORD : 0);
1509 alias_set_type set = get_varargs_alias_set ();
1510
1511 if (argadj)
1512 offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, argadj);
1513 else
1514 offset = crtl->args.arg_offset_rtx;
1515
1516 mem = gen_rtx_MEM (SImode, crtl->args.internal_arg_pointer);
1517 set_mem_alias_set (mem, set);
1518 emit_move_insn (mem, gen_rtx_REG (SImode, 0));
1519
1520 mem = gen_rtx_MEM (SImode,
1521 plus_constant (Pmode,
1522 crtl->args.internal_arg_pointer, 4));
1523 set_mem_alias_set (mem, set);
1524 emit_move_insn (mem, gen_rtx_REG (SImode, 1));
1525
1526 return copy_to_reg (expand_binop (Pmode, add_optab,
1527 crtl->args.internal_arg_pointer,
1528 offset, 0, 0, OPTAB_LIB_WIDEN));
1529 }
1530
1531 static void
1532 mn10300_va_start (tree valist, rtx nextarg)
1533 {
1534 nextarg = expand_builtin_saveregs ();
1535 std_expand_builtin_va_start (valist, nextarg);
1536 }
1537
1538 /* Return true when a parameter should be passed by reference. */
1539
1540 static bool
1541 mn10300_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
1542 machine_mode mode, const_tree type,
1543 bool named ATTRIBUTE_UNUSED)
1544 {
1545 unsigned HOST_WIDE_INT size;
1546
1547 if (type)
1548 size = int_size_in_bytes (type);
1549 else
1550 size = GET_MODE_SIZE (mode);
1551
1552 return (size > 8 || size == 0);
1553 }
1554
1555 /* Return an RTX to represent where an argument with mode MODE will be
1556    passed to a function.  If the result is NULL_RTX, the argument is pushed.  */
1557
1558 static rtx
1559 mn10300_function_arg (cumulative_args_t cum_v, machine_mode mode,
1560 const_tree type, bool named ATTRIBUTE_UNUSED)
1561 {
1562 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
1563 rtx result = NULL_RTX;
1564 int size;
1565
1566 /* We only support using 2 data registers as argument registers. */
1567 int nregs = 2;
1568
1569 /* Figure out the size of the object to be passed. */
1570 if (mode == BLKmode)
1571 size = int_size_in_bytes (type);
1572 else
1573 size = GET_MODE_SIZE (mode);
1574
1575 cum->nbytes = (cum->nbytes + 3) & ~3;
1576
1577 /* Don't pass this arg via a register if all the argument registers
1578 are used up. */
1579 if (cum->nbytes > nregs * UNITS_PER_WORD)
1580 return result;
1581
1582 /* Don't pass this arg via a register if it would be split between
1583 registers and memory. */
1584 if (type == NULL_TREE
1585 && cum->nbytes + size > nregs * UNITS_PER_WORD)
1586 return result;
1587
1588 switch (cum->nbytes / UNITS_PER_WORD)
1589 {
1590 case 0:
1591 result = gen_rtx_REG (mode, FIRST_ARGUMENT_REGNUM);
1592 break;
1593 case 1:
1594 result = gen_rtx_REG (mode, FIRST_ARGUMENT_REGNUM + 1);
1595 break;
1596 default:
1597 break;
1598 }
1599
1600 return result;
1601 }
1602
1603 /* Update the data in CUM to advance over an argument
1604 of mode MODE and data type TYPE.
1605 (TYPE is null for libcalls where that information may not be available.) */
1606
1607 static void
1608 mn10300_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
1609 const_tree type, bool named ATTRIBUTE_UNUSED)
1610 {
1611 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
1612
1613 cum->nbytes += (mode != BLKmode
1614 ? (GET_MODE_SIZE (mode) + 3) & ~3
1615 : (int_size_in_bytes (type) + 3) & ~3);
1616 }
1617
1618 /* Return the number of bytes of registers to use for an argument passed
1619 partially in registers and partially in memory. */
1620
1621 static int
1622 mn10300_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
1623 tree type, bool named ATTRIBUTE_UNUSED)
1624 {
1625 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
1626 int size;
1627
1628 /* We only support using 2 data registers as argument registers. */
1629 int nregs = 2;
1630
1631 /* Figure out the size of the object to be passed. */
1632 if (mode == BLKmode)
1633 size = int_size_in_bytes (type);
1634 else
1635 size = GET_MODE_SIZE (mode);
1636
1637 cum->nbytes = (cum->nbytes + 3) & ~3;
1638
1639 /* Don't pass this arg via a register if all the argument registers
1640 are used up. */
1641 if (cum->nbytes > nregs * UNITS_PER_WORD)
1642 return 0;
1643
1644 if (cum->nbytes + size <= nregs * UNITS_PER_WORD)
1645 return 0;
1646
1647 /* Don't pass this arg via a register if it would be split between
1648 registers and memory. */
1649 if (type == NULL_TREE
1650 && cum->nbytes + size > nregs * UNITS_PER_WORD)
1651 return 0;
1652
1653 return nregs * UNITS_PER_WORD - cum->nbytes;
1654 }
1655
1656 /* Return the location of the function's value. This will be either
1657 $d0 for integer functions, $a0 for pointers, or a PARALLEL of both
1658    $d0 and $a0 if the -mreturn-pointer-on-d0 flag is set.  Note that
1659 we only return the PARALLEL for outgoing values; we do not want
1660 callers relying on this extra copy. */
1661
1662 static rtx
1663 mn10300_function_value (const_tree valtype,
1664 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
1665 bool outgoing)
1666 {
1667 rtx rv;
1668 machine_mode mode = TYPE_MODE (valtype);
1669
1670 if (! POINTER_TYPE_P (valtype))
1671 return gen_rtx_REG (mode, FIRST_DATA_REGNUM);
1672 else if (! TARGET_PTR_A0D0 || ! outgoing
1673 || cfun->returns_struct)
1674 return gen_rtx_REG (mode, FIRST_ADDRESS_REGNUM);
1675
1676 rv = gen_rtx_PARALLEL (mode, rtvec_alloc (2));
1677 XVECEXP (rv, 0, 0)
1678 = gen_rtx_EXPR_LIST (VOIDmode,
1679 gen_rtx_REG (mode, FIRST_ADDRESS_REGNUM),
1680 GEN_INT (0));
1681
1682 XVECEXP (rv, 0, 1)
1683 = gen_rtx_EXPR_LIST (VOIDmode,
1684 gen_rtx_REG (mode, FIRST_DATA_REGNUM),
1685 GEN_INT (0));
1686 return rv;
1687 }
1688
1689 /* Implements TARGET_LIBCALL_VALUE. */
1690
1691 static rtx
1692 mn10300_libcall_value (machine_mode mode,
1693 const_rtx fun ATTRIBUTE_UNUSED)
1694 {
1695 return gen_rtx_REG (mode, FIRST_DATA_REGNUM);
1696 }
1697
1698 /* Implements FUNCTION_VALUE_REGNO_P. */
1699
1700 bool
1701 mn10300_function_value_regno_p (const unsigned int regno)
1702 {
1703 return (regno == FIRST_DATA_REGNUM || regno == FIRST_ADDRESS_REGNUM);
1704 }
1705
1706 /* Output an addition operation. */
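/* For example, adding the constant 1 when the flags are dead emits
   "inc %0", and reg = reg + reg on AM33 can use the three-operand
   "add %2,%1,%0"; the remaining cases fall back to a "mov" followed
   by a two-operand "add", copying whichever source is cheaper.  */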
1707
1708 const char *
1709 mn10300_output_add (rtx operands[3], bool need_flags)
1710 {
1711 rtx dest, src1, src2;
1712 unsigned int dest_regnum, src1_regnum, src2_regnum;
1713 enum reg_class src1_class, src2_class, dest_class;
1714
1715 dest = operands[0];
1716 src1 = operands[1];
1717 src2 = operands[2];
1718
1719 dest_regnum = true_regnum (dest);
1720 src1_regnum = true_regnum (src1);
1721
1722 dest_class = REGNO_REG_CLASS (dest_regnum);
1723 src1_class = REGNO_REG_CLASS (src1_regnum);
1724
1725 if (CONST_INT_P (src2))
1726 {
1727 gcc_assert (dest_regnum == src1_regnum);
1728
1729 if (src2 == const1_rtx && !need_flags)
1730 return "inc %0";
1731 if (INTVAL (src2) == 4 && !need_flags && dest_class != DATA_REGS)
1732 return "inc4 %0";
1733
1734 gcc_assert (!need_flags || dest_class != SP_REGS);
1735 return "add %2,%0";
1736 }
1737 else if (CONSTANT_P (src2))
1738 return "add %2,%0";
1739
1740 src2_regnum = true_regnum (src2);
1741 src2_class = REGNO_REG_CLASS (src2_regnum);
1742
1743 if (dest_regnum == src1_regnum)
1744 return "add %2,%0";
1745 if (dest_regnum == src2_regnum)
1746 return "add %1,%0";
1747
1748 /* The rest of the cases are reg = reg+reg. For AM33, we can implement
1749 this directly, as below, but when optimizing for space we can sometimes
1750 do better by using a mov+add. For MN103, we claimed that we could
1751 implement a three-operand add because the various move and add insns
1752 change sizes across register classes, and we can often do better than
1753 reload in choosing which operand to move. */
1754 if (TARGET_AM33 && optimize_insn_for_speed_p ())
1755 return "add %2,%1,%0";
1756
1757 /* Catch cases where no extended register was used. */
1758 if (src1_class != EXTENDED_REGS
1759 && src2_class != EXTENDED_REGS
1760 && dest_class != EXTENDED_REGS)
1761 {
1762 /* We have to copy one of the sources into the destination, then
1763 add the other source to the destination.
1764
1765 Carefully select which source to copy to the destination; a
1766 naive implementation will waste a byte when the source classes
1767 are different and the destination is an address register.
1768 Selecting the lowest cost register copy will optimize this
1769 sequence. */
1770 if (src1_class == dest_class)
1771 return "mov %1,%0\n\tadd %2,%0";
1772 else
1773 return "mov %2,%0\n\tadd %1,%0";
1774 }
1775
1776 /* At least one register is an extended register. */
1777
1778 /* The three operand add instruction on the am33 is a win iff the
1779 output register is an extended register, or if both source
1780 registers are extended registers. */
1781 if (dest_class == EXTENDED_REGS || src1_class == src2_class)
1782 return "add %2,%1,%0";
1783
1784 /* It is better to copy one of the sources to the destination, then
1785 perform a 2 address add. The destination in this case must be
1786 an address or data register and one of the sources must be an
1787 extended register and the remaining source must not be an extended
1788 register.
1789
1790 The best code for this case is to copy the extended reg to the
1791 destination, then emit a two address add. */
1792 if (src1_class == EXTENDED_REGS)
1793 return "mov %1,%0\n\tadd %2,%0";
1794 else
1795 return "mov %2,%0\n\tadd %1,%0";
1796 }
1797
1798 /* Return 1 if X contains a symbolic expression. We know these
1799 expressions will have one of a few well defined forms, so
1800 we need only check those forms. */
1801
1802 int
1803 mn10300_symbolic_operand (rtx op,
1804 machine_mode mode ATTRIBUTE_UNUSED)
1805 {
1806 switch (GET_CODE (op))
1807 {
1808 case SYMBOL_REF:
1809 case LABEL_REF:
1810 return 1;
1811 case CONST:
1812 op = XEXP (op, 0);
1813 return ((GET_CODE (XEXP (op, 0)) == SYMBOL_REF
1814 || GET_CODE (XEXP (op, 0)) == LABEL_REF)
1815 && CONST_INT_P (XEXP (op, 1)));
1816 default:
1817 return 0;
1818 }
1819 }
1820
1821 /* Try machine dependent ways of modifying an illegitimate address
1822 to be legitimate. If we find one, return the new valid address.
1823 This macro is used in only one place: `memory_address' in explow.c.
1824
1825 OLDX is the address as it was before break_out_memory_refs was called.
1826 In some cases it is useful to look at this to decide what needs to be done.
1827
1828 Normally it is always safe for this macro to do nothing. It exists to
1829 recognize opportunities to optimize the output.
1830
1831 But on a few ports with segmented architectures and indexed addressing
1832 (mn10300, hppa) it is used to rewrite certain problematical addresses. */
1833
1834 static rtx
1835 mn10300_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
1836 machine_mode mode ATTRIBUTE_UNUSED)
1837 {
1838 if (flag_pic && ! mn10300_legitimate_pic_operand_p (x))
1839 x = mn10300_legitimize_pic_address (oldx, NULL_RTX);
1840
1841 /* Uh-oh. We might have an address for x[n-100000]. This needs
1842 special handling to avoid creating an indexed memory address
1843 with x-100000 as the base. */
1844 if (GET_CODE (x) == PLUS
1845 && mn10300_symbolic_operand (XEXP (x, 1), VOIDmode))
1846 {
1847 /* Ugly. We modify things here so that the address offset specified
1848 by the index expression is computed first, then added to x to form
1849 the entire address. */
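      /* For instance, (plus REG (const (plus SYM N))) is rewritten as
         (plus (plus REG N) SYM), with each intermediate result forced
         into a register.  */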
1850
1851 rtx regx1, regy1, regy2, y;
1852
1853 /* Strip off any CONST. */
1854 y = XEXP (x, 1);
1855 if (GET_CODE (y) == CONST)
1856 y = XEXP (y, 0);
1857
1858 if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
1859 {
1860 regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
1861 regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
1862 regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
1863 regx1 = force_reg (Pmode,
1864 gen_rtx_fmt_ee (GET_CODE (y), Pmode, regx1,
1865 regy2));
1866 return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
1867 }
1868 }
1869 return x;
1870 }
1871
1872 /* Convert a non-PIC address in `orig' to a PIC address using @GOT or
1873 @GOTOFF in `reg'. */
1874
1875 rtx
1876 mn10300_legitimize_pic_address (rtx orig, rtx reg)
1877 {
1878 rtx x;
1879
1880 if (GET_CODE (orig) == LABEL_REF
1881 || (GET_CODE (orig) == SYMBOL_REF
1882 && (CONSTANT_POOL_ADDRESS_P (orig)
1883 || ! MN10300_GLOBAL_P (orig))))
1884 {
1885 if (reg == NULL)
1886 reg = gen_reg_rtx (Pmode);
1887
1888 x = gen_rtx_UNSPEC (SImode, gen_rtvec (1, orig), UNSPEC_GOTOFF);
1889 x = gen_rtx_CONST (SImode, x);
1890 emit_move_insn (reg, x);
1891
1892 x = emit_insn (gen_addsi3 (reg, reg, pic_offset_table_rtx));
1893 }
1894 else if (GET_CODE (orig) == SYMBOL_REF)
1895 {
1896 if (reg == NULL)
1897 reg = gen_reg_rtx (Pmode);
1898
1899 x = gen_rtx_UNSPEC (SImode, gen_rtvec (1, orig), UNSPEC_GOT);
1900 x = gen_rtx_CONST (SImode, x);
1901 x = gen_rtx_PLUS (SImode, pic_offset_table_rtx, x);
1902 x = gen_const_mem (SImode, x);
1903
1904 x = emit_move_insn (reg, x);
1905 }
1906 else
1907 return orig;
1908
1909 set_unique_reg_note (x, REG_EQUAL, orig);
1910 return reg;
1911 }
1912
1913 /* Return zero if X references a SYMBOL_REF or LABEL_REF whose symbol
1914 isn't protected by a PIC unspec; nonzero otherwise. */
1915
1916 int
1917 mn10300_legitimate_pic_operand_p (rtx x)
1918 {
1919 const char *fmt;
1920 int i;
1921
1922 if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
1923 return 0;
1924
1925 if (GET_CODE (x) == UNSPEC
1926 && (XINT (x, 1) == UNSPEC_PIC
1927 || XINT (x, 1) == UNSPEC_GOT
1928 || XINT (x, 1) == UNSPEC_GOTOFF
1929 || XINT (x, 1) == UNSPEC_PLT
1930 || XINT (x, 1) == UNSPEC_GOTSYM_OFF))
1931 return 1;
1932
1933 fmt = GET_RTX_FORMAT (GET_CODE (x));
1934 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
1935 {
1936 if (fmt[i] == 'E')
1937 {
1938 int j;
1939
1940 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1941 if (! mn10300_legitimate_pic_operand_p (XVECEXP (x, i, j)))
1942 return 0;
1943 }
1944 else if (fmt[i] == 'e'
1945 && ! mn10300_legitimate_pic_operand_p (XEXP (x, i)))
1946 return 0;
1947 }
1948
1949 return 1;
1950 }
1951
1952 /* Return TRUE if the address X, taken from a (MEM:MODE X) rtx, is
1953 legitimate, and FALSE otherwise.
1954
1955 On the mn10300, the value in the address register must be
1956 in the same memory space/segment as the effective address.
1957
1958 This is problematical for reload since it does not understand
1959 that base+index != index+base in a memory reference.
1960
1961 Note it is still possible to use reg+reg addressing modes,
1962 it's just much more difficult. For a discussion of a possible
1963 workaround and solution, see the comments in pa.c before the
1964 function record_unscaled_index_insn_codes. */
1965
1966 static bool
1967 mn10300_legitimate_address_p (machine_mode mode, rtx x, bool strict)
1968 {
1969 rtx base, index;
1970
1971 if (CONSTANT_ADDRESS_P (x))
1972 return !flag_pic || mn10300_legitimate_pic_operand_p (x);
1973
1974 if (RTX_OK_FOR_BASE_P (x, strict))
1975 return true;
1976
1977 if (TARGET_AM33 && (mode == SImode || mode == SFmode || mode == HImode))
1978 {
1979 if (GET_CODE (x) == POST_INC)
1980 return RTX_OK_FOR_BASE_P (XEXP (x, 0), strict);
1981 if (GET_CODE (x) == POST_MODIFY)
1982 return (RTX_OK_FOR_BASE_P (XEXP (x, 0), strict)
1983 && CONSTANT_ADDRESS_P (XEXP (x, 1)));
1984 }
1985
1986 if (GET_CODE (x) != PLUS)
1987 return false;
1988
1989 base = XEXP (x, 0);
1990 index = XEXP (x, 1);
1991
1992 if (!REG_P (base))
1993 return false;
1994 if (REG_P (index))
1995 {
1996 /* ??? Without AM33 generalized (Ri,Rn) addressing, reg+reg
1997 addressing is hard to satisfy. */
1998 if (!TARGET_AM33)
1999 return false;
2000
2001 return (REGNO_GENERAL_P (REGNO (base), strict)
2002 && REGNO_GENERAL_P (REGNO (index), strict));
2003 }
2004
2005 if (!REGNO_STRICT_OK_FOR_BASE_P (REGNO (base), strict))
2006 return false;
2007
2008 if (CONST_INT_P (index))
2009 return IN_RANGE (INTVAL (index), -1 - 0x7fffffff, 0x7fffffff);
2010
2011 if (CONSTANT_ADDRESS_P (index))
2012 return !flag_pic || mn10300_legitimate_pic_operand_p (index);
2013
2014 return false;
2015 }
2016
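/* Illustrative examples (a sketch, not from the original sources),
   assuming a0 satisfies the base-register test:

     (reg:SI a0)                           accepted
     (plus:SI (reg:SI a0) (const_int 8))   accepted
     (plus:SI (reg:SI a0) (reg:SI a1))     accepted only for AM33, and
                                           only if both registers pass
                                           REGNO_GENERAL_P
     (post_inc:SI (reg:SI a0))             accepted only for AM33 with
                                           MODE of SImode, SFmode or
                                           HImode  */
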
2017 bool
2018 mn10300_regno_in_class_p (unsigned regno, int rclass, bool strict)
2019 {
2020 if (regno >= FIRST_PSEUDO_REGISTER)
2021 {
2022 if (!strict)
2023 return true;
2024 if (!reg_renumber)
2025 return false;
2026 regno = reg_renumber[regno];
2027 if (regno == INVALID_REGNUM)
2028 return false;
2029 }
2030 return TEST_HARD_REG_BIT (reg_class_contents[rclass], regno);
2031 }
2032
2033 rtx
2034 mn10300_legitimize_reload_address (rtx x,
2035 machine_mode mode ATTRIBUTE_UNUSED,
2036 int opnum, int type,
2037 int ind_levels ATTRIBUTE_UNUSED)
2038 {
2039 bool any_change = false;
2040
2041 /* See above re disabling reg+reg addressing for MN103. */
2042 if (!TARGET_AM33)
2043 return NULL_RTX;
2044
2045 if (GET_CODE (x) != PLUS)
2046 return NULL_RTX;
2047
2048 if (XEXP (x, 0) == stack_pointer_rtx)
2049 {
2050 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
2051 GENERAL_REGS, GET_MODE (x), VOIDmode, 0, 0,
2052 opnum, (enum reload_type) type);
2053 any_change = true;
2054 }
2055 if (XEXP (x, 1) == stack_pointer_rtx)
2056 {
2057 push_reload (XEXP (x, 1), NULL_RTX, &XEXP (x, 1), NULL,
2058 GENERAL_REGS, GET_MODE (x), VOIDmode, 0, 0,
2059 opnum, (enum reload_type) type);
2060 any_change = true;
2061 }
2062
2063 return any_change ? x : NULL_RTX;
2064 }
2065
2066 /* Implement TARGET_LEGITIMATE_CONSTANT_P. Returns TRUE if X is a valid
2067 constant. Note that some "constants" aren't valid, such as TLS
2068 symbols and unconverted GOT-based references, so we eliminate
2069 those here. */
2070
2071 static bool
2072 mn10300_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
2073 {
2074 switch (GET_CODE (x))
2075 {
2076 case CONST:
2077 x = XEXP (x, 0);
2078
2079 if (GET_CODE (x) == PLUS)
2080 {
2081 if (! CONST_INT_P (XEXP (x, 1)))
2082 return false;
2083 x = XEXP (x, 0);
2084 }
2085
2086 /* Only some unspecs are valid as "constants". */
2087 if (GET_CODE (x) == UNSPEC)
2088 {
2089 switch (XINT (x, 1))
2090 {
2091 case UNSPEC_PIC:
2092 case UNSPEC_GOT:
2093 case UNSPEC_GOTOFF:
2094 case UNSPEC_PLT:
2095 return true;
2096 default:
2097 return false;
2098 }
2099 }
2100
2101 /* We must have drilled down to a symbol. */
2102 if (! mn10300_symbolic_operand (x, Pmode))
2103 return false;
2104 break;
2105
2106 default:
2107 break;
2108 }
2109
2110 return true;
2111 }
2112
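/* Illustrative examples (a sketch, not from the original sources):

     (const_int 42)                                    valid
     (const:SI (plus:SI (unspec:SI [(symbol_ref "x")]
                                   UNSPEC_GOTOFF)
                        (const_int 4)))                valid
     (const:SI (unspec:SI [(symbol_ref "x")]
                          UNSPEC_GOTSYM_OFF))          rejected

   UNSPEC_GOTSYM_OFF is absent from the switch above, so unconverted
   GOT-based references are filtered out here.  */
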
2113 /* Undo pic address legitimization for the benefit of debug info. */
2114
2115 static rtx
2116 mn10300_delegitimize_address (rtx orig_x)
2117 {
2118 rtx x = orig_x, ret, addend = NULL;
2119 bool need_mem;
2120
2121 if (MEM_P (x))
2122 x = XEXP (x, 0);
2123 if (GET_CODE (x) != PLUS || GET_MODE (x) != Pmode)
2124 return orig_x;
2125
2126 if (XEXP (x, 0) == pic_offset_table_rtx)
2127 ;
2128 /* With the REG+REG addressing of AM33, var-tracking can re-assemble
2129 some odd-looking "addresses" that were never valid in the first place.
2130 We need to look harder to avoid warnings being emitted. */
2131 else if (GET_CODE (XEXP (x, 0)) == PLUS)
2132 {
2133 rtx x0 = XEXP (x, 0);
2134 rtx x00 = XEXP (x0, 0);
2135 rtx x01 = XEXP (x0, 1);
2136
2137 if (x00 == pic_offset_table_rtx)
2138 addend = x01;
2139 else if (x01 == pic_offset_table_rtx)
2140 addend = x00;
2141 else
2142 return orig_x;
2143
2144 }
2145 else
2146 return orig_x;
2147 x = XEXP (x, 1);
2148
2149 if (GET_CODE (x) != CONST)
2150 return orig_x;
2151 x = XEXP (x, 0);
2152 if (GET_CODE (x) != UNSPEC)
2153 return orig_x;
2154
2155 ret = XVECEXP (x, 0, 0);
2156 if (XINT (x, 1) == UNSPEC_GOTOFF)
2157 need_mem = false;
2158 else if (XINT (x, 1) == UNSPEC_GOT)
2159 need_mem = true;
2160 else
2161 return orig_x;
2162
2163 gcc_assert (GET_CODE (ret) == SYMBOL_REF);
2164 if (need_mem != MEM_P (orig_x))
2165 return orig_x;
2166 if (need_mem && addend)
2167 return orig_x;
2168 if (addend)
2169 ret = gen_rtx_PLUS (Pmode, addend, ret);
2170 return ret;
2171 }
2172
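/* An illustrative sketch (not from the original sources): given

     (mem:SI (plus:SI (reg:SI PIC)
                      (const:SI (unspec:SI [(symbol_ref "gsym")]
                                           UNSPEC_GOT))))

   this function returns (symbol_ref "gsym"), undoing the GOT
   indirection so that debug info can refer to the symbol itself.  */
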
2173 /* For addresses, costs are relative to "MOV (Rm),Rn". For AM33 this is
2174 the 3-byte fully general instruction; for MN103 this is the 2-byte form
2175 with an address register. */
2176
2177 static int
2178 mn10300_address_cost (rtx x, machine_mode mode ATTRIBUTE_UNUSED,
2179 addr_space_t as ATTRIBUTE_UNUSED, bool speed)
2180 {
2181 HOST_WIDE_INT i;
2182 rtx base, index;
2183
2184 switch (GET_CODE (x))
2185 {
2186 case CONST:
2187 case SYMBOL_REF:
2188 case LABEL_REF:
2189 /* We assume all of these require a 32-bit constant, even though
2190 some symbol and label references can be relaxed. */
2191 return speed ? 1 : 4;
2192
2193 case REG:
2194 case SUBREG:
2195 case POST_INC:
2196 return 0;
2197
2198 case POST_MODIFY:
2199 /* Assume any symbolic offset is a 32-bit constant. */
2200 i = (CONST_INT_P (XEXP (x, 1)) ? INTVAL (XEXP (x, 1)) : 0x12345678);
2201 if (IN_RANGE (i, -128, 127))
2202 return speed ? 0 : 1;
2203 if (speed)
2204 return 1;
2205 if (IN_RANGE (i, -0x800000, 0x7fffff))
2206 return 3;
2207 return 4;
2208
2209 case PLUS:
2210 base = XEXP (x, 0);
2211 index = XEXP (x, 1);
2212 if (register_operand (index, SImode))
2213 {
2214 /* Attempt to minimize the number of registers in the address.
2215 This is similar to what other ports do. */
2216 if (register_operand (base, SImode))
2217 return 1;
2218
2219 base = XEXP (x, 1);
2220 index = XEXP (x, 0);
2221 }
2222
2223 /* Assume any symbolic offset is a 32-bit constant. */
2224 i = (CONST_INT_P (index) ? INTVAL (index) : 0x12345678);
2225 if (IN_RANGE (i, -128, 127))
2226 return speed ? 0 : 1;
2227 if (IN_RANGE (i, -32768, 32767))
2228 return speed ? 0 : 2;
2229 return speed ? 2 : 6;
2230
2231 default:
2232 return rtx_cost (x, MEM, 0, speed);
2233 }
2234 }
2235
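/* Some illustrative data points (a sketch, not from the original
   sources), using the size-relative costs (SPEED == false):

     (reg:SI a0)                             -> 0
     (plus:SI (reg:SI a0) (const_int 4))     -> 1  (8-bit displacement)
     (plus:SI (reg:SI a0) (const_int 1000))  -> 2  (16-bit displacement)
     (symbol_ref "x")                        -> 4  (32-bit constant)  */
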
2236 /* Implement the TARGET_REGISTER_MOVE_COST hook.
2237
2238 Recall that the base value of 2 is required by assumptions elsewhere
2239 in the body of the compiler, and that cost 2 is special-cased as an
2240 early exit from reload meaning no work is required. */
2241
2242 static int
2243 mn10300_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
2244 reg_class_t ifrom, reg_class_t ito)
2245 {
2246 enum reg_class from = (enum reg_class) ifrom;
2247 enum reg_class to = (enum reg_class) ito;
2248 enum reg_class scratch, test;
2249
2250 /* Simplify the following code by unifying the fp register classes. */
2251 if (to == FP_ACC_REGS)
2252 to = FP_REGS;
2253 if (from == FP_ACC_REGS)
2254 from = FP_REGS;
2255
2256 /* Diagnose invalid moves by costing them as two moves. */
2257
2258 scratch = NO_REGS;
2259 test = from;
2260 if (to == SP_REGS)
2261 scratch = (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
2262 else if (to == MDR_REGS)
2263 scratch = DATA_REGS;
2264 else if (to == FP_REGS && to != from)
2265 scratch = GENERAL_REGS;
2266 else
2267 {
2268 test = to;
2269 if (from == SP_REGS)
2270 scratch = (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
2271 else if (from == MDR_REGS)
2272 scratch = DATA_REGS;
2273 else if (from == FP_REGS && to != from)
2274 scratch = GENERAL_REGS;
2275 }
2276 if (scratch != NO_REGS && !reg_class_subset_p (test, scratch))
2277 return (mn10300_register_move_cost (VOIDmode, from, scratch)
2278 + mn10300_register_move_cost (VOIDmode, scratch, to));
2279
2280 /* From here on, all we need consider are legal combinations. */
2281
2282 if (optimize_size)
2283 {
2284 /* The scale here is bytes * 2. */
2285
2286 if (from == to && (to == ADDRESS_REGS || to == DATA_REGS))
2287 return 2;
2288
2289 if (from == SP_REGS)
2290 return (to == ADDRESS_REGS ? 2 : 6);
2291
2292 /* For AM33, all remaining legal moves are two bytes. */
2293 if (TARGET_AM33)
2294 return 4;
2295
2296 if (to == SP_REGS)
2297 return (from == ADDRESS_REGS ? 4 : 6);
2298
2299 if ((from == ADDRESS_REGS || from == DATA_REGS)
2300 && (to == ADDRESS_REGS || to == DATA_REGS))
2301 return 4;
2302
2303 if (to == EXTENDED_REGS)
2304 return (to == from ? 6 : 4);
2305
2306 /* What's left are SP_REGS, FP_REGS, or combinations of the above. */
2307 return 6;
2308 }
2309 else
2310 {
2311 /* The scale here is cycles * 2. */
2312
2313 if (to == FP_REGS)
2314 return 8;
2315 if (from == FP_REGS)
2316 return 4;
2317
2318 /* All legal moves between integral registers are single cycle. */
2319 return 2;
2320 }
2321 }
2322
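/* For instance (a sketch, not from the original sources): with
   !TARGET_AM33, a move from SP_REGS to DATA_REGS selects ADDRESS_REGS
   as the scratch class above, so it is costed as the sum of
   SP_REGS -> ADDRESS_REGS and ADDRESS_REGS -> DATA_REGS.  */
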
2323 /* Implement the TARGET_MEMORY_MOVE_COST hook.
2324
2325 Since the form of the address is not available here, this cost must
2326 be speed-relative; it should never be cheaper than the size-relative
2327 register move costs above, and it is not. */
2328
2329 static int
2330 mn10300_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
2331 reg_class_t iclass, bool in ATTRIBUTE_UNUSED)
2332 {
2333 enum reg_class rclass = (enum reg_class) iclass;
2334
2335 if (rclass == FP_REGS)
2336 return 8;
2337 return 6;
2338 }
2339
2340 /* Implement the TARGET_RTX_COSTS hook.
2341
2342 Speed-relative costs are relative to COSTS_N_INSNS, which is intended
2343 to represent cycles. Size-relative costs are in bytes. */
2344
2345 static bool
2346 mn10300_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
2347 int *ptotal, bool speed)
2348 {
2349 /* This value is used for SYMBOL_REF etc. where we want to pretend
2350 we have a full 32-bit constant. */
2351 HOST_WIDE_INT i = 0x12345678;
2352 int total;
2353
2354 switch (code)
2355 {
2356 case CONST_INT:
2357 i = INTVAL (x);
2358 do_int_costs:
2359 if (speed)
2360 {
2361 if (outer_code == SET)
2362 {
2363 /* 16-bit integer loads have latency 1, 32-bit loads 2. */
2364 if (IN_RANGE (i, -32768, 32767))
2365 total = COSTS_N_INSNS (1);
2366 else
2367 total = COSTS_N_INSNS (2);
2368 }
2369 else
2370 {
2371 /* 16-bit integer operands don't affect latency;
2372 24-bit and 32-bit operands add a cycle. */
2373 if (IN_RANGE (i, -32768, 32767))
2374 total = 0;
2375 else
2376 total = COSTS_N_INSNS (1);
2377 }
2378 }
2379 else
2380 {
2381 if (outer_code == SET)
2382 {
2383 if (i == 0)
2384 total = 1;
2385 else if (IN_RANGE (i, -128, 127))
2386 total = 2;
2387 else if (IN_RANGE (i, -32768, 32767))
2388 total = 3;
2389 else
2390 total = 6;
2391 }
2392 else
2393 {
2394 /* Reference here is ADD An,Dn, vs ADD imm,Dn. */
2395 if (IN_RANGE (i, -128, 127))
2396 total = 0;
2397 else if (IN_RANGE (i, -32768, 32767))
2398 total = 2;
2399 else if (TARGET_AM33 && IN_RANGE (i, -0x01000000, 0x00ffffff))
2400 total = 3;
2401 else
2402 total = 4;
2403 }
2404 }
2405 goto alldone;
2406
2407 case CONST:
2408 case LABEL_REF:
2409 case SYMBOL_REF:
2410 case CONST_DOUBLE:
2411 /* We assume all of these require a 32-bit constant, even though
2412 some symbol and label references can be relaxed. */
2413 goto do_int_costs;
2414
2415 case UNSPEC:
2416 switch (XINT (x, 1))
2417 {
2418 case UNSPEC_PIC:
2419 case UNSPEC_GOT:
2420 case UNSPEC_GOTOFF:
2421 case UNSPEC_PLT:
2422 case UNSPEC_GOTSYM_OFF:
2423 /* The PIC unspecs also resolve to a 32-bit constant. */
2424 goto do_int_costs;
2425
2426 default:
2427 /* Assume any non-listed unspec is some sort of arithmetic. */
2428 goto do_arith_costs;
2429 }
2430
2431 case PLUS:
2432 /* Notice the size difference of INC and INC4. */
2433 if (!speed && outer_code == SET && CONST_INT_P (XEXP (x, 1)))
2434 {
2435 i = INTVAL (XEXP (x, 1));
2436 if (i == 1 || i == 4)
2437 {
2438 total = 1 + rtx_cost (XEXP (x, 0), PLUS, 0, speed);
2439 goto alldone;
2440 }
2441 }
2442 goto do_arith_costs;
2443
2444 case MINUS:
2445 case AND:
2446 case IOR:
2447 case XOR:
2448 case NOT:
2449 case NEG:
2450 case ZERO_EXTEND:
2451 case SIGN_EXTEND:
2452 case COMPARE:
2453 case BSWAP:
2454 case CLZ:
2455 do_arith_costs:
2456 total = (speed ? COSTS_N_INSNS (1) : 2);
2457 break;
2458
2459 case ASHIFT:
2460 /* Notice the size difference of ASL2 and variants. */
2461 if (!speed && CONST_INT_P (XEXP (x, 1)))
2462 switch (INTVAL (XEXP (x, 1)))
2463 {
2464 case 1:
2465 case 2:
2466 total = 1;
2467 goto alldone;
2468 case 3:
2469 case 4:
2470 total = 2;
2471 goto alldone;
2472 }
2473 /* FALLTHRU */
2474
2475 case ASHIFTRT:
2476 case LSHIFTRT:
2477 total = (speed ? COSTS_N_INSNS (1) : 3);
2478 goto alldone;
2479
2480 case MULT:
2481 total = (speed ? COSTS_N_INSNS (3) : 2);
2482 break;
2483
2484 case DIV:
2485 case UDIV:
2486 case MOD:
2487 case UMOD:
2488 total = (speed ? COSTS_N_INSNS (39)
2489 /* Include space to load+retrieve MDR. */
2490 : code == MOD || code == UMOD ? 6 : 4);
2491 break;
2492
2493 case MEM:
2494 total = mn10300_address_cost (XEXP (x, 0), GET_MODE (x),
2495 MEM_ADDR_SPACE (x), speed);
2496 if (speed)
2497 total = COSTS_N_INSNS (2 + total);
2498 goto alldone;
2499
2500 default:
2501 /* Probably not implemented. Assume external call. */
2502 total = (speed ? COSTS_N_INSNS (10) : 7);
2503 break;
2504 }
2505
2506 *ptotal = total;
2507 return false;
2508
2509 alldone:
2510 *ptotal = total;
2511 return true;
2512 }
2513
2514 /* If using PIC, mark a SYMBOL_REF for a non-global symbol so that we
2515 may access it using GOTOFF instead of GOT. */
2516
2517 static void
2518 mn10300_encode_section_info (tree decl, rtx rtl, int first)
2519 {
2520 rtx symbol;
2521
2522 default_encode_section_info (decl, rtl, first);
2523
2524 if (! MEM_P (rtl))
2525 return;
2526
2527 symbol = XEXP (rtl, 0);
2528 if (GET_CODE (symbol) != SYMBOL_REF)
2529 return;
2530
2531 if (flag_pic)
2532 SYMBOL_REF_FLAG (symbol) = (*targetm.binds_local_p) (decl);
2533 }
2534
2535 /* Dispatch tables on the mn10300 are extremely expensive in terms of code
2536 and readonly data size. So we crank up the case threshold value to
2537 encourage a series of if/else comparisons to implement many small switch
2538 statements. In theory, this value could be increased much more if we
2539 were solely optimizing for space, but we keep it "reasonable" to avoid
2540 serious code efficiency lossage. */
2541
2542 static unsigned int
2543 mn10300_case_values_threshold (void)
2544 {
2545 return 6;
2546 }
2547
2548 /* Worker function for TARGET_TRAMPOLINE_INIT. */
2549
2550 static void
2551 mn10300_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
2552 {
2553 rtx mem, disp, fnaddr = XEXP (DECL_RTL (fndecl), 0);
2554
2555 /* This is a strict alignment target, which means that we play
2556 some games to make sure that the locations at which we need
2557 to store <chain> and <disp> wind up at aligned addresses.
2558
2559 0x28 0x00 add 0,d0
2560 0xfc 0xdd mov chain,a1
2561 <chain>
2562 0xf8 0xed 0x00 btst 0,d1
2563 0xdc jmp fnaddr
2564 <disp>
2565
2566 Note that the two extra insns are effectively nops; they
2567 clobber the flags but do not affect the contents of D0 or D1. */
2568
2569 disp = expand_binop (SImode, sub_optab, fnaddr,
2570 plus_constant (Pmode, XEXP (m_tramp, 0), 11),
2571 NULL_RTX, 1, OPTAB_DIRECT);
2572
2573 mem = adjust_address (m_tramp, SImode, 0);
2574 emit_move_insn (mem, gen_int_mode (0xddfc0028, SImode));
2575 mem = adjust_address (m_tramp, SImode, 4);
2576 emit_move_insn (mem, chain_value);
2577 mem = adjust_address (m_tramp, SImode, 8);
2578 emit_move_insn (mem, gen_int_mode (0xdc00edf8, SImode));
2579 mem = adjust_address (m_tramp, SImode, 12);
2580 emit_move_insn (mem, disp);
2581 }
2582
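/* Viewed as the four little-endian SImode words stored above (an
   illustrative sketch, not from the original sources):

     offset  0: 0xddfc0028   add 0,d0 ; start of mov chain,a1
     offset  4: <chain>      the static chain value
     offset  8: 0xdc00edf8   btst 0,d1 ; the jmp opcode byte
     offset 12: <disp>       fnaddr - (m_tramp + 11)  */
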
2583 /* Output the assembler code for a C++ thunk function.
2584 THUNK_DECL is the declaration for the thunk function itself, FUNCTION
2585 is the decl for the target function. DELTA is an immediate constant
2586 offset to be added to the THIS parameter. If VCALL_OFFSET is nonzero
2587 the word at the adjusted address *(*THIS' + VCALL_OFFSET) should be
2588 additionally added to THIS. Finally jump to the entry point of
2589 FUNCTION. */
2590
2591 static void
2592 mn10300_asm_output_mi_thunk (FILE * file,
2593 tree thunk_fndecl ATTRIBUTE_UNUSED,
2594 HOST_WIDE_INT delta,
2595 HOST_WIDE_INT vcall_offset,
2596 tree function)
2597 {
2598 const char * _this;
2599
2600 /* Get the register holding the THIS parameter. Handle the case
2601 where there is a hidden first argument for a returned structure. */
2602 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
2603 _this = reg_names [FIRST_ARGUMENT_REGNUM + 1];
2604 else
2605 _this = reg_names [FIRST_ARGUMENT_REGNUM];
2606
2607 fprintf (file, "\t%s Thunk Entry Point:\n", ASM_COMMENT_START);
2608
2609 if (delta)
2610 fprintf (file, "\tadd %d, %s\n", (int) delta, _this);
2611
2612 if (vcall_offset)
2613 {
2614 const char * scratch = reg_names [FIRST_ADDRESS_REGNUM + 1];
2615
2616 fprintf (file, "\tmov %s, %s\n", _this, scratch);
2617 fprintf (file, "\tmov (%s), %s\n", scratch, scratch);
2618 fprintf (file, "\tadd %d, %s\n", (int) vcall_offset, scratch);
2619 fprintf (file, "\tmov (%s), %s\n", scratch, scratch);
2620 fprintf (file, "\tadd %s, %s\n", scratch, _this);
2621 }
2622
2623 fputs ("\tjmp ", file);
2624 assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
2625 putc ('\n', file);
2626 }
2627
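/* For DELTA == 4 and VCALL_OFFSET == 8 the emitted thunk would look
   roughly like this (an illustrative sketch, not from the original
   sources; '#' stands for ASM_COMMENT_START, "function" for the
   target's assembler name, and d0/a1 for the THIS and scratch
   registers chosen above):

	# Thunk Entry Point:
	add 4, d0
	mov d0, a1
	mov (a1), a1
	add 8, a1
	mov (a1), a1
	add a1, d0
	jmp function  */
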
2628 /* Return true if mn10300_output_mi_thunk would be able to output the
2629 assembler code for the thunk function specified by the arguments
2630 it is passed, and false otherwise. */
2631
2632 static bool
2633 mn10300_can_output_mi_thunk (const_tree thunk_fndecl ATTRIBUTE_UNUSED,
2634 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
2635 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
2636 const_tree function ATTRIBUTE_UNUSED)
2637 {
2638 return true;
2639 }
2640
2641 bool
2642 mn10300_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
2643 {
2644 if (REGNO_REG_CLASS (regno) == FP_REGS
2645 || REGNO_REG_CLASS (regno) == FP_ACC_REGS)
2646 /* Do not store integer values in FP registers. */
2647 return GET_MODE_CLASS (mode) == MODE_FLOAT && ((regno & 1) == 0);
2648
2649 if (! TARGET_AM33 && REGNO_REG_CLASS (regno) == EXTENDED_REGS)
2650 return false;
2651
2652 if (((regno) & 1) == 0 || GET_MODE_SIZE (mode) == 4)
2653 return true;
2654
2655 if (REGNO_REG_CLASS (regno) == DATA_REGS
2656 || (TARGET_AM33 && REGNO_REG_CLASS (regno) == ADDRESS_REGS)
2657 || REGNO_REG_CLASS (regno) == EXTENDED_REGS)
2658 return GET_MODE_SIZE (mode) <= 4;
2659
2660 return false;
2661 }
2662
2663 bool
2664 mn10300_modes_tieable (machine_mode mode1, machine_mode mode2)
2665 {
2666 if (GET_MODE_CLASS (mode1) == MODE_FLOAT
2667 && GET_MODE_CLASS (mode2) != MODE_FLOAT)
2668 return false;
2669
2670 if (GET_MODE_CLASS (mode2) == MODE_FLOAT
2671 && GET_MODE_CLASS (mode1) != MODE_FLOAT)
2672 return false;
2673
2674 if (TARGET_AM33
2675 || mode1 == mode2
2676 || (GET_MODE_SIZE (mode1) <= 4 && GET_MODE_SIZE (mode2) <= 4))
2677 return true;
2678
2679 return false;
2680 }
2681
2682 static int
2683 cc_flags_for_mode (machine_mode mode)
2684 {
2685 switch (mode)
2686 {
2687 case CCmode:
2688 return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_C | CC_FLAG_V;
2689 case CCZNCmode:
2690 return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_C;
2691 case CCZNmode:
2692 return CC_FLAG_Z | CC_FLAG_N;
2693 case CC_FLOATmode:
2694 return -1;
2695 default:
2696 gcc_unreachable ();
2697 }
2698 }
2699
2700 static int
2701 cc_flags_for_code (enum rtx_code code)
2702 {
2703 switch (code)
2704 {
2705 case EQ: /* Z */
2706 case NE: /* ~Z */
2707 return CC_FLAG_Z;
2708
2709 case LT: /* N */
2710 case GE: /* ~N */
2711 return CC_FLAG_N;
2713
2714 case GT: /* ~(Z|(N^V)) */
2715 case LE: /* Z|(N^V) */
2716 return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_V;
2717
2718 case GEU: /* ~C */
2719 case LTU: /* C */
2720 return CC_FLAG_C;
2721
2722 case GTU: /* ~(C | Z) */
2723 case LEU: /* C | Z */
2724 return CC_FLAG_Z | CC_FLAG_C;
2725
2726 case ORDERED:
2727 case UNORDERED:
2728 case LTGT:
2729 case UNEQ:
2730 case UNGE:
2731 case UNGT:
2732 case UNLE:
2733 case UNLT:
2734 return -1;
2735
2736 default:
2737 gcc_unreachable ();
2738 }
2739 }
2740
2741 machine_mode
2742 mn10300_select_cc_mode (enum rtx_code code, rtx x, rtx y ATTRIBUTE_UNUSED)
2743 {
2744 int req;
2745
2746 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2747 return CC_FLOATmode;
2748
2749 req = cc_flags_for_code (code);
2750
2751 if (req & CC_FLAG_V)
2752 return CCmode;
2753 if (req & CC_FLAG_C)
2754 return CCZNCmode;
2755 return CCZNmode;
2756 }
2757
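/* Illustrative examples (a sketch, not from the original sources):

     EQ, NE              need only Z      -> CCZNmode
     LTU, GEU            also need C      -> CCZNCmode
     GT, LE              need V as well   -> CCmode
     any float compare                    -> CC_FLOATmode  */
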
2758 static inline bool
2759 set_is_load_p (rtx set)
2760 {
2761 return MEM_P (SET_SRC (set));
2762 }
2763
2764 static inline bool
2765 set_is_store_p (rtx set)
2766 {
2767 return MEM_P (SET_DEST (set));
2768 }
2769
2770 /* Update scheduling costs for situations that cannot be
2771 described using the attributes and DFA machinery.
2772 DEP is the insn being scheduled.
2773 INSN is the previous insn.
2774 COST is the current cycle cost for DEP. */
2775
2776 static int
2777 mn10300_adjust_sched_cost (rtx_insn *insn, rtx link, rtx_insn *dep, int cost)
2778 {
2779 rtx insn_set;
2780 rtx dep_set;
2781 int timings;
2782
2783 if (!TARGET_AM33)
2784 return 1;
2785
2786 /* We are only interested in pairs of SETs. */
2787 insn_set = single_set (insn);
2788 if (!insn_set)
2789 return cost;
2790
2791 dep_set = single_set (dep);
2792 if (!dep_set)
2793 return cost;
2794
2795 /* For the AM34 a load instruction that follows a
2796 store instruction incurs an extra cycle of delay. */
2797 if (mn10300_tune_cpu == PROCESSOR_AM34
2798 && set_is_load_p (dep_set)
2799 && set_is_store_p (insn_set))
2800 cost += 1;
2801
2802 /* For the AM34 a non-store, non-branch FPU insn that follows
2803 another FPU insn incurs a one cycle throughput increase. */
2804 else if (mn10300_tune_cpu == PROCESSOR_AM34
2805 && ! set_is_store_p (insn_set)
2806 && ! JUMP_P (insn)
2807 && GET_MODE_CLASS (GET_MODE (SET_SRC (dep_set))) == MODE_FLOAT
2808 && GET_MODE_CLASS (GET_MODE (SET_SRC (insn_set))) == MODE_FLOAT)
2809 cost += 1;
2810
2811 /* Resolve the conflict described in section 1-7-4 of
2812 Chapter 3 of the MN103E Series Instruction Manual
2813 where it says:
2814
2815 "When the preceding instruction is a CPU load or
2816 store instruction, a following FPU instruction
2817 cannot be executed until the CPU completes the
2818 latency period even though there are no register
2819 or flag dependencies between them." */
2820
2821 /* Only the AM33-2 (and later) CPUs have FPU instructions. */
2822 if (! TARGET_AM33_2)
2823 return cost;
2824
2825 /* If a data dependence already exists then the cost is correct. */
2826 if (REG_NOTE_KIND (link) == 0)
2827 return cost;
2828
2829 /* Check that the instruction about to be scheduled is an FPU instruction. */
2830 if (GET_MODE_CLASS (GET_MODE (SET_SRC (dep_set))) != MODE_FLOAT)
2831 return cost;
2832
2833 /* Now check to see if the previous instruction is a load or store. */
2834 if (! set_is_load_p (insn_set) && ! set_is_store_p (insn_set))
2835 return cost;
2836
2837 /* XXX: Verify: The text of 1-7-4 implies that the restriction
2838 only applies when an INTEGER load/store precedes an FPU
2839 instruction, but is this true? For now we assume that it is. */
2840 if (GET_MODE_CLASS (GET_MODE (SET_SRC (insn_set))) != MODE_INT)
2841 return cost;
2842
2843 /* Extract the latency value from the timings attribute. */
2844 timings = get_attr_timings (insn);
2845 return timings < 100 ? (timings % 10) : (timings % 100);
2846 }
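
/* For example (a sketch, not from the original sources): the final
   expression above takes the latency from the low digits of the
   TIMINGS attribute, so a TIMINGS value of 22 yields a latency of 2
   cycles, while a value of 4711 yields a latency of 11 cycles.  */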
2847
2848 static void
2849 mn10300_conditional_register_usage (void)
2850 {
2851 unsigned int i;
2852
2853 if (!TARGET_AM33)
2854 {
2855 for (i = FIRST_EXTENDED_REGNUM;
2856 i <= LAST_EXTENDED_REGNUM; i++)
2857 fixed_regs[i] = call_used_regs[i] = 1;
2858 }
2859 if (!TARGET_AM33_2)
2860 {
2861 for (i = FIRST_FP_REGNUM;
2862 i <= LAST_FP_REGNUM; i++)
2863 fixed_regs[i] = call_used_regs[i] = 1;
2864 }
2865 if (flag_pic)
2866 fixed_regs[PIC_OFFSET_TABLE_REGNUM] =
2867 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
2868 }
2869
2870 /* Worker function for TARGET_MD_ASM_CLOBBERS.
2871 We do this in the mn10300 backend to maintain source compatibility
2872 with the old cc0-based compiler. */
2873
2874 static tree
2875 mn10300_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
2876 tree inputs ATTRIBUTE_UNUSED,
2877 tree clobbers)
2878 {
2879 clobbers = tree_cons (NULL_TREE, build_string (5, "EPSW"),
2880 clobbers);
2881 return clobbers;
2882 }
2883 \f
2884 /* A helper function for splitting cbranch patterns after reload. */
2885
2886 void
2887 mn10300_split_cbranch (machine_mode cmp_mode, rtx cmp_op, rtx label_ref)
2888 {
2889 rtx flags, x;
2890
2891 flags = gen_rtx_REG (cmp_mode, CC_REG);
2892 x = gen_rtx_COMPARE (cmp_mode, XEXP (cmp_op, 0), XEXP (cmp_op, 1));
2893 x = gen_rtx_SET (VOIDmode, flags, x);
2894 emit_insn (x);
2895
2896 x = gen_rtx_fmt_ee (GET_CODE (cmp_op), VOIDmode, flags, const0_rtx);
2897 x = gen_rtx_IF_THEN_ELSE (VOIDmode, x, label_ref, pc_rtx);
2898 x = gen_rtx_SET (VOIDmode, pc_rtx, x);
2899 emit_jump_insn (x);
2900 }
2901
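/* An illustrative sketch (not from the original sources), assuming
   CMP_MODE is CCmode: a combined compare-and-branch

     (set (pc) (if_then_else (lt (reg:SI Rm) (reg:SI Rn))
                             (label_ref L) (pc)))

   is split into a flags-setting compare followed by a jump that
   tests the flags register:

     (set (reg:CC CC_REG) (compare:CC (reg:SI Rm) (reg:SI Rn)))
     (set (pc) (if_then_else (lt (reg:CC CC_REG) (const_int 0))
                             (label_ref L) (pc)))  */
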
2902 /* A helper function for matching parallels that set the flags. */
2903
2904 bool
2905 mn10300_match_ccmode (rtx insn, machine_mode cc_mode)
2906 {
2907 rtx op1, flags;
2908 machine_mode flags_mode;
2909
2910 gcc_checking_assert (XVECLEN (PATTERN (insn), 0) == 2);
2911
2912 op1 = XVECEXP (PATTERN (insn), 0, 1);
2913 gcc_checking_assert (GET_CODE (SET_SRC (op1)) == COMPARE);
2914
2915 flags = SET_DEST (op1);
2916 flags_mode = GET_MODE (flags);
2917
2918 if (GET_MODE (SET_SRC (op1)) != flags_mode)
2919 return false;
2920 if (GET_MODE_CLASS (flags_mode) != MODE_CC)
2921 return false;
2922
2923 /* Ensure that the mode of FLAGS is compatible with CC_MODE. */
2924 if (cc_flags_for_mode (flags_mode) & ~cc_flags_for_mode (cc_mode))
2925 return false;
2926
2927 return true;
2928 }
2929
2930 /* This function is used to help split:
2931
2932 (set (reg) (and (reg) (int)))
2933
2934 into:
2935
2936 (set (reg) (shift (reg) (int))
2937 (set (reg) (shift (reg) (int))
2938
2939 where the shifts will be shorter than the "and" insn.
2940
2941 It returns the number of bits that should be shifted. A positive
2942 value means that the low bits are to be cleared (and hence the
2943 shifts should be right followed by left) whereas a negative value
2944 means that the high bits are to be cleared (left followed by right).
2945 Zero is returned when it would not be economical to split the AND. */
2946
2947 int
2948 mn10300_split_and_operand_count (rtx op)
2949 {
2950 HOST_WIDE_INT val = INTVAL (op);
2951 int count;
2952
2953 if (val < 0)
2954 {
2955 /* High bit is set, look for bits clear at the bottom. */
2956 count = exact_log2 (-val);
2957 if (count < 0)
2958 return 0;
2959 /* This is only a size win if we can use the asl2 insn. Otherwise we
2960 would be replacing one 6-byte insn with two 3-byte insns. */
2961 if (count > (optimize_insn_for_speed_p () ? 2 : 4))
2962 return 0;
2963 return count;
2964 }
2965 else
2966 {
2967 /* High bit is clear, look for bits set at the bottom. */
2968 count = exact_log2 (val + 1);
2969 count = 32 - count;
2970 /* Again, this is only a size win with asl2. */
2971 if (count > (optimize_insn_for_speed_p () ? 2 : 4))
2972 return 0;
2973 return -count;
2974 }
2975 }
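
/* Worked examples (a sketch, not from the original sources):

     AND 0xfffffffc,Dn  ->  2: clear the low two bits, i.e. shift
                            right by 2 then left by 2;
     AND 0x3fffffff,Dn  -> -2: clear the high two bits, i.e. shift
                            left by 2 then right by 2;
     AND 0x00ffffff,Dn  ->  0: the required shift count (8) exceeds
                            the asl2 size win, so the AND is kept.  */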
2976 \f
2977 struct liw_data
2978 {
2979 enum attr_liw slot;
2980 enum attr_liw_op op;
2981 rtx dest;
2982 rtx src;
2983 };
2984
2985 /* Decide if the given insn is a candidate for LIW bundling. If it is then
2986 extract the operands and LIW attributes from the insn and use them to fill
2987 in the liw_data structure. Return true upon success or false if the insn
2988 cannot be bundled. */
2989
2990 static bool
2991 extract_bundle (rtx_insn *insn, struct liw_data * pdata)
2992 {
2993 bool allow_consts = true;
2994 rtx p;
2995
2996 gcc_assert (pdata != NULL);
2997
2998 if (insn == NULL)
2999 return false;
3000 /* Make sure that we are dealing with a simple SET insn. */
3001 p = single_set (insn);
3002 if (p == NULL_RTX)
3003 return false;
3004
3005 /* Make sure that it could go into one of the LIW pipelines. */
3006 pdata->slot = get_attr_liw (insn);
3007 if (pdata->slot == LIW_BOTH)
3008 return false;
3009
3010 pdata->op = get_attr_liw_op (insn);
3011
3012 switch (pdata->op)
3013 {
3014 case LIW_OP_MOV:
3015 pdata->dest = SET_DEST (p);
3016 pdata->src = SET_SRC (p);
3017 break;
3018 case LIW_OP_CMP:
3019 pdata->dest = XEXP (SET_SRC (p), 0);
3020 pdata->src = XEXP (SET_SRC (p), 1);
3021 break;
3022 case LIW_OP_NONE:
3023 return false;
3024 case LIW_OP_AND:
3025 case LIW_OP_OR:
3026 case LIW_OP_XOR:
3027 /* The AND, OR and XOR long instruction words only accept register arguments. */
3028 allow_consts = false;
3029 /* Fall through. */
3030 default:
3031 pdata->dest = SET_DEST (p);
3032 pdata->src = XEXP (SET_SRC (p), 1);
3033 break;
3034 }
3035
3036 if (! REG_P (pdata->dest))
3037 return false;
3038
3039 if (REG_P (pdata->src))
3040 return true;
3041
3042 return allow_consts && satisfies_constraint_O (pdata->src);
3043 }
3044
3045 /* Make sure that it is OK to execute LIW1 and LIW2 in parallel. GCC generated
3046 the instructions with the assumption that LIW1 would be executed before LIW2
3047 so we must check for overlaps between their sources and destinations. */
3048
3049 static bool
3050 check_liw_constraints (struct liw_data * pliw1, struct liw_data * pliw2)
3051 {
3052 /* Check for slot conflicts. */
3053 if (pliw2->slot == pliw1->slot && pliw1->slot != LIW_EITHER)
3054 return false;
3055
3056 /* If either operation is a compare, then "dest" is really an input; the real
3057 destination is CC_REG. So these instructions need different checks. */
3058
3059 /* Changing "CMP ; OP" into "CMP | OP" is OK because the comparison will
3060 check its values prior to any changes made by OP. */
3061 if (pliw1->op == LIW_OP_CMP)
3062 {
3063 /* Two sequential comparisons mean dead code, which ought to
3064 have been eliminated given that bundling only happens with
3065 optimization. We cannot bundle them in any case. */
3066 gcc_assert (pliw1->op != pliw2->op);
3067 return true;
3068 }
3069
3070 /* Changing "OP ; CMP" into "OP | CMP" does not work if the value being compared
3071 is the destination of OP, as the CMP will look at the old value, not the new
3072 one. */
3073 if (pliw2->op == LIW_OP_CMP)
3074 {
3075 if (REGNO (pliw2->dest) == REGNO (pliw1->dest))
3076 return false;
3077
3078 if (REG_P (pliw2->src))
3079 return REGNO (pliw2->src) != REGNO (pliw1->dest);
3080
3081 return true;
3082 }
3083
3084 /* Changing "OP1 ; OP2" into "OP1 | OP2" does not work if they both write to the
3085 same destination register. */
3086 if (REGNO (pliw2->dest) == REGNO (pliw1->dest))
3087 return false;
3088
3089 /* Changing "OP1 ; OP2" into "OP1 | OP2" generally does not work if the destination
3090 of OP1 is the source of OP2. The exception is when OP1 is a MOVE instruction when
3091 we can replace the source in OP2 with the source of OP1. */
3092 if (REG_P (pliw2->src) && REGNO (pliw2->src) == REGNO (pliw1->dest))
3093 {
3094 if (pliw1->op == LIW_OP_MOV && REG_P (pliw1->src))
3095 {
3096 /* OP1 is a register-to-register move, so OP2 can simply
3097 read OP1's source register instead. */
3102 pliw2->src = pliw1->src;
3103 return true;
3104 }
3105 return false;
3106 }
3107
3108 /* Everything else is OK. */
3109 return true;
3110 }
3111
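/* For example (a sketch, not from the original sources): a MOV
   "d0 -> d1" followed by an ADD that reads d1 can be bundled, with
   the ADD rewritten to read d0 directly; an ADD that writes d2
   followed by a compare of d2 cannot, since the compare would see
   the old value of d2.  */
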
3112 /* Combine pairs of insns into LIW bundles. */
3113
3114 static void
3115 mn10300_bundle_liw (void)
3116 {
3117 rtx_insn *r;
3118
3119 for (r = get_insns (); r != NULL; r = next_nonnote_nondebug_insn (r))
3120 {
3121 rtx_insn *insn1, *insn2;
3122 struct liw_data liw1, liw2;
3123
3124 insn1 = r;
3125 if (! extract_bundle (insn1, & liw1))
3126 continue;
3127
3128 insn2 = next_nonnote_nondebug_insn (insn1);
3129 if (! extract_bundle (insn2, & liw2))
3130 continue;
3131
3132 /* Check for source/destination overlap. */
3133 if (! check_liw_constraints (& liw1, & liw2))
3134 continue;
3135
3136 if (liw1.slot == LIW_OP2 || liw2.slot == LIW_OP1)
3137 {
3138 struct liw_data temp;
3139
3140 temp = liw1;
3141 liw1 = liw2;
3142 liw2 = temp;
3143 }
3144
3145 delete_insn (insn2);
3146
3147 rtx insn2_pat;
3148 if (liw1.op == LIW_OP_CMP)
3149 insn2_pat = gen_cmp_liw (liw2.dest, liw2.src, liw1.dest, liw1.src,
3150 GEN_INT (liw2.op));
3151 else if (liw2.op == LIW_OP_CMP)
3152 insn2_pat = gen_liw_cmp (liw1.dest, liw1.src, liw2.dest, liw2.src,
3153 GEN_INT (liw1.op));
3154 else
3155 insn2_pat = gen_liw (liw1.dest, liw2.dest, liw1.src, liw2.src,
3156 GEN_INT (liw1.op), GEN_INT (liw2.op));
3157
3158 insn2 = emit_insn_after (insn2_pat, insn1);
3159 delete_insn (insn1);
3160 r = insn2;
3161 }
3162 }
3163
3164 #define DUMP(reason, insn) \
3165 do \
3166 { \
3167 if (dump_file) \
3168 { \
3169 fprintf (dump_file, reason "\n"); \
3170 if (insn != NULL_RTX) \
3171 print_rtl_single (dump_file, insn); \
3172 fprintf (dump_file, "\n"); \
3173 } \
3174 } \
3175 while (0)
3176
3177 /* Replace the BRANCH insn with a Lcc insn that goes to LABEL.
3178 Insert a SETLB insn just before LABEL. */
3179
3180 static void
3181 mn10300_insert_setlb_lcc (rtx label, rtx branch)
3182 {
3183 rtx lcc, comparison, cmp_reg;
3184
3185 if (LABEL_NUSES (label) > 1)
3186 {
3187 rtx_insn *insn;
3188
3189 /* This label is used both as an entry point to the loop
3190 and as a loop-back point for the loop. We need to separate
3191 these two roles so that the SETLB happens upon entry,
3192 but the loop-back does not go to the SETLB instruction. */
3193 DUMP ("Inserting SETLB insn after:", label);
3194 insn = emit_insn_after (gen_setlb (), label);
3195 label = gen_label_rtx ();
3196 emit_label_after (label, insn);
3197 DUMP ("Created new loop-back label:", label);
3198 }
3199 else
3200 {
3201 DUMP ("Inserting SETLB insn before:", label);
3202 emit_insn_before (gen_setlb (), label);
3203 }
3204
3205 comparison = XEXP (SET_SRC (PATTERN (branch)), 0);
3206 cmp_reg = XEXP (comparison, 0);
3207 gcc_assert (REG_P (cmp_reg));
3208
3209 /* The comparison must already have been split out of the branch;
3210 verify that it targets the flags register. */
3211 gcc_assert (REGNO (cmp_reg) == CC_REG);
3212
3213 if (GET_MODE (cmp_reg) == CC_FLOATmode)
3214 lcc = gen_FLcc (comparison, label);
3215 else
3216 lcc = gen_Lcc (comparison, label);
3217
3218 rtx_insn *jump = emit_jump_insn_before (lcc, branch);
3219 mark_jump_label (XVECEXP (PATTERN (lcc), 0, 0), jump, 0);
3220 JUMP_LABEL (jump) = label;
3221 DUMP ("Replacing branch insn...", branch);
3222 DUMP ("... with Lcc insn:", jump);
3223 delete_insn (branch);
3224 }
3225
3226 static bool
3227 mn10300_block_contains_call (basic_block block)
3228 {
3229 rtx_insn *insn;
3230
3231 FOR_BB_INSNS (block, insn)
3232 if (CALL_P (insn))
3233 return true;
3234
3235 return false;
3236 }
3237
3238 static bool
3239 mn10300_loop_contains_call_insn (loop_p loop)
3240 {
3241 basic_block * bbs;
3242 bool result = false;
3243 unsigned int i;
3244
3245 bbs = get_loop_body (loop);
3246
3247 for (i = 0; i < loop->num_nodes; i++)
3248 if (mn10300_block_contains_call (bbs[i]))
3249 {
3250 result = true;
3251 break;
3252 }
3253
3254 free (bbs);
3255 return result;
3256 }
3257
3258 static void
3259 mn10300_scan_for_setlb_lcc (void)
3260 {
3261 loop_p loop;
3262
3263 DUMP ("Looking for loops that can use the SETLB insn", NULL_RTX);
3264
3265 df_analyze ();
3266 compute_bb_for_insn ();
3267
3268 /* Find the loops. */
3269 loop_optimizer_init (AVOID_CFG_MODIFICATIONS);
3270
3271 /* FIXME: For now we only investigate innermost loops. In practice however
3272 if an inner loop is not suitable for use with the SETLB/Lcc insns, it may
3273 be the case that its parent loop is suitable. Thus we should check all
3274 loops, but work from the innermost outwards. */
3275 FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
3276 {
3277 const char * reason = NULL;
3278
3279 /* Check to see if we can modify this loop. If we cannot
3280 then set 'reason' to describe why it could not be done. */
3281 if (loop->latch == NULL)
3282 reason = "it contains multiple latches";
3283 else if (loop->header != loop->latch)
3284 /* FIXME: We could handle loops that span multiple blocks,
3285 but this requires a lot more work tracking down the branches
3286 that need altering, so for now keep things simple. */
3287 reason = "the loop spans multiple blocks";
3288 else if (mn10300_loop_contains_call_insn (loop))
3289 reason = "it contains CALL insns";
3290 else
3291 {
3292 rtx_insn *branch = BB_END (loop->latch);
3293
3294 gcc_assert (JUMP_P (branch));
3295 if (single_set (branch) == NULL_RTX || ! any_condjump_p (branch))
3296 /* We cannot optimize tablejumps and the like. */
3297 /* FIXME: We could handle unconditional jumps. */
3298 reason = "it is not a simple loop";
3299 else
3300 {
3301 rtx_insn *label;
3302
3303 if (dump_file)
3304 flow_loop_dump (loop, dump_file, NULL, 0);
3305
3306 label = BB_HEAD (loop->header);
3307 gcc_assert (LABEL_P (label));
3308
3309 mn10300_insert_setlb_lcc (label, branch);
3310 }
3311 }
3312
3313 if (dump_file && reason != NULL)
3314 fprintf (dump_file, "Loop starting with insn %d is not suitable because %s\n",
3315 INSN_UID (BB_HEAD (loop->header)),
3316 reason);
3317 }
3318
3319 loop_optimizer_finalize ();
3320
3321 df_finish_pass (false);
3322
3323 DUMP ("SETLB scan complete", NULL_RTX);
3324 }
3325
3326 static void
3327 mn10300_reorg (void)
3328 {
3329 /* These are optimizations, so only run them if optimizing. */
3330 if (TARGET_AM33 && (optimize > 0 || optimize_size))
3331 {
3332 if (TARGET_ALLOW_SETLB)
3333 mn10300_scan_for_setlb_lcc ();
3334
3335 if (TARGET_ALLOW_LIW)
3336 mn10300_bundle_liw ();
3337 }
3338 }
3339 \f
3340 /* Initialize the GCC target structure. */
3341
3342 #undef TARGET_MACHINE_DEPENDENT_REORG
3343 #define TARGET_MACHINE_DEPENDENT_REORG mn10300_reorg
3344
3345 #undef TARGET_ASM_ALIGNED_HI_OP
3346 #define TARGET_ASM_ALIGNED_HI_OP "\t.hword\t"
3347
3348 #undef TARGET_LEGITIMIZE_ADDRESS
3349 #define TARGET_LEGITIMIZE_ADDRESS mn10300_legitimize_address
3350
3351 #undef TARGET_ADDRESS_COST
3352 #define TARGET_ADDRESS_COST mn10300_address_cost
3353 #undef TARGET_REGISTER_MOVE_COST
3354 #define TARGET_REGISTER_MOVE_COST mn10300_register_move_cost
3355 #undef TARGET_MEMORY_MOVE_COST
3356 #define TARGET_MEMORY_MOVE_COST mn10300_memory_move_cost
3357 #undef TARGET_RTX_COSTS
3358 #define TARGET_RTX_COSTS mn10300_rtx_costs
3359
3360 #undef TARGET_ASM_FILE_START
3361 #define TARGET_ASM_FILE_START mn10300_file_start
3362 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
3363 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
3364
3365 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
3366 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA mn10300_asm_output_addr_const_extra
3367
3368 #undef TARGET_OPTION_OVERRIDE
3369 #define TARGET_OPTION_OVERRIDE mn10300_option_override
3370
3371 #undef TARGET_ENCODE_SECTION_INFO
3372 #define TARGET_ENCODE_SECTION_INFO mn10300_encode_section_info
3373
3374 #undef TARGET_PROMOTE_PROTOTYPES
3375 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
3376 #undef TARGET_RETURN_IN_MEMORY
3377 #define TARGET_RETURN_IN_MEMORY mn10300_return_in_memory
3378 #undef TARGET_PASS_BY_REFERENCE
3379 #define TARGET_PASS_BY_REFERENCE mn10300_pass_by_reference
3380 #undef TARGET_CALLEE_COPIES
3381 #define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
3382 #undef TARGET_ARG_PARTIAL_BYTES
3383 #define TARGET_ARG_PARTIAL_BYTES mn10300_arg_partial_bytes
3384 #undef TARGET_FUNCTION_ARG
3385 #define TARGET_FUNCTION_ARG mn10300_function_arg
3386 #undef TARGET_FUNCTION_ARG_ADVANCE
3387 #define TARGET_FUNCTION_ARG_ADVANCE mn10300_function_arg_advance
3388
3389 #undef TARGET_EXPAND_BUILTIN_SAVEREGS
3390 #define TARGET_EXPAND_BUILTIN_SAVEREGS mn10300_builtin_saveregs
3391 #undef TARGET_EXPAND_BUILTIN_VA_START
3392 #define TARGET_EXPAND_BUILTIN_VA_START mn10300_va_start
3393
3394 #undef TARGET_CASE_VALUES_THRESHOLD
3395 #define TARGET_CASE_VALUES_THRESHOLD mn10300_case_values_threshold
3396
3397 #undef TARGET_LEGITIMATE_ADDRESS_P
3398 #define TARGET_LEGITIMATE_ADDRESS_P mn10300_legitimate_address_p
3399 #undef TARGET_DELEGITIMIZE_ADDRESS
3400 #define TARGET_DELEGITIMIZE_ADDRESS mn10300_delegitimize_address
3401 #undef TARGET_LEGITIMATE_CONSTANT_P
3402 #define TARGET_LEGITIMATE_CONSTANT_P mn10300_legitimate_constant_p
3403
3404 #undef TARGET_PREFERRED_RELOAD_CLASS
3405 #define TARGET_PREFERRED_RELOAD_CLASS mn10300_preferred_reload_class
3406 #undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
3407 #define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS \
3408 mn10300_preferred_output_reload_class
3409 #undef TARGET_SECONDARY_RELOAD
3410 #define TARGET_SECONDARY_RELOAD mn10300_secondary_reload
3411
3412 #undef TARGET_TRAMPOLINE_INIT
3413 #define TARGET_TRAMPOLINE_INIT mn10300_trampoline_init
3414
3415 #undef TARGET_FUNCTION_VALUE
3416 #define TARGET_FUNCTION_VALUE mn10300_function_value
3417 #undef TARGET_LIBCALL_VALUE
3418 #define TARGET_LIBCALL_VALUE mn10300_libcall_value
3419
3420 #undef TARGET_ASM_OUTPUT_MI_THUNK
3421 #define TARGET_ASM_OUTPUT_MI_THUNK mn10300_asm_output_mi_thunk
3422 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
3423 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK mn10300_can_output_mi_thunk
3424
3425 #undef TARGET_SCHED_ADJUST_COST
3426 #define TARGET_SCHED_ADJUST_COST mn10300_adjust_sched_cost
3427
3428 #undef TARGET_CONDITIONAL_REGISTER_USAGE
3429 #define TARGET_CONDITIONAL_REGISTER_USAGE mn10300_conditional_register_usage
3430
3431 #undef TARGET_MD_ASM_CLOBBERS
3432 #define TARGET_MD_ASM_CLOBBERS mn10300_md_asm_clobbers
3433
3434 #undef TARGET_FLAGS_REGNUM
3435 #define TARGET_FLAGS_REGNUM CC_REG
3436
3437 struct gcc_target targetm = TARGET_INITIALIZER;