1 /* Subroutines for insn-output.c for Matsushita MN10300 series
2 Copyright (C) 1996-2014 Free Software Foundation, Inc.
3 Contributed by Jeff Law (law@cygnus.com).
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "stor-layout.h"
28 #include "varasm.h"
29 #include "calls.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "output.h"
35 #include "insn-attr.h"
36 #include "flags.h"
37 #include "recog.h"
38 #include "reload.h"
39 #include "expr.h"
40 #include "optabs.h"
41 #include "function.h"
42 #include "obstack.h"
43 #include "diagnostic-core.h"
44 #include "tm_p.h"
45 #include "tm-constrs.h"
46 #include "target.h"
47 #include "target-def.h"
48 #include "df.h"
49 #include "opts.h"
50 #include "cfgloop.h"
51 #include "dumpfile.h"
52 #include "builtins.h"
53
54 /* This is used in the am33_2.0-linux-gnu port, in which global symbol
55 names are not prefixed by underscores, to tell whether to prefix a
56 label with a plus sign or not, so that the assembler can tell
57 symbol names from register names. */
58 int mn10300_protect_label;
59
60 /* Selected processor type for tuning. */
61 enum processor_type mn10300_tune_cpu = PROCESSOR_DEFAULT;
62
63 #define CC_FLAG_Z 1
64 #define CC_FLAG_N 2
65 #define CC_FLAG_C 4
66 #define CC_FLAG_V 8
67
68 static int cc_flags_for_mode (enum machine_mode);
69 static int cc_flags_for_code (enum rtx_code);
70 \f
71 /* Implement TARGET_OPTION_OVERRIDE. */
72 static void
73 mn10300_option_override (void)
74 {
75 if (TARGET_AM33)
76 target_flags &= ~MASK_MULT_BUG;
77 else
78 {
79 /* Disable scheduling for the MN10300 as we do
80 not have timing information available for it. */
81 flag_schedule_insns = 0;
82 flag_schedule_insns_after_reload = 0;
83
84 /* Force enable splitting of wide types, as otherwise it is trivial
85 to run out of registers. Indeed, this works so well that register
86 allocation problems are now more common *without* optimization,
87 when this flag is not enabled by default. */
88 flag_split_wide_types = 1;
89 }
90
91 if (mn10300_tune_string)
92 {
93 if (strcasecmp (mn10300_tune_string, "mn10300") == 0)
94 mn10300_tune_cpu = PROCESSOR_MN10300;
95 else if (strcasecmp (mn10300_tune_string, "am33") == 0)
96 mn10300_tune_cpu = PROCESSOR_AM33;
97 else if (strcasecmp (mn10300_tune_string, "am33-2") == 0)
98 mn10300_tune_cpu = PROCESSOR_AM33_2;
99 else if (strcasecmp (mn10300_tune_string, "am34") == 0)
100 mn10300_tune_cpu = PROCESSOR_AM34;
101 else
102 error ("-mtune= expects mn10300, am33, am33-2, or am34");
103 }
104 }
105
106 static void
107 mn10300_file_start (void)
108 {
109 default_file_start ();
110
111 if (TARGET_AM33_2)
112 fprintf (asm_out_file, "\t.am33_2\n");
113 else if (TARGET_AM33)
114 fprintf (asm_out_file, "\t.am33\n");
115 }
116 \f
117 /* Note: This list must match the liw_op attribute in mn10300.md. */
118
119 static const char *liw_op_names[] =
120 {
121 "add", "cmp", "sub", "mov",
122 "and", "or", "xor",
123 "asr", "lsr", "asl",
124 "none", "max"
125 };
126
127 /* Print operand X using operand code CODE to assembly language output file
128 FILE. */
129
130 void
131 mn10300_print_operand (FILE *file, rtx x, int code)
132 {
133 switch (code)
134 {
135 case 'W':
136 {
137 unsigned int liw_op = UINTVAL (x);
138
139 gcc_assert (TARGET_ALLOW_LIW);
140 gcc_assert (liw_op < LIW_OP_MAX);
141 fputs (liw_op_names[liw_op], file);
142 break;
143 }
144
145 case 'b':
146 case 'B':
147 {
148 enum rtx_code cmp = GET_CODE (x);
149 enum machine_mode mode = GET_MODE (XEXP (x, 0));
150 const char *str;
151 int have_flags;
152
153 if (code == 'B')
154 cmp = reverse_condition (cmp);
155 have_flags = cc_flags_for_mode (mode);
156
157 switch (cmp)
158 {
159 case NE:
160 str = "ne";
161 break;
162 case EQ:
163 str = "eq";
164 break;
165 case GE:
166 /* bge is smaller than bnc. */
167 str = (have_flags & CC_FLAG_V ? "ge" : "nc");
168 break;
169 case LT:
170 str = (have_flags & CC_FLAG_V ? "lt" : "ns");
171 break;
172 case GT:
173 str = "gt";
174 break;
175 case LE:
176 str = "le";
177 break;
178 case GEU:
179 str = "cc";
180 break;
181 case GTU:
182 str = "hi";
183 break;
184 case LEU:
185 str = "ls";
186 break;
187 case LTU:
188 str = "cs";
189 break;
190 case ORDERED:
191 str = "lge";
192 break;
193 case UNORDERED:
194 str = "uo";
195 break;
196 case LTGT:
197 str = "lg";
198 break;
199 case UNEQ:
200 str = "ue";
201 break;
202 case UNGE:
203 str = "uge";
204 break;
205 case UNGT:
206 str = "ug";
207 break;
208 case UNLE:
209 str = "ule";
210 break;
211 case UNLT:
212 str = "ul";
213 break;
214 default:
215 gcc_unreachable ();
216 }
217
218 gcc_checking_assert ((cc_flags_for_code (cmp) & ~have_flags) == 0);
219 fputs (str, file);
220 }
221 break;
222
223 case 'C':
224 /* This is used for the operand to a call instruction;
225 if it's a REG, enclose it in parens, else output
226 the operand normally. */
227 if (REG_P (x))
228 {
229 fputc ('(', file);
230 mn10300_print_operand (file, x, 0);
231 fputc (')', file);
232 }
233 else
234 mn10300_print_operand (file, x, 0);
235 break;
236
237 case 'D':
238 switch (GET_CODE (x))
239 {
240 case MEM:
241 fputc ('(', file);
242 output_address (XEXP (x, 0));
243 fputc (')', file);
244 break;
245
246 case REG:
247 fprintf (file, "fd%d", REGNO (x) - 18);
248 break;
249
250 default:
251 gcc_unreachable ();
252 }
253 break;
254
255 /* This is the least significant word in a 64-bit value. */
256 case 'L':
257 switch (GET_CODE (x))
258 {
259 case MEM:
260 fputc ('(', file);
261 output_address (XEXP (x, 0));
262 fputc (')', file);
263 break;
264
265 case REG:
266 fprintf (file, "%s", reg_names[REGNO (x)]);
267 break;
268
269 case SUBREG:
270 fprintf (file, "%s", reg_names[subreg_regno (x)]);
271 break;
272
273 case CONST_DOUBLE:
274 {
275 long val[2];
276 REAL_VALUE_TYPE rv;
277
278 switch (GET_MODE (x))
279 {
280 case DFmode:
281 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
282 REAL_VALUE_TO_TARGET_DOUBLE (rv, val);
283 fprintf (file, "0x%lx", val[0]);
284 break;
285 case SFmode:
286 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
287 REAL_VALUE_TO_TARGET_SINGLE (rv, val[0]);
288 fprintf (file, "0x%lx", val[0]);
289 break;
290 case VOIDmode:
291 case DImode:
292 mn10300_print_operand_address (file,
293 GEN_INT (CONST_DOUBLE_LOW (x)));
294 break;
295 default:
296 break;
297 }
298 break;
299 }
300
301 case CONST_INT:
302 {
303 rtx low, high;
304 split_double (x, &low, &high);
305 fprintf (file, "%ld", (long)INTVAL (low));
306 break;
307 }
308
309 default:
310 gcc_unreachable ();
311 }
312 break;
313
314 /* Similarly, but for the most significant word. */
315 case 'H':
316 switch (GET_CODE (x))
317 {
318 case MEM:
319 fputc ('(', file);
320 x = adjust_address (x, SImode, 4);
321 output_address (XEXP (x, 0));
322 fputc (')', file);
323 break;
324
325 case REG:
326 fprintf (file, "%s", reg_names[REGNO (x) + 1]);
327 break;
328
329 case SUBREG:
330 fprintf (file, "%s", reg_names[subreg_regno (x) + 1]);
331 break;
332
333 case CONST_DOUBLE:
334 {
335 long val[2];
336 REAL_VALUE_TYPE rv;
337
338 switch (GET_MODE (x))
339 {
340 case DFmode:
341 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
342 REAL_VALUE_TO_TARGET_DOUBLE (rv, val);
343 fprintf (file, "0x%lx", val[1]);
344 break;
345 case SFmode:
346 gcc_unreachable ();
347 case VOIDmode:
348 case DImode:
349 mn10300_print_operand_address (file,
350 GEN_INT (CONST_DOUBLE_HIGH (x)));
351 break;
352 default:
353 break;
354 }
355 break;
356 }
357
358 case CONST_INT:
359 {
360 rtx low, high;
361 split_double (x, &low, &high);
362 fprintf (file, "%ld", (long)INTVAL (high));
363 break;
364 }
365
366 default:
367 gcc_unreachable ();
368 }
369 break;
370
371 case 'A':
372 fputc ('(', file);
373 if (REG_P (XEXP (x, 0)))
374 output_address (gen_rtx_PLUS (SImode, XEXP (x, 0), const0_rtx));
375 else
376 output_address (XEXP (x, 0));
377 fputc (')', file);
378 break;
379
380 case 'N':
381 gcc_assert (INTVAL (x) >= -128 && INTVAL (x) <= 255);
382 fprintf (file, "%d", (int)((~INTVAL (x)) & 0xff));
383 break;
384
385 case 'U':
386 gcc_assert (INTVAL (x) >= -128 && INTVAL (x) <= 255);
387 fprintf (file, "%d", (int)(INTVAL (x) & 0xff));
388 break;
389
390 /* For shift counts. The hardware ignores the upper bits of
391 any immediate, but the assembler will flag an out of range
392 shift count as an error. So we mask off the high bits
393 of the immediate here. */
394 case 'S':
395 if (CONST_INT_P (x))
396 {
397 fprintf (file, "%d", (int)(INTVAL (x) & 0x1f));
398 break;
399 }
400 /* FALL THROUGH */
401
402 default:
403 switch (GET_CODE (x))
404 {
405 case MEM:
406 fputc ('(', file);
407 output_address (XEXP (x, 0));
408 fputc (')', file);
409 break;
410
411 case PLUS:
412 output_address (x);
413 break;
414
415 case REG:
416 fprintf (file, "%s", reg_names[REGNO (x)]);
417 break;
418
419 case SUBREG:
420 fprintf (file, "%s", reg_names[subreg_regno (x)]);
421 break;
422
423 /* This will only be single precision. */
424 case CONST_DOUBLE:
425 {
426 unsigned long val;
427 REAL_VALUE_TYPE rv;
428
429 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
430 REAL_VALUE_TO_TARGET_SINGLE (rv, val);
431 fprintf (file, "0x%lx", val);
432 break;
433 }
434
435 case CONST_INT:
436 case SYMBOL_REF:
437 case CONST:
438 case LABEL_REF:
439 case CODE_LABEL:
440 case UNSPEC:
441 mn10300_print_operand_address (file, x);
442 break;
443 default:
444 gcc_unreachable ();
445 }
446 break;
447 }
448 }
449
450 /* Output assembly language output for the address ADDR to FILE. */
451
452 void
453 mn10300_print_operand_address (FILE *file, rtx addr)
454 {
455 switch (GET_CODE (addr))
456 {
457 case POST_INC:
458 mn10300_print_operand (file, XEXP (addr, 0), 0);
459 fputc ('+', file);
460 break;
461
462 case POST_MODIFY:
463 mn10300_print_operand (file, XEXP (addr, 0), 0);
464 fputc ('+', file);
465 fputc (',', file);
466 mn10300_print_operand (file, XEXP (addr, 1), 0);
467 break;
468
469 case REG:
470 mn10300_print_operand (file, addr, 0);
471 break;
472 case PLUS:
473 {
474 rtx base = XEXP (addr, 0);
475 rtx index = XEXP (addr, 1);
476
477 if (REG_P (index) && !REG_OK_FOR_INDEX_P (index))
478 {
479 rtx x = base;
480 base = index;
481 index = x;
482
483 gcc_assert (REG_P (index) && REG_OK_FOR_INDEX_P (index));
484 }
485 gcc_assert (REG_OK_FOR_BASE_P (base));
486
487 mn10300_print_operand (file, index, 0);
488 fputc (',', file);
489 mn10300_print_operand (file, base, 0);
490 break;
491 }
492 case SYMBOL_REF:
493 output_addr_const (file, addr);
494 break;
495 default:
496 output_addr_const (file, addr);
497 break;
498 }
499 }
500
501 /* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA.
502
503 Used for PIC-specific UNSPECs. */
504
505 static bool
506 mn10300_asm_output_addr_const_extra (FILE *file, rtx x)
507 {
508 if (GET_CODE (x) == UNSPEC)
509 {
510 switch (XINT (x, 1))
511 {
512 case UNSPEC_PIC:
513 /* GLOBAL_OFFSET_TABLE or local symbols, no suffix. */
514 output_addr_const (file, XVECEXP (x, 0, 0));
515 break;
516 case UNSPEC_GOT:
517 output_addr_const (file, XVECEXP (x, 0, 0));
518 fputs ("@GOT", file);
519 break;
520 case UNSPEC_GOTOFF:
521 output_addr_const (file, XVECEXP (x, 0, 0));
522 fputs ("@GOTOFF", file);
523 break;
524 case UNSPEC_PLT:
525 output_addr_const (file, XVECEXP (x, 0, 0));
526 fputs ("@PLT", file);
527 break;
528 case UNSPEC_GOTSYM_OFF:
529 assemble_name (file, GOT_SYMBOL_NAME);
530 fputs ("-(", file);
531 output_addr_const (file, XVECEXP (x, 0, 0));
532 fputs ("-.)", file);
533 break;
534 default:
535 return false;
536 }
537 return true;
538 }
539 else
540 return false;
541 }
542
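/* For illustration (a sketch, not an exhaustive list): given
   (unspec [(symbol_ref "foo")] UNSPEC_GOT) the hook above prints
   "foo@GOT", and given (unspec [(symbol_ref "foo")] UNSPEC_GOTSYM_OFF)
   it prints "_GLOBAL_OFFSET_TABLE_-(foo-.)", assuming GOT_SYMBOL_NAME
   expands to the usual "_GLOBAL_OFFSET_TABLE_". */
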
543 /* Count the number of FP registers that have to be saved. */
544 static int
545 fp_regs_to_save (void)
546 {
547 int i, n = 0;
548
549 if (! TARGET_AM33_2)
550 return 0;
551
552 for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
553 if (df_regs_ever_live_p (i) && ! call_really_used_regs[i])
554 ++n;
555
556 return n;
557 }
558
559 /* Print a set of registers in the format required by "movm" and "ret".
560 Register K is saved if bit K of MASK is set. The data and address
561 registers can be stored individually, but the extended registers cannot.
562 We assume that the mask already takes that into account. For instance,
563 bits 14 to 17 must have the same value. */
564
565 void
566 mn10300_print_reg_list (FILE *file, int mask)
567 {
568 int need_comma;
569 int i;
570
571 need_comma = 0;
572 fputc ('[', file);
573
574 for (i = 0; i < FIRST_EXTENDED_REGNUM; i++)
575 if ((mask & (1 << i)) != 0)
576 {
577 if (need_comma)
578 fputc (',', file);
579 fputs (reg_names [i], file);
580 need_comma = 1;
581 }
582
583 if ((mask & 0x3c000) != 0)
584 {
585 gcc_assert ((mask & 0x3c000) == 0x3c000);
586 if (need_comma)
587 fputc (',', file);
588 fputs ("exreg1", file);
589 need_comma = 1;
590 }
591
592 fputc (']', file);
593 }
594
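/* A worked example (assuming the usual numbering, with d0 at regno 0,
   a0 at regno 4 and the extended registers at regno 10 and up): a MASK
   of 0x3c0cc has bits 2, 3, 6, 7 and 14-17 set, so the function above
   prints "[d2,d3,a2,a3,exreg1]". */
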
595 /* If the MDR register is never clobbered, we can use the RETF instruction
596 which takes the address from the MDR register. This is 3 cycles faster
597 than having to load the address from the stack. */
598
599 bool
600 mn10300_can_use_retf_insn (void)
601 {
602 /* Don't bother if we're not optimizing. In this case we won't
603 have proper access to df_regs_ever_live_p. */
604 if (!optimize)
605 return false;
606
607 /* EH returns alter the saved return address; MDR is not current. */
608 if (crtl->calls_eh_return)
609 return false;
610
611 /* Obviously not if MDR is ever clobbered. */
612 if (df_regs_ever_live_p (MDR_REG))
613 return false;
614
615 /* ??? Careful not to use this during expand_epilogue etc. */
616 gcc_assert (!in_sequence_p ());
617 return leaf_function_p ();
618 }
619
620 bool
621 mn10300_can_use_rets_insn (void)
622 {
623 return !mn10300_initial_offset (ARG_POINTER_REGNUM, STACK_POINTER_REGNUM);
624 }
625
626 /* Returns the set of live, callee-saved registers as a bitmask. The
627 callee-saved extended registers cannot be stored individually, so
628 all of them will be included in the mask if any one of them is used.
629 Also returns the number of bytes in the registers in the mask if
630 BYTES_SAVED is not NULL. */
631
632 unsigned int
633 mn10300_get_live_callee_saved_regs (unsigned int * bytes_saved)
634 {
635 int mask;
636 int i;
637 unsigned int count;
638
639 count = mask = 0;
640 for (i = 0; i <= LAST_EXTENDED_REGNUM; i++)
641 if (df_regs_ever_live_p (i) && ! call_really_used_regs[i])
642 {
643 mask |= (1 << i);
644 ++ count;
645 }
646
647 if ((mask & 0x3c000) != 0)
648 {
649 for (i = 0x04000; i < 0x40000; i <<= 1)
650 if ((mask & i) == 0)
651 ++ count;
652
653 mask |= 0x3c000;
654 }
655
656 if (bytes_saved)
657 * bytes_saved = count * UNITS_PER_WORD;
658
659 return mask;
660 }
661
662 static rtx
663 F (rtx r)
664 {
665 RTX_FRAME_RELATED_P (r) = 1;
666 return r;
667 }
668
669 /* Generate an instruction that pushes several registers onto the stack.
670 Register K will be saved if bit K in MASK is set. The function does
671 nothing if MASK is zero.
672
673 To be compatible with the "movm" instruction, the lowest-numbered
674 register must be stored in the lowest slot. If MASK is the set
675 { R1,...,RN }, where R1...RN are ordered least first, the generated
676 instruction will have the form:
677
678 (parallel
679 (set (reg:SI 9) (plus:SI (reg:SI 9) (const_int -N*4)))
680 (set (mem:SI (plus:SI (reg:SI 9)
681 (const_int -1*4)))
682 (reg:SI RN))
683 ...
684 (set (mem:SI (plus:SI (reg:SI 9)
685 (const_int -N*4)))
686 (reg:SI R1))) */
687
688 static void
689 mn10300_gen_multiple_store (unsigned int mask)
690 {
691 /* The order in which registers are stored, from SP-4 through SP-N*4. */
692 static const unsigned int store_order[8] = {
693 /* e2, e3: never saved */
694 FIRST_EXTENDED_REGNUM + 4,
695 FIRST_EXTENDED_REGNUM + 5,
696 FIRST_EXTENDED_REGNUM + 6,
697 FIRST_EXTENDED_REGNUM + 7,
698 /* e0, e1, mdrq, mcrh, mcrl, mcvf: never saved. */
699 FIRST_DATA_REGNUM + 2,
700 FIRST_DATA_REGNUM + 3,
701 FIRST_ADDRESS_REGNUM + 2,
702 FIRST_ADDRESS_REGNUM + 3,
703 /* d0, d1, a0, a1, mdr, lir, lar: never saved. */
704 };
705
706 rtx x, elts[9];
707 unsigned int i;
708 int count;
709
710 if (mask == 0)
711 return;
712
713 for (i = count = 0; i < ARRAY_SIZE (store_order); ++i)
714 {
715 unsigned regno = store_order[i];
716
717 if (((mask >> regno) & 1) == 0)
718 continue;
719
720 ++count;
721 x = plus_constant (Pmode, stack_pointer_rtx, count * -4);
722 x = gen_frame_mem (SImode, x);
723 x = gen_rtx_SET (VOIDmode, x, gen_rtx_REG (SImode, regno));
724 elts[count] = F (x);
725
726 /* Remove the register from the mask so that... */
727 mask &= ~(1u << regno);
728 }
729
730 /* ... we can make sure that we didn't try to use a register
731 not listed in the store order. */
732 gcc_assert (mask == 0);
733
734 /* Create the instruction that updates the stack pointer. */
735 x = plus_constant (Pmode, stack_pointer_rtx, count * -4);
736 x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
737 elts[0] = F (x);
738
739 /* We need one PARALLEL element to update the stack pointer and
740 an additional element for each register that is stored. */
741 x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (count + 1, elts));
742 F (emit_insn (x));
743 }
744
745 static inline unsigned int
746 popcount (unsigned int mask)
747 {
748 unsigned int count = 0;
749
750 while (mask)
751 {
752 ++ count;
753 mask &= ~ (mask & - mask);
754 }
755 return count;
756 }
757
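/* For instance, popcount (0x3c0cc) -- the example save mask above --
   is 8: each iteration clears the lowest set bit (mask & - mask
   isolates it), so the loop runs once per set bit. */
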
758 void
759 mn10300_expand_prologue (void)
760 {
761 HOST_WIDE_INT size = mn10300_frame_size ();
762 unsigned int mask;
763
764 mask = mn10300_get_live_callee_saved_regs (NULL);
765 /* If we use any of the callee-saved registers, save them now. */
766 mn10300_gen_multiple_store (mask);
767
768 if (flag_stack_usage_info)
769 current_function_static_stack_size = size + popcount (mask) * 4;
770
771 if (TARGET_AM33_2 && fp_regs_to_save ())
772 {
773 int num_regs_to_save = fp_regs_to_save (), i;
774 HOST_WIDE_INT xsize;
775 enum
776 {
777 save_sp_merge,
778 save_sp_no_merge,
779 save_sp_partial_merge,
780 save_a0_merge,
781 save_a0_no_merge
782 } strategy;
783 unsigned int strategy_size = (unsigned)-1, this_strategy_size;
784 rtx reg;
785
786 if (flag_stack_usage_info)
787 current_function_static_stack_size += num_regs_to_save * 4;
788
789 /* We have several different strategies to save FP registers.
790 We can store them using SP offsets, which is beneficial if
791 there are just a few registers to save, or we can use `a0' in
792 post-increment mode (`a0' is the only call-clobbered address
793 register that is never used to pass information to a
794 function). Furthermore, if we don't need a frame pointer, we
795 can merge the two SP adds into a single one, but this isn't
796 always beneficial; sometimes we can just split the two adds
797 so that we don't exceed a 16-bit constant size. The code
798 below will select which strategy to use, so as to generate the
799 smallest code. Ties are broken in favor of shorter sequences
800 (in terms of number of instructions). */
801
802 #define SIZE_ADD_AX(S) ((((S) >= (1 << 15)) || ((S) < -(1 << 15))) ? 6 \
803 : (((S) >= (1 << 7)) || ((S) < -(1 << 7))) ? 4 : 2)
804 #define SIZE_ADD_SP(S) ((((S) >= (1 << 15)) || ((S) < -(1 << 15))) ? 6 \
805 : (((S) >= (1 << 7)) || ((S) < -(1 << 7))) ? 4 : 3)
806
807 /* We add 0 * (S) in two places to promote to the type of S,
808 so that all arms of the conditional have the same type. */
809 #define SIZE_FMOV_LIMIT(S,N,L,SIZE1,SIZE2,ELSE) \
810 (((S) >= (L)) ? 0 * (S) + (SIZE1) * (N) \
811 : ((S) + 4 * (N) >= (L)) ? (((L) - (S)) / 4 * (SIZE2) \
812 + ((S) + 4 * (N) - (L)) / 4 * (SIZE1)) \
813 : 0 * (S) + (ELSE))
814 #define SIZE_FMOV_SP_(S,N) \
815 (SIZE_FMOV_LIMIT ((S), (N), (1 << 24), 7, 6, \
816 SIZE_FMOV_LIMIT ((S), (N), (1 << 8), 6, 4, \
817 (S) ? 4 * (N) : 3 + 4 * ((N) - 1))))
818 #define SIZE_FMOV_SP(S,N) (SIZE_FMOV_SP_ ((unsigned HOST_WIDE_INT)(S), (N)))
819
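/* Some worked values as a sanity check, derived purely from the
   definitions above (sizes in bytes): SIZE_ADD_SP (-4) is 3 and
   SIZE_ADD_AX (-4) is 2, since -4 fits in 8 bits; SIZE_ADD_SP (-200)
   is 4 (16-bit immediate); SIZE_ADD_SP (-40000) is 6 (32-bit
   immediate). Likewise SIZE_FMOV_SP (0, 2) is 7: the first fmov can
   use the 3-byte (sp) form, the second needs the 4-byte (d8,sp)
   form. */
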
820 /* Consider alternative save_sp_merge only if we don't need the
821 frame pointer and size is nonzero. */
822 if (! frame_pointer_needed && size)
823 {
824 /* Insn: add -(size + 4 * num_regs_to_save), sp. */
825 this_strategy_size = SIZE_ADD_SP (-(size + 4 * num_regs_to_save));
826 /* Insn: fmov fs#, (##, sp), for each fs# to be saved. */
827 this_strategy_size += SIZE_FMOV_SP (size, num_regs_to_save);
828
829 if (this_strategy_size < strategy_size)
830 {
831 strategy = save_sp_merge;
832 strategy_size = this_strategy_size;
833 }
834 }
835
836 /* Consider alternative save_sp_no_merge unconditionally. */
837 /* Insn: add -4 * num_regs_to_save, sp. */
838 this_strategy_size = SIZE_ADD_SP (-4 * num_regs_to_save);
839 /* Insn: fmov fs#, (##, sp), for each fs# to be saved. */
840 this_strategy_size += SIZE_FMOV_SP (0, num_regs_to_save);
841 if (size)
842 {
843 /* Insn: add -size, sp. */
844 this_strategy_size += SIZE_ADD_SP (-size);
845 }
846
847 if (this_strategy_size < strategy_size)
848 {
849 strategy = save_sp_no_merge;
850 strategy_size = this_strategy_size;
851 }
852
853 /* Consider alternative save_sp_partial_merge only if we don't
854 need a frame pointer and size is reasonably large. */
855 if (! frame_pointer_needed && size + 4 * num_regs_to_save > 128)
856 {
857 /* Insn: add -128, sp. */
858 this_strategy_size = SIZE_ADD_SP (-128);
859 /* Insn: fmov fs#, (##, sp), for each fs# to be saved. */
860 this_strategy_size += SIZE_FMOV_SP (128 - 4 * num_regs_to_save,
861 num_regs_to_save);
862 if (size)
863 {
864 /* Insn: add 128-size, sp. */
865 this_strategy_size += SIZE_ADD_SP (128 - size);
866 }
867
868 if (this_strategy_size < strategy_size)
869 {
870 strategy = save_sp_partial_merge;
871 strategy_size = this_strategy_size;
872 }
873 }
874
875 /* Consider alternative save_a0_merge only if we don't need a
876 frame pointer, size is nonzero and the user hasn't
877 changed the calling conventions of a0. */
878 if (! frame_pointer_needed && size
879 && call_really_used_regs [FIRST_ADDRESS_REGNUM]
880 && ! fixed_regs[FIRST_ADDRESS_REGNUM])
881 {
882 /* Insn: add -(size + 4 * num_regs_to_save), sp. */
883 this_strategy_size = SIZE_ADD_SP (-(size + 4 * num_regs_to_save));
884 /* Insn: mov sp, a0. */
885 this_strategy_size++;
886 if (size)
887 {
888 /* Insn: add size, a0. */
889 this_strategy_size += SIZE_ADD_AX (size);
890 }
891 /* Insn: fmov fs#, (a0+), for each fs# to be saved. */
892 this_strategy_size += 3 * num_regs_to_save;
893
894 if (this_strategy_size < strategy_size)
895 {
896 strategy = save_a0_merge;
897 strategy_size = this_strategy_size;
898 }
899 }
900
901 /* Consider alternative save_a0_no_merge if the user hasn't
902 changed the calling conventions of a0. */
903 if (call_really_used_regs [FIRST_ADDRESS_REGNUM]
904 && ! fixed_regs[FIRST_ADDRESS_REGNUM])
905 {
906 /* Insn: add -4 * num_regs_to_save, sp. */
907 this_strategy_size = SIZE_ADD_SP (-4 * num_regs_to_save);
908 /* Insn: mov sp, a0. */
909 this_strategy_size++;
910 /* Insn: fmov fs#, (a0+), for each fs# to be saved. */
911 this_strategy_size += 3 * num_regs_to_save;
912 if (size)
913 {
914 /* Insn: add -size, sp. */
915 this_strategy_size += SIZE_ADD_SP (-size);
916 }
917
918 if (this_strategy_size < strategy_size)
919 {
920 strategy = save_a0_no_merge;
921 strategy_size = this_strategy_size;
922 }
923 }
924
925 /* Emit the initial SP add, common to all strategies. */
926 switch (strategy)
927 {
928 case save_sp_no_merge:
929 case save_a0_no_merge:
930 F (emit_insn (gen_addsi3 (stack_pointer_rtx,
931 stack_pointer_rtx,
932 GEN_INT (-4 * num_regs_to_save))));
933 xsize = 0;
934 break;
935
936 case save_sp_partial_merge:
937 F (emit_insn (gen_addsi3 (stack_pointer_rtx,
938 stack_pointer_rtx,
939 GEN_INT (-128))));
940 xsize = 128 - 4 * num_regs_to_save;
941 size -= xsize;
942 break;
943
944 case save_sp_merge:
945 case save_a0_merge:
946 F (emit_insn (gen_addsi3 (stack_pointer_rtx,
947 stack_pointer_rtx,
948 GEN_INT (-(size + 4 * num_regs_to_save)))));
949 /* We'll have to adjust FP register saves according to the
950 frame size. */
951 xsize = size;
952 /* Since we've already created the stack frame, don't do it
953 again at the end of the function. */
954 size = 0;
955 break;
956
957 default:
958 gcc_unreachable ();
959 }
960
961 /* Now prepare register a0, if we have decided to use it. */
962 switch (strategy)
963 {
964 case save_sp_merge:
965 case save_sp_no_merge:
966 case save_sp_partial_merge:
967 reg = 0;
968 break;
969
970 case save_a0_merge:
971 case save_a0_no_merge:
972 reg = gen_rtx_REG (SImode, FIRST_ADDRESS_REGNUM);
973 F (emit_insn (gen_movsi (reg, stack_pointer_rtx)));
974 if (xsize)
975 F (emit_insn (gen_addsi3 (reg, reg, GEN_INT (xsize))));
976 reg = gen_rtx_POST_INC (SImode, reg);
977 break;
978
979 default:
980 gcc_unreachable ();
981 }
982
983 /* Now actually save the FP registers. */
984 for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
985 if (df_regs_ever_live_p (i) && ! call_really_used_regs [i])
986 {
987 rtx addr;
988
989 if (reg)
990 addr = reg;
991 else
992 {
993 /* If we aren't using `a0', use an SP offset. */
994 if (xsize)
995 {
996 addr = gen_rtx_PLUS (SImode,
997 stack_pointer_rtx,
998 GEN_INT (xsize));
999 }
1000 else
1001 addr = stack_pointer_rtx;
1002
1003 xsize += 4;
1004 }
1005
1006 F (emit_insn (gen_movsf (gen_rtx_MEM (SFmode, addr),
1007 gen_rtx_REG (SFmode, i))));
1008 }
1009 }
1010
1011 /* Now put the frame pointer into the frame pointer register. */
1012 if (frame_pointer_needed)
1013 F (emit_move_insn (frame_pointer_rtx, stack_pointer_rtx));
1014
1015 /* Allocate stack for this frame. */
1016 if (size)
1017 F (emit_insn (gen_addsi3 (stack_pointer_rtx,
1018 stack_pointer_rtx,
1019 GEN_INT (-size))));
1020
1021 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
1022 emit_insn (gen_load_pic ());
1023 }
1024
1025 void
1026 mn10300_expand_epilogue (void)
1027 {
1028 HOST_WIDE_INT size = mn10300_frame_size ();
1029 unsigned int reg_save_bytes;
1030
1031 mn10300_get_live_callee_saved_regs (& reg_save_bytes);
1032
1033 if (TARGET_AM33_2 && fp_regs_to_save ())
1034 {
1035 int num_regs_to_save = fp_regs_to_save (), i;
1036 rtx reg = 0;
1037
1038 /* We have several options to restore FP registers. We could
1039 load them from SP offsets, but, if there are enough FP
1040 registers to restore, we win if we use a post-increment
1041 addressing mode. */
1042
1043 /* If we have a frame pointer, it's the best option, because we
1044 already know it has the value we want. */
1045 if (frame_pointer_needed)
1046 reg = gen_rtx_REG (SImode, FRAME_POINTER_REGNUM);
1047 /* Otherwise, we may use `a1', since it's call-clobbered and
1048 it's never used for return values. But only do so if it's
1049 smaller than using SP offsets. */
1050 else
1051 {
1052 enum { restore_sp_post_adjust,
1053 restore_sp_pre_adjust,
1054 restore_sp_partial_adjust,
1055 restore_a1 } strategy;
1056 unsigned int this_strategy_size, strategy_size = (unsigned)-1;
1057
1058 /* Consider using sp offsets before adjusting sp. */
1059 /* Insn: fmov (##,sp),fs#, for each fs# to be restored. */
1060 this_strategy_size = SIZE_FMOV_SP (size, num_regs_to_save);
1061 /* If size is too large, we'll have to adjust SP with an
1062 add. */
1063 if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
1064 {
1065 /* Insn: add size + 4 * num_regs_to_save, sp. */
1066 this_strategy_size += SIZE_ADD_SP (size + 4 * num_regs_to_save);
1067 }
1068 /* If we don't have to restore any non-FP registers,
1069 we'll be able to save one byte by using rets. */
1070 if (! reg_save_bytes)
1071 this_strategy_size--;
1072
1073 if (this_strategy_size < strategy_size)
1074 {
1075 strategy = restore_sp_post_adjust;
1076 strategy_size = this_strategy_size;
1077 }
1078
1079 /* Consider using sp offsets after adjusting sp. */
1080 /* Insn: add size, sp. */
1081 this_strategy_size = SIZE_ADD_SP (size);
1082 /* Insn: fmov (##,sp),fs#, for each fs# to be restored. */
1083 this_strategy_size += SIZE_FMOV_SP (0, num_regs_to_save);
1084 /* We're going to use ret to release the FP registers
1085 save area, so, no savings. */
1086
1087 if (this_strategy_size < strategy_size)
1088 {
1089 strategy = restore_sp_pre_adjust;
1090 strategy_size = this_strategy_size;
1091 }
1092
1093 /* Consider using sp offsets after partially adjusting sp.
1094 When size is close to 32Kb, we may be able to adjust SP
1095 with an imm16 add instruction while still using fmov
1096 (d8,sp). */
1097 if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
1098 {
1099 /* Insn: add size + 4 * num_regs_to_save
1100 + reg_save_bytes - 252,sp. */
1101 this_strategy_size = SIZE_ADD_SP (size + 4 * num_regs_to_save
1102 + (int) reg_save_bytes - 252);
1103 /* Insn: fmov (##,sp),fs#, for each fs# to be restored. */
1104 this_strategy_size += SIZE_FMOV_SP (252 - reg_save_bytes
1105 - 4 * num_regs_to_save,
1106 num_regs_to_save);
1107 /* We're going to use ret to release the FP registers
1108 save area, so, no savings. */
1109
1110 if (this_strategy_size < strategy_size)
1111 {
1112 strategy = restore_sp_partial_adjust;
1113 strategy_size = this_strategy_size;
1114 }
1115 }
1116
1117 /* Consider using a1 in post-increment mode, as long as the
1118 user hasn't changed the calling conventions of a1. */
1119 if (call_really_used_regs [FIRST_ADDRESS_REGNUM + 1]
1120 && ! fixed_regs[FIRST_ADDRESS_REGNUM+1])
1121 {
1122 /* Insn: mov sp,a1. */
1123 this_strategy_size = 1;
1124 if (size)
1125 {
1126 /* Insn: add size,a1. */
1127 this_strategy_size += SIZE_ADD_AX (size);
1128 }
1129 /* Insn: fmov (a1+),fs#, for each fs# to be restored. */
1130 this_strategy_size += 3 * num_regs_to_save;
1131 /* If size is large enough, we may be able to save a
1132 couple of bytes. */
1133 if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
1134 {
1135 /* Insn: mov a1,sp. */
1136 this_strategy_size += 2;
1137 }
1138 /* If we don't have to restore any non-FP registers,
1139 we'll be able to save one byte by using rets. */
1140 if (! reg_save_bytes)
1141 this_strategy_size--;
1142
1143 if (this_strategy_size < strategy_size)
1144 {
1145 strategy = restore_a1;
1146 strategy_size = this_strategy_size;
1147 }
1148 }
1149
1150 switch (strategy)
1151 {
1152 case restore_sp_post_adjust:
1153 break;
1154
1155 case restore_sp_pre_adjust:
1156 emit_insn (gen_addsi3 (stack_pointer_rtx,
1157 stack_pointer_rtx,
1158 GEN_INT (size)));
1159 size = 0;
1160 break;
1161
1162 case restore_sp_partial_adjust:
1163 emit_insn (gen_addsi3 (stack_pointer_rtx,
1164 stack_pointer_rtx,
1165 GEN_INT (size + 4 * num_regs_to_save
1166 + reg_save_bytes - 252)));
1167 size = 252 - reg_save_bytes - 4 * num_regs_to_save;
1168 break;
1169
1170 case restore_a1:
1171 reg = gen_rtx_REG (SImode, FIRST_ADDRESS_REGNUM + 1);
1172 emit_insn (gen_movsi (reg, stack_pointer_rtx));
1173 if (size)
1174 emit_insn (gen_addsi3 (reg, reg, GEN_INT (size)));
1175 break;
1176
1177 default:
1178 gcc_unreachable ();
1179 }
1180 }
1181
1182 /* Adjust the selected register, if any, for post-increment. */
1183 if (reg)
1184 reg = gen_rtx_POST_INC (SImode, reg);
1185
1186 for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
1187 if (df_regs_ever_live_p (i) && ! call_really_used_regs [i])
1188 {
1189 rtx addr;
1190
1191 if (reg)
1192 addr = reg;
1193 else if (size)
1194 {
1195 /* If we aren't using a post-increment register, use an
1196 SP offset. */
1197 addr = gen_rtx_PLUS (SImode,
1198 stack_pointer_rtx,
1199 GEN_INT (size));
1200 }
1201 else
1202 addr = stack_pointer_rtx;
1203
1204 size += 4;
1205
1206 emit_insn (gen_movsf (gen_rtx_REG (SFmode, i),
1207 gen_rtx_MEM (SFmode, addr)));
1208 }
1209
1210 /* If we were using the restore_a1 strategy and the number of
1211 bytes to be released won't fit in the `ret' byte, copy `a1'
1212 to `sp', to avoid having to use `add' to adjust it. */
1213 if (! frame_pointer_needed && reg && size + reg_save_bytes > 255)
1214 {
1215 emit_move_insn (stack_pointer_rtx, XEXP (reg, 0));
1216 size = 0;
1217 }
1218 }
1219
1220 /* Maybe cut back the stack, except for the register save area.
1221
1222 If the frame pointer exists, then use the frame pointer to
1223 cut back the stack.
1224
1225 If the stack size + register save area is more than 255 bytes,
1226 then the stack must be cut back here since the size + register
1227 save size is too big for a ret/retf instruction.
1228
1229 Else leave it alone, it will be cut back as part of the
1230 ret/retf instruction, or there wasn't any stack to begin with.
1231
1232 Under no circumstances should the register save area be
1233 deallocated here, that would leave a window where an interrupt
1234 could occur and trash the register save area. */
1235 if (frame_pointer_needed)
1236 {
1237 emit_move_insn (stack_pointer_rtx, frame_pointer_rtx);
1238 size = 0;
1239 }
1240 else if (size + reg_save_bytes > 255)
1241 {
1242 emit_insn (gen_addsi3 (stack_pointer_rtx,
1243 stack_pointer_rtx,
1244 GEN_INT (size)));
1245 size = 0;
1246 }
1247
1248 /* Adjust the stack and restore callee-saved registers, if any. */
1249 if (mn10300_can_use_rets_insn ())
1250 emit_jump_insn (ret_rtx);
1251 else
1252 emit_jump_insn (gen_return_ret (GEN_INT (size + reg_save_bytes)));
1253 }
1254
1255 /* Recognize the PARALLEL rtx generated by mn10300_gen_multiple_store().
1256 This function is for MATCH_PARALLEL and so assumes OP is known to be
1257 parallel. If OP is a multiple store, return a mask indicating which
1258 registers it saves. Return 0 otherwise. */
1259
1260 unsigned int
1261 mn10300_store_multiple_regs (rtx op)
1262 {
1263 int count;
1264 int mask;
1265 int i;
1266 unsigned int last;
1267 rtx elt;
1268
1269 count = XVECLEN (op, 0);
1270 if (count < 2)
1271 return 0;
1272
1273 /* Check that first instruction has the form (set (sp) (plus A B)) */
1274 elt = XVECEXP (op, 0, 0);
1275 if (GET_CODE (elt) != SET
1276 || (! REG_P (SET_DEST (elt)))
1277 || REGNO (SET_DEST (elt)) != STACK_POINTER_REGNUM
1278 || GET_CODE (SET_SRC (elt)) != PLUS)
1279 return 0;
1280
1281 /* Check that A is the stack pointer and B is the expected stack size.
1282 For OP to match, each subsequent instruction should push a word onto
1283 the stack. We therefore expect the first instruction to create
1284 COUNT-1 stack slots. */
1285 elt = SET_SRC (elt);
1286 if ((! REG_P (XEXP (elt, 0)))
1287 || REGNO (XEXP (elt, 0)) != STACK_POINTER_REGNUM
1288 || (! CONST_INT_P (XEXP (elt, 1)))
1289 || INTVAL (XEXP (elt, 1)) != -(count - 1) * 4)
1290 return 0;
1291
1292 mask = 0;
1293 for (i = 1; i < count; i++)
1294 {
1295 /* Check that element i is a (set (mem M) R). */
1296 /* ??? Validate the register order a-la mn10300_gen_multiple_store.
1297 Remember: the ordering is *not* monotonic. */
1298 elt = XVECEXP (op, 0, i);
1299 if (GET_CODE (elt) != SET
1300 || (! MEM_P (SET_DEST (elt)))
1301 || (! REG_P (SET_SRC (elt))))
1302 return 0;
1303
1304 /* Remember which registers are to be saved. */
1305 last = REGNO (SET_SRC (elt));
1306 mask |= (1 << last);
1307
1308 /* Check that M has the form (plus (sp) (const_int -I*4)) */
1309 elt = XEXP (SET_DEST (elt), 0);
1310 if (GET_CODE (elt) != PLUS
1311 || (! REG_P (XEXP (elt, 0)))
1312 || REGNO (XEXP (elt, 0)) != STACK_POINTER_REGNUM
1313 || (! CONST_INT_P (XEXP (elt, 1)))
1314 || INTVAL (XEXP (elt, 1)) != -i * 4)
1315 return 0;
1316 }
1317
1318 /* All or none of the callee-saved extended registers must be in the set. */
1319 if ((mask & 0x3c000) != 0
1320 && (mask & 0x3c000) != 0x3c000)
1321 return 0;
1322
1323 return mask;
1324 }
1325
1326 /* Implement TARGET_PREFERRED_RELOAD_CLASS. */
1327
1328 static reg_class_t
1329 mn10300_preferred_reload_class (rtx x, reg_class_t rclass)
1330 {
1331 if (x == stack_pointer_rtx && rclass != SP_REGS)
1332 return (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
1333 else if (MEM_P (x)
1334 || (REG_P (x)
1335 && !HARD_REGISTER_P (x))
1336 || (GET_CODE (x) == SUBREG
1337 && REG_P (SUBREG_REG (x))
1338 && !HARD_REGISTER_P (SUBREG_REG (x))))
1339 return LIMIT_RELOAD_CLASS (GET_MODE (x), rclass);
1340 else
1341 return rclass;
1342 }
1343
1344 /* Implement TARGET_PREFERRED_OUTPUT_RELOAD_CLASS. */
1345
1346 static reg_class_t
1347 mn10300_preferred_output_reload_class (rtx x, reg_class_t rclass)
1348 {
1349 if (x == stack_pointer_rtx && rclass != SP_REGS)
1350 return (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
1351 return rclass;
1352 }
1353
1354 /* Implement TARGET_SECONDARY_RELOAD. */
1355
1356 static reg_class_t
1357 mn10300_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
1358 enum machine_mode mode, secondary_reload_info *sri)
1359 {
1360 enum reg_class rclass = (enum reg_class) rclass_i;
1361 enum reg_class xclass = NO_REGS;
1362 unsigned int xregno = INVALID_REGNUM;
1363
1364 if (REG_P (x))
1365 {
1366 xregno = REGNO (x);
1367 if (xregno >= FIRST_PSEUDO_REGISTER)
1368 xregno = true_regnum (x);
1369 if (xregno != INVALID_REGNUM)
1370 xclass = REGNO_REG_CLASS (xregno);
1371 }
1372
1373 if (!TARGET_AM33)
1374 {
1375 /* Memory load/stores less than a full word wide can't have an
1376 address or stack pointer destination. They must use a data
1377 register as an intermediate register. */
1378 if (rclass != DATA_REGS
1379 && (mode == QImode || mode == HImode)
1380 && xclass == NO_REGS)
1381 return DATA_REGS;
1382
1383 /* We can only move SP to/from an address register. */
1384 if (in_p
1385 && rclass == SP_REGS
1386 && xclass != ADDRESS_REGS)
1387 return ADDRESS_REGS;
1388 if (!in_p
1389 && xclass == SP_REGS
1390 && rclass != ADDRESS_REGS
1391 && rclass != SP_OR_ADDRESS_REGS)
1392 return ADDRESS_REGS;
1393 }
1394
1395 /* We can't directly load sp + const_int into a register;
1396 we must use an address register as a scratch. */
1397 if (in_p
1398 && rclass != SP_REGS
1399 && rclass != SP_OR_ADDRESS_REGS
1400 && rclass != SP_OR_GENERAL_REGS
1401 && GET_CODE (x) == PLUS
1402 && (XEXP (x, 0) == stack_pointer_rtx
1403 || XEXP (x, 1) == stack_pointer_rtx))
1404 {
1405 sri->icode = CODE_FOR_reload_plus_sp_const;
1406 return NO_REGS;
1407 }
1408
1409 /* We can only move MDR to/from a data register. */
1410 if (rclass == MDR_REGS && xclass != DATA_REGS)
1411 return DATA_REGS;
1412 if (xclass == MDR_REGS && rclass != DATA_REGS)
1413 return DATA_REGS;
1414
1415 /* We can't load/store an FP register from a constant address. */
1416 if (TARGET_AM33_2
1417 && (rclass == FP_REGS || xclass == FP_REGS)
1418 && (xclass == NO_REGS || rclass == NO_REGS))
1419 {
1420 rtx addr = NULL;
1421
1422 if (xregno >= FIRST_PSEUDO_REGISTER && xregno != INVALID_REGNUM)
1423 {
1424 addr = reg_equiv_mem (xregno);
1425 if (addr)
1426 addr = XEXP (addr, 0);
1427 }
1428 else if (MEM_P (x))
1429 addr = XEXP (x, 0);
1430
1431 if (addr && CONSTANT_ADDRESS_P (addr))
1432 return GENERAL_REGS;
1433 }
1434 /* Otherwise assume no secondary reloads are needed. */
1435 return NO_REGS;
1436 }
1437
1438 int
1439 mn10300_frame_size (void)
1440 {
1441 /* size includes the fixed stack space needed for function calls. */
1442 int size = get_frame_size () + crtl->outgoing_args_size;
1443
1444 /* And space for the return pointer. */
1445 size += crtl->outgoing_args_size ? 4 : 0;
1446
1447 return size;
1448 }
1449
1450 int
1451 mn10300_initial_offset (int from, int to)
1452 {
1453 int diff = 0;
1454
1455 gcc_assert (from == ARG_POINTER_REGNUM || from == FRAME_POINTER_REGNUM);
1456 gcc_assert (to == FRAME_POINTER_REGNUM || to == STACK_POINTER_REGNUM);
1457
1458 if (to == STACK_POINTER_REGNUM)
1459 diff = mn10300_frame_size ();
1460
1461 /* The difference between the argument pointer and the frame pointer
1462 is the size of the callee register save area. */
1463 if (from == ARG_POINTER_REGNUM)
1464 {
1465 unsigned int reg_save_bytes;
1466
1467 mn10300_get_live_callee_saved_regs (& reg_save_bytes);
1468 diff += reg_save_bytes;
1469 diff += 4 * fp_regs_to_save ();
1470 }
1471
1472 return diff;
1473 }
1474
1475 /* Worker function for TARGET_RETURN_IN_MEMORY. */
1476
1477 static bool
1478 mn10300_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
1479 {
1480 /* Values larger than 8 bytes, zero-sized values and BLKmode values are returned in memory. */
1481 return (int_size_in_bytes (type) > 8
1482 || int_size_in_bytes (type) == 0
1483 || TYPE_MODE (type) == BLKmode);
1484 }
1485
1486 /* Flush the argument registers to the stack for a stdarg function;
1487 return the new argument pointer. */
1488 static rtx
1489 mn10300_builtin_saveregs (void)
1490 {
1491 rtx offset, mem;
1492 tree fntype = TREE_TYPE (current_function_decl);
1493 int argadj = ((!stdarg_p (fntype))
1494 ? UNITS_PER_WORD : 0);
1495 alias_set_type set = get_varargs_alias_set ();
1496
1497 if (argadj)
1498 offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, argadj);
1499 else
1500 offset = crtl->args.arg_offset_rtx;
1501
1502 mem = gen_rtx_MEM (SImode, crtl->args.internal_arg_pointer);
1503 set_mem_alias_set (mem, set);
1504 emit_move_insn (mem, gen_rtx_REG (SImode, 0));
1505
1506 mem = gen_rtx_MEM (SImode,
1507 plus_constant (Pmode,
1508 crtl->args.internal_arg_pointer, 4));
1509 set_mem_alias_set (mem, set);
1510 emit_move_insn (mem, gen_rtx_REG (SImode, 1));
1511
1512 return copy_to_reg (expand_binop (Pmode, add_optab,
1513 crtl->args.internal_arg_pointer,
1514 offset, 0, 0, OPTAB_LIB_WIDEN));
1515 }
1516
1517 static void
1518 mn10300_va_start (tree valist, rtx nextarg)
1519 {
1520 nextarg = expand_builtin_saveregs ();
1521 std_expand_builtin_va_start (valist, nextarg);
1522 }
1523
1524 /* Return true when a parameter should be passed by reference. */
1525
1526 static bool
1527 mn10300_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
1528 enum machine_mode mode, const_tree type,
1529 bool named ATTRIBUTE_UNUSED)
1530 {
1531 unsigned HOST_WIDE_INT size;
1532
1533 if (type)
1534 size = int_size_in_bytes (type);
1535 else
1536 size = GET_MODE_SIZE (mode);
1537
1538 return (size > 8 || size == 0);
1539 }
1540
1541 /* Return an RTX to represent where an argument with mode MODE will be
1542 passed to a function. If the result is NULL_RTX, the argument is pushed. */
1543
1544 static rtx
1545 mn10300_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
1546 const_tree type, bool named ATTRIBUTE_UNUSED)
1547 {
1548 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
1549 rtx result = NULL_RTX;
1550 int size;
1551
1552 /* We only support using 2 data registers as argument registers. */
1553 int nregs = 2;
1554
1555 /* Figure out the size of the object to be passed. */
1556 if (mode == BLKmode)
1557 size = int_size_in_bytes (type);
1558 else
1559 size = GET_MODE_SIZE (mode);
1560
1561 cum->nbytes = (cum->nbytes + 3) & ~3;
1562
1563 /* Don't pass this arg via a register if all the argument registers
1564 are used up. */
1565 if (cum->nbytes > nregs * UNITS_PER_WORD)
1566 return result;
1567
1568 /* Don't pass this arg via a register if it would be split between
1569 registers and memory. */
1570 if (type == NULL_TREE
1571 && cum->nbytes + size > nregs * UNITS_PER_WORD)
1572 return result;
1573
1574 switch (cum->nbytes / UNITS_PER_WORD)
1575 {
1576 case 0:
1577 result = gen_rtx_REG (mode, FIRST_ARGUMENT_REGNUM);
1578 break;
1579 case 1:
1580 result = gen_rtx_REG (mode, FIRST_ARGUMENT_REGNUM + 1);
1581 break;
1582 default:
1583 break;
1584 }
1585
1586 return result;
1587 }
1588
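/* An illustrative sketch of the resulting convention (assuming
   FIRST_ARGUMENT_REGNUM is d0 and UNITS_PER_WORD is 4): for a call
   f (int a, int b, int c), A is passed in d0, B in d1 and C on the
   stack. Values larger than 8 bytes never reach this point by value;
   mn10300_pass_by_reference above turns them into reference
   parameters. */
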
1589 /* Update the data in CUM to advance over an argument
1590 of mode MODE and data type TYPE.
1591 (TYPE is null for libcalls where that information may not be available.) */
1592
1593 static void
1594 mn10300_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
1595 const_tree type, bool named ATTRIBUTE_UNUSED)
1596 {
1597 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
1598
1599 cum->nbytes += (mode != BLKmode
1600 ? (GET_MODE_SIZE (mode) + 3) & ~3
1601 : (int_size_in_bytes (type) + 3) & ~3);
1602 }
1603
1604 /* Return the number of bytes of registers to use for an argument passed
1605 partially in registers and partially in memory. */
1606
1607 static int
1608 mn10300_arg_partial_bytes (cumulative_args_t cum_v, enum machine_mode mode,
1609 tree type, bool named ATTRIBUTE_UNUSED)
1610 {
1611 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
1612 int size;
1613
1614 /* We only support using 2 data registers as argument registers. */
1615 int nregs = 2;
1616
1617 /* Figure out the size of the object to be passed. */
1618 if (mode == BLKmode)
1619 size = int_size_in_bytes (type);
1620 else
1621 size = GET_MODE_SIZE (mode);
1622
1623 cum->nbytes = (cum->nbytes + 3) & ~3;
1624
1625 /* Don't pass this arg via a register if all the argument registers
1626 are used up. */
1627 if (cum->nbytes > nregs * UNITS_PER_WORD)
1628 return 0;
1629
1630 if (cum->nbytes + size <= nregs * UNITS_PER_WORD)
1631 return 0;
1632
1633 /* Don't pass this arg via a register if it would be split between
1634 registers and memory. */
1635 if (type == NULL_TREE
1636 && cum->nbytes + size > nregs * UNITS_PER_WORD)
1637 return 0;
1638
1639 return nregs * UNITS_PER_WORD - cum->nbytes;
1640 }
1641
1642 /* Return the location of the function's value. This will be either
1643 $d0 for integer functions, $a0 for pointers, or a PARALLEL of both
1644 $d0 and $a0 if the -mreturn-pointer-on-d0 flag is set. Note that
1645 we only return the PARALLEL for outgoing values; we do not want
1646 callers relying on this extra copy. */
1647
1648 static rtx
1649 mn10300_function_value (const_tree valtype,
1650 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
1651 bool outgoing)
1652 {
1653 rtx rv;
1654 enum machine_mode mode = TYPE_MODE (valtype);
1655
1656 if (! POINTER_TYPE_P (valtype))
1657 return gen_rtx_REG (mode, FIRST_DATA_REGNUM);
1658 else if (! TARGET_PTR_A0D0 || ! outgoing
1659 || cfun->returns_struct)
1660 return gen_rtx_REG (mode, FIRST_ADDRESS_REGNUM);
1661
1662 rv = gen_rtx_PARALLEL (mode, rtvec_alloc (2));
1663 XVECEXP (rv, 0, 0)
1664 = gen_rtx_EXPR_LIST (VOIDmode,
1665 gen_rtx_REG (mode, FIRST_ADDRESS_REGNUM),
1666 GEN_INT (0));
1667
1668 XVECEXP (rv, 0, 1)
1669 = gen_rtx_EXPR_LIST (VOIDmode,
1670 gen_rtx_REG (mode, FIRST_DATA_REGNUM),
1671 GEN_INT (0));
1672 return rv;
1673 }
1674
1675 /* Implements TARGET_LIBCALL_VALUE. */
1676
1677 static rtx
1678 mn10300_libcall_value (enum machine_mode mode,
1679 const_rtx fun ATTRIBUTE_UNUSED)
1680 {
1681 return gen_rtx_REG (mode, FIRST_DATA_REGNUM);
1682 }
1683
1684 /* Implements FUNCTION_VALUE_REGNO_P. */
1685
1686 bool
1687 mn10300_function_value_regno_p (const unsigned int regno)
1688 {
1689 return (regno == FIRST_DATA_REGNUM || regno == FIRST_ADDRESS_REGNUM);
1690 }
1691
1692 /* Output an addition operation. */
1693
1694 const char *
1695 mn10300_output_add (rtx operands[3], bool need_flags)
1696 {
1697 rtx dest, src1, src2;
1698 unsigned int dest_regnum, src1_regnum, src2_regnum;
1699 enum reg_class src1_class, src2_class, dest_class;
1700
1701 dest = operands[0];
1702 src1 = operands[1];
1703 src2 = operands[2];
1704
1705 dest_regnum = true_regnum (dest);
1706 src1_regnum = true_regnum (src1);
1707
1708 dest_class = REGNO_REG_CLASS (dest_regnum);
1709 src1_class = REGNO_REG_CLASS (src1_regnum);
1710
1711 if (CONST_INT_P (src2))
1712 {
1713 gcc_assert (dest_regnum == src1_regnum);
1714
1715 if (src2 == const1_rtx && !need_flags)
1716 return "inc %0";
1717 if (INTVAL (src2) == 4 && !need_flags && dest_class != DATA_REGS)
1718 return "inc4 %0";
1719
1720 gcc_assert (!need_flags || dest_class != SP_REGS);
1721 return "add %2,%0";
1722 }
1723 else if (CONSTANT_P (src2))
1724 return "add %2,%0";
1725
1726 src2_regnum = true_regnum (src2);
1727 src2_class = REGNO_REG_CLASS (src2_regnum);
1728
1729 if (dest_regnum == src1_regnum)
1730 return "add %2,%0";
1731 if (dest_regnum == src2_regnum)
1732 return "add %1,%0";
1733
1734 /* The rest of the cases are reg = reg+reg. For AM33, we can implement
1735 this directly, as below, but when optimizing for space we can sometimes
1736 do better by using a mov+add. For MN103, we claimed that we could
1737 implement a three-operand add because the various move and add insns
1738 change sizes across register classes, and we can often do better than
1739 reload in choosing which operand to move. */
1740 if (TARGET_AM33 && optimize_insn_for_speed_p ())
1741 return "add %2,%1,%0";
1742
1743 /* Catch cases where no extended register was used. */
1744 if (src1_class != EXTENDED_REGS
1745 && src2_class != EXTENDED_REGS
1746 && dest_class != EXTENDED_REGS)
1747 {
1748 /* We have to copy one of the sources into the destination, then
1749 add the other source to the destination.
1750
1751 Carefully select which source to copy to the destination; a
1752 naive implementation will waste a byte when the source classes
1753 are different and the destination is an address register.
1754 Selecting the lowest cost register copy will optimize this
1755 sequence. */
1756 if (src1_class == dest_class)
1757 return "mov %1,%0\n\tadd %2,%0";
1758 else
1759 return "mov %2,%0\n\tadd %1,%0";
1760 }
1761
1762 /* At least one register is an extended register. */
1763
1764 /* The three operand add instruction on the am33 is a win iff the
1765 output register is an extended register, or if both source
1766 registers are extended registers. */
1767 if (dest_class == EXTENDED_REGS || src1_class == src2_class)
1768 return "add %2,%1,%0";
1769
1770 /* It is better to copy one of the sources to the destination, then
1771 perform a 2 address add. The destination in this case must be
1772 an address or data register and one of the sources must be an
1773 extended register and the remaining source must not be an extended
1774 register.
1775
1776 The best code for this case is to copy the extended reg to the
1777 destination, then emit a two address add. */
1778 if (src1_class == EXTENDED_REGS)
1779 return "mov %1,%0\n\tadd %2,%0";
1780 else
1781 return "mov %2,%0\n\tadd %1,%0";
1782 }
1783
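/* For instance (a sketch of the cases above, on AM33): for
   d0 = e4 + a2 when optimizing for size, the code above emits
   "mov e4,d0" then "add a2,d0", copying the extended register first;
   when optimize_insn_for_speed_p () holds it emits the single
   three-operand "add a2,e4,d0" instead. */
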
1784 /* Return 1 if X contains a symbolic expression. We know these
1785 expressions will have one of a few well defined forms, so
1786 we need only check those forms. */
1787
1788 int
1789 mn10300_symbolic_operand (rtx op,
1790 enum machine_mode mode ATTRIBUTE_UNUSED)
1791 {
1792 switch (GET_CODE (op))
1793 {
1794 case SYMBOL_REF:
1795 case LABEL_REF:
1796 return 1;
1797 case CONST:
1798 op = XEXP (op, 0);
1799 return ((GET_CODE (XEXP (op, 0)) == SYMBOL_REF
1800 || GET_CODE (XEXP (op, 0)) == LABEL_REF)
1801 && CONST_INT_P (XEXP (op, 1)));
1802 default:
1803 return 0;
1804 }
1805 }
1806
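/* Concretely, the forms accepted above are (symbol_ref "s"),
   (label_ref L) and (const (plus (symbol_ref "s") (const_int N)))
   (or with a label_ref inside the plus). */
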
1807 /* Try machine dependent ways of modifying an illegitimate address
1808 to be legitimate. If we find one, return the new valid address.
1809 This macro is used in only one place: `memory_address' in explow.c.
1810
1811 OLDX is the address as it was before break_out_memory_refs was called.
1812 In some cases it is useful to look at this to decide what needs to be done.
1813
1814 Normally it is always safe for this macro to do nothing. It exists to
1815 recognize opportunities to optimize the output.
1816
1817 But on a few ports with segmented architectures and indexed addressing
1818 (mn10300, hppa) it is used to rewrite certain problematical addresses. */
1819
1820 static rtx
1821 mn10300_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
1822 enum machine_mode mode ATTRIBUTE_UNUSED)
1823 {
1824 if (flag_pic && ! mn10300_legitimate_pic_operand_p (x))
1825 x = mn10300_legitimize_pic_address (oldx, NULL_RTX);
1826
1827 /* Uh-oh. We might have an address for x[n-100000]. This needs
1828 special handling to avoid creating an indexed memory address
1829 with x-100000 as the base. */
1830 if (GET_CODE (x) == PLUS
1831 && mn10300_symbolic_operand (XEXP (x, 1), VOIDmode))
1832 {
1833 /* Ugly. We modify things here so that the address offset specified
1834 by the index expression is computed first, then added to x to form
1835 the entire address. */
1836
1837 rtx regx1, regy1, regy2, y;
1838
1839 /* Strip off any CONST. */
1840 y = XEXP (x, 1);
1841 if (GET_CODE (y) == CONST)
1842 y = XEXP (y, 0);
1843
1844 if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
1845 {
1846 regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
1847 regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
1848 regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
1849 regx1 = force_reg (Pmode,
1850 gen_rtx_fmt_ee (GET_CODE (y), Pmode, regx1,
1851 regy2));
1852 return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
1853 }
1854 }
1855 return x;
1856 }
1857
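/* A worked example of the rewrite above (register numbers are
   arbitrary): for x == (plus (reg 40)
   (const (plus (symbol_ref "tbl") (const_int -100000)))),
   the CONST is stripped and the code emits
       regx1 = (reg 40) + (-100000)
       result = regx1 + "tbl"
   so the large offset combines with the base register first and the
   symbol is added last, avoiding an indexed address based at
   tbl-100000. */
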
1858 /* Convert a non-PIC address in `orig' to a PIC address using @GOT or
1859 @GOTOFF in `reg'. */
1860
1861 rtx
1862 mn10300_legitimize_pic_address (rtx orig, rtx reg)
1863 {
1864 rtx x;
1865
1866 if (GET_CODE (orig) == LABEL_REF
1867 || (GET_CODE (orig) == SYMBOL_REF
1868 && (CONSTANT_POOL_ADDRESS_P (orig)
1869 || ! MN10300_GLOBAL_P (orig))))
1870 {
1871 if (reg == NULL)
1872 reg = gen_reg_rtx (Pmode);
1873
1874 x = gen_rtx_UNSPEC (SImode, gen_rtvec (1, orig), UNSPEC_GOTOFF);
1875 x = gen_rtx_CONST (SImode, x);
1876 emit_move_insn (reg, x);
1877
1878 x = emit_insn (gen_addsi3 (reg, reg, pic_offset_table_rtx));
1879 }
1880 else if (GET_CODE (orig) == SYMBOL_REF)
1881 {
1882 if (reg == NULL)
1883 reg = gen_reg_rtx (Pmode);
1884
1885 x = gen_rtx_UNSPEC (SImode, gen_rtvec (1, orig), UNSPEC_GOT);
1886 x = gen_rtx_CONST (SImode, x);
1887 x = gen_rtx_PLUS (SImode, pic_offset_table_rtx, x);
1888 x = gen_const_mem (SImode, x);
1889
1890 x = emit_move_insn (reg, x);
1891 }
1892 else
1893 return orig;
1894
1895 set_unique_reg_note (x, REG_EQUAL, orig);
1896 return reg;
1897 }
1898
1899 /* Return zero if X references a SYMBOL_REF or LABEL_REF whose symbol
1900 isn't protected by a PIC unspec; nonzero otherwise. */
1901
1902 int
1903 mn10300_legitimate_pic_operand_p (rtx x)
1904 {
1905 const char *fmt;
1906 int i;
1907
1908 if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
1909 return 0;
1910
1911 if (GET_CODE (x) == UNSPEC
1912 && (XINT (x, 1) == UNSPEC_PIC
1913 || XINT (x, 1) == UNSPEC_GOT
1914 || XINT (x, 1) == UNSPEC_GOTOFF
1915 || XINT (x, 1) == UNSPEC_PLT
1916 || XINT (x, 1) == UNSPEC_GOTSYM_OFF))
1917 return 1;
1918
1919 fmt = GET_RTX_FORMAT (GET_CODE (x));
1920 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
1921 {
1922 if (fmt[i] == 'E')
1923 {
1924 int j;
1925
1926 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1927 if (! mn10300_legitimate_pic_operand_p (XVECEXP (x, i, j)))
1928 return 0;
1929 }
1930 else if (fmt[i] == 'e'
1931 && ! mn10300_legitimate_pic_operand_p (XEXP (x, i)))
1932 return 0;
1933 }
1934
1935 return 1;
1936 }
1937
1938 /* Return TRUE if the address X, taken from a (MEM:MODE X) rtx, is
1939 legitimate, and FALSE otherwise.
1940
1941 On the mn10300, the value in the address register must be
1942 in the same memory space/segment as the effective address.
1943
1944 This is problematical for reload since it does not understand
1945 that base+index != index+base in a memory reference.
1946
1947 Note it is still possible to use reg+reg addressing modes,
1948 it's just much more difficult. For a discussion of a possible
1949 workaround and solution, see the comments in pa.c before the
1950 function record_unscaled_index_insn_codes. */
1951
1952 static bool
1953 mn10300_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
1954 {
1955 rtx base, index;
1956
1957 if (CONSTANT_ADDRESS_P (x))
1958 return !flag_pic || mn10300_legitimate_pic_operand_p (x);
1959
1960 if (RTX_OK_FOR_BASE_P (x, strict))
1961 return true;
1962
1963 if (TARGET_AM33 && (mode == SImode || mode == SFmode || mode == HImode))
1964 {
1965 if (GET_CODE (x) == POST_INC)
1966 return RTX_OK_FOR_BASE_P (XEXP (x, 0), strict);
1967 if (GET_CODE (x) == POST_MODIFY)
1968 return (RTX_OK_FOR_BASE_P (XEXP (x, 0), strict)
1969 && CONSTANT_ADDRESS_P (XEXP (x, 1)));
1970 }
1971
1972 if (GET_CODE (x) != PLUS)
1973 return false;
1974
1975 base = XEXP (x, 0);
1976 index = XEXP (x, 1);
1977
1978 if (!REG_P (base))
1979 return false;
1980 if (REG_P (index))
1981 {
1982 /* ??? Without AM33 generalized (Ri,Rn) addressing, reg+reg
1983 addressing is hard to satisfy. */
1984 if (!TARGET_AM33)
1985 return false;
1986
1987 return (REGNO_GENERAL_P (REGNO (base), strict)
1988 && REGNO_GENERAL_P (REGNO (index), strict));
1989 }
1990
1991 if (!REGNO_STRICT_OK_FOR_BASE_P (REGNO (base), strict))
1992 return false;
1993
1994 if (CONST_INT_P (index))
1995 return IN_RANGE (INTVAL (index), -1 - 0x7fffffff, 0x7fffffff);
1996
1997 if (CONSTANT_ADDRESS_P (index))
1998 return !flag_pic || mn10300_legitimate_pic_operand_p (index);
1999
2000 return false;
2001 }
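
/* For illustration: (reg a0) and (plus (reg a0) (const_int 100)) are
   legitimate everywhere, (plus (reg d0) (reg d1)) only on AM33 (both
   registers must be "general"), and (post_inc (reg a0)) only on AM33
   for SImode, SFmode or HImode accesses.  */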
2002
2003 bool
2004 mn10300_regno_in_class_p (unsigned regno, int rclass, bool strict)
2005 {
2006 if (regno >= FIRST_PSEUDO_REGISTER)
2007 {
2008 if (!strict)
2009 return true;
2010 if (!reg_renumber)
2011 return false;
2012 regno = reg_renumber[regno];
2013 if (regno == INVALID_REGNUM)
2014 return false;
2015 }
2016 return TEST_HARD_REG_BIT (reg_class_contents[rclass], regno);
2017 }
2018
2019 rtx
2020 mn10300_legitimize_reload_address (rtx x,
2021 enum machine_mode mode ATTRIBUTE_UNUSED,
2022 int opnum, int type,
2023 int ind_levels ATTRIBUTE_UNUSED)
2024 {
2025 bool any_change = false;
2026
2027 /* See above re disabling reg+reg addressing for MN103. */
2028 if (!TARGET_AM33)
2029 return NULL_RTX;
2030
2031 if (GET_CODE (x) != PLUS)
2032 return NULL_RTX;
2033
2034 if (XEXP (x, 0) == stack_pointer_rtx)
2035 {
2036 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
2037 GENERAL_REGS, GET_MODE (x), VOIDmode, 0, 0,
2038 opnum, (enum reload_type) type);
2039 any_change = true;
2040 }
2041 if (XEXP (x, 1) == stack_pointer_rtx)
2042 {
2043 push_reload (XEXP (x, 1), NULL_RTX, &XEXP (x, 1), NULL,
2044 GENERAL_REGS, GET_MODE (x), VOIDmode, 0, 0,
2045 opnum, (enum reload_type) type);
2046 any_change = true;
2047 }
2048
2049 return any_change ? x : NULL_RTX;
2050 }
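
/* For example, given the address (plus (reg sp) (reg d0)), which no
   mn10300 addressing mode accepts, the push_reload calls above force
   the stack pointer into a general register first, yielding a reg+reg
   address that the AM33 can use.  */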
2051
2052 /* Implement TARGET_LEGITIMATE_CONSTANT_P. Returns TRUE if X is a valid
2053 constant. Note that some "constants" aren't valid, such as TLS
2054 symbols and unconverted GOT-based references, so we eliminate
2055 those here. */
2056
2057 static bool
2058 mn10300_legitimate_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
2059 {
2060 switch (GET_CODE (x))
2061 {
2062 case CONST:
2063 x = XEXP (x, 0);
2064
2065 if (GET_CODE (x) == PLUS)
2066 {
2067 if (! CONST_INT_P (XEXP (x, 1)))
2068 return false;
2069 x = XEXP (x, 0);
2070 }
2071
2072 /* Only some unspecs are valid as "constants". */
2073 if (GET_CODE (x) == UNSPEC)
2074 {
2075 switch (XINT (x, 1))
2076 {
2077 case UNSPEC_PIC:
2078 case UNSPEC_GOT:
2079 case UNSPEC_GOTOFF:
2080 case UNSPEC_PLT:
2081 return true;
2082 default:
2083 return false;
2084 }
2085 }
2086
2087 /* We must have drilled down to a symbol. */
2088 if (! mn10300_symbolic_operand (x, Pmode))
2089 return false;
2090 break;
2091
2092 default:
2093 break;
2094 }
2095
2096 return true;
2097 }
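
/* Thus (const (plus (symbol_ref "x") (const_int 4))) and
   (const (unspec [(symbol_ref "x")] UNSPEC_GOTOFF)) are accepted,
   while (const (unspec [(symbol_ref "x")] UNSPEC_GOTSYM_OFF)) is
   rejected, as is any CONST sum whose second operand is not a
   CONST_INT.  (Illustrative RTL; "x" is an arbitrary symbol.)  */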
2098
2099 /* Undo pic address legitimization for the benefit of debug info. */
2100
2101 static rtx
2102 mn10300_delegitimize_address (rtx orig_x)
2103 {
2104 rtx x = orig_x, ret, addend = NULL;
2105 bool need_mem;
2106
2107 if (MEM_P (x))
2108 x = XEXP (x, 0);
2109 if (GET_CODE (x) != PLUS || GET_MODE (x) != Pmode)
2110 return orig_x;
2111
2112 if (XEXP (x, 0) == pic_offset_table_rtx)
2113 ;
2114 /* With the REG+REG addressing of AM33, var-tracking can re-assemble
2115 some odd-looking "addresses" that were never valid in the first place.
2116 We need to look harder to avoid warnings being emitted. */
2117 else if (GET_CODE (XEXP (x, 0)) == PLUS)
2118 {
2119 rtx x0 = XEXP (x, 0);
2120 rtx x00 = XEXP (x0, 0);
2121 rtx x01 = XEXP (x0, 1);
2122
2123 if (x00 == pic_offset_table_rtx)
2124 addend = x01;
2125 else if (x01 == pic_offset_table_rtx)
2126 addend = x00;
2127 else
2128 return orig_x;
2129
2130 }
2131 else
2132 return orig_x;
2133 x = XEXP (x, 1);
2134
2135 if (GET_CODE (x) != CONST)
2136 return orig_x;
2137 x = XEXP (x, 0);
2138 if (GET_CODE (x) != UNSPEC)
2139 return orig_x;
2140
2141 ret = XVECEXP (x, 0, 0);
2142 if (XINT (x, 1) == UNSPEC_GOTOFF)
2143 need_mem = false;
2144 else if (XINT (x, 1) == UNSPEC_GOT)
2145 need_mem = true;
2146 else
2147 return orig_x;
2148
2149 gcc_assert (GET_CODE (ret) == SYMBOL_REF);
2150 if (need_mem != MEM_P (orig_x))
2151 return orig_x;
2152 if (need_mem && addend)
2153 return orig_x;
2154 if (addend)
2155 ret = gen_rtx_PLUS (Pmode, addend, ret);
2156 return ret;
2157 }
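
/* For instance, the legitimized reference
   (mem (plus (reg pic) (const (unspec [(symbol_ref "y")] UNSPEC_GOT))))
   is folded back to the plain (symbol_ref "y") that the debug info
   should describe.  */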
2158
2159 /* For addresses, costs are relative to "MOV (Rm),Rn". For AM33 this is
2160 the 3-byte fully general instruction; for MN103 this is the 2-byte form
2161 with an address register. */
2162
2163 static int
2164 mn10300_address_cost (rtx x, enum machine_mode mode ATTRIBUTE_UNUSED,
2165 addr_space_t as ATTRIBUTE_UNUSED, bool speed)
2166 {
2167 HOST_WIDE_INT i;
2168 rtx base, index;
2169
2170 switch (GET_CODE (x))
2171 {
2172 case CONST:
2173 case SYMBOL_REF:
2174 case LABEL_REF:
2175 /* We assume all of these require a 32-bit constant, even though
2176 some symbol and label references can be relaxed. */
2177 return speed ? 1 : 4;
2178
2179 case REG:
2180 case SUBREG:
2181 case POST_INC:
2182 return 0;
2183
2184 case POST_MODIFY:
2185 /* Assume any symbolic offset is a 32-bit constant. */
2186 i = (CONST_INT_P (XEXP (x, 1)) ? INTVAL (XEXP (x, 1)) : 0x12345678);
2187 if (IN_RANGE (i, -128, 127))
2188 return speed ? 0 : 1;
2189 if (speed)
2190 return 1;
2191 if (IN_RANGE (i, -0x800000, 0x7fffff))
2192 return 3;
2193 return 4;
2194
2195 case PLUS:
2196 base = XEXP (x, 0);
2197 index = XEXP (x, 1);
2198 if (register_operand (index, SImode))
2199 {
2200 /* Attempt to minimize the number of registers in the address.
2201 This is similar to what other ports do. */
2202 if (register_operand (base, SImode))
2203 return 1;
2204
2205 base = XEXP (x, 1);
2206 index = XEXP (x, 0);
2207 }
2208
2209 /* Assume any symbolic offset is a 32-bit constant. */
2210 i = (CONST_INT_P (index) ? INTVAL (index) : 0x12345678);
2211 if (IN_RANGE (i, -128, 127))
2212 return speed ? 0 : 1;
2213 if (IN_RANGE (i, -32768, 32767))
2214 return speed ? 0 : 2;
2215 return speed ? 2 : 6;
2216
2217 default:
2218 return rtx_cost (x, MEM, 0, speed);
2219 }
2220 }
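
/* Size example: for (plus (reg a0) (const_int 4)) the displacement fits
   in 8 bits, so the address costs 1 byte over the base "MOV (Rm),Rn"
   form; a 16-bit displacement costs 2, and anything wider, including a
   symbolic offset, costs 6.  */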
2221
2222 /* Implement the TARGET_REGISTER_MOVE_COST hook.
2223
2224 Recall that the base value of 2 is required by assumptions elsewhere
2225 in the body of the compiler, and that cost 2 is special-cased as an
2226 early exit from reload meaning no work is required. */
2227
2228 static int
2229 mn10300_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2230 reg_class_t ifrom, reg_class_t ito)
2231 {
2232 enum reg_class from = (enum reg_class) ifrom;
2233 enum reg_class to = (enum reg_class) ito;
2234 enum reg_class scratch, test;
2235
2236 /* Simplify the following code by unifying the fp register classes. */
2237 if (to == FP_ACC_REGS)
2238 to = FP_REGS;
2239 if (from == FP_ACC_REGS)
2240 from = FP_REGS;
2241
2242 /* Diagnose invalid moves by costing them as two moves. */
2243
2244 scratch = NO_REGS;
2245 test = from;
2246 if (to == SP_REGS)
2247 scratch = (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
2248 else if (to == MDR_REGS)
2249 scratch = DATA_REGS;
2250 else if (to == FP_REGS && to != from)
2251 scratch = GENERAL_REGS;
2252 else
2253 {
2254 test = to;
2255 if (from == SP_REGS)
2256 scratch = (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
2257 else if (from == MDR_REGS)
2258 scratch = DATA_REGS;
2259 else if (from == FP_REGS && to != from)
2260 scratch = GENERAL_REGS;
2261 }
2262 if (scratch != NO_REGS && !reg_class_subset_p (test, scratch))
2263 return (mn10300_register_move_cost (VOIDmode, from, scratch)
2264 + mn10300_register_move_cost (VOIDmode, scratch, to));
2265
2266 /* From here on, all we need consider are legal combinations. */
2267
2268 if (optimize_size)
2269 {
2270 /* The scale here is bytes * 2. */
2271
2272 if (from == to && (to == ADDRESS_REGS || to == DATA_REGS))
2273 return 2;
2274
2275 if (from == SP_REGS)
2276 return (to == ADDRESS_REGS ? 2 : 6);
2277
2278 /* For AM33, all remaining legal moves are two bytes. */
2279 if (TARGET_AM33)
2280 return 4;
2281
2282 if (to == SP_REGS)
2283 return (from == ADDRESS_REGS ? 4 : 6);
2284
2285 if ((from == ADDRESS_REGS || from == DATA_REGS)
2286 && (to == ADDRESS_REGS || to == DATA_REGS))
2287 return 4;
2288
2289 if (to == EXTENDED_REGS)
2290 return (to == from ? 6 : 4);
2291
2292 /* What's left are SP_REGS, FP_REGS, or combinations of the above. */
2293 return 6;
2294 }
2295 else
2296 {
2297 /* The scale here is cycles * 2. */
2298
2299 if (to == FP_REGS)
2300 return 8;
2301 if (from == FP_REGS)
2302 return 4;
2303
2304 /* All legal moves between integral registers are single cycle. */
2305 return 2;
2306 }
2307 }
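
/* For example, on MN103 there is no direct DATA_REGS to SP_REGS move,
   so the recursion above prices it as the sum of a DATA_REGS to
   ADDRESS_REGS move and an ADDRESS_REGS to SP_REGS move.  */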
2308
2309 /* Implement the TARGET_MEMORY_MOVE_COST hook.
2310
2311 Since the form of the address is not available here, this cost must be
2312 speed-relative. It should never be cheaper than the size-relative
2313 register move costs above, and it is not. */
2314
2315 static int
2316 mn10300_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2317 reg_class_t iclass, bool in ATTRIBUTE_UNUSED)
2318 {
2319 enum reg_class rclass = (enum reg_class) iclass;
2320
2321 if (rclass == FP_REGS)
2322 return 8;
2323 return 6;
2324 }
2325
2326 /* Implement the TARGET_RTX_COSTS hook.
2327
2328 Speed-relative costs are relative to COSTS_N_INSNS, which is intended
2329 to represent cycles. Size-relative costs are in bytes. */
2330
2331 static bool
2332 mn10300_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
2333 int *ptotal, bool speed)
2334 {
2335 /* This value is used for SYMBOL_REF etc., where we want to pretend
2336 we have a full 32-bit constant. */
2337 HOST_WIDE_INT i = 0x12345678;
2338 int total;
2339
2340 switch (code)
2341 {
2342 case CONST_INT:
2343 i = INTVAL (x);
2344 do_int_costs:
2345 if (speed)
2346 {
2347 if (outer_code == SET)
2348 {
2349 /* 16-bit integer loads have latency 1, 32-bit loads 2. */
2350 if (IN_RANGE (i, -32768, 32767))
2351 total = COSTS_N_INSNS (1);
2352 else
2353 total = COSTS_N_INSNS (2);
2354 }
2355 else
2356 {
2357 /* 16-bit integer operands don't affect latency;
2358 24-bit and 32-bit operands add a cycle. */
2359 if (IN_RANGE (i, -32768, 32767))
2360 total = 0;
2361 else
2362 total = COSTS_N_INSNS (1);
2363 }
2364 }
2365 else
2366 {
2367 if (outer_code == SET)
2368 {
2369 if (i == 0)
2370 total = 1;
2371 else if (IN_RANGE (i, -128, 127))
2372 total = 2;
2373 else if (IN_RANGE (i, -32768, 32767))
2374 total = 3;
2375 else
2376 total = 6;
2377 }
2378 else
2379 {
2380 /* Reference here is ADD An,Dn, vs ADD imm,Dn. */
2381 if (IN_RANGE (i, -128, 127))
2382 total = 0;
2383 else if (IN_RANGE (i, -32768, 32767))
2384 total = 2;
2385 else if (TARGET_AM33 && IN_RANGE (i, -0x01000000, 0x00ffffff))
2386 total = 3;
2387 else
2388 total = 4;
2389 }
2390 }
2391 goto alldone;
2392
2393 case CONST:
2394 case LABEL_REF:
2395 case SYMBOL_REF:
2396 case CONST_DOUBLE:
2397 /* We assume all of these require a 32-bit constant, even though
2398 some symbol and label references can be relaxed. */
2399 goto do_int_costs;
2400
2401 case UNSPEC:
2402 switch (XINT (x, 1))
2403 {
2404 case UNSPEC_PIC:
2405 case UNSPEC_GOT:
2406 case UNSPEC_GOTOFF:
2407 case UNSPEC_PLT:
2408 case UNSPEC_GOTSYM_OFF:
2409 /* The PIC unspecs also resolve to a 32-bit constant. */
2410 goto do_int_costs;
2411
2412 default:
2413 /* Assume any non-listed unspec is some sort of arithmetic. */
2414 goto do_arith_costs;
2415 }
2416
2417 case PLUS:
2418 /* Notice the size difference of INC and INC4. */
2419 if (!speed && outer_code == SET && CONST_INT_P (XEXP (x, 1)))
2420 {
2421 i = INTVAL (XEXP (x, 1));
2422 if (i == 1 || i == 4)
2423 {
2424 total = 1 + rtx_cost (XEXP (x, 0), PLUS, 0, speed);
2425 goto alldone;
2426 }
2427 }
2428 goto do_arith_costs;
2429
2430 case MINUS:
2431 case AND:
2432 case IOR:
2433 case XOR:
2434 case NOT:
2435 case NEG:
2436 case ZERO_EXTEND:
2437 case SIGN_EXTEND:
2438 case COMPARE:
2439 case BSWAP:
2440 case CLZ:
2441 do_arith_costs:
2442 total = (speed ? COSTS_N_INSNS (1) : 2);
2443 break;
2444
2445 case ASHIFT:
2446 /* Notice the size difference of ASL2 and variants. */
2447 if (!speed && CONST_INT_P (XEXP (x, 1)))
2448 switch (INTVAL (XEXP (x, 1)))
2449 {
2450 case 1:
2451 case 2:
2452 total = 1;
2453 goto alldone;
2454 case 3:
2455 case 4:
2456 total = 2;
2457 goto alldone;
2458 }
2459 /* FALLTHRU */
2460
2461 case ASHIFTRT:
2462 case LSHIFTRT:
2463 total = (speed ? COSTS_N_INSNS (1) : 3);
2464 goto alldone;
2465
2466 case MULT:
2467 total = (speed ? COSTS_N_INSNS (3) : 2);
2468 break;
2469
2470 case DIV:
2471 case UDIV:
2472 case MOD:
2473 case UMOD:
2474 total = (speed ? COSTS_N_INSNS (39)
2475 /* Include space to load+retrieve MDR. */
2476 : code == MOD || code == UMOD ? 6 : 4);
2477 break;
2478
2479 case MEM:
2480 total = mn10300_address_cost (XEXP (x, 0), GET_MODE (x),
2481 MEM_ADDR_SPACE (x), speed);
2482 if (speed)
2483 total = COSTS_N_INSNS (2 + total);
2484 goto alldone;
2485
2486 default:
2487 /* Probably not implemented. Assume external call. */
2488 total = (speed ? COSTS_N_INSNS (10) : 7);
2489 break;
2490 }
2491
2492 *ptotal = total;
2493 return false;
2494
2495 alldone:
2496 *ptotal = total;
2497 return true;
2498 }
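
/* As a size example: setting a register to the constant 100 falls in
   the 8-bit range and is costed at 2 bytes above, while setting it to
   a SYMBOL_REF is treated as a full 32-bit immediate and costed at 6.  */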
2499
2500 /* If using PIC, mark a SYMBOL_REF for a non-global symbol so that we
2501 may access it using GOTOFF instead of GOT. */
2502
2503 static void
2504 mn10300_encode_section_info (tree decl, rtx rtl, int first)
2505 {
2506 rtx symbol;
2507
2508 default_encode_section_info (decl, rtl, first);
2509
2510 if (! MEM_P (rtl))
2511 return;
2512
2513 symbol = XEXP (rtl, 0);
2514 if (GET_CODE (symbol) != SYMBOL_REF)
2515 return;
2516
2517 if (flag_pic)
2518 SYMBOL_REF_FLAG (symbol) = (*targetm.binds_local_p) (decl);
2519 }
2520
2521 /* Dispatch tables on the mn10300 are extremely expensive in terms of code
2522 and readonly data size. So we crank up the case threshold value to
2523 encourage a series of if/else comparisons to implement many small switch
2524 statements. In theory, this value could be increased much more if we
2525 were solely optimizing for space, but we keep it "reasonable" to avoid
2526 a serious loss of code efficiency. */
2527
2528 static unsigned int
2529 mn10300_case_values_threshold (void)
2530 {
2531 return 6;
2532 }
2533
2534 /* Worker function for TARGET_TRAMPOLINE_INIT. */
2535
2536 static void
2537 mn10300_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
2538 {
2539 rtx mem, disp, fnaddr = XEXP (DECL_RTL (fndecl), 0);
2540
2541 /* This is a strict alignment target, which means that we play
2542 some games to make sure that the locations at which we need
2543 to store <chain> and <disp> wind up at aligned addresses.
2544
2545 0x28 0x00 add 0,d0
2546 0xfc 0xdd mov chain,a1
2547 <chain>
2548 0xf8 0xed 0x00 btst 0,d1
2549 0xdc jmp fnaddr
2550 <disp>
2551
2552 Note that the two extra insns are effectively nops; they
2553 clobber the flags but do not affect the contents of D0 or D1. */
2554
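/* The mn10300 is little-endian, so the two SImode immediates stored
   below, 0xddfc0028 and 0xdc00edf8, produce exactly the byte sequences
   0x28 0x00 0xfc 0xdd and 0xf8 0xed 0x00 0xdc shown in the listing
   above.  */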
2555 disp = expand_binop (SImode, sub_optab, fnaddr,
2556 plus_constant (Pmode, XEXP (m_tramp, 0), 11),
2557 NULL_RTX, 1, OPTAB_DIRECT);
2558
2559 mem = adjust_address (m_tramp, SImode, 0);
2560 emit_move_insn (mem, gen_int_mode (0xddfc0028, SImode));
2561 mem = adjust_address (m_tramp, SImode, 4);
2562 emit_move_insn (mem, chain_value);
2563 mem = adjust_address (m_tramp, SImode, 8);
2564 emit_move_insn (mem, gen_int_mode (0xdc00edf8, SImode));
2565 mem = adjust_address (m_tramp, SImode, 12);
2566 emit_move_insn (mem, disp);
2567 }
2568
2569 /* Output the assembler code for a C++ thunk function.
2570 THUNK_DECL is the declaration for the thunk function itself, FUNCTION
2571 is the decl for the target function. DELTA is an immediate constant
2572 offset to be added to the THIS parameter. If VCALL_OFFSET is nonzero
2573 the word at the adjusted address *(*THIS' + VCALL_OFFSET) should be
2574 additionally added to THIS. Finally jump to the entry point of
2575 FUNCTION. */
2576
2577 static void
2578 mn10300_asm_output_mi_thunk (FILE * file,
2579 tree thunk_fndecl ATTRIBUTE_UNUSED,
2580 HOST_WIDE_INT delta,
2581 HOST_WIDE_INT vcall_offset,
2582 tree function)
2583 {
2584 const char * _this;
2585
2586 /* Get the register holding the THIS parameter. Handle the case
2587 where there is a hidden first argument for a returned structure. */
2588 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
2589 _this = reg_names [FIRST_ARGUMENT_REGNUM + 1];
2590 else
2591 _this = reg_names [FIRST_ARGUMENT_REGNUM];
2592
2593 fprintf (file, "\t%s Thunk Entry Point:\n", ASM_COMMENT_START);
2594
2595 if (delta)
2596 fprintf (file, "\tadd %d, %s\n", (int) delta, _this);
2597
2598 if (vcall_offset)
2599 {
2600 const char * scratch = reg_names [FIRST_ADDRESS_REGNUM + 1];
2601
2602 fprintf (file, "\tmov %s, %s\n", _this, scratch);
2603 fprintf (file, "\tmov (%s), %s\n", scratch, scratch);
2604 fprintf (file, "\tadd %d, %s\n", (int) vcall_offset, scratch);
2605 fprintf (file, "\tmov (%s), %s\n", scratch, scratch);
2606 fprintf (file, "\tadd %s, %s\n", scratch, _this);
2607 }
2608
2609 fputs ("\tjmp ", file);
2610 assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
2611 putc ('\n', file);
2612 }
2613
2614 /* Return true if mn10300_asm_output_mi_thunk would be able to output the
2615 assembler code for the thunk function specified by the arguments
2616 it is passed, and false otherwise. */
2617
2618 static bool
2619 mn10300_can_output_mi_thunk (const_tree thunk_fndecl ATTRIBUTE_UNUSED,
2620 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
2621 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
2622 const_tree function ATTRIBUTE_UNUSED)
2623 {
2624 return true;
2625 }
2626
2627 bool
2628 mn10300_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
2629 {
2630 if (REGNO_REG_CLASS (regno) == FP_REGS
2631 || REGNO_REG_CLASS (regno) == FP_ACC_REGS)
2632 /* Do not store integer values in FP registers. */
2633 return GET_MODE_CLASS (mode) == MODE_FLOAT && ((regno & 1) == 0);
2634
2635 if (! TARGET_AM33 && REGNO_REG_CLASS (regno) == EXTENDED_REGS)
2636 return false;
2637
2638 if (((regno) & 1) == 0 || GET_MODE_SIZE (mode) == 4)
2639 return true;
2640
2641 if (REGNO_REG_CLASS (regno) == DATA_REGS
2642 || (TARGET_AM33 && REGNO_REG_CLASS (regno) == ADDRESS_REGS)
2643 || REGNO_REG_CLASS (regno) == EXTENDED_REGS)
2644 return GET_MODE_SIZE (mode) <= 4;
2645
2646 return false;
2647 }
2648
2649 bool
2650 mn10300_modes_tieable (enum machine_mode mode1, enum machine_mode mode2)
2651 {
2652 if (GET_MODE_CLASS (mode1) == MODE_FLOAT
2653 && GET_MODE_CLASS (mode2) != MODE_FLOAT)
2654 return false;
2655
2656 if (GET_MODE_CLASS (mode2) == MODE_FLOAT
2657 && GET_MODE_CLASS (mode1) != MODE_FLOAT)
2658 return false;
2659
2660 if (TARGET_AM33
2661 || mode1 == mode2
2662 || (GET_MODE_SIZE (mode1) <= 4 && GET_MODE_SIZE (mode2) <= 4))
2663 return true;
2664
2665 return false;
2666 }
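
/* So, for example, SImode and HImode values may share a register on any
   target, while SFmode never ties with an integer mode.  */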
2667
2668 static int
2669 cc_flags_for_mode (enum machine_mode mode)
2670 {
2671 switch (mode)
2672 {
2673 case CCmode:
2674 return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_C | CC_FLAG_V;
2675 case CCZNCmode:
2676 return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_C;
2677 case CCZNmode:
2678 return CC_FLAG_Z | CC_FLAG_N;
2679 case CC_FLOATmode:
2680 return -1;
2681 default:
2682 gcc_unreachable ();
2683 }
2684 }
2685
2686 static int
2687 cc_flags_for_code (enum rtx_code code)
2688 {
2689 switch (code)
2690 {
2691 case EQ: /* Z */
2692 case NE: /* ~Z */
2693 return CC_FLAG_Z;
2694
2695 case LT: /* N */
2696 case GE: /* ~N */
2697 return CC_FLAG_N;
2699
2700 case GT: /* ~(Z|(N^V)) */
2701 case LE: /* Z|(N^V) */
2702 return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_V;
2703
2704 case GEU: /* ~C */
2705 case LTU: /* C */
2706 return CC_FLAG_C;
2707
2708 case GTU: /* ~(C | Z) */
2709 case LEU: /* C | Z */
2710 return CC_FLAG_Z | CC_FLAG_C;
2711
2712 case ORDERED:
2713 case UNORDERED:
2714 case LTGT:
2715 case UNEQ:
2716 case UNGE:
2717 case UNGT:
2718 case UNLE:
2719 case UNLT:
2720 return -1;
2721
2722 default:
2723 gcc_unreachable ();
2724 }
2725 }
2726
2727 enum machine_mode
2728 mn10300_select_cc_mode (enum rtx_code code, rtx x, rtx y ATTRIBUTE_UNUSED)
2729 {
2730 int req;
2731
2732 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2733 return CC_FLOATmode;
2734
2735 req = cc_flags_for_code (code);
2736
2737 if (req & CC_FLAG_V)
2738 return CCmode;
2739 if (req & CC_FLAG_C)
2740 return CCZNCmode;
2741 return CCZNmode;
2742 }
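
/* For example, EQ needs only the Z flag and so gets CCZNmode, unsigned
   LTU needs C and gets CCZNCmode, and signed GT needs the V flag and
   therefore the full CCmode.  */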
2743
2744 static inline bool
2745 is_load_insn (rtx insn)
2746 {
2747 if (GET_CODE (PATTERN (insn)) != SET)
2748 return false;
2749
2750 return MEM_P (SET_SRC (PATTERN (insn)));
2751 }
2752
2753 static inline bool
2754 is_store_insn (rtx insn)
2755 {
2756 if (GET_CODE (PATTERN (insn)) != SET)
2757 return false;
2758
2759 return MEM_P (SET_DEST (PATTERN (insn)));
2760 }
2761
2762 /* Update scheduling costs for situations that cannot be
2763 described using the attributes and DFA machinery.
2764 DEP is the insn being scheduled.
2765 INSN is the previous insn.
2766 COST is the current cycle cost for DEP. */
2767
2768 static int
2769 mn10300_adjust_sched_cost (rtx insn, rtx link, rtx dep, int cost)
2770 {
2771 int timings = get_attr_timings (insn);
2772
2773 if (!TARGET_AM33)
2774 return 1;
2775
2776 if (GET_CODE (insn) == PARALLEL)
2777 insn = XVECEXP (insn, 0, 0);
2778
2779 if (GET_CODE (dep) == PARALLEL)
2780 dep = XVECEXP (dep, 0, 0);
2781
2782 /* For the AM34 a load instruction that follows a
2783 store instruction incurs an extra cycle of delay. */
2784 if (mn10300_tune_cpu == PROCESSOR_AM34
2785 && is_load_insn (dep)
2786 && is_store_insn (insn))
2787 cost += 1;
2788
2789 /* For the AM34 a non-store, non-branch FPU insn that follows
2790 another FPU insn incurs a one cycle throughput increase. */
2791 else if (mn10300_tune_cpu == PROCESSOR_AM34
2792 && ! is_store_insn (insn)
2793 && ! JUMP_P (insn)
2794 && GET_CODE (PATTERN (dep)) == SET
2795 && GET_CODE (PATTERN (insn)) == SET
2796 && GET_MODE_CLASS (GET_MODE (SET_SRC (PATTERN (dep)))) == MODE_FLOAT
2797 && GET_MODE_CLASS (GET_MODE (SET_SRC (PATTERN (insn)))) == MODE_FLOAT)
2798 cost += 1;
2799
2800 /* Resolve the conflict described in section 1-7-4 of
2801 Chapter 3 of the MN103E Series Instruction Manual
2802 where it says:
2803
2804 "When the preceding instruction is a CPU load or
2805 store instruction, a following FPU instruction
2806 cannot be executed until the CPU completes the
2807 latency period even though there are no register
2808 or flag dependencies between them." */
2809
2810 /* Only the AM33-2 (and later) CPUs have FPU instructions. */
2811 if (! TARGET_AM33_2)
2812 return cost;
2813
2814 /* If a data dependence already exists then the cost is correct. */
2815 if (REG_NOTE_KIND (link) == 0)
2816 return cost;
2817
2818 /* Check that the instruction about to be scheduled is an FPU instruction. */
2819 if (GET_CODE (PATTERN (dep)) != SET)
2820 return cost;
2821
2822 if (GET_MODE_CLASS (GET_MODE (SET_SRC (PATTERN (dep)))) != MODE_FLOAT)
2823 return cost;
2824
2825 /* Now check to see if the previous instruction is a load or store. */
2826 if (! is_load_insn (insn) && ! is_store_insn (insn))
2827 return cost;
2828
2829 /* XXX: Verify: The text of 1-7-4 implies that the restriction
2830 only applies when an INTEGER load/store precedes an FPU
2831 instruction, but is this true? For now we assume that it is. */
2832 if (GET_MODE_CLASS (GET_MODE (SET_SRC (PATTERN (insn)))) != MODE_INT)
2833 return cost;
2834
2835 /* Extract the latency value from the timings attribute. */
2836 return timings < 100 ? (timings % 10) : (timings % 100);
2837 }
2838
2839 static void
2840 mn10300_conditional_register_usage (void)
2841 {
2842 unsigned int i;
2843
2844 if (!TARGET_AM33)
2845 {
2846 for (i = FIRST_EXTENDED_REGNUM;
2847 i <= LAST_EXTENDED_REGNUM; i++)
2848 fixed_regs[i] = call_used_regs[i] = 1;
2849 }
2850 if (!TARGET_AM33_2)
2851 {
2852 for (i = FIRST_FP_REGNUM;
2853 i <= LAST_FP_REGNUM; i++)
2854 fixed_regs[i] = call_used_regs[i] = 1;
2855 }
2856 if (flag_pic)
2857 fixed_regs[PIC_OFFSET_TABLE_REGNUM] =
2858 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
2859 }
2860
2861 /* Worker function for TARGET_MD_ASM_CLOBBERS.
2862 We do this in the mn10300 backend to maintain source compatibility
2863 with the old cc0-based compiler. */
2864
2865 static tree
2866 mn10300_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
2867 tree inputs ATTRIBUTE_UNUSED,
2868 tree clobbers)
2869 {
2870 clobbers = tree_cons (NULL_TREE, build_string (5, "EPSW"),
2871 clobbers);
2872 return clobbers;
2873 }
2874 \f
2875 /* A helper function for splitting cbranch patterns after reload. */
2876
2877 void
2878 mn10300_split_cbranch (enum machine_mode cmp_mode, rtx cmp_op, rtx label_ref)
2879 {
2880 rtx flags, x;
2881
2882 flags = gen_rtx_REG (cmp_mode, CC_REG);
2883 x = gen_rtx_COMPARE (cmp_mode, XEXP (cmp_op, 0), XEXP (cmp_op, 1));
2884 x = gen_rtx_SET (VOIDmode, flags, x);
2885 emit_insn (x);
2886
2887 x = gen_rtx_fmt_ee (GET_CODE (cmp_op), VOIDmode, flags, const0_rtx);
2888 x = gen_rtx_IF_THEN_ELSE (VOIDmode, x, label_ref, pc_rtx);
2889 x = gen_rtx_SET (VOIDmode, pc_rtx, x);
2890 emit_jump_insn (x);
2891 }
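
/* For example, a branch on (lt (reg d0) (reg d1)) to label L is split
   into (illustrative RTL):

	(set (reg:CCZN CC_REG) (compare:CCZN (reg d0) (reg d1)))
	(set (pc) (if_then_else (lt (reg:CCZN CC_REG) (const_int 0))
				(label_ref L) (pc)))  */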
2892
2893 /* A helper function for matching parallels that set the flags. */
2894
2895 bool
2896 mn10300_match_ccmode (rtx insn, enum machine_mode cc_mode)
2897 {
2898 rtx op1, flags;
2899 enum machine_mode flags_mode;
2900
2901 gcc_checking_assert (XVECLEN (PATTERN (insn), 0) == 2);
2902
2903 op1 = XVECEXP (PATTERN (insn), 0, 1);
2904 gcc_checking_assert (GET_CODE (SET_SRC (op1)) == COMPARE);
2905
2906 flags = SET_DEST (op1);
2907 flags_mode = GET_MODE (flags);
2908
2909 if (GET_MODE (SET_SRC (op1)) != flags_mode)
2910 return false;
2911 if (GET_MODE_CLASS (flags_mode) != MODE_CC)
2912 return false;
2913
2914 /* Ensure that the mode of FLAGS is compatible with CC_MODE. */
2915 if (cc_flags_for_mode (flags_mode) & ~cc_flags_for_mode (cc_mode))
2916 return false;
2917
2918 return true;
2919 }
2920
2921 /* This function is used to help split:
2922
2923 (set (reg) (and (reg) (int)))
2924
2925 into:
2926
2927 (set (reg) (shift (reg) (int)))
2928 (set (reg) (shift (reg) (int)))
2929
2930 where the shifts will be shorter than the "and" insn.
2931
2932 It returns the number of bits that should be shifted. A positive
2933 value means that the low bits are to be cleared (and hence the
2934 shifts should be right followed by left) whereas a negative value
2935 means that the high bits are to be cleared (left followed by right).
2936 Zero is returned when it would not be economical to split the AND. */
2937
2938 int
2939 mn10300_split_and_operand_count (rtx op)
2940 {
2941 HOST_WIDE_INT val = INTVAL (op);
2942 int count;
2943
2944 if (val < 0)
2945 {
2946 /* High bit is set, look for bits clear at the bottom. */
2947 count = exact_log2 (-val);
2948 if (count < 0)
2949 return 0;
2950 /* This is only a size win if we can use the asl2 insn. Otherwise we
2951 would be replacing 1 6-byte insn with 2 3-byte insns. */
2952 if (count > (optimize_insn_for_speed_p () ? 2 : 4))
2953 return 0;
2954 return count;
2955 }
2956 else
2957 {
2958 /* High bit is clear, look for bits set at the bottom. */
2959 count = exact_log2 (val + 1);
2960 count = 32 - count;
2961 /* Again, this is only a size win with asl2. */
2962 if (count > (optimize_insn_for_speed_p () ? 2 : 4))
2963 return 0;
2964 return -count;
2965 }
2966 }
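
/* Two worked cases: val == 0xfffffff0 (clear the low four bits) gives
   exact_log2 (-val) == 4, within the size-optimization limit, so 4 is
   returned and the low bits are cleared by a right shift then a left
   shift; val == 0x3fffffff gives 32 - exact_log2 (val + 1) == 2, so -2
   is returned and the high two bits are cleared by a left shift then a
   right shift.  */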
2967 \f
2968 struct liw_data
2969 {
2970 enum attr_liw slot;
2971 enum attr_liw_op op;
2972 rtx dest;
2973 rtx src;
2974 };
2975
2976 /* Decide if the given insn is a candidate for LIW bundling. If it is then
2977 extract the operands and LIW attributes from the insn and use them to fill
2978 in the liw_data structure. Return true upon success or false if the insn
2979 cannot be bundled. */
2980
2981 static bool
2982 extract_bundle (rtx insn, struct liw_data * pdata)
2983 {
2984 bool allow_consts = true;
2985 rtx p;
2986
2987 gcc_assert (pdata != NULL);
2988
2989 if (insn == NULL_RTX)
2990 return false;
2991 /* Make sure that we are dealing with a simple SET insn. */
2992 p = single_set (insn);
2993 if (p == NULL_RTX)
2994 return false;
2995
2996 /* Make sure that it could go into one of the LIW pipelines. */
2997 pdata->slot = get_attr_liw (insn);
2998 if (pdata->slot == LIW_BOTH)
2999 return false;
3000
3001 pdata->op = get_attr_liw_op (insn);
3002
3003 switch (pdata->op)
3004 {
3005 case LIW_OP_MOV:
3006 pdata->dest = SET_DEST (p);
3007 pdata->src = SET_SRC (p);
3008 break;
3009 case LIW_OP_CMP:
3010 pdata->dest = XEXP (SET_SRC (p), 0);
3011 pdata->src = XEXP (SET_SRC (p), 1);
3012 break;
3013 case LIW_OP_NONE:
3014 return false;
3015 case LIW_OP_AND:
3016 case LIW_OP_OR:
3017 case LIW_OP_XOR:
3018 /* The AND, OR and XOR long instruction words only accept register arguments. */
3019 allow_consts = false;
3020 /* Fall through. */
3021 default:
3022 pdata->dest = SET_DEST (p);
3023 pdata->src = XEXP (SET_SRC (p), 1);
3024 break;
3025 }
3026
3027 if (! REG_P (pdata->dest))
3028 return false;
3029
3030 if (REG_P (pdata->src))
3031 return true;
3032
3033 return allow_consts && satisfies_constraint_O (pdata->src);
3034 }
3035
3036 /* Make sure that it is OK to execute LIW1 and LIW2 in parallel. GCC generated
3037 the instructions with the assumption that LIW1 would be executed before LIW2
3038 so we must check for overlaps between their sources and destinations. */
3039
3040 static bool
3041 check_liw_constraints (struct liw_data * pliw1, struct liw_data * pliw2)
3042 {
3043 /* Check for slot conflicts. */
3044 if (pliw2->slot == pliw1->slot && pliw1->slot != LIW_EITHER)
3045 return false;
3046
3047 /* If either operation is a compare, then "dest" is really an input; the real
3048 destination is CC_REG. So these instructions need different checks. */
3049
3050 /* Changing "CMP ; OP" into "CMP | OP" is OK because the comparison will
3051 check its values prior to any changes made by OP. */
3052 if (pliw1->op == LIW_OP_CMP)
3053 {
3054 /* Two sequential comparisons means dead code, which ought to
3055 have been eliminated given that bundling only happens with
3056 optimization. We cannot bundle them in any case. */
3057 gcc_assert (pliw1->op != pliw2->op);
3058 return true;
3059 }
3060
3061 /* Changing "OP ; CMP" into "OP | CMP" does not work if the value being compared
3062 is the destination of OP, as the CMP will look at the old value, not the new
3063 one. */
3064 if (pliw2->op == LIW_OP_CMP)
3065 {
3066 if (REGNO (pliw2->dest) == REGNO (pliw1->dest))
3067 return false;
3068
3069 if (REG_P (pliw2->src))
3070 return REGNO (pliw2->src) != REGNO (pliw1->dest);
3071
3072 return true;
3073 }
3074
3075 /* Changing "OP1 ; OP2" into "OP1 | OP2" does not work if they both write to the
3076 same destination register. */
3077 if (REGNO (pliw2->dest) == REGNO (pliw1->dest))
3078 return false;
3079
3080 /* Changing "OP1 ; OP2" into "OP1 | OP2" generally does not work if the destination
3081 of OP1 is the source of OP2. The exception is when OP1 is a MOVE instruction, in
3082 which case we can replace the source in OP2 with the source of OP1. */
3083 if (REG_P (pliw2->src) && REGNO (pliw2->src) == REGNO (pliw1->dest))
3084 {
3085 if (pliw1->op == LIW_OP_MOV)
3086 {
3087 if (! REG_P (pliw1->src)
3088 && (pliw2->op == LIW_OP_AND
3089 || pliw2->op == LIW_OP_OR
3090 || pliw2->op == LIW_OP_XOR))
3091 return false;
3092
3093 pliw2->src = pliw1->src;
3094 return true;
3095 }
3096 return false;
3097 }
3098
3099 /* Everything else is OK. */
3100 return true;
3101 }
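
/* For example, a MOV that writes d1 followed by a CMP that reads d1
   cannot be bundled, since the CMP would see d1's old value; with the
   CMP first the pair bundles fine, because the comparison latches its
   inputs before the MOV writes.  */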
3102
3103 /* Combine pairs of insns into LIW bundles. */
3104
3105 static void
3106 mn10300_bundle_liw (void)
3107 {
3108 rtx r;
3109
3110 for (r = get_insns (); r != NULL_RTX; r = next_nonnote_nondebug_insn (r))
3111 {
3112 rtx insn1, insn2;
3113 struct liw_data liw1, liw2;
3114
3115 insn1 = r;
3116 if (! extract_bundle (insn1, & liw1))
3117 continue;
3118
3119 insn2 = next_nonnote_nondebug_insn (insn1);
3120 if (! extract_bundle (insn2, & liw2))
3121 continue;
3122
3123 /* Check for source/destination overlap. */
3124 if (! check_liw_constraints (& liw1, & liw2))
3125 continue;
3126
3127 if (liw1.slot == LIW_OP2 || liw2.slot == LIW_OP1)
3128 {
3129 struct liw_data temp;
3130
3131 temp = liw1;
3132 liw1 = liw2;
3133 liw2 = temp;
3134 }
3135
3136 delete_insn (insn2);
3137
3138 if (liw1.op == LIW_OP_CMP)
3139 insn2 = gen_cmp_liw (liw2.dest, liw2.src, liw1.dest, liw1.src,
3140 GEN_INT (liw2.op));
3141 else if (liw2.op == LIW_OP_CMP)
3142 insn2 = gen_liw_cmp (liw1.dest, liw1.src, liw2.dest, liw2.src,
3143 GEN_INT (liw1.op));
3144 else
3145 insn2 = gen_liw (liw1.dest, liw2.dest, liw1.src, liw2.src,
3146 GEN_INT (liw1.op), GEN_INT (liw2.op));
3147
3148 insn2 = emit_insn_after (insn2, insn1);
3149 delete_insn (insn1);
3150 r = insn2;
3151 }
3152 }
3153
3154 #define DUMP(reason, insn) \
3155 do \
3156 { \
3157 if (dump_file) \
3158 { \
3159 fprintf (dump_file, reason "\n"); \
3160 if (insn != NULL_RTX) \
3161 print_rtl_single (dump_file, insn); \
3162 fprintf (dump_file, "\n"); \
3163 } \
3164 } \
3165 while (0)
3166
3167 /* Replace the BRANCH insn with a Lcc insn that goes to LABEL.
3168 Insert a SETLB insn just before LABEL. */
3169
3170 static void
3171 mn10300_insert_setlb_lcc (rtx label, rtx branch)
3172 {
3173 rtx lcc, comparison, cmp_reg;
3174
3175 if (LABEL_NUSES (label) > 1)
3176 {
3177 rtx insn;
3178
3179 /* This label is used both as an entry point to the loop
3180 and as a loop-back point for the loop. We need to separate
3181 these two roles so that the SETLB happens upon entry,
3182 but the loop-back does not go to the SETLB instruction. */
3183 DUMP ("Inserting SETLB insn after:", label);
3184 insn = emit_insn_after (gen_setlb (), label);
3185 label = gen_label_rtx ();
3186 emit_label_after (label, insn);
3187 DUMP ("Created new loop-back label:", label);
3188 }
3189 else
3190 {
3191 DUMP ("Inserting SETLB insn before:", label);
3192 emit_insn_before (gen_setlb (), label);
3193 }
3194
3195 comparison = XEXP (SET_SRC (PATTERN (branch)), 0);
3196 cmp_reg = XEXP (comparison, 0);
3197 gcc_assert (REG_P (cmp_reg));
3198
3199 /* The comparison must already have been split out of the branch,
3200 leaving the flags register as its first operand. */
3201 gcc_assert (REGNO (cmp_reg) == CC_REG);
3202
3203 if (GET_MODE (cmp_reg) == CC_FLOATmode)
3204 lcc = gen_FLcc (comparison, label);
3205 else
3206 lcc = gen_Lcc (comparison, label);
3207
3208 lcc = emit_jump_insn_before (lcc, branch);
3209 mark_jump_label (XVECEXP (PATTERN (lcc), 0, 0), lcc, 0);
3210 JUMP_LABEL (lcc) = label;
3211 DUMP ("Replacing branch insn...", branch);
3212 DUMP ("... with Lcc insn:", lcc);
3213 delete_insn (branch);
3214 }
3215
3216 static bool
3217 mn10300_block_contains_call (basic_block block)
3218 {
3219 rtx insn;
3220
3221 FOR_BB_INSNS (block, insn)
3222 if (CALL_P (insn))
3223 return true;
3224
3225 return false;
3226 }
3227
3228 static bool
3229 mn10300_loop_contains_call_insn (loop_p loop)
3230 {
3231 basic_block * bbs;
3232 bool result = false;
3233 unsigned int i;
3234
3235 bbs = get_loop_body (loop);
3236
3237 for (i = 0; i < loop->num_nodes; i++)
3238 if (mn10300_block_contains_call (bbs[i]))
3239 {
3240 result = true;
3241 break;
3242 }
3243
3244 free (bbs);
3245 return result;
3246 }
3247
3248 static void
3249 mn10300_scan_for_setlb_lcc (void)
3250 {
3251 loop_p loop;
3252
3253 DUMP ("Looking for loops that can use the SETLB insn", NULL_RTX);
3254
3255 df_analyze ();
3256 compute_bb_for_insn ();
3257
3258 /* Find the loops. */
3259 loop_optimizer_init (AVOID_CFG_MODIFICATIONS);
3260
3261 /* FIXME: For now we only investigate innermost loops. In practice however
3262 if an inner loop is not suitable for use with the SETLB/Lcc insns, it may
3263 be the case that its parent loop is suitable. Thus we should check all
3264 loops, but work from the innermost outwards. */
3265 FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
3266 {
3267 const char * reason = NULL;
3268
3269 /* Check to see if we can modify this loop. If we cannot
3270 then set 'reason' to describe why it could not be done. */
3271 if (loop->latch == NULL)
3272 reason = "it contains multiple latches";
3273 else if (loop->header != loop->latch)
3274 /* FIXME: We could handle loops that span multiple blocks,
3275 but this requires a lot more work tracking down the branches
3276 that need altering, so for now keep things simple. */
3277 reason = "the loop spans multiple blocks";
3278 else if (mn10300_loop_contains_call_insn (loop))
3279 reason = "it contains CALL insns";
3280 else
3281 {
3282 rtx branch = BB_END (loop->latch);
3283
3284 gcc_assert (JUMP_P (branch));
3285 if (single_set (branch) == NULL_RTX || ! any_condjump_p (branch))
3286 /* We cannot optimize tablejumps and the like. */
3287 /* FIXME: We could handle unconditional jumps. */
3288 reason = "it is not a simple loop";
3289 else
3290 {
3291 rtx label;
3292
3293 if (dump_file)
3294 flow_loop_dump (loop, dump_file, NULL, 0);
3295
3296 label = BB_HEAD (loop->header);
3297 gcc_assert (LABEL_P (label));
3298
3299 mn10300_insert_setlb_lcc (label, branch);
3300 }
3301 }
3302
3303 if (dump_file && reason != NULL)
3304 fprintf (dump_file, "Loop starting with insn %d is not suitable because %s\n",
3305 INSN_UID (BB_HEAD (loop->header)),
3306 reason);
3307 }
3308
3309 loop_optimizer_finalize ();
3310
3311 df_finish_pass (false);
3312
3313 DUMP ("SETLB scan complete", NULL_RTX);
3314 }
3315
3316 static void
3317 mn10300_reorg (void)
3318 {
3319 /* These are optimizations, so only run them if optimizing. */
3320 if (TARGET_AM33 && (optimize > 0 || optimize_size))
3321 {
3322 if (TARGET_ALLOW_SETLB)
3323 mn10300_scan_for_setlb_lcc ();
3324
3325 if (TARGET_ALLOW_LIW)
3326 mn10300_bundle_liw ();
3327 }
3328 }
3329 \f
3330 /* Initialize the GCC target structure. */
3331
3332 #undef TARGET_MACHINE_DEPENDENT_REORG
3333 #define TARGET_MACHINE_DEPENDENT_REORG mn10300_reorg
3334
3335 #undef TARGET_ASM_ALIGNED_HI_OP
3336 #define TARGET_ASM_ALIGNED_HI_OP "\t.hword\t"
3337
3338 #undef TARGET_LEGITIMIZE_ADDRESS
3339 #define TARGET_LEGITIMIZE_ADDRESS mn10300_legitimize_address
3340
3341 #undef TARGET_ADDRESS_COST
3342 #define TARGET_ADDRESS_COST mn10300_address_cost
3343 #undef TARGET_REGISTER_MOVE_COST
3344 #define TARGET_REGISTER_MOVE_COST mn10300_register_move_cost
3345 #undef TARGET_MEMORY_MOVE_COST
3346 #define TARGET_MEMORY_MOVE_COST mn10300_memory_move_cost
3347 #undef TARGET_RTX_COSTS
3348 #define TARGET_RTX_COSTS mn10300_rtx_costs
3349
3350 #undef TARGET_ASM_FILE_START
3351 #define TARGET_ASM_FILE_START mn10300_file_start
3352 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
3353 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
3354
3355 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
3356 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA mn10300_asm_output_addr_const_extra
3357
3358 #undef TARGET_OPTION_OVERRIDE
3359 #define TARGET_OPTION_OVERRIDE mn10300_option_override
3360
3361 #undef TARGET_ENCODE_SECTION_INFO
3362 #define TARGET_ENCODE_SECTION_INFO mn10300_encode_section_info
3363
3364 #undef TARGET_PROMOTE_PROTOTYPES
3365 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
3366 #undef TARGET_RETURN_IN_MEMORY
3367 #define TARGET_RETURN_IN_MEMORY mn10300_return_in_memory
3368 #undef TARGET_PASS_BY_REFERENCE
3369 #define TARGET_PASS_BY_REFERENCE mn10300_pass_by_reference
3370 #undef TARGET_CALLEE_COPIES
3371 #define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
3372 #undef TARGET_ARG_PARTIAL_BYTES
3373 #define TARGET_ARG_PARTIAL_BYTES mn10300_arg_partial_bytes
3374 #undef TARGET_FUNCTION_ARG
3375 #define TARGET_FUNCTION_ARG mn10300_function_arg
3376 #undef TARGET_FUNCTION_ARG_ADVANCE
3377 #define TARGET_FUNCTION_ARG_ADVANCE mn10300_function_arg_advance
3378
3379 #undef TARGET_EXPAND_BUILTIN_SAVEREGS
3380 #define TARGET_EXPAND_BUILTIN_SAVEREGS mn10300_builtin_saveregs
3381 #undef TARGET_EXPAND_BUILTIN_VA_START
3382 #define TARGET_EXPAND_BUILTIN_VA_START mn10300_va_start
3383
3384 #undef TARGET_CASE_VALUES_THRESHOLD
3385 #define TARGET_CASE_VALUES_THRESHOLD mn10300_case_values_threshold
3386
3387 #undef TARGET_LEGITIMATE_ADDRESS_P
3388 #define TARGET_LEGITIMATE_ADDRESS_P mn10300_legitimate_address_p
3389 #undef TARGET_DELEGITIMIZE_ADDRESS
3390 #define TARGET_DELEGITIMIZE_ADDRESS mn10300_delegitimize_address
3391 #undef TARGET_LEGITIMATE_CONSTANT_P
3392 #define TARGET_LEGITIMATE_CONSTANT_P mn10300_legitimate_constant_p
3393
3394 #undef TARGET_PREFERRED_RELOAD_CLASS
3395 #define TARGET_PREFERRED_RELOAD_CLASS mn10300_preferred_reload_class
3396 #undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
3397 #define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS \
3398 mn10300_preferred_output_reload_class
3399 #undef TARGET_SECONDARY_RELOAD
3400 #define TARGET_SECONDARY_RELOAD mn10300_secondary_reload
3401
3402 #undef TARGET_TRAMPOLINE_INIT
3403 #define TARGET_TRAMPOLINE_INIT mn10300_trampoline_init
3404
3405 #undef TARGET_FUNCTION_VALUE
3406 #define TARGET_FUNCTION_VALUE mn10300_function_value
3407 #undef TARGET_LIBCALL_VALUE
3408 #define TARGET_LIBCALL_VALUE mn10300_libcall_value
3409
3410 #undef TARGET_ASM_OUTPUT_MI_THUNK
3411 #define TARGET_ASM_OUTPUT_MI_THUNK mn10300_asm_output_mi_thunk
3412 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
3413 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK mn10300_can_output_mi_thunk
3414
3415 #undef TARGET_SCHED_ADJUST_COST
3416 #define TARGET_SCHED_ADJUST_COST mn10300_adjust_sched_cost
3417
3418 #undef TARGET_CONDITIONAL_REGISTER_USAGE
3419 #define TARGET_CONDITIONAL_REGISTER_USAGE mn10300_conditional_register_usage
3420
3421 #undef TARGET_MD_ASM_CLOBBERS
3422 #define TARGET_MD_ASM_CLOBBERS mn10300_md_asm_clobbers
3423
3424 #undef TARGET_FLAGS_REGNUM
3425 #define TARGET_FLAGS_REGNUM CC_REG
3426
3427 struct gcc_target targetm = TARGET_INITIALIZER;