/* Subroutines for insn-output.c for Matsushita MN10300 series
   Copyright (C) 1996-2016 Free Software Foundation, Inc.
   Contributed by Jeff Law (law@cygnus.com).

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "cfghooks.h"
#include "cfgloop.h"
#include "df.h"
#include "memmodel.h"
#include "tm_p.h"
#include "optabs.h"
#include "regs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "alias.h"
#include "stor-layout.h"
#include "varasm.h"
#include "calls.h"
#include "output.h"
#include "insn-attr.h"
#include "reload.h"
#include "explow.h"
#include "expr.h"
#include "tm-constrs.h"
#include "cfgrtl.h"
#include "dumpfile.h"
#include "builtins.h"

/* This file should be included last.  */
#include "target-def.h"

/* This is used in the am33_2.0-linux-gnu port, in which global symbol
   names are not prefixed by underscores, to tell whether to prefix a
   label with a plus sign or not, so that the assembler can tell
   symbol names from register names.  */
int mn10300_protect_label;

/* Selected processor type for tuning.  */
enum processor_type mn10300_tune_cpu = PROCESSOR_DEFAULT;

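/* Bitmasks for the condition code flags: Zero, Negative, Carry and
   oVerflow.  */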
#define CC_FLAG_Z	1
#define CC_FLAG_N	2
#define CC_FLAG_C	4
#define CC_FLAG_V	8

static int cc_flags_for_mode (machine_mode);
static int cc_flags_for_code (enum rtx_code);
\f
/* Implement TARGET_OPTION_OVERRIDE.  */
static void
mn10300_option_override (void)
{
  if (TARGET_AM33)
    target_flags &= ~MASK_MULT_BUG;
  else
    {
      /* Disable scheduling for the MN10300 as we do
         not have timing information available for it.  */
      flag_schedule_insns = 0;
      flag_schedule_insns_after_reload = 0;

      /* Force enable splitting of wide types, as otherwise it is trivial
         to run out of registers.  Indeed, this works so well that register
         allocation problems are now more common *without* optimization,
         when this flag is not enabled by default.  */
      flag_split_wide_types = 1;
    }

  if (mn10300_tune_string)
    {
      if (strcasecmp (mn10300_tune_string, "mn10300") == 0)
        mn10300_tune_cpu = PROCESSOR_MN10300;
      else if (strcasecmp (mn10300_tune_string, "am33") == 0)
        mn10300_tune_cpu = PROCESSOR_AM33;
      else if (strcasecmp (mn10300_tune_string, "am33-2") == 0)
        mn10300_tune_cpu = PROCESSOR_AM33_2;
      else if (strcasecmp (mn10300_tune_string, "am34") == 0)
        mn10300_tune_cpu = PROCESSOR_AM34;
      else
        error ("-mtune= expects mn10300, am33, am33-2, or am34");
    }
}

static void
mn10300_file_start (void)
{
  default_file_start ();

  if (TARGET_AM33_2)
    fprintf (asm_out_file, "\t.am33_2\n");
  else if (TARGET_AM33)
    fprintf (asm_out_file, "\t.am33\n");
}
\f
/* Note: This list must match the liw_op attribute in mn10300.md.  */

static const char *liw_op_names[] =
{
  "add", "cmp", "sub", "mov",
  "and", "or", "xor",
  "asr", "lsr", "asl",
  "none", "max"
};

/* Print operand X using operand code CODE to assembly language output file
   FILE.  */

void
mn10300_print_operand (FILE *file, rtx x, int code)
{
  switch (code)
    {
    case 'W':
      {
        unsigned int liw_op = UINTVAL (x);

        gcc_assert (TARGET_ALLOW_LIW);
        gcc_assert (liw_op < LIW_OP_MAX);
        fputs (liw_op_names[liw_op], file);
        break;
      }

    case 'b':
    case 'B':
      {
        enum rtx_code cmp = GET_CODE (x);
        machine_mode mode = GET_MODE (XEXP (x, 0));
        const char *str;
        int have_flags;

        if (code == 'B')
          cmp = reverse_condition (cmp);
        have_flags = cc_flags_for_mode (mode);

        switch (cmp)
          {
          case NE:
            str = "ne";
            break;
          case EQ:
            str = "eq";
            break;
          case GE:
            /* bge is smaller than bnc.  */
            str = (have_flags & CC_FLAG_V ? "ge" : "nc");
            break;
          case LT:
            str = (have_flags & CC_FLAG_V ? "lt" : "ns");
            break;
          case GT:
            str = "gt";
            break;
          case LE:
            str = "le";
            break;
          case GEU:
            str = "cc";
            break;
          case GTU:
            str = "hi";
            break;
          case LEU:
            str = "ls";
            break;
          case LTU:
            str = "cs";
            break;
          case ORDERED:
            str = "lge";
            break;
          case UNORDERED:
            str = "uo";
            break;
          case LTGT:
            str = "lg";
            break;
          case UNEQ:
            str = "ue";
            break;
          case UNGE:
            str = "uge";
            break;
          case UNGT:
            str = "ug";
            break;
          case UNLE:
            str = "ule";
            break;
          case UNLT:
            str = "ul";
            break;
          default:
            gcc_unreachable ();
          }

        gcc_checking_assert ((cc_flags_for_code (cmp) & ~have_flags) == 0);
        fputs (str, file);
      }
      break;

    case 'C':
      /* This is used for the operand to a call instruction;
         if it's a REG, enclose it in parens, else output
         the operand normally.  */
      if (REG_P (x))
        {
          fputc ('(', file);
          mn10300_print_operand (file, x, 0);
          fputc (')', file);
        }
      else
        mn10300_print_operand (file, x, 0);
      break;

    case 'D':
      switch (GET_CODE (x))
        {
        case MEM:
          fputc ('(', file);
          output_address (GET_MODE (x), XEXP (x, 0));
          fputc (')', file);
          break;

        case REG:
          fprintf (file, "fd%d", REGNO (x) - 18);
          break;

        default:
          gcc_unreachable ();
        }
      break;

      /* This is the least significant word of a 64-bit value.  */
    case 'L':
      switch (GET_CODE (x))
        {
        case MEM:
          fputc ('(', file);
          output_address (GET_MODE (x), XEXP (x, 0));
          fputc (')', file);
          break;

        case REG:
          fprintf (file, "%s", reg_names[REGNO (x)]);
          break;

        case SUBREG:
          fprintf (file, "%s", reg_names[subreg_regno (x)]);
          break;

        case CONST_DOUBLE:
          {
            long val[2];

            switch (GET_MODE (x))
              {
              case DFmode:
                REAL_VALUE_TO_TARGET_DOUBLE
                  (*CONST_DOUBLE_REAL_VALUE (x), val);
                fprintf (file, "0x%lx", val[0]);
                break;
              case SFmode:
                REAL_VALUE_TO_TARGET_SINGLE
                  (*CONST_DOUBLE_REAL_VALUE (x), val[0]);
                fprintf (file, "0x%lx", val[0]);
                break;
              case VOIDmode:
              case DImode:
                mn10300_print_operand_address (file,
                                               GEN_INT (CONST_DOUBLE_LOW (x)));
                break;
              default:
                break;
              }
            break;
          }

        case CONST_INT:
          {
            rtx low, high;
            split_double (x, &low, &high);
            fprintf (file, "%ld", (long) INTVAL (low));
            break;
          }

        default:
          gcc_unreachable ();
        }
      break;

      /* Similarly, but for the most significant word.  */
    case 'H':
      switch (GET_CODE (x))
        {
        case MEM:
          fputc ('(', file);
          x = adjust_address (x, SImode, 4);
          output_address (GET_MODE (x), XEXP (x, 0));
          fputc (')', file);
          break;

        case REG:
          fprintf (file, "%s", reg_names[REGNO (x) + 1]);
          break;

        case SUBREG:
          fprintf (file, "%s", reg_names[subreg_regno (x) + 1]);
          break;

        case CONST_DOUBLE:
          {
            long val[2];

            switch (GET_MODE (x))
              {
              case DFmode:
                REAL_VALUE_TO_TARGET_DOUBLE
                  (*CONST_DOUBLE_REAL_VALUE (x), val);
                fprintf (file, "0x%lx", val[1]);
                break;
              case SFmode:
                gcc_unreachable ();
              case VOIDmode:
              case DImode:
                mn10300_print_operand_address (file,
                                               GEN_INT (CONST_DOUBLE_HIGH (x)));
                break;
              default:
                break;
              }
            break;
          }

        case CONST_INT:
          {
            rtx low, high;
            split_double (x, &low, &high);
            fprintf (file, "%ld", (long) INTVAL (high));
            break;
          }

        default:
          gcc_unreachable ();
        }
      break;

    case 'A':
      fputc ('(', file);
      if (REG_P (XEXP (x, 0)))
        output_address (VOIDmode, gen_rtx_PLUS (SImode,
                                                XEXP (x, 0), const0_rtx));
      else
        output_address (VOIDmode, XEXP (x, 0));
      fputc (')', file);
      break;

    case 'N':
      gcc_assert (INTVAL (x) >= -128 && INTVAL (x) <= 255);
      fprintf (file, "%d", (int) ((~INTVAL (x)) & 0xff));
      break;

    case 'U':
      gcc_assert (INTVAL (x) >= -128 && INTVAL (x) <= 255);
      fprintf (file, "%d", (int) (INTVAL (x) & 0xff));
      break;

      /* For shift counts.  The hardware ignores the upper bits of
         any immediate, but the assembler will flag an out of range
         shift count as an error.  So we mask off the high bits
         of the immediate here.  */
    case 'S':
      if (CONST_INT_P (x))
        {
          fprintf (file, "%d", (int) (INTVAL (x) & 0x1f));
          break;
        }
      /* FALL THROUGH */

    default:
      switch (GET_CODE (x))
        {
        case MEM:
          fputc ('(', file);
          output_address (GET_MODE (x), XEXP (x, 0));
          fputc (')', file);
          break;

        case PLUS:
          output_address (VOIDmode, x);
          break;

        case REG:
          fprintf (file, "%s", reg_names[REGNO (x)]);
          break;

        case SUBREG:
          fprintf (file, "%s", reg_names[subreg_regno (x)]);
          break;

          /* This will only be single precision....  */
        case CONST_DOUBLE:
          {
            unsigned long val;

            REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), val);
            fprintf (file, "0x%lx", val);
            break;
          }

        case CONST_INT:
        case SYMBOL_REF:
        case CONST:
        case LABEL_REF:
        case CODE_LABEL:
        case UNSPEC:
          mn10300_print_operand_address (file, x);
          break;
        default:
          gcc_unreachable ();
        }
      break;
    }
}

/* Output assembly language for the address ADDR to FILE.  */

void
mn10300_print_operand_address (FILE *file, rtx addr)
{
  switch (GET_CODE (addr))
    {
    case POST_INC:
      mn10300_print_operand (file, XEXP (addr, 0), 0);
      fputc ('+', file);
      break;

    case POST_MODIFY:
      mn10300_print_operand (file, XEXP (addr, 0), 0);
      fputc ('+', file);
      fputc (',', file);
      mn10300_print_operand (file, XEXP (addr, 1), 0);
      break;

    case REG:
      mn10300_print_operand (file, addr, 0);
      break;
    case PLUS:
      {
        rtx base = XEXP (addr, 0);
        rtx index = XEXP (addr, 1);

        if (REG_P (index) && !REG_OK_FOR_INDEX_P (index))
          {
            rtx x = base;
            base = index;
            index = x;

            gcc_assert (REG_P (index) && REG_OK_FOR_INDEX_P (index));
          }
        gcc_assert (REG_OK_FOR_BASE_P (base));

        mn10300_print_operand (file, index, 0);
        fputc (',', file);
        mn10300_print_operand (file, base, 0);
        break;
      }
    case SYMBOL_REF:
      output_addr_const (file, addr);
      break;
    default:
      output_addr_const (file, addr);
      break;
    }
}

/* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA.

   Used for PIC-specific UNSPECs.  */

static bool
mn10300_asm_output_addr_const_extra (FILE *file, rtx x)
{
  if (GET_CODE (x) == UNSPEC)
    {
      switch (XINT (x, 1))
        {
        case UNSPEC_PIC:
          /* GLOBAL_OFFSET_TABLE or local symbols, no suffix.  */
          output_addr_const (file, XVECEXP (x, 0, 0));
          break;
        case UNSPEC_GOT:
          output_addr_const (file, XVECEXP (x, 0, 0));
          fputs ("@GOT", file);
          break;
        case UNSPEC_GOTOFF:
          output_addr_const (file, XVECEXP (x, 0, 0));
          fputs ("@GOTOFF", file);
          break;
        case UNSPEC_PLT:
          output_addr_const (file, XVECEXP (x, 0, 0));
          fputs ("@PLT", file);
          break;
        case UNSPEC_GOTSYM_OFF:
          assemble_name (file, GOT_SYMBOL_NAME);
          fputs ("-(", file);
          output_addr_const (file, XVECEXP (x, 0, 0));
          fputs ("-.)", file);
          break;
        default:
          return false;
        }
      return true;
    }
  else
    return false;
}

/* Count the number of FP registers that have to be saved.  */
static int
fp_regs_to_save (void)
{
  int i, n = 0;

  if (! TARGET_AM33_2)
    return 0;

  for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
    if (df_regs_ever_live_p (i) && ! call_really_used_regs[i])
      ++n;

  return n;
}

/* Print a set of registers in the format required by "movm" and "ret".
   Register K is saved if bit K of MASK is set.  The data and address
   registers can be stored individually, but the extended registers cannot.
   We assume that the mask already takes that into account.  For instance,
   bits 14 to 17 must have the same value.  */
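/* Illustrative example: with bits 2, 3, 6 and 7 set (d2, d3, a2, a3)
   and bits 14-17 set for the callee-saved extended registers, a MASK
   of 0x3c0cc prints as "[d2,d3,a2,a3,exreg1]".  */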

void
mn10300_print_reg_list (FILE *file, int mask)
{
  int need_comma;
  int i;

  need_comma = 0;
  fputc ('[', file);

  for (i = 0; i < FIRST_EXTENDED_REGNUM; i++)
    if ((mask & (1 << i)) != 0)
      {
        if (need_comma)
          fputc (',', file);
        fputs (reg_names [i], file);
        need_comma = 1;
      }

  if ((mask & 0x3c000) != 0)
    {
      gcc_assert ((mask & 0x3c000) == 0x3c000);
      if (need_comma)
        fputc (',', file);
      fputs ("exreg1", file);
      need_comma = 1;
    }

  fputc (']', file);
}

/* If the MDR register is never clobbered, we can use the RETF instruction
   which takes the address from the MDR register.  This is 3 cycles faster
   than having to load the address from the stack.  */

bool
mn10300_can_use_retf_insn (void)
{
  /* Don't bother if we're not optimizing.  In this case we won't
     have proper access to df_regs_ever_live_p.  */
  if (!optimize)
    return false;

  /* EH returns alter the saved return address; MDR is not current.  */
  if (crtl->calls_eh_return)
    return false;

  /* Obviously not if MDR is ever clobbered.  */
  if (df_regs_ever_live_p (MDR_REG))
    return false;

  /* ??? Careful not to use this during expand_epilogue etc.  */
  gcc_assert (!in_sequence_p ());
  return leaf_function_p ();
}

bool
mn10300_can_use_rets_insn (void)
{
  return !mn10300_initial_offset (ARG_POINTER_REGNUM, STACK_POINTER_REGNUM);
}

/* Returns the set of live, callee-saved registers as a bitmask.  The
   callee-saved extended registers cannot be stored individually, so
   all of them will be included in the mask if any one of them is used.
   Also returns the number of bytes in the registers in the mask if
   BYTES_SAVED is not NULL.  */

unsigned int
mn10300_get_live_callee_saved_regs (unsigned int * bytes_saved)
{
  int mask;
  int i;
  unsigned int count;

  count = mask = 0;
  for (i = 0; i <= LAST_EXTENDED_REGNUM; i++)
    if (df_regs_ever_live_p (i) && ! call_really_used_regs[i])
      {
        mask |= (1 << i);
        ++ count;
      }

  if ((mask & 0x3c000) != 0)
    {
      for (i = 0x04000; i < 0x40000; i <<= 1)
        if ((mask & i) == 0)
          ++ count;

      mask |= 0x3c000;
    }

  if (bytes_saved)
    * bytes_saved = count * UNITS_PER_WORD;

  return mask;
}

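/* Mark R (an insn or expression) as frame related, so that DWARF
   call-frame information is generated for it, and return R.  */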
static rtx
F (rtx r)
{
  RTX_FRAME_RELATED_P (r) = 1;
  return r;
}

/* Generate an instruction that pushes several registers onto the stack.
   Register K will be saved if bit K in MASK is set.  The function does
   nothing if MASK is zero.

   To be compatible with the "movm" instruction, the lowest-numbered
   register must be stored in the lowest slot.  If MASK is the set
   { R1,...,RN }, where R1...RN are ordered least first, the generated
   instruction will have the form:

       (parallel
         (set (reg:SI 9) (plus:SI (reg:SI 9) (const_int -N*4)))
         (set (mem:SI (plus:SI (reg:SI 9)
                               (const_int -1*4)))
              (reg:SI RN))
         ...
         (set (mem:SI (plus:SI (reg:SI 9)
                               (const_int -N*4)))
              (reg:SI R1)))  */

static void
mn10300_gen_multiple_store (unsigned int mask)
{
  /* The order in which registers are stored, from SP-4 through SP-N*4.  */
  static const unsigned int store_order[8] = {
    /* e2, e3: never saved */
    FIRST_EXTENDED_REGNUM + 4,
    FIRST_EXTENDED_REGNUM + 5,
    FIRST_EXTENDED_REGNUM + 6,
    FIRST_EXTENDED_REGNUM + 7,
    /* e0, e1, mdrq, mcrh, mcrl, mcvf: never saved.  */
    FIRST_DATA_REGNUM + 2,
    FIRST_DATA_REGNUM + 3,
    FIRST_ADDRESS_REGNUM + 2,
    FIRST_ADDRESS_REGNUM + 3,
    /* d0, d1, a0, a1, mdr, lir, lar: never saved.  */
  };

  rtx x, elts[9];
  unsigned int i;
  int count;

  if (mask == 0)
    return;

  for (i = count = 0; i < ARRAY_SIZE (store_order); ++i)
    {
      unsigned regno = store_order[i];

      if (((mask >> regno) & 1) == 0)
        continue;

      ++count;
      x = plus_constant (Pmode, stack_pointer_rtx, count * -4);
      x = gen_frame_mem (SImode, x);
      x = gen_rtx_SET (x, gen_rtx_REG (SImode, regno));
      elts[count] = F (x);

      /* Remove the register from the mask so that...  */
      mask &= ~(1u << regno);
    }

  /* ... we can make sure that we didn't try to use a register
     not listed in the store order.  */
  gcc_assert (mask == 0);

  /* Create the instruction that updates the stack pointer.  */
  x = plus_constant (Pmode, stack_pointer_rtx, count * -4);
  x = gen_rtx_SET (stack_pointer_rtx, x);
  elts[0] = F (x);

  /* We need one PARALLEL element to update the stack pointer and
     an additional element for each register that is stored.  */
  x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (count + 1, elts));
  F (emit_insn (x));
}

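/* Return the number of bits set in MASK; MASK & - MASK isolates the
   lowest set bit, which each iteration then clears.  */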
static inline unsigned int
popcount (unsigned int mask)
{
  unsigned int count = 0;

  while (mask)
    {
      ++ count;
      mask &= ~ (mask & - mask);
    }
  return count;
}

void
mn10300_expand_prologue (void)
{
  HOST_WIDE_INT size = mn10300_frame_size ();
  unsigned int mask;

  mask = mn10300_get_live_callee_saved_regs (NULL);
  /* If we use any of the callee-saved registers, save them now.  */
  mn10300_gen_multiple_store (mask);

  if (flag_stack_usage_info)
    current_function_static_stack_size = size + popcount (mask) * 4;

  if (TARGET_AM33_2 && fp_regs_to_save ())
    {
      int num_regs_to_save = fp_regs_to_save (), i;
      HOST_WIDE_INT xsize;
      enum
      {
        save_sp_merge,
        save_sp_no_merge,
        save_sp_partial_merge,
        save_a0_merge,
        save_a0_no_merge
      } strategy;
      unsigned int strategy_size = (unsigned)-1, this_strategy_size;
      rtx reg;

      if (flag_stack_usage_info)
        current_function_static_stack_size += num_regs_to_save * 4;

      /* We have several different strategies to save FP registers.
         We can store them using SP offsets, which is beneficial if
         there are just a few registers to save, or we can use `a0' in
         post-increment mode (`a0' is the only call-clobbered address
         register that is never used to pass information to a
         function).  Furthermore, if we don't need a frame pointer, we
         can merge the two SP adds into a single one, but this isn't
         always beneficial; sometimes we can just split the two adds
         so that we don't exceed a 16-bit constant size.  The code
         below will select which strategy to use, so as to generate
         the smallest code.  Ties are broken in favor of shorter
         sequences (in terms of number of instructions).  */

#define SIZE_ADD_AX(S) ((((S) >= (1 << 15)) || ((S) < -(1 << 15))) ? 6 \
                        : (((S) >= (1 << 7)) || ((S) < -(1 << 7))) ? 4 : 2)
#define SIZE_ADD_SP(S) ((((S) >= (1 << 15)) || ((S) < -(1 << 15))) ? 6 \
                        : (((S) >= (1 << 7)) || ((S) < -(1 << 7))) ? 4 : 3)

/* We add 0 * (S) in two places to promote to the type of S,
   so that all arms of the conditional have the same type.  */
#define SIZE_FMOV_LIMIT(S,N,L,SIZE1,SIZE2,ELSE) \
  (((S) >= (L)) ? 0 * (S) + (SIZE1) * (N) \
   : ((S) + 4 * (N) >= (L)) ? (((L) - (S)) / 4 * (SIZE2) \
                               + ((S) + 4 * (N) - (L)) / 4 * (SIZE1)) \
   : 0 * (S) + (ELSE))
#define SIZE_FMOV_SP_(S,N) \
  (SIZE_FMOV_LIMIT ((S), (N), (1 << 24), 7, 6, \
                    SIZE_FMOV_LIMIT ((S), (N), (1 << 8), 6, 4, \
                                     (S) ? 4 * (N) : 3 + 4 * ((N) - 1))))
#define SIZE_FMOV_SP(S,N) (SIZE_FMOV_SP_ ((unsigned HOST_WIDE_INT)(S), (N)))
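
/* Illustrative examples of the insn size estimates above:
   SIZE_ADD_SP (-4) is 3 bytes (8-bit immediate), SIZE_ADD_SP (-200)
   is 4 bytes (16-bit immediate) and SIZE_ADD_SP (-40000) is 6 bytes
   (32-bit immediate); the AX variant saves one byte in the 8-bit
   case.  */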

      /* Consider alternative save_sp_merge only if we don't need the
         frame pointer and size is nonzero.  */
      if (! frame_pointer_needed && size)
        {
          /* Insn: add -(size + 4 * num_regs_to_save), sp.  */
          this_strategy_size = SIZE_ADD_SP (-(size + 4 * num_regs_to_save));
          /* Insn: fmov fs#, (##, sp), for each fs# to be saved.  */
          this_strategy_size += SIZE_FMOV_SP (size, num_regs_to_save);

          if (this_strategy_size < strategy_size)
            {
              strategy = save_sp_merge;
              strategy_size = this_strategy_size;
            }
        }

      /* Consider alternative save_sp_no_merge unconditionally.  */
      /* Insn: add -4 * num_regs_to_save, sp.  */
      this_strategy_size = SIZE_ADD_SP (-4 * num_regs_to_save);
      /* Insn: fmov fs#, (##, sp), for each fs# to be saved.  */
      this_strategy_size += SIZE_FMOV_SP (0, num_regs_to_save);
      if (size)
        {
          /* Insn: add -size, sp.  */
          this_strategy_size += SIZE_ADD_SP (-size);
        }

      if (this_strategy_size < strategy_size)
        {
          strategy = save_sp_no_merge;
          strategy_size = this_strategy_size;
        }

      /* Consider alternative save_sp_partial_merge only if we don't
         need a frame pointer and size is reasonably large.  */
      if (! frame_pointer_needed && size + 4 * num_regs_to_save > 128)
        {
          /* Insn: add -128, sp.  */
          this_strategy_size = SIZE_ADD_SP (-128);
          /* Insn: fmov fs#, (##, sp), for each fs# to be saved.  */
          this_strategy_size += SIZE_FMOV_SP (128 - 4 * num_regs_to_save,
                                              num_regs_to_save);
          if (size)
            {
              /* Insn: add 128-size, sp.  */
              this_strategy_size += SIZE_ADD_SP (128 - size);
            }

          if (this_strategy_size < strategy_size)
            {
              strategy = save_sp_partial_merge;
              strategy_size = this_strategy_size;
            }
        }

      /* Consider alternative save_a0_merge only if we don't need a
         frame pointer, size is nonzero and the user hasn't
         changed the calling conventions of a0.  */
      if (! frame_pointer_needed && size
          && call_really_used_regs [FIRST_ADDRESS_REGNUM]
          && ! fixed_regs[FIRST_ADDRESS_REGNUM])
        {
          /* Insn: add -(size + 4 * num_regs_to_save), sp.  */
          this_strategy_size = SIZE_ADD_SP (-(size + 4 * num_regs_to_save));
          /* Insn: mov sp, a0.  */
          this_strategy_size++;
          if (size)
            {
              /* Insn: add size, a0.  */
              this_strategy_size += SIZE_ADD_AX (size);
            }
          /* Insn: fmov fs#, (a0+), for each fs# to be saved.  */
          this_strategy_size += 3 * num_regs_to_save;

          if (this_strategy_size < strategy_size)
            {
              strategy = save_a0_merge;
              strategy_size = this_strategy_size;
            }
        }

      /* Consider alternative save_a0_no_merge if the user hasn't
         changed the calling conventions of a0.  */
      if (call_really_used_regs [FIRST_ADDRESS_REGNUM]
          && ! fixed_regs[FIRST_ADDRESS_REGNUM])
        {
          /* Insn: add -4 * num_regs_to_save, sp.  */
          this_strategy_size = SIZE_ADD_SP (-4 * num_regs_to_save);
          /* Insn: mov sp, a0.  */
          this_strategy_size++;
          /* Insn: fmov fs#, (a0+), for each fs# to be saved.  */
          this_strategy_size += 3 * num_regs_to_save;
          if (size)
            {
              /* Insn: add -size, sp.  */
              this_strategy_size += SIZE_ADD_SP (-size);
            }

          if (this_strategy_size < strategy_size)
            {
              strategy = save_a0_no_merge;
              strategy_size = this_strategy_size;
            }
        }

      /* Emit the initial SP add, common to all strategies.  */
      switch (strategy)
        {
        case save_sp_no_merge:
        case save_a0_no_merge:
          F (emit_insn (gen_addsi3 (stack_pointer_rtx,
                                    stack_pointer_rtx,
                                    GEN_INT (-4 * num_regs_to_save))));
          xsize = 0;
          break;

        case save_sp_partial_merge:
          F (emit_insn (gen_addsi3 (stack_pointer_rtx,
                                    stack_pointer_rtx,
                                    GEN_INT (-128))));
          xsize = 128 - 4 * num_regs_to_save;
          size -= xsize;
          break;

        case save_sp_merge:
        case save_a0_merge:
          F (emit_insn (gen_addsi3 (stack_pointer_rtx,
                                    stack_pointer_rtx,
                                    GEN_INT (-(size + 4 * num_regs_to_save)))));
          /* We'll have to adjust FP register saves according to the
             frame size.  */
          xsize = size;
          /* Since we've already created the stack frame, don't do it
             again at the end of the function.  */
          size = 0;
          break;

        default:
          gcc_unreachable ();
        }

      /* Now prepare register a0, if we have decided to use it.  */
      switch (strategy)
        {
        case save_sp_merge:
        case save_sp_no_merge:
        case save_sp_partial_merge:
          reg = 0;
          break;

        case save_a0_merge:
        case save_a0_no_merge:
          reg = gen_rtx_REG (SImode, FIRST_ADDRESS_REGNUM);
          F (emit_insn (gen_movsi (reg, stack_pointer_rtx)));
          if (xsize)
            F (emit_insn (gen_addsi3 (reg, reg, GEN_INT (xsize))));
          reg = gen_rtx_POST_INC (SImode, reg);
          break;

        default:
          gcc_unreachable ();
        }

      /* Now actually save the FP registers.  */
      for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
        if (df_regs_ever_live_p (i) && ! call_really_used_regs [i])
          {
            rtx addr;

            if (reg)
              addr = reg;
            else
              {
                /* If we aren't using `a0', use an SP offset.  */
                if (xsize)
                  {
                    addr = gen_rtx_PLUS (SImode,
                                         stack_pointer_rtx,
                                         GEN_INT (xsize));
                  }
                else
                  addr = stack_pointer_rtx;

                xsize += 4;
              }

            F (emit_insn (gen_movsf (gen_rtx_MEM (SFmode, addr),
                                     gen_rtx_REG (SFmode, i))));
          }
    }

  /* Now put the frame pointer into the frame pointer register.  */
  if (frame_pointer_needed)
    F (emit_move_insn (frame_pointer_rtx, stack_pointer_rtx));

  /* Allocate stack for this frame.  */
  if (size)
    F (emit_insn (gen_addsi3 (stack_pointer_rtx,
                              stack_pointer_rtx,
                              GEN_INT (-size))));

  if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
    emit_insn (gen_load_pic ());
}

void
mn10300_expand_epilogue (void)
{
  HOST_WIDE_INT size = mn10300_frame_size ();
  unsigned int reg_save_bytes;

  mn10300_get_live_callee_saved_regs (& reg_save_bytes);

  if (TARGET_AM33_2 && fp_regs_to_save ())
    {
      int num_regs_to_save = fp_regs_to_save (), i;
      rtx reg = 0;

      /* We have several options to restore FP registers.  We could
         load them from SP offsets, but, if there are enough FP
         registers to restore, we win if we use a post-increment
         addressing mode.  */

      /* If we have a frame pointer, it's the best option, because we
         already know it has the value we want.  */
      if (frame_pointer_needed)
        reg = gen_rtx_REG (SImode, FRAME_POINTER_REGNUM);
      /* Otherwise, we may use `a1', since it's call-clobbered and
         it's never used for return values.  But only do so if it's
         smaller than using SP offsets.  */
      else
        {
          enum { restore_sp_post_adjust,
                 restore_sp_pre_adjust,
                 restore_sp_partial_adjust,
                 restore_a1 } strategy;
          unsigned int this_strategy_size, strategy_size = (unsigned)-1;

          /* Consider using sp offsets before adjusting sp.  */
          /* Insn: fmov (##,sp),fs#, for each fs# to be restored.  */
          this_strategy_size = SIZE_FMOV_SP (size, num_regs_to_save);
          /* If size is too large, we'll have to adjust SP with an
             add.  */
          if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
            {
              /* Insn: add size + 4 * num_regs_to_save, sp.  */
              this_strategy_size += SIZE_ADD_SP (size + 4 * num_regs_to_save);
            }
          /* If we don't have to restore any non-FP registers,
             we'll be able to save one byte by using rets.  */
          if (! reg_save_bytes)
            this_strategy_size--;

          if (this_strategy_size < strategy_size)
            {
              strategy = restore_sp_post_adjust;
              strategy_size = this_strategy_size;
            }

          /* Consider using sp offsets after adjusting sp.  */
          /* Insn: add size, sp.  */
          this_strategy_size = SIZE_ADD_SP (size);
          /* Insn: fmov (##,sp),fs#, for each fs# to be restored.  */
          this_strategy_size += SIZE_FMOV_SP (0, num_regs_to_save);
          /* We're going to use ret to release the FP registers
             save area, so, no savings.  */

          if (this_strategy_size < strategy_size)
            {
              strategy = restore_sp_pre_adjust;
              strategy_size = this_strategy_size;
            }

          /* Consider using sp offsets after partially adjusting sp.
             When size is close to 32Kb, we may be able to adjust SP
             with an imm16 add instruction while still using fmov
             (d8,sp).  */
          if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
            {
              /* Insn: add size + 4 * num_regs_to_save
                        + reg_save_bytes - 252,sp.  */
              this_strategy_size = SIZE_ADD_SP (size + 4 * num_regs_to_save
                                                + (int) reg_save_bytes - 252);
              /* Insn: fmov (##,sp),fs#, for each fs# to be restored.  */
              this_strategy_size += SIZE_FMOV_SP (252 - reg_save_bytes
                                                  - 4 * num_regs_to_save,
                                                  num_regs_to_save);
              /* We're going to use ret to release the FP registers
                 save area, so, no savings.  */

              if (this_strategy_size < strategy_size)
                {
                  strategy = restore_sp_partial_adjust;
                  strategy_size = this_strategy_size;
                }
            }

          /* Consider using a1 in post-increment mode, as long as the
             user hasn't changed the calling conventions of a1.  */
          if (call_really_used_regs [FIRST_ADDRESS_REGNUM + 1]
              && ! fixed_regs[FIRST_ADDRESS_REGNUM+1])
            {
              /* Insn: mov sp,a1.  */
              this_strategy_size = 1;
              if (size)
                {
                  /* Insn: add size,a1.  */
                  this_strategy_size += SIZE_ADD_AX (size);
                }
              /* Insn: fmov (a1+),fs#, for each fs# to be restored.  */
              this_strategy_size += 3 * num_regs_to_save;
              /* If size is large enough, we may be able to save a
                 couple of bytes.  */
              if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
                {
                  /* Insn: mov a1,sp.  */
                  this_strategy_size += 2;
                }
              /* If we don't have to restore any non-FP registers,
                 we'll be able to save one byte by using rets.  */
              if (! reg_save_bytes)
                this_strategy_size--;

              if (this_strategy_size < strategy_size)
                {
                  strategy = restore_a1;
                  strategy_size = this_strategy_size;
                }
            }

          switch (strategy)
            {
            case restore_sp_post_adjust:
              break;

            case restore_sp_pre_adjust:
              emit_insn (gen_addsi3 (stack_pointer_rtx,
                                     stack_pointer_rtx,
                                     GEN_INT (size)));
              size = 0;
              break;

            case restore_sp_partial_adjust:
              emit_insn (gen_addsi3 (stack_pointer_rtx,
                                     stack_pointer_rtx,
                                     GEN_INT (size + 4 * num_regs_to_save
                                              + reg_save_bytes - 252)));
              size = 252 - reg_save_bytes - 4 * num_regs_to_save;
              break;

            case restore_a1:
              reg = gen_rtx_REG (SImode, FIRST_ADDRESS_REGNUM + 1);
              emit_insn (gen_movsi (reg, stack_pointer_rtx));
              if (size)
                emit_insn (gen_addsi3 (reg, reg, GEN_INT (size)));
              break;

            default:
              gcc_unreachable ();
            }
        }

      /* Adjust the selected register, if any, for post-increment.  */
      if (reg)
        reg = gen_rtx_POST_INC (SImode, reg);

      for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
        if (df_regs_ever_live_p (i) && ! call_really_used_regs [i])
          {
            rtx addr;

            if (reg)
              addr = reg;
            else if (size)
              {
                /* If we aren't using a post-increment register, use an
                   SP offset.  */
                addr = gen_rtx_PLUS (SImode,
                                     stack_pointer_rtx,
                                     GEN_INT (size));
              }
            else
              addr = stack_pointer_rtx;

            size += 4;

            emit_insn (gen_movsf (gen_rtx_REG (SFmode, i),
                                  gen_rtx_MEM (SFmode, addr)));
          }

      /* If we were using the restore_a1 strategy and the number of
         bytes to be released won't fit in the `ret' byte, copy `a1'
         to `sp', to avoid having to use `add' to adjust it.  */
      if (! frame_pointer_needed && reg && size + reg_save_bytes > 255)
        {
          emit_move_insn (stack_pointer_rtx, XEXP (reg, 0));
          size = 0;
        }
    }

  /* Maybe cut back the stack, except for the register save area.

     If the frame pointer exists, then use the frame pointer to
     cut back the stack.

     If the stack size + register save area is more than 255 bytes,
     then the stack must be cut back here since the size + register
     save size is too big for a ret/retf instruction.

     Else leave it alone, it will be cut back as part of the
     ret/retf instruction, or there wasn't any stack to begin with.

     Under no circumstances should the register save area be
     deallocated here, that would leave a window where an interrupt
     could occur and trash the register save area.  */
  if (frame_pointer_needed)
    {
      emit_move_insn (stack_pointer_rtx, frame_pointer_rtx);
      size = 0;
    }
  else if (size + reg_save_bytes > 255)
    {
      emit_insn (gen_addsi3 (stack_pointer_rtx,
                             stack_pointer_rtx,
                             GEN_INT (size)));
      size = 0;
    }

  /* Adjust the stack and restore callee-saved registers, if any.  */
  if (mn10300_can_use_rets_insn ())
    emit_jump_insn (ret_rtx);
  else
    emit_jump_insn (gen_return_ret (GEN_INT (size + reg_save_bytes)));
}

/* Recognize the PARALLEL rtx generated by mn10300_gen_multiple_store().
   This function is for MATCH_PARALLEL and so assumes OP is known to be
   parallel.  If OP is a multiple store, return a mask indicating which
   registers it saves.  Return 0 otherwise.  */

unsigned int
mn10300_store_multiple_regs (rtx op)
{
  int count;
  int mask;
  int i;
  unsigned int last;
  rtx elt;

  count = XVECLEN (op, 0);
  if (count < 2)
    return 0;

  /* Check that first instruction has the form (set (sp) (plus A B)).  */
  elt = XVECEXP (op, 0, 0);
  if (GET_CODE (elt) != SET
      || (! REG_P (SET_DEST (elt)))
      || REGNO (SET_DEST (elt)) != STACK_POINTER_REGNUM
      || GET_CODE (SET_SRC (elt)) != PLUS)
    return 0;

  /* Check that A is the stack pointer and B is the expected stack size.
     For OP to match, each subsequent instruction should push a word onto
     the stack.  We therefore expect the first instruction to create
     COUNT-1 stack slots.  */
  elt = SET_SRC (elt);
  if ((! REG_P (XEXP (elt, 0)))
      || REGNO (XEXP (elt, 0)) != STACK_POINTER_REGNUM
      || (! CONST_INT_P (XEXP (elt, 1)))
      || INTVAL (XEXP (elt, 1)) != -(count - 1) * 4)
    return 0;

  mask = 0;
  for (i = 1; i < count; i++)
    {
      /* Check that element i is a (set (mem M) R).  */
      /* ??? Validate the register order a-la mn10300_gen_multiple_store.
         Remember: the ordering is *not* monotonic.  */
      elt = XVECEXP (op, 0, i);
      if (GET_CODE (elt) != SET
          || (! MEM_P (SET_DEST (elt)))
          || (! REG_P (SET_SRC (elt))))
        return 0;

      /* Remember which registers are to be saved.  */
      last = REGNO (SET_SRC (elt));
      mask |= (1 << last);

      /* Check that M has the form (plus (sp) (const_int -I*4)).  */
      elt = XEXP (SET_DEST (elt), 0);
      if (GET_CODE (elt) != PLUS
          || (! REG_P (XEXP (elt, 0)))
          || REGNO (XEXP (elt, 0)) != STACK_POINTER_REGNUM
          || (! CONST_INT_P (XEXP (elt, 1)))
          || INTVAL (XEXP (elt, 1)) != -i * 4)
        return 0;
    }

  /* All or none of the callee-saved extended registers must be in the set.  */
  if ((mask & 0x3c000) != 0
      && (mask & 0x3c000) != 0x3c000)
    return 0;

  return mask;
}

/* Implement TARGET_PREFERRED_RELOAD_CLASS.  */

static reg_class_t
mn10300_preferred_reload_class (rtx x, reg_class_t rclass)
{
  if (x == stack_pointer_rtx && rclass != SP_REGS)
    return (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
  else if (MEM_P (x)
           || (REG_P (x)
               && !HARD_REGISTER_P (x))
           || (GET_CODE (x) == SUBREG
               && REG_P (SUBREG_REG (x))
               && !HARD_REGISTER_P (SUBREG_REG (x))))
    return LIMIT_RELOAD_CLASS (GET_MODE (x), rclass);
  else
    return rclass;
}

/* Implement TARGET_PREFERRED_OUTPUT_RELOAD_CLASS.  */

static reg_class_t
mn10300_preferred_output_reload_class (rtx x, reg_class_t rclass)
{
  if (x == stack_pointer_rtx && rclass != SP_REGS)
    return (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
  return rclass;
}

/* Implement TARGET_SECONDARY_RELOAD.  */

static reg_class_t
mn10300_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
                          machine_mode mode, secondary_reload_info *sri)
{
  enum reg_class rclass = (enum reg_class) rclass_i;
  enum reg_class xclass = NO_REGS;
  unsigned int xregno = INVALID_REGNUM;

  if (REG_P (x))
    {
      xregno = REGNO (x);
      if (xregno >= FIRST_PSEUDO_REGISTER)
        xregno = true_regnum (x);
      if (xregno != INVALID_REGNUM)
        xclass = REGNO_REG_CLASS (xregno);
    }

  if (!TARGET_AM33)
    {
      /* Memory load/stores less than a full word wide can't have an
         address or stack pointer destination.  They must use a data
         register as an intermediate register.  */
      if (rclass != DATA_REGS
          && (mode == QImode || mode == HImode)
          && xclass == NO_REGS)
        return DATA_REGS;

      /* We can only move SP to/from an address register.  */
      if (in_p
          && rclass == SP_REGS
          && xclass != ADDRESS_REGS)
        return ADDRESS_REGS;
      if (!in_p
          && xclass == SP_REGS
          && rclass != ADDRESS_REGS
          && rclass != SP_OR_ADDRESS_REGS)
        return ADDRESS_REGS;
    }

  /* We can't directly load sp + const_int into a register;
     we must use an address register as a scratch.  */
  if (in_p
      && rclass != SP_REGS
      && rclass != SP_OR_ADDRESS_REGS
      && rclass != SP_OR_GENERAL_REGS
      && GET_CODE (x) == PLUS
      && (XEXP (x, 0) == stack_pointer_rtx
          || XEXP (x, 1) == stack_pointer_rtx))
    {
      sri->icode = CODE_FOR_reload_plus_sp_const;
      return NO_REGS;
    }

  /* We can only move MDR to/from a data register.  */
  if (rclass == MDR_REGS && xclass != DATA_REGS)
    return DATA_REGS;
  if (xclass == MDR_REGS && rclass != DATA_REGS)
    return DATA_REGS;

  /* We can't load/store an FP register from a constant address.  */
  if (TARGET_AM33_2
      && (rclass == FP_REGS || xclass == FP_REGS)
      && (xclass == NO_REGS || rclass == NO_REGS))
    {
      rtx addr = NULL;

      if (xregno >= FIRST_PSEUDO_REGISTER && xregno != INVALID_REGNUM)
        {
          addr = reg_equiv_mem (xregno);
          if (addr)
            addr = XEXP (addr, 0);
        }
      else if (MEM_P (x))
        addr = XEXP (x, 0);

      if (addr && CONSTANT_ADDRESS_P (addr))
        return GENERAL_REGS;
    }

  /* Otherwise assume no secondary reloads are needed.  */
  return NO_REGS;
}

int
mn10300_frame_size (void)
{
  /* size includes the fixed stack space needed for function calls.  */
  int size = get_frame_size () + crtl->outgoing_args_size;

  /* And space for the return pointer.  */
  size += crtl->outgoing_args_size ? 4 : 0;

  return size;
}

int
mn10300_initial_offset (int from, int to)
{
  int diff = 0;

  gcc_assert (from == ARG_POINTER_REGNUM || from == FRAME_POINTER_REGNUM);
  gcc_assert (to == FRAME_POINTER_REGNUM || to == STACK_POINTER_REGNUM);

  if (to == STACK_POINTER_REGNUM)
    diff = mn10300_frame_size ();

  /* The difference between the argument pointer and the frame pointer
     is the size of the callee register save area.  */
  if (from == ARG_POINTER_REGNUM)
    {
      unsigned int reg_save_bytes;

      mn10300_get_live_callee_saved_regs (& reg_save_bytes);
      diff += reg_save_bytes;
      diff += 4 * fp_regs_to_save ();
    }

  return diff;
}

/* Worker function for TARGET_RETURN_IN_MEMORY.  */

static bool
mn10300_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  /* Return values > 8 bytes in length in memory.  */
  return (int_size_in_bytes (type) > 8
          || int_size_in_bytes (type) == 0
          || TYPE_MODE (type) == BLKmode);
}

/* Flush the argument registers to the stack for a stdarg function;
   return the new argument pointer.  */
static rtx
mn10300_builtin_saveregs (void)
{
  rtx offset, mem;
  tree fntype = TREE_TYPE (current_function_decl);
  int argadj = ((!stdarg_p (fntype))
                ? UNITS_PER_WORD : 0);
  alias_set_type set = get_varargs_alias_set ();

  if (argadj)
    offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, argadj);
  else
    offset = crtl->args.arg_offset_rtx;

  mem = gen_rtx_MEM (SImode, crtl->args.internal_arg_pointer);
  set_mem_alias_set (mem, set);
  emit_move_insn (mem, gen_rtx_REG (SImode, 0));

  mem = gen_rtx_MEM (SImode,
                     plus_constant (Pmode,
                                    crtl->args.internal_arg_pointer, 4));
  set_mem_alias_set (mem, set);
  emit_move_insn (mem, gen_rtx_REG (SImode, 1));

  return copy_to_reg (expand_binop (Pmode, add_optab,
                                    crtl->args.internal_arg_pointer,
                                    offset, 0, 0, OPTAB_LIB_WIDEN));
}

static void
mn10300_va_start (tree valist, rtx nextarg)
{
  nextarg = expand_builtin_saveregs ();
  std_expand_builtin_va_start (valist, nextarg);
}

/* Return true when a parameter should be passed by reference.  */

static bool
mn10300_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
                           machine_mode mode, const_tree type,
                           bool named ATTRIBUTE_UNUSED)
{
  unsigned HOST_WIDE_INT size;

  if (type)
    size = int_size_in_bytes (type);
  else
    size = GET_MODE_SIZE (mode);

  return (size > 8 || size == 0);
}

/* Return an RTX to represent where an argument with mode MODE and type
   TYPE will be passed to a function.  If the result is NULL_RTX, the
   argument is pushed.  */

static rtx
mn10300_function_arg (cumulative_args_t cum_v, machine_mode mode,
                      const_tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  rtx result = NULL_RTX;
  int size;

  /* We only support using 2 data registers as argument registers.  */
  int nregs = 2;

  /* Figure out the size of the object to be passed.  */
  if (mode == BLKmode)
    size = int_size_in_bytes (type);
  else
    size = GET_MODE_SIZE (mode);

  cum->nbytes = (cum->nbytes + 3) & ~3;

  /* Don't pass this arg via a register if all the argument registers
     are used up.  */
  if (cum->nbytes > nregs * UNITS_PER_WORD)
    return result;

  /* Don't pass this arg via a register if it would be split between
     registers and memory.  */
  if (type == NULL_TREE
      && cum->nbytes + size > nregs * UNITS_PER_WORD)
    return result;

  switch (cum->nbytes / UNITS_PER_WORD)
    {
    case 0:
      result = gen_rtx_REG (mode, FIRST_ARGUMENT_REGNUM);
      break;
    case 1:
      result = gen_rtx_REG (mode, FIRST_ARGUMENT_REGNUM + 1);
      break;
    default:
      break;
    }

  return result;
}

/* Update the data in CUM to advance over an argument
   of mode MODE and data type TYPE.
   (TYPE is null for libcalls where that information may not be available.)  */

static void
mn10300_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
                              const_tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);

  cum->nbytes += (mode != BLKmode
                  ? (GET_MODE_SIZE (mode) + 3) & ~3
                  : (int_size_in_bytes (type) + 3) & ~3);
}

/* Return the number of bytes of registers to use for an argument passed
   partially in registers and partially in memory.  */

static int
mn10300_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
                           tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  int size;

  /* We only support using 2 data registers as argument registers.  */
  int nregs = 2;

  /* Figure out the size of the object to be passed.  */
  if (mode == BLKmode)
    size = int_size_in_bytes (type);
  else
    size = GET_MODE_SIZE (mode);

  cum->nbytes = (cum->nbytes + 3) & ~3;

  /* Don't pass this arg via a register if all the argument registers
     are used up.  */
  if (cum->nbytes > nregs * UNITS_PER_WORD)
    return 0;

  if (cum->nbytes + size <= nregs * UNITS_PER_WORD)
    return 0;

  /* Don't pass this arg via a register if it would be split between
     registers and memory.  */
  if (type == NULL_TREE
      && cum->nbytes + size > nregs * UNITS_PER_WORD)
    return 0;

  return nregs * UNITS_PER_WORD - cum->nbytes;
}

/* Return the location of the function's value.  This will be either
   $d0 for integer functions, $a0 for pointers, or a PARALLEL of both
   $d0 and $a0 if the -mreturn-pointer-on-d0 flag is set.  Note that
   we only return the PARALLEL for outgoing values; we do not want
   callers relying on this extra copy.  */

static rtx
mn10300_function_value (const_tree valtype,
                        const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
                        bool outgoing)
{
  rtx rv;
  machine_mode mode = TYPE_MODE (valtype);

  if (! POINTER_TYPE_P (valtype))
    return gen_rtx_REG (mode, FIRST_DATA_REGNUM);
  else if (! TARGET_PTR_A0D0 || ! outgoing
           || cfun->returns_struct)
    return gen_rtx_REG (mode, FIRST_ADDRESS_REGNUM);

  rv = gen_rtx_PARALLEL (mode, rtvec_alloc (2));
  XVECEXP (rv, 0, 0)
    = gen_rtx_EXPR_LIST (VOIDmode,
                         gen_rtx_REG (mode, FIRST_ADDRESS_REGNUM),
                         GEN_INT (0));

  XVECEXP (rv, 0, 1)
    = gen_rtx_EXPR_LIST (VOIDmode,
                         gen_rtx_REG (mode, FIRST_DATA_REGNUM),
                         GEN_INT (0));
  return rv;
}

/* Implements TARGET_LIBCALL_VALUE.  */

static rtx
mn10300_libcall_value (machine_mode mode,
                       const_rtx fun ATTRIBUTE_UNUSED)
{
  return gen_rtx_REG (mode, FIRST_DATA_REGNUM);
}

/* Implements FUNCTION_VALUE_REGNO_P.  */

bool
mn10300_function_value_regno_p (const unsigned int regno)
{
  return (regno == FIRST_DATA_REGNUM || regno == FIRST_ADDRESS_REGNUM);
}

/* Output an addition operation.  */

const char *
mn10300_output_add (rtx operands[3], bool need_flags)
{
  rtx dest, src1, src2;
  unsigned int dest_regnum, src1_regnum, src2_regnum;
  enum reg_class src1_class, src2_class, dest_class;

  dest = operands[0];
  src1 = operands[1];
  src2 = operands[2];

  dest_regnum = true_regnum (dest);
  src1_regnum = true_regnum (src1);

  dest_class = REGNO_REG_CLASS (dest_regnum);
  src1_class = REGNO_REG_CLASS (src1_regnum);

  if (CONST_INT_P (src2))
    {
      gcc_assert (dest_regnum == src1_regnum);

      if (src2 == const1_rtx && !need_flags)
        return "inc %0";
      if (INTVAL (src2) == 4 && !need_flags && dest_class != DATA_REGS)
        return "inc4 %0";

      gcc_assert (!need_flags || dest_class != SP_REGS);
      return "add %2,%0";
    }
  else if (CONSTANT_P (src2))
    return "add %2,%0";

  src2_regnum = true_regnum (src2);
  src2_class = REGNO_REG_CLASS (src2_regnum);

  if (dest_regnum == src1_regnum)
    return "add %2,%0";
  if (dest_regnum == src2_regnum)
    return "add %1,%0";

  /* The rest of the cases are reg = reg+reg.  For AM33, we can implement
     this directly, as below, but when optimizing for space we can sometimes
     do better by using a mov+add.  For MN103, we claimed that we could
     implement a three-operand add because the various move and add insns
     change sizes across register classes, and we can often do better than
     reload in choosing which operand to move.  */
  if (TARGET_AM33 && optimize_insn_for_speed_p ())
    return "add %2,%1,%0";

  /* Catch cases where no extended register was used.  */
  if (src1_class != EXTENDED_REGS
      && src2_class != EXTENDED_REGS
      && dest_class != EXTENDED_REGS)
    {
      /* We have to copy one of the sources into the destination, then
         add the other source to the destination.

         Carefully select which source to copy to the destination; a
         naive implementation will waste a byte when the source classes
         are different and the destination is an address register.
         Selecting the lowest cost register copy will optimize this
         sequence.  */
      if (src1_class == dest_class)
        return "mov %1,%0\n\tadd %2,%0";
      else
        return "mov %2,%0\n\tadd %1,%0";
    }

  /* At least one register is an extended register.  */

  /* The three operand add instruction on the am33 is a win iff the
     output register is an extended register, or if both source
     registers are extended registers.  */
  if (dest_class == EXTENDED_REGS || src1_class == src2_class)
    return "add %2,%1,%0";

  /* It is better to copy one of the sources to the destination, then
     perform a 2 address add.  The destination in this case must be
     an address or data register and one of the sources must be an
     extended register and the remaining source must not be an extended
     register.

     The best code for this case is to copy the extended reg to the
     destination, then emit a two address add.  */
  if (src1_class == EXTENDED_REGS)
    return "mov %1,%0\n\tadd %2,%0";
  else
    return "mov %2,%0\n\tadd %1,%0";
}

/* Return 1 if X contains a symbolic expression.  We know these
   expressions will have one of a few well defined forms, so
   we need only check those forms.  */

int
mn10300_symbolic_operand (rtx op,
                          machine_mode mode ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (op))
    {
    case SYMBOL_REF:
    case LABEL_REF:
      return 1;
    case CONST:
      op = XEXP (op, 0);
      return ((GET_CODE (XEXP (op, 0)) == SYMBOL_REF
               || GET_CODE (XEXP (op, 0)) == LABEL_REF)
              && CONST_INT_P (XEXP (op, 1)));
    default:
      return 0;
    }
}

/* Try machine dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   Normally it is always safe for this macro to do nothing.  It exists to
   recognize opportunities to optimize the output.

   But on a few ports with segmented architectures and indexed addressing
   (mn10300, hppa) it is used to rewrite certain problematical addresses.  */

static rtx
mn10300_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
                            machine_mode mode ATTRIBUTE_UNUSED)
{
  if (flag_pic && ! mn10300_legitimate_pic_operand_p (x))
    x = mn10300_legitimize_pic_address (oldx, NULL_RTX);

  /* Uh-oh.  We might have an address for x[n-100000].  This needs
     special handling to avoid creating an indexed memory address
     with x-100000 as the base.  */
  if (GET_CODE (x) == PLUS
      && mn10300_symbolic_operand (XEXP (x, 1), VOIDmode))
    {
      /* Ugly.  We modify things here so that the address offset specified
         by the index expression is computed first, then added to x to form
         the entire address.  */

      rtx regx1, regy1, regy2, y;

      /* Strip off any CONST.  */
      y = XEXP (x, 1);
      if (GET_CODE (y) == CONST)
        y = XEXP (y, 0);

      if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
        {
          regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
          regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
          regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
          regx1 = force_reg (Pmode,
                             gen_rtx_fmt_ee (GET_CODE (y), Pmode, regx1,
                                             regy2));
          return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
        }
    }
  return x;
}

/* Convert a non-PIC address in `orig' to a PIC address using @GOT or
   @GOTOFF in `reg'.  */

rtx
mn10300_legitimize_pic_address (rtx orig, rtx reg)
{
  rtx x;

  if (GET_CODE (orig) == LABEL_REF
      || (GET_CODE (orig) == SYMBOL_REF
          && (CONSTANT_POOL_ADDRESS_P (orig)
              || ! MN10300_GLOBAL_P (orig))))
    {
      if (reg == NULL)
        reg = gen_reg_rtx (Pmode);

      x = gen_rtx_UNSPEC (SImode, gen_rtvec (1, orig), UNSPEC_GOTOFF);
      x = gen_rtx_CONST (SImode, x);
      emit_move_insn (reg, x);

      x = emit_insn (gen_addsi3 (reg, reg, pic_offset_table_rtx));
    }
  else if (GET_CODE (orig) == SYMBOL_REF)
    {
      if (reg == NULL)
        reg = gen_reg_rtx (Pmode);

      x = gen_rtx_UNSPEC (SImode, gen_rtvec (1, orig), UNSPEC_GOT);
      x = gen_rtx_CONST (SImode, x);
      x = gen_rtx_PLUS (SImode, pic_offset_table_rtx, x);
      x = gen_const_mem (SImode, x);

      x = emit_move_insn (reg, x);
    }
  else
    return orig;

  set_unique_reg_note (x, REG_EQUAL, orig);
  return reg;
}

/* Return zero if X references a SYMBOL_REF or LABEL_REF whose symbol
   isn't protected by a PIC unspec; nonzero otherwise.  */

int
mn10300_legitimate_pic_operand_p (rtx x)
{
  const char *fmt;
  int i;

  if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
    return 0;

  if (GET_CODE (x) == UNSPEC
      && (XINT (x, 1) == UNSPEC_PIC
          || XINT (x, 1) == UNSPEC_GOT
          || XINT (x, 1) == UNSPEC_GOTOFF
          || XINT (x, 1) == UNSPEC_PLT
          || XINT (x, 1) == UNSPEC_GOTSYM_OFF))
    return 1;

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
        {
          int j;

          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (! mn10300_legitimate_pic_operand_p (XVECEXP (x, i, j)))
              return 0;
        }
      else if (fmt[i] == 'e'
               && ! mn10300_legitimate_pic_operand_p (XEXP (x, i)))
        return 0;
    }

  return 1;
}

1936 /* Return TRUE if the address X, taken from a (MEM:MODE X) rtx, is
1937 legitimate, and FALSE otherwise.
1938
1939 On the mn10300, the value in the address register must be
1940 in the same memory space/segment as the effective address.
1941
1942 This is problematical for reload since it does not understand
1943 that base+index != index+base in a memory reference.
1944
1945 Note it is still possible to use reg+reg addressing modes,
1946 it's just much more difficult. For a discussion of a possible
1947 workaround and solution, see the comments in pa.c before the
1948 function record_unscaled_index_insn_codes. */
1949
1950 static bool
1951 mn10300_legitimate_address_p (machine_mode mode, rtx x, bool strict)
1952 {
1953 rtx base, index;
1954
1955 if (CONSTANT_ADDRESS_P (x))
1956 return !flag_pic || mn10300_legitimate_pic_operand_p (x);
1957
1958 if (RTX_OK_FOR_BASE_P (x, strict))
1959 return true;
1960
1961 if (TARGET_AM33 && (mode == SImode || mode == SFmode || mode == HImode))
1962 {
1963 if (GET_CODE (x) == POST_INC)
1964 return RTX_OK_FOR_BASE_P (XEXP (x, 0), strict);
1965 if (GET_CODE (x) == POST_MODIFY)
1966 return (RTX_OK_FOR_BASE_P (XEXP (x, 0), strict)
1967 && CONSTANT_ADDRESS_P (XEXP (x, 1)));
1968 }
1969
1970 if (GET_CODE (x) != PLUS)
1971 return false;
1972
1973 base = XEXP (x, 0);
1974 index = XEXP (x, 1);
1975
1976 if (!REG_P (base))
1977 return false;
1978 if (REG_P (index))
1979 {
1980 /* ??? Without AM33 generalized (Ri,Rn) addressing, reg+reg
1981 addressing is hard to satisfy. */
1982 if (!TARGET_AM33)
1983 return false;
1984
1985 return (REGNO_GENERAL_P (REGNO (base), strict)
1986 && REGNO_GENERAL_P (REGNO (index), strict));
1987 }
1988
1989 if (!REGNO_STRICT_OK_FOR_BASE_P (REGNO (base), strict))
1990 return false;
1991
1992 if (CONST_INT_P (index))
1993 return IN_RANGE (INTVAL (index), -1 - 0x7fffffff, 0x7fffffff);
1994
1995 if (CONSTANT_ADDRESS_P (index))
1996 return !flag_pic || mn10300_legitimate_pic_operand_p (index);
1997
1998 return false;
1999 }
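
/* Some illustrative cases for the checks above: (reg a2) is a valid
   base by itself; (plus (reg a2) (const_int 12)) is a valid base plus
   a 32-bit displacement; (plus (reg d1) (reg d2)) is accepted only on
   AM33, where any pair of general registers may be used; and
   (post_inc (reg a2)) is accepted only on AM33 and only for SImode,
   SFmode or HImode accesses.  A PLUS whose first operand is not a
   register is always rejected.  */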
2000
2001 bool
2002 mn10300_regno_in_class_p (unsigned regno, int rclass, bool strict)
2003 {
2004 if (regno >= FIRST_PSEUDO_REGISTER)
2005 {
2006 if (!strict)
2007 return true;
2008 if (!reg_renumber)
2009 return false;
2010 regno = reg_renumber[regno];
2011 if (regno == INVALID_REGNUM)
2012 return false;
2013 }
2014 return TEST_HARD_REG_BIT (reg_class_contents[rclass], regno);
2015 }
2016
2017 rtx
2018 mn10300_legitimize_reload_address (rtx x,
2019 machine_mode mode ATTRIBUTE_UNUSED,
2020 int opnum, int type,
2021 int ind_levels ATTRIBUTE_UNUSED)
2022 {
2023 bool any_change = false;
2024
2025 /* See above re disabling reg+reg addressing for MN103. */
2026 if (!TARGET_AM33)
2027 return NULL_RTX;
2028
2029 if (GET_CODE (x) != PLUS)
2030 return NULL_RTX;
2031
2032 if (XEXP (x, 0) == stack_pointer_rtx)
2033 {
2034 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
2035 GENERAL_REGS, GET_MODE (x), VOIDmode, 0, 0,
2036 opnum, (enum reload_type) type);
2037 any_change = true;
2038 }
2039 if (XEXP (x, 1) == stack_pointer_rtx)
2040 {
2041 push_reload (XEXP (x, 1), NULL_RTX, &XEXP (x, 1), NULL,
2042 GENERAL_REGS, GET_MODE (x), VOIDmode, 0, 0,
2043 opnum, (enum reload_type) type);
2044 any_change = true;
2045 }
2046
2047 return any_change ? x : NULL_RTX;
2048 }
2049
2050 /* Implement TARGET_LEGITIMATE_CONSTANT_P. Returns TRUE if X is a valid
2051 constant. Note that some "constants" aren't valid, such as TLS
2052 symbols and unconverted GOT-based references, so we eliminate
2053 those here. */
2054
2055 static bool
2056 mn10300_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
2057 {
2058 switch (GET_CODE (x))
2059 {
2060 case CONST:
2061 x = XEXP (x, 0);
2062
2063 if (GET_CODE (x) == PLUS)
2064 {
2065 if (! CONST_INT_P (XEXP (x, 1)))
2066 return false;
2067 x = XEXP (x, 0);
2068 }
2069
2070 /* Only some unspecs are valid as "constants". */
2071 if (GET_CODE (x) == UNSPEC)
2072 {
2073 switch (XINT (x, 1))
2074 {
2075 case UNSPEC_PIC:
2076 case UNSPEC_GOT:
2077 case UNSPEC_GOTOFF:
2078 case UNSPEC_PLT:
2079 return true;
2080 default:
2081 return false;
2082 }
2083 }
2084
2085 /* We must have drilled down to a symbol. */
2086 if (! mn10300_symbolic_operand (x, Pmode))
2087 return false;
2088 break;
2089
2090 default:
2091 break;
2092 }
2093
2094 return true;
2095 }
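
/* For example, assuming _y is an ordinary (non-TLS) symbol:
   (const (plus (symbol_ref "_y") (const_int 8))) is accepted, as is
   (const (unspec [(symbol_ref "_y")] UNSPEC_GOTOFF)); but an
   UNSPEC_GOTSYM_OFF wrapped in a CONST falls through to the default
   case of the inner switch and is rejected.  */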
2096
2097 /* Undo pic address legitimization for the benefit of debug info. */
2098
2099 static rtx
2100 mn10300_delegitimize_address (rtx orig_x)
2101 {
2102 rtx x = orig_x, ret, addend = NULL;
2103 bool need_mem;
2104
2105 if (MEM_P (x))
2106 x = XEXP (x, 0);
2107 if (GET_CODE (x) != PLUS || GET_MODE (x) != Pmode)
2108 return orig_x;
2109
2110 if (XEXP (x, 0) == pic_offset_table_rtx)
2111 ;
2112 /* With the REG+REG addressing of AM33, var-tracking can re-assemble
2113 some odd-looking "addresses" that were never valid in the first place.
2114 We need to look harder to avoid warnings being emitted. */
2115 else if (GET_CODE (XEXP (x, 0)) == PLUS)
2116 {
2117 rtx x0 = XEXP (x, 0);
2118 rtx x00 = XEXP (x0, 0);
2119 rtx x01 = XEXP (x0, 1);
2120
2121 if (x00 == pic_offset_table_rtx)
2122 addend = x01;
2123 else if (x01 == pic_offset_table_rtx)
2124 addend = x00;
2125 else
2126 return orig_x;
2127
2128 }
2129 else
2130 return orig_x;
2131 x = XEXP (x, 1);
2132
2133 if (GET_CODE (x) != CONST)
2134 return orig_x;
2135 x = XEXP (x, 0);
2136 if (GET_CODE (x) != UNSPEC)
2137 return orig_x;
2138
2139 ret = XVECEXP (x, 0, 0);
2140 if (XINT (x, 1) == UNSPEC_GOTOFF)
2141 need_mem = false;
2142 else if (XINT (x, 1) == UNSPEC_GOT)
2143 need_mem = true;
2144 else
2145 return orig_x;
2146
2147 gcc_assert (GET_CODE (ret) == SYMBOL_REF);
2148 if (need_mem != MEM_P (orig_x))
2149 return orig_x;
2150 if (need_mem && addend)
2151 return orig_x;
2152 if (addend)
2153 ret = gen_rtx_PLUS (Pmode, addend, ret);
2154 return ret;
2155 }
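
/* A sketch of the reverse transformation performed above: the GOT form

       (mem (plus (reg pic) (const (unspec [(symbol_ref "_z")]
                                           UNSPEC_GOT))))

   delegitimizes back to (symbol_ref "_z"), but only when ORIG_X really
   is a MEM; the GOTOFF form is recovered without a MEM, and an addend
   found alongside the PIC register is re-applied as a PLUS (GOTOFF
   only; a GOT load with an addend is left untouched).  */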
2156
2157 /* For addresses, costs are relative to "MOV (Rm),Rn". For AM33 this is
2158 the 3-byte fully general instruction; for MN103 this is the 2-byte form
2159 with an address register. */
2160
2161 static int
2162 mn10300_address_cost (rtx x, machine_mode mode ATTRIBUTE_UNUSED,
2163 addr_space_t as ATTRIBUTE_UNUSED, bool speed)
2164 {
2165 HOST_WIDE_INT i;
2166 rtx base, index;
2167
2168 switch (GET_CODE (x))
2169 {
2170 case CONST:
2171 case SYMBOL_REF:
2172 case LABEL_REF:
2173 /* We assume all of these require a 32-bit constant, even though
2174 some symbol and label references can be relaxed. */
2175 return speed ? 1 : 4;
2176
2177 case REG:
2178 case SUBREG:
2179 case POST_INC:
2180 return 0;
2181
2182 case POST_MODIFY:
2183 /* Assume any symbolic offset is a 32-bit constant. */
2184 i = (CONST_INT_P (XEXP (x, 1)) ? INTVAL (XEXP (x, 1)) : 0x12345678);
2185 if (IN_RANGE (i, -128, 127))
2186 return speed ? 0 : 1;
2187 if (speed)
2188 return 1;
2189 if (IN_RANGE (i, -0x800000, 0x7fffff))
2190 return 3;
2191 return 4;
2192
2193 case PLUS:
2194 base = XEXP (x, 0);
2195 index = XEXP (x, 1);
2196 if (register_operand (index, SImode))
2197 {
2198 /* Attempt to minimize the number of registers in the address.
2199 This is similar to what other ports do. */
2200 if (register_operand (base, SImode))
2201 return 1;
2202
2203 base = XEXP (x, 1);
2204 index = XEXP (x, 0);
2205 }
2206
2207 /* Assume any symbolic offset is a 32-bit constant. */
2208 i = (CONST_INT_P (XEXP (x, 1)) ? INTVAL (XEXP (x, 1)) : 0x12345678);
2209 if (IN_RANGE (i, -128, 127))
2210 return speed ? 0 : 1;
2211 if (IN_RANGE (i, -32768, 32767))
2212 return speed ? 0 : 2;
2213 return speed ? 2 : 6;
2214
2215 default:
2216 return rtx_cost (x, Pmode, MEM, 0, speed);
2217 }
2218 }
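
/* Worked examples of the cases above: (plus (reg a0) (const_int 1000))
   costs 2 when optimizing for size (a 16-bit displacement) and 0 for
   speed; (symbol_ref "_a") costs 4 for size and 1 for speed, per the
   32-bit-constant assumption; and an AM33 reg+reg address costs 1
   either way.  */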
2219
2220 /* Implement the TARGET_REGISTER_MOVE_COST hook.
2221
2222 Recall that the base value of 2 is required by assumptions elsewhere
2223 in the body of the compiler, and that cost 2 is special-cased as an
2224 early exit from reload meaning no work is required. */
2225
2226 static int
2227 mn10300_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
2228 reg_class_t ifrom, reg_class_t ito)
2229 {
2230 enum reg_class from = (enum reg_class) ifrom;
2231 enum reg_class to = (enum reg_class) ito;
2232 enum reg_class scratch, test;
2233
2234 /* Simplify the following code by unifying the fp register classes. */
2235 if (to == FP_ACC_REGS)
2236 to = FP_REGS;
2237 if (from == FP_ACC_REGS)
2238 from = FP_REGS;
2239
2240 /* Diagnose invalid moves by costing them as two moves. */
2241
2242 scratch = NO_REGS;
2243 test = from;
2244 if (to == SP_REGS)
2245 scratch = (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
2246 else if (to == MDR_REGS)
2247 scratch = DATA_REGS;
2248 else if (to == FP_REGS && to != from)
2249 scratch = GENERAL_REGS;
2250 else
2251 {
2252 test = to;
2253 if (from == SP_REGS)
2254 scratch = (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
2255 else if (from == MDR_REGS)
2256 scratch = DATA_REGS;
2257 else if (from == FP_REGS && to != from)
2258 scratch = GENERAL_REGS;
2259 }
2260 if (scratch != NO_REGS && !reg_class_subset_p (test, scratch))
2261 return (mn10300_register_move_cost (VOIDmode, from, scratch)
2262 + mn10300_register_move_cost (VOIDmode, scratch, to));
2263
2264 /* From here on, all we need consider are legal combinations. */
2265
2266 if (optimize_size)
2267 {
2268 /* The scale here is bytes * 2. */
2269
2270 if (from == to && (to == ADDRESS_REGS || to == DATA_REGS))
2271 return 2;
2272
2273 if (from == SP_REGS)
2274 return (to == ADDRESS_REGS ? 2 : 6);
2275
2276       /* For AM33, all remaining legal moves are two bytes.  */
2277 if (TARGET_AM33)
2278 return 4;
2279
2280 if (to == SP_REGS)
2281 return (from == ADDRESS_REGS ? 4 : 6);
2282
2283 if ((from == ADDRESS_REGS || from == DATA_REGS)
2284 && (to == ADDRESS_REGS || to == DATA_REGS))
2285 return 4;
2286
2287 if (to == EXTENDED_REGS)
2288 return (to == from ? 6 : 4);
2289
2290 /* What's left are SP_REGS, FP_REGS, or combinations of the above. */
2291 return 6;
2292 }
2293 else
2294 {
2295 /* The scale here is cycles * 2. */
2296
2297 if (to == FP_REGS)
2298 return 8;
2299 if (from == FP_REGS)
2300 return 4;
2301
2302 /* All legal moves between integral registers are single cycle. */
2303 return 2;
2304 }
2305 }
2306
2307 /* Implement the TARGET_MEMORY_MOVE_COST hook.
2308
2309    Since the form of the address is not available here, this cost must
2310    be speed-relative, though it should never be less expensive than the
2311    size-relative register move costs above.  This is not a problem.  */
2312
2313 static int
2314 mn10300_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
2315 reg_class_t iclass, bool in ATTRIBUTE_UNUSED)
2316 {
2317 enum reg_class rclass = (enum reg_class) iclass;
2318
2319 if (rclass == FP_REGS)
2320 return 8;
2321 return 6;
2322 }
2323
2324 /* Implement the TARGET_RTX_COSTS hook.
2325
2326 Speed-relative costs are relative to COSTS_N_INSNS, which is intended
2327 to represent cycles. Size-relative costs are in bytes. */
2328
2329 static bool
2330 mn10300_rtx_costs (rtx x, machine_mode mode, int outer_code,
2331 int opno ATTRIBUTE_UNUSED, int *ptotal, bool speed)
2332 {
2333 /* This value is used for SYMBOL_REF etc where we want to pretend
2334 we have a full 32-bit constant. */
2335 HOST_WIDE_INT i = 0x12345678;
2336 int total;
2337 int code = GET_CODE (x);
2338
2339 switch (code)
2340 {
2341 case CONST_INT:
2342 i = INTVAL (x);
2343 do_int_costs:
2344 if (speed)
2345 {
2346 if (outer_code == SET)
2347 {
2348 /* 16-bit integer loads have latency 1, 32-bit loads 2. */
2349 if (IN_RANGE (i, -32768, 32767))
2350 total = COSTS_N_INSNS (1);
2351 else
2352 total = COSTS_N_INSNS (2);
2353 }
2354 else
2355 {
2356 /* 16-bit integer operands don't affect latency;
2357 24-bit and 32-bit operands add a cycle. */
2358 if (IN_RANGE (i, -32768, 32767))
2359 total = 0;
2360 else
2361 total = COSTS_N_INSNS (1);
2362 }
2363 }
2364 else
2365 {
2366 if (outer_code == SET)
2367 {
2368 if (i == 0)
2369 total = 1;
2370 else if (IN_RANGE (i, -128, 127))
2371 total = 2;
2372 else if (IN_RANGE (i, -32768, 32767))
2373 total = 3;
2374 else
2375 total = 6;
2376 }
2377 else
2378 {
2379 /* Reference here is ADD An,Dn, vs ADD imm,Dn. */
2380 if (IN_RANGE (i, -128, 127))
2381 total = 0;
2382 else if (IN_RANGE (i, -32768, 32767))
2383 total = 2;
2384 else if (TARGET_AM33 && IN_RANGE (i, -0x01000000, 0x00ffffff))
2385 total = 3;
2386 else
2387 total = 4;
2388 }
2389 }
2390 goto alldone;
2391
2392 case CONST:
2393 case LABEL_REF:
2394 case SYMBOL_REF:
2395 case CONST_DOUBLE:
2396 /* We assume all of these require a 32-bit constant, even though
2397 some symbol and label references can be relaxed. */
2398 goto do_int_costs;
2399
2400 case UNSPEC:
2401 switch (XINT (x, 1))
2402 {
2403 case UNSPEC_PIC:
2404 case UNSPEC_GOT:
2405 case UNSPEC_GOTOFF:
2406 case UNSPEC_PLT:
2407 case UNSPEC_GOTSYM_OFF:
2408 /* The PIC unspecs also resolve to a 32-bit constant. */
2409 goto do_int_costs;
2410
2411 default:
2412 /* Assume any non-listed unspec is some sort of arithmetic. */
2413 goto do_arith_costs;
2414 }
2415
2416 case PLUS:
2417 /* Notice the size difference of INC and INC4. */
2418 if (!speed && outer_code == SET && CONST_INT_P (XEXP (x, 1)))
2419 {
2420 i = INTVAL (XEXP (x, 1));
2421 if (i == 1 || i == 4)
2422 {
2423 total = 1 + rtx_cost (XEXP (x, 0), mode, PLUS, 0, speed);
2424 goto alldone;
2425 }
2426 }
2427 goto do_arith_costs;
2428
2429 case MINUS:
2430 case AND:
2431 case IOR:
2432 case XOR:
2433 case NOT:
2434 case NEG:
2435 case ZERO_EXTEND:
2436 case SIGN_EXTEND:
2437 case COMPARE:
2438 case BSWAP:
2439 case CLZ:
2440 do_arith_costs:
2441 total = (speed ? COSTS_N_INSNS (1) : 2);
2442 break;
2443
2444 case ASHIFT:
2445 /* Notice the size difference of ASL2 and variants. */
2446 if (!speed && CONST_INT_P (XEXP (x, 1)))
2447 switch (INTVAL (XEXP (x, 1)))
2448 {
2449 case 1:
2450 case 2:
2451 total = 1;
2452 goto alldone;
2453 case 3:
2454 case 4:
2455 total = 2;
2456 goto alldone;
2457 }
2458 /* FALLTHRU */
2459
2460 case ASHIFTRT:
2461 case LSHIFTRT:
2462 total = (speed ? COSTS_N_INSNS (1) : 3);
2463 goto alldone;
2464
2465 case MULT:
2466 total = (speed ? COSTS_N_INSNS (3) : 2);
2467 break;
2468
2469 case DIV:
2470 case UDIV:
2471 case MOD:
2472 case UMOD:
2473 total = (speed ? COSTS_N_INSNS (39)
2474 /* Include space to load+retrieve MDR. */
2475 : code == MOD || code == UMOD ? 6 : 4);
2476 break;
2477
2478 case MEM:
2479 total = mn10300_address_cost (XEXP (x, 0), mode,
2480 MEM_ADDR_SPACE (x), speed);
2481 if (speed)
2482 total = COSTS_N_INSNS (2 + total);
2483 goto alldone;
2484
2485 default:
2486 /* Probably not implemented. Assume external call. */
2487 total = (speed ? COSTS_N_INSNS (10) : 7);
2488 break;
2489 }
2490
2491 *ptotal = total;
2492 return false;
2493
2494 alldone:
2495 *ptotal = total;
2496 return true;
2497 }
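
/* A worked example of the integer path: for (set (reg) (const_int
   100000)) the constant does not fit in 16 bits, so the cost is
   COSTS_N_INSNS (2) when optimizing for speed and 6 bytes when
   optimizing for size; whereas an inner (plus (reg) (const_int 1))
   under a SET hits the INC special case and is charged a single byte
   plus the cost of its register operand.  */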
2498
2499 /* If using PIC, mark a SYMBOL_REF for a non-global symbol so that we
2500 may access it using GOTOFF instead of GOT. */
2501
2502 static void
2503 mn10300_encode_section_info (tree decl, rtx rtl, int first)
2504 {
2505 rtx symbol;
2506
2507 default_encode_section_info (decl, rtl, first);
2508
2509 if (! MEM_P (rtl))
2510 return;
2511
2512 symbol = XEXP (rtl, 0);
2513 if (GET_CODE (symbol) != SYMBOL_REF)
2514 return;
2515
2516 if (flag_pic)
2517 SYMBOL_REF_FLAG (symbol) = (*targetm.binds_local_p) (decl);
2518 }
2519
2520 /* Dispatch tables on the mn10300 are extremely expensive in terms of code
2521 and readonly data size. So we crank up the case threshold value to
2522 encourage a series of if/else comparisons to implement many small switch
2523 statements. In theory, this value could be increased much more if we
2524 were solely optimizing for space, but we keep it "reasonable" to avoid
2525 serious code efficiency lossage. */
2526
2527 static unsigned int
2528 mn10300_case_values_threshold (void)
2529 {
2530 return 6;
2531 }
2532
2533 /* Worker function for TARGET_TRAMPOLINE_INIT. */
2534
2535 static void
2536 mn10300_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
2537 {
2538 rtx mem, disp, fnaddr = XEXP (DECL_RTL (fndecl), 0);
2539
2540 /* This is a strict alignment target, which means that we play
2541 some games to make sure that the locations at which we need
2542 to store <chain> and <disp> wind up at aligned addresses.
2543
2544 0x28 0x00 add 0,d0
2545 0xfc 0xdd mov chain,a1
2546 <chain>
2547 0xf8 0xed 0x00 btst 0,d1
2548 0xdc jmp fnaddr
2549 <disp>
2550
2551 Note that the two extra insns are effectively nops; they
2552 clobber the flags but do not affect the contents of D0 or D1. */
2553
2554 disp = expand_binop (SImode, sub_optab, fnaddr,
2555 plus_constant (Pmode, XEXP (m_tramp, 0), 11),
2556 NULL_RTX, 1, OPTAB_DIRECT);
2557
2558 mem = adjust_address (m_tramp, SImode, 0);
2559 emit_move_insn (mem, gen_int_mode (0xddfc0028, SImode));
2560 mem = adjust_address (m_tramp, SImode, 4);
2561 emit_move_insn (mem, chain_value);
2562 mem = adjust_address (m_tramp, SImode, 8);
2563 emit_move_insn (mem, gen_int_mode (0xdc00edf8, SImode));
2564 mem = adjust_address (m_tramp, SImode, 12);
2565 emit_move_insn (mem, disp);
2566 }
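
/* The resulting 16-byte trampoline, by byte offset (little-endian
   stores of the SImode words above):

      0:  28 00 fc dd    add 0,d0, then the first bytes of mov chain,a1
      4:  <chain>        static chain value, naturally aligned
      8:  f8 ed 00 dc    btst 0,d1, followed by the jmp opcode
     12:  <disp>         fnaddr - (tramp + 11), the jmp displacement

   Every store is an aligned SImode store, as required on this
   strict-alignment target.  */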
2567
2568 /* Output the assembler code for a C++ thunk function.
2569 THUNK_DECL is the declaration for the thunk function itself, FUNCTION
2570 is the decl for the target function. DELTA is an immediate constant
2571 offset to be added to the THIS parameter. If VCALL_OFFSET is nonzero
2572 the word at the adjusted address *(*THIS' + VCALL_OFFSET) should be
2573 additionally added to THIS. Finally jump to the entry point of
2574 FUNCTION. */
2575
2576 static void
2577 mn10300_asm_output_mi_thunk (FILE * file,
2578 tree thunk_fndecl ATTRIBUTE_UNUSED,
2579 HOST_WIDE_INT delta,
2580 HOST_WIDE_INT vcall_offset,
2581 tree function)
2582 {
2583 const char * _this;
2584
2585 /* Get the register holding the THIS parameter. Handle the case
2586 where there is a hidden first argument for a returned structure. */
2587 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
2588 _this = reg_names [FIRST_ARGUMENT_REGNUM + 1];
2589 else
2590 _this = reg_names [FIRST_ARGUMENT_REGNUM];
2591
2592 fprintf (file, "\t%s Thunk Entry Point:\n", ASM_COMMENT_START);
2593
2594 if (delta)
2595 fprintf (file, "\tadd %d, %s\n", (int) delta, _this);
2596
2597 if (vcall_offset)
2598 {
2599 const char * scratch = reg_names [FIRST_ADDRESS_REGNUM + 1];
2600
2601 fprintf (file, "\tmov %s, %s\n", _this, scratch);
2602 fprintf (file, "\tmov (%s), %s\n", scratch, scratch);
2603 fprintf (file, "\tadd %d, %s\n", (int) vcall_offset, scratch);
2604 fprintf (file, "\tmov (%s), %s\n", scratch, scratch);
2605 fprintf (file, "\tadd %s, %s\n", scratch, _this);
2606 }
2607
2608 fputs ("\tjmp ", file);
2609 assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
2610 putc ('\n', file);
2611 }
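
/* A hypothetical thunk with DELTA == 4 and VCALL_OFFSET == 8, assuming
   d0 holds THIS and a1 is the scratch address register, would be
   emitted as:

	# Thunk Entry Point:
	add 4, d0
	mov d0, a1
	mov (a1), a1
	add 8, a1
	mov (a1), a1
	add a1, d0
	jmp _target

   where _target stands for the real function and the comment
   character depends on ASM_COMMENT_START.  */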
2612
2613 /* Return true if mn10300_output_mi_thunk would be able to output the
2614 assembler code for the thunk function specified by the arguments
2615 it is passed, and false otherwise. */
2616
2617 static bool
2618 mn10300_can_output_mi_thunk (const_tree thunk_fndecl ATTRIBUTE_UNUSED,
2619 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
2620 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
2621 const_tree function ATTRIBUTE_UNUSED)
2622 {
2623 return true;
2624 }
2625
2626 bool
2627 mn10300_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
2628 {
2629 if (REGNO_REG_CLASS (regno) == FP_REGS
2630 || REGNO_REG_CLASS (regno) == FP_ACC_REGS)
2631 /* Do not store integer values in FP registers. */
2632 return GET_MODE_CLASS (mode) == MODE_FLOAT && ((regno & 1) == 0);
2633
2634 if (! TARGET_AM33 && REGNO_REG_CLASS (regno) == EXTENDED_REGS)
2635 return false;
2636
2637 if (((regno) & 1) == 0 || GET_MODE_SIZE (mode) == 4)
2638 return true;
2639
2640 if (REGNO_REG_CLASS (regno) == DATA_REGS
2641 || (TARGET_AM33 && REGNO_REG_CLASS (regno) == ADDRESS_REGS)
2642 || REGNO_REG_CLASS (regno) == EXTENDED_REGS)
2643 return GET_MODE_SIZE (mode) <= 4;
2644
2645 return false;
2646 }
2647
2648 bool
2649 mn10300_modes_tieable (machine_mode mode1, machine_mode mode2)
2650 {
2651 if (GET_MODE_CLASS (mode1) == MODE_FLOAT
2652 && GET_MODE_CLASS (mode2) != MODE_FLOAT)
2653 return false;
2654
2655 if (GET_MODE_CLASS (mode2) == MODE_FLOAT
2656 && GET_MODE_CLASS (mode1) != MODE_FLOAT)
2657 return false;
2658
2659 if (TARGET_AM33
2660 || mode1 == mode2
2661 || (GET_MODE_SIZE (mode1) <= 4 && GET_MODE_SIZE (mode2) <= 4))
2662 return true;
2663
2664 return false;
2665 }
2666
2667 static int
2668 cc_flags_for_mode (machine_mode mode)
2669 {
2670 switch (mode)
2671 {
2672 case CCmode:
2673 return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_C | CC_FLAG_V;
2674 case CCZNCmode:
2675 return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_C;
2676 case CCZNmode:
2677 return CC_FLAG_Z | CC_FLAG_N;
2678 case CC_FLOATmode:
2679 return -1;
2680 default:
2681 gcc_unreachable ();
2682 }
2683 }
2684
2685 static int
2686 cc_flags_for_code (enum rtx_code code)
2687 {
2688 switch (code)
2689 {
2690 case EQ: /* Z */
2691 case NE: /* ~Z */
2692 return CC_FLAG_Z;
2693
2694 case LT: /* N */
2695 case GE: /* ~N */
2696 return CC_FLAG_N;
2697
2698 case GT: /* ~(Z|(N^V)) */
2699 case LE: /* Z|(N^V) */
2700 return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_V;
2701
2702 case GEU: /* ~C */
2703 case LTU: /* C */
2704 return CC_FLAG_C;
2705
2706 case GTU: /* ~(C | Z) */
2707 case LEU: /* C | Z */
2708 return CC_FLAG_Z | CC_FLAG_C;
2709
2710 case ORDERED:
2711 case UNORDERED:
2712 case LTGT:
2713 case UNEQ:
2714 case UNGE:
2715 case UNGT:
2716 case UNLE:
2717 case UNLT:
2718 return -1;
2719
2720 default:
2721 gcc_unreachable ();
2722 }
2723 }
2724
2725 machine_mode
2726 mn10300_select_cc_mode (enum rtx_code code, rtx x, rtx y ATTRIBUTE_UNUSED)
2727 {
2728 int req;
2729
2730 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2731 return CC_FLOATmode;
2732
2733 req = cc_flags_for_code (code);
2734
2735 if (req & CC_FLAG_V)
2736 return CCmode;
2737 if (req & CC_FLAG_C)
2738 return CCZNCmode;
2739 return CCZNmode;
2740 }
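
/* Examples of the mode selection: EQ and NE need only the Z flag and
   so pick CCZNmode; unsigned comparisons such as LTU add the carry and
   pick CCZNCmode; signed GT and LE also need the overflow flag and
   therefore require full CCmode; any floating-point comparison yields
   CC_FLOATmode.  */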
2741
2742 static inline bool
2743 set_is_load_p (rtx set)
2744 {
2745 return MEM_P (SET_SRC (set));
2746 }
2747
2748 static inline bool
2749 set_is_store_p (rtx set)
2750 {
2751 return MEM_P (SET_DEST (set));
2752 }
2753
2754 /* Update scheduling costs for situations that cannot be
2755 described using the attributes and DFA machinery.
2756 DEP is the insn being scheduled.
2757 INSN is the previous insn.
2758 COST is the current cycle cost for DEP. */
2759
2760 static int
2761 mn10300_adjust_sched_cost (rtx_insn *insn, int dep_type, rtx_insn *dep,
2762 int cost, unsigned int)
2763 {
2764 rtx insn_set;
2765 rtx dep_set;
2766 int timings;
2767
2768 if (!TARGET_AM33)
2769 return 1;
2770
2771   /* We are only interested in pairs of SETs.  */
2772 insn_set = single_set (insn);
2773 if (!insn_set)
2774 return cost;
2775
2776 dep_set = single_set (dep);
2777 if (!dep_set)
2778 return cost;
2779
2780 /* For the AM34 a load instruction that follows a
2781 store instruction incurs an extra cycle of delay. */
2782 if (mn10300_tune_cpu == PROCESSOR_AM34
2783 && set_is_load_p (dep_set)
2784 && set_is_store_p (insn_set))
2785 cost += 1;
2786
2787 /* For the AM34 a non-store, non-branch FPU insn that follows
2788 another FPU insn incurs a one cycle throughput increase. */
2789 else if (mn10300_tune_cpu == PROCESSOR_AM34
2790 && ! set_is_store_p (insn_set)
2791 && ! JUMP_P (insn)
2792 && GET_MODE_CLASS (GET_MODE (SET_SRC (dep_set))) == MODE_FLOAT
2793 && GET_MODE_CLASS (GET_MODE (SET_SRC (insn_set))) == MODE_FLOAT)
2794 cost += 1;
2795
2796 /* Resolve the conflict described in section 1-7-4 of
2797 Chapter 3 of the MN103E Series Instruction Manual
2798 where it says:
2799
2800 "When the preceding instruction is a CPU load or
2801 store instruction, a following FPU instruction
2802 cannot be executed until the CPU completes the
2803 latency period even though there are no register
2804 or flag dependencies between them." */
2805
2806 /* Only the AM33-2 (and later) CPUs have FPU instructions. */
2807 if (! TARGET_AM33_2)
2808 return cost;
2809
2810 /* If a data dependence already exists then the cost is correct. */
2811 if (dep_type == 0)
2812 return cost;
2813
2814   /* Check that the instruction about to be scheduled is an FPU instruction.  */
2815 if (GET_MODE_CLASS (GET_MODE (SET_SRC (dep_set))) != MODE_FLOAT)
2816 return cost;
2817
2818 /* Now check to see if the previous instruction is a load or store. */
2819 if (! set_is_load_p (insn_set) && ! set_is_store_p (insn_set))
2820 return cost;
2821
2822 /* XXX: Verify: The text of 1-7-4 implies that the restriction
2823 only applies when an INTEGER load/store precedes an FPU
2824      instruction, but is this true?  For now we assume that it is.  */
2825 if (GET_MODE_CLASS (GET_MODE (SET_SRC (insn_set))) != MODE_INT)
2826 return cost;
2827
2828 /* Extract the latency value from the timings attribute. */
2829 timings = get_attr_timings (insn);
2830 return timings < 100 ? (timings % 10) : (timings % 100);
2831 }
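
/* Two concrete consequences of the rules above, for illustration: on
   the AM34, a store such as "mov d0,(a0)" scheduled immediately before
   a load such as "mov (a1),d1" has one cycle added to the dependence
   cost; and on AM33-2, an FPU insn placed directly after a CPU integer
   load or store, with no data dependence between them, is charged the
   full latency taken from the load/store's "timings" attribute.  */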
2832
2833 static void
2834 mn10300_conditional_register_usage (void)
2835 {
2836 unsigned int i;
2837
2838 if (!TARGET_AM33)
2839 {
2840 for (i = FIRST_EXTENDED_REGNUM;
2841 i <= LAST_EXTENDED_REGNUM; i++)
2842 fixed_regs[i] = call_used_regs[i] = 1;
2843 }
2844 if (!TARGET_AM33_2)
2845 {
2846 for (i = FIRST_FP_REGNUM;
2847 i <= LAST_FP_REGNUM; i++)
2848 fixed_regs[i] = call_used_regs[i] = 1;
2849 }
2850 if (flag_pic)
2851 fixed_regs[PIC_OFFSET_TABLE_REGNUM] =
2852 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
2853 }
2854
2855 /* Worker function for TARGET_MD_ASM_ADJUST.
2856 We do this in the mn10300 backend to maintain source compatibility
2857 with the old cc0-based compiler. */
2858
2859 static rtx_insn *
2860 mn10300_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
2861 vec<const char *> &/*constraints*/,
2862 vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
2863 {
2864 clobbers.safe_push (gen_rtx_REG (CCmode, CC_REG));
2865 SET_HARD_REG_BIT (clobbered_regs, CC_REG);
2866 return NULL;
2867 }
2868 \f
2869 /* A helper function for splitting cbranch patterns after reload. */
2870
2871 void
2872 mn10300_split_cbranch (machine_mode cmp_mode, rtx cmp_op, rtx label_ref)
2873 {
2874 rtx flags, x;
2875
2876 flags = gen_rtx_REG (cmp_mode, CC_REG);
2877 x = gen_rtx_COMPARE (cmp_mode, XEXP (cmp_op, 0), XEXP (cmp_op, 1));
2878 x = gen_rtx_SET (flags, x);
2879 emit_insn (x);
2880
2881 x = gen_rtx_fmt_ee (GET_CODE (cmp_op), VOIDmode, flags, const0_rtx);
2882 x = gen_rtx_IF_THEN_ELSE (VOIDmode, x, label_ref, pc_rtx);
2883 x = gen_rtx_SET (pc_rtx, x);
2884 emit_jump_insn (x);
2885 }
2886
2887 /* A helper function for matching parallels that set the flags. */
2888
2889 bool
2890 mn10300_match_ccmode (rtx insn, machine_mode cc_mode)
2891 {
2892 rtx op1, flags;
2893 machine_mode flags_mode;
2894
2895 gcc_checking_assert (XVECLEN (PATTERN (insn), 0) == 2);
2896
2897 op1 = XVECEXP (PATTERN (insn), 0, 1);
2898 gcc_checking_assert (GET_CODE (SET_SRC (op1)) == COMPARE);
2899
2900 flags = SET_DEST (op1);
2901 flags_mode = GET_MODE (flags);
2902
2903 if (GET_MODE (SET_SRC (op1)) != flags_mode)
2904 return false;
2905 if (GET_MODE_CLASS (flags_mode) != MODE_CC)
2906 return false;
2907
2908 /* Ensure that the mode of FLAGS is compatible with CC_MODE. */
2909 if (cc_flags_for_mode (flags_mode) & ~cc_flags_for_mode (cc_mode))
2910 return false;
2911
2912 return true;
2913 }
2914
2915 /* This function is used to help split:
2916
2917 (set (reg) (and (reg) (int)))
2918
2919 into:
2920
2921      (set (reg) (shift (reg) (int)))
2922      (set (reg) (shift (reg) (int)))
2923
2924    where the shifts will be shorter than the "and" insn.
2925
2926    It returns the number of bits that should be shifted.  A positive
2927    value means that the low bits are to be cleared (and hence the
2928 shifts should be right followed by left) whereas a negative value
2929 means that the high bits are to be cleared (left followed by right).
2930 Zero is returned when it would not be economical to split the AND. */
2931
2932 int
2933 mn10300_split_and_operand_count (rtx op)
2934 {
2935 HOST_WIDE_INT val = INTVAL (op);
2936 int count;
2937
2938 if (val < 0)
2939 {
2940 /* High bit is set, look for bits clear at the bottom. */
2941 count = exact_log2 (-val);
2942 if (count < 0)
2943 return 0;
2944       /* This is only a size win if we can use the asl2 insn.  Otherwise we
2945          would be replacing one 6-byte insn with two 3-byte insns.  */
2946 if (count > (optimize_insn_for_speed_p () ? 2 : 4))
2947 return 0;
2948 return count;
2949 }
2950 else
2951 {
2952 /* High bit is clear, look for bits set at the bottom. */
2953 count = exact_log2 (val + 1);
2954 count = 32 - count;
2955 /* Again, this is only a size win with asl2. */
2956 if (count > (optimize_insn_for_speed_p () ? 2 : 4))
2957 return 0;
2958 return -count;
2959 }
2960 }
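
/* Worked examples: AND with 0xfffffffc has the high bit set and
   exact_log2 (4) == 2, so 2 is returned: shift right by 2, then left
   by 2, clearing the low two bits; this is profitable even for speed.
   AND with 0x0fffffff gives count = 32 - exact_log2 (0x10000000) = 4
   and returns -4 (left shift, then right), clearing the high nibble;
   that one is a win only when optimizing for size.  */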
2961 \f
2962 struct liw_data
2963 {
2964 enum attr_liw slot;
2965 enum attr_liw_op op;
2966 rtx dest;
2967 rtx src;
2968 };
2969
2970 /* Decide if the given insn is a candidate for LIW bundling. If it is then
2971 extract the operands and LIW attributes from the insn and use them to fill
2972 in the liw_data structure. Return true upon success or false if the insn
2973 cannot be bundled. */
2974
2975 static bool
2976 extract_bundle (rtx_insn *insn, struct liw_data * pdata)
2977 {
2978 bool allow_consts = true;
2979 rtx p;
2980
2981 gcc_assert (pdata != NULL);
2982
2983 if (insn == NULL)
2984 return false;
2985 /* Make sure that we are dealing with a simple SET insn. */
2986 p = single_set (insn);
2987 if (p == NULL_RTX)
2988 return false;
2989
2990 /* Make sure that it could go into one of the LIW pipelines. */
2991 pdata->slot = get_attr_liw (insn);
2992 if (pdata->slot == LIW_BOTH)
2993 return false;
2994
2995 pdata->op = get_attr_liw_op (insn);
2996
2997 switch (pdata->op)
2998 {
2999 case LIW_OP_MOV:
3000 pdata->dest = SET_DEST (p);
3001 pdata->src = SET_SRC (p);
3002 break;
3003 case LIW_OP_CMP:
3004 pdata->dest = XEXP (SET_SRC (p), 0);
3005 pdata->src = XEXP (SET_SRC (p), 1);
3006 break;
3007 case LIW_OP_NONE:
3008 return false;
3009 case LIW_OP_AND:
3010 case LIW_OP_OR:
3011 case LIW_OP_XOR:
3012 /* The AND, OR and XOR long instruction words only accept register arguments. */
3013 allow_consts = false;
3014 /* Fall through. */
3015 default:
3016 pdata->dest = SET_DEST (p);
3017 pdata->src = XEXP (SET_SRC (p), 1);
3018 break;
3019 }
3020
3021 if (! REG_P (pdata->dest))
3022 return false;
3023
3024 if (REG_P (pdata->src))
3025 return true;
3026
3027 return allow_consts && satisfies_constraint_O (pdata->src);
3028 }
3029
3030 /* Make sure that it is OK to execute LIW1 and LIW2 in parallel. GCC generated
3031 the instructions with the assumption that LIW1 would be executed before LIW2
3032 so we must check for overlaps between their sources and destinations. */
3033
3034 static bool
3035 check_liw_constraints (struct liw_data * pliw1, struct liw_data * pliw2)
3036 {
3037 /* Check for slot conflicts. */
3038 if (pliw2->slot == pliw1->slot && pliw1->slot != LIW_EITHER)
3039 return false;
3040
3041 /* If either operation is a compare, then "dest" is really an input; the real
3042 destination is CC_REG. So these instructions need different checks. */
3043
3044 /* Changing "CMP ; OP" into "CMP | OP" is OK because the comparison will
3045 check its values prior to any changes made by OP. */
3046 if (pliw1->op == LIW_OP_CMP)
3047 {
3048 /* Two sequential comparisons means dead code, which ought to
3049 have been eliminated given that bundling only happens with
3050 optimization. We cannot bundle them in any case. */
3051 gcc_assert (pliw1->op != pliw2->op);
3052 return true;
3053 }
3054
3055 /* Changing "OP ; CMP" into "OP | CMP" does not work if the value being compared
3056 is the destination of OP, as the CMP will look at the old value, not the new
3057 one. */
3058 if (pliw2->op == LIW_OP_CMP)
3059 {
3060 if (REGNO (pliw2->dest) == REGNO (pliw1->dest))
3061 return false;
3062
3063 if (REG_P (pliw2->src))
3064 return REGNO (pliw2->src) != REGNO (pliw1->dest);
3065
3066 return true;
3067 }
3068
3069 /* Changing "OP1 ; OP2" into "OP1 | OP2" does not work if they both write to the
3070 same destination register. */
3071 if (REGNO (pliw2->dest) == REGNO (pliw1->dest))
3072 return false;
3073
3074 /* Changing "OP1 ; OP2" into "OP1 | OP2" generally does not work if the destination
3075 of OP1 is the source of OP2. The exception is when OP1 is a MOVE instruction when
3076 we can replace the source in OP2 with the source of OP1. */
3077 if (REG_P (pliw2->src) && REGNO (pliw2->src) == REGNO (pliw1->dest))
3078 {
3079 if (pliw1->op == LIW_OP_MOV && REG_P (pliw1->src))
3080 {
3081          /* The enclosing condition already guarantees a register source,
3082             so the register-only restriction of the AND, OR and XOR long
3083             instruction words cannot be violated here.  */
3086
3087 pliw2->src = pliw1->src;
3088 return true;
3089 }
3090 return false;
3091 }
3092
3093 /* Everything else is OK. */
3094 return true;
3095 }
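
/* Illustrative pairs: "mov d1,d2 ; add d2,d3" can be bundled, because
   the MOV-forwarding case above rewrites the second operation to read
   d1 directly; "add d1,d2 ; cmp d2,d3" cannot, because the CMP would
   observe d2 before the ADD has updated it.  */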
3096
3097 /* Combine pairs of insns into LIW bundles. */
3098
3099 static void
3100 mn10300_bundle_liw (void)
3101 {
3102 rtx_insn *r;
3103
3104 for (r = get_insns (); r != NULL; r = next_nonnote_nondebug_insn (r))
3105 {
3106 rtx_insn *insn1, *insn2;
3107 struct liw_data liw1, liw2;
3108
3109 insn1 = r;
3110 if (! extract_bundle (insn1, & liw1))
3111 continue;
3112
3113 insn2 = next_nonnote_nondebug_insn (insn1);
3114 if (! extract_bundle (insn2, & liw2))
3115 continue;
3116
3117 /* Check for source/destination overlap. */
3118 if (! check_liw_constraints (& liw1, & liw2))
3119 continue;
3120
3121 if (liw1.slot == LIW_OP2 || liw2.slot == LIW_OP1)
3122 {
3123 struct liw_data temp;
3124
3125 temp = liw1;
3126 liw1 = liw2;
3127 liw2 = temp;
3128 }
3129
3130 delete_insn (insn2);
3131
3132 rtx insn2_pat;
3133 if (liw1.op == LIW_OP_CMP)
3134 insn2_pat = gen_cmp_liw (liw2.dest, liw2.src, liw1.dest, liw1.src,
3135 GEN_INT (liw2.op));
3136 else if (liw2.op == LIW_OP_CMP)
3137 insn2_pat = gen_liw_cmp (liw1.dest, liw1.src, liw2.dest, liw2.src,
3138 GEN_INT (liw1.op));
3139 else
3140 insn2_pat = gen_liw (liw1.dest, liw2.dest, liw1.src, liw2.src,
3141 GEN_INT (liw1.op), GEN_INT (liw2.op));
3142
3143 insn2 = emit_insn_after (insn2_pat, insn1);
3144 delete_insn (insn1);
3145 r = insn2;
3146 }
3147 }
3148
3149 #define DUMP(reason, insn) \
3150 do \
3151 { \
3152 if (dump_file) \
3153 { \
3154 fprintf (dump_file, reason "\n"); \
3155 if (insn != NULL_RTX) \
3156 print_rtl_single (dump_file, insn); \
3157           fprintf (dump_file, "\n"); \
3158 } \
3159 } \
3160 while (0)
3161
3162 /* Replace the BRANCH insn with a Lcc insn that goes to LABEL.
3163 Insert a SETLB insn just before LABEL. */
3164
3165 static void
3166 mn10300_insert_setlb_lcc (rtx label, rtx branch)
3167 {
3168 rtx lcc, comparison, cmp_reg;
3169
3170 if (LABEL_NUSES (label) > 1)
3171 {
3172 rtx_insn *insn;
3173
3174 /* This label is used both as an entry point to the loop
3175 and as a loop-back point for the loop. We need to separate
3176 these two functions so that the SETLB happens upon entry,
3177 but the loop-back does not go to the SETLB instruction. */
3178 DUMP ("Inserting SETLB insn after:", label);
3179 insn = emit_insn_after (gen_setlb (), label);
3180 label = gen_label_rtx ();
3181 emit_label_after (label, insn);
3182 DUMP ("Created new loop-back label:", label);
3183 }
3184 else
3185 {
3186 DUMP ("Inserting SETLB insn before:", label);
3187 emit_insn_before (gen_setlb (), label);
3188 }
3189
3190 comparison = XEXP (SET_SRC (PATTERN (branch)), 0);
3191 cmp_reg = XEXP (comparison, 0);
3192 gcc_assert (REG_P (cmp_reg));
3193
3194   /* The comparison must already have been split out of the branch;
3195      all that is left here is the flags register.  */
3196 gcc_assert (REGNO (cmp_reg) == CC_REG);
3197
3198 if (GET_MODE (cmp_reg) == CC_FLOATmode)
3199 lcc = gen_FLcc (comparison, label);
3200 else
3201 lcc = gen_Lcc (comparison, label);
3202
3203 rtx_insn *jump = emit_jump_insn_before (lcc, branch);
3204 mark_jump_label (XVECEXP (lcc, 0, 0), jump, 0);
3205 JUMP_LABEL (jump) = label;
3206 DUMP ("Replacing branch insn...", branch);
3207 DUMP ("... with Lcc insn:", jump);
3208 delete_insn (branch);
3209 }
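
/* A before/after sketch (loop body abridged): a single-block loop
   ending in

	cmp d0,d1
	blt .L5

   becomes, with .L5 as the loop head,

	setlb
   .L5:
	...
	cmp d0,d1
	llt .L5

   where the Lcc insn (spelled "llt" here for the signed-less-than
   case) branches back using the loop buffer that the SETLB insn
   primed, avoiding the normal branch penalty.  */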
3210
3211 static bool
3212 mn10300_block_contains_call (basic_block block)
3213 {
3214 rtx_insn *insn;
3215
3216 FOR_BB_INSNS (block, insn)
3217 if (CALL_P (insn))
3218 return true;
3219
3220 return false;
3221 }
3222
3223 static bool
3224 mn10300_loop_contains_call_insn (loop_p loop)
3225 {
3226 basic_block * bbs;
3227 bool result = false;
3228 unsigned int i;
3229
3230 bbs = get_loop_body (loop);
3231
3232 for (i = 0; i < loop->num_nodes; i++)
3233 if (mn10300_block_contains_call (bbs[i]))
3234 {
3235 result = true;
3236 break;
3237 }
3238
3239 free (bbs);
3240 return result;
3241 }
3242
3243 static void
3244 mn10300_scan_for_setlb_lcc (void)
3245 {
3246 loop_p loop;
3247
3248 DUMP ("Looking for loops that can use the SETLB insn", NULL_RTX);
3249
3250 df_analyze ();
3251 compute_bb_for_insn ();
3252
3253 /* Find the loops. */
3254 loop_optimizer_init (AVOID_CFG_MODIFICATIONS);
3255
3256 /* FIXME: For now we only investigate innermost loops. In practice however
3257 if an inner loop is not suitable for use with the SETLB/Lcc insns, it may
3258 be the case that its parent loop is suitable. Thus we should check all
3259 loops, but work from the innermost outwards. */
3260 FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
3261 {
3262 const char * reason = NULL;
3263
3264 /* Check to see if we can modify this loop. If we cannot
3265 then set 'reason' to describe why it could not be done. */
3266 if (loop->latch == NULL)
3267 reason = "it contains multiple latches";
3268 else if (loop->header != loop->latch)
3269 /* FIXME: We could handle loops that span multiple blocks,
3270 but this requires a lot more work tracking down the branches
3271 that need altering, so for now keep things simple. */
3272 reason = "the loop spans multiple blocks";
3273 else if (mn10300_loop_contains_call_insn (loop))
3274 reason = "it contains CALL insns";
3275 else
3276 {
3277 rtx_insn *branch = BB_END (loop->latch);
3278
3279 gcc_assert (JUMP_P (branch));
3280 if (single_set (branch) == NULL_RTX || ! any_condjump_p (branch))
3281 /* We cannot optimize tablejumps and the like. */
3282 /* FIXME: We could handle unconditional jumps. */
3283 reason = "it is not a simple loop";
3284 else
3285 {
3286 rtx_insn *label;
3287
3288 if (dump_file)
3289 flow_loop_dump (loop, dump_file, NULL, 0);
3290
3291 label = BB_HEAD (loop->header);
3292 gcc_assert (LABEL_P (label));
3293
3294 mn10300_insert_setlb_lcc (label, branch);
3295 }
3296 }
3297
3298 if (dump_file && reason != NULL)
3299 fprintf (dump_file, "Loop starting with insn %d is not suitable because %s\n",
3300 INSN_UID (BB_HEAD (loop->header)),
3301 reason);
3302 }
3303
3304 loop_optimizer_finalize ();
3305
3306 df_finish_pass (false);
3307
3308 DUMP ("SETLB scan complete", NULL_RTX);
3309 }
3310
3311 static void
3312 mn10300_reorg (void)
3313 {
3314 /* These are optimizations, so only run them if optimizing. */
3315 if (TARGET_AM33 && (optimize > 0 || optimize_size))
3316 {
3317 if (TARGET_ALLOW_SETLB)
3318 mn10300_scan_for_setlb_lcc ();
3319
3320 if (TARGET_ALLOW_LIW)
3321 mn10300_bundle_liw ();
3322 }
3323 }
3324 \f
3325 /* Initialize the GCC target structure. */
3326
3327 #undef TARGET_MACHINE_DEPENDENT_REORG
3328 #define TARGET_MACHINE_DEPENDENT_REORG mn10300_reorg
3329
3330 #undef TARGET_ASM_ALIGNED_HI_OP
3331 #define TARGET_ASM_ALIGNED_HI_OP "\t.hword\t"
3332
3333 #undef TARGET_LEGITIMIZE_ADDRESS
3334 #define TARGET_LEGITIMIZE_ADDRESS mn10300_legitimize_address
3335
3336 #undef TARGET_ADDRESS_COST
3337 #define TARGET_ADDRESS_COST mn10300_address_cost
3338 #undef TARGET_REGISTER_MOVE_COST
3339 #define TARGET_REGISTER_MOVE_COST mn10300_register_move_cost
3340 #undef TARGET_MEMORY_MOVE_COST
3341 #define TARGET_MEMORY_MOVE_COST mn10300_memory_move_cost
3342 #undef TARGET_RTX_COSTS
3343 #define TARGET_RTX_COSTS mn10300_rtx_costs
3344
3345 #undef TARGET_ASM_FILE_START
3346 #define TARGET_ASM_FILE_START mn10300_file_start
3347 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
3348 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
3349
3350 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
3351 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA mn10300_asm_output_addr_const_extra
3352
3353 #undef TARGET_OPTION_OVERRIDE
3354 #define TARGET_OPTION_OVERRIDE mn10300_option_override
3355
3356 #undef TARGET_ENCODE_SECTION_INFO
3357 #define TARGET_ENCODE_SECTION_INFO mn10300_encode_section_info
3358
3359 #undef TARGET_PROMOTE_PROTOTYPES
3360 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
3361 #undef TARGET_RETURN_IN_MEMORY
3362 #define TARGET_RETURN_IN_MEMORY mn10300_return_in_memory
3363 #undef TARGET_PASS_BY_REFERENCE
3364 #define TARGET_PASS_BY_REFERENCE mn10300_pass_by_reference
3365 #undef TARGET_CALLEE_COPIES
3366 #define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
3367 #undef TARGET_ARG_PARTIAL_BYTES
3368 #define TARGET_ARG_PARTIAL_BYTES mn10300_arg_partial_bytes
3369 #undef TARGET_FUNCTION_ARG
3370 #define TARGET_FUNCTION_ARG mn10300_function_arg
3371 #undef TARGET_FUNCTION_ARG_ADVANCE
3372 #define TARGET_FUNCTION_ARG_ADVANCE mn10300_function_arg_advance
3373
3374 #undef TARGET_EXPAND_BUILTIN_SAVEREGS
3375 #define TARGET_EXPAND_BUILTIN_SAVEREGS mn10300_builtin_saveregs
3376 #undef TARGET_EXPAND_BUILTIN_VA_START
3377 #define TARGET_EXPAND_BUILTIN_VA_START mn10300_va_start
3378
3379 #undef TARGET_CASE_VALUES_THRESHOLD
3380 #define TARGET_CASE_VALUES_THRESHOLD mn10300_case_values_threshold
3381
3382 #undef TARGET_LRA_P
3383 #define TARGET_LRA_P hook_bool_void_false
3384
3385 #undef TARGET_LEGITIMATE_ADDRESS_P
3386 #define TARGET_LEGITIMATE_ADDRESS_P mn10300_legitimate_address_p
3387 #undef TARGET_DELEGITIMIZE_ADDRESS
3388 #define TARGET_DELEGITIMIZE_ADDRESS mn10300_delegitimize_address
3389 #undef TARGET_LEGITIMATE_CONSTANT_P
3390 #define TARGET_LEGITIMATE_CONSTANT_P mn10300_legitimate_constant_p
3391
3392 #undef TARGET_PREFERRED_RELOAD_CLASS
3393 #define TARGET_PREFERRED_RELOAD_CLASS mn10300_preferred_reload_class
3394 #undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
3395 #define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS \
3396 mn10300_preferred_output_reload_class
3397 #undef TARGET_SECONDARY_RELOAD
3398 #define TARGET_SECONDARY_RELOAD mn10300_secondary_reload
3399
3400 #undef TARGET_TRAMPOLINE_INIT
3401 #define TARGET_TRAMPOLINE_INIT mn10300_trampoline_init
3402
3403 #undef TARGET_FUNCTION_VALUE
3404 #define TARGET_FUNCTION_VALUE mn10300_function_value
3405 #undef TARGET_LIBCALL_VALUE
3406 #define TARGET_LIBCALL_VALUE mn10300_libcall_value
3407
3408 #undef TARGET_ASM_OUTPUT_MI_THUNK
3409 #define TARGET_ASM_OUTPUT_MI_THUNK mn10300_asm_output_mi_thunk
3410 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
3411 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK mn10300_can_output_mi_thunk
3412
3413 #undef TARGET_SCHED_ADJUST_COST
3414 #define TARGET_SCHED_ADJUST_COST mn10300_adjust_sched_cost
3415
3416 #undef TARGET_CONDITIONAL_REGISTER_USAGE
3417 #define TARGET_CONDITIONAL_REGISTER_USAGE mn10300_conditional_register_usage
3418
3419 #undef TARGET_MD_ASM_ADJUST
3420 #define TARGET_MD_ASM_ADJUST mn10300_md_asm_adjust
3421
3422 #undef TARGET_FLAGS_REGNUM
3423 #define TARGET_FLAGS_REGNUM CC_REG
3424
3425 struct gcc_target targetm = TARGET_INITIALIZER;