/* Subroutines for insn-output.c for Matsushita MN10300 series
   Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004,
   2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
   Contributed by Jeff Law (law@cygnus.com).

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "recog.h"
#include "reload.h"
#include "expr.h"
#include "optabs.h"
#include "function.h"
#include "obstack.h"
#include "diagnostic-core.h"
#include "tm_p.h"
#include "tm-constrs.h"
#include "target.h"
#include "target-def.h"
#include "df.h"
#include "opts.h"
#include "cfgloop.h"

/* This is used in the am33_2.0-linux-gnu port, in which global symbol
   names are not prefixed by underscores, to tell whether to prefix a
   label with a plus sign or not, so that the assembler can tell
   symbol names from register names.  */
int mn10300_protect_label;

/* Selected processor type for tuning.  */
enum processor_type mn10300_tune_cpu = PROCESSOR_DEFAULT;

/* The size of the callee register save area.  Right now we save everything
   on entry since it costs us nothing in code size.  It does cost us from a
   speed standpoint, so we want to optimize this sooner or later.  */
#define REG_SAVE_BYTES (4 * df_regs_ever_live_p (2) \
                        + 4 * df_regs_ever_live_p (3) \
                        + 4 * df_regs_ever_live_p (6) \
                        + 4 * df_regs_ever_live_p (7) \
                        + 16 * (df_regs_ever_live_p (14) \
                                || df_regs_ever_live_p (15) \
                                || df_regs_ever_live_p (16) \
                                || df_regs_ever_live_p (17)))
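
/* A worked example (illustrative, not from the original source): if d2
   and d3 are live and any one of e4..e7 is live, REG_SAVE_BYTES
   evaluates to 4 + 4 + 16 = 24 bytes; the callee-saved extended
   registers always cost 16 bytes as a block, because movm cannot save
   them individually.  */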

#define CC_FLAG_Z 1
#define CC_FLAG_N 2
#define CC_FLAG_C 4
#define CC_FLAG_V 8

static int cc_flags_for_mode (enum machine_mode);
static int cc_flags_for_code (enum rtx_code);
\f
/* Implement TARGET_OPTION_OVERRIDE.  */

static void
mn10300_option_override (void)
{
  if (TARGET_AM33)
    target_flags &= ~MASK_MULT_BUG;
  else
    {
      /* Disable scheduling for the MN10300 as we do
         not have timing information available for it.  */
      flag_schedule_insns = 0;
      flag_schedule_insns_after_reload = 0;

      /* Force enable splitting of wide types, as otherwise it is trivial
         to run out of registers.  Indeed, this works so well that register
         allocation problems are now more common *without* optimization,
         when this flag is not enabled by default.  */
      flag_split_wide_types = 1;
    }

  if (mn10300_tune_string)
    {
      if (strcasecmp (mn10300_tune_string, "mn10300") == 0)
        mn10300_tune_cpu = PROCESSOR_MN10300;
      else if (strcasecmp (mn10300_tune_string, "am33") == 0)
        mn10300_tune_cpu = PROCESSOR_AM33;
      else if (strcasecmp (mn10300_tune_string, "am33-2") == 0)
        mn10300_tune_cpu = PROCESSOR_AM33_2;
      else if (strcasecmp (mn10300_tune_string, "am34") == 0)
        mn10300_tune_cpu = PROCESSOR_AM34;
      else
        error ("-mtune= expects mn10300, am33, am33-2, or am34");
    }
}

static void
mn10300_file_start (void)
{
  default_file_start ();

  if (TARGET_AM33_2)
    fprintf (asm_out_file, "\t.am33_2\n");
  else if (TARGET_AM33)
    fprintf (asm_out_file, "\t.am33\n");
}
\f
/* Note: This list must match the liw_op attribute in mn10300.md.  */

static const char *liw_op_names[] =
{
  "add", "cmp", "sub", "mov",
  "and", "or", "xor",
  "asr", "lsr", "asl",
  "none", "max"
};
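
/* For example (illustrative): a LIW opcode operand carrying the value
   3 is printed by the 'W' operand code below as "mov", i.e.
   liw_op_names[3].  */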

/* Print operand X using operand code CODE to assembly language output file
   FILE.  */

void
mn10300_print_operand (FILE *file, rtx x, int code)
{
  switch (code)
    {
    case 'W':
      {
        unsigned int liw_op = UINTVAL (x);

        gcc_assert (TARGET_ALLOW_LIW);
        gcc_assert (liw_op < LIW_OP_MAX);
        fputs (liw_op_names[liw_op], file);
        break;
      }

    case 'b':
    case 'B':
      {
        enum rtx_code cmp = GET_CODE (x);
        enum machine_mode mode = GET_MODE (XEXP (x, 0));
        const char *str;
        int have_flags;

        if (code == 'B')
          cmp = reverse_condition (cmp);
        have_flags = cc_flags_for_mode (mode);

        switch (cmp)
          {
          case NE:
            str = "ne";
            break;
          case EQ:
            str = "eq";
            break;
          case GE:
            /* bge is smaller than bnc.  */
            str = (have_flags & CC_FLAG_V ? "ge" : "nc");
            break;
          case LT:
            str = (have_flags & CC_FLAG_V ? "lt" : "ns");
            break;
          case GT:
            str = "gt";
            break;
          case LE:
            str = "le";
            break;
          case GEU:
            str = "cc";
            break;
          case GTU:
            str = "hi";
            break;
          case LEU:
            str = "ls";
            break;
          case LTU:
            str = "cs";
            break;
          case ORDERED:
            str = "lge";
            break;
          case UNORDERED:
            str = "uo";
            break;
          case LTGT:
            str = "lg";
            break;
          case UNEQ:
            str = "ue";
            break;
          case UNGE:
            str = "uge";
            break;
          case UNGT:
            str = "ug";
            break;
          case UNLE:
            str = "ule";
            break;
          case UNLT:
            str = "ul";
            break;
          default:
            gcc_unreachable ();
          }

        gcc_checking_assert ((cc_flags_for_code (cmp) & ~have_flags) == 0);
        fputs (str, file);
      }
      break;

    case 'C':
      /* This is used for the operand to a call instruction;
         if it's a REG, enclose it in parens, else output
         the operand normally.  */
      if (REG_P (x))
        {
          fputc ('(', file);
          mn10300_print_operand (file, x, 0);
          fputc (')', file);
        }
      else
        mn10300_print_operand (file, x, 0);
      break;

    case 'D':
      switch (GET_CODE (x))
        {
        case MEM:
          fputc ('(', file);
          output_address (XEXP (x, 0));
          fputc (')', file);
          break;

        case REG:
          fprintf (file, "fd%d", REGNO (x) - 18);
          break;

        default:
          gcc_unreachable ();
        }
      break;

      /* This is the least significant word in a 64-bit value.  */
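      /* For instance (illustrative): for a DImode value held in the
         register pair d0/d1, '%L' prints "d0" and '%H' below prints
         "d1" (REGNO + 1); CONST_INT operands are first split into low
         and high words with split_double.  */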
    case 'L':
      switch (GET_CODE (x))
        {
        case MEM:
          fputc ('(', file);
          output_address (XEXP (x, 0));
          fputc (')', file);
          break;

        case REG:
          fprintf (file, "%s", reg_names[REGNO (x)]);
          break;

        case SUBREG:
          fprintf (file, "%s", reg_names[subreg_regno (x)]);
          break;

        case CONST_DOUBLE:
          {
            long val[2];
            REAL_VALUE_TYPE rv;

            switch (GET_MODE (x))
              {
              case DFmode:
                REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
                REAL_VALUE_TO_TARGET_DOUBLE (rv, val);
                fprintf (file, "0x%lx", val[0]);
                break;
              case SFmode:
                REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
                REAL_VALUE_TO_TARGET_SINGLE (rv, val[0]);
                fprintf (file, "0x%lx", val[0]);
                break;
              case VOIDmode:
              case DImode:
                mn10300_print_operand_address (file,
                                               GEN_INT (CONST_DOUBLE_LOW (x)));
                break;
              default:
                break;
              }
            break;
          }

        case CONST_INT:
          {
            rtx low, high;
            split_double (x, &low, &high);
            fprintf (file, "%ld", (long)INTVAL (low));
            break;
          }

        default:
          gcc_unreachable ();
        }
      break;

      /* Similarly, but for the most significant word.  */
    case 'H':
      switch (GET_CODE (x))
        {
        case MEM:
          fputc ('(', file);
          x = adjust_address (x, SImode, 4);
          output_address (XEXP (x, 0));
          fputc (')', file);
          break;

        case REG:
          fprintf (file, "%s", reg_names[REGNO (x) + 1]);
          break;

        case SUBREG:
          fprintf (file, "%s", reg_names[subreg_regno (x) + 1]);
          break;

        case CONST_DOUBLE:
          {
            long val[2];
            REAL_VALUE_TYPE rv;

            switch (GET_MODE (x))
              {
              case DFmode:
                REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
                REAL_VALUE_TO_TARGET_DOUBLE (rv, val);
                fprintf (file, "0x%lx", val[1]);
                break;
              case SFmode:
                gcc_unreachable ();
              case VOIDmode:
              case DImode:
                mn10300_print_operand_address (file,
                                               GEN_INT (CONST_DOUBLE_HIGH (x)));
                break;
              default:
                break;
              }
            break;
          }

        case CONST_INT:
          {
            rtx low, high;
            split_double (x, &low, &high);
            fprintf (file, "%ld", (long)INTVAL (high));
            break;
          }

        default:
          gcc_unreachable ();
        }
      break;

    case 'A':
      fputc ('(', file);
      if (REG_P (XEXP (x, 0)))
        output_address (gen_rtx_PLUS (SImode, XEXP (x, 0), const0_rtx));
      else
        output_address (XEXP (x, 0));
      fputc (')', file);
      break;
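
      /* Illustrative note: for a (mem (reg a0)) operand, 'A' prints
         the explicit zero-displacement form "(0,a0)", by first
         rewriting the address as (plus (reg a0) (const_int 0)).  */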

    case 'N':
      gcc_assert (INTVAL (x) >= -128 && INTVAL (x) <= 255);
      fprintf (file, "%d", (int)((~INTVAL (x)) & 0xff));
      break;

    case 'U':
      gcc_assert (INTVAL (x) >= -128 && INTVAL (x) <= 255);
      fprintf (file, "%d", (int)(INTVAL (x) & 0xff));
      break;
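
      /* Worked examples (illustrative): for the operand (const_int 1),
         'N' prints 254, i.e. ~1 & 0xff, while 'U' prints 1, i.e.
         1 & 0xff; both assert that the value lies within [-128, 255].  */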

      /* For shift counts.  The hardware ignores the upper bits of
         any immediate, but the assembler will flag an out of range
         shift count as an error.  So we mask off the high bits
         of the immediate here.  */
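      /* For example (illustrative): an out-of-range immediate shift
         count such as 0x23 is printed as 3, since 0x23 & 0x1f == 3,
         which matches what the hardware does with the unmasked
         value.  */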
    case 'S':
      if (CONST_INT_P (x))
        {
          fprintf (file, "%d", (int)(INTVAL (x) & 0x1f));
          break;
        }
      /* FALL THROUGH */

    default:
      switch (GET_CODE (x))
        {
        case MEM:
          fputc ('(', file);
          output_address (XEXP (x, 0));
          fputc (')', file);
          break;

        case PLUS:
          output_address (x);
          break;

        case REG:
          fprintf (file, "%s", reg_names[REGNO (x)]);
          break;

        case SUBREG:
          fprintf (file, "%s", reg_names[subreg_regno (x)]);
          break;

        /* This will only be single precision....  */
        case CONST_DOUBLE:
          {
            unsigned long val;
            REAL_VALUE_TYPE rv;

            REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
            REAL_VALUE_TO_TARGET_SINGLE (rv, val);
            fprintf (file, "0x%lx", val);
            break;
          }

        case CONST_INT:
        case SYMBOL_REF:
        case CONST:
        case LABEL_REF:
        case CODE_LABEL:
        case UNSPEC:
          mn10300_print_operand_address (file, x);
          break;
        default:
          gcc_unreachable ();
        }
      break;
    }
}

/* Output assembly language for the address ADDR to FILE.  */

void
mn10300_print_operand_address (FILE *file, rtx addr)
{
  switch (GET_CODE (addr))
    {
    case POST_INC:
      mn10300_print_operand (file, XEXP (addr, 0), 0);
      fputc ('+', file);
      break;

    case POST_MODIFY:
      mn10300_print_operand (file, XEXP (addr, 0), 0);
      fputc ('+', file);
      fputc (',', file);
      mn10300_print_operand (file, XEXP (addr, 1), 0);
      break;

    case REG:
      mn10300_print_operand (file, addr, 0);
      break;
    case PLUS:
      {
        rtx base = XEXP (addr, 0);
        rtx index = XEXP (addr, 1);

        if (REG_P (index) && !REG_OK_FOR_INDEX_P (index))
          {
            rtx x = base;
            base = index;
            index = x;

            gcc_assert (REG_P (index) && REG_OK_FOR_INDEX_P (index));
          }
        gcc_assert (REG_OK_FOR_BASE_P (base));

        mn10300_print_operand (file, index, 0);
        fputc (',', file);
        mn10300_print_operand (file, base, 0);
        break;
      }
    case SYMBOL_REF:
      output_addr_const (file, addr);
      break;
    default:
      output_addr_const (file, addr);
      break;
    }
}

/* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA.

   Used for PIC-specific UNSPECs.  */

static bool
mn10300_asm_output_addr_const_extra (FILE *file, rtx x)
{
  if (GET_CODE (x) == UNSPEC)
    {
      switch (XINT (x, 1))
        {
        case UNSPEC_PIC:
          /* GLOBAL_OFFSET_TABLE or local symbols, no suffix.  */
          output_addr_const (file, XVECEXP (x, 0, 0));
          break;
        case UNSPEC_GOT:
          output_addr_const (file, XVECEXP (x, 0, 0));
          fputs ("@GOT", file);
          break;
        case UNSPEC_GOTOFF:
          output_addr_const (file, XVECEXP (x, 0, 0));
          fputs ("@GOTOFF", file);
          break;
        case UNSPEC_PLT:
          output_addr_const (file, XVECEXP (x, 0, 0));
          fputs ("@PLT", file);
          break;
        case UNSPEC_GOTSYM_OFF:
          assemble_name (file, GOT_SYMBOL_NAME);
          fputs ("-(", file);
          output_addr_const (file, XVECEXP (x, 0, 0));
          fputs ("-.)", file);
          break;
        default:
          return false;
        }
      return true;
    }
  else
    return false;
}
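
/* For reference (illustrative): given a symbol "foo", the UNSPEC cases
   above emit "foo", "foo@GOT", "foo@GOTOFF", "foo@PLT", and
   "_GLOBAL_OFFSET_TABLE_-(foo-.)" respectively, assuming that
   GOT_SYMBOL_NAME assembles to the conventional _GLOBAL_OFFSET_TABLE_
   name.  */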

/* Count the number of FP registers that have to be saved.  */
static int
fp_regs_to_save (void)
{
  int i, n = 0;

  if (! TARGET_AM33_2)
    return 0;

  for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
    if (df_regs_ever_live_p (i) && ! call_really_used_regs[i])
      ++n;

  return n;
}

/* Print a set of registers in the format required by "movm" and "ret".
   Register K is saved if bit K of MASK is set.  The data and address
   registers can be stored individually, but the extended registers cannot.
   We assume that the mask already takes that into account.  For instance,
   bits 14 to 17 must have the same value.  */

void
mn10300_print_reg_list (FILE *file, int mask)
{
  int need_comma;
  int i;

  need_comma = 0;
  fputc ('[', file);

  for (i = 0; i < FIRST_EXTENDED_REGNUM; i++)
    if ((mask & (1 << i)) != 0)
      {
        if (need_comma)
          fputc (',', file);
        fputs (reg_names [i], file);
        need_comma = 1;
      }

  if ((mask & 0x3c000) != 0)
    {
      gcc_assert ((mask & 0x3c000) == 0x3c000);
      if (need_comma)
        fputc (',', file);
      fputs ("exreg1", file);
      need_comma = 1;
    }

  fputc (']', file);
}
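
/* For example (illustrative): a MASK with bits 2, 3 and 14..17 set is
   printed as "[d2,d3,exreg1]", where "exreg1" stands for the whole
   block of callee-saved extended registers.  */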

/* If the MDR register is never clobbered, we can use the RETF instruction
   which takes the address from the MDR register.  This is 3 cycles faster
   than having to load the address from the stack.  */

bool
mn10300_can_use_retf_insn (void)
{
  /* Don't bother if we're not optimizing.  In this case we won't
     have proper access to df_regs_ever_live_p.  */
  if (!optimize)
    return false;

  /* EH returns alter the saved return address; MDR is not current.  */
  if (crtl->calls_eh_return)
    return false;

  /* Obviously not if MDR is ever clobbered.  */
  if (df_regs_ever_live_p (MDR_REG))
    return false;

  /* ??? Careful not to use this during expand_epilogue etc.  */
  gcc_assert (!in_sequence_p ());
  return leaf_function_p ();
}

bool
mn10300_can_use_rets_insn (void)
{
  return !mn10300_initial_offset (ARG_POINTER_REGNUM, STACK_POINTER_REGNUM);
}

/* Returns the set of live, callee-saved registers as a bitmask.  The
   callee-saved extended registers cannot be stored individually, so
   all of them will be included in the mask if any one of them is used.  */

int
mn10300_get_live_callee_saved_regs (void)
{
  int mask;
  int i;

  mask = 0;
  for (i = 0; i <= LAST_EXTENDED_REGNUM; i++)
    if (df_regs_ever_live_p (i) && ! call_really_used_regs[i])
      mask |= (1 << i);
  if ((mask & 0x3c000) != 0)
    mask |= 0x3c000;

  return mask;
}

/* Mark an rtx (typically an emitted insn) as frame related, so that it
   is taken into account when unwind information is generated.  */
static rtx
F (rtx r)
{
  RTX_FRAME_RELATED_P (r) = 1;
  return r;
}

/* Generate an instruction that pushes several registers onto the stack.
   Register K will be saved if bit K in MASK is set.  The function does
   nothing if MASK is zero.

   To be compatible with the "movm" instruction, the lowest-numbered
   register must be stored in the lowest slot.  If MASK is the set
   { R1,...,RN }, where R1...RN are ordered least first, the generated
   instruction will have the form:

       (parallel
         (set (reg:SI 9) (plus:SI (reg:SI 9) (const_int -N*4)))
         (set (mem:SI (plus:SI (reg:SI 9)
                               (const_int -1*4)))
              (reg:SI RN))
         ...
         (set (mem:SI (plus:SI (reg:SI 9)
                               (const_int -N*4)))
              (reg:SI R1)))  */

static void
mn10300_gen_multiple_store (unsigned int mask)
{
  /* The order in which registers are stored, from SP-4 through SP-N*4.  */
  static const unsigned int store_order[8] = {
    /* e2, e3: never saved.  */
    FIRST_EXTENDED_REGNUM + 4,
    FIRST_EXTENDED_REGNUM + 5,
    FIRST_EXTENDED_REGNUM + 6,
    FIRST_EXTENDED_REGNUM + 7,
    /* e0, e1, mdrq, mcrh, mcrl, mcvf: never saved.  */
    FIRST_DATA_REGNUM + 2,
    FIRST_DATA_REGNUM + 3,
    FIRST_ADDRESS_REGNUM + 2,
    FIRST_ADDRESS_REGNUM + 3,
    /* d0, d1, a0, a1, mdr, lir, lar: never saved.  */
  };

  rtx x, elts[9];
  unsigned int i;
  int count;

  if (mask == 0)
    return;

  for (i = count = 0; i < ARRAY_SIZE (store_order); ++i)
    {
      unsigned regno = store_order[i];

      if (((mask >> regno) & 1) == 0)
        continue;

      ++count;
      x = plus_constant (Pmode, stack_pointer_rtx, count * -4);
      x = gen_frame_mem (SImode, x);
      x = gen_rtx_SET (VOIDmode, x, gen_rtx_REG (SImode, regno));
      elts[count] = F (x);

      /* Remove the register from the mask so that...  */
      mask &= ~(1u << regno);
    }

  /* ... we can make sure that we didn't try to use a register
     not listed in the store order.  */
  gcc_assert (mask == 0);

  /* Create the instruction that updates the stack pointer.  */
  x = plus_constant (Pmode, stack_pointer_rtx, count * -4);
  x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
  elts[0] = F (x);

  /* We need one PARALLEL element to update the stack pointer and
     an additional element for each register that is stored.  */
  x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (count + 1, elts));
  F (emit_insn (x));
}

void
mn10300_expand_prologue (void)
{
  HOST_WIDE_INT size = mn10300_frame_size ();

  /* If we use any of the callee-saved registers, save them now.  */
  mn10300_gen_multiple_store (mn10300_get_live_callee_saved_regs ());

  if (TARGET_AM33_2 && fp_regs_to_save ())
    {
      int num_regs_to_save = fp_regs_to_save (), i;
      HOST_WIDE_INT xsize;
      enum
      {
        save_sp_merge,
        save_sp_no_merge,
        save_sp_partial_merge,
        save_a0_merge,
        save_a0_no_merge
      } strategy;
      unsigned int strategy_size = (unsigned)-1, this_strategy_size;
      rtx reg;

      /* We have several different strategies to save FP registers.
         We can store them using SP offsets, which is beneficial if
         there are just a few registers to save, or we can use `a0' in
         post-increment mode (`a0' is the only call-clobbered address
         register that is never used to pass information to a
         function).  Furthermore, if we don't need a frame pointer, we
         can merge the two SP adds into a single one, but this isn't
         always beneficial; sometimes we can just split the two adds
         so that we don't exceed a 16-bit constant size.  The code
         below will select which strategy to use, so as to generate
         the smallest code.  Ties are broken in favor of shorter
         sequences (in terms of number of instructions).  */

#define SIZE_ADD_AX(S) ((((S) >= (1 << 15)) || ((S) < -(1 << 15))) ? 6 \
                        : (((S) >= (1 << 7)) || ((S) < -(1 << 7))) ? 4 : 2)
#define SIZE_ADD_SP(S) ((((S) >= (1 << 15)) || ((S) < -(1 << 15))) ? 6 \
                        : (((S) >= (1 << 7)) || ((S) < -(1 << 7))) ? 4 : 3)
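
/* Worked examples (illustrative): SIZE_ADD_SP (-100) is 3 bytes, since
   -100 fits in a signed 8-bit immediate; SIZE_ADD_SP (-200) is 4 bytes
   (16-bit immediate); SIZE_ADD_SP (-40000) is 6 bytes (32-bit
   immediate).  SIZE_ADD_AX differs only in its 2-byte short form.  */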

/* We add 0 * (S) in two places to promote to the type of S,
   so that all arms of the conditional have the same type.  */
#define SIZE_FMOV_LIMIT(S,N,L,SIZE1,SIZE2,ELSE) \
  (((S) >= (L)) ? 0 * (S) + (SIZE1) * (N) \
   : ((S) + 4 * (N) >= (L)) ? (((L) - (S)) / 4 * (SIZE2) \
                               + ((S) + 4 * (N) - (L)) / 4 * (SIZE1)) \
   : 0 * (S) + (ELSE))
#define SIZE_FMOV_SP_(S,N) \
  (SIZE_FMOV_LIMIT ((S), (N), (1 << 24), 7, 6, \
                    SIZE_FMOV_LIMIT ((S), (N), (1 << 8), 6, 4, \
                                     (S) ? 4 * (N) : 3 + 4 * ((N) - 1))))
#define SIZE_FMOV_SP(S,N) (SIZE_FMOV_SP_ ((unsigned HOST_WIDE_INT)(S), (N)))

      /* Consider alternative save_sp_merge only if we don't need the
         frame pointer and size is nonzero.  */
      if (! frame_pointer_needed && size)
        {
          /* Insn: add -(size + 4 * num_regs_to_save), sp.  */
          this_strategy_size = SIZE_ADD_SP (-(size + 4 * num_regs_to_save));
          /* Insn: fmov fs#, (##, sp), for each fs# to be saved.  */
          this_strategy_size += SIZE_FMOV_SP (size, num_regs_to_save);

          if (this_strategy_size < strategy_size)
            {
              strategy = save_sp_merge;
              strategy_size = this_strategy_size;
            }
        }

      /* Consider alternative save_sp_no_merge unconditionally.  */
      /* Insn: add -4 * num_regs_to_save, sp.  */
      this_strategy_size = SIZE_ADD_SP (-4 * num_regs_to_save);
      /* Insn: fmov fs#, (##, sp), for each fs# to be saved.  */
      this_strategy_size += SIZE_FMOV_SP (0, num_regs_to_save);
      if (size)
        {
          /* Insn: add -size, sp.  */
          this_strategy_size += SIZE_ADD_SP (-size);
        }

      if (this_strategy_size < strategy_size)
        {
          strategy = save_sp_no_merge;
          strategy_size = this_strategy_size;
        }

      /* Consider alternative save_sp_partial_merge only if we don't
         need a frame pointer and size is reasonably large.  */
      if (! frame_pointer_needed && size + 4 * num_regs_to_save > 128)
        {
          /* Insn: add -128, sp.  */
          this_strategy_size = SIZE_ADD_SP (-128);
          /* Insn: fmov fs#, (##, sp), for each fs# to be saved.  */
          this_strategy_size += SIZE_FMOV_SP (128 - 4 * num_regs_to_save,
                                              num_regs_to_save);
          if (size)
            {
              /* Insn: add 128-size, sp.  */
              this_strategy_size += SIZE_ADD_SP (128 - size);
            }

          if (this_strategy_size < strategy_size)
            {
              strategy = save_sp_partial_merge;
              strategy_size = this_strategy_size;
            }
        }

      /* Consider alternative save_a0_merge only if we don't need a
         frame pointer, size is nonzero and the user hasn't
         changed the calling conventions of a0.  */
      if (! frame_pointer_needed && size
          && call_really_used_regs [FIRST_ADDRESS_REGNUM]
          && ! fixed_regs[FIRST_ADDRESS_REGNUM])
        {
          /* Insn: add -(size + 4 * num_regs_to_save), sp.  */
          this_strategy_size = SIZE_ADD_SP (-(size + 4 * num_regs_to_save));
          /* Insn: mov sp, a0.  */
          this_strategy_size++;
          if (size)
            {
              /* Insn: add size, a0.  */
              this_strategy_size += SIZE_ADD_AX (size);
            }
          /* Insn: fmov fs#, (a0+), for each fs# to be saved.  */
          this_strategy_size += 3 * num_regs_to_save;

          if (this_strategy_size < strategy_size)
            {
              strategy = save_a0_merge;
              strategy_size = this_strategy_size;
            }
        }

      /* Consider alternative save_a0_no_merge if the user hasn't
         changed the calling conventions of a0.  */
      if (call_really_used_regs [FIRST_ADDRESS_REGNUM]
          && ! fixed_regs[FIRST_ADDRESS_REGNUM])
        {
          /* Insn: add -4 * num_regs_to_save, sp.  */
          this_strategy_size = SIZE_ADD_SP (-4 * num_regs_to_save);
          /* Insn: mov sp, a0.  */
          this_strategy_size++;
          /* Insn: fmov fs#, (a0+), for each fs# to be saved.  */
          this_strategy_size += 3 * num_regs_to_save;
          if (size)
            {
              /* Insn: add -size, sp.  */
              this_strategy_size += SIZE_ADD_SP (-size);
            }

          if (this_strategy_size < strategy_size)
            {
              strategy = save_a0_no_merge;
              strategy_size = this_strategy_size;
            }
        }

      /* Emit the initial SP add, common to all strategies.  */
      switch (strategy)
        {
        case save_sp_no_merge:
        case save_a0_no_merge:
          F (emit_insn (gen_addsi3 (stack_pointer_rtx,
                                    stack_pointer_rtx,
                                    GEN_INT (-4 * num_regs_to_save))));
          xsize = 0;
          break;

        case save_sp_partial_merge:
          F (emit_insn (gen_addsi3 (stack_pointer_rtx,
                                    stack_pointer_rtx,
                                    GEN_INT (-128))));
          xsize = 128 - 4 * num_regs_to_save;
          size -= xsize;
          break;

        case save_sp_merge:
        case save_a0_merge:
          F (emit_insn (gen_addsi3 (stack_pointer_rtx,
                                    stack_pointer_rtx,
                                    GEN_INT (-(size + 4 * num_regs_to_save)))));
          /* We'll have to adjust FP register saves according to the
             frame size.  */
          xsize = size;
          /* Since we've already created the stack frame, don't do it
             again at the end of the function.  */
          size = 0;
          break;

        default:
          gcc_unreachable ();
        }

      /* Now prepare register a0, if we have decided to use it.  */
      switch (strategy)
        {
        case save_sp_merge:
        case save_sp_no_merge:
        case save_sp_partial_merge:
          reg = 0;
          break;

        case save_a0_merge:
        case save_a0_no_merge:
          reg = gen_rtx_REG (SImode, FIRST_ADDRESS_REGNUM);
          F (emit_insn (gen_movsi (reg, stack_pointer_rtx)));
          if (xsize)
            F (emit_insn (gen_addsi3 (reg, reg, GEN_INT (xsize))));
          reg = gen_rtx_POST_INC (SImode, reg);
          break;

        default:
          gcc_unreachable ();
        }

      /* Now actually save the FP registers.  */
      for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
        if (df_regs_ever_live_p (i) && ! call_really_used_regs [i])
          {
            rtx addr;

            if (reg)
              addr = reg;
            else
              {
                /* If we aren't using `a0', use an SP offset.  */
                if (xsize)
                  {
                    addr = gen_rtx_PLUS (SImode,
                                         stack_pointer_rtx,
                                         GEN_INT (xsize));
                  }
                else
                  addr = stack_pointer_rtx;

                xsize += 4;
              }

            F (emit_insn (gen_movsf (gen_rtx_MEM (SFmode, addr),
                                     gen_rtx_REG (SFmode, i))));
          }
    }

  /* Now put the frame pointer into the frame pointer register.  */
  if (frame_pointer_needed)
    F (emit_move_insn (frame_pointer_rtx, stack_pointer_rtx));

  /* Allocate stack for this frame.  */
  if (size)
    F (emit_insn (gen_addsi3 (stack_pointer_rtx,
                              stack_pointer_rtx,
                              GEN_INT (-size))));

  if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
    emit_insn (gen_load_pic ());
}

void
mn10300_expand_epilogue (void)
{
  HOST_WIDE_INT size = mn10300_frame_size ();
  int reg_save_bytes = REG_SAVE_BYTES;

  if (TARGET_AM33_2 && fp_regs_to_save ())
    {
      int num_regs_to_save = fp_regs_to_save (), i;
      rtx reg = 0;

      /* We have several options to restore FP registers.  We could
         load them from SP offsets, but, if there are enough FP
         registers to restore, we win if we use a post-increment
         addressing mode.  */

      /* If we have a frame pointer, it's the best option, because we
         already know it has the value we want.  */
      if (frame_pointer_needed)
        reg = gen_rtx_REG (SImode, FRAME_POINTER_REGNUM);
      /* Otherwise, we may use `a1', since it's call-clobbered and
         it's never used for return values.  But only do so if it's
         smaller than using SP offsets.  */
      else
        {
          enum { restore_sp_post_adjust,
                 restore_sp_pre_adjust,
                 restore_sp_partial_adjust,
                 restore_a1 } strategy;
          unsigned int this_strategy_size, strategy_size = (unsigned)-1;

          /* Consider using sp offsets before adjusting sp.  */
          /* Insn: fmov (##,sp),fs#, for each fs# to be restored.  */
          this_strategy_size = SIZE_FMOV_SP (size, num_regs_to_save);
          /* If size is too large, we'll have to adjust SP with an
             add.  */
          if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
            {
              /* Insn: add size + 4 * num_regs_to_save, sp.  */
              this_strategy_size += SIZE_ADD_SP (size + 4 * num_regs_to_save);
            }
          /* If we don't have to restore any non-FP registers,
             we'll be able to save one byte by using rets.  */
          if (! reg_save_bytes)
            this_strategy_size--;

          if (this_strategy_size < strategy_size)
            {
              strategy = restore_sp_post_adjust;
              strategy_size = this_strategy_size;
            }

          /* Consider using sp offsets after adjusting sp.  */
          /* Insn: add size, sp.  */
          this_strategy_size = SIZE_ADD_SP (size);
          /* Insn: fmov (##,sp),fs#, for each fs# to be restored.  */
          this_strategy_size += SIZE_FMOV_SP (0, num_regs_to_save);
          /* We're going to use ret to release the FP registers
             save area, so, no savings.  */

          if (this_strategy_size < strategy_size)
            {
              strategy = restore_sp_pre_adjust;
              strategy_size = this_strategy_size;
            }

          /* Consider using sp offsets after partially adjusting sp.
             When size is close to 32Kb, we may be able to adjust SP
             with an imm16 add instruction while still using fmov
             (d8,sp).  */
          if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
            {
              /* Insn: add size + 4 * num_regs_to_save
                            + reg_save_bytes - 252,sp.  */
              this_strategy_size = SIZE_ADD_SP (size + 4 * num_regs_to_save
                                                + reg_save_bytes - 252);
              /* Insn: fmov (##,sp),fs#, for each fs# to be restored.  */
              this_strategy_size += SIZE_FMOV_SP (252 - reg_save_bytes
                                                  - 4 * num_regs_to_save,
                                                  num_regs_to_save);
              /* We're going to use ret to release the FP registers
                 save area, so, no savings.  */

              if (this_strategy_size < strategy_size)
                {
                  strategy = restore_sp_partial_adjust;
                  strategy_size = this_strategy_size;
                }
            }

          /* Consider using a1 in post-increment mode, as long as the
             user hasn't changed the calling conventions of a1.  */
          if (call_really_used_regs [FIRST_ADDRESS_REGNUM + 1]
              && ! fixed_regs[FIRST_ADDRESS_REGNUM + 1])
            {
              /* Insn: mov sp,a1.  */
              this_strategy_size = 1;
              if (size)
                {
                  /* Insn: add size,a1.  */
                  this_strategy_size += SIZE_ADD_AX (size);
                }
              /* Insn: fmov (a1+),fs#, for each fs# to be restored.  */
              this_strategy_size += 3 * num_regs_to_save;
              /* If size is large enough, we may be able to save a
                 couple of bytes.  */
              if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
                {
                  /* Insn: mov a1,sp.  */
                  this_strategy_size += 2;
                }
              /* If we don't have to restore any non-FP registers,
                 we'll be able to save one byte by using rets.  */
              if (! reg_save_bytes)
                this_strategy_size--;

              if (this_strategy_size < strategy_size)
                {
                  strategy = restore_a1;
                  strategy_size = this_strategy_size;
                }
            }

          switch (strategy)
            {
            case restore_sp_post_adjust:
              break;

            case restore_sp_pre_adjust:
              emit_insn (gen_addsi3 (stack_pointer_rtx,
                                     stack_pointer_rtx,
                                     GEN_INT (size)));
              size = 0;
              break;

            case restore_sp_partial_adjust:
              emit_insn (gen_addsi3 (stack_pointer_rtx,
                                     stack_pointer_rtx,
                                     GEN_INT (size + 4 * num_regs_to_save
                                              + reg_save_bytes - 252)));
              size = 252 - reg_save_bytes - 4 * num_regs_to_save;
              break;

            case restore_a1:
              reg = gen_rtx_REG (SImode, FIRST_ADDRESS_REGNUM + 1);
              emit_insn (gen_movsi (reg, stack_pointer_rtx));
              if (size)
                emit_insn (gen_addsi3 (reg, reg, GEN_INT (size)));
              break;

            default:
              gcc_unreachable ();
            }
        }

      /* Adjust the selected register, if any, for post-increment.  */
      if (reg)
        reg = gen_rtx_POST_INC (SImode, reg);

      for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
        if (df_regs_ever_live_p (i) && ! call_really_used_regs [i])
          {
            rtx addr;

            if (reg)
              addr = reg;
            else if (size)
              {
                /* If we aren't using a post-increment register, use an
                   SP offset.  */
                addr = gen_rtx_PLUS (SImode,
                                     stack_pointer_rtx,
                                     GEN_INT (size));
              }
            else
              addr = stack_pointer_rtx;

            size += 4;

            emit_insn (gen_movsf (gen_rtx_REG (SFmode, i),
                                  gen_rtx_MEM (SFmode, addr)));
          }

      /* If we were using the restore_a1 strategy and the number of
         bytes to be released won't fit in the `ret' byte, copy `a1'
         to `sp', to avoid having to use `add' to adjust it.  */
      if (! frame_pointer_needed && reg && size + reg_save_bytes > 255)
        {
          emit_move_insn (stack_pointer_rtx, XEXP (reg, 0));
          size = 0;
        }
    }

  /* Maybe cut back the stack, except for the register save area.

     If the frame pointer exists, then use the frame pointer to
     cut back the stack.

     If the stack size + register save area is more than 255 bytes,
     then the stack must be cut back here since the size + register
     save size is too big for a ret/retf instruction.

     Else leave it alone, it will be cut back as part of the
     ret/retf instruction, or there wasn't any stack to begin with.

     Under no circumstances should the register save area be
     deallocated here, that would leave a window where an interrupt
     could occur and trash the register save area.  */
  if (frame_pointer_needed)
    {
      emit_move_insn (stack_pointer_rtx, frame_pointer_rtx);
      size = 0;
    }
  else if (size + reg_save_bytes > 255)
    {
      emit_insn (gen_addsi3 (stack_pointer_rtx,
                             stack_pointer_rtx,
                             GEN_INT (size)));
      size = 0;
    }

  /* Adjust the stack and restore callee-saved registers, if any.  */
  if (mn10300_can_use_rets_insn ())
    emit_jump_insn (ret_rtx);
  else
    emit_jump_insn (gen_return_ret (GEN_INT (size + REG_SAVE_BYTES)));
}

/* Recognize the PARALLEL rtx generated by mn10300_gen_multiple_store().
   This function is for MATCH_PARALLEL and so assumes OP is known to be
   parallel.  If OP is a multiple store, return a mask indicating which
   registers it saves.  Return 0 otherwise.  */

int
mn10300_store_multiple_operation (rtx op,
                                  enum machine_mode mode ATTRIBUTE_UNUSED)
{
  int count;
  int mask;
  int i;
  unsigned int last;
  rtx elt;

  count = XVECLEN (op, 0);
  if (count < 2)
    return 0;

  /* Check that first instruction has the form (set (sp) (plus A B)).  */
  elt = XVECEXP (op, 0, 0);
  if (GET_CODE (elt) != SET
      || (! REG_P (SET_DEST (elt)))
      || REGNO (SET_DEST (elt)) != STACK_POINTER_REGNUM
      || GET_CODE (SET_SRC (elt)) != PLUS)
    return 0;

  /* Check that A is the stack pointer and B is the expected stack size.
     For OP to match, each subsequent instruction should push a word onto
     the stack.  We therefore expect the first instruction to create
     COUNT-1 stack slots.  */
  elt = SET_SRC (elt);
  if ((! REG_P (XEXP (elt, 0)))
      || REGNO (XEXP (elt, 0)) != STACK_POINTER_REGNUM
      || (! CONST_INT_P (XEXP (elt, 1)))
      || INTVAL (XEXP (elt, 1)) != -(count - 1) * 4)
    return 0;

  mask = 0;
  for (i = 1; i < count; i++)
    {
      /* Check that element i is a (set (mem M) R).  */
      /* ??? Validate the register order a-la mn10300_gen_multiple_store.
         Remember: the ordering is *not* monotonic.  */
      elt = XVECEXP (op, 0, i);
      if (GET_CODE (elt) != SET
          || (! MEM_P (SET_DEST (elt)))
          || (! REG_P (SET_SRC (elt))))
        return 0;

      /* Remember which registers are to be saved.  */
      last = REGNO (SET_SRC (elt));
      mask |= (1 << last);

      /* Check that M has the form (plus (sp) (const_int -I*4)).  */
      elt = XEXP (SET_DEST (elt), 0);
      if (GET_CODE (elt) != PLUS
          || (! REG_P (XEXP (elt, 0)))
          || REGNO (XEXP (elt, 0)) != STACK_POINTER_REGNUM
          || (! CONST_INT_P (XEXP (elt, 1)))
          || INTVAL (XEXP (elt, 1)) != -i * 4)
        return 0;
    }

  /* All or none of the callee-saved extended registers must be in the set.  */
  if ((mask & 0x3c000) != 0
      && (mask & 0x3c000) != 0x3c000)
    return 0;

  return mask;
}

/* Implement TARGET_PREFERRED_RELOAD_CLASS.  */

static reg_class_t
mn10300_preferred_reload_class (rtx x, reg_class_t rclass)
{
  if (x == stack_pointer_rtx && rclass != SP_REGS)
    return (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
  else if (MEM_P (x)
           || (REG_P (x)
               && !HARD_REGISTER_P (x))
           || (GET_CODE (x) == SUBREG
               && REG_P (SUBREG_REG (x))
               && !HARD_REGISTER_P (SUBREG_REG (x))))
    return LIMIT_RELOAD_CLASS (GET_MODE (x), rclass);
  else
    return rclass;
}

/* Implement TARGET_PREFERRED_OUTPUT_RELOAD_CLASS.  */

static reg_class_t
mn10300_preferred_output_reload_class (rtx x, reg_class_t rclass)
{
  if (x == stack_pointer_rtx && rclass != SP_REGS)
    return (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
  return rclass;
}

/* Implement TARGET_SECONDARY_RELOAD.  */

static reg_class_t
mn10300_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
                          enum machine_mode mode, secondary_reload_info *sri)
{
  enum reg_class rclass = (enum reg_class) rclass_i;
  enum reg_class xclass = NO_REGS;
  unsigned int xregno = INVALID_REGNUM;

  if (REG_P (x))
    {
      xregno = REGNO (x);
      if (xregno >= FIRST_PSEUDO_REGISTER)
        xregno = true_regnum (x);
      if (xregno != INVALID_REGNUM)
        xclass = REGNO_REG_CLASS (xregno);
    }

  if (!TARGET_AM33)
    {
      /* Memory load/stores less than a full word wide can't have an
         address or stack pointer destination.  They must use a data
         register as an intermediate register.  */
      if (rclass != DATA_REGS
          && (mode == QImode || mode == HImode)
          && xclass == NO_REGS)
        return DATA_REGS;

      /* We can only move SP to/from an address register.  */
      if (in_p
          && rclass == SP_REGS
          && xclass != ADDRESS_REGS)
        return ADDRESS_REGS;
      if (!in_p
          && xclass == SP_REGS
          && rclass != ADDRESS_REGS
          && rclass != SP_OR_ADDRESS_REGS)
        return ADDRESS_REGS;
    }

  /* We can't directly load sp + const_int into a register;
     we must use an address register as a scratch.  */
  if (in_p
      && rclass != SP_REGS
      && rclass != SP_OR_ADDRESS_REGS
      && rclass != SP_OR_GENERAL_REGS
      && GET_CODE (x) == PLUS
      && (XEXP (x, 0) == stack_pointer_rtx
          || XEXP (x, 1) == stack_pointer_rtx))
    {
      sri->icode = CODE_FOR_reload_plus_sp_const;
      return NO_REGS;
    }

  /* We can only move MDR to/from a data register.  */
  if (rclass == MDR_REGS && xclass != DATA_REGS)
    return DATA_REGS;
  if (xclass == MDR_REGS && rclass != DATA_REGS)
    return DATA_REGS;

  /* We can't load/store an FP register from a constant address.  */
  if (TARGET_AM33_2
      && (rclass == FP_REGS || xclass == FP_REGS)
      && (xclass == NO_REGS || rclass == NO_REGS))
    {
      rtx addr = NULL;

      if (xregno >= FIRST_PSEUDO_REGISTER && xregno != INVALID_REGNUM)
        {
          addr = reg_equiv_mem (xregno);
          if (addr)
            addr = XEXP (addr, 0);
        }
      else if (MEM_P (x))
        addr = XEXP (x, 0);

      if (addr && CONSTANT_ADDRESS_P (addr))
        return GENERAL_REGS;
    }

  /* Otherwise assume no secondary reloads are needed.  */
  return NO_REGS;
}

int
mn10300_frame_size (void)
{
  /* size includes the fixed stack space needed for function calls.  */
  int size = get_frame_size () + crtl->outgoing_args_size;

  /* And space for the return pointer.  */
  size += crtl->outgoing_args_size ? 4 : 0;

  return size;
}

int
mn10300_initial_offset (int from, int to)
{
  int diff = 0;

  gcc_assert (from == ARG_POINTER_REGNUM || from == FRAME_POINTER_REGNUM);
  gcc_assert (to == FRAME_POINTER_REGNUM || to == STACK_POINTER_REGNUM);

  if (to == STACK_POINTER_REGNUM)
    diff = mn10300_frame_size ();

  /* The difference between the argument pointer and the frame pointer
     is the size of the callee register save area.  */
  if (from == ARG_POINTER_REGNUM)
    {
      diff += REG_SAVE_BYTES;
      diff += 4 * fp_regs_to_save ();
    }

  return diff;
}
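
/* A worked example (illustrative): with a 16-byte frame, d2 the only
   callee-saved register in use and no FP saves, the offset from the
   argument pointer to the stack pointer is 16 + 4 = 20 bytes, while
   the offset from the argument pointer to the frame pointer is just
   the 4-byte register save area.  */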

/* Worker function for TARGET_RETURN_IN_MEMORY.  */

static bool
mn10300_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  /* Return values > 8 bytes in length in memory.  */
  return (int_size_in_bytes (type) > 8
          || int_size_in_bytes (type) == 0
          || TYPE_MODE (type) == BLKmode);
}

/* Flush the argument registers to the stack for a stdarg function;
   return the new argument pointer.  */
static rtx
mn10300_builtin_saveregs (void)
{
  rtx offset, mem;
  tree fntype = TREE_TYPE (current_function_decl);
  int argadj = ((!stdarg_p (fntype))
                ? UNITS_PER_WORD : 0);
  alias_set_type set = get_varargs_alias_set ();

  if (argadj)
    offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, argadj);
  else
    offset = crtl->args.arg_offset_rtx;

  mem = gen_rtx_MEM (SImode, crtl->args.internal_arg_pointer);
  set_mem_alias_set (mem, set);
  emit_move_insn (mem, gen_rtx_REG (SImode, 0));

  mem = gen_rtx_MEM (SImode,
                     plus_constant (Pmode,
                                    crtl->args.internal_arg_pointer, 4));
  set_mem_alias_set (mem, set);
  emit_move_insn (mem, gen_rtx_REG (SImode, 1));

  return copy_to_reg (expand_binop (Pmode, add_optab,
                                    crtl->args.internal_arg_pointer,
                                    offset, 0, 0, OPTAB_LIB_WIDEN));
}

static void
mn10300_va_start (tree valist, rtx nextarg)
{
  nextarg = expand_builtin_saveregs ();
  std_expand_builtin_va_start (valist, nextarg);
}

/* Return true when a parameter should be passed by reference.  */

static bool
mn10300_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
                           enum machine_mode mode, const_tree type,
                           bool named ATTRIBUTE_UNUSED)
{
  unsigned HOST_WIDE_INT size;

  if (type)
    size = int_size_in_bytes (type);
  else
    size = GET_MODE_SIZE (mode);

  return (size > 8 || size == 0);
}
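
/* For example (illustrative): an 8-byte struct is passed by value,
   while a 12-byte struct, or a zero-sized one, is passed by
   reference.  */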

/* Return an RTX to represent where an argument of mode MODE and type
   TYPE will be passed to a function.  If the result is NULL_RTX, the
   argument will be pushed.  */
bb4959a8 1512
dc67179a 1513static rtx
39cba157 1514mn10300_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
dc67179a 1515 const_tree type, bool named ATTRIBUTE_UNUSED)
bb4959a8 1516{
39cba157 1517 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
e92d3ba8 1518 rtx result = NULL_RTX;
e14cac83 1519 int size;
bb4959a8 1520
1521 /* We only support using 2 data registers as argument registers. */
1522 int nregs = 2;
1523
1524 /* Figure out the size of the object to be passed. */
1525 if (mode == BLKmode)
1526 size = int_size_in_bytes (type);
1527 else
1528 size = GET_MODE_SIZE (mode);
1529
bb4959a8 1530 cum->nbytes = (cum->nbytes + 3) & ~3;
1531
1532 /* Don't pass this arg via a register if all the argument registers
1533 are used up. */
1534 if (cum->nbytes > nregs * UNITS_PER_WORD)
e92d3ba8 1535 return result;
bb4959a8 1536
1537 /* Don't pass this arg via a register if it would be split between
1538 registers and memory. */
1539 if (type == NULL_TREE
1540 && cum->nbytes + size > nregs * UNITS_PER_WORD)
e92d3ba8 1541 return result;
bb4959a8 1542
1543 switch (cum->nbytes / UNITS_PER_WORD)
1544 {
1545 case 0:
e92d3ba8 1546 result = gen_rtx_REG (mode, FIRST_ARGUMENT_REGNUM);
bb4959a8 1547 break;
1548 case 1:
e92d3ba8 1549 result = gen_rtx_REG (mode, FIRST_ARGUMENT_REGNUM + 1);
bb4959a8 1550 break;
1551 default:
e92d3ba8 1552 break;
bb4959a8 1553 }
1554
1555 return result;
1556}
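
/* A worked example (illustrative): for a function taking three ints,
   the first is passed in d0 (FIRST_ARGUMENT_REGNUM), the second in d1,
   and the third, with both argument registers used up, gets NULL_RTX
   and is pushed onto the stack.  */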

/* Update the data in CUM to advance over an argument
   of mode MODE and data type TYPE.
   (TYPE is null for libcalls where that information may not be available.)  */

static void
mn10300_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
                              const_tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);

  cum->nbytes += (mode != BLKmode
                  ? (GET_MODE_SIZE (mode) + 3) & ~3
                  : (int_size_in_bytes (type) + 3) & ~3);
}

/* Return the number of bytes of registers to use for an argument passed
   partially in registers and partially in memory.  */

static int
mn10300_arg_partial_bytes (cumulative_args_t cum_v, enum machine_mode mode,
                           tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  int size;

  /* We only support using 2 data registers as argument registers.  */
  int nregs = 2;

  /* Figure out the size of the object to be passed.  */
  if (mode == BLKmode)
    size = int_size_in_bytes (type);
  else
    size = GET_MODE_SIZE (mode);

  cum->nbytes = (cum->nbytes + 3) & ~3;

  /* Don't pass this arg via a register if all the argument registers
     are used up.  */
  if (cum->nbytes > nregs * UNITS_PER_WORD)
    return 0;

  if (cum->nbytes + size <= nregs * UNITS_PER_WORD)
    return 0;

  /* Don't pass this arg via a register if it would be split between
     registers and memory.  */
  if (type == NULL_TREE
      && cum->nbytes + size > nregs * UNITS_PER_WORD)
    return 0;

  return nregs * UNITS_PER_WORD - cum->nbytes;
}
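
/* For example (illustrative): with one argument word already assigned,
   an 8-byte struct argument is split and this returns 4: one word is
   passed in d1 and the remaining word goes to memory.  */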

/* Return the location of the function's value.  This will be either
   $d0 for integer functions, $a0 for pointers, or a PARALLEL of both
   $d0 and $a0 if the -mreturn-pointer-on-d0 flag is set.  Note that
   we only return the PARALLEL for outgoing values; we do not want
   callers relying on this extra copy.  */
1616
b6713ba6 1617static rtx
1618mn10300_function_value (const_tree valtype,
1619 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
1620 bool outgoing)
00b1da0e 1621{
1622 rtx rv;
1623 enum machine_mode mode = TYPE_MODE (valtype);
1624
1625 if (! POINTER_TYPE_P (valtype))
1626 return gen_rtx_REG (mode, FIRST_DATA_REGNUM);
1627 else if (! TARGET_PTR_A0D0 || ! outgoing
18d50ae6 1628 || cfun->returns_struct)
00b1da0e 1629 return gen_rtx_REG (mode, FIRST_ADDRESS_REGNUM);
1630
1631 rv = gen_rtx_PARALLEL (mode, rtvec_alloc (2));
1632 XVECEXP (rv, 0, 0)
1633 = gen_rtx_EXPR_LIST (VOIDmode,
1634 gen_rtx_REG (mode, FIRST_ADDRESS_REGNUM),
1635 GEN_INT (0));
fb16c776 1636
00b1da0e 1637 XVECEXP (rv, 0, 1)
1638 = gen_rtx_EXPR_LIST (VOIDmode,
1639 gen_rtx_REG (mode, FIRST_DATA_REGNUM),
1640 GEN_INT (0));
1641 return rv;
1642}
1643
b6713ba6 1644/* Implements TARGET_LIBCALL_VALUE. */
1645
1646static rtx
1647mn10300_libcall_value (enum machine_mode mode,
1648 const_rtx fun ATTRIBUTE_UNUSED)
1649{
1650 return gen_rtx_REG (mode, FIRST_DATA_REGNUM);
1651}
1652
1653/* Implements FUNCTION_VALUE_REGNO_P. */
1654
1655bool
1656mn10300_function_value_regno_p (const unsigned int regno)
1657{
1658 return (regno == FIRST_DATA_REGNUM || regno == FIRST_ADDRESS_REGNUM);
1659}
1660
990679af 1661/* Output an addition operation. */
5574dbdd 1662
feb9af9f 1663const char *
990679af 1664mn10300_output_add (rtx operands[3], bool need_flags)
bb4959a8 1665{
990679af 1666 rtx dest, src1, src2;
1667 unsigned int dest_regnum, src1_regnum, src2_regnum;
1668 enum reg_class src1_class, src2_class, dest_class;
bb4959a8 1669
990679af 1670 dest = operands[0];
1671 src1 = operands[1];
1672 src2 = operands[2];
bb4959a8 1673
990679af 1674 dest_regnum = true_regnum (dest);
1675 src1_regnum = true_regnum (src1);
bb4959a8 1676
990679af 1677 dest_class = REGNO_REG_CLASS (dest_regnum);
1678 src1_class = REGNO_REG_CLASS (src1_regnum);
bb4959a8 1679
f9e46c25 1680 if (CONST_INT_P (src2))
990679af 1681 {
1682 gcc_assert (dest_regnum == src1_regnum);
bb4959a8 1683
990679af 1684 if (src2 == const1_rtx && !need_flags)
1685 return "inc %0";
1686 if (INTVAL (src2) == 4 && !need_flags && dest_class != DATA_REGS)
1687 return "inc4 %0";
911517ac 1688
990679af 1689 gcc_assert (!need_flags || dest_class != SP_REGS);
1690 return "add %2,%0";
1691 }
1692 else if (CONSTANT_P (src2))
1693 return "add %2,%0";
1694
1695 src2_regnum = true_regnum (src2);
1696 src2_class = REGNO_REG_CLASS (src2_regnum);
1697
1698 if (dest_regnum == src1_regnum)
1699 return "add %2,%0";
1700 if (dest_regnum == src2_regnum)
1701 return "add %1,%0";
1702
1703 /* The rest of the cases are reg = reg+reg. For AM33, we can implement
1704 this directly, as below, but when optimizing for space we can sometimes
1705 do better by using a mov+add. For MN103, we claimed that we could
1706 implement a three-operand add because the various move and add insns
1707 change sizes across register classes, and we can often do better than
1708 reload in choosing which operand to move. */
1709 if (TARGET_AM33 && optimize_insn_for_speed_p ())
1710 return "add %2,%1,%0";
1711
1712 /* Catch cases where no extended register was used. */
1713 if (src1_class != EXTENDED_REGS
1714 && src2_class != EXTENDED_REGS
1715 && dest_class != EXTENDED_REGS)
1716 {
1717 /* We have to copy one of the sources into the destination, then
1718 add the other source to the destination.
1719
1720 Carefully select which source to copy to the destination; a
1721 naive implementation will waste a byte when the source classes
1722 are different and the destination is an address register.
1723 Selecting the lowest cost register copy will optimize this
1724 sequence. */
1725 if (src1_class == dest_class)
1726 return "mov %1,%0\n\tadd %2,%0";
1727 else
1728 return "mov %2,%0\n\tadd %1,%0";
1729 }
911517ac 1730
990679af 1731 /* At least one register is an extended register. */
bb4959a8 1732
990679af 1733 /* The three operand add instruction on the am33 is a win iff the
1734 output register is an extended register, or if both source
1735 registers are extended registers. */
1736 if (dest_class == EXTENDED_REGS || src1_class == src2_class)
1737 return "add %2,%1,%0";
1738
1739 /* It is better to copy one of the sources to the destination, then
1740 perform a 2 address add. The destination in this case must be
1741 an address or data register and one of the sources must be an
1742 extended register and the remaining source must not be an extended
1743 register.
1744
1745 The best code for this case is to copy the extended reg to the
1746 destination, then emit a two address add. */
1747 if (src1_class == EXTENDED_REGS)
1748 return "mov %1,%0\n\tadd %2,%0";
1749 else
1750 return "mov %2,%0\n\tadd %1,%0";
bb4959a8 1751}
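
/* Worked examples (illustrative): for d0 = d1 + d2 on the plain
   MN10300, the code above returns "mov %1,%0\n\tadd %2,%0", which
   assembles as "mov d1,d0" followed by "add d2,d0"; on the AM33, when
   optimizing for speed, the single three-operand form "add d2,d1,d0"
   is emitted instead.  */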
36ed4406 1752
c4cd8f6a 1753/* Return 1 if X contains a symbolic expression. We know these
1754 expressions will have one of a few well defined forms, so
1755 we need only check those forms. */
3626e955 1756
c4cd8f6a 1757int
3626e955 1758mn10300_symbolic_operand (rtx op,
1759 enum machine_mode mode ATTRIBUTE_UNUSED)
c4cd8f6a 1760{
1761 switch (GET_CODE (op))
1762 {
1763 case SYMBOL_REF:
1764 case LABEL_REF:
1765 return 1;
1766 case CONST:
1767 op = XEXP (op, 0);
1768 return ((GET_CODE (XEXP (op, 0)) == SYMBOL_REF
1769 || GET_CODE (XEXP (op, 0)) == LABEL_REF)
4879b320 1770 && CONST_INT_P (XEXP (op, 1)));
c4cd8f6a 1771 default:
1772 return 0;
1773 }
1774}
1775
1776/* Try machine dependent ways of modifying an illegitimate address
1777 to be legitimate. If we find one, return the new valid address.
1778 This macro is used in only one place: `memory_address' in explow.c.
1779
1780 OLDX is the address as it was before break_out_memory_refs was called.
1781 In some cases it is useful to look at this to decide what needs to be done.
1782
c4cd8f6a 1783 Normally it is always safe for this macro to do nothing. It exists to
1784 recognize opportunities to optimize the output.
1785
1786 But on a few ports with segmented architectures and indexed addressing
1787 (mn10300, hppa) it is used to rewrite certain problematical addresses. */
3626e955 1788
5574dbdd 1789static rtx
41e3a0c7 1790mn10300_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
1791 enum machine_mode mode ATTRIBUTE_UNUSED)
c4cd8f6a 1792{
3626e955 1793 if (flag_pic && ! mn10300_legitimate_pic_operand_p (x))
1794 x = mn10300_legitimize_pic_address (oldx, NULL_RTX);
b87a151a 1795
c4cd8f6a 1796 /* Uh-oh. We might have an address for x[n-100000]. This needs
1797 special handling to avoid creating an indexed memory address
1798 with x-100000 as the base. */
1799 if (GET_CODE (x) == PLUS
3626e955 1800 && mn10300_symbolic_operand (XEXP (x, 1), VOIDmode))
c4cd8f6a 1801 {
1802 /* Ugly. We modify things here so that the address offset specified
1803 by the index expression is computed first, then added to x to form
1804 the entire address. */
1805
59086782 1806 rtx regx1, regy1, regy2, y;
c4cd8f6a 1807
1808 /* Strip off any CONST. */
1809 y = XEXP (x, 1);
1810 if (GET_CODE (y) == CONST)
1811 y = XEXP (y, 0);
1812
c927a8ab 1813 if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
1814 {
1815 regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
1816 regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
1817 regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
1818 regx1 = force_reg (Pmode,
3626e955 1819 gen_rtx_fmt_ee (GET_CODE (y), Pmode, regx1,
1820 regy2));
7014838c 1821 return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
c927a8ab 1822 }
c4cd8f6a 1823 }
11b4605c 1824 return x;
c4cd8f6a 1825}
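
/* A sketch of the rewrite above, with an invented symbol and offset:
   the problematic address

	(plus (reg R) (const (plus (symbol_ref "s") (const_int -100000))))

   is split so that the large offset is folded into the base register
   first,

	tmp  = R + (-100000)
	addr = tmp + "s"

   keeping the symbolic term out of the index computation.  */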
e2aead91 1826
b87a151a 1827/* Convert a non-PIC address in `orig' to a PIC address using @GOT or
09e5ce26 1828 @GOTOFF in `reg'. */
3626e955 1829
b87a151a 1830rtx
3626e955 1831mn10300_legitimize_pic_address (rtx orig, rtx reg)
b87a151a 1832{
d92c1383 1833 rtx x;
1834
b87a151a 1835 if (GET_CODE (orig) == LABEL_REF
1836 || (GET_CODE (orig) == SYMBOL_REF
1837 && (CONSTANT_POOL_ADDRESS_P (orig)
1838 || ! MN10300_GLOBAL_P (orig))))
1839 {
d92c1383 1840 if (reg == NULL)
b87a151a 1841 reg = gen_reg_rtx (Pmode);
1842
d92c1383 1843 x = gen_rtx_UNSPEC (SImode, gen_rtvec (1, orig), UNSPEC_GOTOFF);
1844 x = gen_rtx_CONST (SImode, x);
1845 emit_move_insn (reg, x);
1846
1847 x = emit_insn (gen_addsi3 (reg, reg, pic_offset_table_rtx));
b87a151a 1848 }
1849 else if (GET_CODE (orig) == SYMBOL_REF)
1850 {
d92c1383 1851 if (reg == NULL)
b87a151a 1852 reg = gen_reg_rtx (Pmode);
1853
d92c1383 1854 x = gen_rtx_UNSPEC (SImode, gen_rtvec (1, orig), UNSPEC_GOT);
1855 x = gen_rtx_CONST (SImode, x);
1856 x = gen_rtx_PLUS (SImode, pic_offset_table_rtx, x);
1857 x = gen_const_mem (SImode, x);
1858
1859 x = emit_move_insn (reg, x);
b87a151a 1860 }
d92c1383 1861 else
1862 return orig;
1863
1864 set_unique_reg_note (x, REG_EQUAL, orig);
1865 return reg;
b87a151a 1866}
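
/* As a rough illustration (assembler syntax abbreviated; the registers
   are chosen by the allocator, and PIC stands for the register held in
   pic_offset_table_rtx): a local symbol is reached via its offset from
   the GOT base,

	mov _local@GOTOFF,reg
	add PIC,reg

   while a global symbol is loaded through its GOT slot,

	mov (_global@GOT,PIC),reg

   and in both cases REG ends up holding the symbol's address.  */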
1867
1868/* Return zero if X references a SYMBOL_REF or LABEL_REF whose symbol
fa483857 1869 isn't protected by a PIC unspec; nonzero otherwise. */
3626e955 1870
b87a151a 1871int
3626e955 1872mn10300_legitimate_pic_operand_p (rtx x)
b87a151a 1873{
3626e955 1874 const char *fmt;
1875 int i;
b87a151a 1876
1877 if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
1878 return 0;
1879
1880 if (GET_CODE (x) == UNSPEC
1881 && (XINT (x, 1) == UNSPEC_PIC
1882 || XINT (x, 1) == UNSPEC_GOT
1883 || XINT (x, 1) == UNSPEC_GOTOFF
b6e3379c 1884 || XINT (x, 1) == UNSPEC_PLT
1885 || XINT (x, 1) == UNSPEC_GOTSYM_OFF))
b87a151a 1886 return 1;
1887
b87a151a 1888 fmt = GET_RTX_FORMAT (GET_CODE (x));
1889 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
1890 {
1891 if (fmt[i] == 'E')
1892 {
5574dbdd 1893 int j;
b87a151a 1894
1895 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3626e955 1896 if (! mn10300_legitimate_pic_operand_p (XVECEXP (x, i, j)))
b87a151a 1897 return 0;
1898 }
3626e955 1899 else if (fmt[i] == 'e'
1900 && ! mn10300_legitimate_pic_operand_p (XEXP (x, i)))
b87a151a 1901 return 0;
1902 }
1903
1904 return 1;
1905}
1906
5411aa8c 1907/* Return TRUE if the address X, taken from a (MEM:MODE X) rtx, is
fd50b071 1908 legitimate, and FALSE otherwise.
1909
1910 On the mn10300, the value in the address register must be
1911 in the same memory space/segment as the effective address.
1912
 1913	   This is problematic for reload since it does not understand
1914 that base+index != index+base in a memory reference.
1915
1916 Note it is still possible to use reg+reg addressing modes,
1917 it's just much more difficult. For a discussion of a possible
1918 workaround and solution, see the comments in pa.c before the
1919 function record_unscaled_index_insn_codes. */
1920
5574dbdd 1921static bool
fd50b071 1922mn10300_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
5411aa8c 1923{
c8a596d6 1924 rtx base, index;
1925
1926 if (CONSTANT_ADDRESS_P (x))
1927 return !flag_pic || mn10300_legitimate_pic_operand_p (x);
5411aa8c 1928
1929 if (RTX_OK_FOR_BASE_P (x, strict))
c8a596d6 1930 return true;
1931
1932 if (TARGET_AM33 && (mode == SImode || mode == SFmode || mode == HImode))
1933 {
1934 if (GET_CODE (x) == POST_INC)
1935 return RTX_OK_FOR_BASE_P (XEXP (x, 0), strict);
1936 if (GET_CODE (x) == POST_MODIFY)
1937 return (RTX_OK_FOR_BASE_P (XEXP (x, 0), strict)
1938 && CONSTANT_ADDRESS_P (XEXP (x, 1)));
1939 }
1940
1941 if (GET_CODE (x) != PLUS)
1942 return false;
5411aa8c 1943
c8a596d6 1944 base = XEXP (x, 0);
1945 index = XEXP (x, 1);
5411aa8c 1946
c8a596d6 1947 if (!REG_P (base))
1948 return false;
1949 if (REG_P (index))
5411aa8c 1950 {
c8a596d6 1951 /* ??? Without AM33 generalized (Ri,Rn) addressing, reg+reg
1952 addressing is hard to satisfy. */
1953 if (!TARGET_AM33)
1954 return false;
5411aa8c 1955
c8a596d6 1956 return (REGNO_GENERAL_P (REGNO (base), strict)
1957 && REGNO_GENERAL_P (REGNO (index), strict));
1958 }
5411aa8c 1959
c8a596d6 1960 if (!REGNO_STRICT_OK_FOR_BASE_P (REGNO (base), strict))
1961 return false;
5411aa8c 1962
c8a596d6 1963 if (CONST_INT_P (index))
1964 return IN_RANGE (INTVAL (index), -1 - 0x7fffffff, 0x7fffffff);
1965
1966 if (CONSTANT_ADDRESS_P (index))
1967 return !flag_pic || mn10300_legitimate_pic_operand_p (index);
1968
1969 return false;
1970}
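
/* Some concrete consequences of the checks above (the register choices
   are illustrative):

	(reg a0)			valid everywhere
	(plus (reg a0) (const_int 12))	valid; any 32-bit displacement
	(post_inc (reg a0))		AM33 only, SI/SF/HI modes
	(plus (reg a0) (reg d1))	AM33 only (reg+reg)

   On plain MN103 the reg+reg form is rejected here and must be
   materialized some other way.  */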
1971
1972bool
1973mn10300_regno_in_class_p (unsigned regno, int rclass, bool strict)
1974{
1975 if (regno >= FIRST_PSEUDO_REGISTER)
1976 {
1977 if (!strict)
1978 return true;
1979 if (!reg_renumber)
1980 return false;
1981 regno = reg_renumber[regno];
c2fa9c24 1982 if (regno == INVALID_REGNUM)
1983 return false;
c8a596d6 1984 }
1985 return TEST_HARD_REG_BIT (reg_class_contents[rclass], regno);
1986}
1987
1988rtx
1989mn10300_legitimize_reload_address (rtx x,
1990 enum machine_mode mode ATTRIBUTE_UNUSED,
1991 int opnum, int type,
1992 int ind_levels ATTRIBUTE_UNUSED)
1993{
1994 bool any_change = false;
1995
1996 /* See above re disabling reg+reg addressing for MN103. */
1997 if (!TARGET_AM33)
1998 return NULL_RTX;
1999
2000 if (GET_CODE (x) != PLUS)
2001 return NULL_RTX;
2002
2003 if (XEXP (x, 0) == stack_pointer_rtx)
2004 {
2005 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
2006 GENERAL_REGS, GET_MODE (x), VOIDmode, 0, 0,
2007 opnum, (enum reload_type) type);
2008 any_change = true;
2009 }
2010 if (XEXP (x, 1) == stack_pointer_rtx)
2011 {
2012 push_reload (XEXP (x, 1), NULL_RTX, &XEXP (x, 1), NULL,
2013 GENERAL_REGS, GET_MODE (x), VOIDmode, 0, 0,
2014 opnum, (enum reload_type) type);
2015 any_change = true;
5411aa8c 2016 }
2017
c8a596d6 2018 return any_change ? x : NULL_RTX;
5411aa8c 2019}
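
/* For instance, given the AM33 address (plus (reg sp) (reg d0))
   (registers illustrative), the code above pushes a reload of SP into a
   general register, so that the resulting address has the supported
   (Ri,Rn) form.  */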
2020
ca316360 2021/* Implement TARGET_LEGITIMATE_CONSTANT_P. Returns TRUE if X is a valid
5574dbdd 2022 constant. Note that some "constants" aren't valid, such as TLS
2023 symbols and unconverted GOT-based references, so we eliminate
2024 those here. */
2025
ca316360 2026static bool
2027mn10300_legitimate_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
5574dbdd 2028{
2029 switch (GET_CODE (x))
2030 {
2031 case CONST:
2032 x = XEXP (x, 0);
2033
2034 if (GET_CODE (x) == PLUS)
2035 {
3626e955 2036 if (! CONST_INT_P (XEXP (x, 1)))
5574dbdd 2037 return false;
2038 x = XEXP (x, 0);
2039 }
2040
2041 /* Only some unspecs are valid as "constants". */
2042 if (GET_CODE (x) == UNSPEC)
2043 {
5574dbdd 2044 switch (XINT (x, 1))
2045 {
5574dbdd 2046 case UNSPEC_PIC:
2047 case UNSPEC_GOT:
2048 case UNSPEC_GOTOFF:
2049 case UNSPEC_PLT:
2050 return true;
2051 default:
2052 return false;
2053 }
2054 }
2055
2056 /* We must have drilled down to a symbol. */
3626e955 2057 if (! mn10300_symbolic_operand (x, Pmode))
5574dbdd 2058 return false;
2059 break;
2060
2061 default:
2062 break;
2063 }
2064
2065 return true;
2066}
2067
4c6c308e 2068/* Undo pic address legitimization for the benefit of debug info. */
2069
2070static rtx
2071mn10300_delegitimize_address (rtx orig_x)
2072{
2073 rtx x = orig_x, ret, addend = NULL;
2074 bool need_mem;
2075
2076 if (MEM_P (x))
2077 x = XEXP (x, 0);
2078 if (GET_CODE (x) != PLUS || GET_MODE (x) != Pmode)
2079 return orig_x;
2080
2081 if (XEXP (x, 0) == pic_offset_table_rtx)
2082 ;
2083 /* With the REG+REG addressing of AM33, var-tracking can re-assemble
2084 some odd-looking "addresses" that were never valid in the first place.
2085 We need to look harder to avoid warnings being emitted. */
2086 else if (GET_CODE (XEXP (x, 0)) == PLUS)
2087 {
2088 rtx x0 = XEXP (x, 0);
2089 rtx x00 = XEXP (x0, 0);
2090 rtx x01 = XEXP (x0, 1);
2091
2092 if (x00 == pic_offset_table_rtx)
2093 addend = x01;
2094 else if (x01 == pic_offset_table_rtx)
2095 addend = x00;
2096 else
2097 return orig_x;
2098
2099 }
2100 else
2101 return orig_x;
2102 x = XEXP (x, 1);
2103
2104 if (GET_CODE (x) != CONST)
2105 return orig_x;
2106 x = XEXP (x, 0);
2107 if (GET_CODE (x) != UNSPEC)
2108 return orig_x;
2109
2110 ret = XVECEXP (x, 0, 0);
2111 if (XINT (x, 1) == UNSPEC_GOTOFF)
2112 need_mem = false;
2113 else if (XINT (x, 1) == UNSPEC_GOT)
2114 need_mem = true;
2115 else
2116 return orig_x;
2117
2118 gcc_assert (GET_CODE (ret) == SYMBOL_REF);
2119 if (need_mem != MEM_P (orig_x))
2120 return orig_x;
2121 if (need_mem && addend)
2122 return orig_x;
2123 if (addend)
2124 ret = gen_rtx_PLUS (Pmode, addend, ret);
2125 return ret;
2126}
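
/* This reverses the forms built by mn10300_legitimize_pic_address,
   where PIC stands for pic_offset_table_rtx:

	(plus PIC (const (unspec [SYM] UNSPEC_GOTOFF)))	    => SYM
	(mem (plus PIC (const (unspec [SYM] UNSPEC_GOT))))  => SYM

   Anything that does not match one of these shapes is returned
   unchanged.  */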
2127
28f32607 2128/* For addresses, costs are relative to "MOV (Rm),Rn". For AM33 this is
2129 the 3-byte fully general instruction; for MN103 this is the 2-byte form
2130 with an address register. */
2131
ec0457a8 2132static int
28f32607 2133mn10300_address_cost (rtx x, bool speed)
e2aead91 2134{
28f32607 2135 HOST_WIDE_INT i;
2136 rtx base, index;
2137
e2aead91 2138 switch (GET_CODE (x))
2139 {
28f32607 2140 case CONST:
2141 case SYMBOL_REF:
2142 case LABEL_REF:
2143 /* We assume all of these require a 32-bit constant, even though
2144 some symbol and label references can be relaxed. */
2145 return speed ? 1 : 4;
2146
e2aead91 2147 case REG:
28f32607 2148 case SUBREG:
2149 case POST_INC:
2150 return 0;
2151
2152 case POST_MODIFY:
2153 /* Assume any symbolic offset is a 32-bit constant. */
2154 i = (CONST_INT_P (XEXP (x, 1)) ? INTVAL (XEXP (x, 1)) : 0x12345678);
2155 if (IN_RANGE (i, -128, 127))
2156 return speed ? 0 : 1;
2157 if (speed)
2158 return 1;
2159 if (IN_RANGE (i, -0x800000, 0x7fffff))
2160 return 3;
2161 return 4;
2162
2163 case PLUS:
2164 base = XEXP (x, 0);
2165 index = XEXP (x, 1);
2166 if (register_operand (index, SImode))
e2aead91 2167 {
28f32607 2168 /* Attempt to minimize the number of registers in the address.
2169 This is similar to what other ports do. */
2170 if (register_operand (base, SImode))
2171 return 1;
e2aead91 2172
28f32607 2173 base = XEXP (x, 1);
2174 index = XEXP (x, 0);
2175 }
e2aead91 2176
28f32607 2177 /* Assume any symbolic offset is a 32-bit constant. */
2178 i = (CONST_INT_P (XEXP (x, 1)) ? INTVAL (XEXP (x, 1)) : 0x12345678);
2179 if (IN_RANGE (i, -128, 127))
2180 return speed ? 0 : 1;
2181 if (IN_RANGE (i, -32768, 32767))
2182 return speed ? 0 : 2;
2183 return speed ? 2 : 6;
e2aead91 2184
28f32607 2185 default:
20d892d1 2186 return rtx_cost (x, MEM, 0, speed);
28f32607 2187 }
2188}
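
/* Worked examples of the scale above (operands invented):

	(reg a0)			 -> 0 in both modes
	(plus (reg a0) (const_int 4))	 -> 0 speed, 1 size (8-bit disp)
	(plus (reg a0) (const_int 1000)) -> 0 speed, 2 size (16-bit disp)
	(symbol_ref "x")		 -> 1 speed, 4 size (32-bit abs)

   i.e. the size costs approximate the extra displacement bytes.  */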
e2aead91 2189
28f32607 2190/* Implement the TARGET_REGISTER_MOVE_COST hook.
e2aead91 2191
28f32607 2192 Recall that the base value of 2 is required by assumptions elsewhere
2193 in the body of the compiler, and that cost 2 is special-cased as an
2194 early exit from reload meaning no work is required. */
e2aead91 2195
28f32607 2196static int
2197mn10300_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2198 reg_class_t ifrom, reg_class_t ito)
2199{
2200 enum reg_class from = (enum reg_class) ifrom;
2201 enum reg_class to = (enum reg_class) ito;
2202 enum reg_class scratch, test;
2203
2204 /* Simplify the following code by unifying the fp register classes. */
2205 if (to == FP_ACC_REGS)
2206 to = FP_REGS;
2207 if (from == FP_ACC_REGS)
2208 from = FP_REGS;
2209
2210 /* Diagnose invalid moves by costing them as two moves. */
2211
2212 scratch = NO_REGS;
2213 test = from;
2214 if (to == SP_REGS)
2215 scratch = (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
85a6eed4 2216 else if (to == MDR_REGS)
2217 scratch = DATA_REGS;
28f32607 2218 else if (to == FP_REGS && to != from)
2219 scratch = GENERAL_REGS;
2220 else
2221 {
2222 test = to;
2223 if (from == SP_REGS)
2224 scratch = (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
85a6eed4 2225 else if (from == MDR_REGS)
2226 scratch = DATA_REGS;
28f32607 2227 else if (from == FP_REGS && to != from)
2228 scratch = GENERAL_REGS;
2229 }
2230 if (scratch != NO_REGS && !reg_class_subset_p (test, scratch))
2231 return (mn10300_register_move_cost (VOIDmode, from, scratch)
2232 + mn10300_register_move_cost (VOIDmode, scratch, to));
e2aead91 2233
28f32607 2234 /* From here on, all we need consider are legal combinations. */
e2aead91 2235
28f32607 2236 if (optimize_size)
2237 {
2238 /* The scale here is bytes * 2. */
e2aead91 2239
28f32607 2240 if (from == to && (to == ADDRESS_REGS || to == DATA_REGS))
2241 return 2;
e2aead91 2242
28f32607 2243 if (from == SP_REGS)
2244 return (to == ADDRESS_REGS ? 2 : 6);
2245
 2246	      /* For AM33, all remaining legal moves are two bytes.  */
2247 if (TARGET_AM33)
2248 return 4;
2249
2250 if (to == SP_REGS)
2251 return (from == ADDRESS_REGS ? 4 : 6);
2252
2253 if ((from == ADDRESS_REGS || from == DATA_REGS)
2254 && (to == ADDRESS_REGS || to == DATA_REGS))
2255 return 4;
2256
2257 if (to == EXTENDED_REGS)
2258 return (to == from ? 6 : 4);
e2aead91 2259
28f32607 2260 /* What's left are SP_REGS, FP_REGS, or combinations of the above. */
2261 return 6;
2262 }
2263 else
2264 {
2265 /* The scale here is cycles * 2. */
2266
2267 if (to == FP_REGS)
2268 return 8;
2269 if (from == FP_REGS)
2270 return 4;
2271
2272 /* All legal moves between integral registers are single cycle. */
2273 return 2;
e2aead91 2274 }
2275}
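
/* An example of the composed costs above, assuming -Os on plain MN103:
   DATA_REGS -> SP_REGS is treated as having no direct move, so it is
   costed as DATA_REGS -> ADDRESS_REGS (4) plus ADDRESS_REGS -> SP_REGS
   (4), i.e. 8 on the bytes*2 scale, matching a two-insn copy through an
   address register.  */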
fab7adbf 2276
28f32607 2277/* Implement the TARGET_MEMORY_MOVE_COST hook.
2278
 2279	   Since the form of the address is not available here, this cost must
 2280	   be speed-relative; it should never be cheaper than a size-relative
 2281	   register move cost above, and it is not.  */
2282
ec0457a8 2283static int
28f32607 2284mn10300_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2285 reg_class_t iclass, bool in ATTRIBUTE_UNUSED)
ec0457a8 2286{
28f32607 2287 enum reg_class rclass = (enum reg_class) iclass;
2288
2289 if (rclass == FP_REGS)
2290 return 8;
2291 return 6;
ec0457a8 2292}
2293
28f32607 2294/* Implement the TARGET_RTX_COSTS hook.
2295
2296 Speed-relative costs are relative to COSTS_N_INSNS, which is intended
2297 to represent cycles. Size-relative costs are in bytes. */
2298
fab7adbf 2299static bool
20d892d1 2300mn10300_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
2301 int *ptotal, bool speed)
fab7adbf 2302{
28f32607 2303	  /* This value is used for SYMBOL_REF etc. where we want to pretend
2304 we have a full 32-bit constant. */
2305 HOST_WIDE_INT i = 0x12345678;
2306 int total;
2307
fab7adbf 2308 switch (code)
2309 {
2310 case CONST_INT:
28f32607 2311 i = INTVAL (x);
2312 do_int_costs:
2313 if (speed)
2314 {
2315 if (outer_code == SET)
2316 {
2317 /* 16-bit integer loads have latency 1, 32-bit loads 2. */
2318 if (IN_RANGE (i, -32768, 32767))
2319 total = COSTS_N_INSNS (1);
2320 else
2321 total = COSTS_N_INSNS (2);
2322 }
2323 else
2324 {
2325 /* 16-bit integer operands don't affect latency;
2326 24-bit and 32-bit operands add a cycle. */
2327 if (IN_RANGE (i, -32768, 32767))
2328 total = 0;
2329 else
2330 total = COSTS_N_INSNS (1);
2331 }
2332 }
fab7adbf 2333 else
28f32607 2334 {
2335 if (outer_code == SET)
2336 {
2337 if (i == 0)
2338 total = 1;
2339 else if (IN_RANGE (i, -128, 127))
2340 total = 2;
2341 else if (IN_RANGE (i, -32768, 32767))
2342 total = 3;
2343 else
2344 total = 6;
2345 }
2346 else
2347 {
2348 /* Reference here is ADD An,Dn, vs ADD imm,Dn. */
2349 if (IN_RANGE (i, -128, 127))
2350 total = 0;
2351 else if (IN_RANGE (i, -32768, 32767))
2352 total = 2;
2353 else if (TARGET_AM33 && IN_RANGE (i, -0x01000000, 0x00ffffff))
2354 total = 3;
2355 else
2356 total = 4;
2357 }
2358 }
2359 goto alldone;
fab7adbf 2360
2361 case CONST:
2362 case LABEL_REF:
2363 case SYMBOL_REF:
fab7adbf 2364 case CONST_DOUBLE:
28f32607 2365 /* We assume all of these require a 32-bit constant, even though
2366 some symbol and label references can be relaxed. */
2367 goto do_int_costs;
74f4459c 2368
28f32607 2369 case UNSPEC:
2370 switch (XINT (x, 1))
2371 {
2372 case UNSPEC_PIC:
2373 case UNSPEC_GOT:
2374 case UNSPEC_GOTOFF:
2375 case UNSPEC_PLT:
2376 case UNSPEC_GOTSYM_OFF:
2377 /* The PIC unspecs also resolve to a 32-bit constant. */
2378 goto do_int_costs;
fab7adbf 2379
28f32607 2380 default:
2381 /* Assume any non-listed unspec is some sort of arithmetic. */
2382 goto do_arith_costs;
2383 }
8935d57c 2384
28f32607 2385 case PLUS:
2386 /* Notice the size difference of INC and INC4. */
2387 if (!speed && outer_code == SET && CONST_INT_P (XEXP (x, 1)))
2388 {
2389 i = INTVAL (XEXP (x, 1));
2390 if (i == 1 || i == 4)
2391 {
20d892d1 2392 total = 1 + rtx_cost (XEXP (x, 0), PLUS, 0, speed);
28f32607 2393 goto alldone;
2394 }
2395 }
2396 goto do_arith_costs;
2397
2398 case MINUS:
2399 case AND:
2400 case IOR:
2401 case XOR:
2402 case NOT:
2403 case NEG:
2404 case ZERO_EXTEND:
2405 case SIGN_EXTEND:
2406 case COMPARE:
2407 case BSWAP:
2408 case CLZ:
2409 do_arith_costs:
2410 total = (speed ? COSTS_N_INSNS (1) : 2);
2411 break;
8935d57c 2412
28f32607 2413 case ASHIFT:
2414 /* Notice the size difference of ASL2 and variants. */
2415 if (!speed && CONST_INT_P (XEXP (x, 1)))
2416 switch (INTVAL (XEXP (x, 1)))
2417 {
2418 case 1:
2419 case 2:
2420 total = 1;
2421 goto alldone;
2422 case 3:
2423 case 4:
2424 total = 2;
2425 goto alldone;
2426 }
2427 /* FALLTHRU */
8935d57c 2428
28f32607 2429 case ASHIFTRT:
2430 case LSHIFTRT:
2431 total = (speed ? COSTS_N_INSNS (1) : 3);
2432 goto alldone;
8935d57c 2433
28f32607 2434 case MULT:
2435 total = (speed ? COSTS_N_INSNS (3) : 2);
8935d57c 2436 break;
fb16c776 2437
28f32607 2438 case DIV:
2439 case UDIV:
2440 case MOD:
2441 case UMOD:
2442 total = (speed ? COSTS_N_INSNS (39)
2443 /* Include space to load+retrieve MDR. */
2444 : code == MOD || code == UMOD ? 6 : 4);
8935d57c 2445 break;
fb16c776 2446
28f32607 2447 case MEM:
2448 total = mn10300_address_cost (XEXP (x, 0), speed);
2449 if (speed)
2450 total = COSTS_N_INSNS (2 + total);
2451 goto alldone;
2452
8935d57c 2453 default:
28f32607 2454 /* Probably not implemented. Assume external call. */
2455 total = (speed ? COSTS_N_INSNS (10) : 7);
2456 break;
8935d57c 2457 }
2458
28f32607 2459 *ptotal = total;
2460 return false;
2461
2462 alldone:
2463 *ptotal = total;
2464 return true;
8935d57c 2465}
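
/* A worked example of the constant costs above: for the insn
   (set (reg) (const_int 100)), optimizing for size yields total 2 (a
   two-byte immediate move), while optimizing for speed yields
   COSTS_N_INSNS (1), since a 16-bit immediate loads with latency
   one.  */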
28f32607 2466
b87a151a 2467/* If using PIC, mark a SYMBOL_REF for a non-global symbol so that we
2468 may access it using GOTOFF instead of GOT. */
2469
2470static void
48ed5fc2 2471mn10300_encode_section_info (tree decl, rtx rtl, int first ATTRIBUTE_UNUSED)
b87a151a 2472{
2473 rtx symbol;
2474
3626e955 2475 if (! MEM_P (rtl))
b87a151a 2476 return;
2477 symbol = XEXP (rtl, 0);
2478 if (GET_CODE (symbol) != SYMBOL_REF)
2479 return;
2480
2481 if (flag_pic)
2482 SYMBOL_REF_FLAG (symbol) = (*targetm.binds_local_p) (decl);
2483}
906bb5c3 2484
2485/* Dispatch tables on the mn10300 are extremely expensive in terms of code
2486 and readonly data size. So we crank up the case threshold value to
2487 encourage a series of if/else comparisons to implement many small switch
2488 statements. In theory, this value could be increased much more if we
2489 were solely optimizing for space, but we keep it "reasonable" to avoid
2490 serious code efficiency lossage. */
2491
5574dbdd 2492static unsigned int
2493mn10300_case_values_threshold (void)
906bb5c3 2494{
2495 return 6;
2496}
3e16f982 2497
3e16f982 2498/* Worker function for TARGET_TRAMPOLINE_INIT. */
2499
2500static void
2501mn10300_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
2502{
3562cea7 2503 rtx mem, disp, fnaddr = XEXP (DECL_RTL (fndecl), 0);
2504
2505 /* This is a strict alignment target, which means that we play
2506 some games to make sure that the locations at which we need
2507 to store <chain> and <disp> wind up at aligned addresses.
2508
2509 0x28 0x00 add 0,d0
2510 0xfc 0xdd mov chain,a1
2511 <chain>
2512 0xf8 0xed 0x00 btst 0,d1
2513 0xdc jmp fnaddr
2514 <disp>
2515
2516 Note that the two extra insns are effectively nops; they
2517 clobber the flags but do not affect the contents of D0 or D1. */
3e16f982 2518
3562cea7 2519 disp = expand_binop (SImode, sub_optab, fnaddr,
29c05e22 2520 plus_constant (Pmode, XEXP (m_tramp, 0), 11),
3562cea7 2521 NULL_RTX, 1, OPTAB_DIRECT);
3e16f982 2522
3562cea7 2523 mem = adjust_address (m_tramp, SImode, 0);
2524 emit_move_insn (mem, gen_int_mode (0xddfc0028, SImode));
2525 mem = adjust_address (m_tramp, SImode, 4);
3e16f982 2526 emit_move_insn (mem, chain_value);
3562cea7 2527 mem = adjust_address (m_tramp, SImode, 8);
2528 emit_move_insn (mem, gen_int_mode (0xdc00edf8, SImode));
2529 mem = adjust_address (m_tramp, SImode, 12);
2530 emit_move_insn (mem, disp);
3e16f982 2531}
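
/* The finished trampoline, as 32-bit little-endian words, with CHAIN
   and DISP filled in by the moves above:

	offset	0: 0xddfc0028	bytes 0x28 0x00 0xfc 0xdd
	offset	4: CHAIN	immediate of the "mov chain,a1"
	offset	8: 0xdc00edf8	bytes 0xf8 0xed 0x00 0xdc
	offset 12: DISP		fnaddr - (trampoline + 11)

   DISP is computed relative to the "jmp" opcode byte 0xdc at offset 11,
   which is why 11 is added to the trampoline address above.  */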
e92d3ba8 2532
2533/* Output the assembler code for a C++ thunk function.
2534 THUNK_DECL is the declaration for the thunk function itself, FUNCTION
2535 is the decl for the target function. DELTA is an immediate constant
2536 offset to be added to the THIS parameter. If VCALL_OFFSET is nonzero
2537 the word at the adjusted address *(*THIS' + VCALL_OFFSET) should be
2538 additionally added to THIS. Finally jump to the entry point of
2539 FUNCTION. */
2540
2541static void
2542mn10300_asm_output_mi_thunk (FILE * file,
2543 tree thunk_fndecl ATTRIBUTE_UNUSED,
2544 HOST_WIDE_INT delta,
2545 HOST_WIDE_INT vcall_offset,
2546 tree function)
2547{
2548 const char * _this;
2549
2550 /* Get the register holding the THIS parameter. Handle the case
2551 where there is a hidden first argument for a returned structure. */
2552 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
2553 _this = reg_names [FIRST_ARGUMENT_REGNUM + 1];
2554 else
2555 _this = reg_names [FIRST_ARGUMENT_REGNUM];
2556
2557 fprintf (file, "\t%s Thunk Entry Point:\n", ASM_COMMENT_START);
2558
2559 if (delta)
2560 fprintf (file, "\tadd %d, %s\n", (int) delta, _this);
2561
2562 if (vcall_offset)
2563 {
2564 const char * scratch = reg_names [FIRST_ADDRESS_REGNUM + 1];
2565
2566 fprintf (file, "\tmov %s, %s\n", _this, scratch);
2567 fprintf (file, "\tmov (%s), %s\n", scratch, scratch);
2568 fprintf (file, "\tadd %d, %s\n", (int) vcall_offset, scratch);
2569 fprintf (file, "\tmov (%s), %s\n", scratch, scratch);
2570 fprintf (file, "\tadd %s, %s\n", scratch, _this);
2571 }
2572
2573 fputs ("\tjmp ", file);
2574 assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
2575 putc ('\n', file);
2576}
2577
2578/* Return true if mn10300_output_mi_thunk would be able to output the
2579 assembler code for the thunk function specified by the arguments
2580 it is passed, and false otherwise. */
2581
2582static bool
2583mn10300_can_output_mi_thunk (const_tree thunk_fndecl ATTRIBUTE_UNUSED,
2584 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
2585 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
2586 const_tree function ATTRIBUTE_UNUSED)
2587{
2588 return true;
2589}
5574dbdd 2590
2591bool
2592mn10300_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
2593{
2594 if (REGNO_REG_CLASS (regno) == FP_REGS
2595 || REGNO_REG_CLASS (regno) == FP_ACC_REGS)
2596 /* Do not store integer values in FP registers. */
2597 return GET_MODE_CLASS (mode) == MODE_FLOAT && ((regno & 1) == 0);
2598
2599 if (((regno) & 1) == 0 || GET_MODE_SIZE (mode) == 4)
2600 return true;
2601
2602 if (REGNO_REG_CLASS (regno) == DATA_REGS
2603 || (TARGET_AM33 && REGNO_REG_CLASS (regno) == ADDRESS_REGS)
2604 || REGNO_REG_CLASS (regno) == EXTENDED_REGS)
2605 return GET_MODE_SIZE (mode) <= 4;
2606
2607 return false;
2608}
2609
2610bool
2611mn10300_modes_tieable (enum machine_mode mode1, enum machine_mode mode2)
2612{
2613 if (GET_MODE_CLASS (mode1) == MODE_FLOAT
2614 && GET_MODE_CLASS (mode2) != MODE_FLOAT)
2615 return false;
2616
2617 if (GET_MODE_CLASS (mode2) == MODE_FLOAT
2618 && GET_MODE_CLASS (mode1) != MODE_FLOAT)
2619 return false;
2620
2621 if (TARGET_AM33
2622 || mode1 == mode2
2623 || (GET_MODE_SIZE (mode1) <= 4 && GET_MODE_SIZE (mode2) <= 4))
2624 return true;
2625
2626 return false;
2627}
2628
990679af 2629static int
2630cc_flags_for_mode (enum machine_mode mode)
2631{
2632 switch (mode)
2633 {
2634 case CCmode:
2635 return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_C | CC_FLAG_V;
2636 case CCZNCmode:
2637 return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_C;
2638 case CCZNmode:
2639 return CC_FLAG_Z | CC_FLAG_N;
2640 case CC_FLOATmode:
2641 return -1;
2642 default:
2643 gcc_unreachable ();
2644 }
2645}
2646
2647static int
2648cc_flags_for_code (enum rtx_code code)
2649{
2650 switch (code)
2651 {
2652 case EQ: /* Z */
2653 case NE: /* ~Z */
2654 return CC_FLAG_Z;
2655
2656 case LT: /* N */
2657 case GE: /* ~N */
2658 return CC_FLAG_N;
2660
2661 case GT: /* ~(Z|(N^V)) */
2662 case LE: /* Z|(N^V) */
2663 return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_V;
2664
2665 case GEU: /* ~C */
2666 case LTU: /* C */
2667 return CC_FLAG_C;
2668
2669 case GTU: /* ~(C | Z) */
2670 case LEU: /* C | Z */
2671 return CC_FLAG_Z | CC_FLAG_C;
2672
2673 case ORDERED:
2674 case UNORDERED:
2675 case LTGT:
2676 case UNEQ:
2677 case UNGE:
2678 case UNGT:
2679 case UNLE:
2680 case UNLT:
2681 return -1;
2682
2683 default:
2684 gcc_unreachable ();
2685 }
2686}
2687
5574dbdd 2688enum machine_mode
990679af 2689mn10300_select_cc_mode (enum rtx_code code, rtx x, rtx y ATTRIBUTE_UNUSED)
5574dbdd 2690{
990679af 2691 int req;
2692
2693 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2694 return CC_FLOATmode;
2695
2696 req = cc_flags_for_code (code);
2697
2698 if (req & CC_FLAG_V)
2699 return CCmode;
2700 if (req & CC_FLAG_C)
2701 return CCZNCmode;
2702 return CCZNmode;
5574dbdd 2703}
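
/* Combining the two helpers above: an EQ or LT test needs only Z or N
   and gets CCZNmode; LTU/GEU need C and get CCZNCmode; GT/LE need V as
   well and must use the full CCmode; and any floating-point comparison
   selects CC_FLOATmode regardless of the code.  */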
4879b320 2704
2705static inline bool
2706is_load_insn (rtx insn)
2707{
2708 if (GET_CODE (PATTERN (insn)) != SET)
2709 return false;
2710
2711 return MEM_P (SET_SRC (PATTERN (insn)));
2712}
2713
2714static inline bool
2715is_store_insn (rtx insn)
2716{
2717 if (GET_CODE (PATTERN (insn)) != SET)
2718 return false;
2719
2720 return MEM_P (SET_DEST (PATTERN (insn)));
2721}
2722
2723/* Update scheduling costs for situations that cannot be
2724 described using the attributes and DFA machinery.
2725 DEP is the insn being scheduled.
2726 INSN is the previous insn.
2727 COST is the current cycle cost for DEP. */
2728
2729static int
2730mn10300_adjust_sched_cost (rtx insn, rtx link, rtx dep, int cost)
2731{
2732 int timings = get_attr_timings (insn);
2733
2734 if (!TARGET_AM33)
2735 return 1;
2736
2737 if (GET_CODE (insn) == PARALLEL)
2738 insn = XVECEXP (insn, 0, 0);
2739
2740 if (GET_CODE (dep) == PARALLEL)
2741 dep = XVECEXP (dep, 0, 0);
2742
2743 /* For the AM34 a load instruction that follows a
2744 store instruction incurs an extra cycle of delay. */
2745 if (mn10300_tune_cpu == PROCESSOR_AM34
2746 && is_load_insn (dep)
2747 && is_store_insn (insn))
2748 cost += 1;
2749
2750 /* For the AM34 a non-store, non-branch FPU insn that follows
2751 another FPU insn incurs a one cycle throughput increase. */
2752 else if (mn10300_tune_cpu == PROCESSOR_AM34
2753 && ! is_store_insn (insn)
2754 && ! JUMP_P (insn)
2755 && GET_CODE (PATTERN (dep)) == SET
2756 && GET_CODE (PATTERN (insn)) == SET
2757 && GET_MODE_CLASS (GET_MODE (SET_SRC (PATTERN (dep)))) == MODE_FLOAT
2758 && GET_MODE_CLASS (GET_MODE (SET_SRC (PATTERN (insn)))) == MODE_FLOAT)
2759 cost += 1;
2760
2761 /* Resolve the conflict described in section 1-7-4 of
2762 Chapter 3 of the MN103E Series Instruction Manual
2763 where it says:
2764
2765 "When the preceeding instruction is a CPU load or
2766 store instruction, a following FPU instruction
2767 cannot be executed until the CPU completes the
2768 latency period even though there are no register
2769 or flag dependencies between them." */
2770
2771 /* Only the AM33-2 (and later) CPUs have FPU instructions. */
2772 if (! TARGET_AM33_2)
2773 return cost;
2774
2775 /* If a data dependence already exists then the cost is correct. */
2776 if (REG_NOTE_KIND (link) == 0)
2777 return cost;
2778
 2779	  /* Check that the instruction about to be scheduled is an FPU instruction.  */
2780 if (GET_CODE (PATTERN (dep)) != SET)
2781 return cost;
2782
2783 if (GET_MODE_CLASS (GET_MODE (SET_SRC (PATTERN (dep)))) != MODE_FLOAT)
2784 return cost;
2785
2786 /* Now check to see if the previous instruction is a load or store. */
2787 if (! is_load_insn (insn) && ! is_store_insn (insn))
2788 return cost;
2789
2790 /* XXX: Verify: The text of 1-7-4 implies that the restriction
 2791	     only applies when an INTEGER load/store precedes an FPU
 2792	     instruction, but is this true?  For now we assume that it is.  */
2793 if (GET_MODE_CLASS (GET_MODE (SET_SRC (PATTERN (insn)))) != MODE_INT)
2794 return cost;
2795
2796 /* Extract the latency value from the timings attribute. */
2797 return timings < 100 ? (timings % 10) : (timings % 100);
2798}
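
/* For example (AM34, hypothetical insns): a store followed by a
   dependent load gains one cycle from the first adjustment above, and
   back-to-back SFmode arithmetic gains one cycle from the second; both
   model pipeline behaviour rather than encoding size.  */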
b2d7ede1 2799
2800static void
2801mn10300_conditional_register_usage (void)
2802{
2803 unsigned int i;
2804
2805 if (!TARGET_AM33)
2806 {
2807 for (i = FIRST_EXTENDED_REGNUM;
2808 i <= LAST_EXTENDED_REGNUM; i++)
2809 fixed_regs[i] = call_used_regs[i] = 1;
2810 }
2811 if (!TARGET_AM33_2)
2812 {
2813 for (i = FIRST_FP_REGNUM;
2814 i <= LAST_FP_REGNUM; i++)
2815 fixed_regs[i] = call_used_regs[i] = 1;
2816 }
2817 if (flag_pic)
2818 fixed_regs[PIC_OFFSET_TABLE_REGNUM] =
2819 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
2820}
7de3ada8 2821
2822/* Worker function for TARGET_MD_ASM_CLOBBERS.
2823 We do this in the mn10300 backend to maintain source compatibility
2824 with the old cc0-based compiler. */
2825
2826static tree
2827mn10300_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
2828 tree inputs ATTRIBUTE_UNUSED,
2829 tree clobbers)
2830{
2831 clobbers = tree_cons (NULL_TREE, build_string (5, "EPSW"),
2832 clobbers);
2833 return clobbers;
2834}
5574dbdd 2835\f
990679af 2836/* A helper function for splitting cbranch patterns after reload. */
2837
2838void
2839mn10300_split_cbranch (enum machine_mode cmp_mode, rtx cmp_op, rtx label_ref)
2840{
2841 rtx flags, x;
2842
2843 flags = gen_rtx_REG (cmp_mode, CC_REG);
2844 x = gen_rtx_COMPARE (cmp_mode, XEXP (cmp_op, 0), XEXP (cmp_op, 1));
2845 x = gen_rtx_SET (VOIDmode, flags, x);
2846 emit_insn (x);
2847
2848 x = gen_rtx_fmt_ee (GET_CODE (cmp_op), VOIDmode, flags, const0_rtx);
2849 x = gen_rtx_IF_THEN_ELSE (VOIDmode, x, label_ref, pc_rtx);
2850 x = gen_rtx_SET (VOIDmode, pc_rtx, x);
2851 emit_jump_insn (x);
2852}
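
/* E.g. splitting a branch on (lt (reg:SI d0) (reg:SI d1)) with a
   CMP_MODE of CCZNmode emits (registers illustrative):

	(set (reg:CCZN CC_REG) (compare:CCZN (reg:SI d0) (reg:SI d1)))
	(set (pc) (if_then_else (lt (reg:CCZN CC_REG) (const_int 0))
				(label_ref ...) (pc)))  */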
2853
2854/* A helper function for matching parallels that set the flags. */
2855
2856bool
2857mn10300_match_ccmode (rtx insn, enum machine_mode cc_mode)
2858{
2859 rtx op1, flags;
2860 enum machine_mode flags_mode;
2861
2862 gcc_checking_assert (XVECLEN (PATTERN (insn), 0) == 2);
2863
2864 op1 = XVECEXP (PATTERN (insn), 0, 1);
2865 gcc_checking_assert (GET_CODE (SET_SRC (op1)) == COMPARE);
2866
2867 flags = SET_DEST (op1);
2868 flags_mode = GET_MODE (flags);
2869
2870 if (GET_MODE (SET_SRC (op1)) != flags_mode)
2871 return false;
2872 if (GET_MODE_CLASS (flags_mode) != MODE_CC)
2873 return false;
2874
2875 /* Ensure that the mode of FLAGS is compatible with CC_MODE. */
2876 if (cc_flags_for_mode (flags_mode) & ~cc_flags_for_mode (cc_mode))
2877 return false;
2878
2879 return true;
2880}
2881
35c2a6c6 2882/* This function is used to help split:
2883
2884 (set (reg) (and (reg) (int)))
2885
2886 into:
2887
 2888	     (set (reg) (shift (reg) (int)))
 2889	     (set (reg) (shift (reg) (int)))
 2890	
35c2a6c6 2891	   where the shifts will be shorter than the "and" insn.
 2892	
 2893	   It returns the number of bits that should be shifted.  A positive
 2894	   value means that the low bits are to be cleared (and hence the
2895 shifts should be right followed by left) whereas a negative value
2896 means that the high bits are to be cleared (left followed by right).
2897 Zero is returned when it would not be economical to split the AND. */
2898
990679af 2899int
2900mn10300_split_and_operand_count (rtx op)
2901{
2902 HOST_WIDE_INT val = INTVAL (op);
2903 int count;
2904
2905 if (val < 0)
2906 {
2907 /* High bit is set, look for bits clear at the bottom. */
2908 count = exact_log2 (-val);
2909 if (count < 0)
2910 return 0;
 2911	      /* This is only a size win if we can use the asl2 insn.  Otherwise we
2912 would be replacing 1 6-byte insn with 2 3-byte insns. */
2913 if (count > (optimize_insn_for_speed_p () ? 2 : 4))
2914 return 0;
35c2a6c6 2915 return count;
990679af 2916 }
2917 else
2918 {
2919 /* High bit is clear, look for bits set at the bottom. */
2920 count = exact_log2 (val + 1);
2921 count = 32 - count;
2922 /* Again, this is only a size win with asl2. */
2923 if (count > (optimize_insn_for_speed_p () ? 2 : 4))
2924 return 0;
2925 return -count;
2926 }
2927}
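
/* Two worked examples, assuming -Os so that up to four bits may be
   shifted: VAL = 0xfffffffc (-4) gives count 2, i.e. shift right then
   left by 2 to clear the low two bits; VAL = 0x0fffffff gives
   32 - 28 = 4, returned as -4, i.e. shift left then right by 4 to clear
   the high four bits.  */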
2928\f
e7076c21 2929struct liw_data
2930{
2931 enum attr_liw slot;
2932 enum attr_liw_op op;
2933 rtx dest;
2934 rtx src;
2935};
2936
2937/* Decide if the given insn is a candidate for LIW bundling. If it is then
2938 extract the operands and LIW attributes from the insn and use them to fill
2939 in the liw_data structure. Return true upon success or false if the insn
2940 cannot be bundled. */
f9e46c25 2941
2942static bool
e7076c21 2943extract_bundle (rtx insn, struct liw_data * pdata)
f9e46c25 2944{
e7076c21 2945 bool allow_consts = true;
81705807 2946 rtx p;
f9e46c25 2947
e7076c21 2948 gcc_assert (pdata != NULL);
2949
2950 if (insn == NULL_RTX)
2951 return false;
2952 /* Make sure that we are dealing with a simple SET insn. */
f9e46c25 2953 p = single_set (insn);
e7076c21 2954 if (p == NULL_RTX)
2955 return false;
2956
2957 /* Make sure that it could go into one of the LIW pipelines. */
2958 pdata->slot = get_attr_liw (insn);
2959 if (pdata->slot == LIW_BOTH)
2960 return false;
2961
2962 pdata->op = get_attr_liw_op (insn);
2963
e7076c21 2964 switch (pdata->op)
f9e46c25 2965 {
2966 case LIW_OP_MOV:
e7076c21 2967 pdata->dest = SET_DEST (p);
2968 pdata->src = SET_SRC (p);
f9e46c25 2969 break;
2970 case LIW_OP_CMP:
e7076c21 2971 pdata->dest = XEXP (SET_SRC (p), 0);
2972 pdata->src = XEXP (SET_SRC (p), 1);
f9e46c25 2973 break;
2974 case LIW_OP_NONE:
2975 return false;
e7076c21 2976 case LIW_OP_AND:
2977 case LIW_OP_OR:
2978 case LIW_OP_XOR:
2979 /* The AND, OR and XOR long instruction words only accept register arguments. */
2980 allow_consts = false;
2981 /* Fall through. */
f9e46c25 2982 default:
e7076c21 2983 pdata->dest = SET_DEST (p);
2984 pdata->src = XEXP (SET_SRC (p), 1);
f9e46c25 2985 break;
2986 }
2987
e7076c21 2988 if (! REG_P (pdata->dest))
2989 return false;
2990
2991 if (REG_P (pdata->src))
2992 return true;
2993
2994 return allow_consts && satisfies_constraint_O (pdata->src);
f9e46c25 2995}
2996
e7076c21 2997/* Make sure that it is OK to execute LIW1 and LIW2 in parallel. GCC generated
 2998	   the instructions with the assumption that LIW1 would be executed before LIW2,
 2999	   so we must check for overlaps between their sources and destinations.  */
f9e46c25 3000
3001static bool
e7076c21 3002check_liw_constraints (struct liw_data * pliw1, struct liw_data * pliw2)
3003{
3004 /* Check for slot conflicts. */
3005 if (pliw2->slot == pliw1->slot && pliw1->slot != LIW_EITHER)
f9e46c25 3006 return false;
3007
e7076c21 3008 /* If either operation is a compare, then "dest" is really an input; the real
3009 destination is CC_REG. So these instructions need different checks. */
3010
3011 /* Changing "CMP ; OP" into "CMP | OP" is OK because the comparison will
3012 check its values prior to any changes made by OP. */
3013 if (pliw1->op == LIW_OP_CMP)
3014 {
 3015	      /* Two sequential comparisons mean dead code, which ought to
3016 have been eliminated given that bundling only happens with
3017 optimization. We cannot bundle them in any case. */
3018 gcc_assert (pliw1->op != pliw2->op);
3019 return true;
3020 }
f9e46c25 3021
e7076c21 3022 /* Changing "OP ; CMP" into "OP | CMP" does not work if the value being compared
3023 is the destination of OP, as the CMP will look at the old value, not the new
3024 one. */
3025 if (pliw2->op == LIW_OP_CMP)
f9e46c25 3026 {
e7076c21 3027 if (REGNO (pliw2->dest) == REGNO (pliw1->dest))
3028 return false;
3029
3030 if (REG_P (pliw2->src))
3031 return REGNO (pliw2->src) != REGNO (pliw1->dest);
3032
3033 return true;
3034 }
3035
3036 /* Changing "OP1 ; OP2" into "OP1 | OP2" does not work if they both write to the
3037 same destination register. */
3038 if (REGNO (pliw2->dest) == REGNO (pliw1->dest))
3039 return false;
3040
3041 /* Changing "OP1 ; OP2" into "OP1 | OP2" generally does not work if the destination
 3042	     of OP1 is the source of OP2.  The exception is when OP1 is a MOVE instruction,
 3043	     in which case we can replace the source in OP2 with the source of OP1.  */
 3044	  if (REG_P (pliw2->src) && REGNO (pliw2->src) == REGNO (pliw1->dest))
 3045	    {
e7076c21 3046	      if (pliw1->op == LIW_OP_MOV)
f9e46c25 3047 {
e7076c21 3048 if (! REG_P (pliw1->src)
3049 && (pliw2->op == LIW_OP_AND
3050 || pliw2->op == LIW_OP_OR
3051 || pliw2->op == LIW_OP_XOR))
3052 return false;
3053
3054 pliw2->src = pliw1->src;
f9e46c25 3055 return true;
3056 }
3057 return false;
3058 }
3059
e7076c21 3060 /* Everything else is OK. */
f9e46c25 3061 return true;
3062}
3063
f9e46c25 3064/* Combine pairs of insns into LIW bundles. */
3065
3066static void
3067mn10300_bundle_liw (void)
3068{
3069 rtx r;
3070
3071 for (r = get_insns (); r != NULL_RTX; r = next_nonnote_nondebug_insn (r))
3072 {
e7076c21 3073 rtx insn1, insn2;
3074 struct liw_data liw1, liw2;
f9e46c25 3075
3076 insn1 = r;
e7076c21 3077 if (! extract_bundle (insn1, & liw1))
f9e46c25 3078 continue;
3079
3080 insn2 = next_nonnote_nondebug_insn (insn1);
e7076c21 3081 if (! extract_bundle (insn2, & liw2))
f9e46c25 3082 continue;
3083
e7076c21 3084 /* Check for source/destination overlap. */
3085 if (! check_liw_constraints (& liw1, & liw2))
f9e46c25 3086 continue;
3087
e7076c21 3088 if (liw1.slot == LIW_OP2 || liw2.slot == LIW_OP1)
f9e46c25 3089 {
e7076c21 3090 struct liw_data temp;
3091
3092 temp = liw1;
f9e46c25 3093 liw1 = liw2;
e7076c21 3094 liw2 = temp;
f9e46c25 3095 }
3096
f9e46c25 3097 delete_insn (insn2);
3098
e7076c21 3099 if (liw1.op == LIW_OP_CMP)
3100 insn2 = gen_cmp_liw (liw2.dest, liw2.src, liw1.dest, liw1.src,
3101 GEN_INT (liw2.op));
3102 else if (liw2.op == LIW_OP_CMP)
3103 insn2 = gen_liw_cmp (liw1.dest, liw1.src, liw2.dest, liw2.src,
3104 GEN_INT (liw1.op));
f9e46c25 3105 else
e7076c21 3106 insn2 = gen_liw (liw1.dest, liw2.dest, liw1.src, liw2.src,
3107 GEN_INT (liw1.op), GEN_INT (liw2.op));
f9e46c25 3108
3109 insn2 = emit_insn_after (insn2, insn1);
3110 delete_insn (insn1);
3111 r = insn2;
3112 }
3113}
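
/* Conceptually (the exact insn patterns live in mn10300.md), an
   adjacent pair such as

	mov d0,a0
	add d1,a1

   whose operands do not conflict is rewritten into a single two-slot
   LIW insn that performs both operations in one cycle, with the
   OP1/OP2 slot order fixed by the swap above.  */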
3114
f9b3e8f5 3115#define DUMP(reason, insn) \
3116 do \
3117 { \
3118 if (dump_file) \
3119 { \
3120 fprintf (dump_file, reason "\n"); \
3121 if (insn != NULL_RTX) \
3122 print_rtl_single (dump_file, insn); \
 3123	        fprintf (dump_file, "\n"); \
3124 } \
3125 } \
3126 while (0)
3127
3128/* Replace the BRANCH insn with a Lcc insn that goes to LABEL.
3129 Insert a SETLB insn just before LABEL. */
3130
3131static void
3132mn10300_insert_setlb_lcc (rtx label, rtx branch)
3133{
3134 rtx lcc, comparison, cmp_reg;
3135
3136 if (LABEL_NUSES (label) > 1)
3137 {
3138 rtx insn;
3139
3140 /* This label is used both as an entry point to the loop
3141 and as a loop-back point for the loop. We need to separate
3142 these two functions so that the SETLB happens upon entry,
3143 but the loop-back does not go to the SETLB instruction. */
3144 DUMP ("Inserting SETLB insn after:", label);
3145 insn = emit_insn_after (gen_setlb (), label);
3146 label = gen_label_rtx ();
3147 emit_label_after (label, insn);
3148 DUMP ("Created new loop-back label:", label);
3149 }
3150 else
3151 {
3152 DUMP ("Inserting SETLB insn before:", label);
3153 emit_insn_before (gen_setlb (), label);
3154 }
3155
3156 comparison = XEXP (SET_SRC (PATTERN (branch)), 0);
3157 cmp_reg = XEXP (comparison, 0);
3158 gcc_assert (REG_P (cmp_reg));
3159
 3160	  /* The comparison must already have been split out of the branch
 3161	     by this point; verify that it targets the flags register.  */
3162 gcc_assert (REGNO (cmp_reg) == CC_REG);
3163
3164 if (GET_MODE (cmp_reg) == CC_FLOATmode)
3165 lcc = gen_FLcc (comparison, label);
3166 else
3167 lcc = gen_Lcc (comparison, label);
3168
3169 lcc = emit_jump_insn_before (lcc, branch);
3170 mark_jump_label (XVECEXP (PATTERN (lcc), 0, 0), lcc, 0);
bd2b2481 3171 JUMP_LABEL (lcc) = label;
f9b3e8f5 3172 DUMP ("Replacing branch insn...", branch);
3173 DUMP ("... with Lcc insn:", lcc);
3174 delete_insn (branch);
3175}
3176
3177static bool
3178mn10300_block_contains_call (struct basic_block_def * block)
3179{
3180 rtx insn;
3181
3182 FOR_BB_INSNS (block, insn)
3183 if (CALL_P (insn))
3184 return true;
3185
3186 return false;
3187}
3188
3189static bool
3190mn10300_loop_contains_call_insn (loop_p loop)
3191{
3192 basic_block * bbs;
3193 bool result = false;
3194 unsigned int i;
3195
3196 bbs = get_loop_body (loop);
3197
3198 for (i = 0; i < loop->num_nodes; i++)
3199 if (mn10300_block_contains_call (bbs[i]))
3200 {
3201 result = true;
3202 break;
3203 }
3204
3205 free (bbs);
3206 return result;
3207}
3208
3209static void
3210mn10300_scan_for_setlb_lcc (void)
3211{
3212 struct loops loops;
3213 loop_iterator liter;
3214 loop_p loop;
3215
3216 DUMP ("Looking for loops that can use the SETLB insn", NULL_RTX);
3217
3218 df_analyze ();
3219 compute_bb_for_insn ();
3220
3221 /* Find the loops. */
3222 if (flow_loops_find (& loops) < 1)
3223 DUMP ("No loops found", NULL_RTX);
3224 current_loops = & loops;
3225
 3226	  /* FIXME: For now we only investigate innermost loops.  In practice, however,
3227 if an inner loop is not suitable for use with the SETLB/Lcc insns, it may
3228 be the case that its parent loop is suitable. Thus we should check all
3229 loops, but work from the innermost outwards. */
3230 FOR_EACH_LOOP (liter, loop, LI_ONLY_INNERMOST)
3231 {
3232 const char * reason = NULL;
3233
3234 /* Check to see if we can modify this loop. If we cannot
3235 then set 'reason' to describe why it could not be done. */
3236 if (loop->latch == NULL)
3237 reason = "it contains multiple latches";
3238 else if (loop->header != loop->latch)
3239 /* FIXME: We could handle loops that span multiple blocks,
3240 but this requires a lot more work tracking down the branches
3241 that need altering, so for now keep things simple. */
3242 reason = "the loop spans multiple blocks";
3243 else if (mn10300_loop_contains_call_insn (loop))
3244 reason = "it contains CALL insns";
3245 else
3246 {
3247 rtx branch = BB_END (loop->latch);
3248
3249 gcc_assert (JUMP_P (branch));
3250 if (single_set (branch) == NULL_RTX || ! any_condjump_p (branch))
3251 /* We cannot optimize tablejumps and the like. */
3252 /* FIXME: We could handle unconditional jumps. */
3253 reason = "it is not a simple loop";
3254 else
3255 {
3256 rtx label;
3257
3258 if (dump_file)
3259 flow_loop_dump (loop, dump_file, NULL, 0);
3260
3261 label = BB_HEAD (loop->header);
3262 gcc_assert (LABEL_P (label));
3263
3264 mn10300_insert_setlb_lcc (label, branch);
3265 }
3266 }
3267
3268 if (dump_file && reason != NULL)
3269 fprintf (dump_file, "Loop starting with insn %d is not suitable because %s\n",
3270 INSN_UID (BB_HEAD (loop->header)),
3271 reason);
3272 }
3273
3274#if 0 /* FIXME: We should free the storage we allocated, but
3275 for some unknown reason this leads to seg-faults. */
3276 FOR_EACH_LOOP (liter, loop, 0)
3277 free_simple_loop_desc (loop);
3278
3279 flow_loops_free (current_loops);
3280#endif
3281
3282 current_loops = NULL;
3283
3284 df_finish_pass (false);
3285
3286 DUMP ("SETLB scan complete", NULL_RTX);
3287}
3288
f9e46c25 3289static void
3290mn10300_reorg (void)
3291{
f9b3e8f5 3292 /* These are optimizations, so only run them if optimizing. */
3293 if (TARGET_AM33 && (optimize > 0 || optimize_size))
f9e46c25 3294 {
f9b3e8f5 3295 if (TARGET_ALLOW_SETLB)
3296 mn10300_scan_for_setlb_lcc ();
3297
f9e46c25 3298 if (TARGET_ALLOW_LIW)
3299 mn10300_bundle_liw ();
3300 }
3301}
3302\f
3626e955 3303/* Initialize the GCC target structure. */
3304
f9e46c25 3305#undef TARGET_MACHINE_DEPENDENT_REORG
3306#define TARGET_MACHINE_DEPENDENT_REORG mn10300_reorg
3307
3626e955 3308#undef TARGET_ASM_ALIGNED_HI_OP
3309#define TARGET_ASM_ALIGNED_HI_OP "\t.hword\t"
3310
3311#undef TARGET_LEGITIMIZE_ADDRESS
3312#define TARGET_LEGITIMIZE_ADDRESS mn10300_legitimize_address
3313
28f32607 3314#undef TARGET_ADDRESS_COST
3315#define TARGET_ADDRESS_COST mn10300_address_cost
3316#undef TARGET_REGISTER_MOVE_COST
3317#define TARGET_REGISTER_MOVE_COST mn10300_register_move_cost
3318#undef TARGET_MEMORY_MOVE_COST
3319#define TARGET_MEMORY_MOVE_COST mn10300_memory_move_cost
3626e955 3320#undef TARGET_RTX_COSTS
3321#define TARGET_RTX_COSTS mn10300_rtx_costs
3626e955 3322
3323#undef TARGET_ASM_FILE_START
3324#define TARGET_ASM_FILE_START mn10300_file_start
3325#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
3326#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
3327
22680c28 3328#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
3329#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA mn10300_asm_output_addr_const_extra
3330
3626e955 3331#undef TARGET_OPTION_OVERRIDE
3332#define TARGET_OPTION_OVERRIDE mn10300_option_override
3333
3334#undef TARGET_ENCODE_SECTION_INFO
3335#define TARGET_ENCODE_SECTION_INFO mn10300_encode_section_info
3336
3337#undef TARGET_PROMOTE_PROTOTYPES
3338#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
3339#undef TARGET_RETURN_IN_MEMORY
3340#define TARGET_RETURN_IN_MEMORY mn10300_return_in_memory
3341#undef TARGET_PASS_BY_REFERENCE
3342#define TARGET_PASS_BY_REFERENCE mn10300_pass_by_reference
3343#undef TARGET_CALLEE_COPIES
3344#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
3345#undef TARGET_ARG_PARTIAL_BYTES
3346#define TARGET_ARG_PARTIAL_BYTES mn10300_arg_partial_bytes
dc67179a 3347#undef TARGET_FUNCTION_ARG
3348#define TARGET_FUNCTION_ARG mn10300_function_arg
3349#undef TARGET_FUNCTION_ARG_ADVANCE
3350#define TARGET_FUNCTION_ARG_ADVANCE mn10300_function_arg_advance
3626e955 3351
3352#undef TARGET_EXPAND_BUILTIN_SAVEREGS
3353#define TARGET_EXPAND_BUILTIN_SAVEREGS mn10300_builtin_saveregs
3354#undef TARGET_EXPAND_BUILTIN_VA_START
3355#define TARGET_EXPAND_BUILTIN_VA_START mn10300_va_start
3356
3357#undef TARGET_CASE_VALUES_THRESHOLD
3358#define TARGET_CASE_VALUES_THRESHOLD mn10300_case_values_threshold
3359
3360#undef TARGET_LEGITIMATE_ADDRESS_P
3361#define TARGET_LEGITIMATE_ADDRESS_P mn10300_legitimate_address_p
4c6c308e 3362#undef TARGET_DELEGITIMIZE_ADDRESS
3363#define TARGET_DELEGITIMIZE_ADDRESS mn10300_delegitimize_address
ca316360 3364#undef TARGET_LEGITIMATE_CONSTANT_P
3365#define TARGET_LEGITIMATE_CONSTANT_P mn10300_legitimate_constant_p
3626e955 3366
029ca87f 3367#undef TARGET_PREFERRED_RELOAD_CLASS
3368#define TARGET_PREFERRED_RELOAD_CLASS mn10300_preferred_reload_class
3369#undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
c78ac668 3370#define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS \
3371 mn10300_preferred_output_reload_class
3372#undef TARGET_SECONDARY_RELOAD
3373#define TARGET_SECONDARY_RELOAD mn10300_secondary_reload
029ca87f 3374
3626e955 3375#undef TARGET_TRAMPOLINE_INIT
3376#define TARGET_TRAMPOLINE_INIT mn10300_trampoline_init
3377
3378#undef TARGET_FUNCTION_VALUE
3379#define TARGET_FUNCTION_VALUE mn10300_function_value
3380#undef TARGET_LIBCALL_VALUE
3381#define TARGET_LIBCALL_VALUE mn10300_libcall_value
3382
3383#undef TARGET_ASM_OUTPUT_MI_THUNK
3384#define TARGET_ASM_OUTPUT_MI_THUNK mn10300_asm_output_mi_thunk
3385#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
3386#define TARGET_ASM_CAN_OUTPUT_MI_THUNK mn10300_can_output_mi_thunk
3387
4879b320 3388#undef TARGET_SCHED_ADJUST_COST
3389#define TARGET_SCHED_ADJUST_COST mn10300_adjust_sched_cost
3390
b2d7ede1 3391#undef TARGET_CONDITIONAL_REGISTER_USAGE
3392#define TARGET_CONDITIONAL_REGISTER_USAGE mn10300_conditional_register_usage
3393
7de3ada8 3394#undef TARGET_MD_ASM_CLOBBERS
3395#define TARGET_MD_ASM_CLOBBERS mn10300_md_asm_clobbers
3396
08207c2f 3397#undef TARGET_FLAGS_REGNUM
3398#define TARGET_FLAGS_REGNUM CC_REG
3399
3626e955 3400struct gcc_target targetm = TARGET_INITIALIZER;