]> git.ipfire.org Git - thirdparty/gcc.git/blame - gcc/config/mn10300/mn10300.c
Wrap option names in gcc internal messages with %< and %>.
[thirdparty/gcc.git] / gcc / config / mn10300 / mn10300.c
CommitLineData
29a404f9 1/* Subroutines for insn-output.c for Matsushita MN10300 series
fbd26352 2 Copyright (C) 1996-2019 Free Software Foundation, Inc.
29a404f9 3 Contributed by Jeff Law (law@cygnus.com).
4
3626e955 5 This file is part of GCC.
29a404f9 6
3626e955 7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
29a404f9 11
3626e955 12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
29a404f9 16
3626e955 17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
29a404f9 20
785790dc 21#define IN_TARGET_CODE 1
22
29a404f9 23#include "config.h"
7014838c 24#include "system.h"
805e22b2 25#include "coretypes.h"
9ef16211 26#include "backend.h"
c1eb80de 27#include "target.h"
29a404f9 28#include "rtl.h"
c1eb80de 29#include "tree.h"
30a86690 30#include "stringpool.h"
31#include "attribs.h"
c1eb80de 32#include "cfghooks.h"
33#include "cfgloop.h"
9ef16211 34#include "df.h"
ad7b10a2 35#include "memmodel.h"
c1eb80de 36#include "tm_p.h"
37#include "optabs.h"
38#include "regs.h"
39#include "emit-rtl.h"
40#include "recog.h"
41#include "diagnostic-core.h"
b20a8bb4 42#include "alias.h"
9ed99284 43#include "stor-layout.h"
44#include "varasm.h"
45#include "calls.h"
29a404f9 46#include "output.h"
47#include "insn-attr.h"
8b8be022 48#include "reload.h"
d53441c8 49#include "explow.h"
29a404f9 50#include "expr.h"
e7076c21 51#include "tm-constrs.h"
94ea8568 52#include "cfgrtl.h"
b9ed1410 53#include "dumpfile.h"
f7715905 54#include "builtins.h"
29a404f9 55
0c71fb4f 56/* This file should be included last. */
4b498588 57#include "target-def.h"
58
/* This is used in the am33_2.0-linux-gnu port, in which global symbol
   names are not prefixed by underscores, to tell whether to prefix a
   label with a plus sign or not, so that the assembler can tell
   symbol names from register names.  */
int mn10300_protect_label;

/* Selected processor type for tuning.  Set from -mtune= in
   mn10300_option_override.  */
enum processor_type mn10300_tune_cpu = PROCESSOR_DEFAULT;

/* Bit flags describing which condition-code bits (zero, negative,
   carry, overflow) a comparison mode provides or a comparison code
   requires; see cc_flags_for_mode and cc_flags_for_code.  */
#define CC_FLAG_Z	1
#define CC_FLAG_N	2
#define CC_FLAG_C	4
#define CC_FLAG_V	8

/* Forward declarations; definitions appear later in this file.  */
static int cc_flags_for_mode(machine_mode);
static int cc_flags_for_code(enum rtx_code);
a767736d 75\f
4c834714 76/* Implement TARGET_OPTION_OVERRIDE. */
4c834714 77static void
78mn10300_option_override (void)
8c2c40c5 79{
80 if (TARGET_AM33)
81 target_flags &= ~MASK_MULT_BUG;
4879b320 82 else
83 {
84 /* Disable scheduling for the MN10300 as we do
85 not have timing information available for it. */
86 flag_schedule_insns = 0;
87 flag_schedule_insns_after_reload = 0;
731049b6 88
89 /* Force enable splitting of wide types, as otherwise it is trivial
90 to run out of registers. Indeed, this works so well that register
91 allocation problems are now more common *without* optimization,
92 when this flag is not enabled by default. */
93 flag_split_wide_types = 1;
4879b320 94 }
990679af 95
4879b320 96 if (mn10300_tune_string)
97 {
98 if (strcasecmp (mn10300_tune_string, "mn10300") == 0)
99 mn10300_tune_cpu = PROCESSOR_MN10300;
100 else if (strcasecmp (mn10300_tune_string, "am33") == 0)
101 mn10300_tune_cpu = PROCESSOR_AM33;
102 else if (strcasecmp (mn10300_tune_string, "am33-2") == 0)
103 mn10300_tune_cpu = PROCESSOR_AM33_2;
104 else if (strcasecmp (mn10300_tune_string, "am34") == 0)
105 mn10300_tune_cpu = PROCESSOR_AM34;
106 else
2f6d557f 107 error ("%<-mtune=%> expects mn10300, am33, am33-2, or am34");
4879b320 108 }
8c2c40c5 109}
110
92c473b8 111static void
3285410a 112mn10300_file_start (void)
29a404f9 113{
92c473b8 114 default_file_start ();
911517ac 115
b166356e 116 if (TARGET_AM33_2)
117 fprintf (asm_out_file, "\t.am33_2\n");
118 else if (TARGET_AM33)
92c473b8 119 fprintf (asm_out_file, "\t.am33\n");
29a404f9 120}
121\f
/* Names of the LIW (long instruction word) operations, indexed by the
   value of the 'W' print-operand code.
   Note: This list must match the liw_op attribute in mn10300.md.  */

static const char *liw_op_names[] =
{
  "add", "cmp", "sub", "mov",
  "and", "or", "xor",
  "asr", "lsr", "asl",
  "none", "max"
};
131
/* Print operand X using operand code CODE to assembly language output file
   FILE.

   Codes handled here:
     'W'      - LIW operation name (index into liw_op_names);
     'b'/'B'  - condition-code suffix for a comparison; 'B' prints the
		reversed condition;
     'C'      - call operand: a REG is enclosed in parentheses;
     'D'      - MEM operand in parens, or REG printed as "fd<n>";
     'L'/'H'  - least/most significant word of a 64-bit value;
     'A'      - memory operand, with a REG base forced to offset form;
     'N'      - one's complement of the low 8 bits of an integer;
     'U'      - low 8 bits of an integer;
     'S'      - shift count: immediates are masked to 5 bits;
     default  - print the operand in the usual way for its rtx code.  */

void
mn10300_print_operand (FILE *file, rtx x, int code)
{
  switch (code)
    {
    case 'W':
      {
	unsigned int liw_op = UINTVAL (x);

	gcc_assert (TARGET_ALLOW_LIW);
	gcc_assert (liw_op < LIW_OP_MAX);
	fputs (liw_op_names[liw_op], file);
	break;
      }

    case 'b':
    case 'B':
      {
	enum rtx_code cmp = GET_CODE (x);
	machine_mode mode = GET_MODE (XEXP (x, 0));
	const char *str;
	int have_flags;

	if (code == 'B')
	  cmp = reverse_condition (cmp);
	/* Which of Z/N/C/V the comparison mode actually provides.  */
	have_flags = cc_flags_for_mode (mode);

	switch (cmp)
	  {
	  case NE:
	    str = "ne";
	    break;
	  case EQ:
	    str = "eq";
	    break;
	  case GE:
	    /* bge is smaller than bnc.  */
	    str = (have_flags & CC_FLAG_V ? "ge" : "nc");
	    break;
	  case LT:
	    str = (have_flags & CC_FLAG_V ? "lt" : "ns");
	    break;
	  case GT:
	    str = "gt";
	    break;
	  case LE:
	    str = "le";
	    break;
	  case GEU:
	    str = "cc";
	    break;
	  case GTU:
	    str = "hi";
	    break;
	  case LEU:
	    str = "ls";
	    break;
	  case LTU:
	    str = "cs";
	    break;
	  case ORDERED:
	    str = "lge";
	    break;
	  case UNORDERED:
	    str = "uo";
	    break;
	  case LTGT:
	    str = "lg";
	    break;
	  case UNEQ:
	    str = "ue";
	    break;
	  case UNGE:
	    str = "uge";
	    break;
	  case UNGT:
	    str = "ug";
	    break;
	  case UNLE:
	    str = "ule";
	    break;
	  case UNLT:
	    str = "ul";
	    break;
	  default:
	    gcc_unreachable ();
	  }

	/* The chosen condition must not need flags the mode lacks.  */
	gcc_checking_assert ((cc_flags_for_code (cmp) & ~have_flags) == 0);
	fputs (str, file);
      }
      break;

    case 'C':
      /* This is used for the operand to a call instruction;
	 if it's a REG, enclose it in parens, else output
	 the operand normally.  */
      if (REG_P (x))
	{
	  fputc ('(', file);
	  mn10300_print_operand (file, x, 0);
	  fputc (')', file);
	}
      else
	mn10300_print_operand (file, x, 0);
      break;

    case 'D':
      switch (GET_CODE (x))
	{
	case MEM:
	  fputc ('(', file);
	  output_address (GET_MODE (x), XEXP (x, 0));
	  fputc (')', file);
	  break;

	case REG:
	  /* NOTE(review): 18 presumably is FIRST_FP_REGNUM, mapping the
	     register number onto the fd0..fdN names — confirm against
	     mn10300.h.  */
	  fprintf (file, "fd%d", REGNO (x) - 18);
	  break;

	default:
	  gcc_unreachable ();
	}
      break;

      /* These are the least significant word in a 64bit value.  */
    case 'L':
      switch (GET_CODE (x))
	{
	case MEM:
	  fputc ('(', file);
	  output_address (GET_MODE (x), XEXP (x, 0));
	  fputc (')', file);
	  break;

	case REG:
	  fprintf (file, "%s", reg_names[REGNO (x)]);
	  break;

	case SUBREG:
	  fprintf (file, "%s", reg_names[subreg_regno (x)]);
	  break;

	case CONST_DOUBLE:
	  {
	    long val[2];

	    switch (GET_MODE (x))
	      {
	      case E_DFmode:
		/* Low word of the target representation of a double.  */
		REAL_VALUE_TO_TARGET_DOUBLE
		  (*CONST_DOUBLE_REAL_VALUE (x), val);
		fprintf (file, "0x%lx", val[0]);
		break;;
	      case E_SFmode:
		REAL_VALUE_TO_TARGET_SINGLE
		  (*CONST_DOUBLE_REAL_VALUE (x), val[0]);
		fprintf (file, "0x%lx", val[0]);
		break;;
	      case E_VOIDmode:
	      case E_DImode:
		mn10300_print_operand_address (file,
					       GEN_INT (CONST_DOUBLE_LOW (x)));
		break;
	      default:
		break;
	      }
	    break;
	  }

	case CONST_INT:
	  {
	    rtx low, high;
	    split_double (x, &low, &high);
	    fprintf (file, "%ld", (long)INTVAL (low));
	    break;
	  }

	default:
	  gcc_unreachable ();
	}
      break;

      /* Similarly, but for the most significant word.  */
    case 'H':
      switch (GET_CODE (x))
	{
	case MEM:
	  fputc ('(', file);
	  /* Address the following (high) word of the memory operand.  */
	  x = adjust_address (x, SImode, 4);
	  output_address (GET_MODE (x), XEXP (x, 0));
	  fputc (')', file);
	  break;

	case REG:
	  fprintf (file, "%s", reg_names[REGNO (x) + 1]);
	  break;

	case SUBREG:
	  fprintf (file, "%s", reg_names[subreg_regno (x) + 1]);
	  break;

	case CONST_DOUBLE:
	  {
	    long val[2];

	    switch (GET_MODE (x))
	      {
	      case E_DFmode:
		REAL_VALUE_TO_TARGET_DOUBLE
		  (*CONST_DOUBLE_REAL_VALUE (x), val);
		fprintf (file, "0x%lx", val[1]);
		break;;
	      case E_SFmode:
		/* A single-precision value has no high word.  */
		gcc_unreachable ();
	      case E_VOIDmode:
	      case E_DImode:
		mn10300_print_operand_address (file,
					       GEN_INT (CONST_DOUBLE_HIGH (x)));
		break;
	      default:
		break;
	      }
	    break;
	  }

	case CONST_INT:
	  {
	    rtx low, high;
	    split_double (x, &low, &high);
	    fprintf (file, "%ld", (long)INTVAL (high));
	    break;
	  }

	default:
	  gcc_unreachable ();
	}
      break;

    case 'A':
      fputc ('(', file);
      /* Adding const0_rtx forces a bare REG base to be printed in
	 "(0,reg)" offset form rather than "(reg)".  */
      if (REG_P (XEXP (x, 0)))
	output_address (VOIDmode, gen_rtx_PLUS (SImode,
						XEXP (x, 0), const0_rtx));
      else
	output_address (VOIDmode, XEXP (x, 0));
      fputc (')', file);
      break;

    case 'N':
      gcc_assert (INTVAL (x) >= -128 && INTVAL (x) <= 255);
      fprintf (file, "%d", (int)((~INTVAL (x)) & 0xff));
      break;

    case 'U':
      gcc_assert (INTVAL (x) >= -128 && INTVAL (x) <= 255);
      fprintf (file, "%d", (int)(INTVAL (x) & 0xff));
      break;

      /* For shift counts.  The hardware ignores the upper bits of
	 any immediate, but the assembler will flag an out of range
	 shift count as an error.  So we mask off the high bits
	 of the immediate here.  */
    case 'S':
      if (CONST_INT_P (x))
	{
	  fprintf (file, "%d", (int)(INTVAL (x) & 0x1f));
	  break;
	}
      /* FALL THROUGH */

    default:
      switch (GET_CODE (x))
	{
	case MEM:
	  fputc ('(', file);
	  output_address (GET_MODE (x), XEXP (x, 0));
	  fputc (')', file);
	  break;

	case PLUS:
	  output_address (VOIDmode, x);
	  break;

	case REG:
	  fprintf (file, "%s", reg_names[REGNO (x)]);
	  break;

	case SUBREG:
	  fprintf (file, "%s", reg_names[subreg_regno (x)]);
	  break;

	  /* This will only be single precision....  */
	case CONST_DOUBLE:
	  {
	    unsigned long val;

	    REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), val);
	    fprintf (file, "0x%lx", val);
	    break;
	  }

	case CONST_INT:
	case SYMBOL_REF:
	case CONST:
	case LABEL_REF:
	case CODE_LABEL:
	case UNSPEC:
	  mn10300_print_operand_address (file, x);
	  break;
	default:
	  gcc_unreachable ();
	}
      break;
    }
}
451
452/* Output assembly language output for the address ADDR to FILE. */
453
454void
3626e955 455mn10300_print_operand_address (FILE *file, rtx addr)
29a404f9 456{
457 switch (GET_CODE (addr))
458 {
911517ac 459 case POST_INC:
c8a596d6 460 mn10300_print_operand (file, XEXP (addr, 0), 0);
911517ac 461 fputc ('+', file);
462 break;
c8a596d6 463
464 case POST_MODIFY:
465 mn10300_print_operand (file, XEXP (addr, 0), 0);
466 fputc ('+', file);
467 fputc (',', file);
468 mn10300_print_operand (file, XEXP (addr, 1), 0);
469 break;
470
29a404f9 471 case REG:
3626e955 472 mn10300_print_operand (file, addr, 0);
29a404f9 473 break;
474 case PLUS:
475 {
c8a596d6 476 rtx base = XEXP (addr, 0);
477 rtx index = XEXP (addr, 1);
478
479 if (REG_P (index) && !REG_OK_FOR_INDEX_P (index))
480 {
481 rtx x = base;
482 base = index;
483 index = x;
484
485 gcc_assert (REG_P (index) && REG_OK_FOR_INDEX_P (index));
486 }
487 gcc_assert (REG_OK_FOR_BASE_P (base));
488
3626e955 489 mn10300_print_operand (file, index, 0);
29a404f9 490 fputc (',', file);
c8a596d6 491 mn10300_print_operand (file, base, 0);
29a404f9 492 break;
493 }
494 case SYMBOL_REF:
495 output_addr_const (file, addr);
496 break;
497 default:
498 output_addr_const (file, addr);
499 break;
500 }
501}
502
22680c28 503/* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA.
504
505 Used for PIC-specific UNSPECs. */
506
507static bool
508mn10300_asm_output_addr_const_extra (FILE *file, rtx x)
509{
510 if (GET_CODE (x) == UNSPEC)
511 {
512 switch (XINT (x, 1))
513 {
22680c28 514 case UNSPEC_PIC:
515 /* GLOBAL_OFFSET_TABLE or local symbols, no suffix. */
516 output_addr_const (file, XVECEXP (x, 0, 0));
517 break;
518 case UNSPEC_GOT:
519 output_addr_const (file, XVECEXP (x, 0, 0));
520 fputs ("@GOT", file);
521 break;
522 case UNSPEC_GOTOFF:
523 output_addr_const (file, XVECEXP (x, 0, 0));
524 fputs ("@GOTOFF", file);
525 break;
526 case UNSPEC_PLT:
527 output_addr_const (file, XVECEXP (x, 0, 0));
528 fputs ("@PLT", file);
529 break;
530 case UNSPEC_GOTSYM_OFF:
531 assemble_name (file, GOT_SYMBOL_NAME);
532 fputs ("-(", file);
533 output_addr_const (file, XVECEXP (x, 0, 0));
534 fputs ("-.)", file);
535 break;
536 default:
537 return false;
538 }
539 return true;
540 }
541 else
542 return false;
543}
544
b166356e 545/* Count the number of FP registers that have to be saved. */
546static int
3285410a 547fp_regs_to_save (void)
b166356e 548{
549 int i, n = 0;
550
551 if (! TARGET_AM33_2)
552 return 0;
553
554 for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
d37e81ec 555 if (df_regs_ever_live_p (i) && ! call_really_used_regs[i])
b166356e 556 ++n;
557
558 return n;
559}
560
4caa3669 561/* Print a set of registers in the format required by "movm" and "ret".
562 Register K is saved if bit K of MASK is set. The data and address
563 registers can be stored individually, but the extended registers cannot.
f2b32076 564 We assume that the mask already takes that into account. For instance,
09e5ce26 565 bits 14 to 17 must have the same value. */
4caa3669 566
567void
3285410a 568mn10300_print_reg_list (FILE *file, int mask)
4caa3669 569{
570 int need_comma;
571 int i;
572
573 need_comma = 0;
574 fputc ('[', file);
575
576 for (i = 0; i < FIRST_EXTENDED_REGNUM; i++)
577 if ((mask & (1 << i)) != 0)
578 {
579 if (need_comma)
580 fputc (',', file);
581 fputs (reg_names [i], file);
582 need_comma = 1;
583 }
584
585 if ((mask & 0x3c000) != 0)
586 {
cf41bb03 587 gcc_assert ((mask & 0x3c000) == 0x3c000);
4caa3669 588 if (need_comma)
589 fputc (',', file);
590 fputs ("exreg1", file);
591 need_comma = 1;
592 }
593
594 fputc (']', file);
595}
596
ad3e6900 597/* If the MDR register is never clobbered, we can use the RETF instruction
598 which takes the address from the MDR register. This is 3 cycles faster
599 than having to load the address from the stack. */
600
601bool
602mn10300_can_use_retf_insn (void)
603{
604 /* Don't bother if we're not optimizing. In this case we won't
605 have proper access to df_regs_ever_live_p. */
606 if (!optimize)
607 return false;
608
609 /* EH returns alter the saved return address; MDR is not current. */
610 if (crtl->calls_eh_return)
611 return false;
612
613 /* Obviously not if MDR is ever clobbered. */
614 if (df_regs_ever_live_p (MDR_REG))
615 return false;
616
617 /* ??? Careful not to use this during expand_epilogue etc. */
618 gcc_assert (!in_sequence_p ());
619 return leaf_function_p ();
620}
621
622bool
623mn10300_can_use_rets_insn (void)
6ce19398 624{
6f22c3b4 625 return !mn10300_initial_offset (ARG_POINTER_REGNUM, STACK_POINTER_REGNUM);
6ce19398 626}
627
4caa3669 628/* Returns the set of live, callee-saved registers as a bitmask. The
629 callee-saved extended registers cannot be stored individually, so
23ecf105 630 all of them will be included in the mask if any one of them is used.
d876ba6e 631 Also returns the number of bytes in the registers in the mask if
632 BYTES_SAVED is not NULL. */
4caa3669 633
d876ba6e 634unsigned int
635mn10300_get_live_callee_saved_regs (unsigned int * bytes_saved)
4caa3669 636{
637 int mask;
638 int i;
d876ba6e 639 unsigned int count;
4caa3669 640
d876ba6e 641 count = mask = 0;
b166356e 642 for (i = 0; i <= LAST_EXTENDED_REGNUM; i++)
d37e81ec 643 if (df_regs_ever_live_p (i) && ! call_really_used_regs[i])
d876ba6e 644 {
645 mask |= (1 << i);
646 ++ count;
647 }
648
4caa3669 649 if ((mask & 0x3c000) != 0)
d876ba6e 650 {
651 for (i = 0x04000; i < 0x40000; i <<= 1)
652 if ((mask & i) == 0)
653 ++ count;
654
655 mask |= 0x3c000;
656 }
657
658 if (bytes_saved)
659 * bytes_saved = count * UNITS_PER_WORD;
4caa3669 660
661 return mask;
662}
663
5f2853dd 664static rtx
665F (rtx r)
666{
667 RTX_FRAME_RELATED_P (r) = 1;
668 return r;
669}
670
/* Generate an instruction that pushes several registers onto the stack.
   Register K will be saved if bit K in MASK is set.  The function does
   nothing if MASK is zero.

   To be compatible with the "movm" instruction, the lowest-numbered
   register must be stored in the lowest slot.  If MASK is the set
   { R1,...,RN }, where R1...RN are ordered least first, the generated
   instruction will have the form:

   (parallel
     (set (reg:SI 9) (plus:SI (reg:SI 9) (const_int -N*4)))
     (set (mem:SI (plus:SI (reg:SI 9)
			   (const_int -1*4)))
	  (reg:SI RN))
     ...
     (set (mem:SI (plus:SI (reg:SI 9)
			   (const_int -N*4)))
	  (reg:SI R1))) */

static void
mn10300_gen_multiple_store (unsigned int mask)
{
  /* The order in which registers are stored, from SP-4 through SP-N*4.  */
  static const unsigned int store_order[8] = {
    /* e2, e3: never saved */
    FIRST_EXTENDED_REGNUM + 4,
    FIRST_EXTENDED_REGNUM + 5,
    FIRST_EXTENDED_REGNUM + 6,
    FIRST_EXTENDED_REGNUM + 7,
    /* e0, e1, mdrq, mcrh, mcrl, mcvf: never saved. */
    FIRST_DATA_REGNUM + 2,
    FIRST_DATA_REGNUM + 3,
    FIRST_ADDRESS_REGNUM + 2,
    FIRST_ADDRESS_REGNUM + 3,
    /* d0, d1, a0, a1, mdr, lir, lar: never saved. */
  };

  rtx x, elts[9];
  unsigned int i;
  int count;

  if (mask == 0)
    return;

  /* Build one frame-related (set (mem ...) (reg ...)) per saved
     register; elts[0] is reserved for the SP update below.  */
  for (i = count = 0; i < ARRAY_SIZE(store_order); ++i)
    {
      unsigned regno = store_order[i];

      if (((mask >> regno) & 1) == 0)
	continue;

      ++count;
      x = plus_constant (Pmode, stack_pointer_rtx, count * -4);
      x = gen_frame_mem (SImode, x);
      x = gen_rtx_SET (x, gen_rtx_REG (SImode, regno));
      elts[count] = F(x);

      /* Remove the register from the mask so that... */
      mask &= ~(1u << regno);
    }

  /* ... we can make sure that we didn't try to use a register
     not listed in the store order.  */
  gcc_assert (mask == 0);

  /* Create the instruction that updates the stack pointer.  */
  x = plus_constant (Pmode, stack_pointer_rtx, count * -4);
  x = gen_rtx_SET (stack_pointer_rtx, x);
  elts[0] = F(x);

  /* We need one PARALLEL element to update the stack pointer and
     an additional element for each register that is stored.  */
  x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (count + 1, elts));
  F (emit_insn (x));
}
746
/* Return the number of set bits in MASK (population count).  */
static inline unsigned int
popcount (unsigned int mask)
{
  unsigned int n;

  /* Kernighan's trick: each iteration clears the lowest set bit.  */
  for (n = 0; mask != 0; n++)
    mask &= mask - 1;

  return n;
}
759
/* Emit RTL for the function prologue: push the live callee-saved
   general registers as one PARALLEL store, save any live FP registers
   (AM33/2.0 only) using the cheapest of several code-size strategies,
   set up the frame pointer if required, allocate the local frame, and
   load the PIC register when it is live.  */
void
mn10300_expand_prologue (void)
{
  HOST_WIDE_INT size = mn10300_frame_size ();
  unsigned int mask;

  mask = mn10300_get_live_callee_saved_regs (NULL);
  /* If we use any of the callee-saved registers, save them now.  */
  mn10300_gen_multiple_store (mask);

  if (flag_stack_usage_info)
    current_function_static_stack_size = size + popcount (mask) * 4;

  if (TARGET_AM33_2 && fp_regs_to_save ())
    {
      int num_regs_to_save = fp_regs_to_save (), i;
      HOST_WIDE_INT xsize;
      enum
      {
	save_sp_merge,
	save_sp_no_merge,
	save_sp_partial_merge,
	save_a0_merge,
	save_a0_no_merge
      } strategy;
      /* strategy_size starts at UINT_MAX; save_sp_no_merge is costed
	 unconditionally below, so STRATEGY is always assigned.  */
      unsigned int strategy_size = (unsigned)-1, this_strategy_size;
      rtx reg;

      if (flag_stack_usage_info)
	current_function_static_stack_size += num_regs_to_save * 4;

      /* We have several different strategies to save FP registers.
	 We can store them using SP offsets, which is beneficial if
	 there are just a few registers to save, or we can use `a0' in
	 post-increment mode (`a0' is the only call-clobbered address
	 register that is never used to pass information to a
	 function).  Furthermore, if we don't need a frame pointer, we
	 can merge the two SP adds into a single one, but this isn't
	 always beneficial; sometimes we can just split the two adds
	 so that we don't exceed a 16-bit constant size.  The code
	 below will select which strategy to use, so as to generate
	 smallest code.  Ties are broken in favor or shorter sequences
	 (in terms of number of instructions).  */

/* Byte size of an "add #imm, An" / "add #imm, sp" instruction for a
   given immediate S.  */
#define SIZE_ADD_AX(S) ((((S) >= (1 << 15)) || ((S) < -(1 << 15))) ? 6 \
			: (((S) >= (1 << 7)) || ((S) < -(1 << 7))) ? 4 : 2)
#define SIZE_ADD_SP(S) ((((S) >= (1 << 15)) || ((S) < -(1 << 15))) ? 6 \
			: (((S) >= (1 << 7)) || ((S) < -(1 << 7))) ? 4 : 3)

/* We add 0 * (S) in two places to promote to the type of S,
   so that all arms of the conditional have the same type.  */
#define SIZE_FMOV_LIMIT(S,N,L,SIZE1,SIZE2,ELSE) \
  (((S) >= (L)) ? 0 * (S) + (SIZE1) * (N) \
   : ((S) + 4 * (N) >= (L)) ? (((L) - (S)) / 4 * (SIZE2) \
			       + ((S) + 4 * (N) - (L)) / 4 * (SIZE1)) \
   : 0 * (S) + (ELSE))
/* Byte size of N "fmov fs#, (#off, sp)" stores starting at offset S.  */
#define SIZE_FMOV_SP_(S,N) \
  (SIZE_FMOV_LIMIT ((S), (N), (1 << 24), 7, 6, \
   SIZE_FMOV_LIMIT ((S), (N), (1 << 8), 6, 4, \
		    (S) ? 4 * (N) : 3 + 4 * ((N) - 1))))
#define SIZE_FMOV_SP(S,N) (SIZE_FMOV_SP_ ((unsigned HOST_WIDE_INT)(S), (N)))

      /* Consider alternative save_sp_merge only if we don't need the
	 frame pointer and size is nonzero.  */
      if (! frame_pointer_needed && size)
	{
	  /* Insn: add -(size + 4 * num_regs_to_save), sp.  */
	  this_strategy_size = SIZE_ADD_SP (-(size + 4 * num_regs_to_save));
	  /* Insn: fmov fs#, (##, sp), for each fs# to be saved.  */
	  this_strategy_size += SIZE_FMOV_SP (size, num_regs_to_save);

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = save_sp_merge;
	      strategy_size = this_strategy_size;
	    }
	}

      /* Consider alternative save_sp_no_merge unconditionally.  */
      /* Insn: add -4 * num_regs_to_save, sp.  */
      this_strategy_size = SIZE_ADD_SP (-4 * num_regs_to_save);
      /* Insn: fmov fs#, (##, sp), for each fs# to be saved.  */
      this_strategy_size += SIZE_FMOV_SP (0, num_regs_to_save);
      if (size)
	{
	  /* Insn: add -size, sp.  */
	  this_strategy_size += SIZE_ADD_SP (-size);
	}

      if (this_strategy_size < strategy_size)
	{
	  strategy = save_sp_no_merge;
	  strategy_size = this_strategy_size;
	}

      /* Consider alternative save_sp_partial_merge only if we don't
	 need a frame pointer and size is reasonably large.  */
      if (! frame_pointer_needed && size + 4 * num_regs_to_save > 128)
	{
	  /* Insn: add -128, sp.  */
	  this_strategy_size = SIZE_ADD_SP (-128);
	  /* Insn: fmov fs#, (##, sp), for each fs# to be saved.  */
	  this_strategy_size += SIZE_FMOV_SP (128 - 4 * num_regs_to_save,
					      num_regs_to_save);
	  if (size)
	    {
	      /* Insn: add 128-size, sp.  */
	      this_strategy_size += SIZE_ADD_SP (128 - size);
	    }

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = save_sp_partial_merge;
	      strategy_size = this_strategy_size;
	    }
	}

      /* Consider alternative save_a0_merge only if we don't need a
	 frame pointer, size is nonzero and the user hasn't
	 changed the calling conventions of a0.  */
      if (! frame_pointer_needed && size
	  && call_really_used_regs [FIRST_ADDRESS_REGNUM]
	  && ! fixed_regs[FIRST_ADDRESS_REGNUM])
	{
	  /* Insn: add -(size + 4 * num_regs_to_save), sp.  */
	  this_strategy_size = SIZE_ADD_SP (-(size + 4 * num_regs_to_save));
	  /* Insn: mov sp, a0.  */
	  this_strategy_size++;
	  if (size)
	    {
	      /* Insn: add size, a0.  */
	      this_strategy_size += SIZE_ADD_AX (size);
	    }
	  /* Insn: fmov fs#, (a0+), for each fs# to be saved.  */
	  this_strategy_size += 3 * num_regs_to_save;

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = save_a0_merge;
	      strategy_size = this_strategy_size;
	    }
	}

      /* Consider alternative save_a0_no_merge if the user hasn't
	 changed the calling conventions of a0.  */
      if (call_really_used_regs [FIRST_ADDRESS_REGNUM]
	  && ! fixed_regs[FIRST_ADDRESS_REGNUM])
	{
	  /* Insn: add -4 * num_regs_to_save, sp.  */
	  this_strategy_size = SIZE_ADD_SP (-4 * num_regs_to_save);
	  /* Insn: mov sp, a0.  */
	  this_strategy_size++;
	  /* Insn: fmov fs#, (a0+), for each fs# to be saved.  */
	  this_strategy_size += 3 * num_regs_to_save;
	  if (size)
	    {
	      /* Insn: add -size, sp.  */
	      this_strategy_size += SIZE_ADD_SP (-size);
	    }

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = save_a0_no_merge;
	      strategy_size = this_strategy_size;
	    }
	}

      /* Emit the initial SP add, common to all strategies.  */
      switch (strategy)
	{
	case save_sp_no_merge:
	case save_a0_no_merge:
	  F (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-4 * num_regs_to_save))));
	  xsize = 0;
	  break;

	case save_sp_partial_merge:
	  F (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-128))));
	  xsize = 128 - 4 * num_regs_to_save;
	  size -= xsize;
	  break;

	case save_sp_merge:
	case save_a0_merge:
	  F (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-(size + 4 * num_regs_to_save)))));
	  /* We'll have to adjust FP register saves according to the
	     frame size.  */
	  xsize = size;
	  /* Since we've already created the stack frame, don't do it
	     again at the end of the function.  */
	  size = 0;
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Now prepare register a0, if we have decided to use it.  */
      switch (strategy)
	{
	case save_sp_merge:
	case save_sp_no_merge:
	case save_sp_partial_merge:
	  reg = 0;
	  break;

	case save_a0_merge:
	case save_a0_no_merge:
	  reg = gen_rtx_REG (SImode, FIRST_ADDRESS_REGNUM);
	  F (emit_insn (gen_movsi (reg, stack_pointer_rtx)));
	  if (xsize)
	    F (emit_insn (gen_addsi3 (reg, reg, GEN_INT (xsize))));
	  /* All FP stores below go through a0 post-increment.  */
	  reg = gen_rtx_POST_INC (SImode, reg);
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Now actually save the FP registers.  */
      for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
	if (df_regs_ever_live_p (i) && ! call_really_used_regs [i])
	  {
	    rtx addr;

	    if (reg)
	      addr = reg;
	    else
	      {
		/* If we aren't using `a0', use an SP offset.  */
		if (xsize)
		  {
		    addr = gen_rtx_PLUS (SImode,
					 stack_pointer_rtx,
					 GEN_INT (xsize));
		  }
		else
		  addr = stack_pointer_rtx;

		/* Next register goes in the following slot.  */
		xsize += 4;
	      }

	    F (emit_insn (gen_movsf (gen_rtx_MEM (SFmode, addr),
				     gen_rtx_REG (SFmode, i))));
	  }
    }

  /* Now put the frame pointer into the frame pointer register.  */
  if (frame_pointer_needed)
    F (emit_move_insn (frame_pointer_rtx, stack_pointer_rtx));

  /* Allocate stack for this frame.  */
  if (size)
    F (emit_insn (gen_addsi3 (stack_pointer_rtx,
			      stack_pointer_rtx,
			      GEN_INT (-size))));

  if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
    emit_insn (gen_load_pic ());
}
1026
1027void
3626e955 1028mn10300_expand_epilogue (void)
29a404f9 1029{
6f22c3b4 1030 HOST_WIDE_INT size = mn10300_frame_size ();
d876ba6e 1031 unsigned int reg_save_bytes;
1032
1033 mn10300_get_live_callee_saved_regs (& reg_save_bytes);
1034
b166356e 1035 if (TARGET_AM33_2 && fp_regs_to_save ())
1036 {
1037 int num_regs_to_save = fp_regs_to_save (), i;
1038 rtx reg = 0;
1039
1040 /* We have several options to restore FP registers. We could
1041 load them from SP offsets, but, if there are enough FP
1042 registers to restore, we win if we use a post-increment
1043 addressing mode. */
1044
1045 /* If we have a frame pointer, it's the best option, because we
1046 already know it has the value we want. */
1047 if (frame_pointer_needed)
1048 reg = gen_rtx_REG (SImode, FRAME_POINTER_REGNUM);
1049 /* Otherwise, we may use `a1', since it's call-clobbered and
1050 it's never used for return values. But only do so if it's
1051 smaller than using SP offsets. */
1052 else
1053 {
1054 enum { restore_sp_post_adjust,
1055 restore_sp_pre_adjust,
1056 restore_sp_partial_adjust,
1057 restore_a1 } strategy;
1058 unsigned int this_strategy_size, strategy_size = (unsigned)-1;
1059
1060 /* Consider using sp offsets before adjusting sp. */
1061 /* Insn: fmov (##,sp),fs#, for each fs# to be restored. */
1062 this_strategy_size = SIZE_FMOV_SP (size, num_regs_to_save);
1063 /* If size is too large, we'll have to adjust SP with an
1064 add. */
ad3e6900 1065 if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
b166356e 1066 {
1067 /* Insn: add size + 4 * num_regs_to_save, sp. */
1068 this_strategy_size += SIZE_ADD_SP (size + 4 * num_regs_to_save);
1069 }
1070 /* If we don't have to restore any non-FP registers,
1071 we'll be able to save one byte by using rets. */
ad3e6900 1072 if (! reg_save_bytes)
b166356e 1073 this_strategy_size--;
1074
1075 if (this_strategy_size < strategy_size)
1076 {
1077 strategy = restore_sp_post_adjust;
1078 strategy_size = this_strategy_size;
1079 }
1080
1081 /* Consider using sp offsets after adjusting sp. */
1082 /* Insn: add size, sp. */
1083 this_strategy_size = SIZE_ADD_SP (size);
1084 /* Insn: fmov (##,sp),fs#, for each fs# to be restored. */
1085 this_strategy_size += SIZE_FMOV_SP (0, num_regs_to_save);
1086 /* We're going to use ret to release the FP registers
09e5ce26 1087 save area, so, no savings. */
b166356e 1088
1089 if (this_strategy_size < strategy_size)
1090 {
1091 strategy = restore_sp_pre_adjust;
1092 strategy_size = this_strategy_size;
1093 }
1094
1095 /* Consider using sp offsets after partially adjusting sp.
1096 When size is close to 32Kb, we may be able to adjust SP
1097 with an imm16 add instruction while still using fmov
1098 (d8,sp). */
ad3e6900 1099 if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
b166356e 1100 {
1101 /* Insn: add size + 4 * num_regs_to_save
ad3e6900 1102 + reg_save_bytes - 252,sp. */
b166356e 1103 this_strategy_size = SIZE_ADD_SP (size + 4 * num_regs_to_save
df70f973 1104 + (int) reg_save_bytes - 252);
b166356e 1105 /* Insn: fmov (##,sp),fs#, fo each fs# to be restored. */
ad3e6900 1106 this_strategy_size += SIZE_FMOV_SP (252 - reg_save_bytes
b166356e 1107 - 4 * num_regs_to_save,
1108 num_regs_to_save);
1109 /* We're going to use ret to release the FP registers
09e5ce26 1110 save area, so, no savings. */
b166356e 1111
1112 if (this_strategy_size < strategy_size)
1113 {
1114 strategy = restore_sp_partial_adjust;
1115 strategy_size = this_strategy_size;
1116 }
1117 }
1118
1119 /* Consider using a1 in post-increment mode, as long as the
1120 user hasn't changed the calling conventions of a1. */
d37e81ec 1121 if (call_really_used_regs [FIRST_ADDRESS_REGNUM + 1]
b166356e 1122 && ! fixed_regs[FIRST_ADDRESS_REGNUM+1])
1123 {
1124 /* Insn: mov sp,a1. */
1125 this_strategy_size = 1;
1126 if (size)
1127 {
1128 /* Insn: add size,a1. */
1129 this_strategy_size += SIZE_ADD_AX (size);
1130 }
1131 /* Insn: fmov (a1+),fs#, for each fs# to be restored. */
1132 this_strategy_size += 3 * num_regs_to_save;
1133 /* If size is large enough, we may be able to save a
1134 couple of bytes. */
ad3e6900 1135 if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
b166356e 1136 {
1137 /* Insn: mov a1,sp. */
1138 this_strategy_size += 2;
1139 }
1140 /* If we don't have to restore any non-FP registers,
1141 we'll be able to save one byte by using rets. */
ad3e6900 1142 if (! reg_save_bytes)
b166356e 1143 this_strategy_size--;
1144
1145 if (this_strategy_size < strategy_size)
1146 {
1147 strategy = restore_a1;
1148 strategy_size = this_strategy_size;
1149 }
1150 }
1151
1152 switch (strategy)
1153 {
1154 case restore_sp_post_adjust:
1155 break;
1156
1157 case restore_sp_pre_adjust:
1158 emit_insn (gen_addsi3 (stack_pointer_rtx,
1159 stack_pointer_rtx,
1160 GEN_INT (size)));
1161 size = 0;
1162 break;
1163
1164 case restore_sp_partial_adjust:
1165 emit_insn (gen_addsi3 (stack_pointer_rtx,
1166 stack_pointer_rtx,
1167 GEN_INT (size + 4 * num_regs_to_save
ad3e6900 1168 + reg_save_bytes - 252)));
1169 size = 252 - reg_save_bytes - 4 * num_regs_to_save;
b166356e 1170 break;
fb16c776 1171
b166356e 1172 case restore_a1:
1173 reg = gen_rtx_REG (SImode, FIRST_ADDRESS_REGNUM + 1);
1174 emit_insn (gen_movsi (reg, stack_pointer_rtx));
1175 if (size)
1176 emit_insn (gen_addsi3 (reg, reg, GEN_INT (size)));
1177 break;
1178
1179 default:
cf41bb03 1180 gcc_unreachable ();
b166356e 1181 }
1182 }
1183
1184 /* Adjust the selected register, if any, for post-increment. */
1185 if (reg)
1186 reg = gen_rtx_POST_INC (SImode, reg);
1187
1188 for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
d37e81ec 1189 if (df_regs_ever_live_p (i) && ! call_really_used_regs [i])
b166356e 1190 {
1191 rtx addr;
fb16c776 1192
b166356e 1193 if (reg)
1194 addr = reg;
1195 else if (size)
1196 {
1197 /* If we aren't using a post-increment register, use an
09e5ce26 1198 SP offset. */
b166356e 1199 addr = gen_rtx_PLUS (SImode,
1200 stack_pointer_rtx,
1201 GEN_INT (size));
1202 }
1203 else
1204 addr = stack_pointer_rtx;
1205
1206 size += 4;
1207
5f2853dd 1208 emit_insn (gen_movsf (gen_rtx_REG (SFmode, i),
1209 gen_rtx_MEM (SFmode, addr)));
b166356e 1210 }
1211
1212 /* If we were using the restore_a1 strategy and the number of
1213 bytes to be released won't fit in the `ret' byte, copy `a1'
1214 to `sp', to avoid having to use `add' to adjust it. */
ad3e6900 1215 if (! frame_pointer_needed && reg && size + reg_save_bytes > 255)
b166356e 1216 {
1217 emit_move_insn (stack_pointer_rtx, XEXP (reg, 0));
1218 size = 0;
1219 }
1220 }
1221
461cabcc 1222 /* Maybe cut back the stack, except for the register save area.
1223
1224 If the frame pointer exists, then use the frame pointer to
1225 cut back the stack.
1226
1227 If the stack size + register save area is more than 255 bytes,
1228 then the stack must be cut back here since the size + register
fb16c776 1229 save size is too big for a ret/retf instruction.
461cabcc 1230
1231 Else leave it alone, it will be cut back as part of the
1232 ret/retf instruction, or there wasn't any stack to begin with.
1233
dfd1079d 1234 Under no circumstances should the register save area be
461cabcc 1235 deallocated here, that would leave a window where an interrupt
1236 could occur and trash the register save area. */
29a404f9 1237 if (frame_pointer_needed)
1238 {
29a404f9 1239 emit_move_insn (stack_pointer_rtx, frame_pointer_rtx);
b21218d6 1240 size = 0;
1241 }
ad3e6900 1242 else if (size + reg_save_bytes > 255)
b21218d6 1243 {
1244 emit_insn (gen_addsi3 (stack_pointer_rtx,
1245 stack_pointer_rtx,
1246 GEN_INT (size)));
1247 size = 0;
29a404f9 1248 }
29a404f9 1249
55daf463 1250 /* Adjust the stack and restore callee-saved registers, if any. */
ad3e6900 1251 if (mn10300_can_use_rets_insn ())
1a860023 1252 emit_jump_insn (ret_rtx);
48cb86e3 1253 else
d876ba6e 1254 emit_jump_insn (gen_return_ret (GEN_INT (size + reg_save_bytes)));
29a404f9 1255}
1256
/* Recognize the PARALLEL rtx generated by mn10300_gen_multiple_store().
   This function is for MATCH_PARALLEL and so assumes OP is known to be
   parallel.  If OP is a multiple store, return a mask indicating which
   registers it saves (bit N set means hard register N is stored).
   Return 0 otherwise.  */

unsigned int
mn10300_store_multiple_regs (rtx op)
{
  int count;
  int mask;
  int i;
  unsigned int last;
  rtx elt;

  /* A multiple store must contain the SP adjustment plus at least one
     register save.  */
  count = XVECLEN (op, 0);
  if (count < 2)
    return 0;

  /* Check that first instruction has the form (set (sp) (plus A B)) */
  elt = XVECEXP (op, 0, 0);
  if (GET_CODE (elt) != SET
      || (! REG_P (SET_DEST (elt)))
      || REGNO (SET_DEST (elt)) != STACK_POINTER_REGNUM
      || GET_CODE (SET_SRC (elt)) != PLUS)
    return 0;

  /* Check that A is the stack pointer and B is the expected stack size.
     For OP to match, each subsequent instruction should push a word onto
     the stack.  We therefore expect the first instruction to create
     COUNT-1 stack slots.  */
  elt = SET_SRC (elt);
  if ((! REG_P (XEXP (elt, 0)))
      || REGNO (XEXP (elt, 0)) != STACK_POINTER_REGNUM
      || (! CONST_INT_P (XEXP (elt, 1)))
      || INTVAL (XEXP (elt, 1)) != -(count - 1) * 4)
    return 0;

  mask = 0;
  for (i = 1; i < count; i++)
    {
      /* Check that element i is a (set (mem M) R).  */
      /* ??? Validate the register order a-la mn10300_gen_multiple_store.
	 Remember: the ordering is *not* monotonic.  */
      elt = XVECEXP (op, 0, i);
      if (GET_CODE (elt) != SET
	  || (! MEM_P (SET_DEST (elt)))
	  || (! REG_P (SET_SRC (elt))))
	return 0;

      /* Remember which registers are to be saved.  */
      last = REGNO (SET_SRC (elt));
      mask |= (1 << last);

      /* Check that M has the form (plus (sp) (const_int -I*4)) */
      elt = XEXP (SET_DEST (elt), 0);
      if (GET_CODE (elt) != PLUS
	  || (! REG_P (XEXP (elt, 0)))
	  || REGNO (XEXP (elt, 0)) != STACK_POINTER_REGNUM
	  || (! CONST_INT_P (XEXP (elt, 1)))
	  || INTVAL (XEXP (elt, 1)) != -i * 4)
	return 0;
    }

  /* All or none of the callee-saved extended registers must be in the set.  */
  if ((mask & 0x3c000) != 0
      && (mask & 0x3c000) != 0x3c000)
    return 0;

  return mask;
}
1327
029ca87f 1328/* Implement TARGET_PREFERRED_RELOAD_CLASS. */
1329
1330static reg_class_t
1331mn10300_preferred_reload_class (rtx x, reg_class_t rclass)
1332{
1333 if (x == stack_pointer_rtx && rclass != SP_REGS)
c78ac668 1334 return (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
029ca87f 1335 else if (MEM_P (x)
1336 || (REG_P (x)
1337 && !HARD_REGISTER_P (x))
1338 || (GET_CODE (x) == SUBREG
1339 && REG_P (SUBREG_REG (x))
1340 && !HARD_REGISTER_P (SUBREG_REG (x))))
1341 return LIMIT_RELOAD_CLASS (GET_MODE (x), rclass);
1342 else
1343 return rclass;
1344}
1345
1346/* Implement TARGET_PREFERRED_OUTPUT_RELOAD_CLASS. */
1347
1348static reg_class_t
1349mn10300_preferred_output_reload_class (rtx x, reg_class_t rclass)
1350{
1351 if (x == stack_pointer_rtx && rclass != SP_REGS)
c78ac668 1352 return (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
029ca87f 1353 return rclass;
1354}
1355
/* Implement TARGET_SECONDARY_RELOAD.  IN_P is true when X is being
   loaded into a register of class RCLASS_I in mode MODE, false when it
   is being stored from one.  Returns the class of an intermediate
   register needed for the copy, or NO_REGS; SRI may be filled in with
   a dedicated reload insn instead.  */

static reg_class_t
mn10300_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
			  machine_mode mode, secondary_reload_info *sri)
{
  enum reg_class rclass = (enum reg_class) rclass_i;
  enum reg_class xclass = NO_REGS;
  unsigned int xregno = INVALID_REGNUM;

  /* Work out the register class of X itself, looking through pseudos
     that have already been assigned a hard register.  */
  if (REG_P (x))
    {
      xregno = REGNO (x);
      if (xregno >= FIRST_PSEUDO_REGISTER)
	xregno = true_regnum (x);
      if (xregno != INVALID_REGNUM)
	xclass = REGNO_REG_CLASS (xregno);
    }

  if (!TARGET_AM33)
    {
      /* Memory load/stores less than a full word wide can't have an
	 address or stack pointer destination.  They must use a data
	 register as an intermediate register.  */
      if (rclass != DATA_REGS
	  && (mode == QImode || mode == HImode)
	  && xclass == NO_REGS)
	return DATA_REGS;

      /* We can only move SP to/from an address register.  */
      if (in_p
	  && rclass == SP_REGS
	  && xclass != ADDRESS_REGS)
	return ADDRESS_REGS;
      if (!in_p
	  && xclass == SP_REGS
	  && rclass != ADDRESS_REGS
	  && rclass != SP_OR_ADDRESS_REGS)
	return ADDRESS_REGS;
    }

  /* We can't directly load sp + const_int into a register;
     we must use an address register as a scratch.  */
  if (in_p
      && rclass != SP_REGS
      && rclass != SP_OR_ADDRESS_REGS
      && rclass != SP_OR_GENERAL_REGS
      && GET_CODE (x) == PLUS
      && (XEXP (x, 0) == stack_pointer_rtx
	  || XEXP (x, 1) == stack_pointer_rtx))
    {
      /* Handled by a dedicated reload pattern rather than a class.  */
      sri->icode = CODE_FOR_reload_plus_sp_const;
      return NO_REGS;
    }

  /* We can only move MDR to/from a data register.  */
  if (rclass == MDR_REGS && xclass != DATA_REGS)
    return DATA_REGS;
  if (xclass == MDR_REGS && rclass != DATA_REGS)
    return DATA_REGS;

  /* We can't load/store an FP register from a constant address.  */
  if (TARGET_AM33_2
      && (rclass == FP_REGS || xclass == FP_REGS)
      && (xclass == NO_REGS || rclass == NO_REGS))
    {
      rtx addr = NULL;

      /* For an equivalenced pseudo, examine its memory equivalence;
	 otherwise look at the memory operand directly.  */
      if (xregno >= FIRST_PSEUDO_REGISTER && xregno != INVALID_REGNUM)
	{
	  addr = reg_equiv_mem (xregno);
	  if (addr)
	    addr = XEXP (addr, 0);
	}
      else if (MEM_P (x))
	addr = XEXP (x, 0);

      if (addr && CONSTANT_ADDRESS_P (addr))
	return GENERAL_REGS;
    }
  /* Otherwise assume no secondary reloads are needed.  */
  return NO_REGS;
}
1439
6f22c3b4 1440int
1441mn10300_frame_size (void)
1442{
1443 /* size includes the fixed stack space needed for function calls. */
1444 int size = get_frame_size () + crtl->outgoing_args_size;
1445
1446 /* And space for the return pointer. */
1447 size += crtl->outgoing_args_size ? 4 : 0;
1448
1449 return size;
1450}
1451
48cb86e3 1452int
3626e955 1453mn10300_initial_offset (int from, int to)
48cb86e3 1454{
6f22c3b4 1455 int diff = 0;
1456
1457 gcc_assert (from == ARG_POINTER_REGNUM || from == FRAME_POINTER_REGNUM);
1458 gcc_assert (to == FRAME_POINTER_REGNUM || to == STACK_POINTER_REGNUM);
1459
1460 if (to == STACK_POINTER_REGNUM)
1461 diff = mn10300_frame_size ();
1462
f1899bff 1463 /* The difference between the argument pointer and the frame pointer
1464 is the size of the callee register save area. */
6f22c3b4 1465 if (from == ARG_POINTER_REGNUM)
29a404f9 1466 {
d876ba6e 1467 unsigned int reg_save_bytes;
1468
1469 mn10300_get_live_callee_saved_regs (& reg_save_bytes);
1470 diff += reg_save_bytes;
6f22c3b4 1471 diff += 4 * fp_regs_to_save ();
29a404f9 1472 }
1473
6f22c3b4 1474 return diff;
29a404f9 1475}
bb4959a8 1476
6644435d 1477/* Worker function for TARGET_RETURN_IN_MEMORY. */
1478
f2d49d02 1479static bool
fb80456a 1480mn10300_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
f2d49d02 1481{
1482 /* Return values > 8 bytes in length in memory. */
00b1da0e 1483 return (int_size_in_bytes (type) > 8
1484 || int_size_in_bytes (type) == 0
1485 || TYPE_MODE (type) == BLKmode);
f2d49d02 1486}
1487
/* Flush the argument registers to the stack for a stdarg function;
   return the new argument pointer.  */
static rtx
mn10300_builtin_saveregs (void)
{
  rtx offset, mem;
  tree fntype = TREE_TYPE (current_function_decl);
  /* When the function type is not stdarg_p, skip one extra word when
     computing the argument-pointer offset.  NOTE(review): presumably
     this accounts for a named argument slot -- confirm against the
     callers of this hook.  */
  int argadj = ((!stdarg_p (fntype))
		? UNITS_PER_WORD : 0);
  /* Use the varargs alias set so these stores are kept distinct from
     ordinary memory references.  */
  alias_set_type set = get_varargs_alias_set ();

  if (argadj)
    offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, argadj);
  else
    offset = crtl->args.arg_offset_rtx;

  /* Store hard register 0 at the incoming argument pointer...  */
  mem = gen_rtx_MEM (SImode, crtl->args.internal_arg_pointer);
  set_mem_alias_set (mem, set);
  emit_move_insn (mem, gen_rtx_REG (SImode, 0));

  /* ...and hard register 1 one word above it.  */
  mem = gen_rtx_MEM (SImode,
		     plus_constant (Pmode,
				    crtl->args.internal_arg_pointer, 4));
  set_mem_alias_set (mem, set);
  emit_move_insn (mem, gen_rtx_REG (SImode, 1));

  /* The new argument pointer is internal_arg_pointer + offset.  */
  return copy_to_reg (expand_binop (Pmode, add_optab,
				    crtl->args.internal_arg_pointer,
				    offset, 0, 0, OPTAB_LIB_WIDEN));
}
1518
8a58ed0a 1519static void
3285410a 1520mn10300_va_start (tree valist, rtx nextarg)
ed554036 1521{
7ccc713a 1522 nextarg = expand_builtin_saveregs ();
7df226a2 1523 std_expand_builtin_va_start (valist, nextarg);
ed554036 1524}
1525
b981d932 1526/* Return true when a parameter should be passed by reference. */
1527
1528static bool
39cba157 1529mn10300_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
3754d046 1530 machine_mode mode, const_tree type,
b981d932 1531 bool named ATTRIBUTE_UNUSED)
1532{
1533 unsigned HOST_WIDE_INT size;
1534
1535 if (type)
1536 size = int_size_in_bytes (type);
1537 else
1538 size = GET_MODE_SIZE (mode);
1539
00b1da0e 1540 return (size > 8 || size == 0);
b981d932 1541}
1542
bb4959a8 1543/* Return an RTX to represent where a value with mode MODE will be returned
e92d3ba8 1544 from a function. If the result is NULL_RTX, the argument is pushed. */
bb4959a8 1545
dc67179a 1546static rtx
3754d046 1547mn10300_function_arg (cumulative_args_t cum_v, machine_mode mode,
dc67179a 1548 const_tree type, bool named ATTRIBUTE_UNUSED)
bb4959a8 1549{
39cba157 1550 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
e92d3ba8 1551 rtx result = NULL_RTX;
e14cac83 1552 int size;
bb4959a8 1553
1554 /* We only support using 2 data registers as argument registers. */
1555 int nregs = 2;
1556
1557 /* Figure out the size of the object to be passed. */
1558 if (mode == BLKmode)
1559 size = int_size_in_bytes (type);
1560 else
1561 size = GET_MODE_SIZE (mode);
1562
bb4959a8 1563 cum->nbytes = (cum->nbytes + 3) & ~3;
1564
1565 /* Don't pass this arg via a register if all the argument registers
1566 are used up. */
1567 if (cum->nbytes > nregs * UNITS_PER_WORD)
e92d3ba8 1568 return result;
bb4959a8 1569
1570 /* Don't pass this arg via a register if it would be split between
1571 registers and memory. */
1572 if (type == NULL_TREE
1573 && cum->nbytes + size > nregs * UNITS_PER_WORD)
e92d3ba8 1574 return result;
bb4959a8 1575
1576 switch (cum->nbytes / UNITS_PER_WORD)
1577 {
1578 case 0:
e92d3ba8 1579 result = gen_rtx_REG (mode, FIRST_ARGUMENT_REGNUM);
bb4959a8 1580 break;
1581 case 1:
e92d3ba8 1582 result = gen_rtx_REG (mode, FIRST_ARGUMENT_REGNUM + 1);
bb4959a8 1583 break;
1584 default:
e92d3ba8 1585 break;
bb4959a8 1586 }
1587
1588 return result;
1589}
1590
dc67179a 1591/* Update the data in CUM to advance over an argument
1592 of mode MODE and data type TYPE.
1593 (TYPE is null for libcalls where that information may not be available.) */
1594
1595static void
3754d046 1596mn10300_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
dc67179a 1597 const_tree type, bool named ATTRIBUTE_UNUSED)
1598{
39cba157 1599 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
1600
dc67179a 1601 cum->nbytes += (mode != BLKmode
1602 ? (GET_MODE_SIZE (mode) + 3) & ~3
1603 : (int_size_in_bytes (type) + 3) & ~3);
1604}
1605
f054eb3c 1606/* Return the number of bytes of registers to use for an argument passed
1607 partially in registers and partially in memory. */
bb4959a8 1608
f054eb3c 1609static int
3754d046 1610mn10300_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
f054eb3c 1611 tree type, bool named ATTRIBUTE_UNUSED)
bb4959a8 1612{
39cba157 1613 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
e14cac83 1614 int size;
bb4959a8 1615
1616 /* We only support using 2 data registers as argument registers. */
1617 int nregs = 2;
1618
1619 /* Figure out the size of the object to be passed. */
1620 if (mode == BLKmode)
1621 size = int_size_in_bytes (type);
1622 else
1623 size = GET_MODE_SIZE (mode);
1624
bb4959a8 1625 cum->nbytes = (cum->nbytes + 3) & ~3;
1626
1627 /* Don't pass this arg via a register if all the argument registers
1628 are used up. */
1629 if (cum->nbytes > nregs * UNITS_PER_WORD)
1630 return 0;
1631
1632 if (cum->nbytes + size <= nregs * UNITS_PER_WORD)
1633 return 0;
1634
1635 /* Don't pass this arg via a register if it would be split between
1636 registers and memory. */
1637 if (type == NULL_TREE
1638 && cum->nbytes + size > nregs * UNITS_PER_WORD)
1639 return 0;
1640
f054eb3c 1641 return nregs * UNITS_PER_WORD - cum->nbytes;
bb4959a8 1642}
1643
/* Return the location of the function's value.  This will be either
   $d0 for integer functions, $a0 for pointers, or a PARALLEL of both
   $d0 and $a0 if the -mreturn-pointer-on-do flag is set.  Note that
   we only return the PARALLEL for outgoing values; we do not want
   callers relying on this extra copy.  */

static rtx
mn10300_function_value (const_tree valtype,
			const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
			bool outgoing)
{
  rtx rv;
  machine_mode mode = TYPE_MODE (valtype);

  /* Non-pointer values come back in the first data register.  */
  if (! POINTER_TYPE_P (valtype))
    return gen_rtx_REG (mode, FIRST_DATA_REGNUM);
  /* Pointers come back in the first address register alone, unless we
     are describing the outgoing value under TARGET_PTR_A0D0 and the
     function does not return a struct.  */
  else if (! TARGET_PTR_A0D0 || ! outgoing
	   || cfun->returns_struct)
    return gen_rtx_REG (mode, FIRST_ADDRESS_REGNUM);

  /* Build a PARALLEL placing the value in both the address and data
     registers.  */
  rv = gen_rtx_PARALLEL (mode, rtvec_alloc (2));
  XVECEXP (rv, 0, 0)
    = gen_rtx_EXPR_LIST (VOIDmode,
			 gen_rtx_REG (mode, FIRST_ADDRESS_REGNUM),
			 GEN_INT (0));

  XVECEXP (rv, 0, 1)
    = gen_rtx_EXPR_LIST (VOIDmode,
			 gen_rtx_REG (mode, FIRST_DATA_REGNUM),
			 GEN_INT (0));
  return rv;
}
1676
b6713ba6 1677/* Implements TARGET_LIBCALL_VALUE. */
1678
1679static rtx
3754d046 1680mn10300_libcall_value (machine_mode mode,
b6713ba6 1681 const_rtx fun ATTRIBUTE_UNUSED)
1682{
1683 return gen_rtx_REG (mode, FIRST_DATA_REGNUM);
1684}
1685
1686/* Implements FUNCTION_VALUE_REGNO_P. */
1687
1688bool
1689mn10300_function_value_regno_p (const unsigned int regno)
1690{
1691 return (regno == FIRST_DATA_REGNUM || regno == FIRST_ADDRESS_REGNUM);
1692}
1693
/* Output an addition operation.  OPERANDS are dest/src1/src2;
   NEED_FLAGS is true when the insn's flags result is live.  Returns
   the assembler template to emit.  */

const char *
mn10300_output_add (rtx operands[3], bool need_flags)
{
  rtx dest, src1, src2;
  unsigned int dest_regnum, src1_regnum, src2_regnum;
  enum reg_class src1_class, src2_class, dest_class;

  dest = operands[0];
  src1 = operands[1];
  src2 = operands[2];

  dest_regnum = true_regnum (dest);
  src1_regnum = true_regnum (src1);

  dest_class = REGNO_REG_CLASS (dest_regnum);
  src1_class = REGNO_REG_CLASS (src1_regnum);

  /* Immediate addend: this form is two-operand, so the destination
     must coincide with the first source.  */
  if (CONST_INT_P (src2))
    {
      gcc_assert (dest_regnum == src1_regnum);

      /* Prefer the short inc/inc4 encodings, but only when the caller
	 does not need the flags result of the addition.  */
      if (src2 == const1_rtx && !need_flags)
	return "inc %0";
      if (INTVAL (src2) == 4 && !need_flags && dest_class != DATA_REGS)
	return "inc4 %0";

      gcc_assert (!need_flags || dest_class != SP_REGS);
      return "add %2,%0";
    }
  else if (CONSTANT_P (src2))
    return "add %2,%0";

  src2_regnum = true_regnum (src2);
  src2_class = REGNO_REG_CLASS (src2_regnum);

  /* Two-operand forms when the destination overlaps one source.  */
  if (dest_regnum == src1_regnum)
    return "add %2,%0";
  if (dest_regnum == src2_regnum)
    return "add %1,%0";

  /* The rest of the cases are reg = reg+reg.  For AM33, we can implement
     this directly, as below, but when optimizing for space we can sometimes
     do better by using a mov+add.  For MN103, we claimed that we could
     implement a three-operand add because the various move and add insns
     change sizes across register classes, and we can often do better than
     reload in choosing which operand to move.  */
  if (TARGET_AM33 && optimize_insn_for_speed_p ())
    return "add %2,%1,%0";

  /* Catch cases where no extended register was used.  */
  if (src1_class != EXTENDED_REGS
      && src2_class != EXTENDED_REGS
      && dest_class != EXTENDED_REGS)
    {
      /* We have to copy one of the sources into the destination, then
	 add the other source to the destination.

	 Carefully select which source to copy to the destination; a
	 naive implementation will waste a byte when the source classes
	 are different and the destination is an address register.
	 Selecting the lowest cost register copy will optimize this
	 sequence.  */
      if (src1_class == dest_class)
	return "mov %1,%0\n\tadd %2,%0";
      else
	return "mov %2,%0\n\tadd %1,%0";
    }

  /* At least one register is an extended register.  */

  /* The three operand add instruction on the am33 is a win iff the
     output register is an extended register, or if both source
     registers are extended registers.  */
  if (dest_class == EXTENDED_REGS || src1_class == src2_class)
    return "add %2,%1,%0";

  /* It is better to copy one of the sources to the destination, then
     perform a 2 address add.  The destination in this case must be
     an address or data register and one of the sources must be an
     extended register and the remaining source must not be an extended
     register.

     The best code for this case is to copy the extended reg to the
     destination, then emit a two address add.  */
  if (src1_class == EXTENDED_REGS)
    return "mov %1,%0\n\tadd %2,%0";
  else
    return "mov %2,%0\n\tadd %1,%0";
}
36ed4406 1785
c4cd8f6a 1786/* Return 1 if X contains a symbolic expression. We know these
1787 expressions will have one of a few well defined forms, so
1788 we need only check those forms. */
3626e955 1789
c4cd8f6a 1790int
3626e955 1791mn10300_symbolic_operand (rtx op,
3754d046 1792 machine_mode mode ATTRIBUTE_UNUSED)
c4cd8f6a 1793{
1794 switch (GET_CODE (op))
1795 {
1796 case SYMBOL_REF:
1797 case LABEL_REF:
1798 return 1;
1799 case CONST:
1800 op = XEXP (op, 0);
1801 return ((GET_CODE (XEXP (op, 0)) == SYMBOL_REF
1802 || GET_CODE (XEXP (op, 0)) == LABEL_REF)
4879b320 1803 && CONST_INT_P (XEXP (op, 1)));
c4cd8f6a 1804 default:
1805 return 0;
1806 }
1807}
1808
/* Try machine dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   Normally it is always safe for this macro to do nothing.  It exists to
   recognize opportunities to optimize the output.

   But on a few ports with segmented architectures and indexed addressing
   (mn10300, hppa) it is used to rewrite certain problematical addresses.  */

static rtx
mn10300_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			    machine_mode mode ATTRIBUTE_UNUSED)
{
  /* NOTE(review): the PIC path legitimizes OLDX, not the X that was
     just tested -- presumably intentional, but confirm before
     changing anything here.  */
  if (flag_pic && ! mn10300_legitimate_pic_operand_p (x))
    x = mn10300_legitimize_pic_address (oldx, NULL_RTX);

  /* Uh-oh.  We might have an address for x[n-100000].  This needs
     special handling to avoid creating an indexed memory address
     with x-100000 as the base.  */
  if (GET_CODE (x) == PLUS
      && mn10300_symbolic_operand (XEXP (x, 1), VOIDmode))
    {
      /* Ugly.  We modify things here so that the address offset specified
	 by the index expression is computed first, then added to x to form
	 the entire address.  */

      rtx regx1, regy1, regy2, y;

      /* Strip off any CONST.  */
      y = XEXP (x, 1);
      if (GET_CODE (y) == CONST)
	y = XEXP (y, 0);

      if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
	{
	  /* Force each piece into a register, combine the base with
	     the offset part first, then add the symbolic part.  */
	  regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
	  regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
	  regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
	  regx1 = force_reg (Pmode,
			     gen_rtx_fmt_ee (GET_CODE (y), Pmode, regx1,
					     regy2));
	  return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
	}
    }
  return x;
}
e2aead91 1859
/* Convert a non-PIC address in `orig' to a PIC address using @GOT or
   @GOTOFF in `reg'.  Returns the register holding the result, or ORIG
   unchanged when no conversion is needed.  */

rtx
mn10300_legitimize_pic_address (rtx orig, rtx reg)
{
  rtx x;
  rtx_insn *insn;

  /* Labels, constant-pool entries and non-global symbols can be
     addressed relative to the PIC register with @GOTOFF.  */
  if (GET_CODE (orig) == LABEL_REF
      || (GET_CODE (orig) == SYMBOL_REF
	  && (CONSTANT_POOL_ADDRESS_P (orig)
	      || ! MN10300_GLOBAL_P (orig))))
    {
      if (reg == NULL)
	reg = gen_reg_rtx (Pmode);

      /* reg = @GOTOFF(orig); reg += pic register.  */
      x = gen_rtx_UNSPEC (SImode, gen_rtvec (1, orig), UNSPEC_GOTOFF);
      x = gen_rtx_CONST (SImode, x);
      emit_move_insn (reg, x);

      insn = emit_insn (gen_addsi3 (reg, reg, pic_offset_table_rtx));
    }
  else if (GET_CODE (orig) == SYMBOL_REF)
    {
      /* Any other symbol is loaded through its GOT entry.  */
      if (reg == NULL)
	reg = gen_reg_rtx (Pmode);

      /* reg = *(pic register + @GOT(orig)).  */
      x = gen_rtx_UNSPEC (SImode, gen_rtvec (1, orig), UNSPEC_GOT);
      x = gen_rtx_CONST (SImode, x);
      x = gen_rtx_PLUS (SImode, pic_offset_table_rtx, x);
      x = gen_const_mem (SImode, x);

      insn = emit_move_insn (reg, x);
    }
  else
    return orig;

  /* Record the constant value of the load for later passes.  */
  set_unique_reg_note (insn, REG_EQUAL, orig);
  return reg;
}
1901
1902/* Return zero if X references a SYMBOL_REF or LABEL_REF whose symbol
fa483857 1903 isn't protected by a PIC unspec; nonzero otherwise. */
3626e955 1904
b87a151a 1905int
3626e955 1906mn10300_legitimate_pic_operand_p (rtx x)
b87a151a 1907{
3626e955 1908 const char *fmt;
1909 int i;
b87a151a 1910
1911 if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
1912 return 0;
1913
1914 if (GET_CODE (x) == UNSPEC
1915 && (XINT (x, 1) == UNSPEC_PIC
1916 || XINT (x, 1) == UNSPEC_GOT
1917 || XINT (x, 1) == UNSPEC_GOTOFF
b6e3379c 1918 || XINT (x, 1) == UNSPEC_PLT
1919 || XINT (x, 1) == UNSPEC_GOTSYM_OFF))
b87a151a 1920 return 1;
1921
b87a151a 1922 fmt = GET_RTX_FORMAT (GET_CODE (x));
1923 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
1924 {
1925 if (fmt[i] == 'E')
1926 {
5574dbdd 1927 int j;
b87a151a 1928
1929 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3626e955 1930 if (! mn10300_legitimate_pic_operand_p (XVECEXP (x, i, j)))
b87a151a 1931 return 0;
1932 }
3626e955 1933 else if (fmt[i] == 'e'
1934 && ! mn10300_legitimate_pic_operand_p (XEXP (x, i)))
b87a151a 1935 return 0;
1936 }
1937
1938 return 1;
1939}
1940
/* Return TRUE if the address X, taken from a (MEM:MODE X) rtx, is
   legitimate, and FALSE otherwise.

   On the mn10300, the value in the address register must be
   in the same memory space/segment as the effective address.

   This is problematical for reload since it does not understand
   that base+index != index+base in a memory reference.

   Note it is still possible to use reg+reg addressing modes,
   it's just much more difficult.  For a discussion of a possible
   workaround and solution, see the comments in pa.c before the
   function record_unscaled_index_insn_codes.  */

static bool
mn10300_legitimate_address_p (machine_mode mode, rtx x, bool strict)
{
  rtx base, index;

  /* Constant addresses are valid, except for unprotected symbolic
     references when generating PIC.  */
  if (CONSTANT_ADDRESS_P (x))
    return !flag_pic || mn10300_legitimate_pic_operand_p (x);

  /* A plain base register.  */
  if (RTX_OK_FOR_BASE_P (x, strict))
    return true;

  /* The AM33 additionally accepts post-increment and post-modify
     addresses for these modes.  */
  if (TARGET_AM33 && (mode == SImode || mode == SFmode || mode == HImode))
    {
      if (GET_CODE (x) == POST_INC)
	return RTX_OK_FOR_BASE_P (XEXP (x, 0), strict);
      if (GET_CODE (x) == POST_MODIFY)
	return (RTX_OK_FOR_BASE_P (XEXP (x, 0), strict)
		&& CONSTANT_ADDRESS_P (XEXP (x, 1)));
    }

  /* Everything else must have the shape base + index/displacement.  */
  if (GET_CODE (x) != PLUS)
    return false;

  base = XEXP (x, 0);
  index = XEXP (x, 1);

  if (!REG_P (base))
    return false;
  if (REG_P (index))
    {
      /* ??? Without AM33 generalized (Ri,Rn) addressing, reg+reg
	 addressing is hard to satisfy.  */
      if (!TARGET_AM33)
	return false;

      return (REGNO_GENERAL_P (REGNO (base), strict)
	      && REGNO_GENERAL_P (REGNO (index), strict));
    }

  if (!REGNO_STRICT_OK_FOR_BASE_P (REGNO (base), strict))
    return false;

  /* Integer displacements may use the full signed 32-bit range.  */
  if (CONST_INT_P (index))
    return IN_RANGE (INTVAL (index), -1 - 0x7fffffff, 0x7fffffff);

  /* Symbolic displacements get the same PIC restriction as plain
     constant addresses.  */
  if (CONSTANT_ADDRESS_P (index))
    return !flag_pic || mn10300_legitimate_pic_operand_p (index);

  return false;
}
2005
2006bool
2007mn10300_regno_in_class_p (unsigned regno, int rclass, bool strict)
2008{
2009 if (regno >= FIRST_PSEUDO_REGISTER)
2010 {
2011 if (!strict)
2012 return true;
2013 if (!reg_renumber)
2014 return false;
2015 regno = reg_renumber[regno];
c2fa9c24 2016 if (regno == INVALID_REGNUM)
2017 return false;
c8a596d6 2018 }
2019 return TEST_HARD_REG_BIT (reg_class_contents[rclass], regno);
2020}
2021
/* Worker for LEGITIMIZE_RELOAD_ADDRESS.  If X is a PLUS involving the
   stack pointer, reload the SP operand into a general register and
   return the (possibly modified) X; return NULL_RTX when no special
   handling was applied.  */

rtx
mn10300_legitimize_reload_address (rtx x,
				   machine_mode mode ATTRIBUTE_UNUSED,
				   int opnum, int type,
				   int ind_levels ATTRIBUTE_UNUSED)
{
  bool any_change = false;

  /* See above re disabling reg+reg addressing for MN103.  */
  if (!TARGET_AM33)
    return NULL_RTX;

  if (GET_CODE (x) != PLUS)
    return NULL_RTX;

  /* Force a stack-pointer operand of the PLUS into a general
     register.  */
  if (XEXP (x, 0) == stack_pointer_rtx)
    {
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   GENERAL_REGS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      any_change = true;
    }
  if (XEXP (x, 1) == stack_pointer_rtx)
    {
      push_reload (XEXP (x, 1), NULL_RTX, &XEXP (x, 1), NULL,
		   GENERAL_REGS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      any_change = true;
    }

  return any_change ? x : NULL_RTX;
}
2054
ca316360 2055/* Implement TARGET_LEGITIMATE_CONSTANT_P. Returns TRUE if X is a valid
5574dbdd 2056 constant. Note that some "constants" aren't valid, such as TLS
2057 symbols and unconverted GOT-based references, so we eliminate
2058 those here. */
2059
ca316360 2060static bool
3754d046 2061mn10300_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
5574dbdd 2062{
2063 switch (GET_CODE (x))
2064 {
2065 case CONST:
2066 x = XEXP (x, 0);
2067
2068 if (GET_CODE (x) == PLUS)
2069 {
3626e955 2070 if (! CONST_INT_P (XEXP (x, 1)))
5574dbdd 2071 return false;
2072 x = XEXP (x, 0);
2073 }
2074
2075 /* Only some unspecs are valid as "constants". */
2076 if (GET_CODE (x) == UNSPEC)
2077 {
5574dbdd 2078 switch (XINT (x, 1))
2079 {
5574dbdd 2080 case UNSPEC_PIC:
2081 case UNSPEC_GOT:
2082 case UNSPEC_GOTOFF:
2083 case UNSPEC_PLT:
2084 return true;
2085 default:
2086 return false;
2087 }
2088 }
2089
2090 /* We must have drilled down to a symbol. */
3626e955 2091 if (! mn10300_symbolic_operand (x, Pmode))
5574dbdd 2092 return false;
2093 break;
2094
2095 default:
2096 break;
2097 }
2098
2099 return true;
2100}
2101
4c6c308e 2102/* Undo pic address legitimization for the benefit of debug info. */
2103
2104static rtx
2105mn10300_delegitimize_address (rtx orig_x)
2106{
2107 rtx x = orig_x, ret, addend = NULL;
2108 bool need_mem;
2109
2110 if (MEM_P (x))
2111 x = XEXP (x, 0);
2112 if (GET_CODE (x) != PLUS || GET_MODE (x) != Pmode)
2113 return orig_x;
2114
2115 if (XEXP (x, 0) == pic_offset_table_rtx)
2116 ;
2117 /* With the REG+REG addressing of AM33, var-tracking can re-assemble
2118 some odd-looking "addresses" that were never valid in the first place.
2119 We need to look harder to avoid warnings being emitted. */
2120 else if (GET_CODE (XEXP (x, 0)) == PLUS)
2121 {
2122 rtx x0 = XEXP (x, 0);
2123 rtx x00 = XEXP (x0, 0);
2124 rtx x01 = XEXP (x0, 1);
2125
2126 if (x00 == pic_offset_table_rtx)
2127 addend = x01;
2128 else if (x01 == pic_offset_table_rtx)
2129 addend = x00;
2130 else
2131 return orig_x;
2132
2133 }
2134 else
2135 return orig_x;
2136 x = XEXP (x, 1);
2137
2138 if (GET_CODE (x) != CONST)
2139 return orig_x;
2140 x = XEXP (x, 0);
2141 if (GET_CODE (x) != UNSPEC)
2142 return orig_x;
2143
2144 ret = XVECEXP (x, 0, 0);
2145 if (XINT (x, 1) == UNSPEC_GOTOFF)
2146 need_mem = false;
2147 else if (XINT (x, 1) == UNSPEC_GOT)
2148 need_mem = true;
2149 else
2150 return orig_x;
2151
2152 gcc_assert (GET_CODE (ret) == SYMBOL_REF);
2153 if (need_mem != MEM_P (orig_x))
2154 return orig_x;
2155 if (need_mem && addend)
2156 return orig_x;
2157 if (addend)
2158 ret = gen_rtx_PLUS (Pmode, addend, ret);
2159 return ret;
2160}
2161
28f32607 2162/* For addresses, costs are relative to "MOV (Rm),Rn". For AM33 this is
2163 the 3-byte fully general instruction; for MN103 this is the 2-byte form
2164 with an address register. */
2165
ec0457a8 2166static int
3754d046 2167mn10300_address_cost (rtx x, machine_mode mode ATTRIBUTE_UNUSED,
d9c5e5f4 2168 addr_space_t as ATTRIBUTE_UNUSED, bool speed)
e2aead91 2169{
28f32607 2170 HOST_WIDE_INT i;
2171 rtx base, index;
2172
e2aead91 2173 switch (GET_CODE (x))
2174 {
28f32607 2175 case CONST:
2176 case SYMBOL_REF:
2177 case LABEL_REF:
2178 /* We assume all of these require a 32-bit constant, even though
2179 some symbol and label references can be relaxed. */
2180 return speed ? 1 : 4;
2181
e2aead91 2182 case REG:
28f32607 2183 case SUBREG:
2184 case POST_INC:
2185 return 0;
2186
2187 case POST_MODIFY:
2188 /* Assume any symbolic offset is a 32-bit constant. */
2189 i = (CONST_INT_P (XEXP (x, 1)) ? INTVAL (XEXP (x, 1)) : 0x12345678);
2190 if (IN_RANGE (i, -128, 127))
2191 return speed ? 0 : 1;
2192 if (speed)
2193 return 1;
2194 if (IN_RANGE (i, -0x800000, 0x7fffff))
2195 return 3;
2196 return 4;
2197
2198 case PLUS:
2199 base = XEXP (x, 0);
2200 index = XEXP (x, 1);
2201 if (register_operand (index, SImode))
e2aead91 2202 {
28f32607 2203 /* Attempt to minimize the number of registers in the address.
2204 This is similar to what other ports do. */
2205 if (register_operand (base, SImode))
2206 return 1;
e2aead91 2207
28f32607 2208 base = XEXP (x, 1);
2209 index = XEXP (x, 0);
2210 }
e2aead91 2211
28f32607 2212 /* Assume any symbolic offset is a 32-bit constant. */
2213 i = (CONST_INT_P (XEXP (x, 1)) ? INTVAL (XEXP (x, 1)) : 0x12345678);
2214 if (IN_RANGE (i, -128, 127))
2215 return speed ? 0 : 1;
2216 if (IN_RANGE (i, -32768, 32767))
2217 return speed ? 0 : 2;
2218 return speed ? 2 : 6;
e2aead91 2219
28f32607 2220 default:
5ae4887d 2221 return rtx_cost (x, Pmode, MEM, 0, speed);
28f32607 2222 }
2223}
e2aead91 2224
28f32607 2225/* Implement the TARGET_REGISTER_MOVE_COST hook.
e2aead91 2226
28f32607 2227 Recall that the base value of 2 is required by assumptions elsewhere
2228 in the body of the compiler, and that cost 2 is special-cased as an
2229 early exit from reload meaning no work is required. */
e2aead91 2230
28f32607 2231static int
3754d046 2232mn10300_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
28f32607 2233 reg_class_t ifrom, reg_class_t ito)
2234{
2235 enum reg_class from = (enum reg_class) ifrom;
2236 enum reg_class to = (enum reg_class) ito;
2237 enum reg_class scratch, test;
2238
2239 /* Simplify the following code by unifying the fp register classes. */
2240 if (to == FP_ACC_REGS)
2241 to = FP_REGS;
2242 if (from == FP_ACC_REGS)
2243 from = FP_REGS;
2244
2245 /* Diagnose invalid moves by costing them as two moves. */
2246
2247 scratch = NO_REGS;
2248 test = from;
2249 if (to == SP_REGS)
2250 scratch = (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
85a6eed4 2251 else if (to == MDR_REGS)
2252 scratch = DATA_REGS;
28f32607 2253 else if (to == FP_REGS && to != from)
2254 scratch = GENERAL_REGS;
2255 else
2256 {
2257 test = to;
2258 if (from == SP_REGS)
2259 scratch = (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
85a6eed4 2260 else if (from == MDR_REGS)
2261 scratch = DATA_REGS;
28f32607 2262 else if (from == FP_REGS && to != from)
2263 scratch = GENERAL_REGS;
2264 }
2265 if (scratch != NO_REGS && !reg_class_subset_p (test, scratch))
2266 return (mn10300_register_move_cost (VOIDmode, from, scratch)
2267 + mn10300_register_move_cost (VOIDmode, scratch, to));
e2aead91 2268
28f32607 2269 /* From here on, all we need consider are legal combinations. */
e2aead91 2270
28f32607 2271 if (optimize_size)
2272 {
2273 /* The scale here is bytes * 2. */
e2aead91 2274
28f32607 2275 if (from == to && (to == ADDRESS_REGS || to == DATA_REGS))
2276 return 2;
e2aead91 2277
28f32607 2278 if (from == SP_REGS)
2279 return (to == ADDRESS_REGS ? 2 : 6);
2280
2281 /* For MN103, all remaining legal moves are two bytes. */
2282 if (TARGET_AM33)
2283 return 4;
2284
2285 if (to == SP_REGS)
2286 return (from == ADDRESS_REGS ? 4 : 6);
2287
2288 if ((from == ADDRESS_REGS || from == DATA_REGS)
2289 && (to == ADDRESS_REGS || to == DATA_REGS))
2290 return 4;
2291
2292 if (to == EXTENDED_REGS)
2293 return (to == from ? 6 : 4);
e2aead91 2294
28f32607 2295 /* What's left are SP_REGS, FP_REGS, or combinations of the above. */
2296 return 6;
2297 }
2298 else
2299 {
2300 /* The scale here is cycles * 2. */
2301
2302 if (to == FP_REGS)
2303 return 8;
2304 if (from == FP_REGS)
2305 return 4;
2306
2307 /* All legal moves between integral registers are single cycle. */
2308 return 2;
e2aead91 2309 }
2310}
fab7adbf 2311
28f32607 2312/* Implement the TARGET_MEMORY_MOVE_COST hook.
2313
2314 Given lack of the form of the address, this must be speed-relative,
2315 though we should never be less expensive than a size-relative register
2316 move cost above. This is not a problem. */
2317
ec0457a8 2318static int
3754d046 2319mn10300_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
28f32607 2320 reg_class_t iclass, bool in ATTRIBUTE_UNUSED)
ec0457a8 2321{
28f32607 2322 enum reg_class rclass = (enum reg_class) iclass;
2323
2324 if (rclass == FP_REGS)
2325 return 8;
2326 return 6;
ec0457a8 2327}
2328
28f32607 2329/* Implement the TARGET_RTX_COSTS hook.
2330
2331 Speed-relative costs are relative to COSTS_N_INSNS, which is intended
2332 to represent cycles. Size-relative costs are in bytes. */
2333
fab7adbf 2334static bool
5ae4887d 2335mn10300_rtx_costs (rtx x, machine_mode mode, int outer_code,
2336 int opno ATTRIBUTE_UNUSED, int *ptotal, bool speed)
fab7adbf 2337{
28f32607 2338 /* This value is used for SYMBOL_REF etc where we want to pretend
2339 we have a full 32-bit constant. */
2340 HOST_WIDE_INT i = 0x12345678;
2341 int total;
5ae4887d 2342 int code = GET_CODE (x);
28f32607 2343
fab7adbf 2344 switch (code)
2345 {
2346 case CONST_INT:
28f32607 2347 i = INTVAL (x);
2348 do_int_costs:
2349 if (speed)
2350 {
2351 if (outer_code == SET)
2352 {
2353 /* 16-bit integer loads have latency 1, 32-bit loads 2. */
2354 if (IN_RANGE (i, -32768, 32767))
2355 total = COSTS_N_INSNS (1);
2356 else
2357 total = COSTS_N_INSNS (2);
2358 }
2359 else
2360 {
2361 /* 16-bit integer operands don't affect latency;
2362 24-bit and 32-bit operands add a cycle. */
2363 if (IN_RANGE (i, -32768, 32767))
2364 total = 0;
2365 else
2366 total = COSTS_N_INSNS (1);
2367 }
2368 }
fab7adbf 2369 else
28f32607 2370 {
2371 if (outer_code == SET)
2372 {
2373 if (i == 0)
2374 total = 1;
2375 else if (IN_RANGE (i, -128, 127))
2376 total = 2;
2377 else if (IN_RANGE (i, -32768, 32767))
2378 total = 3;
2379 else
2380 total = 6;
2381 }
2382 else
2383 {
2384 /* Reference here is ADD An,Dn, vs ADD imm,Dn. */
2385 if (IN_RANGE (i, -128, 127))
2386 total = 0;
2387 else if (IN_RANGE (i, -32768, 32767))
2388 total = 2;
2389 else if (TARGET_AM33 && IN_RANGE (i, -0x01000000, 0x00ffffff))
2390 total = 3;
2391 else
2392 total = 4;
2393 }
2394 }
2395 goto alldone;
fab7adbf 2396
2397 case CONST:
2398 case LABEL_REF:
2399 case SYMBOL_REF:
fab7adbf 2400 case CONST_DOUBLE:
28f32607 2401 /* We assume all of these require a 32-bit constant, even though
2402 some symbol and label references can be relaxed. */
2403 goto do_int_costs;
74f4459c 2404
28f32607 2405 case UNSPEC:
2406 switch (XINT (x, 1))
2407 {
2408 case UNSPEC_PIC:
2409 case UNSPEC_GOT:
2410 case UNSPEC_GOTOFF:
2411 case UNSPEC_PLT:
2412 case UNSPEC_GOTSYM_OFF:
2413 /* The PIC unspecs also resolve to a 32-bit constant. */
2414 goto do_int_costs;
fab7adbf 2415
28f32607 2416 default:
2417 /* Assume any non-listed unspec is some sort of arithmetic. */
2418 goto do_arith_costs;
2419 }
8935d57c 2420
28f32607 2421 case PLUS:
2422 /* Notice the size difference of INC and INC4. */
2423 if (!speed && outer_code == SET && CONST_INT_P (XEXP (x, 1)))
2424 {
2425 i = INTVAL (XEXP (x, 1));
2426 if (i == 1 || i == 4)
2427 {
5ae4887d 2428 total = 1 + rtx_cost (XEXP (x, 0), mode, PLUS, 0, speed);
28f32607 2429 goto alldone;
2430 }
2431 }
2432 goto do_arith_costs;
2433
2434 case MINUS:
2435 case AND:
2436 case IOR:
2437 case XOR:
2438 case NOT:
2439 case NEG:
2440 case ZERO_EXTEND:
2441 case SIGN_EXTEND:
2442 case COMPARE:
2443 case BSWAP:
2444 case CLZ:
2445 do_arith_costs:
2446 total = (speed ? COSTS_N_INSNS (1) : 2);
2447 break;
8935d57c 2448
28f32607 2449 case ASHIFT:
2450 /* Notice the size difference of ASL2 and variants. */
2451 if (!speed && CONST_INT_P (XEXP (x, 1)))
2452 switch (INTVAL (XEXP (x, 1)))
2453 {
2454 case 1:
2455 case 2:
2456 total = 1;
2457 goto alldone;
2458 case 3:
2459 case 4:
2460 total = 2;
2461 goto alldone;
2462 }
2463 /* FALLTHRU */
8935d57c 2464
28f32607 2465 case ASHIFTRT:
2466 case LSHIFTRT:
2467 total = (speed ? COSTS_N_INSNS (1) : 3);
2468 goto alldone;
8935d57c 2469
28f32607 2470 case MULT:
2471 total = (speed ? COSTS_N_INSNS (3) : 2);
8935d57c 2472 break;
fb16c776 2473
28f32607 2474 case DIV:
2475 case UDIV:
2476 case MOD:
2477 case UMOD:
2478 total = (speed ? COSTS_N_INSNS (39)
2479 /* Include space to load+retrieve MDR. */
2480 : code == MOD || code == UMOD ? 6 : 4);
8935d57c 2481 break;
fb16c776 2482
28f32607 2483 case MEM:
5ae4887d 2484 total = mn10300_address_cost (XEXP (x, 0), mode,
d9c5e5f4 2485 MEM_ADDR_SPACE (x), speed);
28f32607 2486 if (speed)
2487 total = COSTS_N_INSNS (2 + total);
2488 goto alldone;
2489
8935d57c 2490 default:
28f32607 2491 /* Probably not implemented. Assume external call. */
2492 total = (speed ? COSTS_N_INSNS (10) : 7);
2493 break;
8935d57c 2494 }
2495
28f32607 2496 *ptotal = total;
2497 return false;
2498
2499 alldone:
2500 *ptotal = total;
2501 return true;
8935d57c 2502}
28f32607 2503
b87a151a 2504/* If using PIC, mark a SYMBOL_REF for a non-global symbol so that we
2505 may access it using GOTOFF instead of GOT. */
2506
2507static void
b4d5791b 2508mn10300_encode_section_info (tree decl, rtx rtl, int first)
b87a151a 2509{
2510 rtx symbol;
2511
b4d5791b 2512 default_encode_section_info (decl, rtl, first);
2513
3626e955 2514 if (! MEM_P (rtl))
b87a151a 2515 return;
b4d5791b 2516
b87a151a 2517 symbol = XEXP (rtl, 0);
2518 if (GET_CODE (symbol) != SYMBOL_REF)
2519 return;
2520
2521 if (flag_pic)
2522 SYMBOL_REF_FLAG (symbol) = (*targetm.binds_local_p) (decl);
2523}
906bb5c3 2524
/* Dispatch tables on the mn10300 are extremely expensive in terms of code
   and readonly data size.  So we crank up the case threshold value to
   encourage a series of if/else comparisons to implement many small switch
   statements.  In theory, this value could be increased much more if we
   were solely optimizing for space, but we keep it "reasonable" to avoid
   serious code efficiency lossage.  */

static unsigned int
mn10300_case_values_threshold (void)
{
  /* Minimum number of case labels before a dispatch table is used.  */
  const unsigned int threshold = 6;

  return threshold;
}
3e16f982 2537
3e16f982 2538/* Worker function for TARGET_TRAMPOLINE_INIT. */
2539
2540static void
2541mn10300_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
2542{
3562cea7 2543 rtx mem, disp, fnaddr = XEXP (DECL_RTL (fndecl), 0);
2544
2545 /* This is a strict alignment target, which means that we play
2546 some games to make sure that the locations at which we need
2547 to store <chain> and <disp> wind up at aligned addresses.
2548
2549 0x28 0x00 add 0,d0
2550 0xfc 0xdd mov chain,a1
2551 <chain>
2552 0xf8 0xed 0x00 btst 0,d1
2553 0xdc jmp fnaddr
2554 <disp>
2555
2556 Note that the two extra insns are effectively nops; they
2557 clobber the flags but do not affect the contents of D0 or D1. */
3e16f982 2558
3562cea7 2559 disp = expand_binop (SImode, sub_optab, fnaddr,
29c05e22 2560 plus_constant (Pmode, XEXP (m_tramp, 0), 11),
3562cea7 2561 NULL_RTX, 1, OPTAB_DIRECT);
3e16f982 2562
3562cea7 2563 mem = adjust_address (m_tramp, SImode, 0);
2564 emit_move_insn (mem, gen_int_mode (0xddfc0028, SImode));
2565 mem = adjust_address (m_tramp, SImode, 4);
3e16f982 2566 emit_move_insn (mem, chain_value);
3562cea7 2567 mem = adjust_address (m_tramp, SImode, 8);
2568 emit_move_insn (mem, gen_int_mode (0xdc00edf8, SImode));
2569 mem = adjust_address (m_tramp, SImode, 12);
2570 emit_move_insn (mem, disp);
3e16f982 2571}
e92d3ba8 2572
/* Output the assembler code for a C++ thunk function.
   THUNK_DECL is the declaration for the thunk function itself, FUNCTION
   is the decl for the target function.  DELTA is an immediate constant
   offset to be added to the THIS parameter.  If VCALL_OFFSET is nonzero
   the word at the adjusted address *(*THIS' + VCALL_OFFSET) should be
   additionally added to THIS.  Finally jump to the entry point of
   FUNCTION.  */

static void
mn10300_asm_output_mi_thunk (FILE * file,
			     tree thunk_fndecl ATTRIBUTE_UNUSED,
			     HOST_WIDE_INT delta,
			     HOST_WIDE_INT vcall_offset,
			     tree function)
{
  const char * _this;

  /* Get the register holding the THIS parameter.  Handle the case
     where there is a hidden first argument for a returned structure.  */
  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    _this = reg_names [FIRST_ARGUMENT_REGNUM + 1];
  else
    _this = reg_names [FIRST_ARGUMENT_REGNUM];

  fprintf (file, "\t%s Thunk Entry Point:\n", ASM_COMMENT_START);

  /* NOTE(review): DELTA and VCALL_OFFSET are printed via an (int) cast,
     which assumes thunk offsets fit in 32 bits — reasonable for this
     32-bit target, but confirm.  */
  if (delta)
    fprintf (file, "\tadd %d, %s\n", (int) delta, _this);

  if (vcall_offset)
    {
      const char * scratch = reg_names [FIRST_ADDRESS_REGNUM + 1];

      /* this += *(*this + vcall_offset), using SCRATCH as a temporary.  */
      fprintf (file, "\tmov %s, %s\n", _this, scratch);
      fprintf (file, "\tmov (%s), %s\n", scratch, scratch);
      fprintf (file, "\tadd %d, %s\n", (int) vcall_offset, scratch);
      fprintf (file, "\tmov (%s), %s\n", scratch, scratch);
      fprintf (file, "\tadd %s, %s\n", scratch, _this);
    }

  /* Tail-jump to the real function.  */
  fputs ("\tjmp ", file);
  assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
  putc ('\n', file);
}
2617
/* Return true if mn10300_output_mi_thunk would be able to output the
   assembler code for the thunk function specified by the arguments
   it is passed, and false otherwise.  */

static bool
mn10300_can_output_mi_thunk (const_tree thunk_fndecl ATTRIBUTE_UNUSED,
			     HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
			     HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
			     const_tree function ATTRIBUTE_UNUSED)
{
  /* mn10300_asm_output_mi_thunk handles every combination of DELTA,
     VCALL_OFFSET and FUNCTION, so no restrictions apply.  */
  return true;
}
5574dbdd 2630
b395382f 2631/* Implement TARGET_HARD_REGNO_MODE_OK. */
2632
2633static bool
3754d046 2634mn10300_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
5574dbdd 2635{
2636 if (REGNO_REG_CLASS (regno) == FP_REGS
2637 || REGNO_REG_CLASS (regno) == FP_ACC_REGS)
2638 /* Do not store integer values in FP registers. */
2639 return GET_MODE_CLASS (mode) == MODE_FLOAT && ((regno & 1) == 0);
f70166f6 2640
2641 if (! TARGET_AM33 && REGNO_REG_CLASS (regno) == EXTENDED_REGS)
2642 return false;
2643
5574dbdd 2644 if (((regno) & 1) == 0 || GET_MODE_SIZE (mode) == 4)
2645 return true;
2646
2647 if (REGNO_REG_CLASS (regno) == DATA_REGS
2648 || (TARGET_AM33 && REGNO_REG_CLASS (regno) == ADDRESS_REGS)
2649 || REGNO_REG_CLASS (regno) == EXTENDED_REGS)
2650 return GET_MODE_SIZE (mode) <= 4;
2651
2652 return false;
2653}
2654
5f6dcf1a 2655/* Implement TARGET_MODES_TIEABLE_P. */
2656
2657static bool
2658mn10300_modes_tieable_p (machine_mode mode1, machine_mode mode2)
5574dbdd 2659{
2660 if (GET_MODE_CLASS (mode1) == MODE_FLOAT
2661 && GET_MODE_CLASS (mode2) != MODE_FLOAT)
2662 return false;
2663
2664 if (GET_MODE_CLASS (mode2) == MODE_FLOAT
2665 && GET_MODE_CLASS (mode1) != MODE_FLOAT)
2666 return false;
2667
2668 if (TARGET_AM33
2669 || mode1 == mode2
2670 || (GET_MODE_SIZE (mode1) <= 4 && GET_MODE_SIZE (mode2) <= 4))
2671 return true;
2672
2673 return false;
2674}
2675
990679af 2676static int
3754d046 2677cc_flags_for_mode (machine_mode mode)
990679af 2678{
2679 switch (mode)
2680 {
916ace94 2681 case E_CCmode:
990679af 2682 return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_C | CC_FLAG_V;
916ace94 2683 case E_CCZNCmode:
990679af 2684 return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_C;
916ace94 2685 case E_CCZNmode:
990679af 2686 return CC_FLAG_Z | CC_FLAG_N;
916ace94 2687 case E_CC_FLOATmode:
990679af 2688 return -1;
2689 default:
2690 gcc_unreachable ();
2691 }
2692}
2693
2694static int
2695cc_flags_for_code (enum rtx_code code)
2696{
2697 switch (code)
2698 {
2699 case EQ: /* Z */
2700 case NE: /* ~Z */
2701 return CC_FLAG_Z;
2702
2703 case LT: /* N */
2704 case GE: /* ~N */
2705 return CC_FLAG_N;
990679af 2706
2707 case GT: /* ~(Z|(N^V)) */
2708 case LE: /* Z|(N^V) */
2709 return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_V;
2710
2711 case GEU: /* ~C */
2712 case LTU: /* C */
2713 return CC_FLAG_C;
2714
2715 case GTU: /* ~(C | Z) */
2716 case LEU: /* C | Z */
2717 return CC_FLAG_Z | CC_FLAG_C;
2718
2719 case ORDERED:
2720 case UNORDERED:
2721 case LTGT:
2722 case UNEQ:
2723 case UNGE:
2724 case UNGT:
2725 case UNLE:
2726 case UNLT:
2727 return -1;
2728
2729 default:
2730 gcc_unreachable ();
2731 }
2732}
2733
3754d046 2734machine_mode
990679af 2735mn10300_select_cc_mode (enum rtx_code code, rtx x, rtx y ATTRIBUTE_UNUSED)
5574dbdd 2736{
990679af 2737 int req;
2738
2739 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2740 return CC_FLOATmode;
2741
2742 req = cc_flags_for_code (code);
2743
2744 if (req & CC_FLAG_V)
2745 return CCmode;
2746 if (req & CC_FLAG_C)
2747 return CCZNCmode;
2748 return CCZNmode;
5574dbdd 2749}
4879b320 2750
2751static inline bool
627683ab 2752set_is_load_p (rtx set)
4879b320 2753{
627683ab 2754 return MEM_P (SET_SRC (set));
4879b320 2755}
2756
2757static inline bool
627683ab 2758set_is_store_p (rtx set)
4879b320 2759{
627683ab 2760 return MEM_P (SET_DEST (set));
4879b320 2761}
2762
/* Update scheduling costs for situations that cannot be
   described using the attributes and DFA machinery.
   DEP is the insn being scheduled.
   INSN is the previous insn.
   COST is the current cycle cost for DEP.  */

static int
mn10300_adjust_sched_cost (rtx_insn *insn, int dep_type, rtx_insn *dep,
			   int cost, unsigned int)
{
  rtx insn_set;
  rtx dep_set;
  int timings;

  /* NOTE(review): for non-AM33 parts this returns a fixed cost of 1
     rather than COST — presumably intentional (no pipeline modelling
     for MN103), but confirm.  */
  if (!TARGET_AM33)
    return 1;

  /* We are only interested in pairs of SET. */
  insn_set = single_set (insn);
  if (!insn_set)
    return cost;

  dep_set = single_set (dep);
  if (!dep_set)
    return cost;

  /* For the AM34 a load instruction that follows a
     store instruction incurs an extra cycle of delay.  */
  if (mn10300_tune_cpu == PROCESSOR_AM34
      && set_is_load_p (dep_set)
      && set_is_store_p (insn_set))
    cost += 1;

  /* For the AM34 a non-store, non-branch FPU insn that follows
     another FPU insn incurs a one cycle throughput increase.  */
  else if (mn10300_tune_cpu == PROCESSOR_AM34
	   && ! set_is_store_p (insn_set)
	   && ! JUMP_P (insn)
	   && GET_MODE_CLASS (GET_MODE (SET_SRC (dep_set))) == MODE_FLOAT
	   && GET_MODE_CLASS (GET_MODE (SET_SRC (insn_set))) == MODE_FLOAT)
    cost += 1;

  /*  Resolve the conflict described in section 1-7-4 of
      Chapter 3 of the MN103E Series Instruction Manual
      where it says:

	"When the preceding instruction is a CPU load or
	 store instruction, a following FPU instruction
	 cannot be executed until the CPU completes the
	 latency period even though there are no register
	 or flag dependencies between them."  */

  /* Only the AM33-2 (and later) CPUs have FPU instructions.  */
  if (! TARGET_AM33_2)
    return cost;

  /* If a data dependence already exists then the cost is correct.  */
  if (dep_type == 0)
    return cost;

  /* Check that the instruction about to scheduled is an FPU instruction.  */
  if (GET_MODE_CLASS (GET_MODE (SET_SRC (dep_set))) != MODE_FLOAT)
    return cost;

  /* Now check to see if the previous instruction is a load or store.  */
  if (! set_is_load_p (insn_set) && ! set_is_store_p (insn_set))
    return cost;

  /* XXX: Verify: The text of 1-7-4 implies that the restriction
     only applies when an INTEGER load/store precedes an FPU
     instruction, but is this true ?  For now we assume that it is.  */
  if (GET_MODE_CLASS (GET_MODE (SET_SRC (insn_set))) != MODE_INT)
    return cost;

  /* Extract the latency value from the timings attribute.  */
  /* Timings are encoded as either a 2-digit or 3-digit value whose
     trailing digits hold the latency, hence the modulo extraction.  */
  timings = get_attr_timings (insn);
  return timings < 100 ? (timings % 10) : (timings % 100);
}
b2d7ede1 2841
2842static void
2843mn10300_conditional_register_usage (void)
2844{
2845 unsigned int i;
2846
2847 if (!TARGET_AM33)
2848 {
2849 for (i = FIRST_EXTENDED_REGNUM;
2850 i <= LAST_EXTENDED_REGNUM; i++)
2851 fixed_regs[i] = call_used_regs[i] = 1;
2852 }
2853 if (!TARGET_AM33_2)
2854 {
2855 for (i = FIRST_FP_REGNUM;
2856 i <= LAST_FP_REGNUM; i++)
2857 fixed_regs[i] = call_used_regs[i] = 1;
2858 }
2859 if (flag_pic)
2860 fixed_regs[PIC_OFFSET_TABLE_REGNUM] =
2861 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
2862}
7de3ada8 2863
2af3d775 2864/* Worker function for TARGET_MD_ASM_ADJUST.
7de3ada8 2865 We do this in the mn10300 backend to maintain source compatibility
2866 with the old cc0-based compiler. */
2867
2af3d775 2868static rtx_insn *
2869mn10300_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
2870 vec<const char *> &/*constraints*/,
2871 vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
7de3ada8 2872{
2af3d775 2873 clobbers.safe_push (gen_rtx_REG (CCmode, CC_REG));
2874 SET_HARD_REG_BIT (clobbered_regs, CC_REG);
2875 return NULL;
7de3ada8 2876}
5574dbdd 2877\f
990679af 2878/* A helper function for splitting cbranch patterns after reload. */
2879
2880void
3754d046 2881mn10300_split_cbranch (machine_mode cmp_mode, rtx cmp_op, rtx label_ref)
990679af 2882{
2883 rtx flags, x;
2884
2885 flags = gen_rtx_REG (cmp_mode, CC_REG);
2886 x = gen_rtx_COMPARE (cmp_mode, XEXP (cmp_op, 0), XEXP (cmp_op, 1));
d1f9b275 2887 x = gen_rtx_SET (flags, x);
990679af 2888 emit_insn (x);
2889
2890 x = gen_rtx_fmt_ee (GET_CODE (cmp_op), VOIDmode, flags, const0_rtx);
2891 x = gen_rtx_IF_THEN_ELSE (VOIDmode, x, label_ref, pc_rtx);
d1f9b275 2892 x = gen_rtx_SET (pc_rtx, x);
990679af 2893 emit_jump_insn (x);
2894}
2895
2896/* A helper function for matching parallels that set the flags. */
2897
2898bool
3754d046 2899mn10300_match_ccmode (rtx insn, machine_mode cc_mode)
990679af 2900{
2901 rtx op1, flags;
3754d046 2902 machine_mode flags_mode;
990679af 2903
2904 gcc_checking_assert (XVECLEN (PATTERN (insn), 0) == 2);
2905
e3b93558 2906 op1 = XVECEXP (PATTERN (insn), 0, 0);
990679af 2907 gcc_checking_assert (GET_CODE (SET_SRC (op1)) == COMPARE);
2908
2909 flags = SET_DEST (op1);
2910 flags_mode = GET_MODE (flags);
2911
2912 if (GET_MODE (SET_SRC (op1)) != flags_mode)
2913 return false;
2914 if (GET_MODE_CLASS (flags_mode) != MODE_CC)
2915 return false;
2916
2917 /* Ensure that the mode of FLAGS is compatible with CC_MODE. */
2918 if (cc_flags_for_mode (flags_mode) & ~cc_flags_for_mode (cc_mode))
2919 return false;
2920
2921 return true;
2922}
2923
35c2a6c6 2924/* This function is used to help split:
2925
2926 (set (reg) (and (reg) (int)))
2927
2928 into:
2929
2930 (set (reg) (shift (reg) (int))
2931 (set (reg) (shift (reg) (int))
2932
2933 where the shitfs will be shorter than the "and" insn.
2934
2935 It returns the number of bits that should be shifted. A positive
2936 values means that the low bits are to be cleared (and hence the
2937 shifts should be right followed by left) whereas a negative value
2938 means that the high bits are to be cleared (left followed by right).
2939 Zero is returned when it would not be economical to split the AND. */
2940
990679af 2941int
2942mn10300_split_and_operand_count (rtx op)
2943{
2944 HOST_WIDE_INT val = INTVAL (op);
2945 int count;
2946
2947 if (val < 0)
2948 {
2949 /* High bit is set, look for bits clear at the bottom. */
2950 count = exact_log2 (-val);
2951 if (count < 0)
2952 return 0;
2953 /* This is only size win if we can use the asl2 insn. Otherwise we
2954 would be replacing 1 6-byte insn with 2 3-byte insns. */
2955 if (count > (optimize_insn_for_speed_p () ? 2 : 4))
2956 return 0;
35c2a6c6 2957 return count;
990679af 2958 }
2959 else
2960 {
2961 /* High bit is clear, look for bits set at the bottom. */
2962 count = exact_log2 (val + 1);
2963 count = 32 - count;
2964 /* Again, this is only a size win with asl2. */
2965 if (count > (optimize_insn_for_speed_p () ? 2 : 4))
2966 return 0;
2967 return -count;
2968 }
2969}
2970\f
e7076c21 2971struct liw_data
2972{
2973 enum attr_liw slot;
2974 enum attr_liw_op op;
2975 rtx dest;
2976 rtx src;
2977};
2978
2979/* Decide if the given insn is a candidate for LIW bundling. If it is then
2980 extract the operands and LIW attributes from the insn and use them to fill
2981 in the liw_data structure. Return true upon success or false if the insn
2982 cannot be bundled. */
f9e46c25 2983
2984static bool
50fc2d35 2985extract_bundle (rtx_insn *insn, struct liw_data * pdata)
f9e46c25 2986{
e7076c21 2987 bool allow_consts = true;
81705807 2988 rtx p;
f9e46c25 2989
e7076c21 2990 gcc_assert (pdata != NULL);
2991
50fc2d35 2992 if (insn == NULL)
e7076c21 2993 return false;
2994 /* Make sure that we are dealing with a simple SET insn. */
f9e46c25 2995 p = single_set (insn);
e7076c21 2996 if (p == NULL_RTX)
2997 return false;
2998
2999 /* Make sure that it could go into one of the LIW pipelines. */
3000 pdata->slot = get_attr_liw (insn);
3001 if (pdata->slot == LIW_BOTH)
3002 return false;
3003
3004 pdata->op = get_attr_liw_op (insn);
3005
e7076c21 3006 switch (pdata->op)
f9e46c25 3007 {
3008 case LIW_OP_MOV:
e7076c21 3009 pdata->dest = SET_DEST (p);
3010 pdata->src = SET_SRC (p);
f9e46c25 3011 break;
3012 case LIW_OP_CMP:
e7076c21 3013 pdata->dest = XEXP (SET_SRC (p), 0);
3014 pdata->src = XEXP (SET_SRC (p), 1);
f9e46c25 3015 break;
3016 case LIW_OP_NONE:
3017 return false;
e7076c21 3018 case LIW_OP_AND:
3019 case LIW_OP_OR:
3020 case LIW_OP_XOR:
3021 /* The AND, OR and XOR long instruction words only accept register arguments. */
3022 allow_consts = false;
3023 /* Fall through. */
f9e46c25 3024 default:
e7076c21 3025 pdata->dest = SET_DEST (p);
3026 pdata->src = XEXP (SET_SRC (p), 1);
f9e46c25 3027 break;
3028 }
3029
e7076c21 3030 if (! REG_P (pdata->dest))
3031 return false;
3032
3033 if (REG_P (pdata->src))
3034 return true;
3035
3036 return allow_consts && satisfies_constraint_O (pdata->src);
f9e46c25 3037}
3038
e7076c21 3039/* Make sure that it is OK to execute LIW1 and LIW2 in parallel. GCC generated
3040 the instructions with the assumption that LIW1 would be executed before LIW2
3041 so we must check for overlaps between their sources and destinations. */
f9e46c25 3042
3043static bool
e7076c21 3044check_liw_constraints (struct liw_data * pliw1, struct liw_data * pliw2)
3045{
3046 /* Check for slot conflicts. */
3047 if (pliw2->slot == pliw1->slot && pliw1->slot != LIW_EITHER)
f9e46c25 3048 return false;
3049
e7076c21 3050 /* If either operation is a compare, then "dest" is really an input; the real
3051 destination is CC_REG. So these instructions need different checks. */
3052
3053 /* Changing "CMP ; OP" into "CMP | OP" is OK because the comparison will
3054 check its values prior to any changes made by OP. */
3055 if (pliw1->op == LIW_OP_CMP)
3056 {
3057 /* Two sequential comparisons means dead code, which ought to
3058 have been eliminated given that bundling only happens with
3059 optimization. We cannot bundle them in any case. */
3060 gcc_assert (pliw1->op != pliw2->op);
3061 return true;
3062 }
f9e46c25 3063
e7076c21 3064 /* Changing "OP ; CMP" into "OP | CMP" does not work if the value being compared
3065 is the destination of OP, as the CMP will look at the old value, not the new
3066 one. */
3067 if (pliw2->op == LIW_OP_CMP)
f9e46c25 3068 {
e7076c21 3069 if (REGNO (pliw2->dest) == REGNO (pliw1->dest))
3070 return false;
3071
3072 if (REG_P (pliw2->src))
3073 return REGNO (pliw2->src) != REGNO (pliw1->dest);
3074
3075 return true;
3076 }
3077
3078 /* Changing "OP1 ; OP2" into "OP1 | OP2" does not work if they both write to the
3079 same destination register. */
3080 if (REGNO (pliw2->dest) == REGNO (pliw1->dest))
3081 return false;
3082
3083 /* Changing "OP1 ; OP2" into "OP1 | OP2" generally does not work if the destination
3084 of OP1 is the source of OP2. The exception is when OP1 is a MOVE instruction when
3085 we can replace the source in OP2 with the source of OP1. */
3086 if (REG_P (pliw2->src) && REGNO (pliw2->src) == REGNO (pliw1->dest))
3087 {
3088 if (pliw1->op == LIW_OP_MOV && REG_P (pliw1->src))
f9e46c25 3089 {
e7076c21 3090 if (! REG_P (pliw1->src)
3091 && (pliw2->op == LIW_OP_AND
3092 || pliw2->op == LIW_OP_OR
3093 || pliw2->op == LIW_OP_XOR))
3094 return false;
3095
3096 pliw2->src = pliw1->src;
f9e46c25 3097 return true;
3098 }
3099 return false;
3100 }
3101
e7076c21 3102 /* Everything else is OK. */
f9e46c25 3103 return true;
3104}
3105
f9e46c25 3106/* Combine pairs of insns into LIW bundles. */
3107
3108static void
3109mn10300_bundle_liw (void)
3110{
50fc2d35 3111 rtx_insn *r;
f9e46c25 3112
50fc2d35 3113 for (r = get_insns (); r != NULL; r = next_nonnote_nondebug_insn (r))
f9e46c25 3114 {
50fc2d35 3115 rtx_insn *insn1, *insn2;
e7076c21 3116 struct liw_data liw1, liw2;
f9e46c25 3117
3118 insn1 = r;
e7076c21 3119 if (! extract_bundle (insn1, & liw1))
f9e46c25 3120 continue;
3121
3122 insn2 = next_nonnote_nondebug_insn (insn1);
e7076c21 3123 if (! extract_bundle (insn2, & liw2))
f9e46c25 3124 continue;
3125
e7076c21 3126 /* Check for source/destination overlap. */
3127 if (! check_liw_constraints (& liw1, & liw2))
f9e46c25 3128 continue;
3129
e7076c21 3130 if (liw1.slot == LIW_OP2 || liw2.slot == LIW_OP1)
f9e46c25 3131 {
e7076c21 3132 struct liw_data temp;
3133
3134 temp = liw1;
f9e46c25 3135 liw1 = liw2;
e7076c21 3136 liw2 = temp;
f9e46c25 3137 }
3138
f9e46c25 3139 delete_insn (insn2);
3140
50fc2d35 3141 rtx insn2_pat;
e7076c21 3142 if (liw1.op == LIW_OP_CMP)
50fc2d35 3143 insn2_pat = gen_cmp_liw (liw2.dest, liw2.src, liw1.dest, liw1.src,
3144 GEN_INT (liw2.op));
e7076c21 3145 else if (liw2.op == LIW_OP_CMP)
50fc2d35 3146 insn2_pat = gen_liw_cmp (liw1.dest, liw1.src, liw2.dest, liw2.src,
3147 GEN_INT (liw1.op));
f9e46c25 3148 else
50fc2d35 3149 insn2_pat = gen_liw (liw1.dest, liw2.dest, liw1.src, liw2.src,
3150 GEN_INT (liw1.op), GEN_INT (liw2.op));
f9e46c25 3151
50fc2d35 3152 insn2 = emit_insn_after (insn2_pat, insn1);
f9e46c25 3153 delete_insn (insn1);
3154 r = insn2;
3155 }
3156}
3157
/* Debug helper: when RTL dumping is active, print the string literal
   REASON to the dump file, followed by INSN (when INSN is not
   NULL_RTX).  REASON must be a string literal because it is joined to
   "\n" by compile-time string concatenation.  */
#define DUMP(reason, insn) \
  do \
    { \
      if (dump_file) \
	{ \
	  fprintf (dump_file, reason "\n"); \
	  if (insn != NULL_RTX) \
	    print_rtl_single (dump_file, insn); \
	  fprintf(dump_file, "\n"); \
	} \
    } \
  while (0)
3170
3171/* Replace the BRANCH insn with a Lcc insn that goes to LABEL.
3172 Insert a SETLB insn just before LABEL. */
3173
3174static void
753de566 3175mn10300_insert_setlb_lcc (rtx_insn *label, rtx_insn *branch)
f9b3e8f5 3176{
3177 rtx lcc, comparison, cmp_reg;
3178
3179 if (LABEL_NUSES (label) > 1)
3180 {
158a522b 3181 rtx_insn *insn;
f9b3e8f5 3182
3183 /* This label is used both as an entry point to the loop
3184 and as a loop-back point for the loop. We need to separate
3185 these two functions so that the SETLB happens upon entry,
3186 but the loop-back does not go to the SETLB instruction. */
3187 DUMP ("Inserting SETLB insn after:", label);
3188 insn = emit_insn_after (gen_setlb (), label);
3189 label = gen_label_rtx ();
3190 emit_label_after (label, insn);
3191 DUMP ("Created new loop-back label:", label);
3192 }
3193 else
3194 {
3195 DUMP ("Inserting SETLB insn before:", label);
3196 emit_insn_before (gen_setlb (), label);
3197 }
3198
3199 comparison = XEXP (SET_SRC (PATTERN (branch)), 0);
3200 cmp_reg = XEXP (comparison, 0);
3201 gcc_assert (REG_P (cmp_reg));
3202
3203 /* If the comparison has not already been split out of the branch
3204 then do so now. */
3205 gcc_assert (REGNO (cmp_reg) == CC_REG);
3206
3207 if (GET_MODE (cmp_reg) == CC_FLOATmode)
3208 lcc = gen_FLcc (comparison, label);
3209 else
3210 lcc = gen_Lcc (comparison, label);
3211
c6d14fbf 3212 rtx_insn *jump = emit_jump_insn_before (lcc, branch);
35913365 3213 mark_jump_label (XVECEXP (lcc, 0, 0), jump, 0);
c6d14fbf 3214 JUMP_LABEL (jump) = label;
f9b3e8f5 3215 DUMP ("Replacing branch insn...", branch);
c6d14fbf 3216 DUMP ("... with Lcc insn:", jump);
f9b3e8f5 3217 delete_insn (branch);
3218}
3219
3220static bool
161dfa6e 3221mn10300_block_contains_call (basic_block block)
f9b3e8f5 3222{
91a55c11 3223 rtx_insn *insn;
f9b3e8f5 3224
3225 FOR_BB_INSNS (block, insn)
3226 if (CALL_P (insn))
3227 return true;
3228
3229 return false;
3230}
3231
3232static bool
3233mn10300_loop_contains_call_insn (loop_p loop)
3234{
3235 basic_block * bbs;
3236 bool result = false;
3237 unsigned int i;
3238
3239 bbs = get_loop_body (loop);
3240
3241 for (i = 0; i < loop->num_nodes; i++)
3242 if (mn10300_block_contains_call (bbs[i]))
3243 {
3244 result = true;
3245 break;
3246 }
3247
3248 free (bbs);
3249 return result;
3250}
3251
/* Scan the function for innermost loops that can be implemented with
   the AM33's SETLB/Lcc hardware looping instructions, rewriting the
   back-edge branch of each suitable loop via
   mn10300_insert_setlb_lcc.  Unsuitable loops are logged to the dump
   file together with the reason for rejection.  */

static void
mn10300_scan_for_setlb_lcc (void)
{
  loop_p loop;

  DUMP ("Looking for loops that can use the SETLB insn", NULL_RTX);

  df_analyze ();
  compute_bb_for_insn ();

  /* Find the loops.  */
  loop_optimizer_init (AVOID_CFG_MODIFICATIONS);

  /* FIXME: For now we only investigate innermost loops.  In practice however
     if an inner loop is not suitable for use with the SETLB/Lcc insns, it may
     be the case that its parent loop is suitable.  Thus we should check all
     loops, but work from the innermost outwards.  */
  FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
    {
      const char * reason = NULL;

      /* Check to see if we can modify this loop.  If we cannot
	 then set 'reason' to describe why it could not be done.  */
      if (loop->latch == NULL)
	reason = "it contains multiple latches";
      else if (loop->header != loop->latch)
	/* FIXME: We could handle loops that span multiple blocks,
	   but this requires a lot more work tracking down the branches
	   that need altering, so for now keep things simple.  */
	reason = "the loop spans multiple blocks";
      else if (mn10300_loop_contains_call_insn (loop))
	/* A call would clobber the LB register set up by SETLB
	   (presumably — TODO confirm against the SETLB machine
	   description).  */
	reason = "it contains CALL insns";
      else
	{
	  /* The single latch block's final insn is the loop-back
	     branch to be converted.  */
	  rtx_insn *branch = BB_END (loop->latch);

	  gcc_assert (JUMP_P (branch));
	  if (single_set (branch) == NULL_RTX || ! any_condjump_p (branch))
	    /* We cannot optimize tablejumps and the like.  */
	    /* FIXME: We could handle unconditional jumps.  */
	    reason = "it is not a simple loop";
	  else
	    {
	      rtx_insn *label;

	      if (dump_file)
		flow_loop_dump (loop, dump_file, NULL, 0);

	      /* The loop entry label is where the SETLB will go.  */
	      label = BB_HEAD (loop->header);
	      gcc_assert (LABEL_P (label));

	      mn10300_insert_setlb_lcc (label, branch);
	    }
	}

      /* A non-NULL reason means the loop was left untouched.  */
      if (dump_file && reason != NULL)
	fprintf (dump_file, "Loop starting with insn %d is not suitable because %s\n",
		 INSN_UID (BB_HEAD (loop->header)),
		 reason);
    }

  loop_optimizer_finalize ();

  df_finish_pass (false);

  DUMP ("SETLB scan complete", NULL_RTX);
}
3319
f9e46c25 3320static void
3321mn10300_reorg (void)
3322{
f9b3e8f5 3323 /* These are optimizations, so only run them if optimizing. */
3324 if (TARGET_AM33 && (optimize > 0 || optimize_size))
f9e46c25 3325 {
f9b3e8f5 3326 if (TARGET_ALLOW_SETLB)
3327 mn10300_scan_for_setlb_lcc ();
3328
f9e46c25 3329 if (TARGET_ALLOW_LIW)
3330 mn10300_bundle_liw ();
3331 }
3332}
3333\f
/* Initialize the GCC target structure.  */

/* Late machine-dependent transformations (SETLB/LIW, above).  */
#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG mn10300_reorg

/* Assembler output.  */
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.hword\t"

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS mn10300_legitimize_address

/* Cost model hooks.  */
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST mn10300_address_cost
#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST mn10300_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST mn10300_memory_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS mn10300_rtx_costs

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START mn10300_file_start
#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA mn10300_asm_output_addr_const_extra

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE mn10300_option_override

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO mn10300_encode_section_info

/* Calling-convention hooks.  */
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY mn10300_return_in_memory
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE mn10300_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES mn10300_arg_partial_bytes
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG mn10300_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE mn10300_function_arg_advance

/* Varargs support.  */
#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS mn10300_builtin_saveregs
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START mn10300_va_start

#undef TARGET_CASE_VALUES_THRESHOLD
#define TARGET_CASE_VALUES_THRESHOLD mn10300_case_values_threshold

/* This port still uses the old reload pass, not LRA.  */
#undef TARGET_LRA_P
#define TARGET_LRA_P hook_bool_void_false

/* Addressing hooks.  */
#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P mn10300_legitimate_address_p
#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS mn10300_delegitimize_address
#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P mn10300_legitimate_constant_p

/* Reload hooks.  */
#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS mn10300_preferred_reload_class
#undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
#define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS \
  mn10300_preferred_output_reload_class
#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD mn10300_secondary_reload

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT mn10300_trampoline_init

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE mn10300_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE mn10300_libcall_value

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK mn10300_asm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK mn10300_can_output_mi_thunk

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST mn10300_adjust_sched_cost

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE mn10300_conditional_register_usage

#undef TARGET_MD_ASM_ADJUST
#define TARGET_MD_ASM_ADJUST mn10300_md_asm_adjust

/* Expose the flags register to the post-reload compare-elimination
   pass.  */
#undef TARGET_FLAGS_REGNUM
#define TARGET_FLAGS_REGNUM CC_REG

#undef TARGET_HARD_REGNO_MODE_OK
#define TARGET_HARD_REGNO_MODE_OK mn10300_hard_regno_mode_ok

#undef TARGET_MODES_TIEABLE_P
#define TARGET_MODES_TIEABLE_P mn10300_modes_tieable_p

#undef TARGET_HAVE_SPECULATION_SAFE_VALUE
#define TARGET_HAVE_SPECULATION_SAFE_VALUE speculation_safe_value_not_needed

struct gcc_target targetm = TARGET_INITIALIZER;