]> git.ipfire.org Git - thirdparty/gcc.git/blame - gcc/config/mn10300/mn10300.c
gen-mul-tables.cc: Adjust include files.
[thirdparty/gcc.git] / gcc / config / mn10300 / mn10300.c
CommitLineData
11bb1f11 1/* Subroutines for insn-output.c for Matsushita MN10300 series
5624e564 2 Copyright (C) 1996-2015 Free Software Foundation, Inc.
11bb1f11
JL
3 Contributed by Jeff Law (law@cygnus.com).
4
e7ab5593 5 This file is part of GCC.
11bb1f11 6
e7ab5593
NC
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11bb1f11 11
e7ab5593
NC
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
11bb1f11 16
e7ab5593
NC
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
11bb1f11 20
11bb1f11 21#include "config.h"
c5c76735 22#include "system.h"
4977bab6 23#include "coretypes.h"
c7131fb2 24#include "backend.h"
e11c4407 25#include "target.h"
11bb1f11 26#include "rtl.h"
e11c4407
AM
27#include "tree.h"
28#include "cfghooks.h"
29#include "cfgloop.h"
c7131fb2 30#include "df.h"
e11c4407
AM
31#include "tm_p.h"
32#include "optabs.h"
33#include "regs.h"
34#include "emit-rtl.h"
35#include "recog.h"
36#include "diagnostic-core.h"
40e23961 37#include "alias.h"
d8a2d370
DN
38#include "stor-layout.h"
39#include "varasm.h"
40#include "calls.h"
11bb1f11
JL
41#include "output.h"
42#include "insn-attr.h"
6528281d 43#include "reload.h"
36566b39 44#include "explow.h"
11bb1f11 45#include "expr.h"
a45d420a 46#include "tm-constrs.h"
60393bbc 47#include "cfgrtl.h"
7ee2468b 48#include "dumpfile.h"
9b2b7279 49#include "builtins.h"
11bb1f11 50
994c5d85 51/* This file should be included last. */
d58627a0
RS
52#include "target-def.h"
53
/* This is used in the am33_2.0-linux-gnu port, in which global symbol
   names are not prefixed by underscores, to tell whether to prefix a
   label with a plus sign or not, so that the assembler can tell
   symbol names from register names.  */
int mn10300_protect_label;
59
/* Selected processor type for tuning.  Set from -mtune= in
   mn10300_option_override; defaults to PROCESSOR_DEFAULT.  */
enum processor_type mn10300_tune_cpu = PROCESSOR_DEFAULT;
62
/* Condition-code flag bits, used as a mask to describe which of the
   Z/N/C/V flags a comparison mode sets and a branch condition needs.  */
#define CC_FLAG_Z	1
#define CC_FLAG_N	2
#define CC_FLAG_C	4
#define CC_FLAG_V	8

static int cc_flags_for_mode(machine_mode);
static int cc_flags_for_code(enum rtx_code);
672a6f42 70\f
c5387660 71/* Implement TARGET_OPTION_OVERRIDE. */
c5387660
JM
72static void
73mn10300_option_override (void)
13dd556c
RS
74{
75 if (TARGET_AM33)
76 target_flags &= ~MASK_MULT_BUG;
f3f63737
NC
77 else
78 {
79 /* Disable scheduling for the MN10300 as we do
80 not have timing information available for it. */
81 flag_schedule_insns = 0;
82 flag_schedule_insns_after_reload = 0;
ec815d65
RH
83
84 /* Force enable splitting of wide types, as otherwise it is trivial
85 to run out of registers. Indeed, this works so well that register
86 allocation problems are now more common *without* optimization,
87 when this flag is not enabled by default. */
88 flag_split_wide_types = 1;
f3f63737 89 }
bad41521 90
f3f63737
NC
91 if (mn10300_tune_string)
92 {
93 if (strcasecmp (mn10300_tune_string, "mn10300") == 0)
94 mn10300_tune_cpu = PROCESSOR_MN10300;
95 else if (strcasecmp (mn10300_tune_string, "am33") == 0)
96 mn10300_tune_cpu = PROCESSOR_AM33;
97 else if (strcasecmp (mn10300_tune_string, "am33-2") == 0)
98 mn10300_tune_cpu = PROCESSOR_AM33_2;
99 else if (strcasecmp (mn10300_tune_string, "am34") == 0)
100 mn10300_tune_cpu = PROCESSOR_AM34;
101 else
102 error ("-mtune= expects mn10300, am33, am33-2, or am34");
103 }
13dd556c
RS
104}
105
1bc7c5b6 106static void
f1777882 107mn10300_file_start (void)
11bb1f11 108{
1bc7c5b6 109 default_file_start ();
705ac34f 110
18e9d2f9
AO
111 if (TARGET_AM33_2)
112 fprintf (asm_out_file, "\t.am33_2\n");
113 else if (TARGET_AM33)
1bc7c5b6 114 fprintf (asm_out_file, "\t.am33\n");
11bb1f11
JL
115}
116\f
/* Names of the LIW (long instruction word) sub-operations, indexed by
   the value printed via the 'W' operand code.
   Note: This list must match the liw_op attribute in mn10300.md.  */

static const char *liw_op_names[] =
{
  "add", "cmp", "sub", "mov",
  "and", "or", "xor",
  "asr", "lsr", "asl",
  "none", "max"
};
126
/* Print operand X using operand code CODE to assembly language output file
   FILE.

   Codes handled here:
     'W'      - LIW sub-operation name (see liw_op_names).
     'b'/'B'  - condition name for a comparison ('B' reversed).
     'C'      - call operand; registers are parenthesized.
     'D'      - FP double-register or memory operand.
     'L'/'H'  - least/most significant word of a 64-bit value.
     'A'      - memory operand, always with explicit displacement.
     'N'/'U'  - complemented / plain low byte of a constant.
     'S'      - shift count, masked to 5 bits.
     default  - generic operand.  */

void
mn10300_print_operand (FILE *file, rtx x, int code)
{
  switch (code)
    {
    case 'W':
      {
	unsigned int liw_op = UINTVAL (x);

	gcc_assert (TARGET_ALLOW_LIW);
	gcc_assert (liw_op < LIW_OP_MAX);
	fputs (liw_op_names[liw_op], file);
	break;
      }

    case 'b':
    case 'B':
      {
	enum rtx_code cmp = GET_CODE (x);
	machine_mode mode = GET_MODE (XEXP (x, 0));
	const char *str;
	int have_flags;

	if (code == 'B')
	  cmp = reverse_condition (cmp);
	have_flags = cc_flags_for_mode (mode);

	switch (cmp)
	  {
	  case NE:
	    str = "ne";
	    break;
	  case EQ:
	    str = "eq";
	    break;
	  case GE:
	    /* bge is smaller than bnc.  */
	    str = (have_flags & CC_FLAG_V ? "ge" : "nc");
	    break;
	  case LT:
	    str = (have_flags & CC_FLAG_V ? "lt" : "ns");
	    break;
	  case GT:
	    str = "gt";
	    break;
	  case LE:
	    str = "le";
	    break;
	  case GEU:
	    str = "cc";
	    break;
	  case GTU:
	    str = "hi";
	    break;
	  case LEU:
	    str = "ls";
	    break;
	  case LTU:
	    str = "cs";
	    break;
	  case ORDERED:
	    str = "lge";
	    break;
	  case UNORDERED:
	    str = "uo";
	    break;
	  case LTGT:
	    str = "lg";
	    break;
	  case UNEQ:
	    str = "ue";
	    break;
	  case UNGE:
	    str = "uge";
	    break;
	  case UNGT:
	    str = "ug";
	    break;
	  case UNLE:
	    str = "ule";
	    break;
	  case UNLT:
	    str = "ul";
	    break;
	  default:
	    gcc_unreachable ();
	  }

	/* The chosen condition must only need flags that MODE provides.  */
	gcc_checking_assert ((cc_flags_for_code (cmp) & ~have_flags) == 0);
	fputs (str, file);
      }
      break;

    case 'C':
      /* This is used for the operand to a call instruction;
	 if it's a REG, enclose it in parens, else output
	 the operand normally.  */
      if (REG_P (x))
	{
	  fputc ('(', file);
	  mn10300_print_operand (file, x, 0);
	  fputc (')', file);
	}
      else
	mn10300_print_operand (file, x, 0);
      break;

    case 'D':
      switch (GET_CODE (x))
	{
	case MEM:
	  fputc ('(', file);
	  output_address (XEXP (x, 0));
	  fputc (')', file);
	  break;

	case REG:
	  /* NOTE(review): assumes the FP registers start at hard reg 18,
	     so "fd%d" yields the double-precision register name — confirm
	     against FIRST_FP_REGNUM in mn10300.h.  */
	  fprintf (file, "fd%d", REGNO (x) - 18);
	  break;

	default:
	  gcc_unreachable ();
	}
      break;

      /* These are the least significant word in a 64bit value.  */
    case 'L':
      switch (GET_CODE (x))
	{
	case MEM:
	  fputc ('(', file);
	  output_address (XEXP (x, 0));
	  fputc (')', file);
	  break;

	case REG:
	  fprintf (file, "%s", reg_names[REGNO (x)]);
	  break;

	case SUBREG:
	  fprintf (file, "%s", reg_names[subreg_regno (x)]);
	  break;

	case CONST_DOUBLE:
	  {
	    long val[2];

	    switch (GET_MODE (x))
	      {
	      case DFmode:
		REAL_VALUE_TO_TARGET_DOUBLE
		  (*CONST_DOUBLE_REAL_VALUE (x), val);
		fprintf (file, "0x%lx", val[0]);
		break;;
	      case SFmode:
		REAL_VALUE_TO_TARGET_SINGLE
		  (*CONST_DOUBLE_REAL_VALUE (x), val[0]);
		fprintf (file, "0x%lx", val[0]);
		break;;
	      case VOIDmode:
	      case DImode:
		mn10300_print_operand_address (file,
					       GEN_INT (CONST_DOUBLE_LOW (x)));
		break;
	      default:
		break;
	      }
	    break;
	  }

	case CONST_INT:
	  {
	    rtx low, high;
	    split_double (x, &low, &high);
	    fprintf (file, "%ld", (long)INTVAL (low));
	    break;
	  }

	default:
	  gcc_unreachable ();
	}
      break;

      /* Similarly, but for the most significant word.  */
    case 'H':
      switch (GET_CODE (x))
	{
	case MEM:
	  fputc ('(', file);
	  /* Address the high word at an offset of 4 bytes.  */
	  x = adjust_address (x, SImode, 4);
	  output_address (XEXP (x, 0));
	  fputc (')', file);
	  break;

	case REG:
	  fprintf (file, "%s", reg_names[REGNO (x) + 1]);
	  break;

	case SUBREG:
	  fprintf (file, "%s", reg_names[subreg_regno (x) + 1]);
	  break;

	case CONST_DOUBLE:
	  {
	    long val[2];

	    switch (GET_MODE (x))
	      {
	      case DFmode:
		REAL_VALUE_TO_TARGET_DOUBLE
		  (*CONST_DOUBLE_REAL_VALUE (x), val);
		fprintf (file, "0x%lx", val[1]);
		break;;
	      case SFmode:
		/* A single-precision value has no "high word".  */
		gcc_unreachable ();
	      case VOIDmode:
	      case DImode:
		mn10300_print_operand_address (file,
					       GEN_INT (CONST_DOUBLE_HIGH (x)));
		break;
	      default:
		break;
	      }
	    break;
	  }

	case CONST_INT:
	  {
	    rtx low, high;
	    split_double (x, &low, &high);
	    fprintf (file, "%ld", (long)INTVAL (high));
	    break;
	  }

	default:
	  gcc_unreachable ();
	}
      break;

    case 'A':
      fputc ('(', file);
      /* Force a (reg + 0) form so the displacement is always explicit.  */
      if (REG_P (XEXP (x, 0)))
	output_address (gen_rtx_PLUS (SImode, XEXP (x, 0), const0_rtx));
      else
	output_address (XEXP (x, 0));
      fputc (')', file);
      break;

    case 'N':
      gcc_assert (INTVAL (x) >= -128 && INTVAL (x) <= 255);
      fprintf (file, "%d", (int)((~INTVAL (x)) & 0xff));
      break;

    case 'U':
      gcc_assert (INTVAL (x) >= -128 && INTVAL (x) <= 255);
      fprintf (file, "%d", (int)(INTVAL (x) & 0xff));
      break;

      /* For shift counts.  The hardware ignores the upper bits of
	 any immediate, but the assembler will flag an out of range
	 shift count as an error.  So we mask off the high bits
	 of the immediate here.  */
    case 'S':
      if (CONST_INT_P (x))
	{
	  fprintf (file, "%d", (int)(INTVAL (x) & 0x1f));
	  break;
	}
      /* FALL THROUGH */

    default:
      switch (GET_CODE (x))
	{
	case MEM:
	  fputc ('(', file);
	  output_address (XEXP (x, 0));
	  fputc (')', file);
	  break;

	case PLUS:
	  output_address (x);
	  break;

	case REG:
	  fprintf (file, "%s", reg_names[REGNO (x)]);
	  break;

	case SUBREG:
	  fprintf (file, "%s", reg_names[subreg_regno (x)]);
	  break;

	  /* This will only be single precision....  */
	case CONST_DOUBLE:
	  {
	    unsigned long val;

	    REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), val);
	    fprintf (file, "0x%lx", val);
	    break;
	  }

	case CONST_INT:
	case SYMBOL_REF:
	case CONST:
	case LABEL_REF:
	case CODE_LABEL:
	case UNSPEC:
	  mn10300_print_operand_address (file, x);
	  break;
	default:
	  gcc_unreachable ();
	}
      break;
    }
}
445
/* Output assembly language output for the address ADDR to FILE.  */

void
mn10300_print_operand_address (FILE *file, rtx addr)
{
  switch (GET_CODE (addr))
    {
    case POST_INC:
      /* Post-increment addressing is written "(reg)+"; the register
	 itself is printed by the recursive call.  */
      mn10300_print_operand (file, XEXP (addr, 0), 0);
      fputc ('+', file);
      break;

    case POST_MODIFY:
      /* "reg+,increment" — register, then the modification amount.  */
      mn10300_print_operand (file, XEXP (addr, 0), 0);
      fputc ('+', file);
      fputc (',', file);
      mn10300_print_operand (file, XEXP (addr, 1), 0);
      break;

    case REG:
      mn10300_print_operand (file, addr, 0);
      break;
    case PLUS:
      {
	rtx base = XEXP (addr, 0);
	rtx index = XEXP (addr, 1);

	/* Canonicalize so that BASE is a valid base register and INDEX a
	   valid index register, swapping if necessary.  */
	if (REG_P (index) && !REG_OK_FOR_INDEX_P (index))
	  {
	    rtx x = base;
	    base = index;
	    index = x;

	    gcc_assert (REG_P (index) && REG_OK_FOR_INDEX_P (index));
	  }
	gcc_assert (REG_OK_FOR_BASE_P (base));

	/* Assembler syntax is "index,base".  */
	mn10300_print_operand (file, index, 0);
	fputc (',', file);
	mn10300_print_operand (file, base, 0);
	break;
      }
    case SYMBOL_REF:
      output_addr_const (file, addr);
      break;
    default:
      output_addr_const (file, addr);
      break;
    }
}
496
535bd17c
AS
497/* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA.
498
499 Used for PIC-specific UNSPECs. */
500
501static bool
502mn10300_asm_output_addr_const_extra (FILE *file, rtx x)
503{
504 if (GET_CODE (x) == UNSPEC)
505 {
506 switch (XINT (x, 1))
507 {
535bd17c
AS
508 case UNSPEC_PIC:
509 /* GLOBAL_OFFSET_TABLE or local symbols, no suffix. */
510 output_addr_const (file, XVECEXP (x, 0, 0));
511 break;
512 case UNSPEC_GOT:
513 output_addr_const (file, XVECEXP (x, 0, 0));
514 fputs ("@GOT", file);
515 break;
516 case UNSPEC_GOTOFF:
517 output_addr_const (file, XVECEXP (x, 0, 0));
518 fputs ("@GOTOFF", file);
519 break;
520 case UNSPEC_PLT:
521 output_addr_const (file, XVECEXP (x, 0, 0));
522 fputs ("@PLT", file);
523 break;
524 case UNSPEC_GOTSYM_OFF:
525 assemble_name (file, GOT_SYMBOL_NAME);
526 fputs ("-(", file);
527 output_addr_const (file, XVECEXP (x, 0, 0));
528 fputs ("-.)", file);
529 break;
530 default:
531 return false;
532 }
533 return true;
534 }
535 else
536 return false;
537}
538
18e9d2f9
AO
539/* Count the number of FP registers that have to be saved. */
540static int
f1777882 541fp_regs_to_save (void)
18e9d2f9
AO
542{
543 int i, n = 0;
544
545 if (! TARGET_AM33_2)
546 return 0;
547
548 for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
9d54866d 549 if (df_regs_ever_live_p (i) && ! call_really_used_regs[i])
18e9d2f9
AO
550 ++n;
551
552 return n;
553}
554
f6cd7c62
RS
555/* Print a set of registers in the format required by "movm" and "ret".
556 Register K is saved if bit K of MASK is set. The data and address
557 registers can be stored individually, but the extended registers cannot.
9f5ed61a 558 We assume that the mask already takes that into account. For instance,
8596d0a1 559 bits 14 to 17 must have the same value. */
f6cd7c62
RS
560
561void
f1777882 562mn10300_print_reg_list (FILE *file, int mask)
f6cd7c62
RS
563{
564 int need_comma;
565 int i;
566
567 need_comma = 0;
568 fputc ('[', file);
569
570 for (i = 0; i < FIRST_EXTENDED_REGNUM; i++)
571 if ((mask & (1 << i)) != 0)
572 {
573 if (need_comma)
574 fputc (',', file);
575 fputs (reg_names [i], file);
576 need_comma = 1;
577 }
578
579 if ((mask & 0x3c000) != 0)
580 {
dc759020 581 gcc_assert ((mask & 0x3c000) == 0x3c000);
f6cd7c62
RS
582 if (need_comma)
583 fputc (',', file);
584 fputs ("exreg1", file);
585 need_comma = 1;
586 }
587
588 fputc (']', file);
589}
590
37a185d7
RH
591/* If the MDR register is never clobbered, we can use the RETF instruction
592 which takes the address from the MDR register. This is 3 cycles faster
593 than having to load the address from the stack. */
594
595bool
596mn10300_can_use_retf_insn (void)
597{
598 /* Don't bother if we're not optimizing. In this case we won't
599 have proper access to df_regs_ever_live_p. */
600 if (!optimize)
601 return false;
602
603 /* EH returns alter the saved return address; MDR is not current. */
604 if (crtl->calls_eh_return)
605 return false;
606
607 /* Obviously not if MDR is ever clobbered. */
608 if (df_regs_ever_live_p (MDR_REG))
609 return false;
610
611 /* ??? Careful not to use this during expand_epilogue etc. */
612 gcc_assert (!in_sequence_p ());
613 return leaf_function_p ();
614}
615
616bool
617mn10300_can_use_rets_insn (void)
38c37a0e 618{
040c5757 619 return !mn10300_initial_offset (ARG_POINTER_REGNUM, STACK_POINTER_REGNUM);
38c37a0e
JL
620}
621
f6cd7c62
RS
622/* Returns the set of live, callee-saved registers as a bitmask. The
623 callee-saved extended registers cannot be stored individually, so
20b2e6a0 624 all of them will be included in the mask if any one of them is used.
e902c266
NC
625 Also returns the number of bytes in the registers in the mask if
626 BYTES_SAVED is not NULL. */
f6cd7c62 627
e902c266
NC
628unsigned int
629mn10300_get_live_callee_saved_regs (unsigned int * bytes_saved)
f6cd7c62
RS
630{
631 int mask;
632 int i;
e902c266 633 unsigned int count;
f6cd7c62 634
e902c266 635 count = mask = 0;
18e9d2f9 636 for (i = 0; i <= LAST_EXTENDED_REGNUM; i++)
9d54866d 637 if (df_regs_ever_live_p (i) && ! call_really_used_regs[i])
e902c266
NC
638 {
639 mask |= (1 << i);
640 ++ count;
641 }
642
f6cd7c62 643 if ((mask & 0x3c000) != 0)
e902c266
NC
644 {
645 for (i = 0x04000; i < 0x40000; i <<= 1)
646 if ((mask & i) == 0)
647 ++ count;
648
649 mask |= 0x3c000;
650 }
651
652 if (bytes_saved)
653 * bytes_saved = count * UNITS_PER_WORD;
f6cd7c62
RS
654
655 return mask;
656}
657
/* Mark R as frame related (RTX_FRAME_RELATED_P) and return it, so the
   call can be wrapped around emit_insn etc. in the prologue code.  */

static rtx
F (rtx r)
{
  RTX_FRAME_RELATED_P (r) = 1;
  return r;
}
664
/* Generate an instruction that pushes several registers onto the stack.
   Register K will be saved if bit K in MASK is set.  The function does
   nothing if MASK is zero.

   To be compatible with the "movm" instruction, the lowest-numbered
   register must be stored in the lowest slot.  If MASK is the set
   { R1,...,RN }, where R1...RN are ordered least first, the generated
   instruction will have the form:

       (parallel
         (set (reg:SI 9) (plus:SI (reg:SI 9) (const_int -N*4)))
	 (set (mem:SI (plus:SI (reg:SI 9)
	                       (const_int -1*4)))
	      (reg:SI RN))
	 ...
	 (set (mem:SI (plus:SI (reg:SI 9)
	                       (const_int -N*4)))
	      (reg:SI R1)))  */

static void
mn10300_gen_multiple_store (unsigned int mask)
{
  /* The order in which registers are stored, from SP-4 through SP-N*4.  */
  static const unsigned int store_order[8] = {
    /* e2, e3: never saved */
    FIRST_EXTENDED_REGNUM + 4,
    FIRST_EXTENDED_REGNUM + 5,
    FIRST_EXTENDED_REGNUM + 6,
    FIRST_EXTENDED_REGNUM + 7,
    /* e0, e1, mdrq, mcrh, mcrl, mcvf: never saved.  */
    FIRST_DATA_REGNUM + 2,
    FIRST_DATA_REGNUM + 3,
    FIRST_ADDRESS_REGNUM + 2,
    FIRST_ADDRESS_REGNUM + 3,
    /* d0, d1, a0, a1, mdr, lir, lar: never saved.  */
  };

  rtx x, elts[9];
  unsigned int i;
  int count;

  if (mask == 0)
    return;

  /* Build one SET element per register, slot elts[1..count]; elts[0]
     is reserved for the stack pointer update below.  */
  for (i = count = 0; i < ARRAY_SIZE(store_order); ++i)
    {
      unsigned regno = store_order[i];

      if (((mask >> regno) & 1) == 0)
	continue;

      ++count;
      x = plus_constant (Pmode, stack_pointer_rtx, count * -4);
      x = gen_frame_mem (SImode, x);
      x = gen_rtx_SET (x, gen_rtx_REG (SImode, regno));
      elts[count] = F(x);

      /* Remove the register from the mask so that... */
      mask &= ~(1u << regno);
    }

  /* ... we can make sure that we didn't try to use a register
     not listed in the store order.  */
  gcc_assert (mask == 0);

  /* Create the instruction that updates the stack pointer.  */
  x = plus_constant (Pmode, stack_pointer_rtx, count * -4);
  x = gen_rtx_SET (stack_pointer_rtx, x);
  elts[0] = F(x);

  /* We need one PARALLEL element to update the stack pointer and
     an additional element for each register that is stored.  */
  x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (count + 1, elts));
  F (emit_insn (x));
}
740
/* Return the number of bits set in MASK.  */

static inline unsigned int
popcount (unsigned int mask)
{
  unsigned int bits;

  /* Kernighan's trick: each iteration clears the lowest set bit.  */
  for (bits = 0; mask != 0; mask &= mask - 1)
    bits++;

  return bits;
}
753
/* Expand the function prologue: save callee-saved general registers
   with a single "movm"-style PARALLEL, optionally save FP registers
   (AM33/2.0 only) using the cheapest of several addressing strategies,
   set up the frame pointer if needed, and allocate the local frame.  */

void
mn10300_expand_prologue (void)
{
  HOST_WIDE_INT size = mn10300_frame_size ();
  unsigned int mask;

  mask = mn10300_get_live_callee_saved_regs (NULL);
  /* If we use any of the callee-saved registers, save them now.  */
  mn10300_gen_multiple_store (mask);

  if (flag_stack_usage_info)
    current_function_static_stack_size = size + popcount (mask) * 4;

  if (TARGET_AM33_2 && fp_regs_to_save ())
    {
      int num_regs_to_save = fp_regs_to_save (), i;
      HOST_WIDE_INT xsize;
      enum
      {
	save_sp_merge,
	save_sp_no_merge,
	save_sp_partial_merge,
	save_a0_merge,
	save_a0_no_merge
      } strategy;
      unsigned int strategy_size = (unsigned)-1, this_strategy_size;
      rtx reg;

      if (flag_stack_usage_info)
	current_function_static_stack_size += num_regs_to_save * 4;

      /* We have several different strategies to save FP registers.
	 We can store them using SP offsets, which is beneficial if
	 there are just a few registers to save, or we can use `a0' in
	 post-increment mode (`a0' is the only call-clobbered address
	 register that is never used to pass information to a
	 function).  Furthermore, if we don't need a frame pointer, we
	 can merge the two SP adds into a single one, but this isn't
	 always beneficial; sometimes we can just split the two adds
	 so that we don't exceed a 16-bit constant size.  The code
	 below will select which strategy to use, so as to generate
	 smallest code.  Ties are broken in favor or shorter sequences
	 (in terms of number of instructions).  */

/* Byte cost of an "add #imm, An" / "add #imm, sp" instruction for a
   given immediate S.  */
#define SIZE_ADD_AX(S) ((((S) >= (1 << 15)) || ((S) < -(1 << 15))) ? 6 \
			: (((S) >= (1 << 7)) || ((S) < -(1 << 7))) ? 4 : 2)
#define SIZE_ADD_SP(S) ((((S) >= (1 << 15)) || ((S) < -(1 << 15))) ? 6 \
			: (((S) >= (1 << 7)) || ((S) < -(1 << 7))) ? 4 : 3)

/* We add 0 * (S) in two places to promote to the type of S,
   so that all arms of the conditional have the same type.  */
#define SIZE_FMOV_LIMIT(S,N,L,SIZE1,SIZE2,ELSE) \
  (((S) >= (L)) ? 0 * (S) + (SIZE1) * (N) \
   : ((S) + 4 * (N) >= (L)) ? (((L) - (S)) / 4 * (SIZE2) \
			       + ((S) + 4 * (N) - (L)) / 4 * (SIZE1)) \
   : 0 * (S) + (ELSE))
#define SIZE_FMOV_SP_(S,N) \
  (SIZE_FMOV_LIMIT ((S), (N), (1 << 24), 7, 6, \
		    SIZE_FMOV_LIMIT ((S), (N), (1 << 8), 6, 4, \
				     (S) ? 4 * (N) : 3 + 4 * ((N) - 1))))
#define SIZE_FMOV_SP(S,N) (SIZE_FMOV_SP_ ((unsigned HOST_WIDE_INT)(S), (N)))

      /* Consider alternative save_sp_merge only if we don't need the
	 frame pointer and size is nonzero.  */
      if (! frame_pointer_needed && size)
	{
	  /* Insn: add -(size + 4 * num_regs_to_save), sp.  */
	  this_strategy_size = SIZE_ADD_SP (-(size + 4 * num_regs_to_save));
	  /* Insn: fmov fs#, (##, sp), for each fs# to be saved.  */
	  this_strategy_size += SIZE_FMOV_SP (size, num_regs_to_save);

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = save_sp_merge;
	      strategy_size = this_strategy_size;
	    }
	}

      /* Consider alternative save_sp_no_merge unconditionally.  */
      /* Insn: add -4 * num_regs_to_save, sp.  */
      this_strategy_size = SIZE_ADD_SP (-4 * num_regs_to_save);
      /* Insn: fmov fs#, (##, sp), for each fs# to be saved.  */
      this_strategy_size += SIZE_FMOV_SP (0, num_regs_to_save);
      if (size)
	{
	  /* Insn: add -size, sp.  */
	  this_strategy_size += SIZE_ADD_SP (-size);
	}

      if (this_strategy_size < strategy_size)
	{
	  strategy = save_sp_no_merge;
	  strategy_size = this_strategy_size;
	}

      /* Consider alternative save_sp_partial_merge only if we don't
	 need a frame pointer and size is reasonably large.  */
      if (! frame_pointer_needed && size + 4 * num_regs_to_save > 128)
	{
	  /* Insn: add -128, sp.  */
	  this_strategy_size = SIZE_ADD_SP (-128);
	  /* Insn: fmov fs#, (##, sp), for each fs# to be saved.  */
	  this_strategy_size += SIZE_FMOV_SP (128 - 4 * num_regs_to_save,
					      num_regs_to_save);
	  if (size)
	    {
	      /* Insn: add 128-size, sp.  */
	      this_strategy_size += SIZE_ADD_SP (128 - size);
	    }

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = save_sp_partial_merge;
	      strategy_size = this_strategy_size;
	    }
	}

      /* Consider alternative save_a0_merge only if we don't need a
	 frame pointer, size is nonzero and the user hasn't
	 changed the calling conventions of a0.  */
      if (! frame_pointer_needed && size
	  && call_really_used_regs [FIRST_ADDRESS_REGNUM]
	  && ! fixed_regs[FIRST_ADDRESS_REGNUM])
	{
	  /* Insn: add -(size + 4 * num_regs_to_save), sp.  */
	  this_strategy_size = SIZE_ADD_SP (-(size + 4 * num_regs_to_save));
	  /* Insn: mov sp, a0.  */
	  this_strategy_size++;
	  if (size)
	    {
	      /* Insn: add size, a0.  */
	      this_strategy_size += SIZE_ADD_AX (size);
	    }
	  /* Insn: fmov fs#, (a0+), for each fs# to be saved.  */
	  this_strategy_size += 3 * num_regs_to_save;

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = save_a0_merge;
	      strategy_size = this_strategy_size;
	    }
	}

      /* Consider alternative save_a0_no_merge if the user hasn't
	 changed the calling conventions of a0.  */
      if (call_really_used_regs [FIRST_ADDRESS_REGNUM]
	  && ! fixed_regs[FIRST_ADDRESS_REGNUM])
	{
	  /* Insn: add -4 * num_regs_to_save, sp.  */
	  this_strategy_size = SIZE_ADD_SP (-4 * num_regs_to_save);
	  /* Insn: mov sp, a0.  */
	  this_strategy_size++;
	  /* Insn: fmov fs#, (a0+), for each fs# to be saved.  */
	  this_strategy_size += 3 * num_regs_to_save;
	  if (size)
	    {
	      /* Insn: add -size, sp.  */
	      this_strategy_size += SIZE_ADD_SP (-size);
	    }

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = save_a0_no_merge;
	      strategy_size = this_strategy_size;
	    }
	}

      /* Emit the initial SP add, common to all strategies.  */
      switch (strategy)
	{
	case save_sp_no_merge:
	case save_a0_no_merge:
	  F (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-4 * num_regs_to_save))));
	  xsize = 0;
	  break;

	case save_sp_partial_merge:
	  F (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-128))));
	  xsize = 128 - 4 * num_regs_to_save;
	  size -= xsize;
	  break;

	case save_sp_merge:
	case save_a0_merge:
	  F (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-(size + 4 * num_regs_to_save)))));
	  /* We'll have to adjust FP register saves according to the
	     frame size.  */
	  xsize = size;
	  /* Since we've already created the stack frame, don't do it
	     again at the end of the function.  */
	  size = 0;
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Now prepare register a0, if we have decided to use it.  */
      switch (strategy)
	{
	case save_sp_merge:
	case save_sp_no_merge:
	case save_sp_partial_merge:
	  reg = 0;
	  break;

	case save_a0_merge:
	case save_a0_no_merge:
	  reg = gen_rtx_REG (SImode, FIRST_ADDRESS_REGNUM);
	  F (emit_insn (gen_movsi (reg, stack_pointer_rtx)));
	  if (xsize)
	    F (emit_insn (gen_addsi3 (reg, reg, GEN_INT (xsize))));
	  reg = gen_rtx_POST_INC (SImode, reg);
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Now actually save the FP registers.  */
      for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
	if (df_regs_ever_live_p (i) && ! call_really_used_regs [i])
	  {
	    rtx addr;

	    if (reg)
	      addr = reg;
	    else
	      {
		/* If we aren't using `a0', use an SP offset.  */
		if (xsize)
		  {
		    addr = gen_rtx_PLUS (SImode,
					 stack_pointer_rtx,
					 GEN_INT (xsize));
		  }
		else
		  addr = stack_pointer_rtx;

		xsize += 4;
	      }

	    F (emit_insn (gen_movsf (gen_rtx_MEM (SFmode, addr),
				     gen_rtx_REG (SFmode, i))));
	  }
    }

  /* Now put the frame pointer into the frame pointer register.  */
  if (frame_pointer_needed)
    F (emit_move_insn (frame_pointer_rtx, stack_pointer_rtx));

  /* Allocate stack for this frame.  */
  if (size)
    F (emit_insn (gen_addsi3 (stack_pointer_rtx,
			      stack_pointer_rtx,
			      GEN_INT (-size))));

  if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
    emit_insn (gen_load_pic ());
}
1020
1021void
e7ab5593 1022mn10300_expand_epilogue (void)
11bb1f11 1023{
040c5757 1024 HOST_WIDE_INT size = mn10300_frame_size ();
e902c266
NC
1025 unsigned int reg_save_bytes;
1026
1027 mn10300_get_live_callee_saved_regs (& reg_save_bytes);
1028
18e9d2f9
AO
1029 if (TARGET_AM33_2 && fp_regs_to_save ())
1030 {
1031 int num_regs_to_save = fp_regs_to_save (), i;
1032 rtx reg = 0;
1033
1034 /* We have several options to restore FP registers. We could
1035 load them from SP offsets, but, if there are enough FP
1036 registers to restore, we win if we use a post-increment
1037 addressing mode. */
1038
1039 /* If we have a frame pointer, it's the best option, because we
1040 already know it has the value we want. */
1041 if (frame_pointer_needed)
1042 reg = gen_rtx_REG (SImode, FRAME_POINTER_REGNUM);
1043 /* Otherwise, we may use `a1', since it's call-clobbered and
1044 it's never used for return values. But only do so if it's
1045 smaller than using SP offsets. */
1046 else
1047 {
1048 enum { restore_sp_post_adjust,
1049 restore_sp_pre_adjust,
1050 restore_sp_partial_adjust,
1051 restore_a1 } strategy;
1052 unsigned int this_strategy_size, strategy_size = (unsigned)-1;
1053
1054 /* Consider using sp offsets before adjusting sp. */
1055 /* Insn: fmov (##,sp),fs#, for each fs# to be restored. */
1056 this_strategy_size = SIZE_FMOV_SP (size, num_regs_to_save);
1057 /* If size is too large, we'll have to adjust SP with an
1058 add. */
37a185d7 1059 if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
18e9d2f9
AO
1060 {
1061 /* Insn: add size + 4 * num_regs_to_save, sp. */
1062 this_strategy_size += SIZE_ADD_SP (size + 4 * num_regs_to_save);
1063 }
1064 /* If we don't have to restore any non-FP registers,
1065 we'll be able to save one byte by using rets. */
37a185d7 1066 if (! reg_save_bytes)
18e9d2f9
AO
1067 this_strategy_size--;
1068
1069 if (this_strategy_size < strategy_size)
1070 {
1071 strategy = restore_sp_post_adjust;
1072 strategy_size = this_strategy_size;
1073 }
1074
1075 /* Consider using sp offsets after adjusting sp. */
1076 /* Insn: add size, sp. */
1077 this_strategy_size = SIZE_ADD_SP (size);
1078 /* Insn: fmov (##,sp),fs#, for each fs# to be restored. */
1079 this_strategy_size += SIZE_FMOV_SP (0, num_regs_to_save);
1080 /* We're going to use ret to release the FP registers
8596d0a1 1081 save area, so, no savings. */
18e9d2f9
AO
1082
1083 if (this_strategy_size < strategy_size)
1084 {
1085 strategy = restore_sp_pre_adjust;
1086 strategy_size = this_strategy_size;
1087 }
1088
1089 /* Consider using sp offsets after partially adjusting sp.
1090 When size is close to 32Kb, we may be able to adjust SP
1091 with an imm16 add instruction while still using fmov
1092 (d8,sp). */
37a185d7 1093 if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
18e9d2f9
AO
1094 {
1095 /* Insn: add size + 4 * num_regs_to_save
37a185d7 1096 + reg_save_bytes - 252,sp. */
18e9d2f9 1097 this_strategy_size = SIZE_ADD_SP (size + 4 * num_regs_to_save
c81369fa 1098 + (int) reg_save_bytes - 252);
18e9d2f9 1099	  /* Insn: fmov (##,sp),fs#, for each fs# to be restored.  */
37a185d7 1100 this_strategy_size += SIZE_FMOV_SP (252 - reg_save_bytes
18e9d2f9
AO
1101 - 4 * num_regs_to_save,
1102 num_regs_to_save);
1103 /* We're going to use ret to release the FP registers
8596d0a1 1104 save area, so, no savings. */
18e9d2f9
AO
1105
1106 if (this_strategy_size < strategy_size)
1107 {
1108 strategy = restore_sp_partial_adjust;
1109 strategy_size = this_strategy_size;
1110 }
1111 }
1112
1113 /* Consider using a1 in post-increment mode, as long as the
1114 user hasn't changed the calling conventions of a1. */
9d54866d 1115 if (call_really_used_regs [FIRST_ADDRESS_REGNUM + 1]
18e9d2f9
AO
1116 && ! fixed_regs[FIRST_ADDRESS_REGNUM+1])
1117 {
1118 /* Insn: mov sp,a1. */
1119 this_strategy_size = 1;
1120 if (size)
1121 {
1122 /* Insn: add size,a1. */
1123 this_strategy_size += SIZE_ADD_AX (size);
1124 }
1125 /* Insn: fmov (a1+),fs#, for each fs# to be restored. */
1126 this_strategy_size += 3 * num_regs_to_save;
1127 /* If size is large enough, we may be able to save a
1128 couple of bytes. */
37a185d7 1129 if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
18e9d2f9
AO
1130 {
1131 /* Insn: mov a1,sp. */
1132 this_strategy_size += 2;
1133 }
1134 /* If we don't have to restore any non-FP registers,
1135 we'll be able to save one byte by using rets. */
37a185d7 1136 if (! reg_save_bytes)
18e9d2f9
AO
1137 this_strategy_size--;
1138
1139 if (this_strategy_size < strategy_size)
1140 {
1141 strategy = restore_a1;
1142 strategy_size = this_strategy_size;
1143 }
1144 }
1145
1146 switch (strategy)
1147 {
1148 case restore_sp_post_adjust:
1149 break;
1150
1151 case restore_sp_pre_adjust:
1152 emit_insn (gen_addsi3 (stack_pointer_rtx,
1153 stack_pointer_rtx,
1154 GEN_INT (size)));
1155 size = 0;
1156 break;
1157
1158 case restore_sp_partial_adjust:
1159 emit_insn (gen_addsi3 (stack_pointer_rtx,
1160 stack_pointer_rtx,
1161 GEN_INT (size + 4 * num_regs_to_save
37a185d7
RH
1162 + reg_save_bytes - 252)));
1163 size = 252 - reg_save_bytes - 4 * num_regs_to_save;
18e9d2f9 1164 break;
5abc5de9 1165
18e9d2f9
AO
1166 case restore_a1:
1167 reg = gen_rtx_REG (SImode, FIRST_ADDRESS_REGNUM + 1);
1168 emit_insn (gen_movsi (reg, stack_pointer_rtx));
1169 if (size)
1170 emit_insn (gen_addsi3 (reg, reg, GEN_INT (size)));
1171 break;
1172
1173 default:
dc759020 1174 gcc_unreachable ();
18e9d2f9
AO
1175 }
1176 }
1177
1178 /* Adjust the selected register, if any, for post-increment. */
1179 if (reg)
1180 reg = gen_rtx_POST_INC (SImode, reg);
1181
1182 for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
9d54866d 1183 if (df_regs_ever_live_p (i) && ! call_really_used_regs [i])
18e9d2f9
AO
1184 {
1185 rtx addr;
5abc5de9 1186
18e9d2f9
AO
1187 if (reg)
1188 addr = reg;
1189 else if (size)
1190 {
1191 /* If we aren't using a post-increment register, use an
8596d0a1 1192 SP offset. */
18e9d2f9
AO
1193 addr = gen_rtx_PLUS (SImode,
1194 stack_pointer_rtx,
1195 GEN_INT (size));
1196 }
1197 else
1198 addr = stack_pointer_rtx;
1199
1200 size += 4;
1201
2720cc47
NC
1202 emit_insn (gen_movsf (gen_rtx_REG (SFmode, i),
1203 gen_rtx_MEM (SFmode, addr)));
18e9d2f9
AO
1204 }
1205
1206 /* If we were using the restore_a1 strategy and the number of
1207 bytes to be released won't fit in the `ret' byte, copy `a1'
1208 to `sp', to avoid having to use `add' to adjust it. */
37a185d7 1209 if (! frame_pointer_needed && reg && size + reg_save_bytes > 255)
18e9d2f9
AO
1210 {
1211 emit_move_insn (stack_pointer_rtx, XEXP (reg, 0));
1212 size = 0;
1213 }
1214 }
1215
5d29a95f
JL
1216 /* Maybe cut back the stack, except for the register save area.
1217
1218 If the frame pointer exists, then use the frame pointer to
1219 cut back the stack.
1220
1221 If the stack size + register save area is more than 255 bytes,
1222 then the stack must be cut back here since the size + register
5abc5de9 1223 save size is too big for a ret/retf instruction.
5d29a95f
JL
1224
1225 Else leave it alone, it will be cut back as part of the
1226 ret/retf instruction, or there wasn't any stack to begin with.
1227
dab66575 1228 Under no circumstances should the register save area be
5d29a95f
JL
1229 deallocated here, that would leave a window where an interrupt
1230 could occur and trash the register save area. */
11bb1f11
JL
1231 if (frame_pointer_needed)
1232 {
11bb1f11 1233 emit_move_insn (stack_pointer_rtx, frame_pointer_rtx);
4246e0c5
JL
1234 size = 0;
1235 }
37a185d7 1236 else if (size + reg_save_bytes > 255)
4246e0c5
JL
1237 {
1238 emit_insn (gen_addsi3 (stack_pointer_rtx,
1239 stack_pointer_rtx,
1240 GEN_INT (size)));
1241 size = 0;
11bb1f11 1242 }
11bb1f11 1243
ed6089d6 1244 /* Adjust the stack and restore callee-saved registers, if any. */
37a185d7 1245 if (mn10300_can_use_rets_insn ())
3810076b 1246 emit_jump_insn (ret_rtx);
777fbf09 1247 else
e902c266 1248 emit_jump_insn (gen_return_ret (GEN_INT (size + reg_save_bytes)));
11bb1f11
JL
1249}
1250
05713b80 1251/* Recognize the PARALLEL rtx generated by mn10300_gen_multiple_store().
f6cd7c62
RS
1252 This function is for MATCH_PARALLEL and so assumes OP is known to be
1253 parallel. If OP is a multiple store, return a mask indicating which
1254 registers it saves. Return 0 otherwise. */
1255
c345a0b1
NC
1256unsigned int
1257mn10300_store_multiple_regs (rtx op)
f6cd7c62
RS
1258{
1259 int count;
1260 int mask;
1261 int i;
1262 unsigned int last;
1263 rtx elt;
1264
1265 count = XVECLEN (op, 0);
1266 if (count < 2)
1267 return 0;
1268
1269 /* Check that first instruction has the form (set (sp) (plus A B)) */
1270 elt = XVECEXP (op, 0, 0);
1271 if (GET_CODE (elt) != SET
e7ab5593 1272 || (! REG_P (SET_DEST (elt)))
f6cd7c62
RS
1273 || REGNO (SET_DEST (elt)) != STACK_POINTER_REGNUM
1274 || GET_CODE (SET_SRC (elt)) != PLUS)
1275 return 0;
1276
1277 /* Check that A is the stack pointer and B is the expected stack size.
1278 For OP to match, each subsequent instruction should push a word onto
1279 the stack. We therefore expect the first instruction to create
8596d0a1 1280 COUNT-1 stack slots. */
f6cd7c62 1281 elt = SET_SRC (elt);
e7ab5593 1282 if ((! REG_P (XEXP (elt, 0)))
f6cd7c62 1283 || REGNO (XEXP (elt, 0)) != STACK_POINTER_REGNUM
e7ab5593 1284 || (! CONST_INT_P (XEXP (elt, 1)))
f6cd7c62
RS
1285 || INTVAL (XEXP (elt, 1)) != -(count - 1) * 4)
1286 return 0;
1287
f6cd7c62
RS
1288 mask = 0;
1289 for (i = 1; i < count; i++)
1290 {
cc909bba
RH
1291 /* Check that element i is a (set (mem M) R). */
1292 /* ??? Validate the register order a-la mn10300_gen_multiple_store.
1293 Remember: the ordering is *not* monotonic. */
f6cd7c62
RS
1294 elt = XVECEXP (op, 0, i);
1295 if (GET_CODE (elt) != SET
e7ab5593 1296 || (! MEM_P (SET_DEST (elt)))
cc909bba 1297 || (! REG_P (SET_SRC (elt))))
f6cd7c62
RS
1298 return 0;
1299
cc909bba 1300 /* Remember which registers are to be saved. */
f6cd7c62
RS
1301 last = REGNO (SET_SRC (elt));
1302 mask |= (1 << last);
1303
1304 /* Check that M has the form (plus (sp) (const_int -I*4)) */
1305 elt = XEXP (SET_DEST (elt), 0);
1306 if (GET_CODE (elt) != PLUS
e7ab5593 1307 || (! REG_P (XEXP (elt, 0)))
f6cd7c62 1308 || REGNO (XEXP (elt, 0)) != STACK_POINTER_REGNUM
e7ab5593 1309 || (! CONST_INT_P (XEXP (elt, 1)))
f6cd7c62
RS
1310 || INTVAL (XEXP (elt, 1)) != -i * 4)
1311 return 0;
1312 }
1313
8596d0a1 1314 /* All or none of the callee-saved extended registers must be in the set. */
f6cd7c62
RS
1315 if ((mask & 0x3c000) != 0
1316 && (mask & 0x3c000) != 0x3c000)
1317 return 0;
1318
1319 return mask;
1320}
1321
f2831cc9
AS
/* Implement TARGET_PREFERRED_RELOAD_CLASS.  */

static reg_class_t
mn10300_preferred_reload_class (rtx x, reg_class_t rclass)
{
  /* The stack pointer itself can only be copied via a general (AM33)
     or address (MN103) register, so steer reloads of it that way.  */
  if (x == stack_pointer_rtx && rclass != SP_REGS)
    return (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
  /* For memory, pseudo registers, and subregs of pseudos, restrict
     RCLASS via LIMIT_RELOAD_CLASS for X's mode.  */
  else if (MEM_P (x)
	   || (REG_P (x)
	       && !HARD_REGISTER_P (x))
	   || (GET_CODE (x) == SUBREG
	       && REG_P (SUBREG_REG (x))
	       && !HARD_REGISTER_P (SUBREG_REG (x))))
    return LIMIT_RELOAD_CLASS (GET_MODE (x), rclass);
  /* Hard registers and constants: the requested class is fine.  */
  else
    return rclass;
}
1339
1340/* Implement TARGET_PREFERRED_OUTPUT_RELOAD_CLASS. */
1341
1342static reg_class_t
1343mn10300_preferred_output_reload_class (rtx x, reg_class_t rclass)
1344{
1345 if (x == stack_pointer_rtx && rclass != SP_REGS)
8b119bb6 1346 return (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
f2831cc9
AS
1347 return rclass;
1348}
1349
/* Implement TARGET_SECONDARY_RELOAD.  Return the register class needed
   as an intermediary when copying X to/from a register of class
   RCLASS_I in MODE, or NO_REGS when no intermediate is required.  */

static reg_class_t
mn10300_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
			  machine_mode mode, secondary_reload_info *sri)
{
  enum reg_class rclass = (enum reg_class) rclass_i;
  enum reg_class xclass = NO_REGS;
  unsigned int xregno = INVALID_REGNUM;

  /* Determine the hard register (and its class) behind X, resolving
     allocated pseudos via true_regnum.  */
  if (REG_P (x))
    {
      xregno = REGNO (x);
      if (xregno >= FIRST_PSEUDO_REGISTER)
	xregno = true_regnum (x);
      if (xregno != INVALID_REGNUM)
	xclass = REGNO_REG_CLASS (xregno);
    }

  if (!TARGET_AM33)
    {
      /* Memory load/stores less than a full word wide can't have an
	 address or stack pointer destination.  They must use a data
	 register as an intermediate register.  */
      if (rclass != DATA_REGS
	  && (mode == QImode || mode == HImode)
	  && xclass == NO_REGS)
	return DATA_REGS;

      /* We can only move SP to/from an address register.  */
      if (in_p
	  && rclass == SP_REGS
	  && xclass != ADDRESS_REGS)
	return ADDRESS_REGS;
      if (!in_p
	  && xclass == SP_REGS
	  && rclass != ADDRESS_REGS
	  && rclass != SP_OR_ADDRESS_REGS)
	return ADDRESS_REGS;
    }

  /* We can't directly load sp + const_int into a register;
     we must use an address register as a scratch.  The named
     expander pattern does the copy, so no extra class is needed.  */
  if (in_p
      && rclass != SP_REGS
      && rclass != SP_OR_ADDRESS_REGS
      && rclass != SP_OR_GENERAL_REGS
      && GET_CODE (x) == PLUS
      && (XEXP (x, 0) == stack_pointer_rtx
	  || XEXP (x, 1) == stack_pointer_rtx))
    {
      sri->icode = CODE_FOR_reload_plus_sp_const;
      return NO_REGS;
    }

  /* We can only move MDR to/from a data register.  */
  if (rclass == MDR_REGS && xclass != DATA_REGS)
    return DATA_REGS;
  if (xclass == MDR_REGS && rclass != DATA_REGS)
    return DATA_REGS;

  /* We can't load/store an FP register from a constant address.
     Route the address through a general register instead.  */
  if (TARGET_AM33_2
      && (rclass == FP_REGS || xclass == FP_REGS)
      && (xclass == NO_REGS || rclass == NO_REGS))
    {
      rtx addr = NULL;

      /* For an unallocated pseudo, look at its memory equivalence.  */
      if (xregno >= FIRST_PSEUDO_REGISTER && xregno != INVALID_REGNUM)
	{
	  addr = reg_equiv_mem (xregno);
	  if (addr)
	    addr = XEXP (addr, 0);
	}
      else if (MEM_P (x))
	addr = XEXP (x, 0);

      if (addr && CONSTANT_ADDRESS_P (addr))
	return GENERAL_REGS;
    }

  /* Otherwise assume no secondary reloads are needed.  */
  return NO_REGS;
}
1433
040c5757
RH
1434int
1435mn10300_frame_size (void)
1436{
1437 /* size includes the fixed stack space needed for function calls. */
1438 int size = get_frame_size () + crtl->outgoing_args_size;
1439
1440 /* And space for the return pointer. */
1441 size += crtl->outgoing_args_size ? 4 : 0;
1442
1443 return size;
1444}
1445
/* Return the initial offset between register FROM and register TO,
   used by ELIMINABLE_REGS-style elimination.  FROM must be the
   argument or frame pointer; TO must be the frame or stack pointer.  */

int
mn10300_initial_offset (int from, int to)
{
  int diff = 0;

  gcc_assert (from == ARG_POINTER_REGNUM || from == FRAME_POINTER_REGNUM);
  gcc_assert (to == FRAME_POINTER_REGNUM || to == STACK_POINTER_REGNUM);

  /* Frame pointer to stack pointer: the size of the local frame.  */
  if (to == STACK_POINTER_REGNUM)
    diff = mn10300_frame_size ();

  /* The difference between the argument pointer and the frame pointer
     is the size of the callee register save area.  */
  if (from == ARG_POINTER_REGNUM)
    {
      unsigned int reg_save_bytes;

      mn10300_get_live_callee_saved_regs (& reg_save_bytes);
      diff += reg_save_bytes;
      /* Plus the saved FP registers, four bytes each.  */
      diff += 4 * fp_regs_to_save ();
    }

  return diff;
}
22ef4e9b 1470
bd5bd7ac
KH
1471/* Worker function for TARGET_RETURN_IN_MEMORY. */
1472
9024ea92 1473static bool
586de218 1474mn10300_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
9024ea92
KH
1475{
1476 /* Return values > 8 bytes in length in memory. */
b1eb8119
DD
1477 return (int_size_in_bytes (type) > 8
1478 || int_size_in_bytes (type) == 0
1479 || TYPE_MODE (type) == BLKmode);
9024ea92
KH
1480}
1481
22ef4e9b
JL
/* Flush the argument registers to the stack for a stdarg function;
   return the new argument pointer.  */
static rtx
mn10300_builtin_saveregs (void)
{
  rtx offset, mem;
  tree fntype = TREE_TYPE (current_function_decl);
  /* For a non-stdarg (unprototyped varargs) function, skip one word;
     for a true stdarg function, start at the incoming arg offset.  */
  int argadj = ((!stdarg_p (fntype))
		? UNITS_PER_WORD : 0);
  alias_set_type set = get_varargs_alias_set ();

  if (argadj)
    offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, argadj);
  else
    offset = crtl->args.arg_offset_rtx;

  /* Spill the two register-passed argument words (hard regs 0 and 1 —
     presumably d0/d1, the argument registers) to their stack slots,
     tagging both stores with the varargs alias set.  */
  mem = gen_rtx_MEM (SImode, crtl->args.internal_arg_pointer);
  set_mem_alias_set (mem, set);
  emit_move_insn (mem, gen_rtx_REG (SImode, 0));

  mem = gen_rtx_MEM (SImode,
		     plus_constant (Pmode,
				    crtl->args.internal_arg_pointer, 4));
  set_mem_alias_set (mem, set);
  emit_move_insn (mem, gen_rtx_REG (SImode, 1));

  /* Return internal_arg_pointer + offset as the new argument pointer.  */
  return copy_to_reg (expand_binop (Pmode, add_optab,
				    crtl->args.internal_arg_pointer,
				    offset, 0, 0, OPTAB_LIB_WIDEN));
}
1512
/* Implement TARGET_EXPAND_BUILTIN_VA_START.  The incoming NEXTARG is
   deliberately discarded: the argument registers must be flushed to
   the stack first, and expand_builtin_saveregs returns the resulting
   argument pointer.  */
static void
mn10300_va_start (tree valist, rtx nextarg)
{
  nextarg = expand_builtin_saveregs ();
  std_expand_builtin_va_start (valist, nextarg);
}
1519
8cd5a4e0
RH
1520/* Return true when a parameter should be passed by reference. */
1521
1522static bool
d5cc9181 1523mn10300_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
ef4bddc2 1524 machine_mode mode, const_tree type,
8cd5a4e0
RH
1525 bool named ATTRIBUTE_UNUSED)
1526{
1527 unsigned HOST_WIDE_INT size;
1528
1529 if (type)
1530 size = int_size_in_bytes (type);
1531 else
1532 size = GET_MODE_SIZE (mode);
1533
b1eb8119 1534 return (size > 8 || size == 0);
8cd5a4e0
RH
1535}
1536
22ef4e9b 1537/* Return an RTX to represent where a value with mode MODE will be returned
990dc016 1538 from a function. If the result is NULL_RTX, the argument is pushed. */
22ef4e9b 1539
ce236858 1540static rtx
ef4bddc2 1541mn10300_function_arg (cumulative_args_t cum_v, machine_mode mode,
ce236858 1542 const_tree type, bool named ATTRIBUTE_UNUSED)
22ef4e9b 1543{
d5cc9181 1544 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
990dc016 1545 rtx result = NULL_RTX;
f4a88680 1546 int size;
22ef4e9b
JL
1547
1548 /* We only support using 2 data registers as argument registers. */
1549 int nregs = 2;
1550
1551 /* Figure out the size of the object to be passed. */
1552 if (mode == BLKmode)
1553 size = int_size_in_bytes (type);
1554 else
1555 size = GET_MODE_SIZE (mode);
1556
22ef4e9b
JL
1557 cum->nbytes = (cum->nbytes + 3) & ~3;
1558
1559 /* Don't pass this arg via a register if all the argument registers
1560 are used up. */
1561 if (cum->nbytes > nregs * UNITS_PER_WORD)
990dc016 1562 return result;
22ef4e9b
JL
1563
1564 /* Don't pass this arg via a register if it would be split between
1565 registers and memory. */
1566 if (type == NULL_TREE
1567 && cum->nbytes + size > nregs * UNITS_PER_WORD)
990dc016 1568 return result;
22ef4e9b
JL
1569
1570 switch (cum->nbytes / UNITS_PER_WORD)
1571 {
1572 case 0:
990dc016 1573 result = gen_rtx_REG (mode, FIRST_ARGUMENT_REGNUM);
22ef4e9b
JL
1574 break;
1575 case 1:
990dc016 1576 result = gen_rtx_REG (mode, FIRST_ARGUMENT_REGNUM + 1);
22ef4e9b
JL
1577 break;
1578 default:
990dc016 1579 break;
22ef4e9b
JL
1580 }
1581
1582 return result;
1583}
1584
ce236858
NF
1585/* Update the data in CUM to advance over an argument
1586 of mode MODE and data type TYPE.
1587 (TYPE is null for libcalls where that information may not be available.) */
1588
1589static void
ef4bddc2 1590mn10300_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
ce236858
NF
1591 const_tree type, bool named ATTRIBUTE_UNUSED)
1592{
d5cc9181
JR
1593 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
1594
ce236858
NF
1595 cum->nbytes += (mode != BLKmode
1596 ? (GET_MODE_SIZE (mode) + 3) & ~3
1597 : (int_size_in_bytes (type) + 3) & ~3);
1598}
1599
78a52f11
RH
/* Return the number of bytes of registers to use for an argument passed
   partially in registers and partially in memory.  Must mirror the
   decisions made in mn10300_function_arg above.  */

static int
mn10300_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
			   tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  int size;

  /* We only support using 2 data registers as argument registers.  */
  int nregs = 2;

  /* Figure out the size of the object to be passed.  */
  if (mode == BLKmode)
    size = int_size_in_bytes (type);
  else
    size = GET_MODE_SIZE (mode);

  /* Round the running byte count up to a word boundary.  */
  cum->nbytes = (cum->nbytes + 3) & ~3;

  /* Don't pass this arg via a register if all the argument registers
     are used up.  */
  if (cum->nbytes > nregs * UNITS_PER_WORD)
    return 0;

  /* The argument fits entirely in registers — nothing is partial.  */
  if (cum->nbytes + size <= nregs * UNITS_PER_WORD)
    return 0;

  /* Don't pass this arg via a register if it would be split between
     registers and memory.  */
  if (type == NULL_TREE
      && cum->nbytes + size > nregs * UNITS_PER_WORD)
    return 0;

  /* The remaining register bytes hold the leading part of the arg.  */
  return nregs * UNITS_PER_WORD - cum->nbytes;
}
1637
b1eb8119
DD
/* Return the location of the function's value.  This will be either
   $d0 for integer functions, $a0 for pointers, or a PARALLEL of both
   $d0 and $a0 if the -mreturn-pointer-on-d0 flag is set.  Note that
   we only return the PARALLEL for outgoing values; we do not want
   callers relying on this extra copy.  */

static rtx
mn10300_function_value (const_tree valtype,
			const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
			bool outgoing)
{
  rtx rv;
  machine_mode mode = TYPE_MODE (valtype);

  /* Non-pointer values come back in the first data register.  */
  if (! POINTER_TYPE_P (valtype))
    return gen_rtx_REG (mode, FIRST_DATA_REGNUM);
  /* Pointers normally come back in the first address register; the
     dual-register PARALLEL is only used for outgoing values when
     TARGET_PTR_A0D0 is enabled and no hidden struct return exists.  */
  else if (! TARGET_PTR_A0D0 || ! outgoing
	   || cfun->returns_struct)
    return gen_rtx_REG (mode, FIRST_ADDRESS_REGNUM);

  /* Build (parallel [(expr_list (reg a0) 0) (expr_list (reg d0) 0)])
     so the pointer result is available in both registers.  */
  rv = gen_rtx_PARALLEL (mode, rtvec_alloc (2));
  XVECEXP (rv, 0, 0)
    = gen_rtx_EXPR_LIST (VOIDmode,
			 gen_rtx_REG (mode, FIRST_ADDRESS_REGNUM),
			 GEN_INT (0));

  XVECEXP (rv, 0, 1)
    = gen_rtx_EXPR_LIST (VOIDmode,
			 gen_rtx_REG (mode, FIRST_DATA_REGNUM),
			 GEN_INT (0));
  return rv;
}
1670
34732b0a
AS
/* Implements TARGET_LIBCALL_VALUE.  */

static rtx
mn10300_libcall_value (machine_mode mode,
		       const_rtx fun ATTRIBUTE_UNUSED)
{
  /* Library call results always come back in the first data register.  */
  return gen_rtx_REG (mode, FIRST_DATA_REGNUM);
}
1679
1680/* Implements FUNCTION_VALUE_REGNO_P. */
1681
1682bool
1683mn10300_function_value_regno_p (const unsigned int regno)
1684{
1685 return (regno == FIRST_DATA_REGNUM || regno == FIRST_ADDRESS_REGNUM);
1686}
1687
/* Output an addition operation.  Select the shortest assembler
   template for OPERANDS[0] = OPERANDS[1] + OPERANDS[2]; NEED_FLAGS
   is true when a later insn consumes the condition flags, which
   rules out the flag-free `inc'/`inc4' forms.  */

const char *
mn10300_output_add (rtx operands[3], bool need_flags)
{
  rtx dest, src1, src2;
  unsigned int dest_regnum, src1_regnum, src2_regnum;
  enum reg_class src1_class, src2_class, dest_class;

  dest = operands[0];
  src1 = operands[1];
  src2 = operands[2];

  dest_regnum = true_regnum (dest);
  src1_regnum = true_regnum (src1);

  dest_class = REGNO_REG_CLASS (dest_regnum);
  src1_class = REGNO_REG_CLASS (src1_regnum);

  /* reg += const_int: dest and src1 must already be the same register.  */
  if (CONST_INT_P (src2))
    {
      gcc_assert (dest_regnum == src1_regnum);

      /* `inc' and `inc4' are shorter but do not set flags usefully.  */
      if (src2 == const1_rtx && !need_flags)
	return "inc %0";
      if (INTVAL (src2) == 4 && !need_flags && dest_class != DATA_REGS)
	return "inc4 %0";

      gcc_assert (!need_flags || dest_class != SP_REGS);
      return "add %2,%0";
    }
  else if (CONSTANT_P (src2))
    return "add %2,%0";

  src2_regnum = true_regnum (src2);
  src2_class = REGNO_REG_CLASS (src2_regnum);

  /* Two-address forms when the destination overlaps a source.  */
  if (dest_regnum == src1_regnum)
    return "add %2,%0";
  if (dest_regnum == src2_regnum)
    return "add %1,%0";

  /* The rest of the cases are reg = reg+reg.  For AM33, we can implement
     this directly, as below, but when optimizing for space we can sometimes
     do better by using a mov+add.  For MN103, we claimed that we could
     implement a three-operand add because the various move and add insns
     change sizes across register classes, and we can often do better than
     reload in choosing which operand to move.  */
  if (TARGET_AM33 && optimize_insn_for_speed_p ())
    return "add %2,%1,%0";

  /* Catch cases where no extended register was used.  */
  if (src1_class != EXTENDED_REGS
      && src2_class != EXTENDED_REGS
      && dest_class != EXTENDED_REGS)
    {
      /* We have to copy one of the sources into the destination, then
	 add the other source to the destination.

	 Carefully select which source to copy to the destination; a
	 naive implementation will waste a byte when the source classes
	 are different and the destination is an address register.
	 Selecting the lowest cost register copy will optimize this
	 sequence.  */
      if (src1_class == dest_class)
	return "mov %1,%0\n\tadd %2,%0";
      else
	return "mov %2,%0\n\tadd %1,%0";
    }

  /* At least one register is an extended register.  */

  /* The three operand add instruction on the am33 is a win iff the
     output register is an extended register, or if both source
     registers are extended registers.  */
  if (dest_class == EXTENDED_REGS || src1_class == src2_class)
    return "add %2,%1,%0";

  /* It is better to copy one of the sources to the destination, then
     perform a 2 address add.  The destination in this case must be
     an address or data register and one of the sources must be an
     extended register and the remaining source must not be an extended
     register.

     The best code for this case is to copy the extended reg to the
     destination, then emit a two address add.  */
  if (src1_class == EXTENDED_REGS)
    return "mov %1,%0\n\tadd %2,%0";
  else
    return "mov %2,%0\n\tadd %1,%0";
}
460f4b9d 1779
e9ad4573
JL
1780/* Return 1 if X contains a symbolic expression. We know these
1781 expressions will have one of a few well defined forms, so
1782 we need only check those forms. */
e7ab5593 1783
e9ad4573 1784int
e7ab5593 1785mn10300_symbolic_operand (rtx op,
ef4bddc2 1786 machine_mode mode ATTRIBUTE_UNUSED)
e9ad4573
JL
1787{
1788 switch (GET_CODE (op))
1789 {
1790 case SYMBOL_REF:
1791 case LABEL_REF:
1792 return 1;
1793 case CONST:
1794 op = XEXP (op, 0);
1795 return ((GET_CODE (XEXP (op, 0)) == SYMBOL_REF
1796 || GET_CODE (XEXP (op, 0)) == LABEL_REF)
f3f63737 1797 && CONST_INT_P (XEXP (op, 1)));
e9ad4573
JL
1798 default:
1799 return 0;
1800 }
1801}
1802
/* Try machine dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   Normally it is always safe for this macro to do nothing.  It exists to
   recognize opportunities to optimize the output.

   But on a few ports with segmented architectures and indexed addressing
   (mn10300, hppa) it is used to rewrite certain problematical addresses.  */

static rtx
mn10300_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			    machine_mode mode ATTRIBUTE_UNUSED)
{
  /* NOTE(review): this legitimizes OLDX (the pre-break_out_memory_refs
     form) rather than X, even though OLDX is marked ATTRIBUTE_UNUSED —
     presumably intentional, but worth confirming against history.  */
  if (flag_pic && ! mn10300_legitimate_pic_operand_p (x))
    x = mn10300_legitimize_pic_address (oldx, NULL_RTX);

  /* Uh-oh.  We might have an address for x[n-100000].  This needs
     special handling to avoid creating an indexed memory address
     with x-100000 as the base.  */
  if (GET_CODE (x) == PLUS
      && mn10300_symbolic_operand (XEXP (x, 1), VOIDmode))
    {
      /* Ugly.  We modify things here so that the address offset specified
	 by the index expression is computed first, then added to x to form
	 the entire address.  */

      rtx regx1, regy1, regy2, y;

      /* Strip off any CONST.  */
      y = XEXP (x, 1);
      if (GET_CODE (y) == CONST)
	y = XEXP (y, 0);

      if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
	{
	  /* Force each sub-expression into a register, combine the
	     symbolic parts first, then add the original base.  */
	  regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
	  regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
	  regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
	  regx1 = force_reg (Pmode,
			     gen_rtx_fmt_ee (GET_CODE (y), Pmode, regx1,
					     regy2));
	  return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
	}
    }
  return x;
}
460ad325 1853
/* Convert a non-PIC address in `orig' to a PIC address using @GOT or
   @GOTOFF in `reg'.  Returns REG (a new pseudo if REG was NULL) holding
   the legitimized address, or ORIG unchanged if no conversion applies.  */

rtx
mn10300_legitimize_pic_address (rtx orig, rtx reg)
{
  rtx x;

  /* Labels, constant-pool entries and local symbols: their offset from
     the GOT base is a link-time constant, so use @GOTOFF plus the PIC
     register.  */
  if (GET_CODE (orig) == LABEL_REF
      || (GET_CODE (orig) == SYMBOL_REF
	  && (CONSTANT_POOL_ADDRESS_P (orig)
	      || ! MN10300_GLOBAL_P (orig))))
    {
      if (reg == NULL)
	reg = gen_reg_rtx (Pmode);

      x = gen_rtx_UNSPEC (SImode, gen_rtvec (1, orig), UNSPEC_GOTOFF);
      x = gen_rtx_CONST (SImode, x);
      emit_move_insn (reg, x);

      x = emit_insn (gen_addsi3 (reg, reg, pic_offset_table_rtx));
    }
  /* Global symbols: load the address from the GOT slot.  */
  else if (GET_CODE (orig) == SYMBOL_REF)
    {
      if (reg == NULL)
	reg = gen_reg_rtx (Pmode);

      x = gen_rtx_UNSPEC (SImode, gen_rtvec (1, orig), UNSPEC_GOT);
      x = gen_rtx_CONST (SImode, x);
      x = gen_rtx_PLUS (SImode, pic_offset_table_rtx, x);
      x = gen_const_mem (SImode, x);

      x = emit_move_insn (reg, x);
    }
  else
    return orig;

  /* Record the original address so later passes can simplify.  */
  set_unique_reg_note (x, REG_EQUAL, orig);
  return reg;
}
1894
/* Return zero if X references a SYMBOL_REF or LABEL_REF whose symbol
   isn't protected by a PIC unspec; nonzero otherwise.  */

int
mn10300_legitimate_pic_operand_p (rtx x)
{
  const char *fmt;
  int i;

  /* A bare symbol or label is not PIC-safe.  */
  if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
    return 0;

  /* A recognized PIC wrapper protects everything beneath it.  */
  if (GET_CODE (x) == UNSPEC
      && (XINT (x, 1) == UNSPEC_PIC
	  || XINT (x, 1) == UNSPEC_GOT
	  || XINT (x, 1) == UNSPEC_GOTOFF
	  || XINT (x, 1) == UNSPEC_PLT
	  || XINT (x, 1) == UNSPEC_GOTSYM_OFF))
    return 1;

  /* Otherwise recurse into every rtx ('e') and rtx-vector ('E')
     operand; a single unprotected reference poisons the whole.  */
  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
	{
	  int j;

	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    if (! mn10300_legitimate_pic_operand_p (XVECEXP (x, i, j)))
	      return 0;
	}
      else if (fmt[i] == 'e'
	       && ! mn10300_legitimate_pic_operand_p (XEXP (x, i)))
	return 0;
    }

  return 1;
}
1933
/* Return TRUE if the address X, taken from a (MEM:MODE X) rtx, is
   legitimate, and FALSE otherwise.

   On the mn10300, the value in the address register must be
   in the same memory space/segment as the effective address.

   This is problematical for reload since it does not understand
   that base+index != index+base in a memory reference.

   Note it is still possible to use reg+reg addressing modes,
   it's just much more difficult.  For a discussion of a possible
   workaround and solution, see the comments in pa.c before the
   function record_unscaled_index_insn_codes.  */

static bool
mn10300_legitimate_address_p (machine_mode mode, rtx x, bool strict)
{
  rtx base, index;

  /* Constant addresses are fine, unless PIC requires protection.  */
  if (CONSTANT_ADDRESS_P (x))
    return !flag_pic || mn10300_legitimate_pic_operand_p (x);

  /* A plain base register.  */
  if (RTX_OK_FOR_BASE_P (x, strict))
    return true;

  /* AM33 adds post-increment and post-modify for word-sized and
     half-word accesses.  */
  if (TARGET_AM33 && (mode == SImode || mode == SFmode || mode == HImode))
    {
      if (GET_CODE (x) == POST_INC)
	return RTX_OK_FOR_BASE_P (XEXP (x, 0), strict);
      if (GET_CODE (x) == POST_MODIFY)
	return (RTX_OK_FOR_BASE_P (XEXP (x, 0), strict)
		&& CONSTANT_ADDRESS_P (XEXP (x, 1)));
    }

  /* Everything else must be base + something.  */
  if (GET_CODE (x) != PLUS)
    return false;

  base = XEXP (x, 0);
  index = XEXP (x, 1);

  if (!REG_P (base))
    return false;
  if (REG_P (index))
    {
      /* ??? Without AM33 generalized (Ri,Rn) addressing, reg+reg
	 addressing is hard to satisfy.  */
      if (!TARGET_AM33)
	return false;

      return (REGNO_GENERAL_P (REGNO (base), strict)
	      && REGNO_GENERAL_P (REGNO (index), strict));
    }

  if (!REGNO_STRICT_OK_FOR_BASE_P (REGNO (base), strict))
    return false;

  /* base + const_int: any 32-bit displacement is representable.  */
  if (CONST_INT_P (index))
    return IN_RANGE (INTVAL (index), -1 - 0x7fffffff, 0x7fffffff);

  /* base + symbolic constant, subject to PIC rules.  */
  if (CONSTANT_ADDRESS_P (index))
    return !flag_pic || mn10300_legitimate_pic_operand_p (index);

  return false;
}
1998
1999bool
2000mn10300_regno_in_class_p (unsigned regno, int rclass, bool strict)
2001{
2002 if (regno >= FIRST_PSEUDO_REGISTER)
2003 {
2004 if (!strict)
2005 return true;
2006 if (!reg_renumber)
2007 return false;
2008 regno = reg_renumber[regno];
ba4ec0e0
NC
2009 if (regno == INVALID_REGNUM)
2010 return false;
36846b26
RH
2011 }
2012 return TEST_HARD_REG_BIT (reg_class_contents[rclass], regno);
2013}
2014
/* Implement LEGITIMIZE_RELOAD_ADDRESS.  If X is a reg+reg sum that
   involves the stack pointer, force the SP operand into a general
   register via push_reload and return the adjusted X; otherwise
   return NULL_RTX to let the generic machinery handle it.  */

rtx
mn10300_legitimize_reload_address (rtx x,
				   machine_mode mode ATTRIBUTE_UNUSED,
				   int opnum, int type,
				   int ind_levels ATTRIBUTE_UNUSED)
{
  bool any_change = false;

  /* See above re disabling reg+reg addressing for MN103.  */
  if (!TARGET_AM33)
    return NULL_RTX;

  if (GET_CODE (x) != PLUS)
    return NULL_RTX;

  /* SP cannot serve as either operand of a reg+reg address; reload it
     into a general register.  */
  if (XEXP (x, 0) == stack_pointer_rtx)
    {
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   GENERAL_REGS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      any_change = true;
    }
  if (XEXP (x, 1) == stack_pointer_rtx)
    {
      push_reload (XEXP (x, 1), NULL_RTX, &XEXP (x, 1), NULL,
		   GENERAL_REGS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      any_change = true;
    }

  return any_change ? x : NULL_RTX;
}
2047
1a627b35 2048/* Implement TARGET_LEGITIMATE_CONSTANT_P. Returns TRUE if X is a valid
4af476d7
NC
2049 constant. Note that some "constants" aren't valid, such as TLS
2050 symbols and unconverted GOT-based references, so we eliminate
2051 those here. */
2052
1a627b35 2053static bool
ef4bddc2 2054mn10300_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
4af476d7
NC
2055{
2056 switch (GET_CODE (x))
2057 {
2058 case CONST:
2059 x = XEXP (x, 0);
2060
2061 if (GET_CODE (x) == PLUS)
2062 {
e7ab5593 2063 if (! CONST_INT_P (XEXP (x, 1)))
4af476d7
NC
2064 return false;
2065 x = XEXP (x, 0);
2066 }
2067
2068 /* Only some unspecs are valid as "constants". */
2069 if (GET_CODE (x) == UNSPEC)
2070 {
4af476d7
NC
2071 switch (XINT (x, 1))
2072 {
4af476d7
NC
2073 case UNSPEC_PIC:
2074 case UNSPEC_GOT:
2075 case UNSPEC_GOTOFF:
2076 case UNSPEC_PLT:
2077 return true;
2078 default:
2079 return false;
2080 }
2081 }
2082
2083 /* We must have drilled down to a symbol. */
e7ab5593 2084 if (! mn10300_symbolic_operand (x, Pmode))
4af476d7
NC
2085 return false;
2086 break;
2087
2088 default:
2089 break;
2090 }
2091
2092 return true;
2093}
2094
126b1483
RH
/* Undo pic address legitimization for the benefit of debug info.
   Returns the underlying SYMBOL_REF (possibly plus an addend) when
   ORIG_X is a recognized PIC construct, otherwise ORIG_X unchanged.  */

static rtx
mn10300_delegitimize_address (rtx orig_x)
{
  rtx x = orig_x, ret, addend = NULL;
  bool need_mem;

  /* Strip an outer MEM so we can inspect the address itself.  */
  if (MEM_P (x))
    x = XEXP (x, 0);
  if (GET_CODE (x) != PLUS || GET_MODE (x) != Pmode)
    return orig_x;

  if (XEXP (x, 0) == pic_offset_table_rtx)
    ;
  /* With the REG+REG addressing of AM33, var-tracking can re-assemble
     some odd-looking "addresses" that were never valid in the first place.
     We need to look harder to avoid warnings being emitted.  */
  else if (GET_CODE (XEXP (x, 0)) == PLUS)
    {
      rtx x0 = XEXP (x, 0);
      rtx x00 = XEXP (x0, 0);
      rtx x01 = XEXP (x0, 1);

      /* One operand of the inner PLUS must be the PIC register; the
	 other becomes the addend folded back in at the end.  */
      if (x00 == pic_offset_table_rtx)
	addend = x01;
      else if (x01 == pic_offset_table_rtx)
	addend = x00;
      else
	return orig_x;

    }
  else
    return orig_x;
  x = XEXP (x, 1);

  /* The offset from the PIC register must be a CONST-wrapped UNSPEC.  */
  if (GET_CODE (x) != CONST)
    return orig_x;
  x = XEXP (x, 0);
  if (GET_CODE (x) != UNSPEC)
    return orig_x;

  ret = XVECEXP (x, 0, 0);
  /* GOTOFF references resolve directly; GOT references go through a
     memory load, so the original must have been a MEM.  */
  if (XINT (x, 1) == UNSPEC_GOTOFF)
    need_mem = false;
  else if (XINT (x, 1) == UNSPEC_GOT)
    need_mem = true;
  else
    return orig_x;

  gcc_assert (GET_CODE (ret) == SYMBOL_REF);
  if (need_mem != MEM_P (orig_x))
    return orig_x;
  if (need_mem && addend)
    return orig_x;
  if (addend)
    ret = gen_rtx_PLUS (Pmode, addend, ret);
  return ret;
}
2154
72d6e3c5
RH
/* For addresses, costs are relative to "MOV (Rm),Rn".  For AM33 this is
   the 3-byte fully general instruction; for MN103 this is the 2-byte form
   with an address register.

   Implements the TARGET_ADDRESS_COST hook: SPEED selects between
   cycle-relative and size-relative (byte) costs.  */

static int
mn10300_address_cost (rtx x, machine_mode mode ATTRIBUTE_UNUSED,
		      addr_space_t as ATTRIBUTE_UNUSED, bool speed)
{
  HOST_WIDE_INT i;
  rtx base, index;

  switch (GET_CODE (x))
    {
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      /* We assume all of these require a 32-bit constant, even though
	 some symbol and label references can be relaxed.  */
      return speed ? 1 : 4;

    case REG:
    case SUBREG:
    case POST_INC:
      return 0;

    case POST_MODIFY:
      /* Assume any symbolic offset is a 32-bit constant.  */
      i = (CONST_INT_P (XEXP (x, 1)) ? INTVAL (XEXP (x, 1)) : 0x12345678);
      if (IN_RANGE (i, -128, 127))
	return speed ? 0 : 1;
      if (speed)
	return 1;
      if (IN_RANGE (i, -0x800000, 0x7fffff))
	return 3;
      return 4;

    case PLUS:
      base = XEXP (x, 0);
      index = XEXP (x, 1);
      if (register_operand (index, SImode))
	{
	  /* Attempt to minimize the number of registers in the address.
	     This is similar to what other ports do.  */
	  if (register_operand (base, SImode))
	    return 1;

	  /* Canonicalize: treat the register operand as the base and
	     the non-register one as the (constant) index.  */
	  base = XEXP (x, 1);
	  index = XEXP (x, 0);
	}

      /* Assume any symbolic offset is a 32-bit constant.  */
      i = (CONST_INT_P (XEXP (x, 1)) ? INTVAL (XEXP (x, 1)) : 0x12345678);
      if (IN_RANGE (i, -128, 127))
	return speed ? 0 : 1;
      if (IN_RANGE (i, -32768, 32767))
	return speed ? 0 : 2;
      return speed ? 2 : 6;

    default:
      /* Not a recognized addressing form: defer to the generic
	 rtx cost machinery.  */
      return rtx_cost (x, Pmode, MEM, 0, speed);
    }
}
460ad325 2217
/* Implement the TARGET_REGISTER_MOVE_COST hook.

   Recall that the base value of 2 is required by assumptions elsewhere
   in the body of the compiler, and that cost 2 is special-cased as an
   early exit from reload meaning no work is required.  */

static int
mn10300_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
			    reg_class_t ifrom, reg_class_t ito)
{
  enum reg_class from = (enum reg_class) ifrom;
  enum reg_class to = (enum reg_class) ito;
  enum reg_class scratch, test;

  /* Simplify the following code by unifying the fp register classes.  */
  if (to == FP_ACC_REGS)
    to = FP_REGS;
  if (from == FP_ACC_REGS)
    from = FP_REGS;

  /* Diagnose invalid moves by costing them as two moves.  */

  /* Determine whether the move needs an intermediate scratch class;
     TEST is the "other" class that must be able to supply it.  */
  scratch = NO_REGS;
  test = from;
  if (to == SP_REGS)
    scratch = (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
  else if (to == MDR_REGS)
    scratch = DATA_REGS;
  else if (to == FP_REGS && to != from)
    scratch = GENERAL_REGS;
  else
    {
      test = to;
      if (from == SP_REGS)
	scratch = (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
      else if (from == MDR_REGS)
	scratch = DATA_REGS;
      else if (from == FP_REGS && to != from)
	scratch = GENERAL_REGS;
    }
  /* If a scratch class is needed and TEST cannot provide it, recurse:
     the cost is that of moving through the scratch class.  */
  if (scratch != NO_REGS && !reg_class_subset_p (test, scratch))
    return (mn10300_register_move_cost (VOIDmode, from, scratch)
	    + mn10300_register_move_cost (VOIDmode, scratch, to));

  /* From here on, all we need consider are legal combinations.  */

  if (optimize_size)
    {
      /* The scale here is bytes * 2.  */

      if (from == to && (to == ADDRESS_REGS || to == DATA_REGS))
	return 2;

      if (from == SP_REGS)
	return (to == ADDRESS_REGS ? 2 : 6);

      /* For MN103, all remaining legal moves are two bytes.  */
      /* NOTE(review): the comment above likely means AM33 rather than
	 MN103, since this early return only applies when TARGET_AM33
	 is set -- confirm against the original change.  */
      if (TARGET_AM33)
	return 4;

      if (to == SP_REGS)
	return (from == ADDRESS_REGS ? 4 : 6);

      if ((from == ADDRESS_REGS || from == DATA_REGS)
	  && (to == ADDRESS_REGS || to == DATA_REGS))
	return 4;

      if (to == EXTENDED_REGS)
	return (to == from ? 6 : 4);

      /* What's left are SP_REGS, FP_REGS, or combinations of the above.  */
      return 6;
    }
  else
    {
      /* The scale here is cycles * 2.  */

      if (to == FP_REGS)
	return 8;
      if (from == FP_REGS)
	return 4;

      /* All legal moves between integral registers are single cycle.  */
      return 2;
    }
}
3c50106f 2304
72d6e3c5
RH
2305/* Implement the TARGET_MEMORY_MOVE_COST hook.
2306
2307 Given lack of the form of the address, this must be speed-relative,
2308 though we should never be less expensive than a size-relative register
2309 move cost above. This is not a problem. */
2310
dcefdf67 2311static int
ef4bddc2 2312mn10300_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
72d6e3c5 2313 reg_class_t iclass, bool in ATTRIBUTE_UNUSED)
dcefdf67 2314{
72d6e3c5
RH
2315 enum reg_class rclass = (enum reg_class) iclass;
2316
2317 if (rclass == FP_REGS)
2318 return 8;
2319 return 6;
dcefdf67
RH
2320}
2321
72d6e3c5
RH
/* Implement the TARGET_RTX_COSTS hook.

   Speed-relative costs are relative to COSTS_N_INSNS, which is intended
   to represent cycles.  Size-relative costs are in bytes.

   Per the hook contract, *PTOTAL receives the cost; returning true means
   the value is final, returning false lets the caller recurse into the
   operands of X.  */

static bool
mn10300_rtx_costs (rtx x, machine_mode mode, int outer_code,
		   int opno ATTRIBUTE_UNUSED, int *ptotal, bool speed)
{
  /* This value is used for SYMBOL_REF etc where we want to pretend
     we have a full 32-bit constant.  */
  HOST_WIDE_INT i = 0x12345678;
  int total;
  int code = GET_CODE (x);

  switch (code)
    {
    case CONST_INT:
      i = INTVAL (x);
    do_int_costs:
      if (speed)
	{
	  if (outer_code == SET)
	    {
	      /* 16-bit integer loads have latency 1, 32-bit loads 2.  */
	      if (IN_RANGE (i, -32768, 32767))
		total = COSTS_N_INSNS (1);
	      else
		total = COSTS_N_INSNS (2);
	    }
	  else
	    {
	      /* 16-bit integer operands don't affect latency;
		 24-bit and 32-bit operands add a cycle.  */
	      if (IN_RANGE (i, -32768, 32767))
		total = 0;
	      else
		total = COSTS_N_INSNS (1);
	    }
	}
      else
	{
	  if (outer_code == SET)
	    {
	      /* Byte sizes of the various MOV imm,Rn encodings.  */
	      if (i == 0)
		total = 1;
	      else if (IN_RANGE (i, -128, 127))
		total = 2;
	      else if (IN_RANGE (i, -32768, 32767))
		total = 3;
	      else
		total = 6;
	    }
	  else
	    {
	      /* Reference here is ADD An,Dn, vs ADD imm,Dn.  */
	      if (IN_RANGE (i, -128, 127))
		total = 0;
	      else if (IN_RANGE (i, -32768, 32767))
		total = 2;
	      else if (TARGET_AM33 && IN_RANGE (i, -0x01000000, 0x00ffffff))
		total = 3;
	      else
		total = 4;
	    }
	}
      goto alldone;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST_DOUBLE:
      /* We assume all of these require a 32-bit constant, even though
	 some symbol and label references can be relaxed.  */
      goto do_int_costs;

    case UNSPEC:
      switch (XINT (x, 1))
	{
	case UNSPEC_PIC:
	case UNSPEC_GOT:
	case UNSPEC_GOTOFF:
	case UNSPEC_PLT:
	case UNSPEC_GOTSYM_OFF:
	  /* The PIC unspecs also resolve to a 32-bit constant.  */
	  goto do_int_costs;

	default:
	  /* Assume any non-listed unspec is some sort of arithmetic.  */
	  goto do_arith_costs;
	}

    case PLUS:
      /* Notice the size difference of INC and INC4.  */
      if (!speed && outer_code == SET && CONST_INT_P (XEXP (x, 1)))
	{
	  i = INTVAL (XEXP (x, 1));
	  if (i == 1 || i == 4)
	    {
	      total = 1 + rtx_cost (XEXP (x, 0), mode, PLUS, 0, speed);
	      goto alldone;
	    }
	}
      goto do_arith_costs;

    case MINUS:
    case AND:
    case IOR:
    case XOR:
    case NOT:
    case NEG:
    case ZERO_EXTEND:
    case SIGN_EXTEND:
    case COMPARE:
    case BSWAP:
    case CLZ:
    do_arith_costs:
      total = (speed ? COSTS_N_INSNS (1) : 2);
      break;

    case ASHIFT:
      /* Notice the size difference of ASL2 and variants.  */
      if (!speed && CONST_INT_P (XEXP (x, 1)))
	switch (INTVAL (XEXP (x, 1)))
	  {
	  case 1:
	  case 2:
	    total = 1;
	    goto alldone;
	  case 3:
	  case 4:
	    total = 2;
	    goto alldone;
	  }
      /* FALLTHRU */

    case ASHIFTRT:
    case LSHIFTRT:
      total = (speed ? COSTS_N_INSNS (1) : 3);
      goto alldone;

    case MULT:
      total = (speed ? COSTS_N_INSNS (3) : 2);
      break;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      total = (speed ? COSTS_N_INSNS (39)
	       /* Include space to load+retrieve MDR.  */
	       : code == MOD || code == UMOD ? 6 : 4);
      break;

    case MEM:
      total = mn10300_address_cost (XEXP (x, 0), mode,
				    MEM_ADDR_SPACE (x), speed);
      if (speed)
	total = COSTS_N_INSNS (2 + total);
      goto alldone;

    default:
      /* Probably not implemented.  Assume external call.  */
      total = (speed ? COSTS_N_INSNS (10) : 7);
      break;
    }

  /* Fallthrough cases: cost computed, but let the caller also cost
     the operands of X.  */
  *ptotal = total;
  return false;

 alldone:
  *ptotal = total;
  return true;
}
72d6e3c5 2496
d1776069
AO
2497/* If using PIC, mark a SYMBOL_REF for a non-global symbol so that we
2498 may access it using GOTOFF instead of GOT. */
2499
2500static void
2ba3d2a9 2501mn10300_encode_section_info (tree decl, rtx rtl, int first)
d1776069
AO
2502{
2503 rtx symbol;
2504
2ba3d2a9
NC
2505 default_encode_section_info (decl, rtl, first);
2506
e7ab5593 2507 if (! MEM_P (rtl))
d1776069 2508 return;
2ba3d2a9 2509
d1776069
AO
2510 symbol = XEXP (rtl, 0);
2511 if (GET_CODE (symbol) != SYMBOL_REF)
2512 return;
2513
2514 if (flag_pic)
2515 SYMBOL_REF_FLAG (symbol) = (*targetm.binds_local_p) (decl);
2516}
e6ff3083
AS
2517
/* Dispatch tables on the mn10300 are extremely expensive in terms of code
   and readonly data size.  So we crank up the case threshold value to
   encourage a series of if/else comparisons to implement many small switch
   statements.  In theory, this value could be increased much more if we
   were solely optimizing for space, but we keep it "reasonable" to avoid
   serious code efficiency lossage.  */

/* Implement TARGET_CASE_VALUES_THRESHOLD.  */

static unsigned int
mn10300_case_values_threshold (void)
{
  return 6;
}
bdeb5f0c 2530
bdeb5f0c
RH
/* Worker function for TARGET_TRAMPOLINE_INIT.  Fills in the 16-byte
   trampoline M_TRAMP so that it loads CHAIN_VALUE into a1 and jumps
   to FNDECL's entry point.  */

static void
mn10300_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
{
  rtx mem, disp, fnaddr = XEXP (DECL_RTL (fndecl), 0);

  /* This is a strict alignment target, which means that we play
     some games to make sure that the locations at which we need
     to store <chain> and <disp> wind up at aligned addresses.

	0x28 0x00			add 0,d0
	0xfc 0xdd			mov chain,a1
        <chain>
	0xf8 0xed 0x00			btst 0,d1
	0xdc				jmp fnaddr
	<disp>

     Note that the two extra insns are effectively nops; they
     clobber the flags but do not affect the contents of D0 or D1.  */

  /* The jmp displacement is relative to the byte following its opcode,
     which sits at trampoline offset 10 -- hence the "+ 11".  */
  disp = expand_binop (SImode, sub_optab, fnaddr,
		       plus_constant (Pmode, XEXP (m_tramp, 0), 11),
		       NULL_RTX, 1, OPTAB_DIRECT);

  /* Store the four 32-bit words: opcodes, chain, opcodes, displacement.
     The opcode constants are little-endian images of the byte sequences
     shown in the diagram above.  */
  mem = adjust_address (m_tramp, SImode, 0);
  emit_move_insn (mem, gen_int_mode (0xddfc0028, SImode));
  mem = adjust_address (m_tramp, SImode, 4);
  emit_move_insn (mem, chain_value);
  mem = adjust_address (m_tramp, SImode, 8);
  emit_move_insn (mem, gen_int_mode (0xdc00edf8, SImode));
  mem = adjust_address (m_tramp, SImode, 12);
  emit_move_insn (mem, disp);
}
990dc016
NC
2565
/* Output the assembler code for a C++ thunk function.
   THUNK_DECL is the declaration for the thunk function itself, FUNCTION
   is the decl for the target function.  DELTA is an immediate constant
   offset to be added to the THIS parameter.  If VCALL_OFFSET is nonzero
   the word at the adjusted address *(*THIS' + VCALL_OFFSET) should be
   additionally added to THIS.  Finally jump to the entry point of
   FUNCTION.  */

static void
mn10300_asm_output_mi_thunk (FILE *        file,
			     tree          thunk_fndecl ATTRIBUTE_UNUSED,
			     HOST_WIDE_INT delta,
			     HOST_WIDE_INT vcall_offset,
			     tree          function)
{
  const char * _this;

  /* Get the register holding the THIS parameter.  Handle the case
     where there is a hidden first argument for a returned structure.  */
  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    _this = reg_names [FIRST_ARGUMENT_REGNUM + 1];
  else
    _this = reg_names [FIRST_ARGUMENT_REGNUM];

  fprintf (file, "\t%s Thunk Entry Point:\n", ASM_COMMENT_START);

  /* Apply the constant adjustment to THIS.  */
  if (delta)
    fprintf (file, "\tadd %d, %s\n", (int) delta, _this);

  /* Apply the vcall adjustment: THIS += *(*THIS + VCALL_OFFSET).  */
  if (vcall_offset)
    {
      const char * scratch = reg_names [FIRST_ADDRESS_REGNUM + 1];

      fprintf (file, "\tmov %s, %s\n", _this, scratch);
      fprintf (file, "\tmov (%s), %s\n", scratch, scratch);
      fprintf (file, "\tadd %d, %s\n", (int) vcall_offset, scratch);
      fprintf (file, "\tmov (%s), %s\n", scratch, scratch);
      fprintf (file, "\tadd %s, %s\n", scratch, _this);
    }

  /* Tail-jump to the real function.  */
  fputs ("\tjmp ", file);
  assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
  putc ('\n', file);
}
2610
/* Return true if mn10300_output_mi_thunk would be able to output the
   assembler code for the thunk function specified by the arguments
   it is passed, and false otherwise.  */

static bool
mn10300_can_output_mi_thunk (const_tree thunk_fndecl ATTRIBUTE_UNUSED,
			     HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
			     HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
			     const_tree function ATTRIBUTE_UNUSED)
{
  /* mn10300_asm_output_mi_thunk handles every delta/vcall_offset
     combination, so no restrictions apply.  */
  return true;
}
4af476d7
NC
2623
2624bool
ef4bddc2 2625mn10300_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
4af476d7
NC
2626{
2627 if (REGNO_REG_CLASS (regno) == FP_REGS
2628 || REGNO_REG_CLASS (regno) == FP_ACC_REGS)
2629 /* Do not store integer values in FP registers. */
2630 return GET_MODE_CLASS (mode) == MODE_FLOAT && ((regno & 1) == 0);
c70da878
NC
2631
2632 if (! TARGET_AM33 && REGNO_REG_CLASS (regno) == EXTENDED_REGS)
2633 return false;
2634
4af476d7
NC
2635 if (((regno) & 1) == 0 || GET_MODE_SIZE (mode) == 4)
2636 return true;
2637
2638 if (REGNO_REG_CLASS (regno) == DATA_REGS
2639 || (TARGET_AM33 && REGNO_REG_CLASS (regno) == ADDRESS_REGS)
2640 || REGNO_REG_CLASS (regno) == EXTENDED_REGS)
2641 return GET_MODE_SIZE (mode) <= 4;
2642
2643 return false;
2644}
2645
2646bool
ef4bddc2 2647mn10300_modes_tieable (machine_mode mode1, machine_mode mode2)
4af476d7
NC
2648{
2649 if (GET_MODE_CLASS (mode1) == MODE_FLOAT
2650 && GET_MODE_CLASS (mode2) != MODE_FLOAT)
2651 return false;
2652
2653 if (GET_MODE_CLASS (mode2) == MODE_FLOAT
2654 && GET_MODE_CLASS (mode1) != MODE_FLOAT)
2655 return false;
2656
2657 if (TARGET_AM33
2658 || mode1 == mode2
2659 || (GET_MODE_SIZE (mode1) <= 4 && GET_MODE_SIZE (mode2) <= 4))
2660 return true;
2661
2662 return false;
2663}
2664
bad41521 2665static int
ef4bddc2 2666cc_flags_for_mode (machine_mode mode)
bad41521
RH
2667{
2668 switch (mode)
2669 {
2670 case CCmode:
2671 return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_C | CC_FLAG_V;
2672 case CCZNCmode:
2673 return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_C;
2674 case CCZNmode:
2675 return CC_FLAG_Z | CC_FLAG_N;
2676 case CC_FLOATmode:
2677 return -1;
2678 default:
2679 gcc_unreachable ();
2680 }
2681}
2682
2683static int
2684cc_flags_for_code (enum rtx_code code)
2685{
2686 switch (code)
2687 {
2688 case EQ: /* Z */
2689 case NE: /* ~Z */
2690 return CC_FLAG_Z;
2691
2692 case LT: /* N */
2693 case GE: /* ~N */
2694 return CC_FLAG_N;
2695 break;
2696
2697 case GT: /* ~(Z|(N^V)) */
2698 case LE: /* Z|(N^V) */
2699 return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_V;
2700
2701 case GEU: /* ~C */
2702 case LTU: /* C */
2703 return CC_FLAG_C;
2704
2705 case GTU: /* ~(C | Z) */
2706 case LEU: /* C | Z */
2707 return CC_FLAG_Z | CC_FLAG_C;
2708
2709 case ORDERED:
2710 case UNORDERED:
2711 case LTGT:
2712 case UNEQ:
2713 case UNGE:
2714 case UNGT:
2715 case UNLE:
2716 case UNLT:
2717 return -1;
2718
2719 default:
2720 gcc_unreachable ();
2721 }
2722}
2723
ef4bddc2 2724machine_mode
bad41521 2725mn10300_select_cc_mode (enum rtx_code code, rtx x, rtx y ATTRIBUTE_UNUSED)
4af476d7 2726{
bad41521
RH
2727 int req;
2728
2729 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2730 return CC_FLOATmode;
2731
2732 req = cc_flags_for_code (code);
2733
2734 if (req & CC_FLAG_V)
2735 return CCmode;
2736 if (req & CC_FLAG_C)
2737 return CCZNCmode;
2738 return CCZNmode;
4af476d7 2739}
f3f63737
NC
2740
2741static inline bool
af480750 2742set_is_load_p (rtx set)
f3f63737 2743{
af480750 2744 return MEM_P (SET_SRC (set));
f3f63737
NC
2745}
2746
2747static inline bool
af480750 2748set_is_store_p (rtx set)
f3f63737 2749{
af480750 2750 return MEM_P (SET_DEST (set));
f3f63737
NC
2751}
2752
/* Update scheduling costs for situations that cannot be
   described using the attributes and DFA machinery.
   DEP is the insn being scheduled.
   INSN is the previous insn.
   COST is the current cycle cost for DEP.  */

static int
mn10300_adjust_sched_cost (rtx_insn *insn, rtx link, rtx_insn *dep, int cost)
{
  rtx insn_set;
  rtx dep_set;
  int timings;

  if (!TARGET_AM33)
    return 1;

  /* We are only interested in pairs of SET. */
  insn_set = single_set (insn);
  if (!insn_set)
    return cost;

  dep_set = single_set (dep);
  if (!dep_set)
    return cost;

  /* For the AM34 a load instruction that follows a
     store instruction incurs an extra cycle of delay.  */
  if (mn10300_tune_cpu == PROCESSOR_AM34
      && set_is_load_p (dep_set)
      && set_is_store_p (insn_set))
    cost += 1;

  /* For the AM34 a non-store, non-branch FPU insn that follows
     another FPU insn incurs a one cycle throughput increase.  */
  else if (mn10300_tune_cpu == PROCESSOR_AM34
	   && ! set_is_store_p (insn_set)
	   && ! JUMP_P (insn)
	   && GET_MODE_CLASS (GET_MODE (SET_SRC (dep_set))) == MODE_FLOAT
	   && GET_MODE_CLASS (GET_MODE (SET_SRC (insn_set))) == MODE_FLOAT)
    cost += 1;

  /* Resolve the conflict described in section 1-7-4 of
     Chapter 3 of the MN103E Series Instruction Manual
     where it says:

     "When the preceding instruction is a CPU load or
      store instruction, a following FPU instruction
      cannot be executed until the CPU completes the
      latency period even though there are no register
      or flag dependencies between them."  */

  /* Only the AM33-2 (and later) CPUs have FPU instructions.  */
  if (! TARGET_AM33_2)
    return cost;

  /* If a data dependence already exists then the cost is correct.  */
  if (REG_NOTE_KIND (link) == 0)
    return cost;

  /* Check that the instruction about to scheduled is an FPU instruction.  */
  if (GET_MODE_CLASS (GET_MODE (SET_SRC (dep_set))) != MODE_FLOAT)
    return cost;

  /* Now check to see if the previous instruction is a load or store.  */
  if (! set_is_load_p (insn_set) && ! set_is_store_p (insn_set))
    return cost;

  /* XXX: Verify: The text of 1-7-4 implies that the restriction
     only applies when an INTEGER load/store precedes an FPU
     instruction, but is this true ?  For now we assume that it is.  */
  if (GET_MODE_CLASS (GET_MODE (SET_SRC (insn_set))) != MODE_INT)
    return cost;

  /* Extract the latency value from the timings attribute.  */
  timings = get_attr_timings (insn);
  return timings < 100 ? (timings % 10) : (timings % 100);
}
5efd84c5
NF
2830
2831static void
2832mn10300_conditional_register_usage (void)
2833{
2834 unsigned int i;
2835
2836 if (!TARGET_AM33)
2837 {
2838 for (i = FIRST_EXTENDED_REGNUM;
2839 i <= LAST_EXTENDED_REGNUM; i++)
2840 fixed_regs[i] = call_used_regs[i] = 1;
2841 }
2842 if (!TARGET_AM33_2)
2843 {
2844 for (i = FIRST_FP_REGNUM;
2845 i <= LAST_FP_REGNUM; i++)
2846 fixed_regs[i] = call_used_regs[i] = 1;
2847 }
2848 if (flag_pic)
2849 fixed_regs[PIC_OFFSET_TABLE_REGNUM] =
2850 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
2851}
a49b692a 2852
/* Worker function for TARGET_MD_ASM_ADJUST.
   We do this in the mn10300 backend to maintain source compatibility
   with the old cc0-based compiler.  */

static rtx_insn *
mn10300_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
		       vec<const char *> &/*constraints*/,
		       vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
{
  /* Treat every inline asm as clobbering the flags register.  */
  clobbers.safe_push (gen_rtx_REG (CCmode, CC_REG));
  SET_HARD_REG_BIT (clobbered_regs, CC_REG);
  /* No extra insns need to be emitted before or after the asm.  */
  return NULL;
}
4af476d7 2866\f
bad41521
RH
2867/* A helper function for splitting cbranch patterns after reload. */
2868
2869void
ef4bddc2 2870mn10300_split_cbranch (machine_mode cmp_mode, rtx cmp_op, rtx label_ref)
bad41521
RH
2871{
2872 rtx flags, x;
2873
2874 flags = gen_rtx_REG (cmp_mode, CC_REG);
2875 x = gen_rtx_COMPARE (cmp_mode, XEXP (cmp_op, 0), XEXP (cmp_op, 1));
f7df4a84 2876 x = gen_rtx_SET (flags, x);
bad41521
RH
2877 emit_insn (x);
2878
2879 x = gen_rtx_fmt_ee (GET_CODE (cmp_op), VOIDmode, flags, const0_rtx);
2880 x = gen_rtx_IF_THEN_ELSE (VOIDmode, x, label_ref, pc_rtx);
f7df4a84 2881 x = gen_rtx_SET (pc_rtx, x);
bad41521
RH
2882 emit_jump_insn (x);
2883}
2884
/* A helper function for matching parallels that set the flags.
   INSN's pattern is expected to be a two-element PARALLEL whose second
   element sets a CC register from a COMPARE; return true if that CC
   register's mode is compatible with CC_MODE.  */

bool
mn10300_match_ccmode (rtx insn, machine_mode cc_mode)
{
  rtx op1, flags;
  machine_mode flags_mode;

  gcc_checking_assert (XVECLEN (PATTERN (insn), 0) == 2);

  op1 = XVECEXP (PATTERN (insn), 0, 1);
  gcc_checking_assert (GET_CODE (SET_SRC (op1)) == COMPARE);

  flags = SET_DEST (op1);
  flags_mode = GET_MODE (flags);

  if (GET_MODE (SET_SRC (op1)) != flags_mode)
    return false;
  if (GET_MODE_CLASS (flags_mode) != MODE_CC)
    return false;

  /* Ensure that the mode of FLAGS is compatible with CC_MODE.  */
  if (cc_flags_for_mode (flags_mode) & ~cc_flags_for_mode (cc_mode))
    return false;

  return true;
}
2912
cf13d9cf
NC
/* This function is used to help split:

   (set (reg) (and (reg) (int)))

   into:

   (set (reg) (shift (reg) (int))
   (set (reg) (shift (reg) (int))

   where the shifts will be shorter than the "and" insn.

   It returns the number of bits that should be shifted.  A positive
   values means that the low bits are to be cleared (and hence the
   shifts should be right followed by left) whereas a negative value
   means that the high bits are to be cleared (left followed by right).
   Zero is returned when it would not be economical to split the AND.  */

int
mn10300_split_and_operand_count (rtx op)
{
  HOST_WIDE_INT val = INTVAL (op);
  int count;

  if (val < 0)
    {
      /* High bit is set, look for bits clear at the bottom.  */
      count = exact_log2 (-val);
      if (count < 0)
	return 0;
      /* This is only size win if we can use the asl2 insn.  Otherwise we
	 would be replacing 1 6-byte insn with 2 3-byte insns.  */
      if (count > (optimize_insn_for_speed_p () ? 2 : 4))
	return 0;
      return count;
    }
  else
    {
      /* High bit is clear, look for bits set at the bottom.  */
      count = exact_log2 (val + 1);
      count = 32 - count;
      /* Again, this is only a size win with asl2.  */
      if (count > (optimize_insn_for_speed_p () ? 2 : 4))
	return 0;
      return -count;
    }
}
2959\f
a45d420a
NC
/* Per-insn data collected by extract_bundle for LIW bundling.  */
struct liw_data
{
  enum attr_liw slot;		/* Which LIW slot(s) the insn may occupy.  */
  enum attr_liw_op op;		/* The LIW operation the insn performs.  */
  rtx dest;			/* Destination operand (for compares this
				   is really an input; the true destination
				   is CC_REG).  */
  rtx src;			/* Source operand.  */
};
2967
/* Decide if the given insn is a candidate for LIW bundling.  If it is then
   extract the operands and LIW attributes from the insn and use them to fill
   in the liw_data structure.  Return true upon success or false if the insn
   cannot be bundled.  */

static bool
extract_bundle (rtx_insn *insn, struct liw_data * pdata)
{
  bool allow_consts = true;
  rtx p;

  gcc_assert (pdata != NULL);

  if (insn == NULL)
    return false;
  /* Make sure that we are dealing with a simple SET insn.  */
  p = single_set (insn);
  if (p == NULL_RTX)
    return false;

  /* Make sure that it could go into one of the LIW pipelines.  */
  pdata->slot = get_attr_liw (insn);
  if (pdata->slot == LIW_BOTH)
    return false;

  pdata->op = get_attr_liw_op (insn);

  /* Pick out the operands relevant to each kind of LIW op.  */
  switch (pdata->op)
    {
    case LIW_OP_MOV:
      pdata->dest = SET_DEST (p);
      pdata->src = SET_SRC (p);
      break;
    case LIW_OP_CMP:
      /* For a compare, both operands live inside the COMPARE rtx.  */
      pdata->dest = XEXP (SET_SRC (p), 0);
      pdata->src = XEXP (SET_SRC (p), 1);
      break;
    case LIW_OP_NONE:
      return false;
    case LIW_OP_AND:
    case LIW_OP_OR:
    case LIW_OP_XOR:
      /* The AND, OR and XOR long instruction words only accept register arguments.  */
      allow_consts = false;
      /* Fall through.  */
    default:
      pdata->dest = SET_DEST (p);
      pdata->src = XEXP (SET_SRC (p), 1);
      break;
    }

  /* The destination must always be a register.  */
  if (! REG_P (pdata->dest))
    return false;

  if (REG_P (pdata->src))
    return true;

  /* A non-register source is only acceptable when the op permits it
     and the constant satisfies the LIW immediate constraint.  */
  return allow_consts && satisfies_constraint_O (pdata->src);
}
3027
a45d420a
NC
3028/* Make sure that it is OK to execute LIW1 and LIW2 in parallel. GCC generated
3029 the instructions with the assumption that LIW1 would be executed before LIW2
3030 so we must check for overlaps between their sources and destinations. */
298362c8
NC
3031
3032static bool
a45d420a
NC
3033check_liw_constraints (struct liw_data * pliw1, struct liw_data * pliw2)
3034{
3035 /* Check for slot conflicts. */
3036 if (pliw2->slot == pliw1->slot && pliw1->slot != LIW_EITHER)
298362c8
NC
3037 return false;
3038
a45d420a
NC
3039 /* If either operation is a compare, then "dest" is really an input; the real
3040 destination is CC_REG. So these instructions need different checks. */
3041
3042 /* Changing "CMP ; OP" into "CMP | OP" is OK because the comparison will
3043 check its values prior to any changes made by OP. */
3044 if (pliw1->op == LIW_OP_CMP)
3045 {
3046 /* Two sequential comparisons means dead code, which ought to
3047 have been eliminated given that bundling only happens with
3048 optimization. We cannot bundle them in any case. */
3049 gcc_assert (pliw1->op != pliw2->op);
3050 return true;
3051 }
298362c8 3052
a45d420a
NC
3053 /* Changing "OP ; CMP" into "OP | CMP" does not work if the value being compared
3054 is the destination of OP, as the CMP will look at the old value, not the new
3055 one. */
3056 if (pliw2->op == LIW_OP_CMP)
298362c8 3057 {
a45d420a
NC
3058 if (REGNO (pliw2->dest) == REGNO (pliw1->dest))
3059 return false;
3060
3061 if (REG_P (pliw2->src))
3062 return REGNO (pliw2->src) != REGNO (pliw1->dest);
3063
3064 return true;
3065 }
3066
3067 /* Changing "OP1 ; OP2" into "OP1 | OP2" does not work if they both write to the
3068 same destination register. */
3069 if (REGNO (pliw2->dest) == REGNO (pliw1->dest))
3070 return false;
3071
3072 /* Changing "OP1 ; OP2" into "OP1 | OP2" generally does not work if the destination
3073 of OP1 is the source of OP2. The exception is when OP1 is a MOVE instruction when
3074 we can replace the source in OP2 with the source of OP1. */
3075 if (REG_P (pliw2->src) && REGNO (pliw2->src) == REGNO (pliw1->dest))
3076 {
3077 if (pliw1->op == LIW_OP_MOV && REG_P (pliw1->src))
298362c8 3078 {
a45d420a
NC
3079 if (! REG_P (pliw1->src)
3080 && (pliw2->op == LIW_OP_AND
3081 || pliw2->op == LIW_OP_OR
3082 || pliw2->op == LIW_OP_XOR))
3083 return false;
3084
3085 pliw2->src = pliw1->src;
298362c8
NC
3086 return true;
3087 }
3088 return false;
3089 }
3090
a45d420a 3091 /* Everything else is OK. */
298362c8
NC
3092 return true;
3093}
3094
298362c8
NC
/* Combine pairs of insns into LIW bundles.  Walks the whole insn
   stream; whenever two adjacent insns each fit a LIW slot and pass
   check_liw_constraints, they are replaced by a single bundled insn.  */

static void
mn10300_bundle_liw (void)
{
  rtx_insn *r;

  for (r = get_insns (); r != NULL; r = next_nonnote_nondebug_insn (r))
    {
      rtx_insn *insn1, *insn2;
      struct liw_data liw1, liw2;

      /* Try to extract LIW operands from this insn; skip it if it is
	 not a candidate.  */
      insn1 = r;
      if (! extract_bundle (insn1, & liw1))
	continue;

      /* The partner must be the immediately following real insn.
	 NOTE(review): insn2 may be NULL at the end of the stream —
	 presumably extract_bundle rejects that; confirm in its body.  */
      insn2 = next_nonnote_nondebug_insn (insn1);
      if (! extract_bundle (insn2, & liw2))
	continue;

      /* Check for source/destination overlap.  */
      if (! check_liw_constraints (& liw1, & liw2))
	continue;

      /* If the first insn can only occupy slot 2, or the second only
	 slot 1, swap the pair so the slots line up.  */
      if (liw1.slot == LIW_OP2 || liw2.slot == LIW_OP1)
	{
	  struct liw_data temp;

	  temp = liw1;
	  liw1 = liw2;
	  liw2 = temp;
	}

      delete_insn (insn2);

      /* Build the bundled pattern; compares get dedicated patterns
	 because their true destination is CC_REG.  */
      rtx insn2_pat;
      if (liw1.op == LIW_OP_CMP)
	insn2_pat = gen_cmp_liw (liw2.dest, liw2.src, liw1.dest, liw1.src,
				 GEN_INT (liw2.op));
      else if (liw2.op == LIW_OP_CMP)
	insn2_pat = gen_liw_cmp (liw1.dest, liw1.src, liw2.dest, liw2.src,
				 GEN_INT (liw1.op));
      else
	insn2_pat = gen_liw (liw1.dest, liw2.dest, liw1.src, liw2.src,
			     GEN_INT (liw1.op), GEN_INT (liw2.op));

      /* Emit the bundle in place of the original pair, then continue
	 scanning from the new insn.  */
      insn2 = emit_insn_after (insn2_pat, insn1);
      delete_insn (insn1);
      r = insn2;
    }
}
3146
662c03f4
NC
/* If RTL dumping is enabled, print REASON (a string literal) followed
   by the RTL of INSN (when INSN is not NULL) to the dump file.  Used
   by the SETLB/Lcc conversion code below to record its decisions.  */
#define DUMP(reason, insn)			\
  do						\
    {						\
      if (dump_file)				\
	{					\
	  fprintf (dump_file, reason "\n");	\
	  if (insn != NULL_RTX)			\
	    print_rtl_single (dump_file, insn);	\
	  fprintf(dump_file, "\n");		\
	}					\
    }						\
  while (0)
3159
/* Replace the BRANCH insn with a Lcc insn that goes to LABEL.
   Insert a SETLB insn just before LABEL.  */

static void
mn10300_insert_setlb_lcc (rtx label, rtx branch)
{
  rtx lcc, comparison, cmp_reg;

  if (LABEL_NUSES (label) > 1)
    {
      rtx_insn *insn;

      /* This label is used both as an entry point to the loop
	 and as a loop-back point for the loop.  We need to separate
	 these two functions so that the SETLB happens upon entry,
	 but the loop-back does not go to the SETLB instruction.  */
      DUMP ("Inserting SETLB insn after:", label);
      insn = emit_insn_after (gen_setlb (), label);
      /* Create a fresh label after the SETLB; the loop-back branch
	 below will target this one instead.  */
      label = gen_label_rtx ();
      emit_label_after (label, insn);
      DUMP ("Created new loop-back label:", label);
    }
  else
    {
      DUMP ("Inserting SETLB insn before:", label);
      emit_insn_before (gen_setlb (), label);
    }

  /* BRANCH is a conditional jump; its condition compares CC_REG.  */
  comparison = XEXP (SET_SRC (PATTERN (branch)), 0);
  cmp_reg = XEXP (comparison, 0);
  gcc_assert (REG_P (cmp_reg));

  /* If the comparison has not already been split out of the branch
     then do so now.  */
  gcc_assert (REGNO (cmp_reg) == CC_REG);

  /* Floating-point conditions (CC_FLOATmode) need the FLcc pattern;
     everything else uses Lcc.  */
  if (GET_MODE (cmp_reg) == CC_FLOATmode)
    lcc = gen_FLcc (comparison, label);
  else
    lcc = gen_Lcc (comparison, label);

  /* Emit the looping branch, wire up its jump label, and remove the
     original branch insn.  */
  rtx_insn *jump = emit_jump_insn_before (lcc, branch);
  mark_jump_label (XVECEXP (lcc, 0, 0), jump, 0);
  JUMP_LABEL (jump) = label;
  DUMP ("Replacing branch insn...", branch);
  DUMP ("... with Lcc insn:", jump);
  delete_insn (branch);
}
3208
3209static bool
b8244d74 3210mn10300_block_contains_call (basic_block block)
662c03f4 3211{
b32d5189 3212 rtx_insn *insn;
662c03f4
NC
3213
3214 FOR_BB_INSNS (block, insn)
3215 if (CALL_P (insn))
3216 return true;
3217
3218 return false;
3219}
3220
3221static bool
3222mn10300_loop_contains_call_insn (loop_p loop)
3223{
3224 basic_block * bbs;
3225 bool result = false;
3226 unsigned int i;
3227
3228 bbs = get_loop_body (loop);
3229
3230 for (i = 0; i < loop->num_nodes; i++)
3231 if (mn10300_block_contains_call (bbs[i]))
3232 {
3233 result = true;
3234 break;
3235 }
3236
3237 free (bbs);
3238 return result;
3239}
3240
/* Scan the innermost loops of the current function and convert those
   that qualify to use the SETLB/Lcc hardware looping instructions.
   Loops that are rejected have the reason recorded in the dump file.  */

static void
mn10300_scan_for_setlb_lcc (void)
{
  loop_p loop;

  DUMP ("Looking for loops that can use the SETLB insn", NULL_RTX);

  /* Loop discovery needs up-to-date dataflow and bb-for-insn maps.  */
  df_analyze ();
  compute_bb_for_insn ();

  /* Find the loops.  */
  loop_optimizer_init (AVOID_CFG_MODIFICATIONS);

  /* FIXME: For now we only investigate innermost loops.  In practice however
     if an inner loop is not suitable for use with the SETLB/Lcc insns, it may
     be the case that its parent loop is suitable.  Thus we should check all
     loops, but work from the innermost outwards.  */
  FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
    {
      const char * reason = NULL;

      /* Check to see if we can modify this loop.  If we cannot
	 then set 'reason' to describe why it could not be done.  */
      if (loop->latch == NULL)
	reason = "it contains multiple latches";
      else if (loop->header != loop->latch)
	/* FIXME: We could handle loops that span multiple blocks,
	   but this requires a lot more work tracking down the branches
	   that need altering, so for now keep things simple.  */
	reason = "the loop spans multiple blocks";
      else if (mn10300_loop_contains_call_insn (loop))
	reason = "it contains CALL insns";
      else
	{
	  rtx_insn *branch = BB_END (loop->latch);

	  gcc_assert (JUMP_P (branch));
	  if (single_set (branch) == NULL_RTX || ! any_condjump_p (branch))
	    /* We cannot optimize tablejumps and the like.  */
	    /* FIXME: We could handle unconditional jumps.  */
	    reason = "it is not a simple loop";
	  else
	    {
	      rtx_insn *label;

	      if (dump_file)
		flow_loop_dump (loop, dump_file, NULL, 0);

	      /* The loop header starts with the label the latch
		 branches back to; rewrite the branch and insert the
		 SETLB relative to that label.  */
	      label = BB_HEAD (loop->header);
	      gcc_assert (LABEL_P (label));

	      mn10300_insert_setlb_lcc (label, branch);
	    }
	}

      if (dump_file && reason != NULL)
	fprintf (dump_file, "Loop starting with insn %d is not suitable because %s\n",
		 INSN_UID (BB_HEAD (loop->header)),
		 reason);
    }

  loop_optimizer_finalize ();

  df_finish_pass (false);

  DUMP ("SETLB scan complete", NULL_RTX);
}
3308
298362c8
NC
3309static void
3310mn10300_reorg (void)
3311{
662c03f4
NC
3312 /* These are optimizations, so only run them if optimizing. */
3313 if (TARGET_AM33 && (optimize > 0 || optimize_size))
298362c8 3314 {
662c03f4
NC
3315 if (TARGET_ALLOW_SETLB)
3316 mn10300_scan_for_setlb_lcc ();
3317
298362c8
NC
3318 if (TARGET_ALLOW_LIW)
3319 mn10300_bundle_liw ();
3320 }
3321}
3322\f
e7ab5593
NC
/* Initialize the GCC target structure.  Each hook below overrides the
   default in target-def.h with the mn10300-specific implementation.  */

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG mn10300_reorg

/* Assembly output.  */
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.hword\t"

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS mn10300_legitimize_address

/* Cost models.  */
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST mn10300_address_cost
#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST mn10300_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST mn10300_memory_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS mn10300_rtx_costs

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START mn10300_file_start
#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA mn10300_asm_output_addr_const_extra

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE mn10300_option_override

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO mn10300_encode_section_info

/* Calling conventions and argument passing.  */
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY mn10300_return_in_memory
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE mn10300_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES mn10300_arg_partial_bytes
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG mn10300_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE mn10300_function_arg_advance

/* Varargs support.  */
#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS mn10300_builtin_saveregs
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START mn10300_va_start

#undef TARGET_CASE_VALUES_THRESHOLD
#define TARGET_CASE_VALUES_THRESHOLD mn10300_case_values_threshold

/* Addressing and constants.  */
#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P mn10300_legitimate_address_p
#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS mn10300_delegitimize_address
#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P mn10300_legitimate_constant_p

/* Reload support.  */
#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS mn10300_preferred_reload_class
#undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
#define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS \
  mn10300_preferred_output_reload_class
#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD mn10300_secondary_reload

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT mn10300_trampoline_init

/* Function return values.  */
#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE mn10300_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE mn10300_libcall_value

/* MI thunk output.  */
#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK mn10300_asm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK mn10300_can_output_mi_thunk

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST mn10300_adjust_sched_cost

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE mn10300_conditional_register_usage

#undef TARGET_MD_ASM_ADJUST
#define TARGET_MD_ASM_ADJUST mn10300_md_asm_adjust

/* CC_REG holds the condition flags.  */
#undef TARGET_FLAGS_REGNUM
#define TARGET_FLAGS_REGNUM CC_REG

struct gcc_target targetm = TARGET_INITIALIZER;