]> git.ipfire.org Git - thirdparty/gcc.git/blame - gcc/config/mn10300/mn10300.cc
Update copyright years.
[thirdparty/gcc.git] / gcc / config / mn10300 / mn10300.cc
CommitLineData
e53b6e56 1/* Subroutines for insn-output.cc for Matsushita MN10300 series
a945c346 2 Copyright (C) 1996-2024 Free Software Foundation, Inc.
11bb1f11
JL
3 Contributed by Jeff Law (law@cygnus.com).
4
e7ab5593 5 This file is part of GCC.
11bb1f11 6
e7ab5593
NC
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11bb1f11 11
e7ab5593
NC
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
11bb1f11 16
e7ab5593
NC
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
11bb1f11 20
8fcc61f8
RS
21#define IN_TARGET_CODE 1
22
11bb1f11 23#include "config.h"
c5c76735 24#include "system.h"
4977bab6 25#include "coretypes.h"
c7131fb2 26#include "backend.h"
e11c4407 27#include "target.h"
11bb1f11 28#include "rtl.h"
e11c4407 29#include "tree.h"
314e6352
ML
30#include "stringpool.h"
31#include "attribs.h"
e11c4407
AM
32#include "cfghooks.h"
33#include "cfgloop.h"
c7131fb2 34#include "df.h"
4d0cdd0c 35#include "memmodel.h"
e11c4407
AM
36#include "tm_p.h"
37#include "optabs.h"
38#include "regs.h"
39#include "emit-rtl.h"
40#include "recog.h"
41#include "diagnostic-core.h"
40e23961 42#include "alias.h"
d8a2d370
DN
43#include "stor-layout.h"
44#include "varasm.h"
45#include "calls.h"
11bb1f11
JL
46#include "output.h"
47#include "insn-attr.h"
6528281d 48#include "reload.h"
36566b39 49#include "explow.h"
11bb1f11 50#include "expr.h"
a45d420a 51#include "tm-constrs.h"
60393bbc 52#include "cfgrtl.h"
7ee2468b 53#include "dumpfile.h"
9b2b7279 54#include "builtins.h"
11bb1f11 55
994c5d85 56/* This file should be included last. */
d58627a0
RS
57#include "target-def.h"
58
/* This is used in the am33_2.0-linux-gnu port, in which global symbol
   names are not prefixed by underscores, to tell whether to prefix a
   label with a plus sign or not, so that the assembler can tell
   symbol names from register names.  */
int mn10300_protect_label;

/* Selected processor type for tuning (set from -mtune= in
   mn10300_option_override).  */
enum processor_type mn10300_tune_cpu = PROCESSOR_DEFAULT;

/* Bit flags describing which condition-code bits a comparison mode or
   rtx code provides/needs; used by the 'b'/'B' operand printers.  */
#define CC_FLAG_Z	1
#define CC_FLAG_N	2
#define CC_FLAG_C	4
#define CC_FLAG_V	8

static int cc_flags_for_mode(machine_mode);
static int cc_flags_for_code(enum rtx_code);
672a6f42 75\f
c5387660 76/* Implement TARGET_OPTION_OVERRIDE. */
c5387660
JM
77static void
78mn10300_option_override (void)
13dd556c
RS
79{
80 if (TARGET_AM33)
81 target_flags &= ~MASK_MULT_BUG;
f3f63737
NC
82 else
83 {
84 /* Disable scheduling for the MN10300 as we do
85 not have timing information available for it. */
86 flag_schedule_insns = 0;
87 flag_schedule_insns_after_reload = 0;
ec815d65
RH
88
89 /* Force enable splitting of wide types, as otherwise it is trivial
90 to run out of registers. Indeed, this works so well that register
91 allocation problems are now more common *without* optimization,
92 when this flag is not enabled by default. */
93 flag_split_wide_types = 1;
f3f63737 94 }
bad41521 95
f3f63737
NC
96 if (mn10300_tune_string)
97 {
98 if (strcasecmp (mn10300_tune_string, "mn10300") == 0)
99 mn10300_tune_cpu = PROCESSOR_MN10300;
100 else if (strcasecmp (mn10300_tune_string, "am33") == 0)
101 mn10300_tune_cpu = PROCESSOR_AM33;
102 else if (strcasecmp (mn10300_tune_string, "am33-2") == 0)
103 mn10300_tune_cpu = PROCESSOR_AM33_2;
104 else if (strcasecmp (mn10300_tune_string, "am34") == 0)
105 mn10300_tune_cpu = PROCESSOR_AM34;
106 else
a3f9f006 107 error ("%<-mtune=%> expects mn10300, am33, am33-2, or am34");
f3f63737 108 }
13dd556c
RS
109}
110
1bc7c5b6 111static void
f1777882 112mn10300_file_start (void)
11bb1f11 113{
1bc7c5b6 114 default_file_start ();
705ac34f 115
18e9d2f9
AO
116 if (TARGET_AM33_2)
117 fprintf (asm_out_file, "\t.am33_2\n");
118 else if (TARGET_AM33)
1bc7c5b6 119 fprintf (asm_out_file, "\t.am33\n");
11bb1f11
JL
120}
121\f
/* Mnemonics for the LIW (long instruction word) operations, indexed by
   LIW opcode number; printed by the 'W' case of mn10300_print_operand.
   Note: This list must match the liw_op attribute in mn10300.md.  */

static const char *liw_op_names[] =
{
  "add", "cmp", "sub", "mov",
  "and", "or", "xor",
  "asr", "lsr", "asl",
  "none", "max"
};
131
/* Print operand X using operand code CODE to assembly language output file
   FILE.

   Operand codes handled here:
     'W'     -- a LIW opcode number; prints the mnemonic from liw_op_names.
     'b','B' -- a comparison; prints the branch condition suffix ('B'
		reverses the condition first).
     'C'     -- a call operand; a REG is wrapped in parentheses.
     'D'     -- a MEM address in parens, or an FP register pair "fd<n>".
     'L'     -- the least significant word of a 64-bit value.
     'H'     -- the most significant word of a 64-bit value.
     'A'     -- an address, always parenthesized; a bare REG is printed
		as "(0,reg)" via a PLUS with const0_rtx.
     'N'     -- bitwise complement of a constant, masked to 8 bits.
     'U'     -- a constant masked to 8 bits.
     'S'     -- a shift count, masked to 5 bits (the hardware ignores
		upper bits but the assembler would reject them).
     other   -- generic operand printing.  */

void
mn10300_print_operand (FILE *file, rtx x, int code)
{
  switch (code)
    {
    case 'W':
      {
	unsigned int liw_op = UINTVAL (x);

	gcc_assert (TARGET_ALLOW_LIW);
	gcc_assert (liw_op < LIW_OP_MAX);
	fputs (liw_op_names[liw_op], file);
	break;
      }

    case 'b':
    case 'B':
      {
	enum rtx_code cmp = GET_CODE (x);
	machine_mode mode = GET_MODE (XEXP (x, 0));
	const char *str;
	int have_flags;

	if (code == 'B')
	  cmp = reverse_condition (cmp);
	have_flags = cc_flags_for_mode (mode);

	switch (cmp)
	  {
	  case NE:
	    str = "ne";
	    break;
	  case EQ:
	    str = "eq";
	    break;
	  case GE:
	    /* bge is smaller than bnc.  */
	    str = (have_flags & CC_FLAG_V ? "ge" : "nc");
	    break;
	  case LT:
	    str = (have_flags & CC_FLAG_V ? "lt" : "ns");
	    break;
	  case GT:
	    str = "gt";
	    break;
	  case LE:
	    str = "le";
	    break;
	  case GEU:
	    str = "cc";
	    break;
	  case GTU:
	    str = "hi";
	    break;
	  case LEU:
	    str = "ls";
	    break;
	  case LTU:
	    str = "cs";
	    break;
	  case ORDERED:
	    str = "lge";
	    break;
	  case UNORDERED:
	    str = "uo";
	    break;
	  case LTGT:
	    str = "lg";
	    break;
	  case UNEQ:
	    str = "ue";
	    break;
	  case UNGE:
	    str = "uge";
	    break;
	  case UNGT:
	    str = "ug";
	    break;
	  case UNLE:
	    str = "ule";
	    break;
	  case UNLT:
	    str = "ul";
	    break;
	  default:
	    gcc_unreachable ();
	  }

	/* The mode must provide every flag the condition consumes.  */
	gcc_checking_assert ((cc_flags_for_code (cmp) & ~have_flags) == 0);
	fputs (str, file);
      }
      break;

    case 'C':
      /* This is used for the operand to a call instruction;
	 if it's a REG, enclose it in parens, else output
	 the operand normally.  */
      if (REG_P (x))
	{
	  fputc ('(', file);
	  mn10300_print_operand (file, x, 0);
	  fputc (')', file);
	}
      else
	mn10300_print_operand (file, x, 0);
      break;

    case 'D':
      switch (GET_CODE (x))
	{
	case MEM:
	  fputc ('(', file);
	  output_address (GET_MODE (x), XEXP (x, 0));
	  fputc (')', file);
	  break;

	case REG:
	  /* Print an FP register pair name; 18 is presumably the
	     offset of the first FP register in the register file --
	     TODO confirm against FIRST_FP_REGNUM.  */
	  fprintf (file, "fd%d", REGNO (x) - 18);
	  break;

	default:
	  gcc_unreachable ();
	}
      break;

      /* These are the least significant word in a 64bit value.  */
    case 'L':
      switch (GET_CODE (x))
	{
	case MEM:
	  fputc ('(', file);
	  output_address (GET_MODE (x), XEXP (x, 0));
	  fputc (')', file);
	  break;

	case REG:
	  fprintf (file, "%s", reg_names[REGNO (x)]);
	  break;

	case SUBREG:
	  fprintf (file, "%s", reg_names[subreg_regno (x)]);
	  break;

	case CONST_DOUBLE:
	  {
	    long val[2];

	    switch (GET_MODE (x))
	      {
	      case E_DFmode:
		REAL_VALUE_TO_TARGET_DOUBLE
		  (*CONST_DOUBLE_REAL_VALUE (x), val);
		fprintf (file, "0x%lx", val[0]);
		break;
	      case E_SFmode:
		REAL_VALUE_TO_TARGET_SINGLE
		  (*CONST_DOUBLE_REAL_VALUE (x), val[0]);
		fprintf (file, "0x%lx", val[0]);
		break;
	      case E_VOIDmode:
	      case E_DImode:
		mn10300_print_operand_address (file,
					       GEN_INT (CONST_DOUBLE_LOW (x)));
		break;
	      default:
		break;
	      }
	    break;
	  }

	case CONST_INT:
	  {
	    rtx low, high;
	    split_double (x, &low, &high);
	    fprintf (file, "%ld", (long)INTVAL (low));
	    break;
	  }

	default:
	  gcc_unreachable ();
	}
      break;

      /* Similarly, but for the most significant word.  */
    case 'H':
      switch (GET_CODE (x))
	{
	case MEM:
	  fputc ('(', file);
	  /* Address the high word 4 bytes past the low word.  */
	  x = adjust_address (x, SImode, 4);
	  output_address (GET_MODE (x), XEXP (x, 0));
	  fputc (')', file);
	  break;

	case REG:
	  fprintf (file, "%s", reg_names[REGNO (x) + 1]);
	  break;

	case SUBREG:
	  fprintf (file, "%s", reg_names[subreg_regno (x) + 1]);
	  break;

	case CONST_DOUBLE:
	  {
	    long val[2];

	    switch (GET_MODE (x))
	      {
	      case E_DFmode:
		REAL_VALUE_TO_TARGET_DOUBLE
		  (*CONST_DOUBLE_REAL_VALUE (x), val);
		fprintf (file, "0x%lx", val[1]);
		break;
	      case E_SFmode:
		/* A single-precision value has no high word.  */
		gcc_unreachable ();
	      case E_VOIDmode:
	      case E_DImode:
		mn10300_print_operand_address (file,
					       GEN_INT (CONST_DOUBLE_HIGH (x)));
		break;
	      default:
		break;
	      }
	    break;
	  }

	case CONST_INT:
	  {
	    rtx low, high;
	    split_double (x, &low, &high);
	    fprintf (file, "%ld", (long)INTVAL (high));
	    break;
	  }

	default:
	  gcc_unreachable ();
	}
      break;

    case 'A':
      fputc ('(', file);
      if (REG_P (XEXP (x, 0)))
	output_address (VOIDmode, gen_rtx_PLUS (SImode,
						XEXP (x, 0), const0_rtx));
      else
	output_address (VOIDmode, XEXP (x, 0));
      fputc (')', file);
      break;

    case 'N':
      gcc_assert (INTVAL (x) >= -128 && INTVAL (x) <= 255);
      fprintf (file, "%d", (int)((~INTVAL (x)) & 0xff));
      break;

    case 'U':
      gcc_assert (INTVAL (x) >= -128 && INTVAL (x) <= 255);
      fprintf (file, "%d", (int)(INTVAL (x) & 0xff));
      break;

      /* For shift counts.  The hardware ignores the upper bits of
	 any immediate, but the assembler will flag an out of range
	 shift count as an error.  So we mask off the high bits
	 of the immediate here.  */
    case 'S':
      if (CONST_INT_P (x))
	{
	  fprintf (file, "%d", (int)(INTVAL (x) & 0x1f));
	  break;
	}
      /* FALL THROUGH */

    default:
      switch (GET_CODE (x))
	{
	case MEM:
	  fputc ('(', file);
	  output_address (GET_MODE (x), XEXP (x, 0));
	  fputc (')', file);
	  break;

	case PLUS:
	  output_address (VOIDmode, x);
	  break;

	case REG:
	  fprintf (file, "%s", reg_names[REGNO (x)]);
	  break;

	case SUBREG:
	  fprintf (file, "%s", reg_names[subreg_regno (x)]);
	  break;

	  /* This will only be single precision....  */
	case CONST_DOUBLE:
	  {
	    unsigned long val;

	    REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), val);
	    fprintf (file, "0x%lx", val);
	    break;
	  }

	case CONST_INT:
	case SYMBOL_REF:
	case CONST:
	case LABEL_REF:
	case CODE_LABEL:
	case UNSPEC:
	  mn10300_print_operand_address (file, x);
	  break;
	default:
	  gcc_unreachable ();
	}
      break;
    }
}
451
452/* Output assembly language output for the address ADDR to FILE. */
453
454void
e7ab5593 455mn10300_print_operand_address (FILE *file, rtx addr)
11bb1f11
JL
456{
457 switch (GET_CODE (addr))
458 {
705ac34f 459 case POST_INC:
36846b26 460 mn10300_print_operand (file, XEXP (addr, 0), 0);
705ac34f
JL
461 fputc ('+', file);
462 break;
36846b26
RH
463
464 case POST_MODIFY:
465 mn10300_print_operand (file, XEXP (addr, 0), 0);
466 fputc ('+', file);
467 fputc (',', file);
468 mn10300_print_operand (file, XEXP (addr, 1), 0);
469 break;
470
11bb1f11 471 case REG:
e7ab5593 472 mn10300_print_operand (file, addr, 0);
11bb1f11
JL
473 break;
474 case PLUS:
475 {
36846b26
RH
476 rtx base = XEXP (addr, 0);
477 rtx index = XEXP (addr, 1);
478
479 if (REG_P (index) && !REG_OK_FOR_INDEX_P (index))
480 {
481 rtx x = base;
482 base = index;
483 index = x;
484
485 gcc_assert (REG_P (index) && REG_OK_FOR_INDEX_P (index));
486 }
487 gcc_assert (REG_OK_FOR_BASE_P (base));
488
e7ab5593 489 mn10300_print_operand (file, index, 0);
11bb1f11 490 fputc (',', file);
36846b26 491 mn10300_print_operand (file, base, 0);
11bb1f11
JL
492 break;
493 }
494 case SYMBOL_REF:
495 output_addr_const (file, addr);
496 break;
497 default:
498 output_addr_const (file, addr);
499 break;
500 }
501}
502
535bd17c
AS
503/* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA.
504
505 Used for PIC-specific UNSPECs. */
506
507static bool
508mn10300_asm_output_addr_const_extra (FILE *file, rtx x)
509{
510 if (GET_CODE (x) == UNSPEC)
511 {
512 switch (XINT (x, 1))
513 {
535bd17c
AS
514 case UNSPEC_PIC:
515 /* GLOBAL_OFFSET_TABLE or local symbols, no suffix. */
516 output_addr_const (file, XVECEXP (x, 0, 0));
517 break;
518 case UNSPEC_GOT:
519 output_addr_const (file, XVECEXP (x, 0, 0));
520 fputs ("@GOT", file);
521 break;
522 case UNSPEC_GOTOFF:
523 output_addr_const (file, XVECEXP (x, 0, 0));
524 fputs ("@GOTOFF", file);
525 break;
526 case UNSPEC_PLT:
527 output_addr_const (file, XVECEXP (x, 0, 0));
528 fputs ("@PLT", file);
529 break;
530 case UNSPEC_GOTSYM_OFF:
531 assemble_name (file, GOT_SYMBOL_NAME);
532 fputs ("-(", file);
533 output_addr_const (file, XVECEXP (x, 0, 0));
534 fputs ("-.)", file);
535 break;
536 default:
537 return false;
538 }
539 return true;
540 }
541 else
542 return false;
543}
544
18e9d2f9
AO
545/* Count the number of FP registers that have to be saved. */
546static int
f1777882 547fp_regs_to_save (void)
18e9d2f9
AO
548{
549 int i, n = 0;
550
551 if (! TARGET_AM33_2)
552 return 0;
553
554 for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
d7fb4c31 555 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
18e9d2f9
AO
556 ++n;
557
558 return n;
559}
560
f6cd7c62
RS
561/* Print a set of registers in the format required by "movm" and "ret".
562 Register K is saved if bit K of MASK is set. The data and address
563 registers can be stored individually, but the extended registers cannot.
9f5ed61a 564 We assume that the mask already takes that into account. For instance,
8596d0a1 565 bits 14 to 17 must have the same value. */
f6cd7c62
RS
566
567void
f1777882 568mn10300_print_reg_list (FILE *file, int mask)
f6cd7c62
RS
569{
570 int need_comma;
571 int i;
572
573 need_comma = 0;
574 fputc ('[', file);
575
576 for (i = 0; i < FIRST_EXTENDED_REGNUM; i++)
577 if ((mask & (1 << i)) != 0)
578 {
579 if (need_comma)
580 fputc (',', file);
581 fputs (reg_names [i], file);
582 need_comma = 1;
583 }
584
585 if ((mask & 0x3c000) != 0)
586 {
dc759020 587 gcc_assert ((mask & 0x3c000) == 0x3c000);
f6cd7c62
RS
588 if (need_comma)
589 fputc (',', file);
590 fputs ("exreg1", file);
591 need_comma = 1;
592 }
593
594 fputc (']', file);
595}
596
37a185d7
RH
597/* If the MDR register is never clobbered, we can use the RETF instruction
598 which takes the address from the MDR register. This is 3 cycles faster
599 than having to load the address from the stack. */
600
601bool
602mn10300_can_use_retf_insn (void)
603{
604 /* Don't bother if we're not optimizing. In this case we won't
605 have proper access to df_regs_ever_live_p. */
606 if (!optimize)
607 return false;
608
609 /* EH returns alter the saved return address; MDR is not current. */
610 if (crtl->calls_eh_return)
611 return false;
612
613 /* Obviously not if MDR is ever clobbered. */
614 if (df_regs_ever_live_p (MDR_REG))
615 return false;
616
617 /* ??? Careful not to use this during expand_epilogue etc. */
618 gcc_assert (!in_sequence_p ());
619 return leaf_function_p ();
620}
621
622bool
623mn10300_can_use_rets_insn (void)
38c37a0e 624{
040c5757 625 return !mn10300_initial_offset (ARG_POINTER_REGNUM, STACK_POINTER_REGNUM);
38c37a0e
JL
626}
627
f6cd7c62
RS
628/* Returns the set of live, callee-saved registers as a bitmask. The
629 callee-saved extended registers cannot be stored individually, so
20b2e6a0 630 all of them will be included in the mask if any one of them is used.
e902c266
NC
631 Also returns the number of bytes in the registers in the mask if
632 BYTES_SAVED is not NULL. */
f6cd7c62 633
e902c266
NC
634unsigned int
635mn10300_get_live_callee_saved_regs (unsigned int * bytes_saved)
f6cd7c62
RS
636{
637 int mask;
638 int i;
e902c266 639 unsigned int count;
f6cd7c62 640
e902c266 641 count = mask = 0;
18e9d2f9 642 for (i = 0; i <= LAST_EXTENDED_REGNUM; i++)
d7fb4c31 643 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
e902c266
NC
644 {
645 mask |= (1 << i);
646 ++ count;
647 }
648
f6cd7c62 649 if ((mask & 0x3c000) != 0)
e902c266
NC
650 {
651 for (i = 0x04000; i < 0x40000; i <<= 1)
652 if ((mask & i) == 0)
653 ++ count;
654
655 mask |= 0x3c000;
656 }
657
658 if (bytes_saved)
659 * bytes_saved = count * UNITS_PER_WORD;
f6cd7c62
RS
660
661 return mask;
662}
663
/* Mark the insn or rtx R as frame-related and return it, so that the
   marking can be wrapped around emit calls inline.  */
static rtx
F (rtx r)
{
  RTX_FRAME_RELATED_P (r) = 1;
  return r;
}
670
/* Generate an instruction that pushes several registers onto the stack.
   Register K will be saved if bit K in MASK is set.  The function does
   nothing if MASK is zero.

   To be compatible with the "movm" instruction, the lowest-numbered
   register must be stored in the lowest slot.  If MASK is the set
   { R1,...,RN }, where R1...RN are ordered least first, the generated
   instruction will have the form:

   (parallel
     (set (reg:SI 9) (plus:SI (reg:SI 9) (const_int -N*4)))
     (set (mem:SI (plus:SI (reg:SI 9)
			   (const_int -1*4)))
	  (reg:SI RN))
     ...
     (set (mem:SI (plus:SI (reg:SI 9)
			   (const_int -N*4)))
	  (reg:SI R1))) */

static void
mn10300_gen_multiple_store (unsigned int mask)
{
  /* The order in which registers are stored, from SP-4 through SP-N*4.  */
  static const unsigned int store_order[8] = {
    /* e2, e3: never saved */
    FIRST_EXTENDED_REGNUM + 4,
    FIRST_EXTENDED_REGNUM + 5,
    FIRST_EXTENDED_REGNUM + 6,
    FIRST_EXTENDED_REGNUM + 7,
    /* e0, e1, mdrq, mcrh, mcrl, mcvf: never saved.  */
    FIRST_DATA_REGNUM + 2,
    FIRST_DATA_REGNUM + 3,
    FIRST_ADDRESS_REGNUM + 2,
    FIRST_ADDRESS_REGNUM + 3,
    /* d0, d1, a0, a1, mdr, lir, lar: never saved.  */
  };

  /* elts[0] holds the SP update; elts[1..count] hold one store each,
     hence room for 1 + 8 entries.  */
  rtx x, elts[9];
  unsigned int i;
  int count;

  if (mask == 0)
    return;

  for (i = count = 0; i < ARRAY_SIZE(store_order); ++i)
    {
      unsigned regno = store_order[i];

      if (((mask >> regno) & 1) == 0)
	continue;

      ++count;
      x = plus_constant (Pmode, stack_pointer_rtx, count * -4);
      x = gen_frame_mem (SImode, x);
      x = gen_rtx_SET (x, gen_rtx_REG (SImode, regno));
      elts[count] = F(x);

      /* Remove the register from the mask so that... */
      mask &= ~(1u << regno);
    }

  /* ... we can make sure that we didn't try to use a register
     not listed in the store order.  */
  gcc_assert (mask == 0);

  /* Create the instruction that updates the stack pointer.  */
  x = plus_constant (Pmode, stack_pointer_rtx, count * -4);
  x = gen_rtx_SET (stack_pointer_rtx, x);
  elts[0] = F(x);

  /* We need one PARALLEL element to update the stack pointer and
     an additional element for each register that is stored.  */
  x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (count + 1, elts));
  F (emit_insn (x));
}
746
/* Return the number of set bits in MASK.  */
static inline unsigned int
popcount (unsigned int mask)
{
  unsigned int n_bits;

  /* Clear the lowest set bit on each iteration.  */
  for (n_bits = 0; mask != 0; mask &= mask - 1)
    n_bits++;

  return n_bits;
}
759
/* Expand the function prologue: push the live callee-saved general
   registers with a single movm-compatible PARALLEL, save any live FP
   registers (AM33/2.0 only) using whichever addressing strategy gives
   the smallest code, set up the frame pointer if one is needed, and
   finally allocate the local frame.  */
void
mn10300_expand_prologue (void)
{
  HOST_WIDE_INT size = mn10300_frame_size ();
  unsigned int mask;

  mask = mn10300_get_live_callee_saved_regs (NULL);
  /* If we use any of the callee-saved registers, save them now.  */
  mn10300_gen_multiple_store (mask);

  if (flag_stack_usage_info)
    current_function_static_stack_size = size + popcount (mask) * 4;

  if (TARGET_AM33_2 && fp_regs_to_save ())
    {
      int num_regs_to_save = fp_regs_to_save (), i;
      HOST_WIDE_INT xsize;
      enum
      {
	save_sp_merge,
	save_sp_no_merge,
	save_sp_partial_merge,
	save_a0_merge,
	save_a0_no_merge
      } strategy;
      unsigned int strategy_size = (unsigned)-1, this_strategy_size;
      rtx reg;

      if (flag_stack_usage_info)
	current_function_static_stack_size += num_regs_to_save * 4;

      /* We have several different strategies to save FP registers.
	 We can store them using SP offsets, which is beneficial if
	 there are just a few registers to save, or we can use `a0' in
	 post-increment mode (`a0' is the only call-clobbered address
	 register that is never used to pass information to a
	 function).  Furthermore, if we don't need a frame pointer, we
	 can merge the two SP adds into a single one, but this isn't
	 always beneficial; sometimes we can just split the two adds
	 so that we don't exceed a 16-bit constant size.  The code
	 below will select which strategy to use, so as to generate
	 smallest code.  Ties are broken in favor or shorter sequences
	 (in terms of number of instructions).  */

/* Byte sizes of an "add #imm, An" / "add #imm, SP" instruction for a
   given immediate S.  */
#define SIZE_ADD_AX(S) ((((S) >= (1 << 15)) || ((S) < -(1 << 15))) ? 6 \
			: (((S) >= (1 << 7)) || ((S) < -(1 << 7))) ? 4 : 2)
#define SIZE_ADD_SP(S) ((((S) >= (1 << 15)) || ((S) < -(1 << 15))) ? 6 \
			: (((S) >= (1 << 7)) || ((S) < -(1 << 7))) ? 4 : 3)

/* We add 0 * (S) in two places to promote to the type of S,
   so that all arms of the conditional have the same type.  */
#define SIZE_FMOV_LIMIT(S,N,L,SIZE1,SIZE2,ELSE) \
  (((S) >= (L)) ? 0 * (S) + (SIZE1) * (N) \
   : ((S) + 4 * (N) >= (L)) ? (((L) - (S)) / 4 * (SIZE2) \
			       + ((S) + 4 * (N) - (L)) / 4 * (SIZE1)) \
   : 0 * (S) + (ELSE))
#define SIZE_FMOV_SP_(S,N) \
  (SIZE_FMOV_LIMIT ((S), (N), (1 << 24), 7, 6, \
   SIZE_FMOV_LIMIT ((S), (N), (1 << 8), 6, 4, \
		    (S) ? 4 * (N) : 3 + 4 * ((N) - 1))))
#define SIZE_FMOV_SP(S,N) (SIZE_FMOV_SP_ ((unsigned HOST_WIDE_INT)(S), (N)))

      /* Consider alternative save_sp_merge only if we don't need the
	 frame pointer and size is nonzero.  */
      if (! frame_pointer_needed && size)
	{
	  /* Insn: add -(size + 4 * num_regs_to_save), sp.  */
	  this_strategy_size = SIZE_ADD_SP (-(size + 4 * num_regs_to_save));
	  /* Insn: fmov fs#, (##, sp), for each fs# to be saved.  */
	  this_strategy_size += SIZE_FMOV_SP (size, num_regs_to_save);

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = save_sp_merge;
	      strategy_size = this_strategy_size;
	    }
	}

      /* Consider alternative save_sp_no_merge unconditionally.  */
      /* Insn: add -4 * num_regs_to_save, sp.  */
      this_strategy_size = SIZE_ADD_SP (-4 * num_regs_to_save);
      /* Insn: fmov fs#, (##, sp), for each fs# to be saved.  */
      this_strategy_size += SIZE_FMOV_SP (0, num_regs_to_save);
      if (size)
	{
	  /* Insn: add -size, sp.  */
	  this_strategy_size += SIZE_ADD_SP (-size);
	}

      if (this_strategy_size < strategy_size)
	{
	  strategy = save_sp_no_merge;
	  strategy_size = this_strategy_size;
	}

      /* Consider alternative save_sp_partial_merge only if we don't
	 need a frame pointer and size is reasonably large.  */
      if (! frame_pointer_needed && size + 4 * num_regs_to_save > 128)
	{
	  /* Insn: add -128, sp.  */
	  this_strategy_size = SIZE_ADD_SP (-128);
	  /* Insn: fmov fs#, (##, sp), for each fs# to be saved.  */
	  this_strategy_size += SIZE_FMOV_SP (128 - 4 * num_regs_to_save,
					      num_regs_to_save);
	  if (size)
	    {
	      /* Insn: add 128-size, sp.  */
	      this_strategy_size += SIZE_ADD_SP (128 - size);
	    }

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = save_sp_partial_merge;
	      strategy_size = this_strategy_size;
	    }
	}

      /* Consider alternative save_a0_merge only if we don't need a
	 frame pointer, size is nonzero and the user hasn't
	 changed the calling conventions of a0.  */
      if (! frame_pointer_needed && size
	  && call_used_regs[FIRST_ADDRESS_REGNUM]
	  && ! fixed_regs[FIRST_ADDRESS_REGNUM])
	{
	  /* Insn: add -(size + 4 * num_regs_to_save), sp.  */
	  this_strategy_size = SIZE_ADD_SP (-(size + 4 * num_regs_to_save));
	  /* Insn: mov sp, a0.  */
	  this_strategy_size++;
	  if (size)
	    {
	      /* Insn: add size, a0.  */
	      this_strategy_size += SIZE_ADD_AX (size);
	    }
	  /* Insn: fmov fs#, (a0+), for each fs# to be saved.  */
	  this_strategy_size += 3 * num_regs_to_save;

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = save_a0_merge;
	      strategy_size = this_strategy_size;
	    }
	}

      /* Consider alternative save_a0_no_merge if the user hasn't
	 changed the calling conventions of a0.  */
      if (call_used_regs[FIRST_ADDRESS_REGNUM]
	  && ! fixed_regs[FIRST_ADDRESS_REGNUM])
	{
	  /* Insn: add -4 * num_regs_to_save, sp.  */
	  this_strategy_size = SIZE_ADD_SP (-4 * num_regs_to_save);
	  /* Insn: mov sp, a0.  */
	  this_strategy_size++;
	  /* Insn: fmov fs#, (a0+), for each fs# to be saved.  */
	  this_strategy_size += 3 * num_regs_to_save;
	  if (size)
	    {
	      /* Insn: add -size, sp.  */
	      this_strategy_size += SIZE_ADD_SP (-size);
	    }

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = save_a0_no_merge;
	      strategy_size = this_strategy_size;
	    }
	}

      /* Emit the initial SP add, common to all strategies.  */
      switch (strategy)
	{
	case save_sp_no_merge:
	case save_a0_no_merge:
	  F (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-4 * num_regs_to_save))));
	  xsize = 0;
	  break;

	case save_sp_partial_merge:
	  F (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-128))));
	  xsize = 128 - 4 * num_regs_to_save;
	  size -= xsize;
	  break;

	case save_sp_merge:
	case save_a0_merge:
	  F (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-(size + 4 * num_regs_to_save)))));
	  /* We'll have to adjust FP register saves according to the
	     frame size.  */
	  xsize = size;
	  /* Since we've already created the stack frame, don't do it
	     again at the end of the function.  */
	  size = 0;
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Now prepare register a0, if we have decided to use it.  */
      switch (strategy)
	{
	case save_sp_merge:
	case save_sp_no_merge:
	case save_sp_partial_merge:
	  reg = 0;
	  break;

	case save_a0_merge:
	case save_a0_no_merge:
	  reg = gen_rtx_REG (SImode, FIRST_ADDRESS_REGNUM);
	  F (emit_insn (gen_movsi (reg, stack_pointer_rtx)));
	  if (xsize)
	    F (emit_insn (gen_addsi3 (reg, reg, GEN_INT (xsize))));
	  reg = gen_rtx_POST_INC (SImode, reg);
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Now actually save the FP registers.  */
      for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
	if (df_regs_ever_live_p (i) && ! call_used_regs[i])
	  {
	    rtx addr;

	    if (reg)
	      addr = reg;
	    else
	      {
		/* If we aren't using `a0', use an SP offset.  */
		if (xsize)
		  {
		    addr = gen_rtx_PLUS (SImode,
					 stack_pointer_rtx,
					 GEN_INT (xsize));
		  }
		else
		  addr = stack_pointer_rtx;

		xsize += 4;
	      }

	    F (emit_insn (gen_movsf (gen_rtx_MEM (SFmode, addr),
				     gen_rtx_REG (SFmode, i))));
	  }
    }

  /* Now put the frame pointer into the frame pointer register.  */
  if (frame_pointer_needed)
    F (emit_move_insn (frame_pointer_rtx, stack_pointer_rtx));

  /* Allocate stack for this frame.  */
  if (size)
    F (emit_insn (gen_addsi3 (stack_pointer_rtx,
			      stack_pointer_rtx,
			      GEN_INT (-size))));

  if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
    emit_insn (gen_load_pic ());
}
1026
1027void
e7ab5593 1028mn10300_expand_epilogue (void)
11bb1f11 1029{
040c5757 1030 HOST_WIDE_INT size = mn10300_frame_size ();
e902c266
NC
1031 unsigned int reg_save_bytes;
1032
1033 mn10300_get_live_callee_saved_regs (& reg_save_bytes);
1034
18e9d2f9
AO
1035 if (TARGET_AM33_2 && fp_regs_to_save ())
1036 {
1037 int num_regs_to_save = fp_regs_to_save (), i;
1038 rtx reg = 0;
1039
1040 /* We have several options to restore FP registers. We could
1041 load them from SP offsets, but, if there are enough FP
1042 registers to restore, we win if we use a post-increment
1043 addressing mode. */
1044
1045 /* If we have a frame pointer, it's the best option, because we
1046 already know it has the value we want. */
1047 if (frame_pointer_needed)
1048 reg = gen_rtx_REG (SImode, FRAME_POINTER_REGNUM);
1049 /* Otherwise, we may use `a1', since it's call-clobbered and
1050 it's never used for return values. But only do so if it's
1051 smaller than using SP offsets. */
1052 else
1053 {
1054 enum { restore_sp_post_adjust,
1055 restore_sp_pre_adjust,
1056 restore_sp_partial_adjust,
1057 restore_a1 } strategy;
1058 unsigned int this_strategy_size, strategy_size = (unsigned)-1;
1059
1060 /* Consider using sp offsets before adjusting sp. */
1061 /* Insn: fmov (##,sp),fs#, for each fs# to be restored. */
1062 this_strategy_size = SIZE_FMOV_SP (size, num_regs_to_save);
1063 /* If size is too large, we'll have to adjust SP with an
1064 add. */
37a185d7 1065 if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
18e9d2f9
AO
1066 {
1067 /* Insn: add size + 4 * num_regs_to_save, sp. */
1068 this_strategy_size += SIZE_ADD_SP (size + 4 * num_regs_to_save);
1069 }
1070 /* If we don't have to restore any non-FP registers,
1071 we'll be able to save one byte by using rets. */
37a185d7 1072 if (! reg_save_bytes)
18e9d2f9
AO
1073 this_strategy_size--;
1074
1075 if (this_strategy_size < strategy_size)
1076 {
1077 strategy = restore_sp_post_adjust;
1078 strategy_size = this_strategy_size;
1079 }
1080
1081 /* Consider using sp offsets after adjusting sp. */
1082 /* Insn: add size, sp. */
1083 this_strategy_size = SIZE_ADD_SP (size);
1084 /* Insn: fmov (##,sp),fs#, for each fs# to be restored. */
1085 this_strategy_size += SIZE_FMOV_SP (0, num_regs_to_save);
1086 /* We're going to use ret to release the FP registers
8596d0a1 1087 save area, so, no savings. */
18e9d2f9
AO
1088
1089 if (this_strategy_size < strategy_size)
1090 {
1091 strategy = restore_sp_pre_adjust;
1092 strategy_size = this_strategy_size;
1093 }
1094
1095 /* Consider using sp offsets after partially adjusting sp.
1096 When size is close to 32Kb, we may be able to adjust SP
1097 with an imm16 add instruction while still using fmov
1098 (d8,sp). */
37a185d7 1099 if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
18e9d2f9
AO
1100 {
1101 /* Insn: add size + 4 * num_regs_to_save
37a185d7 1102 + reg_save_bytes - 252,sp. */
18e9d2f9 1103 this_strategy_size = SIZE_ADD_SP (size + 4 * num_regs_to_save
c81369fa 1104 + (int) reg_save_bytes - 252);
18e9d2f9 1105 /* Insn: fmov (##,sp),fs#, fo each fs# to be restored. */
37a185d7 1106 this_strategy_size += SIZE_FMOV_SP (252 - reg_save_bytes
18e9d2f9
AO
1107 - 4 * num_regs_to_save,
1108 num_regs_to_save);
1109 /* We're going to use ret to release the FP registers
8596d0a1 1110 save area, so, no savings. */
18e9d2f9
AO
1111
1112 if (this_strategy_size < strategy_size)
1113 {
1114 strategy = restore_sp_partial_adjust;
1115 strategy_size = this_strategy_size;
1116 }
1117 }
1118
1119 /* Consider using a1 in post-increment mode, as long as the
1120 user hasn't changed the calling conventions of a1. */
d7fb4c31 1121 if (call_used_regs[FIRST_ADDRESS_REGNUM + 1]
18e9d2f9
AO
1122 && ! fixed_regs[FIRST_ADDRESS_REGNUM+1])
1123 {
1124 /* Insn: mov sp,a1. */
1125 this_strategy_size = 1;
1126 if (size)
1127 {
1128 /* Insn: add size,a1. */
1129 this_strategy_size += SIZE_ADD_AX (size);
1130 }
1131 /* Insn: fmov (a1+),fs#, for each fs# to be restored. */
1132 this_strategy_size += 3 * num_regs_to_save;
1133 /* If size is large enough, we may be able to save a
1134 couple of bytes. */
37a185d7 1135 if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
18e9d2f9
AO
1136 {
1137 /* Insn: mov a1,sp. */
1138 this_strategy_size += 2;
1139 }
1140 /* If we don't have to restore any non-FP registers,
1141 we'll be able to save one byte by using rets. */
37a185d7 1142 if (! reg_save_bytes)
18e9d2f9
AO
1143 this_strategy_size--;
1144
1145 if (this_strategy_size < strategy_size)
1146 {
1147 strategy = restore_a1;
1148 strategy_size = this_strategy_size;
1149 }
1150 }
1151
1152 switch (strategy)
1153 {
1154 case restore_sp_post_adjust:
1155 break;
1156
1157 case restore_sp_pre_adjust:
1158 emit_insn (gen_addsi3 (stack_pointer_rtx,
1159 stack_pointer_rtx,
1160 GEN_INT (size)));
1161 size = 0;
1162 break;
1163
1164 case restore_sp_partial_adjust:
1165 emit_insn (gen_addsi3 (stack_pointer_rtx,
1166 stack_pointer_rtx,
1167 GEN_INT (size + 4 * num_regs_to_save
37a185d7
RH
1168 + reg_save_bytes - 252)));
1169 size = 252 - reg_save_bytes - 4 * num_regs_to_save;
18e9d2f9 1170 break;
5abc5de9 1171
18e9d2f9
AO
1172 case restore_a1:
1173 reg = gen_rtx_REG (SImode, FIRST_ADDRESS_REGNUM + 1);
1174 emit_insn (gen_movsi (reg, stack_pointer_rtx));
1175 if (size)
1176 emit_insn (gen_addsi3 (reg, reg, GEN_INT (size)));
1177 break;
1178
1179 default:
dc759020 1180 gcc_unreachable ();
18e9d2f9
AO
1181 }
1182 }
1183
1184 /* Adjust the selected register, if any, for post-increment. */
1185 if (reg)
1186 reg = gen_rtx_POST_INC (SImode, reg);
1187
1188 for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
d7fb4c31 1189 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
18e9d2f9
AO
1190 {
1191 rtx addr;
5abc5de9 1192
18e9d2f9
AO
1193 if (reg)
1194 addr = reg;
1195 else if (size)
1196 {
1197 /* If we aren't using a post-increment register, use an
8596d0a1 1198 SP offset. */
18e9d2f9
AO
1199 addr = gen_rtx_PLUS (SImode,
1200 stack_pointer_rtx,
1201 GEN_INT (size));
1202 }
1203 else
1204 addr = stack_pointer_rtx;
1205
1206 size += 4;
1207
2720cc47
NC
1208 emit_insn (gen_movsf (gen_rtx_REG (SFmode, i),
1209 gen_rtx_MEM (SFmode, addr)));
18e9d2f9
AO
1210 }
1211
1212 /* If we were using the restore_a1 strategy and the number of
1213 bytes to be released won't fit in the `ret' byte, copy `a1'
1214 to `sp', to avoid having to use `add' to adjust it. */
37a185d7 1215 if (! frame_pointer_needed && reg && size + reg_save_bytes > 255)
18e9d2f9
AO
1216 {
1217 emit_move_insn (stack_pointer_rtx, XEXP (reg, 0));
1218 size = 0;
1219 }
1220 }
1221
5d29a95f
JL
1222 /* Maybe cut back the stack, except for the register save area.
1223
1224 If the frame pointer exists, then use the frame pointer to
1225 cut back the stack.
1226
1227 If the stack size + register save area is more than 255 bytes,
1228 then the stack must be cut back here since the size + register
5abc5de9 1229 save size is too big for a ret/retf instruction.
5d29a95f
JL
1230
1231 Else leave it alone, it will be cut back as part of the
1232 ret/retf instruction, or there wasn't any stack to begin with.
1233
dab66575 1234 Under no circumstances should the register save area be
5d29a95f
JL
1235 deallocated here, that would leave a window where an interrupt
1236 could occur and trash the register save area. */
11bb1f11
JL
1237 if (frame_pointer_needed)
1238 {
11bb1f11 1239 emit_move_insn (stack_pointer_rtx, frame_pointer_rtx);
4246e0c5
JL
1240 size = 0;
1241 }
37a185d7 1242 else if (size + reg_save_bytes > 255)
4246e0c5
JL
1243 {
1244 emit_insn (gen_addsi3 (stack_pointer_rtx,
1245 stack_pointer_rtx,
1246 GEN_INT (size)));
1247 size = 0;
11bb1f11 1248 }
11bb1f11 1249
ed6089d6 1250 /* Adjust the stack and restore callee-saved registers, if any. */
37a185d7 1251 if (mn10300_can_use_rets_insn ())
3810076b 1252 emit_jump_insn (ret_rtx);
777fbf09 1253 else
e902c266 1254 emit_jump_insn (gen_return_ret (GEN_INT (size + reg_save_bytes)));
11bb1f11
JL
1255}
1256
/* Recognize the PARALLEL rtx generated by mn10300_gen_multiple_store().
   This function is for MATCH_PARALLEL and so assumes OP is known to be
   parallel.  If OP is a multiple store, return a mask indicating which
   registers it saves.  Return 0 otherwise.

   The expected shape is:
     (parallel [(set (sp) (plus (sp) (const_int -4*(COUNT-1))))
		(set (mem (plus (sp) (const_int -4))) (reg R1))
		...
		(set (mem (plus (sp) (const_int -4*(COUNT-1)))) (reg Rn))])  */

unsigned int
mn10300_store_multiple_regs (rtx op)
{
  int count;		/* Number of elements in the PARALLEL.  */
  int mask;		/* Accumulated bitmask of saved registers.  */
  int i;
  unsigned int last;	/* Register number of the current store.  */
  rtx elt;

  count = XVECLEN (op, 0);
  if (count < 2)
    return 0;

  /* Check that first instruction has the form (set (sp) (plus A B)) */
  elt = XVECEXP (op, 0, 0);
  if (GET_CODE (elt) != SET
      || (! REG_P (SET_DEST (elt)))
      || REGNO (SET_DEST (elt)) != STACK_POINTER_REGNUM
      || GET_CODE (SET_SRC (elt)) != PLUS)
    return 0;

  /* Check that A is the stack pointer and B is the expected stack size.
     For OP to match, each subsequent instruction should push a word onto
     the stack.  We therefore expect the first instruction to create
     COUNT-1 stack slots.  */
  elt = SET_SRC (elt);
  if ((! REG_P (XEXP (elt, 0)))
      || REGNO (XEXP (elt, 0)) != STACK_POINTER_REGNUM
      || (! CONST_INT_P (XEXP (elt, 1)))
      || INTVAL (XEXP (elt, 1)) != -(count - 1) * 4)
    return 0;

  mask = 0;
  for (i = 1; i < count; i++)
    {
      /* Check that element i is a (set (mem M) R).  */
      /* ??? Validate the register order a-la mn10300_gen_multiple_store.
	 Remember: the ordering is *not* monotonic.  */
      elt = XVECEXP (op, 0, i);
      if (GET_CODE (elt) != SET
	  || (! MEM_P (SET_DEST (elt)))
	  || (! REG_P (SET_SRC (elt))))
	return 0;

      /* Remember which registers are to be saved.  */
      last = REGNO (SET_SRC (elt));
      mask |= (1 << last);

      /* Check that M has the form (plus (sp) (const_int -I*4)) */
      elt = XEXP (SET_DEST (elt), 0);
      if (GET_CODE (elt) != PLUS
	  || (! REG_P (XEXP (elt, 0)))
	  || REGNO (XEXP (elt, 0)) != STACK_POINTER_REGNUM
	  || (! CONST_INT_P (XEXP (elt, 1)))
	  || INTVAL (XEXP (elt, 1)) != -i * 4)
	return 0;
    }

  /* All or none of the callee-saved extended registers must be in the set.
     0x3c000 covers those four extended registers' bits.  */
  if ((mask & 0x3c000) != 0
      && (mask & 0x3c000) != 0x3c000)
    return 0;

  return mask;
}
1327
f2831cc9
AS
1328/* Implement TARGET_PREFERRED_RELOAD_CLASS. */
1329
1330static reg_class_t
1331mn10300_preferred_reload_class (rtx x, reg_class_t rclass)
1332{
1333 if (x == stack_pointer_rtx && rclass != SP_REGS)
8b119bb6 1334 return (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
f2831cc9
AS
1335 else if (MEM_P (x)
1336 || (REG_P (x)
1337 && !HARD_REGISTER_P (x))
1338 || (GET_CODE (x) == SUBREG
1339 && REG_P (SUBREG_REG (x))
1340 && !HARD_REGISTER_P (SUBREG_REG (x))))
1341 return LIMIT_RELOAD_CLASS (GET_MODE (x), rclass);
1342 else
1343 return rclass;
1344}
1345
1346/* Implement TARGET_PREFERRED_OUTPUT_RELOAD_CLASS. */
1347
1348static reg_class_t
1349mn10300_preferred_output_reload_class (rtx x, reg_class_t rclass)
1350{
1351 if (x == stack_pointer_rtx && rclass != SP_REGS)
8b119bb6 1352 return (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
f2831cc9
AS
1353 return rclass;
1354}
1355
/* Implement TARGET_SECONDARY_RELOAD.  Decide whether moving X in MODE
   to (IN_P true) or from (IN_P false) a register of class RCLASS_I
   needs an intermediate register, and if so which class; NO_REGS means
   no scratch is needed.  SRI may be filled with a reload insn code.  */

static reg_class_t
mn10300_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
			  machine_mode mode, secondary_reload_info *sri)
{
  enum reg_class rclass = (enum reg_class) rclass_i;
  enum reg_class xclass = NO_REGS;
  unsigned int xregno = INVALID_REGNUM;

  /* Resolve X to a hard register class when possible, looking through
     pseudos that have already been assigned.  */
  if (REG_P (x))
    {
      xregno = REGNO (x);
      if (xregno >= FIRST_PSEUDO_REGISTER)
	xregno = true_regnum (x);
      if (xregno != INVALID_REGNUM)
	xclass = REGNO_REG_CLASS (xregno);
    }

  if (!TARGET_AM33)
    {
      /* Memory load/stores less than a full word wide can't have an
         address or stack pointer destination.  They must use a data
         register as an intermediate register.  */
      if (rclass != DATA_REGS
	  && (mode == QImode || mode == HImode)
	  && xclass == NO_REGS)
	return DATA_REGS;

      /* We can only move SP to/from an address register.  */
      if (in_p
	  && rclass == SP_REGS
	  && xclass != ADDRESS_REGS)
	return ADDRESS_REGS;
      if (!in_p
	  && xclass == SP_REGS
	  && rclass != ADDRESS_REGS
	  && rclass != SP_OR_ADDRESS_REGS)
	return ADDRESS_REGS;
    }

  /* We can't directly load sp + const_int into a register;
     we must use an address register as a scratch.  */
  if (in_p
      && rclass != SP_REGS
      && rclass != SP_OR_ADDRESS_REGS
      && rclass != SP_OR_GENERAL_REGS
      && GET_CODE (x) == PLUS
      && (XEXP (x, 0) == stack_pointer_rtx
	  || XEXP (x, 1) == stack_pointer_rtx))
    {
      sri->icode = CODE_FOR_reload_plus_sp_const;
      return NO_REGS;
    }

  /* We can only move MDR to/from a data register.  */
  if (rclass == MDR_REGS && xclass != DATA_REGS)
    return DATA_REGS;
  if (xclass == MDR_REGS && rclass != DATA_REGS)
    return DATA_REGS;

  /* We can't load/store an FP register from a constant address.  */
  if (TARGET_AM33_2
      && (rclass == FP_REGS || xclass == FP_REGS)
      && (xclass == NO_REGS || rclass == NO_REGS))
    {
      rtx addr = NULL;

      /* For an unallocated pseudo, its equivalent memory (if any)
	 supplies the address to test.  */
      if (xregno >= FIRST_PSEUDO_REGISTER && xregno != INVALID_REGNUM)
	{
	  addr = reg_equiv_mem (xregno);
	  if (addr)
	    addr = XEXP (addr, 0);
	}
      else if (MEM_P (x))
	addr = XEXP (x, 0);

      if (addr && CONSTANT_ADDRESS_P (addr))
	return GENERAL_REGS;
    }
  /* Otherwise assume no secondary reloads are needed.  */
  return NO_REGS;
}
1439
040c5757
RH
1440int
1441mn10300_frame_size (void)
1442{
1443 /* size includes the fixed stack space needed for function calls. */
1444 int size = get_frame_size () + crtl->outgoing_args_size;
1445
1446 /* And space for the return pointer. */
1447 size += crtl->outgoing_args_size ? 4 : 0;
1448
1449 return size;
1450}
1451
777fbf09 1452int
e7ab5593 1453mn10300_initial_offset (int from, int to)
777fbf09 1454{
040c5757
RH
1455 int diff = 0;
1456
1457 gcc_assert (from == ARG_POINTER_REGNUM || from == FRAME_POINTER_REGNUM);
1458 gcc_assert (to == FRAME_POINTER_REGNUM || to == STACK_POINTER_REGNUM);
1459
1460 if (to == STACK_POINTER_REGNUM)
1461 diff = mn10300_frame_size ();
1462
3dbc43d1
JL
1463 /* The difference between the argument pointer and the frame pointer
1464 is the size of the callee register save area. */
040c5757 1465 if (from == ARG_POINTER_REGNUM)
11bb1f11 1466 {
e902c266
NC
1467 unsigned int reg_save_bytes;
1468
1469 mn10300_get_live_callee_saved_regs (& reg_save_bytes);
1470 diff += reg_save_bytes;
040c5757 1471 diff += 4 * fp_regs_to_save ();
11bb1f11
JL
1472 }
1473
040c5757 1474 return diff;
11bb1f11 1475}
22ef4e9b 1476
bd5bd7ac
KH
1477/* Worker function for TARGET_RETURN_IN_MEMORY. */
1478
9024ea92 1479static bool
586de218 1480mn10300_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
9024ea92
KH
1481{
1482 /* Return values > 8 bytes in length in memory. */
b1eb8119
DD
1483 return (int_size_in_bytes (type) > 8
1484 || int_size_in_bytes (type) == 0
1485 || TYPE_MODE (type) == BLKmode);
9024ea92
KH
1486}
1487
/* Flush the argument registers to the stack for a stdarg function;
   return the new argument pointer.  */
static rtx
mn10300_builtin_saveregs (void)
{
  rtx offset, mem;
  tree fntype = TREE_TYPE (current_function_decl);
  /* For a non-stdarg (K&R varargs-style) function the last named
     argument is included, so step past one word.  */
  int argadj = ((!stdarg_p (fntype))
		? UNITS_PER_WORD : 0);
  alias_set_type set = get_varargs_alias_set ();

  if (argadj)
    offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, argadj);
  else
    offset = crtl->args.arg_offset_rtx;

  /* Store the two argument registers ($d0, $d1 as regnos 0 and 1) into
     the first two words at the internal argument pointer.  */
  mem = gen_rtx_MEM (SImode, crtl->args.internal_arg_pointer);
  set_mem_alias_set (mem, set);
  emit_move_insn (mem, gen_rtx_REG (SImode, 0));

  mem = gen_rtx_MEM (SImode,
		     plus_constant (Pmode,
				    crtl->args.internal_arg_pointer, 4));
  set_mem_alias_set (mem, set);
  emit_move_insn (mem, gen_rtx_REG (SImode, 1));

  /* The result is internal_arg_pointer + offset, forced into a reg.  */
  return copy_to_reg (expand_binop (Pmode, add_optab,
				    crtl->args.internal_arg_pointer,
				    offset, 0, 0, OPTAB_LIB_WIDEN));
}
1518
d7bd8aeb 1519static void
f1777882 1520mn10300_va_start (tree valist, rtx nextarg)
fc2acc87 1521{
6c535c69 1522 nextarg = expand_builtin_saveregs ();
e5faf155 1523 std_expand_builtin_va_start (valist, nextarg);
fc2acc87
RH
1524}
1525
8cd5a4e0
RH
1526/* Return true when a parameter should be passed by reference. */
1527
1528static bool
52090e4d 1529mn10300_pass_by_reference (cumulative_args_t, const function_arg_info &arg)
8cd5a4e0 1530{
52090e4d 1531 unsigned HOST_WIDE_INT size = arg.type_size_in_bytes ();
b1eb8119 1532 return (size > 8 || size == 0);
8cd5a4e0
RH
1533}
1534
6783fdb7
RS
1535/* Return an RTX to represent where argument ARG will be passed to a function.
1536 If the result is NULL_RTX, the argument is pushed. */
22ef4e9b 1537
ce236858 1538static rtx
6783fdb7 1539mn10300_function_arg (cumulative_args_t cum_v, const function_arg_info &arg)
22ef4e9b 1540{
d5cc9181 1541 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
990dc016 1542 rtx result = NULL_RTX;
f4a88680 1543 int size;
22ef4e9b
JL
1544
1545 /* We only support using 2 data registers as argument registers. */
1546 int nregs = 2;
1547
1548 /* Figure out the size of the object to be passed. */
6783fdb7 1549 size = arg.promoted_size_in_bytes ();
22ef4e9b
JL
1550 cum->nbytes = (cum->nbytes + 3) & ~3;
1551
1552 /* Don't pass this arg via a register if all the argument registers
1553 are used up. */
1554 if (cum->nbytes > nregs * UNITS_PER_WORD)
990dc016 1555 return result;
22ef4e9b
JL
1556
1557 /* Don't pass this arg via a register if it would be split between
1558 registers and memory. */
6783fdb7 1559 if (arg.type == NULL_TREE
22ef4e9b 1560 && cum->nbytes + size > nregs * UNITS_PER_WORD)
990dc016 1561 return result;
22ef4e9b
JL
1562
1563 switch (cum->nbytes / UNITS_PER_WORD)
1564 {
1565 case 0:
6783fdb7 1566 result = gen_rtx_REG (arg.mode, FIRST_ARGUMENT_REGNUM);
22ef4e9b
JL
1567 break;
1568 case 1:
6783fdb7 1569 result = gen_rtx_REG (arg.mode, FIRST_ARGUMENT_REGNUM + 1);
22ef4e9b
JL
1570 break;
1571 default:
990dc016 1572 break;
22ef4e9b
JL
1573 }
1574
1575 return result;
1576}
1577
6930c98c 1578/* Update the data in CUM to advance over argument ARG. */
ce236858
NF
1579
1580static void
6930c98c
RS
1581mn10300_function_arg_advance (cumulative_args_t cum_v,
1582 const function_arg_info &arg)
ce236858 1583{
d5cc9181
JR
1584 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
1585
6930c98c 1586 cum->nbytes += (arg.promoted_size_in_bytes () + 3) & ~3;
ce236858
NF
1587}
1588
78a52f11
RH
1589/* Return the number of bytes of registers to use for an argument passed
1590 partially in registers and partially in memory. */
22ef4e9b 1591
78a52f11 1592static int
a7c81bc1
RS
1593mn10300_arg_partial_bytes (cumulative_args_t cum_v,
1594 const function_arg_info &arg)
22ef4e9b 1595{
d5cc9181 1596 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
f4a88680 1597 int size;
22ef4e9b
JL
1598
1599 /* We only support using 2 data registers as argument registers. */
1600 int nregs = 2;
1601
1602 /* Figure out the size of the object to be passed. */
a7c81bc1 1603 size = arg.promoted_size_in_bytes ();
22ef4e9b
JL
1604 cum->nbytes = (cum->nbytes + 3) & ~3;
1605
1606 /* Don't pass this arg via a register if all the argument registers
1607 are used up. */
1608 if (cum->nbytes > nregs * UNITS_PER_WORD)
1609 return 0;
1610
1611 if (cum->nbytes + size <= nregs * UNITS_PER_WORD)
1612 return 0;
1613
1614 /* Don't pass this arg via a register if it would be split between
1615 registers and memory. */
a7c81bc1 1616 if (arg.type == NULL_TREE
22ef4e9b
JL
1617 && cum->nbytes + size > nregs * UNITS_PER_WORD)
1618 return 0;
1619
78a52f11 1620 return nregs * UNITS_PER_WORD - cum->nbytes;
22ef4e9b
JL
1621}
1622
b1eb8119
DD
1623/* Return the location of the function's value. This will be either
1624 $d0 for integer functions, $a0 for pointers, or a PARALLEL of both
1625 $d0 and $a0 if the -mreturn-pointer-on-do flag is set. Note that
1626 we only return the PARALLEL for outgoing values; we do not want
1627 callers relying on this extra copy. */
1628
34732b0a
AS
1629static rtx
1630mn10300_function_value (const_tree valtype,
1631 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
1632 bool outgoing)
b1eb8119
DD
1633{
1634 rtx rv;
ef4bddc2 1635 machine_mode mode = TYPE_MODE (valtype);
b1eb8119
DD
1636
1637 if (! POINTER_TYPE_P (valtype))
1638 return gen_rtx_REG (mode, FIRST_DATA_REGNUM);
1639 else if (! TARGET_PTR_A0D0 || ! outgoing
e3b5732b 1640 || cfun->returns_struct)
b1eb8119
DD
1641 return gen_rtx_REG (mode, FIRST_ADDRESS_REGNUM);
1642
1643 rv = gen_rtx_PARALLEL (mode, rtvec_alloc (2));
1644 XVECEXP (rv, 0, 0)
1645 = gen_rtx_EXPR_LIST (VOIDmode,
1646 gen_rtx_REG (mode, FIRST_ADDRESS_REGNUM),
1647 GEN_INT (0));
5abc5de9 1648
b1eb8119
DD
1649 XVECEXP (rv, 0, 1)
1650 = gen_rtx_EXPR_LIST (VOIDmode,
1651 gen_rtx_REG (mode, FIRST_DATA_REGNUM),
1652 GEN_INT (0));
1653 return rv;
1654}
1655
34732b0a
AS
1656/* Implements TARGET_LIBCALL_VALUE. */
1657
1658static rtx
ef4bddc2 1659mn10300_libcall_value (machine_mode mode,
34732b0a
AS
1660 const_rtx fun ATTRIBUTE_UNUSED)
1661{
1662 return gen_rtx_REG (mode, FIRST_DATA_REGNUM);
1663}
1664
1665/* Implements FUNCTION_VALUE_REGNO_P. */
1666
1667bool
1668mn10300_function_value_regno_p (const unsigned int regno)
1669{
1670 return (regno == FIRST_DATA_REGNUM || regno == FIRST_ADDRESS_REGNUM);
1671}
1672
/* Output an addition operation.  OPERANDS[0] is the destination,
   OPERANDS[1] and OPERANDS[2] the sources.  NEED_FLAGS is true when a
   later insn consumes the condition codes, which rules out the flag-
   clobber-free "inc"/"inc4" forms.  Returns the assembler template.  */

const char *
mn10300_output_add (rtx operands[3], bool need_flags)
{
  rtx dest, src1, src2;
  unsigned int dest_regnum, src1_regnum, src2_regnum;
  enum reg_class src1_class, src2_class, dest_class;

  dest = operands[0];
  src1 = operands[1];
  src2 = operands[2];

  dest_regnum = true_regnum (dest);
  src1_regnum = true_regnum (src1);

  dest_class = REGNO_REG_CLASS (dest_regnum);
  src1_class = REGNO_REG_CLASS (src1_regnum);

  if (CONST_INT_P (src2))
    {
      /* reg += const requires a matching destination/source.  */
      gcc_assert (dest_regnum == src1_regnum);

      if (src2 == const1_rtx && !need_flags)
	return "inc %0";
      if (INTVAL (src2) == 4 && !need_flags && dest_class != DATA_REGS)
	return "inc4 %0";

      gcc_assert (!need_flags || dest_class != SP_REGS);
      return "add %2,%0";
    }
  else if (CONSTANT_P (src2))
    return "add %2,%0";

  src2_regnum = true_regnum (src2);
  src2_class = REGNO_REG_CLASS (src2_regnum);

  /* Two-address forms when destination overlaps a source.  */
  if (dest_regnum == src1_regnum)
    return "add %2,%0";
  if (dest_regnum == src2_regnum)
    return "add %1,%0";

  /* The rest of the cases are reg = reg+reg.  For AM33, we can implement
     this directly, as below, but when optimizing for space we can sometimes
     do better by using a mov+add.  For MN103, we claimed that we could
     implement a three-operand add because the various move and add insns
     change sizes across register classes, and we can often do better than
     reload in choosing which operand to move.  */
  if (TARGET_AM33 && optimize_insn_for_speed_p ())
    return "add %2,%1,%0";

  /* Catch cases where no extended register was used.  */
  if (src1_class != EXTENDED_REGS
      && src2_class != EXTENDED_REGS
      && dest_class != EXTENDED_REGS)
    {
      /* We have to copy one of the sources into the destination, then
         add the other source to the destination.

         Carefully select which source to copy to the destination; a
         naive implementation will waste a byte when the source classes
         are different and the destination is an address register.
         Selecting the lowest cost register copy will optimize this
         sequence.  */
      if (src1_class == dest_class)
	return "mov %1,%0\n\tadd %2,%0";
      else
	return "mov %2,%0\n\tadd %1,%0";
    }

  /* At least one register is an extended register.  */

  /* The three operand add instruction on the am33 is a win iff the
     output register is an extended register, or if both source
     registers are extended registers.  */
  if (dest_class == EXTENDED_REGS || src1_class == src2_class)
    return "add %2,%1,%0";

  /* It is better to copy one of the sources to the destination, then
     perform a 2 address add.  The destination in this case must be
     an address or data register and one of the sources must be an
     extended register and the remaining source must not be an extended
     register.

     The best code for this case is to copy the extended reg to the
     destination, then emit a two address add.  */
  if (src1_class == EXTENDED_REGS)
    return "mov %1,%0\n\tadd %2,%0";
  else
    return "mov %2,%0\n\tadd %1,%0";
}
460f4b9d 1764
e9ad4573
JL
1765/* Return 1 if X contains a symbolic expression. We know these
1766 expressions will have one of a few well defined forms, so
1767 we need only check those forms. */
e7ab5593 1768
e9ad4573 1769int
e7ab5593 1770mn10300_symbolic_operand (rtx op,
ef4bddc2 1771 machine_mode mode ATTRIBUTE_UNUSED)
e9ad4573
JL
1772{
1773 switch (GET_CODE (op))
1774 {
1775 case SYMBOL_REF:
1776 case LABEL_REF:
1777 return 1;
1778 case CONST:
1779 op = XEXP (op, 0);
1780 return ((GET_CODE (XEXP (op, 0)) == SYMBOL_REF
1781 || GET_CODE (XEXP (op, 0)) == LABEL_REF)
f3f63737 1782 && CONST_INT_P (XEXP (op, 1)));
e9ad4573
JL
1783 default:
1784 return 0;
1785 }
1786}
1787
/* Try machine dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new valid address.
   This macro is used in only one place: `memory_address' in explow.cc.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   Normally it is always safe for this macro to do nothing.  It exists to
   recognize opportunities to optimize the output.

   But on a few ports with segmented architectures and indexed addressing
   (mn10300, hppa) it is used to rewrite certain problematical addresses.  */

static rtx
mn10300_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			    machine_mode mode ATTRIBUTE_UNUSED)
{
  /* NOTE(review): the PIC test inspects X but legitimizes OLDX (and
     OLDX is marked ATTRIBUTE_UNUSED despite being used here) — looks
     intentional upstream, but worth confirming.  */
  if (flag_pic && ! mn10300_legitimate_pic_operand_p (x))
    x = mn10300_legitimize_pic_address (oldx, NULL_RTX);

  /* Uh-oh.  We might have an address for x[n-100000].  This needs
     special handling to avoid creating an indexed memory address
     with x-100000 as the base.  */
  if (GET_CODE (x) == PLUS
      && mn10300_symbolic_operand (XEXP (x, 1), VOIDmode))
    {
      /* Ugly.  We modify things here so that the address offset specified
         by the index expression is computed first, then added to x to form
         the entire address.  */

      rtx regx1, regy1, regy2, y;

      /* Strip off any CONST.  */
      y = XEXP (x, 1);
      if (GET_CODE (y) == CONST)
	y = XEXP (y, 0);

      if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
	{
	  /* Force each piece into a register, combine the symbolic
	     parts first, then add the base last.  */
	  regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
	  regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
	  regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
	  regx1 = force_reg (Pmode,
			     gen_rtx_fmt_ee (GET_CODE (y), Pmode, regx1,
					     regy2));
	  return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
	}
    }
  return x;
}
460ad325 1838
/* Convert a non-PIC address in `orig' to a PIC address using @GOT or
   @GOTOFF in `reg'.  If REG is NULL a fresh pseudo is allocated.
   Returns the register holding the PIC address, or ORIG unchanged when
   it is not a symbol/label reference.  */

rtx
mn10300_legitimize_pic_address (rtx orig, rtx reg)
{
  rtx x;
  rtx_insn *insn;

  /* Local symbols (labels, constant-pool entries, non-global symbols)
     can be reached via @GOTOFF relative to the PIC register.  */
  if (GET_CODE (orig) == LABEL_REF
      || (GET_CODE (orig) == SYMBOL_REF
	  && (CONSTANT_POOL_ADDRESS_P (orig)
	      || ! MN10300_GLOBAL_P (orig))))
    {
      if (reg == NULL)
	reg = gen_reg_rtx (Pmode);

      x = gen_rtx_UNSPEC (SImode, gen_rtvec (1, orig), UNSPEC_GOTOFF);
      x = gen_rtx_CONST (SImode, x);
      emit_move_insn (reg, x);

      insn = emit_insn (gen_addsi3 (reg, reg, pic_offset_table_rtx));
    }
  /* Global symbols are loaded through their @GOT slot.  */
  else if (GET_CODE (orig) == SYMBOL_REF)
    {
      if (reg == NULL)
	reg = gen_reg_rtx (Pmode);

      x = gen_rtx_UNSPEC (SImode, gen_rtvec (1, orig), UNSPEC_GOT);
      x = gen_rtx_CONST (SImode, x);
      x = gen_rtx_PLUS (SImode, pic_offset_table_rtx, x);
      x = gen_const_mem (SImode, x);

      insn = emit_move_insn (reg, x);
    }
  else
    return orig;

  /* Record the original value so later passes can simplify.  */
  set_unique_reg_note (insn, REG_EQUAL, orig);
  return reg;
}
1880
/* Return zero if X references a SYMBOL_REF or LABEL_REF whose symbol
   isn't protected by a PIC unspec; nonzero otherwise.  Walks the whole
   RTL expression recursively.  */

int
mn10300_legitimate_pic_operand_p (rtx x)
{
  const char *fmt;
  int i;

  /* A bare symbol or label is not legitimate under PIC.  */
  if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
    return 0;

  /* A recognized PIC unspec wrapper protects everything inside it,
     so do not recurse further.  */
  if (GET_CODE (x) == UNSPEC
      && (XINT (x, 1) == UNSPEC_PIC
	  || XINT (x, 1) == UNSPEC_GOT
	  || XINT (x, 1) == UNSPEC_GOTOFF
	  || XINT (x, 1) == UNSPEC_PLT
	  || XINT (x, 1) == UNSPEC_GOTSYM_OFF))
    return 1;

  /* Otherwise recurse over every sub-expression and vector element.  */
  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
	{
	  int j;

	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    if (! mn10300_legitimate_pic_operand_p (XVECEXP (x, i, j)))
	      return 0;
	}
      else if (fmt[i] == 'e'
	       && ! mn10300_legitimate_pic_operand_p (XEXP (x, i)))
	return 0;
    }

  return 1;
}
1919
/* Return TRUE if the address X, taken from a (MEM:MODE X) rtx, is
   legitimate, and FALSE otherwise.

   On the mn10300, the value in the address register must be
   in the same memory space/segment as the effective address.

   This is problematical for reload since it does not understand
   that base+index != index+base in a memory reference.

   Note it is still possible to use reg+reg addressing modes,
   it's just much more difficult.  For a discussion of a possible
   workaround and solution, see the comments in pa.cc before the
   function record_unscaled_index_insn_codes.  */

static bool
mn10300_legitimate_address_p (machine_mode mode, rtx x, bool strict,
			      code_helper = ERROR_MARK)
{
  rtx base, index;

  /* Constant addresses are fine, unless PIC requires them wrapped.  */
  if (CONSTANT_ADDRESS_P (x))
    return !flag_pic || mn10300_legitimate_pic_operand_p (x);

  /* A plain base register.  */
  if (RTX_OK_FOR_BASE_P (x, strict))
    return true;

  /* AM33 adds post-increment / post-modify for word-sized (and HImode)
     accesses.  */
  if (TARGET_AM33 && (mode == SImode || mode == SFmode || mode == HImode))
    {
      if (GET_CODE (x) == POST_INC)
	return RTX_OK_FOR_BASE_P (XEXP (x, 0), strict);
      if (GET_CODE (x) == POST_MODIFY)
	return (RTX_OK_FOR_BASE_P (XEXP (x, 0), strict)
		&& CONSTANT_ADDRESS_P (XEXP (x, 1)));
    }

  /* Everything else must be base + index/displacement.  */
  if (GET_CODE (x) != PLUS)
    return false;

  base = XEXP (x, 0);
  index = XEXP (x, 1);

  if (!REG_P (base))
    return false;
  if (REG_P (index))
    {
      /* ??? Without AM33 generalized (Ri,Rn) addressing, reg+reg
	 addressing is hard to satisfy.  */
      if (!TARGET_AM33)
	return false;

      return (REGNO_GENERAL_P (REGNO (base), strict)
	      && REGNO_GENERAL_P (REGNO (index), strict));
    }

  if (!REGNO_STRICT_OK_FOR_BASE_P (REGNO (base), strict))
    return false;

  /* A 32-bit signed displacement.  */
  if (CONST_INT_P (index))
    return IN_RANGE (INTVAL (index), -1 - 0x7fffffff, 0x7fffffff);

  /* A symbolic displacement, subject to the PIC rules.  */
  if (CONSTANT_ADDRESS_P (index))
    return !flag_pic || mn10300_legitimate_pic_operand_p (index);

  return false;
}
1985
1986bool
1987mn10300_regno_in_class_p (unsigned regno, int rclass, bool strict)
1988{
1989 if (regno >= FIRST_PSEUDO_REGISTER)
1990 {
1991 if (!strict)
1992 return true;
1993 if (!reg_renumber)
1994 return false;
1995 regno = reg_renumber[regno];
ba4ec0e0
NC
1996 if (regno == INVALID_REGNUM)
1997 return false;
36846b26
RH
1998 }
1999 return TEST_HARD_REG_BIT (reg_class_contents[rclass], regno);
2000}
2001
/* Attempt to fix up a reload of address X by pushing reloads for any
   stack-pointer operand of a PLUS, since SP cannot serve as an index.
   Returns the (possibly modified) X, or NULL_RTX when no fixup was
   made.  OPNUM/TYPE are passed through to push_reload.  */

rtx
mn10300_legitimize_reload_address (rtx x,
				   machine_mode mode ATTRIBUTE_UNUSED,
				   int opnum, int type,
				   int ind_levels ATTRIBUTE_UNUSED)
{
  bool any_change = false;

  /* See above re disabling reg+reg addressing for MN103.  */
  if (!TARGET_AM33)
    return NULL_RTX;

  if (GET_CODE (x) != PLUS)
    return NULL_RTX;

  /* Reload SP into a general register wherever it appears as an
     operand of the PLUS.  */
  if (XEXP (x, 0) == stack_pointer_rtx)
    {
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   GENERAL_REGS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      any_change = true;
    }
  if (XEXP (x, 1) == stack_pointer_rtx)
    {
      push_reload (XEXP (x, 1), NULL_RTX, &XEXP (x, 1), NULL,
		   GENERAL_REGS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      any_change = true;
    }

  return any_change ? x : NULL_RTX;
}
2034
/* Implement TARGET_LEGITIMATE_CONSTANT_P.  Returns TRUE if X is a valid
   constant.  Note that some "constants" aren't valid, such as TLS
   symbols and unconverted GOT-based references, so we eliminate
   those here.  */

static bool
mn10300_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  switch (GET_CODE (x))
    {
    case CONST:
      x = XEXP (x, 0);

      if (GET_CODE (x) == PLUS)
	{
	  /* Only symbol + integer-offset forms are acceptable.  */
	  if (! CONST_INT_P (XEXP (x, 1)))
	    return false;
	  x = XEXP (x, 0);
	}

      /* Only some unspecs are valid as "constants".  */
      if (GET_CODE (x) == UNSPEC)
	{
	  switch (XINT (x, 1))
	    {
	    case UNSPEC_PIC:
	    case UNSPEC_GOT:
	    case UNSPEC_GOTOFF:
	    case UNSPEC_PLT:
	      return true;
	    default:
	      return false;
	    }
	}

      /* We must have drilled down to a symbol.  */
      if (! mn10300_symbolic_operand (x, Pmode))
	return false;
      break;

    default:
      /* Anything that is not a CONST wrapper is accepted as-is.  */
      break;
    }

  return true;
}
2081
/* Undo pic address legitimization for the benefit of debug info.
   Recognizes (pic_reg + const(unspec GOT/GOTOFF [sym])) forms, possibly
   wrapped in a MEM and possibly carrying an extra register addend, and
   returns the underlying SYMBOL_REF (plus addend).  Returns ORIG_X
   unchanged whenever the expression does not match.  */

static rtx
mn10300_delegitimize_address (rtx orig_x)
{
  rtx x = orig_x, ret, addend = NULL;
  bool need_mem;

  if (MEM_P (x))
    x = XEXP (x, 0);
  if (GET_CODE (x) != PLUS || GET_MODE (x) != Pmode)
    return orig_x;

  if (XEXP (x, 0) == pic_offset_table_rtx)
    ;
  /* With the REG+REG addressing of AM33, var-tracking can re-assemble
     some odd-looking "addresses" that were never valid in the first place.
     We need to look harder to avoid warnings being emitted.  */
  else if (GET_CODE (XEXP (x, 0)) == PLUS)
    {
      rtx x0 = XEXP (x, 0);
      rtx x00 = XEXP (x0, 0);
      rtx x01 = XEXP (x0, 1);

      /* The PIC register may appear on either side of the inner PLUS;
	 the other side becomes the addend.  */
      if (x00 == pic_offset_table_rtx)
	addend = x01;
      else if (x01 == pic_offset_table_rtx)
	addend = x00;
      else
	return orig_x;

    }
  else
    return orig_x;
  x = XEXP (x, 1);

  if (GET_CODE (x) != CONST)
    return orig_x;
  x = XEXP (x, 0);
  if (GET_CODE (x) != UNSPEC)
    return orig_x;

  ret = XVECEXP (x, 0, 0);
  /* GOTOFF references are direct; GOT references go through memory.  */
  if (XINT (x, 1) == UNSPEC_GOTOFF)
    need_mem = false;
  else if (XINT (x, 1) == UNSPEC_GOT)
    need_mem = true;
  else
    return orig_x;

  gcc_assert (GET_CODE (ret) == SYMBOL_REF);
  /* The MEM-ness of the original must match the unspec kind, and a GOT
     load cannot carry a register addend.  */
  if (need_mem != MEM_P (orig_x))
    return orig_x;
  if (need_mem && addend)
    return orig_x;
  if (addend)
    ret = gen_rtx_PLUS (Pmode, addend, ret);
  return ret;
}
2141
/* For addresses, costs are relative to "MOV (Rm),Rn".  For AM33 this is
   the 3-byte fully general instruction; for MN103 this is the 2-byte form
   with an address register.

   Implements TARGET_ADDRESS_COST.  When SPEED is true the returned value
   approximates extra cycles; otherwise extra bytes.  */

static int
mn10300_address_cost (rtx x, machine_mode mode ATTRIBUTE_UNUSED,
		      addr_space_t as ATTRIBUTE_UNUSED, bool speed)
{
  HOST_WIDE_INT i;
  rtx base, index;

  switch (GET_CODE (x))
    {
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      /* We assume all of these require a 32-bit constant, even though
	 some symbol and label references can be relaxed.  */
      return speed ? 1 : 4;

    case REG:
    case SUBREG:
    case POST_INC:
      /* Plain register (or post-increment) addressing is the baseline.  */
      return 0;

    case POST_MODIFY:
      /* Assume any symbolic offset is a 32-bit constant.  */
      i = (CONST_INT_P (XEXP (x, 1)) ? INTVAL (XEXP (x, 1)) : 0x12345678);
      if (IN_RANGE (i, -128, 127))
	return speed ? 0 : 1;
      if (speed)
	return 1;
      if (IN_RANGE (i, -0x800000, 0x7fffff))
	return 3;
      return 4;

    case PLUS:
      base = XEXP (x, 0);
      index = XEXP (x, 1);
      if (register_operand (index, SImode))
	{
	  /* Attempt to minimize the number of registers in the address.
	     This is similar to what other ports do.  */
	  if (register_operand (base, SImode))
	    return 1;

	  /* Operands were swapped relative to the canonical base+offset
	     form; normalize so the constant check below sees the offset.  */
	  base = XEXP (x, 1);
	  index = XEXP (x, 0);
	}

      /* Assume any symbolic offset is a 32-bit constant.  */
      i = (CONST_INT_P (XEXP (x, 1)) ? INTVAL (XEXP (x, 1)) : 0x12345678);
      if (IN_RANGE (i, -128, 127))
	return speed ? 0 : 1;
      if (IN_RANGE (i, -32768, 32767))
	return speed ? 0 : 2;
      return speed ? 2 : 6;

    default:
      /* Unrecognized address shape: fall back to the generic rtx cost.  */
      return rtx_cost (x, Pmode, MEM, 0, speed);
    }
}
460ad325 2204
/* Implement the TARGET_REGISTER_MOVE_COST hook.

   Recall that the base value of 2 is required by assumptions elsewhere
   in the body of the compiler, and that cost 2 is special-cased as an
   early exit from reload meaning no work is required.  */

static int
mn10300_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
			    reg_class_t ifrom, reg_class_t ito)
{
  enum reg_class from = (enum reg_class) ifrom;
  enum reg_class to = (enum reg_class) ito;
  enum reg_class scratch, test;

  /* Simplify the following code by unifying the fp register classes.  */
  if (to == FP_ACC_REGS)
    to = FP_REGS;
  if (from == FP_ACC_REGS)
    from = FP_REGS;

  /* Diagnose invalid moves by costing them as two moves.  */

  /* Determine whether this move needs an intermediate class SCRATCH;
     TEST is the class that must be reachable via SCRATCH.  */
  scratch = NO_REGS;
  test = from;
  if (to == SP_REGS)
    scratch = (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
  else if (to == MDR_REGS)
    scratch = DATA_REGS;
  else if (to == FP_REGS && to != from)
    scratch = GENERAL_REGS;
  else
    {
      test = to;
      if (from == SP_REGS)
	scratch = (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
      else if (from == MDR_REGS)
	scratch = DATA_REGS;
      else if (from == FP_REGS && to != from)
	scratch = GENERAL_REGS;
    }
  /* Recurse through the scratch class, pricing the move as two hops.  */
  if (scratch != NO_REGS && !reg_class_subset_p (test, scratch))
    return (mn10300_register_move_cost (VOIDmode, from, scratch)
	    + mn10300_register_move_cost (VOIDmode, scratch, to));

  /* From here on, all we need consider are legal combinations.  */

  if (optimize_size)
    {
      /* The scale here is bytes * 2.  */

      if (from == to && (to == ADDRESS_REGS || to == DATA_REGS))
	return 2;

      if (from == SP_REGS)
	return (to == ADDRESS_REGS ? 2 : 6);

      /* For MN103, all remaining legal moves are two bytes.  */
      /* NOTE(review): the guard below is TARGET_AM33, not MN103; the
	 comment above appears to describe the opposite case -- confirm
	 against the mn10300 ISA size tables.  */
      if (TARGET_AM33)
	return 4;

      if (to == SP_REGS)
	return (from == ADDRESS_REGS ? 4 : 6);

      if ((from == ADDRESS_REGS || from == DATA_REGS)
	   && (to == ADDRESS_REGS || to == DATA_REGS))
	return 4;

      /* NOTE(review): "to == from" can only be EXTENDED_REGS->EXTENDED_REGS
	 here; verify the 3-byte (cost 6) self-move figure is intended.  */
      if (to == EXTENDED_REGS)
	return (to == from ? 6 : 4);

      /* What's left are SP_REGS, FP_REGS, or combinations of the above.  */
      return 6;
    }
  else
    {
      /* The scale here is cycles * 2.  */

      if (to == FP_REGS)
	return 8;
      if (from == FP_REGS)
	return 4;

      /* All legal moves between integral registers are single cycle.  */
      return 2;
    }
}
3c50106f 2291
72d6e3c5
RH
2292/* Implement the TARGET_MEMORY_MOVE_COST hook.
2293
2294 Given lack of the form of the address, this must be speed-relative,
2295 though we should never be less expensive than a size-relative register
2296 move cost above. This is not a problem. */
2297
dcefdf67 2298static int
ef4bddc2 2299mn10300_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
72d6e3c5 2300 reg_class_t iclass, bool in ATTRIBUTE_UNUSED)
dcefdf67 2301{
72d6e3c5
RH
2302 enum reg_class rclass = (enum reg_class) iclass;
2303
2304 if (rclass == FP_REGS)
2305 return 8;
2306 return 6;
dcefdf67
RH
2307}
2308
/* Implement the TARGET_RTX_COSTS hook.

   Speed-relative costs are relative to COSTS_N_INSNS, which is intended
   to represent cycles.  Size-relative costs are in bytes.

   Stores the computed cost through PTOTAL.  Returns true when the cost
   is final (subexpressions must not be recursed into), false when the
   caller should add subexpression costs.  */

static bool
mn10300_rtx_costs (rtx x, machine_mode mode, int outer_code,
		   int opno ATTRIBUTE_UNUSED, int *ptotal, bool speed)
{
  /* This value is used for SYMBOL_REF etc where we want to pretend
     we have a full 32-bit constant.  */
  HOST_WIDE_INT i = 0x12345678;
  int total;
  int code = GET_CODE (x);

  switch (code)
    {
    case CONST_INT:
      i = INTVAL (x);
    do_int_costs:
      if (speed)
	{
	  if (outer_code == SET)
	    {
	      /* 16-bit integer loads have latency 1, 32-bit loads 2.  */
	      if (IN_RANGE (i, -32768, 32767))
		total = COSTS_N_INSNS (1);
	      else
		total = COSTS_N_INSNS (2);
	    }
	  else
	    {
	      /* 16-bit integer operands don't affect latency;
		 24-bit and 32-bit operands add a cycle.  */
	      if (IN_RANGE (i, -32768, 32767))
		total = 0;
	      else
		total = COSTS_N_INSNS (1);
	    }
	}
      else
	{
	  if (outer_code == SET)
	    {
	      /* Byte sizes of the various MOV imm,Rn encodings.  */
	      if (i == 0)
		total = 1;
	      else if (IN_RANGE (i, -128, 127))
		total = 2;
	      else if (IN_RANGE (i, -32768, 32767))
		total = 3;
	      else
		total = 6;
	    }
	  else
	    {
	      /* Reference here is ADD An,Dn, vs ADD imm,Dn.  */
	      if (IN_RANGE (i, -128, 127))
		total = 0;
	      else if (IN_RANGE (i, -32768, 32767))
		total = 2;
	      else if (TARGET_AM33 && IN_RANGE (i, -0x01000000, 0x00ffffff))
		total = 3;
	      else
		total = 4;
	    }
	}
      goto alldone;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST_DOUBLE:
      /* We assume all of these require a 32-bit constant, even though
	 some symbol and label references can be relaxed.  */
      goto do_int_costs;

    case UNSPEC:
      switch (XINT (x, 1))
	{
	case UNSPEC_PIC:
	case UNSPEC_GOT:
	case UNSPEC_GOTOFF:
	case UNSPEC_PLT:
	case UNSPEC_GOTSYM_OFF:
	  /* The PIC unspecs also resolve to a 32-bit constant.  */
	  goto do_int_costs;

	default:
	  /* Assume any non-listed unspec is some sort of arithmetic.  */
	  goto do_arith_costs;
	}

    case PLUS:
      /* Notice the size difference of INC and INC4.  */
      if (!speed && outer_code == SET && CONST_INT_P (XEXP (x, 1)))
	{
	  i = INTVAL (XEXP (x, 1));
	  if (i == 1 || i == 4)
	    {
	      /* 1-byte INC/INC4 plus the cost of the addend's source.  */
	      total = 1 + rtx_cost (XEXP (x, 0), mode, PLUS, 0, speed);
	      goto alldone;
	    }
	}
      goto do_arith_costs;

    case MINUS:
    case AND:
    case IOR:
    case XOR:
    case NOT:
    case NEG:
    case ZERO_EXTEND:
    case SIGN_EXTEND:
    case COMPARE:
    case BSWAP:
    case CLZ:
    do_arith_costs:
      /* Simple ALU operations: one cycle, or two bytes.  */
      total = (speed ? COSTS_N_INSNS (1) : 2);
      break;

    case ASHIFT:
      /* Notice the size difference of ASL2 and variants.  */
      if (!speed && CONST_INT_P (XEXP (x, 1)))
	switch (INTVAL (XEXP (x, 1)))
	  {
	  case 1:
	  case 2:
	    total = 1;
	    goto alldone;
	  case 3:
	  case 4:
	    total = 2;
	    goto alldone;
	  }
      /* FALLTHRU */

    case ASHIFTRT:
    case LSHIFTRT:
      total = (speed ? COSTS_N_INSNS (1) : 3);
      goto alldone;

    case MULT:
      total = (speed ? COSTS_N_INSNS (3) : 2);
      break;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      total = (speed ? COSTS_N_INSNS (39)
	       /* Include space to load+retrieve MDR.  */
	       : code == MOD || code == UMOD ? 6 : 4);
      break;

    case MEM:
      /* Base memory cost is the address cost...  */
      total = mn10300_address_cost (XEXP (x, 0), mode,
				    MEM_ADDR_SPACE (x), speed);
      /* ...plus two cycles of load latency when optimizing for speed.  */
      if (speed)
	total = COSTS_N_INSNS (2 + total);
      goto alldone;

    default:
      /* Probably not implemented.  Assume external call.  */
      total = (speed ? COSTS_N_INSNS (10) : 7);
      break;
    }

  /* break-paths: cost of subexpressions must still be added by caller.  */
  *ptotal = total;
  return false;

 alldone:
  /* goto-paths: the computed cost is complete.  */
  *ptotal = total;
  return true;
}
72d6e3c5 2483
/* If using PIC, mark a SYMBOL_REF for a non-global symbol so that we
   may access it using GOTOFF instead of GOT.

   Implements TARGET_ENCODE_SECTION_INFO; records locality of DECL in
   SYMBOL_REF_FLAG of the symbol inside RTL.  */

static void
mn10300_encode_section_info (tree decl, rtx rtl, int first)
{
  rtx symbol;

  /* Let the generic code do its bookkeeping first.  */
  default_encode_section_info (decl, rtl, first);

  if (! MEM_P (rtl))
    return;

  symbol = XEXP (rtl, 0);
  if (GET_CODE (symbol) != SYMBOL_REF)
    return;

  if (flag_pic)
    SYMBOL_REF_FLAG (symbol) = (*targetm.binds_local_p) (decl);
}
/* Dispatch tables on the mn10300 are extremely expensive in terms of code
   and readonly data size.  So we crank up the case threshold value to
   encourage a series of if/else comparisons to implement many small switch
   statements.  In theory, this value could be increased much more if we
   were solely optimizing for space, but we keep it "reasonable" to avoid
   serious code efficiency lossage.  */

static unsigned int
mn10300_case_values_threshold (void)
{
  /* Minimum number of case labels before a jump table is considered.  */
  const unsigned int min_table_cases = 6;

  return min_table_cases;
}
/* Worker function for TARGET_TRAMPOLINE_INIT.

   Fill in the 16-byte trampoline M_TRAMP so that it loads CHAIN_VALUE
   into a1 and jumps to FNDECL.  */

static void
mn10300_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
{
  rtx mem, disp, fnaddr = XEXP (DECL_RTL (fndecl), 0);

  /* This is a strict alignment target, which means that we play
     some games to make sure that the locations at which we need
     to store <chain> and <disp> wind up at aligned addresses.

	0x28 0x00			add 0,d0
	0xfc 0xdd			mov chain,a1
        <chain>
	0xf8 0xed 0x00			btst 0,d1
	0xdc				jmp fnaddr
	<disp>

   Note that the two extra insns are effectively nops; they
   clobber the flags but do not affect the contents of D0 or D1.  */

  /* The jmp displacement is relative to the byte after the 0xdc opcode,
     which is at offset 11 in the trampoline.  */
  disp = expand_binop (SImode, sub_optab, fnaddr,
		       plus_constant (Pmode, XEXP (m_tramp, 0), 11),
		       NULL_RTX, 1, OPTAB_DIRECT);

  /* Words are stored little-endian, hence the byte-reversed opcode
     constants relative to the diagram above.  */
  mem = adjust_address (m_tramp, SImode, 0);
  emit_move_insn (mem, gen_int_mode (0xddfc0028, SImode));
  mem = adjust_address (m_tramp, SImode, 4);
  emit_move_insn (mem, chain_value);
  mem = adjust_address (m_tramp, SImode, 8);
  emit_move_insn (mem, gen_int_mode (0xdc00edf8, SImode));
  mem = adjust_address (m_tramp, SImode, 12);
  emit_move_insn (mem, disp);
}
/* Output the assembler code for a C++ thunk function.
   THUNK_DECL is the declaration for the thunk function itself, FUNCTION
   is the decl for the target function.  DELTA is an immediate constant
   offset to be added to the THIS parameter.  If VCALL_OFFSET is nonzero
   the word at the adjusted address *(*THIS' + VCALL_OFFSET) should be
   additionally added to THIS.  Finally jump to the entry point of
   FUNCTION.  */

static void
mn10300_asm_output_mi_thunk (FILE * file,
			     /* NOTE(review): marked unused but referenced
				below for the assembler name -- the
				attribute is stale and harmless.  */
			     tree thunk_fndecl ATTRIBUTE_UNUSED,
			     HOST_WIDE_INT delta,
			     HOST_WIDE_INT vcall_offset,
			     tree function)
{
  const char *fnname = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (thunk_fndecl));
  const char * _this;

  assemble_start_function (thunk_fndecl, fnname);
  /* Get the register holding the THIS parameter.  Handle the case
     where there is a hidden first argument for a returned structure.  */
  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    _this = reg_names [FIRST_ARGUMENT_REGNUM + 1];
  else
    _this = reg_names [FIRST_ARGUMENT_REGNUM];

  fprintf (file, "\t%s Thunk Entry Point:\n", ASM_COMMENT_START);

  /* Apply the constant adjustment to THIS.  */
  if (delta)
    fprintf (file, "\tadd %d, %s\n", (int) delta, _this);

  if (vcall_offset)
    {
      const char * scratch = reg_names [FIRST_ADDRESS_REGNUM + 1];

      /* Add *(*this + vcall_offset) to THIS via a scratch register.  */
      fprintf (file, "\tmov %s, %s\n", _this, scratch);
      fprintf (file, "\tmov (%s), %s\n", scratch, scratch);
      fprintf (file, "\tadd %d, %s\n", (int) vcall_offset, scratch);
      fprintf (file, "\tmov (%s), %s\n", scratch, scratch);
      fprintf (file, "\tadd %s, %s\n", scratch, _this);
    }

  /* Tail-jump to the real function.  */
  fputs ("\tjmp ", file);
  assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
  putc ('\n', file);
  assemble_end_function (thunk_fndecl, fnname);
}
/* Return true if mn10300_output_mi_thunk would be able to output the
   assembler code for the thunk function specified by the arguments
   it is passed, and false otherwise.  */

static bool
mn10300_can_output_mi_thunk (const_tree thunk_fndecl ATTRIBUTE_UNUSED,
			     HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
			     HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
			     const_tree function ATTRIBUTE_UNUSED)
{
  /* The thunk writer above handles every combination, so always true.  */
  return true;
}
/* Implement TARGET_HARD_REGNO_MODE_OK.  Return true when a value of
   mode MODE may be kept in hard register REGNO.  */

static bool
mn10300_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
{
  if (REGNO_REG_CLASS (regno) == FP_REGS
      || REGNO_REG_CLASS (regno) == FP_ACC_REGS)
    /* Do not store integer values in FP registers.  */
    return GET_MODE_CLASS (mode) == MODE_FLOAT && ((regno & 1) == 0);

  /* Extended registers only exist on AM33 parts.  */
  if (! TARGET_AM33 && REGNO_REG_CLASS (regno) == EXTENDED_REGS)
    return false;

  /* Even-numbered registers (or any register for a word-sized mode)
     are always acceptable.  */
  if (((regno) & 1) == 0 || GET_MODE_SIZE (mode) == 4)
    return true;

  /* Odd registers in these classes can hold values up to word size.  */
  if (REGNO_REG_CLASS (regno) == DATA_REGS
      || (TARGET_AM33 && REGNO_REG_CLASS (regno) == ADDRESS_REGS)
      || REGNO_REG_CLASS (regno) == EXTENDED_REGS)
    return GET_MODE_SIZE (mode) <= 4;

  return false;
}
2637
99e1629f
RS
2638/* Implement TARGET_MODES_TIEABLE_P. */
2639
2640static bool
2641mn10300_modes_tieable_p (machine_mode mode1, machine_mode mode2)
4af476d7
NC
2642{
2643 if (GET_MODE_CLASS (mode1) == MODE_FLOAT
2644 && GET_MODE_CLASS (mode2) != MODE_FLOAT)
2645 return false;
2646
2647 if (GET_MODE_CLASS (mode2) == MODE_FLOAT
2648 && GET_MODE_CLASS (mode1) != MODE_FLOAT)
2649 return false;
2650
2651 if (TARGET_AM33
2652 || mode1 == mode2
2653 || (GET_MODE_SIZE (mode1) <= 4 && GET_MODE_SIZE (mode2) <= 4))
2654 return true;
2655
2656 return false;
2657}
/* Return the set of CC_FLAG_* bits that condition-code mode MODE is
   able to represent, or -1 for the floating-point CC mode.  */

static int
cc_flags_for_mode (machine_mode mode)
{
  switch (mode)
    {
    case E_CCmode:
      return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_C | CC_FLAG_V;
    case E_CCZNCmode:
      return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_C;
    case E_CCZNmode:
      return CC_FLAG_Z | CC_FLAG_N;
    case E_CC_FLOATmode:
      /* FP comparisons use a separate flag set; treat as "all".  */
      return -1;
    default:
      gcc_unreachable ();
    }
}
/* Return the set of CC_FLAG_* bits examined by comparison CODE, or -1
   for the unordered/FP-only comparisons.  The comment after each case
   shows the flag expression the branch tests.  */

static int
cc_flags_for_code (enum rtx_code code)
{
  switch (code)
    {
    case EQ:	/* Z */
    case NE:	/* ~Z */
      return CC_FLAG_Z;

    case LT:	/* N */
    case GE:	/* ~N */
      return CC_FLAG_N;

    case GT:    /* ~(Z|(N^V)) */
    case LE:    /* Z|(N^V) */
      return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_V;

    case GEU:	/* ~C */
    case LTU:	/* C */
      return CC_FLAG_C;

    case GTU:	/* ~(C | Z) */
    case LEU:	/* C | Z */
      return CC_FLAG_Z | CC_FLAG_C;

    case ORDERED:
    case UNORDERED:
    case LTGT:
    case UNEQ:
    case UNGE:
    case UNGT:
    case UNLE:
    case UNLT:
      /* Only representable by the FP condition codes.  */
      return -1;

    default:
      gcc_unreachable ();
    }
}
/* Implement SELECT_CC_MODE: choose the narrowest CC mode whose flag set
   covers what comparison CODE of X needs.  */

machine_mode
mn10300_select_cc_mode (enum rtx_code code, rtx x, rtx y ATTRIBUTE_UNUSED)
{
  int req;

  if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
    return CC_FLOATmode;

  req = cc_flags_for_code (code);

  /* Pick the smallest integer CC mode providing the required flags.  */
  if (req & CC_FLAG_V)
    return CCmode;
  if (req & CC_FLAG_C)
    return CCZNCmode;
  return CCZNmode;
}
f3f63737
NC
2733
2734static inline bool
af480750 2735set_is_load_p (rtx set)
f3f63737 2736{
af480750 2737 return MEM_P (SET_SRC (set));
f3f63737
NC
2738}
2739
2740static inline bool
af480750 2741set_is_store_p (rtx set)
f3f63737 2742{
af480750 2743 return MEM_P (SET_DEST (set));
f3f63737
NC
2744}
/* Update scheduling costs for situations that cannot be
   described using the attributes and DFA machinery.
   DEP is the insn being scheduled.
   INSN is the previous insn.
   COST is the current cycle cost for DEP.
   Implements TARGET_SCHED_ADJUST_COST; returns the adjusted cost.  */

static int
mn10300_adjust_sched_cost (rtx_insn *insn, int dep_type, rtx_insn *dep,
			   int cost, unsigned int)
{
  rtx insn_set;
  rtx dep_set;
  int timings;

  if (!TARGET_AM33)
    return 1;

  /* We are only interested in pairs of SET. */
  insn_set = single_set (insn);
  if (!insn_set)
    return cost;

  dep_set = single_set (dep);
  if (!dep_set)
    return cost;

  /* For the AM34 a load instruction that follows a
     store instruction incurs an extra cycle of delay.  */
  if (mn10300_tune_cpu == PROCESSOR_AM34
      && set_is_load_p (dep_set)
      && set_is_store_p (insn_set))
    cost += 1;

  /* For the AM34 a non-store, non-branch FPU insn that follows
     another FPU insn incurs a one cycle throughput increase.  */
  else if (mn10300_tune_cpu == PROCESSOR_AM34
	   && ! set_is_store_p (insn_set)
	   && ! JUMP_P (insn)
	   && GET_MODE_CLASS (GET_MODE (SET_SRC (dep_set))) == MODE_FLOAT
	   && GET_MODE_CLASS (GET_MODE (SET_SRC (insn_set))) == MODE_FLOAT)
    cost += 1;

  /* Resolve the conflict described in section 1-7-4 of
     Chapter 3 of the MN103E Series Instruction Manual
     where it says:

     "When the preceding instruction is a CPU load or
      store instruction, a following FPU instruction
      cannot be executed until the CPU completes the
      latency period even though there are no register
      or flag dependencies between them."  */

  /* Only the AM33-2 (and later) CPUs have FPU instructions.  */
  if (! TARGET_AM33_2)
    return cost;

  /* If a data dependence already exists then the cost is correct.  */
  if (dep_type == 0)
    return cost;

  /* Check that the instruction about to scheduled is an FPU instruction.  */
  if (GET_MODE_CLASS (GET_MODE (SET_SRC (dep_set))) != MODE_FLOAT)
    return cost;

  /* Now check to see if the previous instruction is a load or store.  */
  if (! set_is_load_p (insn_set) && ! set_is_store_p (insn_set))
    return cost;

  /* XXX: Verify: The text of 1-7-4 implies that the restriction
     only applies when an INTEGER load/store precedes an FPU
     instruction, but is this true ? For now we assume that it is.  */
  if (GET_MODE_CLASS (GET_MODE (SET_SRC (insn_set))) != MODE_INT)
    return cost;

  /* Extract the latency value from the timings attribute.  */
  timings = get_attr_timings (insn);
  return timings < 100 ? (timings % 10) : (timings % 100);
}
/* Implement TARGET_CONDITIONAL_REGISTER_USAGE: mark registers that are
   unavailable for the selected subtarget as fixed.  */

static void
mn10300_conditional_register_usage (void)
{
  unsigned int i;

  /* The extended register file exists only on AM33 parts.  */
  if (!TARGET_AM33)
    {
      for (i = FIRST_EXTENDED_REGNUM;
	   i <= LAST_EXTENDED_REGNUM; i++)
	fixed_regs[i] = 1;
    }
  /* The FP registers exist only on AM33-2 parts.  */
  if (!TARGET_AM33_2)
    {
      for (i = FIRST_FP_REGNUM;
	   i <= LAST_FP_REGNUM; i++)
	fixed_regs[i] = 1;
    }
  /* Reserve the PIC register when generating position-independent code.  */
  if (flag_pic)
    fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
}
/* Worker function for TARGET_MD_ASM_ADJUST.
   We do this in the mn10300 backend to maintain source compatibility
   with the old cc0-based compiler.

   Adds an implicit clobber of the flags register to every asm
   statement.  Returns NULL (no extra insns to emit).  */

static rtx_insn *
mn10300_md_asm_adjust (vec<rtx> & /*outputs*/, vec<rtx> & /*inputs*/,
		       vec<machine_mode> & /*input_modes*/,
		       vec<const char *> & /*constraints*/,
		       vec<rtx> &/*uses*/, vec<rtx> &clobbers,
		       HARD_REG_SET &clobbered_regs, location_t /*loc*/)
{
  clobbers.safe_push (gen_rtx_REG (CCmode, CC_REG));
  SET_HARD_REG_BIT (clobbered_regs, CC_REG);
  return NULL;
}
/* A helper function for splitting cbranch patterns after reload.
   Emits a compare setting the flags register in CMP_MODE, followed by a
   conditional jump to LABEL_REF testing those flags with the comparison
   code taken from CMP_OP.  */

void
mn10300_split_cbranch (machine_mode cmp_mode, rtx cmp_op, rtx label_ref)
{
  rtx flags, x;

  /* flags = compare (op0, op1)  */
  flags = gen_rtx_REG (cmp_mode, CC_REG);
  x = gen_rtx_COMPARE (cmp_mode, XEXP (cmp_op, 0), XEXP (cmp_op, 1));
  x = gen_rtx_SET (flags, x);
  emit_insn (x);

  /* pc = if (flags <code> 0) then label else pc  */
  x = gen_rtx_fmt_ee (GET_CODE (cmp_op), VOIDmode, flags, const0_rtx);
  x = gen_rtx_IF_THEN_ELSE (VOIDmode, x, label_ref, pc_rtx);
  x = gen_rtx_SET (pc_rtx, x);
  emit_jump_insn (x);
}
/* A helper function for matching parallels that set the flags.
   INSN's pattern is a two-element PARALLEL whose first element sets the
   flags from a COMPARE; return true when that flags mode is compatible
   with (provides no more flags than) CC_MODE.  */

bool
mn10300_match_ccmode (rtx insn, machine_mode cc_mode)
{
  rtx op1, flags;
  machine_mode flags_mode;

  gcc_checking_assert (XVECLEN (PATTERN (insn), 0) == 2);

  op1 = XVECEXP (PATTERN (insn), 0, 0);
  gcc_checking_assert (GET_CODE (SET_SRC (op1)) == COMPARE);

  flags = SET_DEST (op1);
  flags_mode = GET_MODE (flags);

  if (GET_MODE (SET_SRC (op1)) != flags_mode)
    return false;
  if (GET_MODE_CLASS (flags_mode) != MODE_CC)
    return false;

  /* Ensure that the mode of FLAGS is compatible with CC_MODE.  */
  if (cc_flags_for_mode (flags_mode) & ~cc_flags_for_mode (cc_mode))
    return false;

  return true;
}
/* This function is used to help split:

     (set (reg) (and (reg) (int)))

   into:

     (set (reg) (shift (reg) (int))
     (set (reg) (shift (reg) (int))

   where the shifts will be shorter than the "and" insn.

   It returns the number of bits that should be shifted.  A positive
   values means that the low bits are to be cleared (and hence the
   shifts should be right followed by left) whereas a negative value
   means that the high bits are to be cleared (left followed by right).
   Zero is returned when it would not be economical to split the AND.  */

int
mn10300_split_and_operand_count (rtx op)
{
  HOST_WIDE_INT val = INTVAL (op);
  int count;

  if (val < 0)
    {
      /* High bit is set, look for bits clear at the bottom.  */
      count = exact_log2 (-val);
      if (count < 0)
	return 0;
      /* This is only size win if we can use the asl2 insn.  Otherwise we
	 would be replacing 1 6-byte insn with 2 3-byte insns.  */
      if (count > (optimize_insn_for_speed_p () ? 2 : 4))
	return 0;
      return count;
    }
  else
    {
      /* High bit is clear, look for bits set at the bottom.  */
      count = exact_log2 (val + 1);
      count = 32 - count;
      /* Again, this is only a size win with asl2.  */
      if (count > (optimize_insn_for_speed_p () ? 2 : 4))
	return 0;
      return -count;
    }
}
/* Description of one half of a candidate LIW (long instruction word)
   bundle, as extracted from a single-SET insn by extract_bundle.  */

struct liw_data
{
  enum attr_liw slot;		/* Which LIW pipeline the insn may use.  */
  enum attr_liw_op op;		/* The operation performed.  */
  rtx dest;			/* Destination operand (input for CMP).  */
  rtx src;			/* Source operand.  */
};
/* Decide if the given insn is a candidate for LIW bundling.  If it is then
   extract the operands and LIW attributes from the insn and use them to fill
   in the liw_data structure.  Return true upon success or false if the insn
   cannot be bundled.  */

static bool
extract_bundle (rtx_insn *insn, struct liw_data * pdata)
{
  bool allow_consts = true;
  rtx p;

  gcc_assert (pdata != NULL);

  if (insn == NULL)
    return false;
  /* Make sure that we are dealing with a simple SET insn.  */
  p = single_set (insn);
  if (p == NULL_RTX)
    return false;

  /* Make sure that it could go into one of the LIW pipelines.  */
  pdata->slot = get_attr_liw (insn);
  if (pdata->slot == LIW_BOTH)
    return false;

  pdata->op = get_attr_liw_op (insn);

  switch (pdata->op)
    {
    case LIW_OP_MOV:
      pdata->dest = SET_DEST (p);
      pdata->src = SET_SRC (p);
      break;
    case LIW_OP_CMP:
      /* For a compare, both operands are inputs of the COMPARE rtx.  */
      pdata->dest = XEXP (SET_SRC (p), 0);
      pdata->src = XEXP (SET_SRC (p), 1);
      break;
    case LIW_OP_NONE:
      return false;
    case LIW_OP_AND:
    case LIW_OP_OR:
    case LIW_OP_XOR:
      /* The AND, OR and XOR long instruction words only accept register arguments.  */
      allow_consts = false;
      /* Fall through.  */
    default:
      /* Binary operations: destination plus the second source operand.  */
      pdata->dest = SET_DEST (p);
      pdata->src = XEXP (SET_SRC (p), 1);
      break;
    }

  /* The destination must always be a register.  */
  if (! REG_P (pdata->dest))
    return false;

  if (REG_P (pdata->src))
    return true;

  /* A constant source is acceptable only for ops that permit it, and
     only when it satisfies the LIW immediate constraint.  */
  return allow_consts && satisfies_constraint_O (pdata->src);
}
/* Make sure that it is OK to execute LIW1 and LIW2 in parallel.  GCC generated
   the instructions with the assumption that LIW1 would be executed before LIW2
   so we must check for overlaps between their sources and destinations.
   May rewrite PLIW2->src (move forwarding); returns true when bundling
   is safe.  */

static bool
check_liw_constraints (struct liw_data * pliw1, struct liw_data * pliw2)
{
  /* Check for slot conflicts.  */
  if (pliw2->slot == pliw1->slot && pliw1->slot != LIW_EITHER)
    return false;

  /* If either operation is a compare, then "dest" is really an input; the real
     destination is CC_REG.  So these instructions need different checks.  */

  /* Changing "CMP ; OP" into "CMP | OP" is OK because the comparison will
     check its values prior to any changes made by OP.  */
  if (pliw1->op == LIW_OP_CMP)
    {
      /* Two sequential comparisons means dead code, which ought to
	 have been eliminated given that bundling only happens with
	 optimization.  We cannot bundle them in any case.  */
      gcc_assert (pliw1->op != pliw2->op);
      return true;
    }

  /* Changing "OP ; CMP" into "OP | CMP" does not work if the value being compared
     is the destination of OP, as the CMP will look at the old value, not the new
     one.  */
  if (pliw2->op == LIW_OP_CMP)
    {
      if (REGNO (pliw2->dest) == REGNO (pliw1->dest))
	return false;

      if (REG_P (pliw2->src))
	return REGNO (pliw2->src) != REGNO (pliw1->dest);

      return true;
    }

  /* Changing "OP1 ; OP2" into "OP1 | OP2" does not work if they both write to the
     same destination register.  */
  if (REGNO (pliw2->dest) == REGNO (pliw1->dest))
    return false;

  /* Changing "OP1 ; OP2" into "OP1 | OP2" generally does not work if the destination
     of OP1 is the source of OP2.  The exception is when OP1 is a MOVE instruction when
     we can replace the source in OP2 with the source of OP1.  */
  if (REG_P (pliw2->src) && REGNO (pliw2->src) == REGNO (pliw1->dest))
    {
      if (pliw1->op == LIW_OP_MOV && REG_P (pliw1->src))
	{
	  /* NOTE(review): this inner condition is unreachable -- the
	     enclosing if already requires REG_P (pliw1->src).  Presumably
	     it predates that requirement; confirm before removing.  */
	  if (! REG_P (pliw1->src)
	      && (pliw2->op == LIW_OP_AND
		  || pliw2->op == LIW_OP_OR
		  || pliw2->op == LIW_OP_XOR))
	    return false;

	  /* Forward OP1's source into OP2 so both read the old value.  */
	  pliw2->src = pliw1->src;
	  return true;
	}
      return false;
    }

  /* Everything else is OK.  */
  return true;
}
3089
298362c8
NC
3090/* Combine pairs of insns into LIW bundles. */
3091
3092static void
3093mn10300_bundle_liw (void)
3094{
e8a54173 3095 rtx_insn *r;
298362c8 3096
e8a54173 3097 for (r = get_insns (); r != NULL; r = next_nonnote_nondebug_insn (r))
298362c8 3098 {
e8a54173 3099 rtx_insn *insn1, *insn2;
a45d420a 3100 struct liw_data liw1, liw2;
298362c8
NC
3101
3102 insn1 = r;
a45d420a 3103 if (! extract_bundle (insn1, & liw1))
298362c8
NC
3104 continue;
3105
3106 insn2 = next_nonnote_nondebug_insn (insn1);
a45d420a 3107 if (! extract_bundle (insn2, & liw2))
298362c8
NC
3108 continue;
3109
a45d420a
NC
3110 /* Check for source/destination overlap. */
3111 if (! check_liw_constraints (& liw1, & liw2))
298362c8
NC
3112 continue;
3113
a45d420a 3114 if (liw1.slot == LIW_OP2 || liw2.slot == LIW_OP1)
298362c8 3115 {
a45d420a
NC
3116 struct liw_data temp;
3117
3118 temp = liw1;
298362c8 3119 liw1 = liw2;
a45d420a 3120 liw2 = temp;
298362c8
NC
3121 }
3122
298362c8
NC
3123 delete_insn (insn2);
3124
e8a54173 3125 rtx insn2_pat;
a45d420a 3126 if (liw1.op == LIW_OP_CMP)
e8a54173
DM
3127 insn2_pat = gen_cmp_liw (liw2.dest, liw2.src, liw1.dest, liw1.src,
3128 GEN_INT (liw2.op));
a45d420a 3129 else if (liw2.op == LIW_OP_CMP)
e8a54173
DM
3130 insn2_pat = gen_liw_cmp (liw1.dest, liw1.src, liw2.dest, liw2.src,
3131 GEN_INT (liw1.op));
298362c8 3132 else
e8a54173
DM
3133 insn2_pat = gen_liw (liw1.dest, liw2.dest, liw1.src, liw2.src,
3134 GEN_INT (liw1.op), GEN_INT (liw2.op));
298362c8 3135
e8a54173 3136 insn2 = emit_insn_after (insn2_pat, insn1);
298362c8
NC
3137 delete_insn (insn1);
3138 r = insn2;
3139 }
3140}
3141
/* Debug-dump helper for the SETLB/Lcc scanning code below: if RTL dumping
   is active, print REASON (which must be a string literal - it is pasted
   onto "\n" by the preprocessor) to the dump file, followed by INSN when
   INSN is not NULL_RTX.  Wrapped in do { } while (0) so it behaves as a
   single statement.  */
#define DUMP(reason, insn) \
  do \
    { \
      if (dump_file) \
	{ \
	  fprintf (dump_file, reason "\n"); \
	  if (insn != NULL_RTX) \
	    print_rtl_single (dump_file, insn); \
	  fprintf(dump_file, "\n"); \
	} \
    } \
  while (0)
3154
3155/* Replace the BRANCH insn with a Lcc insn that goes to LABEL.
3156 Insert a SETLB insn just before LABEL. */
3157
3158static void
f370536c 3159mn10300_insert_setlb_lcc (rtx_insn *label, rtx_insn *branch)
662c03f4
NC
3160{
3161 rtx lcc, comparison, cmp_reg;
3162
3163 if (LABEL_NUSES (label) > 1)
3164 {
e60365d3 3165 rtx_insn *insn;
662c03f4
NC
3166
3167 /* This label is used both as an entry point to the loop
3168 and as a loop-back point for the loop. We need to separate
3169 these two functions so that the SETLB happens upon entry,
3170 but the loop-back does not go to the SETLB instruction. */
3171 DUMP ("Inserting SETLB insn after:", label);
3172 insn = emit_insn_after (gen_setlb (), label);
3173 label = gen_label_rtx ();
3174 emit_label_after (label, insn);
3175 DUMP ("Created new loop-back label:", label);
3176 }
3177 else
3178 {
3179 DUMP ("Inserting SETLB insn before:", label);
3180 emit_insn_before (gen_setlb (), label);
3181 }
3182
3183 comparison = XEXP (SET_SRC (PATTERN (branch)), 0);
3184 cmp_reg = XEXP (comparison, 0);
3185 gcc_assert (REG_P (cmp_reg));
3186
3187 /* If the comparison has not already been split out of the branch
3188 then do so now. */
3189 gcc_assert (REGNO (cmp_reg) == CC_REG);
3190
3191 if (GET_MODE (cmp_reg) == CC_FLOATmode)
3192 lcc = gen_FLcc (comparison, label);
3193 else
3194 lcc = gen_Lcc (comparison, label);
3195
e73de8f3 3196 rtx_insn *jump = emit_jump_insn_before (lcc, branch);
543e1b5f 3197 mark_jump_label (XVECEXP (lcc, 0, 0), jump, 0);
e73de8f3 3198 JUMP_LABEL (jump) = label;
662c03f4 3199 DUMP ("Replacing branch insn...", branch);
e73de8f3 3200 DUMP ("... with Lcc insn:", jump);
662c03f4
NC
3201 delete_insn (branch);
3202}
3203
3204static bool
b8244d74 3205mn10300_block_contains_call (basic_block block)
662c03f4 3206{
b32d5189 3207 rtx_insn *insn;
662c03f4
NC
3208
3209 FOR_BB_INSNS (block, insn)
3210 if (CALL_P (insn))
3211 return true;
3212
3213 return false;
3214}
3215
3216static bool
3217mn10300_loop_contains_call_insn (loop_p loop)
3218{
3219 basic_block * bbs;
3220 bool result = false;
3221 unsigned int i;
3222
3223 bbs = get_loop_body (loop);
3224
3225 for (i = 0; i < loop->num_nodes; i++)
3226 if (mn10300_block_contains_call (bbs[i]))
3227 {
3228 result = true;
3229 break;
3230 }
3231
3232 free (bbs);
3233 return result;
3234}
3235
3236static void
3237mn10300_scan_for_setlb_lcc (void)
3238{
662c03f4
NC
3239 DUMP ("Looking for loops that can use the SETLB insn", NULL_RTX);
3240
3241 df_analyze ();
3242 compute_bb_for_insn ();
3243
3244 /* Find the loops. */
4861a1f7 3245 loop_optimizer_init (AVOID_CFG_MODIFICATIONS);
662c03f4
NC
3246
3247 /* FIXME: For now we only investigate innermost loops. In practice however
3248 if an inner loop is not suitable for use with the SETLB/Lcc insns, it may
3249 be the case that its parent loop is suitable. Thus we should check all
3250 loops, but work from the innermost outwards. */
e41ba804 3251 for (auto loop : loops_list (cfun, LI_ONLY_INNERMOST))
662c03f4
NC
3252 {
3253 const char * reason = NULL;
3254
3255 /* Check to see if we can modify this loop. If we cannot
3256 then set 'reason' to describe why it could not be done. */
3257 if (loop->latch == NULL)
3258 reason = "it contains multiple latches";
3259 else if (loop->header != loop->latch)
3260 /* FIXME: We could handle loops that span multiple blocks,
3261 but this requires a lot more work tracking down the branches
3262 that need altering, so for now keep things simple. */
3263 reason = "the loop spans multiple blocks";
3264 else if (mn10300_loop_contains_call_insn (loop))
3265 reason = "it contains CALL insns";
3266 else
3267 {
68a1a6c0 3268 rtx_insn *branch = BB_END (loop->latch);
662c03f4
NC
3269
3270 gcc_assert (JUMP_P (branch));
3271 if (single_set (branch) == NULL_RTX || ! any_condjump_p (branch))
3272 /* We cannot optimize tablejumps and the like. */
3273 /* FIXME: We could handle unconditional jumps. */
3274 reason = "it is not a simple loop";
3275 else
3276 {
68a1a6c0 3277 rtx_insn *label;
662c03f4
NC
3278
3279 if (dump_file)
3280 flow_loop_dump (loop, dump_file, NULL, 0);
3281
3282 label = BB_HEAD (loop->header);
3283 gcc_assert (LABEL_P (label));
3284
3285 mn10300_insert_setlb_lcc (label, branch);
3286 }
3287 }
3288
3289 if (dump_file && reason != NULL)
3290 fprintf (dump_file, "Loop starting with insn %d is not suitable because %s\n",
3291 INSN_UID (BB_HEAD (loop->header)),
3292 reason);
3293 }
3294
4861a1f7 3295 loop_optimizer_finalize ();
662c03f4
NC
3296
3297 df_finish_pass (false);
3298
3299 DUMP ("SETLB scan complete", NULL_RTX);
3300}
3301
298362c8
NC
3302static void
3303mn10300_reorg (void)
3304{
662c03f4
NC
3305 /* These are optimizations, so only run them if optimizing. */
3306 if (TARGET_AM33 && (optimize > 0 || optimize_size))
298362c8 3307 {
662c03f4
NC
3308 if (TARGET_ALLOW_SETLB)
3309 mn10300_scan_for_setlb_lcc ();
3310
298362c8
NC
3311 if (TARGET_ALLOW_LIW)
3312 mn10300_bundle_liw ();
3313 }
3314}
3315\f
e7ab5593
NC
/* Initialize the GCC target structure.  */

/* Machine-dependent reorganization pass (SETLB/Lcc and LIW bundling,
   see mn10300_reorg above).  */
#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG mn10300_reorg

/* Assembler output.  */
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.hword\t"

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS mn10300_legitimize_address

/* Cost model hooks.  */
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST mn10300_address_cost
#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST mn10300_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST mn10300_memory_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS mn10300_rtx_costs

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START mn10300_file_start
#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA mn10300_asm_output_addr_const_extra

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE mn10300_option_override

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO mn10300_encode_section_info

/* Calling-convention hooks.  */
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY mn10300_return_in_memory
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE mn10300_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_arg_info_true
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES mn10300_arg_partial_bytes
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG mn10300_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE mn10300_function_arg_advance

/* Varargs support.  */
#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS mn10300_builtin_saveregs
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START mn10300_va_start

#undef TARGET_CASE_VALUES_THRESHOLD
#define TARGET_CASE_VALUES_THRESHOLD mn10300_case_values_threshold

/* This port still uses the old (reload) register allocator.  */
#undef TARGET_LRA_P
#define TARGET_LRA_P hook_bool_void_false

/* Addressing and constant legitimacy.  */
#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P mn10300_legitimate_address_p
#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS mn10300_delegitimize_address
#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P mn10300_legitimate_constant_p

/* Reload hooks.  */
#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS mn10300_preferred_reload_class
#undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
#define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS \
  mn10300_preferred_output_reload_class
#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD mn10300_secondary_reload

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT mn10300_trampoline_init

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE mn10300_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE mn10300_libcall_value

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK mn10300_asm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK mn10300_can_output_mi_thunk

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST mn10300_adjust_sched_cost

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE mn10300_conditional_register_usage

#undef TARGET_MD_ASM_ADJUST
#define TARGET_MD_ASM_ADJUST mn10300_md_asm_adjust

/* The flags register is exposed as CC_REG (see also mn10300.md).  */
#undef TARGET_FLAGS_REGNUM
#define TARGET_FLAGS_REGNUM CC_REG

#undef TARGET_HARD_REGNO_MODE_OK
#define TARGET_HARD_REGNO_MODE_OK mn10300_hard_regno_mode_ok

#undef TARGET_MODES_TIEABLE_P
#define TARGET_MODES_TIEABLE_P mn10300_modes_tieable_p

#undef TARGET_HAVE_SPECULATION_SAFE_VALUE
#define TARGET_HAVE_SPECULATION_SAFE_VALUE speculation_safe_value_not_needed

struct gcc_target targetm = TARGET_INITIALIZER;