/* Output routines for Motorola MCore processor
   Copyright (C) 1993, 1999, 2000, 2001 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.  */
bc27e96c 20#include "config.h"
4bd048ef 21#include "system.h"
4816b8e4
NC
22#include "rtl.h"
23#include "tree.h"
24#include "tm_p.h"
8f90be4c 25#include "assert.h"
8f90be4c 26#include "mcore.h"
8f90be4c
NC
27#include "regs.h"
28#include "hard-reg-set.h"
29#include "real.h"
30#include "insn-config.h"
31#include "conditions.h"
8f90be4c
NC
32#include "output.h"
33#include "insn-attr.h"
34#include "flags.h"
35#include "obstack.h"
36#include "expr.h"
37#include "reload.h"
38#include "recog.h"
39#include "function.h"
40#include "ggc.h"
41#include "toplev.h"
8f90be4c 42
8f90be4c
NC
/* Maximum size we are allowed to grow the stack in a single operation.
   If we want more, we must do it in increments of at most this size.
   If this value is 0, we don't check at all.  */
const char * mcore_stack_increment_string = 0;
int mcore_stack_increment = STACK_UNITS_MAXSTEP;

/* For dumping information about frame sizes.  */
char * mcore_current_function_name = 0;
long   mcore_current_compilation_timestamp = 0;

/* Global variables for machine-dependent things.  */

/* Saved operands from the last compare to use when we generate an scc
   or bcc insn.  */
rtx arch_compare_op0;
rtx arch_compare_op1;

/* Provides the class number of the smallest class containing
   reg number.  */
int regno_reg_class[FIRST_PSEUDO_REGISTER] =
{
  GENERAL_REGS, ONLYR1_REGS,  LRW_REGS, LRW_REGS,
  LRW_REGS,     LRW_REGS,     LRW_REGS, LRW_REGS,
  LRW_REGS,     LRW_REGS,     LRW_REGS, LRW_REGS,
  LRW_REGS,     LRW_REGS,     LRW_REGS, GENERAL_REGS,
  GENERAL_REGS, C_REGS,       NO_REGS,  NO_REGS,
};

/* Provide reg_class from a letter such as appears in the machine
   description.  */
enum reg_class reg_class_from_letter[] =
{
  /* a */ LRW_REGS, /* b */ ONLYR1_REGS,  /* c */ C_REGS,  /* d */ NO_REGS,
  /* e */ NO_REGS,  /* f */ NO_REGS,      /* g */ NO_REGS, /* h */ NO_REGS,
  /* i */ NO_REGS,  /* j */ NO_REGS,      /* k */ NO_REGS, /* l */ NO_REGS,
  /* m */ NO_REGS,  /* n */ NO_REGS,      /* o */ NO_REGS, /* p */ NO_REGS,
  /* q */ NO_REGS,  /* r */ GENERAL_REGS, /* s */ NO_REGS, /* t */ NO_REGS,
  /* u */ NO_REGS,  /* v */ NO_REGS,      /* w */ NO_REGS, /* x */ ALL_REGS,
  /* y */ NO_REGS,  /* z */ NO_REGS
};

struct mcore_frame
{
  int arg_size;			/* stdarg spills (bytes) */
  int reg_size;			/* non-volatile reg saves (bytes) */
  int reg_mask;			/* non-volatile reg saves */
  int local_size;		/* locals */
  int outbound_size;		/* arg overflow on calls out */
  int pad_outbound;
  int pad_local;
  int pad_reg;
  /* Describe the steps we'll use to grow it.  */
#define MAX_STACK_GROWS 4	/* Gives us some spare space.  */
  int growth[MAX_STACK_GROWS];
  int arg_offset;
  int reg_offset;
  int reg_growth;
  int local_growth;
};

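/* An illustrative sketch (added commentary, not from the original
   source) of the frame these fields describe, from high addresses
   down to the stack pointer after the prologue, assuming every
   component is present:

	incoming args on the stack
	arg_size	(stdarg/varargs spills; arg_offset points here)
	pad_reg
	reg_size	(callee-saved registers named by reg_mask)
	local_size + pad_local
	outbound_size + pad_outbound

   growth[] records the individual stack adjustments used to buy the
   frame; reg_growth and local_growth say after which adjustment the
   register save area and the locals, respectively, are in place.  */
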
typedef enum
{
  COND_NO,
  COND_MOV_INSN,
  COND_CLR_INSN,
  COND_INC_INSN,
  COND_DEC_INSN,
  COND_BRANCH_INSN
}
cond_type;

static void       output_stack_adjust          PARAMS ((int, int));
static int        calc_live_regs               PARAMS ((int *));
static int        const_ok_for_mcore           PARAMS ((int));
static int        try_constant_tricks          PARAMS ((long, int *, int *));
static const char * output_inline_const        PARAMS ((enum machine_mode, rtx *));
static void       block_move_sequence          PARAMS ((rtx, rtx, rtx, rtx, int, int, int));
static void       layout_mcore_frame           PARAMS ((struct mcore_frame *));
static cond_type  is_cond_candidate            PARAMS ((rtx));
static rtx        emit_new_cond_insn           PARAMS ((rtx, int));
static rtx        conditionalize_block         PARAMS ((rtx));
static void       conditionalize_optimization  PARAMS ((rtx));
static void       mcore_add_gc_roots           PARAMS ((void));
static rtx        handle_structs_in_regs       PARAMS ((enum machine_mode, tree, int));
static void       mcore_mark_dllexport         PARAMS ((tree));
static void       mcore_mark_dllimport         PARAMS ((tree));
static int        mcore_dllexport_p            PARAMS ((tree));
static int        mcore_dllimport_p            PARAMS ((tree));
\f
/* Emit code to adjust the stack, growing (DIRECTION > 0) or shrinking
   (DIRECTION < 0) it by SIZE bytes.  */

static void
output_stack_adjust (direction, size)
     int direction;
     int size;
{
  /* If extending stack a lot, we do it incrementally.  */
  if (direction < 0 && size > mcore_stack_increment && mcore_stack_increment > 0)
    {
      rtx tmp = gen_rtx (REG, SImode, 1);
      rtx memref;
      emit_insn (gen_movsi (tmp, GEN_INT (mcore_stack_increment)));
      do
	{
	  emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp));
	  memref = gen_rtx (MEM, SImode, stack_pointer_rtx);
	  MEM_VOLATILE_P (memref) = 1;
	  emit_insn (gen_movsi (memref, stack_pointer_rtx));
	  size -= mcore_stack_increment;
	}
      while (size > mcore_stack_increment);

      /* SIZE is now the residual for the last adjustment,
	 which doesn't require a probe.  */
    }

  if (size)
    {
      rtx insn;
      rtx val = GEN_INT (size);

      if (size > 32)
	{
	  rtx nval = gen_rtx (REG, SImode, 1);
	  emit_insn (gen_movsi (nval, val));
	  val = nval;
	}

      if (direction > 0)
	insn = gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, val);
      else
	insn = gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, val);

      emit_insn (insn);
    }
}

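/* As an illustration (added note; instruction names approximate and
   the STACK_UNITS_MAXSTEP increment is assumed here to be 4096),
   growing the stack by 9000 bytes would emit something like:

	lrw	r1, 4096	// increment in a scratch register
	subu	sp, r1		// buy the first 4096 bytes
	stw	sp, (sp)	// probe the newly allocated page
	subu	sp, r1		// buy the second 4096 bytes
	stw	sp, (sp)	// probe again
	...			// then the 808-byte residual, unprobed

   The residual itself goes through the size > 32 path above when it
   does not fit an addi/subi immediate.  */
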
/* Work out the registers which need to be saved,
   both as a mask and a count.  */

static int
calc_live_regs (count)
     int * count;
{
  int reg;
  int live_regs_mask = 0;

  * count = 0;

  for (reg = 0; reg < FIRST_PSEUDO_REGISTER; reg++)
    {
      if (regs_ever_live[reg] && !call_used_regs[reg])
	{
	  (*count)++;
	  live_regs_mask |= (1 << reg);
	}
    }

  return live_regs_mask;
}

/* Print the operand address in X to the STREAM.  */

void
mcore_print_operand_address (stream, x)
     FILE * stream;
     rtx x;
{
  switch (GET_CODE (x))
    {
    case REG:
      fprintf (stream, "(%s)", reg_names[REGNO (x)]);
      break;

    case PLUS:
      {
	rtx base = XEXP (x, 0);
	rtx index = XEXP (x, 1);

	if (GET_CODE (base) != REG)
	  {
	    /* Ensure that BASE is a register (one of them must be).  */
	    rtx temp = base;
	    base = index;
	    index = temp;
	  }

	switch (GET_CODE (index))
	  {
	  case CONST_INT:
	    fprintf (stream, "(%s,%d)", reg_names[REGNO (base)],
		     INTVAL (index));
	    break;

	  default:
	    debug_rtx (x);

	    abort ();
	  }
      }

      break;

    default:
      output_addr_const (stream, x);
      break;
    }
}

/* Print operand x (an rtx) in assembler syntax to file stream
   according to modifier code.

   'R'  print the next register or memory location along, i.e. the lsw in
        a double word value
   'O'  print a constant without the #
   'M'  print a constant as its negative
   'P'  print log2 of a power of two
   'Q'  print log2 of an inverse of a power of two
   'U'  print register for ldm/stm instruction
   'X'  print byte number for xtrbN instruction.  */

void
mcore_print_operand (stream, x, code)
     FILE * stream;
     rtx x;
     int code;
{
  switch (code)
    {
    case 'N':
      if (INTVAL (x) == -1)
	fprintf (asm_out_file, "32");
      else
	fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) + 1));
      break;
    case 'P':
      fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x)));
      break;
    case 'Q':
      fprintf (asm_out_file, "%d", exact_log2 (~INTVAL (x)));
      break;
    case 'O':
      fprintf (asm_out_file, "%d", INTVAL (x));
      break;
    case 'M':
      fprintf (asm_out_file, "%d", - INTVAL (x));
      break;
    case 'R':
      /* Next location along in memory or register.  */
      switch (GET_CODE (x))
	{
	case REG:
	  fputs (reg_names[REGNO (x) + 1], (stream));
	  break;
	case MEM:
	  mcore_print_operand_address
	    (stream, XEXP (adj_offsettable_operand (x, 4), 0));
	  break;
	default:
	  abort ();
	}
      break;
    case 'U':
      fprintf (asm_out_file, "%s-%s", reg_names[REGNO (x)],
	       reg_names[REGNO (x) + 3]);
      break;
    case 'x':
      fprintf (asm_out_file, "0x%x", INTVAL (x));
      break;
    case 'X':
      fprintf (asm_out_file, "%d", 3 - INTVAL (x) / 8);
      break;

    default:
      switch (GET_CODE (x))
	{
	case REG:
	  fputs (reg_names[REGNO (x)], (stream));
	  break;
	case MEM:
	  output_address (XEXP (x, 0));
	  break;
	default:
	  output_addr_const (stream, x);
	  break;
	}
      break;
    }
}

/* What does a constant cost?  */

int
mcore_const_costs (exp, code)
     rtx exp;
     enum rtx_code code;
{
  int val = INTVAL (exp);

  /* Easy constants.  */
  if (   CONST_OK_FOR_I (val)
      || CONST_OK_FOR_M (val)
      || CONST_OK_FOR_N (val)
      || (code == PLUS && CONST_OK_FOR_L (val)))
    return 1;
  else if (code == AND
	   && (   CONST_OK_FOR_M (~val)
	       || CONST_OK_FOR_N (~val)))
    return 2;
  else if (code == PLUS
	   && (   CONST_OK_FOR_I (-val)
	       || CONST_OK_FOR_M (-val)
	       || CONST_OK_FOR_N (-val)))
    return 2;

  return 5;
}

/* What does an AND instruction cost - we do this because immediates may
   have been relaxed.  We want to ensure that cse will cse relaxed immediates
   out.  Otherwise we'll get bad code (multiple reloads of the same const).  */

int
mcore_and_cost (x)
     rtx x;
{
  int val;

  if (GET_CODE (XEXP (x, 1)) != CONST_INT)
    return 2;

  val = INTVAL (XEXP (x, 1));

  /* Do it directly.  */
  if (CONST_OK_FOR_K (val) || CONST_OK_FOR_M (~val))
    return 2;
  /* Takes one instruction to load.  */
  else if (const_ok_for_mcore (val))
    return 3;
  /* Takes two instructions to load.  */
  else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
    return 4;

  /* Takes a lrw to load.  */
  return 5;
}

/* What does an IOR instruction cost - see mcore_and_cost().  */

int
mcore_ior_cost (x)
     rtx x;
{
  int val;

  if (GET_CODE (XEXP (x, 1)) != CONST_INT)
    return 2;

  val = INTVAL (XEXP (x, 1));

  /* Do it directly with bseti.  */
  if (CONST_OK_FOR_M (val))
    return 2;
  /* Takes one instruction to load.  */
  else if (const_ok_for_mcore (val))
    return 3;
  /* Takes two instructions to load.  */
  else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
    return 4;

  /* Takes a lrw to load.  */
  return 5;
}

/* Check to see if a comparison against a constant can be made more efficient
   by incrementing/decrementing the constant to get one that is more efficient
   to load.  */

int
mcore_modify_comparison (code)
     enum rtx_code code;
{
  rtx op1 = arch_compare_op1;

  if (GET_CODE (op1) == CONST_INT)
    {
      int val = INTVAL (op1);

      switch (code)
	{
	case LE:
	  if (CONST_OK_FOR_J (val + 1))
	    {
	      arch_compare_op1 = GEN_INT (val + 1);
	      return 1;
	    }
	  break;

	default:
	  break;
	}
    }

  return 0;
}

/* Prepare the operands for a comparison.  */

rtx
mcore_gen_compare_reg (code)
     enum rtx_code code;
{
  rtx op0 = arch_compare_op0;
  rtx op1 = arch_compare_op1;
  rtx cc_reg = gen_rtx (REG, CCmode, CC_REG);

  if (CONSTANT_P (op1) && GET_CODE (op1) != CONST_INT)
    op1 = force_reg (SImode, op1);

  /* cmpnei: 0-31 (K immediate)
     cmplti: 1-32 (J immediate, 0 using btsti x,31).  */
  switch (code)
    {
    case EQ:	/* Use inverted condition, cmpne.  */
      code = NE;
      /* drop through */

    case NE:	/* Use normal condition, cmpne.  */
      if (GET_CODE (op1) == CONST_INT && ! CONST_OK_FOR_K (INTVAL (op1)))
	op1 = force_reg (SImode, op1);
      break;

    case LE:	/* Use inverted condition, reversed cmplt.  */
      code = GT;
      /* drop through */

    case GT:	/* Use normal condition, reversed cmplt.  */
      if (GET_CODE (op1) == CONST_INT)
	op1 = force_reg (SImode, op1);
      break;

    case GE:	/* Use inverted condition, cmplt.  */
      code = LT;
      /* drop through */

    case LT:	/* Use normal condition, cmplt.  */
      if (GET_CODE (op1) == CONST_INT
	  /* covered by btsti x,31 */
	  && INTVAL (op1) != 0
	  && ! CONST_OK_FOR_J (INTVAL (op1)))
	op1 = force_reg (SImode, op1);
      break;

    case GTU:	/* Use inverted condition, cmple.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) == 0)
	{
	  /* Unsigned > 0 is the same as != 0, but we need
	     to invert the condition, so we want to set
	     code = EQ.  This cannot be done however, as the
	     mcore does not support such a test.  Instead we
	     cope with this case in the "bgtu" pattern itself
	     so we should never reach this point.  */
	  /* code = EQ; */
	  abort ();
	  break;
	}
      code = LEU;
      /* drop through */

    case LEU:	/* Use normal condition, reversed cmphs.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
	op1 = force_reg (SImode, op1);
      break;

    case LTU:	/* Use inverted condition, cmphs.  */
      code = GEU;
      /* drop through */

    case GEU:	/* Use normal condition, cmphs.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
	op1 = force_reg (SImode, op1);
      break;

    default:
      break;
    }

  emit_insn (gen_rtx (SET, VOIDmode, cc_reg, gen_rtx (code, CCmode, op0, op1)));

  return cc_reg;
}

int
mcore_symbolic_address_p (x)
     rtx x;
{
  switch (GET_CODE (x))
    {
    case SYMBOL_REF:
    case LABEL_REF:
      return 1;
    case CONST:
      x = XEXP (x, 0);
      return (   (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
	       || GET_CODE (XEXP (x, 0)) == LABEL_REF)
	      && GET_CODE (XEXP (x, 1)) == CONST_INT);
    default:
      return 0;
    }
}

int
mcore_call_address_operand (x, mode)
     rtx x;
     enum machine_mode mode;
{
  return register_operand (x, mode) || CONSTANT_P (x);
}

/* Functions to output assembly code for a function call.  */

char *
mcore_output_call (operands, index)
     rtx operands[];
     int index;
{
  static char buffer[20];
  rtx addr = operands [index];

  if (REG_P (addr))
    {
      if (TARGET_CG_DATA)
	{
	  if (mcore_current_function_name == 0)
	    abort ();

	  ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
			      "unknown", 1);
	}

      sprintf (buffer, "jsr\t%%%d", index);
    }
  else
    {
      if (TARGET_CG_DATA)
	{
	  if (mcore_current_function_name == 0)
	    abort ();

	  if (GET_CODE (addr) != SYMBOL_REF)
	    abort ();

	  ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
			      XSTR (addr, 0), 0);
	}

      sprintf (buffer, "jbsr\t%%%d", index);
    }

  return buffer;
}

/* Can we load a constant with a single instruction?  */

static int
const_ok_for_mcore (value)
     int value;
{
  if (value >= 0 && value <= 127)
    return 1;

  /* Try exact power of two.  */
  if ((value & (value - 1)) == 0)
    return 1;

  /* Try exact power of two - 1.  */
  if ((value & (value + 1)) == 0)
    return 1;

  return 0;
}

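/* An added note: the three accepted shapes correspond one-for-one to
   the single-instruction constant loads used elsewhere in this file:

	0 <= value <= 127	movi	rd,value
	value == 1 << n		bgeni	rd,n	(e.g. 0x1000)
	value == (1 << n) - 1	bmaski	rd,n	(e.g. 0x0fff)  */
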
/* Can we load a constant inline with up to 2 instructions?  */

int
mcore_const_ok_for_inline (value)
     long value;
{
  int x, y;

  return try_constant_tricks (value, & x, & y) > 0;
}

/* Are we loading the constant using a not?  */

int
mcore_const_trick_uses_not (value)
     long value;
{
  int x, y;

  return try_constant_tricks (value, & x, & y) == 2;
}

/* Try tricks to load a constant inline and return the trick number if
   success (0 is non-inlinable).

   0: not inlinable
   1: single instruction (do the usual thing)
   2: single insn followed by a 'not'
   3: single insn followed by an addi
   4: single insn followed by a subi
   5: single insn followed by rsubi
   6: single insn followed by bseti
   7: single insn followed by bclri
   8: single insn followed by rotli
   9: single insn followed by lsli
   10: single insn followed by ixh
   11: single insn followed by ixw.  */

static int
try_constant_tricks (value, x, y)
     long value;
     int * x;
     int * y;
{
  int i;
  unsigned bit, shf, rot;

  if (const_ok_for_mcore (value))
    return 1;	/* Do the usual thing.  */

  if (TARGET_HARDLIT)
    {
      if (const_ok_for_mcore (~value))
	{
	  *x = ~value;
	  return 2;
	}

      for (i = 1; i <= 32; i++)
	{
	  if (const_ok_for_mcore (value - i))
	    {
	      *x = value - i;
	      *y = i;

	      return 3;
	    }

	  if (const_ok_for_mcore (value + i))
	    {
	      *x = value + i;
	      *y = i;

	      return 4;
	    }
	}

      bit = 0x80000000UL;

      for (i = 0; i <= 31; i++)
	{
	  if (const_ok_for_mcore (i - value))
	    {
	      *x = i - value;
	      *y = i;

	      return 5;
	    }

	  if (const_ok_for_mcore (value & ~bit))
	    {
	      *y = bit;
	      *x = value & ~bit;

	      return 6;
	    }

	  if (const_ok_for_mcore (value | bit))
	    {
	      *y = ~bit;
	      *x = value | bit;

	      return 7;
	    }

	  bit >>= 1;
	}

      shf = value;
      rot = value;

      for (i = 1; i < 31; i++)
	{
	  int c;

	  /* MCore has rotate left.  */
	  c = rot << 31;
	  rot >>= 1;
	  rot &= 0x7FFFFFFF;
	  rot |= c;		/* Simulate rotate.  */

	  if (const_ok_for_mcore (rot))
	    {
	      *y = i;
	      *x = rot;

	      return 8;
	    }

	  if (shf & 1)
	    shf = 0;		/* Can't use logical shift, low order bit is one.  */

	  shf >>= 1;

	  if (shf != 0 && const_ok_for_mcore (shf))
	    {
	      *y = i;
	      *x = shf;

	      return 9;
	    }
	}

      if ((value % 3) == 0 && const_ok_for_mcore (value / 3))
	{
	  *x = value / 3;

	  return 10;
	}

      if ((value % 5) == 0 && const_ok_for_mcore (value / 5))
	{
	  *x = value / 5;

	  return 11;
	}
    }

  return 0;
}

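/* A worked example (added for illustration): with TARGET_HARDLIT set,
   value = 0xFFFFFF00 fits none of the single-instruction forms, but
   ~value = 0xFF is an exact power of two minus one, so the function
   returns trick 2 with *x = 0xFF; the emitted pair is then
   "bmaski rd,8" followed by "not rd".  */
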

/* Check whether reg is dead at first.  This is done by searching ahead
   for either the next use (i.e., reg is live), a death note, or a set of
   reg.  Don't just use dead_or_set_p() since reload does not always mark
   deaths (especially if PRESERVE_DEATH_NOTES_REGNO_P is not defined).  We
   can ignore subregs by extracting the actual register.  BRC  */

int
mcore_is_dead (first, reg)
     rtx first;
     rtx reg;
{
  rtx insn;

  /* For mcore, subregs can't live independently of their parent regs.  */
  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  /* Dies immediately.  */
  if (dead_or_set_p (first, reg))
    return 1;

  /* Look for conclusive evidence of live/death, otherwise we have
     to assume that it is live.  */
  for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == JUMP_INSN)
	return 0;	/* We lose track, assume it is alive.  */

      else if (GET_CODE (insn) == CALL_INSN)
	{
	  /* Calls might use it for target or register parms.  */
	  if (reg_referenced_p (reg, PATTERN (insn))
	      || find_reg_fusage (insn, USE, reg))
	    return 0;
	  else if (dead_or_set_p (insn, reg))
	    return 1;
	}
      else if (GET_CODE (insn) == INSN)
	{
	  if (reg_referenced_p (reg, PATTERN (insn)))
	    return 0;
	  else if (dead_or_set_p (insn, reg))
	    return 1;
	}
    }

  /* No conclusive evidence either way, we cannot take the chance
     that control flow hid the use from us -- "I'm not dead yet".  */
  return 0;
}

/* Count the number of ones in mask.  */

int
mcore_num_ones (mask)
     int mask;
{
  /* A trick to count set bits recently posted on comp.compilers.  */
  mask =  (mask >> 1 & 0x55555555) + (mask & 0x55555555);
  mask = ((mask >> 2) & 0x33333333) + (mask & 0x33333333);
  mask = ((mask >> 4) + mask) & 0x0f0f0f0f;
  mask = ((mask >> 8) + mask);

  return (mask + (mask >> 16)) & 0xff;
}

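/* A worked example of the fold above (added note): for mask = 0xC3,
   binary 11000011 with four bits set,

	after the pair sum:	0x82	(2-bit fields hold 2,0,0,2)
	after the nibble sum:	0x22	(4-bit fields hold 2,2)
	after the byte fold:	0x04	(count now in one byte)

   and the final two folds return 4.  */
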
/* Count the number of zeros in mask.  */

int
mcore_num_zeros (mask)
     int mask;
{
  return 32 - mcore_num_ones (mask);
}

/* Determine byte being masked.  */

int
mcore_byte_offset (mask)
     unsigned int mask;
{
  if (mask == 0x00ffffffUL)
    return 0;
  else if (mask == 0xff00ffffUL)
    return 1;
  else if (mask == 0xffff00ffUL)
    return 2;
  else if (mask == 0xffffff00UL)
    return 3;

  return -1;
}

/* Determine halfword being masked.  */

int
mcore_halfword_offset (mask)
     unsigned int mask;
{
  if (mask == 0x0000ffffL)
    return 0;
  else if (mask == 0xffff0000UL)
    return 1;

  return -1;
}

/* Output a series of bseti's corresponding to mask.  */

const char *
mcore_output_bseti (dst, mask)
     rtx dst;
     int mask;
{
  rtx out_operands[2];
  int bit;

  out_operands[0] = dst;

  for (bit = 0; bit < 32; bit++)
    {
      if ((mask & 0x1) == 0x1)
	{
	  out_operands[1] = GEN_INT (bit);

	  output_asm_insn ("bseti\t%0,%1", out_operands);
	}
      mask >>= 1;
    }

  return "";
}

/* Output a series of bclri's corresponding to mask.  */

const char *
mcore_output_bclri (dst, mask)
     rtx dst;
     int mask;
{
  rtx out_operands[2];
  int bit;

  out_operands[0] = dst;

  for (bit = 0; bit < 32; bit++)
    {
      if ((mask & 0x1) == 0x0)
	{
	  out_operands[1] = GEN_INT (bit);

	  output_asm_insn ("bclri\t%0,%1", out_operands);
	}

      mask >>= 1;
    }

  return "";
}

/* Output a conditional move of two constants that are +/- 1 within each
   other.  See the "movtK" patterns in mcore.md.  I'm not sure this is
   really worth the effort.  */

const char *
mcore_output_cmov (operands, cmp_t, test)
     rtx operands[];
     int cmp_t;
     char * test;
{
  int load_value;
  int adjust_value;
  rtx out_operands[4];

  out_operands[0] = operands[0];

  /* Check to see which constant is loadable.  */
  if (const_ok_for_mcore (INTVAL (operands[1])))
    {
      out_operands[1] = operands[1];
      out_operands[2] = operands[2];
    }
  else if (const_ok_for_mcore (INTVAL (operands[2])))
    {
      out_operands[1] = operands[2];
      out_operands[2] = operands[1];

      /* Complement test since constants are swapped.  */
      cmp_t = (cmp_t == 0);
    }
  load_value   = INTVAL (out_operands[1]);
  adjust_value = INTVAL (out_operands[2]);

  /* First output the test if folded into the pattern.  */

  if (test)
    output_asm_insn (test, operands);

  /* Load the constant - for now, only support constants that can be
     generated with a single instruction.  Maybe add general inlinable
     constants later (this will increase the # of patterns since the
     instruction sequence has a different length attribute).  */
  if (load_value >= 0 && load_value <= 127)
    output_asm_insn ("movi\t%0,%1", out_operands);
  else if ((load_value & (load_value - 1)) == 0)
    output_asm_insn ("bgeni\t%0,%P1", out_operands);
  else if ((load_value & (load_value + 1)) == 0)
    output_asm_insn ("bmaski\t%0,%N1", out_operands);

  /* Output the constant adjustment.  */
  if (load_value > adjust_value)
    {
      if (cmp_t)
	output_asm_insn ("decf\t%0", out_operands);
      else
	output_asm_insn ("dect\t%0", out_operands);
    }
  else
    {
      if (cmp_t)
	output_asm_insn ("incf\t%0", out_operands);
      else
	output_asm_insn ("inct\t%0", out_operands);
    }

  return "";
}

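/* A sketch of the emitted sequence (added note): selecting between
   the constants 5 and 4 comes out roughly as

	movi	rd,5		// load the loadable constant
	dect	rd		// conditionally subtract one

   where dect/decf (or inct/incf) is chosen above so that the other
   constant appears exactly when the condition bit has the other
   value.  */
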
/* Outputs the peephole for moving a constant that gets not'ed followed
   by an and (i.e. combine the not and the and into andn).  BRC  */

const char *
mcore_output_andn (insn, operands)
     rtx insn ATTRIBUTE_UNUSED;
     rtx operands[];
{
  int x, y;
  rtx out_operands[3];
  const char * load_op;
  char buf[256];

  if (try_constant_tricks (INTVAL (operands[1]), &x, &y) != 2)
    abort ();

  out_operands[0] = operands[0];
  out_operands[1] = GEN_INT (x);
  out_operands[2] = operands[2];

  if (x >= 0 && x <= 127)
    load_op = "movi\t%0,%1";

  /* Try exact power of two.  */
  else if ((x & (x - 1)) == 0)
    load_op = "bgeni\t%0,%P1";

  /* Try exact power of two - 1.  */
  else if ((x & (x + 1)) == 0)
    load_op = "bmaski\t%0,%N1";

  else
    load_op = "BADMOVI\t%0,%1";

  sprintf (buf, "%s\n\tandn\t%%2,%%0", load_op);
  output_asm_insn (buf, out_operands);

  return "";
}

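/* For example (added note): and'ing with 0xFFFFFF00 satisfies
   try_constant_tricks() with trick 2 and x = 0xFF, so the peephole
   emits

	bmaski	rt,8		// rt = 0x000000FF, the not'ed constant
	andn	rd,rt		// rd &= ~rt, i.e. rd &= 0xFFFFFF00

   saving the separate not instruction.  */
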
/* Output an inline constant.  */

static const char *
output_inline_const (mode, operands)
     enum machine_mode mode;
     rtx operands[];
{
  int x = 0, y = 0;
  int trick_no;
  rtx out_operands[3];
  char buf[256];
  char load_op[256];
  const char *dst_fmt;
  int value;

  value = INTVAL (operands[1]);

  if ((trick_no = try_constant_tricks (value, &x, &y)) == 0)
    {
      /* lrw's are handled separately:  Large inlinable constants
	 never get turned into lrw's.  Our caller uses try_constant_tricks
	 to back off to an lrw rather than calling this routine.  */
      abort ();
    }

  if (trick_no == 1)
    x = value;

  /* operands: 0 = dst, 1 = load immed., 2 = immed. adjustment.  */
  out_operands[0] = operands[0];
  out_operands[1] = GEN_INT (x);

  if (trick_no > 2)
    out_operands[2] = GEN_INT (y);

  /* Select dst format based on mode.  */
  if (mode == DImode && (! TARGET_LITTLE_END))
    dst_fmt = "%R0";
  else
    dst_fmt = "%0";

  if (x >= 0 && x <= 127)
    sprintf (load_op, "movi\t%s,%%1", dst_fmt);

  /* Try exact power of two.  */
  else if ((x & (x - 1)) == 0)
    sprintf (load_op, "bgeni\t%s,%%P1", dst_fmt);

  /* Try exact power of two - 1.  */
  else if ((x & (x + 1)) == 0)
    sprintf (load_op, "bmaski\t%s,%%N1", dst_fmt);

  else
    sprintf (load_op, "BADMOVI\t%s,%%1", dst_fmt);

  switch (trick_no)
    {
    case 1:
      strcpy (buf, load_op);
      break;
    case 2:   /* not */
      sprintf (buf, "%s\n\tnot\t%s\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 3:   /* add */
      sprintf (buf, "%s\n\taddi\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 4:   /* sub */
      sprintf (buf, "%s\n\tsubi\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 5:   /* rsub */
      /* Never happens unless -mrsubi, see try_constant_tricks().  */
      sprintf (buf, "%s\n\trsubi\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 6:   /* bset */
      sprintf (buf, "%s\n\tbseti\t%s,%%P2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 7:   /* bclr */
      sprintf (buf, "%s\n\tbclri\t%s,%%Q2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 8:   /* rotl */
      sprintf (buf, "%s\n\trotli\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 9:   /* lsl */
      sprintf (buf, "%s\n\tlsli\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 10:  /* ixh */
      sprintf (buf, "%s\n\tixh\t%s,%s\t// %d 0x%x", load_op, dst_fmt, dst_fmt, value, value);
      break;
    case 11:  /* ixw */
      sprintf (buf, "%s\n\tixw\t%s,%s\t// %d 0x%x", load_op, dst_fmt, dst_fmt, value, value);
      break;
    default:
      return "";
    }

  output_asm_insn (buf, out_operands);

  return "";
}

/* Output a move of a word or less value.  */

const char *
mcore_output_move (insn, operands, mode)
     rtx insn ATTRIBUTE_UNUSED;
     rtx operands[];
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  rtx dst = operands[0];
  rtx src = operands[1];

  if (GET_CODE (dst) == REG)
    {
      if (GET_CODE (src) == REG)
	{
	  if (REGNO (src) == CC_REG)            /* r-c */
	    return "mvc\t%0";
	  else
	    return "mov\t%0,%1";                /* r-r */
	}
      else if (GET_CODE (src) == MEM)
	{
	  if (GET_CODE (XEXP (src, 0)) == LABEL_REF)
	    return "lrw\t%0,[%1]";              /* a-R */
	  else
	    return "ldw\t%0,%1";                /* r-m */
	}
      else if (GET_CODE (src) == CONST_INT)
	{
	  int x, y;

	  if (CONST_OK_FOR_I (INTVAL (src)))       /* r-I */
	    return "movi\t%0,%1";
	  else if (CONST_OK_FOR_M (INTVAL (src)))  /* r-M */
	    return "bgeni\t%0,%P1\t// %1 %x1";
	  else if (CONST_OK_FOR_N (INTVAL (src)))  /* r-N */
	    return "bmaski\t%0,%N1\t// %1 %x1";
	  else if (try_constant_tricks (INTVAL (src), &x, &y))     /* R-P */
	    return output_inline_const (SImode, operands);  /* 1-2 insns */
	  else
	    return "lrw\t%0,%x1\t// %1";	/* Get it from literal pool.  */
	}
      else
	return "lrw\t%0, %1";                /* Into the literal pool.  */
    }
  else if (GET_CODE (dst) == MEM)               /* m-r */
    return "stw\t%1,%0";

  abort ();
}

/* Outputs a constant inline -- regardless of the cost.
   Useful for things where we've gotten into trouble and think we'd
   be doing an lrw into r15 (forbidden).  This lets us get out of
   that pickle even after register allocation.  */

const char *
mcore_output_inline_const_forced (insn, operands, mode)
     rtx insn ATTRIBUTE_UNUSED;
     rtx operands[];
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  unsigned long value = INTVAL (operands[1]);
  unsigned long ovalue = value;
  struct piece
  {
    int low;
    int shift;
  }
  part[6];
  int i;

  if (mcore_const_ok_for_inline (value))
    return output_inline_const (SImode, operands);

  for (i = 0; (unsigned) i < ARRAY_SIZE (part); i++)
    {
      part[i].shift = 0;
      part[i].low = (value & 0x1F);
      value -= part[i].low;

      if (mcore_const_ok_for_inline (value))
	break;
      else
	{
	  value >>= 5;
	  part[i].shift = 5;

	  while ((value & 1) == 0)
	    {
	      part[i].shift++;
	      value >>= 1;
	    }

	  if (mcore_const_ok_for_inline (value))
	    break;
	}
    }

  /* 5 bits per iteration, a maximum of 5 times == 25 bits and leaves
     7 bits left in the constant -- which we know we can cover with
     a movi.  The final value can't be zero otherwise we'd have stopped
     in the previous iteration.  */
  if (value == 0 || ! mcore_const_ok_for_inline (value))
    abort ();

  /* Now, work our way backwards emitting the constant.  */

  /* Emit the value that remains -- it will be non-zero.  */
  operands[1] = GEN_INT (value);
  output_asm_insn (output_inline_const (SImode, operands), operands);

  while (i >= 0)
    {
      /* Shift anything we've already loaded.  */
      if (part[i].shift)
	{
	  operands[2] = GEN_INT (part[i].shift);
	  output_asm_insn ("lsli %0,%2", operands);
	  value <<= part[i].shift;
	}

      /* Add anything we need into the low 5 bits.  */
      if (part[i].low != 0)
	{
	  operands[2] = GEN_INT (part[i].low);
	  output_asm_insn ("addi %0,%2", operands);
	  value += part[i].low;
	}

      i--;
    }

  if (value != ovalue)		/* sanity */
    abort ();

  /* We've output all the instructions.  */
  return "";
}

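/* A sketch of the decomposition (added note): each loop iteration
   peels the low five bits off the value and shifts the rest down
   until what remains is inlinable; the output then rebuilds the
   constant top-down as

	movi	rd,remainder	// the inlinable leftover
	lsli	rd,shift	// make room for the next chunk
	addi	rd,chunk	// add a 5-bit chunk back in
	...			// once per entry recorded in part[]

   The exact chunks depend on where the loop stopped.  */
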
/* Return a sequence of instructions to perform DI or DF move.
   Since the MCORE cannot move a DI or DF in one instruction, we have
   to take care when we see overlapping source and dest registers.  */

const char *
mcore_output_movedouble (operands, mode)
     rtx operands[];
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  rtx dst = operands[0];
  rtx src = operands[1];

  if (GET_CODE (dst) == REG)
    {
      if (GET_CODE (src) == REG)
	{
	  int dstreg = REGNO (dst);
	  int srcreg = REGNO (src);

	  /* Ensure the second source not overwritten.  */
	  if (srcreg + 1 == dstreg)
	    return "mov %R0,%R1\n\tmov %0,%1";
	  else
	    return "mov %0,%1\n\tmov %R0,%R1";
	}
      else if (GET_CODE (src) == MEM)
	{
	  rtx memexp = XEXP (src, 0);
	  int dstreg = REGNO (dst);
	  int basereg = -1;

	  if (GET_CODE (memexp) == LABEL_REF)
	    return "lrw\t%0,[%1]\n\tlrw\t%R0,[%R1]";
	  else if (GET_CODE (memexp) == REG)
	    basereg = REGNO (memexp);
	  else if (GET_CODE (memexp) == PLUS)
	    {
	      if (GET_CODE (XEXP (memexp, 0)) == REG)
		basereg = REGNO (XEXP (memexp, 0));
	      else if (GET_CODE (XEXP (memexp, 1)) == REG)
		basereg = REGNO (XEXP (memexp, 1));
	      else
		abort ();
	    }
	  else
	    abort ();

	  /* ??? length attribute is wrong here.  */
	  if (dstreg == basereg)
	    {
	      /* Just load them in reverse order.  */
	      return "ldw\t%R0,%R1\n\tldw\t%0,%1";

	      /* XXX: alternative: move basereg to basereg+1
		 and then fall through.  */
	    }
	  else
	    return "ldw\t%0,%1\n\tldw\t%R0,%R1";
	}
      else if (GET_CODE (src) == CONST_INT)
	{
	  if (TARGET_LITTLE_END)
	    {
	      if (CONST_OK_FOR_I (INTVAL (src)))
		output_asm_insn ("movi %0,%1", operands);
	      else if (CONST_OK_FOR_M (INTVAL (src)))
		output_asm_insn ("bgeni %0,%P1", operands);
	      else if (INTVAL (src) == -1)
		output_asm_insn ("bmaski %0,32", operands);
	      else if (CONST_OK_FOR_N (INTVAL (src)))
		output_asm_insn ("bmaski %0,%N1", operands);
	      else
		abort ();

	      if (INTVAL (src) < 0)
		return "bmaski %R0,32";
	      else
		return "movi %R0,0";
	    }
	  else
	    {
	      if (CONST_OK_FOR_I (INTVAL (src)))
		output_asm_insn ("movi %R0,%1", operands);
	      else if (CONST_OK_FOR_M (INTVAL (src)))
		output_asm_insn ("bgeni %R0,%P1", operands);
	      else if (INTVAL (src) == -1)
		output_asm_insn ("bmaski %R0,32", operands);
	      else if (CONST_OK_FOR_N (INTVAL (src)))
		output_asm_insn ("bmaski %R0,%N1", operands);
	      else
		abort ();

	      if (INTVAL (src) < 0)
		return "bmaski %0,32";
	      else
		return "movi %0,0";
	    }
	}
      else
	abort ();
    }
  else if (GET_CODE (dst) == MEM && GET_CODE (src) == REG)
    return "stw\t%1,%0\n\tstw\t%R1,%R0";
  else
    abort ();
}

/* Predicates used by the templates.  */

/* Nonzero if OP can be source of a simple move operation.  */

int
mcore_general_movsrc_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  /* Any (MEM LABEL_REF) is OK.  That is a pc-relative load.  */
  if (GET_CODE (op) == MEM && GET_CODE (XEXP (op, 0)) == LABEL_REF)
    return 1;

  return general_operand (op, mode);
}

/* Nonzero if OP can be destination of a simple move operation.  */

int
mcore_general_movdst_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (GET_CODE (op) == REG && REGNO (op) == CC_REG)
    return 0;

  return general_operand (op, mode);
}

/* Nonzero if OP is a normal arithmetic register.  */

int
mcore_arith_reg_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (! register_operand (op, mode))
    return 0;

  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);

  if (GET_CODE (op) == REG)
    return REGNO (op) != CC_REG;

  return 1;
}

/* Nonzero if OP should be recognized during reload for an ixh/ixw
   operand.  See the ixh/ixw patterns.  */

int
mcore_reload_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (mcore_arith_reg_operand (op, mode))
    return 1;

  if (! reload_in_progress)
    return 0;

  return GET_CODE (op) == MEM;
}

/* Nonzero if OP is a valid source operand for an arithmetic insn.  */

int
mcore_arith_J_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_J (INTVAL (op)))
    return 1;

  return 0;
}

/* Nonzero if OP is a valid source operand for an arithmetic insn.  */

int
mcore_arith_K_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_K (INTVAL (op)))
    return 1;

  return 0;
}

/* Nonzero if OP is a valid source operand for a shift or rotate insn.  */

int
mcore_arith_K_operand_not_0 (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (   GET_CODE (op) == CONST_INT
      && CONST_OK_FOR_K (INTVAL (op))
      && INTVAL (op) != 0)
    return 1;

  return 0;
}

int
mcore_arith_K_S_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT)
    {
      if (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_M (~INTVAL (op)))
	return 1;
    }

  return 0;
}

int
mcore_arith_S_operand (op)
     rtx op;
{
  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (~INTVAL (op)))
    return 1;

  return 0;
}

int
mcore_arith_M_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (INTVAL (op)))
    return 1;

  return 0;
}

/* Nonzero if OP is a valid source operand for loading.  */

int
mcore_arith_imm_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT && const_ok_for_mcore (INTVAL (op)))
    return 1;

  return 0;
}

int
mcore_arith_any_imm_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT)
    return 1;

  return 0;
}

/* Nonzero if OP is a valid source operand for a cmov with two consts +/- 1.  */

int
mcore_arith_O_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_O (INTVAL (op)))
    return 1;

  return 0;
}

/* Nonzero if OP is a valid source operand for a btsti.  */

int
mcore_literal_K_operand (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_K (INTVAL (op)))
    return 1;

  return 0;
}

/* Nonzero if OP is a valid source operand for an add/sub insn.  */

int
mcore_addsub_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT)
    {
      return 1;

      /* The following is removed because it precludes large constants from being
	 returned as valid source operands for an add/sub insn.  While large
	 constants may not directly be used in an add/sub, they may if first loaded
	 into a register.  Thus, this predicate should indicate that they are valid,
	 and the constraint in mcore.md should control whether an additional load to
	 register is needed.  (see mcore.md, addsi).  -- DAC 4/2/1998  */
      /*
	if (CONST_OK_FOR_J (INTVAL (op)) || CONST_OK_FOR_L (INTVAL (op)))
	  return 1;
      */
    }

  return 0;
}

/* Nonzero if OP is a valid source operand for a compare operation.  */

int
mcore_compare_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT && INTVAL (op) == 0)
    return 1;

  return 0;
}

/* Expand insert bit field.  BRC  */

int
mcore_expand_insv (operands)
     rtx operands[];
{
  int width = INTVAL (operands[1]);
  int posn = INTVAL (operands[2]);
  int mask;
  rtx mreg, sreg, ereg;

  /* To get width 1 insv, the test in store_bit_field() (expmed.c, line 191)
     for width==1 must be removed.  Look around line 368.  This is something
     we really want the md part to do.  */
  if (width == 1 && GET_CODE (operands[3]) == CONST_INT)
    {
      /* Do directly with bseti or bclri.  */
      /* RBE: 2/97 consider only low bit of constant.  */
      if ((INTVAL (operands[3]) & 1) == 0)
	{
	  mask = ~(1 << posn);
	  emit_insn (gen_rtx (SET, SImode, operands[0],
			      gen_rtx (AND, SImode, operands[0], GEN_INT (mask))));
	}
      else
	{
	  mask = 1 << posn;
	  emit_insn (gen_rtx (SET, SImode, operands[0],
			      gen_rtx (IOR, SImode, operands[0], GEN_INT (mask))));
	}

      return 1;
    }

  /* Look at some bitfield placements that we aren't interested
     in handling ourselves, unless specifically directed to do so.  */
  if (! TARGET_W_FIELD)
    return 0;		/* Generally, give up about now.  */

  if (width == 8 && posn % 8 == 0)
    /* Byte sized and aligned; let caller break it up.  */
    return 0;

  if (width == 16 && posn % 16 == 0)
    /* Short sized and aligned; let caller break it up.  */
    return 0;

  /* The general case - we can do this a little bit better than what the
     machine independent part tries.  This will get rid of all the subregs
     that mess up constant folding in combine when working with relaxed
     immediates.  */

  /* If setting the entire field, do it directly.  */
  if (GET_CODE (operands[3]) == CONST_INT
      && INTVAL (operands[3]) == ((1 << width) - 1))
    {
      mreg = force_reg (SImode, GEN_INT (INTVAL (operands[3]) << posn));
      emit_insn (gen_rtx (SET, SImode, operands[0],
			  gen_rtx (IOR, SImode, operands[0], mreg)));
      return 1;
    }

  /* Generate the clear mask.  */
  mreg = force_reg (SImode, GEN_INT (~(((1 << width) - 1) << posn)));

  /* Clear the field, to overlay it later with the source.  */
  emit_insn (gen_rtx (SET, SImode, operands[0],
		      gen_rtx (AND, SImode, operands[0], mreg)));

  /* If the source is constant 0, we've nothing to add back.  */
  if (GET_CODE (operands[3]) == CONST_INT && INTVAL (operands[3]) == 0)
    return 1;

  /* XXX: Should we worry about more games with constant values?
     We've covered the high profile: set/clear single-bit and many-bit
     fields.  How often do we see "arbitrary bit pattern" constants?  */
  sreg = copy_to_mode_reg (SImode, operands[3]);

  /* Extract src as same width as dst (needed for signed values).  We
     always have to do this since we widen everything to SImode.
     We don't have to mask if we're shifting this up against the
     MSB of the register (i.e., the shift will push out any hi-order
     bits).  */
  if (width + posn != (int) GET_MODE_BITSIZE (SImode))
    {
      ereg = force_reg (SImode, GEN_INT ((1 << width) - 1));
      emit_insn (gen_rtx (SET, SImode, sreg,
			  gen_rtx (AND, SImode, sreg, ereg)));
    }

  /* Insert source value in dest.  */
  if (posn != 0)
    emit_insn (gen_rtx (SET, SImode, sreg,
			gen_rtx (ASHIFT, SImode, sreg, GEN_INT (posn))));

  emit_insn (gen_rtx (SET, SImode, operands[0],
		      gen_rtx (IOR, SImode, operands[0], sreg)));

  return 1;
}

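/* A sketch of the general-case expansion (added note): for width = 4,
   posn = 8 and a variable source, the emitted insns compute

	dst &= ~(0xF << 8);	// clear the field
	src &= 0xF;		// truncate source to the field width
	src <<= 8;		// move it into position
	dst |= src;		// overlay it

   each as a separate SImode operation, avoiding the subregs mentioned
   above.  */
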
/* Return 1 if OP is a load multiple operation.  It is known to be a
   PARALLEL and the first section will be tested.  */

int
mcore_load_multiple_operation (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  int count = XVECLEN (op, 0);
  int dest_regno;
  rtx src_addr;
  int i;

  /* Perform a quick check so we don't blow up below.  */
  if (count <= 1
      || GET_CODE (XVECEXP (op, 0, 0)) != SET
      || GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != REG
      || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != MEM)
    return 0;

  dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, 0)));
  src_addr = XEXP (SET_SRC (XVECEXP (op, 0, 0)), 0);

  for (i = 1; i < count; i++)
    {
      rtx elt = XVECEXP (op, 0, i);

      if (GET_CODE (elt) != SET
	  || GET_CODE (SET_DEST (elt)) != REG
	  || GET_MODE (SET_DEST (elt)) != SImode
	  || REGNO (SET_DEST (elt)) != (unsigned) (dest_regno + i)
	  || GET_CODE (SET_SRC (elt)) != MEM
	  || GET_MODE (SET_SRC (elt)) != SImode
	  || GET_CODE (XEXP (SET_SRC (elt), 0)) != PLUS
	  || ! rtx_equal_p (XEXP (XEXP (SET_SRC (elt), 0), 0), src_addr)
	  || GET_CODE (XEXP (XEXP (SET_SRC (elt), 0), 1)) != CONST_INT
	  || INTVAL (XEXP (XEXP (SET_SRC (elt), 0), 1)) != i * 4)
	return 0;
    }

  return 1;
}

/* Similar, but tests for store multiple.  */

int
mcore_store_multiple_operation (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  int count = XVECLEN (op, 0);
  int src_regno;
  rtx dest_addr;
  int i;

  /* Perform a quick check so we don't blow up below.  */
  if (count <= 1
      || GET_CODE (XVECEXP (op, 0, 0)) != SET
      || GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != MEM
      || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != REG)
    return 0;

  src_regno = REGNO (SET_SRC (XVECEXP (op, 0, 0)));
  dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, 0)), 0);

  for (i = 1; i < count; i++)
    {
      rtx elt = XVECEXP (op, 0, i);

      if (GET_CODE (elt) != SET
	  || GET_CODE (SET_SRC (elt)) != REG
	  || GET_MODE (SET_SRC (elt)) != SImode
	  || REGNO (SET_SRC (elt)) != (unsigned) (src_regno + i)
	  || GET_CODE (SET_DEST (elt)) != MEM
	  || GET_MODE (SET_DEST (elt)) != SImode
	  || GET_CODE (XEXP (SET_DEST (elt), 0)) != PLUS
	  || ! rtx_equal_p (XEXP (XEXP (SET_DEST (elt), 0), 0), dest_addr)
	  || GET_CODE (XEXP (XEXP (SET_DEST (elt), 0), 1)) != CONST_INT
	  || INTVAL (XEXP (XEXP (SET_DEST (elt), 0), 1)) != i * 4)
	return 0;
    }

  return 1;
}
\f
/* ??? Block move stuff stolen from m88k.  This code has not been
   verified for correctness.  */

/* Emit code to perform a block move.  Choose the best method.

   OPERANDS[0] is the destination.
   OPERANDS[1] is the source.
   OPERANDS[2] is the size.
   OPERANDS[3] is the alignment safe to use.  */

/* Emit code to perform a block move with an offset sequence of ldw/st
   instructions (..., ldw 0, stw 1, ldw 1, stw 0, ...).  SIZE and ALIGN are
   known constants.  DEST and SRC are registers.  OFFSET is the known
   starting point for the output pattern.  */

static enum machine_mode mode_from_align[] =
{
  VOIDmode, QImode, HImode, VOIDmode, SImode,
  VOIDmode, VOIDmode, VOIDmode, DImode
};

static void
block_move_sequence (dest, dst_mem, src, src_mem, size, align, offset)
     rtx dest, dst_mem;
     rtx src, src_mem;
     int size;
     int align;
     int offset;
{
  rtx temp[2];
  enum machine_mode mode[2];
  int amount[2];
  int active[2];
  int phase = 0;
  int next;
  int offset_ld = offset;
  int offset_st = offset;

  active[0] = active[1] = FALSE;

  /* Establish parameters for the first load and for the second load if
     it is known to be the same mode as the first.  */
  amount[0] = amount[1] = align;

  mode[0] = mode_from_align[align];

  temp[0] = gen_reg_rtx (mode[0]);

  if (size >= 2 * align)
    {
      mode[1] = mode[0];
      temp[1] = gen_reg_rtx (mode[1]);
    }

  do
    {
      rtx srcp, dstp;

      next = phase;
      phase = !phase;

      if (size > 0)
	{
	  /* Change modes as the sequence tails off.  */
	  if (size < amount[next])
	    {
	      amount[next] = (size >= 4 ? 4 : (size >= 2 ? 2 : 1));
	      mode[next] = mode_from_align[amount[next]];
	      temp[next] = gen_reg_rtx (mode[next]);
	    }

	  size -= amount[next];
	  srcp = gen_rtx (MEM,
#if 0
			  MEM_IN_STRUCT_P (src_mem) ? mode[next] : BLKmode,
#else
			  mode[next],
#endif
			  gen_rtx (PLUS, Pmode, src,
				   gen_rtx (CONST_INT, SImode, offset_ld)));

	  RTX_UNCHANGING_P (srcp) = RTX_UNCHANGING_P (src_mem);
	  MEM_VOLATILE_P (srcp) = MEM_VOLATILE_P (src_mem);
	  MEM_IN_STRUCT_P (srcp) = 1;
	  emit_insn (gen_rtx (SET, VOIDmode, temp[next], srcp));
	  offset_ld += amount[next];
	  active[next] = TRUE;
	}

      if (active[phase])
	{
	  active[phase] = FALSE;

	  dstp = gen_rtx (MEM,
#if 0
			  MEM_IN_STRUCT_P (dst_mem) ? mode[phase] : BLKmode,
#else
			  mode[phase],
#endif
			  gen_rtx (PLUS, Pmode, dest,
				   gen_rtx (CONST_INT, SImode, offset_st)));

	  RTX_UNCHANGING_P (dstp) = RTX_UNCHANGING_P (dst_mem);
	  MEM_VOLATILE_P (dstp) = MEM_VOLATILE_P (dst_mem);
	  MEM_IN_STRUCT_P (dstp) = 1;
	  emit_insn (gen_rtx (SET, VOIDmode, dstp, temp[phase]));
	  offset_st += amount[phase];
	}
    }
  while (active[next]);
}

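/* An added note on the schedule this produces: loads and stores
   alternate between the two temporaries, so each store consumes the
   value loaded on the previous iteration.  For size = 8, align = 4 the
   sequence is roughly

	ldw	t0,(src,0)
	ldw	t1,(src,4)
	stw	t0,(dst,0)
	stw	t1,(dst,4)

   keeping one load in flight while the prior value is stored.  */
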
void
mcore_expand_block_move (dst_mem, src_mem, operands)
     rtx dst_mem;
     rtx src_mem;
     rtx * operands;
{
  int align = INTVAL (operands[3]);
  int bytes;

  if (GET_CODE (operands[2]) == CONST_INT)
    {
      bytes = INTVAL (operands[2]);

      if (bytes <= 0)
	return;
      if (align > 4)
	align = 4;

      /* RBE: bumped 1 and 2 byte align from 1 and 2 to 4 and 8 bytes before
	 we give up and go to memcpy.  */
      if ((align == 4 && (bytes <= 4*4
			  || ((bytes & 01) == 0 && bytes <= 8*4)
			  || ((bytes & 03) == 0 && bytes <= 16*4)))
	  || (align == 2 && bytes <= 4*2)
	  || (align == 1 && bytes <= 4*1))
	{
	  block_move_sequence (operands[0], dst_mem, operands[1], src_mem,
			       bytes, align, 0);
	  return;
	}
    }

  /* If we get here, just use the library routine.  */
  emit_library_call (gen_rtx (SYMBOL_REF, Pmode, "memcpy"), 0, VOIDmode, 3,
		     operands[0], Pmode, operands[1], Pmode, operands[2],
		     SImode);
}
\f

/* Code to generate prologue and epilogue sequences.  */
static int number_of_regs_before_varargs;

/* Set by SETUP_INCOMING_VARARGS to indicate to the prologue that this
   is for a varargs function.  */
static int current_function_anonymous_args;

#define STACK_BYTES (STACK_BOUNDARY/BITS_PER_UNIT)
#define STORE_REACH (64)	/* Maximum displacement of word store + 4.  */
#define ADDI_REACH  (32)	/* Maximum addi operand.  */

8f90be4c
NC
2010static void
2011layout_mcore_frame (infp)
2012 struct mcore_frame * infp;
2013{
2014 int n;
2015 unsigned int i;
2016 int nbytes;
2017 int regarg;
2018 int localregarg;
2019 int localreg;
2020 int outbounds;
2021 unsigned int growths;
2022 int step;
2023
2024 /* Might have to spill bytes to re-assemble a big argument that
4816b8e4 2025 was passed partially in registers and partially on the stack. */
8f90be4c
NC
2026 nbytes = current_function_pretend_args_size;
2027
2028 /* Determine how much space for spilled anonymous args (e.g., stdarg). */
2029 if (current_function_anonymous_args)
2030 nbytes += (NPARM_REGS - number_of_regs_before_varargs) * UNITS_PER_WORD;
2031
2032 infp->arg_size = nbytes;
2033
2034 /* How much space to save non-volatile registers we stomp. */
2035 infp->reg_mask = calc_live_regs (& n);
2036 infp->reg_size = n * 4;
2037
2038 /* And the rest of it... locals and space for overflowed outbounds. */
2039 infp->local_size = get_frame_size ();
2040 infp->outbound_size = current_function_outgoing_args_size;
2041
2042 /* Make sure we have a whole number of words for the locals. */
2043 if (infp->local_size % STACK_BYTES)
2044 infp->local_size = (infp->local_size + STACK_BYTES - 1) & ~ (STACK_BYTES -1);
2045
2046 /* Only thing we know we have to pad is the outbound space, since
2047 we've aligned our locals assuming that base of locals is aligned. */
2048 infp->pad_local = 0;
2049 infp->pad_reg = 0;
2050 infp->pad_outbound = 0;
2051 if (infp->outbound_size % STACK_BYTES)
2052 infp->pad_outbound = STACK_BYTES - (infp->outbound_size % STACK_BYTES);
2053
2054 /* Now we see how we want to stage the prologue so that it does
2055 the most appropriate stack growth and register saves to either:
2056 (1) run fast,
2057 (2) reduce instruction space, or
2058 (3) reduce stack space. */
b6a1cbae 2059 for (i = 0; i < ARRAY_SIZE (infp->growth); i++)
8f90be4c
NC
2060 infp->growth[i] = 0;
2061
2062 regarg = infp->reg_size + infp->arg_size;
2063 localregarg = infp->local_size + regarg;
2064 localreg = infp->local_size + infp->reg_size;
2065 outbounds = infp->outbound_size + infp->pad_outbound;
2066 growths = 0;
2067
2068 /* XXX: Consider one where we consider localregarg + outbound too! */
2069
2070 /* Frame of <= 32 bytes and using stm would get <= 2 registers.
2071 use stw's with offsets and buy the frame in one shot. */
2072 if (localregarg <= ADDI_REACH
2073 && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
2074 {
2075 /* Make sure we'll be aligned. */
2076 if (localregarg % STACK_BYTES)
2077 infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);
2078
2079 step = localregarg + infp->pad_reg;
2080 infp->reg_offset = infp->local_size;
2081
2082 if (outbounds + step <= ADDI_REACH && !frame_pointer_needed)
2083 {
2084 step += outbounds;
2085 infp->reg_offset += outbounds;
2086 outbounds = 0;
2087 }
2088
2089 infp->arg_offset = step - 4;
2090 infp->growth[growths++] = step;
2091 infp->reg_growth = growths;
2092 infp->local_growth = growths;
2093
4816b8e4 2094 /* If we haven't already folded it in. */
8f90be4c
NC
2095 if (outbounds)
2096 infp->growth[growths++] = outbounds;
2097
2098 goto finish;
2099 }
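  /* Illustrative trace of this single-shot case (assuming STACK_BYTES == 8,
     no outbounds and no frame pointer): with arg_size == 0, reg_size == 8
     and local_size == 16, localregarg is 24 <= ADDI_REACH, pad_reg stays 0,
     and growth[0] == 24 buys the whole frame with one subi; the two saved
     registers then go to sp+16 and sp+20 via stw, with reg_growth ==
     local_growth == 1.  */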
2100
2101 /* Frame can't be done with a single subi, but can be done with 2
2102 insns. If the 'stm' is getting <= 2 registers, we use stw's and
2103 shift some of the stack purchase into the first subi, so both are
2104 single instructions. */
2105 if (localregarg <= STORE_REACH
2106 && (infp->local_size > ADDI_REACH)
2107 && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
2108 {
2109 int all;
2110
2111 /* Make sure we'll be aligned; use either pad_reg or pad_local. */
2112 if (localregarg % STACK_BYTES)
2113 infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);
2114
2115 all = localregarg + infp->pad_reg + infp->pad_local;
2116 step = ADDI_REACH; /* As much up front as we can. */
2117 if (step > all)
2118 step = all;
2119
2120 /* XXX: Consider whether step will still be aligned; we believe so. */
2121 infp->arg_offset = step - 4;
2122 infp->growth[growths++] = step;
2123 infp->reg_growth = growths;
2124 infp->reg_offset = step - infp->pad_reg - infp->reg_size;
2125 all -= step;
2126
2127 /* Can we fold in any space required for outbounds? */
2128 if (outbounds + all <= ADDI_REACH && !frame_pointer_needed)
2129 {
2130 all += outbounds;
2131 outbounds = 0;
2132 }
2133
2134 /* Get the rest of the locals in place. */
2135 step = all;
2136 infp->growth[growths++] = step;
2137 infp->local_growth = growths;
2138 all -= step;
2139
2140 assert (all == 0);
2141
2142 /* Finish off if we need to do so. */
2143 if (outbounds)
2144 infp->growth[growths++] = outbounds;
2145
2146 goto finish;
2147 }
2148
2149 /* Registers + args is nicely aligned, so we'll buy that in one shot.
2150 Then we buy the rest of the frame in 1 or 2 steps depending on
2151 whether we need a frame pointer. */
2152 if ((regarg % STACK_BYTES) == 0)
2153 {
2154 infp->growth[growths++] = regarg;
2155 infp->reg_growth = growths;
2156 infp->arg_offset = regarg - 4;
2157 infp->reg_offset = 0;
2158
2159 if (infp->local_size % STACK_BYTES)
2160 infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);
2161
2162 step = infp->local_size + infp->pad_local;
2163
2164 if (!frame_pointer_needed)
2165 {
2166 step += outbounds;
2167 outbounds = 0;
2168 }
2169
2170 infp->growth[growths++] = step;
2171 infp->local_growth = growths;
2172
2173 /* If there's any left to be done. */
2174 if (outbounds)
2175 infp->growth[growths++] = outbounds;
2176
2177 goto finish;
2178 }
2179
2180 /* XXX: optimizations that we'll want to play with....
2181 -- regarg is not aligned, but it's a small number of registers;
2182 use some of localsize so that regarg is aligned and then
2183 save the registers. */
2184
2185 /* Simple encoding; plods down the stack buying the pieces as it goes.
2186 -- does not optimize space consumption.
2187 -- does not attempt to optimize instruction counts.
2188 -- but it is safe for all alignments. */
2189 if (regarg % STACK_BYTES != 0)
2190 infp->pad_reg = STACK_BYTES - (regarg % STACK_BYTES);
2191
2192 infp->growth[growths++] = infp->arg_size + infp->reg_size + infp->pad_reg;
2193 infp->reg_growth = growths;
2194 infp->arg_offset = infp->growth[0] - 4;
2195 infp->reg_offset = 0;
2196
2197 if (frame_pointer_needed)
2198 {
2199 if (infp->local_size % STACK_BYTES != 0)
2200 infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);
2201
2202 infp->growth[growths++] = infp->local_size + infp->pad_local;
2203 infp->local_growth = growths;
2204
2205 infp->growth[growths++] = outbounds;
2206 }
2207 else
2208 {
2209 if ((infp->local_size + outbounds) % STACK_BYTES != 0)
2210 infp->pad_local = STACK_BYTES - ((infp->local_size + outbounds) % STACK_BYTES);
2211
2212 infp->growth[growths++] = infp->local_size + infp->pad_local + outbounds;
2213 infp->local_growth = growths;
2214 }
2215
2216 /* Anything else that we've forgotten?  Plus a few consistency checks. */
2217 finish:
2218 assert (infp->reg_offset >= 0);
2219 assert (growths <= MAX_STACK_GROWS);
2220
2221 for (i = 0; i < growths; i++)
2222 {
2223 if (infp->growth[i] % STACK_BYTES)
2224 {
2225 fprintf (stderr, "stack growth of %d is not %d aligned\n",
2226 infp->growth[i], STACK_BYTES);
2227 abort ();
2228 }
2229 }
2230}
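/* Summary of the invariants established above: each growth[i] is
   STACK_BYTES aligned (the consistency loop aborts otherwise), the
   growths sum to the whole frame, and reg_growth/local_growth are
   one-based counts of how many growths must be performed before the
   register saves and the locals, respectively, are in place;
   mcore_expand_prolog below walks growth[] in exactly that order.  */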
2231
2232/* Define the offset between two registers, one to be eliminated, and
2233 the other its replacement, at the start of a routine. */
2234
2235int
2236mcore_initial_elimination_offset (from, to)
2237 int from;
2238 int to;
2239{
2240 int above_frame;
2241 int below_frame;
2242 struct mcore_frame fi;
2243
2244 layout_mcore_frame (& fi);
2245
2246 /* fp to ap */
2247 above_frame = fi.local_size + fi.pad_local + fi.reg_size + fi.pad_reg;
2248 /* sp to fp */
2249 below_frame = fi.outbound_size + fi.pad_outbound;
2250
2251 if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
2252 return above_frame;
2253
2254 if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
2255 return above_frame + below_frame;
2256
2257 if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
2258 return below_frame;
2259
2260 abort ();
2261
2262 return 0;
2263}
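/* Illustrative picture of the offsets computed above (the stack grows
   downwards):

      ap -> incoming args (including any spilled anonymous args)
            saved non-volatile registers (+ pad)  \  above_frame
            locals (+ pad)                        /  (ap - fp)
      fp -> outbound argument area (+ pad)        >  below_frame (fp - sp)
      sp ->                                                            */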
2264
2265/* Keep track of some information about varargs for the prolog. */
2266
2267void
2268mcore_setup_incoming_varargs (args_so_far, mode, type, ptr_pretend_size)
2269 CUMULATIVE_ARGS args_so_far;
2270 enum machine_mode mode;
2271 tree type;
2272 int * ptr_pretend_size ATTRIBUTE_UNUSED;
2273{
2274 current_function_anonymous_args = 1;
2275
2276 /* We need to know how many argument registers are used before
2277 the varargs start, so that we can push the remaining argument
2278 registers during the prologue. */
2279 number_of_regs_before_varargs = args_so_far + mcore_num_arg_regs (mode, type);
2280
2281 /* There is a bug somewhere in the arg handling code.
2282 Until I can find it, this workaround always pushes the
2283 last named argument onto the stack. */
2284 number_of_regs_before_varargs = args_so_far;
2285
2286 /* The last named argument may be split between argument registers
2287 and the stack. Allow for this here. */
2288 if (number_of_regs_before_varargs > NPARM_REGS)
2289 number_of_regs_before_varargs = NPARM_REGS;
2290}
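/* Illustrative consequence of the workaround above: for a declaration
   such as  int f (int a, ...)  the register holding `a' is not counted,
   so the prologue spills it to the stack along with all the remaining
   parameter registers.  That matches the comment: the last named
   argument always ends up addressable on the stack.  */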
2291
2292void
2293mcore_expand_prolog ()
2294{
2295 struct mcore_frame fi;
2296 int space_allocated = 0;
2297 int growth = 0;
2298
2299 /* Find out what we're doing. */
2300 layout_mcore_frame (&fi);
2301
2302 space_allocated = fi.arg_size + fi.reg_size + fi.local_size +
2303 fi.outbound_size + fi.pad_outbound + fi.pad_local + fi.pad_reg;
2304
2305 if (TARGET_CG_DATA)
2306 {
2307 /* Emit a symbol for this routine's frame size. */
2308 rtx x;
2309 int len;
2310
2311 x = DECL_RTL (current_function_decl);
2312
2313 if (GET_CODE (x) != MEM)
2314 abort ();
2315
2316 x = XEXP (x, 0);
2317
2318 if (GET_CODE (x) != SYMBOL_REF)
2319 abort ();
2320
2321 if (mcore_current_function_name)
2322 free (mcore_current_function_name);
2323
2324 len = strlen (XSTR (x, 0)) + 1;
2325 mcore_current_function_name = (char *) xmalloc (len);
2326
2327 memcpy (mcore_current_function_name, XSTR (x, 0), len);
2328
2329 ASM_OUTPUT_CG_NODE (asm_out_file, mcore_current_function_name, space_allocated);
2330
2331 if (current_function_calls_alloca)
2332 ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name, "alloca", 1);
2333
2334 /* 970425: RBE:
2335 We're looking at how the 8-byte alignment affects stack layout
2336 and where we had to pad things. This emits information we can
2337 extract which tells us about frame sizes and the like. */
2338 fprintf (asm_out_file,
2339 "\t.equ\t__$frame$info$_%s_$_%d_%d_x%x_%d_%d_%d,0\n",
2340 mcore_current_function_name,
2341 fi.arg_size, fi.reg_size, fi.reg_mask,
2342 fi.local_size, fi.outbound_size,
2343 frame_pointer_needed);
2344 }
2345
2346 if (mcore_naked_function_p ())
2347 return;
2348
2349 /* Handle stdarg+regsaves in one shot: can't be more than 64 bytes. */
2350 output_stack_adjust (-1, fi.growth[growth++]); /* grows it */
2351
2352 /* If we have a parameter passed partially in regs and partially in memory,
2353 the registers will have been stored to memory already in function.c. So
2354 we only need to do something here for varargs functions. */
2355 if (fi.arg_size != 0 && current_function_pretend_args_size == 0)
2356 {
2357 int offset;
2358 int rn = FIRST_PARM_REG + NPARM_REGS - 1;
2359 int remaining = fi.arg_size;
2360
2361 for (offset = fi.arg_offset; remaining >= 4; offset -= 4, rn--, remaining -= 4)
2362 {
2363 emit_insn (gen_movsi
2364 (gen_rtx (MEM, SImode,
2365 plus_constant (stack_pointer_rtx, offset)),
2366 gen_rtx (REG, SImode, rn)));
2367 }
2368 }
2369
2370 /* Do we need another stack adjustment before we do the register saves? */
2371 if (growth < fi.reg_growth)
2372 output_stack_adjust (-1, fi.growth[growth++]); /* grows it */
2373
2374 if (fi.reg_size != 0)
2375 {
2376 int i;
2377 int offs = fi.reg_offset;
2378
2379 for (i = 15; i >= 0; i--)
2380 {
2381 if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
2382 {
2383 int first_reg = 15;
2384
2385 while (fi.reg_mask & (1 << first_reg))
2386 first_reg--;
2387 first_reg++;
2388
2389 emit_insn (gen_store_multiple (gen_rtx (MEM, SImode, stack_pointer_rtx),
2390 gen_rtx (REG, SImode, first_reg),
2391 GEN_INT (16 - first_reg)));
2392
2393 i -= (15 - first_reg);
2394 offs += (16 - first_reg) * 4;
2395 }
2396 else if (fi.reg_mask & (1 << i))
2397 {
2398 emit_insn (gen_movsi
2399 (gen_rtx (MEM, SImode,
2400 plus_constant (stack_pointer_rtx, offs)),
2401 gen_rtx (REG, SImode, i)));
2402 offs += 4;
2403 }
2404 }
2405 }
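  /* Worked example for the save loop above (illustrative): with
     reg_mask == 0xff00 (r8-r15 live) and reg_offset == 0, the 0xc000 test
     succeeds, first_reg scans down to 8, and a single stm stores the eight
     registers r8-r15 at the sp.  A mask in which r14 and r15 are not both
     set falls through to individual stw's instead.  */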
2406
2407 /* Figure the locals + outbounds. */
2408 if (frame_pointer_needed)
2409 {
2410 /* If we haven't already purchased to 'fp'. */
2411 if (growth < fi.local_growth)
2412 output_stack_adjust (-1, fi.growth[growth++]); /* grows it */
2413
2414 emit_insn (gen_movsi (frame_pointer_rtx, stack_pointer_rtx));
2415
2416 /* ... and then go any remaining distance for outbounds, etc. */
2417 if (fi.growth[growth])
2418 output_stack_adjust (-1, fi.growth[growth++]);
2419 }
2420 else
2421 {
2422 if (growth < fi.local_growth)
2423 output_stack_adjust (-1, fi.growth[growth++]); /* grows it */
2424 if (fi.growth[growth])
2425 output_stack_adjust (-1, fi.growth[growth++]);
2426 }
2427}
2428
2429void
2430mcore_expand_epilog ()
2431{
2432 struct mcore_frame fi;
2433 int i;
2434 int offs;
2435 int growth = MAX_STACK_GROWS - 1;
2436
2437
2438 /* Find out what we're doing. */
2439 layout_mcore_frame (&fi);
2440
2441 if (mcore_naked_function_p ())
2442 return;
2443
2444 /* If we had a frame pointer, restore the sp from that. */
2445 if (frame_pointer_needed)
2446 {
2447 emit_insn (gen_movsi (stack_pointer_rtx, frame_pointer_rtx));
2448 growth = fi.local_growth - 1;
2449 }
2450 else
2451 {
2452 /* XXX: while loop should accumulate and do a single sell. */
2453 while (growth >= fi.local_growth)
2454 {
2455 if (fi.growth[growth] != 0)
2456 output_stack_adjust (1, fi.growth[growth]);
2457 growth--;
2458 }
2459 }
2460
2461 /* Make sure we've shrunk stack back to the point where the registers
2462 were laid down. This is typically 0/1 iterations. Then pull the
2463 register save information back off the stack. */
2464 while (growth >= fi.reg_growth)
2465 output_stack_adjust (1, fi.growth[growth--]);
2466
2467 offs = fi.reg_offset;
2468
2469 for (i = 15; i >= 0; i--)
2470 {
2471 if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
2472 {
2473 int first_reg;
2474
2475 /* Find the starting register. */
2476 first_reg = 15;
2477
2478 while (fi.reg_mask & (1 << first_reg))
2479 first_reg--;
2480
2481 first_reg++;
2482
2483 emit_insn (gen_load_multiple (gen_rtx (REG, SImode, first_reg),
2484 gen_rtx (MEM, SImode, stack_pointer_rtx),
2485 GEN_INT (16 - first_reg)));
2486
2487 i -= (15 - first_reg);
2488 offs += (16 - first_reg) * 4;
2489 }
2490 else if (fi.reg_mask & (1 << i))
2491 {
2492 emit_insn (gen_movsi
2493 (gen_rtx (REG, SImode, i),
2494 gen_rtx (MEM, SImode,
2495 plus_constant (stack_pointer_rtx, offs))));
2496 offs += 4;
2497 }
2498 }
2499
2500 /* Give back anything else. */
2501 /* XXX: Should accumulate the total and then give it back. */
2502 while (growth >= 0)
2503 output_stack_adjust (1, fi.growth[growth--]);
2504}
2505\f
2506/* This code is borrowed from the SH port. */
2507
2508/* The MCORE cannot load a large constant into a register; constants have to
2509 come from a pc relative load. The target of a pc relative load
2510 instruction must be less than 1k in front of the instruction. This
2511 means that we often have to dump a constant inside a function, and
2512 generate code to branch around it.
2513
2514 It is important to minimize this, since the branches will slow things
2515 down and make things bigger.
2516
2517 Worst case code looks like:
2518
2519 lrw L1,r0
2520 br L2
2521 align
2522 L1: .long value
2523 L2:
2524 ..
2525
2526 lrw L3,r0
2527 br L4
2528 align
2529 L3: .long value
2530 L4:
2531 ..
2532
2533 We fix this by performing a scan before scheduling, which notices which
2534 instructions need to have their operands fetched from the constant table
2535 and builds the table.
2536
2537 The algorithm is:
2538
2539 scan, find an instruction which needs a pcrel move. Look forward, find the
2540 last barrier which is within MAX_COUNT bytes of the requirement.
2541 If there isn't one, make one. Process all the instructions between
2542 the find and the barrier.
2543
2544 In the above example, we can tell that L3 is within 1k of L1, so
2545 the first move can be shrunk from the 2 insn+constant sequence into
2546 just 1 insn, and the constant moved to L3 to make:
2547
2548 lrw L1,r0
2549 ..
2550 lrw L3,r0
2551 br L4
2552 align
2553 L3:.long value
2554 L4:.long value
2555
2556 Then the second move becomes the target for the shortening process. */
2557
2558typedef struct
2559{
2560 rtx value; /* Value in table. */
2561 rtx label; /* Label of value. */
2562} pool_node;
2563
2564/* The maximum number of constants that can fit into one pool, since
2565 the pc relative range is 0...1020 bytes and constants are at least 4
2566 bytes long. We subtract 4 from the range to allow for the case where
2567 we need to add a branch/align before the constant pool. */
2568
2569#define MAX_COUNT 1016
2570#define MAX_POOL_SIZE (MAX_COUNT/4)
2571static pool_node pool_vector[MAX_POOL_SIZE];
2572static int pool_size;
2573
2574/* Dump out any constants accumulated in the final pass. These
2575 will only be labels. */
2576
2577const char *
2578mcore_output_jump_label_table ()
2579{
2580 int i;
2581
2582 if (pool_size)
2583 {
2584 fprintf (asm_out_file, "\t.align 2\n");
2585
2586 for (i = 0; i < pool_size; i++)
2587 {
2588 pool_node * p = pool_vector + i;
2589
2590 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (p->label));
2591
2592 output_asm_insn (".long %0", &p->value);
2593 }
2594
2595 pool_size = 0;
2596 }
2597
2598 return "";
2599}
2600
2601#if 0 /* XXX temporarily suppressed until I have time to look at what this code does. */
2602
2603/* We need these below. They use information stored in tables to figure out
2604 what values are in what registers, etc. This is okay, since these tables
2605 are valid at the time mcore_dependent_simplify_rtx() is invoked. Don't
2606 use them anywhere else. BRC */
2607
2608extern unsigned HOST_WIDE_INT nonzero_bits PARAMS ((rtx, enum machine_mode));
2609extern int num_sign_bit_copies PARAMS ((rtx, enum machine_mode));
2610
2611/* Do machine dependent simplifications: see simplify_rtx() in combine.c.
2612 GENERAL_SIMPLIFY controls whether general machine independent
2613 simplifications should be tried after machine dependent ones. Thus,
2614 we can filter out certain simplifications and keep the simplify_rtx()
2615 from changing things that we just simplified in a machine dependent
2616 fashion. This is experimental. BRC */
2617rtx
2618mcore_dependent_simplify_rtx (x, int_op0_mode, last, in_dest, general_simplify)
2619 rtx x;
2620 int int_op0_mode;
2621 int last;
2622 int in_dest;
2623 int * general_simplify;
2624{
2625 enum machine_mode mode = GET_MODE (x);
2626 enum rtx_code code = GET_CODE (x);
2627
2628 /* Always simplify unless explicitly asked not to. */
2629 * general_simplify = 1;
2630
2631 if (code == IF_THEN_ELSE)
2632 {
2633 int i;
2634 rtx cond = XEXP (x, 0);
2635 rtx true_rtx = XEXP (x, 1);
2636 rtx false_rtx = XEXP (x, 2);
2637 enum rtx_code true_code = GET_CODE (cond);
2638
2639 /* On the mcore, when doing -mcmov-one, we don't want to simplify:
2640
2641 (if_then_else (ne A 0) C1 0)
2642
2643 if it would be turned into a shift by simplify_if_then_else().
2644 Instead, leave it alone so that it will collapse into a conditional
2645 move. Besides, at least for the mcore, doing this simplification does
2646 not typically help. See combine.c, line 4217. BRC */
2647
2648 if (true_code == NE && XEXP (cond, 1) == const0_rtx
2649 && false_rtx == const0_rtx && GET_CODE (true_rtx) == CONST_INT
2650 && ((1 == nonzero_bits (XEXP (cond, 0), mode)
2651 && (i = exact_log2 (INTVAL (true_rtx))) >= 0)
2652 || ((num_sign_bit_copies (XEXP (cond, 0), mode)
2653 == GET_MODE_BITSIZE (mode))
2654 && (i = exact_log2 (- INTVAL (true_rtx))) >= 0)))
2655 {
2656 *general_simplify = 0;
2657 return x;
2658 }
2659 }
2660
2661 return x;
2662}
2663#endif
2664
2665/* Check whether insn is a candidate for a conditional. */
2666
2667static cond_type
2668is_cond_candidate (insn)
2669 rtx insn;
2670{
2671 /* The only things we conditionalize are those that can be directly
2672 changed into a conditional. Only bother with SImode items. If
2673 we wanted to be a little more aggressive, we could also do other
2674 modes such as DImode with reg-reg move or load 0. */
2675 if (GET_CODE (insn) == INSN)
2676 {
2677 rtx pat = PATTERN (insn);
2678 rtx src, dst;
2679
2680 if (GET_CODE (pat) != SET)
2681 return COND_NO;
2682
2683 dst = XEXP (pat, 0);
2684
2685 if ((GET_CODE (dst) != REG &&
2686 GET_CODE (dst) != SUBREG) ||
2687 GET_MODE (dst) != SImode)
2688 return COND_NO;
2689
2690 src = XEXP (pat, 1);
2691
2692 if ((GET_CODE (src) == REG ||
2693 (GET_CODE (src) == SUBREG &&
2694 GET_CODE (SUBREG_REG (src)) == REG)) &&
2695 GET_MODE (src) == SImode)
2696 return COND_MOV_INSN;
2697 else if (GET_CODE (src) == CONST_INT &&
2698 INTVAL (src) == 0)
2699 return COND_CLR_INSN;
2700 else if (GET_CODE (src) == PLUS &&
2701 (GET_CODE (XEXP (src, 0)) == REG ||
2702 (GET_CODE (XEXP (src, 0)) == SUBREG &&
2703 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
2704 GET_MODE (XEXP (src, 0)) == SImode &&
2705 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2706 INTVAL (XEXP (src, 1)) == 1)
2707 return COND_INC_INSN;
2708 else if (((GET_CODE (src) == MINUS &&
2709 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2710 INTVAL( XEXP (src, 1)) == 1) ||
2711 (GET_CODE (src) == PLUS &&
2712 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2713 INTVAL (XEXP (src, 1)) == -1)) &&
2714 (GET_CODE (XEXP (src, 0)) == REG ||
2715 (GET_CODE (XEXP (src, 0)) == SUBREG &&
2716 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
2717 GET_MODE (XEXP (src, 0)) == SImode)
2718 return COND_DEC_INSN;
2719
2720 /* Some insns that we don't bother with:
2721 (set (rx:DI) (ry:DI))
2722 (set (rx:DI) (const_int 0))
2723 */
2724
2725 }
2726 else if (GET_CODE (insn) == JUMP_INSN &&
2727 GET_CODE (PATTERN (insn)) == SET &&
2728 GET_CODE (XEXP (PATTERN (insn), 1)) == LABEL_REF)
2729 return COND_BRANCH_INSN;
2730
2731 return COND_NO;
2732}
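/* Examples of the SImode patterns accepted above (illustrative RTL; the
   conditional forms are emitted by emit_new_cond_insn below):

     (set (reg:SI 7) (reg:SI 8))                          -> COND_MOV_INSN
     (set (reg:SI 7) (const_int 0))                       -> COND_CLR_INSN
     (set (reg:SI 7) (plus:SI (reg:SI 7) (const_int 1)))  -> COND_INC_INSN
     (set (reg:SI 7) (plus:SI (reg:SI 7) (const_int -1))) -> COND_DEC_INSN
     (set (pc) (label_ref ...))                           -> COND_BRANCH_INSN */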
2733
2734/* Emit a conditional version of insn and replace the old insn with the
2735 new one. Return the new insn if emitted. */
2736
2737static rtx
2738emit_new_cond_insn (insn, cond)
2739 rtx insn;
2740 int cond;
2741{
2742 rtx c_insn = 0;
2743 rtx pat, dst, src;
2744 cond_type num;
2745
2746 if ((num = is_cond_candidate (insn)) == COND_NO)
2747 return NULL;
2748
2749 pat = PATTERN (insn);
2750
2751 if (GET_CODE (insn) == INSN)
2752 {
2753 dst = SET_DEST (pat);
2754 src = SET_SRC (pat);
2755 }
2756 else
2757 dst = JUMP_LABEL (insn);
2758
2759 switch (num)
2760 {
2761 case COND_MOV_INSN:
2762 case COND_CLR_INSN:
2763 if (cond)
2764 c_insn = gen_movt0 (dst, src, dst);
2765 else
2766 c_insn = gen_movt0 (dst, dst, src);
2767 break;
2768
2769 case COND_INC_INSN:
2770 if (cond)
2771 c_insn = gen_incscc (dst, dst);
2772 else
2773 c_insn = gen_incscc_false (dst, dst);
2774 break;
2775
2776 case COND_DEC_INSN:
2777 if (cond)
2778 c_insn = gen_decscc (dst, dst);
2779 else
2780 c_insn = gen_decscc_false (dst, dst);
2781 break;
2782
2783 case COND_BRANCH_INSN:
2784 if (cond)
2785 c_insn = gen_branch_true (dst);
2786 else
2787 c_insn = gen_branch_false (dst);
2788 break;
2789
2790 default:
2791 return NULL;
2792 }
2793
2794 /* Only copy the notes if they exist. */
2795 if (rtx_length [GET_CODE (c_insn)] >= 7 && rtx_length [GET_CODE (insn)] >= 7)
2796 {
2797 /* We really don't need to bother with the notes and links at this
2798 point, but go ahead and save the notes. This will help is_dead()
2799 when applying peepholes (links don't matter since they are not
2800 used any more beyond this point for the mcore). */
2801 REG_NOTES (c_insn) = REG_NOTES (insn);
2802 }
2803
2804 if (num == COND_BRANCH_INSN)
2805 {
2806 /* For jumps, we need to be a little bit careful and emit the new jump
2807 before the old one and to update the use count for the target label.
2808 This way, the barrier following the old (uncond) jump will get
2809 deleted, but the label won't. */
2810 c_insn = emit_jump_insn_before (c_insn, insn);
2811
2812 ++ LABEL_NUSES (dst);
2813
2814 JUMP_LABEL (c_insn) = dst;
2815 }
2816 else
2817 c_insn = emit_insn_after (c_insn, insn);
2818
2819 delete_insn (insn);
2820
2821 return c_insn;
2822}
2823
2824/* Attempt to change a basic block into a series of conditional insns. This
2825 works by taking the branch at the end of the 1st block and scanning for the
2826 end of the 2nd block. If all instructions in the 2nd block have cond.
2827 versions and the label at the start of block 3 is the same as the target
2828 from the branch at block 1, then conditionalize all insns in block 2 using
2829 the inverse condition of the branch at block 1. (Note I'm bending the
2830 definition of basic block here.)
2831
2832 e.g., change:
2833
2834 bt L2 <-- end of block 1 (delete)
2835 mov r7,r8
2836 addu r7,1
2837 br L3 <-- end of block 2
2838
2839 L2: ... <-- start of block 3 (NUSES==1)
2840 L3: ...
2841
2842 to:
2843
2844 movf r7,r8
2845 incf r7
2846 bf L3
2847
2848 L3: ...
2849
2850 we can delete the L2 label if NUSES==1 and re-apply the optimization
2851 starting at the last instruction of block 2. This may allow an entire
2852 if-then-else statement to be conditionalized. BRC */
2853static rtx
2854conditionalize_block (first)
2855 rtx first;
2856{
2857 rtx insn;
2858 rtx br_pat;
2859 rtx end_blk_1_br = 0;
2860 rtx end_blk_2_insn = 0;
2861 rtx start_blk_3_lab = 0;
2862 int cond;
2863 int br_lab_num;
2864 int blk_size = 0;
2865
2866
2867 /* Check that the first insn is a candidate conditional jump. This is
2868 the one that we'll eliminate. If not, advance to the next insn to
2869 try. */
2870 if (GET_CODE (first) != JUMP_INSN ||
2871 GET_CODE (PATTERN (first)) != SET ||
2872 GET_CODE (XEXP (PATTERN (first), 1)) != IF_THEN_ELSE)
2873 return NEXT_INSN (first);
2874
2875 /* Extract some information we need. */
2876 end_blk_1_br = first;
2877 br_pat = PATTERN (end_blk_1_br);
2878
2879 /* Complement the condition since we use the reverse cond. for the insns. */
2880 cond = (GET_CODE (XEXP (XEXP (br_pat, 1), 0)) == EQ);
2881
2882 /* Determine what kind of branch we have. */
2883 if (GET_CODE (XEXP (XEXP (br_pat, 1), 1)) == LABEL_REF)
2884 {
2885 /* A normal branch, so extract label out of first arm. */
2886 br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 1), 0));
2887 }
2888 else
2889 {
2890 /* An inverse branch, so extract the label out of the 2nd arm
2891 and complement the condition. */
2892 cond = (cond == 0);
2893 br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 2), 0));
2894 }
2895
2896 /* Scan forward for the start of block 2: it must start with a
2897 label and that label must be the same as the branch target
2898 label from block 1. We don't care about whether block 2 actually
2899 ends with a branch or a label (an uncond. branch is
2900 conditionalizable). */
2901 for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
2902 {
2903 enum rtx_code code;
2904
2905 code = GET_CODE (insn);
2906
2907 /* Look for the label at the start of block 3. */
2908 if (code == CODE_LABEL && CODE_LABEL_NUMBER (insn) == br_lab_num)
2909 break;
2910
2911 /* Skip barriers, notes, and conditionalizable insns. If the
2912 insn is not conditionalizable or makes this optimization fail,
2913 just return the next insn so we can start over from that point. */
2914 if (code != BARRIER && code != NOTE && !is_cond_candidate (insn))
2915 return NEXT_INSN (insn);
2916
2917 /* Remember the last real insn before the label (i.e., end of block 2). */
2918 if (code == JUMP_INSN || code == INSN)
2919 {
2920 blk_size ++;
2921 end_blk_2_insn = insn;
2922 }
2923 }
2924
2925 if (!insn)
2926 return insn;
2927
2928 /* It is possible for this optimization to slow performance if the blocks
2929 are long. This really depends upon whether the branch is likely taken
2930 or not. If the branch is taken, we slow performance in many cases. But,
2931 if the branch is not taken, we always help performance (for a single
2932 block, but for a double block (i.e. when the optimization is re-applied)
2933 this is not true since the 'right thing' depends on the overall length of
2934 the collapsed block). As a compromise, don't apply this optimization on
2935 blocks larger than size 2 (unlikely for the mcore) when speed is important.
2936 The best threshold depends on the latencies of the instructions (i.e.,
2937 the branch penalty). */
2938 if (optimize > 1 && blk_size > 2)
2939 return insn;
2940
2941 /* At this point, we've found the start of block 3 and we know that
2942 it is the destination of the branch from block 1. Also, all
2943 instructions in the block 2 are conditionalizable. So, apply the
2944 conditionalization and delete the branch. */
2945 start_blk_3_lab = insn;
2946
2947 for (insn = NEXT_INSN (end_blk_1_br); insn != start_blk_3_lab;
2948 insn = NEXT_INSN (insn))
2949 {
2950 rtx newinsn;
2951
2952 if (INSN_DELETED_P (insn))
2953 continue;
2954
2955 /* Try to form a conditional variant of the instruction and emit it. */
2956 if ((newinsn = emit_new_cond_insn (insn, cond)))
2957 {
2958 if (end_blk_2_insn == insn)
2959 end_blk_2_insn = newinsn;
2960
2961 insn = newinsn;
2962 }
2963 }
2964
2965 /* Note whether we will delete the label starting blk 3 when the jump
2966 gets deleted. If so, we want to re-apply this optimization at the
2967 last real instruction right before the label. */
2968 if (LABEL_NUSES (start_blk_3_lab) == 1)
2969 {
2970 start_blk_3_lab = 0;
2971 }
2972
2973 /* ??? We probably should redistribute the death notes for this insn, esp.
2974 the death of cc, but it doesn't really matter this late in the game.
2975 The peepholes all use is_dead() which will find the correct death
2976 regardless of whether there is a note. */
2977 delete_insn (end_blk_1_br);
2978
2979 if (! start_blk_3_lab)
2980 return end_blk_2_insn;
2981
2982 /* Return the insn right after the label at the start of block 3. */
2983 return NEXT_INSN (start_blk_3_lab);
2984}
2985
2986/* Apply the conditionalization of blocks optimization. This is the
2987 outer loop that traverses through the insns scanning for a branch
2988 that signifies an opportunity to apply the optimization. Note that
2989 this optimization is applied late. If we could apply it earlier,
2990 say before cse 2, it may expose more optimization opportunities.
2991 But the payback probably isn't really worth the effort (we'd have
2992 to update all reg/flow/notes/links/etc to make it work - and stick it
2993 in before cse 2). */
2994
2995static void
2996conditionalize_optimization (first)
2997 rtx first;
2998{
2999 rtx insn;
3000
3001 for (insn = first; insn; insn = conditionalize_block (insn))
3002 continue;
3003}
3004
3005static int saved_warn_return_type = -1;
3006static int saved_warn_return_type_count = 0;
3007
3008/* This function is called from toplev.c before reorg. */
3009
3010void
3011mcore_dependent_reorg (first)
3012 rtx first;
3013{
3014 /* Reset this variable. */
3015 current_function_anonymous_args = 0;
3016
3017 /* Restore the warn_return_type if it has been altered. */
3018 if (saved_warn_return_type != -1)
3019 {
3020 /* Only restore the value if we have reached another function.
3021 The test of warn_return_type occurs in finish_function () in
3022 c-decl.c a long time after the code for the function is generated,
3023 so we need a counter to tell us when we have finished parsing that
3024 function and can restore the flag. */
3025 if (--saved_warn_return_type_count == 0)
3026 {
3027 warn_return_type = saved_warn_return_type;
3028 saved_warn_return_type = -1;
3029 }
3030 }
3031
3032 if (optimize == 0)
3033 return;
3034
3035 /* Conditionalize blocks where we can. */
3036 conditionalize_optimization (first);
3037
3038 /* Literal pool generation is now pushed off until the assembler. */
3039}
3040
3041\f
3042/* Return the reg_class to use when reloading the rtx X into the class
3043 CLASS. */
3044
3045/* If the input is (PLUS REG CONSTANT) representing a stack slot address,
3046 then we want to restrict the class to LRW_REGS, since that ensures that
3047 we will be able to safely load the constant.
3048
3049 If the input is a constant that should be loaded with mvir1, then use
3050 ONLYR1_REGS.
3051
3052 ??? We don't handle the case where we have (PLUS REG CONSTANT) and
3053 the constant should be loaded with mvir1, because that can lead to cases
3054 where an instruction needs two ONLYR1_REGS reloads. */
3055enum reg_class
3056mcore_reload_class (x, class)
3057 rtx x;
3058 enum reg_class class;
3059{
3060 enum reg_class new_class;
3061
3062 if (class == GENERAL_REGS && CONSTANT_P (x)
3063 && (GET_CODE (x) != CONST_INT
3064 || ( ! CONST_OK_FOR_I (INTVAL (x))
3065 && ! CONST_OK_FOR_M (INTVAL (x))
3066 && ! CONST_OK_FOR_N (INTVAL (x)))))
3067 new_class = LRW_REGS;
3068 else
3069 new_class = class;
3070
3071 return new_class;
3072}
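/* Illustrative use: a pseudo being loaded with a constant such as
   0x12345678 fails all of CONST_OK_FOR_I/M/N, so its class is narrowed
   to LRW_REGS and the value comes from the constant pool via a
   pc-relative lrw; constants that do satisfy one of those predicates
   keep the class they were given.  */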
3073
3074/* Tell me if a pair of reg/subreg rtx's actually refer to the same
3075 register. Note that the current version doesn't worry about whether
3076 they are the same mode or not (e.g., a QImode in r2 matches an HImode
3077 in r2, which matches an SImode in r2). We might think in the future
3078 about whether we want to be able to say something about modes. */
3079int
3080mcore_is_same_reg (x, y)
3081 rtx x;
3082 rtx y;
3083{
3084 /* Strip any and all of the subreg wrappers. */
3085 while (GET_CODE (x) == SUBREG)
3086 x = SUBREG_REG (x);
3087
3088 while (GET_CODE (y) == SUBREG)
3089 y = SUBREG_REG (y);
3090
3091 if (GET_CODE (x) == REG && GET_CODE (y) == REG && REGNO (x) == REGNO (y))
3092 return 1;
3093
3094 return 0;
3095}
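/* For example, (subreg:HI (reg:SI 2) 0) and (reg:SI 2) compare equal
   here: both strip down to hard register 2, and the mode difference is
   deliberately ignored.  */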
3096
3097/* Called to register all of our global variables with the garbage
3098 collector. */
3099static void
3100mcore_add_gc_roots ()
3101{
3102 ggc_add_rtx_root (&arch_compare_op0, 1);
3103 ggc_add_rtx_root (&arch_compare_op1, 1);
3104}
3105
3106void
3107mcore_override_options ()
3108{
3109 if (mcore_stack_increment_string)
3110 {
3111 mcore_stack_increment = atoi (mcore_stack_increment_string);
3112
3113 if (mcore_stack_increment < 0
3114 || (mcore_stack_increment == 0
3115 && (mcore_stack_increment_string[0] != '0'
3116 || mcore_stack_increment_string[1] != 0)))
3117 error ("Invalid option `-mstack-increment=%s'",
3118 mcore_stack_increment_string);
3119 }
3120
3121 /* Only the m340 supports little endian code. */
3122 if (TARGET_LITTLE_END && ! TARGET_M340)
3123 target_flags |= M340_BIT;
3124
3125 mcore_add_gc_roots ();
3126}
3127\f
3128int
3129mcore_must_pass_on_stack (mode, type)
3130 enum machine_mode mode ATTRIBUTE_UNUSED;
3131 tree type;
3132{
3133 if (type == NULL)
3134 return 0;
3135
3136 /* If the argument can have its address taken, it must
3137 be placed on the stack. */
3138 if (TREE_ADDRESSABLE (type))
3139 return 1;
3140
3141 return 0;
3142}
3143
3144/* Compute the number of word sized registers needed to
3145 hold a function argument of mode MODE and type TYPE. */
3146int
3147mcore_num_arg_regs (mode, type)
3148 enum machine_mode mode;
3149 tree type;
3150{
3151 int size;
3152
3153 if (MUST_PASS_IN_STACK (mode, type))
3154 return 0;
3155
3156 if (type && mode == BLKmode)
3157 size = int_size_in_bytes (type);
3158 else
3159 size = GET_MODE_SIZE (mode);
3160
3161 return ROUND_ADVANCE (size);
3162}
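/* For example (assuming 4-byte words), a 10-byte BLKmode structure needs
   ROUND_ADVANCE (10) == 3 argument registers, while anything caught by
   MUST_PASS_IN_STACK gets 0.  */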
3163
3164static rtx
3165handle_structs_in_regs (mode, type, reg)
3166 enum machine_mode mode;
3167 tree type;
3168 int reg;
3169{
3170 int size;
3171
3172 /* The MCore ABI defines that a structure whose size is not a whole number
3173 of words is passed packed into registers (or spilled onto the stack if
3174 not enough registers are available) with the last few bytes of the
3175 structure being packed, left-justified, into the last register/stack slot.
3176 GCC handles this correctly if the last word is in a stack slot, but we
3177 have to generate a special, PARALLEL RTX if the last word is in an
3178 argument register. */
3179 if (type
3180 && TYPE_MODE (type) == BLKmode
3181 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
3182 && (size = int_size_in_bytes (type)) > UNITS_PER_WORD
3183 && (size % UNITS_PER_WORD != 0)
3184 && (reg + mcore_num_arg_regs (mode, type) <= (FIRST_PARM_REG + NPARM_REGS)))
3185 {
3186 rtx arg_regs [NPARM_REGS];
3187 int nregs;
3188 rtx result;
3189 rtvec rtvec;
3190
3191 for (nregs = 0; size > 0; size -= UNITS_PER_WORD)
3192 {
3193 arg_regs [nregs] =
3194 gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, reg ++),
3195 GEN_INT (nregs * UNITS_PER_WORD));
3196 nregs ++;
3197 }
3198
3199 /* We assume here that NPARM_REGS == 6. The assert checks this. */
3200 assert (ARRAY_SIZE (arg_regs) == 6);
3201 rtvec = gen_rtvec (nregs, arg_regs[0], arg_regs[1], arg_regs[2],
3202 arg_regs[3], arg_regs[4], arg_regs[5]);
3203
3204 result = gen_rtx_PARALLEL (mode, rtvec);
3205 return result;
3206 }
3207
3208 return gen_rtx_REG (mode, reg);
3209}
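/* Illustrative case for the PARALLEL path above: a 10-byte BLKmode
   structure starting in the first argument register spans three words,
   so the result is roughly

     (parallel [(expr_list (reg:SI 2) (const_int 0))
                (expr_list (reg:SI 3) (const_int 4))
                (expr_list (reg:SI 4) (const_int 8))])

   with the trailing two bytes packed, left-justified, into the last
   register as the ABI requires.  */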
3210
3211rtx
3212mcore_function_value (valtype, func)
3213 tree valtype;
3214 tree func ATTRIBUTE_UNUSED;
3215{
3216 enum machine_mode mode;
3217 int unsigned_p;
3218
3219 mode = TYPE_MODE (valtype);
3220
3221 PROMOTE_MODE (mode, unsigned_p, NULL);
3222
3223 return handle_structs_in_regs (mode, valtype, FIRST_RET_REG);
3224}
3225
3226/* Define where to put the arguments to a function.
3227 Value is zero to push the argument on the stack,
3228 or a hard register in which to store the argument.
3229
3230 MODE is the argument's machine mode.
3231 TYPE is the data type of the argument (as a tree).
3232 This is null for libcalls where that information may
3233 not be available.
3234 CUM is a variable of type CUMULATIVE_ARGS which gives info about
3235 the preceding args and about the function being called.
3236 NAMED is nonzero if this argument is a named parameter
3237 (otherwise it is an extra parameter matching an ellipsis).
3238
3239 On MCore the first args are normally in registers
3240 and the rest are pushed. Any arg that starts within the first
3241 NPARM_REGS words is at least partially passed in a register unless
3242 its data type forbids. */
3243rtx
3244mcore_function_arg (cum, mode, type, named)
3245 CUMULATIVE_ARGS cum;
3246 enum machine_mode mode;
3247 tree type;
3248 int named;
3249{
3250 int arg_reg;
3251
3252 if (! named)
3253 return 0;
3254
3255 if (MUST_PASS_IN_STACK (mode, type))
3256 return 0;
3257
3258 arg_reg = ROUND_REG (cum, mode);
3259
3260 if (arg_reg < NPARM_REGS)
3261 return handle_structs_in_regs (mode, type, FIRST_PARM_REG + arg_reg);
3262
3263 return 0;
3264}
3265
3266/* Implements the FUNCTION_ARG_PARTIAL_NREGS macro.
3267 Returns the number of argument registers required to hold *part* of
3268 a parameter of machine mode MODE and type TYPE (which may be NULL if
3269 the type is not known). If the argument fits entirely in the argument
3270 registers, or entirely on the stack, then 0 is returned. CUM is the
3271 number of argument registers already used by earlier parameters to
3272 the function. */
3273int
3274mcore_function_arg_partial_nregs (cum, mode, type, named)
3275 CUMULATIVE_ARGS cum;
3276 enum machine_mode mode;
3277 tree type;
3278 int named;
3279{
3280 int reg = ROUND_REG (cum, mode);
3281
3282 if (named == 0)
3283 return 0;
3284
3285 if (MUST_PASS_IN_STACK (mode, type))
3286 return 0;
3287
3288 /* REG is not the *hardware* register number of the register that holds
3289 the argument, it is the *argument* register number. So for example,
3290 the first argument to a function goes in argument register 0, which
3291 translates (for the MCore) into hardware register 2. The second
3292 argument goes into argument register 1, which translates into hardware
3293 register 3, and so on. NPARM_REGS is the number of argument registers
3294 supported by the target, not the maximum hardware register number of
3295 the target. */
3296 if (reg >= NPARM_REGS)
3297 return 0;
3298
3299 /* If the argument fits entirely in registers, return 0. */
3300 if (reg + mcore_num_arg_regs (mode, type) <= NPARM_REGS)
3301 return 0;
3302
3303 /* The argument overflows the number of available argument registers.
3304 Compute how many argument registers have not yet been assigned to
3305 hold an argument. */
3306 reg = NPARM_REGS - reg;
3307
3308 /* Return partially in registers and partially on the stack. */
3309 return reg;
3310}
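/* Worked example (with NPARM_REGS == 6, as asserted elsewhere in this
   file): if four argument registers are already in use (REG == 4) and
   the next argument needs four words, then 4 + 4 > 6, so two words go
   in the last two argument registers, the other two go on the stack,
   and this function returns 2.  */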
3311\f
3312/* Return non-zero if SYMBOL is marked as being dllexport'd. */
3313int
3314mcore_dllexport_name_p (symbol)
3315 const char * symbol;
3316{
3317 return symbol[0] == '@' && symbol[1] == 'e' && symbol[2] == '.';
3318}
3319
3320/* Return non-zero if SYMBOL is marked as being dllimport'd. */
3321int
3322mcore_dllimport_name_p (symbol)
3323 const char * symbol;
3324{
3325 return symbol[0] == '@' && symbol[1] == 'i' && symbol[2] == '.';
3326}
3327
3328/* Mark a DECL as being dllexport'd. */
3329static void
3330mcore_mark_dllexport (decl)
3331 tree decl;
3332{
3333 const char * oldname;
3334 char * newname;
3335 rtx rtlname;
3336 tree idp;
3337
3338 rtlname = XEXP (DECL_RTL (decl), 0);
3339
3340 if (GET_CODE (rtlname) == SYMBOL_REF)
3341 oldname = XSTR (rtlname, 0);
3342 else if ( GET_CODE (rtlname) == MEM
3343 && GET_CODE (XEXP (rtlname, 0)) == SYMBOL_REF)
3344 oldname = XSTR (XEXP (rtlname, 0), 0);
3345 else
3346 abort ();
3347
3348 if (mcore_dllexport_name_p (oldname))
3349 return; /* Already done. */
3350
3351 newname = alloca (strlen (oldname) + 4);
3352 sprintf (newname, "@e.%s", oldname);
3353
3354 /* We pass newname through get_identifier to ensure it has a unique
3355 address. RTL processing can sometimes peek inside the symbol ref
3356 and compare the string's addresses to see if two symbols are
3357 identical. */
3358 /* ??? At least I think that's why we do this. */
3359 idp = get_identifier (newname);
3360
3361 XEXP (DECL_RTL (decl), 0) =
3362 gen_rtx (SYMBOL_REF, Pmode, IDENTIFIER_POINTER (idp));
3363}
3364
3365/* Mark a DECL as being dllimport'd. */
3366static void
3367mcore_mark_dllimport (decl)
3368 tree decl;
3369{
3370 const char * oldname;
3371 char * newname;
3372 tree idp;
3373 rtx rtlname;
3374 rtx newrtl;
3375
3376 rtlname = XEXP (DECL_RTL (decl), 0);
3377
3378 if (GET_CODE (rtlname) == SYMBOL_REF)
3379 oldname = XSTR (rtlname, 0);
3380 else if ( GET_CODE (rtlname) == MEM
3381 && GET_CODE (XEXP (rtlname, 0)) == SYMBOL_REF)
3382 oldname = XSTR (XEXP (rtlname, 0), 0);
3383 else
3384 abort ();
3385
3386 if (mcore_dllexport_name_p (oldname))
3387 abort (); /* This shouldn't happen. */
3388 else if (mcore_dllimport_name_p (oldname))
3389 return; /* Already done. */
3390
3391 /* ??? One can well ask why we're making these checks here,
3392 and that would be a good question. */
3393
3394 /* Imported variables can't be initialized. */
3395 if (TREE_CODE (decl) == VAR_DECL
3396 && !DECL_VIRTUAL_P (decl)
3397 && DECL_INITIAL (decl))
3398 {
3399 error_with_decl (decl, "initialized variable `%s' is marked dllimport");
3400 return;
3401 }
3402
3403 /* `extern' needn't be specified with dllimport.
3404 Specify `extern' now and hope for the best. Sigh. */
3405 if (TREE_CODE (decl) == VAR_DECL
3406 /* ??? Is this test for vtables needed? */
3407 && !DECL_VIRTUAL_P (decl))
3408 {
3409 DECL_EXTERNAL (decl) = 1;
3410 TREE_PUBLIC (decl) = 1;
3411 }
3412
3413 newname = alloca (strlen (oldname) + 11);
3414 sprintf (newname, "@i.__imp_%s", oldname);
3415
3416 /* We pass newname through get_identifier to ensure it has a unique
3417 address. RTL processing can sometimes peek inside the symbol ref
3418 and compare the string's addresses to see if two symbols are
3419 identical. */
3420 /* ??? At least I think that's why we do this. */
3421 idp = get_identifier (newname);
3422
3423 newrtl = gen_rtx (MEM, Pmode,
3424 gen_rtx (SYMBOL_REF, Pmode,
3425 IDENTIFIER_POINTER (idp)));
3426 XEXP (DECL_RTL (decl), 0) = newrtl;
3427}
3428
3429static int
3430mcore_dllexport_p (decl)
3431 tree decl;
3432{
3433 if ( TREE_CODE (decl) != VAR_DECL
3434 && TREE_CODE (decl) != FUNCTION_DECL)
3435 return 0;
3436
3437 return lookup_attribute ("dllexport", DECL_MACHINE_ATTRIBUTES (decl)) != 0;
3438}
3439
3440static int
3441mcore_dllimport_p (decl)
3442 tree decl;
3443{
3444 if ( TREE_CODE (decl) != VAR_DECL
3445 && TREE_CODE (decl) != FUNCTION_DECL)
3446 return 0;
3447
3448 return lookup_attribute ("dllimport", DECL_MACHINE_ATTRIBUTES (decl)) != 0;
3449}
3450
3451/* Cover function to implement ENCODE_SECTION_INFO. */
3452void
3453mcore_encode_section_info (decl)
3454 tree decl;
3455{
3456 /* This bit is copied from arm.h. */
3457 if (optimize > 0
3458 && TREE_CONSTANT (decl)
3459 && (!flag_writable_strings || TREE_CODE (decl) != STRING_CST))
3460 {
3461 rtx rtl = (TREE_CODE_CLASS (TREE_CODE (decl)) != 'd'
3462 ? TREE_CST_RTL (decl) : DECL_RTL (decl));
3463 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
3464 }
3465
3466 /* Mark the decl so we can tell from the rtl whether the object is
3467 dllexport'd or dllimport'd. */
3468 if (mcore_dllexport_p (decl))
3469 mcore_mark_dllexport (decl);
3470 else if (mcore_dllimport_p (decl))
3471 mcore_mark_dllimport (decl);
3472
3473 /* It might be that DECL has already been marked as dllimport, but
3474 a subsequent definition nullified that. The attribute is gone
3475 but DECL_RTL still has @i.__imp_foo. We need to remove that. */
3476 else if ((TREE_CODE (decl) == FUNCTION_DECL
3477 || TREE_CODE (decl) == VAR_DECL)
3478 && DECL_RTL (decl) != NULL_RTX
3479 && GET_CODE (DECL_RTL (decl)) == MEM
3480 && GET_CODE (XEXP (DECL_RTL (decl), 0)) == MEM
3481 && GET_CODE (XEXP (XEXP (DECL_RTL (decl), 0), 0)) == SYMBOL_REF
3482 && mcore_dllimport_name_p (XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0)))
3483 {
3484 const char * oldname = XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0);
3485 tree idp = get_identifier (oldname + 9);
3486 rtx newrtl = gen_rtx (SYMBOL_REF, Pmode, IDENTIFIER_POINTER (idp));
3487
3488 XEXP (DECL_RTL (decl), 0) = newrtl;
3489
3490 /* We previously set TREE_PUBLIC and DECL_EXTERNAL.
3491 ??? We leave these alone for now. */
3492 }
3493}
3494
3495/* MCore specific attribute support.
3496 dllexport - for exporting a function/variable that will live in a dll
3497 dllimport - for importing a function/variable from a dll
3498 naked - do not create a function prologue/epilogue. */
3499int
3500mcore_valid_machine_decl_attribute (decl, attributes, attr, args)
3501 tree decl;
3502 tree attributes ATTRIBUTE_UNUSED;
3503 tree attr;
3504 tree args;
3505{
3506 if (args != NULL_TREE)
3507 return 0;
3508
3509 if (is_attribute_p ("dllexport", attr))
3510 return 1;
3511
3512 if (is_attribute_p ("dllimport", attr))
3513 return 1;
3514
3515 if (is_attribute_p ("naked", attr) &&
3516 TREE_CODE (decl) == FUNCTION_DECL)
3517 {
3518 /* PR14310 - don't complain about lack of return statement
3519 in naked functions. The solution here is a gross hack
3520 but this is the only way to solve the problem without
3521 adding a new feature to GCC. I did try submitting a patch
3522 that would add such a new feature, but it was (rightfully)
3523 rejected on the grounds that it was creeping featurism,
3524 so hence this code. */
3525 if (warn_return_type)
3526 {
3527 saved_warn_return_type = warn_return_type;
3528 warn_return_type = 0;
3529 saved_warn_return_type_count = 2;
3530 }
3531 else if (saved_warn_return_type_count)
3532 saved_warn_return_type_count = 2;
3533
3534 return 1;
3535 }
3536
3537 return 0;
3538}
3539
3540/* Merge attributes in decls OLD and NEW.
3541 This handles the following situation:
3542
3543 __declspec (dllimport) int foo;
3544 int foo;
3545
3546 The second instance of `foo' nullifies the dllimport. */
3547tree
3548mcore_merge_machine_decl_attributes (old, new)
3549 tree old;
3550 tree new;
3551{
3552 tree a;
3553 int delete_dllimport_p;
3554
3555 old = DECL_MACHINE_ATTRIBUTES (old);
3556 new = DECL_MACHINE_ATTRIBUTES (new);
3557
3558 /* What we need to do here is remove from `old' dllimport if it doesn't
3559 appear in `new'. dllimport behaves like extern: if a declaration is
3560 marked dllimport and a definition appears later, then the object
3561 is not dllimport'd. */
3562 if ( lookup_attribute ("dllimport", old) != NULL_TREE
3563 && lookup_attribute ("dllimport", new) == NULL_TREE)
3564 delete_dllimport_p = 1;
3565 else
3566 delete_dllimport_p = 0;
3567
3568 a = merge_attributes (old, new);
3569
3570 if (delete_dllimport_p)
3571 {
3572 tree prev, t;
3573
3574 /* Scan the list for dllimport and delete it. */
3575 for (prev = NULL_TREE, t = a; t; prev = t, t = TREE_CHAIN (t))
3576 {
3577 if (is_attribute_p ("dllimport", TREE_PURPOSE (t)))
3578 {
3579 if (prev == NULL_TREE)
3580 a = TREE_CHAIN (a);
3581 else
3582 TREE_CHAIN (prev) = TREE_CHAIN (t);
3583 break;
3584 }
3585 }
3586 }
3587
3588 return a;
3589}
3590
3591/* Cover function for UNIQUE_SECTION. */
3592
3593void
3594mcore_unique_section (decl, reloc)
3595 tree decl;
3596 int reloc ATTRIBUTE_UNUSED;
3597{
3598 int len;
3599 char * name;
3600 char * string;
3601 const char * prefix;
3602
3603 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
3604
3605 /* Strip off any encoding in name. */
3606 STRIP_NAME_ENCODING (name, name);
3607
3608 /* The object is put in, for example, section .text$foo.
3609 The linker will then ultimately place it in .text
3610 (everything from the $ on is stripped). */
3611 if (TREE_CODE (decl) == FUNCTION_DECL)
3612 prefix = ".text$";
3613 /* For compatibility with EPOC, we ignore the fact that the
3614 section might have relocs against it. */
3615 else if (DECL_READONLY_SECTION (decl, 0))
3616 prefix = ".rdata$";
3617 else
3618 prefix = ".data$";
3619
3620 len = strlen (name) + strlen (prefix);
3621 string = alloca (len + 1);
3622
3623 sprintf (string, "%s%s", prefix, name);
3624
3625 DECL_SECTION_NAME (decl) = build_string (len, string);
3626}
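/* For example, a function `foo' lands in section .text$foo, read-only
   data in .rdata$foo, and writable data in .data$foo; the linker then
   folds everything from the `$' onwards back into .text/.rdata/.data
   as described above.  */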
3627
3628int
3629mcore_naked_function_p ()
3630{
3631 return lookup_attribute ("naked", DECL_MACHINE_ATTRIBUTES (current_function_decl)) != NULL_TREE;
3632}