/* Output routines for Motorola MCore processor
   Copyright (C) 1993, 1999, 2000, 2001 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.  */

#include "config.h"
#include "system.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "assert.h"
#include "mcore.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "obstack.h"
#include "expr.h"
#include "reload.h"
#include "recog.h"
#include "function.h"
#include "ggc.h"
#include "toplev.h"
#include "target.h"
#include "target-def.h"

/* Maximum size we are allowed to grow the stack in a single operation.
   If we want more, we must do it in increments of at most this size.
   If this value is 0, we don't check at all.  */
const char * mcore_stack_increment_string = 0;
int          mcore_stack_increment = STACK_UNITS_MAXSTEP;

/* For dumping information about frame sizes.  */
char * mcore_current_function_name = 0;
long   mcore_current_compilation_timestamp = 0;

/* Global variables for machine-dependent things.  */

/* Saved operands from the last compare to use when we generate an scc
   or bcc insn.  */
rtx arch_compare_op0;
rtx arch_compare_op1;

/* Provides the class number of the smallest class containing
   reg number.  */
int regno_reg_class[FIRST_PSEUDO_REGISTER] =
{
  GENERAL_REGS, ONLYR1_REGS,  LRW_REGS,     LRW_REGS,
  LRW_REGS,     LRW_REGS,     LRW_REGS,     LRW_REGS,
  LRW_REGS,     LRW_REGS,     LRW_REGS,     LRW_REGS,
  LRW_REGS,     LRW_REGS,     LRW_REGS,     GENERAL_REGS,
  GENERAL_REGS, C_REGS,       NO_REGS,      NO_REGS,
};

/* Provide reg_class from a letter such as appears in the machine
   description.  */
enum reg_class reg_class_from_letter[] =
{
  /* a */ LRW_REGS, /* b */ ONLYR1_REGS,  /* c */ C_REGS,  /* d */ NO_REGS,
  /* e */ NO_REGS,  /* f */ NO_REGS,      /* g */ NO_REGS, /* h */ NO_REGS,
  /* i */ NO_REGS,  /* j */ NO_REGS,      /* k */ NO_REGS, /* l */ NO_REGS,
  /* m */ NO_REGS,  /* n */ NO_REGS,      /* o */ NO_REGS, /* p */ NO_REGS,
  /* q */ NO_REGS,  /* r */ GENERAL_REGS, /* s */ NO_REGS, /* t */ NO_REGS,
  /* u */ NO_REGS,  /* v */ NO_REGS,      /* w */ NO_REGS, /* x */ ALL_REGS,
  /* y */ NO_REGS,  /* z */ NO_REGS
};

struct mcore_frame
{
  int arg_size;			/* Stdarg spills (bytes).  */
  int reg_size;			/* Non-volatile reg saves (bytes).  */
  int reg_mask;			/* Non-volatile reg saves.  */
  int local_size;		/* Locals.  */
  int outbound_size;		/* Arg overflow on calls out.  */
  int pad_outbound;
  int pad_local;
  int pad_reg;
  /* Describe the steps we'll use to grow it.  */
#define MAX_STACK_GROWS 4	/* Gives us some spare space.  */
  int growth[MAX_STACK_GROWS];
  int arg_offset;
  int reg_offset;
  int reg_growth;
  int local_growth;
};

typedef enum
{
  COND_NO,
  COND_MOV_INSN,
  COND_CLR_INSN,
  COND_INC_INSN,
  COND_DEC_INSN,
  COND_BRANCH_INSN
}
cond_type;

static void       output_stack_adjust          PARAMS ((int, int));
static int        calc_live_regs               PARAMS ((int *));
static int        const_ok_for_mcore           PARAMS ((int));
static int        try_constant_tricks          PARAMS ((long, int *, int *));
static const char * output_inline_const        PARAMS ((enum machine_mode, rtx *));
static void       block_move_sequence          PARAMS ((rtx, rtx, rtx, rtx, int, int, int));
static void       layout_mcore_frame           PARAMS ((struct mcore_frame *));
static cond_type  is_cond_candidate            PARAMS ((rtx));
static rtx        emit_new_cond_insn           PARAMS ((rtx, int));
static rtx        conditionalize_block         PARAMS ((rtx));
static void       conditionalize_optimization  PARAMS ((rtx));
static void       mcore_add_gc_roots           PARAMS ((void));
static rtx        handle_structs_in_regs       PARAMS ((enum machine_mode, tree, int));
static void       mcore_mark_dllexport         PARAMS ((tree));
static void       mcore_mark_dllimport         PARAMS ((tree));
static int        mcore_dllexport_p            PARAMS ((tree));
static int        mcore_dllimport_p            PARAMS ((tree));
static int        mcore_valid_decl_attribute   PARAMS ((tree, tree, tree, tree));

/* Initialize the GCC target structure.  */
#ifdef TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef  TARGET_MERGE_DECL_ATTRIBUTES
#define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
#endif

#undef  TARGET_VALID_DECL_ATTRIBUTE
#define TARGET_VALID_DECL_ATTRIBUTE mcore_valid_decl_attribute

struct gcc_target target = TARGET_INITIALIZER;

/* Emit the instructions needed to adjust the stack by SIZE bytes
   in the given DIRECTION.  */

static void
output_stack_adjust (direction, size)
     int direction;
     int size;
{
  /* If extending the stack a lot, we do it incrementally.  */
  if (direction < 0 && size > mcore_stack_increment && mcore_stack_increment > 0)
    {
      rtx tmp = gen_rtx (REG, SImode, 1);
      rtx memref;

      emit_insn (gen_movsi (tmp, GEN_INT (mcore_stack_increment)));

      do
	{
	  emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp));

	  memref = gen_rtx (MEM, SImode, stack_pointer_rtx);
	  MEM_VOLATILE_P (memref) = 1;
	  emit_insn (gen_movsi (memref, stack_pointer_rtx));

	  size -= mcore_stack_increment;
	}
      while (size > mcore_stack_increment);

      /* SIZE is now the residual for the last adjustment,
	 which doesn't require a probe.  */
    }

  if (size)
    {
      rtx insn;
      rtx val = GEN_INT (size);

      if (size > 32)
	{
	  rtx nval = gen_rtx (REG, SImode, 1);

	  emit_insn (gen_movsi (nval, val));
	  val = nval;
	}

      if (direction > 0)
	insn = gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, val);
      else
	insn = gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, val);

      emit_insn (insn);
    }
}
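
/* For a concrete feel (illustrative numbers, not from the sources):
   with mcore_stack_increment == 4096, output_stack_adjust (-1, 10000)
   emits two probed 4096-byte extensions and finishes with a single,
   unprobed subtraction of the remaining 1808 bytes.  */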

/* Work out the registers which need to be saved,
   both as a mask and a count.  */

static int
calc_live_regs (count)
     int * count;
{
  int reg;
  int live_regs_mask = 0;

  * count = 0;

  for (reg = 0; reg < FIRST_PSEUDO_REGISTER; reg++)
    {
      if (regs_ever_live[reg] && !call_used_regs[reg])
	{
	  (*count)++;
	  live_regs_mask |= (1 << reg);
	}
    }

  return live_regs_mask;
}

/* Print the operand address in X to STREAM.  */

void
mcore_print_operand_address (stream, x)
     FILE * stream;
     rtx x;
{
  switch (GET_CODE (x))
    {
    case REG:
      fprintf (stream, "(%s)", reg_names[REGNO (x)]);
      break;

    case PLUS:
      {
	rtx base = XEXP (x, 0);
	rtx index = XEXP (x, 1);

	if (GET_CODE (base) != REG)
	  {
	    /* Ensure that BASE is a register (one of them must be).  */
	    rtx temp = base;
	    base = index;
	    index = temp;
	  }

	switch (GET_CODE (index))
	  {
	  case CONST_INT:
	    fprintf (stream, "(%s,%d)", reg_names[REGNO (base)],
		     INTVAL (index));
	    break;

	  default:
	    debug_rtx (x);

	    abort ();
	  }
      }

      break;

    default:
      output_addr_const (stream, x);
      break;
    }
}

/* Print operand X (an rtx) in assembler syntax to file STREAM
   according to modifier CODE.

   'R'  print the next register or memory location along, i.e. the lsw in
        a double word value
   'O'  print a constant without the #
   'M'  print a constant as its negative
   'N'  print log2 of one plus a constant (the width used by bmaski;
        -1 prints as 32)
   'P'  print log2 of a power of two
   'Q'  print log2 of an inverse of a power of two
   'U'  print register for ldm/stm instruction
   'X'  print byte number for xtrbN instruction
   'x'  print a constant in hexadecimal.  */

void
mcore_print_operand (stream, x, code)
     FILE * stream;
     rtx x;
     int code;
{
  switch (code)
    {
    case 'N':
      if (INTVAL (x) == -1)
	fprintf (asm_out_file, "32");
      else
	fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) + 1));
      break;
    case 'P':
      fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x)));
      break;
    case 'Q':
      fprintf (asm_out_file, "%d", exact_log2 (~INTVAL (x)));
      break;
    case 'O':
      fprintf (asm_out_file, "%d", INTVAL (x));
      break;
    case 'M':
      fprintf (asm_out_file, "%d", - INTVAL (x));
      break;
    case 'R':
      /* Next location along in memory or register.  */
      switch (GET_CODE (x))
	{
	case REG:
	  fputs (reg_names[REGNO (x) + 1], (stream));
	  break;
	case MEM:
	  mcore_print_operand_address (stream,
				       XEXP (adj_offsettable_operand (x, 4), 0));
	  break;
	default:
	  abort ();
	}
      break;
    case 'U':
      fprintf (asm_out_file, "%s-%s", reg_names[REGNO (x)],
	       reg_names[REGNO (x) + 3]);
      break;
    case 'x':
      fprintf (asm_out_file, "0x%x", INTVAL (x));
      break;
    case 'X':
      fprintf (asm_out_file, "%d", 3 - INTVAL (x) / 8);
      break;

    default:
      switch (GET_CODE (x))
	{
	case REG:
	  fputs (reg_names[REGNO (x)], (stream));
	  break;
	case MEM:
	  output_address (XEXP (x, 0));
	  break;
	default:
	  output_addr_const (stream, x);
	  break;
	}
      break;
    }
}

/* What does a constant cost?  */

int
mcore_const_costs (exp, code)
     rtx exp;
     enum rtx_code code;
{
  int val = INTVAL (exp);

  /* Easy constants.  */
  if (   CONST_OK_FOR_I (val)
      || CONST_OK_FOR_M (val)
      || CONST_OK_FOR_N (val)
      || (code == PLUS && CONST_OK_FOR_L (val)))
    return 1;
  else if (code == AND
	   && (   CONST_OK_FOR_M (~val)
	       || CONST_OK_FOR_N (~val)))
    return 2;
  else if (code == PLUS
	   && (   CONST_OK_FOR_I (-val)
	       || CONST_OK_FOR_M (-val)
	       || CONST_OK_FOR_N (-val)))
    return 2;

  return 5;
}
375
376/* What does an and instruction cost - we do this b/c immediates may
377 have been relaxed. We want to ensure that cse will cse relaxed immeds
4816b8e4
NC
378 out. Otherwise we'll get bad code (multiple reloads of the same const). */
379
8f90be4c
NC
380int
381mcore_and_cost (x)
382 rtx x;
383{
384 int val;
385
386 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
387 return 2;
388
389 val = INTVAL (XEXP (x, 1));
390
4816b8e4 391 /* Do it directly. */
8f90be4c
NC
392 if (CONST_OK_FOR_K (val) || CONST_OK_FOR_M (~val))
393 return 2;
394 /* Takes one instruction to load. */
395 else if (const_ok_for_mcore (val))
396 return 3;
397 /* Takes two instructions to load. */
398 else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
399 return 4;
400
4816b8e4 401 /* Takes a lrw to load. */
8f90be4c
NC
402 return 5;
403}

/* What does an OR cost?  See mcore_and_cost().  */

int
mcore_ior_cost (x)
     rtx x;
{
  int val;

  if (GET_CODE (XEXP (x, 1)) != CONST_INT)
    return 2;

  val = INTVAL (XEXP (x, 1));

  /* Do it directly with bseti.  */
  if (CONST_OK_FOR_M (val))
    return 2;
  /* Takes one instruction to load.  */
  else if (const_ok_for_mcore (val))
    return 3;
  /* Takes two instructions to load.  */
  else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
    return 4;

  /* Takes a lrw to load.  */
  return 5;
}

/* Check to see if a comparison against a constant can be made more
   efficient by incrementing/decrementing the constant to get one that
   is more efficient to load.  */

int
mcore_modify_comparison (code)
     enum rtx_code code;
{
  rtx op1 = arch_compare_op1;

  if (GET_CODE (op1) == CONST_INT)
    {
      int val = INTVAL (op1);

      switch (code)
	{
	case LE:
	  if (CONST_OK_FOR_J (val + 1))
	    {
	      arch_compare_op1 = GEN_INT (val + 1);
	      return 1;
	    }
	  break;

	default:
	  break;
	}
    }

  return 0;
}
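
/* A minimal example (illustrative): for code == LE and a constant 31,
   bumping the operand to 32 turns "x <= 31" into "x < 32", and 32 is
   still within cmplti's J immediate range (1-32), so the cheaper form
   wins.  */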

/* Prepare the operands for a comparison.  */

rtx
mcore_gen_compare_reg (code)
     enum rtx_code code;
{
  rtx op0 = arch_compare_op0;
  rtx op1 = arch_compare_op1;
  rtx cc_reg = gen_rtx (REG, CCmode, CC_REG);

  if (CONSTANT_P (op1) && GET_CODE (op1) != CONST_INT)
    op1 = force_reg (SImode, op1);

  /* cmpnei: 0-31 (K immediate)
     cmplti: 1-32 (J immediate, 0 using btsti x,31).  */
  switch (code)
    {
    case EQ:	/* Use inverted condition, cmpne.  */
      code = NE;
      /* Drop through.  */

    case NE:	/* Use normal condition, cmpne.  */
      if (GET_CODE (op1) == CONST_INT && ! CONST_OK_FOR_K (INTVAL (op1)))
	op1 = force_reg (SImode, op1);
      break;

    case LE:	/* Use inverted condition, reversed cmplt.  */
      code = GT;
      /* Drop through.  */

    case GT:	/* Use normal condition, reversed cmplt.  */
      if (GET_CODE (op1) == CONST_INT)
	op1 = force_reg (SImode, op1);
      break;

    case GE:	/* Use inverted condition, cmplt.  */
      code = LT;
      /* Drop through.  */

    case LT:	/* Use normal condition, cmplt.  */
      if (GET_CODE (op1) == CONST_INT &&
	  /* Covered by btsti x,31.  */
	  INTVAL (op1) != 0 &&
	  ! CONST_OK_FOR_J (INTVAL (op1)))
	op1 = force_reg (SImode, op1);
      break;

    case GTU:	/* Use inverted condition, cmple.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) == 0)
	{
	  /* Unsigned > 0 is the same as != 0, but we need
	     to invert the condition, so we want to set
	     code = EQ.  This cannot be done however, as the
	     mcore does not support such a test.  Instead we
	     cope with this case in the "bgtu" pattern itself
	     so we should never reach this point.  */
	  /* code = EQ; */
	  abort ();
	  break;
	}
      code = LEU;
      /* Drop through.  */

    case LEU:	/* Use normal condition, reversed cmphs.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
	op1 = force_reg (SImode, op1);
      break;

    case LTU:	/* Use inverted condition, cmphs.  */
      code = GEU;
      /* Drop through.  */

    case GEU:	/* Use normal condition, cmphs.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
	op1 = force_reg (SImode, op1);
      break;

    default:
      break;
    }

  emit_insn (gen_rtx (SET, VOIDmode, cc_reg, gen_rtx (code, CCmode, op0, op1)));

  return cc_reg;
}
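
/* Expository note: the case comments above assume only cmpne, cmplt
   and cmphs are native; every other relation is obtained by swapping
   operands or inverting the branch sense.  EQ, for instance, is
   emitted as (set cc (ne op0 op1)) and the branch/scc patterns test
   the condition bit with the opposite sense.  */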

int
mcore_symbolic_address_p (x)
     rtx x;
{
  switch (GET_CODE (x))
    {
    case SYMBOL_REF:
    case LABEL_REF:
      return 1;
    case CONST:
      x = XEXP (x, 0);
      return (   (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
	       || GET_CODE (XEXP (x, 0)) == LABEL_REF)
	      && GET_CODE (XEXP (x, 1)) == CONST_INT);
    default:
      return 0;
    }
}

int
mcore_call_address_operand (x, mode)
     rtx x;
     enum machine_mode mode;
{
  return register_operand (x, mode) || CONSTANT_P (x);
}

/* Functions to output assembly code for a function call.  */

char *
mcore_output_call (operands, index)
     rtx operands[];
     int index;
{
  static char buffer[20];
  rtx addr = operands[index];

  if (REG_P (addr))
    {
      if (TARGET_CG_DATA)
	{
	  if (mcore_current_function_name == 0)
	    abort ();

	  ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
			      "unknown", 1);
	}

      sprintf (buffer, "jsr\t%%%d", index);
    }
  else
    {
      if (TARGET_CG_DATA)
	{
	  if (mcore_current_function_name == 0)
	    abort ();

	  if (GET_CODE (addr) != SYMBOL_REF)
	    abort ();

	  ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
			      XSTR (addr, 0), 0);
	}

      sprintf (buffer, "jbsr\t%%%d", index);
    }

  return buffer;
}

/* Can we load a constant with a single instruction?  */

static int
const_ok_for_mcore (value)
     int value;
{
  if (value >= 0 && value <= 127)
    return 1;

  /* Try exact power of two.  */
  if ((value & (value - 1)) == 0)
    return 1;

  /* Try exact power of two - 1.  */
  if ((value & (value + 1)) == 0)
    return 1;

  return 0;
}
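
/* For illustration: 100 loads with movi (0-127), 0x1000 with bgeni
   (an exact power of two) and 0xff with bmaski (a power of two minus
   one); 1000 matches none of these tests and needs a multi-insn trick
   or a literal-pool lrw.  */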

/* Can we load a constant inline with up to 2 instructions?  */

int
mcore_const_ok_for_inline (value)
     long value;
{
  int x, y;

  return try_constant_tricks (value, & x, & y) > 0;
}

/* Are we loading the constant using a not?  */

int
mcore_const_trick_uses_not (value)
     long value;
{
  int x, y;

  return try_constant_tricks (value, & x, & y) == 2;
}

/* Try tricks to load a constant inline and return the trick number if
   success (0 is non-inlinable).

   0: not inlinable
   1: single instruction (do the usual thing)
   2: single insn followed by a 'not'
   3: single insn followed by an addi
   4: single insn followed by a subi
   5: single insn followed by rsubi
   6: single insn followed by bseti
   7: single insn followed by bclri
   8: single insn followed by rotli
   9: single insn followed by lsli
   10: single insn followed by ixh
   11: single insn followed by ixw.  */

static int
try_constant_tricks (value, x, y)
     long value;
     int * x;
     int * y;
{
  int i;
  unsigned bit, shf, rot;

  if (const_ok_for_mcore (value))
    return 1;	/* Do the usual thing.  */

  if (TARGET_HARDLIT)
    {
      if (const_ok_for_mcore (~value))
	{
	  *x = ~value;
	  return 2;
	}

      for (i = 1; i <= 32; i++)
	{
	  if (const_ok_for_mcore (value - i))
	    {
	      *x = value - i;
	      *y = i;

	      return 3;
	    }

	  if (const_ok_for_mcore (value + i))
	    {
	      *x = value + i;
	      *y = i;

	      return 4;
	    }
	}

      bit = 0x80000000UL;

      for (i = 0; i <= 31; i++)
	{
	  if (const_ok_for_mcore (i - value))
	    {
	      *x = i - value;
	      *y = i;

	      return 5;
	    }

	  if (const_ok_for_mcore (value & ~bit))
	    {
	      *y = bit;
	      *x = value & ~bit;

	      return 6;
	    }

	  if (const_ok_for_mcore (value | bit))
	    {
	      *y = ~bit;
	      *x = value | bit;

	      return 7;
	    }

	  bit >>= 1;
	}

      shf = value;
      rot = value;

      for (i = 1; i < 31; i++)
	{
	  int c;

	  /* MCore has rotate left.  */
	  c = rot << 31;
	  rot >>= 1;
	  rot &= 0x7FFFFFFF;
	  rot |= c;		/* Simulate rotate.  */

	  if (const_ok_for_mcore (rot))
	    {
	      *y = i;
	      *x = rot;

	      return 8;
	    }

	  if (shf & 1)
	    shf = 0;		/* Can't use logical shift, low order bit is one.  */

	  shf >>= 1;

	  if (shf != 0 && const_ok_for_mcore (shf))
	    {
	      *y = i;
	      *x = shf;

	      return 9;
	    }
	}

      if ((value % 3) == 0 && const_ok_for_mcore (value / 3))
	{
	  *x = value / 3;

	  return 10;
	}

      if ((value % 5) == 0 && const_ok_for_mcore (value / 5))
	{
	  *x = value / 5;

	  return 11;
	}
    }

  return 0;
}
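
/* Worked example (illustrative): 2080 fails the single-instruction
   tests, but 2080 - 32 == 2048 is an exact power of two, so the
   function returns 3 with *x == 2048 and *y == 32;
   output_inline_const() will then emit "bgeni rd,11" followed by
   "addi rd,32" (rd standing for the destination register).  */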

/* Check whether reg is dead at first.  This is done by searching ahead
   for either the next use (i.e., reg is live), a death note, or a set of
   reg.  Don't just use dead_or_set_p() since reload does not always mark
   deaths (especially if PRESERVE_DEATH_NOTES_REGNO_P is not defined).  We
   can ignore subregs by extracting the actual register.  BRC  */

int
mcore_is_dead (first, reg)
     rtx first;
     rtx reg;
{
  rtx insn;

  /* For mcore, subregs can't live independently of their parent regs.  */
  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  /* Dies immediately.  */
  if (dead_or_set_p (first, reg))
    return 1;

  /* Look for conclusive evidence of live/death, otherwise we have
     to assume that it is live.  */
  for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == JUMP_INSN)
	return 0;	/* We lose track, assume it is alive.  */

      else if (GET_CODE (insn) == CALL_INSN)
	{
	  /* Calls might use it for target or register parms.  */
	  if (reg_referenced_p (reg, PATTERN (insn))
	      || find_reg_fusage (insn, USE, reg))
	    return 0;
	  else if (dead_or_set_p (insn, reg))
	    return 1;
	}
      else if (GET_CODE (insn) == INSN)
	{
	  if (reg_referenced_p (reg, PATTERN (insn)))
	    return 0;
	  else if (dead_or_set_p (insn, reg))
	    return 1;
	}
    }

  /* No conclusive evidence either way, we cannot take the chance
     that control flow hid the use from us -- "I'm not dead yet".  */
  return 0;
}

/* Count the number of ones in mask.  */

int
mcore_num_ones (mask)
     int mask;
{
  /* A trick to count set bits recently posted on comp.compilers.  */
  mask = (mask >> 1 & 0x55555555) + (mask & 0x55555555);
  mask = ((mask >> 2) & 0x33333333) + (mask & 0x33333333);
  mask = ((mask >> 4) + mask) & 0x0f0f0f0f;
  mask = ((mask >> 8) + mask);

  return (mask + (mask >> 16)) & 0xff;
}
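
/* Tracing the parallel reduction (illustrative): for mask == 0xf0 the
   successive lines yield 0xa0 (2-bit pair counts 2,2,0,0), then 0x40
   (nibble counts 4 and 0), then 0x04 (byte count), and the final fold
   returns 4.  */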

/* Count the number of zeros in mask.  */

int
mcore_num_zeros (mask)
     int mask;
{
  return 32 - mcore_num_ones (mask);
}

/* Determine byte being masked.  */

int
mcore_byte_offset (mask)
     unsigned int mask;
{
  if (mask == 0x00ffffffUL)
    return 0;
  else if (mask == 0xff00ffffUL)
    return 1;
  else if (mask == 0xffff00ffUL)
    return 2;
  else if (mask == 0xffffff00UL)
    return 3;

  return -1;
}

/* Determine halfword being masked.  */

int
mcore_halfword_offset (mask)
     unsigned int mask;
{
  if (mask == 0x0000ffffL)
    return 0;
  else if (mask == 0xffff0000UL)
    return 1;

  return -1;
}

/* Output a series of bseti's corresponding to mask.  */

const char *
mcore_output_bseti (dst, mask)
     rtx dst;
     int mask;
{
  rtx out_operands[2];
  int bit;

  out_operands[0] = dst;

  for (bit = 0; bit < 32; bit++)
    {
      if ((mask & 0x1) == 0x1)
	{
	  out_operands[1] = GEN_INT (bit);

	  output_asm_insn ("bseti\t%0,%1", out_operands);
	}
      mask >>= 1;
    }

  return "";
}

/* Output a series of bclri's corresponding to mask.  */

const char *
mcore_output_bclri (dst, mask)
     rtx dst;
     int mask;
{
  rtx out_operands[2];
  int bit;

  out_operands[0] = dst;

  for (bit = 0; bit < 32; bit++)
    {
      if ((mask & 0x1) == 0x0)
	{
	  out_operands[1] = GEN_INT (bit);

	  output_asm_insn ("bclri\t%0,%1", out_operands);
	}

      mask >>= 1;
    }

  return "";
}

/* Output a conditional move of two constants that are +/- 1 within each
   other.  See the "movtK" patterns in mcore.md.  I'm not sure this is
   really worth the effort.  */

const char *
mcore_output_cmov (operands, cmp_t, test)
     rtx operands[];
     int cmp_t;
     char * test;
{
  int load_value;
  int adjust_value;
  rtx out_operands[4];

  out_operands[0] = operands[0];

  /* Check to see which constant is loadable.  */
  if (const_ok_for_mcore (INTVAL (operands[1])))
    {
      out_operands[1] = operands[1];
      out_operands[2] = operands[2];
    }
  else if (const_ok_for_mcore (INTVAL (operands[2])))
    {
      out_operands[1] = operands[2];
      out_operands[2] = operands[1];

      /* Complement test since constants are swapped.  */
      cmp_t = (cmp_t == 0);
    }
  load_value   = INTVAL (out_operands[1]);
  adjust_value = INTVAL (out_operands[2]);

  /* First output the test if folded into the pattern.  */

  if (test)
    output_asm_insn (test, operands);

  /* Load the constant - for now, only support constants that can be
     generated with a single instruction.  Maybe add general inlinable
     constants later (this will increase the # of patterns since the
     instruction sequence has a different length attribute).  */
  if (load_value >= 0 && load_value <= 127)
    output_asm_insn ("movi\t%0,%1", out_operands);
  else if ((load_value & (load_value - 1)) == 0)
    output_asm_insn ("bgeni\t%0,%P1", out_operands);
  else if ((load_value & (load_value + 1)) == 0)
    output_asm_insn ("bmaski\t%0,%N1", out_operands);

  /* Output the constant adjustment.  */
  if (load_value > adjust_value)
    {
      if (cmp_t)
	output_asm_insn ("decf\t%0", out_operands);
      else
	output_asm_insn ("dect\t%0", out_operands);
    }
  else
    {
      if (cmp_t)
	output_asm_insn ("incf\t%0", out_operands);
      else
	output_asm_insn ("inct\t%0", out_operands);
    }

  return "";
}
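
/* Sketch of the output (illustrative): for the constant pair {1, 0}
   this prints "movi rd,1" and then either "dect rd" or "decf rd",
   leaving rd equal to 1 or 0 according to the condition bit.  */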

/* Outputs the peephole for moving a constant that gets not'ed followed
   by an and (i.e. combine the not and the and into andn).  BRC  */

const char *
mcore_output_andn (insn, operands)
     rtx insn ATTRIBUTE_UNUSED;
     rtx operands[];
{
  int x, y;
  rtx out_operands[3];
  const char * load_op;
  char buf[256];

  if (try_constant_tricks (INTVAL (operands[1]), &x, &y) != 2)
    abort ();

  out_operands[0] = operands[0];
  out_operands[1] = GEN_INT (x);
  out_operands[2] = operands[2];

  if (x >= 0 && x <= 127)
    load_op = "movi\t%0,%1";

  /* Try exact power of two.  */
  else if ((x & (x - 1)) == 0)
    load_op = "bgeni\t%0,%P1";

  /* Try exact power of two - 1.  */
  else if ((x & (x + 1)) == 0)
    load_op = "bmaski\t%0,%N1";

  else
    load_op = "BADMOVI\t%0,%1";

  sprintf (buf, "%s\n\tandn\t%%2,%%0", load_op);
  output_asm_insn (buf, out_operands);

  return "";
}
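
/* Example (illustrative, assumes -mhardlit): an AND with 0xffffff00
   inverts to 0xff, a power of two minus one, so trick 2 applies and
   the peephole emits "bmaski rd,8" followed by "andn rs,rd" in place
   of a literal-pool load plus an and.  */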

/* Output an inline constant.  */

static const char *
output_inline_const (mode, operands)
     enum machine_mode mode;
     rtx operands[];
{
  int x = 0, y = 0;
  int trick_no;
  rtx out_operands[3];
  char buf[256];
  char load_op[256];
  const char * dst_fmt;
  int value;

  value = INTVAL (operands[1]);

  if ((trick_no = try_constant_tricks (value, &x, &y)) == 0)
    {
      /* lrw's are handled separately: Large inlinable constants
	 never get turned into lrw's.  Our caller uses try_constant_tricks
	 to back off to an lrw rather than calling this routine.  */
      abort ();
    }

  if (trick_no == 1)
    x = value;

  /* operands: 0 = dst, 1 = load immed., 2 = immed. adjustment.  */
  out_operands[0] = operands[0];
  out_operands[1] = GEN_INT (x);

  if (trick_no > 2)
    out_operands[2] = GEN_INT (y);

  /* Select dst format based on mode.  */
  if (mode == DImode && (! TARGET_LITTLE_END))
    dst_fmt = "%R0";
  else
    dst_fmt = "%0";

  if (x >= 0 && x <= 127)
    sprintf (load_op, "movi\t%s,%%1", dst_fmt);

  /* Try exact power of two.  */
  else if ((x & (x - 1)) == 0)
    sprintf (load_op, "bgeni\t%s,%%P1", dst_fmt);

  /* Try exact power of two - 1.  */
  else if ((x & (x + 1)) == 0)
    sprintf (load_op, "bmaski\t%s,%%N1", dst_fmt);

  else
    sprintf (load_op, "BADMOVI\t%s,%%1", dst_fmt);

  switch (trick_no)
    {
    case 1:
      strcpy (buf, load_op);
      break;
    case 2:   /* not */
      sprintf (buf, "%s\n\tnot\t%s\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 3:   /* add */
      sprintf (buf, "%s\n\taddi\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 4:   /* sub */
      sprintf (buf, "%s\n\tsubi\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 5:   /* rsub */
      /* Never happens unless -mrsubi, see try_constant_tricks().  */
      sprintf (buf, "%s\n\trsubi\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 6:   /* bset */
      sprintf (buf, "%s\n\tbseti\t%s,%%P2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 7:   /* bclr */
      sprintf (buf, "%s\n\tbclri\t%s,%%Q2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 8:   /* rotl */
      sprintf (buf, "%s\n\trotli\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 9:   /* lsl */
      sprintf (buf, "%s\n\tlsli\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 10:  /* ixh */
      sprintf (buf, "%s\n\tixh\t%s,%s\t// %d 0x%x", load_op, dst_fmt, dst_fmt, value, value);
      break;
    case 11:  /* ixw */
      sprintf (buf, "%s\n\tixw\t%s,%s\t// %d 0x%x", load_op, dst_fmt, dst_fmt, value, value);
      break;
    default:
      return "";
    }

  output_asm_insn (buf, out_operands);

  return "";
}
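
/* Another worked case (illustrative): 0x00010080 fails tricks 1-5,
   but clearing bit 16 leaves 0x80, an exact power of two, so trick 6
   applies with x == 0x80 and y == 0x10000, and the emitted sequence
   is "bgeni rd,7" followed by "bseti rd,16".  */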

/* Output a move of a word or less value.  */

const char *
mcore_output_move (insn, operands, mode)
     rtx insn ATTRIBUTE_UNUSED;
     rtx operands[];
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  rtx dst = operands[0];
  rtx src = operands[1];

  if (GET_CODE (dst) == REG)
    {
      if (GET_CODE (src) == REG)
	{
	  if (REGNO (src) == CC_REG)		/* r-c */
	    return "mvc\t%0";
	  else
	    return "mov\t%0,%1";		/* r-r */
	}
      else if (GET_CODE (src) == MEM)
	{
	  if (GET_CODE (XEXP (src, 0)) == LABEL_REF)
	    return "lrw\t%0,[%1]";		/* a-R */
	  else
	    return "ldw\t%0,%1";		/* r-m */
	}
      else if (GET_CODE (src) == CONST_INT)
	{
	  int x, y;

	  if (CONST_OK_FOR_I (INTVAL (src)))	    /* r-I */
	    return "movi\t%0,%1";
	  else if (CONST_OK_FOR_M (INTVAL (src)))   /* r-M */
	    return "bgeni\t%0,%P1\t// %1 %x1";
	  else if (CONST_OK_FOR_N (INTVAL (src)))   /* r-N */
	    return "bmaski\t%0,%N1\t// %1 %x1";
	  else if (try_constant_tricks (INTVAL (src), &x, &y))	   /* R-P */
	    return output_inline_const (SImode, operands);  /* 1-2 insns */
	  else
	    return "lrw\t%0,%x1\t// %1";	/* Get it from the literal pool.  */
	}
      else
	return "lrw\t%0, %1";			/* Into the literal pool.  */
    }
  else if (GET_CODE (dst) == MEM)		/* m-r */
    return "stw\t%1,%0";

  abort ();
}

/* Outputs a constant inline -- regardless of the cost.
   Useful for things where we've gotten into trouble and think we'd
   be doing an lrw into r15 (forbidden).  This lets us get out of
   that pickle even after register allocation.  */

const char *
mcore_output_inline_const_forced (insn, operands, mode)
     rtx insn ATTRIBUTE_UNUSED;
     rtx operands[];
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  unsigned long value = INTVAL (operands[1]);
  unsigned long ovalue = value;
  struct piece
  {
    int low;
    int shift;
  }
  part[6];
  int i;

  if (mcore_const_ok_for_inline (value))
    return output_inline_const (SImode, operands);

  for (i = 0; (unsigned) i < ARRAY_SIZE (part); i++)
    {
      part[i].shift = 0;
      part[i].low = (value & 0x1F);
      value -= part[i].low;

      if (mcore_const_ok_for_inline (value))
	break;
      else
	{
	  value >>= 5;
	  part[i].shift = 5;

	  while ((value & 1) == 0)
	    {
	      part[i].shift++;
	      value >>= 1;
	    }

	  if (mcore_const_ok_for_inline (value))
	    break;
	}
    }

  /* 5 bits per iteration, a maximum of 5 times == 25 bits and leaves
     7 bits left in the constant -- which we know we can cover with
     a movi.  The final value can't be zero otherwise we'd have stopped
     in the previous iteration.  */
  if (value == 0 || ! mcore_const_ok_for_inline (value))
    abort ();

  /* Now, work our way backwards emitting the constant.  */

  /* Emit the value that remains -- it will be non-zero.  */
  operands[1] = GEN_INT (value);
  output_asm_insn (output_inline_const (SImode, operands), operands);

  while (i >= 0)
    {
      /* Shift anything we've already loaded.  */
      if (part[i].shift)
	{
	  operands[2] = GEN_INT (part[i].shift);
	  output_asm_insn ("lsli\t%0,%2", operands);
	  value <<= part[i].shift;
	}

      /* Add anything we need into the low 5 bits.  */
      if (part[i].low != 0)
	{
	  operands[2] = GEN_INT (part[i].low);
	  output_asm_insn ("addi\t%0,%2", operands);
	  value += part[i].low;
	}

      i--;
    }

  if (value != ovalue)		/* sanity */
    abort ();

  /* We've output all the instructions.  */
  return "";
}

/* Return a sequence of instructions to perform DI or DF move.
   Since the MCORE cannot move a DI or DF in one instruction, we have
   to take care when we see overlapping source and dest registers.  */

const char *
mcore_output_movedouble (operands, mode)
     rtx operands[];
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  rtx dst = operands[0];
  rtx src = operands[1];

  if (GET_CODE (dst) == REG)
    {
      if (GET_CODE (src) == REG)
	{
	  int dstreg = REGNO (dst);
	  int srcreg = REGNO (src);

	  /* Ensure the second source is not overwritten.  */
	  if (srcreg + 1 == dstreg)
	    return "mov\t%R0,%R1\n\tmov\t%0,%1";
	  else
	    return "mov\t%0,%1\n\tmov\t%R0,%R1";
	}
      else if (GET_CODE (src) == MEM)
	{
	  rtx memexp = XEXP (src, 0);
	  int dstreg = REGNO (dst);
	  int basereg = -1;

	  if (GET_CODE (memexp) == LABEL_REF)
	    return "lrw\t%0,[%1]\n\tlrw\t%R0,[%R1]";
	  else if (GET_CODE (memexp) == REG)
	    basereg = REGNO (memexp);
	  else if (GET_CODE (memexp) == PLUS)
	    {
	      if (GET_CODE (XEXP (memexp, 0)) == REG)
		basereg = REGNO (XEXP (memexp, 0));
	      else if (GET_CODE (XEXP (memexp, 1)) == REG)
		basereg = REGNO (XEXP (memexp, 1));
	      else
		abort ();
	    }
	  else
	    abort ();

	  /* ??? length attribute is wrong here.  */
	  if (dstreg == basereg)
	    {
	      /* Just load them in reverse order.  */
	      return "ldw\t%R0,%R1\n\tldw\t%0,%1";

	      /* XXX: alternative: move basereg to basereg+1
		 and then fall through.  */
	    }
	  else
	    return "ldw\t%0,%1\n\tldw\t%R0,%R1";
	}
      else if (GET_CODE (src) == CONST_INT)
	{
	  if (TARGET_LITTLE_END)
	    {
	      if (CONST_OK_FOR_I (INTVAL (src)))
		output_asm_insn ("movi\t%0,%1", operands);
	      else if (CONST_OK_FOR_M (INTVAL (src)))
		output_asm_insn ("bgeni\t%0,%P1", operands);
	      else if (INTVAL (src) == -1)
		output_asm_insn ("bmaski\t%0,32", operands);
	      else if (CONST_OK_FOR_N (INTVAL (src)))
		output_asm_insn ("bmaski\t%0,%N1", operands);
	      else
		abort ();

	      if (INTVAL (src) < 0)
		return "bmaski\t%R0,32";
	      else
		return "movi\t%R0,0";
	    }
	  else
	    {
	      if (CONST_OK_FOR_I (INTVAL (src)))
		output_asm_insn ("movi\t%R0,%1", operands);
	      else if (CONST_OK_FOR_M (INTVAL (src)))
		output_asm_insn ("bgeni\t%R0,%P1", operands);
	      else if (INTVAL (src) == -1)
		output_asm_insn ("bmaski\t%R0,32", operands);
	      else if (CONST_OK_FOR_N (INTVAL (src)))
		output_asm_insn ("bmaski\t%R0,%N1", operands);
	      else
		abort ();

	      if (INTVAL (src) < 0)
		return "bmaski\t%0,32";
	      else
		return "movi\t%0,0";
	    }
	}
      else
	abort ();
    }
  else if (GET_CODE (dst) == MEM && GET_CODE (src) == REG)
    return "stw\t%1,%0\n\tstw\t%R1,%R0";
  else
    abort ();
}
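
/* The overlap case matters when moving, say, the pair r2/r3 into
   r3/r4 (illustrative): copying the low word first would clobber r3
   before it is read, so the high word is copied first -- "mov r4,r3"
   then "mov r3,r2".  */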

/* Predicates used by the templates.  */

/* Nonzero if OP can be source of a simple move operation.  */

int
mcore_general_movsrc_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  /* Any (MEM LABEL_REF) is OK.  That is a pc-relative load.  */
  if (GET_CODE (op) == MEM && GET_CODE (XEXP (op, 0)) == LABEL_REF)
    return 1;

  return general_operand (op, mode);
}

/* Nonzero if OP can be destination of a simple move operation.  */

int
mcore_general_movdst_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (GET_CODE (op) == REG && REGNO (op) == CC_REG)
    return 0;

  return general_operand (op, mode);
}

/* Nonzero if OP is a normal arithmetic register.  */

int
mcore_arith_reg_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (! register_operand (op, mode))
    return 0;

  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);

  if (GET_CODE (op) == REG)
    return REGNO (op) != CC_REG;

  return 1;
}

/* Nonzero if OP should be recognized during reload for an ixh/ixw
   operand.  See the ixh/ixw patterns.  */

int
mcore_reload_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (mcore_arith_reg_operand (op, mode))
    return 1;

  if (! reload_in_progress)
    return 0;

  return GET_CODE (op) == MEM;
}

/* Nonzero if OP is a valid source operand for an arithmetic insn.  */

int
mcore_arith_J_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_J (INTVAL (op)))
    return 1;

  return 0;
}

/* Nonzero if OP is a valid source operand for an arithmetic insn.  */

int
mcore_arith_K_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_K (INTVAL (op)))
    return 1;

  return 0;
}

/* Nonzero if OP is a valid source operand for a shift or rotate insn.  */

int
mcore_arith_K_operand_not_0 (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (   GET_CODE (op) == CONST_INT
      && CONST_OK_FOR_K (INTVAL (op))
      && INTVAL (op) != 0)
    return 1;

  return 0;
}

int
mcore_arith_K_S_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT)
    {
      if (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_M (~INTVAL (op)))
	return 1;
    }

  return 0;
}

int
mcore_arith_S_operand (op)
     rtx op;
{
  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (~INTVAL (op)))
    return 1;

  return 0;
}

int
mcore_arith_M_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (INTVAL (op)))
    return 1;

  return 0;
}

/* Nonzero if OP is a valid source operand for loading.  */

int
mcore_arith_imm_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT && const_ok_for_mcore (INTVAL (op)))
    return 1;

  return 0;
}

int
mcore_arith_any_imm_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT)
    return 1;

  return 0;
}

/* Nonzero if OP is a valid source operand for a cmov with two consts +/- 1.  */

int
mcore_arith_O_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_O (INTVAL (op)))
    return 1;

  return 0;
}

/* Nonzero if OP is a valid source operand for a btsti.  */

int
mcore_literal_K_operand (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_K (INTVAL (op)))
    return 1;

  return 0;
}

/* Nonzero if OP is a valid source operand for an add/sub insn.  */

int
mcore_addsub_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT)
    {
      return 1;

      /* The following is removed because it precludes large constants from
	 being returned as valid source operands for an add/sub insn.  While
	 large constants may not directly be used in an add/sub, they may be
	 if first loaded into a register.  Thus, this predicate should
	 indicate that they are valid, and the constraint in mcore.md should
	 control whether an additional load to register is needed.
	 (see mcore.md, addsi).  -- DAC 4/2/1998  */
      /*
	if (CONST_OK_FOR_J (INTVAL (op)) || CONST_OK_FOR_L (INTVAL (op)))
	  return 1;
      */
    }

  return 0;
}

/* Nonzero if OP is a valid source operand for a compare operation.  */

int
mcore_compare_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT && INTVAL (op) == 0)
    return 1;

  return 0;
}

/* Expand insert bit field.  BRC  */

int
mcore_expand_insv (operands)
     rtx operands[];
{
  int width = INTVAL (operands[1]);
  int posn = INTVAL (operands[2]);
  int mask;
  rtx mreg, sreg, ereg;

  /* To get width 1 insv, the test in store_bit_field() (expmed.c, line 191)
     for width==1 must be removed.  Look around line 368.  This is something
     we really want the md part to do.  */
  if (width == 1 && GET_CODE (operands[3]) == CONST_INT)
    {
      /* Do directly with bseti or bclri.  */
      /* RBE: 2/97 consider only low bit of constant.  */
      if ((INTVAL (operands[3]) & 1) == 0)
	{
	  mask = ~(1 << posn);
	  emit_insn (gen_rtx (SET, SImode, operands[0],
			      gen_rtx (AND, SImode, operands[0], GEN_INT (mask))));
	}
      else
	{
	  mask = 1 << posn;
	  emit_insn (gen_rtx (SET, SImode, operands[0],
			      gen_rtx (IOR, SImode, operands[0], GEN_INT (mask))));
	}

      return 1;
    }

  /* Look at some bitfield placements that we aren't interested
     in handling ourselves, unless specifically directed to do so.  */
  if (! TARGET_W_FIELD)
    return 0;		/* Generally, give up about now.  */

  if (width == 8 && posn % 8 == 0)
    /* Byte sized and aligned; let caller break it up.  */
    return 0;

  if (width == 16 && posn % 16 == 0)
    /* Short sized and aligned; let caller break it up.  */
    return 0;

  /* The general case - we can do this a little bit better than what the
     machine independent part tries.  This will get rid of all the subregs
     that mess up constant folding in combine when working with relaxed
     immediates.  */

  /* If setting the entire field, do it directly.  */
  if (GET_CODE (operands[3]) == CONST_INT &&
      INTVAL (operands[3]) == ((1 << width) - 1))
    {
      mreg = force_reg (SImode, GEN_INT (INTVAL (operands[3]) << posn));
      emit_insn (gen_rtx (SET, SImode, operands[0],
			  gen_rtx (IOR, SImode, operands[0], mreg)));
      return 1;
    }

  /* Generate the clear mask.  */
  mreg = force_reg (SImode, GEN_INT (~(((1 << width) - 1) << posn)));

  /* Clear the field, to overlay it later with the source.  */
  emit_insn (gen_rtx (SET, SImode, operands[0],
		      gen_rtx (AND, SImode, operands[0], mreg)));

  /* If the source is constant 0, we've nothing to add back.  */
  if (GET_CODE (operands[3]) == CONST_INT && INTVAL (operands[3]) == 0)
    return 1;

  /* XXX: Should we worry about more games with constant values?
     We've covered the high profile: set/clear single-bit and many-bit
     fields.  How often do we see "arbitrary bit pattern" constants?  */
  sreg = copy_to_mode_reg (SImode, operands[3]);

  /* Extract src as same width as dst (needed for signed values).  We
     always have to do this since we widen everything to SImode.
     We don't have to mask if we're shifting this up against the
     MSB of the register (the shift will push out any hi-order
     bits).  */
  if (width + posn != (int) GET_MODE_BITSIZE (SImode))
    {
      ereg = force_reg (SImode, GEN_INT ((1 << width) - 1));
      emit_insn (gen_rtx (SET, SImode, sreg,
			  gen_rtx (AND, SImode, sreg, ereg)));
    }

  /* Insert source value in dest.  */
  if (posn != 0)
    emit_insn (gen_rtx (SET, SImode, sreg,
			gen_rtx (ASHIFT, SImode, sreg, GEN_INT (posn))));

  emit_insn (gen_rtx (SET, SImode, operands[0],
		      gen_rtx (IOR, SImode, operands[0], sreg)));

  return 1;
}
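
/* Sketch of the general path (illustrative): inserting a 4-bit field
   at position 8 ANDs the destination with 0xfffff0ff, masks the
   source with 0xf, shifts it left by 8 and ORs it back in.  */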

/* Return 1 if OP is a load multiple operation.  It is known to be a
   PARALLEL and the first section will be tested.  */

int
mcore_load_multiple_operation (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  int count = XVECLEN (op, 0);
  int dest_regno;
  rtx src_addr;
  int i;

  /* Perform a quick check so we don't blow up below.  */
  if (count <= 1
      || GET_CODE (XVECEXP (op, 0, 0)) != SET
      || GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != REG
      || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != MEM)
    return 0;

  dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, 0)));
  src_addr = XEXP (SET_SRC (XVECEXP (op, 0, 0)), 0);

  for (i = 1; i < count; i++)
    {
      rtx elt = XVECEXP (op, 0, i);

      if (GET_CODE (elt) != SET
	  || GET_CODE (SET_DEST (elt)) != REG
	  || GET_MODE (SET_DEST (elt)) != SImode
	  || REGNO (SET_DEST (elt)) != (unsigned) (dest_regno + i)
	  || GET_CODE (SET_SRC (elt)) != MEM
	  || GET_MODE (SET_SRC (elt)) != SImode
	  || GET_CODE (XEXP (SET_SRC (elt), 0)) != PLUS
	  || ! rtx_equal_p (XEXP (XEXP (SET_SRC (elt), 0), 0), src_addr)
	  || GET_CODE (XEXP (XEXP (SET_SRC (elt), 0), 1)) != CONST_INT
	  || INTVAL (XEXP (XEXP (SET_SRC (elt), 0), 1)) != i * 4)
	return 0;
    }

  return 1;
}
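
/* The shape being matched (illustrative), e.g. a three-word load from
   the address in r2 into r4-r6:

     (parallel [(set (reg:SI 4) (mem:SI (reg:SI 2)))
		(set (reg:SI 5) (mem:SI (plus:SI (reg:SI 2) (const_int 4))))
		(set (reg:SI 6) (mem:SI (plus:SI (reg:SI 2) (const_int 8))))])

   Destination registers and word offsets must both be consecutive.  */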

/* Similar, but tests for store multiple.  */

int
mcore_store_multiple_operation (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  int count = XVECLEN (op, 0);
  int src_regno;
  rtx dest_addr;
  int i;

  /* Perform a quick check so we don't blow up below.  */
  if (count <= 1
      || GET_CODE (XVECEXP (op, 0, 0)) != SET
      || GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != MEM
      || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != REG)
    return 0;

  src_regno = REGNO (SET_SRC (XVECEXP (op, 0, 0)));
  dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, 0)), 0);

  for (i = 1; i < count; i++)
    {
      rtx elt = XVECEXP (op, 0, i);

      if (GET_CODE (elt) != SET
	  || GET_CODE (SET_SRC (elt)) != REG
	  || GET_MODE (SET_SRC (elt)) != SImode
	  || REGNO (SET_SRC (elt)) != (unsigned) (src_regno + i)
	  || GET_CODE (SET_DEST (elt)) != MEM
	  || GET_MODE (SET_DEST (elt)) != SImode
	  || GET_CODE (XEXP (SET_DEST (elt), 0)) != PLUS
	  || ! rtx_equal_p (XEXP (XEXP (SET_DEST (elt), 0), 0), dest_addr)
	  || GET_CODE (XEXP (XEXP (SET_DEST (elt), 0), 1)) != CONST_INT
	  || INTVAL (XEXP (XEXP (SET_DEST (elt), 0), 1)) != i * 4)
	return 0;
    }

  return 1;
}

/* ??? Block move stuff stolen from m88k.  This code has not been
   verified for correctness.  */

/* Emit code to perform a block move.  Choose the best method.

   OPERANDS[0] is the destination.
   OPERANDS[1] is the source.
   OPERANDS[2] is the size.
   OPERANDS[3] is the alignment safe to use.  */

/* Emit code to perform a block move with an offset sequence of ldw/st
   instructions (..., ldw 0, stw 1, ldw 1, stw 0, ...).  SIZE and ALIGN are
   known constants.  DEST and SRC are registers.  OFFSET is the known
   starting point for the output pattern.  */

static enum machine_mode mode_from_align[] =
{
  VOIDmode, QImode, HImode, VOIDmode, SImode,
  VOIDmode, VOIDmode, VOIDmode, DImode
};

static void
block_move_sequence (dest, dst_mem, src, src_mem, size, align, offset)
     rtx dest, dst_mem;
     rtx src, src_mem;
     int size;
     int align;
     int offset;
{
  rtx temp[2];
  enum machine_mode mode[2];
  int amount[2];
  int active[2];
  int phase = 0;
  int next;
  int offset_ld = offset;
  int offset_st = offset;

  active[0] = active[1] = FALSE;

  /* Establish parameters for the first load and for the second load if
     it is known to be the same mode as the first.  */
  amount[0] = amount[1] = align;

  mode[0] = mode_from_align[align];

  temp[0] = gen_reg_rtx (mode[0]);

  if (size >= 2 * align)
    {
      mode[1] = mode[0];
      temp[1] = gen_reg_rtx (mode[1]);
    }

  do
    {
      rtx srcp, dstp;

      next = phase;
      phase = !phase;

      if (size > 0)
	{
	  /* Change modes as the sequence tails off.  */
	  if (size < amount[next])
	    {
	      amount[next] = (size >= 4 ? 4 : (size >= 2 ? 2 : 1));
	      mode[next] = mode_from_align[amount[next]];
	      temp[next] = gen_reg_rtx (mode[next]);
	    }

	  size -= amount[next];
	  srcp = gen_rtx (MEM,
#if 0
			  MEM_IN_STRUCT_P (src_mem) ? mode[next] : BLKmode,
#else
			  mode[next],
#endif
			  gen_rtx (PLUS, Pmode, src,
				   gen_rtx (CONST_INT, SImode, offset_ld)));

	  RTX_UNCHANGING_P (srcp) = RTX_UNCHANGING_P (src_mem);
	  MEM_VOLATILE_P (srcp) = MEM_VOLATILE_P (src_mem);
	  MEM_IN_STRUCT_P (srcp) = 1;
	  emit_insn (gen_rtx (SET, VOIDmode, temp[next], srcp));
	  offset_ld += amount[next];
	  active[next] = TRUE;
	}

      if (active[phase])
	{
	  active[phase] = FALSE;

	  dstp = gen_rtx (MEM,
#if 0
			  MEM_IN_STRUCT_P (dst_mem) ? mode[phase] : BLKmode,
#else
			  mode[phase],
#endif
			  gen_rtx (PLUS, Pmode, dest,
				   gen_rtx (CONST_INT, SImode, offset_st)));

	  RTX_UNCHANGING_P (dstp) = RTX_UNCHANGING_P (dst_mem);
	  MEM_VOLATILE_P (dstp) = MEM_VOLATILE_P (dst_mem);
	  MEM_IN_STRUCT_P (dstp) = 1;
	  emit_insn (gen_rtx (SET, VOIDmode, dstp, temp[phase]));
	  offset_st += amount[phase];
	}
    }
  while (active[next]);
}
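
/* The two-phase loop keeps loads one step ahead of stores
   (illustrative): for size == 8 and align == 4 the emitted order is
   load word 0, load word 4, store word 0, store word 4, so each load
   has settled before its store is issued.  */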

void
mcore_expand_block_move (dst_mem, src_mem, operands)
     rtx dst_mem;
     rtx src_mem;
     rtx * operands;
{
  int align = INTVAL (operands[3]);
  int bytes;

  if (GET_CODE (operands[2]) == CONST_INT)
    {
      bytes = INTVAL (operands[2]);

      if (bytes <= 0)
	return;
      if (align > 4)
	align = 4;

      /* RBE: bumped 1 and 2 byte align from 1 and 2 to 4 and 8 bytes before
	 we give up and go to memcpy.  */
      if ((align == 4 && (bytes <= 4*4
			  || ((bytes & 01) == 0 && bytes <= 8*4)
			  || ((bytes & 03) == 0 && bytes <= 16*4)))
	  || (align == 2 && bytes <= 4*2)
	  || (align == 1 && bytes <= 4*1))
	{
	  block_move_sequence (operands[0], dst_mem, operands[1], src_mem,
			       bytes, align, 0);
	  return;
	}
    }

  /* If we get here, just use the library routine.  */
  emit_library_call (gen_rtx (SYMBOL_REF, Pmode, "memcpy"), 0, VOIDmode, 3,
		     operands[0], Pmode, operands[1], Pmode, operands[2],
		     SImode);
}
2012\f
2013
2014/* Code to generate prologue and epilogue sequences. */
2015static int number_of_regs_before_varargs;
4816b8e4 2016
8f90be4c
NC
2017/* Set by SETUP_INCOMING_VARARGS to indicate to prolog that this is
2018 for a varargs function. */
2019static int current_function_anonymous_args;
2020
8f90be4c
NC
2021#define STACK_BYTES (STACK_BOUNDARY/BITS_PER_UNIT)
2022#define STORE_REACH (64) /* Maximum displace of word store + 4. */
4816b8e4 2023#define ADDI_REACH (32) /* Maximum addi operand. */
8f90be4c 2024
8f90be4c
NC

static void
layout_mcore_frame (infp)
     struct mcore_frame * infp;
{
  int n;
  unsigned int i;
  int nbytes;
  int regarg;
  int localregarg;
  int localreg;
  int outbounds;
  unsigned int growths;
  int step;

  /* Might have to spill bytes to re-assemble a big argument that
     was passed partially in registers and partially on the stack.  */
  nbytes = current_function_pretend_args_size;

  /* Determine how much space for spilled anonymous args (e.g., stdarg).  */
  if (current_function_anonymous_args)
    nbytes += (NPARM_REGS - number_of_regs_before_varargs) * UNITS_PER_WORD;

  infp->arg_size = nbytes;

  /* How much space to save non-volatile registers we stomp.  */
  infp->reg_mask = calc_live_regs (& n);
  infp->reg_size = n * 4;

  /* And the rest of it... locals and space for overflowed outbounds.  */
  infp->local_size = get_frame_size ();
  infp->outbound_size = current_function_outgoing_args_size;

  /* Make sure we have a whole number of words for the locals.  */
  if (infp->local_size % STACK_BYTES)
    infp->local_size = (infp->local_size + STACK_BYTES - 1) & ~ (STACK_BYTES - 1);

  /* Only thing we know we have to pad is the outbound space, since
     we've aligned our locals assuming that base of locals is aligned.  */
  infp->pad_local = 0;
  infp->pad_reg = 0;
  infp->pad_outbound = 0;
  if (infp->outbound_size % STACK_BYTES)
    infp->pad_outbound = STACK_BYTES - (infp->outbound_size % STACK_BYTES);

  /* Now we see how we want to stage the prologue so that it does
     the most appropriate stack growth and register saves to either:
     (1) run fast,
     (2) reduce instruction space, or
     (3) reduce stack space.  */
  for (i = 0; i < ARRAY_SIZE (infp->growth); i++)
    infp->growth[i] = 0;

  regarg      = infp->reg_size + infp->arg_size;
  localregarg = infp->local_size + regarg;
  localreg    = infp->local_size + infp->reg_size;
  outbounds   = infp->outbound_size + infp->pad_outbound;
  growths     = 0;

  /* XXX: Consider one where we consider localregarg + outbound too!  */

  /* Frame of <= 32 bytes and using stm would get <= 2 registers:
     use stw's with offsets and buy the frame in one shot.  */
  if (localregarg <= ADDI_REACH
      && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
    {
      /* Make sure we'll be aligned.  */
      if (localregarg % STACK_BYTES)
        infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);

      step = localregarg + infp->pad_reg;
      infp->reg_offset = infp->local_size;

      if (outbounds + step <= ADDI_REACH && !frame_pointer_needed)
        {
          step += outbounds;
          infp->reg_offset += outbounds;
          outbounds = 0;
        }

      infp->arg_offset = step - 4;
      infp->growth[growths++] = step;
      infp->reg_growth = growths;
      infp->local_growth = growths;

      /* If we haven't already folded it in.  */
      if (outbounds)
        infp->growth[growths++] = outbounds;

      goto finish;
    }

  /* Frame can't be done with a single subi, but can be done with 2
     insns.  If the 'stm' is getting <= 2 registers, we use stw's and
     shift some of the stack purchase into the first subi, so both are
     single instructions.  */
  if (localregarg <= STORE_REACH
      && (infp->local_size > ADDI_REACH)
      && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
    {
      int all;

      /* Make sure we'll be aligned; use either pad_reg or pad_local.  */
      if (localregarg % STACK_BYTES)
        infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);

      all = localregarg + infp->pad_reg + infp->pad_local;
      step = ADDI_REACH;	/* As much up front as we can.  */
      if (step > all)
        step = all;

      /* XXX: Consider whether step will still be aligned; we believe so.  */
      infp->arg_offset = step - 4;
      infp->growth[growths++] = step;
      infp->reg_growth = growths;
      infp->reg_offset = step - infp->pad_reg - infp->reg_size;
      all -= step;

      /* Can we fold in any space required for outbounds?  */
      if (outbounds + all <= ADDI_REACH && !frame_pointer_needed)
        {
          all += outbounds;
          outbounds = 0;
        }

      /* Get the rest of the locals in place.  */
      step = all;
      infp->growth[growths++] = step;
      infp->local_growth = growths;
      all -= step;

      assert (all == 0);

      /* Finish off if we need to do so.  */
      if (outbounds)
        infp->growth[growths++] = outbounds;

      goto finish;
    }

  /* Registers + args is nicely aligned, so we'll buy that in one shot.
     Then we buy the rest of the frame in 1 or 2 steps depending on
     whether we need a frame pointer.  */
  if ((regarg % STACK_BYTES) == 0)
    {
      infp->growth[growths++] = regarg;
      infp->reg_growth = growths;
      infp->arg_offset = regarg - 4;
      infp->reg_offset = 0;

      if (infp->local_size % STACK_BYTES)
        infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);

      step = infp->local_size + infp->pad_local;

      if (!frame_pointer_needed)
        {
          step += outbounds;
          outbounds = 0;
        }

      infp->growth[growths++] = step;
      infp->local_growth = growths;

      /* If there's any left to be done.  */
      if (outbounds)
        infp->growth[growths++] = outbounds;

      goto finish;
    }

  /* XXX: optimizations that we'll want to play with....
     -- regarg is not aligned, but it's a small number of registers;
        use some of localsize so that regarg is aligned and then
        save the registers.  */

  /* Simple encoding; plods down the stack buying the pieces as it goes.
     -- does not optimize space consumption.
     -- does not attempt to optimize instruction counts.
     -- but it is safe for all alignments.  */
  if (regarg % STACK_BYTES != 0)
    infp->pad_reg = STACK_BYTES - (regarg % STACK_BYTES);

  infp->growth[growths++] = infp->arg_size + infp->reg_size + infp->pad_reg;
  infp->reg_growth = growths;
  infp->arg_offset = infp->growth[0] - 4;
  infp->reg_offset = 0;

  if (frame_pointer_needed)
    {
      if (infp->local_size % STACK_BYTES != 0)
        infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);

      infp->growth[growths++] = infp->local_size + infp->pad_local;
      infp->local_growth = growths;

      infp->growth[growths++] = outbounds;
    }
  else
    {
      if ((infp->local_size + outbounds) % STACK_BYTES != 0)
        infp->pad_local = STACK_BYTES - ((infp->local_size + outbounds) % STACK_BYTES);

      infp->growth[growths++] = infp->local_size + infp->pad_local + outbounds;
      infp->local_growth = growths;
    }

  /* Anything else that we've forgotten, plus a few consistency checks.  */
 finish:
  assert (infp->reg_offset >= 0);
  assert (growths <= MAX_STACK_GROWS);

  for (i = 0; i < growths; i++)
    {
      if (infp->growth[i] % STACK_BYTES)
        {
          fprintf (stderr, "stack growth of %d is not %d aligned\n",
                   infp->growth[i], STACK_BYTES);
          abort ();
        }
    }
}
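
/* Worked example (illustrative, assuming STACK_BYTES == 8): a function
   with 20 bytes of locals, no anonymous-arg spills, no outbound
   overflow, and r8 plus the link register r15 to save (reg_size == 8).
   The locals round up to 24, so localregarg == 32: within ADDI_REACH
   and with a small register mask, so the first strategy above applies.
   The result is pad_reg == 0, growth[0] == 32, reg_offset == 24,
   arg_offset == 28, and reg_growth == local_growth == 1 -- the entire
   frame is bought with one 32-byte adjustment.  */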

/* Define the offset between two registers, one to be eliminated, and
   the other its replacement, at the start of a routine.  */

int
mcore_initial_elimination_offset (from, to)
     int from;
     int to;
{
  int above_frame;
  int below_frame;
  struct mcore_frame fi;

  layout_mcore_frame (& fi);

  /* fp to ap */
  above_frame = fi.local_size + fi.pad_local + fi.reg_size + fi.pad_reg;
  /* sp to fp */
  below_frame = fi.outbound_size + fi.pad_outbound;

  if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
    return above_frame;

  if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    return above_frame + below_frame;

  if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    return below_frame;

  abort ();

  return 0;
}
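
/* Schematic of the frame these offsets describe (stack grows down):

        higher addresses
          incoming stack args / stdarg spill area (arg_size)
        ap ->
          saved non-volatile registers (reg_size + pad_reg)
          locals (local_size + pad_local)
        fp ->
          outgoing argument overflow (outbound_size + pad_outbound)
        sp ->
        lower addresses

   so above_frame is the ap-to-fp distance and below_frame the
   fp-to-sp distance.  */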

/* Keep track of some information about varargs for the prolog.  */

void
mcore_setup_incoming_varargs (args_so_far, mode, type, ptr_pretend_size)
     CUMULATIVE_ARGS args_so_far;
     enum machine_mode mode;
     tree type;
     int * ptr_pretend_size ATTRIBUTE_UNUSED;
{
  current_function_anonymous_args = 1;

  /* We need to know how many argument registers are used before
     the varargs start, so that we can push the remaining argument
     registers during the prologue.  */
  number_of_regs_before_varargs = args_so_far + mcore_num_arg_regs (mode, type);

  /* There is a bug somewhere in the arg handling code.
     Until I can find it, this workaround always pushes the
     last named argument onto the stack.  */
  number_of_regs_before_varargs = args_so_far;

  /* The last named argument may be split between argument registers
     and the stack.  Allow for this here.  */
  if (number_of_regs_before_varargs > NPARM_REGS)
    number_of_regs_before_varargs = NPARM_REGS;
}

void
mcore_expand_prolog ()
{
  struct mcore_frame fi;
  int space_allocated = 0;
  int growth = 0;

  /* Find out what we're doing.  */
  layout_mcore_frame (&fi);

  space_allocated = fi.arg_size + fi.reg_size + fi.local_size +
    fi.outbound_size + fi.pad_outbound + fi.pad_local + fi.pad_reg;

  if (TARGET_CG_DATA)
    {
      /* Emit a symbol for this routine's frame size.  */
      rtx x;
      int len;

      x = DECL_RTL (current_function_decl);

      if (GET_CODE (x) != MEM)
        abort ();

      x = XEXP (x, 0);

      if (GET_CODE (x) != SYMBOL_REF)
        abort ();

      if (mcore_current_function_name)
        free (mcore_current_function_name);

      len = strlen (XSTR (x, 0)) + 1;
      mcore_current_function_name = (char *) xmalloc (len);

      memcpy (mcore_current_function_name, XSTR (x, 0), len);

      ASM_OUTPUT_CG_NODE (asm_out_file, mcore_current_function_name, space_allocated);

      if (current_function_calls_alloca)
        ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name, "alloca", 1);

      /* 970425: RBE:
         We're looking at how the 8byte alignment affects stack layout
         and where we had to pad things.  This emits information we can
         extract which tells us about frame sizes and the like.  */
      fprintf (asm_out_file,
               "\t.equ\t__$frame$info$_%s_$_%d_%d_x%x_%d_%d_%d,0\n",
               mcore_current_function_name,
               fi.arg_size, fi.reg_size, fi.reg_mask,
               fi.local_size, fi.outbound_size,
               frame_pointer_needed);
    }

  if (mcore_naked_function_p ())
    return;

  /* Handle stdarg+regsaves in one shot: can't be more than 64 bytes.  */
  output_stack_adjust (-1, fi.growth[growth++]);	/* Grows it.  */

  /* If we have a parameter passed partially in regs and partially in memory,
     the registers will have been stored to memory already in function.c.  So
     we only need to do something here for varargs functions.  */
  if (fi.arg_size != 0 && current_function_pretend_args_size == 0)
    {
      int offset;
      int rn = FIRST_PARM_REG + NPARM_REGS - 1;
      int remaining = fi.arg_size;

      for (offset = fi.arg_offset; remaining >= 4; offset -= 4, rn--, remaining -= 4)
        {
          emit_insn (gen_movsi
                     (gen_rtx (MEM, SImode,
                               plus_constant (stack_pointer_rtx, offset)),
                      gen_rtx (REG, SImode, rn)));
        }
    }

  /* Do we need another stack adjustment before we do the register saves?  */
  if (growth < fi.reg_growth)
    output_stack_adjust (-1, fi.growth[growth++]);	/* Grows it.  */

  if (fi.reg_size != 0)
    {
      int i;
      int offs = fi.reg_offset;

      for (i = 15; i >= 0; i--)
        {
          if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
            {
              int first_reg = 15;

              while (fi.reg_mask & (1 << first_reg))
                first_reg--;
              first_reg++;

              emit_insn (gen_store_multiple (gen_rtx (MEM, SImode, stack_pointer_rtx),
                                             gen_rtx (REG, SImode, first_reg),
                                             GEN_INT (16 - first_reg)));

              i -= (15 - first_reg);
              offs += (16 - first_reg) * 4;
            }
          else if (fi.reg_mask & (1 << i))
            {
              emit_insn (gen_movsi
                         (gen_rtx (MEM, SImode,
                                   plus_constant (stack_pointer_rtx, offs)),
                          gen_rtx (REG, SImode, i)));
              offs += 4;
            }
        }
    }

  /* Figure the locals + outbounds.  */
  if (frame_pointer_needed)
    {
      /* If we haven't already purchased to 'fp'.  */
      if (growth < fi.local_growth)
        output_stack_adjust (-1, fi.growth[growth++]);	/* Grows it.  */

      emit_insn (gen_movsi (frame_pointer_rtx, stack_pointer_rtx));

      /* ... and then go any remaining distance for outbounds, etc.  */
      if (fi.growth[growth])
        output_stack_adjust (-1, fi.growth[growth++]);
    }
  else
    {
      if (growth < fi.local_growth)
        output_stack_adjust (-1, fi.growth[growth++]);	/* Grows it.  */
      if (fi.growth[growth])
        output_stack_adjust (-1, fi.growth[growth++]);
    }
}
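
/* Illustration only (schematic MCore assembly, operand syntax
   approximate): for the 32-byte example frame sketched above
   layout_mcore_frame, this expansion amounts to

        subi    sp,32           growth[0]: buy the whole frame
        stw     r15,(sp,24)     register saves walk up from reg_offset
        stw     r8,(sp,28)

   with no second adjustment, since reg_growth == local_growth == 1.  */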

void
mcore_expand_epilog ()
{
  struct mcore_frame fi;
  int i;
  int offs;
  int growth = MAX_STACK_GROWS - 1;

  /* Find out what we're doing.  */
  layout_mcore_frame (&fi);

  if (mcore_naked_function_p ())
    return;

  /* If we had a frame pointer, restore the sp from that.  */
  if (frame_pointer_needed)
    {
      emit_insn (gen_movsi (stack_pointer_rtx, frame_pointer_rtx));
      growth = fi.local_growth - 1;
    }
  else
    {
      /* XXX: while loop should accumulate and do a single sell.  */
      while (growth >= fi.local_growth)
        {
          if (fi.growth[growth] != 0)
            output_stack_adjust (1, fi.growth[growth]);
          growth--;
        }
    }

  /* Make sure we've shrunk stack back to the point where the registers
     were laid down.  This is typically 0/1 iterations.  Then pull the
     register save information back off the stack.  */
  while (growth >= fi.reg_growth)
    output_stack_adjust (1, fi.growth[growth--]);

  offs = fi.reg_offset;

  for (i = 15; i >= 0; i--)
    {
      if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
        {
          int first_reg;

          /* Find the starting register.  */
          first_reg = 15;

          while (fi.reg_mask & (1 << first_reg))
            first_reg--;

          first_reg++;

          emit_insn (gen_load_multiple (gen_rtx (REG, SImode, first_reg),
                                        gen_rtx (MEM, SImode, stack_pointer_rtx),
                                        GEN_INT (16 - first_reg)));

          i -= (15 - first_reg);
          offs += (16 - first_reg) * 4;
        }
      else if (fi.reg_mask & (1 << i))
        {
          emit_insn (gen_movsi
                     (gen_rtx (REG, SImode, i),
                      gen_rtx (MEM, SImode,
                               plus_constant (stack_pointer_rtx, offs))));
          offs += 4;
        }
    }

  /* Give back anything else.  */
  /* XXX: Should accumulate total and then give it back.  */
  while (growth >= 0)
    output_stack_adjust (1, fi.growth[growth--]);
}
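
/* For the same 32-byte example frame, the epilogue unwinds in the
   opposite order (schematic assembly, operand syntax approximate):

        ldw     r15,(sp,24)     restore saves from reg_offset upward
        ldw     r8,(sp,28)
        addi    sp,32           give back growth[0]

   Note that the return instruction itself is not emitted here.  */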
\f
/* This code is borrowed from the SH port.  */

/* The MCORE cannot load a large constant into a register; constants have to
   come from a pc relative load.  The reference of a pc relative load
   instruction must be less than 1k in front of the instruction.  This
   means that we often have to dump a constant inside a function, and
   generate code to branch around it.

   It is important to minimize this, since the branches will slow things
   down and make things bigger.

   Worst case code looks like:

   lrw   L1,r0
   br    L2
   align
   L1:   .long value
   L2:
   ..

   lrw   L3,r0
   br    L4
   align
   L3:   .long value
   L4:
   ..

   We fix this by performing a scan before scheduling, which notices which
   instructions need to have their operands fetched from the constant table
   and builds the table.

   The algorithm is:

   scan, find an instruction which needs a pcrel move.  Look forward, find the
   last barrier which is within MAX_COUNT bytes of the requirement.
   If there isn't one, make one.  Process all the instructions between
   the find and the barrier.

   In the above example, we can tell that L3 is within 1k of L1, so
   the first move can be shrunk from the 2 insn+constant sequence into
   just 1 insn, and the constant moved to L3 to make:

   lrw   L1,r0
   ..
   lrw   L3,r0
   br    L4
   align
   L3:   .long value
   L4:   .long value

   Then the second move becomes the target for the shortening process.  */

typedef struct
{
  rtx value;			/* Value in table.  */
  rtx label;			/* Label of value.  */
} pool_node;

/* The maximum number of constants that can fit into one pool, since
   the pc relative range is 0...1020 bytes and constants are at least 4
   bytes long.  We subtract 4 from the range to allow for the case where
   we need to add a branch/align before the constant pool.  */

#define MAX_COUNT 1016
#define MAX_POOL_SIZE (MAX_COUNT/4)
static pool_node pool_vector[MAX_POOL_SIZE];
static int pool_size;
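
/* Worked numbers: an lrw can reach 0...1020 bytes ahead; reserving 4
   of those bytes for the possible branch/align gives MAX_COUNT == 1016,
   and with each entry at least 4 bytes wide the pool is capped at
   1016/4 == 254 nodes.  */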

/* Dump out any constants accumulated in the final pass.  These
   will only be labels.  */

const char *
mcore_output_jump_label_table ()
{
  int i;

  if (pool_size)
    {
      fprintf (asm_out_file, "\t.align 2\n");

      for (i = 0; i < pool_size; i++)
        {
          pool_node * p = pool_vector + i;

          ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (p->label));

          output_asm_insn (".long %0", &p->value);
        }

      pool_size = 0;
    }

  return "";
}

#if 0 /* XXX temporarily suppressed until I have time to look at what this code does.  */

/* We need these below.  They use information stored in tables to figure out
   what values are in what registers, etc.  This is okay, since these tables
   are valid at the time mcore_dependent_simplify_rtx() is invoked.  Don't
   use them anywhere else.  BRC */

extern unsigned HOST_WIDE_INT nonzero_bits PARAMS ((rtx, enum machine_mode));
extern int num_sign_bit_copies PARAMS ((rtx, enum machine_mode));

/* Do machine dependent simplifications: see simplify_rtx() in combine.c.
   GENERAL_SIMPLIFY controls whether general machine independent
   simplifications should be tried after machine dependent ones.  Thus,
   we can filter out certain simplifications and keep the simplify_rtx()
   from changing things that we just simplified in a machine dependent
   fashion.  This is experimental.  BRC */

rtx
mcore_dependent_simplify_rtx (x, int_op0_mode, last, in_dest, general_simplify)
     rtx x;
     int int_op0_mode;
     int last;
     int in_dest;
     int * general_simplify;
{
  enum machine_mode mode = GET_MODE (x);
  enum rtx_code code = GET_CODE (x);

  /* Always simplify unless explicitly asked not to.  */
  * general_simplify = 1;

  if (code == IF_THEN_ELSE)
    {
      int i;
      rtx cond = XEXP (x, 0);
      rtx true_rtx = XEXP (x, 1);
      rtx false_rtx = XEXP (x, 2);
      enum rtx_code true_code = GET_CODE (cond);

      /* On the mcore, when doing -mcmov-one, we don't want to simplify:

         (if_then_else (ne A 0) C1 0)

         if it would be turned into a shift by simplify_if_then_else().
         Instead, leave it alone so that it will collapse into a conditional
         move.  Besides, at least for the mcore, doing this simplification does
         not typically help.  See combine.c, line 4217.  BRC */

      if (true_code == NE && XEXP (cond, 1) == const0_rtx
          && false_rtx == const0_rtx && GET_CODE (true_rtx) == CONST_INT
          && ((1 == nonzero_bits (XEXP (cond, 0), mode)
               && (i = exact_log2 (INTVAL (true_rtx))) >= 0)
              || ((num_sign_bit_copies (XEXP (cond, 0), mode)
                   == GET_MODE_BITSIZE (mode))
                  && (i = exact_log2 (- INTVAL (true_rtx))) >= 0)))
        {
          *general_simplify = 0;
          return x;
        }
    }

  return x;
}
#endif
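
/* Example of the case filtered above: for  x = (a != 0) ? 4 : 0  with
   a known to be 0 or 1, simplify_if_then_else() would rewrite the
   if_then_else into a left shift of a.  Under -mcmov-one it is usually
   better to let the if_then_else survive and collapse into a
   conditional move, hence the *general_simplify = 0 escape.  */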

/* Check whether insn is a candidate for a conditional.  */

static cond_type
is_cond_candidate (insn)
     rtx insn;
{
  /* The only things we conditionalize are those that can be directly
     changed into a conditional.  Only bother with SImode items.  If
     we wanted to be a little more aggressive, we could also do other
     modes such as DImode with reg-reg move or load 0.  */
  if (GET_CODE (insn) == INSN)
    {
      rtx pat = PATTERN (insn);
      rtx src, dst;

      if (GET_CODE (pat) != SET)
        return COND_NO;

      dst = XEXP (pat, 0);

      if ((GET_CODE (dst) != REG &&
           GET_CODE (dst) != SUBREG) ||
          GET_MODE (dst) != SImode)
        return COND_NO;

      src = XEXP (pat, 1);

      if ((GET_CODE (src) == REG ||
           (GET_CODE (src) == SUBREG &&
            GET_CODE (SUBREG_REG (src)) == REG)) &&
          GET_MODE (src) == SImode)
        return COND_MOV_INSN;
      else if (GET_CODE (src) == CONST_INT &&
               INTVAL (src) == 0)
        return COND_CLR_INSN;
      else if (GET_CODE (src) == PLUS &&
               (GET_CODE (XEXP (src, 0)) == REG ||
                (GET_CODE (XEXP (src, 0)) == SUBREG &&
                 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
               GET_MODE (XEXP (src, 0)) == SImode &&
               GET_CODE (XEXP (src, 1)) == CONST_INT &&
               INTVAL (XEXP (src, 1)) == 1)
        return COND_INC_INSN;
      else if (((GET_CODE (src) == MINUS &&
                 GET_CODE (XEXP (src, 1)) == CONST_INT &&
                 INTVAL (XEXP (src, 1)) == 1) ||
                (GET_CODE (src) == PLUS &&
                 GET_CODE (XEXP (src, 1)) == CONST_INT &&
                 INTVAL (XEXP (src, 1)) == -1)) &&
               (GET_CODE (XEXP (src, 0)) == REG ||
                (GET_CODE (XEXP (src, 0)) == SUBREG &&
                 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
               GET_MODE (XEXP (src, 0)) == SImode)
        return COND_DEC_INSN;

      /* Some insns that we don't bother with:
         (set (rx:DI) (ry:DI))
         (set (rx:DI) (const_int 0))
      */

    }
  else if (GET_CODE (insn) == JUMP_INSN &&
           GET_CODE (PATTERN (insn)) == SET &&
           GET_CODE (XEXP (PATTERN (insn), 1)) == LABEL_REF)
    return COND_BRANCH_INSN;

  return COND_NO;
}
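
/* Roughly, the candidates map onto the MCore conditional instructions
   like this (schematic; rx/ry stand for any SImode registers):

     (set rx ry)                        COND_MOV_INSN    -> movt/movf
     (set rx (const_int 0))             COND_CLR_INSN    -> movt/movf of 0
     (set rx (plus rx (const_int 1)))   COND_INC_INSN    -> inct/incf
     (set rx (minus rx (const_int 1)))  COND_DEC_INSN    -> dect/decf
     (set (pc) (label_ref L))           COND_BRANCH_INSN -> bt/bf  */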

/* Emit a conditional version of insn and replace the old insn with the
   new one.  Return the new insn if emitted.  */

static rtx
emit_new_cond_insn (insn, cond)
     rtx insn;
     int cond;
{
  rtx c_insn = 0;
  rtx pat, dst, src;
  cond_type num;

  if ((num = is_cond_candidate (insn)) == COND_NO)
    return NULL;

  pat = PATTERN (insn);

  if (GET_CODE (insn) == INSN)
    {
      dst = SET_DEST (pat);
      src = SET_SRC (pat);
    }
  else
    dst = JUMP_LABEL (insn);

  switch (num)
    {
    case COND_MOV_INSN:
    case COND_CLR_INSN:
      if (cond)
        c_insn = gen_movt0 (dst, src, dst);
      else
        c_insn = gen_movt0 (dst, dst, src);
      break;

    case COND_INC_INSN:
      if (cond)
        c_insn = gen_incscc (dst, dst);
      else
        c_insn = gen_incscc_false (dst, dst);
      break;

    case COND_DEC_INSN:
      if (cond)
        c_insn = gen_decscc (dst, dst);
      else
        c_insn = gen_decscc_false (dst, dst);
      break;

    case COND_BRANCH_INSN:
      if (cond)
        c_insn = gen_branch_true (dst);
      else
        c_insn = gen_branch_false (dst);
      break;

    default:
      return NULL;
    }

  /* Only copy the notes if they exist.  */
  if (rtx_length [GET_CODE (c_insn)] >= 7 && rtx_length [GET_CODE (insn)] >= 7)
    {
      /* We really don't need to bother with the notes and links at this
         point, but go ahead and save the notes.  This will help is_dead()
         when applying peepholes (links don't matter since they are not
         used any more beyond this point for the mcore).  */
      REG_NOTES (c_insn) = REG_NOTES (insn);
    }

  if (num == COND_BRANCH_INSN)
    {
      /* For jumps, we need to be a little bit careful and emit the new jump
         before the old one and to update the use count for the target label.
         This way, the barrier following the old (uncond) jump will get
         deleted, but the label won't.  */
      c_insn = emit_jump_insn_before (c_insn, insn);

      ++ LABEL_NUSES (dst);

      JUMP_LABEL (c_insn) = dst;
    }
  else
    c_insn = emit_insn_after (c_insn, insn);

  delete_insn (insn);

  return c_insn;
}

/* Attempt to change a basic block into a series of conditional insns.  This
   works by taking the branch at the end of the 1st block and scanning for the
   end of the 2nd block.  If all instructions in the 2nd block have cond.
   versions and the label at the start of block 3 is the same as the target
   from the branch at block 1, then conditionalize all insn in block 2 using
   the inverse condition of the branch at block 1.  (Note I'm bending the
   definition of basic block here.)

   e.g., change:

   bt   L2             <-- end of block 1 (delete)
   mov  r7,r8
   addu r7,1
   br   L3             <-- end of block 2

   L2:  ...            <-- start of block 3 (NUSES==1)
   L3:  ...

   to:

   movf r7,r8
   incf r7
   bf   L3

   L3:  ...

   we can delete the L2 label if NUSES==1 and re-apply the optimization
   starting at the last instruction of block 2.  This may allow an entire
   if-then-else statement to be conditionalized.  BRC */

static rtx
conditionalize_block (first)
     rtx first;
{
  rtx insn;
  rtx br_pat;
  rtx end_blk_1_br = 0;
  rtx end_blk_2_insn = 0;
  rtx start_blk_3_lab = 0;
  int cond;
  int br_lab_num;
  int blk_size = 0;

  /* Check that the first insn is a candidate conditional jump.  This is
     the one that we'll eliminate.  If not, advance to the next insn to
     try.  */
  if (GET_CODE (first) != JUMP_INSN ||
      GET_CODE (PATTERN (first)) != SET ||
      GET_CODE (XEXP (PATTERN (first), 1)) != IF_THEN_ELSE)
    return NEXT_INSN (first);

  /* Extract some information we need.  */
  end_blk_1_br = first;
  br_pat = PATTERN (end_blk_1_br);

  /* Complement the condition since we use the reverse cond. for the insns.  */
  cond = (GET_CODE (XEXP (XEXP (br_pat, 1), 0)) == EQ);

  /* Determine what kind of branch we have.  */
  if (GET_CODE (XEXP (XEXP (br_pat, 1), 1)) == LABEL_REF)
    {
      /* A normal branch, so extract label out of first arm.  */
      br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 1), 0));
    }
  else
    {
      /* An inverse branch, so extract the label out of the 2nd arm
         and complement the condition.  */
      cond = (cond == 0);
      br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 2), 0));
    }

  /* Scan forward for the start of block 2: it must start with a
     label and that label must be the same as the branch target
     label from block 1.  We don't care about whether block 2 actually
     ends with a branch or a label (an uncond. branch is
     conditionalizable).  */
  for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
    {
      enum rtx_code code;

      code = GET_CODE (insn);

      /* Look for the label at the start of block 3.  */
      if (code == CODE_LABEL && CODE_LABEL_NUMBER (insn) == br_lab_num)
        break;

      /* Skip barriers, notes, and conditionalizable insns.  If the
         insn is not conditionalizable or makes this optimization fail,
         just return the next insn so we can start over from that point.  */
      if (code != BARRIER && code != NOTE && !is_cond_candidate (insn))
        return NEXT_INSN (insn);

      /* Remember the last real insn before the label (i.e. end of block 2).  */
      if (code == JUMP_INSN || code == INSN)
        {
          blk_size ++;
          end_blk_2_insn = insn;
        }
    }

  if (!insn)
    return insn;

  /* It is possible for this optimization to slow performance if the blocks
     are long.  This really depends upon whether the branch is likely taken
     or not.  If the branch is taken, we slow performance in many cases.  But,
     if the branch is not taken, we always help performance (for a single
     block, but for a double block (i.e. when the optimization is re-applied)
     this is not true since the 'right thing' depends on the overall length of
     the collapsed block).  As a compromise, don't apply this optimization on
     blocks larger than size 2 (unlikely for the mcore) when speed is important.
     The best threshold depends on the latencies of the instructions (i.e.,
     the branch penalty).  */
  if (optimize > 1 && blk_size > 2)
    return insn;

  /* At this point, we've found the start of block 3 and we know that
     it is the destination of the branch from block 1.  Also, all
     instructions in the block 2 are conditionalizable.  So, apply the
     conditionalization and delete the branch.  */
  start_blk_3_lab = insn;

  for (insn = NEXT_INSN (end_blk_1_br); insn != start_blk_3_lab;
       insn = NEXT_INSN (insn))
    {
      rtx newinsn;

      if (INSN_DELETED_P (insn))
        continue;

      /* Try to form a conditional variant of the instruction and emit it.  */
      if ((newinsn = emit_new_cond_insn (insn, cond)))
        {
          if (end_blk_2_insn == insn)
            end_blk_2_insn = newinsn;

          insn = newinsn;
        }
    }

  /* Note whether we will delete the label starting blk 3 when the jump
     gets deleted.  If so, we want to re-apply this optimization at the
     last real instruction right before the label.  */
  if (LABEL_NUSES (start_blk_3_lab) == 1)
    {
      start_blk_3_lab = 0;
    }

  /* ??? We probably should redistribute the death notes for this insn, esp.
     the death of cc, but it doesn't really matter this late in the game.
     The peepholes all use is_dead() which will find the correct death
     regardless of whether there is a note.  */
  delete_insn (end_blk_1_br);

  if (! start_blk_3_lab)
    return end_blk_2_insn;

  /* Return the insn right after the label at the start of block 3.  */
  return NEXT_INSN (start_blk_3_lab);
}

/* Apply the conditionalization of blocks optimization.  This is the
   outer loop that traverses through the insns scanning for a branch
   that signifies an opportunity to apply the optimization.  Note that
   this optimization is applied late.  If we could apply it earlier,
   say before cse 2, it may expose more optimization opportunities.
   But the payback probably isn't really worth the effort (we'd have
   to update all reg/flow/notes/links/etc to make it work - and stick
   it in before cse 2).  */

static void
conditionalize_optimization (first)
     rtx first;
{
  rtx insn;

  for (insn = first; insn; insn = conditionalize_block (insn))
    continue;
}

static int saved_warn_return_type = -1;
static int saved_warn_return_type_count = 0;

/* This function is called from toplev.c before reorg.  */

void
mcore_dependent_reorg (first)
     rtx first;
{
  /* Reset this variable.  */
  current_function_anonymous_args = 0;

  /* Restore the warn_return_type if it has been altered.  */
  if (saved_warn_return_type != -1)
    {
      /* Only restore the value if we have reached another function.
         The test of warn_return_type occurs in finish_function () in
         c-decl.c a long time after the code for the function is generated,
         so we need a counter to tell us when we have finished parsing that
         function and can restore the flag.  */
      if (--saved_warn_return_type_count == 0)
        {
          warn_return_type = saved_warn_return_type;
          saved_warn_return_type = -1;
        }
    }

  if (optimize == 0)
    return;

  /* Conditionalize blocks where we can.  */
  conditionalize_optimization (first);

  /* Literal pool generation is now pushed off until the assembler.  */
}

\f
/* Return the reg_class to use when reloading the rtx X into the class
   CLASS.  */

/* If the input is (PLUS REG CONSTANT) representing a stack slot address,
   then we want to restrict the class to LRW_REGS since that ensures that
   we will be able to safely load the constant.

   If the input is a constant that should be loaded with mvir1, then use
   ONLYR1_REGS.

   ??? We don't handle the case where we have (PLUS REG CONSTANT) and
   the constant should be loaded with mvir1, because that can lead to cases
   where an instruction needs two ONLYR1_REGS reloads.  */
enum reg_class
mcore_reload_class (x, class)
     rtx x;
     enum reg_class class;
{
  enum reg_class new_class;

  if (class == GENERAL_REGS && CONSTANT_P (x)
      && (GET_CODE (x) != CONST_INT
          || (   ! CONST_OK_FOR_I (INTVAL (x))
              && ! CONST_OK_FOR_M (INTVAL (x))
              && ! CONST_OK_FOR_N (INTVAL (x)))))
    new_class = LRW_REGS;
  else
    new_class = class;

  return new_class;
}

/* Tell me if a pair of reg/subreg rtx's actually refer to the same
   register.  Note that the current version doesn't worry about whether
   they are the same mode or not (e.g., a QImode in r2 matches an HImode
   in r2, which matches an SImode in r2).  Might think in the future about
   whether we want to be able to say something about modes.  */
int
mcore_is_same_reg (x, y)
     rtx x;
     rtx y;
{
  /* Strip any and all of the subreg wrappers.  */
  while (GET_CODE (x) == SUBREG)
    x = SUBREG_REG (x);

  while (GET_CODE (y) == SUBREG)
    y = SUBREG_REG (y);

  if (GET_CODE (x) == REG && GET_CODE (y) == REG && REGNO (x) == REGNO (y))
    return 1;

  return 0;
}

/* Called to register all of our global variables with the garbage
   collector.  */
static void
mcore_add_gc_roots ()
{
  ggc_add_rtx_root (&arch_compare_op0, 1);
  ggc_add_rtx_root (&arch_compare_op1, 1);
}

void
mcore_override_options ()
{
  if (mcore_stack_increment_string)
    {
      mcore_stack_increment = atoi (mcore_stack_increment_string);

      if (mcore_stack_increment < 0
          || (mcore_stack_increment == 0
              && (mcore_stack_increment_string[0] != '0'
                  || mcore_stack_increment_string[1] != 0)))
        error ("Invalid option `-mstack-increment=%s'",
               mcore_stack_increment_string);
    }

  /* Only the m340 supports little endian code.  */
  if (TARGET_LITTLE_END && ! TARGET_M340)
    target_flags |= M340_BIT;

  mcore_add_gc_roots ();
}
\f
int
mcore_must_pass_on_stack (mode, type)
     enum machine_mode mode ATTRIBUTE_UNUSED;
     tree type;
{
  if (type == NULL)
    return 0;

  /* If the argument can have its address taken, it must
     be placed on the stack.  */
  if (TREE_ADDRESSABLE (type))
    return 1;

  return 0;
}

/* Compute the number of word sized registers needed to
   hold a function argument of mode MODE and type TYPE.  */
int
mcore_num_arg_regs (mode, type)
     enum machine_mode mode;
     tree type;
{
  int size;

  if (MUST_PASS_IN_STACK (mode, type))
    return 0;

  if (type && mode == BLKmode)
    size = int_size_in_bytes (type);
  else
    size = GET_MODE_SIZE (mode);

  return ROUND_ADVANCE (size);
}

static rtx
handle_structs_in_regs (mode, type, reg)
     enum machine_mode mode;
     tree type;
     int reg;
{
  int size;

  /* The MCore ABI defines that a structure whose size is not a whole number
     of words is passed packed into registers (or spilled onto the stack if
     not enough registers are available) with the last few bytes of the
     structure being packed, left-justified, into the last register/stack slot.
     GCC handles this correctly if the last word is in a stack slot, but we
     have to generate a special, PARALLEL RTX if the last word is in an
     argument register.  */
  if (type
      && TYPE_MODE (type) == BLKmode
      && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
      && (size = int_size_in_bytes (type)) > UNITS_PER_WORD
      && (size % UNITS_PER_WORD != 0)
      && (reg + mcore_num_arg_regs (mode, type) <= (FIRST_PARM_REG + NPARM_REGS)))
    {
      rtx arg_regs [NPARM_REGS];
      int nregs;
      rtx result;
      rtvec rtvec;

      for (nregs = 0; size > 0; size -= UNITS_PER_WORD)
        {
          arg_regs [nregs] =
            gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, reg ++),
                               GEN_INT (nregs * UNITS_PER_WORD));
          nregs ++;
        }

      /* We assume here that NPARM_REGS == 6.  The assert checks this.  */
      assert (ARRAY_SIZE (arg_regs) == 6);
      rtvec = gen_rtvec (nregs, arg_regs[0], arg_regs[1], arg_regs[2],
                         arg_regs[3], arg_regs[4], arg_regs[5]);

      result = gen_rtx_PARALLEL (mode, rtvec);
      return result;
    }

  return gen_rtx_REG (mode, reg);
}
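
/* Example: a 10-byte BLKmode struct starting at argument register 0
   occupies three words, so the PARALLEL built above maps hardware
   registers 2, 3 and 4 to byte offsets 0, 4 and 8, with the final two
   bytes packed, left-justified, into the last register.  */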

rtx
mcore_function_value (valtype, func)
     tree valtype;
     tree func ATTRIBUTE_UNUSED;
{
  enum machine_mode mode;
  int unsigned_p;

  mode = TYPE_MODE (valtype);

  PROMOTE_MODE (mode, unsigned_p, NULL);

  return handle_structs_in_regs (mode, valtype, FIRST_RET_REG);
}

/* Define where to put the arguments to a function.
   Value is zero to push the argument on the stack,
   or a hard register in which to store the argument.

   MODE is the argument's machine mode.
   TYPE is the data type of the argument (as a tree).
    This is null for libcalls where that information may
    not be available.
   CUM is a variable of type CUMULATIVE_ARGS which gives info about
    the preceding args and about the function being called.
   NAMED is nonzero if this argument is a named parameter
    (otherwise it is an extra parameter matching an ellipsis).

   On MCore the first args are normally in registers
   and the rest are pushed.  Any arg that starts within the first
   NPARM_REGS words is at least partially passed in a register unless
   its data type forbids.  */
rtx
mcore_function_arg (cum, mode, type, named)
     CUMULATIVE_ARGS cum;
     enum machine_mode mode;
     tree type;
     int named;
{
  int arg_reg;

  if (! named)
    return 0;

  if (MUST_PASS_IN_STACK (mode, type))
    return 0;

  arg_reg = ROUND_REG (cum, mode);

  if (arg_reg < NPARM_REGS)
    return handle_structs_in_regs (mode, type, FIRST_PARM_REG + arg_reg);

  return 0;
}

/* Implements the FUNCTION_ARG_PARTIAL_NREGS macro.
   Returns the number of argument registers required to hold *part* of
   a parameter of machine mode MODE and type TYPE (which may be NULL if
   the type is not known).  If the argument fits entirely in the argument
   registers, or entirely on the stack, then 0 is returned.  CUM is the
   number of argument registers already used by earlier parameters to
   the function.  */
int
mcore_function_arg_partial_nregs (cum, mode, type, named)
     CUMULATIVE_ARGS cum;
     enum machine_mode mode;
     tree type;
     int named;
{
  int reg = ROUND_REG (cum, mode);

  if (named == 0)
    return 0;

  if (MUST_PASS_IN_STACK (mode, type))
    return 0;

  /* REG is not the *hardware* register number of the register that holds
     the argument, it is the *argument* register number.  So for example,
     the first argument to a function goes in argument register 0, which
     translates (for the MCore) into hardware register 2.  The second
     argument goes into argument register 1, which translates into hardware
     register 3, and so on.  NPARM_REGS is the number of argument registers
     supported by the target, not the maximum hardware register number of
     the target.  */
  if (reg >= NPARM_REGS)
    return 0;

  /* If the argument fits entirely in registers, return 0.  */
  if (reg + mcore_num_arg_regs (mode, type) <= NPARM_REGS)
    return 0;

  /* The argument overflows the number of available argument registers.
     Compute how many argument registers have not yet been assigned to
     hold an argument.  */
  reg = NPARM_REGS - reg;

  /* Return partially in registers and partially on the stack.  */
  return reg;
}
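
/* Example: with NPARM_REGS == 6, a 12-byte argument whose first word
   falls in argument register 4 needs three words but only registers 4
   and 5 remain, so this returns 2 and the final 4 bytes travel on the
   stack.  */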
\f
/* Return non-zero if SYMBOL is marked as being dllexport'd.  */
int
mcore_dllexport_name_p (symbol)
     const char * symbol;
{
  return symbol[0] == '@' && symbol[1] == 'e' && symbol[2] == '.';
}

/* Return non-zero if SYMBOL is marked as being dllimport'd.  */
int
mcore_dllimport_name_p (symbol)
     const char * symbol;
{
  return symbol[0] == '@' && symbol[1] == 'i' && symbol[2] == '.';
}
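
/* The encodings tested above are produced by mcore_mark_dllexport and
   mcore_mark_dllimport below: a symbol "foo" becomes "@e.foo" when
   exported and "@i.__imp_foo" when imported.  */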

/* Mark a DECL as being dllexport'd.  */
static void
mcore_mark_dllexport (decl)
     tree decl;
{
  const char * oldname;
  char * newname;
  rtx rtlname;
  tree idp;

  rtlname = XEXP (DECL_RTL (decl), 0);

  if (GET_CODE (rtlname) == SYMBOL_REF)
    oldname = XSTR (rtlname, 0);
  else if (   GET_CODE (rtlname) == MEM
           && GET_CODE (XEXP (rtlname, 0)) == SYMBOL_REF)
    oldname = XSTR (XEXP (rtlname, 0), 0);
  else
    abort ();

  if (mcore_dllexport_name_p (oldname))
    return;  /* Already done.  */

  newname = alloca (strlen (oldname) + 4);
  sprintf (newname, "@e.%s", oldname);

  /* We pass newname through get_identifier to ensure it has a unique
     address.  RTL processing can sometimes peek inside the symbol ref
     and compare the string's addresses to see if two symbols are
     identical.  */
  /* ??? At least I think that's why we do this.  */
  idp = get_identifier (newname);

  XEXP (DECL_RTL (decl), 0) =
    gen_rtx (SYMBOL_REF, Pmode, IDENTIFIER_POINTER (idp));
}

/* Mark a DECL as being dllimport'd.  */
static void
mcore_mark_dllimport (decl)
     tree decl;
{
  const char * oldname;
  char * newname;
  tree idp;
  rtx rtlname;
  rtx newrtl;

  rtlname = XEXP (DECL_RTL (decl), 0);

  if (GET_CODE (rtlname) == SYMBOL_REF)
    oldname = XSTR (rtlname, 0);
  else if (   GET_CODE (rtlname) == MEM
           && GET_CODE (XEXP (rtlname, 0)) == SYMBOL_REF)
    oldname = XSTR (XEXP (rtlname, 0), 0);
  else
    abort ();

  if (mcore_dllexport_name_p (oldname))
    abort (); /* This shouldn't happen.  */
  else if (mcore_dllimport_name_p (oldname))
    return; /* Already done.  */

  /* ??? One can well ask why we're making these checks here,
     and that would be a good question.  */

  /* Imported variables can't be initialized.  */
  if (TREE_CODE (decl) == VAR_DECL
      && !DECL_VIRTUAL_P (decl)
      && DECL_INITIAL (decl))
    {
      error_with_decl (decl, "initialized variable `%s' is marked dllimport");
      return;
    }

  /* `extern' needn't be specified with dllimport.
     Specify `extern' now and hope for the best.  Sigh.  */
  if (TREE_CODE (decl) == VAR_DECL
      /* ??? Is this test for vtables needed?  */
      && !DECL_VIRTUAL_P (decl))
    {
      DECL_EXTERNAL (decl) = 1;
      TREE_PUBLIC (decl) = 1;
    }

  newname = alloca (strlen (oldname) + 11);
  sprintf (newname, "@i.__imp_%s", oldname);

  /* We pass newname through get_identifier to ensure it has a unique
     address.  RTL processing can sometimes peek inside the symbol ref
     and compare the string's addresses to see if two symbols are
     identical.  */
  /* ??? At least I think that's why we do this.  */
  idp = get_identifier (newname);

  newrtl = gen_rtx (MEM, Pmode,
                    gen_rtx (SYMBOL_REF, Pmode,
                             IDENTIFIER_POINTER (idp)));
  XEXP (DECL_RTL (decl), 0) = newrtl;
}

static int
mcore_dllexport_p (decl)
     tree decl;
{
  if (   TREE_CODE (decl) != VAR_DECL
      && TREE_CODE (decl) != FUNCTION_DECL)
    return 0;

  return lookup_attribute ("dllexport", DECL_MACHINE_ATTRIBUTES (decl)) != 0;
}

static int
mcore_dllimport_p (decl)
     tree decl;
{
  if (   TREE_CODE (decl) != VAR_DECL
      && TREE_CODE (decl) != FUNCTION_DECL)
    return 0;

  return lookup_attribute ("dllimport", DECL_MACHINE_ATTRIBUTES (decl)) != 0;
}

/* Cover function to implement ENCODE_SECTION_INFO.  */
void
mcore_encode_section_info (decl)
     tree decl;
{
  /* This bit is copied from arm.h.  */
  if (optimize > 0
      && TREE_CONSTANT (decl)
      && (!flag_writable_strings || TREE_CODE (decl) != STRING_CST))
    {
      rtx rtl = (TREE_CODE_CLASS (TREE_CODE (decl)) != 'd'
                 ? TREE_CST_RTL (decl) : DECL_RTL (decl));
      SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
    }

  /* Mark the decl so we can tell from the rtl whether the object is
     dllexport'd or dllimport'd.  */
  if (mcore_dllexport_p (decl))
    mcore_mark_dllexport (decl);
  else if (mcore_dllimport_p (decl))
    mcore_mark_dllimport (decl);

  /* It might be that DECL has already been marked as dllimport, but
     a subsequent definition nullified that.  The attribute is gone
     but DECL_RTL still has @i.__imp_foo.  We need to remove that.  */
  else if ((TREE_CODE (decl) == FUNCTION_DECL
            || TREE_CODE (decl) == VAR_DECL)
           && DECL_RTL (decl) != NULL_RTX
           && GET_CODE (DECL_RTL (decl)) == MEM
           && GET_CODE (XEXP (DECL_RTL (decl), 0)) == MEM
           && GET_CODE (XEXP (XEXP (DECL_RTL (decl), 0), 0)) == SYMBOL_REF
           && mcore_dllimport_name_p (XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0)))
    {
      const char * oldname = XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0);
      tree idp = get_identifier (oldname + 9);
      rtx newrtl = gen_rtx (SYMBOL_REF, Pmode, IDENTIFIER_POINTER (idp));

      XEXP (DECL_RTL (decl), 0) = newrtl;

      /* We previously set TREE_PUBLIC and DECL_EXTERNAL.
         ??? We leave these alone for now.  */
    }
}

/* MCore specific attribute support.
   dllexport - for exporting a function/variable that will live in a dll
   dllimport - for importing a function/variable from a dll
   naked     - do not create a function prologue/epilogue.  */
static int
mcore_valid_decl_attribute (decl, attributes, attr, args)
     tree decl;
     tree attributes ATTRIBUTE_UNUSED;
     tree attr;
     tree args;
{
  if (args != NULL_TREE)
    return 0;

  if (is_attribute_p ("dllexport", attr))
    return 1;

  if (is_attribute_p ("dllimport", attr))
    return 1;

  if (is_attribute_p ("naked", attr) &&
      TREE_CODE (decl) == FUNCTION_DECL)
    {
      /* PR14310 - don't complain about lack of return statement
         in naked functions.  The solution here is a gross hack
         but this is the only way to solve the problem without
         adding a new feature to GCC.  I did try submitting a patch
         that would add such a new feature, but it was (rightfully)
         rejected on the grounds that it was creeping featurism,
         so hence this code.  */
      if (warn_return_type)
        {
          saved_warn_return_type = warn_return_type;
          warn_return_type = 0;
          saved_warn_return_type_count = 2;
        }
      else if (saved_warn_return_type_count)
        saved_warn_return_type_count = 2;

      return 1;
    }

  return 0;
}

/* Cover function for UNIQUE_SECTION.  */

void
mcore_unique_section (decl, reloc)
     tree decl;
     int reloc ATTRIBUTE_UNUSED;
{
  int len;
  char * name;
  char * string;
  const char * prefix;

  name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));

  /* Strip off any encoding in name.  */
  STRIP_NAME_ENCODING (name, name);

  /* The object is put in, for example, section .text$foo.
     The linker will then ultimately place them in .text
     (everything from the $ on is stripped).  */
  if (TREE_CODE (decl) == FUNCTION_DECL)
    prefix = ".text$";
  /* For compatibility with EPOC, we ignore the fact that the
     section might have relocs against it.  */
  else if (DECL_READONLY_SECTION (decl, 0))
    prefix = ".rdata$";
  else
    prefix = ".data$";

  len = strlen (name) + strlen (prefix);
  string = alloca (len + 1);

  sprintf (string, "%s%s", prefix, name);

  DECL_SECTION_NAME (decl) = build_string (len, string);
}
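
/* So, for example, a function foo gets section ".text$foo" and a const
   table tbl gets ".rdata$tbl"; once the linker strips everything from
   the '$' on, they coalesce back into plain .text and .rdata.  */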

int
mcore_naked_function_p ()
{
  return lookup_attribute ("naked", DECL_MACHINE_ATTRIBUTES (current_function_decl)) != NULL_TREE;
}