/* Output routines for Motorola MCore processor
   Copyright (C) 1993, 1999, 2000, 2001, 2002 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */

#include "config.h"
#include "system.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "assert.h"
#include "mcore.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "obstack.h"
#include "expr.h"
#include "reload.h"
#include "recog.h"
#include "function.h"
#include "ggc.h"
#include "toplev.h"
#include "target.h"
#include "target-def.h"

/* Maximum size we are allowed to grow the stack in a single operation.
   If we want more, we must do it in increments of at most this size.
   If this value is 0, we don't check at all.  */
const char * mcore_stack_increment_string = 0;
int mcore_stack_increment = STACK_UNITS_MAXSTEP;

/* For dumping information about frame sizes.  */
char * mcore_current_function_name = 0;
long mcore_current_compilation_timestamp = 0;

/* Global variables for machine-dependent things.  */

/* Saved operands from the last compare to use when we generate an scc
   or bcc insn.  */
rtx arch_compare_op0;
rtx arch_compare_op1;

/* Provides the class number of the smallest class containing a given
   register number.  */
const int regno_reg_class[FIRST_PSEUDO_REGISTER] =
{
  GENERAL_REGS, ONLYR1_REGS,  LRW_REGS,     LRW_REGS,
  LRW_REGS,     LRW_REGS,     LRW_REGS,     LRW_REGS,
  LRW_REGS,     LRW_REGS,     LRW_REGS,     LRW_REGS,
  LRW_REGS,     LRW_REGS,     LRW_REGS,     GENERAL_REGS,
  GENERAL_REGS, C_REGS,       NO_REGS,      NO_REGS,
};

/* Provide reg_class from a letter such as appears in the machine
   description.  */
const enum reg_class reg_class_from_letter[] =
{
  /* a */ LRW_REGS, /* b */ ONLYR1_REGS,  /* c */ C_REGS,  /* d */ NO_REGS,
  /* e */ NO_REGS,  /* f */ NO_REGS,      /* g */ NO_REGS, /* h */ NO_REGS,
  /* i */ NO_REGS,  /* j */ NO_REGS,      /* k */ NO_REGS, /* l */ NO_REGS,
  /* m */ NO_REGS,  /* n */ NO_REGS,      /* o */ NO_REGS, /* p */ NO_REGS,
  /* q */ NO_REGS,  /* r */ GENERAL_REGS, /* s */ NO_REGS, /* t */ NO_REGS,
  /* u */ NO_REGS,  /* v */ NO_REGS,      /* w */ NO_REGS, /* x */ ALL_REGS,
  /* y */ NO_REGS,  /* z */ NO_REGS
};

struct mcore_frame
{
  int arg_size;			/* Stdarg spills (bytes).  */
  int reg_size;			/* Non-volatile reg saves (bytes).  */
  int reg_mask;			/* Non-volatile reg saves.  */
  int local_size;		/* Locals.  */
  int outbound_size;		/* Arg overflow on calls out.  */
  int pad_outbound;
  int pad_local;
  int pad_reg;
  /* Describe the steps we'll use to grow it.  */
#define MAX_STACK_GROWS 4	/* Gives us some spare space.  */
  int growth[MAX_STACK_GROWS];
  int arg_offset;
  int reg_offset;
  int reg_growth;
  int local_growth;
};

typedef enum
{
  COND_NO,
  COND_MOV_INSN,
  COND_CLR_INSN,
  COND_INC_INSN,
  COND_DEC_INSN,
  COND_BRANCH_INSN
}
cond_type;

static void       output_stack_adjust          PARAMS ((int, int));
static int        calc_live_regs               PARAMS ((int *));
static int        const_ok_for_mcore           PARAMS ((int));
static int        try_constant_tricks          PARAMS ((long, int *, int *));
static const char * output_inline_const        PARAMS ((enum machine_mode, rtx *));
static void       block_move_sequence          PARAMS ((rtx, rtx, rtx, rtx, int, int, int));
static void       layout_mcore_frame           PARAMS ((struct mcore_frame *));
static cond_type  is_cond_candidate            PARAMS ((rtx));
static rtx        emit_new_cond_insn           PARAMS ((rtx, int));
static rtx        conditionalize_block         PARAMS ((rtx));
static void       conditionalize_optimization  PARAMS ((rtx));
static rtx        handle_structs_in_regs       PARAMS ((enum machine_mode, tree, int));
static void       mcore_mark_dllexport         PARAMS ((tree));
static void       mcore_mark_dllimport         PARAMS ((tree));
static int        mcore_dllexport_p            PARAMS ((tree));
static int        mcore_dllimport_p            PARAMS ((tree));
const struct attribute_spec mcore_attribute_table[];
static tree       mcore_handle_naked_attribute PARAMS ((tree *, tree, tree, int, bool *));
#ifdef OBJECT_FORMAT_ELF
static void       mcore_asm_named_section      PARAMS ((const char *, unsigned int));
#endif
static void       mcore_unique_section         PARAMS ((tree, int));
static void       mcore_encode_section_info    PARAMS ((tree, int));
static const char * mcore_strip_name_encoding  PARAMS ((const char *));

/* Initialize the GCC target structure.  */
#ifdef TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef  TARGET_MERGE_DECL_ATTRIBUTES
#define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
#endif

#ifdef OBJECT_FORMAT_ELF
#undef  TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef  TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#endif

#undef  TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE mcore_attribute_table
#undef  TARGET_ASM_UNIQUE_SECTION
#define TARGET_ASM_UNIQUE_SECTION mcore_unique_section
#undef  TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO mcore_encode_section_info
#undef  TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING mcore_strip_name_encoding

struct gcc_target targetm = TARGET_INITIALIZER;

/* Adjust the stack by SIZE bytes.  DIRECTION < 0 extends the stack
   (subtracting from the stack pointer, with probes for large growth);
   DIRECTION > 0 releases it.  */

static void
output_stack_adjust (direction, size)
     int direction;
     int size;
{
  /* If extending stack a lot, we do it incrementally.  */
  if (direction < 0 && size > mcore_stack_increment && mcore_stack_increment > 0)
    {
      rtx tmp = gen_rtx (REG, SImode, 1);
      rtx memref;

      emit_insn (gen_movsi (tmp, GEN_INT (mcore_stack_increment)));
      do
	{
	  emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp));
	  memref = gen_rtx (MEM, SImode, stack_pointer_rtx);
	  MEM_VOLATILE_P (memref) = 1;
	  emit_insn (gen_movsi (memref, stack_pointer_rtx));
	  size -= mcore_stack_increment;
	}
      while (size > mcore_stack_increment);

      /* SIZE is now the residual for the last adjustment,
	 which doesn't require a probe.  */
    }

  if (size)
    {
      rtx insn;
      rtx val = GEN_INT (size);

      if (size > 32)
	{
	  rtx nval = gen_rtx (REG, SImode, 1);
	  emit_insn (gen_movsi (nval, val));
	  val = nval;
	}

      if (direction > 0)
	insn = gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, val);
      else
	insn = gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, val);

      emit_insn (insn);
    }
}
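
/* For illustration only (pseudo-assembly; register numbers and exact
   mnemonics assumed): with mcore_stack_increment == 4096, extending the
   stack by 10000 bytes emits a probed sequence roughly like

	lrw	r1, 4096
	subu	sp, r1
	stw	sp, (sp)	// touch the newly exposed stack
	subu	sp, r1
	stw	sp, (sp)	// touch the newly exposed stack
	subu	sp, <1808>	// residual, no probe needed

   so each large step of growth is written to before the next one.  */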

/* Work out the registers which need to be saved,
   both as a mask and a count.  */

static int
calc_live_regs (count)
     int * count;
{
  int reg;
  int live_regs_mask = 0;

  * count = 0;

  for (reg = 0; reg < FIRST_PSEUDO_REGISTER; reg++)
    {
      if (regs_ever_live[reg] && !call_used_regs[reg])
	{
	  (*count)++;
	  live_regs_mask |= (1 << reg);
	}
    }

  return live_regs_mask;
}

/* Print the operand address in X to STREAM.  */

void
mcore_print_operand_address (stream, x)
     FILE * stream;
     rtx x;
{
  switch (GET_CODE (x))
    {
    case REG:
      fprintf (stream, "(%s)", reg_names[REGNO (x)]);
      break;

    case PLUS:
      {
	rtx base = XEXP (x, 0);
	rtx index = XEXP (x, 1);

	if (GET_CODE (base) != REG)
	  {
	    /* Ensure that BASE is a register (one of them must be).  */
	    rtx temp = base;
	    base = index;
	    index = temp;
	  }

	switch (GET_CODE (index))
	  {
	  case CONST_INT:
	    fprintf (stream, "(%s,%d)", reg_names[REGNO (base)],
		     INTVAL (index));
	    break;

	  default:
	    debug_rtx (x);
	    abort ();
	  }
      }
      break;

    default:
      output_addr_const (stream, x);
      break;
    }
}

/* Print operand X (an rtx) in assembler syntax to file STREAM
   according to modifier CODE.

   'R'  print the next register or memory location along, ie the lsw in
        a double word value
   'O'  print a constant without the #
   'M'  print a constant as its negative
   'P'  print log2 of a power of two
   'Q'  print log2 of an inverse of a power of two
   'U'  print register for ldm/stm instruction
   'X'  print byte number for xtrbN instruction.  */

void
mcore_print_operand (stream, x, code)
     FILE * stream;
     rtx x;
     int code;
{
  switch (code)
    {
    case 'N':
      if (INTVAL (x) == -1)
	fprintf (asm_out_file, "32");
      else
	fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) + 1));
      break;
    case 'P':
      fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x)));
      break;
    case 'Q':
      fprintf (asm_out_file, "%d", exact_log2 (~INTVAL (x)));
      break;
    case 'O':
      fprintf (asm_out_file, "%d", INTVAL (x));
      break;
    case 'M':
      fprintf (asm_out_file, "%d", - INTVAL (x));
      break;
    case 'R':
      /* Next location along in memory or register.  */
      switch (GET_CODE (x))
	{
	case REG:
	  fputs (reg_names[REGNO (x) + 1], (stream));
	  break;
	case MEM:
	  mcore_print_operand_address
	    (stream, XEXP (adjust_address (x, SImode, 4), 0));
	  break;
	default:
	  abort ();
	}
      break;
    case 'U':
      fprintf (asm_out_file, "%s-%s", reg_names[REGNO (x)],
	       reg_names[REGNO (x) + 3]);
      break;
    case 'x':
      fprintf (asm_out_file, "0x%x", INTVAL (x));
      break;
    case 'X':
      fprintf (asm_out_file, "%d", 3 - INTVAL (x) / 8);
      break;

    default:
      switch (GET_CODE (x))
	{
	case REG:
	  fputs (reg_names[REGNO (x)], (stream));
	  break;
	case MEM:
	  output_address (XEXP (x, 0));
	  break;
	default:
	  output_addr_const (stream, x);
	  break;
	}
      break;
    }
}

/* What does a constant cost?  */

int
mcore_const_costs (exp, code)
     rtx exp;
     enum rtx_code code;
{
  int val = INTVAL (exp);

  /* Easy constants.  */
  if (   CONST_OK_FOR_I (val)
      || CONST_OK_FOR_M (val)
      || CONST_OK_FOR_N (val)
      || (code == PLUS && CONST_OK_FOR_L (val)))
    return 1;
  else if (code == AND
	   && (   CONST_OK_FOR_M (~val)
	       || CONST_OK_FOR_N (~val)))
    return 2;
  else if (code == PLUS
	   && (   CONST_OK_FOR_I (-val)
	       || CONST_OK_FOR_M (-val)
	       || CONST_OK_FOR_N (-val)))
    return 2;

  return 5;
}

/* What does an AND instruction cost?  We do this because immediates may
   have been relaxed, and we want to ensure that CSE will eliminate the
   relaxed immediates; otherwise we get bad code (multiple reloads of the
   same constant).  */

int
mcore_and_cost (x)
     rtx x;
{
  int val;

  if (GET_CODE (XEXP (x, 1)) != CONST_INT)
    return 2;

  val = INTVAL (XEXP (x, 1));

  /* Do it directly.  */
  if (CONST_OK_FOR_K (val) || CONST_OK_FOR_M (~val))
    return 2;
  /* Takes one instruction to load.  */
  else if (const_ok_for_mcore (val))
    return 3;
  /* Takes two instructions to load.  */
  else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
    return 4;

  /* Takes a lrw to load.  */
  return 5;
}

/* What does an OR cost?  See mcore_and_cost above.  */

int
mcore_ior_cost (x)
     rtx x;
{
  int val;

  if (GET_CODE (XEXP (x, 1)) != CONST_INT)
    return 2;

  val = INTVAL (XEXP (x, 1));

  /* Do it directly with bseti.  */
  if (CONST_OK_FOR_M (val))
    return 2;
  /* Takes one instruction to load.  */
  else if (const_ok_for_mcore (val))
    return 3;
  /* Takes two instructions to load.  */
  else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
    return 4;

  /* Takes a lrw to load.  */
  return 5;
}

/* Check to see if a comparison against a constant can be made more efficient
   by incrementing/decrementing the constant to get one that is more efficient
   to load.  */

int
mcore_modify_comparison (code)
     enum rtx_code code;
{
  rtx op1 = arch_compare_op1;

  if (GET_CODE (op1) == CONST_INT)
    {
      int val = INTVAL (op1);

      switch (code)
	{
	case LE:
	  if (CONST_OK_FOR_J (val + 1))
	    {
	      arch_compare_op1 = GEN_INT (val + 1);
	      return 1;
	    }
	  break;

	default:
	  break;
	}
    }

  return 0;
}

/* Prepare the operands for a comparison.  */

rtx
mcore_gen_compare_reg (code)
     enum rtx_code code;
{
  rtx op0 = arch_compare_op0;
  rtx op1 = arch_compare_op1;
  rtx cc_reg = gen_rtx (REG, CCmode, CC_REG);

  if (CONSTANT_P (op1) && GET_CODE (op1) != CONST_INT)
    op1 = force_reg (SImode, op1);

  /* cmpnei: 0-31 (K immediate)
     cmplti: 1-32 (J immediate, 0 using btsti x,31).  */
  switch (code)
    {
    case EQ:	/* Use inverted condition, cmpne.  */
      code = NE;
      /* Drop through.  */

    case NE:	/* Use normal condition, cmpne.  */
      if (GET_CODE (op1) == CONST_INT && ! CONST_OK_FOR_K (INTVAL (op1)))
	op1 = force_reg (SImode, op1);
      break;

    case LE:	/* Use inverted condition, reversed cmplt.  */
      code = GT;
      /* Drop through.  */

    case GT:	/* Use normal condition, reversed cmplt.  */
      if (GET_CODE (op1) == CONST_INT)
	op1 = force_reg (SImode, op1);
      break;

    case GE:	/* Use inverted condition, cmplt.  */
      code = LT;
      /* Drop through.  */

    case LT:	/* Use normal condition, cmplt.  */
      if (GET_CODE (op1) == CONST_INT
	  /* Covered by btsti x,31.  */
	  && INTVAL (op1) != 0
	  && ! CONST_OK_FOR_J (INTVAL (op1)))
	op1 = force_reg (SImode, op1);
      break;

    case GTU:	/* Use inverted condition, cmple.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) == 0)
	{
	  /* Unsigned > 0 is the same as != 0, but we need
	     to invert the condition, so we want to set
	     code = EQ.  This cannot be done however, as the
	     mcore does not support such a test.  Instead we
	     cope with this case in the "bgtu" pattern itself
	     so we should never reach this point.  */
	  /* code = EQ; */
	  abort ();
	  break;
	}
      code = LEU;
      /* Drop through.  */

    case LEU:	/* Use normal condition, reversed cmphs.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
	op1 = force_reg (SImode, op1);
      break;

    case LTU:	/* Use inverted condition, cmphs.  */
      code = GEU;
      /* Drop through.  */

    case GEU:	/* Use normal condition, cmphs.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
	op1 = force_reg (SImode, op1);
      break;

    default:
      break;
    }

  emit_insn (gen_rtx (SET, VOIDmode, cc_reg, gen_rtx (code, CCmode, op0, op1)));

  return cc_reg;
}
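
/* Illustrative example (not from the original sources): "r2 == 5" is
   rewritten above as NE, so the md patterns can emit "cmpnei r2,5" and
   the consumer of the condition uses the inverted sense of the C bit;
   a signed "r2 <= 10" becomes GT with the constant forced into a
   register for a reversed cmplt.  */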

int
mcore_symbolic_address_p (x)
     rtx x;
{
  switch (GET_CODE (x))
    {
    case SYMBOL_REF:
    case LABEL_REF:
      return 1;
    case CONST:
      x = XEXP (x, 0);
      return (   (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
	       || GET_CODE (XEXP (x, 0)) == LABEL_REF)
	      && GET_CODE (XEXP (x, 1)) == CONST_INT);
    default:
      return 0;
    }
}

int
mcore_call_address_operand (x, mode)
     rtx x;
     enum machine_mode mode;
{
  return register_operand (x, mode) || CONSTANT_P (x);
}

/* Functions to output assembly code for a function call.  */

char *
mcore_output_call (operands, index)
     rtx operands[];
     int index;
{
  static char buffer[20];
  rtx addr = operands [index];

  if (REG_P (addr))
    {
      if (TARGET_CG_DATA)
	{
	  if (mcore_current_function_name == 0)
	    abort ();

	  ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
			      "unknown", 1);
	}

      sprintf (buffer, "jsr\t%%%d", index);
    }
  else
    {
      if (TARGET_CG_DATA)
	{
	  if (mcore_current_function_name == 0)
	    abort ();

	  if (GET_CODE (addr) != SYMBOL_REF)
	    abort ();

	  ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
			      XSTR (addr, 0), 0);
	}

      sprintf (buffer, "jbsr\t%%%d", index);
    }

  return buffer;
}

/* Can we load a constant with a single instruction?  */

static int
const_ok_for_mcore (value)
     int value;
{
  if (value >= 0 && value <= 127)
    return 1;

  /* Try exact power of two.  */
  if ((value & (value - 1)) == 0)
    return 1;

  /* Try exact power of two - 1.  */
  if ((value & (value + 1)) == 0)
    return 1;

  return 0;
}
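
/* Illustrative examples (not from the original sources): 100 fits the
   movi range (0-127); 0x2000 is an exact power of two, loadable with
   "bgeni rd,13"; 0x3f is a power of two minus one, loadable with
   "bmaski rd,6".  A value such as 1000 fits none of these and needs a
   trick sequence below, or failing that a literal-pool lrw.  */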

/* Can we load a constant inline with up to 2 instructions?  */

int
mcore_const_ok_for_inline (value)
     long value;
{
  int x, y;

  return try_constant_tricks (value, & x, & y) > 0;
}

/* Are we loading the constant using a not?  */

int
mcore_const_trick_uses_not (value)
     long value;
{
  int x, y;

  return try_constant_tricks (value, & x, & y) == 2;
}

/* Try tricks to load a constant inline and return the trick number on
   success (0 means the constant cannot be inlined).

   0: not inlinable
   1: single instruction (do the usual thing)
   2: single insn followed by a 'not'
   3: single insn followed by an addi
   4: single insn followed by a subi
   5: single insn followed by rsubi
   6: single insn followed by bseti
   7: single insn followed by bclri
   8: single insn followed by rotli
   9: single insn followed by lsli
   10: single insn followed by ixh
   11: single insn followed by ixw.  */
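
/* A worked example (illustrative, register name assumed): 0x180 is not
   directly loadable, but clearing bit 8 leaves 0x80, an exact power of
   two.  try_constant_tricks therefore returns 6 with X = 0x80 and
   Y = 0x100, and output_inline_const emits

	bgeni	rd,7		// rd = 0x80
	bseti	rd,8		// rd = 0x180  */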

static int
try_constant_tricks (value, x, y)
     long value;
     int * x;
     int * y;
{
  int i;
  unsigned bit, shf, rot;

  if (const_ok_for_mcore (value))
    return 1;	/* Do the usual thing.  */

  if (TARGET_HARDLIT)
    {
      if (const_ok_for_mcore (~value))
	{
	  *x = ~value;
	  return 2;
	}

      for (i = 1; i <= 32; i++)
	{
	  if (const_ok_for_mcore (value - i))
	    {
	      *x = value - i;
	      *y = i;

	      return 3;
	    }

	  if (const_ok_for_mcore (value + i))
	    {
	      *x = value + i;
	      *y = i;

	      return 4;
	    }
	}

      bit = 0x80000000L;

      for (i = 0; i <= 31; i++)
	{
	  if (const_ok_for_mcore (i - value))
	    {
	      *x = i - value;
	      *y = i;

	      return 5;
	    }

	  if (const_ok_for_mcore (value & ~bit))
	    {
	      *y = bit;
	      *x = value & ~bit;

	      return 6;
	    }

	  if (const_ok_for_mcore (value | bit))
	    {
	      *y = ~bit;
	      *x = value | bit;

	      return 7;
	    }

	  bit >>= 1;
	}

      shf = value;
      rot = value;

      for (i = 1; i < 31; i++)
	{
	  int c;

	  /* MCore has rotate left.  */
	  c = rot << 31;
	  rot >>= 1;
	  rot &= 0x7FFFFFFF;
	  rot |= c;	/* Simulate rotate.  */

	  if (const_ok_for_mcore (rot))
	    {
	      *y = i;
	      *x = rot;

	      return 8;
	    }

	  if (shf & 1)
	    shf = 0;	/* Can't use logical shift, low order bit is one.  */

	  shf >>= 1;

	  if (shf != 0 && const_ok_for_mcore (shf))
	    {
	      *y = i;
	      *x = shf;

	      return 9;
	    }
	}

      if ((value % 3) == 0 && const_ok_for_mcore (value / 3))
	{
	  *x = value / 3;

	  return 10;
	}

      if ((value % 5) == 0 && const_ok_for_mcore (value / 5))
	{
	  *x = value / 5;

	  return 11;
	}
    }

  return 0;
}

/* Check whether reg is dead at first.  This is done by searching ahead
   for either the next use (i.e., reg is live), a death note, or a set of
   reg.  Don't just use dead_or_set_p() since reload does not always mark
   deaths (especially if PRESERVE_DEATH_NOTES_REGNO_P is not defined).  We
   can ignore subregs by extracting the actual register.  BRC  */

int
mcore_is_dead (first, reg)
     rtx first;
     rtx reg;
{
  rtx insn;

  /* For mcore, subregs can't live independently of their parent regs.  */
  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  /* Dies immediately.  */
  if (dead_or_set_p (first, reg))
    return 1;

  /* Look for conclusive evidence of live/death, otherwise we have
     to assume that it is live.  */
  for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == JUMP_INSN)
	return 0;	/* We lose track, assume it is alive.  */

      else if (GET_CODE (insn) == CALL_INSN)
	{
	  /* Calls might use it for target or register parms.  */
	  if (reg_referenced_p (reg, PATTERN (insn))
	      || find_reg_fusage (insn, USE, reg))
	    return 0;
	  else if (dead_or_set_p (insn, reg))
	    return 1;
	}
      else if (GET_CODE (insn) == INSN)
	{
	  if (reg_referenced_p (reg, PATTERN (insn)))
	    return 0;
	  else if (dead_or_set_p (insn, reg))
	    return 1;
	}
    }

  /* No conclusive evidence either way, we cannot take the chance
     that control flow hid the use from us -- "I'm not dead yet".  */
  return 0;
}

/* Count the number of ones in mask.  */

int
mcore_num_ones (mask)
     int mask;
{
  /* A trick to count set bits recently posted on comp.compilers.  */
  mask =  (mask >> 1  & 0x55555555) + (mask & 0x55555555);
  mask = ((mask >> 2) & 0x33333333) + (mask & 0x33333333);
  mask = ((mask >> 4) + mask) & 0x0f0f0f0f;
  mask = ((mask >> 8) + mask);

  return (mask + (mask >> 16)) & 0xff;
}
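
/* How the trick works (illustrative): each step adds bit counts held in
   fields of doubled width -- 1-bit fields into 2-bit sums, 2-bit sums
   into 4-bit sums, and so on.  E.g. for mask = 0x000000ff the
   intermediate values are 0xaa (four 2-bit fields each holding 2), then
   0x44 (two 4-bit fields each holding 4), then 0x08, and finally 8.  */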

/* Count the number of zeros in mask.  */

int
mcore_num_zeros (mask)
     int mask;
{
  return 32 - mcore_num_ones (mask);
}

/* Determine byte being masked.  */

int
mcore_byte_offset (mask)
     unsigned int mask;
{
  if (mask == 0x00ffffffL)
    return 0;
  else if (mask == 0xff00ffffL)
    return 1;
  else if (mask == 0xffff00ffL)
    return 2;
  else if (mask == 0xffffff00L)
    return 3;

  return -1;
}

/* Determine halfword being masked.  */

int
mcore_halfword_offset (mask)
     unsigned int mask;
{
  if (mask == 0x0000ffffL)
    return 0;
  else if (mask == 0xffff0000L)
    return 1;

  return -1;
}

/* Output a series of bseti's corresponding to mask.  */

const char *
mcore_output_bseti (dst, mask)
     rtx dst;
     int mask;
{
  rtx out_operands[2];
  int bit;

  out_operands[0] = dst;

  for (bit = 0; bit < 32; bit++)
    {
      if ((mask & 0x1) == 0x1)
	{
	  out_operands[1] = GEN_INT (bit);

	  output_asm_insn ("bseti\t%0,%1", out_operands);
	}
      mask >>= 1;
    }

  return "";
}
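
/* E.g. (illustrative) a mask of 0x5 emits "bseti rd,0" followed by
   "bseti rd,2" -- one instruction per set bit.  */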

/* Output a series of bclri's corresponding to mask.  */

const char *
mcore_output_bclri (dst, mask)
     rtx dst;
     int mask;
{
  rtx out_operands[2];
  int bit;

  out_operands[0] = dst;

  for (bit = 0; bit < 32; bit++)
    {
      if ((mask & 0x1) == 0x0)
	{
	  out_operands[1] = GEN_INT (bit);

	  output_asm_insn ("bclri\t%0,%1", out_operands);
	}

      mask >>= 1;
    }

  return "";
}

/* Output a conditional move of two constants that are +/- 1 within each
   other.  See the "movtK" patterns in mcore.md.  I'm not sure this is
   really worth the effort.  */

const char *
mcore_output_cmov (operands, cmp_t, test)
     rtx operands[];
     int cmp_t;
     const char * test;
{
  int load_value;
  int adjust_value;
  rtx out_operands[4];

  out_operands[0] = operands[0];

  /* Check to see which constant is loadable.  */
  if (const_ok_for_mcore (INTVAL (operands[1])))
    {
      out_operands[1] = operands[1];
      out_operands[2] = operands[2];
    }
  else if (const_ok_for_mcore (INTVAL (operands[2])))
    {
      out_operands[1] = operands[2];
      out_operands[2] = operands[1];

      /* Complement test since constants are swapped.  */
      cmp_t = (cmp_t == 0);
    }
  load_value   = INTVAL (out_operands[1]);
  adjust_value = INTVAL (out_operands[2]);

  /* First output the test if folded into the pattern.  */

  if (test)
    output_asm_insn (test, operands);

  /* Load the constant - for now, only support constants that can be
     generated with a single instruction.  Maybe add general inlinable
     constants later (this will increase the # of patterns since the
     instruction sequence has a different length attribute).  */
  if (load_value >= 0 && load_value <= 127)
    output_asm_insn ("movi\t%0,%1", out_operands);
  else if ((load_value & (load_value - 1)) == 0)
    output_asm_insn ("bgeni\t%0,%P1", out_operands);
  else if ((load_value & (load_value + 1)) == 0)
    output_asm_insn ("bmaski\t%0,%N1", out_operands);

  /* Output the constant adjustment.  */
  if (load_value > adjust_value)
    {
      if (cmp_t)
	output_asm_insn ("decf\t%0", out_operands);
      else
	output_asm_insn ("dect\t%0", out_operands);
    }
  else
    {
      if (cmp_t)
	output_asm_insn ("incf\t%0", out_operands);
      else
	output_asm_insn ("inct\t%0", out_operands);
    }

  return "";
}
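
/* E.g. (illustrative): a conditional move between the constants 5 and 6
   loads the directly-loadable one ("movi rd,5") and then conditionally
   adjusts it by one ("inct rd" or "incf rd", depending on the sense of
   the test) to reach the other value.  */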

/* Outputs the peephole for moving a constant that gets not'ed followed
   by an and (i.e. combine the not and the and into andn).  BRC  */

const char *
mcore_output_andn (insn, operands)
     rtx insn ATTRIBUTE_UNUSED;
     rtx operands[];
{
  int x, y;
  rtx out_operands[3];
  const char * load_op;
  char buf[256];

  if (try_constant_tricks (INTVAL (operands[1]), &x, &y) != 2)
    abort ();

  out_operands[0] = operands[0];
  out_operands[1] = GEN_INT (x);
  out_operands[2] = operands[2];

  if (x >= 0 && x <= 127)
    load_op = "movi\t%0,%1";

  /* Try exact power of two.  */
  else if ((x & (x - 1)) == 0)
    load_op = "bgeni\t%0,%P1";

  /* Try exact power of two - 1.  */
  else if ((x & (x + 1)) == 0)
    load_op = "bmaski\t%0,%N1";

  else
    load_op = "BADMOVI\t%0,%1";

  sprintf (buf, "%s\n\tandn\t%%2,%%0", load_op);
  output_asm_insn (buf, out_operands);

  return "";
}
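
/* E.g. (illustrative, register names assumed): ANDing with the constant
   0xffffff00 would otherwise need a literal load; since its complement
   0xff is loadable with a single bmaski (trick 2), the peephole emits

	bmaski	rt,8		// rt = 0xff
	andn	rd,rt		// rd &= ~0xff  */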

/* Output an inline constant.  */

static const char *
output_inline_const (mode, operands)
     enum machine_mode mode;
     rtx operands[];
{
  int x = 0, y = 0;
  int trick_no;
  rtx out_operands[3];
  char buf[256];
  char load_op[256];
  const char * dst_fmt;
  int value;

  value = INTVAL (operands[1]);

  if ((trick_no = try_constant_tricks (value, &x, &y)) == 0)
    {
      /* lrw's are handled separately: Large inlinable constants
	 never get turned into lrw's.  Our caller uses try_constant_tricks
	 to back off to an lrw rather than calling this routine.  */
      abort ();
    }

  if (trick_no == 1)
    x = value;

  /* operands: 0 = dst, 1 = load immed., 2 = immed. adjustment.  */
  out_operands[0] = operands[0];
  out_operands[1] = GEN_INT (x);

  if (trick_no > 2)
    out_operands[2] = GEN_INT (y);

  /* Select dst format based on mode.  */
  if (mode == DImode && (! TARGET_LITTLE_END))
    dst_fmt = "%R0";
  else
    dst_fmt = "%0";

  if (x >= 0 && x <= 127)
    sprintf (load_op, "movi\t%s,%%1", dst_fmt);

  /* Try exact power of two.  */
  else if ((x & (x - 1)) == 0)
    sprintf (load_op, "bgeni\t%s,%%P1", dst_fmt);

  /* Try exact power of two - 1.  */
  else if ((x & (x + 1)) == 0)
    sprintf (load_op, "bmaski\t%s,%%N1", dst_fmt);

  else
    sprintf (load_op, "BADMOVI\t%s,%%1", dst_fmt);

  switch (trick_no)
    {
    case 1:
      strcpy (buf, load_op);
      break;
    case 2:   /* not */
      sprintf (buf, "%s\n\tnot\t%s\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 3:   /* add */
      sprintf (buf, "%s\n\taddi\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 4:   /* sub */
      sprintf (buf, "%s\n\tsubi\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 5:   /* rsub */
      /* Never happens unless -mrsubi, see try_constant_tricks().  */
      sprintf (buf, "%s\n\trsubi\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 6:   /* bset */
      sprintf (buf, "%s\n\tbseti\t%s,%%P2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 7:   /* bclr */
      sprintf (buf, "%s\n\tbclri\t%s,%%Q2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 8:   /* rotl */
      sprintf (buf, "%s\n\trotli\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 9:   /* lsl */
      sprintf (buf, "%s\n\tlsli\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 10:  /* ixh */
      sprintf (buf, "%s\n\tixh\t%s,%s\t// %d 0x%x", load_op, dst_fmt, dst_fmt, value, value);
      break;
    case 11:  /* ixw */
      sprintf (buf, "%s\n\tixw\t%s,%s\t// %d 0x%x", load_op, dst_fmt, dst_fmt, value, value);
      break;
    default:
      return "";
    }

  output_asm_insn (buf, out_operands);

  return "";
}

/* Output a move of a word or less value.  */

const char *
mcore_output_move (insn, operands, mode)
     rtx insn ATTRIBUTE_UNUSED;
     rtx operands[];
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  rtx dst = operands[0];
  rtx src = operands[1];

  if (GET_CODE (dst) == REG)
    {
      if (GET_CODE (src) == REG)
	{
	  if (REGNO (src) == CC_REG)		/* r-c */
	    return "mvc\t%0";
	  else
	    return "mov\t%0,%1";		/* r-r */
	}
      else if (GET_CODE (src) == MEM)
	{
	  if (GET_CODE (XEXP (src, 0)) == LABEL_REF)
	    return "lrw\t%0,[%1]";		/* a-R */
	  else
	    return "ldw\t%0,%1";		/* r-m */
	}
      else if (GET_CODE (src) == CONST_INT)
	{
	  int x, y;

	  if (CONST_OK_FOR_I (INTVAL (src)))		/* r-I */
	    return "movi\t%0,%1";
	  else if (CONST_OK_FOR_M (INTVAL (src)))	/* r-M */
	    return "bgeni\t%0,%P1\t// %1 %x1";
	  else if (CONST_OK_FOR_N (INTVAL (src)))	/* r-N */
	    return "bmaski\t%0,%N1\t// %1 %x1";
	  else if (try_constant_tricks (INTVAL (src), &x, &y))	/* R-P */
	    return output_inline_const (SImode, operands);	/* 1-2 insns */
	  else
	    return "lrw\t%0,%x1\t// %1";	/* Get it from literal pool.  */
	}
      else
	return "lrw\t%0, %1";		/* Into the literal pool.  */
    }
  else if (GET_CODE (dst) == MEM)		/* m-r */
    return "stw\t%1,%0";

  abort ();
}

/* Outputs a constant inline -- regardless of the cost.
   Useful for things where we've gotten into trouble and think we'd
   be doing an lrw into r15 (forbidden).  This lets us get out of
   that pickle even after register allocation.  */

const char *
mcore_output_inline_const_forced (insn, operands, mode)
     rtx insn ATTRIBUTE_UNUSED;
     rtx operands[];
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  unsigned long value = INTVAL (operands[1]);
  unsigned long ovalue = value;
  struct piece
  {
    int low;
    int shift;
  }
  part[6];
  int i;

  if (mcore_const_ok_for_inline (value))
    return output_inline_const (SImode, operands);

  for (i = 0; (unsigned) i < ARRAY_SIZE (part); i++)
    {
      part[i].shift = 0;
      part[i].low = (value & 0x1F);
      value -= part[i].low;

      if (mcore_const_ok_for_inline (value))
	break;
      else
	{
	  value >>= 5;
	  part[i].shift = 5;

	  while ((value & 1) == 0)
	    {
	      part[i].shift++;
	      value >>= 1;
	    }

	  if (mcore_const_ok_for_inline (value))
	    break;
	}
    }

  /* 5 bits per iteration, a maximum of 5 times == 25 bits and leaves
     7 bits left in the constant -- which we know we can cover with
     a movi.  The final value can't be zero otherwise we'd have stopped
     in the previous iteration.  */
  if (value == 0 || ! mcore_const_ok_for_inline (value))
    abort ();

  /* Now, work our way backwards emitting the constant.  */

  /* Emit the value that remains -- it will be non-zero.  */
  operands[1] = GEN_INT (value);
  output_asm_insn (output_inline_const (SImode, operands), operands);

  while (i >= 0)
    {
      /* Shift anything we've already loaded.  */
      if (part[i].shift)
	{
	  operands[2] = GEN_INT (part[i].shift);
	  output_asm_insn ("lsli\t%0,%2", operands);
	  value <<= part[i].shift;
	}

      /* Add anything we need into the low 5 bits.  */
      if (part[i].low != 0)
	{
	  operands[2] = GEN_INT (part[i].low);
	  output_asm_insn ("addi\t%0,%2", operands);
	  value += part[i].low;
	}

      i--;
    }

  if (value != ovalue)		/* sanity */
    abort ();

  /* We've output all the instructions.  */
  return "";
}
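
/* A worked example (illustrative): 0x1234 has no short trick sequence,
   so the loop above peels the low five bits (0x14), leaving 0x1220,
   then shifts right by 5 to reach 145, which is inlinable as 128 + 17.
   Working backwards this emits

	bgeni	rd,7		// rd = 128
	addi	rd,17		// rd = 145
	lsli	rd,5		// rd = 4640
	addi	rd,20		// rd = 4660 = 0x1234  */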

/* Return a sequence of instructions to perform DI or DF move.
   Since the MCORE cannot move a DI or DF in one instruction, we have
   to take care when we see overlapping source and dest registers.  */

const char *
mcore_output_movedouble (operands, mode)
     rtx operands[];
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  rtx dst = operands[0];
  rtx src = operands[1];

  if (GET_CODE (dst) == REG)
    {
      if (GET_CODE (src) == REG)
	{
	  int dstreg = REGNO (dst);
	  int srcreg = REGNO (src);

	  /* Ensure the second source is not overwritten.  */
	  if (srcreg + 1 == dstreg)
	    return "mov\t%R0,%R1\n\tmov\t%0,%1";
	  else
	    return "mov\t%0,%1\n\tmov\t%R0,%R1";
	}
      else if (GET_CODE (src) == MEM)
	{
	  rtx memexp = XEXP (src, 0);
	  int dstreg = REGNO (dst);
	  int basereg = -1;

	  if (GET_CODE (memexp) == LABEL_REF)
	    return "lrw\t%0,[%1]\n\tlrw\t%R0,[%R1]";
	  else if (GET_CODE (memexp) == REG)
	    basereg = REGNO (memexp);
	  else if (GET_CODE (memexp) == PLUS)
	    {
	      if (GET_CODE (XEXP (memexp, 0)) == REG)
		basereg = REGNO (XEXP (memexp, 0));
	      else if (GET_CODE (XEXP (memexp, 1)) == REG)
		basereg = REGNO (XEXP (memexp, 1));
	      else
		abort ();
	    }
	  else
	    abort ();

	  /* ??? length attribute is wrong here.  */
	  if (dstreg == basereg)
	    {
	      /* Just load them in reverse order.  */
	      return "ldw\t%R0,%R1\n\tldw\t%0,%1";

	      /* XXX: alternative: move basereg to basereg+1
		 and then fall through.  */
	    }
	  else
	    return "ldw\t%0,%1\n\tldw\t%R0,%R1";
	}
      else if (GET_CODE (src) == CONST_INT)
	{
	  if (TARGET_LITTLE_END)
	    {
	      if (CONST_OK_FOR_I (INTVAL (src)))
		output_asm_insn ("movi\t%0,%1", operands);
	      else if (CONST_OK_FOR_M (INTVAL (src)))
		output_asm_insn ("bgeni\t%0,%P1", operands);
	      else if (INTVAL (src) == -1)
		output_asm_insn ("bmaski\t%0,32", operands);
	      else if (CONST_OK_FOR_N (INTVAL (src)))
		output_asm_insn ("bmaski\t%0,%N1", operands);
	      else
		abort ();

	      if (INTVAL (src) < 0)
		return "bmaski\t%R0,32";
	      else
		return "movi\t%R0,0";
	    }
	  else
	    {
	      if (CONST_OK_FOR_I (INTVAL (src)))
		output_asm_insn ("movi\t%R0,%1", operands);
	      else if (CONST_OK_FOR_M (INTVAL (src)))
		output_asm_insn ("bgeni\t%R0,%P1", operands);
	      else if (INTVAL (src) == -1)
		output_asm_insn ("bmaski\t%R0,32", operands);
	      else if (CONST_OK_FOR_N (INTVAL (src)))
		output_asm_insn ("bmaski\t%R0,%N1", operands);
	      else
		abort ();

	      if (INTVAL (src) < 0)
		return "bmaski\t%0,32";
	      else
		return "movi\t%0,0";
	    }
	}
      else
	abort ();
    }
  else if (GET_CODE (dst) == MEM && GET_CODE (src) == REG)
    return "stw\t%1,%0\n\tstw\t%R1,%R0";
  else
    abort ();
}
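
/* Overlap example (illustrative, register numbers assumed): copying the
   pair r2/r3 into r3/r4 satisfies srcreg + 1 == dstreg, so the high
   word is moved first ("mov r4,r3" then "mov r3,r2"); moving the low
   word first would clobber r3 before it was read.  */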

/* Predicates used by the templates.  */

/* Nonzero if OP can be source of a simple move operation.  */

int
mcore_general_movsrc_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  /* Any (MEM LABEL_REF) is OK.  That is a pc-relative load.  */
  if (GET_CODE (op) == MEM && GET_CODE (XEXP (op, 0)) == LABEL_REF)
    return 1;

  return general_operand (op, mode);
}

/* Nonzero if OP can be destination of a simple move operation.  */

int
mcore_general_movdst_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (GET_CODE (op) == REG && REGNO (op) == CC_REG)
    return 0;

  return general_operand (op, mode);
}

/* Nonzero if OP is a normal arithmetic register.  */

int
mcore_arith_reg_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (! register_operand (op, mode))
    return 0;

  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);

  if (GET_CODE (op) == REG)
    return REGNO (op) != CC_REG;

  return 1;
}

/* Nonzero if OP should be recognized during reload for an ixh/ixw
   operand.  See the ixh/ixw patterns.  */

int
mcore_reload_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (mcore_arith_reg_operand (op, mode))
    return 1;

  if (! reload_in_progress)
    return 0;

  return GET_CODE (op) == MEM;
}

/* Nonzero if OP is a valid source operand for an arithmetic insn
   taking a J immediate.  */

int
mcore_arith_J_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_J (INTVAL (op)))
    return 1;

  return 0;
}

/* Nonzero if OP is a valid source operand for an arithmetic insn
   taking a K immediate.  */

int
mcore_arith_K_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_K (INTVAL (op)))
    return 1;

  return 0;
}

/* Nonzero if OP is a valid source operand for a shift or rotate insn.  */

int
mcore_arith_K_operand_not_0 (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (   GET_CODE (op) == CONST_INT
      && CONST_OK_FOR_K (INTVAL (op))
      && INTVAL (op) != 0)
    return 1;

  return 0;
}

int
mcore_arith_K_S_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT)
    {
      if (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_M (~INTVAL (op)))
	return 1;
    }

  return 0;
}

int
mcore_arith_S_operand (op)
     rtx op;
{
  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (~INTVAL (op)))
    return 1;

  return 0;
}

int
mcore_arith_M_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (INTVAL (op)))
    return 1;

  return 0;
}

/* Nonzero if OP is a valid source operand for loading.  */

int
mcore_arith_imm_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT && const_ok_for_mcore (INTVAL (op)))
    return 1;

  return 0;
}

int
mcore_arith_any_imm_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT)
    return 1;

  return 0;
}

/* Nonzero if OP is a valid source operand for a cmov with two consts +/- 1.  */

int
mcore_arith_O_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_O (INTVAL (op)))
    return 1;

  return 0;
}

/* Nonzero if OP is a valid source operand for a btsti.  */

int
mcore_literal_K_operand (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_K (INTVAL (op)))
    return 1;

  return 0;
}

/* Nonzero if OP is a valid source operand for an add/sub insn.  */

int
mcore_addsub_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT)
    {
      return 1;

      /* The following is removed because it precludes large constants from
	 being returned as valid source operands for an add/sub insn.  While
	 large constants may not directly be used in an add/sub, they may if
	 first loaded into a register.  Thus, this predicate should indicate
	 that they are valid, and the constraint in mcore.md should control
	 whether an additional load to register is needed.  (see mcore.md,
	 addsi).  -- DAC 4/2/1998  */
      /*
	if (CONST_OK_FOR_J (INTVAL (op)) || CONST_OK_FOR_L (INTVAL (op)))
	  return 1;
      */
    }

  return 0;
}

/* Nonzero if OP is a valid source operand for a compare operation.  */

int
mcore_compare_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT && INTVAL (op) == 0)
    return 1;

  return 0;
}

/* Expand insert bit field.  BRC  */

int
mcore_expand_insv (operands)
     rtx operands[];
{
  int width = INTVAL (operands[1]);
  int posn = INTVAL (operands[2]);
  int mask;
  rtx mreg, sreg, ereg;

  /* To get width 1 insv, the test in store_bit_field() (expmed.c, line 191)
     for width==1 must be removed.  Look around line 368.  This is something
     we really want the md part to do.  */
  if (width == 1 && GET_CODE (operands[3]) == CONST_INT)
    {
      /* Do directly with bseti or bclri.  */
      /* RBE: 2/97 consider only low bit of constant.  */
      if ((INTVAL (operands[3]) & 1) == 0)
	{
	  mask = ~(1 << posn);
	  emit_insn (gen_rtx (SET, SImode, operands[0],
			      gen_rtx (AND, SImode, operands[0], GEN_INT (mask))));
	}
      else
	{
	  mask = 1 << posn;
	  emit_insn (gen_rtx (SET, SImode, operands[0],
			      gen_rtx (IOR, SImode, operands[0], GEN_INT (mask))));
	}

      return 1;
    }

  /* Look at some bitfield placements that we aren't interested
     in handling ourselves, unless specifically directed to do so.  */
  if (! TARGET_W_FIELD)
    return 0;		/* Generally, give up about now.  */

  if (width == 8 && posn % 8 == 0)
    /* Byte sized and aligned; let caller break it up.  */
    return 0;

  if (width == 16 && posn % 16 == 0)
    /* Short sized and aligned; let caller break it up.  */
    return 0;

  /* The general case - we can do this a little bit better than what the
     machine independent part tries.  This will get rid of all the subregs
     that mess up constant folding in combine when working with relaxed
     immediates.  */

  /* If setting the entire field, do it directly.  */
  if (GET_CODE (operands[3]) == CONST_INT
      && INTVAL (operands[3]) == ((1 << width) - 1))
    {
      mreg = force_reg (SImode, GEN_INT (INTVAL (operands[3]) << posn));
      emit_insn (gen_rtx (SET, SImode, operands[0],
			  gen_rtx (IOR, SImode, operands[0], mreg)));
      return 1;
    }

  /* Generate the clear mask.  */
  mreg = force_reg (SImode, GEN_INT (~(((1 << width) - 1) << posn)));

  /* Clear the field, to overlay it later with the source.  */
  emit_insn (gen_rtx (SET, SImode, operands[0],
		      gen_rtx (AND, SImode, operands[0], mreg)));

  /* If the source is constant 0, we've nothing to add back.  */
  if (GET_CODE (operands[3]) == CONST_INT && INTVAL (operands[3]) == 0)
    return 1;

  /* XXX: Should we worry about more games with constant values?
     We've covered the high profile: set/clear single-bit and many-bit
     fields.  How often do we see "arbitrary bit pattern" constants?  */
  sreg = copy_to_mode_reg (SImode, operands[3]);

  /* Extract src as same width as dst (needed for signed values).  We
     always have to do this since we widen everything to SImode.
     We don't have to mask if we're shifting this up against the
     MSB of the register (e.g., the shift will push out any hi-order
     bits).  */
  if (width + posn != (int) GET_MODE_BITSIZE (SImode))
    {
      ereg = force_reg (SImode, GEN_INT ((1 << width) - 1));
      emit_insn (gen_rtx (SET, SImode, sreg,
			  gen_rtx (AND, SImode, sreg, ereg)));
    }

  /* Insert source value in dest.  */
  if (posn != 0)
    emit_insn (gen_rtx (SET, SImode, sreg,
			gen_rtx (ASHIFT, SImode, sreg, GEN_INT (posn))));

  emit_insn (gen_rtx (SET, SImode, operands[0],
		      gen_rtx (IOR, SImode, operands[0], sreg)));

  return 1;
}
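
/* Worked example (illustrative): inserting a 3-bit field at position 5
   clears with the mask ~(0x7 << 5) = 0xffffff1f, masks the source with
   0x7 (since 3 + 5 != 32), shifts it left by 5, and IORs it into the
   destination.  */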

/* Return 1 if OP is a load multiple operation.  It is known to be a
   PARALLEL and the first section will be tested.  */

int
mcore_load_multiple_operation (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  int count = XVECLEN (op, 0);
  int dest_regno;
  rtx src_addr;
  int i;

  /* Perform a quick check so we don't blow up below.  */
  if (count <= 1
      || GET_CODE (XVECEXP (op, 0, 0)) != SET
      || GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != REG
      || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != MEM)
    return 0;

  dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, 0)));
  src_addr = XEXP (SET_SRC (XVECEXP (op, 0, 0)), 0);

  for (i = 1; i < count; i++)
    {
      rtx elt = XVECEXP (op, 0, i);

      if (GET_CODE (elt) != SET
	  || GET_CODE (SET_DEST (elt)) != REG
	  || GET_MODE (SET_DEST (elt)) != SImode
	  || REGNO (SET_DEST (elt)) != (unsigned) (dest_regno + i)
	  || GET_CODE (SET_SRC (elt)) != MEM
	  || GET_MODE (SET_SRC (elt)) != SImode
	  || GET_CODE (XEXP (SET_SRC (elt), 0)) != PLUS
	  || ! rtx_equal_p (XEXP (XEXP (SET_SRC (elt), 0), 0), src_addr)
	  || GET_CODE (XEXP (XEXP (SET_SRC (elt), 0), 1)) != CONST_INT
	  || INTVAL (XEXP (XEXP (SET_SRC (elt), 0), 1)) != i * 4)
	return 0;
    }

  return 1;
}
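
/* The PARALLEL shape accepted above looks like (illustrative RTL,
   register numbers assumed):

	(parallel [(set (reg:SI 4) (mem:SI (reg:SI 0)))
		   (set (reg:SI 5) (mem:SI (plus:SI (reg:SI 0)
						    (const_int 4))))
		   ...])

   i.e. consecutive destination registers loaded from consecutive word
   offsets off a single base address.  */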

/* Similar, but tests for store multiple.  */

int
mcore_store_multiple_operation (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  int count = XVECLEN (op, 0);
  int src_regno;
  rtx dest_addr;
  int i;

  /* Perform a quick check so we don't blow up below.  */
  if (count <= 1
      || GET_CODE (XVECEXP (op, 0, 0)) != SET
      || GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != MEM
      || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != REG)
    return 0;

  src_regno = REGNO (SET_SRC (XVECEXP (op, 0, 0)));
  dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, 0)), 0);

  for (i = 1; i < count; i++)
    {
      rtx elt = XVECEXP (op, 0, i);

      if (GET_CODE (elt) != SET
	  || GET_CODE (SET_SRC (elt)) != REG
	  || GET_MODE (SET_SRC (elt)) != SImode
	  || REGNO (SET_SRC (elt)) != (unsigned) (src_regno + i)
	  || GET_CODE (SET_DEST (elt)) != MEM
	  || GET_MODE (SET_DEST (elt)) != SImode
	  || GET_CODE (XEXP (SET_DEST (elt), 0)) != PLUS
	  || ! rtx_equal_p (XEXP (XEXP (SET_DEST (elt), 0), 0), dest_addr)
	  || GET_CODE (XEXP (XEXP (SET_DEST (elt), 0), 1)) != CONST_INT
	  || INTVAL (XEXP (XEXP (SET_DEST (elt), 0), 1)) != i * 4)
	return 0;
    }

  return 1;
}

/* ??? Block move stuff stolen from m88k.  This code has not been
   verified for correctness.  */

/* Emit code to perform a block move.  Choose the best method.

   OPERANDS[0] is the destination.
   OPERANDS[1] is the source.
   OPERANDS[2] is the size.
   OPERANDS[3] is the alignment safe to use.  */

/* Emit code to perform a block move with an offset sequence of ldw/st
   instructions (..., ldw 0, stw 1, ldw 1, stw 0, ...).  SIZE and ALIGN are
   known constants.  DEST and SRC are registers.  OFFSET is the known
   starting point for the output pattern.  */

static const enum machine_mode mode_from_align[] =
{
  VOIDmode, QImode, HImode, VOIDmode, SImode,
  VOIDmode, VOIDmode, VOIDmode, DImode
};

static void
block_move_sequence (dest, dst_mem, src, src_mem, size, align, offset)
     rtx dest, dst_mem;
     rtx src, src_mem;
     int size;
     int align;
     int offset;
{
  rtx temp[2];
  enum machine_mode mode[2];
  int amount[2];
  int active[2];
  int phase = 0;
  int next;
  int offset_ld = offset;
  int offset_st = offset;

  active[0] = active[1] = FALSE;

  /* Establish parameters for the first load and for the second load if
     it is known to be the same mode as the first.  */
  amount[0] = amount[1] = align;

  mode[0] = mode_from_align[align];

  temp[0] = gen_reg_rtx (mode[0]);

  if (size >= 2 * align)
    {
      mode[1] = mode[0];
      temp[1] = gen_reg_rtx (mode[1]);
    }

  do
    {
      rtx srcp, dstp;

      next = phase;
      phase = !phase;

      if (size > 0)
	{
	  /* Change modes as the sequence tails off.  */
	  if (size < amount[next])
	    {
	      amount[next] = (size >= 4 ? 4 : (size >= 2 ? 2 : 1));
	      mode[next] = mode_from_align[amount[next]];
	      temp[next] = gen_reg_rtx (mode[next]);
	    }

	  size -= amount[next];
	  srcp = gen_rtx (MEM,
#if 0
			  MEM_IN_STRUCT_P (src_mem) ? mode[next] : BLKmode,
#else
			  mode[next],
#endif
			  gen_rtx (PLUS, Pmode, src,
				   gen_rtx (CONST_INT, SImode, offset_ld)));

	  RTX_UNCHANGING_P (srcp) = RTX_UNCHANGING_P (src_mem);
	  MEM_VOLATILE_P (srcp) = MEM_VOLATILE_P (src_mem);
	  MEM_IN_STRUCT_P (srcp) = 1;
	  emit_insn (gen_rtx (SET, VOIDmode, temp[next], srcp));
	  offset_ld += amount[next];
	  active[next] = TRUE;
	}

      if (active[phase])
	{
	  active[phase] = FALSE;

	  dstp = gen_rtx (MEM,
#if 0
			  MEM_IN_STRUCT_P (dst_mem) ? mode[phase] : BLKmode,
#else
			  mode[phase],
#endif
			  gen_rtx (PLUS, Pmode, dest,
				   gen_rtx (CONST_INT, SImode, offset_st)));

	  RTX_UNCHANGING_P (dstp) = RTX_UNCHANGING_P (dst_mem);
	  MEM_VOLATILE_P (dstp) = MEM_VOLATILE_P (dst_mem);
	  MEM_IN_STRUCT_P (dstp) = 1;
	  emit_insn (gen_rtx (SET, VOIDmode, dstp, temp[phase]));
	  offset_st += amount[phase];
	}
    }
  while (active[next]);
}
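
/* E.g. (illustrative): an 8-byte copy with 4-byte alignment expands to
   the interleaved sequence

	ldw	t0,(src,0)
	ldw	t1,(src,4)
	stw	t0,(dst,0)
	stw	t1,(dst,4)

   where the second load is issued before the first store so that load
   latency is overlapped.  */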

void
mcore_expand_block_move (dst_mem, src_mem, operands)
     rtx dst_mem;
     rtx src_mem;
     rtx * operands;
{
  int align = INTVAL (operands[3]);
  int bytes;

  if (GET_CODE (operands[2]) == CONST_INT)
    {
      bytes = INTVAL (operands[2]);

      if (bytes <= 0)
	return;
      if (align > 4)
	align = 4;

      /* RBE: bumped 1 and 2 byte align from 1 and 2 to 4 and 8 bytes before
	 we give up and go to memcpy.  */
      if ((align == 4 && (bytes <= 4*4
			  || ((bytes & 01) == 0 && bytes <= 8*4)
			  || ((bytes & 03) == 0 && bytes <= 16*4)))
	  || (align == 2 && bytes <= 4*2)
	  || (align == 1 && bytes <= 4*1))
	{
	  block_move_sequence (operands[0], dst_mem, operands[1], src_mem,
			       bytes, align, 0);
	  return;
	}
    }

  /* If we get here, just use the library routine.  */
  emit_library_call (gen_rtx (SYMBOL_REF, Pmode, "memcpy"), 0, VOIDmode, 3,
		     operands[0], Pmode, operands[1], Pmode, operands[2],
		     SImode);
}
2032\f
2033
2034/* Code to generate prologue and epilogue sequences. */
2035static int number_of_regs_before_varargs;
4816b8e4 2036
8f90be4c
NC
2037/* Set by SETUP_INCOMING_VARARGS to indicate to prolog that this is
2038 for a varargs function. */
2039static int current_function_anonymous_args;
2040
8f90be4c
NC
2041#define STACK_BYTES (STACK_BOUNDARY/BITS_PER_UNIT)
2042#define STORE_REACH (64) /* Maximum displace of word store + 4. */
4816b8e4 2043#define ADDI_REACH (32) /* Maximum addi operand. */
8f90be4c 2044
8f90be4c
NC
2045static void
2046layout_mcore_frame (infp)
2047 struct mcore_frame * infp;
2048{
2049 int n;
2050 unsigned int i;
2051 int nbytes;
2052 int regarg;
2053 int localregarg;
2054 int localreg;
2055 int outbounds;
2056 unsigned int growths;
2057 int step;
2058
2059 /* Might have to spill bytes to re-assemble a big argument that
4816b8e4 2060 was passed partially in registers and partially on the stack. */
8f90be4c
NC
2061 nbytes = current_function_pretend_args_size;
2062
2063 /* Determine how much space for spilled anonymous args (e.g., stdarg). */
2064 if (current_function_anonymous_args)
2065 nbytes += (NPARM_REGS - number_of_regs_before_varargs) * UNITS_PER_WORD;
2066
2067 infp->arg_size = nbytes;
2068
2069 /* How much space to save non-volatile registers we stomp. */
2070 infp->reg_mask = calc_live_regs (& n);
2071 infp->reg_size = n * 4;
2072
2073 /* And the rest of it... locals and space for overflowed outbounds. */
2074 infp->local_size = get_frame_size ();
2075 infp->outbound_size = current_function_outgoing_args_size;
2076
2077 /* Make sure we have a whole number of words for the locals. */
2078 if (infp->local_size % STACK_BYTES)
2079 infp->local_size = (infp->local_size + STACK_BYTES - 1) & ~ (STACK_BYTES -1);
2080
2081 /* The only thing we know we have to pad is the outbound space, since
2082 we've aligned our locals assuming that the base of the locals is aligned. */
2083 infp->pad_local = 0;
2084 infp->pad_reg = 0;
2085 infp->pad_outbound = 0;
2086 if (infp->outbound_size % STACK_BYTES)
2087 infp->pad_outbound = STACK_BYTES - (infp->outbound_size % STACK_BYTES);
2088
2089 /* Now we see how we want to stage the prologue so that it does
2090 the most appropriate stack growth and register saves to either:
2091 (1) run fast,
2092 (2) reduce instruction space, or
2093 (3) reduce stack space. */
2094 for (i = 0; i < ARRAY_SIZE (infp->growth); i++)
2095 infp->growth[i] = 0;
2096
2097 regarg = infp->reg_size + infp->arg_size;
2098 localregarg = infp->local_size + regarg;
2099 localreg = infp->local_size + infp->reg_size;
2100 outbounds = infp->outbound_size + infp->pad_outbound;
2101 growths = 0;
2102
2103 /* XXX: Consider one where we consider localregarg + outbound too! */
2104
2105 /* Frame of <= 32 bytes, and an stm would cover <= 2 registers:
2106 use stw's with offsets and buy the frame in one shot. */
2107 if (localregarg <= ADDI_REACH
2108 && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
2109 {
2110 /* Make sure we'll be aligned. */
2111 if (localregarg % STACK_BYTES)
2112 infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);
2113
2114 step = localregarg + infp->pad_reg;
2115 infp->reg_offset = infp->local_size;
2116
2117 if (outbounds + step <= ADDI_REACH && !frame_pointer_needed)
2118 {
2119 step += outbounds;
2120 infp->reg_offset += outbounds;
2121 outbounds = 0;
2122 }
2123
2124 infp->arg_offset = step - 4;
2125 infp->growth[growths++] = step;
2126 infp->reg_growth = growths;
2127 infp->local_growth = growths;
2128
2129 /* If we haven't already folded it in. */
2130 if (outbounds)
2131 infp->growth[growths++] = outbounds;
2132
2133 goto finish;
2134 }
2135
2136 /* Frame can't be done with a single subi, but can be done with 2
2137 insns. If the 'stm' is getting <= 2 registers, we use stw's and
2138 shift some of the stack purchase into the first subi, so both are
2139 single instructions. */
2140 if (localregarg <= STORE_REACH
2141 && (infp->local_size > ADDI_REACH)
2142 && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
2143 {
2144 int all;
2145
2146 /* Make sure we'll be aligned; use either pad_reg or pad_local. */
2147 if (localregarg % STACK_BYTES)
2148 infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);
2149
2150 all = localregarg + infp->pad_reg + infp->pad_local;
2151 step = ADDI_REACH; /* As much up front as we can. */
2152 if (step > all)
2153 step = all;
2154
2155 /* XXX: Consider whether step will still be aligned; we believe so. */
2156 infp->arg_offset = step - 4;
2157 infp->growth[growths++] = step;
2158 infp->reg_growth = growths;
2159 infp->reg_offset = step - infp->pad_reg - infp->reg_size;
2160 all -= step;
2161
2162 /* Can we fold in any space required for outbounds? */
2163 if (outbounds + all <= ADDI_REACH && !frame_pointer_needed)
2164 {
2165 all += outbounds;
2166 outbounds = 0;
2167 }
2168
2169 /* Get the rest of the locals in place. */
2170 step = all;
2171 infp->growth[growths++] = step;
2172 infp->local_growth = growths;
2173 all -= step;
2174
2175 assert (all == 0);
2176
2177 /* Finish off if we need to do so. */
2178 if (outbounds)
2179 infp->growth[growths++] = outbounds;
2180
2181 goto finish;
2182 }
2183
2184 /* Registers + args is nicely aligned, so we'll buy that in one shot.
2185 Then we buy the rest of the frame in 1 or 2 steps depending on
2186 whether we need a frame pointer. */
2187 if ((regarg % STACK_BYTES) == 0)
2188 {
2189 infp->growth[growths++] = regarg;
2190 infp->reg_growth = growths;
2191 infp->arg_offset = regarg - 4;
2192 infp->reg_offset = 0;
2193
2194 if (infp->local_size % STACK_BYTES)
2195 infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);
2196
2197 step = infp->local_size + infp->pad_local;
2198
2199 if (!frame_pointer_needed)
2200 {
2201 step += outbounds;
2202 outbounds = 0;
2203 }
2204
2205 infp->growth[growths++] = step;
2206 infp->local_growth = growths;
2207
2208 /* If there's any left to be done. */
2209 if (outbounds)
2210 infp->growth[growths++] = outbounds;
2211
2212 goto finish;
2213 }
2214
2215 /* XXX: optimizations that we'll want to play with....
2216 -- regarg is not aligned, but it's a small number of registers;
2217 use some of localsize so that regarg is aligned and then
2218 save the registers. */
2219
2220 /* Simple encoding; plods down the stack buying the pieces as it goes.
2221 -- does not optimize space consumption.
2222 -- does not attempt to optimize instruction counts.
2223 -- but it is safe for all alignments. */
2224 if (regarg % STACK_BYTES != 0)
2225 infp->pad_reg = STACK_BYTES - (regarg % STACK_BYTES);
2226
2227 infp->growth[growths++] = infp->arg_size + infp->reg_size + infp->pad_reg;
2228 infp->reg_growth = growths;
2229 infp->arg_offset = infp->growth[0] - 4;
2230 infp->reg_offset = 0;
2231
2232 if (frame_pointer_needed)
2233 {
2234 if (infp->local_size % STACK_BYTES != 0)
2235 infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);
2236
2237 infp->growth[growths++] = infp->local_size + infp->pad_local;
2238 infp->local_growth = growths;
2239
2240 infp->growth[growths++] = outbounds;
2241 }
2242 else
2243 {
2244 if ((infp->local_size + outbounds) % STACK_BYTES != 0)
2245 infp->pad_local = STACK_BYTES - ((infp->local_size + outbounds) % STACK_BYTES);
2246
2247 infp->growth[growths++] = infp->local_size + infp->pad_local + outbounds;
2248 infp->local_growth = growths;
2249 }
2250
2251 /* Anything else that we've forgotten?  Plus a few consistency checks. */
2252 finish:
2253 assert (infp->reg_offset >= 0);
2254 assert (growths <= MAX_STACK_GROWS);
2255
2256 for (i = 0; i < growths; i++)
2257 {
2258 if (infp->growth[i] % STACK_BYTES)
2259 {
2260 fprintf (stderr, "stack growth of %d is not %d aligned\n",
2261 infp->growth[i], STACK_BYTES);
2262 abort ();
2263 }
2264 }
2265}
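/* Worked example of the first staging case above (a sketch, assuming
   STACK_BYTES == 8 and no frame pointer): arg_size == 0, reg_size == 8,
   local_size == 12 and outbound_size == 0 give localregarg == 20, which
   is <= ADDI_REACH, so pad_reg == 4, growth[0] == 24, reg_offset == 12
   and arg_offset == 20: the whole frame is bought with one 24-byte
   adjustment.  */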
2266
2267/* Define the offset between two registers, one to be eliminated, and
2268 the other its replacement, at the start of a routine. */
2269
2270int
2271mcore_initial_elimination_offset (from, to)
2272 int from;
2273 int to;
2274{
2275 int above_frame;
2276 int below_frame;
2277 struct mcore_frame fi;
2278
2279 layout_mcore_frame (& fi);
2280
2281 /* fp to ap */
2282 above_frame = fi.local_size + fi.pad_local + fi.reg_size + fi.pad_reg;
2283 /* sp to fp */
2284 below_frame = fi.outbound_size + fi.pad_outbound;
2285
2286 if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
2287 return above_frame;
2288
2289 if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
2290 return above_frame + below_frame;
2291
2292 if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
2293 return below_frame;
2294
2295 abort ();
2296
2297 return 0;
2298}
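/* A sketch of the layout the offsets above imply (the stack grows
   downward):

       ap ->
             locals + pad_local    \
             saved regs + pad_reg  /  above_frame
       fp ->
             outbounds + pad_outbound   below_frame
       sp ->

   i.e., ap - fp == above_frame and fp - sp == below_frame.  */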
2299
2300/* Keep track of some information about varargs for the prolog. */
2301
2302void
2303mcore_setup_incoming_varargs (args_so_far, mode, type, ptr_pretend_size)
2304 CUMULATIVE_ARGS args_so_far;
2305 enum machine_mode mode;
2306 tree type;
2307 int * ptr_pretend_size ATTRIBUTE_UNUSED;
2308{
2309 current_function_anonymous_args = 1;
2310
2311 /* We need to know how many argument registers are used before
2312 the varargs start, so that we can push the remaining argument
2313 registers during the prologue. */
2314 number_of_regs_before_varargs = args_so_far + mcore_num_arg_regs (mode, type);
2315
2316 /* There is a bug somewhere in the arg handling code.
2317 Until I can find it, this workaround always pushes the
2318 last named argument onto the stack. */
2319 number_of_regs_before_varargs = args_so_far;
2320
2321 /* The last named argument may be split between argument registers
2322 and the stack. Allow for this here. */
2323 if (number_of_regs_before_varargs > NPARM_REGS)
2324 number_of_regs_before_varargs = NPARM_REGS;
2325}
2326
2327void
2328mcore_expand_prolog ()
2329{
2330 struct mcore_frame fi;
2331 int space_allocated = 0;
2332 int growth = 0;
2333
2334 /* Find out what we're doing. */
2335 layout_mcore_frame (&fi);
2336
2337 space_allocated = fi.arg_size + fi.reg_size + fi.local_size +
2338 fi.outbound_size + fi.pad_outbound + fi.pad_local + fi.pad_reg;
2339
2340 if (TARGET_CG_DATA)
2341 {
2342 /* Emit a symbol for this routine's frame size. */
2343 rtx x;
2344
2345 x = DECL_RTL (current_function_decl);
2346
2347 if (GET_CODE (x) != MEM)
2348 abort ();
2349
2350 x = XEXP (x, 0);
2351
2352 if (GET_CODE (x) != SYMBOL_REF)
2353 abort ();
2354
2355 if (mcore_current_function_name)
2356 free (mcore_current_function_name);
2357
2358 mcore_current_function_name = xstrdup (XSTR (x, 0));
2359
2360 ASM_OUTPUT_CG_NODE (asm_out_file, mcore_current_function_name, space_allocated);
2361
2362 if (current_function_calls_alloca)
2363 ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name, "alloca", 1);
2364
2365 /* 970425: RBE:
2366 We're looking at how the 8-byte alignment affects stack layout
2367 and where we had to pad things. This emits information we can
2368 extract which tells us about frame sizes and the like. */
2369 fprintf (asm_out_file,
2370 "\t.equ\t__$frame$info$_%s_$_%d_%d_x%x_%d_%d_%d,0\n",
2371 mcore_current_function_name,
2372 fi.arg_size, fi.reg_size, fi.reg_mask,
2373 fi.local_size, fi.outbound_size,
2374 frame_pointer_needed);
2375 }
2376
2377 if (mcore_naked_function_p ())
2378 return;
2379
2380 /* Handle stdarg+regsaves in one shot: can't be more than 64 bytes. */
2381 output_stack_adjust (-1, fi.growth[growth++]); /* grows it */
2382
2383 /* If we have a parameter passed partially in regs and partially in memory,
2384 the registers will have been stored to memory already in function.c. So
2385 we only need to do something here for varargs functions. */
2386 if (fi.arg_size != 0 && current_function_pretend_args_size == 0)
2387 {
2388 int offset;
2389 int rn = FIRST_PARM_REG + NPARM_REGS - 1;
2390 int remaining = fi.arg_size;
2391
2392 for (offset = fi.arg_offset; remaining >= 4; offset -= 4, rn--, remaining -= 4)
2393 {
2394 emit_insn (gen_movsi
2395 (gen_rtx (MEM, SImode,
2396 plus_constant (stack_pointer_rtx, offset)),
2397 gen_rtx (REG, SImode, rn)));
2398 }
2399 }
2400
2401 /* Do we need another stack adjustment before we do the register saves? */
2402 if (growth < fi.reg_growth)
2403 output_stack_adjust (-1, fi.growth[growth++]); /* grows it */
2404
2405 if (fi.reg_size != 0)
2406 {
2407 int i;
2408 int offs = fi.reg_offset;
2409
2410 for (i = 15; i >= 0; i--)
2411 {
2412 if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
2413 {
2414 int first_reg = 15;
2415
2416 while (fi.reg_mask & (1 << first_reg))
2417 first_reg--;
2418 first_reg++;
2419
2420 emit_insn (gen_store_multiple (gen_rtx (MEM, SImode, stack_pointer_rtx),
2421 gen_rtx (REG, SImode, first_reg),
2422 GEN_INT (16 - first_reg)));
2423
2424 i -= (15 - first_reg);
2425 offs += (16 - first_reg) * 4;
2426 }
2427 else if (fi.reg_mask & (1 << i))
2428 {
2429 emit_insn (gen_movsi
2430 (gen_rtx (MEM, SImode,
2431 plus_constant (stack_pointer_rtx, offs)),
2432 gen_rtx (REG, SImode, i)));
2433 offs += 4;
2434 }
2435 }
2436 }
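  /* Worked example for the store-multiple path above: with
     fi.reg_mask == 0xf000 (r12-r15 saved) and fi.reg_offset == 0,
     first_reg becomes 12 and a single stm writes r12-r15 at (sp);
     mask bits below first_reg are then handled by individual stw's.  */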
2437
2438 /* Figure the locals + outbounds. */
2439 if (frame_pointer_needed)
2440 {
2441 /* If we haven't already purchased to 'fp'. */
2442 if (growth < fi.local_growth)
2443 output_stack_adjust (-1, fi.growth[growth++]); /* grows it */
2444
2445 emit_insn (gen_movsi (frame_pointer_rtx, stack_pointer_rtx));
2446
2447 /* ... and then go any remaining distance for outbounds, etc. */
2448 if (fi.growth[growth])
2449 output_stack_adjust (-1, fi.growth[growth++]);
2450 }
2451 else
2452 {
2453 if (growth < fi.local_growth)
2454 output_stack_adjust (-1, fi.growth[growth++]); /* grows it */
2455 if (fi.growth[growth])
2456 output_stack_adjust (-1, fi.growth[growth++]);
2457 }
2458}
2459
2460void
2461mcore_expand_epilog ()
2462{
2463 struct mcore_frame fi;
2464 int i;
2465 int offs;
2466 int growth = MAX_STACK_GROWS - 1;
2467
2468
2469 /* Find out what we're doing. */
2470 layout_mcore_frame (&fi);
2471
2472 if (mcore_naked_function_p ())
2473 return;
2474
2475 /* If we had a frame pointer, restore the sp from that. */
2476 if (frame_pointer_needed)
2477 {
2478 emit_insn (gen_movsi (stack_pointer_rtx, frame_pointer_rtx));
2479 growth = fi.local_growth - 1;
2480 }
2481 else
2482 {
2483 /* XXX: while loop should accumulate and do a single sell. */
2484 while (growth >= fi.local_growth)
2485 {
2486 if (fi.growth[growth] != 0)
2487 output_stack_adjust (1, fi.growth[growth]);
2488 growth--;
2489 }
2490 }
2491
2492 /* Make sure we've shrunk the stack back to the point where the registers
2493 were laid down. This is typically 0/1 iterations. Then pull the
2494 register save information back off the stack. */
2495 while (growth >= fi.reg_growth)
2496 output_stack_adjust ( 1, fi.growth[growth--]);
2497
2498 offs = fi.reg_offset;
2499
2500 for (i = 15; i >= 0; i--)
2501 {
2502 if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
2503 {
2504 int first_reg;
2505
2506 /* Find the starting register. */
2507 first_reg = 15;
2508
2509 while (fi.reg_mask & (1 << first_reg))
2510 first_reg--;
2511
2512 first_reg++;
2513
2514 emit_insn (gen_load_multiple (gen_rtx (REG, SImode, first_reg),
2515 gen_rtx (MEM, SImode, stack_pointer_rtx),
2516 GEN_INT (16 - first_reg)));
2517
2518 i -= (15 - first_reg);
2519 offs += (16 - first_reg) * 4;
2520 }
2521 else if (fi.reg_mask & (1 << i))
2522 {
2523 emit_insn (gen_movsi
2524 (gen_rtx (REG, SImode, i),
2525 gen_rtx (MEM, SImode,
2526 plus_constant (stack_pointer_rtx, offs))));
2527 offs += 4;
2528 }
2529 }
2530
2531 /* Give back anything else. */
2532 /* XXX: Should accumulate the total and then give it back. */
2533 while (growth >= 0)
2534 output_stack_adjust ( 1, fi.growth[growth--]);
2535}
2536\f
2537/* This code is borrowed from the SH port. */
2538
2539/* The MCORE cannot load a large constant into a register; constants have to
2540 come from a pc relative load. The reference of a pc relative load
2541 instruction must be less than 1k in front of the instruction. This
2542 means that we often have to dump a constant inside a function, and
2543 generate code to branch around it.
2544
2545 It is important to minimize this, since the branches will slow things
2546 down and make things bigger.
2547
2548 Worst case code looks like:
2549
2550 lrw L1,r0
2551 br L2
2552 align
2553 L1: .long value
2554 L2:
2555 ..
2556
2557 lrw L3,r0
2558 br L4
2559 align
2560 L3: .long value
2561 L4:
2562 ..
2563
2564 We fix this by performing a scan before scheduling, which notices which
2565 instructions need to have their operands fetched from the constant table
2566 and builds the table.
2567
2568 The algorithm is:
2569
2570 Scan: find an instruction which needs a pcrel move. Look forward, find the
2571 last barrier which is within MAX_COUNT bytes of the requirement.
2572 If there isn't one, make one. Process all the instructions between
2573 the find and the barrier.
2574
2575 In the above example, we can tell that L3 is within 1k of L1, so
2576 the first move can be shrunk from the 2 insn+constant sequence into
2577 just 1 insn, and the constant moved to L3 to make:
2578
2579 lrw L1,r0
2580 ..
2581 lrw L3,r0
2582 bra L4
2583 align
2584 L3:.long value
2585 L4:.long value
2586
2587 Then the second move becomes the target for the shortening process. */
2588
2589typedef struct
2590{
2591 rtx value; /* Value in table. */
2592 rtx label; /* Label of value. */
2593} pool_node;
2594
2595/* The maximum number of constants that can fit into one pool, since
2596 the pc relative range is 0...1020 bytes and constants are at least 4
2597 bytes long. We subtract 4 from the range to allow for the case where
2598 we need to add a branch/align before the constant pool. */
2599
2600#define MAX_COUNT 1016
2601#define MAX_POOL_SIZE (MAX_COUNT/4)
2602static pool_node pool_vector[MAX_POOL_SIZE];
2603static int pool_size;
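/* i.e., at most (1020 - 4) / 4 == 254 entries per pool.  */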
2604
2605/* Dump out any constants accumulated in the final pass. These
2606 will only be labels. */
2607
2608const char *
2609mcore_output_jump_label_table ()
2610{
2611 int i;
2612
2613 if (pool_size)
2614 {
2615 fprintf (asm_out_file, "\t.align 2\n");
2616
2617 for (i = 0; i < pool_size; i++)
2618 {
2619 pool_node * p = pool_vector + i;
2620
2621 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (p->label));
2622
2623 output_asm_insn (".long %0", &p->value);
2624 }
2625
2626 pool_size = 0;
2627 }
2628
2629 return "";
2630}
2631
2632/* Check whether insn is a candidate for a conditional. */
2633
2634static cond_type
2635is_cond_candidate (insn)
2636 rtx insn;
2637{
2638 /* The only things we conditionalize are those that can be directly
2639 changed into a conditional. Only bother with SImode items. If
2640 we wanted to be a little more aggressive, we could also do other
2641 modes such as DImode with reg-reg move or load 0. */
2642 if (GET_CODE (insn) == INSN)
2643 {
2644 rtx pat = PATTERN (insn);
2645 rtx src, dst;
2646
2647 if (GET_CODE (pat) != SET)
2648 return COND_NO;
2649
2650 dst = XEXP (pat, 0);
2651
2652 if ((GET_CODE (dst) != REG &&
2653 GET_CODE (dst) != SUBREG) ||
2654 GET_MODE (dst) != SImode)
2655 return COND_NO;
2656
2657 src = XEXP (pat, 1);
2658
2659 if ((GET_CODE (src) == REG ||
2660 (GET_CODE (src) == SUBREG &&
2661 GET_CODE (SUBREG_REG (src)) == REG)) &&
2662 GET_MODE (src) == SImode)
2663 return COND_MOV_INSN;
2664 else if (GET_CODE (src) == CONST_INT &&
2665 INTVAL (src) == 0)
2666 return COND_CLR_INSN;
2667 else if (GET_CODE (src) == PLUS &&
2668 (GET_CODE (XEXP (src, 0)) == REG ||
2669 (GET_CODE (XEXP (src, 0)) == SUBREG &&
2670 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
2671 GET_MODE (XEXP (src, 0)) == SImode &&
2672 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2673 INTVAL (XEXP (src, 1)) == 1)
2674 return COND_INC_INSN;
2675 else if (((GET_CODE (src) == MINUS &&
2676 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2677 INTVAL( XEXP (src, 1)) == 1) ||
2678 (GET_CODE (src) == PLUS &&
2679 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2680 INTVAL (XEXP (src, 1)) == -1)) &&
2681 (GET_CODE (XEXP (src, 0)) == REG ||
2682 (GET_CODE (XEXP (src, 0)) == SUBREG &&
2683 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
2684 GET_MODE (XEXP (src, 0)) == SImode)
2685 return COND_DEC_INSN;
2686
2687 /* some insns that we don't bother with:
2688 (set (rx:DI) (ry:DI))
2689 (set (rx:DI) (const_int 0))
2690 */
2691
2692 }
2693 else if (GET_CODE (insn) == JUMP_INSN &&
2694 GET_CODE (PATTERN (insn)) == SET &&
2695 GET_CODE (XEXP (PATTERN (insn), 1)) == LABEL_REF)
2696 return COND_BRANCH_INSN;
2697
2698 return COND_NO;
2699}
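/* For reference, the insn shapes the classification above accepts
   (a SUBREG of a REG is also allowed wherever a plain REG is shown):

     (set (reg:SI rx) (reg:SI ry))                           COND_MOV_INSN
     (set (reg:SI rx) (const_int 0))                         COND_CLR_INSN
     (set (reg:SI rx) (plus:SI (reg:SI ry) (const_int 1)))   COND_INC_INSN
     (set (reg:SI rx) (plus:SI (reg:SI ry) (const_int -1)))  COND_DEC_INSN
       (or the equivalent minus:SI form)
     (set (pc) (label_ref L))                                COND_BRANCH_INSN  */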
2700
2701/* Emit a conditional version of insn and replace the old insn with the
2702 new one. Return the new insn if emitted. */
2703
2704static rtx
2705emit_new_cond_insn (insn, cond)
2706 rtx insn;
2707 int cond;
2708{
2709 rtx c_insn = 0;
2710 rtx pat, dst, src;
2711 cond_type num;
2712
2713 if ((num = is_cond_candidate (insn)) == COND_NO)
2714 return NULL;
2715
2716 pat = PATTERN (insn);
2717
2718 if (GET_CODE (insn) == INSN)
2719 {
2720 dst = SET_DEST (pat);
2721 src = SET_SRC (pat);
2722 }
2723 else
2724 {
2725 dst = JUMP_LABEL (insn);
2726 src = NULL_RTX;
2727 }
2728
2729 switch (num)
2730 {
2731 case COND_MOV_INSN:
2732 case COND_CLR_INSN:
2733 if (cond)
2734 c_insn = gen_movt0 (dst, src, dst);
2735 else
2736 c_insn = gen_movt0 (dst, dst, src);
2737 break;
2738
2739 case COND_INC_INSN:
2740 if (cond)
2741 c_insn = gen_incscc (dst, dst);
2742 else
2743 c_insn = gen_incscc_false (dst, dst);
2744 break;
2745
2746 case COND_DEC_INSN:
2747 if (cond)
2748 c_insn = gen_decscc (dst, dst);
2749 else
2750 c_insn = gen_decscc_false (dst, dst);
2751 break;
2752
2753 case COND_BRANCH_INSN:
2754 if (cond)
2755 c_insn = gen_branch_true (dst);
2756 else
2757 c_insn = gen_branch_false (dst);
2758 break;
2759
2760 default:
2761 return NULL;
2762 }
2763
2764 /* Only copy the notes if they exist. */
2765 if (rtx_length [GET_CODE (c_insn)] >= 7 && rtx_length [GET_CODE (insn)] >= 7)
2766 {
2767 /* We really don't need to bother with the notes and links at this
2768 point, but go ahead and save the notes. This will help is_dead()
2769 when applying peepholes (links don't matter since they are not
2770 used any more beyond this point for the mcore). */
2771 REG_NOTES (c_insn) = REG_NOTES (insn);
2772 }
2773
2774 if (num == COND_BRANCH_INSN)
2775 {
2776 /* For jumps, we need to be a little bit careful and emit the new jump
2777 before the old one and to update the use count for the target label.
2778 This way, the barrier following the old (uncond) jump will get
2779 deleted, but the label won't. */
2780 c_insn = emit_jump_insn_before (c_insn, insn);
2781
2782 ++ LABEL_NUSES (dst);
2783
2784 JUMP_LABEL (c_insn) = dst;
2785 }
2786 else
2787 c_insn = emit_insn_after (c_insn, insn);
2788
2789 delete_insn (insn);
2790
2791 return c_insn;
2792}
2793
2794/* Attempt to change a basic block into a series of conditional insns. This
2795 works by taking the branch at the end of the 1st block and scanning for the
2796 end of the 2nd block. If all instructions in the 2nd block have cond.
2797 versions and the label at the start of block 3 is the same as the target
2798 from the branch at block 1, then conditionalize all insn in block 2 using
2799 the inverse condition of the branch at block 1. (Note I'm bending the
2800 definition of basic block here.)
2801
2802 e.g., change:
2803
2804 bt L2 <-- end of block 1 (delete)
2805 mov r7,r8
2806 addu r7,1
2807 br L3 <-- end of block 2
2808
2809 L2: ... <-- start of block 3 (NUSES==1)
2810 L3: ...
2811
2812 to:
2813
2814 movf r7,r8
2815 incf r7
2816 bf L3
2817
2818 L3: ...
2819
2820 we can delete the L2 label if NUSES==1 and re-apply the optimization
2821 starting at the last instruction of block 2. This may allow an entire
2822 if-then-else statement to be conditionalized. BRC */
2823static rtx
2824conditionalize_block (first)
2825 rtx first;
2826{
2827 rtx insn;
2828 rtx br_pat;
2829 rtx end_blk_1_br = 0;
2830 rtx end_blk_2_insn = 0;
2831 rtx start_blk_3_lab = 0;
2832 int cond;
2833 int br_lab_num;
2834 int blk_size = 0;
2835
2836
2837 /* Check that the first insn is a candidate conditional jump. This is
2838 the one that we'll eliminate. If not, advance to the next insn to
2839 try. */
2840 if (GET_CODE (first) != JUMP_INSN ||
2841 GET_CODE (PATTERN (first)) != SET ||
2842 GET_CODE (XEXP (PATTERN (first), 1)) != IF_THEN_ELSE)
2843 return NEXT_INSN (first);
2844
2845 /* Extract some information we need. */
2846 end_blk_1_br = first;
2847 br_pat = PATTERN (end_blk_1_br);
2848
2849 /* Complement the condition since we use the reverse cond. for the insns. */
2850 cond = (GET_CODE (XEXP (XEXP (br_pat, 1), 0)) == EQ);
2851
2852 /* Determine what kind of branch we have. */
2853 if (GET_CODE (XEXP (XEXP (br_pat, 1), 1)) == LABEL_REF)
2854 {
2855 /* A normal branch, so extract label out of first arm. */
2856 br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 1), 0));
2857 }
2858 else
2859 {
2860 /* An inverse branch, so extract the label out of the 2nd arm
2861 and complement the condition. */
2862 cond = (cond == 0);
2863 br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 2), 0));
2864 }
2865
2866 /* Scan forward for the start of block 2: it must start with a
2867 label and that label must be the same as the branch target
2868 label from block 1. We don't care about whether block 2 actually
2869 ends with a branch or a label (an uncond. branch is
2870 conditionalizable). */
2871 for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
2872 {
2873 enum rtx_code code;
2874
2875 code = GET_CODE (insn);
2876
2877 /* Look for the label at the start of block 3. */
2878 if (code == CODE_LABEL && CODE_LABEL_NUMBER (insn) == br_lab_num)
2879 break;
2880
2881 /* Skip barriers, notes, and conditionalizable insns. If the
2882 insn is not conditionalizable or makes this optimization fail,
2883 just return the next insn so we can start over from that point. */
2884 if (code != BARRIER && code != NOTE && !is_cond_candidate (insn))
2885 return NEXT_INSN (insn);
2886
2887 /* Remember the last real insn before the label (i.e., end of block 2). */
2888 if (code == JUMP_INSN || code == INSN)
2889 {
2890 blk_size ++;
2891 end_blk_2_insn = insn;
2892 }
2893 }
2894
2895 if (!insn)
2896 return insn;
2897
2898 /* It is possible for this optimization to slow performance if the blocks
2899 are long. This really depends upon whether the branch is likely taken
2900 or not. If the branch is taken, we slow performance in many cases. But,
2901 if the branch is not taken, we always help performance (for a single
2902 block, but for a double block (i.e. when the optimization is re-applied)
2903 this is not true since the 'right thing' depends on the overall length of
2904 the collapsed block). As a compromise, don't apply this optimization on
2905 blocks larger than size 2 (unlikely for the mcore) when speed is important.
2906 The best threshold depends on the latencies of the instructions (i.e.,
2907 the branch penalty). */
2908 if (optimize > 1 && blk_size > 2)
2909 return insn;
2910
2911 /* At this point, we've found the start of block 3 and we know that
2912 it is the destination of the branch from block 1. Also, all
2913 instructions in the block 2 are conditionalizable. So, apply the
2914 conditionalization and delete the branch. */
2915 start_blk_3_lab = insn;
2916
2917 for (insn = NEXT_INSN (end_blk_1_br); insn != start_blk_3_lab;
2918 insn = NEXT_INSN (insn))
2919 {
2920 rtx newinsn;
2921
2922 if (INSN_DELETED_P (insn))
2923 continue;
2924
2925 /* Try to form a conditional variant of the instruction and emit it. */
2926 if ((newinsn = emit_new_cond_insn (insn, cond)))
2927 {
2928 if (end_blk_2_insn == insn)
2929 end_blk_2_insn = newinsn;
2930
2931 insn = newinsn;
2932 }
2933 }
2934
2935 /* Note whether we will delete the label starting blk 3 when the jump
2936 gets deleted. If so, we want to re-apply this optimization at the
2937 last real instruction right before the label. */
2938 if (LABEL_NUSES (start_blk_3_lab) == 1)
2939 {
2940 start_blk_3_lab = 0;
2941 }
2942
2943 /* ??? We probably should redistribute the death notes for this insn, esp.
2944 the death of cc, but it doesn't really matter this late in the game.
2945 The peepholes all use is_dead() which will find the correct death
2946 regardless of whether there is a note. */
2947 delete_insn (end_blk_1_br);
2948
2949 if (! start_blk_3_lab)
2950 return end_blk_2_insn;
2951
2952 /* Return the insn right after the label at the start of block 3. */
2953 return NEXT_INSN (start_blk_3_lab);
2954}
2955
2956/* Apply the conditionalization of blocks optimization. This is the
2957 outer loop that traverses through the insns scanning for a branch
2958 that signifies an opportunity to apply the optimization. Note that
2959 this optimization is applied late. If we could apply it earlier,
2960 say before cse 2, it might expose more optimization opportunities,
2961 but the payback probably isn't really worth the effort (we'd have
2962 to update all reg/flow/notes/links/etc to make it work - and stick it
2963 in before cse 2). */
2964
2965static void
2966conditionalize_optimization (first)
2967 rtx first;
2968{
2969 rtx insn;
2970
2971 for (insn = first; insn; insn = conditionalize_block (insn))
2972 continue;
2973}
2974
2975static int saved_warn_return_type = -1;
2976static int saved_warn_return_type_count = 0;
2977
2978/* This function is called from toplev.c before reorg. */
2979
2980void
2981mcore_dependent_reorg (first)
2982 rtx first;
2983{
2984 /* Reset this variable. */
2985 current_function_anonymous_args = 0;
2986
2987 /* Restore the warn_return_type if it has been altered. */
2988 if (saved_warn_return_type != -1)
2989 {
2990 /* Only restore the value if we have reached another function.
2991 The test of warn_return_type occurs in final_function () in
2992 c-decl.c a long time after the code for the function is generated,
2993 so we need a counter to tell us when we have finished parsing that
2994 function and can restore the flag. */
2995 if (--saved_warn_return_type_count == 0)
2996 {
2997 warn_return_type = saved_warn_return_type;
2998 saved_warn_return_type = -1;
2999 }
3000 }
3001
3002 if (optimize == 0)
3003 return;
3004
3005 /* Conditionalize blocks where we can. */
3006 conditionalize_optimization (first);
3007
3008 /* Literal pool generation is now pushed off until the assembler. */
3009}
3010
3011\f
3012/* Return the reg_class to use when reloading the rtx X into the class
3013 CLASS. */
3014
3015/* If the input is (PLUS REG CONSTANT) representing a stack slot address,
3016 then we want to restrict the class to LRW_REGS since that ensures that
3017 will be able to safely load the constant.
3018
3019 If the input is a constant that should be loaded with mvir1, then use
3020 ONLYR1_REGS.
3021
3022 ??? We don't handle the case where we have (PLUS REG CONSTANT) and
3023 the constant should be loaded with mvir1, because that can lead to cases
3024 where an instruction needs two ONLYR1_REGS reloads. */
3025enum reg_class
3026mcore_reload_class (x, class)
3027 rtx x;
3028 enum reg_class class;
3029{
3030 enum reg_class new_class;
3031
3032 if (class == GENERAL_REGS && CONSTANT_P (x)
3033 && (GET_CODE (x) != CONST_INT
3034 || ( ! CONST_OK_FOR_I (INTVAL (x))
3035 && ! CONST_OK_FOR_M (INTVAL (x))
3036 && ! CONST_OK_FOR_N (INTVAL (x)))))
3037 new_class = LRW_REGS;
3038 else
3039 new_class = class;
3040
3041 return new_class;
3042}
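/* e.g., a CONST_INT that none of the I, M or N constraints accept is
   narrowed from GENERAL_REGS to LRW_REGS, so the reload register is one
   that can be loaded with an lrw from the literal pool.  */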
3043
3044/* Tell me if a pair of reg/subreg rtx's actually refer to the same
3045 register. Note that the current version doesn't worry about whether
3046 they are the same mode or not (e.g., a QImode in r2 matches an HImode
3047 in r2, which matches an SImode in r2). We might think in the future about whether
3048 we want to be able to say something about modes. */
3049int
3050mcore_is_same_reg (x, y)
3051 rtx x;
3052 rtx y;
3053{
3054 /* Strip any and all of the subreg wrappers. */
3055 while (GET_CODE (x) == SUBREG)
3056 x = SUBREG_REG (x);
3057
3058 while (GET_CODE (y) == SUBREG)
3059 y = SUBREG_REG (y);
3060
3061 if (GET_CODE (x) == REG && GET_CODE (y) == REG && REGNO (x) == REGNO (y))
3062 return 1;
3063
3064 return 0;
3065}
3066
3067void
3068mcore_override_options ()
3069{
3070 if (mcore_stack_increment_string)
3071 {
3072 mcore_stack_increment = atoi (mcore_stack_increment_string);
3073
3074 if (mcore_stack_increment < 0
3075 || (mcore_stack_increment == 0
3076 && (mcore_stack_increment_string[0] != '0'
3077 || mcore_stack_increment_string[1] != 0)))
3078 error ("invalid option `-mstack-increment=%s'",
3079 mcore_stack_increment_string);
3080 }
3081
3082 /* Only the m340 supports little endian code. */
3083 if (TARGET_LITTLE_END && ! TARGET_M340)
3084 target_flags |= M340_BIT;
3085}
3086\f
3087int
3088mcore_must_pass_on_stack (mode, type)
3089 enum machine_mode mode ATTRIBUTE_UNUSED;
3090 tree type;
3091{
3092 if (type == NULL)
3093 return 0;
3094
3095 /* If the argument can have its address taken, it must
3096 be placed on the stack. */
3097 if (TREE_ADDRESSABLE (type))
3098 return 1;
3099
3100 return 0;
3101}
3102
3103/* Compute the number of word sized registers needed to
3104 hold a function argument of mode MODE and type TYPE. */
3105int
3106mcore_num_arg_regs (mode, type)
3107 enum machine_mode mode;
3108 tree type;
3109{
3110 int size;
3111
3112 if (MUST_PASS_IN_STACK (mode, type))
3113 return 0;
3114
3115 if (type && mode == BLKmode)
3116 size = int_size_in_bytes (type);
3117 else
3118 size = GET_MODE_SIZE (mode);
3119
3120 return ROUND_ADVANCE (size);
3121}
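/* e.g., assuming ROUND_ADVANCE rounds a byte count up to whole 4-byte
   words, a 10-byte BLKmode argument needs ROUND_ADVANCE (10) == 3
   argument registers.  */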
3122
3123static rtx
3124handle_structs_in_regs (mode, type, reg)
3125 enum machine_mode mode;
3126 tree type;
3127 int reg;
3128{
3129 int size;
3130
3131 /* The MCore ABI defines that a structure whose size is not a whole multiple
3132 of machine words is passed packed into registers (or spilled onto the stack if
3133 not enough registers are available) with the last few bytes of the
3134 structure being packed, left-justified, into the last register/stack slot.
3135 GCC handles this correctly if the last word is in a stack slot, but we
3136 have to generate a special, PARALLEL RTX if the last word is in an
3137 argument register. */
3138 if (type
3139 && TYPE_MODE (type) == BLKmode
3140 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
3141 && (size = int_size_in_bytes (type)) > UNITS_PER_WORD
3142 && (size % UNITS_PER_WORD != 0)
3143 && (reg + mcore_num_arg_regs (mode, type) <= (FIRST_PARM_REG + NPARM_REGS)))
3144 {
3145 rtx arg_regs [NPARM_REGS];
3146 int nregs;
3147 rtx result;
3148 rtvec rtvec;
3149
3150 for (nregs = 0; size > 0; size -= UNITS_PER_WORD)
3151 {
3152 arg_regs [nregs] =
3153 gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, reg ++),
3154 GEN_INT (nregs * UNITS_PER_WORD));
3155 nregs ++;
3156 }
3157
3158 /* We assume here that NPARM_REGS == 6. The assert checks this. */
3159 assert (ARRAY_SIZE (arg_regs) == 6);
3160 rtvec = gen_rtvec (nregs, arg_regs[0], arg_regs[1], arg_regs[2],
3161 arg_regs[3], arg_regs[4], arg_regs[5]);
3162
3163 result = gen_rtx_PARALLEL (mode, rtvec);
3164 return result;
3165 }
3166
3167 return gen_rtx_REG (mode, reg);
3168}
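/* Worked example (assuming UNITS_PER_WORD == 4 and FIRST_PARM_REG == 2):
   a 10-byte structure starting in the first argument register comes back
   as roughly

     (parallel [(expr_list (reg:SI 2) (const_int 0))
                (expr_list (reg:SI 3) (const_int 4))
                (expr_list (reg:SI 4) (const_int 8))])

   with the final two bytes packed, left-justified, into r4.  */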
3169
3170rtx
3171mcore_function_value (valtype, func)
3172 tree valtype;
3173 tree func ATTRIBUTE_UNUSED;
3174{
3175 enum machine_mode mode;
3176 int unsigned_p;
3177
3178 mode = TYPE_MODE (valtype);
3179
3180 PROMOTE_MODE (mode, unsigned_p, NULL);
3181
3182 return handle_structs_in_regs (mode, valtype, FIRST_RET_REG);
3183}
3184
3185/* Define where to put the arguments to a function.
3186 Value is zero to push the argument on the stack,
3187 or a hard register in which to store the argument.
3188
3189 MODE is the argument's machine mode.
3190 TYPE is the data type of the argument (as a tree).
3191 This is null for libcalls where that information may
3192 not be available.
3193 CUM is a variable of type CUMULATIVE_ARGS which gives info about
3194 the preceding args and about the function being called.
3195 NAMED is nonzero if this argument is a named parameter
3196 (otherwise it is an extra parameter matching an ellipsis).
3197
3198 On MCore the first args are normally in registers
3199 and the rest are pushed. Any arg that starts within the first
3200 NPARM_REGS words is at least partially passed in a register unless
3201 its data type forbids. */
3202rtx
3203mcore_function_arg (cum, mode, type, named)
3204 CUMULATIVE_ARGS cum;
3205 enum machine_mode mode;
3206 tree type;
3207 int named;
3208{
3209 int arg_reg;
3210
3211 if (! named)
3212 return 0;
3213
3214 if (MUST_PASS_IN_STACK (mode, type))
3215 return 0;
3216
3217 arg_reg = ROUND_REG (cum, mode);
3218
3219 if (arg_reg < NPARM_REGS)
3220 return handle_structs_in_regs (mode, type, FIRST_PARM_REG + arg_reg);
3221
3222 return 0;
3223}
3224
3225/* Implements the FUNCTION_ARG_PARTIAL_NREGS macro.
3226 Returns the number of argument registers required to hold *part* of
3227 a parameter of machine mode MODE and type TYPE (which may be NULL if
3228 the type is not known). If the argument fits entirely in the argument
3229 registers, or entirely on the stack, then 0 is returned. CUM is the
3230 number of argument registers already used by earlier parameters to
3231 the function. */
3232int
3233mcore_function_arg_partial_nregs (cum, mode, type, named)
3234 CUMULATIVE_ARGS cum;
3235 enum machine_mode mode;
3236 tree type;
3237 int named;
3238{
3239 int reg = ROUND_REG (cum, mode);
3240
3241 if (named == 0)
3242 return 0;
3243
3244 if (MUST_PASS_IN_STACK (mode, type))
3245 return 0;
3246
3247 /* REG is not the *hardware* register number of the register that holds
3248 the argument, it is the *argument* register number. So for example,
3249 the first argument to a function goes in argument register 0, which
3250 translates (for the MCore) into hardware register 2. The second
3251 argument goes into argument register 1, which translates into hardware
3252 register 3, and so on. NPARM_REGS is the number of argument registers
3253 supported by the target, not the maximum hardware register number of
3254 the target. */
3255 if (reg >= NPARM_REGS)
3256 return 0;
3257
3258 /* If the argument fits entirely in registers, return 0. */
3259 if (reg + mcore_num_arg_regs (mode, type) <= NPARM_REGS)
3260 return 0;
3261
3262 /* The argument overflows the number of available argument registers.
3263 Compute how many argument registers have not yet been assigned to
3264 hold an argument. */
3265 reg = NPARM_REGS - reg;
3266
3267 /* Return partially in registers and partially on the stack. */
3268 return reg;
3269}
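/* Worked example: with NPARM_REGS == 6, a four-word argument whose first
   word lands in argument register 4 would need registers 4-7, but only
   4 and 5 exist; the function returns 6 - 4 == 2, so two words travel in
   registers and the remaining two go on the stack.  */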
3270\f
3271/* Return non-zero if SYMBOL is marked as being dllexport'd. */
3272int
3273mcore_dllexport_name_p (symbol)
3274 const char * symbol;
3275{
3276 return symbol[0] == '@' && symbol[1] == 'e' && symbol[2] == '.';
3277}
3278
3279/* Return non-zero if SYMBOL is marked as being dllimport'd. */
3280int
3281mcore_dllimport_name_p (symbol)
3282 const char * symbol;
3283{
3284 return symbol[0] == '@' && symbol[1] == 'i' && symbol[2] == '.';
3285}
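/* i.e., "foo" is encoded as "@e.foo" when dllexport'd and as
   "@i.__imp_foo" when dllimport'd; the marking routines below install
   these encodings.  */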
3286
3287/* Mark a DECL as being dllexport'd. */
3288static void
3289mcore_mark_dllexport (decl)
3290 tree decl;
3291{
3292 const char * oldname;
3293 char * newname;
3294 rtx rtlname;
3295 tree idp;
3296
3297 rtlname = XEXP (DECL_RTL (decl), 0);
3298
3299 if (GET_CODE (rtlname) == SYMBOL_REF)
3300 oldname = XSTR (rtlname, 0);
3301 else if ( GET_CODE (rtlname) == MEM
3302 && GET_CODE (XEXP (rtlname, 0)) == SYMBOL_REF)
3303 oldname = XSTR (XEXP (rtlname, 0), 0);
3304 else
3305 abort ();
3306
3307 if (mcore_dllexport_name_p (oldname))
3308 return; /* Already done. */
3309
3310 newname = alloca (strlen (oldname) + 4);
3311 sprintf (newname, "@e.%s", oldname);
3312
3313 /* We pass newname through get_identifier to ensure it has a unique
3314 address. RTL processing can sometimes peek inside the symbol ref
3315 and compare the string's addresses to see if two symbols are
3316 identical. */
3317 /* ??? At least I think that's why we do this. */
3318 idp = get_identifier (newname);
3319
3320 XEXP (DECL_RTL (decl), 0) =
3321 gen_rtx (SYMBOL_REF, Pmode, IDENTIFIER_POINTER (idp));
3322}
3323
3324/* Mark a DECL as being dllimport'd. */
3325static void
3326mcore_mark_dllimport (decl)
3327 tree decl;
3328{
3329 const char * oldname;
3330 char * newname;
3331 tree idp;
3332 rtx rtlname;
3333 rtx newrtl;
3334
3335 rtlname = XEXP (DECL_RTL (decl), 0);
3336
3337 if (GET_CODE (rtlname) == SYMBOL_REF)
3338 oldname = XSTR (rtlname, 0);
3339 else if ( GET_CODE (rtlname) == MEM
3340 && GET_CODE (XEXP (rtlname, 0)) == SYMBOL_REF)
3341 oldname = XSTR (XEXP (rtlname, 0), 0);
3342 else
3343 abort ();
3344
3345 if (mcore_dllexport_name_p (oldname))
3346 abort (); /* This shouldn't happen. */
3347 else if (mcore_dllimport_name_p (oldname))
3348 return; /* Already done. */
3349
3350 /* ??? One can well ask why we're making these checks here,
3351 and that would be a good question. */
3352
3353 /* Imported variables can't be initialized. */
3354 if (TREE_CODE (decl) == VAR_DECL
3355 && !DECL_VIRTUAL_P (decl)
3356 && DECL_INITIAL (decl))
3357 {
3358 error_with_decl (decl, "initialized variable `%s' is marked dllimport");
3359 return;
3360 }
3361
3362 /* `extern' needn't be specified with dllimport.
3363 Specify `extern' now and hope for the best. Sigh. */
3364 if (TREE_CODE (decl) == VAR_DECL
3365 /* ??? Is this test for vtables needed? */
3366 && !DECL_VIRTUAL_P (decl))
3367 {
3368 DECL_EXTERNAL (decl) = 1;
3369 TREE_PUBLIC (decl) = 1;
3370 }
3371
3372 newname = alloca (strlen (oldname) + 11);
3373 sprintf (newname, "@i.__imp_%s", oldname);
3374
3375 /* We pass newname through get_identifier to ensure it has a unique
3376 address. RTL processing can sometimes peek inside the symbol ref
3377 and compare the string's addresses to see if two symbols are
3378 identical. */
3379 /* ??? At least I think that's why we do this. */
3380 idp = get_identifier (newname);
3381
3382 newrtl = gen_rtx (MEM, Pmode,
3383 gen_rtx (SYMBOL_REF, Pmode,
3384 IDENTIFIER_POINTER (idp)));
3385 XEXP (DECL_RTL (decl), 0) = newrtl;
3386}
3387
3388static int
3389mcore_dllexport_p (decl)
3390 tree decl;
3391{
3392 if ( TREE_CODE (decl) != VAR_DECL
3393 && TREE_CODE (decl) != FUNCTION_DECL)
3394 return 0;
3395
3396 return lookup_attribute ("dllexport", DECL_ATTRIBUTES (decl)) != 0;
3397}
3398
3399static int
3400mcore_dllimport_p (decl)
3401 tree decl;
3402{
3403 if ( TREE_CODE (decl) != VAR_DECL
3404 && TREE_CODE (decl) != FUNCTION_DECL)
3405 return 0;
3406
3407 return lookup_attribute ("dllimport", DECL_ATTRIBUTES (decl)) != 0;
3408}
3409
3410/* We must mark dll symbols specially. Definitions of dllexport'd objects
3411 install some info in the .drectve (PE) or .exports (ELF) sections. */
3412
3413static void
3414mcore_encode_section_info (decl, first)
3415 tree decl;
3416 int first ATTRIBUTE_UNUSED;
3417{
3418 /* This bit is copied from arm.h. */
3419 if (optimize > 0
3420 && TREE_CONSTANT (decl)
3421 && (!flag_writable_strings || TREE_CODE (decl) != STRING_CST))
3422 {
3423 rtx rtl = (TREE_CODE_CLASS (TREE_CODE (decl)) != 'd'
3424 ? TREE_CST_RTL (decl) : DECL_RTL (decl));
3425 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
3426 }
3427
3428 /* Mark the decl so we can tell from the rtl whether the object is
3429 dllexport'd or dllimport'd. */
3430 if (mcore_dllexport_p (decl))
3431 mcore_mark_dllexport (decl);
3432 else if (mcore_dllimport_p (decl))
3433 mcore_mark_dllimport (decl);
3434
3435 /* It might be that DECL has already been marked as dllimport, but
3436 a subsequent definition nullified that. The attribute is gone
3437 but DECL_RTL still has @i.__imp_foo. We need to remove that. */
3438 else if ((TREE_CODE (decl) == FUNCTION_DECL
3439 || TREE_CODE (decl) == VAR_DECL)
3440 && DECL_RTL (decl) != NULL_RTX
3441 && GET_CODE (DECL_RTL (decl)) == MEM
3442 && GET_CODE (XEXP (DECL_RTL (decl), 0)) == MEM
3443 && GET_CODE (XEXP (XEXP (DECL_RTL (decl), 0), 0)) == SYMBOL_REF
3444 && mcore_dllimport_name_p (XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0)))
3445 {
3446 const char * oldname = XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0);
3447 tree idp = get_identifier (oldname + 9);
3448 rtx newrtl = gen_rtx (SYMBOL_REF, Pmode, IDENTIFIER_POINTER (idp));
3449
3450 XEXP (DECL_RTL (decl), 0) = newrtl;
3451
3452 /* We previously set TREE_PUBLIC and DECL_EXTERNAL.
3453 ??? We leave these alone for now. */
3454 }
3455}
3456
3457/* Undo the effects of the above. */
3458
3459static const char *
3460mcore_strip_name_encoding (str)
3461 const char *str;
3462{
3463 return str + (str[0] == '@' ? 3 : 0);
3464}
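/* e.g., "@e.foo" strips to "foo", while "@i.__imp_foo" strips to
   "__imp_foo" (only the three-character "@i." prefix is removed).  */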
3465
3466/* MCore specific attribute support.
3467 dllexport - for exporting a function/variable that will live in a dll
3468 dllimport - for importing a function/variable from a dll
3469 naked - do not create a function prologue/epilogue. */
3470
3471const struct attribute_spec mcore_attribute_table[] =
3472{
3473 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
3474 { "dllexport", 0, 0, true, false, false, NULL },
3475 { "dllimport", 0, 0, true, false, false, NULL },
3476 { "naked", 0, 0, true, false, false, mcore_handle_naked_attribute },
3477 { NULL, 0, 0, false, false, false, NULL }
3478};
3479
3480/* Handle a "naked" attribute; arguments as in
3481 struct attribute_spec.handler. */
3482static tree
3483mcore_handle_naked_attribute (node, name, args, flags, no_add_attrs)
3484 tree *node;
3485 tree name;
3486 tree args ATTRIBUTE_UNUSED;
3487 int flags ATTRIBUTE_UNUSED;
3488 bool *no_add_attrs;
3489{
3490 if (TREE_CODE (*node) == FUNCTION_DECL)
3491 {
3492 /* PR14310 - don't complain about lack of return statement
3493 in naked functions. The solution here is a gross hack
3494 but this is the only way to solve the problem without
3495 adding a new feature to GCC. I did try submitting a patch
3496 that would add such a new feature, but it was (rightfully)
3497 rejected on the grounds that it was creeping featurism,
3498 so hence this code. */
3499 if (warn_return_type)
3500 {
3501 saved_warn_return_type = warn_return_type;
3502 warn_return_type = 0;
3503 saved_warn_return_type_count = 2;
3504 }
3505 else if (saved_warn_return_type_count)
3506 saved_warn_return_type_count = 2;
3507 }
3508 else
3509 {
3510 warning ("`%s' attribute only applies to functions",
3511 IDENTIFIER_POINTER (name));
3512 *no_add_attrs = true;
3513 }
3514
3515 return NULL_TREE;
3516}
3517
3518/* ??? It looks like this is PE specific? Oh well, this is what the
3519 old code did as well. */
3520
3521static void
3522mcore_unique_section (decl, reloc)
3523 tree decl;
3524 int reloc ATTRIBUTE_UNUSED;
3525{
3526 int len;
3527 const char * name;
3528 char * string;
3529 const char * prefix;
3530
3531 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
3532
3533 /* Strip off any encoding in name. */
3534 name = (* targetm.strip_name_encoding) (name);
3535
3536 /* The object is put in, for example, section .text$foo.
3537 The linker will then ultimately place them in .text
3538 (everything from the $ on is stripped). */
3539 if (TREE_CODE (decl) == FUNCTION_DECL)
3540 prefix = ".text$";
3541 /* For compatibility with EPOC, we ignore the fact that the
3542 section might have relocs against it. */
3543 else if (decl_readonly_section (decl, 0))
3544 prefix = ".rdata$";
3545 else
3546 prefix = ".data$";
3547
3548 len = strlen (name) + strlen (prefix);
3549 string = alloca (len + 1);
3550
3551 sprintf (string, "%s%s", prefix, name);
3552
3553 DECL_SECTION_NAME (decl) = build_string (len, string);
3554}
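/* e.g., a function foo is placed in ".text$foo"; the linker strips
   everything from the '$' on, so it ultimately lands in plain .text.  */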
3555
3556int
3557mcore_naked_function_p ()
3558{
3559 return lookup_attribute ("naked", DECL_ATTRIBUTES (current_function_decl)) != NULL_TREE;
3560}
3561
3562#ifdef OBJECT_FORMAT_ELF
3563static void
3564mcore_asm_named_section (name, flags)
3565 const char *name;
3566 unsigned int flags ATTRIBUTE_UNUSED;
3567{
3568 fprintf (asm_out_file, "\t.section %s\n", name);
3569}
3570#endif /* OBJECT_FORMAT_ELF */