/* Output routines for Motorola MCore processor
   Copyright (C) 1993, 1999, 2000, 2001, 2002 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */

#include "config.h"
#include "system.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "assert.h"
#include "mcore.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "obstack.h"
#include "expr.h"
#include "reload.h"
#include "recog.h"
#include "function.h"
#include "ggc.h"
#include "toplev.h"
#include "target.h"
#include "target-def.h"

/* Maximum size we are allowed to grow the stack in a single operation.
   If we want more, we must do it in increments of at most this size.
   If this value is 0, we don't check at all.  */
const char * mcore_stack_increment_string = 0;
int          mcore_stack_increment = STACK_UNITS_MAXSTEP;

/* For dumping information about frame sizes.  */
char * mcore_current_function_name = 0;
long   mcore_current_compilation_timestamp = 0;

/* Global variables for machine-dependent things.  */

/* Saved operands from the last compare to use when we generate an scc
   or bcc insn.  */
rtx arch_compare_op0;
rtx arch_compare_op1;

/* Provides the class number of the smallest class containing
   reg number.  */
const int regno_reg_class[FIRST_PSEUDO_REGISTER] =
{
  GENERAL_REGS, ONLYR1_REGS,  LRW_REGS,     LRW_REGS,
  LRW_REGS,     LRW_REGS,     LRW_REGS,     LRW_REGS,
  LRW_REGS,     LRW_REGS,     LRW_REGS,     LRW_REGS,
  LRW_REGS,     LRW_REGS,     LRW_REGS,     GENERAL_REGS,
  GENERAL_REGS, C_REGS,       NO_REGS,      NO_REGS,
};

/* Provide reg_class from a letter such as appears in the machine
   description.  */
const enum reg_class reg_class_from_letter[] =
{
  /* a */ LRW_REGS,    /* b */ ONLYR1_REGS,  /* c */ C_REGS,   /* d */ NO_REGS,
  /* e */ NO_REGS,     /* f */ NO_REGS,      /* g */ NO_REGS,  /* h */ NO_REGS,
  /* i */ NO_REGS,     /* j */ NO_REGS,      /* k */ NO_REGS,  /* l */ NO_REGS,
  /* m */ NO_REGS,     /* n */ NO_REGS,      /* o */ NO_REGS,  /* p */ NO_REGS,
  /* q */ NO_REGS,     /* r */ GENERAL_REGS, /* s */ NO_REGS,  /* t */ NO_REGS,
  /* u */ NO_REGS,     /* v */ NO_REGS,      /* w */ NO_REGS,  /* x */ ALL_REGS,
  /* y */ NO_REGS,     /* z */ NO_REGS
};

struct mcore_frame
{
  int arg_size;			/* stdarg spills (bytes) */
  int reg_size;			/* non-volatile reg saves (bytes) */
  int reg_mask;			/* non-volatile reg saves */
  int local_size;		/* locals */
  int outbound_size;		/* arg overflow on calls out */
  int pad_outbound;
  int pad_local;
  int pad_reg;
  /* Describe the steps we'll use to grow it.  */
#define MAX_STACK_GROWS 4	/* Gives us some spare space.  */
  int growth[MAX_STACK_GROWS];
  int arg_offset;
  int reg_offset;
  int reg_growth;
  int local_growth;
};

typedef enum
{
  COND_NO,
  COND_MOV_INSN,
  COND_CLR_INSN,
  COND_INC_INSN,
  COND_DEC_INSN,
  COND_BRANCH_INSN
}
cond_type;

static void output_stack_adjust PARAMS ((int, int));
static int calc_live_regs PARAMS ((int *));
static int const_ok_for_mcore PARAMS ((int));
static int try_constant_tricks PARAMS ((long, int *, int *));
static const char * output_inline_const PARAMS ((enum machine_mode, rtx *));
static void block_move_sequence PARAMS ((rtx, rtx, rtx, rtx, int, int, int));
static void layout_mcore_frame PARAMS ((struct mcore_frame *));
static cond_type is_cond_candidate PARAMS ((rtx));
static rtx emit_new_cond_insn PARAMS ((rtx, int));
static rtx conditionalize_block PARAMS ((rtx));
static void conditionalize_optimization PARAMS ((rtx));
static void mcore_add_gc_roots PARAMS ((void));
static rtx handle_structs_in_regs PARAMS ((enum machine_mode, tree, int));
static void mcore_mark_dllexport PARAMS ((tree));
static void mcore_mark_dllimport PARAMS ((tree));
static int mcore_dllexport_p PARAMS ((tree));
static int mcore_dllimport_p PARAMS ((tree));
const struct attribute_spec mcore_attribute_table[];
static tree mcore_handle_naked_attribute PARAMS ((tree *, tree, tree, int, bool *));
#ifdef OBJECT_FORMAT_ELF
static void mcore_asm_named_section PARAMS ((const char *, unsigned int));
#endif
static void mcore_unique_section PARAMS ((tree, int));

/* Initialize the GCC target structure.  */
#ifdef TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef TARGET_MERGE_DECL_ATTRIBUTES
#define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
#endif

#ifdef OBJECT_FORMAT_ELF
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#endif

#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE mcore_attribute_table
#undef TARGET_ASM_UNIQUE_SECTION
#define TARGET_ASM_UNIQUE_SECTION mcore_unique_section

struct gcc_target targetm = TARGET_INITIALIZER;

/* Adjust the stack by SIZE bytes in DIRECTION: a negative DIRECTION
   extends the stack (probing as we go), a positive one shrinks it.  */

static void
output_stack_adjust (direction, size)
     int direction;
     int size;
{
  /* If extending the stack by a lot, we do it incrementally.  */
  if (direction < 0 && size > mcore_stack_increment && mcore_stack_increment > 0)
    {
      rtx tmp = gen_rtx (REG, SImode, 1);
      rtx memref;
      emit_insn (gen_movsi (tmp, GEN_INT (mcore_stack_increment)));
      do
        {
          emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp));
          memref = gen_rtx (MEM, SImode, stack_pointer_rtx);
          MEM_VOLATILE_P (memref) = 1;
          emit_insn (gen_movsi (memref, stack_pointer_rtx));
          size -= mcore_stack_increment;
        }
      while (size > mcore_stack_increment);

      /* SIZE is now the residual for the last adjustment,
         which doesn't require a probe.  */
    }
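
  /* Illustrative: with SIZE == 100 and mcore_stack_increment == 32 the
     loop above emits three probed 32-byte steps (a subtract of the
     increment, then a volatile store through the new stack pointer as
     the probe), leaving SIZE == 4 for the ordinary adjustment below.  */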

  if (size)
    {
      rtx insn;
      rtx val = GEN_INT (size);

      if (size > 32)
        {
          rtx nval = gen_rtx (REG, SImode, 1);
          emit_insn (gen_movsi (nval, val));
          val = nval;
        }

      if (direction > 0)
        insn = gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, val);
      else
        insn = gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, val);

      emit_insn (insn);
    }
}

/* Work out the registers which need to be saved,
   both as a mask and a count.  */

static int
calc_live_regs (count)
     int * count;
{
  int reg;
  int live_regs_mask = 0;

  * count = 0;

  for (reg = 0; reg < FIRST_PSEUDO_REGISTER; reg++)
    {
      if (regs_ever_live[reg] && !call_used_regs[reg])
        {
          (*count)++;
          live_regs_mask |= (1 << reg);
        }
    }

  return live_regs_mask;
}

/* Print the operand address in X to STREAM.  */

void
mcore_print_operand_address (stream, x)
     FILE * stream;
     rtx x;
{
  switch (GET_CODE (x))
    {
    case REG:
      fprintf (stream, "(%s)", reg_names[REGNO (x)]);
      break;

    case PLUS:
      {
        rtx base = XEXP (x, 0);
        rtx index = XEXP (x, 1);

        if (GET_CODE (base) != REG)
          {
            /* Ensure that BASE is a register (one of them must be).  */
            rtx temp = base;
            base = index;
            index = temp;
          }

        switch (GET_CODE (index))
          {
          case CONST_INT:
            fprintf (stream, "(%s,%d)", reg_names[REGNO (base)],
                     INTVAL (index));
            break;

          default:
            debug_rtx (x);
            abort ();
          }
      }
      break;

    default:
      output_addr_const (stream, x);
      break;
    }
}

/* Print operand X (an rtx) in assembler syntax to file STREAM
   according to modifier CODE.

   'R' print the next register or memory location along, ie the lsw in
       a double word value
   'O' print a constant without the #
   'M' print a constant as its negative
   'P' print log2 of a power of two
   'Q' print log2 of an inverse of a power of two
   'U' print register for ldm/stm instruction
   'X' print byte number for xtrbN instruction.  */

void
mcore_print_operand (stream, x, code)
     FILE * stream;
     rtx x;
     int code;
{
  switch (code)
    {
    case 'N':
      if (INTVAL (x) == -1)
        fprintf (asm_out_file, "32");
      else
        fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) + 1));
      break;
    case 'P':
      fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x)));
      break;
    case 'Q':
      fprintf (asm_out_file, "%d", exact_log2 (~INTVAL (x)));
      break;
    case 'O':
      fprintf (asm_out_file, "%d", INTVAL (x));
      break;
    case 'M':
      fprintf (asm_out_file, "%d", - INTVAL (x));
      break;
    case 'R':
      /* Next location along in memory or register.  */
      switch (GET_CODE (x))
        {
        case REG:
          fputs (reg_names[REGNO (x) + 1], (stream));
          break;
        case MEM:
          mcore_print_operand_address
            (stream, XEXP (adjust_address (x, SImode, 4), 0));
          break;
        default:
          abort ();
        }
      break;
    case 'U':
      fprintf (asm_out_file, "%s-%s", reg_names[REGNO (x)],
               reg_names[REGNO (x) + 3]);
      break;
    case 'x':
      fprintf (asm_out_file, "0x%x", INTVAL (x));
      break;
    case 'X':
      fprintf (asm_out_file, "%d", 3 - INTVAL (x) / 8);
      break;

    default:
      switch (GET_CODE (x))
        {
        case REG:
          fputs (reg_names[REGNO (x)], (stream));
          break;
        case MEM:
          output_address (XEXP (x, 0));
          break;
        default:
          output_addr_const (stream, x);
          break;
        }
      break;
    }
}

/* What does a constant cost?  */

int
mcore_const_costs (exp, code)
     rtx exp;
     enum rtx_code code;
{
  int val = INTVAL (exp);

  /* Easy constants.  */
  if (   CONST_OK_FOR_I (val)
      || CONST_OK_FOR_M (val)
      || CONST_OK_FOR_N (val)
      || (code == PLUS && CONST_OK_FOR_L (val)))
    return 1;
  else if (code == AND
           && (   CONST_OK_FOR_M (~val)
               || CONST_OK_FOR_N (~val)))
    return 2;
  else if (code == PLUS
           && (   CONST_OK_FOR_I (-val)
               || CONST_OK_FOR_M (-val)
               || CONST_OK_FOR_N (-val)))
    return 2;

  return 5;
}

/* What does an AND instruction cost?  We do this because immediates may
   have been relaxed.  We want to ensure that cse will cse relaxed
   immediates out.  Otherwise we'll get bad code (multiple reloads of the
   same constant).  */

int
mcore_and_cost (x)
     rtx x;
{
  int val;

  if (GET_CODE (XEXP (x, 1)) != CONST_INT)
    return 2;

  val = INTVAL (XEXP (x, 1));

  /* Do it directly.  */
  if (CONST_OK_FOR_K (val) || CONST_OK_FOR_M (~val))
    return 2;
  /* Takes one instruction to load.  */
  else if (const_ok_for_mcore (val))
    return 3;
  /* Takes two instructions to load.  */
  else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
    return 4;

  /* Takes a lrw to load.  */
  return 5;
}

/* What does an OR cost?  See mcore_and_cost().  */

int
mcore_ior_cost (x)
     rtx x;
{
  int val;

  if (GET_CODE (XEXP (x, 1)) != CONST_INT)
    return 2;

  val = INTVAL (XEXP (x, 1));

  /* Do it directly with bseti.  */
  if (CONST_OK_FOR_M (val))
    return 2;
  /* Takes one instruction to load.  */
  else if (const_ok_for_mcore (val))
    return 3;
  /* Takes two instructions to load.  */
  else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
    return 4;

  /* Takes a lrw to load.  */
  return 5;
}

/* Check to see if a comparison against a constant can be made more efficient
   by incrementing/decrementing the constant to get one that is more efficient
   to load.  */
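/* For example, the LE case below rewrites "x <= 9" as "x < 10": the
   constant 10 fits cmplti's 1..32 immediate range, so the rewritten
   compare is a single instruction.  */
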
int
mcore_modify_comparison (code)
     enum rtx_code code;
{
  rtx op1 = arch_compare_op1;

  if (GET_CODE (op1) == CONST_INT)
    {
      int val = INTVAL (op1);

      switch (code)
        {
        case LE:
          if (CONST_OK_FOR_J (val + 1))
            {
              arch_compare_op1 = GEN_INT (val + 1);
              return 1;
            }
          break;

        default:
          break;
        }
    }

  return 0;
}

/* Prepare the operands for a comparison.  */

rtx
mcore_gen_compare_reg (code)
     enum rtx_code code;
{
  rtx op0 = arch_compare_op0;
  rtx op1 = arch_compare_op1;
  rtx cc_reg = gen_rtx (REG, CCmode, CC_REG);

  if (CONSTANT_P (op1) && GET_CODE (op1) != CONST_INT)
    op1 = force_reg (SImode, op1);

  /* cmpnei: 0-31 (K immediate)
     cmplti: 1-32 (J immediate, 0 using btsti x,31).  */
  switch (code)
    {
    case EQ:	/* Use inverted condition, cmpne.  */
      code = NE;
      /* Drop through.  */

    case NE:	/* Use normal condition, cmpne.  */
      if (GET_CODE (op1) == CONST_INT && ! CONST_OK_FOR_K (INTVAL (op1)))
        op1 = force_reg (SImode, op1);
      break;

    case LE:	/* Use inverted condition, reversed cmplt.  */
      code = GT;
      /* Drop through.  */

    case GT:	/* Use normal condition, reversed cmplt.  */
      if (GET_CODE (op1) == CONST_INT)
        op1 = force_reg (SImode, op1);
      break;

    case GE:	/* Use inverted condition, cmplt.  */
      code = LT;
      /* Drop through.  */

    case LT:	/* Use normal condition, cmplt.  */
      if (GET_CODE (op1) == CONST_INT
          /* Covered by btsti x,31.  */
          && INTVAL (op1) != 0
          && ! CONST_OK_FOR_J (INTVAL (op1)))
        op1 = force_reg (SImode, op1);
      break;

    case GTU:	/* Use inverted condition, cmple.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) == 0)
        {
          /* Unsigned > 0 is the same as != 0, but we need
             to invert the condition, so we want to set
             code = EQ.  This cannot be done however, as the
             mcore does not support such a test.  Instead we
             cope with this case in the "bgtu" pattern itself
             so we should never reach this point.  */
          /* code = EQ; */
          abort ();
          break;
        }
      code = LEU;
      /* Drop through.  */

    case LEU:	/* Use normal condition, reversed cmphs.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
        op1 = force_reg (SImode, op1);
      break;

    case LTU:	/* Use inverted condition, cmphs.  */
      code = GEU;
      /* Drop through.  */

    case GEU:	/* Use normal condition, cmphs.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
        op1 = force_reg (SImode, op1);
      break;

    default:
      break;
    }

  emit_insn (gen_rtx (SET, VOIDmode, cc_reg, gen_rtx (code, CCmode, op0, op1)));

  return cc_reg;
}


int
mcore_symbolic_address_p (x)
     rtx x;
{
  switch (GET_CODE (x))
    {
    case SYMBOL_REF:
    case LABEL_REF:
      return 1;
    case CONST:
      x = XEXP (x, 0);
      return (   (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
               || GET_CODE (XEXP (x, 0)) == LABEL_REF)
              && GET_CODE (XEXP (x, 1)) == CONST_INT);
    default:
      return 0;
    }
}

int
mcore_call_address_operand (x, mode)
     rtx x;
     enum machine_mode mode;
{
  return register_operand (x, mode) || CONSTANT_P (x);
}

/* Functions to output assembly code for a function call.  */

char *
mcore_output_call (operands, index)
     rtx operands[];
     int index;
{
  static char buffer[20];
  rtx addr = operands [index];

  if (REG_P (addr))
    {
      if (TARGET_CG_DATA)
        {
          if (mcore_current_function_name == 0)
            abort ();

          ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
                              "unknown", 1);
        }

      sprintf (buffer, "jsr\t%%%d", index);
    }
  else
    {
      if (TARGET_CG_DATA)
        {
          if (mcore_current_function_name == 0)
            abort ();

          if (GET_CODE (addr) != SYMBOL_REF)
            abort ();

          ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
                              XSTR (addr, 0), 0);
        }

      sprintf (buffer, "jbsr\t%%%d", index);
    }

  return buffer;
}

/* Can we load a constant with a single instruction?  */
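/* The three tests below correspond to the single-instruction loads
   available: movi (0..127), bgeni (an exact power of two) and bmaski
   (2^n - 1); compare the load_op selection in output_inline_const.  */
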
static int
const_ok_for_mcore (value)
     int value;
{
  if (value >= 0 && value <= 127)
    return 1;

  /* Try exact power of two.  */
  if ((value & (value - 1)) == 0)
    return 1;

  /* Try exact power of two - 1.  */
  if ((value & (value + 1)) == 0)
    return 1;

  return 0;
}

/* Can we load a constant inline with up to 2 instructions?  */

int
mcore_const_ok_for_inline (value)
     long value;
{
  int x, y;

  return try_constant_tricks (value, & x, & y) > 0;
}

/* Are we loading the constant using a not?  */

int
mcore_const_trick_uses_not (value)
     long value;
{
  int x, y;

  return try_constant_tricks (value, & x, & y) == 2;
}

/* Try tricks to load a constant inline and return the trick number if
   success (0 is non-inlinable).

   0: not inlinable
   1: single instruction (do the usual thing)
   2: single insn followed by a 'not'
   3: single insn followed by an addi
   4: single insn followed by a subi
   5: single insn followed by rsubi
   6: single insn followed by bseti
   7: single insn followed by bclri
   8: single insn followed by rotli
   9: single insn followed by lsli
   10: single insn followed by ixh
   11: single insn followed by ixw.  */

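/* An illustrative example: 130 has no single-instruction encoding, but
   130 - 2 == 128 is a bgeni immediate, so the function returns 3 with
   *X = 128 and *Y = 2, and the constant is materialized as

	bgeni	rd,7
	addi	rd,2

   (register name illustrative).  */
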
static int
try_constant_tricks (value, x, y)
     long value;
     int * x;
     int * y;
{
  int i;
  unsigned bit, shf, rot;

  if (const_ok_for_mcore (value))
    return 1;	/* Do the usual thing.  */

  if (TARGET_HARDLIT)
    {
      if (const_ok_for_mcore (~value))
        {
          *x = ~value;
          return 2;
        }

      for (i = 1; i <= 32; i++)
        {
          if (const_ok_for_mcore (value - i))
            {
              *x = value - i;
              *y = i;

              return 3;
            }

          if (const_ok_for_mcore (value + i))
            {
              *x = value + i;
              *y = i;

              return 4;
            }
        }

      bit = 0x80000000L;

      for (i = 0; i <= 31; i++)
        {
          if (const_ok_for_mcore (i - value))
            {
              *x = i - value;
              *y = i;

              return 5;
            }

          if (const_ok_for_mcore (value & ~bit))
            {
              *y = bit;
              *x = value & ~bit;

              return 6;
            }

          if (const_ok_for_mcore (value | bit))
            {
              *y = ~bit;
              *x = value | bit;

              return 7;
            }

          bit >>= 1;
        }

      shf = value;
      rot = value;

      for (i = 1; i < 31; i++)
        {
          int c;

          /* MCore has rotate left.  */
          c = rot << 31;
          rot >>= 1;
          rot &= 0x7FFFFFFF;
          rot |= c;		/* Simulate rotate.  */

          if (const_ok_for_mcore (rot))
            {
              *y = i;
              *x = rot;

              return 8;
            }

          if (shf & 1)
            shf = 0;		/* Can't use logical shift, low order bit is one.  */

          shf >>= 1;

          if (shf != 0 && const_ok_for_mcore (shf))
            {
              *y = i;
              *x = shf;

              return 9;
            }
        }

      if ((value % 3) == 0 && const_ok_for_mcore (value / 3))
        {
          *x = value / 3;

          return 10;
        }

      if ((value % 5) == 0 && const_ok_for_mcore (value / 5))
        {
          *x = value / 5;

          return 11;
        }
    }

  return 0;
}


/* Check whether REG is dead at FIRST.  This is done by searching ahead
   for either the next use (i.e., REG is live), a death note, or a set of
   REG.  Don't just use dead_or_set_p() since reload does not always mark
   deaths (especially if PRESERVE_DEATH_NOTES_REGNO_P is not defined).  We
   can ignore subregs by extracting the actual register.  BRC  */

int
mcore_is_dead (first, reg)
     rtx first;
     rtx reg;
{
  rtx insn;

  /* For mcore, subregs can't live independently of their parent regs.  */
  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  /* Dies immediately.  */
  if (dead_or_set_p (first, reg))
    return 1;

  /* Look for conclusive evidence of live/death, otherwise we have
     to assume that it is live.  */
  for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == JUMP_INSN)
        return 0;	/* We lose track, assume it is alive.  */

      else if (GET_CODE (insn) == CALL_INSN)
        {
          /* Calls might use it for target or register parms.  */
          if (reg_referenced_p (reg, PATTERN (insn))
              || find_reg_fusage (insn, USE, reg))
            return 0;
          else if (dead_or_set_p (insn, reg))
            return 1;
        }
      else if (GET_CODE (insn) == INSN)
        {
          if (reg_referenced_p (reg, PATTERN (insn)))
            return 0;
          else if (dead_or_set_p (insn, reg))
            return 1;
        }
    }

  /* No conclusive evidence either way, we cannot take the chance
     that control flow hid the use from us -- "I'm not dead yet".  */
  return 0;
}

/* Count the number of ones in mask.  */

int
mcore_num_ones (mask)
     int mask;
{
  /* A trick to count set bits recently posted on comp.compilers.  */
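  /* This is the classic parallel bit count: the first step replaces
     each 2-bit pair with the number of bits set in that pair, later
     steps widen to 4-, 8- and 16-bit partial sums, and the final
     "& 0xff" extracts the total, which is at most 32.  */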
  mask = (mask >> 1 & 0x55555555) + (mask & 0x55555555);
  mask = ((mask >> 2) & 0x33333333) + (mask & 0x33333333);
  mask = ((mask >> 4) + mask) & 0x0f0f0f0f;
  mask = ((mask >> 8) + mask);

  return (mask + (mask >> 16)) & 0xff;
}

/* Count the number of zeros in mask.  */

int
mcore_num_zeros (mask)
     int mask;
{
  return 32 - mcore_num_ones (mask);
}

/* Determine the byte being masked, i.e. which single byte lane MASK
   leaves clear, numbered from the most significant byte; -1 if MASK is
   not of that form.  */

int
mcore_byte_offset (mask)
     unsigned int mask;
{
  if (mask == 0x00ffffffL)
    return 0;
  else if (mask == 0xff00ffffL)
    return 1;
  else if (mask == 0xffff00ffL)
    return 2;
  else if (mask == 0xffffff00L)
    return 3;

  return -1;
}

/* Determine halfword being masked.  */

int
mcore_halfword_offset (mask)
     unsigned int mask;
{
  if (mask == 0x0000ffffL)
    return 0;
  else if (mask == 0xffff0000L)
    return 1;

  return -1;
}

/* Output a series of bseti's corresponding to mask.  */

const char *
mcore_output_bseti (dst, mask)
     rtx dst;
     int mask;
{
  rtx out_operands[2];
  int bit;

  out_operands[0] = dst;

  for (bit = 0; bit < 32; bit++)
    {
      if ((mask & 0x1) == 0x1)
        {
          out_operands[1] = GEN_INT (bit);

          output_asm_insn ("bseti\t%0,%1", out_operands);
        }
      mask >>= 1;
    }

  return "";
}

/* Output a series of bclri's corresponding to mask.  */

const char *
mcore_output_bclri (dst, mask)
     rtx dst;
     int mask;
{
  rtx out_operands[2];
  int bit;

  out_operands[0] = dst;

  for (bit = 0; bit < 32; bit++)
    {
      if ((mask & 0x1) == 0x0)
        {
          out_operands[1] = GEN_INT (bit);

          output_asm_insn ("bclri\t%0,%1", out_operands);
        }

      mask >>= 1;
    }

  return "";
}

/* Output a conditional move of two constants that are +/- 1 within each
   other.  See the "movtK" patterns in mcore.md.  I'm not sure this is
   really worth the effort.  */
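/* An illustrative emitted sequence, for something like
   "r2 = (r3 < 8) ? 5 : 4" with the test folded into the pattern:

	cmplti	r3,8
	movi	r2,5
	decf	r2

   (whether decf/dect or incf/inct is used depends on CMP_T and on
   which of the two constants was loadable).  */
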
const char *
mcore_output_cmov (operands, cmp_t, test)
     rtx operands[];
     int cmp_t;
     const char * test;
{
  int load_value;
  int adjust_value;
  rtx out_operands[4];

  out_operands[0] = operands[0];

  /* Check to see which constant is loadable.  */
  if (const_ok_for_mcore (INTVAL (operands[1])))
    {
      out_operands[1] = operands[1];
      out_operands[2] = operands[2];
    }
  else if (const_ok_for_mcore (INTVAL (operands[2])))
    {
      out_operands[1] = operands[2];
      out_operands[2] = operands[1];

      /* Complement test since constants are swapped.  */
      cmp_t = (cmp_t == 0);
    }
  load_value   = INTVAL (out_operands[1]);
  adjust_value = INTVAL (out_operands[2]);

  /* First output the test if folded into the pattern.  */

  if (test)
    output_asm_insn (test, operands);

  /* Load the constant - for now, only support constants that can be
     generated with a single instruction.  Maybe add general inlinable
     constants later (this will increase the # of patterns since the
     instruction sequence has a different length attribute).  */
  if (load_value >= 0 && load_value <= 127)
    output_asm_insn ("movi\t%0,%1", out_operands);
  else if ((load_value & (load_value - 1)) == 0)
    output_asm_insn ("bgeni\t%0,%P1", out_operands);
  else if ((load_value & (load_value + 1)) == 0)
    output_asm_insn ("bmaski\t%0,%N1", out_operands);

  /* Output the constant adjustment.  */
  if (load_value > adjust_value)
    {
      if (cmp_t)
        output_asm_insn ("decf\t%0", out_operands);
      else
        output_asm_insn ("dect\t%0", out_operands);
    }
  else
    {
      if (cmp_t)
        output_asm_insn ("incf\t%0", out_operands);
      else
        output_asm_insn ("inct\t%0", out_operands);
    }

  return "";
}

/* Outputs the peephole for moving a constant that gets not'ed followed
   by an and (i.e. combine the not and the and into andn).  BRC  */
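/* For example (illustrative): an AND with the constant 0xffffff00 is
   done by loading its complement 0xff, a bmaski immediate, and using
   andn:

	bmaski	r1,8
	andn	r2,r1

   (register numbers illustrative).  */
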
const char *
mcore_output_andn (insn, operands)
     rtx insn ATTRIBUTE_UNUSED;
     rtx operands[];
{
  int x, y;
  rtx out_operands[3];
  const char * load_op;
  char buf[256];

  if (try_constant_tricks (INTVAL (operands[1]), &x, &y) != 2)
    abort ();

  out_operands[0] = operands[0];
  out_operands[1] = GEN_INT (x);
  out_operands[2] = operands[2];

  if (x >= 0 && x <= 127)
    load_op = "movi\t%0,%1";

  /* Try exact power of two.  */
  else if ((x & (x - 1)) == 0)
    load_op = "bgeni\t%0,%P1";

  /* Try exact power of two - 1.  */
  else if ((x & (x + 1)) == 0)
    load_op = "bmaski\t%0,%N1";

  else
    load_op = "BADMOVI\t%0,%1";

  sprintf (buf, "%s\n\tandn\t%%2,%%0", load_op);
  output_asm_insn (buf, out_operands);

  return "";
}

/* Output an inline constant.  */

static const char *
output_inline_const (mode, operands)
     enum machine_mode mode;
     rtx operands[];
{
  int x = 0, y = 0;
  int trick_no;
  rtx out_operands[3];
  char buf[256];
  char load_op[256];
  const char * dst_fmt;
  int value;

  value = INTVAL (operands[1]);

  if ((trick_no = try_constant_tricks (value, &x, &y)) == 0)
    {
      /* lrw's are handled separately: Large inlinable constants
         never get turned into lrw's.  Our caller uses try_constant_tricks
         to back off to an lrw rather than calling this routine.  */
      abort ();
    }

  if (trick_no == 1)
    x = value;

  /* operands: 0 = dst, 1 = load immed., 2 = immed. adjustment.  */
  out_operands[0] = operands[0];
  out_operands[1] = GEN_INT (x);

  if (trick_no > 2)
    out_operands[2] = GEN_INT (y);

  /* Select dst format based on mode.  */
  if (mode == DImode && (! TARGET_LITTLE_END))
    dst_fmt = "%R0";
  else
    dst_fmt = "%0";

  if (x >= 0 && x <= 127)
    sprintf (load_op, "movi\t%s,%%1", dst_fmt);

  /* Try exact power of two.  */
  else if ((x & (x - 1)) == 0)
    sprintf (load_op, "bgeni\t%s,%%P1", dst_fmt);

  /* Try exact power of two - 1.  */
  else if ((x & (x + 1)) == 0)
    sprintf (load_op, "bmaski\t%s,%%N1", dst_fmt);

  else
    sprintf (load_op, "BADMOVI\t%s,%%1", dst_fmt);

  switch (trick_no)
    {
    case 1:
      strcpy (buf, load_op);
      break;
    case 2:   /* not */
      sprintf (buf, "%s\n\tnot\t%s\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 3:   /* add */
      sprintf (buf, "%s\n\taddi\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 4:   /* sub */
      sprintf (buf, "%s\n\tsubi\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 5:   /* rsub */
      /* Never happens unless -mrsubi, see try_constant_tricks().  */
      sprintf (buf, "%s\n\trsubi\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 6:   /* bset */
      sprintf (buf, "%s\n\tbseti\t%s,%%P2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 7:   /* bclr */
      sprintf (buf, "%s\n\tbclri\t%s,%%Q2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 8:   /* rotl */
      sprintf (buf, "%s\n\trotli\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 9:   /* lsl */
      sprintf (buf, "%s\n\tlsli\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 10:  /* ixh */
      sprintf (buf, "%s\n\tixh\t%s,%s\t// %d 0x%x", load_op, dst_fmt, dst_fmt, value, value);
      break;
    case 11:  /* ixw */
      sprintf (buf, "%s\n\tixw\t%s,%s\t// %d 0x%x", load_op, dst_fmt, dst_fmt, value, value);
      break;
    default:
      return "";
    }

  output_asm_insn (buf, out_operands);

  return "";
}

/* Output a move of a word or less value.  */

const char *
mcore_output_move (insn, operands, mode)
     rtx insn ATTRIBUTE_UNUSED;
     rtx operands[];
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  rtx dst = operands[0];
  rtx src = operands[1];

  if (GET_CODE (dst) == REG)
    {
      if (GET_CODE (src) == REG)
        {
          if (REGNO (src) == CC_REG)		/* r-c */
            return "mvc\t%0";
          else
            return "mov\t%0,%1";		/* r-r */
        }
      else if (GET_CODE (src) == MEM)
        {
          if (GET_CODE (XEXP (src, 0)) == LABEL_REF)
            return "lrw\t%0,[%1]";		/* a-R */
          else
            return "ldw\t%0,%1";		/* r-m */
        }
      else if (GET_CODE (src) == CONST_INT)
        {
          int x, y;

          if (CONST_OK_FOR_I (INTVAL (src)))		/* r-I */
            return "movi\t%0,%1";
          else if (CONST_OK_FOR_M (INTVAL (src)))	/* r-M */
            return "bgeni\t%0,%P1\t// %1 %x1";
          else if (CONST_OK_FOR_N (INTVAL (src)))	/* r-N */
            return "bmaski\t%0,%N1\t// %1 %x1";
          else if (try_constant_tricks (INTVAL (src), &x, &y))	/* R-P */
            return output_inline_const (SImode, operands);	/* 1-2 insns */
          else
            return "lrw\t%0,%x1\t// %1";	/* Get it from the literal pool.  */
        }
      else
        return "lrw\t%0, %1";			/* Into the literal pool.  */
    }
  else if (GET_CODE (dst) == MEM)		/* m-r */
    return "stw\t%1,%0";

  abort ();
}

/* Outputs a constant inline -- regardless of the cost.
   Useful for things where we've gotten into trouble and think we'd
   be doing an lrw into r15 (forbidden).  This lets us get out of
   that pickle even after register allocation.  */

const char *
mcore_output_inline_const_forced (insn, operands, mode)
     rtx insn ATTRIBUTE_UNUSED;
     rtx operands[];
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  unsigned long value = INTVAL (operands[1]);
  unsigned long ovalue = value;
  struct piece
  {
    int low;
    int shift;
  }
  part[6];
  int i;

  if (mcore_const_ok_for_inline (value))
    return output_inline_const (SImode, operands);

  for (i = 0; (unsigned) i < ARRAY_SIZE (part); i++)
    {
      part[i].shift = 0;
      part[i].low = (value & 0x1F);
      value -= part[i].low;

      if (mcore_const_ok_for_inline (value))
        break;
      else
        {
          value >>= 5;
          part[i].shift = 5;

          while ((value & 1) == 0)
            {
              part[i].shift++;
              value >>= 1;
            }

          if (mcore_const_ok_for_inline (value))
            break;
        }
    }

  /* 5 bits per iteration, a maximum of 5 times == 25 bits and leaves
     7 bits left in the constant -- which we know we can cover with
     a movi.  The final value can't be zero otherwise we'd have stopped
     in the previous iteration.  */
  if (value == 0 || ! mcore_const_ok_for_inline (value))
    abort ();

  /* Now, work our way backwards emitting the constant.  */

  /* Emit the value that remains -- it will be non-zero.  */
  operands[1] = GEN_INT (value);
  output_asm_insn (output_inline_const (SImode, operands), operands);

  while (i >= 0)
    {
      /* Shift anything we've already loaded.  */
      if (part[i].shift)
        {
          operands[2] = GEN_INT (part[i].shift);
          output_asm_insn ("lsli %0,%2", operands);
          value <<= part[i].shift;
        }

      /* Add anything we need into the low 5 bits.  */
      if (part[i].low != 0)
        {
          operands[2] = GEN_INT (part[i].low);
          output_asm_insn ("addi %0,%2", operands);
          value += part[i].low;
        }

      i--;
    }

  if (value != ovalue)		/* sanity */
    abort ();

  /* We've output all the instructions.  */
  return "";
}

/* Return a sequence of instructions to perform DI or DF move.
   Since the MCORE cannot move a DI or DF in one instruction, we have
   to take care when we see overlapping source and dest registers.  */
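/* For instance, copying the register pair r2/r3 into r3/r4 (so that
   srcreg + 1 == dstreg) must move the second word first -- "mov r4,r3"
   then "mov r3,r2" -- since the other order would overwrite r3 before
   it has been read.  */
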
const char *
mcore_output_movedouble (operands, mode)
     rtx operands[];
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  rtx dst = operands[0];
  rtx src = operands[1];

  if (GET_CODE (dst) == REG)
    {
      if (GET_CODE (src) == REG)
        {
          int dstreg = REGNO (dst);
          int srcreg = REGNO (src);

          /* Ensure the second source is not overwritten.  */
          if (srcreg + 1 == dstreg)
            return "mov %R0,%R1\n\tmov %0,%1";
          else
            return "mov %0,%1\n\tmov %R0,%R1";
        }
      else if (GET_CODE (src) == MEM)
        {
          rtx memexp = XEXP (src, 0);
          int dstreg = REGNO (dst);
          int basereg = -1;

          if (GET_CODE (memexp) == LABEL_REF)
            return "lrw\t%0,[%1]\n\tlrw\t%R0,[%R1]";
          else if (GET_CODE (memexp) == REG)
            basereg = REGNO (memexp);
          else if (GET_CODE (memexp) == PLUS)
            {
              if (GET_CODE (XEXP (memexp, 0)) == REG)
                basereg = REGNO (XEXP (memexp, 0));
              else if (GET_CODE (XEXP (memexp, 1)) == REG)
                basereg = REGNO (XEXP (memexp, 1));
              else
                abort ();
            }
          else
            abort ();

          /* ??? length attribute is wrong here.  */
          if (dstreg == basereg)
            {
              /* Just load them in reverse order.  */
              return "ldw\t%R0,%R1\n\tldw\t%0,%1";

              /* XXX: alternative: move basereg to basereg+1
                 and then fall through.  */
            }
          else
            return "ldw\t%0,%1\n\tldw\t%R0,%R1";
        }
      else if (GET_CODE (src) == CONST_INT)
        {
          if (TARGET_LITTLE_END)
            {
              if (CONST_OK_FOR_I (INTVAL (src)))
                output_asm_insn ("movi %0,%1", operands);
              else if (CONST_OK_FOR_M (INTVAL (src)))
                output_asm_insn ("bgeni %0,%P1", operands);
              else if (INTVAL (src) == -1)
                output_asm_insn ("bmaski %0,32", operands);
              else if (CONST_OK_FOR_N (INTVAL (src)))
                output_asm_insn ("bmaski %0,%N1", operands);
              else
                abort ();

              if (INTVAL (src) < 0)
                return "bmaski %R0,32";
              else
                return "movi %R0,0";
            }
          else
            {
              if (CONST_OK_FOR_I (INTVAL (src)))
                output_asm_insn ("movi %R0,%1", operands);
              else if (CONST_OK_FOR_M (INTVAL (src)))
                output_asm_insn ("bgeni %R0,%P1", operands);
              else if (INTVAL (src) == -1)
                output_asm_insn ("bmaski %R0,32", operands);
              else if (CONST_OK_FOR_N (INTVAL (src)))
                output_asm_insn ("bmaski %R0,%N1", operands);
              else
                abort ();

              if (INTVAL (src) < 0)
                return "bmaski %0,32";
              else
                return "movi %0,0";
            }
        }
      else
        abort ();
    }
  else if (GET_CODE (dst) == MEM && GET_CODE (src) == REG)
    return "stw\t%1,%0\n\tstw\t%R1,%R0";
  else
    abort ();
}

/* Predicates used by the templates.  */

/* Nonzero if OP can be source of a simple move operation.  */

int
mcore_general_movsrc_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  /* Any (MEM LABEL_REF) is OK.  That is a pc-relative load.  */
  if (GET_CODE (op) == MEM && GET_CODE (XEXP (op, 0)) == LABEL_REF)
    return 1;

  return general_operand (op, mode);
}

/* Nonzero if OP can be destination of a simple move operation.  */

int
mcore_general_movdst_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (GET_CODE (op) == REG && REGNO (op) == CC_REG)
    return 0;

  return general_operand (op, mode);
}

/* Nonzero if OP is a normal arithmetic register.  */

int
mcore_arith_reg_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (! register_operand (op, mode))
    return 0;

  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);

  if (GET_CODE (op) == REG)
    return REGNO (op) != CC_REG;

  return 1;
}

/* Nonzero if OP should be recognized during reload for an ixh/ixw
   operand.  See the ixh/ixw patterns.  */

int
mcore_reload_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (mcore_arith_reg_operand (op, mode))
    return 1;

  if (! reload_in_progress)
    return 0;

  return GET_CODE (op) == MEM;
}

/* Nonzero if OP is a valid source operand for an arithmetic insn.  */

int
mcore_arith_J_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_J (INTVAL (op)))
    return 1;

  return 0;
}

/* Nonzero if OP is a valid source operand for an arithmetic insn.  */

int
mcore_arith_K_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_K (INTVAL (op)))
    return 1;

  return 0;
}

/* Nonzero if OP is a valid source operand for a shift or rotate insn.  */

int
mcore_arith_K_operand_not_0 (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (   GET_CODE (op) == CONST_INT
      && CONST_OK_FOR_K (INTVAL (op))
      && INTVAL (op) != 0)
    return 1;

  return 0;
}

int
mcore_arith_K_S_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT)
    {
      if (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_M (~INTVAL (op)))
        return 1;
    }

  return 0;
}

int
mcore_arith_S_operand (op)
     rtx op;
{
  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (~INTVAL (op)))
    return 1;

  return 0;
}

int
mcore_arith_M_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (INTVAL (op)))
    return 1;

  return 0;
}

/* Nonzero if OP is a valid source operand for loading.  */

int
mcore_arith_imm_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT && const_ok_for_mcore (INTVAL (op)))
    return 1;

  return 0;
}

int
mcore_arith_any_imm_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT)
    return 1;

  return 0;
}

/* Nonzero if OP is a valid source operand for a cmov with two consts +/- 1.  */

int
mcore_arith_O_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_O (INTVAL (op)))
    return 1;

  return 0;
}

/* Nonzero if OP is a valid source operand for a btsti.  */

int
mcore_literal_K_operand (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_K (INTVAL (op)))
    return 1;

  return 0;
}

/* Nonzero if OP is a valid source operand for an add/sub insn.  */

int
mcore_addsub_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT)
    {
      return 1;

      /* The following is removed because it precludes large constants from
         being returned as valid source operands for an add/sub insn.  While
         large constants may not directly be used in an add/sub, they may if
         first loaded into a register.  Thus, this predicate should indicate
         that they are valid, and the constraint in mcore.md should control
         whether an additional load to register is needed.  (see mcore.md,
         addsi).  -- DAC 4/2/1998 */
      /*
        if (CONST_OK_FOR_J (INTVAL (op)) || CONST_OK_FOR_L (INTVAL (op)))
          return 1;
      */
    }

  return 0;
}

/* Nonzero if OP is a valid source operand for a compare operation.  */

int
mcore_compare_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT && INTVAL (op) == 0)
    return 1;

  return 0;
}

/* Expand insert bit field.  BRC  */

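/* As a sketch of the general path below: inserting a 4-bit field at
   position 8 ANDs the destination with the clear mask
   ~(0xf << 8) == 0xfffff0ff, masks the source down to 4 bits, shifts
   it left by 8 and ORs it in.  */
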
int
mcore_expand_insv (operands)
     rtx operands[];
{
  int width = INTVAL (operands[1]);
  int posn = INTVAL (operands[2]);
  int mask;
  rtx mreg, sreg, ereg;

  /* To get width 1 insv, the test in store_bit_field() (expmed.c, line 191)
     for width==1 must be removed.  Look around line 368.  This is something
     we really want the md part to do.  */
  if (width == 1 && GET_CODE (operands[3]) == CONST_INT)
    {
      /* Do directly with bseti or bclri.  */
      /* RBE: 2/97 consider only low bit of constant.  */
      if ((INTVAL (operands[3]) & 1) == 0)
        {
          mask = ~(1 << posn);
          emit_insn (gen_rtx (SET, SImode, operands[0],
                              gen_rtx (AND, SImode, operands[0], GEN_INT (mask))));
        }
      else
        {
          mask = 1 << posn;
          emit_insn (gen_rtx (SET, SImode, operands[0],
                              gen_rtx (IOR, SImode, operands[0], GEN_INT (mask))));
        }

      return 1;
    }

  /* Look at some bitfield placements that we aren't interested
     in handling ourselves, unless specifically directed to do so.  */
  if (! TARGET_W_FIELD)
    return 0;		/* Generally, give up about now.  */

  if (width == 8 && posn % 8 == 0)
    /* Byte sized and aligned; let caller break it up.  */
    return 0;

  if (width == 16 && posn % 16 == 0)
    /* Short sized and aligned; let caller break it up.  */
    return 0;

  /* The general case - we can do this a little bit better than what the
     machine independent part tries.  This will get rid of all the subregs
     that mess up constant folding in combine when working with relaxed
     immediates.  */

  /* If setting the entire field, do it directly.  */
  if (GET_CODE (operands[3]) == CONST_INT
      && INTVAL (operands[3]) == ((1 << width) - 1))
    {
      mreg = force_reg (SImode, GEN_INT (INTVAL (operands[3]) << posn));
      emit_insn (gen_rtx (SET, SImode, operands[0],
                          gen_rtx (IOR, SImode, operands[0], mreg)));
      return 1;
    }

  /* Generate the clear mask.  */
  mreg = force_reg (SImode, GEN_INT (~(((1 << width) - 1) << posn)));

  /* Clear the field, to overlay it later with the source.  */
  emit_insn (gen_rtx (SET, SImode, operands[0],
                      gen_rtx (AND, SImode, operands[0], mreg)));

  /* If the source is constant 0, we've nothing to add back.  */
  if (GET_CODE (operands[3]) == CONST_INT && INTVAL (operands[3]) == 0)
    return 1;

  /* XXX: Should we worry about more games with constant values?
     We've covered the high profile: set/clear single-bit and many-bit
     fields.  How often do we see "arbitrary bit pattern" constants?  */
  sreg = copy_to_mode_reg (SImode, operands[3]);

  /* Extract src as same width as dst (needed for signed values).  We
     always have to do this since we widen everything to SImode.
     We don't have to mask if we're shifting this up against the
     MSB of the register (e.g., the shift will push out any hi-order
     bits).  */
  if (width + posn != (int) GET_MODE_BITSIZE (SImode))
    {
      ereg = force_reg (SImode, GEN_INT ((1 << width) - 1));
      emit_insn (gen_rtx (SET, SImode, sreg,
                          gen_rtx (AND, SImode, sreg, ereg)));
    }

  /* Insert source value in dest.  */
  if (posn != 0)
    emit_insn (gen_rtx (SET, SImode, sreg,
                        gen_rtx (ASHIFT, SImode, sreg, GEN_INT (posn))));

  emit_insn (gen_rtx (SET, SImode, operands[0],
                      gen_rtx (IOR, SImode, operands[0], sreg)));

  return 1;
}

/* Return 1 if OP is a load multiple operation.  It is known to be a
   PARALLEL and the first section will be tested.  */
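/* The expected shape is a PARALLEL of word loads from consecutive
   addresses into consecutive registers, along the lines of (sketch;
   register numbers illustrative):

	(parallel [(set (reg:SI 4) (mem:SI (reg:SI 0)))
	           (set (reg:SI 5) (mem:SI (plus:SI (reg:SI 0)
	                                            (const_int 4))))
	           ...])  */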
int
mcore_load_multiple_operation (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  int count = XVECLEN (op, 0);
  int dest_regno;
  rtx src_addr;
  int i;

  /* Perform a quick check so we don't blow up below.  */
  if (count <= 1
      || GET_CODE (XVECEXP (op, 0, 0)) != SET
      || GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != REG
      || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != MEM)
    return 0;

  dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, 0)));
  src_addr = XEXP (SET_SRC (XVECEXP (op, 0, 0)), 0);

  for (i = 1; i < count; i++)
    {
      rtx elt = XVECEXP (op, 0, i);

      if (GET_CODE (elt) != SET
          || GET_CODE (SET_DEST (elt)) != REG
          || GET_MODE (SET_DEST (elt)) != SImode
          || REGNO (SET_DEST (elt)) != (unsigned) (dest_regno + i)
          || GET_CODE (SET_SRC (elt)) != MEM
          || GET_MODE (SET_SRC (elt)) != SImode
          || GET_CODE (XEXP (SET_SRC (elt), 0)) != PLUS
          || ! rtx_equal_p (XEXP (XEXP (SET_SRC (elt), 0), 0), src_addr)
          || GET_CODE (XEXP (XEXP (SET_SRC (elt), 0), 1)) != CONST_INT
          || INTVAL (XEXP (XEXP (SET_SRC (elt), 0), 1)) != i * 4)
        return 0;
    }

  return 1;
}

/* Similar, but tests for store multiple.  */

int
mcore_store_multiple_operation (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  int count = XVECLEN (op, 0);
  int src_regno;
  rtx dest_addr;
  int i;

  /* Perform a quick check so we don't blow up below.  */
  if (count <= 1
      || GET_CODE (XVECEXP (op, 0, 0)) != SET
      || GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != MEM
      || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != REG)
    return 0;

  src_regno = REGNO (SET_SRC (XVECEXP (op, 0, 0)));
  dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, 0)), 0);

  for (i = 1; i < count; i++)
    {
      rtx elt = XVECEXP (op, 0, i);

      if (GET_CODE (elt) != SET
          || GET_CODE (SET_SRC (elt)) != REG
          || GET_MODE (SET_SRC (elt)) != SImode
          || REGNO (SET_SRC (elt)) != (unsigned) (src_regno + i)
          || GET_CODE (SET_DEST (elt)) != MEM
          || GET_MODE (SET_DEST (elt)) != SImode
          || GET_CODE (XEXP (SET_DEST (elt), 0)) != PLUS
          || ! rtx_equal_p (XEXP (XEXP (SET_DEST (elt), 0), 0), dest_addr)
          || GET_CODE (XEXP (XEXP (SET_DEST (elt), 0), 1)) != CONST_INT
          || INTVAL (XEXP (XEXP (SET_DEST (elt), 0), 1)) != i * 4)
        return 0;
    }

  return 1;
}

/* ??? Block move stuff stolen from m88k.  This code has not been
   verified for correctness.  */

/* Emit code to perform a block move.  Choose the best method.

   OPERANDS[0] is the destination.
   OPERANDS[1] is the source.
   OPERANDS[2] is the size.
   OPERANDS[3] is the alignment safe to use.  */

/* Emit code to perform a block move with an offset sequence of ldw/stw
   instructions (..., ldw 0, stw 1, ldw 1, stw 0, ...).  SIZE and ALIGN are
   known constants.  DEST and SRC are registers.  OFFSET is the known
   starting point for the output pattern.  */
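/* For example, a 12-byte word-aligned copy comes out software-pipelined
   with two alternating temporaries (a sketch; t0/t1 stand for the
   registers chosen by gen_reg_rtx):

	ldw	t0,(src, 0)
	ldw	t1,(src, 4)
	stw	t0,(dst, 0)
	ldw	t0,(src, 8)
	stw	t1,(dst, 4)
	stw	t0,(dst, 8)

   so each step of the loop below can issue one load and one store.  */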

static const enum machine_mode mode_from_align[] =
{
  VOIDmode, QImode, HImode, VOIDmode, SImode,
  VOIDmode, VOIDmode, VOIDmode, DImode
};

static void
block_move_sequence (dest, dst_mem, src, src_mem, size, align, offset)
     rtx dest, dst_mem;
     rtx src, src_mem;
     int size;
     int align;
     int offset;
{
  rtx temp[2];
  enum machine_mode mode[2];
  int amount[2];
  int active[2];
  int phase = 0;
  int next;
  int offset_ld = offset;
  int offset_st = offset;

  active[0] = active[1] = FALSE;

  /* Establish parameters for the first load and for the second load if
     it is known to be the same mode as the first.  */
  amount[0] = amount[1] = align;

  mode[0] = mode_from_align[align];

  temp[0] = gen_reg_rtx (mode[0]);

  if (size >= 2 * align)
    {
      mode[1] = mode[0];
      temp[1] = gen_reg_rtx (mode[1]);
    }

  do
    {
      rtx srcp, dstp;

      next = phase;
      phase = !phase;

      if (size > 0)
        {
          /* Change modes as the sequence tails off.  */
          if (size < amount[next])
            {
              amount[next] = (size >= 4 ? 4 : (size >= 2 ? 2 : 1));
              mode[next] = mode_from_align[amount[next]];
              temp[next] = gen_reg_rtx (mode[next]);
            }

          size -= amount[next];
          srcp = gen_rtx (MEM,
#if 0
                          MEM_IN_STRUCT_P (src_mem) ? mode[next] : BLKmode,
#else
                          mode[next],
#endif
                          gen_rtx (PLUS, Pmode, src,
                                   gen_rtx (CONST_INT, SImode, offset_ld)));

          RTX_UNCHANGING_P (srcp) = RTX_UNCHANGING_P (src_mem);
          MEM_VOLATILE_P (srcp) = MEM_VOLATILE_P (src_mem);
          MEM_IN_STRUCT_P (srcp) = 1;
          emit_insn (gen_rtx (SET, VOIDmode, temp[next], srcp));
          offset_ld += amount[next];
          active[next] = TRUE;
        }

      if (active[phase])
        {
          active[phase] = FALSE;

          dstp = gen_rtx (MEM,
#if 0
                          MEM_IN_STRUCT_P (dst_mem) ? mode[phase] : BLKmode,
#else
                          mode[phase],
#endif
                          gen_rtx (PLUS, Pmode, dest,
                                   gen_rtx (CONST_INT, SImode, offset_st)));

          RTX_UNCHANGING_P (dstp) = RTX_UNCHANGING_P (dst_mem);
          MEM_VOLATILE_P (dstp) = MEM_VOLATILE_P (dst_mem);
          MEM_IN_STRUCT_P (dstp) = 1;
          emit_insn (gen_rtx (SET, VOIDmode, dstp, temp[phase]));
          offset_st += amount[phase];
        }
    }
  while (active[next]);
}

void
mcore_expand_block_move (dst_mem, src_mem, operands)
     rtx dst_mem;
     rtx src_mem;
     rtx * operands;
{
  int align = INTVAL (operands[3]);
  int bytes;

  if (GET_CODE (operands[2]) == CONST_INT)
    {
      bytes = INTVAL (operands[2]);

      if (bytes <= 0)
        return;
      if (align > 4)
        align = 4;

      /* RBE: bumped 1 and 2 byte align from 1 and 2 to 4 and 8 bytes before
         we give up and go to memcpy.  */
      if ((align == 4 && (bytes <= 4*4
                          || ((bytes & 01) == 0 && bytes <= 8*4)
                          || ((bytes & 03) == 0 && bytes <= 16*4)))
          || (align == 2 && bytes <= 4*2)
          || (align == 1 && bytes <= 4*1))
        {
          block_move_sequence (operands[0], dst_mem, operands[1], src_mem,
                               bytes, align, 0);
          return;
        }
    }

  /* If we get here, just use the library routine.  */
  emit_library_call (gen_rtx (SYMBOL_REF, Pmode, "memcpy"), 0, VOIDmode, 3,
                     operands[0], Pmode, operands[1], Pmode, operands[2],
                     SImode);
}

/* Code to generate prologue and epilogue sequences.  */
static int number_of_regs_before_varargs;

/* Set by SETUP_INCOMING_VARARGS to indicate to prolog that this is
   for a varargs function.  */
static int current_function_anonymous_args;

#define STACK_BYTES (STACK_BOUNDARY/BITS_PER_UNIT)
#define STORE_REACH (64)	/* Maximum displace of word store + 4.  */
#define ADDI_REACH  (32)	/* Maximum addi operand.  */
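
/* These limits appear to mirror the instruction encodings: addi accepts
   an immediate of 1..32, and stw reaches word offsets up to 60, making
   64 the first displacement a single word store cannot reach (hence,
   presumably, the "+ 4" above).  The layout code below relies only on
   the two constants themselves.  */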
8f90be4c 2039
8f90be4c
NC
2040static void
2041layout_mcore_frame (infp)
2042 struct mcore_frame * infp;
2043{
2044 int n;
2045 unsigned int i;
2046 int nbytes;
2047 int regarg;
2048 int localregarg;
2049 int localreg;
2050 int outbounds;
2051 unsigned int growths;
2052 int step;
2053
2054 /* Might have to spill bytes to re-assemble a big argument that
4816b8e4 2055 was passed partially in registers and partially on the stack. */
8f90be4c
NC
2056 nbytes = current_function_pretend_args_size;
2057
2058 /* Determine how much space for spilled anonymous args (e.g., stdarg). */
2059 if (current_function_anonymous_args)
2060 nbytes += (NPARM_REGS - number_of_regs_before_varargs) * UNITS_PER_WORD;
2061
2062 infp->arg_size = nbytes;
2063
2064 /* How much space to save non-volatile registers we stomp. */
2065 infp->reg_mask = calc_live_regs (& n);
2066 infp->reg_size = n * 4;
2067
2068 /* And the rest of it... locals and space for overflowed outbounds. */
2069 infp->local_size = get_frame_size ();
2070 infp->outbound_size = current_function_outgoing_args_size;
2071
2072 /* Make sure we have a whole number of words for the locals. */
2073 if (infp->local_size % STACK_BYTES)
2074 infp->local_size = (infp->local_size + STACK_BYTES - 1) & ~ (STACK_BYTES -1);
2075
2076 /* Only thing we know we have to pad is the outbound space, since
2077 we've aligned our locals assuming that base of locals is aligned. */
2078 infp->pad_local = 0;
2079 infp->pad_reg = 0;
2080 infp->pad_outbound = 0;
2081 if (infp->outbound_size % STACK_BYTES)
2082 infp->pad_outbound = STACK_BYTES - (infp->outbound_size % STACK_BYTES);
2083
2084 /* Now we see how we want to stage the prologue so that it does
2085 the most appropriate stack growth and register saves to either:
2086 (1) run fast,
2087 (2) reduce instruction space, or
2088 (3) reduce stack space. */
 2089	  for (i = 0; i < ARRAY_SIZE (infp->growth); i++)
2090 infp->growth[i] = 0;
2091
2092 regarg = infp->reg_size + infp->arg_size;
2093 localregarg = infp->local_size + regarg;
2094 localreg = infp->local_size + infp->reg_size;
2095 outbounds = infp->outbound_size + infp->pad_outbound;
2096 growths = 0;
2097
2098 /* XXX: Consider one where we consider localregarg + outbound too! */
2099
 2100	  /* If the frame is <= 32 bytes and an stm would save <= 2 registers,
 2101	     use stw's with offsets and buy the frame in one shot.  */
2102 if (localregarg <= ADDI_REACH
2103 && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
2104 {
2105 /* Make sure we'll be aligned. */
2106 if (localregarg % STACK_BYTES)
2107 infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);
2108
2109 step = localregarg + infp->pad_reg;
2110 infp->reg_offset = infp->local_size;
2111
2112 if (outbounds + step <= ADDI_REACH && !frame_pointer_needed)
2113 {
2114 step += outbounds;
2115 infp->reg_offset += outbounds;
2116 outbounds = 0;
2117 }
2118
2119 infp->arg_offset = step - 4;
2120 infp->growth[growths++] = step;
2121 infp->reg_growth = growths;
2122 infp->local_growth = growths;
2123
 2124	      /* If we haven't already folded it in.  */
2125 if (outbounds)
2126 infp->growth[growths++] = outbounds;
2127
2128 goto finish;
2129 }
2130
2131 /* Frame can't be done with a single subi, but can be done with 2
2132 insns. If the 'stm' is getting <= 2 registers, we use stw's and
2133 shift some of the stack purchase into the first subi, so both are
2134 single instructions. */
2135 if (localregarg <= STORE_REACH
2136 && (infp->local_size > ADDI_REACH)
2137 && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
2138 {
2139 int all;
2140
2141 /* Make sure we'll be aligned; use either pad_reg or pad_local. */
2142 if (localregarg % STACK_BYTES)
2143 infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);
2144
2145 all = localregarg + infp->pad_reg + infp->pad_local;
2146 step = ADDI_REACH; /* As much up front as we can. */
2147 if (step > all)
2148 step = all;
2149
2150 /* XXX: Consider whether step will still be aligned; we believe so. */
2151 infp->arg_offset = step - 4;
2152 infp->growth[growths++] = step;
2153 infp->reg_growth = growths;
2154 infp->reg_offset = step - infp->pad_reg - infp->reg_size;
2155 all -= step;
2156
 2157	      /* Can we fold in any space required for outbounds?  */
2158 if (outbounds + all <= ADDI_REACH && !frame_pointer_needed)
2159 {
2160 all += outbounds;
2161 outbounds = 0;
2162 }
2163
 2164	      /* Get the rest of the locals in place.  */
2165 step = all;
2166 infp->growth[growths++] = step;
2167 infp->local_growth = growths;
2168 all -= step;
2169
2170 assert (all == 0);
2171
 2172	      /* Finish off if we need to do so.  */
2173 if (outbounds)
2174 infp->growth[growths++] = outbounds;
2175
2176 goto finish;
2177 }
2178
2179 /* Registers + args is nicely aligned, so we'll buy that in one shot.
2180 Then we buy the rest of the frame in 1 or 2 steps depending on
2181 whether we need a frame pointer. */
2182 if ((regarg % STACK_BYTES) == 0)
2183 {
2184 infp->growth[growths++] = regarg;
2185 infp->reg_growth = growths;
2186 infp->arg_offset = regarg - 4;
2187 infp->reg_offset = 0;
2188
2189 if (infp->local_size % STACK_BYTES)
2190 infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);
2191
2192 step = infp->local_size + infp->pad_local;
2193
2194 if (!frame_pointer_needed)
2195 {
2196 step += outbounds;
2197 outbounds = 0;
2198 }
2199
2200 infp->growth[growths++] = step;
2201 infp->local_growth = growths;
2202
 2203	      /* If there's any left to be done.  */
2204 if (outbounds)
2205 infp->growth[growths++] = outbounds;
2206
2207 goto finish;
2208 }
2209
 2210	  /* XXX: optimizations that we'll want to play with....
 2211	     -- regarg is not aligned, but it's a small number of registers;
 2212	        use some of localsize so that regarg is aligned and then
 2213	        save the registers.  */
2214
 2215	  /* Simple encoding; plods down the stack buying the pieces as it goes.
 2216	     -- does not optimize space consumption.
 2217	     -- does not attempt to optimize instruction counts.
 2218	     -- but it is safe for all alignments.  */
2219 if (regarg % STACK_BYTES != 0)
2220 infp->pad_reg = STACK_BYTES - (regarg % STACK_BYTES);
2221
2222 infp->growth[growths++] = infp->arg_size + infp->reg_size + infp->pad_reg;
2223 infp->reg_growth = growths;
2224 infp->arg_offset = infp->growth[0] - 4;
2225 infp->reg_offset = 0;
2226
2227 if (frame_pointer_needed)
2228 {
2229 if (infp->local_size % STACK_BYTES != 0)
2230 infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);
2231
2232 infp->growth[growths++] = infp->local_size + infp->pad_local;
2233 infp->local_growth = growths;
2234
2235 infp->growth[growths++] = outbounds;
2236 }
2237 else
2238 {
2239 if ((infp->local_size + outbounds) % STACK_BYTES != 0)
2240 infp->pad_local = STACK_BYTES - ((infp->local_size + outbounds) % STACK_BYTES);
2241
2242 infp->growth[growths++] = infp->local_size + infp->pad_local + outbounds;
2243 infp->local_growth = growths;
2244 }
2245
 2246	  /* Anything else that we've forgotten?  Plus a few consistency checks.  */
2247 finish:
2248 assert (infp->reg_offset >= 0);
2249 assert (growths <= MAX_STACK_GROWS);
2250
2251 for (i = 0; i < growths; i++)
2252 {
2253 if (infp->growth[i] % STACK_BYTES)
2254 {
 2255	  fprintf (stderr, "stack growth of %d is not %d aligned\n",
2256 infp->growth[i], STACK_BYTES);
2257 abort ();
2258 }
2259 }
2260}
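/* Illustrative sketch (editor's addition, not part of the build): the
   rounding idiom used repeatedly above, isolated.  It assumes the
   alignment is a power of two, which STACK_BYTES always is here.  */
#if 0
static int
round_up (int size, int align)
{
  return (size + align - 1) & ~(align - 1);	/* round_up (5, 8) == 8 */
}
#endif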
2261
2262/* Define the offset between two registers, one to be eliminated, and
2263 the other its replacement, at the start of a routine. */
 2264
2265int
2266mcore_initial_elimination_offset (from, to)
2267 int from;
2268 int to;
2269{
2270 int above_frame;
2271 int below_frame;
2272 struct mcore_frame fi;
2273
2274 layout_mcore_frame (& fi);
2275
2276 /* fp to ap */
2277 above_frame = fi.local_size + fi.pad_local + fi.reg_size + fi.pad_reg;
2278 /* sp to fp */
2279 below_frame = fi.outbound_size + fi.pad_outbound;
2280
2281 if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
2282 return above_frame;
2283
2284 if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
2285 return above_frame + below_frame;
2286
2287 if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
2288 return below_frame;
2289
2290 abort ();
2291
2292 return 0;
2293}
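/* Editor's note: the three cases above are mutually consistent; for any
   frame layout the ap-to-sp offset is the sum of the other two.  A
   hypothetical sanity check, assuming the usual register-number macros:  */
#if 0
assert (mcore_initial_elimination_offset (ARG_POINTER_REGNUM,
					  STACK_POINTER_REGNUM)
	== mcore_initial_elimination_offset (ARG_POINTER_REGNUM,
					     FRAME_POINTER_REGNUM)
	 + mcore_initial_elimination_offset (FRAME_POINTER_REGNUM,
					     STACK_POINTER_REGNUM));
#endif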
2294
 2295 /* Keep track of some information about varargs for the prolog.  */
 2296
2297void
2298mcore_setup_incoming_varargs (args_so_far, mode, type, ptr_pretend_size)
2299 CUMULATIVE_ARGS args_so_far;
2300 enum machine_mode mode;
2301 tree type;
2302 int * ptr_pretend_size ATTRIBUTE_UNUSED;
2303{
2304 current_function_anonymous_args = 1;
2305
2306 /* We need to know how many argument registers are used before
2307 the varargs start, so that we can push the remaining argument
2308 registers during the prologue. */
2309 number_of_regs_before_varargs = args_so_far + mcore_num_arg_regs (mode, type);
2310
 2311   /* There is a bug somewhere in the arg handling code.
 2312      Until I can find it, this workaround always pushes the
 2313      last named argument onto the stack.  */
2314 number_of_regs_before_varargs = args_so_far;
2315
2316 /* The last named argument may be split between argument registers
2317 and the stack. Allow for this here. */
2318 if (number_of_regs_before_varargs > NPARM_REGS)
2319 number_of_regs_before_varargs = NPARM_REGS;
2320}
2321
2322void
2323mcore_expand_prolog ()
2324{
2325 struct mcore_frame fi;
2326 int space_allocated = 0;
2327 int growth = 0;
2328
2329 /* Find out what we're doing. */
2330 layout_mcore_frame (&fi);
2331
2332 space_allocated = fi.arg_size + fi.reg_size + fi.local_size +
2333 fi.outbound_size + fi.pad_outbound + fi.pad_local + fi.pad_reg;
2334
2335 if (TARGET_CG_DATA)
2336 {
2337 /* Emit a symbol for this routine's frame size. */
2338 rtx x;
2339 int len;
2340
2341 x = DECL_RTL (current_function_decl);
2342
2343 if (GET_CODE (x) != MEM)
2344 abort ();
2345
2346 x = XEXP (x, 0);
2347
2348 if (GET_CODE (x) != SYMBOL_REF)
2349 abort ();
2350
2351 if (mcore_current_function_name)
2352 free (mcore_current_function_name);
2353
2354 len = strlen (XSTR (x, 0)) + 1;
 2355       mcore_current_function_name = (char *) xmalloc (len);
2356
2357 memcpy (mcore_current_function_name, XSTR (x, 0), len);
2358
2359 ASM_OUTPUT_CG_NODE (asm_out_file, mcore_current_function_name, space_allocated);
2360
2361 if (current_function_calls_alloca)
2362 ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name, "alloca", 1);
2363
2364 /* 970425: RBE:
2365 We're looking at how the 8byte alignment affects stack layout
2366 and where we had to pad things. This emits information we can
2367 extract which tells us about frame sizes and the like. */
2368 fprintf (asm_out_file,
2369 "\t.equ\t__$frame$info$_%s_$_%d_%d_x%x_%d_%d_%d,0\n",
2370 mcore_current_function_name,
2371 fi.arg_size, fi.reg_size, fi.reg_mask,
2372 fi.local_size, fi.outbound_size,
2373 frame_pointer_needed);
2374 }
2375
2376 if (mcore_naked_function_p ())
2377 return;
2378
2379 /* Handle stdarg+regsaves in one shot: can't be more than 64 bytes. */
2380 output_stack_adjust (-1, fi.growth[growth++]); /* grows it */
2381
2382 /* If we have a parameter passed partially in regs and partially in memory,
2383 the registers will have been stored to memory already in function.c. So
2384 we only need to do something here for varargs functions. */
2385 if (fi.arg_size != 0 && current_function_pretend_args_size == 0)
2386 {
2387 int offset;
2388 int rn = FIRST_PARM_REG + NPARM_REGS - 1;
2389 int remaining = fi.arg_size;
2390
2391 for (offset = fi.arg_offset; remaining >= 4; offset -= 4, rn--, remaining -= 4)
2392 {
2393 emit_insn (gen_movsi
2394 (gen_rtx (MEM, SImode,
2395 plus_constant (stack_pointer_rtx, offset)),
2396 gen_rtx (REG, SImode, rn)));
2397 }
2398 }
2399
 2400   /* Do we need another stack adjustment before we do the register saves?  */
2401 if (growth < fi.reg_growth)
2402 output_stack_adjust (-1, fi.growth[growth++]); /* grows it */
2403
2404 if (fi.reg_size != 0)
2405 {
2406 int i;
2407 int offs = fi.reg_offset;
2408
2409 for (i = 15; i >= 0; i--)
2410 {
2411 if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
2412 {
2413 int first_reg = 15;
2414
2415 while (fi.reg_mask & (1 << first_reg))
2416 first_reg--;
2417 first_reg++;
2418
2419 emit_insn (gen_store_multiple (gen_rtx (MEM, SImode, stack_pointer_rtx),
2420 gen_rtx (REG, SImode, first_reg),
2421 GEN_INT (16 - first_reg)));
2422
2423 i -= (15 - first_reg);
2424 offs += (16 - first_reg) * 4;
2425 }
2426 else if (fi.reg_mask & (1 << i))
2427 {
2428 emit_insn (gen_movsi
2429 (gen_rtx (MEM, SImode,
2430 plus_constant (stack_pointer_rtx, offs)),
2431 gen_rtx (REG, SImode, i)));
2432 offs += 4;
2433 }
2434 }
2435 }
2436
2437 /* Figure the locals + outbounds. */
2438 if (frame_pointer_needed)
2439 {
2440 /* If we haven't already purchased to 'fp'. */
2441 if (growth < fi.local_growth)
2442 output_stack_adjust (-1, fi.growth[growth++]); /* grows it */
2443
2444 emit_insn (gen_movsi (frame_pointer_rtx, stack_pointer_rtx));
2445
 2446      /* ... and then go any remaining distance for outbounds, etc.  */
2447 if (fi.growth[growth])
2448 output_stack_adjust (-1, fi.growth[growth++]);
2449 }
2450 else
2451 {
2452 if (growth < fi.local_growth)
2453 output_stack_adjust (-1, fi.growth[growth++]); /* grows it */
2454 if (fi.growth[growth])
2455 output_stack_adjust (-1, fi.growth[growth++]);
2456 }
2457}
2458
2459void
2460mcore_expand_epilog ()
2461{
2462 struct mcore_frame fi;
2463 int i;
2464 int offs;
 2465   int growth = MAX_STACK_GROWS - 1;
 2466
 2467
2468 /* Find out what we're doing. */
 2469   layout_mcore_frame (&fi);
2470
2471 if (mcore_naked_function_p ())
2472 return;
 2473
2474 /* If we had a frame pointer, restore the sp from that. */
2475 if (frame_pointer_needed)
2476 {
2477 emit_insn (gen_movsi (stack_pointer_rtx, frame_pointer_rtx));
2478 growth = fi.local_growth - 1;
2479 }
2480 else
2481 {
2482 /* XXX: while loop should accumulate and do a single sell. */
2483 while (growth >= fi.local_growth)
2484 {
2485 if (fi.growth[growth] != 0)
2486 output_stack_adjust (1, fi.growth[growth]);
2487 growth--;
2488 }
2489 }
2490
2491 /* Make sure we've shrunk stack back to the point where the registers
2492 were laid down. This is typically 0/1 iterations. Then pull the
 2493      register save information back off the stack.  */
2494 while (growth >= fi.reg_growth)
2495 output_stack_adjust ( 1, fi.growth[growth--]);
2496
2497 offs = fi.reg_offset;
2498
2499 for (i = 15; i >= 0; i--)
2500 {
2501 if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
2502 {
2503 int first_reg;
2504
2505 /* Find the starting register. */
2506 first_reg = 15;
2507
2508 while (fi.reg_mask & (1 << first_reg))
2509 first_reg--;
2510
2511 first_reg++;
2512
2513 emit_insn (gen_load_multiple (gen_rtx (REG, SImode, first_reg),
2514 gen_rtx (MEM, SImode, stack_pointer_rtx),
2515 GEN_INT (16 - first_reg)));
2516
2517 i -= (15 - first_reg);
2518 offs += (16 - first_reg) * 4;
2519 }
2520 else if (fi.reg_mask & (1 << i))
2521 {
2522 emit_insn (gen_movsi
2523 (gen_rtx (REG, SImode, i),
2524 gen_rtx (MEM, SImode,
2525 plus_constant (stack_pointer_rtx, offs))));
2526 offs += 4;
2527 }
2528 }
2529
2530 /* Give back anything else. */
 2531   /* XXX: Should accumulate total and then give it back.  */
2532 while (growth >= 0)
2533 output_stack_adjust ( 1, fi.growth[growth--]);
2534}
2535\f
2536/* This code is borrowed from the SH port. */
2537
 2538 /* The MCORE cannot load a large constant into a register; constants have to
 2539    come from a pc relative load.  The reference of a pc relative load
 2540    instruction must be less than 1k in front of the instruction.  This
2541 means that we often have to dump a constant inside a function, and
2542 generate code to branch around it.
2543
2544 It is important to minimize this, since the branches will slow things
2545 down and make things bigger.
2546
2547 Worst case code looks like:
2548
2549 lrw L1,r0
2550 br L2
2551 align
2552 L1: .long value
2553 L2:
2554 ..
2555
2556 lrw L3,r0
2557 br L4
2558 align
2559 L3: .long value
2560 L4:
2561 ..
2562
2563 We fix this by performing a scan before scheduling, which notices which
2564 instructions need to have their operands fetched from the constant table
2565 and builds the table.
2566
2567 The algorithm is:
2568
2569 scan, find an instruction which needs a pcrel move. Look forward, find the
2570 last barrier which is within MAX_COUNT bytes of the requirement.
2571 If there isn't one, make one. Process all the instructions between
2572 the find and the barrier.
2573
2574 In the above example, we can tell that L3 is within 1k of L1, so
2575 the first move can be shrunk from the 2 insn+constant sequence into
2576 just 1 insn, and the constant moved to L3 to make:
2577
2578 lrw L1,r0
2579 ..
2580 lrw L3,r0
2581 bra L4
2582 align
2583 L3:.long value
2584 L4:.long value
2585
2586 Then the second move becomes the target for the shortening process. */
2587
2588typedef struct
2589{
2590 rtx value; /* Value in table. */
2591 rtx label; /* Label of value. */
2592} pool_node;
2593
2594/* The maximum number of constants that can fit into one pool, since
2595 the pc relative range is 0...1020 bytes and constants are at least 4
 2596    bytes long.  We subtract 4 from the range to allow for the case where
2597 we need to add a branch/align before the constant pool. */
2598
2599#define MAX_COUNT 1016
2600#define MAX_POOL_SIZE (MAX_COUNT/4)
2601static pool_node pool_vector[MAX_POOL_SIZE];
2602static int pool_size;
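/* Editor's note: the arithmetic works out to 1020 - 4 = 1016 bytes of
   usable range and 1016 / 4 = 254 pool entries.  A compile-time check
   of that sizing, illustrative only, using the negative-array-size
   idiom:  */
#if 0
extern char pool_size_check[(MAX_POOL_SIZE == 254) ? 1 : -1];
#endif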
2603
2604/* Dump out any constants accumulated in the final pass. These
2605 will only be labels. */
 2606
 2607 const char *
2608mcore_output_jump_label_table ()
2609{
2610 int i;
2611
2612 if (pool_size)
2613 {
2614 fprintf (asm_out_file, "\t.align 2\n");
2615
2616 for (i = 0; i < pool_size; i++)
2617 {
2618 pool_node * p = pool_vector + i;
2619
2620 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (p->label));
2621
2622 output_asm_insn (".long %0", &p->value);
2623 }
2624
2625 pool_size = 0;
2626 }
2627
2628 return "";
2629}
2630
 2631 /* Check whether insn is a candidate for a conditional.  */
 2632
2633static cond_type
2634is_cond_candidate (insn)
2635 rtx insn;
2636{
2637 /* The only things we conditionalize are those that can be directly
2638 changed into a conditional. Only bother with SImode items. If
2639 we wanted to be a little more aggressive, we could also do other
 2640      modes such as DImode with reg-reg move or load 0.  */
2641 if (GET_CODE (insn) == INSN)
2642 {
2643 rtx pat = PATTERN (insn);
2644 rtx src, dst;
2645
2646 if (GET_CODE (pat) != SET)
2647 return COND_NO;
2648
2649 dst = XEXP (pat, 0);
2650
2651 if ((GET_CODE (dst) != REG &&
2652 GET_CODE (dst) != SUBREG) ||
2653 GET_MODE (dst) != SImode)
2654 return COND_NO;
2655
2656 src = XEXP (pat, 1);
2657
2658 if ((GET_CODE (src) == REG ||
2659 (GET_CODE (src) == SUBREG &&
2660 GET_CODE (SUBREG_REG (src)) == REG)) &&
2661 GET_MODE (src) == SImode)
2662 return COND_MOV_INSN;
2663 else if (GET_CODE (src) == CONST_INT &&
2664 INTVAL (src) == 0)
2665 return COND_CLR_INSN;
2666 else if (GET_CODE (src) == PLUS &&
2667 (GET_CODE (XEXP (src, 0)) == REG ||
2668 (GET_CODE (XEXP (src, 0)) == SUBREG &&
2669 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
2670 GET_MODE (XEXP (src, 0)) == SImode &&
2671 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2672 INTVAL (XEXP (src, 1)) == 1)
2673 return COND_INC_INSN;
2674 else if (((GET_CODE (src) == MINUS &&
2675 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2676 INTVAL( XEXP (src, 1)) == 1) ||
2677 (GET_CODE (src) == PLUS &&
2678 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2679 INTVAL (XEXP (src, 1)) == -1)) &&
2680 (GET_CODE (XEXP (src, 0)) == REG ||
2681 (GET_CODE (XEXP (src, 0)) == SUBREG &&
2682 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
2683 GET_MODE (XEXP (src, 0)) == SImode)
2684 return COND_DEC_INSN;
2685
2686 /* some insns that we don't bother with:
2687 (set (rx:DI) (ry:DI))
2688 (set (rx:DI) (const_int 0))
2689 */
2690
2691 }
2692 else if (GET_CODE (insn) == JUMP_INSN &&
2693 GET_CODE (PATTERN (insn)) == SET &&
2694 GET_CODE (XEXP (PATTERN (insn), 1)) == LABEL_REF)
2695 return COND_BRANCH_INSN;
2696
2697 return COND_NO;
2698}
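/* Editor's note (illustrative, not exhaustive): in source-level terms,
   the SImode forms recognized above correspond to

     r7 = r8;		-> COND_MOV_INSN
     r7 = 0;		-> COND_CLR_INSN
     r7 = r7 + 1;	-> COND_INC_INSN
     r7 = r7 - 1;	-> COND_DEC_INSN  (or r7 + -1)
     goto L;		-> COND_BRANCH_INSN  */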
2699
2700/* Emit a conditional version of insn and replace the old insn with the
2701 new one. Return the new insn if emitted. */
 2702
2703static rtx
2704emit_new_cond_insn (insn, cond)
2705 rtx insn;
2706 int cond;
2707{
2708 rtx c_insn = 0;
2709 rtx pat, dst, src;
2710 cond_type num;
2711
2712 if ((num = is_cond_candidate (insn)) == COND_NO)
2713 return NULL;
2714
2715 pat = PATTERN (insn);
2716
2717 if (GET_CODE (insn) == INSN)
2718 {
2719 dst = SET_DEST (pat);
2720 src = SET_SRC (pat);
2721 }
2722 else
2723 {
2724 dst = JUMP_LABEL (insn);
2725 src = NULL_RTX;
2726 }
2727
2728 switch (num)
2729 {
2730 case COND_MOV_INSN:
2731 case COND_CLR_INSN:
2732 if (cond)
2733 c_insn = gen_movt0 (dst, src, dst);
2734 else
2735 c_insn = gen_movt0 (dst, dst, src);
2736 break;
2737
2738 case COND_INC_INSN:
2739 if (cond)
2740 c_insn = gen_incscc (dst, dst);
2741 else
2742 c_insn = gen_incscc_false (dst, dst);
2743 break;
2744
2745 case COND_DEC_INSN:
2746 if (cond)
2747 c_insn = gen_decscc (dst, dst);
2748 else
2749 c_insn = gen_decscc_false (dst, dst);
2750 break;
2751
2752 case COND_BRANCH_INSN:
2753 if (cond)
2754 c_insn = gen_branch_true (dst);
2755 else
2756 c_insn = gen_branch_false (dst);
2757 break;
2758
2759 default:
2760 return NULL;
2761 }
2762
2763 /* Only copy the notes if they exist. */
2764 if (rtx_length [GET_CODE (c_insn)] >= 7 && rtx_length [GET_CODE (insn)] >= 7)
2765 {
2766 /* We really don't need to bother with the notes and links at this
2767 point, but go ahead and save the notes. This will help is_dead()
2768 when applying peepholes (links don't matter since they are not
2769 used any more beyond this point for the mcore). */
2770 REG_NOTES (c_insn) = REG_NOTES (insn);
2771 }
2772
2773 if (num == COND_BRANCH_INSN)
2774 {
2775 /* For jumps, we need to be a little bit careful and emit the new jump
2776 before the old one and to update the use count for the target label.
2777 This way, the barrier following the old (uncond) jump will get
2778 deleted, but the label won't. */
2779 c_insn = emit_jump_insn_before (c_insn, insn);
2780
2781 ++ LABEL_NUSES (dst);
2782
2783 JUMP_LABEL (c_insn) = dst;
2784 }
2785 else
2786 c_insn = emit_insn_after (c_insn, insn);
2787
2788 delete_insn (insn);
2789
2790 return c_insn;
2791}
2792
2793/* Attempt to change a basic block into a series of conditional insns. This
2794 works by taking the branch at the end of the 1st block and scanning for the
2795 end of the 2nd block. If all instructions in the 2nd block have cond.
2796 versions and the label at the start of block 3 is the same as the target
2797 from the branch at block 1, then conditionalize all insn in block 2 using
2798 the inverse condition of the branch at block 1. (Note I'm bending the
2799 definition of basic block here.)
2800
2801 e.g., change:
2802
2803 bt L2 <-- end of block 1 (delete)
2804 mov r7,r8
2805 addu r7,1
2806 br L3 <-- end of block 2
2807
2808 L2: ... <-- start of block 3 (NUSES==1)
2809 L3: ...
2810
2811 to:
2812
2813 movf r7,r8
2814 incf r7
2815 bf L3
2816
2817 L3: ...
2818
2819 we can delete the L2 label if NUSES==1 and re-apply the optimization
2820 starting at the last instruction of block 2. This may allow an entire
 2821    if-then-else statement to be conditionalized.  BRC  */
2822static rtx
2823conditionalize_block (first)
2824 rtx first;
2825{
2826 rtx insn;
2827 rtx br_pat;
2828 rtx end_blk_1_br = 0;
2829 rtx end_blk_2_insn = 0;
2830 rtx start_blk_3_lab = 0;
2831 int cond;
2832 int br_lab_num;
2833 int blk_size = 0;
2834
2835
2836 /* Check that the first insn is a candidate conditional jump. This is
2837 the one that we'll eliminate. If not, advance to the next insn to
2838 try. */
2839 if (GET_CODE (first) != JUMP_INSN ||
2840 GET_CODE (PATTERN (first)) != SET ||
2841 GET_CODE (XEXP (PATTERN (first), 1)) != IF_THEN_ELSE)
2842 return NEXT_INSN (first);
2843
2844 /* Extract some information we need. */
2845 end_blk_1_br = first;
2846 br_pat = PATTERN (end_blk_1_br);
2847
2848 /* Complement the condition since we use the reverse cond. for the insns. */
2849 cond = (GET_CODE (XEXP (XEXP (br_pat, 1), 0)) == EQ);
2850
2851 /* Determine what kind of branch we have. */
2852 if (GET_CODE (XEXP (XEXP (br_pat, 1), 1)) == LABEL_REF)
2853 {
2854 /* A normal branch, so extract label out of first arm. */
2855 br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 1), 0));
2856 }
2857 else
2858 {
2859 /* An inverse branch, so extract the label out of the 2nd arm
2860 and complement the condition. */
2861 cond = (cond == 0);
2862 br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 2), 0));
2863 }
2864
2865 /* Scan forward for the start of block 2: it must start with a
2866 label and that label must be the same as the branch target
2867 label from block 1. We don't care about whether block 2 actually
2868 ends with a branch or a label (an uncond. branch is
2869 conditionalizable). */
2870 for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
2871 {
2872 enum rtx_code code;
2873
2874 code = GET_CODE (insn);
2875
2876 /* Look for the label at the start of block 3. */
2877 if (code == CODE_LABEL && CODE_LABEL_NUMBER (insn) == br_lab_num)
2878 break;
2879
2880 /* Skip barriers, notes, and conditionalizable insns. If the
2881 insn is not conditionalizable or makes this optimization fail,
2882 just return the next insn so we can start over from that point. */
2883 if (code != BARRIER && code != NOTE && !is_cond_candidate (insn))
2884 return NEXT_INSN (insn);
2885
2886 /* Remember the last real insn before the label (ie end of block 2). */
2887 if (code == JUMP_INSN || code == INSN)
2888 {
2889 blk_size ++;
2890 end_blk_2_insn = insn;
2891 }
2892 }
2893
2894 if (!insn)
2895 return insn;
2896
2897 /* It is possible for this optimization to slow performance if the blocks
2898 are long. This really depends upon whether the branch is likely taken
2899 or not. If the branch is taken, we slow performance in many cases. But,
2900 if the branch is not taken, we always help performance (for a single
2901 block, but for a double block (i.e. when the optimization is re-applied)
2902 this is not true since the 'right thing' depends on the overall length of
2903 the collapsed block). As a compromise, don't apply this optimization on
 2904    blocks larger than size 2 (unlikely for the mcore) when speed is important.
 2905    The best threshold depends on the latencies of the instructions (i.e.,
2906 the branch penalty). */
2907 if (optimize > 1 && blk_size > 2)
2908 return insn;
2909
2910 /* At this point, we've found the start of block 3 and we know that
2911 it is the destination of the branch from block 1. Also, all
2912 instructions in the block 2 are conditionalizable. So, apply the
2913 conditionalization and delete the branch. */
2914 start_blk_3_lab = insn;
2915
2916 for (insn = NEXT_INSN (end_blk_1_br); insn != start_blk_3_lab;
2917 insn = NEXT_INSN (insn))
2918 {
2919 rtx newinsn;
2920
2921 if (INSN_DELETED_P (insn))
2922 continue;
2923
2924 /* Try to form a conditional variant of the instruction and emit it. */
2925 if ((newinsn = emit_new_cond_insn (insn, cond)))
2926 {
2927 if (end_blk_2_insn == insn)
2928 end_blk_2_insn = newinsn;
2929
2930 insn = newinsn;
2931 }
2932 }
2933
2934 /* Note whether we will delete the label starting blk 3 when the jump
2935 gets deleted. If so, we want to re-apply this optimization at the
2936 last real instruction right before the label. */
2937 if (LABEL_NUSES (start_blk_3_lab) == 1)
2938 {
2939 start_blk_3_lab = 0;
2940 }
2941
2942 /* ??? we probably should redistribute the death notes for this insn, esp.
2943 the death of cc, but it doesn't really matter this late in the game.
2944 The peepholes all use is_dead() which will find the correct death
2945 regardless of whether there is a note. */
2946 delete_insn (end_blk_1_br);
2947
2948 if (! start_blk_3_lab)
2949 return end_blk_2_insn;
2950
 2951   /* Return the insn right after the label at the start of block 3.  */
2952 return NEXT_INSN (start_blk_3_lab);
2953}
2954
2955/* Apply the conditionalization of blocks optimization. This is the
2956 outer loop that traverses through the insns scanning for a branch
2957 that signifies an opportunity to apply the optimization. Note that
2958 this optimization is applied late. If we could apply it earlier,
 2959    say before cse 2, it may expose more optimization opportunities,
 2960    but the payback probably isn't really worth the effort (we'd have
 2961    to update all reg/flow/notes/links/etc to make it work - and stick it
 2962    in before cse 2).  */
 2963
2964static void
2965conditionalize_optimization (first)
2966 rtx first;
2967{
2968 rtx insn;
2969
2970 for (insn = first; insn; insn = conditionalize_block (insn))
2971 continue;
2972}
2973
2974static int saved_warn_return_type = -1;
2975static int saved_warn_return_type_count = 0;
2976
2977/* This function is called from toplev.c before reorg. */
2978
2979void
2980mcore_dependent_reorg (first)
2981 rtx first;
2982{
2983 /* Reset this variable. */
2984 current_function_anonymous_args = 0;
2985
 2986   /* Restore the warn_return_type if it has been altered.  */
2987 if (saved_warn_return_type != -1)
2988 {
2989 /* Only restore the value if we have reached another function.
2990 The test of warn_return_type occurs in final_function () in
2991 c-decl.c a long time after the code for the function is generated,
2992 so we need a counter to tell us when we have finished parsing that
2993 function and can restore the flag. */
2994 if (--saved_warn_return_type_count == 0)
2995 {
2996 warn_return_type = saved_warn_return_type;
2997 saved_warn_return_type = -1;
2998 }
2999 }
3000
3001 if (optimize == 0)
3002 return;
3003
3004 /* Conditionalize blocks where we can. */
3005 conditionalize_optimization (first);
3006
3007 /* Literal pool generation is now pushed off until the assembler. */
3008}
3009
3010\f
3011/* Return the reg_class to use when reloading the rtx X into the class
3012 CLASS. */
3013
3014/* If the input is (PLUS REG CONSTANT) representing a stack slot address,
3015 then we want to restrict the class to LRW_REGS since that ensures that
3016 will be able to safely load the constant.
3017
3018 If the input is a constant that should be loaded with mvir1, then use
3019 ONLYR1_REGS.
3020
3021 ??? We don't handle the case where we have (PLUS REG CONSTANT) and
3022 the constant should be loaded with mvir1, because that can lead to cases
3023 where an instruction needs two ONLYR1_REGS reloads. */
3024enum reg_class
3025mcore_reload_class (x, class)
3026 rtx x;
3027 enum reg_class class;
3028{
3029 enum reg_class new_class;
3030
3031 if (class == GENERAL_REGS && CONSTANT_P (x)
3032 && (GET_CODE (x) != CONST_INT
3033 || ( ! CONST_OK_FOR_I (INTVAL (x))
3034 && ! CONST_OK_FOR_M (INTVAL (x))
3035 && ! CONST_OK_FOR_N (INTVAL (x)))))
3036 new_class = LRW_REGS;
3037 else
3038 new_class = class;
3039
3040 return new_class;
3041}
3042
3043/* Tell me if a pair of reg/subreg rtx's actually refer to the same
 3044    register.  Note that the current version doesn't worry about whether
 3045    they are the same mode or not (e.g., a QImode in r2 matches an HImode
 3046    in r2, which matches an SImode in r2).  Might think in the future about whether
3047 we want to be able to say something about modes. */
3048int
3049mcore_is_same_reg (x, y)
3050 rtx x;
3051 rtx y;
3052{
3053 /* Strip any and all of the subreg wrappers. */
3054 while (GET_CODE (x) == SUBREG)
3055 x = SUBREG_REG (x);
3056
3057 while (GET_CODE (y) == SUBREG)
3058 y = SUBREG_REG (y);
3059
 3060   if (GET_CODE (x) == REG && GET_CODE (y) == REG && REGNO (x) == REGNO (y))
3061 return 1;
3062
3063 return 0;
3064}
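/* Editor's note (illustrative): with x = (subreg:QI (reg:SI 2) 0) and
   y = (reg:SI 2), every subreg wrapper is stripped before the REGNO
   comparison, so mcore_is_same_reg (x, y) returns 1 even though the
   modes differ.  */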
3065
3066/* Called to register all of our global variables with the garbage
3067 collector. */
3068static void
3069mcore_add_gc_roots ()
3070{
3071 ggc_add_rtx_root (&arch_compare_op0, 1);
3072 ggc_add_rtx_root (&arch_compare_op1, 1);
3073}
3074
3075void
3076mcore_override_options ()
3077{
3078 if (mcore_stack_increment_string)
3079 {
3080 mcore_stack_increment = atoi (mcore_stack_increment_string);
3081
3082 if (mcore_stack_increment < 0
3083 || (mcore_stack_increment == 0
3084 && (mcore_stack_increment_string[0] != '0'
3085 || mcore_stack_increment_string[1] != 0)))
 3086       error ("invalid option `-mstack-increment=%s'",
 3087 	     mcore_stack_increment_string);
3088 }
3089
3090 /* Only the m340 supports little endian code. */
3091 if (TARGET_LITTLE_END && ! TARGET_M340)
3092 target_flags |= M340_BIT;
3093
3094 mcore_add_gc_roots ();
3095}
3096\f
3097int
3098mcore_must_pass_on_stack (mode, type)
3099 enum machine_mode mode ATTRIBUTE_UNUSED;
3100 tree type;
3101{
3102 if (type == NULL)
3103 return 0;
3104
 3105   /* If the argument can have its address taken, it must
3106 be placed on the stack. */
3107 if (TREE_ADDRESSABLE (type))
3108 return 1;
3109
3110 return 0;
3111}
3112
3113/* Compute the number of word sized registers needed to
3114 hold a function argument of mode MODE and type TYPE. */
3115int
3116mcore_num_arg_regs (mode, type)
3117 enum machine_mode mode;
3118 tree type;
3119{
3120 int size;
3121
3122 if (MUST_PASS_IN_STACK (mode, type))
3123 return 0;
3124
3125 if (type && mode == BLKmode)
3126 size = int_size_in_bytes (type);
3127 else
3128 size = GET_MODE_SIZE (mode);
3129
3130 return ROUND_ADVANCE (size);
3131}
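/* Illustrative sketch (editor's addition, not part of the build):
   assuming ROUND_ADVANCE rounds a byte count up to whole 4-byte words,
   a 10 byte BLKmode argument needs 3 registers.  */
#if 0
static int
words_needed (int size_in_bytes)
{
  return (size_in_bytes + 4 - 1) / 4;	/* words_needed (10) == 3 */
}
#endif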
3132
3133static rtx
3134handle_structs_in_regs (mode, type, reg)
3135 enum machine_mode mode;
3136 tree type;
3137 int reg;
3138{
3139 int size;
3140
 3141   /* The MCore ABI defines that a structure whose size is not a whole multiple
 3142      of the word size is passed packed into registers (or spilled onto the stack if
3143 not enough registers are available) with the last few bytes of the
3144 structure being packed, left-justified, into the last register/stack slot.
3145 GCC handles this correctly if the last word is in a stack slot, but we
3146 have to generate a special, PARALLEL RTX if the last word is in an
3147 argument register. */
3148 if (type
3149 && TYPE_MODE (type) == BLKmode
3150 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
3151 && (size = int_size_in_bytes (type)) > UNITS_PER_WORD
3152 && (size % UNITS_PER_WORD != 0)
3153 && (reg + mcore_num_arg_regs (mode, type) <= (FIRST_PARM_REG + NPARM_REGS)))
3154 {
3155 rtx arg_regs [NPARM_REGS];
3156 int nregs;
3157 rtx result;
3158 rtvec rtvec;
3159
3160 for (nregs = 0; size > 0; size -= UNITS_PER_WORD)
3161 {
3162 arg_regs [nregs] =
3163 gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, reg ++),
3164 GEN_INT (nregs * UNITS_PER_WORD));
3165 nregs ++;
3166 }
3167
3168 /* We assume here that NPARM_REGS == 6. The assert checks this. */
 3169       assert (ARRAY_SIZE (arg_regs) == 6);
3170 rtvec = gen_rtvec (nregs, arg_regs[0], arg_regs[1], arg_regs[2],
3171 arg_regs[3], arg_regs[4], arg_regs[5]);
3172
3173 result = gen_rtx_PARALLEL (mode, rtvec);
3174 return result;
3175 }
3176
3177 return gen_rtx_REG (mode, reg);
3178}
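/* Editor's note (illustrative): for a 6 byte structure starting in the
   first argument register (hardware r2), the PARALLEL built above has
   the shape

     (parallel:BLK [(expr_list (reg:SI 2) (const_int 0))
		    (expr_list (reg:SI 3) (const_int 4))])

   i.e. two word-sized pieces at byte offsets 0 and 4, with the odd
   2 bytes packed, left-justified, into the second register.  */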
3179
3180rtx
3181mcore_function_value (valtype, func)
3182 tree valtype;
3183 tree func ATTRIBUTE_UNUSED;
3184{
3185 enum machine_mode mode;
3186 int unsigned_p;
3187
3188 mode = TYPE_MODE (valtype);
3189
3190 PROMOTE_MODE (mode, unsigned_p, NULL);
3191
3192 return handle_structs_in_regs (mode, valtype, FIRST_RET_REG);
3193}
3194
3195/* Define where to put the arguments to a function.
3196 Value is zero to push the argument on the stack,
3197 or a hard register in which to store the argument.
3198
3199 MODE is the argument's machine mode.
3200 TYPE is the data type of the argument (as a tree).
3201 This is null for libcalls where that information may
3202 not be available.
3203 CUM is a variable of type CUMULATIVE_ARGS which gives info about
3204 the preceding args and about the function being called.
3205 NAMED is nonzero if this argument is a named parameter
3206 (otherwise it is an extra parameter matching an ellipsis).
3207
3208 On MCore the first args are normally in registers
3209 and the rest are pushed. Any arg that starts within the first
3210 NPARM_REGS words is at least partially passed in a register unless
3211 its data type forbids. */
3212rtx
3213mcore_function_arg (cum, mode, type, named)
3214 CUMULATIVE_ARGS cum;
3215 enum machine_mode mode;
3216 tree type;
3217 int named;
3218{
3219 int arg_reg;
3220
3221 if (! named)
3222 return 0;
3223
3224 if (MUST_PASS_IN_STACK (mode, type))
3225 return 0;
3226
3227 arg_reg = ROUND_REG (cum, mode);
3228
3229 if (arg_reg < NPARM_REGS)
3230 return handle_structs_in_regs (mode, type, FIRST_PARM_REG + arg_reg);
3231
3232 return 0;
3233}
3234
3235/* Implements the FUNCTION_ARG_PARTIAL_NREGS macro.
3236 Returns the number of argument registers required to hold *part* of
3237 a parameter of machine mode MODE and type TYPE (which may be NULL if
 3238    the type is not known).  If the argument fits entirely in the argument
3239 registers, or entirely on the stack, then 0 is returned. CUM is the
3240 number of argument registers already used by earlier parameters to
3241 the function. */
3242int
3243mcore_function_arg_partial_nregs (cum, mode, type, named)
3244 CUMULATIVE_ARGS cum;
3245 enum machine_mode mode;
3246 tree type;
3247 int named;
3248{
3249 int reg = ROUND_REG (cum, mode);
3250
3251 if (named == 0)
3252 return 0;
3253
3254 if (MUST_PASS_IN_STACK (mode, type))
3255 return 0;
3256
3257 /* REG is not the *hardware* register number of the register that holds
3258 the argument, it is the *argument* register number. So for example,
3259 the first argument to a function goes in argument register 0, which
3260 translates (for the MCore) into hardware register 2. The second
3261 argument goes into argument register 1, which translates into hardware
3262 register 3, and so on. NPARM_REGS is the number of argument registers
3263 supported by the target, not the maximum hardware register number of
3264 the target. */
3265 if (reg >= NPARM_REGS)
3266 return 0;
3267
3268 /* If the argument fits entirely in registers, return 0. */
3269 if (reg + mcore_num_arg_regs (mode, type) <= NPARM_REGS)
3270 return 0;
3271
3272 /* The argument overflows the number of available argument registers.
3273 Compute how many argument registers have not yet been assigned to
3274 hold an argument. */
3275 reg = NPARM_REGS - reg;
3276
3277 /* Return partially in registers and partially on the stack. */
3278 return reg;
3279}
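/* Editor's note (illustrative): with the 6 argument registers assumed
   elsewhere in this file, a two-word argument arriving after 5
   registers are already in use gets one register for its first word,
   so the function above returns 6 - 5 = 1; the remaining word goes on
   the stack.  */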
3280\f
3281/* Return non-zero if SYMBOL is marked as being dllexport'd. */
3282int
3283mcore_dllexport_name_p (symbol)
 3284      const char * symbol;
3285{
3286 return symbol[0] == '@' && symbol[1] == 'e' && symbol[2] == '.';
3287}
3288
3289/* Return non-zero if SYMBOL is marked as being dllimport'd. */
3290int
3291mcore_dllimport_name_p (symbol)
 3292      const char * symbol;
3293{
3294 return symbol[0] == '@' && symbol[1] == 'i' && symbol[2] == '.';
3295}
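/* Editor's note (illustrative): the encodings these predicates test are
   the ones attached by mcore_mark_dllexport and mcore_mark_dllimport
   below, e.g.

     "@e.foo"        exported foo
     "@i.__imp_foo"  imported foo (via the __imp_ indirection cell)  */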
3296
3297/* Mark a DECL as being dllexport'd. */
3298static void
3299mcore_mark_dllexport (decl)
3300 tree decl;
3301{
 3302   const char * oldname;
3303 char * newname;
3304 rtx rtlname;
3305 tree idp;
3306
3307 rtlname = XEXP (DECL_RTL (decl), 0);
3308
3309 if (GET_CODE (rtlname) == SYMBOL_REF)
3310 oldname = XSTR (rtlname, 0);
3311 else if ( GET_CODE (rtlname) == MEM
3312 && GET_CODE (XEXP (rtlname, 0)) == SYMBOL_REF)
3313 oldname = XSTR (XEXP (rtlname, 0), 0);
3314 else
3315 abort ();
3316
3317 if (mcore_dllexport_name_p (oldname))
3318 return; /* Already done. */
3319
3320 newname = alloca (strlen (oldname) + 4);
3321 sprintf (newname, "@e.%s", oldname);
3322
3323 /* We pass newname through get_identifier to ensure it has a unique
3324 address. RTL processing can sometimes peek inside the symbol ref
3325 and compare the string's addresses to see if two symbols are
3326 identical. */
3327 /* ??? At least I think that's why we do this. */
3328 idp = get_identifier (newname);
3329
3330 XEXP (DECL_RTL (decl), 0) =
3331 gen_rtx (SYMBOL_REF, Pmode, IDENTIFIER_POINTER (idp));
3332}
3333
3334/* Mark a DECL as being dllimport'd. */
3335static void
3336mcore_mark_dllimport (decl)
3337 tree decl;
3338{
 3339   const char * oldname;
3340 char * newname;
3341 tree idp;
3342 rtx rtlname;
3343 rtx newrtl;
3344
3345 rtlname = XEXP (DECL_RTL (decl), 0);
3346
3347 if (GET_CODE (rtlname) == SYMBOL_REF)
3348 oldname = XSTR (rtlname, 0);
3349 else if ( GET_CODE (rtlname) == MEM
3350 && GET_CODE (XEXP (rtlname, 0)) == SYMBOL_REF)
3351 oldname = XSTR (XEXP (rtlname, 0), 0);
3352 else
3353 abort ();
3354
3355 if (mcore_dllexport_name_p (oldname))
3356 abort (); /* This shouldn't happen. */
3357 else if (mcore_dllimport_name_p (oldname))
3358 return; /* Already done. */
3359
3360 /* ??? One can well ask why we're making these checks here,
3361 and that would be a good question. */
3362
3363 /* Imported variables can't be initialized. */
3364 if (TREE_CODE (decl) == VAR_DECL
3365 && !DECL_VIRTUAL_P (decl)
3366 && DECL_INITIAL (decl))
3367 {
3368 error_with_decl (decl, "initialized variable `%s' is marked dllimport");
3369 return;
3370 }
3371
3372 /* `extern' needn't be specified with dllimport.
3373 Specify `extern' now and hope for the best. Sigh. */
3374 if (TREE_CODE (decl) == VAR_DECL
3375 /* ??? Is this test for vtables needed? */
3376 && !DECL_VIRTUAL_P (decl))
3377 {
3378 DECL_EXTERNAL (decl) = 1;
3379 TREE_PUBLIC (decl) = 1;
3380 }
3381
3382 newname = alloca (strlen (oldname) + 11);
3383 sprintf (newname, "@i.__imp_%s", oldname);
3384
3385 /* We pass newname through get_identifier to ensure it has a unique
3386 address. RTL processing can sometimes peek inside the symbol ref
3387 and compare the string's addresses to see if two symbols are
3388 identical. */
3389 /* ??? At least I think that's why we do this. */
3390 idp = get_identifier (newname);
3391
3392 newrtl = gen_rtx (MEM, Pmode,
3393 gen_rtx (SYMBOL_REF, Pmode,
3394 IDENTIFIER_POINTER (idp)));
3395 XEXP (DECL_RTL (decl), 0) = newrtl;
3396}
3397
3398static int
3399mcore_dllexport_p (decl)
3400 tree decl;
3401{
3402 if ( TREE_CODE (decl) != VAR_DECL
3403 && TREE_CODE (decl) != FUNCTION_DECL)
3404 return 0;
3405
 3406   return lookup_attribute ("dllexport", DECL_ATTRIBUTES (decl)) != 0;
3407}
3408
3409static int
3410mcore_dllimport_p (decl)
3411 tree decl;
3412{
3413 if ( TREE_CODE (decl) != VAR_DECL
3414 && TREE_CODE (decl) != FUNCTION_DECL)
3415 return 0;
3416
 3417   return lookup_attribute ("dllimport", DECL_ATTRIBUTES (decl)) != 0;
3418}
3419
3420/* Cover function to implement ENCODE_SECTION_INFO. */
3421void
 3422 mcore_encode_section_info (decl, first)
 3423      tree decl;
 3424      int first ATTRIBUTE_UNUSED;
3425{
3426 /* This bit is copied from arm.h. */
3427 if (optimize > 0
3428 && TREE_CONSTANT (decl)
3429 && (!flag_writable_strings || TREE_CODE (decl) != STRING_CST))
3430 {
3431 rtx rtl = (TREE_CODE_CLASS (TREE_CODE (decl)) != 'd'
3432 ? TREE_CST_RTL (decl) : DECL_RTL (decl));
3433 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
3434 }
3435
3436 /* Mark the decl so we can tell from the rtl whether the object is
3437 dllexport'd or dllimport'd. */
3438 if (mcore_dllexport_p (decl))
3439 mcore_mark_dllexport (decl);
3440 else if (mcore_dllimport_p (decl))
3441 mcore_mark_dllimport (decl);
3442
3443 /* It might be that DECL has already been marked as dllimport, but
3444 a subsequent definition nullified that. The attribute is gone
3445 but DECL_RTL still has @i.__imp_foo. We need to remove that. */
3446 else if ((TREE_CODE (decl) == FUNCTION_DECL
3447 || TREE_CODE (decl) == VAR_DECL)
3448 && DECL_RTL (decl) != NULL_RTX
3449 && GET_CODE (DECL_RTL (decl)) == MEM
3450 && GET_CODE (XEXP (DECL_RTL (decl), 0)) == MEM
3451 && GET_CODE (XEXP (XEXP (DECL_RTL (decl), 0), 0)) == SYMBOL_REF
3452 && mcore_dllimport_name_p (XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0)))
3453 {
 3454       const char * oldname = XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0);
3455 tree idp = get_identifier (oldname + 9);
3456 rtx newrtl = gen_rtx (SYMBOL_REF, Pmode, IDENTIFIER_POINTER (idp));
3457
3458 XEXP (DECL_RTL (decl), 0) = newrtl;
3459
3460 /* We previously set TREE_PUBLIC and DECL_EXTERNAL.
3461 ??? We leave these alone for now. */
3462 }
3463}
3464
3465/* MCore specific attribute support.
3466 dllexport - for exporting a function/variable that will live in a dll
3467 dllimport - for importing a function/variable from a dll
3468 naked - do not create a function prologue/epilogue. */
 3469
3470const struct attribute_spec mcore_attribute_table[] =
3471{
3472 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
3473 { "dllexport", 0, 0, true, false, false, NULL },
3474 { "dllimport", 0, 0, true, false, false, NULL },
3475 { "naked", 0, 0, true, false, false, mcore_handle_naked_attribute },
3476 { NULL, 0, 0, false, false, false, NULL }
3477};
 3478
3479/* Handle a "naked" attribute; arguments as in
3480 struct attribute_spec.handler. */
3481static tree
3482mcore_handle_naked_attribute (node, name, args, flags, no_add_attrs)
3483 tree *node;
3484 tree name;
3485 tree args ATTRIBUTE_UNUSED;
3486 int flags ATTRIBUTE_UNUSED;
3487 bool *no_add_attrs;
3488{
3489 if (TREE_CODE (*node) == FUNCTION_DECL)
3490 {
3491 /* PR14310 - don't complain about lack of return statement
3492 in naked functions. The solution here is a gross hack
3493 but this is the only way to solve the problem without
3494 adding a new feature to GCC. I did try submitting a patch
3495 that would add such a new feature, but it was (rightfully)
3496 rejected on the grounds that it was creeping featurism,
3497 so hence this code. */
3498 if (warn_return_type)
3499 {
3500 saved_warn_return_type = warn_return_type;
3501 warn_return_type = 0;
3502 saved_warn_return_type_count = 2;
3503 }
3504 else if (saved_warn_return_type_count)
3505 saved_warn_return_type_count = 2;
3506 }
3507 else
3508 {
3509 warning ("`%s' attribute only applies to functions",
3510 IDENTIFIER_POINTER (name));
3511 *no_add_attrs = true;
3512 }
3513
 3514   return NULL_TREE;
3515}
3516
3517/* ??? It looks like this is PE specific? Oh well, this is what the
3518 old code did as well. */
 3519
 3520 static void
3521mcore_unique_section (decl, reloc)
3522 tree decl;
3523 int reloc ATTRIBUTE_UNUSED;
3524{
3525 int len;
 3526   const char * name;
 3527   char * string;
 3528   const char * prefix;
3529
3530 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
3531
3532 /* Strip off any encoding in name. */
3533 STRIP_NAME_ENCODING (name, name);
3534
3535 /* The object is put in, for example, section .text$foo.
3536 The linker will then ultimately place them in .text
3537 (everything from the $ on is stripped). */
3538 if (TREE_CODE (decl) == FUNCTION_DECL)
3539 prefix = ".text$";
 3540   /* For compatibility with EPOC, we ignore the fact that the
3541 section might have relocs against it. */
3542 else if (DECL_READONLY_SECTION (decl, 0))
3543 prefix = ".rdata$";
3544 else
3545 prefix = ".data$";
3546
3547 len = strlen (name) + strlen (prefix);
3548 string = alloca (len + 1);
3549
3550 sprintf (string, "%s%s", prefix, name);
3551
3552 DECL_SECTION_NAME (decl) = build_string (len, string);
3553}
3554
3555int
3556mcore_naked_function_p ()
3557{
 3558   return lookup_attribute ("naked", DECL_ATTRIBUTES (current_function_decl)) != NULL_TREE;
 3559 }
 3560
 3561 #ifdef OBJECT_FORMAT_ELF
 3562 static void
 3563 mcore_asm_named_section (name, flags)
 3564      const char *name;
 3565      unsigned int flags ATTRIBUTE_UNUSED;
 3566 {
 3567   fprintf (asm_out_file, "\t.section %s\n", name);
 3568 }
 3569 #endif /* OBJECT_FORMAT_ELF */