/* Output routines for Motorola MCore processor
   Copyright (C) 1993, 1999, 2000, 2001, 2002, 2003, 2004
   Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 2, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to
   the Free Software Foundation, 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "assert.h"
#include "mcore.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "obstack.h"
#include "expr.h"
#include "reload.h"
#include "recog.h"
#include "function.h"
#include "ggc.h"
#include "toplev.h"
#include "target.h"
#include "target-def.h"
/* Maximum size we are allowed to grow the stack in a single operation.
   If we want more, we must do it in increments of at most this size.
   If this value is 0, we don't check at all.  */
int mcore_stack_increment = STACK_UNITS_MAXSTEP;

/* For dumping information about frame sizes.  */
char * mcore_current_function_name = 0;
long   mcore_current_compilation_timestamp = 0;

/* Global variables for machine-dependent things.  */

/* Saved operands from the last compare to use when we generate an scc
   or bcc insn.  */
rtx arch_compare_op0;
rtx arch_compare_op1;

/* Provides the class number of the smallest class containing
   reg number.  */
const int regno_reg_class[FIRST_PSEUDO_REGISTER] =
{
  GENERAL_REGS, ONLYR1_REGS,  LRW_REGS,     LRW_REGS,
  LRW_REGS,     LRW_REGS,     LRW_REGS,     LRW_REGS,
  LRW_REGS,     LRW_REGS,     LRW_REGS,     LRW_REGS,
  LRW_REGS,     LRW_REGS,     LRW_REGS,     GENERAL_REGS,
  GENERAL_REGS, C_REGS,       NO_REGS,      NO_REGS,
};

/* Provide reg_class from a letter such as appears in the machine
   description.  */
const enum reg_class reg_class_from_letter[] =
{
  /* a */ LRW_REGS,    /* b */ ONLYR1_REGS, /* c */ C_REGS,  /* d */ NO_REGS,
  /* e */ NO_REGS,     /* f */ NO_REGS,     /* g */ NO_REGS, /* h */ NO_REGS,
  /* i */ NO_REGS,     /* j */ NO_REGS,     /* k */ NO_REGS, /* l */ NO_REGS,
  /* m */ NO_REGS,     /* n */ NO_REGS,     /* o */ NO_REGS, /* p */ NO_REGS,
  /* q */ NO_REGS,     /* r */ GENERAL_REGS,/* s */ NO_REGS, /* t */ NO_REGS,
  /* u */ NO_REGS,     /* v */ NO_REGS,     /* w */ NO_REGS, /* x */ ALL_REGS,
  /* y */ NO_REGS,     /* z */ NO_REGS
};

struct mcore_frame
{
  int arg_size;			/* Stdarg spills (bytes).  */
  int reg_size;			/* Non-volatile reg saves (bytes).  */
  int reg_mask;			/* Non-volatile reg saves.  */
  int local_size;		/* Locals.  */
  int outbound_size;		/* Arg overflow on calls out.  */
  int pad_outbound;
  int pad_local;
  int pad_reg;
  /* Describe the steps we'll use to grow it.  */
#define MAX_STACK_GROWS 4	/* Gives us some spare space.  */
  int growth[MAX_STACK_GROWS];
  int arg_offset;
  int reg_offset;
  int reg_growth;
  int local_growth;
};
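/* The frame described by the structure above looks roughly like this,
   going from high addresses down towards the stack pointer (a sketch
   inferred from layout_mcore_frame and the elimination offsets below):

	stdarg / incoming argument spill area  (arg_size)
	saved non-volatile registers           (reg_size, pad_reg)
	locals                                 (local_size, pad_local)
	outgoing argument overflow             (outbound_size, pad_outbound)

   The growth[] array records the individual stack adjustments used to
   buy the frame in at most MAX_STACK_GROWS steps.  */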
typedef enum
{
  COND_NO,
  COND_MOV_INSN,
  COND_CLR_INSN,
  COND_INC_INSN,
  COND_DEC_INSN,
  COND_BRANCH_INSN
}
cond_type;

static void        output_stack_adjust          (int, int);
static int         calc_live_regs               (int *);
static int         try_constant_tricks          (long, int *, int *);
static const char *output_inline_const          (enum machine_mode, rtx *);
static void        layout_mcore_frame           (struct mcore_frame *);
static void        mcore_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode, tree, int *, int);
static cond_type   is_cond_candidate            (rtx);
static rtx         emit_new_cond_insn           (rtx, int);
static rtx         conditionalize_block         (rtx);
static void        conditionalize_optimization  (void);
static void        mcore_reorg                  (void);
static rtx         handle_structs_in_regs       (enum machine_mode, tree, int);
static void        mcore_mark_dllexport         (tree);
static void        mcore_mark_dllimport         (tree);
static int         mcore_dllexport_p            (tree);
static int         mcore_dllimport_p            (tree);
const struct attribute_spec mcore_attribute_table[];
static tree        mcore_handle_naked_attribute (tree *, tree, tree, int, bool *);
#ifdef OBJECT_FORMAT_ELF
static void        mcore_asm_named_section      (const char *, unsigned int, tree);
#endif
static void        mcore_unique_section         (tree, int);
static void        mcore_encode_section_info    (tree, rtx, int);
static const char *mcore_strip_name_encoding    (const char *);
static int         mcore_const_costs            (rtx, RTX_CODE);
static int         mcore_and_cost               (rtx);
static int         mcore_ior_cost               (rtx);
static bool        mcore_rtx_costs              (rtx, int, int, int *);
static void        mcore_external_libcall       (rtx);
static bool        mcore_return_in_memory       (tree, tree);
static int         mcore_arg_partial_bytes      (CUMULATIVE_ARGS *,
                                                 enum machine_mode,
                                                 tree, bool);
/* Initialize the GCC target structure.  */
#undef  TARGET_ASM_EXTERNAL_LIBCALL
#define TARGET_ASM_EXTERNAL_LIBCALL  mcore_external_libcall

#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef  TARGET_MERGE_DECL_ATTRIBUTES
#define TARGET_MERGE_DECL_ATTRIBUTES  merge_dllimport_decl_attributes
#endif

#ifdef OBJECT_FORMAT_ELF
#undef  TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef  TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#endif

#undef  TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE  mcore_attribute_table
#undef  TARGET_ASM_UNIQUE_SECTION
#define TARGET_ASM_UNIQUE_SECTION  mcore_unique_section
#undef  TARGET_ASM_FUNCTION_RODATA_SECTION
#define TARGET_ASM_FUNCTION_RODATA_SECTION  default_no_function_rodata_section
#undef  TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS  TARGET_DEFAULT
#undef  TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO  mcore_encode_section_info
#undef  TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING  mcore_strip_name_encoding
#undef  TARGET_RTX_COSTS
#define TARGET_RTX_COSTS  mcore_rtx_costs
#undef  TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST  hook_int_rtx_0
#undef  TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG  mcore_reorg

#undef  TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS  hook_bool_tree_true
#undef  TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN  hook_bool_tree_true
#undef  TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES  hook_bool_tree_true

#undef  TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY  mcore_return_in_memory
#undef  TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK  must_pass_in_stack_var_size
#undef  TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE  hook_pass_by_reference_must_pass_in_stack
#undef  TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES  mcore_arg_partial_bytes

#undef  TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS  mcore_setup_incoming_varargs

struct gcc_target targetm = TARGET_INITIALIZER;
/* Adjust the stack: grow it (DIRECTION < 0) or shrink it
   (DIRECTION > 0) by SIZE bytes.  */

static void
output_stack_adjust (int direction, int size)
{
  /* If extending stack a lot, we do it incrementally.  */
  if (direction < 0 && size > mcore_stack_increment && mcore_stack_increment > 0)
    {
      rtx tmp = gen_rtx_REG (SImode, 1);
      rtx memref;

      emit_insn (gen_movsi (tmp, GEN_INT (mcore_stack_increment)));
      do
        {
          emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp));
          memref = gen_rtx_MEM (SImode, stack_pointer_rtx);
          MEM_VOLATILE_P (memref) = 1;
          emit_insn (gen_movsi (memref, stack_pointer_rtx));
          size -= mcore_stack_increment;
        }
      while (size > mcore_stack_increment);

      /* SIZE is now the residual for the last adjustment,
         which doesn't require a probe.  */
    }

  if (size)
    {
      rtx insn;
      rtx val = GEN_INT (size);

      if (size > 32)
        {
          rtx nval = gen_rtx_REG (SImode, 1);
          emit_insn (gen_movsi (nval, val));
          val = nval;
        }

      if (direction > 0)
        insn = gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, val);
      else
        insn = gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, val);

      emit_insn (insn);
    }
}
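/* For example, if mcore_stack_increment were 4096, growing the stack by
   10000 bytes would make two probed 4096-byte adjustments through the
   loop above, followed by a final unprobed adjustment of 1808 bytes.  */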
/* Work out the registers which need to be saved,
   both as a mask and a count.  */

static int
calc_live_regs (int * count)
{
  int reg;
  int live_regs_mask = 0;

  * count = 0;

  for (reg = 0; reg < FIRST_PSEUDO_REGISTER; reg++)
    {
      if (regs_ever_live[reg] && !call_used_regs[reg])
        {
          (*count)++;
          live_regs_mask |= (1 << reg);
        }
    }

  return live_regs_mask;
}

/* Print the operand address in X to the stream.  */

void
mcore_print_operand_address (FILE * stream, rtx x)
{
  switch (GET_CODE (x))
    {
    case REG:
      fprintf (stream, "(%s)", reg_names[REGNO (x)]);
      break;

    case PLUS:
      {
        rtx base = XEXP (x, 0);
        rtx index = XEXP (x, 1);

        if (GET_CODE (base) != REG)
          {
            /* Ensure that BASE is a register (one of them must be).  */
            rtx temp = base;
            base = index;
            index = temp;
          }

        switch (GET_CODE (index))
          {
          case CONST_INT:
            fprintf (stream, "(%s," HOST_WIDE_INT_PRINT_DEC ")",
                     reg_names[REGNO (base)], INTVAL (index));
            break;

          default:
            debug_rtx (x);
            abort ();
          }
      }
      break;

    default:
      output_addr_const (stream, x);
      break;
    }
}
/* Print operand X (an rtx) in assembler syntax to file STREAM
   according to modifier CODE.

   'R' print the next register or memory location along, i.e. the lsw in
       a double word value
   'O' print a constant without the #
   'M' print a constant as its negative
   'P' print log2 of a power of two
   'Q' print log2 of an inverse of a power of two
   'U' print register for ldm/stm instruction
   'X' print byte number for xtrbN instruction.  */

void
mcore_print_operand (FILE * stream, rtx x, int code)
{
  switch (code)
    {
    case 'N':
      if (INTVAL (x) == -1)
        fprintf (asm_out_file, "32");
      else
        fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) + 1));
      break;
    case 'P':
      fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x)));
      break;
    case 'Q':
      fprintf (asm_out_file, "%d", exact_log2 (~INTVAL (x)));
      break;
    case 'O':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
      break;
    case 'M':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, - INTVAL (x));
      break;
    case 'R':
      /* Next location along in memory or register.  */
      switch (GET_CODE (x))
        {
        case REG:
          fputs (reg_names[REGNO (x) + 1], (stream));
          break;
        case MEM:
          mcore_print_operand_address
            (stream, XEXP (adjust_address (x, SImode, 4), 0));
          break;
        default:
          abort ();
        }
      break;
    case 'U':
      fprintf (asm_out_file, "%s-%s", reg_names[REGNO (x)],
               reg_names[REGNO (x) + 3]);
      break;
    case 'x':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
      break;
    case 'X':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, 3 - INTVAL (x) / 8);
      break;

    default:
      switch (GET_CODE (x))
        {
        case REG:
          fputs (reg_names[REGNO (x)], (stream));
          break;
        case MEM:
          output_address (XEXP (x, 0));
          break;
        default:
          output_addr_const (stream, x);
          break;
        }
      break;
    }
}
/* What does a constant cost?  */

static int
mcore_const_costs (rtx exp, enum rtx_code code)
{
  int val = INTVAL (exp);

  /* Easy constants.  */
  if (   CONST_OK_FOR_I (val)
      || CONST_OK_FOR_M (val)
      || CONST_OK_FOR_N (val)
      || (code == PLUS && CONST_OK_FOR_L (val)))
    return 1;
  else if (code == AND
           && (   CONST_OK_FOR_M (~val)
               || CONST_OK_FOR_N (~val)))
    return 2;
  else if (code == PLUS
           && (   CONST_OK_FOR_I (-val)
               || CONST_OK_FOR_M (-val)
               || CONST_OK_FOR_N (-val)))
    return 2;

  return 5;
}

/* What does an and instruction cost?  We compute this because immediates
   may have been relaxed, and we want to ensure that CSE will eliminate
   the relaxed immediates; otherwise we'll get bad code (multiple reloads
   of the same constant).  */

static int
mcore_and_cost (rtx x)
{
  int val;

  if (GET_CODE (XEXP (x, 1)) != CONST_INT)
    return 2;

  val = INTVAL (XEXP (x, 1));

  /* Do it directly.  */
  if (CONST_OK_FOR_K (val) || CONST_OK_FOR_M (~val))
    return 2;
  /* Takes one instruction to load.  */
  else if (const_ok_for_mcore (val))
    return 3;
  /* Takes two instructions to load.  */
  else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
    return 4;

  /* Takes a lrw to load.  */
  return 5;
}
/* What does an or cost - see and_cost().  */

static int
mcore_ior_cost (rtx x)
{
  int val;

  if (GET_CODE (XEXP (x, 1)) != CONST_INT)
    return 2;

  val = INTVAL (XEXP (x, 1));

  /* Do it directly with bseti.  */
  if (CONST_OK_FOR_M (val))
    return 2;
  /* Takes one instruction to load.  */
  else if (const_ok_for_mcore (val))
    return 3;
  /* Takes two instructions to load.  */
  else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
    return 4;

  /* Takes a lrw to load.  */
  return 5;
}

static bool
mcore_rtx_costs (rtx x, int code, int outer_code, int * total)
{
  switch (code)
    {
    case CONST_INT:
      *total = mcore_const_costs (x, outer_code);
      return true;
    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 5;
      return true;
    case CONST_DOUBLE:
      *total = 10;
      return true;

    case AND:
      *total = COSTS_N_INSNS (mcore_and_cost (x));
      return true;

    case IOR:
      *total = COSTS_N_INSNS (mcore_ior_cost (x));
      return true;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
    case FLOAT:
    case FIX:
      *total = COSTS_N_INSNS (100);
      return true;

    default:
      return false;
    }
}
/* Check to see if a comparison against a constant can be made more efficient
   by incrementing/decrementing the constant to get one that is more efficient
   to load.  */

int
mcore_modify_comparison (enum rtx_code code)
{
  rtx op1 = arch_compare_op1;

  if (GET_CODE (op1) == CONST_INT)
    {
      int val = INTVAL (op1);

      switch (code)
        {
        case LE:
          if (CONST_OK_FOR_J (val + 1))
            {
              arch_compare_op1 = GEN_INT (val + 1);
              return 1;
            }
          break;

        default:
          break;
        }
    }

  return 0;
}
/* Prepare the operands for a comparison.  */

rtx
mcore_gen_compare_reg (enum rtx_code code)
{
  rtx op0 = arch_compare_op0;
  rtx op1 = arch_compare_op1;
  rtx cc_reg = gen_rtx_REG (CCmode, CC_REG);

  if (CONSTANT_P (op1) && GET_CODE (op1) != CONST_INT)
    op1 = force_reg (SImode, op1);

  /* cmpnei: 0-31 (K immediate)
     cmplti: 1-32 (J immediate, 0 using btsti x,31).  */
  switch (code)
    {
    case EQ:	/* Use inverted condition, cmpne.  */
      code = NE;
      /* Drop through.  */

    case NE:	/* Use normal condition, cmpne.  */
      if (GET_CODE (op1) == CONST_INT && ! CONST_OK_FOR_K (INTVAL (op1)))
        op1 = force_reg (SImode, op1);
      break;

    case LE:	/* Use inverted condition, reversed cmplt.  */
      code = GT;
      /* Drop through.  */

    case GT:	/* Use normal condition, reversed cmplt.  */
      if (GET_CODE (op1) == CONST_INT)
        op1 = force_reg (SImode, op1);
      break;

    case GE:	/* Use inverted condition, cmplt.  */
      code = LT;
      /* Drop through.  */

    case LT:	/* Use normal condition, cmplt.  */
      if (GET_CODE (op1) == CONST_INT
          /* Covered by btsti x,31.  */
          && INTVAL (op1) != 0
          && ! CONST_OK_FOR_J (INTVAL (op1)))
        op1 = force_reg (SImode, op1);
      break;

    case GTU:	/* Use inverted condition, cmple.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) == 0)
        {
          /* Unsigned > 0 is the same as != 0, but we need
             to invert the condition, so we want to set
             code = EQ.  This cannot be done however, as the
             mcore does not support such a test.  Instead we
             cope with this case in the "bgtu" pattern itself
             so we should never reach this point.  */
          /* code = EQ; */
          abort ();
          break;
        }
      code = LEU;
      /* Drop through.  */

    case LEU:	/* Use normal condition, reversed cmphs.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
        op1 = force_reg (SImode, op1);
      break;

    case LTU:	/* Use inverted condition, cmphs.  */
      code = GEU;
      /* Drop through.  */

    case GEU:	/* Use normal condition, cmphs.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
        op1 = force_reg (SImode, op1);
      break;

    default:
      break;
    }

  emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
                          gen_rtx_fmt_ee (code, CCmode, op0, op1)));

  return cc_reg;
}
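/* Note the immediate ranges above: an NE test against 17 can be emitted
   directly as a cmpnei, but an NE test against 100 exceeds the 0..31 K
   immediate range, so the constant is forced into a register first.  */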
int
mcore_symbolic_address_p (rtx x)
{
  switch (GET_CODE (x))
    {
    case SYMBOL_REF:
    case LABEL_REF:
      return 1;
    case CONST:
      x = XEXP (x, 0);
      return (   (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
               || GET_CODE (XEXP (x, 0)) == LABEL_REF)
              && GET_CODE (XEXP (x, 1)) == CONST_INT);
    default:
      return 0;
    }
}
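/* So (symbol_ref "foo"), (label_ref 23), and a (const (plus ...)) of a
   symbol or label plus a const_int all count as symbolic addresses,
   while e.g. a bare const_int does not.  */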
/* Functions to output assembly code for a function call.  */

char *
mcore_output_call (rtx operands[], int index)
{
  static char buffer[20];
  rtx addr = operands[index];

  if (REG_P (addr))
    {
      if (TARGET_CG_DATA)
        {
          if (mcore_current_function_name == 0)
            abort ();

          ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
                              "unknown", 1);
        }

      sprintf (buffer, "jsr\t%%%d", index);
    }
  else
    {
      if (TARGET_CG_DATA)
        {
          if (mcore_current_function_name == 0)
            abort ();

          if (GET_CODE (addr) != SYMBOL_REF)
            abort ();

          ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
                              XSTR (addr, 0), 0);
        }

      sprintf (buffer, "jbsr\t%%%d", index);
    }

  return buffer;
}
/* Can we load a constant with a single instruction?  */

int
const_ok_for_mcore (int value)
{
  if (value >= 0 && value <= 127)
    return 1;

  /* Try exact power of two.  */
  if ((value & (value - 1)) == 0)
    return 1;

  /* Try exact power of two - 1.  */
  if ((value & (value + 1)) == 0)
    return 1;

  return 0;
}
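/* For example: 100 fits in a movi; 0x1000 is an exact power of two
   (loadable with a bgeni); 0xffff is a power of two minus one (loadable
   with a bmaski); but 1000 matches none of these patterns and needs
   more than one instruction.  */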
/* Can we load a constant inline with up to 2 instructions?  */

int
mcore_const_ok_for_inline (long value)
{
  int x, y;

  return try_constant_tricks (value, & x, & y) > 0;
}

/* Are we loading the constant using a not?  */

int
mcore_const_trick_uses_not (long value)
{
  int x, y;

  return try_constant_tricks (value, & x, & y) == 2;
}
/* Try tricks to load a constant inline and return the trick number on
   success (0 means the constant is not inlinable).

   0: not inlinable
   1: single instruction (do the usual thing)
   2: single insn followed by a 'not'
   3: single insn followed by an addi
   4: single insn followed by a subi
   5: single insn followed by rsubi
   6: single insn followed by bseti
   7: single insn followed by bclri
   8: single insn followed by rotli
   9: single insn followed by lsli
   10: single insn followed by ixh
   11: single insn followed by ixw.  */

static int
try_constant_tricks (long value, int * x, int * y)
{
  int i;
  unsigned bit, shf, rot;

  if (const_ok_for_mcore (value))
    return 1;	/* Do the usual thing.  */

  if (TARGET_HARDLIT)
    {
      if (const_ok_for_mcore (~value))
        {
          *x = ~value;
          return 2;
        }

      for (i = 1; i <= 32; i++)
        {
          if (const_ok_for_mcore (value - i))
            {
              *x = value - i;
              *y = i;

              return 3;
            }

          if (const_ok_for_mcore (value + i))
            {
              *x = value + i;
              *y = i;

              return 4;
            }
        }

      bit = 0x80000000L;

      for (i = 0; i <= 31; i++)
        {
          if (const_ok_for_mcore (i - value))
            {
              *x = i - value;
              *y = i;

              return 5;
            }

          if (const_ok_for_mcore (value & ~bit))
            {
              *y = bit;
              *x = value & ~bit;

              return 6;
            }

          if (const_ok_for_mcore (value | bit))
            {
              *y = ~bit;
              *x = value | bit;

              return 7;
            }

          bit >>= 1;
        }

      shf = value;
      rot = value;

      for (i = 1; i < 31; i++)
        {
          int c;

          /* MCore has rotate left.  */
          c = rot << 31;
          rot >>= 1;
          rot &= 0x7FFFFFFF;
          rot |= c;	/* Simulate rotate.  */

          if (const_ok_for_mcore (rot))
            {
              *y = i;
              *x = rot;

              return 8;
            }

          if (shf & 1)
            shf = 0;	/* Can't use logical shift, low order bit is one.  */

          shf >>= 1;

          if (shf != 0 && const_ok_for_mcore (shf))
            {
              *y = i;
              *x = shf;

              return 9;
            }
        }

      if ((value % 3) == 0 && const_ok_for_mcore (value / 3))
        {
          *x = value / 3;

          return 10;
        }

      if ((value % 5) == 0 && const_ok_for_mcore (value / 5))
        {
          *x = value / 5;

          return 11;
        }
    }

  return 0;
}
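/* A worked example: 1000 is not loadable directly, but 1000 + 23 == 1023
   == 2^10 - 1 is, so the first loop above returns trick 4 with
   *x = 1023 and *y = 23; output_inline_const () then emits a bmaski to
   load 1023 followed by a subi of 23.  */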
/* Check whether reg is dead at first.  This is done by searching ahead
   for either the next use (i.e., reg is live), a death note, or a set of
   reg.  Don't just use dead_or_set_p() since reload does not always mark
   deaths (especially if PRESERVE_DEATH_NOTES_REGNO_P is not defined).  We
   can ignore subregs by extracting the actual register.  BRC  */

int
mcore_is_dead (rtx first, rtx reg)
{
  rtx insn;

  /* For mcore, subregs can't live independently of their parent regs.  */
  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  /* Dies immediately.  */
  if (dead_or_set_p (first, reg))
    return 1;

  /* Look for conclusive evidence of live/death, otherwise we have
     to assume that it is live.  */
  for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == JUMP_INSN)
        return 0;	/* We lose track, assume it is alive.  */

      else if (GET_CODE (insn) == CALL_INSN)
        {
          /* Calls might use it for the target or register parms.  */
          if (reg_referenced_p (reg, PATTERN (insn))
              || find_reg_fusage (insn, USE, reg))
            return 0;
          else if (dead_or_set_p (insn, reg))
            return 1;
        }
      else if (GET_CODE (insn) == INSN)
        {
          if (reg_referenced_p (reg, PATTERN (insn)))
            return 0;
          else if (dead_or_set_p (insn, reg))
            return 1;
        }
    }

  /* No conclusive evidence either way; we cannot take the chance
     that control flow hid the use from us -- "I'm not dead yet".  */
  return 0;
}
/* Count the number of ones in mask.  */

int
mcore_num_ones (int mask)
{
  /* A trick to count set bits recently posted on comp.compilers.  */
  mask =  (mask >> 1 & 0x55555555) + (mask & 0x55555555);
  mask = ((mask >> 2) & 0x33333333) + (mask & 0x33333333);
  mask = ((mask >> 4) + mask) & 0x0f0f0f0f;
  mask = ((mask >> 8) + mask);

  return (mask + (mask >> 16)) & 0xff;
}
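/* The above is the classic parallel bit count: each step adds adjacent
   groups of bits in place (sixteen 2-bit sums, then eight 4-bit sums,
   then four 8-bit sums, and so on), so e.g. mcore_num_ones (0x00f000f1)
   returns 9.  */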
/* Count the number of zeros in mask.  */

int
mcore_num_zeros (int mask)
{
  return 32 - mcore_num_ones (mask);
}

/* Determine byte being masked.  */

int
mcore_byte_offset (unsigned int mask)
{
  if (mask == 0x00ffffffL)
    return 0;
  else if (mask == 0xff00ffffL)
    return 1;
  else if (mask == 0xffff00ffL)
    return 2;
  else if (mask == 0xffffff00L)
    return 3;

  return -1;
}

/* Determine halfword being masked.  */

int
mcore_halfword_offset (unsigned int mask)
{
  if (mask == 0x0000ffffL)
    return 0;
  else if (mask == 0xffff0000L)
    return 1;

  return -1;
}
/* Output a series of bseti's corresponding to mask.  */

const char *
mcore_output_bseti (rtx dst, int mask)
{
  rtx out_operands[2];
  int bit;

  out_operands[0] = dst;

  for (bit = 0; bit < 32; bit++)
    {
      if ((mask & 0x1) == 0x1)
        {
          out_operands[1] = GEN_INT (bit);

          output_asm_insn ("bseti\t%0,%1", out_operands);
        }
      mask >>= 1;
    }

  return "";
}
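/* E.g. a MASK of 0x00000009 emits "bseti dst,0" followed by
   "bseti dst,3".  */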
/* Output a series of bclri's corresponding to mask.  */

const char *
mcore_output_bclri (rtx dst, int mask)
{
  rtx out_operands[2];
  int bit;

  out_operands[0] = dst;

  for (bit = 0; bit < 32; bit++)
    {
      if ((mask & 0x1) == 0x0)
        {
          out_operands[1] = GEN_INT (bit);

          output_asm_insn ("bclri\t%0,%1", out_operands);
        }

      mask >>= 1;
    }

  return "";
}
/* Output a conditional move of two constants that are +/- 1 within each
   other.  See the "movtK" patterns in mcore.md.  I'm not sure this is
   really worth the effort.  */

const char *
mcore_output_cmov (rtx operands[], int cmp_t, const char * test)
{
  int load_value;
  int adjust_value;
  rtx out_operands[4];

  out_operands[0] = operands[0];

  /* Check to see which constant is loadable.  */
  if (const_ok_for_mcore (INTVAL (operands[1])))
    {
      out_operands[1] = operands[1];
      out_operands[2] = operands[2];
    }
  else if (const_ok_for_mcore (INTVAL (operands[2])))
    {
      out_operands[1] = operands[2];
      out_operands[2] = operands[1];

      /* Complement test since constants are swapped.  */
      cmp_t = (cmp_t == 0);
    }
  load_value   = INTVAL (out_operands[1]);
  adjust_value = INTVAL (out_operands[2]);

  /* First output the test if folded into the pattern.  */

  if (test)
    output_asm_insn (test, operands);

  /* Load the constant - for now, only support constants that can be
     generated with a single instruction.  Maybe add general inlinable
     constants later (this will increase the # of patterns since the
     instruction sequence has a different length attribute).  */
  if (load_value >= 0 && load_value <= 127)
    output_asm_insn ("movi\t%0,%1", out_operands);
  else if ((load_value & (load_value - 1)) == 0)
    output_asm_insn ("bgeni\t%0,%P1", out_operands);
  else if ((load_value & (load_value + 1)) == 0)
    output_asm_insn ("bmaski\t%0,%N1", out_operands);

  /* Output the constant adjustment.  */
  if (load_value > adjust_value)
    {
      if (cmp_t)
        output_asm_insn ("decf\t%0", out_operands);
      else
        output_asm_insn ("dect\t%0", out_operands);
    }
  else
    {
      if (cmp_t)
        output_asm_insn ("incf\t%0", out_operands);
      else
        output_asm_insn ("inct\t%0", out_operands);
    }

  return "";
}
/* Outputs the peephole for moving a constant that gets not'ed followed
   by an and (i.e. combine the not and the and into andn).  BRC  */

const char *
mcore_output_andn (rtx insn ATTRIBUTE_UNUSED, rtx operands[])
{
  int x, y;
  rtx out_operands[3];
  const char * load_op;
  char buf[256];

  if (try_constant_tricks (INTVAL (operands[1]), &x, &y) != 2)
    abort ();

  out_operands[0] = operands[0];
  out_operands[1] = GEN_INT (x);
  out_operands[2] = operands[2];

  if (x >= 0 && x <= 127)
    load_op = "movi\t%0,%1";

  /* Try exact power of two.  */
  else if ((x & (x - 1)) == 0)
    load_op = "bgeni\t%0,%P1";

  /* Try exact power of two - 1.  */
  else if ((x & (x + 1)) == 0)
    load_op = "bmaski\t%0,%N1";

  else
    load_op = "BADMOVI\t%0,%1";

  sprintf (buf, "%s\n\tandn\t%%2,%%0", load_op);
  output_asm_insn (buf, out_operands);

  return "";
}
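/* For instance, an AND with the constant 0xffffff00 can use this
   peephole: the complement 0xff is 2^8 - 1 and so loadable with a
   single bmaski, letting us emit that load plus an andn instead of
   pulling the original constant from the literal pool.  */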
/* Output an inline constant.  */

static const char *
output_inline_const (enum machine_mode mode, rtx operands[])
{
  int x = 0, y = 0;
  int trick_no;
  rtx out_operands[3];
  char buf[256];
  char load_op[256];
  const char *dst_fmt;
  int value;

  value = INTVAL (operands[1]);

  if ((trick_no = try_constant_tricks (value, &x, &y)) == 0)
    {
      /* lrw's are handled separately: Large inlinable constants
         never get turned into lrw's.  Our caller uses try_constant_tricks
         to back off to an lrw rather than calling this routine.  */
      abort ();
    }

  if (trick_no == 1)
    x = value;

  /* operands: 0 = dst, 1 = load immed., 2 = immed. adjustment.  */
  out_operands[0] = operands[0];
  out_operands[1] = GEN_INT (x);

  if (trick_no > 2)
    out_operands[2] = GEN_INT (y);

  /* Select dst format based on mode.  */
  if (mode == DImode && (! TARGET_LITTLE_END))
    dst_fmt = "%R0";
  else
    dst_fmt = "%0";

  if (x >= 0 && x <= 127)
    sprintf (load_op, "movi\t%s,%%1", dst_fmt);

  /* Try exact power of two.  */
  else if ((x & (x - 1)) == 0)
    sprintf (load_op, "bgeni\t%s,%%P1", dst_fmt);

  /* Try exact power of two - 1.  */
  else if ((x & (x + 1)) == 0)
    sprintf (load_op, "bmaski\t%s,%%N1", dst_fmt);

  else
    sprintf (load_op, "BADMOVI\t%s,%%1", dst_fmt);

  switch (trick_no)
    {
    case 1:
      strcpy (buf, load_op);
      break;
    case 2:   /* not */
      sprintf (buf, "%s\n\tnot\t%s\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 3:   /* add */
      sprintf (buf, "%s\n\taddi\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 4:   /* sub */
      sprintf (buf, "%s\n\tsubi\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 5:   /* rsub */
      /* Never happens unless -mrsubi, see try_constant_tricks().  */
      sprintf (buf, "%s\n\trsubi\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 6:   /* bset */
      sprintf (buf, "%s\n\tbseti\t%s,%%P2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 7:   /* bclr */
      sprintf (buf, "%s\n\tbclri\t%s,%%Q2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 8:   /* rotl */
      sprintf (buf, "%s\n\trotli\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 9:   /* lsl */
      sprintf (buf, "%s\n\tlsli\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 10:  /* ixh */
      sprintf (buf, "%s\n\tixh\t%s,%s\t// %d 0x%x", load_op, dst_fmt, dst_fmt, value, value);
      break;
    case 11:  /* ixw */
      sprintf (buf, "%s\n\tixw\t%s,%s\t// %d 0x%x", load_op, dst_fmt, dst_fmt, value, value);
      break;
    default:
      return "";
    }

  output_asm_insn (buf, out_operands);

  return "";
}
/* Output a move of a word or less value.  */

const char *
mcore_output_move (rtx insn ATTRIBUTE_UNUSED, rtx operands[],
                   enum machine_mode mode ATTRIBUTE_UNUSED)
{
  rtx dst = operands[0];
  rtx src = operands[1];

  if (GET_CODE (dst) == REG)
    {
      if (GET_CODE (src) == REG)
        {
          if (REGNO (src) == CC_REG)            /* r-c */
            return "mvc\t%0";
          else
            return "mov\t%0,%1";                /* r-r */
        }
      else if (GET_CODE (src) == MEM)
        {
          if (GET_CODE (XEXP (src, 0)) == LABEL_REF)
            return "lrw\t%0,[%1]";              /* a-R */
          else
            switch (GET_MODE (src))             /* r-m */
              {
              case SImode:
                return "ldw\t%0,%1";
              case HImode:
                return "ld.h\t%0,%1";
              case QImode:
                return "ld.b\t%0,%1";
              default:
                abort ();
              }
        }
      else if (GET_CODE (src) == CONST_INT)
        {
          int x, y;

          if (CONST_OK_FOR_I (INTVAL (src)))        /* r-I */
            return "movi\t%0,%1";
          else if (CONST_OK_FOR_M (INTVAL (src)))   /* r-M */
            return "bgeni\t%0,%P1\t// %1 %x1";
          else if (CONST_OK_FOR_N (INTVAL (src)))   /* r-N */
            return "bmaski\t%0,%N1\t// %1 %x1";
          else if (try_constant_tricks (INTVAL (src), &x, &y))  /* R-P */
            return output_inline_const (SImode, operands);  /* 1-2 insns */
          else
            return "lrw\t%0,%x1\t// %1";	/* Get it from literal pool.  */
        }
      else
        return "lrw\t%0, %1";                   /* Into the literal pool.  */
    }
  else if (GET_CODE (dst) == MEM)               /* m-r */
    switch (GET_MODE (dst))
      {
      case SImode:
        return "stw\t%1,%0";
      case HImode:
        return "st.h\t%1,%0";
      case QImode:
        return "st.b\t%1,%0";
      default:
        abort ();
      }

  abort ();
}
/* Return a sequence of instructions to perform DI or DF move.
   Since the MCORE cannot move a DI or DF in one instruction, we have
   to take care when we see overlapping source and dest registers.  */

const char *
mcore_output_movedouble (rtx operands[], enum machine_mode mode ATTRIBUTE_UNUSED)
{
  rtx dst = operands[0];
  rtx src = operands[1];

  if (GET_CODE (dst) == REG)
    {
      if (GET_CODE (src) == REG)
        {
          int dstreg = REGNO (dst);
          int srcreg = REGNO (src);

          /* Ensure the second source not overwritten.  */
          if (srcreg + 1 == dstreg)
            return "mov %R0,%R1\n\tmov %0,%1";
          else
            return "mov %0,%1\n\tmov %R0,%R1";
        }
      else if (GET_CODE (src) == MEM)
        {
          rtx memexp = XEXP (src, 0);
          int dstreg = REGNO (dst);
          int basereg = -1;

          if (GET_CODE (memexp) == LABEL_REF)
            return "lrw\t%0,[%1]\n\tlrw\t%R0,[%R1]";
          else if (GET_CODE (memexp) == REG)
            basereg = REGNO (memexp);
          else if (GET_CODE (memexp) == PLUS)
            {
              if (GET_CODE (XEXP (memexp, 0)) == REG)
                basereg = REGNO (XEXP (memexp, 0));
              else if (GET_CODE (XEXP (memexp, 1)) == REG)
                basereg = REGNO (XEXP (memexp, 1));
              else
                abort ();
            }
          else
            abort ();

          /* ??? length attribute is wrong here.  */
          if (dstreg == basereg)
            {
              /* Just load them in reverse order.  */
              return "ldw\t%R0,%R1\n\tldw\t%0,%1";

              /* XXX: alternative: move basereg to basereg+1
                 and then fall through.  */
            }
          else
            return "ldw\t%0,%1\n\tldw\t%R0,%R1";
        }
      else if (GET_CODE (src) == CONST_INT)
        {
          if (TARGET_LITTLE_END)
            {
              if (CONST_OK_FOR_I (INTVAL (src)))
                output_asm_insn ("movi %0,%1", operands);
              else if (CONST_OK_FOR_M (INTVAL (src)))
                output_asm_insn ("bgeni %0,%P1", operands);
              else if (INTVAL (src) == -1)
                output_asm_insn ("bmaski %0,32", operands);
              else if (CONST_OK_FOR_N (INTVAL (src)))
                output_asm_insn ("bmaski %0,%N1", operands);
              else
                abort ();

              if (INTVAL (src) < 0)
                return "bmaski %R0,32";
              else
                return "movi %R0,0";
            }
          else
            {
              if (CONST_OK_FOR_I (INTVAL (src)))
                output_asm_insn ("movi %R0,%1", operands);
              else if (CONST_OK_FOR_M (INTVAL (src)))
                output_asm_insn ("bgeni %R0,%P1", operands);
              else if (INTVAL (src) == -1)
                output_asm_insn ("bmaski %R0,32", operands);
              else if (CONST_OK_FOR_N (INTVAL (src)))
                output_asm_insn ("bmaski %R0,%N1", operands);
              else
                abort ();

              if (INTVAL (src) < 0)
                return "bmaski %0,32";
              else
                return "movi %0,0";
            }
        }
      else
        abort ();
    }
  else if (GET_CODE (dst) == MEM && GET_CODE (src) == REG)
    return "stw\t%1,%0\n\tstw\t%R1,%R0";
  else
    abort ();
}
/* Predicates used by the templates.  */

int
mcore_arith_S_operand (rtx op)
{
  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (~INTVAL (op)))
    return 1;

  return 0;
}
/* Expand insert bit field.  BRC  */

int
mcore_expand_insv (rtx operands[])
{
  int width = INTVAL (operands[1]);
  int posn = INTVAL (operands[2]);
  int mask;
  rtx mreg, sreg, ereg;

  /* To get width 1 insv, the test in store_bit_field() (expmed.c, line 191)
     for width==1 must be removed.  Look around line 368.  This is something
     we really want the md part to do.  */
  if (width == 1 && GET_CODE (operands[3]) == CONST_INT)
    {
      /* Do directly with bseti or bclri.  */
      /* RBE: 2/97 consider only low bit of constant.  */
      if ((INTVAL (operands[3]) & 1) == 0)
        {
          mask = ~(1 << posn);
          emit_insn (gen_rtx_SET (SImode, operands[0],
                                  gen_rtx_AND (SImode, operands[0],
                                               GEN_INT (mask))));
        }
      else
        {
          mask = 1 << posn;
          emit_insn (gen_rtx_SET (SImode, operands[0],
                                  gen_rtx_IOR (SImode, operands[0],
                                               GEN_INT (mask))));
        }

      return 1;
    }

  /* Look at some bit-field placements that we aren't interested
     in handling ourselves, unless specifically directed to do so.  */
  if (! TARGET_W_FIELD)
    return 0;		/* Generally, give up about now.  */

  if (width == 8 && posn % 8 == 0)
    /* Byte sized and aligned; let caller break it up.  */
    return 0;

  if (width == 16 && posn % 16 == 0)
    /* Short sized and aligned; let caller break it up.  */
    return 0;

  /* The general case - we can do this a little bit better than what the
     machine independent part tries.  This will get rid of all the subregs
     that mess up constant folding in combine when working with relaxed
     immediates.  */

  /* If setting the entire field, do it directly.  */
  if (GET_CODE (operands[3]) == CONST_INT
      && INTVAL (operands[3]) == ((1 << width) - 1))
    {
      mreg = force_reg (SImode, GEN_INT (INTVAL (operands[3]) << posn));
      emit_insn (gen_rtx_SET (SImode, operands[0],
                              gen_rtx_IOR (SImode, operands[0], mreg)));
      return 1;
    }

  /* Generate the clear mask.  */
  mreg = force_reg (SImode, GEN_INT (~(((1 << width) - 1) << posn)));

  /* Clear the field, to overlay it later with the source.  */
  emit_insn (gen_rtx_SET (SImode, operands[0],
                          gen_rtx_AND (SImode, operands[0], mreg)));

  /* If the source is constant 0, we've nothing to add back.  */
  if (GET_CODE (operands[3]) == CONST_INT && INTVAL (operands[3]) == 0)
    return 1;

  /* XXX: Should we worry about more games with constant values?
     We've covered the high profile: set/clear single-bit and many-bit
     fields.  How often do we see "arbitrary bit pattern" constants?  */
  sreg = copy_to_mode_reg (SImode, operands[3]);

  /* Extract src as same width as dst (needed for signed values).  We
     always have to do this since we widen everything to SImode.
     We don't have to mask if we're shifting this up against the
     MSB of the register (e.g., the shift will push out any hi-order
     bits).  */
  if (width + posn != (int) GET_MODE_SIZE (SImode))
    {
      ereg = force_reg (SImode, GEN_INT ((1 << width) - 1));
      emit_insn (gen_rtx_SET (SImode, sreg,
                              gen_rtx_AND (SImode, sreg, ereg)));
    }

  /* Insert source value in dest.  */
  if (posn != 0)
    emit_insn (gen_rtx_SET (SImode, sreg,
                            gen_rtx_ASHIFT (SImode, sreg, GEN_INT (posn))));

  emit_insn (gen_rtx_SET (SImode, operands[0],
                          gen_rtx_IOR (SImode, operands[0], sreg)));

  return 1;
}
/* ??? Block move stuff stolen from m88k.  This code has not been
   verified for correctness.  */

/* Emit code to perform a block move.  Choose the best method.

   OPERANDS[0] is the destination.
   OPERANDS[1] is the source.
   OPERANDS[2] is the size.
   OPERANDS[3] is the alignment safe to use.  */

/* Emit code to perform a block move with an offset sequence of ldw/st
   instructions (..., ldw 0, stw 1, ldw 1, stw 0, ...).  SIZE and ALIGN are
   known constants.  DEST and SRC are registers.  OFFSET is the known
   starting point for the output pattern.  */
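/* The copy loop in block_move_sequence below is software-pipelined:
   it alternates between two temporaries (PHASE flips between 0 and 1),
   issuing the load for the next piece before storing the piece loaded
   on the previous iteration.  */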
static const enum machine_mode mode_from_align[] =
{
  VOIDmode, QImode, HImode, VOIDmode, SImode,
};

static void
block_move_sequence (rtx dst_mem, rtx src_mem, int size, int align)
{
  rtx temp[2];
  enum machine_mode mode[2];
  int amount[2];
  bool active[2];
  int phase = 0;
  int next;
  int offset_ld = 0;
  int offset_st = 0;
  rtx x;

  x = XEXP (dst_mem, 0);
  if (!REG_P (x))
    {
      x = force_reg (Pmode, x);
      dst_mem = replace_equiv_address (dst_mem, x);
    }

  x = XEXP (src_mem, 0);
  if (!REG_P (x))
    {
      x = force_reg (Pmode, x);
      src_mem = replace_equiv_address (src_mem, x);
    }

  active[0] = active[1] = false;

  do
    {
      next = phase;
      phase ^= 1;

      if (size > 0)
        {
          int next_amount;

          next_amount = (size >= 4 ? 4 : (size >= 2 ? 2 : 1));
          next_amount = MIN (next_amount, align);

          amount[next] = next_amount;
          mode[next] = mode_from_align[next_amount];
          temp[next] = gen_reg_rtx (mode[next]);

          x = adjust_address (src_mem, mode[next], offset_ld);
          emit_insn (gen_rtx_SET (VOIDmode, temp[next], x));

          offset_ld += next_amount;
          size -= next_amount;
          active[next] = true;
        }

      if (active[phase])
        {
          active[phase] = false;

          x = adjust_address (dst_mem, mode[phase], offset_st);
          emit_insn (gen_rtx_SET (VOIDmode, x, temp[phase]));

          offset_st += amount[phase];
        }
    }
  while (active[next]);
}
bool
mcore_expand_block_move (rtx *operands)
{
  HOST_WIDE_INT align, bytes, max;

  if (GET_CODE (operands[2]) != CONST_INT)
    return false;

  bytes = INTVAL (operands[2]);
  align = INTVAL (operands[3]);

  if (bytes <= 0)
    return false;
  if (align > 4)
    align = 4;

  switch (align)
    {
    case 4:
      if (bytes & 1)
        max = 4*4;
      else if (bytes & 3)
        max = 8*4;
      else
        max = 16*4;
      break;
    case 2:
      max = 4*2;
      break;
    case 1:
      max = 4*1;
      break;
    default:
      abort ();
    }

  if (bytes <= max)
    {
      block_move_sequence (operands[0], operands[1], bytes, align);
      return true;
    }

  return false;
}
/* Code to generate prologue and epilogue sequences.  */
static int number_of_regs_before_varargs;

/* Set by TARGET_SETUP_INCOMING_VARARGS to indicate to prolog that this is
   for a varargs function.  */
static int current_function_anonymous_args;

#define STACK_BYTES (STACK_BOUNDARY/BITS_PER_UNIT)
#define STORE_REACH (64)	/* Maximum displace of word store + 4.  */
#define ADDI_REACH (32)		/* Maximum addi operand.  */
static void
layout_mcore_frame (struct mcore_frame * infp)
{
  int n;
  unsigned int i;
  int nbytes;
  int regarg;
  int localregarg;
  int localreg;
  int outbounds;
  unsigned int growths;
  int step;

  /* Might have to spill bytes to re-assemble a big argument that
     was passed partially in registers and partially on the stack.  */
  nbytes = current_function_pretend_args_size;

  /* Determine how much space for spilled anonymous args (e.g., stdarg).  */
  if (current_function_anonymous_args)
    nbytes += (NPARM_REGS - number_of_regs_before_varargs) * UNITS_PER_WORD;

  infp->arg_size = nbytes;

  /* How much space to save non-volatile registers we stomp.  */
  infp->reg_mask = calc_live_regs (& n);
  infp->reg_size = n * 4;

  /* And the rest of it... locals and space for overflowed outbounds.  */
  infp->local_size = get_frame_size ();
  infp->outbound_size = current_function_outgoing_args_size;

  /* Make sure we have a whole number of words for the locals.  */
  if (infp->local_size % STACK_BYTES)
    infp->local_size = (infp->local_size + STACK_BYTES - 1) & ~ (STACK_BYTES -1);

  /* Only thing we know we have to pad is the outbound space, since
     we've aligned our locals assuming that base of locals is aligned.  */
  infp->pad_local = 0;
  infp->pad_reg = 0;
  infp->pad_outbound = 0;
  if (infp->outbound_size % STACK_BYTES)
    infp->pad_outbound = STACK_BYTES - (infp->outbound_size % STACK_BYTES);

  /* Now we see how we want to stage the prologue so that it does
     the most appropriate stack growth and register saves to either:
     (1) run fast,
     (2) reduce instruction space, or
     (3) reduce stack space.  */
  for (i = 0; i < ARRAY_SIZE (infp->growth); i++)
    infp->growth[i] = 0;

  regarg      = infp->reg_size + infp->arg_size;
  localregarg = infp->local_size + regarg;
  localreg    = infp->local_size + infp->reg_size;
  outbounds   = infp->outbound_size + infp->pad_outbound;
  growths     = 0;

  /* XXX: Consider one where we consider localregarg + outbound too! */

  /* Frame of <= 32 bytes and using stm would get <= 2 registers.
     Use stw's with offsets and buy the frame in one shot.  */
  if (localregarg <= ADDI_REACH
      && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
    {
      /* Make sure we'll be aligned.  */
      if (localregarg % STACK_BYTES)
        infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);

      step = localregarg + infp->pad_reg;
      infp->reg_offset = infp->local_size;

      if (outbounds + step <= ADDI_REACH && !frame_pointer_needed)
        {
          step += outbounds;
          infp->reg_offset += outbounds;
          outbounds = 0;
        }

      infp->arg_offset = step - 4;
      infp->growth[growths++] = step;
      infp->reg_growth = growths;
      infp->local_growth = growths;

      /* If we haven't already folded it in.  */
      if (outbounds)
        infp->growth[growths++] = outbounds;

      goto finish;
    }

  /* Frame can't be done with a single subi, but can be done with 2
     insns.  If the 'stm' is getting <= 2 registers, we use stw's and
     shift some of the stack purchase into the first subi, so both are
     single instructions.  */
  if (localregarg <= STORE_REACH
      && (infp->local_size > ADDI_REACH)
      && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
    {
      int all;

      /* Make sure we'll be aligned; use either pad_reg or pad_local.  */
      if (localregarg % STACK_BYTES)
        infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);

      all = localregarg + infp->pad_reg + infp->pad_local;
      step = ADDI_REACH;	/* As much up front as we can.  */
      if (step > all)
        step = all;

      /* XXX: Consider whether step will still be aligned; we believe so.  */
      infp->arg_offset = step - 4;
      infp->growth[growths++] = step;
      infp->reg_growth = growths;
      infp->reg_offset = step - infp->pad_reg - infp->reg_size;
      all -= step;

      /* Can we fold in any space required for outbounds?  */
      if (outbounds + all <= ADDI_REACH && !frame_pointer_needed)
        {
          all += outbounds;
          outbounds = 0;
        }

      /* Get the rest of the locals in place.  */
      step = all;
      infp->growth[growths++] = step;
      infp->local_growth = growths;
      all -= step;

      assert (all == 0);

      /* Finish off if we need to do so.  */
      if (outbounds)
        infp->growth[growths++] = outbounds;

      goto finish;
    }

  /* Registers + args is nicely aligned, so we'll buy that in one shot.
     Then we buy the rest of the frame in 1 or 2 steps depending on
     whether we need a frame pointer.  */
  if ((regarg % STACK_BYTES) == 0)
    {
      infp->growth[growths++] = regarg;
      infp->reg_growth = growths;
      infp->arg_offset = regarg - 4;
      infp->reg_offset = 0;

      if (infp->local_size % STACK_BYTES)
        infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);

      step = infp->local_size + infp->pad_local;

      if (!frame_pointer_needed)
        {
          step += outbounds;
          outbounds = 0;
        }

      infp->growth[growths++] = step;
      infp->local_growth = growths;

      /* If there's any left to be done.  */
      if (outbounds)
        infp->growth[growths++] = outbounds;

      goto finish;
    }

  /* XXX: optimizations that we'll want to play with....
     -- regarg is not aligned, but it's a small number of registers;
        use some of localsize so that regarg is aligned and then
        save the registers.  */

  /* Simple encoding; plods down the stack buying the pieces as it goes.
     -- does not optimize space consumption.
     -- does not attempt to optimize instruction counts.
     -- but it is safe for all alignments.  */
  if (regarg % STACK_BYTES != 0)
    infp->pad_reg = STACK_BYTES - (regarg % STACK_BYTES);

  infp->growth[growths++] = infp->arg_size + infp->reg_size + infp->pad_reg;
  infp->reg_growth = growths;
  infp->arg_offset = infp->growth[0] - 4;
  infp->reg_offset = 0;

  if (frame_pointer_needed)
    {
      if (infp->local_size % STACK_BYTES != 0)
        infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);

      infp->growth[growths++] = infp->local_size + infp->pad_local;
      infp->local_growth = growths;

      infp->growth[growths++] = outbounds;
    }
  else
    {
      if ((infp->local_size + outbounds) % STACK_BYTES != 0)
        infp->pad_local = STACK_BYTES - ((infp->local_size + outbounds) % STACK_BYTES);

      infp->growth[growths++] = infp->local_size + infp->pad_local + outbounds;
      infp->local_growth = growths;
    }

  /* Anything else that we've forgotten?  Plus a few consistency checks.  */
 finish:
  assert (infp->reg_offset >= 0);
  assert (growths <= MAX_STACK_GROWS);

  for (i = 0; i < growths; i++)
    {
      if (infp->growth[i] % STACK_BYTES)
        {
          fprintf (stderr, "stack growth of %d is not %d aligned\n",
                   infp->growth[i], STACK_BYTES);
          abort ();
        }
    }
}
/* Define the offset between two registers, one to be eliminated, and
   the other its replacement, at the start of a routine.  */

int
mcore_initial_elimination_offset (int from, int to)
{
  int above_frame;
  int below_frame;
  struct mcore_frame fi;

  layout_mcore_frame (& fi);

  /* fp to ap */
  above_frame = fi.local_size + fi.pad_local + fi.reg_size + fi.pad_reg;
  /* sp to fp */
  below_frame = fi.outbound_size + fi.pad_outbound;

  if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
    return above_frame;

  if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    return above_frame + below_frame;

  if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    return below_frame;

  abort ();

  return 0;
}
/* Keep track of some information about varargs for the prolog.  */

static void
mcore_setup_incoming_varargs (CUMULATIVE_ARGS *args_so_far,
                              enum machine_mode mode, tree type,
                              int * ptr_pretend_size ATTRIBUTE_UNUSED,
                              int second_time ATTRIBUTE_UNUSED)
{
  current_function_anonymous_args = 1;

  /* We need to know how many argument registers are used before
     the varargs start, so that we can push the remaining argument
     registers during the prologue.  */
  number_of_regs_before_varargs = *args_so_far + mcore_num_arg_regs (mode, type);

  /* There is a bug somewhere in the arg handling code.
     Until I can find it, this workaround always pushes the
     last named argument onto the stack.  */
  number_of_regs_before_varargs = *args_so_far;

  /* The last named argument may be split between argument registers
     and the stack.  Allow for this here.  */
  if (number_of_regs_before_varargs > NPARM_REGS)
    number_of_regs_before_varargs = NPARM_REGS;
}

void
mcore_expand_prolog (void)
{
  struct mcore_frame fi;
  int space_allocated = 0;
  int growth = 0;

  /* Find out what we're doing.  */
  layout_mcore_frame (&fi);

  space_allocated = fi.arg_size + fi.reg_size + fi.local_size +
    fi.outbound_size + fi.pad_outbound + fi.pad_local + fi.pad_reg;

  if (TARGET_CG_DATA)
    {
      /* Emit a symbol for this routine's frame size.  */
      rtx x;

      x = DECL_RTL (current_function_decl);

      if (GET_CODE (x) != MEM)
        abort ();

      x = XEXP (x, 0);

      if (GET_CODE (x) != SYMBOL_REF)
        abort ();

      if (mcore_current_function_name)
        free (mcore_current_function_name);

      mcore_current_function_name = xstrdup (XSTR (x, 0));

      ASM_OUTPUT_CG_NODE (asm_out_file, mcore_current_function_name, space_allocated);

      if (current_function_calls_alloca)
        ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name, "alloca", 1);

      /* 970425: RBE:
         We're looking at how the 8-byte alignment affects stack layout
         and where we had to pad things.  This emits information we can
         extract which tells us about frame sizes and the like.  */
      fprintf (asm_out_file,
               "\t.equ\t__$frame$info$_%s_$_%d_%d_x%x_%d_%d_%d,0\n",
               mcore_current_function_name,
               fi.arg_size, fi.reg_size, fi.reg_mask,
               fi.local_size, fi.outbound_size,
               frame_pointer_needed);
    }

  if (mcore_naked_function_p ())
    return;

  /* Handle stdarg+regsaves in one shot: can't be more than 64 bytes.  */
  output_stack_adjust (-1, fi.growth[growth++]);        /* Grows it.  */

  /* If we have a parameter passed partially in regs and partially in memory,
     the registers will have been stored to memory already in function.c.  So
     we only need to do something here for varargs functions.  */
  if (fi.arg_size != 0 && current_function_pretend_args_size == 0)
    {
      int offset;
      int rn = FIRST_PARM_REG + NPARM_REGS - 1;
      int remaining = fi.arg_size;

      for (offset = fi.arg_offset; remaining >= 4; offset -= 4, rn--, remaining -= 4)
        {
          emit_insn (gen_movsi
                     (gen_rtx_MEM (SImode,
                                   plus_constant (stack_pointer_rtx, offset)),
                      gen_rtx_REG (SImode, rn)));
        }
    }

  /* Do we need another stack adjustment before we do the register saves?  */
  if (growth < fi.reg_growth)
    output_stack_adjust (-1, fi.growth[growth++]);      /* Grows it.  */

  if (fi.reg_size != 0)
    {
      int i;
      int offs = fi.reg_offset;

      for (i = 15; i >= 0; i--)
        {
          if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
            {
              int first_reg = 15;

              while (fi.reg_mask & (1 << first_reg))
                first_reg--;
              first_reg++;

              emit_insn (gen_store_multiple (gen_rtx_MEM (SImode, stack_pointer_rtx),
                                             gen_rtx_REG (SImode, first_reg),
                                             GEN_INT (16 - first_reg)));

              i -= (15 - first_reg);
              offs += (16 - first_reg) * 4;
            }
          else if (fi.reg_mask & (1 << i))
            {
              emit_insn (gen_movsi
                         (gen_rtx_MEM (SImode,
                                       plus_constant (stack_pointer_rtx, offs)),
                          gen_rtx_REG (SImode, i)));
              offs += 4;
            }
        }
    }

  /* Figure the locals + outbounds.  */
  if (frame_pointer_needed)
    {
      /* If we haven't already purchased to 'fp'.  */
      if (growth < fi.local_growth)
        output_stack_adjust (-1, fi.growth[growth++]);  /* Grows it.  */

      emit_insn (gen_movsi (frame_pointer_rtx, stack_pointer_rtx));

      /* ... and then go any remaining distance for outbounds, etc.  */
      if (fi.growth[growth])
        output_stack_adjust (-1, fi.growth[growth++]);
    }
  else
    {
      if (growth < fi.local_growth)
        output_stack_adjust (-1, fi.growth[growth++]);  /* Grows it.  */
      if (fi.growth[growth])
        output_stack_adjust (-1, fi.growth[growth++]);
    }
}
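
/* Continuing the illustrative numbers given after layout_mcore_frame
   (an assumption, not from the original source): the prologue above
   would emit output_stack_adjust (-1, 8) for the register save area,
   store the two saved registers at offsets 0 and 4 from the new sp,
   and then emit output_stack_adjust (-1, 24) to buy the locals and
   outbound argument area.  */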

void
mcore_expand_epilog (void)
{
  struct mcore_frame fi;
  int i;
  int offs;
  int growth = MAX_STACK_GROWS - 1;

  /* Find out what we're doing.  */
  layout_mcore_frame (&fi);

  if (mcore_naked_function_p ())
    return;

  /* If we had a frame pointer, restore the sp from that.  */
  if (frame_pointer_needed)
    {
      emit_insn (gen_movsi (stack_pointer_rtx, frame_pointer_rtx));
      growth = fi.local_growth - 1;
    }
  else
    {
      /* XXX: this while loop should accumulate the adjustments and do a
         single 'sell'.  */
      while (growth >= fi.local_growth)
        {
          if (fi.growth[growth] != 0)
            output_stack_adjust (1, fi.growth[growth]);
          growth--;
        }
    }

  /* Make sure we've shrunk stack back to the point where the registers
     were laid down.  This is typically 0/1 iterations.  Then pull the
     register save information back off the stack.  */
  while (growth >= fi.reg_growth)
    output_stack_adjust (1, fi.growth[growth--]);

  offs = fi.reg_offset;

  for (i = 15; i >= 0; i--)
    {
      if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
        {
          int first_reg;

          /* Find the starting register.  */
          first_reg = 15;

          while (fi.reg_mask & (1 << first_reg))
            first_reg--;

          first_reg++;

          emit_insn (gen_load_multiple (gen_rtx_REG (SImode, first_reg),
                                        gen_rtx_MEM (SImode, stack_pointer_rtx),
                                        GEN_INT (16 - first_reg)));

          i -= (15 - first_reg);
          offs += (16 - first_reg) * 4;
        }
      else if (fi.reg_mask & (1 << i))
        {
          emit_insn (gen_movsi
                     (gen_rtx_REG (SImode, i),
                      gen_rtx_MEM (SImode,
                                   plus_constant (stack_pointer_rtx, offs))));
          offs += 4;
        }
    }

  /* Give back anything else.  */
  /* XXX: Should accumulate the total and then give it back in one go.  */
  while (growth >= 0)
    output_stack_adjust (1, fi.growth[growth--]);
}

\f
/* This code is borrowed from the SH port.  */

/* The MCORE cannot load a large constant into a register; constants have to
   come from a pc relative load.  The reference of a pc relative load
   instruction must be less than 1k in front of the instruction.  This
   means that we often have to dump a constant inside a function, and
   generate code to branch around it.

   It is important to minimize this, since the branches will slow things
   down and make things bigger.

   Worst case code looks like:

        lrw     L1,r0
        br      L2
        align
   L1:  .long value
   L2:
        ..

        lrw     L3,r0
        br      L4
        align
   L3:  .long value
   L4:
        ..

   We fix this by performing a scan before scheduling, which notices which
   instructions need to have their operands fetched from the constant table
   and builds the table.

   The algorithm is:

   scan, find an instruction which needs a pcrel move.  Look forward, find the
   last barrier which is within MAX_COUNT bytes of the requirement.
   If there isn't one, make one.  Process all the instructions between
   the find and the barrier.

   In the above example, we can tell that L3 is within 1k of L1, so
   the first move can be shrunk from the 2 insn+constant sequence into
   just 1 insn, and the constant moved to L3 to make:

        lrw     L1,r0
        ..
        lrw     L3,r0
        br      L4
        align
   L3:  .long value
   L4:  .long value

   Then the second move becomes the target for the shortening process.  */

typedef struct
{
  rtx value;                    /* Value in table.  */
  rtx label;                    /* Label of value.  */
} pool_node;

/* The maximum number of constants that can fit into one pool, since
   the pc relative range is 0...1020 bytes and constants are at least 4
   bytes long.  We subtract 4 from the range to allow for the case where
   we need to add a branch/align before the constant pool.  */

#define MAX_COUNT 1016
#define MAX_POOL_SIZE (MAX_COUNT / 4)
static pool_node pool_vector[MAX_POOL_SIZE];
static int pool_size;

/* Dump out any constants accumulated in the final pass.  These
   will only be labels.  */

const char *
mcore_output_jump_label_table (void)
{
  int i;

  if (pool_size)
    {
      fprintf (asm_out_file, "\t.align 2\n");

      for (i = 0; i < pool_size; i++)
        {
          pool_node * p = pool_vector + i;

          (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (p->label));

          output_asm_insn (".long %0", &p->value);
        }

      pool_size = 0;
    }

  return "";
}

/* Check whether insn is a candidate for a conditional.  */

static cond_type
is_cond_candidate (rtx insn)
{
  /* The only things we conditionalize are those that can be directly
     changed into a conditional.  Only bother with SImode items.  If
     we wanted to be a little more aggressive, we could also do other
     modes such as DImode with reg-reg move or load 0.  */
  if (GET_CODE (insn) == INSN)
    {
      rtx pat = PATTERN (insn);
      rtx src, dst;

      if (GET_CODE (pat) != SET)
        return COND_NO;

      dst = XEXP (pat, 0);

      if ((GET_CODE (dst) != REG &&
           GET_CODE (dst) != SUBREG) ||
          GET_MODE (dst) != SImode)
        return COND_NO;

      src = XEXP (pat, 1);

      if ((GET_CODE (src) == REG ||
           (GET_CODE (src) == SUBREG &&
            GET_CODE (SUBREG_REG (src)) == REG)) &&
          GET_MODE (src) == SImode)
        return COND_MOV_INSN;
      else if (GET_CODE (src) == CONST_INT &&
               INTVAL (src) == 0)
        return COND_CLR_INSN;
      else if (GET_CODE (src) == PLUS &&
               (GET_CODE (XEXP (src, 0)) == REG ||
                (GET_CODE (XEXP (src, 0)) == SUBREG &&
                 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
               GET_MODE (XEXP (src, 0)) == SImode &&
               GET_CODE (XEXP (src, 1)) == CONST_INT &&
               INTVAL (XEXP (src, 1)) == 1)
        return COND_INC_INSN;
      else if (((GET_CODE (src) == MINUS &&
                 GET_CODE (XEXP (src, 1)) == CONST_INT &&
                 INTVAL (XEXP (src, 1)) == 1) ||
                (GET_CODE (src) == PLUS &&
                 GET_CODE (XEXP (src, 1)) == CONST_INT &&
                 INTVAL (XEXP (src, 1)) == -1)) &&
               (GET_CODE (XEXP (src, 0)) == REG ||
                (GET_CODE (XEXP (src, 0)) == SUBREG &&
                 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
               GET_MODE (XEXP (src, 0)) == SImode)
        return COND_DEC_INSN;

      /* Some insns that we don't bother with:
         (set (rx:DI) (ry:DI))
         (set (rx:DI) (const_int 0))
       */
    }
  else if (GET_CODE (insn) == JUMP_INSN &&
           GET_CODE (PATTERN (insn)) == SET &&
           GET_CODE (XEXP (PATTERN (insn), 1)) == LABEL_REF)
    return COND_BRANCH_INSN;

  return COND_NO;
}
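
/* For illustration (these examples are ours, not from the original
   source), the classification above maps RTL like so:

     (set (reg:SI 7) (reg:SI 8))                          -> COND_MOV_INSN
     (set (reg:SI 7) (const_int 0))                       -> COND_CLR_INSN
     (set (reg:SI 7) (plus:SI (reg:SI 7) (const_int 1)))  -> COND_INC_INSN
     (set (reg:SI 7) (plus:SI (reg:SI 7) (const_int -1))) -> COND_DEC_INSN

   which correspond to the MCore movt/movf, clrt/clrf, inct/incf and
   dect/decf conditional instructions.  */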

/* Emit a conditional version of insn and replace the old insn with the
   new one.  Return the new insn if emitted.  */

static rtx
emit_new_cond_insn (rtx insn, int cond)
{
  rtx c_insn = 0;
  rtx pat, dst, src;
  cond_type num;

  if ((num = is_cond_candidate (insn)) == COND_NO)
    return NULL;

  pat = PATTERN (insn);

  if (GET_CODE (insn) == INSN)
    {
      dst = SET_DEST (pat);
      src = SET_SRC (pat);
    }
  else
    {
      dst = JUMP_LABEL (insn);
      src = NULL_RTX;
    }

  switch (num)
    {
    case COND_MOV_INSN:
    case COND_CLR_INSN:
      if (cond)
        c_insn = gen_movt0 (dst, src, dst);
      else
        c_insn = gen_movt0 (dst, dst, src);
      break;

    case COND_INC_INSN:
      if (cond)
        c_insn = gen_incscc (dst, dst);
      else
        c_insn = gen_incscc_false (dst, dst);
      break;

    case COND_DEC_INSN:
      if (cond)
        c_insn = gen_decscc (dst, dst);
      else
        c_insn = gen_decscc_false (dst, dst);
      break;

    case COND_BRANCH_INSN:
      if (cond)
        c_insn = gen_branch_true (dst);
      else
        c_insn = gen_branch_false (dst);
      break;

    default:
      return NULL;
    }

  /* Only copy the notes if they exist.  */
  if (rtx_length [GET_CODE (c_insn)] >= 7 && rtx_length [GET_CODE (insn)] >= 7)
    {
      /* We really don't need to bother with the notes and links at this
         point, but go ahead and save the notes.  This will help is_dead()
         when applying peepholes (links don't matter since they are not
         used any more beyond this point for the mcore).  */
      REG_NOTES (c_insn) = REG_NOTES (insn);
    }

  if (num == COND_BRANCH_INSN)
    {
      /* For jumps, we need to be a little bit careful and emit the new jump
         before the old one and to update the use count for the target label.
         This way, the barrier following the old (uncond) jump will get
         deleted, but the label won't.  */
      c_insn = emit_jump_insn_before (c_insn, insn);

      ++ LABEL_NUSES (dst);

      JUMP_LABEL (c_insn) = dst;
    }
  else
    c_insn = emit_insn_after (c_insn, insn);

  delete_insn (insn);

  return c_insn;
}

/* Attempt to change a basic block into a series of conditional insns.  This
   works by taking the branch at the end of the 1st block and scanning for the
   end of the 2nd block.  If all instructions in the 2nd block have cond.
   versions and the label at the start of block 3 is the same as the target
   from the branch at block 1, then conditionalize all insns in block 2 using
   the inverse condition of the branch at block 1.  (Note I'm bending the
   definition of basic block here.)

   e.g., change:

        bt      L2      <-- end of block 1 (delete)
        mov     r7,r8
        addu    r7,1
        br      L3      <-- end of block 2

   L2:  ...             <-- start of block 3 (NUSES==1)
   L3:  ...

   to:

        movf    r7,r8
        incf    r7
        bf      L3

   L3:  ...

   we can delete the L2 label if NUSES==1 and re-apply the optimization
   starting at the last instruction of block 2.  This may allow an entire
   if-then-else statement to be conditionalized.  BRC  */

static rtx
conditionalize_block (rtx first)
{
  rtx insn;
  rtx br_pat;
  rtx end_blk_1_br = 0;
  rtx end_blk_2_insn = 0;
  rtx start_blk_3_lab = 0;
  int cond;
  int br_lab_num;
  int blk_size = 0;

  /* Check that the first insn is a candidate conditional jump.  This is
     the one that we'll eliminate.  If not, advance to the next insn to
     try.  */
  if (GET_CODE (first) != JUMP_INSN ||
      GET_CODE (PATTERN (first)) != SET ||
      GET_CODE (XEXP (PATTERN (first), 1)) != IF_THEN_ELSE)
    return NEXT_INSN (first);

  /* Extract some information we need.  */
  end_blk_1_br = first;
  br_pat = PATTERN (end_blk_1_br);

  /* Complement the condition since we use the reverse cond. for the insns.  */
  cond = (GET_CODE (XEXP (XEXP (br_pat, 1), 0)) == EQ);

  /* Determine what kind of branch we have.  */
  if (GET_CODE (XEXP (XEXP (br_pat, 1), 1)) == LABEL_REF)
    {
      /* A normal branch, so extract label out of first arm.  */
      br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 1), 0));
    }
  else
    {
      /* An inverse branch, so extract the label out of the 2nd arm
         and complement the condition.  */
      cond = (cond == 0);
      br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 2), 0));
    }

  /* Scan forward for the start of block 2: it must start with a
     label and that label must be the same as the branch target
     label from block 1.  We don't care about whether block 2 actually
     ends with a branch or a label (an uncond. branch is
     conditionalizable).  */
  for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
    {
      enum rtx_code code;

      code = GET_CODE (insn);

      /* Look for the label at the start of block 3.  */
      if (code == CODE_LABEL && CODE_LABEL_NUMBER (insn) == br_lab_num)
        break;

      /* Skip barriers, notes, and conditionalizable insns.  If the
         insn is not conditionalizable or makes this optimization fail,
         just return the next insn so we can start over from that point.  */
      if (code != BARRIER && code != NOTE && !is_cond_candidate (insn))
        return NEXT_INSN (insn);

      /* Remember the last real insn before the label (i.e. end of block 2).  */
      if (code == JUMP_INSN || code == INSN)
        {
          blk_size ++;
          end_blk_2_insn = insn;
        }
    }

  if (!insn)
    return insn;

  /* It is possible for this optimization to slow performance if the blocks
     are long.  This really depends upon whether the branch is likely taken
     or not.  If the branch is taken, we slow performance in many cases.  But,
     if the branch is not taken, we always help performance (for a single
     block, but for a double block (i.e. when the optimization is re-applied)
     this is not true since the 'right thing' depends on the overall length of
     the collapsed block).  As a compromise, don't apply this optimization on
     blocks larger than size 2 (unlikely for the mcore) when speed is important.
     The best threshold depends on the latencies of the instructions (i.e.,
     the branch penalty).  */
  if (optimize > 1 && blk_size > 2)
    return insn;

  /* At this point, we've found the start of block 3 and we know that
     it is the destination of the branch from block 1.  Also, all
     instructions in block 2 are conditionalizable.  So, apply the
     conditionalization and delete the branch.  */
  start_blk_3_lab = insn;

  for (insn = NEXT_INSN (end_blk_1_br); insn != start_blk_3_lab;
       insn = NEXT_INSN (insn))
    {
      rtx newinsn;

      if (INSN_DELETED_P (insn))
        continue;

      /* Try to form a conditional variant of the instruction and emit it.  */
      if ((newinsn = emit_new_cond_insn (insn, cond)))
        {
          if (end_blk_2_insn == insn)
            end_blk_2_insn = newinsn;

          insn = newinsn;
        }
    }

  /* Note whether we will delete the label starting blk 3 when the jump
     gets deleted.  If so, we want to re-apply this optimization at the
     last real instruction right before the label.  */
  if (LABEL_NUSES (start_blk_3_lab) == 1)
    {
      start_blk_3_lab = 0;
    }

  /* ??? We probably should redistribute the death notes for this insn, esp.
     the death of cc, but it doesn't really matter this late in the game.
     The peepholes all use is_dead() which will find the correct death
     regardless of whether there is a note.  */
  delete_insn (end_blk_1_br);

  if (! start_blk_3_lab)
    return end_blk_2_insn;

  /* Return the insn right after the label at the start of block 3.  */
  return NEXT_INSN (start_blk_3_lab);
}

/* Apply the conditionalization of blocks optimization.  This is the
   outer loop that traverses through the insns scanning for a branch
   that signifies an opportunity to apply the optimization.  Note that
   this optimization is applied late.  If we could apply it earlier,
   say before cse 2, it might expose more optimization opportunities,
   but the payback probably isn't really worth the effort (we'd have
   to update all reg/flow/notes/links/etc to make it work - and stick it
   in before cse 2).  */

static void
conditionalize_optimization (void)
{
  rtx insn;

  for (insn = get_insns (); insn; insn = conditionalize_block (insn))
    continue;
}

static int saved_warn_return_type = -1;
static int saved_warn_return_type_count = 0;

/* This is to handle loads from the constant pool.  */

static void
mcore_reorg (void)
{
  /* Reset this variable.  */
  current_function_anonymous_args = 0;

  /* Restore the warn_return_type if it has been altered.  */
  if (saved_warn_return_type != -1)
    {
      /* Only restore the value if we have reached another function.
         The test of warn_return_type occurs in finish_function () in
         c-decl.c a long time after the code for the function is generated,
         so we need a counter to tell us when we have finished parsing that
         function and can restore the flag.  */
      if (--saved_warn_return_type_count == 0)
        {
          warn_return_type = saved_warn_return_type;
          saved_warn_return_type = -1;
        }
    }

  if (optimize == 0)
    return;

  /* Conditionalize blocks where we can.  */
  conditionalize_optimization ();

  /* Literal pool generation is now pushed off until the assembler.  */
}

\f
/* Return true if X is something that can be moved directly into r15.  */

bool
mcore_r15_operand_p (rtx x)
{
  switch (GET_CODE (x))
    {
    case CONST_INT:
      return mcore_const_ok_for_inline (INTVAL (x));

    case REG:
    case SUBREG:
    case MEM:
      return true;

    default:
      return false;
    }
}

/* Implement SECONDARY_RELOAD_CLASS.  If CLASS contains r15, and we can't
   directly move X into it, use r1-r14 as a temporary.  */

enum reg_class
mcore_secondary_reload_class (enum reg_class class,
                              enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  if (TEST_HARD_REG_BIT (reg_class_contents[class], 15)
      && !mcore_r15_operand_p (x))
    return LRW_REGS;

  return NO_REGS;
}

/* Return the reg_class to use when reloading the rtx X into the class
   CLASS.  If X is too complex to move directly into r15, prefer to
   use LRW_REGS instead.  */

enum reg_class
mcore_reload_class (rtx x, enum reg_class class)
{
  if (reg_class_subset_p (LRW_REGS, class) && !mcore_r15_operand_p (x))
    return LRW_REGS;

  return class;
}

/* Tell me if a pair of reg/subreg rtx's actually refer to the same
   register.  Note that the current version doesn't worry about whether
   they are the same mode or not (e.g., a QImode in r2 matches an HImode
   in r2, which matches an SImode in r2).  Might think in the future about
   whether we want to be able to say something about modes.  */

int
mcore_is_same_reg (rtx x, rtx y)
{
  /* Strip any and all of the subreg wrappers.  */
  while (GET_CODE (x) == SUBREG)
    x = SUBREG_REG (x);

  while (GET_CODE (y) == SUBREG)
    y = SUBREG_REG (y);

  if (GET_CODE (x) == REG && GET_CODE (y) == REG && REGNO (x) == REGNO (y))
    return 1;

  return 0;
}

void
mcore_override_options (void)
{
  /* Only the m340 supports little endian code.  */
  if (TARGET_LITTLE_END && ! TARGET_M340)
    target_flags |= MASK_M340;
}
\f
/* Compute the number of word sized registers needed to
   hold a function argument of mode MODE and type TYPE.  */

int
mcore_num_arg_regs (enum machine_mode mode, tree type)
{
  int size;

  if (targetm.calls.must_pass_in_stack (mode, type))
    return 0;

  if (type && mode == BLKmode)
    size = int_size_in_bytes (type);
  else
    size = GET_MODE_SIZE (mode);

  return ROUND_ADVANCE (size);
}
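
/* Worked example (ours, assuming the usual definition of ROUND_ADVANCE
   as rounding a byte count up to whole words with UNITS_PER_WORD == 4):
   a 10-byte BLKmode struct needs ROUND_ADVANCE (10) == 3 registers,
   while an SImode argument needs ROUND_ADVANCE (4) == 1.  */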

static rtx
handle_structs_in_regs (enum machine_mode mode, tree type, int reg)
{
  int size;

  /* The MCore ABI defines that a structure whose size is not a whole multiple
     of words is passed packed into registers (or spilled onto the stack if
     not enough registers are available) with the last few bytes of the
     structure being packed, left-justified, into the last register/stack slot.
     GCC handles this correctly if the last word is in a stack slot, but we
     have to generate a special, PARALLEL RTX if the last word is in an
     argument register.  */
  if (type
      && TYPE_MODE (type) == BLKmode
      && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
      && (size = int_size_in_bytes (type)) > UNITS_PER_WORD
      && (size % UNITS_PER_WORD != 0)
      && (reg + mcore_num_arg_regs (mode, type) <= (FIRST_PARM_REG + NPARM_REGS)))
    {
      rtx arg_regs [NPARM_REGS];
      int nregs;
      rtx result;
      rtvec rtvec;

      for (nregs = 0; size > 0; size -= UNITS_PER_WORD)
        {
          arg_regs [nregs] =
            gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, reg ++),
                               GEN_INT (nregs * UNITS_PER_WORD));
          nregs ++;
        }

      /* We assume here that NPARM_REGS == 6.  The assert checks this.  */
      assert (ARRAY_SIZE (arg_regs) == 6);

      rtvec = gen_rtvec (nregs, arg_regs[0], arg_regs[1], arg_regs[2],
                         arg_regs[3], arg_regs[4], arg_regs[5]);

      result = gen_rtx_PARALLEL (mode, rtvec);
      return result;
    }

  return gen_rtx_REG (mode, reg);
}
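
/* For illustration (our example, not from the original source): a
   10-byte struct passed starting in argument register 0 produces
   roughly

     (parallel [(expr_list (reg:SI 2) (const_int 0))
                (expr_list (reg:SI 3) (const_int 4))
                (expr_list (reg:SI 4) (const_int 8))])

   i.e. three words, with the final half-filled word still assigned to
   a register (hardware regs 2-4, given FIRST_PARM_REG == 2).  */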

rtx
mcore_function_value (tree valtype, tree func ATTRIBUTE_UNUSED)
{
  enum machine_mode mode;
  int unsigned_p;

  mode = TYPE_MODE (valtype);

  PROMOTE_MODE (mode, unsigned_p, NULL);

  return handle_structs_in_regs (mode, valtype, FIRST_RET_REG);
}

/* Define where to put the arguments to a function.
   Value is zero to push the argument on the stack,
   or a hard register in which to store the argument.

   MODE is the argument's machine mode.
   TYPE is the data type of the argument (as a tree).
    This is null for libcalls where that information may
    not be available.
   CUM is a variable of type CUMULATIVE_ARGS which gives info about
    the preceding args and about the function being called.
   NAMED is nonzero if this argument is a named parameter
    (otherwise it is an extra parameter matching an ellipsis).

   On MCore the first args are normally in registers
   and the rest are pushed.  Any arg that starts within the first
   NPARM_REGS words is at least partially passed in a register unless
   its data type forbids.  */

rtx
mcore_function_arg (CUMULATIVE_ARGS cum, enum machine_mode mode,
                    tree type, int named)
{
  int arg_reg;

  if (! named || mode == VOIDmode)
    return 0;

  if (targetm.calls.must_pass_in_stack (mode, type))
    return 0;

  arg_reg = ROUND_REG (cum, mode);

  if (arg_reg < NPARM_REGS)
    return handle_structs_in_regs (mode, type, FIRST_PARM_REG + arg_reg);

  return 0;
}

/* Returns the number of bytes of argument registers required to hold *part*
   of a parameter of machine mode MODE and type TYPE (which may be NULL if
   the type is not known).  If the argument fits entirely in the argument
   registers, or entirely on the stack, then 0 is returned.  CUM is the
   number of argument registers already used by earlier parameters to
   the function.  */

static int
mcore_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
                         tree type, bool named)
{
  int reg = ROUND_REG (*cum, mode);

  if (named == 0)
    return 0;

  if (targetm.calls.must_pass_in_stack (mode, type))
    return 0;

  /* REG is not the *hardware* register number of the register that holds
     the argument, it is the *argument* register number.  So for example,
     the first argument to a function goes in argument register 0, which
     translates (for the MCore) into hardware register 2.  The second
     argument goes into argument register 1, which translates into hardware
     register 3, and so on.  NPARM_REGS is the number of argument registers
     supported by the target, not the maximum hardware register number of
     the target.  */
  if (reg >= NPARM_REGS)
    return 0;

  /* If the argument fits entirely in registers, return 0.  */
  if (reg + mcore_num_arg_regs (mode, type) <= NPARM_REGS)
    return 0;

  /* The argument overflows the number of available argument registers.
     Compute how many argument registers have not yet been assigned to
     hold an argument.  */
  reg = NPARM_REGS - reg;

  /* Return partially in registers and partially on the stack.  */
  return reg * UNITS_PER_WORD;
}
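
/* Worked example (ours): with NPARM_REGS == 6, a named 8-byte argument
   that starts in argument register 5 needs two registers but only one
   is left, so the function returns (6 - 5) * UNITS_PER_WORD == 4 bytes
   in registers; the remaining 4 bytes go on the stack.  */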
\f
/* Return nonzero if SYMBOL is marked as being dllexport'd.  */

int
mcore_dllexport_name_p (const char * symbol)
{
  return symbol[0] == '@' && symbol[1] == 'e' && symbol[2] == '.';
}

/* Return nonzero if SYMBOL is marked as being dllimport'd.  */

int
mcore_dllimport_name_p (const char * symbol)
{
  return symbol[0] == '@' && symbol[1] == 'i' && symbol[2] == '.';
}
2856
2857/* Mark a DECL as being dllexport'd. */
08903e08 2858
8f90be4c 2859static void
08903e08 2860mcore_mark_dllexport (tree decl)
8f90be4c 2861{
cbd3488b 2862 const char * oldname;
8f90be4c
NC
2863 char * newname;
2864 rtx rtlname;
2865 tree idp;
2866
2867 rtlname = XEXP (DECL_RTL (decl), 0);
2868
2869 if (GET_CODE (rtlname) == SYMBOL_REF)
2870 oldname = XSTR (rtlname, 0);
2871 else if ( GET_CODE (rtlname) == MEM
2872 && GET_CODE (XEXP (rtlname, 0)) == SYMBOL_REF)
2873 oldname = XSTR (XEXP (rtlname, 0), 0);
2874 else
2875 abort ();
2876
2877 if (mcore_dllexport_name_p (oldname))
2878 return; /* Already done. */
2879
2880 newname = alloca (strlen (oldname) + 4);
2881 sprintf (newname, "@e.%s", oldname);
2882
2883 /* We pass newname through get_identifier to ensure it has a unique
2884 address. RTL processing can sometimes peek inside the symbol ref
2885 and compare the string's addresses to see if two symbols are
2886 identical. */
2887 /* ??? At least I think that's why we do this. */
2888 idp = get_identifier (newname);
2889
2890 XEXP (DECL_RTL (decl), 0) =
f1c25d3b 2891 gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp));
8f90be4c
NC
2892}

/* Mark a DECL as being dllimport'd.  */

static void
mcore_mark_dllimport (tree decl)
{
  const char * oldname;
  char * newname;
  tree idp;
  rtx rtlname;
  rtx newrtl;

  rtlname = XEXP (DECL_RTL (decl), 0);

  if (GET_CODE (rtlname) == SYMBOL_REF)
    oldname = XSTR (rtlname, 0);
  else if (   GET_CODE (rtlname) == MEM
           && GET_CODE (XEXP (rtlname, 0)) == SYMBOL_REF)
    oldname = XSTR (XEXP (rtlname, 0), 0);
  else
    abort ();

  if (mcore_dllexport_name_p (oldname))
    abort (); /* This shouldn't happen.  */
  else if (mcore_dllimport_name_p (oldname))
    return; /* Already done.  */

  /* ??? One can well ask why we're making these checks here,
     and that would be a good question.  */

  /* Imported variables can't be initialized.  */
  if (TREE_CODE (decl) == VAR_DECL
      && !DECL_VIRTUAL_P (decl)
      && DECL_INITIAL (decl))
    {
      error ("%Jinitialized variable '%D' is marked dllimport", decl, decl);
      return;
    }

  /* `extern' needn't be specified with dllimport.
     Specify `extern' now and hope for the best.  Sigh.  */
  if (TREE_CODE (decl) == VAR_DECL
      /* ??? Is this test for vtables needed?  */
      && !DECL_VIRTUAL_P (decl))
    {
      DECL_EXTERNAL (decl) = 1;
      TREE_PUBLIC (decl) = 1;
    }

  newname = alloca (strlen (oldname) + 11);
  sprintf (newname, "@i.__imp_%s", oldname);

  /* We pass newname through get_identifier to ensure it has a unique
     address.  RTL processing can sometimes peek inside the symbol ref
     and compare the string's addresses to see if two symbols are
     identical.  */
  /* ??? At least I think that's why we do this.  */
  idp = get_identifier (newname);

  newrtl = gen_rtx_MEM (Pmode,
                        gen_rtx_SYMBOL_REF (Pmode,
                                            IDENTIFIER_POINTER (idp)));
  XEXP (DECL_RTL (decl), 0) = newrtl;
}
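
/* To illustrate the two encodings above (our example): a symbol "foo"
   becomes "@e.foo" when dllexport'd, while dllimport rewrites its RTL
   as a memory reference through "@i.__imp_foo", so accesses go through
   the import slot rather than the symbol itself.  */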

static int
mcore_dllexport_p (tree decl)
{
  if (   TREE_CODE (decl) != VAR_DECL
      && TREE_CODE (decl) != FUNCTION_DECL)
    return 0;

  return lookup_attribute ("dllexport", DECL_ATTRIBUTES (decl)) != 0;
}

static int
mcore_dllimport_p (tree decl)
{
  if (   TREE_CODE (decl) != VAR_DECL
      && TREE_CODE (decl) != FUNCTION_DECL)
    return 0;

  return lookup_attribute ("dllimport", DECL_ATTRIBUTES (decl)) != 0;
}

/* We must mark dll symbols specially.  Definitions of dllexport'd objects
   install some info in the .drectve (PE) or .exports (ELF) sections.  */

static void
mcore_encode_section_info (tree decl, rtx rtl ATTRIBUTE_UNUSED, int first ATTRIBUTE_UNUSED)
{
  /* Mark the decl so we can tell from the rtl whether the object is
     dllexport'd or dllimport'd.  */
  if (mcore_dllexport_p (decl))
    mcore_mark_dllexport (decl);
  else if (mcore_dllimport_p (decl))
    mcore_mark_dllimport (decl);

  /* It might be that DECL has already been marked as dllimport, but
     a subsequent definition nullified that.  The attribute is gone
     but DECL_RTL still has @i.__imp_foo.  We need to remove that.  */
  else if ((TREE_CODE (decl) == FUNCTION_DECL
            || TREE_CODE (decl) == VAR_DECL)
           && DECL_RTL (decl) != NULL_RTX
           && GET_CODE (DECL_RTL (decl)) == MEM
           && GET_CODE (XEXP (DECL_RTL (decl), 0)) == MEM
           && GET_CODE (XEXP (XEXP (DECL_RTL (decl), 0), 0)) == SYMBOL_REF
           && mcore_dllimport_name_p (XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0)))
    {
      const char * oldname = XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0);
      tree idp = get_identifier (oldname + 9);
      rtx newrtl = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp));

      XEXP (DECL_RTL (decl), 0) = newrtl;

      /* We previously set TREE_PUBLIC and DECL_EXTERNAL.
         ??? We leave these alone for now.  */
    }
}

/* Undo the effects of the above.  */

static const char *
mcore_strip_name_encoding (const char * str)
{
  return str + (str[0] == '@' ? 3 : 0);
}
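
/* Note (our observation): this strips the three-character "@e." or
   "@i." prefix, so "@e.foo" decodes to "foo" and "@i.__imp_foo" to
   "__imp_foo"; encode_section_info above instead skips 9 characters
   (oldname + 9) when it needs the bare "foo" back.  */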

/* MCore specific attribute support.
   dllexport - for exporting a function/variable that will live in a dll
   dllimport - for importing a function/variable from a dll
   naked     - do not create a function prologue/epilogue.  */

const struct attribute_spec mcore_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  { "dllexport", 0, 0, true,  false, false, NULL },
  { "dllimport", 0, 0, true,  false, false, NULL },
  { "naked",     0, 0, true,  false, false, mcore_handle_naked_attribute },
  { NULL,        0, 0, false, false, false, NULL }
};

/* Handle a "naked" attribute; arguments as in
   struct attribute_spec.handler.  */

static tree
mcore_handle_naked_attribute (tree * node, tree name, tree args ATTRIBUTE_UNUSED,
                              int flags ATTRIBUTE_UNUSED, bool * no_add_attrs)
{
  if (TREE_CODE (*node) == FUNCTION_DECL)
    {
      /* PR14310 - don't complain about lack of return statement
         in naked functions.  The solution here is a gross hack
         but this is the only way to solve the problem without
         adding a new feature to GCC.  I did try submitting a patch
         that would add such a new feature, but it was (rightfully)
         rejected on the grounds that it was creeping featurism,
         so hence this code.  */
      if (warn_return_type)
        {
          saved_warn_return_type = warn_return_type;
          warn_return_type = 0;
          saved_warn_return_type_count = 2;
        }
      else if (saved_warn_return_type_count)
        saved_warn_return_type_count = 2;
    }
  else
    {
      warning ("%qs attribute only applies to functions",
               IDENTIFIER_POINTER (name));
      *no_add_attrs = true;
    }

  return NULL_TREE;
}

/* ??? It looks like this is PE specific?  Oh well, this is what the
   old code did as well.  */

static void
mcore_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
{
  int len;
  const char * name;
  char * string;
  const char * prefix;

  name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));

  /* Strip off any encoding in name.  */
  name = (* targetm.strip_name_encoding) (name);

  /* The object is put in, for example, section .text$foo.
     The linker will then ultimately place it in .text
     (everything from the $ on is stripped).  */
  if (TREE_CODE (decl) == FUNCTION_DECL)
    prefix = ".text$";
  /* For compatibility with EPOC, we ignore the fact that the
     section might have relocs against it.  */
  else if (decl_readonly_section (decl, 0))
    prefix = ".rdata$";
  else
    prefix = ".data$";

  len = strlen (name) + strlen (prefix);
  string = alloca (len + 1);

  sprintf (string, "%s%s", prefix, name);

  DECL_SECTION_NAME (decl) = build_string (len, string);
}
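
/* For example (ours): with the scheme above, a function bar lands in
   section ".text$bar" and a writable variable baz in ".data$baz"; a
   "$"-aware linker then coalesces these back into .text and .data.  */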

int
mcore_naked_function_p (void)
{
  return lookup_attribute ("naked", DECL_ATTRIBUTES (current_function_decl)) != NULL_TREE;
}

#ifdef OBJECT_FORMAT_ELF
static void
mcore_asm_named_section (const char *name,
                         unsigned int flags ATTRIBUTE_UNUSED,
                         tree decl ATTRIBUTE_UNUSED)
{
  fprintf (asm_out_file, "\t.section %s\n", name);
}
#endif /* OBJECT_FORMAT_ELF */

/* Worker function for TARGET_ASM_EXTERNAL_LIBCALL.  */

static void
mcore_external_libcall (rtx fun)
{
  fprintf (asm_out_file, "\t.import\t");
  assemble_name (asm_out_file, XSTR (fun, 0));
  fprintf (asm_out_file, "\n");
}

/* Worker function for TARGET_RETURN_IN_MEMORY.  */

static bool
mcore_return_in_memory (tree type, tree fntype ATTRIBUTE_UNUSED)
{
  HOST_WIDE_INT size = int_size_in_bytes (type);
  return (size == -1 || size > 2 * UNITS_PER_WORD);
}
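
/* Worked example (ours, with UNITS_PER_WORD == 4): an 8-byte struct
   (size <= 8) comes back in registers via mcore_function_value, while
   a 12-byte struct, or a variable-sized type (size == -1), is
   returned in memory.  */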