1bba6ac0 1/* Output routines for Motorola MCore processor
d353bf18 2 Copyright (C) 1993-2015 Free Software Foundation, Inc.
1bba6ac0 3
c167bbfc 4 This file is part of GCC.
1bba6ac0 5
c167bbfc 6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published
038d1e19 8 by the Free Software Foundation; either version 3, or (at your
c167bbfc 9 option) any later version.
1bba6ac0 10
c167bbfc 11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
13 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
14 License for more details.
1bba6ac0 15
c167bbfc 16 You should have received a copy of the GNU General Public License
038d1e19 17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
1bba6ac0 19
7ae98079 20#include "config.h"
5bc09e30 21#include "system.h"
805e22b2 22#include "coretypes.h"
23#include "tm.h"
fa4fdf98 24#include "rtl.h"
b20a8bb4 25#include "alias.h"
26#include "symtab.h"
fa4fdf98 27#include "tree.h"
b20a8bb4 28#include "fold-const.h"
9ed99284 29#include "stor-layout.h"
30#include "varasm.h"
31#include "stringpool.h"
32#include "calls.h"
fa4fdf98 33#include "tm_p.h"
1bba6ac0 34#include "mcore.h"
1bba6ac0 35#include "regs.h"
36#include "hard-reg-set.h"
1bba6ac0 37#include "insn-config.h"
38#include "conditions.h"
1bba6ac0 39#include "output.h"
40#include "insn-attr.h"
41#include "flags.h"
42#include "obstack.h"
d53441c8 43#include "function.h"
d53441c8 44#include "expmed.h"
45#include "dojump.h"
46#include "explow.h"
47#include "emit-rtl.h"
48#include "stmt.h"
1bba6ac0 49#include "expr.h"
50#include "reload.h"
51#include "recog.h"
0b205f4c 52#include "diagnostic-core.h"
a767736d 53#include "target.h"
54#include "target-def.h"
94ea8568 55#include "dominance.h"
56#include "cfg.h"
57#include "cfgrtl.h"
58#include "cfganal.h"
59#include "lcm.h"
60#include "cfgbuild.h"
61#include "cfgcleanup.h"
62#include "predict.h"
63#include "basic-block.h"
db65aa2c 64#include "df.h"
f7715905 65#include "builtins.h"
1bba6ac0 66
1bba6ac0 67/* For dumping information about frame sizes. */
68char * mcore_current_function_name = 0;
69long mcore_current_compilation_timestamp = 0;
70
71/* Global variables for machine-dependent things. */
72
1bba6ac0 73/* Provides the class number of the smallest class containing
74 reg number. */
ef51d1e3 75const enum reg_class regno_reg_class[FIRST_PSEUDO_REGISTER] =
1bba6ac0 76{
77 GENERAL_REGS, ONLYR1_REGS, LRW_REGS, LRW_REGS,
78 LRW_REGS, LRW_REGS, LRW_REGS, LRW_REGS,
79 LRW_REGS, LRW_REGS, LRW_REGS, LRW_REGS,
80 LRW_REGS, LRW_REGS, LRW_REGS, GENERAL_REGS,
81 GENERAL_REGS, C_REGS, NO_REGS, NO_REGS,
82};
83
2eebe422 84struct mcore_frame
85{
c167bbfc 86 int arg_size; /* Stdarg spills (bytes). */
87 int reg_size; /* Non-volatile reg saves (bytes). */
88 int reg_mask; /* Non-volatile reg saves. */
89 int local_size; /* Locals. */
90 int outbound_size; /* Arg overflow on calls out. */
2eebe422 91 int pad_outbound;
92 int pad_local;
93 int pad_reg;
94 /* Describe the steps we'll use to grow it. */
c167bbfc 95#define MAX_STACK_GROWS 4 /* Gives us some spare space. */
2eebe422 96 int growth[MAX_STACK_GROWS];
97 int arg_offset;
98 int reg_offset;
99 int reg_growth;
100 int local_growth;
101};
102
103typedef enum
104{
105 COND_NO,
106 COND_MOV_INSN,
107 COND_CLR_INSN,
108 COND_INC_INSN,
109 COND_DEC_INSN,
110 COND_BRANCH_INSN
111}
112cond_type;
113
c167bbfc 114static void output_stack_adjust (int, int);
115static int calc_live_regs (int *);
95ba5045 116static int try_constant_tricks (HOST_WIDE_INT, HOST_WIDE_INT *, HOST_WIDE_INT *);
3754d046 117static const char * output_inline_const (machine_mode, rtx *);
c167bbfc 118static void layout_mcore_frame (struct mcore_frame *);
3754d046 119static void mcore_setup_incoming_varargs (cumulative_args_t, machine_mode, tree, int *, int);
c167bbfc 120static cond_type is_cond_candidate (rtx);
91a55c11 121static rtx_insn *emit_new_cond_insn (rtx, int);
122static rtx_insn *conditionalize_block (rtx_insn *);
c167bbfc 123static void conditionalize_optimization (void);
124static void mcore_reorg (void);
3754d046 125static rtx handle_structs_in_regs (machine_mode, const_tree, int);
c167bbfc 126static void mcore_mark_dllexport (tree);
127static void mcore_mark_dllimport (tree);
128static int mcore_dllexport_p (tree);
129static int mcore_dllimport_p (tree);
c167bbfc 130static tree mcore_handle_naked_attribute (tree *, tree, tree, int, bool *);
6e4758ce 131#ifdef OBJECT_FORMAT_ELF
c167bbfc 132static void mcore_asm_named_section (const char *,
537cd941 133 unsigned int, tree);
6e4758ce 134#endif
932f5d0a 135static void mcore_print_operand (FILE *, rtx, int);
136static void mcore_print_operand_address (FILE *, rtx);
137static bool mcore_print_operand_punct_valid_p (unsigned char code);
c167bbfc 138static void mcore_unique_section (tree, int);
139static void mcore_encode_section_info (tree, rtx, int);
140static const char *mcore_strip_name_encoding (const char *);
7e0514b3 141static int mcore_const_costs (rtx, RTX_CODE);
142static int mcore_and_cost (rtx);
143static int mcore_ior_cost (rtx);
20d892d1 144static bool mcore_rtx_costs (rtx, int, int, int,
145 int *, bool);
5ee57edb 146static void mcore_external_libcall (rtx);
fb80456a 147static bool mcore_return_in_memory (const_tree, const_tree);
39cba157 148static int mcore_arg_partial_bytes (cumulative_args_t,
3754d046 149 machine_mode,
f054eb3c 150 tree, bool);
39cba157 151static rtx mcore_function_arg (cumulative_args_t,
3754d046 152 machine_mode,
da6d22fa 153 const_tree, bool);
39cba157 154static void mcore_function_arg_advance (cumulative_args_t,
3754d046 155 machine_mode,
da6d22fa 156 const_tree, bool);
3754d046 157static unsigned int mcore_function_arg_boundary (machine_mode,
bd99ba64 158 const_tree);
74e653fe 159static void mcore_asm_trampoline_template (FILE *);
160static void mcore_trampoline_init (rtx, tree, rtx);
08c6cbd2 161static bool mcore_warn_func_return (tree);
4c834714 162static void mcore_option_override (void);
3754d046 163static bool mcore_legitimate_constant_p (machine_mode, rtx);
ef51d1e3 164\f
165/* MCore specific attributes. */
166
167static const struct attribute_spec mcore_attribute_table[] =
168{
ac86af5d 169 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
170 affects_type_identity } */
171 { "dllexport", 0, 0, true, false, false, NULL, false },
172 { "dllimport", 0, 0, true, false, false, NULL, false },
173 { "naked", 0, 0, true, false, false, mcore_handle_naked_attribute,
174 false },
175 { NULL, 0, 0, false, false, false, NULL, false }
ef51d1e3 176};
a767736d 177\f
178/* Initialize the GCC target structure. */
5ee57edb 179#undef TARGET_ASM_EXTERNAL_LIBCALL
180#define TARGET_ASM_EXTERNAL_LIBCALL mcore_external_libcall
181
3aa0c315 182#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
c167bbfc 183#undef TARGET_MERGE_DECL_ATTRIBUTES
184#define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
a767736d 185#endif
186
58356836 187#ifdef OBJECT_FORMAT_ELF
c167bbfc 188#undef TARGET_ASM_UNALIGNED_HI_OP
58356836 189#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
c167bbfc 190#undef TARGET_ASM_UNALIGNED_SI_OP
58356836 191#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
192#endif
193
932f5d0a 194#undef TARGET_PRINT_OPERAND
195#define TARGET_PRINT_OPERAND mcore_print_operand
196#undef TARGET_PRINT_OPERAND_ADDRESS
197#define TARGET_PRINT_OPERAND_ADDRESS mcore_print_operand_address
198#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
199#define TARGET_PRINT_OPERAND_PUNCT_VALID_P mcore_print_operand_punct_valid_p
200
c167bbfc 201#undef TARGET_ATTRIBUTE_TABLE
202#define TARGET_ATTRIBUTE_TABLE mcore_attribute_table
203#undef TARGET_ASM_UNIQUE_SECTION
204#define TARGET_ASM_UNIQUE_SECTION mcore_unique_section
76aec42f 205#undef TARGET_ASM_FUNCTION_RODATA_SECTION
206#define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
c167bbfc 207#undef TARGET_ENCODE_SECTION_INFO
208#define TARGET_ENCODE_SECTION_INFO mcore_encode_section_info
209#undef TARGET_STRIP_NAME_ENCODING
210#define TARGET_STRIP_NAME_ENCODING mcore_strip_name_encoding
211#undef TARGET_RTX_COSTS
212#define TARGET_RTX_COSTS mcore_rtx_costs
213#undef TARGET_ADDRESS_COST
d9c5e5f4 214#define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
c167bbfc 215#undef TARGET_MACHINE_DEPENDENT_REORG
216#define TARGET_MACHINE_DEPENDENT_REORG mcore_reorg
2efea8c0 217
3b2411a8 218#undef TARGET_PROMOTE_FUNCTION_MODE
219#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
5ee57edb 220#undef TARGET_PROMOTE_PROTOTYPES
fb80456a 221#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
5ee57edb 222
5ee57edb 223#undef TARGET_RETURN_IN_MEMORY
224#define TARGET_RETURN_IN_MEMORY mcore_return_in_memory
0336f0f0 225#undef TARGET_MUST_PASS_IN_STACK
226#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
b981d932 227#undef TARGET_PASS_BY_REFERENCE
228#define TARGET_PASS_BY_REFERENCE hook_pass_by_reference_must_pass_in_stack
f054eb3c 229#undef TARGET_ARG_PARTIAL_BYTES
230#define TARGET_ARG_PARTIAL_BYTES mcore_arg_partial_bytes
da6d22fa 231#undef TARGET_FUNCTION_ARG
232#define TARGET_FUNCTION_ARG mcore_function_arg
233#undef TARGET_FUNCTION_ARG_ADVANCE
234#define TARGET_FUNCTION_ARG_ADVANCE mcore_function_arg_advance
bd99ba64 235#undef TARGET_FUNCTION_ARG_BOUNDARY
236#define TARGET_FUNCTION_ARG_BOUNDARY mcore_function_arg_boundary
5ee57edb 237
238#undef TARGET_SETUP_INCOMING_VARARGS
239#define TARGET_SETUP_INCOMING_VARARGS mcore_setup_incoming_varargs
240
74e653fe 241#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
242#define TARGET_ASM_TRAMPOLINE_TEMPLATE mcore_asm_trampoline_template
243#undef TARGET_TRAMPOLINE_INIT
244#define TARGET_TRAMPOLINE_INIT mcore_trampoline_init
245
4c834714 246#undef TARGET_OPTION_OVERRIDE
247#define TARGET_OPTION_OVERRIDE mcore_option_override
7630a512 248
ca316360 249#undef TARGET_LEGITIMATE_CONSTANT_P
250#define TARGET_LEGITIMATE_CONSTANT_P mcore_legitimate_constant_p
251
08c6cbd2 252#undef TARGET_WARN_FUNC_RETURN
253#define TARGET_WARN_FUNC_RETURN mcore_warn_func_return
254
57e4bbfb 255struct gcc_target targetm = TARGET_INITIALIZER;
2eebe422 256\f
1bba6ac0 257/* Emit instructions to adjust the stack by SIZE bytes in DIRECTION. */
258static void
c167bbfc 259output_stack_adjust (int direction, int size)
1bba6ac0 260{
fa4fdf98 261 /* If extending the stack by a large amount, do it incrementally. */
1bba6ac0 262 if (direction < 0 && size > mcore_stack_increment && mcore_stack_increment > 0)
263 {
1a83b3ff 264 rtx tmp = gen_rtx_REG (SImode, 1);
1bba6ac0 265 rtx memref;
c167bbfc 266
1bba6ac0 267 emit_insn (gen_movsi (tmp, GEN_INT (mcore_stack_increment)));
268 do
269 {
270 emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp));
1a83b3ff 271 memref = gen_rtx_MEM (SImode, stack_pointer_rtx);
1bba6ac0 272 MEM_VOLATILE_P (memref) = 1;
273 emit_insn (gen_movsi (memref, stack_pointer_rtx));
274 size -= mcore_stack_increment;
275 }
276 while (size > mcore_stack_increment);
277
fa4fdf98 278 /* SIZE is now the residual for the last adjustment,
279 which doesn't require a probe. */
1bba6ac0 280 }
281
282 if (size)
283 {
284 rtx insn;
285 rtx val = GEN_INT (size);
286
287 if (size > 32)
288 {
1a83b3ff 289 rtx nval = gen_rtx_REG (SImode, 1);
1bba6ac0 290 emit_insn (gen_movsi (nval, val));
291 val = nval;
292 }
293
294 if (direction > 0)
295 insn = gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, val);
296 else
297 insn = gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, val);
298
299 emit_insn (insn);
300 }
301}
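
/* Illustrative: with mcore_stack_increment == 64, a 200-byte extension
   is bought as three probed 64-byte steps (a subtract from sp followed
   by a volatile store through sp to touch the new space), then a final
   8-byte subtract for the residual.  */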
302
fa4fdf98 303/* Work out the registers which need to be saved,
304 both as a mask and a count. */
305
1bba6ac0 306static int
c167bbfc 307calc_live_regs (int * count)
1bba6ac0 308{
309 int reg;
310 int live_regs_mask = 0;
311
312 * count = 0;
313
314 for (reg = 0; reg < FIRST_PSEUDO_REGISTER; reg++)
315 {
3072d30e 316 if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
1bba6ac0 317 {
318 (*count)++;
319 live_regs_mask |= (1 << reg);
320 }
321 }
322
323 return live_regs_mask;
324}
325
326/* Print the operand address in x to the stream. */
fa4fdf98 327
932f5d0a 328static void
c167bbfc 329mcore_print_operand_address (FILE * stream, rtx x)
1bba6ac0 330{
331 switch (GET_CODE (x))
332 {
333 case REG:
334 fprintf (stream, "(%s)", reg_names[REGNO (x)]);
335 break;
336
337 case PLUS:
338 {
339 rtx base = XEXP (x, 0);
340 rtx index = XEXP (x, 1);
341
342 if (GET_CODE (base) != REG)
343 {
344 /* Ensure that BASE is a register (one of them must be). */
345 rtx temp = base;
346 base = index;
347 index = temp;
348 }
349
350 switch (GET_CODE (index))
351 {
352 case CONST_INT:
b94709e4 353 fprintf (stream, "(%s," HOST_WIDE_INT_PRINT_DEC ")",
354 reg_names[REGNO(base)], INTVAL (index));
1bba6ac0 355 break;
356
357 default:
044e64da 358 gcc_unreachable ();
1bba6ac0 359 }
360 }
361
362 break;
363
364 default:
365 output_addr_const (stream, x);
366 break;
367 }
368}
369
932f5d0a 370static bool
371mcore_print_operand_punct_valid_p (unsigned char code)
372{
373 return (code == '.' || code == '#' || code == '*' || code == '^'
374 || code == '!');
375}
376
1bba6ac0 377/* Print operand x (an rtx) in assembler syntax to file stream
378 according to modifier code.
379
a361b456 380 'R' print the next register or memory location along, i.e. the lsw in
1bba6ac0 381 a double word value
382 'O' print a constant without the #
383 'M' print a constant as its negative
384 'P' print log2 of a power of two
385 'Q' print log2 of an inverse of a power of two
386 'U' print register for ldm/stm instruction
fa4fdf98 387 'X' print byte number for xtrbN instruction. */
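
/* Also handled below, though not listed above:
   'N' print log2 of (the operand + 1), or 32 for -1 (a bmaski operand)
   'x' print a constant in hex.
   Worked examples (illustrative): %P of (const_int 64) prints 6, as in
   "bgeni\t%0,%P1"; %N of (const_int 255) prints 8, as in
   "bmaski\t%0,%N1".  */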
388
932f5d0a 389static void
c167bbfc 390mcore_print_operand (FILE * stream, rtx x, int code)
1bba6ac0 391{
392 switch (code)
393 {
394 case 'N':
395 if (INTVAL(x) == -1)
396 fprintf (asm_out_file, "32");
397 else
398 fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) + 1));
399 break;
400 case 'P':
90fab4b1 401 fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) & 0xffffffff));
1bba6ac0 402 break;
403 case 'Q':
404 fprintf (asm_out_file, "%d", exact_log2 (~INTVAL (x)));
405 break;
406 case 'O':
b94709e4 407 fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
1bba6ac0 408 break;
409 case 'M':
b94709e4 410 fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, - INTVAL (x));
1bba6ac0 411 break;
412 case 'R':
413 /* Next location along in memory or register. */
414 switch (GET_CODE (x))
415 {
416 case REG:
417 fputs (reg_names[REGNO (x) + 1], (stream));
418 break;
419 case MEM:
eafc6604 420 mcore_print_operand_address
421 (stream, XEXP (adjust_address (x, SImode, 4), 0));
1bba6ac0 422 break;
423 default:
044e64da 424 gcc_unreachable ();
1bba6ac0 425 }
426 break;
427 case 'U':
428 fprintf (asm_out_file, "%s-%s", reg_names[REGNO (x)],
429 reg_names[REGNO (x) + 3]);
430 break;
431 case 'x':
b94709e4 432 fprintf (asm_out_file, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
1bba6ac0 433 break;
434 case 'X':
b94709e4 435 fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, 3 - INTVAL (x) / 8);
1bba6ac0 436 break;
437
438 default:
439 switch (GET_CODE (x))
440 {
441 case REG:
442 fputs (reg_names[REGNO (x)], (stream));
443 break;
444 case MEM:
445 output_address (XEXP (x, 0));
446 break;
447 default:
448 output_addr_const (stream, x);
449 break;
450 }
451 break;
452 }
453}
454
 455/* What does a constant cost? */
fa4fdf98 456
fab7adbf 457static int
c167bbfc 458mcore_const_costs (rtx exp, enum rtx_code code)
1bba6ac0 459{
90fab4b1 460 HOST_WIDE_INT val = INTVAL (exp);
1bba6ac0 461
462 /* Easy constants. */
463 if ( CONST_OK_FOR_I (val)
464 || CONST_OK_FOR_M (val)
465 || CONST_OK_FOR_N (val)
466 || (code == PLUS && CONST_OK_FOR_L (val)))
467 return 1;
468 else if (code == AND
469 && ( CONST_OK_FOR_M (~val)
470 || CONST_OK_FOR_N (~val)))
471 return 2;
472 else if (code == PLUS
473 && ( CONST_OK_FOR_I (-val)
474 || CONST_OK_FOR_M (-val)
475 || CONST_OK_FOR_N (-val)))
476 return 2;
477
478 return 5;
479}
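
/* Illustrative: (plus (reg) (const_int 100)) costs 1 for the constant,
   since 100 is in the movi range; (and (reg) (const_int -16)) costs 2,
   since the complement 15 == 2^4 - 1 is directly loadable.  */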
480
 481/* What does an AND instruction cost? We do this because immediates may
 482 have been relaxed. We want to ensure that CSE eliminates redundant loads
fa4fdf98 483 of relaxed immediates; otherwise we get bad code (multiple reloads of the same const). */
484
fab7adbf 485static int
c167bbfc 486mcore_and_cost (rtx x)
1bba6ac0 487{
90fab4b1 488 HOST_WIDE_INT val;
1bba6ac0 489
490 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
491 return 2;
492
493 val = INTVAL (XEXP (x, 1));
494
fa4fdf98 495 /* Do it directly. */
1bba6ac0 496 if (CONST_OK_FOR_K (val) || CONST_OK_FOR_M (~val))
497 return 2;
498 /* Takes one instruction to load. */
499 else if (const_ok_for_mcore (val))
500 return 3;
501 /* Takes two instructions to load. */
502 else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
503 return 4;
504
fa4fdf98 505 /* Takes a lrw to load. */
1bba6ac0 506 return 5;
507}
508
fa4fdf98 509/* What does an OR cost? See mcore_and_cost(). */
510
fab7adbf 511static int
c167bbfc 512mcore_ior_cost (rtx x)
1bba6ac0 513{
90fab4b1 514 HOST_WIDE_INT val;
1bba6ac0 515
516 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
517 return 2;
518
519 val = INTVAL (XEXP (x, 1));
520
fa4fdf98 521 /* Do it directly with bclri. */
1bba6ac0 522 if (CONST_OK_FOR_M (val))
523 return 2;
fa4fdf98 524 /* Takes one instruction to load. */
1bba6ac0 525 else if (const_ok_for_mcore (val))
526 return 3;
fa4fdf98 527 /* Takes two instructions to load. */
1bba6ac0 528 else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
529 return 4;
530
fa4fdf98 531 /* Takes a lrw to load. */
1bba6ac0 532 return 5;
533}
534
fab7adbf 535static bool
20d892d1 536mcore_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
537 int * total, bool speed ATTRIBUTE_UNUSED)
fab7adbf 538{
539 switch (code)
540 {
541 case CONST_INT:
ef51d1e3 542 *total = mcore_const_costs (x, (enum rtx_code) outer_code);
fab7adbf 543 return true;
544 case CONST:
545 case LABEL_REF:
546 case SYMBOL_REF:
547 *total = 5;
548 return true;
549 case CONST_DOUBLE:
550 *total = 10;
551 return true;
552
553 case AND:
554 *total = COSTS_N_INSNS (mcore_and_cost (x));
555 return true;
556
557 case IOR:
558 *total = COSTS_N_INSNS (mcore_ior_cost (x));
559 return true;
560
561 case DIV:
562 case UDIV:
563 case MOD:
564 case UMOD:
565 case FLOAT:
566 case FIX:
567 *total = COSTS_N_INSNS (100);
568 return true;
569
570 default:
571 return false;
572 }
573}
574
74f4459c 575/* Prepare the operands for a comparison. Return whether the branch/setcc
576 should reverse the operands. */
fa4fdf98 577
74f4459c 578bool
579mcore_gen_compare (enum rtx_code code, rtx op0, rtx op1)
1bba6ac0 580{
74f4459c 581 rtx cc_reg = gen_rtx_REG (CCmode, CC_REG);
582 bool invert;
583
1bba6ac0 584 if (GET_CODE (op1) == CONST_INT)
585 {
90fab4b1 586 HOST_WIDE_INT val = INTVAL (op1);
1bba6ac0 587
588 switch (code)
589 {
74f4459c 590 case GTU:
591 /* Unsigned > 0 is the same as != 0; everything else is converted
592 below to LEU (reversed cmphs). */
593 if (val == 0)
594 code = NE;
595 break;
596
597 /* Check whether (LE A imm) can become (LT A imm + 1),
598 or (GT A imm) can become (GE A imm + 1). */
599 case GT:
1bba6ac0 600 case LE:
601 if (CONST_OK_FOR_J (val + 1))
602 {
74f4459c 603 op1 = GEN_INT (val + 1);
604 code = code == LE ? LT : GE;
1bba6ac0 605 }
606 break;
607
608 default:
609 break;
610 }
611 }
74f4459c 612
1bba6ac0 613 if (CONSTANT_P (op1) && GET_CODE (op1) != CONST_INT)
614 op1 = force_reg (SImode, op1);
615
616 /* cmpnei: 0-31 (K immediate)
fa4fdf98 617 cmplti: 1-32 (J immediate, 0 using btsti x,31). */
74f4459c 618 invert = false;
1bba6ac0 619 switch (code)
620 {
fa4fdf98 621 case EQ: /* Use inverted condition, cmpne. */
1bba6ac0 622 code = NE;
74f4459c 623 invert = true;
c167bbfc 624 /* Drop through. */
fa4fdf98 625
626 case NE: /* Use normal condition, cmpne. */
1bba6ac0 627 if (GET_CODE (op1) == CONST_INT && ! CONST_OK_FOR_K (INTVAL (op1)))
628 op1 = force_reg (SImode, op1);
629 break;
630
fa4fdf98 631 case LE: /* Use inverted condition, reversed cmplt. */
1bba6ac0 632 code = GT;
74f4459c 633 invert = true;
c167bbfc 634 /* Drop through. */
fa4fdf98 635
636 case GT: /* Use normal condition, reversed cmplt. */
1bba6ac0 637 if (GET_CODE (op1) == CONST_INT)
638 op1 = force_reg (SImode, op1);
639 break;
640
fa4fdf98 641 case GE: /* Use inverted condition, cmplt. */
1bba6ac0 642 code = LT;
74f4459c 643 invert = true;
c167bbfc 644 /* Drop through. */
fa4fdf98 645
646 case LT: /* Use normal condition, cmplt. */
1bba6ac0 647 if (GET_CODE (op1) == CONST_INT &&
c167bbfc 648 /* covered by btsti x,31. */
1bba6ac0 649 INTVAL (op1) != 0 &&
650 ! CONST_OK_FOR_J (INTVAL (op1)))
651 op1 = force_reg (SImode, op1);
652 break;
653
fa4fdf98 654 case GTU: /* Use inverted condition, cmple. */
74f4459c 655 /* We coped with unsigned > 0 above. */
044e64da 656 gcc_assert (GET_CODE (op1) != CONST_INT || INTVAL (op1) != 0);
1bba6ac0 657 code = LEU;
74f4459c 658 invert = true;
c167bbfc 659 /* Drop through. */
fa4fdf98 660
3fcfff30 661 case LEU: /* Use normal condition, reversed cmphs. */
1bba6ac0 662 if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
663 op1 = force_reg (SImode, op1);
664 break;
665
fa4fdf98 666 case LTU: /* Use inverted condition, cmphs. */
1bba6ac0 667 code = GEU;
74f4459c 668 invert = true;
c167bbfc 669 /* Drop through. */
fa4fdf98 670
671 case GEU: /* Use normal condition, cmphs. */
1bba6ac0 672 if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
673 op1 = force_reg (SImode, op1);
674 break;
675
676 default:
677 break;
678 }
679
d1f9b275 680 emit_insn (gen_rtx_SET (cc_reg, gen_rtx_fmt_ee (code, CCmode, op0, op1)));
74f4459c 681 return invert;
1bba6ac0 682}
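
/* Illustrative: for an EQ test against the constant 5, the code above
   rewrites the comparison to NE (returning invert = true) and emits a
   set of the CC register that the "cmpnei" patterns can match; the
   caller then branches on the inverted sense of the condition bit
   (e.g. bf instead of bt).  */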
683
1bba6ac0 684int
c167bbfc 685mcore_symbolic_address_p (rtx x)
1bba6ac0 686{
687 switch (GET_CODE (x))
688 {
689 case SYMBOL_REF:
690 case LABEL_REF:
691 return 1;
692 case CONST:
693 x = XEXP (x, 0);
694 return ( (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
695 || GET_CODE (XEXP (x, 0)) == LABEL_REF)
696 && GET_CODE (XEXP (x, 1)) == CONST_INT);
697 default:
698 return 0;
699 }
700}
701
1bba6ac0 702/* Functions to output assembly code for a function call. */
2eebe422 703
1bba6ac0 704char *
c167bbfc 705mcore_output_call (rtx operands[], int index)
1bba6ac0 706{
707 static char buffer[20];
708 rtx addr = operands [index];
709
710 if (REG_P (addr))
711 {
712 if (TARGET_CG_DATA)
713 {
044e64da 714 gcc_assert (mcore_current_function_name);
1bba6ac0 715
716 ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
717 "unknown", 1);
718 }
719
720 sprintf (buffer, "jsr\t%%%d", index);
721 }
722 else
723 {
724 if (TARGET_CG_DATA)
725 {
044e64da 726 gcc_assert (mcore_current_function_name);
727 gcc_assert (GET_CODE (addr) == SYMBOL_REF);
1bba6ac0 728
044e64da 729 ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
730 XSTR (addr, 0), 0);
1bba6ac0 731 }
732
733 sprintf (buffer, "jbsr\t%%%d", index);
734 }
735
736 return buffer;
737}
738
 739/* Can we load a constant with a single instruction? */
fa4fdf98 740
1e235216 741int
90fab4b1 742const_ok_for_mcore (HOST_WIDE_INT value)
1bba6ac0 743{
744 if (value >= 0 && value <= 127)
745 return 1;
746
747 /* Try exact power of two. */
90fab4b1 748 if (CONST_OK_FOR_M (value))
1bba6ac0 749 return 1;
750
3fcfff30 751 /* Try exact power of two - 1. */
90fab4b1 752 if (CONST_OK_FOR_N (value) && value != -1)
1bba6ac0 753 return 1;
754
755 return 0;
756}
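
/* e.g., 100 (movi range), 4096 (2^12, bgeni) and 511 (2^9 - 1, bmaski)
   are all single-instruction loads; 257 is not.  */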
757
 758/* Can we load a constant inline with up to 2 instructions? */
fa4fdf98 759
1bba6ac0 760int
90fab4b1 761mcore_const_ok_for_inline (HOST_WIDE_INT value)
1bba6ac0 762{
90fab4b1 763 HOST_WIDE_INT x, y;
1bba6ac0 764
765 return try_constant_tricks (value, & x, & y) > 0;
766}
767
 768/* Are we loading the constant using a not? */
fa4fdf98 769
1bba6ac0 770int
90fab4b1 771mcore_const_trick_uses_not (HOST_WIDE_INT value)
1bba6ac0 772{
90fab4b1 773 HOST_WIDE_INT x, y;
1bba6ac0 774
775 return try_constant_tricks (value, & x, & y) == 2;
776}
777
 778/* Try tricks to load a constant inline and return the trick number on
 779 success (0 means it is not inlinable).
fa4fdf98 780
781 0: not inlinable
782 1: single instruction (do the usual thing)
783 2: single insn followed by a 'not'
 783 3: single insn followed by an addi
 784 4: single insn followed by a subi
786 5: single insn followed by rsubi
787 6: single insn followed by bseti
788 7: single insn followed by bclri
789 8: single insn followed by rotli
790 9: single insn followed by lsli
791 10: single insn followed by ixh
792 11: single insn followed by ixw. */
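
/* For example (illustrative): 257 is not loadable directly, but
   257 - 1 == 256 == 2^8 is, so the routine below returns 3 with
   *x = 256 and *y = 1; output_inline_const() then emits
   "bgeni rd,8" followed by "addi rd,1".  */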
1bba6ac0 793
794static int
90fab4b1 795try_constant_tricks (HOST_WIDE_INT value, HOST_WIDE_INT * x, HOST_WIDE_INT * y)
1bba6ac0 796{
90fab4b1 797 HOST_WIDE_INT i;
798 unsigned HOST_WIDE_INT bit, shf, rot;
1bba6ac0 799
800 if (const_ok_for_mcore (value))
fa4fdf98 801 return 1; /* Do the usual thing. */
1bba6ac0 802
90fab4b1 803 if (! TARGET_HARDLIT)
804 return 0;
805
806 if (const_ok_for_mcore (~value))
807 {
808 *x = ~value;
809 return 2;
810 }
811
812 for (i = 1; i <= 32; i++)
1bba6ac0 813 {
90fab4b1 814 if (const_ok_for_mcore (value - i))
1bba6ac0 815 {
90fab4b1 816 *x = value - i;
817 *y = i;
818
819 return 3;
1bba6ac0 820 }
90fab4b1 821
822 if (const_ok_for_mcore (value + i))
1bba6ac0 823 {
90fab4b1 824 *x = value + i;
825 *y = i;
826
827 return 4;
1bba6ac0 828 }
90fab4b1 829 }
830
831 bit = 0x80000000ULL;
832
833 for (i = 0; i <= 31; i++)
834 {
835 if (const_ok_for_mcore (i - value))
1bba6ac0 836 {
90fab4b1 837 *x = i - value;
838 *y = i;
839
840 return 5;
1bba6ac0 841 }
90fab4b1 842
843 if (const_ok_for_mcore (value & ~bit))
1bba6ac0 844 {
90fab4b1 845 *y = bit;
846 *x = value & ~bit;
847 return 6;
1bba6ac0 848 }
90fab4b1 849
850 if (const_ok_for_mcore (value | bit))
1bba6ac0 851 {
90fab4b1 852 *y = ~bit;
853 *x = value | bit;
854
855 return 7;
1bba6ac0 856 }
90fab4b1 857
858 bit >>= 1;
859 }
860
861 shf = value;
862 rot = value;
863
864 for (i = 1; i < 31; i++)
865 {
866 int c;
867
868 /* MCore has rotate left. */
869 c = rot << 31;
870 rot >>= 1;
871 rot &= 0x7FFFFFFF;
872 rot |= c; /* Simulate rotate. */
873
874 if (const_ok_for_mcore (rot))
1bba6ac0 875 {
90fab4b1 876 *y = i;
877 *x = rot;
878
879 return 8;
880 }
881
882 if (shf & 1)
883 shf = 0; /* Can't use logical shift, low order bit is one. */
884
885 shf >>= 1;
886
887 if (shf != 0 && const_ok_for_mcore (shf))
888 {
889 *y = i;
890 *x = shf;
891
892 return 9;
1bba6ac0 893 }
894 }
90fab4b1 895
896 if ((value % 3) == 0 && const_ok_for_mcore (value / 3))
897 {
898 *x = value / 3;
899
900 return 10;
901 }
902
903 if ((value % 5) == 0 && const_ok_for_mcore (value / 5))
904 {
905 *x = value / 5;
906
907 return 11;
908 }
1bba6ac0 909
910 return 0;
911}
912
1bba6ac0 913/* Check whether REG is dead at FIRST. This is done by searching ahead
914 for either the next use (i.e., reg is live), a death note, or a set of
915 reg. Don't just use dead_or_set_p() since reload does not always mark
916 deaths (especially if PRESERVE_DEATH_NOTES_REGNO_P is not defined). We
fa4fdf98 917 can ignore subregs by extracting the actual register. BRC */
918
1bba6ac0 919int
91a55c11 920mcore_is_dead (rtx_insn *first, rtx reg)
1bba6ac0 921{
91a55c11 922 rtx_insn *insn;
1bba6ac0 923
924 /* For mcore, subregs can't live independently of their parent regs. */
925 if (GET_CODE (reg) == SUBREG)
926 reg = SUBREG_REG (reg);
927
928 /* Dies immediately. */
929 if (dead_or_set_p (first, reg))
930 return 1;
931
932 /* Look for conclusive evidence of live/death, otherwise we have
933 to assume that it is live. */
934 for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
935 {
aa90bb35 936 if (JUMP_P (insn))
1bba6ac0 937 return 0; /* We lose track, assume it is alive. */
938
aa90bb35 939 else if (CALL_P (insn))
1bba6ac0 940 {
941 /* Call's might use it for target or register parms. */
942 if (reg_referenced_p (reg, PATTERN (insn))
943 || find_reg_fusage (insn, USE, reg))
944 return 0;
945 else if (dead_or_set_p (insn, reg))
946 return 1;
947 }
aa90bb35 948 else if (NONJUMP_INSN_P (insn))
1bba6ac0 949 {
950 if (reg_referenced_p (reg, PATTERN (insn)))
951 return 0;
952 else if (dead_or_set_p (insn, reg))
953 return 1;
954 }
955 }
956
33f88b1c 957 /* No conclusive evidence either way; we cannot take the chance
1bba6ac0 958 that control flow hid the use from us -- "I'm not dead yet". */
959 return 0;
960}
961
1bba6ac0 962/* Count the number of ones in mask. */
fa4fdf98 963
1bba6ac0 964int
90fab4b1 965mcore_num_ones (HOST_WIDE_INT mask)
1bba6ac0 966{
fa4fdf98 967 /* A trick to count set bits recently posted on comp.compilers. */
1bba6ac0 968 mask = (mask >> 1 & 0x55555555) + (mask & 0x55555555);
969 mask = ((mask >> 2) & 0x33333333) + (mask & 0x33333333);
970 mask = ((mask >> 4) + mask) & 0x0f0f0f0f;
971 mask = ((mask >> 8) + mask);
972
973 return (mask + (mask >> 16)) & 0xff;
974}
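
/* Worked example: for mask == 0xff the intermediate values are 0xaa
   (each 2-bit field holds its pair count), then 0x44 (each 4-bit field
   holds 4), then 0x08 (the low byte holds 8), and the final fold
   returns 8.  */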
975
fa4fdf98 976/* Count the number of zeros in mask. */
977
1bba6ac0 978int
90fab4b1 979mcore_num_zeros (HOST_WIDE_INT mask)
1bba6ac0 980{
981 return 32 - mcore_num_ones (mask);
982}
983
984/* Determine byte being masked. */
fa4fdf98 985
1bba6ac0 986int
c167bbfc 987mcore_byte_offset (unsigned int mask)
1bba6ac0 988{
30435bf8 989 if (mask == 0x00ffffffL)
1bba6ac0 990 return 0;
30435bf8 991 else if (mask == 0xff00ffffL)
1bba6ac0 992 return 1;
30435bf8 993 else if (mask == 0xffff00ffL)
1bba6ac0 994 return 2;
30435bf8 995 else if (mask == 0xffffff00L)
1bba6ac0 996 return 3;
997
998 return -1;
999}
1000
1001/* Determine halfword being masked. */
fa4fdf98 1002
1bba6ac0 1003int
c167bbfc 1004mcore_halfword_offset (unsigned int mask)
1bba6ac0 1005{
1006 if (mask == 0x0000ffffL)
1007 return 0;
30435bf8 1008 else if (mask == 0xffff0000L)
1bba6ac0 1009 return 1;
1010
1011 return -1;
1012}
1013
1014/* Output a series of bseti's corresponding to mask. */
fa4fdf98 1015
2eebe422 1016const char *
c167bbfc 1017mcore_output_bseti (rtx dst, int mask)
1bba6ac0 1018{
1019 rtx out_operands[2];
1020 int bit;
1021
1022 out_operands[0] = dst;
1023
1024 for (bit = 0; bit < 32; bit++)
1025 {
1026 if ((mask & 0x1) == 0x1)
1027 {
1028 out_operands[1] = GEN_INT (bit);
1029
1030 output_asm_insn ("bseti\t%0,%1", out_operands);
1031 }
1032 mask >>= 1;
1033 }
1034
1035 return "";
1036}
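
/* e.g., mask == 0x12 (bits 1 and 4 set) emits "bseti dst,1" followed by
   "bseti dst,4".  */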
1037
1038/* Output a series of bclri's corresponding to mask. */
fa4fdf98 1039
2eebe422 1040const char *
c167bbfc 1041mcore_output_bclri (rtx dst, int mask)
1bba6ac0 1042{
1043 rtx out_operands[2];
1044 int bit;
1045
1046 out_operands[0] = dst;
1047
1048 for (bit = 0; bit < 32; bit++)
1049 {
1050 if ((mask & 0x1) == 0x0)
1051 {
1052 out_operands[1] = GEN_INT (bit);
1053
1054 output_asm_insn ("bclri\t%0,%1", out_operands);
1055 }
1056
1057 mask >>= 1;
1058 }
1059
1060 return "";
1061}
1062
 1063/* Output a conditional move of two constants that are within +/- 1 of each
 1064 other. See the "movtK" patterns in mcore.md. I'm not sure this is
1065 really worth the effort. */
fa4fdf98 1066
2eebe422 1067const char *
c167bbfc 1068mcore_output_cmov (rtx operands[], int cmp_t, const char * test)
1bba6ac0 1069{
90fab4b1 1070 HOST_WIDE_INT load_value;
1071 HOST_WIDE_INT adjust_value;
1bba6ac0 1072 rtx out_operands[4];
1073
1074 out_operands[0] = operands[0];
1075
fa4fdf98 1076 /* Check to see which constant is loadable. */
1bba6ac0 1077 if (const_ok_for_mcore (INTVAL (operands[1])))
1078 {
1079 out_operands[1] = operands[1];
1080 out_operands[2] = operands[2];
1081 }
1082 else if (const_ok_for_mcore (INTVAL (operands[2])))
1083 {
1084 out_operands[1] = operands[2];
1085 out_operands[2] = operands[1];
1086
fa4fdf98 1087 /* Complement test since constants are swapped. */
1bba6ac0 1088 cmp_t = (cmp_t == 0);
1089 }
1090 load_value = INTVAL (out_operands[1]);
1091 adjust_value = INTVAL (out_operands[2]);
1092
fa4fdf98 1093 /* First output the test if folded into the pattern. */
1bba6ac0 1094
1095 if (test)
1096 output_asm_insn (test, operands);
1097
fa4fdf98 1098 /* Load the constant - for now, only support constants that can be
1bba6ac0 1099 generated with a single instruction. Maybe add general inlinable
1100 constants later (this will increase the # of patterns since the
fa4fdf98 1101 instruction sequence has a different length attribute). */
1bba6ac0 1102 if (load_value >= 0 && load_value <= 127)
1103 output_asm_insn ("movi\t%0,%1", out_operands);
90fab4b1 1104 else if (CONST_OK_FOR_M (load_value))
1bba6ac0 1105 output_asm_insn ("bgeni\t%0,%P1", out_operands);
90fab4b1 1106 else if (CONST_OK_FOR_N (load_value))
1bba6ac0 1107 output_asm_insn ("bmaski\t%0,%N1", out_operands);
1108
fa4fdf98 1109 /* Output the constant adjustment. */
1bba6ac0 1110 if (load_value > adjust_value)
1111 {
1112 if (cmp_t)
1113 output_asm_insn ("decf\t%0", out_operands);
1114 else
1115 output_asm_insn ("dect\t%0", out_operands);
1116 }
1117 else
1118 {
1119 if (cmp_t)
1120 output_asm_insn ("incf\t%0", out_operands);
1121 else
1122 output_asm_insn ("inct\t%0", out_operands);
1123 }
1124
1125 return "";
1126}
1127
1128/* Outputs the peephole for moving a constant that gets not'ed followed
fa4fdf98 1129 by an and (i.e. combine the not and the and into andn). BRC */
1130
2eebe422 1131const char *
c167bbfc 1132mcore_output_andn (rtx insn ATTRIBUTE_UNUSED, rtx operands[])
1bba6ac0 1133{
90fab4b1 1134 HOST_WIDE_INT x, y;
1bba6ac0 1135 rtx out_operands[3];
2eebe422 1136 const char * load_op;
1bba6ac0 1137 char buf[256];
044e64da 1138 int trick_no;
1bba6ac0 1139
044e64da 1140 trick_no = try_constant_tricks (INTVAL (operands[1]), &x, &y);
1141 gcc_assert (trick_no == 2);
1bba6ac0 1142
1143 out_operands[0] = operands[0];
90fab4b1 1144 out_operands[1] = GEN_INT (x);
1bba6ac0 1145 out_operands[2] = operands[2];
1146
1147 if (x >= 0 && x <= 127)
1148 load_op = "movi\t%0,%1";
fa4fdf98 1149
1150 /* Try exact power of two. */
90fab4b1 1151 else if (CONST_OK_FOR_M (x))
1bba6ac0 1152 load_op = "bgeni\t%0,%P1";
fa4fdf98 1153
1154 /* Try exact power of two - 1. */
90fab4b1 1155 else if (CONST_OK_FOR_N (x))
1bba6ac0 1156 load_op = "bmaski\t%0,%N1";
fa4fdf98 1157
90fab4b1 1158 else
1159 {
1160 load_op = "BADMOVI-andn\t%0, %1";
1161 gcc_unreachable ();
1162 }
1bba6ac0 1163
1164 sprintf (buf, "%s\n\tandn\t%%2,%%0", load_op);
1165 output_asm_insn (buf, out_operands);
1166
1167 return "";
1168}
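
/* Illustrative: for an AND with -256 (0xffffff00), the constant itself
   is not loadable but its complement 255 == 2^8 - 1 is, so this emits
   "bmaski %0,8" followed by "andn %2,%0" -- two instructions and no
   literal pool entry.  */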
1169
1170/* Output an inline constant. */
fa4fdf98 1171
2eebe422 1172static const char *
3754d046 1173output_inline_const (machine_mode mode, rtx operands[])
1bba6ac0 1174{
90fab4b1 1175 HOST_WIDE_INT x = 0, y = 0;
1bba6ac0 1176 int trick_no;
1177 rtx out_operands[3];
1178 char buf[256];
1179 char load_op[256];
2eebe422 1180 const char *dst_fmt;
90fab4b1 1181 HOST_WIDE_INT value;
1bba6ac0 1182
1183 value = INTVAL (operands[1]);
1bba6ac0 1184
044e64da 1185 trick_no = try_constant_tricks (value, &x, &y);
1186 /* lrw's are handled separately: Large inlinable constants never get
1187 turned into lrw's. Our caller uses try_constant_tricks to back
1188 off to an lrw rather than calling this routine. */
1189 gcc_assert (trick_no != 0);
1190
1bba6ac0 1191 if (trick_no == 1)
1192 x = value;
1193
fa4fdf98 1194 /* operands: 0 = dst, 1 = load immed., 2 = immed. adjustment. */
1bba6ac0 1195 out_operands[0] = operands[0];
1196 out_operands[1] = GEN_INT (x);
1197
1198 if (trick_no > 2)
1199 out_operands[2] = GEN_INT (y);
1200
fa4fdf98 1201 /* Select dst format based on mode. */
1bba6ac0 1202 if (mode == DImode && (! TARGET_LITTLE_END))
1203 dst_fmt = "%R0";
1204 else
1205 dst_fmt = "%0";
1206
1207 if (x >= 0 && x <= 127)
1208 sprintf (load_op, "movi\t%s,%%1", dst_fmt);
fa4fdf98 1209
1bba6ac0 1210 /* Try exact power of two. */
90fab4b1 1211 else if (CONST_OK_FOR_M (x))
1bba6ac0 1212 sprintf (load_op, "bgeni\t%s,%%P1", dst_fmt);
fa4fdf98 1213
1214 /* Try exact power of two - 1. */
90fab4b1 1215 else if (CONST_OK_FOR_N (x))
1bba6ac0 1216 sprintf (load_op, "bmaski\t%s,%%N1", dst_fmt);
fa4fdf98 1217
90fab4b1 1218 else
1219 {
1220 sprintf (load_op, "BADMOVI-inline_const %s, %%1", dst_fmt);
1221 gcc_unreachable ();
1222 }
1bba6ac0 1223
1224 switch (trick_no)
1225 {
1226 case 1:
1227 strcpy (buf, load_op);
1228 break;
1229 case 2: /* not */
90fab4b1 1230 sprintf (buf, "%s\n\tnot\t%s\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1bba6ac0 1231 break;
1232 case 3: /* add */
90fab4b1 1233 sprintf (buf, "%s\n\taddi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1bba6ac0 1234 break;
1235 case 4: /* sub */
90fab4b1 1236 sprintf (buf, "%s\n\tsubi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1bba6ac0 1237 break;
1238 case 5: /* rsub */
fa4fdf98 1239 /* Never happens unless -mrsubi, see try_constant_tricks(). */
90fab4b1 1240 sprintf (buf, "%s\n\trsubi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1bba6ac0 1241 break;
90fab4b1 1242 case 6: /* bseti */
1243 sprintf (buf, "%s\n\tbseti\t%s,%%P2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1bba6ac0 1244 break;
1245 case 7: /* bclr */
90fab4b1 1246 sprintf (buf, "%s\n\tbclri\t%s,%%Q2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1bba6ac0 1247 break;
1248 case 8: /* rotl */
90fab4b1 1249 sprintf (buf, "%s\n\trotli\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1bba6ac0 1250 break;
1251 case 9: /* lsl */
90fab4b1 1252 sprintf (buf, "%s\n\tlsli\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1bba6ac0 1253 break;
1254 case 10: /* ixh */
90fab4b1 1255 sprintf (buf, "%s\n\tixh\t%s,%s\t// %ld 0x%lx", load_op, dst_fmt, dst_fmt, value, value);
1bba6ac0 1256 break;
1257 case 11: /* ixw */
90fab4b1 1258 sprintf (buf, "%s\n\tixw\t%s,%s\t// %ld 0x%lx", load_op, dst_fmt, dst_fmt, value, value);
1bba6ac0 1259 break;
1260 default:
1261 return "";
1262 }
1263
1264 output_asm_insn (buf, out_operands);
1265
1266 return "";
1267}
1268
1269/* Output a move of a word or less value. */
fa4fdf98 1270
2eebe422 1271const char *
c167bbfc 1272mcore_output_move (rtx insn ATTRIBUTE_UNUSED, rtx operands[],
3754d046 1273 machine_mode mode ATTRIBUTE_UNUSED)
1bba6ac0 1274{
1275 rtx dst = operands[0];
1276 rtx src = operands[1];
1277
1278 if (GET_CODE (dst) == REG)
1279 {
1280 if (GET_CODE (src) == REG)
1281 {
1282 if (REGNO (src) == CC_REG) /* r-c */
1283 return "mvc\t%0";
1284 else
1285 return "mov\t%0,%1"; /* r-r*/
1286 }
1287 else if (GET_CODE (src) == MEM)
1288 {
1289 if (GET_CODE (XEXP (src, 0)) == LABEL_REF)
1290 return "lrw\t%0,[%1]"; /* a-R */
1291 else
d13344c8 1292 switch (GET_MODE (src)) /* r-m */
1293 {
1294 case SImode:
1295 return "ldw\t%0,%1";
1296 case HImode:
1297 return "ld.h\t%0,%1";
1298 case QImode:
1299 return "ld.b\t%0,%1";
1300 default:
044e64da 1301 gcc_unreachable ();
d13344c8 1302 }
1bba6ac0 1303 }
1304 else if (GET_CODE (src) == CONST_INT)
1305 {
90fab4b1 1306 HOST_WIDE_INT x, y;
1bba6ac0 1307
1308 if (CONST_OK_FOR_I (INTVAL (src))) /* r-I */
1309 return "movi\t%0,%1";
1310 else if (CONST_OK_FOR_M (INTVAL (src))) /* r-M */
1311 return "bgeni\t%0,%P1\t// %1 %x1";
1312 else if (CONST_OK_FOR_N (INTVAL (src))) /* r-N */
1313 return "bmaski\t%0,%N1\t// %1 %x1";
1314 else if (try_constant_tricks (INTVAL (src), &x, &y)) /* R-P */
1315 return output_inline_const (SImode, operands); /* 1-2 insns */
1316 else
fa4fdf98 1317 return "lrw\t%0,%x1\t// %1"; /* Get it from literal pool. */
1bba6ac0 1318 }
1319 else
fa4fdf98 1320 return "lrw\t%0, %1"; /* Into the literal pool. */
1bba6ac0 1321 }
1322 else if (GET_CODE (dst) == MEM) /* m-r */
d13344c8 1323 switch (GET_MODE (dst))
1324 {
1325 case SImode:
1326 return "stw\t%1,%0";
1327 case HImode:
1328 return "st.h\t%1,%0";
1329 case QImode:
1330 return "st.b\t%1,%0";
1331 default:
044e64da 1332 gcc_unreachable ();
d13344c8 1333 }
1bba6ac0 1334
044e64da 1335 gcc_unreachable ();
1bba6ac0 1336}
1337
1bba6ac0 1338/* Return a sequence of instructions to perform DI or DF move.
1339 Since the MCORE cannot move a DI or DF in one instruction, we have
1340 to take care when we see overlapping source and dest registers. */
fa4fdf98 1341
2eebe422 1342const char *
3754d046 1343mcore_output_movedouble (rtx operands[], machine_mode mode ATTRIBUTE_UNUSED)
1bba6ac0 1344{
1345 rtx dst = operands[0];
1346 rtx src = operands[1];
1347
1348 if (GET_CODE (dst) == REG)
1349 {
1350 if (GET_CODE (src) == REG)
1351 {
1352 int dstreg = REGNO (dst);
1353 int srcreg = REGNO (src);
fa4fdf98 1354
1bba6ac0 1355 /* Ensure the second source is not overwritten. */
1356 if (srcreg + 1 == dstreg)
1357 return "mov %R0,%R1\n\tmov %0,%1";
1358 else
1359 return "mov %0,%1\n\tmov %R0,%R1";
1360 }
1361 else if (GET_CODE (src) == MEM)
1362 {
6830c3f7 1363 rtx memexp = XEXP (src, 0);
1bba6ac0 1364 int dstreg = REGNO (dst);
1365 int basereg = -1;
1366
1367 if (GET_CODE (memexp) == LABEL_REF)
1368 return "lrw\t%0,[%1]\n\tlrw\t%R0,[%R1]";
1369 else if (GET_CODE (memexp) == REG)
1370 basereg = REGNO (memexp);
1371 else if (GET_CODE (memexp) == PLUS)
1372 {
1373 if (GET_CODE (XEXP (memexp, 0)) == REG)
1374 basereg = REGNO (XEXP (memexp, 0));
1375 else if (GET_CODE (XEXP (memexp, 1)) == REG)
1376 basereg = REGNO (XEXP (memexp, 1));
1377 else
044e64da 1378 gcc_unreachable ();
1bba6ac0 1379 }
1380 else
044e64da 1381 gcc_unreachable ();
1bba6ac0 1382
fa4fdf98 1383 /* ??? length attribute is wrong here. */
1bba6ac0 1384 if (dstreg == basereg)
1385 {
fa4fdf98 1386 /* Just load them in reverse order. */
1bba6ac0 1387 return "ldw\t%R0,%R1\n\tldw\t%0,%1";
fa4fdf98 1388
1bba6ac0 1389 /* XXX: alternative: move basereg to basereg+1
fa4fdf98 1390 and then fall through. */
1bba6ac0 1391 }
1392 else
1393 return "ldw\t%0,%1\n\tldw\t%R0,%R1";
1394 }
1395 else if (GET_CODE (src) == CONST_INT)
1396 {
1397 if (TARGET_LITTLE_END)
1398 {
1399 if (CONST_OK_FOR_I (INTVAL (src)))
1400 output_asm_insn ("movi %0,%1", operands);
1401 else if (CONST_OK_FOR_M (INTVAL (src)))
1402 output_asm_insn ("bgeni %0,%P1", operands);
1bba6ac0 1403 else if (CONST_OK_FOR_N (INTVAL (src)))
1404 output_asm_insn ("bmaski %0,%N1", operands);
1405 else
044e64da 1406 gcc_unreachable ();
1bba6ac0 1407
1408 if (INTVAL (src) < 0)
1409 return "bmaski %R0,32";
1410 else
1411 return "movi %R0,0";
1412 }
1413 else
1414 {
1415 if (CONST_OK_FOR_I (INTVAL (src)))
1416 output_asm_insn ("movi %R0,%1", operands);
1417 else if (CONST_OK_FOR_M (INTVAL (src)))
1418 output_asm_insn ("bgeni %R0,%P1", operands);
1bba6ac0 1419 else if (CONST_OK_FOR_N (INTVAL (src)))
1420 output_asm_insn ("bmaski %R0,%N1", operands);
1421 else
044e64da 1422 gcc_unreachable ();
90fab4b1 1423
1bba6ac0 1424 if (INTVAL (src) < 0)
1425 return "bmaski %0,32";
1426 else
1427 return "movi %0,0";
1428 }
1429 }
1430 else
044e64da 1431 gcc_unreachable ();
1bba6ac0 1432 }
1433 else if (GET_CODE (dst) == MEM && GET_CODE (src) == REG)
1434 return "stw\t%1,%0\n\tstw\t%R1,%R0";
1435 else
044e64da 1436 gcc_unreachable ();
1bba6ac0 1437}
1438
1439/* Predicates used by the templates. */
1440
1bba6ac0 1441int
c167bbfc 1442mcore_arith_S_operand (rtx op)
1bba6ac0 1443{
1444 if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (~INTVAL (op)))
1445 return 1;
1446
1447 return 0;
1448}
1449
fa4fdf98 1450/* Expand insert bit field. BRC */
1451
1bba6ac0 1452int
c167bbfc 1453mcore_expand_insv (rtx operands[])
1bba6ac0 1454{
1455 int width = INTVAL (operands[1]);
1456 int posn = INTVAL (operands[2]);
1457 int mask;
1458 rtx mreg, sreg, ereg;
1459
1460 /* To get width 1 insv, the test in store_bit_field() (expmed.c, line 191)
1461 for width==1 must be removed. Look around line 368. This is something
fa4fdf98 1462 we really want the md part to do. */
1bba6ac0 1463 if (width == 1 && GET_CODE (operands[3]) == CONST_INT)
1464 {
fa4fdf98 1465 /* Do directly with bseti or bclri. */
1466 /* RBE: 2/97 consider only low bit of constant. */
90fab4b1 1467 if ((INTVAL (operands[3]) & 1) == 0)
1bba6ac0 1468 {
1469 mask = ~(1 << posn);
d1f9b275 1470 emit_insn (gen_rtx_SET (operands[0],
1471 gen_rtx_AND (SImode, operands[0],
1472 GEN_INT (mask))));
1bba6ac0 1473 }
1474 else
1475 {
1476 mask = 1 << posn;
d1f9b275 1477 emit_insn (gen_rtx_SET (operands[0],
1478 gen_rtx_IOR (SImode, operands[0],
1479 GEN_INT (mask))));
1bba6ac0 1480 }
1481
1482 return 1;
1483 }
1484
ceb2fe0f 1485 /* Look at some bit-field placements that we aren't interested
fa4fdf98 1486 in handling ourselves, unless specifically directed to do so. */
1bba6ac0 1487 if (! TARGET_W_FIELD)
1488 return 0; /* Generally, give up about now. */
1489
1490 if (width == 8 && posn % 8 == 0)
1491 /* Byte sized and aligned; let caller break it up. */
1492 return 0;
1493
1494 if (width == 16 && posn % 16 == 0)
1495 /* Short sized and aligned; let caller break it up. */
1496 return 0;
1497
1498 /* The general case - we can do this a little bit better than what the
1499 machine independent part tries. This will get rid of all the subregs
1500 that mess up constant folding in combine when working with relaxed
fa4fdf98 1501 immediates. */
1bba6ac0 1502
1503 /* If setting the entire field, do it directly. */
90fab4b1 1504 if (GET_CODE (operands[3]) == CONST_INT
1505 && INTVAL (operands[3]) == ((1 << width) - 1))
1bba6ac0 1506 {
1507 mreg = force_reg (SImode, GEN_INT (INTVAL (operands[3]) << posn));
d1f9b275 1508 emit_insn (gen_rtx_SET (operands[0],
1509 gen_rtx_IOR (SImode, operands[0], mreg)));
1bba6ac0 1510 return 1;
1511 }
1512
1513 /* Generate the clear mask. */
1514 mreg = force_reg (SImode, GEN_INT (~(((1 << width) - 1) << posn)));
1515
1516 /* Clear the field, to overlay it later with the source. */
d1f9b275 1517 emit_insn (gen_rtx_SET (operands[0],
1518 gen_rtx_AND (SImode, operands[0], mreg)));
1bba6ac0 1519
1520 /* If the source is constant 0, we've nothing to add back. */
1521 if (GET_CODE (operands[3]) == CONST_INT && INTVAL (operands[3]) == 0)
1522 return 1;
1523
1524 /* XXX: Should we worry about more games with constant values?
1525 We've covered the high profile: set/clear single-bit and many-bit
1526 fields. How often do we see "arbitrary bit pattern" constants? */
1527 sreg = copy_to_mode_reg (SImode, operands[3]);
1528
1529 /* Extract src as same width as dst (needed for signed values). We
1530 always have to do this since we widen everything to SImode.
1531 We don't have to mask if we're shifting this up against the
1532 MSB of the register (e.g., the shift will push out any hi-order
fa4fdf98 1533 bits). */
2eebe422 1534 if (width + posn != (int) GET_MODE_SIZE (SImode))
1bba6ac0 1535 {
1536 ereg = force_reg (SImode, GEN_INT ((1 << width) - 1));
d1f9b275 1537 emit_insn (gen_rtx_SET (sreg, gen_rtx_AND (SImode, sreg, ereg)));
1bba6ac0 1538 }
1539
fa4fdf98 1540 /* Insert source value in dest. */
1bba6ac0 1541 if (posn != 0)
d1f9b275 1542 emit_insn (gen_rtx_SET (sreg, gen_rtx_ASHIFT (SImode, sreg,
1543 GEN_INT (posn))));
1bba6ac0 1544
d1f9b275 1545 emit_insn (gen_rtx_SET (operands[0],
1546 gen_rtx_IOR (SImode, operands[0], sreg)));
1bba6ac0 1547
1548 return 1;
1549}
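
/* Illustrative: storing the constant 1 into a 1-bit field at bit 3
   takes the width == 1 path above and emits a single IOR with 0x8
   (a bseti); with TARGET_W_FIELD, an all-ones 4-bit field at bit 4
   takes the "entire field" path and becomes one IOR with 0xf0.  */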
1bba6ac0 1550\f
1551/* ??? Block move stuff stolen from m88k. This code has not been
1552 verified for correctness. */
1553
1554/* Emit code to perform a block move. Choose the best method.
1555
1556 OPERANDS[0] is the destination.
1557 OPERANDS[1] is the source.
1558 OPERANDS[2] is the size.
1559 OPERANDS[3] is the alignment safe to use. */
1560
1561/* Emit code to perform a block move with an offset sequence of ldw/st
1562 instructions (..., ldw 0, stw 1, ldw 1, stw 0, ...). SIZE and ALIGN are
1563 known constants. DEST and SRC are registers. OFFSET is the known
1564 starting point for the output pattern. */
1565
3754d046 1566static const machine_mode mode_from_align[] =
1bba6ac0 1567{
1568 VOIDmode, QImode, HImode, VOIDmode, SImode,
1bba6ac0 1569};
1570
1571static void
51b742cc 1572block_move_sequence (rtx dst_mem, rtx src_mem, int size, int align)
1bba6ac0 1573{
1574 rtx temp[2];
3754d046 1575 machine_mode mode[2];
1bba6ac0 1576 int amount[2];
51b742cc 1577 bool active[2];
1bba6ac0 1578 int phase = 0;
1579 int next;
51b742cc 1580 int offset_ld = 0;
1581 int offset_st = 0;
1582 rtx x;
1bba6ac0 1583
51b742cc 1584 x = XEXP (dst_mem, 0);
1585 if (!REG_P (x))
1586 {
1587 x = force_reg (Pmode, x);
1588 dst_mem = replace_equiv_address (dst_mem, x);
1589 }
1bba6ac0 1590
51b742cc 1591 x = XEXP (src_mem, 0);
1592 if (!REG_P (x))
1bba6ac0 1593 {
51b742cc 1594 x = force_reg (Pmode, x);
1595 src_mem = replace_equiv_address (src_mem, x);
1bba6ac0 1596 }
1597
51b742cc 1598 active[0] = active[1] = false;
1599
1bba6ac0 1600 do
1601 {
1bba6ac0 1602 next = phase;
51b742cc 1603 phase ^= 1;
1bba6ac0 1604
1605 if (size > 0)
1606 {
51b742cc 1607 int next_amount;
1608
1609 next_amount = (size >= 4 ? 4 : (size >= 2 ? 2 : 1));
1610 next_amount = MIN (next_amount, align);
1611
1612 amount[next] = next_amount;
1613 mode[next] = mode_from_align[next_amount];
1614 temp[next] = gen_reg_rtx (mode[next]);
1615
1616 x = adjust_address (src_mem, mode[next], offset_ld);
d1f9b275 1617 emit_insn (gen_rtx_SET (temp[next], x));
51b742cc 1618
1619 offset_ld += next_amount;
1620 size -= next_amount;
1621 active[next] = true;
1bba6ac0 1622 }
1623
1624 if (active[phase])
1625 {
51b742cc 1626 active[phase] = false;
1bba6ac0 1627
51b742cc 1628 x = adjust_address (dst_mem, mode[phase], offset_st);
d1f9b275 1629 emit_insn (gen_rtx_SET (x, temp[phase]));
51b742cc 1630
1bba6ac0 1631 offset_st += amount[phase];
1632 }
1633 }
1634 while (active[next]);
1635}
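
/* Illustrative: a 7-byte copy with 4-byte alignment becomes ldw/stw
   (4 bytes), then ld.h/st.h (2 bytes), then ld.b/st.b (1 byte), with
   the load of each chunk overlapped against the store of the previous
   one by the two-phase loop above.  */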
1636
51b742cc 1637bool
1638mcore_expand_block_move (rtx *operands)
1bba6ac0 1639{
51b742cc 1640 HOST_WIDE_INT align, bytes, max;
1641
1642 if (GET_CODE (operands[2]) != CONST_INT)
1643 return false;
1644
1645 bytes = INTVAL (operands[2]);
1646 align = INTVAL (operands[3]);
1bba6ac0 1647
51b742cc 1648 if (bytes <= 0)
1649 return false;
1650 if (align > 4)
1651 align = 4;
1652
1653 switch (align)
1bba6ac0 1654 {
51b742cc 1655 case 4:
1656 if (bytes & 1)
1657 max = 4*4;
1658 else if (bytes & 3)
1659 max = 8*4;
1660 else
1661 max = 16*4;
1662 break;
1663 case 2:
1664 max = 4*2;
1665 break;
1666 case 1:
1667 max = 4*1;
1668 break;
1669 default:
044e64da 1670 gcc_unreachable ();
51b742cc 1671 }
1672
1673 if (bytes <= max)
1674 {
1675 block_move_sequence (operands[0], operands[1], bytes, align);
1676 return true;
1bba6ac0 1677 }
1678
51b742cc 1679 return false;
1bba6ac0 1680}
1681\f
1682
1683/* Code to generate prologue and epilogue sequences. */
1684static int number_of_regs_before_varargs;
fa4fdf98 1685
6644435d 1686/* Set by TARGET_SETUP_INCOMING_VARARGS to indicate to prolog that this is
1bba6ac0 1687 for a varargs function. */
1688static int current_function_anonymous_args;
1689
1bba6ac0 1690#define STACK_BYTES (STACK_BOUNDARY/BITS_PER_UNIT)
1691#define STORE_REACH (64) /* Maximum displace of word store + 4. */
fa4fdf98 1692#define ADDI_REACH (32) /* Maximum addi operand. */
1bba6ac0 1693
1bba6ac0 1694static void
c167bbfc 1695layout_mcore_frame (struct mcore_frame * infp)
1bba6ac0 1696{
1697 int n;
1698 unsigned int i;
1699 int nbytes;
1700 int regarg;
1701 int localregarg;
1bba6ac0 1702 int outbounds;
1703 unsigned int growths;
1704 int step;
1705
1706 /* Might have to spill bytes to re-assemble a big argument that
fa4fdf98 1707 was passed partially in registers and partially on the stack. */
abe32cce 1708 nbytes = crtl->args.pretend_args_size;
1bba6ac0 1709
1710 /* Determine how much space for spilled anonymous args (e.g., stdarg). */
1711 if (current_function_anonymous_args)
1712 nbytes += (NPARM_REGS - number_of_regs_before_varargs) * UNITS_PER_WORD;
1713
1714 infp->arg_size = nbytes;
1715
1716 /* How much space to save non-volatile registers we stomp. */
1717 infp->reg_mask = calc_live_regs (& n);
1718 infp->reg_size = n * 4;
1719
3fcfff30 1720 /* And the rest of it... locals and space for overflowed outbounds. */
1bba6ac0 1721 infp->local_size = get_frame_size ();
abe32cce 1722 infp->outbound_size = crtl->outgoing_args_size;
1bba6ac0 1723
1724 /* Make sure we have a whole number of words for the locals. */
1725 if (infp->local_size % STACK_BYTES)
1726 infp->local_size = (infp->local_size + STACK_BYTES - 1) & ~ (STACK_BYTES -1);
1727
1728 /* Only thing we know we have to pad is the outbound space, since
1729 we've aligned our locals assuming that base of locals is aligned. */
1730 infp->pad_local = 0;
1731 infp->pad_reg = 0;
1732 infp->pad_outbound = 0;
1733 if (infp->outbound_size % STACK_BYTES)
1734 infp->pad_outbound = STACK_BYTES - (infp->outbound_size % STACK_BYTES);
1735
1736 /* Now we see how we want to stage the prologue so that it does
1737 the most appropriate stack growth and register saves to either:
1738 (1) run fast,
1739 (2) reduce instruction space, or
1740 (3) reduce stack space. */
3098b2d3 1741 for (i = 0; i < ARRAY_SIZE (infp->growth); i++)
1bba6ac0 1742 infp->growth[i] = 0;
1743
1744 regarg = infp->reg_size + infp->arg_size;
1745 localregarg = infp->local_size + regarg;
1bba6ac0 1746 outbounds = infp->outbound_size + infp->pad_outbound;
1747 growths = 0;
1748
1749 /* XXX: Consider one where we consider localregarg + outbound too! */
1750
 1751 /* If the frame is <= 32 bytes and an stm would save <= 2 registers,
 1752 use stw's with offsets and buy the frame in one shot. */
1753 if (localregarg <= ADDI_REACH
1754 && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
1755 {
1756 /* Make sure we'll be aligned. */
1757 if (localregarg % STACK_BYTES)
1758 infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);
1759
1760 step = localregarg + infp->pad_reg;
1761 infp->reg_offset = infp->local_size;
1762
1763 if (outbounds + step <= ADDI_REACH && !frame_pointer_needed)
1764 {
1765 step += outbounds;
1766 infp->reg_offset += outbounds;
1767 outbounds = 0;
1768 }
1769
1770 infp->arg_offset = step - 4;
1771 infp->growth[growths++] = step;
1772 infp->reg_growth = growths;
1773 infp->local_growth = growths;
1774
fa4fdf98 1775 /* If we haven't already folded it in. */
1bba6ac0 1776 if (outbounds)
1777 infp->growth[growths++] = outbounds;
1778
1779 goto finish;
1780 }
1781
1782 /* Frame can't be done with a single subi, but can be done with 2
1783 insns. If the 'stm' is getting <= 2 registers, we use stw's and
1784 shift some of the stack purchase into the first subi, so both are
1785 single instructions. */
1786 if (localregarg <= STORE_REACH
1787 && (infp->local_size > ADDI_REACH)
1788 && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
1789 {
1790 int all;
1791
1792 /* Make sure we'll be aligned; use either pad_reg or pad_local. */
1793 if (localregarg % STACK_BYTES)
1794 infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);
1795
1796 all = localregarg + infp->pad_reg + infp->pad_local;
1797 step = ADDI_REACH; /* As much up front as we can. */
1798 if (step > all)
1799 step = all;
1800
1801 /* XXX: Consider whether step will still be aligned; we believe so. */
1802 infp->arg_offset = step - 4;
1803 infp->growth[growths++] = step;
1804 infp->reg_growth = growths;
1805 infp->reg_offset = step - infp->pad_reg - infp->reg_size;
1806 all -= step;
1807
fa4fdf98 1808 /* Can we fold in any space required for outbounds? */
1bba6ac0 1809 if (outbounds + all <= ADDI_REACH && !frame_pointer_needed)
1810 {
1811 all += outbounds;
1812 outbounds = 0;
1813 }
1814
fa4fdf98 1815 /* Get the rest of the locals in place. */
1bba6ac0 1816 step = all;
1817 infp->growth[growths++] = step;
1818 infp->local_growth = growths;
1819 all -= step;
1820
1e944a0b 1821 gcc_assert (all == 0);
1bba6ac0 1822
fa4fdf98 1823 /* Finish off if we need to do so. */
1bba6ac0 1824 if (outbounds)
1825 infp->growth[growths++] = outbounds;
1826
1827 goto finish;
1828 }
1829
1830 /* Registers + args is nicely aligned, so we'll buy that in one shot.
1831 Then we buy the rest of the frame in 1 or 2 steps depending on
1832 whether we need a frame pointer. */
1833 if ((regarg % STACK_BYTES) == 0)
1834 {
1835 infp->growth[growths++] = regarg;
1836 infp->reg_growth = growths;
1837 infp->arg_offset = regarg - 4;
1838 infp->reg_offset = 0;
1839
1840 if (infp->local_size % STACK_BYTES)
1841 infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);
1842
1843 step = infp->local_size + infp->pad_local;
1844
1845 if (!frame_pointer_needed)
1846 {
1847 step += outbounds;
1848 outbounds = 0;
1849 }
1850
1851 infp->growth[growths++] = step;
1852 infp->local_growth = growths;
1853
fa4fdf98 1854 /* If there's any left to be done. */
1bba6ac0 1855 if (outbounds)
1856 infp->growth[growths++] = outbounds;
1857
1858 goto finish;
1859 }
1860
1861 /* XXX: optimizations that we'll want to play with....
fa4fdf98 1862 -- regarg is not aligned, but it's a small number of registers;
1863 use some of localsize so that regarg is aligned and then
1864 save the registers. */
1bba6ac0 1865
1866 /* Simple encoding; plods down the stack buying the pieces as it goes.
fa4fdf98 1867 -- does not optimize space consumption.
1868 -- does not attempt to optimize instruction counts.
1869 -- but it is safe for all alignments. */
1bba6ac0 1870 if (regarg % STACK_BYTES != 0)
1871 infp->pad_reg = STACK_BYTES - (regarg % STACK_BYTES);
1872
1873 infp->growth[growths++] = infp->arg_size + infp->reg_size + infp->pad_reg;
1874 infp->reg_growth = growths;
1875 infp->arg_offset = infp->growth[0] - 4;
1876 infp->reg_offset = 0;
1877
1878 if (frame_pointer_needed)
1879 {
1880 if (infp->local_size % STACK_BYTES != 0)
1881 infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);
1882
1883 infp->growth[growths++] = infp->local_size + infp->pad_local;
1884 infp->local_growth = growths;
1885
1886 infp->growth[growths++] = outbounds;
1887 }
1888 else
1889 {
1890 if ((infp->local_size + outbounds) % STACK_BYTES != 0)
1891 infp->pad_local = STACK_BYTES - ((infp->local_size + outbounds) % STACK_BYTES);
1892
1893 infp->growth[growths++] = infp->local_size + infp->pad_local + outbounds;
1894 infp->local_growth = growths;
1895 }
1896
2eebe422 1897 /* Anything else that we've forgotten? Plus a few consistency checks. */
1bba6ac0 1898 finish:
1e944a0b 1899 gcc_assert (infp->reg_offset >= 0);
1900 gcc_assert (growths <= MAX_STACK_GROWS);
1bba6ac0 1901
1902 for (i = 0; i < growths; i++)
044e64da 1903 gcc_assert (!(infp->growth[i] % STACK_BYTES));
1bba6ac0 1904}
1905
1906/* Define the offset between two registers, one to be eliminated, and
1907 the other its replacement, at the start of a routine. */
fa4fdf98 1908
1bba6ac0 1909int
c167bbfc 1910mcore_initial_elimination_offset (int from, int to)
1bba6ac0 1911{
1912 int above_frame;
1913 int below_frame;
1914 struct mcore_frame fi;
1915
1916 layout_mcore_frame (& fi);
1917
1918 /* fp to ap */
1919 above_frame = fi.local_size + fi.pad_local + fi.reg_size + fi.pad_reg;
1920 /* sp to fp */
1921 below_frame = fi.outbound_size + fi.pad_outbound;
1922
1923 if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
1924 return above_frame;
1925
1926 if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
1927 return above_frame + below_frame;
1928
1929 if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
1930 return below_frame;
1931
044e64da 1932 gcc_unreachable ();
1bba6ac0 1933}
1934
fa4fdf98 1935/* Keep track of some information about varargs for the prolog. */
1936
5ee57edb 1937static void
39cba157 1938mcore_setup_incoming_varargs (cumulative_args_t args_so_far_v,
3754d046 1939 machine_mode mode, tree type,
5ee57edb 1940 int * ptr_pretend_size ATTRIBUTE_UNUSED,
1941 int second_time ATTRIBUTE_UNUSED)
1bba6ac0 1942{
39cba157 1943 CUMULATIVE_ARGS *args_so_far = get_cumulative_args (args_so_far_v);
1944
1bba6ac0 1945 current_function_anonymous_args = 1;
1946
1947 /* We need to know how many argument registers are used before
1948 the varargs start, so that we can push the remaining argument
1949 registers during the prologue. */
5ee57edb 1950 number_of_regs_before_varargs = *args_so_far + mcore_num_arg_regs (mode, type);
1bba6ac0 1951
dfd1079d 1952 /* There is a bug somewhere in the arg handling code.
1bba6ac0 1953 Until I can find it, this workaround always pushes the
1954 last named argument onto the stack. */
5ee57edb 1955 number_of_regs_before_varargs = *args_so_far;
1bba6ac0 1956
1957 /* The last named argument may be split between argument registers
1958 and the stack. Allow for this here. */
1959 if (number_of_regs_before_varargs > NPARM_REGS)
1960 number_of_regs_before_varargs = NPARM_REGS;
1961}
1962
1963void
c167bbfc 1964mcore_expand_prolog (void)
1bba6ac0 1965{
1966 struct mcore_frame fi;
1967 int space_allocated = 0;
1968 int growth = 0;
1969
1970 /* Find out what we're doing. */
1971 layout_mcore_frame (&fi);
1972
1973 space_allocated = fi.arg_size + fi.reg_size + fi.local_size +
1974 fi.outbound_size + fi.pad_outbound + fi.pad_local + fi.pad_reg;
1975
1976 if (TARGET_CG_DATA)
1977 {
1978 /* Emit a symbol for this routine's frame size. */
1979 rtx x;
1bba6ac0 1980
1981 x = DECL_RTL (current_function_decl);
1982
044e64da 1983 gcc_assert (GET_CODE (x) == MEM);
1bba6ac0 1984
1985 x = XEXP (x, 0);
1986
044e64da 1987 gcc_assert (GET_CODE (x) == SYMBOL_REF);
1bba6ac0 1988
dd045aee 1989 free (mcore_current_function_name);
1bba6ac0 1990
9591cb7b 1991 mcore_current_function_name = xstrdup (XSTR (x, 0));
1bba6ac0 1992
1993 ASM_OUTPUT_CG_NODE (asm_out_file, mcore_current_function_name, space_allocated);
1994
18d50ae6 1995 if (cfun->calls_alloca)
1bba6ac0 1996 ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name, "alloca", 1);
1997
1998 /* 970425: RBE:
1999 We're looking at how the 8-byte alignment affects stack layout
2000 and where we had to pad things. This emits information we can
2001 extract which tells us about frame sizes and the like. */
2002 fprintf (asm_out_file,
2003 "\t.equ\t__$frame$info$_%s_$_%d_%d_x%x_%d_%d_%d,0\n",
2004 mcore_current_function_name,
2005 fi.arg_size, fi.reg_size, fi.reg_mask,
2006 fi.local_size, fi.outbound_size,
2007 frame_pointer_needed);
2008 }
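  /* For example (hypothetical values): for a function `foo' with
     arg_size 0, reg_size 8, reg_mask 0xc000, local_size 16,
     outbound_size 0 and a frame pointer, the fprintf above emits

         .equ __$frame$info$_foo_$_0_8_xc000_16_0_1,0  */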
2009
2010 if (mcore_naked_function_p ())
2011 return;
2012
2013 /* Handle stdarg+regsaves in one shot: can't be more than 64 bytes. */
c167bbfc 2014 output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
1bba6ac0 2015
2016 /* If we have a parameter passed partially in regs and partially in memory,
2017 the registers will have been stored to memory already in function.c. So
2018 we only need to do something here for varargs functions. */
abe32cce 2019 if (fi.arg_size != 0 && crtl->args.pretend_args_size == 0)
1bba6ac0 2020 {
2021 int offset;
2022 int rn = FIRST_PARM_REG + NPARM_REGS - 1;
2023 int remaining = fi.arg_size;
2024
2025 for (offset = fi.arg_offset; remaining >= 4; offset -= 4, rn--, remaining -= 4)
2026 {
2027 emit_insn (gen_movsi
1a83b3ff 2028 (gen_rtx_MEM (SImode,
29c05e22 2029 plus_constant (Pmode, stack_pointer_rtx,
2030 offset)),
1a83b3ff 2031 gen_rtx_REG (SImode, rn)));
1bba6ac0 2032 }
2033 }
2034
fa4fdf98 2035 /* Do we need another stack adjustment before we do the register saves? */
1bba6ac0 2036 if (growth < fi.reg_growth)
c167bbfc 2037 output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
1bba6ac0 2038
2039 if (fi.reg_size != 0)
2040 {
2041 int i;
2042 int offs = fi.reg_offset;
2043
2044 for (i = 15; i >= 0; i--)
2045 {
2046 if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
2047 {
2048 int first_reg = 15;
2049
2050 while (fi.reg_mask & (1 << first_reg))
2051 first_reg--;
2052 first_reg++;
2053
1a83b3ff 2054 emit_insn (gen_store_multiple (gen_rtx_MEM (SImode, stack_pointer_rtx),
2055 gen_rtx_REG (SImode, first_reg),
1bba6ac0 2056 GEN_INT (16 - first_reg)));
2057
2058 i -= (15 - first_reg);
2059 offs += (16 - first_reg) * 4;
2060 }
2061 else if (fi.reg_mask & (1 << i))
2062 {
2063 emit_insn (gen_movsi
1a83b3ff 2064 (gen_rtx_MEM (SImode,
29c05e22 2065 plus_constant (Pmode, stack_pointer_rtx,
2066 offs)),
1a83b3ff 2067 gen_rtx_REG (SImode, i)));
1bba6ac0 2068 offs += 4;
2069 }
2070 }
2071 }
2072
2073 /* Figure the locals + outbounds. */
2074 if (frame_pointer_needed)
2075 {
2076 /* If we haven't already purchased to 'fp'. */
2077 if (growth < fi.local_growth)
c167bbfc 2078 output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
1bba6ac0 2079
2080 emit_insn (gen_movsi (frame_pointer_rtx, stack_pointer_rtx));
2081
fa4fdf98 2082 /* ... and then go any remaining distance for outbounds, etc. */
1bba6ac0 2083 if (fi.growth[growth])
2084 output_stack_adjust (-1, fi.growth[growth++]);
2085 }
2086 else
2087 {
2088 if (growth < fi.local_growth)
c167bbfc 2089 output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
1bba6ac0 2090 if (fi.growth[growth])
2091 output_stack_adjust (-1, fi.growth[growth++]);
2092 }
2093}
2094
2095void
c167bbfc 2096mcore_expand_epilog (void)
1bba6ac0 2097{
2098 struct mcore_frame fi;
2099 int i;
2100 int offs;
2101 int growth = MAX_STACK_GROWS - 1;
2102
2eebe422 2103
1bba6ac0 2104 /* Find out what we're doing. */
2105 layout_mcore_frame (&fi);
2106
2107 if (mcore_naked_function_p ())
2108 return;
2eebe422 2109
1bba6ac0 2110 /* If we had a frame pointer, restore the sp from that. */
2111 if (frame_pointer_needed)
2112 {
2113 emit_insn (gen_movsi (stack_pointer_rtx, frame_pointer_rtx));
2114 growth = fi.local_growth - 1;
2115 }
2116 else
2117 {
2118 /* XXX: the while loop should accumulate the adjustments and do a single sell. */
2119 while (growth >= fi.local_growth)
2120 {
2121 if (fi.growth[growth] != 0)
2122 output_stack_adjust (1, fi.growth[growth]);
2123 growth--;
2124 }
2125 }
2126
2127 /* Make sure we've shrunk the stack back to the point where the registers
2128 were laid down. This is typically 0/1 iterations. Then pull the
fa4fdf98 2129 register save information back off the stack. */
1bba6ac0 2130 while (growth >= fi.reg_growth)
2131 output_stack_adjust (1, fi.growth[growth--]);
2132
2133 offs = fi.reg_offset;
2134
2135 for (i = 15; i >= 0; i--)
2136 {
2137 if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
2138 {
2139 int first_reg;
2140
2141 /* Find the starting register. */
2142 first_reg = 15;
2143
2144 while (fi.reg_mask & (1 << first_reg))
2145 first_reg--;
2146
2147 first_reg++;
2148
1a83b3ff 2149 emit_insn (gen_load_multiple (gen_rtx_REG (SImode, first_reg),
2150 gen_rtx_MEM (SImode, stack_pointer_rtx),
1bba6ac0 2151 GEN_INT (16 - first_reg)));
2152
2153 i -= (15 - first_reg);
2154 offs += (16 - first_reg) * 4;
2155 }
2156 else if (fi.reg_mask & (1 << i))
2157 {
2158 emit_insn (gen_movsi
1a83b3ff 2159 (gen_rtx_REG (SImode, i),
2160 gen_rtx_MEM (SImode,
29c05e22 2161 plus_constant (Pmode, stack_pointer_rtx,
2162 offs))));
1bba6ac0 2163 offs += 4;
2164 }
2165 }
2166
2167 /* Give back anything else. */
dfd1079d 2168 /* XXX: Should accumulate total and then give it back. */
1bba6ac0 2169 while (growth >= 0)
2170 output_stack_adjust (1, fi.growth[growth--]);
2171}
2172\f
2173/* This code is borrowed from the SH port. */
2174
2175/* The MCORE cannot load a large constant into a register, constants have to
2176 come from a pc relative load. The constant referenced by a pc relative
442e3cb9 2177 load must lie less than 1k in front of the instruction. This
1bba6ac0 2178 means that we often have to dump a constant inside a function, and
2179 generate code to branch around it.
2180
2181 It is important to minimize this, since the branches will slow things
2182 down and make things bigger.
2183
2184 Worst case code looks like:
2185
2186 lrw L1,r0
2187 br L2
2188 align
2189 L1: .long value
2190 L2:
2191 ..
2192
2193 lrw L3,r0
2194 br L4
2195 align
2196 L3: .long value
2197 L4:
2198 ..
2199
2200 We fix this by performing a scan before scheduling, which notices which
2201 instructions need to have their operands fetched from the constant table
2202 and builds the table.
2203
2204 The algorithm is:
2205
2206 scan, find an instruction which needs a pcrel move. Look forward, find the
2207 last barrier which is within MAX_COUNT bytes of the requirement.
2208 If there isn't one, make one. Process all the instructions between
2209 the find and the barrier.
2210
2211 In the above example, we can tell that L3 is within 1k of L1, so
2212 the first move can be shrunk from the 2 insn+constant sequence into
2213 just 1 insn, and the constant moved to L3 to make:
2214
2215 lrw L1,r0
2216 ..
2217 lrw L3,r0
2218 bra L4
2219 align
2220 L3:.long value
2221 L4:.long value
2222
2223 Then the second move becomes the target for the shortening process. */
2224
2225typedef struct
2226{
2227 rtx value; /* Value in table. */
2228 rtx label; /* Label of value. */
2229} pool_node;
2230
2231/* The maximum number of constants that can fit into one pool, since
2232 the pc relative range is 0...1020 bytes and constants are at least 4
3c364971 2233 bytes long. We subtract 4 from the range to allow for the case where
1bba6ac0 2234 we need to add a branch/align before the constant pool. */
2235
2236#define MAX_COUNT 1016
2237#define MAX_POOL_SIZE (MAX_COUNT/4)
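/* Worked out: 1020 bytes of pc-relative reach minus 4 for the possible
   branch/align gives MAX_COUNT == 1016, and at a minimum of 4 bytes per
   constant that is at most 1016/4 == 254 pool entries.  */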
2238static pool_node pool_vector[MAX_POOL_SIZE];
2239static int pool_size;
2240
2241/* Dump out any constants accumulated in the final pass. These
2242 will only be labels. */
fa4fdf98 2243
2eebe422 2244const char *
c167bbfc 2245mcore_output_jump_label_table (void)
1bba6ac0 2246{
2247 int i;
2248
2249 if (pool_size)
2250 {
2251 fprintf (asm_out_file, "\t.align 2\n");
2252
2253 for (i = 0; i < pool_size; i++)
2254 {
2255 pool_node * p = pool_vector + i;
2256
805e22b2 2257 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (p->label));
1bba6ac0 2258
2259 output_asm_insn (".long %0", &p->value);
2260 }
2261
2262 pool_size = 0;
2263 }
2264
2265 return "";
2266}
2267
1bba6ac0 2268/* Check whether insn is a candidate for a conditional. */
fa4fdf98 2269
1bba6ac0 2270static cond_type
c167bbfc 2271is_cond_candidate (rtx insn)
1bba6ac0 2272{
2273 /* The only things we conditionalize are those that can be directly
2274 changed into a conditional. Only bother with SImode items. If
2275 we wanted to be a little more aggressive, we could also do other
fa4fdf98 2276 modes such as DImode with reg-reg move or load 0. */
aa90bb35 2277 if (NONJUMP_INSN_P (insn))
1bba6ac0 2278 {
2279 rtx pat = PATTERN (insn);
2280 rtx src, dst;
2281
2282 if (GET_CODE (pat) != SET)
2283 return COND_NO;
2284
2285 dst = XEXP (pat, 0);
2286
2287 if ((GET_CODE (dst) != REG &&
2288 GET_CODE (dst) != SUBREG) ||
2289 GET_MODE (dst) != SImode)
2290 return COND_NO;
2291
2292 src = XEXP (pat, 1);
2293
2294 if ((GET_CODE (src) == REG ||
2295 (GET_CODE (src) == SUBREG &&
2296 GET_CODE (SUBREG_REG (src)) == REG)) &&
2297 GET_MODE (src) == SImode)
2298 return COND_MOV_INSN;
2299 else if (GET_CODE (src) == CONST_INT &&
2300 INTVAL (src) == 0)
2301 return COND_CLR_INSN;
2302 else if (GET_CODE (src) == PLUS &&
2303 (GET_CODE (XEXP (src, 0)) == REG ||
2304 (GET_CODE (XEXP (src, 0)) == SUBREG &&
2305 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
2306 GET_MODE (XEXP (src, 0)) == SImode &&
2307 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2308 INTVAL (XEXP (src, 1)) == 1)
2309 return COND_INC_INSN;
2310 else if (((GET_CODE (src) == MINUS &&
2311 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2312 INTVAL (XEXP (src, 1)) == 1) ||
2313 (GET_CODE (src) == PLUS &&
2314 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2315 INTVAL (XEXP (src, 1)) == -1)) &&
2316 (GET_CODE (XEXP (src, 0)) == REG ||
2317 (GET_CODE (XEXP (src, 0)) == SUBREG &&
2318 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
2319 GET_MODE (XEXP (src, 0)) == SImode)
2320 return COND_DEC_INSN;
2321
3fcfff30 2322 /* Some insns that we don't bother with:
1bba6ac0 2323 (set (rx:DI) (ry:DI))
2324 (set (rx:DI) (const_int 0))
2325 */
2326
2327 }
aa90bb35 2328 else if (JUMP_P (insn)
2329 && GET_CODE (PATTERN (insn)) == SET
2330 && GET_CODE (XEXP (PATTERN (insn), 1)) == LABEL_REF)
1bba6ac0 2331 return COND_BRANCH_INSN;
2332
2333 return COND_NO;
2334}
2335
2336/* Emit a conditional version of insn and replace the old insn with the
2337 new one. Return the new insn if emitted. */
fa4fdf98 2338
91a55c11 2339static rtx_insn *
c167bbfc 2340emit_new_cond_insn (rtx insn, int cond)
1bba6ac0 2341{
2342 rtx c_insn = 0;
2343 rtx pat, dst, src;
2344 cond_type num;
2345
2346 if ((num = is_cond_candidate (insn)) == COND_NO)
2347 return NULL;
2348
2349 pat = PATTERN (insn);
2350
aa90bb35 2351 if (NONJUMP_INSN_P (insn))
1bba6ac0 2352 {
2353 dst = SET_DEST (pat);
2354 src = SET_SRC (pat);
2355 }
2356 else
7b7a8c50 2357 {
2358 dst = JUMP_LABEL (insn);
2359 src = NULL_RTX;
2360 }
1bba6ac0 2361
2362 switch (num)
2363 {
2364 case COND_MOV_INSN:
2365 case COND_CLR_INSN:
2366 if (cond)
2367 c_insn = gen_movt0 (dst, src, dst);
2368 else
2369 c_insn = gen_movt0 (dst, dst, src);
2370 break;
2371
2372 case COND_INC_INSN:
2373 if (cond)
2374 c_insn = gen_incscc (dst, dst);
2375 else
2376 c_insn = gen_incscc_false (dst, dst);
2377 break;
2378
2379 case COND_DEC_INSN:
2380 if (cond)
2381 c_insn = gen_decscc (dst, dst);
2382 else
2383 c_insn = gen_decscc_false (dst, dst);
2384 break;
2385
2386 case COND_BRANCH_INSN:
2387 if (cond)
2388 c_insn = gen_branch_true (dst);
2389 else
2390 c_insn = gen_branch_false (dst);
2391 break;
2392
2393 default:
2394 return NULL;
2395 }
2396
2397 /* Only copy the notes if they exist. */
2398 if (rtx_length [GET_CODE (c_insn)] >= 7 && rtx_length [GET_CODE (insn)] >= 7)
2399 {
2400 /* We really don't need to bother with the notes and links at this
2401 point, but go ahead and save the notes. This will help is_dead()
2402 when applying peepholes (links don't matter since they are not
2403 used any more beyond this point for the mcore). */
2404 REG_NOTES (c_insn) = REG_NOTES (insn);
2405 }
2406
2407 if (num == COND_BRANCH_INSN)
2408 {
2409 /* For jumps, we need to be a little bit careful and emit the new jump
2410 before the old one and to update the use count for the target label.
2411 This way, the barrier following the old (uncond) jump will get
2412 deleted, but the label won't. */
2413 c_insn = emit_jump_insn_before (c_insn, insn);
2414
2415 ++ LABEL_NUSES (dst);
2416
2417 JUMP_LABEL (c_insn) = dst;
2418 }
2419 else
2420 c_insn = emit_insn_after (c_insn, insn);
2421
2422 delete_insn (insn);
2423
91a55c11 2424 return as_a <rtx_insn *> (c_insn);
1bba6ac0 2425}
2426
2427/* Attempt to change a basic block into a series of conditional insns. This
2428 works by taking the branch at the end of the 1st block and scanning for the
2429 end of the 2nd block. If all instructions in the 2nd block have cond.
2430 versions and the label at the start of block 3 is the same as the target
2431 from the branch at block 1, then conditionalize all insns in block 2 using
2432 the inverse condition of the branch at block 1. (Note I'm bending the
2433 definition of basic block here.)
2434
2435 e.g., change:
2436
2437 bt L2 <-- end of block 1 (delete)
2438 mov r7,r8
2439 addu r7,1
2440 br L3 <-- end of block 2
2441
2442 L2: ... <-- start of block 3 (NUSES==1)
2443 L3: ...
2444
2445 to:
2446
2447 movf r7,r8
2448 incf r7
2449 bf L3
2450
2451 L3: ...
2452
2453 we can delete the L2 label if NUSES==1 and re-apply the optimization
2454 starting at the last instruction of block 2. This may allow an entire
fa4fdf98 2455 if-then-else statement to be conditionalized. BRC */
91a55c11 2456static rtx_insn *
2457conditionalize_block (rtx_insn *first)
1bba6ac0 2458{
91a55c11 2459 rtx_insn *insn;
1bba6ac0 2460 rtx br_pat;
91a55c11 2461 rtx_insn *end_blk_1_br = 0;
2462 rtx_insn *end_blk_2_insn = 0;
2463 rtx_insn *start_blk_3_lab = 0;
1bba6ac0 2464 int cond;
2465 int br_lab_num;
2466 int blk_size = 0;
2467
2468
2469 /* Check that the first insn is a candidate conditional jump. This is
2470 the one that we'll eliminate. If not, advance to the next insn to
2471 try. */
aa90bb35 2472 if (! JUMP_P (first)
2473 || GET_CODE (PATTERN (first)) != SET
2474 || GET_CODE (XEXP (PATTERN (first), 1)) != IF_THEN_ELSE)
1bba6ac0 2475 return NEXT_INSN (first);
2476
2477 /* Extract some information we need. */
2478 end_blk_1_br = first;
2479 br_pat = PATTERN (end_blk_1_br);
2480
2481 /* Complement the condition since we use the reverse cond. for the insns. */
2482 cond = (GET_CODE (XEXP (XEXP (br_pat, 1), 0)) == EQ);
2483
2484 /* Determine what kind of branch we have. */
2485 if (GET_CODE (XEXP (XEXP (br_pat, 1), 1)) == LABEL_REF)
2486 {
2487 /* A normal branch, so extract label out of first arm. */
2488 br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 1), 0));
2489 }
2490 else
2491 {
2492 /* An inverse branch, so extract the label out of the 2nd arm
2493 and complement the condition. */
2494 cond = (cond == 0);
2495 br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 2), 0));
2496 }
2497
2498 /* Scan forward for the start of block 2: it must start with a
2499 label and that label must be the same as the branch target
2500 label from block 1. We don't care about whether block 2 actually
2501 ends with a branch or a label (an uncond. branch is
2502 conditionalizable). */
2503 for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
2504 {
2505 enum rtx_code code;
2506
2507 code = GET_CODE (insn);
2508
3fcfff30 2509 /* Look for the label at the start of block 3. */
1bba6ac0 2510 if (code == CODE_LABEL && CODE_LABEL_NUMBER (insn) == br_lab_num)
2511 break;
2512
2513 /* Skip barriers, notes, and conditionalizable insns. If the
2514 insn is not conditionalizable or makes this optimization fail,
2515 just return the next insn so we can start over from that point. */
2516 if (code != BARRIER && code != NOTE && !is_cond_candidate (insn))
2517 return NEXT_INSN (insn);
2518
a361b456 2519 /* Remember the last real insn before the label (i.e. end of block 2). */
1bba6ac0 2520 if (code == JUMP_INSN || code == INSN)
2521 {
2522 blk_size ++;
2523 end_blk_2_insn = insn;
2524 }
2525 }
2526
2527 if (!insn)
2528 return insn;
2529
2530 /* It is possible for this optimization to slow performance if the blocks
2531 are long. This really depends upon whether the branch is likely taken
2532 or not. If the branch is taken, we slow performance in many cases. But,
2533 if the branch is not taken, we always help performance (for a single
2534 block; for a double block, i.e. when the optimization is re-applied,
2535 this is not true, since the 'right thing' depends on the overall length of
2536 the collapsed block). As a compromise, don't apply this optimization on
2537 blocks larger than size 2 (unlikely for the mcore) when speed is important.
2538 The best threshold depends on the latencies of the instructions (i.e.,
2539 the branch penalty). */
2540 if (optimize > 1 && blk_size > 2)
2541 return insn;
2542
2543 /* At this point, we've found the start of block 3 and we know that
2544 it is the destination of the branch from block 1. Also, all
2545 instructions in the block 2 are conditionalizable. So, apply the
2546 conditionalization and delete the branch. */
2547 start_blk_3_lab = insn;
2548
2549 for (insn = NEXT_INSN (end_blk_1_br); insn != start_blk_3_lab;
2550 insn = NEXT_INSN (insn))
2551 {
91a55c11 2552 rtx_insn *newinsn;
1bba6ac0 2553
dd1286fb 2554 if (insn->deleted ())
1bba6ac0 2555 continue;
2556
3fcfff30 2557 /* Try to form a conditional variant of the instruction and emit it. */
1bba6ac0 2558 if ((newinsn = emit_new_cond_insn (insn, cond)))
2559 {
2560 if (end_blk_2_insn == insn)
2561 end_blk_2_insn = newinsn;
2562
2563 insn = newinsn;
2564 }
2565 }
2566
2567 /* Note whether we will delete the label starting blk 3 when the jump
2568 gets deleted. If so, we want to re-apply this optimization at the
2569 last real instruction right before the label. */
2570 if (LABEL_NUSES (start_blk_3_lab) == 1)
2571 {
2572 start_blk_3_lab = 0;
2573 }
2574
2575 /* ??? We probably should redistribute the death notes for this insn, esp.
2576 the death of cc, but it doesn't really matter this late in the game.
2577 The peepholes all use is_dead() which will find the correct death
2578 regardless of whether there is a note. */
2579 delete_insn (end_blk_1_br);
2580
2581 if (! start_blk_3_lab)
2582 return end_blk_2_insn;
2583
fa4fdf98 2584 /* Return the insn right after the label at the start of block 3. */
1bba6ac0 2585 return NEXT_INSN (start_blk_3_lab);
2586}
2587
2588/* Apply the conditionalization of blocks optimization. This is the
2589 outer loop that traverses through the insns scanning for a branch
2590 that signifies an opportunity to apply the optimization. Note that
2591 this optimization is applied late. If we could apply it earlier,
2592 say before cse 2, it may expose more optimization opportunities.
2593 But the payback probably isn't really worth the effort (we'd have
2594 to update all reg/flow/notes/links/etc to make it work - and stick it
fa4fdf98 2595 in before cse 2). */
2596
1bba6ac0 2597static void
c167bbfc 2598conditionalize_optimization (void)
1bba6ac0 2599{
91a55c11 2600 rtx_insn *insn;
1bba6ac0 2601
2efea8c0 2602 for (insn = get_insns (); insn; insn = conditionalize_block (insn))
1bba6ac0 2603 continue;
2604}
2605
2efea8c0 2606/* This is to handle loads from the constant pool. */
fa4fdf98 2607
2efea8c0 2608static void
c167bbfc 2609mcore_reorg (void)
1bba6ac0 2610{
2611 /* Reset this variable. */
2612 current_function_anonymous_args = 0;
2613
1bba6ac0 2614 if (optimize == 0)
2615 return;
2616
2617 /* Conditionalize blocks where we can. */
2efea8c0 2618 conditionalize_optimization ();
1bba6ac0 2619
2620 /* Literal pool generation is now pushed off until the assembler. */
2621}
2622
2623\f
d13344c8 2624/* Return true if X is something that can be moved directly into r15. */
1bba6ac0 2625
d13344c8 2626bool
c167bbfc 2627mcore_r15_operand_p (rtx x)
d13344c8 2628{
2629 switch (GET_CODE (x))
2630 {
2631 case CONST_INT:
2632 return mcore_const_ok_for_inline (INTVAL (x));
1bba6ac0 2633
d13344c8 2634 case REG:
2635 case SUBREG:
2636 case MEM:
2637 return 1;
2638
2639 default:
2640 return 0;
2641 }
2642}
2643
8deb3959 2644/* Implement SECONDARY_RELOAD_CLASS. If RCLASS contains r15, and we can't
d13344c8 2645 directly move X into it, use r1-r14 as a temporary. */
c167bbfc 2646
d13344c8 2647enum reg_class
8deb3959 2648mcore_secondary_reload_class (enum reg_class rclass,
3754d046 2649 machine_mode mode ATTRIBUTE_UNUSED, rtx x)
d13344c8 2650{
8deb3959 2651 if (TEST_HARD_REG_BIT (reg_class_contents[rclass], 15)
d13344c8 2652 && !mcore_r15_operand_p (x))
2653 return LRW_REGS;
2654 return NO_REGS;
2655}
1bba6ac0 2656
d13344c8 2657/* Return the reg_class to use when reloading the rtx X into the class
8deb3959 2658 RCLASS. If X is too complex to move directly into r15, prefer to
d13344c8 2659 use LRW_REGS instead. */
c167bbfc 2660
1bba6ac0 2661enum reg_class
8deb3959 2662mcore_reload_class (rtx x, enum reg_class rclass)
1bba6ac0 2663{
8deb3959 2664 if (reg_class_subset_p (LRW_REGS, rclass) && !mcore_r15_operand_p (x))
d13344c8 2665 return LRW_REGS;
1bba6ac0 2666
8deb3959 2667 return rclass;
1bba6ac0 2668}
2669
2670/* Tell me if a pair of reg/subreg rtx's actually refer to the same
2671 register. Note that the current version doesn't worry about whether
2672 they are the same mode or note (e.g., a QImode in r2 matches an HImode
2673 in r2 matches an SImode in r2. Might think in the future about whether
2674 we want to be able to say something about modes. */
c167bbfc 2675
1bba6ac0 2676int
c167bbfc 2677mcore_is_same_reg (rtx x, rtx y)
1bba6ac0 2678{
3fcfff30 2679 /* Strip any and all of the subreg wrappers. */
1bba6ac0 2680 while (GET_CODE (x) == SUBREG)
2681 x = SUBREG_REG (x);
2682
2683 while (GET_CODE (y) == SUBREG)
2684 y = SUBREG_REG (y);
2685
2686 if (GET_CODE (x) == REG && GET_CODE (y) == REG && REGNO (x) == REGNO (y))
2687 return 1;
2688
2689 return 0;
2690}
2691
4c834714 2692static void
2693mcore_option_override (void)
1bba6ac0 2694{
1bba6ac0 2695 /* Only the m340 supports little endian code. */
2696 if (TARGET_LITTLE_END && ! TARGET_M340)
6a9bcd3e 2697 target_flags |= MASK_M340;
1bba6ac0 2698}
02e53c17 2699
1bba6ac0 2700\f
1bba6ac0 2701/* Compute the number of word sized registers needed to
2702 hold a function argument of mode MODE and type TYPE. */
c167bbfc 2703
1bba6ac0 2704int
3754d046 2705mcore_num_arg_regs (machine_mode mode, const_tree type)
1bba6ac0 2706{
2707 int size;
2708
0336f0f0 2709 if (targetm.calls.must_pass_in_stack (mode, type))
1bba6ac0 2710 return 0;
2711
2712 if (type && mode == BLKmode)
2713 size = int_size_in_bytes (type);
2714 else
2715 size = GET_MODE_SIZE (mode);
2716
2717 return ROUND_ADVANCE (size);
2718}
2719
2720static rtx
3754d046 2721handle_structs_in_regs (machine_mode mode, const_tree type, int reg)
1bba6ac0 2722{
2723 int size;
2724
fccee353 2725 /* The MCore ABI defines that a structure whose size is not a whole multiple
1bba6ac0 2726 of bytes is passed packed into registers (or spilled onto the stack if
2727 not enough registers are available) with the last few bytes of the
2728 structure being packed, left-justified, into the last register/stack slot.
2729 GCC handles this correctly if the last word is in a stack slot, but we
2730 have to generate a special, PARALLEL RTX if the last word is in an
2731 argument register. */
2732 if (type
2733 && TYPE_MODE (type) == BLKmode
2734 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
2735 && (size = int_size_in_bytes (type)) > UNITS_PER_WORD
2736 && (size % UNITS_PER_WORD != 0)
2737 && (reg + mcore_num_arg_regs (mode, type) <= (FIRST_PARM_REG + NPARM_REGS)))
2738 {
2739 rtx arg_regs [NPARM_REGS];
2740 int nregs;
2741 rtx result;
2742 rtvec rtvec;
2743
2744 for (nregs = 0; size > 0; size -= UNITS_PER_WORD)
2745 {
2746 arg_regs [nregs] =
2747 gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, reg ++),
2748 GEN_INT (nregs * UNITS_PER_WORD));
2749 nregs ++;
2750 }
2751
2752 /* We assume here that NPARM_REGS == 6. The assert checks this. */
1e944a0b 2753 gcc_assert (ARRAY_SIZE (arg_regs) == 6);
1bba6ac0 2754 rtvec = gen_rtvec (nregs, arg_regs[0], arg_regs[1], arg_regs[2],
2755 arg_regs[3], arg_regs[4], arg_regs[5]);
2756
2757 result = gen_rtx_PARALLEL (mode, rtvec);
2758 return result;
2759 }
2760
2761 return gen_rtx_REG (mode, reg);
2762}
2763
2764rtx
3b2411a8 2765mcore_function_value (const_tree valtype, const_tree func)
1bba6ac0 2766{
3754d046 2767 machine_mode mode;
1bba6ac0 2768 int unsigned_p;
2769
2770 mode = TYPE_MODE (valtype);
2771
3b2411a8 2772 /* Since we promote return types, we must promote the mode here too. */
74e653fe 2773 mode = promote_function_mode (valtype, mode, &unsigned_p, func, 1);
1bba6ac0 2774
2775 return handle_structs_in_regs (mode, valtype, FIRST_RET_REG);
2776}
2777
2778/* Define where to put the arguments to a function.
2779 Value is zero to push the argument on the stack,
2780 or a hard register in which to store the argument.
2781
2782 MODE is the argument's machine mode.
2783 TYPE is the data type of the argument (as a tree).
2784 This is null for libcalls where that information may
2785 not be available.
2786 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2787 the preceding args and about the function being called.
2788 NAMED is nonzero if this argument is a named parameter
2789 (otherwise it is an extra parameter matching an ellipsis).
2790
2791 On MCore the first args are normally in registers
2792 and the rest are pushed. Any arg that starts within the first
2793 NPARM_REGS words is at least partially passed in a register unless
2794 its data type forbids. */
c167bbfc 2795
da6d22fa 2796static rtx
3754d046 2797mcore_function_arg (cumulative_args_t cum, machine_mode mode,
da6d22fa 2798 const_tree type, bool named)
1bba6ac0 2799{
2800 int arg_reg;
2801
51b742cc 2802 if (! named || mode == VOIDmode)
1bba6ac0 2803 return 0;
2804
0336f0f0 2805 if (targetm.calls.must_pass_in_stack (mode, type))
1bba6ac0 2806 return 0;
2807
39cba157 2808 arg_reg = ROUND_REG (*get_cumulative_args (cum), mode);
1bba6ac0 2809
2810 if (arg_reg < NPARM_REGS)
2811 return handle_structs_in_regs (mode, type, FIRST_PARM_REG + arg_reg);
2812
2813 return 0;
2814}
2815
da6d22fa 2816static void
3754d046 2817mcore_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
da6d22fa 2818 const_tree type, bool named ATTRIBUTE_UNUSED)
2819{
39cba157 2820 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
2821
da6d22fa 2822 *cum = (ROUND_REG (*cum, mode)
2823 + (int)named * mcore_num_arg_regs (mode, type));
2824}
2825
bd99ba64 2826static unsigned int
3754d046 2827mcore_function_arg_boundary (machine_mode mode,
bd99ba64 2828 const_tree type ATTRIBUTE_UNUSED)
2829{
2830 /* Doubles must be aligned to an 8 byte boundary. */
2831 return (mode != BLKmode && GET_MODE_SIZE (mode) == 8
2832 ? BIGGEST_ALIGNMENT
2833 : PARM_BOUNDARY);
2834}
2835
f054eb3c 2836/* Returns the number of bytes of argument registers required to hold *part*
2837 of a parameter of machine mode MODE and type TYPE (which may be NULL if
dfd1079d 2838 the type is not known). If the argument fits entirely in the argument
1bba6ac0 2839 registers, or entirely on the stack, then 0 is returned. CUM is the
2840 number of argument registers already used by earlier parameters to
2841 the function. */
c167bbfc 2842
f054eb3c 2843static int
3754d046 2844mcore_arg_partial_bytes (cumulative_args_t cum, machine_mode mode,
f054eb3c 2845 tree type, bool named)
1bba6ac0 2846{
39cba157 2847 int reg = ROUND_REG (*get_cumulative_args (cum), mode);
1bba6ac0 2848
2849 if (named == 0)
2850 return 0;
2851
0336f0f0 2852 if (targetm.calls.must_pass_in_stack (mode, type))
1bba6ac0 2853 return 0;
2854
2855 /* REG is not the *hardware* register number of the register that holds
2856 the argument, it is the *argument* register number. So for example,
2857 the first argument to a function goes in argument register 0, which
2858 translates (for the MCore) into hardware register 2. The second
2859 argument goes into argument register 1, which translates into hardware
2860 register 3, and so on. NPARM_REGS is the number of argument registers
2861 supported by the target, not the maximum hardware register number of
2862 the target. */
2863 if (reg >= NPARM_REGS)
2864 return 0;
2865
2866 /* If the argument fits entirely in registers, return 0. */
2867 if (reg + mcore_num_arg_regs (mode, type) <= NPARM_REGS)
2868 return 0;
2869
2870 /* The argument overflows the number of available argument registers.
2871 Compute how many argument registers have not yet been assigned to
2872 hold an argument. */
2873 reg = NPARM_REGS - reg;
2874
2875 /* Return partially in registers and partially on the stack. */
f054eb3c 2876 return reg * UNITS_PER_WORD;
1bba6ac0 2877}
2878\f
e911aedf 2879/* Return nonzero if SYMBOL is marked as being dllexport'd. */
c167bbfc 2880
1bba6ac0 2881int
c167bbfc 2882mcore_dllexport_name_p (const char * symbol)
1bba6ac0 2883{
2884 return symbol[0] == '@' && symbol[1] == 'e' && symbol[2] == '.';
2885}
2886
e911aedf 2887/* Return nonzero if SYMBOL is marked as being dllimport'd. */
c167bbfc 2888
1bba6ac0 2889int
c167bbfc 2890mcore_dllimport_name_p (const char * symbol)
1bba6ac0 2891{
2892 return symbol[0] == '@' && symbol[1] == 'i' && symbol[2] == '.';
2893}
2894
2895/* Mark a DECL as being dllexport'd. */
c167bbfc 2896
1bba6ac0 2897static void
c167bbfc 2898mcore_mark_dllexport (tree decl)
1bba6ac0 2899{
04238935 2900 const char * oldname;
1bba6ac0 2901 char * newname;
2902 rtx rtlname;
2903 tree idp;
2904
2905 rtlname = XEXP (DECL_RTL (decl), 0);
2906
044e64da 2907 if (GET_CODE (rtlname) == MEM)
2908 rtlname = XEXP (rtlname, 0);
2909 gcc_assert (GET_CODE (rtlname) == SYMBOL_REF);
2910 oldname = XSTR (rtlname, 0);
1bba6ac0 2911
2912 if (mcore_dllexport_name_p (oldname))
2913 return; /* Already done. */
2914
225ab426 2915 newname = XALLOCAVEC (char, strlen (oldname) + 4);
1bba6ac0 2916 sprintf (newname, "@e.%s", oldname);
2917
2918 /* We pass newname through get_identifier to ensure it has a unique
2919 address. RTL processing can sometimes peek inside the symbol ref
2920 and compare the string's addresses to see if two symbols are
2921 identical. */
2922 /* ??? At least I think that's why we do this. */
2923 idp = get_identifier (newname);
2924
2925 XEXP (DECL_RTL (decl), 0) =
1a83b3ff 2926 gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp));
1bba6ac0 2927}
2928
2929/* Mark a DECL as being dllimport'd. */
c167bbfc 2930
1bba6ac0 2931static void
c167bbfc 2932mcore_mark_dllimport (tree decl)
1bba6ac0 2933{
04238935 2934 const char * oldname;
1bba6ac0 2935 char * newname;
2936 tree idp;
2937 rtx rtlname;
2938 rtx newrtl;
2939
2940 rtlname = XEXP (DECL_RTL (decl), 0);
2941
044e64da 2942 if (GET_CODE (rtlname) == MEM)
2943 rtlname = XEXP (rtlname, 0);
2944 gcc_assert (GET_CODE (rtlname) == SYMBOL_REF);
2945 oldname = XSTR (rtlname, 0);
1bba6ac0 2946
044e64da 2947 gcc_assert (!mcore_dllexport_name_p (oldname));
2948 if (mcore_dllimport_name_p (oldname))
1bba6ac0 2949 return; /* Already done. */
2950
2951 /* ??? One can well ask why we're making these checks here,
2952 and that would be a good question. */
2953
2954 /* Imported variables can't be initialized. */
2955 if (TREE_CODE (decl) == VAR_DECL
2956 && !DECL_VIRTUAL_P (decl)
2957 && DECL_INITIAL (decl))
2958 {
3cf8b391 2959 error ("initialized variable %q+D is marked dllimport", decl);
1bba6ac0 2960 return;
2961 }
2962
2963 /* `extern' needn't be specified with dllimport.
2964 Specify `extern' now and hope for the best. Sigh. */
2965 if (TREE_CODE (decl) == VAR_DECL
2966 /* ??? Is this test for vtables needed? */
2967 && !DECL_VIRTUAL_P (decl))
2968 {
2969 DECL_EXTERNAL (decl) = 1;
2970 TREE_PUBLIC (decl) = 1;
2971 }
2972
225ab426 2973 newname = XALLOCAVEC (char, strlen (oldname) + 11);
1bba6ac0 2974 sprintf (newname, "@i.__imp_%s", oldname);
2975
2976 /* We pass newname through get_identifier to ensure it has a unique
2977 address. RTL processing can sometimes peek inside the symbol ref
2978 and compare the string's addresses to see if two symbols are
2979 identical. */
2980 /* ??? At least I think that's why we do this. */
2981 idp = get_identifier (newname);
2982
1a83b3ff 2983 newrtl = gen_rtx_MEM (Pmode,
2984 gen_rtx_SYMBOL_REF (Pmode,
1bba6ac0 2985 IDENTIFIER_POINTER (idp)));
2986 XEXP (DECL_RTL (decl), 0) = newrtl;
2987}
2988
2989static int
c167bbfc 2990mcore_dllexport_p (tree decl)
1bba6ac0 2991{
2992 if ( TREE_CODE (decl) != VAR_DECL
2993 && TREE_CODE (decl) != FUNCTION_DECL)
2994 return 0;
2995
e3c541f0 2996 return lookup_attribute ("dllexport", DECL_ATTRIBUTES (decl)) != 0;
1bba6ac0 2997}
2998
2999static int
c167bbfc 3000mcore_dllimport_p (tree decl)
1bba6ac0 3001{
3002 if ( TREE_CODE (decl) != VAR_DECL
3003 && TREE_CODE (decl) != FUNCTION_DECL)
3004 return 0;
3005
e3c541f0 3006 return lookup_attribute ("dllimport", DECL_ATTRIBUTES (decl)) != 0;
1bba6ac0 3007}
3008
7811991d 3009/* We must mark dll symbols specially. Definitions of dllexport'd objects
3fcfff30 3010 install some info in the .drective (PE) or .exports (ELF) sections. */
7811991d 3011
3012static void
c167bbfc 3013mcore_encode_section_info (tree decl, rtx rtl ATTRIBUTE_UNUSED, int first ATTRIBUTE_UNUSED)
1bba6ac0 3014{
1bba6ac0 3015 /* Mark the decl so we can tell from the rtl whether the object is
3016 dllexport'd or dllimport'd. */
3017 if (mcore_dllexport_p (decl))
3018 mcore_mark_dllexport (decl);
3019 else if (mcore_dllimport_p (decl))
3020 mcore_mark_dllimport (decl);
3021
3022 /* It might be that DECL has already been marked as dllimport, but
3023 a subsequent definition nullified that. The attribute is gone
3024 but DECL_RTL still has @i.__imp_foo. We need to remove that. */
3025 else if ((TREE_CODE (decl) == FUNCTION_DECL
3026 || TREE_CODE (decl) == VAR_DECL)
3027 && DECL_RTL (decl) != NULL_RTX
3028 && GET_CODE (DECL_RTL (decl)) == MEM
3029 && GET_CODE (XEXP (DECL_RTL (decl), 0)) == MEM
3030 && GET_CODE (XEXP (XEXP (DECL_RTL (decl), 0), 0)) == SYMBOL_REF
3031 && mcore_dllimport_name_p (XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0)))
3032 {
9a356c3c 3033 const char * oldname = XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0);
1bba6ac0 3034 tree idp = get_identifier (oldname + 9);
1a83b3ff 3035 rtx newrtl = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp));
1bba6ac0 3036
3037 XEXP (DECL_RTL (decl), 0) = newrtl;
3038
3039 /* We previously set TREE_PUBLIC and DECL_EXTERNAL.
3040 ??? We leave these alone for now. */
3041 }
3042}
3043
7b4a38a6 3044/* Undo the effects of the above. */
3045
3046static const char *
c167bbfc 3047mcore_strip_name_encoding (const char * str)
7b4a38a6 3048{
3049 return str + (str[0] == '@' ? 3 : 0);
3050}
3051
1bba6ac0 3052/* MCore specific attribute support.
3053 dllexport - for exporting a function/variable that will live in a dll
3054 dllimport - for importing a function/variable from a dll
3055 naked - do not create a function prologue/epilogue. */
1bba6ac0 3056
e3c541f0 3057/* Handle a "naked" attribute; arguments as in
3058 struct attribute_spec.handler. */
c167bbfc 3059
e3c541f0 3060static tree
c167bbfc 3061mcore_handle_naked_attribute (tree * node, tree name, tree args ATTRIBUTE_UNUSED,
3062 int flags ATTRIBUTE_UNUSED, bool * no_add_attrs)
e3c541f0 3063{
08c6cbd2 3064 if (TREE_CODE (*node) != FUNCTION_DECL)
e3c541f0 3065 {
67a779df 3066 warning (OPT_Wattributes, "%qE attribute only applies to functions",
3067 name);
e3c541f0 3068 *no_add_attrs = true;
1bba6ac0 3069 }
3070
e3c541f0 3071 return NULL_TREE;
1bba6ac0 3072}
3073
52470889 3074/* ??? It looks like this is PE specific? Oh well, this is what the
3075 old code did as well. */
1bba6ac0 3076
52470889 3077static void
c167bbfc 3078mcore_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
1bba6ac0 3079{
3080 int len;
c8834c5f 3081 const char * name;
1bba6ac0 3082 char * string;
2eebe422 3083 const char * prefix;
1bba6ac0 3084
3085 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
3086
3087 /* Strip off any encoding in name. */
7b4a38a6 3088 name = (* targetm.strip_name_encoding) (name);
1bba6ac0 3089
3090 /* The object is put in, for example, section .text$foo.
3091 The linker will then ultimately place them in .text
3092 (everything from the $ on is stripped). */
3093 if (TREE_CODE (decl) == FUNCTION_DECL)
3094 prefix = ".text$";
8ef587dc 3095 /* For compatibility with EPOC, we ignore the fact that the
1bba6ac0 3096 section might have relocs against it. */
f4111c94 3097 else if (decl_readonly_section (decl, 0))
1bba6ac0 3098 prefix = ".rdata$";
3099 else
3100 prefix = ".data$";
3101
3102 len = strlen (name) + strlen (prefix);
225ab426 3103 string = XALLOCAVEC (char, len + 1);
1bba6ac0 3104
3105 sprintf (string, "%s%s", prefix, name);
3106
738a6bda 3107 set_decl_section_name (decl, string);
1bba6ac0 3108}
3109
3110int
c167bbfc 3111mcore_naked_function_p (void)
1bba6ac0 3112{
e3c541f0 3113 return lookup_attribute ("naked", DECL_ATTRIBUTES (current_function_decl)) != NULL_TREE;
1bba6ac0 3114}
2cb4ac60 3115
08c6cbd2 3116static bool
3117mcore_warn_func_return (tree decl)
3118{
3119 /* Naked functions are implemented entirely in assembly, including the
3120 return sequence, so suppress warnings about this. */
3121 return lookup_attribute ("naked", DECL_ATTRIBUTES (decl)) == NULL_TREE;
3122}
3123
6e4758ce 3124#ifdef OBJECT_FORMAT_ELF
2cb4ac60 3125static void
537cd941 3126mcore_asm_named_section (const char *name,
3127 unsigned int flags ATTRIBUTE_UNUSED,
3128 tree decl ATTRIBUTE_UNUSED)
2cb4ac60 3129{
3130 fprintf (asm_out_file, "\t.section %s\n", name);
3131}
6e4758ce 3132#endif /* OBJECT_FORMAT_ELF */
5ee57edb 3133
e4ef650e 3134/* Worker function for TARGET_ASM_EXTERNAL_LIBCALL. */
3135
5ee57edb 3136static void
3137mcore_external_libcall (rtx fun)
3138{
3139 fprintf (asm_out_file, "\t.import\t");
3140 assemble_name (asm_out_file, XSTR (fun, 0));
3141 fprintf (asm_out_file, "\n");
3142}
3143
e4ef650e 3144/* Worker function for TARGET_RETURN_IN_MEMORY. */
3145
5ee57edb 3146static bool
fb80456a 3147mcore_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
5ee57edb 3148{
fb80456a 3149 const HOST_WIDE_INT size = int_size_in_bytes (type);
39cc9599 3150 return (size == -1 || size > 2 * UNITS_PER_WORD);
5ee57edb 3151}
74e653fe 3152
3153/* Worker function for TARGET_ASM_TRAMPOLINE_TEMPLATE.
3154 Output assembler code for a block containing the constant parts
3155 of a trampoline, leaving space for the variable parts.
3156
3157 On the MCore, the trampoline looks like:
3158 lrw r1, function
3159 lrw r13, area
3160 jmp r13
3161 or r0, r0
3162 .literals */
3163
3164static void
3165mcore_asm_trampoline_template (FILE *f)
3166{
3167 fprintf (f, "\t.short 0x7102\n");
3168 fprintf (f, "\t.short 0x7d02\n");
3169 fprintf (f, "\t.short 0x00cd\n");
3170 fprintf (f, "\t.short 0x1e00\n");
3171 fprintf (f, "\t.long 0\n");
3172 fprintf (f, "\t.long 0\n");
3173}
3174
3175/* Worker function for TARGET_TRAMPOLINE_INIT. */
3176
3177static void
3178mcore_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
3179{
3180 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
3181 rtx mem;
3182
3183 emit_block_move (m_tramp, assemble_trampoline_template (),
3184 GEN_INT (2*UNITS_PER_WORD), BLOCK_OP_NORMAL);
3185
3186 mem = adjust_address (m_tramp, SImode, 8);
3187 emit_move_insn (mem, chain_value);
3188 mem = adjust_address (m_tramp, SImode, 12);
3189 emit_move_insn (mem, fnaddr);
3190}
ca316360 3191
3192/* Implement TARGET_LEGITIMATE_CONSTANT_P
3193
3194 On the MCore, allow anything but a double. */
3195
3196static bool
3754d046 3197mcore_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
ca316360 3198{
3199 return GET_CODE (x) != CONST_DOUBLE;
3200}