1bba6ac0 1/* Output routines for Motorola MCore processor
f1717362 2 Copyright (C) 1993-2016 Free Software Foundation, Inc.
1bba6ac0 3
c167bbfc 4 This file is part of GCC.
1bba6ac0 5
c167bbfc 6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published
038d1e19 8 by the Free Software Foundation; either version 3, or (at your
c167bbfc 9 option) any later version.
1bba6ac0 10
c167bbfc 11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
13 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
14 License for more details.
1bba6ac0 15
c167bbfc 16 You should have received a copy of the GNU General Public License
038d1e19 17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
1bba6ac0 19
7ae98079 20#include "config.h"
5bc09e30 21#include "system.h"
805e22b2 22#include "coretypes.h"
9ef16211 23#include "backend.h"
c1eb80de 24#include "target.h"
fa4fdf98 25#include "rtl.h"
c1eb80de 26#include "tree.h"
9ef16211 27#include "df.h"
ad7b10a2 28#include "memmodel.h"
c1eb80de 29#include "tm_p.h"
30#include "stringpool.h"
31#include "emit-rtl.h"
32#include "diagnostic-core.h"
9ed99284 33#include "stor-layout.h"
34#include "varasm.h"
9ed99284 35#include "calls.h"
1bba6ac0 36#include "mcore.h"
1bba6ac0 37#include "output.h"
d53441c8 38#include "explow.h"
1bba6ac0 39#include "expr.h"
94ea8568 40#include "cfgrtl.h"
f7715905 41#include "builtins.h"
77dd4d75 42#include "regs.h"
1bba6ac0 43
0c71fb4f 44/* This file should be included last. */
4b498588 45#include "target-def.h"
46
1bba6ac0 47/* For dumping information about frame sizes. */
48char * mcore_current_function_name = 0;
49long mcore_current_compilation_timestamp = 0;
50
51/* Global variables for machine-dependent things. */
52
1bba6ac0 53/* Provides the class number of the smallest class containing
54 reg number. */
ef51d1e3 55const enum reg_class regno_reg_class[FIRST_PSEUDO_REGISTER] =
1bba6ac0 56{
57 GENERAL_REGS, ONLYR1_REGS, LRW_REGS, LRW_REGS,
58 LRW_REGS, LRW_REGS, LRW_REGS, LRW_REGS,
59 LRW_REGS, LRW_REGS, LRW_REGS, LRW_REGS,
60 LRW_REGS, LRW_REGS, LRW_REGS, GENERAL_REGS,
61 GENERAL_REGS, C_REGS, NO_REGS, NO_REGS,
62};
63
2eebe422 64struct mcore_frame
65{
c167bbfc 66 int arg_size; /* Stdarg spills (bytes). */
67 int reg_size; /* Non-volatile reg saves (bytes). */
68 int reg_mask; /* Non-volatile reg saves. */
69 int local_size; /* Locals. */
70 int outbound_size; /* Arg overflow on calls out. */
2eebe422 71 int pad_outbound;
72 int pad_local;
73 int pad_reg;
74 /* Describe the steps we'll use to grow it. */
c167bbfc 75#define MAX_STACK_GROWS 4 /* Gives us some spare space. */
2eebe422 76 int growth[MAX_STACK_GROWS];
77 int arg_offset;
78 int reg_offset;
79 int reg_growth;
80 int local_growth;
81};
82
83typedef enum
84{
85 COND_NO,
86 COND_MOV_INSN,
87 COND_CLR_INSN,
88 COND_INC_INSN,
89 COND_DEC_INSN,
90 COND_BRANCH_INSN
91}
92cond_type;
93
c167bbfc 94static void output_stack_adjust (int, int);
95static int calc_live_regs (int *);
95ba5045 96static int try_constant_tricks (HOST_WIDE_INT, HOST_WIDE_INT *, HOST_WIDE_INT *);
3754d046 97static const char * output_inline_const (machine_mode, rtx *);
c167bbfc 98static void layout_mcore_frame (struct mcore_frame *);
3754d046 99static void mcore_setup_incoming_varargs (cumulative_args_t, machine_mode, tree, int *, int);
c167bbfc 100static cond_type is_cond_candidate (rtx);
91a55c11 101static rtx_insn *emit_new_cond_insn (rtx, int);
102static rtx_insn *conditionalize_block (rtx_insn *);
c167bbfc 103static void conditionalize_optimization (void);
104static void mcore_reorg (void);
3754d046 105static rtx handle_structs_in_regs (machine_mode, const_tree, int);
c167bbfc 106static void mcore_mark_dllexport (tree);
107static void mcore_mark_dllimport (tree);
108static int mcore_dllexport_p (tree);
109static int mcore_dllimport_p (tree);
c167bbfc 110static tree mcore_handle_naked_attribute (tree *, tree, tree, int, bool *);
6e4758ce 111#ifdef OBJECT_FORMAT_ELF
c167bbfc 112static void mcore_asm_named_section (const char *,
537cd941 113 unsigned int, tree);
6e4758ce 114#endif
932f5d0a 115static void mcore_print_operand (FILE *, rtx, int);
3c047fe9 116static void mcore_print_operand_address (FILE *, machine_mode, rtx);
932f5d0a 117static bool mcore_print_operand_punct_valid_p (unsigned char code);
c167bbfc 118static void mcore_unique_section (tree, int);
119static void mcore_encode_section_info (tree, rtx, int);
120static const char *mcore_strip_name_encoding (const char *);
7e0514b3 121static int mcore_const_costs (rtx, RTX_CODE);
122static int mcore_and_cost (rtx);
123static int mcore_ior_cost (rtx);
5ae4887d 124static bool mcore_rtx_costs (rtx, machine_mode, int, int,
20d892d1 125 int *, bool);
5ee57edb 126static void mcore_external_libcall (rtx);
fb80456a 127static bool mcore_return_in_memory (const_tree, const_tree);
39cba157 128static int mcore_arg_partial_bytes (cumulative_args_t,
3754d046 129 machine_mode,
f054eb3c 130 tree, bool);
39cba157 131static rtx mcore_function_arg (cumulative_args_t,
3754d046 132 machine_mode,
da6d22fa 133 const_tree, bool);
39cba157 134static void mcore_function_arg_advance (cumulative_args_t,
3754d046 135 machine_mode,
da6d22fa 136 const_tree, bool);
3754d046 137static unsigned int mcore_function_arg_boundary (machine_mode,
bd99ba64 138 const_tree);
74e653fe 139static void mcore_asm_trampoline_template (FILE *);
140static void mcore_trampoline_init (rtx, tree, rtx);
08c6cbd2 141static bool mcore_warn_func_return (tree);
4c834714 142static void mcore_option_override (void);
3754d046 143static bool mcore_legitimate_constant_p (machine_mode, rtx);
30ff90f6 144static bool mcore_legitimate_address_p (machine_mode, rtx, bool,
145 addr_space_t);
ef51d1e3 146\f
147/* MCore specific attributes. */
148
149static const struct attribute_spec mcore_attribute_table[] =
150{
ac86af5d 151 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
152 affects_type_identity } */
153 { "dllexport", 0, 0, true, false, false, NULL, false },
154 { "dllimport", 0, 0, true, false, false, NULL, false },
155 { "naked", 0, 0, true, false, false, mcore_handle_naked_attribute,
156 false },
157 { NULL, 0, 0, false, false, false, NULL, false }
ef51d1e3 158};
a767736d 159\f
160/* Initialize the GCC target structure. */
5ee57edb 161#undef TARGET_ASM_EXTERNAL_LIBCALL
162#define TARGET_ASM_EXTERNAL_LIBCALL mcore_external_libcall
163
3aa0c315 164#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
c167bbfc 165#undef TARGET_MERGE_DECL_ATTRIBUTES
166#define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
a767736d 167#endif
168
58356836 169#ifdef OBJECT_FORMAT_ELF
c167bbfc 170#undef TARGET_ASM_UNALIGNED_HI_OP
58356836 171#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
c167bbfc 172#undef TARGET_ASM_UNALIGNED_SI_OP
58356836 173#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
174#endif
175
932f5d0a 176#undef TARGET_PRINT_OPERAND
177#define TARGET_PRINT_OPERAND mcore_print_operand
178#undef TARGET_PRINT_OPERAND_ADDRESS
179#define TARGET_PRINT_OPERAND_ADDRESS mcore_print_operand_address
180#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
181#define TARGET_PRINT_OPERAND_PUNCT_VALID_P mcore_print_operand_punct_valid_p
182
c167bbfc 183#undef TARGET_ATTRIBUTE_TABLE
184#define TARGET_ATTRIBUTE_TABLE mcore_attribute_table
185#undef TARGET_ASM_UNIQUE_SECTION
186#define TARGET_ASM_UNIQUE_SECTION mcore_unique_section
76aec42f 187#undef TARGET_ASM_FUNCTION_RODATA_SECTION
188#define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
c167bbfc 189#undef TARGET_ENCODE_SECTION_INFO
190#define TARGET_ENCODE_SECTION_INFO mcore_encode_section_info
191#undef TARGET_STRIP_NAME_ENCODING
192#define TARGET_STRIP_NAME_ENCODING mcore_strip_name_encoding
193#undef TARGET_RTX_COSTS
194#define TARGET_RTX_COSTS mcore_rtx_costs
195#undef TARGET_ADDRESS_COST
d9c5e5f4 196#define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
c167bbfc 197#undef TARGET_MACHINE_DEPENDENT_REORG
198#define TARGET_MACHINE_DEPENDENT_REORG mcore_reorg
2efea8c0 199
3b2411a8 200#undef TARGET_PROMOTE_FUNCTION_MODE
201#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
5ee57edb 202#undef TARGET_PROMOTE_PROTOTYPES
fb80456a 203#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
5ee57edb 204
5ee57edb 205#undef TARGET_RETURN_IN_MEMORY
206#define TARGET_RETURN_IN_MEMORY mcore_return_in_memory
0336f0f0 207#undef TARGET_MUST_PASS_IN_STACK
208#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
b981d932 209#undef TARGET_PASS_BY_REFERENCE
210#define TARGET_PASS_BY_REFERENCE hook_pass_by_reference_must_pass_in_stack
f054eb3c 211#undef TARGET_ARG_PARTIAL_BYTES
212#define TARGET_ARG_PARTIAL_BYTES mcore_arg_partial_bytes
da6d22fa 213#undef TARGET_FUNCTION_ARG
214#define TARGET_FUNCTION_ARG mcore_function_arg
215#undef TARGET_FUNCTION_ARG_ADVANCE
216#define TARGET_FUNCTION_ARG_ADVANCE mcore_function_arg_advance
bd99ba64 217#undef TARGET_FUNCTION_ARG_BOUNDARY
218#define TARGET_FUNCTION_ARG_BOUNDARY mcore_function_arg_boundary
5ee57edb 219
220#undef TARGET_SETUP_INCOMING_VARARGS
221#define TARGET_SETUP_INCOMING_VARARGS mcore_setup_incoming_varargs
222
74e653fe 223#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
224#define TARGET_ASM_TRAMPOLINE_TEMPLATE mcore_asm_trampoline_template
225#undef TARGET_TRAMPOLINE_INIT
226#define TARGET_TRAMPOLINE_INIT mcore_trampoline_init
227
4c834714 228#undef TARGET_OPTION_OVERRIDE
229#define TARGET_OPTION_OVERRIDE mcore_option_override
7630a512 230
ca316360 231#undef TARGET_LEGITIMATE_CONSTANT_P
232#define TARGET_LEGITIMATE_CONSTANT_P mcore_legitimate_constant_p
30ff90f6 233#undef TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P
234#define TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P mcore_legitimate_address_p
ca316360 235
e46fbef5 236#undef TARGET_LRA_P
237#define TARGET_LRA_P hook_bool_void_false
238
08c6cbd2 239#undef TARGET_WARN_FUNC_RETURN
240#define TARGET_WARN_FUNC_RETURN mcore_warn_func_return
241
57e4bbfb 242struct gcc_target targetm = TARGET_INITIALIZER;
2eebe422 243\f
1bba6ac0 244/* Emit code to adjust the stack by SIZE bytes; DIRECTION < 0 grows the stack. */
245static void
c167bbfc 246output_stack_adjust (int direction, int size)
1bba6ac0 247{
fa4fdf98 248 /* If extending stack a lot, we do it incrementally. */
1bba6ac0 249 if (direction < 0 && size > mcore_stack_increment && mcore_stack_increment > 0)
250 {
1a83b3ff 251 rtx tmp = gen_rtx_REG (SImode, 1);
1bba6ac0 252 rtx memref;
c167bbfc 253
1bba6ac0 254 emit_insn (gen_movsi (tmp, GEN_INT (mcore_stack_increment)));
255 do
256 {
257 emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp));
1a83b3ff 258 memref = gen_rtx_MEM (SImode, stack_pointer_rtx);
1bba6ac0 259 MEM_VOLATILE_P (memref) = 1;
260 emit_insn (gen_movsi (memref, stack_pointer_rtx));
261 size -= mcore_stack_increment;
262 }
263 while (size > mcore_stack_increment);
264
fa4fdf98 265 /* SIZE is now the residual for the last adjustment,
266 which doesn't require a probe. */
1bba6ac0 267 }
268
269 if (size)
270 {
271 rtx insn;
272 rtx val = GEN_INT (size);
273
274 if (size > 32)
275 {
1a83b3ff 276 rtx nval = gen_rtx_REG (SImode, 1);
1bba6ac0 277 emit_insn (gen_movsi (nval, val));
278 val = nval;
279 }
280
281 if (direction > 0)
282 insn = gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, val);
283 else
284 insn = gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, val);
285
286 emit_insn (insn);
287 }
288}
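
/* A standalone sketch (not part of mcore.c) of the schedule the routine
   above produces when growing a large frame under -mstack-increment=N:
   one probing store per full increment, then a single unprobed residual
   step.  The real code materializes the increment in r1 and subtracts
   the register; the mnemonics here are only illustrative.  */
#include <stdio.h>

static void show_stack_adjust (int size, int increment)
{
  if (size > increment && increment > 0)
    {
      do
        {
          printf ("subu\tsp,r1\n");       /* grow by one increment */
          printf ("stw\tsp,(sp)\n");      /* volatile probe of the new area */
          size -= increment;
        }
      while (size > increment);
    }

  if (size)                               /* residual adjustment, no probe */
    printf ("subi\tsp,%d\n", size);
}

int main (void)
{
  show_stack_adjust (300, 100);           /* two probed steps, 100-byte residual */
  return 0;
}
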
289
fa4fdf98 290/* Work out the registers which need to be saved,
291 both as a mask and a count. */
292
1bba6ac0 293static int
c167bbfc 294calc_live_regs (int * count)
1bba6ac0 295{
296 int reg;
297 int live_regs_mask = 0;
298
299 * count = 0;
300
301 for (reg = 0; reg < FIRST_PSEUDO_REGISTER; reg++)
302 {
3072d30e 303 if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
1bba6ac0 304 {
305 (*count)++;
306 live_regs_mask |= (1 << reg);
307 }
308 }
309
310 return live_regs_mask;
311}
312
313/* Print the operand address in x to the stream. */
fa4fdf98 314
932f5d0a 315static void
3c047fe9 316mcore_print_operand_address (FILE * stream, machine_mode /*mode*/, rtx x)
1bba6ac0 317{
318 switch (GET_CODE (x))
319 {
320 case REG:
321 fprintf (stream, "(%s)", reg_names[REGNO (x)]);
322 break;
323
324 case PLUS:
325 {
326 rtx base = XEXP (x, 0);
327 rtx index = XEXP (x, 1);
328
329 if (GET_CODE (base) != REG)
330 {
331 /* Ensure that BASE is a register (one of them must be). */
332 rtx temp = base;
333 base = index;
334 index = temp;
335 }
336
337 switch (GET_CODE (index))
338 {
339 case CONST_INT:
b94709e4 340 fprintf (stream, "(%s," HOST_WIDE_INT_PRINT_DEC ")",
341 reg_names[REGNO(base)], INTVAL (index));
1bba6ac0 342 break;
343
344 default:
044e64da 345 gcc_unreachable ();
1bba6ac0 346 }
347 }
348
349 break;
350
351 default:
352 output_addr_const (stream, x);
353 break;
354 }
355}
356
932f5d0a 357static bool
358mcore_print_operand_punct_valid_p (unsigned char code)
359{
360 return (code == '.' || code == '#' || code == '*' || code == '^'
361 || code == '!');
362}
363
1bba6ac0 364/* Print operand x (an rtx) in assembler syntax to file stream
365 according to modifier code.
366
a361b456 367 'R' print the next register or memory location along, i.e. the lsw in
1bba6ac0 368 a double word value
369 'O' print a constant without the #
370 'M' print a constant as its negative
371 'P' print log2 of a power of two
372 'Q' print log2 of an inverse of a power of two
 373 'U' print register for ldm/stm instruction
     'N' print the width of a 2^n - 1 mask (32 for -1)
     'x' print a constant in hexadecimal
fa4fdf98 374 'X' print byte number for xtrbN instruction. */
375
932f5d0a 376static void
c167bbfc 377mcore_print_operand (FILE * stream, rtx x, int code)
1bba6ac0 378{
379 switch (code)
380 {
381 case 'N':
382 if (INTVAL(x) == -1)
383 fprintf (asm_out_file, "32");
384 else
385 fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) + 1));
386 break;
387 case 'P':
90fab4b1 388 fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) & 0xffffffff));
1bba6ac0 389 break;
390 case 'Q':
391 fprintf (asm_out_file, "%d", exact_log2 (~INTVAL (x)));
392 break;
393 case 'O':
b94709e4 394 fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
1bba6ac0 395 break;
396 case 'M':
b94709e4 397 fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, - INTVAL (x));
1bba6ac0 398 break;
399 case 'R':
400 /* Next location along in memory or register. */
401 switch (GET_CODE (x))
402 {
403 case REG:
404 fputs (reg_names[REGNO (x) + 1], (stream));
405 break;
406 case MEM:
eafc6604 407 mcore_print_operand_address
3c047fe9 408 (stream, GET_MODE (x), XEXP (adjust_address (x, SImode, 4), 0));
1bba6ac0 409 break;
410 default:
044e64da 411 gcc_unreachable ();
1bba6ac0 412 }
413 break;
414 case 'U':
415 fprintf (asm_out_file, "%s-%s", reg_names[REGNO (x)],
416 reg_names[REGNO (x) + 3]);
417 break;
418 case 'x':
b94709e4 419 fprintf (asm_out_file, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
1bba6ac0 420 break;
421 case 'X':
b94709e4 422 fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, 3 - INTVAL (x) / 8);
1bba6ac0 423 break;
424
425 default:
426 switch (GET_CODE (x))
427 {
428 case REG:
429 fputs (reg_names[REGNO (x)], (stream));
430 break;
431 case MEM:
3c047fe9 432 output_address (GET_MODE (x), XEXP (x, 0));
1bba6ac0 433 break;
434 default:
435 output_addr_const (stream, x);
436 break;
437 }
438 break;
439 }
440}
441
442/* What does a constant cost ? */
fa4fdf98 443
fab7adbf 444static int
c167bbfc 445mcore_const_costs (rtx exp, enum rtx_code code)
1bba6ac0 446{
90fab4b1 447 HOST_WIDE_INT val = INTVAL (exp);
1bba6ac0 448
449 /* Easy constants. */
450 if ( CONST_OK_FOR_I (val)
451 || CONST_OK_FOR_M (val)
452 || CONST_OK_FOR_N (val)
453 || (code == PLUS && CONST_OK_FOR_L (val)))
454 return 1;
455 else if (code == AND
456 && ( CONST_OK_FOR_M (~val)
457 || CONST_OK_FOR_N (~val)))
458 return 2;
459 else if (code == PLUS
460 && ( CONST_OK_FOR_I (-val)
461 || CONST_OK_FOR_M (-val)
462 || CONST_OK_FOR_N (-val)))
463 return 2;
464
465 return 5;
466}
467
 468/* What does an and instruction cost - we do this because immediates may
 469 have been relaxed. We want to ensure that cse will commonize relaxed
fa4fdf98 470 immediates; otherwise we'll get bad code (multiple reloads of the same const). */
471
fab7adbf 472static int
c167bbfc 473mcore_and_cost (rtx x)
1bba6ac0 474{
90fab4b1 475 HOST_WIDE_INT val;
1bba6ac0 476
477 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
478 return 2;
479
480 val = INTVAL (XEXP (x, 1));
481
fa4fdf98 482 /* Do it directly. */
1bba6ac0 483 if (CONST_OK_FOR_K (val) || CONST_OK_FOR_M (~val))
484 return 2;
485 /* Takes one instruction to load. */
486 else if (const_ok_for_mcore (val))
487 return 3;
488 /* Takes two instructions to load. */
489 else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
490 return 4;
491
fa4fdf98 492 /* Takes a lrw to load. */
1bba6ac0 493 return 5;
494}
495
fa4fdf98 496/* What does an or instruction cost - see mcore_and_cost(). */
497
fab7adbf 498static int
c167bbfc 499mcore_ior_cost (rtx x)
1bba6ac0 500{
90fab4b1 501 HOST_WIDE_INT val;
1bba6ac0 502
503 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
504 return 2;
505
506 val = INTVAL (XEXP (x, 1));
507
fa4fdf98 508 /* Do it directly with bclri. */
1bba6ac0 509 if (CONST_OK_FOR_M (val))
510 return 2;
fa4fdf98 511 /* Takes one instruction to load. */
1bba6ac0 512 else if (const_ok_for_mcore (val))
513 return 3;
fa4fdf98 514 /* Takes two instructions to load. */
1bba6ac0 515 else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
516 return 4;
517
fa4fdf98 518 /* Takes a lrw to load. */
1bba6ac0 519 return 5;
520}
521
fab7adbf 522static bool
5ae4887d 523mcore_rtx_costs (rtx x, machine_mode mode ATTRIBUTE_UNUSED, int outer_code,
524 int opno ATTRIBUTE_UNUSED,
20d892d1 525 int * total, bool speed ATTRIBUTE_UNUSED)
fab7adbf 526{
5ae4887d 527 int code = GET_CODE (x);
528
fab7adbf 529 switch (code)
530 {
531 case CONST_INT:
ef51d1e3 532 *total = mcore_const_costs (x, (enum rtx_code) outer_code);
fab7adbf 533 return true;
534 case CONST:
535 case LABEL_REF:
536 case SYMBOL_REF:
537 *total = 5;
538 return true;
539 case CONST_DOUBLE:
540 *total = 10;
541 return true;
542
543 case AND:
544 *total = COSTS_N_INSNS (mcore_and_cost (x));
545 return true;
546
547 case IOR:
548 *total = COSTS_N_INSNS (mcore_ior_cost (x));
549 return true;
550
551 case DIV:
552 case UDIV:
553 case MOD:
554 case UMOD:
555 case FLOAT:
556 case FIX:
557 *total = COSTS_N_INSNS (100);
558 return true;
559
560 default:
561 return false;
562 }
563}
564
74f4459c 565/* Prepare the operands for a comparison. Return whether the branch/setcc
566 should reverse the operands. */
fa4fdf98 567
74f4459c 568bool
569mcore_gen_compare (enum rtx_code code, rtx op0, rtx op1)
1bba6ac0 570{
74f4459c 571 rtx cc_reg = gen_rtx_REG (CCmode, CC_REG);
572 bool invert;
573
1bba6ac0 574 if (GET_CODE (op1) == CONST_INT)
575 {
90fab4b1 576 HOST_WIDE_INT val = INTVAL (op1);
1bba6ac0 577
578 switch (code)
579 {
74f4459c 580 case GTU:
581 /* Unsigned > 0 is the same as != 0; everything else is converted
582 below to LEU (reversed cmphs). */
583 if (val == 0)
584 code = NE;
585 break;
586
587 /* Check whether (LE A imm) can become (LT A imm + 1),
588 or (GT A imm) can become (GE A imm + 1). */
589 case GT:
1bba6ac0 590 case LE:
591 if (CONST_OK_FOR_J (val + 1))
592 {
74f4459c 593 op1 = GEN_INT (val + 1);
594 code = code == LE ? LT : GE;
1bba6ac0 595 }
596 break;
597
598 default:
599 break;
600 }
601 }
74f4459c 602
1bba6ac0 603 if (CONSTANT_P (op1) && GET_CODE (op1) != CONST_INT)
604 op1 = force_reg (SImode, op1);
605
606 /* cmpnei: 0-31 (K immediate)
fa4fdf98 607 cmplti: 1-32 (J immediate, 0 using btsti x,31). */
74f4459c 608 invert = false;
1bba6ac0 609 switch (code)
610 {
fa4fdf98 611 case EQ: /* Use inverted condition, cmpne. */
1bba6ac0 612 code = NE;
74f4459c 613 invert = true;
531eafa5 614 /* FALLTHRU */
fa4fdf98 615
616 case NE: /* Use normal condition, cmpne. */
1bba6ac0 617 if (GET_CODE (op1) == CONST_INT && ! CONST_OK_FOR_K (INTVAL (op1)))
618 op1 = force_reg (SImode, op1);
619 break;
620
fa4fdf98 621 case LE: /* Use inverted condition, reversed cmplt. */
1bba6ac0 622 code = GT;
74f4459c 623 invert = true;
531eafa5 624 /* FALLTHRU */
fa4fdf98 625
626 case GT: /* Use normal condition, reversed cmplt. */
1bba6ac0 627 if (GET_CODE (op1) == CONST_INT)
628 op1 = force_reg (SImode, op1);
629 break;
630
fa4fdf98 631 case GE: /* Use inverted condition, cmplt. */
1bba6ac0 632 code = LT;
74f4459c 633 invert = true;
531eafa5 634 /* FALLTHRU */
fa4fdf98 635
636 case LT: /* Use normal condition, cmplt. */
1bba6ac0 637 if (GET_CODE (op1) == CONST_INT &&
c167bbfc 638 /* covered by btsti x,31. */
1bba6ac0 639 INTVAL (op1) != 0 &&
640 ! CONST_OK_FOR_J (INTVAL (op1)))
641 op1 = force_reg (SImode, op1);
642 break;
643
fa4fdf98 644 case GTU: /* Use inverted condition, cmple. */
74f4459c 645 /* We coped with unsigned > 0 above. */
044e64da 646 gcc_assert (GET_CODE (op1) != CONST_INT || INTVAL (op1) != 0);
1bba6ac0 647 code = LEU;
74f4459c 648 invert = true;
531eafa5 649 /* FALLTHRU */
fa4fdf98 650
3fcfff30 651 case LEU: /* Use normal condition, reversed cmphs. */
1bba6ac0 652 if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
653 op1 = force_reg (SImode, op1);
654 break;
655
fa4fdf98 656 case LTU: /* Use inverted condition, cmphs. */
1bba6ac0 657 code = GEU;
74f4459c 658 invert = true;
531eafa5 659 /* FALLTHRU */
fa4fdf98 660
661 case GEU: /* Use normal condition, cmphs. */
1bba6ac0 662 if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
663 op1 = force_reg (SImode, op1);
664 break;
665
666 default:
667 break;
668 }
669
d1f9b275 670 emit_insn (gen_rtx_SET (cc_reg, gen_rtx_fmt_ee (code, CCmode, op0, op1)));
74f4459c 671 return invert;
1bba6ac0 672}
673
1bba6ac0 674int
c167bbfc 675mcore_symbolic_address_p (rtx x)
1bba6ac0 676{
677 switch (GET_CODE (x))
678 {
679 case SYMBOL_REF:
680 case LABEL_REF:
681 return 1;
682 case CONST:
683 x = XEXP (x, 0);
684 return ( (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
685 || GET_CODE (XEXP (x, 0)) == LABEL_REF)
686 && GET_CODE (XEXP (x, 1)) == CONST_INT);
687 default:
688 return 0;
689 }
690}
691
1bba6ac0 692/* Functions to output assembly code for a function call. */
2eebe422 693
1bba6ac0 694char *
c167bbfc 695mcore_output_call (rtx operands[], int index)
1bba6ac0 696{
697 static char buffer[20];
698 rtx addr = operands [index];
699
700 if (REG_P (addr))
701 {
702 if (TARGET_CG_DATA)
703 {
044e64da 704 gcc_assert (mcore_current_function_name);
1bba6ac0 705
706 ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
707 "unknown", 1);
708 }
709
710 sprintf (buffer, "jsr\t%%%d", index);
711 }
712 else
713 {
714 if (TARGET_CG_DATA)
715 {
044e64da 716 gcc_assert (mcore_current_function_name);
717 gcc_assert (GET_CODE (addr) == SYMBOL_REF);
1bba6ac0 718
044e64da 719 ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
720 XSTR (addr, 0), 0);
1bba6ac0 721 }
722
723 sprintf (buffer, "jbsr\t%%%d", index);
724 }
725
726 return buffer;
727}
728
729/* Can we load a constant with a single instruction ? */
fa4fdf98 730
1e235216 731int
90fab4b1 732const_ok_for_mcore (HOST_WIDE_INT value)
1bba6ac0 733{
734 if (value >= 0 && value <= 127)
735 return 1;
736
737 /* Try exact power of two. */
90fab4b1 738 if (CONST_OK_FOR_M (value))
1bba6ac0 739 return 1;
740
3fcfff30 741 /* Try exact power of two - 1. */
90fab4b1 742 if (CONST_OK_FOR_N (value) && value != -1)
1bba6ac0 743 return 1;
744
745 return 0;
746}
747
748/* Can we load a constant inline with up to 2 instructions ? */
fa4fdf98 749
1bba6ac0 750int
90fab4b1 751mcore_const_ok_for_inline (HOST_WIDE_INT value)
1bba6ac0 752{
90fab4b1 753 HOST_WIDE_INT x, y;
1bba6ac0 754
755 return try_constant_tricks (value, & x, & y) > 0;
756}
757
758/* Are we loading the constant using a not ? */
fa4fdf98 759
1bba6ac0 760int
90fab4b1 761mcore_const_trick_uses_not (HOST_WIDE_INT value)
1bba6ac0 762{
90fab4b1 763 HOST_WIDE_INT x, y;
1bba6ac0 764
765 return try_constant_tricks (value, & x, & y) == 2;
766}
767
 768/* Try tricks to load a constant inline and return the trick number on
 769 success (0 means non-inlinable).
fa4fdf98 770
771 0: not inlinable
772 1: single instruction (do the usual thing)
773 2: single insn followed by a 'not'
 774 3: single insn followed by an addi
 775 4: single insn followed by a subi
776 5: single insn followed by rsubi
777 6: single insn followed by bseti
778 7: single insn followed by bclri
779 8: single insn followed by rotli
780 9: single insn followed by lsli
781 10: single insn followed by ixh
782 11: single insn followed by ixw. */
1bba6ac0 783
784static int
90fab4b1 785try_constant_tricks (HOST_WIDE_INT value, HOST_WIDE_INT * x, HOST_WIDE_INT * y)
1bba6ac0 786{
90fab4b1 787 HOST_WIDE_INT i;
788 unsigned HOST_WIDE_INT bit, shf, rot;
1bba6ac0 789
790 if (const_ok_for_mcore (value))
fa4fdf98 791 return 1; /* Do the usual thing. */
1bba6ac0 792
90fab4b1 793 if (! TARGET_HARDLIT)
794 return 0;
795
796 if (const_ok_for_mcore (~value))
797 {
798 *x = ~value;
799 return 2;
800 }
801
802 for (i = 1; i <= 32; i++)
1bba6ac0 803 {
90fab4b1 804 if (const_ok_for_mcore (value - i))
1bba6ac0 805 {
90fab4b1 806 *x = value - i;
807 *y = i;
808
809 return 3;
1bba6ac0 810 }
90fab4b1 811
812 if (const_ok_for_mcore (value + i))
1bba6ac0 813 {
90fab4b1 814 *x = value + i;
815 *y = i;
816
817 return 4;
1bba6ac0 818 }
90fab4b1 819 }
820
821 bit = 0x80000000ULL;
822
823 for (i = 0; i <= 31; i++)
824 {
825 if (const_ok_for_mcore (i - value))
1bba6ac0 826 {
90fab4b1 827 *x = i - value;
828 *y = i;
829
830 return 5;
1bba6ac0 831 }
90fab4b1 832
833 if (const_ok_for_mcore (value & ~bit))
1bba6ac0 834 {
90fab4b1 835 *y = bit;
836 *x = value & ~bit;
837 return 6;
1bba6ac0 838 }
90fab4b1 839
840 if (const_ok_for_mcore (value | bit))
1bba6ac0 841 {
90fab4b1 842 *y = ~bit;
843 *x = value | bit;
844
845 return 7;
1bba6ac0 846 }
90fab4b1 847
848 bit >>= 1;
849 }
850
851 shf = value;
852 rot = value;
853
854 for (i = 1; i < 31; i++)
855 {
856 int c;
857
858 /* MCore has rotate left. */
859 c = rot << 31;
860 rot >>= 1;
861 rot &= 0x7FFFFFFF;
862 rot |= c; /* Simulate rotate. */
863
864 if (const_ok_for_mcore (rot))
1bba6ac0 865 {
90fab4b1 866 *y = i;
867 *x = rot;
868
869 return 8;
870 }
871
872 if (shf & 1)
873 shf = 0; /* Can't use logical shift, low order bit is one. */
874
875 shf >>= 1;
876
877 if (shf != 0 && const_ok_for_mcore (shf))
878 {
879 *y = i;
880 *x = shf;
881
882 return 9;
1bba6ac0 883 }
884 }
90fab4b1 885
886 if ((value % 3) == 0 && const_ok_for_mcore (value / 3))
887 {
888 *x = value / 3;
889
890 return 10;
891 }
892
893 if ((value % 5) == 0 && const_ok_for_mcore (value / 5))
894 {
895 *x = value / 5;
896
897 return 11;
898 }
1bba6ac0 899
900 return 0;
901}
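
/* Two worked instances of the tricks above, checked standalone and
   assuming 32-bit values.  0xFFFFFF00 is not loadable in one
   instruction, but its complement 0xFF is, so trick 2 applies
   (movi 255 ; not).  130 is 127 + 3, so trick 3 applies
   (movi 127 ; addi 3).  */
#include <assert.h>
#include <stdint.h>

int main (void)
{
  /* Trick 2: value == ~x, where x is directly loadable.  */
  uint32_t x = 255;
  assert (~x == 0xFFFFFF00u);

  /* Trick 3: value == x + y, where x is loadable and 1 <= y <= 32.  */
  uint32_t x2 = 127, y = 3;
  assert (x2 + y == 130u);
  return 0;
}
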
902
1bba6ac0 903/* Check whether REG is dead at insn FIRST. This is done by searching ahead
904 for either the next use (i.e., reg is live), a death note, or a set of
905 reg. Don't just use dead_or_set_p() since reload does not always mark
906 deaths (especially if PRESERVE_DEATH_NOTES_REGNO_P is not defined). We
fa4fdf98 907 can ignore subregs by extracting the actual register. BRC */
908
1bba6ac0 909int
91a55c11 910mcore_is_dead (rtx_insn *first, rtx reg)
1bba6ac0 911{
91a55c11 912 rtx_insn *insn;
1bba6ac0 913
914 /* For mcore, subregs can't live independently of their parent regs. */
915 if (GET_CODE (reg) == SUBREG)
916 reg = SUBREG_REG (reg);
917
918 /* Dies immediately. */
919 if (dead_or_set_p (first, reg))
920 return 1;
921
922 /* Look for conclusive evidence of live/death, otherwise we have
923 to assume that it is live. */
924 for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
925 {
aa90bb35 926 if (JUMP_P (insn))
1bba6ac0 927 return 0; /* We lose track, assume it is alive. */
928
aa90bb35 929 else if (CALL_P (insn))
1bba6ac0 930 {
 931 /* Calls might use it for target or register parms. */
932 if (reg_referenced_p (reg, PATTERN (insn))
933 || find_reg_fusage (insn, USE, reg))
934 return 0;
935 else if (dead_or_set_p (insn, reg))
936 return 1;
937 }
aa90bb35 938 else if (NONJUMP_INSN_P (insn))
1bba6ac0 939 {
940 if (reg_referenced_p (reg, PATTERN (insn)))
941 return 0;
942 else if (dead_or_set_p (insn, reg))
943 return 1;
944 }
945 }
946
33f88b1c 947 /* No conclusive evidence either way, we cannot take the chance
1bba6ac0 948 that control flow hid the use from us -- "I'm not dead yet". */
949 return 0;
950}
951
1bba6ac0 952/* Count the number of ones in mask. */
fa4fdf98 953
1bba6ac0 954int
90fab4b1 955mcore_num_ones (HOST_WIDE_INT mask)
1bba6ac0 956{
fa4fdf98 957 /* A trick to count set bits recently posted on comp.compilers. */
1bba6ac0 958 mask = (mask >> 1 & 0x55555555) + (mask & 0x55555555);
959 mask = ((mask >> 2) & 0x33333333) + (mask & 0x33333333);
960 mask = ((mask >> 4) + mask) & 0x0f0f0f0f;
961 mask = ((mask >> 8) + mask);
962
963 return (mask + (mask >> 16)) & 0xff;
964}
965
fa4fdf98 966/* Count the number of zeros in mask. */
967
1bba6ac0 968int
90fab4b1 969mcore_num_zeros (HOST_WIDE_INT mask)
1bba6ac0 970{
971 return 32 - mcore_num_ones (mask);
972}
973
974/* Determine byte being masked. */
fa4fdf98 975
1bba6ac0 976int
c167bbfc 977mcore_byte_offset (unsigned int mask)
1bba6ac0 978{
30435bf8 979 if (mask == 0x00ffffffL)
1bba6ac0 980 return 0;
30435bf8 981 else if (mask == 0xff00ffffL)
1bba6ac0 982 return 1;
30435bf8 983 else if (mask == 0xffff00ffL)
1bba6ac0 984 return 2;
30435bf8 985 else if (mask == 0xffffff00L)
1bba6ac0 986 return 3;
987
988 return -1;
989}
990
991/* Determine halfword being masked. */
fa4fdf98 992
1bba6ac0 993int
c167bbfc 994mcore_halfword_offset (unsigned int mask)
1bba6ac0 995{
996 if (mask == 0x0000ffffL)
997 return 0;
30435bf8 998 else if (mask == 0xffff0000L)
1bba6ac0 999 return 1;
1000
1001 return -1;
1002}
1003
1004/* Output a series of bseti's corresponding to mask. */
fa4fdf98 1005
2eebe422 1006const char *
c167bbfc 1007mcore_output_bseti (rtx dst, int mask)
1bba6ac0 1008{
1009 rtx out_operands[2];
1010 int bit;
1011
1012 out_operands[0] = dst;
1013
1014 for (bit = 0; bit < 32; bit++)
1015 {
1016 if ((mask & 0x1) == 0x1)
1017 {
1018 out_operands[1] = GEN_INT (bit);
1019
1020 output_asm_insn ("bseti\t%0,%1", out_operands);
1021 }
1022 mask >>= 1;
1023 }
1024
1025 return "";
1026}
1027
1028/* Output a series of bclri's corresponding to mask. */
fa4fdf98 1029
2eebe422 1030const char *
c167bbfc 1031mcore_output_bclri (rtx dst, int mask)
1bba6ac0 1032{
1033 rtx out_operands[2];
1034 int bit;
1035
1036 out_operands[0] = dst;
1037
1038 for (bit = 0; bit < 32; bit++)
1039 {
1040 if ((mask & 0x1) == 0x0)
1041 {
1042 out_operands[1] = GEN_INT (bit);
1043
1044 output_asm_insn ("bclri\t%0,%1", out_operands);
1045 }
1046
1047 mask >>= 1;
1048 }
1049
1050 return "";
1051}
1052
 1053/* Output a conditional move of two constants that are within +/- 1 of each
1054 other. See the "movtK" patterns in mcore.md. I'm not sure this is
1055 really worth the effort. */
fa4fdf98 1056
2eebe422 1057const char *
c167bbfc 1058mcore_output_cmov (rtx operands[], int cmp_t, const char * test)
1bba6ac0 1059{
90fab4b1 1060 HOST_WIDE_INT load_value;
1061 HOST_WIDE_INT adjust_value;
1bba6ac0 1062 rtx out_operands[4];
1063
1064 out_operands[0] = operands[0];
1065
fa4fdf98 1066 /* Check to see which constant is loadable. */
1bba6ac0 1067 if (const_ok_for_mcore (INTVAL (operands[1])))
1068 {
1069 out_operands[1] = operands[1];
1070 out_operands[2] = operands[2];
1071 }
1072 else if (const_ok_for_mcore (INTVAL (operands[2])))
1073 {
1074 out_operands[1] = operands[2];
1075 out_operands[2] = operands[1];
1076
fa4fdf98 1077 /* Complement test since constants are swapped. */
1bba6ac0 1078 cmp_t = (cmp_t == 0);
1079 }
1080 load_value = INTVAL (out_operands[1]);
1081 adjust_value = INTVAL (out_operands[2]);
1082
fa4fdf98 1083 /* First output the test if folded into the pattern. */
1bba6ac0 1084
1085 if (test)
1086 output_asm_insn (test, operands);
1087
fa4fdf98 1088 /* Load the constant - for now, only support constants that can be
1bba6ac0 1089 generated with a single instruction. Maybe add general inlinable
1090 constants later (this will increase the # of patterns since the
fa4fdf98 1091 instruction sequence has a different length attribute). */
1bba6ac0 1092 if (load_value >= 0 && load_value <= 127)
1093 output_asm_insn ("movi\t%0,%1", out_operands);
90fab4b1 1094 else if (CONST_OK_FOR_M (load_value))
1bba6ac0 1095 output_asm_insn ("bgeni\t%0,%P1", out_operands);
90fab4b1 1096 else if (CONST_OK_FOR_N (load_value))
1bba6ac0 1097 output_asm_insn ("bmaski\t%0,%N1", out_operands);
1098
fa4fdf98 1099 /* Output the constant adjustment. */
1bba6ac0 1100 if (load_value > adjust_value)
1101 {
1102 if (cmp_t)
1103 output_asm_insn ("decf\t%0", out_operands);
1104 else
1105 output_asm_insn ("dect\t%0", out_operands);
1106 }
1107 else
1108 {
1109 if (cmp_t)
1110 output_asm_insn ("incf\t%0", out_operands);
1111 else
1112 output_asm_insn ("inct\t%0", out_operands);
1113 }
1114
1115 return "";
1116}
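
/* A standalone sketch (hypothetical register name r2) of the sequence
   the routine above emits for the movtK patterns.  With constants 5
   and 4, 5 is movi-loadable and 5 > 4, so the adjustment is a
   conditional decrement; CMP_T picks the t/f variant exactly as above.  */
#include <stdio.h>

static void show_cmov (int load, int adjust, int cmp_t)
{
  printf ("movi\tr2,%d\n", load);                 /* the loadable constant */
  if (load > adjust)
    printf ("%s\tr2\n", cmp_t ? "decf" : "dect"); /* adjust down by one */
  else
    printf ("%s\tr2\n", cmp_t ? "incf" : "inct"); /* adjust up by one */
}

int main (void)
{
  show_cmov (5, 4, 1);   /* prints "movi r2,5" then "decf r2" */
  return 0;
}
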
1117
 1118/* Output the peephole for moving a constant that gets inverted by a not
fa4fdf98 1119 and then combined with an and (i.e. fold the not and the and into andn). BRC */
1120
2eebe422 1121const char *
c167bbfc 1122mcore_output_andn (rtx insn ATTRIBUTE_UNUSED, rtx operands[])
1bba6ac0 1123{
90fab4b1 1124 HOST_WIDE_INT x, y;
1bba6ac0 1125 rtx out_operands[3];
2eebe422 1126 const char * load_op;
1bba6ac0 1127 char buf[256];
044e64da 1128 int trick_no;
1bba6ac0 1129
044e64da 1130 trick_no = try_constant_tricks (INTVAL (operands[1]), &x, &y);
1131 gcc_assert (trick_no == 2);
1bba6ac0 1132
1133 out_operands[0] = operands[0];
90fab4b1 1134 out_operands[1] = GEN_INT (x);
1bba6ac0 1135 out_operands[2] = operands[2];
1136
1137 if (x >= 0 && x <= 127)
1138 load_op = "movi\t%0,%1";
fa4fdf98 1139
1140 /* Try exact power of two. */
90fab4b1 1141 else if (CONST_OK_FOR_M (x))
1bba6ac0 1142 load_op = "bgeni\t%0,%P1";
fa4fdf98 1143
1144 /* Try exact power of two - 1. */
90fab4b1 1145 else if (CONST_OK_FOR_N (x))
1bba6ac0 1146 load_op = "bmaski\t%0,%N1";
fa4fdf98 1147
90fab4b1 1148 else
1149 {
1150 load_op = "BADMOVI-andn\t%0, %1";
1151 gcc_unreachable ();
1152 }
1bba6ac0 1153
1154 sprintf (buf, "%s\n\tandn\t%%2,%%0", load_op);
1155 output_asm_insn (buf, out_operands);
1156
1157 return "";
1158}
1159
1160/* Output an inline constant. */
fa4fdf98 1161
2eebe422 1162static const char *
3754d046 1163output_inline_const (machine_mode mode, rtx operands[])
1bba6ac0 1164{
90fab4b1 1165 HOST_WIDE_INT x = 0, y = 0;
1bba6ac0 1166 int trick_no;
1167 rtx out_operands[3];
1168 char buf[256];
1169 char load_op[256];
2eebe422 1170 const char *dst_fmt;
90fab4b1 1171 HOST_WIDE_INT value;
1bba6ac0 1172
1173 value = INTVAL (operands[1]);
1bba6ac0 1174
044e64da 1175 trick_no = try_constant_tricks (value, &x, &y);
1176 /* lrw's are handled separately: Large inlinable constants never get
1177 turned into lrw's. Our caller uses try_constant_tricks to back
1178 off to an lrw rather than calling this routine. */
1179 gcc_assert (trick_no != 0);
1180
1bba6ac0 1181 if (trick_no == 1)
1182 x = value;
1183
fa4fdf98 1184 /* operands: 0 = dst, 1 = load immed., 2 = immed. adjustment. */
1bba6ac0 1185 out_operands[0] = operands[0];
1186 out_operands[1] = GEN_INT (x);
1187
1188 if (trick_no > 2)
1189 out_operands[2] = GEN_INT (y);
1190
fa4fdf98 1191 /* Select dst format based on mode. */
1bba6ac0 1192 if (mode == DImode && (! TARGET_LITTLE_END))
1193 dst_fmt = "%R0";
1194 else
1195 dst_fmt = "%0";
1196
1197 if (x >= 0 && x <= 127)
1198 sprintf (load_op, "movi\t%s,%%1", dst_fmt);
fa4fdf98 1199
1bba6ac0 1200 /* Try exact power of two. */
90fab4b1 1201 else if (CONST_OK_FOR_M (x))
1bba6ac0 1202 sprintf (load_op, "bgeni\t%s,%%P1", dst_fmt);
fa4fdf98 1203
1204 /* Try exact power of two - 1. */
90fab4b1 1205 else if (CONST_OK_FOR_N (x))
1bba6ac0 1206 sprintf (load_op, "bmaski\t%s,%%N1", dst_fmt);
fa4fdf98 1207
90fab4b1 1208 else
1209 {
1210 sprintf (load_op, "BADMOVI-inline_const %s, %%1", dst_fmt);
1211 gcc_unreachable ();
1212 }
1bba6ac0 1213
1214 switch (trick_no)
1215 {
1216 case 1:
1217 strcpy (buf, load_op);
1218 break;
1219 case 2: /* not */
90fab4b1 1220 sprintf (buf, "%s\n\tnot\t%s\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1bba6ac0 1221 break;
1222 case 3: /* add */
90fab4b1 1223 sprintf (buf, "%s\n\taddi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1bba6ac0 1224 break;
1225 case 4: /* sub */
90fab4b1 1226 sprintf (buf, "%s\n\tsubi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1bba6ac0 1227 break;
1228 case 5: /* rsub */
fa4fdf98 1229 /* Never happens unless -mrsubi, see try_constant_tricks(). */
90fab4b1 1230 sprintf (buf, "%s\n\trsubi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1bba6ac0 1231 break;
90fab4b1 1232 case 6: /* bseti */
1233 sprintf (buf, "%s\n\tbseti\t%s,%%P2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1bba6ac0 1234 break;
1235 case 7: /* bclr */
90fab4b1 1236 sprintf (buf, "%s\n\tbclri\t%s,%%Q2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1bba6ac0 1237 break;
1238 case 8: /* rotl */
90fab4b1 1239 sprintf (buf, "%s\n\trotli\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1bba6ac0 1240 break;
1241 case 9: /* lsl */
90fab4b1 1242 sprintf (buf, "%s\n\tlsli\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1bba6ac0 1243 break;
1244 case 10: /* ixh */
90fab4b1 1245 sprintf (buf, "%s\n\tixh\t%s,%s\t// %ld 0x%lx", load_op, dst_fmt, dst_fmt, value, value);
1bba6ac0 1246 break;
1247 case 11: /* ixw */
90fab4b1 1248 sprintf (buf, "%s\n\tixw\t%s,%s\t// %ld 0x%lx", load_op, dst_fmt, dst_fmt, value, value);
1bba6ac0 1249 break;
1250 default:
1251 return "";
1252 }
1253
1254 output_asm_insn (buf, out_operands);
1255
1256 return "";
1257}
1258
 1259/* Output a move of a value one word wide or smaller. */
fa4fdf98 1260
2eebe422 1261const char *
c167bbfc 1262mcore_output_move (rtx insn ATTRIBUTE_UNUSED, rtx operands[],
3754d046 1263 machine_mode mode ATTRIBUTE_UNUSED)
1bba6ac0 1264{
1265 rtx dst = operands[0];
1266 rtx src = operands[1];
1267
1268 if (GET_CODE (dst) == REG)
1269 {
1270 if (GET_CODE (src) == REG)
1271 {
1272 if (REGNO (src) == CC_REG) /* r-c */
1273 return "mvc\t%0";
1274 else
1275 return "mov\t%0,%1"; /* r-r*/
1276 }
1277 else if (GET_CODE (src) == MEM)
1278 {
1279 if (GET_CODE (XEXP (src, 0)) == LABEL_REF)
1280 return "lrw\t%0,[%1]"; /* a-R */
1281 else
d13344c8 1282 switch (GET_MODE (src)) /* r-m */
1283 {
1284 case SImode:
1285 return "ldw\t%0,%1";
1286 case HImode:
1287 return "ld.h\t%0,%1";
1288 case QImode:
1289 return "ld.b\t%0,%1";
1290 default:
044e64da 1291 gcc_unreachable ();
d13344c8 1292 }
1bba6ac0 1293 }
1294 else if (GET_CODE (src) == CONST_INT)
1295 {
90fab4b1 1296 HOST_WIDE_INT x, y;
1bba6ac0 1297
1298 if (CONST_OK_FOR_I (INTVAL (src))) /* r-I */
1299 return "movi\t%0,%1";
1300 else if (CONST_OK_FOR_M (INTVAL (src))) /* r-M */
1301 return "bgeni\t%0,%P1\t// %1 %x1";
1302 else if (CONST_OK_FOR_N (INTVAL (src))) /* r-N */
1303 return "bmaski\t%0,%N1\t// %1 %x1";
1304 else if (try_constant_tricks (INTVAL (src), &x, &y)) /* R-P */
1305 return output_inline_const (SImode, operands); /* 1-2 insns */
1306 else
fa4fdf98 1307 return "lrw\t%0,%x1\t// %1"; /* Get it from literal pool. */
1bba6ac0 1308 }
1309 else
fa4fdf98 1310 return "lrw\t%0, %1"; /* Into the literal pool. */
1bba6ac0 1311 }
1312 else if (GET_CODE (dst) == MEM) /* m-r */
d13344c8 1313 switch (GET_MODE (dst))
1314 {
1315 case SImode:
1316 return "stw\t%1,%0";
1317 case HImode:
1318 return "st.h\t%1,%0";
1319 case QImode:
1320 return "st.b\t%1,%0";
1321 default:
044e64da 1322 gcc_unreachable ();
d13344c8 1323 }
1bba6ac0 1324
044e64da 1325 gcc_unreachable ();
1bba6ac0 1326}
1327
1bba6ac0 1328/* Return a sequence of instructions to perform DI or DF move.
1329 Since the MCORE cannot move a DI or DF in one instruction, we have
1330 to take care when we see overlapping source and dest registers. */
fa4fdf98 1331
2eebe422 1332const char *
3754d046 1333mcore_output_movedouble (rtx operands[], machine_mode mode ATTRIBUTE_UNUSED)
1bba6ac0 1334{
1335 rtx dst = operands[0];
1336 rtx src = operands[1];
1337
1338 if (GET_CODE (dst) == REG)
1339 {
1340 if (GET_CODE (src) == REG)
1341 {
1342 int dstreg = REGNO (dst);
1343 int srcreg = REGNO (src);
fa4fdf98 1344
1bba6ac0 1345 /* Ensure the second source word is not overwritten. */
1346 if (srcreg + 1 == dstreg)
1347 return "mov %R0,%R1\n\tmov %0,%1";
1348 else
1349 return "mov %0,%1\n\tmov %R0,%R1";
1350 }
1351 else if (GET_CODE (src) == MEM)
1352 {
6830c3f7 1353 rtx memexp = XEXP (src, 0);
1bba6ac0 1354 int dstreg = REGNO (dst);
1355 int basereg = -1;
1356
1357 if (GET_CODE (memexp) == LABEL_REF)
1358 return "lrw\t%0,[%1]\n\tlrw\t%R0,[%R1]";
1359 else if (GET_CODE (memexp) == REG)
1360 basereg = REGNO (memexp);
1361 else if (GET_CODE (memexp) == PLUS)
1362 {
1363 if (GET_CODE (XEXP (memexp, 0)) == REG)
1364 basereg = REGNO (XEXP (memexp, 0));
1365 else if (GET_CODE (XEXP (memexp, 1)) == REG)
1366 basereg = REGNO (XEXP (memexp, 1));
1367 else
044e64da 1368 gcc_unreachable ();
1bba6ac0 1369 }
1370 else
044e64da 1371 gcc_unreachable ();
1bba6ac0 1372
fa4fdf98 1373 /* ??? length attribute is wrong here. */
1bba6ac0 1374 if (dstreg == basereg)
1375 {
fa4fdf98 1376 /* Just load them in reverse order. */
1bba6ac0 1377 return "ldw\t%R0,%R1\n\tldw\t%0,%1";
fa4fdf98 1378
1bba6ac0 1379 /* XXX: alternative: move basereg to basereg+1
fa4fdf98 1380 and then fall through. */
1bba6ac0 1381 }
1382 else
1383 return "ldw\t%0,%1\n\tldw\t%R0,%R1";
1384 }
1385 else if (GET_CODE (src) == CONST_INT)
1386 {
1387 if (TARGET_LITTLE_END)
1388 {
1389 if (CONST_OK_FOR_I (INTVAL (src)))
1390 output_asm_insn ("movi %0,%1", operands);
1391 else if (CONST_OK_FOR_M (INTVAL (src)))
1392 output_asm_insn ("bgeni %0,%P1", operands);
1bba6ac0 1393 else if (CONST_OK_FOR_N (INTVAL (src)))
1394 output_asm_insn ("bmaski %0,%N1", operands);
1395 else
044e64da 1396 gcc_unreachable ();
1bba6ac0 1397
1398 if (INTVAL (src) < 0)
1399 return "bmaski %R0,32";
1400 else
1401 return "movi %R0,0";
1402 }
1403 else
1404 {
1405 if (CONST_OK_FOR_I (INTVAL (src)))
1406 output_asm_insn ("movi %R0,%1", operands);
1407 else if (CONST_OK_FOR_M (INTVAL (src)))
1408 output_asm_insn ("bgeni %R0,%P1", operands);
1bba6ac0 1409 else if (CONST_OK_FOR_N (INTVAL (src)))
1410 output_asm_insn ("bmaski %R0,%N1", operands);
1411 else
044e64da 1412 gcc_unreachable ();
90fab4b1 1413
1bba6ac0 1414 if (INTVAL (src) < 0)
1415 return "bmaski %0,32";
1416 else
1417 return "movi %0,0";
1418 }
1419 }
1420 else
044e64da 1421 gcc_unreachable ();
1bba6ac0 1422 }
1423 else if (GET_CODE (dst) == MEM && GET_CODE (src) == REG)
1424 return "stw\t%1,%0\n\tstw\t%R1,%R0";
1425 else
044e64da 1426 gcc_unreachable ();
1bba6ac0 1427}
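
/* A standalone model of the overlap rule above for reg-to-reg DI moves:
   when srcreg + 1 == dstreg, the pair is copied high word first so the
   second source word is still intact when it is read.  */
#include <assert.h>

int main (void)
{
  int r[8] = { 0, 0, 11, 22, 0, 0, 0, 0 };
  int src = 2, dst = 3;                    /* r3:r4 <- r2:r3, overlapping */

  if (src + 1 == dst)
    {
      r[dst + 1] = r[src + 1];             /* mov %R0,%R1 */
      r[dst] = r[src];                     /* mov %0,%1   */
    }
  else
    {
      r[dst] = r[src];
      r[dst + 1] = r[src + 1];
    }

  assert (r[3] == 11 && r[4] == 22);
  return 0;
}
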
1428
1429/* Predicates used by the templates. */
1430
1bba6ac0 1431int
c167bbfc 1432mcore_arith_S_operand (rtx op)
1bba6ac0 1433{
1434 if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (~INTVAL (op)))
1435 return 1;
1436
1437 return 0;
1438}
1439
fa4fdf98 1440/* Expand insert bit field. BRC */
1441
1bba6ac0 1442int
c167bbfc 1443mcore_expand_insv (rtx operands[])
1bba6ac0 1444{
1445 int width = INTVAL (operands[1]);
1446 int posn = INTVAL (operands[2]);
1447 int mask;
1448 rtx mreg, sreg, ereg;
1449
1450 /* To get width 1 insv, the test in store_bit_field() (expmed.c, line 191)
1451 for width==1 must be removed. Look around line 368. This is something
fa4fdf98 1452 we really want the md part to do. */
1bba6ac0 1453 if (width == 1 && GET_CODE (operands[3]) == CONST_INT)
1454 {
fa4fdf98 1455 /* Do directly with bseti or bclri. */
1456 /* RBE: 2/97 consider only low bit of constant. */
90fab4b1 1457 if ((INTVAL (operands[3]) & 1) == 0)
1bba6ac0 1458 {
1459 mask = ~(1 << posn);
d1f9b275 1460 emit_insn (gen_rtx_SET (operands[0],
1461 gen_rtx_AND (SImode, operands[0],
1462 GEN_INT (mask))));
1bba6ac0 1463 }
1464 else
1465 {
1466 mask = 1 << posn;
d1f9b275 1467 emit_insn (gen_rtx_SET (operands[0],
1468 gen_rtx_IOR (SImode, operands[0],
1469 GEN_INT (mask))));
1bba6ac0 1470 }
1471
1472 return 1;
1473 }
1474
ceb2fe0f 1475 /* Look at some bit-field placements that we aren't interested
fa4fdf98 1476 in handling ourselves, unless specifically directed to do so. */
1bba6ac0 1477 if (! TARGET_W_FIELD)
1478 return 0; /* Generally, give up about now. */
1479
1480 if (width == 8 && posn % 8 == 0)
1481 /* Byte sized and aligned; let caller break it up. */
1482 return 0;
1483
1484 if (width == 16 && posn % 16 == 0)
1485 /* Short sized and aligned; let caller break it up. */
1486 return 0;
1487
1488 /* The general case - we can do this a little bit better than what the
1489 machine independent part tries. This will get rid of all the subregs
1490 that mess up constant folding in combine when working with relaxed
fa4fdf98 1491 immediates. */
1bba6ac0 1492
1493 /* If setting the entire field, do it directly. */
90fab4b1 1494 if (GET_CODE (operands[3]) == CONST_INT
1495 && INTVAL (operands[3]) == ((1 << width) - 1))
1bba6ac0 1496 {
1497 mreg = force_reg (SImode, GEN_INT (INTVAL (operands[3]) << posn));
d1f9b275 1498 emit_insn (gen_rtx_SET (operands[0],
1499 gen_rtx_IOR (SImode, operands[0], mreg)));
1bba6ac0 1500 return 1;
1501 }
1502
1503 /* Generate the clear mask. */
1504 mreg = force_reg (SImode, GEN_INT (~(((1 << width) - 1) << posn)));
1505
1506 /* Clear the field, to overlay it later with the source. */
d1f9b275 1507 emit_insn (gen_rtx_SET (operands[0],
1508 gen_rtx_AND (SImode, operands[0], mreg)));
1bba6ac0 1509
1510 /* If the source is constant 0, we've nothing to add back. */
1511 if (GET_CODE (operands[3]) == CONST_INT && INTVAL (operands[3]) == 0)
1512 return 1;
1513
1514 /* XXX: Should we worry about more games with constant values?
1515 We've covered the high profile: set/clear single-bit and many-bit
1516 fields. How often do we see "arbitrary bit pattern" constants? */
1517 sreg = copy_to_mode_reg (SImode, operands[3]);
1518
1519 /* Extract src as same width as dst (needed for signed values). We
1520 always have to do this since we widen everything to SImode.
1521 We don't have to mask if we're shifting this up against the
 1522 MSB of the register (i.e., the shift will push out any hi-order
fa4fdf98 1523 bits). */
2eebe422 1524 if (width + posn != (int) GET_MODE_BITSIZE (SImode))
1bba6ac0 1525 {
1526 ereg = force_reg (SImode, GEN_INT ((1 << width) - 1));
d1f9b275 1527 emit_insn (gen_rtx_SET (sreg, gen_rtx_AND (SImode, sreg, ereg)));
1bba6ac0 1528 }
1529
fa4fdf98 1530 /* Insert source value in dest. */
1bba6ac0 1531 if (posn != 0)
d1f9b275 1532 emit_insn (gen_rtx_SET (sreg, gen_rtx_ASHIFT (SImode, sreg,
1533 GEN_INT (posn))));
1bba6ac0 1534
d1f9b275 1535 emit_insn (gen_rtx_SET (operands[0],
1536 gen_rtx_IOR (SImode, operands[0], sreg)));
1bba6ac0 1537
1538 return 1;
1539}
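
/* A standalone model of the general-case sequence above for field
   widths below 32: clear the field with an AND mask, truncate the
   source to the field width (the routine skips this when the field
   abuts the MSB), shift it into position, and OR it in.  */
#include <assert.h>
#include <stdint.h>

static uint32_t insert_field (uint32_t dst, uint32_t src, int width, int posn)
{
  uint32_t field = ((1u << width) - 1) << posn;

  dst &= ~field;                           /* clear the field      */
  src &= (1u << width) - 1;                /* truncate the source  */
  return dst | (src << posn);              /* overlay the source   */
}

int main (void)
{
  /* Insert the 4-bit value 5 at bit position 8 of all-ones.  */
  assert (insert_field (0xffffffffu, 0x5, 4, 8) == 0xfffff5ffu);
  return 0;
}
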
1bba6ac0 1540\f
1541/* ??? Block move stuff stolen from m88k. This code has not been
1542 verified for correctness. */
1543
1544/* Emit code to perform a block move. Choose the best method.
1545
1546 OPERANDS[0] is the destination.
1547 OPERANDS[1] is the source.
1548 OPERANDS[2] is the size.
1549 OPERANDS[3] is the alignment safe to use. */
1550
1551/* Emit code to perform a block move with an offset sequence of ldw/st
1552 instructions (..., ldw 0, stw 1, ldw 1, stw 0, ...). SIZE and ALIGN are
 1553 known constants. DST_MEM and SRC_MEM are the destination and source
fa4fdf98 1554 memory references. */
1555
3754d046 1556static const machine_mode mode_from_align[] =
1bba6ac0 1557{
1558 VOIDmode, QImode, HImode, VOIDmode, SImode,
1bba6ac0 1559};
1560
1561static void
51b742cc 1562block_move_sequence (rtx dst_mem, rtx src_mem, int size, int align)
1bba6ac0 1563{
1564 rtx temp[2];
3754d046 1565 machine_mode mode[2];
1bba6ac0 1566 int amount[2];
51b742cc 1567 bool active[2];
1bba6ac0 1568 int phase = 0;
1569 int next;
51b742cc 1570 int offset_ld = 0;
1571 int offset_st = 0;
1572 rtx x;
1bba6ac0 1573
51b742cc 1574 x = XEXP (dst_mem, 0);
1575 if (!REG_P (x))
1576 {
1577 x = force_reg (Pmode, x);
1578 dst_mem = replace_equiv_address (dst_mem, x);
1579 }
1bba6ac0 1580
51b742cc 1581 x = XEXP (src_mem, 0);
1582 if (!REG_P (x))
1bba6ac0 1583 {
51b742cc 1584 x = force_reg (Pmode, x);
1585 src_mem = replace_equiv_address (src_mem, x);
1bba6ac0 1586 }
1587
51b742cc 1588 active[0] = active[1] = false;
1589
1bba6ac0 1590 do
1591 {
1bba6ac0 1592 next = phase;
51b742cc 1593 phase ^= 1;
1bba6ac0 1594
1595 if (size > 0)
1596 {
51b742cc 1597 int next_amount;
1598
1599 next_amount = (size >= 4 ? 4 : (size >= 2 ? 2 : 1));
1600 next_amount = MIN (next_amount, align);
1601
1602 amount[next] = next_amount;
1603 mode[next] = mode_from_align[next_amount];
1604 temp[next] = gen_reg_rtx (mode[next]);
1605
1606 x = adjust_address (src_mem, mode[next], offset_ld);
d1f9b275 1607 emit_insn (gen_rtx_SET (temp[next], x));
51b742cc 1608
1609 offset_ld += next_amount;
1610 size -= next_amount;
1611 active[next] = true;
1bba6ac0 1612 }
1613
1614 if (active[phase])
1615 {
51b742cc 1616 active[phase] = false;
1bba6ac0 1617
51b742cc 1618 x = adjust_address (dst_mem, mode[phase], offset_st);
d1f9b275 1619 emit_insn (gen_rtx_SET (x, temp[phase]));
51b742cc 1620
1bba6ac0 1621 offset_st += amount[phase];
1622 }
1623 }
1624 while (active[next]);
1625}
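
/* A standalone trace of the two-phase schedule generated above: loads
   run one chunk ahead of stores, and chunk sizes follow the same
   size/alignment rule.  For size 10, align 2 this prints five 2-byte
   load/store pairs, software-pipelined one load ahead.  */
#include <stdio.h>

static void trace_block_move (int size, int align)
{
  int amount[2];
  int active[2] = { 0, 0 };
  int phase = 0, next, off_ld = 0, off_st = 0;

  do
    {
      next = phase;
      phase ^= 1;

      if (size > 0)
        {
          int a = size >= 4 ? 4 : size >= 2 ? 2 : 1;

          if (a > align)
            a = align;
          amount[next] = a;
          printf ("load  %d bytes from src+%d\n", a, off_ld);
          off_ld += a;
          size -= a;
          active[next] = 1;
        }

      if (active[phase])
        {
          active[phase] = 0;
          printf ("store %d bytes to   dst+%d\n", amount[phase], off_st);
          off_st += amount[phase];
        }
    }
  while (active[next]);
}

int main (void)
{
  trace_block_move (10, 2);
  return 0;
}
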
1626
51b742cc 1627bool
1628mcore_expand_block_move (rtx *operands)
1bba6ac0 1629{
51b742cc 1630 HOST_WIDE_INT align, bytes, max;
1631
1632 if (GET_CODE (operands[2]) != CONST_INT)
1633 return false;
1634
1635 bytes = INTVAL (operands[2]);
1636 align = INTVAL (operands[3]);
1bba6ac0 1637
51b742cc 1638 if (bytes <= 0)
1639 return false;
1640 if (align > 4)
1641 align = 4;
1642
1643 switch (align)
1bba6ac0 1644 {
51b742cc 1645 case 4:
1646 if (bytes & 1)
1647 max = 4*4;
1648 else if (bytes & 3)
1649 max = 8*4;
1650 else
1651 max = 16*4;
1652 break;
1653 case 2:
1654 max = 4*2;
1655 break;
1656 case 1:
1657 max = 4*1;
1658 break;
1659 default:
044e64da 1660 gcc_unreachable ();
51b742cc 1661 }
1662
1663 if (bytes <= max)
1664 {
1665 block_move_sequence (operands[0], operands[1], bytes, align);
1666 return true;
1bba6ac0 1667 }
1668
51b742cc 1669 return false;
1bba6ac0 1670}
1671\f
1672
1673/* Code to generate prologue and epilogue sequences. */
1674static int number_of_regs_before_varargs;
fa4fdf98 1675
6644435d 1676/* Set by TARGET_SETUP_INCOMING_VARARGS to indicate to prolog that this is
1bba6ac0 1677 for a varargs function. */
1678static int current_function_anonymous_args;
1679
1bba6ac0 1680#define STACK_BYTES (STACK_BOUNDARY/BITS_PER_UNIT)
1681#define STORE_REACH (64) /* Maximum displace of word store + 4. */
fa4fdf98 1682#define ADDI_REACH (32) /* Maximum addi operand. */
1bba6ac0 1683
1bba6ac0 1684static void
c167bbfc 1685layout_mcore_frame (struct mcore_frame * infp)
1bba6ac0 1686{
1687 int n;
1688 unsigned int i;
1689 int nbytes;
1690 int regarg;
1691 int localregarg;
1bba6ac0 1692 int outbounds;
1693 unsigned int growths;
1694 int step;
1695
1696 /* Might have to spill bytes to re-assemble a big argument that
fa4fdf98 1697 was passed partially in registers and partially on the stack. */
abe32cce 1698 nbytes = crtl->args.pretend_args_size;
1bba6ac0 1699
1700 /* Determine how much space for spilled anonymous args (e.g., stdarg). */
1701 if (current_function_anonymous_args)
1702 nbytes += (NPARM_REGS - number_of_regs_before_varargs) * UNITS_PER_WORD;
1703
1704 infp->arg_size = nbytes;
1705
1706 /* How much space to save non-volatile registers we stomp. */
1707 infp->reg_mask = calc_live_regs (& n);
1708 infp->reg_size = n * 4;
1709
3fcfff30 1710 /* And the rest of it... locals and space for overflowed outbounds. */
1bba6ac0 1711 infp->local_size = get_frame_size ();
abe32cce 1712 infp->outbound_size = crtl->outgoing_args_size;
1bba6ac0 1713
1714 /* Make sure we have a whole number of words for the locals. */
1715 if (infp->local_size % STACK_BYTES)
1716 infp->local_size = (infp->local_size + STACK_BYTES - 1) & ~ (STACK_BYTES -1);
1717
1718 /* Only thing we know we have to pad is the outbound space, since
 1719 we've aligned our locals assuming that the base of locals is aligned. */
1720 infp->pad_local = 0;
1721 infp->pad_reg = 0;
1722 infp->pad_outbound = 0;
1723 if (infp->outbound_size % STACK_BYTES)
1724 infp->pad_outbound = STACK_BYTES - (infp->outbound_size % STACK_BYTES);
1725
1726 /* Now we see how we want to stage the prologue so that it does
1727 the most appropriate stack growth and register saves to either:
1728 (1) run fast,
1729 (2) reduce instruction space, or
1730 (3) reduce stack space. */
3098b2d3 1731 for (i = 0; i < ARRAY_SIZE (infp->growth); i++)
1bba6ac0 1732 infp->growth[i] = 0;
1733
1734 regarg = infp->reg_size + infp->arg_size;
1735 localregarg = infp->local_size + regarg;
1bba6ac0 1736 outbounds = infp->outbound_size + infp->pad_outbound;
1737 growths = 0;
1738
1739 /* XXX: Consider one where we consider localregarg + outbound too! */
1740
 1741 /* If the frame is <= 32 bytes and an stm would save <= 2 registers,
 1742 use stw's with offsets and buy the frame in one shot. */
1743 if (localregarg <= ADDI_REACH
1744 && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
1745 {
1746 /* Make sure we'll be aligned. */
1747 if (localregarg % STACK_BYTES)
1748 infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);
1749
1750 step = localregarg + infp->pad_reg;
1751 infp->reg_offset = infp->local_size;
1752
1753 if (outbounds + step <= ADDI_REACH && !frame_pointer_needed)
1754 {
1755 step += outbounds;
1756 infp->reg_offset += outbounds;
1757 outbounds = 0;
1758 }
1759
1760 infp->arg_offset = step - 4;
1761 infp->growth[growths++] = step;
1762 infp->reg_growth = growths;
1763 infp->local_growth = growths;
1764
fa4fdf98 1765 /* If we haven't already folded it in. */
1bba6ac0 1766 if (outbounds)
1767 infp->growth[growths++] = outbounds;
1768
1769 goto finish;
1770 }
1771
1772 /* Frame can't be done with a single subi, but can be done with 2
1773 insns. If the 'stm' is getting <= 2 registers, we use stw's and
1774 shift some of the stack purchase into the first subi, so both are
1775 single instructions. */
1776 if (localregarg <= STORE_REACH
1777 && (infp->local_size > ADDI_REACH)
1778 && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
1779 {
1780 int all;
1781
1782 /* Make sure we'll be aligned; use either pad_reg or pad_local. */
1783 if (localregarg % STACK_BYTES)
1784 infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);
1785
1786 all = localregarg + infp->pad_reg + infp->pad_local;
1787 step = ADDI_REACH; /* As much up front as we can. */
1788 if (step > all)
1789 step = all;
1790
1791 /* XXX: Consider whether step will still be aligned; we believe so. */
1792 infp->arg_offset = step - 4;
1793 infp->growth[growths++] = step;
1794 infp->reg_growth = growths;
1795 infp->reg_offset = step - infp->pad_reg - infp->reg_size;
1796 all -= step;
1797
fa4fdf98 1798 /* Can we fold in any space required for outbounds? */
1bba6ac0 1799 if (outbounds + all <= ADDI_REACH && !frame_pointer_needed)
1800 {
1801 all += outbounds;
1802 outbounds = 0;
1803 }
1804
fa4fdf98 1805 /* Get the rest of the locals in place. */
1bba6ac0 1806 step = all;
1807 infp->growth[growths++] = step;
1808 infp->local_growth = growths;
1809 all -= step;
1810
1e944a0b 1811 gcc_assert (all == 0);
1bba6ac0 1812
fa4fdf98 1813 /* Finish off if we need to do so. */
1bba6ac0 1814 if (outbounds)
1815 infp->growth[growths++] = outbounds;
1816
1817 goto finish;
1818 }
1819
1820 /* Registers + args is nicely aligned, so we'll buy that in one shot.
1821 Then we buy the rest of the frame in 1 or 2 steps depending on
1822 whether we need a frame pointer. */
1823 if ((regarg % STACK_BYTES) == 0)
1824 {
1825 infp->growth[growths++] = regarg;
1826 infp->reg_growth = growths;
1827 infp->arg_offset = regarg - 4;
1828 infp->reg_offset = 0;
1829
1830 if (infp->local_size % STACK_BYTES)
1831 infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);
1832
1833 step = infp->local_size + infp->pad_local;
1834
1835 if (!frame_pointer_needed)
1836 {
1837 step += outbounds;
1838 outbounds = 0;
1839 }
1840
1841 infp->growth[growths++] = step;
1842 infp->local_growth = growths;
1843
fa4fdf98 1844 /* If there's any left to be done. */
1bba6ac0 1845 if (outbounds)
1846 infp->growth[growths++] = outbounds;
1847
1848 goto finish;
1849 }
1850
1851 /* XXX: optimizations that we'll want to play with....
fa4fdf98 1852 -- regarg is not aligned, but it's a small number of registers;
1853 use some of localsize so that regarg is aligned and then
1854 save the registers. */
1bba6ac0 1855
1856 /* Simple encoding; plods down the stack buying the pieces as it goes.
fa4fdf98 1857 -- does not optimize space consumption.
1858 -- does not attempt to optimize instruction counts.
1859 -- but it is safe for all alignments. */
1bba6ac0 1860 if (regarg % STACK_BYTES != 0)
1861 infp->pad_reg = STACK_BYTES - (regarg % STACK_BYTES);
1862
1863 infp->growth[growths++] = infp->arg_size + infp->reg_size + infp->pad_reg;
1864 infp->reg_growth = growths;
1865 infp->arg_offset = infp->growth[0] - 4;
1866 infp->reg_offset = 0;
1867
1868 if (frame_pointer_needed)
1869 {
1870 if (infp->local_size % STACK_BYTES != 0)
1871 infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);
1872
1873 infp->growth[growths++] = infp->local_size + infp->pad_local;
1874 infp->local_growth = growths;
1875
1876 infp->growth[growths++] = outbounds;
1877 }
1878 else
1879 {
1880 if ((infp->local_size + outbounds) % STACK_BYTES != 0)
1881 infp->pad_local = STACK_BYTES - ((infp->local_size + outbounds) % STACK_BYTES);
1882
1883 infp->growth[growths++] = infp->local_size + infp->pad_local + outbounds;
1884 infp->local_growth = growths;
1885 }
1886
  /* Anything else that we've forgotten, plus a few consistency checks. */
1bba6ac0 1888 finish:
1e944a0b 1889 gcc_assert (infp->reg_offset >= 0);
1890 gcc_assert (growths <= MAX_STACK_GROWS);
1bba6ac0 1891
1892 for (i = 0; i < growths; i++)
044e64da 1893 gcc_assert (!(infp->growth[i] % STACK_BYTES));
1bba6ac0 1894}
1895
1896/* Define the offset between two registers, one to be eliminated, and
1897 the other its replacement, at the start of a routine. */
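/* Schematically (stack grows downward):

	ap ->
		saved registers + locals (+ padding)	} above_frame
	fp ->
		outbound args (+ padding)		} below_frame
	sp ->

   so ap - fp == above_frame and fp - sp == below_frame, which is what
   the three cases below return. */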
fa4fdf98 1898
1bba6ac0 1899int
c167bbfc 1900mcore_initial_elimination_offset (int from, int to)
1bba6ac0 1901{
1902 int above_frame;
1903 int below_frame;
1904 struct mcore_frame fi;
1905
1906 layout_mcore_frame (& fi);
1907
1908 /* fp to ap */
1909 above_frame = fi.local_size + fi.pad_local + fi.reg_size + fi.pad_reg;
1910 /* sp to fp */
1911 below_frame = fi.outbound_size + fi.pad_outbound;
1912
1913 if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
1914 return above_frame;
1915
1916 if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
1917 return above_frame + below_frame;
1918
1919 if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
1920 return below_frame;
1921
044e64da 1922 gcc_unreachable ();
1bba6ac0 1923}
1924
fa4fdf98 1925/* Keep track of some information about varargs for the prolog. */
1926
5ee57edb 1927static void
39cba157 1928mcore_setup_incoming_varargs (cumulative_args_t args_so_far_v,
3754d046 1929 machine_mode mode, tree type,
5ee57edb 1930 int * ptr_pretend_size ATTRIBUTE_UNUSED,
1931 int second_time ATTRIBUTE_UNUSED)
1bba6ac0 1932{
39cba157 1933 CUMULATIVE_ARGS *args_so_far = get_cumulative_args (args_so_far_v);
1934
1bba6ac0 1935 current_function_anonymous_args = 1;
1936
1937 /* We need to know how many argument registers are used before
1938 the varargs start, so that we can push the remaining argument
1939 registers during the prologue. */
5ee57edb 1940 number_of_regs_before_varargs = *args_so_far + mcore_num_arg_regs (mode, type);
1bba6ac0 1941
dfd1079d 1942 /* There is a bug somewhere in the arg handling code.
     Until I can find it, this workaround always pushes the
1944 last named argument onto the stack. */
5ee57edb 1945 number_of_regs_before_varargs = *args_so_far;
1bba6ac0 1946
1947 /* The last named argument may be split between argument registers
1948 and the stack. Allow for this here. */
1949 if (number_of_regs_before_varargs > NPARM_REGS)
1950 number_of_regs_before_varargs = NPARM_REGS;
1951}
1952
1953void
c167bbfc 1954mcore_expand_prolog (void)
1bba6ac0 1955{
1956 struct mcore_frame fi;
1957 int space_allocated = 0;
1958 int growth = 0;
1959
1960 /* Find out what we're doing. */
1961 layout_mcore_frame (&fi);
1962
1963 space_allocated = fi.arg_size + fi.reg_size + fi.local_size +
1964 fi.outbound_size + fi.pad_outbound + fi.pad_local + fi.pad_reg;
1965
1966 if (TARGET_CG_DATA)
1967 {
1968 /* Emit a symbol for this routine's frame size. */
1969 rtx x;
1bba6ac0 1970
1971 x = DECL_RTL (current_function_decl);
1972
044e64da 1973 gcc_assert (GET_CODE (x) == MEM);
1bba6ac0 1974
1975 x = XEXP (x, 0);
1976
044e64da 1977 gcc_assert (GET_CODE (x) == SYMBOL_REF);
1bba6ac0 1978
dd045aee 1979 free (mcore_current_function_name);
1bba6ac0 1980
9591cb7b 1981 mcore_current_function_name = xstrdup (XSTR (x, 0));
1bba6ac0 1982
1983 ASM_OUTPUT_CG_NODE (asm_out_file, mcore_current_function_name, space_allocated);
1984
18d50ae6 1985 if (cfun->calls_alloca)
1bba6ac0 1986 ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name, "alloca", 1);
1987
1988 /* 970425: RBE:
     We're looking at how the 8-byte alignment affects stack layout
1990 and where we had to pad things. This emits information we can
1991 extract which tells us about frame sizes and the like. */
1992 fprintf (asm_out_file,
1993 "\t.equ\t__$frame$info$_%s_$_%d_%d_x%x_%d_%d_%d,0\n",
1994 mcore_current_function_name,
1995 fi.arg_size, fi.reg_size, fi.reg_mask,
1996 fi.local_size, fi.outbound_size,
1997 frame_pointer_needed);
1998 }
1999
2000 if (mcore_naked_function_p ())
2001 return;
2002
2003 /* Handle stdarg+regsaves in one shot: can't be more than 64 bytes. */
c167bbfc 2004 output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
1bba6ac0 2005
2006 /* If we have a parameter passed partially in regs and partially in memory,
2007 the registers will have been stored to memory already in function.c. So
2008 we only need to do something here for varargs functions. */
abe32cce 2009 if (fi.arg_size != 0 && crtl->args.pretend_args_size == 0)
1bba6ac0 2010 {
2011 int offset;
2012 int rn = FIRST_PARM_REG + NPARM_REGS - 1;
2013 int remaining = fi.arg_size;
2014
2015 for (offset = fi.arg_offset; remaining >= 4; offset -= 4, rn--, remaining -= 4)
2016 {
2017 emit_insn (gen_movsi
1a83b3ff 2018 (gen_rtx_MEM (SImode,
29c05e22 2019 plus_constant (Pmode, stack_pointer_rtx,
2020 offset)),
1a83b3ff 2021 gen_rtx_REG (SImode, rn)));
1bba6ac0 2022 }
2023 }
2024
fa4fdf98 2025 /* Do we need another stack adjustment before we do the register saves? */
1bba6ac0 2026 if (growth < fi.reg_growth)
c167bbfc 2027 output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
1bba6ac0 2028
2029 if (fi.reg_size != 0)
2030 {
2031 int i;
2032 int offs = fi.reg_offset;
2033
2034 for (i = 15; i >= 0; i--)
2035 {
2036 if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
2037 {
2038 int first_reg = 15;
2039
2040 while (fi.reg_mask & (1 << first_reg))
2041 first_reg--;
2042 first_reg++;
2043
1a83b3ff 2044 emit_insn (gen_store_multiple (gen_rtx_MEM (SImode, stack_pointer_rtx),
2045 gen_rtx_REG (SImode, first_reg),
1bba6ac0 2046 GEN_INT (16 - first_reg)));
2047
2048 i -= (15 - first_reg);
2049 offs += (16 - first_reg) * 4;
2050 }
2051 else if (fi.reg_mask & (1 << i))
2052 {
2053 emit_insn (gen_movsi
1a83b3ff 2054 (gen_rtx_MEM (SImode,
29c05e22 2055 plus_constant (Pmode, stack_pointer_rtx,
2056 offs)),
1a83b3ff 2057 gen_rtx_REG (SImode, i)));
1bba6ac0 2058 offs += 4;
2059 }
2060 }
2061 }
2062
2063 /* Figure the locals + outbounds. */
2064 if (frame_pointer_needed)
2065 {
      /* If we haven't already purchased stack space up to 'fp'. */
2067 if (growth < fi.local_growth)
c167bbfc 2068 output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
1bba6ac0 2069
2070 emit_insn (gen_movsi (frame_pointer_rtx, stack_pointer_rtx));
2071
fa4fdf98 2072 /* ... and then go any remaining distance for outbounds, etc. */
1bba6ac0 2073 if (fi.growth[growth])
2074 output_stack_adjust (-1, fi.growth[growth++]);
2075 }
2076 else
2077 {
2078 if (growth < fi.local_growth)
c167bbfc 2079 output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
1bba6ac0 2080 if (fi.growth[growth])
2081 output_stack_adjust (-1, fi.growth[growth++]);
2082 }
2083}
2084
2085void
c167bbfc 2086mcore_expand_epilog (void)
1bba6ac0 2087{
2088 struct mcore_frame fi;
2089 int i;
2090 int offs;
  int growth = MAX_STACK_GROWS - 1;

  /* Find out what we're doing. */
  layout_mcore_frame (&fi);
2096
2097 if (mcore_naked_function_p ())
2098 return;
2eebe422 2099
1bba6ac0 2100 /* If we had a frame pointer, restore the sp from that. */
2101 if (frame_pointer_needed)
2102 {
2103 emit_insn (gen_movsi (stack_pointer_rtx, frame_pointer_rtx));
2104 growth = fi.local_growth - 1;
2105 }
2106 else
2107 {
      /* XXX: this while loop should accumulate the adjustments and
	 give the stack back in a single step. */
2109 while (growth >= fi.local_growth)
2110 {
2111 if (fi.growth[growth] != 0)
2112 output_stack_adjust (1, fi.growth[growth]);
2113 growth--;
2114 }
2115 }
2116
2117 /* Make sure we've shrunk stack back to the point where the registers
2118 were laid down. This is typically 0/1 iterations. Then pull the
fa4fdf98 2119 register save information back off the stack. */
1bba6ac0 2120 while (growth >= fi.reg_growth)
2121 output_stack_adjust ( 1, fi.growth[growth--]);
2122
2123 offs = fi.reg_offset;
2124
2125 for (i = 15; i >= 0; i--)
2126 {
2127 if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
2128 {
2129 int first_reg;
2130
2131 /* Find the starting register. */
2132 first_reg = 15;
2133
2134 while (fi.reg_mask & (1 << first_reg))
2135 first_reg--;
2136
2137 first_reg++;
2138
1a83b3ff 2139 emit_insn (gen_load_multiple (gen_rtx_REG (SImode, first_reg),
2140 gen_rtx_MEM (SImode, stack_pointer_rtx),
1bba6ac0 2141 GEN_INT (16 - first_reg)));
2142
2143 i -= (15 - first_reg);
2144 offs += (16 - first_reg) * 4;
2145 }
2146 else if (fi.reg_mask & (1 << i))
2147 {
2148 emit_insn (gen_movsi
1a83b3ff 2149 (gen_rtx_REG (SImode, i),
2150 gen_rtx_MEM (SImode,
29c05e22 2151 plus_constant (Pmode, stack_pointer_rtx,
2152 offs))));
1bba6ac0 2153 offs += 4;
2154 }
2155 }
2156
2157 /* Give back anything else. */
dfd1079d 2158 /* XXX: Should accumulate total and then give it back. */
1bba6ac0 2159 while (growth >= 0)
2160 output_stack_adjust ( 1, fi.growth[growth--]);
2161}
2162\f
2163/* This code is borrowed from the SH port. */
2164
/* The MCORE cannot load a large constant into a register; constants have to
2166 come from a pc relative load. The reference of a pc relative load
442e3cb9 2167 instruction must be less than 1k in front of the instruction. This
1bba6ac0 2168 means that we often have to dump a constant inside a function, and
2169 generate code to branch around it.
2170
2171 It is important to minimize this, since the branches will slow things
2172 down and make things bigger.
2173
2174 Worst case code looks like:
2175
2176 lrw L1,r0
2177 br L2
2178 align
2179 L1: .long value
2180 L2:
2181 ..
2182
2183 lrw L3,r0
2184 br L4
2185 align
2186 L3: .long value
2187 L4:
2188 ..
2189
2190 We fix this by performing a scan before scheduling, which notices which
2191 instructions need to have their operands fetched from the constant table
2192 and builds the table.
2193
2194 The algorithm is:
2195
   Scan forward to find an instruction which needs a pcrel move. Look
   forward from there to find the last barrier which is within MAX_COUNT
   bytes of the requirement. If there isn't one, make one. Process all
   the instructions between the found instruction and the barrier.
2200
2201 In the above example, we can tell that L3 is within 1k of L1, so
2202 the first move can be shrunk from the 2 insn+constant sequence into
2203 just 1 insn, and the constant moved to L3 to make:
2204
2205 lrw L1,r0
2206 ..
2207 lrw L3,r0
2208 bra L4
2209 align
2210 L3:.long value
2211 L4:.long value
2212
2213 Then the second move becomes the target for the shortening process. */
2214
2215typedef struct
2216{
2217 rtx value; /* Value in table. */
2218 rtx label; /* Label of value. */
2219} pool_node;
2220
2221/* The maximum number of constants that can fit into one pool, since
2222 the pc relative range is 0...1020 bytes and constants are at least 4
3c364971 2223 bytes long. We subtract 4 from the range to allow for the case where
1bba6ac0 2224 we need to add a branch/align before the constant pool. */
2225
2226#define MAX_COUNT 1016
2227#define MAX_POOL_SIZE (MAX_COUNT/4)
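/* That is, with a 1020-byte reach less 4 bytes of slack, MAX_COUNT is
   1016 and a pool holds at most 1016/4 == 254 entries. */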
2228static pool_node pool_vector[MAX_POOL_SIZE];
2229static int pool_size;
2230
2231/* Dump out any constants accumulated in the final pass. These
2232 will only be labels. */
fa4fdf98 2233
2eebe422 2234const char *
c167bbfc 2235mcore_output_jump_label_table (void)
1bba6ac0 2236{
2237 int i;
2238
2239 if (pool_size)
2240 {
2241 fprintf (asm_out_file, "\t.align 2\n");
2242
2243 for (i = 0; i < pool_size; i++)
2244 {
2245 pool_node * p = pool_vector + i;
2246
805e22b2 2247 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (p->label));
1bba6ac0 2248
2249 output_asm_insn (".long %0", &p->value);
2250 }
2251
2252 pool_size = 0;
2253 }
2254
2255 return "";
2256}
2257
1bba6ac0 2258/* Check whether insn is a candidate for a conditional. */
fa4fdf98 2259
1bba6ac0 2260static cond_type
c167bbfc 2261is_cond_candidate (rtx insn)
1bba6ac0 2262{
2263 /* The only things we conditionalize are those that can be directly
2264 changed into a conditional. Only bother with SImode items. If
2265 we wanted to be a little more aggressive, we could also do other
fa4fdf98 2266 modes such as DImode with reg-reg move or load 0. */
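  /* Illustrative patterns (sketches, not taken from a real dump):
       (set (reg:SI 2) (reg:SI 3))			     -> COND_MOV_INSN
       (set (reg:SI 2) (const_int 0))			     -> COND_CLR_INSN
       (set (reg:SI 2) (plus:SI (reg:SI 2) (const_int 1)))  -> COND_INC_INSN
       (set (reg:SI 2) (plus:SI (reg:SI 2) (const_int -1))) -> COND_DEC_INSN */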
aa90bb35 2267 if (NONJUMP_INSN_P (insn))
1bba6ac0 2268 {
2269 rtx pat = PATTERN (insn);
2270 rtx src, dst;
2271
2272 if (GET_CODE (pat) != SET)
2273 return COND_NO;
2274
2275 dst = XEXP (pat, 0);
2276
2277 if ((GET_CODE (dst) != REG &&
2278 GET_CODE (dst) != SUBREG) ||
2279 GET_MODE (dst) != SImode)
2280 return COND_NO;
2281
2282 src = XEXP (pat, 1);
2283
2284 if ((GET_CODE (src) == REG ||
2285 (GET_CODE (src) == SUBREG &&
2286 GET_CODE (SUBREG_REG (src)) == REG)) &&
2287 GET_MODE (src) == SImode)
2288 return COND_MOV_INSN;
2289 else if (GET_CODE (src) == CONST_INT &&
2290 INTVAL (src) == 0)
2291 return COND_CLR_INSN;
2292 else if (GET_CODE (src) == PLUS &&
2293 (GET_CODE (XEXP (src, 0)) == REG ||
2294 (GET_CODE (XEXP (src, 0)) == SUBREG &&
2295 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
2296 GET_MODE (XEXP (src, 0)) == SImode &&
2297 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2298 INTVAL (XEXP (src, 1)) == 1)
2299 return COND_INC_INSN;
2300 else if (((GET_CODE (src) == MINUS &&
2301 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2302 INTVAL( XEXP (src, 1)) == 1) ||
2303 (GET_CODE (src) == PLUS &&
2304 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2305 INTVAL (XEXP (src, 1)) == -1)) &&
2306 (GET_CODE (XEXP (src, 0)) == REG ||
2307 (GET_CODE (XEXP (src, 0)) == SUBREG &&
2308 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
2309 GET_MODE (XEXP (src, 0)) == SImode)
2310 return COND_DEC_INSN;
2311
3fcfff30 2312 /* Some insns that we don't bother with:
1bba6ac0 2313 (set (rx:DI) (ry:DI))
2314 (set (rx:DI) (const_int 0))
2315 */
2316
2317 }
aa90bb35 2318 else if (JUMP_P (insn)
2319 && GET_CODE (PATTERN (insn)) == SET
2320 && GET_CODE (XEXP (PATTERN (insn), 1)) == LABEL_REF)
1bba6ac0 2321 return COND_BRANCH_INSN;
2322
2323 return COND_NO;
2324}
2325
2326/* Emit a conditional version of insn and replace the old insn with the
2327 new one. Return the new insn if emitted. */
fa4fdf98 2328
91a55c11 2329static rtx_insn *
26cd1198 2330emit_new_cond_insn (rtx_insn *insn, int cond)
1bba6ac0 2331{
2332 rtx c_insn = 0;
2333 rtx pat, dst, src;
2334 cond_type num;
2335
2336 if ((num = is_cond_candidate (insn)) == COND_NO)
2337 return NULL;
2338
2339 pat = PATTERN (insn);
2340
aa90bb35 2341 if (NONJUMP_INSN_P (insn))
1bba6ac0 2342 {
2343 dst = SET_DEST (pat);
2344 src = SET_SRC (pat);
2345 }
2346 else
7b7a8c50 2347 {
2348 dst = JUMP_LABEL (insn);
2349 src = NULL_RTX;
2350 }
1bba6ac0 2351
2352 switch (num)
2353 {
2354 case COND_MOV_INSN:
2355 case COND_CLR_INSN:
2356 if (cond)
2357 c_insn = gen_movt0 (dst, src, dst);
2358 else
2359 c_insn = gen_movt0 (dst, dst, src);
2360 break;
2361
2362 case COND_INC_INSN:
2363 if (cond)
2364 c_insn = gen_incscc (dst, dst);
2365 else
2366 c_insn = gen_incscc_false (dst, dst);
2367 break;
2368
2369 case COND_DEC_INSN:
2370 if (cond)
2371 c_insn = gen_decscc (dst, dst);
2372 else
2373 c_insn = gen_decscc_false (dst, dst);
2374 break;
2375
2376 case COND_BRANCH_INSN:
2377 if (cond)
2378 c_insn = gen_branch_true (dst);
2379 else
2380 c_insn = gen_branch_false (dst);
2381 break;
2382
2383 default:
2384 return NULL;
2385 }
2386
2387 /* Only copy the notes if they exist. */
2388 if (rtx_length [GET_CODE (c_insn)] >= 7 && rtx_length [GET_CODE (insn)] >= 7)
2389 {
2390 /* We really don't need to bother with the notes and links at this
2391 point, but go ahead and save the notes. This will help is_dead()
2392 when applying peepholes (links don't matter since they are not
2393 used any more beyond this point for the mcore). */
2394 REG_NOTES (c_insn) = REG_NOTES (insn);
2395 }
2396
2397 if (num == COND_BRANCH_INSN)
2398 {
2399 /* For jumps, we need to be a little bit careful and emit the new jump
2400 before the old one and to update the use count for the target label.
2401 This way, the barrier following the old (uncond) jump will get
2402 deleted, but the label won't. */
2403 c_insn = emit_jump_insn_before (c_insn, insn);
2404
2405 ++ LABEL_NUSES (dst);
2406
2407 JUMP_LABEL (c_insn) = dst;
2408 }
2409 else
2410 c_insn = emit_insn_after (c_insn, insn);
2411
2412 delete_insn (insn);
2413
91a55c11 2414 return as_a <rtx_insn *> (c_insn);
1bba6ac0 2415}
2416
2417/* Attempt to change a basic block into a series of conditional insns. This
2418 works by taking the branch at the end of the 1st block and scanning for the
2419 end of the 2nd block. If all instructions in the 2nd block have cond.
2420 versions and the label at the start of block 3 is the same as the target
2421 from the branch at block 1, then conditionalize all insn in block 2 using
2422 the inverse condition of the branch at block 1. (Note I'm bending the
2423 definition of basic block here.)
2424
2425 e.g., change:
2426
2427 bt L2 <-- end of block 1 (delete)
2428 mov r7,r8
2429 addu r7,1
2430 br L3 <-- end of block 2
2431
2432 L2: ... <-- start of block 3 (NUSES==1)
2433 L3: ...
2434
2435 to:
2436
2437 movf r7,r8
2438 incf r7
2439 bf L3
2440
2441 L3: ...
2442
   We can delete the L2 label if NUSES==1 and re-apply the optimization
2444 starting at the last instruction of block 2. This may allow an entire
fa4fdf98 2445 if-then-else statement to be conditionalized. BRC */
91a55c11 2446static rtx_insn *
2447conditionalize_block (rtx_insn *first)
1bba6ac0 2448{
91a55c11 2449 rtx_insn *insn;
1bba6ac0 2450 rtx br_pat;
91a55c11 2451 rtx_insn *end_blk_1_br = 0;
2452 rtx_insn *end_blk_2_insn = 0;
2453 rtx_insn *start_blk_3_lab = 0;
1bba6ac0 2454 int cond;
2455 int br_lab_num;
2456 int blk_size = 0;
2457
2458
2459 /* Check that the first insn is a candidate conditional jump. This is
2460 the one that we'll eliminate. If not, advance to the next insn to
2461 try. */
aa90bb35 2462 if (! JUMP_P (first)
2463 || GET_CODE (PATTERN (first)) != SET
2464 || GET_CODE (XEXP (PATTERN (first), 1)) != IF_THEN_ELSE)
1bba6ac0 2465 return NEXT_INSN (first);
2466
2467 /* Extract some information we need. */
2468 end_blk_1_br = first;
2469 br_pat = PATTERN (end_blk_1_br);
2470
2471 /* Complement the condition since we use the reverse cond. for the insns. */
2472 cond = (GET_CODE (XEXP (XEXP (br_pat, 1), 0)) == EQ);
2473
2474 /* Determine what kind of branch we have. */
2475 if (GET_CODE (XEXP (XEXP (br_pat, 1), 1)) == LABEL_REF)
2476 {
2477 /* A normal branch, so extract label out of first arm. */
2478 br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 1), 0));
2479 }
2480 else
2481 {
2482 /* An inverse branch, so extract the label out of the 2nd arm
2483 and complement the condition. */
2484 cond = (cond == 0);
2485 br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 2), 0));
2486 }
2487
2488 /* Scan forward for the start of block 2: it must start with a
2489 label and that label must be the same as the branch target
2490 label from block 1. We don't care about whether block 2 actually
2491 ends with a branch or a label (an uncond. branch is
2492 conditionalizable). */
2493 for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
2494 {
2495 enum rtx_code code;
2496
2497 code = GET_CODE (insn);
2498
3fcfff30 2499 /* Look for the label at the start of block 3. */
1bba6ac0 2500 if (code == CODE_LABEL && CODE_LABEL_NUMBER (insn) == br_lab_num)
2501 break;
2502
2503 /* Skip barriers, notes, and conditionalizable insns. If the
2504 insn is not conditionalizable or makes this optimization fail,
2505 just return the next insn so we can start over from that point. */
2506 if (code != BARRIER && code != NOTE && !is_cond_candidate (insn))
2507 return NEXT_INSN (insn);
2508
a361b456 2509 /* Remember the last real insn before the label (i.e. end of block 2). */
1bba6ac0 2510 if (code == JUMP_INSN || code == INSN)
2511 {
2512 blk_size ++;
2513 end_blk_2_insn = insn;
2514 }
2515 }
2516
2517 if (!insn)
2518 return insn;
2519
2520 /* It is possible for this optimization to slow performance if the blocks
2521 are long. This really depends upon whether the branch is likely taken
2522 or not. If the branch is taken, we slow performance in many cases. But,
2523 if the branch is not taken, we always help performance (for a single
2524 block, but for a double block (i.e. when the optimization is re-applied)
2525 this is not true since the 'right thing' depends on the overall length of
2526 the collapsed block). As a compromise, don't apply this optimization on
2527 blocks larger than size 2 (unlikely for the mcore) when speed is important.
     The best threshold depends on the latencies of the instructions (i.e.,
2529 the branch penalty). */
2530 if (optimize > 1 && blk_size > 2)
2531 return insn;
2532
2533 /* At this point, we've found the start of block 3 and we know that
2534 it is the destination of the branch from block 1. Also, all
2535 instructions in the block 2 are conditionalizable. So, apply the
2536 conditionalization and delete the branch. */
2537 start_blk_3_lab = insn;
2538
2539 for (insn = NEXT_INSN (end_blk_1_br); insn != start_blk_3_lab;
2540 insn = NEXT_INSN (insn))
2541 {
91a55c11 2542 rtx_insn *newinsn;
1bba6ac0 2543
dd1286fb 2544 if (insn->deleted ())
1bba6ac0 2545 continue;
2546
3fcfff30 2547 /* Try to form a conditional variant of the instruction and emit it. */
1bba6ac0 2548 if ((newinsn = emit_new_cond_insn (insn, cond)))
2549 {
2550 if (end_blk_2_insn == insn)
2551 end_blk_2_insn = newinsn;
2552
2553 insn = newinsn;
2554 }
2555 }
2556
2557 /* Note whether we will delete the label starting blk 3 when the jump
2558 gets deleted. If so, we want to re-apply this optimization at the
2559 last real instruction right before the label. */
2560 if (LABEL_NUSES (start_blk_3_lab) == 1)
2561 {
2562 start_blk_3_lab = 0;
2563 }
2564
  /* ??? We probably should redistribute the death notes for this insn, esp.
2566 the death of cc, but it doesn't really matter this late in the game.
2567 The peepholes all use is_dead() which will find the correct death
2568 regardless of whether there is a note. */
2569 delete_insn (end_blk_1_br);
2570
2571 if (! start_blk_3_lab)
2572 return end_blk_2_insn;
2573
fa4fdf98 2574 /* Return the insn right after the label at the start of block 3. */
1bba6ac0 2575 return NEXT_INSN (start_blk_3_lab);
2576}
2577
2578/* Apply the conditionalization of blocks optimization. This is the
2579 outer loop that traverses through the insns scanning for a branch
2580 that signifies an opportunity to apply the optimization. Note that
2581 this optimization is applied late. If we could apply it earlier,
2582 say before cse 2, it may expose more optimization opportunities.
   But the payback probably isn't really worth the effort (we'd have
2584 to update all reg/flow/notes/links/etc to make it work - and stick it
fa4fdf98 2585 in before cse 2). */
2586
1bba6ac0 2587static void
c167bbfc 2588conditionalize_optimization (void)
1bba6ac0 2589{
91a55c11 2590 rtx_insn *insn;
1bba6ac0 2591
2efea8c0 2592 for (insn = get_insns (); insn; insn = conditionalize_block (insn))
1bba6ac0 2593 continue;
2594}
2595
2efea8c0 2596/* This is to handle loads from the constant pool. */
fa4fdf98 2597
2efea8c0 2598static void
c167bbfc 2599mcore_reorg (void)
1bba6ac0 2600{
2601 /* Reset this variable. */
2602 current_function_anonymous_args = 0;
2603
1bba6ac0 2604 if (optimize == 0)
2605 return;
2606
2607 /* Conditionalize blocks where we can. */
2efea8c0 2608 conditionalize_optimization ();
1bba6ac0 2609
2610 /* Literal pool generation is now pushed off until the assembler. */
2611}
2612
2613\f
d13344c8 2614/* Return true if X is something that can be moved directly into r15. */
1bba6ac0 2615
d13344c8 2616bool
c167bbfc 2617mcore_r15_operand_p (rtx x)
d13344c8 2618{
2619 switch (GET_CODE (x))
2620 {
2621 case CONST_INT:
2622 return mcore_const_ok_for_inline (INTVAL (x));
1bba6ac0 2623
d13344c8 2624 case REG:
2625 case SUBREG:
2626 case MEM:
2627 return 1;
2628
2629 default:
2630 return 0;
2631 }
2632}
2633
8deb3959 2634/* Implement SECONDARY_RELOAD_CLASS. If RCLASS contains r15, and we can't
d13344c8 2635 directly move X into it, use r1-r14 as a temporary. */
c167bbfc 2636
d13344c8 2637enum reg_class
8deb3959 2638mcore_secondary_reload_class (enum reg_class rclass,
3754d046 2639 machine_mode mode ATTRIBUTE_UNUSED, rtx x)
d13344c8 2640{
8deb3959 2641 if (TEST_HARD_REG_BIT (reg_class_contents[rclass], 15)
d13344c8 2642 && !mcore_r15_operand_p (x))
2643 return LRW_REGS;
2644 return NO_REGS;
2645}
1bba6ac0 2646
d13344c8 2647/* Return the reg_class to use when reloading the rtx X into the class
8deb3959 2648 RCLASS. If X is too complex to move directly into r15, prefer to
d13344c8 2649 use LRW_REGS instead. */
c167bbfc 2650
1bba6ac0 2651enum reg_class
8deb3959 2652mcore_reload_class (rtx x, enum reg_class rclass)
1bba6ac0 2653{
8deb3959 2654 if (reg_class_subset_p (LRW_REGS, rclass) && !mcore_r15_operand_p (x))
d13344c8 2655 return LRW_REGS;
1bba6ac0 2656
8deb3959 2657 return rclass;
1bba6ac0 2658}
2659
/* Tell me if a pair of reg/subreg rtx's actually refer to the same
   register. Note that the current version doesn't worry about whether
   they are the same mode or not (e.g., a QImode in r2 matches an HImode
   in r2, which matches an SImode in r2). We might think in the future
   about whether we want to be able to say something about modes. */
c167bbfc 2665
1bba6ac0 2666int
c167bbfc 2667mcore_is_same_reg (rtx x, rtx y)
1bba6ac0 2668{
3fcfff30 2669 /* Strip any and all of the subreg wrappers. */
1bba6ac0 2670 while (GET_CODE (x) == SUBREG)
2671 x = SUBREG_REG (x);
2672
2673 while (GET_CODE (y) == SUBREG)
2674 y = SUBREG_REG (y);
2675
  if (GET_CODE (x) == REG && GET_CODE (y) == REG && REGNO (x) == REGNO (y))
2677 return 1;
2678
2679 return 0;
2680}
2681
4c834714 2682static void
2683mcore_option_override (void)
1bba6ac0 2684{
1bba6ac0 2685 /* Only the m340 supports little endian code. */
2686 if (TARGET_LITTLE_END && ! TARGET_M340)
6a9bcd3e 2687 target_flags |= MASK_M340;
1bba6ac0 2688}
02e53c17 2689
1bba6ac0 2690\f
1bba6ac0 2691/* Compute the number of word sized registers needed to
2692 hold a function argument of mode MODE and type TYPE. */
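/* For example, with UNITS_PER_WORD == 4 an SImode argument needs one
   register, while a 10-byte BLKmode struct needs ROUND_ADVANCE (10) == 3. */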
c167bbfc 2693
1bba6ac0 2694int
3754d046 2695mcore_num_arg_regs (machine_mode mode, const_tree type)
1bba6ac0 2696{
2697 int size;
2698
0336f0f0 2699 if (targetm.calls.must_pass_in_stack (mode, type))
1bba6ac0 2700 return 0;
2701
2702 if (type && mode == BLKmode)
2703 size = int_size_in_bytes (type);
2704 else
2705 size = GET_MODE_SIZE (mode);
2706
2707 return ROUND_ADVANCE (size);
2708}
2709
2710static rtx
3754d046 2711handle_structs_in_regs (machine_mode mode, const_tree type, int reg)
1bba6ac0 2712{
2713 int size;
2714
/* The MCore ABI defines that a structure whose size is not a whole multiple
   of words is passed packed into registers (or spilled onto the stack if
2717 not enough registers are available) with the last few bytes of the
2718 structure being packed, left-justified, into the last register/stack slot.
2719 GCC handles this correctly if the last word is in a stack slot, but we
2720 have to generate a special, PARALLEL RTX if the last word is in an
2721 argument register. */
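  /* As a sketch (FIRST_PARM_REG is hardware register 2 on the MCore):
     a 6-byte struct whose first word is assigned to r2 yields

       (parallel [(expr_list (reg:SI 2) (const_int 0))
		  (expr_list (reg:SI 3) (const_int 4))])

     with the trailing two bytes packed, left-justified, into r3. */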
2722 if (type
2723 && TYPE_MODE (type) == BLKmode
2724 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
2725 && (size = int_size_in_bytes (type)) > UNITS_PER_WORD
2726 && (size % UNITS_PER_WORD != 0)
2727 && (reg + mcore_num_arg_regs (mode, type) <= (FIRST_PARM_REG + NPARM_REGS)))
2728 {
2729 rtx arg_regs [NPARM_REGS];
2730 int nregs;
2731 rtx result;
2732 rtvec rtvec;
2733
2734 for (nregs = 0; size > 0; size -= UNITS_PER_WORD)
2735 {
2736 arg_regs [nregs] =
2737 gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, reg ++),
2738 GEN_INT (nregs * UNITS_PER_WORD));
2739 nregs ++;
2740 }
2741
2742 /* We assume here that NPARM_REGS == 6. The assert checks this. */
1e944a0b 2743 gcc_assert (ARRAY_SIZE (arg_regs) == 6);
1bba6ac0 2744 rtvec = gen_rtvec (nregs, arg_regs[0], arg_regs[1], arg_regs[2],
2745 arg_regs[3], arg_regs[4], arg_regs[5]);
2746
2747 result = gen_rtx_PARALLEL (mode, rtvec);
2748 return result;
2749 }
2750
2751 return gen_rtx_REG (mode, reg);
2752}
2753
2754rtx
3b2411a8 2755mcore_function_value (const_tree valtype, const_tree func)
1bba6ac0 2756{
3754d046 2757 machine_mode mode;
1bba6ac0 2758 int unsigned_p;
2759
2760 mode = TYPE_MODE (valtype);
2761
3b2411a8 2762 /* Since we promote return types, we must promote the mode here too. */
74e653fe 2763 mode = promote_function_mode (valtype, mode, &unsigned_p, func, 1);
1bba6ac0 2764
2765 return handle_structs_in_regs (mode, valtype, FIRST_RET_REG);
2766}
2767
2768/* Define where to put the arguments to a function.
2769 Value is zero to push the argument on the stack,
2770 or a hard register in which to store the argument.
2771
2772 MODE is the argument's machine mode.
2773 TYPE is the data type of the argument (as a tree).
2774 This is null for libcalls where that information may
2775 not be available.
2776 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2777 the preceding args and about the function being called.
2778 NAMED is nonzero if this argument is a named parameter
2779 (otherwise it is an extra parameter matching an ellipsis).
2780
2781 On MCore the first args are normally in registers
2782 and the rest are pushed. Any arg that starts within the first
2783 NPARM_REGS words is at least partially passed in a register unless
2784 its data type forbids. */
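/* A hedged sketch, assuming FIRST_PARM_REG == 2 and that 8-byte
   alignment is in force: for `f (int a, double b)' the int lands in r2,
   and ROUND_REG skips an argument register so the double occupies the
   r4/r5 pair. */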
c167bbfc 2785
da6d22fa 2786static rtx
3754d046 2787mcore_function_arg (cumulative_args_t cum, machine_mode mode,
da6d22fa 2788 const_tree type, bool named)
1bba6ac0 2789{
2790 int arg_reg;
2791
51b742cc 2792 if (! named || mode == VOIDmode)
1bba6ac0 2793 return 0;
2794
0336f0f0 2795 if (targetm.calls.must_pass_in_stack (mode, type))
1bba6ac0 2796 return 0;
2797
39cba157 2798 arg_reg = ROUND_REG (*get_cumulative_args (cum), mode);
1bba6ac0 2799
2800 if (arg_reg < NPARM_REGS)
2801 return handle_structs_in_regs (mode, type, FIRST_PARM_REG + arg_reg);
2802
2803 return 0;
2804}
2805
da6d22fa 2806static void
3754d046 2807mcore_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
da6d22fa 2808 const_tree type, bool named ATTRIBUTE_UNUSED)
2809{
39cba157 2810 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
2811
da6d22fa 2812 *cum = (ROUND_REG (*cum, mode)
2813 + (int)named * mcore_num_arg_regs (mode, type));
2814}
2815
bd99ba64 2816static unsigned int
3754d046 2817mcore_function_arg_boundary (machine_mode mode,
bd99ba64 2818 const_tree type ATTRIBUTE_UNUSED)
2819{
  /* Doubles must be aligned to an 8-byte boundary. */
2821 return (mode != BLKmode && GET_MODE_SIZE (mode) == 8
2822 ? BIGGEST_ALIGNMENT
2823 : PARM_BOUNDARY);
2824}
2825
f054eb3c 2826/* Returns the number of bytes of argument registers required to hold *part*
2827 of a parameter of machine mode MODE and type TYPE (which may be NULL if
dfd1079d 2828 the type is not known). If the argument fits entirely in the argument
1bba6ac0 2829 registers, or entirely on the stack, then 0 is returned. CUM is the
2830 number of argument registers already used by earlier parameters to
2831 the function. */
c167bbfc 2832
f054eb3c 2833static int
3754d046 2834mcore_arg_partial_bytes (cumulative_args_t cum, machine_mode mode,
f054eb3c 2835 tree type, bool named)
1bba6ac0 2836{
39cba157 2837 int reg = ROUND_REG (*get_cumulative_args (cum), mode);
1bba6ac0 2838
2839 if (named == 0)
2840 return 0;
2841
0336f0f0 2842 if (targetm.calls.must_pass_in_stack (mode, type))
1bba6ac0 2843 return 0;
2844
2845 /* REG is not the *hardware* register number of the register that holds
2846 the argument, it is the *argument* register number. So for example,
2847 the first argument to a function goes in argument register 0, which
2848 translates (for the MCore) into hardware register 2. The second
2849 argument goes into argument register 1, which translates into hardware
2850 register 3, and so on. NPARM_REGS is the number of argument registers
2851 supported by the target, not the maximum hardware register number of
2852 the target. */
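  /* Worked example with NPARM_REGS == 6: a 12-byte struct whose first
     word falls in argument register 4 needs three registers, and
     4 + 3 > 6, so the two registers still free hold 8 bytes, the final
     word goes on the stack, and we return 2 * UNITS_PER_WORD == 8. */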
2853 if (reg >= NPARM_REGS)
2854 return 0;
2855
2856 /* If the argument fits entirely in registers, return 0. */
2857 if (reg + mcore_num_arg_regs (mode, type) <= NPARM_REGS)
2858 return 0;
2859
2860 /* The argument overflows the number of available argument registers.
2861 Compute how many argument registers have not yet been assigned to
2862 hold an argument. */
2863 reg = NPARM_REGS - reg;
2864
2865 /* Return partially in registers and partially on the stack. */
f054eb3c 2866 return reg * UNITS_PER_WORD;
1bba6ac0 2867}
2868\f
e911aedf 2869/* Return nonzero if SYMBOL is marked as being dllexport'd. */
c167bbfc 2870
1bba6ac0 2871int
c167bbfc 2872mcore_dllexport_name_p (const char * symbol)
1bba6ac0 2873{
2874 return symbol[0] == '@' && symbol[1] == 'e' && symbol[2] == '.';
2875}
2876
e911aedf 2877/* Return nonzero if SYMBOL is marked as being dllimport'd. */
c167bbfc 2878
1bba6ac0 2879int
c167bbfc 2880mcore_dllimport_name_p (const char * symbol)
1bba6ac0 2881{
2882 return symbol[0] == '@' && symbol[1] == 'i' && symbol[2] == '.';
2883}
2884
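/* To illustrate the encoding (see the sprintf calls below): a
   dllexport'd `foo' is renamed `@e.foo' and a dllimport'd `foo' becomes
   `@i.__imp_foo'; mcore_strip_name_encoding later drops the
   three-character `@x.' prefix again. */
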
2885/* Mark a DECL as being dllexport'd. */
c167bbfc 2886
1bba6ac0 2887static void
c167bbfc 2888mcore_mark_dllexport (tree decl)
1bba6ac0 2889{
04238935 2890 const char * oldname;
1bba6ac0 2891 char * newname;
2892 rtx rtlname;
2893 tree idp;
2894
2895 rtlname = XEXP (DECL_RTL (decl), 0);
2896
044e64da 2897 if (GET_CODE (rtlname) == MEM)
2898 rtlname = XEXP (rtlname, 0);
2899 gcc_assert (GET_CODE (rtlname) == SYMBOL_REF);
2900 oldname = XSTR (rtlname, 0);
1bba6ac0 2901
2902 if (mcore_dllexport_name_p (oldname))
2903 return; /* Already done. */
2904
225ab426 2905 newname = XALLOCAVEC (char, strlen (oldname) + 4);
1bba6ac0 2906 sprintf (newname, "@e.%s", oldname);
2907
2908 /* We pass newname through get_identifier to ensure it has a unique
2909 address. RTL processing can sometimes peek inside the symbol ref
2910 and compare the string's addresses to see if two symbols are
2911 identical. */
2912 /* ??? At least I think that's why we do this. */
2913 idp = get_identifier (newname);
2914
2915 XEXP (DECL_RTL (decl), 0) =
1a83b3ff 2916 gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp));
1bba6ac0 2917}
2918
2919/* Mark a DECL as being dllimport'd. */
c167bbfc 2920
1bba6ac0 2921static void
c167bbfc 2922mcore_mark_dllimport (tree decl)
1bba6ac0 2923{
04238935 2924 const char * oldname;
1bba6ac0 2925 char * newname;
2926 tree idp;
2927 rtx rtlname;
2928 rtx newrtl;
2929
2930 rtlname = XEXP (DECL_RTL (decl), 0);
2931
044e64da 2932 if (GET_CODE (rtlname) == MEM)
2933 rtlname = XEXP (rtlname, 0);
2934 gcc_assert (GET_CODE (rtlname) == SYMBOL_REF);
2935 oldname = XSTR (rtlname, 0);
1bba6ac0 2936
044e64da 2937 gcc_assert (!mcore_dllexport_name_p (oldname));
2938 if (mcore_dllimport_name_p (oldname))
1bba6ac0 2939 return; /* Already done. */
2940
2941 /* ??? One can well ask why we're making these checks here,
2942 and that would be a good question. */
2943
2944 /* Imported variables can't be initialized. */
2945 if (TREE_CODE (decl) == VAR_DECL
2946 && !DECL_VIRTUAL_P (decl)
2947 && DECL_INITIAL (decl))
2948 {
3cf8b391 2949 error ("initialized variable %q+D is marked dllimport", decl);
1bba6ac0 2950 return;
2951 }
2952
2953 /* `extern' needn't be specified with dllimport.
2954 Specify `extern' now and hope for the best. Sigh. */
2955 if (TREE_CODE (decl) == VAR_DECL
2956 /* ??? Is this test for vtables needed? */
2957 && !DECL_VIRTUAL_P (decl))
2958 {
2959 DECL_EXTERNAL (decl) = 1;
2960 TREE_PUBLIC (decl) = 1;
2961 }
2962
225ab426 2963 newname = XALLOCAVEC (char, strlen (oldname) + 11);
1bba6ac0 2964 sprintf (newname, "@i.__imp_%s", oldname);
2965
2966 /* We pass newname through get_identifier to ensure it has a unique
2967 address. RTL processing can sometimes peek inside the symbol ref
2968 and compare the string's addresses to see if two symbols are
2969 identical. */
2970 /* ??? At least I think that's why we do this. */
2971 idp = get_identifier (newname);
2972
1a83b3ff 2973 newrtl = gen_rtx_MEM (Pmode,
2974 gen_rtx_SYMBOL_REF (Pmode,
1bba6ac0 2975 IDENTIFIER_POINTER (idp)));
2976 XEXP (DECL_RTL (decl), 0) = newrtl;
2977}
2978
2979static int
c167bbfc 2980mcore_dllexport_p (tree decl)
1bba6ac0 2981{
2982 if ( TREE_CODE (decl) != VAR_DECL
2983 && TREE_CODE (decl) != FUNCTION_DECL)
2984 return 0;
2985
e3c541f0 2986 return lookup_attribute ("dllexport", DECL_ATTRIBUTES (decl)) != 0;
1bba6ac0 2987}
2988
2989static int
c167bbfc 2990mcore_dllimport_p (tree decl)
1bba6ac0 2991{
2992 if ( TREE_CODE (decl) != VAR_DECL
2993 && TREE_CODE (decl) != FUNCTION_DECL)
2994 return 0;
2995
e3c541f0 2996 return lookup_attribute ("dllimport", DECL_ATTRIBUTES (decl)) != 0;
1bba6ac0 2997}
2998
7811991d 2999/* We must mark dll symbols specially. Definitions of dllexport'd objects
   install some info in the .drectve (PE) or .exports (ELF) sections. */
7811991d 3001
3002static void
c167bbfc 3003mcore_encode_section_info (tree decl, rtx rtl ATTRIBUTE_UNUSED, int first ATTRIBUTE_UNUSED)
1bba6ac0 3004{
1bba6ac0 3005 /* Mark the decl so we can tell from the rtl whether the object is
3006 dllexport'd or dllimport'd. */
3007 if (mcore_dllexport_p (decl))
3008 mcore_mark_dllexport (decl);
3009 else if (mcore_dllimport_p (decl))
3010 mcore_mark_dllimport (decl);
3011
3012 /* It might be that DECL has already been marked as dllimport, but
3013 a subsequent definition nullified that. The attribute is gone
3014 but DECL_RTL still has @i.__imp_foo. We need to remove that. */
3015 else if ((TREE_CODE (decl) == FUNCTION_DECL
3016 || TREE_CODE (decl) == VAR_DECL)
3017 && DECL_RTL (decl) != NULL_RTX
3018 && GET_CODE (DECL_RTL (decl)) == MEM
3019 && GET_CODE (XEXP (DECL_RTL (decl), 0)) == MEM
3020 && GET_CODE (XEXP (XEXP (DECL_RTL (decl), 0), 0)) == SYMBOL_REF
3021 && mcore_dllimport_name_p (XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0)))
3022 {
9a356c3c 3023 const char * oldname = XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0);
1bba6ac0 3024 tree idp = get_identifier (oldname + 9);
1a83b3ff 3025 rtx newrtl = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp));
1bba6ac0 3026
3027 XEXP (DECL_RTL (decl), 0) = newrtl;
3028
3029 /* We previously set TREE_PUBLIC and DECL_EXTERNAL.
3030 ??? We leave these alone for now. */
3031 }
3032}
3033
7b4a38a6 3034/* Undo the effects of the above. */
3035
3036static const char *
c167bbfc 3037mcore_strip_name_encoding (const char * str)
7b4a38a6 3038{
3039 return str + (str[0] == '@' ? 3 : 0);
3040}
3041
1bba6ac0 3042/* MCore specific attribute support.
3043 dllexport - for exporting a function/variable that will live in a dll
3044 dllimport - for importing a function/variable from a dll
3045 naked - do not create a function prologue/epilogue. */
1bba6ac0 3046
e3c541f0 3047/* Handle a "naked" attribute; arguments as in
3048 struct attribute_spec.handler. */
c167bbfc 3049
e3c541f0 3050static tree
c167bbfc 3051mcore_handle_naked_attribute (tree * node, tree name, tree args ATTRIBUTE_UNUSED,
3052 int flags ATTRIBUTE_UNUSED, bool * no_add_attrs)
e3c541f0 3053{
08c6cbd2 3054 if (TREE_CODE (*node) != FUNCTION_DECL)
e3c541f0 3055 {
67a779df 3056 warning (OPT_Wattributes, "%qE attribute only applies to functions",
3057 name);
e3c541f0 3058 *no_add_attrs = true;
1bba6ac0 3059 }
3060
e3c541f0 3061 return NULL_TREE;
1bba6ac0 3062}
3063
52470889 3064/* ??? It looks like this is PE specific? Oh well, this is what the
3065 old code did as well. */
1bba6ac0 3066
52470889 3067static void
c167bbfc 3068mcore_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
1bba6ac0 3069{
3070 int len;
c8834c5f 3071 const char * name;
1bba6ac0 3072 char * string;
2eebe422 3073 const char * prefix;
1bba6ac0 3074
3075 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
3076
3077 /* Strip off any encoding in name. */
7b4a38a6 3078 name = (* targetm.strip_name_encoding) (name);
1bba6ac0 3079
3080 /* The object is put in, for example, section .text$foo.
     The linker will then ultimately place it in .text
3082 (everything from the $ on is stripped). */
3083 if (TREE_CODE (decl) == FUNCTION_DECL)
3084 prefix = ".text$";
8ef587dc 3085 /* For compatibility with EPOC, we ignore the fact that the
1bba6ac0 3086 section might have relocs against it. */
f4111c94 3087 else if (decl_readonly_section (decl, 0))
1bba6ac0 3088 prefix = ".rdata$";
3089 else
3090 prefix = ".data$";
3091
3092 len = strlen (name) + strlen (prefix);
225ab426 3093 string = XALLOCAVEC (char, len + 1);
1bba6ac0 3094
3095 sprintf (string, "%s%s", prefix, name);
3096
738a6bda 3097 set_decl_section_name (decl, string);
1bba6ac0 3098}
3099
3100int
c167bbfc 3101mcore_naked_function_p (void)
1bba6ac0 3102{
e3c541f0 3103 return lookup_attribute ("naked", DECL_ATTRIBUTES (current_function_decl)) != NULL_TREE;
1bba6ac0 3104}
2cb4ac60 3105
08c6cbd2 3106static bool
3107mcore_warn_func_return (tree decl)
3108{
3109 /* Naked functions are implemented entirely in assembly, including the
3110 return sequence, so suppress warnings about this. */
3111 return lookup_attribute ("naked", DECL_ATTRIBUTES (decl)) == NULL_TREE;
3112}
3113
6e4758ce 3114#ifdef OBJECT_FORMAT_ELF
2cb4ac60 3115static void
537cd941 3116mcore_asm_named_section (const char *name,
3117 unsigned int flags ATTRIBUTE_UNUSED,
3118 tree decl ATTRIBUTE_UNUSED)
2cb4ac60 3119{
3120 fprintf (asm_out_file, "\t.section %s\n", name);
3121}
6e4758ce 3122#endif /* OBJECT_FORMAT_ELF */
5ee57edb 3123
e4ef650e 3124/* Worker function for TARGET_ASM_EXTERNAL_LIBCALL. */
3125
5ee57edb 3126static void
3127mcore_external_libcall (rtx fun)
3128{
3129 fprintf (asm_out_file, "\t.import\t");
3130 assemble_name (asm_out_file, XSTR (fun, 0));
3131 fprintf (asm_out_file, "\n");
3132}
3133
e4ef650e 3134/* Worker function for TARGET_RETURN_IN_MEMORY. */
3135
5ee57edb 3136static bool
fb80456a 3137mcore_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
5ee57edb 3138{
fb80456a 3139 const HOST_WIDE_INT size = int_size_in_bytes (type);
39cc9599 3140 return (size == -1 || size > 2 * UNITS_PER_WORD);
5ee57edb 3141}
74e653fe 3142
3143/* Worker function for TARGET_ASM_TRAMPOLINE_TEMPLATE.
3144 Output assembler code for a block containing the constant parts
3145 of a trampoline, leaving space for the variable parts.
3146
3147 On the MCore, the trampoline looks like:
3148 lrw r1, function
3149 lrw r13, area
3150 jmp r13
3151 or r0, r0
3152 .literals */
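/* A note on the encoding below: the four .short values are the machine
   encodings of the four instructions listed above, in that order, and
   the two .long slots are the `area' words which mcore_trampoline_init
   fills in with the static chain (offset 8) and the target function's
   address (offset 12). */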
3153
3154static void
3155mcore_asm_trampoline_template (FILE *f)
3156{
3157 fprintf (f, "\t.short 0x7102\n");
3158 fprintf (f, "\t.short 0x7d02\n");
3159 fprintf (f, "\t.short 0x00cd\n");
3160 fprintf (f, "\t.short 0x1e00\n");
3161 fprintf (f, "\t.long 0\n");
3162 fprintf (f, "\t.long 0\n");
3163}
3164
3165/* Worker function for TARGET_TRAMPOLINE_INIT. */
3166
3167static void
3168mcore_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
3169{
3170 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
3171 rtx mem;
3172
3173 emit_block_move (m_tramp, assemble_trampoline_template (),
3174 GEN_INT (2*UNITS_PER_WORD), BLOCK_OP_NORMAL);
3175
3176 mem = adjust_address (m_tramp, SImode, 8);
3177 emit_move_insn (mem, chain_value);
3178 mem = adjust_address (m_tramp, SImode, 12);
3179 emit_move_insn (mem, fnaddr);
3180}
ca316360 3181
3182/* Implement TARGET_LEGITIMATE_CONSTANT_P
3183
3184 On the MCore, allow anything but a double. */
3185
3186static bool
3754d046 3187mcore_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
ca316360 3188{
3189 return GET_CODE (x) != CONST_DOUBLE;
3190}
30ff90f6 3191
3192/* Helper function for `mcore_legitimate_address_p'. */
3193
3194static bool
3195mcore_reg_ok_for_base_p (const_rtx reg, bool strict_p)
3196{
3197 if (strict_p)
3198 return REGNO_OK_FOR_BASE_P (REGNO (reg));
3199 else
3200 return (REGNO (reg) <= 16 || !HARD_REGISTER_P (reg));
3201}
3202
3203static bool
3204mcore_base_register_rtx_p (const_rtx x, bool strict_p)
3205{
  return REG_P (x) && mcore_reg_ok_for_base_p (x, strict_p);
3207}
3208
3209/* A legitimate index for a QI is 0..15, for HI is 0..30, for SI is 0..60,
3210 and for DI is 0..56 because we use two SI loads, etc. */
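/* E.g. for SImode, (plus (reg) (const_int 60)) is accepted (60 is a
   multiple of 4 and no greater than 64 - 4), but (const_int 62) as the
   index is rejected. */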
3211
3212static bool
3213mcore_legitimate_index_p (machine_mode mode, const_rtx op)
3214{
3215 if (CONST_INT_P (op))
3216 {
3217 if (GET_MODE_SIZE (mode) >= 4
3218 && (((unsigned HOST_WIDE_INT) INTVAL (op)) % 4) == 0
3219 && ((unsigned HOST_WIDE_INT) INTVAL (op))
3220 <= (unsigned HOST_WIDE_INT) 64 - GET_MODE_SIZE (mode))
3221 return true;
3222 if (GET_MODE_SIZE (mode) == 2
3223 && (((unsigned HOST_WIDE_INT) INTVAL (op)) % 2) == 0
3224 && ((unsigned HOST_WIDE_INT) INTVAL (op)) <= 30)
3225 return true;
3226 if (GET_MODE_SIZE (mode) == 1
3227 && ((unsigned HOST_WIDE_INT) INTVAL (op)) <= 15)
3228 return true;
3229 }
3230 return false;
3231}
3232
3233
3234/* Worker function for TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P.
3235
3236 Allow REG
3237 REG + disp */
3238
3239static bool
3240mcore_legitimate_address_p (machine_mode mode, rtx x, bool strict_p,
3241 addr_space_t as)
3242{
3243 gcc_assert (ADDR_SPACE_GENERIC_P (as));
3244
3245 if (mcore_base_register_rtx_p (x, strict_p))
3246 return true;
3247 else if (GET_CODE (x) == PLUS || GET_CODE (x) == LO_SUM)
3248 {
3249 rtx xop0 = XEXP (x, 0);
3250 rtx xop1 = XEXP (x, 1);
3251 if (mcore_base_register_rtx_p (xop0, strict_p)
3252 && mcore_legitimate_index_p (mode, xop1))
3253 return true;
3254 if (mcore_base_register_rtx_p (xop1, strict_p)
3255 && mcore_legitimate_index_p (mode, xop0))
3256 return true;
3257 }
3258
3259 return false;
3260}
3261