/* Output routines for Motorola MCore processor
   Copyright (C) 1993, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008,
   2009, 2010, 2011 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "mcore.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "obstack.h"
#include "expr.h"
#include "reload.h"
#include "recog.h"
#include "function.h"
#include "ggc.h"
#include "diagnostic-core.h"
#include "target.h"
#include "target-def.h"
#include "df.h"

/* For dumping information about frame sizes.  */
char * mcore_current_function_name = 0;
long   mcore_current_compilation_timestamp = 0;

/* Global variables for machine-dependent things.  */

/* Provides the class number of the smallest class containing
   reg number.  */
const enum reg_class regno_reg_class[FIRST_PSEUDO_REGISTER] =
{
  GENERAL_REGS, ONLYR1_REGS,  LRW_REGS,     LRW_REGS,
  LRW_REGS,     LRW_REGS,     LRW_REGS,     LRW_REGS,
  LRW_REGS,     LRW_REGS,     LRW_REGS,     LRW_REGS,
  LRW_REGS,     LRW_REGS,     LRW_REGS,     GENERAL_REGS,
  GENERAL_REGS, C_REGS,       NO_REGS,      NO_REGS,
};

struct mcore_frame
{
  int arg_size;			/* Stdarg spills (bytes).  */
  int reg_size;			/* Non-volatile reg saves (bytes).  */
  int reg_mask;			/* Non-volatile reg saves.  */
  int local_size;		/* Locals.  */
  int outbound_size;		/* Arg overflow on calls out.  */
  int pad_outbound;
  int pad_local;
  int pad_reg;
  /* Describe the steps we'll use to grow it.  */
#define MAX_STACK_GROWS 4	/* Gives us some spare space.  */
  int growth[MAX_STACK_GROWS];
  int arg_offset;
  int reg_offset;
  int reg_growth;
  int local_growth;
};

typedef enum
{
  COND_NO,
  COND_MOV_INSN,
  COND_CLR_INSN,
  COND_INC_INSN,
  COND_DEC_INSN,
  COND_BRANCH_INSN
}
cond_type;

static void       output_stack_adjust (int, int);
static int        calc_live_regs (int *);
static int        try_constant_tricks (HOST_WIDE_INT, HOST_WIDE_INT *, HOST_WIDE_INT *);
static const char * output_inline_const (enum machine_mode, rtx *);
static void       layout_mcore_frame (struct mcore_frame *);
static void       mcore_setup_incoming_varargs (cumulative_args_t, enum machine_mode, tree, int *, int);
static cond_type  is_cond_candidate (rtx);
static rtx        emit_new_cond_insn (rtx, int);
static rtx        conditionalize_block (rtx);
static void       conditionalize_optimization (void);
static void       mcore_reorg (void);
static rtx        handle_structs_in_regs (enum machine_mode, const_tree, int);
static void       mcore_mark_dllexport (tree);
static void       mcore_mark_dllimport (tree);
static int        mcore_dllexport_p (tree);
static int        mcore_dllimport_p (tree);
static tree       mcore_handle_naked_attribute (tree *, tree, tree, int, bool *);
#ifdef OBJECT_FORMAT_ELF
static void       mcore_asm_named_section (const char *, unsigned int, tree);
#endif
static void       mcore_print_operand (FILE *, rtx, int);
static void       mcore_print_operand_address (FILE *, rtx);
static bool       mcore_print_operand_punct_valid_p (unsigned char code);
static void       mcore_unique_section (tree, int);
static void       mcore_encode_section_info (tree, rtx, int);
static const char *mcore_strip_name_encoding (const char *);
static int        mcore_const_costs (rtx, RTX_CODE);
static int        mcore_and_cost (rtx);
static int        mcore_ior_cost (rtx);
static bool       mcore_rtx_costs (rtx, int, int, int, int *, bool);
static void       mcore_external_libcall (rtx);
static bool       mcore_return_in_memory (const_tree, const_tree);
static int        mcore_arg_partial_bytes (cumulative_args_t, enum machine_mode, tree, bool);
static rtx        mcore_function_arg (cumulative_args_t, enum machine_mode, const_tree, bool);
static void       mcore_function_arg_advance (cumulative_args_t, enum machine_mode, const_tree, bool);
static unsigned int mcore_function_arg_boundary (enum machine_mode, const_tree);
static void       mcore_asm_trampoline_template (FILE *);
static void       mcore_trampoline_init (rtx, tree, rtx);
static void       mcore_option_override (void);
static bool       mcore_legitimate_constant_p (enum machine_mode, rtx);
\f
/* MCore specific attributes.  */

static const struct attribute_spec mcore_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "dllexport", 0, 0, true,  false, false, NULL, false },
  { "dllimport", 0, 0, true,  false, false, NULL, false },
  { "naked",     0, 0, true,  false, false, mcore_handle_naked_attribute,
    false },
  { NULL,        0, 0, false, false, false, NULL, false }
};
\f
/* Initialize the GCC target structure.  */
#undef  TARGET_ASM_EXTERNAL_LIBCALL
#define TARGET_ASM_EXTERNAL_LIBCALL mcore_external_libcall

#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef  TARGET_MERGE_DECL_ATTRIBUTES
#define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
#endif

#ifdef OBJECT_FORMAT_ELF
#undef  TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef  TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#endif

#undef  TARGET_PRINT_OPERAND
#define TARGET_PRINT_OPERAND mcore_print_operand
#undef  TARGET_PRINT_OPERAND_ADDRESS
#define TARGET_PRINT_OPERAND_ADDRESS mcore_print_operand_address
#undef  TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P mcore_print_operand_punct_valid_p

#undef  TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE mcore_attribute_table
#undef  TARGET_ASM_UNIQUE_SECTION
#define TARGET_ASM_UNIQUE_SECTION mcore_unique_section
#undef  TARGET_ASM_FUNCTION_RODATA_SECTION
#define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
#undef  TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO mcore_encode_section_info
#undef  TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING mcore_strip_name_encoding
#undef  TARGET_RTX_COSTS
#define TARGET_RTX_COSTS mcore_rtx_costs
#undef  TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_bool_0
#undef  TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG mcore_reorg

#undef  TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
#undef  TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true

#undef  TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY mcore_return_in_memory
#undef  TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef  TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE hook_pass_by_reference_must_pass_in_stack
#undef  TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES mcore_arg_partial_bytes
#undef  TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG mcore_function_arg
#undef  TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE mcore_function_arg_advance
#undef  TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY mcore_function_arg_boundary

#undef  TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS mcore_setup_incoming_varargs

#undef  TARGET_ASM_TRAMPOLINE_TEMPLATE
#define TARGET_ASM_TRAMPOLINE_TEMPLATE mcore_asm_trampoline_template
#undef  TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT mcore_trampoline_init

#undef  TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE mcore_option_override

#undef  TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P mcore_legitimate_constant_p

struct gcc_target targetm = TARGET_INITIALIZER;
\f
/* Adjust the stack pointer by SIZE bytes; DIRECTION < 0 extends the
   stack (subtract), DIRECTION > 0 shrinks it (add).  */
static void
output_stack_adjust (int direction, int size)
{
  /* If extending stack a lot, we do it incrementally.  */
  if (direction < 0 && size > mcore_stack_increment && mcore_stack_increment > 0)
    {
      rtx tmp = gen_rtx_REG (SImode, 1);
      rtx memref;

      emit_insn (gen_movsi (tmp, GEN_INT (mcore_stack_increment)));
      do
	{
	  emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp));
	  memref = gen_rtx_MEM (SImode, stack_pointer_rtx);
	  MEM_VOLATILE_P (memref) = 1;
	  emit_insn (gen_movsi (memref, stack_pointer_rtx));
	  size -= mcore_stack_increment;
	}
      while (size > mcore_stack_increment);

      /* SIZE is now the residual for the last adjustment,
	 which doesn't require a probe.  */
    }

  if (size)
    {
      rtx insn;
      rtx val = GEN_INT (size);

      if (size > 32)
	{
	  rtx nval = gen_rtx_REG (SImode, 1);
	  emit_insn (gen_movsi (nval, val));
	  val = nval;
	}

      if (direction > 0)
	insn = gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, val);
      else
	insn = gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, val);

      emit_insn (insn);
    }
}
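
/* Example (illustrative): with mcore_stack_increment == 64, extending
   the stack by 200 bytes loads 64 into a scratch register and emits
   three probed steps (subtract the scratch from sp, then a volatile
   store through the new sp, which serves as a stack probe), leaving
   SIZE == 8 for the single final adjustment above.  */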

/* Work out the registers which need to be saved,
   both as a mask and a count.  */

static int
calc_live_regs (int * count)
{
  int reg;
  int live_regs_mask = 0;

  * count = 0;

  for (reg = 0; reg < FIRST_PSEUDO_REGISTER; reg++)
    {
      if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
	{
	  (*count)++;
	  live_regs_mask |= (1 << reg);
	}
    }

  return live_regs_mask;
}

/* Print the operand address in x to the stream.  */

static void
mcore_print_operand_address (FILE * stream, rtx x)
{
  switch (GET_CODE (x))
    {
    case REG:
      fprintf (stream, "(%s)", reg_names[REGNO (x)]);
      break;

    case PLUS:
      {
	rtx base = XEXP (x, 0);
	rtx index = XEXP (x, 1);

	if (GET_CODE (base) != REG)
	  {
	    /* Ensure that BASE is a register (one of them must be).  */
	    rtx temp = base;
	    base = index;
	    index = temp;
	  }

	switch (GET_CODE (index))
	  {
	  case CONST_INT:
	    fprintf (stream, "(%s," HOST_WIDE_INT_PRINT_DEC ")",
		     reg_names[REGNO(base)], INTVAL (index));
	    break;

	  default:
	    gcc_unreachable ();
	  }
      }

      break;

    default:
      output_addr_const (stream, x);
      break;
    }
}

static bool
mcore_print_operand_punct_valid_p (unsigned char code)
{
  return (code == '.' || code == '#' || code == '*' || code == '^'
	  || code == '!');
}

/* Print operand x (an rtx) in assembler syntax to file stream
   according to modifier code.

   'R' print the next register or memory location along, i.e. the lsw in
       a double word value
   'O' print a constant without the #
   'M' print a constant as its negative
   'P' print log2 of a power of two
   'Q' print log2 of an inverse of a power of two
   'U' print register for ldm/stm instruction
   'X' print byte number for xtrbN instruction.  */

static void
mcore_print_operand (FILE * stream, rtx x, int code)
{
  switch (code)
    {
    case 'N':
      if (INTVAL(x) == -1)
	fprintf (asm_out_file, "32");
      else
	fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) + 1));
      break;
    case 'P':
      fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) & 0xffffffff));
      break;
    case 'Q':
      fprintf (asm_out_file, "%d", exact_log2 (~INTVAL (x)));
      break;
    case 'O':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
      break;
    case 'M':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, - INTVAL (x));
      break;
    case 'R':
      /* Next location along in memory or register.  */
      switch (GET_CODE (x))
	{
	case REG:
	  fputs (reg_names[REGNO (x) + 1], (stream));
	  break;
	case MEM:
	  mcore_print_operand_address
	    (stream, XEXP (adjust_address (x, SImode, 4), 0));
	  break;
	default:
	  gcc_unreachable ();
	}
      break;
    case 'U':
      fprintf (asm_out_file, "%s-%s", reg_names[REGNO (x)],
	       reg_names[REGNO (x) + 3]);
      break;
    case 'x':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
      break;
    case 'X':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, 3 - INTVAL (x) / 8);
      break;

    default:
      switch (GET_CODE (x))
	{
	case REG:
	  fputs (reg_names[REGNO (x)], (stream));
	  break;
	case MEM:
	  output_address (XEXP (x, 0));
	  break;
	default:
	  output_addr_const (stream, x);
	  break;
	}
      break;
    }
}

/* What does a constant cost?  */

static int
mcore_const_costs (rtx exp, enum rtx_code code)
{
  HOST_WIDE_INT val = INTVAL (exp);

  /* Easy constants.  */
  if (   CONST_OK_FOR_I (val)
      || CONST_OK_FOR_M (val)
      || CONST_OK_FOR_N (val)
      || (code == PLUS && CONST_OK_FOR_L (val)))
    return 1;
  else if (code == AND
	   && (   CONST_OK_FOR_M (~val)
	       || CONST_OK_FOR_N (~val)))
    return 2;
  else if (code == PLUS
	   && (   CONST_OK_FOR_I (-val)
	       || CONST_OK_FOR_M (-val)
	       || CONST_OK_FOR_N (-val)))
    return 2;

  return 5;
}

/* What does an and instruction cost - we do this because immediates may
   have been relaxed.  We want to ensure that cse will cse relaxed immediates
   out.  Otherwise we'll get bad code (multiple reloads of the same const).  */

static int
mcore_and_cost (rtx x)
{
  HOST_WIDE_INT val;

  if (GET_CODE (XEXP (x, 1)) != CONST_INT)
    return 2;

  val = INTVAL (XEXP (x, 1));

  /* Do it directly.  */
  if (CONST_OK_FOR_K (val) || CONST_OK_FOR_M (~val))
    return 2;
  /* Takes one instruction to load.  */
  else if (const_ok_for_mcore (val))
    return 3;
  /* Takes two instructions to load.  */
  else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
    return 4;

  /* Takes a lrw to load.  */
  return 5;
}

/* What does an or cost - see and_cost().  */

static int
mcore_ior_cost (rtx x)
{
  HOST_WIDE_INT val;

  if (GET_CODE (XEXP (x, 1)) != CONST_INT)
    return 2;

  val = INTVAL (XEXP (x, 1));

  /* Do it directly with bseti.  */
  if (CONST_OK_FOR_M (val))
    return 2;
  /* Takes one instruction to load.  */
  else if (const_ok_for_mcore (val))
    return 3;
  /* Takes two instructions to load.  */
  else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
    return 4;

  /* Takes a lrw to load.  */
  return 5;
}

static bool
mcore_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
		 int * total, bool speed ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case CONST_INT:
      *total = mcore_const_costs (x, (enum rtx_code) outer_code);
      return true;
    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 5;
      return true;
    case CONST_DOUBLE:
      *total = 10;
      return true;

    case AND:
      *total = COSTS_N_INSNS (mcore_and_cost (x));
      return true;

    case IOR:
      *total = COSTS_N_INSNS (mcore_ior_cost (x));
      return true;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
    case FLOAT:
    case FIX:
      *total = COSTS_N_INSNS (100);
      return true;

    default:
      return false;
    }
}

/* Prepare the operands for a comparison.  Return whether the branch/setcc
   should reverse the operands.  */

bool
mcore_gen_compare (enum rtx_code code, rtx op0, rtx op1)
{
  rtx cc_reg = gen_rtx_REG (CCmode, CC_REG);
  bool invert;

  if (GET_CODE (op1) == CONST_INT)
    {
      HOST_WIDE_INT val = INTVAL (op1);

      switch (code)
	{
	case GTU:
	  /* Unsigned > 0 is the same as != 0; everything else is converted
	     below to LEU (reversed cmphs).  */
	  if (val == 0)
	    code = NE;
	  break;

	/* Check whether (LE A imm) can become (LT A imm + 1),
	   or (GT A imm) can become (GE A imm + 1).  */
	case GT:
	case LE:
	  if (CONST_OK_FOR_J (val + 1))
	    {
	      op1 = GEN_INT (val + 1);
	      code = code == LE ? LT : GE;
	    }
	  break;

	default:
	  break;
	}
    }

  if (CONSTANT_P (op1) && GET_CODE (op1) != CONST_INT)
    op1 = force_reg (SImode, op1);

  /* cmpnei: 0-31 (K immediate)
     cmplti: 1-32 (J immediate, 0 using btsti x,31).  */
  invert = false;
  switch (code)
    {
    case EQ:	/* Use inverted condition, cmpne.  */
      code = NE;
      invert = true;
      /* Drop through.  */

    case NE:	/* Use normal condition, cmpne.  */
      if (GET_CODE (op1) == CONST_INT && ! CONST_OK_FOR_K (INTVAL (op1)))
	op1 = force_reg (SImode, op1);
      break;

    case LE:	/* Use inverted condition, reversed cmplt.  */
      code = GT;
      invert = true;
      /* Drop through.  */

    case GT:	/* Use normal condition, reversed cmplt.  */
      if (GET_CODE (op1) == CONST_INT)
	op1 = force_reg (SImode, op1);
      break;

    case GE:	/* Use inverted condition, cmplt.  */
      code = LT;
      invert = true;
      /* Drop through.  */

    case LT:	/* Use normal condition, cmplt.  */
      if (GET_CODE (op1) == CONST_INT &&
	  /* covered by btsti x,31.  */
	  INTVAL (op1) != 0 &&
	  ! CONST_OK_FOR_J (INTVAL (op1)))
	op1 = force_reg (SImode, op1);
      break;

    case GTU:	/* Use inverted condition, cmple.  */
      /* We coped with unsigned > 0 above.  */
      gcc_assert (GET_CODE (op1) != CONST_INT || INTVAL (op1) != 0);
      code = LEU;
      invert = true;
      /* Drop through.  */

    case LEU:	/* Use normal condition, reversed cmphs.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
	op1 = force_reg (SImode, op1);
      break;

    case LTU:	/* Use inverted condition, cmphs.  */
      code = GEU;
      invert = true;
      /* Drop through.  */

    case GEU:	/* Use normal condition, cmphs.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
	op1 = force_reg (SImode, op1);
      break;

    default:
      break;
    }

  emit_insn (gen_rtx_SET (VOIDmode,
			  cc_reg,
			  gen_rtx_fmt_ee (code, CCmode, op0, op1)));
  return invert;
}

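/* Example (illustrative): a signed (le rX 10) is first rewritten above
   to (lt rX 11), which fits a single "cmplti rX,11" with no inversion,
   while (eq rX rY) is emitted as "cmpne rX,rY" with true returned so
   the consumer tests the opposite sense of the condition bit.  */
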
int
mcore_symbolic_address_p (rtx x)
{
  switch (GET_CODE (x))
    {
    case SYMBOL_REF:
    case LABEL_REF:
      return 1;
    case CONST:
      x = XEXP (x, 0);
      return (   (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
	       || GET_CODE (XEXP (x, 0)) == LABEL_REF)
	      && GET_CODE (XEXP (x, 1)) == CONST_INT);
    default:
      return 0;
    }
}

/* Functions to output assembly code for a function call.  */

char *
mcore_output_call (rtx operands[], int index)
{
  static char buffer[20];
  rtx addr = operands [index];

  if (REG_P (addr))
    {
      if (TARGET_CG_DATA)
	{
	  gcc_assert (mcore_current_function_name);

	  ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
			      "unknown", 1);
	}

      sprintf (buffer, "jsr\t%%%d", index);
    }
  else
    {
      if (TARGET_CG_DATA)
	{
	  gcc_assert (mcore_current_function_name);
	  gcc_assert (GET_CODE (addr) == SYMBOL_REF);

	  ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
			      XSTR (addr, 0), 0);
	}

      sprintf (buffer, "jbsr\t%%%d", index);
    }

  return buffer;
}

/* Can we load a constant with a single instruction?  */

int
const_ok_for_mcore (HOST_WIDE_INT value)
{
  if (value >= 0 && value <= 127)
    return 1;

  /* Try exact power of two.  */
  if (CONST_OK_FOR_M (value))
    return 1;

  /* Try exact power of two - 1.  */
  if (CONST_OK_FOR_N (value) && value != -1)
    return 1;

  return 0;
}
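
/* Example (illustrative): 100 (movi), 4096 == 2^12 (bgeni) and
   255 == 2^8 - 1 (bmaski) are all single-instruction constants,
   whereas 200 is none of these and needs a constant trick or an
   lrw from the literal pool.  */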

/* Can we load a constant inline with up to 2 instructions?  */

int
mcore_const_ok_for_inline (HOST_WIDE_INT value)
{
  HOST_WIDE_INT x, y;

  return try_constant_tricks (value, & x, & y) > 0;
}

/* Are we loading the constant using a not?  */

int
mcore_const_trick_uses_not (HOST_WIDE_INT value)
{
  HOST_WIDE_INT x, y;

  return try_constant_tricks (value, & x, & y) == 2;
}

/* Try tricks to load a constant inline and return the trick number if
   success (0 is non-inlinable).

   0: not inlinable
   1: single instruction (do the usual thing)
   2: single insn followed by a 'not'
   3: single insn followed by an addi
   4: single insn followed by a subi
   5: single insn followed by rsubi
   6: single insn followed by bseti
   7: single insn followed by bclri
   8: single insn followed by rotli
   9: single insn followed by lsli
  10: single insn followed by ixh
  11: single insn followed by ixw.  */

static int
try_constant_tricks (HOST_WIDE_INT value, HOST_WIDE_INT * x, HOST_WIDE_INT * y)
{
  HOST_WIDE_INT i;
  unsigned HOST_WIDE_INT bit, shf, rot;

  if (const_ok_for_mcore (value))
    return 1;	/* Do the usual thing.  */

  if (! TARGET_HARDLIT)
    return 0;

  if (const_ok_for_mcore (~value))
    {
      *x = ~value;
      return 2;
    }

  for (i = 1; i <= 32; i++)
    {
      if (const_ok_for_mcore (value - i))
	{
	  *x = value - i;
	  *y = i;

	  return 3;
	}

      if (const_ok_for_mcore (value + i))
	{
	  *x = value + i;
	  *y = i;

	  return 4;
	}
    }

  bit = 0x80000000ULL;

  for (i = 0; i <= 31; i++)
    {
      if (const_ok_for_mcore (i - value))
	{
	  *x = i - value;
	  *y = i;

	  return 5;
	}

      if (const_ok_for_mcore (value & ~bit))
	{
	  *y = bit;
	  *x = value & ~bit;
	  return 6;
	}

      if (const_ok_for_mcore (value | bit))
	{
	  *y = ~bit;
	  *x = value | bit;

	  return 7;
	}

      bit >>= 1;
    }

  shf = value;
  rot = value;

  for (i = 1; i < 31; i++)
    {
      int c;

      /* MCore has rotate left.  */
      c = rot << 31;
      rot >>= 1;
      rot &= 0x7FFFFFFF;
      rot |= c;   /* Simulate rotate.  */

      if (const_ok_for_mcore (rot))
	{
	  *y = i;
	  *x = rot;

	  return 8;
	}

      if (shf & 1)
	shf = 0;	/* Can't use logical shift, low order bit is one.  */

      shf >>= 1;

      if (shf != 0 && const_ok_for_mcore (shf))
	{
	  *y = i;
	  *x = shf;

	  return 9;
	}
    }

  if ((value % 3) == 0 && const_ok_for_mcore (value / 3))
    {
      *x = value / 3;

      return 10;
    }

  if ((value % 5) == 0 && const_ok_for_mcore (value / 5))
    {
      *x = value / 5;

      return 11;
    }

  return 0;
}
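
/* Example (illustrative): 1000 is not loadable directly, but
   1000 + 24 == 1024 == 2^10 is, so the loop above returns trick 4 with
   *x == 1024 and *y == 24; output_inline_const then emits
   "bgeni rN,10" followed by "subi rN,24".  */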

/* Check whether reg is dead at first.  This is done by searching ahead
   for either the next use (i.e., reg is live), a death note, or a set of
   reg.  Don't just use dead_or_set_p() since reload does not always mark
   deaths (especially if PRESERVE_DEATH_NOTES_REGNO_P is not defined).  We
   can ignore subregs by extracting the actual register.  BRC  */

int
mcore_is_dead (rtx first, rtx reg)
{
  rtx insn;

  /* For mcore, subregs can't live independently of their parent regs.  */
  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  /* Dies immediately.  */
  if (dead_or_set_p (first, reg))
    return 1;

  /* Look for conclusive evidence of live/death, otherwise we have
     to assume that it is live.  */
  for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == JUMP_INSN)
	return 0;	/* We lose track, assume it is alive.  */

      else if (GET_CODE(insn) == CALL_INSN)
	{
	  /* Calls might use it for target or register parms.  */
	  if (reg_referenced_p (reg, PATTERN (insn))
	      || find_reg_fusage (insn, USE, reg))
	    return 0;
	  else if (dead_or_set_p (insn, reg))
	    return 1;
	}
      else if (GET_CODE (insn) == INSN)
	{
	  if (reg_referenced_p (reg, PATTERN (insn)))
	    return 0;
	  else if (dead_or_set_p (insn, reg))
	    return 1;
	}
    }

  /* No conclusive evidence either way, we cannot take the chance
     that control flow hid the use from us -- "I'm not dead yet".  */
  return 0;
}

/* Count the number of ones in mask.  */

int
mcore_num_ones (HOST_WIDE_INT mask)
{
  /* A trick to count set bits recently posted on comp.compilers.  */
  mask =  (mask >> 1  & 0x55555555) + (mask & 0x55555555);
  mask = ((mask >> 2) & 0x33333333) + (mask & 0x33333333);
  mask = ((mask >> 4) + mask) & 0x0f0f0f0f;
  mask = ((mask >> 8) + mask);

  return (mask + (mask >> 16)) & 0xff;
}
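
/* Worked example (illustrative): for mask == 0xFF the pairwise step
   yields 0xAA (each 2-bit field holds the count 2), the nibble step
   yields 0x44 (each 4-bit field holds 4), the byte step yields 0x08,
   and the final two folds leave 8 -- the population count.  */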

/* Count the number of zeros in mask.  */

int
mcore_num_zeros (HOST_WIDE_INT mask)
{
  return 32 - mcore_num_ones (mask);
}

/* Determine byte being masked.  */

int
mcore_byte_offset (unsigned int mask)
{
  if (mask == 0x00ffffffL)
    return 0;
  else if (mask == 0xff00ffffL)
    return 1;
  else if (mask == 0xffff00ffL)
    return 2;
  else if (mask == 0xffffff00L)
    return 3;

  return -1;
}

/* Determine halfword being masked.  */

int
mcore_halfword_offset (unsigned int mask)
{
  if (mask == 0x0000ffffL)
    return 0;
  else if (mask == 0xffff0000L)
    return 1;

  return -1;
}

/* Output a series of bseti's corresponding to mask.  */

const char *
mcore_output_bseti (rtx dst, int mask)
{
  rtx out_operands[2];
  int bit;

  out_operands[0] = dst;

  for (bit = 0; bit < 32; bit++)
    {
      if ((mask & 0x1) == 0x1)
	{
	  out_operands[1] = GEN_INT (bit);

	  output_asm_insn ("bseti\t%0,%1", out_operands);
	}
      mask >>= 1;
    }

  return "";
}
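
/* Example (illustrative): mask == 0x11 emits "bseti rN,0" followed by
   "bseti rN,4", one instruction per set bit.  */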

/* Output a series of bclri's corresponding to mask.  */

const char *
mcore_output_bclri (rtx dst, int mask)
{
  rtx out_operands[2];
  int bit;

  out_operands[0] = dst;

  for (bit = 0; bit < 32; bit++)
    {
      if ((mask & 0x1) == 0x0)
	{
	  out_operands[1] = GEN_INT (bit);

	  output_asm_insn ("bclri\t%0,%1", out_operands);
	}

      mask >>= 1;
    }

  return "";
}

/* Output a conditional move of two constants that are +/- 1 within each
   other.  See the "movtK" patterns in mcore.md.   I'm not sure this is
   really worth the effort.  */

const char *
mcore_output_cmov (rtx operands[], int cmp_t, const char * test)
{
  HOST_WIDE_INT load_value;
  HOST_WIDE_INT adjust_value;
  rtx out_operands[4];

  out_operands[0] = operands[0];

  /* Check to see which constant is loadable.  */
  if (const_ok_for_mcore (INTVAL (operands[1])))
    {
      out_operands[1] = operands[1];
      out_operands[2] = operands[2];
    }
  else if (const_ok_for_mcore (INTVAL (operands[2])))
    {
      out_operands[1] = operands[2];
      out_operands[2] = operands[1];

      /* Complement test since constants are swapped.  */
      cmp_t = (cmp_t == 0);
    }
  load_value   = INTVAL (out_operands[1]);
  adjust_value = INTVAL (out_operands[2]);

  /* First output the test if folded into the pattern.  */

  if (test)
    output_asm_insn (test, operands);

  /* Load the constant - for now, only support constants that can be
     generated with a single instruction.  Maybe add general inlinable
     constants later (this will increase the # of patterns since the
     instruction sequence has a different length attribute).  */
  if (load_value >= 0 && load_value <= 127)
    output_asm_insn ("movi\t%0,%1", out_operands);
  else if (CONST_OK_FOR_M (load_value))
    output_asm_insn ("bgeni\t%0,%P1", out_operands);
  else if (CONST_OK_FOR_N (load_value))
    output_asm_insn ("bmaski\t%0,%N1", out_operands);

  /* Output the constant adjustment.  */
  if (load_value > adjust_value)
    {
      if (cmp_t)
	output_asm_insn ("decf\t%0", out_operands);
      else
	output_asm_insn ("dect\t%0", out_operands);
    }
  else
    {
      if (cmp_t)
	output_asm_insn ("incf\t%0", out_operands);
      else
	output_asm_insn ("inct\t%0", out_operands);
    }

  return "";
}

/* Outputs the peephole for moving a constant that gets not'ed followed
   by an and (i.e. combine the not and the and into andn).  BRC  */

const char *
mcore_output_andn (rtx insn ATTRIBUTE_UNUSED, rtx operands[])
{
  HOST_WIDE_INT x, y;
  rtx out_operands[3];
  const char * load_op;
  char buf[256];
  int trick_no;

  trick_no = try_constant_tricks (INTVAL (operands[1]), &x, &y);
  gcc_assert (trick_no == 2);

  out_operands[0] = operands[0];
  out_operands[1] = GEN_INT (x);
  out_operands[2] = operands[2];

  if (x >= 0 && x <= 127)
    load_op = "movi\t%0,%1";

  /* Try exact power of two.  */
  else if (CONST_OK_FOR_M (x))
    load_op = "bgeni\t%0,%P1";

  /* Try exact power of two - 1.  */
  else if (CONST_OK_FOR_N (x))
    load_op = "bmaski\t%0,%N1";

  else
    {
      load_op = "BADMOVI-andn\t%0, %1";
      gcc_unreachable ();
    }

  sprintf (buf, "%s\n\tandn\t%%2,%%0", load_op);
  output_asm_insn (buf, out_operands);

  return "";
}

/* Output an inline constant.  */

static const char *
output_inline_const (enum machine_mode mode, rtx operands[])
{
  HOST_WIDE_INT x = 0, y = 0;
  int trick_no;
  rtx out_operands[3];
  char buf[256];
  char load_op[256];
  const char *dst_fmt;
  HOST_WIDE_INT value;

  value = INTVAL (operands[1]);

  trick_no = try_constant_tricks (value, &x, &y);
  /* lrw's are handled separately: Large inlinable constants never get
     turned into lrw's.  Our caller uses try_constant_tricks to back
     off to an lrw rather than calling this routine.  */
  gcc_assert (trick_no != 0);

  if (trick_no == 1)
    x = value;

  /* operands: 0 = dst, 1 = load immed., 2 = immed. adjustment.  */
  out_operands[0] = operands[0];
  out_operands[1] = GEN_INT (x);

  if (trick_no > 2)
    out_operands[2] = GEN_INT (y);

  /* Select dst format based on mode.  */
  if (mode == DImode && (! TARGET_LITTLE_END))
    dst_fmt = "%R0";
  else
    dst_fmt = "%0";

  if (x >= 0 && x <= 127)
    sprintf (load_op, "movi\t%s,%%1", dst_fmt);

  /* Try exact power of two.  */
  else if (CONST_OK_FOR_M (x))
    sprintf (load_op, "bgeni\t%s,%%P1", dst_fmt);

  /* Try exact power of two - 1.  */
  else if (CONST_OK_FOR_N (x))
    sprintf (load_op, "bmaski\t%s,%%N1", dst_fmt);

  else
    {
      sprintf (load_op, "BADMOVI-inline_const %s, %%1", dst_fmt);
      gcc_unreachable ();
    }

  switch (trick_no)
    {
    case 1:
      strcpy (buf, load_op);
      break;
    case 2:   /* not */
      sprintf (buf, "%s\n\tnot\t%s\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 3:   /* add */
      sprintf (buf, "%s\n\taddi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 4:   /* sub */
      sprintf (buf, "%s\n\tsubi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 5:   /* rsub */
      /* Never happens unless -mrsubi, see try_constant_tricks().  */
      sprintf (buf, "%s\n\trsubi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 6:   /* bseti */
      sprintf (buf, "%s\n\tbseti\t%s,%%P2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 7:   /* bclr */
      sprintf (buf, "%s\n\tbclri\t%s,%%Q2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 8:   /* rotl */
      sprintf (buf, "%s\n\trotli\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 9:   /* lsl */
      sprintf (buf, "%s\n\tlsli\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 10:  /* ixh */
      sprintf (buf, "%s\n\tixh\t%s,%s\t// %ld 0x%lx", load_op, dst_fmt, dst_fmt, value, value);
      break;
    case 11:  /* ixw */
      sprintf (buf, "%s\n\tixw\t%s,%s\t// %ld 0x%lx", load_op, dst_fmt, dst_fmt, value, value);
      break;
    default:
      return "";
    }

  output_asm_insn (buf, out_operands);

  return "";
}
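
/* Note (illustrative): the %P1, %N1, %P2 and %Q2 modifiers in the
   templates above are decoded by mcore_print_operand earlier in this
   file -- 'P' prints log2 of a power-of-two operand, 'N' prints log2
   of (operand + 1), and 'Q' prints log2 of the inverted operand -- so
   with x == 1024 the template "bgeni\t%0,%P1" emits "bgeni rN,10".  */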

/* Output a move of a word or less value.  */

const char *
mcore_output_move (rtx insn ATTRIBUTE_UNUSED, rtx operands[],
		   enum machine_mode mode ATTRIBUTE_UNUSED)
{
  rtx dst = operands[0];
  rtx src = operands[1];

  if (GET_CODE (dst) == REG)
    {
      if (GET_CODE (src) == REG)
	{
	  if (REGNO (src) == CC_REG)            /* r-c */
	    return "mvc\t%0";
	  else
	    return "mov\t%0,%1";                /* r-r */
	}
      else if (GET_CODE (src) == MEM)
	{
	  if (GET_CODE (XEXP (src, 0)) == LABEL_REF)
	    return "lrw\t%0,[%1]";              /* a-R */
	  else
	    switch (GET_MODE (src))		/* r-m */
	      {
	      case SImode:
		return "ldw\t%0,%1";
	      case HImode:
		return "ld.h\t%0,%1";
	      case QImode:
		return "ld.b\t%0,%1";
	      default:
		gcc_unreachable ();
	      }
	}
      else if (GET_CODE (src) == CONST_INT)
	{
	  HOST_WIDE_INT x, y;

	  if (CONST_OK_FOR_I (INTVAL (src)))       /* r-I */
	    return "movi\t%0,%1";
	  else if (CONST_OK_FOR_M (INTVAL (src)))  /* r-M */
	    return "bgeni\t%0,%P1\t// %1 %x1";
	  else if (CONST_OK_FOR_N (INTVAL (src)))  /* r-N */
	    return "bmaski\t%0,%N1\t// %1 %x1";
	  else if (try_constant_tricks (INTVAL (src), &x, &y))     /* R-P */
	    return output_inline_const (SImode, operands);  /* 1-2 insns */
	  else
	    return "lrw\t%0,%x1\t// %1";	/* Get it from literal pool.  */
	}
      else
	return "lrw\t%0, %1";                /* Into the literal pool.  */
    }
  else if (GET_CODE (dst) == MEM)               /* m-r */
    switch (GET_MODE (dst))
      {
      case SImode:
	return "stw\t%1,%0";
      case HImode:
	return "st.h\t%1,%0";
      case QImode:
	return "st.b\t%1,%0";
      default:
	gcc_unreachable ();
      }

  gcc_unreachable ();
}

/* Return a sequence of instructions to perform DI or DF move.
   Since the MCORE cannot move a DI or DF in one instruction, we have
   to take care when we see overlapping source and dest registers.  */

const char *
mcore_output_movedouble (rtx operands[], enum machine_mode mode ATTRIBUTE_UNUSED)
{
  rtx dst = operands[0];
  rtx src = operands[1];

  if (GET_CODE (dst) == REG)
    {
      if (GET_CODE (src) == REG)
	{
	  int dstreg = REGNO (dst);
	  int srcreg = REGNO (src);

	  /* Ensure the second source not overwritten.  */
	  if (srcreg + 1 == dstreg)
	    return "mov %R0,%R1\n\tmov %0,%1";
	  else
	    return "mov %0,%1\n\tmov %R0,%R1";
	}
      else if (GET_CODE (src) == MEM)
	{
	  rtx memexp = XEXP (src, 0);
	  int dstreg = REGNO (dst);
	  int basereg = -1;

	  if (GET_CODE (memexp) == LABEL_REF)
	    return "lrw\t%0,[%1]\n\tlrw\t%R0,[%R1]";
	  else if (GET_CODE (memexp) == REG)
	    basereg = REGNO (memexp);
	  else if (GET_CODE (memexp) == PLUS)
	    {
	      if (GET_CODE (XEXP (memexp, 0)) == REG)
		basereg = REGNO (XEXP (memexp, 0));
	      else if (GET_CODE (XEXP (memexp, 1)) == REG)
		basereg = REGNO (XEXP (memexp, 1));
	      else
		gcc_unreachable ();
	    }
	  else
	    gcc_unreachable ();

	  /* ??? length attribute is wrong here.  */
	  if (dstreg == basereg)
	    {
	      /* Just load them in reverse order.  */
	      return "ldw\t%R0,%R1\n\tldw\t%0,%1";

	      /* XXX: alternative: move basereg to basereg+1
		 and then fall through.  */
	    }
	  else
	    return "ldw\t%0,%1\n\tldw\t%R0,%R1";
	}
      else if (GET_CODE (src) == CONST_INT)
	{
	  if (TARGET_LITTLE_END)
	    {
	      if (CONST_OK_FOR_I (INTVAL (src)))
		output_asm_insn ("movi	%0,%1", operands);
	      else if (CONST_OK_FOR_M (INTVAL (src)))
		output_asm_insn ("bgeni	%0,%P1", operands);
	      else if (CONST_OK_FOR_N (INTVAL (src)))
		output_asm_insn ("bmaski	%0,%N1", operands);
	      else
		gcc_unreachable ();

	      if (INTVAL (src) < 0)
		return "bmaski	%R0,32";
	      else
		return "movi	%R0,0";
	    }
	  else
	    {
	      if (CONST_OK_FOR_I (INTVAL (src)))
		output_asm_insn ("movi	%R0,%1", operands);
	      else if (CONST_OK_FOR_M (INTVAL (src)))
		output_asm_insn ("bgeni	%R0,%P1", operands);
	      else if (CONST_OK_FOR_N (INTVAL (src)))
		output_asm_insn ("bmaski	%R0,%N1", operands);
	      else
		gcc_unreachable ();

	      if (INTVAL (src) < 0)
		return "bmaski	%0,32";
	      else
		return "movi	%0,0";
	    }
	}
      else
	gcc_unreachable ();
    }
  else if (GET_CODE (dst) == MEM && GET_CODE (src) == REG)
    return "stw\t%1,%0\n\tstw\t%R1,%R0";
  else
    gcc_unreachable ();
}

/* Predicates used by the templates.  */

int
mcore_arith_S_operand (rtx op)
{
  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (~INTVAL (op)))
    return 1;

  return 0;
}

/* Expand insert bit field.  BRC  */

int
mcore_expand_insv (rtx operands[])
{
  int width = INTVAL (operands[1]);
  int posn = INTVAL (operands[2]);
  int mask;
  rtx mreg, sreg, ereg;

  /* To get width 1 insv, the test in store_bit_field() (expmed.c, line 191)
     for width==1 must be removed.  Look around line 368.  This is something
     we really want the md part to do.  */
  if (width == 1 && GET_CODE (operands[3]) == CONST_INT)
    {
      /* Do directly with bseti or bclri.  */
      /* RBE: 2/97 consider only low bit of constant.  */
      if ((INTVAL (operands[3]) & 1) == 0)
	{
	  mask = ~(1 << posn);
	  emit_insn (gen_rtx_SET (SImode, operands[0],
				  gen_rtx_AND (SImode, operands[0], GEN_INT (mask))));
	}
      else
	{
	  mask = 1 << posn;
	  emit_insn (gen_rtx_SET (SImode, operands[0],
				  gen_rtx_IOR (SImode, operands[0], GEN_INT (mask))));
	}

      return 1;
    }

  /* Look at some bit-field placements that we aren't interested
     in handling ourselves, unless specifically directed to do so.  */
  if (! TARGET_W_FIELD)
    return 0;		/* Generally, give up about now.  */

  if (width == 8 && posn % 8 == 0)
    /* Byte sized and aligned; let caller break it up.  */
    return 0;

  if (width == 16 && posn % 16 == 0)
    /* Short sized and aligned; let caller break it up.  */
    return 0;

  /* The general case - we can do this a little bit better than what the
     machine independent part tries.  This will get rid of all the subregs
     that mess up constant folding in combine when working with relaxed
     immediates.  */

  /* If setting the entire field, do it directly.  */
  if (GET_CODE (operands[3]) == CONST_INT
      && INTVAL (operands[3]) == ((1 << width) - 1))
    {
      mreg = force_reg (SImode, GEN_INT (INTVAL (operands[3]) << posn));
      emit_insn (gen_rtx_SET (SImode, operands[0],
			      gen_rtx_IOR (SImode, operands[0], mreg)));
      return 1;
    }

  /* Generate the clear mask.  */
  mreg = force_reg (SImode, GEN_INT (~(((1 << width) - 1) << posn)));

  /* Clear the field, to overlay it later with the source.  */
  emit_insn (gen_rtx_SET (SImode, operands[0],
			  gen_rtx_AND (SImode, operands[0], mreg)));

  /* If the source is constant 0, we've nothing to add back.  */
  if (GET_CODE (operands[3]) == CONST_INT && INTVAL (operands[3]) == 0)
    return 1;

  /* XXX: Should we worry about more games with constant values?
     We've covered the high profile: set/clear single-bit and many-bit
     fields.  How often do we see "arbitrary bit pattern" constants?  */
  sreg = copy_to_mode_reg (SImode, operands[3]);

  /* Extract src as same width as dst (needed for signed values).  We
     always have to do this since we widen everything to SImode.
     We don't have to mask if we're shifting this up against the
     MSB of the register (e.g., the shift will push out any hi-order
     bits).  */
  if (width + posn != (int) GET_MODE_SIZE (SImode))
    {
      ereg = force_reg (SImode, GEN_INT ((1 << width) - 1));
      emit_insn (gen_rtx_SET (SImode, sreg,
			      gen_rtx_AND (SImode, sreg, ereg)));
    }

  /* Insert source value in dest.  */
  if (posn != 0)
    emit_insn (gen_rtx_SET (SImode, sreg,
			    gen_rtx_ASHIFT (SImode, sreg, GEN_INT (posn))));

  emit_insn (gen_rtx_SET (SImode, operands[0],
			  gen_rtx_IOR (SImode, operands[0], sreg)));

  return 1;
}
\f
/* ??? Block move stuff stolen from m88k.  This code has not been
   verified for correctness.  */

/* Emit code to perform a block move.  Choose the best method.

   OPERANDS[0] is the destination.
   OPERANDS[1] is the source.
   OPERANDS[2] is the size.
   OPERANDS[3] is the alignment safe to use.  */

/* Emit code to perform a block move with an offset sequence of ldw/st
   instructions (..., ldw 0, stw 1, ldw 1, stw 0, ...).  SIZE and ALIGN are
   known constants.  DEST and SRC are registers.  OFFSET is the known
   starting point for the output pattern.  */

static const enum machine_mode mode_from_align[] =
{
  VOIDmode, QImode, HImode, VOIDmode, SImode,
};

static void
block_move_sequence (rtx dst_mem, rtx src_mem, int size, int align)
{
  rtx temp[2];
  enum machine_mode mode[2];
  int amount[2];
  bool active[2];
  int phase = 0;
  int next;
  int offset_ld = 0;
  int offset_st = 0;
  rtx x;

  x = XEXP (dst_mem, 0);
  if (!REG_P (x))
    {
      x = force_reg (Pmode, x);
      dst_mem = replace_equiv_address (dst_mem, x);
    }

  x = XEXP (src_mem, 0);
  if (!REG_P (x))
    {
      x = force_reg (Pmode, x);
      src_mem = replace_equiv_address (src_mem, x);
    }

  active[0] = active[1] = false;

  do
    {
      next = phase;
      phase ^= 1;

      if (size > 0)
	{
	  int next_amount;

	  next_amount = (size >= 4 ? 4 : (size >= 2 ? 2 : 1));
	  next_amount = MIN (next_amount, align);

	  amount[next] = next_amount;
	  mode[next] = mode_from_align[next_amount];
	  temp[next] = gen_reg_rtx (mode[next]);

	  x = adjust_address (src_mem, mode[next], offset_ld);
	  emit_insn (gen_rtx_SET (VOIDmode, temp[next], x));

	  offset_ld += next_amount;
	  size -= next_amount;
	  active[next] = true;
	}

      if (active[phase])
	{
	  active[phase] = false;

	  x = adjust_address (dst_mem, mode[phase], offset_st);
	  emit_insn (gen_rtx_SET (VOIDmode, x, temp[phase]));

	  offset_st += amount[phase];
	}
    }
  while (active[next]);
}
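
/* Example (illustrative): copying 8 word-aligned bytes runs the loop
   three times and emits the loads ahead of the stores --
   "ldw t0,(src,0); ldw t1,(src,4); stw t0,(dst,0); stw t1,(dst,4)" --
   i.e. each chunk's load is issued before the previous chunk's store,
   with the two temporaries alternating as PHASE flips.  */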

bool
mcore_expand_block_move (rtx *operands)
{
  HOST_WIDE_INT align, bytes, max;

  if (GET_CODE (operands[2]) != CONST_INT)
    return false;

  bytes = INTVAL (operands[2]);
  align = INTVAL (operands[3]);

  if (bytes <= 0)
    return false;
  if (align > 4)
    align = 4;

  switch (align)
    {
    case 4:
      if (bytes & 1)
	max = 4*4;
      else if (bytes & 3)
	max = 8*4;
      else
	max = 16*4;
      break;
    case 2:
      max = 4*2;
      break;
    case 1:
      max = 4*1;
      break;
    default:
      gcc_unreachable ();
    }

  if (bytes <= max)
    {
      block_move_sequence (operands[0], operands[1], bytes, align);
      return true;
    }

  return false;
}
\f

/* Code to generate prologue and epilogue sequences.  */
static int number_of_regs_before_varargs;

/* Set by TARGET_SETUP_INCOMING_VARARGS to indicate to prolog that this is
   for a varargs function.  */
static int current_function_anonymous_args;

#define STACK_BYTES (STACK_BOUNDARY/BITS_PER_UNIT)
#define STORE_REACH (64)	/* Maximum displace of word store + 4.  */
#define ADDI_REACH  (32)	/* Maximum addi operand.  */

8f90be4c 1671static void
08903e08 1672layout_mcore_frame (struct mcore_frame * infp)
8f90be4c
NC
1673{
1674 int n;
1675 unsigned int i;
1676 int nbytes;
1677 int regarg;
1678 int localregarg;
8f90be4c
NC
1679 int outbounds;
1680 unsigned int growths;
1681 int step;
1682
1683 /* Might have to spill bytes to re-assemble a big argument that
4816b8e4 1684 was passed partially in registers and partially on the stack. */
38173d38 1685 nbytes = crtl->args.pretend_args_size;
8f90be4c
NC
1686
1687 /* Determine how much space for spilled anonymous args (e.g., stdarg). */
1688 if (current_function_anonymous_args)
1689 nbytes += (NPARM_REGS - number_of_regs_before_varargs) * UNITS_PER_WORD;
1690
1691 infp->arg_size = nbytes;
1692
1693 /* How much space to save non-volatile registers we stomp. */
1694 infp->reg_mask = calc_live_regs (& n);
1695 infp->reg_size = n * 4;
1696
14bc6742 1697 /* And the rest of it... locals and space for overflowed outbounds. */
8f90be4c 1698 infp->local_size = get_frame_size ();
38173d38 1699 infp->outbound_size = crtl->outgoing_args_size;
8f90be4c
NC
1700
1701 /* Make sure we have a whole number of words for the locals. */
1702 if (infp->local_size % STACK_BYTES)
1703 infp->local_size = (infp->local_size + STACK_BYTES - 1) & ~ (STACK_BYTES -1);
1704
1705 /* Only thing we know we have to pad is the outbound space, since
1706 we've aligned our locals assuming that base of locals is aligned. */
1707 infp->pad_local = 0;
1708 infp->pad_reg = 0;
1709 infp->pad_outbound = 0;
1710 if (infp->outbound_size % STACK_BYTES)
1711 infp->pad_outbound = STACK_BYTES - (infp->outbound_size % STACK_BYTES);
1712
1713 /* Now we see how we want to stage the prologue so that it does
1714 the most appropriate stack growth and register saves to either:
1715 (1) run fast,
1716 (2) reduce instruction space, or
1717 (3) reduce stack space. */
b6a1cbae 1718 for (i = 0; i < ARRAY_SIZE (infp->growth); i++)
8f90be4c
NC
1719 infp->growth[i] = 0;
1720
1721 regarg = infp->reg_size + infp->arg_size;
1722 localregarg = infp->local_size + regarg;
8f90be4c
NC
1723 outbounds = infp->outbound_size + infp->pad_outbound;
1724 growths = 0;
1725
1726 /* XXX: Consider one where we consider localregarg + outbound too! */
1727
1728 /* Frame of <= 32 bytes and using stm would get <= 2 registers.
1729 use stw's with offsets and buy the frame in one shot. */
1730 if (localregarg <= ADDI_REACH
1731 && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
1732 {
1733 /* Make sure we'll be aligned. */
1734 if (localregarg % STACK_BYTES)
1735 infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);
1736
1737 step = localregarg + infp->pad_reg;
1738 infp->reg_offset = infp->local_size;
1739
1740 if (outbounds + step <= ADDI_REACH && !frame_pointer_needed)
1741 {
1742 step += outbounds;
1743 infp->reg_offset += outbounds;
1744 outbounds = 0;
1745 }
1746
1747 infp->arg_offset = step - 4;
1748 infp->growth[growths++] = step;
1749 infp->reg_growth = growths;
1750 infp->local_growth = growths;
1751
4816b8e4 1752 /* If we haven't already folded it in. */
8f90be4c
NC
1753 if (outbounds)
1754 infp->growth[growths++] = outbounds;
1755
1756 goto finish;
1757 }
1758
1759 /* Frame can't be done with a single subi, but can be done with 2
1760 insns. If the 'stm' is getting <= 2 registers, we use stw's and
1761 shift some of the stack purchase into the first subi, so both are
1762 single instructions. */
1763 if (localregarg <= STORE_REACH
1764 && (infp->local_size > ADDI_REACH)
1765 && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
1766 {
1767 int all;
1768
1769 /* Make sure we'll be aligned; use either pad_reg or pad_local. */
1770 if (localregarg % STACK_BYTES)
1771 infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);
1772
1773 all = localregarg + infp->pad_reg + infp->pad_local;
1774 step = ADDI_REACH; /* As much up front as we can. */
1775 if (step > all)
1776 step = all;
1777
1778 /* XXX: Consider whether step will still be aligned; we believe so. */
1779 infp->arg_offset = step - 4;
1780 infp->growth[growths++] = step;
1781 infp->reg_growth = growths;
1782 infp->reg_offset = step - infp->pad_reg - infp->reg_size;
1783 all -= step;
1784
4816b8e4 1785 /* Can we fold in any space required for outbounds? */
8f90be4c
NC
1786 if (outbounds + all <= ADDI_REACH && !frame_pointer_needed)
1787 {
1788 all += outbounds;
1789 outbounds = 0;
1790 }
1791
4816b8e4 1792 /* Get the rest of the locals in place. */
8f90be4c
NC
1793 step = all;
1794 infp->growth[growths++] = step;
1795 infp->local_growth = growths;
1796 all -= step;
1797
819bfe0e 1798 gcc_assert (all == 0);
8f90be4c 1799
4816b8e4 1800 /* Finish off if we need to do so. */
8f90be4c
NC
1801 if (outbounds)
1802 infp->growth[growths++] = outbounds;
1803
1804 goto finish;
1805 }
1806
1807 /* Registers + args is nicely aligned, so we'll buy that in one shot.
1808 Then we buy the rest of the frame in 1 or 2 steps depending on
1809 whether we need a frame pointer. */
1810 if ((regarg % STACK_BYTES) == 0)
1811 {
1812 infp->growth[growths++] = regarg;
1813 infp->reg_growth = growths;
1814 infp->arg_offset = regarg - 4;
1815 infp->reg_offset = 0;
1816
1817 if (infp->local_size % STACK_BYTES)
1818 infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);
1819
1820 step = infp->local_size + infp->pad_local;
1821
1822 if (!frame_pointer_needed)
1823 {
1824 step += outbounds;
1825 outbounds = 0;
1826 }
1827
1828 infp->growth[growths++] = step;
1829 infp->local_growth = growths;
1830
4816b8e4 1831 /* If there's any left to be done. */
8f90be4c
NC
1832 if (outbounds)
1833 infp->growth[growths++] = outbounds;
1834
1835 goto finish;
1836 }
1837
1838 /* XXX: optimizations that we'll want to play with....
4816b8e4
NC
1839 -- regarg is not aligned, but it's a small number of registers;
1840 use some of localsize so that regarg is aligned and then
1841 save the registers. */
8f90be4c
NC
1842
1843 /* Simple encoding; plods down the stack buying the pieces as it goes.
4816b8e4
NC
1844 -- does not optimize space consumption.
1845 -- does not attempt to optimize instruction counts.
1846 -- but it is safe for all alignments. */
8f90be4c
NC
1847 if (regarg % STACK_BYTES != 0)
1848 infp->pad_reg = STACK_BYTES - (regarg % STACK_BYTES);
1849
1850 infp->growth[growths++] = infp->arg_size + infp->reg_size + infp->pad_reg;
1851 infp->reg_growth = growths;
1852 infp->arg_offset = infp->growth[0] - 4;
1853 infp->reg_offset = 0;
1854
1855 if (frame_pointer_needed)
1856 {
1857 if (infp->local_size % STACK_BYTES != 0)
1858 infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);
1859
1860 infp->growth[growths++] = infp->local_size + infp->pad_local;
1861 infp->local_growth = growths;
1862
1863 infp->growth[growths++] = outbounds;
1864 }
1865 else
1866 {
1867 if ((infp->local_size + outbounds) % STACK_BYTES != 0)
1868 infp->pad_local = STACK_BYTES - ((infp->local_size + outbounds) % STACK_BYTES);
1869
1870 infp->growth[growths++] = infp->local_size + infp->pad_local + outbounds;
1871 infp->local_growth = growths;
1872 }
1873
f27cd94d 1874 /* Anything else that we've forgotten? Plus a few consistency checks. */
8f90be4c 1875 finish:
1876 gcc_assert (infp->reg_offset >= 0);
1877 gcc_assert (growths <= MAX_STACK_GROWS);
1878
1879 for (i = 0; i < growths; i++)
6e1f65b5 1880 gcc_assert (!(infp->growth[i] % STACK_BYTES));
1881}
1882
1883/* Define the offset between two registers, one to be eliminated, and
1884 the other its replacement, at the start of a routine. */
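/* A sketch of the frame this describes (the stack grows downward):

     [ incoming args / stdarg spills ]
     --------------------------------- <- ap
     [ saved registers + pad_reg     ]
     [ locals + pad_local            ]   (above_frame: fp up to ap)
     --------------------------------- <- fp
     [ outbound args + pad_outbound  ]   (below_frame: sp up to fp)
     --------------------------------- <- sp  */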
4816b8e4 1885
8f90be4c 1886int
08903e08 1887mcore_initial_elimination_offset (int from, int to)
1888{
1889 int above_frame;
1890 int below_frame;
1891 struct mcore_frame fi;
1892
1893 layout_mcore_frame (& fi);
1894
1895 /* fp to ap */
1896 above_frame = fi.local_size + fi.pad_local + fi.reg_size + fi.pad_reg;
1897 /* sp to fp */
1898 below_frame = fi.outbound_size + fi.pad_outbound;
1899
1900 if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
1901 return above_frame;
1902
1903 if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
1904 return above_frame + below_frame;
1905
1906 if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
1907 return below_frame;
1908
6e1f65b5 1909 gcc_unreachable ();
1910}
1911
1912 /* Keep track of some information about varargs for the prologue. */
1913
09a2b93a 1914static void
d5cc9181 1915mcore_setup_incoming_varargs (cumulative_args_t args_so_far_v,
08903e08 1916 enum machine_mode mode, tree type,
1917 int * ptr_pretend_size ATTRIBUTE_UNUSED,
1918 int second_time ATTRIBUTE_UNUSED)
8f90be4c 1919{
1920 CUMULATIVE_ARGS *args_so_far = get_cumulative_args (args_so_far_v);
1921
1922 current_function_anonymous_args = 1;
1923
1924 /* We need to know how many argument registers are used before
1925 the varargs start, so that we can push the remaining argument
1926 registers during the prologue. */
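   /* E.g. (hypothetical signature): for "int f (int a, ...)" only r2
      carries a named argument, so the prologue must spill the remaining
      argument registers for the va_arg machinery (and, per the
      workaround just below, the last named register as well).  */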
09a2b93a 1927 number_of_regs_before_varargs = *args_so_far + mcore_num_arg_regs (mode, type);
8f90be4c 1928
dab66575 1929 /* There is a bug somewhere in the arg handling code.
1930 Until I can find it, this workaround always pushes the
1931 last named argument onto the stack. */
09a2b93a 1932 number_of_regs_before_varargs = *args_so_far;
1933
1934 /* The last named argument may be split between argument registers
1935 and the stack. Allow for this here. */
1936 if (number_of_regs_before_varargs > NPARM_REGS)
1937 number_of_regs_before_varargs = NPARM_REGS;
1938}
1939
1940void
08903e08 1941mcore_expand_prolog (void)
1942{
1943 struct mcore_frame fi;
1944 int space_allocated = 0;
1945 int growth = 0;
1946
1947 /* Find out what we're doing. */
1948 layout_mcore_frame (&fi);
1949
1950 space_allocated = fi.arg_size + fi.reg_size + fi.local_size +
1951 fi.outbound_size + fi.pad_outbound + fi.pad_local + fi.pad_reg;
1952
1953 if (TARGET_CG_DATA)
1954 {
1955 /* Emit a symbol for this routine's frame size. */
1956 rtx x;
1957
1958 x = DECL_RTL (current_function_decl);
1959
6e1f65b5 1960 gcc_assert (GET_CODE (x) == MEM);
1961
1962 x = XEXP (x, 0);
1963
6e1f65b5 1964 gcc_assert (GET_CODE (x) == SYMBOL_REF);
8f90be4c 1965
04695783 1966 free (mcore_current_function_name);
8f90be4c 1967
1dcd444b 1968 mcore_current_function_name = xstrdup (XSTR (x, 0));
1969
1970 ASM_OUTPUT_CG_NODE (asm_out_file, mcore_current_function_name, space_allocated);
1971
e3b5732b 1972 if (cfun->calls_alloca)
1973 ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name, "alloca", 1);
1974
1975 /* 970425: RBE:
1976 We're looking at how the 8-byte alignment affects stack layout
1977 and where we had to pad things. This emits information we can
1978 extract which tells us about frame sizes and the like. */
1979 fprintf (asm_out_file,
1980 "\t.equ\t__$frame$info$_%s_$_%d_%d_x%x_%d_%d_%d,0\n",
1981 mcore_current_function_name,
1982 fi.arg_size, fi.reg_size, fi.reg_mask,
1983 fi.local_size, fi.outbound_size,
1984 frame_pointer_needed);
1985 }
1986
1987 if (mcore_naked_function_p ())
1988 return;
1989
1990 /* Handle stdarg+regsaves in one shot: can't be more than 64 bytes. */
08903e08 1991 output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
1992
1993 /* If we have a parameter passed partially in regs and partially in memory,
1994 the registers will have been stored to memory already in function.c. So
1995 we only need to do something here for varargs functions. */
38173d38 1996 if (fi.arg_size != 0 && crtl->args.pretend_args_size == 0)
1997 {
1998 int offset;
1999 int rn = FIRST_PARM_REG + NPARM_REGS - 1;
2000 int remaining = fi.arg_size;
2001
2002 for (offset = fi.arg_offset; remaining >= 4; offset -= 4, rn--, remaining -= 4)
2003 {
2004 emit_insn (gen_movsi
f1c25d3b 2005 (gen_rtx_MEM (SImode,
2006 plus_constant (Pmode, stack_pointer_rtx,
2007 offset)),
f1c25d3b 2008 gen_rtx_REG (SImode, rn)));
2009 }
2010 }
2011
4816b8e4 2012 /* Do we need another stack adjustment before we do the register saves? */
8f90be4c 2013 if (growth < fi.reg_growth)
08903e08 2014 output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
2015
2016 if (fi.reg_size != 0)
2017 {
2018 int i;
2019 int offs = fi.reg_offset;
2020
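      /* Scan r15 down to r0.  A contiguous run of saved registers ending
         at r15 (both top bits of reg_mask set) at offset 0 is saved with
         one store-multiple; anything else is stored a word at a time.  */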
2021 for (i = 15; i >= 0; i--)
2022 {
2023 if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
2024 {
2025 int first_reg = 15;
2026
2027 while (fi.reg_mask & (1 << first_reg))
2028 first_reg--;
2029 first_reg++;
2030
2031 emit_insn (gen_store_multiple (gen_rtx_MEM (SImode, stack_pointer_rtx),
2032 gen_rtx_REG (SImode, first_reg),
2033 GEN_INT (16 - first_reg)));
2034
2035 i -= (15 - first_reg);
2036 offs += (16 - first_reg) * 4;
2037 }
2038 else if (fi.reg_mask & (1 << i))
2039 {
2040 emit_insn (gen_movsi
f1c25d3b 2041 (gen_rtx_MEM (SImode,
2042 plus_constant (Pmode, stack_pointer_rtx,
2043 offs)),
f1c25d3b 2044 gen_rtx_REG (SImode, i)));
2045 offs += 4;
2046 }
2047 }
2048 }
2049
2050 /* Figure the locals + outbounds. */
2051 if (frame_pointer_needed)
2052 {
2053 /* If we haven't already purchased to 'fp'. */
2054 if (growth < fi.local_growth)
08903e08 2055 output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
2056
2057 emit_insn (gen_movsi (frame_pointer_rtx, stack_pointer_rtx));
2058
4816b8e4 2059 /* ... and then go any remaining distance for outbounds, etc. */
2060 if (fi.growth[growth])
2061 output_stack_adjust (-1, fi.growth[growth++]);
2062 }
2063 else
2064 {
2065 if (growth < fi.local_growth)
08903e08 2066 output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
2067 if (fi.growth[growth])
2068 output_stack_adjust (-1, fi.growth[growth++]);
2069 }
2070}
2071
2072void
08903e08 2073mcore_expand_epilog (void)
2074{
2075 struct mcore_frame fi;
2076 int i;
2077 int offs;
2078 int growth = MAX_STACK_GROWS - 1;
2079
f27cd94d 2080
2081 /* Find out what we're doing. */
2082 layout_mcore_frame (&fi);
2083
2084 if (mcore_naked_function_p ())
2085 return;
f27cd94d 2086
2087 /* If we had a frame pointer, restore the sp from that. */
2088 if (frame_pointer_needed)
2089 {
2090 emit_insn (gen_movsi (stack_pointer_rtx, frame_pointer_rtx));
2091 growth = fi.local_growth - 1;
2092 }
2093 else
2094 {
2095 /* XXX: while loop should accumulate and do a single sell. */
2096 while (growth >= fi.local_growth)
2097 {
2098 if (fi.growth[growth] != 0)
2099 output_stack_adjust (1, fi.growth[growth]);
2100 growth--;
2101 }
2102 }
2103
2104 /* Make sure we've shrunk the stack back to the point where the registers
2105 were laid down. This is typically 0/1 iterations. Then pull the
4816b8e4 2106 register save information back off the stack. */
2107 while (growth >= fi.reg_growth)
2108 output_stack_adjust (1, fi.growth[growth--]);
2109
2110 offs = fi.reg_offset;
2111
2112 for (i = 15; i >= 0; i--)
2113 {
2114 if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
2115 {
2116 int first_reg;
2117
2118 /* Find the starting register. */
2119 first_reg = 15;
2120
2121 while (fi.reg_mask & (1 << first_reg))
2122 first_reg--;
2123
2124 first_reg++;
2125
2126 emit_insn (gen_load_multiple (gen_rtx_REG (SImode, first_reg),
2127 gen_rtx_MEM (SImode, stack_pointer_rtx),
2128 GEN_INT (16 - first_reg)));
2129
2130 i -= (15 - first_reg);
2131 offs += (16 - first_reg) * 4;
2132 }
2133 else if (fi.reg_mask & (1 << i))
2134 {
2135 emit_insn (gen_movsi
2136 (gen_rtx_REG (SImode, i),
2137 gen_rtx_MEM (SImode,
2138 plus_constant (Pmode, stack_pointer_rtx,
2139 offs))));
2140 offs += 4;
2141 }
2142 }
2143
2144 /* Give back anything else. */
dab66575 2145 /* XXX: Should accumulate total and then give it back. */
2146 while (growth >= 0)
2147 output_stack_adjust (1, fi.growth[growth--]);
2148}
2149\f
2150/* This code is borrowed from the SH port. */
2151
2152 /* The MCORE cannot load a large constant into a register; constants have to
2153 come from a pc relative load. The reference of a pc relative load
0fa2e4df 2154 instruction must be less than 1k in front of the instruction. This
2155 means that we often have to dump a constant inside a function, and
2156 generate code to branch around it.
2157
2158 It is important to minimize this, since the branches will slow things
2159 down and make things bigger.
2160
2161 Worst case code looks like:
2162
2163 lrw L1,r0
2164 br L2
2165 align
2166 L1: .long value
2167 L2:
2168 ..
2169
2170 lrw L3,r0
2171 br L4
2172 align
2173 L3: .long value
2174 L4:
2175 ..
2176
2177 We fix this by performing a scan before scheduling, which notices which
2178 instructions need to have their operands fetched from the constant table
2179 and builds the table.
2180
2181 The algorithm is:
2182
2183 scan, find an instruction which needs a pcrel move. Look forward, find the
2184 last barrier which is within MAX_COUNT bytes of the requirement.
2185 If there isn't one, make one. Process all the instructions between
2186 the find and the barrier.
2187
2188 In the above example, we can tell that L3 is within 1k of L1, so
2189 the first move can be shrunk from the 2 insn+constant sequence into
2190 just 1 insn, and the constant moved to L3 to make:
2191
2192 lrw L1,r0
2193 ..
2194 lrw L3,r0
2195 bra L4
2196 align
2197 L3:.long value
2198 L4:.long value
2199
2200 Then the second move becomes the target for the shortening process. */
2201
2202typedef struct
2203{
2204 rtx value; /* Value in table. */
2205 rtx label; /* Label of value. */
2206} pool_node;
2207
2208/* The maximum number of constants that can fit into one pool, since
2209 the pc relative range is 0...1020 bytes and constants are at least 4
2a43945f 2210 bytes long. We subtract 4 from the range to allow for the case where
2211 we need to add a branch/align before the constant pool. */
2212
2213#define MAX_COUNT 1016
2214#define MAX_POOL_SIZE (MAX_COUNT/4)
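/* That is: 1020 bytes of pc-relative reach, less 4 bytes of slack for a
   possible branch/align before the pool, gives MAX_COUNT of 1016; at a
   minimum of 4 bytes per entry there is room for 254 constants.  */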
2215static pool_node pool_vector[MAX_POOL_SIZE];
2216static int pool_size;
2217
2218/* Dump out any constants accumulated in the final pass. These
2219 will only be labels. */
4816b8e4 2220
f27cd94d 2221const char *
08903e08 2222mcore_output_jump_label_table (void)
2223{
2224 int i;
2225
2226 if (pool_size)
2227 {
2228 fprintf (asm_out_file, "\t.align 2\n");
2229
2230 for (i = 0; i < pool_size; i++)
2231 {
2232 pool_node * p = pool_vector + i;
2233
4977bab6 2234 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (p->label));
2235
2236 output_asm_insn (".long %0", &p->value);
2237 }
2238
2239 pool_size = 0;
2240 }
2241
2242 return "";
2243}
2244
8f90be4c 2245/* Check whether insn is a candidate for a conditional. */
4816b8e4 2246
8f90be4c 2247static cond_type
08903e08 2248is_cond_candidate (rtx insn)
2249{
2250 /* The only things we conditionalize are those that can be directly
2251 changed into a conditional. Only bother with SImode items. If
2252 we wanted to be a little more aggressive, we could also do other
4816b8e4 2253 modes such as DImode with reg-reg move or load 0. */
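   /* Illustrative RTL shapes (not taken from any particular dump) and
      the classification each receives below:
        (set (reg:SI 2) (reg:SI 3))                           COND_MOV_INSN
        (set (reg:SI 2) (const_int 0))                        COND_CLR_INSN
        (set (reg:SI 2) (plus:SI (reg:SI 2) (const_int 1)))   COND_INC_INSN
        (set (reg:SI 2) (plus:SI (reg:SI 2) (const_int -1)))  COND_DEC_INSN  */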
2254 if (GET_CODE (insn) == INSN)
2255 {
2256 rtx pat = PATTERN (insn);
2257 rtx src, dst;
2258
2259 if (GET_CODE (pat) != SET)
2260 return COND_NO;
2261
2262 dst = XEXP (pat, 0);
2263
2264 if ((GET_CODE (dst) != REG &&
2265 GET_CODE (dst) != SUBREG) ||
2266 GET_MODE (dst) != SImode)
2267 return COND_NO;
2268
2269 src = XEXP (pat, 1);
2270
2271 if ((GET_CODE (src) == REG ||
2272 (GET_CODE (src) == SUBREG &&
2273 GET_CODE (SUBREG_REG (src)) == REG)) &&
2274 GET_MODE (src) == SImode)
2275 return COND_MOV_INSN;
2276 else if (GET_CODE (src) == CONST_INT &&
2277 INTVAL (src) == 0)
2278 return COND_CLR_INSN;
2279 else if (GET_CODE (src) == PLUS &&
2280 (GET_CODE (XEXP (src, 0)) == REG ||
2281 (GET_CODE (XEXP (src, 0)) == SUBREG &&
2282 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
2283 GET_MODE (XEXP (src, 0)) == SImode &&
2284 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2285 INTVAL (XEXP (src, 1)) == 1)
2286 return COND_INC_INSN;
2287 else if (((GET_CODE (src) == MINUS &&
2288 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2289 INTVAL( XEXP (src, 1)) == 1) ||
2290 (GET_CODE (src) == PLUS &&
2291 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2292 INTVAL (XEXP (src, 1)) == -1)) &&
2293 (GET_CODE (XEXP (src, 0)) == REG ||
2294 (GET_CODE (XEXP (src, 0)) == SUBREG &&
2295 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
2296 GET_MODE (XEXP (src, 0)) == SImode)
2297 return COND_DEC_INSN;
2298
14bc6742 2299 /* Some insns that we don't bother with:
2300 (set (rx:DI) (ry:DI))
2301 (set (rx:DI) (const_int 0))
2302 */
2303
2304 }
2305 else if (GET_CODE (insn) == JUMP_INSN &&
2306 GET_CODE (PATTERN (insn)) == SET &&
2307 GET_CODE (XEXP (PATTERN (insn), 1)) == LABEL_REF)
2308 return COND_BRANCH_INSN;
2309
2310 return COND_NO;
2311}
2312
2313/* Emit a conditional version of insn and replace the old insn with the
2314 new one. Return the new insn if emitted. */
4816b8e4 2315
8f90be4c 2316static rtx
08903e08 2317emit_new_cond_insn (rtx insn, int cond)
2318{
2319 rtx c_insn = 0;
2320 rtx pat, dst, src;
2321 cond_type num;
2322
2323 if ((num = is_cond_candidate (insn)) == COND_NO)
2324 return NULL;
2325
2326 pat = PATTERN (insn);
2327
2328 if (GET_CODE (insn) == INSN)
2329 {
2330 dst = SET_DEST (pat);
2331 src = SET_SRC (pat);
2332 }
2333 else
2334 {
2335 dst = JUMP_LABEL (insn);
2336 src = NULL_RTX;
2337 }
2338
2339 switch (num)
2340 {
2341 case COND_MOV_INSN:
2342 case COND_CLR_INSN:
2343 if (cond)
2344 c_insn = gen_movt0 (dst, src, dst);
2345 else
2346 c_insn = gen_movt0 (dst, dst, src);
2347 break;
2348
2349 case COND_INC_INSN:
2350 if (cond)
2351 c_insn = gen_incscc (dst, dst);
2352 else
2353 c_insn = gen_incscc_false (dst, dst);
2354 break;
2355
2356 case COND_DEC_INSN:
2357 if (cond)
2358 c_insn = gen_decscc (dst, dst);
2359 else
2360 c_insn = gen_decscc_false (dst, dst);
2361 break;
2362
2363 case COND_BRANCH_INSN:
2364 if (cond)
2365 c_insn = gen_branch_true (dst);
2366 else
2367 c_insn = gen_branch_false (dst);
2368 break;
2369
2370 default:
2371 return NULL;
2372 }
2373
2374 /* Only copy the notes if they exist. */
2375 if (rtx_length [GET_CODE (c_insn)] >= 7 && rtx_length [GET_CODE (insn)] >= 7)
2376 {
2377 /* We really don't need to bother with the notes and links at this
2378 point, but go ahead and save the notes. This will help is_dead()
2379 when applying peepholes (links don't matter since they are not
2380 used any more beyond this point for the mcore). */
2381 REG_NOTES (c_insn) = REG_NOTES (insn);
2382 }
2383
2384 if (num == COND_BRANCH_INSN)
2385 {
2386 /* For jumps, we need to be a little bit careful and emit the new jump
2387 before the old one and to update the use count for the target label.
2388 This way, the barrier following the old (uncond) jump will get
2389 deleted, but the label won't. */
2390 c_insn = emit_jump_insn_before (c_insn, insn);
2391
2392 ++ LABEL_NUSES (dst);
2393
2394 JUMP_LABEL (c_insn) = dst;
2395 }
2396 else
2397 c_insn = emit_insn_after (c_insn, insn);
2398
2399 delete_insn (insn);
2400
2401 return c_insn;
2402}
2403
2404/* Attempt to change a basic block into a series of conditional insns. This
2405 works by taking the branch at the end of the 1st block and scanning for the
2406 end of the 2nd block. If all instructions in the 2nd block have cond.
2407 versions and the label at the start of block 3 is the same as the target
2408 from the branch at block 1, then conditionalize all insns in block 2 using
2409 the inverse condition of the branch at block 1. (Note I'm bending the
2410 definition of basic block here.)
2411
2412 e.g., change:
2413
2414 bt L2 <-- end of block 1 (delete)
2415 mov r7,r8
2416 addu r7,1
2417 br L3 <-- end of block 2
2418
2419 L2: ... <-- start of block 3 (NUSES==1)
2420 L3: ...
2421
2422 to:
2423
2424 movf r7,r8
2425 incf r7
2426 bf L3
2427
2428 L3: ...
2429
2430 we can delete the L2 label if NUSES==1 and re-apply the optimization
2431 starting at the last instruction of block 2. This may allow an entire
4816b8e4 2432 if-then-else statement to be conditionalized. BRC */
8f90be4c 2433static rtx
08903e08 2434conditionalize_block (rtx first)
2435{
2436 rtx insn;
2437 rtx br_pat;
2438 rtx end_blk_1_br = 0;
2439 rtx end_blk_2_insn = 0;
2440 rtx start_blk_3_lab = 0;
2441 int cond;
2442 int br_lab_num;
2443 int blk_size = 0;
2444
2445
2446 /* Check that the first insn is a candidate conditional jump. This is
2447 the one that we'll eliminate. If not, advance to the next insn to
2448 try. */
2449 if (GET_CODE (first) != JUMP_INSN ||
2450 GET_CODE (PATTERN (first)) != SET ||
2451 GET_CODE (XEXP (PATTERN (first), 1)) != IF_THEN_ELSE)
2452 return NEXT_INSN (first);
2453
2454 /* Extract some information we need. */
2455 end_blk_1_br = first;
2456 br_pat = PATTERN (end_blk_1_br);
2457
2458 /* Complement the condition since we use the reverse cond. for the insns. */
2459 cond = (GET_CODE (XEXP (XEXP (br_pat, 1), 0)) == EQ);
2460
2461 /* Determine what kind of branch we have. */
2462 if (GET_CODE (XEXP (XEXP (br_pat, 1), 1)) == LABEL_REF)
2463 {
2464 /* A normal branch, so extract label out of first arm. */
2465 br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 1), 0));
2466 }
2467 else
2468 {
2469 /* An inverse branch, so extract the label out of the 2nd arm
2470 and complement the condition. */
2471 cond = (cond == 0);
2472 br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 2), 0));
2473 }
2474
2475 /* Scan forward for the start of block 2: it must start with a
2476 label and that label must be the same as the branch target
2477 label from block 1. We don't care about whether block 2 actually
2478 ends with a branch or a label (an uncond. branch is
2479 conditionalizable). */
2480 for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
2481 {
2482 enum rtx_code code;
2483
2484 code = GET_CODE (insn);
2485
14bc6742 2486 /* Look for the label at the start of block 3. */
2487 if (code == CODE_LABEL && CODE_LABEL_NUMBER (insn) == br_lab_num)
2488 break;
2489
2490 /* Skip barriers, notes, and conditionalizable insns. If the
2491 insn is not conditionalizable or makes this optimization fail,
2492 just return the next insn so we can start over from that point. */
2493 if (code != BARRIER && code != NOTE && !is_cond_candidate (insn))
2494 return NEXT_INSN (insn);
2495
112cdef5 2496 /* Remember the last real insn before the label (i.e. end of block 2). */
2497 if (code == JUMP_INSN || code == INSN)
2498 {
2499 blk_size ++;
2500 end_blk_2_insn = insn;
2501 }
2502 }
2503
2504 if (!insn)
2505 return insn;
2506
2507 /* It is possible for this optimization to slow performance if the blocks
2508 are long. This really depends upon whether the branch is likely taken
2509 or not. If the branch is taken, we slow performance in many cases. But,
2510 if the branch is not taken, we always help performance (for a single
2511 block, but for a double block (i.e. when the optimization is re-applied)
2512 this is not true since the 'right thing' depends on the overall length of
2513 the collapsed block). As a compromise, don't apply this optimization on
2514 blocks larger than size 2 (unlikely for the mcore) when speed is important.
2515 The best threshold depends on the latencies of the instructions (i.e.,
2516 the branch penalty). */
2517 if (optimize > 1 && blk_size > 2)
2518 return insn;
2519
2520 /* At this point, we've found the start of block 3 and we know that
2521 it is the destination of the branch from block 1. Also, all
2522 instructions in the block 2 are conditionalizable. So, apply the
2523 conditionalization and delete the branch. */
2524 start_blk_3_lab = insn;
2525
2526 for (insn = NEXT_INSN (end_blk_1_br); insn != start_blk_3_lab;
2527 insn = NEXT_INSN (insn))
2528 {
2529 rtx newinsn;
2530
2531 if (INSN_DELETED_P (insn))
2532 continue;
2533
14bc6742 2534 /* Try to form a conditional variant of the instruction and emit it. */
2535 if ((newinsn = emit_new_cond_insn (insn, cond)))
2536 {
2537 if (end_blk_2_insn == insn)
2538 end_blk_2_insn = newinsn;
2539
2540 insn = newinsn;
2541 }
2542 }
2543
2544 /* Note whether we will delete the label starting blk 3 when the jump
2545 gets deleted. If so, we want to re-apply this optimization at the
2546 last real instruction right before the label. */
2547 if (LABEL_NUSES (start_blk_3_lab) == 1)
2548 {
2549 start_blk_3_lab = 0;
2550 }
2551
2552 /* ??? We probably should redistribute the death notes for this insn, esp.
2553 the death of cc, but it doesn't really matter this late in the game.
2554 The peepholes all use is_dead() which will find the correct death
2555 regardless of whether there is a note. */
2556 delete_insn (end_blk_1_br);
2557
2558 if (! start_blk_3_lab)
2559 return end_blk_2_insn;
2560
4816b8e4 2561 /* Return the insn right after the label at the start of block 3. */
2562 return NEXT_INSN (start_blk_3_lab);
2563}
2564
2565/* Apply the conditionalization of blocks optimization. This is the
2566 outer loop that traverses through the insns scanning for a branch
2567 that signifies an opportunity to apply the optimization. Note that
2568 this optimization is applied late. If we could apply it earlier,
2569 say before cse 2, it may expose more optimization opportunities,
2570 but the payback probably isn't really worth the effort (we'd have
2571 to update all reg/flow/notes/links/etc to make it work - and stick it
2572 in before cse 2). */
2573
8f90be4c 2574static void
08903e08 2575conditionalize_optimization (void)
2576{
2577 rtx insn;
2578
18dbd950 2579 for (insn = get_insns (); insn; insn = conditionalize_block (insn))
2580 continue;
2581}
2582
2583static int saved_warn_return_type = -1;
2584static int saved_warn_return_type_count = 0;
2585
18dbd950 2586/* This is to handle loads from the constant pool. */
4816b8e4 2587
18dbd950 2588static void
08903e08 2589mcore_reorg (void)
2590{
2591 /* Reset this variable. */
2592 current_function_anonymous_args = 0;
2593
4816b8e4 2594 /* Restore the warn_return_type if it has been altered. */
2595 if (saved_warn_return_type != -1)
2596 {
2597 /* Only restore the value if we have reached another function.
2598 The test of warn_return_type occurs in final_function () in
2599 c-decl.c a long time after the code for the function is generated,
2600 so we need a counter to tell us when we have finished parsing that
2601 function and can restore the flag. */
2602 if (--saved_warn_return_type_count == 0)
2603 {
2604 warn_return_type = saved_warn_return_type;
2605 saved_warn_return_type = -1;
2606 }
2607 }
2608
2609 if (optimize == 0)
2610 return;
2611
2612 /* Conditionalize blocks where we can. */
18dbd950 2613 conditionalize_optimization ();
2614
2615 /* Literal pool generation is now pushed off until the assembler. */
2616}
2617
2618\f
f0f4da32 2619/* Return true if X is something that can be moved directly into r15. */
8f90be4c 2620
f0f4da32 2621bool
08903e08 2622mcore_r15_operand_p (rtx x)
2623{
2624 switch (GET_CODE (x))
2625 {
2626 case CONST_INT:
2627 return mcore_const_ok_for_inline (INTVAL (x));
8f90be4c 2628
2629 case REG:
2630 case SUBREG:
2631 case MEM:
2632 return 1;
2633
2634 default:
2635 return 0;
2636 }
2637}
2638
0a2aaacc 2639/* Implement SECONDARY_RELOAD_CLASS. If RCLASS contains r15, and we can't
f0f4da32 2640 directly move X into it, use r1-r14 as a temporary. */
08903e08 2641
f0f4da32 2642enum reg_class
0a2aaacc 2643mcore_secondary_reload_class (enum reg_class rclass,
08903e08 2644 enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
f0f4da32 2645{
0a2aaacc 2646 if (TEST_HARD_REG_BIT (reg_class_contents[rclass], 15)
2647 && !mcore_r15_operand_p (x))
2648 return LRW_REGS;
2649 return NO_REGS;
2650}
8f90be4c 2651
f0f4da32 2652/* Return the reg_class to use when reloading the rtx X into the class
0a2aaacc 2653 RCLASS. If X is too complex to move directly into r15, prefer to
f0f4da32 2654 use LRW_REGS instead. */
08903e08 2655
8f90be4c 2656enum reg_class
0a2aaacc 2657mcore_reload_class (rtx x, enum reg_class rclass)
8f90be4c 2658{
0a2aaacc 2659 if (reg_class_subset_p (LRW_REGS, rclass) && !mcore_r15_operand_p (x))
f0f4da32 2660 return LRW_REGS;
8f90be4c 2661
0a2aaacc 2662 return rclass;
2663}
2664
2665/* Tell me if a pair of reg/subreg rtx's actually refer to the same
2666 register. Note that the current version doesn't worry about whether
2667 they are the same mode or not (e.g., a QImode in r2 matches an HImode
2668 in r2, which matches an SImode in r2).  Might think in the future about whether
2669 we want to be able to say something about modes. */
08903e08 2670
8f90be4c 2671int
08903e08 2672mcore_is_same_reg (rtx x, rtx y)
8f90be4c 2673{
14bc6742 2674 /* Strip any and all of the subreg wrappers. */
2675 while (GET_CODE (x) == SUBREG)
2676 x = SUBREG_REG (x);
2677
2678 while (GET_CODE (y) == SUBREG)
2679 y = SUBREG_REG (y);
2680
2681 if (GET_CODE(x) == REG && GET_CODE(y) == REG && REGNO(x) == REGNO(y))
2682 return 1;
2683
2684 return 0;
2685}
2686
2687static void
2688mcore_option_override (void)
8f90be4c 2689{
2690 /* Only the m340 supports little endian code. */
2691 if (TARGET_LITTLE_END && ! TARGET_M340)
78fb8038 2692 target_flags |= MASK_M340;
8f90be4c 2693}
fac0f722 2694
8f90be4c 2695\f
2696/* Compute the number of word sized registers needed to
2697 hold a function argument of mode MODE and type TYPE. */
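/* For instance, an SImode argument needs one register, while an 11-byte
   BLKmode structure rounds up to three (assuming 4-byte words).  */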
08903e08 2698
8f90be4c 2699int
586de218 2700mcore_num_arg_regs (enum machine_mode mode, const_tree type)
2701{
2702 int size;
2703
fe984136 2704 if (targetm.calls.must_pass_in_stack (mode, type))
2705 return 0;
2706
2707 if (type && mode == BLKmode)
2708 size = int_size_in_bytes (type);
2709 else
2710 size = GET_MODE_SIZE (mode);
2711
2712 return ROUND_ADVANCE (size);
2713}
2714
2715static rtx
586de218 2716handle_structs_in_regs (enum machine_mode mode, const_tree type, int reg)
2717{
2718 int size;
2719
696e78bf 2720 /* The MCore ABI defines that a structure whose size is not a whole multiple
2721 of words is passed packed into registers (or spilled onto the stack if
2722 not enough registers are available) with the last few bytes of the
2723 structure being packed, left-justified, into the last register/stack slot.
2724 GCC handles this correctly if the last word is in a stack slot, but we
2725 have to generate a special, PARALLEL RTX if the last word is in an
2726 argument register. */
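   /* A hypothetical example: a 10-byte structure starting in the first
      argument register occupies r2, r3 and part of r4 (the last two
      bytes packed left-justified), so the loop below builds
        (parallel [(expr_list (reg:SI 2) 0)
                   (expr_list (reg:SI 3) 4)
                   (expr_list (reg:SI 4) 8)]).  */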
2727 if (type
2728 && TYPE_MODE (type) == BLKmode
2729 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
2730 && (size = int_size_in_bytes (type)) > UNITS_PER_WORD
2731 && (size % UNITS_PER_WORD != 0)
2732 && (reg + mcore_num_arg_regs (mode, type) <= (FIRST_PARM_REG + NPARM_REGS)))
2733 {
2734 rtx arg_regs [NPARM_REGS];
2735 int nregs;
2736 rtx result;
2737 rtvec rtvec;
2738
2739 for (nregs = 0; size > 0; size -= UNITS_PER_WORD)
2740 {
2741 arg_regs [nregs] =
2742 gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, reg ++),
2743 GEN_INT (nregs * UNITS_PER_WORD));
2744 nregs ++;
2745 }
2746
2747 /* We assume here that NPARM_REGS == 6. The assert checks this. */
819bfe0e 2748 gcc_assert (ARRAY_SIZE (arg_regs) == 6);
2749 rtvec = gen_rtvec (nregs, arg_regs[0], arg_regs[1], arg_regs[2],
2750 arg_regs[3], arg_regs[4], arg_regs[5]);
2751
2752 result = gen_rtx_PARALLEL (mode, rtvec);
2753 return result;
2754 }
2755
2756 return gen_rtx_REG (mode, reg);
2757}
2758
2759rtx
cde0f3fd 2760mcore_function_value (const_tree valtype, const_tree func)
2761{
2762 enum machine_mode mode;
2763 int unsigned_p;
2764
2765 mode = TYPE_MODE (valtype);
2766
cde0f3fd 2767 /* Since we promote return types, we must promote the mode here too. */
71e0af3c 2768 mode = promote_function_mode (valtype, mode, &unsigned_p, func, 1);
2769
2770 return handle_structs_in_regs (mode, valtype, FIRST_RET_REG);
2771}
2772
2773/* Define where to put the arguments to a function.
2774 Value is zero to push the argument on the stack,
2775 or a hard register in which to store the argument.
2776
2777 MODE is the argument's machine mode.
2778 TYPE is the data type of the argument (as a tree).
2779 This is null for libcalls where that information may
2780 not be available.
2781 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2782 the preceding args and about the function being called.
2783 NAMED is nonzero if this argument is a named parameter
2784 (otherwise it is an extra parameter matching an ellipsis).
2785
2786 On MCore the first args are normally in registers
2787 and the rest are pushed. Any arg that starts within the first
2788 NPARM_REGS words is at least partially passed in a register unless
2789 its data type forbids. */
08903e08 2790
4665ac17 2791static rtx
d5cc9181 2792mcore_function_arg (cumulative_args_t cum, enum machine_mode mode,
4665ac17 2793 const_tree type, bool named)
2794{
2795 int arg_reg;
2796
88042663 2797 if (! named || mode == VOIDmode)
2798 return 0;
2799
fe984136 2800 if (targetm.calls.must_pass_in_stack (mode, type))
2801 return 0;
2802
d5cc9181 2803 arg_reg = ROUND_REG (*get_cumulative_args (cum), mode);
2804
2805 if (arg_reg < NPARM_REGS)
2806 return handle_structs_in_regs (mode, type, FIRST_PARM_REG + arg_reg);
2807
2808 return 0;
2809}
2810
4665ac17 2811static void
d5cc9181 2812mcore_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
2813 const_tree type, bool named ATTRIBUTE_UNUSED)
2814{
2815 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
2816
2817 *cum = (ROUND_REG (*cum, mode)
2818 + (int)named * mcore_num_arg_regs (mode, type));
2819}
2820
2821static unsigned int
2822mcore_function_arg_boundary (enum machine_mode mode,
2823 const_tree type ATTRIBUTE_UNUSED)
2824{
2825 /* Doubles must be aligned to an 8 byte boundary. */
2826 return (mode != BLKmode && GET_MODE_SIZE (mode) == 8
2827 ? BIGGEST_ALIGNMENT
2828 : PARM_BOUNDARY);
2829}
2830
2831/* Returns the number of bytes of argument registers required to hold *part*
2832 of a parameter of machine mode MODE and type TYPE (which may be NULL if
dab66575 2833 the type is not known). If the argument fits entirely in the argument
2834 registers, or entirely on the stack, then 0 is returned. CUM is the
2835 number of argument registers already used by earlier parameters to
2836 the function. */
08903e08 2837
78a52f11 2838static int
d5cc9181 2839mcore_arg_partial_bytes (cumulative_args_t cum, enum machine_mode mode,
78a52f11 2840 tree type, bool named)
8f90be4c 2841{
d5cc9181 2842 int reg = ROUND_REG (*get_cumulative_args (cum), mode);
2843
2844 if (named == 0)
2845 return 0;
2846
fe984136 2847 if (targetm.calls.must_pass_in_stack (mode, type))
2848 return 0;
2849
2850 /* REG is not the *hardware* register number of the register that holds
2851 the argument, it is the *argument* register number. So for example,
2852 the first argument to a function goes in argument register 0, which
2853 translates (for the MCore) into hardware register 2. The second
2854 argument goes into argument register 1, which translates into hardware
2855 register 3, and so on. NPARM_REGS is the number of argument registers
2856 supported by the target, not the maximum hardware register number of
2857 the target. */
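  /* A worked example with hypothetical values: an argument needing four
     words whose first word lands in argument register 4 has only
     argument registers 4 and 5 left (NPARM_REGS being 6), so
     2 * UNITS_PER_WORD == 8 bytes are passed in registers and the
     remainder goes on the stack.  */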
2858 if (reg >= NPARM_REGS)
2859 return 0;
2860
2861 /* If the argument fits entirely in registers, return 0. */
2862 if (reg + mcore_num_arg_regs (mode, type) <= NPARM_REGS)
2863 return 0;
2864
2865 /* The argument overflows the number of available argument registers.
2866 Compute how many argument registers have not yet been assigned to
2867 hold an argument. */
2868 reg = NPARM_REGS - reg;
2869
2870 /* Return partially in registers and partially on the stack. */
78a52f11 2871 return reg * UNITS_PER_WORD;
2872}
2873\f
a0ab749a 2874/* Return nonzero if SYMBOL is marked as being dllexport'd. */
08903e08 2875
8f90be4c 2876int
08903e08 2877mcore_dllexport_name_p (const char * symbol)
2878{
2879 return symbol[0] == '@' && symbol[1] == 'e' && symbol[2] == '.';
2880}
2881
a0ab749a 2882/* Return nonzero if SYMBOL is marked as being dllimport'd. */
08903e08 2883
8f90be4c 2884int
08903e08 2885mcore_dllimport_name_p (const char * symbol)
2886{
2887 return symbol[0] == '@' && symbol[1] == 'i' && symbol[2] == '.';
2888}
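/* These predicates test for the encodings added by mcore_mark_dllexport
   and mcore_mark_dllimport below: an exported "foo" is renamed to
   "@e.foo", and an imported one to "@i.__imp_foo".  */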
2889
2890/* Mark a DECL as being dllexport'd. */
08903e08 2891
8f90be4c 2892static void
08903e08 2893mcore_mark_dllexport (tree decl)
8f90be4c 2894{
cbd3488b 2895 const char * oldname;
2896 char * newname;
2897 rtx rtlname;
2898 tree idp;
2899
2900 rtlname = XEXP (DECL_RTL (decl), 0);
2901
2902 if (GET_CODE (rtlname) == MEM)
2903 rtlname = XEXP (rtlname, 0);
2904 gcc_assert (GET_CODE (rtlname) == SYMBOL_REF);
2905 oldname = XSTR (rtlname, 0);
2906
2907 if (mcore_dllexport_name_p (oldname))
2908 return; /* Already done. */
2909
5ead67f6 2910 newname = XALLOCAVEC (char, strlen (oldname) + 4);
2911 sprintf (newname, "@e.%s", oldname);
2912
2913 /* We pass newname through get_identifier to ensure it has a unique
2914 address. RTL processing can sometimes peek inside the symbol ref
2915 and compare the string's addresses to see if two symbols are
2916 identical. */
2917 /* ??? At least I think that's why we do this. */
2918 idp = get_identifier (newname);
2919
2920 XEXP (DECL_RTL (decl), 0) =
f1c25d3b 2921 gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp));
2922}
2923
2924/* Mark a DECL as being dllimport'd. */
08903e08 2925
8f90be4c 2926static void
08903e08 2927mcore_mark_dllimport (tree decl)
8f90be4c 2928{
cbd3488b 2929 const char * oldname;
2930 char * newname;
2931 tree idp;
2932 rtx rtlname;
2933 rtx newrtl;
2934
2935 rtlname = XEXP (DECL_RTL (decl), 0);
2936
2937 if (GET_CODE (rtlname) == MEM)
2938 rtlname = XEXP (rtlname, 0);
2939 gcc_assert (GET_CODE (rtlname) == SYMBOL_REF);
2940 oldname = XSTR (rtlname, 0);
8f90be4c 2941
2942 gcc_assert (!mcore_dllexport_name_p (oldname));
2943 if (mcore_dllimport_name_p (oldname))
2944 return; /* Already done. */
2945
2946 /* ??? One can well ask why we're making these checks here,
2947 and that would be a good question. */
2948
2949 /* Imported variables can't be initialized. */
2950 if (TREE_CODE (decl) == VAR_DECL
2951 && !DECL_VIRTUAL_P (decl)
2952 && DECL_INITIAL (decl))
2953 {
dee15844 2954 error ("initialized variable %q+D is marked dllimport", decl);
2955 return;
2956 }
2957
2958 /* `extern' needn't be specified with dllimport.
2959 Specify `extern' now and hope for the best. Sigh. */
2960 if (TREE_CODE (decl) == VAR_DECL
2961 /* ??? Is this test for vtables needed? */
2962 && !DECL_VIRTUAL_P (decl))
2963 {
2964 DECL_EXTERNAL (decl) = 1;
2965 TREE_PUBLIC (decl) = 1;
2966 }
2967
5ead67f6 2968 newname = XALLOCAVEC (char, strlen (oldname) + 11);
2969 sprintf (newname, "@i.__imp_%s", oldname);
2970
2971 /* We pass newname through get_identifier to ensure it has a unique
2972 address. RTL processing can sometimes peek inside the symbol ref
2973 and compare the string's addresses to see if two symbols are
2974 identical. */
2975 /* ??? At least I think that's why we do this. */
2976 idp = get_identifier (newname);
2977
2978 newrtl = gen_rtx_MEM (Pmode,
2979 gen_rtx_SYMBOL_REF (Pmode,
2980 IDENTIFIER_POINTER (idp)));
2981 XEXP (DECL_RTL (decl), 0) = newrtl;
2982}
2983
2984static int
08903e08 2985mcore_dllexport_p (tree decl)
2986{
2987 if ( TREE_CODE (decl) != VAR_DECL
2988 && TREE_CODE (decl) != FUNCTION_DECL)
2989 return 0;
2990
91d231cb 2991 return lookup_attribute ("dllexport", DECL_ATTRIBUTES (decl)) != 0;
2992}
2993
2994static int
08903e08 2995mcore_dllimport_p (tree decl)
2996{
2997 if ( TREE_CODE (decl) != VAR_DECL
2998 && TREE_CODE (decl) != FUNCTION_DECL)
2999 return 0;
3000
91d231cb 3001 return lookup_attribute ("dllimport", DECL_ATTRIBUTES (decl)) != 0;
3002}
3003
fb49053f 3004/* We must mark dll symbols specially. Definitions of dllexport'd objects
14bc6742 3005 install some info in the .drectve (PE) or .exports (ELF) sections. */
3006
3007static void
08903e08 3008mcore_encode_section_info (tree decl, rtx rtl ATTRIBUTE_UNUSED, int first ATTRIBUTE_UNUSED)
8f90be4c 3009{
3010 /* Mark the decl so we can tell from the rtl whether the object is
3011 dllexport'd or dllimport'd. */
3012 if (mcore_dllexport_p (decl))
3013 mcore_mark_dllexport (decl);
3014 else if (mcore_dllimport_p (decl))
3015 mcore_mark_dllimport (decl);
3016
3017 /* It might be that DECL has already been marked as dllimport, but
3018 a subsequent definition nullified that. The attribute is gone
3019 but DECL_RTL still has @i.__imp_foo. We need to remove that. */
3020 else if ((TREE_CODE (decl) == FUNCTION_DECL
3021 || TREE_CODE (decl) == VAR_DECL)
3022 && DECL_RTL (decl) != NULL_RTX
3023 && GET_CODE (DECL_RTL (decl)) == MEM
3024 && GET_CODE (XEXP (DECL_RTL (decl), 0)) == MEM
3025 && GET_CODE (XEXP (XEXP (DECL_RTL (decl), 0), 0)) == SYMBOL_REF
3026 && mcore_dllimport_name_p (XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0)))
3027 {
3cce094d 3028 const char * oldname = XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0);
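      /* Skip the 9-character "@i.__imp_" prefix to recover the original
         symbol name.  */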
8f90be4c 3029 tree idp = get_identifier (oldname + 9);
f1c25d3b 3030 rtx newrtl = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp));
3031
3032 XEXP (DECL_RTL (decl), 0) = newrtl;
3033
3034 /* We previously set TREE_PUBLIC and DECL_EXTERNAL.
3035 ??? We leave these alone for now. */
3036 }
3037}
3038
3039/* Undo the effects of the above. */
3040
3041static const char *
08903e08 3042mcore_strip_name_encoding (const char * str)
3043{
3044 return str + (str[0] == '@' ? 3 : 0);
3045}
3046
3047/* MCore specific attribute support.
3048 dllexport - for exporting a function/variable that will live in a dll
3049 dllimport - for importing a function/variable from a dll
3050 naked - do not create a function prologue/epilogue. */
8f90be4c 3051
3052/* Handle a "naked" attribute; arguments as in
3053 struct attribute_spec.handler. */
08903e08 3054
91d231cb 3055static tree
3056mcore_handle_naked_attribute (tree * node, tree name, tree args ATTRIBUTE_UNUSED,
3057 int flags ATTRIBUTE_UNUSED, bool * no_add_attrs)
3058{
3059 if (TREE_CODE (*node) == FUNCTION_DECL)
3060 {
3061 /* PR14310 - don't complain about lack of return statement
3062 in naked functions. The solution here is a gross hack
3063 but this is the only way to solve the problem without
3064 adding a new feature to GCC. I did try submitting a patch
3065 that would add such a new feature, but it was (rightfully)
3066 rejected on the grounds that it was creeping featurism,
3067 so hence this code. */
3068 if (warn_return_type)
3069 {
3070 saved_warn_return_type = warn_return_type;
3071 warn_return_type = 0;
3072 saved_warn_return_type_count = 2;
3073 }
3074 else if (saved_warn_return_type_count)
3075 saved_warn_return_type_count = 2;
3076 }
3077 else
3078 {
3079 warning (OPT_Wattributes, "%qE attribute only applies to functions",
3080 name);
91d231cb 3081 *no_add_attrs = true;
3082 }
3083
91d231cb 3084 return NULL_TREE;
3085}
3086
3087/* ??? It looks like this is PE specific? Oh well, this is what the
3088 old code did as well. */
8f90be4c 3089
ae46c4e0 3090static void
08903e08 3091mcore_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
3092{
3093 int len;
0139adca 3094 const char * name;
8f90be4c 3095 char * string;
f27cd94d 3096 const char * prefix;
3097
3098 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
3099
3100 /* Strip off any encoding in name. */
772c5265 3101 name = (* targetm.strip_name_encoding) (name);
3102
3103 /* The object is put in, for example, section .text$foo.
3104 The linker will then ultimately place it in .text
3105 (everything from the $ on is stripped). */
3106 if (TREE_CODE (decl) == FUNCTION_DECL)
3107 prefix = ".text$";
f710504c 3108 /* For compatibility with EPOC, we ignore the fact that the
8f90be4c 3109 section might have relocs against it. */
4e4d733e 3110 else if (decl_readonly_section (decl, 0))
3111 prefix = ".rdata$";
3112 else
3113 prefix = ".data$";
3114
3115 len = strlen (name) + strlen (prefix);
5ead67f6 3116 string = XALLOCAVEC (char, len + 1);
3117
3118 sprintf (string, "%s%s", prefix, name);
3119
3120 DECL_SECTION_NAME (decl) = build_string (len, string);
3121}
3122
3123int
08903e08 3124mcore_naked_function_p (void)
8f90be4c 3125{
91d231cb 3126 return lookup_attribute ("naked", DECL_ATTRIBUTES (current_function_decl)) != NULL_TREE;
8f90be4c 3127}
7c262518 3128
ede75ee8 3129#ifdef OBJECT_FORMAT_ELF
7c262518 3130static void
3131mcore_asm_named_section (const char *name,
3132 unsigned int flags ATTRIBUTE_UNUSED,
3133 tree decl ATTRIBUTE_UNUSED)
3134{
3135 fprintf (asm_out_file, "\t.section %s\n", name);
3136}
ede75ee8 3137#endif /* OBJECT_FORMAT_ELF */
09a2b93a 3138
3139/* Worker function for TARGET_ASM_EXTERNAL_LIBCALL. */
3140
3141static void
3142mcore_external_libcall (rtx fun)
3143{
3144 fprintf (asm_out_file, "\t.import\t");
3145 assemble_name (asm_out_file, XSTR (fun, 0));
3146 fprintf (asm_out_file, "\n");
3147}
3148
3149/* Worker function for TARGET_RETURN_IN_MEMORY. */
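/* Anything wider than two words (8 bytes, given 4-byte words), or of
   variable size, comes back through a hidden memory argument.  */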
3150
09a2b93a 3151static bool
586de218 3152mcore_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
09a2b93a 3153{
586de218 3154 const HOST_WIDE_INT size = int_size_in_bytes (type);
78bc94a2 3155 return (size == -1 || size > 2 * UNITS_PER_WORD);
09a2b93a 3156}
3157
3158/* Worker function for TARGET_ASM_TRAMPOLINE_TEMPLATE.
3159 Output assembler code for a block containing the constant parts
3160 of a trampoline, leaving space for the variable parts.
3161
3162 On the MCore, the trampoline looks like:
3163 lrw r1, function
3164 lrw r13, area
3165 jmp r13
3166 or r0, r0
3167 .literals */
3168
3169static void
3170mcore_asm_trampoline_template (FILE *f)
3171{
3172 fprintf (f, "\t.short 0x7102\n");
3173 fprintf (f, "\t.short 0x7d02\n");
3174 fprintf (f, "\t.short 0x00cd\n");
3175 fprintf (f, "\t.short 0x1e00\n");
3176 fprintf (f, "\t.long 0\n");
3177 fprintf (f, "\t.long 0\n");
3178}
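/* A sketch of the resulting layout, matching the offsets used by
   mcore_trampoline_init below: bytes 0-7 hold the four fixed
   instructions, the .long at offset 8 receives the static chain value,
   and the .long at offset 12 receives the target function's address.  */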
3179
3180/* Worker function for TARGET_TRAMPOLINE_INIT. */
3181
3182static void
3183mcore_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
3184{
3185 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
3186 rtx mem;
3187
3188 emit_block_move (m_tramp, assemble_trampoline_template (),
3189 GEN_INT (2*UNITS_PER_WORD), BLOCK_OP_NORMAL);
3190
3191 mem = adjust_address (m_tramp, SImode, 8);
3192 emit_move_insn (mem, chain_value);
3193 mem = adjust_address (m_tramp, SImode, 12);
3194 emit_move_insn (mem, fnaddr);
3195}
3196
3197/* Implement TARGET_LEGITIMATE_CONSTANT_P
3198
3199 On the MCore, allow anything but a double. */
3200
3201static bool
3202mcore_legitimate_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
3203{
3204 return GET_CODE (x) != CONST_DOUBLE;
3205}